max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
data/water/CMEMS_ocean_pH/viz/generate.py | ilopezgp/human_impacts | 4 | 12762951 | <filename>data/water/CMEMS_ocean_pH/viz/generate.py
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Load the production data.
data = pd.read_csv('../processed/CMEMS_average_ocean_pH.csv')
# Parse the integer year column into datetimes so Altair can encode it as temporal.
data['year'] = pd.to_datetime(data['year'], format='%Y')
# Round pH for tidy tooltip display.
data['pH'] = round(data['pH'], 3)
#%%
# Generate a plot for global average surface pH.
# Base chart: x/y/tooltip encodings shared by the line, point and band layers below.
chart = alt.Chart(data).encode(
    x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
    y=alt.Y(field='pH', type='quantitative', title='ocean pH', scale=alt.Scale(domain=[8.05, 8.12])),
    # NOTE(review): pH tooltip is typed 'nominal' — presumably so the
    # pre-rounded value renders verbatim; confirm if quantitative is wanted.
    tooltip=[alt.Tooltip(field='year', type='temporal', title='year', format='%Y'),
             alt.Tooltip(field='pH', type='nominal', title='pH')]
).properties(width='container', height=300)
# Add uncertainty bands spanning the pH_uncert_low / pH_uncert_high columns.
bands = chart.mark_area(color='dodgerblue', fillOpacity=0.4).encode(
    x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
    y=alt.Y('pH_uncert_low:Q', scale=alt.Scale(zero=False)),
    y2='pH_uncert_high:Q'
).properties(width='container', height=300)
l = chart.mark_line(color='dodgerblue')
p = chart.mark_point(color='dodgerblue', filled=True)
# Layer line + points over the uncertainty band and emit a Vega-Lite spec.
layer = alt.layer(l, p, bands)
layer.save('surface_ocean_pH.json')
# %%
# Load the production data.
data = pd.read_csv('../processed/CMEMS_trends_ocean_pH.csv')
data['year'] = pd.to_datetime(data['year'], format='%Y')
# Keep only the [H+] percentage-trend rows; [1:] drops the first (baseline) entry.
agg_data = pd.DataFrame()
agg_data['year'] = data[data['Measure type']=='[H+] percentage trend']['year'][1:]
agg_data['H+ percentage trend'] = data[data['Measure type']=='[H+] percentage trend']['Value'][1:]
# Show full frames when printing during interactive inspection.
pd.set_option("display.max_rows", None, "display.max_columns", None)
#%%
# Generate a plot for global average surface pH
chart = alt.Chart(agg_data).encode(
    x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
    y=alt.Y(field=r'H+ percentage trend', type='quantitative', title=r'[H+] percentage change'),
    tooltip=[alt.Tooltip(field='year', type='temporal', title='year', format='%Y'),
             alt.Tooltip(field=r'H+ percentage trend', type='nominal', title=r'H+ percentage trend')]
).properties(width='container', height=300)
l = chart.mark_line(color='dodgerblue')
p = chart.mark_point(color='dodgerblue', filled=True)
layer = alt.layer(l, p)
# Emit a Vega-Lite spec for the site's visualization pipeline.
layer.save('percentage_change_H.json')
# %% | 2.421875 | 2 |
utils/nuswide_dataset.py | stevehuanghe/multi_label_zsl | 4 | 12762952 | <reponame>stevehuanghe/multi_label_zsl
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import os
from pathlib import Path
import pickle
import numpy as np
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import csv
import copy
class NUSWideDataset(data.Dataset):
    """NUS-WIDE dataset with a 925-seen / 76-unseen label split.

    Seen labels are the 1000-tag vocabulary minus the 81 concepts; unseen
    labels are the 81 concepts minus five labels shared with common
    benchmarks ('plane', 'zebra', 'valley', 'tiger', 'castle').
    """

    def __init__(self, image_dir, anno_dir, transform=None, n_val=0, mode="train", n_unseen=16, unseen_file=None):
        """Set up image paths and ground-truth label matrices.

        Args:
            image_dir: directory holding the image files.
            anno_dir: NUS-WIDE annotation root (Concepts81.txt, NUS_WID_Tags/, ImageList/).
            transform: optional image transformation callable.
            n_val: unused here; kept for interface parity with NUSWideDataset81.
            mode: "train" loads the train split, anything else the test split.
            n_unseen, unseen_file: unused here; kept for interface parity.
        """
        assert n_val >= 0
        self.image_dir = image_dir
        self.anno_dir = anno_dir
        self.transform = transform
        self.mode = mode
        self.valid_ids = []  # indices of image-list rows whose file exists on disk
        # Labels shared with other benchmarks; excluded from the unseen set.
        common = ['plane', 'zebra', 'valley', 'tiger', 'castle']
        unseen_labels_file = Path(anno_dir) / Path("Concepts81.txt")
        seen_labels_file = Path(anno_dir) / Path("NUS_WID_Tags/TagList1k.txt")
        unseen_cats = self.load_label_set(unseen_labels_file)
        seen_cats = self.load_label_set(seen_labels_file)
        assert len(seen_cats) == 1000
        assert len(unseen_cats) == 81
        # Seen vocabulary: 1000 tags minus the 81 concepts -> 925 labels.
        seen_cats_new = [x for x in seen_cats if x not in unseen_cats]
        seen_label_idx = [i for i, x in enumerate(seen_cats) if x not in unseen_cats]
        assert len(seen_cats_new) == 925
        self.seen_label_idx = torch.tensor(seen_label_idx).long()
        # Unseen vocabulary: 81 concepts minus the 5 common labels -> 76 labels.
        unseen_cats_new = [x for x in unseen_cats if x not in common]
        assert len(unseen_cats_new) == 76
        unseen_label_idx = [i for i, x in enumerate(unseen_cats) if x not in common]
        self.unseen_label_idx = torch.tensor(unseen_label_idx).long()
        # Column positions in the concatenated (seen ++ unseen) label matrix.
        self.seen_idx = torch.tensor([i for i in range(925)]).long()
        self.unseen_idx = torch.tensor([i + 925 for i in range(len(unseen_cats_new))]).long()
        self.all_cats = seen_cats_new + unseen_cats_new
        self.seen_cats = seen_cats_new
        self.unseen_cats = unseen_cats_new
        self.train_idx = self.seen_idx
        self.val_idx = self.seen_idx
        train_seen_anno = Path(anno_dir) / Path("NUS_WID_Tags/Train_Tags1k.dat")
        test_unseen_anno = Path(anno_dir) / Path("NUS_WID_Tags/Test_Tags81.txt")
        test_seen_anno = Path(anno_dir) / Path("NUS_WID_Tags/Test_Tags1k.dat")
        train_image_file = Path(anno_dir) / Path("ImageList/TrainImagelist.txt")
        test_image_file = Path(anno_dir) / Path("ImageList/TestImagelist.txt")
        # NOTE: load_image_list must run before load_gt_labels — it fills
        # self.valid_ids, which load_gt_labels uses to drop missing images.
        if mode == "train":
            self.img_list = self.load_image_list(train_image_file, image_dir)
            self.gt_labels = self.load_gt_labels(train_seen_anno)[:, self.seen_label_idx]
        else:
            self.img_list = self.load_image_list(test_image_file, image_dir)
            test_unseen_gt = self.load_gt_labels(test_unseen_anno)[:, self.unseen_label_idx]
            test_seen_gt = self.load_gt_labels(test_seen_anno)[:, self.seen_label_idx]
            self.gt_labels = torch.cat([test_seen_gt, test_unseen_gt], dim=1)
        assert len(self.img_list) == self.gt_labels.size(0)

    @staticmethod
    def load_label_set(label_file):
        """Read one label per line, skipping empty lines; cap at 1000 entries."""
        if not os.path.isfile(label_file):
            raise FileNotFoundError(f"file not found: {label_file}")
        label_set = []
        with open(label_file, "r") as fin:
            lines = fin.readlines()
            for line in lines:
                word = line.split('\n')[0]
                if word != '':
                    label_set.append(word)
        return label_set[:1000]

    def load_image_list(self, image_file, image_dir):
        """Resolve image paths; record indices of existing files in self.valid_ids."""
        if not os.path.isfile(image_file):
            raise FileNotFoundError(f"file not found: {image_file}")
        image_list = []
        with open(image_file, "r") as fin:
            lines = fin.readlines()
            for idx, line in enumerate(lines):
                filename = line.split()[0]
                # Images are stored flat under image_dir, keyed by the trailing
                # '_'-separated component of the listed name.
                filename = os.path.join(image_dir, filename.split('_')[-1])
                if os.path.isfile(filename):
                    image_list.append(filename)
                    self.valid_ids.append(idx)
        return image_list

    def load_gt_labels(self, anno_file):
        """Parse a whitespace-separated 0/1 annotation file into an (N, C) long
        tensor, keeping only rows whose image exists (self.valid_ids)."""
        if not os.path.isfile(anno_file):
            raise FileNotFoundError(f"file not found: {anno_file}")
        gt_labels = []
        with open(anno_file, "r") as fin:
            reader = fin.readlines()
            for line in reader:
                line = line.split()
                labels = torch.from_numpy(np.array(line) == '1').long()
                gt_labels.append(labels.view(1, -1))
        assert len(self.valid_ids) > 0  # load_image_list must have run first
        gt_labels = torch.cat(gt_labels, dim=0)[self.valid_ids]
        return gt_labels

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        """Return (image tensor, label vector) for the given sample index."""
        labels = self.gt_labels[index]
        # BUGFIX: img_list entries were already joined with image_dir in
        # load_image_list; joining image_dir again corrupted relative paths
        # (the old double-join only worked when image_dir was absolute).
        image = Image.open(self.img_list[index]).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        else:
            image = transforms.ToTensor()(image)
        return image, labels
class NUSWideDataset81(data.Dataset):
    """NUS-WIDE dataset over the 81-concept vocabulary with a configurable
    seen/unseen label split (fixed list, random sample, or all labels)."""

    def __init__(self, image_dir, anno_dir, transform=None, n_val=0, mode="train", n_unseen=16, unseen_file=None):
        """Set up image paths and ground-truth label matrices.

        Args:
            image_dir: directory holding the image files.
            anno_dir: NUS-WIDE annotation root.
            transform: optional image transformation callable.
            n_val: unused; kept for interface parity with NUSWideDataset.
            mode: "train" loads the train split, anything else the test split.
            n_unseen: number of randomly sampled unseen labels when
                unseen_file is None; 0 makes every label "unseen" (then
                remapped to seen below).
            unseen_file: optional text file with one unseen label name per line.
        """
        assert n_val >= 0
        self.image_dir = image_dir
        self.anno_dir = anno_dir
        self.transform = transform
        self.mode = mode
        self.valid_ids = []  # indices of image-list rows whose file exists on disk
        # Labels that must never be sampled as unseen.
        common = ['plane', 'zebra', 'valley', 'tiger', 'castle']
        labels_file = Path(anno_dir) / Path("Concepts81.txt")
        all_cats = self.load_label_set(labels_file)
        unseen_names = []
        if unseen_file is not None:
            # Fixed unseen split read from file.
            with Path(unseen_file).open('r') as fin:
                lines = fin.readlines()
                for line in lines:
                    label = line.split('\n')[0]
                    unseen_names.append(label)
        elif n_unseen > 0:
            # Randomly sample n_unseen labels, rejecting any draw that
            # includes one of the protected 'common' labels.
            all_cats_copy = copy.deepcopy(all_cats)
            while True:
                np.random.shuffle(all_cats_copy)
                unseen_names = all_cats_copy[:n_unseen]
                if set(unseen_names).intersection(set(common)) == set():
                    break
        else:
            # Degenerate split: every label is unseen.
            unseen_names = all_cats
        self.n_unseen = len(unseen_names)
        self.n_seen = len(all_cats) - self.n_unseen
        self.n_all = len(all_cats)
        seen_cats = []
        unseen_cats = []
        seen_idx = []
        unseen_idx = []
        for i, cat in enumerate(all_cats):
            if cat not in unseen_names:
                seen_idx.append(i)
                seen_cats.append(cat)
            else:
                unseen_idx.append(i)
                unseen_cats.append(cat)
        if len(seen_cats) == 0:
            # All labels unseen: treat them as seen so training still works.
            self.n_seen = self.n_all
            seen_cats = unseen_cats
            seen_idx = unseen_idx
        self.seen_idx = torch.tensor(seen_idx).long()
        self.unseen_idx = torch.tensor(unseen_idx).long()
        self.all_cats = all_cats
        self.seen_cats = seen_cats
        self.unseen_cats = unseen_cats
        # TODO: train/val index sets are currently identical.
        self.train_idx = self.seen_idx
        self.val_idx = self.seen_idx
        train_anno = Path(anno_dir) / Path("NUS_WID_Tags/Train_Tags81.txt")
        test_anno = Path(anno_dir) / Path("NUS_WID_Tags/Test_Tags81.txt")
        train_image_file = Path(anno_dir) / Path("ImageList/TrainImagelist.txt")
        test_image_file = Path(anno_dir) / Path("ImageList/TestImagelist.txt")
        # load_image_list must run first: it fills self.valid_ids, which
        # load_gt_labels uses to drop rows for missing images.
        if mode == "train":
            self.img_list = self.load_image_list(train_image_file, image_dir)
            self.gt_labels = self.load_gt_labels(train_anno)[:, self.seen_idx]
        else:
            self.img_list = self.load_image_list(test_image_file, image_dir)
            self.gt_labels = self.load_gt_labels(test_anno)
        # Drop samples with no positive label at all.
        nonempty_idx = [i for i in range(self.gt_labels.size(0)) if self.gt_labels[i].sum() > 0]
        keep = set(nonempty_idx)  # set membership: avoids the old O(n^2) list scan
        self.img_list = [x for i, x in enumerate(self.img_list) if i in keep]
        self.gt_labels = self.gt_labels[nonempty_idx, :]
        assert len(self.img_list) == self.gt_labels.size(0)

    @staticmethod
    def load_label_set(label_file, n_max=1000):
        """Read one label per line, skipping empty lines; cap at n_max entries."""
        if not os.path.isfile(label_file):
            raise FileNotFoundError(f"file not found: {label_file}")
        label_set = []
        with open(label_file, "r") as fin:
            lines = fin.readlines()
            for line in lines:
                word = line.split('\n')[0]
                if word != '':
                    label_set.append(word)
        return label_set[:n_max]

    def load_image_list(self, image_file, image_dir):
        """Resolve image paths; record indices of existing files in self.valid_ids."""
        if not os.path.isfile(image_file):
            raise FileNotFoundError(f"file not found: {image_file}")
        image_list = []
        with open(image_file, "r") as fin:
            lines = fin.readlines()
            for idx, line in enumerate(lines):
                filename = line.split()[0]
                # Images are stored flat under image_dir, keyed by the trailing
                # '_'-separated component of the listed name.
                filename = os.path.join(image_dir, filename.split('_')[-1])
                if os.path.isfile(filename):
                    image_list.append(filename)
                    self.valid_ids.append(idx)
        return image_list

    def load_gt_labels(self, anno_file):
        """Parse a whitespace-separated 0/1 annotation file into an (N, C) long
        tensor, keeping only rows whose image exists (self.valid_ids)."""
        if not os.path.isfile(anno_file):
            raise FileNotFoundError(f"file not found: {anno_file}")
        gt_labels = []
        with open(anno_file, "r") as fin:
            reader = fin.readlines()
            for line in reader:
                line = line.split()
                labels = torch.from_numpy(np.array(line) == '1').long()
                gt_labels.append(labels.view(1, -1))
        assert len(self.valid_ids) > 0  # load_image_list must have run first
        gt_labels = torch.cat(gt_labels, dim=0)[self.valid_ids]
        return gt_labels

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        """Return (image tensor, label vector) for the given sample index."""
        labels = self.gt_labels[index]
        # BUGFIX: img_list entries were already joined with image_dir in
        # load_image_list; joining image_dir again corrupted relative paths.
        image = Image.open(self.img_list[index]).convert('RGB')
        if self.transform is not None:
            image = self.transform(image)
        else:
            image = transforms.ToTensor()(image)
        return image, labels
if __name__ == '__main__':
    # Smoke test: load the train split and print the first batch's shapes.
    from torch.utils.data import DataLoader

    def transform_fn(image):
        # Standard ImageNet-style augmentation + normalization.
        transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406),
                                 (0.229, 0.224, 0.225))])
        return transform(image)

    # NOTE(review): machine-specific absolute paths — adjust before running.
    nus_img_dir = '/media/hehuang/Data/nus_wide/images'
    nus_anno_dir = '/media/hehuang/Data/nus_wide/annotations'
    dataset = NUSWideDataset(nus_img_dir, nus_anno_dir, transform=transform_fn, mode="train")
    loader = DataLoader(dataset,
                        batch_size=10,
                        num_workers=2,
                        shuffle=False)
    print(len(dataset))
    for image, target in loader:
        print(image.size())
        print(target.size())
        break
| 2.4375 | 2 |
src/catsys/imgs/hgencoder.py | AtomCrafty/catsystem-py | 6 | 12762953 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""HG image encoding utility
Provides the encoding and decoding methods for HG image encodings (HG-2, HG-3).
"""
__version__ = '1.0.0'
__date__ = '2020-09-19'
__author__ = '<NAME>'
__all__ = []
#######################################################################################
import io, os, struct, zlib
import enum, collections # for type declarations only
from typing import Iterable, Iterator, List, Optional, NoReturn, Tuple, Type, Union # for hinting in declarations
# local imports
from ._baseimg import ImageContainer, ImageFrame
## PREDECLARE TYPES ##
# Named tuples shared by the HG-2/HG-3 codec routines.
HgSlice = collections.namedtuple('HgSlice', ('index', 'length'))
HgData = collections.namedtuple('HgData', ('data', 'cmd'))
Point = collections.namedtuple('Point', ('x', 'y'))
Size = collections.namedtuple('Size', ('width', 'height'))
Rect = collections.namedtuple('Rect', ('x', 'y', 'width', 'height'))
#FORMAT: in byte-order (little endian)
#24bit = BGR
#32bit = BGRA
Color = collections.namedtuple('Color', ('r', 'g', 'b', 'a'))


def get_color(color: Union[int, Tuple[int, int, int], Tuple[int, int, int, int]]) -> Color:
    """Normalize an int packed as 0xAARRGGBB, or an (r, g, b[, a]) tuple, to a Color.

    A 3-tuple receives an opaque alpha (0xff).
    """
    if not isinstance(color, int):
        if len(color) == 3:
            # BUGFIX: was Color(*color[0], 0xff), which unpacked only the
            # first channel (an int) and raised TypeError.
            return Color(*color, 0xff)
        return Color(*color)
    return Color((color >> 16) & 0xff, (color >> 8) & 0xff, color & 0xff, (color >> 24) & 0xff)
class HgAttribute(object):
    """Positioned rectangle attribute with an RGBA color (HG frame metadata)."""
    # BUGFIX: __slots__ listed 'rawcolor' (which is shadowed by the property
    # below) instead of 'color', so __init__'s `self.color = ...` always
    # raised AttributeError.
    __slots__ = ('x', 'y', 'width', 'height', 'color')

    def __init__(self, x: int, y: int, width: int, height: int, color: "Union[int, Color]"):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        if not isinstance(color, int):
            if len(color) == 3:
                # 3-tuple: append opaque alpha.
                # BUGFIX: was Color(*color[0], 0xff) — unpacked one channel.
                color = Color(*color, 0xff)
            else:
                color = Color(*color)
        else:
            # int packed as 0xAARRGGBB
            color = Color((color >> 16) & 0xff, (color >> 8) & 0xff, color & 0xff, (color >> 24) & 0xff)
        self.color = color

    @property
    def point(self) -> "Point":
        return Point(self.x, self.y)

    @point.setter
    def point(self, point: "Point"):
        self.x, self.y = point

    @property
    def size(self) -> "Size":
        return Size(self.width, self.height)

    @size.setter
    def size(self, size: "Size"):
        self.width, self.height = size

    @property
    def rawcolor(self) -> int:
        """Color packed as 0xAARRGGBB."""
        return ((self.color[0] << 16) | (self.color[1] << 8) |
                (self.color[2]) | (self.color[3] << 24))

    @rawcolor.setter
    def rawcolor(self, rawcolor):
        self.color = Color(
            (rawcolor >> 16) & 0xff, (rawcolor >> 8) & 0xff,
            (rawcolor) & 0xff, (rawcolor >> 24) & 0xff)
class StandardInfo(object):
    # NOTE(review): this definition is shadowed by the second `StandardInfo`
    # class declared later in this module, so it is dead code as written.
    # It also looks unfinished: __init__ ignores every constructor argument,
    # and it assigns attributes (bpc, canvasx, canvasy, canvaswidth,
    # canvasheight) that are missing from __slots__, so instantiating it
    # would raise AttributeError.
    __slots__ = ('width', 'height', 'bpp', 'offsetx', 'offsety', 'fullwidth', 'fullheight', 'transparent', 'originx', 'originy')
    #
    def __init__(self, width:int=0, height:int=0, bpp:int=0, offsetx:int=0, offsety:int=0, fullwidth:int=0, fullheight:int=0, transparent:bool=False, originx:int=0, originy:int=0):
        self.width = 0
        self.height = 0
        self.bpp = 0 # pixel bit depth (bits per pixel)
        self.bpc = 0 # channel bit depth (bits per channel) 0 => 8
        self.canvasx = 0
        self.canvasy = 0
        self.canvaswidth = 0
        self.canvasheight = 0
        self.transparent = False
        self.originx = 0
        self.originy = 0
    @property
    def size(self) -> Size: return Size(self.width, self.height)
    @size.setter
    def size(self, size:Size): self.width, self.height = size
    #
    @property
    def canvas(self) -> Rect: return Rect(self.canvasx, self.canvasy, self.canvaswidth, self.canvasheight)
    @canvas.setter
    def canvas(self, canvasrect:Rect): self.canvasx, self.canvasy, self.canvaswidth, self.canvasheight = canvasrect
    #
    @property
    def canvassize(self) -> Size: return Size(self.canvaswidth, self.canvasheight)
    @canvassize.setter
    def canvassize(self, canvassize:Size): self.canvaswidth, self.canvasheight = canvassize
    #
    @property
    def canvaspos(self) -> Point: return Point(self.canvasx, self.canvasy)
    @canvaspos.setter
    def canvaspos(self, canvaspos:Point): self.canvasx, self.canvasy = canvaspos
    #
    @property
    def origin(self) -> Point: return Point(self.originx, self.originy)
    @origin.setter
    def origin(self, origin:Point): self.originx, self.originy = origin
    #
    @property
    def bytedepth(self) -> int: return (self.bpp + 7) // 8
    # @bytedepth.setter
    # def bytedepth(self, bytedepth:int): self.bpp = bytedepth * 8
    @property
    def stride(self) -> int: return (self.width * self.bpp + 7) // 8
    @property
    def buffersize(self) -> int: return (self.stride * self.height)
    @property
    def hasalpha(self) -> bool: return self.bpp == 32
    #
    @property
    def depthmax(self) -> int:
        # NOTE(review): `self.depth` is never assigned on this class, so
        # this property would raise AttributeError if it were ever called.
        return (((1 << self.depth ** 2) - 1) & 0xff) if self.depth else 0xff
class StandardInfo(object):
    """StandardInfo(**kwargs) -> stdinfo with assigned kwargs

    Holds the HG "stdinfo" header fields: image size, bit depths, canvas
    placement, origin and transparency flag.  Any keyword whose name matches
    a slot or property is assigned on construction.
    """
    # BUGFIX: 'bpc' was assigned in __init__ but missing from __slots__, and
    # __init__ set a misspelled 'transpareny' attribute — both made every
    # construction raise AttributeError.  'depth' is kept from the original
    # slot list for compatibility, though nothing initializes it.
    __slots__ = ('size', 'bpp', 'bpc', 'depth', 'canvassize', 'canvaspos', 'transparent', 'origin')

    def __init__(self, **kwargs):
        self.size = Size(0, 0)
        self.canvassize = Size(0, 0)
        self.canvaspos = Point(0, 0)
        self.origin = Point(0, 0)
        self.transparent = False  # BUGFIX: was 'self.transpareny'
        self.bpp = 0  # bits per pixel; 24 or 32 required
        self.bpc = 0  # bits per channel; only used in HG-2 (see source of info)
        #<https://github.com/morkt/GARbro/blob/c5e13f6db1d24a62eb621c38c6fc31387338d857/ArcFormats/CatSystem/ImageHG2.cs#L117-L126>
        for key, val in kwargs.items():
            setattr(self, key, val)

    #ATTRIBUTE PROPERTIES:

    @property
    def width(self) -> int: return self.size[0]
    @width.setter
    def width(self, width): self.size = Size(width, self.size[1])
    @property
    def height(self) -> int: return self.size[1]
    @height.setter
    def height(self, height): self.size = Size(self.size[0], height)
    #
    @property
    def canvaswidth(self) -> int: return self.canvassize[0]
    @canvaswidth.setter
    def canvaswidth(self, canvaswidth): self.canvassize = Size(canvaswidth, self.canvassize[1])
    @property
    def canvasheight(self) -> int: return self.canvassize[1]
    @canvasheight.setter
    def canvasheight(self, canvasheight): self.canvassize = Size(self.canvassize[0], canvasheight)
    #
    @property
    def canvasx(self) -> int: return self.canvaspos[0]
    @canvasx.setter
    def canvasx(self, canvasx): self.canvaspos = Point(canvasx, self.canvaspos[1])
    @property
    def canvasy(self) -> int: return self.canvaspos[1]
    @canvasy.setter
    def canvasy(self, canvasy): self.canvaspos = Point(self.canvaspos[0], canvasy)
    #
    @property
    def originx(self) -> int: return self.origin[0]
    @originx.setter
    def originx(self, originx): self.origin = Point(originx, self.origin[1])
    @property
    def originy(self) -> int: return self.origin[1]
    @originy.setter
    def originy(self, originy): self.origin = Point(self.origin[0], originy)

    #CALCULATED PROPERTIES:

    @property
    def bytedepth(self) -> int: return (self.bpp + 7) // 8
    @bytedepth.setter
    def bytedepth(self, bytedepth): self.bpp = bytedepth * 8

    #ALIAS PROPERTIES:

    @property
    def bitdepth(self) -> int: return self.bpp
    @bitdepth.setter
    def bitdepth(self, bitdepth): self.bpp = bitdepth
    #
    @property
    def channeldepth(self) -> int: return self.bpc
    @channeldepth.setter
    def channeldepth(self, channeldepth): self.bpc = channeldepth
    @property
    def channelmax(self) -> int:
        # bitmask for the number of bits per channel; 0 means full 8 bits
        return ((2**self.bpc - 1) & 0xff) if self.bpc else 0xff
    #
    @property
    def stride(self) -> int: return (self.width * self.bpp + 7) // 8
    #
    @property
    def buffersize(self) -> int: return (self.stride * self.height)
    #
    @property
    def offsetstride(self) -> int: return (self.canvasx * self.bpp + 7) // 8
    #
    @property
    def canvasstride(self) -> int: return (self.canvaswidth * self.bpp + 7) // 8
    @property
    def canvasbuffersize(self) -> int: return (self.canvasstride * self.canvasheight)
    #
    @property
    def hasalpha(self) -> bool: return self.bpp == 32
# [Flags]
# public enum HgxOptions {
# None = 0,
# Flip = (1 << 0), // Flip vertically (applied after crop when encoding)
# Crop = (1 << 2), // Expand or Shrink
# }
class HgOptions(enum.IntFlag):
    """Bit flags selecting optional HG encode/decode transforms.

    FLIP: vertically flip the image.
    CROP: expand or shrink the canvas around transparent areas.
    ADD_ALPHA / REMOVE_ALPHA: alpha-channel changes, distinguished by an
    extra bit on top of the shared _CHANGE_ALPHA marker.
    """
    FLIP = 0x1           # vertical flip
    CROP = 0x2           # shrink/expand canvas size around transparency
    _CHANGE_ALPHA = 0x4  # marker bit common to both alpha operations
    ADD_ALPHA = 0x4      # alias of _CHANGE_ALPHA
    REMOVE_ALPHA = 0xC   # _CHANGE_ALPHA plus the removal bit (1 << 3)
class HgEncoding(object):
    """HgEncoding() -> HG image encoder/decoder

    NOTE(review): work-in-progress.  The class attributes below call
    make_weight_tables()/make_abs_tables(), which are not defined in this
    module, and the two methods are unfinished drafts: unpack_slicemem
    mixes several attempts (bifbuf/bitbuf naming mismatch, unreachable
    statement after `raise`, a truncated `with memoryview` line that does
    not parse) and unpack_zrle_pt1 has a comment-only body.
    """
    # NOTE(review): factory functions undefined in this module — evaluating
    # the class body would raise NameError even if the file parsed.
    _weighttables:Tuple[List[int], List[int], List[int], List[int]] = make_weight_tables()
    _abstables:Tuple[List[int], List[int]] = make_abs_tables()
    def __init__(self):
        # Decoded pixel buffer plus the stdinfo-style geometry fields.
        self.pixels = bytearray()
        self.options = HgOptions(0)
        self.size = Size(0, 0)
        self.bpp = 0
        self.depth = 0
        self.canvassize = Size(0, 0)
        self.canvaspos = Point(0, 0)
        self.transparent = False
        self.origin = Point(0, 0)
    def unpack_slicemem(self, data:bytes, cmd:bytes):
        # NOTE(review): draft method — see class docstring.
        block0 = block1 = block2 = block3 = buffer = None
        try:
            data = memoryview(zlib.decompress(data))
            cmd = memoryview(zlib.decompress(cmd))
            bifbuf = BitBuffer(cmd, len(cmd))  # NOTE(review): 'bifbuf' vs 'bitbuf' below
            copyflag = bitbuf.read_flag() # is first run non-zero data?
            buflen = bitbuf.read_eliasgamma() # length of output buffer (usually height x stride)
            buffer = memoryview(bytearray(buflen))
            data = data.release()
            cmd = cmd.release()
        except:
            # if isinstance(data, memoryview):
            #     data.release()
            # if isinstance(cmd, memoryview):
            #     cmd.release()
            if isinstance(buffer, memoryview):
                buffer.release()
            raise
            buffer = unpack_datamem(data, cmd)  # NOTE(review): unreachable after raise
        finally:
            if isinstance(data, memoryview):
                data.release()
            if isinstance(cmd, memoryview):
                cmd.release()
            if isinstance(buffer, memoryview):
                buffer.release()
        # NOTE(review): everything below is a second, conflicting draft of the
        # same logic; the final `with memoryview` line is truncated and is a
        # syntax error.
        with memoryview(data) as data:
            with memoryview(cmd) as cmd:
                buffer = unpack_datamem(data, cmd)
        bifbuf = BitBuffer(cmd, len(cmd))
        copyflag = bitbuf.read_flag() # is first run non-zero data?
        buflen = bitbuf.read_eliasgamma() # length of output buffer (usually height x stride)
        _buffer = bytearray(buflen) # already filled with zero-bytes
        buffer = unpack_datamem(data, cmd)
        memoryblocks = [None] * 4
        with memoryview
    def unpack_zrle_pt1(self, data:memoryview, cmd:memoryview) -> Tuple[BitBuffer, copyflag, bytearray]:
        # NOTE(review): stub — the return annotation references the runtime
        # names BitBuffer and copyflag, and the body contains only comments,
        # which is itself a syntax error.
        # with memoryview(zlib.decompress(data)) as data:
        #     with memoryview(zlib.decompress(cmd)) as cmd:
        #         return unpack_datamem2(data, cmd)
        # def unpack_datamem(self, data:memoryview, cmd:memoryview) -> bytearray:
        #     data = zlib.decompress(data)
        #     cmd = zlib.decompress(cmd)
        #     with memoryview(zlib.decompress(data)) as data:
        #         with memoryview(zlib.decompress(cmd)) as cmd:
        #             return unpack_datamem2(data, cmd)
def make_fwd_weight_tables() -> Optional[Tuple[Tuple[int, ...], Tuple[int, ...], Tuple[int, ...], Tuple[int, ...]]]:
    """Stub: build the four forward weight tables used by the HG packer.

    Currently unimplemented and returns None.

    BUGFIX: the original annotation ``List[Tuple[int], Tuple[int],
    Tuple[int], Tuple[int]]`` passed four parameters to ``List`` and raised
    TypeError at import time; corrected to a well-formed type.
    """
    pass
def rgba_image(image: "PIL.Image.Image", needalpha: bool = False) -> "PIL.Image.Image":
    """Return *image* converted to 'RGBA' (when alpha is present or required)
    or 'RGB' (when it is not); images already in the target mode pass through.

    BUGFIX: annotations are string literals because PIL is never imported at
    module level — the original bare ``PIL.Image`` annotation raised
    NameError as soon as this module was imported.
    """
    bands = image.getbands()
    hasalpha = 'A' in bands or 'a' in bands
    if image.mode != 'RGBA' and (needalpha or hasalpha):
        image = image.convert('RGBA')
    elif image.mode != 'RGB' and (not needalpha and not hasalpha):
        image = image.convert('RGB')
    return image
def rgba_bytes(image: "PIL.Image.Image", needalpha: bool = False, orientation: int = 1) -> Tuple[bytes, str]:
    """Convert *image* to raw little-endian BGR(A) pixel bytes.

    Returns ``(pixel_bytes, mode)`` where mode is the PIL mode actually used
    ('RGB' or 'RGBA').  Rows are padded to a 4-byte boundary; *orientation*
    is passed through to PIL's raw encoder (1 = top-down, -1 = bottom-up).

    BUGFIX: the original re-tested ``'RGBA'`` in its elif (so the 24-bit
    branch never ran), always passed rawmode 'BGRA' to tobytes even for RGB
    images, and carried 30+ lines of unreachable dead code after its return.
    """
    # Normalize the mode (mirrors rgba_image, inlined here).
    bands = image.getbands()
    hasalpha = 'A' in bands or 'a' in bands
    if image.mode != 'RGBA' and (needalpha or hasalpha):
        image = image.convert('RGBA')
    elif image.mode != 'RGB' and (not needalpha and not hasalpha):
        image = image.convert('RGB')
    if image.mode == 'RGBA':
        bpp, rawmode = 32, 'BGRA'
    else:  # 'RGB'
        bpp, rawmode = 24, 'BGR'
    # Row stride rounded up to a 4-byte boundary.
    stride = ((image.width * bpp + 7) // 8 + 3) & ~0x3
    return (image.tobytes('raw', rawmode, stride, orientation), image.mode)
def run_tests():
    """Manual smoke-test scaffold for the rgba_* helpers.

    NOTE(review): unfinished — `stride` and `bbox` are undefined, `imgpath`
    is a placeholder, and rgba_bytes(...) returns a tuple, so wrapping it in
    memoryview() would raise TypeError.  The trailing if/elif chain is a
    stale copy of the conversion logic inside rgba_bytes.
    """
    import PIL
    import PIL.Image
    import PIL.ImageOps
    needalpha = False
    orientation = 1
    imgpath = r"path\to\testimage.png"
    with PIL.Image.open(imgpath) as imgfile:
        image = rgba_image(imgfile, needalpha)
    hasalpha = image.mode == 'RGBA'
    mode = image.mode
    with memoryview(rgba_bytes(image, needalpha, orientation)):
        image.tobytes('raw', 'BGRA', stride, orientation)
    image = image.crop(bbox)
    if image.mode != 'RGBA' and (image.mode.endswith('A') or image.mode.endswith('a')):
        image = image.convert('RGBA')
    elif image.mode == 'RGBA':
        bitdepth = 32
        stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
        pixels = image.tobytes('raw', 'BGRA', stride, orientation)
    elif image.mode == 'RGB':
        bitdepth = 24
        stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
        pixels = image.tobytes('raw', 'BGR', stride, orientation)
    elif image.mode == 'L':
        bitdepth = 8
        stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
        pixels = image.tobytes('raw', 'L', stride, orientation)
    elif image.mode == '1':
        bitdepth = 1
        stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
        pixels = image.tobytes('raw', '1', stride, orientation)
    else:
        #TODO: Auto-convert? or fail?
        #image = image.convert('RGBA')
        #bitdepth = 32
        #stride = ((width * bitdepth + 7) // 8 + 3) & ~0x3
        #pixels = image.tobytes('raw', 'BGRA', stride, orientation)
        raise ValueError('Unsupported image mode {0!r} for writing'.format(image.mode))
def pack_from_pixels(pixels:bytearray, sliceidx:int, slicelen:int, width:int, height:int, bpp:int, bpc:int, offsetx:int, offsety:int, fullwidth:int, fullheight:int, flip:bool):
    """Draft of the HG-3 slice packer (pixel buffer -> delta/normalized blocks).

    NOTE(review): unfinished work-in-progress assembled from the decoder
    (unpack_into_pixels) and does not parse or run as written:
    * undefined names: pixlen, buffer, hgslice, sect0..sect3, xy, v0..v3,
      delta, dst, unpack_from, make_abs_tables;
    * syntax errors: an extra ')' in the second `for yx in range(...)` loop
      and the truncated `abstable[delta[]` subscript near the end;
    * the code after the try/finally block is a dead remnant of an earlier
      draft (it re-does the absolute transform over `delta`).
    """
    bytedepth = bpp // 8 # only bpp 24,32 not supported by CS2
    stride = (width * pixlen + 3) & ~0x3
    fullstride = (fullwidth * bytedepth + 3) & ~0x3
    colstart = offsetx
    colend = offsetx + width
    rowstart = offsety + sliceidx
    rowend = offsety + sliceidx + slicelen
    fullstart = rowstart * fullstride + colstart * bytedepth
    fullend = rowend * fullstride + colend * bytedepth
    del colstart, colend, rowstart, rowend
    if not flip:
        normyrange = range(fullstart, fullend, fullstride)
        deltayrange = range(fullstart + stride, fullend, fullstride)
        deltaxrange = range(fullstart + bytedepth, fullstart + stride, 1)
    else:
        normyrange = range(fullend - stride, fullstart - 1, fullstride)
        deltayrange = range(fullend - stride - stride, fullstart - 1, -fullstride)
        deltaxrange = range(fullend - stride + bytedepth, fullend, 1)
    # buflen = len(buffer)
    # sectlen = buflen // 4
    sectlen = len(buffer) // 4
    # sect0, sect1, sect2, sect3 = sects = range(0, sectlen * 4, sectlen)
    abstable = make_abs_tables()[0] # fwd abstable
    #table0, table1, table2, table3 = make_weight_tables()
    #block0, block1, block2, block3 = blocks = [None] * 4
    block0 = block1 = block2 = block3 = None
    abstable = bytes(make_abs_tables()[0]) # fwd abstable
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    bufstart = hgslice.index * stride
    bufend = (hgslice.index + hgslice.length) * stride
    sectlen = slicelen * stride // 4
    blocks = bytearray(sectlen * 4)
    block0 = block1 = block2 = block3 = None
    try:
        buffer = memoryview(buffer)
        abstable = memoryview(abstable)
        # for i, sect in enumerate(range(0, sectlen * 4, sectlen)):
        #     blocks[i] = buffer[sect:sect + sectlent]
        # block0, block1, block2, block3 = blocks
        block0 = buffer[sect0:sect0 + sectlen]
        block1 = buffer[sect1:sect1 + sectlen]
        block2 = buffer[sect2:sect2 + sectlen]
        block3 = buffer[sect3:sect3 + sectlen]
        buffer = buffer.release()
        # normalize pixel buffer into data blocks
        i = 0
        for y0 in normyrange:
            for yx in range(y0, y0 + stride, 4):
                # val = unpack_from('<I', pixels, src)
                # b = abstable[pixels[xy ]]
                # v3 = ((b ) & 0x3)
                # v2 = ((b >> 2) & 0x3)
                # v1 = ((b >> 4) & 0x3)
                # v0 = ((b >> 6) & 0x3)
                # b = abstable[pixels[xy + 1]]
                # v3 |= ((b << 2) & 0xc)
                # v2 |= ((b ) & 0xc)
                # v1 |= ((b >> 2) & 0xc)
                # v0 |= ((b >> 4) & 0xc)
                # b = abstable[pixels[xy + 2]]
                # v3 |= ((b << 4) & 0x30)
                # v2 |= ((b << 2) & 0x30)
                # v1 |= ((b ) & 0x30)
                # v0 |= ((b >> 2) & 0x30)
                # b = abstable[pixels[xy + 3]]
                # v3 |= ((b << 6) & 0xc0)
                # v2 |= ((b << 4) & 0xc0)
                # v1 |= ((b << 2) & 0xc0)
                # v0 |= ((b ) & 0xc0)
                # v0 = v1 = v2 = v3 = 0
                #m = 0x3
                # NOTE(review): xy and v0..v3 are never initialized (their
                # initializers are still commented out above).
                for j in range(0, 8, 2):
                    #for j in range(0, 8, 2): # section mask to byte
                    #m = 0x3 << j
                    b = abstable[pixels[xy]]
                    xy += 1
                    v3 |= ((b ) & 0x3) << j
                    v2 |= ((b >> 2) & 0x3) << j
                    v1 |= ((b >> 4) & 0x3) << j
                    v0 |= ((b >> 6) & 0x3) << j
                    #m <<= 2
                    # b = ((((pixels[src ] >> k) & 0x3)) |
                    # (((pixels[src + 1] >> k) & 0x3) << 2) |
                    # (((pixels[src + 2] >> k) & 0x3) << 4) |
                    # (((pixels[src + 3] >> k) & 0x3) << 6))
                    # idx |= (((pixels[src] >> k) & 0x3) << j)
                    # src += 1
                block3[i] = v3
                block2[i] = v2
                block1[i] = v1
                block0[i] = v0
                #blocks[i] = idx & 0xff
                i += 1
        # val = (table0[block0[i]] | table1[block1[i]] |
        # table2[block2[i]] | table3[block3[i]]))
        # pixels[yx ] = invtable[(val ) & 0xff]
        # pixels[yx + 1] = invtable[(val >> 8) & 0xff]
        # pixels[yx + 2] = invtable[(val >> 16) & 0xff]
        # pixels[yx + 3] = invtable[(val >> 24) ]
        # undelta RGB(A) channels of each previous pixel in first row
        for x0 in deltaxrange:
            pixels[x0] = (pixels[x0] + pixels[x0 - bytedepth]) & 0xff
        # undelta RGB(A) channels of each previous stride in all but first row
        for y0 in deltayrange:
            # NOTE(review): syntax error — extra ')' on the next line.
            for yx in range(y0, y0 + stride, 1)):
                pixels[yx] = (pixels[yx] + pixels[yx - fullstride]) & 0xff
    finally:
        # if isinstance(pixels, memoryview):
        #     pixels.release()
        # if isinstance(buffer, memoryview):
        #     buffer.release()
        for memitem in (block0, block1, block2, block3, buffer, pixels, abstable): #blocks:
            if isinstance(memitem, memoryview):
                memitem.release()
    # NOTE(review): everything below is a dead remnant of an earlier draft
    # operating on an undefined `delta` buffer.
    # first loop through the entire delta slice and perform absolute transform
    for i in range(bufstart, bufend, 1):
        delta[i] = abstable[delta[i]]
    # Iterate through each section one at a time, each pass
    # through delta encodes a different mask (section/block) of bytes
    i = 0
    # Section masks: [0xc0c0c0c0, 0x30303030, 0x0c0c0c0c, 0x03030303]
    for k in range(6, -1, -2): # current section
        src = bufstart
        for i in range(sectlen): # section iteration
            idx = 0
            val = unpack_from('<I', delta, src)
            b0 = b1 = b2 = b3 = 0
            m = 0x3
            for j in range(0, 8, 2): # section mask to byte
                m = 0x3 << j
                # NOTE(review): syntax error — truncated subscript on the next line.
                b = abstable[delta[]
                b3 |= (b ) & m
                b >>= 2
                b2 |= (b >> 2) & m
                b >>= 2
                b1 |= (b >> 4) & m
                b >>= 2
                b0 |= (b >> 6) & m
                m <<= 2
                b = ((((delta[src ] >> k) & 0x3)) |
                (((delta[src + 1] >> k) & 0x3) << 2) |
                (((delta[src + 2] >> k) & 0x3) << 4) |
                (((delta[src + 3] >> k) & 0x3) << 6))
                idx |= (((delta[src] >> k) & 0x3) << j)
                src += 1
            blocks[i] = idx & 0xff
            dst += 1
    return blocks
def unpack_data(data: bytes, cmd: bytes) -> bytearray:
    """Decompress and expand an HG zero-run-length stream.

    *data* holds the zlib-compressed non-zero byte runs and *cmd* a
    zlib-compressed bit stream describing them: one flag bit (does the
    first run copy data?), an Elias-gamma-coded output length, then
    alternating Elias-gamma run lengths (copy run / zero run).

    Returns the expanded output as a bytearray (usually height * stride).

    BUGFIX: the bit reader was assigned to 'bifbuf' but used as 'bitbuf'
    (NameError), and the function returned the undefined name '_buffer'
    instead of the output bytearray.
    """
    buffer = None
    try:
        data = memoryview(zlib.decompress(data))
        cmd = memoryview(zlib.decompress(cmd))
        bitbuf = BitBuffer(cmd, len(cmd))
        copyflag = bitbuf.read_flag()      # is first run non-zero data?
        buflen = bitbuf.read_eliasgamma()  # length of output buffer (usually height x stride)
        out = bytearray(buflen)            # zero-filled, so zero runs need no writes
        buffer = memoryview(out)
        off = dataoff = 0
        while off < buflen:
            runlen = bitbuf.read_eliasgamma()
            if copyflag:
                # Copy a run of non-zero data into the output buffer.
                buffer[off:off + runlen] = data[dataoff:dataoff + runlen]
                dataoff += runlen
            # else: zero run — skip; the buffer is already zero-filled.
            off += runlen
            copyflag = not copyflag
        return out  # the underlying bytearray
    finally:
        for memitem in (data, cmd, buffer):
            if isinstance(memitem, memoryview):
                memitem.release()
def unpack_into_pixels(buffer:bytes, pixels:bytearray, sliceidx:int, slicelen:int, width:int, height:int, bpp:int, bpc:int, offsetx:int, offsety:int, fullwidth:int, fullheight:int, flip:bool):
    """Decode one slice of packed block data directly into the full canvas.

    Intended pipeline (from the surrounding decode_* helpers): split `buffer`
    into four 2-bit-plane sections, inverse-normalize them into `pixels`, then
    undelta first-row pixels and subsequent rows in place.

    NOTE(review): this function cannot run as written -- it references the
    undefined names `pixlen` and `sect0`..`sect3`, contains two stray closing
    parentheses (syntax errors), and the `flip` ranges look wrong (descending
    start with a positive step). Problems are flagged inline; the code is left
    byte-for-byte intact pending a decision on intent.
    """
    bytedepth = bpp // 8 # only bpp 24,32 not supported by CS2
    stride = (width * pixlen + 3) & ~0x3 # NOTE(review): `pixlen` is undefined here -- presumably bytedepth
    fullstride = (fullwidth * bytedepth + 3) & ~0x3
    colstart = offsetx
    colend = offsetx + width
    rowstart = offsety + sliceidx
    rowend = offsety + sliceidx + slicelen
    fullstart = rowstart * fullstride + colstart * bytedepth
    fullend = rowend * fullstride + colend * bytedepth
    del colstart, colend, rowstart, rowend
    if not flip:
        normyrange = range(fullstart, fullend, fullstride)
        deltayrange = range(fullstart + stride, fullend, fullstride)
        deltaxrange = range(fullstart + bytedepth, fullstart + stride, 1)
    else:
        # NOTE(review): positive step with a descending start -- this range is
        # empty; probably meant -fullstride like deltayrange just below
        normyrange = range(fullend - stride, fullstart - 1, fullstride)
        deltayrange = range(fullend - stride - stride, fullstart - 1, -fullstride)
        deltaxrange = range(fullend - stride + bytedepth, fullend, 1)
    # buflen = len(buffer)
    # sectlen = buflen // 4
    sectlen = len(buffer) // 4
    # sect0, sect1, sect2, sect3 = sects = range(0, sectlen * 4, sectlen)
    invtable = make_abs_tables()[1] # inv abstable
    table0, table1, table2, table3 = make_weight_tables()
    #block0, block1, block2, block3 = blocks = [None] * 4
    block0 = block1 = block2 = block3 = None
    try:
        buffer = memoryview(buffer)
        # for i, sect in enumerate(range(0, sectlen * 4, sectlen)):
        #     blocks[i] = buffer[sect:sect + sectlent]
        # block0, block1, block2, block3 = blocks
        # NOTE(review): sect0..sect3 are never defined -- the commented-out
        # line above (`range(0, sectlen * 4, sectlen)`) shows the likely intent
        block0 = buffer[sect0:sect0 + sectlen]
        block1 = buffer[sect1:sect1 + sectlen]
        block2 = buffer[sect2:sect2 + sectlen]
        block3 = buffer[sect3:sect3 + sectlen]
        buffer = buffer.release() # release() returns None, so `buffer` becomes None here
        # inverse normalize data blocks into pixel buffer
        i = 0
        for y0 in normyrange:
            for yx in range(y0, y0 + stride, 4):
                val = (table0[block0[i]] | table1[block1[i]] |
                       table2[block2[i]] | table3[block3[i]])) # NOTE(review): stray ')' -- syntax error
                i += 1
                pixels[yx ] = invtable[(val ) & 0xff]
                pixels[yx + 1] = invtable[(val >> 8) & 0xff]
                pixels[yx + 2] = invtable[(val >> 16) & 0xff]
                pixels[yx + 3] = invtable[(val >> 24) ]
        # undelta RGB(A) channels of each previous pixel in first row
        for x0 in deltaxrange:
            pixels[x0] = (pixels[x0] + pixels[x0 - bytedepth]) & 0xff
        # undelta RGB(A) channels of each previous stride in all but first row
        for y0 in deltayrange:
            for yx in range(y0, y0 + stride, 1)): # NOTE(review): stray ')' -- syntax error
                pixels[yx] = (pixels[yx] + pixels[yx - fullstride]) & 0xff
    finally:
        # if isinstance(pixels, memoryview):
        #     pixels.release()
        # if isinstance(buffer, memoryview):
        #     buffer.release()
        for memitem in (block0, block1, block2, block3, buffer, pixels): #blocks:
            if isinstance(memitem, memoryview):
                memitem.release()
# block0, block1, block2, block3 = tuple(buffer[i:i + sectlen] for i in range(0, buflen, sectlen))
# buflen = len(buffer)
# sectlen = buflen // 4
# sect0, sect1, sect2, sect3 = range(slicestart, slicestart + sectlen * 4, sectlen)
# block0, block1, block2, block3 = blocks = tuple(buffer[i:i + sectlen] for i in range(0, buflen, sectlen))
# if not flip:
# normyrange = range(fullstart, fullend, fullstride)
# deltayrange = range(fullstart + stride, fullend, fullstride)
# deltaxrange = range(fullstart + bytedepth, fullstart + stride, 1)
# else:
# normyrange = range(fullend - stride, fullstart - 1, fullstride)
# deltayrange = range(fullend - stride - stride, fullstart - 1, -fullstride)
# deltaxrange = range(fullend - stride + bytedepth, fullend, 1)
# # inverse normalize data blocks into pixel buffer
# i = 0
# for y0 in normyrange:
# for yx in range(y0, y0 + stride, 4):
# val = (table0[block0[i]] | table1[block1[i]] |
# table2[block2[i]] | table3[block3[i]]))
# i += 1
# pixels[yx ] = invtable[(val ) & 0xff]
# pixels[yx + 1] = invtable[(val >> 8) & 0xff]
# pixels[yx + 2] = invtable[(val >> 16) & 0xff]
# pixels[yx + 3] = invtable[(val >> 24) ]
# # undelta RGB(A) channels of each previous pixel in first row
# for x0 in deltaxrange:
# pixels[x0] = (pixels[x0] + pixels[x0 - bytedepth]) & 0xff
# # undelta RGB(A) channels of each previous stride in all but first row
# for y0 in deltayrange:
# for yx in range(y0, y0 + stride, 1)):
# pixels[yx] = (pixels[yx] + pixels[yx - fullstride]) & 0xff
def unpack_data(self, data:bytes, cmd:bytes) -> bytearray:
    """Decompress and zero-run-length decode a (data, cmd) pair.

    data -- zlib-compressed non-zero byte runs
    cmd  -- zlib-compressed bit commands: <copyflag> <buffer length> then one
            elias-gamma coded run length per alternating run
    Returns the reconstructed output buffer as a bytearray.
    """
    data = zlib.decompress(data)
    cmd = zlib.decompress(cmd)
    bitbuf = BitBuffer(cmd) # fixed: was bound to `bifbuf` but read as `bitbuf`
    copyflag = bitbuf.read_flag() # is first run non-zero data?
    buflen = bitbuf.read_eliasgamma() # length of output buffer (usually height x stride)
    # (removed unused `sectlen`/`blocks` locals -- block splitting happens in a later stage)
    buffer = bytearray(buflen) # already filled with zero-bytes
    off = dataoff = 0
    while off < buflen:
        runlen = bitbuf.read_eliasgamma()
        if copyflag:
            # Copy non-zero data into output buffer
            buffer[off:off + runlen] = data[dataoff:dataoff + runlen]
            dataoff += runlen
        #else skip zero bytes, buffer already filled with zero-bytes
        off += runlen
        copyflag = not copyflag
    return buffer
def unpack_slice(self, hgslice:HgSlice, hgdata:HgData, pixels:bytearray, stdinfo:StandardInfo, options:HgOptions=HgOptions(0)):
    """Zero-run-length decode one HG slice's data.

    Currently performs only the ZRLE stage and returns the raw block buffer.
    Everything that previously followed the return (block-plane decode,
    undelta, pixel placement) was unreachable scratch code riddled with
    syntax errors and undefined names; it has been removed.
    TODO: feed the returned buffer through decode_blocks()/decode_delta()
    and place the result into `pixels`.
    NOTE(review): the `HgOptions(0)` default is evaluated once at definition
    time and shared across calls -- confirm HgOptions is immutable.
    """
    stride = stdinfo.stride
    bytedepth = stdinfo.bytedepth
    channeldepth = stdinfo.channeldepth
    width = stdinfo.width
    height = stdinfo.height
    canvasx = stdinfo.canvasx
    canvasy = stdinfo.canvasy
    canvaswidth = stdinfo.canvaswidth
    canvasheight = stdinfo.canvasheight
    #
    data = hgdata.data #TODO: zlib.decompress()
    cmd = hgdata.cmd #TODO: zlib.decompress()
    bitbuf = BitBuffer(cmd) # fixed: was `bifbuf` with a two-argument call
    copyflag = bitbuf.read_flag() # is first run non-zero data?
    buflen = bitbuf.read_eliasgamma() # length of output buffer (usually height x stride)
    buffer = bytearray(buflen) # already filled with zero-bytes
    off = dataoff = 0
    while off < buflen:
        runlen = bitbuf.read_eliasgamma()
        if copyflag:
            # Copy non-zero data into output buffer
            buffer[off:off + runlen] = data[dataoff:dataoff + runlen]
            dataoff += runlen
        #else skip zero bytes, buffer already filled with zero-bytes
        off += runlen
        copyflag = not copyflag
    return buffer
#
### INITIALIZATION TABLES ###
#
#TODO: test import array.array type
def make_weight_tables() -> Tuple[List[int], List[int], List[int], List[int]]:
    """make_weight_tables() -> weight_tables[4][256]

    Each table maps a byte to a 32-bit word in which the byte's four 2-bit
    pairs are spread out to one pair per output byte; table N targets bit-pair
    lane 3-N of every output byte. NOTE: entries are unsigned integers.
    >>> make_weight_tables()
    ([0x0, 0x40, 0x80, 0xc0, 0x4000, 0x4040, 0x4080, ...0xc0c0c080, 0xc0c0c0c0],
     [0x0, 0x10, 0x20, 0x30, 0x1000, 0x1010, 0x1020, ...0x30303020, 0x30303030],
     [0x0, 0x04, 0x08, 0x0c, 0x0400, 0x0404, 0x0408, ...0x0c0c0c08, 0x0c0c0c0c],
     [0x0, 0x01, 0x02, 0x03, 0x0100, 0x0101, 0x0102, ...0x03030302, 0x03030303])
    """
    spread = []
    for byte in range(0x100):
        # scatter the four 2-bit pairs of `byte`, one pair per output byte
        packed = byte & 0x3
        packed |= (byte & 0xc) << 6
        packed |= (byte & 0x30) << 12
        packed |= (byte & 0xc0) << 18
        spread.append(packed)
    # tables 0..3 are the spread values shifted into bit-pair lanes 3..0;
    # tuples for faster lookups
    return tuple(tuple(v << shift for v in spread) for shift in (6, 4, 2, 0))
#TODO: test import array.array type/bytes type
#TODO: Would returning tables as (bytes, bytes) be better for efficiency?
def make_abs_tables() -> Tuple[List[int], List[int]]:
    """make_abs_tables() -> (abstable, inv_abstable)

    Zig-zag byte mapping: small signed magnitudes map to small unsigned codes
    (0,-1,1,-2,2,... -> 0,1,2,3,4,...). The inverse table undoes the forward
    table exactly. NOTE: entries are unsigned integers.
    >>> make_abs_tables()
    # signed representation (however result is unsigned)
    ([0, 2, 4, 6, 8, 10, 11, ...5, 3, 1],
     [0, -1, 1, -2, 2, -3, 3, ...-127, 127, -128])
    """
    fwd = [0] * 0x100
    inv = [0] * 0x100
    for lo in range(0x80): # for(i=0,j=0; i<128; i++,j+=2)
        even = 2 * lo
        odd = even + 1
        hi = 0xff - lo # ~lo as an unsigned byte
        fwd[lo] = even
        fwd[hi] = odd
        inv[even] = lo
        inv[odd] = hi
    # tuples for faster lookups
    return (tuple(fwd), tuple(inv))
## BIT BUFFER / ELIAS GAMMA CODING ##
class BitBuffer(object):
    """BitBuffer(bytes) -> readable bitbuffer
    BitBuffer(bytearray) -> writable bitbuffer

    Reads and writes individual bits and positive integers in elias gamma
    coding. Bits are packed LSB-first within each byte; writers assume the
    underlying buffer starts zero-filled.
    NOTE: no bounds checking is performed
    """
    __slots__ = ('b', 'i', 'k') # buffer, byte index, bit index
    def __init__(self, b, length=None):
        """b -- bytes (read) or bytearray (write); `length` is accepted for
        backwards compatibility with BitBuffer(buf, len(buf)) callers and is
        ignored (no bounds checking is performed)."""
        # fixed: `self.b = self.i = self.k = 0` used to discard the buffer
        self.b = b # buffer
        self.i = self.k = 0 # byte index, bit index
    #
    # READ BITBUFFER:
    #
    def read_flag(self) -> bool:
        """Bb.read_flag() -> bool
        Reads the next boolean from the bitbuffer as a single bit.
        """
        k = self.k # bit index
        if k >= 8: # byte exhausted: advance to the next one
            k = 0
            self.i += 1
        self.k = k + 1
        return bool((self.b[self.i] >> k) & 0x1)
    #
    def read_eliasgamma(self) -> int:
        """Bb.read_eliasgamma() -> positive integer
        Reads the next positive integer from the bitbuffer in elias gamma coding.
        """
        b = self.b # buffer
        i = self.i # byte index
        k = self.k # bit index
        if k >= 8: # incr bit [0:1]
            k = 0
            i += 1
        k += 1
        d = 0 # digits (count of zero bits before the stop bit)
        while not ((b[i] >> (k - 1)) & 0x1): # read bit [0:d+1]
            d += 1
            if k >= 8: # incr bit [1:d+1]
                k = 0
                i += 1
            k += 1
        v = 1 << d # value (the stop bit is the implicit leading 1)
        while d:
            d -= 1
            if k >= 8: # incr bit [d+1:d*2+1]
                k = 0
                i += 1
            k += 1
            if (b[i] >> (k - 1)) & 0x1: # read bit [d+1:d*2+1]
                v |= (1 << d)
        self.i = i
        self.k = k
        return v
    #
    # WRITE BITBUFFER:
    #
    def write_flag(self, f:bool) -> int:
        """Bb.write_flag(flag) -> integer bits written (always 1)
        Writes a boolean to the bitbuffer as a single bit.
        """
        k = self.k # bit index
        if k >= 8:
            k = 0
            self.i += 1
            # assume buffer is initialized with zero-bytes
        self.k = k + 1
        if f:
            self.b[self.i] |= (1 << k)
        return 1 # fixed: the documented bit count was never returned
    #
    def write_eliasgamma(self, v:int) -> int:
        """Bb.write_eliasgamma(integer) -> integer bits written
        Writes a positive integer to the bitbuffer in elias gamma coding.
        """
        if v <= 0:
            raise ValueError('Elias gamma coded integer must be positive, not {0!r}'.format(v))
        b = self.b # buffer
        i = self.i # byte index
        k = self.k # bit index
        d = 0 # digits below the leading 1-bit
        while v >> (d + 1):
            d += 1
        for _ in range(d): # skip d zero bits [0:d] (buffer assumed zero-filled)
            if k >= 8:
                k = 0
                i += 1
            k += 1
        if k >= 8: # incr bit [d:d+1]
            k = 0
            i += 1
        k += 1
        b[i] |= (1 << (k - 1)) # write stop bit [d:d+1] (true)
        # fixed: a stray `v = 1 << d` here clobbered the value, so the low
        # data bits were always written as zero
        r = d
        while r:
            r -= 1
            if k >= 8: # incr bit [d+1:d*2+1]
                k = 0
                i += 1
            k += 1
            if (v >> r) & 0x1: # write bit [d+1:d*2+1] (if true)
                b[i] |= 1 << (k - 1) # fixed: removed trailing ':' syntax error
        self.i = i
        self.k = k
        return d * 2 + 1 # fixed: return the bit count as documented (was the value)
def sizeof_eliasgamma(v:int) -> int:
    """sizeof_eliasgamma(value) -> integer bit length
    Measures the bit length of a positive integer in elias gamma coding.
    """
    if v <= 0:
        raise ValueError('Elias gamma coded integer must be positive, not {0!r}'.format(v))
    digits = v.bit_length() - 1 # bits below the leading 1-bit
    # one stop bit, plus one zero marker and one data bit per digit
    return 2 * digits + 1
## ZERO RUN-LENGTH CODING ##
# --- format ---
# data bytes:
# [<non-zero run 0:bytes>, <non-zero run 1:bytes>, ...<non-zero run m-1:bytes>]
# cmd bits:
# <copyflag:bit>, <buffer length:eliasgamma>,
# [<run length 0:eliasgamma>, <run length 1:eliasgamma>, ...<run length n-1:eliasgamma>]
def encode_zrle(hgslice:"HgSlice", buffer:bytes) -> "HgSlice":
    """encode_zrle(hgslice, data bytes) -> (data bytearray, cmd bytearray)

    Zero-run-length encodes `buffer`: non-zero bytes are copied verbatim into
    `data`, while `cmd` records <copyflag><buffer length> followed by the
    elias-gamma coded length of each alternating run. `buffer` must be
    non-empty. (The return annotation is historical; a (data, cmd) tuple is
    returned, matching decode_zrle()/unpack_data().)
    """
    buflen = len(buffer)
    ## STEP 1 MEASURE: measure length and offset of all runs ##
    # (to allocate correct buffer sizes the first time)
    datalen = 0
    cmdbitlen = 1 # 1 bit for consumed copyflag
    cmdbitlen += sizeof_eliasgamma(buflen)
    runs = [] # includes zero and non-zero runs
    copyflag = bool(buffer[0]) # is first run non-zero data?
    off = 0
    while off < buflen:
        runlen = 1 # starts with the first non-conforming byte reached last run
        if copyflag:
            # length of non-zero run
            while off + runlen < buflen and buffer[off + runlen]:
                runlen += 1
            datalen += runlen
        else:
            # length of zero run
            while off + runlen < buflen and not buffer[off + runlen]:
                runlen += 1
        runs.append(runlen)
        cmdbitlen += sizeof_eliasgamma(runlen)
        off += runlen
        copyflag = not copyflag
    ## STEP 2 BUILD: non-zero data runs buffer, cmd bits buffer ##
    data = bytearray(datalen) # already filled with zero-bytes
    cmd = bytearray((cmdbitlen + 7) // 8)
    copyflag = bool(buffer[0])
    bitbuf = BitBuffer(cmd) # fixed: BitBuffer takes just the buffer
    bitbuf.write_flag(copyflag)
    bitbuf.write_eliasgamma(buflen) # fixed: was the undefined name `length`
    off = dataoff = 0
    for runlen in runs:
        if copyflag:
            data[dataoff:dataoff + runlen] = buffer[off:off + runlen]
            dataoff += runlen
        bitbuf.write_eliasgamma(runlen)
        off += runlen
        copyflag = not copyflag
    return (data, cmd)
    # NOTE(review): an unreachable earlier draft of this routine (operating on
    # `deltabuf` with undefined helpers) used to follow this return; removed.
def decode_zrle(hgslice:"HgSlice", data:bytes, cmd:bytes) -> bytearray:
    """decode_zrle(hgslice, data bytes, cmd bytes) -> buffer bytearray

    Inverse of encode_zrle(): replays the run lengths from `cmd`, copying
    non-zero runs out of `data` and leaving zero runs as the bytearray's
    default zero fill.
    """
    bitbuf = BitBuffer(cmd) # fixed: was bound to `bifbuf` but read as `bitbuf`
    copyflag = bitbuf.read_flag() # is first run non-zero data?
    buflen = bitbuf.read_eliasgamma() # length of output buffer (usually height x stride)
    buffer = bytearray(buflen) # already filled with zero-bytes
    off = dataoff = 0
    while off < buflen:
        runlen = bitbuf.read_eliasgamma()
        if copyflag:
            # Copy non-zero data into output buffer
            buffer[off:off + runlen] = data[dataoff:dataoff + runlen]
            dataoff += runlen
        #else skip zero bytes, buffer already filled with zero-bytes
        off += runlen
        copyflag = not copyflag
    return buffer
## ENCODE/DECODE BLOCKS ##
def encode_blocks(hgslice:"HgSlice", delta:bytes, width:int, height:int, bpp:int) -> bytearray:
    """encode_blocks(hgslice, delta bytes, width, height, bpp) -> blocks bytearray

    Applies the forward absolute transform to the slice's bytes IN PLACE
    (callers pass a bytearray), then gathers each 2-bit plane of every group
    of four source bytes into its own quarter ("section") of the returned
    block buffer.
    """
    abstable = make_abs_tables()[0] # fwd abstable
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    bufstart = hgslice.index * stride
    bufend = (hgslice.index + hgslice.length) * stride
    sectlen = hgslice.length * stride // 4
    blocks = bytearray(sectlen * 4)
    # absolute-transform the whole delta slice first (mutates `delta`)
    for pos in range(bufstart, bufend):
        delta[pos] = abstable[delta[pos]]
    # One pass per 2-bit plane; planes 6,4,2,0 fill the sections matching
    # masks [0xc0c0c0c0, 0x30303030, 0x0c0c0c0c, 0x03030303]
    dst = 0
    for plane in (6, 4, 2, 0):
        src = bufstart
        for _ in range(sectlen): # section iteration
            packed = 0
            shift = 0
            while shift < 8: # pack four source bytes into one block byte
                packed |= ((delta[src] >> plane) & 0x3) << shift
                src += 1
                shift += 2
            blocks[dst] = packed & 0xff
            dst += 1
    return blocks
def decode_blocks(hgslice:"HgSlice", blocks:bytes, width:int, height:int, bpp:int) -> bytearray:
    """decode_blocks(hgslice, blocks bytes, width, height, bpp) -> delta bytearray

    Inverse of encode_blocks(): recombines the four 2-bit-plane sections of
    `blocks` into 32-bit words via the weight tables, then applies the inverse
    absolute transform byte by byte.
    """
    table0, table1, table2, table3 = make_weight_tables()
    invtable = make_abs_tables()[1] # inv abstable
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    bufstart = hgslice.index * stride # (unused: sections are relative to `blocks`)
    sectlen = hgslice.length * stride // 4
    # section offsets within the blocks buffer
    sect0, sect1, sect2, sect3 = range(0, sectlen * 4, sectlen)
    delta = bytearray(sectlen * 4)
    dst = 0
    for i in range(sectlen):
        word = (table0[blocks[sect0 + i]] | table1[blocks[sect1 + i]] |
                table2[blocks[sect2 + i]] | table3[blocks[sect3 + i]])
        delta[dst] = invtable[word & 0xff]
        delta[dst + 1] = invtable[(word >> 8) & 0xff]
        delta[dst + 2] = invtable[(word >> 16) & 0xff]
        delta[dst + 3] = invtable[word >> 24]
        dst += 4
    return delta
## ENCODE/DECODE DELTA ##
def encode_delta(hgslice:"HgSlice", pixels:bytes, width:int, height:int, bpp:int) -> bytearray:
    """encode_delta(hgslice, pixels bytes, width, height, bpp) -> delta bytearray

    Returns a copy of `pixels` in which, within the slice, every row after
    the first is replaced by its byte-wise difference from the previous row,
    and the first row by each channel's difference from the previous pixel.
    """
    bytedepth = (bpp + 7) // 8
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    bufstart = hgslice.index * stride
    bufend = (hgslice.index + hgslice.length) * stride
    delta = bytearray(pixels)
    # rows after the first: subtract the byte one stride back
    # (walk backwards so sources are still un-deltaed)
    for pos in range(bufend - 1, bufstart + stride - 1, -1):
        delta[pos] = (delta[pos] - delta[pos - stride]) & 0xff
    # first row: subtract the channel one pixel (bytedepth) back
    for pos in range(bufstart + stride - 1, bufstart + bytedepth - 1, -1):
        delta[pos] = (delta[pos] - delta[pos - bytedepth]) & 0xff
    return delta
def decode_delta(hgslice:"HgSlice", delta:bytes, width:int, height:int, bpp:int) -> bytearray:
    """decode_delta(hgslice, delta bytes, width, height, bpp) -> pixels bytearray

    Inverse of encode_delta(): un-deltas the first row channel-by-channel,
    then un-deltas every following row against the row above it. Works on a
    local copy, so `delta` itself is left untouched (it may be bytes).
    """
    bytedepth = (bpp + 7) // 8
    stride = ((width * bpp + 7) // 8 + 3) & ~0x3
    bufstart = hgslice.index * stride
    bufend = (hgslice.index + hgslice.length) * stride
    pixels = bytearray(delta) # fixed: the `bytes`-annotated input was mutated in place
    # undelta RGB(A) channels of each previous pixel in first row
    # fixed: the loop previously ran to `bufend + stride` (past the slice)
    # instead of covering just the first row
    for xi in range(bufstart + bytedepth, bufstart + stride):
        pixels[xi] = (pixels[xi] + pixels[xi - bytedepth]) & 0xff
    # undelta RGB(A) channels of each previous stride in all but first row
    # fixed: both ranges previously used a positive step with stop < start,
    # so the loop never executed and rows were never un-deltaed
    for yi in range(bufstart + stride, bufend):
        pixels[yi] = (pixels[yi] + pixels[yi - stride]) & 0xff
    return pixels
## ENCODE/DECODE SLICES ##
def encode_slice(hgslice:"HgSlice", pixels:bytes, width:int, height:int, bpp:int) -> "HgSlice":
    """encode_slice(hgslice, pixels bytes, width, height, bpp) -> HgData(data bytearray, cmd bytearray)

    Full slice encode pipeline: delta transform -> 2-bit block planes ->
    zero-run-length coding.
    """
    delta = encode_delta(hgslice, pixels, width, height, bpp)
    blocks = encode_blocks(hgslice, delta, width, height, bpp) # fixed: the `delta` argument was missing
    data, cmd = encode_zrle(hgslice, blocks)
    return HgData(data, cmd)
def decode_slice(hgslice:"HgSlice", hgdata:"HgData", width:int, height:int, bpp:int) -> bytearray:
    """decode_slice(hgslice, HgData(data bytes, cmd bytes), width, height, bpp) -> pixels bytearray

    Inverse of encode_slice(): zero-RLE decode -> block-plane decode -> undelta.
    """
    # fixed: decode_zrle returns a single buffer (the packed block planes),
    # not a (data, cmd) pair, and the remaining pipeline stages plus the
    # return statement were missing entirely
    blocks = decode_zrle(hgslice, hgdata.data, hgdata.cmd)
    delta = decode_blocks(hgslice, blocks, width, height, bpp)
    return decode_delta(hgslice, delta, width, height, bpp)
## CLEANUP ##
del enum, collections # only used during type declarations
del Iterable, Iterator, List, Optional, NoReturn, Tuple, Type, Union # only used during declarations
| 2.34375 | 2 |
tests/Usage.py | astronalta/gamepython | 0 | 12762954 | <filename>tests/Usage.py<gh_stars>0
import os
# Smoke-test both help flags of the jogo binary.
jogo = os.path.join("..", "bin", "jogo")
for flag in ("--help", "-h"):
    os.system(jogo + " " + flag)
| 1.632813 | 2 |
frille-lang/lib/python3.6/site-packages/typer/completion.py | frillecode/CDS-spring-2021-language | 1 | 12762955 | import os
import re
import subprocess
import sys
from enum import Enum
from pathlib import Path
from typing import Any, Optional, Tuple
import click
import click._bashcomplete
from .models import ParamMeta
from .params import Option
from .utils import get_params_from_function
try:
import shellingham
except ImportError: # pragma: nocover
shellingham = None
_click_patched = False
def get_completion_inspect_parameters() -> Tuple[ParamMeta, ParamMeta]:
    """Return the (install_completion, show_completion) parameter metadata.

    The auto-detecting placeholder signature is used when shellingham is
    available and detection is not disabled via the test environment variable;
    otherwise the explicit-shell variant is introspected.
    """
    completion_init()
    detection_disabled = os.getenv("_TYPER_COMPLETE_TEST_DISABLE_SHELL_DETECTION")
    if shellingham and not detection_disabled:
        placeholder = _install_completion_placeholder_function
    else:
        placeholder = _install_completion_no_auto_placeholder_function
    parameters = get_params_from_function(placeholder)
    install_param, show_param = parameters.values()
    return install_param, show_param
def install_callback(ctx: click.Context, param: click.Parameter, value: Any) -> Any:
    """Click callback: install shell completion, report the result, and exit."""
    if not value or ctx.resilient_parsing:
        return value # pragma no cover
    # a string value names the shell explicitly; otherwise auto-detect
    shell, path = install(shell=value) if isinstance(value, str) else install()
    click.secho(f"{shell} completion installed in {path}", fg="green")
    click.echo("Completion will take effect once you restart the terminal")
    sys.exit(0)
def show_callback(ctx: click.Context, param: click.Parameter, value: Any) -> Any:
    # Click callback: print the completion script for the requested (or
    # detected) shell, then exit.
    if not value or ctx.resilient_parsing:
        return value # pragma no cover
    prog_name = ctx.find_root().info_name
    assert prog_name
    # env var the generated script uses to re-invoke the program in completion mode
    complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
    if isinstance(value, str):
        shell = value
    elif shellingham:
        shell, _ = shellingham.detect_shell()
    # NOTE(review): if `value` is not a str and shellingham is unavailable,
    # `shell` is never bound and the next call raises NameError -- confirm
    # whether that path is reachable from the CLI options.
    script_content = get_completion_script(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    click.echo(script_content)
    sys.exit(0)
class Shells(str, Enum):
    # Shells with completion-script support; the str values double as the
    # CLI choice names and the `shell` argument accepted by install().
    bash = "bash"
    zsh = "zsh"
    fish = "fish"
    powershell = "powershell"
    pwsh = "pwsh"
# Create a fake command function to extract the completion parameters
def _install_completion_placeholder_function(
    install_completion: bool = Option(
        None,
        "--install-completion",
        is_flag=True,
        callback=install_callback,
        expose_value=False,
        help="Install completion for the current shell.",
    ),
    show_completion: bool = Option(
        None,
        "--show-completion",
        is_flag=True,
        callback=show_callback,
        expose_value=False,
        help="Show completion for the current shell, to copy it or customize the installation.",
    ),
) -> Any:
    """Never executed: exists only so its Option-annotated signature can be
    introspected by get_completion_inspect_parameters() (the shell
    auto-detection variant)."""
    pass # pragma no cover
def _install_completion_no_auto_placeholder_function(
    install_completion: Shells = Option(
        None,
        callback=install_callback,
        expose_value=False,
        help="Install completion for the specified shell.",
    ),
    show_completion: Shells = Option(
        None,
        callback=show_callback,
        expose_value=False,
        help="Show completion for the specified shell, to copy it or customize the installation.",
    ),
) -> Any:
    """Never executed: exists only so its Option-annotated signature can be
    introspected by get_completion_inspect_parameters() (the explicit-shell
    variant used when shell detection is unavailable)."""
    pass # pragma no cover
COMPLETION_SCRIPT_BASH = """
%(complete_func)s() {
local IFS=$'\n'
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
%(autocomplete_var)s=complete_bash $1 ) )
return 0
}
complete -o default -F %(complete_func)s %(prog_name)s
"""
COMPLETION_SCRIPT_ZSH = """
#compdef %(prog_name)s
%(complete_func)s() {
eval $(env _TYPER_COMPLETE_ARGS="${words[1,$CURRENT]}" %(autocomplete_var)s=complete_zsh %(prog_name)s)
}
compdef %(complete_func)s %(prog_name)s
"""
COMPLETION_SCRIPT_FISH = 'complete --command %(prog_name)s --no-files --arguments "(env %(autocomplete_var)s=complete_fish _TYPER_COMPLETE_FISH_ACTION=get-args _TYPER_COMPLETE_ARGS=(commandline -cp) %(prog_name)s)" --condition "env %(autocomplete_var)s=complete_fish _TYPER_COMPLETE_FISH_ACTION=is-args _TYPER_COMPLETE_ARGS=(commandline -cp) %(prog_name)s"'
COMPLETION_SCRIPT_POWER_SHELL = """
Import-Module PSReadLine
Set-PSReadLineKeyHandler -Chord Tab -Function MenuComplete
$scriptblock = {
param($wordToComplete, $commandAst, $cursorPosition)
$Env:%(autocomplete_var)s = "complete_powershell"
$Env:_TYPER_COMPLETE_ARGS = $commandAst.ToString()
$Env:_TYPER_COMPLETE_WORD_TO_COMPLETE = $wordToComplete
%(prog_name)s | ForEach-Object {
$commandArray = $_ -Split ":::"
$command = $commandArray[0]
$helpString = $commandArray[1]
[System.Management.Automation.CompletionResult]::new(
$command, $command, 'ParameterValue', $helpString)
}
$Env:%(autocomplete_var)s = ""
$Env:_TYPER_COMPLETE_ARGS = ""
$Env:_TYPER_COMPLETE_WORD_TO_COMPLETE = ""
}
Register-ArgumentCompleter -Native -CommandName %(prog_name)s -ScriptBlock $scriptblock
"""
def install(
    shell: Optional[str] = None,
    prog_name: Optional[str] = None,
    complete_var: Optional[str] = None,
) -> Tuple[str, Path]:
    """Install completion for `shell` (auto-detected when omitted).

    Returns the shell name and the path the completion script was written to.
    Raises click.exceptions.Exit(1) for unsupported (or undetectable) shells.
    """
    prog_name = prog_name or click.get_current_context().find_root().info_name
    assert prog_name
    if complete_var is None:
        complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
    if shell is None and shellingham is not None:
        shell, _ = shellingham.detect_shell()
    # dispatch table instead of an if/elif chain; pwsh shares the
    # powershell installer
    installers = {
        "bash": install_bash,
        "zsh": install_zsh,
        "fish": install_fish,
        "powershell": install_powershell,
        "pwsh": install_powershell,
    }
    installer = installers.get(shell)
    if installer is None:
        click.echo(f"Shell {shell} is not supported.")
        raise click.exceptions.Exit(1)
    installed_path = installer(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    return shell, installed_path
def install_bash(*, prog_name: str, complete_var: str, shell: str) -> Path:
    """Write the bash completion script and source it from ~/.bashrc."""
    # Ref: https://github.com/scop/bash-completion#faq
    # bash-completion seems to be the official completion system for bash:
    # Ref: https://www.gnu.org/software/bash/manual/html_node/A-Programmable-Completion-Example.html
    # but installing in the documented locations doesn't take effect, so a
    # per-program script is sourced from ~/.bashrc instead.
    completion_path = Path.home() / f".bash_completions/{prog_name}.sh"
    rc_path = Path.home() / ".bashrc"
    rc_path.parent.mkdir(parents=True, exist_ok=True)
    rc_content = rc_path.read_text() if rc_path.is_file() else ""
    source_line = f"source {completion_path}"
    if source_line not in rc_content: # pragma: nocover
        rc_content += f"\n{source_line}"
    rc_content += "\n"
    rc_path.write_text(rc_content)
    # Install completion
    completion_path.parent.mkdir(parents=True, exist_ok=True)
    script_content = get_completion_script(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    completion_path.write_text(script_content)
    return completion_path
def install_zsh(*, prog_name: str, complete_var: str, shell: str) -> Path:
    """Install zsh completion for *prog_name* and return the completion path.

    Ensures ``~/.zshrc`` initialises compinit and adds ``~/.zfunc`` to
    ``fpath``, then writes the completion function to ``~/.zfunc/_<prog>``.
    """
    # Setup Zsh and load ~/.zfunc
    zshrc_path = Path.home() / ".zshrc"
    zshrc_path.parent.mkdir(parents=True, exist_ok=True)
    zshrc_content = ""
    if zshrc_path.is_file():
        zshrc_content = zshrc_path.read_text()
    completion_init_lines = [
        "autoload -Uz compinit",
        "compinit",
        "zstyle ':completion:*' menu select",
        "fpath+=~/.zfunc",
    ]
    # Append only the init lines that are not already present in .zshrc.
    for line in completion_init_lines:
        if line not in zshrc_content: # pragma: nocover
            zshrc_content += f"\n{line}"
    zshrc_content += "\n"
    zshrc_path.write_text(zshrc_content)
    # Install completion under ~/.zfunc/
    path_obj = Path.home() / f".zfunc/_{prog_name}"
    path_obj.parent.mkdir(parents=True, exist_ok=True)
    script_content = get_completion_script(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    path_obj.write_text(script_content)
    return path_obj
def install_fish(*, prog_name: str, complete_var: str, shell: str) -> Path:
    """Write the fish completion file for *prog_name* and return its path."""
    completion_file = Path.home() / ".config/fish/completions" / f"{prog_name}.fish"
    completion_file.parent.mkdir(parents=True, exist_ok=True)
    script = get_completion_script(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    completion_file.write_text(script + "\n")
    return completion_file
def install_powershell(*, prog_name: str, complete_var: str, shell: str) -> Path:
    """Install PowerShell completion for *prog_name* and return the profile path.

    Relaxes the execution policy for the current user, resolves the user's
    PowerShell ``$profile`` path, and appends the completion script to it.
    """
    subprocess.run(
        [
            shell,
            "-Command",
            "Set-ExecutionPolicy",
            "Unrestricted",
            "-Scope",
            "CurrentUser",
        ]
    )
    # Deliberately no check=True here: a non-zero exit must fall through to
    # the friendly error branch below instead of raising CalledProcessError,
    # which previously made that branch unreachable.
    result = subprocess.run(
        [shell, "-NoProfile", "-Command", "echo", "$profile"],
        stdout=subprocess.PIPE,
    )
    if result.returncode != 0: # pragma: nocover
        click.echo("Couldn't get PowerShell user profile", err=True)
        raise click.exceptions.Exit(result.returncode)
    path_str = ""
    if isinstance(result.stdout, str): # pragma: nocover
        path_str = result.stdout
    if isinstance(result.stdout, bytes):
        try:
            # PowerShell would be predominant in Windows
            path_str = result.stdout.decode("windows-1252")
        except UnicodeDecodeError: # pragma: nocover
            try:
                path_str = result.stdout.decode("utf8")
            except UnicodeDecodeError:
                click.echo("Couldn't decode the path automatically", err=True)
                raise click.exceptions.Exit(1)
    path_obj = Path(path_str.strip())
    parent_dir: Path = path_obj.parent
    parent_dir.mkdir(parents=True, exist_ok=True)
    script_content = get_completion_script(
        prog_name=prog_name, complete_var=complete_var, shell=shell
    )
    # Append (not overwrite): the profile may already contain user content.
    with path_obj.open(mode="a") as f:
        f.write(f"{script_content}\n")
    return path_obj
def do_bash_complete(cli: click.Command, prog_name: str) -> bool:
    """Emit bash completion candidates based on COMP_WORDS/COMP_CWORD."""
    words = click.parser.split_arg_string(os.getenv("COMP_WORDS", ""))
    cursor = int(os.getenv("COMP_CWORD", 0))
    args = words[1:cursor]
    try:
        incomplete = words[cursor]
    except IndexError:
        incomplete = ""
    for choice in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
        # bash only needs the value itself, not the help text.
        click.echo(choice[0])
    return True
def do_zsh_complete(cli: click.Command, prog_name: str) -> bool:
    """Print a zsh ``_arguments`` spec (or ``_files``) for the current line."""
    completion_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
    words = click.parser.split_arg_string(completion_args)
    args = words[1:]
    incomplete = ""
    if args and not completion_args.endswith(" "):
        incomplete = args[-1]
        args = args[:-1]

    def escape(s: str) -> str:
        # Double the quote characters and backslash-escape expansion chars,
        # in this exact order, to keep the value safe inside zsh quoting.
        for old, new in (('"', '""'), ("'", "''"), ("$", "\\$"), ("`", "\\`")):
            s = s.replace(old, new)
        return s

    entries = []
    for item, help in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
        if help:
            entries.append(f'"{escape(item)}":"{escape(help)}"')
        else:
            entries.append(f'"{escape(item)}"')
    if entries:
        joined = "\n".join(entries)
        click.echo(f"_arguments '*: :(({joined}))'")
    else:
        # No candidates: fall back to filename completion.
        click.echo("_files")
    return True
def do_fish_complete(cli: click.Command, prog_name: str) -> bool:
    """Fish completion handler.

    Depending on ``_TYPER_COMPLETE_FISH_ACTION`` this either prints the
    completion candidates (``get-args``) or exits with a status that tells
    fish whether argument completions exist (``is-args``: exit 0 means
    "use these args", exit 1 means "fall back to file completion").
    """
    completion_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
    complete_action = os.getenv("_TYPER_COMPLETE_FISH_ACTION", "")
    cwords = click.parser.split_arg_string(completion_args)
    args = cwords[1:]
    # A trailing space means the last word is complete; otherwise it is the
    # partial word being completed.
    if args and not completion_args.endswith(" "):
        incomplete = args[-1]
        args = args[:-1]
    else:
        incomplete = ""
    show_args = []
    for item, help in click._bashcomplete.get_choices(cli, prog_name, args, incomplete):
        if help:
            # Fish uses a tab to separate value from description; collapse
            # any whitespace in the help text to keep it on one line.
            formatted_help = re.sub(r"\s", " ", help)
            show_args.append(f"{item}\t{formatted_help}")
        else:
            show_args.append(item)
    if complete_action == "get-args":
        if show_args:
            for arg in show_args:
                click.echo(arg)
    elif complete_action == "is-args":
        if show_args:
            # Activate complete args (no files)
            sys.exit(0)
        else:
            # Deactivate complete args (allow files)
            sys.exit(1)
    return True
def do_powershell_complete(cli: click.Command, prog_name: str) -> bool:
    """Print ``value:::help`` completion lines for the PowerShell completer."""
    raw_args = os.getenv("_TYPER_COMPLETE_ARGS", "")
    incomplete = os.getenv("_TYPER_COMPLETE_WORD_TO_COMPLETE", "")
    args = click.parser.split_arg_string(raw_args)[1:]
    for value, help_text in click._bashcomplete.get_choices(
        cli, prog_name, args, incomplete
    ):
        # The PowerShell script splits on ":::"; help must never be empty.
        click.echo("{}:::{}".format(value, help_text or " "))
    return True
def do_shell_complete(*, cli: click.Command, prog_name: str, shell: str) -> bool:
    """Dispatch completion generation to the handler for *shell*.

    Returns False when *shell* is not supported.
    """
    handlers = {
        "bash": do_bash_complete,
        "zsh": do_zsh_complete,
        "fish": do_fish_complete,
        "powershell": do_powershell_complete,
        "pwsh": do_powershell_complete,
    }
    handler = handlers.get(shell)
    if handler is None:
        return False
    return handler(cli, prog_name)
# Maps each supported shell name to its completion-script template;
# "powershell" and "pwsh" deliberately share one template.
_completion_scripts = {
    "bash": COMPLETION_SCRIPT_BASH,
    "zsh": COMPLETION_SCRIPT_ZSH,
    "fish": COMPLETION_SCRIPT_FISH,
    "powershell": COMPLETION_SCRIPT_POWER_SHELL,
    "pwsh": COMPLETION_SCRIPT_POWER_SHELL,
}
def get_completion_script(*, prog_name: str, complete_var: str, shell: str) -> str:
    """Render and return the completion-script template for *shell*.

    Exits the process with status 1 when *shell* has no template.
    """
    template = _completion_scripts.get(shell)
    if template is None:
        click.echo(f"Shell {shell} not supported.", err=True)
        sys.exit(1)
    # Derive a shell-safe completion function name from the program name.
    func_name = click._bashcomplete._invalid_ident_char_re.sub(
        "", prog_name.replace("-", "_")
    )
    rendered = template % {
        "complete_func": f"_{func_name}_completion",
        "prog_name": prog_name,
        "autocomplete_var": complete_var,
    }
    return rendered.strip()
def handle_shell_complete(
    cli: click.Command, prog_name: str, complete_var: str, complete_instr: str
) -> bool:
    """Handle a ``<command>_<shell>`` completion instruction.

    ``source_<shell>`` prints the completion script; ``complete_<shell>``
    emits completion candidates.  Returns True when the instruction was
    handled, False otherwise.
    """
    command, sep, shell = complete_instr.partition("_")
    if not sep:
        click.echo("Invalid completion instruction.", err=True)
        sys.exit(1)
    if command == "source":
        script = get_completion_script(
            prog_name=prog_name, complete_var=complete_var, shell=shell
        )
        click.echo(script)
        return True
    if command == "complete":
        return do_shell_complete(cli=cli, prog_name=prog_name, shell=shell)
    return False
def completion_init() -> None:
    """Patch Click's completion entry point with the multi-shell handler.

    Idempotent: the patch is applied at most once per process, guarded by
    the module-level ``_click_patched`` flag.
    """
    global _click_patched
    if not _click_patched:
        testing = os.getenv("_TYPER_COMPLETE_TESTING")
        def testing_handle_shell_complete(
            cli: click.Command, prog_name: str, complete_var: str, complete_instr: str
        ) -> bool:
            # Test-only wrapper: exit via sys.exit instead of Click's
            # fast_exit so coverage tooling can flush its data.
            result = handle_shell_complete(cli, prog_name, complete_var, complete_instr)
            if result:
                # Avoid fast_exit(1) in Click so Coverage can finish
                sys.exit(1)
            return result
        if testing:
            click._bashcomplete.bashcomplete = testing_handle_shell_complete
        else:
            click._bashcomplete.bashcomplete = handle_shell_complete
        _click_patched = True
| 2.140625 | 2 |
src/program/migrations/0042_auto_20170715_1547.py | flokli/bornhack-website | 7 | 12762956 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-07-15 13:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a private ``submission_notes`` text
    field to both event and speaker proposals."""
    dependencies = [("program", "0041_auto_20170711_2248")]
    operations = [
        migrations.AddField(
            model_name="eventproposal",
            name="submission_notes",
            field=models.TextField(
                blank=True,
                help_text="Private notes for the event. Only visible to the submitting user and the BornHack organisers.",
            ),
        ),
        migrations.AddField(
            model_name="speakerproposal",
            name="submission_notes",
            field=models.TextField(
                blank=True,
                help_text="Private notes for the event. Only visible to the submitting user and the BornHack organisers.",
            ),
        ),
    ]
| 1.671875 | 2 |
harnessed_jobs/brighter_fatter_BOT/v0/bf_jh_task.py | duncanwood/EO-analysis-jobs | 2 | 12762957 | <filename>harnessed_jobs/brighter_fatter_BOT/v0/bf_jh_task.py
#!/usr/bin/env ipython
"""
Script for BOT brighter-fatter analysis.
"""
def bf_jh_task(det_name):
    """JH version of single sensor execution of the brighter-fatter task.

    Locates the flat-pair acquisition files for detector ``det_name`` in the
    current run, then delegates to ``bf_task`` with the matching gains, mask
    files and bias frame.  Returns None when no flat files are found.
    """
    # Imports are local so the harness only needs them at execution time.
    import glob
    import siteUtils
    from bot_eo_analyses import make_file_prefix, glob_pattern,\
        bias_filename, bf_task, find_flat2_bot, get_mask_files,\
        get_amplifier_gains
    run = siteUtils.getRunNumber()
    file_prefix = make_file_prefix(run, det_name)
    acq_jobname = siteUtils.getProcessName('BOT_acq')
    flat_files \
        = siteUtils.dependency_glob(glob_pattern('brighter_fatter', det_name),
                                    acq_jobname=acq_jobname)
    if not flat_files:
        print("bf_jh_task: Flat pairs files not found for detector", det_name)
        return None
    # Only the first flat of each pair is passed; flat2 is found per-file
    # by the flat2_finder below.
    flat_files = [_ for _ in flat_files if 'flat1' in _]
    mask_files = get_mask_files(det_name)
    eotest_results_file = '{}_eotest_results.fits'.format(file_prefix)
    gains = get_amplifier_gains(eotest_results_file)
    bias_frame = bias_filename(run, det_name)
    return bf_task(run, det_name, flat_files, gains, mask_files=mask_files,
                   flat2_finder=find_flat2_bot, bias_frame=bias_frame)
if __name__ == '__main__':
    # CLI entry point: expects the detector name as the single argument.
    import sys
    det_name = sys.argv[1]
    bf_jh_task(det_name)
| 2.109375 | 2 |
tests/test_postprocessing/test_shap_explainer.py | moonson619/AI4Water-1 | 17 | 12762958 | <filename>tests/test_postprocessing/test_shap_explainer.py
import time
import unittest
import os
import sys
import site
# Make the repository root importable when running this file directly.
ai4_dir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
site.addsitedir(ai4_dir)
import shap
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from ai4water import Model
from ai4water.datasets import busan_beach, MtropicsLaos
from ai4water.postprocessing.explain import ShapExplainer, explain_model_with_shap
from test_lime_explainer import make_lstm_reg_model, lstm_model, get_fitted_model, make_mlp_model
# Module-level fixtures shared by all tests below: a classification dataset
# from the MtropicsLaos source and the Busan beach regression dataset.
laos = MtropicsLaos()
class_data = laos.make_classification()
beach_data = busan_beach()
# todo, do not use any transformation on y for classification problem
# todo, allowed y_transformation are only log and sqrt
# todo unable to use functional api with transformation for model explainability
def fit_and_plot(model_name, data, heatmap=False, beeswarm_plot=False):
    """Fit *model_name* on *data* and draw the requested shap plots.

    Each plot is drawn twice: once feeding the explainer a DataFrame and
    once the underlying numpy array, to exercise both input paths.
    """
    model = get_fitted_model(model_name, data)
    x_test, _ = model.test_data()
    x_test = pd.DataFrame(x_test, columns=model.input_features).iloc[0:5]
    for test_input in (x_test, x_test.values):
        interpreter = ShapExplainer(
            model, test_input, path=model.path, explainer="Explainer", framework="ML"
        )
        if heatmap:
            interpreter.heatmap(show=False)
        if beeswarm_plot:
            interpreter.beeswarm_plot(show=False)
    return
def fit_and_interpret(model_name:str,
                      data,
                      draw_heatmap=True,
                      **kwargs
                      ):
    """Fit *model_name* on *data* and run the explainer end to end.

    Runs ShapExplainer twice: once with DataFrame inputs and once with the
    raw numpy arrays, to exercise both input paths; extra *kwargs* are
    forwarded to ShapExplainer.
    """
    model = get_fitted_model(model_name, data)
    x_train, y_train = model.training_data()
    x_train = pd.DataFrame(x_train, columns=model.input_features).iloc[0:11]
    x_test, y_test = model.test_data()
    x_test = pd.DataFrame(x_test, columns=model.input_features).iloc[0:2]
    interpreter = ShapExplainer(model, x_test,
                                train_data=x_train,
                                path=model.path,
                                framework="ML",
                                **kwargs
                                )
    interpreter(save=False)
    if draw_heatmap:
        interpreter.heatmap(show=False)
    explainer = ShapExplainer(model,
                              x_test.values,
                              train_data=x_train.values,
                              features=model.input_features,
                              path=model.path,
                              framework="ML",
                              **kwargs
                              )
    explainer(save=False)
    return
def get_explainer(model_name, data):
    """Fit *model_name* on *data*; return a ShapExplainer over 5 test rows."""
    model = get_fitted_model(model_name, data)
    x_test, _ = model.test_data()
    frame = pd.DataFrame(x_test, columns=model.input_features).iloc[0:5]
    return ShapExplainer(model, frame, explainer="Explainer", path=model.path)
def fit_and_draw_plots(model_name, data, draw_heatmap=False):
    """Fit *model_name* on *data* and draw waterfall/scatter (and optionally
    heatmap) plots, with both DataFrame and numpy-array explainer inputs."""
    model = get_fitted_model(model_name, data)
    x_test, y_test = model.test_data()
    x_test = pd.DataFrame(x_test, columns=model.input_features).iloc[0:5]
    explainer = ShapExplainer(model, x_test, explainer="Explainer",
                              path=model.path, framework="ML")
    explainer.waterfall_plot_all_examples(show=False)
    explainer.scatter_plot_all_features(show=False)
    if draw_heatmap:
        explainer.heatmap(show=False)
    explainer = ShapExplainer(model, x_test.values, explainer="Explainer",
                              path=model.path, framework="ML")
    explainer.waterfall_plot_all_examples(show=False)
    explainer.scatter_plot_all_features(show=False)
    #explainer.heatmap()
    return
def get_mlp():
    """Build an MLP model and return ``(model, first-5-test-rows DataFrame)``.

    Also resets matplotlib rcParams, which the model construction may alter.
    """
    model = make_mlp_model()
    #train_x, train_y = model.training_data()
    testx, testy = model.test_data()
    testx = pd.DataFrame(testx, columns=model.input_features).iloc[0:5]
    #train_x = pd.DataFrame(train_x, columns=model.input_features).iloc[0:5]
    plt.rcParams.update(plt.rcParamsDefault)
    return model, testx
class TestShapExplainers(unittest.TestCase):
    """Smoke tests for ShapExplainer across model families and shap explainer
    backends (Tree/Kernel/Deep/Gradient), sklearn-style ML models, MLPs and
    LSTMs, including multi-input models.  Most tests only check that the
    plotting/explaining code runs without error (``show=False`` throughout).
    """
    def test_doc_example(self):
        X, y = shap.datasets.diabetes()
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
        lin_regr = linear_model.LinearRegression()
        lin_regr.fit(X_train, y_train)
        explainer = ShapExplainer(lin_regr,
                                 data=X_test.iloc[0:14],
                                 train_data=X_train,
                                 num_means=12,
                                 path=os.path.join(os.getcwd(), "results"))
        explainer(plot_force_all=True)
        explainer.heatmap(show=False)
        explainer.plot_shap_values(show=False)
        return
    def test_pd_plot(self):
        for mod in [
            "XGBRegressor", # todo error
            "RandomForestRegressor",
            "LGBMRegressor",
            "DecisionTreeRegressor",
            "ExtraTreeRegressor",
            "ExtraTreesRegressor",
            "GradientBoostingRegressor",
            "HistGradientBoostingRegressor",
            "XGBRFRegressor" # todo error
        ]:
            exp = get_explainer(mod, busan_beach(inputs=["pcp_mm", "air_p_hpa", "air_temp_c"]))
            exp.pdp_single_feature(feature_name=exp.features[0], show=False, save=False)
            time.sleep(1)
        return
    def test_ai4water_model(self):
        model = Model(
            model="LinearRegression",
            verbosity=0
        )
        model.fit(data=busan_beach(inputs=['wat_temp_c', 'tide_cm']))
        x_train, y_train = model.training_data()
        x_test, y_test = model.test_data()
        x_train = pd.DataFrame(x_train, columns=model.input_features)
        x_test = pd.DataFrame(x_test, columns=model.input_features).iloc[0:5]
        explainer = ShapExplainer(model,
                                  data=x_test, train_data=x_train,
                                  num_means=10, path=model.path,
                                  explainer="KernelExplainer")
        explainer(plot_force_all=False)
        #explainer.heatmap()
        explainer = ShapExplainer(model,
                                  train_data=x_train.values, data=x_test.values,
                                  num_means=10, path=model.path, explainer="KernelExplainer")
        explainer(plot_force_all=False)
        #explainer.heatmap()
        return
    def test_raise_error(self):
        # Multi-output models are not supported, so construction must raise.
        model = Model(
            model={"layers": {"LSTM":{"units": 4}}},
            input_features=['wat_temp_c', 'tide_cm'],
            output_features=['tetx_coppml', "ecoli", "16s", "inti1"],
            verbosity=0
        )
        model.fit(data=busan_beach(inputs=['wat_temp_c', 'tide_cm'],
                                   target=['tetx_coppml', "ecoli", "16s", "inti1"]))
        x_test, y_test = model.test_data()
        def initiate_class():
            return ShapExplainer(model, x_test)
        self.assertRaises(AssertionError,
                          initiate_class)
        return
    def test_xgb(self):
        fit_and_interpret("XGBRegressor", data=busan_beach(inputs=['wat_temp_c', 'tide_cm']),
                          draw_heatmap=True, explainer="TreeExplainer")
        return
    def test_lgbm(self):
        fit_and_interpret("LGBMRegressor", data=busan_beach(inputs=['wat_temp_c', 'tide_cm']),
                          draw_heatmap=False, explainer="TreeExplainer")
        return
    def test_catboost(self):
        fit_and_interpret("CatBoostRegressor", data=busan_beach(inputs=['wat_temp_c', 'tide_cm']),
                          draw_heatmap=False, explainer="TreeExplainer")
        return
    def test_waterfall_with_xgb(self):
        fit_and_draw_plots("XGBRegressor", busan_beach(inputs=['wat_temp_c', 'tide_cm']),
                           draw_heatmap=True)
        return
    def test_waterfall_with_catboost(self):
        fit_and_draw_plots("CatBoostRegressor", busan_beach(inputs=['wat_temp_c', 'tide_cm']))
        return
    def test_heatmap(self):
        for mod in [
            "XGBRegressor",
            "RandomForestRegressor",
            ##"LGBMRegressor",  # process stopping problem
            "DecisionTreeRegressor",
            "ExtraTreeRegressor",
            "ExtraTreesRegressor",
            "GradientBoostingRegressor",
            ##"HISTGRADIENTBOOSTINGREGRESSOR",  # taking very long time
            "XGBRFRegressor"
        ]:
            fit_and_plot(mod, beach_data, heatmap=True)
            time.sleep(1)
        return
    def test_beeswarm_plot(self):
        for mod in [
            "XGBRegressor",
            "RandomForestRegressor",
            "LGBMRegressor",
            "DecisionTreeRegressor",
            "ExtraTreeRegressor",
            "ExtraTreesRegressor",
            "GradientBoostingRegressor",
            "HistGradientBoostingRegressor",
            "XGBRFRegressor"
        ]:
            fit_and_plot(mod, beach_data, beeswarm_plot=True)
            time.sleep(1)
        return
    def test_deepexplainer_mlp(self):
        model, testx = get_mlp()
        ex = ShapExplainer(model, testx, explainer="DeepExplainer", layer=2,
                           path=model.path)
        ex.plot_shap_values(show=False)
        return
    def test_gradientexplainer_mlp(self):
        model, testx = get_mlp()
        ex = ShapExplainer(model, testx, layer=1, explainer="GradientExplainer",
                           path=model.path)
        plt.rcParams.update(plt.rcParamsDefault)
        ex.plot_shap_values(show=False)
        return
    def test_class_model(self):
        fit_and_interpret("DecisionTreeClassifier", data=class_data, draw_heatmap=False,
                          explainer="KernelExplainer")
        return
    def test_lstm_model_deep_exp(self):
        m = make_lstm_reg_model()
        train_x, _ = m.training_data()
        exp = ShapExplainer(model=m, data=train_x, layer=2, features=m.input_features, path=m.path)
        exp.summary_plot(show=False)
        exp.force_plot_single_example(0, show=False)
        exp.plot_shap_values(show=False)
        return
    def test_lstm_model_gradient_exp(self):
        m = make_lstm_reg_model()
        train_x, _ = m.training_data()
        exp = ShapExplainer(model=m, data=train_x, layer="LSTM", explainer="GradientExplainer",
                            features=m.input_features, path=m.path)
        exp.plot_shap_values(show=False)
        exp.force_plot_single_example(0, show=False)
        return
    def test_lstm_model_ai4water(self):
        time.sleep(1)
        m = make_lstm_reg_model()
        train_x, _ = m.training_data()
        exp = ShapExplainer(model=m, data=train_x, layer="LSTM", explainer="GradientExplainer",
                            features=m.input_features, path=m.path)
        exp.force_plot_single_example(0, show=False)
        return
    def test_ai4water_ml(self):
        for m in [
            "XGBRegressor",
            "RandomForestRegressor",
            "GradientBoostingRegressor"
        ]:
            model = get_fitted_model(m, busan_beach(inputs=['wat_temp_c', 'tide_cm']))
            exp = explain_model_with_shap(model, examples_to_explain=2, explainer="TreeExplainer")
            assert isinstance(exp, ShapExplainer)
        return
    def test_ai4water_mlp(self):
        time.sleep(1)
        model = make_mlp_model()
        exp = explain_model_with_shap(model, examples_to_explain=2)
        assert isinstance(exp, ShapExplainer)
        return
    def test_ai4water_lstm(self):
        m = lstm_model()
        exp = explain_model_with_shap(m, examples_to_explain=2)
        assert isinstance(exp, ShapExplainer)
        return
    def test_plots_for_3d_input(self):
        model = lstm_model()
        test_x, _ = model.test_data()
        p = model.predict(test_x)
        exp = ShapExplainer(model, test_x, layer=2, path=model.path,
                            features=model.input_features
                            )
        exp.force_plot_single_example(np.argmin(p).item(), show=False)
        exp.force_plot_single_example(np.argmax(p).item(), show=False)
        exp.waterfall_plot_single_example(np.argmin(p).item(), show=False)
        exp.waterfall_plot_single_example(np.argmax(p).item(), show=False)
        exp.pdp_all_features(lookback=0, show=False)
        return
    def test_multiple_inputs(self):
        model = Model(model={"layers": {"Input_0": {"shape": (10, 2)},
                                        "LSTM_0": {"config": {"units": 62},
                                                   "inputs": "Input_0",
                                                   "outputs": "lstm0_output"},
                                        "Input_1": {"shape": (5, 3)},
                                        "LSTM_1": {"config": {"units": 32},
                                                   "inputs": "Input_1",
                                                   "outputs": "lstm1_output"},
                                        "Concatenate": {"config": {}, "inputs": ["lstm0_output", "lstm1_output"]},
                                        "Dense": {"config": 1}
                                        }}, verbosity=0)
        test_x = [np.random.random((100, 10, 2)), np.random.random((100, 5, 3))]
        exp = ShapExplainer(model, test_x, layer="LSTM_1", path=model.path)
        exp.summary_plot(show=False)
        exp.plot_shap_values(show=False)
        return
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 2.546875 | 3 |
cinema/movies/apps.py | kevinGarcia15/cinemaAPI | 0 | 12762959 | """users apps"""
#django
from django.apps import AppConfig
class MoviesAppConfig(AppConfig):
    """
    Django application configuration for the cinema.movies app.
    """
    # Full Python path to the application package.
    name = 'cinema.movies'
    # Human-readable name (shown e.g. in the Django admin).
    verbose_name = 'Movies'
aio_telegraph/__init__.py | bluzir/aio-telegraph | 1 | 12762960 | name = 'aio_telegraph'
| 0.996094 | 1 |
src/rez/utils/execution.py | alexey-pelykh/rez | 0 | 12762961 | # SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
"""
Utilities related to process/script execution.
"""
from rez.vendor.six import six
from rez.utils.yaml import dump_yaml
from rez.vendor.enum import Enum
from contextlib import contextmanager
import subprocess
import sys
import stat
import os
import io
@contextmanager
def add_sys_paths(paths):
    """Temporarily extend ``sys.path`` with *paths*.

    The previous ``sys.path`` is restored when the context exits, even if
    the body raises.
    """
    saved = list(sys.path)
    sys.path += list(paths)
    try:
        yield
    finally:
        sys.path = saved
# On py2, subprocess.Popen is not a context manager, so add the protocol
# methods ourselves; on py3 the stock class already supports it.
if six.PY2:
    class _PopenBase(subprocess.Popen):
        def __enter__(self):
            return self
        def __exit__(self, exc_type, value, traceback):
            # Wait for the child to finish; exceptions are not suppressed.
            self.wait()
else:  # py3
    _PopenBase = subprocess.Popen
class Popen(_PopenBase):
    """subprocess.Popen wrapper.
    Allows for Popen to be used as a context in both py2 and py3, works
    around a stdin-related CPython bug, and accepts the py3 ``text`` kwarg
    on older interpreters.
    """
    def __init__(self, args, **kwargs):
        # Avoids python bug described here: https://bugs.python.org/issue3905.
        # This can arise when apps (maya) install a non-standard stdin handler.
        #
        # In newer version of maya and katana, the sys.stdin object can also
        # become replaced by an object with no 'fileno' attribute, this is also
        # taken into account.
        #
        if "stdin" not in kwargs:
            try:
                file_no = sys.stdin.fileno()
            # https://github.com/nerdvegas/rez/pull/966
            except (AttributeError, io.UnsupportedOperation):
                file_no = None
            if file_no is None and sys.__stdin__ is not None:
                file_no = sys.__stdin__.fileno()
            # If stdin is not one of the standard descriptors, detach from it.
            if file_no not in (0, 1, 2):
                kwargs["stdin"] = subprocess.PIPE
        # Add support for the new py3 "text" arg, which is equivalent to
        # "universal_newlines".
        # https://docs.python.org/3/library/subprocess.html#frequently-used-arguments
        #
        text = kwargs.pop("text", None)
        universal_newlines = kwargs.pop("universal_newlines", None)
        if text or universal_newlines:
            kwargs["universal_newlines"] = True
            # fixes py3/cmd.exe UnicodeDecodeError() with some characters.
            # UnicodeDecodeError: 'charmap' codec can't decode byte
            # 0x8d in position 1023172: character maps to <undefined>
            #
            # NOTE: currently no solution for `python3+<3.6`
            #
            if sys.version_info[:2] >= (3, 6) and "encoding" not in kwargs:
                kwargs["encoding"] = "utf-8"
        super(Popen, self).__init__(args, **kwargs)
class ExecutableScriptMode(Enum):
    """
    Which scripts to create with util.create_executable_script.
    """
    # Start with 1 to not collide with None checks
    # Requested script only. Usually extension-less.
    single = 1
    # Create .py script that will allow launching scripts on
    # windows without extension, but may require extension on
    # other systems.
    py = 2
    # Will create py script on windows and requested on
    # other platforms
    platform_specific = 3
    # Creates the requested script and a .py script so that scripts
    # can be launched without extension from windows and other
    # systems.
    both = 4
# TODO: Maybe also allow distlib.ScriptMaker instead of the .py + PATHEXT.
def create_executable_script(filepath, body, program=None, py_script_mode=None):
    """
    Create an executable script. In case a py_script_mode has been set to create
    a .py script the shell is expected to have the PATHEXT environment
    variable to include ".PY" in order to properly launch the command without
    the .py extension.
    Args:
        filepath (str): File to create.
        body (str or callable): Contents of the script. If a callable, its code
            is used as the script body.
        program (str): Name of program to launch the script. Default is 'python'
        py_script_mode(ExecutableScriptMode): What kind of script to create.
            Defaults to rezconfig.create_executable_script_mode.
    Returns:
        List of filepaths of created scripts. This may differ from the supplied
        filepath depending on the py_script_mode
    """
    from rez.config import config
    from rez.utils.platform_ import platform_
    program = program or "python"
    py_script_mode = py_script_mode or config.create_executable_script_mode
    # Forwarding scripts on windows get a batch-file preamble instead of a
    # shebang; see the PR below for details.
    # https://github.com/nerdvegas/rez/pull/968
    is_forwarding_script_on_windows = (
        program == "_rez_fwd"
        and platform_.name == "windows"
        and filepath.lower().endswith(".cmd")
    )
    if callable(body):
        from rez.utils.sourcecode import SourceCode
        code = SourceCode(func=body)
        body = code.source
    if not body.endswith('\n'):
        body += '\n'
    # Windows does not support shebang, but it will run with
    # default python, or in case of later python versions 'py' that should
    # try to use sensible python interpreters depending on the shebang line.
    # Compare PEP-397.
    # In order for execution to work in windows we need to create a .py
    # file and set the PATHEXT to include .py (as done by the shell plugins)
    # So depending on the py_script_mode we might need to create more then
    # one script
    script_filepaths = [filepath]
    if program == "python":
        script_filepaths = _get_python_script_files(filepath, py_script_mode,
                                                    platform_.name)
    for current_filepath in script_filepaths:
        with open(current_filepath, 'w') as f:
            # TODO: make cross platform
            if is_forwarding_script_on_windows:
                # following lines of batch script will be stripped
                # before yaml.load
                f.write("@echo off\n")
                f.write("%s.exe %%~dpnx0 %%*\n" % program)
                f.write("goto :eof\n") # skip YAML body
                f.write(":: YAML\n") # comment for human
            else:
                f.write("#!/usr/bin/env %s\n" % program)
            f.write(body)
        # TODO: Although Windows supports os.chmod you can only set the readonly
        # flag. Setting the file readonly breaks the unit tests that expect to
        # clean up the files once the test has run. Temporarily we don't bother
        # setting the permissions, but this will need to change.
        if os.name == "posix":
            os.chmod(
                current_filepath,
                stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IXUSR
                | stat.S_IXGRP | stat.S_IXOTH
            )
    return script_filepaths
def _get_python_script_files(filepath, py_script_mode, platform):
    """
    Evaluates the py_script_mode for the requested filepath on the given
    platform.
    Args:
        filepath: requested filepath
        py_script_mode (ExecutableScriptMode):
        platform (str): Platform to evaluate the script files for
    Returns:
        list of str: filepaths of scripts to create based on inputs
    """
    base, ext = os.path.splitext(filepath)
    has_py_ext = (ext == ".py")
    on_windows = (platform == "windows")
    mode = py_script_mode
    result = []
    # The script under its requested name is wanted unless the mode only
    # targets .py scripts (py, or platform_specific on windows) and the
    # requested name lacks the extension.
    wants_requested = (
        mode in (ExecutableScriptMode.single, ExecutableScriptMode.both)
        or (mode == ExecutableScriptMode.py and has_py_ext)
        or (mode == ExecutableScriptMode.platform_specific
            and (has_py_ext or not on_windows))
    )
    if wants_requested:
        result.append(filepath)
    # An additional .py twin is wanted for modes that target windows-style
    # extension-based launching, when the requested name is not already .py.
    wants_py_twin = not has_py_ext and (
        mode in (ExecutableScriptMode.both, ExecutableScriptMode.py)
        or (mode == ExecutableScriptMode.platform_specific and on_windows)
    )
    if wants_py_twin:
        result.append(base + ".py")
    return result
def create_forwarding_script(filepath, module, func_name, *nargs, **kwargs):
    """Create a 'forwarding' script.
    A forwarding script is one that executes some arbitrary Rez function. This
    is used internally by Rez to dynamically create a script that uses Rez,
    even though the parent environment may not be configured to do so.

    The target (module, func_name, and any extra args) is embedded into the
    script body as YAML via the "_rez_fwd" launcher program.
    """
    from rez.utils.platform_ import platform_
    # Forwarding scripts on windows must be batch files.
    if platform_.name == "windows" and \
            os.path.splitext(filepath)[-1].lower() != ".cmd":
        filepath += ".cmd"
    doc = dict(
        module=module,
        func_name=func_name)
    if nargs:
        doc["nargs"] = nargs
    if kwargs:
        doc["kwargs"] = kwargs
    body = dump_yaml(doc)
    create_executable_script(filepath, body, "_rez_fwd")
| 1.90625 | 2 |
tests/test_vlan/test_network.py | mteter-upenn/bacpypes | 0 | 12762962 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test Module Template
--------------------
"""
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
# some debugging: _debug gates the trace calls below; _log registers this
# module with the bacpypes module logger.
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
def setup_module():
    """pytest module-level setup hook; only emits a debug trace."""
    if _debug: setup_module._debug("setup_module")
@bacpypes_debugging
def teardown_module():
    """pytest module-level teardown hook; only emits a debug trace."""
    if _debug: teardown_module._debug("teardown_module")
@bacpypes_debugging
def setup_function(function):
    """pytest per-function setup hook; only emits a debug trace."""
    if _debug: setup_function._debug("setup_function %r", function)
@bacpypes_debugging
def teardown_function(function):
    """pytest per-function teardown hook; only emits a debug trace."""
    if _debug: teardown_function._debug("teardown_function %r", function)
@bacpypes_debugging
class TestCaseTemplate(unittest.TestCase):
    """Template test case that traces every pytest lifecycle hook.

    All methods only emit a debug trace when the module-level ``_debug``
    flag is set; there are no assertions.
    """
    @classmethod
    def setup_class(cls):
        if _debug: TestCaseTemplate._debug("setup_class")
    @classmethod
    def teardown_class(cls):
        if _debug: TestCaseTemplate._debug("teardown_class")
    def setup_method(self, method):
        # Fixed: the debug label previously said "setup_module", which made
        # traces from this hook indistinguishable from the module hook.
        if _debug: TestCaseTemplate._debug("setup_method %r", method)
    def teardown_method(self, method):
        if _debug: TestCaseTemplate._debug("teardown_method %r", method)
    def test_something(self):
        if _debug: TestCaseTemplate._debug("test_something")
    def test_something_else(self):
        if _debug: TestCaseTemplate._debug("test_something_else")
| 2.375 | 2 |
include/ClientNetworkingSessions.py | MsgLosers/hydrus | 0 | 12762963 | import pickle
from . import ClientConstants as CC
from . import ClientNetworkingContexts
from . import ClientNetworkingDomain
from . import HydrusData
from . import HydrusSerialisable
from . import HydrusGlobals as HG
import requests
import threading
# PySocks is an optional dependency; record whether SOCKS proxying is
# available so callers can degrade gracefully when it is not installed.
try:
    import socket
    import socks
    SOCKS_PROXY_OK = True
except Exception:
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt still
    # propagate; any import failure simply disables SOCKS support.
    SOCKS_PROXY_OK = False
class NetworkSessionManager( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER
SERIALISABLE_NAME = 'Session Manager'
SERIALISABLE_VERSION = 1
SESSION_TIMEOUT = 60 * 60
def __init__( self ):
HydrusSerialisable.SerialisableBase.__init__( self )
self.engine = None
self._dirty = False
self._lock = threading.Lock()
self._network_contexts_to_sessions = {}
self._network_contexts_to_session_timeouts = {}
self._proxies_dict = {}
self._Reinitialise()
HG.client_controller.sub( self, 'Reinitialise', 'notify_new_options' )
def _CleanSessionCookies( self, network_context, session ):
if network_context not in self._network_contexts_to_session_timeouts:
self._network_contexts_to_session_timeouts[ network_context ] = 0
if HydrusData.TimeHasPassed( self._network_contexts_to_session_timeouts[ network_context ] ):
session.cookies.clear_session_cookies()
self._network_contexts_to_session_timeouts[ network_context ] = HydrusData.GetNow() + self.SESSION_TIMEOUT
session.cookies.clear_expired_cookies()
def _GenerateSession( self, network_context ):
session = requests.Session()
if network_context.context_type == CC.NETWORK_CONTEXT_HYDRUS:
session.verify = False
return session
def _GetSerialisableInfo( self ):
serialisable_network_contexts_to_sessions = [ ( network_context.GetSerialisableTuple(), pickle.dumps( session ).hex() ) for ( network_context, session ) in list(self._network_contexts_to_sessions.items()) ]
return serialisable_network_contexts_to_sessions
def _GetSessionNetworkContext( self, network_context ):
# just in case one of these slips through somehow
if network_context.context_type == CC.NETWORK_CONTEXT_DOMAIN:
second_level_domain = ClientNetworkingDomain.ConvertDomainIntoSecondLevelDomain( network_context.context_data )
network_context = ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, second_level_domain )
return network_context
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
serialisable_network_contexts_to_sessions = serialisable_info
for ( serialisable_network_context, pickled_session_hex ) in serialisable_network_contexts_to_sessions:
network_context = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_network_context )
try:
session = pickle.loads( bytes.fromhex( pickled_session_hex ) )
except:
# new version of requests uses a diff format, wew
continue
session.cookies.clear_session_cookies()
self._network_contexts_to_sessions[ network_context ] = session
def _Reinitialise( self ):
self._proxies_dict = {}
http_proxy = HG.client_controller.new_options.GetNoneableString( 'http_proxy' )
https_proxy = HG.client_controller.new_options.GetNoneableString( 'https_proxy' )
if http_proxy is not None:
self._proxies_dict[ 'http' ] = http_proxy
if https_proxy is not None:
self._proxies_dict[ 'https' ] = https_proxy
def _SetDirty(self):
    # Mark the manager as having unsaved changes; the owner polls
    # IsDirty() to decide when to reserialise it.
    self._dirty = True
def ClearSession(self, network_context):
    """Drop the stored session for a context, if one exists."""
    with self._lock:
        network_context = self._GetSessionNetworkContext(network_context)
        # Stored values are always Session objects, never None, so a
        # None result means there was nothing to remove.
        removed = self._network_contexts_to_sessions.pop(network_context, None)
        if removed is not None:
            self._SetDirty()
def GetNetworkContexts(self):
    """Return every network context that currently has a session."""
    with self._lock:
        # Iterating the dict yields its keys; copy into a list so the
        # caller gets a snapshot that is safe to use outside the lock.
        return list(self._network_contexts_to_sessions)
def GetSession(self, network_context):
    """Fetch (creating on demand) the session for a network context.

    Refreshes proxy settings and expires stale cookies before handing
    the session back.
    """
    with self._lock:
        network_context = self._GetSessionNetworkContext(network_context)
        if network_context not in self._network_contexts_to_sessions:
            self._network_contexts_to_sessions[network_context] = self._GenerateSession(network_context)
        session = self._network_contexts_to_sessions[network_context]
        # Pick up any proxy-option changes made since the session was created.
        if session.proxies != self._proxies_dict:
            session.proxies = dict(self._proxies_dict)
        #
        self._CleanSessionCookies(network_context, session)
        #
        # tumblr can't into ssl for some reason, and the data subdomain they use has weird cert properties, looking like amazon S3
        # perhaps it is inward-facing somehow? whatever the case, let's just say fuck it for tumblr
        if network_context.context_type == CC.NETWORK_CONTEXT_DOMAIN and network_context.context_data == 'tumblr.com':
            session.verify = False
        #
        # NOTE(review): the manager is marked dirty on every fetch, even
        # when nothing visibly changed -- presumably because cookies may
        # have mutated; confirm this is intentional.
        self._SetDirty()
        return session
def GetSessionForDomain(self, domain):
    """Convenience wrapper: get the session for a bare domain string."""
    domain_context = ClientNetworkingContexts.NetworkContext(
        context_type=CC.NETWORK_CONTEXT_DOMAIN, context_data=domain)
    return self.GetSession(domain_context)
def IsDirty(self):
    # True when there are changes that have not yet been serialised.
    with self._lock:
        return self._dirty
def Reinitialise(self):
    # Public, lock-taking wrapper around _Reinitialise.
    with self._lock:
        self._Reinitialise()
def SetClean(self):
    # Called by the owner once the current state has been serialised.
    with self._lock:
        self._dirty = False
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_SESSION_MANAGER ] = NetworkSessionManager
# --- src/users/schema.py (cbsBiram/xarala__ssr) ---
from django.db.models.aggregates import Sum
import graphene
import graphql_jwt
from django.conf import settings
from django.contrib.auth.forms import PasswordChangeForm
from django.db.models import Q
from graphene_django import DjangoObjectType
from graphene_django.forms.mutation import DjangoFormMutation
from graphql import GraphQLError
from blog.query_types import PostType
from .models import CustomUser as User
from .models import ResetCode
from .tasks import account_created, send_password_reset_email
from xarala.utils import email_validation_function, get_paginator, save_base_64
class UserType(DjangoObjectType):
    """GraphQL type exposing the CustomUser model."""

    class Meta:
        model = User

    # Extra field: the posts authored by this user.
    get_user_posts = graphene.List(PostType)

    def resolve_get_user_posts(instance, info, **kwargs):
        # Delegates to the model helper.
        return instance.user_posts()
class UserPaginatedType(graphene.ObjectType):
    """One page of users plus pagination metadata (built by get_paginator)."""
    page = graphene.Int()       # current page number
    pages = graphene.Int()      # total number of pages
    has_next = graphene.Boolean()
    has_prev = graphene.Boolean()
    objects = graphene.List(UserType)  # the users on this page
class AdminKpisType(graphene.ObjectType):
    """Admin dashboard KPIs: user counts per role and total sales."""
    students_count = graphene.Int()
    teachers_count = graphene.Int()
    authors_count = graphene.Int()
    sales_figures = graphene.Decimal()
class Query(graphene.ObjectType):
    """Root user queries: current user, single user, admin KPIs and
    paginated student/teacher/author listings."""

    me = graphene.Field(UserType)
    user = graphene.Field(UserType, id=graphene.Int(required=True))
    users = graphene.Field(AdminKpisType)
    students = graphene.Field(UserPaginatedType, page=graphene.Int())
    teachers = graphene.Field(UserPaginatedType, page=graphene.Int())
    authors = graphene.Field(UserPaginatedType, page=graphene.Int())
    listTeachers = graphene.List(UserType)
    listAuthors = graphene.List(UserType)

    def resolve_user(self, info, id):
        # NOTE(review): no auth check and no DoesNotExist handling here,
        # unlike the admin-gated resolvers below -- confirm this endpoint
        # is meant to be public.
        return User.objects.get(id=id)

    def resolve_me(self, info):
        # Current authenticated user, or an error for anonymous callers.
        user = info.context.user
        if user.is_anonymous:
            raise GraphQLError("Not loged in!")
        return user

    def resolve_users(self, info):
        # Admin-only KPI aggregate for the dashboard.
        user = info.context.user
        if not user.is_staff:
            raise GraphQLError("You're not admin!")
        users = User.objects.all()
        students_count = users.filter(is_student=True).count()
        teachers_count = users.filter(is_teacher=True).count()
        authors_count = users.filter(is_writer=True).count()
        # Only students enrolled in at least one course contribute to sales.
        students = users.filter(is_student=True).exclude(courses_enrolled=None)
        prices_list = [
            student.courses_enrolled.aggregate(Sum("price"))["price__sum"]
            for student in students
        ]
        # NOTE(review): if a course price can be NULL, the aggregate may
        # yield None entries and sum() would raise -- verify the model.
        sales_figures = sum(prices_list)
        return AdminKpisType(
            students_count, teachers_count, authors_count, sales_figures
        )

    # The three paginated resolvers below share the same admin-gated,
    # 10-per-page pattern and differ only in the role filter.
    def resolve_students(self, info, page):
        page_size = 10
        user = info.context.user
        if not user.is_staff:
            raise GraphQLError("You're not admin!")
        users = User.objects.filter(is_student=True).order_by("-id")
        return get_paginator(users, page_size, page, UserPaginatedType)

    def resolve_teachers(self, info, page):
        page_size = 10
        user = info.context.user
        if not user.is_staff:
            raise GraphQLError("You're not admin!")
        users = User.objects.filter(is_teacher=True).order_by("-id")
        return get_paginator(users, page_size, page, UserPaginatedType)

    def resolve_authors(self, info, page):
        page_size = 10
        user = info.context.user
        if not user.is_staff:
            raise GraphQLError("You're not admin!")
        users = User.objects.filter(is_writer=True).order_by("-id")
        return get_paginator(users, page_size, page, UserPaginatedType)

    def resolve_listTeachers(self, info):
        # Unpaginated, ungated teacher list (newest first).
        return User.objects.filter(is_teacher=True).order_by("-id")

    def resolve_listAuthors(self, info):
        return User.objects.filter(is_writer=True).order_by("-id")
class UpdateUser(graphene.Mutation):
    """Update a user's basic profile fields by id."""

    user = graphene.Field(UserType)

    class Arguments:
        firstName = graphene.String()
        lastName = graphene.String()
        phone = graphene.String()
        address = graphene.String()
        userId = graphene.Int(required=True)

    def mutate(self, info, firstName, lastName, phone, address, userId):
        # Surface a clean GraphQL error instead of an unhandled
        # DoesNotExist when the id is unknown (same style as the
        # password-reset mutations in this module).
        try:
            user = User.objects.get(id=userId)
        except User.DoesNotExist:
            raise GraphQLError("User not found")
        user.first_name = firstName
        user.last_name = lastName
        user.address = address
        user.phone = phone
        user.save()
        return UpdateUser(user=user)
class UpdateAvatar(graphene.Mutation):
    """Replace the current user's avatar with a base64-encoded image."""

    success = graphene.Boolean()

    class Arguments:
        file = graphene.String()

    def mutate(self, info, file):
        user = info.context.user
        # Check authentication BEFORE decoding/writing the upload;
        # previously save_base_64 persisted the file to disk even for
        # anonymous requests that were then rejected.
        if user.is_anonymous:
            raise GraphQLError("Log in to edit user account!")
        final_file_url = save_base_64(file)
        user.avatar = final_file_url
        user.save()
        return UpdateAvatar(success=True)
class AuthMutation(graphene.ObjectType):
    """JWT auth mutations provided by django-graphql-jwt."""
    # django-graphql-jwt inheritances
    token_auth = graphql_jwt.ObtainJSONWebToken.Field()
    verify_token = graphql_jwt.Verify.Field()
    refresh_token = graphql_jwt.Refresh.Field()
    revoke_token = graphql_jwt.Revoke.Field()
class RegisterUser(graphene.Mutation):
    """Create a new student account and send a welcome email."""

    user = graphene.Field(UserType)

    class Arguments:
        email = graphene.String(required=True)
        password = graphene.String(required=True)
        firstName = graphene.String(required=True)
        lastName = graphene.String(required=True)

    def mutate(self, info, email, password, firstName, lastName):
        mail_to_lower = email_validation_function(email.lower())
        # Fail with a clean GraphQL error instead of a database
        # IntegrityError when the address is already registered.
        if User.objects.filter(email=mail_to_lower).exists():
            raise GraphQLError("Un compte existe déjà avec cette adresse email")
        user = User(email=mail_to_lower)
        user.set_password(password)
        user.is_student = True
        user.first_name = firstName
        user.last_name = lastName
        user.save()
        try:
            # Welcome email is best effort: a broken mail backend must
            # not block registration.
            account_created.delay(mail_to_lower)
        except Exception:
            pass
        return RegisterUser(user)
class PasswordResetEmail(graphene.Mutation):
    """Send a password-reset code to an existing account's email."""

    user = graphene.Field(UserType)

    class Arguments:
        email = graphene.String(required=True)

    def mutate(self, info, email):
        normalized_email = email_validation_function(email.lower())
        try:
            account = User.objects.get(email=normalized_email)
        except User.DoesNotExist:
            raise GraphQLError(
                "Compte non trouvé, merci de bien vérifier votre adresse email"
            )
        try:
            # Best effort: mail-backend failures are swallowed.
            send_password_reset_email.delay(normalized_email)
        except Exception:
            pass
        return PasswordResetEmail(account)
class PasswordReset(graphene.Mutation):
    """Set a new password for the account matching a valid reset code."""

    user = graphene.Field(UserType)

    class Arguments:
        email = graphene.String(required=True)
        code = graphene.String(required=True)
        newPassword = graphene.String(required=True)

    def mutate(self, info, email, code, newPassword):
        mail_to_lower = email_validation_function(email.lower())
        code_is_valid = ResetCode.objects.filter(
            Q(code=code) & Q(email=mail_to_lower) & Q(expired=False)
        ).exists()
        if not code_is_valid:
            raise GraphQLError("Code non trouvé dans notre système")
        try:
            user = User.objects.get(email=mail_to_lower)
        except User.DoesNotExist:
            raise GraphQLError(
                "Compte non trouvé, merci de bien vérifier votre adresse email"
            )
        # BUG FIX: the new password was never applied (an anonymization
        # placeholder had replaced the argument in set_password).
        user.set_password(newPassword)
        user.save()
        # Burn the code so it cannot be reused, and return the same user
        # object instead of re-fetching it.
        ResetCode.objects.filter(code=code).update(expired=True)
        return PasswordReset(user)
class PasswordChangeForm(PasswordChangeForm):
    """Adapter around Django's PasswordChangeForm.

    Django's form takes ``user`` positionally, but DjangoFormMutation
    passes it as a keyword; this shim pops it and forwards it first.
    NOTE(review): the class deliberately shadows the imported
    ``PasswordChangeForm`` name -- confirm that is intended.
    """

    def __init__(self, *args, **kwargs):
        user = kwargs.pop("user", None)
        super().__init__(user, *args, **kwargs)
class ChangePassword(DjangoFormMutation):
    """Mutation wrapping the password-change form for the current user."""

    class Meta:
        form_class = PasswordChangeForm

    @classmethod
    def get_form_kwargs(cls, root, info, **mutation_input):
        # Inject the authenticated user so the form can validate the old
        # password against it.
        return {
            **super().get_form_kwargs(root, info, **mutation_input),
            "user": info.context.user,
        }
class Mutation(AuthMutation, graphene.ObjectType):
    """Root mutations for the users app (plus JWT auth via AuthMutation)."""
    update_user = UpdateUser.Field()
    update_avatar = UpdateAvatar.Field()
    register = RegisterUser.Field()
    send_password_reset_email = PasswordResetEmail.Field()
    reset_password = PasswordReset.Field()
    change_password = ChangePassword.Field()
schema = graphene.Schema(query=Query)
# --- config/api_router.py (devnelmar/Pokeindex-application) ---
from django.conf import settings
from rest_framework.routers import DefaultRouter, SimpleRouter
from pokeindexapi.users.api.views import UserViewSet
from pokeindexapi.apps.pokedex.api.views import PokemonViewSet
# Use the browsable DefaultRouter in development and the lighter
# SimpleRouter in production.
router = DefaultRouter() if settings.DEBUG else SimpleRouter()

router.register("users", UserViewSet)
router.register("pokemon", PokemonViewSet, basename="pokemon")

app_name = "api"
urlpatterns = router.urls
# --- test/geocoders/geonames.py (hyper750/geopy) ---
# -*- coding: UTF-8 -*-
import unittest
import uuid
import pytz
from geopy import Point
from geopy.compat import u
from geopy.exc import GeocoderAuthenticationFailure, GeocoderQueryError
from geopy.geocoders import GeoNames
from test.geocoders.util import GeocoderTestBase, env
class GeoNamesTestCaseUnitTest(GeocoderTestBase):
    """Offline unit tests for the GeoNames geocoder (no API calls)."""

    def test_user_agent_custom(self):
        # A custom user_agent must be propagated to the request headers.
        geocoder = GeoNames(
            username='DUMMYUSER_NORBERT',
            user_agent='my_user_agent/1.0'
        )
        self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
@unittest.skipUnless(
    bool(env.get('GEONAMES_USERNAME')),
    "No GEONAMES_USERNAME env variable set"
)
class GeoNamesTestCase(GeocoderTestBase):
    """Live tests against the GeoNames web service (needs credentials)."""

    delta = 0.04

    @classmethod
    def setUpClass(cls):
        cls.geocoder = GeoNames(username=env['GEONAMES_USERNAME'])

    def reverse_timezone_run(self, payload, expected):
        # Helper: reverse-geocode a timezone and assert the pytz result.
        timezone = self._make_request(self.geocoder.reverse_timezone, **payload)
        if expected is None:
            self.assertIsNone(timezone)
        else:
            self.assertEqual(timezone.pytz_timezone, expected)
        return timezone

    def test_unicode_name(self):
        self.geocode_run(
            {"query": "Mount Everest, Nepal"},
            {"latitude": 27.987, "longitude": 86.925},
            skiptest_on_failure=True,  # sometimes the result is empty
        )

    def test_query_urlencoding(self):
        location = self.geocode_run(
            {"query": u("Ry\u016b\u014d")},
            {"latitude": 35.65, "longitude": 138.5},
            skiptest_on_failure=True,  # sometimes the result is empty
        )
        self.assertIn(u("Ry\u016b\u014d"), location.address)

    def test_reverse(self):
        location = self.reverse_run(
            {
                "query": "40.75376406311989, -73.98489005863667",
                "exactly_one": True,
            },
            {
                "latitude": 40.75376406311989,
                "longitude": -73.98489005863667,
            },
        )
        self.assertIn("Times Square", location.address)

    def test_geocode_empty_response(self):
        self.geocode_run(
            {"query": "sdlahaslkhdkasldhkjsahdlkash"},
            {},
            expect_failure=True,
        )

    def test_reverse_nearby_place_name_raises_for_feature_code(self):
        # feature_code is only valid with find_nearby_type='findNearby'.
        with self.assertRaises(ValueError):
            self.reverse_run(
                {
                    "query": "40.75376406311989, -73.98489005863667",
                    "exactly_one": True,
                    "feature_code": "ADM1",
                },
                {},
            )
        with self.assertRaises(ValueError):
            self.reverse_run(
                {
                    "query": "40.75376406311989, -73.98489005863667",
                    "exactly_one": True,
                    "feature_code": "ADM1",
                    "find_nearby_type": "findNearbyPlaceName",
                },
                {},
            )

    def test_reverse_nearby_place_name_lang(self):
        location = self.reverse_run(
            {
                "query": "52.50, 13.41",
                "exactly_one": True,
                "lang": 'ru',
            },
            {},
        )
        # BUG FIX: the expected value had been replaced by a '<NAME>'
        # placeholder; the coordinates are central Berlin, so lang='ru'
        # must yield the Russian exonym in the address.
        self.assertIn(u'Берлин', location.address)

    def test_reverse_find_nearby_raises_for_lang(self):
        # lang is only valid with findNearbyPlaceName.
        with self.assertRaises(ValueError):
            self.reverse_run(
                {
                    "query": "40.75376406311989, -73.98489005863667",
                    "exactly_one": True,
                    "find_nearby_type": 'findNearby',
                    "lang": 'en',
                },
                {},
            )

    def test_reverse_find_nearby(self):
        location = self.reverse_run(
            {
                "query": "40.75376406311989, -73.98489005863667",
                "exactly_one": True,
                "find_nearby_type": 'findNearby',
            },
            {
                "latitude": 40.75376406311989,
                "longitude": -73.98489005863667,
            },
        )
        self.assertIn("New York, United States", location.address)

    def test_reverse_find_nearby_feature_code(self):
        self.reverse_run(
            {
                "query": "40.75376406311989, -73.98489005863667",
                "exactly_one": True,
                "find_nearby_type": 'findNearby',
                "feature_code": "ADM1",
            },
            {
                "latitude": 40.16706,
                "longitude": -74.49987,
            },
        )

    def test_reverse_raises_for_unknown_find_nearby_type(self):
        with self.assertRaises(GeocoderQueryError):
            self.reverse_run(
                {
                    "query": "40.75376406311989, -73.98489005863667",
                    "exactly_one": True,
                    "find_nearby_type": "findSomethingNonExisting",
                },
                {},
            )

    def test_reverse_timezone(self):
        new_york_point = Point(40.75376406311989, -73.98489005863667)
        america_new_york = pytz.timezone("America/New_York")
        timezone = self.reverse_timezone_run(
            {"query": new_york_point},
            america_new_york,
        )
        self.assertEqual(timezone.raw['countryCode'], 'US')

    def test_reverse_timezone_unknown(self):
        self.reverse_timezone_run(
            # Geonames doesn't return `timezoneId` for Antarctica,
            # but it provides GMT offset which can be used
            # to create a FixedOffset pytz timezone.
            {"query": "89.0, 1.0"},
            pytz.UTC,
        )
        self.reverse_timezone_run(
            {"query": "89.0, 80.0"},
            pytz.FixedOffset(5 * 60),
        )

    def test_country_str(self):
        self.geocode_run(
            {"query": "kazan", "country": "TR"},
            {"latitude": 40.2317, "longitude": 32.6839},
        )

    def test_country_list(self):
        self.geocode_run(
            {"query": "kazan", "country": ["CN", "TR", "JP"]},
            {"latitude": 40.2317, "longitude": 32.6839},
        )

    def test_country_bias(self):
        self.geocode_run(
            {"query": "kazan", "country_bias": "TR"},
            {"latitude": 40.2317, "longitude": 32.6839},
        )
class GeoNamesInvalidAccountTestCase(GeocoderTestBase):
    """Tests that a bogus username yields authentication failures."""

    @classmethod
    def setUpClass(cls):
        # Random suffix guarantees the account does not exist.
        cls.geocoder = GeoNames(username="geopy-not-existing-%s" % uuid.uuid4())

    def reverse_timezone_run(self, payload, expected):
        # NOTE: duplicated from GeoNamesTestCase; could live on a shared base.
        timezone = self._make_request(self.geocoder.reverse_timezone, **payload)
        if expected is None:
            self.assertIsNone(timezone)
        else:
            self.assertEqual(timezone.pytz_timezone, expected)
        return timezone

    def test_geocode(self):
        with self.assertRaises(GeocoderAuthenticationFailure):
            self.geocode_run(
                {"query": "moscow"},
                {},
                expect_failure=True,
            )

    def test_reverse_timezone(self):
        with self.assertRaises(GeocoderAuthenticationFailure):
            self.reverse_timezone_run(
                {"query": "40.6997716, -73.9753359"},
                None,
            )
# --- tests/unit/trace/propagation/test_text_format.py (Flared/opencensus-python) ---
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from opencensus.trace.propagation import text_format
class Test_from_carrier(unittest.TestCase):
    """Round-trip tests for TextFormatPropagator from_carrier/to_carrier."""

    def test_from_carrier_keys_exist(self):
        # A fully-populated carrier must map straight onto the span context.
        test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        test_span_id = '00f067aa0ba902b7'
        test_options = 1
        carrier = {
            text_format._TRACE_ID_KEY: test_trace_id,
            text_format._SPAN_ID_KEY: test_span_id,
            text_format._TRACE_OPTIONS_KEY: test_options,
        }
        propagator = text_format.TextFormatPropagator()
        span_context = propagator.from_carrier(carrier)
        self.assertEqual(span_context.trace_id, test_trace_id)
        self.assertEqual(span_context.span_id, test_span_id)
        self.assertEqual(span_context.trace_options.enabled,
                         bool(test_options))

    def test_from_carrier_keys_not_exist(self):
        # An empty carrier yields a fresh context with sampling enabled.
        carrier = {}
        propagator = text_format.TextFormatPropagator()
        span_context = propagator.from_carrier(carrier)
        self.assertIsNotNone(span_context.trace_id)
        # Span_id should be None here which indicates no parent span_id for
        # the child spans
        self.assertIsNone(span_context.span_id)
        self.assertTrue(span_context.trace_options.enabled)

    def test_to_carrier_has_span_id(self):
        test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        test_span_id = '00f067aa0ba902b7'
        test_options = '2'
        span_context = mock.Mock()
        span_context.trace_id = test_trace_id
        span_context.span_id = test_span_id
        span_context.trace_options.trace_options_byte = test_options
        carrier = {}
        propagator = text_format.TextFormatPropagator()
        carrier = propagator.to_carrier(span_context, carrier)
        self.assertEqual(carrier[text_format._TRACE_ID_KEY], test_trace_id)
        self.assertEqual(carrier[text_format._SPAN_ID_KEY], str(test_span_id))
        self.assertEqual(carrier[text_format._TRACE_OPTIONS_KEY], test_options)

    def test_to_carrier_no_span_id(self):
        # Without a span id, the span-id key must be absent from the carrier.
        test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        test_options = '1'
        span_context = mock.Mock()
        span_context.trace_id = test_trace_id
        span_context.span_id = None
        span_context.trace_options.trace_options_byte = test_options
        carrier = {}
        propagator = text_format.TextFormatPropagator()
        carrier = propagator.to_carrier(span_context, carrier)
        self.assertEqual(carrier[text_format._TRACE_ID_KEY], test_trace_id)
        self.assertIsNone(carrier.get(text_format._SPAN_ID_KEY))
        self.assertEqual(carrier[text_format._TRACE_OPTIONS_KEY], test_options)
# --- cwr/grammar/factory/adapter.py (orenyodfat/CWR-DataApi) ---
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from cwr.grammar.field import basic, special, table, filename
"""
CWR fields grammar adapters.
These classes allow the factories to create rules in an homogeneous way,
by setting a basic interface which will wrap around field rules, giving
a basic common method through which rules can be created.
This interface is the FieldAdapter, having only the get_field method, which
will receive a series of parameters, all of them optional, and generate a
field rule from them. The concrete rule will depend on the implementation.
Additionally, it offers the wrap_as_optional method, which allows setting a
field as optional. It is meant to be used with a field created by the adapter,
so it can be overriden for specific fields.
"""
# Module metadata. NOTE(review): the author string was replaced by an
# anonymization placeholder upstream.
__author__ = '<NAME>'
__license__ = 'MIT'
__status__ = 'Development'
class FieldAdapter(metaclass=ABCMeta):
    """
    Interface for adapting field rules creation to the parser factory
    requirements.

    This is meant to receive always the same, or similar, groups of values,
    and then generate a specific field rule from them.

    (The redundant explicit ``object`` base was dropped; in Python 3 every
    class already derives from object.)
    """

    def __init__(self):
        pass

    @abstractmethod
    def get_field(self, name=None, columns=None, values=None):
        """
        Generates the rules for the field, applying the received parameters.

        :param name: the name of the field
        :param columns: number of columns
        :param values: allowed values for the field
        :return: the rule for the field
        """
        raise NotImplementedError("The get_field method is not implemented")

    def is_numeric(self):
        # Subclasses producing numeric values (e.g. DateAdapter) override
        # this to return True.
        return False
class AlphanumAdapter(FieldAdapter):
    """
    Adapter for a fixed-width Alphanumeric (A) field.

    Built with extended=False: only ASCII characters are accepted, and
    ASCII lowercase is rejected. Heading and trailing whitespace is
    stripped from the value.
    """

    def __init__(self):
        super(AlphanumAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        return basic.alphanum(columns, name, extended=False)
class ExtendedAlphanumAdapter(FieldAdapter):
    """
    Creates the grammar for an extended Alphanumeric (A) field, accepting
    only the specified number of characters.

    Unlike AlphanumAdapter, this always builds the field with
    extended=True, so non-ASCII characters are allowed; the no-ASCII-
    lowercase constraint is kept.

    The text will be stripped of heading and trailing whitespaces.
    """

    def __init__(self):
        super(ExtendedAlphanumAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        return basic.alphanum(columns, name, extended=True)
class EndAlphanumAdapter(FieldAdapter):
    """
    Creates the grammar for an extended Alphanumeric (A) field sitting at
    the end of a record line.

    Always built with extended=True (non-ASCII allowed, ASCII lowercase
    still rejected) and isLast=True, the end-of-record variant.

    The text will be stripped of heading and trailing whitespaces.
    """

    def __init__(self):
        super(EndAlphanumAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        reg = basic.alphanum(columns, name, extended=True, isLast=True)
        return reg
class NumericAdapter(FieldAdapter):
    """
    Creates the grammar for a Numeric (N) field, accepting only the specified
    number of characters.

    This version only allows integers.
    """

    def __init__(self):
        super(NumericAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # NOTE(review): is_numeric() is not overridden here, although
        # DateAdapter does override it -- confirm whether that is intended.
        return basic.numeric(columns, name)
class BooleanAdapter(FieldAdapter):
    """
    Creates the grammar for a Boolean (B) field, accepting only 'Y' or 'N'
    """

    def __init__(self):
        super(BooleanAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # Width is fixed for booleans, so columns/values are ignored.
        return basic.boolean(name)
class FlagAdapter(FieldAdapter):
    """
    Creates the grammar for a Flag (F) field, accepting only 'Y', 'N' or 'U'.
    """

    def __init__(self):
        super(FlagAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # Width is fixed for flags, so columns/values are ignored.
        return basic.flag(name)
class DateAdapter(FieldAdapter):
    """
    Creates the grammar for a Date (D) field, accepting only numbers in a
    certain pattern.
    """

    def __init__(self):
        super(DateAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        return basic.date(name)

    def is_numeric(self):
        # Date fields contain only digits, so they count as numeric.
        return True
class TimeAdapter(FieldAdapter):
    """
    Creates the grammar for a Time (D) field, accepting only numbers in a
    certain pattern.
    """

    def __init__(self):
        super(TimeAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # NOTE(review): unlike DateAdapter, is_numeric() is not overridden
        # here -- confirm whether time fields should also report numeric.
        return basic.time(name)
class DateTimeAdapter(FieldAdapter):
    """
    Creates the grammar for a date and time field, which is a combination of
    the Date (D) and Time or Duration field (T).
    """

    def __init__(self):
        super(DateTimeAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # Composite field; width is implied, so columns/values are ignored.
        return special.date_time(name)
class BlankAdapter(FieldAdapter):
    """
    Creates the grammar for a blank field.

    These are for constant empty strings which should be ignored, as they are
    used just as fillers.
    """

    def __init__(self):
        super(BlankAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # Only the width matters for a filler field.
        return basic.blank(columns, name)
class LookupAdapter(FieldAdapter):
    """
    Creates the grammar for a Lookup (L) field, accepting only values from a
    list.
    """

    def __init__(self):
        super(LookupAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # The allowed-values list drives the grammar; columns is ignored.
        return basic.lookup(values, name)
class ISWCAdapter(FieldAdapter):
    """
    ISWC (International Standard Musical Work Code) field adapter.
    """

    def __init__(self):
        super(ISWCAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        return special.iswc(name)
class IPIBaseNumberAdapter(FieldAdapter):
    """
    IPI Base Number field adapter.
    """

    def __init__(self):
        super(IPIBaseNumberAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        return special.ipi_base_number(name)
class IPINameNumberAdapter(FieldAdapter):
    """
    IPI Name Number field adapter.
    """

    def __init__(self):
        super(IPINameNumberAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        return special.ipi_name_number(name)
class PercentageAdapter(FieldAdapter):
    """
    Adapter for a Numeric (N) field storing a percentage within the given
    number of columns.

    When ``values`` contains an entry, its first element overrides the
    default maximum of 100.
    """

    def __init__(self):
        super(PercentageAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        maximum = int(values[0]) if values else 100
        return special.percentage(columns=columns, maximum=maximum, name=name)
class EAN13Adapter(FieldAdapter):
    """
    Creates the grammar for an EAN 13 code.
    """

    def __init__(self):
        super(EAN13Adapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # Fixed 13-digit format; columns/values are ignored.
        return special.ean_13(name=name)
class ISRCAdapter(FieldAdapter):
    """
    Creates the grammar for an ISRC code.
    """

    def __init__(self):
        super(ISRCAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # Fixed format; columns/values are ignored.
        return special.isrc(name=name)
class VISANAdapter(FieldAdapter):
    """
    Creates the grammar for a V-ISAN code.
    """

    def __init__(self):
        super(VISANAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # Fixed format; columns/values are ignored.
        return special.visan(name=name)
class AudioVisualKeydapter(FieldAdapter):
    """
    Creates the grammar for an Audio Visual Key code.

    NOTE(review): the class name is missing an 'A'
    (AudioVisualKey[A]dapter); kept as-is because external configuration
    may reference it by this name.
    """

    def __init__(self):
        super(AudioVisualKeydapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        field = special.audio_visual_key(name=name)
        return field
class CharSetAdapter(FieldAdapter):
    """
    Character set code field adapter.
    """

    def __init__(self):
        super(CharSetAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # Only the width is configurable; values are ignored.
        return table.char_code(columns=columns, name=name)
class VariableAlphanumAdapter(FieldAdapter):
    """
    Adapter for an alphanumeric code whose length ranges between a minimum
    (values[0], when provided) and ``columns``.
    """

    def __init__(self):
        super(VariableAlphanumAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        min_size = int(values[0]) if values else columns
        return filename.alphanum_variable(min_size=min_size, max_size=columns,
                                          name=name)
class NumericFloatAdapter(FieldAdapter):
    """
    Adapter for a fixed-width Numeric (N) field holding a float.

    The first entry in ``values``, when given, sets how many digits form
    the integer part; otherwise the whole width is used.
    """

    def __init__(self):
        super(NumericFloatAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        nums_int = int(values[0]) if values else columns
        return basic.numeric_float(columns=columns, nums_int=nums_int,
                                   name=name)
class YearAdapter(FieldAdapter):
    """
    Creates the grammar for a year field, accepting only the specified number
    of integers.
    """

    def __init__(self):
        super(YearAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        return filename.year(columns=columns, name=name)
class FilenameVersionAdapter(FieldAdapter):
    """
    Creates the grammar for a filename version field, accepting only specific
    delimiters.
    """

    def __init__(self):
        super(FilenameVersionAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        # The allowed delimiters come through the values list.
        return filename.filename_version(values=values, name=name)
class LookupIntAdapter(FieldAdapter):
    """
    Creates the grammar for an integer lookup field, accepting only specific
    values, and transforming them to an integer.
    """

    def __init__(self):
        super(LookupIntAdapter, self).__init__()

    def get_field(self, name=None, columns=None, values=None):
        return special.lookup_int(values=values, name=name)
# --- rpi_camera_code.py (AGarcia-20/Python_practice) ---
import time
import RPi.GPIO as GPIO
from picamera import PiCamera
import os
from subprocess import check_output
from datetime import datetime
import numpy as np
from PIL import Image
GPIO.setwarnings(False)

i = 0
light = 12  # GPIO (board) pin toggled by on()/off()
wifi = 7    # GPIO (board) pin used to signal wifi status
ping_hub = "ping 192.168.0.1 -c 1"  # reachability check for the hub
subp = "sudo pkill -9 -f ADXL345_Sampler_100Hz.py"  # kills the sampler process
# Build the list of frame rates to sweep: fps_bottom, then steps of
# fps_increment, keeping only values that do not exceed fps_top.
# (Replaces the append-then-pop-overshoot loop with an equivalent range.)
fps_top = 30        # max frame rate limit
fps_bottom = 15     # min frame rate limit
fps_increment = 12  # step size
fps_lst = list(range(fps_bottom, fps_top + 1, fps_increment))

# Same sweep for ISO values.
iso_top = 800
iso_bottom = 100
iso_increment = 250
iso_lst = list(range(iso_bottom, iso_top + 1, iso_increment))

# Every (fps, iso) permutation, and the total count.
combo = []
for a in fps_lst:
    for b in iso_lst:
        combo.append([a, b])
total_per = len(combo)

# Kept under its historical name; the intermediate numpy array that used
# to be built here was immediately overwritten by the plain list, so the
# dead np.array(combo) store has been removed.
permu_array = combo
# Image naming using for loop
# NOTE(review): this block only exercises the naming scheme -- both the
# print and the save call are commented out -- yet it still requires
# 'dino1.jpg' to exist next to the script, otherwise Image.open raises.
image = Image.open('dino1.jpg')
for i in range(total_per):
    condition = permu_array[i]
    fps = condition[0]
    iso = condition[1]
    #print('Condition:',condition,' fps:',str(fps),' iso:',str(iso))
    #image.save('my_dino_FR%s_ISO%s.jpg' %(fps,iso))
#Camera Functions:
def off():  # Camera off: drive the light pin low
    GPIO.output(light, 0)
def on():  # Camera on: drive the light pin high
    GPIO.output(light, 1)
def picture(fr, iso):
    """Capture one still at the given frame rate and ISO; return its path.

    The timestamp is published as the module-level ``pictime`` because
    send() builds its scp command from it -- previously pictime was only
    a local here, so send() raised NameError.
    """
    global pictime
    camera.resolution = (2592, 1944)  # full 5MP still resolution
    camera.framerate = fr
    camera.iso = iso
    camera.start_preview()
    # Timestamp format: Years_month_day_hour-minute-seconds.centiseconds
    pictime = datetime.now().strftime('%Y_%m_%d_%H-%M-%S.%f')[:-4]
    time.sleep(10)  # let exposure/white balance settle before capturing
    path = '/home/pi/Documents/minion_pics/%s_FR%s_ISO%s.jpg' % (pictime, fr, iso)
    camera.capture(path)
    camera.stop_preview()
    return path
def send():
    """Copy the latest picture to the host of the current SSH login.

    NOTE(review): reads a global ``pictime`` that picture() only assigns
    as a local, so calling this as written raises NameError; the filename
    here also lacks the _FR/_ISO suffix used by picture(). Currently dead
    code (its call site in __main__ is commented out).
    SECURITY: the ssh password is hard-coded in the sshpass invocation.
    """
    who = check_output("who", shell=True)
    who = who.split('(')[1]
    ip = who.split(')')[0]
    # print(ip)
    scp = "sudo sshpass -p 'ramboat' scp /home/pi/Documents/minion_pics/%s.jpg jack@%s:/home/jack/minion_pics/" % (pictime, ip)
    os.system(scp)
    # print(scp)
if __name__ == '__main__':
    # If the hub is reachable (docked): stop the sampler and exit early.
    status = os.system(ping_hub)

    if status == 0:
        status = "Connected"
        os.system(subp)
        quit()
    else:
        status = "Not Connected"

    camera = PiCamera()
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(light, GPIO.OUT)
    GPIO.setup(wifi, GPIO.OUT)
    GPIO.output(wifi, 1)

    # on()
    # Sweep every frame-rate/ISO combination, one capture each.
    for i in fps_lst:
        for j in iso_lst:
            picture(i, j)
    # off()

    time.sleep(5)

    # status = os.system(ping_hub)
    #
    # if status == 0:
    #     status = "Connected"
    # else:
    #     status = "Not Connected"
    #
    # print(status)

    # NOTE(review): because the Connected branch above already quit(),
    # status is always "Not Connected" here and the true arm below is
    # dead; the device therefore always signals wifi low and shuts down.
    if status == "Connected":
        # send()
        os.system(subp)
        # GPIO.output(wifi, 1)
        # quit()
    else:
        GPIO.output(wifi, 0)

    time.sleep(6)
    os.system('sudo shutdown now')
# --- tests/package1/subpackage1/__init__.py (sizrailev/py2reqs) ---
from .module3 import foo3 as bar3
def foo():
    """Delegate to the subpackage helper ``foo3`` (imported here as ``bar3``)."""
    bar3()
| 1.429688 | 1 |
seev/apps/core/views.py | Kairn/se-eon-venture | 1 | 12762971 | <reponame>Kairn/se-eon-venture
"""
View logic used in core app
"""
import traceback
from django.conf import settings
from django.http import HttpRequest
from django.db import transaction
from django.shortcuts import render, redirect, reverse
from django.core.paginator import Paginator
from django.core.exceptions import ObjectDoesNotExist
from seev.apps.utils.generators import (getRandomSalt, getSha384Hash,
getSha224Hash, getAdminCredentials, getCpAdminId,
getClientStates)
from seev.apps.utils.validations import isValidRegisterRequest
from seev.apps.utils.messages import get_app_message, addSnackDataToContext, getNewOppoMessage
from seev.apps.utils.session import store_context_in_session, get_context_in_session
from seev.apps.utils.process import logError
from .models import UnoClient, UnoCredentials, UnoApproval, UnoCustomer, UnoOpportunity
from .forms import (LoginForm, PasswordResetForm, RegisterForm,
ApprovalForm, CustomerForm, OpportunityForm)
def go_landing(request):
    """Render the public landing page with an empty context."""
    # Test-cookie support is intentionally disabled:
    # request.session.set_test_cookie()
    return render(request, 'core/index.html', context={})
def go_login(request, context=None):
    """Render the login page.

    Any context stored in the session (snackbar messages etc.) overrides the
    `context` argument; on GET the login and password-reset forms are added.
    """
    try:
        if request and request.session:
            pass
        # Test cookie (disabled)
        # if request.session.test_cookie_worked():
        #     print('Django session is working')
        #     request.session.delete_test_cookie()
    except AttributeError:
        pass
    # Retrieve session context if passed (note: this replaces the `context`
    # parameter entirely).
    context = get_context_in_session(request)
    if context is None:
        context = {}
    if request.method == 'GET':
        loginForm = LoginForm()
        psrForm = PasswordResetForm()
        context['loginForm'] = loginForm
        context['psrForm'] = psrForm
    return render(request, 'core/login.html', context=context)
def auth_login(request):
    """Authenticate a POSTed username/password.

    Admin credentials (SHA-224 hashes) route to the admin page; otherwise the
    UnoCredentials record is checked with the salted SHA-384 hash.  Any
    failure redirects back to the login page with a snackbar message.
    """
    context = {}
    if request.method == 'POST':
        try:
            username = request.POST['username']
            password = request.POST['password']
            unHash = getSha224Hash(username)
            psHash = getSha224Hash(password)
            # Built-in admin account: compare hashed input to stored hashes.
            if unHash == getAdminCredentials()[0] and psHash == getAdminCredentials()[1]:
                request.session['id'] = getCpAdminId()
                return redirect('go_admin')
            # Get client credentials data
            credObj = UnoCredentials.objects.get(username=username)
            if credObj and credObj.password_hash == getSha384Hash(password + credObj.password_salt):
                client = UnoClient.objects.get(client_id=credObj.client_id)
                if client.active:
                    # Session id is the client UUID without dashes (32 chars).
                    request.session['id'] = str(
                        credObj.client_id).replace('-', '')
                    return redirect('go_client')
                else:
                    store_context_in_session(
                        request, addSnackDataToContext(context, 'Access denied'))
                    return redirect('go_login')
            else:
                request.session.clear()
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Invalid credentials'))
                return redirect('go_login')
        except ObjectDoesNotExist:
            store_context_in_session(
                request, addSnackDataToContext(context, 'User not found'))
            return redirect('go_login')
        except Exception:
            # traceback.print_exc()
            logError(request)
            request.session.clear()
            store_context_in_session(
                request, addSnackDataToContext(context, 'ERR01'))
            return redirect('go_login')
    else:
        return redirect('go_login')
def auth_password_reset(request):
    """Deprecated endpoint: POST redirects to the landing page, anything
    else back to login."""
    target = 'go_landing' if request.method == 'POST' else 'go_login'
    return redirect(target)
def go_register(request):
    """Render the registration page; the form is only populated on GET."""
    if request.method == 'GET':
        context = {'registerForm': RegisterForm()}
    else:
        context = {}
    return render(request, 'core/register.html', context=context)
@transaction.atomic
def do_register(request):
    """Handle a client-registration POST.

    Validates the form, reads the optional signature-letter upload, then
    atomically creates a UnoClient and its UnoCredentials (password stored as
    a salted SHA-384 hash).  Any failure renders the generic error page.

    NOTE(review): the salt/hash lines were redacted in this copy of the file
    ("<PASSWORD>" placeholders); they are reconstructed here from the
    imported getRandomSalt/getSha384Hash helpers — confirm against history.
    """
    if request.method == 'POST':
        registerForm = RegisterForm(request.POST, request.FILES)
        # Basic form-level validation
        if registerForm.is_multipart() and registerForm.is_valid():
            # Request-specific validation
            if isValidRegisterRequest(request.POST):
                entity_name = request.POST['entity_name']
                country = request.POST['country']
                trade_ticker = request.POST['trade_ticker']
                contact_email = request.POST['contact_email']
                contact_phone = request.POST['contact_phone']
                summary = request.POST['summary']
                website = request.POST['website']
                username = request.POST['username']
                password = request.POST['password']
                recovery_email = request.POST['recovery_email']
                pin = request.POST['pin']
                # Read the uploaded signature letter into raw bytes
                # (deprecated but doable); missing upload leaves it empty.
                sl_bin = b''
                try:
                    signature_letter = request.FILES['signature_letter']
                    for chunk in signature_letter.chunks():
                        sl_bin += chunk
                except KeyError:
                    sl_bin = b''
                password_salt = getRandomSalt(8)
                # Normalize optional fields: empty string -> NULL
                if len(trade_ticker) == 0:
                    trade_ticker = None
                if len(summary) == 0:
                    summary = None
                if len(website) == 0:
                    website = None
                try:
                    # Create client object
                    newClient = UnoClient(
                        ctg_name=None,
                        entity_name=entity_name,
                        country=country,
                        trade_ticker=trade_ticker,
                        contact_email=contact_email,
                        contact_phone=contact_phone,
                        signature_letter=sl_bin,
                        summary=summary,
                        website=website
                    )
                    # Create credentials object (salted SHA-384 hash)
                    newCredentials = UnoCredentials(
                        client=newClient,
                        username=username,
                        password_salt=password_salt,
                        password_hash=getSha384Hash(password + password_salt),
                        recovery_email=recovery_email,
                        pin=pin
                    )
                    newClient.save()
                    newCredentials.save()
                except Exception:
                    # traceback.print_exc()
                    logError(request)
                    return go_error(HttpRequest(), {'error': get_app_message('register_error'), 'message': get_app_message('register_error_message')})
                return go_success(HttpRequest(), {'message': get_app_message('register_success')})
            else:
                return go_error(HttpRequest(), {'error': get_app_message('register_error'), 'message': get_app_message('register_error_message')})
        else:
            return go_error(HttpRequest(), {'error': get_app_message('register_error'), 'message': get_app_message('register_error_message')})
    else:
        return redirect('go_register')
def go_success(request, context=None):
    """Render the generic success page.

    Outside DEBUG, a missing/empty context redirects to the landing page.
    A default return link is injected when the caller did not supply one.
    """
    if not context and not settings.DEBUG:
        return redirect('go_landing')
    if context is None:
        context = {}
    if 'return_link' not in context:
        context['return_link'] = reverse('go_landing')
    return render(request, 'core/success.html', context=context)
def go_error(request, context=None):
    """Render the generic error page; outside DEBUG an empty context
    redirects to the landing page instead."""
    if not context and not settings.DEBUG:
        return redirect('go_landing')
    return render(request, 'core/error.html', context=context)
def go_admin(request, context=None):
    """Render the admin dashboard: a paginated list of all clients.

    Access requires the admin session id; anything else bounces to login.
    """
    try:
        if request is None:
            return redirect('go_login')
        elif request.session['id'] != getCpAdminId():
            request.session.clear()
            return redirect('go_login')
    except KeyError:
        # No 'id' in the session at all.
        return redirect('go_login')
    # Session context (snackbar etc.) replaces the `context` parameter.
    context = get_context_in_session(request)
    if context is None:
        context = {}
    ITEMS_PER_PAGE = 3
    requestPage = None
    if request.GET.get('request_page'):
        requestPage = request.GET.get('request_page')
    else:
        requestPage = 1
    # Fetch client data, newest first.
    clientList = UnoClient.objects.all().order_by('-creation_time', 'client_id')
    pagedList = Paginator(clientList, ITEMS_PER_PAGE)
    clients = pagedList.get_page(requestPage)
    # Store the current page so do_approve can redirect back to it.
    request.session['admin_page'] = requestPage
    # Deprecated but usable: decode the stored letter bytes for display.
    for client in clients:
        tempBytes = client.signature_letter
        if tempBytes:
            client.signature_letter = tempBytes.decode('U8')
    context['clients'] = clients
    context['approvalForm'] = ApprovalForm()
    return render(request, 'core/admin.html', context=context)
def go_logout(request):
    """Clear any active session and return to the landing page."""
    session = getattr(request, 'session', None) if request else None
    if session:
        session.clear()
    return redirect('go_landing')
@transaction.atomic
def do_approve(request):
    """Apply an admin approval action to a client (admin only).

    Valid transitions: PE -> AP (needs ctg_name) or DE; AP -> RV; RV -> AP
    (via 'RI').  Records a UnoApproval row, updates the client's
    active/status fields, and redirects back to the admin page the action
    came from.  Any invalid action or error renders the error page.
    """
    if request.method == 'POST':
        try:
            if request.session['id'] != getCpAdminId():
                request.session.clear()
                return redirect('go_login')
            # Retrieve form data
            client_id = request.POST['client_id']
            ctg_name = request.POST['ctg_name']
            action = request.POST['action']
            comment = request.POST['message']
            # Get client data
            client = UnoClient.objects.get(client_id=client_id)
            # Validate the requested transition against the current status.
            valid = False
            tempStatus = ''
            if client.status == getClientStates('PE'):
                if action == 'AP' and ctg_name:
                    valid = True
                    tempStatus = getClientStates('AP')
                elif action == 'DE':
                    valid = True
                    tempStatus = getClientStates('DE')
            elif client.status == getClientStates('AP'):
                if action == 'RV':
                    valid = True
                    tempStatus = getClientStates('RV')
            elif client.status == getClientStates('RV'):
                if action == 'RI':
                    valid = True
                    tempStatus = getClientStates('AP')
            else:
                valid = False
            if not valid:
                # Caught below; rolls the transaction back and shows an error.
                raise RuntimeError
            # Create approval data
            newApproval = UnoApproval(
                client=client,
                action=action,
                message=comment
            )
            newApproval.save()
            # Update client data: only approval activates the client.
            if (tempStatus == getClientStates('AP')):
                client.active = 1
                client.ctg_name = ctg_name
            else:
                client.active = 0
            client.status = tempStatus
            client.save()
            # Retrieve the page the admin was on, so we can return there.
            redirectPage = 1
            if 'admin_page' in request.session:
                redirectPage = request.session['admin_page']
            # Success message
            store_context_in_session(request, addSnackDataToContext(
                {}, 'Your action has been applied'))
            return redirect(reverse('go_admin') + '?request_page=' + str(redirectPage))
        except Exception:
            # traceback.print_exc()
            logError(request)
            return go_error(HttpRequest(), {'error': get_app_message('approval_error'), 'message': get_app_message('approval_error_message')})
    else:
        return redirect('go_admin')
def go_client(request, context=None):
    """Render the client dashboard.

    Requires a 32-char session id (client UUID without dashes).  Populates
    the customer form and, when the client has customers, an opportunity
    form whose customer choices are that client's customers.
    """
    if request and hasattr(request, 'session') and request.session and 'id' in request.session:
        if len(request.session['id']) != 32:
            request.session.clear()
            return redirect('go_login')
        else:
            # Session context (snackbar etc.) replaces the parameter.
            context = get_context_in_session(request)
            if context is None:
                context = {}
            client = UnoClient.objects.get(client_id=request.session['id'])
            context['client'] = client
            # Customer form
            context['customerForm'] = CustomerForm()
            # Opportunity form, customer choices limited to this client.
            oppoForm = OpportunityForm(initial={'client_id': client.client_id})
            customerList = UnoCustomer.objects.filter(client=client)
            custChoice = []
            for cust in customerList:
                choice = (cust.customer_id, cust.customer_name)
                custChoice.append(choice)
            if len(custChoice) > 0:
                oppoForm.fields['customer'].choices = custChoice
                context['oppoForm'] = oppoForm
            else:
                # No customers yet: the template hides the form.
                context['oppoForm'] = None
            return render(request, 'core/client.html', context=context)
    else:
        return redirect('go_login')
@transaction.atomic
def do_enroll(request):
    """Create a UnoCustomer for the logged-in client from POSTed form data.

    A missing/invalid client session raises RuntimeError (handled below by
    clearing the session); any other failure renders the error page.
    """
    if request and request.method == 'POST':
        try:
            context = {}
            # Verify client
            client = None
            if request.session:
                client = UnoClient.objects.get(client_id=request.session['id'])
            if not client:
                raise RuntimeError
            # Retrieve form values
            customer_name = request.POST['customer_name']
            contact_email = request.POST['contact_email']
            country = request.POST['country']
            if customer_name and contact_email and country:
                newCustomer = UnoCustomer(
                    client=client,
                    customer_name=customer_name,
                    contact_email=contact_email,
                    country=country
                )
                newCustomer.save()
                return go_success(HttpRequest(), {'message': get_app_message('enroll_success'), 'return_link': reverse('go_client')})
            else:
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Invalid form data'))
                return redirect('go_login')
        except RuntimeError:
            if hasattr(request, 'session') and request.session:
                request.session.clear()
            store_context_in_session(request, addSnackDataToContext(
                context, 'Invalid client session'))
            return redirect('go_login')
        except Exception:
            # traceback.print_exc()
            logError(request)
            return go_error(HttpRequest(), {'error': get_app_message('enroll_error'), 'message': get_app_message('enroll_error_message')})
    else:
        return redirect('go_client')
@transaction.atomic
def do_oppo(request, context=None):
    """Create a UnoOpportunity for one of the client's customers.

    deal_limit must be 1..32 (AssertionError otherwise); a missing client
    session raises RuntimeError.  Both are mapped to snackbar redirects;
    any other failure renders the error page.
    """
    if request and request.method == 'POST':
        try:
            if not context:
                context = {}
            client = None
            if request.session:
                client = UnoClient.objects.get(client_id=request.session['id'])
            if not client:
                raise RuntimeError
            # Get opportunity details
            customer_id = request.POST['customer']
            discount_nrc = request.POST['discount_nrc']
            discount_mrc = request.POST['discount_mrc']
            deal_limit = int(request.POST['deal_limit'])
            if deal_limit < 1 or deal_limit > 32:
                raise AssertionError
            # Customer ids are stored dash-less, like session ids.
            customer_id = str(customer_id).replace('-', '')
            customer = UnoCustomer.objects.get(customer_id=customer_id)
            newOpportunity = UnoOpportunity(
                client=client,
                customer=customer,
                discount_nrc=discount_nrc,
                discount_mrc=discount_mrc,
                deal_limit=deal_limit
            )
            newOpportunity.save()
            return go_success(HttpRequest(), {'message': getNewOppoMessage(newOpportunity.opportunity_number), 'return_link': reverse('go_client')})
        except AssertionError:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Invalid data encountered'))
            return redirect('go_client')
        except RuntimeError:
            if hasattr(request, 'session') and request.session:
                request.session.clear()
            store_context_in_session(request, addSnackDataToContext(
                context, 'Invalid client session'))
            return redirect('go_login')
        except Exception:
            # traceback.print_exc()
            logError(request)
            return go_error(HttpRequest(), {'error': get_app_message('oppo_error'), 'message': get_app_message('oppo_error_message')})
    else:
        return redirect('go_client')
def go_records(request):
    """List the logged-in client's opportunities.

    Fix: `context` used to be assigned only inside the `try`, after a call
    that can raise (UnoClient.objects.get), so the `except` handler crashed
    with NameError instead of redirecting.  It is now initialized up front.
    """
    context = {}
    try:
        client = UnoClient.objects.get(client_id=request.session['id'])
        # Session context (snackbar etc.) takes precedence when present.
        session_context = get_context_in_session(request)
        if session_context:
            context = session_context
        context['entity_name'] = client.entity_name
        records = []
        oppoList = UnoOpportunity.objects.filter(client=client)
        if len(oppoList) < 1:
            store_context_in_session(request, addSnackDataToContext(
                context, 'No opportunity found'))
            return redirect('go_client')
        for oppo in oppoList:
            customer = UnoCustomer.objects.get(customer_id=oppo.customer_id)
            # (dash-less opportunity number, created, customer, active flag)
            records.append(
                (str(oppo.opportunity_number).replace('-', ''), oppo.creation_time, customer.customer_name, oppo.active))
        context['records'] = records
        return render(request, 'core/records.html', context=context)
    except Exception:
        # traceback.print_exc()
        logError(request)
        if request and hasattr(request, 'session'):
            request.session.clear()
        store_context_in_session(
            request, addSnackDataToContext(context, 'Unexpected Error'))
        return redirect('go_login')
@transaction.atomic
def can_oppo(request, context=None):
    """Cancel (annul) one of the client's opportunities.

    An already-inactive opportunity raises a bare Exception on purpose so it
    falls through to the generic error page; a missing client session raises
    RuntimeError and clears the session.
    """
    if request and request.method == 'POST':
        try:
            if not context:
                context = {}
            client = None
            if request.session:
                client = UnoClient.objects.get(client_id=request.session['id'])
            if not client:
                raise RuntimeError
            opportunity = UnoOpportunity.objects.get(
                opportunity_number=request.POST['oppoNb'])
            if not opportunity.active:
                # Deliberate: routed to the generic error handler below.
                raise Exception
            opportunity.active = False
            opportunity.save()
            store_context_in_session(request, addSnackDataToContext(
                context, 'Opportunity annulled'))
            return redirect('go_records')
        except RuntimeError:
            if hasattr(request, 'session') and request.session:
                request.session.clear()
            store_context_in_session(request, addSnackDataToContext(
                context, 'Invalid client session'))
            return redirect('go_login')
        except Exception:
            # traceback.print_exc()
            logError(request)
            return go_error(HttpRequest(), {'error': get_app_message('oppo_can_error'), 'message': get_app_message('oppo_can_message')})
    else:
        return redirect('go_records')
def go_bad_view(request, context=None):
    """Render the fallback page shown for invalid/unknown views."""
    return render(request, 'core/bad-view.html', context=context)
| 1.757813 | 2 |
setup.py | Jingren-hou/NeuralCDE | 438 | 12762972 | <reponame>Jingren-hou/NeuralCDE<filename>setup.py
import pathlib
import setuptools

# Package metadata for the `controldiffeq` helper library; the long
# description is the package's own README.
# NOTE(review): author/maintainer/e-mail values below are anonymization
# placeholders ("<NAME>"/"<EMAIL>") left by the dataset export — restore
# the real values before publishing.
here = pathlib.Path(__file__).resolve().parent

with open(here / 'controldiffeq/README.md', 'r') as f:
    readme = f.read()

setuptools.setup(name='controldiffeq',
                 version='0.0.1',
                 author='<NAME>',
                 author_email='<EMAIL>',
                 maintainer='<NAME>',
                 maintainer_email='<EMAIL>',
                 description='PyTorch functions for solving CDEs.',
                 long_description=readme,
                 url='https://github.com/patrick-kidger/NeuralCDE/tree/master/controldiffeq',
                 license='Apache-2.0',
                 zip_safe=False,
                 python_requires='>=3.5, <4',
                 install_requires=['torch>=1.0.0', 'torchdiffeq>=0.0.1'],
                 packages=['controldiffeq'],
                 classifiers=["Programming Language :: Python :: 3",
                              "License :: OSI Approved :: Apache Software License"])
| 1.539063 | 2 |
util/chplenv/chplenv.py | MayukhSobo/chapel | 1,602 | 12762973 | import chpl_cpu
import chpl_atomics
import chpl_aux_filesys
import chpl_bin_subdir
import chpl_make
import chpl_platform
import chpl_comm
import chpl_comm_debug
import chpl_comm_segment
import chpl_comm_substrate
import chpl_compiler
import chpl_gasnet
import chpl_gmp
import chpl_hwloc
import chpl_jemalloc
import chpl_launcher
import chpl_libfabric
import chpl_llvm
import chpl_locale_model
import chpl_gpu
import chpl_arch
import chpl_mem
import chpl_qthreads
import chpl_re2
import chpl_tasks
import chpl_timers
import chpl_unwind
import chpl_lib_pic
import chpl_sanitizers
# General purpose helpers
import chpl_home_utils
import chpl_python_version
import compiler_utils
import overrides
import utils
| 0.960938 | 1 |
tests/r/test_swahili.py | hajime9652/observations | 199 | 12762974 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.swahili import swahili
def test_swahili():
  """Test module swahili.py by downloading
  swahili.csv and testing shape of
  extracted data has 480 rows and 4 columns

  Fixes: the original `raise()` raised an empty tuple (TypeError, masking
  the assertion), and the bare `except:` swallowed everything including
  KeyboardInterrupt; re-raise the AssertionError properly instead.
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = swahili(test_path)
  try:
    assert x_train.shape == (480, 4)
  except AssertionError:
    # Clean up the temp download directory before propagating the failure.
    shutil.rmtree(test_path)
    raise
| 2.21875 | 2 |
ITE-428/basicproject2/MyLibrary.py | richeyphu/ITE-428-LAB | 1 | 12762975 | <reponame>richeyphu/ITE-428-LAB<filename>ITE-428/basicproject2/MyLibrary.py<gh_stars>1-10
# function declaration
import math
def line1():
    """Print a 35-character horizontal rule of dashes."""
    print("-" * 35)
def line2(ch):
    """Print a 35-character horizontal rule made of ``ch``, e.g. line2('#')."""
    print(str(ch) * 35)
def line3(ch, num):
    """Print a horizontal rule: ``num`` repetitions of ``ch``."""
    print(str(ch) * num)
def calBMI(w, h):
    """Return the body-mass index: weight (kg) divided by height (m) squared."""
    return w / (h ** 2)
def create_email(name, last):
    """Build an e-mail address from the first two letters of the last name
    followed by the first name, all lower-case.

    NOTE(review): the original address template was lost to dataset
    redaction ("<EMAIL>"); the "@mail.com" domain below is a reconstruction
    — confirm the intended domain against the course material.
    """
    return "{}{}@mail.com".format(last[0:2].lower(), name.lower())
| 2.734375 | 3 |
updateAttendance.py | Shivani-781/AI-Powered-Hourly-Attendance-Capturing-System | 0 | 12762976 | import json
import boto3
dynamo = boto3.resource("dynamodb")
table = dynamo.Table("Attendance_Count")
def lambda_handler(event, context):
    """Increment the attendance count for the roll number in ``event``.

    Reads the current item from the module-level DynamoDB table, prints the
    student's name, bumps ``Count`` by one and writes the item back.
    Returns the literal string "Successful".
    """
    roll_no = event['RollNo']
    item = table.get_item(Key={"RollNo": roll_no})['Item']
    print(item['Name'])
    updated = {
        "RollNo": roll_no,
        "Count": item['Count'] + 1,
        "Name": item['Name'],
    }
    table.put_item(Item=updated)
    return "Successful"
src/cyborgbackup/main/management/commands/cleanup_jobs.py | ikkemaniac/cyborgbackup | 0 | 12762977 | # Python
import datetime
import logging
# Django
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.utils.timezone import now
# CyBorgBackup
from cyborgbackup.main.models import Job, Repository
class Command(BaseCommand):
    '''
    Management command to cleanup old jobs.
    '''
    help = 'Remove old jobs from the database.'

    def add_arguments(self, parser):
        """Register --dry-run (default off) and --jobs (default on) flags."""
        parser.add_argument('--dry-run', dest='dry_run', action='store_true',
                            default=False, help='Dry run mode (show items that would '
                            'be removed)')
        parser.add_argument('--jobs', dest='only_jobs', action='store_true',
                            default=True,
                            help='Remove jobs')

    def cleanup_jobs(self):
        """Delete job rows whose borg archive no longer exists in any repo.

        Returns a (skipped, deleted) pair — currently hard-coded to (0, 0).
        NOTE(review): `kwargs` below is never defined (NameError when any
        repository is enabled), `self.launch_command`/`self.logger` are not
        defined in this class (presumably inherited/mixed in — confirm), and
        the early `return` yields None while handle() unpacks two values.
        '''
        """
        # Sanity check: Is there already a running job on the System?
        jobs = Job.objects.filter(status="running")
        if jobs.exists():
            print('A job is already running, exiting.')
            return
        repos = Repository.objects.filter(enabled=True)
        repoArchives = []
        if repos.exists():
            for repo in repos:
                # Collect archive names present in each enabled repository.
                lines = self.launch_command(["borg", "list", "::"], repo, repo.repository_key, repo.path, **kwargs)
                for line in lines:
                    archive_name = line.split(' ')[0]
                    # Only archives matching a known backup type prefix count.
                    for type in ('rootfs', 'vm', 'mysql', 'postgresql', 'config', 'piped', 'mail', 'folders'):
                        if '{}-'.format(type) in archive_name:
                            repoArchives.append(archive_name)
        entries = Job.objects.filter(job_type='job')
        if entries.exists():
            for entry in entries:
                # Orphaned job: has an archive name not found in any repo.
                if entry.archive_name != '' and entry.archive_name not in repoArchives:
                    action_text = 'would delete' if self.dry_run else 'deleting'
                    self.logger.info('%s %s', action_text, entry.archive_name)
                    if not self.dry_run:
                        entry.delete()
        return 0, 0

    @transaction.atomic
    def handle(self, *args, **options):
        """Entry point: dispatch to the cleanup_<model> methods selected by flags."""
        self.verbosity = int(options.get('verbosity', 1))
        self.init_logging()
        self.dry_run = bool(options.get('dry_run', False))
        model_names = ('jobs',)
        models_to_cleanup = set()
        for m in model_names:
            if options.get('only_%s' % m, False):
                models_to_cleanup.add(m)
        if not models_to_cleanup:
            models_to_cleanup.update(model_names)
        for m in model_names:
            if m in models_to_cleanup:
                skipped, deleted = getattr(self, 'cleanup_%s' % m)()
                if self.dry_run:
                    self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '),
                                    deleted, skipped)
                else:
                    self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
python_scripts_youtube/desafio16_final.py | diegocarloni/python | 0 | 12762978 | <filename>python_scripts_youtube/desafio16_final.py
from math import trunc

# Read a number and display its integer (truncated) part.
num = float(input('Digite um número: '))
print('A parte inteira de {} é igual a {}'.format(num, trunc(num)))
| 3.8125 | 4 |
src/SocialNetwork_API/services/user.py | mungpham/mungpham | 0 | 12762979 | import _thread
import time
import hashlib
from django.core.exceptions import ValidationError
from django.db import transaction
from django.db.models import Q
from django.conf import settings
from django.template.defaultfilters import slugify
from SocialNetwork_API.services.base import BaseService
from SocialNetwork_API.models import *
from SocialNetwork_API.const import ResourceType
class UserService(BaseService):
    """Query/mutation helpers around the User model and its relations."""

    @classmethod
    def get_all_users(cls):
        """Return all users, or None when there are none or a query error occurs."""
        try:
            users = User.objects.all()
            if len(users) > 0:
                return users
            return None
        except Exception as exception:
            cls.log_exception(exception)
            return None

    @classmethod
    def get_user_friend(cls, user_id, friend_id):
        """Return the Friend row linking user_id -> friend_id, or None."""
        try:
            user_friend = Friend.objects.filter(user_id=user_id, friend_user_id=friend_id)
            if len(user_friend) > 0:
                return user_friend[0]
            return None
        except Exception as exception:
            cls.log_exception(exception)
            return None

    @classmethod
    def get_single_user(cls, user_id):
        """Return the user with the given pk, or None on any failure.

        NOTE(review): bare `except:` — swallows everything silently.
        """
        try:
            return User.objects.get(pk=user_id)
        except:
            return None

    @classmethod
    def authenticate(cls, email, username, password):
        """Return the user when email-or-username plus password match, else None.

        NOTE(review): if both email and username are falsy, `user` is unbound;
        the broad except turns that into a None return rather than an error.
        """
        try:
            if email:
                user = User.objects.filter(email=email)[0]
            if username:
                user = User.objects.filter(username=username)[0]
            if user and user.check_password(password):
                return user
            else:
                return None
        except Exception as exception:
            return None

    @classmethod
    def save(cls, user_data, instance=None):
        """Create (instance=None) or update a user from a dict of fields.

        'password' is popped and hashed via set_password; on update it is only
        changed when provided.  The slug is refreshed when the username
        changes.  Returns the freshly re-fetched user.
        """
        try:
            password = user_data.pop('password', None)
            user = instance if instance else User()
            is_new = instance is None
            # Set property values
            if 'username' in user_data and user.username != user_data['username']:
                user.slug = slugify(user_data['username'])
            for key in user_data:
                setattr(user, key, user_data[key])
            # Set password
            if is_new:
                user.set_password(password)
            else:
                if password:
                    user.set_password(password)
            with transaction.atomic():
                user.save()
            return cls.get_user(user.id)
        except Exception as exception:
            raise exception

    @classmethod
    def user_friend(cls, user, friend):
        """Create a Friend row user -> friend; returns True on success."""
        try:
            user_friend = Friend()
            user_friend.user_id = user.id
            user_friend.friend_user_id = friend.id
            with transaction.atomic():
                user_friend.save()
            # # Save follow_user to arangodb
            # if settings.SAVE_TO_ARANGODB:
            #     ArangoUserService.follow_band(band.userband.__dict__, activity.__dict__)
            return True
        except Exception as exception:
            raise exception

    @classmethod
    def get_email(cls, email):
        """Return the UserEmail row for `email`, or None when absent/on error."""
        try:
            user_email = UserEmail.objects.get(email=email)
            if user_email:
                return user_email
        except Exception as e:
            cls.log_exception(e)
            return None
        return None

    @classmethod
    def gen_token(cls, user_id):
        """Build an MD5 hex token from user id + random chars + current time.

        NOTE(review): `Utils` is not imported in this module — this raises
        NameError as written; confirm the intended helper.
        """
        text = str(user_id) + Utils.id_generator(10) + str(int(time.time()))
        hash_object = hashlib.md5(text.encode('utf-8'))
        return hash_object.hexdigest()

    @classmethod
    def get_by_email(cls, email):
        """Return the user owning `email`, or None when no such user exists."""
        try:
            user = User.objects.get(email=email)
            return cls.get_user(user.pk)
        except User.DoesNotExist:
            return None

    @classmethod
    def get_users(cls, *args, **kwargs):
        """Return a page of users as {'result': [...], 'count': total}.

        kwargs: limit (20), offset (0), search (username icontains), filter
        (dict of extra filters), order ('-id'), includes (list).
        NOTE(review): `includes` is forwarded to get_user(), which accepts
        only user_id — this raises TypeError as written; confirm signature.
        """
        limit = kwargs.get('limit', 20)
        offset = kwargs.get('offset', 0)
        search = kwargs.get('search', None)
        end = offset + limit
        filter = kwargs.get('filter', {})
        order_by = kwargs.get('order', '-id')
        includes = kwargs.get('includes', [])
        users = []
        if search:
            term = Q(username__icontains=search)
            user_ids = User.objects.values_list('id', flat=True) \
                .order_by(order_by).filter(**filter).filter(term)[offset:end]
            count = User.objects.values_list('id', flat=True) \
                .order_by(order_by).filter(**filter).filter(term).count()
        else:
            user_ids = User.objects.values_list('id', flat=True).order_by(order_by).filter(**filter)[offset:end]
            count = User.objects.values_list('id', flat=True).order_by(order_by).filter(**filter).count()
        for id in user_ids:
            users.append(cls.get_user(id, includes=includes))
        return {
            'result': users,
            'count': count
        }

    @classmethod
    def get_user(cls, user_id):
        """Return the user with pk `user_id`, or None (logged) on failure."""
        try:
            user = User.objects.get(pk=user_id)
        except Exception as e:
            cls.log_exception(e)
            return None
        return user
nonebot_plugin_nokia/nokia.py | kexue-z/nonebot-plugin-nokia | 3 | 12762980 | import base64
from io import BytesIO
from os.path import dirname
from typing import Tuple
from collections import deque
from PIL import Image, ImageFont, ImageDraw, ImageOps
font_size = 70              # point size of the rendered text
line_gap = 20               # vertical gap between wrapped body lines, px
body_pos = (205, 340)       # top-left corner where the message body is pasted
subtitle_pos = (790, 320)   # top-left corner of the "n/900" counter
body_color = (0, 0, 0, 255)            # RGBA: black body text
subtitle_color = (129, 212, 250, 255)  # RGBA: light-blue counter text
line_rotate = -9.8          # degrees; matches the tilt of the phone screen
max_line_width = 680        # px available for one line of body text
max_content_height = 450    # px available for the whole body block
# NOTE(review): this debug print names font.ttc, but the font loaded below
# is font.ttf — confirm which file actually ships and drop the print.
print(dirname(__file__) + "/res/font.ttc")
font = ImageFont.truetype(dirname(__file__) + "/res/font.ttf", font_size)
def image_to_byte_array(image: Image):
imgByteArr = io.BytesIO()
image.save(imgByteArr, format=image.format)
imgByteArr = imgByteArr.getvalue()
return imgByteArr
def im_2_b64(pic: Image.Image) -> str:
buf = BytesIO()
pic.save(buf, format="PNG")
base64_str = base64.b64encode(buf.getbuffer()).decode()
return "base64://" + base64_str
def draw_subtitle(im, text: str):
    """Draw the rotated character-count subtitle onto *im* in place."""
    width, height = font.getsize(text)
    # Render onto a transparent layer so rotation keeps clean edges.
    image2 = Image.new("RGBA", (width, height))
    draw2 = ImageDraw.Draw(image2)
    draw2.text((0, 0), text=text, font=font, fill=subtitle_color)
    image2 = image2.rotate(line_rotate, expand=1)  # match the screen tilt
    px, py = subtitle_pos
    sx, sy = image2.size
    # Third argument is the alpha mask: only opaque pixels are pasted.
    im.paste(image2, (px, py, px + sx, py + sy), image2)
def generate_image(text: str):
    """Render *text* (truncated to 900 chars) onto the phone template.

    Greedily wraps characters to max_line_width, draws the lines rotated to
    match the screen, adds the "n/900" counter, and returns the composed
    image as a "base64://..." PNG string.
    """
    origin_im = Image.open(dirname(__file__) + "/res/img.png")
    text = text[:900]
    length = len(text)
    width, height = font.getsize(text)  # height reused as the line height
    current_width = 0
    lines = []
    line = ""
    # Character-by-character greedy wrap (works for CJK; no word breaking).
    q = deque(text)
    while q:
        word = q.popleft()
        width, _ = font.getsize(word)
        current_width += width
        if current_width >= max_line_width:
            # Push the overflowing character back and start a new line.
            q.appendleft(word)
            lines.append(line)
            current_width = 0
            line = ""
        else:
            line += word
    lines.append(line)
    # Draw the wrapped lines onto a transparent layer, then rotate/paste.
    image2 = Image.new("RGBA", (max_line_width, max_content_height))
    draw2 = ImageDraw.Draw(image2)
    for i, line in enumerate(lines):
        y = i * (height + line_gap)
        if y > max_content_height:
            break  # clip anything past the visible screen area
        draw2.text((0, y), text=line, font=font, fill=body_color)
    image2 = image2.rotate(line_rotate, expand=1)
    px, py = body_pos
    sx, sy = image2.size
    origin_im.paste(image2, (px, py, px + sx, py + sy), image2)
    draw_subtitle(origin_im, f"{length}/900")
    return im_2_b64(origin_im)
| 2.90625 | 3 |
tests/unit/helpers/test_build_io.py | cicharka/epiphany | 2 | 12762981 | import os
from collections import OrderedDict
import pytest
from ruamel.yaml import YAML
from cli.src.helpers.build_io import (ANSIBLE_CFG_FILE, ANSIBLE_INVENTORY_FILE,
ANSIBLE_OUTPUT_DIR,
ANSIBLE_VAULT_OUTPUT_DIR,
MANIFEST_FILE_NAME, SP_FILE_NAME,
TERRAFORM_OUTPUT_DIR,
get_ansible_config_file_path,
get_ansible_config_file_path_for_build,
get_ansible_path,
get_ansible_path_for_build,
get_ansible_vault_path, get_build_path,
get_inventory_path,
get_inventory_path_for_build,
get_manifest_path, get_output_path,
get_terraform_path, load_manifest,
save_ansible_config_file, save_inventory,
save_manifest, save_sp)
from cli.src.helpers.objdict_helpers import dict_to_objdict
from cli.src.helpers.yaml_helpers import safe_load, safe_load_all
from tests.unit.helpers.constants import (CLUSTER_NAME_LOAD, CLUSTER_NAME_SAVE,
NON_EXISTING_CLUSTER, OUTPUT_PATH,
TEST_CLUSTER_MODEL, TEST_DOCS,
TEST_INVENTORY)
# Dummy Azure service-principal payload used by test_save_sp.
TEST_SP = {'appId': 'xxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx',
           'displayName': 'test-rg',
           'name': 'http://test-rg',
           'password': '<PASSWORD>',
           'tenant': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx'}

# Expected ansible.cfg sections/keys used by test_save_ansible_config_file.
ANSIBLE_CONFIG_FILE_SETTINGS = [('defaults', {
    'interpreter_python': 'auto_legacy_silent',
    'allow_world_readable_tmpfiles': 'true'
})]
def test_get_output_path():
    """get_output_path creates and returns the configured output directory."""
    output_path = os.path.join(OUTPUT_PATH)
    result_path = os.path.normpath(get_output_path())
    assert os.path.exists(output_path)
    assert result_path == output_path
def test_get_build_path():
    """get_build_path creates and returns <output>/<cluster> for the cluster."""
    build_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE)
    result_path = get_build_path(CLUSTER_NAME_SAVE)
    assert os.path.exists(build_path)
    assert result_path == build_path
def test_get_inventory_path():
    """Inventory path is <output>/<cluster>/<ansible inventory file>."""
    assert get_inventory_path(CLUSTER_NAME_SAVE) == os.path.join(
        OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE)
def test_get_manifest_path():
    """Manifest path is <output>/<cluster>/<manifest file>."""
    assert get_manifest_path(CLUSTER_NAME_SAVE) == os.path.join(
        OUTPUT_PATH, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)
def test_get_terraform_path():
    """get_terraform_path creates and returns the cluster's terraform dir."""
    terraform_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, TERRAFORM_OUTPUT_DIR)
    result_path = get_terraform_path(CLUSTER_NAME_SAVE)
    assert os.path.exists(terraform_path)
    assert result_path == terraform_path
def test_get_ansible_path():
    """Ansible path is <output>/<cluster>/<ansible output dir>."""
    assert get_ansible_path(CLUSTER_NAME_SAVE) == os.path.join(
        OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR)
def test_get_ansible_vault_path():
    """get_ansible_vault_path creates and returns the cluster's vault dir."""
    ansible_vault_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_VAULT_OUTPUT_DIR)
    result_path = get_ansible_vault_path(CLUSTER_NAME_SAVE)
    assert os.path.exists(ansible_vault_path)
    assert result_path == ansible_vault_path
def test_get_ansible_config_file_path():
    """ansible.cfg path is <output>/<cluster>/<ansible dir>/<cfg file>."""
    assert get_ansible_config_file_path(CLUSTER_NAME_SAVE) == os.path.join(
        OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR, ANSIBLE_CFG_FILE)
def test_get_inventory_path_for_build():
    """Build-dir variant resolves the inventory file inside a given build path."""
    assert get_inventory_path_for_build(os.path.join(
        OUTPUT_PATH, CLUSTER_NAME_SAVE)) == os.path.join(
        OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE)
def test_get_ansible_path_for_build():
    """Build-dir variant creates and returns the ansible dir inside a build path."""
    ansible_path_for_build_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR)
    result_path = get_ansible_path_for_build(os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE))
    assert os.path.exists(ansible_path_for_build_path)
    assert result_path == ansible_path_for_build_path
def test_get_ansible_config_file_path_for_build():
    """Build-dir variant resolves ansible.cfg inside a given build path."""
    assert get_ansible_config_file_path_for_build(os.path.join(
        OUTPUT_PATH, CLUSTER_NAME_SAVE)) == os.path.join(
        OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR, ANSIBLE_CFG_FILE)
def test_save_manifest():
    """save_manifest round-trips the docs through the manifest YAML file.

    Fix: the manifest file handle was opened but never closed; use a
    context manager.
    """
    save_manifest(TEST_DOCS, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)
    manifest_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, MANIFEST_FILE_NAME)
    with open(manifest_path, 'r') as manifest_stream:
        manifest_file_content = safe_load_all(manifest_stream)
    assert TEST_DOCS == manifest_file_content
def test_load_manifest():
    """load_manifest returns the docs saved under the pre-seeded cluster."""
    build_path = get_build_path(CLUSTER_NAME_LOAD)
    docs = load_manifest(build_path)
    assert docs == TEST_DOCS
def test_load_not_existing_manifest_docs():
    """load_manifest raises for a cluster that has no manifest."""
    build_path = get_build_path(NON_EXISTING_CLUSTER)
    with pytest.raises(Exception):
        load_manifest(build_path)
def test_save_sp():
    """save_sp writes the service-principal YAML under the terraform dir.

    Fix: the file handle was opened but never closed; use a context manager.
    """
    save_sp(TEST_SP, CLUSTER_NAME_SAVE)
    sp_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, TERRAFORM_OUTPUT_DIR, SP_FILE_NAME)
    with open(sp_path, 'r') as sp_stream:
        sp_file_content = safe_load(sp_stream)
    assert TEST_SP == sp_file_content
def test_save_inventory():
    """save_inventory writes hosts and connection settings to the inventory.

    Fix: the inventory file handle was opened but never closed; use a
    context manager.
    """
    cluster_model = dict_to_objdict(TEST_CLUSTER_MODEL)
    save_inventory(TEST_INVENTORY, cluster_model)
    inventory_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_INVENTORY_FILE)
    with open(inventory_path, mode='r') as f:
        inventory_content = f.read()
    assert 'test-1 ansible_host=10.0.0.1' in inventory_content
    assert 'test-2 ansible_host=10.0.0.2' in inventory_content
    assert 'test-3 ansible_host=10.0.0.3' in inventory_content
    assert 'test-4 ansible_host=10.0.0.4' in inventory_content
    assert 'ansible_user=operations' in inventory_content
    assert 'ansible_ssh_private_key_file=id_rsa' in inventory_content
def test_save_ansible_config_file():
    """save_ansible_config_file() should write every configured setting to ansible.cfg."""
    config_file_settings = OrderedDict(ANSIBLE_CONFIG_FILE_SETTINGS)
    ansible_config_file_path = os.path.join(OUTPUT_PATH, CLUSTER_NAME_SAVE, ANSIBLE_OUTPUT_DIR, ANSIBLE_CFG_FILE)
    save_ansible_config_file(config_file_settings, ansible_config_file_path)
    # Context manager closes the handle (the original leaked it).
    with open(ansible_config_file_path, mode='r') as f:
        ansible_config_file_content = f.read()
    assert 'interpreter_python = auto_legacy_silent' in ansible_config_file_content
    assert 'allow_world_readable_tmpfiles = true' in ansible_config_file_content
| 1.976563 | 2 |
python/EXERCICIO 69 - ANALISE DE DADOS DO GRUPO.py | debor4h/exerciciosPython | 1 | 12762982 | <filename>python/EXERCICIO 69 - ANALISE DE DADOS DO GRUPO.py
# Interactive census: count adults (>18), men, and women under 20,
# looping until the user answers 'N' to the continue prompt.
maiores = homens = mulheres = 0
while True:
    print('-'*30)
    print('CADASTRO DE PESSOA')
    print('-' * 30)
    idade = int(input('Qual sua idade: '))
    sexo = ' '
    # BUGFIX: the old check `sexo not in 'M' and sexo not in 'F'` accepted an
    # empty answer, because '' is a substring of every string. Membership in a
    # tuple of the two valid answers rejects '' (and multi-char junk) properly.
    while sexo not in ('M', 'F'):
        sexo = str(input('Qual seu sexo [F|M]: ')).upper().strip()
    print('-' * 30)
    if sexo == 'F' and idade < 20:
        mulheres += 1
    if sexo == 'M':
        homens += 1
    if idade > 18:
        maiores += 1
    resp = ' '
    while resp != 'S' and resp != 'N':
        resp = str(input('Deseja continuar [S|N]: ')).upper().strip()
    if resp == 'N':
        break
print(f'Há {mulheres} mulheres com menos de 20 anos.')
print(f'Há {homens} homens.')
print(f'Há {maiores} com mais de 18 anos.')
| 3.875 | 4 |
day-02/part-01.py | gregn610/advent2021 | 0 | 12762983 | <filename>day-02/part-01.py
# Worked example from the puzzle statement; the final product must equal 150.
eg = [
    'forward 5',
    'down 5',
    'forward 8',
    'up 3',
    'down 8',
    'forward 2',
]
def load(file):
    """Read *file* and return its lines (trailing newlines preserved)."""
    with open(file) as f:
        return list(f)
def steps(lines, pos_char, neg_char):
    """Return signed magnitudes for lines whose first char is pos_char (+) or neg_char (-)."""
    result = []
    for ln in lines:
        if ln[0] in (pos_char, neg_char):
            direction, magnitude = ln.split(' ')[0], ln.split(' ')[1]
            sign = 1 if direction[0] == pos_char else -1
            result.append(sign * int(magnitude))
    return result
def split(data):
    """Split course lines into (horizontal, vertical) signed step lists."""
    return steps(data, 'f', 'b'), steps(data, 'd', 'u')
# Sanity-check against the worked example before running on the real input.
hh, vv = split(eg)
assert (sum(hh) * sum(vv)) == 150

# Final answer: product of total horizontal distance and total depth.
dd = load('data.txt')
hh, vv = split(dd)
ans = sum(hh) * sum(vv)
print(f"Answer: {ans}")
api/swagger_server/test/test_file_controller.py | fujitsu/controlled-vocabulary-designer | 5 | 12762984 | <filename>api/swagger_server/test/test_file_controller.py
"""
test_file_controller.py COPYRIGHT FUJITSU LIMITED 2021
"""
# coding: utf-8
from __future__ import absolute_import
from flask import json
from six import BytesIO
from swagger_server.models.check_error_response import CheckErrorResponse # noqa: E501
from swagger_server.models.error_response import ErrorResponse # noqa: E501
from swagger_server.models.success_response import SuccessResponse # noqa: E501
from swagger_server.test import BaseTestCase
class TestFileController(BaseTestCase):
    """Integration test stubs for the FileController endpoints."""

    def test_download_file(self):
        """GET /download/{file_type} should answer 200 for the stub request."""
        url = '/download/{file_type}'.format(file_type='file_type_example')
        response = self.client.open(
            url,
            method='GET',
            query_string=[('out_format', 'out_format_example')])
        self.assert200(
            response,
            'Response body is : ' + response.data.decode('utf-8'))

    def test_upload_file(self):
        """POST /upload with multipart form data should answer 200 for the stub request."""
        form_data = dict(
            editing_vocabulary='editing_vocabulary_example',
            reference_vocabulary1='reference_vocabulary1_example',
            reference_vocabulary2='reference_vocabulary2_example',
            reference_vocabulary3='reference_vocabulary3_example',
            example_phrases='example_phrases_example')
        response = self.client.open(
            '/upload',
            method='POST',
            data=form_data,
            content_type='multipart/form-data')
        self.assert200(
            response,
            'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
    # Allow running this module's tests directly without an external runner.
    import unittest
    unittest.main()
| 2.53125 | 3 |
tests/test_03_classification_audit_trends.py | carolinesadlerr/wiggum | 5 | 12762985 | # # Developing a classification Audit trend
#
# In first iteration, this will only work on datasets that already have two or more binary classification variables included.
#
# We will need additional metadata: role options of being predictions or ground truths.
#
import pytest
import numpy as np
import pandas as pd
import wiggum as wg
# First, we will need a dataset that we can work with
# In[2]:
def test_classification_trends():
    """All binary classification-audit trends should be computable and scannable
    on a dataset that already includes prediction/ground-truth variables."""
    labeled_df = wg.LabeledDataFrame('data/multi_decision_admisions/')
    # Order preserved from the original: acc, tpr, ppv, tnr, fdr, f1, fnr, err.
    trend_list = [
        wg.Binary_Accuracy_Trend(),
        wg.Binary_TPR_Trend(),
        wg.Binary_PPV_Trend(),
        wg.Binary_TNR_Trend(),
        wg.Binary_FDR_Trend(),
        wg.Binary_F1_Trend(),
        wg.Binary_FNR_Trend(),
        wg.Binary_Error_Trend(),
    ]
    [trend.is_computable(labeled_df) for trend in trend_list]
    labeled_df.get_subgroup_trends_1lev(trend_list)
    labeled_df.get_SP_rows(thresh=.2)
| 2.578125 | 3 |
src/dewloosh/math/function/relation.py | dewloosh/dewloosh-math | 2 | 12762986 | # -*- coding: utf-8 -*-
from enum import Enum
import operator as op
from typing import TypeVar, Callable
from dewloosh.core.tools import getasany
from .function import Function
__all__ = ['Equality', 'InEquality']
class Relations(Enum):
    """Symbolic relational operators usable when defining (in)equalities."""
    eq = '='
    gt = '>'
    ge = '>='
    lt = '<'
    le = '<='
    def to_op(self):
        # Map the symbolic member to the corresponding `operator` function.
        return _rel_to_op[self]
# Lookup table backing Relations.to_op: symbolic member -> `operator` function.
_rel_to_op = {
    Relations.eq: op.eq,
    Relations.gt: op.gt,
    Relations.ge: op.ge,
    Relations.lt: op.lt,
    Relations.le: op.le
}

# Accepted forms for an operator argument: a symbol string, a Relations
# member, or an arbitrary binary callable.
RelationType = TypeVar('RelationType', str, Relations, Callable)
class Relation(Function):
    """Base class for relations of the form ``f(x) <op> 0``.

    The operator may be supplied via the 'op'/'operator' keyword as a symbol
    string ('=', '>', ...), a `Relations` member, or an arbitrary binary
    callable. When omitted, equality is assumed.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.op = None       # symbolic operator (a Relations member), if any
        self.opfunc = None   # the binary callable actually evaluated
        op = getasany(['op', 'operator'], None, **kwargs)
        if op:
            if isinstance(op, str):
                self.op = Relations(op)
            elif isinstance(op, Relations):
                self.op = op
            elif isinstance(op, Callable):
                self.opfunc = op
                self.op = None
        else:
            self.op = Relations.eq
        # BUGFIX: the old guard `if op and isinstance(...)` skipped this branch
        # when no operator was supplied, leaving `opfunc` as None so the default
        # (equality) relation crashed with a TypeError on evaluation.
        if self.opfunc is None and isinstance(self.op, Relations):
            self.opfunc = self.op.to_op()
        self.slack = 0

    @property
    def operator(self):
        """The symbolic operator of the relation (None for custom callables)."""
        return self.op

    def to_eq(self):
        """Transform the relation into an equality. Subclass responsibility."""
        raise NotImplementedError

    def relate(self, *args, **kwargs):
        """Evaluate ``f(x) <op> 0`` at the given point."""
        return self.opfunc(self.f0(*args, **kwargs), 0)

    def __call__(self, *args, **kwargs):
        # Calling the relation is the same as relating it.
        return self.relate(*args, **kwargs)
class Equality(Relation):
    """A relation of the form ``f(x) = 0``."""
    def __init__(self, *args, **kwargs):
        # Force the equality operator regardless of what the caller passed.
        kwargs['op'] = Relations.eq
        super().__init__(*args, **kwargs)
    def to_eq(self):
        # An equality is already in equality form.
        return self
class InEquality(Relation):
    """A relation of the form ``f(x) <op> 0`` with an inequality operator."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def to_eq(self):
        # BUGFIX: a bare `raise` outside an active exception handler raises
        # `RuntimeError: No active exception to re-raise`; raise the intended
        # NotImplementedError instead (an inequality has no equality form).
        raise NotImplementedError
if __name__ == '__main__':
    # Quick smoke tests: evaluate a few inequalities at the origin.
    gt = InEquality('x + y', op='>')
    print(gt([0.0, 0.0]))
    ge = InEquality('x + y', op='>=')
    print(ge([0.0, 0.0]))
    le = InEquality('x + y', op=lambda x, y: x <= y)
    print(le([0.0, 0.0]))
    lt = InEquality('x + y', op=lambda x, y: x < y)
    print(lt([0.0, 0.0]))
| 2.359375 | 2 |
tests/database.py | AnthonyPerez/bigorm | 0 | 12762987 | """
python -m tests.database
"""
try:
import _thread
except ImportError:
import thread as _thread
from bigorm.database import BigQueryDatabaseContext as DatabaseContext
from tests import UNIT_TEST_PROJECT
def _open_context():
    """Open a database context and force lazy session creation (run in a worker thread)."""
    with DatabaseContext(project=UNIT_TEST_PROJECT):
        DatabaseContext.get_session()
def test_multithread():
    """Contexts should be openable on the main thread and on a fresh thread."""
    with DatabaseContext(project=UNIT_TEST_PROJECT):
        pass
    # NOTE(review): the worker thread is fire-and-forget (`_thread` offers no
    # join), so the test may return before _open_context completes -- confirm
    # that is intended.
    thread_id = _thread.start_new_thread(_open_context, ())
if __name__ == '__main__':
    # Run the threading smoke test directly: `python -m tests.database`.
    test_multithread()
| 2.40625 | 2 |
training/learnG.py | AIARTSJTU/ToyGAN_Zoo | 0 | 12762988 | <reponame>AIARTSJTU/ToyGAN_Zoo
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import random
import math
import sys
import datetime
import time
from collections import namedtuple
def print_now(cmd, file=None):
    """Print *cmd* prefixed with the current timestamp, then flush stdout.

    When *file* is given, the message is written there instead of stdout.
    """
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    message = '%s %s' % (stamp, cmd)
    if file is None:
        print(message)
    else:
        print(message, file=file)
    sys.stdout.flush()
def learnG_Realness(param, D, G, optimizerG, random_sample, Triplet_Loss, x, anchor1):
    """One generator update phase for Realness-GAN training.

    Freezes D, then for each of param.G_updates steps accumulates gradients
    over effective_batch_size // batch_size sub-batches and steps optimizerG.
    Returns the loss of the last sub-batch.
    """
    device = 'cuda' if param.cuda else 'cpu'
    # Latent noise buffer, refilled in-place each sub-iteration.
    z = torch.FloatTensor(param.batch_size, param.z_size, 1, 1)
    z = z.to(device)
    G.train()
    # Discriminator is fixed during the generator phase.
    for p in D.parameters():
        p.requires_grad = False
    for t in range(param.G_updates):
        G.zero_grad()
        optimizerG.zero_grad()
        # gradients are accumulated through subiters
        for _ in range(param.effective_batch_size // param.batch_size):
            images, _ = random_sample.__next__()
            x.copy_(images)
            del images
            # Broadcast the fixed "real" anchor distribution over the batch.
            num_outcomes = Triplet_Loss.atoms
            anchor_real = torch.zeros((x.shape[0], num_outcomes), dtype=torch.float).to(device) + torch.tensor(anchor1, dtype=torch.float).to(device)
            # real images
            feat_real = D(x).log_softmax(1).exp()
            # fake images
            z.normal_(0, 1)
            imgs_fake = G(z)
            feat_fake = D(imgs_fake).log_softmax(1).exp()
            # compute loss
            if param.relativisticG:
                lossG = Triplet_Loss(feat_real, feat_fake)
            else:
                lossG = Triplet_Loss(anchor_real, feat_fake, skewness=param.positive_skew)
            lossG.backward()
        optimizerG.step()
    return lossG
| 2.140625 | 2 |
api/lms/models/location.py | hachimihamza/leavemanager | 0 | 12762989 | from django.db import models
class Region(models.Model):
    """A world region grouping countries."""
    # Natural primary key: the region's human-readable name.
    region_name = models.CharField(max_length=30, primary_key=True)
    def __str__(self):
        return self.region_name
class Country(models.Model):
    """A country, keyed by name and linked to its region."""
    country_name = models.CharField(max_length=30, primary_key=True)
    # Deleting a region cascades to its countries.
    region = models.ForeignKey(Region, on_delete=models.CASCADE)
    def __str__(self):
        return self.country_name
class Location(models.Model):
    """A postal address within a country."""
    # NOTE(review): 'street_adress' is misspelled ('address'), but renaming the
    # field would require a schema migration and break existing queries.
    street_adress = models.TextField()
    postal_code = models.IntegerField()
    city = models.CharField(max_length=30)
    state_province = models.CharField(max_length=30, blank=True)  # optional
    country = models.ForeignKey(Country, on_delete=models.CASCADE)
    def __str__(self):
        return '%s %d %s %s %s' % (self.street_adress, self.postal_code, self.city, self.state_province, self.country)
tests/test_helper.py | rsheeter/nanoemoji | 3 | 12762990 | <filename>tests/test_helper.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import io
import os
import re
import shutil
import subprocess
import sys
from lxml import etree
from fontTools import ttLib
from nanoemoji import codepoints
from nanoemoji import config
from nanoemoji import features
from nanoemoji.glyph import glyph_name
from nanoemoji import write_font
from nanoemoji.png import PNG
from pathlib import Path
from picosvg.svg import SVG
import pytest
import shutil
import tempfile
def test_data_dir() -> Path:
    """Directory containing this test module (and its data files)."""
    return Path(__file__).parent
def locate_test_file(filename) -> Path:
    """Path to *filename* inside the test data directory."""
    return test_data_dir() / filename
def parse_svg(filename, locate=False, topicosvg=True):
    """Parse an SVG file, optionally resolving it relative to the test data
    directory and optionally converting it to picosvg form in place."""
    path = locate_test_file(filename) if locate else filename
    svg = SVG.parse(path)
    if topicosvg:
        return svg.topicosvg(inplace=True)
    return svg
def rasterize_svg(input_file: Path, output_file: Path, resolution: int = 128) -> PNG:
    """Rasterize an SVG to a square PNG via resvg; skip the test if resvg is absent."""
    resvg = shutil.which("resvg")
    if not resvg:
        pytest.skip("resvg not installed")
    cmd = [
        resvg,
        "-h",
        f"{resolution}",
        "-w",
        f"{resolution}",
        input_file,
        output_file,
    ]
    subprocess.run(cmd)
    return PNG.read_from(output_file)
def color_font_config(
    config_overrides,
    svgs,
    tmp_dir=None,
    codepoint_fn=lambda svg_file, idx: (0xE000 + idx,),
):
    """Build a (FontConfig, [InputGlyph]) pair for the given test SVGs.

    A features file is generated next to the temp dir, SVGs are parsed (and
    optionally converted to picosvg) when the config wants vector sources, and
    PNGs are rasterized when the config wants bitmap sources. codepoint_fn
    assigns codepoints per SVG; defaults to the Private Use Area.
    """
    if tmp_dir is None:
        tmp_dir = Path(tempfile.gettempdir())
    svgs = tuple(locate_test_file(s) for s in svgs)
    # Generate a fea file covering the RGI sequences of the inputs.
    fea_file = tmp_dir / "test.fea"
    rgi_seqs = tuple(codepoints.from_filename(str(f)) for f in svgs)
    with open(fea_file, "w") as f:
        f.write(features.generate_fea(rgi_seqs))

    # Baseline unit-test config, then apply the caller's overrides on top.
    font_config = (
        config.load(config_file=None, additional_srcs=svgs)
        ._replace(
            family="UnitTest",
            upem=100,
            ascender=100,
            descender=0,
            width=100,
            keep_glyph_names=True,
            fea_file=str(fea_file),
        )
        ._replace(**config_overrides)
    )

    has_svgs = font_config.has_svgs
    has_picosvgs = font_config.has_picosvgs
    has_bitmaps = font_config.has_bitmaps

    # One (file, parsed) pair per input; (None, None) when that source kind is unused.
    svg_inputs = [(None, None)] * len(svgs)
    if has_svgs:
        svg_inputs = [
            (Path(os.path.relpath(svg)), parse_svg(svg, topicosvg=has_picosvgs))
            for svg in svgs
        ]
    bitmap_inputs = [(None, None)] * len(svgs)
    if has_bitmaps:
        bitmap_inputs = [
            (
                tmp_dir / (svg.stem + ".png"),
                rasterize_svg(
                    svg, tmp_dir / (svg.stem + ".png"), font_config.bitmap_resolution
                ),
            )
            for svg in svgs
        ]

    return (
        font_config,
        [
            write_font.InputGlyph(
                svg_file,
                bitmap_file,
                codepoint_fn(svg_file, idx),
                glyph_name(codepoint_fn(svg_file, idx)),
                svg,
                bitmap,
            )
            for idx, ((svg_file, svg), (bitmap_file, bitmap)) in enumerate(
                zip(svg_inputs, bitmap_inputs)
            )
        ],
    )
def reload_font(ttfont):
    """Round-trip a TTFont through an in-memory buffer to force re-parsing."""
    buffer = io.BytesIO()
    ttfont.save(buffer)
    return ttLib.TTFont(buffer)
def _save_actual_ttx(expected_ttx, ttx_content):
tmp_file = os.path.join(tempfile.gettempdir(), expected_ttx)
with open(tmp_file, "w") as f:
f.write(ttx_content)
return tmp_file
def _strip_inline_bitmaps(ttx_content):
    """Replace inline bitmap payloads in a TTX dump with stable placeholders.

    Returns the input unchanged when no bitmap data was found; otherwise
    re-serializes the XML with sbix hexdata replaced by "<name>.<graphicType>"
    and extfile image paths reduced to their basenames.
    """
    parser = etree.XMLParser(strip_cdata=False)
    root = etree.fromstring(bytes(ttx_content, encoding="utf-8"), parser=parser)
    made_changes = False
    # bitmapGlyphDataFormat="extfile" doesn't work for sbix so wipe those manually
    for hexdata in root.xpath("//sbix/strike/glyph/hexdata"):
        glyph = hexdata.getparent()
        glyph.remove(hexdata)
        glyph.text = (glyph.attrib["name"] + "." + glyph.attrib["graphicType"]).strip()
        made_changes = True
    # Windows gives \ instead of /, if we see that flip it
    for imagedata in root.xpath("//extfileimagedata"):
        imagedata.attrib["value"] = Path(imagedata.attrib["value"]).name
        made_changes = True
    if not made_changes:
        return ttx_content
    actual_ttx = io.BytesIO()
    etree.ElementTree(root).write(actual_ttx, encoding="utf-8")
    # Glue on the *exact* xml decl and wrapping newline saveXML produces
    return (
        '<?xml version="1.0" encoding="UTF-8"?>\n'
        + actual_ttx.getvalue().decode("utf-8")
        + "\n"
    )
def assert_expected_ttx(
    svgs,
    ttfont,
    expected_ttx,
    include_tables=None,
    skip_tables=("head", "hhea", "maxp", "name", "post", "OS/2"),
):
    """Assert that *ttfont*'s TTX dump matches the checked-in *expected_ttx*.

    Timestamp-bearing tables are skipped by default; the ttLibVersion
    attribute and inline bitmap payloads are normalized before comparison.
    On mismatch the actual dump is saved to the temp dir and a unified diff
    is written to stderr before failing the test.
    """
    actual_ttx = io.StringIO()
    # Timestamps inside files #@$@#%@#
    # force consistent Unix newlines (the expected test files use \n too)
    ttfont.saveXML(
        actual_ttx,
        newlinestr="\n",
        tables=include_tables,
        skipTables=skip_tables,
        bitmapGlyphDataFormat="extfile",
    )

    # Elide ttFont attributes because ttLibVersion may change
    actual = re.sub(r'\s+ttLibVersion="[^"]+"', "", actual_ttx.getvalue())
    actual = _strip_inline_bitmaps(actual)

    expected_location = locate_test_file(expected_ttx)
    if os.path.isfile(expected_location):
        with open(expected_location) as f:
            expected = f.read()
    else:
        # No golden file yet: save what we produced so it can be reviewed.
        tmp_file = _save_actual_ttx(expected_ttx, actual)
        raise FileNotFoundError(
            f"Missing expected in {expected_location}. Actual in {tmp_file}"
        )

    if actual != expected:
        for line in difflib.unified_diff(
            expected.splitlines(keepends=True),
            actual.splitlines(keepends=True),
            fromfile=f"{expected_ttx} (expected)",
            tofile=f"{expected_ttx} (actual)",
        ):
            sys.stderr.write(line)
        print(f"SVGS: {svgs}")
        tmp_file = _save_actual_ttx(expected_ttx, actual)
        pytest.fail(f"{tmp_file} != {expected_ttx}")
# Copied from picosvg
def drop_whitespace(svg):
    """Strip surrounding whitespace from every text/tail node of *svg*, in place."""
    svg._update_etree()
    for element in svg.svg_root.iter("*"):
        if element.text is not None:
            element.text = element.text.strip()
        if element.tail is not None:
            element.tail = element.tail.strip()
# Copied from picosvg
def pretty_print(svg_tree):
def _reduce_text(text):
text = text.strip() if text else None
return text if text else None
# lxml really likes to retain whitespace
for e in svg_tree.iter("*"):
e.text = _reduce_text(e.text)
e.tail = _reduce_text(e.tail)
return etree.tostring(svg_tree, pretty_print=True).decode("utf-8")
# Copied from picosvg
def svg_diff(actual_svg: SVG, expected_svg: SVG):
drop_whitespace(actual_svg)
drop_whitespace(expected_svg)
print(f"A: {pretty_print(actual_svg.toetree())}")
print(f"E: {pretty_print(expected_svg.toetree())}")
assert actual_svg.tostring() == expected_svg.tostring()
def run(cmd):
    """Run *cmd* with check=True under a minimal env so the nanoemoji CLI and
    the test modules can be resolved."""
    cmd = tuple(str(c) for c in cmd)
    print("subprocess:", " ".join(cmd))  # very useful on failure
    env = {
        # We may need to find nanoemoji and other pip-installed cli tools
        "PATH": str(Path(shutil.which("nanoemoji")).parent),
        # We may need to find test modules
        "PYTHONPATH": os.pathsep.join((str(Path(__file__).parent),)),
    }
    # Needed for windows CI to function; ref https://github.com/appveyor/ci/issues/1995
    if "SYSTEMROOT" in os.environ:
        env["SYSTEMROOT"] = os.environ["SYSTEMROOT"]
    return subprocess.run(cmd, check=True, env=env)
def run_nanoemoji(args, tmp_dir=None):
    """Invoke the nanoemoji CLI with --build_dir set, defaulting to a fresh temp dir."""
    if not tmp_dir:
        tmp_dir = mkdtemp()
    base_cmd = ("nanoemoji", "--build_dir", str(tmp_dir))
    run(base_cmd + tuple(str(a) for a in args))
    assert (tmp_dir / "build.ninja").is_file()
    return tmp_dir
# Module-level registry of temp dirs created via mkdtemp(), for later cleanup.
_TEMPORARY_DIRS = set()
def active_temp_dirs():
    """The set of temp dirs created by mkdtemp() and not yet cleaned up."""
    return _TEMPORARY_DIRS
def forget_temp_dirs():
    """Reset the registry without deleting the directories themselves."""
    global _TEMPORARY_DIRS
    _TEMPORARY_DIRS = set()
    assert len(active_temp_dirs()) == 0  # this can occur due to local/global confusion
def mkdtemp() -> Path:
    """Create a temp dir, record it for later cleanup, and return it as a Path."""
    new_dir = Path(tempfile.mkdtemp())
    assert new_dir not in _TEMPORARY_DIRS
    _TEMPORARY_DIRS.add(new_dir)
    return new_dir
def cleanup_temp_dirs():
    """Delete and forget every registered temp dir (best effort)."""
    while _TEMPORARY_DIRS:
        shutil.rmtree(_TEMPORARY_DIRS.pop(), ignore_errors=True)
def bool_flag(name: str, value: bool) -> str:
    """Render an absl-style boolean flag: '--name' when true, '--noname' when false."""
    prefix = "--" if value else "--no"
    return prefix + name
| 2.0625 | 2 |
dsa_stl/testdsa.py | aman2000jaiswal14/dsa_stl | 0 | 12762991 | <gh_stars>0
def test():
    """Smoke-test hook: confirm the package imports and runs."""
    print("test successful...")
def update():
    """Placeholder for the DSA update routine."""
    print("Updating DSA")
if __name__=='__main__':
    # No standalone behavior; functions are meant to be imported.
    pass
nzmath/factor/find.py | turkeydonkey/nzmath3 | 1 | 12762992 | <gh_stars>1-10
"""
All methods defined here return one of a factor of given integer.
When 1 is returned, the method has failed to factor,
but 1 is a factor anyway.
'verbose' boolean flag can be specified for verbose reports.
"""
import logging
import nzmath.arith1 as arith1
import nzmath.bigrandom as bigrandom
import nzmath.gcd as gcd
import nzmath.prime as prime
_log = logging.getLogger('nzmath.factor.find')
# Pollard's rho method
def rhomethod(n, **options):
    """
    Find a non-trivial factor of n using Pollard's rho algorithm.
    The implementation refers the explanation in C.Pomerance's book.

    Returns 1 (a trivial factor) for n <= 3. Pass verbose=True for logging.
    """
    # verbosity
    verbose = options.get('verbose', False)
    if not verbose:
        _silence()
    if n <= 3:
        return 1
    g = n
    # Retry with a fresh random polynomial/start whenever only the trivial
    # divisor n is found.
    while g == n:
        # x^2 + a is iterated. Starting value x = u.
        a = bigrandom.randrange(1, n-2)
        u = v = bigrandom.randrange(0, n-1)
        _log.info("%d %d" % (a, u))
        g = gcd.gcd((v**2 + v + a) % n - u, n)
        # Floyd cycle detection: u advances one iteration of the map,
        # v advances two, until gcd(v - u, n) becomes non-trivial.
        while g == 1:
            u = (u**2 + a) % n
            v = ((pow(v, 2, n) + a)**2 + a) % n
            g = gcd.gcd(v - u, n)
    if not verbose:
        _verbose()
    return g
# p-1 method
def pmom(n, **options):
    """
    This function tries to find a non-trivial factor of n using
    Algorithm 8.8.2 (p-1 first stage) of Cohen's book.
    In case of N = pow(2,i), this program will not terminate.

    Options: B (stage-one bound, default 10000), verbose (logging flag).
    Returns a non-trivial factor, or 1 on failure.
    """
    # verbosity
    verbose = options.get('verbose', False)
    if not verbose:
        _silence()

    # initialize
    x = y = 2
    primes = []
    if 'B' in options:
        B = options['B']
    else:
        B = 10000

    # First pass: raise x to prime powers q1 <= B, batching gcd checks
    # every 20 primes (y holds the last known-good value of x).
    for q in prime.generator():
        primes.append(q)
        if q > B:
            if gcd.gcd(x-1, n) == 1:
                if not verbose:
                    _verbose()
                return 1
            # A factor was detected somewhere in the last batch; rewind to y
            # and fall through to the fine-grained second pass.
            x = y
            break
        q1 = q
        l = B // q
        # Largest power of q not exceeding B.
        while q1 <= l:
            q1 *= q
        x = pow(x, q1, n)
        if len(primes) >= 20:
            if gcd.gcd(x-1, n) == 1:
                primes, y = [], x
            else:
                x = y
                break

    # Second pass: redo the batched primes one multiplication at a time to
    # isolate the prime power that exposes the factor.
    for q in primes:
        q1 = q
        while q1 <= B:
            x = pow(x, q, n)
            g = gcd.gcd(x-1, n)
            if g != 1:
                if not verbose:
                    _verbose()
                if g == n:
                    # Only the trivial divisor was found.
                    return 1
                return g
            q1 *= q
q1 *= q
def trialDivision(n, **options):
    """
    Return a factor of given integer by trial division.

    options can be either:
    1) 'start' and 'stop' as range parameters.
    2) 'iterator' as an iterator of primes.
    If both options are not given, prime factor is searched from 2
    to the square root of the given integer.

    Returns 1 when no factor is found. Pass verbose=True for logging.
    """
    # verbosity
    verbose = options.get('verbose', False)
    if not verbose:
        _silence()

    if 'start' in options and 'stop' in options:
        # A range object is already iterable; materializing it into a list
        # (as the original code did) only wasted memory for large ranges.
        if 'step' in options:
            trials = range(options['start'], options['stop'], options['step'])
        else:
            trials = range(options['start'], options['stop'])
    elif 'iterator' in options:
        trials = options['iterator']
    elif n < 1000000:
        trials = prime.generator_eratosthenes(arith1.floorsqrt(n))
    else:
        trials = prime.generator()

    limit = arith1.floorsqrt(n)
    for p in trials:
        # No divisor below sqrt(n) means n has no non-trivial factor here.
        if limit < p:
            break
        if 0 == n % p:
            if not verbose:
                _verbose()
            return p
    if not verbose:
        _verbose()
    return 1
def _silence():
    """
    Stop verbose outputs.
    """
    # NOTE(review): NOTSET makes the logger defer to its parent's effective
    # level rather than silencing outright -- confirm this suppresses output.
    _log.setLevel(logging.NOTSET)
def _verbose():
    """
    Stop silencing.
    """
    _log.setLevel(logging.DEBUG)
| 3.1875 | 3 |
cogs/dev.py | Hattyot/MuusikBot | 1 | 12762993 | <filename>cogs/dev.py
import discord
import ast
import config
import logging
import psutil
import copy
import time
import traceback
from discord.ext import commands
from config import DEV_IDS
from modules import database, command, embed_maker
from datetime import datetime
from cogs.utils import get_member
db = database.Connection()
logger = logging.getLogger(__name__)
def is_dev(ctx):
    """Command check: only users whose id is listed in config.DEV_IDS pass."""
    return ctx.author.id in DEV_IDS
def insert_returns(body):
    """Rewrite the final statement of *body* so its value is returned,
    recursing into trailing if/with blocks. Enables expression-style
    evaluation of multi-line code blocks."""
    last = body[-1]
    if isinstance(last, ast.Expr):
        body[-1] = ast.Return(last.value)
        ast.fix_missing_locations(body[-1])
    if isinstance(last, ast.If):
        insert_returns(last.body)
        insert_returns(last.orelse)
    if isinstance(last, ast.With):
        insert_returns(last.body)
class Dev(commands.Cog):
    """Developer-only utility commands (timing, sudo, eval, reload, etc.)."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(hidden=True, help='Time a command', usage='time_cmd [command]', examples=['time_cmd lb'],
                      clearance='Dev', cls=command.Command)
    async def time_cmd(self, ctx, *, cmd=None):
        """Re-invoke *cmd* in a cloned context and report wall-clock duration."""
        if cmd is None:
            return await embed_maker.command_error(ctx)

        cmd_obj = self.bot.get_command(cmd)
        if cmd_obj is None:
            return await embed_maker.message(ctx, 'Invalid command', colour='red')

        # Clone the invoking message so the command runs as if typed normally.
        msg = copy.copy(ctx.message)
        msg.content = ctx.prefix + cmd

        new_ctx = await self.bot.get_context(msg, cls=type(ctx))

        start = time.perf_counter()
        try:
            await new_ctx.command.invoke(new_ctx)
        except commands.CommandError:
            end = time.perf_counter()
            success = False
            try:
                await ctx.send(f'```py\n{traceback.format_exc()}\n```')
            except discord.HTTPException:
                # Traceback may exceed the message size limit; ignore.
                pass
        else:
            end = time.perf_counter()
            success = True

        colour = 'green' if success else 'red'
        await embed_maker.message(ctx, f'Success: {success} | Time: {(end - start) * 1000:.2f}ms', colour=colour)

    @commands.command(hidden=True, help='Run any command as another user', usage='sudo [user] [command]',
                      examples=['sudo hattyot lb'], clearance='Dev', cls=command.Command)
    async def sudo(self, ctx, user=None, *, cmd=None):
        """Invoke *cmd* with the message author swapped for *user*."""
        if user is None or cmd is None:
            return await embed_maker.command_error(ctx)

        member = await get_member(ctx, self.bot, user)
        if member is None:
            return await embed_maker.message(ctx, 'Invalid user', colour='red')

        cmd_obj = self.bot.get_command(cmd)
        if cmd_obj is None:
            return await embed_maker.message(ctx, 'Invalid command', colour='red')

        # Forge a message from the target member and dispatch it normally.
        msg = copy.copy(ctx.message)
        msg.channel = ctx.channel
        msg.author = member
        msg.content = config.PREFIX + cmd
        new_ctx = await self.bot.get_context(msg, cls=type(ctx))
        await self.bot.invoke(new_ctx)

    @commands.command(hidden=True, help='Reload an extension, so you dont have to restart the bot',
                      usage='reload_extension [ext]', examples=['reload_extension cogs.levels'],
                      clearance='Dev', cls=command.Command, aliases=['re'])
    @commands.check(is_dev)
    async def reload_extension(self, ctx, ext):
        """Hot-reload a loaded extension module by dotted name."""
        if ext in self.bot.extensions.keys():
            self.bot.reload_extension(ext)
            return await embed_maker.message(ctx, f'{ext} has been reloaded', colour='green')
        else:
            return await embed_maker.message(ctx, 'That is not a valid extension', colour='red')

    @commands.command(hidden=True, help='Evaluate code', usage='eval [code]',
                      examples=['eval ctx.author.id'], clearance='Dev', cls=command.Command)
    @commands.check(is_dev)
    async def eval(self, ctx, *, cmd=None):
        """Evaluate arbitrary (async) Python and send the repr of the result.

        SECURITY: this runs untrusted-looking input through exec/eval; it is
        gated to DEV_IDS via the is_dev check and must stay that way.
        """
        if cmd is None:
            return await embed_maker.command_error(ctx)

        # Wrap the snippet in an async function so `await` works inside it.
        fn_name = "_eval_expr"
        cmd = cmd.strip("` ")
        cmd = "\n".join(f"    {i}" for i in cmd.splitlines())
        body = f"async def {fn_name}():\n{cmd}"
        parsed = ast.parse(body)
        body = parsed.body[0].body
        # Turn the trailing expression into a return so we get a value back.
        insert_returns(body)
        env = {
            'bot': ctx.bot,
            'discord': discord,
            'commands': commands,
            'ctx': ctx,
            '__import__': __import__,
            'db': db
        }
        exec(compile(parsed, filename="<ast>", mode="exec"), env)
        result = repr(await eval(f"{fn_name}()", env))
        # return done if result is empty, so it doesnt cause empty message error
        if result == '':
            result = 'Done'
        await ctx.send(result)

    @commands.command(hidden=True, help='Kill the bot', usage='kill_bot', examples=['kill_bot'], clearance='Dev', cls=command.Command)
    @commands.check(is_dev)
    async def kill_bot(self, ctx):
        """Shut the bot down cleanly.

        BUGFIX: discord.py passes the invocation Context as the first argument
        to every command callback; the original signature (`self` only) made
        this command crash with a TypeError whenever it was invoked.
        """
        await self.bot.close()

    @commands.command(hidden=True, help='monitors bot resource usage', usage='resource_usage', examples=['resource_usage'], clearance='Dev', cls=command.Command)
    @commands.check(is_dev)
    async def resource_usage(self, ctx):
        """Report host CPU and memory utilisation in an embed."""
        embed_colour = config.EMBED_COLOUR
        resource_overview = discord.Embed(colour=embed_colour, timestamp=datetime.now())
        resource_overview.set_footer(text=f'{ctx.author.name}#{ctx.author.discriminator}', icon_url=ctx.author.avatar_url)
        resource_overview.set_author(name='Resource Usage Overview', icon_url=ctx.guild.icon_url)
        resource_overview.add_field(name='**CPU Usage**', value=(str(psutil.cpu_percent()) + '%'), inline=False)
        resource_overview.add_field(name='**Memory Usage**', value=(str(psutil.virtual_memory().percent) + '%'), inline=False)
        await ctx.send(embed=resource_overview)
def setup(bot):
    # discord.py extension entry point: register the Dev cog on load.
    bot.add_cog(Dev(bot))
| 2.3125 | 2 |
s4gutils.py | perwin/s4g_barsizes | 2 | 12762994 | <reponame>perwin/s4g_barsizes
# Miscellaneous code for analysis of S4G bar fractions
import copy
import math
import random
import numpy as np
random.seed()
# lower and upper bounds of 68.3% confidence interval:
ONESIGMA_LOWER = 0.1585
ONESIGMA_UPPER = 0.8415
def Read2ColumnProfile( fname ):
    """Read in the (first) two columns from a simple text file where the columns
    are separated by whitespace and lines beginning with '#' are ignored.
    Returns tuple of (x, y), where x and y are numpy 1D arrays corresponding to
    the first and second column
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left closing to garbage collection).
    with open(fname) as fh:
        dlines = [line for line in fh if len(line) > 1 and line[0] != "#"]
    x = [float(line.split()[0]) for line in dlines]
    y = [float(line.split()[1]) for line in dlines]
    return np.array(x), np.array(y)
def dtomm( distanceMpc ):
    """Converts distance in Mpc to distance modulus (M - m, in magnitudes)
    via mu = 25 + 5*log10(D/Mpc)."""
    five_logD = 5.0 * np.log10(distanceMpc)
    distanceModulus = 25.0 + five_logD
    return distanceModulus
def HIMassToFlux( M_HI, dist_Mpc ):
    """Converts H I mass (in solar masses) to equivalent H I flux (in Jy km/s)
    based on distance in Mpc. Equation originally from Giovanelli & Haynes
    (1988, in Galactic and extragalactic radio astronomy (2nd edition), p.522),
    based on Roberts (1975).
    """
    flux = M_HI / (2.356e5 * dist_Mpc**2)
    return flux
def GetRadialSampleFromSphere( rMin, rMax ):
    """Get radius sample from spherical Euclidean volume (or spherical shell) by
    rejection: draw uniform points in the cube of half-width rMax and keep the
    first whose radius lies in [rMin, rMax]."""
    rMin2, rMax2 = rMin * rMin, rMax * rMax
    while True:
        # Same draw order as before (x, y, z) to preserve the RNG stream.
        px = random.uniform(-rMax, rMax)
        py = random.uniform(-rMax, rMax)
        pz = random.uniform(-rMax, rMax)
        r2 = px * px + py * py + pz * pz
        if rMin2 <= r2 <= rMax2:
            return math.sqrt(r2)
def AIC( logLikelihood, nParams ):
    """Calculate the original Akaike Information Criterion,
    AIC = -2 ln(L) + 2 k, for a best-fit model with ln-likelihood
    *logLikelihood* and *nParams* parameters.

    For small samples (nData < ~40*nParams) use the corrected form AICc.
    """
    neg2LogL = -2.0*logLikelihood
    return neg2LogL + 2.0*nParams
def AICc( logLikelihood, nParams, nData, debug=False ):
    """Calculate the bias-corrected Akaike Information Criterion given the
    ln(likelihood) of the best-fit model, the number of model parameters
    nParams, and the number of data points nData.

    Formula from Burnham & Anderson, Model selection and multimodel inference:
    a practical information-theoretic approach (2002), p.66.
    """
    # Plain AIC (same arithmetic as AIC() above, inlined here).
    aic = -2.0*logLikelihood + 2.0*nParams
    # Small-sample bias-correction term.
    correctionTerm = 2*nParams*(nParams + 1) / (nData - nParams - 1.0)
    if debug:
        print("AICc: ", aic, correctionTerm)
    return aic + correctionTerm
def ConfidenceInterval( vect ):
    """Return (lower, upper) bounds of the 68.3% confidence interval of *vect*,
    taken from the sorted sample values at the ONESIGMA_* quantile indices."""
    nVals = len(vect)
    lowIndex = int(round(ONESIGMA_LOWER * nVals)) - 1
    highIndex = int(round(ONESIGMA_UPPER * nVals))
    sortedCopy = copy.copy(vect)
    sortedCopy.sort()
    return (sortedCopy[lowIndex], sortedCopy[highIndex])
def Binomial( n, n_tot, nsigma=1.0, conf_level=None, method="wilson" ):
    """Computes fraction (aka frequency or rate) of occurances p = (n/n_tot).
    Also computes the lower and upper confidence limits using either the
    Wilson (1927) or Agresti & Coull (1998) method (method="wilson" or method="agresti");
    default is to use Wilson method.
    Default is to calculate 68.26895% confidence limits (i.e., 1-sigma in the
    Gaussian approximation).

    Returns tuple of (p, sigma_minus, sigma_plus).
    """
    p = (1.0 * n) / n_tot
    q = 1.0 - p
    if (conf_level is not None):
        print("Alternate values of nsigma or conf_limit not yet supported!")
        alpha = 1.0 - conf_level
        # R code would be the following:
        #z_alpha = qnorm(1.0 - alpha/2.0)
        return None
    z_alpha = nsigma  # e.g., z_alpha = nsigma = 1.0 for 68.26895% conf. limits
    if (method == "wald"):
        # Wald (aka asymptotic) method -- don't use except for testing purposes!
        halfWidth = z_alpha * np.sqrt(p*q/n_tot)
        return (p, halfWidth, halfWidth)
    z_alpha2 = z_alpha**2
    n_tot_mod = n_tot + z_alpha2
    p_mod = (n + 0.5*z_alpha2) / n_tot_mod
    if (method == "wilson"):
        # Wilson (1927) method
        sigma_mod = np.sqrt(z_alpha2 * n_tot * (p*q + z_alpha2/(4.0*n_tot))) / n_tot_mod
    elif (method == "agresti"):
        # Agresti-Coull method
        sigma_mod = np.sqrt(z_alpha2 * p_mod * (1.0 - p_mod) / n_tot_mod)
    else:
        print("ERROR: method \"%s\" not implemented in Binomial!" % method)
        return None
    sigma_minus = p - (p_mod - sigma_mod)
    sigma_plus = (p_mod + sigma_mod) - p
    return (p, sigma_minus, sigma_plus)
def bootstrap_validation( x, y, nIter, fittingFn, modelFn=None, computeModelFn=None,
initialParams=None, adjustEstimate=True, errs=None,
verbose=False ):
"""
Uses bootstrap resampling to estimate the accuracy of a model (analogous to
"leave-k-out" cross-validation).
See Sec. 7.11 of Hastie, Tibshirani, and Friedman 2008, Elements of Statistical
Learning (2nd Ed.).
Parameters
----------
x : numpy array of independent variable values (predictors)
Can also be tuple or list of 2 numpy arrays
y : numpy array of dependent variable values
nIter : int
number of bootstrap iterations to run
fittingFn : function or callable
fittingFn(x, y, initialParams=None) fits the model
specified by modelFn to the data specified by x and y
Returns "fitResult", which will be used by modelFn or computeModelFn
If modelFn is supplied, then we use
fittingFn(x, y, modelFn, initialParams)
modelFn : function or callable, quasi-optional
modelFn(x, params) -- used by fittingFn; computes model which is fit to data
params = either initialParams or fitResult
computeModelFn : function or callable, quasi-optional
computeModelFn(x, fitResult) -- computes model which is fit to data;
meant for cases when modelFn is not needed.
initialParams : any or None, optional
object passed as optional input to fittingFn
adjustEstimate : bool, optional [default = True]
If True (default), then the final error estimate is corrected using
the ".632+ bootstrap estimator" rule (Efron & Tibshirani 1997):
err = 0.368*err_training + 0.632*err_bootstrap
where err_training is the mean squared error of the model fit to the
complete dataset and err_bootstrap is the mean of the mean squared
errors from bootstrap resampling
If False, then the return value is just err_bootstrap
errs : numpy array of float or None, optional
array of Gaussian sigmas associated with y
Returns
---------
errorEstimate : float
Approximation to the test error (mean squared error for predictions from the model
Examples
---------
Fit a 2nd-order polynomial to data:
# define wrapper for np.polyval, since that function uses reverse of
# our normal input ordering
def nicepolyval( x, p ):
return np.polyval(p, x)
# use initialParams to set the "deg" parameter for np.polyfit
bootstrap_validation(x, y, 100, np.polyfit, computeModelFn=nicepolyval,
initialParams=2)
"""
if modelFn is None and computeModelFn is None:
print("ERROR: you must supply at least one of modelFn or computeModelFn!")
return None
if computeModelFn is None:
evaluateModel = modelFn
else:
evaluateModel = computeModelFn
nData = len(y)
# initial fit to all the data ("training")
fitResult = fittingFn(x, y, initialParams, errs)
# MSE for fit to all the data
residuals = y - evaluateModel(x, fitResult)
errorTraining = np.mean(residuals**2)
if verbose:
print(fitResult)
print("training MSE = %g" % errorTraining)
# Do bootstrap iterations
indices = np.arange(0, nData)
nIterSuccess = 0
individualBootstrapErrors = []
for b in range(nIter):
i_bootstrap = np.random.choice(indices, nData, replace=True)
i_excluded = [i for i in indices if i not in i_bootstrap]
nExcluded = len(i_excluded)
if (nExcluded > 0):
if type(x) in [tuple,list]:
x_b = (x[0][i_bootstrap], x[1][i_bootstrap])
else:
x_b = x[i_bootstrap]
y_b = y[i_bootstrap]
try:
if errs is None:
fitResult_b = fittingFn(x_b, y_b, initialParams, None)
else:
fitResult_b = fittingFn(x_b, y_b, initialParams, errs[i_bootstrap])
residuals = y - evaluateModel(x, fitResult_b)
# calculate mean squared prediction error for this sample
errorB = (1.0/nExcluded) * np.sum(residuals[i_excluded]**2)
individualBootstrapErrors.append(errorB)
nIterSuccess += 1
except RuntimeError:
# couldn't get a proper fit, so let's discard this sample and try again
pass
individualBootstrapErrors = np.array(individualBootstrapErrors)
errorPredict = np.mean(individualBootstrapErrors)
if verbose:
print("test MSE = %g (%d successful iterations)" % (errorPredict, nIterSuccess))
if adjustEstimate is True:
adjustedErrorPredict = 0.368*errorTraining + 0.632*errorPredict
if verbose:
print("Adjusted test MSE = %g" % adjustedErrorPredict)
return adjustedErrorPredict
else:
return errorPredict
# Various functions for estimating stellar masses from absolute magnitudes and color-based
# M/L values
def magratio( mag1, mag2, mag1_err=None, mag2_err=None ):
    """Return the luminosity ratio implied by two magnitudes.

    If at least one magnitude error is supplied, the error on the ratio is
    also propagated (a missing error is treated as zero) and the tuple
    (lumRatio, lumRatio_err) is returned; otherwise just the ratio.
    """
    lumRatio = 10.0**(-0.4*(mag1 - mag2))
    if mag1_err is None and mag2_err is None:
        return lumRatio
    # A single missing error counts as exactly zero.
    if mag1_err is None:
        mag1_err = 0.0
    if mag2_err is None:
        mag2_err = 0.0
    # Standard propagation: d(10^(-0.4*dm))/dm = -+0.4*ln10*ratio
    term1 = ln10*lumRatio*(-0.4) * mag1_err
    term2 = ln10*lumRatio*(0.4) * mag2_err
    return (lumRatio, math.sqrt(term1**2 + term2**2))
# Solar absolute magnitudes from Table 1.2 of Sparke & Gallagher for U, from
# Bell & de Jong (2001) for Johnson B and V, Kron-Cousins R and I, and
# Johnson J, H, and K original sources: Cox 2000; Bessel 1979; Worthey 1994).
# Solar absolute magnitudes for SDSS ugriz (AB mag) are from Bell et al. (2003 ApJS 149: 289).
# Thus, filters are standard Johnson-Cousins UBVRIJHK + SDSS ugriz, with
# K = standard ("broad") K, *not* K_s.
# K_s value taken from Kormendy+10: "The 2MASS survey uses a Ks bandpass whose
# effective wavelength is ~ 2.16 microns (Carpenter 2001; Bessell 2005). Following
# the above papers, we assume that Ks = K - 0.044. Then the Ks-band absolute
# magnitude of the Sun is 3.29."
# Mapping: filter name -> absolute magnitude of the Sun in that bandpass
# (Vega magnitudes for UBVRIJHK/K_s, AB magnitudes for SDSS ugriz).
solarAbsMag = { "U": 5.62, "B": 5.47, "V": 4.82, "R": 4.46, "I": 4.14,
				"J": 3.70, "H": 3.37, "K": 3.33, "u": 6.41, "g": 5.15,
				"r": 4.67, "i": 4.56, "z": 4.53, "K_s": 3.29 }
def solarL( mag, filterName, mag_err=None, Ks=False ):
    """Convert an absolute magnitude in the named bandpass into solar
    luminosities, using the module-level solarAbsMag table.

    Solar absolute magnitudes come from Table 1.2 of Sparke & Gallagher for U
    and from Bell & de Jong (2001, ApJ 550: 212) for Johnson B and V,
    Kron-Cousins R and I, and Johnson J, H, and K (original sources:
    Cox 2000; Bessel 1979; Worthey 1994).  Solar absolute magnitudes for
    SDSS ugriz are from Bell et al. (ApJS 149: 289).
    Filters are standard Johnson-Cousins UBVRIJHK + SDSS ugriz, with
    K = standard ("broad") K, *not* K_s.
    If Ks = True, then K_s is substituted for K.
    If mag_err is given, the error on the luminosity is also computed
    [in magratio()], assuming the solar absolute magnitude is exact.
    Returns 0 (and prints a warning) for an unrecognized filter name.
    """
    if (Ks is True) and (filterName == "K"):
        filterName = "K_s"
    # Look-before-you-leap membership test instead of catching KeyError.
    if filterName not in solarAbsMag:
        print(" solarL: unrecognized filter \"%s\"!" % filterName)
        return 0
    m_Sun = solarAbsMag[filterName]
    if mag_err is None:
        return magratio(mag, m_Sun)
    return magratio(mag, m_Sun, mag_err)
def MassToLight( band, colorType, color, err=None, mode="Bell" ):
    """Calculates stellar mass-to-light ratio for a specified band (one of
    BVRIJHK), given a color index.
    band = the desired band for the mass-to-light ratio (one of Johnson-Cousins
    BVRIJHK [Vega magnitudes] or SDSS ugriz [AB magnitudes]).
    colorType="B-V", "B-R", "V-I", "V-J", "V-H", or "V-K" for Johnson-Cousins
    colors, or "u-g", "u-r", "u-i", "u-z", "g-r", "g-i", "g-z", "r-i", or "r-z"
    for SDSS colors.
    color = value of the specified color index.
    Returns M/L (mass in solar masses / luminosity in solar luminosities),
    or None if the mode or the color/band combination is not recognized.
    If err != None, then the error in M/L is also returned (using the
    dex value provided in err, which should be 0.1--0.2).
    Based on Table 1 of Bell & de Jong (2001, ApJ 550: 212) and
    Table 7 of Bell et al. (2003, ApJS 149: 289); note that B-V and B-R
    values use Bell+2003, but other optical colors use Bell & de Jong.
    Alternately, the fits in Zibetti+2009 (Table B1) can be used instead,
    by specifying mode="Zibetti"
    """
    # dictionaries indexed by colorType, holding sub-dictionaries with
    # corresponding [a, b] coefficients (log M/L = a + b*color), indexed by band
    coefficients_B = {}
    # M/L ratios for Johnson-Cousin bands, from Bell et al. (2003) for B-V
    # and B-R, and from Bell & de Jong (2001) for other colors:
    coefficients_B['B-V'] = {'B': [-0.942, 1.737], 'V': [-0.628, 1.305],
        'R': [-0.520, 1.094], 'I': [-0.399, 0.824], 'J': [-0.261, 0.433],
        'H': [-0.209, 0.210], 'K': [-0.206, 0.135]}
    coefficients_B['B-R'] = {'B': [-1.224, 1.251], 'V': [-0.916, 0.976],
        'R': [-0.523, 0.683], 'I': [-0.405, 0.518], 'J': [-0.289, 0.297],
        'H': [-0.262, 0.180], 'K': [-0.264, 0.138]}
    coefficients_B['V-I'] = {'B': [-1.919, 2.214], 'V': [-1.476, 1.747],
        'R': [-1.314, 1.528], 'I': [-1.204, 1.347], 'J': [-1.040, 0.987],
        'H': [-1.030, 0.870], 'K': [-1.027, 0.800]}
    coefficients_B['V-J'] = {'B': [-1.903, 1.138], 'V': [-1.477, 0.905],
        'R': [-1.319, 0.794], 'I': [-1.209, 0.700], 'J': [-1.029, 0.505],
        'H': [-1.014, 0.442], 'K': [-1.005, 0.402]}
    coefficients_B['V-H'] = {'B': [-2.181, 0.978], 'V': [-1.700, 0.779],
        'R': [-1.515, 0.684], 'I': [-1.383, 0.603], 'J': [-1.151, 0.434],
        'H': [-1.120, 0.379], 'K': [-1.100, 0.345]}
    coefficients_B['V-K'] = {'B': [-2.156, 0.895], 'V': [-1.683, 0.714],
        'R': [-1.501, 0.627], 'I': [-1.370, 0.553], 'J': [-1.139, 0.396],
        'H': [-1.108, 0.346], 'K': [-1.087, 0.314]}
    # M/L ratios for SDSS + Johnson-Cousins NIR bands, from Bell et al. 2003:
    coefficients_B['u-g'] = {'g': [-0.221, 0.485], 'r': [-0.099, 0.345],
        'i': [-0.053, 0.268], 'z': [-0.105, 0.226], 'J': [-0.128, 0.169],
        'H': [-0.209, 0.133], 'K': [-0.260, 0.123]}
    coefficients_B['u-r'] = {'g': [-0.390, 0.417], 'r': [-0.223, 0.299],
        'i': [-0.151, 0.233], 'z': [-0.178, 0.192], 'J': [-0.172, 0.138],
        'H': [-0.237, 0.104], 'K': [-0.273, 0.091]}
    coefficients_B['u-i'] = {'g': [-0.375, 0.359], 'r': [-0.212, 0.257],
        'i': [-0.144, 0.201], 'z': [-0.171, 0.165], 'J': [-0.169, 0.119],
        'H': [-0.233, 0.090], 'K': [-0.267, 0.077]}
    coefficients_B['u-z'] = {'g': [-0.400, 0.332], 'r': [-0.232, 0.239],
        'i': [-0.161, 0.187], 'z': [-0.179, 0.151], 'J': [-0.163, 0.105],
        'H': [-0.205, 0.071], 'K': [-0.232, 0.056]}
    coefficients_B['g-r'] = {'g': [-0.499, 1.519], 'r': [-0.306, 1.097],
        'i': [-0.222, 0.864], 'z': [-0.223, 0.689], 'J': [-0.172, 0.444],
        'H': [-0.189, 0.266], 'K': [-0.209, 0.197]}
    coefficients_B['g-i'] = {'g': [-0.379, 0.914], 'r': [-0.220, 0.661],
        'i': [-0.152, 0.518], 'z': [-0.175, 0.421], 'J': [-0.153, 0.283],
        'H': [-0.186, 0.179], 'K': [-0.211, 0.137]}
    coefficients_B['g-z'] = {'g': [-0.367, 0.698], 'r': [-0.215, 0.508],
        'i': [-0.153, 0.402], 'z': [-0.171, 0.322], 'J': [-0.097, 0.175],
        'H': [-0.117, 0.083], 'K': [-0.138, 0.047]}
    coefficients_B['r-i'] = {'g': [-0.106, 1.982], 'r': [-0.022, 1.431],
        'i': [0.006, 1.114], 'z': [-0.052, 0.923], 'J': [-0.079, 0.650],
        'H': [-0.148, 0.437], 'K': [-0.186, 0.349]}
    coefficients_B['r-z'] = {'g': [-0.124, 1.067], 'r': [-0.041, 0.780],
        'i': [-0.018, 0.623], 'z': [-0.041, 0.463], 'J': [-0.011, 0.224],
        'H': [-0.059, 0.076], 'K': [-0.092, 0.019]}
    coefficients_Z = {}
    # M/L ratios for SDSS colors + SDSS or JHK bands, from Zibetti+2009:
    coefficients_Z['u-g'] = {'g': [-1.628, 1.360], 'r': [-1.319, 1.093],
        'i': [-1.277, 0.980], 'z': [-1.315, 0.913], 'J': [-1.350, 0.804],
        'H': [-1.467, 0.750], 'K': [-1.578, 0.739]}
    coefficients_Z['u-r'] = {'g': [-1.427, 0.835], 'r': [-1.157, 0.672],
        'i': [-1.130, 0.602], 'z': [-1.181, 0.561], 'J': [-1.235, 0.495],
        'H': [-1.361, 0.463], 'K': [-1.471, 0.455]}
    coefficients_Z['u-i'] = {'g': [-1.468, 0.716], 'r': [-1.193, 0.577],
        'i': [-1.160, 0.517], 'z': [-1.206, 0.481], 'J': [-1.256, 0.422],
        'H': [-1.374, 0.393], 'K': [-1.477, 0.384]}
    coefficients_Z['u-z'] = {'g': [-1.559, 0.658], 'r': [-1.268, 0.531],
        'i': [-1.225, 0.474], 'z': [-1.260, 0.439], 'J': [-1.297, 0.383],
        'H': [-1.407, 0.355], 'K': [-1.501, 0.344]}
    coefficients_Z['g-r'] = {'g': [-1.030, 2.053], 'r': [-0.840, 1.654],
        'i': [-0.845, 1.481], 'z': [-0.914, 1.382], 'J': [-1.007, 1.225],
        'H': [-1.147, 1.144], 'K': [-1.257, 1.119]}
    coefficients_Z['g-i'] = {'g': [-1.197, 1.431], 'r': [-0.977, 1.157],
        'i': [-0.963, 1.032], 'z': [-1.019, 0.955], 'J': [-1.098, 0.844],
        'H': [-1.222, 0.780], 'K': [-1.321, 0.754]}
    coefficients_Z['g-z'] = {'g': [-1.370, 1.190], 'r': [-1.122, 0.965],
        'i': [-1.089, 0.858], 'z': [-1.129, 0.791], 'J': [-1.183, 0.689],
        'H': [-1.291, 0.632], 'K': [-1.379, 0.604]}
    coefficients_Z['r-i'] = {'g': [-1.405, 4.280], 'r': [-1.155, 3.482],
        'i': [-1.114, 3.087], 'z': [-1.145, 2.828], 'J': [-1.199, 2.467],
        'H': [-1.296, 2.234], 'K': [-1.371, 2.109]}
    coefficients_Z['r-z'] = {'g': [-1.576, 2.490], 'r': [-1.298, 2.032],
        'i': [-1.238, 1.797], 'z': [-1.250, 1.635], 'J': [-1.271, 1.398],
        'H': [-1.347, 1.247], 'K': [-1.405, 1.157]}
    # M/L ratios for Johnson-Cousin colors and bands
    coefficients_Z['B-V'] = {'B': [-1.330, 2.237], 'V': [-1.075, 1.837],
        'R': [-0.989, 1.620], 'I': [-1.003, 1.475], 'J': [-1.135, 1.267],
        'H': [-1.274, 1.190], 'K': [-1.390, 1.176]}
    coefficients_Z['B-R'] = {'B': [-1.614, 1.466], 'V': [-1.314, 1.208],
        'R': [-1.200, 1.066], 'I': [-1.192, 0.967], 'J': [-1.289, 0.822],
        'H': [-1.410, 0.768], 'K': [-1.513, 0.750]}
    if (mode == "Bell"):
        coefficients = coefficients_B
    elif (mode == "Zibetti"):
        coefficients = coefficients_Z
    else:
        print("\n*** bad mode (\"%s\") selected in MassToLight! *** \n" % mode)
        return None
    try:
        a = coefficients[colorType][band][0]
        b = coefficients[colorType][band][1]
    # BUG FIX: this handler previously used "except KeyError as err", which
    # shadowed the err *parameter*; renamed the exception variable.
    except KeyError as keyErr:
        txt = "\n*** %s is not an allowed color or band (or color/band combination) for %s et al. mass ratios! ***\n" % (keyErr, mode)
        txt += "    (MassToLight called with colorType = '%s', band = '%s')\n" % (colorType, band)
        print(txt)
        return None
    logML = a + b*color
    if err is None:
        return 10**logML
    else:
        MtoL = 10**logML
        # Propagate the dex (log-space) uncertainty: sigma = ln(10)*err*M/L
        sigma_MtoL = ln10*err*MtoL
        return (MtoL, sigma_MtoL)
def AbsMagToStellarMass( absMag, band, colorType="B-V", color=None, mag_err=None,
                        MtoL_err=0.1, mode="Bell", MtoL=None ):
    """Calculates a galaxy's stellar mass (in solar masses) given as input an
    absolute magnitude, the corresponding filter (one of BVRIJK), the galaxy
    color type (e.g., "B-V", "B-R", "V-I", "V-J", "V-H", "V-K"; SDSS colors
    such as "u-g", "u-r", "u-i", "u-z", "g-r", "g-i", "g-z", etc., can also
    be used), and the color index.
    If mag_err is defined, then error propagation is used and
    (M_stellar, err_M_stellar) is returned.  Note that if mag_err=0.0,
    errors for the M/L ratio will still be propagated.  The default error
    for M/L is 0.1 dex, but this can be changed with the MtoL_err keyword;
    if so, it must be in *log* units.
    Uses M/L ratios from Table 1 of Bell & de Jong [see MassToLight() above]
    and solar-luminosity conversion from Table 1.2 of Sparke & Gallagher
    [see solarL() above]; to use the M/L ratios from Zibetti+2009, use
    mode="Zibetti".
    Alternatively, a user-supplied M/L value can be given with the MtoL
    keyword.
    Returns None -- or (None, None) when mag_err is given -- if the M/L
    lookup fails.
    """
    if (mag_err is None):
        # Simple case: no error propagation requested.
        if MtoL is None:
            MtoL = MassToLight(band, colorType, color, mode=mode)
            if MtoL is None:
                return None
        solarLum = solarL(absMag, band)
        return MtoL * solarLum
    else:
        if MtoL is None:
            (MtoL, err_MtoL) = MassToLight(band, colorType, color, err=MtoL_err, mode=mode)
            if MtoL is None:
                return (None, None)
        else:
            # BUG FIX: a user-supplied MtoL previously left err_MtoL undefined,
            # raising NameError at the "p1 = err_MtoL/MtoL" line below.
            # Propagate the dex uncertainty MtoL_err the same way
            # MassToLight() does: sigma = ln(10)*err*M/L.
            err_MtoL = ln10 * MtoL_err * MtoL
        # NOTE(review): solarL() returns scalar 0 for an unrecognized filter,
        # which would make this tuple unpacking fail -- confirm intended.
        (solarLum, err_solarLum) = solarL(absMag, band, mag_err)
        M_stellar = MtoL * solarLum
        # Combine the fractional errors in quadrature.
        p1 = err_MtoL/MtoL
        if (err_solarLum > 0.0):
            p2 = err_solarLum/solarLum
        else:
            p2 = 0.0
        err_M_stellar = math.sqrt(p1**2 + p2**2) * M_stellar
        return (M_stellar, err_M_stellar)
| 2.546875 | 3 |
test/requirements/test_rdf_source.py | denz/ldp | 0 | 12762995 | <filename>test/requirements/test_rdf_source.py
"""
### 4.3 RDF Source
The following section contains normative clauses for Linked Data Platform RDF
Source.
#### 4.3.1 General
##### 4.3.1.1 Each LDP RDF Source _MUST_ also be a conforming LDP Resource as
defined in section 4.2 Resource, along with the restrictions in this section.
LDP clients _MAY_ infer the following triple: one whose subject is the LDP-RS,
whose predicate is `rdf:type`, and whose object is `ldp:Resource`, but there
is no requirement to materialize this triple in the LDP-RS representation.
##### 4.3.1.2 LDP-RSs representations _SHOULD_ have at least one `rdf:type`
set explicitly. This makes the representations much more useful to client
applications that don't support inferencing.
##### 4.3.1.3 The representation of a LDP-RS _MAY_ have an `rdf:type` of
`ldp:RDFSource` for Linked Data Platform RDF Source.
##### 4.3.1.4 LDP servers _MUST_ provide an RDF representation for LDP-RSs.
The HTTP `Request-URI` of the LDP-RS is typically the subject of most triples
in the response.
##### 4.3.1.5 LDP-RSs _SHOULD_ reuse existing vocabularies instead of creating
their own duplicate vocabulary terms. In addition to this general rule, some
specific cases are covered by other conformance rules.
##### 4.3.1.6 LDP-RSs predicates _SHOULD_ use standard vocabularies such as
Dublin Core [DC-TERMS], RDF [rdf11-concepts] and RDF Schema [rdf-schema],
whenever possible.
##### 4.3.1.7 In the absence of special knowledge of the application or
domain, LDP clients _MUST_ assume that any LDP-RS can have multiple `rdf:type`
triples with different objects.
##### 4.3.1.8 In the absence of special knowledge of the application or
domain, LDP clients _MUST_ assume that the `rdf:type` values of a given LDP-RS
can change over time.
##### 4.3.1.9 LDP clients _SHOULD_ always assume that the set of predicates
for a LDP-RS of a particular type at an arbitrary server is open, in the sense
that different resources of the same type may not all have the same set of
predicates in their triples, and the set of predicates that are used in the
state of any one LDP-RS is not limited to any pre-defined set.
##### 4.3.1.10 LDP servers _MUST NOT_ require LDP clients to implement
inferencing in order to recognize the subset of content defined by LDP. Other
specifications built on top of LDP may require clients to implement
inferencing [rdf11-concepts]. The practical implication is that all content
defined by LDP must be explicitly represented, unless noted otherwise within
this document.
##### 4.3.1.11 A LDP client _MUST_ preserve all triples retrieved from a LDP-
RS using HTTP `GET` that it doesn't change whether it understands the
predicates or not, when its intent is to perform an update using HTTP `PUT`.
The use of HTTP `PATCH` instead of HTTP `PUT` for update avoids this burden
for clients [RFC5789].
##### 4.3.1.12 LDP clients _MAY_ provide LDP-defined hints that allow servers
to optimize the content of responses. section 7.2 Preferences on the Prefer
Request Header defines hints that apply to LDP-RSs.
##### 4.3.1.13 LDP clients _MUST_ be capable of processing responses formed
by a LDP server that ignores hints, including LDP-defined hints.
Feature At Risk
The LDP Working Group proposes incorporation of the following clause to make
LDP clients paging aware:
##### 4.3.1.14 LDP clients _SHOULD_ be capable of processing successful HTTP
`GET` responses formed by a LDP server that independently initiated paging,
returning a page of representation instead of full resource representation
[LDP-PAGING].
#### 4.3.2 HTTP GET
##### 4.3.2.1 LDP servers _MUST_ respond with a Turtle representation of the
requested LDP-RS when the request includes an `Accept` header specifying
`text/turtle`, unless HTTP content negotiation _requires_ a different outcome
[turtle].
> _Non-normative note: _ In other words, Turtle must be returned by LDP
servers in the usual case clients would expect (client requests it) as well as
cases where the client requests Turtle or other media type(s), content
negotiation results in a tie, and Turtle is one of the tying media types. For
example, if the `Accept` header lists `text/turtle` as one of several media
types with the highest relative quality factor (`q=` value), LDP servers must
respond with Turtle. HTTP servers in general are not required to resolve ties
in this way, or to support Turtle at all, but LDP servers are. On the other
hand, if Turtle is one of several requested media types, but another media
type the server supports has a higher relative quality factor, standard HTTP
content negotiation rules apply and the server (LDP or not) would not respond
with Turtle.
##### 4.3.2.2 LDP servers _SHOULD_ respond with a `text/turtle` representation
of the requested LDP-RS whenever the `Accept` request header is absent
[turtle].
Feature At Risk
The LDP Working Group proposes incorporation of the following clause requiring
JSON-LD support.
##### 4.3.2.3 LDP servers _MUST_ respond with a `application/ld+json`
representation of the requested LDP-RS when the request includes an `Accept`
header, unless content negotiation or Turtle support _requires_ a different
outcome [JSON-LD].
*[LDPRs]: Linked Data Platform Resources
*[LDP-RS]: Linked Data Platform RDF Source
*[RDF]: Resource Description Framework
*[LDPR]: Linked Data Platform Resource
*[LDPC]: Linked Data Platform Container
"""
from test.requirements.base import LdpTestCase
from test.requirements.base import CONTINENTS
class LdprsGeneral(LdpTestCase):
    """Stub tests mirroring the general LDP-RS conformance clauses
    (LDP 1.0 spec, section 4.3.1); each method's docstring quotes the
    clause it is meant to cover.  All bodies are currently `pass`."""
    def test_4_3_1_1(self):
        """
        4.3.1.1 Each LDP RDF Source MUST also be
        a conforming LDP Resource as defined in section 4.2 Resource, along with the
        restrictions in this section. LDP clients MAY infer the following triple: one
        whose subject is the LDP-RS,
        whose predicate is rdf:type,
        and whose object is ldp:Resource,
        but there is no requirement to materialize this triple in the LDP-RS representation.
        """
        pass
    def test_4_3_1_2(self):
        """
        4.3.1.2 LDP-RSs representations SHOULD
        have at least one rdf:type
        set explicitly. This makes the representations much more useful to
        client applications that don't support inferencing.
        """
        pass
    def test_4_3_1_4(self):
        """
        4.3.1.4 LDP servers MUST provide an RDF representation for LDP-RSs.
        The HTTP Request-URI of the LDP-RS is typically the subject of most triples in the response.
        """
        pass
    def test_4_3_1_5(self):
        """
        4.3.1.5 LDP-RSs SHOULD reuse existing vocabularies instead of creating
        their own duplicate vocabulary terms. In addition to this general rule, some specific cases are
        covered by other conformance rules.
        """
        pass
    def test_4_3_1_6(self):
        """
        4.3.1.6 LDP-RSs predicates SHOULD use standard vocabularies such as Dublin Core
        [DC-TERMS], RDF [rdf11-concepts] and RDF Schema [rdf-schema], whenever
        possible.
        """
        pass
    def test_4_3_1_7(self):
        """
        4.3.1.7 In the absence of special knowledge of the application or domain,
        LDP clients MUST assume that any LDP-RS can have multiple rdf:type triples with different objects.
        """
        pass
    def test_4_3_1_8(self):
        """
        4.3.1.8 In the absence of special knowledge of the application or domain,
        LDP clients MUST assume that the rdf:type values
        of a given LDP-RS can change over time.
        """
        pass
    def test_4_3_1_9(self):
        """
        4.3.1.9 LDP clients SHOULD always assume that the set of predicates for a
        LDP-RS of a particular type at an arbitrary server is open, in the
        sense that different resources of the same type may not all have the
        same set of predicates in their triples, and the set of predicates that
        are used in the state of any one LDP-RS is not limited to any pre-defined
        set.
        """
        pass
    def test_4_3_1_11(self):
        """
        4.3.1.11
        A LDP client MUST preserve all triples retrieved from a LDP-RS using HTTP GET that
        it doesn't change whether it understands the predicates or not, when
        its intent is to perform an update using HTTP PUT. The use of HTTP
        PATCH instead of HTTP PUT for update avoids this burden for clients
        [RFC5789].
        """
        pass
    def test_4_3_1_13(self):
        """
        4.3.1.13 LDP clients MUST
        be capable of processing responses formed by a LDP server that ignores hints,
        including LDP-defined hints.
        """
        pass
    def test_4_3_1_14(self):
        """
        4.3.1.14
        LDP clients SHOULD
        be capable of processing successful HTTP GET responses formed by a LDP server
        that independently initiated paging, returning a page of representation instead of full resource
        representation [LDP-PAGING].
        """
        pass
class LdprsHttpGet(LdpTestCase):
    """Tests for the LDP-RS HTTP GET conformance clauses
    (LDP 1.0 spec, section 4.3.2)."""
    # Test fixture graph(s); presumably loaded by the LdpTestCase
    # machinery -- confirm against test.requirements.base.
    GRAPHS = {'continents': {'source': 'test/continents.rdf',
                             'publicID': CONTINENTS},
              }
    def test_4_3_2_1(self):
        """
        4.3.2.1 LDP servers MUST
        respond with a Turtle
        representation of the requested LDP-RS when
        the request includes an Accept header specifying text/turtle,
        unless HTTP content negotiation requires a different outcome
        [turtle].
        Non-normative note:
        In other words, Turtle must be returned by LDP servers
        in the usual case clients would expect (client requests it)
        as well as cases where the client requests Turtle or other media type(s), content negotiation results in a tie,
        and Turtle is one of the tying media types.
        For example, if the Accept header lists text/turtle as one of several media types with the
        highest relative quality
        factor (q= value), LDP servers must respond with Turtle.
        HTTP servers in general are not required to resolve ties in this way, or to support Turtle at all, but
        LDP servers are.
        On the other hand, if Turtle is one of several requested media types,
        but another media type the server supports has a higher relative quality factor,
        standard HTTP content negotiation rules apply and the server (LDP or not) would not respond with Turtle.
        """
        # NOTE(review): `response` is never asserted on -- presumably this
        # test should check the status code and/or Content-Type; confirm.
        response = self.app.get('/rdfsource/AF',
                                headers={'Accept':'application/ld+json'})
    def test_4_3_2_2(self):
        """
        4.3.2.2 LDP servers SHOULD
        respond with a text/turtle
        representation of the requested LDP-RS whenever
        the Accept request header is absent [turtle].
        """
        """this is violated since html application lies on top"""
        pass
    def test_4_3_2_3(self):
        """
        4.3.2.3 LDP servers MUST
        respond with a application/ld+json
        representation of the requested LDP-RS
        when the request includes an Accept header, unless content negotiation
        or Turtle support
        requires a different outcome [JSON-LD].
        """
        pass
back-end/f1hub/constructors/schema.py | mmzboys/F1Hub | 0 | 12762996 | <reponame>mmzboys/F1Hub<gh_stars>0
import graphene
from graphene_django import DjangoObjectType
from .models import Constructor
class ConstructorType(DjangoObjectType):
    """GraphQL object type exposing the Constructor Django model's fields."""
    class Meta:
        # graphene-django introspects this model to build the GraphQL fields.
        model = Constructor
class Query(graphene.ObjectType):
    """Root GraphQL query type for constructor data."""
    constructors = graphene.List(ConstructorType, name=graphene.String())

    def resolve_constructors(self, info, name=None, **kwargs):
        """Resolve the `constructors` field: filter on constructorRef when a
        (truthy) name argument is supplied, otherwise return every row."""
        return Constructor.objects.filter(constructorRef=name) if name else Constructor.objects.all()
address_extractor/street_type.py | shaynem/address_extractor | 1 | 12762997 |
from address_extractor import datafile
def load_street_types():
    """Read the street-type data file into a set of lowercased, whitespace-
    stripped entries for fast membership tests."""
    return {entry.strip().lower() for entry in datafile.read_street_types()}


STREET_TYPES = load_street_types()
def is_valid(token):
    """Return True when the given token matches a known street type,
    ignoring case."""
    normalized = token.lower()
    return normalized in STREET_TYPES
| 3 | 3 |
test_syntax.py | SlavaRa/as2cs | 8 | 12762998 | """
Concise grammar unit test format in definitions.
definitions[definition]: [[input], [output]]
Related to gUnit: Grammar unit test for ANTLR
https://theantlrguy.atlassian.net/wiki/display/ANTLR3/gUnit+-+Grammar+Unit+Testing
"""
from glob import glob
from unittest import main, TestCase
from as2cs import cfg, convert, compare_files, \
format_taglist, literals, may_format, realpath, reset
from pretty_print_code.pretty_print_code import format_difference
# --- Debug/configuration switches for the grammar unit tests below. ---
# NOTE(review): is_debug_fail is consumed elsewhere in this module;
# presumably it enables extra diagnostics when a case fails -- confirm.
is_debug_fail = False
# Head length limit for taglist output (presumably passed to
# format_taglist -- confirm); alternate smaller value kept inline.
taglist_head = 4000 # 500
# Uncomment entries to restrict the run to specific grammar definitions;
# an empty list means "no restriction".
debug_definitions = [
    # 'data_type'
    # 'compilation_unit'
    # 'function_definition'
    # 'import_definition'
    # 'ts'
    # 'variable_declaration'
]
# Uncomment to restrict the run to a single source language key.
debug_source = [
    # 'as'
]
# Uncomment to restrict the run to specific case indexes.
debug_indexes = [
    # 2
]
directions = [
    # source, to, source_index, to_index
    # (language extensions plus the column indexes of their sample strings
    # in each `definitions` case list below)
    ['as', 'cs', 0, 1],
    ['cs', 'as', 1, 0],
    ['as', 'js', 0, 2],
    ['js', 'as', 2, 0],
]
definitions = [
('new_expression', [
['new C(a, b)',
'new C(a, b)',
'new C(a, b)'],
['new Vector.<Number>()',
'new List<float>()',
'new Array/*<Vector.<Number>>*/()'],
]),
('expression', [
['"as.g"',
'"as.g"',
'"as.g"'],
['0.125',
'0.125f',
'0.125'],
['a % b',
'a % b',
'a % b'],
['((a + 2) % b)',
'((a + 2) % b)',
'((a + 2) % b)'],
['a ~ b',
'a ~ b',
'a ~ b'],
['a && b',
'a && b',
'a && b'],
['a || b',
'a || b',
'a || b'],
['typeof(index)',
'typeof(index)',
'typeof(index)'],
['parseInt(s)',
'int.Parse(s)',
'parseInt(s)'],
['parseFloat(s)',
'float.Parse(s)',
'parseFloat(s)'],
['path as a.b.string',
'path as a.b.string',
'path /*<as a.b.string>*/'
],
['aString as String',
'aString as string',
'aString /*<as String>*/'
],
['int(path)',
'(int)(path)',
'int(path)'],
['Number(path)',
'(float)(path)',
'Number(path)'],
['paths.length',
'paths.Count',
'paths.length'],
['paths.push(p)',
'paths.Add(p)',
'paths.push(p)'],
['paths.indexOf(p)',
'paths.IndexOf(p)',
'paths.indexOf(p)'],
['paths.splice(p, 1)',
'paths.RemoveRange(p, 1)',
'paths.splice(p, 1)'],
['paths.lengths',
'paths.lengths',
'paths.lengths'],
['paths.length.i',
'paths.length.i',
'paths.length.i'],
['paths.push.i',
'paths.push.i',
'paths.push.i'],
['name.toLowerCase',
'name.ToLower',
'name.toLowerCase'],
['name.lastIndexOf',
'name.LastIndexOf',
'name.lastIndexOf'],
['trace(s)',
'Debug.Log(s)',
'console.log(s)'],
['a.trace(s)',
'a.trace(s)',
'a.trace(s)'],
['Math.floor(a)',
'Mathf.Floor(a)',
'Math.floor(a)'],
['a.Math.floor(index)',
'a.Math.floor(index)',
'a.Math.floor(index)'],
['Math.PI',
'Mathf.PI',
'Math.PI'],
['Math.random()',
'(Random.value % 1.0f)',
'Math.random()'],
['my.Math.random()',
'my.Math.random()',
'my.Math.random()'],
['Math',
'Math',
'Math'],
['-a',
'-a',
'-a'],
[' ++i',
' ++i',
' ++i'],
['-- j',
'-- j',
'-- j'],
# ['thesePaths.concat()',
# 'new ArrayList(thesePaths)'],
# Not supported:
# ['pools["Explosion"].next()',
# 'pools["Explosion"].next()'],
]),
('argument_declaration', [
['path:String',
'string path',
'path:String'],
['index:int = -1',
'int index = -1',
'index:int = -1'],
]),
('variable_declaration', [
['var path:String',
'string path',
'var path:String'],
['var index:int',
'int index',
'var index:int'],
['var a:C = new C()',
'C a = new C()',
'var a:C = new C()'],
['var v:Vector.<Vector.<Boolean>> = new Vector.<Vector.<Boolean>>()',
'List<List<bool>> v = new List<List<bool>>()',
'var v:Array/*<Vector.<Vector.<Boolean>>>*/ = new Array/*<Vector.<Vector.<Boolean>>>*/()'],
['var v:Vector.<Vector.<CustomType>> = new Vector.<Vector.<CustomType>>()',
'List<List<CustomType>> v = new List<List<CustomType>>()',
'var v:Array/*<Vector.<Vector.<CustomType>>>*/ = new Array/*<Vector.<Vector.<CustomType>>>*/()'],
['var a:*',
'var a',
'var a:any'],
]),
('argument_list', [
['path:String',
'string path',
'path:String'],
['path:String, index:int',
'string path, int index',
'path:String, index:int'],
['index:int, isEnabled:Boolean, a:Number',
'int index, bool isEnabled, float a',
'index:int, isEnabled:Boolean, a:Number'],
['path:String, index:int = -1',
'string path, int index = -1',
'path:String, index:int = -1'],
]),
('member_declaration', [
['var path:String="as.g";',
'string path="as.g";',
'path:String="as.g";'],
['var a:int;',
'int a;',
'a:int;'],
['private var index:int=16;',
'private int index=16;',
'/*<private >*/ index:int=16;'],
['private static var index:int = 16;',
'private static int index = 16;',
'/*<private >*/ static index:int = 16;'],
['static var path:String = "as.g";',
'static string path = "as.g";',
'static path:String = "as.g";'],
]),
('conditional_expression', [
['path is Boolean',
'path is bool',
'path instanceof Boolean'],
['path is a.b.Boolean',
'path is a.b.Boolean',
'path instanceof a.b.Boolean'],
['.0 === null',
'object.ReferenceEquals(.0f, null)'],
['.0 === ""',
'object.ReferenceEquals(.0f, "")'],
['a !== b',
'!object.ReferenceEquals(a, b)'],
['i ? 1 : 2',
'i ? 1 : 2'],
['i == 0 ? 1 : 2',
'i == 0 ? 1 : 2'],
['i ? 1 : (b ? c : 4)',
'i ? 1 : (b ? c : 4)'],
]),
('reordered_call', [
['abc.concat()',
'new ArrayList(abc)'],
['a.b.c.concat()',
'new ArrayList(a.b.c)'],
]),
('data_declaration', [
['const path:String',
'const string path'],
]),
('data_type', [
['int', 'int'],
['String', 'string'],
['Boolean', 'bool'],
['Number', 'float'],
['Custom', 'Custom'],
['Array', 'ArrayList'],
['Object', 'Dictionary<string, object>'],
['*', 'var'],
['A.B.C', 'A.B.C'],
['Vector.<String>', 'List<string>'],
['Vector.<Point>', 'List<Vector2>'],
['Vector.<DisplayObject>', 'List<Collider2D>'],
['Vector.<DisplayObjectContainer>', 'List<GameObject>'],
]),
('identifier', [
['_a', '_a'],
['_2', '_2'],
['I', 'I'],
['b', 'b'],
]),
('address', [
['_a', '_a'],
['salad', 'salad'],
['OIL', 'OIL'],
['_0._1._2', '_0._1._2'],
['a[i]', 'a[i]'],
['_0._1[a.b]._2', '_0._1[a.b]._2'],
['_0._1[a % b]._2', '_0._1[a % b]._2'],
]),
('import_definition', [
['import com.finegamedesign.anagram.Model;',
'using /*<com>*/finegamedesign.anagram/*<Model>*/;'],
['import _._2;',
'using _/*<_2>*/;'],
['import _.*;',
'using _/*<*>*/;'],
]),
('class_definition', [
['class C{}', 'class C{}'],
['public class PC{}', 'public class PC{}'],
['internal class IC{}', 'internal class IC{}'],
]),
('ts', [
['/*c*/', '/*c*/'],
['//c', '//c'],
['// var i:int;', '// var i:int;'],
]),
('namespace_modifiers_place', [
['public ',
'public '],
['private static ',
'private static '],
['static private ',
'static private '],
]),
('function_declaration', [
[' function f():void',
' void f()'],
[' function g( ):void',
' void g( )'],
]),
('function_definition', [
[' function f():void{}',
' void f(){}'],
[' function f():void{}',
' void f(){}'],
[' public function f():void{}',
' public void f(){}'],
[' internal function isF():Boolean{}',
' internal bool isF(){}'],
[' protected function getF():Number{}',
' protected float getF(){}'],
[' function ClassConstructor(){i = index;}',
' ClassConstructor(){i = index;}'],
# Not supported:
# [' function f():*{}',
# ' var f(){}'],
]),
('function_declaration', [
[' function f(path:String, index:int):void',
' void f(string path, int index)'],
[' private function isF(index:int, isEnabled:Boolean, a:Number):Boolean',
' private bool isF(int index, bool isEnabled, float a)'],
['\n\n private static function shuffle(cards:Array):void',
'\n\n private static void shuffle(ArrayList cards)'],
]),
('variable_assignment', [
['path = "as.g"',
'path = "as.g"'],
['index = 16',
'index = 16'],
['a = index',
'a = index'],
['this.a = index',
'this.a = index'],
['a += 2',
'a += 2'],
['a -= 2',
'a -= 2'],
['a /= 2',
'a /= 2'],
['a *= 2',
'a *= 2'],
['a %= 2',
'a %= 2'],
['a ^= 2',
'a ^= 2'],
['a &= 2',
'a &= 2'],
['a |= 2',
'a |= 2'],
['a <<= 2',
'a <<= 2'],
['a >>= 2',
'a >>= 2'],
['a[1 + i] = 2',
'a[1 + i] = 2'],
]),
('variable_declaration', [
['var path:String = "as.g"',
'string path = "as.g"'],
['var index:int = 16',
'int index = 16'],
['var swap:* = cards[r]',
'var swap = cards[r]'],
['var r:int = Math.random() * (i + 1)',
'int r = (Random.value % 1.0f) * (i + 1)'],
]),
('function_declaration', [
[' function f(path:String, index:int = -1):void',
' void f(string path, int index = -1)'],
[' private function isF(index:int, isEnabled:Boolean, a:Number=NaN):Boolean',
' private bool isF(int index, bool isEnabled, float a=NaN)'],
]),
('number_format', [
['125',
'125'],
['-125',
'-125'],
['0xFF',
'0xFF'],
['0.125',
'0.125f'],
]),
('function_definition', [
[' function f():void{var i:int = index;}',
' void f(){int i = index;}'],
[' function f():void{i = index;}',
' void f(){i = index;}'],
[' function f():void{var i:int = Math.floor(index);}',
' void f(){int i = Mathf.Floor(index);}'],
]),
('class_definition', [
['class C{ var path:String = "as.g";}',
'class C{ string path = "as.g";}'],
['public class PC{ private static var index:int = 16;}',
'public class PC{ private static int index = 16;}'],
['internal class PC{ private static var index:int = 16;\nprivate var a:String;}',
'internal class PC{ private static int index = 16;\nprivate string a;}'],
['internal final class PC{ private static var index:int = 16;\nprivate var a:String;}',
'internal sealed class PC{ private static int index = 16;\nprivate string a;}'],
]),
('class_base_clause', [
[' extends B',
' : B'],
[' extends B implements IA',
' : B, IA'],
[' extends B implements IA, II',
' : B, IA, II'],
[' extends B implements IA, II',
' : B, IA, II'],
[' implements IPc',
' : IPc'],
[' extends It',
' : It'],
]),
('unary_expression', [
['a', 'a'],
['a++', 'a++'],
['b--', 'b--'],
['""', '""'],
['!a', '!a'],
['.0', '.0f'],
]),
('relational_expression', [
['a == b',
'a == b'],
['.0 == ""',
'.0f == ""'],
['a != b',
'a != b'],
['a < b',
'a < b'],
['a >= b',
'a >= b'],
]),
('contains_expression', [
['oil in italian.salad',
'italian.salad.ContainsKey(oil)'],
['Content in Container.Container',
'Container.Container.ContainsKey(Content)'],
]),
('conditional_function', [
['oil in salad',
'salad.ContainsKey(oil)'],
['!(apple in basket)',
'!basket.ContainsKey(apple)'],
]),
('logical_expression', [
['a.b.c >= x.y',
'a.b.c >= x.y'],
['a.b + 1.0 >= x.y',
'a.b + 1.0f >= x.y'],
['a.b + 1.0 >= y - 1',
'a.b + 1.0f >= y - 1'],
['(a + 1.0) >= y',
'(a + 1.0f) >= y'],
['!(a.b + 1.0 == y - 1) && c',
'!(a.b + 1.0f == y - 1) && c'],
]),
('conditional_expression', [
['a >= y',
'a >= y'],
]),
('if_statement', [
['if (a >= x) a = x;',
'if (a >= x) a = x;'],
['if (a) a = x;',
'if (a) a = x;'],
['if (!a) a = x;',
'if (!a) a = x;'],
['if (a.b.c >= x.y) a.b.c = x.y; else x.y = -1.0;',
'if (a.b.c >= x.y) a.b.c = x.y; else x.y = -1.0f;'],
]),
('statement', [
['a = 0;', 'a = 0;'],
['{}', '{}'],
['{a = 0; b = "c";}', '{a = 0; b = "c";}'],
['i++;', 'i++;'],
['--i;', '--i;'],
['for (i=0; i<L;i++){}',
'for (i=0; i<L;i++){}'],
]),
('if_statement', [
['if (a) {a = x; c = 1.0;}',
'if (a) {a = x; c = 1.0f;}'],
['if (a.b.c >= x.y) a.b.c = x.y; else {x.y = -1.0;}',
'if (a.b.c >= x.y) a.b.c = x.y; else {x.y = -1.0f;}'],
['if (a.b.c >= x.y) a.b.c = x.y; else {x.y = -1.0; z++;}',
'if (a.b.c >= x.y) a.b.c = x.y; else {x.y = -1.0f; z++;}'],
]),
('function_definition', [
[' function f():void{if (a){}}',
' void f(){if (a){}}'],
]),
('expression_list', [
['i-=s', 'i-=s']
]),
('iteration_statement', [
['for (i=0; i<L;i++){}',
'for (i=0; i<L;i++){}'],
['for (var i:int=0; i<L;i++){}',
'for (int i=0; i<L;i++){}'],
['for(;;);',
'for(;;);'],
['for(;; i++, j--);',
'for(;; i++, j--);'],
['for(;; i++, j--){break; continue;}',
'for(;; i++, j--){break; continue;}'],
['while(a == b){i++; j--;}',
'while(a == b){i++; j--;}'],
['while(true){i++; j--;}',
'while(true){i++; j--;}'],
['while( true ){i++; j--;}',
'while( true ){i++; j--;}'],
['do {i++; j--;}while(false)',
'do {i++; j--;}while(false)'],
['for(var key:String in items){text += key; a = key;}',
'foreach(KeyValuePair<string, object> _entry in items){string key = _entry.Key; text += key; a = key;}'],
['for(key in items){text += key;}',
'foreach(KeyValuePair<string, object> _entry in items){key = _entry.Key; text += key;}'],
['for (var i:int = cards.length - 1; 1 <= i; i--){}',
'for (int i = cards.Count - 1; 1 <= i; i--){}'],
['for (var i:int=0; i <= L; i -= s){}',
'for (int i=0; i <= L; i -= s){}'],
]),
('member_expression', [
[' internal var /*<delegate>*/ ActionDelegate:/*<void>*/*;',
' internal delegate /*<var>*/void ActionDelegate();'],
[' internal var onComplete:/*<ActionDelegate>*/Function;',
' internal /*<Function>*/ActionDelegate onComplete;'],
[' public var /*<delegate>*/ IsJustPressed:Boolean, letter:String;',
' public delegate bool IsJustPressed(string letter);'],
[' public function getPresses(justPressed:/*<IsJustPressed>*/Function):Array{}',
' public ArrayList getPresses(/*<Function>*/IsJustPressed justPressed){}'],
]),
('block', [
['{var word:Vector.<String>; available = word.concat();}',
'{List<string> word; available = new List<string>(word);}'],
]),
('compilation_unit', [
['package{public class C{}}',
'public class C{}'],
['package{class C{}}',
'class C{}'],
['package{public class C{}}',
'public class C{}'],
]),
('statement', [
['return;',
'return;'],
['return a ? b : c;',
'return a ? b : c;'],
['return "object" === typeof(value);',
'return object.ReferenceEquals("object", typeof(value));'],
['f(a ? b : c);',
'f(a ? b : c);'],
['delete container[key];',
'container.Remove(key);'],
['throw new Error("message");',
'throw new System.InvalidOperationException("message");'],
['a.sort();',
'a.Sort();'],
# Not supported:
# ['return f()[i];',
# 'return f()[i];'],
# ['f(a.split(",")[0])',
# 'f(a.split(",")[0])'],
['break;',
'break;'],
['continue;',
'continue;'],
]),
# ASUnit to NUnit:
('call_expression', [
['assertEquals(expected, got)',
'Assert.AreEqual(expected, got)'],
['assertEquals(message, expected, got)',
'Assert.AreEqual(expected, got, message)'],
]),
('function_declaration', [
[' public function testThis():void',
' [Test] public void This()'],
[' /*comment*/public function testThis():void',
' /*comment*/[Test] public void This()'],
]),
('class_definition', [
['internal class TestThis extends TestCase{}',
'[TestFixture] internal class TestThis{}'],
]),
('import_definition', [
['import asunit.framework.TestCase;',
'using NUnit.Framework;'],
]),
# Complex cases:
('variable_declaration', [
['var columnOffset:int = offset == 0 ? -1 : 1',
'int columnOffset = offset == 0 ? -1 : 1'],
['var isChanged:Boolean = g == grid[index] || g == gridPreviously[index]',
'bool isChanged = g == grid[index] || g == gridPreviously[index]'],
]),
('expression', [
['power(trimmed).toString()',
'power(trimmed).ToString()'],
['new <int>[1, 0]',
'new List<int>(){1, 0}'],
['new <int>[]',
'new List<int>(){}'],
['int(Math.random() * (i + 1))',
'(int)((Random.value % 1.0f) * (i + 1))'],
['null == a && a is Array',
'null == a && a is ArrayList'],
['null == a || a is Array',
'null == a || a is ArrayList'],
['(null == hash) || (key in hash)',
'(null == hash) || (hash.ContainsKey(key))'],
# Not supported:
# ['null == hash || key in hash',
# 'null == hash || hash.ContainsKey(key)'],
# Collection literals:
['[]', 'new ArrayList(){}'],
['[a, 1.0, ""]', 'new ArrayList(){a, 1.0f, ""}'],
['{}', 'new Dictionary<string, object>(){}'],
['{a: b, "1.0": 2.0}', 'new Dictionary<string, object>(){{"a", b}, {"1.0", 2.0f}}'],
['{a: {b: "1.0"}}', 'new Dictionary<string, object>(){{"a", new Dictionary<string, object>(){{"b", "1.0"}}}}'],
]),
('class_definition', [
['class C{\n var a:int;\n var b:int;\n}',
'class C{\n int a;\n int b;\n}'],
]),
('compilation_unit', [
['package P{import A.B;\n\npublic class C{}}',
'using A/*<B>*/;\nnamespace P{\n public class C{}}'],
['package P{\npublic class C1{}}',
'namespace P{\n public class C1{}}'],
['package P{public class C2{}}',
'namespace P{public class C2{}}'],
['package N{\npublic class C3{}}',
'namespace N{\n public class C3{}}'],
['package N\n{\npublic class C4{}}',
'namespace N\n{\n public class C4{}}'],
['//c\npackage N\n{\npublic class C5{}}',
'//c\nnamespace N\n{\n public class C5{}}'],
['package N\n{\n//c\npublic class C7{}}',
'namespace N\n{\n //c\n public class C7{}}'],
['/*c*/\npackage N\n{\npublic class C6{}}',
'/*c*/\nnamespace N\n{\n public class C6{}}'],
['package N{ class C{ var a:Vector.<String>;}}',
'using System.Collections.Generic;\nnamespace N{ class C{ List<string> a;}}'],
]),
]
one_ways = {
'as': {
'js': [
#('expression', [
# ['int(path)',
# 'Math.floor(path)'],
# ]),
],
'cs': [
('literal', [
['undefined',
'null'],
]),
('data_type', [
['Dictionary',
'Hashtable'],
['Sprite',
'GameObject'],
['MovieClip',
'GameObject'],
['TextField',
'GameObject'],
]),
('expression', [
['[[a, b]]', 'new ArrayList(){new ArrayList(){a, b}}'],
['[{a: b}]', 'new ArrayList(){new Dictionary<string, object>(){{"a", b}}}'],
]),
('class_definition', [
['class C{\n var a:int;\n var repeat:Object = {};\n var b:int;\n}',
'class C{\n int a;\n Dictionary<string, object> repeat = new Dictionary<string, object>(){};\n int b;\n}'],
]),
('import_definition', [
['import flash.display.Sprite;',
'// using flash.display.Sprite;'],
]),
('import_definition_place', [
['import flash.display.Sprite;\nimport flash.display.MovieClip;',
'// using flash.display.Sprite;\n// using flash.display.MovieClip;'],
]),
]},
'cs': {'as': [
('number_format', [
['3.5',
'3.5F'],
]),
]},
'js': {'as':[
#('expression', [
# ['Math.floor(path)',
# 'Math.floor(path)'],
# ]),
]},
}
case_definitions = [
('compilation_unit', [
['package org.p{import com.a.b; class C{}}',
'using /*<com>*/A/*<b>*/;namespace /*<org>*/P{ class C{}}'],
]),
('import_definition', [
['import com.finegamedesign.anagram.Model;',
'using /*<com>*/Finegamedesign.Anagram/*<Model>*/;'],
]),
('function_definition', [
[' function doThis():void{ doThis();}',
' void DoThis(){ DoThis();}'],
[' function doThis():void{ doThis(); b.do(); A.B.c.go();}',
' void DoThis(){ DoThis(); b.Do(); A.B.c.Go();}'],
[' function doThis():void{ doThis(); b.do(); A.B.c.go(); f = int(a);}',
' void DoThis(){ DoThis(); b.Do(); A.B.c.Go(); f = (int)(a);}'],
]),
('expression', [
['"as.g"',
'"as.g"'],
['0.125',
'0.125f'],
['a % b',
'a % b'],
['((a + 2) % b)',
'((a + 2) % b)'],
['a ~ b',
'a ~ b'],
['a && b',
'a && b'],
['a || b',
'a || b'],
['new C(a, b)',
'new C(a, b)'],
['typeof(index)',
'typeof(index)'],
['parseInt(s)',
'int.Parse(s)'],
['parseFloat(s)',
'float.Parse(s)'],
['path as a.b.string',
'path as a.b.string'],
['path as String',
'path as string'],
['int(path)',
'(int)(path)'],
['Number(path)',
'(float)(path)'],
['paths.length',
'paths.Count'],
['paths.push(p)',
'paths.Add(p)'],
['paths.indexOf(p)',
'paths.IndexOf(p)'],
['paths.splice(p, 1)',
'paths.RemoveRange(p, 1)'],
# ['paths.concat()',
# 'new ArrayList(paths)'],
['paths.lengths',
'paths.lengths'],
['paths.length.i',
'paths.length.i'],
['paths.push.i',
'paths.push.i'],
['name.toLowerCase',
'name.ToLower'],
['name.lastIndexOf',
'name.LastIndexOf'],
['trace(s)',
'Debug.Log(s)'],
['a.trace(s)',
'a.Trace(s)'],
['Math.floor(a)',
'Mathf.Floor(a)'],
['a.Math.floor(index)',
'a.Math.Floor(index)'],
['Math.PI',
'Mathf.PI'],
['Math.random()',
'(Random.value % 1.0f)'],
['my.Math.random()',
'my.Math.Random()'],
['Math',
'Math'],
['-a',
'-a'],
[' ++i',
' ++i'],
['-- j',
'-- j'],
# Not supported:
# ['pools["Explosion"].next()',
# 'pools["Explosion"].Next()'],
]),
('data_declaration', [
['const path:String',
'const string path'],
]),
('data_type', [
['int', 'int'],
['String', 'string'],
['Boolean', 'bool'],
['Number', 'float'],
['Custom', 'Custom'],
['Array', 'ArrayList'],
['Object', 'Dictionary<string, object>'],
['*', 'var'],
['A.B.C', 'A.B.C'],
['Vector.<String>', 'List<string>'],
['Vector.<Point>', 'List<Vector2>'],
['Vector.<DisplayObject>', 'List<Collider2D>'],
['Vector.<DisplayObjectContainer>', 'List<GameObject>'],
]),
('identifier', [
['_a', '_a'],
['_2', '_2'],
['I', 'I'],
['b', 'b'],
]),
('address', [
['_a', '_a'],
['salad', 'salad'],
['OIL', 'OIL'],
['_0._1._2', '_0._1._2'],
['a[i]', 'a[i]'],
['_0._1[a.b]._2', '_0._1[a.b]._2'],
['_0._1[a % b]._2', '_0._1[a % b]._2'],
]),
('import_definition', [
['import _._2;',
'using _/*<_2>*/;'],
['import _.*;',
'using _/*<*>*/;'],
]),
('class_definition', [
['class C{}', 'class C{}'],
['public class PC{}', 'public class PC{}'],
['internal class IC{}', 'internal class IC{}'],
]),
('ts', [
['/*c*/', '/*c*/'],
['//c', '//c'],
['// var i:int;', '// var i:int;'],
]),
('namespace_modifiers_place', [
['public ',
'public '],
['private static ',
'private static '],
['static private ',
'static private '],
]),
('function_declaration', [
[' function f():void',
' void F()'],
[' function g( ):void',
' void G( )'],
]),
('function_definition', [
[' function f():void{}',
' void F(){}'],
[' function f():void{}',
' void F(){}'],
[' public function f():void{}',
' public void F(){}'],
[' internal function isF():Boolean{}',
' internal bool IsF(){}'],
[' protected function getF():Number{}',
' protected float GetF(){}'],
[' function f(){i = index;}',
' F(){i = index;}'],
# Not supported:
# [' function f():*{}',
# ' var f(){}'],
]),
('function_declaration', [
[' function f(path:String, index:int):void',
' void F(string path, int index)'],
[' private function isF(index:int, isEnabled:Boolean, a:Number):Boolean',
' private bool IsF(int index, bool isEnabled, float a)'],
['\n\n private static function shuffle(cards:Array):void',
'\n\n private static void Shuffle(ArrayList cards)'],
]),
('function_declaration', [
[' function f(path:String, index:int = -1):void',
' void F(string path, int index = -1)'],
[' private function isF(index:int, isEnabled:Boolean, a:Number=NaN):Boolean',
' private bool IsF(int index, bool isEnabled, float a=NaN)'],
]),
('function_definition', [
[' function f():void{var i:int = index;}',
' void F(){int i = index;}'],
[' function f():void{i = index;}',
' void F(){i = index;}'],
[' function f():void{var i:int = Math.floor(index);}',
' void F(){int i = Mathf.Floor(index);}'],
]),
('contains_expression', [
['oil in italian.salad',
'italian.salad.ContainsKey(oil)'],
['Content in Container.Container',
'Container.Container.ContainsKey(Content)'],
]),
('conditional_function', [
['oil in salad',
'salad.ContainsKey(oil)'],
['!(apple in basket)',
'!basket.ContainsKey(apple)'],
]),
# ASUnit to NUnit:
('call_expression', [
['assertEquals(expected, got)',
'Assert.AreEqual(expected, got)'],
['assertEquals(message, expected, got)',
'Assert.AreEqual(expected, got, message)'],
]),
('function_declaration', [
[' public function testThis():void',
' [Test] public void This()'],
[' /*comment*/public function testThis():void',
' /*comment*/[Test] public void This()'],
]),
('class_definition', [
['internal class TestThis extends TestCase{}',
'[TestFixture] internal class TestThis{}'],
]),
('import_definition', [
['import asunit.framework.TestCase;',
'using NUnit.Framework;'],
]),
# Complex cases:
('expression', [
['power(trimmed).toString()',
'Power(trimmed).ToString()'],
]),
]
# Snapshot the configured conversion direction so the test methods can
# restore cfg['source'] / cfg['to'] after temporarily overriding them.
original_source = cfg['source']
original_to = cfg['to']
def print_expected(expected, got, input, definition, index, err):
    """Print a diagnostic report for one failed conversion example.

    expected/got are the expected and actual converted strings (got may be
    None when conversion raised), input is the source snippet, definition is
    the grammar rule name, index is the example's position in its list, and
    err is the exception raised, if any.
    """
    difference = format_difference(expected, got)
    if got is None:
        # str(err) is valid for every exception type; the old err.message
        # attribute raised AttributeError for most modern exceptions.
        got = str(err)
    tag_text = ''
    try:
        tag_text = format_taglist(input, definition)[:taglist_head]
    except Exception:
        # Tag-list formatting is best-effort diagnostics only; never let it
        # mask the original failure. (Was a bare except, which also hid
        # KeyboardInterrupt.)
        tag_text = 'Failed formatting.'
    message = (''
               + '\nConverting from %s to %s' % (cfg['source'], cfg['to'])
               + '\n' + definition + ' ' + str(index)
               + '\n' + 'Input (first 200 characters):'
               + '\n' + input[:200]
               + '\n' + 'Difference (expected to got, first 500 characters):'
               + '\n' + difference[:500]
               + '\n' + 'Tag parts (first 500 characters):'
               + '\n' + tag_text)
    # Avoid UnicodeEncodeError on consoles with a narrow default encoding.
    message = message.encode('ascii', 'replace')
    print(message)
class TestDefinitions(TestCase):
    """Drives the example tables (`definitions`, `case_definitions`,
    `one_ways`) through `convert` in every configured direction."""

    def assertExample(self, definition, expected, input, index):
        """Convert one example and assert the result; on failure, print a
        full diagnostic report (and optionally drop into pdb) before
        re-raising."""
        got = None
        try:
            expected = may_format(definition, expected)
            # Optional targeted debugging: break only on the definitions,
            # example indexes and source languages selected above.
            if definition in debug_definitions:
                if not debug_indexes or index in debug_indexes:
                    if cfg['source'] in debug_source:
                        import pdb
                        pdb.set_trace()
            got = convert(input, definition)
            self.assertEqual(expected, got)
        except Exception as err:
            # Parenthesized form prints identically under Python 2 (as a
            # statement) and Python 3 (as a call); the previous bare print
            # statement was Python-2-only and inconsistent with the rest of
            # this file.
            print('is_conform_case: %r' % cfg['is_conform_case'])
            print_expected(expected, got, input, definition, index, err)
            if is_debug_fail:
                # Re-run the failing conversion under the debugger.
                import pdb
                pdb.set_trace()
                got = convert(input, definition)
                self.assertEqual(expected, got)
            raise err

    def test_definitions(self):
        """Check all examples, first case-preserving then case-conforming."""
        self.assert_definitions_case(False, definitions)
        self.assert_definitions_case(True, case_definitions)

    def assert_definitions_case(self, is_conform_case, definitions):
        """Assert every example in `definitions` for every direction;
        one-way examples are appended only in the non-conforming pass."""
        cfg['is_conform_case'] = is_conform_case
        for source, to, source_index, to_index in directions:
            cfg['source'] = source
            cfg['to'] = to
            if is_conform_case:
                these_definitions = definitions
            else:
                these_definitions = definitions + one_ways[source][to]
            for definition, examples in these_definitions:
                reset()
                for example_index, example in enumerate(examples):
                    # Skip examples that have no entry for this direction.
                    if to_index < len(example) and source_index < len(example):
                        expected = example[to_index]
                        input = example[source_index]
                        self.assertExample(definition, expected, input, example_index)
        cfg['source'] = original_source
        cfg['to'] = original_to

    def DISABLED_test_files(self):
        """Whole-file conversion comparison against test/*.<lang> fixtures
        (disabled by naming convention)."""
        for source, to, s, t in directions:
            cfg['source'] = source
            cfg['to'] = to
            pattern = 'test/*.%s' % cfg['source']
            paths = glob(realpath(pattern))
            expected_gots = compare_files(paths)
            definition = 'compilation_unit'
            for index, expected_got in enumerate(expected_gots):
                expected, got = expected_got
                expected = may_format(definition, expected)
                path = paths[index]
                try:
                    self.assertEqual(expected, got)
                except Exception as err:
                    # Read via a context manager so the handle is closed even
                    # though we are about to re-raise.
                    with open(path) as source_file:
                        source_text = source_file.read()
                    print_expected(expected, got, source_text, definition, index, err)
                    raise err
        cfg['source'] = original_source
        cfg['to'] = original_to

    def test_quote(self):
        """Both languages use the double-quote character for strings."""
        self.assertEqual('"', literals['cs']['QUOTE'])
        self.assertEqual('"', literals['as']['QUOTE'])
if '__main__' == __name__:
main()
| 2.703125 | 3 |
test/terra/backends/test_qasm_stabilizer_simulator.py | eliarbel/qiskit-aer | 0 | 12762999 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
QasmSimulator Integration Tests
"""
import unittest
from test.terra import common
from test.terra.backends.qasm_simulator.qasm_method import QasmMethodTests
from test.terra.backends.qasm_simulator.qasm_measure import QasmMeasureTests
from test.terra.backends.qasm_simulator.qasm_reset import QasmResetTests
from test.terra.backends.qasm_simulator.qasm_conditional import QasmConditionalTests
from test.terra.backends.qasm_simulator.qasm_cliffords import QasmCliffordTests
from test.terra.backends.qasm_simulator.qasm_algorithms import QasmAlgorithmTests
from test.terra.backends.qasm_simulator.qasm_extra import QasmExtraTests
class TestQasmStabilizerSimulator(common.QiskitAerTestCase,
                                  QasmMethodTests,
                                  QasmMeasureTests,
                                  QasmResetTests,
                                  QasmConditionalTests,
                                  QasmCliffordTests,
                                  QasmAlgorithmTests,
                                  QasmExtraTests):
    """QasmSimulator stabilizer method tests.

    All test cases come from the mixin classes above; this class only
    selects the simulation method they run against.
    """
    # Passed by the mixins to the backend so every test uses the
    # stabilizer simulation method.
    BACKEND_OPTS = {"method": "stabilizer"}
if __name__ == '__main__':
unittest.main()
| 1.78125 | 2 |
05_hydrothermal_venture.py | KanegaeGabriel/advent-of-code-2021 | 2 | 12763000 | #######################################
# --- Day 5: Hydrothermal Venture --- #
#######################################
from collections import defaultdict
import AOCUtils
def get_overlap_count(vents, part_two=False):
    """Count grid points covered by two or more vent lines.

    Each vent is a pair (start, end) of (x, y) integer tuples, pre-sorted so
    that start <= end.  Horizontal and vertical lines always count; the
    45-degree diagonals are included only when part_two is True.
    """
    counts = defaultdict(int)
    for start, end in vents:
        x0, y0 = start
        x1, y1 = end
        if x0 == x1:
            step = (0, 1)                 # vertical (sorted, so y0 <= y1)
        elif y0 == y1:
            step = (1, 0)                 # horizontal
        elif part_two:
            step = (1, 1) if y0 < y1 else (1, -1)   # 45-degree diagonal
        else:
            continue                      # diagonals ignored in part one
        x, y = x0, y0
        counts[(x, y)] += 1
        while (x, y) != (x1, y1):
            x, y = x + step[0], y + step[1]
            counts[(x, y)] += 1
    return sum(1 for hits in counts.values() if hits > 1)
#######################################
raw_vents = AOCUtils.load_input(5)

# Parse each line of the form "x1,y1 -> x2,y2" into a pair of (x, y)
# tuples.  Endpoints are sorted so that start <= end lexicographically;
# get_overlap_count relies on this ordering when choosing step direction.
vents = []
for raw_vent in raw_vents:
    raw_start, raw_end = raw_vent.split(' -> ')
    start = tuple(map(int, raw_start.split(',')))
    end = tuple(map(int, raw_end.split(',')))
    vent = tuple(sorted([start, end]))
    vents.append(vent)

print(f'Part 1: {get_overlap_count(vents)}')
print(f'Part 2: {get_overlap_count(vents, part_two=True)}')

AOCUtils.print_time_taken()
data_generation/convert_to_tfrecord.py | anguillanneuf/next18-ai-in-motion | 10 | 12763001 | <gh_stars>1-10
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# source: https://github.com/sararob/tswift-detection/blob/master/convert_to_tfrecord.py
import os
import io
import xml.etree.ElementTree as ET
import tensorflow as tf
from PIL import Image
flags = tf.app.flags
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
flags.DEFINE_string('images_dir', '', 'Path to directory of images')
flags.DEFINE_string('labels_dir', '', 'Path to directory of labels')
FLAGS = flags.FLAGS
# helper functions
# https://github.com/tensorflow/models/blob/master/research/object_detection/utils/dataset_util.py
def int64_feature(value):
    """Wrap a single int in a tf.train.Feature (int64_list of length 1)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
    """Wrap an iterable of ints in a tf.train.Feature (int64_list)."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
    """Wrap a single bytes value in a tf.train.Feature (bytes_list of length 1)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def bytes_list_feature(value):
    """Wrap an iterable of bytes values in a tf.train.Feature (bytes_list)."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
    """Wrap an iterable of floats in a tf.train.Feature (float_list)."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def create_tf_example(example):
    """Build a tf.train.Example for one image and its XML label file.

    `example` is the image's filename inside FLAGS.images_dir; the matching
    label XML is looked up in FLAGS.labels_dir by the same basename with a
    .xml extension.
    """
    image_path = os.path.join(FLAGS.images_dir, example)
    labels_path = os.path.join(FLAGS.labels_dir, os.path.splitext(example)[0] + '.xml')

    # Read the image and re-encode it to raw bytes in its original format.
    img = Image.open(image_path)
    width, height = img.size
    img_bytes = io.BytesIO()
    img.save(img_bytes, format=img.format)
    # (Removed the redundant `height = height` / `width = width`
    # self-assignments that were here.)
    encoded_image_data = img_bytes.getvalue()
    image_format = img.format.encode('utf-8')

    # Read the label XML: one bounding box and class per <object> element.
    tree = ET.parse(labels_path)
    root = tree.getroot()

    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []
    for object_ in root.iter('object'):
        bbox = object_.find('bndbox')
        class_ = object_.find('class')
        # NOTE(review): coordinates are written as-is; confirm the XML
        # already stores them normalized to [0, 1] as the TF Object
        # Detection API expects.
        xmins.append(float(bbox.find('xmin').text))
        xmaxs.append(float(bbox.find('xmax').text))
        ymins.append(float(bbox.find('ymin').text))
        ymaxs.append(float(bbox.find('ymax').text))
        classes_text.append(class_.find('text').text.encode('utf-8'))
        classes.append(int(class_.find('label').text))

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
        'image/filename': bytes_feature(example.encode('utf-8')),
        'image/source_id': bytes_feature('overlay.py'.encode('utf-8')),
        'image/encoded': bytes_feature(encoded_image_data),
        'image/format': bytes_feature(image_format),
        'image/object/bbox/xmin': float_list_feature(xmins),
        'image/object/bbox/xmax': float_list_feature(xmaxs),
        'image/object/bbox/ymin': float_list_feature(ymins),
        'image/object/bbox/ymax': float_list_feature(ymaxs),
        'image/object/class/text': bytes_list_feature(classes_text),
        'image/object/class/label': int64_list_feature(classes),
    }))
    return tf_example
def main(_):
    """Serialize every image in FLAGS.images_dir (with its XML label) into a
    single TFRecord file at FLAGS.output_path."""
    # The context manager guarantees the record file is flushed and closed
    # even if create_tf_example raises partway through (the original
    # explicit close() was skipped on error).
    with tf.python_io.TFRecordWriter(FLAGS.output_path) as writer:
        for filename in os.listdir(FLAGS.images_dir):
            tf_example = create_tf_example(filename)
            writer.write(tf_example.SerializeToString())
if __name__ == '__main__':
tf.app.run() | 1.984375 | 2 |
apps/accounts/views/confirm_email.py | vicobits/django-wise | 5 | 12763002 | # -*- coding: utf-8 -*-
from django.views import View
from django.shortcuts import render
from apps.accounts.models.choices import ActionCategory
from apps.accounts.models.pending_action import PendingAction
from apps.accounts.services.auth_service import AuthService
class ConfirmEmailView(View):
    """Renders the email confirmation page for a one-time token link."""

    def get(self, request, token, **kwargs):
        """Confirm the email tied to `token` and render the result page.

        A valid token resolves a pending CONFIRM_EMAIL action; the user and
        optional 'next' redirect target are exposed to the template and the
        confirmation is applied.  An unknown token renders the same template
        with user=None so it can show the failure state.
        """
        context = {}
        try:
            # The token must belong to a pending email-confirmation action,
            # not some other pending-action category.
            pending_action = PendingAction.objects.get(
                token=token,
                category=ActionCategory.CONFIRM_EMAIL,
            )
            context['user'] = pending_action.user
            # Optional post-confirmation redirect stored when the action
            # was created.
            context['next'] = pending_action.extra.get('next')
            AuthService.confirm_email(pending_action)
        except PendingAction.DoesNotExist:
            context['user'] = None
        return render(request, 'transactions/confirm_email.html', context)
| 1.867188 | 2 |
machine_learn2.py | agw2105/Natural-Language-Processing | 1 | 12763003 | <filename>machine_learn2.py
import pandas as pd
import nltk
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as numpy
from sklearn.pipeline import Pipeline
from nltk.stem import PorterStemmer
from nltk import word_tokenize
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
# Import the labeled training data: one row per document with its title
# text and target class.
df = pd.read_csv("training_set.csv")
data_frame = pd.DataFrame(df, columns = ["Title", "Class"], index=None)
# Optional random shuffle of the rows (train_test_split below already
# shuffles, so this stays disabled).
#data_frame = data_frame.reindex(numpy.random.permutation(data_frame.index))
def stemming_tokenizer(text):
    """Tokenize `text` and reduce each token to its Porter stem."""
    porter = PorterStemmer()
    stems = []
    for token in word_tokenize(text):
        stems.append(porter.stem(token))
    return stems
# Stratified train/test split, then fit a stemmed tf-idf + multinomial
# naive Bayes pipeline and report hold-out accuracy.
data_train, data_test, y_train, y_true = \
	train_test_split(data_frame['Title'], data_frame['Class'], stratify=data_frame['Class'], test_size=0.2, random_state=42)

trial = Pipeline([('vectorizer', TfidfVectorizer(tokenizer = stemming_tokenizer)),('classifier', MultinomialNB(alpha=0.05))])

model = trial.fit(data_train, y_train)
y_test = model.predict(data_test)
print(sklearn.metrics.accuracy_score(y_true, y_test))

# Derive the label list from the data. (The original line
# `labels = [#list of labels]` was a syntax error: the inline comment
# swallowed the closing bracket.)
labels = sorted(data_frame['Class'].unique())

# Confusion matrix over the hold-out predictions, rendered as a heatmap.
cm = sklearn.metrics.confusion_matrix(y_true, y_test, labels=labels)
df_cm = pd.DataFrame(cm, index=list(labels), columns=list(labels))
sns.heatmap(df_cm, annot=True)

# 10-fold cross-validated accuracy over the full data set. (The unused
# ShuffleSplit object previously built here was removed: cross_val_score
# was called with cv=10, so it never took effect.)
scores = cross_val_score(trial, data_frame.Title, data_frame.Class, cv=10)
print(scores.mean())
build/lib/pyhamilton/interface.py | dgretton/pyhamilton | 59 | 12763004 | <gh_stars>10-100
import time, json, signal, os, requests, string, logging, subprocess, win32gui, win32con
from http import server
from threading import Thread
from multiprocessing import Process
from pyhamilton import OEM_RUN_EXE_PATH, OEM_HSL_PATH
from .oemerr import * #TODO: specify
from .defaultcmds import defaults_by_cmd
class HamiltonCmdTemplate:
    """Template for one OEM command: knows the command's name, its required
    parameter keys, and any default values registered in defaults_by_cmd."""

    @staticmethod
    def unique_id():
        """Return a hex id derived from the current time (microsecond
        resolution, wraps every 10 hours)."""
        return hex(int((time.time()%3600e4)*1e6))

    def __init__(self, cmd_name, params_list):
        # cmd_name: OEM command string; params_list: every parameter key a
        # fully assembled command must carry (besides 'command' and 'id').
        self.cmd_name = cmd_name
        self.params_list = params_list
        if cmd_name in defaults_by_cmd:
            const_name, default_dict = defaults_by_cmd[cmd_name]
            # Only keep defaults that actually have a value; None marks
            # "no default" in the registry.
            self.defaults = {k:v for k, v in default_dict.items() if v is not None}
        else:
            self.defaults = {}

    def assemble_cmd(self, *args, **kwargs):
        """Build a complete command dict: defaults overlaid with the given
        keyword arguments, plus a fresh unique id. Raises ValueError if the
        result does not carry exactly the template's parameter set."""
        if args:
            raise ValueError('assemble_cmd can only take keyword arguments.')
        assembled_cmd = {'command':self.cmd_name, 'id':HamiltonCmdTemplate.unique_id()}
        assembled_cmd.update(self.defaults)
        assembled_cmd.update(kwargs)
        self.assert_valid_cmd(assembled_cmd)
        return assembled_cmd

    def assert_valid_cmd(self, cmd_dict):
        """Raise ValueError unless cmd_dict has the right command name and
        exactly the expected keys; the error message shows expected and
        given keys side by side with mismatches flagged '(?)'."""
        prefix = 'Assert valid command "' + self.cmd_name + '" failed: '
        if 'id' not in cmd_dict:
            raise ValueError(prefix + 'no key "id"')
        if 'command' not in cmd_dict:
            raise ValueError(prefix + 'no key "command"')
        if cmd_dict['command'] != self.cmd_name:
            raise ValueError(prefix + 'command name "' + cmd_dict['command'] + '" does not match')
        needs = set(['command', 'id'])
        needs.update(self.params_list)
        givens = set(cmd_dict.keys())
        if givens != needs:
            # Format a two-column diff: expected keys on the left, given
            # keys on the right, each sorted; keys missing from the other
            # side are prefixed with ' (?) '.
            prints = [prefix + 'template parameter keys (left) do not match given keys (right)\n']
            q_mark = ' (?) '
            l_col_space = 4
            # Right column starts after the longest expected key plus the
            # question-mark prefix.
            r_col_space = max((len(key) for key in needs)) + len(q_mark) + 1
            needs_l = sorted(list(needs))
            givens_l = sorted(list(givens))
            while needs_l or givens_l:
                if needs_l:
                    lval = needs_l.pop(0)
                    if lval not in givens:
                        lval = q_mark + lval
                else:
                    lval = ''
                if givens_l:
                    rval = givens_l.pop(0)
                    if rval not in needs:
                        rval = q_mark + rval
                else:
                    rval = ''
                prints.append(' '*l_col_space + lval + ' '*(r_col_space - len(lval)) + rval)
            raise ValueError('\n'.join(prints))
# Build a HamiltonCmdTemplate for every registered command and expose each
# one as a module-level constant under its configured name (e.g. so callers
# can write `interface.send_command(ASPIRATE, ...)`).
_builtin_templates_by_cmd = {}
for cmd in defaults_by_cmd:
    const_name, default_dict = defaults_by_cmd[cmd]
    const_template = HamiltonCmdTemplate(cmd, list(default_dict.keys()))
    globals()[const_name] = const_template
    _builtin_templates_by_cmd[cmd] = const_template
def _make_new_hamilton_serv_handler(resp_indexing_fn):
    """Create a fresh BaseHTTPRequestHandler subclass whose class-level
    queue/response state is private to one server instance.

    resp_indexing_fn maps a raw POST body to the key under which the
    response is stored (or None to drop it). A new class is built per call
    because the queue and response dict live on the class itself.
    """
    class HamiltonServerHandler(server.BaseHTTPRequestHandler):
        # Outgoing command strings, drained one per GET request.
        _send_queue = []
        # POSTed response bodies, keyed by resp_indexing_fn's result.
        indexed_responses = {}
        indexing_fn = resp_indexing_fn
        # NOTE(review): defined but never enforced anywhere in view —
        # indexed_responses can grow without bound; confirm intent.
        MAX_QUEUED_RESPONSES = 1000
        @staticmethod
        def send_str(cmd_str):
            """Queue a command (str or bytes) for the next GET poll."""
            if not isinstance(cmd_str, b''.__class__):
                if isinstance(cmd_str, ''.__class__):
                    cmd_str = cmd_str.encode()
                else:
                    raise ValueError('send_command can only send strings, not ' + str(cmd_str))
            HamiltonServerHandler._send_queue.append(cmd_str)
        @staticmethod
        def has_queued_cmds():
            """True if at least one command is waiting to be polled."""
            return bool(HamiltonServerHandler._send_queue)
        @staticmethod
        def pop_response(idx):
            """Remove and return (decoded) the response stored under idx;
            raises KeyError if no response with that index has arrived."""
            ir = HamiltonServerHandler.indexed_responses
            if idx not in ir:
                raise KeyError('No response received with index ' + str(idx))
            return ir.pop(idx).decode()
        def _set_headers(self):
            # Minimal 200/HTML header for every reply.
            self.send_response(200)
            self.send_header('Content-type', 'text/HTML')
            self.end_headers()
        def do_GET(self):
            # The remote side polls with GET; reply with the next queued
            # command, or an empty body if there is nothing to send.
            sq = HamiltonServerHandler._send_queue
            response_to_send = sq.pop(0) if sq else b''
            self._set_headers()
            self.wfile.write(response_to_send)
        def do_HEAD(self):
            self._set_headers()
        def do_POST(self):
            # The remote side POSTs responses; store the body under the key
            # chosen by indexing_fn (dropped when it returns None).
            content_len = int(self.headers.get('content-length', 0))
            post_body = self.rfile.read(content_len)
            self._set_headers()
            self.wfile.write(b'<html><body><h1>POST!</h1></body></html>')
            ir = HamiltonServerHandler.indexed_responses
            index = HamiltonServerHandler.indexing_fn(post_body)
            if index is None:
                return
            ir[index] = post_body
        def log_message(self, *args, **kwargs):
            # Silence the default per-request stderr logging.
            pass
    return HamiltonServerHandler
def run_hamilton_process():
    """Run the Hamilton OEM method in this (child) process and block.

    Loads the vendor CLR assemblies, starts the HSL method, then idles until
    an external signal (e.g. KeyboardInterrupt from the parent) ends the
    process. Raises RuntimeError if the CLR module cannot be imported.
    """
    import clr
    from pyhamilton import OEM_STAR_PATH, OEM_HSL_PATH
    clr.AddReference(os.path.join(OEM_STAR_PATH, 'RunHSLExecutor'))
    clr.AddReference(os.path.join(OEM_STAR_PATH, 'HSLHttp'))
    try:
        from RunHSLExecutor import Class1
    except ModuleNotFoundError:
        raise RuntimeError('RunHSLExecutor DLLs successfully located, but an internal '
                           'error prevented import as a CLR module. You might be '
                           'missing the standard Hamilton software suite HSL '
                           'executables, their DLLs may not be registered with Windows, '
                           'or they may not be located in the expected system '
                           'directory.')
    C = Class1()
    C.StartMethod(OEM_HSL_PATH)
    try:
        while True:
            # Sleep instead of the original `pass` busy-wait, which pinned a
            # CPU core; an external signal still interrupts the sleep.
            time.sleep(1)
    except BaseException:
        # Any interruption (KeyboardInterrupt, SystemExit, ...) simply ends
        # the process; the parent interface handles cleanup. Explicit
        # BaseException replaces the original bare `except:` with identical
        # semantics.
        pass
# Field names carried by a Hamilton error "block", and the Python type each
# field is coerced to (the two tuples are parallel: four ints then three
# strings). NOTE(review): the parsing that consumes these is outside this
# chunk — confirm ordering stays in sync with it.
_block_numfield = 'Num'
_block_mainerrfield = 'MainErr'
BLOCK_FIELDS = _block_numfield, _block_mainerrfield, 'SlaveErr', 'RecoveryBtnId', 'StepData', 'LabwareName', 'LabwarePos'
_block_field_types = int, int, int, int, str, str, str
class HamiltonInterface:
    """Top-level client for driving a Hamilton liquid-handling robot.

    Owns two collaborators:
      * an OEM process (real run) or the HxRun simulator (``simulate=True``)
        that executes the HSL method, and
      * a local HTTP server thread that the OEM side polls (GET) for queued
        commands and POSTs JSON responses back to, indexed by their 'id'.
    Usable as a context manager: ``with HamiltonInterface() as ham: ...``.
    """

    known_templates = _builtin_templates_by_cmd  # command name -> command template
    default_port = 3221
    default_address = '127.0.0.1' # localhost

    class HamiltonServerThread(Thread):
        """Thread that runs the local HTTP bridge server."""

        def __init__(self, address, port):
            Thread.__init__(self)
            self.server_address = (address, port)
            self.should_continue = True
            self.exited = False
            def index_on_resp_id(response_str):
                # Index POSTed responses by their JSON 'id' field; bodies that
                # are not JSON or lack an id are dropped (indexed as None).
                try:
                    response = json.loads(response_str)
                    if 'id' in response:
                        return response['id']
                    return None
                except json.decoder.JSONDecodeError:
                    return None
            self.server_handler_class = _make_new_hamilton_serv_handler(index_on_resp_id)
            self.httpd = None

        def run(self):
            self.exited = False
            self.httpd = server.HTTPServer(self.server_address, self.server_handler_class)
            # handle_request() blocks per request, so after disconnect() one
            # more dummy request may be needed to unblock this loop
            # (see HamiltonInterface.stop).
            while self.should_continue:
                self.httpd.handle_request()
            self.exited = True

        def disconnect(self):
            # Request loop exit; takes effect after the next handled request.
            self.should_continue = False

        def has_exited(self):
            return self.exited

    def __init__(self, address=None, port=None, simulate=False):
        self.address = HamiltonInterface.default_address if address is None else address
        self.port = HamiltonInterface.default_port if port is None else port
        self.simulate = simulate
        self.server_thread = None
        self.oem_process = None
        self.active = False
        self.logger = None       # set lazily by set_log_dir()
        self.log_queue = []      # messages buffered until a logger exists

    def start(self):
        """Launch the OEM side (simulator or real process) and the bridge server."""
        if self.active:
            return
        self.log('starting a Hamilton interface')
        if self.simulate:
            # Close any already-open simulator window before starting a fresh one.
            sim_window_handle = None
            try:
                sim_window_handle = win32gui.FindWindow(None, 'Hamilton Run Control - ' + os.path.basename(OEM_HSL_PATH))
            except win32gui.error:
                pass
            if sim_window_handle:
                try:
                    win32gui.SendMessage(sim_window_handle, win32con.WM_CLOSE, 0, 0)
                    os.system('taskkill /f /im HxRun.exe')
                except win32gui.error:
                    self.stop()
                    self.log_and_raise(OSError('Simulator already open'))
            subprocess.Popen([OEM_RUN_EXE_PATH, OEM_HSL_PATH])
            self.log('started the oem application for simulation')
        else:
            self.oem_process = Process(target=run_hamilton_process, args=())
            self.oem_process.start()
            self.log('started the oem process')
        self.server_thread = HamiltonInterface.HamiltonServerThread(self.address, self.port)
        self.server_thread.start()
        self.log('started the server thread')
        self.active = True

    def stop(self):
        """Shut down the OEM side and the bridge server; safe to call twice."""
        if not self.active:
            return
        try:
            if self.simulate:
                self.log('sending end run command to simulator')
                try:
                    self.wait_on_response(self.send_command(command='end', id=hex(0)), timeout=1.5)
                except HamiltonTimeoutError:
                    pass
            else:
                # SIGTERM can transiently fail on Windows; retry once.
                for i in range(2):
                    try:
                        os.kill(self.oem_process.pid, signal.SIGTERM)
                        self.log('sent sigterm to oem process')
                        self.oem_process.join()
                        self.log('oem process exited')
                        break
                    except PermissionError:
                        self.log('permission denied, trying again...', 'warn')
                        time.sleep(2)
                else:
                    self.log('Could not kill oem process, moving on with shutdown', 'warn')
        finally:
            self.active = False
            self.server_thread.disconnect()
            self.log('disconnected from server')
            time.sleep(.1)
            if not self.server_thread.has_exited():
                # The server loop blocks inside handle_request(); poke it with
                # a throwaway GET so it notices should_continue is False.
                self.log('server did not exit yet, sending dummy request to exit its loop')
                session = requests.Session()
                adapter = requests.adapters.HTTPAdapter(max_retries=20)
                session.mount('http://', adapter)
                session.get('http://' + HamiltonInterface.default_address + ':' + str(HamiltonInterface.default_port))
                self.log('dummy get request sent to server')
            self.server_thread.join()
            self.log('server thread exited')

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, tb):
        self.stop()

    def is_open(self):
        return self.active

    def send_command(self, template=None, block_until_sent=False, *args, **cmd_dict): # returns unique id of command
        """Queue a command dict for the OEM side; return its unique 'id'.

        If ``template`` is None the command is validated against a known
        template by its 'command' key where possible; otherwise it is sent
        as-is. With ``block_until_sent`` the call spins until the OEM side
        has polled the queue empty.
        """
        if not self.is_open():
            self.log_and_raise(RuntimeError('Cannot send a command from a closed HamiltonInterface'))
        if template is None:
            if 'command' not in cmd_dict:
                self.log_and_raise(ValueError('Command dicts from HamiltonInterface must have a \'command\' key'))
            cmd_name = cmd_dict['command']
            if cmd_name in HamiltonInterface.known_templates:
                # raises if this is a known command but some fields in cmd_dict are invalid
                send_cmd_dict = HamiltonInterface.known_templates[cmd_name].assemble_cmd(**cmd_dict)
            else:
                send_cmd_dict = cmd_dict
        else:
            send_cmd_dict = template.assemble_cmd(**cmd_dict)
        if 'id' not in send_cmd_dict:
            self.log_and_raise(ValueError("Command dicts sent from HamiltonInterface must have a unique id with key 'id'"))
        self.server_thread.server_handler_class.send_str(json.dumps(send_cmd_dict))
        if block_until_sent:
            self._block_until_sq_clear()
        return send_cmd_dict['id']

    def wait_on_response(self, id, timeout=0, raise_first_exception=False):
        """Poll for the response to command ``id``; see pop_response for the return.

        ``timeout`` of 0 means wait forever (start_time of +inf makes the
        elapsed-time test always pass). Raises HamiltonTimeoutError on timeout.
        """
        if timeout:
            start_time = time.time()
        else:
            start_time = float('inf')
        response_tup = None
        while time.time() - start_time < timeout:
            try:
                response_tup = self.pop_response(id, raise_first_exception)
            except KeyError:
                pass  # not arrived yet; keep polling
            if response_tup is not None:
                return response_tup
            time.sleep(.1)
        self.log_and_raise(HamiltonTimeoutError('Timed out after ' + str(timeout) + ' sec while waiting for response id ' + str(id)))

    def pop_response(self, id, raise_first_exception=False):
        """
        Raise KeyError if id has no matching response. If there is a response, remove it and return a 2-tuple:

        [0] parsed response block dict from Hamilton as in parse_hamilton_return
        [1] Error map: dict mapping int keys (data block Num field) that had exceptions, if any,
            to an exception that was coded in block; None to any error not associated with a block; {} if no error
        """
        try:
            response = self.server_thread.server_handler_class.pop_response(id)
        except KeyError:
            raise KeyError('No Hamilton interface response indexed for id ' + str(id))
        errflag, blocks = self.parse_hamilton_return(response)
        err_map = {}
        if errflag:
            for blocknum in sorted(blocks.keys()):
                errcode = blocks[blocknum][_block_mainerrfield]
                if errcode != 0:
                    self.log('Exception encoded in Hamilton return.', 'warn')
                    try:
                        # Map the numeric code to a pyhamilton exception class.
                        decoded_exception = HAMILTON_ERROR_MAP[errcode]()
                    except KeyError:
                        self.log_and_raise(InvalidErrCodeError('Response returned had an unknown error code: ' + str(errcode)))
                    self.log('Exception: ' + repr(decoded_exception), 'warn')
                    if raise_first_exception:
                        self.log('Raising first exception.', 'warn')
                        raise decoded_exception
                    err_map[blocknum] = decoded_exception
            else:
                # NOTE(review): this else belongs to the for loop, so it runs
                # whenever no break occurred -- including when per-block codes
                # WERE found above; confirm intent before relying on it.
                unknown_exc = HamiltonStepError('Hamilton step did not execute correctly; no error code given.')
                err_map[None] = unknown_exc
                if raise_first_exception:
                    self.log('Raising first exception; exception has no error code.', 'warn')
                    raise unknown_exc
        return blocks, err_map

    def _block_until_sq_clear(self):
        # Busy-wait until the OEM side drains the send queue.
        # NOTE(review): references the module-level HamiltonServerHandler name,
        # not this interface's per-instance handler class -- verify.
        while HamiltonServerHandler.has_queued_cmds():
            pass

    def parse_hamilton_return(self, return_str):
        """
        Return a 2-tuple:

        [0] errflag: any error code present in response
        [1] Block map: dict mapping int keys to:
              dicts with str keys (MainErr, SlaveErr, RecoveryBtnId, StepData, LabwareName, LabwarePos)

        Result value 3 is the field that is returned by the OEM interface.
        "Result value 3 contains one error flag (ErrFlag) and the block data package."

        Data Block Format Rules

        The error flag is set once only at the beginning of result value 3. The error flag
        does not belong to the block data but may be used for a simpler error recovery.
        If this flag is set, an error code has been set in any of the block data entries.
        Each block data package starts with the opening square bracket character '['
        The information within the block data package is separated by the comma delimiter ','
        Block data information may be empty; anyway a comma delimiter is set.
        The result value may contain more than one block data package.
        Block data packages are returned independent of Num value ( unsorted ).

        Block data information

        Num
        Step depended information (e.g. the channel number, a loading position etc.).
        Note: The meaning and data type for this information is described in the corresponding help of single step.

        MainErr
        Main error code which occurred on instrument.

        SlaveErr
        Detailed error code of depended slave (e.g. auto load, washer etc.).

        RecoveryBtnId
        Recovery which has been used to handle this error.

        StepData
        Step depended information, e.g. the barcode read, the volume aspirated etc.
        Note: The meaning and data type for this information is described in the corresponding help of single step.

        LabwareName
        Labware name of used labware.

        LabwarePos
        Used labware position.
        """
        def raise_parse_error():
            msg = 'Could not parse response ' + repr(return_str)
            self.log(msg, 'error')
            raise HamiltonReturnParseError(msg)
        try:
            block_data_str = str(json.loads(return_str)['step-return1'])
        except KeyError:
            raise_parse_error()
        # First '['-split element is the leading error flag; the rest are blocks.
        blocks = block_data_str.split('[')
        try:
            errflag = int(blocks.pop(0)) != 0
        except ValueError:
            raise_parse_error()
        blocks_by_blocknum = {}
        any_error_code = False
        for block_str in blocks:
            field_vals = block_str.split(',')
            if len(field_vals) != len(BLOCK_FIELDS):
                raise_parse_error()
            try:
                block_contents = {field:cast(val) for field, cast, val in zip(BLOCK_FIELDS, _block_field_types, field_vals)}
            except ValueError:
                raise_parse_error()
            if block_contents[_block_mainerrfield] != 0:
                any_error_code = True
            blocks_by_blocknum[block_contents.pop(_block_numfield)] = block_contents
        # Sanity check: the leading flag must agree with the per-block codes.
        if blocks and errflag != any_error_code:
            raise_parse_error()
        return errflag, blocks_by_blocknum

    def set_log_dir(self, log_dir):
        """Attach a file logger and flush any messages buffered before now."""
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        hdlr = logging.FileHandler(log_dir)
        formatter = logging.Formatter('[%(asctime)s] %(name)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self._dump_log_queue()

    def log(self, msg, msg_type='info'):
        # Buffer, then flush immediately if a logger is already attached.
        self.log_queue.append((msg, msg_type))
        self._dump_log_queue()

    def _dump_log_queue(self):
        if self.logger is None:
            return
        # NOTE: Logger.warn is deprecated in favor of Logger.warning.
        log_actions = {'error':self.logger.error,
                       'warn':self.logger.warn,
                       'debug':self.logger.debug,
                       'info':self.logger.info,
                       'critical':self.logger.critical}
        while self.log_queue:
            msg, msg_type = self.log_queue.pop(0)
            log_actions.get(msg_type.lower(), self.logger.info)(msg) # prints if no log path set

    def log_and_raise(self, err):
        # Log the exception at error level, then propagate it.
        self.log(repr(err), 'error')
        raise err
| 2.140625 | 2 |
rest.py | yjlou/simple-restful | 2 | 12763005 | <gh_stars>1-10
#!/usr/bin/python
#
# Access control, file/directory permission, authorization.
#
# TODO:
#   Support "~" as a shorthand for the home directory of the
#   HTTP-authenticated user.
#
# Verbosity threshold for debug printing (higher = chattier; >=5 traces access checks).
DEBUG = 4
class Rest():
    """Python 2 RESTful dispatcher over a file/directory-like storage backend.

    Maps HTTP verbs (GET/PUT/UPDATE/DELETE) onto the backend ``db`` object,
    and enforces a simple ownership rule: an authenticated user may only
    access paths under his own prefix.
    """

    def __init__(self, db):
        # 'db' is the storage backend; expected to provide need_authentication,
        # safe_path, path_type, get/put/update/delete_{file,dir} and the
        # DIR/FILE/NOT_EXIST type constants (inferred from usage below).
        self.db = db

    def can_access(self, user, path):
        """
        If the user is permitted to access this directory

        'user'  -- String. The username in the Authenticate header.
                   None if no authenticate header.
        'path'  -- String. The path to access.
                   Can be a file or a directory.

        Returns: True if the user is accessible to the path.
        """
        if not self.db.need_authentication(path):
            # Free for all. No need to authenticate.
            if DEBUG >= 5:
                print "The path '%s' is free to access (not under .htpasswd)." % path
            return True

        if user is None:
            if DEBUG >= 5:
                print "The path '%s' isn't free to access, but no user is provided." % path
            return False

        #
        # return error if the 'user' is not prefix of 'path'.
        #
        i = 0
        # Normalize both through the backend's path sanitizer, then compare
        # component-by-component: the user's path must be a prefix of 'path'.
        u = self.db.safe_path(user.split("/")).split("/")
        p = self.db.safe_path(path.split("/")).split("/")
        if DEBUG >= 5:
            print "User: ", u
            print "Path: ", p
        if len(u) > len(p):
            return False
        for i in range(len(u)):
            if u[i] != p[i]:
                return False

        if DEBUG >= 5:
            print "The user '%s' is able to access the path '%s'." % (user, path)
        return True

    def handle(self, http_method, full_path, args = None):
        """Dispatch one request; returns (status_code, body_string).

        'args' may override the verb via ?action=... and carry ?content=...
        A trailing slash on PUT selects directory creation over file creation.
        """
        if args and "action" in args:
            # Allow method override through the query string (for clients
            # that can only issue GET/POST).
            http_method = args["action"][0]
        if args and "content" in args:
            content = args["content"][0]
        else:
            content = ""

        if len(full_path) > 1 and full_path[-1] == "/":
            slash_at_end = True
        else:
            slash_at_end = False

        # split path into array
        path = full_path.split("/")
        # NOTE: 'type' shadows the builtin; holds the backend's path kind.
        type = self.db.path_type(path)

        if http_method == "GET":
            if type is self.db.DIR:
                ret = self.db.get_dir(path)
                status_code = 200
            elif type is self.db.FILE:
                ret = self.db.get_file(path)
                status_code = 200
            else:
                ret = "GET %s is not found." % full_path
                status_code = 404
        elif http_method == "PUT":
            if slash_at_end:
                if self.db.put_dir(path):
                    ret = "PUT: dirctory %s is created." % full_path
                    status_code = 201
                else:
                    ret = "PUT: directory %s is failed." % full_path
                    status_code = 403
            else:
                if self.db.put_file(path, content):
                    ret = "PUT: file %s is created." % full_path
                    status_code = 201
                else:
                    ret = "PUT: file %s is failed." % full_path
                    status_code = 403
        elif http_method == "UPDATE":
            if type is self.db.FILE:
                if self.db.update_file(path, content):
                    ret = "UPDATE: file %s is updated." % full_path
                    status_code = 200
                else:
                    ret = "UPDATE: file %s is failed." % full_path
                    status_code = 403
            else:
                ret = "UPDATE: %s is not a file." % full_path
                status_code = 403
        elif http_method == "DELETE":
            if type is self.db.DIR:
                if self.db.delete_dir(path):
                    ret = "DELETE: directory %s is deleted." % full_path
                    status_code = 200
                else:
                    ret = "DELETE: directory %s is failed." % full_path
                    status_code = 403
            elif type is self.db.FILE:
                if self.db.delete_file(path):
                    ret = "DELETE: file %s is deleted." % full_path
                    status_code = 200
                else:
                    ret = "DELETE: file %s is failed." % full_path
                    status_code = 403
            elif type is self.db.NOT_EXIST:
                ret = "DELETE: file %s is not found." % full_path
                status_code = 404
            else:
                ret = "DELETE: type %d of path %s is not supported." % (type, full_path)
                status_code = 501
        """ TODO:
        elif http_method == "POST":
            if type is self.db.DIR:
                ret = self.db.post_dir(path, content)
            else:
                ret = self.db.EXISTED
        """
        else:
            status_code = 400
            ret = "The HTTP method %s is not supported." % http_method

        return (status_code, ret)
| 3.109375 | 3 |
ADoCSM/Automated_Documentation/UML_Diagram/Internal_Block_Diagram_Interface.py | ErnestoVivas/X-HD | 2 | 12763006 | <reponame>ErnestoVivas/X-HD
from UML_Diagram import General
from UML_Diagram import Object_Oriented_Relations
from UML_Diagram import ClassDiagram
from UML_Diagram import UML_Interface
import os
import shutil
import numpy as np

# Module-level helper singletons. NOTE: the last two rebind the *imported
# module names* ClassDiagram and UML_Interface to instances, shadowing the
# modules for the rest of this file.
ClassConverter = General.Class_Converter()
Realtion = Object_Oriented_Relations.Relation()  # [sic] "Realtion" typo kept -- referenced below
DataCheck = General.DataCheck
ClassDiagram = ClassDiagram.ClassDiagram()
UML_Interface = UML_Interface.UMLClassDiagram_MainInterface()
### Visualise a Internal_Block_Diagram
class Internal_Block_Diagram_Interface():
def Internal_Block_Diagram(self,filename_input,output_path,AixLib_path,HierarchyLevel,parameter,variables,primitivevariables,complexvariables,methode,block,showPackages,Relation,showconstant,showType):
fileList = os.listdir(output_path)
Package = []
for f in fileList:
f=output_path+"\\"+f
if os.path.isfile(f):
os.remove(f)
DataCheck.AixLibPath_exist(AixLib_path)
DataCheck.filename_exist(filename_input)
RelationModel = filename_input.split("\\")
RelationModel = RelationModel[len(RelationModel)-1].replace(".mo","")
#RelationModel = "MassFlowRateChoke"
filename_input = ClassConverter.Model_Converter(filename_input,output_path,RelationModel,AixLib_path)
#Instanz.getRelationExample(filename_input, AixLib_path)
Instanz.put_full_Package(filename_input,output_path,parameter,variables,primitivevariables,complexvariables,methode,Package)
for z in Realtion.setConnectinModel(filename_input, AixLib_path):
ClassDiagram.insert_line(filename_input,output_path,ClassDiagram.number_of_lines(filename_input,output_path)-1,"\n"+z+"\n" )
classname = Internal_Block_Diagram_Interface.packagename(self, filename_input)
if Instanz.packagename(filename_input)!=None:
ClassDiagram.insert_line(filename_input,output_path, 0, "@startuml")
ClassDiagram.insert_line(filename_input,output_path,ClassDiagram.number_of_lines(filename_input,output_path),"\n"+"}"+ "\n"+ "@enduml")
ClassDiagram.insert_line(filename_input,output_path, 1,"\n"+"\n"+"\n"+"\n"+ classname+"\n")
for i in Realtion.get_Relation(filename_input,AixLib_path,block):
i= i.split(",")
filename_input = i[0]
RelationModel = i[1]
filename_input = ClassConverter.Model_Converter(filename_input,output_path,RelationModel,AixLib_path) #Formats the file and outputs the file
DataCheck.filename_exist(filename_input)
ClassDiagram.put_full_class(filename_input,output_path,parameter,variables,primitivevariables,complexvariables,methode,Package,showPackages,Relation,showconstant,showType)
DataCheck.Appender(output_path, finalData)
"""ClassDiagram.put_full_class(filename_input,output_path,parameter,variables,primitivevariables,complexvariables,methode,Package,showPackages,Relation,showconstant,showType)
for z in Realtion.setConnectinModel(filename_input, AixLib_path):
ClassDiagram.insert_line(filename_input,output_path,ClassDiagram.number_of_lines(filename_input,output_path)-1,"\n"+z+"\n" )
for i in Realtion.get_Relation(filename_input,AixLib_path,block):
Package = []
i= i.split(",")
if len(i)==3:
filename_input = i[0]
RelationModel = i[1]
Package.append(i[2])
else:
filename_input = i[0]
RelationModel = i[1]
DataCheck.filename_exist(filename_input)
filename_input = ClassConverter.Model_Converter(filename_input,output_path,RelationModel,AixLib_path)
T = ClassDiagram.put_full_class(filename_input,output_path,parameter,variables,primitivevariables,complexvariables,methode,Package,showPackages,Relation,showconstant,showType)
continue"""
def packagename(self,filename_input):
self.readfile_in = open(filename_input,'r')
stereotyp=[]
for line in self.readfile_in.readlines():
x = line.split()
x_array = np.asarray(x)
if len(x_array)>0:
if x_array[0] == 'function':
stereotyp.append(x_array)
classname = " package "+stereotyp[0][1] + " << " +stereotyp[0][0] + " >> {"
return classname
if x_array[0] == 'partial' and x_array[1]=="connector":
stereotyp.append(x_array)
classname= " package "+stereotyp[0][2] + " << " +stereotyp[0][0] +" "+ stereotyp[0][1]+ " >> {"
return classname
if x_array[0] == 'partial':
stereotyp.append(x_array)
classname= " package "+stereotyp[0][2] + " << " +stereotyp[0][0]+ " >> {"
return classname
if x_array[0] == "model":
stereotyp.append(x_array)
classname = "package "+stereotyp[0][1] + " << " +stereotyp[0][0]+ " >> {"
return classname
if x_array[0] == "block":
stereotyp.append(x_array)
classname = "package "+stereotyp[0][1] + " << " +stereotyp[0][0]+ " >> {"
return classname
if x_array[0] == "connector":
stereotyp.append(x_array)
classname = "package "+stereotyp[0][1] + " << " +stereotyp[0][0]+ " >> {"
return classname
if x_array[0] == "type":
stereotyp.append(x_array)
classname = "package "+stereotyp[0][1] + " << " +stereotyp[0][0]+ " >> {"
return classname
if x_array[0] == "record":
stereotyp.append(x_array)
classname = "package"+stereotyp[0][1] + " << " +stereotyp[0][0]+ " >> {"
return classname
def put_full_Package(self,filename_input,output_path,parameter,variables,primitivevariables,complexvariables,methode,Package):
filename_output = DataCheck.setUMLModell(filename_input, output_path)
ClassDiagram.set_Attribute_public(filename_input,filename_output, output_path,parameter,variables,primitivevariables,complexvariables,Relation,showconstant)
ClassDiagram.set_Attribute_protected(filename_input, filename_output,output_path,parameter,variables,primitivevariables,complexvariables,showconstant)
ClassDiagram.set_initialMethode(filename_input,filename_output,output_path,parameter,variables,primitivevariables,complexvariables,methode)
ClassDiagram.set_Methode(filename_input, filename_output,output_path,parameter,variables,primitivevariables,complexvariables,methode)
return filename_input
def getRelationExample(self, filename_input, AixLib_path):
self.ListLibrary = DataCheck.allModelicaLibrary(AixLib_path)
self.filename_input = filename_input
readfile_in = open(self.filename_input, 'r+')
ListRelation = []
ModelList = []
for z in Realtion.setConnectinModel(filename_input, AixLib_path):
w = z.split('"')
x = z.split(':')
x = x[0].split('"')
ModelList.append(w[0])
ModelList.append(x[len(x)-1])
mainModel = filename_input
mainModel = mainModel.split("\\")
mainModel = mainModel[len(mainModel) - 1].split(".")
OriginalLibrary = ""
count = 0
for w in mainModel[0:3]:
count = count + 1
if count == 3:
OriginalLibrary = OriginalLibrary + w
else:
OriginalLibrary = OriginalLibrary + w + "\\"
for z in ModelList:
z = z.lstrip()
RelationModel = z
relation_directory = DataCheck.set_relationmodel_path(AixLib_path, RelationModel, OriginalLibrary)
def setdirection(self,finalData):
readfile_in = open(finalData,"r+")
for line in readfile_in.readflines():
print(line)
# Module-level instance; several methods of the class above call through it.
Instanz = Internal_Block_Diagram_Interface()

if __name__ == "__main__":
    # NOTE: all paths below are machine-specific absolute Windows paths.
    filename_input= r"C:\Users\hinack\Dropbox\09_Modelica_Library\AixLib\Fluid\Actuators\Valves\ExpansionValves\Examples\MassFlowRateChoke.mo"
    AixLib_path=r"C:\Users\hinack\Dropbox\09_Modelica_Library"
    output_path = r"C:\Users\hinack\Dropbox\08_Eclipse_Workspace_Automated_Documentation\Automated_Documentation\UML_Diagram\Java_Klassen"
    Model = [filename_input]
    finalData = r"C:\Users\hinack\Dropbox\08_Eclipse_Workspace_Automated_Documentation\Automated_Documentation\UML_Diagram\Java_Klassen\Gesamt\Ventil.java"
    # Feature toggles controlling what appears in the diagram.
    parameter = True
    variables = False
    Relation = False
    Medium = False
    primitivevariables= False
    methode = False
    complexvariables = False
    HierarchyLevel = 10
    block = True
    showPackages = True
    showconstant = False
    showType = True
    showConnect = True
    Instanz.Internal_Block_Diagram(filename_input, output_path, AixLib_path, HierarchyLevel, parameter, variables, primitivevariables, complexvariables, methode, block, showPackages, Relation, showconstant, showType)
    # Prepend @startuml and append @enduml to the merged output file.
    UML_Interface.test(finalData,0,"@startuml{")
    UML_Interface.test(finalData,UML_Interface.test2(finalData), " \n @enduml")
    print("Conversion Succsesfull!")
| 2.4375 | 2 |
linode_core.py | pathbreak/linodecommon | 0 | 12763007 | import time
import Queue
import threading
import collections
import traceback
import linode_api as lin
import image_manager
from passwordgen import pattern
import simplejson as json
import yaml
import logger
from exc import CreationError
class Core(object):
    """Orchestrates end-to-end Linode provisioning (Python 2 codebase:
    note `import Queue` at module level).

    Wraps the linode_api helpers to create a node, build its disks (from an
    image or a distribution), configure, attach IPs, and boot -- rolling the
    node back on failure when requested.
    """

    def __init__(self, app_ctx):
        # app_ctx carries application-wide configuration/state for helpers.
        assert app_ctx
        self.app_ctx = app_ctx

    def create_linode(self, linode_spec, boot = True, delete_on_error = True):
        ''' Create a linode.

        Args:
            linode_spec : A `dict` with all details required to create a linode. For example:
                {
                    'plan_id' : 1,
                    'datacenter' : 9,
                    'distribution' : 'CentOS 7',
                        or
                    'image' : 'image_label',
                    'kernel' : 'Latest 64 bit',
                    'label' : 'myserver',
                    'group' : 'mycluster',
                    'disks' : {
                        'boot' : {
                            'disk_size' : 5000
                        },
                        'swap' : {
                            'disk_size' : 'auto'
                        },
                        'others' : [
                            {
                                'label' : 'mydata',
                                'disk_size' : 18000,
                                'type' : 'ext4'
                            }
                        ]
                    }
                }

                linode_spec['disks']['others'] is optional.
                linode_spec['disks']['others'][...]['type'] should be [ext4 | ext3 | swap | raw]

        Returns:
            A Linode object, or None on failure.
        '''
        # Must have either a distribution or an image to build the boot disk.
        assert any( [linode_spec.get('distribution'), linode_spec.get('image')] )

        logger.msg("Create node")
        linode = Linode()
        linode.inited = False
        linode_id = None
        try:
            success, linode_id, errors = lin.create_node(linode_spec['plan_id'], linode_spec['datacenter'])
            linode.created = success
            linode.id = linode_id
            if not success:
                logger.error_msg("Create node failed." + errors)
                raise CreationError()
            logger.msg("Created node %d" % (linode.id))

            logger.msg("Update node label")
            label = linode_spec['label']
            if '{linode_id}' in label:
                # Substitute the freshly assigned node id into the label.
                label = label.replace('{linode_id}', str(linode_id))
            success, linode_id, errors = lin.update_node(linode_id, label, linode_spec['group'])
            if not success:
                logger.warning_msg("Update node failed but continuing." + errors)
                # If update node fails, don't abort because it's not a critical failure.

            # Linode requires passwords to have atleast 2 of these 4 classes - lowercase, uppercase, numbers, digits.
            # See https://github.com/nkrim/passwordgen for understanding the pattern.
            # TODO Use Vault here
            root_password = pattern.Pattern('%{cwds+^}[64]').generate()
            # FIXME: hard-coded, machine-specific SSH key path; move to config.
            root_ssh_key_file = '/home/karthik/.ssh/id_rsa.pub'

            jobs = []   # (linode_id, job_id) pairs to wait on
            disks = []  # disk ids, in the order they go into the config

            distro = linode_spec.get('distribution')
            image_label = linode_spec.get('image')
            if image_label:
                logger.msg("Create boot disk from image '%s'" % (image_label))
                img_mgr = image_manager.ImageManager(self.app_ctx)
                disk_spec = {
                    'linode_id' : linode_id,
                    'label' : 'boot',
                    'disk_size' : linode_spec['disks']['boot']['disk_size'],
                    # Fixed: this value had been mangled to a '<PASSWORD>'
                    # placeholder; it must be the generated root password,
                    # matching the distribution branch below.
                    'root_password' : root_password,
                    'root_ssh_key_file' : root_ssh_key_file
                }
                success, disk_details, errors = img_mgr.create_disk_from_image(image_label, disk_spec)
                if not success:
                    logger.error_msg("Create disk from image failed." + errors)
                    raise CreationError()
                assert disk_details['disk_id']
                disk_id = disk_details['disk_id']
            elif distro:
                logger.msg("Create boot disk from distribution")
                success, disk_id, disk_job_id, errors = lin.create_disk_from_distribution(linode_id,
                    linode_spec['distribution'], linode_spec['disks']['boot']['disk_size'],
                    root_password, root_ssh_key_file)
                if not success:
                    logger.error_msg("Create disk from distribution failed." + errors)
                    raise CreationError()
                jobs.append( (linode_id, disk_job_id) )
                print("Creating boot disk %d, job %d" % (disk_id, disk_job_id))

            linode.boot_disk_id = disk_id
            disks.append(disk_id)

            swap_disk = linode_spec['disks'].get('swap')
            if swap_disk is not None:
                logger.msg("Create swap disk")
                swap_disk_size_mb = swap_disk['disk_size']
                if str(swap_disk_size_mb) == 'auto':
                    # None lets the API helper pick a default swap size.
                    swap_disk_size_mb = None
                success, swap_disk_id, swap_job_id, errors = lin.create_swap_disk(linode_id, swap_disk_size_mb)
                if not success:
                    logger.error_msg("Create swap disk failed." + errors)
                    raise CreationError()
                jobs.append( (linode_id, swap_job_id) )
                disks.append(swap_disk_id)

            other_disks = linode_spec['disks'].get('others')
            if other_disks is not None:
                logger.msg('Create additional disks')
                for other_disk in other_disks:
                    # disk type should be one of 'ext4|ext3|swap|raw'. If
                    # it's a different filesystem, leave it as raw and create
                    # filesystem during provisioning.
                    disk_type = other_disk['type']
                    if disk_type not in ['ext4', 'ext3', 'swap']:
                        disk_type = 'raw'
                    success, other_disk_id, other_disk_job_id, errors = lin.create_disk(
                        linode_id,
                        disk_type,
                        other_disk['disk_size'],
                        other_disk['label'])
                    if not success:
                        logger.error_msg('other disk creation failed.' + errors)
                        raise CreationError()
                    logger.msg('Created additional disk:%d' % (other_disk_id))
                    jobs.append( (linode_id, other_disk_job_id) )
                    disks.append(other_disk_id)

            # Block until all disk-creation jobs complete.
            results = self.wait_for_jobs(jobs)
            for r in results:
                if not r['success']:
                    logger.error_msg("Job failed. Aborting")
                    print(r)
                    raise CreationError()

            print("Create configuration")
            success, config_id, errors = lin.create_config(linode_id, linode_spec['kernel'],
                disks, 'testconfig')
            if not success:
                logger.error_msg('Configuration failed.' + errors)
                raise CreationError()

            print("Configure private IP")
            success, linode.private_ip = lin.add_private_ip(linode_id)
            if not success:
                print("Private IP failed")
                raise CreationError()
            print('Private IP: %s' % (linode.private_ip))
            linode.public_ip = [lin.get_public_ip_address(linode_id)]
            print('Public IP: %s' % (linode.public_ip))

            logger.success_msg('Linode Created')

            if boot:
                print("Booting")
                success, boot_job_id, errors = lin.boot_node(linode_id, config_id)
                if not success:
                    logger.error_msg('Booting failed.' + errors)
                    raise CreationError()
                finished, success = self.wait_for_job(linode_id, boot_job_id)
                if not success:
                    logger.error_msg('Booting failed')
                    raise CreationError()
                logger.success_msg('Linode Booted')
                linode.inited = True

            return linode

        except Exception as e:
            if delete_on_error:
                # Delete the temporarily created linode.
                logger.error_msg('Deleting node due to error:%s\n%s' % (e, traceback.format_exc()))
                deleted, _, errors = lin.delete_node(linode_id, True)
                if not deleted:
                    # NOTE(review): 'warn_msg' vs 'warning_msg' used elsewhere --
                    # verify both exist on the logger module.
                    logger.warn_msg('Warning: Unable to delete node. Please delete from Linode Manager.' + errors)
            return None

    def wait_for_jobs(self, linodes_jobs):
        # Multithreaded wait for jobs
        # linodes_jobs is a list of (linode_id, job_id) tuples
        #
        # Returns an iterable with dicts of
        # {linode_id, job_id, finished, success} per job.

        def job_waiter(q, results):
            # One thread per job: pull a job off the queue and poll it.
            linode_id, job_id = q.get()
            finished, success = self.wait_for_job(linode_id, job_id)
            results.append( {
                'linode_id' : linode_id,
                'job_id' : job_id,
                'finished' : finished,
                'success' : success} )
            q.task_done()

        q = Queue.Queue()
        results = collections.deque()  # deque appends are thread-safe
        threads = []
        for linode_id, job_id in linodes_jobs:
            t = threading.Thread( target = job_waiter, args = (q, results) )
            t.start()
            threads.append(t)

        for item in linodes_jobs:
            q.put(item)

        # block until all tasks are done
        q.join()

        # stop workers
        for t in threads:
            t.join()

        return results

    def wait_for_job(self, linode_id, job_id):
        """Poll one Linode job every 5s until it finishes or ~4 min elapse.

        Returns (finished, success); finished is None if the job id is unknown.
        """
        timeout = 240 # 4 minutes
        poll_interval = 5 # seconds
        # Integer division: plain '/' would yield a float under Python 3 and
        # make range() raise TypeError. Identical result under Python 2.
        poll_count = timeout // poll_interval
        for i in range(poll_count):
            time.sleep(poll_interval)
            finished, success = lin.is_job_finished(linode_id, job_id)
            if finished is None:
                logger.error_msg('No such job %d for linode %d' % (job_id, linode_id))
                break
            if finished is True:
                logger.msg('Finished job %d for linode %d' % (job_id, linode_id))
                break
        return finished, success
class Linode(object):
    """Plain value object describing a Linode being provisioned.

    Attributes are populated incrementally by Core.create_linode. They are
    pre-initialized here (previously ``__init__`` was empty) so that a
    partially constructed object can be inspected safely after a failure.
    """

    def __init__(self):
        self.id = None              # Linode API node id
        self.created = False        # True once the node exists in the API
        self.inited = False         # True once disks/config/boot all completed
        self.boot_disk_id = None    # id of the boot disk
        self.private_ip = None      # private IPv4 address string
        self.public_ip = None       # list of public IPv4 address strings
| 2.21875 | 2 |
sqllineage/core.py | surajpotnuru/sqllineage | 0 | 12763008 | <gh_stars>0
import re
from typing import Set, TYPE_CHECKING, Tuple
from sqlparse.sql import (
Comment,
Comparison,
Function,
Identifier,
IdentifierList,
Parenthesis,
Statement,
TokenList,
)
from sqlparse.tokens import Number
from sqllineage.exceptions import SQLLineageException
from sqllineage.models import Table
# Regex patterns for keywords that introduce a *source* table (read side).
SOURCE_TABLE_TOKENS = (
    r"FROM",
    # inspired by https://github.com/andialbrecht/sqlparse/blob/master/sqlparse/keywords.py
    r"((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?|(CROSS\s+|NATURAL\s+)?)?JOIN",
)
# Exact keywords that introduce a *target* table (write side).
TARGET_TABLE_TOKENS = ("INTO", "OVERWRITE", "TABLE", "VIEW", "UPDATE")
# Keyword that introduces a CTE (temporary/intermediate table).
TEMP_TABLE_TOKENS = ("WITH",)
class LineageResult:
    """
    Statement Level Lineage Result.

    LineageResult will hold attributes like read, write, rename, drop, intermediate.

    Each of them is a Set[:class:`sqllineage.models.Table`] except for rename.

    For rename, it a Set[Tuple[:class:`sqllineage.models.Table`, :class:`sqllineage.models.Table`]], with the first
    table being original table before renaming and the latter after renaming.

    This is the most atomic representation of lineage result.
    """

    __slots__ = ["read", "write", "rename", "drop", "intermediate"]
    if TYPE_CHECKING:
        read = write = drop = intermediate = set()  # type: Set[Table]
        rename = set()  # type: Set[Tuple[Table, Table]]

    def __init__(self) -> None:
        # Every slot starts out as an independent empty set.
        for slot in self.__slots__:
            setattr(self, slot, set())

    def __add__(self, other):
        # Combine two results slot-wise into a fresh LineageResult; neither
        # operand is mutated.
        combined = LineageResult()
        for slot in self.__slots__:
            merged = getattr(self, slot) | getattr(other, slot)
            setattr(combined, slot, merged)
        return combined

    def __str__(self):
        # One line per slot; non-empty sets are rendered sorted by their
        # string form, empty ones as the literal '[]'.
        rendered = []
        for slot in self.__slots__:
            values = getattr(self, slot)
            shown = sorted(values, key=str) if values else "[]"
            rendered.append(f"table {slot}: {shown}")
        return "\n".join(rendered)

    def __repr__(self):
        return self.__str__()
class LineageAnalyzer:
"""SQL Statement Level Lineage Analyzer."""
    def __init__(self) -> None:
        # Accumulates results across every analyze() call on this instance.
        self._lineage_result = LineageResult()
def analyze(self, stmt: Statement) -> LineageResult:
"""
to analyze the Statement and store the result into :class:`LineageResult`.
:param stmt: a SQL statement parsed by `sqlparse`
"""
if stmt.get_type() == "DROP":
self._extract_from_ddl_drop(stmt)
elif stmt.get_type() == "ALTER":
self._extract_from_ddl_alter(stmt)
elif (
stmt.get_type() == "DELETE"
or stmt.token_first(skip_cm=True).normalized == "TRUNCATE"
or stmt.token_first(skip_cm=True).normalized.upper() == "REFRESH"
or stmt.token_first(skip_cm=True).normalized == "CACHE"
or stmt.token_first(skip_cm=True).normalized.upper() == "UNCACHE"
):
pass
else:
# DML parsing logic also applies to CREATE DDL
self._extract_from_dml(stmt)
return self._lineage_result
def _extract_from_ddl_drop(self, stmt: Statement) -> None:
for table in {
Table.create(t) for t in stmt.tokens if isinstance(t, Identifier)
}:
self._lineage_result.drop.add(table)
def _extract_from_ddl_alter(self, stmt: Statement) -> None:
tables = [Table.create(t) for t in stmt.tokens if isinstance(t, Identifier)]
keywords = [t for t in stmt.tokens if t.is_keyword]
if any(k.normalized == "RENAME" for k in keywords) and len(tables) == 2:
self._lineage_result.rename.add((tables[0], tables[1]))
def _extract_from_dml(self, token: TokenList) -> None:
source_table_token_flag = False
target_table_token_flag = False
temp_table_token_flag = False
for sub_token in token.tokens:
if self.__token_negligible_before_tablename(sub_token):
continue
if isinstance(sub_token, TokenList):
self._extract_from_dml(sub_token)
if sub_token.is_keyword:
if any(
re.match(regex, sub_token.normalized)
for regex in SOURCE_TABLE_TOKENS
) and not isinstance(sub_token.parent.parent, Function):
# SELECT trim(BOTH ' ' FROM ' abc '); Here FROM is not a source table flag
source_table_token_flag = True
elif sub_token.normalized in TARGET_TABLE_TOKENS:
target_table_token_flag = True
elif sub_token.normalized in TEMP_TABLE_TOKENS:
temp_table_token_flag = True
continue
if source_table_token_flag:
self._handle_source_table_token(sub_token)
source_table_token_flag = False
elif target_table_token_flag:
self._handle_target_table_token(sub_token)
target_table_token_flag = False
elif temp_table_token_flag:
self._handle_temp_table_token(sub_token)
temp_table_token_flag = False
def _handle_source_table_token(self, sub_token: TokenList) -> None:
if isinstance(sub_token, Identifier):
if isinstance(sub_token.token_first(skip_cm=True), Parenthesis):
# SELECT col1 FROM (SELECT col2 FROM tab1) dt, the subquery will be parsed as Identifier
# and this Identifier's get_real_name method would return alias name dt
# referring https://github.com/andialbrecht/sqlparse/issues/218 for further information
pass
else:
self._lineage_result.read.add(Table.create(sub_token))
elif isinstance(sub_token, IdentifierList):
# This is to support join in ANSI-89 syntax
for token in sub_token.tokens:
# when real name and alias name are the same, it means subquery here
if (
isinstance(token, Identifier)
and token.get_real_name() != token.get_alias()
):
self._lineage_result.read.add(Table.create(token))
elif isinstance(sub_token, Parenthesis):
# SELECT col1 FROM (SELECT col2 FROM tab1), the subquery will be parsed as Parenthesis
# This syntax without alias for subquery is invalid in MySQL, while valid for SparkSQL
pass
else:
raise SQLLineageException(
"An Identifier is expected, got %s[value: %s] instead"
% (type(sub_token).__name__, sub_token)
)
def _handle_target_table_token(self, sub_token: TokenList) -> None:
if isinstance(sub_token, Function):
# insert into tab (col1, col2) values (val1, val2); Here tab (col1, col2) will be parsed as Function
# referring https://github.com/andialbrecht/sqlparse/issues/483 for further information
if not isinstance(sub_token.token_first(skip_cm=True), Identifier):
raise SQLLineageException(
"An Identifier is expected, got %s[value: %s] instead"
% (type(sub_token).__name__, sub_token)
)
self._lineage_result.write.add(
Table.create(sub_token.token_first(skip_cm=True))
)
elif isinstance(sub_token, Comparison):
# create table tab1 like tab2, tab1 like tab2 will be parsed as Comparison
# referring https://github.com/andialbrecht/sqlparse/issues/543 for further information
if not (
isinstance(sub_token.left, Identifier)
and isinstance(sub_token.right, Identifier)
):
raise SQLLineageException(
"An Identifier is expected, got %s[value: %s] instead"
% (type(sub_token).__name__, sub_token)
)
self._lineage_result.write.add(Table.create(sub_token.left))
self._lineage_result.read.add(Table.create(sub_token.right))
else:
if not isinstance(sub_token, Identifier):
raise SQLLineageException(
"An Identifier is expected, got %s[value: %s] instead"
% (type(sub_token).__name__, sub_token)
)
if sub_token.token_first(skip_cm=True).ttype is Number.Integer:
# Special Handling for Spark Bucket Table DDL
pass
else:
self._lineage_result.write.add(Table.create(sub_token))
def _handle_temp_table_token(self, sub_token: TokenList) -> None:
if isinstance(sub_token, Identifier):
self._lineage_result.intermediate.add(Table.create(sub_token))
self._extract_from_dml(sub_token)
elif isinstance(sub_token, IdentifierList):
for temp_tab_token in sub_token:
if isinstance(temp_tab_token, Identifier):
self._lineage_result.intermediate.add(Table.create(temp_tab_token))
self._extract_from_dml(temp_tab_token)
else:
raise SQLLineageException(
"An Identifier or IdentifierList is expected, got %s[value: %s] instead"
% (type(sub_token).__name__, sub_token)
)
@classmethod
def __token_negligible_before_tablename(cls, token: TokenList) -> bool:
return token.is_whitespace or isinstance(token, Comment)
| 2.5 | 2 |
python/PythonForNetworkEngineers/Examples/example1.py | ModestTG/scripts | 0 | 12763009 | # allows print() funtion and bytestrings to work in python2
# Make print() a function and string literals unicode under Python 2 as well.
from __future__ import print_function, unicode_literals

my_str = 'whatever'
ip_addr1 = '8.8.8.8'
print(ip_addr1)

# Python 2 exposes raw_input(); on Python 3 that name is gone, so looking it
# up raises NameError and we fall back to input().
try:
    prompt_fn = raw_input  # python2 input function
except NameError:
    prompt_fn = input  # python3 input function
ip_addr2 = prompt_fn("Enter an IP Address: ")
print(ip_addr2)
| 3.96875 | 4 |
notebook.py | ivddorrka/notebook | 0 | 12763010 | """
Notebook
"""
import datetime
last_id = 0
class Note:
    """A single memo with optional tags, stamped with a creation date and id."""

    def __init__(self, memo, tags=''):
        # Hand out sequential ids from the module-level counter.
        global last_id
        last_id += 1
        self.id = last_id
        self.memo = memo
        self.tags = tags
        self.creation_date = datetime.date.today()

    def match(self, filter):
        """Case-sensitive containment search over both memo and tags."""
        return any(filter in text for text in (self.memo, self.tags))
# n1 = Note("hello")
# print(n1.id)
# print(n1.match('Hello'))
class Notebook:
    """An ordered collection of Note objects with search and edit helpers."""

    def __init__(self):
        # Notes in insertion order.
        self.notes = []

    def new_note(self, memo, tags=''):
        """Create a new Note and append it to the notebook."""
        self.notes.append(Note(memo, tags))

    def modify_memo(self, note_id, memo):
        """Replace the memo of the note with *note_id*.

        Returns True on success, False when no such note exists.
        """
        note = self._find_note(note_id)
        if note:
            note.memo = memo
            return True
        return False

    def modify_tags(self, note_id, tags):
        """Replace the tags of the note with *note_id*.

        Returns True on success, False when no such note exists.
        (Now routed through _find_note for consistency with modify_memo,
        so string and integer ids are both accepted.)
        """
        note = self._find_note(note_id)
        if note:
            note.tags = tags
            return True
        return False

    def search(self, filter):
        """Return all notes whose memo or tags match *filter*."""
        return [note for note in self.notes if note.match(filter)]

    def _find_note(self, note_id):
        """Locate a note by id; string and integer ids compare equal."""
        for note in self.notes:
            if str(note.id) == str(note_id):
                return note
        return None
| 3.421875 | 3 |
manager_core/tests/test_program_states.py | Junt0/CurbTheScreen | 2 | 12763011 | <reponame>Junt0/CurbTheScreen
from unittest.mock import patch
import pytest
from manager_core.CurbTheScreen import TrackedProgram, Program, ProgramStates
@pytest.fixture
def states_fixture_full(states_fixture_empty, program_class_fixture):
    """A ProgramStates pre-populated with a Program wrapper per tracked program."""
    ps = states_fixture_empty
    ps.program_objs = [Program(tracked) for tracked in program_class_fixture]
    return ps
def test_init_program_objs_no_save(program_class_fixture):
    """Without a prior save, programs load straight from the settings file."""
    # Tests that the program objs are loaded in from the settings file directly if there is no save
    with patch('manager_core.CurbTheScreen.ProgramStates.tracked_from_settings') as patched_settings:
        patched_settings.return_value = program_class_fixture
        with patch('manager_core.CurbTheScreen.DataManager.get_latest_save') as patched_save:
            # No save on disk -> settings are the only source of programs.
            patched_save.return_value = None
            ps = ProgramStates()
            assert ps.program_objs[0] == program_class_fixture[0]
            assert ps.program_objs[1] == program_class_fixture[1]
            assert ps.program_objs[2] == program_class_fixture[2]
@pytest.mark.parametrize("time_left, expected_left, expected_block", [
    (90, 90, False),
    (0, 0, True)
])
def test_init_program_objs_with_save_no_db(time_left, expected_left, expected_block):
    """A saved program updates only time_left and the blocked flag."""
    # Tests if there is a saved program and it updates only the time left and blocked status
    test1 = TrackedProgram.min_init("test1", 100)
    # Saved copy of the same program with some time already consumed.
    test1_saved = TrackedProgram("test1", 100, 50, 60, time_left)
    with patch('manager_core.CurbTheScreen.ProgramStates.tracked_from_settings') as tracked:
        tracked.return_value = [test1]
        with patch('manager_core.CurbTheScreen.TrackedProgram.has_save_today') as has_save:
            has_save.return_value = True
            with patch('manager_core.CurbTheScreen.DataManager.get_latest_save') as latest_save:
                latest_save.return_value = test1_saved
                ps = ProgramStates()
                # An exhausted time budget must mark the program blocked.
                assert ps.program_objs[0].time_left == expected_left
                assert ps.program_objs[0].blocked == expected_block
def test_update_elapsed(program_class_fixture, states_fixture_empty):
    """Every currently running program gains one loop-time tick of elapsed time."""
    ps = states_fixture_empty
    ps.currently_running = program_class_fixture
    with patch('manager_core.CurbTheScreen.ProgramStates.get_loop_time') as loop_time:
        loop_time.return_value = 1
        ps.update_elapsed()
    for program in program_class_fixture:
        assert program.elapsed_time == 1
def test_reset(program_class_fixture):
    """reset() clears the PID list of every program object."""
    # Tests that the PIDS for all program objects are reset
    with patch('manager_core.CurbTheScreen.ProgramStates.tracked_from_settings') as patched:
        patched.return_value = program_class_fixture
        with patch('manager_core.CurbTheScreen.TrackedProgram.has_save_today') as has_save:
            has_save.return_value = False
            ps = ProgramStates()
            # Give every program a PID so reset() has something to clear.
            for program in ps.program_objs:
                program.add_pid(1)
                assert program.PIDS == [1]
            ps.reset()
            for program in ps.program_objs:
                assert program.PIDS == []
def test_populate_program_pids(states_fixture_full):
    """populate_program_pids assigns PIDs to programs by (sub)string name match."""
    ps = states_fixture_full
    with patch('manager_core.CurbTheScreen.psutil.process_iter') as mocked_process_iter:
        with patch('manager_core.CurbTheScreen.psutil.Process') as mocked_process:
            # Used only as a lookup key for get_program below.
            temp_program = TrackedProgram.min_init("test1", 100)

            # Tests when pids are added that match the name of the program
            mocked_process.info = {"name": 'test1', "pid": 1}
            mocked_process_iter.return_value = iter([mocked_process])
            ps.populate_program_pids()
            mocked_process_iter.assert_called_with(attrs=["name", "pid"])
            assert [1] == ps.get_program(temp_program).PIDS

            # A second scan appends new PIDs rather than replacing them.
            mocked_process.info = {"name": 'test1', "pid": 2}
            mocked_process_iter.return_value = iter([mocked_process])
            ps.populate_program_pids()
            assert [1, 2] == ps.get_program(temp_program).PIDS

            # Tests that the program is retrieved when it contains the name of the program
            mocked_process.info = {"name": 'Containstest1', "pid": 3}
            mocked_process_iter.return_value = iter([mocked_process])
            ps.populate_program_pids()
            assert [1, 2, 3] == ps.get_program(temp_program).PIDS

            # Tests when the name is not contained: the PID list must not change.
            mocked_process.info = {"name": 'test42', "pid": 10}
            mocked_process_iter.return_value = iter([mocked_process])
            ps.populate_program_pids()
            assert [1, 2, 3] == ps.get_program(temp_program).PIDS
def test_add_to_running(states_fixture_full):
    """Programs that own PIDs are moved into currently_running."""
    ps = states_fixture_full
    first, second = ps.program_objs[0], ps.program_objs[1]
    first.PIDS = [1, 2]
    second.PIDS = [3, 4]
    assert ps.currently_running == []
    ps.add_to_running()
    assert ps.currently_running == [first, second]
| 2.265625 | 2 |
dcs/bin/scripts/parse_dcs_site.py | anoopsharma00/incubator-trafodion | 0 | 12763012 | #
# @@@ START COPYRIGHT @@@
#
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
#
# @@@ END COPYRIGHT @@@
#
#
import os
from xml.dom import minidom

# The dcs-site.xml properties whose values this script echoes to stdout,
# one per line, in the order they appear in the document. Collapsing the
# three duplicated if-branches into one membership test removes the
# copy/paste repetition of the original.
_WANTED_PROPS = (
    "dcs.master.port",
    "dcs.master.floating.ip.external.ip.address",
    "dcs.master.floating.ip.external.interface",
)

# Resolve the configuration directory: DCS_CONF_DIR wins; otherwise fall
# back to the conf/ directory under the install tree.
dcsconfig_dir = os.environ.get('DCS_CONF_DIR')
if not dcsconfig_dir:
    name = os.environ.get('DCS_INSTALL_DIR')
    dcsconfig_dir = name + "/conf"

doc = minidom.parse(dcsconfig_dir + "/dcs-site.xml")
for prop in doc.getElementsByTagName("property"):
    pname = prop.getElementsByTagName("name")[0]
    if pname.firstChild.data in _WANTED_PROPS:
        pvalue = prop.getElementsByTagName("value")[0]
        print("%s" % (pvalue.firstChild.data))
| 1.867188 | 2 |
examples/add_reference.py | jaideep-seth/PyOpenWorm | 1 | 12763013 | """
How to reference supporting evidence for some object in the database.
See: "Metadata in PyOpenWorm" for discussion on semantics of what giving
evidence for an object means.
"""
from __future__ import absolute_import
from __future__ import print_function
import PyOpenWorm as P
from PyOpenWorm.evidence import Evidence
from PyOpenWorm.neuron import Neuron
from PyOpenWorm.document import Document
from PyOpenWorm.data import Data
from PyOpenWorm.context import Context
# Create dummy database configuration.
d = Data()

# Connect to database with dummy configuration
conn = P.connect(conf=d)

# Separate contexts: one for the data itself, one for the evidence metadata.
ctx = Context(ident='http://example.org/data', conf=conn.conf)
evctx = Context(ident='http://example.org/meta', conf=conn.conf)

# Create a new Neuron object to work with
n = ctx(Neuron)(name='AVAL')

# Create a new Evidence object with `doi` and `pmid` fields populated.
# See `PyOpenWorm/evidence.py` for other available fields.
# NOTE(review): `d` is rebound here from the Data config to a Document;
# consider distinct names if this example is extended.
d = evctx(Document)(key='Anonymous2011', doi='125.41.3/ploscompbiol', pmid='12345678')
e = evctx(Evidence)(key='Anonymous2011', reference=d)

# Evidence object asserts something about the enclosed dataObject.
# Here we add a receptor to the Neuron we made earlier, and "assert it".
# As the discussion (see top) reads, this might be asserting the existence of
# receptor UNC-8 on neuron AVAL.
n.receptor('UNC-8')
e.supports(ctx.rdf_object)

# Save the Neuron and Evidence objects to the database.
ctx.save_context()
evctx.save_context()

# What does my evidence object contain?
for e_i in evctx.stored(Evidence)().load():
    print(e_i.reference(), e_i.supports())

# Disconnect from the database.
P.disconnect(conn)
| 2.640625 | 3 |
psyrun/jobs.py | jgosmann/psyrun | 2 | 12763014 | <gh_stars>1-10
"""Handling and processing of job trees."""
import itertools
import os
import os.path
import warnings
from psyrun.exceptions import JobsRunningWarning
from psyrun.utils.doc import inherit_docs
class Job(object):
    """Describes a single processing job.

    Parameters
    ----------
    name : str
        Name of the job.
    submit_fn : function
        Function to use to submit the job for processing.
    submit_kwargs : dict
        Additional keyword arguments to the submit function (in addition to
        *name* and *depends_on*).
    dependencies : sequence
        Identifiers of other jobs that need to finish first before this job
        can be run.
    targets : sequence of str
        Files created by this job.

    Attributes
    ----------
    name : str
        Name of the job.
    submit_fn : function
        Function to use to submit the job for processing.
    submit_kwargs : dict
        Additional keyword arguments to the submit function.
    dependencies : sequence
        Identifiers of other jobs that need to finish first before this job
        can be run.
    targets : sequence of str
        Files created by this job.
    """

    def __init__(self, name, submit_fn, submit_kwargs, dependencies, targets):
        self.name = name
        self.submit_fn = submit_fn
        self.submit_kwargs = submit_kwargs
        self.dependencies = dependencies
        self.targets = targets
class JobArray(object):
    """An array of *n* homogeneous jobs described by ``%a``-indexed patterns.

    Dependency, target, and argument patterns may contain the placeholder
    ``%a``; it is replaced by the element index (0 .. n-1) when the
    individual `Job` instances are materialized.
    """

    def __init__(
            self, n, name, submit_fn, single_submit_fn, submit_kwargs,
            dependency_patterns, target_patterns):
        self.n = n
        self.name = name
        self.submit_fn = submit_fn
        self.single_submit_fn = single_submit_fn
        self.submit_kwargs = submit_kwargs
        self.dependency_patterns = dependency_patterns
        self.target_patterns = target_patterns
        self.jobs = [self._instantiate(i) for i in range(self.n)]

    def _instantiate(self, index):
        """Build the element job for *index*, substituting ``%a``."""
        idx = str(index)
        deps = [p.replace('%a', idx) for p in self.dependency_patterns]
        tgts = [p.replace('%a', idx) for p in self.target_patterns]
        kwargs = dict(self.submit_kwargs)
        kwargs['args'] = [a.replace('%a', idx) for a in kwargs['args']]
        return Job(idx, self.single_submit_fn, kwargs, deps, tgts)

    @property
    def dependencies(self):
        """All dependency patterns expanded for every element index."""
        return (p.replace('%a', str(i))
                for i in range(self.n)
                for p in self.dependency_patterns)

    @property
    def targets(self):
        """All target patterns expanded for every element index."""
        return (p.replace('%a', str(i))
                for i in range(self.n)
                for p in self.target_patterns)
class JobChain(object):
    """A sequence of jobs that must execute one after another.

    Parameters
    ----------
    name : str
        Name of the job chain.
    jobs : sequence of Job
        Jobs to run in succession.

    Attributes
    ----------
    name : str
        Name of the job chain.
    jobs : sequence of Job
        Jobs to run in succession.
    dependencies : sequence
        What must exist before the chain can start; identical to the first
        job's dependencies.
    targets : sequence of str
        What the chain produces; identical to the last job's targets.
    """

    def __init__(self, name, jobs):
        self.name = name
        self.jobs = jobs

    @property
    def dependencies(self):
        first_job = self.jobs[0]
        return first_job.dependencies

    @property
    def targets(self):
        last_job = self.jobs[-1]
        return last_job.targets
class JobGroup(object):
    """Group of jobs that can run in parallel.

    Parameters
    ----------
    name : str
        Name of the job group.
    jobs : sequence of Job
        Jobs to run in the job group.

    Attributes
    ----------
    name : str
        Name of the job group.
    jobs : sequence of Job
        Jobs to run in the job group.
    dependencies : sequence
        Jobs that need to run first before the job group can be run
        (the union of all the group's job's dependencies).
    targets : sequence of str
        Files that will be created or updated by the group's jobs
        (the union of all the group's job's targets).
    """

    def __init__(self, name, jobs):
        self.name = name
        self.jobs = jobs

    @property
    def dependencies(self):
        # Bug fix: the original used itertools.chain(generator), which yields
        # the per-job dependency sequences themselves instead of their
        # elements. from_iterable flattens them, matching `targets` below.
        return itertools.chain.from_iterable(j.dependencies for j in self.jobs)

    @property
    def targets(self):
        return itertools.chain.from_iterable(j.targets for j in self.jobs)
class JobTreeVisitor(object):
    """Abstract base class for visitors over trees of jobs.

    Implements the Visitor pattern for trees built from `Job`, `JobArray`,
    `JobChain`, and `JobGroup` instances. Subclasses override `visit_job`,
    `visit_chain`, and `visit_group` (and optionally `visit_array`, which
    falls back to `visit_group`). Traversal starts with `visit`.
    """

    def __init__(self):
        # Maps each node class to its handler method.
        self._dispatcher = {
            Job: self.visit_job,
            JobArray: self.visit_array,
            JobChain: self.visit_chain,
            JobGroup: self.visit_group,
        }

    def visit_job(self, job):
        raise NotImplementedError()

    def visit_array(self, job_array):
        # By default an array is handled like an ordinary group.
        return self.visit_group(job_array)

    def visit_chain(self, chain):
        raise NotImplementedError()

    def visit_group(self, group):
        raise NotImplementedError()

    def visit(self, job):
        """Dispatch *job* to the handler registered for its concrete class."""
        handler = self._dispatcher[job.__class__]
        return handler(job)
@inherit_docs
class Submit(JobTreeVisitor):
    """Submit all jobs that are not up-to-date.

    The constructor will call `visit`.

    Parameters
    ----------
    job : job tree
        Tree of jobs to submit.
    names : dict
        Maps jobs to their names. (Can be obtained with `Fullname`.)
    uptodate : dict
        Maps jobs to their up-to-date status.
        (Can be obtained with `Uptodate`.)

    Attributes
    ----------
    names : dict
        Maps jobs to their names.
    uptodate : dict
        Maps jobs to their up-to-date status.
    """

    def __init__(self, job, names, uptodate):
        super(Submit, self).__init__()
        self.names = names
        self.uptodate = uptodate
        # Submission ids the job currently being visited must wait for;
        # extended while walking a chain (see visit_chain).
        self._depends_on = []
        self.visit(job)

    def visit_job(self, job):
        # '-' marks an up-to-date (skipped) job, '.' a freshly submitted one.
        if self.uptodate.status[job]:
            print('-', self.names[job])
            return []
        else:
            print('.', self.names[job])
            return [job.submit_fn(
                name=self.names[job], depends_on=self._depends_on,
                **job.submit_kwargs)]

    def visit_array(self, job):
        if self.uptodate.status[job]:
            print('-', self.names[job])
            return []
        else:
            print('.', self.names[job])
            try:
                # Prefer submitting the whole array in one scheduler call ...
                return [job.submit_fn(
                    job.n, name=self.names[job], depends_on=self._depends_on,
                    **job.submit_kwargs)]
            except NotImplementedError:
                # ... otherwise fall back to one submission per element.
                return self.visit_group(job)

    def visit_group(self, group):
        # Group members are independent; concatenate their submission ids.
        return sum((self.visit(job) for job in group.jobs), [])

    def visit_chain(self, chain):
        # Each link in the chain waits on the submission ids of the
        # immediately preceding link (plus the chain's outer dependencies).
        old_depends_on = self._depends_on
        job_ids = []
        for job in chain.jobs:
            ids = self.visit(job)
            job_ids.extend(ids)
            self._depends_on = old_depends_on + ids
        # Restore the outer dependency list for sibling subtrees.
        self._depends_on = old_depends_on
        return job_ids
@inherit_docs
class Clean(JobTreeVisitor):
    """Clean all target files and supporting files of jobs that are outdated.

    The constructor will call visit.

    Parameters
    ----------
    job : job tree
        Tree of jobs to clean.
    task : TaskDef
        Task that generated the job tree.
    names : dict
        Maps jobs to their names. (Can be obtained with Fullname.)
    uptodate : dict, optional
        Maps jobs to their up-to-date status.
        (Can be obtained with Uptodate.)
        If not provided, all jobs are treated as outdated.

    Attributes
    ----------
    task : TaskDef
        Task that generated the job tree.
    names : dict
        Maps jobs to their names.
    uptodate : dict
        Maps jobs to their up-to-date status.
    """

    def __init__(self, job, task, names, uptodate=None):
        super(Clean, self).__init__()
        self.task = task
        self.names = names
        if uptodate is None:
            # No status info: the .get(job, False) in visit_job then treats
            # every job as outdated.
            self.uptodate = {}
        else:
            self.uptodate = uptodate.status
        self.visit(job)

    def visit_job(self, job):
        # Up-to-date jobs keep their outputs.
        if self.uptodate.get(job, False):
            return
        # Remove supporting files in the task workdir that belong to this
        # job (matched by filename prefix) ...
        workdir = os.path.join(self.task.workdir, self.task.name)
        for item in os.listdir(workdir):
            if item.startswith(self.names[job]):
                os.remove(os.path.join(workdir, item))
        # ... and the job's declared target files.
        for t in job.targets:
            if os.path.exists(t):
                os.remove(t)

    def visit_chain(self, chain):
        for job in chain.jobs:
            self.visit(job)

    def visit_group(self, group):
        for job in group.jobs:
            self.visit(job)
@inherit_docs
class Fullname(JobTreeVisitor):
    """Assign a colon-separated qualified name to every job in a tree.

    The constructor will call `visit`.

    Parameters
    ----------
    jobtree : job tree
        Tree of jobs to construct names for.

    Attributes
    ----------
    names : dict
        Maps jobs to their fully qualified names.
    """

    def __init__(self, jobtree):
        super(Fullname, self).__init__()
        self._prefix = ''
        self.names = {}
        self.visit(jobtree)

    def visit_job(self, job):
        self.names[job] = self._prefix + job.name

    def visit_chain(self, chain):
        # Chains are named exactly like groups.
        self.visit_group(chain)

    def visit_group(self, group):
        qualified = self._prefix + group.name
        self.names[group] = qualified
        outer_prefix = self._prefix
        # Children carry the group's name as an additional prefix segment.
        self._prefix = qualified + ':'
        for child in group.jobs:
            self.visit(child)
        self._prefix = outer_prefix
@inherit_docs
class Uptodate(JobTreeVisitor):
    """Determines the up-to-date status of jobs.

    The constructor will call visit.

    Parameters
    ----------
    jobtree : job tree
        Tree of jobs to determine the up-to-date status for.
    names : dict
        Maps jobs to their names. (Can be obtained with Fullname.)
    task : TaskDef
        Task that generated the job tree.

    Attributes
    ----------
    names : dict
        Maps jobs to their names.
    task : TaskDef
        Task that generated the job tree.
    status : dict
        Maps jobs to their up-to-date status.
    any_queued : bool
        Whether any visited job is still waiting in the scheduler queue.
    outdated : bool
        Whether any visited job has outdated target files.
    """

    def __init__(self, jobtree, names, task):
        super(Uptodate, self).__init__()
        self.names = names
        self.task = task
        self.status = {}
        # When not None, forces the status of visited jobs to this value;
        # used by visit_chain to mark jobs before/after the newest
        # up-to-date link without re-checking their files.
        self._clamp = None
        self.any_queued = False
        self.outdated = False
        self.visit(jobtree)
        self.post_visit()

    def post_visit(self):
        """Called after `visit`.

        Checks whether jobs are still running and marks these as up-to-date
        while issuing a warning.
        """
        skip = False
        if self.any_queued and self.outdated:
            skip = True
            warnings.warn(JobsRunningWarning(self.task.name))
        if skip:
            # Treat everything as current so nothing is resubmitted while
            # jobs are still in flight.
            for k in self.status:
                self.status[k] = True

    def visit_job(self, job):
        if self.is_job_queued(job):
            # A queued job will produce its targets shortly; count it as
            # up to date.
            self.status[job] = True
        elif self._clamp is None:
            tref = self._get_tref(job.dependencies)
            self.status[job] = self.files_uptodate(tref, job.targets)
        else:
            # Inside a chain the surrounding visit_chain dictates the status.
            self.status[job] = self._clamp
        return self.status[job]

    def visit_chain(self, chain):
        if self._clamp is None:
            # Reference time comes from the chain's external dependencies.
            tref = self._get_tref(chain.jobs[0].dependencies)

            # Find the last link whose targets are still current: earlier
            # links are implicitly up to date, later ones outdated.
            last_uptodate = -1
            for i, job in enumerate(reversed(chain.jobs)):
                if self.files_uptodate(tref, job.targets):
                    last_uptodate = len(chain.jobs) - i - 1
                    break
            for i, job in enumerate(chain.jobs):
                if i <= last_uptodate:
                    self._clamp = True
                elif i == last_uptodate + 1:
                    # The first link after the current prefix is judged on
                    # its own files (it may be queued or freshly built).
                    self._clamp = None
                else:
                    self._clamp = False
                self.visit(job)
            # The chain as a whole is current only when every link is.
            self.status[chain] = last_uptodate + 1 == len(chain.jobs)
            self._clamp = None
        else:
            # Nested chain inside a clamped region: propagate the clamp.
            for job in chain.jobs:
                self.visit(job)
            self.status[chain] = self._clamp
        return self.status[chain]

    def visit_group(self, group):
        subtask_status = [self.visit(j) for j in group.jobs]
        self.status[group] = all(subtask_status)
        return self.status[group]

    def is_job_queued(self, job):
        """Checks whether *job* is queued in the task's scheduler."""
        job_names = [
            self.task.scheduler.get_status(j).name
            for j in self.task.scheduler.get_jobs()]
        is_queued = self.names[job] in job_names
        self.any_queued |= is_queued
        return is_queued

    def files_uptodate(self, tref, targets):
        """Checks that all *targets* exist and are newer than *tref*."""
        uptodate = all(
            self._is_newer_than_tref(target, tref) for target in targets)
        self.outdated |= not uptodate
        return uptodate

    def _get_tref(self, dependencies):
        # Newest modification time among existing dependencies; 0 when none
        # exist (so any existing target counts as up to date).
        tref = 0
        deps = [d for d in dependencies if os.path.exists(d)]
        if len(deps) > 0:
            tref = max(os.stat(d).st_mtime for d in deps)
        return tref

    def _is_newer_than_tref(self, filename, tref):
        return os.path.exists(filename) and os.stat(filename).st_mtime >= tref
| 2.890625 | 3 |
integration-tests/test_hub.py | MayeulC/the-littlest-jupyterhub | 0 | 12763015 | <filename>integration-tests/test_hub.py<gh_stars>0
import requests
from hubtraf.user import User
from hubtraf.auth.dummy import login_dummy
import secrets
import pytest
from functools import partial
import asyncio
import pwd
import grp
import sys
import subprocess
from tljh.normalize import generate_system_username
# Use sudo to invoke it, since this is how users invoke it.
# This catches issues with PATH
TLJH_CONFIG_PATH = ['sudo', 'tljh-config']
def test_hub_up():
    """The hub answers HTTP on the local interface."""
    response = requests.get('http://127.0.0.1')
    response.raise_for_status()
@pytest.mark.asyncio
async def test_user_code_execute():
    """
    User logs in, starts a server & executes code
    """
    # This *must* be localhost, not an IP
    # aiohttp throws away cookies if we are connecting to an IP!
    hub_url = 'http://localhost'
    username = secrets.token_hex(8)

    # Switch the hub to the dummy authenticator (any password accepted).
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'set', 'auth.type', 'dummyauthenticator.DummyAuthenticator')).wait()
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'reload')).wait()

    async with User(username, hub_url, partial(login_dummy, password='')) as u:
        await u.login()
        await u.ensure_server()
        await u.start_kernel()
        # "5 * 4" must evaluate to "20"; the trailing 5, 5 are hubtraf
        # wait/retry parameters -- TODO confirm their exact semantics.
        await u.assert_code_output("5 * 4", "20", 5, 5)

        # Assert that the user exists
        assert pwd.getpwnam(f'jupyter-{username}') is not None
@pytest.mark.asyncio
async def test_user_admin_add():
    """
    User is made an admin, logs in and we check if they are in admin group
    """
    # This *must* be localhost, not an IP
    # aiohttp throws away cookies if we are connecting to an IP!
    hub_url = 'http://localhost'
    username = secrets.token_hex(8)

    # Use the dummy authenticator and pre-register the user as an admin.
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'set', 'auth.type', 'dummyauthenticator.DummyAuthenticator')).wait()
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'add-item', 'users.admin', username)).wait()
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'reload')).wait()

    async with User(username, hub_url, partial(login_dummy, password='')) as u:
        await u.login()
        await u.ensure_server()

        # Assert that the user exists
        assert pwd.getpwnam(f'jupyter-{username}') is not None

        # Assert that the user has admin rights
        assert f'jupyter-{username}' in grp.getgrnam('jupyterhub-admins').gr_mem
# FIXME: Make this test pass
@pytest.mark.asyncio
@pytest.mark.xfail(reason="Unclear why this is failing")
async def test_user_admin_remove():
    """
    User is made an admin, logs in and we check if they are in admin group.
    Then we remove them from admin group, and check they *aren't* in admin group :D
    """
    # This *must* be localhost, not an IP
    # aiohttp throws away cookies if we are connecting to an IP!
    hub_url = 'http://localhost'
    username = secrets.token_hex(8)

    # Dummy authenticator plus admin registration for the fresh user.
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'set', 'auth.type', 'dummyauthenticator.DummyAuthenticator')).wait()
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'add-item', 'users.admin', username)).wait()
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'reload')).wait()

    async with User(username, hub_url, partial(login_dummy, password='')) as u:
        await u.login()
        await u.ensure_server()

        # Assert that the user exists
        assert pwd.getpwnam(f'jupyter-{username}') is not None

        # Assert that the user has admin rights
        assert f'jupyter-{username}' in grp.getgrnam('jupyterhub-admins').gr_mem

        # Strip admin rights again and restart the user's server so the
        # change can take effect.
        assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'remove-item', 'users.admin', username)).wait()
        assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'reload')).wait()
        await u.stop_server()
        await u.ensure_server()

        # Assert that the user does *not* have admin rights
        assert f'jupyter-{username}' not in grp.getgrnam('jupyterhub-admins').gr_mem
@pytest.mark.asyncio
async def test_long_username():
    """
    User with a long name logs in, and we check if their name is properly truncated.
    """
    # This *must* be localhost, not an IP
    # aiohttp throws away cookies if we are connecting to an IP!
    hub_url = 'http://localhost'
    # 64 hex characters -- far beyond the Linux username length limit.
    username = secrets.token_hex(32)

    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'set', 'auth.type', 'dummyauthenticator.DummyAuthenticator')).wait()
    assert 0 == await (await asyncio.create_subprocess_exec(*TLJH_CONFIG_PATH, 'reload')).wait()

    try:
        async with User(username, hub_url, partial(login_dummy, password='')) as u:
            await u.login()
            await u.ensure_server()

            # Assert that the user exists under its generated system name.
            system_username = generate_system_username(f'jupyter-{username}')
            assert pwd.getpwnam(system_username) is not None

            await u.stop_server()
    except:
        # If we have any errors, print jupyterhub logs before exiting
        # (intentional bare except: logs are dumped, then the original
        # exception is re-raised unchanged).
        subprocess.check_call([
            'journalctl',
            '-u', 'jupyterhub',
            '--no-pager'
        ])
        raise
tests/filter_tests/digits_test.py | projectshift/shift-validate | 1 | 12763016 | <reponame>projectshift/shift-validate<gh_stars>1-10
from unittest import TestCase, mock
from nose.plugins.attrib import attr
from shiftschema.filters import Digits
@attr('filter', 'digits')
class DigitsTest(TestCase):
""" String digits filter test"""
def test_create(self):
""" Can create digits filter """
filter = Digits()
self.assertIsInstance(filter, Digits)
def test_pass_through_non_strings(self):
""" Digits: Pass through non-string values (don't do anything) """
filter = Digits()
self.assertEquals(None, filter.filter(None))
self.assertEquals(False, filter.filter(False))
self.assertEquals(123, filter.filter(123))
def test_can_filter_digits(self):
""" Can filter out the digits """
value = '123 some string with 456 digits 789'
expected = '123456789'
filter = Digits()
result = filter.filter(value)
self.assertEqual(expected, result)
def test_empty_string_if_not_found(self):
""" Return empty string if no digits found """
value = 'me contains no digits'
filter = Digits()
self.assertEqual('', filter.filter(value))
def test_convert_to_integer(self):
""" Converting digits result to integer """
value = 'I was born in 1964'
filter = Digits(to_int=True)
self.assertEqual(1964, filter.filter(value))
| 2.75 | 3 |
tools/simnet/preprocess/quora.py | deternan/AnyQ | 1 | 12763017 | <gh_stars>1-10
import os
os.chdir('/tmp/AnyQ-master/tools/simnet/preprocess/')
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from string import punctuation
import itertools
from collections import defaultdict
from gensim import corpora
# Location of the Quora question-pair data; processed output goes beside it.
inputFolder = '/tmp/AnyQ-master/tools/simnet/train/tf/data/'
outputFolder = inputFolder

train = pd.read_csv(inputFolder + "train.csv")
#test = pd.read_csv("data/test.csv")

# Replace missing question fields with the literal string 'empty'.
train = train.fillna('empty')
#test = test.fillna('empty')

# Stop words removed during cleaning; includes capitalised variants since
# removal appears to be case-sensitive downstream -- confirm.
stop_words = ['the', 'a', 'an', 'and', 'but', 'if', 'or', 'because', 'as',
              'what', 'which', 'this', 'that', 'these', 'those', 'then',
              'just', 'so', 'than', 'such', 'both', 'through', 'about',
              'for', 'is', 'of', 'while', 'during', 'to', 'What', 'Which',
              'Is', 'If', 'While', 'This']
def text_to_wordlist(text, remove_stop_words=True, stem_words=False):
    """Normalise one Quora question into a cleaned, space-joined word string.

    Applies, in order: non-alphanumeric stripping, a long chain of
    dataset-specific token fixups, punctuation removal, optional removal of
    the module-level ``stop_words``, and optional Snowball stemming.
    """
    # Clean the text, with the option to remove stop_words and to stem words.
    # Clean the text: replace anything that is not a letter/digit by a space.
    text = re.sub(r"[^A-Za-z0-9]", " ", text)
    # NOTE(review): the substitution above already removed apostrophes, so
    # the contraction patterns below ("what's", "\'ve", ...) can no longer
    # match -- they look like leftovers from a version that expanded
    # contractions first. Confirm before reordering.
    text = re.sub(r"what's", "", text)
    text = re.sub(r"What's", "", text)
    text = re.sub(r"\'s", " ", text)
    text = re.sub(r"\'ve", " have ", text)
    text = re.sub(r"can't", "cannot ", text)
    text = re.sub(r"n't", " not ", text)
    text = re.sub(r"I'm", "I am", text)
    text = re.sub(r" m ", " am ", text)
    text = re.sub(r"\'re", " are ", text)
    text = re.sub(r"\'d", " would ", text)
    text = re.sub(r"\'ll", " will ", text)
    text = re.sub(r"60k", " 60000 ", text)
    text = re.sub(r" e g ", " eg ", text)
    text = re.sub(r" b g ", " bg ", text)
    # NOTE(review): r"\0s" matches a NUL character followed by 's'; this was
    # probably meant to be "0s" -- confirm intent before changing.
    text = re.sub(r"\0s", "0", text)
    text = re.sub(r" 9 11 ", "911", text)
    text = re.sub(r"e-mail", "email", text)
    text = re.sub(r"\s{2,}", " ", text)  # collapse runs of whitespace
    text = re.sub(r"quikly", "quickly", text)
    text = re.sub(r" usa ", " America ", text)
    text = re.sub(r" USA ", " America ", text)
    text = re.sub(r" u s ", " America ", text)
    text = re.sub(r" uk ", " England ", text)
    text = re.sub(r" UK ", " England ", text)
    text = re.sub(r"india", "India", text)
    text = re.sub(r"switzerland", "Switzerland", text)
    text = re.sub(r"china", "China", text)
    text = re.sub(r"chinese", "Chinese", text)
    text = re.sub(r"imrovement", "improvement", text)
    text = re.sub(r"intially", "initially", text)
    text = re.sub(r"quora", "Quora", text)
    text = re.sub(r" dms ", "direct messages ", text)
    text = re.sub(r"demonitization", "demonetization", text)
    text = re.sub(r"actived", "active", text)
    text = re.sub(r"kms", " kilometers ", text)
    text = re.sub(r"KMs", " kilometers ", text)
    text = re.sub(r" cs ", " computer science ", text)
    text = re.sub(r" upvotes ", " up votes ", text)
    text = re.sub(r" iPhone ", " phone ", text)
    # NOTE(review): r"\0rs " also contains a literal NUL -- confirm intent.
    text = re.sub(r"\0rs ", " rs ", text)
    text = re.sub(r"calender", "calendar", text)
    text = re.sub(r"ios", "operating system", text)
    text = re.sub(r"gps", "GPS", text)
    text = re.sub(r"gst", "GST", text)
    text = re.sub(r"programing", "programming", text)
    text = re.sub(r"bestfriend", "best friend", text)
    text = re.sub(r"dna", "DNA", text)
    text = re.sub(r"III", "3", text)
    text = re.sub(r"the US", "America", text)
    text = re.sub(r"Astrology", "astrology", text)
    text = re.sub(r"Method", "method", text)
    text = re.sub(r"Find", "find", text)
    text = re.sub(r"banglore", "Banglore", text)
    text = re.sub(r" J K ", " JK ", text)

    # Remove punctuation from text
    text = ''.join([c for c in text if c not in punctuation])

    # Optionally, remove stop words (module-level ``stop_words`` list)
    if remove_stop_words:
        text = text.split()
        text = [w for w in text if not w in stop_words]
        text = " ".join(text)

    # Optionally, shorten words to their stems
    if stem_words:
        text = text.split()
        stemmer = SnowballStemmer('english')
        stemmed_words = [stemmer.stem(word) for word in text]
        text = " ".join(stemmed_words)

    # Return a list of words
    return(text)
def process_questions(question_list, questions, question_list_name, dataframe):
    """Clean every entry of *questions* into *question_list*, printing
    progress every 100k processed rows."""
    total = len(dataframe)
    for raw_question in questions:
        question_list.append(text_to_wordlist(raw_question))
        done = len(question_list)
        if done % 100000 == 0:
            pct = round(done / total * 100, 1)
            print("{} is {}% complete.".format(question_list_name, pct))
# Clean both question columns of the training set.
train_question1 = []
process_questions(train_question1, train.question1, 'train_question1', train)
train_question2 = []
process_questions(train_question2, train.question2, 'train_question2', train)
#test_question1 = []
#process_questions(test_question1, test.question1, 'test_question1', test)
#
#test_question2 = []
#process_questions(test_question2, test.question2, 'test_question2', test)
# Preview some transformed pairs of questions
a = 0
#for i in range(a,a+10):
#    print(train_question1[i])
#    print(train_question2[i])
#    print()

# Corpus = all cleaned questions: question1 docs first, then question2 docs.
raw_corpus = list(itertools.chain.from_iterable([train_question1,train_question2]))
#[train_question1,train_question2]
stoplist = stop_words
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in raw_corpus]
# Count token frequencies and drop tokens that occur only once.
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
precessed_corpus = [[token for token in text if frequency[token] > 1] for text in texts]
# dictionary: token -> integer id over the pruned corpus
dictionary = corpora.Dictionary(precessed_corpus)
new_doc = "would happen Indian government stole Kohinoor Koh i Noor diamond back"
new_vec = dictionary.doc2bow(new_doc.lower().split())  # NOTE(review): computed but never used
# Encode each document as space-joined token ids, shifted by +1
# (presumably so id 0 can act as padding -- TODO confirm downstream).
bow_corpus = [dictionary.doc2idx(text) for text in precessed_corpus]
bow_corpus_plus_1 = [[i+1 for i in bow_corpu] for bow_corpu in bow_corpus]
bow_corpus_str = [[str(i) for i in bow_corpu_plus] for bow_corpu_plus in bow_corpus_plus_1]
bow_corpus_join = [' '.join(bow_corpus_) for bow_corpus_ in bow_corpus_str]
# 404290 is presumably the row count of train.csv, splitting the corpus back
# into the question1 half and the question2 half -- TODO confirm.
pointwise_train = pd.DataFrame(bow_corpus_join[:404290], columns = ['question1'])
pointwise_train['question2'] = bow_corpus_join[404290:]
pointwise_train['is_duplicate'] = train['is_duplicate']
# Drop pairs where either side became empty after cleaning/pruning.
pointwise_train = pointwise_train[[len(i)>0 for i in pointwise_train['question1']]]
pointwise_train = pointwise_train[[len(i)>0 for i in pointwise_train['question2']]]
# 80/20 split written as headerless TSVs for the simnet trainer.
size = round(len(pointwise_train)*0.8)
pointwise_train[:size].to_csv(outputFolder + 'train_.tsv',sep = '\t', index=False, header=False)
pointwise_train[size:].to_csv(outputFolder + 'test_.tsv',sep = '\t', index=False, header=False)
print('finished')
| 2.671875 | 3 |
tests/config.py | kajusK/HiddenPlaces | 0 | 12763018 | from app.config import Config
class TestConfig(Config):
    """Configuration overrides used by the test suite."""
    TESTING = True
    # In-memory SQLite keeps tests fast and fully isolated.
    SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
    SQLALCHEMY_ECHO = False
    # Disable CSRF so form posts in tests don't need tokens.
    WTF_CSRF_ENABLED = False
| 1.492188 | 1 |
takco/cluster/context.py | karmaresearch/takco | 16 | 12763019 | <gh_stars>10-100
import hashlib
import logging as log
import copy
from takco import Table
def tables_add_context_rows(tables, fields=()):
    """Add context to table depending on table dict fields.

    For each name in *fields*, prepend one context column to every table:
    a header cell labelled ``_<field>`` and, in every data row, a cell whose
    text is the table's value for that field wrapped in a single internal
    surface link. Column-indexed annotations ('entities', 'classes',
    'properties') are shifted right by ``len(fields)`` so they keep
    addressing the original columns. Yields ``Table`` objects.
    """
    for table in tables:
        table = Table(table).to_dict()
        # Insert right-to-left so fields[0] ends up as the leftmost column.
        for field in list(fields)[::-1]:
            empty_header = {
                "text": f"_{field}",
                "surfaceLinks": [],
            }
            table["tableHeaders"] = [
                [empty_header] + list(hrow) for hrow in table["tableHeaders"]
            ]
            tableHeaders = table["tableHeaders"]
            headerText = tuple(
                tuple([cell.get("text", "").lower() for cell in r])
                for r in tableHeaders
            )
            # Header changed, so the header fingerprint must be recomputed.
            table["headerId"] = Table.get_headerId(headerText)

            fieldtext = table.get(field, "")
            # One context cell: the field value with a single internal link
            # spanning the whole string.
            context_cells = [
                {
                    "text": fieldtext,
                    "surfaceLinks": [
                        {
                            "offset": 0,
                            "endOffset": len(fieldtext),
                            "linkType": "INTERNAL",
                            "target": {"href": fieldtext},
                        }
                    ],
                }
            ]
            # deepcopy so data rows do not share one mutable cell dict.
            table["tableData"] = [
                copy.deepcopy(context_cells) + list(drow) for drow in table["tableData"]
            ]
            table["numCols"] = len(table["tableData"][0]) if table["tableData"] else 0

        # Shift column-keyed annotations by the number of added columns.
        n = len(fields)
        if "entities" in table:
            table["entities"] = {
                str(int(ci) + n): x for ci, x in table["entities"].items()
            }
        if "classes" in table:
            table["classes"] = {
                str(int(ci) + n): x for ci, x in table["classes"].items()
            }
        if "properties" in table:
            table["properties"] = {
                str(int(fci) + n): {str(int(tci) + n): e for tci, e in te.items()}
                for fci, te in table["properties"].items()
            }
        yield Table(table)
| 2.28125 | 2 |
consultantform/forms.py | rajeshgupta14/pathscriptfinal | 0 | 12763020 | <gh_stars>0
from django import forms
from consultantform.models import Relatedcompany,Article,Backgroundcheck,Backgroundcheckb, Problemsolving, Problemsolvingp, Digitalization, Digitalizationp, Miom, Miomp, Duediligence, Script, Strategy,Duediligencep, Scriptp, Strategyp, Branch,Subsidiary
from myapp.models import Project,Client,User,Product
from django.utils.translation import ugettext_lazy as _
class ArticleForm(forms.ModelForm):  # KYC form
    """KYC form collecting a client company's master data and opted services."""
    # Date rendered as day/month/year selects covering 1900-2099.
    founding_date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Article
        fields = ('founding_date', 'headquarter_location',
                  'areas_served', 'no_of_employees', 'type_of_company', 'type_of_industry', 'type_of_activity', 'warehouse_addresses',
                  'factory_addresses', 'number_of_owners_and_officers', 'officers_and_roles',
                  'registered_address', 'telephone', 'email', 'website',
                  'services_opted', 'upload_Doc1', 'upload_Doc2', 'upload_Doc3', 'upload_Doc4', 'upload_Doc5', 'upload_Doc6', 'upload_Doc7', 'upload_Doc8', 'upload_Doc9', 'upload_Doc10', 'upload_Doc11', 'upload_Doc12', 'notes')
        labels = {
            'services_opted': _('Services Opted (Hold Ctrl + select for alternate choices, Hold Shift + select for continuous choices)'),
        }

    def __init__(self, request, *args, **kwargs):
        # `request` is accepted (all sibling forms take it) but currently unused.
        super(ArticleForm, self).__init__(*args, **kwargs)
        # self.fields['company_name'].queryset = Client.objects.filter(
        #     userid=request.user.id)
class BranchForm(forms.ModelForm):  # branch form
    """Form registering a branch office of a client company."""
    branch_founding_date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Branch
        fields = (
            'branch_name', 'branch_founding_date', 'branch_location',
            'areas_served_by_branch', 'no_of_employees_in_branch', 'type_of_business_by_branch',
            'number_of_owners_and_officers_in_branch', 'officers_and_roles_in_branch',
            'branch_registered_address', 'branch_telephone', 'branch_email', 'branch_website', 'upload_Doc1', 'upload_Doc2', 'upload_Doc3', 'upload_Doc4', 'upload_Doc5')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(BranchForm, self).__init__(*args, **kwargs)
        #self.fields['company_name'].queryset = Client.objects.filter(
        #    userid=request.user.id)
class SubsidiaryForm(forms.ModelForm):
    """Form registering a subsidiary of a client company."""
    subsidiary_founding_date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Subsidiary
        fields = (
            'subsidiary_name', 'subsidiary_founding_date', 'subsidiary_location',
            'areas_served_by_subsidiary', 'no_of_employees_in_subsidiary', 'type_of_business_by_subsidiary',
            'subsidiary_warehouse_addresses', 'subsidiary_factory_addresses', 'number_of_owners_and_officers_in_subsidiary', 'officers_and_roles_in_subsidiary',
            'subsidiary_registered_address', 'subsidiary_telephone', 'subsidiary_email', 'subsidiary_website', 'upload_Doc1', 'upload_Doc2', 'upload_Doc3', 'upload_Doc4', 'upload_Doc5')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(SubsidiaryForm, self).__init__(*args, **kwargs)
        #self.fields['company_name'].queryset = Client.objects.filter(
        #    userid=request.user.id)
class RelatedcompanyForm(forms.ModelForm):
    """Form capturing details of a company related to the client."""

    class Meta:
        model = Relatedcompany
        fields = (
            'related_company_name', 'relation', 'related_company_registered_address', 'related_company_telephone', 'related_company_email', 'related_company_website', 'upload_Doc1', 'upload_Doc2', 'upload_Doc3', 'upload_Doc4', 'upload_Doc5', 'upload_Doc6', 'upload_Doc7')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(RelatedcompanyForm, self).__init__(*args, **kwargs)
        #self.fields['company_name'].queryset = Client.objects.filter(
        #    userid=request.user.id)
class BackgroundcheckForm(forms.ModelForm):
    """Background-check document checklist.

    Each document field is paired with a ``cN`` ("Currently with") field
    recording which staff member currently holds that document; every
    ``cN`` choice list is restricted to staff users.
    """

    # Number of paired c1..cN "Currently with" fields declared on the model.
    NUM_HOLDER_FIELDS = 13

    class Meta:
        model = Backgroundcheck
        fields = ('c1', 'rOC_Certificates', 'c2', 'mOA', 'c3',
                  'current_List_of_Directors_including_Photo_ID', 'c4',
                  'term_Sheets', 'c5', 'current_Bankers_and_Auditors_and_Company_Secretary', 'c6',
                  'sales_Tax_Registration_Certificate', 'c7', 'last_Filed_Sales_Tax_Certificate', 'c8',
                  'municipal_Certificate', 'c9', 'last_2_years_Audited_Books_of_Accounts', 'c10',
                  'last_Paid_Tax_Receipt', 'c11', 'employee_List_Statement', 'c12',
                  'last_Provident_Fund_Receipt', 'c13', 'list_of_Competitors')
        labels = {
            'company_name': _('Company Name'),
            'c1': _('Currently with'),
            'rOC_Certificates': _('ROC Certificates'),
            'c2': _('Currently with'),
            'mOA': _('MOA'),
            'c3': _('Currently with'),
            'current_List_of_Directors_including_Photo_ID': _('Current List of Directors including Photo ID'),
            'c4': _('Currently with'),
            'term_Sheets': _('Term Sheets'),
            'c5': _('Currently with'),
            'current_Bankers_and_Auditors_and_Company_Secretary': _('Current Bankers, Auditors and Company Secretary'),
            'c6': _('Currently with'),
            'sales_Tax_Registration_Certificate': _('Sales Tax Registration Certificate'),
            'c7': _('Currently with'),
            'last_Filed_Sales_Tax_Certificate': _('Last Filed Sales Tax Certificate'),
            'c8': _('Currently with'),
            'municipal_Certificate': _('Municipal Certificate'),
            'c9': _('Currently with'),
            'last_2_years_Audited_Books_of_Accounts': _('Last 2 years Audited Books of Accounts'),
            'c10': _('Currently with'),
            'last_Paid_Tax_Receipt': _('Last Paid Tax Receipt'),
            'c11': _('Currently with'),
            'employee_List_Statement': _('Employee List Statement'),
            'c12': _('Currently with'),
            'last_Provident_Fund_Receipt': _('last Provident Fund Receipt'),
            'c13': _('Currently with'),
            'list_of_Competitors': _('List of Competitors'),
        }

    def __init__(self, request, *args, **kwargs):
        super(BackgroundcheckForm, self).__init__(*args, **kwargs)
        #self.fields['company_name'].queryset = Client.objects.filter(userid=request.user.id)
        # Restrict every "Currently with" dropdown to staff users; one loop
        # replaces thirteen identical hand-written assignments.
        staff_users = User.objects.filter(is_staff=True)
        for i in range(1, self.NUM_HOLDER_FIELDS + 1):
            self.fields['c%d' % i].queryset = staff_users
class BackgroundcheckbForm(forms.ModelForm):
    """Background-check checklist backed by the Backgroundcheckb model.

    NOTE(review): unlike BackgroundcheckForm this form defines no labels and
    no __init__ restricting the cN fields to staff users -- confirm whether
    that asymmetry is intentional.
    """

    class Meta:
        model = Backgroundcheckb
        fields = ('c1', 'rOC_Certificates', 'c2', 'mOA', 'c3',
                  'current_List_of_Directors_including_Photo_ID', 'c4',
                  'term_Sheets', 'c5', 'current_Bankers_and_Auditors_and_Company_Secretary', 'c6',
                  'sales_Tax_Registration_Certificate', 'c7', 'last_Filed_Sales_Tax_Certificate', 'c8',
                  'municipal_Certificate', 'c9', 'last_2_years_Audited_Books_of_Accounts', 'c10',
                  'last_Paid_Tax_Receipt', 'c11', 'employee_List_Statement', 'c12',
                  'last_Provident_Fund_Receipt', 'c13', 'list_of_Competitors')
    # self.fields['company_name'].queryset = Client.objects.filter(id=User.objects.get(clientid=request.user.clientid).clientid)
class ProjectForm(forms.ModelForm):  # create, view project form
    """Form for creating/viewing a Project."""

    class Meta:
        model = Project
        fields = ('name', 'client', 'product', 'user')

    def __init__(self, request, *args, **kwargs):
        super(ProjectForm, self).__init__(*args, **kwargs)
        # Only offer clients belonging to the logged-in user.
        self.fields['client'].queryset = Client.objects.filter(
            userid=request.user.id)
class ProductForm(forms.ModelForm):  # create, view product form
    """Form for creating/viewing a Product."""

    class Meta:
        model = Product
        fields = ('name', 'description', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(ProductForm, self).__init__(*args, **kwargs)
class CustomerForm(forms.ModelForm):  # KYC form in client view
    """Client-facing KYC form (same data as ArticleForm minus services/notes)."""

    class Meta:
        model = Article
        fields = ('founding_date', 'headquarter_location',
                  'areas_served', 'no_of_employees', 'type_of_company', 'type_of_industry', 'type_of_activity', 'warehouse_addresses',
                  'factory_addresses', 'number_of_owners_and_officers', 'officers_and_roles',
                  'registered_address', 'telephone', 'email', 'website', 'upload_Doc1', 'upload_Doc2', 'upload_Doc3', 'upload_Doc4', 'upload_Doc5', 'upload_Doc6', 'upload_Doc7', 'upload_Doc8', 'upload_Doc9', 'upload_Doc10', 'upload_Doc11', 'upload_Doc12')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(CustomerForm, self).__init__(*args, **kwargs)
        #self.fields['company_name'].queryset = Client.objects.filter(
        #    userid=request.user.id)
class DuediligencetForm(forms.ModelForm):  # due-diligence create form
    """Due-diligence questionnaire (create variant; exposes 'version')."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Duediligence
        fields = ('date', 'version',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'what_is_the_key_need_you_are_providing_for_your_customer',
                  'evidences_that_show_need_stated_above_for_customer_is_fulfilled',
                  'what_are_some_of_the_aspects_you_are_facing_a_challenge_with', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(DuediligencetForm, self).__init__(*args, **kwargs)
class DuediligenceForm(forms.ModelForm):  # due-diligence temporary form
    """Due-diligence questionnaire (temporary variant; no 'version' field)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Duediligence
        fields = ('date',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'what_is_the_key_need_you_are_providing_for_your_customer',
                  'evidences_that_show_need_stated_above_for_customer_is_fulfilled',
                  'what_are_some_of_the_aspects_you_are_facing_a_challenge_with', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        super(DuediligenceForm, self).__init__(*args, **kwargs)
        #self.fields['project'].queryset = Project.objects.filter(
        #user=request.user.id)
class DuediligencepForm(forms.ModelForm):  # due-diligence permanent
    """Due-diligence questionnaire (permanent variant; Duediligencep model)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Duediligencep
        fields = ('date',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'what_is_the_key_need_you_are_providing_for_your_customer',
                  'evidences_that_show_need_stated_above_for_customer_is_fulfilled',
                  'what_are_some_of_the_aspects_you_are_facing_a_challenge_with', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        super(DuediligencepForm, self).__init__(*args, **kwargs)
        #self.fields['project'].queryset = Project.objects.filter(
        #user=request.user.id)
class ScripttForm(forms.ModelForm):  # script create form
    """Script (idea implementation) questionnaire (create variant; exposes 'version')."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Script
        fields = ('date', 'version',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'what_is_the_idea_you_are_looking_to_implement',
                  'why_do_you_think_that_the_idea_should_be_implemented', 'was_this_idea_previously_executed_and_if_yes_state_the_method',
                  'reasons_for_failure_of_previous_implementation_methods', 'other_methods_of_implementation_that_you_would_suggest',
                  'is_the_level_of_implementation_generic_or_specific', 'deadline_by_which_you_need_the_idea_to_be_implemented', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        super(ScripttForm, self).__init__(*args, **kwargs)
        #self.fields['project'].queryset = Project.objects.filter(
        #    user=request.user.id)
class ScriptForm(forms.ModelForm):  # script temporary
    """Script questionnaire (temporary variant; no 'version' field)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Script
        fields = ('date',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'what_is_the_idea_you_are_looking_to_implement',
                  'why_do_you_think_that_the_idea_should_be_implemented', 'was_this_idea_previously_executed_and_if_yes_state_the_method',
                  'reasons_for_failure_of_previous_implementation_methods', 'other_methods_of_implementation_that_you_would_suggest',
                  'is_the_level_of_implementation_generic_or_specific', 'deadline_by_which_you_need_the_idea_to_be_implemented', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        super(ScriptForm, self).__init__(*args, **kwargs)
        #self.fields['project'].queryset = Project.objects.filter(
        #    user=request.user.id)
class ScriptpForm(forms.ModelForm):  # script permanent
    """Script questionnaire (permanent variant; Scriptp model)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Scriptp
        fields = ('date',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'what_is_the_idea_you_are_looking_to_implement',
                  'why_do_you_think_that_the_idea_should_be_implemented', 'was_this_idea_previously_executed_and_if_yes_state_the_method',
                  'reasons_for_failure_of_previous_implementation_methods', 'other_methods_of_implementation_that_you_would_suggest',
                  'is_the_level_of_implementation_generic_or_specific', 'deadline_by_which_you_need_the_idea_to_be_implemented', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        super(ScriptpForm, self).__init__(*args, **kwargs)
        #self.fields['project'].queryset = Project.objects.filter(
        #    user=request.user.id)
class StrategytForm(forms.ModelForm):  # strategy create
    """Business-strategy questionnaire (create variant; exposes 'version')."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Strategy
        fields = ('date', 'version',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'business_strategies_that_are_already_deployed_in_your_company',
                  'what_are_the_strategies_that_were_deployed_but_failed', 'limitations_of_previous_strategies',
                  'factors_to_be_considered_before_planning_new_strategies', 'deadline_by_which_strategy_needs_to_be_deployed', 'upload_Doc1', 'upload_Doc2', 'upload_Doc3', 'upload_Doc4', 'upload_Doc5')

    def __init__(self, request, *args, **kwargs):
        super(StrategytForm, self).__init__(*args, **kwargs)
        #self.fields['project'].queryset = Project.objects.filter(
        #    user=request.user.id)
class StrategyForm(forms.ModelForm):  # strategy temporary
    """Business-strategy questionnaire (temporary variant; no 'version' field)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Strategy
        fields = ('date',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'business_strategies_that_are_already_deployed_in_your_company',
                  'what_are_the_strategies_that_were_deployed_but_failed', 'limitations_of_previous_strategies',
                  'factors_to_be_considered_before_planning_new_strategies', 'deadline_by_which_strategy_needs_to_be_deployed', 'upload_Doc1', 'upload_Doc2', 'upload_Doc3', 'upload_Doc4', 'upload_Doc5')

    def __init__(self, request, *args, **kwargs):
        super(StrategyForm, self).__init__(*args, **kwargs)
        # self.fields['project'].queryset = Project.objects.filter(
        #     user=request.user.id)
class StrategypForm(forms.ModelForm):  # strategy permanent
    """Business-strategy questionnaire (permanent variant; Strategyp model)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Strategyp
        fields = ('date',
                  'please_select_the_growth_stage_of_your_company', 'please_tick_the_type_of_company',
                  'stock_exchange', 'ticker_ID', 'business_strategies_that_are_already_deployed_in_your_company',
                  'what_are_the_strategies_that_were_deployed_but_failed', 'limitations_of_previous_strategies',
                  'factors_to_be_considered_before_planning_new_strategies', 'deadline_by_which_strategy_needs_to_be_deployed', 'upload_Doc1', 'upload_Doc2', 'upload_Doc3', 'upload_Doc4', 'upload_Doc5')

    def __init__(self, request, *args, **kwargs):
        super(StrategypForm, self).__init__(*args, **kwargs)
        #self.fields['project'].queryset = Project.objects.filter(
        #    user=request.user.id)
class ProblemSolvingtForm(forms.ModelForm):  # problem solving create
    """Problem-solving questionnaire (create variant; exposes 'version')."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Problemsolving
        fields = ('date', 'version',
                  'what_is_the_issue_that_needs_to_be_addressed', 'what_is_its_effect_on_the_company',
                  'researches_that_have_been_done_on_the_possible_solutions', 'what_are_the_solutions_that_have_already_been_tried',
                  'what_are_the_solutions_that_failed_and_the_reasons_for_failure', 'what_are_the_parameters_to_be_considered', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(ProblemSolvingtForm, self).__init__(*args, **kwargs)
class ProblemSolvingForm(forms.ModelForm):  # problem solving temporary
    """Problem-solving questionnaire (temporary variant; no 'version' field)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Problemsolving
        fields = ('date',
                  'what_is_the_issue_that_needs_to_be_addressed', 'what_is_its_effect_on_the_company',
                  'researches_that_have_been_done_on_the_possible_solutions', 'what_are_the_solutions_that_have_already_been_tried',
                  'what_are_the_solutions_that_failed_and_the_reasons_for_failure', 'what_are_the_parameters_to_be_considered', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(ProblemSolvingForm, self).__init__(*args, **kwargs)
class ProblemSolvingpForm(forms.ModelForm):  # problem solving permanent
    """Problem-solving questionnaire (permanent variant; Problemsolvingp model)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Problemsolvingp
        fields = ('date',
                  'what_is_the_issue_that_needs_to_be_addressed', 'what_is_its_effect_on_the_company',
                  'researches_that_have_been_done_on_the_possible_solutions', 'what_are_the_solutions_that_have_already_been_tried',
                  'what_are_the_solutions_that_failed_and_the_reasons_for_failure', 'what_are_the_parameters_to_be_considered', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(ProblemSolvingpForm, self).__init__(*args, **kwargs)
class DigitalizationtForm(forms.ModelForm):  # digitalization create
    """Digitalization questionnaire (create variant; exposes 'version')."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Digitalization
        fields = ('date', 'version',
                  'what_are_the_departments_that_need_to_be_digitalized_and_why',
                  'please_mention_if_they_are_new_or_preexisting_departments',
                  'what_is_the_budget_allocated_for_the_digitlization_process',
                  'the_priority_in_which_the_deaprtments_need_to_be_digitalized',
                  'what_are_the_limitations_that_need_to_be_considered', 'the_deadline_by_which_digitalization_needs_to_be_done', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(DigitalizationtForm, self).__init__(*args, **kwargs)
class DigitalizationForm(forms.ModelForm):  # digitalization temporary
    """Digitalization questionnaire (temporary variant; no 'version' field)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Digitalization
        fields = ('date',
                  'what_are_the_departments_that_need_to_be_digitalized_and_why',
                  'please_mention_if_they_are_new_or_preexisting_departments',
                  'what_is_the_budget_allocated_for_the_digitlization_process',
                  'the_priority_in_which_the_deaprtments_need_to_be_digitalized',
                  'what_are_the_limitations_that_need_to_be_considered', 'the_deadline_by_which_digitalization_needs_to_be_done', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(DigitalizationForm, self).__init__(*args, **kwargs)
class DigitalizationpForm(forms.ModelForm):  # digitalization permanent
    """Digitalization questionnaire (permanent variant; Digitalizationp model)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Digitalizationp
        fields = ('date',
                  'what_are_the_departments_that_need_to_be_digitalized_and_why',
                  'please_mention_if_they_are_new_or_preexisting_departments',
                  'what_is_the_budget_allocated_for_the_digitlization_process',
                  'the_priority_in_which_the_deaprtments_need_to_be_digitalized',
                  'what_are_the_limitations_that_need_to_be_considered', 'the_deadline_by_which_digitalization_needs_to_be_done', 'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(DigitalizationpForm, self).__init__(*args, **kwargs)
class MiomtForm(forms.ModelForm):  # minutes-of-meeting create
    """Minutes-of-meeting form (create variant; exposes 'version')."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Miom
        fields = ('date', 'version',
                  'meeting_description', 'main_concerns', 'restrictions',
                  'plan_of_action_for_Pathscript', 'plan_of_action_for_Client',
                  'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(MiomtForm, self).__init__(*args, **kwargs)
class MiomForm(forms.ModelForm):  # minutes-of-meeting temporary
    """Minutes-of-meeting form (temporary variant; no 'version' field)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Miom
        fields = ('date',
                  'meeting_description', 'main_concerns', 'restrictions',
                  'plan_of_action_for_Pathscript', 'plan_of_action_for_Client',
                  'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(MiomForm, self).__init__(*args, **kwargs)
class MiompForm(forms.ModelForm):  # minutes-of-meeting permanent
    """Minutes-of-meeting form (permanent variant; Miomp model)."""
    date = forms.DateField(widget=forms.SelectDateWidget(years=range(1900, 2100)))

    class Meta:
        model = Miomp
        fields = ('date',
                  'meeting_description', 'main_concerns', 'restrictions',
                  'plan_of_action_for_Pathscript', 'plan_of_action_for_Client',
                  'upload_Doc1', 'upload_Doc2')

    def __init__(self, request, *args, **kwargs):
        # `request` accepted for interface consistency; currently unused.
        super(MiompForm, self).__init__(*args, **kwargs)
| 1.875 | 2 |
src/q1102.py | mirzadm/ctci-5th-py | 0 | 12763021 | <reponame>mirzadm/ctci-5th-py<filename>src/q1102.py
"""Sort an array by anagram."""
def sort_by_anagram(array):
    """Reorder *array* in place so that anagrams end up adjacent; return it.

    Words are grouped by a canonical key (case-insensitive sorted
    characters). Groups keep first-appearance order and words keep their
    original relative order within a group — unlike the previous O(n^2)
    swap-based pass, which could scramble within-group order and spent a
    full sort on every pairwise comparison.
    """
    groups = {}  # canonical key -> words in original order (dict is insertion-ordered)
    for word in array:
        key = ''.join(sorted(word.lower()))
        groups.setdefault(key, []).append(word)
    # Splice the grouped order back into the caller's list (in-place).
    array[:] = [word for bucket in groups.values() for word in bucket]
    return array
def is_anagram(s, t):
    """Case-insensitive check that *s* and *t* are anagrams of each other."""
    if len(s) != len(t):
        return False
    # Equal-length strings are anagrams iff their sorted characters match.
    return sorted(s.lower()) == sorted(t.lower())
| 3.640625 | 4 |
thefeck/rules/dirty_untar.py | eoinjordan/thefeck | 0 | 12763022 | <gh_stars>0
import tarfile
import os
from thefeck.utils import for_app
from thefeck.shells import shell
tar_extensions = ('.tar', '.tar.Z', '.tar.bz2', '.tar.gz', '.tar.lz',
'.tar.lzma', '.tar.xz', '.taz', '.tb2', '.tbz', '.tbz2',
'.tgz', '.tlz', '.txz', '.tz')
def _is_tar_extract(cmd):
if '--extract' in cmd:
return True
cmd = cmd.split()
return len(cmd) > 1 and 'x' in cmd[1]
def _tar_file(cmd):
for c in cmd:
for ext in tar_extensions:
if c.endswith(ext):
return (c, c[0:len(c) - len(ext)])
@for_app('tar')
def match(command):
    """Match tar extractions run without an explicit -C target directory."""
    return ('-C' not in command.script
            and _is_tar_extract(command.script)
            and _tar_file(command.script_parts) is not None)
def get_new_command(command):
    """Rebuild the command so extraction happens inside a fresh directory
    named after the archive stem: ``mkdir -p <stem> && <cmd> -C <stem>``."""
    dir = shell.quote(_tar_file(command.script_parts)[1])
    return shell.and_('mkdir -p {dir}', '{cmd} -C {dir}') \
        .format(dir=dir, cmd=command.script)
def side_effect(old_cmd, command):
    """Undo the dirty extraction: delete every file the archive spilled
    into the current directory, so the corrected command re-extracts them
    into the target directory instead."""
    # tarfile.open() auto-detects compression ('r:*'); the bare
    # tarfile.TarFile() constructor only reads plain uncompressed tar and
    # raises ReadError on .tar.gz/.tbz2/... archives that tar_extensions
    # explicitly supports.
    with tarfile.open(_tar_file(old_cmd.script_parts)[0]) as archive:
        for file in archive.getnames():
            try:
                os.remove(file)
            except OSError:
                # does not try to remove directories as we cannot know if they
                # already existed before
                pass
mvpnet/evaluate_3d.py | shnhrtkyk/mvpnet | 79 | 12763023 | <filename>mvpnet/evaluate_3d.py
import numpy as np
from sklearn.metrics import confusion_matrix as CM
# The 20 evaluated semantic classes, paired index-for-index with the raw
# label ids in EVAL_CLASS_IDS below.
CLASS_NAMES = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
               'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain',
               'refridgerator', 'showercurtain', 'toilet', 'sink', 'bathtub', 'otherfurniture',
               ]
# Raw label ids corresponding 1:1 to CLASS_NAMES.
EVAL_CLASS_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
class Evaluator(object):
    """Accumulates a confusion matrix over point-wise semantic predictions
    and derives overall/per-class accuracy and IoU.

    Args:
        class_names (iterable of str): display name per evaluated class.
        labels (iterable of int, optional): label id per class (e.g. the
            non-contiguous ids in EVAL_CLASS_IDS); defaults to 0..C-1.
    """

    def __init__(self, class_names, labels=None):
        self.class_names = tuple(class_names)
        self.num_classes = len(class_names)
        self.labels = np.arange(self.num_classes) if labels is None else np.array(labels)
        assert self.labels.shape[0] == self.num_classes
        # confusion_matrix[i, j] = #points with gt class i predicted as class j
        self.confusion_matrix = np.zeros((self.num_classes, self.num_classes))

    def update(self, pred_label, gt_label):
        """Update per instance.

        Args:
            pred_label (np.ndarray): (num_points,)
            gt_label (np.ndarray): (num_points,); -100 marks ignored points.
        """
        # convert ignore_label to num_classes
        # refer to sklearn.metrics.confusion_matrix
        if np.all(gt_label < 0):
            print('Invalid label.')
            return
        # Remap -100 to num_classes, which is absent from self.labels and is
        # therefore dropped by confusion_matrix below. np.where (instead of
        # the previous in-place assignment) avoids mutating the caller's
        # array as a side effect.
        gt_label = np.where(gt_label == -100, self.num_classes, gt_label)
        confusion_matrix = CM(gt_label.flatten(),
                              pred_label.flatten(),
                              labels=self.labels)
        self.confusion_matrix += confusion_matrix

    def batch_update(self, pred_labels, gt_labels):
        """Accumulate a batch of (prediction, ground-truth) pairs."""
        assert len(pred_labels) == len(gt_labels)
        for pred_label, gt_label in zip(pred_labels, gt_labels):
            self.update(pred_label, gt_label)

    @property
    def overall_acc(self):
        """Point-wise accuracy over everything seen so far."""
        return np.sum(np.diag(self.confusion_matrix)) / np.sum(self.confusion_matrix)

    @property
    def overall_iou(self):
        """Mean IoU, ignoring never-seen classes (their IoU is NaN)."""
        return np.nanmean(self.class_iou)

    @property
    def class_seg_acc(self):
        """Per-class recall: diagonal over ground-truth row sum."""
        return [self.confusion_matrix[i, i] / np.sum(self.confusion_matrix[i])
                for i in range(self.num_classes)]

    @property
    def class_iou(self):
        """Per-class IoU; NaN for classes with an empty union."""
        iou_list = []
        for i in range(self.num_classes):
            tp = self.confusion_matrix[i, i]
            p = self.confusion_matrix[:, i].sum()
            g = self.confusion_matrix[i, :].sum()
            union = p + g - tp
            if union == 0:
                iou = float('nan')
            else:
                iou = tp / union
            iou_list.append(iou)
        return iou_list

    def print_table(self):
        """Return (not print) an ASCII table of per-class accuracy/IoU."""
        from tabulate import tabulate
        header = ['Class', 'Accuracy', 'IOU', 'Total']
        seg_acc_per_class = self.class_seg_acc
        iou_per_class = self.class_iou
        table = []
        for ind, class_name in enumerate(self.class_names):
            table.append([class_name,
                          seg_acc_per_class[ind] * 100,
                          iou_per_class[ind] * 100,
                          int(self.confusion_matrix[ind].sum()),
                          ])
        return tabulate(table, headers=header, tablefmt='psql', floatfmt='.2f')

    def save_table(self, filename):
        """Write overall accuracy/IoU and per-class IoU to *filename* as TSV."""
        from tabulate import tabulate
        header = ('overall acc', 'overall iou') + self.class_names
        table = [[self.overall_acc, self.overall_iou] + self.class_iou]
        with open(filename, 'w') as f:
            # In order to unify format, remove all the alignments.
            f.write(tabulate(table, headers=header, tablefmt='tsv', floatfmt='.5f',
                             numalign=None, stralign=None))
numalign=None, stralign=None))
def main():
    """Integrated official evaluation scripts
    Use multiple threads to process in parallel
    References: https://github.com/ScanNet/ScanNet/blob/master/BenchmarkScripts/3d_evaluation/evaluate_semantic_label.py
    """
    import os
    import sys
    import argparse
    from torch.utils.data import DataLoader
    parser = argparse.ArgumentParser(description='Evaluate mIoU on ScanNetV2')
    parser.add_argument(
        '--pred-path', type=str, help='path to prediction',
    )
    parser.add_argument(
        '--gt-path', type=str, help='path to ground-truth',
    )
    args = parser.parse_args()
    # Each prediction .txt must have a same-named ground-truth file.
    pred_files = [f for f in os.listdir(args.pred_path) if f.endswith('.txt')]
    gt_files = []
    if len(pred_files) == 0:
        raise RuntimeError('No result files found.')
    for i in range(len(pred_files)):
        gt_file = os.path.join(args.gt_path, pred_files[i])
        if not os.path.isfile(gt_file):
            raise RuntimeError('Result file {} does not match any gt file'.format(pred_files[i]))
        gt_files.append(gt_file)
        pred_files[i] = os.path.join(args.pred_path, pred_files[i])
    evaluator = Evaluator(CLASS_NAMES, EVAL_CLASS_IDS)
    print('evaluating', len(pred_files), 'scans...')
    # DataLoader is (ab)used here purely as a multi-worker file loader:
    # collate_fn turns each (pred_path, gt_path) pair into two uint8 arrays.
    dataloader = DataLoader(list(zip(pred_files, gt_files)), batch_size=1, num_workers=4,
                            collate_fn=lambda x: tuple(np.loadtxt(xx, dtype=np.uint8) for xx in x[0]))
    # sync
    # for i in range(len(pred_files)):
    #     # It takes a long time to load data.
    #     pred_label = np.loadtxt(pred_files[i], dtype=np.uint8)
    #     gt_label = np.loadtxt(gt_files[i], dtype=np.uint8)
    #     evaluator.update(pred_label, gt_label)
    #     sys.stdout.write("\rscans processed: {}".format(i + 1))
    #     sys.stdout.flush()
    # async, much faster
    for i, (pred_label, gt_label) in enumerate(dataloader):
        evaluator.update(pred_label, gt_label)
        sys.stdout.write("\rscans processed: {}".format(i + 1))
        sys.stdout.flush()
    print('')
    print(evaluator.print_table())


if __name__ == '__main__':
    main()
| 2.609375 | 3 |
server/users/forms.py | NRshka/distconfig | 0 | 12763024 | from flask_wtf import FlaskForm
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class LoginForm(FlaskForm):
    """Login form: username (6-20 chars) and password (6-48 chars), both required.

    NOTE(review): TextField was a deprecated alias of StringField and was
    removed in WTForms 3.0 -- confirm the pinned wtforms version or migrate
    these fields (and the import) to StringField.
    """
    username = TextField("Login", validators=[DataRequired(), Length(min=6, max=20)])
    password = PasswordField("Password", validators=[DataRequired(), Length(min=6, max=48)])
class RegisterForm(FlaskForm):
    """Registration form: username, email, password + confirmation, full name.

    NOTE(review): TextField is removed in WTForms 3.0 (alias of StringField)
    -- confirm the pinned wtforms version.
    """
    username = TextField("Login", validators=[DataRequired(), Length(min=6, max=20)])
    # Email() validates address syntax only, not deliverability.
    email = TextField("Email", validators=[DataRequired(), Email()])
    password = PasswordField(
        "Password", validators=[DataRequired(), Length(min=6, max=48)]
    )
    # Must match the "password" field exactly.
    confirm = PasswordField(
        "Repeat Password", validators=[DataRequired(), EqualTo("password")]
    )
    fullname = TextField("Full Name", validators=[DataRequired()])
| 3.03125 | 3 |
src/domain/component.py | gmdlba/simulation | 0 | 12763025 | class Component:
def __init__(self, fail_ratio, repair_ratio, state):
self.fail_ratio = fail_ratio
self.repair_ratio = repair_ratio
self.state = state | 2.578125 | 3 |
setup.py | jason-ennis/runcloud-letsencrypt | 77 | 12763026 | from setuptools import setup
setup(name='rcssl',
version='1.5',
description='Install Let\'s Encrypt SSL on RunCloud servers the easy way.',
author="Rehmat",
author_email="<EMAIL>",
url="https://github.com/rehmatworks/runcloud-letsencrypt",
license="MIT",
entry_points={
'console_scripts': [
'rcssl = rcssl.rcssl:main'
],
},
packages=[
'rcssl'
],
install_requires=[
'python-nginx'
]
) | 1.390625 | 1 |
WIN_FileProtectionSetting/OS_gathering/files/extracting.py | exastro-playbook-collection/OS-Windows2019 | 0 | 12763027 | import re
import json
import sys
import os
args = sys.argv
if (len(args) < 2):
sys.exit(1)
path = args[1]
if(path[-1:] == "/"):
path = path[:-1]
result_filedata_list = []
count = 0
while True:
# Decectory exist check
dirpath = path + '/command/' + str(count)
if os.path.isdir(dirpath):
count +=1
else:
break
filepath = dirpath + '/stdout.txt'
if os.path.isfile(filepath) and os.path.getsize(filepath) > 0:
with open(filepath) as file_object:
reader = json.load(file_object)
if isinstance(reader, list):
rows = reader
else:
rows = []
rows.append(reader)
for row in rows:
if count % 2 == 1:
encrypt_table = {}
for param_key, param_value in row.items():
encrypt_table[param_key] = param_value
else:
filedata_table = {}
for param_key, param_value in row.items():
if param_key == 'Path':
index = param_value.find('::')
if index != -1:
param_value = param_value[(index + 2):].strip()
if param_value in encrypt_table:
filedata_table['Encrypt'] = encrypt_table[param_value]
filedata_table['Name'] = param_value
elif param_key == 'AccessToString':
filedata_table[param_key] = param_value.split('\n')
else:
filedata_table[param_key] = param_value
if len(filedata_table) > 0:
filedata_table['Action'] = 'file'
result_filedata_list.append(filedata_table)
result = {}
target_parameter_root_key = 'VAR_WIN_FileProtectionSetting'
result[target_parameter_root_key] = result_filedata_list
print(json.dumps(result))
| 2.796875 | 3 |
twitter.py | projeto7c0/7c0-tweets | 10 | 12763028 | <filename>twitter.py<gh_stars>1-10
import tweepy
import twitter_auth
import time
from datetime import datetime
def tweet(handle, tweet, archive_url, creation_date, idTweets, status):
    """Post a reply thread describing one recovered (deleted) tweet.

    Replies under `status` with the tweet id/date, a 200-char excerpt, and
    the archive link when one exists. Returns the last posted status.
    """
    api = twitter_auth.autentica_tweets()
    header = "O tweet com id " + str(idTweets[0]) + " de " + str(creation_date) + " que falava sobre: "
    status = api.update_status(in_reply_to_status_id = status.id, status = header)
    # Break up "//" so Twitter does not treat the excerpt as a URL.
    tweet = str(tweet).replace("//", "/ /")
    excerpt = tweet[0:200] + "..."
    status = api.update_status(in_reply_to_status_id = status.id, status = excerpt)
    # Archive URLs starting with "Não" mean the archiving attempt failed.
    if not archive_url.startswith("Não"):
        status = api.update_status(in_reply_to_status_id = status.id, status = "O bot tentou arquivar o tweet nesse link: " + archive_url)
    return status
def tweet_start():
    """Open the daily report thread with a timestamped first tweet and return it."""
    api = twitter_auth.autentica_tweets()
    opening = ("Começando o relatório às " + datetime.now().isoformat(timespec='minutes')
               + ". Os tweets serão espaçados de tempos em tempos, para evitar que o bot seja bloqueado pelo twitter.")
    return api.update_status(status=opening)
def tweet_start_arroba(handle, qtde_tweets):
    """Announce how many tweets disappeared for one handle; return the status."""
    api = twitter_auth.autentica_tweets()
    announcement = ("Começando a listagem de tweets recuperados para a arroba " + handle[0]
                    + ". Sumiram desde a nossa última checagem " + str(qtde_tweets) + " tweets.")
    return api.update_status(announcement)
def tweet_end(qtde_tweets):
    """Close the daily thread: post the total and a chain of promo/support tweets."""
    api = twitter_auth.autentica_tweets()
    # NOTE(review): looks like leftover debug output -- confirm it is intentional.
    print(qtde_tweets)
    status = api.update_status("Fim da triagem diária, foram encontrados " + str(qtde_tweets) + " tweets que sumiram compartilhe o perfil @projeto7c0 para que mais " +
    "pessoas saibam o que desaparece da timeline dos políticos.")
    status = api.update_status(in_reply_to_status_id = status.id, status = "Quer saber mais sobre o projeto? Acesse https://projeto7c0.com.br/ e veja saiba tudo sobre o projeto")
    status = api.update_status(in_reply_to_status_id = status.id, status = "Quer ajudar a financiar a transparência na comunicação da democracia brasileira? Acesse o nosso apoia-se em https://apoia.se/projeto-7c0 e veja como contribuir")
    status = api.update_status(in_reply_to_status_id = status.id, status="Quer pagar um café pra gente sem se comprometer mensalmente? Manda um Pix! A chave é <EMAIL>, e qualquer valor é bem vindo!")
    api.update_status(in_reply_to_status_id = status.id, status = "Quer ficar atualizado? Assine a nossa newsletter, que teremos informações quinzenais para você! Para assinar só clicar aqui https://projeto7c0.us20.list-manage.com//subscribe/post?u=984470f280d60b82c247e3d7b&id=00a31b0d4a")
def tweet_end_arroba(arroba, last_tweet):
    """Close one handle's sub-thread by replying to its last posted tweet."""
    api = twitter_auth.autentica_tweets()
    closing = "Fim da listagem de tweets recuperados para a arroba " + arroba[0]
    api.update_status(in_reply_to_status_id=last_tweet.id, status=closing)
| 3.03125 | 3 |
timezone.py | Vargynja/timezone-converter | 1 | 12763029 | #!/usr/bin/python
# coding=utf-8
import sys
import csv
import datetime
#run with python timezone.py 8.30 pdt eest
# time timezone timezone-of-conversion
def main():
    """Convert a clock time between two timezones given on the command line.

    argv: <time HH.MM> <source timezone code> <target timezone code>
    Reads offsets from timezones.csv (code, name, "UTC+H[.MM]").
    """
    # check if correct ammount of arguments doesnt work otherwise
    if len(sys.argv) != 4:
        print('Incorrect amount of arguments.')
        sys.exit(1)
    target_time = -99
    # get the time given in UTC
    with open('timezones.csv', newline='') as csvfile:
        timezones = csv.reader(csvfile, delimiter=',')
        utc_time = get_utc_time(timezones)
        target_time = utc_time
    # get the time difference of the target to UTC
    with open('timezones.csv', newline='') as csvfile:
        timezones = csv.reader(csvfile, delimiter=',')
        split_time = get_target_time_dif(timezones)
    # split the offset into hours and minutes (some timezones have minutes)
    time_dif_h = float(split_time[0])
    time_dif_m = 0
    # check if there was a split before trying to get minutes
    if len(split_time) == 2:
        time_dif_m = float(split_time[1])
        # FIX: the minutes must carry the same sign as the hours, otherwise a
        # negative fractional offset like UTC-3.30 was applied as -3:00 +0:30.
        if split_time[0].startswith('-'):
            time_dif_m = -time_dif_m
    # apply timezone time difference
    target_time = target_time + datetime.timedelta(hours=time_dif_h, minutes=time_dif_m)
    to_print = sys.argv[1] + ' ' + sys.argv[2].upper() + ' is ' + target_time.strftime('%H.%M') + ' ' + sys.argv[3].upper()
    print(to_print)
def get_utc_time(timezones):
    """Return the entered time (sys.argv[1], 'HH.MM') converted to UTC.

    `timezones` yields rows of (code, name, "UTC+H[.MM]"); the source
    timezone code is taken from sys.argv[2]. Exits with status 1 when the
    code is not found.
    """
    for row in timezones:
        # check for timezone argument against csv data
        if row[0].lower() == sys.argv[2].lower():
            # Strip the leading "UTC" to keep just the signed offset.
            utc_time_dif = row[2][3:]
            entered_time = datetime.datetime.strptime(sys.argv[1], '%H.%M')
            # Split the offset into hours and minutes (some zones have minutes).
            split_time = utc_time_dif.split('.')
            hours = float(split_time[0])
            minutes = float(split_time[1]) if len(split_time) == 2 else 0.0
            # FIX: minutes must carry the sign of the hours; previously a
            # positive fractional offset like UTC+5.45 was applied as -5:00 +0:45.
            if split_time[0].startswith('-'):
                minutes = -minutes
            # local = UTC + offset  =>  UTC = local - offset
            return entered_time - datetime.timedelta(hours=hours, minutes=minutes)
    # if it gets here the timezone code was wrong
    print('First timezone not found')
    sys.exit(1)
def get_target_time_dif(timezones):
    """Return the target timezone's UTC offset split on '.', e.g. ['+5', '45'].

    The target code comes from sys.argv[3]; exits with status 1 if not found.
    """
    wanted = sys.argv[3].lower()
    for code, _name, offset in ((row[0], row[1], row[2]) for row in timezones):
        if code.lower() == wanted:
            # Drop the "UTC" prefix, keep the signed "H[.MM]" part.
            return offset[3:].split('.')
    print('Second timezone not found')
    sys.exit(1)
# Script entry point.
if __name__ == "__main__":
    main()
| 3.59375 | 4 |
vega/core/__init__.py | wnov/vega | 6 | 12763030 | from .run import run, env_args, init_local_cluster_args
from .backend_register import set_backend
from zeus import is_gpu_device, is_npu_device, is_torch_backend, is_tf_backend, is_ms_backend
from zeus.trainer import *
# from .evaluator import *
from zeus.common import FileOps, TaskOps, UserConfig, module_existed
| 1.125 | 1 |
custom1/custom1/report/item_summary/item_summary.py | jof2jc/custom1 | 0 | 12763031 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import flt,cstr
def execute(filters=None):
    """Report entry point: return (columns, data) for the Item Summary report.

    Users with Accounts Manager / Accounting roles additionally see valuation
    rate and last purchase rate columns; all others get the reduced row.
    """
    if not filters: filters = {}
    item_map = {}
    columns = []
    pl = {}
    columns = get_columns(filters)
    data = []
    #if "SERIMPI" in cstr(frappe.db.get_single_value('Global Defaults', 'default_company')):
    #	if not filters.get("item_code"):
    #		return columns, data #frappe.throw("Please define Item Code")
    item_map = get_item_details(filters)
    pl = get_price_list(filters)
    #last_purchase_rate = get_last_purchase_rate()
    #bom_rate = get_item_bom_rate()
    #val_rate_map = get_bin_details()
    from erpnext.accounts.utils import get_currency_precision
    precision = get_currency_precision() or 2
    # NOTE(review): item_map is a list of row dicts (frappe.db.sql as_dict=1);
    # sorted() over dicts raises TypeError on Python 3 -- confirm intended key.
    if ("Accounts Manager" in frappe.get_roles(frappe.session.user) or "Accounting" in frappe.get_roles(frappe.session.user)):
        for item in sorted(item_map):
            #avg_sales = get_avg_sales_qty(item.name) or 0.0
            actual_qty = item.actual_qty or 0.0
            safety_stock = item.safety_stock or 0.0
            data.append([item.name, item["item_name"], actual_qty, item.stock_uom,
                item.warehouse, item.location_bin,
                #avg_sales, int(item.actual_qty/avg_sales) if avg_sales > 0 else 0.0,
                item.avg_qty or 0.0, int(item.actual_qty/item.avg_qty) if item.avg_qty > 0 else 0.0,
                pl.get(item.name, {}).get("Selling"), item.valuation_rate, #val_rate_map[item]["val_rate"], #flt(val_rate_map.get(item, 0), precision),
                item.last_purchase_rate or 0.0, #get_last_purchase_rate(item.name) or 0.0, #flt(last_purchase_rate.get(item.name, 0), precision),
                item.brand, item.item_group, item.description, safety_stock, actual_qty - safety_stock, item.default_supplier
                #pl.get(item, {}).get("Buying"),
                #flt(bom_rate.get(item, 0), precision)
                ])
    else:
        for item in sorted(item_map):
            actual_qty = item.actual_qty or 0.0
            safety_stock = item.safety_stock or 0.0
            #avg_sales = get_avg_sales_qty(item.name) or 0.0
            data.append([item.name, item["item_name"], actual_qty, item.stock_uom,
                item.warehouse, item.location_bin,
                #avg_sales, int(item.actual_qty/avg_sales) if avg_sales > 0 else 0.0,
                item.avg_qty or 0.0, int(item.actual_qty/item.avg_qty) if item.avg_qty > 0 else 0.0,
                pl.get(item.name, {}).get("Selling"), #item.valuation_rate, #val_rate_map[item]["val_rate"], #flt(val_rate_map.get(item, 0), precision),
                #flt(last_purchase_rate.get(item.name, 0), precision),
                item.brand, item.item_group, item.description, safety_stock, actual_qty - safety_stock, item.default_supplier
                #pl.get(item, {}).get("Buying"),
                #flt(bom_rate.get(item, 0), precision)
                ])
    return columns, data
def get_columns(filters):
    """return columns based on filters"""
    # Column format: "<label>:<fieldtype>[/<options>]:<width>".
    # Accounting roles additionally see Valuation Rate and Last Purchase Rate.
    if ("Accounts Manager" in frappe.get_roles(frappe.session.user) or "Accounting" in frappe.get_roles(frappe.session.user)):
        columns = [_("Item") + ":Link/Item:125", _("Item Name") + "::200", _("Actual Qty") + ":Float:75", _("UOM") + ":Link/UOM:65",
            _("Warehouse") + ":Link/Warehouse:125", _("Location") + "::80", _("Sales Avg/30d") + ":Float:100",_("Age Days") + "::70",
            _("Sales Price List") + "::240",
            _("Valuation Rate") + ":Currency:80", _("Last Purchase Rate") + ":Currency:90",
            _("Brand") + ":Link/Brand:100", _("Item Group") + ":Link/Item Group:125", _("Description") + "::150",
            _("Safety Stock") + ":Float:85", _("Safety Gap") + ":Float:85", _("Default Supplier") + ":Link/Supplier:125"]
        #_("Purchase Price List") + "::180", _("BOM Rate") + ":Currency:90"]
    else:
        columns = [_("Item") + ":Link/Item:125", _("Item Name") + "::200", _("Actual Qty") + ":Float:75", _("UOM") + ":Link/UOM:65",
            _("Warehouse") + ":Link/Warehouse:125", _("Location") + "::80",
            _("Sales Avg/30d") + "::100",_("Age Days") + "::70",
            _("Sales Price List") + "::240",
            #_("Valuation Rate") + ":Currency:80", _("Last Purchase Rate") + ":Currency:90",
            _("Brand") + ":Link/Brand:100", _("Item Group") + ":Link/Item Group:125", _("Description") + "::150",
            _("Safety Stock") + ":Float:85", _("Safety Gap") + ":Float:85", _("Default Supplier") + ":Link/Supplier:125"]
        #_("Purchase Price List") + "::180", _("BOM Rate") + ":Currency:90"]
    return columns
def get_conditions(filters):
    """Build the extra SQL WHERE fragment for the item query.

    Returns "" when no filter applies, otherwise a string of the form
    "and <cond> [and <cond>]" with %(name)s placeholders for frappe.db.sql.
    """
    clauses = []
    if filters.get("item_code"):
        clauses.append("it.item_code=%(item_code)s")
    if filters.get("company"):
        clauses.append("wh.company=%(company)s")
    if not clauses:
        return ""
    return "and {}".format(" and ".join(clauses))
def get_item_details(filters):
    """returns all items details

    Joins Item -> Bin -> Warehouse and computes two correlated subqueries per
    row: 30-day average sales qty and the latest purchase invoice rate. The
    second branch additionally joins the custom Item Location child table when
    the Item doctype has a `locations` field.
    """
    item_map = {}
    if "location_bin" not in frappe.db.get_table_columns("Item") or not frappe.get_meta("Item").has_field("locations"):
        item_map = frappe.db.sql("""select it.name,
            (select cast(avg(si_item.stock_qty) as unsigned) as avg_qty from `tabSales Invoice Item` si_item
            inner join `tabSales Invoice` si on si.name=si_item.parent where si_item.item_code=it.item_code
            and si_item.stock_uom=it.stock_uom and si_item.warehouse=bin.warehouse
            and si.docstatus=1 and si.posting_date between date_add(curdate(),INTERVAL -30 DAY) and curdate()) as avg_qty,
            (select rate from `tabPurchase Invoice` pi join `tabPurchase Invoice Item` pi_item on pi.name=pi_item.parent
            where pi_item.item_code=it.item_code and pi_item.stock_uom=it.stock_uom
            and pi.docstatus=1 order by pi.posting_date desc limit 1) as last_purchase_rate,
            it.item_group, it.brand, it.item_name, "" as location_bin, it.description, bin.actual_qty, bin.warehouse, wh.company,it.safety_stock,
            it.stock_uom, bin.valuation_rate, it.default_supplier from `tabItem` it left join `tabBin` bin on (it.name=bin.item_code and it.stock_uom = bin.stock_uom)
            left join `tabWarehouse` wh on wh.name=bin.warehouse
            where it.is_stock_item=1 and it.disabled <> 1 {item_conditions} order by it.item_code, it.item_group"""\
            .format(item_conditions=get_conditions(filters)),filters, as_dict=1)
    else:
        item_map = frappe.db.sql("""select it.name,
            (select cast(avg(si_item.stock_qty) as unsigned) as avg_qty from `tabSales Invoice Item` si_item
            inner join `tabSales Invoice` si on si.name=si_item.parent where si_item.item_code=it.item_code
            and si_item.stock_uom=it.stock_uom and si_item.warehouse=bin.warehouse
            and si.docstatus=1 and si.posting_date between date_add(curdate(),INTERVAL -30 DAY) and curdate()) as avg_qty,
            (select rate from `tabPurchase Invoice` pi join `tabPurchase Invoice Item` pi_item on pi.name=pi_item.parent
            where pi_item.item_code=it.item_code and pi_item.stock_uom=it.stock_uom
            and pi.docstatus=1 order by pi.posting_date desc limit 1) as last_purchase_rate,
            it.item_group, it.brand, it.item_name, it.description, ifnull(loc.location,it.location_bin) as location_bin, bin.actual_qty, bin.warehouse, wh.company,it.safety_stock,
            it.stock_uom, bin.valuation_rate, it.default_supplier from `tabItem` it left join `tabBin` bin on (it.name=bin.item_code and it.stock_uom = bin.stock_uom)
            left join `tabItem Location` loc on (loc.parent=it.name and bin.warehouse=loc.warehouse) left join `tabWarehouse` wh on wh.name=bin.warehouse
            where it.is_stock_item=1 and it.disabled <> 1 {item_conditions} order by it.item_code, it.item_group"""\
            .format(item_conditions=get_conditions(filters)),filters, as_dict=1)
    #print item_map
    return item_map
def get_avg_sales_qty(item_code):
    """Get average sales qty in last 30 days of an item"""
    sales_avg = {}
    # FIX: the window was `si.posting_date between date_add(si.posting_date,
    # INTERVAL -30 DAY) and curdate()`, which is self-referential and matched
    # every past invoice. Use curdate()-30, consistent with the 30-day
    # subquery in get_item_details.
    for i in frappe.db.sql("""select cast(avg(si_item.qty) as unsigned) as avg_qty from `tabSales Invoice` si join `tabSales Invoice Item` si_item on si.name=si_item.parent
        where si.posting_date between date_add(curdate(),INTERVAL -30 DAY) and curdate() and si_item.item_code=%s""", item_code,as_dict=1):
        sales_avg.setdefault(item_code,i.avg_qty)
    return sales_avg.get(item_code,0)
def get_pl_conditions(filters):
    """Build the optional item filter fragment for the Item Price query."""
    if filters.get("item_code"):
        return "and ip.item_code=%(item_code)s"
    return ""
def get_price_list(filters):
    """Get selling & buying price list of every item

    Returns {item_code: {"Selling": "...", "Buying": "..."}} where each value
    is a comma-joined list of formatted "<currency> <rate> - <price list>"
    strings from enabled price lists.
    """
    rate = {}
    # NOTE: the WHERE clause restricts to ip.selling=1, so in practice only
    # the "Selling" bucket is populated.
    price_list = frappe.db.sql("""select ip.item_code, ip.buying, ip.selling,
        concat(ifnull(cu.symbol,ip.currency), " ", FORMAT(ip.price_list_rate,2), " - ", ip.price_list) as price
        from `tabItem Price` ip, `tabPrice List` pl, `tabCurrency` cu
        where ip.price_list=pl.name and pl.currency=cu.name and pl.enabled=1 and ip.selling=1 {pl_conditions} order by ip.item_code, ip.price_list"""\
        .format(pl_conditions=get_pl_conditions(filters)), filters, as_dict=1)
    for j in price_list:
        if j.price:
            rate.setdefault(j.item_code, {}).setdefault("Buying" if j.buying else "Selling", []).append(j.price)
    item_rate_map = {}
    #print rate
    for item in rate:
        for buying_or_selling in rate[item]:
            item_rate_map.setdefault(item, {}).setdefault(buying_or_selling,
                ", ".join(rate[item].get(buying_or_selling, [])))
    #print item_rate_map
    return item_rate_map
def get_last_purchase_rate(item_code):
    """Return the most recent base purchase rate for `item_code` (0 if none),
    looking across Purchase Orders, Purchase Receipts and Purchase Invoices.

    NOTE(review): `item_code` is interpolated into the SQL with str.format,
    which is an injection risk if it can contain quotes -- confirm callers
    only pass validated item codes, or switch to %s parameters.
    """
    item_last_purchase_rate_map = {}
    # UNION of the latest submitted document of each type, newest first.
    query = """select * from (select
        result.item_code, result.posting_date,
        result.base_rate
        from (
            (select
                po_item.item_code,
                po_item.item_name,
                po.transaction_date as posting_date,
                po_item.base_price_list_rate,
                po_item.discount_percentage,
                po_item.base_rate
            from `tabPurchase Order` po, `tabPurchase Order Item` po_item
            where po.name = po_item.parent and po.docstatus = 1 and po_item.item_code='{item}' order by po.transaction_date desc limit 1)
            union
            (select
                pr_item.item_code,
                pr_item.item_name,
                pr.posting_date,
                pr_item.base_price_list_rate,
                pr_item.discount_percentage,
                pr_item.base_rate
            from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
            where pr.name = pr_item.parent and pr.docstatus = 1 and pr_item.item_code='{item}' order by pr.posting_date desc limit 1)
            union
            (select
                pi_item.item_code,
                pi_item.item_name,
                pi.posting_date,
                pi_item.base_price_list_rate,
                pi_item.discount_percentage,
                pi_item.base_rate
            from `tabPurchase Invoice` pi, `tabPurchase Invoice Item` pi_item
            where pi.name = pi_item.parent and pi.docstatus = 1 and pi_item.item_code='{item}' order by pi.posting_date desc limit 1)
        ) result
        order by result.item_code asc, result.posting_date desc) result_wrapper
        group by item_code, posting_date order by posting_date desc""".format( item = item_code )
    #if item_code == 'GC001':
    #	frappe.msgprint(query)
    # Only the first (most recent) row is kept.
    for idx, d in enumerate (frappe.db.sql(query, as_dict=1)):
        if idx==0:
            item_last_purchase_rate_map.setdefault(d.item_code, d.base_rate)
    #print item_last_purchase_rate_map
    return item_last_purchase_rate_map.get(item_code,0)
def get_item_bom_rate():
    """Return {item_code: unit BOM cost} for active, default BOMs."""
    bom_rates = {}
    rows = frappe.db.sql("""select item, (total_cost/quantity) as bom_rate
        from `tabBOM` where is_active=1 and is_default=1""", as_dict=1)
    for row in rows:
        bom_rates.setdefault(row.item, flt(row.bom_rate))
    return bom_rates
def get_bin_details():
    """Get bin details from all warehouses

    Returns a frappe._dict mapping item_code -> first Bin row for that item.
    """
    bin_details = frappe.db.sql("""select name, item_code, actual_qty, valuation_rate as val_rate, warehouse
        from `tabBin` order by item_code""", as_dict=1)
    #print bin_details
    bin_map = frappe._dict()
    for i in bin_details:
        bin_map.setdefault(i.item_code, i)
    # FIX: was the Python 2 statement `print bin_map`, a SyntaxError on
    # Python 3; converted to a function call (kept for debug parity).
    print(bin_map)
    return bin_map
| 1.679688 | 2 |
Figures_tables/5_Fig3A_3B.py | jlanillos/clinAcc_PGx_WES | 2 | 12763032 | # Script wh helps to plot Figures 3A and 3B
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Include all GENES, those containing Indels and SNVS (that's why I repeat this step of loading "alleles" dataframe) This prevents badly groupping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
dff = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
mask = (dff['N_alleles'] != 0) & (dff['Phenotype_G6PD'] != 'G6PD_Normal')
dff_valid = dff[mask]
dff['N_phenotypes'] = 0
dff['Phenotype'] = dff['Phenotype'].apply(lambda x: ','.join([i for i in x.split(',') if i != 'G6PD_Normal']))
dff.loc[mask, 'N_phenotypes'] = dff_valid['Phenotype'].apply(lambda x: len(x.split(',')))
gf = dff.groupby('N_phenotypes')['sample'].count()
GF = {'Nr. phenotypes': list(gf.index), 'Count':100*(gf.values / gf.sum()), 'Group':['(N=5001)']*len(gf)}
GF = pd.DataFrame(GF)
tf = GF.iloc[0:4]
d = {'Nr. phenotypes':'[4,7]', 'Count':sum(GF['Count'].iloc[4:]), 'Group':'(N=5001)'}
tf = tf.append(d, ignore_index=True)
bottom = 0
f, ax1 = plt.subplots(figsize=(2,4))
f.set_size_inches(2.7, 4.0)
for i,j, in zip(list(tf['Count'].values), list(tf['Nr. phenotypes'])):
ax1.bar('N=5001',i,label=j, bottom = bottom, edgecolor = 'black')
bottom = bottom + i
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles[::-1], labels[::-1], loc='center left',bbox_to_anchor=(1.0, 0.5), title='Nr.alleles', fontsize=14,title_fontsize=14) # title = TITLE,
plt.ylabel('%',fontsize=14)
plt.yticks(np.arange(0, 100,10 ))
plt.subplots_adjust(left=0.23, bottom=0.1, right=0.5, top=0.95, wspace=0.14, hspace=0.24)
plt.savefig('/path/to/Figures/Figure_3A_nrphenotypes.png',format = 'png', dpi = 500)
plt.show()
####################################### FIGURE 3B
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np

# Include all GENES, those containing Indels and SNVS (that's why I repeat this step of loading "alleles" dataframe) This prevents badly groupping in 20210105_plotStacked...INDELS.py
alleles = pd.read_csv('/path/to/Alleles_20201228.csv',sep='\t')
#alleles['actionable'].loc[(alleles['SYMBOL'] == 'CYP4F2') & (alleles['allele'] == '*2')] = 'Yes'
alleles = alleles.loc[(alleles['count_carrier_ids'].astype(str) != 'nan') & (alleles['actionable'] == 'Yes')].copy()
GENES = list(set(list(alleles['SYMBOL'])))
dff = pd.read_csv('/path/to/phenotypes_20210107.csv',sep='\t')
#dff = dff.loc[dff['from'] == 'ESPAÑA']
mask = (dff['N_alleles'] != 0) & (dff['Phenotype_G6PD'] != 'G6PD_Normal')
dff_valid = dff[mask]
dff['N_phenotypes'] = 0
dff['Phenotype'] = dff['Phenotype'].apply(lambda x: ','.join([i for i in x.split(',') if i != 'G6PD_Normal']))
dff.loc[mask, 'N_phenotypes'] = dff_valid['Phenotype'].apply(lambda x: len(x.split(',')))
GENES.sort()
# Per-gene percentage of samples with any recorded pharmacogenetic phenotype.
pct_phenot = list()
for gene in GENES:
    pct_phenot.append(100*(dff.groupby('Phenotype_' + gene)['sample'].count().values.sum() / len(dff)))
# Horizontal bars: grey 100% background with the red actual percentage on top.
f, ax1 = plt.subplots(figsize=(6,3.5))
plt.grid(axis='x')
plt.barh(GENES, [100]*len(GENES), align='center', height=.35, color='tab:grey',label='Actionable phenotype')
plt.barh(GENES, pct_phenot, align='center', height=.35, color='tab:red',label='Actionable phenotype',edgecolor = 'k')
plt.xlim([0,100])
plt.xlabel('% population with pharmacogenetic phenotype (n=5001)', fontsize=12)
plt.subplots_adjust(left=0.130, bottom=0.140, right=0.945, top=0.97, wspace=0.14, hspace=0.24)
#plt.savefig('/path/to/Figures/Fig3B.png',format = 'png', dpi = 500)
plt.savefig('Fig3B.png',format = 'png', dpi = 500)
plt.show()

# NOTE(review): the following triple-quoted block is disabled dead code for a
# "Figure 2A" draft -- kept verbatim; confirm whether it can be deleted.
'''### Figure 2A
cols = ['N_alleles','SNV_N_alleles','INDELS_N_alleles']
gf = df.groupby(cols[0])['sample'].count().reset_index()
gf = gf.rename(columns={'sample':cols[0] + '_all'})
dgf = dict(zip(list(df.groupby(cols[1])['sample'].count().index), list(df.groupby(cols[1])['sample'].count().values)))
plt.subplots_adjust(left=0.10, bottom=0.08, right=0.85, top=0.90, wspace=0.14, hspace=0.24)
plt.xticks(rotation=0)
plt.ylim(0,100)
plt.xlabel('')
plt.show()
plt.xticks(rotation=90)
plt.ylim(0,100)
plt.ylabel('%')
plt.show()'''
| 2.40625 | 2 |
graphs/sequence_result_plot.py | kavinyao/SKBPR | 0 | 12763033 | from plot_common import output_graph
# results of N = 5, 10, 15, 20
Ns = [5, 10, 15, 20]
large_dataset_precisions = {
'SKBPR-BC': (Ns, [0.0366, 0.0297, 0.0263, 0.0244], [0.0004, 0.0003, 0.0002, 0.0002], [0.0002, 0.0003, 0.0003, 0.0003]),
'SKBPR-BC-SEQ': (Ns, [0.0363, 0.0293, 0.0260, 0.0241], [0.0001, 0.0002, 0.0003, 0.0002], [0.0003, 0.0002, 0.0003, 0.0002]),
}
large_dataset_recalls = {
'SKBPR-BC': (Ns, [0.0844, 0.1139, 0.1367, 0.1559], [0.0011, 0.0010, 0.0010, 0.0006], [0.0011, 0.0011, 0.0006, 0.0004]),
'SKBPR-BC-SEQ': (Ns, [0.0855, 0.1154, 0.1384, 0.1578], [0.0005, 0.0006, 0.0007, 0.0006], [0.0007, 0.0006, 0.0009, 0.0010]),
}
if __name__ == '__main__':
precision_axis = [0, 25, 0.020, 0.040]
recall_axis = [0, 25, 0.06, 0.20]
# draw precision and recall on on graph
# omit results of small datasets
mixed_datasets = [
('Precision', 'Large', precision_axis, large_dataset_precisions),
('Recall', 'Large', recall_axis, large_dataset_recalls),
]
output_graph(mixed_datasets, 'output/sequence_precision_recall.png')
| 2.171875 | 2 |
cocojson/tools/insert_img_meta.py | TeyrCrimson/cocojson | 0 | 12763034 | <reponame>TeyrCrimson/cocojson
'''
Insert any extra attributes/image meta information associated with the images into the coco json file.
Input will be a paired list of image name (comma-separated) and meta-information, together with the attribute name
For example:
Paired List:
img1.jpg,iphone
img2.jpg,iphone
img3.jpg,dslr
img4.jpg,dslr
Attribute name: "source"
Will be inserted into the coco json under each image's "attributes":
{
"images":[
{
"id": int (usually start from 1),
"file_name": subpath that should append to root image directory to give you a path to the image,
"height": int,
"width": int,
"attributes": {
"attribute name": any extra meta info associated with image,
},
},
...
]
}
If `collate_count` is flagged, image counts of the respective attributes will be given in the "info" block of the coco json under "image_meta_count".
{
"info": {
"year": ...,
"version": ...,
"description": ...,
"contributor": ...,
"url": ...,
"date_created": ...,
"image_meta_count": (this is an addition specific to this repo) dictionaries of image meta information
}
}
'''
from pathlib import Path
from collections import defaultdict
from cocojson.utils.common import read_coco_json, path, write_json_in_place
# JSON keys used when writing meta information back into the COCO dict.
IMAGES_ATTRIBUTES = 'attributes'
INFO_IMAGEMETACOUNT = 'image_meta_count'


def insert_img_meta_from_file(coco_json, paired_list_file, attribute_name='metainfo', out_json=None, collate_count=False):
    """Read an "image,value" paired list file and write the values into the
    COCO json as per-image attributes, saving the result next to the input."""
    coco_dict, setname = read_coco_json(coco_json)
    pair_path = path(paired_list_file)
    stem_to_value = {}
    with pair_path.open('r') as fh:
        for raw_line in fh.readlines():
            img_field, meta_value = raw_line.strip().split(',')
            # Use the stem so full paths in the list file still match.
            stem_to_value[Path(img_field).stem] = meta_value
    coco_dict = insert_img_meta(coco_dict, stem_to_value, attribute_name=attribute_name, collate_count=collate_count)
    write_json_in_place(coco_json, coco_dict, append_str='inserted', out_json=out_json)


def insert_img_meta(coco_dict, img2metainfo, attribute_name='metainfo', collate_count=False):
    """Write one meta attribute per image into coco_dict (mutated and returned).

    `img2metainfo` maps image file-name stems to the attribute value; every
    image in coco_dict must have an entry (asserted). When `collate_count` is
    set, per-value image counts are stored under info/image_meta_count.
    """
    counts = defaultdict(int) if collate_count else None
    for image in coco_dict['images']:
        file_name = image['file_name']
        stem = Path(file_name).stem
        assert stem in img2metainfo, file_name
        value = img2metainfo[stem]
        attributes = image.setdefault(IMAGES_ATTRIBUTES, {})
        attributes[f"{attribute_name}"] = value
        if counts is not None:
            counts[value] += 1
    if counts is not None:
        meta_counts = coco_dict['info'].setdefault(INFO_IMAGEMETACOUNT, {})
        meta_counts[f"{attribute_name}"] = counts
    return coco_dict
| 2.859375 | 3 |
1DNewtonianFreeFall.py | HenryBass/Python-Simulations | 2 | 12763035 | <reponame>HenryBass/Python-Simulations
# Gravity Simulation
import math
import numpy as np

# Welcome
print("Welcome to a 1D Free Fall Simulation. Please enter only in metric integers. ")
print("You will enter the amount of time you want to simulate, it includes information about impact, \n"
      "even if the simulation stops before impact.")

# Constants
g = 9.81  # gravitational acceleration (m/s^2)

# Variables
height = int(input("Drop Height: "))
timeElapsed = 1
runTime = int(input("How long do you want to record data from the fall? "))
location = 0
mass = int((input("Mass: ")))
# Time to reach the ground: h = g*t^2/2  =>  t = sqrt(2h/g).
# Loop-invariant, so computed once up front.
timeToImpact = math.sqrt((2 * height) / g)

# Simulation
print("Second by second info of free fall in given time.")
while timeElapsed <= runTime and timeToImpact >= 0 and location >= 0:
    # Kinematics from rest: d = g*t^2/2, v = g*t (instantaneous).
    distanceTraveled = (g * timeElapsed ** 2) / 2
    # FIX: previously d/t (the average velocity g*t/2) was printed as "Velocity";
    # the instantaneous free-fall velocity is g*t.
    velocity = g * timeElapsed
    location = height - distanceTraveled
    momentum = mass * velocity
    # Print Stats
    print("\nTime to simulation end: " + str(runTime - timeElapsed))
    print('Y Location: ' + str(location) + 'M')
    print('Velocity: ' + str(velocity) + 'M/S')
    print('Distance Traveled: ' + str(distanceTraveled) + 'M')
    print('Momentum: ' + str(momentum) + 'N')
    print('Time Elapsed: ' + str(timeElapsed))
    print('Rough time to impact: ' + str(timeToImpact - timeElapsed))
    print()
    # Add to time elapsed
    timeElapsed = timeElapsed + 1

# Print impact results.
# FIX: impact speed was previously height/runTime (average over the recorded
# window, unrelated to impact); the correct value is v = g*t_impact = sqrt(2gh).
impactVelocity = g * timeToImpact
print("\n The Simulation is done. \n")
print("*This is printed even if the object fell past the ground, or never hit it.* \n Info on impact: ")
print('Y Location: 0M')
print('Velocity: ' + str(impactVelocity) + 'M/S')
print('Distance Traveled: ' + str(height) + 'M')
print('Momentum: ' + str(mass * impactVelocity) + 'N')
print('Time Spent in free fall: ' + str(timeToImpact))
print()
| 4.03125 | 4 |
mods/Maze/main.py | SummitChen/opennero | 215 | 12763036 | # OpenNero will execute ModMain when this mod is loaded
from Maze.client import ClientMain
def ModMain(mode = ""):
    """Entry point executed by OpenNero when the Maze mod is loaded."""
    ClientMain(mode)
def StartMe():
    """Headless entry point: run the mod at full speed and start Sarsa learning."""
    from Maze.module import getMod
    getMod().set_speedup(1.0) # full speed ahead
    getMod().start_sarsa() # start an algorithm for headless mode
| 2.09375 | 2 |
src/prefect/backend/__init__.py | gabrielvieira37/prefect | 0 | 12763037 | from prefect.backend.task_run import TaskRunView
from prefect.backend.flow_run import FlowRunView, execute_flow_run
from prefect.backend.flow import FlowView
from prefect.backend.kv_store import set_key_value, get_key_value, delete_key, list_keys
| 1.226563 | 1 |
kunquat/tracker/ui/views/utils.py | kagu/kunquat | 13 | 12763038 | # -*- coding: utf-8 -*-
#
# Author: <NAME>, Finland 2015-2020
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from kunquat.tracker.ui.qt import *
def get_abs_window_size(width_norm, height_norm):
    """Return a QSize covering the given fractions of the screen geometry."""
    geometry = QApplication.desktop().screenGeometry()
    width = int(geometry.width() * width_norm)
    height = int(geometry.height() * height_norm)
    return QSize(width, height)
def update_ref_font_height(font, style_mgr):
    """Measure a reference glyph with *font* and store its height in *style_mgr*."""
    widgets = QApplication.topLevelWidgets()
    if not widgets:
        raise RuntimeError('Main window not found')
    widget = widgets[0]
    metrics = QFontMetrics(QFont(*font), widget)
    style_mgr.set_reference_font_height(metrics.tightBoundingRect('E').height())
def get_default_font():
    """Construct the default UI font, sized from the physical screen dimensions."""
    screen = QApplication.primaryScreen()

    # Pick a "true" point size from the physical screen width,
    # clamped to a sensible range.
    smallest_pt = 7.2
    largest_pt = 12
    width_for_smallest = 350
    width_for_largest = 675

    phys_width = screen.physicalSize().width()
    t = (phys_width - width_for_smallest) / (width_for_largest - width_for_smallest)
    t = min(max(0, t), 1)
    true_pt = lerp_val(smallest_pt, largest_pt, t)

    # Scale the true point size to compensate for a logical DPI that
    # disagrees with the physical DPI (system misconfiguration).
    logical_dpi = screen.logicalDotsPerInch()
    physical_dpi = screen.physicalDotsPerInch()
    point_size = int(round(true_pt * physical_dpi / logical_dpi))
    return QFont(QFont().defaultFamily(), point_size)
def get_default_font_info(style_mgr):
    """Return (family, point size) of the configured UI font, falling back to the default."""
    fallback = get_default_font()
    family = style_mgr.get_style_param('def_font_family') or fallback.family()
    size = style_mgr.get_style_param('def_font_size') or fallback.pointSize()
    return (family, size)
def get_scaled_font(style_mgr, scale, *attrs):
    """Return the default UI font scaled by *scale*, with optional QFont attributes."""
    family, base_size = get_default_font_info(style_mgr)
    font = QFont(family, int(round(base_size * scale)), *attrs)
    # Keep the fractional size too; the constructor only takes integers.
    font.setPointSizeF(base_size * scale)
    return font
def lerp_val(v1, v2, t):
    """Linearly interpolate between *v1* and *v2* by factor *t* in [0, 1]."""
    assert 0 <= t <= 1, 'lerp value {} is not within valid range [0, 1]'.format(t)
    delta = v2 - v1
    return v1 + delta * t
def lerp_colour(c1, c2, t):
    """Blend two QColors channel-wise; t == 0 gives c1 and t == 1 gives c2."""
    assert 0 <= t <= 1, 'lerp value {} is not within valid range [0, 1]'.format(t)
    channels = [int(lerp_val(getattr(c1, ch)(), getattr(c2, ch)(), t))
                for ch in ('red', 'green', 'blue')]
    return QColor(*channels)
def get_colour_from_str(s):
    """Parse a '#rgb' or '#rrggbb' hex string into a QColor.

    Raises:
        ValueError: if *s* is not 4 or 7 characters long (including '#').
    """
    if len(s) == 4:
        # Expand shorthand '#rgb' to '#rrggbb' by doubling each digit.
        cs = [c + c for c in s[1:]]
    elif len(s) == 7:
        cs = [s[1:3], s[3:5], s[5:7]]
    else:
        # Was `assert False`, which is silently stripped under `python -O`;
        # raise an explicit error instead.
        raise ValueError('invalid colour string: {!r}'.format(s))
    colour = [int(c, 16) for c in cs]
    return QColor(colour[0], colour[1], colour[2])
def get_str_from_colour(colour):
    """Format a QColor as a lowercase '#rrggbb' hex string."""
    s = '#{:02x}{:02x}{:02x}'.format(colour.red(), colour.green(), colour.blue())
    assert len(s) == 7
    return s
def get_glyph_rel_width(font, widget_cls, ref_str):
    """Return the width/height ratio of *ref_str* rendered with *font*."""
    metrics = QFontMetricsF(font, widget_cls())
    bounds = metrics.tightBoundingRect(ref_str)
    return bounds.width() / bounds.height()
def set_glyph_rel_width(font, widget_cls, ref_str, rel_width):
    """Stretch *font* horizontally so *ref_str* has the given width/height ratio."""
    current = get_glyph_rel_width(font, widget_cls, ref_str)
    font.setStretch(int(round(100 * rel_width / current)))
| 1.867188 | 2 |
src/org_to_anki/ankiClasses/AnkiDeck.py | Leodore59/org_to_anki | 65 | 12763039 | from .AnkiQuestion import AnkiQuestion
# from .AnkiQuestionMedia import AnkiQuestionMedia
class AnkiDeck:
    """A deck of Anki questions, possibly containing nested sub-decks.

    A basic file maps to a single deck; a multi-deck file maps to one deck
    of the general topic with sub-decks per section. Parameters set on a
    deck are inherited by its questions and sub-decks when questions are
    collected via getQuestions().
    """

    def __init__(self, name):  # (str)
        self.deckName = name
        self.subDecks = []          # nested AnkiDeck instances
        self._ankiQuestions = []    # questions attached directly to this deck
        self._parameters = {}       # key/value settings inherited by questions
        self._comments = []
        self._media = []            # media gathered from added questions
        self._sourceFilePath = ""

    def getMedia(self):
        """Return media of this deck and, recursively, of all sub-decks."""
        media = []
        if self.hasSubDeck():
            for subDeck in self.subDecks:
                media.extend(subDeck.getMedia())
        media.extend(self._media)
        return media

    def addComment(self, comment):  # (str)
        self._comments.append(comment)

    def getComments(self):
        return self._comments

    def addParameter(self, key, value):  # (str, str)
        self._parameters[key] = value

    def getParameters(self):
        """Return a shallow copy of this deck's parameters."""
        return dict(self._parameters)

    def getParameter(self, key, default=None):
        return self._parameters.get(key, default)

    def getQuestions(self, parentName=None, parentParamaters=None, joiner='::'):  # (str, dict, str)
        """Collect all questions of this deck and its sub-decks.

        Each question receives its fully-qualified deck name (joined with
        *joiner*) and inherits any parameter it does not define itself.
        Note: parent parameters are merged into this deck's parameters as a
        side effect, so inheritance persists across calls.
        """
        ankiQuestions = []
        for question in self._ankiQuestions:
            if parentName is not None:
                question.setDeckName(parentName + joiner + self.deckName)
            else:
                question.setDeckName(self.deckName)
            if parentParamaters is not None:
                for key in parentParamaters:
                    if self.getParameter(key) is None:
                        self.addParameter(key, parentParamaters[key])
            for key in self._parameters:
                if question.getParameter(key) is None:
                    question.addParameter(key, self._parameters[key])
            ankiQuestions.append(question)

        if self.hasSubDeck():
            name = self.deckName
            if parentName is not None:
                name = parentName + joiner + self.deckName
            if parentParamaters is not None:
                for key in parentParamaters:
                    if self.getParameter(key) is None:
                        self.addParameter(key, parentParamaters[key])
            for i in self.subDecks:
                ankiQuestions.extend(i.getQuestions(name, self._parameters))
        return ankiQuestions

    def getDeckNames(self, parentName=None, joiner='::'):  # (str, str)
        """Return fully-qualified names of this deck and all nested sub-decks."""
        deckNames = []
        if parentName is not None:
            deckNames.append(parentName + joiner + self.deckName)
        else:
            deckNames.append(self.deckName)
        if self.hasSubDeck():
            name = self.deckName
            if parentName is not None:
                name = parentName + joiner + self.deckName
            for i in self.subDecks:
                deckNames.extend(i.getDeckNames(name))
        return deckNames

    def addQuestion(self, ankiQuestion):  # (AnkiQuestion)
        # Media is pooled on the deck; removing a question later would leave
        # its media behind (known limitation, see original TODO).
        if ankiQuestion.hasMedia():
            self._media.extend(ankiQuestion.getMedia())
        self._ankiQuestions.append(ankiQuestion)

    def addSubdeck(self, ankiDeck):  # (AnkiDeck)
        self.subDecks.append(ankiDeck)

    def hasSubDeck(self):
        return len(self.subDecks) > 0

    def __str__(self):
        return ("DeckName: %s.\nSubDecks: %s.\nQuestions: %s.\nParamters: %s.\nComments: %s.\nMedia: %s") % (
            self.deckName, self.subDecks, self._ankiQuestions, self._parameters, self._comments, self._media)

    def __eq__(self, other):
        # Bug fix: test identity with `is None` instead of `== None`, which
        # needlessly invoked other.__eq__ and is non-idiomatic.
        if other is None:
            return False
        return self.deckName == other.deckName and self.getDeckNames() == other.getDeckNames() and self.getQuestions(
        ) == other.getQuestions() and self.subDecks == other.subDecks and self._parameters == other._parameters and self._comments == other._comments and self._media == other._media
| 2.875 | 3 |
tests/arch/arm/translators/test_branch.py | IMULMUL/barf-project | 1,395 | 12763040 | <reponame>IMULMUL/barf-project
# Copyright (c) 2019, Fundacion Dr. <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import platform
import unittest
from .armtranslator import ArmTranslationTestCase
@unittest.skipUnless(platform.machine().lower() in ['armv6l', 'armv7l'],
                     'Not running on an ARMv6 system')
class ArmTranslationBranchTests(ArmTranslationTestCase):
    """Checks REIL translation of ARM branch instructions (b/bx/bl/blx/bne).

    Each sample is executed natively via the inherited _execute_asm helper
    (hence the ARM-only skip guard) and the resulting context is inspected.
    """

    def test_branch_instructions(self):
        """Every branch form must skip the instruction that follows it."""
        # Sentinel values: R11 keeps `untouched_value` only if the branch was
        # taken; otherwise the skipped `mov` overwrites it with `touched_value`.
        untouched_value = 0x45454545
        touched_value = 0x31313131

        # R11 is used as a dirty register to check if the branch was
        # taken or not.
        instr_samples = [
            ["mov r11, #0x{:x}".format(untouched_value),
             "b #0x800c",
             "mov r11, #0x{:x}".format(touched_value),
             "mov r0, r0",
            ],
            ["mov r11, #0x{:x}".format(untouched_value),
             "bx #0x800c",
             "mov r11, #0x{:x}".format(touched_value),
             "mov r0, r0",
            ],
            ["mov r11, #0x{:x}".format(untouched_value),
             "bl #0x800c",
             "mov r11, #0x{:x}".format(touched_value),
             "mov r0, r0",
            ],
            ["mov r11, #0x{:x}".format(untouched_value),
             "blx #0x800c",
             "mov r11, #0x{:x}".format(touched_value),
             "mov r0, r0",
            ],
            ["movs r11, #0x{:x}".format(untouched_value),
             "bne #0x800c",
             "mov r11, #0x{:x}".format(touched_value),
             "mov r0, r0",
            ],
            ["mov r11, #0x{:x}".format(untouched_value),
             "mov r1, #0x8010",
             "bx r1",
             "mov r11, #0x{:x}".format(touched_value),
             "mov r0, r0",
            ],
            ["mov r11, #0x{:x}".format(untouched_value),
             "mov r1, #0x8010",
             "blx r1",
             "mov r11, #0x{:x}".format(touched_value),
             "mov r0, r0",
            ],
        ]

        for instr in instr_samples:
            # Assemble at 0x8000 and run through the REIL emulator.
            reil_ctx_out = self._execute_asm(instr, 0x8000)

            self.assertTrue(reil_ctx_out['r11'] == untouched_value)
| 1.570313 | 2 |
Push2/bank_selection_component.py | phatblat/AbletonLiveMIDIRemoteScripts | 4 | 12763041 | # Source Generated with Decompyle++
# File: bank_selection_component.pyc (Python 2.5)
from __future__ import absolute_import
from ableton.v2.base import NamedTuple, listenable_property, listens, listens_group, liveobj_valid, SlotManager, nop
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import control_list, forward_control, ButtonControl
from item_lister_component import ItemListerComponent, ItemProvider
from bank_definitions import MAIN_KEY
class BankProvider(ItemProvider, SlotManager):
    """Provides the parameter banks of a device as selectable items.

    NOTE(review): this class was reconstructed from broken Decompyle++
    output (`continue` outside a loop, `**a` keyword forwarding, orphaned
    conditional branches). Inferred spots are marked below and should be
    confirmed against the original bytecode.
    """

    def __init__(self, bank_registry = None, banking_info = None, *a, **k):
        assert bank_registry is not None
        assert banking_info is not None
        # The decompiled source forwarded `**a` here, which is a TypeError
        # at runtime; keyword arguments must be forwarded as `**k`.
        super(BankProvider, self).__init__(*a, **k)
        self._bank_registry = bank_registry
        self._banking_info = banking_info
        self._device = None
        self._on_device_bank_changed.subject = bank_registry

    def set_device(self, device):
        if self._device != device:
            self._device = device
            self.notify_items()
            self.notify_selected_item()

    @property
    def device(self):
        return self._device

    @property
    def items(self):
        nesting_level = 0
        # NOTE(review): the guard was lost in decompilation (stray
        # `continue`); assuming no banks for an invalid device.
        if liveobj_valid(self._device):
            bank_names = self.internal_bank_names(
                self._banking_info.device_bank_names(self._device))
        else:
            bank_names = []
        return [ (NamedTuple(name = b), nesting_level) for b in bank_names ]

    @property
    def selected_item(self):
        selected = None
        if liveobj_valid(self._device) and len(self.items) > 0:
            bank_index = self._bank_registry.get_device_bank(self._device)
            selected = self.items[bank_index][0]
        return selected

    def select_item(self, item):
        nesting_level = 0
        bank_index = self.items.index((item, nesting_level))
        self._bank_registry.set_device_bank(self._device, bank_index)

    @listens('device_bank')
    def _on_device_bank_changed(self, device, _):
        if device == self._device:
            self.notify_selected_item()

    def internal_bank_names(self, original_bank_names):
        num_banks = len(original_bank_names)
        # NOTE(review): the true branch was lost in decompilation; returning
        # the original names when banks exist, otherwise just the main key.
        return original_bank_names if num_banks > 0 else [MAIN_KEY]
class EditModeOptionsComponent(Component):
    """Exposes edit-mode options of the current device on a row of buttons.

    Button 0 acts as "back"; buttons 1..7 trigger the provider's options.

    NOTE(review): reconstructed from broken Decompyle++ output; the
    conditional expressions marked below were inferred from the surviving
    branches and should be verified against the original bytecode.
    """
    option_buttons = control_list(ButtonControl, color = 'ItemNavigation.ItemSelected', control_count = 8)

    def __init__(self, back_callback = nop, device_options_provider = None, *a, **k):
        # `**a` in the decompiled source was an artifact; forward `**k`.
        super(EditModeOptionsComponent, self).__init__(*a, **k)
        self._device = None
        self._device_options_provider = device_options_provider
        self._back = back_callback
        self.__on_device_changed.subject = device_options_provider
        self.__on_options_changed.subject = device_options_provider
        self._update_button_feedback()

    def _option_for_button(self, button):
        options = self.options
        # NOTE(review): true branch inferred; button i drives option i - 1.
        return options[button.index - 1] if len(options) > button.index - 1 else None

    @option_buttons.pressed
    def option_buttons(self, button):
        if button.index == 0:
            self._back()
        else:
            option = self._option_for_button(button)
            if option:
                try:
                    option.trigger()
                except RuntimeError:
                    # Triggering can fail if the underlying object vanished.
                    pass

    def _set_device(self, device):
        self._device = device
        self.__on_device_name_changed.subject = device
        self.notify_device()

    @listenable_property
    def device(self):
        # NOTE(review): the value returned for a valid device was lost in
        # decompilation; the name-change listener suggests the device name.
        return self._device.name if liveobj_valid(self._device) else ''

    @listenable_property
    def options(self):
        # NOTE(review): true branch inferred from the provider attribute.
        return self._device_options_provider.options if self._device_options_provider else []

    @listens('device')
    def __on_device_changed(self):
        self._update_device()

    @listens('name')
    def __on_device_name_changed(self):
        self.notify_device()

    @listens('options')
    def __on_options_changed(self):
        self.__on_active_options_changed.replace_subjects(self.options)
        self._update_button_feedback()
        self.notify_options()

    @listens_group('active')
    def __on_active_options_changed(self, _):
        self._update_button_feedback()

    def _update_button_feedback(self):
        for button in self.option_buttons:
            if button.index > 0:
                option = self._option_for_button(button)
                # NOTE(review): conditional colour expression reconstructed
                # from the orphaned string fragments in the decompiled output.
                has_active_option = option.active if option else False
                button.color = 'ItemNavigation.' + (
                    'ItemNotSelected' if has_active_option else 'NoItem')

    def _update_device(self):
        self._set_device(self._device_options_provider.device())

    def update(self):
        super(EditModeOptionsComponent, self).update()
        if self.is_enabled():
            self._update_device()
class BankSelectionComponent(ItemListerComponent):
    """Lists the banks of the currently edited device for selection.

    NOTE(review): reconstructed from broken Decompyle++ output; inferred
    spots are marked below.
    """
    __events__ = ('back',)
    select_buttons = forward_control(ItemListerComponent.select_buttons)

    def __init__(self, bank_registry = None, banking_info = None, device_options_provider = None, *a, **k):
        self._bank_provider = BankProvider(bank_registry = bank_registry, banking_info = banking_info)
        # `**a` in the decompiled source was an artifact; forward `**k`.
        super(BankSelectionComponent, self).__init__(item_provider = self._bank_provider, *a, **k)
        self._options = self.register_component(EditModeOptionsComponent(
            back_callback = self.notify_back,
            device_options_provider = device_options_provider))
        self.register_disconnectable(self._bank_provider)

    @select_buttons.checked
    def select_buttons(self, button):
        self._bank_provider.select_item(self.items[button.index].item)

    def set_option_buttons(self, buttons):
        self._options.option_buttons.set_control_element(buttons)

    def set_device(self, item):
        # NOTE(review): the value assigned when the incoming item differs
        # from the current device was lost in decompilation; passing the
        # item through (and clearing otherwise) matches the surviving
        # branches, but must be confirmed against the original bytecode.
        device = item if item != self._bank_provider.device else None
        self._bank_provider.set_device(device)

    @property
    def options(self):
        return self._options
| 2.34375 | 2 |
blog/app.py | jancr/blog | 1 | 12763042 | <reponame>jancr/blog<filename>blog/app.py<gh_stars>1-10
# core imports
import argparse
# 3rd party imports
from flask import Flask
# local imports
from views import bp
from models import Entry, FTSEntry, db
import config
def parse_args():
    """Read CLI flags; exactly one of --devel / --production is required."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--devel', action='store_true', default=False)
    parser.add_argument('--production', action='store_true', default=False)
    args = parser.parse_args()
    # Both set or both unset is invalid: the flags are mutually exclusive.
    if args.devel == args.production:
        raise ValueError("set ONE of --devel and --production")
    return args
def get_app(devel=True):
    """Build the Flask app: load config, set up the database, register views."""
    app = Flask(__name__)
    app.config.from_object(config.get_conf(devel))

    # database stuff
    db.init_app(app)
    db.database.create_tables([Entry, FTSEntry], safe=True)

    # views stuff
    app.register_blueprint(bp)
    return app
def run(devel=True):
    """Serve the app: Flask's debug server in devel, waitress in production."""
    app = get_app(devel)
    port = app.config['PORT']
    if devel:
        app.run(host="0.0.0.0", port=port)
    else:
        from waitress import serve
        serve(app, host="0.0.0.0", port=port)
if __name__ == '__main__':
    # Script entry point: choose devel/production from the CLI and serve.
    args = parse_args()
    run(args.devel)
| 2.203125 | 2 |
model/event_pairs_with_particle_obj.py | wenlinyao/RegularTemporalEventPairs | 1 | 12763043 | <filename>model/event_pairs_with_particle_obj.py
# this program is going to find verb word's particle and object to make each event pair more precise
from StringIO import StringIO
from sets import Set
import glob
import timeit
import gc
import gzip
import re
#----------------------------------------------------------------------
class WordToken:
def __init__ (self, id, word, lemma, POS, NER):
self.id = id
self.word = word
self.lemma = lemma
#self.CharacterBegin = CharacterBegin
#self.CharacterEnd = CharacterEnd
self.POS = POS
self.NER = NER
class BasicDependency:
def __init__ (self, type, gov, dep):
self.type = type
self.gov = gov
self.dep = dep
class CollapsedDependency:
def __init__ (self, type, gov, dep):
self.type = type
self.gov = gov
self.dep = dep
"""
def translate_NER(WordToken):
person_pronoun = ["i", "you", "he", "she", "we", "they"]
if WordToken.word.lower() in person_pronoun:
return "PERSON"
elif WordToken.word[0].isupper() and WordToken.NER != 'O':
return WordToken.NER
else:
return WordToken.word
"""
def translate_NER(WordToken):
return WordToken.word
def get_particle_obj(word_id, tokenList, collapsed_dependenciesList):
index_result = []
words_result = []
temp_index_result = []
temp_words_result = []
noun_flag = 0
obj_flag = 0
words_result.append( '[' + tokenList[int(word_id) - 1].word + ']')
index_result.append('[' + tokenList[int(word_id) - 1].id + ']')
key_dependency_type_dict = {} # map dependency type to its dependent word
for element in collapsed_dependenciesList:
if int(element.gov) == int(word_id):
key_dependency_type_dict[element.type] = element.dep
if tokenList[int(word_id) - 1].POS[0] =='N':
if 'prep_of' in key_dependency_type_dict:
dep = key_dependency_type_dict['prep_of']
translated = translate_NER (tokenList[int(dep) - 1])
words_result.append('of')
index_result.append('of')
words_result.append(translated)
index_result.append(tokenList[int(dep) - 1].id)
elif 'prep_by' in key_dependency_type_dict:
dep = key_dependency_type_dict['prep_by']
translated = translate_NER (tokenList[int(dep) - 1])
words_result.append('by')
index_result.append('by')
words_result.append(translated)
index_result.append(tokenList[int(dep) - 1].id)
else:
prep_other_flag = 0
prep_other_type = ""
dep = 0
for ele in key_dependency_type_dict:
if 'prep_' in ele:
prep_other_flag = 1
prep_other_type = ele.replace("prep_", "")
dep = key_dependency_type_dict[ele]
if prep_other_flag == 1 and prep_other_type not in ["after", "before"]:
translated = translate_NER (tokenList[int(dep) - 1])
words_result.append(prep_other_type)
index_result.append(prep_other_type)
words_result.append(translated)
index_result.append(tokenList[int(dep) - 1].id)
elif tokenList[int(word_id) - 1].POS[0] =='V':
if 'prt' in key_dependency_type_dict:
dep = key_dependency_type_dict['prt']
words_result.append(tokenList[int(dep) - 1].word)
index_result.append(tokenList[int(dep) - 1].id)
if 'dobj' in key_dependency_type_dict:
dep = key_dependency_type_dict['dobj']
translated = translate_NER (tokenList[int(dep) - 1])
words_result.append(translated)
index_result.append(tokenList[int(dep) - 1].id)
elif 'nsubjpass' in key_dependency_type_dict:
dep = key_dependency_type_dict['nsubjpass']
translated = translate_NER (tokenList[int(dep) - 1])
words_result.insert(0, 'be') # this order is for the convenience of words_result.insert()
index_result.insert(0, 'be')
words_result.insert(0, translated)
index_result.insert(0, tokenList[int(dep) - 1].id)
elif 'xcomp' in key_dependency_type_dict:
dep = key_dependency_type_dict['xcomp']
translated = translate_NER (tokenList[int(dep) - 1])
if tokenList[int(dep) - 2].word == 'to':
words_result.append('to')
index_result.append(tokenList[int(dep) - 2].id)
words_result.append(translated)
index_result.append(tokenList[ int(dep) - 1].id)
else:
words_result.append(translated)
index_result.append(tokenList[ int(dep) - 1].id)
elif 'nsubj' in key_dependency_type_dict:
dep = key_dependency_type_dict['nsubj']
translated = translate_NER (tokenList[int(dep) - 1])
words_result.insert(0, translated)
index_result.insert(0, tokenList[int(dep) - 1].id)
else:
prep_other_flag = 0
prep_other_type = ""
dep = 0
for ele in key_dependency_type_dict:
if 'prep_' in ele:
prep_other_flag = 1
prep_other_type = ele.replace("prep_", "")
dep = key_dependency_type_dict[ele]
if prep_other_flag == 1 and prep_other_type not in ["after", "before"]:
translated = translate_NER (tokenList[int(dep) - 1])
words_result.append(prep_other_type)
index_result.append(prep_other_type)
words_result.append(translated)
index_result.append(tokenList[int(dep) - 1].id)
return index_result, words_result
def parseXML(xmlFile, count, gen_flag):
start = timeit.default_timer()
output_file = open('../rank_event_pairs_with_particle_obj/event_pairs_with_particle_obj_' + gen_flag, 'a')
print count,
print " ",
print xmlFile
print( "# " + str(count) + " " + str(xmlFile) + '\n' )
f = open(xmlFile, "r")
sentence_flag = False
tokens_flag = False
token_flag = False
collapsed_dependencies_flag = False
basic_dependencies_flag = False
basic_dep_flag = False
collapsed_dep_flag = False
after_find_flag = False
before_find_flag = False
after_id = -1
before_id = -1
gov_id = -1
dep_id = -1
tokenList = []
basic_dependenciesList = []
event1 = ""
event2 = ""
dobjDict = {}
word = ""
lemma = ""
POS = ""
NER = ""
sentence = ""
relation_flag = ""
collapsed_dep_type = ""
event_sentence = []
event_sentence_flag = False
for each_line in f:
words = each_line.split()
#print words
if (len(words) == 0):
continue
# save sentences information which include event pairs
if (words[0] == '<DOC'):
continue
#structure start
if (words[0] == '<sentence'):
after_find_flag = False
before_find_flag = False
after_id = -1
before_id = -1
tokenList = []
collapsed_dependenciesList = []
event1 = ""
event2 = ""
dobjDict = {}
relation_flag = ""
sentence_flag = True #sentences structure start
continue # process next line
if (sentence_flag == True and words[0] == '<tokens>'):
tokens_flag = True #tokens structure start
continue
if (tokens_flag == True and words[0] == '<token' and len(words) >= 2):
token_flag = True
token_id = int (words[1].replace("id=\"", "").replace("\">", ""))
continue
if (sentence_flag == True and words[0] == '<collapsed-ccprocessed-dependencies>'):
collapsed_dependencies_flag = True
continue
if (collapsed_dependencies_flag == True and words[0] == '<dep' and len(words) >= 2):
collapsed_dep_flag = True
collapsed_dep_type = words[1].replace("type=\"", "").replace("\">", "")
#print collapsed_dep_type
continue
if (collapsed_dep_flag == True):
if (words[0].find('<governor>') != -1):
collapsed_gov = words[0].replace("<governor>", "").replace("</governor>", "")
#print collapsed_gov
#raw_input("continue?")
continue
if (words[0].find('<dependent>') != -1):
collapsed_dep = words[0].replace("<dependent>", "").replace("</dependent>", "")
#print collapsed_dep
#raw_input("continue?")
continue
#structure end
if (token_flag == True and words[0] == '</token>'):
# reminder: token list start with index 0, but token id start with 1
tokenList.append(WordToken(str(token_id), word, lemma, POS, NER))
token_flag = False
continue
if (tokens_flag == True and words[0] == '</tokens>'):
tokens_flag = False
continue
if (sentence_flag == True and words[0] == '</sentence>'):
"""
if before_find_flag == True or after_find_flag == True:
counter = 0
for each_element in tokenList:
counter += 1
print '[', counter,': ', each_element.word, ']'
print '\n'
raw_input("continue?")
"""
after_id = -1
before_id = -1
after_find_flag = False
before_find_flag = False
tokenList = []
basic_dependenciesList = []
dobjDict = {}
sentence_flag = False
continue
if (collapsed_dependencies_flag == True and words[0] == '</collapsed-ccprocessed-dependencies>'):
for each_element in collapsed_dependenciesList:
event1 = ""
event2 = ""
temp_event1 = ""
temp_event2 = ""
if each_element.type == 'prep_after' or each_element.type == 'prepc_after':
#obj1_flag = 0 # object flag
#obj2_flag = 0
gov_id = int(each_element.gov)
dep_id = int(each_element.dep)
index1, words1 = get_particle_obj(gov_id, tokenList, collapsed_dependenciesList)
index2, words2 = get_particle_obj(dep_id, tokenList, collapsed_dependenciesList)
relation_flag = "<=="
event1 = ' < ' + " ".join(words1) + ' > '
event2 = ' < ' + " ".join(words2) + ' > '
output_file.write(event1 + relation_flag + event2 + '\n')
event1 = ""
event2 = ""
temp_event1 = ""
temp_event2 = ""
if each_element.type == 'prep_before' or each_element.type == 'prepc_before':
gov_id = int(each_element.gov)
dep_id = int(each_element.dep)
index1, words1 = get_particle_obj(gov_id, tokenList, collapsed_dependenciesList)
index2, words2 = get_particle_obj(dep_id, tokenList, collapsed_dependenciesList)
relation_flag = "==>"
event1 = ' < ' + " ".join(words1) + ' > '
event2 = ' < ' + " ".join(words2) + ' > '
output_file.write(event1 + relation_flag + event2 + '\n')
collapsed_dependencies_flag = False
continue
if (collapsed_dep_flag == True and words[0] == '</dep>'):
collapsed_dependenciesList.append(CollapsedDependency(collapsed_dep_type, collapsed_gov, collapsed_dep))
# find the direct object of selected events
"""
if (collapsed_dep_type == 'dobj'):
dobjDict[collapsed_gov] = collapsed_dep
#print dobjDict
#raw_input("continue?")
#print basic_gov,
#print basic_dep
continue
"""
collapsed_dep_type = ""
collapsed_dep_flag = False
continue
if (token_flag == True):
if (words[0].find('<word>') != -1):
word = words[0].replace("<word>", "").replace("</word>", "")
continue
if (words[0].find('<lemma>') != -1):
lemma = words[0].replace("<lemma>", "").replace("</lemma>", "")
#sentence_dic[token_id] = word
if (lemma == 'after'):
after_find_flag = True
after_id = token_id
if (lemma == 'before'):
before_find_flag = True
before_id = token_id
continue
if (words[0].find('<POS>') != -1):
POS = words[0].replace("<POS>", "").replace("</POS>", "")
continue
if (words[0].find('<NER>') != -1):
NER = words[0].replace("<NER>", "").replace("</NER>", "")
continue
"""
print collapsed_dependencies_flag
print collapsed_dep_flag
print collapsed_dep_type
raw_input("continue?")
"""
f.close()
output_file.close()
stop = timeit.default_timer()
print stop-start
def event_pairs_with_particle_obj_main(gen_flag):
#valid_pairs_set = valid_event_pairs_set()
count = 1
open('../rank_event_pairs_with_particle_obj/event_pairs_with_particle_obj_' + gen_flag, 'w').close()
for xmlFile in glob.glob("../../event_pairs_sentences_result/event_pairs_sentences_result_*"):
parseXML(xmlFile, count, gen_flag)
gc.collect()
count = count +1
print "over!"
| 2.65625 | 3 |
lucy/kvmemnn.py | nemanja-m/key-value-memory-network | 9 | 12763044 | import torch
from torch.nn import Module, Linear, Softmax, CosineSimilarity, Embedding
class KeyValueMemoryNet(Module):
    """Defines PyTorch model for Key-Value Memory Network.

    Key-Value Memory Networks (KV-MemNN) are described here: https://arxiv.org/pdf/1606.03126.pdf

    Goal is to read correct response from memory, given query. Memory slots are
    defined as pairs (k, v) where k is query and v is correct response. This
    implementation of KV-MemNN uses separate encodings for input query and
    possible candidates. Instead of using cross-entropy loss, we use cosine
    embedding loss where we measure cosine distance between read responses and
    candidate responses. We use only one 'hop' because more hops don't provide
    any improvements.

    This implementation supports batch training.
    """

    def __init__(self, vocab_size, embedding_dim):
        """Initializes model layers.

        Args:
            vocab_size (int): Number of tokens in corpus. This is used to init embeddings.
            embedding_dim (int): Dimension of embedding vector.
        """
        super().__init__()

        self._embedding_dim = embedding_dim

        # Separate encoders: one for queries/memories, one for responses/candidates.
        self.encoder_in = Encoder(vocab_size, embedding_dim)
        self.encoder_out = Encoder(vocab_size, embedding_dim)

        self.linear = Linear(embedding_dim, embedding_dim, bias=False)
        self.similarity = CosineSimilarity(dim=2)
        self.softmax = Softmax(dim=2)

    def forward(self, query, response, memory_keys, memory_values, candidates):
        """Performs forward step.

        Args:
            query (torch.Tensor): Tensor with shape of (NxM) where N is batch size,
                and M is length of padded query.
            response (torch.Tensor): Tensor with same shape as query denoting correct
                responses. May be None at inference time.
            memory_keys (torch.Tensor): Relevant memory keys for given query batch. Shape
                of tensor is (NxMxD) where N is batch size, M is number of relevant memories
                per query and D is length of memories.
            memory_values (torch.Tensor): Relevant memory values for given query batch
                with same shape as memory_keys.
            candidates (torch.Tensor): Possible responses for query batch with shape
                (NxMxD) where N is batch size, M is number of candidates per query and
                D is length of candidates.

        Returns:
            Tuple of (x_encoded, y_encoded): the memory reading repeated once per
            candidate, and the candidate embeddings, ready for cosine embedding loss.
        """
        view_shape = (len(query), 1, self._embedding_dim)
        query_embedding = self.encoder_in(query).view(*view_shape)
        memory_keys_embedding = self.encoder_in(memory_keys, mean_axis=2)
        memory_values_embedding = self.encoder_in(memory_values, mean_axis=2)

        # Attention over memory keys: cosine similarity -> softmax weights.
        similarity = self.similarity(query_embedding, memory_keys_embedding).unsqueeze(1)
        softmax = self.softmax(similarity)
        # Weighted sum of memory values, then a learned linear projection.
        value_reading = torch.matmul(softmax, memory_values_embedding)
        result = self.linear(value_reading)

        candidates_embedding = self.encoder_out(candidates, mean_axis=2)

        train_time = response is not None
        if train_time:
            response_embedding = self.encoder_out(response).view(*view_shape)

            # First candidate response is correct one.
            # This makes computing loss easier
            # (note: mutates candidates_embedding in place).
            candidates_embedding[:, 0, :] = response_embedding[:, 0, :]

        # Repeat the memory reading once per candidate so the pair of tensors
        # lines up element-wise for the cosine embedding loss.
        x_encoded = torch.cat([result] * candidates.shape[1], dim=1)
        y_encoded = candidates_embedding

        return x_encoded, y_encoded
class Encoder(Module):
    """Turns token-id sequences into mean-pooled embedding vectors."""

    def __init__(self, num_embeddings, embedding_dim):
        """Create the embedding table.

        Args:
            num_embeddings (int): Vocabulary size.
            embedding_dim (int): Dimension of each embedding vector.
        """
        super().__init__()
        # padding_idx=1 keeps the pad token's embedding fixed at the zero
        # vector, so padded positions contribute nothing meaningful.
        self.embedding = Embedding(num_embeddings=num_embeddings,
                                   embedding_dim=embedding_dim,
                                   max_norm=5,
                                   padding_idx=1)

    def forward(self, tokens, mean_axis=1):
        """Embed *tokens* and average the embeddings over *mean_axis*."""
        embedded = self.embedding(tokens)
        return embedded.mean(mean_axis)
| 3.71875 | 4 |
scripts/preprocessing/enumerate_dir.py | trirop/Kayakcounter | 2 | 12763045 | import os
# Function to rename multiple files
def main(path="/home/tristan/Bilder/Temp2/", start=252):
    """Rename every JPEG in *path* to a sequential '<n>.jpg' name.

    Args:
        path: directory whose files are renamed (original hard-coded
            location kept as the default for backward compatibility).
        start: first sequence number to use (original default kept).
    """
    i = start
    for filename in os.listdir(path):
        print(filename)
        # Case-insensitive extension check; the original matched only the
        # uppercase substring "JPG" and silently skipped '.jpg' files.
        if filename.lower().endswith(('.jpg', '.jpeg')):
            # os.path.join avoids relying on a trailing slash in *path*.
            my_source = os.path.join(path, filename)
            my_dest = os.path.join(path, str(i) + ".jpg")
            os.rename(my_source, my_dest)
            i += 1


# Run only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| 3.609375 | 4 |
paramgenerator/timeparameters.py | hubertsuprunowicz/ldbc_snb_datagen | 0 | 12763046 | <reponame>hubertsuprunowicz/ldbc_snb_datagen
#!/usr/bin/env python3
import sys
LAST_MONTHS = 3 # number of months that we consider for date parameters in the filters of a form timestamp <= Date0
START_YEAR = 2010 # default value that gets over-written from the data generator output
class MonthYearCount:
    """A (month, year) bucket with an associated event count."""

    def __init__(self, month, year, count):
        self.month = month
        self.year = year
        self.count = count
class TimeParameter:
    """A generated date parameter: a start day plus an optional duration."""

    def __init__(self, year, month, day, duration):
        """Store the date components; *duration* (in days) may be None."""
        self.year = year
        self.month = month
        self.day = day
        self.duration = duration
def findTimeParameters(persons, factors, procedure, timestampSelection):
    """Derive time parameters for *factors* via *timestampSelection*.

    Args:
        persons: person ids (unused here; kept for the caller's interface).
        factors: per-person lists of MonthYearCount entries.
        procedure: factor name; the callers pass "f", "ff" or "ffg".
            A value of "w" would widen the median window to 12 months,
            but no current caller uses it.
        timestampSelection: callable (factors, medians) -> [TimeParameter].

    Returns:
        The list of TimeParameter produced by *timestampSelection*.
    """
    # The stray debug print(procedure) was removed.
    if "w" == procedure:
        medians = computeTimeMedians(factors, lastmonthcount=12)
    else:
        medians = computeTimeMedians(factors)
    timeParams = timestampSelection(factors, medians)
    return timeParams
def getMedian(data, sort_key, getEntireTuple = False):
    """Return the median of *data* under *sort_key*.

    Args:
        data: list of items — MonthYearCount objects or plain numbers.
        sort_key: callable extracting the compared value from an item.
        getEntireTuple: if True, return the median *item* instead of its
            key value (for even lengths, the upper-middle item).

    Returns:
        The median key value (0 for empty input), or the median item
        (a zeroed MonthYearCount for empty input) when getEntireTuple.
    """
    if len(data) == 0:
        if getEntireTuple:
            return MonthYearCount(0, 0, 0)
        return 0
    if len(data) == 1:
        if getEntireTuple:
            return data[0]
        # Bug fix: use sort_key instead of the old hard-coded
        # `data[0].count`, which raised AttributeError when *data* held
        # plain numbers (the aggregate medians in computeTimeMedians).
        return sort_key(data[0])
    srtd = sorted(data, key=sort_key)
    mid = len(data) // 2
    if getEntireTuple:
        # Both parities use the (upper-)middle element, as before.
        return srtd[mid]
    if len(data) % 2 == 0:
        return (sort_key(srtd[mid - 1]) + sort_key(srtd[mid])) / 2.0
    return sort_key(srtd[mid])
def MonthYearToDate(myc, day):
    """Render *myc*'s year and month together with *day* as 'Y-M-D'.

    Components are not zero-padded (e.g. "2010-3-7"), matching the
    original "%d-%d-%d" formatting.
    """
    parts = (myc.year, myc.month, day)
    return "-".join("%d" % part for part in parts)
def getTimeParamsWithMedian(factors, stats):
    """For each factor, build a window centred on its median-count month.

    The window duration is a multiple of 28 days scaled by the ratio
    between the factor's own median month count and the global median,
    so every generated window aims for the global median selectivity.

    Args:
        factors: list of per-person MonthYearCount lists.
        stats: (medianFirstMonth, medianLastMonth, median) tuple from
            computeTimeMedians; only the global *median* is used here.

    Returns:
        A list with one TimeParameter per entry of *factors*.
    """
    (medianFirstMonth, medianLastMonth, median) = stats
    res = []
    for values in factors:
        # NOTE: the old code also built an unused sorted copy of *values*
        # into a local named `input` (shadowing the builtin); that dead
        # work is removed — behaviour is unchanged.
        currentMedian = getMedian(values, lambda myc: myc.count, True)
        if int(median) == 0 or int(currentMedian.count) == 0 \
                or int(currentMedian.year) == 0:
            # Degenerate data: zero-length window at the simulation start.
            res.append(TimeParameter(START_YEAR, 1, 1, 0))
            continue
        if currentMedian.count > median:
            duration = int(28 * currentMedian.count // median)
        else:
            duration = int(28 * median // currentMedian.count)
        res.append(TimeParameter(currentMedian.year, currentMedian.month,
                                 1, duration))
    return res
def getTimeParamsBeforeMedian(factors, stats):
    """Pick, per factor, a cut-off date so that the counts before it sum
    to roughly medianFirstMonth.

    Walks the months chronologically, accumulating counts until the
    target is reached, then interpolates a day inside the month that
    crossed the threshold. If the target is never reached, day 28 of the
    last month seen before the end is used.
    """
    (medianFirstMonth, medianLastMonth, median) = stats
    res = []
    # NOTE: the old module-wide counter `i` (incremented but never read)
    # and the builtin-shadowing local `input` were removed; behaviour is
    # unchanged.
    for values in factors:
        ordered = sorted(values, key=lambda myc: (myc.year, myc.month))
        localsum = 0
        last = MonthYearCount(0, 0, 0)
        for myc in ordered:
            localsum += myc.count
            if localsum >= medianFirstMonth:
                # Interpolate inside the month: the larger the overshoot,
                # the earlier the day (never before day 1).
                day = max(28 - 28 * (localsum - medianFirstMonth) // myc.count, 1)
                res.append(TimeParameter(myc.year, myc.month, day, None))
                break
            last = myc
        if localsum < medianFirstMonth:
            res.append(TimeParameter(last.year, last.month, 28, None))
    return res
def getTimeParamsAfterMedian(factors, stats):
    """Pick, per factor, a start date so that the counts from it to the
    end sum to roughly medianLastMonth.

    Mirrors getTimeParamsBeforeMedian but walks the months in reverse
    chronological order; if the target is never reached, day 1 of the
    last accumulated month is used.
    """
    (medianFirstMonth, medianLastMonth, median) = stats
    res = []
    # NOTE: the builtin-shadowing local `input` was renamed; behaviour is
    # unchanged.
    for values in factors:
        ordered = sorted(values, key=lambda myc: (-myc.year, -myc.month))
        localsum = 0
        last = MonthYearCount(0, 0, 0)
        for myc in ordered:
            localsum += myc.count
            if localsum >= medianLastMonth:
                day = max(28 * (localsum - medianLastMonth) // myc.count, 1)
                res.append(TimeParameter(myc.year, myc.month, day, None))
                break
            last = myc
        if localsum < medianLastMonth:
            res.append(TimeParameter(last.year, last.month, 1, None))
    return res
def computeTimeMedians(factors, lastmonthcount = LAST_MONTHS):
    """Compute the aggregate medians used by the timestamp selectors.

    Args:
        factors: list of per-person MonthYearCount lists. NOTE: each
            inner list is sorted chronologically *in place* as a side
            effect.
        lastmonthcount: size (in months) of the trailing window.

    Returns:
        (medianFirstMonth, medianLastMonth, median), where
        medianFirstMonth / medianLastMonth are the medians over persons
        of the event counts before / inside the trailing window, and
        median is the median of the per-person median month counts.
    """
    mediantimes = []
    lastmonths = []
    firstmonths = []
    for values in factors:
        # Sort chronologically so the slices below are time windows.
        values.sort(key=lambda myc: (myc.year, myc.month))
        l = len(values)
        # Sum of counts inside the trailing `lastmonthcount` months.
        lastmonthsum = sum(myc.count for myc in values[max(l-lastmonthcount,0):l])
        lastmonths.append(lastmonthsum)
        cutoff_max = l-lastmonthcount
        if cutoff_max < 0:
            # NOTE(review): with fewer months than the window, the "first
            # months" sum below covers *all* months and overlaps the
            # trailing-window sum above — presumably intentional; verify.
            cutoff_max = l
        # Sum of counts before the trailing window.
        firstmonthsum = sum(myc.count for myc in values[0:cutoff_max])
        firstmonths.append(firstmonthsum)
        mediantimes.append(getMedian(values,lambda myc: myc.count))
    median = getMedian(mediantimes, lambda x: x)
    medianLastMonth = getMedian(lastmonths, lambda x: x)
    medianFirstMonth = getMedian(firstmonths, lambda x: x)
    return (medianFirstMonth, medianLastMonth, median)
def readTimeParams(persons, personFactorFiles, activityFactorFiles, friendFiles):
    """Aggregate per-month post/group counts over friends and friends-of-friends.

    Args:
        persons: set of person ids. NOTE(review): currently unused — the
            counts are built for every person found in the files; verify
            whether filtering was intended.
        personFactorFiles: '|'-separated per-person factor files; column 9
            holds the ';'-separated posts-per-month counts and column 10
            the groups-per-month counts.
        activityFactorFiles: unused at the moment (kept for interface
            compatibility with the caller).
        friendFiles: '|'-separated adjacency lists (person, friend, ...).

    Returns:
        (friendsPostsCounts, ffPostCounts, ffGroupCount): per-person month
        vectors summed over direct friends (posts) and over
        friends-of-friends (posts and group posts).
    """
    postCounts = {}
    groupCounts = {}
    offset = 9  # NOTE(review): unused; column 9 is hard-coded below — verify.
    # 3 simulated years plus one extra month of buckets.
    monthcount = 12*3 + 1
    # Pass 1: per-person raw post/group counts, summed across factor files.
    for inputFactorFile in personFactorFiles:
        with open(inputFactorFile, 'r', encoding="utf-8") as f:
            for line in f.readlines():
                line = line.split("|")
                person = int(line[0])
                localPostCounts = list(map(int, line[9].split(";")))
                localGroupCounts = list(map(int, line[10].split(";")))
                if not person in postCounts:
                    postCounts[person] = localPostCounts
                else:
                    # Element-wise accumulation across files.
                    postCounts[person] = [sum(x) for x in zip(postCounts[person], localPostCounts)]
                if not person in groupCounts:
                    groupCounts[person] = localGroupCounts
                else:
                    groupCounts[person] = [sum(x) for x in zip(groupCounts[person], localGroupCounts)]
    # Pass 2: per-person sums over direct friends.
    friendsPostsCounts = {}
    fGroupCount = {}
    for inputFriendFile in friendFiles:
        with open(inputFriendFile, 'r', encoding="utf-8") as f:
            for line in f:
                people = list(map(int, line.split("|")))
                person = people[0]
                friendsPostsCounts[person] = [0]*monthcount
                for friend in people[1:]:
                    if not friend in postCounts:
                        continue
                    friendsPostsCounts[person] = [x+y for x,y in zip(friendsPostsCounts[person], postCounts[friend])]
                fGroupCount[person] = [0]*monthcount
                for friend in people[1:]:
                    if not friend in groupCounts:
                        continue
                    fGroupCount[person] = [x+y for x,y in zip(fGroupCount[person], groupCounts[friend])]
    # Pass 3: re-read the friend files to sum the pass-2 vectors over
    # friends again, yielding friend-of-friend aggregates.
    ffPostCounts = {}
    ffGroupCount = {}
    for inputFriendFile in friendFiles:
        with open(inputFriendFile, 'r', encoding="utf-8") as f:
            for line in f:
                people = list(map(int, line.split("|")))
                person = people[0]
                ffPostCounts[person] = [0]*monthcount
                for friend in people[1:]:
                    if not friend in friendsPostsCounts:
                        continue
                    ffPostCounts[person] = [x+y for x,y in zip(ffPostCounts[person],friendsPostsCounts[friend])]
                ffGroupCount[person] = [0]*monthcount
                for friend in people[1:]:
                    if not friend in fGroupCount:
                        continue
                    ffGroupCount[person] = [x+y for x,y in zip(ffGroupCount[person],fGroupCount[friend])]
    return (friendsPostsCounts, ffPostCounts, ffGroupCount)
def findTimeParams(input, personFactorFiles, activityFactorFiles, friendFiles, startYear):
    """Compute time parameters for every query in *input*.

    Args:
        input: dict queryId -> (person list, factor name, selection fn).
            (The parameter name shadows the builtin but is kept for
            interface compatibility.)
        personFactorFiles / activityFactorFiles / friendFiles: factor file
            paths, forwarded to readTimeParams.
        startYear: first simulation year; month indices count from it.

    Returns:
        dict queryId -> list of TimeParameter.
    """
    # Bug fix: without the `global` declaration the assignment below
    # created a function-local name, so the module-level default used by
    # getTimeParamsWithMedian was never overwritten (contrary to the
    # comment on START_YEAR's definition).
    global START_YEAR
    START_YEAR = startYear
    persons = []
    for queryId in input:
        persons += input[queryId][0]
    (fPostCount, ffPostCount, ffGroupCount) = readTimeParams(
        set(persons), personFactorFiles, activityFactorFiles, friendFiles)
    # Map factor names to the corresponding per-person month vectors.
    mapParam = {
        "f": fPostCount,
        "ff": ffPostCount,
        "ffg": ffGroupCount
    }
    output = {}
    for queryId in input:
        factors = mapParam[input[queryId][1]]
        mycFactors = []
        for person in input[queryId][0]:
            countsPerMonth = factors[person]
            myc = []
            for (month, count) in enumerate(countsPerMonth):
                if count == 0:
                    # Skip empty months entirely.
                    continue
                year = startYear + month // 12
                myc.append(MonthYearCount(month % 12 + 1, int(year), count))
            mycFactors.append(myc)
        output[queryId] = findTimeParameters(
            input[queryId][0], mycFactors, input[queryId][1], input[queryId][2])
    return output
def main(argv=None):
    """Stand-alone entry point: read a persons file and compute medians.

    NOTE(review): prepareTimedFactors and getFriendFriendPostByTime are
    not defined anywhere in this module, so this path raises NameError
    when executed — it looks like a leftover debugging harness; verify
    before relying on it.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) < 2:
        print("arguments: <input persons file>")
        return 1
    # Bug fix: close the input file instead of leaking the handle.
    with open(argv[1]) as f:
        factors = prepareTimedFactors(f, getFriendFriendPostByTime)
    medians = computeTimeMedians(factors)
    # Results are computed but not persisted here (debugging harness).
    timestamps = getTimeParamsWithMedian(factors, medians)


if __name__ == "__main__":
    sys.exit(main())
sauronlib/audio_handler.py | dmyersturnbull/sauronlib | 0 | 12763047 | import platform, typing
from typing import Optional
from klgists.common.exceptions import ExternalCommandFailed
from klgists.files.wrap_cmd_call import wrap_cmd_call
from sauronlib import logger
from sauronlib.audio_info import AudioInfo
class CouldNotConfigureOsAudioError(IOError):
    """Raised when an OS-level audio device/gain call fails."""

    def description(self):
        """Return a human-readable explanation including the platform."""
        # Bug fix: the two sentences used to be concatenated without a
        # separating space ("...calls.The platform...").
        return ("Could not set a needed audio device or gain through "
                "operating system calls. The platform appears to be "
                "{}.".format(platform.system()))
class GlobalAudio:
    """A global lock for audio input and output.

    Entering the context manager calls start() and flips ``is_on``;
    leaving calls stop() and clears it. Subclasses override
    start()/stop() to switch devices or volumes.
    """

    def __init__(self) -> None:
        # Tracks whether the audio service is currently active.
        self.is_on = False  # type: bool

    def start(self) -> None:
        """Hook for changing the output source/volume. Base: no-op."""
        pass

    def stop(self) -> None:
        """Hook for restoring the source/volume. Base: no-op."""
        pass

    def __enter__(self):
        self.start()
        self.is_on = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
        self.is_on = False

    def play(self, info: AudioInfo, blocking: bool = False):
        """Play *info* (skipped when its intensity is 0); optionally block."""
        assert self.is_on, "Cannot play sound because the audio service is off"
        if info.intensity > 0:
            handle = info.wave_obj.play()
            if blocking:
                handle.wait_done()
class SmartGlobalAudio(GlobalAudio):
    """
    A GlobalAudio that switches devices and volumes by detecting the OS.

    Looks for a parameterless method _start_{platform.system().lower()} to
    start, and _stop_{platform.system().lower()} to stop. For example, for
    Mac OS 10.12, this will be _start_darwin() and _stop_darwin().
    """

    def __init__(
            self,
            input_device: Optional[typing.Tuple[str, str]], output_device: Optional[typing.Tuple[str, str]],
            input_gain: Optional[typing.Tuple[int, int]], output_gain: Optional[typing.Tuple[int, int]],
            timeout_secs: float
    ):
        """Store device/gain pairs.

        Each pair is (value applied on start, value restored on stop);
        None disables that particular switch.
        """
        super(SmartGlobalAudio, self).__init__()
        self.input_device = input_device
        self.output_device = output_device
        self.input_gain = input_gain
        self.output_gain = output_gain
        self.timeout_secs = timeout_secs

    def __repr__(self):
        return "SmartGlobalAudio:{}(input={}@{},output={}@{})" \
            .format(self.is_on, self.input_device, self.input_gain, self.output_device, self.output_gain)

    def __str__(self):
        return repr(self)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback) -> None:
        self.stop()

    def start(self) -> None:
        """Apply the OS-specific start switches and mark the service on."""
        if self.is_on:
            logger.debug("Audio handler is already on. Ignoring.")
            return
        self.__wrap('start')
        logger.debug("Starting audio handler.")
        self.is_on = True
        logger.info("Started audio handler.")

    def stop(self) -> None:
        """Apply the OS-specific stop switches and mark the service off."""
        if not self.is_on:
            logger.debug("Audio handler is already off. Ignoring.")
            # Bug fix: previously this fell through and ran the OS 'stop'
            # calls even though the handler was already off (start() has
            # the matching early return).
            return
        logger.debug("Stopping audio handler.")
        self.__wrap('stop')
        self.is_on = False
        logger.info("Stopped audio handler.")

    def __wrap(self, prefix: str):
        """Dispatch to _<prefix>_<os-name>(), wrapping failures."""
        try:
            getattr(self, '_' + prefix + '_' + platform.system().lower())()
        except ExternalCommandFailed as e:
            raise CouldNotConfigureOsAudioError() from e
        except AttributeError as e:
            raise CouldNotConfigureOsAudioError("OS {} not recognized".format(platform.system())) from e
class DefaultSmartGlobalAudio(SmartGlobalAudio):
    """
    A default implementation of SmartGlobalAudio.

    Switches devices/gains via SwitchAudioSource/osascript on macOS and
    nircmd on Windows; other platforms fail with
    CouldNotConfigureOsAudioError from the dispatcher.
    """

    def __repr__(self):
        return "{}:{}(input={}@{},output={}@{})" \
            .format(self.__class__.__name__, self.is_on, self.input_device, self.input_gain, self.output_device,
                    self.output_gain)

    def __str__(self):
        return repr(self)

    # Bug fix: platform.system() reports 'Darwin' on macOS, so the
    # dispatcher in SmartGlobalAudio looks up _start_darwin/_stop_darwin
    # (as its own docstring says); the old _start_mac/_stop_mac names
    # were never found and start()/stop() always failed on macOS.
    def _start_darwin(self) -> None:
        self.__darwin_switch(0)

    def _stop_darwin(self) -> None:
        self.__darwin_switch(1)

    def _start_windows(self) -> None:
        self.__windows_switch(0)

    def _stop_windows(self) -> None:
        self.__windows_switch(1)

    def __windows_switch(self, i: int):
        """Apply column *i* (0=start values, 1=stop values) via nircmd."""
        def percent_to_real(percent: int) -> int:
            # nircmd expects an absolute volume in 0..65535, not a percent.
            audio_max = 65535  # This is true of Windows in general, so not necessary to put in config
            # audio min is 0
            return round(audio_max * percent / 100)
        if self.output_device is not None:
            logger.debug("Setting audio output device to %s" % self.output_device[i])
            wrap_cmd_call(
                ['nircmd', 'setdefaultsounddevice', '%s' % self.output_device[i]],
                timeout_secs=self.timeout_secs
            )
        if self.input_device is not None:
            logger.debug("Setting audio input device to %s" % self.input_device[i])
            wrap_cmd_call(
                ['nircmd', 'setdefaultsounddevice', '%s' % self.input_device[i], '2'],
                timeout_secs=self.timeout_secs
            )
        if self.output_gain is not None:
            logger.debug("Setting system volume to configured default %s" % self.output_gain[i])
            wrap_cmd_call(
                ['nircmd', 'setsysvolume', '%s' % percent_to_real(self.output_gain[i]), self.output_device[i]],
                timeout_secs=self.timeout_secs
            )
        if self.input_gain is not None:
            logger.debug("Setting input gain to configured default %s" % self.input_gain[i])
            wrap_cmd_call(
                ['nircmd', 'setsysvolume', '%s' % percent_to_real(self.input_gain[i]), self.input_device[i]],
                timeout_secs=self.timeout_secs
            )

    def __darwin_switch(self, i: int):
        """Apply column *i* via SwitchAudioSource and osascript."""
        if self.output_device is not None:
            logger.debug("Setting audio output device to %s" % self.output_device[i])
            wrap_cmd_call(['SwitchAudioSource', '-s', '%s' % self.output_device[i]])
        if self.input_device is not None:
            logger.debug("Setting audio input device to %s" % self.input_device[i])
            wrap_cmd_call(['SwitchAudioSource', '-t input', '-s', '%s' % self.input_device[i]])
        if self.output_gain is not None:
            logger.debug("Setting system volume to configured default %s" % self.output_gain[i])
            wrap_cmd_call(['osascript', '-e', 'set volume output volume %s' % self.output_gain[i]])
        if self.input_gain is not None:
            logger.debug("Setting input gain to configured default %s" % self.input_gain[i])
            wrap_cmd_call(['osascript', '-e', 'set volume input volume %s' % self.input_gain[i]])
        logger.debug("Done configuring audio")
| 2.375 | 2 |
magnum/conductor/handlers/k8s_conductor.py | ISCAS-VDI/magnum-base | 0 | 12763048 | <gh_stars>0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from k8sclient.client import rest
from oslo_log import log as logging
from oslo_utils import uuidutils
from magnum.common import exception
from magnum.common import k8s_manifest
from magnum.conductor import k8s_api as k8s
from magnum.conductor import utils as conductor_utils
from magnum import objects
import ast
LOG = logging.getLogger(__name__)
class Handler(object):
    """Magnum Kubernetes RPC handler.
    These are the backend operations. They are executed by the backend service.
    API calls via AMQP (within the ReST API) trigger the handlers to be called.
    """
    def __init__(self):
        super(Handler, self).__init__()
    # Replication Controller Operations
    def rc_create(self, context, rc):
        """Create *rc*'s manifest as a replication controller in its bay.

        Mutates *rc* in place with fields from the API response and
        returns it. Raises KubernetesAPIFailed on API errors and
        ReplicationControllerCreationFailed on an empty response.
        """
        LOG.debug("rc_create")
        bay = conductor_utils.retrieve_bay(context, rc.bay_uuid)
        # NOTE(review): the bay-scoped client is (re)bound on the handler
        # for each call; concurrent calls on one Handler instance would
        # race on self.k8s_api — verify the conductor's threading model.
        self.k8s_api = k8s.create_k8s_api(context, bay)
        manifest = k8s_manifest.parse(rc.manifest)
        try:
            resp = self.k8s_api.create_namespaced_replication_controller(
                body=manifest,
                namespace='default')
        except rest.ApiException as err:
            raise exception.KubernetesAPIFailed(err=err)
        if resp is None:
            raise exception.ReplicationControllerCreationFailed(
                bay_uuid=rc.bay_uuid)
        rc['uuid'] = resp.metadata.uid
        rc['name'] = resp.metadata.name
        rc['images'] = [c.image for c in resp.spec.template.spec.containers]
        # Labels arrive serialized as a dict-literal string; parse them.
        rc['labels'] = ast.literal_eval(resp.metadata.labels)
        rc['replicas'] = resp.status.replicas
        return rc
    def rc_update(self, context, rc_ident, bay_ident, manifest):
        """Replace the RC identified by UUID or name with *manifest*.

        Returns the updated RC object. Raises KubernetesAPIFailed on API
        errors and ReplicationControllerNotFound on an empty response.
        """
        LOG.debug("rc_update %s", rc_ident)
        bay = conductor_utils.retrieve_bay(context, bay_ident)
        self.k8s_api = k8s.create_k8s_api(context, bay)
        # rc_ident may be either a UUID or a human-readable name.
        if uuidutils.is_uuid_like(rc_ident):
            rc = objects.ReplicationController.get_by_uuid(context, rc_ident,
                                                           bay.uuid,
                                                           self.k8s_api)
        else:
            rc = objects.ReplicationController.get_by_name(context, rc_ident,
                                                           bay.uuid,
                                                           self.k8s_api)
        try:
            resp = self.k8s_api.replace_namespaced_replication_controller(
                name=str(rc.name),
                body=manifest,
                namespace='default')
        except rest.ApiException as err:
            raise exception.KubernetesAPIFailed(err=err)
        if resp is None:
            raise exception.ReplicationControllerNotFound(rc=rc.uuid)
        rc['uuid'] = resp.metadata.uid
        rc['name'] = resp.metadata.name
        rc['project_id'] = context.project_id
        rc['user_id'] = context.user_id
        rc['images'] = [c.image for c in resp.spec.template.spec.containers]
        rc['bay_uuid'] = bay.uuid
        # Labels arrive serialized as a dict-literal string; parse them.
        rc['labels'] = ast.literal_eval(resp.metadata.labels)
        rc['replicas'] = resp.status.replicas
        return rc
    def rc_delete(self, context, rc_ident, bay_ident):
        """Delete the RC identified by UUID or name.

        Only issues the API call when the bay still has a backing stack;
        a 404 from the API is treated as already-deleted and ignored.
        """
        LOG.debug("rc_delete %s", rc_ident)
        bay = conductor_utils.retrieve_bay(context, bay_ident)
        self.k8s_api = k8s.create_k8s_api(context, bay)
        if uuidutils.is_uuid_like(rc_ident):
            rc = objects.ReplicationController.get_by_uuid(context, rc_ident,
                                                           bay.uuid,
                                                           self.k8s_api)
            rc_name = rc.name
        else:
            rc_name = rc_ident
        if conductor_utils.object_has_stack(context, bay.uuid):
            try:
                self.k8s_api.delete_namespaced_replication_controller(
                    name=str(rc_name),
                    body={},
                    namespace='default')
            except rest.ApiException as err:
                # 404: the controller is already gone — treat as success.
                if err.status == 404:
                    pass
                else:
                    raise exception.KubernetesAPIFailed(err=err)
    def rc_show(self, context, rc_ident, bay_ident):
        """Fetch a single RC object by UUID or name."""
        LOG.debug("rc_show %s", rc_ident)
        bay = conductor_utils.retrieve_bay(context, bay_ident)
        self.k8s_api = k8s.create_k8s_api(context, bay)
        if uuidutils.is_uuid_like(rc_ident):
            rc = objects.ReplicationController.get_by_uuid(context, rc_ident,
                                                           bay.uuid,
                                                           self.k8s_api)
        else:
            rc = objects.ReplicationController.get_by_name(context, rc_ident,
                                                           bay.uuid,
                                                           self.k8s_api)
        return rc
    def rc_list(self, context, bay_ident):
        """List the RCs in the bay's 'default' namespace as RC objects.

        Raises KubernetesAPIFailed on API errors and
        ReplicationControllerListNotFound on an empty response.
        """
        bay = conductor_utils.retrieve_bay(context, bay_ident)
        self.k8s_api = k8s.create_k8s_api(context, bay)
        try:
            resp = self.k8s_api.list_namespaced_replication_controller(
                namespace='default')
        except rest.ApiException as err:
            raise exception.KubernetesAPIFailed(err=err)
        if resp is None:
            raise exception.ReplicationControllerListNotFound(
                bay_uuid=bay.uuid)
        rcs = []
        for entry in resp._items:
            rc = {}
            rc['uuid'] = entry.metadata.uid
            rc['name'] = entry.metadata.name
            rc['project_id'] = context.project_id
            rc['user_id'] = context.user_id
            rc['images'] = [
                c.image for c in entry.spec.template.spec.containers]
            rc['bay_uuid'] = bay.uuid
            # Convert string to dictionary
            rc['labels'] = ast.literal_eval(entry.metadata.labels)
            rc['replicas'] = entry.status.replicas
            rc_obj = objects.ReplicationController(context, **rc)
            rcs.append(rc_obj)
        return rcs
| 1.851563 | 2 |
lib/pavilion/var_dict.py | lanl-preteam/pavilion2-tmp | 1 | 12763049 | from collections import UserDict
from functools import wraps
import logging
def var_method(func):
    """Mark *func* as a (non-deferred) scheduler variable method.

    The decorated function must take no arguments other than ``self``;
    its result is always converted to ``str``.
    """
    # pylint: disable=W0212
    # VarDict._find_vars discovers variable methods via these markers
    # (functools.wraps copies them onto the wrapper below).
    func._is_var_method = True
    func._is_deferable = False

    @wraps(func)
    def _wrapper(self):
        # Enforces the no-extra-arguments contract and string values.
        return str(func(self))

    return _wrapper
class VarDict(UserDict):
    """A dictionary for defining dynamic variables in Pavilion.

    Usage:
    To add a variable, create a method and decorate it with
    either '@var_method' or '@dfr_var_method()'. The method name will be the
    variable name, and the method will be called (lazily, once) to resolve
    the variable value. Methods that start with '_' are ignored.
    """

    def __init__(self, name):
        """Initialize the var dictionary.

        :param str name: The name of this var dict.
        """
        # Bug fix: the old code called super().__init__(self), handing the
        # half-initialized instance to UserDict as its *initial data*. It
        # only worked by accident because the instance was still empty.
        super().__init__()
        self._name = name
        self._keys = self._find_vars()
        self.logger = logging.getLogger('{}_vars'.format(name))

    @classmethod
    def _find_vars(cls):
        """Collect the names of all var methods defined on this class."""
        keys = set()
        for key in cls.__dict__.keys():
            # Ignore anything that starts with an underscore
            if key.startswith('_'):
                continue
            obj = getattr(cls, key)
            if callable(obj) and getattr(obj, '_is_var_method', False):
                keys.add(key)
        return keys

    def __getitem__(self, key):
        """Resolve (and memoize) the value for *key*."""
        if key not in self._keys:
            raise KeyError("Invalid {} variable '{}'"
                           .format(self._name, key))
        if key not in self.data:
            # Resolve lazily, once, by calling the var method.
            self.data[key] = getattr(self, key)()
        return self.data[key]

    def keys(self):
        """As per the dict class."""
        return (k for k in self._keys)

    def get(self, key, default=None):
        """As per the dict class."""
        if key not in self._keys:
            return default
        return self[key]

    def values(self):
        """As per the dict class."""
        return (self[k] for k in self.keys())

    def items(self):
        """As per the dict class."""
        return ((k, self[k]) for k in self.keys())

    def info(self, key):
        """Get an info dictionary about the given key."""
        if key not in self._keys:
            raise KeyError("Key '{}' does not exist in vardict '{}'"
                           .format(key, self._name))
        func = getattr(self, key)
        # Bug fix: tolerate var methods without a docstring (__doc__ None).
        help_text = (func.__doc__ or '').replace('\n', ' ')
        # Collapse runs of whitespace into single spaces.
        help_text = ' '.join(help_text.split())
        return {
            'name': key,
            'deferred': func._is_deferable,  # pylint: disable=W0212
            'help': help_text,
        }
| 3.34375 | 3 |
src/spaceone/notification/manager/__init__.py | jihyungSong/plugin-email-notification-protocol | 0 | 12763050 | <reponame>jihyungSong/plugin-email-notification-protocol
from spaceone.notification.manager.notification_manager import NotificationManager
from spaceone.notification.manager.smtp_manager import SMTPManager
| 1.046875 | 1 |