Dataset schema (per-record fields):
  max_stars_repo_path   string   (length 3 to 269)
  max_stars_repo_name   string   (length 4 to 119)
  max_stars_count       int64    (0 to 191k)
  id                    string   (length 1 to 7)
  content               string   (length 6 to 1.05M)
  score                 float64  (0.23 to 5.13)
  int_score             int64    (0 to 5)
pyunit_idcard/idcard.py
PyUnit/pyunit-idCard
5
12777051
# !/usr/bin/python3.7 # -*- coding: utf-8 -*- # @Time : 2020/6/22 上午10:07 # @Author: <EMAIL> # @Notes : 身份证实体抽取,身份证补全,身份证检测等功能 import json import os import re from datetime import datetime class NumberNotShortError(Exception): ... class IDCardNotStingError(Exception): ... class IDCardFormatError(Exception): ... class VerificationLegalError(Exception): ... chinese = { ord('一'): '1', ord('二'): '2', ord('三'): '3', ord('四'): '4', ord('五'): '5', ord('六'): '6', ord('七'): '7', ord('八'): '8', ord('九'): '9', ord('幺'): '1', ord('拐'): '7', ord('洞'): '0', ord('两'): '2', ord('勾'): '9', ord('x'): 'X' } class IdCard: def __init__(self): with open(os.path.join(os.path.dirname(__file__), 'idCard.json')) as fp: regions = json.load(fp) self.region = {region['code']: region['name'] for region in regions} self.card = [] @staticmethod def correct_card(card: str): """纠正数字数字 比如:方言,中文数据等 """ translate = card.translate(chinese) return translate def check_up(self, id_card: str): """检验身份证信息的合法性""" assert isinstance(id_card, str), IDCardNotStingError('身份证号码必须是字符串类型') assert len(id_card) == 18, NumberNotShortError(F'身份证号码必须是18位,不支持{len(id_card)}位身份证') if not (id_card[:-1].isdigit() and re.match('[0-9X]', id_card[-1])): raise IDCardFormatError('身份证格式错误') assert self._verification_legal(id_card) == id_card[-1], VerificationLegalError('合法性验证失败') return True @staticmethod def _verification_legal(id_card: str): """检验最后一位""" coefficient = [7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2] last = [1, 0, 'X', 9, 8, 7, 6, 5, 4, 3, 2] s = sum([int(x) * y for x, y in zip(id_card[:-1], coefficient)]) remainder = last[s % 11] return str(remainder) def find_card(self, id_card: str): """查询身份证""" id_card = self.correct_card(id_card) self.check_up(id_card) province = id_card[:6] year = id_card[6:10] month = id_card[10:12] day = id_card[12:14] sex = '女' if int(id_card[16]) % 2 == 0 else '男' return {'发证地': self.region[province], '出生日期': f'{year}年{month}月{day}日', '性别': sex} def _completion(self, id_card: str): """补全身份证缺失位 缺失位用*来填充: 比如: ***121199505307*51 """ assert len(id_card) == 18, NumberNotShortError(F'身份证号码必须是18位,不支持{len(id_card)}位身份证') province = id_card[:6] year = id_card[6:10] month = id_card[10:12] day = id_card[12:14] sort = id_card[14:17] last = id_card[17] if '*' in province: province_re = province.replace('*', '.') for k in self.region: if re.match(province_re, k): self._completion(k + id_card[6:]) elif '*' in year: current_year = str(datetime.now().year) if '*' in year[0]: for y_1 in ['1', '2']: id_card = id_card[:6] + y_1 + id_card[7:] self._completion(id_card) if '*' in year[1:]: year_re = year.replace('*', '.') for y_2 in range(1984, int(current_year) + 1): if re.match(year_re, str(y_2)): id_card = id_card[:6] + str(y_2) + id_card[10:] self._completion(id_card) elif '*' in month: month_re = month.replace('*', '.') for mon in range(1, 13): m = f'{mon:0>2}' if re.match(month_re, m): id_card = id_card[:10] + m + id_card[12:] self._completion(id_card) elif '*' in day: day_re = day.replace('*', '.') for d in range(1, 32): ds = f'{d:0>2}' try: datetime(int(year), int(month), d) if re.match(day_re, ds): id_card = id_card[:12] + ds + id_card[14:] self._completion(id_card) except ValueError: pass elif '*' in sort: sort_re = sort.replace('*', '.') for st in range(1, 1000): s = f'{st:0>3}' if re.match(sort_re, s): id_card = id_card[:14] + s + id_card[-1] self._completion(id_card) elif '*' in last: new_last = self._verification_legal(id_card) id_card = id_card[:-1] + new_last self._completion(id_card) else: 
self.card.append(id_card) def complete_information(self, id_card: str): """补全身份证缺失位 缺失位用*来填充: 比如: ***121199505307*51 """ id_card = self.correct_card(id_card) self.card.clear() self._completion(id_card) comps = [] for comp in self.card: try: if self.check_up(comp): comps.append(comp) except AssertionError: pass return comps def match_card(self, card): """包含一句身份证信息的话 包含18位身份证信息,可以自动补全信息 eg: 我的身份证信息是5**121199*05*07051你能猜出来吗? :param card: 包含一句身份证信息的语句 :return: 身份证信息 """ messages = [] for message in re.finditer('[0-9*]{17}[0-9*xX]', card): cards = self.complete_information(message.group()) for card in cards: data = self.find_card(card) data['身份证号码'] = card messages.append(data) return messages
2.765625
3
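A minimal usage sketch for the IdCard extractor in the record above. The import path is an assumption based on the file location pyunit_idcard/idcard.py, and the class requires the bundled idCard.json region table to sit next to idcard.py; the sample sentence is the one from the module's own docstring.

from pyunit_idcard.idcard import IdCard  # assumed import path, matching pyunit_idcard/idcard.py

card = IdCard()
# A sentence containing a partially masked ID number; the class first normalizes
# spoken/Chinese digits, then enumerates candidates for the '*' gaps and validates them.
sentence = '我的身份证信息是5**121199*05*07051你能猜出来吗?'
for info in card.match_card(sentence):
    print(info['身份证号码'], info['发证地'], info['出生日期'], info['性别'])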
root_gnn/src/datasets/herwig_hadrons.py
Calvin-Qiu/TopReco
0
12777052
import numpy as np import itertools from graph_nets import utils_tf from root_gnn.src.datasets.base import DataSet n_node_features = 6 max_nodes = 3 # including the particle that decays def num_particles(event): return len(event) // n_node_features def make_graph(event, debug=False): # each particle contains: pdgID, E, px, py, pz. scale = 0.0001 n_nodes = num_particles(event) nodes = [[ event[inode*n_node_features+1], # E event[inode*n_node_features+2], # px event[inode*n_node_features+3], # py event[inode*n_node_features+4] # pz ] for inode in range(n_nodes)] nodes = np.array(nodes, dtype=np.float32) * scale if debug: print(n_nodes, "nodes") print("node features:", nodes.shape) if nodes.shape[0] > max_nodes: print("cluster decays to more than {} nodes".format(max_nodes)) return [(None, None)] elif nodes.shape[0] < max_nodes: print("nodes: {} less than maximum {}".format(nodes.shape[0], max_nodes)) print(event) new_nodes = np.zeros([max_nodes, 4], dtype=np.float32) new_nodes[:nodes.shape[0], :] = nodes nodes = new_nodes all_edges = list(itertools.combinations(range(n_nodes), 2)) senders = np.array([x[0] for x in all_edges]) receivers = np.array([x[1] for x in all_edges]) n_edges = len(all_edges) edges = np.expand_dims(np.array([0.0]*n_edges, dtype=np.float32), axis=1) input_datadict = { "n_node": 1, "n_edge": 1, "nodes": nodes[0, :].reshape((1, -1)), "edges": np.expand_dims(np.array([1.0]*1, dtype=np.float32), axis=1), "senders": np.array([0]), "receivers": np.array([0]), "globals": np.array([1], dtype=np.float32) } target_datadict = { "n_node": n_nodes, "n_edge": n_edges, "nodes": nodes, "edges": edges, "senders": senders, "receivers": receivers, "globals": np.array([1]*(n_nodes-1)+[0]*(max_nodes-n_nodes+1), dtype=np.float32) } input_graph = utils_tf.data_dicts_to_graphs_tuple([input_datadict]) target_graph = utils_tf.data_dicts_to_graphs_tuple([target_datadict]) return [(input_graph, target_graph)] def read(filename): with open(filename, 'r') as f: for line in f: yield [float(x) for x in line.split()] class HerwigHadrons(DataSet): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.read = read self.make_graph = make_graph
2.375
2
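A small sketch of how make_graph from the record above could be driven. It assumes graph_nets and TensorFlow are installed and that each event is a flat list of 6 values per particle (matching n_node_features = 6, with E, px, py, pz in slots 1-4); the import path and the dummy numbers are illustrative only.

from root_gnn.src.datasets.herwig_hadrons import make_graph  # assumed import path

# Hypothetical two-particle event: 6 values per particle, as n_node_features assumes.
event = [
    11, 50.0, 10.0, 5.0, 48.0, 0.0,   # particle 1: pdgID, E, px, py, pz, (extra slot)
    22, 30.0, -4.0, 2.0, 29.0, 0.0,   # particle 2
]

input_graph, target_graph = make_graph(event, debug=True)[0]
if target_graph is not None:
    print(target_graph.nodes.shape)   # (max_nodes, 4): padded E, px, py, pz per node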
labs_web/views/student/announcement.py
okorienev/labs_web
2
12777053
from flask.views import View
from flask_login import current_user, login_required
from flask import render_template, redirect, url_for, flash

from labs_web.extensions import get_announcement_by_oid


class Announcement(View):
    decorators = [login_required]

    def dispatch_request(self, *args, **kwargs):
        announcement = get_announcement_by_oid(kwargs.get('announcement_id'))
        if not announcement:
            flash('Announcement not found')
            return redirect(url_for('student.student_home'))
        if current_user.group[0].group_id not in announcement.get('groups'):
            flash("You have no rights to view this announcement")
            return redirect(url_for('student.student_home'))
        return render_template('student/announcement.html', announcement=announcement)
2.265625
2
models/network.py
uidmice/placement-simulation
0
12777054
<reponame>uidmice/placement-simulation import numpy as np import networkx as nx from networkx.drawing.nx_pydot import graphviz_layout import matplotlib.pyplot as plt from models.domain import Domain from models.device import Device from models.router import Router from models.link import Link """ Constants associated with the network model """ LINK_NOISE_SCALE = 5 # ms class Network: ''' The Network object captures the hierarchical network model Parameters ---------- G_nodes : nx.Graph The network graph. Nodes in the graph represents either Devices (including end-devices and edge-servers) or Routers. Edges are communication links. G_domain : nx.Graph The hierarchical domain graph. A node in the graph is either a transit domain, a stub domain, or a LAN. pos_node : dict {device_id : (x, y) } The location of the each nodes in the network graph G_node Attributes ---------- G_nodes : nx.Graph store G_nodes, each node in the graph has a node_id G_domain : nx.Graph store G_domain, each domian in the graph has a domain_id pos_node : dict store pos_node domain : dict {domain_id : Domain} a dictionary that holds the domain objects with domain ids as keys nodes : dict {node_id : Device/Router} a dictionary that holds Device and Router objects in the graph with node_id as keys end_devices : list a list of node_ids of all end devices in the network edge_servers : list a list of node_ids of edge servers in the network transit_domain : list a list of domain_ids of transit domains stub_domain : list a list of domain_ids of stub domains lan_domain : list a list of domain_ids of LANs ''' def __init__(self, G_nodes: nx.Graph, G_domain: nx.Graph, pos_node): self.G_nodes = G_nodes self.G_domain = G_domain self.pos_node = pos_node self.domains = {} self.nodes = {} self.end_devices = [n for n in self.G_nodes.nodes if self.G_nodes.nodes[n]['type']=='host'] self.edge_servers = [n for n in self.G_nodes.nodes if self.G_nodes.nodes[n]['type'] == 'edge'] self.transit_domain = [n for n in self.G_domain.nodes if self.G_domain.nodes[n]['type'] == 'transit'] self.stub_domain = [n for n in self.G_domain.nodes if self.G_domain.nodes[n]['type'] == 'stub'] self.lan_domain = [n for n in self.G_domain.nodes if self.G_domain.nodes[n]['type'] == 'lan'] for d in self.G_domain.nodes: type = self.G_domain.nodes[d]['type'] self.domains[d] = Domain(d, type) if type == 'transit': self.G_domain.nodes[d]['level'] = 3 elif type == 'stub': self.G_domain.nodes[d]['level'] = 2 else: self.G_domain.nodes[d]['level'] = 1 for n in self.G_domain.adj[d]: if d in self.transit_domain and n in self.stub_domain: self.domains[d].child_domains.append(n) elif d in self.stub_domain and n in self.transit_domain: self.domains[d].parent_domains.append(n) elif d in self.stub_domain and n in self.lan_domain: self.domains[d].child_domains.append(n) elif d in self.lan_domain and n in self.stub_domain: self.domains[d].parent_domains.append(n) for n in self.G_nodes.nodes: type = self.G_nodes.nodes[n]['type'] if type in ['host', 'edge']: self.nodes[n] = Device(n, self.G_nodes.nodes[n]['rate']) else: self.nodes[n] = Router(n, self.G_nodes.nodes[n]['rate']) self.domains[self.G_nodes.nodes[n]['domain']].add_node(n, type) for e in self.G_domain.edges: self.G_domain.edges[e]['weight'] = self.latency_between_domains(e[0], e[1], 10) def get_shortest_path(self, node1, node2): '''Return the shortest path between node1 and node2 in the graph''' return nx.shortest_path(self.G_nodes, node1, node2, weight='weight') def get_fastest_path(self, node1, node2, kbytes): for path in 
nx.all_simple_paths(self.G_nodes, node1, node2): for i in range(len(path) - 1): edge = (path[i], path[i+1]) self.G_nodes.edges[edge][kbytes] = self.G_nodes.edges[edge]['weight']/1000 + kbytes/self.G_nodes.edges[edge]['bw'] if not (path[i+1] in self.edge_servers or path[i+1] in self.end_devices): self.G_nodes.edges[edge][kbytes] += self.nodes[edge[1]].delay() return nx.shortest_path(self.G_nodes, node1, node2, weight=kbytes) def latency_between_nodes_on_shortest_path(self, node1, node2, kbytes, average=True): """Return the communication latency of sending kbytes from node1 to node2. If average is True, the expected latency is returned. Otherwise, it is sampled from a distribution""" path = nx.shortest_path(self.G_nodes, node1, node2, weight='weight') d = 0 if node1 == node2: return 0 for i in range(len(path) - 1): d += self.G_nodes[path[i]][path[i+1]]['weight']/1000 + kbytes/self.G_nodes[path[i]][path[i+1]]['bw'] if not average: d += LINK_NOISE_SCALE * np.random.randn() for i in range(len(path) - 2): d += self.nodes[path[i+1]].delay(average) return d * 10 def latency_between_nodes_on_fastest_path(self, node1, node2, kbytes, average=True): for path in nx.all_simple_paths(self.G_nodes, node1, node2): for i in range(len(path) - 1): edge = (path[i], path[i + 1]) self.G_nodes.edges[edge][kbytes] = self.G_nodes.edges[edge]['weight'] / 1000 + kbytes / \ self.G_nodes.edges[edge]['bw'] if not average: self.G_nodes.edges[edge][kbytes] += LINK_NOISE_SCALE * np.random.randn() if not (path[i + 1] in self.edge_servers or path[i + 1] in self.end_devices): self.G_nodes.edges[edge][kbytes] += self.nodes[edge[1]].delay(average) return nx.shortest_path_length(self.G_nodes, node1, node2, weight=kbytes) * 10 def latency_from_node_to_domain(self, node, domain, kbytes): """ Return the average latency of sending kbytes from node (in self.nodes) to a domain (in self.domains) """ target_nodes = self.domains[domain].nodes return np.average([self.latency_between_nodes_on_shortest_path(node, tn, kbytes) for tn in target_nodes]) def latency_between_domains(self, domain1, domain2, kbytes): """ Return the average latency of sending kbytes from domain1 (in self.domains) to domain2 (in self.domains)""" nodes = self.domains[domain1].nodes return np.average([self.latency_from_node_to_domain(node, domain2, kbytes) for node in nodes]) def get_domain_id(self, node): """ Return the id of the domain where node belongs""" return self.G_nodes.nodes[node]['domain'] def get_parent_domain_id(self, domain): """ Return the id of the parent domain of the domain""" return self.domains[domain].parent_domains def children_lan_domain(self, domain): """ Return a list of domain_ids of all LAN domains under the domain """ if domain in self.lan_domain: return [domain] if domain in self.stub_domain: return self.domains[domain].child_domains if domain in self.transit_domain: stubs = self.domains[domain].child_domains return [item for n in stubs for item in self.domains[n].child_domains] def sub_graph_domains(self, domain): """ Return a set of node_ids that are in the sub-area of that domain""" if domain in self.lan_domain: return {domain} a = set().union(*[self.sub_graph_domains(d) for d in self.domains[domain].child_domains]) a.add(domain) return a def is_operating(self, domain): """ Return True if the domain is an operating domain; False otherwise""" return self.domains[domain].function == 'operating' def is_routing(self, domain): """ Return True if the domain is an routing domain; False otherwise""" return self.domains[domain].function == 
'routing' def get_operating_devices(self, domain): """ Return a list of node_ids of devives in the domain""" if self.is_routing(domain): return [] return [n for n in self.domains[domain].nodes if n in self.edge_servers or n in self.end_devices] def random_node(self, domain): """ Return the node_id of a randomly selected node in the domain""" return np.random.choice(self.domains[domain].nodes) def common_domain(self, domain_list: list): if len(domain_list) == 0: return None if len(domain_list) == 1: return domain_list[0] if len(domain_list) == 2: d1 = domain_list[0] d2 = domain_list[1] if d1 in self.sub_graph_domains(d2): return d2 if d2 in self.sub_graph_domains(d1): return d1 p = nx.shortest_path(self.G_domain, d1, d2, weight='weight') current_level = self.G_domain.nodes[d1]['level'] current_domain = d1 for i in range(len(p) - 1): if self.G_domain.nodes[p[i+1]]['level'] > current_level: current_level = self.G_domain.nodes[p[i+1]]['level'] current_domain = p[i+1] return current_domain return self.common_domain([domain_list[0], self.common_domain(domain_list[1:])]) def draw_nodes(self, show=False): plt.figure(figsize=(8, 8)) node_color = {'transit': 'blue', 'stub': 'orangered', 'lan': 'g', 'edge': 'grey', 'host': 'lawngreen', 'gateway': 'darkgreen'} link_color = {'T': 'cornflowerblue', 'TT': 'dodgerblue', 'TS': 'tomato', 'S': 'tomato', 'SL': 'g', 'L': 'lime'} node_size = {'transit': 150, 'stub': 150, 'host': 150,'edge': 150, 'gateway': 150} width_map = {'T': 6, 'TT': 6, 'TS': 6, 'S': 6, 'SL': 6, 'L': 6} color_map = [node_color[self.G_nodes.nodes[n]['type']] for n in self.G_nodes.nodes] edge_map = [link_color[self.G_nodes.edges[e]['type']] for e in self.G_nodes.edges] nx.draw(self.G_nodes, pos=self.pos_node, node_size=[node_size[self.G_nodes.nodes[n]['type']] for n in self.G_nodes.nodes], edge_color=edge_map, width=[width_map[self.G_nodes.edges[e]['type']] for e in self.G_nodes.edges], node_color=color_map) if show: plt.show() def draw_domains(self, show=False): plt.figure(figsize=(8,4)) node_color = {'transit': 'blue', 'stub': 'r', 'lan': 'g', 'host': 'g', 'gateway': 'b'} color_map = [node_color[self.G_domain.nodes[n]['type']] for n in self.G_domain.nodes] nx.draw(self.G_domain, pos=graphviz_layout(self.G_domain, prog="dot"), node_color=color_map) if show: plt.show()
2.96875
3
model/resnet.py
xzgz/vehicle-reid
3
12777055
from __future__ import absolute_import from __future__ import division import torch import copy from torch import nn from torch.nn import functional as F from torchvision.models.resnet import resnet50, Bottleneck from .hacnn import SoftBlock, SoftHardBlock import torchvision class ResNet50(nn.Module): def __init__(self, num_classes, loss_type='xent', **kwargs): super(ResNet50, self).__init__() self.loss_type = loss_type resnet50 = torchvision.models.resnet50(pretrained=True) self.base = nn.Sequential(*list(resnet50.children())[:-2]) self.classifier = nn.Linear(2048, num_classes) def forward(self, x): x = self.base(x) x = F.avg_pool2d(x, x.size()[2:]) f = x.view(x.size(0), -1) if self.loss_type == 'xent': if self.training: y = self.classifier(f) return [y] else: feat = torch.div(f, f.norm(dim=1, keepdim=True)) return feat elif self.loss_type in ['xent_triplet', 'xent_tripletv2', 'xent_triplet_sqrt', 'xent_triplet_squa']: feat = torch.div(f, f.norm(dim=1, keepdim=True)) if self.training: y = self.classifier(f) return [y], feat else: return feat else: raise KeyError("Unsupported loss: {}".format(self.loss_type)) class MGN(nn.Module): def __init__(self, num_classes, loss_type='xent', **kwargs): super(MGN, self).__init__() self.loss_type = loss_type self.dimension_branch = 512 # self.dimension_branch = 1024 resnet = resnet50(pretrained=True) self.backbone = nn.Sequential( resnet.conv1, # nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False), resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, # res_conv2 resnet.layer2, # res_conv3 resnet.layer3[0], # res_conv4_1 ) # res_conv4x res_conv4 = nn.Sequential(*resnet.layer3[1:]) res_g_conv5 = resnet.layer4 res_p_conv5 = nn.Sequential( Bottleneck(1024, 512, downsample=nn.Sequential(nn.Conv2d(1024, 2048, 1, bias=False), nn.BatchNorm2d(2048))), Bottleneck(2048, 512), Bottleneck(2048, 512)) res_p_conv5.load_state_dict(resnet.layer4.state_dict()) self.p1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5)) self.p2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5)) self.maxpool_zg_p1 = nn.MaxPool2d(kernel_size=(8, 8)) self.maxpool_zg_p2 = nn.MaxPool2d(kernel_size=(16, 16)) reduction_512 = nn.Sequential(nn.Conv2d(2048, self.dimension_branch, 1, bias=False), nn.BatchNorm2d(self.dimension_branch), nn.ReLU()) self.reduction_1 = copy.deepcopy(reduction_512) self.reduction_2 = copy.deepcopy(reduction_512) self.fc_id_512_1 = nn.Linear(self.dimension_branch, num_classes) self.fc_id_512_2 = nn.Linear(self.dimension_branch, num_classes) # self.fc_id_512_1 = nn.Linear(2048, num_classes) # self.fc_id_512_2 = nn.Linear(2048, num_classes) def forward(self, x): x = self.backbone(x) p1 = self.p1(x) p2 = self.p2(x) zg_p1 = self.maxpool_zg_p1(p1) zg_p2 = self.maxpool_zg_p2(p2) fg_p1 = self.reduction_1(zg_p1).squeeze(dim=3).squeeze(dim=2) fg_p2 = self.reduction_2(zg_p2).squeeze(dim=3).squeeze(dim=2) l_p1 = self.fc_id_512_1(fg_p1) l_p2 = self.fc_id_512_2(fg_p2) # l_p1 = self.fc_id_512_1(zg_p1.squeeze(dim=3).squeeze(dim=2)) # l_p2 = self.fc_id_512_2(zg_p2.squeeze(dim=3).squeeze(dim=2)) if self.loss_type in ['xent']: if self.training: feat_clfy = [l_p1, l_p2] return feat_clfy else: # feat_embed = torch.cat([fg_p1, fg_p2], dim=1) # feat_embed = torch.div(feat_embed, feat_embed.norm(dim=1, keepdim=True)) # return feat_embed # fg_p1 = torch.div(fg_p1, fg_p1.norm(dim=1, keepdim=True)) # fg_p2 = torch.div(fg_p2, fg_p2.norm(dim=1, keepdim=True)) feat_global = torch.cat([fg_p1, fg_p2], dim=1) feat_global = torch.div(feat_global, 
feat_global.norm(dim=1, keepdim=True)) return feat_global elif self.loss_type in ['xent_triplet', 'xent_tripletv2', 'xent_triplet_sqrt', 'xent_triplet_squa']: # # feat_clfy = torch.cat([l_p1, l_p2], dim=0) # feat_clfy = [l_p1, l_p2] # # feat_clfy = l_p1 # feat_global = torch.cat([fg_p1, fg_p2], dim=1) # # feat_global = fg_p1 # feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) # # feat_local = torch.cat([fz_p1, fz_p2, fz_p3, fz_p4], dim=1) # # feat_local = torch.div(feat_local, feat_local.norm(dim=1, keepdim=True)) # if self.training: # return feat_clfy, feat_global # else: # return feat_global # feat_clfy = [l_p1, l_p2] # fg_p1 = torch.div(fg_p1, fg_p1.norm(dim=1, keepdim=True)) # fg_p2 = torch.div(fg_p2, fg_p2.norm(dim=1, keepdim=True)) # feat_global = [fg_p1, fg_p2] # if self.training: # return feat_clfy, feat_global # else: # feat_global = torch.cat([fg_p1, fg_p2], dim=1) # return feat_global # feat_clfy = [l_p1, l_p2] # feat_global = [fg_p1, fg_p2] # if self.training: # return feat_clfy, feat_global # else: # # fg_p1 = torch.div(fg_p1, fg_p1.norm(dim=1, keepdim=True)) # # fg_p2 = torch.div(fg_p2, fg_p2.norm(dim=1, keepdim=True)) # feat_global = torch.cat([fg_p1, fg_p2], dim=1) # feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) # return feat_global feat_clfy = [l_p1, l_p2] feat_global = torch.cat([fg_p1, fg_p2], dim=1) feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) if self.training: # fg_p1 = torch.div(fg_p1, fg_p1.norm(dim=1, keepdim=True)) # fg_p2 = torch.div(fg_p2, fg_p2.norm(dim=1, keepdim=True)) # feat_global = [fg_p1, fg_p2] return feat_clfy, feat_global else: # feat_global = torch.cat([fg_p1, fg_p2], dim=1) # feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) return feat_global else: raise KeyError("Unsupported loss: {}".format(self.loss_type)) class OriginMGN(nn.Module): """ @ARTICLE{2018arXiv180401438W, author = {{<NAME>. and {<NAME>. and {<NAME>. and {<NAME>. 
and {Zhou}, X.}, title = "{Learning Discriminative Features with Multiple Granularities for Person Re-Identification}", journal = {ArXiv e-prints}, archivePrefix = "arXiv", eprint = {1804.01438}, primaryClass = "cs.CV", keywords = {Computer Science - Computer Vision and Pattern Recognition}, year = 2018, month = apr, adsurl = {http://adsabs.harvard.edu/abs/2018arXiv180401438W}, adsnote = {Provided by the SAO/NASA Astrophysics Data System} } """ def __init__(self, num_classes, loss_type='xent', **kwargs): super(OriginMGN, self).__init__() self.loss_type = loss_type resnet = resnet50(pretrained=True) self.backbone = nn.Sequential( resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, # res_conv2 resnet.layer2, # res_conv3 resnet.layer3[0], # res_conv4_1 ) # res_conv4x res_conv4 = nn.Sequential(*resnet.layer3[1:]) # res_conv5 global res_g_conv5 = resnet.layer4 # res_conv5 part res_p_conv5 = nn.Sequential( Bottleneck(1024, 512, downsample=nn.Sequential(nn.Conv2d(1024, 2048, 1, bias=False), nn.BatchNorm2d(2048))), Bottleneck(2048, 512), Bottleneck(2048, 512)) res_p_conv5.load_state_dict(resnet.layer4.state_dict()) # mgn part-1 global self.p1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_g_conv5)) # mgn part-2 self.p2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5)) # mgn part-3 self.p3 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_p_conv5)) # global max pooling self.maxpool_zg_p1 = nn.MaxPool2d(kernel_size=(12, 4)) self.maxpool_zg_p2 = nn.MaxPool2d(kernel_size=(24, 8)) self.maxpool_zg_p3 = nn.MaxPool2d(kernel_size=(24, 8)) self.maxpool_zp2 = nn.MaxPool2d(kernel_size=(12, 8)) self.maxpool_zp3 = nn.MaxPool2d(kernel_size=(8, 8)) # conv1 reduce reduction = nn.Sequential(nn.Conv2d(2048, 256, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU()) self.reduction_0 = copy.deepcopy(reduction) self.reduction_1 = copy.deepcopy(reduction) self.reduction_2 = copy.deepcopy(reduction) self.reduction_3 = copy.deepcopy(reduction) self.reduction_4 = copy.deepcopy(reduction) self.reduction_5 = copy.deepcopy(reduction) self.reduction_6 = copy.deepcopy(reduction) self.reduction_7 = copy.deepcopy(reduction) # fc softmax loss self.fc_id_2048_0 = nn.Linear(2048, num_classes) self.fc_id_2048_1 = nn.Linear(2048, num_classes) self.fc_id_2048_2 = nn.Linear(2048, num_classes) self.fc_id_256_1_0 = nn.Linear(256, num_classes) self.fc_id_256_1_1 = nn.Linear(256, num_classes) self.fc_id_256_2_0 = nn.Linear(256, num_classes) self.fc_id_256_2_1 = nn.Linear(256, num_classes) self.fc_id_256_2_2 = nn.Linear(256, num_classes) def forward(self, x): x = self.backbone(x) p1 = self.p1(x) p2 = self.p2(x) p3 = self.p3(x) zg_p1 = self.maxpool_zg_p1(p1) # z_g^G zg_p2 = self.maxpool_zg_p2(p2) # z_g^P2 zg_p3 = self.maxpool_zg_p3(p3) # z_g^P3 zp2 = self.maxpool_zp2(p2) z0_p2 = zp2[:, :, 0:1, :] # z_p0^P2 z1_p2 = zp2[:, :, 1:2, :] # z_p1^P2 zp3 = self.maxpool_zp3(p3) z0_p3 = zp3[:, :, 0:1, :] # z_p0^P3 z1_p3 = zp3[:, :, 1:2, :] # z_p1^P3 z2_p3 = zp3[:, :, 2:3, :] # z_p2^P3 fg_p1 = self.reduction_0(zg_p1).squeeze(dim=3).squeeze(dim=2) # f_g^G, L_triplet^G fg_p2 = self.reduction_1(zg_p2).squeeze(dim=3).squeeze(dim=2) # f_g^P2, L_triplet^P2 fg_p3 = self.reduction_2(zg_p3).squeeze(dim=3).squeeze(dim=2) # f_g^P3, L_triplet^P3 f0_p2 = self.reduction_3(z0_p2).squeeze(dim=3).squeeze(dim=2) # f_p0^P2 f1_p2 = self.reduction_4(z1_p2).squeeze(dim=3).squeeze(dim=2) # f_p1^P2 f0_p3 = self.reduction_5(z0_p3).squeeze(dim=3).squeeze(dim=2) # f_p0^P3 f1_p3 = 
self.reduction_6(z1_p3).squeeze(dim=3).squeeze(dim=2) # f_p1^P3 f2_p3 = self.reduction_7(z2_p3).squeeze(dim=3).squeeze(dim=2) # f_p2^P3 l_p1 = self.fc_id_2048_0(zg_p1.squeeze(dim=3).squeeze(dim=2)) # L_softmax^G l_p2 = self.fc_id_2048_1(zg_p2.squeeze(dim=3).squeeze(dim=2)) # L_softmax^P2 l_p3 = self.fc_id_2048_2(zg_p3.squeeze(dim=3).squeeze(dim=2)) # L_softmax^P3 l0_p2 = self.fc_id_256_1_0(f0_p2) # L_softmax0^P2 l1_p2 = self.fc_id_256_1_1(f1_p2) # L_softmax1^P2 l0_p3 = self.fc_id_256_2_0(f0_p3) # L_softmax0^P3 l1_p3 = self.fc_id_256_2_1(f1_p3) # L_softmax1^P3 l2_p3 = self.fc_id_256_2_2(f2_p3) # L_softmax2^P3 if self.loss_type in ['xent_triplet', 'xent_tripletv2', 'xent_triplet_sqrt', 'xent_triplet_squa']: if self.training: feat_clfy = [l_p1, l_p2, l_p3, l0_p2, l1_p2, l0_p3, l1_p3, l2_p3] feat = torch.cat([fg_p1, fg_p2, fg_p3], dim=1) feat = torch.div(feat, feat.norm(dim=1, keepdim=True)) return feat_clfy, feat else: feat = torch.cat([fg_p1, fg_p2, fg_p3, f0_p2, f1_p2, f0_p3, f1_p3, f2_p3], dim=1) feat = torch.div(feat, feat.norm(dim=1, keepdim=True)) return feat else: raise KeyError("Unsupported loss: {}".format(self.loss_type)) class MGNB4(nn.Module): def __init__(self, num_classes, loss_type='xent', **kwargs): super(MGNB4, self).__init__() self.loss_type = loss_type resnet = resnet50(pretrained=True) self.backbone = nn.Sequential( resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, # res_conv2 resnet.layer2, # res_conv3 resnet.layer3[0], # res_conv4_1 ) # res_conv4x res_conv4 = nn.Sequential(*resnet.layer3[1:]) res_conv5 = resnet.layer4 self.b1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5)) self.b2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5)) self.b3 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5)) self.b4 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5)) self.maxpool_b1 = nn.MaxPool2d(kernel_size=(8, 8)) self.maxpool_b2 = nn.MaxPool2d(kernel_size=(8, 8)) self.maxpool_b3 = nn.MaxPool2d(kernel_size=(8, 8)) self.maxpool_b4 = nn.MaxPool2d(kernel_size=(8, 8)) reduction_512 = nn.Sequential(nn.Conv2d(2048, 512, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU()) self.reduction_1 = copy.deepcopy(reduction_512) self.reduction_2 = copy.deepcopy(reduction_512) self.reduction_3 = copy.deepcopy(reduction_512) self.reduction_4 = copy.deepcopy(reduction_512) self.fc_id_512_1 = nn.Linear(512, num_classes) self.fc_id_512_2 = nn.Linear(512, num_classes) self.fc_id_512_3 = nn.Linear(512, num_classes) self.fc_id_512_4 = nn.Linear(512, num_classes) def forward(self, x): x = self.backbone(x) b1 = self.b1(x) b2 = self.b2(x) b3 = self.b3(x) b4 = self.b4(x) pb1 = self.maxpool_b1(b1) pb2 = self.maxpool_b2(b2) pb3 = self.maxpool_b3(b3) pb4 = self.maxpool_b4(b4) f_b1 = self.reduction_1(pb1).squeeze(dim=3).squeeze(dim=2) f_b2 = self.reduction_2(pb2).squeeze(dim=3).squeeze(dim=2) f_b3 = self.reduction_3(pb3).squeeze(dim=3).squeeze(dim=2) f_b4 = self.reduction_4(pb4).squeeze(dim=3).squeeze(dim=2) cf_b1 = self.fc_id_512_1(f_b1) cf_b2 = self.fc_id_512_2(f_b2) cf_b3 = self.fc_id_512_3(f_b3) cf_b4 = self.fc_id_512_4(f_b4) if self.loss_type in ['xent']: if self.training: feat_clfy = [cf_b1, cf_b2, cf_b3, cf_b4] return feat_clfy else: feat_global = torch.cat([f_b1, f_b2, f_b3, f_b4], dim=1) feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) return feat_global elif self.loss_type in ['xent_triplet', 'xent_tripletv2']: feat_clfy = [cf_b1, cf_b2, cf_b3, cf_b4] feat_global = torch.cat([f_b1, f_b2, f_b3, 
f_b4], dim=1) feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) if self.training: return feat_clfy, feat_global else: return feat_global else: raise KeyError("Unsupported loss: {}".format(self.loss_type)) class MGNB2(nn.Module): def __init__(self, num_classes, loss_type='xent', **kwargs): super(MGNB2, self).__init__() self.loss_type = loss_type self.dimension_branch = 1024 resnet = resnet50(pretrained=True) self.backbone = nn.Sequential( resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, # res_conv2 resnet.layer2, # res_conv3 resnet.layer3[0], # res_conv4_1 ) # res_conv4x res_conv4 = nn.Sequential(*resnet.layer3[1:]) res_conv5 = resnet.layer4 self.b1 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5)) self.b2 = nn.Sequential(copy.deepcopy(res_conv4), copy.deepcopy(res_conv5)) self.maxpool_b1 = nn.MaxPool2d(kernel_size=(8, 8)) self.maxpool_b2 = nn.MaxPool2d(kernel_size=(8, 8)) reduction_512 = nn.Sequential(nn.Conv2d(2048, self.dimension_branch, 1, bias=False), nn.BatchNorm2d(self.dimension_branch), nn.ReLU()) self.reduction_1 = copy.deepcopy(reduction_512) self.reduction_2 = copy.deepcopy(reduction_512) self.fc_id_512_1 = nn.Linear(self.dimension_branch, num_classes) self.fc_id_512_2 = nn.Linear(self.dimension_branch, num_classes) def forward(self, x): x = self.backbone(x) b1 = self.b1(x) b2 = self.b2(x) pb1 = self.maxpool_b1(b1) pb2 = self.maxpool_b2(b2) f_b1 = self.reduction_1(pb1).squeeze(dim=3).squeeze(dim=2) f_b2 = self.reduction_2(pb2).squeeze(dim=3).squeeze(dim=2) cf_b1 = self.fc_id_512_1(f_b1) cf_b2 = self.fc_id_512_2(f_b2) if self.loss_type in ['xent']: if self.training: feat_clfy = [cf_b1, cf_b2] return feat_clfy else: feat_global = torch.cat([f_b1, f_b2], dim=1) feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) return feat_global elif self.loss_type in ['xent_triplet', 'xent_tripletv2', 'xent_triplet_sqrt', 'xent_triplet_squa']: feat_clfy = [cf_b1, cf_b2] feat_global = torch.cat([f_b1, f_b2], dim=1) feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) if self.training: return feat_clfy, feat_global else: return feat_global else: raise KeyError("Unsupported loss: {}".format(self.loss_type)) class ResSoAttn(nn.Module): def __init__(self, num_classes, loss_type='xent', nchannels=[128, 256, 384], branch_feat_dim=682, **kwargs): super(ResSoAttn, self).__init__() self.loss_type = loss_type resnet = resnet50(pretrained=True) self.backbone = nn.Sequential( resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, # res_conv2 resnet.layer2, # res_conv3 ) self.habk1 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim), nn.Dropout(p=0.5, inplace=True)) self.habk2 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim), nn.Dropout(p=0.5, inplace=True)) self.habk3 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim), nn.Dropout(p=0.5, inplace=True)) self.fc_id_1 = nn.Linear(branch_feat_dim, num_classes) self.fc_id_2 = nn.Linear(branch_feat_dim, num_classes) self.fc_id_3 = nn.Linear(branch_feat_dim, num_classes) def forward(self, x): x = self.backbone(x) f_b1 = self.habk1(x) f_b2 = self.habk2(x) f_b3 = self.habk3(x) cf_b1 = self.fc_id_1(f_b1) cf_b2 = self.fc_id_2(f_b2) cf_b3 = self.fc_id_3(f_b3) if self.loss_type in ['xent']: if self.training: feat_clfy = [cf_b1, cf_b2, cf_b3] return feat_clfy else: feat_global = torch.cat([f_b1, f_b2, f_b3], dim=1) feat_global 
= torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) return feat_global elif self.loss_type in ['xent_triplet', 'xent_tripletv2']: feat_clfy = [cf_b1, cf_b2, cf_b3] feat_global = torch.cat([f_b1, f_b2, f_b3], dim=1) feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) if self.training: return feat_clfy, feat_global else: return feat_global else: raise KeyError("Unsupported loss: {}".format(self.loss_type)) class ResSoHaAttn(nn.Module): def __init__(self, num_classes, loss_type='xent', nchannels=[128, 256, 384], branch_feat_dim=682, **kwargs): super(ResSoHaAttn, self).__init__() self.loss_type = loss_type resnet = resnet50(pretrained=True) self.backbone = nn.Sequential( resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1, # res_conv2 resnet.layer2, # res_conv3 ) self.habk1 = SoftHardBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim) self.habk2 = SoftHardBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim) self.habk3 = SoftHardBlock(nchannels=nchannels, input_channel=512, feat_dim=branch_feat_dim) self.fc_id_1 = nn.Linear(branch_feat_dim, num_classes) self.fc_id_2 = nn.Linear(branch_feat_dim, num_classes) self.fc_id_3 = nn.Linear(branch_feat_dim, num_classes) def forward(self, x): x = self.backbone(x) fg_b1, fl_b1 = self.habk1(x) fg_b2, fl_b2 = self.habk2(x) fg_b3, fl_b3 = self.habk3(x) f_b1 = torch.cat([fg_b1, fl_b1], dim=1) f_b2 = torch.cat([fg_b2, fl_b2], dim=1) f_b3 = torch.cat([fg_b3, fl_b3], dim=1) cf_b1 = self.fc_id_1(f_b1) cf_b2 = self.fc_id_2(f_b2) cf_b3 = self.fc_id_3(f_b3) if self.loss_type in ['xent']: if self.training: feat_clfy = [cf_b1, cf_b2, cf_b3] return feat_clfy else: feat = torch.cat([f_b1, f_b2, f_b3], dim=1) feat = torch.div(feat, feat.norm(dim=1, keepdim=True)) return feat elif self.loss_type in ['xent_triplet', 'xent_tripletv2']: feat_clfy = [cf_b1, cf_b2, cf_b3] # feat_global = torch.cat([fg_b1, fg_b2, fg_b3], dim=1) # feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) feat = torch.cat([f_b1, f_b2, f_b3], dim=1) feat = torch.div(feat, feat.norm(dim=1, keepdim=True)) if self.training: # return feat_clfy, feat_global return feat_clfy, feat else: # feat = torch.cat([f_b1, f_b2, f_b3], dim=1) # feat = torch.div(feat, feat.norm(dim=1, keepdim=True)) return feat else: raise KeyError("Unsupported loss: {}".format(self.loss_type)) class Resv2SoAttn(nn.Module): def __init__(self, num_classes, loss_type='xent', nchannels=[256, 384, 512], branch_feat_dim=682, **kwargs): super(Resv2SoAttn, self).__init__() self.loss_type = loss_type self.inplanes = 16 self.layer1 = self.make_layer(Bottleneck, 16, 3, stride=1) self.layer2 = self.make_layer(Bottleneck, 32, 4, stride=2) self.backbone = nn.Sequential( nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False), nn.BatchNorm2d(16), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2, padding=1), self.layer1, self.layer2, ) self.habk1 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=128, feat_dim=branch_feat_dim), nn.Dropout(p=0.5, inplace=True)) self.habk2 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=128, feat_dim=branch_feat_dim), nn.Dropout(p=0.5, inplace=True)) self.habk3 = nn.Sequential(SoftBlock(nchannels=nchannels, input_channel=128, feat_dim=branch_feat_dim), nn.Dropout(p=0.5, inplace=True)) self.fc_id_1 = nn.Linear(branch_feat_dim, num_classes) self.fc_id_2 = nn.Linear(branch_feat_dim, num_classes) self.fc_id_3 = nn.Linear(branch_feat_dim, num_classes) def 
make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def forward(self, x): x = self.backbone(x) f_b1 = self.habk1(x) f_b2 = self.habk2(x) f_b3 = self.habk3(x) cf_b1 = self.fc_id_1(f_b1) cf_b2 = self.fc_id_2(f_b2) cf_b3 = self.fc_id_3(f_b3) if self.loss_type in ['xent']: if self.training: feat_clfy = [cf_b1, cf_b2, cf_b3] return feat_clfy else: feat_global = torch.cat([f_b1, f_b2, f_b3], dim=1) feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) return feat_global elif self.loss_type in ['xent_triplet', 'xent_tripletv2']: feat_clfy = [cf_b1, cf_b2, cf_b3] feat_global = torch.cat([f_b1, f_b2, f_b3], dim=1) feat_global = torch.div(feat_global, feat_global.norm(dim=1, keepdim=True)) if self.training: return feat_clfy, feat_global else: return feat_global else: raise KeyError("Unsupported loss: {}".format(self.loss_type))
2.296875
2
setup.py
ccnmtl/epubbuilder
0
12777056
from __future__ import unicode_literals
from setuptools import setup

setup(
    name="epubbuilder",
    version="0.2.1",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/ccnmtl/epubbuilder",
    description="epub builder library",
    long_description="forked from python-epub-builder",
    install_requires=[
        "future",
        "lxml",
        "Genshi",
        "nose"
    ],
    scripts=[],
    license="BSD",
    platforms=["any"],
    zip_safe=False,
    packages=['epubbuilder'],
    test_suite='nose.collector',
    include_package_data=True,
)
1.273438
1
framework/models/courses.py
sperea/LMS-Backend-Boilerplate-Django-REST
0
12777057
import os
import uuid

from django.db import models

from accounts.models import Student
from accounts.models import Teacher
import cloudinary


class CourseORM(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=150)
    slug = models.SlugField(db_index=True, max_length=150)
    # Note: this default is evaluated once at import time, so every new course
    # gets the same generated code unless one is supplied explicitly.
    code = models.CharField(max_length=6, default=uuid.uuid4().hex.upper()[0:6], blank=True, verbose_name="Code")
    image = cloudinary.models.CloudinaryField('image', null=True, blank=True)
    hours = models.CharField(max_length=4)
    # image = models.ImageField(null=True, blank=True, upload_to='courses_images')
    description = models.TextField()
    teacher = models.ForeignKey(Teacher, on_delete=models.PROTECT)
    students = models.ManyToManyField(Student)

    def __str__(self):
        return self.name

    class Meta:
        app_label = "courses"


class ModuleORM(models.Model):
    title = models.CharField(max_length=200)
    description = models.TextField(null=True, blank=True)
    order = models.PositiveIntegerField(default=1)
    course = models.ForeignKey(CourseORM, on_delete=models.CASCADE)

    def __str__(self):
        return self.title

    class Meta:
        app_label = "courses"


class SectionORM(models.Model):
    title = models.CharField(max_length=200)
    description = models.TextField(null=True, blank=True)
    order = models.PositiveIntegerField(default=1)
    module = models.ForeignKey(ModuleORM, on_delete=models.CASCADE)

    def __str__(self):
        return self.title

    class Meta:
        app_label = "courses"


class TopicORM(models.Model):
    TOPIC_TYPE_GENERAL = 1
    TOPIC_TYPE_HOMEWORK = 2
    TOPIC_TYPE_QUIZ = 3  # was 2, which collided with TOPIC_TYPE_HOMEWORK
    TOPIC_TYPE_CHOICES = (
        (TOPIC_TYPE_GENERAL, 'General'),
        (TOPIC_TYPE_HOMEWORK, 'Homework'),
        (TOPIC_TYPE_QUIZ, 'Quiz'),
    )

    title = models.CharField(max_length=200)
    description = models.TextField(null=True, blank=True)
    order = models.PositiveIntegerField(default=1)
    type = models.SmallIntegerField(choices=TOPIC_TYPE_CHOICES, default=TOPIC_TYPE_GENERAL, verbose_name='Type')
    section = models.ForeignKey(SectionORM, on_delete=models.CASCADE)

    def __str__(self):
        return self.title  # the model defines 'title', not 'name'

    class Meta:
        app_label = "courses"


def get_resource_file_path(instance, filename):
    ext = filename.split('.')[-1]
    filename = "%s.%s" % (uuid.uuid4(), ext)
    return os.path.join("resources/", filename)


class ResourcesORM(models.Model):
    topic = models.ForeignKey(TopicORM, on_delete=models.CASCADE)
    resource = models.FileField(upload_to=get_resource_file_path, verbose_name="File")

    class Meta:
        app_label = "courses"


class PackageORM(models.Model):
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=150)
    slug = models.SlugField(db_index=True, max_length=150)
    code = models.CharField(max_length=10)
    parent = models.ForeignKey('PackageORM', on_delete=models.CASCADE)
    courses = models.ManyToManyField(CourseORM)

    def __str__(self):
        return self.name

    class Meta:
        app_label = "courses"
2.21875
2
app/hook_details/push_hook_details.py
futuresimple/triggear
14
12777058
from typing import Dict, Set, Union

from app.clients.github_client import GithubClient
from app.enums.event_types import EventType
from app.hook_details.hook_details import HookDetails
from app.mongo.registration_cursor import RegistrationCursor
from app.request_schemes.register_request_data import RegisterRequestData
from app.utilities.functions import get_all_starting_with, any_starts_with


class PushHookDetails(HookDetails):
    def __repr__(self) -> str:
        return f"<PushHookDetails " \
               f"repository: {self.repository}, " \
               f"branch: {self.branch}, " \
               f"sha: {self.sha}, " \
               f"changes: {self.changes} " \
               f">"

    def __init__(self, repository: str, branch: str, sha: str, changes: Set[str]) -> None:
        self.repository = repository
        self.branch = branch
        self.sha = sha
        self.changes = changes

    def get_changes_as_string(self) -> str:
        return ','.join(self.changes)

    def get_allowed_parameters(self) -> Dict[str, str]:
        return {
            RegisterRequestData.RequestedParams.branch: self.branch,
            RegisterRequestData.RequestedParams.sha: self.sha,
            RegisterRequestData.RequestedParams.changes: self.get_changes_as_string()
        }

    def get_query(self) -> Dict[str, str]:
        return dict(repository=self.repository)

    def get_event_type(self) -> EventType:
        return EventType.PUSH

    def get_ref(self) -> str:
        return self.sha

    def setup_final_param_values(self, registration_cursor: RegistrationCursor) -> None:
        if registration_cursor.change_restrictions:
            self.changes = get_all_starting_with(self.changes, registration_cursor.change_restrictions)

    async def should_trigger(self, cursor: RegistrationCursor, github_client: GithubClient) -> bool:
        if cursor.change_restrictions and not any_starts_with(any_list=self.changes,
                                                              starts_with_list=cursor.change_restrictions):
            return False
        elif cursor.branch_restrictions and self.branch not in cursor.branch_restrictions:
            return False
        elif cursor.file_restrictions and not await github_client.are_files_in_repo(self.repository, self.sha,
                                                                                    cursor.file_restrictions):
            return False
        return True
2.09375
2
bench/py/keyedpq_a.py
pskopnik/apq
4
12777059
from dataclasses import dataclass, field
from typing import Any, Dict, Generic, List, Tuple, TypeVar, Union
import heapq
import math

_KT = TypeVar('_KT')  # key type
_DT = TypeVar('_DT')  # data type
_KT_inner = TypeVar('_KT_inner')  # alternative key type for inner definitions
_DT_inner = TypeVar('_DT_inner')  # alternative data type for inner definitions


class PyKeyedPQA(Generic[_KT, _DT]):
    @dataclass(order=True)
    class _Entry(Generic[_KT_inner, _DT_inner]):
        value: float = field(init=True, compare=True)
        change_index: int = field(init=True, compare=True)
        key: _KT_inner = field(init=True, compare=False)
        data: _DT_inner = field(init=True, compare=False)

    class Item(Generic[_KT_inner, _DT_inner]):
        def __init__(self, entry: 'PyKeyedPQA._Entry[_KT_inner, _DT_inner]') -> None:
            self._entry: PyKeyedPQA._Entry[_KT_inner, _DT_inner] = entry

        @property
        def key(self) -> _KT_inner:
            return self._entry.key

        @property
        def value(self) -> float:
            return self._entry.value

        @property
        def data(self) -> _DT_inner:
            return self._entry.data

    def __init__(self) -> None:
        self._heap: List[PyKeyedPQA._Entry[_KT, _DT]] = []
        self._change_index = 1
        self._lookup_dict: Dict[_KT, PyKeyedPQA._Entry[_KT, _DT]] = {}

    def _entry_from_identifier(self, identifier: Union[_KT, 'PyKeyedPQA.Item[_KT, _DT]']) -> 'PyKeyedPQA._Entry[_KT, _DT]':
        if isinstance(identifier, PyKeyedPQA.Item):
            return identifier._entry
        else:
            return self._lookup_dict[identifier]

    def __len__(self) -> int:
        return len(self._heap)

    def __contains__(self, key: _KT) -> bool:
        return key in self._lookup_dict

    def __getitem__(self, key: _KT) -> 'PyKeyedPQA.Item[_KT, _DT]':
        entry = self._lookup_dict[key]
        return PyKeyedPQA.Item(entry)

    def __delitem__(self, identifier: Union[_KT, 'PyKeyedPQA.Item[_KT, _DT]']) -> None:
        entry = self._entry_from_identifier(identifier)
        entry.value, entry.change_index = -math.inf, 0
        # impl A
        heapq.heapify(self._heap)
        heapq.heappop(self._heap)
        del self._lookup_dict[entry.key]

    def add(self, key: _KT, value: float, data: _DT) -> 'PyKeyedPQA.Item[_KT, _DT]':
        entry = PyKeyedPQA._Entry(value, self._change_index, key, data)
        self._change_index += 1
        heapq.heappush(self._heap, entry)
        self._lookup_dict[key] = entry
        return PyKeyedPQA.Item(entry)

    def change_value(self, identifier: Union[_KT, 'PyKeyedPQA.Item[_KT, _DT]'], value: float) -> None:
        entry = self._entry_from_identifier(identifier)
        self._change_value(entry, value)

    def _change_value(self, entry: 'PyKeyedPQA._Entry[_KT, _DT]', value: float) -> None:
        entry.value, entry.change_index = value, self._change_index
        self._change_index += 1
        # impl A
        heapq.heapify(self._heap)

    def add_or_change(self, key: _KT, value: float, data: _DT) -> 'PyKeyedPQA.Item[_KT, _DT]':
        try:
            entry = self._lookup_dict[key]
            self._change_value(entry, value)
            return PyKeyedPQA.Item(entry)
        except KeyError:
            return self.add(key, value, data)

    def peek(self) -> 'PyKeyedPQA.Item[_KT, _DT]':
        entry = self._heap[0]
        return PyKeyedPQA.Item(entry)

    def pop(self) -> Tuple[_KT, float, _DT]:
        entry = heapq.heappop(self._heap)
        del self._lookup_dict[entry.key]
        return entry.key, entry.value, entry.data
2.40625
2
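A short usage sketch for the keyed priority queue above, using only the standard library; it assumes the module is saved as keyedpq_a.py, as in the record's file path.

from keyedpq_a import PyKeyedPQA  # assumes the module above is importable under this name

pq: PyKeyedPQA[str, dict] = PyKeyedPQA()
pq.add('job-a', 3.0, {'payload': 'a'})
pq.add('job-b', 1.0, {'payload': 'b'})
pq.add_or_change('job-a', 0.5, {'payload': 'a'})  # lowers job-a's priority value

print(pq.peek().key)          # 'job-a' (smallest value comes out first)
key, value, data = pq.pop()   # removes and returns the smallest entry
print(key, value, data)       # job-a 0.5 {'payload': 'a'}
print('job-b' in pq, len(pq)) # True 1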
display.py
qxcross/muniak
0
12777060
# These functions deal with displaying information in CLI

## Check Payment Display
def display_lessons(lessons):
    print('displaying lesson payment info')
    unpaid_lessons = []
    paid_lessons = []
    for lesson in lessons:
        if lesson['paid'] == 'n':
            unpaid_lessons.append(lesson)
        elif lesson['paid'] == 'y':
            paid_lessons.append(lesson)
    for lesson in unpaid_lessons:
        print(f'Lesson #{lesson["id"]} remains unpaid.')
    for lesson in paid_lessons:
        print(f'Lesson #{lesson["id"]} has been paid for.')
    print('OK!')
3.125
3
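A tiny illustrative call for display_lessons above, using hypothetical lesson dicts shaped the way the function indexes them ('id' and 'paid' keys).

lessons = [
    {'id': 1, 'paid': 'y'},
    {'id': 2, 'paid': 'n'},
    {'id': 3, 'paid': 'y'},
]
display_lessons(lessons)
# displaying lesson payment info
# Lesson #2 remains unpaid.
# Lesson #1 has been paid for.
# Lesson #3 has been paid for.
# OK!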
jrk/el_hyperparams.py
mehdi-mirzapour/eigenthemes
11
12777061
'''
Code modified by the authors of the paper:
"Low-rank Subspaces for Unsupervised Entity Linking"
to enable working with "Wikidata" instead of "Freebase"
'''

import argparse

sup_train = False

MAX_POS = 10
MAX_N_POSS_TEST = 100
MAX_N_POSS_TRAIN = 100
N_NEGS = 10
SAMPLE_NEGS = True
TYPE_OPT = 'mean'

parser = argparse.ArgumentParser()
parser.add_argument("--mode", type=str, help="train or eval", default='train')
parser.add_argument("--model_path", type=str, help="model path to save/load", default='model')
parser.add_argument("--n_epochs", type=int, help="number of epochs", default=20 if not sup_train else 50)
parser.add_argument("--batchsize", type=int, help="batchsize", default=50)
parser.add_argument("--max_len", type=int, help="max sentence length", default=100)
parser.add_argument("--lr", type=float, help="learning rate", default=1e-3)
parser.add_argument("--dropout", type=float, help="dropout rate", default=0)
parser.add_argument("--lstm_hiddim", type=int, help="hiddim of the encoder's combine", default=100)
parser.add_argument("--enc_type", type=str, default="lstm")
parser.add_argument("--n_filters", type=int, default=200)
parser.add_argument("--en_dim", type=int, default=300)
parser.add_argument("--pos_embdim", type=int, default=5)
parser.add_argument("--type_embdim", type=int, default=50)
parser.add_argument("--ent_embdim", type=int, default=100)
parser.add_argument("--datadir", type=str, default='data/wikidata/')
parser.add_argument("--noise_threshold", type=float, default=0.75 if not sup_train else 1)
parser.add_argument("--margin", type=float, default=0.1)
parser.add_argument("--kl_coef", type=float, default=5 if not sup_train else 0)
parser.add_argument("--noise_prior", type=float, default=0.9)
parser.add_argument("--train_data", type=str, default='data/el_annotated_170k_le_titov.json')
parser.add_argument("--dev_data", type=str, default='data/aida_testa_le_titov.json')
parser.add_argument("--test_data", type=str, default='data/aida_testb_le_titov.json')
2.609375
3
in-class/Week 5 BikeShare ClusterTree.py
dvtate/cs484
0
12777062
# Load the necessary libraries
import matplotlib.pyplot as plt
import numpy
import pandas
import sklearn.cluster as cluster
import sklearn.metrics as metrics

bikeshare = pandas.read_csv('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Data\\BikeSharingDemand_Train.csv',
                            delimiter=',')

# Use only these three interval variables
trainData = bikeshare[['temp', 'humidity', 'windspeed']].dropna()
nObs = trainData.shape[0]

# Determine the number of clusters using the Elbow and Silhouette metrics
nClusters = numpy.zeros(15)
Elbow = numpy.zeros(15)
Silhouette = numpy.zeros(15)

for c in range(15):
    KClusters = c + 1
    nClusters[c] = KClusters
    kmeans = cluster.KMeans(n_clusters=KClusters, random_state=60616).fit(trainData)
    if (KClusters > 1):
        Silhouette[c] = metrics.silhouette_score(trainData, kmeans.labels_)
    WCSS = numpy.zeros(KClusters)
    nC = numpy.zeros(KClusters)
    for i in range(nObs):
        k = kmeans.labels_[i]
        nC[k] += 1
        diff = trainData.iloc[i,] - kmeans.cluster_centers_[k]
        WCSS[k] += diff.dot(diff)
    Elbow[c] = 0
    for k in range(KClusters):
        Elbow[c] += WCSS[k] / nC[k]

print("Cluster Size, Elbow Value, Silhouette Value:\n")
for c in range(15):
    print(nClusters[c], Elbow[c], Silhouette[c])

# Plot the Elbow metrics versus the number of clusters
plt.plot(nClusters, Elbow, linewidth=2, marker='o')
plt.xticks(range(1, 15, 1))
plt.grid(True)
plt.xlabel("Number of Clusters")
plt.ylabel("Elbow Value")
plt.show()

# Plot the Silhouette metrics versus the number of clusters
plt.plot(nClusters, Silhouette, linewidth=2, marker='o')
plt.xticks(range(1, 15, 1))
plt.grid(True)
plt.xlabel("Number of Clusters")
plt.ylabel("Silhouette Value")
plt.show()

KClusters = 2
kmeans = cluster.KMeans(n_clusters=KClusters, random_state=60616).fit(trainData)
nC = numpy.zeros(KClusters)
for i in range(nObs):
    k = kmeans.labels_[i]
    nC[k] += 1
print(nC)

for k in range(KClusters):
    print("Cluster ", k)
    print("Centroid = ", kmeans.cluster_centers_[k])

# Load the TREE library from SKLEARN
from sklearn import tree

classTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=4, random_state=60616)
bikeshare_DT = classTree.fit(trainData, kmeans.labels_)

print('Accuracy of Decision Tree classifier on training set: {:.6f}'
      .format(classTree.score(trainData, kmeans.labels_)))

import graphviz
dot_data = tree.export_graphviz(bikeshare_DT, out_file=None, impurity=True, filled=True,
                                feature_names=['temp', 'humidity', 'windspeed'],
                                class_names=['Cluster 0', 'Cluster 1'])
graph = graphviz.Source(dot_data)
graph
graph.render('C:\\Users\\minlam\\Documents\\IIT\\Machine Learning\\Job\\hmeq_output')
3.0625
3
examples/Hunter_Johnston_Dissertation/Chapter_6/Example_6_1/outerLoop/outSpec_EOL.py
leakec/tfc
15
12777063
from tfc import utfc from tfc.utils import TFCDictRobust, egrad, NllsClass, MakePlot import numpy as onp import jax.numpy as np from jax import vmap, jacfwd, jit, lax import tqdm import pickle from scipy.optimize import fsolve from scipy.integrate import simps from time import process_time as timer ## TEST PARAMETERS: *************************************************** tol = np.finfo(float).eps maxIter = 50 W = False if W == False: Gam = 0. else: Gam = 100. ## CONSTANTS: ********************************************************* # Number of points to use N = 100 # Number of basis functions to use ms = 30 mc = 1 # Number of constraints nCx = 0 nCy = 0 ## GET CHEBYSHEV VALUES ********************************************** stfc = utfc(N,nCx,ms,basis='CP',x0 = -1, xf = 1.) ctfc = utfc(N,nCy,mc,basis='CP',x0 = -1, xf = 1.) Hs = stfc.H Hc = ctfc.H ## DEFINE THE ASSUMED SOLUTION ************************************** z = stfc.z z0 = z[0] zf = z[-1] ## DEFINE CONSTRAINED EXPRESSION ************************************* r = lambda z, xi, IC: np.dot(Hs(z),xi['xis']) v = egrad(r,0) a = egrad(v,0) lam = lambda z, xi: np.dot(Hc(z),xi['xic']) lamr = egrad(lam,0) ## FORM LOSS AND JACOBIAN *********************************************************************************** L0 = lambda xi,IC: r(z,xi,IC)[0,:] - IC['R0'] Ld0 = lambda xi,IC: IC['c'] * v(z,xi,IC)[0,:] - IC['V0'] Lf = lambda xi,IC: r(z,xi,IC)[-1,:] Ldf = lambda xi,IC: IC['c'] * v(z,xi,IC)[-1,:] Ls = lambda xi,IC: IC['c']**2 * a(z,xi,IC) - IC['ag'] + lam(z,xi) # Htf = lambda xi,IC: np.dot(lam(z,xi)[-1,:],(-1./2.*lam(z,xi)[-1,:] + IC['ag'])) # Updated because need to at lam_r * v term for spectral method Htf = lambda xi,IC: np.dot(lam(z,xi)[-1,:],(-1./2.*lam(z,xi)[-1,:] + IC['ag'])) \ + np.dot(-IC['c'] *lamr(z,xi)[-1,:], IC['c'] * v(z,xi,IC)[-1,:]) + IC['Gam'] L = jit(lambda xi,IC: np.hstack(( Ls(xi,IC)[1:-1,:].flatten(), \ L0(xi,IC).flatten(), \ Ld0(xi,IC).flatten(), \ Lf(xi,IC).flatten(), \ Ldf(xi,IC).flatten() )) ) ## INITIALIZE VARIABLES ************************************************************************************* xis = onp.zeros((Hs(z).shape[1],3)) xic = onp.zeros((Hc(z).shape[1],3)) if W == False: b = np.sqrt(2)*onp.ones(1) else: b = np.sqrt(10)*onp.ones(1) xi = TFCDictRobust({'xis':xis,\ 'xic':xic}) IC = {'R0': np.zeros((3,)), \ 'V0': np.zeros((3,)), \ 'ag': np.zeros((3,)), \ 'Gam': np.zeros((1,)), \ 'c': 2.*onp.ones(1)} ## NONLINEAR LEAST-SQUARES CLASS ***************************************************************************** nlls = NllsClass(xi,L,maxIter=2,timer=True) R0 = np.array([500000., 100000., 50000.]) V0 = np.array([-3000., 0., 0.]) ## scale initial conditons pscale = np.max(np.abs(R0)) tscale = pscale/np.max(np.abs(V0)) IC['R0'] = R0 / pscale IC['V0'] = V0 * tscale/pscale IC['ag'] = np.array([0., 0., -5.314961]) * tscale**2/pscale IC['Gam'] = Gam * tscale**4/pscale**2 global it it = 0 def Innerloop(tf,xi,IC): global it IC['c'] = 2./tf it += 1 xi,_,time = nlls.run(xi,IC) loss1 = np.max(np.abs(L(xi,IC))) loss2 = np.max(np.abs(Htf(xi,IC))) return np.max(np.hstack((loss1,loss2))) t0 = 2./IC['c'] start = timer() tf = fsolve(Innerloop, t0, args=(xi,IC), xtol=1e-13,epsfcn=tol) time = timer() - start IC['c'] = 2./tf xi,_,_ = nlls.run(xi,IC) ## CONSTRUCT SOLUTION ********************************************** t = (z-z[0])/IC['c'] * tscale IC['Gam']= IC['Gam'] * pscale**2/tscale**4 R = r(z,xi,IC) * pscale V = v(z,xi,IC) * pscale/tscale LamV = lam(z,xi) * pscale/tscale**2 LamR = -IC['c'] * egrad(lam)(z,xi) * pscale/tscale**3 Ac = 
- LamV Ham = onp.zeros(len(t)) int = onp.zeros(len(t)) a_mag = onp.zeros(len(t)) for i in range(0,len(t)): int[i] = np.dot(Ac[i,:],Ac[i,:]) Ham[i] = 0.5*int[i] + np.dot(LamR[i,:],V[i,:]) + np.dot(LamV[i,:],IC['ag'] + Ac[i,:]) a_mag[i] = np.linalg.norm(Ac[i,:]) cost = IC['Gam']* t[-1] + 0.5 * simps(int,t) loss1 = np.max(np.abs(L(xi,IC))) loss2 = np.max(np.abs(Htf(xi,IC)))\ ##: print final answers to screen print('\nFinal time [s]:\t' + str(t[-1])) print('Cost:\t\t' + str(cost)) print('Comp time [ms]:\t' + str(time*1000)) print('Iterations:\t' + str(it)) print('Loss:\t\t' + str(np.max(np.hstack((loss1,loss2)))))
1.664063
2
milky_way_drift/data.py
MABradley/MilkyWayDrift
0
12777064
""" Data ================ data storage and manipulation classes, should be sufficient to run the game without display """ from enum import Enum import numpy class Facing(Enum): YP = 0 XP = 1 ZN = 2 YN = 3 XN = 4 ZP = 5 # gives a directional delta array in hex coordinates for given Facing def facing_array(enum): if enum == Facing.YP: return [0, 1, 0] if enum == Facing.XP: return [1, 0, 0] if enum == Facing.ZN: return [0, 0, -1] if enum == Facing.YN: return [0, -1, 0] if enum == Facing.XN: return [-1, 0, 0] if enum == Facing.ZP: return [0, 0, 1] raise Exception(f'{enum} is not a valid Facing') class Body: def __init__(self, position=None, momentum=None, facing=Facing.YP, image=''): if momentum is None: momentum = [0, 0, 0] if position is None: position = [[0, 0]] self.facing = facing self.position = position # hex x y positions self.momentum = momentum # hex x y z velocities self._momentum_next = [0, 0, 0] # hex x y z velocities self.image = image # single hex movement by provided direction, none or 0 for inaction def move(self, direction=None): if direction is None: direction = [0, 0, 0] else: direction = facing_array(direction) numpy.add(self.position, direction) numpy.add(self.momentum_next, direction) # positive rotations are clockwise def rotate(self, rotations, pivot=None): if pivot is None: pivot = self.position[0] self.facing += rotations if len(self.position) > 1: for r in range(0, abs(rotations)): if rotations > 0: for i in range(0, len(self.position)): p = numpy.subtract(self.position[i], pivot) p = [-p[2], -p[0], -p[1]] self.position[i] = numpy.add(p, pivot) else: for i in range(0, len(self.position)): p = numpy.subtract(self.position[i], pivot) p = [-p[1], -p[2], -p[10]] self.position[i] = numpy.add(p, pivot) # 1 = 60°, 6 rotations in a 360° turn def degree_facing(self): return self.facing * 60 def elapse_turn(self): self.momentum = self._momentum_next self._momentum_next = [0, 0, 0] class Ship(Body): def __init__(self, position=None, momentum=None, facing=Facing.YP, image='', speed=1, rotation_speed=1, move_directions=[Facing.YP]): super().__init__(position, momentum, facing, image) self.speed = speed # number of movement/rotation actions you can make in a turn self.action_points = speed self.rotation_speed = rotation_speed # number of 60 degree turns allowed in one rotation action self.move_directions = move_directions # legal directions to make moves in def move(self, direction=None): if direction is None: direction = [0, 0, 0] elif direction in self.move_directions: super().move(self, direction) else: raise Exception(f'Invalid move direction {direction}, valid directions are {self.move_directions}') self.action_points -= 1 def rotate(self, rotations, pivot=None): return class Map: def __init__(self, width, height): self.width = width self.height = height self.bodies = [] # NOTES FROM INITIAL GAME PLAY MECHANICS REVIEW: # # Body(): # (x, y)[] # position # (x1, x2, y) # momentum # (x1, x2, y) # momentum_next # # # def rotate((x, y)pivot, rotations # # ): # # # updates position # # def move((x1, x2, y)direction # # ): # # updates position and momentum_next # # # ImmutableBody(Body): # # # def rotate((x, y)pivot, rotations # # ): # return # # # def move((x1, x2, y)direction # # ): # return # # Ship(Body): # rotation_speed # speed # Facing[] # legal_moves # which direction thrusters can take us, 1 non zero value in tuple # # # def rotate((x, y)pivot, rotations # # ): # # if you rotate your legal_moves must update # # # Map(): # x_width # y_width # [] # bodies # # # class 
Facing(Enum): # YP = 0 # X1P = 1 # X2P = 2 # YN = 3 # X1N = 4 # X2N = 5
3.15625
3
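A minimal standalone sketch, not part of the original file, of the hex-grid rotation used by Body.rotate above: cells are cube coordinates (x, y, z) with x + y + z == 0, and one rotation step cycles and negates the components around a pivot.

# Illustrative helper (assumed cube-coordinate convention; not the class method itself).
import numpy

def rotate_cell(cell, pivot, rotations):
    p = numpy.subtract(cell, pivot)
    for _ in range(abs(rotations)):
        if rotations > 0:
            p = [-p[2], -p[0], -p[1]]   # one step in the positive direction
        else:
            p = [-p[1], -p[2], -p[0]]   # one step in the negative direction
    return [int(v) for v in numpy.add(p, pivot)]

print(rotate_cell([1, -1, 0], [0, 0, 0], 1))   # -> [0, -1, 1]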
clsim/python/GetIceCubeCableShadow.py
hschwane/offline_production
1
12777065
from icecube.icetray import OMKey
from icecube.simclasses import I3MapModuleKeyI3ExtraGeometryItemCylinder, I3ExtraGeometryItemCylinder
from icecube.dataclasses import I3Position, ModuleKey
from I3Tray import I3Units
import numpy as np
from os.path import expandvars

from_cable_shadow = expandvars("$I3_BUILD/ice-models/resources/models/cable_position/orientation.cable_shadow.txt")
from_led7 = expandvars("$I3_BUILD/ice-models/resources/models/cable_position/orientation.led7.txt")

def GetIceCubeCableShadow(CableAngles=from_led7,
                          DOMRadius=165.1*I3Units.mm,
                          CableRadius=23*I3Units.mm,
                          CableLength=1*I3Units.m):
    """
    Get a cylinder representing the position of the cable at each DOM

    :param CableAngles: text file containing string, om, angle (degrees), angle error (degrees)
    :param DOMRadius: radius of the DOM sphere
    :param CableRadius: radius of the cable
    :param CableLength: length of the cable segment at each DOM
    :returns: a map of I3ExtraGeometryItem representing the local position of the cable *in DOM-centered coordinates*
    """
    # assume the cable runs along the surface of the DOM
    radius = DOMRadius + CableRadius
    shadows = I3MapModuleKeyI3ExtraGeometryItemCylinder()
    for string, om, angle, _ in np.loadtxt(CableAngles, dtype=[('string', int), ('om', int), ('angle', float), ('angle_err', float)]):
        pos = I3Position(radius*np.cos(np.radians(angle)), radius*np.sin(np.radians(angle)), 0)
        shadows[ModuleKey(int(string), int(om))] = I3ExtraGeometryItemCylinder(pos + I3Position(0, 0, CableLength/2.),
                                                                               pos + I3Position(0, 0, -CableLength/2.),
                                                                               CableRadius)
    return shadows
2.453125
2
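A brief usage sketch for GetIceCubeCableShadow above; it assumes an IceCube software build where $I3_BUILD and the ice-models cable-position tables exist, and that the returned I3Map exposes len() like a dictionary.

# Hypothetical usage of the function defined above (environment assumptions noted in the lead-in).
shadows = GetIceCubeCableShadow()      # LED7-derived angles by default
print(len(shadows))                    # number of (string, om) -> cylinder entries
# Individual entries can be read back with shadows[ModuleKey(string, om)].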
translations.py
anast20sm/Addarr
0
12777066
import i18n

from config import config
from definitions import LANG_PATH

i18n.load_path.append(LANG_PATH)
i18n.set('locale', config["language"])
2.140625
2
quflow/laplacian/sparse.py
kmodin/quflow
0
12777067
<filename>quflow/laplacian/sparse.py import numpy as np from numba import njit import scipy.sparse.linalg from scipy.sparse import coo_matrix # ---------------- # GLOBAL VARIABLES # ---------------- _lu_laplacian_cache = dict() _sparse_laplacian_cache = dict() _lu_heat_flow_cache = dict() _use_umfpack = True # --------------------- # LOWER LEVEL FUNCTIONS # --------------------- def compute_sparse_laplacian_alt(N): s = (N - 1)/2 mvals = np.linspace(-s, s, N) m1 = mvals[:, np.newaxis] m2 = mvals[np.newaxis, :] # Set diagonal elements coeff1 = (2*(s*(s+1)-m1*m2)).ravel() ivals = ((m1 + s)*N + m2 + s).ravel().astype(int) jvals = ivals.copy() values = -coeff1 # Set first off diagonal m1 = mvals[:-1, np.newaxis] m2 = mvals[np.newaxis, :-1] coeff2 = (-np.sqrt(s*(s+1)-m1*(m1+1))*np.sqrt(s*(s+1)-m2*(m2+1))).ravel() ivals = np.hstack((ivals, ((m1 + s)*N + m2 + s).ravel().astype(int))) jvals = np.hstack((jvals, ((m1 + s + 1)*N + m2 + s + 1).ravel().astype(int))) values = np.hstack((values, -coeff2)) # Set second off diagonal m1 = mvals[1:, np.newaxis] m2 = mvals[np.newaxis, 1:] coeff3 = (-np.sqrt(s*(s+1)-m1*(m1-1))*np.sqrt(s*(s+1)-m2*(m2-1))).ravel() ivals = np.hstack((ivals, ((m1 + s)*N + m2 + s).ravel().astype(int))) jvals = np.hstack((jvals, ((m1 + s - 1)*N + m2 + s - 1).ravel().astype(int))) values = np.hstack((values, -coeff3)) # Set BC @njit def compute_sparse_laplacian_ind_(N, values, ivals, jvals, bc=False): s = (N - 1)/2 mvals = np.linspace(-s, s, N) count = 0 for m1 in mvals: for m2 in mvals: coeff1 = 2*(s*(s+1)-m1*m2) if abs(coeff1) > 1e-10: ivals[count] = round((m1 + s)*N + m2 + s) jvals[count] = round((m1 + s)*N + m2 + s) values[count] = -coeff1 count += 1 if m1 < s and m2 < s: coeff2 = -np.sqrt(s*(s+1)-m1*(m1+1))*np.sqrt(s*(s+1)-m2*(m2+1)) if abs(coeff2) > 1e-10: ivals[count] = round((m1 + s)*N + m2 + s) jvals[count] = round((m1 + s + 1)*N + m2 + s + 1) values[count] = -coeff2 count += 1 if m1 > -s and m2 > -s: coeff3 = -np.sqrt(s*(s+1)-m1*(m1-1))*np.sqrt(s*(s+1)-m2*(m2-1)) if abs(coeff3) > 1e-10: ivals[count] = round((m1 + s)*N + m2 + s) jvals[count] = round((m1 + s - 1)*N + m2 + s - 1) values[count] = -coeff3 count += 1 # Make sure matrix is invertible (corresponds to adding BC in Poisson equations) if bc: for h in range(N): ivals[count] = 0 jvals[count] = h*(N+1) values[count] = -1/N count += 1 def compute_sparse_laplacian(N, bc=False): """ Return the sparse laplacian for a specific bandwidth `N`. Parameters ---------- N: int bc: bool Whether to add conditions to exclude singular matrix. Returns ------- A: scipy.sparse.spmatrix Sparse matrix in some scipy format (typically `csc_matrix`). """ values = np.zeros(3*N**2-4*N+2+N, dtype=complex) # Used to be 'complex' but no need for that ivals = np.zeros(values.shape, dtype=int) jvals = np.zeros(values.shape, dtype=int) compute_sparse_laplacian_ind_(N, values, ivals, jvals, bc=bc) # Create sparse matrix A = coo_matrix((values, (ivals, jvals)), shape=(N**2, N**2)).tocsc() return A def compute_lu(A): if _use_umfpack: scipy.sparse.linalg.use_solver(useUmfpack=True) else: scipy.sparse.linalg.use_solver(useUmfpack=False) return scipy.sparse.linalg.splu(A) # ---------------------- # HIGHER LEVEL FUNCTIONS # ---------------------- def laplacian(N, bc=False): """ Return quantized laplacian (as a sparse matrix). Parameters ---------- N: int bc: bool Whether to add boundary conditions to remove singularity. 
Returns ------- A : sparse matrix """ global _sparse_laplacian_cache if (N, bc) not in _sparse_laplacian_cache: A = compute_sparse_laplacian(N, bc=bc) _sparse_laplacian_cache[(N, bc)] = A return _sparse_laplacian_cache[(N, bc)] def laplace(P): """ Return quantized laplacian applied to stream function `P`. Parameters ---------- P: ndarray(shape=(N, N), dtype=complex) Returns ------- W: ndarray(shape=(N, N), dtype=complex) """ N = P.shape[0] A = laplacian(N) W = A.dot(P.ravel()).reshape((N, N)) return W def solve_poisson(W): """ Return stream matrix `P` for `W`. Parameters ---------- W: ndarray(shape=(N, N), dtype=complex) Returns ------- P: ndarray(shape=(N, N), dtype=complex) """ global _lu_laplacian_cache N = W.shape[0] if N not in _lu_laplacian_cache: # Get sparse laplacian A = laplacian(N, bc=True) # Compute sparse LU _lu_laplacian_cache[N] = compute_lu(A) P = _lu_laplacian_cache[N].solve(W.ravel()).reshape((N, N)) P.ravel()[::N+1] -= np.trace(P)/N return P def solve_heat(h_times_nu, W0): """ Solve quantized heat equation. Parameters ---------- h_times_nu: float Time-step times viscosity. W0: ndarray(shape=(N, N), dtype=complex) Returns ------- Wh: ndarray(shape=(N, N), dtype=complex) """ global _lu_heat_flow_cache N = W0.shape[0] if (N, h_times_nu) not in _lu_heat_flow_cache: # Get sparse laplacian A = laplacian(N, False) # Using backward Euler method T = scipy.sparse.eye(N**2) - h_times_nu*A # Compute sparse LU _lu_heat_flow_cache[(N, h_times_nu)] = compute_lu(T.tocsc()) Wh = _lu_heat_flow_cache[(N, h_times_nu)].solve(W0.ravel()).reshape((N, N)) return Wh
2.40625
2
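A short usage sketch for the solvers above; the random skew-Hermitian vorticity matrix is made-up test data, and the round-trip is only expected to match up to the boundary-condition regularization used in solve_poisson.

# Hypothetical usage of laplace/solve_poisson defined above (test data is made up).
import numpy as np

N = 16
A = np.random.randn(N, N) + 1j*np.random.randn(N, N)
W = (A - A.conj().T)/2               # skew-Hermitian matrix standing in for a vorticity field
W -= (np.trace(W)/N)*np.eye(N)       # remove the trace so the Poisson problem is well posed

P = solve_poisson(W)                 # stream matrix (uses the cached sparse LU factorization)
W_back = laplace(P)                  # apply the quantized laplacian again
print(np.linalg.norm(W_back - W))    # small, up to the bc regularization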
PyQt5/Multi Form/Main.py
kuhakuu04/Python_PyQt5_GUI
0
12777068
import PyQt5.QtWidgets

import MainForm

app_module = PyQt5.QtWidgets.QApplication([])
app = MainForm.MainForm()
app.show()
app_module.exec()
1.507813
2
profile/load-singly-bbn-time.py
gitter-badger/py-bbn
48
12777069
<reponame>gitter-badger/py-bbn
import time

from pybbn.graph.dag import Bbn
from pybbn.pptc.inferencecontroller import InferenceController

# deserialization 0.02801
# junction tree 6.10584

start = time.time()
bbn = Bbn.from_json('singly-bbn.json')
stop = time.time()
diff = stop - start
print(f'deserialization {diff:.5f}')

start = time.time()
join_tree = InferenceController.apply(bbn)
stop = time.time()
diff = stop - start
print(f'junction tree {diff:.5f}')
2.328125
2
expenda_api/expenses/serializers.py
ihsaro/Expenda
0
12777070
from rest_framework.serializers import ModelSerializer

from expenses.models import Expense


class ListRetrieveExpenseSerializer(ModelSerializer):
    class Meta:
        model = Expense
        fields = '__all__'


class CreateExpenseSerializer(ModelSerializer):
    class Meta:
        model = Expense
        fields = ['name', 'description', 'price', 'quantity', 'purchased_timestamp', 'owner']


class UpdateExpenseSerializer(ModelSerializer):
    class Meta:
        model = Expense
        fields = ['name', 'description', 'price', 'quantity', 'purchased_timestamp']
2.0625
2
python/csv2sql.py
Ellian-aragao/aleatoriedades
0
12777071
<reponame>Ellian-aragao/aleatoriedades<filename>python/csv2sql.py
import sys
import csv


def main(args):
    with open(args[1]) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        # print('INSERT INTO tb_idioma (id_idioma, ds_abreviacao, ds_descricao) VALUES')
        for i, line in enumerate(reader):
            print('({},'.format(i + 1), end='')
            print('\'{}\''.format(line[0]), end='')
            print('),')


if __name__ == '__main__':
    sys.exit(main(sys.argv))
3.421875
3
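A small worked example of the csv2sql script above; the file name and contents are made up for illustration.

# Hypothetical example: build a tiny one-column CSV and run main() from the script above.
import csv

with open('idiomas.csv', 'w', newline='') as f:
    csv.writer(f).writerows([['pt'], ['en']])

main(['csv2sql.py', 'idiomas.csv'])
# prints:
# (1,'pt'),
# (2,'en'),
# Every row, including the last, ends with a comma, so the commented-out INSERT header
# and a closing ';' still have to be added by hand before the output is valid SQL.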
observer/observers/json_observer.py
rosenpin/EyeTracker
0
12777072
from observers.observer import Observer
from observers.eys_state import EyeStateItem, ChooseState
import json
import os


class JsonObserver(Observer):
    JSON_FILE_NAME = 'db.json'

    def __init__(self):
        super().__init__()

    def trigger(self, eye_state_item: EyeStateItem):
        if eye_state_item.choose_state == ChooseState.CHOOSE:
            j = {}
            try:
                with open(JsonObserver.JSON_FILE_NAME, 'r') as f:
                    j = json.load(f)
            except (FileNotFoundError, json.JSONDecodeError):
                # first run or unreadable file: start counting from an empty dict
                pass
            key = eye_state_item.screen_part.name
            if key in j:
                j[key] += 1
            else:
                j[key] = 1
            with open(JsonObserver.JSON_FILE_NAME, 'w') as f:
                json.dump(j, f)
2.8125
3
hackerrank/algorithm/almost-sorted/a.py
SwordYoung/cutprob
0
12777073
#!/usr/bin/env python def test(n, nums): assert n == len(nums) mmax = [] stk = [] for i in xrange(n): # print "i = %d" % (i) mmax.append(i) while stk and nums[i] >= nums[stk[-1]]: stk.pop() if not stk: mmax[i] = 0 else: mmax[i] = stk[-1]+1 stk.append(i) mmin = [0] * n stk = [] for i in xrange(n-1, -1, -1): mmin[i] = i while stk and nums[i] <= nums[stk[-1]]: stk.pop() if not stk: mmin[i] = n-1 else: mmin[i] = stk[-1]-1 stk.append(i) res = 0 for j in xrange(n): for i in xrange(mmax[j], j+1): # if j <= mmin[i] and i >= mmax[j]: if j <= mmin[i]: res += 1 print res return res def test2(n, nums): stk = [] stk2 = [] sub_res = [1] * n mmin = nums[0] for i in xrange(n): while stk and nums[i] >= nums[stk[-1]]: sub_res[i] += sub_res[stk[-1]] stk.pop() if nums[i] < mmin: mmin = nums[i] stk = [] stk.append(i) print "%s" % (sub_res) res = 0 for i in sub_res: res += i print res return res def read_line_int_lst(): l = raw_input() l = l.split(' ') res = [] for r in l: res.append(int(r)) return res def oj_test(): n = int(raw_input()) nums = read_line_int_lst() test2(n, nums) def slow(n, nums): res = 0 for i in xrange(n): for j in xrange(i, n): min_true = True min_false = True for k in xrange(i,j+1): if k != i and nums[k] < nums[i]: min_true = False break if k != j and nums[k] > nums[j]: min_false = False break if min_true and min_false: res += 1 print res return res def random_atest(size, rrange): import random # size = random.randint(1, 50000) nums = [] for i in xrange(size): nums.append(random.randint(1, rrange)) res1 = test2(size, nums) if size <= 500: res2 = slow(size, nums) assert res1 == res2, "%s, %d %d" % (nums, res1, res2) def random_test(): for i in xrange(1000): random_atest(5, 50) for i in xrange(10): random_atest(50, 500) random_atest(50000, 500000) if __name__ == "__main__": random_test()
3.125
3
test_frcnn_count.py
vvc-unal/keras-frcnn
0
12777074
import itertools import operator import os import pickle import re import sys import time import cv2 from keras import backend as K from keras.layers import Input from keras.models import Model import skvideo.io from keras_frcnn import roi_helpers import keras_frcnn.resnet as nn import numpy as np video_folder = '../../Videos/' videoName = "MOV_0861" input_video_file = os.path.abspath(video_folder + videoName + ".mp4") output_video_file = os.path.abspath(video_folder + "OUTPUT/" + videoName + ".mp4") img_path = os.path.join(video_folder +"OUTPUT/input", '') output_path = os.path.join(video_folder +"OUTPUT/output", '') num_rois = 32 frame_rate = 30 def cleanup(): print("cleaning up...") os.popen('rm -f ' + img_path + '*') os.popen('rm -f ' + output_path + '*') def get_file_names(search_path): for (dirpath, _, filenames) in os.walk(search_path): for filename in filenames: yield filename # os.path.join(dirpath, filename) def convert_to_images(): counter = 0 videodata = skvideo.io.vreader(input_video_file) for frame in videodata: skvideo.io.vwrite(os.path.join(img_path, str(counter) + '.jpg'), frame) counter = counter + 1 def save_to_video(): list_files = sorted(get_file_names(output_path), key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)]) # start the FFmpeg writing subprocess with following parameters writer = skvideo.io.FFmpegWriter(output_video_file, outputdict={ '-vcodec': 'libx264', "-r":str(frame_rate)}, verbosity=1) for file in list_files: frame = skvideo.io.vread(os.path.join(output_path, file)) writer.writeFrame(frame) writer.close() def format_img(img, C): img_min_side = float(C.im_size) (height, width, _) = img.shape if width <= height: f = img_min_side / width new_height = int(f * height) new_width = int(img_min_side) else: f = img_min_side / height new_width = int(f * width) new_height = int(img_min_side) img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC) img = img[:, :, (2, 1, 0)] img = img.astype(np.float32) img[:, :, 0] -= C.img_channel_mean[0] img[:, :, 1] -= C.img_channel_mean[1] img[:, :, 2] -= C.img_channel_mean[2] img /= C.img_scaling_factor img = np.transpose(img, (2, 0, 1)) img = np.expand_dims(img, axis=0) return img def accumulate(l): it = itertools.groupby(l, operator.itemgetter(0)) for key, subiter in it: yield key, sum(item[1] for item in subiter) def main(): sys.setrecursionlimit(40000) config_output_filename = './config.pickle' with open(config_output_filename, 'rb') as f_in: C = pickle.load(f_in) # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} print(class_mapping) class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3).tolist() for v in class_mapping} C.num_rois = num_rois if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) input_shape_features = (1024, None, None) else: input_shape_img = (None, None, 3) input_shape_features = (None, None, 1024) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(C.num_rois, 4)) feature_map_input = Input(shape=input_shape_features) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, 
num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True) model_rpn = Model(img_input, rpn_layers) model_classifier_only = Model([feature_map_input, roi_input], classifier) model_classifier = Model([feature_map_input, roi_input], classifier) model_rpn.load_weights(C.model_path, by_name=True) model_classifier.load_weights(C.model_path, by_name=True) model_rpn.compile(optimizer='sgd', loss='mse') model_classifier.compile(optimizer='sgd', loss='mse') bbox_threshold = 0.8 print("anotating...") list_files = sorted(get_file_names(img_path), key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)]) for img_name in list_files: if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')): continue print(img_name) st = time.time() filepath = os.path.join(img_path, img_name) img = cv2.imread(filepath) X = format_img(img, C) img_scaled = np.transpose(X.copy()[0, (2, 1, 0), :, :], (1, 2, 0)).copy() img_scaled[:, :, 0] += 123.68 img_scaled[:, :, 1] += 116.779 img_scaled[:, :, 2] += 103.939 img_scaled = img_scaled.astype(np.uint8) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: # pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append([16 * x, 16 * y, 16 * (x + w), 16 * (y + h)]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] all_objects = [] for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] cv2.rectangle(img_scaled, (x1, y1), (x2, y2), class_to_color[key], 2) textLabel = '{}: {}'.format(key, int(100 * new_probs[jk])) all_dets.append((key, 100 * new_probs[jk])) all_objects.append((key, 1)) (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1) textOrg = (x1, y1 - 0) cv2.rectangle(img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2) cv2.rectangle(img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), 
(textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1) cv2.putText(img_scaled, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) print('Elapsed time = {}'.format(time.time() - st)) height, width, channels = img_scaled.shape cv2.rectangle(img_scaled, (0, 0), (width, 30), (0, 0, 0), -1) cv2.putText(img_scaled, "Obj count: " + str(list(accumulate(all_objects))), (5, 19), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 1) cv2.imwrite(os.path.join(output_path, img_name), img_scaled) print(all_dets) if __name__ == '__main__': cleanup() print("Converting video to images..") convert_to_images() print("Main ...") main() print("saving to video..") save_to_video()
2.359375
2
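The accumulate helper in the script above condenses the per-detection list into per-class counts for the on-frame overlay; the snippet below shows the shape of its output (the detections are made up, and the helper is copied from the file so the snippet runs standalone).

import itertools
import operator

def accumulate(l):                                  # copied from the script above
    it = itertools.groupby(l, operator.itemgetter(0))
    for key, subiter in it:
        yield key, sum(item[1] for item in subiter)

all_objects = [('car', 1), ('car', 1), ('person', 1)]   # one tuple per accepted detection
print(list(accumulate(all_objects)))                    # [('car', 2), ('person', 1)]
# Note: itertools.groupby only merges adjacent keys, which is fine here because the
# script appends all boxes of one class before moving on to the next class.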
microgrid/control/__init__.py
bcornelusse/microgrid-bench
10
12777075
""" The control module provides an interface for designing microgrid operational planning controllers. """ from .idle_controller import IdleController __all__ = [ 'IdleController' ]
1.398438
1
week02_1.py
allqoow/exerciseML
0
12777076
#!/usr/bin/python # -*- coding: utf-8 -*- # # Author : allqoow # Contact : <EMAIL> # Started on: 20161029(yyyymmdd) # Project : exerciseML(Exercise for Machine Learning) # H2.1.A print "H2.1.A" import matplotlib.pyplot import math import numpy.random x1List = [] x2List = [] yList = [] colourList = [] with open("applesOranges.csv","r") as openedF: rawContent = openedF.read() rawContentByLine = rawContent.split("\n") for obs in rawContentByLine[1:]: # For stability reason. It operates only if the input data are good. if len(obs.split(",")) == 3: splitRec = obs.split(",") x1List.append(float(splitRec[0])) x2List.append(float(splitRec[1])) yList.append(int(splitRec[2])) if int(splitRec[2]) == 0: colourList.append((1,0,0,1)) # red for apples elif int(splitRec[2]) == 1: colourList.append((0,0,1,1)) # orange for oranges print len(colourList) matplotlib.pyplot.scatter(x1List,x2List, c=colourList) matplotlib.pyplot.show() # H2.1.B print "\nH2.1.B" pi = math.pi wVecList = [] for alpha in range(20): wVecList.append([math.sin(pi*alpha/20), math.cos(pi*alpha/20)]) #print wVecList obsVecList = zip(x1List, x2List, yList) sampleSize = float(len(obsVecList)) bestPerformance = 0 for wVec in wVecList: countCorrect = 0 for obsVec in obsVecList: if wVec[0]*obsVec[0] + wVec[1]*obsVec[1] > 0: est = 1 elif wVec[0]*obsVec[0] + wVec[1]*obsVec[1] < 0: est = 0 if est == int(obsVec[2]): countCorrect += 1 # evaluation of performance performance = countCorrect/sampleSize print str(wVec) + " => " + str(performance) if bestPerformance < performance: bestWVec = wVec bestPerformance = performance # plotting matplotlib.pyplot.scatter(x1List,x2List, c=colourList) x2Vec = [-wVec[0]*(-2),0,-wVec[0]*2] x1Vec = [wVec[1]*(-2),0,wVec[1]*2] matplotlib.pyplot.plot(x1Vec, x2Vec) #matplotlib.pyplot.show() # H2.1.C print "\nH2.1.C" print str(bestWVec) + " => " + str(performance) thetaList = [-3 + (x/10.0) for x in range(61)] bestPerformance = 0 for theta in thetaList: countCorrect = 0 inputText = "" for obsVec in obsVecList: if bestWVec[0]*obsVec[0] + bestWVec[1]*obsVec[1] + theta > 0: est = 1 elif bestWVec[0]*obsVec[0] + bestWVec[1]*obsVec[1] + theta < 0: est = 0 if est == int(obsVec[2]): countCorrect += 1 #print str(obsVec[0]) +","+str(obsVec[1])+","+str(est)+","+str(obsVec[2]) inputText += str(obsVec[0]) +","+str(obsVec[1])+","+str(est)+"\n" #print inputText performance = countCorrect/sampleSize print str(theta) + " => " + str(performance) if bestPerformance < performance: bestTheta = theta bestPerformance = performance bestInputText = inputText print bestWVec print bestTheta # H2.1.D with open("applesOrangesEst.txt","w") as res: alphaList =range(20) thetaList = [-3 + (x/10.0) for x in range(61)] writeStr = "" for obsVec in obsVecList: if bestWVec[0]*obsVec[0] + bestWVec[1]*obsVec[1] + bestTheta> 0: est = 1 elif bestWVec[0]*obsVec[0] + bestWVec[1]*obsVec[1] + bestTheta < 0: est = 0 if est == int(obsVec[2]): countCorrect += 1 writeStr += str(obsVec[0]) +","+ str(obsVec[1])+","+str(est)+"\n" res.write(writeStr) with open("applesOrangesEst.txt","r") as openedF: x1List2 = [] x2List2 = [] yList2 = [] colourList2 = [] rawContent = openedF.read() rawContentByLine = rawContent.split("\n") for obs in rawContentByLine: # For stability reason. It operates only if the input data are good. 
if len(obs.split(",")) == 3: splitRec = obs.strip().split(",") x1List2.append(float(splitRec[0])) x2List2.append(float(splitRec[1])) #yList2.append(int(splitRec[2])) if int(splitRec[2]) == 0: colourList2.append((1,0,0,1)) # red for apples elif int(splitRec[2]) == 1: colourList2.append((1,0.5,0,1)) # orange for oranges bestx2Vec = [(-bestWVec[0])*(-2)-bestTheta,0-bestTheta,(-bestWVec[0])*2-bestTheta] bestx1Vec = [bestWVec[1]*(-2),0,bestWVec[1]*2] matplotlib.pyplot.clf() matplotlib.pyplot.scatter(x1List2,x2List2, c=colourList2) matplotlib.pyplot.plot(bestx1Vec, bestx2Vec) matplotlib.pyplot.show() # H2.1.E with open("results.txt","w") as res: alphaList =range(360) thetaList = [-3 + (x/40.0) for x in range(241)] performanceList = [] for alpha in alphaList: wVec = [math.sin(pi*alpha/80), math.cos(pi*alpha/80)] for theta in thetaList: countCorrect = 0 for obsVec in obsVecList: if wVec[0]*obsVec[0] + wVec[1]*obsVec[1] + theta> 0: est = 0 elif wVec[0]*obsVec[0] + wVec[1]*obsVec[1] + theta < 0: est = 1 if est == int(obsVec[2]): countCorrect += 1 # evaluation of performance performance = countCorrect/sampleSize #print "(alpha=" + str(alpha) + ", theta=" + str(theta) + ") => " + str(performance) performanceList.append(performance) writeStr = str(alpha) +","+ str(theta)+","+str(performance)+"\n" res.write(writeStr) data = numpy.genfromtxt('results.txt',delimiter=',') alphas=numpy.unique(data[:,0]) thetas=numpy.unique(data[:,1]) Alphas,Thetas = numpy.meshgrid(alphas,thetas) Performances=data[:,2].reshape(len(thetas),len(alphas)) matplotlib.pyplot.pcolormesh(Alphas,Thetas,Performances) matplotlib.pyplot.show() # H2.1.F # No. # What if there is a non-linear border (or a borderlike something) between classes? # We cannot distinguish those (two) classes with a line or hyperplane. # clearing existing data matplotlib.pyplot.clf()
3.15625
3
knockoff/utilities/io.py
Nike-Inc/knockoff-factory
26
12777077
import logging

from sqlalchemy import create_engine
from joblib import Parallel, delayed

logger = logging.getLogger(__name__)


def _to_sql(df, table, url, **kwargs):
    to_sql_kwargs = {
        'index': False,
        'method': 'multi',
        'if_exists': 'append'
    }
    to_sql_kwargs.update(kwargs)
    engine = create_engine(url)
    with engine.connect() as conn:
        df.to_sql(table, conn, **to_sql_kwargs)


def to_sql(df, table, url, parallelize=True,
           chunksize=1000, n_jobs=-1, **kwargs):
    logger.info("Populating table: {}".format(table))
    # TODO: better default for more effective parallelization?
    nrows = df.shape[0]
    if parallelize and nrows > chunksize:
        Parallel(n_jobs=n_jobs)(
            delayed(_to_sql)(
                df[i:i+chunksize], table, url, **kwargs
            ) for i in range(0, nrows, chunksize))
    else:
        _to_sql(df, table, url, **kwargs)
    logger.info("Populated table: {}".format(table))
2.640625
3
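A minimal usage sketch for the to_sql helper above; the SQLite URL, table name and DataFrame contents are example values, and parallelize is switched off because a single-file SQLite database is a poor target for concurrent writers.

# Hypothetical usage (URL, table name and data are example values only).
import pandas as pd

df = pd.DataFrame({'id': range(200), 'value': ['x'] * 200})

# Serial load into a local SQLite file.
to_sql(df, table='example_table', url='sqlite:///example.db', parallelize=False)
# With a server database URL and the default parallelize=True, frames larger than
# chunksize (1000 rows) are split and appended by separate joblib workers.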
deepcave/utils/importing.py
PhMueller/DeepCAVE
0
12777078
<gh_stars>0 import os import glob from typing import List from importlib import import_module import importlib.util import sys import inspect import collections from deepcave.utils.logs import get_logger logger = get_logger(__name__) def auto_import_iter(module, paths: List[str]): for path in paths: for f in glob.glob(path): if os.path.basename(f).startswith('__'): continue module_name = f'{module}.' + os.path.basename(f).replace('.py', '') if "pending" in module_name: continue # dynamic import # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path try: spec = importlib.util.spec_from_file_location(module_name, f) foo = importlib.util.module_from_spec(spec) sys.modules[spec.name] = foo spec.loader.exec_module(foo) except Exception as e: logger.exception( f'Problem when loading file {f} as {module_name} from path {path}') # iterate module content # https://stackoverflow.com/questions/1796180/how-can-i-get-a-list-of-all-classes-within-current-module-in-python # allow only class # inspect.isclass for name, obj in inspect.getmembers(sys.modules[module_name], inspect.isclass): yield name, obj
2.390625
2
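A small sketch of how the auto_import_iter generator above can be used; the plugins directory name and glob pattern are illustrative assumptions.

# Hypothetical usage (directory layout is assumed): collect every class defined in
# the .py files of a plugins folder, keyed by class name.
import os

plugin_glob = os.path.join("deepcave", "plugins", "*.py")
classes = {}
for name, cls in auto_import_iter("plugins", [plugin_glob]):
    classes[name] = cls            # as yielded by inspect.getmembers(..., inspect.isclass)

print(sorted(classes))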
setup.py
Mykrass/data-synthesis-for-machine-learning
1
12777079
<filename>setup.py #!/usr/bin/env python from setuptools import setup INSTALL_REQUIRES = [ 'numpy >= 1.14.3', 'matplotlib >= 2.2.2', 'mako ==1.0.12', 'pandas >= 0.24.2', 'scikit-learn >= 0.20.2', 'pytest >= 4.6.2', 'python-dateutil >= 2.7.3', 'setuptools >= 39.1.0' ] LONG_DESCRIPTION = """ The recent enforcement of data privacy protection regulations, such as GDPR, has made data sharing more difficult. This tool intends to facilitate data sharing from a customer by synthesizing a dataset based on the original dataset for later machine learning. There are two parts to this tool: - Data synthesizer Synthesize a dataset based on the original dataset. It accepts CSV data as input, and output a synthesized dataset based on Differential Privacy. The algorithm in the data synthesizer reference to the paper ( http://dimacs.rutgers.edu/~graham/pubs/papers/privbayes-tods.pdf). - Data utility evaluation Evaluate the data utility for the synthesized dataset. The original dataset and the synthesized dataset as the input, one utility evaluation report will be generated with several indicators. """ URL = "https://github.com/SAP/data-synthesis-for-machine-learning" PROJECT_URLS = { "Bug Tracker": URL + "/issues", "Documentation": URL, "Source Code": URL, } def main(): setup(name='ds4ml', description='A python library for data synthesis and evaluation', long_description=LONG_DESCRIPTION, long_description_content_type='text/markdown', project_urls=PROJECT_URLS, url=URL, version='0.1.2', packages=['ds4ml', 'ds4ml.command'], package_data={ '': ['template/*.html'] }, entry_points={ 'console_scripts': [ 'data-synthesize = ds4ml.command.synthesize:main', 'data-evaluate = ds4ml.command.evaluate:main' ] }, maintainer="<NAME>", maintainer_email="<EMAIL>", install_requires=INSTALL_REQUIRES, platform='any') if __name__ == '__main__': main()
1.6875
2
Medium/comisions.py
Nahalius/PythonBasics
0
12777080
city = input("Enter city name = ")
sales = int(input("Enter sales volume = "))
cities = ["Sofia", "Varna", "Plovdiv"]

# Commission rates per city (same order as the cities list), chosen by sales volume
if 0 <= sales <= 500:
    comision = [0.05, 0.045, 0.055]
elif 500 < sales <= 1000:
    comision = [0.07, 0.075, 0.08]
elif 1000 < sales <= 10001:
    comision = [0.08, 0.1, 0.12]
else:
    comision = [0.12, 0.13, 0.145]

# Indexing: only compute a commission for known cities, otherwise report an error
# instead of indexing past the end of the rate list
if city in cities:
    index = cities.index(city)
    discount = sales * comision[index]
    print("{0:.2f}".format(discount))  # rounding to two decimal places
else:
    print("error")
3.75
4
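A worked example of the lookup above, with made-up input.

# Example run (inputs and output are illustrative):
#
#   Enter city name = Varna
#   Enter sales volume = 750
#
# 750 falls in the 500-1000 band, Varna has index 1 in the cities list, so the
# commission is 750 * 0.075 = 56.25 and the program prints 56.25.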
test.py
hearteam/Linebot_project
0
12777081
# # # coding: utf-8 # # # import re # # # import inverted_index # # # # # # msg = '我覺得我眼睛有點乾' # # # # # # pattern = '我覺得' # # # # # # txt = re.match(pattern,msg) # # # # # # # if txt != None: # # # # print(msg.split(pattern)) # # # keyword ='' # # # # # # if msg[0:3] == '我覺得': # # # sentence = msg.split('我覺得')[1] # # # print(inverted_index.main(sentence)) # # # # # # import tool # import urllib.parse # # # def left(): # left = [['貓爪蒸蛋【VICI的懶人廚房】', # 'https://img.cook1cook.com/upload/cover/17/10/2597714042478411710_thumb.jpg', # "['雞蛋']", # "['一顆']", # "['11一顆蛋對應二個貓爪模2先把蛋黃及蛋白分開後,接著二個各自打散', '23用隔水加熱法,將模放入裝水的鍋中,開微火4用吸管小心的把蛋黃先滴入爪肉裡', '35待蛋黃凝固後,將一半的蛋白分別倒入二個貓瓜內', '46等蛋白表層也略凝固時,再把剩下的蛋黃分二邊全倒入貓瓜模', '57蛋黃表面凝固時,把剩下的蛋白分二邊全倒入模具裡。待表面完全凝固即可起鍋', '6蒸的過程可隨家裡鍋具自行調整火力,基本上火不要太大才不會焦掉每倒一層後,可蓋上蓋子加速蒸蛋表面凝固', '7蒸好的貓爪搭配著兒童餐盤,就是一份卡哇伊的兒童餐嘍']", # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['Egg Omelet 蛋奄列', # 'https://img.cook1cook.com/upload/cover/92/52/2642714396796809252_thumb.JPG', # "['蛋']", # "['']", # "['1先把蛋攪拌成蛋漿,鹽少許繼續攪拌(備用)', '2熱鍋下材料炒至軟身,灑上適量黑胡椒,上碟備用', '3另一鍋或同鍋洗淨抹乾後,熱鍋中火牛油1片,下蛋漿', '4放已熟材料在蛋面,然後從一端輕輕挑起,將蛋餅捲起,煎至金黃色完成。']", # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['小飯煲半熟蛋', # 'https://img.cook1cook.com/upload/cover/33/40/4718514404270023340_thumb.jpg', # "['蛋']", # "['1隻']", # "['1-室溫蛋用微波爐保鮮紙包好', '2-小飯煲下滾水,放進蛋蓋上蓋子煮7分鐘', '3-取出蛋撕去保鮮紙,放入冰水內,再放入雪櫃雪一小時,取出用匙羮脫殼便完成。*雪過的半熟蛋黃會creamyD㗎!']", # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['影音食譜 法式吐司鮮奶捲+雪花糕', # 'https://img.cook1cook.com/upload/cover/68/45/2604615012493426845_thumb.PNG', # "['吐司']", # "['適量']", # "['1詳細影音食譜:http://goo.gl/xYddGz', '2鮮奶餡用中火,一直攪拌煮至濃稠', '3容器抹油,倒入1冷藏凝固', '4取出切適當大小,沾椰子粉就是雪花糕', 
'5吐司去邊,桿成薄片', '6包入鮮奶餡,輕壓固定', '7雞蛋加一大匙水打散', '8吐司捲沾裹6', '9用少許油,煎至表面金黃取出切塊即可!']", # {'Calorie_correction': 283.0, # 'Moisture': 33.3, # 'Crude_protein': 10.0, # 'Crude_fat': 6.1, # 'Saturated_fat': 2.9, # 'Total_carbohydrates': 49.2, # 'Dietary_fiber': 4.2, # 'Total_carbohydrate': 6.7, # 'glucose': 2.1, # 'fructose': 2.9, # 'Galactose': 0.0, # 'maltose': 1.6, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 355.0, # 'Potassium': 137.0, # 'calcium': 18.0, # 'magnesium': 46.0, # 'iron': 1.3, # 'Zinc': 1.4, # 'phosphorus': 130.0, # 'copper': 0.0, # 'manganese': 0.0, # 'VitaminB1': 0.22, # 'VitaminB2': 0.07, # 'VitaminB6': 0.11, # 'VitaminB12': 0.21, # 'VitaminC': 0.0, # 'Folic_acid': 66.7, # 'VitaminA': 5.0, # 'VitaminD': 0.0, # 'VitaminE': 2.04}], # ['雲朵蛋Cloud Egg', # 'https://img.cook1cook.com/upload/cover/4/53/1200541516674027453_thumb.jpg', # "['雞蛋']", # "['1']", # "['1以200度預熱焗爐10分鐘分開蛋白同蛋黃,蛋黃要保持完整用電動打蛋器打發蛋白至企身', '2在焗盤上放上蛋白,形成一團雲蛋白中間預留一個窿,放入蛋黃', '3以200度焗20-25分鐘就完成**BB吃的是全熟蛋黃,所以焗的時間較長。如果大人食,不要全熟蛋,焗的時間可以短一點。']", # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['櫻花茶碗蒸(昆布鰹魚清湯)', # 'https://img.cook1cook.com/upload/cover/201803/1/767165abb98a1e4f1a6_thumb.jpg', # "['雞蛋']", # "['4隻']", # "['1材料:(2人份量)雞蛋4隻、昆布鰹魚清湯適量(可用清雞湯代替)、味醂1茶匙、海鹽1茶匙、櫻花蝦少許、鹽漬櫻花2-4朵', '2鹽漬櫻花用清水浸30分鐘,除去鹽份,用廚房紙印乾水份,備用。', '3櫻花蝦用清水浸30分鐘,用廚房紙印乾水份,備用。', '4雞蛋打匀後加昆布鰹魚清湯混合(比例1:1.5,即1份雞蛋:1.5份昆布鰹魚清湯),加入海鹽及味醂拌匀,用小篩隔過濾泡沫,倒入已加熱容器内,放上櫻花蝦,蓋上微波爐保鮮紙,隔水大火蒸5分鐘,熄火,不要打開蓋焗20分鐘,加上樱花裝飾,即成。', '5昆布鰹魚清湯:水500cc、昆布10g、魚片15g', '6昆布用廚房紙巾輕輕把灰塵抹掉(昆布上面白色是鮮味來源,不可用水洗掉),加水浸至少4個小時。', '7將昆布水小火加熱至60度煮約3分鐘,取出昆布;加入鰹魚片小火煮約2-3分鐘,熄火静置20分鐘;用廚房紙及小篩隔過濾清湯,可得昆布鰹魚清湯約300cc。(完成昆布鰹魚清湯可放在密封瓶內放進雪櫃可保存一個月以上)']", # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['果醬該怎麼吃呢 附『實作影片』', # 'https://img.cook1cook.com/upload/cover/201804/91/105672-1524661964_thumb.JPG', # "['吐司']", # "['適量']", # "['1果醬的6大類料理運用:', '21.)做麵包🍞:將材料中的糖份量,改以果醬取代,就能做出帶有水果風味的各式手作麵包!', '32.)抹醬:吐司、麵包、煎餅、鬆餅、饅頭、', '4可麗餅、法式吐司、雞蛋泡泡芙Popovers...等。', '53.)沾醬、醬汁:沙拉醬、涼拌菜醬料', '6a.)百香果油醋醬:冷壓橄欖油2大匙、百香果鳳梨果醬2大匙、梅醋1.5大匙、塩1/4茶匙、粗黑胡椒粒1/8茶匙(全部食材混勻至乳化為止!)', '7b.)涼拌菜醬料:百香果鳳梨果醬100-80g,糯米醋3茶匙,乾燥洋香菜葉1/8茶匙(可用在青木瓜、南瓜、大頭菜、小黃瓜、青花菜梗等!)', '8甜點🍰:製作蛋糕、餅乾、司康scone沾醬、冰淇淋淋醬、', '9冰淇淋、起司蛋糕淋醬、奶酪淋醬', '105.)水果優格:🍇【最聰明的吃法】✔️健康新概念➜可以自己製作優格,網路上有許多方法可以搜尋;或是購買市售商品,但是請務必要選擇❮原味無糖優格❯搭配才能吃出其美味喔!添加一些當季新鮮水果、或是綜合莓果,即成健康又美味的【水果優格】!你一定要試試看!', 
'116.)飲料🍹:冷熱飲皆宜!還可將水果、冷開水、果醬一起攪打成果汁(果醬約1-2大匙即可)']", # {'Calorie_correction': 283.0, # 'Moisture': 33.3, # 'Crude_protein': 10.0, # 'Crude_fat': 6.1, # 'Saturated_fat': 2.9, # 'Total_carbohydrates': 49.2, # 'Dietary_fiber': 4.2, # 'Total_carbohydrate': 6.7, # 'glucose': 2.1, # 'fructose': 2.9, # 'Galactose': 0.0, # 'maltose': 1.6, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 355.0, # 'Potassium': 137.0, # 'calcium': 18.0, # 'magnesium': 46.0, # 'iron': 1.3, # 'Zinc': 1.4, # 'phosphorus': 130.0, # 'copper': 0.0, # 'manganese': 0.0, # 'VitaminB1': 0.22, # 'VitaminB2': 0.07, # 'VitaminB6': 0.11, # 'VitaminB12': 0.21, # 'VitaminC': 0.0, # 'Folic_acid': 66.7, # 'VitaminA': 5.0, # 'VitaminD': 0.0, # 'VitaminE': 2.04}], # ['1000次梳乎厘奄列', # 'https://img.cook1cook.com/upload/cover/202005/20/144150-1588396279_thumb.JPG', # "['雞蛋']", # "['兩隻']", # "['1最小火燒熱平底鑊,抹上一層薄油,', '2將蛋白蛋黃分開,蛋白放進大盤,蛋黃放入小碗。用打蛋器打起蛋白,先把蛋白放入雪櫃半小時,蛋白放久了會減低黏稠度,讓空氣更容易打入蛋白中!', '3打至反轉大盤,蛋白不會倒出就可以了,', '4蛋黃打散,加入蛋白中拌勻,記住動作要輕,否則蛋白容易消泡。', '5蛋漿加入平底鑊中,造成圓形,喜歡咸食的可在此時加入芝士及蔥花,蓋上鍋蓋,焗4分鐘,喜歡更熟一點可以多煎1分鐘,', '6奄列的底部形成脆脆外皮後。用鑊鏟輕輕鬆開奄列,將奄列慢慢推向鍋邊,再放到碟上。加上糖漿。']", # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['日式溏心蛋', # 'https://img.cook1cook.com/upload/cover/202007/20/30114-1594781581_thumb.jpg', # "['鸡蛋']", # "['']", # "['1常溫蛋(若是冰箱取出請放置常溫1小時以上)蛋的\\ue81e兩端分別轻敲至有点裂纹,或可以用大頭針在蛋的兩端分別刺一小孔', '2大火煮開水後,輕輕將蛋放入,转去中火偏小的火力,持續煮6分鐘,((注明,時間按蛋大小調節一下**不同爐具火力不一,最好先做一次,查看結果,作下次準則))這是比較完美的時間,大概是5-6分熟的狀態,這段時間千萬不要離開鍋,用用具輕輕打圈攪動雞蛋,可令蛋黃固定在蛋的中央,可確保蛋黃在蛋的中心部位,保證切開的時候半截面是完美的形狀', '3時間一到立刻關火放進冰冷水里,待蛋略為降溫,將蛋殼敲裂,再浸在水中,讓水滲進蛋內,會較容易剝殼,小心剝去蛋殼,糖心蛋完成了。', '4將剝好的雞蛋放进醤汁里,(给儿子偷吃两粒了)醬汁一定要淹沒過雞蛋,保證整顆蛋的顏色是均勻的', '5也可做日式风味的醬汁--醬油:味淋:水=1:1:3,混在一起煮開備用泡蛋']", # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['蝦肉炒蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/361106/2969fbf48a208dac.jpg', # "['雞蛋']", # "['2']", # 'step-1 \nstep-2 \nstep-3 \n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 
1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['菜脯煎蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/363584/0d4fc1b263a2299d.jpg', # "['雞蛋']", # "['6']", # 'step-1 \nstep-2 \n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['菜雞飯', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/364298/649f5e6088987207.jpg', # "['雞蛋']", # "['3']", # 'step-1 洗淨菜切絲,洗米,放雞\nstep-2 \n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['无油煎鸡蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/364439/1ea4c16497ef9987.jpg', # "['鸡蛋']", # "['3']", # 'step-1 打开鸡蛋盖住熟\n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['【簡易炒蛋】兩顆蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/365609/c36b14a26ae86a60.jpg', # "['蛋']", # "['兩顆']", # 'step-1 兩顆蛋,均勻打散(不同玫瑰歐姆蛋,不打均勻反而能呈現黃白色澤層次感)\nstep-2 中火熱鍋後蛋液下鍋\nstep-3 因為今日要做的是碎蛋下鍋後就可以開始用矽膠鍋鏟推擠\nstep-4 不斷推擠\nstep-5 差不多8分熟了\nstep-6 起鍋\nstep-7 完成💗\nstep-8 \nstep-9 汪🐶:小激動\nstep-10 搭配 早午餐花生漢堡肉蛋吐司+拿鐵 食譜請至➡️下篇文章觀看謝謝收看😊\n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, 
# 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['[懶人/電鍋]白煮蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/366953/653f6acfcc2f9d9c.jpg', # "['蛋']", # "['7個']", # 'step-1 蛋放上蛋架。電鍋一杯水。\n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['原味舒芙蕾烘蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/368558/39e28d06a773fd8a.jpg', # "['雞蛋']", # "['兩顆']", # 'step-1 先將蛋白蛋黃分離\nstep-2 蛋白部分打致硬性發泡\nstep-3 將兩者混合均勻\nstep-4 \nstep-5 鍋內均勻抹上奶油(全程小火)\nstep-6 倒入雞蛋糊(蓋上鍋蓋悶3-4分鐘)\nstep-7 折半盛起即可\nstep-8 \n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['零技術歐姆蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/369488/27f476b71229431c.jpg', # "['雞蛋']", # "['2顆']", # 'step-1 將蛋白和蛋黃分開\nstep-2 把蛋白打發成泡沫狀*建議用電動攪拌機打發*\nstep-3 將蛋黃和打發的蛋白混合均勻\nstep-4 熱鍋以後加入油將混合後的蛋液倒入鍋中注意火候\nstep-5 中間也可以包起司等喜歡的料\nstep-6 之後將底部成形的蛋對折就可以出鍋\n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['溏心蛋半熟蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/369505/c5a5e03edda50bb7.jpg', # "['蛋']", # "['1']", # 'step-1蛋放至室溫。\nstep-2水滾下蛋,煮三分鐘,熄火焗三分鐘。\nstep-3用水喉水降溫後,在水中剝殼。\n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 
1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['神奇鸡蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/344298/cd2774bfa32afa82.jpg', # "['鸡蛋']", # "['3']", # 'step-1 \n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['創意鮭魚芙蓉蒸蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/346654/e8adcfaf0abde1cf.jpg', # "['雞蛋']", # "['10顆']", # 'step-1雞蛋與水二比四\n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['迷你煎蛋', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/349814/8099f05de213fb20.jpg', # "['雞蛋']", # "['3顆']", # 'step-1 先清洗雞蛋的外殼,然後放進封口袋。\nstep-2 置冰箱冷凍庫,直至雞蛋完全結凍。\nstep-3 凍雞蛋由冷凍庫取出後,沖水幾秒、即可輕鬆剝下蛋殼。\nstep-4 迅速將凍蛋切片、置盤中。\nstep-5 熱鍋、倒入些許蔬菜油,把凍蛋切片逐一排入鍋裡。\nstep-6 蛋單面煎至金黃色,翻面煎另一面;\nstep-7 煎至蛋雙面熟、微焦,即可熄火。\nstep-8 三顆雞蛋,即可煎出一大盤迷你煎蛋,增添生活樂趣。\nstep-9 正餐、點心或是便當菜,都適宜。\n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}], # ['飛利浦氣炸鍋》氣炸雞蛋社頭店', # 'https://tokyo-kitchen.icook.network/uploads/recipe/cover/354309/c6bcf6fe81c1e753.jpg', # "['雞蛋']", # "['2顆']", # 'step-1 氣炸溫度180度,時間10分鐘\nstep-2 完成了\n', # {'Calorie_correction': 135.0, # 'Moisture': 75.9, # 'Crude_protein': 12.7, # 'Crude_fat': 8.9, # 'Saturated_fat': 3.1, # 'Total_carbohydrates': 1.6, # 'Dietary_fiber': 0.0, # 'Total_carbohydrate': 0.2, # 'glucose': 0.2, # 'fructose': 0.0, # 'Galactose': 0.0, # 'maltose': 0.0, # 'sucrose': 0.0, # 'lactose': 0.0, # 'sodium': 138.0, # 'Potassium': 135.0, # 'calcium': 54.0, # 'magnesium': 11.0, # 'iron': 1.9, # 'Zinc': 1.3, # 
'phosphorus': 186.0, # 'copper': 0.07, # 'manganese': 0.03, # 'VitaminB1': 0.09, # 'VitaminB2': 0.48, # 'VitaminB6': 0.11, # 'VitaminB12': 0.8, # 'VitaminC': 0.6, # 'Folic_acid': 78.6, # 'VitaminA': 558.0, # 'VitaminD': 84.4, # 'VitaminE': 2.22}]] # return left # # # # def input_column(recipes): # # if recipes == '{查無符合資料}': # # text_message = TextSendMessage(text='查無符合資料') # # line_bot_api.reply_message(event.reply_token, text_message) # # # # # # else: # # columnlist = [] # # for recipe in recipes: # # column = CarouselColumn( # # thumbnail_image_url=f'{recipe[0]}', # # title=f'{recipe[1]}', # # text=f'{recipe[2]}', # # actions=[ # # URITemplateAction( # # label=f'{recipe[1]}作法影片', # # uri=f'{recipe[3]}' # # ), # # URITemplateAction( # # label=f'{recipe[1]}食譜查詢', # # uri=f'{recipe[4]}' # # ), # # MessageTemplateAction( # # label=f'{recipe[1]}營養素', # # text=f'{recipe[5]}') # # ] # # ) # # columnlist.append(column) # # print(columnlist) # # Carousel_template = TemplateSendMessage( # # alt_text='Carousel template', # # template=CarouselTemplate( # # columns=columnlist)) # # line_bot_api.reply_message(event.reply_token, Carousel_template) # # # recipelist = left() # # a = tool.recipetempelete(recipelist) # # b= tool.input_column(a) # # print(b) # # recipes = tool.recipetempelete(recipelist) # # input_column(recipes) # # # def recipeurl(left): # # myfinaldict = {} # # namelist = [] # # urllist = [] # # # # for content in left[0:6]: # # name = content[0] # # imgurl = content[1] # # # print(name) # # # print(imgurl) # # if imgurl[0:11] == r'https://img': # # namelist.append(name) # # url =f"https://cook1cook.com/search?keyword={name}&theme=recipe" # # urllist.append(url) # # elif imgurl[0:11] == r'https://tok': # # namelist.append(name) # # url = f'https://icook.tw/search/{name}' # # urllist.append(url) # # mydict = dict(zip(namelist, urllist)) # # myfinaldict.update(mydict) # # return myfinaldict # # print(recipeurl(left)) # # #尋找yt # # def ytname(name): # # return f'https://www.youtube.com/results?search_query={name}' # # # # def nutrient(left): # # # # if len(left) > 6: # # myfinaldict = {} # # namelist = [] # # nutrientlist = [] # # for content in left[0:6]: # # # for i in content[5]: # # # print # # # print(content) # # name = content[0] # # # print(name) # # nutrientsentence = '' # # for nutrient , weight in content[5].items(): # # nutrientsentence += (f'營養素名:{nutrient},重量:{weight}\n') # # namelist.append(name) # # nutrientlist.append(nutrientsentence) # # mydict = dict(zip(namelist, nutrientlist)) # # myfinaldict.update(mydict) # # return myfinaldict # # # # # print(nutrient(left)) # # # def manageForm(event,msg): # # # try: # # # flist = msg[3:].split('/') # # # text1 = '姓名' +flist[0] + '/n' # # # text1 += 'email' + flist[1] + '/n' # # # text1 += '性別' + flist[2] + '/n' # # # text1 += '年齡' + flist[3] + '/n' # # # text1 += '身高' + flist[4] + '/n' # # # text1 += '體重' + flist[5] + '/n' # # # text1 += '不能吃' + flist[6] + '/n' # # # text1 += '養生' + flist[7] + '/n' # # # text1 += '油炸' + flist[8] + '/n' # # # text1 += '蔬果攝取' + flist[9] + '/n' # # # text1 += '活動量' + flist[10] # # # message = TextSendMessage(text = text1) # # # line_bot_api.reply_message(event.reply_token,message) # # # except: # # # line_bot_api.reply_message(event.reply_token, # # # TextSendMessage(text='發生錯誤!')) # # a={} # # b={1:2} # # a.update(b) # # print(a) # # # print(urllib.parse.quote('雲朵蛋')) # # # string = "python" # # print(string[-4:]) # # # sentence = '###085/778/男/17歲以下/616/999/124/是/是/是/輕度活動' # # source = 
sentence.split('/') # # username = source[0].split('###')[1] # # # user_id = user_id # # email = source[1] # # gender = source[2] # # age = source[3] # # height = source[4] # # weight = source[5] # # disliked = source[6] # # health = source[7] # # fried = source[8] # # vegetable = source[9] # # activity = source[10] # # # # cur = mysql.connection.cursor() # # increase = f"""INSERT INTO USER VALUES( # # '{username}','{user_id}','{email}','{gender}','{age}','{height}','{weight}','{disliked}','{health}','{fried}','{vegetable}','{activity}')""" # # cur.execute(increase) # # mysql.connection.commit() # # a= (1,2,3) # # b=() # # print(a[1]) # # print(len(a)) # # print(len(b)) # # a=[1,2,3] # # print(list(a)) # print(a[-1])
2.953125
3
dl_simulation.py
cleary-lab/CS-SMAF
16
12777082
<reponame>cleary-lab/CS-SMAF import numpy as np from sklearn import decomposition from sklearn.linear_model import MultiTaskLassoCV,OrthogonalMatchingPursuit,RidgeCV,Ridge,ElasticNetCV,Lasso import spams from scipy.spatial import distance from scipy.stats import spearmanr, entropy import sys from sklearn import mixture THREADS = 10 def random_phi(m,g,d_thresh=0.2,nonneg=False): Phi = np.zeros((m,g)) Phi[0] = np.random.randn(g) if nonneg: Phi[0] = abs(Phi[0]) Phi[0] /= np.linalg.norm(Phi[0]) for i in range(1,m): dmax = 1 while dmax > d_thresh: p = np.random.randn(g) if nonneg: p = abs(p) dmax = max(abs(1 - distance.cdist(Phi,[p],'correlation'))) Phi[i] = p/np.linalg.norm(p) return Phi def random_phi_subsets(m,g,n,d_thresh=0.2): Phi = np.zeros((m,g)) Phi[0,np.random.choice(g,n,replace=False)] = n**-0.5 for i in range(1,m): dmax = 1 while dmax > d_thresh: p = np.zeros(g) p[np.random.choice(g,n,replace=False)] = n**-0.5 dmax = Phi[:i].dot(p).max() Phi[i] = p return Phi def get_observations(X0,Phi,snr=5,return_noise=False): noise = np.array([np.random.randn(X0.shape[1]) for _ in range(X0.shape[0])]) noise *= np.linalg.norm(X0)/np.linalg.norm(noise)/snr if return_noise: return Phi.dot(X0 + noise),noise else: return Phi.dot(X0 + noise) def coherence(U,m): Phi = random_phi(m,U.shape[0]) PU = Phi.dot(U) d = distance.pdist(PU.T,'cosine') return abs(1-d) def sparse_decode(Y,D,k,worstFit=1.,mink=4): while k > mink: W = spams.omp(np.asfortranarray(Y),np.asfortranarray(D),L=k,numThreads=THREADS) W = np.asarray(W.todense()) fit = 1 - np.linalg.norm(Y - D.dot(W))**2/np.linalg.norm(Y)**2 if fit < worstFit: break else: k -= 1 return W def update_sparse_predictions(Y,D,W,Psi,lda=0.0001): X = np.zeros((Psi.shape[0],W.shape[1])) for i in range(W.shape[1]): used = (W[:,i] != 0) if used.sum() > 0: d = np.copy(D) d = d[:,used] model = Ridge(alpha=lda) model.fit(d,Y[:,i]) X[:,i] = model.predict(Psi[:,used]) return X def recover_system_knownBasis(X0,m,k,Psi=[],use_ridge=False,snr=0,nsr_pool=0,subset_size=0): if len(Psi) == 0: Psi,s,vt = np.linalg.svd(X0) if subset_size == 0: Phi = random_phi(m,X0.shape[0]) else: Phi = random_phi_subsets(m,X0.shape[0],subset_size) Phi_noise = random_phi(m,X0.shape[0])*nsr_pool D = Phi.dot(Psi) Y = get_observations(X0,Phi+Phi_noise,snr=snr) W = sparse_decode(Y,D,k) if use_ridge: X = update_sparse_predictions(Y,D,W,Psi) else: X = Psi.dot(W) return X,Phi,Y,W,D,Psi
2.203125
2
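A minimal usage sketch for the dl_simulation.py record above. It exercises recover_system_knownBasis on synthetic data; the matrix shape and the m/k/snr values are illustrative assumptions, and the spams OMP solver the module imports must be installed for this to run.
import numpy as np
from dl_simulation import recover_system_knownBasis

# synthetic "genes x samples" matrix; sizes are made up for illustration
X0 = np.random.rand(500, 200)
X, Phi, Y, W, D, Psi = recover_system_knownBasis(X0, 50, 15, snr=5, use_ridge=True)
# fraction of variance in X0 explained by the compressed-sensing recovery
fit = 1 - np.linalg.norm(X0 - X) ** 2 / np.linalg.norm(X0) ** 2
print('fraction of variance explained: %.3f' % fit)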
pygame_geometry/triangle.py
MarcPartensky/Pygame-Geometry
3
12777083
from .abstract import Form


class Triangle(Form):
    """Representation of a triangle."""

    @classmethod
    def random(cls, **kwargs):
        return super().random(n=3, **kwargs)

    def __init__(self, points, **kwargs):
        if len(points) != 3:
            # Use the local argument here: self.points is not set until super().__init__ runs.
            raise Exception("There must be 3 points in a triangle not {}".format(len(points)))
        super().__init__(points, **kwargs)


if __name__ == "__main__":
    from .manager import AbstractManager
    t = Triangle.random()
    c = t.center
    m = AbstractManager(t, c)
    m()

3.90625
4
app/models.py
oblassers/tuw-servicebroker
0
12777084
<reponame>oblassers/tuw-servicebroker<gh_stars>0 from openbrokerapi.catalog import ServicePlan, ServiceMetadata, ServicePlanMetaData, ServicePlanCost, Schemas from openbrokerapi.service_broker import ( ServiceBroker, Service, ProvisionDetails, ProvisionedServiceSpec, BindDetails, Binding, UpdateDetails, UpdateServiceSpec, UnbindDetails, DeprovisionDetails, DeprovisionServiceSpec, LastOperation ) class TuFilesServiceBroker(ServiceBroker): def catalog(self): return Service( id='f02f46b2-d20d-4b6b-a9fa-6e7d28fddc9c', name='tu-files', description='Highly available network drive for institutes and organizational units.', bindable=True, metadata=ServiceMetadata( displayName='TUfiles', imageUrl='http://example.com/tufiles_logo.png', longDescription='With TUfiles, we give you the opportunity to store data on a central and readily available network drive with backup (hosted on Windows servers). TUfiles is suitable for storing data with moderate access requirements, but high availability demands. TUfiles is not suitable for applications demanding high storage performance for a long period of time, such as high-performance databases, computer applications with high data access requirements and for storing local Microsoft Outlook PST files and backups.', providerDisplayName='TU.it', documentationUrl='https://www.it.tuwien.ac.at/tufiles/', supportUrl='https://support.tuwien.ac.at/assystnet/' ), plans=[ ServicePlan( id='04cd4e4a-f296-4090-b4f4-0e5717bb90c6', name='standard', description='Highly available network drive with standard authorization concept.', free=False, metadata=ServicePlanMetaData( displayName='Standard authorization concept', bullets=['Redundant and readily available network drive', 'You can personally administer access rights and authorisations in folders', 'Useful for working with older file versions', 'With Windows 7/8/8.1/10, Linux and MacOS from SMB version 2.1', 'Not intended for storing your backups' ], costs=[ServicePlanCost( amount={'eur': 0.03}, unit='GB per quarter' )] ), schemas=Schemas( service_instance={ 'create': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'share-name': { 'description': 'Name of the network drive.', 'type': 'string' }, 'size': { 'description': 'Size of storage in GB.', 'type': 'int' }, 'authorization-group': { 'description': 'Name of the upTUdate authorization group.', 'type': 'string' } } } }, 'update': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'size': { 'description': 'Size of storage in GB.', 'type': 'int' }, } } } }, service_binding={ 'create': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'xxxx': { 'description': 'Some parameter needed for binding the service instance.', 'type': 'string' } } } } } ) ) ], tags=['network-drive', 'storage'], plan_updateable=True, ) def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec: pass def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding: pass def update(self, instance_id: str, details: UpdateDetails, async_allowed: bool) -> UpdateServiceSpec: pass def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails): pass def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec: pass def last_operation(self, instance_id: str, operation_data: str) -> LastOperation: pass 
class TuCloudServiceBroker(ServiceBroker): def catalog(self): return Service( id='c4573c4b-0fec-4c2d-b650-a3daa91a3bf0', name='tu-cloud', description='File Sync and Share service located on servers of the TU Wien.', bindable=True, metadata=ServiceMetadata( displayName='TUcloud', imageUrl='http://example.com/tucloud_logo.png', longDescription='With our File Sync and Share service, you can save your data to a \"virtual memory stick\". The open source software ownCloud runs on TU.it servers and basically offers the familiar features also provided by public cloud systems such as Dropbox.', providerDisplayName='TU.it', documentationUrl='https://www.it.tuwien.ac.at/tuprocloud/', supportUrl='https://support.tuwien.ac.at/assystnet/' ), plans=[ ServicePlan( id='b1e8a5fd-0abc-4259-8d66-ab6fe8ee8b1d', name='tu-owncloud', description='Free file sync and share service for internal use.', free=True, metadata=ServicePlanMetaData( displayName='TUownCloud', bullets=['20 GB of personal storage space', 'Running on TU.it servers - your data is present locally on our systems', 'Data access possible via clients, web and WebDAV', 'Synchronisation with any number of devices is either automatic or in accordance with settings you make yourself' ] ) ), ServicePlan( id='433b4d74-6ed9-41f5-81e6-4bef7fd66c1f', name='tu-procloud', description='File sync and share service for collaboration with external project partners.', free=False, metadata=ServicePlanMetaData( displayName='TUproCloud', bullets=['Collaborate with external project partners', 'File-synchronization and sharing', 'Configure access / authorization of members', 'Several 100GB of storage possible' ], costs=[ServicePlanCost( amount={'eur': 0.03}, unit='GB per quarter' )] ), schemas=Schemas( service_instance={ 'create': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'name': { 'description': 'Unique TU-wide name, not changeable.', 'type': 'string' }, 'size': { 'description': 'Size of storage in GB.', 'type': 'int' }, } } }, 'update': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'size': { 'description': 'Size of storage in GB.', 'type': 'int' }, } } } }, service_binding={ 'create': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'xxxx': { 'description': 'Some parameter needed for binding the service instance.', 'type': 'string' } } } } } ) ) ], tags=['cloud-storage', 'file-sync', 'share'], plan_updateable=True, ) def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec: pass def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding: pass def update(self, instance_id: str, details: UpdateDetails, async_allowed: bool) -> UpdateServiceSpec: pass def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails): pass def deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec: pass def last_operation(self, instance_id: str, operation_data: str) -> LastOperation: pass class TuHostServiceBroker(ServiceBroker): def catalog(self): return Service( id='e636ea62-a613-41a5-88b5-34f7be4b8d34', name='tu-host', description='Run virtual machine on central, highly available virtualization platform', bindable=False, metadata=ServiceMetadata( displayName='TUhost', imageUrl='http://example.com/tuhost_logo.png', longDescription='You have the opportunity to 
operate virtual machines on the central and highly available TU.it virtualisation platform, hosted on VMware ESXi. TUhost is suitable for operating servers with moderate resource requirements, but high availability demands. The virtualisation platform is not suitable for operating servers that are to support applications demanding a particularly high computing capacity and/or storage performance, such as simulation calculations, high-performance databases or storage for vast quantities of data.', providerDisplayName='TU.it', documentationUrl='https://www.it.tuwien.ac.at/tuhost/', supportUrl='https://support.tuwien.ac.at/assystnet/' ), plans=[ ServicePlan( id='057997b7-22ac-4d6b-90a9-b2c8d5e289e6', name='standard', description='Configurable, highly available Virtual Machine.', free=False, bindable=False, metadata=ServicePlanMetaData( displayName='TUhost Standard', bullets=['Provision of virtual servers including storage for TU organisational units', 'Backup for VMs', 'VM administration portal', 'Restore option in self-service via a portal' ], costs=[ ServicePlanCost( amount={'eur': 8}, unit='vCPU per quarter' ), ServicePlanCost( amount={'eur': 8}, unit='GB RAM per quarter' ), ServicePlanCost( amount={'eur': 0.1}, unit='GB system disk per quarter' ), ServicePlanCost( amount={'eur': 0.1}, unit='GB data disk per quarter' ), ServicePlanCost( amount={'eur': 0.25}, unit='GB high-performance disk per quarter' ), ] ), schemas=Schemas( service_instance={ 'create': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'vmName': { 'description': 'Virtual machine name (host.subdomain).', 'type': 'string' }, 'vCPUs': { 'description': 'Number of virtual CPUs.', 'type': 'int' }, 'ram': { 'description': 'Amount of RAM in GB.', 'type': 'int' }, 'diskSys': { 'description': 'Amount of system disk storage in GB.', 'type': 'int' }, 'diskData': { 'description': 'Amount of data disk storage in GB.', 'type': 'int' }, 'diskHighPerf': { 'description': 'Amount of high performance disk storage in GB.', 'type': 'int' }, 'os': { 'description': 'Virtual machine operating system.', 'type': 'string', 'enum': ['CentOS', 'Debian', 'Windows Server'] }, 'usage': { 'description': 'Intended usage of the virtual machine.', 'type': 'string' }, } } }, 'update': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'vCPUs': { 'description': 'Number of virtual CPUs.', 'type': 'int' }, 'ram': { 'description': 'Amount of RAM in GB.', 'type': 'int' }, 'diskSys': { 'description': 'Amount of system disk storage in GB.', 'type': 'int' }, 'diskData': { 'description': 'Amount of data disk storage in GB.', 'type': 'int' }, 'diskHighPerf': { 'description': 'Amount of high performance disk storage in GB.', 'type': 'int' }, } } } }, service_binding={ 'create': { 'parameters': { '$schema': 'http://json-schema.org/draft-04/schema#', 'type': 'object', 'properties': { 'xxxx': { 'description': 'Some parameter needed for binding the service instance.', 'type': 'string' } } } } } ) ), ], tags=['vm', 'virtual-server'], plan_updateable=True, ) def provision(self, instance_id: str, service_details: ProvisionDetails, async_allowed: bool) -> ProvisionedServiceSpec: pass def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding: pass def update(self, instance_id: str, details: UpdateDetails, async_allowed: bool) -> UpdateServiceSpec: pass def unbind(self, instance_id: str, binding_id: str, details: UnbindDetails): pass def 
deprovision(self, instance_id: str, details: DeprovisionDetails, async_allowed: bool) -> DeprovisionServiceSpec: pass def last_operation(self, instance_id: str, operation_data: str) -> LastOperation: pass
1.96875
2
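A hedged sketch of serving one of the brokers defined in the app/models.py record above with openbrokerapi's bundled Flask server. The import path follows the record's file path, and the credentials are placeholder assumptions.
from openbrokerapi import api
from app.models import TuFilesServiceBroker

if __name__ == '__main__':
    # Starts the bundled development server on openbrokerapi's default port.
    api.serve(TuFilesServiceBroker(), api.BrokerCredentials("broker-user", "broker-password"))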
system/tasks.py
topicgit/seal
132
12777085
import logging
import requests
import json
from celery import shared_task
from system.models import Users
from seal import settings

logger = logging.getLogger('system_celery')


@shared_task
def system_demo(one):
    # With USE_TZ enabled, Django stores UTC in the database and converts to UTC+8 on access;
    # Celery recognises the timezone automatically.
    from django.utils import timezone
    for i in Users.objects.all():
        print(i.last_login)  # reading the field directly gives the raw, unconverted UTC value
        print(timezone.localtime(i.last_login).strftime("%Y-%m-%d %H:%M:%S"))  # formatted local time
    print("celery periodic task demo, runs once a minute", one)
    return


@shared_task
def ding_ding_to_info(content, type=None):
    """
    DingTalk webhook, called asynchronously: ding_ding_to_info.delay("alert 1")
    :param content: message text
    :param type:
    :return:
    """
    web_hook_url = getattr(settings, 'web_hook_url'),  # trailing comma makes this a 1-tuple, hence web_hook_url[0] below
    headers = {'content-type': 'application/json'}
    data = {
        "msgtype": "text",
        "text": {
            "content": content
        },
        "at": {
            "atMobiles": [
            ],
        }
    }
    try:
        r = requests.post(web_hook_url[0], data=json.dumps(data), headers=headers)
        print(r.text)
    except Exception as e:
        logger.error(e)
2.1875
2
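A sketch of wiring the system_demo task above into a Celery beat schedule. The Celery app name, broker URL, and schedule entry name are assumptions for illustration; the task path follows the record's file location (system/tasks.py).
from celery import Celery
from celery.schedules import crontab

app = Celery('seal', broker='redis://localhost:6379/0')  # broker URL is an assumption
app.conf.beat_schedule = {
    'system-demo-every-minute': {
        'task': 'system.tasks.system_demo',
        'schedule': crontab(),  # crontab() with no arguments fires every minute
        'args': ('demo',),
    },
}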
src/py/component.py
snwjas/RandomDesktopBackground-WEBUI
17
12777086
<filename>src/py/component.py<gh_stars>10-100 # -*-coding:utf-8-*- """ 组件 @author <NAME> """ import mmap import os import sys import typing from threading import Timer from typing import Dict class SingletonMetaclass(type): """ 单例元类 """ def __init__(cls, *args, **kwargs): cls.__instance = None super(SingletonMetaclass, cls).__init__(*args, **kwargs) def __call__(cls, *args, **kwargs): if cls.__instance is None: cls.__instance = super(SingletonMetaclass, cls).__call__(*args, **kwargs) return cls.__instance class CustomLogger(object, metaclass=SingletonMetaclass): """ 自定义日志记录类 """ from loguru import logger from loguru._logger import Logger logger.opt(lazy=True, colors=True) # 日志格式 __log_format = '{time:YYYY-MM-DD HH:mm:ss,SSS} | {level}\t | {file}:{function}:{line} | {message}' # 日志类型定义 LOGGING_DEBUG = 10 LOGGING_INFO = 20 LOGGING_WARNING = 30 LOGGING_ERROR = 40 LOGGING_CRITICAL = 50 LOGURU_SUCCESS = 25 def __init__(self, log_srcpath: str = ''): self.__log_srcpath = log_srcpath if log_srcpath else '' def __add_file_handler(self): handler_id = self.logger.add( sink=os.path.join(self.__log_srcpath, 'run.{time:YYYYMMDD}.log'), format=self.__log_format, rotation='1 day', retention=30, enqueue=True, encoding='UTF-8' ) return handler_id def __add_console_handler(self): info_handler_id = self.logger.add(sink=sys.stdout, level=self.LOGGING_INFO, # fg #097D80 format='<g>' + self.__log_format + '</>', colorize=True, filter=lambda record: record["level"].name == "INFO" ) err_handler_id = self.logger.add(sink=sys.stdout, level=self.LOGGING_ERROR, # fg #F56C6C format='<r>' + self.__log_format + '</>', colorize=True, filter=lambda record: record["level"].name == "ERROR" ) return info_handler_id, err_handler_id def use_file_console_logger(self) -> Logger: self.logger.remove(handler_id=None) self.__add_file_handler() self.__add_console_handler() return self.logger def use_console_logger(self) -> Logger: self.logger.remove(handler_id=None) self.__add_console_handler() return self.logger def use_file_logger(self) -> Logger: self.logger.remove(handler_id=None) self.__add_file_handler() return self.logger def user_none(self) -> Logger: self.logger.remove(handler_id=None) return self.logger def set_logpath(self, log_srcpath: str): log_srcpath = log_srcpath if log_srcpath else '' self.__log_srcpath = log_srcpath def get_logger(self) -> Logger: return self.logger class SimpleTaskTimer(object): """ 简单的循环单任务定时器,非阻塞当前线程 @author <NAME> """ def __init__(self): self.__timer: Timer = None self.__seconds = 0.0 self.__action = None self.__args = None self.__kwargs = None def run(self, seconds: float, action, args=None, kwargs=None): """ 执行循环定时任务 :param seconds: 任务执行间隔,单位秒 :param action: 任务函数 :param args: 函数参数 """ if not callable(action): raise AttributeError("参数action非法,请传入函数变量") if self.is_running(): return self.__action = action self.__seconds = seconds self.__args = args if args is not None else [] self.__kwargs = kwargs if kwargs is not None else {} self.__run_action() def __run_action(self): self.__timer = Timer(self.__seconds, self.__hook, self.__args, self.__kwargs) self.__timer.start() def __hook(self, *args, **kwargs): self.__action(*args, **kwargs) self.__run_action() def is_running(self) -> bool: """ 判断任务是否在执行 """ return self.__timer and self.__timer.is_alive() def cancel(self): """ 取消循环定时任务 """ if self.is_running(): self.__timer.cancel() self.__timer = None class AppBreathe(object): """ 定义App心跳行为,用于检测客户端是否仍然在连接(客户端循环请求发送心跳请求): 定时器 __seconds 检测一次,连续 __times 次没接收到心跳请求则判定客户端失去连接 @author <NAME> """ def 
__init__(self, interval: int = 60, times: int = 5): """ :param interval: 循环计时器间隔,单位秒 :param times: 循环检测次数 """ self.__timer: SimpleTaskTimer = None # 定时器循环频率 self.__seconds: int = interval # 检测次数 self.__times: int = times # 记录没有收到心跳信号次数 self.__signals: int = 0 def __action(self, callback): self.__signals += 1 if self.__signals > self.__times: if callback and callable(callback): callback() def run(self, callback=None): """ 启动 :param callback: 判定为失去连接时发生的回调 """ if self.__timer: return self.__timer = SimpleTaskTimer() self.__timer.run(self.__seconds, self.__action, [callback]) def is_alive(self) -> bool: """ 客户端连接是否仍存活 """ return self.__signals <= self.__times def record_alive(self): """ 重置信号 """ self.__signals = 0 return True class SimpleMmapActuator(object): """ 基于mmap内存通信的实时单指令无参执行器 @author <NAME> """ def __init__(self, mname: str = 'GlobalRandomDesktopBackgroundShareMemory', msize: int = 64): """ :param mname: 映射文件名称 :param msize: 映射大小(字节) """ self.name = mname self.size = msize self.sm_rd: mmap = None self.sm_rd_timer: SimpleTaskTimer = None self.sm_rd_call_map: Dict[str, typing.Callable] = {} pass def run_monitor(self, check_seconds: float = 0.125): """ 监控命令 :param cmd_func_map: 命令与执行函数的映射表 :param check_seconds: 命令检查间隔 """ if self.sm_rd: return self.sm_rd = mmap.mmap(-1, self.size, tagname=self.name, access=mmap.ACCESS_WRITE) self.__empty_sm() self.sm_rd_timer = SimpleTaskTimer() self.sm_rd_timer.run(check_seconds, self.__call_read) def __call_read(self): self.sm_rd.seek(0) cmd = self.sm_rd.read(self.size) cmd = str(cmd, encoding='utf-8').replace('\x00', '') if cmd: self.__empty_sm() func = self.sm_rd_call_map.get(cmd) if func and callable(func): func() def __empty_sm(self): self.sm_rd.seek(0) self.sm_rd.write(b'\x00' * self.size) self.sm_rd.flush() def set_cmd_func_map(self, cmd_func_map: Dict[str, typing.Callable]): self.sm_rd_call_map = cmd_func_map def append_cmd_func_map(self, cmd_func_map: Dict[str, typing.Callable]): self.sm_rd_call_map.update(cmd_func_map) def cancel_monitor(self): """ 取消监控 """ if not self.sm_rd: return self.sm_rd_timer.cancel() self.sm_rd.close() self.sm_rd = None self.sm_rd_call_map = None def send_command(self, command: str): """ 发送执行命令 """ with mmap.mmap(-1, self.size, tagname=self.name, access=mmap.ACCESS_WRITE) as m: m.seek(0) m.write(bytes(command, encoding='utf-8')) m.flush()
2.40625
2
pirates/kraken/KrakenGameFSM.py
Willy5s/Pirates-Online-Rewritten
81
12777087
<reponame>Willy5s/Pirates-Online-Rewritten<filename>pirates/kraken/KrakenGameFSM.py
from direct.directnotify import DirectNotifyGlobal
from direct.fsm.FSM import FSM


class KrakenGameFSM(FSM):
    notify = DirectNotifyGlobal.directNotify.newCategory('KrakenGameFSM')

    def __init__(self, av):
        FSM.__init__(self, 'KrakenGameFSM')
        self.av = av

    def enterRam(self):
        pass

    def exitRam(self):
        pass

    def enterGrab(self):
        self.av.emergeInterval.pause()
        self.av.submergeInterval.start()

    def exitGrab(self):
        pass
2.140625
2
src/amrpkg/scripts/scan_control.py
rafafigueroa/amrws
0
12777088
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
import numpy as np

pub = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=10)


def sc_callback(robot_sc):
    scan_ranges = robot_sc.ranges
    # print 'scan ranges len', len(scan_ranges)
    x = scan_ranges[320]  # range reading roughly straight ahead (centre beam of the scan)
    print 'x:', x
    if not np.isnan(x):
        r = 1.0  # desired stand-off distance (m)
        k = 0.5  # proportional gain
        v = - k * (r - x)
        robot_tw = Twist()
        robot_tw.linear.x = v
        pub.publish(robot_tw)


def control():
    rospy.init_node('amr_control')
    rospy.Subscriber('/scan', LaserScan, sc_callback)
    rospy.spin()


if __name__ == '__main__':
    try:
        control()
    except rospy.ROSInterruptException:
        pass
2.234375
2
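A worked example of the proportional control law used in sc_callback above: v = -k * (r - x) drives the robot toward holding distance r from whatever the centre scan beam hits. The helper function below is purely illustrative and not part of the original node.
def control_law(x, r=1.0, k=0.5):
    """Forward velocity command (m/s) for a measured range x (metres)."""
    return -k * (r - x)

print(control_law(1.5))  # 0.25  -> too far away, drive forward
print(control_law(1.0))  # 0.0   -> at the set-point, stop
print(control_law(0.6))  # -0.2  -> too close, back up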
django_cradmin/acemarkdown/widgets.py
appressoas/django_cradmin
11
12777089
import json

from django import forms
from django.template.loader import render_to_string


class AceMarkdownWidget(forms.widgets.Textarea):
    template_name = 'django_cradmin/acemarkdown/widget.django.html'
    directiveconfig = {
        # 'showTextarea': False,
        # 'theme': 'tomorrow'
    }

    @property
    def media(self):
        return forms.Media(
            js=[
                'django_cradmin/dist/vendor/js/ace-editor/ace.js',
            ]
        )

    def render(self, name, value, attrs=None):
        attrs = attrs.copy()
        attrs['textarea django-cradmin-acemarkdown-textarea'] = ''
        textarea = super(AceMarkdownWidget, self).render(name, value, attrs)
        return render_to_string(
            self.template_name, {
                'textarea': textarea,
                'directiveconfig': json.dumps(self.directiveconfig)
            }
        )
2.046875
2
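A minimal sketch of using the AceMarkdownWidget above in a Django form. The form and field names are invented for illustration; the import path follows the record's file location.
from django import forms
from django_cradmin.acemarkdown.widgets import AceMarkdownWidget

class ArticleForm(forms.Form):
    # Renders the textarea through the Ace editor template defined by the widget.
    body = forms.CharField(widget=AceMarkdownWidget())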
src/pycropml/transpiler/antlr_py/tests/test_to_specification.py
AgriculturalModelExchangeInitiative/PyCropML
3
12777090
from pycropml.transpiler.antlr_py.to_specification import * specification =""" !<CyML Description Begin> ! ABD Average bulk density for soil profile (g [soil] / cm3 [soil]) ! (10.5, [0.5 - 100.0]) state variable ! TLL Total soil water in the profile at the lower limit of ! plant-extractable water (cm) (, [0.0 - 10000]) exogenous variable !<CyML Description End> """ specification2 = """ #<CyML Description Begin> # ABD(L) Average bulk density for soil profile (g [soil] / cm3 [soil]) # (10.5, [0.5 - 100.0]) state variable #<CyML Description End> """ def test_extractMetaInfo(): res1 = extractMetaInfo(specification, "!") print(res1) assert res1 == {"ABD":{"description":"Average bulk density for soil profile","unit":"g [soil] / cm3 [soil]", "default":"10.5", "max":"100.0", "min":"0.5","len":"", "category":"state", "type":"variable"},\ "TLL" :{"description":"Total soil water in the profile at the lower limit of plant-extractable water", "unit":"cm", "default":"", "min":"0.0", "max": "10000","len":"", "category":"exogenous", "type":"variable"} } res1 = extractMetaInfo(specification2, "#") assert res1 == {"ABD":{"description":"Average bulk density for soil profile","unit":"g [soil] / cm3 [soil]", "default":"10.5", "max":"100.0", "min":"0.5","len":"L", "category":"state", "type":"variable"}} test_extractMetaInfo()
2.421875
2
cities_seeder.py
ishidur/TSPSolver
8
12777091
import numpy as np  # type: ignore

city_num = 20
file_path = "./coordinates/"
output_file = "random_" + str(city_num) + "_cities.csv"

if __name__ == "__main__":
    # “continuous uniform” distribution random
    np_cities = np.random.random((city_num, 2))
    np.savetxt(file_path + output_file, np_cities, delimiter=",")
2.65625
3
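A small companion sketch: reading the coordinate file produced by the seeder above back into a NumPy array, the way a solver consuming it presumably would (an assumption, not taken from the repository).
import numpy as np

cities = np.loadtxt("./coordinates/random_20_cities.csv", delimiter=",")
assert cities.shape == (20, 2)
print(cities[:3])  # first three (x, y) pairs, each in [0, 1)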
duguai/game/human.py
ChiangYintso/du-guai
6
12777092
<gh_stars>1-10 # -*- coding: utf-8 -*- """ 人类玩家模块 @author: 江胤佐 """ from typing import Iterator, Union, Set from duguai import mode from duguai.card.cards import cards_view from duguai.utils import is_in from ..game.game_env import GameEnv, _remove_last_combo, SPLIT_LINE class Human(GameEnv.AbstractPlayer, GameEnv.MessageObserver): """ 人类玩家,由控制台输入输出进行操作 @author 江胤佐 """ def __init__(self, game_env: GameEnv, name: str): super().__init__(game_env, name) def update_msg(self, msgs: Union[Iterator, str]) -> None: """ 人类玩家收到GameEnv对象发来的消息 @param msgs: 消息 """ if isinstance(msgs, Iterator): for msg in msgs: print(msg) else: print(msgs) def update_last_combo(self) -> None: """ GameEnv更新了上一次出牌操作 """ if self.game_env.last_combo_owner_id == self.game_env.turn: print(self.game_env.rel_user_info(0) + '打出了' + self.game_env.last_combo.cards_view) else: print(self.game_env.rel_user_info(0) + '空过') print(SPLIT_LINE) if mode == 'debug': print(self.game_env.cards[(self.game_env.turn + 1) % 3]) def update_game_over(self, victors: Set[int]) -> None: """ GameEnv通知玩家游戏结束 @param victors: 胜利者 """ for i in range(3): print('玩家' + self.game_env.abs_user_info(i) + '的牌: ', self.game_env.cards[i]) print('玩家', victors, '获胜') if self._order in victors: if len(victors) == 1: self._landlord_victory_count += 1 else: self._farmer_victory_count += 1 def call_landlord(self) -> bool: """ 玩家叫地主 @return: 叫: True; 不叫: False """ print('玩家{}的手牌:'.format(self._order), cards_view(self.hand)) return input('>>> (输入1叫地主, 输入其它键不叫地主)') == '1' def update_landlord(self, landlord_id: int) -> None: """ 通知人类玩家,谁成为了地主 """ print(SPLIT_LINE) print('玩家{}叫了地主'.format(landlord_id)) print('地主获得了3张牌: {}'.format(cards_view(self.game_env.cards[3]))) print(SPLIT_LINE) def __get_input(self): return input('你的手牌: {}\n上家 {} 手牌数量: {}\n下家 {} 手牌数量: {}\n>>> (输入要出的牌,以空格分隔。直接回车代表空过。)' .format(cards_view(self.hand), self.game_env.rel_user_info(-1), self.game_env.hand_p, self.game_env.rel_user_info(1), self.game_env.hand_n)).upper() @_remove_last_combo def follow(self) -> None: """ 玩家跟牌 """ while True: cards_v = self.__get_input() self.last_combo.cards_view = cards_v if self.valid_follow(): break else: print('输入非法!') @_remove_last_combo def play(self) -> None: """ 玩家出牌 """ while True: self.last_combo.cards_view = self.__get_input() if is_in(self.last_combo.cards, self.hand) and self.last_combo.is_not_empty(): break else: print('输入非法!')
2.78125
3
setup.py
udellgroup/oboe
74
12777093
<gh_stars>10-100
import setuptools
import os

with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()


def package_files(directory):
    """
    Recursively find all files in a (sub)directory.
    Source: https://stackoverflow.com/a/36693250
    """
    paths = []
    for (path, directories, filenames) in os.walk(directory):
        for filename in filenames:
            paths.append(os.path.join('..', path, filename))
    return paths


# No trailing comma here: a trailing comma would turn this into a one-element
# tuple and break the install_requires argument passed to setuptools below.
install_requires = [
    "numpy>=1.16.4",
    "scipy>=1.4.1",
    "pandas>=0.24.2",
    "scikit-learn>=0.22.1",
    "tensorly==0.6.0",
    "OpenML>=0.9.0",
    "mkl>=1.0.0",
]

setuptools.setup(
    name="oboe",
    version="0.2.0",
    author="<NAME>, <NAME>, <NAME>, <NAME>",
    author_email="<EMAIL>",
    description="An AutoML pipeline selection system to quickly select a promising pipeline for a new dataset.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/udellgroup/oboe",
    project_urls={
        "Bug Tracker": "https://github.com/udellgroup/oboe/issues",
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(exclude='large_files'),
    package_data={'': package_files('oboe/defaults')},
    install_requires=install_requires,
    python_requires=">=3.7",
)
1.71875
2
lab_7/main_4.py
MrLuckUA/python_course
0
12777094
<reponame>MrLuckUA/python_course
#! /usr/bin/python3
# -*- coding: utf-8 -*-


def crypt(line: str) -> str:
    result = str()
    for symbol in line:
        result += chr(ord(symbol) + 1)
    return result


print(crypt(input()))
4.21875
4
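The crypt() function above is a Caesar-style shift of every character by +1 code point. A matching decoder, not part of the original record, simply shifts back by one.
def decrypt(line: str) -> str:
    return "".join(chr(ord(symbol) - 1) for symbol in line)

print(decrypt("ifmmp"))  # -> "hello", since crypt("hello") == "ifmmp"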
tg_utils/checks.py
wanaryytel/tg-utils
6
12777095
from django.conf import settings
from django.core.checks import Warning


def check_production_settings(app_configs, **kwargs):
    issues = []

    if settings.DEBUG:
        return issues

    if not settings.EMAIL_HOST_PASSWORD or 'TODO' in settings.EMAIL_HOST_PASSWORD:
        issues.append(
            Warning(
                "EMAIL_HOST_PASSWORD setting is not set to proper value",
                id='tg_utils.W001',
            )
        )

    if 'TODO' in settings.SITE_URL:
        issues.append(
            Warning(
                "SITE_URL setting is not set to proper value",
                id='tg_utils.W002',
            )
        )

    return issues


def check_sentry_config(app_configs, **kwargs):
    issues = []

    if 'sentry' not in settings.LOGGING['handlers']:
        return issues

    if 'sentry' not in settings.LOGGING['loggers']['']['handlers']:
        issues.append(
            Warning(
                "Sentry logging handler is present but unused",
                hint="Ensure that sentry handler is part of LOGGING['loggers']['']['handlers']",
                id='tg_utils.W011',
            )
        )

    return issues
2.015625
2
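A hedged sketch of how the two check functions above would typically be registered with Django's system-check framework. The AppConfig class name is an assumption; the import path follows the record's file location (tg_utils/checks.py).
from django.apps import AppConfig
from django.core.checks import register, Tags

class TgUtilsConfig(AppConfig):
    name = 'tg_utils'

    def ready(self):
        # Import inside ready() so app loading has finished before registration.
        from tg_utils.checks import check_production_settings, check_sentry_config
        register(check_production_settings, Tags.security, deploy=True)  # runs with `manage.py check --deploy`
        register(check_sentry_config)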
apps/build.py
broadinstitute/ebola-predictor
12
12777096
<filename>apps/build.py #!/usr/bin/env python """ Build a Kivy app for the selected platform. @copyright: The Broad Institute of MIT and Harvard 2015 """ import os, argparse, shutil, platform def build_android(app_dir, build_options): p4a_dist = '' with open("./settings.cfg", "r") as sfile: lines = sfile.readlines() for line in lines: line = line.strip() if not line: continue key, val = line.split("=") if key == "python-for-android": p4a_dist = val propfile = os.path.join(app_dir, "properties.txt") name="Name" version="0.0" package="org.company.dept" orientation="sensor" image_icon="" image_load="" sdk_target="19" sdk_minimum="19" with open(propfile, "r") as pfile: lines = pfile.readlines() for line in lines: line = line.strip() if not line: continue key, val = line.split("=") if key == "name": name = val elif key == "version": version = val elif key == "package": package = val elif key == "orientation": orientation = val elif key == "images.icon": image_icon = val elif key == "images.load": image_load = val elif key == "sdk.target": sdk_target = val elif key == "sdk.minimum": sdk_minimum = val curr_dir = os.getcwd() app_dir = os.path.join(curr_dir, app_dir) icon_file = os.path.join(app_dir, image_icon) load_file = os.path.join(app_dir, image_load) p4a_dir = os.path.abspath(p4a_dist) cmd_str = './build.py --package ' + package + ' --sdk ' + sdk_target + ' --minsdk ' + sdk_minimum + ' --name "' + name + '" --version ' + version + ' --dir ' + app_dir + ' --orientation "' + orientation + '" --icon ' + icon_file + ' --presplash ' + load_file + ' ' + build_options if "release" in build_options: build="release-unsigned" elif "debug" in build_options: build="debug" package_name = name.replace(" ", "") + "-" + version + "-" + build + ".apk" bin_dir = os.path.join(p4a_dir, "bin") src_file = os.path.join(bin_dir, package_name) dst_file = os.path.join(app_dir, "bin", package_name) if os.path.exists(bin_dir): shutil.rmtree(bin_dir) if os.path.exists(dst_file): os.remove(dst_file) os.chdir(p4a_dir) os.system(cmd_str) os.chdir(curr_dir) shutil.copyfile(src_file, dst_file) print "" print "Copied app package from",src_file,"to",dst_file def build_ios(app_dir, build_options): k4ios_dir = '' with open("./settings.cfg", "r") as sfile: lines = sfile.readlines() for line in lines: line = line.strip() if not line: continue key, val = line.split("=") if key == "kivy-ios": k4ios_dir = val propfile = os.path.join(app_dir, "properties.txt") name="Name" with open(propfile, "r") as pfile: lines = pfile.readlines() for line in lines: line = line.strip() if not line: continue key, val = line.split("=") if key == "name": name = val.replace(" ", "") curr_dir = os.getcwd() app_dir = os.path.join(curr_dir, app_dir) cmd_str = './toolchain.py create "' + name + '" ' + app_dir os.chdir(k4ios_dir) os.system(cmd_str) os.chdir(curr_dir) print "" prj_path = os.path.abspath(os.path.join(k4ios_dir, name.lower() + "-ios")) print "XCode project created in " + prj_path parser = argparse.ArgumentParser() parser.add_argument("-d", "--dir", nargs=1, default=["test"], help="App directory") parser.add_argument("-b", "--build", nargs=1, default=["release"], help="Build options") args = parser.parse_args() app_dir = args.dir[0] build_options = args.build[0] if platform.system() == 'Linux': build_android(app_dir, build_options) elif platform.system() == 'Darwin': build_ios(app_dir, build_options) else: print "Unsupported platform."
2.84375
3
updater/updater.py
Cosmic-Chatter/Constellation
0
12777097
<filename>updater/updater.py
"""Module to handle updating Constellation components"""

import urllib.request
1.460938
1
utils/decoder/model.py
gaoyiyeah/ZASR_tensorflow
115
12777098
"""Contains DeepSpeech2 model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import os import time import logging import gzip import copy import numpy as np import inspect from utils.decoder.swig_wrapper import Scorer from utils.decoder.swig_wrapper import ctc_greedy_decoder from utils.decoder.swig_wrapper import ctc_beam_search_decoder_batch class LM_decoder(object): def __init__(self, beam_alpha, beam_beta, language_model_path, vocab_list): """Initialize the external scorer. :param beam_alpha: Parameter associated with language model. :type beam_alpha: float :param beam_beta: Parameter associated with word count. :type beam_beta: float :param language_model_path: Filepath for language model. If it is empty, the external scorer will be set to None, and the decoding method will be pure beam search without scorer. :type language_model_path: basestring|None :param vocab_list: List of tokens in the vocabulary, for decoding. :type vocab_list: list """ if language_model_path != '': print("begin to initialize the external scorer " "for decoding") self._ext_scorer = Scorer(beam_alpha, beam_beta, language_model_path, vocab_list) lm_char_based = self._ext_scorer.is_character_based() lm_max_order = self._ext_scorer.get_max_order() lm_dict_size = self._ext_scorer.get_dict_size() print("language model: " "is_character_based = %d," % lm_char_based + " max_order = %d," % lm_max_order + " dict_size = %d" % lm_dict_size) print("end initializing scorer") else: self._ext_scorer = None print("no language model provided, " "decoding by pure beam search without scorer.") def decode_batch_beam_search(self, probs_split, beam_alpha, beam_beta, beam_size, cutoff_prob, cutoff_top_n, vocab_list, num_processes): """Decode by beam search for a batch of probs matrix input. :param probs_split: List of 2-D probability matrix, and each consists of prob vectors for one speech utterancce. :param probs_split: List of matrix :param beam_alpha: Parameter associated with language model. :type beam_alpha: float :param beam_beta: Parameter associated with word count. :type beam_beta: float :param beam_size: Width for Beam search. :type beam_size: int :param cutoff_prob: Cutoff probability in pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in vocabulary will be used in beam search, default 40. :type cutoff_top_n: int :param vocab_list: List of tokens in the vocabulary, for decoding. :type vocab_list: list :param num_processes: Number of processes (CPU) for decoder. :type num_processes: int :return: List of transcription texts. :rtype: List of basestring """ if self._ext_scorer != None: self._ext_scorer.reset_params(beam_alpha, beam_beta) # beam search decode num_processes = min(num_processes, np.shape(probs_split)[0]) beam_search_results = ctc_beam_search_decoder_batch( probs_split=probs_split, vocabulary=vocab_list, beam_size=beam_size, num_processes=num_processes, ext_scoring_func=self._ext_scorer, cutoff_prob=cutoff_prob, cutoff_top_n=cutoff_top_n) results = [result[0][1] for result in beam_search_results] return results def _adapt_feeding_dict(self, feeding_dict): """Adapt feeding dict according to network struct. To remove impacts from padding part, we add scale_sub_region layer and sub_seq layer. For sub_seq layer, 'sequence_offset' and 'sequence_length' fields are appended. 
For each scale_sub_region layer 'convN_index_range' field is appended. :param feeding_dict: Feeding is a map of field name and tuple index of the data that reader returns. :type feeding_dict: dict|list :return: Adapted feeding dict. :rtype: dict|list """ adapted_feeding_dict = copy.deepcopy(feeding_dict) if isinstance(feeding_dict, dict): adapted_feeding_dict["sequence_offset"] = len(adapted_feeding_dict) adapted_feeding_dict["sequence_length"] = len(adapted_feeding_dict) for i in xrange(self._num_conv_layers): adapted_feeding_dict["conv%d_index_range" %i] = \ len(adapted_feeding_dict) elif isinstance(feeding_dict, list): adapted_feeding_dict.append("sequence_offset") adapted_feeding_dict.append("sequence_length") for i in xrange(self._num_conv_layers): adapted_feeding_dict.append("conv%d_index_range" % i) else: raise ValueError("Type of feeding_dict is %s, not supported." % type(feeding_dict)) return adapted_feeding_dict def _adapt_data(self, data): """Adapt data according to network struct. For each convolution layer in the conv_group, to remove impacts from padding data, we can multiply zero to the padding part of the outputs of each batch normalization layer. We add a scale_sub_region layer after each batch normalization layer to reset the padding data. For rnn layers, to remove impacts from padding data, we can truncate the padding part before output data feeded into the first rnn layer. We use sub_seq layer to achieve this. :param data: Data from data_provider. :type data: list|function :return: Adapted data. :rtype: list|function """ def adapt_instance(instance): if len(instance) < 2 or len(instance) > 3: raise ValueError("Size of instance should be 2 or 3.") padded_audio = instance[0] text = instance[1] # no padding part if len(instance) == 2: audio_len = padded_audio.shape[1] else: audio_len = instance[2] adapted_instance = [padded_audio, text] # Stride size for conv0 is (3, 2) # Stride size for conv1 to convN is (1, 2) # Same as the network, hard-coded here padded_conv0_h = (padded_audio.shape[0] - 1) // 2 + 1 padded_conv0_w = (padded_audio.shape[1] - 1) // 3 + 1 valid_w = (audio_len - 1) // 3 + 1 adapted_instance += [ [0], # sequence offset, always 0 [valid_w], # valid sequence length # Index ranges for channel, height and width # Please refer scale_sub_region layer to see details [1, 32, 1, padded_conv0_h, valid_w + 1, padded_conv0_w] ] pre_padded_h = padded_conv0_h for i in xrange(self._num_conv_layers - 1): padded_h = (pre_padded_h - 1) // 2 + 1 pre_padded_h = padded_h adapted_instance += [ [1, 32, 1, padded_h, valid_w + 1, padded_conv0_w] ] return adapted_instance if isinstance(data, list): return map(adapt_instance, data) elif inspect.isgeneratorfunction(data): def adapted_reader(): for instance in data(): yield map(adapt_instance, instance) return adapted_reader else: raise ValueError("Type of data is %s, not supported." % type(data)) def _create_parameters(self, model_path=None): """Load or create model parameters.""" if model_path is None: self._parameters = paddle.parameters.create(self._loss) else: self._parameters = paddle.parameters.Parameters.from_tar( gzip.open(model_path)) def _create_network(self, vocab_size, num_conv_layers, num_rnn_layers, rnn_layer_size, use_gru, share_rnn_weights): """Create data layers and model network.""" # paddle.data_type.dense_array is used for variable batch input. # The size 161 * 161 is only an placeholder value and the real shape # of input batch data will be induced during training. 
audio_data = paddle.layer.data( name="audio_spectrogram", type=paddle.data_type.dense_array(161 * 161)) text_data = paddle.layer.data( name="transcript_text", type=paddle.data_type.integer_value_sequence(vocab_size)) seq_offset_data = paddle.layer.data( name='sequence_offset', type=paddle.data_type.integer_value_sequence(1)) seq_len_data = paddle.layer.data( name='sequence_length', type=paddle.data_type.integer_value_sequence(1)) index_range_datas = [] for i in xrange(num_rnn_layers): index_range_datas.append( paddle.layer.data( name='conv%d_index_range' % i, type=paddle.data_type.dense_vector(6))) self._log_probs, self._loss = deep_speech_v2_network( audio_data=audio_data, text_data=text_data, seq_offset_data=seq_offset_data, seq_len_data=seq_len_data, index_range_datas=index_range_datas, dict_size=vocab_size, num_conv_layers=num_conv_layers, num_rnn_layers=num_rnn_layers, rnn_size=rnn_layer_size, use_gru=use_gru, share_rnn_weights=share_rnn_weights)
2.53125
3
src/exojax/spec/atomll.py
dcmvdbekerom/exojax
0
12777099
<reponame>dcmvdbekerom/exojax<gh_stars>0 import numpy as np from exojax.spec import atomllapi from exojax.utils.constants import ccgs, m_u, kB, hcperk, ecgs, hcgs, Rcgs, a0, eV2wn import jax.numpy as jnp def Sij0(A, gupper, nu_lines, elower, QTref_284, QTmask, Irwin=False): """Reference Line Strength in Tref=296K, S0. Note: Tref=296K Args: A: Einstein coefficient (s-1) gupper: the upper state statistical weight nu_lines: line center wavenumber (cm-1) elower: elower QTref_284: partition function Q(Tref) QTmask: mask to identify a rows of QTref_284 to apply for each line Irwin: if True(1), the partition functions of Irwin1981 is used, otherwise those of Barklem&Collet2016 Returns: Sij(T): Line strength (cm) """ Tref=296.0 #Assign Q(Tref) for each line QTref = np.zeros_like(QTmask, dtype=float) for i, mask in enumerate(QTmask): QTref[i] = QTref_284[mask] #Use Irwin_1981 for Fe I (mask==76) #test211013Tako if Irwin==True: QTref[jnp.where(QTmask == 76)[0]] = atomllapi.partfn_Fe(Tref) S0 = -A*gupper*np.exp(-hcperk*elower/Tref)*np.expm1(-hcperk*nu_lines/Tref)\ /(8.0*np.pi*ccgs*nu_lines**2*QTref) return(S0) def gamma_vald3(T, PH, PHH, PHe, ielem, iion, \ nu_lines, elower, eupper, atomicmass, ionE, \ gamRad, gamSta, vdWdamp, enh_damp=1.0): #, vdW_meth="V"): """HWHM of Lorentzian (cm-1) caluculated as gamma/(4*pi*c) [cm-1] for lines with the van der Waals gamma in the line list (VALD or Kurucz), otherwise estimated according to the Unsoeld (1955) Args: T: temperature (K) PH: hydrogen pressure (bar) #1 bar = 1e6 dyn/cm2 PHH: H2 molecule pressure (bar) PHe: helium pressure (bar) ielem: atomic number (e.g., Fe=26) iion: ionized level (e.g., neutral=1, singly ionized=2, etc.) nu_lines: transition waveNUMBER in [cm-1] (NOT frequency in [s-1]) elower: excitation potential (lower level) [cm-1] eupper: excitation potential (upper level) [cm-1] atomicmass: atomic mass [amu] ionE: ionization potential [eV] gamRad: log of gamma of radiation damping (s-1) (https://www.astro.uu.se/valdwiki/Vald3Format) gamSta: log of gamma of Stark damping (s-1) vdWdamp: log of (van der Waals damping constant / neutral hydrogen number) (s-1) enh_damp: empirical "enhancement factor" for classical Unsoeld's damping constant cf.) This coefficient (enh_damp) depends on each species in some codes such as Turbospectrum. #tako210917 chi_lam (=h*nu=1.2398e4/wvl[AA]): energy of a photon in the line (computed) C6: interaction constant (Eq.11.17 in Gray2005) (computed) logg6: log(gamma6) (Eq.11.29 in Gray2005) (computed) gam6H: 17*v**(0.6)*C6**(0.4)*N (computed) (v:relative velocity, N:number density of neutral perturber) Texp: temperature dependency (gamma6 \sim T**((1-α)/2) ranging 0.3–0.4) (computed) Returns: gamma: pressure gamma factor (cm-1) Note: "/(4*np.pi*ccgs)" means: damping constant -> HWHM of Lorentzian in [cm^-1] * Reference of van der Waals damping constant (pressure/collision gamma): * Unsöld1955: https://ui.adsabs.harvard.edu/abs/1955psmb.book.....U * Kurucz+1981: https://ui.adsabs.harvard.edu/abs/1981SAOSR.391.....K * Barklem+1998: https://ui.adsabs.harvard.edu/abs/1998MNRAS.300..863B * Barklem+2000: https://ui.adsabs.harvard.edu/abs/2000A&AS..142..467B * Gray+2005: https://ui.adsabs.harvard.edu/abs/2005oasp.book.....G """ gamRad = jnp.where(gamRad==0., -99, gamRad) gamSta = jnp.where(gamSta==0., -99, gamSta) chi_lam = nu_lines/eV2wn #[cm-1] -> [eV] chi = elower/eV2wn #[cm-1] -> [eV] C6 = 0.3e-30 * ((1/(ionE-chi-chi_lam)**2) - (1/(ionE-chi)**2)) #possibly with "ION**2" factor? 
gam6H = 1e20 * C6**0.4 * PH*1e6 / T**0.7 gam6He = 1e20 * C6**0.4 * PHe*1e6*0.41336 / T**0.7 gam6HH = 1e20 * C6**0.4 * PHH*1e6*0.85 / T**0.7 gamma6 = enh_damp * (gam6H + gam6He + gam6HH) gamma_case1 = (gamma6 + 10**gamRad + 10**gamSta) /(4*np.pi*ccgs) #Avoid nan (appeared by np.log10(negative C6)) gamma_case1 = jnp.where(jnp.isnan(gamma_case1), 0., gamma_case1) Texp = 0.38 #Barklem+2000 gam6H = 10**vdWdamp * (T/10000.)**Texp * PH*1e6 /(kB*T) gam6He = 10**vdWdamp * (T/10000.)**Texp * PHe*1e6*0.41336 /(kB*T) gam6HH = 10**vdWdamp * (T/10000.)**Texp * PHH*1e6*0.85 /(kB*T) gamma6 = gam6H + gam6He + gam6HH gamma_case2 = (gamma6 + 10**gamRad + 10**gamSta) /(4*np.pi*ccgs) #Adopt case2 for lines with vdW in VALD, otherwise Case1 gamma = (gamma_case1 * jnp.where(vdWdamp>=0., 1, 0) + gamma_case2 * jnp.where(vdWdamp<0., 1, 0)) return(gamma) def gamma_uns(T, PH, PHH, PHe, ielem, iion, \ nu_lines, elower, eupper, atomicmass, ionE, \ gamRad, gamSta, vdWdamp, enh_damp=1.0): #, vdW_meth="U"): """HWHM of Lorentzian (cm-1) estimated with the classical approximation by Unsoeld (1955) Args: T: temperature (K) PH: hydrogen pressure (bar) #1 bar = 1e6 dyn/cm2 PHH: H2 molecule pressure (bar) PHe: helium pressure (bar) ielem: atomic number (e.g., Fe=26) iion: ionized level (e.g., neutral=1, singly ionized=2, etc.) nu_lines: transition waveNUMBER in [cm-1] (NOT frequency in [s-1]) elower: excitation potential (lower level) [cm-1] eupper: excitation potential (upper level) [cm-1] atomicmass: atomic mass [amu] ionE: ionization potential [eV] gamRad: log of gamma of radiation damping (s-1) #(https://www.astro.uu.se/valdwiki/Vald3Format) gamSta: log of gamma of Stark damping (s-1) vdWdamp: log of (van der Waals damping constant / neutral hydrogen number) (s-1) enh_damp: empirical "enhancement factor" for classical Unsoeld's damping constant cf.) This coefficient (enh_damp) depends on each species in some codes such as Turbospectrum. #tako210917 chi_lam (=h*nu=1.2398e4/wvl[AA]): energy of a photon in the line (computed) C6: interaction constant (Eq.11.17 in Gray2005) (computed) logg6: log(gamma6) (Eq.11.29 in Gray2005) (computed) gam6H: 17*v**(0.6)*C6**(0.4)*N (v:relative velocity, N:number density of neutral perturber) (computed) Texp: temperature dependency (gamma6 \sim T**((1-α)/2) ranging 0.3–0.4)(computed) Returns: gamma: pressure gamma factor (cm-1) Note: "/(4*np.pi*ccgs)" means: damping constant -> HWHM of Lorentzian in [cm^-1] * Reference of van der Waals damping constant (pressure/collision gamma): * Unsöld1955: https://ui.adsabs.harvard.edu/abs/1955psmb.book.....U * Kurucz+1981: https://ui.adsabs.harvard.edu/abs/1981SAOSR.391.....K * Barklem+1998: https://ui.adsabs.harvard.edu/abs/1998MNRAS.300..863B * Barklem+2000: https://ui.adsabs.harvard.edu/abs/2000A&AS..142..467B * Gray+2005: https://ui.adsabs.harvard.edu/abs/2005oasp.book.....G """ gamRad = jnp.where(gamRad==0., -99, gamRad) gamSta = jnp.where(gamSta==0., -99, gamSta) chi_lam = nu_lines/eV2wn #[cm-1] -> [eV] chi = elower/eV2wn #[cm-1] -> [eV] C6 = 0.3e-30 * ((1/(ionE-chi-chi_lam)**2) - (1/(ionE-chi)**2)) #possibly with "ION**2" factor? 
gam6H = 1e20 * C6**0.4 * PH*1e6 / T**0.7 gam6He = 1e20 * C6**0.4 * PHe*1e6*0.41336 / T**0.7 gam6HH = 1e20 * C6**0.4 * PHH*1e6*0.85 / T**0.7 gamma6 = enh_damp * (gam6H + gam6He + gam6HH) gamma_case1 = (gamma6 + 10**gamRad + 10**gamSta) /(4*np.pi*ccgs) #Avoid nan (appeared by np.log10(negative C6)) gamma = jnp.where(jnp.isnan(gamma_case1), 0., gamma_case1) return(gamma) def gamma_KA3(T, PH, PHH, PHe, ielem, iion, \ nu_lines, elower, eupper, atomicmass, ionE, \ gamRad, gamSta, vdWdamp, enh_damp=1.0): #, vdW_meth="KA3"): """HWHM of Lorentzian (cm-1) caluculated with the 3rd equation in p.4 of Kurucz&Avrett1981 Args: T: temperature (K) PH: hydrogen pressure (bar) #1 bar = 1e6 dyn/cm2 PHH: H2 molecule pressure (bar) PHe: helium pressure (bar) ielem: atomic number (e.g., Fe=26) iion: ionized level (e.g., neutral=1, singly ionized=2, etc.) nu_lines: transition waveNUMBER in [cm-1] (NOT frequency in [s-1]) elower: excitation potential (lower level) [cm-1] eupper: excitation potential (upper level) [cm-1] atomicmass: atomic mass [amu] ionE: ionization potential [eV] gamRad: log of gamma of radiation damping (s-1) #(https://www.astro.uu.se/valdwiki/Vald3Format) gamSta: log of gamma of Stark damping (s-1) vdWdamp: log of (van der Waals damping constant / neutral hydrogen number) (s-1) enh_damp: empirical "enhancement factor" for classical Unsoeld's damping constant cf.) This coefficient (enh_damp) depends on each species in some codes such as Turbospectrum. #tako210917 chi_lam (=h*nu=1.2398e4/wvl[AA]): energy of a photon in the line (computed) C6: interaction constant (Eq.11.17 in Gray2005) (computed) logg6: log(gamma6) (Eq.11.29 in Gray2005) (computed) gam6H: 17*v**(0.6)*C6**(0.4)*N (v:relative velocity, N:number density of neutral perturber) (computed) Texp: temperature dependency (gamma6 \sim T**((1-α)/2) ranging 0.3–0.4) (computed) Returns: gamma: pressure gamma factor (cm-1) Note: "/(4*np.pi*ccgs)" means: damping constant -> HWHM of Lorentzian in [cm^-1] * Reference of van der Waals damping constant (pressure/collision gamma): * Kurucz+1981: https://ui.adsabs.harvard.edu/abs/1981SAOSR.391.....K * Barklem+1998: https://ui.adsabs.harvard.edu/abs/1998MNRAS.300..863B * Barklem+2000: https://ui.adsabs.harvard.edu/abs/2000A&AS..142..467B * Gray+2005: https://ui.adsabs.harvard.edu/abs/2005oasp.book.....G """ gamRad = jnp.where(gamRad==0., -99, gamRad) gamSta = jnp.where(gamSta==0., -99, gamSta) Zeff = iion #effective charge (=1 for Fe I, 2 for Fe II, etc.) n_eff2_upper = Rcgs * Zeff**2 / (ionE*eV2wn - eupper) #Square of effective quantum number of the upper state n_eff2_lower = Rcgs * Zeff**2 / (ionE*eV2wn - elower) #Mean of square of radius (in units of a0, the radius of the first Bohr orbit; p.320 in Aller (1963); https://ui.adsabs.harvard.edu/abs/1963aass.book.....A) msr_upper_iron = (45-ielem)/Zeff #for iron group elements (5th equation in Kurucz&Avrett1981) msr_upper_noiron = jnp.where(n_eff2_upper>0., (2.5 * (n_eff2_upper/Zeff)**2), 25) #for other elements (6th equation in Kurucz&Avrett1981) msr_upper = jnp.where((ielem >= 26) & (ielem <= 28), msr_upper_iron, msr_upper_noiron) msr_lower = 2.5 * (n_eff2_lower/Zeff)**2 gap_msr = msr_upper - msr_lower gap_msr_rev = gap_msr * jnp.where(gap_msr < 0, -1., 1.) 
#Reverse upper and lower if necessary (TBC) #test2109\\\\ gap_msr_rev_cm = a0**2 * gap_msr_rev #[Bohr radius -> cm] gam6H = 17 * (8*kB*T*(1./atomicmass+1./1.)/(np.pi*m_u))**0.3 \ * (6.63e-25*ecgs**2/hcgs*(gap_msr_rev_cm))**0.4 \ * PH*1e6 /(kB*T) gam6He = 17 * (8*kB*T*(1./atomicmass+1./4.)/(np.pi*m_u))**0.3 \ * (2.07e-25*ecgs**2/hcgs*(gap_msr_rev_cm))**0.4 \ * PHe*1e6 /(kB*T) gam6HH = 17 * (8*kB*T*(1./atomicmass+1./2.)/(np.pi*m_u))**0.3 \ * (8.04e-25*ecgs**2/hcgs*(gap_msr_rev_cm))**0.4 \ * PHH*1e6 /(kB*T) gamma6 = gam6H + gam6He + gam6HH gamma = (gamma6 + 10**gamRad + 10**gamSta) /(4*np.pi*ccgs) return(gamma) def gamma_KA4(T, PH, PHH, PHe, ielem, iion, \ nu_lines, elower, eupper, atomicmass, ionE, \ gamRad, gamSta, vdWdamp, enh_damp=1.0): #, vdW_meth="KA4"): """HWHM of Lorentzian (cm-1) caluculated with the 4rd equation in p.4 of Kurucz&Avrett1981 Args: T: temperature (K) PH: hydrogen pressure (bar) #1 bar = 1e6 dyn/cm2 PHH: H2 molecule pressure (bar) PHe: helium pressure (bar) ielem: atomic number (e.g., Fe=26) iion: ionized level (e.g., neutral=1, singly ionized=2, etc.) nu_lines: transition waveNUMBER in [cm-1] (NOT frequency in [s-1]) elower: excitation potential (lower level) [cm-1] eupper: excitation potential (upper level) [cm-1] atomicmass: atomic mass [amu] ionE: ionization potential [eV] gamRad: log of gamma of radiation damping (s-1) #(https://www.astro.uu.se/valdwiki/Vald3Format) gamSta: log of gamma of Stark damping (s-1) vdWdamp: log of (van der Waals damping constant / neutral hydrogen number) (s-1) enh_damp: empirical "enhancement factor" for classical Unsoeld's damping constant #cf.) This coefficient (enh_damp) depends on each species in some codes such as Turbospectrum. #tako210917 chi_lam (=h*nu=1.2398e4/wvl[AA]): energy of a photon in the line (computed) C6: interaction constant (Eq.11.17 in Gray2005) (computed) logg6: log(gamma6) (Eq.11.29 in Gray2005) (computed) gam6H: 17*v**(0.6)*C6**(0.4)*N (v:relative velocity, N:number density of neutral perturber) (computed) Texp: temperature dependency (gamma6 \sim T**((1-α)/2) ranging 0.3–0.4) (computed) Returns: gamma: pressure gamma factor (cm-1) Note: Approximation of case4 assume "that the atomic weight A is much greater than 4, and that the mean-square-radius of the lower level <r^2>_lo is small compared to <r^2>_up". "/(4*np.pi*ccgs)" means: damping constant -> HWHM of Lorentzian in [cm^-1] * Reference of van der Waals damping constant (pressure/collision gamma): * Kurucz+1981: https://ui.adsabs.harvard.edu/abs/1981SAOSR.391.....K * Barklem+1998: https://ui.adsabs.harvard.edu/abs/1998MNRAS.300..863B * Barklem+2000: https://ui.adsabs.harvard.edu/abs/2000A&AS..142..467B * Gray+2005: https://ui.adsabs.harvard.edu/abs/2005oasp.book.....G """ gamRad = jnp.where(gamRad==0., -99, gamRad) gamSta = jnp.where(gamSta==0., -99, gamSta) Zeff = iion #effective charge (=1 for Fe I, 2 for Fe II, etc.) 
n_eff2_upper = Rcgs * Zeff**2 / (ionE*eV2wn - eupper) #Square of effective quantum number of the upper state #Mean of square of radius (in units of a0, the radius of the first Bohr orbit; p.320 in Aller (1963); https://ui.adsabs.harvard.edu/abs/1963aass.book.....A) msr_upper_iron = (45-ielem)/Zeff #for iron group elements (5th equation in Kurucz&Avrett1981) msr_upper_noiron = jnp.where(n_eff2_upper>0., (2.5 * (n_eff2_upper/Zeff)**2), 25) #for other elements (6th equation in Kurucz&Avrett1981) msr_upper = jnp.where((ielem >= 26) & (ielem <= 28), msr_upper_iron, msr_upper_noiron) gamma6 = 4.5e-9 * msr_upper**0.4 \ * ((PH + 0.42*PHe + 0.85*PHH)*1e6/(kB*T)) * (T/10000.)**0.3 gamma = (gamma6 + 10**gamRad + 10**gamSta) /(4*np.pi*ccgs) return(gamma) def gamma_KA3s(T, PH, PHH, PHe, ielem, iion, \ nu_lines, elower, eupper, atomicmass, ionE, \ gamRad, gamSta, vdWdamp, enh_damp=1.0): #, vdW_meth="KA3s"): (supplemetary) """(supplemetary:) HWHM of Lorentzian (cm-1) caluculated with the 3rd equation in p.4 of Kurucz&Avrett1981 but without discriminating iron group elements Args: T: temperature (K) PH: hydrogen pressure (bar) #1 bar = 1e6 dyn/cm2 PHH: H2 molecule pressure (bar) PHe: helium pressure (bar) ielem: atomic number (e.g., Fe=26) iion: ionized level (e.g., neutral=1, singly ionized=2, etc.) nu_lines: transition waveNUMBER in [cm-1] (NOT frequency in [s-1]) elower: excitation potential (lower level) [cm-1] eupper: excitation potential (upper level) [cm-1] atomicmass: atomic mass [amu] ionE: ionization potential [eV] gamRad: log of gamma of radiation damping (s-1) #(https://www.astro.uu.se/valdwiki/Vald3Format) gamSta: log of gamma of Stark damping (s-1) vdWdamp: log of (van der Waals damping constant / neutral hydrogen number) (s-1) enh_damp: empirical "enhancement factor" for classical Unsoeld's damping constant cf.) This coefficient (enh_damp) depends on each species in some codes such as Turbospectrum. #tako210917 chi_lam (=h*nu=1.2398e4/wvl[AA]): energy of a photon in the line (computed) C6: interaction constant (Eq.11.17 in Gray2005) (computed) logg6: log(gamma6) (Eq.11.29 in Gray2005) (computed) gam6H: 17*v**(0.6)*C6**(0.4)*N (v:relative velocity, N:number density of neutral perturber) (computed) Texp: temperature dependency (gamma6 \sim T**((1-α)/2) ranging 0.3–0.4)(computed) Returns: gamma: pressure gamma factor (cm-1) Note: "/(4*np.pi*ccgs)" means: damping constant -> HWHM of Lorentzian in [cm^-1] * Reference of van der Waals damping constant (pressure/collision gamma): * Kurucz+1981: https://ui.adsabs.harvard.edu/abs/1981SAOSR.391.....K * Barklem+1998: https://ui.adsabs.harvard.edu/abs/1998MNRAS.300..863B * Barklem+2000: https://ui.adsabs.harvard.edu/abs/2000A&AS..142..467B * Gray+2005: https://ui.adsabs.harvard.edu/abs/2005oasp.book.....G """ gamRad = jnp.where(gamRad==0., -99, gamRad) gamSta = jnp.where(gamSta==0., -99, gamSta) Zeff = iion #effective charge (=1 for Fe I, 2 for Fe II, etc.) 
n_eff2_upper = Rcgs * Zeff**2 / (ionE*eV2wn - eupper) #Square of effective quantum number of the upper state n_eff2_lower = Rcgs * Zeff**2 / (ionE*eV2wn - elower) #Mean of square of radius (in units of a0, the radius of the first Bohr orbit; p.320 in Aller (1963); https://ui.adsabs.harvard.edu/abs/1963aass.book.....A) msr_upper_noiron = jnp.where(n_eff2_upper>0., (2.5 * (n_eff2_upper/Zeff)**2), 25) #for other elements (6th equation in Kurucz&Avrett1981) msr_upper = msr_upper_noiron msr_lower = 2.5 * (n_eff2_lower/Zeff)**2 gap_msr = msr_upper - msr_lower gap_msr_rev = gap_msr * jnp.where(gap_msr < 0, -1., 1.) #Reverse upper and lower if necessary (TBC) #test2109\\\\ gap_msr_rev_cm = a0**2 * gap_msr_rev #[Bohr radius -> cm] gam6H = 17 * (8*kB*T*(1./atomicmass+1./1.)/(np.pi*m_u))**0.3 \ * (6.63e-25*ecgs**2/hcgs*(gap_msr_rev_cm))**0.4 \ * PH*1e6 /(kB*T) gam6He = 17 * (8*kB*T*(1./atomicmass+1./4.)/(np.pi*m_u))**0.3 \ * (2.07e-25*ecgs**2/hcgs*(gap_msr_rev_cm))**0.4 \ * PHe*1e6 /(kB*T) gam6HH = 17 * (8*kB*T*(1./atomicmass+1./2.)/(np.pi*m_u))**0.3 \ * (8.04e-25*ecgs**2/hcgs*(gap_msr_rev_cm))**0.4 \ * PHH*1e6 /(kB*T) gamma6 = gam6H + gam6He + gam6HH gamma = (gamma6 + 10**gamRad + 10**gamSta) /(4*np.pi*ccgs) return(gamma)
2.015625
2
tests/boards/utils.py
trickeydan/j5-dev
10
12777100
"""Utility classes for testing.""" from typing import Optional, Set, Type from j5.backends import Backend from j5.boards import Board from j5.components import Component class MockBoard(Board): """A testing board with little to no functionality.""" def __init__(self, serial: str): self._serial = serial self._safe = False @property def name(self) -> str: """Get the name of this board.""" return "Testing Board" @property def serial_number(self) -> str: """Get the serial number of this board.""" return self._serial @property def firmware_version(self) -> Optional[str]: """Get the firmware version of this board.""" return None def make_safe(self) -> None: """Make this board safe.""" self._safe = True @staticmethod def supported_components() -> Set[Type[Component]]: """List the types of component supported by this Board.""" return set() class MockBoardWithConstructor(MockBoard): """A testing board with a constructor.""" def __init__(self, test_param: str, another_param: str, one_that_defaults: bool = True) -> None: self.test_param = test_param self.another_param = another_param self.one_that_defaults = one_that_defaults class NoBoardMockBackend(Backend): """This backend never finds any testing boards.""" board = MockBoard @property def firmware_version(self) -> Optional[str]: """The firmware version of the board.""" return None @classmethod def discover(cls) -> Set[Board]: """Discover boards available on this backend.""" return set() class OneBoardMockBackend(Backend): """This backend finds exactly one.""" board = MockBoard @property def firmware_version(self) -> Optional[str]: """The firmware version of the board.""" return None @classmethod def discover(cls) -> Set[Board]: """Discover boards available on this backend.""" return {MockBoard("TESTSERIAL1")} class TwoBoardsMockBackend(Backend): """This backend finds exactly two.""" board = MockBoard @property def firmware_version(self) -> Optional[str]: """The firmware version of the board.""" return None @classmethod def discover(cls) -> Set[Board]: """Discover boards available on this backend.""" return {MockBoard("TESTSERIAL1"), MockBoard("TESTSERIAL2")}
2.984375
3
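A usage sketch for the mocks above, assuming the j5 version these test utilities target; the import path mirrors the tests/boards/utils.py location and is an assumption.

# Hypothetical usage sketch; requires j5 plus the mock classes defined above.
from tests.boards.utils import MockBoard, NoBoardMockBackend, TwoBoardsMockBackend

board = MockBoard("TESTSERIAL1")
assert board.serial_number == "TESTSERIAL1"
board.make_safe()                                   # sets the internal _safe flag

assert NoBoardMockBackend.discover() == set()       # this backend never finds boards
assert len(TwoBoardsMockBackend.discover()) == 2    # two boards with distinct serials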
Agents/NetworkAgent/network/agent.py
Entek-Technical-Services/BEMOSS3.5
73
12777101
# -*- coding: utf-8 -*- ''' Copyright (c) 2016, Virginia Tech All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of the FreeBSD Project. This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the United States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their employees, nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty, express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe privately owned rights. Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise does not necessarily constitute or imply its endorsement, recommendation, favoring by the United States Government or any agency thereof, or Virginia Tech - Advanced Research Institute. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof. 
VIRGINIA TECH – ADVANCED RESEARCH INSTITUTE under Contract DE-EE0006352 #__author__ = "BEMOSS Team" #__credits__ = "" #__version__ = "2.0" #__maintainer__ = "BEMOSS Team" #__email__ = "<EMAIL>" #__website__ = "www.bemoss.org" #__created__ = "2014-09-12 12:04:50" #__lastUpdated__ = "2016-03-14 11:23:33" ''' import base64 import datetime import json import logging from subprocess import Popen, PIPE import subprocess import uuid import sys import psycopg2 import re import settings from bemoss_lib.utils import find_own_ip from bemoss_lib.utils.BEMOSS_globals import * from bemoss_lib.utils.agentstats import agentstats from bemoss_lib.utils.catcherror import catcherror from volttron import platform from volttron.platform.agent import utils from volttron.platform.vip.agent import Agent, Core, PubSub, compat from bemoss_lib.utils import db_helper from bemoss_lib.utils.BEMOSSAgent import BEMOSSAgent import random from bemoss_lib.utils import db_helper #initiliazation utils.setup_logging() _log = logging.getLogger(__name__) Agents_DIR = settings.Agents_DIR clock_time = 20 #frequency of polling nodes Agents_Launch_DIR = settings.Agents_Launch_DIR building_name = settings.PLATFORM['node']['building_name'] db_database = settings.DATABASES['default']['NAME'] my_ip_address = find_own_ip.getIPs()[-1] #use the last IP in the list for host ip debug_agent = settings.DEBUG host_name = settings.PLATFORM['node']['name'] db_host = settings.DATABASES['default']['HOST'] db_port = settings.DATABASES['default']['PORT'] db_user = settings.DATABASES['default']['USER'] db_password = settings.DATABASES['default']['PASSWORD'] db_table_node_info = settings.DATABASES['default']['TABLE_node_info'] db_table_node_device = settings.DATABASES['default']['TABLE_node_device'] db_table_device_info = settings.DATABASES['default']['TABLE_device_info'] db_table_device_data = settings.DATABASES['default']['TABLE_device'] db_table_active_alert = settings.DATABASES['default']['TABLE_active_alert'] db_table_device_type = settings.DATABASES['default']['TABLE_device_type'] db_table_bemoss_notify = settings.DATABASES['default']['TABLE_bemoss_notify'] db_table_alerts_notificationchanneladdress = settings.DATABASES['default']['TABLE_alerts_notificationchanneladdress'] db_table_temp_time_counter = settings.DATABASES['default']['TABLE_temp_time_counter'] db_table_temp_failure_time = settings.DATABASES['default']['TABLE_temp_failure_time'] db_table_priority = settings.DATABASES['default']['TABLE_priority'] node_type = settings.PLATFORM['node']['type'] node_name = settings.PLATFORM['node']['name'] my_zone = settings.PLATFORM['node']['zone'] if node_type == "core": node_offline_timeout = settings.PLATFORM['node']['node_offline_timeout'] node_monitor_time = settings.PLATFORM['node']['node_monitor_time'] else: node_monitor_time = 60000000 # arbitrary large number since it's not required for type "node" node_offline_timeout = 60000000 # arbitrary large number since it's not required for type "node" #offline_event var class NetworkAgent(BEMOSSAgent): def __init__(self, config_path, **kwargs): kwargs['identity'] = 'networkagent' super(NetworkAgent, self).__init__(**kwargs) self.config = utils.load_config(config_path) def get_config(name): try: kwargs.pop(name) except KeyError: return self.config.get(name, '') self.agent_id = get_config('agent_id') self.building_name = building_name self.host_ip_address = my_ip_address self.db_host = db_host self.host_name = host_name self.host_type = settings.PLATFORM['node']['type'] self.host_building_name = 
building_name self.my_node_id = db_helper.get_node_id() print "host_zone_id "+str(self.my_node_id) self.time_sent_notifications = {} @Core.receiver('onsetup') def setup(self,sender,**kwargs): #super(NetworkAgent, self).setup() pass #self.multibuilting_subscribe() #can't do this because VIP isn't setup? @PubSub.subscribe('pubsub', '') def on_match(self, peer, sender, bus, topic, headers, message): '''Use match_all to receive all messages and print them out.''' if sender == 'pubsub.compat': message = compat.unpack_legacy_message(headers, message) _log.debug( "Peer: %r, Sender: %r:, Bus: %r, Topic: %r, Headers: %r, " "Message: %r", peer, sender, bus, topic, headers, message) @PubSub.subscribe('pubsub', 'from/multinodeagent/update_parent') def updateParent(self,peer,sender,bus,topic, headers, message): print "Updating Connection to database" self.curcon.close() # close old connection self.curcon = db_helper.db_connection() # start new connection using new parent_ip # Behavior to listen to message from UI when user change zone of a device @PubSub.subscribe('pubsub', 'to/networkagent/status_change') #@catcherror('Failed ui-to-networkagent') def on_match_change(self,peer,sender,bus,topic, headers, message): #can approve or make pending a list of agents if debug_agent: print "{} >> received the message at {}".format(self.agent_id, datetime.datetime.now()) print "Topic: {topic}".format(topic=topic) print "Headers: {headers}".format(headers=headers) print "Message: {message}\n".format(message=message) for entry in message: volttron_agents_status = agentstats() agent_status = entry[STATUS_CHANGE.AGENT_STATUS] if STATUS_CHANGE.NODE in entry: requested_node_id = int(entry[STATUS_CHANGE.NODE]) agent_id = entry[STATUS_CHANGE.AGENT_ID] is_app=False self.curcon.execute('select agent_id from device_info where agent_id=%s',(agent_id,)) if self.curcon.rowcount: is_app = False self.curcon.execute('select app_agent_id from application_running where app_agent_id=%s',(agent_id,)) if self.curcon.rowcount: is_app = True if 'is_app' in entry: is_app = entry['is_app'] zone_assignment_type = ZONE_ASSIGNMENT_TYPES.TEMPORARY #default zone assignment type if STATUS_CHANGE.NODE_ASSIGNMENT_TYPE in entry: zone_assignment_type = entry[STATUS_CHANGE.NODE_ASSIGNMENT_TYPE] running = False installed = False if agent_id in volttron_agents_status: installed = True running = volttron_agents_status[agent_id] == 'running' if agent_status == 'start' and requested_node_id == self.my_node_id: if not running: if not is_app: self.initialize_devicedata(agent_id) self.launch_agent(agent_id,installed,is_app) elif running and (requested_node_id != self.my_node_id or agent_status == 'stop'): self.stopAgent(agent_id) continue else: continue self.curcon.execute("SELECT assigned_node_id FROM "+db_table_node_device+ " WHERE agent_id=%s", (agent_id,)) if self.curcon.rowcount == 0: # update node_device_table with the new zone of a device self.curcon.execute("INSERT INTO "+db_table_node_device+" (agent_id, assigned_node_id,current_node_id,date_move) " "VALUES(%s,%s,%s,%s)", (agent_id, requested_node_id, requested_node_id, datetime.datetime.now())) self.curcon.commit() else: existing_assigned_node_id = self.curcon.fetchone() if zone_assignment_type == ZONE_ASSIGNMENT_TYPES.PERMANENT: new_assigned_node_id = requested_node_id else: new_assigned_node_id = existing_assigned_node_id self.curcon.execute("UPDATE "+db_table_node_device+" SET assigned_node_id=(%s),current_node_id=(%s), \ date_move=(%s) WHERE agent_id=(%s)",(new_assigned_node_id, 
requested_node_id, datetime.datetime.now(), agent_id)) self.curcon.commit() def initialize_devicedata(self, agent_id): if agent_id in settings.SYSTEM_AGENTS: return #System agents already have configuration json file self.curcon.execute("SELECT data FROM " + db_table_device_data + " WHERE agent_id=%s", (agent_id,)) if self.curcon.rowcount == 0: # no entry made for this agent json_temp = '{}' self.curcon.execute( "INSERT INTO " + db_table_device_data + " (agent_id, data,network_status,last_scanned_time,last_offline_time,dashboard_view) " "VALUES(%s,%s,%s,%s,%s,%s)", (agent_id, json_temp, 'ONLINE', datetime.datetime.now(), None, json_temp)) self.curcon.commit() def launch_agent(self,agent_id, installed, is_app=False): #_launch_file = os.path.join(dir, launch_file) env_path = settings.PROJECT_DIR+'/env/bin/' def is_agent_installed(agent_id): statusreply = subprocess.check_output(env_path+'volttron-ctl status',shell=True) statusreply = statusreply.split('\n') agent_installed = False reg_search_term = " "+agent_id+" " for line in statusreply: #print(line, end='') #write to a next file name outfile match = re.search(reg_search_term, line) if match: # The agent for this device is running agent_installed = True else: pass return agent_installed if agent_id in settings.SYSTEM_AGENTS: #find the case-insensetive match for the folder name files_and_folders = os.listdir(settings.PROJECT_DIR+'/Agents/') folders = [folder for folder in files_and_folders if os.path.isdir(settings.PROJECT_DIR + '/Agents/' + folder)] for folder in folders: if agent_id.lower() == folder.lower(): agent_folder = folder agent_path = "/Agents/" + agent_folder break else: raise ValueError('no matching agent folder exists for system agent:'+ agent_id) _launch_file = settings.PROJECT_DIR + "/Agents/" + agent_folder + "/" + agent_id+'.launch.json' else: _launch_file = Agents_Launch_DIR + "/" + agent_id + '.launch' with open(_launch_file, 'w') as outfile: data = { "agent_id": agent_id, } json.dump(data, outfile) if not is_app: self.curcon.execute("select device_model from device_info where agent_id=(%s)", (agent_id,)) if not self.curcon.rowcount: print "Bad agent_id name" return device_model = self.curcon.fetchone()[0] self.curcon.execute("select agent_type from supported_devices where device_model=(%s)",(device_model,)) if not self.curcon.rowcount: print "Non supported device" return agent_folder = self.curcon.fetchone()[0] agent_path = "/Agents/" + agent_folder else: self.curcon.execute("select app_type_id from application_running where app_agent_id=(%s)", (agent_id,)) app_type_id = self.curcon.fetchone()[0] self.curcon.execute("select app_folder from application_registered where application_id=(%s)", (app_type_id,)) agent_folder = self.curcon.fetchone()[0] agent_path = "/Applications/code/" + agent_folder if not installed: os.system(#". env/bin/activate" env_path + "volttron-pkg package " + settings.PROJECT_DIR + agent_path+";"+\ env_path+"volttron-pkg configure "+platform.get_home()+"/packaged/"+agent_folder.lower()+"-3.0-py2-none-any.whl "+ _launch_file+";"+\ env_path+"volttron-ctl install "+agent_id+"="+platform.get_home()+"/packaged/"+agent_folder.lower()+"-3.0-py2-none-any.whl;"+\ env_path+"volttron-ctl start --tag " + agent_id + ";"+\ env_path+"volttron-ctl status") else: os.system(#". env/bin/activate" env_path+"volttron-ctl start --tag " + agent_id +";"+ env_path+"volttron-ctl status") # p = Popen([ # ". 
env/bin/activate" # env_path + "volttron-ctl auth-update 0 ;"], stdin=PIPE, shell=True) # p.communicate("\n".join(["BEMOSSAGENT", "", "", "BEMOSS_BASIC_AGENT", "", "", "", "", "", "", ""])) print "{} >> has successfully launched {} located in {}".format(self.agent_id, agent_id, dir) def stopAgent(self, agent_id): env_path = settings.PROJECT_DIR + '/env/bin/' os.system( # ". env/bin/activate" env_path + "volttron-ctl stop --tag " + agent_id+"; volttron-ctl remove --tag " + agent_id) _launch_file = Agents_Launch_DIR + "/" + agent_id + '.launch' try: os.remove(_launch_file) except OSError: pass def main(argv=sys.argv): '''Main method called by the eggsecutable.''' try: utils.vip_main(NetworkAgent) except Exception as e: _log.exception('unhandled exception') if __name__ == '__main__': # Entry point for script try: sys.exit(main()) except KeyboardInterrupt: pass
1.101563
1
JupyterNotebooks/JupyterNotebooksLib/cli.py
allihuwa/Content-Production-for-Improving-Cancer-Treatment-Worklflow
1
12777102
import slicer


def cliRunSync(module, node=None, parameters=None, delete_temporary_files=True, update_display=True):
    """Run CLI module. If ipywidgets are installed then it reports progress.
    """

    try:
        from ipywidgets import IntProgress
        from IPython.display import display

        # Asynchronous run, with progress reporting using widget
        node = slicer.cli.run(module, node=node, parameters=parameters, wait_for_completion=False,
                              delete_temporary_files=delete_temporary_files, update_display=update_display)

        import time
        progress = IntProgress()
        display(progress)  # display progress bar
        while node.IsBusy():
            progress.value = node.GetProgress()
            slicer.app.processEvents()
            time.sleep(.3)
        progress.layout.display = 'none'  # hide progress bar

    except ImportError:
        # No widgets, therefore no progress reporting - do just a simple synchronous CLI run
        node = slicer.cli.runSync(module, node=node, parameters=parameters, wait_for_completion=False,
                                  delete_temporary_files=delete_temporary_files, update_display=update_display)

    return node
2.75
3
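A possible call pattern for cliRunSync from inside 3D Slicer's Python console. The module (ThresholdScalarVolume), its parameter names, the 'MRHead' node, and the import path are illustrative assumptions, not something the file above defines.

# Hypothetical example, to be run inside 3D Slicer where the `slicer` module exists.
import slicer
from JupyterNotebooksLib import cliRunSync   # assumed import path for the helper above

inputVolume = slicer.util.getNode('MRHead')  # assumes such a volume is already loaded
outputVolume = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLScalarVolumeNode')
parameters = {
    'InputVolume': inputVolume,
    'OutputVolume': outputVolume,
    'ThresholdValue': 100,                   # parameter names are assumptions
}
cliNode = cliRunSync(slicer.modules.thresholdscalarvolume, parameters=parameters)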
fence/blueprints/login/shib.py
ohsu-comp-bio/fence
0
12777103
import flask
from flask_restful import Resource

from fence.auth import login_user
from fence.blueprints.login.redirect import validate_redirect
from fence.errors import InternalError, Unauthorized
from fence.models import IdentityProvider
from fence.config import config


class ShibbolethLogin(Resource):
    def get(self):
        """
        The login flow is:
        user
        -> {fence}/login/shib?redirect={portal}
        -> user login at {nih_shibboleth_idp}
        -> nih idp POST to fence shibboleth and establish a shibboleth sp session
        -> redirect to {fence}/login/shib/login that sets up fence session
        -> redirect to portal
        """
        redirect_url = flask.request.args.get("redirect")
        validate_redirect(redirect_url)
        if redirect_url:
            flask.session["redirect"] = redirect_url
        actual_redirect = config["BASE_URL"] + "/login/shib/login"
        return flask.redirect(config["SSO_URL"] + actual_redirect)


class ShibbolethCallback(Resource):
    def get(self):
        """
        Complete the shibboleth login.
        """
        if "SHIBBOLETH_HEADER" in config:
            eppn = flask.request.headers.get(config["SHIBBOLETH_HEADER"])
        else:
            raise InternalError("Missing shibboleth header configuration")
        username = eppn.split("!")[-1] if eppn else None
        if username:
            login_user(flask.request, username, IdentityProvider.itrust)
            if flask.session.get("redirect"):
                return flask.redirect(flask.session.get("redirect"))
            return "logged in"
        else:
            raise Unauthorized("Please login")
2.390625
2
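The username extraction in ShibbolethCallback above boils down to taking the part after the last '!' in the eppn header; a minimal standalone illustration with a made-up header value:

# Standalone illustration of the eppn -> username parsing used in ShibbolethCallback.
eppn = "urn:mace:incommon!https://sp.example.org/shibboleth!someuser@example.org"  # invented value
username = eppn.split("!")[-1] if eppn else None
print(username)  # someuser@example.org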
automatewithpython/.practiceprojects/fantasygamedictionary.py
Coalemus/Python-Projects
0
12777104
#!/usr/bin/env python3
'''
Fantasy Game Inventory

You are creating a fantasy video game. The data structure to model the player’s
inventory will be a dictionary where the keys are string values describing the
item in the inventory and the value is an integer value detailing how many of
that item the player has. For example, the dictionary value
{'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}
means the player has 1 rope, 6 torches, 42 gold coins, and so on.

Write a function named displayInventory() that would take any possible
“inventory” and display it like the following:

Inventory:
12 arrow
42 gold coin
1 rope
6 torch
1 dagger
Total number of items: 63

Hint: You can use a for loop to loop through all the keys in a dictionary.

# inventory.py
stuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}

def display_inventory(inventory):
    print("Inventory:")
    item_total = 0
    for k, v in inventory.items():
        print(str(v) + ' ' + k)
        item_total += v
    print("Total number of items: " + str(item_total))

display_inventory(stuff)
'''

'''Fantasy Game Inventory

Imagine that a vanquished dragon’s loot is represented as a list of strings like this:

dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']

Write a function named addToInventory(inventory, addedItems), where the
inventory parameter is a dictionary representing the player’s inventory (like
in the previous project) and the addedItems parameter is a list like
dragonLoot. The addToInventory() function should return a dictionary that
represents the updated inventory. Note that the addedItems list can contain
multiples of the same item. Your code could look something like this:

def addToInventory(inventory, addedItems):
    # your code goes here

inv = {'gold coin': 42, 'rope': 1}
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
inv = addToInventory(inv, dragonLoot)
displayInventory(inv)

The previous program (with your displayInventory() function from the previous
project) would output the following:

Inventory:
45 gold coin
1 rope
1 ruby
1 dagger
Total number of items: 48
'''

stuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}


def display_inventory(inventory):
    print("Inventory:")
    item_total = 0
    for k, v in inventory.items():
        print(str(v) + ' ' + k)
        item_total += v
    print("Total number of items: " + str(item_total))


display_inventory(stuff)

dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']


def addToInventory(inventory, addedItems):
    # Count each looted item into the inventory dictionary.
    for item in addedItems:
        inventory[item] = inventory.get(item, 0) + 1
    return inventory


inv = {'gold coin': 42, 'rope': 1}
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
inv = addToInventory(inv, dragonLoot)
display_inventory(inv)
4.3125
4
tests/test_fft.py
mfkiwl/amalthea-ecp5-usb-sdr
21
12777105
import unittest

import numpy as np

from amalthea.gateware.fft import *


class TestFFT(unittest.TestCase):
    def test_fft(self):
        clk = 60e6
        m = FFT(32, Q(1,10))

        input = [1+0j]*16 + [-1+0j]*16

        # Map input to Complex fixed-point
        data = list(map(lambda x: ComplexConst(m.internal_shape, x).value(), input))

        # Re-order with bit-reversed indices & write to FFT memory
        rev = lambda i: int('{:05b}'.format(i)[::-1], 2)
        m.mem.init = [data[rev(i)] for i in range(len(data))]

        sim = Simulator(m)
        sim.add_clock(1/clk)

        def process():
            yield m.start.eq(1)
            yield
            yield m.start.eq(0)
            yield
            while True:
                if ((yield m.done) == 1):
                    break
                yield

            expected = np.fft.fft(input)
            for i, sig in enumerate(m.mem._array):
                self.assertAlmostEqual(
                    (yield from Complex(shape=m.internal_shape, value=sig).to_complex()),
                    expected[i],
                    delta=0.02
                )

        sim.add_sync_process(process)
        with sim.write_vcd("fft.vcd", "fft.gtkw", traces=[]):
            sim.run()


class TestAddressGenerator(unittest.TestCase):
    def test_agu(self):
        clk = 60e6
        m = AddressGenerator(level_count=5, butterfly_count=16)
        sim = Simulator(m)
        sim.add_clock(1/clk)

        def process():
            expected_addrs = [
                (0,1),   (2,3),   (4,5),   (6,7),   (8,9),   (10,11), (12,13), (14,15),
                (16,17), (18,19), (20,21), (22,23), (24,25), (26,27), (28,29), (30,31),
                (0,2),   (4,6),   (8,10),  (12,14), (16,18), (20,22), (24,26), (28,30),
                (1,3),   (5,7),   (9,11),  (13,15), (17,19), (21,23), (25,27), (29,31),
                (0,4),   (8,12),  (16,20), (24,28), (1,5),   (9,13),  (17,21), (25,29),
                (2,6),   (10,14), (18,22), (26,30), (3,7),   (11,15), (19,23), (27,31),
                (0,8),   (16,24), (1,9),   (17,25), (2,10),  (18,26), (3,11),  (19,27),
                (4,12),  (20,28), (5,13),  (21,29), (6,14),  (22,30), (7,15),  (23,31),
                (0,16),  (1,17),  (2,18),  (3,19),  (4,20),  (5,21),  (6,22),  (7,23),
                (8,24),  (9,25),  (10,26), (11,27), (12,28), (13,29), (14,30), (15,31),
            ]

            for i in range(5):
                for j in range(16):
                    self.assertEqual(((yield m.addr_a), (yield m.addr_b)), expected_addrs[i*16+j])
                    self.assertEqual((yield m.addr_twiddle), ((0xfffffff0 >> i) & 0xf) & j)
                    yield

        sim.add_sync_process(process)
        with sim.write_vcd("agu.vcd", "agu.gtkw", traces=[]):
            sim.run()


class TestTwiddleFactors(unittest.TestCase):
    def test_tf(self):
        clk = 60e6
        m = TwiddleFactors(32, Q(1,15))

        # cast to unsigned
        real = Signal(16)
        imag = Signal(16)

        sim = Simulator(m)
        sim.add_clock(1/clk)

        def process():
            expected_tfs = [
                (0x7fff, 0x0000),
                (0x7d89, 0xe707),
                (0x7641, 0xcf05),
                (0x6a6d, 0xb8e4),
                (0x5a82, 0xa57e),
                (0x471c, 0x9593),
                (0x30fb, 0x89bf),
                (0x18f9, 0x8277),
                (0x0000, 0x8001),
                (0xe707, 0x8277),
                (0xcf05, 0x89bf),
                (0xb8e4, 0x9593),
                (0xa57e, 0xa57e),
                (0x9593, 0xb8e4),
                (0x89bf, 0xcf05),
                (0x8277, 0xe707),
            ]
            for i in range(16):
                yield m.addr.eq(i)
                yield
                yield
                yield real.eq(m.out.real.value)
                yield imag.eq(m.out.imag.value)
                yield
                self.assertEqual(((yield real), (yield imag)), expected_tfs[i])

        sim.add_sync_process(process)
        with sim.write_vcd("tf.vcd", "tf.gtkw", traces=[]):
            sim.run()


if __name__ == "__main__":
    unittest.main()
2.1875
2
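The bit-reversed preload order used by test_fft above can be checked without any hardware simulation; this small numpy-only sketch mirrors the 5-bit rev() helper and verifies the reordering is a permutation that is its own inverse.

# Standalone check of the 5-bit bit-reversal used to preload the FFT memory.
import numpy as np

def rev(i, bits=5):
    return int('{:0{bits}b}'.format(i, bits=bits)[::-1], 2)

order = [rev(i) for i in range(32)]
assert sorted(order) == list(range(32))               # a permutation of 0..31
assert [order[j] for j in order] == list(range(32))   # applying it twice is the identity

data = np.arange(32)
assert np.array_equal(data[order][order], data)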
test/lazy/test_root_lazy_variable.py
orionr/gpytorch
0
12777106
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import torch
import unittest
from torch.autograd import Variable
from gpytorch.lazy import RootLazyVariable
from gpytorch.utils import approx_equal


class TestRootLazyVariable(unittest.TestCase):
    def test_matmul(self):
        root = Variable(torch.randn(5, 3), requires_grad=True)
        covar = RootLazyVariable(root)
        mat = Variable(torch.eye(5))
        res = covar.matmul(mat)

        root_clone = Variable(root.data.clone(), requires_grad=True)
        mat_clone = Variable(mat.data.clone())
        actual = root_clone.matmul(root_clone.transpose(-1, -2)).matmul(mat_clone)

        self.assertTrue(approx_equal(res.data, actual.data))

        gradient = torch.randn(5, 5)
        actual.backward(gradient=Variable(gradient))
        res.backward(gradient=Variable(gradient))

        self.assertTrue(approx_equal(root.grad.data, root_clone.grad.data))

    def test_diag(self):
        root = Variable(torch.randn(5, 3))
        actual = root.matmul(root.transpose(-1, -2))
        res = RootLazyVariable(root)
        self.assertTrue(approx_equal(actual.diag().data, res.diag().data))

    def test_batch_diag(self):
        root = Variable(torch.randn(4, 5, 3))
        actual = root.matmul(root.transpose(-1, -2))
        actual_diag = torch.cat(
            [
                actual[0].diag().unsqueeze(0),
                actual[1].diag().unsqueeze(0),
                actual[2].diag().unsqueeze(0),
                actual[3].diag().unsqueeze(0),
            ]
        )

        res = RootLazyVariable(root)
        self.assertTrue(approx_equal(actual_diag.data, res.diag().data))

    def test_evaluate(self):
        root = Variable(torch.randn(5, 3))
        actual = root.matmul(root.transpose(-1, -2))
        res = RootLazyVariable(root)
        self.assertTrue(approx_equal(actual.data, res.evaluate().data))


if __name__ == "__main__":
    unittest.main()
2.3125
2
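The identity these tests rely on, that a RootLazyVariable built from R stands for R R^T and that its diagonal equals the row-wise sum of squares of R, can be sanity-checked with plain tensors; the sketch below uses torch only and does not need gpytorch.

# Plain-torch check of the R @ R^T identity behind RootLazyVariable.
import torch

root = torch.randn(5, 3)
full = root.matmul(root.transpose(-1, -2))   # the matrix the lazy variable represents
diag = (root ** 2).sum(-1)                   # row-wise sum of squares

assert torch.allclose(full.diag(), diag)
assert torch.allclose(full, full.t())        # symmetric by construction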
experiments/references/naf_many_envs.py
Asap7772/rail-rl-franka-eval
0
12777107
""" Run PyTorch NAF on many envs. """ import random import railrl.torch.pytorch_util as ptu from railrl.exploration_strategies.ou_strategy import OUStrategy from railrl.launchers.launcher_util import run_experiment from railrl.torch.naf import NafPolicy, NAF from rllab.envs.mujoco.ant_env import AntEnv from rllab.envs.mujoco.half_cheetah_env import HalfCheetahEnv from rllab.envs.mujoco.hopper_env import HopperEnv from rllab.envs.mujoco.swimmer_env import SwimmerEnv from rllab.envs.normalized_env import normalize def example(variant): env = variant['env_class']() env = normalize(env) es = OUStrategy(action_space=env.action_space) qf = NafPolicy( int(env.observation_space.flat_dim), int(env.action_space.flat_dim), 100, ) algorithm = NAF( env, naf_policy=qf, exploration_strategy=es, **variant['algo_params'] ) algorithm.to(ptu.device) algorithm.train() if __name__ == "__main__": # noinspection PyTypeChecker variant = dict( algo_params=dict( num_epochs=100, num_steps_per_epoch=10000, num_steps_per_eval=1000, use_soft_update=True, tau=1e-2, batch_size=128, max_path_length=1000, discount=0.99, naf_policy_learning_rate=1e-4, ), version="NAF", ) for env_class in [ SwimmerEnv, HalfCheetahEnv, AntEnv, HopperEnv, ]: variant['env_class'] = env_class variant['version'] = str(env_class) for _ in range(5): seed = random.randint(0, 999999) run_experiment( example, exp_prefix="naf-benchmarks-envs-pytorch", seed=seed, mode='ec2', variant=variant, use_gpu=False, )
2.296875
2
tests/blueprints/test_points.py
drkane/find-that-postcode
4
12777108
from tests.fixtures import client


def test_point_json(client):
    rv = client.get("/points/51.501,-0.2936")
    point_json = rv.get_json()
    assert rv.headers["Access-Control-Allow-Origin"] == "*"
    assert (
        point_json.get("data", {})
        .get("relationships", {})
        .get("nearest_postcode", {})
        .get("data", {})
        .get("id")
        == "EX36 4AT"
    )
    assert (
        point_json.get("data", {}).get("attributes", {}).get("distance_from_postcode")
        == 68.9707515287199
    )


def test_point_json_distance(client):
    pass


def test_point_html(client):
    rv = client.get("/points/51.501,-0.2936.html")
    assert rv.mimetype == "text/html"
    content = rv.data.decode("utf8")
    assert "EX36 4AT" in content
    assert "E01020135" in content
    assert "69.0" in content


def test_point_html_distance(client):
    pass
2.1875
2
roles/trouter/molecule/default/tests/test_trouter.py
Mieszko96/alfresco-ansible-deployment
14
12777109
"""TRouter Tests"""
import os
import pytest
from hamcrest import assert_that, contains_string

# pylint: disable=redefined-outer-name


@pytest.fixture()
def get_ansible_vars(host):
    """Define get_ansible_vars"""
    java_role = "file=../../../roles/java/vars/main.yml name=java_role"
    common_vars = "../../../common/vars/main.yml name=common_vars"
    common_defaults = "../../../common/defaults/main.yml name=common_defaults"
    common_hosts = "../../../common/vars/hosts.yml name=common_hosts"
    ansible_vars = host.ansible("include_vars", java_role)["ansible_facts"]["java_role"]
    ansible_vars.update(host.ansible("include_vars", common_vars)["ansible_facts"]["common_vars"])
    ansible_vars.update(host.ansible("include_vars", common_hosts)["ansible_facts"]["common_hosts"])
    ansible_vars.update(host.ansible("include_vars", common_defaults)["ansible_facts"]["common_defaults"])
    return ansible_vars


test_host = os.environ.get('TEST_HOST')


def test_trouter_service_is_running_and_enabled(host, get_ansible_vars):
    """Check trouter service"""
    trouter = host.service("alfresco-transform-router")
    assert_that(trouter.is_running)
    assert_that(trouter.is_enabled)


def test_trouter_log_exists(host, get_ansible_vars):
    "Check that ats-atr.log exists in /var/log/alfresco"
    assert_that(host.file("/var/log/alfresco/ats-atr.log").exists)


def test_trouter_response(host, get_ansible_vars):
    "Check that trouter context is available and returns a HTTP 200 status code"
    cmd = host.run("curl -iL http://{}:8095/transform/config".format(test_host))
    http_response = host.run("curl -sL -w '%{http_code}' http://" + test_host + ":8095/transform/config -o /dev/null")
    assert_that(http_response.stdout, contains_string("200"))
    assert_that(cmd.stdout, contains_string("pdfRendererOptions"))
    assert_that(cmd.stdout, contains_string("archiveOptions"))
    assert_that(cmd.stdout, contains_string("imageMagickOptions"))
    assert_that(cmd.stdout, contains_string("tikaOptions"))
    assert_that(cmd.stdout, contains_string("pdfboxOptions"))
    assert_that(cmd.stdout, contains_string("textToPdfOptions"))
    assert_that(cmd.stdout, contains_string("stringOptions"))


def test_environment_jvm_opts(host, get_ansible_vars):
    "Check that overwritten JVM_OPTS are taken into consideration"
    pid = host.run("/opt/openjdk*/bin/jps -lV | grep transform-router | awk '{print $1}'")
    process_map = host.run("/opt/openjdk*/bin/jhsdb jmap --heap --pid {}".format(pid.stdout))
    assert_that(process_map.stdout, contains_string("MaxHeapSize = 943718400 (900.0MB)"))
2.09375
2
src/events/pygame_events/click.py
ArcosJuan/Get-out-of-my-fucking-maze
2
12777110
from src.events import Event


class Click(Event):
    def __init__(self, pos, button):
        super().__init__()
        self.pos = pos
        self.button = button

    def get_pos(self):
        return self.pos

    def get_button(self):
        return self.button
2.96875
3
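A minimal usage sketch for the Click event above; the import path follows the repository layout and is an assumption, and button 1 is pygame's left mouse button.

# Hypothetical sketch; assumes this project's src.events package is importable.
from src.events.pygame_events.click import Click

click = Click(pos=(120, 45), button=1)   # e.g. a left-button press at pixel (120, 45)
assert click.get_pos() == (120, 45)
assert click.get_button() == 1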
tools/odelayplot.py
AitchisonLab/ODELAY-ODELAM-python
0
12777111
import os import pathlib import re import time import sys import json import cv2 import h5py import math import numpy as np import matplotlib.pyplot as plt import matplotlib.path as mpath import matplotlib.lines as mlines import matplotlib.patches as mpatches import matplotlib as mpl from scipy.sparse import csr_matrix from fast_histogram import histogram1d from datetime import datetime from importlib import reload from PyQt5 import QtCore, QtGui, QtWidgets # from PyQt5.QtMultimedia import QMediaPlayer # from PyQt5.QtMultimedia import QMediaContent # from PyQt5.QtMultimediaWidgets import QVideoWidget from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QSizePolicy, QWidget, QInputDialog, QFileDialog from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QStyle, QVBoxLayout, QWidget, QSlider, QPushButton, QAction from PyQt5.QtGui import QImage, QPixmap, QIcon from PyQt5.QtCore import QDir, Qt, QUrl import tools.imagepl as opl import tools.fileio as fio def figPlotGCs(roiDict, organism='Yeast', saveAll=False, savePath=None): ''' Plots growth curves using matplotlib''' plt.close('all') pltRange = setPlotRange(organism) roiID = roiDict['roi'] timePoints = roiDict['timePoints']/pltRange['GCs']['devisor'] rawobjectArea = roiDict['objectArea'] rawfitData = roiDict['fitData'] numObsv = pltRange['GCs']['numObservations'] rngTime = pltRange['GCs']['xRange'] rngArea = pltRange['GCs']['yRange'] rngTdbl = pltRange['Dbl']['xRange'] rngTlag = pltRange['Lag']['xRange'] rngTexp = pltRange['Tex']['xRange'] rngNDub = pltRange['NumDbl']['xRange'] if len(roiDict['roiInfo'])>0 : roiID = roiDict['roiInfo']['Strain ID'] numObservations = np.sum(rawobjectArea>0, 1) > numObsv numDbl = rawfitData[:,1]>0 fitIndex = rawfitData[:,0]>0 dataFilter = numObservations * fitIndex * numDbl fitData = rawfitData[dataFilter, :] objectArea = rawobjectArea[dataFilter,:].transpose() fitData[:,3]/=pltRange['Tex']['devisor'] fitData[:,5]/=pltRange['Lag']['devisor'] fitData[:,6]/=pltRange['Dbl']['devisor'] textLbls= ['Growth Curves','Td (hrs)', 'Tlag (hrs)','Texp (hrs)','Num Dbl'] lineColor = np.array([ [0, 0, 0, 0.3], [0, 0, 1, 1], [0, 0.7, 0, 1], [1, 0, 0, 1], [0.7,0.5, 0, 1]], dtype = 'float') xLim = np.array([rngTime, rngTdbl, rngTlag, rngTexp, rngNDub], dtype = 'float64') wScale = 0.75 numbins = 75 fitCol = [6,6,5,3,1] normVirts = np.zeros((5,numbins), dtype='float64') virts = np.zeros((5,numbins), dtype='float64') nbins = np.zeros((5,numbins), dtype='float64') for cnt in range(5): nbins[cnt,:] = np.linspace(xLim[cnt,0], xLim[cnt,1], num=numbins) virts[cnt,:] = histogram1d( fitData[:,fitCol[cnt]], 75, xLim[cnt,:], weights = None) normVirts[cnt,:] = (virts[cnt,:]/np.max(virts[cnt,2:-10]))*wScale axesPos = np.array([[0.1875, 0.66666667, 0.75, 0.28], [0.1875, 0.48666667, 0.75, 0.1], [0.1875, 0.33333333, 0.75, 0.1], [0.1875, 0.19333333, 0.75, 0.1], [0.1875, 0.05333333, 0.75, 0.1]], dtype = 'float64') xLim = np.array([rngTime, rngTdbl, rngTlag, rngTexp, rngNDub], dtype = 'float64') yLim = np.array( [rngArea, [0,1], [0,1], [0,1], [0,1]], dtype = 'float64') Label_Font = 12 Title_Font = 12 mpl.rcParams['axes.linewidth'] = 2 mpl.rcParams['xtick.major.width'] = 2 mpl.rcParams['ytick.major.width'] = 2 mpl.rcParams['xtick.direction'] = 'in' mpl.rcParams['ytick.direction'] = 'in' mpl.rcParams['font.family'] = 'Arial' mpl.rcParams['font.weight'] = 'bold' mpl.rcParams['axes.titlesize'] = Title_Font mpl.rcParams['axes.labelsize'] = Label_Font gcFig = plt.figure(figsize=[4,7.5], dpi=100, facecolor='w') axs = [] n = 0 
axs.append(plt.axes(axesPos[n,:], xlim=xLim[n,:], ylim=yLim[n,:])) axs[0].plot(timePoints, np.log2(objectArea), color=lineColor[n,:], linewidth=0.8) axs[0].set_xlabel('Time (hrs)', fontsize=Label_Font, fontweight='bold') axs[0].set_ylabel('log2[Area]', fontsize=Label_Font, fontweight='bold') axs[0].set_title(roiID, fontsize=Label_Font, fontweight='bold') for n in range(1,5): axs.append(plt.axes(axesPos[n,:], xlim=xLim[n,:], ylim=yLim[n,:])) axs[n].plot(nbins[n,:],normVirts[n,:],color=lineColor[n,:]) xPos = 0.7*np.abs(np.diff(xLim[n,:]))+xLim[n,0] axs[n].text(xPos,0.75, textLbls[n], fontsize = Label_Font,fontweight='bold',color=lineColor[n,:]) if saveAll: plt.savefig(savePath) else: plt.show() return None def figExpSummary(expDict, organism='Yeast'): plt.close('all') Label_Font = 12 Title_Font = 12 mpl.rcParams['axes.linewidth'] = 2 mpl.rcParams['xtick.major.width'] = 2 mpl.rcParams['ytick.major.width'] = 2 mpl.rcParams['xtick.direction'] = 'in' mpl.rcParams['ytick.direction'] = 'in' mpl.rcParams['font.family'] = 'Arial' mpl.rcParams['font.weight'] = 'bold' mpl.rcParams['axes.titlesize'] = Title_Font mpl.rcParams['axes.labelsize'] = Label_Font plotDict = setPlotRange(organism) rngGCs = plotDict['GCs']['xRange'] rngTdbl = plotDict['Dbl']['xRange'] rngTlag = plotDict['Lag']['xRange'] rngTexp = plotDict['Tex']['xRange'] rngNDub = plotDict['NumDbl']['xRange'] rngPopNum = plotDict['PopNum']['xRange'] cntrLbl = ['Dbl', 'Lag', 'Tex', 'NumDbl', 'PopNum'] tickList = {} left = 1.25/6 bottom = 0.4/10 width = 1.2/8 height = 9/10 spacing = 0.05/6 xLim = np.array([rngTdbl, rngTlag, rngTexp, rngNDub, rngPopNum], dtype = 'float64') textLbls= ['Td (hrs)', 'Tlag (hrs)','Texp (hrs)','Num Dbl','Pop Cnt'] Path = mpath.Path commands = {'M': (mpath.Path.MOVETO,), 'L': (mpath.Path.LINETO,), 'Q': (mpath.Path.CURVE3,)*2, 'C': (mpath.Path.CURVE4,)*3, 'Z': (mpath.Path.CLOSEPOLY,)} numbins = 75 fitCol = [6,5,3,1] # breakpoint() devisor = [ plotDict['Dbl']['devisor'], plotDict['Lag']['devisor'], plotDict['Tex']['devisor'], plotDict['NumDbl']['devisor'] ] roiList = [*expDict.keys()] key1='roiInfo' key2='Strain ID' yTickLbl=[] for roi in expDict.keys(): if len(expDict[roi][key1])>0: yTickLbl.append(expDict[roi][key1][key2]) else: yTickLbl.append(roi) roiList = [x for _, x in sorted( zip(yTickLbl, roiList), key=lambda pair: pair[0])] roiList.reverse() yTickLbl.sort() yTickLbl.reverse() yTickLbl.insert(0,'') yTickLbl.append('') numRoi = len(roiList) poptot = np.zeros((numRoi+1,2), dtype='int') wScale = 0.8 pathDict = {} cntr=0 for key in roiList: cntr+=1 normVirts = np.zeros((5,numbins), dtype='float64') virts = np.zeros((5,numbins), dtype='float64') nbins = np.zeros((5,numbins), dtype='float64') fitData = expDict[key]['fitData'] poptot[cntr,:] = fitData.shape pathDict[key]={} for n in range(4): nbins[n,:] = np.linspace(xLim[n,0], xLim[n,1], num=numbins) virts[n,:] = histogram1d( fitData[:,fitCol[n]]/devisor[n], numbins, xLim[n,:], weights = None) normVirts[n,:] = (virts[n,:]/np.max(virts[n,2:-10]))*wScale codes, verts = parseVirts(nbins[n,:], normVirts[n,:]) verts[:,1] += cntr-0.5 path = mpath.Path(verts, codes) pathDict[key][textLbls[n]] = path pathDict[key]['nbins'] = nbins pathDict[key]['normVirts'] = normVirts axesPos = np.zeros((5,4),dtype = 'float') for n in range(5): axesPos[n,:] = [left+n*(width+spacing),bottom,width,height] gcFig = plt.figure(figsize=[7,9], dpi=100, facecolor='w') axs = [] n = 0 xTicks = plotDict[cntrLbl[n]]['xTicks'] xticklabels = [str(value) for value in xTicks] axs.append(plt.axes(axesPos[n,:], 
xlim=xLim[n,:], ylim=[0,numRoi+1], yticks=list(range(numRoi+1)), xticks=xTicks)) axs[n].set_yticklabels(yTickLbl, fontsize=6, fontweight = 'bold') axs[n].set_xticklabels(xticklabels, fontsize=8, fontweight = 'bold', rotation= 45 ) axs[n].set_title(textLbls[n], fontsize=10, fontweight = 'bold' ) for roi in roiList: patch = mpatches.PathPatch(pathDict[roi][textLbls[n]], facecolor = [0,0,1,1], edgecolor = None, linewidth = 0 ) axs[n].add_patch(patch) for n in range(1,4): xTicks = plotDict[cntrLbl[n]]['xTicks'] xticklabels = [str(value) for value in xTicks] axs.append(plt.axes(axesPos[n,:], xlim=xLim[n,:], ylim=[0,numRoi+1], yticks=[], xticks=xTicks)) axs[n].set_xticklabels(xticklabels, fontsize=8, fontweight = 'bold', rotation= 45 ) axs[n].set_title(textLbls[n], fontsize=10, fontweight = 'bold' ) for roi in roiList: patch = mpatches.PathPatch(pathDict[roi][textLbls[n]], facecolor = [0,0,1,1], edgecolor = None, linewidth = 0 ) axs[n].add_patch(patch) n +=1 xTicks = plotDict[cntrLbl[n]]['xTicks'] xticklabels = [str(value) for value in xTicks] ypos = np.arange(poptot.shape[0]) xstart = np.zeros((poptot.shape[0],),dtype = 'float') axs.append(plt.axes(axesPos[n,:], xscale = 'log', xlim=[1,10000], ylim=[0,numRoi+1], yticks=[], xticks=xTicks)) axs[n].hlines(ypos, xstart, poptot[:,0], linewidth = 5, color = [0,0,1,1] ) axs[n].set_yticklabels(yTickLbl, fontsize=6, fontweight = 'bold') axs[n].set_xticklabels(xticklabels, fontsize=8, fontweight = 'bold', rotation= 45 ) axs[n].set_title(textLbls[n], fontsize=10, fontweight = 'bold' ) plt.show() return None def stitchIm( roiLbl, imNum, imageDir, dataDir): expPath = pathlib.Path(imageDir) # indexList = [k for k in expPath.glob('*Index_ODELAYData.*')] # Generate image file Path by combining the region of interest lable with the experiment path roiFolder = pathlib.Path('./'+ roiLbl) imageFileName = pathlib.Path('./'+ roiLbl + '_'+ f'{imNum:00d}' + '.mat') imageFilePath = expPath / roiFolder / imageFileName # Load Region of Interest Data. This HDF5 file should containt location of image stitch coordinates dataPath = pathlib.Path(dataDir) initPath = list(dataPath.glob('*Index_ODELAYData.hdf5')) initData = fio.loadData(initPath[0]) background = initData['backgroundImage'] pixSize = initData['pixSize'] magnification = initData['magnification'] anImage = opl.stitchImage(imageFilePath, pixSize, magnification, background) im = anImage['Bf'] imSize = im.shape # This data should be recorded from image display to make sure the image is visible. imageHist = histogram1d(im.ravel(),2**16,[0,2**16],weights = None).astype('float') # Calculate the cumulative probability ignoring zero values cumHist = np.cumsum(imageHist) cumProb = (cumHist-cumHist[0])/(cumHist[2**16-1]-cumHist[0]) # set low and high values ot normalize image contrast. 
loval = np.argmax(cumProb>0.00001) hival = np.argmax(cumProb>=0.9995) adjIm = np.array((im.astype('float') - loval.astype('float'))/(hival.astype('float') - loval.astype('float'))*254, dtype = 'uint8') rsIm = cv2.resize(adjIm, (round(imSize[1]/5), round(imSize[0]/5))) cv2.imshow('Display Image', rsIm) k = cv2.waitKey(0) if k == 107 or k == -1: cv2.destroyWindow('Display Image') return k def showImage(roiLbl, imNum, imageDir, dataDir): # image = odp.stitchImage(imageFileName, pixSize, magnification, background) expPath = pathlib.Path(imageDir) # Generate image file Path by combining the region of interest lable with the experiment path roiFolder = pathlib.Path('./'+ roiLbl) imageFileName = pathlib.Path('./'+ roiLbl + '_'+ f'{imNum:00d}' + '.mat') imageFilePath = expPath / roiFolder / imageFileName # Load Region of Interest Data. This HDF5 file should containt location of image stitch coordinates dataPath = pathlib.Path(dataDir) initPath = list(dataPath.glob('*Index_ODELAYData.hdf5')) initData = fio.loadData(initPath[0]) roiPath = dataPath / 'ODELAY Roi Data' / f'{roiLbl}.hdf5' roiData = fio.loadData(roiPath) background = initData['backgroundImage'] # This data should be extracted from the Experiment Index file or stage data file. pixSize = initData['pixSize'] magnification = initData['magnification'] stInd = f'{imNum-1:03d}' stitchCorners = roiData['stitchMeta'][stInd]['imPix'] # breakpoint() anImage = opl.assembleImage(imageFilePath, pixSize, magnification, background, stitchCorners) im = anImage['Bf'] # im = opl.SobelGradient(im) imSize = im.shape # This data should be recorded from image display to make sure the image is visible. imageHist = histogram1d(im.ravel(),2**16,[0,2**16],weights = None).astype('float') # Calculate the cumulative probability ignoring zero values cumHist = np.cumsum(imageHist) cumProb = (cumHist-cumHist[0])/(cumHist[2**16-1]-cumHist[0]) # set low and high values ot normalize image contrast. 
loval = np.argmax(cumProb>0.00001) hival = np.argmax(cumProb>=0.9995) adjIm = np.array((im.astype('float') - loval.astype('float'))/(hival.astype('float') - loval.astype('float'))*254, dtype = 'uint8') rsIm = cv2.resize(adjIm, (round(imSize[1]/5), round(imSize[0]/5))) cv2.imshow('Display Image', rsIm) k = cv2.waitKey(0) if k == 107 or k == -1: cv2.destroyWindow('Display Image') return k def setPlotRange(organism=None): plotRange = {} plotRange['Mtb'] = {} plotRange['Mtb']['GCs'] = {} plotRange['Mtb']['Dbl'] = {} plotRange['Mtb']['Lag'] = {} plotRange['Mtb']['Tex'] = {} plotRange['Mtb']['Area'] = {} plotRange['Mtb']['NumDbl'] = {} plotRange['Mtb']['PopNum'] = {} plotRange['Mtb']['GCs']['xRange'] = [0, 170] plotRange['Mtb']['GCs']['yRange'] = [4, 14] plotRange['Mtb']['GCs']['xTicks'] = np.arange(0,100,20) plotRange['Mtb']['GCs']['xLabel'] = 'Hours' plotRange['Mtb']['GCs']['titleFrag'] = 'Dbl Time Hr' plotRange['Mtb']['GCs']['devisor'] = 60 plotRange['Mtb']['GCs']['numObservations'] = 20 plotRange['Mtb']['Dbl']['xRange'] = [0, 100] plotRange['Mtb']['Dbl']['xTicks'] = [20,40,60,80] plotRange['Mtb']['Dbl']['xStep'] = 5 plotRange['Mtb']['Dbl']['xLabel'] = 'Hours' plotRange['Mtb']['Dbl']['titleFrag'] = 'Dbl Time Hr' plotRange['Mtb']['Dbl']['devisor'] = 60 plotRange['Mtb']['Lag']['xRange'] = [0, 100] plotRange['Mtb']['Lag']['xTicks'] = [20,40,60,80] plotRange['Mtb']['Lag']['xStep'] = 2 plotRange['Mtb']['Lag']['xLabel'] = 'Hours' plotRange['Mtb']['Lag']['titleFrag'] = 'Lag Time Hr' plotRange['Mtb']['Lag']['devisor'] = 60 plotRange['Mtb']['Tex']['xRange'] = [0, 100] plotRange['Mtb']['Tex']['xTicks'] = [20,40,60,80] plotRange['Mtb']['Tex']['xStep'] = 2 plotRange['Mtb']['Tex']['xLabel'] = 'Hours' plotRange['Mtb']['Tex']['titleFrag'] = 'Tex Hr' plotRange['Mtb']['Tex']['devisor'] = 30 plotRange['Mtb']['Area']['xRange'] = [0, 30] plotRange['Mtb']['Area']['xTicks'] = [2,4,6,8] plotRange['Mtb']['Area']['xStep'] = 0.25 plotRange['Mtb']['Area']['xLabel'] = 'log2 Pixels' plotRange['Mtb']['Area']['titleFrag'] = 'log2 Area' plotRange['Mtb']['Area']['devisor'] = 1 plotRange['Mtb']['NumDbl']['xRange'] = [0, 10] plotRange['Mtb']['NumDbl']['xTicks'] = [2,4,6,8] plotRange['Mtb']['NumDbl']['xStep'] = 0.25 plotRange['Mtb']['NumDbl']['xLabel'] = 'Num Dbl Rel' plotRange['Mtb']['NumDbl']['titleFrag'] = 'Num Dbl Rel' plotRange['Mtb']['NumDbl']['devisor'] = 1 plotRange['Mtb']['PopNum']['xRange'] = [0, 10000] plotRange['Mtb']['PopNum']['xTicks'] = [10,100,1000] plotRange['Mtb']['PopNum']['xStep'] = 10 plotRange['Mtb']['PopNum']['xLabel'] = 'log10 Pop' plotRange['Mtb']['PopNum']['titleFrag'] = 'Pop Num' plotRange['Mtb']['PopNum']['devisor'] = 1 plotRange['Mabs'] = {} plotRange['Mabs']['GCs'] = {} plotRange['Mabs']['Dbl'] = {} plotRange['Mabs']['Lag'] = {} plotRange['Mabs']['Tex'] = {} plotRange['Mabs']['Area'] = {} plotRange['Mabs']['NumDbl'] = {} plotRange['Mabs']['PopNum'] = {} plotRange['Mabs']['GCs']['xRange'] = [0, 70] plotRange['Mabs']['GCs']['yRange'] = [4, 16] plotRange['Mabs']['GCs']['xTicks'] = np.arange(0,70,10) plotRange['Mabs']['GCs']['xLabel'] = 'Hours' plotRange['Mabs']['GCs']['titleFrag'] = 'Dbl Time Hr' plotRange['Mabs']['GCs']['devisor'] = 60 plotRange['Mabs']['GCs']['numObservations'] = 20 plotRange['Mabs']['Dbl']['xRange'] = [0, 10] plotRange['Mabs']['Dbl']['xTicks'] = [2,4,6,8] plotRange['Mabs']['Dbl']['xStep'] = 0.5 plotRange['Mabs']['Dbl']['xLabel'] = 'Hours' plotRange['Mabs']['Dbl']['titleFrag'] = 'Dbl Time Hr' plotRange['Mabs']['Dbl']['devisor'] = 60 plotRange['Mabs']['Lag']['xRange'] = [0, 40] 
plotRange['Mabs']['Lag']['xTicks'] = [10,20,30] plotRange['Mabs']['Lag']['xStep'] = 1 plotRange['Mabs']['Lag']['xLabel'] = 'Hours' plotRange['Mabs']['Lag']['titleFrag'] = 'Lag Time Hr' plotRange['Mabs']['Lag']['devisor'] = 60 plotRange['Mabs']['Tex']['xRange'] = [0, 40] plotRange['Mabs']['Tex']['xTicks'] = [10,20,30] plotRange['Mabs']['Tex']['xStep'] = 1 plotRange['Mabs']['Tex']['xLabel'] = 'Hours' plotRange['Mabs']['Tex']['titleFrag'] = 'Tex Hr' plotRange['Mabs']['Tex']['devisor'] = 30 plotRange['Mabs']['Area']['xRange'] = [0, 30] plotRange['Mabs']['Area']['xTicks'] = [20,40,60,80] plotRange['Mabs']['Area']['xStep'] = 0.25 plotRange['Mabs']['Area']['xLabel'] = 'log2 Pixels' plotRange['Mabs']['Area']['titleFrag'] = 'log2 Area' plotRange['Mabs']['Area']['devisor'] = 1 plotRange['Mabs']['NumDbl']['xRange'] = [0, 10] plotRange['Mabs']['NumDbl']['xTicks'] = [2,4,6,8] plotRange['Mabs']['NumDbl']['xStep'] = 0.25 plotRange['Mabs']['NumDbl']['xLabel'] = 'log2 Pixels' plotRange['Mabs']['NumDbl']['titleFrag'] = 'Num Dbl Rel' plotRange['Mabs']['NumDbl']['devisor'] = 1 plotRange['Mabs']['PopNum']['xRange'] = [0, 10000] plotRange['Mabs']['PopNum']['xTicks'] = [10,100,1000] plotRange['Mabs']['PopNum']['xStep'] = 10 plotRange['Mabs']['PopNum']['xLabel'] = 'log10 Pop' plotRange['Mabs']['PopNum']['titleFrag'] = 'Pop Num' plotRange['Mabs']['PopNum']['devisor'] = 1 plotRange['Yeast'] = {} plotRange['Yeast']['GCs'] = {} plotRange['Yeast']['Dbl'] = {} plotRange['Yeast']['Lag'] = {} plotRange['Yeast']['Tex'] = {} plotRange['Yeast']['Area'] = {} plotRange['Yeast']['NumDbl'] = {} plotRange['Yeast']['PopNum'] = {} plotRange['Yeast']['GCs']['xRange'] = [0, 3000] plotRange['Yeast']['GCs']['yRange'] = [4, 16] plotRange['Yeast']['GCs']['xTicks'] = [100,200,300,400] plotRange['Yeast']['GCs']['xStep'] = 4 plotRange['Yeast']['GCs']['xLabel'] = 'Minutes' plotRange['Yeast']['GCs']['titleFrag'] = 'Time Min' plotRange['Yeast']['GCs']['devisor'] = 1 plotRange['Yeast']['GCs']['numObservations'] = 10 plotRange['Yeast']['Dbl']['xRange'] = [25, 400] plotRange['Yeast']['Dbl']['xTicks'] = [100,200,300,400] plotRange['Yeast']['Dbl']['xStep'] = 4 plotRange['Yeast']['Dbl']['xLabel'] = 'Minutes' plotRange['Yeast']['Dbl']['titleFrag'] = 'Dbl Time Min' plotRange['Yeast']['Dbl']['devisor'] = 1 plotRange['Yeast']['Lag']['xRange'] = [0, 3000] plotRange['Yeast']['Lag']['xTicks'] = [100,200,300,400, 500] plotRange['Yeast']['Lag']['xStep'] = 1 plotRange['Yeast']['Lag']['xLabel'] = 'Minutes' plotRange['Yeast']['Lag']['titleFrag'] = 'Lag Time Min' plotRange['Yeast']['Lag']['devisor'] = 1 plotRange['Yeast']['Tex']['xRange'] = [0, 3000] plotRange['Yeast']['Tex']['xTicks'] = [200,400,600,800,1000] plotRange['Yeast']['Tex']['xStep'] = 1 plotRange['Yeast']['Tex']['xLabel'] = 'Minutes' plotRange['Yeast']['Tex']['titleFrag'] = 'Tex Min' plotRange['Yeast']['Tex']['devisor'] = 0.5 plotRange['Yeast']['Area']['xRange'] = [0, 40] plotRange['Yeast']['Area']['xTicks'] = [10,20,30] plotRange['Yeast']['Area']['xStep'] = 0.5 plotRange['Yeast']['Area']['xLabel'] = 'log2 Pixels' plotRange['Yeast']['Area']['titleFrag'] = 'log2 Area' plotRange['Yeast']['Area']['devisor'] = 1 plotRange['Yeast']['NumDbl']['xRange'] = [0, 10] plotRange['Yeast']['NumDbl']['xTicks'] = [2,4,6,8] plotRange['Yeast']['NumDbl']['xStep'] = 0.25 plotRange['Yeast']['NumDbl']['xLabel'] = 'log2 Pixels' plotRange['Yeast']['NumDbl']['titleFrag'] = 'Num Dbl Rel' plotRange['Yeast']['NumDbl']['devisor'] = 1 plotRange['Yeast']['PopNum']['xRange'] = [0, 10000] plotRange['Yeast']['PopNum']['xTicks'] = 
[10,100,1000] plotRange['Yeast']['PopNum']['xStep'] = 10 plotRange['Yeast']['PopNum']['xLabel'] = 'log10 Pop' plotRange['Yeast']['PopNum']['titleFrag'] = 'Pop Num' plotRange['Yeast']['PopNum']['devisor'] = 1 if organism == None: return plotRange else: return plotRange[organism] def scaleImage(im, lowcut = 0.00001, highcut = 0.9995, scaleImage = 1): # make a histogram of the image in the bitdept that the image was recorded. imageHist = histogram1d(im.ravel(),2**16,[0,2**16],weights = None).astype('float') # Calculate the cumulative probability ignoring zero values cumHist = np.empty(imageHist.shape, dtype='float') cumHist[0] = 0 cumHist[1:] = np.cumsum(imageHist[1:]) # if you expect a lot of zero set cumRange = cumHist[2**16-1]-cumHist[0] # if you expect a lot of zero set cumHist-=cumHist[0] cumHist /=cumRange # set low and high values ot normalize image contrast. loval = np.argmax(cumHist>=lowcut) hival = np.argmax(cumHist>=highcut) scIm = np.clip(im, loval, hival).astype('float') # scale the image linearly over the range given. This does not set alpha values or whatever. scaleFactor = 254/(hival-loval) scIm -=loval scIm *= scaleFactor adjIm = np.require(scIm, dtype = 'uint8', requirements = 'C') # resize if you need to rsIm = cv2.resize(adjIm, (round(im.shape[1]/scaleImage), round(im.shape[0]/scaleImage))) return rsIm def parseVirts(x, y): commands = {'M': (mpath.Path.MOVETO,), 'L': (mpath.Path.LINETO,), 'Q': (mpath.Path.CURVE3,)*2, 'C': (mpath.Path.CURVE4,)*3, 'Z': (mpath.Path.CLOSEPOLY,)} rc = y.shape vertices = np.zeros((rc[0]+3,2),dtype='float') vertices[0,:] = [x[0],y[0]] codes = [] codes.extend(commands['M']) for n in range(1,rc[0]): codes.extend(commands['L']) vertices[n,:] = [x[n],y[n]] vertices[-3,:] = [x[-1],0] codes.extend(commands['L']) vertices[-2,:] = [0,0] codes.extend(commands['L']) vertices[-2,:] = [0,0] codes.extend(commands['Z']) return codes, vertices class OImageView(QtWidgets.QGraphicsView): photoClicked = QtCore.pyqtSignal(QtCore.QPoint) def __init__(self, parent): super(OImageView, self).__init__(parent) self._zoom = 0 self._empty = True self._scene = QtWidgets.QGraphicsScene(self) self._photo = QtWidgets.QGraphicsPixmapItem() self.qImage = QImage() self._scene.addItem(self._photo) self.setScene(self._scene) self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse) self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse) self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) self.setBackgroundBrush(QtGui.QBrush(QtGui.QColor(30, 30, 30))) self.setFrameShape(QtWidgets.QFrame.NoFrame) def hasPhoto(self): return not self._empty def fitInView(self, scale=True): rect = QtCore.QRectF(self._photo.pixmap().rect()) if not rect.isNull(): self.setSceneRect(rect) if self.hasPhoto(): unity = self.transform().mapRect(QtCore.QRectF(0, 0, 1, 1)) self.scale(1 / unity.width(), 1 / unity.height()) viewrect = self.viewport().rect() scenerect = self.transform().mapRect(rect) factor = min(viewrect.width() / scenerect.width(), viewrect.height() / scenerect.height()) self.scale(factor, factor) self._zoom = 0 def setPhoto(self, pixmap=None, reset=True): self._zoom = 0 if pixmap and not pixmap.isNull(): self._empty = False self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag) self._photo.setPixmap(pixmap) else: self._empty = True self.setDragMode(QtWidgets.QGraphicsView.NoDrag) self._photo.setPixmap(QtGui.QPixmap()) if reset: self.fitInView() def wheelEvent(self, event): if self.hasPhoto(): if 
event.angleDelta().y() > 0: factor = 1.25 self._zoom += 1 else: factor = 0.8 self._zoom -= 1 if self._zoom > 0: self.scale(factor, factor) elif self._zoom == 0: self.fitInView() else: self._zoom = 0 def toggleDragMode(self): if self.dragMode() == QtWidgets.QGraphicsView.ScrollHandDrag: self.setDragMode(QtWidgets.QGraphicsView.NoDrag) elif not self._photo.pixmap().isNull(): self.setDragMode(QtWidgets.QGraphicsView.ScrollHandDrag) def mousePressEvent(self, event): if self._photo.isUnderMouse(): self.photoClicked.emit(self.mapToScene(event.pos()).toPoint()) super(OImageView, self).mousePressEvent(event) # Window is called to view window. class ImageWindow(QtWidgets.QWidget): ''' ImageWindow: a QWidget which holds the GraphicsView and button elements ''' def __init__(self): super(ImageWindow, self).__init__() # Load experiment and odelayConfig data into Window data. self.odelayConfig = fio.loadConfig() self.experimentData = self.loadExperimentData() self.roiList = [*self.experimentData['roiFiles']] self.roiLbl = self.roiList[0] self.numImages=len(self.experimentData['roiFiles'][self.roiLbl]) self.imageNumber = 1 #Create Photoviewer object self.viewer = OImageView(self) # 'Load image' button self.selectRoi = QtWidgets.QComboBox(self) qroiList = [self.tr(item) for item in self.roiList] self.selectRoi.addItems(qroiList) self.selectRoi.currentTextChanged.connect(self.chooseRoi) #Button for load previous Image self.btnPrevImage = QtWidgets.QToolButton(self) self.btnPrevImage.setText('Prev') self.btnPrevImage.setObjectName('btnPrevImage') self.btnPrevImage.clicked.connect(self.changeImage) #Button for load previous Image self.btnNextImage = QtWidgets.QToolButton(self) self.btnNextImage.setText('Next') self.btnNextImage.setObjectName('btnNextImage') self.btnNextImage.clicked.connect(self.changeImage) #Button for load previous Image self.btnSaveImage = QtWidgets.QToolButton(self) self.btnSaveImage.setText('Save') self.btnSaveImage.setObjectName('btnSaveImage') self.btnSaveImage.clicked.connect(self.saveImage) # Button to change from drag/pan to getting pixel info self.btnPixInfo = QtWidgets.QToolButton(self) self.btnPixInfo.setText('Enter pixel info mode') self.btnPixInfo.clicked.connect(self.pixInfo) self.editPixInfo = QtWidgets.QLineEdit(self) self.editPixInfo.setReadOnly(True) self.viewer.photoClicked.connect(self.photoClicked) # Add Image time slider self.imageSlider = QSlider(Qt.Horizontal) self.imageSlider.setRange(1,self.numImages) self.imageSlider.sliderReleased.connect(self.changeImage) # Arrange layout VBlayout = QtWidgets.QVBoxLayout(self) VBlayout.addWidget(self.viewer) VBlayout.addWidget(self.imageSlider) HBlayout = QtWidgets.QHBoxLayout() HBlayout.setAlignment(QtCore.Qt.AlignLeft) HBlayout.addWidget(self.selectRoi) HBlayout.addWidget(self.btnPrevImage) HBlayout.addWidget(self.btnNextImage) HBlayout.addWidget(self.btnSaveImage) HBlayout.addWidget(self.btnPixInfo) HBlayout.addWidget(self.editPixInfo) VBlayout.addLayout(HBlayout) def chooseRoi(self, ind): self.roiLbl = ind self.numImages = len(self.experimentData['roiFiles'][self.roiLbl]) if self.imageNumber>self.numImages: self.imageNumber = self.numImages self.imageSlider.setValue = self.numImages self.loadImage() def loadImage(self): self.viewer.qImage = self.readImage() pixmap = QPixmap.fromImage(self.viewer.qImage) self.viewer.setPhoto(pixmap) def saveImage(self): location = self.odelayConfig['LocalDataDir'] options = QFileDialog.Options() fileName, _ = QFileDialog.getSaveFileName(self,"Save Image", self.tr(location),"Images (*.png, 
*.jpg)", options=options) print(fileName) val = self.viewer.qImage.save(fileName, format=None, quality=100) if val: print('Image saved') def changeImage(self): sending_widget = self.sender() if sending_widget.objectName() == self.btnNextImage.objectName(): self.imageNumber += 1 if self.imageNumber>self.numImages: self.imageNumber = self.numImages else: self.viewer.qImage = self.readImage() pixmap = QPixmap.fromImage(self.viewer.qImage) self.imageSlider.setValue(self.imageNumber) self.viewer.setPhoto(pixmap, False) elif sending_widget.objectName() == self.btnPrevImage.objectName(): self.imageNumber -= 1 if self.imageNumber<1: self.imageNumber = 1 else: self.viewer.qImage = self.readImage() pixmap = QPixmap.fromImage(self.viewer.qImage) self.imageSlider.setValue(self.imageNumber) self.viewer.setPhoto(pixmap, False) elif sending_widget.objectName() == self.imageSlider.objectName(): self.imageNumber = sending_widget.value() self.viewer.qImage = self.readImage() pixmap = QPixmap.fromImage(self.viewer.qImage) self.viewer.setPhoto(pixmap, False) def pixInfo(self): self.viewer.toggleDragMode() def photoClicked(self, pos): if self.viewer.dragMode() == QtWidgets.QGraphicsView.NoDrag: self.editPixInfo.setText('%d, %d' % (pos.x(), pos.y())) def openFileDialog(): options = QFileDialog.Options() fileName, _ = QFileDialog.getOpenFileName(None,"Select ODELAY Data Set", "","ODELAYExpDisc (*Index_ODELAYData.mat);; Mat-Files (*.mat)", options=options) return fileName def loadExperimentData(self): imagePath = pathlib.Path(self.odelayConfig['LocalImageDir']) dataPath = pathlib.Path(self.odelayConfig['LocalDataDir']) indexList = [k for k in dataPath.glob('*Index_ODELAYData.*')] if len(indexList)==1: expIndexPath = dataPath / indexList[0] expData = fio.loadData(expIndexPath) return expData def readImage(self, lowcut = 0.0005, highcut = 0.99995): roiLbl = self.roiLbl imNum = self.imageNumber imagePath = pathlib.Path(self.odelayConfig['LocalImageDir']) dataPath = pathlib.Path(self.odelayConfig['LocalDataDir']) # Generate image file Path by combining the region of interest lable with the experiment path roiFolder = pathlib.Path('./'+ roiLbl) imageFileName = pathlib.Path('./'+ roiLbl + '_'+ f'{imNum:00d}' + '.mat') imageFilePath = imagePath / roiFolder / imageFileName # Load Region of Interest Data. This HDF5 file should containt location of image stitch coordinates roiPath = dataPath / 'ODELAY Roi Data' / f'{roiLbl}.hdf5' roiData = fio.loadData(roiPath) background = self.experimentData['backgroundImage'] # This data should be extracted from the Experiment Index file or stage data file. pixSize = self.experimentData['pixSize'] magnification = self.experimentData['magnification'] stInd = f'{imNum-1:03d}' stitchCorners = roiData['stitchMeta'][stInd]['imPix'] anImage = opl.assembleImage(imageFilePath, pixSize, magnification, background, stitchCorners) im = anImage['Bf'] # make a histogram of the image in the bitdept that the image was recorded. imageHist = histogram1d(im.ravel(),2**16,[0,2**16],weights = None).astype('float') # Calculate the cumulative probability ignoring zero values cumHist = np.zeros(imageHist.shape, dtype='float') cumHist[1:] = np.cumsum(imageHist[1:]) # if you expect a lot of zero set cumProb = (cumHist-cumHist[0])/(cumHist[2**16-1]-cumHist[0]) # set low and high values ot normalize image contrast. 
loval = np.argmax(cumProb>=lowcut) hival = np.argmax(cumProb>=highcut) scIm = (im.astype('float') - loval.astype('float'))/(hival.astype('float') - loval.astype('float'))*254 lim = np.iinfo('uint8') scIm = np.clip(scIm, lim.min, lim.max) # Set image data type and make sure the array is contiguous in memory. imageData = np.require(scIm, dtype = 'uint8', requirements = 'C') # Set data as a QImage. This is a greyscale image Qim = QImage(imageData.data, imageData.shape[1], imageData.shape[0], imageData.shape[1], QImage.Format_Grayscale8) Qim.data = imageData return Qim class VideoWindow(QMainWindow): def __init__(self, parent=None): super(VideoWindow, self).__init__(parent) self.setWindowTitle("PyQt Video Player Widget Example - pythonprogramminglanguage.com") self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface) videoWidget = QVideoWidget() self.playButton = QPushButton() self.playButton.setEnabled(False) self.playButton.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay)) self.playButton.clicked.connect(self.play) self.positionSlider = QSlider(Qt.Horizontal) self.positionSlider.setRange(0, 0) self.positionSlider.sliderReleased.connect(self.setPosition) self.errorLabel = QLabel() self.errorLabel.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Maximum) # Create new action openAction = QAction(QIcon('open.png'), '&Open', self) openAction.setShortcut('Ctrl+O') openAction.setStatusTip('Open movie') openAction.triggered.connect(self.openFile) # Create exit action exitAction = QAction(QIcon('exit.png'), '&Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(self.exitCall) # Create menu bar and add action menuBar = self.menuBar() fileMenu = menuBar.addMenu('&File') #fileMenu.addAction(newAction) fileMenu.addAction(openAction) fileMenu.addAction(exitAction) # Create a widget for window contents wid = QWidget(self) self.setCentralWidget(wid) # Create layouts to place inside widget controlLayout = QHBoxLayout() controlLayout.setContentsMargins(0, 0, 0, 0) controlLayout.addWidget(self.playButton) controlLayout.addWidget(self.positionSlider) layout = QVBoxLayout() layout.addWidget(videoWidget) layout.addLayout(controlLayout) layout.addWidget(self.errorLabel) # Set widget to contain window contents wid.setLayout(layout) self.mediaPlayer.setVideoOutput(videoWidget) self.mediaPlayer.stateChanged.connect(self.mediaStateChanged) self.mediaPlayer.positionChanged.connect(self.positionChanged) self.mediaPlayer.durationChanged.connect(self.durationChanged) self.mediaPlayer.error.connect(self.handleError) def openFile(self): odelayConfig = fio.loadConfig() fileName, _ = QFileDialog.getOpenFileName(self, "Open Movie", odelayConfig['LocalDataDir']) if fileName != '': self.mediaPlayer.setMedia( QMediaContent(QUrl.fromLocalFile(fileName))) self.playButton.setEnabled(True) def exitCall(self): sys.exit(app.exec_()) def play(self): if self.mediaPlayer.state() == QMediaPlayer.PlayingState: self.mediaPlayer.pause() else: self.mediaPlayer.play() def mediaStateChanged(self, state): if self.mediaPlayer.state() == QMediaPlayer.PlayingState: self.playButton.setIcon( self.style().standardIcon(QStyle.SP_MediaPause)) else: self.playButton.setIcon( self.style().standardIcon(QStyle.SP_MediaPlay)) def positionChanged(self, position): self.positionSlider.setValue(position) def durationChanged(self, duration): self.positionSlider.setRange(0, duration) def setPosition(self): position = self.positionSlider.value() self.mediaPlayer.setPosition(position) def 
handleError(self): self.playButton.setEnabled(False) self.errorLabel.setText("Error: " + self.mediaPlayer.errorString()) def videoViewer(): app = QApplication(sys.argv) player = VideoWindow() player.resize(640, 480) player.show() sys.exit(app.exec_()) def imageViewer(): app = QtWidgets.QApplication(sys.argv) window = ImageWindow() window.setGeometry(500, 300, 800, 600) window.show() window.loadImage() sys.exit(app.exec_()) def waveLengthToRGB(wl=650): try: wl=int(wl) except: wl=450 # print(wl) if wl<380: wl= 380 elif wl>780: wl = 780 if wl>=380 and wl<=440: R = np.abs((wl-440)/(440-380)) G = 0 B = 1 elif wl>440 and wl<=490: R = 0 G = np.abs((wl-440)/(490-440)) B = 1 elif wl>490 and wl<=510: R = 0 G = 1 B = np.abs((wl-510)/(510-490)) elif wl>510 and wl<=580: R = np.abs((wl-510)/(580-510)) G = 1 B = 0; elif wl>580 and wl<=645: R = 1; G = np.abs((wl-645)/(645-580)) B = 0 elif wl>645 and wl<=780: R = 1 G = 0 B = 0 # LET THE INTENSITY SSS FALL OFF NEAR THE VISION LIMITS if wl>700: SSS=0.3+0.7* (780-wl)/(780-700) elif wl<420: SSS=.3+.7*(wl-380)/(420-380) else: SSS=1 r = np.round(SSS*R*255).astype('uint8') g = np.round(SSS*G*255).astype('uint8') b = np.round(SSS*B*255).astype('uint8') return [r,g,b] # class FocusPlot(QMainWindow): # def __init__(self, parent=None): # QMainWindow.__init__(self, parent) # self.setWindowTitle('Demo: PyQt with matplotlib') # self.create_menu() # self.create_main_frame() # self.create_status_bar() # self.textbox.setText('1 2 3 4') # self.on_draw() # def save_plot(self): # file_choices = "PNG (*.png)|*.png" # path, ext = QFileDialog.getSaveFileName(self, # 'Save file', '', # file_choices) # path = path.encode('utf-8') # if not path[-4:] == file_choices[-4:].encode('utf-8'): # path += file_choices[-4:].encode('utf-8') # print(path) # if path: # self.canvas.print_figure(path.decode(), dpi=self.dpi) # self.statusBar().showMessage('Saved to %s' % path, 2000) # def on_about(self): # msg = """ A demo of using PyQt with matplotlib: # * Use the matplotlib navigation bar # * Add values to the text box and press Enter (or click "Draw") # * Show or hide the grid # * Drag the slider to modify the width of the bars # * Save the plot to a file using the File menu # * Click on a bar to receive an informative message # """ # QMessageBox.about(self, "About the demo", msg.strip()) # def on_pick(self, event): # # The event received here is of the type # # matplotlib.backend_bases.PickEvent # # # # It carries lots of information, of which we're using # # only a small amount here. # # # box_points = event.artist.get_bbox().get_points() # msg = "You've clicked on a bar with coords:\n %s" % box_points # QMessageBox.information(self, "Click!", msg) # def on_draw(self): # """ Redraws the figure # """ # str = self.textbox.text().encode('utf-8') # self.data = [int(s) for s in str.split()] # x = range(len(self.data)) # # clear the axes and redraw the plot anew # # # self.axes.clear() # self.axes.grid(self.grid_cb.isChecked()) # self.axes.bar( # x=x, # height=self.data, # width=self.slider.value() / 100.0, # align='center', # alpha=0.44, # picker=5) # self.canvas.draw() # def create_main_frame(self): # self.main_frame = QWidget() # # Create the mpl Figure and FigCanvas objects. 
# # 5x4 inches, 100 dots-per-inch # # # self.dpi = 100 # self.fig = Figure((5.0, 4.0), dpi=self.dpi) # self.canvas = FigureCanvas(self.fig) # self.canvas.setParent(self.main_frame) # # Since we have only one plot, we can use add_axes # # instead of add_subplot, but then the subplot # # configuration tool in the navigation toolbar wouldn't # # work. # # # self.axes = self.fig.add_subplot(111) # # Bind the 'pick' event for clicking on one of the bars # # # self.canvas.mpl_connect('pick_event', self.on_pick) # # Create the navigation toolbar, tied to the canvas # # # self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame) # # Other GUI controls # # # self.textbox = QLineEdit() # self.textbox.setMinimumWidth(200) # self.textbox.editingFinished.connect(self.on_draw) # self.draw_button = QPushButton("&Draw") # self.draw_button.clicked.connect(self.on_draw) # self.grid_cb = QCheckBox("Show &Grid") # self.grid_cb.setChecked(False) # self.grid_cb.stateChanged.connect(self.on_draw) # slider_label = QLabel('Bar width (%):') # self.slider = QSlider(Qt.Horizontal) # self.slider.setRange(1, 100) # self.slider.setValue(20) # self.slider.setTracking(True) # self.slider.setTickPosition(QSlider.TicksBothSides) # self.slider.valueChanged.connect(self.on_draw) # # # # Layout with box sizers # # # hbox = QHBoxLayout() # for w in [ self.textbox, self.draw_button, self.grid_cb, # slider_label, self.slider]: # hbox.addWidget(w) # hbox.setAlignment(w, Qt.AlignVCenter) # vbox = QVBoxLayout() # vbox.addWidget(self.canvas) # vbox.addWidget(self.mpl_toolbar) # vbox.addLayout(hbox) # self.main_frame.setLayout(vbox) # self.setCentralWidget(self.main_frame) # def create_status_bar(self): # self.status_text = QLabel("This is a demo") # self.statusBar().addWidget(self.status_text, 1) # def create_menu(self): # self.file_menu = self.menuBar().addMenu("&File") # load_file_action = self.create_action("&Save plot", # shortcut="Ctrl+S", slot=self.save_plot, # tip="Save the plot") # quit_action = self.create_action("&Quit", slot=self.close, # shortcut="Ctrl+Q", tip="Close the application") # self.add_actions(self.file_menu, # (load_file_action, None, quit_action)) # self.help_menu = self.menuBar().addMenu("&Help") # about_action = self.create_action("&About", # shortcut='F1', slot=self.on_about, # tip='About the demo') # self.add_actions(self.help_menu, (about_action,)) # def add_actions(self, target, actions): # for action in actions: # if action is None: # target.addSeparator() # else: # target.addAction(action) # def create_action( self, text, slot=None, shortcut=None, # icon=None, tip=None, checkable=False): # action = QAction(text, self) # if icon is not None: # action.setIcon(QIcon(":/%s.png" % icon)) # if shortcut is not None: # action.setShortcut(shortcut) # if tip is not None: # action.setToolTip(tip) # action.setStatusTip(tip) # if slot is not None: # action.triggered.connect(slot) # if checkable: # action.setCheckable(True) # return action # # def main(): # # app = QApplication(sys.argv) # # form = AppForm() # # form.show() # # app.exec_() # # if __name__ == "__main__": # # main() # class InteractiveGCPlot(QWidget)
1.695313
2
backend/api/decapod_api/views/__init__.py
angry-tony/ceph-lcm-decapod
41
12777112
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module has basic routines for Flask views.

Decapod uses Flask as a web framework and leverages its pluggable
views. Currently, registration of views into the app is done by
traversing a list of subclasses of a generic view, which requires
explicit module imports. That is acceptable because we have a limited
set of APIs and do not need views to be plugins.
"""

from decapod_api.views import v1


def register_api(application):
    """Register API endpoints to the application."""

    application.register_blueprint(v1.BLUEPRINT, url_prefix="/v1")
1.53125
2
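A minimal wiring sketch for the register_api helper above; the create_app factory shown here is an assumption for illustration, not part of the original module.

import flask

from decapod_api.views import register_api  # registers the /v1 blueprint


def create_app():
    # Hypothetical application factory; the real Decapod app adds its own
    # config, extensions and error handlers.
    application = flask.Flask(__name__)
    register_api(application)
    return application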
tests/core/pyspec/eth2spec/utils/hash_function.py
MicahZoltu/eth2.0-specs
2161
12777113
from hashlib import sha256
from remerkleable.byte_arrays import Bytes32
from typing import Union

ZERO_BYTES32 = b'\x00' * 32


def hash(x: Union[bytes, bytearray, memoryview]) -> Bytes32:
    return Bytes32(sha256(x).digest())
2.484375
2
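A quick usage sketch for the hash helper above; the digest in the comment is the standard SHA-256 of the empty byte string, and the import path assumes the pyspec package layout implied by the file path.

from eth2spec.utils.hash_function import hash, ZERO_BYTES32

digest = hash(b"")
# SHA-256 of b"":
# e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
assert len(digest) == 32
assert len(ZERO_BYTES32) == 32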
ef/1_waves/wave_test.py
urbanij/misc-scripts
0
12777114
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
#
# Created on: Sat Nov  2 14:41:15 CET 2019
#
# Author(s): <NAME> <https://urbanij.github.io>
#
# Description: Unit tests for traveling_wave_1.py
#
# ==========================================================

import unittest

from wave import *
from functions import *


class TravelingWaveTest(unittest.TestCase):

    def test_gamma_tau(self):
        # tau = 1 + gamma must hold for any pair of impedances.
        self.assertEqual(1 + gamma(2, 5), tau(2, 5))
        self.assertEqual(1 + gamma(-3, 7), tau(-3, 7))

    def test_boundary_condition(self):
        # Boundary condition at z = 0: the total field on side 1 equals
        # the transmitted field on side 2.
        self.assertEqual(e1_tot(0, t[0]), e2_t(0, t[0]))


if __name__ == "__main__":
    unittest.main()
3.328125
3
backend/cbt/urls.py
harryface/cbt-django-react
0
12777115
from django.urls import path, include

urlpatterns = [
    path('api/examiner/', include('core.examiner.urls')),
    path('api/taker/', include('core.taker.urls')),
]
1.53125
2
sektor/db.py
jesuejunior/sektor
1
12777116
# encoding: utf-8 import sqlite3 from datetime import datetime class DB: def connect(): conn = sqlite3.connect("sektor.db") return conn def init(): conn = DB.connect() cursor = conn.cursor() try: cursor.execute( """CREATE TABLE IF NOT EXISTS track (time INTEGER, lat DOUBLE PRECISION, lon DOUBLE PRECISION, speed INT, distance INT, oil BOOLEAN, created_at DATETIME) """ ) conn.commit() cursor.execute( """ CREATE TABLE IF NOT EXISTS km_for_oil (id INT AUTO_INCREMENT, counter INTEGER) """ ) conn.commit() finally: conn.close() return True def update_km_for_oil(distance): conn = DB.connect() cursor = conn.cursor() try: result = cursor.execute( """ UPDATE counter set """ ).fetchone() return result[0] if result else False except Exception as ex: print("Exception: ", ex) return False def get_last_oil_counter(): conn = DB.connect() cursor = conn.cursor() try: result = cursor.execute( """ SELECT counter FROM km_for_oil """ ).fetchone() return result[0] if result else False except Exception as ex: print("Exception: ", ex) return False def find_last_position(): conn = DB.connect() cursor = conn.cursor() try: result = cursor.execute( """ SELECT track. `time`, track.lat, track.lon, track.speed, track.distance, track.oil FROM track ORDER BY created_at DESC LIMIT 1 """ ).fetchone() return result if result else False except Exception as ex: print("Exception on DB.find_last_position()") print("Exception: ", ex) return False finally: conn.close() return True def find_last_oil(): conn = DB.connect() cursor = cursor.execute("""SELECT * FROM track WHERE distance > 300 """) return cursor.fetchone() def save(time, lat, lon, speed, distance, oil=False): conn = DB.connect() cursor = conn.cursor() created_at = datetime.now() try: cursor.execute( """INSERT INTO track VALUES (?,?,?,?,?,?,?)""", (time, lat, lon, speed, distance, oil, created_at), ) conn.commit() except Exception as ex: print("Exception on DB.save()") print("Exception: ", ex) # TO-DO: Use logger return False finally: conn.close() return True
3.4375
3
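A small usage sketch for the DB helper above, assuming the methods are called directly on the class as the module itself does (they take no self); the import path and coordinates are illustrative only.

import time

from sektor.db import DB

DB.init()  # creates sektor.db with the track and km_for_oil tables if missing
DB.save(time=int(time.time()), lat=41.3851, lon=2.1734,
        speed=42, distance=120, oil=False)

last = DB.find_last_position()
if last:
    ts, lat, lon, speed, distance, oil = last
    print(lat, lon, speed)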
antelope-image-classification/antelope_classification.py
avanwyk/fastai-projects
1
12777117
<reponame>avanwyk/fastai-projects<filename>antelope-image-classification/antelope_classification.py """ Antelope Classification using FastAI This script is an end-to-end case study of creating a custom image dataset of major African antelope and training a deep convolutional neural network to classify each species. The basic workflow is as follows: 1. Download images of each antelope and build a dataset. 2. Pre-process and prepare the dataset for learning. 3. Create a deep neural network model for classification. 4. Train the DNN using transfer learning on the data. 5. Output error rate. """ import logging from typing import List from fastai.vision import * from fastai.metrics import error_rate from google_images_download import google_images_download logging.basicConfig(level=logging.INFO) ANTELOPE = ['kudu', 'eland', 'sable antelope', 'roan antelope', 'waterbuck', 'impala antelope', 'nyala', 'bushbuck', 'tsessebe', 'lichtensteins hartebeest', 'grey duiker', 'steenbok', 'klipspringer'] DATA_PATH = Path('data') VALID_PCT = 0.2 IMAGE_SIZE = 224 BATCH_SIZE = 32 ARCHITECTURE = models.resnet34 def download_antelope_images(output_path: Path, limit: int = 50, kws = []) -> None: """Download images for each of the antelope to the output path. Each species is put in a separate sub-directory under output_path. """ try: if len(output_path.ls()) > 0: logging.info(f"Directory '{output_path}' is not empty. Skipping image download.") return except FileNotFoundError: logging.info(f"Directory '{output_path} does not exist and will be created.") response = google_images_download.googleimagesdownload() for antelope in ANTELOPE: for gender in ['male', 'female']: output_directory = str(output_path/antelope).replace(' ', '_') arguments = { 'keywords': f'wild {antelope} {gender} {" ".join(kws)} -hunting -stock', 'output_directory': output_directory, #'usage_rights': 'labeled-for-nocommercial-reuse', 'no_directory': True, 'size': 'medium', 'limit': limit } response.download(arguments) def validate_labels(data_path: Path, labels: List[str]) -> None: """Validate the file names of each of the labeled images. If a file name contains the label of another class, the path is logged. """ non_alpha = re.compile('([^a-zA-Z]+|antelope)') filtered_labels = [non_alpha.sub('', label) for label in labels] for path in [d for d in data_path.ls() if d.is_dir()]: label = non_alpha.sub('', path.name) other_labels = [other for other in filtered_labels if other != label] file_names = [non_alpha.sub('', f.name) for f in path.ls() if f.is_file()] for name in file_names: for other_label in other_labels: if other_label in name: logging.info(f'Potential mislabeling: {path}/{name}') def train_model(data_path: Path, valid_pct, image_size, batch_size, architecture) -> Learner: """Train a deep convolutional NN classifier on the downloaded data. Learning rates were found using learn.lr_find() in accompanying Jupyter notebook. """ image_data = ImageDataBunch.from_folder(data_path, valid_pct=valid_pct,\ ds_tfms=get_transforms(), size=image_size, bs=batch_size).normalize(imagenet_stats) learner = cnn_learner(image_data, architecture, metrics=error_rate) learner.fit_one_cycle(4, max_lr=slice(1e-3, 1e-2)) learner.unfreeze() learner.fit_one_cycle(4, 1e-4) return learner if __name__ == '__main__': download_antelope_images(DATA_PATH) learner = train_model(DATA_PATH, VALID_PCT, IMAGE_SIZE, BATCH_SIZE, ARCHITECTURE) print(f'Error rate: {learner.recorder.metrics[-1]}')
2.90625
3
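A small inference sketch for the learner returned by train_model above, assuming the fastai v1 API the script already uses; the image path is a placeholder.

from fastai.vision import open_image

# Assumes `learner` was produced by train_model(...) as in the __main__ block above.
img = open_image('data/kudu/some_photo.jpg')  # placeholder path
category, _, probs = learner.predict(img)
print(f'Predicted: {category} (p={probs.max():.2f})')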
tests/test_basics.py
vfxetc/zoneconfig
1
12777118
from . import * class TestBasics(TestCase): def test_find(self): path = root_path('basics') root = Zone(path=path) self.assertFalse(root.found) root.find() self.assertTrue(root.found) self.assertEqual(root.path, [path]) self.assertEqual(len(root.loaders), 1) self.assertEqual(root.loaders[0].url, os.path.join(path, '__init__.py')) module = root.zone('module') self.assertFalse(module.found) module.find() self.assertTrue(module.found) self.assertEqual(module.path, []) self.assertEqual(len(module.loaders), 1) self.assertEqual(module.loaders[0].url, os.path.join(path, 'module.py')) package = root.zone('package') self.assertFalse(package.found) package.find() self.assertTrue(package.found) self.assertEqual(package.path, [os.path.join(path, 'package')]) self.assertEqual(len(package.loaders), 1) self.assertEqual(package.loaders[0].url, os.path.join(path, 'package', '__init__.py')) submod = package.zone('submodule') mod2 = root.zone('package.submodule') self.assertIs(submod, mod2) self.assertFalse(submod.found) submod.find() self.assertTrue(submod.found) self.assertEqual(len(submod.loaders), 1) self.assertEqual(submod.loaders[0].url, os.path.join(path, 'package', 'submodule.py')) def test_load(self): path = root_path('basics') root = Zone(path=path) submod = root.zone('package.submodule') package = root.zone('package') # Out of order on purpose. module = root.zone('module') # Out of order on purpose. self.assertFalse(root.loaded) self.assertFalse(submod.loaded) submod.load() self.assertTrue(root.loaded) self.assertEqual(len(root.sources), 1) self.assertEqual(root.sources[0].url, os.path.join(path, '__init__.py')) self.assertIn('FOO = 1', root.sources[0].content) self.assertTrue(package.loaded) self.assertEqual(len(package.sources), 1) self.assertEqual(package.sources[0].url, os.path.join(path, 'package', '__init__.py')) self.assertIn('FOO = 3', package.sources[0].content) self.assertTrue(submod.loaded) self.assertEqual(len(submod.sources), 1) self.assertEqual(submod.sources[0].url, os.path.join(path, 'package', 'submodule.py')) self.assertIn('FOO = 4', submod.sources[0].content) def test_eval(self): path = root_path('basics') root = Zone(path=path) submod = root.zone('package.submodule') package = root.zone('package') self.assertFalse(submod.evaled) submod.eval() self.assertTrue(submod.evaled) self.assertEqual(len(submod.stores), 1) self.assertEqual(submod.stores[()].tags, ()) self.assertEqual(submod.stores[()]['foo'], 4) self.assertEqual(root['foo'], 1) self.assertEqual(package['foo'], 3) self.assertEqual(submod['foo'], 4) self.assertEqual(root['bar'], 'root-bar') self.assertEqual(root['count'], 2) self.assertEqual(root.view({'test:basics': 'baz'})['foo'], 'baz-view-foo')
2.546875
3
scripts/duck_prepare_sys.py
mihaelasmilova/duck
1
12777119
import argparse
import pickle

try:
    from duck.steps.parametrize import prepare_system
    from duck.utils.cal_ints import find_interaction
    from duck.steps.equlibrate import do_equlibrate
    from duck.utils.check_system import check_if_equlibrated
except ModuleNotFoundError:
    print('Dependencies missing; check openmm, pdbfixer, and yank are installed from Omnia.')


class EquilibrationError(Exception):
    """Raised when the equilibrated system fails the density check."""


def main():
    parser = argparse.ArgumentParser(description='Prepare system for dynamic undocking')
    parser.add_argument('-p', '--protein', help='Apoprotein in PDB format')
    parser.add_argument('-l', '--ligand', help='Ligand in mol format')
    # parser.add_argument('-o', '--output', help="PDB output")
    parser.add_argument('-c', '--chunk', help='Chunked protein')
    parser.add_argument('-i', '--interaction', help='Protein atom to use for ligand interaction.')
    parser.add_argument('-s', '--seed', type=int, help='Random seed.')
    parser.add_argument('--gpu-id', type=int, help='GPU ID (optional); if not specified, runs on CPU only.')
    parser.add_argument('--force-constant-eq', type=float, default=1.0, help='Force constant for equilibration.')
    args = parser.parse_args()

    # Parameterize the ligand
    prepare_system(args.ligand, args.chunk)

    # Now find the interaction and save to a file
    results = find_interaction(args.interaction, args.protein)
    print(results)

    # Append the interaction results to the pickled complex system.
    with open('complex_system.pickle', 'rb') as f:
        p = pickle.load(f) + results
    with open('complex_system.pickle', 'wb') as f:
        pickle.dump(p, f, protocol=pickle.HIGHEST_PROTOCOL)
    # pickle.dump(l, 'complex_system.pickle')

    # Now do the equilibration
    do_equlibrate(force_constant_equilibrate=args.force_constant_eq, gpu_id=args.gpu_id)
    if not check_if_equlibrated("density.csv", 1):
        raise EquilibrationError("System is not equilibrated.")


if __name__ == "__main__":
    main()
2.4375
2
testbase/report.py
fossabot/QTAF
0
12777120
# -*- coding: utf-8 -*- # # Tencent is pleased to support the open source community by making QTA available. # Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved. # Licensed under the BSD 3-Clause License (the "License"); you may not use this # file except in compliance with the License. You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. # '''测试报告 ''' import sys import codecs import cgi import socket import os import shutil import json import getpass import string import locale import argparse import pkg_resources import xml.dom.minidom as dom import xml.sax.saxutils as saxutils from datetime import datetime from testbase import testresult from testbase.testresult import EnumLogLevel REPORT_ENTRY_POINT = "qtaf.report" report_types = {} os_encoding = locale.getdefaultlocale()[1] report_usage = 'runtest <test ...> --report-type <report-type> [--report-args "<report-args>"]' def _to_unicode( s ): '''将任意字符串转换为unicode编码 ''' if isinstance(str, unicode): return s try: return s.decode('utf8') except UnicodeDecodeError: return s.decode(os_encoding) class ITestReport(object): '''测试报告接口 ''' def begin_report(self): '''开始测试执行 ''' pass def end_report(self): '''结束测试执行 :param passed: 测试是否通过 :type passed: boolean ''' pass def log_test_result(self, testcase, testresult ): '''记录一个测试结果 :param testcase: 测试用例 :type testcase: TestCase :param testresult: 测试结果 :type testresult: TestResult ''' pass def log_record(self, level, tag, msg, record): '''增加一个记录 :param level: 日志级别 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type level: string :type tag: string :type msg: string :type record: dict ''' pass def log_loaded_tests(self, loader, testcases): '''记录加载成功的用例 :param loader: 用例加载器 :type loader: TestLoader :param testcases: 测试用例列表 :type testcases: list ''' pass def log_filtered_test(self, loader, testcase, reason): '''记录一个被过滤的测试用例 :param loader: 用例加载器 :type loader: TestLoader :param testcase: 测试用例 :type testcase: TestCase :param reason: 过滤原因 :type reason: str ''' pass def log_load_error(self, loader, name, error): '''记录一个加载失败的用例或用例集 :param loader: 用例加载器 :type loader: TestLoader :param name: 名称 :type name: str :param error: 错误信息 :type error: str ''' pass def log_test_target(self, test_target): '''记录被测对象 :param test_target: 被测对象详情 :type test_target: any ''' pass def log_resource(self, res_type, resource): '''记录测试使用的资源 :param res_type: 资源类型 :type res_type: str :param resource: 资源详情 :type resource: dict ''' pass def get_testresult_factory(self): '''获取对应的TestResult工厂 :returns ITestResultFactory ''' raise NotImplementedError() def debug(self, tag, msg, record=None): '''记录一个DEBUG日志 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type tag: string :type msg: string :type record: dict ''' if record is None: record = {} self.log_record(EnumLogLevel.DEBUG, tag, msg, record) def info(self, tag, msg, record=None): '''记录一个INFO日志 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type tag: string :type msg: string :type record: dict ''' if record is None: record = {} self.log_record(EnumLogLevel.INFO, tag, msg, record) def warning(self, tag, msg, record=None): '''记录一个WARN日志 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type 
tag: string :type msg: string :type record: dict ''' if record is None: record = {} self.log_record(EnumLogLevel.WARNING, tag, msg, record) def error(self, tag, msg, record=None): '''记录一个ERROR日志 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type tag: string :type msg: string :type record: dict ''' if record is None: record = {} self.log_record(EnumLogLevel.ERROR, tag, msg, record) def critical(self, tag, msg, record=None): '''记录一个CRITICAL日志 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type tag: string :type msg: string :type record: dict ''' if record is None: record = {} self.log_record(EnumLogLevel.CRITICAL, tag, msg, record) @classmethod def get_parser(cls): '''获取命令行参数解析器(如果实现) :returns: 解析器对象 :rtype: argparse.ArgumentParser ''' raise NotImplementedError() @classmethod def parse_args(cls, args_string): '''通过命令行参数构造对象 :returns: 测试报告 :rtype: cls ''' raise NotImplementedError() class ITestResultFactory(object): '''TestResult工厂接口 ''' def create(self, testcase ): '''创建TestResult对象 :param testcase: 测试用例 :type testcase: TestCase :return TestResult ''' raise NotImplementedError() def dumps(self): '''序列化 :return picklable object ''' pass def loads(self, buf): '''反序列化 :param buf: dumps返回的序列化后的数据 :type buf: object ''' pass class EmptyTestResultFactory(ITestResultFactory): '''测试结果工厂 ''' def __init__(self, result_factory_func=None ): '''构造函数 :param result_factory_func: TestResult工厂函数 :type result_factory_func: Function ''' self._result_factory_func = result_factory_func def create(self, testcase ): '''创建TestResult对象 :param testcase: 测试用例 :type testcase: TestCase :return TestResult ''' if self._result_factory_func is None: return testresult.EmptyResult() else: return self._result_factory_func(testcase) def dumps(self): '''序列化 :return picklable object ''' return self._result_factory_func def loads(self, buf): '''反序列化 :param buf: dumps返回的序列化后的数据 :type buf: object ''' self._result_factory_func = buf class EmptyTestReport(ITestReport): '''不输出测试报告 ''' def __init__(self, result_factory_func=None ): '''构造函数 :param result_factory_func: TestResult工厂函数 :type result_factory_func: callable ''' self._result_factory_func = result_factory_func self._is_passed = True def get_testresult_factory(self): '''获取对应的TestResult工厂 :returns ITestResultFactory ''' return EmptyTestResultFactory(self._result_factory_func) def log_test_result(self, testcase, testresult ): '''记录一个测试结果 :param testcase: 测试用例 :type testcase: TestCase :param testresult: 测试结果 :type testresult: TestResult ''' if not testresult.passed: self._is_passed = False @property def passed(self): '''测试是否通过 ''' return self._is_passed @classmethod def get_parser(cls): '''获取命令行参数解析器(如果实现) :returns: 解析器对象 :rtype: argparse.ArgumentParser ''' return argparse.ArgumentParser(usage=report_usage) @classmethod def parse_args(cls, args_string): '''通过命令行参数构造对象 :returns: 测试报告 :rtype: cls ''' return EmptyTestReport() class StreamTestResultFactory(ITestResultFactory): '''流形式TestResult工厂 ''' def __init__(self, stream ): '''构造函数 :param stream: 指定要输出的流设备 :type stream: file ''' self._stream = stream def create(self, testcase ): '''创建TestResult对象 :param testcase: 测试用例 :type testcase: TestCase :return TestResult ''' return testresult.StreamResult(self._stream) def dumps(self): '''序列化 :return picklable object ''' fileno = self._stream.fileno() if fileno not in [0, 1]: raise ValueError("不支持的流对象: %s" % self._stream) return fileno def loads(self, buf): '''反序列化 :param buf: dumps返回的序列化后的数据 :type buf: object ''' fileno = buf if fileno == 1: self._stream = sys.stdout elif 
fileno == 2: self._stream = sys.stderr else: raise ValueError("invalid fd: %s" % fileno ) class StreamTestReport(ITestReport): '''流形式的测试报告 ''' def __init__(self, stream=sys.stdout, error_stream=sys.stderr, output_testresult=False, output_summary=True ): '''构造函数 :param stream: 指定要输出的流设备 :type stream: file :param output_testresult: 是否输出测试用例执行的日志 :type output_testresult: boolean :param output_summary: 是否输出执行汇总信息 :type output_summary: boolean ''' self._stream = stream self._err_stream = error_stream self._output_testresult = output_testresult self._output_summary = output_summary if stream.encoding and stream.encoding != 'utf8': self._write = lambda x: self._stream.write(x.decode('utf8').encode(stream.encoding)) self._write_err = lambda x: self._err_stream.write(x.decode('utf8').encode(stream.encoding)) else: self._write = self._stream.write self._write_err = self._err_stream.write self._passed_testresults = [] self._failed_testresults = [] def begin_report(self): '''开始测试执行 ''' self._start_time = datetime.now() self._write("Test runs at:%s.\n" % self._start_time.strftime("%Y-%m-%d %H:%M:%S")) def end_report(self): '''结束测试执行 :param passed: 测试是否通过 :type passed: boolean ''' end_time = datetime.now() self._write("Test ends at:%s.\n" % end_time.strftime("%Y-%m-%d %H:%M:%S")) #self._write("Total execution time is :%s\n" % str(end_time-self._start_time).split('.')[0]) if self._output_summary: self._write("\n" + "="*60 + "\n") self._write("SUMMARY:\n\n") self._write(" Totals: %s\t%0.4fs\n\n" % (len(self._failed_testresults) + len(self._passed_testresults), (end_time-self._start_time).total_seconds())) self._write(" Passed: %s\n" % len(self._passed_testresults)) for it in self._passed_testresults: self._write(" \t%s\t%0.4fs\n" % (it.testcase.test_name, it.end_time-it.begin_time)) self._write("\n") self._write(" Failed: %s\n" % len(self._failed_testresults)) for it in self._failed_testresults: self._write_err(" \t%s\t%0.4fs\n" % (it.testcase.test_name, it.end_time-it.begin_time)) def log_test_result(self, testcase, testresult ): '''记录一个测试结果 :param testcase: 测试用例 :type testcase: TestCase :param testresult: 测试结果 :type testresult: TestResult ''' if testresult.passed: self._passed_testresults.append(testresult) else: self._failed_testresults.append(testresult) self._write("run test case: %s(pass?:%s)\n" % (testcase.test_name, testresult.passed)) def log_record(self, level, tag, msg, record={}): '''增加一个记录 :param level: 日志级别 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type level: string :type tag: string :type msg: string :type record: dict ''' self._write("%s\n" % (msg)) def log_filtered_test(self, loader, testcase, reason): '''记录一个被过滤的测试用例 :param loader: 用例加载器 :type loader: TestLoader :param testcase: 测试用例 :type testcase: TestCase :param reason: 过滤原因 :type reason: str ''' self._write("filtered test case: %s (reason: %s)\n" % (testcase.test_name, reason)) def log_load_error(self, loader, name, error): '''记录一个加载失败的用例或用例集 :param loader: 用例加载器 :type loader: TestLoader :param name: 名称 :type name: str :param error: 错误信息 :type error: str ''' line = "" for line in reversed(error.split("\n")): if line.strip(): break self._write_err("load test failed: %s (error: %s)\n" % (name, line)) def get_testresult_factory(self): '''获取对应的TestResult工厂 :returns ITestResultFactory ''' if self._output_testresult: return StreamTestResultFactory(self._stream) else: return EmptyTestResultFactory() @classmethod def get_parser(cls): '''获取命令行参数解析器(如果实现) :returns: 解析器对象 :rtype: argparse.ArgumentParser ''' parser = 
argparse.ArgumentParser(usage=report_usage) parser.add_argument("--no-output-result", action="store_true", help="don't output detail result of test cases") parser.add_argument("--no-summary", action="store_true", help="don't output summary information") return parser @classmethod def parse_args(cls, args_string): '''通过命令行参数构造对象 :returns: 测试报告 :rtype: cls ''' args = cls.get_parser().parse_args(args_string) return cls( output_testresult=not args.no_output_result, output_summary=not args.no_summary) REPORT_XSL = """<?xml version="1.0" encoding="utf-8"?> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:template match="/RunResult"> <html> <head> <style> *{ font-size:12px; font-family: '宋体' , 'Courier New', Arial, 'Arial Unicode MS', ''; } .title { font-size:14px; font-weight: bold; margin: 20px auto 5px auto; } table{ border:solid 1px #0099CC; border-collapse:collapse; margin: 0px auto; } td { border:solid 1px #0099CC; padding: 6px 6px; } .td_Title { color:#FFF; font-weight: bold; background-color:#66CCFF; } .tr_pass { background-color:#B3E8B8; } .tr_fail { background-color:#F5BCBD; } .success { color:#0000FF; } .fail { color:#FF0000; } .exception { color:#00AA00; } </style> </head> <body> <div class='title'> <td>测试报告链接:</td> <td><a><xsl:attribute name="href"><xsl:value-of select="TestReportLink/Url"/></xsl:attribute>点击这里</a></td> </div> <div class='title'>测试运行环境:</div> <table> <tr> <td class='td_Title'>主机名</td> <td><xsl:value-of select="TestEnv/PC"/></td> </tr> <tr> <td class='td_Title'>操作系统</td> <td><xsl:value-of select="TestEnv/OS"/></td> </tr> </table> <div class='title'>测试运行时间:</div> <table> <tr> <td class='td_Title'>Run开始时间</td> <td><xsl:value-of select="RunTime/StartTime"/></td> </tr> <tr> <td class='td_Title'>Run结束时间</td> <td><xsl:value-of select="RunTime/EndTime"/></td> </tr> <tr> <td class='td_Title'>Run执行时间</td> <td><xsl:value-of select="RunTime/Duration"/></td> </tr> </table> <div class='title'>测试用例汇总:</div> <table> <tr> <td class='td_Title'>用例总数</td> <td class='td_Title'>通过用例数</td> <td class='td_Title'>失败用例数</td> </tr> <tr> <td> <xsl:value-of select="count(TestResult)"/> </td> <td> <xsl:value-of select="count(TestResult[@result='True'])"/> </td> <td> <xsl:value-of select="count(TestResult[@result='False'])"/> </td> </tr> </table> <div class='title'>加载失败模块:</div> <table> <tr> <td class='td_Title'>模块名</td> <td class='td_Title'>失败Log</td> </tr> <tr> <xsl:for-each select="LoadTestError"> <tr> <td><xsl:value-of select="@name"/></td> <td><a><xsl:attribute name="href"> <xsl:value-of select="@log"/> </xsl:attribute> Log </a></td> </tr> </xsl:for-each> </tr> </table> <div class='title'>测试用例详细信息:</div> <table> <tr> <td class='td_Title'>测试结果</td> <td class='td_Title'>测试用例</td> <td class='td_Title'>负责人</td> <td class='td_Title'>用例描述</td> <td class='td_Title'>用例状态</td> <td class='td_Title'>用例Log</td> </tr> <xsl:for-each select="TestResult"> <xsl:if test="@result='False'"> <tr class='tr_fail'> <td>失败</td> <td><xsl:value-of select="@name"/></td> <td><xsl:value-of select="@owner"/></td> <td><xsl:value-of select="."/></td> <td><xsl:value-of select="@status"/></td> <td><a><xsl:attribute name="href"> <xsl:value-of select="@log"/> </xsl:attribute> Log </a></td> </tr> </xsl:if> <xsl:if test="@result='True'"> <tr class='tr_pass'> <td>通过</td> <td><xsl:value-of select="@name"/></td> <td><xsl:value-of select="@owner"/></td> <td><xsl:value-of select="."/></td> <td><xsl:value-of select="@status"/></td> <td><a><xsl:attribute name="href"> <xsl:value-of select="@log"/> 
</xsl:attribute> Log </a></td> </tr> </xsl:if> </xsl:for-each> </table> </body> </html> </xsl:template> </xsl:stylesheet>""" RESULT_XLS = """<?xml version="1.0" encoding="utf-8"?><!-- DWXMLSource="tmp/qqtest.hello.HelloW.xml" --><!DOCTYPE xsl:stylesheet [ <!ENTITY nbsp "&#160;"> <!ENTITY copy "&#169;"> <!ENTITY reg "&#174;"> <!ENTITY trade "&#8482;"> <!ENTITY mdash "&#8212;"> <!ENTITY ldquo "&#8220;"> <!ENTITY rdquo "&#8221;"> <!ENTITY pound "&#163;"> <!ENTITY yen "&#165;"> <!ENTITY euro "&#8364;"> ]> <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> <xsl:strip-space elements="*"/> <xsl:template match="/TEST"> <html> <head> <style> *{ font-size:12px; font-family: '宋体' , 'Courier New', Arial, 'Arial Unicode MS', ''; } .title { font-size:14px; font-weight: bold; margin: 20px auto 5px auto; } .subtable{ border:solid 1px #0099CC; border-collapse:collapse; margin: 0px auto auto 0px; } .subtable td { border:solid 1px #0099CC; padding: 6px 6px; } .td_title { color:#FFF; font-weight: bold; background-color:#66CCFF; } .tr_pass { background-color:#B3E8B8; } .tr_fail { background-color:#F5BCBD; } .suc_step_title { background-color:#B3E8B8; padding:2px 2px } .STYLE1 {font-size: 16px} .STYLE3 {font-size: 14px; color:#666666;} .STYLE4 {color: #999999} .STYLE5 { color: #FF0000; font-weight: bold; } .STYLE6 { color: #FF9900; font-weight: bold; } </style> </head> <body> <div> <table class="subtable"> <tr> <td class='td_title'>用例名字:</td> <td><xsl:value-of select="@name"/></td> <td class='td_title'>运行结果:</td> <td> <span> <xsl:attribute name="style"> <xsl:if test="@result='True'">color: #00FF00</xsl:if> <xsl:if test="@result='False'">color: #FF0000</xsl:if> </xsl:attribute> <xsl:apply-templates select="@result"/> </span> </td> </tr> <tr> <td class='td_title'>开始时间:</td> <td><xsl:value-of select="@begintime"/></td> <td class='td_title'>负责人:</td> <td><xsl:value-of select="@owner"/></td> </tr> <tr> <td class='td_title'>结束时间:</td> <td><xsl:value-of select="@endtime"/></td> <td class='td_title'>优先级:</td> <td><xsl:value-of select="@priority"/></td> </tr> <tr> <td class="td_title">运行时间:</td> <td><xsl:value-of select="@duration"/></td> <td class='td_title'>用例超时:</td> <td><xsl:value-of select="@timeout"/>分钟</td> </tr> </table> </div> <xsl:apply-templates/> </body> </html> </xsl:template> <xsl:template name="break_lines"> <xsl:param name="text" select="string(.)"/> <xsl:choose> <xsl:when test="contains($text, '&#xa;')"> <xsl:value-of select="substring-before($text, '&#xa;')"/> <br/> <xsl:call-template name="break_lines"> <xsl:with-param name="text" select="substring-after($text, '&#xa;')" /> </xsl:call-template> </xsl:when> <xsl:otherwise> <xsl:value-of select="$text"/> </xsl:otherwise> </xsl:choose> </xsl:template> <xsl:template match="@result"> <xsl:if test=".='True'">通过</xsl:if> <xsl:if test=".='False'">失败</xsl:if> </xsl:template> <xsl:template match="STEP"> <hr /> <div> <xsl:if test="@result='True'"> <xsl:attribute name="style"> padding:2px 2px; background-color:#B3E8B8 </xsl:attribute> </xsl:if> <xsl:if test="@result='False'"> <xsl:attribute name="style"> padding:2px 2px; background-color:#F5BCBD </xsl:attribute> </xsl:if> <table border="0"> <tr> <td><span class="STYLE1">步骤:</span></td> <td><span class="STYLE1"><xsl:value-of select="@title"/></span></td> <td><span class="STYLE1">&nbsp;<xsl:value-of select="@time"/></span></td> <td><span class="STYLE1">&nbsp; <xsl:apply-templates select="@result"/> </span></td> </tr> </table> </div> <hr /> <table> <xsl:apply-templates/> </table> 
</xsl:template> <xsl:template match="DEBUG"> <tr> <td valign="top"><strong>DEBUG:</strong></td> <td><xsl:value-of select="text()"/></td> </tr> </xsl:template> <xsl:template match="INFO"> <tr> <!--<td valign="top"><span class="STYLE4">12:12:11</span></td> --> <td valign="top"><strong>INFO:</strong></td> <td><xsl:value-of select="text()"/></td> </tr> </xsl:template> <xsl:template match="WARNING"> <tr> <!--<td valign="top"><span class="STYLE4">12:12:11</span></td> --> <td valign="top"><span class="STYLE6">WARNING:</span></td> <td><xsl:value-of select="text()"/></td> </tr> </xsl:template> <xsl:template match="ERROR"> <tr> <!--<td valign="top"><span class="STYLE4">12:12:11</span></td> --> <td valign="top"><span class="STYLE5">ERROR:</span></td> <td> <xsl:call-template name="break_lines" /> <pre> <xsl:value-of select="EXCEPT/text()"/> </pre> <table border="0"> <xsl:apply-templates select="EXPECT"/> <xsl:apply-templates select="ACTUAL"/> </table> <xsl:for-each select="ATTACHMENT"> <a> <xsl:attribute name="href"> <xsl:value-of select="@filepath"/> </xsl:attribute> [<xsl:value-of select="text()"/>] </a> </xsl:for-each> </td> </tr> </xsl:template> <xsl:template match="EXPECT"> <tr> <td>&nbsp;&nbsp;期望值:</td> <td><xsl:value-of select="text()"/></td> </tr> </xsl:template> <xsl:template match="ACTUAL"> <tr> <td>&nbsp;&nbsp;实际值:</td> <td><xsl:value-of select="text()"/></td> </tr> </xsl:template> </xsl:stylesheet>""" class XMLTestResultFactory(ITestResultFactory): '''XML形式TestResult工厂 ''' BAD_CHARS = r'\/*?:<>"|~' TRANS = string.maketrans(BAD_CHARS, '='*len(BAD_CHARS)) def create(self, testcase ): '''创建TestResult对象 :param testcase: 测试用例 :type testcase: TestCase :return TestResult ''' time_str=datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3] filename = '%s_%s.xml' % (testcase.test_name.translate(self.TRANS),time_str) return testresult.XmlResult(filename) class XMLTestReport(ITestReport): '''XML形式的测试报告 ''' def __init__(self): '''构造函数 ''' self._xmldoc = dom.Document() self._xmldoc.appendChild(self._xmldoc.createProcessingInstruction("xml-stylesheet", 'type="text/xsl" href="TestReport.xsl"')) self._runrstnode = self._xmldoc.createElement("RunResult") self._xmldoc.appendChild(self._runrstnode) self._result_factory = XMLTestResultFactory() def begin_report(self): '''开始测试执行 ''' self._time_start = datetime.now() xmltpl = "<TestEnv><PC>%s</PC><OS>%s</OS></TestEnv>" hostname = socket.gethostname() if sys.platform == 'win32': osver = os.popen("ver").read().decode('gbk').encode('utf-8') else: osver = os.uname() # @UndefinedVariable envxml = dom.parseString(xmltpl % (hostname, osver)) self._runrstnode.appendChild(envxml.childNodes[0]) def end_report(self): '''结束测试执行 :param passed: 测试是否通过 :type passed: boolean ''' time_end = datetime.now() timexml = "<RunTime><StartTime>%s</StartTime><EndTime>%s</EndTime><Duration>%s</Duration></RunTime>" timexml = timexml % (self._time_start.strftime("%Y-%m-%d %H:%M:%S"), time_end.strftime("%Y-%m-%d %H:%M:%S"), str(time_end-self._time_start).split('.')[0] ) timenodes = dom.parseString(timexml) self._runrstnode.appendChild(timenodes.childNodes[0]) xmldata = self._xmldoc.toprettyxml(indent=" ", newl="\n", encoding='utf-8') with codecs.open('TestReport.xml', 'w') as fd: fd.write(xmldata) with codecs.open('TestReport.xsl', 'w') as fd: fd.write(REPORT_XSL) with codecs.open('TestResult.xsl', 'w') as fd: fd.write(RESULT_XLS) def log_test_result(self, testcase, testresult ): '''记录一个测试结果 :param testcase: 测试用例 :type testcase: TestCase :param testresult: 测试结果 :type testresult: XmlResult ''' 
casemark = cgi.escape(testcase.test_doc) nodestr = """<TestResult result="%s" log="%s" status="%s">%s</TestResult> """ % (testresult.passed, testresult.file_path, testcase.status, casemark) doc2 = dom.parseString(nodestr) resultNode = doc2.childNodes[0] resultNode.setAttribute("name", _to_unicode(saxutils.escape(testcase.test_name))) resultNode.setAttribute("owner", _to_unicode(saxutils.escape(testcase.owner))) self._runrstnode.appendChild(resultNode) def log_record(self, level, tag, msg, record={}): '''增加一个记录 :param level: 日志级别 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type level: string :type tag: string :type msg: string :type record: dict ''' if tag == 'LOADER' and level == EnumLogLevel.ERROR: if record.has_key('error_testname') and record.has_key('error'): testname = record['error_testname'] mdfailsnode = self._xmldoc.createElement("LoadFailure") self._runrstnode.appendChild(mdfailsnode) logfile = '%s.log' % testname xmltpl = """<Module name="%s" log="%s"/>""" % (testname, logfile) mdfailsnode.appendChild(dom.parseString(xmltpl).childNodes[0]) with open(logfile, 'w') as fd: fd.write(record['error']) def log_filtered_test(self, loader, testcase, reason): '''记录一个被过滤的测试用例 :param loader: 用例加载器 :type loader: TestLoader :param testcase: 测试用例 :type testcase: TestCase :param reason: 过滤原因 :type reason: str ''' nodestr = """<FilterTest name="%s" reason="%s"></FilterTest> """ % ( _to_unicode(saxutils.escape(testcase.test_name)), _to_unicode(saxutils.escape(reason)) ) doc2 = dom.parseString(nodestr) filterNode = doc2.childNodes[0] self._runrstnode.appendChild(filterNode) def log_load_error(self, loader, name, error): '''记录一个加载失败的用例或用例集 :param loader: 用例加载器 :type loader: TestLoader :param name: 名称 :type name: str :param error: 错误信息 :type error: str ''' log_file = "%s.log" % name nodestr = """<LoadTestError name="%s" log="%s"></LoadTestError> """ % ( _to_unicode(saxutils.escape(name)), log_file, ) doc2 = dom.parseString(nodestr) errNode = doc2.childNodes[0] self._runrstnode.appendChild(errNode) with open(log_file, 'w') as fd: fd.write(error) def get_testresult_factory(self): '''获取对应的TestResult工厂 :returns ITestResultFactory ''' return self._result_factory @classmethod def get_parser(cls): '''获取命令行参数解析器(如果实现) :returns: 解析器对象 :rtype: argparse.ArgumentParser ''' return argparse.ArgumentParser(usage=report_usage) @classmethod def parse_args(cls, args_string): '''通过命令行参数构造对象 :returns: 测试报告 :rtype: cls ''' return cls() class JSONTestResultFactory(ITestResultFactory): '''JSON形式TestResult工厂 ''' def create(self, testcase ): '''创建TestResult对象 :param testcase: 测试用例 :type testcase: TestCase :return TestResult ''' return testresult.JSONResult(testcase) class JSONTestReport(ITestReport): '''JSON格式的测试报告 ''' def __init__(self, name="调试测试报告", fd=None ): '''构造函数 :param name: 报告名 :type name: str :param fd: 输出流 :type fd: file object ''' if fd is None: self._fd = sys.stdout else: self._fd = fd self._results = [] self._logs = [] self._filtered_tests = [] self._load_errors = [] self._testcases = [] self._data = { "version": "1.0", "summary": { "tool": "QTA", "name": name, }, "results": self._results, "logs": self._logs, "filtered_tests": self._filtered_tests, "load_errors": self._load_errors, "loaded_testcases": self._testcases } self._testcase_total = 0 self._testcase_passed = 0 def begin_report(self): '''开始测试执行 ''' self._data["summary"]["start_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S") def end_report(self): '''结束测试执行 :param passed: 测试是否通过 :type passed: boolean ''' 
self._data["summary"]["testcase_total"] = self._testcase_total self._data["summary"]["testcase_passed"] = self._testcase_passed self._data["summary"]["succeed"] = self._testcase_passed == self._testcase_total self._data["summary"]["end_time"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S") json.dump(self._data, self._fd) def log_test_result(self, testcase, testresult ): '''记录一个测试结果 :param testcase: 测试用例 :type testcase: TestCase :param testresult: 测试结果 :type testresult: TestResult ''' self._testcase_total += 1 if testresult.passed: self._testcase_passed += 1 self._results.append(testresult.get_data()) def log_record(self, level, tag, msg, record): '''增加一个记录 :param level: 日志级别 :param msg: 日志消息 :param tag: 日志标签 :param record: 日志记录信息 :type level: string :type tag: string :type msg: string :type record: dict ''' self._logs.append({ "level": level, "tag": tag, "message": msg, "record": record }) def log_loaded_tests(self, loader, testcases): '''记录加载成功的用例 :param loader: 用例加载器 :type loader: TestLoader :param testcases: 测试用例列表 :type testcases: list ''' self._testcases += [ {"name": testcase.test_name} for testcase in testcases ] def log_filtered_test(self, loader, testcase, reason): '''记录一个被过滤的测试用例 :param loader: 用例加载器 :type loader: TestLoader :param testcase: 测试用例 :type testcase: TestCase :param reason: 过滤原因 :type reason: str ''' self._filtered_tests.append({ "name": testcase.test_name, "reason": reason }) def log_load_error(self, loader, name, error): '''记录一个加载失败的用例或用例集 :param loader: 用例加载器 :type loader: TestLoader :param name: 名称 :type name: str :param error: 错误信息 :type error: str ''' self._load_errors.append({ "name": name, "error": error }) def get_testresult_factory(self): '''获取对应的TestResult工厂 :returns ITestResultFactory ''' return JSONTestResultFactory() @classmethod def get_parser(cls): '''获取命令行参数解析器(如果实现) :returns: 解析器对象 :rtype: argparse.ArgumentParser ''' parser = argparse.ArgumentParser(usage=report_usage) parser.add_argument("--name", help="report title", default="Debug test report") parser.add_argument("-o", "--output", help="output file path, can be stdout & stderr", default="stdout") return parser @classmethod def parse_args(cls, args_string): '''通过命令行参数构造对象 :returns: 测试报告 :rtype: cls ''' args = cls.get_parser().parse_args(args_string) if args.output == 'stdout': fd = sys.stdout elif args.output == 'stderr': fd = sys.stderr else: fd = open(args.output, 'w') return cls( name=args.name, fd=fd) def __init_report_types(): global report_types if report_types: return report_types.update({ "empty": EmptyTestReport, "stream": StreamTestReport, "xml": XMLTestReport, "json": JSONTestReport, }) # Register other `ITestReport` implementiations from entry points for ep in pkg_resources.iter_entry_points(REPORT_ENTRY_POINT): if ep.name not in report_types: report_types[ep.name] = ep.load() __init_report_types() del __init_report_types
1.664063
2
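A minimal sketch of plugging a custom report into the registry above via the qtaf.report entry point; the package and module names (my_qtaf_plugin, reports) are placeholders, not part of QTAF.

# my_qtaf_plugin/reports.py -- hypothetical plugin module
from testbase.report import ITestReport, EmptyTestResultFactory


class QuietReport(ITestReport):
    """A do-almost-nothing report used here only to illustrate the hook."""

    def get_testresult_factory(self):
        return EmptyTestResultFactory()

    @classmethod
    def parse_args(cls, args_string):
        return cls()

# The plugin's setup.py would then expose the class so that
# __init_report_types() picks it up through pkg_resources:
#
# setup(
#     ...,
#     entry_points={
#         "qtaf.report": ["quiet = my_qtaf_plugin.reports:QuietReport"],
#     },
# )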
tasks/affective_task/utils/display_msg_affective_disscussion.py
eduongAZ/tomcat-baseline-tasks
1
12777121
import pygame

from config import UPDATE_RATE
from common import render_text_center


def display_msg_affective_disscussion(screen, msg: str, milliseconds: int):
    start_ticks = pygame.time.get_ticks()
    clock = pygame.time.Clock()
    while pygame.time.get_ticks() - start_ticks < milliseconds:
        render_text_center(msg, (1250, 90), screen, font_size=55, x_offset=0, y_offset=0)
        pygame.event.get()
        clock.tick(UPDATE_RATE)
2.84375
3
challenges/challenge_14.py
ysaunier/ring-zer0
0
12777122
<reponame>ysaunier/ring-zer0
import hashlib

from core.client import RingClient


def decode_binary_string(text) -> str:
    return ''.join(chr(int(text[i * 8:i * 8 + 8], 2)) for i in range(len(text) // 8))


def execute():
    client = RingClient()
    client.login()
    page = client.get_challenge(challenge=14)
    message = page.find('div', attrs={'class': 'message'})
    text = message.contents[2].strip()
    response = hashlib.sha512(decode_binary_string(text=text).encode()).hexdigest()
    print(client.send_answer(challenge=14, response=response))


if __name__ == '__main__':
    execute()
2.65625
3
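A tiny check of decode_binary_string above; the input is the 8-bit ASCII encoding of "Hi", and the import assumes the package layout matches the file path.

from challenges.challenge_14 import decode_binary_string

# 01001000 -> 'H' (72), 01101001 -> 'i' (105)
assert decode_binary_string("0100100001101001") == "Hi"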
soldash/views.py
ap0ught/soldash
2
12777123
<reponame>ap0ught/soldash # -*- coding: utf-8 -*- #Copyright 2011 <NAME> <<EMAIL>> # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. # from multiprocessing import Pool from flask import request, jsonify, abort, redirect from flaskext.mako import render_template from soldash import app import soldash.helpers as h @app.route('/') def homepage(): """ Render and return the main homepage HTML. """ versions = {} for host in app.config['HOSTS']: versions[host['hostname']] = h.get_solr_version(host) pool = Pool(processes=len(app.config['HOSTS'])) pool_data = [] for core in app.config['CORES']: for host in app.config['HOSTS']: pool_data.append({'core': core, 'host': host}) c = h.repackage_details(pool.map(h.get_details, pool_data)) return render_template('/main.mako', c=c, h=h, versions=versions, config=app.config) @app.route('/execute/<command>', methods=['GET']) def execute(command): """ Execute a command """ hostname = request.args.get('hostname') core = request.args.get('core') params = {} if core not in app.config['CORES']: abort(400, 'Invalid core') if command == 'filelist': params['indexversion'] = request.args.get('indexversion') elif command == 'select': params['q'] = request.args.get('q') params['fl'] = request.args.get('fl', '') # TODO: check validity of command name try: host = [obj for obj in app.config['HOSTS'] if obj['hostname'] == hostname][0] except KeyError: abort(400, 'Invalid hostname') # TODO: Error checking from Solr retval = h.query_solr(host, command, core, params=params) if command in ['filelist', 'select']: return jsonify(retval) return redirect('/')
1.992188
2
Face detcetion and dataset generation/ex12.py
jyothiprakashpanaik/ML-4-e
1
12777124
# Face recognition: collect a face dataset from the webcam
import cv2
import os

alg = "haarcascade_frontalface_default.xml"
haar = cv2.CascadeClassifier(alg)
cam = cv2.VideoCapture(0)

path = "dataset"
if not os.path.isdir(path):
    os.mkdir(path)

(width, height) = (100, 100)
count = 0
while count < 100:
    count += 1
    print(count)
    _, img = cam.read()
    grayImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = haar.detectMultiScale(grayImg, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
        onlyFace = grayImg[y:y + h, x:x + w]
        resizeImg = cv2.resize(onlyFace, (width, height))
        cv2.imwrite("%s/%s.jpg" % (path, count), resizeImg)
    cv2.imshow("faceDetection", img)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

print("Successfully collected dataset")
cam.release()
cv2.destroyAllWindows()
2.875
3
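A follow-on sketch showing one way the images collected above could be used to train a recognizer; it assumes opencv-contrib-python is installed (for cv2.face) and that every image in dataset/ belongs to a single person labelled 0.

import os

import cv2
import numpy as np

images, labels = [], []
for name in os.listdir("dataset"):
    img = cv2.imread(os.path.join("dataset", name), cv2.IMREAD_GRAYSCALE)
    if img is not None:
        images.append(img)
        labels.append(0)  # single-person dataset in this example

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(images, np.array(labels))
recognizer.write("face_model.yml")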
tlgbotcore/csvdbutils/csvdb/tests/test_csvdb.py
kaefik/py-tlgbotcore
0
12777125
""" провести в порядок тесты """ import unittest import os import shutil import csv from csvdb import CSVDB class TestCSVDB(unittest.TestCase): """ тесты для проверки работы объекта CSVDB """ def remove_dbdir(self): """ удаление БД папки даже она существует """ if os.path.exists(self.tst_name_db): shutil.rmtree(self.tst_name_db) def create_dbdir(self): """ создание БД папки если ёё нет, если есть то удаляется и заново создается """ if os.path.exists(self.tst_name_db): shutil.rmtree(self.tst_name_db) os.mkdir(self.tst_name_db) print("File ", self.file1) # создаем простой файл внутри папки with open(self.file1, "w") as f: f.write("Tecт") def setUp(self) -> None: self.tst_name_db = "my_test_db" self.file1 = f"{self.tst_name_db}/file1.csv" self.tst_table1 = 'table1' def tearDown(self) -> None: self.remove_dbdir() def test_initdb_noexist_dirdb(self): """ проверка правильно ли отрабатывается инициализация БД когда папки БД не существует """ # инициализация тестового окружения self.remove_dbdir() db = CSVDB(name_db=self.tst_name_db) flag = os.path.exists(self.tst_name_db) and os.path.isdir(self.tst_name_db) self.assertEqual(True, flag) def test_initdb_exist_dirdb_force(self): """ проверка правильно ли отрабатывается инициализация БД когда папки БД существует и нужно перезаписать """ # инициализация тестового окружения self.create_dbdir() db = CSVDB(name_db=self.tst_name_db, force=True) flag_dir = os.path.exists(self.tst_name_db) and os.path.isdir(self.tst_name_db) flag_file = os.path.exists(self.file1) and os.path.isfile(self.tst_name_db) self.assertEqual(True, flag_dir) self.assertEqual(False, flag_file) def test_initdb_exist_dirdb_noforce(self): """ проверка правильно ли отрабатывается инициализация БД когда папки БД существует и НЕ нужно перезаписать """ # инициализация тестового окружения self.create_dbdir() db = CSVDB(name_db=self.tst_name_db, force=False) flag_dir = os.path.exists(self.tst_name_db) and os.path.isdir(self.tst_name_db) flag_file = os.path.exists(self.file1) and os.path.isfile(self.file1) self.assertEqual(True, flag_dir) self.assertEqual(True, flag_file) def test_create_table(self): """ создание таблицы """ self.remove_dbdir() db = CSVDB(name_db=self.tst_name_db, force=False) headers_original = ['NUMBER', 'FIO', 'ROLE'] db.create_table(name_table=self.tst_table1, colums=headers_original) full_path_table1 = db.full_path(self.tst_table1) flag_name_table = db.tables[0] flag_exist_table = os.path.exists(full_path_table1) print(full_path_table1) # проверяем что файл присутствует self.assertEqual(True, flag_exist_table) # проверяем заголовки файла таблицы headers = [] with open(full_path_table1) as f: reader = csv.DictReader(f, delimiter=";") headers = reader.fieldnames self.assertEqual(headers, headers_original) def test_create_table_exist_table(self): """ создание таблицы, файл которой уже есть """ self.remove_dbdir() db = CSVDB(name_db=self.tst_name_db, force=False) headers_original = ['NUMBER', 'FIO', 'ROLE'] flag_noexist = db.create_table(name_table=self.tst_table1, colums=headers_original) flag_exist = db.create_table(name_table=self.tst_table1, colums=headers_original) self.assertEqual(True, flag_noexist) self.assertEqual(False, flag_exist) def test_insert_data(self): """ тест вставки данных :return: """ headers_original = ['NUMBER', 'FIO', 'ROLE'] data_original = {'NUMBER': '1', 'FIO': '<NAME>', 'ROLE': 'Admin'} self.remove_dbdir() db = CSVDB(name_db=self.tst_name_db, force=False) flag_noexist = db.create_table(name_table=self.tst_table1, colums=headers_original) 
full_path_table1 = db.full_path(self.tst_table1) db.insert_data(name_table=self.tst_table1, data=data_original) result_data = db.getall(name_table=self.tst_table1) self.assertEqual(result_data[0], data_original) # проверяем что запись одна self.assertEqual(1, len(result_data)) # добавляем ещё одну запись db.insert_data(name_table=self.tst_table1, data=data_original) result_data = db.getall(name_table=self.tst_table1) self.assertEqual(2, len(result_data)) if __name__ == '__main__': unittest.main()
3
3
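The tests above exercise a csvdb.CSVDB class whose source is not part of this record. The sketch below is a hypothetical, minimal implementation of the interface those tests assume (constructor with name_db/force, create_table, full_path, tables, insert_data, getall), written only to make the expected behaviour concrete; it is not the project's actual module, and every name in it is taken from the test code.

import csv
import os
import shutil


class CSVDB:
    """Minimal sketch of the interface the tests above assume (hypothetical)."""

    def __init__(self, name_db, force=False):
        self.name_db = name_db
        self.tables = []
        if force and os.path.exists(name_db):
            shutil.rmtree(name_db)      # force=True wipes any existing DB folder
        if not os.path.exists(name_db):
            os.mkdir(name_db)           # the folder is (re)created if missing

    def full_path(self, name_table):
        return os.path.join(self.name_db, f"{name_table}.csv")

    def create_table(self, name_table, colums):
        path = self.full_path(name_table)
        if os.path.exists(path):
            return False                # the tests expect False for an existing table
        with open(path, "w", newline="") as f:
            csv.DictWriter(f, fieldnames=colums, delimiter=";").writeheader()
        self.tables.append(name_table)
        return True

    def insert_data(self, name_table, data):
        path = self.full_path(name_table)
        with open(path, newline="") as f:
            fieldnames = csv.DictReader(f, delimiter=";").fieldnames
        with open(path, "a", newline="") as f:
            csv.DictWriter(f, fieldnames=fieldnames, delimiter=";").writerow(data)

    def getall(self, name_table):
        with open(self.full_path(name_table), newline="") as f:
            return list(csv.DictReader(f, delimiter=";"))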
uiuc_transportation_demand/tests/test_functions.py
yardasol/pride
2
12777126
from functions import units from functions import gge_dictionary from functions import unit_cost_dictionary from functions import gge_cost_dictionary from functions import fuel_equivalent from functions import fuel_equivalent_cost from functions import co2_equivalent from functions import co2_emissions import pytest # ============================================================================ # Tests for units() # ============================================================================ def test_units(): """ Should not raise an error if units is correct. """ test = units() assert test == { 'Gasoline': 'gallon', 'Diesel': 'gallon', 'E85': 'gallon', 'Hydrogen': 'kg', 'Electricity': 'kWh' } # ============================================================================ # Tests for gge_dictionary() # ============================================================================ def test_gge_dictionary(): """ Should not raise an error if gge_dictionary is correctly formatted. """ test = gge_dictionary() assert test == { 'Gasoline': 1.0, 'Diesel': 1.155, 'E85': 0.734, 'Hydrogen': 1.019, 'Electricity': 0.031 } # ============================================================================ # Tests for unit_cost_dictionary() # ============================================================================ def test_unit_cost_dictionary(): """ Should not raise an error if unit_cost_dictionary is correctly formatted. """ test = unit_cost_dictionary() assert test == { 'Gasoline': 2.23, 'Diesel': 2.41, 'E85': 1.71, 'Hydrogen': 13.99, 'Electricity': 0.0426 } # ============================================================================ # Tests for gge_cost_dictionary() # ============================================================================ def test_gge_cost_dictionary(): """ Should not raise an error if gge_cost_dictionary is correctly formatted. """ test = gge_cost_dictionary() assert test == { 'Gasoline': 2.23, 'Diesel': 2.0865800865800868, 'E85': 2.329700272479564, 'Hydrogen': 13.729146221786067, 'Electricity': 1.3741935483870968 } # ============================================================================ # Tests for fuel_equivalent() # ============================================================================ def test_fuel_equivalent_1(): """ Should raise an IndexError if fuel_equivalent is properly set up. """ fuel_test = 'Plutonium' with pytest.raises(IndexError, match='Plutonium not supported.'): fuel_equivalent(fuel_test) def test_fuel_equivalent_2(): """ Should raise a TypeError if fuel_equivalent is properly set up. """ Hydrogen = 4 fuel_test = Hydrogen with pytest.raises(TypeError, match='Please'): fuel_equivalent(fuel_test) # ============================================================================ # Tests for fuel_equivalent_cost() # ============================================================================ def test_fuel_equivalent_cost_1(): """ Should raise an IndexError if fuel_equivalent_cost is properly set up. """ fuel_test = 'Plutonium' with pytest.raises(IndexError, match='Plutonium not supported.'): fuel_equivalent(fuel_test) def test_fuel_equivalent_cost_2(): """ Should raise a TypeError if fuel_equivalent_cost is properly set up. 
""" Hydrogen = 4 fuel_test = Hydrogen with pytest.raises(TypeError, match='Please'): fuel_equivalent(fuel_test) # ============================================================================ # Tests for co2_equivalent() # ============================================================================ def test_co2_equivalent(): """ Should not raise an error if co2_equivalent is properly set up. """ test = co2_equivalent() assert test == { 'Gasoline': 8.89, 'Diesel': 10.16, 'E85': 6.221, 'Hydrogen': 0, 'Electricity': 0 } # ============================================================================ # Tests for co2_emissions() # ============================================================================ def test_co2_emissions_1(): """ Should raise an IndexError if co2_emissions is set up properly. """ fuel_test = 'Plutonium' with pytest.raises(IndexError, match='Plutonium not supported.'): fuel_equivalent(fuel_test) def test_co2_emissions_2(): """ Should raise a TypeError if co2_emissions is set up properly. """ Hydrogen = 4 fuel_test = Hydrogen with pytest.raises(TypeError, match='Please'): fuel_equivalent(fuel_test)
2.6875
3
tfjs_graph_converter/quirks.py
httpsgithu/tfjs-to-tf
114
12777127
# SPDX-License-Identifier: MIT # Copyright © 2020 <NAME> """Functions to fix various known issues with exported TFJS models""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import base64 from typing import Any, Dict, List, Optional import tfjs_graph_converter.common as common def _find_if_has_key(obj: Dict[str, Any], key: str, of_type: Optional[type] = None) -> List[Any]: """ Recursively find all objects with a given key in a dictionary Args: obj: Dictionary to search key: Key to find of_type: [optional] Type of the referenced item Returns: List of all objects that contain an item with the given key and matching type """ def get_children(item: Any) -> List[Any]: return [val for val in item.values() if isinstance(val, dict)] found = [] stack = get_children(obj) while len(stack) > 0: item = stack.pop() if key in item and (of_type is None or isinstance(item[key], of_type)): found.append(item) stack.extend(get_children(item)) return found def _convert_string_attrs(node: Dict[str, Any]) -> None: """ Deep search string attributes (labelled "s" in GraphDef proto) and convert ascii code lists to base64-encoded strings if necessary """ attr_key = common.TFJS_NODE_ATTR_KEY str_key = common.TFJS_ATTR_STRING_VALUE_KEY # some layers (e.g. PReLU) don't contain the `attr` key, # so test for its presence attrs: list = [] if attr_key in node: attrs = _find_if_has_key(node[attr_key], key=str_key, of_type=list) for attr in attrs: array = attr[str_key] # check if conversion is actually necessary if (len(array) > 0) and isinstance(array, list) \ and isinstance(array[0], int): string = ''.join(map(chr, array)) binary = string.encode('utf8') attr[str_key] = base64.encodebytes(binary) elif len(array) == 0: attr[str_key] = None def _fix_dilation_attrs(node: Dict[str, Any]) -> None: """ Search dilations-attribute and convert misaligned dilation rates if necessary see https://github.com/patlevin/tfjs-to-tf/issues/1 """ path = ['attr', 'dilations', 'list'] values = node found = True for key in path: if key in values: values = values[key] else: found = False break # if dilations are present, they're stored in 'values' now ints = common.TFJS_ATTR_INT_VALUE_KEY if found and ints in values and isinstance(values[ints], list): value = values[ints] if len(value) != 4: # must be NCHW-formatted 4D tensor or else TF can't handle it raise ValueError("Unsupported 'dilations'-attribute in node " f'{node[common.TFJS_NAME_KEY]}') # check for [>1,>1,1,1], which is likely a mistranslated [1,>1,>1,1] if int(value[0], 10) > 1: values[ints] = ['1', value[0], value[1], '1'] def fix_node_attributes(message_dict: Dict[str, Any]) -> Dict[str, Any]: """ Fix various known issues found "in the wild": • Node attributes in deserialised JSON may contain strings as lists of ascii codes when the TF GraphDef proto expects base64 encoded strings • 'dilation' attributes may be misaligned in a way unsupported by TF Further fixes will be added as issues are reported. Args: message_dict: Graph model formatted as parsed JSON dictionary Returns: Updated message dictionary with fixes applied if necessary """ if common.TFJS_NODE_KEY in message_dict: nodes = message_dict[common.TFJS_NODE_KEY] for node in nodes: _convert_string_attrs(node) _fix_dilation_attrs(node) return message_dict
2.21875
2
problem_solving/3sum.py
umitkaanusta/stuff-im-learning
1
12777128
from typing import List


class Solution:
    # not my solution, but it's very cool
    def threeSum(self, nums: List[int]) -> List[List[int]]:
        triplets = set()
        neg, pos, zeros = [], [], 0
        for num in nums:
            if num > 0:
                pos.append(num)
            elif num < 0:
                neg.append(num)
            else:
                zeros += 1
        neg_set, pos_set = set(neg), set(pos)  # for O(1) lookup
        # if there's a zero in the list, add cases where -x is in neg and x is in pos
        if zeros > 0:
            for num in pos_set:
                if -num in neg_set:
                    triplets.add((-num, 0, num))
        # if there are at least 3 zeros in the list, add (0, 0, 0)
        if zeros >= 3:
            triplets.add((0, 0, 0))
        # for all pairs of negative numbers, check if their complement is in the positive set
        for i in range(len(neg)):
            for j in range(i + 1, len(neg)):
                target = -1 * (neg[i] + neg[j])
                if target in pos_set:
                    triplets.add(tuple(sorted([neg[i], neg[j], target])))
        # do the same for positive numbers
        for i in range(len(pos)):
            for j in range(i + 1, len(pos)):
                target = -1 * (pos[i] + pos[j])
                if target in neg_set:
                    triplets.add(tuple(sorted([pos[i], pos[j], target])))
        return triplets
3.34375
3
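A quick usage sketch for the threeSum solution above, using the standard LeetCode example input; note the method actually returns a set of sorted tuples, which can be converted to lists if the exact List[List[int]] signature is needed.

sol = Solution()
result = sol.threeSum([-1, 0, 1, 2, -1, -4])
# result is a set of sorted tuples: {(-1, -1, 2), (-1, 0, 1)}
print([list(t) for t in result])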
targets/PythonSdk/source/PlayFab/PlayFabSettings.py
iomac/SDKGenerator
0
12777129
<filename>targets/PythonSdk/source/PlayFab/PlayFabSettings.py import playfab.PlayFabErrors as PlayFabErrors import sys import traceback ProductionEnvironmentURL = "https://{titleId}.playfabapi.com{methodUrl}" TitleId = "" # You must set this value for PlayFabSdk to work properly (Found in the Game # Manager for your title, at the PlayFab Website) DeveloperSecretKey = None # You must set this value for Admin/Server/Matchmaker to work properly (Found in the Game # Manager for your title, at the PlayFab Website) # Client specifics AdvertisingIdType = "" # Set this to the appropriate AD_TYPE_X constant below AdvertisingIdValue = None # Set this to corresponding device value # DisableAdvertising is provided for completeness, but changing it is not # suggested # Disabling this may prevent your advertising-related PlayFab marketplace # partners from working correctly DisableAdvertising = False AD_TYPE_IDFA = "Idfa" AD_TYPE_ANDROID_ID = "Adid" class InternalSettings: pass _internalSettings = InternalSettings() # This is automatically populated by the PlayFabEntityApi.GetEntityToken method. _internalSettings.EntityToken = None # This is automatically populated by any PlayFabClientApi.Login method. _internalSettings.ClientSessionTicket = None _internalSettings.SdkVersionString = "PythonSdk-<%- sdkVersion %>" def GetURL(methodUrl): if not TitleId: raise PlayFabErrors.PlayFabException("You must set PlayFabSettings.TitleId before making an API call") url = ProductionEnvironmentURL.format(titleId = TitleId, methodUrl=methodUrl) return url def DefaultExceptionLogger(exceptionObj): print("Unexpected error:", sys.exc_info()[0]) traceback.print_exc() GlobalErrorHandler = None GlobalExceptionLogger = DefaultExceptionLogger
1.929688
2
recalibration/recalibration.py
LaRoccaRaphael/MSI_recalibration
10
12777130
<filename>recalibration/recalibration.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- import argparse import numpy as np from pyimzml.ImzMLParser import ImzMLParser from pyimzml.ImzMLWriter import ImzMLWriter from sklearn import linear_model from scipy.stats import gaussian_kde def peak_selection(ms_intensities): # return the 300 mot intense centroid of a mass spectrum intensities_arr = np.array(ms_intensities) return(intensities_arr.argsort()[::-1][:300]) def compute_masserror(experimental_mass, database_mass, tolerance): # mass error in Dalton if database_mass != 0: return abs(experimental_mass - database_mass) <= tolerance def binarySearch_tol(arr, l, r, x, tolerance): # binary with a tolerance in Da search from an ordered list while l <= r: mid = l + (r - l)//2; if compute_masserror(x,arr[mid],tolerance): itpos = mid +1 itneg = mid -1 index = [] index.append(mid) if( itpos < len(arr)): while compute_masserror(x,arr[itpos],tolerance) and itpos < len(arr): index.append(itpos) itpos += 1 if( itneg > 0): while compute_masserror(x,arr[itneg],tolerance) and itneg > 0: index.append(itneg) itneg -= 1 return index elif arr[mid] < x: l = mid + 1 else: r = mid - 1 return -1 def hits_generation(peaks_mz,database_exactmass, tolerance): # for each detected mz return its index in of the hits in the database hit_errors = list() hit_exp = list() for i in range(0,np.size(peaks_mz,0)): exp_peak = peaks_mz[i] db_ind = binarySearch_tol(np.append(database_exactmass,np.max(database_exactmass)+1), 0, len(database_exactmass)-1, exp_peak,tolerance) if db_ind != -1: for j in range(0,len(db_ind)): true_peak = database_exactmass[db_ind[j]] da_error = (exp_peak - true_peak) hit_errors.append(da_error) hit_exp.append(exp_peak) return(np.asarray(hit_exp),np.asarray(hit_errors)) def kde_scipy(x, x_grid, bandwidth=0.002, **kwargs): # kernel density estimation of the hit errors kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs) return kde.evaluate(x_grid) def hits_selection(hit_errors, step, tolerance, da_limit): # return the indexes of the hits of the most populated error region x = np.asarray(hit_errors) x_grid = np.arange(-tolerance,tolerance+0.0001,0.0001) pdf = kde_scipy(x, x_grid, bandwidth=step) max_da_value = x_grid[np.argmax(pdf,axis=0)] roi = (x <= (max_da_value + da_limit)) & (x >= (max_da_value -da_limit )) return(roi) def create_lm(hit_exp,hit_errors,tolerance=30,da_limit=2.5,step=0.001): # estimate a linear model of the mz error according to the mz with RANSAC algorithm X = np.vander(hit_exp, 2) # 2d array for ransac algorithm, we add only ones in the second column roi = hits_selection(hit_errors,step,tolerance=tolerance,da_limit=da_limit) y = hit_errors[roi] X = X[roi,] try: model = linear_model.RANSACRegressor(max_trials=300, min_samples=10) mz_error_model = model.fit(X, y) except ValueError: print("error") mz_error_model = [] return(mz_error_model) def correct_mz_lm(ms_mzs,mz_error_model): # predict the Da errors for each detected mz and correct them X = np.vander(ms_mzs, 2) predicted_mz_errors = mz_error_model.predict(X) estimated_mz = ms_mzs - predicted_mz_errors return(estimated_mz) def write_corrected_msi(msi,output_file,tolerance,database_exactmass,step,dalim): # iterate throug each pixel of an MSI with ImzMLWriter(output_file) as w: p = ImzMLParser(msi, parse_lib='ElementTree') for idx, (x,y,z) in enumerate(p.coordinates): ms_mzs, ms_intensities = p.getspectrum(idx) peaks_ind = peak_selection(ms_intensities) peaks_mz = ms_mzs[peaks_ind] if len(peaks_mz) >30 : hit_exp, 
hit_errors = hits_generation(peaks_mz,database_exactmass, tolerance) if len(hit_errors) > 10: roi = hits_selection(hit_errors, step, tolerance , da_limit=dalim) if np.sum(roi) > 10: mz_error_model = create_lm(hit_exp,hit_errors,tolerance=tolerance,da_limit=dalim,step=step) if mz_error_model: corrected_mzs = correct_mz_lm(ms_mzs, mz_error_model) w.addSpectrum(corrected_mzs, ms_intensities, (x,y,z)) my_parser = argparse.ArgumentParser(allow_abbrev=False) my_parser.add_argument('-i','--input', action='store', type=str, required=True,help='file path to an imzML') my_parser.add_argument('-i2','--input2', action='store', type=str, required=True,help='file containing the calibrating ion mass values') my_parser.add_argument('-o','--output', action='store', type=str, required=True,help='file path for the recalibrated MSI in imzML format') my_parser.add_argument('-st','--step', action='store', type=float, required=True,help='bandwidth for the density estimation function') my_parser.add_argument('-tl','--tol', action='store', type=float, required=True,help='Da tolerance for the identifications') my_parser.add_argument('-lm','--dalim', action='store', type=float, required=True,help='limit in Da for hits selection') args = my_parser.parse_args() msi = args.input step = args.step tolerance = args.tol database_name = args.input2 dalim = args.dalim exact_mass_full = np.genfromtxt(database_name) # order the list of masses for the binary search database_exactmass = exact_mass_full[exact_mass_full.argsort()] output_file = args.output write_corrected_msi(msi,output_file,tolerance,database_exactmass,step,dalim)
2.46875
2
tests/test_add_contact.py
kegorn/python_training
0
12777131
<filename>tests/test_add_contact.py<gh_stars>0
from model.contact import Contact


def test_add_contact(app):
    app.contactHelper.create(Contact(firstname="new_fn_1", lastname="new_ln_1", address2="some_address_2"))
2.078125
2
src/scripts/train.py
xiedidan/sparse-coding
18
12777132
import os import sys sys.path.insert(0, os.path.abspath('../../.')) from tqdm import tqdm import torch from src.model.SparseNet import SparseNet from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from src.model.ImageDataset import NatPatchDataset from src.utils.cmd_line import parse_args from src.scripts.plotting import plot_rf # save to tensorboard board = SummaryWriter("../../runs/sparse-net") arg = parse_args() # if use cuda device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # create net sparse_net = SparseNet(arg.n_neuron, arg.size, R_lr=arg.r_learning_rate, lmda=arg.reg, device=device) # load data dataloader = DataLoader(NatPatchDataset(arg.batch_size, arg.size, arg.size), batch_size=250) # train optim = torch.optim.SGD([{'params': sparse_net.U.weight, "lr": arg.learning_rate}]) for e in range(arg.epoch): running_loss = 0 c = 0 for img_batch in tqdm(dataloader, desc='training', total=len(dataloader)): img_batch = img_batch.reshape(img_batch.shape[0], -1).to(device) # update pred = sparse_net(img_batch) loss = ((img_batch - pred) ** 2).sum() running_loss += loss.item() loss.backward() # update U optim.step() # zero grad sparse_net.zero_grad() # norm sparse_net.normalize_weights() c += 1 board.add_scalar('Loss', running_loss / c, e * len(dataloader) + c) if e % 5 == 4: # plotting fig = plot_rf(sparse_net.U.weight.T.reshape(arg.n_neuron, arg.size, arg.size).cpu().data.numpy(), arg.n_neuron, arg.size) board.add_figure('RF', fig, global_step=e * len(dataloader) + c) if e % 10 == 9: # save checkpoint torch.save(sparse_net, f"../../trained_models/ckpt-{e+1}.pth") torch.save(sparse_net, f"../../trained_models/ckpt-{e+1}.pth")
2.3125
2
tests/test_model_views.py
Algebra8/LAViewSet
0
12777133
<reponame>Algebra8/LAViewSet import json import pytest from laviewset import ModelViewSet from .models import User, UserSchema _serializer_class = UserSchema @pytest.fixture def model_viewset_core(db_router): class SomeViewSet(ModelViewSet): route = db_router.extend('users') model = User serializer_class = _serializer_class return SomeViewSet @pytest.fixture def db_cli_core(loop, aiohttp_client, db_app, model_viewset_core): return loop.run_until_complete(aiohttp_client(db_app)) async def test_list(db_cli_core, get_all_users): resp = await db_cli_core.get('/users') assert resp.status == 200 dat = await resp.json() assert dat == _serializer_class(many=True).dump(get_all_users) async def test_retrieve(db_cli_core, get_user_1): resp = await db_cli_core.get('/users/1') assert resp.status == 200 dat = await resp.json() assert dat == _serializer_class().dump(get_user_1) async def test_retrieve_404(db_cli_core): resp = await db_cli_core.get('/users/99') assert resp.status == 404 async def test_create(db_cli_core): data = {'id': 4, 'nickname': 'new_user'} resp = await db_cli_core.post('/users', data=json.dumps(data)) assert resp.status == 201 dat = await resp.json() assert dat['id'] == data['id'] assert dat['nickname'] == 'new_user' assert resp.headers['Location'] == f"{resp.url}/{data['id']}" async def test_delete(db_cli_core): resp = await db_cli_core.delete('/users/1') assert resp.status == 204 attempt_get = await db_cli_core.get('/users/1') assert attempt_get.status == 404 async def test_partial_update(db_cli_core): data = {'nickname': 'patched_nickname'} resp = await db_cli_core.patch('/users/1', data=json.dumps(data)) assert resp.status == 200 patched_attempt = await db_cli_core.get('/users/1') patched = await patched_attempt.json() assert patched['nickname'] == data['nickname'] async def test_update(db_cli_core): partial_data = {'nickname': 'patched_nickname'} bad_request_resp = await db_cli_core.put( '/users/1', data=json.dumps(partial_data) ) assert bad_request_resp.status == 400 full_data = {'id': 1, 'nickname': 'patched_nickname'} ok_resp = await db_cli_core.put( '/users/1', data=json.dumps(full_data) ) assert ok_resp.status == 200 put_attempt = await db_cli_core.get('/users/1') put = await put_attempt.json() assert put['id'] == full_data['id'] assert put['nickname'] == full_data['nickname']
2.25
2
slixmpp/plugins/xep_0333/__init__.py
marconfus/slixmpp
0
12777134
""" slixmpp: The Slick XMPP Library Copyright (C) 2016 <NAME> This file is part of slixmpp. See the file LICENSE for copying permission. """ from slixmpp.plugins.base import register_plugin from slixmpp.plugins.xep_0333.stanza import Markable, Received, Displayed, Acknowledged from slixmpp.plugins.xep_0333.hints import XEP_0333 register_plugin(XEP_0333)
1.078125
1
watch/forms.py
moharick/KaaRadaMtaani
0
12777135
<filename>watch/forms.py
from django import forms
from django.forms import ModelForm
from django.core import validators
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User

from .models import *


class MtaaForm(ModelForm):
    class Meta:
        model = Mtaa
        fields = ('mtaa_name',)


class AddBizForm(ModelForm):
    class Meta:
        model = Biz
        fields = ('name', 'biz_location', 'email')


class UpdateProfileForm(ModelForm):
    class Meta:
        model = UserProfile
        fields = ('first_name', 'last_name', 'mtaa_name',)


class PostForm(ModelForm):
    class Meta:
        model = Post
        fields = ('title', 'post_description',)
2.15625
2
homeassistant/components/poolsense/__init__.py
jsltrifork/core
0
12777136
"""The PoolSense integration.""" import asyncio from datetime import timedelta import logging import async_timeout from poolsense import PoolSense from poolsense.exceptions import PoolSenseError from homeassistant.config_entries import ConfigEntry from homeassistant.const import CONF_EMAIL, CONF_PASSWORD from homeassistant.core import HomeAssistant from homeassistant.exceptions import ConfigEntryNotReady from homeassistant.helpers import aiohttp_client, update_coordinator from homeassistant.helpers.update_coordinator import UpdateFailed from .const import DOMAIN PLATFORMS = ["sensor"] _LOGGER = logging.getLogger(__name__) async def async_setup(hass: HomeAssistant, config: dict): """Set up the PoolSense component.""" # Make sure coordinator is initialized. hass.data.setdefault(DOMAIN, {}) return True async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry): """Set up PoolSense from a config entry.""" poolsense = PoolSense() auth_valid = await poolsense.test_poolsense_credentials( aiohttp_client.async_get_clientsession(hass), entry.data[CONF_EMAIL], entry.data[CONF_PASSWORD], ) if not auth_valid: _LOGGER.error("Invalid authentication") return False coordinator = await get_coordinator(hass, entry) await hass.data[DOMAIN][entry.entry_id].async_refresh() if not coordinator.last_update_success: raise ConfigEntryNotReady hass.data[DOMAIN][entry.entry_id] = coordinator for component in PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, component) ) return True async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry): """Unload a config entry.""" unload_ok = all( await asyncio.gather( *[ hass.config_entries.async_forward_entry_unload(entry, component) for component in PLATFORMS ] ) ) if unload_ok: hass.data[DOMAIN].pop(entry.entry_id) return unload_ok async def get_coordinator(hass, entry): """Get the data update coordinator.""" async def async_get_data(): _LOGGER.info("Run query to server") poolsense = PoolSense() return_data = {} with async_timeout.timeout(10): try: return_data = await poolsense.get_poolsense_data( aiohttp_client.async_get_clientsession(hass), entry.data[CONF_EMAIL], entry.data[CONF_PASSWORD], ) except (PoolSenseError) as error: raise UpdateFailed(error) return return_data return update_coordinator.DataUpdateCoordinator( hass, logging.getLogger(__name__), name=DOMAIN, update_method=async_get_data, update_interval=timedelta(hours=1), )
2.015625
2
day01/Quartiles.py
silvioedu/RackerRank-10-days-of-statistic
0
12777137
<filename>day01/Quartiles.py
if __name__ == '__main__':
    n = int(input())
    x = sorted(list(map(int, input().split())))

    from statistics import median

    print(int(median(x[:n//2])))
    print(int(median(x)))
    print(int(median(x[(n+1)//2:])))
3.296875
3
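A worked example for the quartile script above, using what appears to be the usual HackerRank sample input of nine values; note that for odd n the median itself is excluded from both halves because of the n//2 and (n+1)//2 slices.

# Example run (assumed HackerRank-style sample):
#   input:  9
#           3 7 8 5 12 14 21 13 18
#   sorted: [3, 5, 7, 8, 12, 13, 14, 18, 21]
#   output: 6    (Q1 = median of [3, 5, 7, 8])
#           12   (Q2 = median of all nine values)
#           16   (Q3 = median of [13, 14, 18, 21])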
app/__init__.py
Haustorium/Flask-Microblog
0
12777138
<reponame>Haustorium/Flask-Microblog
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.openid import OpenID
from flask.ext.login import LoginManager
from config import basedir

app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)

lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'

oid = OpenID(app, os.path.join(basedir, 'tmp'))

from app import views, models
2.203125
2
src/tensorneko/util/dispatcher.py
ControlNet/tensorneko
9
12777139
<reponame>ControlNet/tensorneko from __future__ import annotations import warnings from functools import partial from typing import Callable, Dict, List, Generic, Sequence, Any import inspect from .type import T class DispatcherTypeWarning(Warning): pass class Dispatcher: dispatchers: Dict[str, Dispatcher] = {} def __init__(self, name: str): self.name = name self._functions = {} def __call__(self, func: Callable[..., T], set_types: Sequence[type] = None) -> Resolver[T]: if isinstance(func, (classmethod, staticmethod)): func = func.__func__ if not set_types: parameters: List[inspect.Parameter] = [*inspect.signature(func).parameters.values()] is_method = False if len(parameters) > 0: if parameters[0].name == "cls" and parameters[0].annotation is inspect.Parameter.empty: # if it is a class method parameters[0] = inspect.Parameter("cls", inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=type) if parameters[0].name == "self" and parameters[0].annotation is inspect.Parameter.empty: # if the function is a method parameters = parameters[1:] is_method = True possible_types = [[]] for param in parameters: # if is union type is_union = str(param.annotation)[:13] == "typing.Union[" # if no default value and not union, just append to each possible types if param.default is inspect.Signature.empty and not is_union: for types in possible_types: types.append(param.annotation) # if no default value but is union, append to each possible types and add new types elif param.default is inspect.Signature.empty and is_union: new_possible_types = [] # add new types for i in range(len(possible_types)): types = possible_types[i] for type_ in param.annotation.__args__[1:]: new_possible_types.append(types + [type_]) # append first union arg to each possible types for types in possible_types: types.append(param.annotation.__args__[0]) possible_types.extend(new_possible_types) # if has default value and is not union elif param.default is not inspect.Signature.empty and not is_union: for i in range(len(possible_types)): types = possible_types[i] possible_types.append(types + [param.annotation]) # if has default value and is union elif param.default is not inspect.Signature.empty and is_union: for i in range(len(possible_types)): types = possible_types[i] for type_ in param.annotation.__args__: possible_types.append(types + [type_]) else: raise TypeError(f"Unknown Error in Dispatcher with {parameters}") has_empty_type = False for types in possible_types: if tuple(types) in self._functions: warnings.warn(f"The dispatcher in {func.__module__ + '.' + func.__name__}({str(types)[1:-1]}) " f"is overridden!", DispatcherTypeWarning, stacklevel=2) if inspect.Signature.empty in types: warnings.warn(f"The dispatcher in {func.__module__ + '.' + func.__name__}({str(types)[1:-1]}) " f"has no type annotation!", DispatcherTypeWarning, stacklevel=2) has_empty_type = True self._functions[tuple(types)] = func if not is_method: return Resolver(self) else: return MethodResolver(self) else: self._functions[tuple(set_types)] = func return Resolver(self) @classmethod def get(cls, key: str) -> Dispatcher: if key in cls.dispatchers: return cls.dispatchers[key] else: new_obj = Dispatcher(key) cls.dispatchers[key] = new_obj return new_obj class Resolver(Generic[T]): def __init__(self, d: Dispatcher): self._dispatcher = d def __call__(self, *args, **kwargs) -> T: """ Returns: ``T``: The result of the function call. 
""" types = tuple([type(arg) for arg in args] + [type(kwarg) for kwarg in kwargs.values()]) if types in self._dispatcher._functions: return self._dispatcher._functions[types](*args, **kwargs) else: raise TypeError(f"Not valid for type {str(types)} for function {self._dispatcher.name}") class MethodResolver(Resolver[T], Generic[T]): def __call__(self, *args, **kwargs) -> T: types = tuple([type(arg) for arg in args] + [type(kwarg) for kwarg in kwargs.values()])[1:] if types in self._dispatcher._functions: return self._dispatcher._functions[types](*args, **kwargs) else: raise TypeError(f"Not valid for type {str(types)} for function {self._dispatcher.name}") class DispatcherDecorator: def __init__(self): self.__doc__ = DispatcherDecorator.__call__.__doc__ @staticmethod def of(*types: type) -> Callable[[Callable[..., T]], Resolver[T]]: def wrapper(func: Callable[..., T]) -> Resolver[T]: name = ".".join([func.__module__, func.__qualname__]) return Dispatcher.get(name)(func, types) return wrapper def __call__(self, func: Callable[..., T]) -> Resolver[T]: """ Decorator for dispatcher. Args: func(``(...) -> T``): function to be dispatched. Returns: :class:`~tensorneko.util.dispatcher.Resolver[T]`: Resolver object. Example:: @dispatch def add(x: int, y: int) -> int: return x + y @dispatch def add(x: List[int], y: List[int]) -> List[int]: assert len(x) == len(y) return [x[i] + y[i] for i in range(len(x))] @dispatch.of(float, float) def add(x, y) -> float: return x + y add(1, 2) # get 3 add([1, 2], [3, 4]) # get [4, 6] add(1.0, 2.0) # get 3.0 """ name = ".".join([func.__module__, func.__qualname__]) return Dispatcher.get(name)(func) dispatch = DispatcherDecorator()
2.28125
2
test/test_use_cases.py
bitmuster/pytest_system_test_plugin
2
12777140
import logging import os import time import pytest CURL = "/usr/bin/curl -X POST http://localhost:{} -d hello_my_plugins" WAITSTATUS = 0.1 def get_factory_args(port): # TODO: Find better way of getting an interpreter in the current env interpreter = os.path.abspath("./env-plugin/bin/python") args = [ interpreter, "-m", "restapi_echo_server", "--host", "0.0.0.0", "--port", str(port), ] return args @pytest.fixture(name="echoserver") def fixture_echoserver(process_factory): """ Custom fixture starts an echoserver on port 8090 """ # TODO: Find better way of getting an interpreter in the current env interpreter = os.path.abspath("./env-plugin/bin/python") process = process_factory( [ interpreter, "-m", "restapi_echo_server", "--host", "0.0.0.0", "--port", "8090", ], ) process.set_name("echoserver_") yield process logging.info("Killing echoserver") process.kill() @pytest.fixture(name="echoserver_2") def fixture_echoserver_2(process_factory): """ Custom fixture starts an echoserver on port 8092 """ # TODO: Find better way of getting an interpreter in the current env interpreter = os.path.abspath("./env-plugin/bin/python") process = process_factory( [ interpreter, "-m", "restapi_echo_server", "--host", "0.0.0.0", "--port", "8092", ], ) process.set_name("ecoserver_2_") yield process logging.info("Killing echoserver") process.kill() @pytest.fixture(name="asserts_echoserver") def fixture_asserts_echoserver(): yield logging.info("Asserts Echoserver") @pytest.fixture(name="cleanup_echoserver") def fixture_cleanup_echoserver(): yield logging.info("Cleanup Echoserver") def test_use_case_echo(echoserver): echoserver.run_bg() time.sleep(1) echoserver.kill() time.sleep(WAITSTATUS) # If this fails, there is maybe still one running assert echoserver.get_status() == "NotExisting" def test_use_case_echo_with_additional_cleanup( echoserver, asserts_echoserver, cleanup_echoserver ): _ = asserts_echoserver # for now just use them otherwise pylint will complain _ = cleanup_echoserver # Does not work right echoserver.run_bg() time.sleep(0.1) def test_use_case_echo_and_curl(process_factory, process): # TODO: Find better way of getting an interpreter in the current env interpreter = os.path.abspath("./env-plugin/bin/python") server = process_factory( [ interpreter, "-m", "restapi_echo_server", "--host", "0.0.0.0", "--port", "8080", ] ) server.run_bg() # give the server 100ms to start in the background time.sleep(0.1) process.set_command( CURL.format(8080).split(), ) assert process.run() == 0 def test_use_case_echo_and_curl_from_factory(process_factory): # TODO: Find better way of getting an interpreter in the current env interpreter = os.path.abspath("./env-plugin/bin/python") server = process_factory( [ interpreter, "-m", "restapi_echo_server", "--host", "0.0.0.0", "--port", "8080", ], "server_", ) server.run_bg() time.sleep(WAITSTATUS) assert server.get_status() == "Running" # make sure it still runs # give the server 100ms to start in the background time.sleep(0.1) client = process_factory( CURL.format(8080).split(), "client_", ) client.run_bg() time.sleep(WAITSTATUS) assert client.get_status() == 0 server.kill() time.sleep(WAITSTATUS) assert server.get_status() == "NotExisting" # For weird reasons the echoserver logs to stderr assert server.get_stdout() == "" assert "hello_my_plugins" in server.get_stderr() def test_use_case_echoserver_fixture_and_curl(process_factory, echoserver): echoserver.run_bg() time.sleep(WAITSTATUS) # give the server some time to start assert echoserver.get_status() == "Running" # make 
sure it still runs # give the server 100ms to start in the background time.sleep(0.1) client = process_factory( CURL.format(8090).split(), "client_", ) client.run_bg() time.sleep(WAITSTATUS) assert client.get_status() == 0 echoserver.kill() time.sleep(WAITSTATUS) assert echoserver.get_status() == "NotExisting" assert ( echoserver.get_stdout() == "" ) # For weird reasons the echoserver logs to stderr assert "hello_my_plugins" in echoserver.get_stderr() def test_use_case_echoserver_1_and_2(process_factory, echoserver, echoserver_2): echoserver_1 = echoserver echoserver_1.run_bg() echoserver_2.run_bg() time.sleep(0.1) assert echoserver_1.get_status() == "Running" assert echoserver_2.get_status() == "Running" time.sleep(0.1) client_a = process_factory( CURL.format(8090).split(), "client_a_", ) client_b = process_factory( CURL.format(8092).split(), "client_b_", ) client_a.run_bg() client_b.run_bg() time.sleep(0.1) assert client_a.get_status() == 0 assert client_b.get_status() == 0 echoserver_1.kill() echoserver_2.kill() time.sleep(0.1) assert echoserver_1.get_status() == "NotExisting" assert echoserver_2.get_status() == "NotExisting" assert "hello_my_plugins" in echoserver_1.get_stderr() assert "hello_my_plugins" in echoserver_2.get_stderr() def test_use_case_echo_and_curl_from_factory_n(process_factory): amount = 10 servers = [] clients = [] for i in range(amount): server = process_factory(get_factory_args(8080 + i), f"server_{i}_") server.run_bg() servers.append(server) time.sleep(0.1) logging.info("Polling server status") for server in servers: status = server.get_status() if status != "Running": logging.error("Something went wrong here is stdout") logging.error(server.get_stdout()) logging.error("Something went wrong here is stderr") logging.error(server.get_stderr()) assert status == "Running" time.sleep(0.5) logging.info("Starting clients") for i in range(amount): client = process_factory( CURL.format(8080 + i).split(), f"client_{i}_", ) client.run_bg() time.sleep(0.5) logging.info("Polling clients") # We expect, that all clients exited with zero for client in clients: assert client.get_status() == 0 clients.append(client) for server in servers: server.kill() time.sleep(0.1) for server in servers: assert server.get_status() == "NotExisting" for server in servers: # For weird reasons the echoserver logs to stderr assert server.get_stdout() == "" assert "hello_my_plugins" in server.get_stderr() for client in clients: assert "method" in client.get_stdout() assert "Total" in client.get_stderr()
2.328125
2
scipy/optimize/tests/test__linprog_clean_inputs.py
smola/scipy
1
12777141
<reponame>smola/scipy """ Unit test for Linear Programming via Simplex Algorithm. """ from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_, assert_allclose from pytest import raises as assert_raises from scipy.optimize._linprog_util import _clean_inputs, _LPProblem from copy import deepcopy def test_aliasing(): """ Test for ensuring that no objects referred to by `lp` attributes, `c`, `A_ub`, `b_ub`, `A_eq`, `b_eq`, `bounds`, have been modified by `_clean_inputs` as a side effect. """ lp = _LPProblem( c=1, A_ub=[[1]], b_ub=[1], A_eq=[[1]], b_eq=[1], bounds=(-np.inf, np.inf) ) lp_copy = deepcopy(lp) _clean_inputs(lp) assert_(lp.c == lp_copy.c, "c modified by _clean_inputs") assert_(lp.A_ub == lp_copy.A_ub, "A_ub modified by _clean_inputs") assert_(lp.b_ub == lp_copy.b_ub, "b_ub modified by _clean_inputs") assert_(lp.A_eq == lp_copy.A_eq, "A_eq modified by _clean_inputs") assert_(lp.b_eq == lp_copy.b_eq, "b_eq modified by _clean_inputs") assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") def test_aliasing2(): """ Similar purpose as `test_aliasing` above. """ lp = _LPProblem( c=np.array([1, 1]), A_ub=np.array([[1, 1], [2, 2]]), b_ub=np.array([[1], [1]]), A_eq=np.array([[1, 1]]), b_eq=np.array([1]), bounds=[(-np.inf, np.inf), (None, 1)] ) lp_copy = deepcopy(lp) _clean_inputs(lp) assert_allclose(lp.c, lp_copy.c, err_msg="c modified by _clean_inputs") assert_allclose(lp.A_ub, lp_copy.A_ub, err_msg="A_ub modified by _clean_inputs") assert_allclose(lp.b_ub, lp_copy.b_ub, err_msg="b_ub modified by _clean_inputs") assert_allclose(lp.A_eq, lp_copy.A_eq, err_msg="A_eq modified by _clean_inputs") assert_allclose(lp.b_eq, lp_copy.b_eq, err_msg="b_eq modified by _clean_inputs") assert_(lp.bounds == lp_copy.bounds, "bounds modified by _clean_inputs") def test_missing_inputs(): c = [1, 2] A_ub = np.array([[1, 1], [2, 2]]) b_ub = np.array([1, 1]) A_eq = np.array([[1, 1], [2, 2]]) b_eq = np.array([1, 1]) assert_raises(TypeError, _clean_inputs) assert_raises(TypeError, _clean_inputs, _LPProblem(c=None)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=A_ub, b_ub=None)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_ub=b_ub)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=None, b_ub=b_ub)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=A_eq, b_eq=None)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, b_eq=b_eq)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=None, b_eq=b_eq)) def test_too_many_dimensions(): cb = [1, 2, 3, 4] A = np.random.rand(4, 4) bad2D = [[1, 2], [3, 4]] bad3D = np.random.rand(4, 4, 4) assert_raises(ValueError, _clean_inputs, _LPProblem(c=bad2D, A_ub=A, b_ub=cb)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad3D, b_ub=cb)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=A, b_ub=bad2D)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad3D, b_eq=cb)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=A, b_eq=bad2D)) def test_too_few_dimensions(): bad = np.random.rand(4, 4).ravel() cb = np.random.rand(4) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_ub=bad, b_ub=cb)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=cb, A_eq=bad, b_eq=cb)) def test_inconsistent_dimensions(): m = 2 n = 4 c = [1, 2, 3, 4] Agood = 
np.random.rand(m, n) Abad = np.random.rand(m, n + 1) bgood = np.random.rand(m) bbad = np.random.rand(m + 1) boundsbad = [(0, 1)] * (n + 1) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Abad, b_ub=bgood)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_ub=Agood, b_ub=bbad)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Abad, b_eq=bgood)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, A_eq=Agood, b_eq=bbad)) assert_raises(ValueError, _clean_inputs, _LPProblem(c=c, bounds=boundsbad)) def test_type_errors(): lp = _LPProblem( c=[1, 2], A_ub=np.array([[1, 1], [2, 2]]), b_ub=np.array([1, 1]), A_eq=np.array([[1, 1], [2, 2]]), b_eq=np.array([1, 1]), bounds=[(0, 1)] ) bad = "hello" assert_raises(TypeError, _clean_inputs, lp._replace(c=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(A_ub=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(b_ub=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(A_eq=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(b_eq=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(bounds=bad)) assert_raises(TypeError, _clean_inputs, lp._replace(bounds="hi")) assert_raises(TypeError, _clean_inputs, lp._replace(bounds=["hi"])) assert_raises(TypeError, _clean_inputs, lp._replace(bounds=[("hi")])) assert_raises(TypeError, _clean_inputs, lp._replace(bounds=[(1, "")])) assert_raises(TypeError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, "")])) def test_non_finite_errors(): lp = _LPProblem( c=[1, 2], A_ub=np.array([[1, 1], [2, 2]]), b_ub=np.array([1, 1]), A_eq=np.array([[1, 1], [2, 2]]), b_eq=np.array([1, 1]), bounds=[(0, 1)] ) assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, None])) assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.inf, 0])) assert_raises(ValueError, _clean_inputs, lp._replace(c=[0, -np.inf])) assert_raises(ValueError, _clean_inputs, lp._replace(c=[np.nan, 0])) assert_raises(ValueError, _clean_inputs, lp._replace(A_ub=[[1, 2], [None, 1]])) assert_raises(ValueError, _clean_inputs, lp._replace(b_ub=[np.inf, 1])) assert_raises(ValueError, _clean_inputs, lp._replace(A_eq=[[1, 2], [1, -np.inf]])) assert_raises(ValueError, _clean_inputs, lp._replace(b_eq=[1, np.nan])) def test__clean_inputs1(): lp = _LPProblem( c=[1, 2], A_ub=[[1, 1], [2, 2]], b_ub=[1, 1], A_eq=[[1, 1], [2, 2]], b_eq=[1, 1], bounds=None ) lp_cleaned = _clean_inputs(lp) assert_allclose(lp_cleaned.c, np.array(lp.c)) assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) assert_(lp_cleaned.bounds == [(0, None)] * 2, "") assert_(lp_cleaned.c.shape == (2,), "") assert_(lp_cleaned.A_ub.shape == (2, 2), "") assert_(lp_cleaned.b_ub.shape == (2,), "") assert_(lp_cleaned.A_eq.shape == (2, 2), "") assert_(lp_cleaned.b_eq.shape == (2,), "") def test__clean_inputs2(): lp = _LPProblem( c=1, A_ub=[[1]], b_ub=1, A_eq=[[1]], b_eq=1, bounds=(0, 1) ) lp_cleaned = _clean_inputs(lp) assert_allclose(lp_cleaned.c, np.array(lp.c)) assert_allclose(lp_cleaned.A_ub, np.array(lp.A_ub)) assert_allclose(lp_cleaned.b_ub, np.array(lp.b_ub)) assert_allclose(lp_cleaned.A_eq, np.array(lp.A_eq)) assert_allclose(lp_cleaned.b_eq, np.array(lp.b_eq)) assert_(lp_cleaned.bounds == [(0, 1)], "") assert_(lp_cleaned.c.shape == (1,), "") assert_(lp_cleaned.A_ub.shape == (1, 1), "") assert_(lp_cleaned.b_ub.shape == (1,), "") assert_(lp_cleaned.A_eq.shape == (1, 1), "") 
assert_(lp_cleaned.b_eq.shape == (1,), "") def test__clean_inputs3(): lp = _LPProblem( c=[[1, 2]], A_ub=np.random.rand(2, 2), b_ub=[[1], [2]], A_eq=np.random.rand(2, 2), b_eq=[[1], [2]], bounds=[(0, 1)] ) lp_cleaned = _clean_inputs(lp) assert_allclose(lp_cleaned.c, np.array([1, 2])) assert_allclose(lp_cleaned.b_ub, np.array([1, 2])) assert_allclose(lp_cleaned.b_eq, np.array([1, 2])) assert_(lp_cleaned.bounds == [(0, 1)] * 2, "") assert_(lp_cleaned.c.shape == (2,), "") assert_(lp_cleaned.b_ub.shape == (2,), "") assert_(lp_cleaned.b_eq.shape == (2,), "") def test_bad_bounds(): lp = _LPProblem(c=[1, 2]) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, -2))) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, -2)])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, -2), (1, 2)])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=(1, 2, 2))) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2, 2)])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, 2, 2)])) assert_raises(ValueError, _clean_inputs, lp._replace(bounds=[(1, 2), (1, 2), (1, 2)])) def test_good_bounds(): lp = _LPProblem(c=[1, 2]) lp_cleaned = _clean_inputs(lp) # lp.bounds is None by default assert_(lp_cleaned.bounds == [(0, None)] * 2, "") lp_cleaned = _clean_inputs(lp._replace(bounds=(1, 2))) assert_(lp_cleaned.bounds == [(1, 2)] * 2, "") lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, 2)])) assert_(lp_cleaned.bounds == [(1, 2)] * 2, "") lp_cleaned = _clean_inputs(lp._replace(bounds=[(1, np.inf)])) assert_(lp_cleaned.bounds == [(1, None)] * 2, "") lp_cleaned = _clean_inputs(lp._replace(bounds=[(-np.inf, 1)])) assert_(lp_cleaned.bounds == [(None, 1)] * 2, "") lp_cleaned = _clean_inputs(lp._replace(bounds=[(-np.inf, np.inf), (-np.inf, np.inf)])) assert_(lp_cleaned.bounds == [(None, None)] * 2, "")
2.5
2
test/test_extractor.py
fabiofumarola/dragnet
0
12777142
import io import os import unittest import numpy as np from sklearn.linear_model import LogisticRegression from dragnet import Extractor from dragnet.blocks import TagCountNoCSSReadabilityBlockifier from dragnet.util import get_and_union_features from dragnet.compat import str_cast with io.open(os.path.join('test', 'datafiles', 'models_testing.html'), 'r') as f: big_html_doc = f.read() class TestExtractor(unittest.TestCase): def test_extractor(self): prob_threshold = 0.5 blockifier = TagCountNoCSSReadabilityBlockifier() features = get_and_union_features(['weninger', 'kohlschuetter', 'readability']) # initialize model from pre-fit attributes model_attrs = { 'C': 1.0, 'class_weight': None, 'classes_': [0, 1], 'coef_': [[0.00501458328421719, -0.0006331822163374379, -0.6699789320373452, 0.026069227973339763, -1.5552477377277252, 0.02980432745983307, -0.965575689884716, 0.019509367890934326, -0.35692924115362307]], 'dual': False, 'fit_intercept': True, 'intercept_': [-1.2071425754440765], 'intercept_scaling': 1, 'max_iter': 100, 'multi_class': 'ovr', 'n_iter_': [12], 'n_jobs': 1, 'penalty': 'l2', 'solver': 'liblinear', 'tol': 0.0001, 'warm_start': False} model = LogisticRegression() for k, v in model_attrs.items(): if isinstance(v, list): setattr(model, k, np.array(v)) else: setattr(model, k, v) # extract content via the extractor class extractor = Extractor(blockifier, features=features, model=model, to_extract='content', prob_threshold=prob_threshold) extractor_content = extractor.extract(big_html_doc) # extract content via individual components blocks = blockifier.blockify(big_html_doc) features_mat = features.transform(blocks) positive_idx = list(model.classes_).index(1) preds = (model.predict_proba(features_mat) > prob_threshold)[:, positive_idx].astype(int) components_content = '\n'.join(str_cast(blocks[ind].text) for ind in np.flatnonzero(preds)) self.assertIsNotNone(extractor_content) self.assertEqual(extractor_content, components_content) if __name__ == "__main__": unittest.main()
2.28125
2
process.py
PeterBee97/authors-tool
0
12777143
<reponame>PeterBee97/authors-tool
#!/usr/bin/python3
import spacy
import sys

en = spacy.load("en_core_web_sm")
# en.max_length = 2000000
sents = en(open(sys.argv[1]).read())
people = [ee for ee in sents.ents if ee.label_ == 'PERSON']
for p in people:
    print(p)
2.6875
3
transposonmapper/mapping/samflag.py
EKingma/Transposonmapper
2
12777144
<filename>transposonmapper/mapping/samflag.py
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 09:38:16 2021

@author: gregoryvanbeek
"""

#%% INPUT
flag = 1040
verbose = True


#%%
def samflags(flag=0, verbose=True):
    """This script converts a decimal flag to binary and gets the corresponding
    properties according to the SAM-flag standard.

    The code is based on the explanation given here
    https://davetang.org/muse/2014/03/06/understanding-bam-flags/
    For manually checking SAM flags, see
    https://broadinstitute.github.io/picard/explain-flags.html
    The input is a decimal number.

    Parameters
    ----------
    flag : int, optional
        by default 0
    verbose : bool, optional
        by default True
    """
    flag_binary = format(flag, '012b')  # '#012b' to start the string with '0b'. 12 indicates that the string has length 12.

    prop_dict = {1: 'read paired',
                 2: 'read mapped in proper pair',
                 3: 'read unmapped',
                 4: 'mate unmapped',
                 5: 'read reverse strand',
                 6: 'mate reverse strand',
                 7: 'first in pair',
                 8: 'second in pair',
                 9: 'not primary alignment',
                 10: 'read fails platform/vendor quality checks',
                 11: 'read is PCR or optical duplicate',
                 12: 'supplementary alignment'}

    counter = 1
    flagprop_list = []
    for b in reversed(flag_binary):
        if int(b) == 1:
            flagprop_list.append(prop_dict.get(counter))
        counter += 1

    if verbose:
        print('Entered decimal flag = %i' % flag)
        print('Corresponding binary flag = %s' % flag_binary)
        print(flagprop_list)
        print('')

    return (flag_binary, flagprop_list)


#%%
if __name__ == '__main__':
    flag_binary, flagproperties = samflags(flag=flag, verbose=verbose)
2.796875
3
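For reference, a short sketch of what the samflags helper above produces for the module-level flag value of 1040 (16 = read reverse strand plus 1024 = PCR/optical duplicate in the SAM specification):

flag_binary, props = samflags(flag=1040, verbose=False)
print(flag_binary)  # '010000010000'
print(props)        # ['read reverse strand', 'read is PCR or optical duplicate']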
rdr_service/alembic/versions/6f9266e7a5fb_initial_metrics.py
all-of-us/raw-data-repository
39
12777145
"""Initial metrics Revision ID: 6f9266e7a5fb Revises: 51415576d3e9 Create Date: 2017-12-12 10:38:27.166562 """ import model.utils import sqlalchemy as sa from alembic import op from rdr_service.participant_enums import MetricSetType, MetricsKey # revision identifiers, used by Alembic. revision = "6f9266e7a5fb" down_revision = "51415576d3e9" branch_labels = None depends_on = None def upgrade(engine_name): globals()["upgrade_%s" % engine_name]() def downgrade(engine_name): globals()["downgrade_%s" % engine_name]() def upgrade_rdr(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ### def downgrade_rdr(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ### def upgrade_metrics(): # ### commands auto generated by Alembic - please adjust! ### op.create_table( "metric_set", sa.Column("metric_set_id", sa.String(length=50), nullable=False), sa.Column("metric_set_type", model.utils.Enum(MetricSetType), nullable=False), sa.Column("last_modified", model.utils.UTCDateTime(), nullable=False), sa.PrimaryKeyConstraint("metric_set_id"), schema="metrics", ) op.create_table( "aggregate_metrics", sa.Column("metric_set_id", sa.String(length=50), nullable=False), sa.Column("metrics_key", model.utils.Enum(MetricsKey), nullable=False), sa.Column("value", sa.String(length=50), nullable=False), sa.Column("count", sa.Integer(), nullable=False), sa.ForeignKeyConstraint(["metric_set_id"], ["metrics.metric_set.metric_set_id"], ondelete="CASCADE"), sa.PrimaryKeyConstraint("metric_set_id", "metrics_key", "value"), schema="metrics", ) # ### end Alembic commands ### def downgrade_metrics(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table("aggregate_metrics", schema="metrics") op.drop_table("metric_set", schema="metrics") # ### end Alembic commands ###
1.59375
2
jarviscli/tests/plugins/composed/plugin0.py
qwireq/Jarvis
1
12777146
<reponame>qwireq/Jarvis<gh_stars>1-10 from plugin import Plugin class Plugin0_Sub0(Plugin): """Doc""" def require(self): pass def complete(self): pass def alias(self): pass def run(self, jarvis, s): jarvis.say("++sub0++ " + s) class Plugin3(Plugin): """Docu Plugin 3""" def require(self): pass def complete(self): yield "test" def alias(self): yield "Plugin0 Sub0" def run(self, jarvis, s): jarvis.say("sub0_wrong") class Plugin0_Sub1__test(Plugin): """Doc""" def require(self): pass def complete(self): pass def alias(self): yield "Plugin1" yield "Plugin0 sub2" def run(self, jarvis, s): jarvis.say("++sub1++ " + s) class Plugin2(Plugin): """Doc""" def require(self): pass def complete(self): pass def alias(self): yield "Plugin0" def run(self, jarvis, s): jarvis.say("master") class Plugin3_Sub0(Plugin): """Docu Sub0""" def require(self): pass def alias(self): pass def run(self, jarvis, s): pass
2.40625
2
Important/FlattenListofLists.py
jason71319jason/Interview-solved
2
12777147
'''
Write a function to flatten a list of lists
input: [1,[2,3,4],[5,[6,7]]]
output: [1,2,3,4,5,6,7]
'''

# recursive function
def flattenList(lst, f_lst=None):
    if f_lst is None:
        f_lst = []
    for l in lst:
        if isinstance(l, list):
            flattenList(l, f_lst)
        else:
            f_lst.append(l)
    return f_lst


# using itertools (flattens one level of nesting)
import itertools

def flattenList1(lst):
    f_lst = list(itertools.chain.from_iterable(lst))
    return f_lst


print(flattenList([[1, 2, 3], [4, 5, 6], [7], [8, 9]]))
4.21875
4
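A brief note on the two flatteners above: the recursive flattenList handles arbitrary nesting, while flattenList1 (itertools.chain.from_iterable) only removes one level and expects every element to be iterable, so the mixed example from the docstring only works with the recursive version. For example:

print(flattenList([1, [2, 3, 4], [5, [6, 7]]]))   # [1, 2, 3, 4, 5, 6, 7]
print(flattenList1([[1, 2, 3], [4, 5, 6], [7]]))  # [1, 2, 3, 4, 5, 6, 7]
# flattenList1([1, [2, 3]]) would raise TypeError: 'int' object is not iterable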
Aula12/exercicio_45.py
brenuvida/cursoemvideo
0
12777148
<filename>Aula12/exercicio_45.py
from random import randint

print('\n\n\nVamos jogar JOKENPÔ!!!\n\n\n')
escolha = int(input('Escolha PEDRA, PAPEL ou TESOURA:\n\n[1] PEDRA\n\n[2] PAPEL\n\n[3] TESOURA\n\nFaça a sua escolha: '))
if escolha != 1 and escolha != 2 and escolha != 3:
    print('\n\nVocê escolheu: {}\nApenas números entre 1 e 3 são aceitos, tente novamente'.format(escolha))
else:
    lista = ['', 'PEDRA', 'PAPEL', 'TESOURA']
    print('Você escolheu: {}\n\n'.format(lista[escolha]))
    computador = randint(1, 3)
    print('O computador escolheu: {}\n\n'.format(lista[computador]))
    if escolha == 1 and computador == 1:
        print('Empate')
    elif escolha == 1 and computador == 2:
        print('O computador ganhou')
    elif escolha == 1 and computador == 3:
        print('Você ganhou')
    elif escolha == 2 and computador == 2:
        print('Empate')
    elif escolha == 2 and computador == 1:
        print('Você ganhou')
    elif escolha == 2 and computador == 3:
        print('O computador ganhou')
    elif escolha == 3 and computador == 3:
        print('Empate')
    elif escolha == 3 and computador == 1:
        print('O computador ganhou')
    elif escolha == 3 and computador == 2:
        print('Você ganhou')

print('\n\n === FIM ===\n\n\n\n')
3.9375
4
pyle.py
aljungberg/pyle
13
12777149
#!/usr/bin/env python # -*- coding: utf-8 -*- """Pyle makes it easy to use Python as a replacement for command line tools such as `sed` or `perl`. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from future import standard_library standard_library.install_aliases() from future.utils import string_types import argparse import io import re import sh import sys import traceback __version__ = "0.4.1" STANDARD_MODULES = { 're': re, 'sh': sh } def truncate_ellipsis(line, length=30): """Truncate a line to the specified length followed by ``...`` unless its shorter than length already.""" return line if len(line) < length else line[:length - 3] + "..." def pyle_evaluate(expressions=None, modules=(), inplace=False, files=None, print_traceback=False): """The main method of pyle.""" eval_globals = {} eval_globals.update(STANDARD_MODULES) for module_arg in modules or (): for module in module_arg.strip().split(","): module = module.strip() if module: eval_globals[module] = __import__(module) if not expressions: # Default 'do nothing' program expressions = ['line'] encoding = sys.getdefaultencoding() files = files or ['-'] eval_locals = {} for file in files: if file == '-': file = sys.stdin out_buf = sys.stdout if not inplace else io.StringIO() out_line = None with (io.open(file, 'r', encoding=encoding) if not hasattr(file, 'read') else file) as in_file: for num, line in enumerate(in_file.readlines()): was_whole_line = False if line[-1] == '\n': was_whole_line = True line = line[:-1] expr = "" try: for expr in expressions: words = [word.strip() for word in re.split(r'\s+', line) if word] eval_locals.update({ 'line': line, 'words': words, 'filename': in_file.name, 'num': num }) out_line = eval(expr, eval_globals, eval_locals) if out_line is None: continue # If the result is something list-like or iterable, # output each item space separated. if not isinstance(out_line, string_types): try: out_line = u' '.join(str(part) for part in out_line) except: # Guess it wasn't a list after all. out_line = str(out_line) line = out_line except Exception as e: sys.stdout.flush() sys.stderr.write("At %s:%d ('%s'): `%s`: %s\n" % ( in_file.name, num, truncate_ellipsis(line), expr, e)) if print_traceback: traceback.print_exc(None, sys.stderr) else: if out_line is None: continue out_line = out_line or u'' out_buf.write(out_line) if was_whole_line: out_buf.write('\n') if inplace: with io.open(file, 'w', encoding=encoding) as out_file: out_file.write(out_buf.getvalue()) out_buf.close() def pyle(argv=None): """Execute pyle with the specified arguments, or sys.argv if no arguments specified.""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("-m", "--modules", dest="modules", action='append', help="import MODULE before evaluation. May be specified more than once.") parser.add_argument("-i", "--inplace", dest="inplace", action='store_true', default=False, help="edit files in place. When used with file name arguments, the files will be replaced by the output of the evaluation") parser.add_argument("-e", "--expression", action="append", dest="expressions", help="an expression to evaluate for each line") parser.add_argument('files', nargs='*', help="files to read as input. 
If used with --inplace, the files will be replaced with the output") parser.add_argument("--traceback", action="store_true", default=False, help="print a traceback on stderr when an expression fails for a line") args = parser.parse_args() if not argv else parser.parse_args(argv) pyle_evaluate(args.expressions, args.modules, args.inplace, args.files, args.traceback) if __name__ == '__main__': pyle()
2.890625
3
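The pyle entry point above accepts an argv list, so a quick way to exercise the evaluation loop is to call it directly from Python (this assumes a local file named example.txt with non-empty lines; the expression variables line, words, num and filename come from the eval_locals dict in pyle_evaluate, and the future/sh dependencies must be installed):

# print the first whitespace-separated word of every line of example.txt
pyle(['-e', 'words[0]', 'example.txt'])

# equivalently, running the module as a script:
#   python pyle.py -e "words[0]" example.txt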
scrap.py
yolossn/stock_split_meeting_alert
1
12777150
<filename>scrap.py import requests from lxml import etree from datetime import datetime,date import sys class stockAlert(): def __init__(self,url): self.url=url self.html="" self.doc="" self.contents="" self.event=[] self.eventQtly=[] self.weekNotify=[] self.monthNotify=[] self.split=[] self.today=date.today() self.splitToday=[] self.tmrNotify=[] def collect(self): self.html=requests.get(self.url) self.contents=self.html.content if self.html.status_code!=200: print(self.url,"returned a error while requesting content") sys.exit() self.doc=etree.HTML(self.contents) def scrapEvents(self,): companies=self.doc.xpath("//td[1]/a/b/text()") link=self.doc.xpath("//td[1]/a/@href") dates=self.doc.xpath("//td[2]/text()") agendas=self.doc.xpath("//td[3]/text()") dates=(datetime.strptime(str(x),"%d-%b-%Y").date() for x in dates) for company,href,date,agenda in zip(companies,link,dates,agendas): self.event.append({"comp":company,"href":href,"date":date,"agenda":agenda}) self.eventQtly=[x for x in self.event if x["agenda"]=="Quarterly Results"] return self.eventQtly def getTmrQtly(self): self.tmrNotify=[x for x in self.eventQtly if (x["date"]-self.today).days==1] return self.tmrNotify def getWeekQtly(self): self.weekNotify=[x for x in self.eventQtly if (x["date"]-self.today).days<7] return self.weekNotify def getMonthQtly(self): self.monthNotify=[x for x in self.eventQtly if (x["date"]-self.today).days<30] return self.monthNotify def scrapSplit(self): company=self.doc.xpath("//td[1]/a/b/text()") oldfv=self.doc.xpath("//td[2]/text()") newfv=self.doc.xpath("//td[3]/text()") links=self.doc.xpath("//td[1]/a/@href") dates=self.doc.xpath("//td[4]/text()") dates=(datetime.strptime(str(x),"%d-%m-%Y").date() for x in dates) for comp,date,link,old,new in zip(company,dates,links,oldfv,newfv): if (date-self.today).days<0: continue elif(date-self.today).days==0: self.splitToday.append({"comp":comp,"date":date,"href":link,"oldRate":old,"newRate":new}) self.split.append({"comp":comp,"date":date,"href":link,"oldRate":old,"newRate":new}) return self.splitToday,self.split def __repr__(self): string=[] for i in range(0,len(self.event)): string.append("{}||{}||{}".format(self.event[i]["comp"],self.event[i]["date"],self.event[i]["agenda"])) return "\n".join(string)
2.9375
3