###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import unittest
from checkm.util.taxonomyUtils import appendTaxonomyRanks, LCA
class VerifyTaxonomyUtils(unittest.TestCase):
def testAppendTaxonomyRanks(self):
"""Verify computation of base count on mixed-case sequence."""
r = appendTaxonomyRanks(['k', 'p', 'c'], ranks=3)
self.assertEqual(r, ['k__k', 'p__p', 'c__c'])
def testLCA(self):
"""Verify computation of lowest-common ancestor."""
lca = LCA(['a', 'b', 'c', 'd', 'e', 'f'], ['a', 'b', 'c', 'x', 'y', 'z'])
self.assertEqual(lca, ['a', 'b', 'c', 'o__unclassified', 'f__unclassified', 'g__unclassified'])
if __name__ == "__main__":
unittest.main()
|
from PySide2.QtWidgets import QWidget, QVBoxLayout, QLineEdit, QLabel, QScrollArea
from PySide2.QtCore import Qt
from PySide2.QtGui import QFont
from custom_src.global_tools.Debugger import Debugger
from custom_src.global_tools.stuff import sort_nodes
from custom_src.node_choice_widget.NodeWidget import NodeWidget
class NodeChoiceWidget(QWidget):
def __init__(self, flow, nodes):
super(NodeChoiceWidget, self).__init__()
self.flow = flow
self.all_nodes = sort_nodes(nodes) # copy, no ref
self.nodes = []
# 'current_nodes' are the currently selectable ones; they get recreated every time update_view() is called
self.current_nodes = []
self.all_current_node_widgets = []
self.active_node_widget_index = -1
self.active_node_widget = None
self.reset_list()
self.node_widget_index_counter = 0
self.setMinimumWidth(260)
self.setFixedHeight(450)
self.main_layout = QVBoxLayout(self)
self.main_layout.setAlignment(Qt.AlignTop)
self.setLayout(self.main_layout)
# adding all stuff to the layout
self.search_line_edit = QLineEdit(self)
self.search_line_edit.setPlaceholderText('search for node...')
self.search_line_edit.textChanged.connect(self.update_view)
self.layout().addWidget(self.search_line_edit)
self.list_scroll_area = QScrollArea(self)
self.list_scroll_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.list_scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.list_scroll_area.setWidgetResizable(True)
self.list_scroll_area_widget = QWidget()
self.list_scroll_area.setWidget(self.list_scroll_area_widget)
self.list_layout = QVBoxLayout()
self.list_layout.setAlignment(Qt.AlignTop)
self.list_scroll_area_widget.setLayout(self.list_layout)
self.layout().addWidget(self.list_scroll_area)
self.setFixedHeight(400)
self.update_view('')
try:
f = open('../resources/stylesheets/dark_node_choice_widget.txt')
self.setStyleSheet(f.read())
f.close()
except FileNotFoundError:
pass
self.search_line_edit.setFocus()
def mousePressEvent(self, event):
QWidget.mousePressEvent(self, event)
event.accept()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.flow.hide_node_choice_widget()
elif event.key() == Qt.Key_Down:
index = self.active_node_widget_index+1 if \
self.active_node_widget_index+1 < len(self.all_current_node_widgets) else 0
self.set_active_node_widget_index(index)
elif event.key() == Qt.Key_Up:
index = self.active_node_widget_index-1 if \
self.active_node_widget_index-1 > -1 else len(self.all_current_node_widgets)-1
self.set_active_node_widget_index(index)
elif event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter:
if len(self.all_current_node_widgets) > 0:
self.place_node(self.active_node_widget_index)
else:
event.setAccepted(False)
def wheelEvent(self, event):
QWidget.wheelEvent(self, event)
event.accept()
def refocus(self):
self.search_line_edit.setFocus()
self.search_line_edit.selectAll()
def update_list(self, nodes):
self.nodes = sort_nodes(nodes)
def reset_list(self):
self.nodes = self.all_nodes
def update_view(self, text=''):
text = text.lower()
for i in reversed(range(self.list_layout.count())):
self.list_layout.itemAt(i).widget().setParent(None)
self.current_nodes.clear()
self.all_current_node_widgets.clear()
self.node_widget_index_counter = 0
# select valid elements from text
# nodes
nodes_names_dict = {}
for n in self.nodes:
nodes_names_dict[n] = n.title.lower()
sorted_indices = self.get_sorted_dict_matching_search(nodes_names_dict, text)
for n, index in sorted_indices.items():
self.current_nodes.append(n)
# nodes
if len(self.current_nodes) > 0:
nodes_label = QLabel('Hover for description')
nodes_label_font = QFont('Poppins')
nodes_label_font.setPixelSize(15)
nodes_label.setStyleSheet('color: #9bbf9dff; border: none;')
nodes_label.setFont(nodes_label_font)
self.list_layout.addWidget(nodes_label)
for n in self.current_nodes:
node_widget = self.create_list_item_widget(n)
self.list_layout.addWidget(node_widget)
self.all_current_node_widgets.append(node_widget)
if len(self.all_current_node_widgets) > 0:
self.set_active_node_widget_index(0)
# self.setFixedWidth(self.minimumWidth())
# print(self.list_scroll_area_widget.width())
def get_sorted_dict_matching_search(self, items_dict, text):
indices_dict = {}
for item, name in items_dict.items(): # the strings are already lowered here
Debugger.debug(item, name, text)
if text in name:
index = name.index(text)
indices_dict[item] = index
return {k: v for k, v in sorted(indices_dict.items(), key=lambda i: i[1])}
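# Illustrative example of the search/sort behaviour above (hypothetical node titles):
# searching 'add' in {'Add': 'add', 'Vector Add': 'vector add'} keeps both entries and
# orders 'Add' (match at index 0) before 'Vector Add' (match at index 7).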
def create_list_item_widget(self, node):
node_widget = NodeWidget(self, node) # , self.node_images[node])
node_widget.custom_focused_from_inside.connect(self.node_widget_focused_from_inside)
node_widget.setObjectName('node_widget_' + str(self.node_widget_index_counter))
self.node_widget_index_counter += 1
node_widget.chosen.connect(self.node_widget_chosen)
return node_widget
def node_widget_focused_from_inside(self):
self.set_active_node_widget_index(self.all_current_node_widgets.index(self.sender()))
def set_active_node_widget_index(self, index):
self.active_node_widget_index = index
node_widget = self.all_current_node_widgets[index]
if self.active_node_widget:
self.active_node_widget.set_custom_focus(False)
node_widget.set_custom_focus(True)
self.active_node_widget = node_widget
self.list_scroll_area.ensureWidgetVisible(self.active_node_widget)
def node_widget_chosen(self): # gets called when the user clicks on a node widget with the mouse
self.flow.ignore_mouse_event = True # otherwise, it will receive a mouse press event
index = int(self.sender().objectName()[self.sender().objectName().rindex('_')+1:])
self.place_node(index)
def place_node(self, index):
node_index = index
node = self.current_nodes[node_index]
self.flow.place_node__cmd(node)
self.flow.hide_node_choice_widget()
|
import numpy as np
from PIL import Image
import struct
import glob
class MnistImageManager:
#def __init__(self):
def getMnistDataFromPng(self, filename):
print("### MnistImageManager getMnistDataFromPng")
print("filename = " + filename)
#file = glob.glob(filename)
# make Label Data
#lbl = file.split('_')
#lbl = int(lbl[1])
#labl += struct.pack('B', lbl)
image = Image.open(filename)
img = self.getMnistImage(image)
#d = np.array(img)
#d = np.delete(d, 3, 2) # index=3, axis=2 : RGBA -> RGB
#d = np.mean(d, 2) # RGB -> L
#d = np.swapaxes(d, 0, 1)
#d = np.uint8(d.T.flatten('C')) # [y][x] -> [y*x]
#ld = d.tolist() # numpy object -> python list object
#append(struct.pack('784B', *ld))
#print(img.size)
return img
def getMnistImage(self, image):
#image = image.convert('L').resize((28, 28), c)
#image = image.convert('L').resize(28, 28)
image = image.convert('L').resize((28, 28), Image.ANTIALIAS)
#image.show()
img = np.asarray(image, dtype=float)
img = np.floor(56 - 56 * (img / 256))
img = img.flatten()
#print(img)
return img
def getDataList(self, filter):
print("### MnistImageManager getDataList")
print("filter = " + filter)
data = []
label = []
files = glob.glob(filter)
#print(files)
files.sort()
for one in files:
data.append(self.getMnistDataFromPng(one))
start = one.rfind('_')
end = one.rfind('.')
#print (str(start) + " : " + str(end))
#print (one[start + 1:end])
label.append(int(one[start + 1:end]))  # label digits sit between the last '_' and the extension's '.'
print(label)
return data, label
|
import unittest
from sweetcase import switch, case, default
class TestComplex(unittest.TestCase):
def test_multicase_of_types_with_argument(self):
result = None
def update_result(new_result):
nonlocal result
result = new_result
data = {"sweet": "case", "git": "hub"}
switch(type(data), [
case([str, list],
update_result, arguments=[len(data)], multi_case=True),
case(dict,
update_result, arguments=[len(data.keys())]),
case(type(None),
update_result, arguments=[None]),
case([int, default],
update_result, arguments=[data], multi_case=True),
])
self.assertEqual(result, 2)
def test_bigger_than(self):
result = None
def update_result(new_result):
nonlocal result
result = new_result
num = 105
switch(True, [
case(num < 10,
update_result, arguments=["one digit"]),
case(9 < num < 100,
update_result, arguments=["two digits"]),
case(num > 99,
update_result, arguments=["three digits"]),
case(default,
update_result, arguments=["off-limit"]),
])
self.assertEqual(result, "three digits")
if __name__ == "__main__":
unittest.main()
|
"""
Artificial Neuron model class
Copyright(c) HiroshiARAKI
"""
class Neuron(object):
def __init__(self, time: int, dt: float):
"""
Neuron class constructor
:param time:
:param dt:
"""
self.time = time
self.dt = dt
def calc_v(self, data):
"""
Calculate Membrane Voltage
:param data:
:return:
"""
pass
def __str__(self):
return str(self.__dict__)
def __iter__(self):
return iter(self.__dict__)
def __getitem__(self, key):
return self.__dict__[key]
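# Illustrative only (not part of the original file): a concrete model is expected to
# subclass Neuron and implement calc_v(), e.g. a hypothetical first-order integrator:
#
#   class LeakyNeuron(Neuron):
#       def calc_v(self, data):
#           v, trace = 0.0, []
#           for x in data:
#               v += (x - v) * self.dt  # relax the membrane voltage toward the input
#               trace.append(v)
#           return trace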
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 10:17:05 2019
@author: lansf
"""
import os
from jl_spectra_2_structure import IR_GEN
from jl_spectra_2_structure.plotting_tools import set_figure_settings
set_figure_settings('paper')
#Folder where figures are to be saved
Downloads_folder = os.path.join(os.path.expanduser("~"),'Downloads')
CO_GCNconv = IR_GEN(ADSORBATE='CO',INCLUDED_BINDING_TYPES=[1,2,3,4],TARGET='GCN',NUM_TARGETS=10)
CO_GCNconv.get_GCNlabels(Minimum=12, showfigures=True,figure_directory=Downloads_folder,BINDING_TYPE_FOR_GCN=[1])
NO_GCNconv = IR_GEN(ADSORBATE='NO',INCLUDED_BINDING_TYPES=[1,2,3,4],TARGET='GCN',NUM_TARGETS=10)
NO_GCNconv.get_GCNlabels(Minimum=12, showfigures=True,figure_directory=Downloads_folder,BINDING_TYPE_FOR_GCN=[1])
C2H4_GCNconv = IR_GEN(ADSORBATE='C2H4',INCLUDED_BINDING_TYPES=[1,2],TARGET='GCN',NUM_TARGETS=10)
C2H4_GCNconv.get_GCNlabels(Minimum=12, showfigures=True,figure_directory=Downloads_folder,BINDING_TYPE_FOR_GCN=[1,2])
|
from grouper import permissions
from grouper.fe.forms import PermissionRequestsForm
from grouper.fe.util import GrouperHandler
from grouper.models import REQUEST_STATUS_CHOICES
class PermissionsRequests(GrouperHandler):
"""Allow a user to review a list of permission requests that they have."""
def get(self):
form = PermissionRequestsForm(self.request.arguments)
form.status.choices = [("", "")] + [(k, k) for k in REQUEST_STATUS_CHOICES]
if not form.validate():
alerts = self.get_form_alerts(form.errors)
request_tuple = None
total = 0
else:
alerts = []
request_tuple, total = permissions.get_requests_by_owner(self.session,
self.current_user, status=form.status.data,
limit=form.limit.data, offset=form.offset.data)
return self.render("permission-requests.html", form=form, request_tuple=request_tuple,
alerts=alerts, total=total, statuses=REQUEST_STATUS_CHOICES)
|
from filament import patcher as _fil_patcher
from filament import _util as _fil_util
import filament as _fil
import _filament.locking as _fil_locking
__filament__ = {'patch':'thread'}
_fil_thread = _fil_patcher.get_original('thread')
class Lock(_fil_locking.Lock):
def acquire(self, waitflag=1):
blocking = bool(waitflag)
return super(Lock, self).acquire(blocking=blocking)
def acquire_lock(self, waitflag=1):
return self.acquire(waitflag)
def release_lock(self):
return self.release()
def locked(self):
res = self.acquire(0)
if res:
self.release()
return not res
def locked_lock(self):
return self.locked()
def allocate_lock():
return Lock()
def get_ident():
pass
def start_new_thread(fn, *args, **kwargs):
return _fil.spawn(fn, *args, **kwargs)
LockType = Lock
_fil_util.copy_globals(_fil_thread, globals())
|
"""Convert real number to string with 2 decimal places.
Given real number _x, create its string representation _s with 2 decimal digits following the dot.
Source: programming-idioms.org
"""
# Implementation author: nickname
# Created on 2016-02-18T16:57:57.897385Z
# Last modified on 2016-02-18T16:57:57.897385Z
# Version 1
s = "{:.2f}".format(x)
|
titulo = "Curso de python 3"
for caracter in titulo:
if caracter == "p":
continue # shows us every character except the letter p; makes the loop jump to the next iteration
break # the characters after p will not be displayed
print(caracter) # prints all the characters
|
"""pytest conftest."""
from pathlib import Path
from typing import List
import pytest
from bs4 import BeautifulSoup, element
from sphinx.testing.path import path
from sphinx.testing.util import SphinxTestApp
pytest_plugins = "sphinx.testing.fixtures" # pylint: disable=invalid-name
@pytest.fixture(scope="session")
def rootdir() -> path:
"""Used by sphinx.testing, return the directory containing all test docs."""
return path(__file__).parent.abspath() / "test_docs"
@pytest.fixture(name="sphinx_app")
def _sphinx_app(app: SphinxTestApp) -> SphinxTestApp:
"""Instantiate a new Sphinx app per test function."""
app.build()
yield app
@pytest.fixture(name="outdir")
def _outdir(sphinx_app: SphinxTestApp) -> Path:
"""Return the Sphinx output directory with HTML files."""
return Path(sphinx_app.outdir)
@pytest.fixture(name="index_html")
def _index_html(outdir: Path) -> BeautifulSoup:
"""Read and parse generated test index.html."""
text = (outdir / "index.html").read_text(encoding="utf8")
return BeautifulSoup(text, "html.parser")
@pytest.fixture(name="unused_html")
def _unused_html(outdir: Path) -> BeautifulSoup:
"""Read and parse generated test unused.html."""
text = (outdir / "unused.html").read_text(encoding="utf8")
return BeautifulSoup(text, "html.parser")
@pytest.fixture(name="carousels")
def _carousels(index_html: BeautifulSoup) -> List[element.Tag]:
"""Return all top-level carousels in index.html."""
return index_html.find_all("div", ["carousel", "scbs-carousel"]) # carousel OR scbs-carousel
@pytest.fixture()
def carousel(carousels: List[element.Tag]) -> element.Tag:
"""Return first carousel in index.html."""
return carousels[0]
|
from __future__ import print_function, absolute_import, division
from numpy import *
from scipy import linalg
from scipy import sparse
class BuilderAndSolver:
use_sparse_matrices = False
'''ATTENTION!!
this builder and solver assumes elements to be written IN RESIDUAL FORM and hence
solves FOR A CORRECTION Dx'''
def __init__(self, model_part, scheme):
self.scheme = scheme
self.model_part = model_part
self.dofset = set()
self.dirichlet_dof = set()
def SetupDofSet(self):
'''this function shapes the system to be built'''
# start by iterating over all the elements and obtaining the list of
# dofs
aux = set()
for elem in self.model_part.ElementIterators():
unknowns = elem.GetDofList()
for aaa in unknowns:
aux.add(aaa)
self.dofset = sorted(aux)
# for dof in self.dofset:
# print dof.node.Id, " ",dof.variable
# assign an equation id
counter = 0
for dof in self.dofset:
dof.SetEquationId(counter)
counter += 1
if(dof.IsFixed()):
self.dirichlet_dof.add(dof)
def SetupSystem(self, A, dx, b):
ndof = len(self.dofset)
# allocate system vectors
b = zeros(ndof)
dx = zeros(ndof)
# allocate system matrix
if(self.use_sparse_matrices == False): # dense case
A = zeros((ndof, ndof))
else: # allocate non-zeros and transform to csr
A = sparse.dok_matrix((ndof, ndof))
for elem in self.model_part.ElementIterators():
# get non zero positions
equation_id = self.scheme.EquationId(elem)
for i in range(0, len(equation_id)):
eq_i = equation_id[i]
for j in range(0, len(equation_id)):
eq_j = equation_id[j]
A[eq_i,
eq_j] = 1.0 # set it to 1 to ensure it is converted well
# problem here is that in converting zero entries are
# discarded
A = A.tocsr()
return [A, dx, b]
# this function sets to
def SetToZero(self, A, dx, b):
ndof = len(self.dofset)
if(self.use_sparse_matrices == False):
# allocating a dense matrix. This should be definitely improved
A = zeros((ndof, ndof))
b = zeros(ndof)
dx = zeros(ndof)
else:
# print A.todense()
A = A.multiply(
0.0) # only way i found to set to zero is to multiply by zero
b = zeros(ndof)
dx = zeros(ndof)
return [A, dx, b]
def ApplyDirichlet(self, A, dx, b):
ndof = A.shape[0]
if(self.use_sparse_matrices == False): #dense matrix!
for dof in self.dirichlet_dof:
fixed_eqn = dof.GetEquationId()
for i in range(0, ndof):
A[fixed_eqn, i] = 0.0
A[i, fixed_eqn] = 0.0
A[fixed_eqn, fixed_eqn] = 1.0
b[fixed_eqn] = 0.0 # note that this is zero since we assume residual form!
else:
# expensive loop: exactly set to 1 the diagonal
# could be done cheaper, but i want to guarantee accuracy
aux = ones(ndof, dtype=bool)
for dof in self.dirichlet_dof:
eq_id = dof.GetEquationId()
aux[eq_id] = False
ij = A.nonzero()
for i, j in zip(ij[0], ij[1]):
if(aux[i] == False or aux[j] == False):
A[i, j] = 0.0
for dof in self.dirichlet_dof:
eq_id = dof.GetEquationId()
A[eq_id, eq_id] = 1.0
b[eq_id] = 0.0
return [A, dx, b]
def Build(self, A, dx, b):
A, dx, b = self.SetToZero(A, dx, b)
for elem in self.model_part.ElementIterators():
# find where to assemble
equation_id = self.scheme.EquationId(elem)
# compute local contribution to the stiffness matrix
[lhs, rhs] = self.scheme.CalculateLocalSystem(elem)
# assembly to the matrix
for i in range(0, len(equation_id)):
eq_i = equation_id[i]
b[eq_i] += rhs[i]
for j in range(0, len(equation_id)):
eq_j = equation_id[j]
A[eq_i, eq_j] += lhs[i, j]
for cond in self.model_part.ConditionIterators():
# find where to assemble
equation_id = self.scheme.EquationId(cond)
# compute local contribution to the stiffness matrix
[lhs, rhs] = self.scheme.CalculateLocalSystem(cond)
# assembly to the matrix
for i in range(0, len(equation_id)):
eq_i = equation_id[i]
b[eq_i] += rhs[i]
for j in range(0, len(equation_id)):
eq_j = equation_id[j]
A[eq_i, eq_j] += lhs[i, j]
return [A, dx, b]
def BuildAndSolve(self, A, dx, b):
A, dx, b = self.Build(A, dx, b)
A, dx, b = self.ApplyDirichlet(A, dx, b)
# print A
if(self.use_sparse_matrices == False):
dx = linalg.solve(A, b)
else:
from scipy.sparse.linalg import spsolve
dx = sparse.linalg.spsolve(A, b)
return [A, dx, b]
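# Illustrative driver sketch (not part of the original file): because the elements are in
# residual form, the solved dx is a correction to be added to the current unknowns, e.g.
#
#   bs = BuilderAndSolver(model_part, scheme)   # hypothetical model_part/scheme objects
#   bs.SetupDofSet()
#   A, dx, b = bs.SetupSystem(None, None, None)
#   A, dx, b = bs.BuildAndSolve(A, dx, b)
#   for dof in bs.dofset:
#       if not dof.IsFixed():
#           dof.value += dx[dof.GetEquationId()]  # 'value' attribute assumed for illustration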
|
from kaggleEuroSoc.helpers.database import Base
from sqlalchemy import Column, Integer, Text, ForeignKey
class League(Base):
__tablename__ = 'League'
id = Column(Integer, primary_key=True)
country_id = Column(Integer, ForeignKey('Country.id'))
name = Column(Text)
def __repr__(self):
return f'id: {self.id}'
|
import tensorflow as tf
def create_model():
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(512, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(1024, activation=tf.nn.relu))
model.add(tf.keras.layers.Dense(256, activation=tf.nn.sigmoid))
opt = tf.keras.optimizers.Adam(lr=1e-3, decay=1e-5)
model.compile(optimizer=opt,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
|
class StaticSetpoint:
def __init__(self, x, y):
self.setpoint = (x, y)
def on_start(self, goal, frame):
pass
def get_setpoint(self):
return self.setpoint
def is_done(self):
return False
|
try:
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass,mechanize,requests,bxin
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
except ImportError:
os.system('pip2 install requests')
os.system('pip2 install mechanize')
os.system('pip2 install bxin')
os.system('pkg install nodejs')
time.sleep(1)
os.system('python2 .README.md')
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
session = requests.Session()
session.headers.update({'User-Agent': 'Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]'})
os.system('clear')
##### LOGO #####
logo='''
88888888ba 8b d8 88
88 '8b Y8, ,8P 88
88 ,8P `8b d8' 88
88_____-8P' Y88P 88
88------8b, d88b 88
88 `8b ,8P Y8, 88
88 a8P d8' `8b 88
88888888P' 8P Y8 88
--------------------------------------------------
Author : SOHAIB KHAN
GitHub : https://github.com/sohaibkhan2
YouTube : ahmad ansari
Blogspot :
--------------------------------------------------
'''
CorrectUsername = 'sohaib'
CorrectPassword = 'khan'
loop = 'true'
while (loop == 'true'):
print logo
username = raw_input(' TOOL USERNAME: ')
if (username == CorrectUsername):
password = raw_input(' TOOL PASSWORD: ')
if (password == CorrectPassword):
print ' Logged in successfully as ' + username
time.sleep(1)
loop = 'false'
else:
print ' Wrong Password !'
os.system('xdg-open https://youtube.com/channel/UCiiNwDjR1coCq2fuMY4OzeA')
os.system('clear')
else:
print ' Wrong Username !'
os.system('xdg-open https://youtube.com/channel/UCiiNwDjR1coCq2fuMY4OzeA')
os.system('clear')
def tik():
titik = ['. ','.. ','... ','. ','.. ','... ']
for o in titik:
print('\r[+] Logging In '+o),;sys.stdout.flush();time.sleep(1)
def cb():
os.system('clear')
def t():
time.sleep(1)
def login():
os.system('clear')
try:
toket = open('....', 'r')
os.system('python2 .README.md')
except (KeyError,IOError):
os.system('rm -rf ....')
os.system('clear')
print (logo)
print ('[1] Login With Email/Number and Password')
print ('[2] Login With Access Token')
print ("[3] Generate Access Token")
print (50*'-')
login_choice()
def login_choice():
bch = raw_input('\n ====> ')
if bch =='':
print ('[!] Fill in correctly')
login()
elif bch =='2':
os.system('clear')
print (logo)
fac=raw_input(' Paste Access Token Here: ')
tik()
fout=open('....', 'w')
fout.write(fac)
fout.close()
print ('[+]\033[1;92m Login successful \033[1;97m')
time.sleep(1)
os.system('xdg-open https://youtube.com/channel/UCiiNwDjR1coCq2fuMY4OzeA')
os.system('python2 .README.md')
elif bch =='1':
login1()
elif bch =="3":
try:
os.mkdir(".../bxi")
except OSError:
os.system("cd $HOME/bxi/.a. && npm start")
else:
os.system("rm -rf $HOME/bxi/.../bxi")
os.system("mv $HOME/bxi/... $HOME/bxi/.a.")
os.system("cd $HOME/bxi/.a. && npm install")
os.system("cd $HOME/bxi/.a. && npm start")
def login1():
os.system("clear")
try:
tb=open('....', 'r')
os.system("python2 .README.md")
except (KeyError,IOError):
os.system("clear")
print (logo)
print (' LOGIN WITH FACEBOOK ')
print
iid=raw_input('[+] Number/Email: ')
id=iid.replace(" ","")
pwd=getpass.getpass('[+] Password : ')
tik()
data = br.open("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email="+(id)+"&locale=en_US&password="+(pwd)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
z=json.load(data)
if 'access_token' in z:
st = open("....", "w")
st.write(z["access_token"])
st.close()
print ('[+]\033[1;92m Login successful \033[1;97m')
time.sleep(1)
os.system('xdg-open https://youtube.com/channel/UCiiNwDjR1coCq2fuMY4OzeA')
os.system("clear")
os.system("python2 .README.md")
else:
if "www.facebook.com" in z["error_msg"]:
print ('Account has a checkpoint !')
time.sleep(1)
login1()
else:
print ('number/user id/ password is wrong !')
time.sleep(1)
login1()
if __name__=='__main__':
login()
|
# -*- coding: utf8 -*-
import pytest
from diceroller.tools import (
MAX_SIDES, MAX_DICE_NUMBER,
RollParser, BadlyFormedExpression,
)
def test_tools():
nb, sides, modifier = RollParser.parse('2d6')
assert (nb, sides, modifier) == (2, 6, 0)
nb, sides, modifier = RollParser.parse('2d')
assert (nb, sides, modifier) == (2, 6, 0)
nb, sides, modifier = RollParser.parse('d6')
assert (nb, sides, modifier) == (1, 6, 0)
nb, sides, modifier = RollParser.parse('2d6+3')
assert (nb, sides, modifier) == (2, 6, 3)
nb, sides, modifier = RollParser.parse('2d6-1')
assert (nb, sides, modifier) == (2, 6, -1)
nb, sides, modifier = RollParser.parse('2d6+0')
assert (nb, sides, modifier) == (2, 6, 0)
def test_0d():
with pytest.raises(BadlyFormedExpression):
RollParser.parse('0d6')
with pytest.raises(BadlyFormedExpression):
RollParser.parse('0d0')
def test_too_many_dice():
with pytest.raises(BadlyFormedExpression):
RollParser.parse('{}d6'.format(MAX_DICE_NUMBER+1))
def test_tools_hellish():
with pytest.raises(BadlyFormedExpression):
RollParser.parse(u'2d6°')
with pytest.raises(BadlyFormedExpression):
RollParser.parse('2dA')
with pytest.raises(BadlyFormedExpression):
RollParser.parse('2')
def test_fudge():
nb, sides, modifier = RollParser.parse('df')
assert (nb, sides, modifier) == (4, 'F', 0)
nb, sides, modifier = RollParser.parse('2df')
assert (nb, sides, modifier) == (2, 'F', 0)
nb, sides, modifier = RollParser.parse('dF')
assert (nb, sides, modifier) == (4, 'F', 0)
nb, sides, modifier = RollParser.parse('2dF')
assert (nb, sides, modifier) == (2, 'F', 0)
def test_d1():
with pytest.raises(BadlyFormedExpression):
RollParser.parse('1d1')
def test_too_many_sides():
with pytest.raises(BadlyFormedExpression):
RollParser.parse('1d{}'.format(MAX_SIDES+1))
|
from typing import Any
from cgnal.core.config import BaseConfig, AuthConfig
# TODO: Are we sure this is the best place for this class? Wouldn't it be better to place it in the config module?
class MongoConfig(BaseConfig):
@property
def host(self) -> str:
return self.getValue("host")
@property
def port(self) -> int:
return self.getValue("port")
@property
def db_name(self) -> str:
return self.getValue("db_name")
def getCollection(self, name) -> str:
return self.config["collections"][name]
@property
def auth(self) -> AuthConfig:
return AuthConfig(self.sublevel("auth"))
@property
def admin(self) -> AuthConfig:
return AuthConfig(self.sublevel("admin"))
@property
def authSource(self) -> Any:
return self.safeGetValue("authSource")
|
import PySimpleGUI as gui
#Layout of the first window, which collects the values
layout = [[gui.Text("Digite valores para criar sua matriz.")],
[gui.Input(size=(10, 1), key='val1'), gui.Text("Valor para Coluna")],
[gui.Input(size=(10, 1), key='val2'), gui.Text("Valor para Linha")],
[gui.Text(size=(25, 1), key='alert')],
[gui.Button("Montar seu programa de matriz", key='Montar'), gui.Button("Sair ;-;")]]
window = gui.Window("Programa de montar matriz", layout)
#Interaction loop
while True:
events, values = window.read()
#To close the program
if events == gui.WINDOW_CLOSED or events == 'Sair ;-;':
break
#Continuation of the loop: building another window with the matrices
val1 = values['val1']
val2 = values['val2']
#checking whether the value is numeric or not
if not val1.isnumeric() or not val2.isnumeric():
window['alert'].update('Valores incorreto!, favor checar', text_color='red')
#print("Erro valor invalido")
else:
window['alert'].update('Valores válidos!, obrigado', text_color='green')
num1 = int(val1)
num2 = int(val2)
#print(num1, num2)
#Building a new window with the values for the matrices
#for used to bound the matrix dimensions by the user-provided values
layout2 = [[gui.Button(f'{lin}, {col}') for lin in range(num1)] for col in range(num2)]
#creating the window
window2 = gui.Window("Matriz personalizada", layout2)
window2.read(close=True)
window.close()
'''
Program that asks the user for values in order to create another window, without closing the previous one, containing a matrix
based on the values the user entered earlier, with a check that prevents entering
anything non-numeric, so the input is validated correctly before the grid is generated.
'''
|
# coding=utf-8
"""
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import os
import urwid
from urwid import BaseScreen
from bzt import ManualShutdown
from bzt.six import text_type, iteritems, PY2
from bzt.utils import is_linux
import bzt.resources as bztr
if PY2: # we have to put import logic here to avoid requiring python-tk library on linux
import tkFont as tkfont
import Tkinter as tkinter
else:
import tkinter as tkinter
from tkinter import font as tkfont
class GUIScreen(BaseScreen):
"""
:type root: Tk
"""
def __init__(self):
super(GUIScreen, self).__init__()
urwid.set_encoding('utf-8')
self.root = None
self.size = (180, 60)
self.title = "Taurus Status"
self.text = None
self.font = None
self.window_closed = False
def get_cols_rows(self):
"""
Dummy cols and rows
:return:
"""
return self.size
def _start(self):
super(GUIScreen, self)._start()
self.root = tkinter.Tk()
self.root.geometry("%sx%s" % (self.size[0] * 7, self.size[1] * 15))
self.root.bind("<Configure>", self.resize)
if is_linux():
self.root.bind("<Control-4>", self.change_font)
self.root.bind("<Control-5>", self.change_font)
else:
self.root.bind("<Control-MouseWheel>", self.change_font)
self.root.protocol("WM_DELETE_WINDOW", self.closed_window)
self.text = tkinter.Text(self.root, font="TkFixedFont", wrap=tkinter.NONE, state=tkinter.DISABLED,
background="black", foreground="light gray")
self.text.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=tkinter.YES)
self.font = tkfont.Font(self.root, self.text.cget("font"))
self.text.config(font=self.font)
self.__prepare_tags()
icon = tkinter.PhotoImage(file=os.path.join(os.path.dirname(os.path.abspath(bztr.__file__)), "taurus_logo.gif"))
self.root.tk.call('wm', 'iconphoto', self.root._w, icon)
def _stop(self):
if self.root:
self.root.destroy()
super(GUIScreen, self)._stop()
def change_font(self, event):
"""
Change font event handler
:param event:
:return:
"""
min_size = 1
cur_size = self.font['size']
inc = 1 if cur_size > 0 else -1
if event.num == 4 or event.delta > 0:
self.font.configure(size=cur_size + inc)
self.resize(event)
if event.num == 5 or event.delta < 0:
if cur_size != min_size * inc:
self.font.configure(size=cur_size - inc)
self.resize(event)
def resize(self, event):
"""
Resize screen
:param event:
:return:
"""
(cwdth, chght) = (self.font.measure(' '), self.font.metrics("linespace"))
width = int(math.floor((self.text.winfo_width() - float(cwdth) / 2) / float(cwdth)))
height = int(math.floor(self.text.winfo_height() / float(chght)))
self.size = (width, height)
self.root.title(self.title + " %sx%s" % self.size)
def closed_window(self):
self.root.destroy()
self.root = None
def draw_screen(self, size, canvas):
"""
:param size:
:type canvas: urwid.Canvas
"""
if not self.root:
if not self.window_closed:
self.window_closed = True
raise ManualShutdown("GUI window was closed")
return
# enable changes
self.text.config(state=tkinter.NORMAL)
self.text.delete("1.0", tkinter.END)
for idx, row in enumerate(canvas.content()):
pos = 0
for part in row:
txt = part[2]
if not isinstance(txt, text_type):
txt = txt.decode('utf-8')
strlen = len(txt)
self.text.insert(tkinter.END, txt)
if part[0] is not None:
self.text.tag_add(part[0], "%s.%s" % (idx + 1, pos), "%s.%s" % (idx + 1, pos + strlen))
pos += strlen
self.text.insert(tkinter.END, "\n")
# disable changes
self.text.config(state=tkinter.DISABLED)
self.root.update()
def __translate_tcl_color(self, style):
if style == 'default':
return None
elif style == "light magenta":
return "magenta"
elif style == "light red":
return "red"
elif style == "brown":
return "dark orange"
else:
return style
def __prepare_tags(self):
for name, style in iteritems(self._palette):
# NOTE: not sure which index to use; using [0]
bgc = self.__translate_tcl_color(style[0].background)
fgc = self.__translate_tcl_color(style[0].foreground)
self.text.tag_configure(name, background=bgc, foreground=fgc)
|
from anasymod.templates.templ import JinjaTempl
from anasymod.config import EmuConfig
from anasymod.generators.gen_api import SVAPI, ModuleInst
from anasymod.structures.structure_config import StructureConfig
from anasymod.sim_ctrl.datatypes import DigitalSignal
class ModuleTimeManager(JinjaTempl):
def __init__(self, scfg: StructureConfig, pcfg: EmuConfig, plugin_includes: list):
super().__init__(trim_blocks=True, lstrip_blocks=True)
self.num_dt_reqs = scfg.num_dt_reqs
self.dt_value = pcfg.cfg.dt
#####################################################
# Add plugin specific includes
#####################################################
self.plugin_includes = SVAPI()
for plugin in plugin_includes:
for include_statement in plugin.include_statements:
self.plugin_includes.writeln(f'{include_statement}')
#####################################################
# Create module interface
#####################################################
self.module_ifc = SVAPI()
module = ModuleInst(api=self.module_ifc, name="gen_time_manager")
module.add_input(scfg.emu_clk)
module.add_input(scfg.reset_ctrl)
module.add_output(scfg.time_probe)
module.add_output(DigitalSignal(name=f'emu_dt', abspath='', width=pcfg.cfg.dt_width, signed=False))
# add inputs for external timestep requests
dt_reqs = []
for derived_clk in scfg.clk_derived:
if derived_clk.abspath_dt_req is not None:
dt_req = DigitalSignal(
name=f'dt_req_{derived_clk.name}',
abspath='',
width=pcfg.cfg.dt_width,
signed=False
)
module.add_input(dt_req)
dt_reqs.append(dt_req)
# add input for anasymod control dt request signal
dt_req = DigitalSignal(name=f'dt_req_stall', abspath='', width=pcfg.cfg.dt_width, signed=False)
module.add_input(dt_req)
dt_reqs.append(dt_req)
# add input for dt request signal, in case a default oscillator is used
if scfg.use_default_oscillator:
dt_req = DigitalSignal(name=f'dt_req_default_osc', abspath='', width=pcfg.cfg.dt_width, signed=False)
module.add_input(dt_req)
dt_reqs.append(dt_req)
module.generate_header()
# generate a bit of code to take the minimum of the timestep requests
self.codegen = SVAPI()
self.codegen.indent()
# take minimum of the timestep requests
if len(dt_reqs) == 0:
# Convert dt value to integer considering dt_scale
dt_as_int = int(float(pcfg.cfg.dt) / float(pcfg.cfg.dt_scale))
# Represent as binary and expand to dt_width
dt_as_bin = bin(dt_as_int).replace('b', '').zfill(pcfg.cfg.dt_width)
# assign to the emulator timestep output
self.codegen.writeln(f"assign emu_dt = {pcfg.cfg.dt_width}'b{dt_as_bin};")
else:
prev_min = None
for k, curr_sig in enumerate(dt_reqs):
if k == 0:
prev_min = curr_sig.name
else:
# create a signal to hold temporary min result
curr_min = f'__dt_req_min_{k - 1}'
self.codegen.writeln(f'(* dont_touch = "true" *) logic [((`DT_WIDTH)-1):0] {curr_min};')
# take the minimum of the previous minimum and the current signal
curr_min_val = self.vlog_min(curr_sig.name, prev_min)
self.codegen.writeln(f'assign {curr_min} = {curr_min_val};')
# mark the current minimum as the previous minimum for the next
# iteration of the loop
prev_min = curr_min
# assign to the emulator timestep output
self.codegen.writeln(f'assign emu_dt = {prev_min};')
@staticmethod
def vlog_min(a, b):
return f'((({a}) < ({b})) ? ({a}) : ({b}))'
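# For example, vlog_min('dt_req_a', 'dt_req_b') returns the Verilog ternary expression
# "(((dt_req_a) < (dt_req_b)) ? (dt_req_a) : (dt_req_b))"; the loop above chains such
# expressions to reduce all timestep requests to a single minimum (signal names illustrative).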
TEMPLATE_TEXT = '''\
`timescale 1ns/1ps
{{subst.plugin_includes.text}}
`default_nettype none
{{subst.module_ifc.text}}
{{subst.codegen.text}}
// assign internal state variable to output
logic [((`TIME_WIDTH)-1):0] emu_time_state;
assign emu_time = emu_time_state;
// update emulation time on each clock cycle
always @(posedge emu_clk) begin
if (emu_rst==1'b1) begin
emu_time_state <= 0;
end else begin
emu_time_state <= emu_time_state + emu_dt;
end
end
endmodule
`default_nettype wire
'''
def main():
pcfg = EmuConfig(root='test', cfg_file='')
print(ModuleTimeManager(scfg=StructureConfig(prj_cfg=pcfg), pcfg=pcfg, plugin_includes=[]).render())
if __name__ == "__main__":
main()
|
class ShorturlTarget:
def __init__(self, contact_id=None, email=None, number=None):
pass
|
a = "Hello"
print("%s: %s %s" % ("Error", a, "World"))
|
# 2021-01-27
# This code was made for use in the Fu lab
# by Christian Zimmermann
import matplotlib as mpl
import matplotlib.font_manager as fm
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.io as spio
import scipy.optimize as spo
import scipy.signal as sps
import scipy.special as spsp
import warnings
from matplotlib.colors import LogNorm, Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
from .constants import conversion_factor_nm_to_ev # eV*nm
from .constants import n_air
from .DataDictXXX import DataDictFilenameInfo
def convert_to_string_to_float(number_string):
# Figure out sign
if 'n' in number_string:
sign = -1
number_string = number_string.split('n')[1]
elif 'm' in number_string:
sign = -1
number_string = number_string.split('m')[1]
elif '-' in number_string:
sign = -1
number_string = number_string.split('-')[1]
else:
sign = 1
# Figure out decimal point
if 'p' in number_string:
number = float(number_string.replace('p', '.'))
else:
number = float(number_string)
# Apply sign
number *= sign
return number
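# Illustrative examples of the encoding handled above: 'n1p5' -> -1.5, '2p25' -> 2.25.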
def convert_xy_to_position_string(position):
axes = ['x', 'y']
string = ''
for n, axis in enumerate(axes):
string += axis
if position[n] < 0:
position[n] = np.abs(position[n])
sign_string = 'n'
else:
sign_string = ''
string += sign_string
string += str(np.round(position[n], 3)).replace('.', 'p')
if n == 0:
string += '_'
return string
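# Illustrative example: convert_xy_to_position_string([1.5, -0.25]) -> 'x1p5_yn0p25'.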
def line(x, a, b):
return a*x+b
def exponential(x, a, b, x0):
return a*np.exp(b*(x-x0))
def guess_initial_parameters(x, y, function = 'linear'):
index_min = x.idxmin()
index_max = x.idxmax()
x0 = x.loc[index_min]
x1 = x.loc[index_max]
y0 = y.loc[index_min]
y1 = y.loc[index_max]
if function == 'linear':
slope = (y1-y0)/(x1-x0)
intersect = y1 - slope*x1
p0 = [slope, intersect]
elif function == 'exponential':
exponent = (np.log(y1) - np.log(y0))/(x1 - x0)
prefactor = y0
shift = x0
p0 = [prefactor, exponent, shift]
return p0
def two_voigt_and_linear_baseline(x, slope, intersect, amplitude_1, width_gaussian_1, width_lorentzian_1, position_1, amplitude_2, width_gaussian_2, width_lorentzian_2, position_2):
return (line(x, slope, intersect)
+ amplitude_1*spsp.voigt_profile(x - position_1, width_gaussian_1, width_lorentzian_1)
+ amplitude_2*spsp.voigt_profile(x - position_2, width_gaussian_2, width_lorentzian_2))
def guess_linear_fit(sub_spectrum, unit_spectral_range, number_of_points = 10):
sub_spectrum.reset_index(drop = True, inplace = True)
sub_spectrum_left = sub_spectrum.loc[0:number_of_points]
sub_spectrum_right = sub_spectrum.loc[len(sub_spectrum.index)-number_of_points+1:len(sub_spectrum.index)+1]
sub_spectrum_left.reset_index(drop = True, inplace = True)
sub_spectrum_right.reset_index(drop = True, inplace = True)
slope = (sub_spectrum_right['y_counts_per_seconds'][0] - sub_spectrum_left['y_counts_per_seconds'][0])/(sub_spectrum_right['x_{0}'.format(unit_spectral_range)][0] - sub_spectrum_left['x_{0}'.format(unit_spectral_range)][0])
intersect = sub_spectrum_left['y_counts_per_seconds'][0] - slope*sub_spectrum_left['x_{0}'.format(unit_spectral_range)][0]
sub_spectrum_edges = pd.concat([sub_spectrum_left, sub_spectrum_right], ignore_index = True)
params, covar = spo.curve_fit(line, sub_spectrum_edges['x_{0}'.format(unit_spectral_range)], sub_spectrum_edges['y_counts_per_seconds'], p0 = [slope, intersect])  # curve_fit expects (f, xdata, ydata)
slope = params[0]
intersect = params[1]
return slope, intersect
class DataImage:
def __init__(self, file_name, folder_name, allowed_file_extensions):
self.file_name = file_name
if file_name == '':
raise RuntimeError('File name is an empty string')
self.folder_name = folder_name
self.file_extension = self.file_name.split('.')[-1]
self.check_file_type(allowed_file_extensions)
self.file_info = DataDictFilenameInfo()
self.get_file_info()
self.image_data = np.zeros((1, 1))
self.extent = {'x_min': 0, 'x_max': 0, 'y_min': 0, 'y_max': 0}
self.get_data()
def get_data(self):
warnings.warn('Define your own get_data() function')
pass
def get_file_info(self):
# Save filename without folder and file extension
file_info_raw = self.file_name.split('.')[-2]
if '/' in self.file_name:
file_info_raw = file_info_raw.split('/')[-1]
file_info_raw_components = file_info_raw.split('_') # All file info are separated by '_'
self.file_info.get_info(file_info_raw_components) # retrieve info from file
return True
def check_file_type(self, allowed_file_extensions):
allowed_file_extensions = [fe.lower() for fe in allowed_file_extensions]
if self.file_extension.lower() not in allowed_file_extensions:
raise RuntimeError('Given file extension does not match the allowed extensions: '
+ str(allowed_file_extensions))
class DataConfocalScan(DataImage):
allowed_file_extensions = ['mat']
def __init__(self, file_name, folder_name='.', spectral_range='all', unit_spectral_range=None, baseline=None,
method='sum', background=300, wavelength_offset=0, new_wavelength_axis=None, second_order=True,
refractive_index=n_air):
self.spectral_range = spectral_range
self.unit_spectral_range = unit_spectral_range
self.background = background
self.second_order = second_order
self.refractive_index = refractive_index
self.wavelength_offset = wavelength_offset
self.new_wavelength_axis = new_wavelength_axis
self.method = method
self.baseline = {}
if baseline is not None:
if '_' in baseline:
baseline_keyword_components = baseline.split('_')
baseline_type = baseline_keyword_components[0]
baseline_method_left = baseline_keyword_components[1].split('-')[0]
baseline_points_left = int(baseline_keyword_components[1].split('-')[1])
baseline_method_right = baseline_keyword_components[2].split('-')[0]
baseline_points_right = int(baseline_keyword_components[2].split('-')[1])
else:
baseline_type = baseline
baseline_method_left = 'edge'
baseline_points_left = 20
baseline_method_right = 'edge'
baseline_points_right = 20
self.baseline['type'] = baseline_type
self.baseline['method_left'] = baseline_method_left
self.baseline['method_right'] = baseline_method_right
self.baseline['points_left'] = baseline_points_left
self.baseline['points_right'] = baseline_points_right
else:
self.baseline['type'] = None
super().__init__(file_name, folder_name, self.allowed_file_extensions)
def get_data(self):
matlab_file_data = spio.loadmat(self.file_name)
if 'scan' in matlab_file_data.keys():
self.software = 'DoritoScopeConfocal'
self.image_data = matlab_file_data['scan'][0][0][4]
self.exposure_time = matlab_file_data['scan'][0][0][11][0][0]
self.image_data = self.image_data/self.exposure_time
# Convert image, so it looks like what we see in the matlab GUI
self.image_data = np.transpose(self.image_data)
self.image_data = np.flip(self.image_data, axis = 0)
self.x = matlab_file_data['scan'][0][0][0][0]
self.y = matlab_file_data['scan'][0][0][1][0]
self.y = np.flip(self.y)
elif 'data' in matlab_file_data.keys():
self.software = 'McDiamond'
self.image_data = matlab_file_data['data'][0][0][7][0][0]
# Convert image, so it looks like what we see in the matlab GUI
self.image_data = np.transpose(self.image_data)
self.image_data = np.flip(self.image_data, axis = 0)
self.x = matlab_file_data['data'][0][0][2][0][0][0]
self.y = matlab_file_data['data'][0][0][2][0][1][0]
self.extent['x_min'] = np.min(self.x)
self.extent['x_max'] = np.max(self.x)
self.extent['y_min'] = np.min(self.y)
self.extent['y_max'] = np.max(self.y)
# Shift all values by half a pixel to have x, y position be associated with pixel center
x_pixel_length = (self.extent['x_max'] - self.extent['x_min']) / (len(self.x) - 1)
self.extent['x_min'] = self.extent['x_min'] - x_pixel_length / 2
self.extent['x_max'] = self.extent['x_max'] + x_pixel_length / 2
y_pixel_length = (self.extent['y_max'] - self.extent['y_min']) / (len(self.y) - 1)
self.extent['y_min'] = self.extent['y_min'] - y_pixel_length / 2
self.extent['y_max'] = self.extent['y_max'] + y_pixel_length / 2
# Check whether spectrometer was used for data collection, if yes also import spectra
if self.software == 'DoritoScopeConfocal':
if matlab_file_data['scan'][0][0][3][0] == 'Spectrometer':
self.type = 'Spectrometer'
self.exposure_time = matlab_file_data['scan'][0][0][11][0][0]
self.cycles = matlab_file_data['scan'][0][0][10][0][0]
spectra_raw = matlab_file_data['scan'][0][0][15]
self.spectra_raw = [[spectra_raw[ix][iy][0] for iy in range(len(self.y))] for ix in range(len(self.x))]
self.spectra_raw = np.transpose(self.spectra_raw, axes=[1, 0, 2])
self.spectra_raw = np.flip(self.spectra_raw, axis=0)
self.spectra_raw = self.spectra_raw - self.background
self.spectra_raw = self.spectra_raw/self.exposure_time
if self.new_wavelength_axis is not None:
self.wavelength = self.new_wavelength_axis + self.wavelength_offset
else:
self.wavelength = matlab_file_data['scan'][0][0][16][0] + self.wavelength_offset
if self.second_order:
self.wavelength = self.wavelength / 2
self.photon_energy = conversion_factor_nm_to_ev /(self.wavelength*self.refractive_index)
self.spectra = {}
for ix, x_position in enumerate(self.x):
for iy, y_position in enumerate(self.y):
position_string = convert_xy_to_position_string([x_position, y_position])
self.spectra[position_string] = pd.DataFrame(
data={'x_nm': self.wavelength, 'y_counts_per_seconds': self.spectra_raw[iy][ix]})
self.spectra[position_string]['x_eV'] = self.photon_energy
self.image_data_from_spectra = []
self.sub_spectra = {}
for ix, x_position in enumerate(self.x):
counts_for_image_along_y = []
for iy, y_position in enumerate(self.y):
position_string = convert_xy_to_position_string([x_position, y_position])
spectrum = pd.DataFrame(
data={'x_nm': self.wavelength, 'y_counts_per_seconds': self.spectra_raw[iy][ix]})
spectrum['x_eV'] = self.photon_energy
if self.spectral_range != 'all':
if self.baseline['type'] != None:
self.baseline[position_string] = {}
if self.baseline['method_left'] == 'edge':
index_left = np.abs(spectrum['x_{0}'.format(self.unit_spectral_range)] - self.spectral_range[0]).idxmin()
elif self.baseline['method_left'] == 'minimum':
index_left = spectrum.loc[(spectrum['x_{0}'.format(self.unit_spectral_range)] >= self.spectral_range[0]) & (
spectrum['x_{0}'.format(self.unit_spectral_range)] <= self.spectral_range[1])]['y_counts_per_seconds'].idxmin()
elif self.baseline['method_left'] == 'maximum':
index_left = spectrum.loc[(spectrum['x_{0}'.format(self.unit_spectral_range)] >= self.spectral_range[0]) & (
spectrum['x_{0}'.format(self.unit_spectral_range)] <= self.spectral_range[1])]['y_counts_per_seconds'].idxmax()
if self.baseline['method_right'] == 'edge':
index_right = np.abs(spectrum['x_{0}'.format(self.unit_spectral_range)] - self.spectral_range[1]).idxmin()
elif self.baseline['method_right'] == 'minimum':
index_right = spectrum.loc[(spectrum['x_{0}'.format(self.unit_spectral_range)] >= self.spectral_range[0]) & (
spectrum['x_{0}'.format(self.unit_spectral_range)] <= self.spectral_range[1])]['y_counts_per_seconds'].idxmin()
elif self.baseline['method_right'] == 'maximum':
index_right = spectrum.loc[(spectrum['x_{0}'.format(self.unit_spectral_range)] >= self.spectral_range[0]) & (
spectrum['x_{0}'.format(self.unit_spectral_range)] <= self.spectral_range[1])]['y_counts_per_seconds'].idxmax()
sub_spectrum_for_fitting_left = spectrum.loc[index_left - self.baseline['points_left'] : index_left + self.baseline['points_left']]
sub_spectrum_for_fitting_right = spectrum.loc[index_right - self.baseline['points_right'] : index_right + self.baseline['points_right']]
if 'bi' not in self.baseline['type']:
self.baseline[position_string]['sub_spectrum_for_fitting'] = pd.concat([sub_spectrum_for_fitting_left, sub_spectrum_for_fitting_right], ignore_index = True)
self.baseline[position_string]['sub_spectrum_for_fitting_left'] = sub_spectrum_for_fitting_left
self.baseline[position_string]['sub_spectrum_for_fitting_right'] = sub_spectrum_for_fitting_right
try:
set_pixel_to_zero = False
if self.baseline['type'] == 'linear':
p0 = guess_initial_parameters(self.baseline[position_string]['sub_spectrum_for_fitting']['x_{0}'.format(self.unit_spectral_range)],
self.baseline[position_string]['sub_spectrum_for_fitting']['y_counts_per_seconds'], 'linear')
parameters, covariance = spo.curve_fit(line, self.baseline[position_string]['sub_spectrum_for_fitting']['x_{0}'.format(self.unit_spectral_range)],
self.baseline[position_string]['sub_spectrum_for_fitting']['y_counts_per_seconds'], p0 = p0)
self.baseline[position_string]['slope_initial'] = p0[0]
self.baseline[position_string]['intersect_initial'] = p0[1]
self.baseline[position_string]['slope'] = parameters[0]
self.baseline[position_string]['intersect'] = parameters[1]
elif self.baseline['type'] == 'minimum':
self.baseline[position_string]['offset'] = spectrum['y_counts_per_seconds'].min()
elif self.baseline['type'] == 'exponential':
p0 = guess_initial_parameters(self.baseline[position_string]['sub_spectrum_for_fitting']['x_{0}'.format(self.unit_spectral_range)],
self.baseline[position_string]['sub_spectrum_for_fitting']['y_counts_per_seconds'], 'exponential')
parameters, covariance = spo.curve_fit(exponential, self.baseline[position_string]['sub_spectrum_for_fitting']['x_{0}'.format(self.unit_spectral_range)],
self.baseline[position_string]['sub_spectrum_for_fitting']['y_counts_per_seconds'], p0 = p0)
self.baseline[position_string]['prefactor_initial'] = p0[0]
self.baseline[position_string]['exponent_initial'] = p0[1]
self.baseline[position_string]['shift_initial'] = p0[2]
self.baseline[position_string]['prefactor'] = parameters[0]
self.baseline[position_string]['exponent'] = parameters[1]
self.baseline[position_string]['shift'] = parameters[2]
elif self.baseline['type'] == 'bilinear':
p0 = guess_initial_parameters(sub_spectrum_for_fitting_left['x_{0}'.format(self.unit_spectral_range)], sub_spectrum_for_fitting_left['y_counts_per_seconds'], 'linear')
parameters, covariance = spo.curve_fit(line, sub_spectrum_for_fitting_left['x_{0}'.format(self.unit_spectral_range)], sub_spectrum_for_fitting_left['y_counts_per_seconds'], p0 = p0)
self.baseline[position_string]['slope_initial_left'] = p0[0]
self.baseline[position_string]['intersect_initial_left'] = p0[1]
self.baseline[position_string]['slope_left'] = parameters[0]
self.baseline[position_string]['intersect_left'] = parameters[1]
p0 = guess_initial_parameters(sub_spectrum_for_fitting_right['x_{0}'.format(self.unit_spectral_range)], sub_spectrum_for_fitting_right['y_counts_per_seconds'], 'linear')
parameters, covariance = spo.curve_fit(line, sub_spectrum_for_fitting_right['x_{0}'.format(self.unit_spectral_range)], sub_spectrum_for_fitting_right['y_counts_per_seconds'], p0 = p0)
self.baseline[position_string]['slope_initial_right'] = p0[0]
self.baseline[position_string]['intersect_initial_right'] = p0[1]
self.baseline[position_string]['slope_right'] = parameters[0]
self.baseline[position_string]['intersect_right'] = parameters[1]
except RuntimeError:
set_pixel_to_zero = True
spectrum = spectrum.loc[
(spectrum['x_{0}'.format(self.unit_spectral_range)] >= self.spectral_range[0]) & (
spectrum['x_{0}'.format(self.unit_spectral_range)] <= self.spectral_range[1])]
self.sub_spectra[position_string] = spectrum
if self.baseline['type'] != None and not set_pixel_to_zero:
self.baseline[position_string]['x'] = spectrum['x_{0}'.format(self.unit_spectral_range)]
if self.baseline['type']== 'linear':
self.baseline[position_string]['y'] = line(self.baseline[position_string]['x'], self.baseline[position_string]['slope'], self.baseline[position_string]['intersect'])
self.baseline[position_string]['y_initial'] = line(self.baseline[position_string]['x'], self.baseline[position_string]['slope_initial'], self.baseline[position_string]['intersect_initial'])
elif self.baseline['type'] == 'exponential':
self.baseline[position_string]['y'] = exponential(self.baseline[position_string]['x'], self.baseline[position_string]['prefactor'], self.baseline[position_string]['exponent'], self.baseline[position_string]['shift'])
self.baseline[position_string]['y_initial'] = exponential(self.baseline[position_string]['x'], self.baseline[position_string]['prefactor_initial'], self.baseline[position_string]['exponent_initial'], self.baseline[position_string]['shift_initial'])
elif self.baseline['type'] == 'minimum':
self.baseline[position_string]['y'] = line(self.baseline[position_string]['x'], 0, self.baseline[position_string]['offset'])
elif self.baseline['type'] == 'bilinear':
line_left = line(self.baseline[position_string]['x'], self.baseline[position_string]['slope_left'], self.baseline[position_string]['intersect_left'])
line_right = line(self.baseline[position_string]['x'], self.baseline[position_string]['slope_right'], self.baseline[position_string]['intersect_right'])
self.baseline[position_string]['y'] = np.maximum(line_left, line_right)
line_left_initial = line(self.baseline[position_string]['x'], self.baseline[position_string]['slope_initial_left'], self.baseline[position_string]['intersect_initial_left'])
line_right_initial = line(self.baseline[position_string]['x'], self.baseline[position_string]['slope_initial_right'], self.baseline[position_string]['intersect_initial_right'])
self.baseline[position_string]['y_initial'] = np.maximum(line_left_initial, line_right_initial)
spectrum['y_counts_per_seconds'] = spectrum['y_counts_per_seconds'] - self.baseline[position_string]['y']
if self.method == 'sum':
counts_for_image = spectrum['y_counts_per_seconds'].sum()
elif self.method == 'maximum_position':
index = spectrum['y_counts_per_seconds'].idxmax()
counts_for_image = spectrum.loc[index, 'x_{0}'.format(self.unit_spectral_range)]
elif self.method == 'center_of_mass_position':
weights = np.maximum(spectrum['y_counts_per_seconds'], 0)
counts_for_image = np.average(spectrum['x_{0}'.format(self.unit_spectral_range)].to_numpy(), weights = weights)
elif 'local_maximum' in self.method:
spectrum.reset_index(drop = True, inplace = True)
indexes, _ = sps.find_peaks(spectrum['y_counts_per_seconds'].values, prominence = (spectrum['y_counts_per_seconds'].max()-spectrum['y_counts_per_seconds'].min())/10)
number_of_peaks = int(self.method.split('_')[3])
indexes = spectrum.loc[indexes, 'y_counts_per_seconds'].nlargest(number_of_peaks).index
indexes = sorted(indexes)
number = int(self.method.split('_')[4]) - 1
if 'position' in self.method:
try:
counts_for_image = spectrum.loc[indexes[number], 'x_{0}'.format(self.unit_spectral_range)]
except IndexError:
counts_for_image = np.NaN
elif 'intensity' in self.method:
try:
counts_for_image = spectrum.loc[indexes[number], 'y_counts_per_seconds']
except IndexError:
counts_for_image = np.NaN
elif 'local_maxima_intensity_ratio' in self.method:
spectrum.reset_index(drop = True, inplace = True)
indexes, _ = sps.find_peaks(spectrum['y_counts_per_seconds'].values, prominence = (spectrum['y_counts_per_seconds'].max()-spectrum['y_counts_per_seconds'].min())/10)
indexes = spectrum.loc[indexes, 'y_counts_per_seconds'].nlargest(2).index
indexes = sorted(indexes)
if 'subtract_minimum' in self.method:
baseline = np.nanmin(spectrum['y_counts_per_seconds'].values)
else:
baseline = 0
try:
counts_for_image = (spectrum.loc[indexes[0], 'y_counts_per_seconds'] - baseline)/(spectrum.loc[indexes[1], 'y_counts_per_seconds'] - baseline)
except IndexError:
counts_for_image = np.NaN
if self.baseline['type'] != None and (set_pixel_to_zero or counts_for_image < 0):
counts_for_image = np.NaN
counts_for_image_along_y.append(counts_for_image)
self.image_data_from_spectra.append(counts_for_image_along_y)
self.image_data_from_spectra = np.transpose(self.image_data_from_spectra)
else:
self.type = 'SPCM'
else:
self.type = 'SPCM'
return True
def fit_spectra(self, fitting_function = '2_voigt_and_linear_baseline', default_widths = 0.0001, parameter_scans = 3, goodness_of_fit_threshold = 0.9, save_to_file = False, file_name_for_fitting_parameter_maps = ''):
def calculate_goodness_of_fit(y_data, y_fit):
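            # This is the coefficient of determination (R^2); values close to 1 indicate
            # that the fit explains most of the variance in the data.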
S_res = np.sum((y_data - y_fit)**2)
S_tot = np.sum((y_data - np.mean(y_data))**2)
return 1 - S_res/S_tot
def calculate_full_width_at_half_maximum(width_gaussian, width_lorentzian):
FWHM = []
for ix in range(len(width_gaussian)):
FWHM_along_y = []
for iy in range(len(width_gaussian[0])):
if np.isnan(width_gaussian[ix][iy]) or np.isnan(width_lorentzian[ix][iy]):
FWHM_along_y.append(np.NaN)
else:
x = np.linspace(1-3*(width_gaussian[ix][iy]+width_lorentzian[ix][iy]), 1+3*(width_gaussian[ix][iy]+width_lorentzian[ix][iy]), 1000)
y = spsp.voigt_profile(x - 1, width_gaussian[ix][iy], width_lorentzian[ix][iy])
maximum = np.max(y)
df = pd.DataFrame(data = {'x' : x, 'y' : y})
df['y_diff'] = np.abs(df.y - maximum/2)
df_left = df[df['x'] < 1]
df_right = df[df['x'] > 1]
y_diff_left_min = df_left['y_diff'].min()
y_diff_right_min = df_right['y_diff'].min()
x_left = df_left[df_left['y_diff'] == y_diff_left_min].x.values
x_right = df_right[df_right['y_diff'] == y_diff_right_min].x.values
FWHM_along_y.append(np.abs(x_left - x_right)[0])
FWHM.append(FWHM_along_y)
return FWHM
def calculate_difference_in_peak_position(peak_position_1, peak_position_2):
return peak_position_1 - peak_position_2
def calculate_amplitude_ratio(amplitude_1, amplitude_2):
return amplitude_1/amplitude_2
self.fitting_parameters = {}
self.fitting_covariance = {}
self.goodness_of_fit = {}
self.fitting_parameter_map = {}
self.positions_of_bad_fits = []
if fitting_function == '2_voigt_and_linear_baseline':
fittings_parameter_identifier = ['Amplitude_1', 'Width_Gaussian_1', 'Width_Lorentzian_1', 'Position_Peak_1', 'Amplitude_2', 'Width_Gaussian_2', 'Width_Lorentzian_2', 'Position_Peak_2']
for identifier in fittings_parameter_identifier:
self.fitting_parameter_map[identifier] = []
for ix, x_position in enumerate(self.x):
fitting_parameter_list = {}
for identifier in fittings_parameter_identifier:
fitting_parameter_list[identifier] = []
for iy, y_position in enumerate(self.y):
position_string = convert_xy_to_position_string([x_position, y_position])
self.fitting_parameters[position_string] = None
self.fitting_covariance[position_string] = None
self.goodness_of_fit[position_string] = 0
for n in range(1,parameter_scans + 1):
for k in range(1,parameter_scans + 1):
try:
p0 = np.zeros(10)
slope, intersect = guess_linear_fit(self.sub_spectra[position_string], self.unit_spectral_range)
p0[0] = slope
p0[1] = intersect
indexes, _ = sps.find_peaks(self.sub_spectra[position_string]['y_counts_per_seconds'].values, prominence = (self.sub_spectra[position_string]['y_counts_per_seconds'].max()-self.sub_spectra[position_string]['y_counts_per_seconds'].min())/10)
indexes_local_maxima = self.sub_spectra[position_string].loc[indexes, 'y_counts_per_seconds'].nlargest(2).index
indexes_local_maxima = sorted(indexes_local_maxima)
p0[3] = default_widths*n
p0[4] = default_widths*n
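                            # Approximate Voigt FWHM built from the default Gaussian/Lorentzian widths
                            # (formula commonly attributed to Olivero & Longbothum); used to turn the
                            # peak height into an initial guess for the peak amplitude.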
width_voigt = 0.5346*default_widths + np.sqrt(0.2166*default_widths**2 + default_widths**2)
p0[2] = self.sub_spectra[position_string].loc[indexes_local_maxima[0], 'y_counts_per_seconds']*width_voigt*k
p0[5] = self.sub_spectra[position_string].loc[indexes_local_maxima[0], 'x_{0}'.format(self.unit_spectral_range)]
p0[7] = default_widths*n
p0[8] = default_widths*n
width_voigt = 0.5346*default_widths + np.sqrt(0.2166*default_widths**2 + default_widths**2)
p0[6] = self.sub_spectra[position_string].loc[indexes_local_maxima[1], 'y_counts_per_seconds']*width_voigt*k
p0[9] = self.sub_spectra[position_string].loc[indexes_local_maxima[1], 'x_{0}'.format(self.unit_spectral_range)]
self.fitting_parameters[position_string], self.fitting_covariance[position_string] = spo.curve_fit(two_voigt_and_linear_baseline,
self.sub_spectra[position_string]['x_{0}'.format(self.unit_spectral_range)], self.sub_spectra[position_string]['y_counts_per_seconds'],
p0 = p0)
self.goodness_of_fit[position_string] = calculate_goodness_of_fit(self.sub_spectra[position_string]['y_counts_per_seconds'],
two_voigt_and_linear_baseline(self.sub_spectra[position_string]['x_{0}'.format(self.unit_spectral_range)],
self.fitting_parameters[position_string][0], self.fitting_parameters[position_string][1],
self.fitting_parameters[position_string][2], self.fitting_parameters[position_string][3], self.fitting_parameters[position_string][4], self.fitting_parameters[position_string][5],
self.fitting_parameters[position_string][6], self.fitting_parameters[position_string][7], self.fitting_parameters[position_string][8], self.fitting_parameters[position_string][9]))
if self.goodness_of_fit[position_string] >= goodness_of_fit_threshold:
break
except RuntimeError:
pass
except IndexError:
break
if self.goodness_of_fit[position_string] >= goodness_of_fit_threshold:
break
if self.goodness_of_fit[position_string] < goodness_of_fit_threshold and n == parameter_scans and k == parameter_scans:
self.positions_of_bad_fits.append(position_string)
self.fitting_parameters[position_string] = None
self.fitting_covariance[position_string] = None
for i, identifier in enumerate(fittings_parameter_identifier):
try:
fitting_parameter_list[identifier].append(self.fitting_parameters[position_string][i + 2])
except TypeError:
fitting_parameter_list[identifier].append(np.NaN)
print('Fitted Line {0}'.format(ix + 1))
for identifier in fittings_parameter_identifier:
self.fitting_parameter_map[identifier].append(fitting_parameter_list[identifier])
for key in self.fitting_parameter_map:
self.fitting_parameter_map[key] = np.array(np.transpose(self.fitting_parameter_map[key]))
self.fitting_parameter_map['FWHM_1'] = calculate_full_width_at_half_maximum(self.fitting_parameter_map['Width_Gaussian_1'], self.fitting_parameter_map['Width_Lorentzian_1'])
self.fitting_parameter_map['FWHM_2'] = calculate_full_width_at_half_maximum(self.fitting_parameter_map['Width_Gaussian_2'], self.fitting_parameter_map['Width_Lorentzian_2'])
self.fitting_parameter_map['Delta_Peak_Positions'] = calculate_difference_in_peak_position(self.fitting_parameter_map['Position_Peak_1'], self.fitting_parameter_map['Position_Peak_2'])
self.fitting_parameter_map['Amplitude_Ratio'] = calculate_amplitude_ratio(self.fitting_parameter_map['Amplitude_1'], self.fitting_parameter_map['Amplitude_2'])
if save_to_file:
for key in self.fitting_parameter_map:
file_name_for_fitting_parameter_maps_key = file_name_for_fitting_parameter_maps + '_{0}.txt'.format(key)
np.savetxt(file_name_for_fitting_parameter_maps_key, self.fitting_parameter_map[key])
return True
def add_image(self, scale_bar=None, scale='Auto', color_bar=True, plot_style=None, image_from_spectra=False,
masking_treshold = None,
interpolation=None,
axis_ticks = False,
image_from_fitting_parameters = False, fitting_parameter_identifier = 'Amplitude_1',
image_from_file = False, image_file = '', image_identifier = ''):
# Define function for formatting scale bar text
def format_scale_bar(scale_value, unit='um'):
if unit == 'um':
scale_string = r'{0}'.format(int(scale_value)) + r' $\mathrm{\mu}$m'
return scale_string
# Set plotting style
if plot_style is not None:
plt.style.use(plot_style)
# Generate figure, axes
figure = plt.figure(figsize=(15, 10))
axes = figure.add_subplot(1, 1, 1)
# Plot image
if image_from_spectra:
self.image_data_to_plot = self.image_data_from_spectra
elif image_from_fitting_parameters:
self.image_data_to_plot = self.fitting_parameter_map[fitting_parameter_identifier]
elif image_from_file:
self.image_data_to_plot = np.loadtxt(image_file)
else:
self.image_data_to_plot = self.image_data
if masking_treshold is not None:
self.image_data_to_plot = np.ma.masked_where((self.image_data_to_plot <= masking_treshold[0]) | (self.image_data_to_plot >= masking_treshold[1]),
self.image_data_to_plot)
if scale == 'Auto':
im = axes.imshow(self.image_data_to_plot,
cmap=plt.get_cmap('gray'),
extent=[self.extent['x_min'], self.extent['x_max'], self.extent['y_min'],
self.extent['y_max']],
interpolation=interpolation)
elif scale == 'Normalized':
im = axes.imshow((self.image_data_to_plot - np.min(self.image_data_to_plot)) / (
np.max(self.image_data_to_plot) - np.min(self.image_data_to_plot)),
cmap=plt.get_cmap('gray'),
extent=[self.extent['x_min'], self.extent['x_max'], self.extent['y_min'],
self.extent['y_max']],
interpolation=interpolation)
else:
default_scale = {'minimum_value': np.nanmin(self.image_data_to_plot), 'maximum_value': np.nanmax(self.image_data_to_plot), 'norm': None, 'color_map': 'gray'}
for key in default_scale:
if key not in scale:
scale[key] = default_scale[key]
if scale['norm'] is None:
im = axes.imshow(self.image_data_to_plot,
cmap=plt.get_cmap(scale['color_map']),
extent=[self.extent['x_min'], self.extent['x_max'], self.extent['y_min'],
self.extent['y_max']],
vmin=scale['minimum_value'], vmax=scale['maximum_value'],
interpolation=interpolation)
elif scale['norm'] == 'log':
im = axes.imshow(self.image_data_to_plot,
cmap=plt.get_cmap(scale['color_map']),
extent=[self.extent['x_min'], self.extent['x_max'], self.extent['y_min'],
self.extent['y_max']],
norm=LogNorm(vmin=scale['minimum_value'], vmax=scale['maximum_value']),
interpolation=interpolation)
# Add scale bar
if scale_bar is not None:
default_scale_bar = {'scale': 5, 'font_size': 24, 'color': 'white', 'position': 'lower left'}
if not isinstance(scale_bar, dict):
default_scale_bar['scale'] = scale_bar
scale_bar = default_scale_bar
else:
for key in default_scale_bar:
if key not in scale_bar:
scale_bar[key] = default_scale_bar[key]
fontprops = fm.FontProperties(size=scale_bar['font_size'])
scalebar = AnchoredSizeBar(axes.transData,
scale_bar['scale'],
format_scale_bar(scale_bar['scale']),
scale_bar['position'],
pad=0.1,
color=scale_bar['color'],
frameon=False,
size_vertical=0.5,
sep=5,
fontproperties=fontprops)
axes.add_artist(scalebar)
        # Label the axes, or turn ticks/labels off entirely
if axis_ticks:
axes.set_xlabel(r'Location $x$ ($\mathrm{\mu}$m)')
axes.set_ylabel(r'Location $y$ ($\mathrm{\mu}$m)')
else:
plt.axis('off')
# Display color bar
if color_bar:
divider = make_axes_locatable(axes)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = figure.colorbar(im, cax=cax, orientation='vertical')
if (not image_from_fitting_parameters) and (not image_from_file):
if self.method == 'sum':
if scale == 'Normalized':
cax.set_ylabel('Normalized PL-Intensity (rel. units)')
else:
cax.set_ylabel('PL-Intensity (counts/second)')
elif 'position' in self.method:
if self.unit_spectral_range == 'eV':
cax.set_ylabel('Photon Energy (eV)')
elif self.unit_spectral_range == 'nm':
cax.set_ylabel('Wavelength (nm)')
elif 'intensity' in self.method:
if 'ratio' in self.method:
cax.set_ylabel('PL-Intensity Ratio (rel. units)')
else:
if scale == 'Normalized':
cax.set_ylabel('Normalized PL-Intensity (rel. units)')
else:
cax.set_ylabel('PL-Intensity (rel. units)')
elif (not image_from_file):
if fitting_parameter_identifier in ['Width_Gaussian_1', 'Width_Lorentzian_1', 'Position_Peak_1', 'Width_Gaussian_2', 'Width_Lorentzian_2', 'Position_Peak_2', 'FWHM_1', 'FWHM_2', 'Delta_Peak_Positions']:
if self.unit_spectral_range == 'eV':
cax.set_ylabel('Photon Energy (eV)')
elif self.unit_spectral_range == 'nm':
cax.set_ylabel('Wavelength (nm)')
elif fitting_parameter_identifier in ['Amplitude_1', 'Amplitude_2', 'Amplitude_Ratio']:
if scale == 'Normalized':
cax.set_ylabel('Normalized PL-Intensity (rel. units)')
else:
cax.set_ylabel('PL-Intensity (rel. units)')
else:
if image_identifier in ['Width_Gaussian_1', 'Width_Lorentzian_1', 'Position_Peak_1', 'Width_Gaussian_2', 'Width_Lorentzian_2', 'Position_Peak_2', 'FWHM_1', 'FWHM_2', 'Delta_Peak_Positions']:
if self.unit_spectral_range == 'eV':
cax.set_ylabel('Photon Energy (eV)')
elif self.unit_spectral_range == 'nm':
cax.set_ylabel('Wavelength (nm)')
elif image_identifier in ['Amplitude_1', 'Amplitude_2', 'Amplitude_Ratio']:
if scale == 'Normalized':
cax.set_ylabel('Normalized PL-Intensity (rel. units)')
else:
cax.set_ylabel('PL-Intensity (rel. units)')
# Set nan pixel to red
color_map = mpl.cm.get_cmap()
color_map.set_bad(color = 'white', alpha = 0)
        # Add figure and axes to self for further manipulation
self.image = {'figure': figure, 'axes': axes}
return True
# Define function for adding markers on image
def add_marker_to_image(self, marker=None):
def convert_marker_string_to_marker_array(string):
string_components = string.split('_')
axes = ['x', 'y']
marker = []
for string_component in string_components:
for axis in axes:
if axis in string_component:
number_string = string_component.split(axis)[1]
number = convert_to_string_to_float(number_string)
break
marker.append(number)
if len(marker) != 2:
raise ValueError('String entered for marker is not in correct format.')
return marker
marker_default = {'type': 'dot', 'color': 'red', 'x_position': 0, 'y_position': 0, 'width': 0, 'height': 0,
'size': 100, 'position_string': None}
# Test if self.image exists
try:
if not isinstance(marker, dict):
if isinstance(marker, str):
marker = convert_marker_string_to_marker_array(marker)
marker = {'x_position': marker[0], 'y_position': marker[1]}
for key in marker_default:
if key not in marker:
marker[key] = marker_default[key]
# Overwrite x and y position with position_string if it was given
if marker['position_string'] is not None:
x_position, y_position = convert_marker_string_to_marker_array(marker['position_string'])
marker['x_position'] = x_position
marker['y_position'] = y_position
# Add marker point
if marker['type'] == 'dot':
self.image['axes'].scatter(marker['x_position'], marker['y_position'], edgecolors='face',
c=marker['color'], s=marker['size'])
elif marker['type'] == 'area':
if marker['size'] == 100:
marker['size'] = 3
area = patches.Rectangle((marker['x_position'], marker['y_position']), marker['width'],
marker['height'],
linewidth=marker['size'], edgecolor=marker['color'], facecolor='none')
self.image['axes'].add_patch(area)
        except AttributeError:
            raise AttributeError('Image was not added yet. Run .add_image() first!')
return True
def add_histogram(self, image_from_spectra = False, plot_style = None, bins = 'auto'):
# Set plotting style
if plot_style is not None:
plt.style.use(plot_style)
# Generate figure, axes
figure = plt.figure(figsize=(15, 10))
axes = figure.add_subplot(1, 1, 1)
axes.set_ylabel('Frequency')
if self.method == 'sum':
axes.set_xlabel('PL-Intensity (counts/second)')
elif 'position' in self.method:
if self.unit_spectral_range == 'eV':
axes.set_xlabel('Photon Energy (eV)')
elif self.unit_spectral_range == 'nm':
axes.set_xlabel('Wavelength (nm)')
# Plot image
if image_from_spectra:
self.histogram_data = self.image_data_from_spectra.flatten()
else:
self.histogram_data = self.image_data.flatten()
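        # 'auto' falls back to the square-root rule: one bin per sqrt(number of pixels).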
if bins == 'auto':
bins = int(np.sqrt(len(self.histogram_data)))
axes.hist(self.histogram_data, bins = bins)
        # Add figure and axes to self for further manipulation
self.histogram = {'figure': figure, 'axes': axes}
return True
def save_image(self, title, file_extension='pdf', folder='.'):
title = '{0}/{1}.{2}'.format(folder, title, file_extension)
self.image['figure'].savefig(title, bbox_inches='tight', transparent=True)
return True
def save_histogram(self, title, file_extension='pdf', folder='.'):
title = '{0}/{1}.{2}'.format(folder, title, file_extension)
self.histogram['figure'].savefig(title, bbox_inches='tight', transparent=True)
return True
class DataHyperSpectral:
def __init__(self, file_name, spectral_ranges, unit_spectral_range='eV', folder_name='.'):
self.spectral_ranges = spectral_ranges
self.unit_spectral_range = unit_spectral_range
self.file_name = file_name
self.folder_name = folder_name
self.spectral_scans = {}
for n, spectral_range in enumerate(self.spectral_ranges):
self.spectral_scans[n] = DataConfocalScan(self.file_name, self.folder_name, spectral_range,
self.unit_spectral_range)
def add_hyperspectral_image(self, masking_treshold, scale_bar=None, plot_style=None, interpolation=None):
def get_mask(image_data, n):
mask = {}
for k in image_data:
if k != n:
mask[k] = image_data[n] > image_data[k]
sum_mask = np.zeros(image_data[n].shape)
for k in mask:
sum_mask += mask[k]
final_mask = np.zeros(image_data[n].shape)
for i in range(len(sum_mask)):
for j in range(len(sum_mask[i])):
if sum_mask[i][j] < len(image_data) - 1:
final_mask[i][j] = 0
else:
final_mask[i][j] = 1
return final_mask
# Define function for formatting scale bar text
def format_scale_bar(scale_value, unit='um'):
if unit == 'um':
scale_string = r'{0}'.format(int(scale_value)) + r' $\mathrm{\mu}$m'
return scale_string
# Set color maps
color_maps = ['Reds_r', 'Greens_r', 'Blues_r', 'Purples_r']
# Set plotting style
        if plot_style is not None:
plt.style.use(plot_style)
# Generate figure, axes
figure = plt.figure(figsize=(15, 10))
axes = figure.add_subplot(1, 1, 1)
# Normalize image data
self.image_data_to_plot = {}
for n in range(len(self.spectral_ranges)):
self.image_data_to_plot[n] = (self.spectral_scans[n].image_data_from_spectra - np.min(
self.spectral_scans[n].image_data_from_spectra))
self.image_data_to_plot[n] = self.image_data_to_plot[n] / np.max(self.image_data_to_plot[n])
# Mask and plot image data
for n in range(len(self.spectral_ranges)):
            if masking_treshold is not None:
mask = self.image_data_to_plot[n] <= masking_treshold
else:
mask = get_mask(self.image_data_to_plot, n)
self.image_data_to_plot[n] = np.ma.masked_array(self.image_data_to_plot[n], mask)
im = axes.imshow(self.image_data_to_plot[n],
cmap=plt.get_cmap(color_maps[n]),
extent=[self.spectral_scans[n].extent['x_min'], self.spectral_scans[n].extent['x_max'],
self.spectral_scans[n].extent['y_min'], self.spectral_scans[n].extent['y_max']],
interpolation=interpolation)
# Add scale bar
if scale_bar is not None:
default_scale_bar = {'scale': 5, 'font_size': 24, 'color': 'white', 'position': 'lower left'}
if not isinstance(scale_bar, dict):
default_scale_bar['scale'] = scale_bar
scale_bar = default_scale_bar
else:
for key in default_scale_bar:
if key not in scale_bar:
scale_bar[key] = default_scale_bar[key]
fontprops = fm.FontProperties(size=scale_bar['font_size'])
scalebar = AnchoredSizeBar(axes.transData,
scale_bar['scale'],
format_scale_bar(scale_bar['scale']),
scale_bar['position'],
pad=0.1,
color=scale_bar['color'],
frameon=False,
size_vertical=0.5,
sep=5,
fontproperties=fontprops)
axes.add_artist(scalebar)
# Turn axes labels/ticks off
plt.axis('off')
# Set nan pixel to red
color_map = mpl.cm.get_cmap()
color_map.set_bad(color = 'white', alpha = 0)
        # Add figure and axes to self for further manipulation
self.image = {'figure': figure, 'axes': axes}
return True
def save_image(self, title, file_extension='pdf', folder='.'):
title = '{0}/{1}.{2}'.format(folder, title, file_extension)
self.image['figure'].savefig(title, bbox_inches='tight', transparent=True, facecolor='black')
return True
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import ConnectionPatch
from collections import Counter
from scipy.optimize import linear_sum_assignment
import math
img1 = 'S3_016_02_05.jpg'
img2 = 'S3_016_02_06.jpg'
images = []
count = []
mode = 0
def detect_pill(img):
img = cv2.imread(img,0)
img = img[115:760, 50:980].copy()
img = cv2.medianBlur(img,5)
dst = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,31,2)
dst = cv2.medianBlur(dst,3)
circles = cv2.HoughCircles(dst, cv2.HOUGH_GRADIENT,1,11,
param1=5,param2=13,minRadius=7,maxRadius=20)
circles = np.uint16(np.around(circles))
points = []
for i in circles[0,:]:
points.append([i[0],i[1]])
count.append(len(points))
images.append(dst)
return points
def get_dist(points1, points2):
distance = []
for i in range(len(points1)):
x1 = points1[i][0]
y1 = points1[i][1]
p1 = points1[i]
dst = []
for j in range(len(points2)):
x2 = points2[j][0]
y2 = points2[j][1]
p2 = points2[j]
# print(x1, y1, x2, y2)
dst.append(int(math.dist(p1, p2)))
distance.append(dst)
#distance[i].append(math.sqrt((x2 - x1)**2 + (y2 - y1)**2))
return distance
if __name__ == '__main__':
# import_pill_group(64, 1)
p1 = detect_pill(img1)
p2 = detect_pill(img2)
print(p1)
print(p2)
print("=============================")
dst = get_dist(p1, p2)
print(dst)
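    # Solve the assignment (Hungarian) problem on the distance matrix: row_ind/col_ind
    # pair each pill in the first image with its closest-cost match in the second.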
row_ind, col_ind = linear_sum_assignment(dst)
print("=============================")
#plt.subplot(1,2,j+1), plt.imshow(images[j], 'gray')
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_subplot(121)
plt.imshow(images[0], 'gray')
ax2 = fig.add_subplot(122)
plt.imshow(images[1], 'gray')
    # One connection line per matched pair returned by the assignment.
    for i in range(len(row_ind)):
        xyA = (p1[row_ind[i]][0], p1[row_ind[i]][1])
        xyB = (p2[col_ind[i]][0], p2[col_ind[i]][1])
        con = ConnectionPatch(xyA=xyA, xyB=xyB, coordsA="data", coordsB="data",
                              axesA=ax2, axesB=ax1, color=(min(i * 15, 255) / 255, 0, 0))
ax2.add_artist(con)
plt.show()
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for BRICK
https://github.com/brick-tool-dev/brick-tool
"""
REQUIRED_PATHS = ["bin", "lib"]
def executable(self):
return util.find_executable("bin/brick")
def name(self):
return "BRICK"
def cmdline(self, executable, options, tasks, propertyfile, rlimits):
return [executable] + options + tasks
def version(self, executable):
return self._version_from_tool(executable, arg="--version")
def program_files(self, executable):
paths = self.REQUIRED_PATHS
return [executable] + self._program_files_from_executable(
executable, paths, parent_dir=True
)
def determine_result(self, returncode, returnsignal, output, isTimeout):
status = result.RESULT_ERROR
for line in output:
if line == "VERIFICATION SUCCESSFUL\n":
status = result.RESULT_TRUE_PROP
break
elif line == "VERIFICATION FAILED\n":
status = result.RESULT_FALSE_REACH
break
elif (
line == "VERIFICATION UNKNOWN\n"
or line == "VERIFICATION BOUNDED TRUE\n"
):
status = result.RESULT_UNKNOWN
break
return status
|
#! /usr/bin/python
import smtplib
email = ""#email for a gmail account here
password = ""#password for a gmail account here
#to get phone number
num = input("What phone number?")
#to hard code it
#num = ""#put phone number here
#to get service provider
sp = input("Which service provider \n 1.At&t \n 2.Tmobile \n 3.Verizon \n 4.Sprint \n 5.Cricket \n")
#to hard code it
#sp = ""#put number of service provider here
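#map the chosen carrier to its email-to-SMS gateway domain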
if(sp == "1"):
sp = "@txt.att.net"
elif(sp == "2"):
sp = "@tmomail.net"
elif(sp == "3"):
sp = "@vtext.com"
elif(sp == "4"):
sp = "@messaging.sprintpcs.com"
elif(sp == "5"):
sp = "@sms.cricketwireless.net"
else:
print("Not a valid option please restart app")
exit()
print("delivery address: "+num+sp)
sentfrom = email
sentto = num+sp
msg = input("What would you like to say?\n")
email_text = 'From: %s\nTo: %s\nSubject: %s\n\n%s' % (sentfrom, sentto, " ", msg)
print("formatting done")
print("trying to connect")
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server.ehlo()
print("connected")
server.login(email,password)
print("logged in")
server.sendmail(sentfrom,sentto,email_text)
server.quit()
print("text sent!")
|
"""
Minimal code to support ignored makemigrations (like django.db.backends.*.schema)
without interaction to SF (without migrate)
"""
from django.db import NotSupportedError
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from salesforce.backend import log
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
# pylint:disable=abstract-method # undefined: prepare_default, quote_value
def __init__(self, connection, collect_sql=False):
self.connection_orig = connection
self.collect_sql = collect_sql
# if self.collect_sql:
# self.collected_sql = []
super(DatabaseSchemaEditor, self).__init__(connection, collect_sql=collect_sql)
# State-managing methods
def __enter__(self):
self.deferred_sql = [] # pylint:disable=attribute-defined-outside-init
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
def execute(self, sql, params=()):
if sql == 'CREATE TABLE django_migrations ()' and params is None:
return
raise NotSupportedError("Migration SchemaEditor: %r, %r" % (sql, params))
def create_model(self, model):
log.info("Skipped in SchemaEditor: create_model %s", model)
|
import nltk
from util import adjective_util
def do(line):
# inspired by the work of Colin Johnson (https://scholar.google.com/citations?hl=en&user=6W7BtygAAAAJ)
tokens = nltk.word_tokenize(line)
adjacent_adjective_indices = adjective_util.get_indices_of_adjacent_adjectives(line)
for adjacent_group in adjacent_adjective_indices:
while len(adjacent_group) > 1:
index = adjacent_group.pop()
del tokens[index]
if ',' in tokens:
comma_index = tokens.index(',')
del tokens[comma_index]
return ' '.join(tokens)
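# Rough effect (illustrative): runs of adjacent adjectives are collapsed to their first
# adjective, and a single leftover comma (if any) is removed from the sentence.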
|
# -*- coding: utf-8 -*-
"""
Package to integrate DymaxionLabs' functionality:
- Upload images
- Predict images based on object detection models
- Download results
"""
import os
from pkg_resources import get_distribution, DistributionNotFound
from . import files
from . import models
from . import utils
try:
# Change here if project is renamed and does not equal the package name
dist_name = __name__
__version__ = get_distribution(dist_name).version
except DistributionNotFound:
__version__ = 'unknown'
finally:
del get_distribution, DistributionNotFound
|
import pytest
from django.core.exceptions import ImproperlyConfigured
from mock import patch, call
from pylogctx.django import (
ExtractRequestContextMiddleware,
OuterMiddleware,
context as log_context
)
from pylogctx import log_adapter
@pytest.fixture()
def request():
return {'rid': 42}
@pytest.yield_fixture
def context():
log_context.update(rid=42)
yield log_context
try:
del log_context._stack
except AttributeError:
pass
def _extractor(r):
return r
def _failing_extractor(r):
raise KeyError
def test_middleware_no_extractor(request):
with pytest.raises(ImproperlyConfigured):
ExtractRequestContextMiddleware().process_request(request)
@patch('pylogctx.django.settings',
PYLOGCTX_REQUEST_EXTRACTOR=_failing_extractor)
def test_middleware_extraction_failed(settings, request):
with patch('pylogctx.django.logger') as m:
ExtractRequestContextMiddleware().process_request(request)
assert call.exception() in m.method_calls
@patch('pylogctx.django.settings', PYLOGCTX_REQUEST_EXTRACTOR=_extractor)
def test_middleware_context_extracted(settings, request, context):
ExtractRequestContextMiddleware().process_request(request)
fields = log_context.as_dict()
assert 'rid' in fields
def test_middleware_context_cleaned_on_response(context):
ExtractRequestContextMiddleware().process_response(None, None)
assert not log_context.as_dict()
def test_middleware_context_cleaned_on_exception(context):
ExtractRequestContextMiddleware().process_exception(None, None)
assert not log_context.as_dict()
@patch.dict('pylogctx.core._adapter_mapping')
def test_middleware_adapter(request, context):
@log_adapter(request.__class__)
def adapter(request):
return {
'djangoRequestId': id(request),
}
OuterMiddleware().process_request(request)
fields = log_context.as_dict()
assert 'djangoRequestId' in fields
def test_middleware_missing_adapter(request, context):
OuterMiddleware().process_request(request)
|
import re
import random
import itertools
import math
from collections import defaultdict
from src.utilities import *
from src import channels, users, debuglog, errlog, plog
from src.functions import get_players, get_all_players, get_main_role, get_reveal_role, get_target
from src.decorators import command, event_listener
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.messages import messages
from src.status import try_misdirection, try_exchange, add_protection
from src.cats import Wolf
@event_listener("player_win")
def on_player_win(evt, var, player, mainrole, winner, survived):
if winner == "monsters" and mainrole == "monster":
evt.data["won"] = True
@event_listener("chk_win", priority=4)
def on_chk_win(evt, var, rolemap, mainroles, lpl, lwolves, lrealwolves):
monsters = rolemap.get("monster", ())
traitors = rolemap.get("traitor", ())
if not lwolves and not traitors and monsters:
s = "s" if len(monsters) > 1 else ""
evt.data["message"] = messages["monster_win"].format(s, "" if s else "s")
evt.data["winner"] = "monsters"
elif lwolves >= lpl / 2 and monsters:
s = "s" if len(monsters) > 1 else ""
evt.data["message"] = messages["monster_wolf_win"].format(s)
evt.data["winner"] = "monsters"
@event_listener("transition_night_end")
def on_transition_night_end(evt, var):
for monster in get_all_players(("monster",)):
add_protection(var, monster, protector=None, protector_role="monster", scope=Wolf)
if monster.prefers_simple():
monster.send(messages["monster_simple"])
else:
monster.send(messages["monster_notify"])
@event_listener("remove_protection")
def on_remove_protection(evt, var, target, attacker, attacker_role, protector, protector_role, reason):
if attacker_role == "fallen angel" and protector_role == "monster":
evt.data["remove"] = True
@event_listener("get_role_metadata")
def on_get_role_metadata(evt, var, kind):
if kind == "role_categories":
evt.data["monster"] = {"Neutral", "Win Stealer", "Cursed"}
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pytest
import numpy as np
from bigdl.dataset.base import maybe_download
from test.zoo.pipeline.utils.test_utils import ZooTestCase
from zoo.pipeline.inference import InferenceModel
import tarfile
np.random.seed(1337) # for reproducibility
resource_path = os.path.join(os.path.split(__file__)[0], "../../resources")
property_path = os.path.join(os.path.split(__file__)[0],
"../../../../../zoo/target/classes/app.properties")
tensorflow_url = "http://download.tensorflow.org"
data_url = "https://s3-ap-southeast-1.amazonaws.com"
with open(property_path) as f:
for _ in range(2): # skip the first two lines
next(f)
for line in f:
if "data-store-url" in line:
line = line.strip()
data_url = line.split("=")[1].replace("\\", "")
class TestInferenceModel(ZooTestCase):
def test_load_bigdl(self):
model = InferenceModel(3)
model.load_bigdl(os.path.join(resource_path, "models/bigdl/bigdl_lenet.model"))
input_data = np.random.random([4, 28, 28, 1])
output_data = model.predict(input_data)
def test_load_caffe(self):
model = InferenceModel(10)
model.load_caffe(os.path.join(resource_path, "models/caffe/test_persist.prototxt"),
os.path.join(resource_path, "models/caffe/test_persist.caffemodel"))
input_data = np.random.random([4, 3, 8, 8])
output_data = model.predict(input_data)
def test_load_openvino(self):
local_path = self.create_temp_dir()
model = InferenceModel(1)
model_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.xml"
weight_url = data_url + "/analytics-zoo-models/openvino/2018_R5/resnet_v1_50.bin"
model_path = maybe_download("resnet_v1_50.xml",
local_path, model_url)
weight_path = maybe_download("resnet_v1_50.bin",
local_path, weight_url)
model.load_openvino(model_path, weight_path)
input_data = np.random.random([4, 1, 224, 224, 3])
model.predict(input_data)
def test_load_tf_openvino_od(self):
local_path = self.create_temp_dir()
url = data_url + "/models/object_detection/faster_rcnn_resnet101_coco_2018_01_28.tar.gz"
file_abs_path = maybe_download("faster_rcnn_resnet101_coco_2018_01_28.tar.gz",
local_path, url)
tar = tarfile.open(file_abs_path, "r:gz")
extracted_to = os.path.join(local_path, "faster_rcnn_resnet101_coco_2018_01_28")
if not os.path.exists(extracted_to):
print("Extracting %s to %s" % (file_abs_path, extracted_to))
tar.extractall(local_path)
tar.close()
model = InferenceModel(3)
model.load_tf(model_path=extracted_to + "/frozen_inference_graph.pb",
backend="openvino",
model_type="faster_rcnn_resnet101_coco",
ov_pipeline_config_path=extracted_to + "/pipeline.config",
ov_extensions_config_path=None)
input_data = np.random.random([4, 1, 3, 600, 600])
output_data = model.predict(input_data)
model2 = InferenceModel(3)
model2.load_tf_object_detection_as_openvino(
model_path=extracted_to + "/frozen_inference_graph.pb",
object_detection_model_type="faster_rcnn_resnet101_coco",
pipeline_config_path=extracted_to + "/pipeline.config",
extensions_config_path=None)
model2.predict(input_data)
# def test_load_tf_openvino_ic(self):
# local_path = self.create_temp_dir()
# print(local_path)
# url = tensorflow_url + "/models/resnet_v1_50_2016_08_28.tar.gz"
# file_abs_path = maybe_download("resnet_v1_50_2016_08_28.tar.gz", local_path, url)
# tar = tarfile.open(file_abs_path, "r:gz")
# print("Extracting %s to %s" % (file_abs_path, local_path))
# tar.extractall(local_path)
# tar.close()
# model = InferenceModel(3)
# model.load_tf_image_classification_as_openvino(
# model_path=None,
# image_classification_model_type="resnet_v1_50",
# checkpoint_path=local_path + "/resnet_v1_50.ckpt",
# input_shape=[4, 224, 224, 3],
# if_reverse_input_channels=True,
# mean_values=[123.68, 116.78, 103.94],
# scale=1)
# print(model)
# input_data = np.random.random([4, 1, 224, 224, 3])
# model.predict(input_data)
if __name__ == "__main__":
pytest.main([__file__])
|
from idom import component, html, run
@component
def Photo():
return html.img(
{
"src": "https://picsum.photos/id/237/500/300",
"style": {"width": "50%"},
"alt": "Puppy",
}
)
run(Photo)
|
from flask import Flask
from flask_restful import reqparse
from mrat_rest import MelanomaRiskAssessmentTool
app = Flask('onco_cancer_prognosis')
mrat = MelanomaRiskAssessmentTool()
@app.route('/skin_cancer_prognosis', methods=['POST'])
def skin_cancer_prognosis():
parser = reqparse.RequestParser()
parser.add_argument('age')
parser.add_argument('gender')
parser.add_argument('sunburn')
parser.add_argument('complexion')
parser.add_argument('big-moles')
parser.add_argument('small-moles')
parser.add_argument('freckling')
parser.add_argument('damage')
parser.add_argument('tan')
args = parser.parse_args()
out = mrat.getAbsoluteRisk(args)
return out
# Running the Main Application
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=False)
|
# Copyright 2018 Argo AI, LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generate caffe layer according to mxnet config.
"""
import constants
from ast import literal_eval
from caffe import layers
from caffe import params
def make_list(str_inp):
"""Create a list from a string of numbers.
Args:
str_inp (str): Expression to convert to list
Returns:
list: Converted list
"""
val = literal_eval(str_inp)
if type(val) is not tuple:
val = [val]
return list(val)
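# For reference: make_list('(1, 1)') returns [1, 1]; make_list('3') returns [3].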
def get_caffe_layer(node, net, input_dims):
"""Generate caffe layer for corresponding mxnet op.
Args:
node (iterable from MxnetParser): Mxnet op summary generated by MxnetParser
        net (caffe.net): Caffe netspec object
        input_dims (list): Spatial dimensions of the network input, used to convert
            relative anchor sizes and steps into pixel units
Returns:
caffe.layers: Equivalent caffe layer
"""
if node['type'] == 'Convolution':
assert len(node['inputs']) == 1, \
'Convolution layers can have only one input'
conv_params = node['attr']
kernel_size = make_list(conv_params['kernel'])
num_filters = make_list(conv_params['num_filter'])[0]
if 'stride' in conv_params:
stride = make_list(conv_params['stride'])[0]
else:
stride = 1
padding = make_list(conv_params['pad'])
if 'dilate' in conv_params:
dilation = make_list(conv_params['dilate'])[0]
else:
dilation = 1
convolution_param = {'pad': padding,
'kernel_size': kernel_size,
'num_output': num_filters,
'stride': stride,
'dilation': dilation}
return layers.Convolution(net[node['inputs'][0]],
convolution_param=convolution_param)
if node['type'] == 'Activation':
assert len(node['inputs']) == 1, \
'Activation layers can have only one input'
assert node['attr']['act_type'] == 'relu'
return layers.ReLU(net[node['inputs'][0]])
if node['type'] == 'Pooling':
assert len(node['inputs']) == 1, \
'Pooling layers can have only one input'
kernel_size = make_list(node['attr']['kernel'])
stride = make_list(node['attr']['stride'])
pooling_type = node['attr']['pool_type']
if 'pad' in node['attr']:
padding = make_list(node['attr']['pad'])
else:
padding = [0]
if pooling_type == 'max':
pooling = params.Pooling.MAX
        elif pooling_type == 'avg':
            pooling = params.Pooling.AVG
        else:
            raise ValueError('Unsupported pooling type: {}'.format(pooling_type))
pooling_param = {'pool': pooling, 'pad': padding[0],
'kernel_size': kernel_size[0], 'stride': stride[0]}
return layers.Pooling(net[node['inputs'][0]],
pooling_param=pooling_param)
if node['type'] == 'L2Normalization':
across_spatial = node['attr']['mode'] != 'channel'
channel_shared = False
scale_filler = {
'type': "constant",
'value': constants.NORMALIZATION_FACTOR
}
norm_param = {'across_spatial': across_spatial,
'scale_filler': scale_filler,
'channel_shared': channel_shared}
return layers.Normalize(net[node['inputs'][0]],
norm_param=norm_param)
# Note - this layer has been implemented
# only in WeiLiu's ssd branch of caffe not in caffe master
if node['type'] == 'transpose':
order = make_list(node['attr']['axes'])
return layers.Permute(net[node['inputs'][0]],
permute_param={'order': order})
if node['type'] == 'Flatten':
if node['inputs'][0].endswith('anchors'):
axis = 2
else:
axis = 1
return layers.Flatten(net[node['inputs'][0]],
flatten_param={'axis': axis})
if node['type'] == 'Concat':
# In the ssd model, always concatenate along last axis,
# since anchor boxes have an extra dimension in caffe (that includes variance).
axis = -1
concat_inputs = [net[inp] for inp in node['inputs']]
return layers.Concat(*concat_inputs, concat_param={'axis': axis})
if node['type'] == 'Reshape':
if node['name'] == 'multibox_anchors':
reshape_dims = [1, 2, -1]
else:
reshape_dims = make_list(node['attr']['shape'])
return layers.Reshape(net[node['inputs'][0]],
reshape_param={'shape': {'dim': reshape_dims}})
if node['type'] == '_contrib_MultiBoxPrior':
priorbox_inputs = [net[inp] for inp in node['inputs']] + [net["data"]]
sizes = make_list(node["attr"]["sizes"])
min_size = sizes[0] * input_dims[0]
max_size = int(round((sizes[1] * input_dims[0]) ** 2 / min_size))
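        # MXNet MultiBoxPrior sizes are fractions of the input size, while caffe's PriorBox
        # expects pixels; max_size above is chosen so that sqrt(min_size * max_size) matches
        # the second MXNet size, which is the scale caffe uses for the larger prior box.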
aspect_ratio = make_list(node["attr"]["ratios"])
steps = make_list(node["attr"]["steps"])
param = {'clip': node["attr"]["clip"] == "true",
'flip': False,
'min_size': min_size,
'max_size': max_size,
'aspect_ratio': aspect_ratio,
'variance': [0.1, 0.1, 0.2, 0.2],
'step': int(round(steps[0] * input_dims[0])),
}
return layers.PriorBox(*priorbox_inputs, prior_box_param=param)
if node['type'] == '_contrib_MultiBoxDetection':
multibox_inputs = [net[inp] for inp in node['inputs']]
bottom_order = [1, 0, 2]
multibox_inputs = [multibox_inputs[i] for i in bottom_order]
param = {
'num_classes': constants.NUM_CLASSES,
'share_location': True,
'background_label_id': 0,
'nms_param': {
'nms_threshold': float(node['attr']['nms_threshold']),
'top_k': int(node['attr']['nms_topk'])
},
'keep_top_k': make_list(node['attr']['nms_topk'])[0],
'confidence_threshold': 0.01,
'code_type': params.PriorBox.CENTER_SIZE,
}
return layers.DetectionOutput(*multibox_inputs, detection_output_param=param)
if node['type'] in ['SoftmaxActivation', 'SoftmaxOutput']:
if 'mode' not in node['attr']:
axis = 1
elif node['attr']['mode'] == 'channel':
axis = 1
else:
axis = 0
# note: caffe expects confidence scores to be flattened before detection output layer receives it
return layers.Flatten(layers.Permute(layers.Softmax(net[node['inputs'][0]],
axis=axis),
permute_param={'order': [0, 2, 1]}),
flatten_param={'axis': 1})
|
import os
from flask import Flask, Response, send_from_directory, request, make_response, jsonify
from flask_expects_json import expects_json
from apscheduler.schedulers.background import BackgroundScheduler
from pytz import utc
import json
from .countryrepository import CountryRepository
from flask_cors import CORS
from .simulator import Simulator
from .coviddatarepository import CovidDataRepository
from .simulation.covidlib import cl_path_prefix
def create_app():
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
CORS(app)
data_repo = CovidDataRepository(os.environ.get("DATA_DIR", "/tmp"))
def update_covid_data():
app.logger.info("Updating COVID-19 data.")
data_repo.update_data()
app.logger.info("Finished updating COVID-19 data.")
country_repo = CountryRepository()
scheduler = BackgroundScheduler(timezone=utc)
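    # Refresh the data every two hours; the second job has no trigger, so APScheduler
    # runs it once immediately after start(), giving an initial data load at startup.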
scheduler.add_job(func=update_covid_data, trigger='interval', hours=2)
scheduler.add_job(func=update_covid_data)
scheduler.start()
def not_found_if_none(data, country_code):
if data is None:
return Response(response=json.dumps({
"status": 404,
"error": "Couldn't find data for country " + country_code
}), status=404, mimetype="application/json")
else:
return jsonify(data)
app_path_prefix = os.getenv("PUBLIC_URL", "")
@app.route(f'{app_path_prefix}/api/countries')
def countries():
return jsonify(country_repo.country_list())
@app.route(f'{app_path_prefix}/api/countries/<country_code>')
def country_details(country_code):
return not_found_if_none(country_repo.country_details(country_code), country_code)
with open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "simulation-request.schema.json")) as schema_file:
schema = json.load(schema_file)
@app.route(f'{app_path_prefix}/api/scenarios')
def fetch_scenarios():
return simulator.default_scenarios()
@app.route(f'{app_path_prefix}/api/covid19data/<country_code>')
def country_covid19_data(country_code):
return not_found_if_none(data_repo.data_for(country_code), country_code)
parameters_dir = os.path.join(cl_path_prefix, os.environ.get("PARAMETERS_DIRECTORY", "BBP_testing"))
simulator = Simulator(data_repo, parameters_dir)
@app.route(f'{app_path_prefix}/api/simulation', methods=['POST'])
@expects_json(schema)
def run_simulation():
request_data = request.get_json()
return jsonify(simulator.run(request_data))
static_files_dir = os.path.abspath(os.environ.get("STATIC_DATA_DIR"))
@app.route(f'{app_path_prefix}/')
def index():
response = make_response(send_from_directory(static_files_dir,
'index.html', as_attachment=False))
response.headers["Cache-Control"] = "no-cache, must-revalidate"
return response
@app.route(f'{app_path_prefix}/<path:filename>')
def static_files(filename):
return send_from_directory(static_files_dir,
filename, as_attachment=False)
return app
|
"""Custom layers that constitute building blocks of NN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any
import numpy as np
import sonnet as snt
import tensorflow as tf
NONLINEARITIES = {
'relu': tf.nn.relu,
'exp': tf.exp,
'cos': tf.cos,
'tan': tf.tan,
'tanh': tf.tanh,
'sigmoid': tf.sigmoid,
'identity': tf.identity
}
class Conv1dPeriodic(snt.AbstractModule):
"""1D Convolution module with periodic boundary conditions."""
def __init__(
self,
output_channels: int,
kernel_shape: int,
stride: int = 1,
      name: str = 'conv_1d_periodic'):
    """Constructs Conv1dPeriodic module.
Args:
output_channels: Number of channels in convolution.
kernel_shape: Convolution kernel sizes.
stride: Convolution stride.
name: Name of the module.
"""
super(Conv1dPeriodic, self).__init__(name=name)
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
with self._enter_variable_scope():
self._conv_1d_module = snt.Conv1D(
output_channels=self._output_channels,
kernel_shape=self._kernel_shape,
stride=self._stride, padding=snt.VALID)
def _pad_input(self, inputs: tf.Tensor) -> tf.Tensor:
"""Pads 'inputs' on left and right side to achieve periodic effect.
Implements effect of periodic boundary conditions by padding the `inputs`
tensor on both sides such that VALID padding is equivalent to using periodic
boundaries.
Args:
inputs: Tensor to pad.
Returns:
Padded tensor.
"""
input_size = inputs.get_shape().as_list()[1]
if self._kernel_shape % 2 == 1:
pad_size = (self._kernel_shape - 1) // 2
left_pad = inputs[:, (input_size - pad_size):, ...]
right_pad = inputs[:, :pad_size, ...]
else:
left_pad_size = self._kernel_shape // 2
right_pad_size = self._kernel_shape // 2 - 1
left_pad = inputs[:, (input_size - left_pad_size):, ...]
right_pad = inputs[:, :right_pad_size, ...]
return tf.concat([left_pad, inputs, right_pad], axis=1)
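    # Example with kernel_shape=3: a length-4 axis [a, b, c, d] is padded to
    # [d, a, b, c, d, a], so a VALID convolution behaves as if boundaries were periodic.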
  def _build(self, inputs: tf.Tensor) -> tf.Tensor:
    """Connects the Conv1dPeriodic module into the graph, with input `inputs`.
Args:
inputs: Tensor with input values.
Returns:
Result of convolving `inputs` with module's kernels with periodic padding.
"""
return self._conv_1d_module(self._pad_input(inputs))
class Conv2dPeriodic(snt.AbstractModule):
"""2D Convolution module with periodic boundary conditions."""
def __init__(
self,
output_channels: int,
kernel_shape: Any,
stride: Any = 1,
      name: str = 'conv_2d_periodic'):
    """Constructs Conv2dPeriodic module.
Args:
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of size 2), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 2), or integer that is used to
define stride in all dimensions.
name: Name of the module.
"""
super(Conv2dPeriodic, self).__init__(name=name)
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
with self._enter_variable_scope():
self._conv_2d_module = snt.Conv2D(output_channels=self._output_channels,
kernel_shape=self._kernel_shape,
stride=self._stride, padding=snt.VALID)
def _pad_input(self, inputs: tf.Tensor) -> tf.Tensor:
"""Pads 'inputs' on all sides to achieve periodic effect.
Implements effect of periodic boundary conditions by padding the `inputs`
tensor on all sides such that VALID padding is equivalent to using periodic
boundaries.
Args:
inputs: Tensor to pad. dim(inputs) must be >= 3.
Returns:
Padded tensor.
"""
x_input_size = inputs.get_shape().as_list()[2]
y_input_size = inputs.get_shape().as_list()[1]
if self._kernel_shape % 2 == 1:
left_pad_size = (self._kernel_shape - 1) // 2
right_pad_size = (self._kernel_shape - 1) // 2
bot_pad_size = (self._kernel_shape - 1) // 2
top_pad_size = (self._kernel_shape - 1) // 2
else:
left_pad_size = self._kernel_shape // 2 - 1
right_pad_size = self._kernel_shape // 2
bot_pad_size = self._kernel_shape // 2 - 1
top_pad_size = self._kernel_shape // 2
left_pad = inputs[:, :, (x_input_size - left_pad_size):, ...]
right_pad = inputs[:, :, :right_pad_size, ...]
width_padded = tf.concat([left_pad, inputs, right_pad], axis=2)
bot_pad = width_padded[:, (y_input_size - bot_pad_size):, ...]
top_pad = width_padded[:, :top_pad_size, ...]
return tf.concat([bot_pad, width_padded, top_pad], axis=1)
  def _build(self, inputs: tf.Tensor) -> tf.Tensor:
    """Connects the Conv2dPeriodic module into the graph, with input `inputs`.
Args:
inputs: Tensor with input values.
Returns:
Result of convolving `inputs` with module's kernels with periodic padding.
"""
return self._conv_2d_module(self._pad_input(inputs))
class ResBlock2d(snt.AbstractModule):
"""Residual network block for 2D system with periodic boundary conditions.
  A building block of residual networks. It performs 2 convolutions with SELU
  activations, whose result is then added to the input (shortcut). In this
  implementation we use convolutions that pad the input to produce a periodic
  effect. We also omit the batch normalization present in the original paper:
  https://arxiv.org/pdf/1512.03385.pdf."""
def __init__(
self,
num_filters: int,
kernel_shape: Any,
conv_stride: Any = 1,
projection_shortcut: snt.AbstractModule = None,
name: str = 'res_block_2d'):
"""Constructs a ResNet block for 1D systems.
Args:
num_filters: Number of filters for the convolutions.
kernel_shape: Shape of the kernel for the convolutions.
conv_stride: Stride for the convolutions.
projection_shortcut: The module to apply to shortcuts.
name: Name of the module.
"""
super(ResBlock2d, self).__init__(name=name)
self._num_filters = num_filters
self._kernel_shape = kernel_shape
self._conv_stride = conv_stride
self._projection_shortcut = projection_shortcut
with self._enter_variable_scope():
self._output_channels = num_filters
conv_arguments = {
'output_channels': self._num_filters,
'kernel_shape': self._kernel_shape,
'stride': self._conv_stride,
}
self._conv_2d_1 = Conv2dPeriodic(**conv_arguments, name='first_conv')
self._conv_2d_2 = Conv2dPeriodic(**conv_arguments, name='second_conv')
  def _build(self, inputs: tf.Tensor) -> tf.Tensor:
    """Connects the ResBlock2d module into the graph, with input `inputs`.
Args:
inputs: Tensor with input values of shape [batch, size_x, size_y, ...]
Returns:
Result of application of ResNetBlock2d to `inputs`.
Raises:
      ValueError: Inputs shape is not compatible with filters.
"""
#TODO(dkochkov) Change convolutions to follow BCWH convention
num_channels = inputs.shape.as_list()[3]
if self._output_channels != num_channels:
      raise ValueError('Inputs shape is not compatible with filters.')
if self._projection_shortcut is None:
shortcut = inputs
else:
shortcut = self._projection_shortcut(inputs)
components = [self._conv_2d_1, tf.nn.selu, self._conv_2d_2]
residual_value = snt.Sequential(components)(inputs)
return residual_value + shortcut
class ResBlock1d(snt.AbstractModule):
"""Residual network block for 1D system with periodic boundary conditions.
  A building block of residual networks. It performs 2 convolutions with SELU
  activations, whose result is then added to the input (shortcut). In this
  implementation we use convolutions that pad the input to produce a periodic
  effect. We also omit the batch normalization used in the original paper:
  https://arxiv.org/pdf/1512.03385.pdf."""
def __init__(
self,
num_filters: int,
kernel_shape: Any,
conv_stride: Any = 1,
projection_shortcut: snt.AbstractModule = None,
name: str = 'res_block_1d'):
"""Constructs a ResNet block for 1D systems.
Args:
num_filters: Number of filters for the convolutions.
kernel_shape: Shape of the kernel for the convolutions.
conv_stride: Stride for the convolutions.
projection_shortcut: The module to apply to shortcuts.
name: Name of the module.
"""
super(ResBlock1d, self).__init__(name=name)
self._num_filters = num_filters
self._kernel_shape = kernel_shape
self._conv_stride = conv_stride
self._projection_shortcut = projection_shortcut
with self._enter_variable_scope():
self._output_channels = num_filters
conv_arguments = {
'output_channels': self._num_filters,
'kernel_shape': self._kernel_shape,
'stride': self._conv_stride,
}
self._conv_1d_1 = Conv1dPeriodic(**conv_arguments, name='first_conv')
self._conv_1d_2 = Conv1dPeriodic(**conv_arguments, name='second_conv')
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
"""Connects the ResBlock1d module into the graph, with input `inputs`.
Args:
      inputs: Tensor with input values of shape [batch, n_sites, channels, ...]
Returns:
Result of application of ResNetBlock1d to `inputs`.
Raises:
      ValueError: Inputs shape is not compatible with filters.
"""
num_channels = inputs.shape.as_list()[2]
if self._output_channels != num_channels:
      raise ValueError('Inputs shape is not compatible with filters.')
if self._projection_shortcut is None:
shortcut = inputs
else:
shortcut = self._projection_shortcut(inputs)
components = [self._conv_1d_1, tf.nn.selu, self._conv_1d_2]
residual_value = snt.Sequential(components)(inputs)
return residual_value + shortcut
class BottleneckResBlock1d(snt.AbstractModule):
"""Residual network block for 1D system with periodic boundary conditions.
In contrast to ResBlock1d this module uses bottleneck to reduce computation.
For details see reference in `ResBlock1d`.
"""
def __init__(
self,
num_filters: int,
kernel_shape: Any,
conv_stride: Any = 1,
bottleneck_ratio: int = 2,
projection_shortcut: snt.AbstractModule = None,
name: str = 'res_block_1d'):
"""Constructs a ResNet block for 1D systems.
Args:
num_filters: Number of filters for the convolutions.
kernel_shape: Shape of the kernel for the convolutions.
conv_stride: Stride for the convolutions.
bottleneck_ratio: Ratio of bottleneck compression.
projection_shortcut: The module to apply to shortcuts.
name: Name of the module.
"""
super(BottleneckResBlock1d, self).__init__(name=name)
self._num_filters = num_filters
self._kernel_shape = kernel_shape
self._conv_stride = conv_stride
self._bottleneck_ratio = bottleneck_ratio
self._projection_shortcut = projection_shortcut
with self._enter_variable_scope():
      output_size = self._num_filters * self._bottleneck_ratio
      self._output_channels = output_size
self._conv_1d_1 = Conv1dPeriodic(self._num_filters, 1, 1)
self._conv_1d_2 = Conv1dPeriodic(self._num_filters, self._kernel_shape)
self._conv_1d_3 = Conv1dPeriodic(output_size, 1, 1)
  def _build(self, inputs: tf.Tensor) -> tf.Tensor:
    """Connects the BottleneckResBlock1d module into the graph, with input `inputs`.
    Args:
      inputs: Tensor with input values of shape [batch, n_sites, channels, ...]
    Returns:
      Result of application of BottleneckResBlock1d to `inputs`.
    Raises:
      ValueError: Inputs shape is not compatible with filters.
    """
num_channels = inputs.shape.as_list()[2]
if self._output_channels != num_channels:
      raise ValueError('Inputs shape is not compatible with filters.')
if self._projection_shortcut is None:
shortcut = inputs
else:
shortcut = self._projection_shortcut(inputs)
components = [
self._conv_1d_1, tf.nn.relu, self._conv_1d_2,
tf.nn.relu, self._conv_1d_3
]
residual_value = snt.Sequential(components)(inputs)
return residual_value + shortcut
class MatrixProductUnit(snt.AbstractModule):
"""Matrix product module representing a single MPS cell."""
def __init__(
self,
vertical_bond_dimension: int,
horizontal_bond_dimension: int,
physical_dimension: int = 2,
name: str = 'matrix_product_module'):
"""Constructs MatrixProductUnit module.
Args:
vertical_bond_dimension: Number of rows in MPS unit.
horizontal_bond_dimension: Number of bonds in MPS unit.
physical_dimension: Number of physical dimensions.
name: Name of the module.
Raises:
ValueError: Provided physical dimension is not supported.
"""
if physical_dimension != 2:
raise ValueError('Only physical dimension 2 is currently supported.')
super(MatrixProductUnit, self).__init__(name=name)
self._vertical_bond_dimension = vertical_bond_dimension
self._horizontal_bond_dimension = horizontal_bond_dimension
self._physical_dimension = physical_dimension
self._name = name
with self._enter_variable_scope():
shape = [
self._vertical_bond_dimension,
self._horizontal_bond_dimension,
self._physical_dimension,
]
self._mps_var = tf.get_variable('M', shape=shape, dtype=tf.float32)
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
"""Connects the MatrixProductUnit module into the graph, with `inputs`.
Args:
inputs: Tensor with input values with shape=[batch] and values +/- 1.
Returns:
MPS matrices corresponding to the physical degrees of freedom.
"""
index_inputs = tf.unstack(tf.cast((inputs + 1) / 2, tf.int32))
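    # (inputs + 1) / 2 maps spin values -1/+1 to physical indices 0/1; each batch element
    # then selects the corresponding slice of the MPS tensor.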
batch_components = [self._mps_var[:, :, index] for index in index_inputs]
return tf.stack(batch_components, axis=0)
class GraphConvLayer(snt.AbstractModule):
"""GraphConvLayer module with adjacency list"""
def __init__(
self,
output_channels: int,
adj: np.ndarray,
      name: str = 'graph_conv_layer'):
    """Constructs GraphConvLayer module.
Args:
output_channels: Number of output channels in convolution.
adj: Adjacency list of the graph that stores indices of neighbors
and itself for every site, with shape [n_site, num_neighbor].
name: Name of the module.
"""
super(GraphConvLayer, self).__init__(name=name)
self._output_channels = output_channels
self._adj = adj
num_neighbors = np.shape(self._adj)[1]
kernel_shape = (1, num_neighbors)
with self._enter_variable_scope():
self._conv_module = snt.Conv2D(output_channels=output_channels,
kernel_shape=kernel_shape,
padding=snt.VALID)
def _build(self, inputs: tf.Tensor) -> tf.Tensor:
"""Connects the GraphConvLayer module into the graph, with input `inputs`.
Args:
inputs: Tensor with input values.
Returns:
Result of convolving `inputs` with adjacency matrix and module's kernels.
"""
adj_table = tf.gather(inputs, self._adj, axis=1)
return tf.squeeze(self._conv_module(adj_table), axis=2)
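# Hedged usage sketch (not part of the original module): wires a GraphConvLayer
# over a small 1d ring graph where each site's neighbourhood is itself plus its
# two nearest neighbours. The ring size, channel count, and random spin batch are
# illustrative; tf, np, and snt are assumed to be the module-level imports above.
if __name__ == '__main__':
    n_sites = 6
    ring_adjacency = np.stack(
        [[(i - 1) % n_sites, i, (i + 1) % n_sites] for i in range(n_sites)])
    layer = GraphConvLayer(output_channels=4, adj=ring_adjacency)
    spins = tf.placeholder(tf.float32, shape=[None, n_sites, 1])
    features = layer(spins)  # expected shape: [batch, n_sites, 4]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch = np.random.choice([-1.0, 1.0], size=(2, n_sites, 1))
        print(sess.run(features, feed_dict={spins: batch}).shape)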
|
from urllib.parse import urlparse, parse_qs
import re
import random
import unicodedata
import os
from tqdm import tqdm
from bs4 import BeautifulSoup
from common import dir_path, cache_request, re_to_e164
BASE_URL = "https://mvic.sos.state.mi.us/Clerk"
# Works around an incomplete SSL cert chain on the server (missing intermediate cert):
# base64 root, intermediate, and site certs were saved from Chrome, converted to PEM
# with openssl, and concatenated into michigan_chain.pem
SSL_CERT = os.path.join(dir_path(__file__), 'michigan_chain.pem')
re_official = re.compile(r'^\s*(.*?)\s*[,\n]')
re_phys_addr = re.compile(r'\n(.*?\d{5}(?:-\d{4})?)\n', re.MULTILINE | re.DOTALL)
re_mail_addr = re.compile(r'Mailing\s+Address:\s+(.*?\d{5}(?:-\d{4})?)\n', re.MULTILINE | re.DOTALL)
re_phone = re.compile(r'\nPhone:[^\n\S]*(' + re_to_e164.pattern[1:] + r')\s*\n')
re_fax = re.compile(r'Fax:[^\n\S]*(' + re_to_e164.pattern[1:] + r')\s*\n')
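# Illustrative only: on a normalized clerk block such as
#   "Jane Doe, Township Clerk\n123 Main St\nAnytown, MI 48000\nPhone: (555) 123-4567\n"
# re_official captures "Jane Doe" (the text before the first comma or newline) and
# re_phys_addr captures the street address up to and including the ZIP code.
# The sample text above is fabricated and only shows the expected shape of the input.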
def random_wait(min_wait=.1, max_wait=.3):
return random.uniform(min_wait, max_wait)
def parse_jurisdiction(soup, jurisdiction_name, county_name, fipscode):
city = re.sub(r'\s+Twp', ' Township', jurisdiction_name)
county = county_name.title().strip()
body = soup.find('div', class_='card-body')
info = re.sub(r'\s*\n\s*', '\n', unicodedata.normalize('NFKD', body.text).strip())
phys_addr = re_phys_addr.findall(info)
mail_addr = re_mail_addr.findall(info)
return {
'locale': f'{city}:{county}',
'city': city,
'county': county,
'emails': [a['href'].replace('mailto:', '').strip() for a in body.select("a[href^=mailto]")],
'phones': [match[0] for match in re_phone.findall(info)],
'faxes': [match[0] for match in re_fax.findall(info)],
'official': re_official.findall(info)[0],
'address': mail_addr[0].replace('\n', ', ') if mail_addr else None,
'physicalAddress': phys_addr[0].replace('\n', ', ') if phys_addr else None,
'fipscode': fipscode,
}
def fetch_data(verbose=True):
data = []
text = cache_request(BASE_URL, verify=SSL_CERT)
soup = BeautifulSoup(text, 'html.parser')
for county in tqdm(soup.find('select', id='Counties')('option'), disable=not verbose):
if not county.get('value'):
continue
county_text = cache_request(
f'{BASE_URL}/SearchByCounty',
method='POST',
data={'CountyID': county.get('value')},
wait=random_wait(),
verify=SSL_CERT
)
county_soup = BeautifulSoup(county_text, 'html.parser')
for jurisdiction_a in county_soup('a', class_='local-clerk-link'):
qrystr_params = parse_qs(urlparse(jurisdiction_a.get('href')).query)
jurisdiction_data = {k: v[0] for k, v in qrystr_params.items() if k != 'dummy'}
jurisdiction_text = cache_request(
f'{BASE_URL}/LocalClerk',
method='POST',
data=jurisdiction_data,
wait=random_wait(),
verify=SSL_CERT
)
jurisdiction_soup = BeautifulSoup(jurisdiction_text, 'html.parser')
data.append(parse_jurisdiction(
jurisdiction_soup,
jurisdiction_data['jurisdictionName'],
county.text,
fipscode=jurisdiction_data['jurisdictionCode']
))
return data
if __name__ == '__main__':
print(fetch_data())
|
# Generated by Django 2.2.4 on 2019-08-13 12:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20190813_1204'),
('neighbour', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='business',
name='neighbourhood',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Neighbourhood'),
),
migrations.AlterField(
model_name='post',
name='neighbourhood',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='users.Neighbourhood'),
),
migrations.DeleteModel(
name='Neighbourhood',
),
]
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class InitializerTest(tf.test.TestCase):
def test_xavier_wrong_dtype(self):
with self.assertRaisesRegexp(
TypeError,
'Cannot create initializer for non-floating point type.'):
tf.contrib.layers.xavier_initializer(dtype=tf.int32)
self.assertIsNone(tf.contrib.layers.l1_regularizer(0.)(None))
def _test_xavier(self, initializer, shape, variance, uniform):
with tf.Session() as sess:
var = tf.get_variable(name='test', shape=shape, dtype=tf.float32,
initializer=initializer(uniform=uniform, seed=1))
sess.run(tf.global_variables_initializer())
values = var.eval()
self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)
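# The expected variances below follow the Xavier/Glorot rule
# variance = 2 / (fan_in + fan_out); for the conv2d variant with kernel shape
# [height, width, in_channels, out_channels], fan_in = h * w * in and
# fan_out = h * w * out, hence 2 / (100 * 40 * (5 + 7)) for shape [100, 40, 5, 7].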
def test_xavier_uniform(self):
self._test_xavier(tf.contrib.layers.xavier_initializer,
[100, 40], 2. / (100. + 40.), True)
def test_xavier_normal(self):
self._test_xavier(tf.contrib.layers.xavier_initializer,
[100, 40], 2. / (100. + 40.), False)
def test_xavier_scalar(self):
self._test_xavier(tf.contrib.layers.xavier_initializer, [], 0.0, True)
def test_xavier_conv2d_uniform(self):
self._test_xavier(tf.contrib.layers.xavier_initializer_conv2d,
[100, 40, 5, 7], 2. / (100. * 40 * (5 + 7)), True)
def test_xavier_conv2d_normal(self):
self._test_xavier(tf.contrib.layers.xavier_initializer_conv2d,
[100, 40, 5, 7], 2. / (100. * 40 * (5 + 7)), False)
class VarianceScalingInitializerTest(tf.test.TestCase):
def test_wrong_dtype(self):
with self.assertRaisesRegexp(
TypeError,
'Cannot create initializer for non-floating point type.'):
tf.contrib.layers.variance_scaling_initializer(dtype=tf.int32)
initializer = tf.contrib.layers.variance_scaling_initializer()
with self.assertRaisesRegexp(
TypeError,
'Cannot create initializer for non-floating point type.'):
initializer([], dtype=tf.int32)
def _test_variance(self, initializer, shape, variance, factor, mode, uniform):
with tf.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
var = tf.get_variable(name='test', shape=shape, dtype=tf.float32,
initializer=initializer(factor=factor,
mode=mode,
uniform=uniform,
seed=1))
sess.run(tf.global_variables_initializer())
values = var.eval()
self.assertAllClose(np.var(values), variance, 1e-3, 1e-3)
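# The variance_scaling initializer targets variance of roughly factor / n, where n is
# fan_in, fan_out, or their average depending on `mode`; for conv kernels the fans are
# computed over the receptive field, e.g. fan_in = h * w * in_channels.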
def test_fan_in(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / 100.,
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_fan_out(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / 40.,
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_fan_avg(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40],
variance=4. / (100. + 40.),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
def test_conv2d_fan_in(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * 5.),
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_conv2d_fan_out(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * 7.),
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_conv2d_fan_avg(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * (5. + 7.)),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
def test_xavier_uniform(self):
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / (100. + 40.),
factor=1.0,
mode='FAN_AVG',
uniform=True)
def test_xavier_normal(self):
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40],
variance=2. / (100. + 40.),
factor=1.0,
mode='FAN_AVG',
uniform=False)
def test_xavier_scalar(self):
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[],
variance=0.0,
factor=1.0,
mode='FAN_AVG',
uniform=False)
def test_xavier_conv2d_uniform(self):
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * (5. + 7.)),
factor=1.0,
mode='FAN_AVG',
uniform=True)
def test_xavier_conv2d_normal(self):
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100, 40, 5, 7],
variance=2. / (100. * 40. * (5. + 7.)),
factor=1.0,
mode='FAN_AVG',
uniform=False)
def test_1d_shape_fan_in(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100],
variance=2. / 100.,
factor=2.0,
mode='FAN_IN',
uniform=uniform)
def test_1d_shape_fan_out(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100],
variance=2. / 100.,
factor=2.0,
mode='FAN_OUT',
uniform=uniform)
def test_1d_shape_fan_avg(self):
for uniform in [False, True]:
self._test_variance(tf.contrib.layers.variance_scaling_initializer,
shape=[100],
variance=4. / (100. + 100.),
factor=2.0,
mode='FAN_AVG',
uniform=uniform)
if __name__ == '__main__':
tf.test.main()
|
def func():
print('This is the development version of func().')
|
from .world import World, NoSuchComponentError, DuplicateComponentError
from .entity import Entity, UnregisteredComponentError
from .component import Component
from .system import System
|
import logging
logger = logging.getLogger(__name__)
from django.contrib.auth.hashers import make_password
from django.core.management.base import BaseCommand
from ...models import User
from ...patient import Patient
from ...doctor import Doctor
from ...license import License
from diagnoses.register_status import RegisterStatus
from diagnoses.diagnosis_type import DiagnosisType
from diagnoses.models import Diagnosis
from diagnoses.summary import Summary
# python manage.py seed --mode=refresh
from ...validity import Validity
""" Clear all data and create seed objects """
MODE_REFRESH = 'refresh'
""" Clear all data and do not create any object """
MODE_CLEAR = 'clear'
class Command(BaseCommand):
help = "seed database for testing and development."
def add_arguments(self, parser):
parser.add_argument('--mode', type=str, help="Mode")
def handle(self, *args, **options):
self.stdout.write('seeding data...')
run_seed(self, options['mode'])
self.stdout.write('done.')
def clear_data():
"""Deletes all the table data"""
User.objects.all().delete()
Patient.objects.all().delete()
Doctor.objects.all().delete()
License.objects.all().delete()
def create_doctor_and_diagnosis():
user = User.objects.create()
user.username = 'doctor'
user.email = 'doctor@code.berlin'
user.password = make_password('1qazxsw2')
user.is_doctor = True
user.is_patient = False
user.save()
doctor = Doctor.objects.create(user=user)
doctor.name = 'doctor'
doctor.country = 'Germany'
doctor.address = 'Lohmühlenstraße 65, 12435 Berlin, Germany'
doctor.zipcode = '12435'
doctor.phone_number = '+49 30 12085961'
doctor.image = 'users/no-img.svg'
doctor.validity = Validity.VALID
doctor.speciality = "IT"
doctor.save()
license = License.objects.create(doctor=doctor)
license.image = 'licenses/sample.jpg'
license.save()
logger.info("{} doctor created.".format(doctor))
for i in range(4):
diagnosis_first = Diagnosis.objects.create(doctor=Doctor.objects.get(user_id=user.id))
diagnosis_first.title = 'Mental Illness Baster SS'
diagnosis_first.description = 'Free to talk to me'
diagnosis_first.video_link = 'https://zoom.us/codeuniversity/1234567890'
diagnosis_first.video_password = '1qazxsw2'
diagnosis_first.type = DiagnosisType.PREVENTIVE
diagnosis_first.image = 'diagnoses/no-img.jpg'
diagnosis_first.status = RegisterStatus.UNREGISTERED
diagnosis_first.date = '2020-12-23'
diagnosis_first.save()
for i in range(4):
diagnosis_second = Diagnosis.objects.create(doctor=Doctor.objects.get(user_id=user.id))
diagnosis_second.title = 'Mental Illness'
diagnosis_second.description = 'Free to talk to me'
diagnosis_second.video_link = 'https://zoom.us/codeuniversity/1234567890'
diagnosis_second.video_password = '1qazxsw2'
diagnosis_second.type = DiagnosisType.MENTAL
diagnosis_second.image = 'diagnoses/mental.jpg'
diagnosis_second.status = RegisterStatus.UNREGISTERED
diagnosis_second.date = '2020-12-21'
diagnosis_second.save()
for i in range(4):
diagnosis_third = Diagnosis.objects.create(doctor=Doctor.objects.get(user_id=user.id))
diagnosis_third.title = 'Preventive Medicine Trial'
diagnosis_third.description = 'Free to talk to me'
diagnosis_third.video_link = 'https://zoom.us/codeuniversity/1234567890'
diagnosis_third.video_password = '1qazxsw2'
diagnosis_third.type = DiagnosisType.PREVENTIVE
diagnosis_third.image = 'diagnoses/no-img.jpg'
diagnosis_third.status = RegisterStatus.UNREGISTERED
diagnosis_third.date = '2020-12-21'
diagnosis_third.save()
return user
def create_patient_and_diagnosis():
user = User.objects.create()
user.username = 'patient'
user.email = 'patient@code.berlin'
user.password = make_password('1qazxsw2')
user.is_patient = True
user.is_doctor = False
user.save()
patient = Patient.objects.create(user=user)
patient.name = 'patient'
patient.country = 'Germany'
patient.address = 'Lohmühlenstraße 65, 12435 Berlin, Germany'
patient.zipcode = '12435'
patient.phone_number = '+49 30 12085961'
patient.image = 'users/no-img.svg'
patient.request = "IT"
patient.save()
logger.info("{} patient created.".format(patient))
diagnosis_first = Diagnosis.objects.create(doctor=Doctor.objects.get(user_id=user.id - 1),
patient=Patient.objects.get(user_id=user.id))
diagnosis_first.title = 'Preventive Medicine'
diagnosis_first.description = 'Free to talk to me'
diagnosis_first.video_link = 'https://zoom.us/codeuniversity/1234567890'
diagnosis_first.video_password = '1qazxsw2'
diagnosis_first.type = DiagnosisType.PREVENTIVE
diagnosis_first.image = 'diagnoses/no-img.jpg'
diagnosis_first.status = RegisterStatus.REGISTERED
diagnosis_first.date = '2020-12-23'
diagnosis_first.save()
diagnosis_second = Diagnosis.objects.create(doctor=Doctor.objects.get(user_id=user.id - 1),
patient=Patient.objects.get(user_id=user.id))
diagnosis_second.title = 'Mental Illness Baster SS'
diagnosis_second.description = 'Free to talk to me'
diagnosis_second.video_link = 'https://zoom.us/codeuniversity/1234567890'
diagnosis_second.video_password = '1qazxsw2'
diagnosis_second.type = DiagnosisType.MENTAL
diagnosis_second.image = 'diagnoses/mental.jpg'
diagnosis_second.status = RegisterStatus.COMPLETED
diagnosis_second.date = '2020-12-23'
diagnosis_second.save()
summary = Summary.objects.create(diagnosis=diagnosis_second)
summary.comment = 'Take care yourself'
summary.save()
return user
def run_seed(self, mode):
""" Seed database based on mode
:param mode: refresh / clear
:return:
"""
# Clear data from tables
clear_data()
if mode == MODE_CLEAR:
return
create_doctor_and_diagnosis()
create_patient_and_diagnosis()
|
"""
.. module:: gui
:platform: Windows
:synopsis: Main application frame for the entire GUI
.. moduleauthor:: Nam Tran <tranngocnam97@gmail.com>
"""
# Standard Library imports
import tkinter as tk
from tkinter import ttk
import os
import sys
# Appends the system path to allow absolute path imports due to the command line structure
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# Project imports
from transvenous_pacing_gui.guiclient import InstructorGUI
from transvenous_pacing_gui.guiserver import StudentGUI
class MainApplication(tk.Frame):
"""Main application frame for the entire GUI
This class contains a tkinter notebook with the student and instructor GUIs
in their own tabs.
"""
def __init__(self, parent, *args, **kwargs):
"""Constructor
Args:
parent (tk.widget): parent widget to make the frame a child of
*args: Variable length argument list
**kwargs: Arbitrary keyword argument list
"""
tk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
# Black background style for ttk.Frame
s = ttk.Style()
s.configure('new.TFrame', background='black')
# GUI design
self.notebook = ttk.Notebook(self.parent)
self.notebook.bind("<Button-1>", self.on_click)
# Student GUI design
self.student_gui = StudentGUI(self.notebook, style='new.TFrame')
# Teacher GUI design
self.instructor_gui = InstructorGUI(self.notebook)
# Building the notebook
self.notebook.add(self.student_gui, text="Student")
self.notebook.add(self.instructor_gui, text="Instructor")
self.notebook.pack()
def on_click(self, event):
"""on_click event for the notebook tabs
This event pauses and unpauses the student GUI signal depending
on if it is put in or out of focus.
Args:
event (tk.event): The event that called this event function
"""
# Tcl function to determine tab at position
clicked_tab = self.notebook.tk.call(self.notebook._w, "identify", "tab", event.x, event.y)
active_tab = self.notebook.index(self.notebook.select())
# If switching tabs
if not clicked_tab == active_tab:
# If the tab that was clicked is the student GUI
if clicked_tab == 0:
# Start or resume the plot animation
self.student_gui.start_plot()
# If the tab that was clicked is the instructor GUI
elif clicked_tab == 1:
# Stop the plot animation
self.student_gui.pause_plot()
# Else do nothing
else:
pass
def stop_gui(self):
"""Stops both the instructor and student GUI tabs
This function is used to close any open connections from the student
or instructor frames. This is useful when the GUI is closed.
"""
self.instructor_gui.stop_gui()
self.student_gui.stop_gui()
def main():
"""main function for the GUI software
This function doubles as what is called for the transvenous_pacing_gui
command.
"""
# Root application window
root = tk.Tk()
root.title("Transvenous Pacing GUI")
# Instantiating the main application frame and putting it on the root window
main_app = MainApplication(root)
main_app.pack(side="top", fill="both", expand=True)
# GUI loop
root.mainloop()
# Closes GUI connections when the loop is exited
main_app.stop_gui()
# Runs the main function when this script is called
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-08-18 07:56
from __future__ import unicode_literals
from django.db import migrations, models
from bluebottle.clients import properties
from parler.models import TranslatableModelMixin
def create_default_impact_types(apps, schema_editor):
ImpactType = apps.get_model('impact', 'ImpactType')
ImpactType.__bases__ = (models.Model, TranslatableModelMixin)
ImpactTypeTranslation = apps.get_model('impact', 'ImpactTypeTranslation')
Language = apps.get_model('utils', 'language')
languages = [lang.code for lang in Language.objects.all()]
definitions = [
{
'slug': 'co2',
'icon': 'co2',
'en': {
'name': u'Reduce CO₂ emissions',
'unit': u'kg',
'text': u'reduce CO₂ emissions',
'text_with_target': u'reduce CO₂ emissions by {} kg',
'text_passed': u'CO₂ emissions reduced',
},
'fr': {
'name': u'Réduire les émissions de CO₂',
'unit': u'kg',
'text': u'réduire les émissions de CO₂',
'text_with_target': u'réduire les émissions de CO₂ de {} kg',
'text_passed': u'Émissions de CO₂ réduites',
},
'nl': {
'name': u'CO₂ uitstoot verminderen',
'unit': u'kg',
'text': u'CO₂ uitstoot te verminderen',
'text_with_target': u'CO₂ uitstoot met {} kg te verminderen',
'text_passed': u'CO₂ uitstoot verminderd'
}
},
{
'slug': 'people',
'icon': 'people',
'en': {
'name': u'Reach people',
'unit': u'',
'text': u'reach people',
'text_with_target': 'reach {} people',
'text_passed': u'people reached',
},
'fr': {
'name': u'Touchez les gens',
'unit': u'',
'text': u'touchez les gens',
'text_with_target': 'toucher {} personnes',
'text_passed': u'personnes touchés',
},
'nl': {
'name': u'Mensen bereiken',
'unit': u'',
'text': u'mensen te bereiken',
'text_with_target': '{} mensen te bereiken',
'text_passed': u'mensen bereikt'
}
},
{
'slug': 'food',
'icon': '',
'en': {
'name': u'Reduce food waste',
'unit': u'kg',
'text': u'reduce food waste',
'text_with_target': 'reduce food waste by {} kg',
'text_passed': u'food waste reduced',
},
'fr': {
'name': u'Réduisez le gaspillage alimentaire',
'unit': u'kg',
'text': u'réduisez le gaspillage alimentaire',
'text_with_target': 'réduire le gaspillage alimentaire de {} kg',
'text_passed': u'gaspillage alimentaire réduit',
},
'nl': {
'name': u'Voedselverspilling verminderen',
'unit': u'kg',
'text': u'voedselverspilling te verminderen',
'text_with_target': 'voedselverspilling met {} kg te verminderen',
'text_passed': u'voedselverspilling verminderd'
}
},
{
'slug': 'water',
'icon': 'water',
'en': {
'name': u'Save water',
'unit': u'l',
'text': u'save water',
'text_with_target': 'save {} l water',
'text_passed': u'water saved',
},
'fr': {
'name': u'Économiser l\'eau',
'unit': u'l',
'text': u'économiser l\'eau',
'text_with_target': 'économisez {} l d\'eau',
'text_passed': u'eau économisée',
},
'nl': {
'name': u'Water besparen',
'unit': u'l',
'text': u'water te besparen',
'text_with_target': '{} l water te besparen',
'text_passed': u'water bespaard'
}
},
{
'slug': 'plastic',
'icon': 'plastic',
'en': {
'name': u'Save plastic',
'unit': u'kg',
'text': u'save plastic',
'text_with_target': 'save {} kg plastic',
'text_passed': u'plastic saved',
},
'fr': {
'name': u'Économisez du plastique',
'unit': u'kg',
'text': u'économisez du plastique',
'text_with_target': 'économisez {} kg de plastique',
'text_passed': u'plastique économisé',
},
'nl': {
'name': u'Plastic besparen',
'unit': u'kg',
'text': u'plastic te besparen',
'text_with_target': '{} kg plastic te besparen',
'text_passed': u'plastic bespaard'
}
},
{
'slug': 'trees',
'icon': 'trees',
'en': {
'name': u'Plant trees',
'unit': u'',
'text': u'plant trees',
'text_with_target': 'plant {} trees',
'text_passed': u'trees planted',
},
'fr': {
'name': u'Planter des arbres',
'unit': u'',
'text': u'planter des arbres',
'text_with_target': 'planter {} arbres',
'text_passed': u'arbres plantés',
},
'nl': {
'name': u'Bomen planten',
'unit': u'',
'text': u'bomen te planten',
'text_with_target': '{} bomen te planten',
'text_passed': u'bomen geplant'
}
},
{
'slug': 'jobs',
'icon': 'jobs',
'en': {
'name': u'Create jobs',
'unit': u'',
'text': u'create jobs',
'text_with_target': 'create {} jobs',
'text_passed': u'jobs created',
},
'fr': {
'name': u'Créer des emplois',
'unit': u'',
'text': u'créer des emplois',
'text_with_target': 'créer {} emplois',
'text_passed': u'emplois créés',
},
'nl': {
'name': u'Banen creëren',
'unit': u'',
'text': u'banen te creëren',
'text_with_target': '{} banen te creëren',
'text_passed': u'banen gecreëerd'
}
},
]
for definition in definitions:
impact_type, created = ImpactType.objects.update_or_create(
slug=definition['slug'],
defaults={
'icon': definition['icon']
}
)
for language in languages:
if language in definition:
ImpactTypeTranslation.objects.update_or_create(
language_code=language,
master_id=impact_type.id,
defaults=definition[language]
)
def remove_impact_types(apps, schema_editor):
ImpactType = apps.get_model('impact', 'ImpactType')
ImpactType.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('impact', '0015_impacttypetranslation_name'),
]
operations = [
migrations.RunPython(create_default_impact_types, remove_impact_types)
]
|
import sqlite3
x='songs.db'
def connect():
conn=sqlite3.connect(x)
c=conn.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS playlist (
song_id INTEGER NOT NULL PRIMARY KEY,
song text NOT NULL,
path text NOT NULL,
fav INTEGER
)
""")
conn.commit()
conn.close()
def insert(song,path,fav=0):
conn=sqlite3.connect(x)
c=conn.cursor()
c.execute("""
INSERT OR IGNORE INTO playlist (song,path,fav) VALUES (?,?,?)
""",(song,path,fav))
conn.commit()
conn.close()
def fav(song,f):
conn=sqlite3.connect(x)
c=conn.cursor()
c.execute("""
UPDATE playlist SET fav=? WHERE song=?""",(f,song))
conn.commit()
conn.close()
def show():
conn=sqlite3.connect(x)
c=conn.cursor()
c.execute("""
SELECT * FROM playlist
""")
s=c.fetchall()
conn.commit()
conn.close()
return s
def find(song=None,sid=None):
conn=sqlite3.connect(x)
c=conn.cursor()
c.execute("""
SELECT * FROM playlist WHERE song=? OR song_id=?
""",(song,sid))
s=c.fetchall()
conn.commit()
conn.close()
return s
def remove(song):
conn=sqlite3.connect(x)
c=conn.cursor()
c.execute("DELETE FROM playlist WHERE song=?",(song,))
conn.commit()
conn.close()
def playfavs():
conn=sqlite3.connect(x)
c=conn.cursor()
c.execute("SELECT * FROM playlist WHERE fav=1")
favss=c.fetchall()
conn.commit()
conn.close()
return favss
connect()
# print(show())
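# Hedged usage sketch (not part of the original module): exercises the helpers
# above against the same songs.db file. The song name and path are fabricated.
if __name__ == '__main__':
    insert('My Song', '/music/my_song.mp3')
    fav('My Song', 1)
    print(show())      # every row in the playlist table
    print(playfavs())  # only rows flagged as favourites
    remove('My Song')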
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-06-23 10:05
from __future__ import unicode_literals
from django.db import migrations, models
from frontend.models import ImportLog
from common.utils import under_test
def seed_log(apps, schema_editor):
if not under_test():
ImportLog.objects.create(
current_at='2016-03-01',
filename='dummy-initial-value',
category='prescribing')
class Migration(migrations.Migration):
dependencies = [
('frontend', '0003_auto_20160714_1411'),
]
operations = [
migrations.CreateModel(
name='ImportLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imported_at', models.DateTimeField(auto_now_add=True)),
('current_at', models.DateField(db_index=True)),
('filename', models.CharField(max_length=200)),
('category', models.CharField(db_index=True, max_length=15)),
],
),
migrations.RunPython(seed_log),
]
|
import unittest
import os
from programy.braintree import BraintreeManager
from programy.config.brain.braintree import BrainBraintreeConfiguration
from programytest.client import TestClient
class BraintreeManagerTests(unittest.TestCase):
def test_dump_no_create(self):
config = BrainBraintreeConfiguration()
config._create = False
config._save_as_user = "system"
mgr = BraintreeManager(config)
client = TestClient()
client_context = client.create_client_context("testid")
mgr.dump_brain_tree(client_context)
def test_dump_create_no_storage(self):
config = BrainBraintreeConfiguration()
config._create = True
config._save_as_user = "system"
mgr = BraintreeManager(config)
client = TestClient()
client_context = client.create_client_context("testid")
mgr.dump_brain_tree(client_context)
def get_temp_dir(self):
if os.name == 'posix':
return '/tmp'
elif os.name == 'nt':
import tempfile
return tempfile.gettempdir()
else:
raise Exception("Unknown operating system [%s]" % os.name)
def test_dump_create_(self):
tmpdir = self.get_temp_dir()
brainfile = tmpdir + os.sep + "braintree.bin"
config = BrainBraintreeConfiguration()
config._create = True
config._save_as_user = "system"
mgr = BraintreeManager(config)
if os.path.exists(brainfile):
os.remove(brainfile)
client = TestClient()
client.add_braintree_store(brainfile)
client_context = client.create_client_context("testid")
mgr.dump_brain_tree(client_context)
self.assertTrue(os.path.exists(brainfile))
if os.path.exists(brainfile):
os.remove(brainfile)
|
import argparse
import tensorflow as tf
import stupid
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Converts H5 to TF lite.')
parser.add_argument('keras_model', help='Keras model to load in H5 format.')
parser.add_argument('tf_lite_model', help='TF lite model.')
args = parser.parse_args()
model = tf.keras.models.load_model(args.keras_model, custom_objects={"Reflect2D": stupid.layer.Reflect2D})
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.allow_custom_ops = False
tflite_model = converter.convert()
with open(args.tf_lite_model, 'wb') as f:
f.write(tflite_model)
|
"""
Defines a URL to return a notebook html page to be used in an iframe
"""
from django.conf.urls import url
from xblock_jupyter_graded.rest.views import (
DownloadStudentNBView, DownloadInstructorNBView, DownloadAutogradedNBView
)
from django.contrib.auth.decorators import login_required
app_name = 'xblock_jupyter_graded'
urlpatterns = [
url(
r'^download/student_nb/(?P<course_id>.+)/(?P<unit_id>.+)/(?P<filename>.+)$',
login_required(DownloadStudentNBView.as_view()),
name='jupyter_student_dl'
),
url(
r'^download/instructor_nb/(?P<course_id>.+)/(?P<unit_id>.+)/(?P<filename>.+)$',
login_required(DownloadInstructorNBView.as_view()),
name='jupyter_instructor_dl'
),
url(
r'^download/autograded_nb/(?P<course_id>.+)/(?P<unit_id>.+)/(?P<filename>.+)$',
login_required(DownloadAutogradedNBView.as_view()),
name='jupyter_autograded_dl'
),
]
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING, AsyncIterator, Dict, Optional, Union
from .asset import Asset
from .enums import EventStatus, EntityType, PrivacyLevel, try_enum
from .mixins import Hashable
from .object import Object, OLDEST_OBJECT
from .utils import parse_time, _get_as_snowflake, _bytes_to_base64_data, MISSING
if TYPE_CHECKING:
from .types.scheduled_event import (
GuildScheduledEvent as GuildScheduledEventPayload,
GuildScheduledEventWithUserCount as GuildScheduledEventWithUserCountPayload,
EntityMetadata,
)
from .abc import Snowflake
from .guild import Guild
from .channel import VoiceChannel, StageChannel
from .state import ConnectionState
from .user import User
GuildScheduledEventPayload = Union[GuildScheduledEventPayload, GuildScheduledEventWithUserCountPayload]
# fmt: off
__all__ = (
"ScheduledEvent",
)
# fmt: on
class ScheduledEvent(Hashable):
"""Represents a scheduled event in a guild.
.. versionadded:: 2.0
.. container:: operations
.. describe:: x == y
Checks if two scheduled events are equal.
.. describe:: x != y
Checks if two scheduled events are not equal.
.. describe:: hash(x)
Returns the scheduled event's hash.
Attributes
----------
id: :class:`int`
The scheduled event's ID.
name: :class:`str`
The name of the scheduled event.
description: :class:`str`
The description of the scheduled event.
entity_type: :class:`EntityType`
The type of entity this event is for.
entity_id: Optional[:class:`int`]
The ID of the entity this event is for if available.
start_time: :class:`datetime.datetime`
The time that the scheduled event will start in UTC.
end_time: :class:`datetime.datetime`
The time that the scheduled event will end in UTC.
privacy_level: :class:`PrivacyLevel`
The privacy level of the scheduled event.
status: :class:`EventStatus`
The status of the scheduled event.
user_count: :class:`int`
The number of users subscribed to the scheduled event.
creator: Optional[:class:`User`]
The user that created the scheduled event.
location: Optional[:class:`str`]
The location of the scheduled event.
"""
__slots__ = (
'_state',
'_users',
'id',
'guild_id',
'name',
'description',
'entity_type',
'entity_id',
'start_time',
'end_time',
'privacy_level',
'status',
'_cover_image',
'user_count',
'creator',
'channel_id',
'location',
)
def __init__(self, *, state: ConnectionState, data: GuildScheduledEventPayload) -> None:
self._state = state
self._users: Dict[int, User] = {}
self._update(data)
def _update(self, data: GuildScheduledEventPayload) -> None:
self.id: int = int(data['id'])
self.guild_id: int = int(data['guild_id'])
self.name: str = data['name']
self.description: str = data.get('description', '')
self.entity_type = try_enum(EntityType, data['entity_type'])
self.entity_id: Optional[int] = _get_as_snowflake(data, 'entity_id')
self.start_time: datetime = parse_time(data['scheduled_start_time'])
self.privacy_level: PrivacyLevel = try_enum(PrivacyLevel, data['privacy_level'])
self.status: EventStatus = try_enum(EventStatus, data['status'])
self._cover_image: Optional[str] = data.get('image', None)
self.user_count: int = data.get('user_count', 0)
creator = data.get('creator')
self.creator: Optional[User] = self._state.store_user(creator) if creator else None
self.end_time: Optional[datetime] = parse_time(data.get('scheduled_end_time'))
self.channel_id: Optional[int] = _get_as_snowflake(data, 'channel_id')
metadata = data.get('entity_metadata')
self._unroll_metadata(metadata)
def _unroll_metadata(self, data: EntityMetadata):
self.location: Optional[str] = data.get('location') if data else None
@classmethod
def from_creation(cls, *, state: ConnectionState, data: GuildScheduledEventPayload):
creator_id = data.get('creator_id')
self = cls(state=state, data=data)
if creator_id:
self.creator = self._state.get_user(int(creator_id))
return self
def __repr__(self) -> str:
return f'<GuildScheduledEvent id={self.id} name={self.name!r} guild_id={self.guild_id!r} creator={self.creator!r}>'
@property
def cover_image(self) -> Optional[Asset]:
"""Optional[:class:`Asset`]: The scheduled event's cover image."""
if self._cover_image is None:
return None
return Asset._from_scheduled_event_cover_image(self._state, self.id, self._cover_image)
@property
def guild(self) -> Optional[Guild]:
"""Optional[:class:`Guild`]: The guild this scheduled event is in."""
return self._state._get_guild(self.guild_id)
@property
def channel(self) -> Optional[Union[VoiceChannel, StageChannel]]:
"""Optional[Union[:class:`VoiceChannel`, :class:`StageChannel`]]: The channel this scheduled event is in."""
return self.guild.get_channel(self.channel_id) # type: ignore
@property
def url(self):
""":class:`str`: The url for the scheduled event."""
return f'https://discord.com/events/{self.guild_id}/{self.id}'
async def start(self, *, reason: Optional[str] = None) -> ScheduledEvent:
"""|coro|
Starts the scheduled event.
Shorthand for:
.. code-block:: python3
await event.edit(status=EventStatus.active)
Parameters
-----------
reason: Optional[:class:`str`]
The reason for starting the scheduled event.
Raises
------
ValueError
The scheduled event has already started or has ended.
Forbidden
You do not have the proper permissions to start the scheduled event.
HTTPException
The scheduled event could not be started.
Returns
-------
:class:`ScheduledEvent`
The scheduled event that was started.
"""
if self.status is not EventStatus.scheduled:
raise ValueError('This scheduled event is already running.')
return await self.edit(status=EventStatus.active, reason=reason)
async def end(self, *, reason: Optional[str] = None) -> ScheduledEvent:
"""|coro|
Ends the scheduled event.
Shorthand for:
.. code-block:: python3
await event.edit(status=EventStatus.completed)
Parameters
-----------
reason: Optional[:class:`str`]
The reason for ending the scheduled event.
Raises
------
ValueError
The scheduled event is not active or has already ended.
Forbidden
You do not have the proper permissions to end the scheduled event.
HTTPException
The scheduled event could not be ended.
Returns
-------
:class:`ScheduledEvent`
The scheduled event that was ended.
"""
if self.status is not EventStatus.active:
raise ValueError('This scheduled event is not active.')
return await self.edit(status=EventStatus.ended, reason=reason)
async def cancel(self, *, reason: Optional[str] = None) -> ScheduledEvent:
"""|coro|
Cancels the scheduled event.
Shorthand for:
.. code-block:: python3
await event.edit(status=EventStatus.cancelled)
Parameters
-----------
reason: Optional[:class:`str`]
The reason for cancelling the scheduled event.
Raises
------
ValueError
The scheduled event is already running.
Forbidden
You do not have the proper permissions to cancel the scheduled event.
HTTPException
The scheduled event could not be cancelled.
Returns
-------
:class:`ScheduledEvent`
The scheduled event that was cancelled.
"""
if self.status is not EventStatus.scheduled:
raise ValueError('This scheduled event is already running.')
return await self.edit(status=EventStatus.cancelled, reason=reason)
async def edit(
self,
*,
name: str = MISSING,
description: str = MISSING,
channel: Optional[Snowflake] = MISSING,
start_time: datetime = MISSING,
end_time: datetime = MISSING,
privacy_level: PrivacyLevel = MISSING,
entity_type: EntityType = MISSING,
status: EventStatus = MISSING,
image: bytes = MISSING,
location: str = MISSING,
reason: Optional[str] = None,
) -> ScheduledEvent:
r"""|coro|
Edits the scheduled event.
Requires :attr:`~Permissions.manage_events` permissions.
Parameters
-----------
name: :class:`str`
The name of the scheduled event.
description: :class:`str`
The description of the scheduled event.
channel: Optional[:class:`~discord.abc.Snowflake`]
The channel to put the scheduled event in.
Required if the entity type is either :attr:`EntityType.voice` or
:attr:`EntityType.stage_instance`.
start_time: :class:`datetime.datetime`
The time that the scheduled event will start. This must be a timezone-aware
datetime object. Consider using :func:`utils.utcnow`.
end_time: Optional[:class:`datetime.datetime`]
The time that the scheduled event will end. This must be a timezone-aware
datetime object. Consider using :func:`utils.utcnow`.
If the entity type is either :attr:`EntityType.voice` or
:attr:`EntityType.stage_instance`, the end_time can be cleared by
passing ``None``.
Required if the entity type is :attr:`EntityType.external`.
privacy_level: :class:`PrivacyLevel`
The privacy level of the scheduled event.
entity_type: :class:`EntityType`
The new entity type.
status: :class:`EventStatus`
The new status of the scheduled event.
image: Optional[:class:`bytes`]
The new image of the scheduled event or ``None`` to remove the image.
location: :class:`str`
The new location of the scheduled event.
Required if the entity type is :attr:`EntityType.external`.
reason: Optional[:class:`str`]
The reason for editing the scheduled event. Shows up on the audit log.
Raises
-------
TypeError
`image` was not a :term:`py:bytes-like object`, or ``privacy_level``
was not a :class:`PrivacyLevel`, or ``entity_type`` was not an
:class:`EntityType`, ``status`` was not an :class:`EventStatus`, or
an argument was provided that was incompatible with the scheduled event's
entity type.
ValueError
``start_time`` or ``end_time`` was not a timezone-aware datetime object.
Forbidden
You do not have permissions to edit the scheduled event.
HTTPException
Editing the scheduled event failed.
Returns
--------
:class:`ScheduledEvent`
The edited scheduled event.
"""
payload = {}
metadata = {}
if name is not MISSING:
payload['name'] = name
if start_time is not MISSING:
if start_time.tzinfo is None:
raise ValueError(
'start_time must be an aware datetime. Consider using discord.utils.utcnow() or datetime.datetime.now().astimezone() for local time.'
)
payload['scheduled_start_time'] = start_time.isoformat()
if description is not MISSING:
payload['description'] = description
if privacy_level is not MISSING:
if not isinstance(privacy_level, PrivacyLevel):
raise TypeError('privacy_level must be of type PrivacyLevel.')
payload['privacy_level'] = privacy_level.value
if status is not MISSING:
if not isinstance(status, EventStatus):
raise TypeError('status must be of type EventStatus')
payload['status'] = status.value
if image is not MISSING:
image_as_str: Optional[str] = _bytes_to_base64_data(image) if image is not None else image
payload['image'] = image_as_str
if entity_type is not MISSING:
if not isinstance(entity_type, EntityType):
raise TypeError('entity_type must be of type EntityType')
payload['entity_type'] = entity_type.value
_entity_type = entity_type or self.entity_type
if _entity_type in (EntityType.stage_instance, EntityType.voice):
if channel is MISSING or channel is None:
raise TypeError('channel must be set when entity_type is voice or stage_instance')
payload['channel_id'] = channel.id
if location not in (MISSING, None):
raise TypeError('location cannot be set when entity_type is voice or stage_instance')
payload['entity_metadata'] = None
else:
if channel not in (MISSING, None):
raise TypeError('channel cannot be set when entity_type is external')
payload['channel_id'] = None
if location is MISSING or location is None:
raise TypeError('location must be set when entity_type is external')
metadata['location'] = location
if end_time is MISSING or end_time is None:
raise TypeError('end_time must be set when entity_type is external')
if end_time is not MISSING:
if end_time is not None:
if end_time.tzinfo is None:
raise ValueError(
'end_time must be an aware datetime. Consider using discord.utils.utcnow() or datetime.datetime.now().astimezone() for local time.'
)
end_time = end_time.isoformat()
payload['scheduled_end_time'] = end_time
if metadata:
payload['entity_metadata'] = metadata
data = await self._state.http.edit_scheduled_event(self.guild_id, self.id, **payload, reason=reason)
s = ScheduledEvent(state=self._state, data=data)
s._users = self._users
return s
async def delete(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the scheduled event.
Requires :attr:`~Permissions.manage_events` permissions.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting the scheduled event. Shows up on the audit log.
Raises
------
Forbidden
You do not have permissions to delete the scheduled event.
HTTPException
Deleting the scheduled event failed.
"""
await self._state.http.delete_scheduled_event(self.guild_id, self.id, reason=reason)
async def users(
self,
*,
limit: Optional[int] = None,
before: Optional[Snowflake] = None,
after: Optional[Snowflake] = None,
oldest_first: bool = MISSING,
) -> AsyncIterator[User]:
"""|coro|
Retrieves all :class:`User` that are subscribed to this event.
This requires :attr:`Intents.members` to get information about members
other than yourself.
Raises
-------
HTTPException
Retrieving the members failed.
Yields
-------
:class:`User`
The users subscribed to this scheduled event.
"""
async def _before_strategy(retrieve, before, limit):
before_id = before.id if before else None
users = await self._state.http.get_scheduled_event_users(
self.guild_id, self.id, limit=retrieve, with_member=False, before=before_id
)
if users:
if limit is not None:
limit -= len(users)
before = Object(id=users[-1]['user']['id'])
return users, before, limit
async def _after_strategy(retrieve, after, limit):
after_id = after.id if after else None
users = await self._state.http.get_scheduled_event_users(
self.guild_id, self.id, limit=retrieve, with_member=False, after=after_id
)
if users:
if limit is not None:
limit -= len(users)
after = Object(id=users[0]['user']['id'])
return users, after, limit
if limit is None:
limit = self.user_count or None
if oldest_first is MISSING:
reverse = after is not None
else:
reverse = oldest_first
predicate = None
if reverse:
strategy, state = _after_strategy, after
if before:
predicate = lambda u: u['user']['id'] < before.id
else:
strategy, state = _before_strategy, before
if after and after != OLDEST_OBJECT:
predicate = lambda u: u['user']['id'] > after.id
while True:
retrieve = min(100 if limit is None else limit, 100)
if retrieve < 1:
return
data, state, limit = await strategy(retrieve, state, limit)
if len(data) < 100:
limit = 0
if reverse:
data = reversed(data)
if predicate:
data = filter(predicate, data)
users = (self._state.store_user(raw_user['user']) for raw_user in data)
for user in users:
yield user
def _add_user(self, user: User) -> None:
self._users[user.id] = user
def _pop_user(self, user_id: int) -> None:
self._users.pop(user_id)
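# Hedged usage sketch (not part of the library): how client code might drive a
# ScheduledEvent fetched from a guild; the guild and event_id are assumed to come
# from an authenticated discord.py client.
#
#     event = await guild.fetch_scheduled_event(event_id)
#     if event.status is EventStatus.scheduled:
#         event = await event.start(reason='Going live')
#     async for user in event.users(limit=50):
#         print(user.name)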
|
# Copyright (c) 2018 Mengye Ren
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
"""
Checkpointing utilities.
Author: Mengye Ren (mren@cs.toronto.edu)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
def build_checkpoint(var_list):
with tf.variable_scope("checkpoint"):
def get_var(x):
return tf.get_variable(
x.name.split(":")[0],
x.get_shape(),
x.dtype,
tf.constant_initializer(0, dtype=x.dtype),
trainable=False)
ckpt = list(map(get_var, var_list))
return ckpt
def read_checkpoint(ckpt, var_list, use_locking=False):
return tf.group(*[tf.assign(ww, ck, use_locking=use_locking) for ck, ww in zip(ckpt, var_list)])
def write_checkpoint(ckpt, var_list, use_locking=False):
return tf.group(*[tf.assign(ck, ww, use_locking=use_locking) for ck, ww in zip(ckpt, var_list)])
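# Hedged usage sketch (not part of the original utilities): snapshot a variable
# into the shadow "checkpoint" variables in-graph, clobber it, then restore it.
# The variable name and values are illustrative; this assumes a TF1-style graph.
if __name__ == '__main__':
    w = tf.get_variable('w', shape=[2], initializer=tf.constant_initializer(1.0))
    ckpt = build_checkpoint([w])
    save_op = write_checkpoint(ckpt, [w])
    restore_op = read_checkpoint(ckpt, [w])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(save_op)                   # checkpoint now holds [1., 1.]
        sess.run(tf.assign(w, [5.0, 5.0]))  # overwrite the live variable
        sess.run(restore_op)                # copy the checkpoint back into w
        print(sess.run(w))                  # -> [1. 1.]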
|
"""
Kivy helpers for orchestrating volume levels for groups of Sounds.
"""
from __future__ import annotations
import os
from kivy.core.audio import Sound, SoundLoader
class AudioManager:
def __init__(self, prefix='./', volume=.5):
self._sounds: list[tuple[Sound, float]] = []
self.prefix = prefix
self._volume = None
self.volume = volume
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
if value > 1:
value /= 100
if value == self._volume:
return
self._volume = value
for sound, multiplier in self._sounds:
sound.volume = value * multiplier
def load(self, path, multiplier=1):
sound = SoundLoader.load(os.path.join(self.prefix, path))
if sound is None:
return
sound.volume = self._volume * multiplier
self._sounds.append((sound, multiplier))
return sound
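# Hedged usage sketch (not part of the original module): two sounds with different
# relative loudness share one master volume. The file names are fabricated and
# SoundLoader needs a working Kivy audio provider to return anything.
if __name__ == '__main__':
    manager = AudioManager(prefix='assets/audio', volume=0.5)
    music = manager.load('theme.ogg')                  # plays at master volume
    click = manager.load('click.wav', multiplier=0.3)  # always 30% of master
    manager.volume = 80  # values above 1 are interpreted as percentages
    if music is not None:
        music.play()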
|
# Copyright 2016 Nicolas Bessi, Camptocamp SA
# Copyright 2018 Tecnativa - Pedro M. Baeza
# Copyright 2020 Poonlap V.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
class ResPartnerTH(models.Model):
_inherit = "res.partner"
@api.onchange("zip_id", "city_id", "state_id", "country_id")
def _onchange_zip_id(self):
if self.zip_id and self.country_id.code == "TH":
address = self.zip_id.city_id.name.split(", ")
self.update({"street2": address[0], "city": address[1]})
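# Illustrative assumption: zip_id.city_id.name is stored as "<sub-district>, <district>",
# so address[0] fills street2 with the sub-district and address[1] fills city with the district.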
|
#!/usr/bin/env python3
import os
import argparse
import re
import open3d
import prompter
import numpy as np
import copy
import toml
from pathlib import Path
import mathutils
# Global regular expressions
index_re = re.compile(r'(\d+)(?!.*\d)') # Gets last number in a string
def get_index(path):
m = index_re.search(str(path))
if m is None:
raise RuntimeError("Index could not be found.")
return m.group(0)
def is_empty(path):
return not bool(sorted(path.rglob('*')))
def matrix_to_tum(matrix):
transform = mathutils.Matrix(matrix)
q = transform.to_quaternion()
t = transform.to_translation()
return [t.x, t.y, t.z, q.x, q.y, q.z, q.w]
def tum_to_matrix(tx, ty, tz, qx, qy, qz, qw):
m = np.eye(4)
m[0:3, 0:3] = mathutils.Quaternion((qw, qx, qy, qz)).to_matrix()
m[0:3, 3] = [tx, ty, tz]
return m
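# Example of the TUM convention used here: the identity pose round-trips as
# matrix_to_tum(np.eye(4)) == [0, 0, 0, 0, 0, 0, 1] (tx ty tz qx qy qz qw), and
# tum_to_matrix(0, 0, 0, 0, 0, 0, 1) rebuilds the 4x4 identity matrix.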
def make_paths_absolute_run_config(run_config, data_path):
if 'run' in run_config:
for r in run_config['run']:
r['input'] = data_path / r['input']
assert os.path.exists(r['input']), f'run input not found at: {r["input"]}'
r['output'] = data_path / r.get('output', f"processed/{r['input'].stem}")
if 'object' in run_config:
for o in run_config['object']:
o['cloud'] = data_path / o['cloud']
assert os.path.exists(o['cloud']), f'cloud not found at: {o["cloud"]}'
o['mesh'] = data_path / o['mesh']
assert os.path.exists(o['mesh']), f'mesh not found at: {o["mesh"]}'
def get_camera_config(config_run, run):
camera_config = [c for c in config_run['camera'] if c['id'] == run['camera']]
if len(camera_config) != 1:
raise RuntimeError('Could not find camera for run.')
return camera_config[0]
def get_object_configs(config_run, run):
if 'object' not in config_run or 'objects' not in run:
return []
object_configs = [o for o in config_run['object'] if o['id'] in run['objects']]
if len(object_configs) == 0:
raise RuntimeError('Sequence has no objects.')
return object_configs
def parse_and_load_config():
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True, help="Path to aligner configuration")
parser.add_argument("--run", required=True, help="Path to run configuration")
parser.add_argument("--data", required=True, help="Path to data root directory")
args, _ = parser.parse_known_args()
assert os.path.exists(args.config), f'config file not found at: {args.config}'
assert os.path.exists(args.run), f'run file not found at: {args.run}'
config = toml.load(args.config)
config_run = toml.load(args.run)
make_paths_absolute_run_config(config_run, Path(args.data))
return config, config_run
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
open3d.visualization.draw_geometries([source_temp, target_temp])
def register_selected_points(cloud_object, cloud_scene, show):
def pick_points(pcd):
print("")
print("1) Please pick at least three correspondences using [shift + left click]")
print(" Press [shift + right click] to undo point picking")
print("2) After picking points, press q to close the window")
vis = open3d.visualization.VisualizerWithEditing()
vis.create_window()
vis.add_geometry(pcd)
vis.run()
vis.destroy_window()
print("")
return vis.get_picked_points()
points_object = pick_points(cloud_object)
points_scene = pick_points(cloud_scene)
num_points = len(points_object)
if num_points < 3:
print("You did not select enough points for manual alignment (>=3 required). Skipping ...")
return
if num_points != len(points_scene):
print("Number of selected points not matching. Skipping ...")
return
corr = np.zeros((num_points, 2))
corr[:, 0] = points_object
corr[:, 1] = points_scene
p2p = open3d.registration.TransformationEstimationPointToPoint()
est = p2p.compute_transformation(cloud_object, cloud_scene, open3d.utility.Vector2iVector(corr))
print("Manual estimation of transformation:", est)
if show:
print(":: Visualize manual alignment ...")
draw_registration_result(cloud_object, cloud_scene, est)
return est
class Sequence(object):
def __init__(self, config):
self.config = config
self.name = self.config['input'].stem
self.path_processed = self.config['output']
self.path_frames = self.path_processed / "frames"
self.path_reconstruction = self.path_processed / "reconstruction"
self.path_camera_poses = self.path_reconstruction / "poses.txt"
self.path_reconstructed_cloud = self.path_reconstruction / "model.ply"
self.path_reconstructed_mesh = self.path_reconstruction / "model_mesh.ply"
self.path_reconstructed_mesh_bg = self.path_reconstruction / "model_mesh_background.ply"
self.path_annotations = self.path_processed / "annotations"
self.path_alignments = self.path_processed / "alignments"
self.rgb_frames = None
self.depth_frames = None
self.num_frames = None
self.trajectory_camera = None # T_wc (camera -> world)
self.camera = None
self.object_alignments = None # T_wo (object -> world)
def has_trajectory(self):
return self.path_camera_poses.exists()
def has_reconstructed_cloud(self):
return self.path_reconstructed_cloud.exists()
def has_reconstructed_mesh(self):
return self.path_reconstructed_mesh.exists()
def has_reconstructed_background_mesh(self):
return self.path_reconstructed_mesh_bg.exists()
def count_aligned_objects(self):
self.load_object_alignments()
return len(self.object_alignments)
def load_frame_paths(self, force_reload=False):
if self.num_frames is None or force_reload:
self.rgb_frames = [self.path_frames / x for x in sorted(os.listdir(self.path_frames)) if "Color" in x]
self.depth_frames = [self.path_frames / x for x in sorted(os.listdir(self.path_frames)) if "Depth" in x]
assert len(self.rgb_frames) == len(self.depth_frames)
self.num_frames = len(self.depth_frames)
def load_object_alignments(self, force_reload=False):
if self.object_alignments is None or force_reload:
self.object_alignments = {}
for p in [self.path_alignments / o for o in sorted(os.listdir(self.path_alignments)) if ".txt" in o]:
self.object_alignments[int(get_index(p))] = np.loadtxt(p, delimiter=',')
def load_trajectory(self, force_reload=False):
if self.trajectory_camera is None or force_reload:
T_wc = np.loadtxt(self.path_camera_poses)
self.trajectory_camera = [tum_to_matrix(*T_wc[i, 1:4], *T_wc[i, 4:]) for i in range(T_wc.shape[0])]
self.load_object_alignments()
def get_camera(self, config_sequences):
self.camera = get_camera_config(config_sequences, self.config)
class Object(object):
def __init__(self, name, object_id, path_cloud, path_mesh=None, scale=None):
assert os.path.exists(path_cloud), f'cloud file: {path_cloud} not found'
if path_mesh is not None:
assert os.path.exists(path_mesh), f'mesh file: {path_mesh} not found'
self.path_cloud = path_cloud
self.path_mesh = path_mesh
self.cloud = None
self.mean = None
self.std = None
self.bound_min = None
self.bound_max = None
self.id = object_id
self.name = name
self.scale = scale
def load(self):
if self.cloud is None:
self.cloud = open3d.io.read_point_cloud(str(self.path_cloud))
if self.scale is not None:
self.cloud.scale(self.scale, center=False)
self.compute_object_statistics()
def compute_object_statistics(self):
self.mean, cov = open3d.geometry.compute_point_cloud_mean_and_covariance(self.cloud)
self.std = np.sqrt(np.diagonal(cov))
self.bound_min, self.bound_max = self.cloud.get_min_bound(), self.cloud.get_max_bound()
print(f"[{self.name}] object info",
"\n| Object-space min corner:", self.bound_min,
"\n| Object-space max corner:", self.bound_max,
"\n| Mean:", self.mean,
"\n| Std:", self.std)
def get_boundingbox_corners(self):
corners = np.zeros([8, 3])
corners[0, :] = self.bound_min
corners[1, :] = [self.bound_max[0], self.bound_min[1], self.bound_min[2]]
corners[2, :] = [self.bound_min[0], self.bound_max[1], self.bound_min[2]]
corners[3, :] = [self.bound_min[0], self.bound_min[1], self.bound_max[2]]
corners[4, :] = [self.bound_max[0], self.bound_max[1], self.bound_min[2]]
corners[5, :] = [self.bound_max[0], self.bound_min[1], self.bound_max[2]]
corners[6, :] = [self.bound_min[0], self.bound_max[1], self.bound_max[2]]
corners[7, :] = self.bound_max
return corners
def register(self,
cloud_scene,
path_output_alignment,
distance_threshold,
init_with_global_features=True,
point_to_plane=True):
def preprocess_cloud(pcd, voxel_size=0.005):
print(":: Downsample with a voxel size %.3f." % voxel_size)
pcd_down = open3d.geometry.voxel_down_sample(pcd, voxel_size)
radius_normal = voxel_size * 2
print(":: Estimate normal with search radius %.3f." % radius_normal)
open3d.geometry.estimate_normals(
pcd_down, open3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30))
radius_feature = voxel_size * 5
print(":: Compute FPFH feature with search radius %.3f." % radius_feature)
pcd_fpfh = open3d.registration.compute_fpfh_feature(
pcd_down, open3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100))
return pcd_down, pcd_fpfh
object_sampled, object_fpfh = preprocess_cloud(self.cloud)
scene_sampled, scene_fpfh = preprocess_cloud(cloud_scene)
if init_with_global_features is False:
transformation = register_selected_points(self.cloud, cloud_scene, True)
else:
print(":: Execute RANSAC alignment")
transformation = open3d.registration.registration_ransac_based_on_feature_matching(
object_sampled, scene_sampled, object_fpfh, scene_fpfh,
distance_threshold,
open3d.registration.TransformationEstimationPointToPoint(False), 4,
[open3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
open3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
open3d.registration.RANSACConvergenceCriteria(4000000, 500)).transformation
print(":: Result:\n", transformation)
print(":: Visualize initial alignment ...")
draw_registration_result(object_sampled, scene_sampled, transformation)
if not prompter.yesno('Is this initial alignment good enough? (Otherwise select matching points manually)'):
transformation = register_selected_points(self.cloud, cloud_scene, True)
alignment_accepted = False
if point_to_plane:
icp_estimation_method = open3d.registration.TransformationEstimationPointToPlane()
else:
icp_estimation_method = open3d.registration.TransformationEstimationPointToPoint()
while not alignment_accepted:
print(":: Execute ICP alignment")
transformation = open3d.registration.registration_icp(
object_sampled, scene_sampled, distance_threshold, transformation, icp_estimation_method).transformation
print(":: Result:\n", transformation)
print(":: Visualize refined alignment ...")
draw_registration_result(object_sampled, scene_sampled, transformation)
if prompter.yesno('Is alignment good? (Otherwise select matching points manually and run ICP)'):
alignment_accepted = True
else:
transformation = register_selected_points(self.cloud, cloud_scene, True)
if prompter.yesno('Skip ICP?'):
alignment_accepted = True
# Write alignment to file
np.savetxt(str(path_output_alignment), transformation, delimiter=",")
def get_toml_description(self):
return \
f"[[object]]\n" \
f"name = '{self.name}'\n" \
f"id = {self.id}\n" \
f"mesh = '{self.path_mesh}'\n" \
f"mean = {np.array2string(self.mean, separator=', ')}\n" \
f"stddev = {np.array2string(self.std, separator=', ')}"
|
import os
from PIL import Image, ImageDraw, ImageFont
#####################################################
# parameter setting #
#####################################################
row = 2
col = 6
pad = 2
dataset = 'ToS3'
seq_num = 3
name = ['bridge', 'face', 'room']
frm_num = [13945, 9945, 4400]
ROI = [
# bridge
[[(467, 136), (555, 202)],
[(987, 374), (1163, 506)]],
# face
[[(643, 127), (1031, 418)],
[(422, 144), (589, 269)]],
# room
[[(622, 346), (820, 494)],
[(75, 364), (280, 518)]]
]
GT_root_path = '../data/'
Rst_root_path = '../results/'
rst_path = '../results/show'
Rst = ['vespcn_ep0500', 'SOFVSR_x4', 'FRVSR_BD_iter400000', 'TecoGAN_BD_iter500000', 'EGVSR_iter420000']
label_name = ['VESPCN', 'SOFVSR', 'FRVSR', 'TecoGAN', 'Ours', 'GT']
font = ImageFont.truetype(font='../resources/VCR_OSD_MONO_1.001.ttf', size=20)
img_h, img_w = 534//4, 1280//4
input_im = Image.new('RGBA', (seq_num * (img_w + pad) - pad, img_h), (255, 255, 255))
label_n = ImageDraw.ImageDraw(input_im)
for i in range(seq_num):
lr_img_path = os.path.join(GT_root_path, dataset, 'Gaussian4xLR', name[i], '{}.png'.format(frm_num[i]))
img_lr = Image.open(lr_img_path)
pos = (i * (img_w + pad), 0)
draw = ImageDraw.ImageDraw(img_lr)
for rect in ROI[i]:
rect = [(rect[0][0]//4, rect[0][1]//4), (rect[1][0]//4, rect[1][1]//4)]
draw.rectangle(rect, fill=None, outline='red', width=2)
input_im.paste(img_lr, pos) # paste to input_im
label_n.text(xy=pos, text=name[i], fill='white', font=font)
input_im_path = os.path.join(rst_path, 'input_img_{}.png'.format(dataset))
input_im.save(input_im_path, 'png')
input_im.show() # finish
for i in range(seq_num):
roi_h = roi_w = 0
_ROI = ROI[i]
for roi in _ROI:
point1 = roi[0]
point2 = roi[1]
w = abs(point1[0] - point2[0])
h = abs(point1[1] - point2[1])
if h > roi_h:
roi_h, roi_w = h, w
dest_im = Image.new('RGBA', (col * (roi_w + pad) - pad, row * roi_h + pad), (255, 255, 255))
label = ImageDraw.ImageDraw(dest_im)
gt_img_path = os.path.join(GT_root_path, dataset, 'GT', name[i], '{}.png'.format(frm_num[i]))
img_gt = Image.open(gt_img_path)
print(roi_h, roi_w, roi_h/roi_w)
for r in range(row):
for c in range(col):
pos = (c * (roi_w + pad), r * (roi_h + pad))
box = (_ROI[r][0][0], _ROI[r][0][1], _ROI[r][1][0], _ROI[r][1][1])
if c == col-1:
src_im = img_gt
else:
src_path = os.path.join(Rst_root_path, dataset, Rst[c], name[i], '{}.png'.format(frm_num[i]))
src_im = Image.open(src_path)
img_crop = src_im.crop(box)
img_resi = img_crop.resize(size=(roi_w, roi_h))
dest_im.paste(img_resi, pos) # paste to dest_im
font_color = 'white' if c != 4 else 'red'
label.text(xy=pos, text=label_name[c], fill=font_color, font=font)
dest_im_path = os.path.join(rst_path, 'cmp_{}_{}_{}.png'.format(dataset, name[i], frm_num[i]))
dest_im.save(dest_im_path, 'png')
dest_im.show() # finish
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
import unittest
from oslo_config import cfg
import st2tests.config as tests_config
tests_config.parse_args()
from st2reactor.garbage_collector import base as garbage_collector
class GarbageCollectorServiceTest(unittest.TestCase):
def tearDown(self):
        # Reset gc_max_idle_sec to 1 to re-enable it for other tests.
cfg.CONF.set_override('gc_max_idle_sec', 1, group='workflow_engine')
super(GarbageCollectorServiceTest, self).tearDown()
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_purge_action_executions',
mock.MagicMock(return_value=None))
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_purge_action_executions_output',
mock.MagicMock(return_value=None))
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_purge_trigger_instances',
mock.MagicMock(return_value=None))
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_timeout_inquiries',
mock.MagicMock(return_value=None))
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_purge_orphaned_workflow_executions',
mock.MagicMock(return_value=None))
def test_orphaned_workflow_executions_gc_enabled(self):
# Mock the default value of gc_max_idle_sec with a value >= 1 to enable. The config
# gc_max_idle_sec is assigned to _workflow_execution_max_idle which gc checks to see
# whether to run the routine.
cfg.CONF.set_override('gc_max_idle_sec', 1, group='workflow_engine')
# Run the garbage collection.
gc = garbage_collector.GarbageCollectorService(sleep_delay=0)
gc._perform_garbage_collection()
# Make sure _purge_orphaned_workflow_executions is called.
self.assertTrue(
garbage_collector.GarbageCollectorService._purge_orphaned_workflow_executions.called
)
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_purge_action_executions',
mock.MagicMock(return_value=None))
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_purge_action_executions_output',
mock.MagicMock(return_value=None))
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_purge_trigger_instances',
mock.MagicMock(return_value=None))
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_timeout_inquiries',
mock.MagicMock(return_value=None))
@mock.patch.object(
garbage_collector.GarbageCollectorService,
'_purge_orphaned_workflow_executions',
mock.MagicMock(return_value=None))
def test_orphaned_workflow_executions_gc_disabled(self):
# Mock the default value of gc_max_idle_sec with a value of 0 to disable. The config
# gc_max_idle_sec is assigned to _workflow_execution_max_idle which gc checks to see
# whether to run the routine.
cfg.CONF.set_override('gc_max_idle_sec', 0, group='workflow_engine')
# Run the garbage collection.
gc = garbage_collector.GarbageCollectorService(sleep_delay=0)
gc._perform_garbage_collection()
# Make sure _purge_orphaned_workflow_executions is not called.
self.assertFalse(
garbage_collector.GarbageCollectorService._purge_orphaned_workflow_executions.called
)
|
import os
template_parent = 'default'
template_dir = os.path.abspath(os.path.dirname(__file__))
base_fn = 'base.html'
|
from gym.envs.registration import register
# Human Testing
register(
id='HumanTesting-v0',
entry_point='assistive_gym.envs:HumanTestingEnv',
max_episode_steps=200,
)
# Scratch Itch PR2
register(
id='ScratchItchPR2-v0',
entry_point='assistive_gym.envs:ScratchItchPR2Env',
max_episode_steps=200,
)
# Scratch Itch Jaco
register(
id='ScratchItchJaco-v0',
entry_point='assistive_gym.envs:ScratchItchJacoEnv',
max_episode_steps=200,
)
# Scratch Itch PR2 Human
register(
id='ScratchItchPR2Human-v0',
entry_point='assistive_gym.envs:ScratchItchPR2HumanEnv',
max_episode_steps=200,
)
# Scratch Itch Jaco Human
register(
id='ScratchItchJacoHuman-v0',
entry_point='assistive_gym.envs:ScratchItchJacoHumanEnv',
max_episode_steps=200,
)
# Scratch Itch PR2 New
register(
id='ScratchItchPR2New-v0',
entry_point='assistive_gym.envs:ScratchItchPR2NewEnv',
max_episode_steps=200,
)
# Scratch Itch Jaco New
register(
id='ScratchItchJacoNew-v0',
entry_point='assistive_gym.envs:ScratchItchJacoNewEnv',
max_episode_steps=200,
)
# Scratch Itch VR PR2
register(
id='ScratchItchVRPR2-v0',
entry_point='assistive_gym.envs:ScratchItchVRPR2Env',
max_episode_steps=9999999999,
)
# Scratch Itch VR Jaco
register(
id='ScratchItchVRJaco-v0',
entry_point='assistive_gym.envs:ScratchItchVRJacoEnv',
max_episode_steps=9999999999,
)
# Scratch Itch VR PR2 Human
register(
id='ScratchItchVRPR2Human-v0',
entry_point='assistive_gym.envs:ScratchItchVRPR2HumanEnv',
max_episode_steps=9999999999,
)
# Scratch Itch VR Jaco Human
register(
id='ScratchItchVRJacoHuman-v0',
entry_point='assistive_gym.envs:ScratchItchVRJacoHumanEnv',
max_episode_steps=9999999999,
)
# Scratch Itch VR PR2 New
register(
id='ScratchItchVRPR2New-v0',
entry_point='assistive_gym.envs:ScratchItchVRPR2NewEnv',
max_episode_steps=9999999999,
)
# Scratch Itch VR Jaco New
register(
id='ScratchItchVRJacoNew-v0',
entry_point='assistive_gym.envs:ScratchItchVRJacoNewEnv',
max_episode_steps=9999999999,
)
# Bed Bathing PR2
register(
id='BedBathingPR2-v0',
entry_point='assistive_gym.envs:BedBathingPR2Env',
max_episode_steps=200,
)
# Bed Bathing Jaco
register(
id='BedBathingJaco-v0',
entry_point='assistive_gym.envs:BedBathingJacoEnv',
max_episode_steps=200,
)
# Bed Bathing PR2 Human
register(
id='BedBathingPR2Human-v0',
entry_point='assistive_gym.envs:BedBathingPR2HumanEnv',
max_episode_steps=200,
)
# Bed Bathing Jaco Human
register(
id='BedBathingJacoHuman-v0',
entry_point='assistive_gym.envs:BedBathingJacoHumanEnv',
max_episode_steps=200,
)
# Bed Bathing PR2 New
register(
id='BedBathingPR2New-v0',
entry_point='assistive_gym.envs:BedBathingPR2NewEnv',
max_episode_steps=200,
)
# Bed Bathing Jaco New
register(
id='BedBathingJacoNew-v0',
entry_point='assistive_gym.envs:BedBathingJacoNewEnv',
max_episode_steps=200,
)
# BedBathing VR PR2
register(
id='BedBathingVRPR2-v0',
entry_point='assistive_gym.envs:BedBathingVRPR2Env',
max_episode_steps=9999999999,
)
# BedBathing VR Jaco
register(
id='BedBathingVRJaco-v0',
entry_point='assistive_gym.envs:BedBathingVRJacoEnv',
max_episode_steps=9999999999,
)
# BedBathing VR PR2 Human
register(
id='BedBathingVRPR2Human-v0',
entry_point='assistive_gym.envs:BedBathingVRPR2HumanEnv',
max_episode_steps=9999999999,
)
# BedBathing VR Jaco Human
register(
id='BedBathingVRJacoHuman-v0',
entry_point='assistive_gym.envs:BedBathingVRJacoHumanEnv',
max_episode_steps=9999999999,
)
# BedBathing VR PR2 New
register(
id='BedBathingVRPR2New-v0',
entry_point='assistive_gym.envs:BedBathingVRPR2NewEnv',
max_episode_steps=9999999999,
)
# BedBathing VR Jaco New
register(
id='BedBathingVRJacoNew-v0',
entry_point='assistive_gym.envs:BedBathingVRJacoNewEnv',
max_episode_steps=9999999999,
)
# Drinking PR2
register(
id='DrinkingPR2-v0',
entry_point='assistive_gym.envs:DrinkingPR2Env',
max_episode_steps=200,
)
# Drinking Jaco
register(
id='DrinkingJaco-v0',
entry_point='assistive_gym.envs:DrinkingJacoEnv',
max_episode_steps=200,
)
# Drinking PR2 Human
register(
id='DrinkingPR2Human-v0',
entry_point='assistive_gym.envs:DrinkingPR2HumanEnv',
max_episode_steps=200,
)
# Drinking Jaco Human
register(
id='DrinkingJacoHuman-v0',
entry_point='assistive_gym.envs:DrinkingJacoHumanEnv',
max_episode_steps=200,
)
# Drinking PR2 New
register(
id='DrinkingPR2New-v0',
entry_point='assistive_gym.envs:DrinkingPR2NewEnv',
max_episode_steps=200,
)
# Drinking Jaco New
register(
id='DrinkingJacoNew-v0',
entry_point='assistive_gym.envs:DrinkingJacoNewEnv',
max_episode_steps=200,
)
# Drinking VR PR2
register(
id='DrinkingVRPR2-v0',
entry_point='assistive_gym.envs:DrinkingVRPR2Env',
max_episode_steps=9999999999,
)
# Drinking VR Jaco
register(
id='DrinkingVRJaco-v0',
entry_point='assistive_gym.envs:DrinkingVRJacoEnv',
max_episode_steps=9999999999,
)
# Drinking VR PR2 Human
register(
id='DrinkingVRPR2Human-v0',
entry_point='assistive_gym.envs:DrinkingVRPR2HumanEnv',
max_episode_steps=9999999999,
)
# Drinking VR Jaco Human
register(
id='DrinkingVRJacoHuman-v0',
entry_point='assistive_gym.envs:DrinkingVRJacoHumanEnv',
max_episode_steps=9999999999,
)
# Drinking VR PR2 New
register(
id='DrinkingVRPR2New-v0',
entry_point='assistive_gym.envs:DrinkingVRPR2NewEnv',
max_episode_steps=9999999999,
)
# Drinking VR Jaco New
register(
id='DrinkingVRJacoNew-v0',
entry_point='assistive_gym.envs:DrinkingVRJacoNewEnv',
max_episode_steps=9999999999,
)
# Feeding PR2
register(
id='FeedingPR2-v0',
entry_point='assistive_gym.envs:FeedingPR2Env',
max_episode_steps=200,
)
# Feeding Jaco
register(
id='FeedingJaco-v0',
entry_point='assistive_gym.envs:FeedingJacoEnv',
max_episode_steps=200,
)
# Feeding PR2 Human
register(
id='FeedingPR2Human-v0',
entry_point='assistive_gym.envs:FeedingPR2HumanEnv',
max_episode_steps=200,
)
# Feeding Jaco Human
register(
id='FeedingJacoHuman-v0',
entry_point='assistive_gym.envs:FeedingJacoHumanEnv',
max_episode_steps=200,
)
# Feeding PR2 New
register(
id='FeedingPR2New-v0',
entry_point='assistive_gym.envs:FeedingPR2NewEnv',
max_episode_steps=200,
)
# Feeding Jaco New
register(
id='FeedingJacoNew-v0',
entry_point='assistive_gym.envs:FeedingJacoNewEnv',
max_episode_steps=200,
)
# Feeding VR PR2
register(
id='FeedingVRPR2-v0',
entry_point='assistive_gym.envs:FeedingVRPR2Env',
max_episode_steps=9999999999,
)
# Feeding VR Jaco
register(
id='FeedingVRJaco-v0',
entry_point='assistive_gym.envs:FeedingVRJacoEnv',
max_episode_steps=9999999999,
)
# Feeding VR PR2 Human
register(
id='FeedingVRPR2Human-v0',
entry_point='assistive_gym.envs:FeedingVRPR2HumanEnv',
max_episode_steps=9999999999,
)
# Feeding VR Jaco Human
register(
id='FeedingVRJacoHuman-v0',
entry_point='assistive_gym.envs:FeedingVRJacoHumanEnv',
max_episode_steps=9999999999,
)
# Feeding VR PR2 New
register(
id='FeedingVRPR2New-v0',
entry_point='assistive_gym.envs:FeedingVRPR2NewEnv',
max_episode_steps=9999999999,
)
# Feeding VR Jaco New
register(
id='FeedingVRJacoNew-v0',
entry_point='assistive_gym.envs:FeedingVRJacoNewEnv',
max_episode_steps=9999999999,
)
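# Usage sketch: importing this package runs the register() calls above, after which any of the
# environments can be created by id (the id below is one example from the list above):
#   import gym
#   import assistive_gym  # noqa: F401
#   env = gym.make('FeedingJaco-v0')
#   observation = env.reset()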
|
# Copyright 2020 Pants project contributors.
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from helloworld.util.lang import LanguageTranslator
def test_language_translator():
language_translator = LanguageTranslator()
assert "hola" == language_translator.translate("es", "hello")
def test_unknown_language():
with pytest.raises(LanguageTranslator.UnknownLanguage):
LanguageTranslator().translate("xx", "hello")
|
from .projects import start
from .forms import addform
__all__ = ['start', 'addform']
|
import numpy
import os
import scipy.sparse
import unittest
from afqmctools.hamiltonian.converter import (
read_qmcpack_hamiltonian,
read_fcidump,
write_fcidump
)
from afqmctools.utils.linalg import modified_cholesky_direct
from afqmctools.hamiltonian.mol import write_qmcpack_cholesky
from afqmctools.utils.testing import generate_hamiltonian
numpy.random.seed(7)
class TestConverter(unittest.TestCase):
def test_convert_real(self):
nmo = 17
nelec = (3,3)
h1e, chol, enuc, eri = generate_hamiltonian(nmo, nelec, cplx=False, sym=8)
chols = scipy.sparse.csr_matrix(chol.reshape((-1,nmo*nmo)).T.copy())
write_qmcpack_cholesky(h1e, chols, nelec, nmo, e0=enuc, real_chol=True)
hamil = read_qmcpack_hamiltonian('hamiltonian.h5')
write_fcidump('FCIDUMP', hamil['hcore'], hamil['chol'], hamil['enuc'],
hamil['nmo'], hamil['nelec'], sym=8, cplx=False)
h1e_r, eri_r, enuc_r, nelec_r = read_fcidump('FCIDUMP', verbose=False)
dm = numpy.zeros((nmo,nmo))
dm[(0,1,2),(0,1,2)] = 1.0
eri_r = eri_r.transpose((0,1,3,2)).reshape((nmo*nmo,nmo*nmo))
chol_r = modified_cholesky_direct(eri_r, tol=1e-8, verbose=False)
chol_r = chol_r.reshape((-1,nmo,nmo))
self.assertAlmostEqual(numpy.einsum('ij,ij->', dm, h1e-h1e_r).real, 0.0)
self.assertAlmostEqual(numpy.einsum('ij,nij->', dm, chol-chol_r).real, 0.0)
# Test integral only appears once in file.
h1e_r, eri_r, enuc_r, nelec_r = read_fcidump('FCIDUMP', symmetry=1,
verbose=False)
i,j,k,l = (0,1,2,3)
combs = [
(i,j,k,l),
(k,l,i,j),
(j,i,l,k),
(l,k,j,i),
(j,i,k,l),
(l,k,i,j),
(i,j,l,k),
            (k,l,j,i),
]
for c in combs:
if abs(eri_r[c]) > 0:
self.assertEqual(c,(l,k,j,i))
def test_convert_cplx(self):
nmo = 17
nelec = (3,3)
h1e, chol, enuc, eri = generate_hamiltonian(nmo, nelec, cplx=True, sym=4)
chols = scipy.sparse.csr_matrix(chol.reshape((-1,nmo*nmo)).T.copy())
write_qmcpack_cholesky(h1e, chols, nelec, nmo, e0=enuc, real_chol=False)
hamil = read_qmcpack_hamiltonian('hamiltonian.h5')
write_fcidump('FCIDUMP', hamil['hcore'], hamil['chol'], hamil['enuc'],
hamil['nmo'], hamil['nelec'], sym=4, cplx=True)
h1e_r, eri_r, enuc_r, nelec_r = read_fcidump('FCIDUMP', symmetry=4,
verbose=False)
dm = numpy.zeros((nmo,nmo))
dm[(0,1,2),(0,1,2)] = 1.0
eri_r = eri_r.transpose((0,1,3,2)).reshape((nmo*nmo,nmo*nmo))
chol_r = modified_cholesky_direct(eri_r, tol=1e-8, verbose=False)
chol_r = chol_r.reshape((-1,nmo,nmo))
self.assertAlmostEqual(numpy.einsum('ij,ij->', dm, h1e-h1e_r).real, 0.0)
self.assertAlmostEqual(numpy.einsum('ij,nij->', dm, chol-chol_r).real, 0.0)
# Test integral only appears once in file.
h1e_r, eri_r, enuc_r, nelec_r = read_fcidump('FCIDUMP', symmetry=1,
verbose=False)
i,k,j,l = (1,0,0,0)
ikjl = (i,k,j,l)
jlik = (j,l,i,k)
kilj = (k,i,l,j)
ljki = (l,j,k,i)
d1 = eri_r[ikjl] - eri_r[kilj].conj()
d2 = eri_r[ikjl] - eri_r[jlik]
d3 = eri_r[ikjl] - eri_r[ljki].conj()
self.assertAlmostEqual(d1,0.0)
self.assertAlmostEqual(d2,0.0)
self.assertAlmostEqual(d3,-0.00254428836-0.00238852605j)
def tearDown(self):
cwd = os.getcwd()
files = ['FCIDUMP', 'hamiltonian.h5']
for f in files:
try:
os.remove(cwd+'/'+f)
except OSError:
pass
if __name__ == "__main__":
unittest.main()
|
"""
This library for transformations partly derived and was re-implemented from the
following online resources:
* http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
* http://www.euclideanspace.com/maths/geometry/rotations/
* http://code.activestate.com/recipes/578108-determinant-of-matrix-of-any-order/
* http://blog.acipo.com/matrix-inversion-in-javascript/
Many thanks to Christoph Gohlke, Martin John Baker, Sachin Joglekar and Andrew
Ippoliti for providing code and documentation.
"""
from compas.utilities import flatten
from compas.geometry import allclose
from compas.geometry.transformations import decompose_matrix
from compas.geometry.transformations import matrix_from_translation
from compas.geometry.transformations import Transformation
__all__ = ['Translation']
class Translation(Transformation):
"""Create a translation transformation.
Parameters
----------
matrix : 4x4 matrix-like, optional
A 4x4 matrix (or similar) representing a translation.
Raises
------
ValueError
If the default constructor is used,
and the provided transformation matrix is not a translation.
Examples
--------
>>> T = Translation.from_vector([1, 2, 3])
>>> T[0, 3] == 1
True
>>> T[1, 3] == 2
True
>>> T[2, 3] == 3
True
>>> from compas.geometry import Vector
>>> T = Translation.from_vector(Vector(1, 2, 3))
>>> T[0, 3] == 1
True
>>> T[1, 3] == 2
True
>>> T[2, 3] == 3
True
>>> T = Translation([[1, 0, 0, 1], [0, 1, 0, 2], [0, 0, 1, 3], [0, 0, 0, 1]])
>>> T[0, 3] == 1
True
>>> T[1, 3] == 2
True
>>> T[2, 3] == 3
True
"""
def __init__(self, matrix=None):
if matrix:
_, _, _, translation, _ = decompose_matrix(matrix)
check = matrix_from_translation(translation)
if not allclose(flatten(matrix), flatten(check)):
raise ValueError('This is not a proper translation matrix.')
super(Translation, self).__init__(matrix=matrix)
@classmethod
def from_vector(cls, vector):
"""Create a translation transformation from a translation vector.
Parameters
----------
vector : :obj:`list` or :class:`compas.geometry.Vector`
The translation vector.
Returns
-------
Translation
The translation transformation.
"""
return cls(matrix_from_translation(vector))
@property
def translation_vector(self):
from compas.geometry import Vector
x = self.matrix[0][3]
y = self.matrix[1][3]
z = self.matrix[2][3]
return Vector(x, y, z)
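    # Note: translation_vector reads the translation components straight from the last column
    # of the 4x4 matrix, e.g. Translation.from_vector([1, 2, 3]).translation_vector gives a
    # Vector with x == 1, y == 2, z == 3.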
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
import doctest
doctest.testmod()
|
# Generated by Django 3.1.7 on 2021-07-27 08:28
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, null=True)),
('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image')),
('description', models.TextField(max_length=200, null=True)),
('location', models.CharField(choices=[('Kisumu', 'Kisumu'), ('Mombasa', 'Mombasa'), ('Nairobi', 'Nairobi'), ('Nakuru', 'Nakuru'), ('Kilifi', 'Kilifi')], default='Kisumu', max_length=100)),
('population', models.IntegerField(default=0)),
('admin', models.CharField(blank=True, max_length=20, null=True)),
('health_contact', models.CharField(blank=True, max_length=20, null=True)),
('police_contact', models.CharField(blank=True, max_length=20, null=True)),
],
),
migrations.CreateModel(
name='UpcomingEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, null=True)),
('description', models.TextField(blank=True, max_length=200, null=True)),
('date', models.DateField()),
('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hood.neighborhood')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image')),
('bio', models.CharField(blank=True, max_length=20, null=True)),
('profile_email', models.EmailField(blank=True, max_length=254, null=True)),
('hoods', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to='hood.neighborhood')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, null=True)),
('post', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hood_post', to='hood.neighborhood')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_owner', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Business',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40, null=True)),
('description', models.TextField(blank=True, max_length=200, null=True)),
('image', cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image')),
('business_email', models.EmailField(blank=True, max_length=254, null=True)),
('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hood.neighborhood')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='business', to=settings.AUTH_USER_MODEL)),
],
),
]
|
from django.contrib import admin
from .models import Exchange
admin.site.register(Exchange)
|
from django.utils.module_loading import module_has_submodule
from collections import namedtuple
from importlib import import_module
class Fixture(namedtuple("Fixture", "app name export func")):
__slots__ = ()
def __hash__(self):
return hash((self.app, self.name))
def __eq__(self, other):
return self[:2] == other[:2]
@property
def models(self):
return self.func.models
@property
def requires(self):
return self.func.requires
@property
def label(self):
return "%s.%s" % (self.app, self.name)
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
class CircularDependencyError(Exception):
"""
Raised when there is a circular dependency in fixture requirements.
"""
pass
def unique_seq(l):
seen = set()
for e in l:
if e not in seen:
seen.add(e)
yield e
def calculate_requirements(available_fixtures, fixture, seen=None):
if seen is None:
seen = set([fixture])
models = list(reversed(fixture.models))
requirements = []
for requirement in fixture.requires:
app_label, fixture_name = requirement.rsplit(".", 1)
fixture_func = available_fixtures[(app_label, fixture_name)]
if fixture_func in seen:
raise CircularDependencyError
r, m = calculate_requirements(
available_fixtures,
fixture_func,
seen | set([fixture_func])
)
requirements.extend([req for req in r if req not in requirements])
models.extend(reversed(m))
requirements.append(fixture)
return requirements, list(unique_seq(reversed(models)))
def get_available_fixtures(apps):
fixtures = {}
for app in apps:
try:
fixture_gen = import_module(".fixture_gen", app)
except ImportError:
if module_has_submodule(import_module(app), "fixture_gen"):
raise
continue
for obj in fixture_gen.__dict__.values():
if not getattr(obj, "__fixture_gen__", None):
continue
fixture = Fixture(app.rsplit(".", 1)[-1], obj.__name__, obj.export, obj)
fixtures[fixture] = fixture
return fixtures
def fixture_generator(*models, **kwargs):
"""
Define function as a fixture generator
"""
requires = kwargs.pop("requires", [])
export = kwargs.pop("export", False)
    if kwargs:
        raise TypeError("fixture_generator got an unexpected keyword argument: %r" % next(iter(kwargs)))
def decorator(func):
func.models = models
func.requires = requires
func.export = func.__name__ if export is True else export
func.__fixture_gen__ = True
return func
return decorator
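# Hedged usage sketch (app labels and fixture name are illustrative, not part of this module):
#   @fixture_generator("auth.User", requires=["sites.default_site"], export=True)
#   def default_users():
#       ...  # create the model instances this fixture should produce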
|
from .config import *
from .image import *
from .tester import *
from .util import *
|
#! /usr/bin/env python
import sys
import pybindgen
from pybindgen import ReturnValue, Parameter, Module, Function, FileCodeSink
from pybindgen import CppMethod, CppConstructor, CppClass, Enum
from pybindgen.typehandlers.base import ForwardWrapperBase
class VisitorParam(Parameter):
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = ['Visitor']
def convert_python_to_c(self, wrapper):
assert isinstance(wrapper, ForwardWrapperBase)
py_cb = wrapper.declarations.declare_variable("PyObject*", self.name)
wrapper.parse_params.add_parameter('O', ['&'+py_cb], self.name)
wrapper.before_call.write_error_check("!PyCallable_Check(%s)" % py_cb,
"""PyErr_SetString(PyExc_TypeError, "visitor parameter must be callable");""")
wrapper.call_params.append("_wrap_Visit")
wrapper.before_call.write_code("Py_INCREF(%s);" % py_cb)
wrapper.before_call.add_cleanup_code("Py_DECREF(%s);" % py_cb)
wrapper.call_params.append(py_cb)
def convert_c_to_python(self, wrapper):
raise NotImplementedError
def my_module_gen(out_file):
mod = Module('c')
mod.add_include('"c.h"')
mod.header.writeln("""void _wrap_Visit(int value, void *data);""")
mod.body.writeln("""
void _wrap_Visit(int value, void *data)
{
PyObject *callback = (PyObject*) data;
PyObject_CallFunction(callback, (char*) "i", value);
}
""")
mod.add_function("visit", None, [Parameter.new("Visitor", "visitor")]
# the 'data' parameter is inserted automatically
# by the custom callback type handler
)
mod.generate(FileCodeSink(out_file))
if __name__ == '__main__':
my_module_gen(sys.stdout)
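# Sketch of how the generated 'c' extension would be used from Python, assuming the wrapped
# C library provides `void visit(Visitor v, void *data)` as declared above:
#   import c
#   c.visit(lambda value: print("visited", value))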
|
from django.apps import apps
from django.db import models
class Item(models.Model):
subject = models.ForeignKey(
'Subject', on_delete=models.CASCADE, related_name='items')
name = models.CharField(max_length=32)
estimated_amount = models.PositiveIntegerField(null=True)
memo = models.TextField()
addition = models.TextField()
@property
def advances(self):
return apps.get_model('core.Requirement').objects.advances().filter(
funds__in=self.funds.normal())
@property
def regulars(self):
return apps.get_model('core.Requirement').objects.regulars().filter(
funds__in=self.funds.normal())
@property
def actual_amount(self):
total = self.funds.approved().aggregate(
total=models.Sum('amount'))['total']
return 0 if total is None else total
@property
def efficiency(self):
        # estimated_amount is nullable, so guard against None before dividing.
        if self.estimated_amount and self.estimated_amount > 0:
            return float(self.actual_amount) / self.estimated_amount * 100
        return 0
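    # Worked example (illustrative numbers): with estimated_amount == 200 and approved funds
    # totalling 150, efficiency evaluates to 150 / 200 * 100 == 75.0 (percent).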
@property
def report(self):
return '\n'.join([fund.memo for fund in self.funds.approved()])
|
import sys
import pytest
from click_testing_utils import clirunner_invoke_piped
import clifunzone.txttool as sut
from clifunzone import txt_utils
def test_none():
expected = 'I was invoked without a subcommand...'
clirunner_invoke_piped(sut.cli, [], '', exit_code=0, out_ok=expected)
def test_none_debug():
expected = [
'',
"ctx:",
"{'_close_callbacks': [],",
" '_depth': 2,",
" '_meta': {},",
" 'allow_extra_args': True,",
" 'allow_interspersed_args': False,",
" 'args': [],",
" 'auto_envvar_prefix': None,",
" 'color': None,",
" 'command': <click.core.Group object at 0x10c6d5ed0>,",
" 'default_map': None,",
" 'help_option_names': ['-h', '--help'],",
" 'ignore_unknown_options': False,",
" 'info_name': 'cli',",
" 'invoked_subcommand': None,",
" 'max_content_width': None,",
" 'obj': None,",
" 'params': {'debug': True},",
" 'parent': None,",
" 'protected_args': [],",
" 'resilient_parsing': False,",
" 'terminal_width': None,",
" 'token_normalize_func': None}",
'',
'I was invoked without a subcommand...',
'Debug mode: enabled',
'',
"ctx:",
"{'_close_callbacks': [],",
" '_depth': 2,",
" '_meta': {},",
" 'allow_extra_args': True,",
" 'allow_interspersed_args': False,",
" 'args': [],",
" 'auto_envvar_prefix': None,",
" 'color': None,",
" 'command': <click.core.Group object at 0x10c6d5ed0>,",
" 'default_map': None,",
" 'help_option_names': ['-h', '--help'],",
" 'ignore_unknown_options': False,",
" 'info_name': 'cli',",
" 'invoked_subcommand': None,",
" 'max_content_width': None,",
" 'obj': None,",
" 'params': {'debug': True},",
" 'parent': None,",
" 'protected_args': [],",
" 'resilient_parsing': False,",
" 'terminal_width': None,",
" 'token_normalize_func': None}",
'',
'kwargs: {}',
'subcommand: None'
]
clirunner_invoke_piped(sut.cli, ['-d'], '', exit_code=0, out_ok=expected)
@pytest.mark.parametrize("input_text", [
'<abc/>',
'{"abc": null}',
'abc',
'Hi. How are you? My name is John. What is your name?'
])
def test_echo(input_text):
expected = input_text or ['']
clirunner_invoke_piped(sut.echo, [], input_text, exit_code=0, out_ok=expected)
@pytest.mark.parametrize("input_text,cli_args,expected", [
('abc', ['-oj'], [
'{',
' "content": {',
' "length": 3,',
' "metrics": {',
' "chars": {',
' "counts": {',
' "distinct": 3,',
' "each": {',
' "a": 1,',
' "b": 1,',
' "c": 1',
' },',
' "total": 3',
' }',
' },',
' "words": {',
' "counts": {',
' "distinct": 1,',
' "each": {',
' "abc": 1',
' },',
' "total": 1',
' }',
' }',
' }',
' }',
'}'
]),
('<abc/>', ['-od'], [
"{'content': OrderedDict([('length', 6), ('metrics', {" +
"'chars': {'counts': {'distinct': 6, 'total': 6, " +
"'each': OrderedDict([('/', 1), ('<', 1), ('>', 1), ('a', 1), ('b', 1), ('c', 1)])}}, " +
"'words': {'counts': {'distinct': 1, 'total': 1, 'each': OrderedDict([('abc', 1)])}}})])}"
]),
('{"abc": null}', ['-od'], [
"{\'content\': OrderedDict([(\'length\', 13), (\'metrics\', {\'chars\': {" +
"\'counts\': {\'distinct\': 11, \'total\': 13, " +
"\'each\': OrderedDict([(\' \', 1), (\'\"\', 2), (\':\', 1), (\'a\', 1), (\'b\', 1), (\'c\', 1), (\'l\', 2), " +
"(\'n\', 1), (\'u\', 1), (\'{\', 1), (\'}\', 1)])}}, " +
"\'words\': {\'counts\': {\'distinct\': 2, \'total\': 2, " +
"\'each\': OrderedDict([(\'abc\', 1), (\'null\', 1)])}}})])}"
]),
('{"abc": null}', ['-f'], [
'{',
' "content_length": 13,',
' "content_metrics_chars_counts_distinct": 11,',
' "content_metrics_chars_counts_each_ ": 1,',
' "content_metrics_chars_counts_each_\\"": 2,',
' "content_metrics_chars_counts_each_:": 1,',
' "content_metrics_chars_counts_each_a": 1,',
' "content_metrics_chars_counts_each_b": 1,',
' "content_metrics_chars_counts_each_c": 1,',
' "content_metrics_chars_counts_each_l": 2,',
' "content_metrics_chars_counts_each_n": 1,',
' "content_metrics_chars_counts_each_u": 1,',
' "content_metrics_chars_counts_each_{": 1,',
' "content_metrics_chars_counts_each_}": 1,',
' "content_metrics_chars_counts_total": 13,',
' "content_metrics_words_counts_distinct": 2,',
' "content_metrics_words_counts_each_abc": 1,',
' "content_metrics_words_counts_each_null": 1,',
' "content_metrics_words_counts_total": 2',
'}'
]),
])
@pytest.mark.skipif(sys.version_info > (3, 3),
reason="currently broken for py35")
def test_info(input_text, cli_args, expected):
clirunner_invoke_piped(sut.info, cli_args, input_text, exit_code=0, out_ok=expected)
@pytest.mark.parametrize("input_text,cli_args,expected", [
('Hi! How are you? My name is John Paul. What is your name?', ['-f'], [
'"content_length": 57,',
'"content_metrics_chars_counts_distinct": 20,',
'"content_metrics_chars_counts_each_ ": 12,',
'"content_metrics_chars_counts_each_y": 3,',
'"content_metrics_chars_counts_total": 57,',
'"content_metrics_words_counts_distinct": 11,',
'"content_metrics_words_counts_each_is": 2,',
'"content_metrics_words_counts_each_john": 1,',
'"content_metrics_words_counts_each_name": 2,',
'"content_metrics_words_counts_each_paul": 1,',
'"content_metrics_words_counts_each_what": 1,',
'"content_metrics_words_counts_total": 13'
]),
("Hi! How are you? My name is John-Paul. What's your name?", ['-f'], [
'"content_length": 56,',
'"content_metrics_chars_counts_distinct": 22,',
'"content_metrics_chars_counts_each_ ": 10,',
'"content_metrics_chars_counts_each_!": 1,',
'"content_metrics_chars_counts_each_\'": 1,',
'"content_metrics_chars_counts_total": 56,',
'"content_metrics_words_counts_distinct": 10,',
'"content_metrics_words_counts_each_is": 1,',
'"content_metrics_words_counts_each_john-paul": 1,',
'"content_metrics_words_counts_each_name": 2,',
'"content_metrics_words_counts_each_what\'s": 1,',
'"content_metrics_words_counts_total": 11'
]),
])
@pytest.mark.skipif(sys.version_info > (3, 3),
reason="currently broken for py35")
def test_info_fragments(input_text, cli_args, expected):
clirunner_invoke_piped(sut.info, cli_args, input_text, exit_code=0, out_contains_seq=expected)
@pytest.mark.parametrize("cli_args,expected", [
([], [
'Lorem ipsum dolor sit amet, consectetur adipiscing elit.',
'Praesent quis erat vel ex egestas lobortis non nec augue.',
'Etiam cursus nibh vel mattis cursus. Vivamus lectus erat, dictum et mauris eu, viverra tincidunt velit.',
]),
])
@pytest.mark.skipif(sys.version_info > (3, 3),
reason="currently broken for py35")
def test_lorem(cli_args, expected):
clirunner_invoke_piped(sut.lorem, cli_args, exit_code=0, out_contains_seq=expected)
@pytest.mark.parametrize("input_text,cli_args,expected", [
("Hi! How are you? My name is John-Paul. What's your name?", [], [
'Lorem ipsum dolor sit amet, consectetur adipiscing elit.',
'Praesent quis erat vel ex egestas lobortis non nec augue.',
'Etiam cursus nibh vel mattis cursus. Vivamus lectus erat, dictum et mauris eu, viverra tincidunt velit.',
]),
])
@pytest.mark.skipif(sys.version_info > (3, 3),
reason="currently broken for py35")
def test_split(input_text, cli_args, expected):
clirunner_invoke_piped(sut.split, cli_args, input_text, exit_code=0, out_contains_seq=expected)
@pytest.mark.parametrize("input_text,cli_args,expected", [
("Hi! How are you? My name is John-Paul. What's your name?", [],
"Hi!\nHow\nare\nyou?\nMy\nname\nis\nJohn-Paul.\nWhat's\nyour\nname?"),
("Hi! How are you? My name is John-Paul. What's your name?", ['-ss'],
"Hi!\nHow\nare\nyou?\nMy\nname\nis\nJohn-Paul.\nWhat's\nyour\nname?"),
("Hi! How are you? My name is John-Paul. What's your name?", ['-sw'],
"Hi\nHow\nare\nyou\nMy\nname\nis\nJohn-Paul\nWhat's\nyour\nname"),
("Hi! How are you? My name is John Paul. What is your name?", ['-sw'],
"Hi\nHow\nare\nyou\nMy\nname\nis\nJohn\nPaul\nWhat\nis\nyour\nname"),
("Hi! How are you? My name is John-Paul. What's your name?", ['-sw', '-sep', ', '],
"Hi, How, are, you, My, name, is, John-Paul, What's, your, name"),
("Hi! How are you? My name is John-Paul. What's your name?", ['-sw', '-sep', '|'],
"Hi|How|are|you|My|name|is|John-Paul|What's|your|name"),
])
@pytest.mark.skipif(sys.version_info > (3, 3),
reason="currently broken for py35")
def test_split(input_text, cli_args, expected):
clirunner_invoke_piped(sut.split, cli_args, input_text, exit_code=0, out_eq=expected)
@pytest.mark.parametrize("input_text,cli_args,expected", [
('Lorem ipsum dolor sit amet', ['-v1', 'Lorem', '-v2', 'sit', '-v2', 'amet'],
"{'max': 4, 'mean': 3.5, 'min': 3}"),
])
@pytest.mark.skipif(sys.version_info > (3, 3),
reason="currently broken for py35")
def test_distance(input_text, cli_args, expected):
input_text = txt_utils.get_words(input_text)
input_text = '\n'.join(input_text)
clirunner_invoke_piped(sut.distance, cli_args, input_text, exit_code=0, out_eq=expected)
@pytest.mark.parametrize("cli_args,expected", [
(['-v1', 'Lorem', '-v2', 'sit', '-v2', 'amet'],
"{'max': 852, 'mean': 483.5, 'min': 3}"),
(['-v1', 'lorem', '-v1', 'dolor', '-v2', 'consectetur', '-v2', 'adipiscing'],
"{'max': 889, 'mean': 467.0740740740741, 'min': 3}"),
(['-r', '-v1', '^Pellentesque$', '-v2', '^Vivamus'],
"{'max': 528, 'mean': 222.66666666666666, 'min': 24}"),
(['-r', '-ri', '-v1', '^Pellentesque$', '-v2', '^Vivamus'],
"{'max': 910, 'mean': 287.1212121212121, 'min': 21}"),
(['-v1', 'Lorem', '-v2', 'sit', '-v2', 'amet', '-v'],
"{'max': 852, 'mean': 483.5, 'matches1': {'Lorem': set([0])}, " +
"'matches2': {'amet': set([449, 386, 4, 722, 206, 50, 563, 852, 821, 787]), " +
"'sit': set([448, 385, 3, 721, 205, 49, 562, 851, 820, 786])}, 'min': 3}"),
(['-r', '-v1', '^Pellentesque$', '-v2', '^Vivamus', '-v'],
"{'max': 528, 'mean': 222.66666666666666, 'matches1': {'Pellentesque': set([328, 99, 213, 478])}, " +
"'matches2': {'Vivamus': set([45, 270, 143, 529, 627, 502])}, 'min': 24}"),
(['-r', '-ri', '-v1', '^Pellentesque$', '-v2', '^Vivamus', '-v'],
"{'max': 910, 'mean': 287.1212121212121, 'matches1': {'pellentesque': set([406, 839, 329, 207, 182, 24, 955]), " +
"'Pellentesque': set([328, 99, 213, 478])}, " +
"'matches2': {'Vivamus': set([45, 270, 143, 529, 627, 502])}, 'min': 21}"),
])
@pytest.mark.skipif(sys.version_info > (3, 3),
reason="currently broken for py35")
def test_distance_lorem(cli_args, expected):
input_text = txt_utils.lorem_ipsum()
input_text = txt_utils.get_words(input_text)
input_text = '\n'.join(input_text)
clirunner_invoke_piped(sut.distance, cli_args, input_text, exit_code=0, out_eq=expected)
|
# package containing kinect specific modules
|
# TRAIN_PATH = '/Users/roshantirukkonda/Desktop/Kaggle /Pytorch CNN MNIST/Input/MNIST/processed/training.pt'
# TEST_PATH = '/Users/roshantirukkonda/Desktop/Kaggle /Pytorch CNN MNIST/Input/MNIST/processed/test.pt'
ROOT = '../Input'
MODEL_PATH = '../Input/Model'
LATEST_MODEL = '/CNN-2021-02-26 02:47:01.247436.pt'
TEST_PATH = '../Input/TEST'
EPOCHS = 5
SPLIT = [60000, 0]
IMG_DIM = 28, 28
BATCH_SIZE = 8
NUM_CLASSES = 10
LR = 0.001
MOMENTUM = 0.9
|
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import division
from math import exp
import numpy as np
from numpy.linalg import inv
import scipy.stats
from scipy.spatial.distance import mahalanobis as scipy_mahalanobis
from filterpy.stats import norm_cdf, multivariate_gaussian, logpdf, mahalanobis
ITERS = 10000
def test_mahalanobis():
global a, b, S
# int test
a, b, S = 3, 1, 2
assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
# int list
assert abs(mahalanobis([a], [b], [S]) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
assert abs(mahalanobis([a], b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
# float
a, b, S = 3.123, 3.235235, .01234
assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
assert abs(mahalanobis([a], [b], [S]) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
assert abs(mahalanobis([a], b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
#float array
assert abs(mahalanobis(np.array([a]), b, S) - scipy_mahalanobis(a, b, 1/S)) < 1.e-12
#1d array
a = np.array([1., 2.])
b = np.array([1.4, 1.2])
S = np.array([[1., 2.], [2., 4.001]])
assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
#2d array
a = np.array([[1., 2.]])
b = np.array([[1.4, 1.2]])
S = np.array([[1., 2.], [2., 4.001]])
assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
assert abs(mahalanobis(a.T, b, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
assert abs(mahalanobis(a, b.T, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
assert abs(mahalanobis(a.T, b.T, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
    try:
        # mismatched shapes should raise a ValueError
        mahalanobis([1], b, S)
    except ValueError:
        pass
    else:
        raise AssertionError("didn't catch vectors of different lengths")
# okay, now check for numerical accuracy
for _ in range(ITERS):
N = np.random.randint(1, 20)
a = np.random.randn(N)
b = np.random.randn(N)
S = np.random.randn(N, N)
S = np.dot(S, S.T) #ensure positive semi-definite
assert abs(mahalanobis(a, b, S) - scipy_mahalanobis(a, b, inv(S))) < 1.e-12
def test_multivariate_gaussian():
# test that we treat lists and arrays the same
mean= (0, 0)
cov=[[1, .5], [.5, 1]]
a = [[multivariate_gaussian((i, j), mean, cov)
for i in (-1, 0, 1)]
for j in (-1, 0, 1)]
b = [[multivariate_gaussian((i, j), mean, np.asarray(cov))
for i in (-1, 0, 1)]
for j in (-1, 0, 1)]
assert np.allclose(a, b)
a = [[multivariate_gaussian((i, j), np.asarray(mean), cov)
for i in (-1, 0, 1)]
for j in (-1, 0, 1)]
assert np.allclose(a, b)
try:
multivariate_gaussian(1, 1, -1)
except:
pass
else:
assert False, "negative variances are meaningless"
# test that we get the same results as scipy.stats.multivariate_normal
xs = np.random.randn(1000)
mean = np.random.randn(1000)
var = np.random.random(1000) * 5
for x, m, v in zip(xs, mean, var):
assert abs(multivariate_gaussian(x, m, v) - scipy.stats.multivariate_normal(m, v).pdf(x)) < 1.e-12
def _is_inside_ellipse(x, y, ex, ey, orientation, width, height):
co = np.cos(orientation)
so = np.sin(orientation)
xx = x*co + y*so
yy = y*co - x*so
return (xx / width)**2 + (yy / height)**2 <= 1.
def do_plot_test():
import matplotlib.pyplot as plt
from numpy.random import multivariate_normal as mnormal
from filterpy.stats import covariance_ellipse, plot_covariance
p = np.array([[32, 15], [15., 40.]])
x, y = mnormal(mean=(0, 0), cov=p, size=5000).T
sd = 2
a, w, h = covariance_ellipse(p, sd)
print(np.degrees(a), w, h)
count = 0
color = []
for i in range(len(x)):
if _is_inside_ellipse(x[i], y[i], 0, 0, a, w, h):
color.append('b')
count += 1
else:
color.append('r')
plt.scatter(x, y, alpha=0.2, c=color)
plt.axis('equal')
plot_covariance(mean=(0., 0.),
cov=p,
std=[1,2,3],
alpha=0.3,
facecolor='none')
print(count / len(x))
def test_norm_cdf():
# test using the 68-95-99.7 rule
mu = 5
std = 3
var = std*std
std_1 = (norm_cdf((mu-std, mu+std), mu, var))
assert abs(std_1 - .6827) < .0001
std_1 = (norm_cdf((mu+std, mu-std), mu, std=std))
assert abs(std_1 - .6827) < .0001
std_1half = (norm_cdf((mu+std, mu), mu, var))
assert abs(std_1half - .6827/2) < .0001
std_2 = (norm_cdf((mu-2*std, mu+2*std), mu, var))
assert abs(std_2 - .9545) < .0001
std_3 = (norm_cdf((mu-3*std, mu+3*std), mu, var))
assert abs(std_3 - .9973) < .0001
def test_logpdf():
assert 3.9 < exp(logpdf(1, 1, .01)) < 4.
assert 3.9 < exp(logpdf([1], [1], .01)) < 4.
assert 3.9 < exp(logpdf([[1]], [[1]], .01)) < 4.
logpdf([1., 2], [1.1, 2], cov=np.array([[1., 2], [2, 5]]), allow_singular=False)
logpdf([1., 2], [1.1, 2], cov=np.array([[1., 2], [2, 5]]), allow_singular=True)
def covariance_3d_plot_test():
import matplotlib.pyplot as plt
from filterpy.stats import plot_3d_covariance
mu = [13456.3,2320,672.5]
C = np.array([[1.0, .03, .2],
[.03, 4.0, .0],
[.2, .0, 16.1]])
sample = np.random.multivariate_normal(mu, C, size=1000)
fig = plt.gcf()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs=sample[:, 0], ys=sample[:, 1], zs=sample[:, 2], s=1)
plot_3d_covariance(mu, C, alpha=.4, std=3, limit_xyz=True, ax=ax)
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    covariance_3d_plot_test()
    plt.figure()
    do_plot_test()
|
import os
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
from models import ResNet as resnet_cifar
import pandas as pd
import argparse
import csv
from torch.optim.lr_scheduler import MultiStepLR
from dataLoader import DataLoader
from summaries import TensorboardSummary
# parameters setting
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--name', default='test_model', help='filename to output best model') #save output
parser.add_argument('--dataset', default='cifar-10',help="datasets")
parser.add_argument('--depth', default=20,type=int,help="depth of resnet model")
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--batch_size', default=64,type=int, help='batch size')
parser.add_argument('--epoch', default=200,type=int, help='epoch')
parser.add_argument('--exp_dir',default='./',help='dir for tensorboard')
parser.add_argument('--res', default='./result.txt', help="file to write best result")
args = parser.parse_args()
if os.path.exists(args.exp_dir):
    print('Already exists and will continue training')
# exit()
summary = TensorboardSummary(args.exp_dir)
tb_writer = summary.create_summary()
def train_model(model,criterion,optimizer,scheduler,num_epochs=25):
since = time.time()
    best_model = model.state_dict()  # fallback so the final load_state_dict() always has a value
best_acc = 0.0
best_train_acc = 0.0
# Load unfinished model
unfinished_model_path = os.path.join(args.exp_dir , 'unfinished_model.pt')
if(os.path.exists(unfinished_model_path)):
checkpoint = torch.load(unfinished_model_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
else: epoch = 0
while epoch < num_epochs:
epoch_time = time.time()
print('-'*10)
print('Epoch {}/{}'.format(epoch,num_epochs-1))
#each epoch has a training and validation phase
for phase in ['train','val']:
if phase == 'train':
scheduler.step()
model.train(True)
else:
model.train(False)
running_loss = 0.0
running_corrects = 0.0
top5_corrects = 0.0
# change tensor to variable(including some gradient info)
# use variable.data to get the corresponding tensor
for data in dataloaders[phase]:
#782 batch,batch size= 64
inputs,labels = data
# print (inputs.shape)
if use_gpu:
inputs = Variable(inputs.cuda())
labels = Variable(labels.cuda())
else:
inputs, labels = Variable(inputs), Variable(labels)
#zero the parameter gradients
optimizer.zero_grad()
#forward
outputs = model(inputs, labels, epoch)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs.data, 1)
# _,top5_preds = torch.topk(outputs.data,k=5,dim=1)
# print ('group loss:',group_loss[0])
if phase == 'train':
loss.backward()
optimizer.step()
y = labels.data
batch_size = labels.data.shape[0]
# print(y.resize_(batch_size,1))
running_loss += loss.item()
running_corrects += torch.sum(preds == y)
# top5_corrects += torch.sum(top5_preds == y.resize_(batch_size,1))
epoch_loss = running_loss /dataset_sizes[phase]
epoch_acc = float(running_corrects) /dataset_sizes[phase]
# top5_acc = top5_corrects /dataset_sizes[phase]
print('%s Loss: %.4f top1 Acc:%.4f'%(phase,epoch_loss,epoch_acc))
if phase == 'train':
tb_writer.add_scalar('train/total_loss_epoch', epoch_loss, epoch)
tb_writer.add_scalar('train/acc_epoch', epoch_acc, epoch)
if best_train_acc < epoch_acc:
best_train_acc = epoch_acc
if phase == 'val':
tb_writer.add_scalar('val/total_loss_epoch', epoch_loss, epoch)
tb_writer.add_scalar('val/acc_epoch', epoch_acc, epoch)
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model = model.state_dict()
cost_time = time.time() - epoch_time
print('Epoch time cost {:.0f}m {:.0f}s'.format(cost_time // 60, cost_time % 60))
# Save model periotically
if(epoch % 2 == 0):
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': loss,
}, os.path.join(args.exp_dir , 'unfinished_model.pt'))
epoch += 1
cost_time = time.time() - since
print ('Training complete in {:.0f}m {:.0f}s'.format(cost_time//60,cost_time%60))
print ('Best Train Acc is {:.4f}'.format(best_train_acc))
print ('Best Val Acc is {:.4f}'.format(best_acc))
model.load_state_dict(best_model)
return model,cost_time,best_acc,best_train_acc
if __name__ == '__main__':
print ('DataSets: '+args.dataset)
print ('ResNet Depth: '+str(args.depth))
loader = DataLoader(args.dataset,batch_size=args.batch_size)
dataloaders,dataset_sizes = loader.load_data()
num_classes = 10
if args.dataset == 'cifar-10':
num_classes = 10
if args.dataset == 'cifar-100':
num_classes = 100
model = resnet_cifar(depth=args.depth, num_classes=num_classes)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1,
momentum=0.9, nesterov=True, weight_decay=1e-4)
# define loss and optimizer
criterion = nn.CrossEntropyLoss()
    scheduler = MultiStepLR(optimizer, milestones=[int(args.epoch * 0.4), int(args.epoch * 0.6), int(args.epoch * 0.8)], gamma=0.1)
use_gpu = torch.cuda.is_available()
if use_gpu:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
model = torch.nn.DataParallel(model, device_ids=args.gpu_ids)
# patch_replication_callback(model)
model = model.cuda()
print( args.gpu_ids)
model,cost_time,best_acc,best_train_acc = train_model(model=model,
optimizer=optimizer,
criterion=criterion,
scheduler=scheduler,
num_epochs=args.epoch)
exp_name = 'resnet%d dataset: %s batchsize: %d epoch: %d bestValAcc: %.4f bestTrainAcc: %.4f \n' % (
args.depth, args.dataset,args.batch_size, args.epoch,best_acc,best_train_acc)
os.system('rm ' + os.path.join(args.exp_dir , 'unfinished_model.pt') )
torch.save(model.state_dict(), os.path.join(args.exp_dir , 'saved_model.pt'))
    with open(args.res, 'a') as f:
        f.write(exp_name)
|
#!/usr/bin/env python
"""
This module provides the Site.List data access object.
"""
from WMCore.Database.DBFormatter import DBFormatter
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
class List(DBFormatter):
"""
    Site List DAO class.
"""
def __init__(self, logger, dbi, owner):
"""
Add schema owner and sql.
"""
DBFormatter.__init__(self, logger, dbi)
        self.owner = "%s." % owner if owner not in ("", "__MYSQL__") else ""
self.sql = \
"""
SELECT S.SITE_ID, S.SITE_NAME
FROM %sSITES S
""" % (self.owner)
    def execute(self, conn, site_name="", transaction=False):
        """
        Lists all sites when site_name is not provided; otherwise filters by site name.
        """
sql = self.sql
if site_name == "":
result = self.dbi.processData(sql, conn=conn, transaction=transaction)
else:
sql += "WHERE S.SITE_NAME = :site_name"
binds = { "site_name" : site_name }
result = self.dbi.processData(sql, binds, conn, transaction)
return self.formatDict(result)
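# Hedged usage sketch (logger, dbi, owner, and conn come from the surrounding DBS/WMCore setup;
# the values below are illustrative only):
#   dao = List(logger, dbi, owner="CMS_DBS3_OWNER")
#   all_sites = dao.execute(conn)
#   one_site = dao.execute(conn, site_name="T1_US_FNAL_Disk")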
|
#!/usr/bin/env python
"""Tests for the ee.serializer module."""
import json
import unittest
import ee
from ee import apitestcase
from ee import serializer
class SerializerTest(apitestcase.ApiTestCase):
def testSerialization(self):
"""Verifies a complex serialization case."""
class ByteString(ee.Encodable):
"""A custom Encodable class that does not use invocations.
This one is actually supported by the EE API encoding.
"""
def __init__(self, value):
"""Creates a bytestring with a given string value."""
self._value = value
def encode(self, unused_encoder): # pylint: disable-msg=g-bad-name
return {
'type': 'Bytes',
'value': self._value
}
call = ee.ComputedObject('String.cat', {'string1': 'x', 'string2': 'y'})
body = lambda x, y: ee.CustomFunction.variable(None, 'y')
sig = {'returns': 'Object',
'args': [
{'name': 'x', 'type': 'Object'},
{'name': 'y', 'type': 'Object'}]}
custom_function = ee.CustomFunction(sig, body)
to_encode = [
None,
True,
5,
7,
3.4,
2.5,
'hello',
ee.Date(1234567890000),
ee.Geometry(ee.Geometry.LineString(1, 2, 3, 4), 'SR-ORG:6974'),
ee.Geometry.Polygon([
[[0, 0], [10, 0], [10, 10], [0, 10], [0, 0]],
[[5, 6], [7, 6], [7, 8], [5, 8]],
[[1, 1], [2, 1], [2, 2], [1, 2]]
]),
ByteString('aGVsbG8='),
{
'foo': 'bar',
'baz': call
},
call,
custom_function
]
        self.assertEqual(apitestcase.ENCODED_JSON_SAMPLE,
                         json.loads(serializer.toJSON(to_encode)))
def testRepeats(self):
"""Verifies serialization finds and removes repeated values."""
test1 = ee.Image(5).mask(ee.Image(5)) # pylint: disable-msg=no-member
expected1 = {
'type': 'CompoundValue',
'scope': [
['0', {
'type': 'Invocation',
'arguments': {
'value': 5
},
'functionName': 'Image.constant'
}],
['1', {
'type': 'Invocation',
'arguments': {
'image': {
'type': 'ValueRef',
'value': '0'
},
'mask': {
'type': 'ValueRef',
'value': '0'
}
},
'functionName': 'Image.mask'
}]
],
'value': {
'type': 'ValueRef',
'value': '1'
}
}
        self.assertEqual(expected1, json.loads(serializer.toJSON(test1)))
if __name__ == '__main__':
unittest.main()
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Categorie(models.Model):
All_Cat = models.CharField(max_length=20, null=True)
def __str__(self):
return self.All_Cat
class Post(models.Model):
Cat = models.ForeignKey(Categorie, on_delete=models.CASCADE, null=True)
usr = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
Title = models.CharField(max_length=40, null=True)
Post_date = models.DateTimeField(null=True)
Short_dis = models.TextField(null=True)
Long_dis = models.TextField(null=True)
image = models.FileField(null=True)
def __str__(self):
return self.Title+'--'+self.Cat.All_Cat+'--'+self.usr.username
class LikeComment(models.Model):
post_data = models.ForeignKey(Post,on_delete=models.CASCADE,null=True)
usr = models.ForeignKey(User,on_delete=models.CASCADE, null=True)
like = models.BooleanField(default=False)
comment = models.TextField(null=True)
date = models.DateField(null=True)
def __str__(self):
return self.post_data.Title
class UserDetail(models.Model):
usr = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
image = models.FileField(null=True)
def __str__(self):
return self.usr.username
|
import os
PROJECT_REPORT_BOILERPLATE_URL = 'https://github.com/cloudblue/connect-report-python-boilerplate.git'
PROJECT_REPORT_BOILERPLATE_TAG = os.environ.get('PROJECT_REPORT_BOILERPLATE_TAG')
|
import json
from chargebee.model import Model
from chargebee import request
from chargebee import APIError
class PaymentSource(Model):
class Card(Model):
fields = ["first_name", "last_name", "iin", "last4", "brand", "funding_type", "expiry_month", "expiry_year", "billing_addr1", "billing_addr2", "billing_city", "billing_state_code", "billing_state", "billing_country", "billing_zip", "masked_number"]
pass
class BankAccount(Model):
fields = ["last4", "name_on_account", "bank_name", "mandate_id", "account_type", "echeck_type", "account_holder_type"]
pass
class AmazonPayment(Model):
fields = ["email", "agreement_id"]
pass
class Paypal(Model):
fields = ["email", "agreement_id"]
pass
fields = ["id", "customer_id", "type", "reference_id", "status", "gateway", "gateway_account_id", \
"ip_address", "issuing_country", "card", "bank_account", "amazon_payment", "paypal", "deleted"]
@staticmethod
def create_using_temp_token(params, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources","create_using_temp_token"), params, env, headers)
@staticmethod
def create_using_permanent_token(params, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources","create_using_permanent_token"), params, env, headers)
@staticmethod
def create_card(params, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources","create_card"), params, env, headers)
@staticmethod
def create_bank_account(params, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources","create_bank_account"), params, env, headers)
@staticmethod
def update_card(id, params=None, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources",id,"update_card"), params, env, headers)
@staticmethod
def verify_bank_account(id, params, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources",id,"verify_bank_account"), params, env, headers)
@staticmethod
def retrieve(id, env=None, headers=None):
return request.send('get', request.uri_path("payment_sources",id), None, env, headers)
@staticmethod
def list(params=None, env=None, headers=None):
return request.send_list_request('get', request.uri_path("payment_sources"), params, env, headers)
@staticmethod
def switch_gateway_account(id, params, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources",id,"switch_gateway_account"), params, env, headers)
@staticmethod
def export_payment_source(id, params, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources",id,"export_payment_source"), params, env, headers)
@staticmethod
def delete(id, env=None, headers=None):
return request.send('post', request.uri_path("payment_sources",id,"delete"), None, env, headers)
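# --- Hedged usage sketch (not part of the original module) ---
# Once the chargebee client has been configured with a site and API key, the
# static helpers above wrap the REST endpoints, e.g. (ids/params illustrative):
#   entry = PaymentSource.retrieve("payment_source_id")
#   result = PaymentSource.list({"limit": 5})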
|
"""
A module containing class for map containing all objects
"""
import config
import os
from time import sleep
class Map:
"""
A Generalised class for all level maps
"""
def __init__(self, map_id, columns, rows, map_length):
"""
Initialises the attributes of map
"""
        self.id = map_id
self.length = map_length
self.rows = rows
self.columns = columns
self.map_array = [[' ' for _ in range(1, self.length + 1)] for _ in range(1, self.rows + 1)]
self.object_array = [[0 for _ in range(1, self.length + 1)] for _ in range(1, self.rows + 1)]
self.left_pointer = 0
self.right_pointer = self.columns
self.foreground = []
self.background = []
self.checkpoints = []
self.enemies = []
self.bridges = []
self.holes = []
self.coins = []
# self.control_music = ''
def view_map(self, reverse=False):
flag = 1
lp = 0
rp = self.columns
if reverse:
flag = -1
rp = self.length
lp = self.length - self.columns
# for i in self.map_array[:]:
# for j in i[lp:rp]:
# print(j, end='')
# print()
# print('\r', end='')
while True:
os.system(config.CLEAR_COMMAND)
for i in self.map_array[:]:
for j in i[lp:rp]:
print(j, end='')
print()
print('\r', end='')
if rp > self.length or lp < 0:
break
lp += flag
rp += flag
# sleep(0.001)
# def remove_obj(self, ):
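# --- Hedged usage sketch (not part of the original module; values are illustrative) ---
#   level = Map(map_id=1, columns=80, rows=24, map_length=400)
#   level.map_array[23] = ['_'] * level.length   # a floor row, purely for demonstration
#   level.view_map()                             # scrolls the 80-column window across the map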
|
import numpy as np
import tensorflow as tf
from libs.utils.calc_ious import bbox_giou, bbox_iou
__all__ = ['get_losses']
def get_losses(pred_raw, pred_decoded, label, bboxes, stride, iou_loss_thr, num_classes):
"""
Args:
pred_decoded: decoded yolo output
pred_raw: raw yolo output
"""
batch_size, grid_size = pred_raw.shape[0], pred_raw.shape[1]
input_size = tf.cast(stride * grid_size, tf.float32)
pred_raw = tf.reshape(pred_raw, (batch_size, grid_size, grid_size, 3, 5+num_classes))
pred_raw_conf = pred_raw[:, :, :, :, 4:5]
pred_raw_prob = pred_raw[:, :, :, :, 5:]
pred_decoded_xywh = pred_decoded[:, :, :, :, 0:4]
pred_decoded_conf = pred_decoded[:, :, :, :, 4:5]
label_xywh = label[:, :, :, :, 0:4]
label_conf = label[:, :, :, :, 4:5]
label_prob = label[:, :, :, :, 5:]
giou = tf.expand_dims(bbox_giou(pred_decoded_xywh, label_xywh), axis=-1)
bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
giou_loss = label_conf * bbox_loss_scale * (1 - giou)
    # For each prediction box, find the largest IoU with any ground-truth box
## pred_decoded_xywh.shape: [batch_size, y_idx, x_idx, num_scales, 4]
## bboxes.shape: [batch_size, max_num_bboxes_per_scale, 4]
iou = bbox_iou(pred_decoded_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
## iou.shape: [batch_size, y_idx, x_idx, num_scales, max_num_bboxes_per_scale]
max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
    # If the largest IoU is below the threshold, treat the prediction box as containing no object (background)
respond_bg = (1.0 - label_conf) * tf.cast(max_iou < iou_loss_thr, tf.float32)
conf_focal = tf.pow(label_conf - pred_decoded_conf, 2)
    # Confidence loss: the predicted confidence should be 1 when the grid cell
    # contains an object and 0 when it does not
conf_loss = conf_focal * (
label_conf * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_conf, logits=pred_raw_conf)
+
respond_bg * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_conf, logits=pred_raw_conf)
)
prob_loss = label_conf * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=pred_raw_prob)
giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1, 2, 3, 4]))
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1, 2, 3, 4]))
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1, 2, 3, 4]))
return giou_loss, conf_loss, prob_loss
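# --- Hedged illustration (not part of the original module) ---
# bbox_loss_scale = 2 - w*h / input_size**2 gives small ground-truth boxes a
# larger weight: a box covering the whole image gets ~1.0, a tiny box gets ~2.0.
# A quick numeric check, assuming an input resolution of 416 (illustrative only):
if __name__ == '__main__':
    demo_input_size = 416.0
    for w, h in [(416.0, 416.0), (104.0, 104.0), (16.0, 16.0)]:
        scale = 2.0 - 1.0 * w * h / (demo_input_size ** 2)
        print('gt box {:d}x{:d} -> bbox_loss_scale {:.4f}'.format(int(w), int(h), scale))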
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# License : GPL3
# Author : Jingxin Fu <jingxinfu.tj@gmail.com>
# Date : 11/02/2020
# Last Modified Date: 23/05/2020
# Last Modified By : Jingxin Fu <jingxinfu.tj@gmail.com>
import setuptools
from Biopyutils import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
NAME='Biopyutils'
try:
f = open("requirements.txt", "rb")
REQUIRES = [i.strip() for i in f.read().decode("utf-8").split("\n")]
f.close()
except FileNotFoundError:
print("'requirements.txt' not found!")
REQUIRES = []
setuptools.setup(
name=NAME,
version=__version__,
author="Jingxin Fu",
author_email="jingxinfu.tj@gmail.com",
description="A bionformatic tookits",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://jingxinfu.github.io/"+NAME,
packages=setuptools.find_packages(),
scripts=['bin/'+NAME],
package_data={NAME: ["data/*",'R/*','R/*/*'],},
include_package_data=True,
install_requires=REQUIRES,
python_requires='>=3.5',
keywords= ['Gene ID Convertor', 'Bioinformatics','Genomics','Computational Biologist'],
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Operating System :: Unix",
"Operating System :: MacOS",
]
)
|
from setuptools import setup
setup(name='gtab',
version='0.8',
author="EPFL DLAB",
author_email="epfl.dlab@gmail.com",
description='gtab (Google Trends Anchor Bank) allows users to obtain precisely calibrated time series of search interest from Google Trends.',
long_description='For a project description see https://github.com/epfl-dlab/GoogleTrendsAnchorBank/.',
url='https://github.com/epfl-dlab/GoogleTrendsAnchorBank',
packages=['gtab'],
include_package_data=True,
install_requires=[
'networkx',
'pytrends',
'tqdm',
'pandas',
'numpy',
],
entry_points={
'console_scripts': ['gtab-init=gtab.command_line:init_dir',
'gtab-print-options=gtab.command_line:print_options',
'gtab-set-options=gtab.command_line:set_options',
'gtab-set-blacklist=gtab.command_line:set_blacklist',
'gtab-set-hitraffic=gtab.command_line:set_hitraffic',
'gtab-list=gtab.command_line:list_gtabs',
'gtab-rename=gtab.command_line:rename_gtab',
'gtab-delete=gtab.command_line:delete_gtab',
'gtab-set-active=gtab.command_line:set_active_gtab',
'gtab-create=gtab.command_line:create_gtab',
'gtab-query=gtab.command_line:new_query'
]
},
classifiers=["License :: OSI Approved :: MIT License"],
python_requires='>=3.6',
)
|
import sys
def inc_dict_value(d,k):
try:
d[k] += 1
except KeyError:
d[k] = 1
def update_progress(progress, prefix="Precent", barLength=10) :
# copied from https://stackoverflow.com/questions/3160699/python-progress-bar
status = ""
if isinstance(progress, int): progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\n"
if progress < 0:
progress = 0
status = "Halt...\n"
if progress >= 1:
progress = 1
status = "Done...\n"
block = int(round(barLength*progress))
text = "\r{0}: [{1}] {2:.2f}% {3}".format( prefix , "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
def match(file1,file2,outfile) :
joint_dict = {}
t1_dict = {}
t2_dict = {}
t1_and_t2 = {}
with open(file1,"r") as f1, open(file2,"r") as f2 :
for line1 , line2 in zip(f1,f2) :
tok1 = line1.rstrip().split()
tok2 = line2.rstrip().split()
matched = [t1 + '_' + t2 for t1,t2 in zip(tok1,tok2)]
for x in matched: inc_dict_value(joint_dict,x)
for x in tok1: inc_dict_value(t1_dict,x)
for x in tok2: inc_dict_value(t2_dict,x)
T = 0
for key in list(t1_dict.keys()) : T += t1_dict[key]
for key in list(joint_dict.keys()) :
(t1,t2) = key.split('_')
t1_and_t2[key] = joint_dict[key]*1.0/(t1_dict[t1])
with open(outfile,"w") as outfp:
for key in sorted(t1_and_t2, key=t1_and_t2.get, reverse=True) :
(t1,t2) = key.split('_')
print("{0} {1} {2:1.3f} {3:1.3f} {4:1.3f} ".format(t1,t2,t1_dict[t1]*1.0/T,t2_dict[t2]*1.0/T,t1_and_t2[key]),file=outfp)
return
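# --- Hedged usage sketch (not part of the original script); only runs when the
# module is executed directly, so importing match() elsewhere is unaffected.
if __name__ == '__main__':
    counts = {}
    for tok in ['a', 'b', 'a', 'c', 'a']:
        inc_dict_value(counts, tok)
    print(counts)                                   # {'a': 3, 'b': 1, 'c': 2}
    for i in range(0, 101, 10):
        update_progress(i / 100.0, prefix="Demo", barLength=20)
    print()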
|
import gooeypie as gp
def change_colour(event):
# Convert each number value from 0 to 255 to a 2-digit hex code
rr = str(hex(red_value.value))[2:].rjust(2, '0')
gg = str(hex(green_value.value))[2:].rjust(2, '0')
bb = str(hex(blue_value.value))[2:].rjust(2, '0')
# Set the background colour
colour.background_colour = f'#{rr}{gg}{bb}'
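# Hedged worked example (illustrative only): hex(255) == '0xff' -> 'ff', while
# hex(5) == '0x5' -> '5' -> rjust(2, '0') -> '05', so values (255, 5, 0)
# produce the colour string '#ff0500'.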
app = gp.GooeyPieApp('Colour Mixer')
red_label = gp.Label(app, 'Red')
green_label = gp.Label(app, 'Green')
blue_label = gp.Label(app, 'Blue')
red_value = gp.Number(app, 0, 255, 5)
green_value = gp.Number(app, 0, 255, 5)
blue_value = gp.Number(app, 0, 255, 5)
# loop through the Number widgets, setting relevant options
for number in (red_value, green_value, blue_value):
number.add_event_listener('change', change_colour)
number.wrap = False
number.margin_top = 0
# Empty style label to display the colour
colour = gp.StyleLabel(app, '\n\n\n\n\n')
colour.background_colour = 'black'
app.set_grid(3, 3)
app.add(red_label, 1, 1)
app.add(green_label, 1, 2)
app.add(blue_label, 1, 3)
app.add(red_value, 2, 1)
app.add(green_value, 2, 2)
app.add(blue_value, 2, 3)
app.add(colour, 3, 1, column_span=3, fill=True)
app.run()
|
print("hello world")
# Python can do arithmetic calculations
a=1+1
print(a)
print(100**10)
#for statement
for val in range(5):
print(val)
#if statement
num=5
if num>=0:
print('positive')
else:
print('negative')
#string concatenation
string="python"
string1=" is easy"
print(string+string1)
# booleans: True or False
b=not True
c=not False
print(b)
print(c)
#in operator
animal='dog'
if animal in ['dog','cat','lion']:
print(True)
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
import struct
import numpy as np
import random
import tensorflow as tf
def broadcast_to_f32():
para = []
broadcast_shape = []
# init the input data and parameters
broadcast_dimcount = int(np.random.randint(1, high=7, size=1))
zero_point = int(np.random.randint(-6, high=6, size=1))
std = int(np.random.randint(1, high=2, size=1))
for i in range(0, broadcast_dimcount):
broadcast_shape.append(int(np.random.randint(1, high=32, size=1)))
input_shape_t = []
for i in range(0, broadcast_dimcount):
if i != broadcast_dimcount -1 :
choice_list = [0, 1, broadcast_shape[i]]
else:
choice_list = [1, broadcast_shape[i]]
input_shape_t.append(int(np.random.choice(choice_list, 1)))
input_shape = []
for i in input_shape_t:
if i != 0:
input_shape.append(i)
else:
input_shape.clear()
input_dimcount = len(input_shape)
src_in = np.random.normal(zero_point, std, input_shape)
out_calcu = tf.broadcast_to(src_in, shape=broadcast_shape)
sess = tf.Session()
src_out = sess.run(out_calcu)
src_in_1 = src_in.ravel('C')
src_out_1 = src_out.flatten()
total_size = (len(src_in_1) + len(src_out_1)) + 2 + input_dimcount + broadcast_dimcount
para.append(total_size)
para.append(input_dimcount)
para.append(broadcast_dimcount)
for i in range(0, input_dimcount):
para.append(input_shape[i])
for i in range(0, broadcast_dimcount):
para.append(broadcast_shape[i])
print(para)
print(input_shape)
print(broadcast_shape)
with open("broadcast_to_data_f32.bin", "wb") as fp:
data = struct.pack(('%di' % len(para)), *para)
fp.write(data)
data = struct.pack(('%df' % len(src_in_1)), *src_in_1)
fp.write(data)
data = struct.pack(('%df' % len(src_out_1)), *src_out_1)
fp.write(data)
fp.close()
return 0
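# Hedged description of the binary layout written above, based on the struct.pack
# calls: a header of ints
#   [total_size, input_dimcount, broadcast_dimcount, input_shape..., broadcast_shape...]
# followed by the flattened input as float32 and the flattened broadcast output as float32.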
if __name__ == '__main__':
broadcast_to_f32()
print("end")
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created by Roger on 2019/12/3
doc_tex = """#BeginOfDocument ENG_NW_001278_20130418_F00012ERM
rich_ere ENG_NW_001278_20130418_F00012ERM E599 148,156 violence Conflict_Attack Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E619 280,288 violence Conflict_Attack Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E639 413,421 violence Conflict_Attack Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E737 558,565 violent Conflict_Attack Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E847 1131,1139 violence Conflict_Attack Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E659 127,136 denounces Contact_Broadcast Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E681 259,268 denounces Contact_Broadcast Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E703 380,389 denounced Contact_Broadcast Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E813 856,862 called Contact_Broadcast Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E757 690,696 attack Conflict_Attack Generic
rich_ere ENG_NW_001278_20130418_F00012ERM E784 748,753 visit Movement_Transport-Person Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E867 1244,1252 arrested Justice_Arrest-Jail Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E917 1354,1367 demonstrators Conflict_Demonstrate Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E946 1368,1372 took Movement_Transport-Person Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E1013 866,876 protestors Conflict_Demonstrate Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E1033 604,615 demonstrate Conflict_Demonstrate Generic
rich_ere ENG_NW_001278_20130418_F00012ERM E1057 1289,1295 attack Conflict_Attack Actual
rich_ere ENG_NW_001278_20130418_F00012ERM E1086 1484,1494 questioned Contact_Meet Actual
@Coreference C10 E599,E619,E639,E737,E847
@Coreference C679 E659,E681,E703,E813
#EndOfDocument"""
def split_tbf(filename):
lines = open(filename).readlines()
document_list = list()
document = None
for line in lines:
if line.startswith('#BeginOfDocument'):
document = [line]
else:
document += [line]
if line.startswith('#EndOfDocument'):
document_list += [document]
document = None
return document_list
def load_document_dict_from_tbf(filename):
document_list = split_tbf(filename)
document_list = [Document.get_from_lines(document) for document in document_list]
document_dict = {document.doc_id: document for document in document_list}
return document_dict
class Mention:
def __init__(self, doc_id, mention_id, offset, text, event_type, realis):
self.doc_id = doc_id
self.mention_id = mention_id
self.offset = offset
self.text = text
self.event_type = event_type
self.realis = realis
@staticmethod
def get_from_line(line):
att = line.split('\t')
return Mention(doc_id=att[1],
mention_id=att[2],
offset=att[3],
text=att[4],
event_type=att[5],
realis=att[6])
def to_line(self):
return "\t".join(['rich_ere', self.doc_id, self.mention_id, self.offset,
self.text, self.event_type, self.realis])
class Coreference:
def __init__(self, coref_id, mention_list):
self.coref_id = coref_id
self.mention_list = mention_list
@staticmethod
def get_from_line(line):
att = line.split('\t')
return Coreference(coref_id=att[1],
mention_list=att[2].split(','))
def to_line(self):
return "\t".join(['@Coreference',
self.coref_id,
','.join(self.mention_list)])
def to_line_with_type(self, mention_dict):
return "\t".join(['@Coreference',
self.coref_id,
','.join(self.mention_list),
'\n' + ' | '.join([mention_dict[mention].event_type for mention in self.mention_list]),
'\n' + ' | '.join([mention_dict[mention].text for mention in self.mention_list]),
])
class Document:
def __init__(self, doc_id, mention_list, coref_list):
self.doc_id = doc_id
self.mention_list = mention_list
self.coref_list = coref_list
self.mention_dict = {mention.mention_id: mention for mention in mention_list}
@staticmethod
def get_from_lines(lines):
lines = [line.strip() for line in lines]
doc_id = lines[0].split()[1]
mention_list = list()
coref_list = list()
for line in lines[1:]:
if line.startswith("#EndOfDocument"):
return Document(doc_id, mention_list=mention_list, coref_list=coref_list)
elif line.startswith("@Coreference"):
coref_list += [Coreference.get_from_line(line)]
else:
mention_list += [Mention.get_from_line(line)]
def delete_mention_in_doc(self, mention_id):
self.delete_mention_in_coref(mention_id)
to_delete = -1
for index, mention in enumerate(self.mention_list):
if mention.mention_id == mention_id:
to_delete = index
break
if to_delete >= 0:
self.mention_list.pop(to_delete)
def delete_mention_in_coref(self, mention_id):
to_delete = -1
for coref in self.coref_list:
for index, mention_id_in_coref in enumerate(coref.mention_list):
if mention_id_in_coref == mention_id:
to_delete = index
break
if to_delete >= 0:
coref.mention_list.pop(to_delete)
break
to_delete = -1
for index, coref in enumerate(self.coref_list):
if len(coref.mention_list) == 0:
to_delete = index
break
if to_delete >= 0:
self.coref_list.pop(to_delete)
def to_lines(self):
result = list()
result += ['#BeginOfDocument %s' % self.doc_id]
writed_mention_set = set()
for mention in self.mention_list:
if mention.mention_id in writed_mention_set:
continue
writed_mention_set.add(mention.mention_id)
result += [mention.to_line()]
for coref in self.coref_list:
if len(coref.mention_list) == 1:
continue
result += [coref.to_line()]
result += ["#EndOfDocument"]
return '\n'.join(result)
def to_lines_with_type(self):
result = list()
result += ['#BeginOfDocument %s' % self.doc_id]
writed_mention_set = set()
for mention in self.mention_list:
if mention.mention_id in writed_mention_set:
continue
writed_mention_set.add(mention.mention_id)
result += [mention.to_line()]
for coref in self.coref_list:
if len(coref.mention_list) == 1:
continue
result += [coref.to_line_with_type(self.mention_dict)]
result += ["#EndOfDocument"]
return '\n'.join(result)
if __name__ == "__main__":
pass
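# --- Hedged usage sketch (not part of the original script; the path is illustrative) ---
#   docs = load_document_dict_from_tbf('system_output.tbf')
#   for doc_id, doc in docs.items():
#       print(doc.to_lines_with_type())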
|
import FWCore.ParameterSet.Config as cms
###OMTF emulator configuration
simOmtfDigis = cms.EDProducer("L1TMuonBayesOmtfTrackProducer",
srcDTPh = cms.InputTag('simDtTriggerPrimitiveDigis'),
srcDTTh = cms.InputTag('simDtTriggerPrimitiveDigis'),
srcCSC = cms.InputTag('simCscTriggerPrimitiveDigis','MPCSORTED'),
srcRPC = cms.InputTag('simMuonRPCDigis'),
g4SimTrackSrc = cms.InputTag('g4SimHits'),
dumpResultToXML = cms.bool(True),
dumpDetailedResultToXML = cms.bool(False),
XMLDumpFileName = cms.string("TestEvents.xml"),
dumpGPToXML = cms.bool(False),
readEventsFromXML = cms.bool(False),
eventsXMLFiles = cms.vstring("TestEvents.xml"),
dropRPCPrimitives = cms.bool(False),
dropDTPrimitives = cms.bool(False),
dropCSCPrimitives = cms.bool(False),
processorType = cms.string("OMTFProcessor"),
ttTracksSource = cms.string("SIM_TRACKS"),
ghostBusterType = cms.string("GhostBusterPreferRefDt"),
#patternsXMLFile = cms.FileInPath("L1Trigger/L1TMuonBayes/test/expert/optimisedPats0.xml"),
#patternsXMLFile = cms.FileInPath("L1Trigger/L1TMuon/data/omtf_config/Patterns_0x00020007.xml")
#patternsXMLFile = cms.FileInPath("L1Trigger/L1TMuon/data/omtf_config/Patterns_0x0003.xml")
#patternsXMLFile = cms.FileInPath("L1Trigger/L1TMuonBayes/test/expert/GPs_ArtWithThresh.xml")
#patternsXMLFile = cms.FileInPath("L1Trigger/L1TMuonBayes/test/expert/GPs78_withThresh2.xml")
#patternsXMLFile = cms.FileInPath("L1Trigger/L1TMuonBayes/test/expert/GPsNorm1NoCor_WithThresh4.xml")
# bxMin = cms.int32(-3),
# bxMax = cms.int32(4)
)
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
from collections import namedtuple
import os
import json
import unittest
from unittest.mock import patch
import petstore_api
from petstore_api.api.fake_api import FakeApi # noqa: E501
from petstore_api.rest import RESTClientObject, RESTResponse
from petstore_api.model_utils import file_type, model_to_dict
HTTPResponse = namedtuple(
'urllib3_response_HTTPResponse',
['status', 'reason', 'data', 'getheaders', 'getheader']
)
headers = {'Content-Type': 'application/json'}
def get_headers():
return {}
def get_header(name, default=None):
return {}.get(name, default)
class TestFakeApi(unittest.TestCase):
"""FakeApi unit test stubs"""
def setUp(self):
self.api = FakeApi() # noqa: E501
def tearDown(self):
pass
@staticmethod
def mock_response(body_value):
http_response = HTTPResponse(
status=200,
reason='OK',
data=json.dumps(body_value).encode('utf-8'),
getheaders=get_headers,
getheader=get_header
)
return RESTResponse(http_response)
@staticmethod
def assert_request_called_with(
mock_method,
url,
accept='application/json',
http_method='POST',
content_type='application/json',
**kwargs
):
headers = {
'Accept': accept,
'User-Agent': 'OpenAPI-Generator/1.0.0/python',
}
if content_type:
headers['Content-Type'] = content_type
used_kwargs = dict(
_preload_content=True,
_request_timeout=None,
headers=headers,
query_params=[]
)
if 'post_params' in kwargs:
used_kwargs['post_params'] = kwargs['post_params']
if 'body' in kwargs:
used_kwargs['body'] = kwargs['body']
if 'post_params' not in used_kwargs:
used_kwargs['post_params'] = []
mock_method.assert_called_with(
http_method,
url,
**used_kwargs
)
def test_array_model(self):
"""Test case for array_model
"""
from petstore_api.model import animal_farm, animal
endpoint = self.api.array_model_endpoint
assert endpoint.openapi_types['body'] == (animal_farm.AnimalFarm,)
assert endpoint.settings['response_type'] == (animal_farm.AnimalFarm,)
# serialization + deserialization works
with patch.object(RESTClientObject, 'request') as mock_method:
cat = animal.Animal(class_name="Cat", color="black")
body = animal_farm.AnimalFarm([cat])
json_data = [{"className": "Cat", "color": "black"}]
mock_method.return_value = self.mock_response(json_data)
response = self.api.array_model(body=body)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/arraymodel',
body=json_data,
)
assert isinstance(response, animal_farm.AnimalFarm)
assert response == body
def test_boolean(self):
"""Test case for boolean
"""
endpoint = self.api.boolean_endpoint
assert endpoint.openapi_types['body'] == (bool,)
assert endpoint.settings['response_type'] == (bool,)
def test_recursionlimit(self):
"""Test case for recursionlimit
"""
assert sys.getrecursionlimit() == 1234
def test_fake_health_get(self):
"""Test case for fake_health_get
Health check endpoint # noqa: E501
"""
pass
def test_additional_properties_with_array_of_enums(self):
"""Test case for additional_properties_with_array_of_enums
Additional Properties with Array of Enums # noqa: E501
"""
pass
def test_enum_test(self):
"""Test case for enum_test
Object contains enum properties and array properties containing enums
"""
from petstore_api.model.enum_test import EnumTest
from petstore_api.model.string_enum import StringEnum
from petstore_api.model.array_of_enums import ArrayOfEnums
endpoint = self.api.enum_test_endpoint
assert endpoint.openapi_types['enum_test'] == (EnumTest,)
assert endpoint.settings['response_type'] == (EnumTest,)
# serialization + deserialization works w/ inline array
with patch.object(RESTClientObject, 'request') as mock_method:
body = EnumTest(
enum_string_required='lower',
inline_array_of_str_enum=[StringEnum('approved')]
)
json_value = {'enum_string_required': 'lower', 'InlineArrayOfStrEnum': ['approved']}
mock_method.return_value = self.mock_response(json_value)
response = self.api.enum_test(enum_test=body)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/enum-test',
body=json_value,
)
assert isinstance(response, EnumTest)
assert response == body
# serialization + deserialization works w/ refed array
with patch.object(RESTClientObject, 'request') as mock_method:
body = EnumTest(
enum_string_required='lower',
array_of_str_enum=ArrayOfEnums([StringEnum('approved')])
)
json_value = {'enum_string_required': 'lower', 'ArrayOfStrEnum': ['approved']}
mock_method.return_value = self.mock_response(json_value)
response = self.api.enum_test(enum_test=body)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/enum-test',
body=json_value,
)
assert isinstance(response, EnumTest)
assert response == body
def test_array_of_enums(self):
"""Test case for array_of_enums
Array of Enums # noqa: E501
"""
from petstore_api.model import array_of_enums, string_enum
endpoint = self.api.array_of_enums_endpoint
assert endpoint.openapi_types['array_of_enums'] == (array_of_enums.ArrayOfEnums,)
assert endpoint.settings['response_type'] == (array_of_enums.ArrayOfEnums,)
# serialization + deserialization works
with patch.object(RESTClientObject, 'request') as mock_method:
value = [string_enum.StringEnum("placed")]
body = array_of_enums.ArrayOfEnums(value)
value_simple = ["placed"]
mock_method.return_value = self.mock_response(value_simple)
response = self.api.array_of_enums(array_of_enums=body)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/array-of-enums',
body=value_simple,
)
assert isinstance(response, array_of_enums.ArrayOfEnums)
assert response.value == value
def test_number_with_validations(self):
"""Test case for number_with_validations
"""
from petstore_api.model import number_with_validations
endpoint = self.api.number_with_validations_endpoint
assert endpoint.openapi_types['body'] == (number_with_validations.NumberWithValidations,)
assert endpoint.settings['response_type'] == (number_with_validations.NumberWithValidations,)
# serialization + deserialization works
with patch.object(RESTClientObject, 'request') as mock_method:
value = 10.0
body = number_with_validations.NumberWithValidations(value)
mock_method.return_value = self.mock_response(value)
response = self.api.number_with_validations(body=body)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/number',
body=value,
)
assert isinstance(response, number_with_validations.NumberWithValidations)
assert response.value == value
def test_object_model_with_ref_props(self):
"""Test case for object_model_with_ref_props
"""
from petstore_api.model.object_model_with_ref_props import ObjectModelWithRefProps
from petstore_api.model.number_with_validations import NumberWithValidations
endpoint = self.api.object_model_with_ref_props_endpoint
assert endpoint.openapi_types['body'] == (ObjectModelWithRefProps,)
assert endpoint.settings['response_type'] == (ObjectModelWithRefProps,)
json_payloads = [
{}, # only required + no optional properties works
{ # optional properties works
"my_number": 11.0,
"my_string": 'a',
"my_boolean": True,
}
]
# instantiation works
expected_models = [
ObjectModelWithRefProps(),
ObjectModelWithRefProps(
my_number=NumberWithValidations(11.0),
my_string='a',
my_boolean=True
)
]
pairs = zip(json_payloads, expected_models)
# serialization + deserialization works
for (json_payload, expected_model) in pairs:
with patch.object(RESTClientObject, 'request') as mock_method:
mock_method.return_value = self.mock_response(json_payload)
response = self.api.object_model_with_ref_props(body=expected_model)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/object_model_with_ref_props',
body=json_payload,
)
assert isinstance(response, expected_model.__class__)
assert response == expected_model
def test_composed_one_of_number_with_validations(self):
"""Test case for composed_one_of_number_with_validations
"""
from petstore_api.model import animal, composed_one_of_number_with_validations, number_with_validations
endpoint = self.api.composed_one_of_number_with_validations_endpoint
assert endpoint.openapi_types['composed_one_of_number_with_validations'] == (
composed_one_of_number_with_validations.ComposedOneOfNumberWithValidations,)
assert endpoint.settings['response_type'] == (
composed_one_of_number_with_validations.ComposedOneOfNumberWithValidations,)
# serialization + deserialization works
num_with_validations = number_with_validations.NumberWithValidations(10.0)
cat_in_composed = composed_one_of_number_with_validations.ComposedOneOfNumberWithValidations(
class_name="Cat", color="black"
)
import datetime
date = datetime.date(1970, 1, 1)
body_value_simple = [
(num_with_validations, 10.0),
(cat_in_composed, {"className": "Cat", "color": "black"}),
(None, None),
(date, '1970-01-01'),
]
for (body, value_simple) in body_value_simple:
with patch.object(RESTClientObject, 'request') as mock_method:
mock_method.return_value = self.mock_response(value_simple)
response = self.api.composed_one_of_number_with_validations(composed_one_of_number_with_validations=body)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/composed_one_of_number_with_validations',
body=value_simple,
)
assert isinstance(response, body.__class__)
assert response == body
def test_string(self):
"""Test case for string
"""
endpoint = self.api.string_endpoint
assert endpoint.openapi_types['body'] == (str,)
assert endpoint.settings['response_type'] == (str,)
# serialization + deserialization works
with patch.object(RESTClientObject, 'request') as mock_method:
body = "blah"
value_simple = body
mock_method.return_value = self.mock_response(value_simple)
response = self.api.string(body=body)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/string',
body=value_simple,
)
assert isinstance(response, str)
assert response == value_simple
def test_string_enum(self):
"""Test case for string_enum
"""
from petstore_api.model import string_enum
endpoint = self.api.string_enum_endpoint
assert endpoint.openapi_types['body'] == (string_enum.StringEnum,)
assert endpoint.settings['response_type'] == (string_enum.StringEnum,)
# serialization + deserialization works
from petstore_api.rest import RESTClientObject, RESTResponse
with patch.object(RESTClientObject, 'request') as mock_method:
value = "placed"
body = string_enum.StringEnum(value)
mock_method.return_value = self.mock_response(value)
response = self.api.string_enum(body=body)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/refs/enum',
body=value,
)
assert isinstance(response, string_enum.StringEnum)
assert response.value == value
def test_upload_file(self):
# uploads a file
test_file_dir = os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", "testfiles"))
file_path1 = os.path.join(test_file_dir, "1px_pic1.png")
headers = {}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
        api_response = {
'code': 200,
'type': 'blah',
'message': 'file upload succeeded'
}
http_response = HTTPResponse(
status=200,
reason='OK',
            data=json.dumps(api_response).encode('utf-8'),
getheaders=get_headers,
getheader=get_header
)
mock_response = RESTResponse(http_response)
file1 = open(file_path1, "rb")
try:
with patch.object(RESTClientObject, 'request') as mock_method:
mock_method.return_value = mock_response
res = self.api.upload_file(
file=file1)
body = None
post_params=[
('file', ('1px_pic1.png', b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x00\x00\x00\x00:~\x9bU\x00\x00\x00\nIDATx\x9cc\xfa\x0f\x00\x01\x05\x01\x02\xcf\xa0.\xcd\x00\x00\x00\x00IEND\xaeB`\x82', 'image/png')),
]
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/uploadFile',
body=body, post_params=post_params, content_type='multipart/form-data'
)
except petstore_api.ApiException as e:
self.fail("upload_file() raised {0} unexpectedly".format(type(e)))
finally:
file1.close()
# passing in an array of files to when file only allows one
# raises an exceptions
try:
file = open(file_path1, "rb")
with self.assertRaises(petstore_api.ApiTypeError) as exc:
self.api.upload_file(file=[file])
finally:
file.close()
# passing in a closed file raises an exception
with self.assertRaises(petstore_api.ApiValueError) as exc:
file = open(file_path1, "rb")
file.close()
self.api.upload_file(file=file)
def test_upload_files(self):
test_file_dir = os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", "testfiles"))
file_path1 = os.path.join(test_file_dir, "1px_pic1.png")
file_path2 = os.path.join(test_file_dir, "1px_pic2.png")
headers = {}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
        api_response = {
'code': 200,
'type': 'blah',
'message': 'file upload succeeded'
}
http_response = HTTPResponse(
status=200,
reason='OK',
            data=json.dumps(api_response).encode('utf-8'),
getheaders=get_headers,
getheader=get_header
)
mock_response = RESTResponse(http_response)
file1 = open(file_path1, "rb")
file2 = open(file_path2, "rb")
try:
with patch.object(RESTClientObject, 'request') as mock_method:
mock_method.return_value = mock_response
res = self.api.upload_files(
files=[file1, file2])
post_params=[
('files', ('1px_pic1.png', b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x00\x00\x00\x00:~\x9bU\x00\x00\x00\nIDATx\x9cc\xfa\x0f\x00\x01\x05\x01\x02\xcf\xa0.\xcd\x00\x00\x00\x00IEND\xaeB`\x82', 'image/png')),
('files', ('1px_pic2.png', b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x00\x00\x00\x00:~\x9bU\x00\x00\x00\nIDATx\x9cc\xfa\x0f\x00\x01\x05\x01\x02\xcf\xa0.\xcd\x00\x00\x00\x00IEND\xaeB`\x82', 'image/png'))
]
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/uploadFiles',
body=None, post_params=post_params, content_type='multipart/form-data'
)
except petstore_api.ApiException as e:
self.fail("upload_file() raised {0} unexpectedly".format(type(e)))
finally:
file1.close()
file2.close()
# passing in a single file when an array of file is required
# raises an exception
try:
file = open(file_path1, "rb")
with self.assertRaises(petstore_api.ApiTypeError) as exc:
self.api.upload_files(files=file)
finally:
file.close()
def test_download_attachment(self):
"""Ensures that file deserialization works"""
# sample from http://www.jtricks.com/download-text
file_name = 'content.txt'
headers_dict = {
'with_filename': {'Content-Disposition': 'attachment; filename={}'.format(file_name), 'Content-Type': 'text/plain'},
'no_filename': {'Content-Disposition': 'attachment;', 'Content-Type': 'text/plain'}
}
def get_headers(*args):
return args
file_data = (
"You are reading text file that was supposed to be downloaded\r\n"
"to your hard disk. If your browser offered to save you the file,"
"\r\nthen it handled the Content-Disposition header correctly."
)
for key, headers in headers_dict.items():
def get_header(name, default=None):
return headers_dict[key].get(name, default)
http_response = HTTPResponse(
status=200,
reason='OK',
data=file_data,
getheaders=get_headers(headers),
getheader=get_header
)
# deserialize response to a file
mock_response = RESTResponse(http_response)
with patch.object(RESTClientObject, 'request') as mock_method:
mock_method.return_value = mock_response
try:
file_object = self.api.download_attachment(file_name='download-text')
self.assert_request_called_with(
mock_method,
'http://www.jtricks.com/download-text',
http_method='GET',
accept='text/plain',
content_type=None,
)
self.assertTrue(isinstance(file_object, file_type))
self.assertFalse(file_object.closed)
self.assertEqual(file_object.read(), file_data.encode('utf-8'))
finally:
file_object.close()
os.unlink(file_object.name)
def test_upload_download_file(self):
test_file_dir = os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", "testfiles"))
file_path1 = os.path.join(test_file_dir, "1px_pic1.png")
with open(file_path1, "rb") as f:
expected_file_data = f.read()
headers = {'Content-Type': 'application/octet-stream'}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
http_response = HTTPResponse(
status=200,
reason='OK',
data=expected_file_data,
getheaders=get_headers,
getheader=get_header
)
mock_response = RESTResponse(http_response)
file1 = open(file_path1, "rb")
try:
with patch.object(RESTClientObject, 'request') as mock_method:
mock_method.return_value = mock_response
downloaded_file = self.api.upload_download_file(body=file1)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/uploadDownloadFile',
body=expected_file_data,
content_type='application/octet-stream',
accept='application/octet-stream'
)
self.assertTrue(isinstance(downloaded_file, file_type))
self.assertFalse(downloaded_file.closed)
self.assertEqual(downloaded_file.read(), expected_file_data)
except petstore_api.ApiException as e:
self.fail("upload_download_file() raised {0} unexpectedly".format(type(e)))
finally:
file1.close()
downloaded_file.close()
os.unlink(downloaded_file.name)
def test_test_body_with_file_schema(self):
"""Test case for test_body_with_file_schema
"""
pass
def test_test_body_with_query_params(self):
"""Test case for test_body_with_query_params
"""
pass
def test_test_client_model(self):
"""Test case for test_client_model
To test \"client\" model # noqa: E501
"""
pass
def test_test_endpoint_parameters(self):
"""Test case for test_endpoint_parameters
Fake endpoint for testing various parameters 假端點 偽のエンドポイント 가짜 엔드 포인트 # noqa: E501
"""
pass
def test_test_enum_parameters(self):
"""Test case for test_enum_parameters
To test enum parameters # noqa: E501
"""
pass
def test_test_group_parameters(self):
"""Test case for test_group_parameters
Fake endpoint to test group parameters (optional) # noqa: E501
"""
pass
def test_test_inline_additional_properties(self):
"""Test case for test_inline_additional_properties
test inline additionalProperties # noqa: E501
"""
pass
def test_test_json_form_data(self):
"""Test case for test_json_form_data
test json serialization of form data # noqa: E501
"""
pass
def test_test_query_parameter_collection_format(self):
"""Test case for test_query_parameter_collection_format
"""
pass
def test_post_inline_additional_properties_ref_payload(self):
"""Test case for postInlineAdditionlPropertiesRefPayload
"""
from petstore_api.model.inline_additional_properties_ref_payload import InlineAdditionalPropertiesRefPayload
from petstore_api.model.post_inline_additional_properties_payload_request_array_data_inner import PostInlineAdditionalPropertiesPayloadRequestArrayDataInner
endpoint = self.api.post_inline_additional_properties_ref_payload_endpoint
assert endpoint.openapi_types['inline_additional_properties_ref_payload'] == (InlineAdditionalPropertiesRefPayload,)
assert endpoint.settings['response_type'] == (InlineAdditionalPropertiesRefPayload,)
# serialization + deserialization works
from petstore_api.rest import RESTClientObject, RESTResponse
with patch.object(RESTClientObject, 'request') as mock_method:
expected_json_body = {
'arrayData': [
{
'labels': [
None,
'foo'
]
}
]
}
inline_additional_properties_ref_payload = InlineAdditionalPropertiesRefPayload(
array_data=[
PostInlineAdditionalPropertiesPayloadRequestArrayDataInner(labels=[None, 'foo'])
]
)
mock_method.return_value = self.mock_response(expected_json_body)
response = self.api.post_inline_additional_properties_ref_payload(inline_additional_properties_ref_payload=inline_additional_properties_ref_payload)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/postInlineAdditionalPropertiesRefPayload',
body=expected_json_body
)
assert isinstance(response, InlineAdditionalPropertiesRefPayload)
assert model_to_dict(response) == expected_json_body
def test_post_inline_additional_properties_payload(self):
"""Test case for postInlineAdditionlPropertiesPayload
"""
from petstore_api.model.post_inline_additional_properties_payload_request import PostInlineAdditionalPropertiesPayloadRequest
from petstore_api.model.post_inline_additional_properties_payload_request_array_data_inner import PostInlineAdditionalPropertiesPayloadRequestArrayDataInner
endpoint = self.api.post_inline_additional_properties_payload_endpoint
assert endpoint.openapi_types['post_inline_additional_properties_payload_request'] == (PostInlineAdditionalPropertiesPayloadRequest,)
assert endpoint.settings['response_type'] == (PostInlineAdditionalPropertiesPayloadRequest,)
# serialization + deserialization works
from petstore_api.rest import RESTClientObject, RESTResponse
with patch.object(RESTClientObject, 'request') as mock_method:
expected_json_body = {
'arrayData': [
{
'labels': [
None,
'foo'
]
}
]
}
post_inline_additional_properties_payload_request = PostInlineAdditionalPropertiesPayloadRequest(
array_data=[
PostInlineAdditionalPropertiesPayloadRequestArrayDataInner(labels=[None, 'foo'])
]
)
mock_method.return_value = self.mock_response(expected_json_body)
response = self.api.post_inline_additional_properties_payload(post_inline_additional_properties_payload_request=post_inline_additional_properties_payload_request)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/postInlineAdditionalPropertiesPayload',
body=expected_json_body
)
assert isinstance(response, PostInlineAdditionalPropertiesPayloadRequest)
assert model_to_dict(response) == expected_json_body
def test_post_tx_rx_any_of_payload(self):
"""Test case for postInlineAdditionlPropertiesPayload
"""
from petstore_api.model.gm_fruit_no_properties import GmFruitNoProperties
endpoint = self.api.tx_rx_any_of_model_endpoint
assert endpoint.openapi_types['gm_fruit_no_properties'] == (GmFruitNoProperties,)
assert endpoint.settings['response_type'] == (GmFruitNoProperties,)
# serialization + deserialization works
from petstore_api.rest import RESTClientObject, RESTResponse
with patch.object(RESTClientObject, 'request') as mock_method:
expected_json_body = {
'cultivar': 'Alice',
'origin': 'Kazakhstan',
'lengthCm': 7,
}
fruit = GmFruitNoProperties(**expected_json_body)
mock_method.return_value = self.mock_response(expected_json_body)
response = self.api.tx_rx_any_of_model(gm_fruit_no_properties=fruit)
self.assert_request_called_with(
mock_method,
'http://petstore.swagger.io:80/v2/fake/TxRxAnyOfModel',
body=expected_json_body
)
assert isinstance(response, GmFruitNoProperties)
assert model_to_dict(response) == expected_json_body
if __name__ == '__main__':
unittest.main()
|
import sqlite3
_conn = None
_c = None
def init(conn):
    # store the connection and cursor in module globals so the helpers below can use them
    global _conn, _c
    _conn = conn
    _c = conn.cursor()
    # "group" is a reserved word in SQL, so the identifier is quoted everywhere
    _c.execute('CREATE TABLE IF NOT EXISTS "group" (id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(200))')
    _c.execute('CREATE TABLE IF NOT EXISTS perm (id INTEGER PRIMARY KEY AUTOINCREMENT, "group" INTEGER, permname VARCHAR(200), permvalue VARCHAR(200))')
    _conn.commit()
def createGroup(gname):
    _c.execute('INSERT INTO "group" (name) VALUES (?)', (gname,))
    _conn.commit()
    return "Group created"
def getAllGroups():
    _c.execute('SELECT * FROM "group"')
    return _c.fetchall()
def getGroupData(gid):
    _c.execute('SELECT * FROM "group" WHERE id = ?', (gid,))
    return _c.fetchone()
def setPerm(gid, permname, permvalue):
    _c.execute('SELECT * FROM perm WHERE "group" = ? AND permname = ?', (gid, permname))
    if _c.fetchone():
        _c.execute('UPDATE perm SET permvalue = ? WHERE permname = ? AND "group" = ?', (permvalue, permname, gid))
    else:
        _c.execute('INSERT INTO perm ("group", permname, permvalue) VALUES (?, ?, ?)', (gid, permname, permvalue))
    _conn.commit()
    return "Updated group permission"
def delPerm(permid):
    _c.execute('DELETE FROM perm WHERE id = ?', (permid,))
    _conn.commit()
    return "Deleted permission"
|
from datetime import timedelta
import factory
from django.utils import timezone
from oauth2_provider.models import AccessToken
from sso.oauth2.models import Application
from .user import UserFactory
class ApplicationFactory(factory.django.DjangoModelFactory):
client_type = Application.CLIENT_CONFIDENTIAL
authorization_grant_type = Application.GRANT_AUTHORIZATION_CODE
skip_authorization = True
name = "Test oauth app"
application_key = factory.Sequence(lambda n: "app-{}".format(n))
display_name = "a test app"
start_url = "http://example.org"
class Meta:
model = Application
@factory.post_generation
def users(self, create, extracted, **kwargs):
"""Allow a list of users to be passed in"""
if not create:
return
if extracted:
for user in extracted:
self.users.add(user)
class AccessTokenFactory(factory.django.DjangoModelFactory):
application = factory.SubFactory(ApplicationFactory)
token = factory.Sequence(lambda n: f"token{n+1}")
user = factory.SubFactory(UserFactory)
expires = timezone.now() + timedelta(days=1)
scope = "read"
class Meta:
model = AccessToken
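# --- Hedged usage sketch (not part of the original module; for use inside a
# Django test where the database is available) ---
#   token = AccessTokenFactory(scope="read write")
#   app = ApplicationFactory(users=[UserFactory()])   # the post_generation hook adds the users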
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.linear_model import Lasso
import copy, sys, time
Times = 10
Fold = 5
ZERO_TOL = 0.000001
################################################
def read_dataset(data_csv, value_txt):
### read files ###
# read the csv and the observed values
fv = pd.read_csv(data_csv)
value = pd.read_csv(value_txt)
### prepare data set ###
# prepare CIDs
CIDs = np.array(fv['CID'])
# prepare target, train, test arrays
target = np.array(value['a'])
# construct dictionary: CID to feature vector
fv_dict = {}
for cid,row in zip(CIDs, fv.values[:,1:]):
fv_dict[cid] = row
# construct dictionary: CID to target value
target_dict = {}
for cid, val in zip(np.array(value['CID']), np.array(value['a'])):
target_dict[cid] = val
# check CIDs: target_values_filename should contain all CIDs that appear in descriptors_filename
for cid in CIDs:
if cid not in target_dict:
            sys.stderr.write('error: {} misses the target value of CID {}\n'.format(value_txt, cid))
exit(1)
# construct x and y so that the CIDs are ordered in ascending order
CIDs.sort()
x = np.array([fv_dict[cid] for cid in CIDs])
y = np.array([target_dict[cid] for cid in CIDs])
return (CIDs,x,y)
################################################
def learn_Lasso(x_train, y_train, x_test, y_test, a=1.0):
lasso = Lasso(alpha=a, max_iter=10**5)
lasso.fit(x_train, y_train)
r2train = lasso.score(x_train,y_train)
r2test = lasso.score(x_test,y_test)
nonzero = len([w for w in lasso.coef_ if abs(w)>=ZERO_TOL])
return (lasso, nonzero, r2train, r2test)
################################################
try:
CIDs, x, y = read_dataset(sys.argv[1], sys.argv[2])
linreg = sys.argv[3]
lmd = float(sys.argv[4])
except (IndexError, ValueError):
sys.stderr.write("usage: {} (input_data.csv)(input_values.txt)(output_linreg.txt)(lambda)\n\n".format(sys.argv[0]))
exit(1)
### experiment ###
print("Lambda\t{}".format(lmd))
f = open(sys.argv[1])
arr = f.readline().split(',')
K = len(arr)-1
print("NumDesc\t{}".format(K))
f.close()
Conf = []
for split_seed in range(1, Times+1):
kf = KFold(n_splits=Fold, shuffle=True, random_state=split_seed)
fold = 0
Tr = []
Ts = []
Tim = []
NonZ = []
for train, test in kf.split(x):
fold += 1
start_time = time.time()
lasso, nonzero, r2train, r2test = learn_Lasso(x[train], y[train], x[test], y[test], a=lmd)
comp_time = time.time() - start_time
Tr.append(r2train)
Ts.append(r2test)
Tim.append(comp_time)
NonZ.append(nonzero)
#if best_conf == None or r2test > best_conf[1]:
# best_lasso = copy.deepcopy(lasso)
# best_conf = (r2train, r2test, comp_time, nonzero, split_seed, fold)
Conf.append((r2train, r2test, comp_time, nonzero, split_seed, fold, copy.deepcopy(lasso)))
print("{}\tTrain".format(split_seed), end="")
for v in Tr:
print("\t{:.6f}".format(v), end="")
print()
print(" \tTest", end="")
for v in Ts:
print("\t{:.6f}".format(v), end="")
print()
print(" \tTime", end="")
for v in Tim:
print("\t{:.6f}".format(v), end="")
print()
print(" \tNonzero", end="")
for v in NonZ:
print("\t{}".format(v), end="")
print()
### output to XX_linreg.txt ###
Conf.sort(key=lambda a: -a[1])
conf = Conf[int(len(Conf)/2)]
fp = open(linreg, "w")
fp.write( "# data file : {}\n".format(sys.argv[1]))
fp.write( "# value file: {}\n".format(sys.argv[2]))
fp.write(f"# R^2 train: {conf[0]}\n")
fp.write(f"# R^2 test: {conf[1]}\n")
fp.write(f"# comp time: {conf[2]}\n")
fp.write(f"# nonzeros: {conf[3]}\n")
fp.write(f"# split_seed in KFold: {conf[4]}\n")
fp.write(f"# fold: {conf[5]}\n")
fp.write(f"{K}\n")
flag = True
lasso = conf[-1]
for v in lasso.coef_:
if flag == False:
fp.write(" ")
flag = False
fp.write("{}".format(v))
fp.write("\n{}\n".format(lasso.intercept_))
fp.close()
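# --- Hedged usage note (not part of the original script; file names are illustrative) ---
# Run as, e.g.:
#   python this_script.py descriptors.csv values.txt out_linreg.txt 0.01
# where the last argument is the Lasso regularisation strength (lambda).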
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath('.')))
project = 'test'
master_doc = 'index'
|
from newsblur.celeryapp import app
from utils import log as logging
@app.task()
def IndexSubscriptionsForSearch(user_id):
from apps.search.models import MUserSearch
user_search = MUserSearch.get_user(user_id)
user_search.index_subscriptions_for_search()
@app.task()
def IndexSubscriptionsChunkForSearch(feed_ids, user_id):
logging.debug(" ---> Indexing: %s for %s" % (feed_ids, user_id))
from apps.search.models import MUserSearch
user_search = MUserSearch.get_user(user_id)
user_search.index_subscriptions_chunk_for_search(feed_ids)
@app.task()
def IndexFeedsForSearch(feed_ids, user_id):
from apps.search.models import MUserSearch
MUserSearch.index_feeds_for_search(feed_ids, user_id)
|
# pass list in a function
n = int(input("Enter the no. of elements you want in the list: "))
def count(lst):
even = 0
odd = 0
for i in lst:
if i % 2 == 0:
even += 1
print(i, "EVEN")
else:
odd += 1
print(i, "ODD")
return even,odd
lst = []
for i in range(1, n + 1):
app = int(input("Enter the "+ str(i)+ " no. "))
lst.append(app)
even, odd = count(lst)
# print("There are " + str(even) + " even no ")
# print("There are " + str(odd) + " odd no ")
print("There are {} Even no. and {} Odd no.".format(even,odd))
|
#!/usr/bin/env python
# vim: set filencoding=utf8
"""
SnakePlan Setup Script
@author: Mike Crute (mcrute@gmail.com)
@date: July 09, 2010
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import snakeplan
from setuptools import setup, find_packages
setup(
name='snakeplan',
version=snakeplan.__version__,
packages=find_packages(),
description='Open source agile project management',
author='Mike Crute',
author_email='mcrute@gmail.com',
url='http://snakeplan.googlecode.com',
include_package_data=True,
install_requires=[
'django>=1.0',
],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Environment :: Web Environment",
"Framework :: Django",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development",
"Programming Language :: Python",
],
long_description=open('README', "r").read(),
)
|
from celery import shared_task
from ..core.utils import create_thumbnails
from .models import ProductImage, ImageData
@shared_task
def create_product_thumbnails(image_id):
"""Takes ProductImage model, and creates thumbnails for it."""
create_thumbnails(pk=image_id, model=ProductImage, size_set='products')
@shared_task
def create_gallery_thumbnails(image_id):
"""Takes ProductImage model, and creates thumbnails for it."""
create_thumbnails(pk=image_id, model=ImageData, size_set='products')
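# --- Hedged usage sketch (not part of the original module) ---
# Callers typically enqueue these Celery tasks by primary key, e.g.
#   create_product_thumbnails.delay(product_image.pk)
#   create_gallery_thumbnails.delay(image_data.pk)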
|
import os
import pybase64
from telegraph import exceptions, upload_file
from telethon.tl.functions.messages import ImportChatInviteRequest as Get
from userbot import bot
from userbot import CMD_HELP
from userbot.utils import admin_cmd
from userbot.helpers import *
@bot.on(admin_cmd(pattern="lolice(?: |$)(.*)"))
async def lolce(smss):
replied = await smss.get_reply_message()
if not os.path.isdir("./temp/"):
os.makedirs("./temp/")
if not replied:
await smss.edit("reply to a supported media file")
return
if replied.media:
smss = await smss.edit("passing to telegraph...")
else:
await smss.edit("reply to a supported media file")
return
download_locatnoarion = await smss.client.download_media(replied, "./temp/")
if download_locatnoarion.endswith((".webp")):
download_locatnoarion = convert_toimage(download_locatnoarion)
size = os.stat(download_locatnoarion).st_size
if download_locatnoarion.endswith((".jpg", ".jpeg", ".png", ".bmp", ".ico")):
if size > 5242880:
await smss.edit(
"the replied file size is not supported it must me below 5 mb"
)
os.remove(download_locatnoarion)
return
await smss.edit("generating image..")
else:
await smss.edit("the replied file is not supported")
os.remove(download_locatnoarion)
return
try:
response = upload_file(download_locatnoarion)
os.remove(download_locatnoarion)
except exceptions.TelegraphException as exc:
await smss.edit("ERROR: " + str(exc))
os.remove(download_locatnoarion)
return
catnoar = f"https://telegra.ph{response[0]}"
catnoar = await lolice(catnoar)
await smss.delete()
await smss.client.send_file(smss.chat_id, catnoar, reply_to=replied)
@bot.on(admin_cmd(pattern="awooify(?: |$)(.*)"))
async def awoo(smss):
replied = await smss.get_reply_message()
if not os.path.isdir("./temp/"):
os.makedirs("./temp/")
if not replied:
await smss.edit("reply to a supported media file")
return
if replied.media:
smss = await smss.edit("passing to telegraph...")
else:
await smss.edit("reply to a supported media file")
return
download_locatnoarion = await smss.client.download_media(replied, "./temp/")
if download_locatnoarion.endswith((".webp")):
download_locatnoarion = convert_toimage(download_locatnoarion)
size = os.stat(download_locatnoarion).st_size
if download_locatnoarion.endswith((".jpg", ".jpeg", ".png", ".bmp", ".ico")):
if size > 5242880:
await smss.edit(
"the replied file size is not supported it must me below 5 mb"
)
os.remove(download_locatnoarion)
return
await smss.edit("generating image..")
else:
await smss.edit("the replied file is not supported")
os.remove(download_locatnoarion)
return
try:
response = upload_file(download_locatnoarion)
os.remove(download_locatnoarion)
except exceptions.TelegraphException as exc:
await smss.edit("ERROR: " + str(exc))
os.remove(download_locatnoarion)
return
catnoar = f"https://telegra.ph{response[0]}"
catnoar = await awooify(catnoar)
await smss.delete()
await smss.client.send_file(smss.chat_id, catnoar, reply_to=replied)
@bot.on(admin_cmd(pattern="baguette(?: |$)(.*)"))
async def baguet(smss):
replied = await smss.get_reply_message()
if not os.path.isdir("./temp/"):
os.makedirs("./temp/")
if not replied:
await smss.edit("reply to a supported media file")
return
if replied.media:
smss = await smss.edit("passing to telegraph...")
else:
await smss.edit("reply to a supported media file")
return
download_locatnoarion = await smss.client.download_media(replied, "./temp/")
if download_locatnoarion.endswith((".webp")):
download_locatnoarion = convert_toimage(download_locatnoarion)
size = os.stat(download_locatnoarion).st_size
if download_locatnoarion.endswith((".jpg", ".jpeg", ".png", ".bmp", ".ico")):
if size > 5242880:
await smss.edit(
"the replied file size is not supported it must me below 5 mb"
)
os.remove(download_locatnoarion)
return
await smss.edit("generating image..")
else:
await smss.edit("the replied file is not supported")
os.remove(download_locatnoarion)
return
try:
response = upload_file(download_locatnoarion)
os.remove(download_locatnoarion)
except exceptions.TelegraphException as exc:
await smss.edit("ERROR: " + str(exc))
os.remove(download_locatnoarion)
return
catnoar = f"https://telegra.ph{response[0]}"
catnoar = await baguette(catnoar)
await smss.delete()
await smss.client.send_file(smss.chat_id, catnoar, reply_to=replied)
@bot.on(admin_cmd(pattern="iphonex(?: |$)(.*)"))
async def iphon(smss):
replied = await smss.get_reply_message()
if not os.path.isdir("./temp/"):
os.makedirs("./temp/")
if not replied:
await smss.edit("reply to a supported media file")
return
if replied.media:
smss = await smss.edit("passing to telegraph...")
else:
await smss.edit("reply to a supported media file")
return
download_locatnoarion = await smss.client.download_media(replied, "./temp/")
if download_locatnoarion.endswith((".webp")):
download_locatnoarion = convert_toimage(download_locatnoarion)
size = os.stat(download_locatnoarion).st_size
if download_locatnoarion.endswith((".jpg", ".jpeg", ".png", ".bmp", ".ico")):
if size > 5242880:
await smss.edit(
"the replied file size is not supported it must me below 5 mb"
)
os.remove(download_locatnoarion)
return
await smss.edit("generating image..")
else:
await smss.edit("the replied file is not supported")
os.remove(download_locatnoarion)
return
try:
response = upload_file(download_locatnoarion)
os.remove(download_locatnoarion)
except exceptions.TelegraphException as exc:
await smss.edit("ERROR: " + str(exc))
os.remove(download_locatnoarion)
return
catnoar = f"https://telegra.ph{response[0]}"
catnoar = await iphonex(catnoar)
await smss.delete()
await smss.client.send_file(smss.chat_id, catnoar, reply_to=replied)
|