content stringlengths 5 1.05M |
|---|
__version__ = '1.2.8'  # package version string
|
import json
from . import sha256 as sha
import os
class bloque:
    """One block of the chain: a payload plus its hash-linkage metadata."""

    def __init__(self, numero, data, anterior, hashid, estructura):
        """Capture the block number, payload, previous hash, own hash and state flag."""
        self.id = numero
        self.data = data
        self.anterior = anterior
        self.hash = hashid
        self.estructura = estructura

    def get(self):
        """Return the block as a plain dict, ready for JSON serialization."""
        return {
            "id": self.id,
            "content": self.data,
            "previous": self.anterior,
            "hash": self.hash,
            "Estructure": self.estructura,
        }
class blockchain:
    """Append-only hash chain persisted as ./Data/security/<db>_<table>.json.

    Each stored block holds a payload dict, the previous block's hash and a
    structural flag ('correcta'/'incorrecta') used to mark tampering.
    """
    def __init__(self):
        # Genesis "previous hash": 64 zero hex characters.
        self.anterior = '0000000000000000000000000000000000000000000000000000000000000000'
    def crear(self, database, table):
        # Create/overwrite the chain file for a table with an empty JSON value.
        file = open("./Data/security/"+database+"_"+table+".json", "w+", encoding='utf-8')
        file.write(json.dumps('', indent=4))
        file.close()
    def insertar(self, tablas, database, table):
        # Append a new block built from the row dict `tablas`.
        file = open("./Data/security/"+database+"_"+table+".json", "r")
        lista = json.loads(file.read())
        file.close()
        if type(lista)!=list:
            # Fresh chain: the file still holds the '' written by crear().
            lista = []
        else:
            # Resume the chain from the last stored block's hash.
            self.anterior = lista[-1]['hash']
        key = len(lista)
        key+=1  # block ids are 1-based
        val = []
        for x in list(tablas.values()):
            if type(x) == type(b''):
                x = x.decode()  # bytes payloads are decoded before hashing
            val.append(x)
        values = ",".join(str(x) for x in val)
        id_hash = sha.generate(values)  # block hash = SHA-256 of the joined values
        nuevo = bloque(key, tablas, self.anterior, id_hash, 'correcta')
        lista.append(nuevo.get())
        file = open("./Data/security/"+database+"_"+table+".json", "w+", encoding='utf-8')
        # NOTE(review): this writer handle is never closed/flushed explicitly,
        # unlike the other methods -- confirm whether that is intentional.
        file.write(json.dumps([j for j in lista], indent=4))
        self.anterior = id_hash
    def insert(self, data: list, database: str, table: str):
        # Public entry point: convert a row list into a 1-based dict and store it.
        dic = {}
        y=1
        for x in data:
            if type(x) == type(b''):
                x = x.decode()
            dic.update({y:x})
            y+=1
        self.insertar(dic, database, table)
    def update(self, tabla, registro, database, table, h2):
        # Replace content/hash of the block whose hash equals `registro`;
        # if the new hash h2 differs from the old one, flag the block tampered.
        file = open("./Data/security/"+database+"_"+table+".json", "r")
        lista = json.loads(file.read())
        file.close()
        # NOTE(review): the loop variable shadows the module-level `bloque` class.
        for bloque in lista:
            if registro == bloque["hash"]:
                bloque["content"] = tabla
                bloque["hash"] = h2
                if registro != h2:
                    bloque["Estructure"] = 'incorrecta'
                break
        file = open("./Data/security/"+database+"_"+table+".json", "w+", encoding='utf-8')
        file.write(json.dumps([j for j in lista], indent=4))
        file.close()
        # self.graficar(database, table)
    def dropAddColumn(self, row1, row2, database, table):
        # Re-hash a row before (row1) and after (row2) a column change and
        # update the matching block with the new content/hash.
        ldata = ",".join(str(x) for x in row1)
        lnewData = ",".join(str(x) for x in row2)
        h1 = sha.generate(ldata)
        h2 = sha.generate(lnewData)
        dic = {}
        y=1
        for x in row2:
            dic.update({y:x})
            y+=1
        self.update(dic, h1, database, table, h2)
    def delete(self, registro, database, table):
        # Remove the block whose hash equals `registro`, flagging the follower
        # 'incorrecta' when removal breaks the hash linkage.
        file = open("./Data/security/"+database+"_"+table+".json", "r")
        lista = json.loads(file.read())
        file.close()
        anterior = None
        i=0
        for bloque in lista:
            if registro == bloque["hash"]:
                if anterior:
                    if i!=len(lista)-1:
                        siguiente = lista[i+1]
                        if anterior["hash"]!= siguiente["previous"]:
                            siguiente["Estructure"]='incorrecta'
                else:
                    # Deleting the first block: its follower loses the genesis link.
                    # NOTE(review): `lista[i+1]` raises IndexError when the chain
                    # has exactly one block -- confirm callers never hit this.
                    if len(lista):
                        di = lista[i+1]
                        di["Estructure"]='incorrecta'
                # NOTE(review): removing from `lista` while iterating over it
                # makes the loop skip the element after the removed one.
                lista.remove(bloque)
            anterior = bloque
            i+=1
        file = open("./Data/security/"+database+"_"+table+".json", "w+", encoding='utf-8')
        file.write(json.dumps([j for j in lista], indent=4))
        file.close()
        # self.graficar(database, table)
    def CompararHash(self, data:list, newData: list, database, table):
        # Hash the old and new row, persist the change, and report:
        # 0 when the hashes match, 6 (error code) when they differ.
        ldata = ",".join(str(x) for x in data)
        lnewData = ",".join(str(x) for x in newData)
        h1 = sha.generate(ldata)
        h2 = sha.generate(lnewData)
        dic = {}
        y=1
        for x in newData:
            dic.update({y:x})
            y+=1
        self.update(dic, h1, database, table, h2)
        if h1 == h2:
            return 0
        return 6
    def EliminarHash(self, registro, database, table):
        # Hash the row to locate its block, then delete that block.
        row = ",".join(str(x) for x in registro)
        h1 = sha.generate(row)
        self.delete(h1, database, table)
    def graficar(self, database, table):
        # Render the chain as a Graphviz .dot file and rasterize it with `dot`.
        file = open("./Data/security/"+database+"_"+table+".json", "r")
        lista = json.loads(file.read())
        file.close()
        if type(lista)!=list:
            lista = []
        if type(lista)==list:
            f= open('./Data/security/'+database+'_'+table+'.dot', 'w',encoding='utf-8')
            f.write("digraph dibujo{\n")
            f.write('graph [ordering="out"];')
            f.write('rankdir=TB;\n')
            f.write('node [shape = box];\n')
            data =""
            t=0
            color = 'white'
            for x in lista:
                # Once one tampered block is seen the color stays 'orangered'
                # for every later node (never reset) -- appears intentional.
                if x['Estructure']=='incorrecta':
                    color = 'orangered'
                nombre = 'Nodo'+str(t)
                data = ''
                for y in list(x.values()):
                    if type(y) == dict:
                        d = ",".join(str(x) for x in list(y.values()))
                        data+="""<tr><td>"""+d+"""</td></tr>"""
                    else:
                        # The state flag itself is not rendered in the node table.
                        if str(y)!='correcta' and str(y)!='incorrecta':
                            data+="""<tr><td>"""+str(y)+"""</td></tr>"""
                tabla ="""<<table BGCOLOR='"""+color+"""' cellspacing='0' cellpadding='20' border='0' cellborder='1'>
"""+data+"""
</table> >"""
                f.write(nombre+' [label = '+tabla+', fontsize="30", shape = plaintext ];\n')
                t+=1
            f.write('}')
            f.close()
            # NOTE(review): shell command built from database/table names --
            # command injection risk if those names are user-controlled.
            os.system('dot -Tpng ./Data/security/'+database+'_'+table+'.dot -o tupla.png')
    def fail(self, database, table):
        # True when any block in the chain is flagged 'incorrecta'.
        file = open("./Data/security/"+database+"_"+table+".json", "r")
        lista = json.loads(file.read())
        file.close()
        if type(lista)!=list:
            lista = []
        for x in lista:
            if x['Estructure']=='incorrecta':
                return True
        return False
    def activo(self, database, table):
        # True when a chain file already exists for this table.
        if os.path.isfile("./Data/security/"+database+"_"+table+".json"):
            return True
        return False
from django.contrib import admin
from .models import ProductData
# Register your models here.
# admin.site.register(user_info_10004)
admin.site.register(ProductData)  # expose ProductData in the Django admin
# redis-server
# celery -A inventory_scrapping worker -l info
# celery -A inventory_scrapping beat -l info |
import imptools
def enable_relative():
    """
    Enable relative imports for scripts that are not executed as module.
    Usually, scripts that are part of a module and use relative imports must be
    run as `python3 -m module.script`. However, this requires being in the
    correct working directory and can be annoying. The `enable_relative()`
    function allows to execute those scripts normally as `python3 script.py`.
    Since PEP 366, this can be achieved by setting the `__package__` variable in
    the script and importing the package or making it available on the Python
    import path. The `enable_relative()` function hides this behind a simple
    function that can be imported and called inside the script, before any
    relative imports.
    ```
    import imptools
    imptools.enable_relative()
    # Relative imports...
    ```
    Raises:
      ModuleNotFoundError: If the parent directory of the script that calls this
        function is not a module.
    """
    import pathlib
    import __main__
    # Skip if the script is executed as a module.
    if __main__.__package__ is not None:
        return
    # Skip if running from interactive interpreter.
    if not hasattr(__main__, '__file__'):
        return
    # Assume the module is simply the parent directory.
    root = pathlib.Path(__main__.__file__).parent
    # Import the module without polluting the Python import path.
    imptools.import_path(root)
    # Set the package variable so Python can resolve relative imports.
    __main__.__package__ = root.name
|
# LMS Adaptive Notch Filter
# pr7_1_2
from Noisy import *
from Universal import *
if __name__ == '__main__':
    # LMS adaptive notch filter demo: remove 50 Hz power-line interference
    # from a speech recording and report SNR before/after.
    filename = 'bluesky1.wav'  # file path
    speech = Speech()  # Speech class instantiation
    ss, fs = speech.audioread(filename, 8000)  # read data
    ss = ss - np.mean(ss)  # remove DC offset
    # NOTE(review): normalization uses np.max(ss), not np.max(np.abs(ss)),
    # so negative peaks may exceed 1 -- confirm intended.
    s = ss / np.max(ss)  # normalized
    N = len(s)  # signal length
    time = np.arange(N) / fs  # time axis in seconds
    ns = 0.5 * np.cos(2 * np.pi * 50 * time)  # 50Hz power-frequency interference
    x = s + ns  # noisy signal
    noisy = Noisy()  # Noisy class instantiation
    snr1 = noisy.SNR_singlech(s, x)
    # Quadrature reference pair at the interference frequency.
    x1 = np.cos(2 * np.pi * 50 * time)
    x2 = np.sin(2 * np.pi * 50 * time)
    w1 = 0.1  # weight 1
    w2 = 0.1  # weight 2
    e = np.zeros(N)  # error signal (filter output) initialization
    y = np.zeros(N)  # adaptive estimate of the interference
    mu = 0.05  # LMS step size
    for i in range(N):  # LMS adaptive notch filter
        y[i] = w1 * x1[i] + w2 * x2[i]
        e[i] = x[i] - y[i]
        w1 = w1 + mu * e[i] * x1[i]  # weight iteration
        w2 = w2 + mu * e[i] * x2[i]
    output = e  # error signal = cleaned speech
    snr2 = noisy.SNR_singlech(s, output)  # SNR after notch filter
    snr = snr2 - snr1  # SNR improvement
    print('snr1 = {} \nsnr2 = {} \n'.format(snr1, snr2))
    # figure: original / noisy / filtered
    plt.figure(figsize=(9, 16))
    plt.subplot(3, 1, 1)
    plt.plot(time, s)
    plt.axis([0, np.max(time), -1, 1])
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    plt.title('Original Speech Signal')
    plt.subplot(3, 1, 2)
    plt.plot(time, x)
    plt.axis([0, np.max(time), -1, 1])
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    plt.title('Noisy Speech Signal SNR = {:.2f}dB'.format(snr1))
    plt.subplot(3, 1, 3)
    plt.plot(time, output)
    plt.axis([0, np.max(time), -1, 1])
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    plt.title('LMS Notch Filter Output Speech Signal \nSNR = {:.2f}dB'.format(snr2))
    plt.savefig('images/lms_notch_denoise.png', bbox_inches='tight', dpi=600)
    plt.show()
/usr/local/python3/lib/python3.7/stat.py |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
from proctor_lib import Proctor
class ProctorConfig(AppConfig):
    """Django app config that loads Proctor plugins on startup."""
    name = 'proctor'
    def ready(self):
        # ready() runs once the Django app registry is fully populated.
        p = Proctor()
        p.load_plugins()
# Read a product's price and display the new price with a 5% discount.
preço = float(input("Entre com o preço do produto: "))
desconto = (preço * 5) / 100  # 5% of the original price
preço = preço - desconto
print("O novo preço do produto será de {} com 5% de desconto!".format(preço))
|
"""Autogenerated 2021-11-16T11:37:36.465555 by redcap_classfiles.py
"""
from ....pgrest import *
from ...constants import Constants
from ..rcconstants import REDCapConstants
from ..rcaptable import RcapTable
__all__ = ["RcapPainCatastrophizingQuestionnairePcs6"]
class RcapPainCatastrophizingQuestionnairePcs6(RcapTable):
    """Pain Catastrophizing Questionnaire Pcs6"""
    # Name-mangled REDCap form identifier used by the table machinery.
    __redcap_form_name = "pain_catastrophizing_questionnaire_pcs6"
    pain_catastrophizing_questionnaire_pcs6_id = Constants.SERIAL_PRIMARY_KEY_COLUMN
    # Completion status, references the shared status lookup table.
    pain_catastrophizing_questionnaire_pcs6_complete = Column(
        Integer, ForeignKey("status.status_id")
    )
    # It's awful and I feel that it overwhelms me
    # Field Type: radio
    # Choices: 0, Not at all | 1, To a slight degree | 2, To a moderate degree | 3, To a great degree | 4, All the time
    pcqpainawfulovrwhlmscl = Column(Integer, nullable=True, comments=None)
    # I feel I can't stand it anymore
    # Field Type: radio
    # Choices: 0, Not at all | 1, To a slight degree | 2, To a moderate degree | 3, To a great degree | 4, All the time
    pcqfeelcantwithstandscl = Column(Integer, nullable=True, comments=None)
    # I become afraid that the pain will get worse
    # Field Type: radio
    # Choices: 0, Not at all | 1, To a slight degree | 2, To a moderate degree | 3, To a great degree | 4, All the time
    pcqafraidpainworsescl = Column(Integer, nullable=True, comments=None)
    # I keep thinking about how much it hurts
    # Field Type: radio
    # Choices: 0, Not at all | 1, To a slight degree | 2, To a moderate degree | 3, To a great degree | 4, All the time
    pcqhurtscl = Column(Integer, nullable=True, comments=None)
    # I keep thinking about how badly I want the pain to stop
    # Field Type: radio
    # Choices: 0, Not at all | 1, To a slight degree | 2, To a moderate degree | 3, To a great degree | 4, All the time
    pcqpainstopscl = Column(Integer, nullable=True, comments=None)
    # I wonder whether something serious may happen
    # Field Type: radio
    # Choices: 0, Not at all | 1, To a slight degree | 2, To a moderate degree | 3, To a great degree | 4, All the time
    pcqseriousscl = Column(Integer, nullable=True, comments=None)
|
#!/usr/bin/env python
# encoding: utf-8
# MIT License
# (c) baltasar 2016
import webapp2
from google.appengine.ext import ndb
from model.session import Session
class AddSessionHandler(webapp2.RequestHandler):
    """Create a new Session for an existing client and redirect to its edit page."""

    def get(self):
        # The client to attach the session to is identified by its urlsafe key.
        try:
            client_id = self.request.GET['client_id']
        except KeyError:  # was a bare except; only a missing param can raise here
            self.redirect("/error?msg=missing client_id for new session")
            return
        try:
            client = ndb.Key(urlsafe=client_id).get()
        except Exception:  # malformed urlsafe key or datastore error
            self.redirect("/error?msg=client was not found")
            return
        if client is None:
            # Key decoded but no entity behind it; previously this fell through
            # and crashed on client.dni below.
            self.redirect("/error?msg=client was not found")
            return
        # New session starts as a blank "Revisión" entry for the client.
        session = Session()
        session.dni = client.dni
        session.subject = "Revisión"
        session.comments = ""
        session.proposal = ""
        session.put()
        self.redirect("/modifySession?session_id=" + session.key.urlsafe() + "&client_id=" + client_id)
# WSGI route table: a single endpoint that creates sessions.
app = webapp2.WSGIApplication([
    ("/addSession", AddSessionHandler),
], debug=True)
|
"""Constants for the ical integration."""
VERSION = "0.9"
DOMAIN = "ical"
CONF_MAX_EVENTS = "max_events"
CONF_DAYS = "days"
ICON = "mdi:calendar"
DEFAULT_NAME = "iCal Sensor"
DEFAULT_MAX_EVENTS = 5
|
from collections import OrderedDict
import networkx as nx
from .test_graph import TestGraph as _TestGraph
from .test_graph import BaseGraphTester
from .test_digraph import TestDiGraph as _TestDiGraph
from .test_digraph import BaseDiGraphTester
from .test_multigraph import TestMultiGraph as _TestMultiGraph
from .test_multidigraph import TestMultiDiGraph as _TestMultiDiGraph
def test_factories():
    """Check that custom dict factories are honoured by all four graph classes."""
    # One distinct dict subclass per factory so the isinstance checks below
    # can tell which factory produced each internal mapping.
    class mydict1(dict):
        pass
    class mydict2(dict):
        pass
    class mydict3s(dict):
        pass
    class mydict3p(dict):
        pass
    class mydict3a(dict):
        pass
    class mydict4(dict):
        pass
    class mydict5(dict):
        pass
    for Graph in (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph):
        # print("testing class: ", Graph.__name__)
        class MyGraph(Graph):
            node_dict_factory = mydict1
            adjlist_outer_dict_factory = mydict2
            inner_succ_dict_factory = mydict3s
            inner_pred_dict_factory = mydict3p
            inner_adj_dict_factory = mydict3a
            edge_key_dict_factory = mydict4
            edge_attr_dict_factory = mydict5
        G = MyGraph()
        assert isinstance(G._node, mydict1)
        assert isinstance(G._adj, mydict2)
        G.add_node(1)
        # Directed graphs use the succ factory for _adj entries, undirected the adj one.
        if G.is_directed():
            assert isinstance(G._adj[1], mydict3s)
        else:
            assert isinstance(G._adj[1], mydict3a)
        if G.is_directed():
            assert isinstance(G._pred, mydict2)
            assert isinstance(G._succ, mydict2)
            assert isinstance(G._pred[1], mydict3p)
        G.add_edge(1, 2)
        # Multigraphs have one extra nesting level keyed by edge key.
        if G.is_multigraph():
            assert isinstance(G._adj[1][2], mydict4)
            assert isinstance(G._adj[1][2][0], mydict5)
        else:
            assert isinstance(G._adj[1][2], mydict5)
class TestSpecialGraph(_TestGraph):
    """Run the Graph test suite against plain nx.Graph."""
    def setup_method(self):
        _TestGraph.setup_method(self)
        self.Graph = nx.Graph
class TestOrderedGraph(_TestGraph):
    """Run the Graph test suite with OrderedDict used for every factory."""
    def setup_method(self):
        _TestGraph.setup_method(self)
        class MyGraph(nx.Graph):
            node_dict_factory = OrderedDict
            adjlist_outer_dict_factory = OrderedDict
            inner_succ_dict_factory = OrderedDict
            inner_pred_dict_factory = OrderedDict
            inner_adj_dict_factory = OrderedDict
            edge_attr_dict_factory = OrderedDict
        self.Graph = MyGraph
class TestThinGraph(BaseGraphTester):
    """Run the base Graph tests on a "thin" graph whose edges all share one attr dict."""
    def setup_method(self):
        all_edge_dict = {'weight': 1}
        class MyGraph(nx.Graph):
            # Every edge shares the same attribute dict, so no per-edge data is stored.
            def edge_attr_dict_factory(self): return all_edge_dict
        self.Graph = MyGraph
        # build dict-of-dict-of-dict K3
        ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict)
        self.k3adj = {0: {1: ed1, 2: ed2},
                      1: {0: ed1, 2: ed3},
                      2: {0: ed2, 1: ed3}}
        self.k3edges = [(0, 1), (0, 2), (1, 2)]
        self.k3nodes = [0, 1, 2]
        self.K3 = self.Graph()
        self.K3._adj = self.k3adj
        self.K3._node = {}
        self.K3._node[0] = {}
        self.K3._node[1] = {}
        self.K3._node[2] = {}
class TestSpecialDiGraph(_TestDiGraph):
    """Run the DiGraph test suite against plain nx.DiGraph."""
    def setup_method(self):
        _TestDiGraph.setup_method(self)
        self.Graph = nx.DiGraph
class TestOrderedDiGraph(_TestDiGraph):
    """Run the DiGraph test suite with OrderedDict used for every factory."""
    def setup_method(self):
        _TestDiGraph.setup_method(self)
        class MyGraph(nx.DiGraph):
            node_dict_factory = OrderedDict
            adjlist_outer_dict_factory = OrderedDict
            inner_succ_dict_factory = OrderedDict
            inner_pred_dict_factory = OrderedDict
            inner_adj_dict_factory = OrderedDict
            edge_attr_dict_factory = OrderedDict
        self.Graph = MyGraph
class TestThinDiGraph(BaseDiGraphTester):
    """Run the base DiGraph tests on a "thin" digraph with one shared edge attr dict."""
    def setup_method(self):
        all_edge_dict = {'weight': 1}
        class MyGraph(nx.DiGraph):
            # Every edge shares the same attribute dict ("thin" graph).
            def edge_attr_dict_factory(self): return all_edge_dict
        self.Graph = MyGraph
        # build dict-of-dict-of-dict K3
        ed1, ed2, ed3 = (all_edge_dict, all_edge_dict, all_edge_dict)
        ed4, ed5, ed6 = (all_edge_dict, all_edge_dict, all_edge_dict)
        self.k3adj = {0: {1: ed1, 2: ed2}, 1: {0: ed3, 2: ed4}, 2: {0: ed5, 1: ed6}}
        self.k3edges = [(0, 1), (0, 2), (1, 2)]
        self.k3nodes = [0, 1, 2]
        self.K3 = self.Graph()
        # _succ is the same mapping object as _adj in a DiGraph.
        self.K3._adj = self.K3._succ = self.k3adj
        self.K3._pred = {0: {1: ed3, 2: ed5}, 1: {0: ed1, 2: ed6}, 2: {0: ed2, 1: ed4}}
        self.K3._node = {}
        self.K3._node[0] = {}
        self.K3._node[1] = {}
        self.K3._node[2] = {}
        # P3: the directed path 0 -> 1 -> 2.
        ed1, ed2 = (all_edge_dict, all_edge_dict)
        self.P3 = self.Graph()
        self.P3._adj = {0: {1: ed1}, 1: {2: ed2}, 2: {}}
        self.P3._succ = self.P3._adj
        self.P3._pred = {0: {}, 1: {0: ed1}, 2: {1: ed2}}
        self.P3._node = {}
        self.P3._node[0] = {}
        self.P3._node[1] = {}
        self.P3._node[2] = {}
class TestSpecialMultiGraph(_TestMultiGraph):
    """Run the MultiGraph test suite against plain nx.MultiGraph."""
    def setup_method(self):
        _TestMultiGraph.setup_method(self)
        self.Graph = nx.MultiGraph
class TestOrderedMultiGraph(_TestMultiGraph):
    """Run the MultiGraph test suite with OrderedDict used for every factory."""
    def setup_method(self):
        _TestMultiGraph.setup_method(self)
        class MyGraph(nx.MultiGraph):
            node_dict_factory = OrderedDict
            adjlist_outer_dict_factory = OrderedDict
            inner_succ_dict_factory = OrderedDict
            inner_pred_dict_factory = OrderedDict
            inner_adj_dict_factory = OrderedDict
            edge_key_dict_factory = OrderedDict
            edge_attr_dict_factory = OrderedDict
        self.Graph = MyGraph
class TestSpecialMultiDiGraph(_TestMultiDiGraph):
    """Run the MultiDiGraph test suite against plain nx.MultiDiGraph."""
    def setup_method(self):
        _TestMultiDiGraph.setup_method(self)
        self.Graph = nx.MultiDiGraph
class TestOrderedMultiDiGraph(_TestMultiDiGraph):
    """Run the MultiDiGraph test suite with OrderedDict used for every factory."""
    def setup_method(self):
        _TestMultiDiGraph.setup_method(self)
        class MyGraph(nx.MultiDiGraph):
            node_dict_factory = OrderedDict
            adjlist_outer_dict_factory = OrderedDict
            inner_succ_dict_factory = OrderedDict
            inner_pred_dict_factory = OrderedDict
            inner_adj_dict_factory = OrderedDict
            edge_key_dict_factory = OrderedDict
            edge_attr_dict_factory = OrderedDict
        self.Graph = MyGraph
|
from PIL import Image
import sys
from functools import reduce
fname = sys.argv[1]  # input image path from the command line
im = Image.open(fname, 'r')
# Output: same basename with .hbmp extension, opened in binary mode.
f = open(fname.rsplit('.', 1)[0] + '.hbmp', "wb")
# turn image into bit stream: one bit per pixel, 1 where the alpha channel
# is fully opaque (assumes the image has an alpha band, e.g. RGBA -- confirm).
px_data = im.load()
bitstream = ''
for j in range(im.size[1]):
    for i in range(im.size[0]):
        bitstream += "1" if px_data[i, j][3] == 255 else "0"
# function that adds hamming codes to chunk (15, 11)
def chunk_to_hamming(chunk):
    """Encode up to 11 data bits as a 16-bit extended-Hamming(15,11) string.

    Positions 1, 2, 4 and 8 hold the classic Hamming parity bits and
    position 0 is the (unused) overall-parity slot; the remaining 11
    positions carry the data bits in order.

    Fix: the original indexed ``chunk[idx]`` unconditionally and raised
    IndexError on the final chunk of a bitstream whose length is not a
    multiple of 11; short chunks are now zero-padded.

    Args:
        chunk: a string of '0'/'1' characters, at most 11 of them.

    Returns:
        A 16-character '0'/'1' string with parity bits filled in.
    """
    chunk = chunk.ljust(11, "0")  # tail chunk may be short: pad with zero bits
    parity_positions = (0, 1, 2, 4, 8)
    data_iter = iter(chunk)
    # Parity slots start out as '0'; every other slot takes the next data bit.
    bits = ["0" if pos in parity_positions else next(data_iter) for pos in range(16)]
    # XOR of the indices of all set bits yields the parity pattern to store.
    error_loc = reduce(lambda a, b: a ^ b, [0] + [i for i, bit in enumerate(bits) if bit == "1"])
    # Decompose the pattern into the individual parity positions (powers of 2).
    flips = [1 << idx for idx, bit in enumerate(bin(error_loc)[:1:-1]) if bit == "1"]
    for pos in flips:
        bits[pos] = "1"
    return ''.join(bits)
# split bitstream into chunks of 11 to fill hamming codes
chunks = [bitstream[i : i + 11] for i in range(0, len(bitstream), 11)]
hamming_chunks = map(chunk_to_hamming, chunks)
bitstream = ''.join(hamming_chunks)
# save bit stream to file; each chunk is 16 bits so the total length is a
# multiple of 8, and the fixed length passed to to_bytes preserves leading
# zero bits. Bytes are emitted little-endian.
data = int(bitstream, 2).to_bytes(len(bitstream) // 8, 'little')
f.write(data)
f.close()
class PoolUsedCapacity(object):
    """Reads the used-capacity value of a storage pool (presumably an SNMP
    agent scalar/column handler -- verify against the unity_client API)."""
    def read_get(self, name, idx_name, unity_client):
        return unity_client.get_pool_size_used(idx_name)
class PoolUsedCapacityColumn(object):
    """Provides the index set (all pools) for PoolUsedCapacity lookups."""
    def get_idx(self, name, idx, unity_client):
        return unity_client.get_pools()
|
import logging
import numpy
import zmq
from numpy.lib.format import header_data_from_array_1_0
from fuel.utils import buffer_
logger = logging.getLogger(__name__)
def send_arrays(socket, arrays, stop=False):
    """Send NumPy arrays using the buffer interface and some metadata.
    Parameters
    ----------
    socket : :class:`zmq.Socket`
        The socket to send data over.
    arrays : list
        A list of :class:`numpy.ndarray` to transfer.
    stop : bool, optional
        Instead of sending a series of NumPy arrays, send a JSON object
        with a single `stop` key. The :func:`recv_arrays` will raise
        ``StopIteration`` when it receives this.
    Notes
    -----
    The protocol is very simple: A single JSON object describing the array
    format (using the same specification as ``.npy`` files) is sent first.
    Subsequently the arrays are sent as bytestreams (through NumPy's
    support of the buffering protocol).
    """
    if arrays:
        # The buffer protocol only works on contiguous arrays
        arrays = [numpy.ascontiguousarray(array) for array in arrays]
    if stop:
        # NOTE(review): when stop=True any provided arrays are silently
        # ignored; only the stop header is sent.
        headers = {'stop': True}
        socket.send_json(headers)
    else:
        headers = [header_data_from_array_1_0(array) for array in arrays]
        # SNDMORE keeps the header and all array frames in one multipart message.
        socket.send_json(headers, zmq.SNDMORE)
        for array in arrays[:-1]:
            socket.send(array, zmq.SNDMORE)
        socket.send(arrays[-1])
def recv_arrays(socket):
    """Receive a list of NumPy arrays.
    Parameters
    ----------
    socket : :class:`zmq.Socket`
        The socket to receive the arrays on.
    Returns
    -------
    list
        A list of :class:`numpy.ndarray` objects.
    Raises
    ------
    StopIteration
        If the first JSON object received contains the key `stop`,
        signifying that the server has finished a single epoch.
    """
    headers = socket.recv_json()
    if 'stop' in headers:
        raise StopIteration
    arrays = []
    for header in headers:
        data = socket.recv(copy=False)
        buf = buffer_(data)
        # Rebuild the ndarray from the raw frame using the dtype/shape
        # metadata carried in the JSON header.
        array = numpy.frombuffer(buf, dtype=numpy.dtype(header['descr']))
        array.shape = header['shape']
        if header['fortran_order']:
            # Fortran-ordered arrays arrive as the transposed C view; undo it.
            array.shape = header['shape'][::-1]
            array = array.transpose()
        arrays.append(array)
    return arrays
def start_server(data_stream, port=5557, hwm=10):
    """Start a data processing server.
    This command starts a server in the current process that performs the
    actual data processing (by retrieving data from the given data stream).
    It also starts a second process, the broker, which mediates between the
    server and the client. The broker also keeps a buffer of batches in
    memory.
    Parameters
    ----------
    data_stream : :class:`.DataStream`
        The data stream to return examples from.
    port : int, optional
        The port the server and the client (training loop) will use to
        communicate. Defaults to 5557.
    hwm : int, optional
        The `ZeroMQ high-water mark (HWM)
        <http://zguide.zeromq.org/page:all#High-Water-Marks>`_ on the
        sending socket. Increasing this increases the buffer, which can be
        useful if your data preprocessing times are very random. However,
        it will increase memory usage. There is no easy way to tell how
        many batches will actually be queued with a particular HWM.
        Defaults to 10. Be sure to set the corresponding HWM on the
        receiving end as well.
    """
    logging.basicConfig(level='INFO')
    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    socket.set_hwm(hwm)
    socket.bind('tcp://*:{}'.format(port))
    it = data_stream.get_epoch_iterator()
    logger.info('server started')
    # Serve forever: stream batches, and on epoch end send a stop marker
    # then restart the iterator for the next epoch.
    while True:
        try:
            data = next(it)
            stop = False
            logger.debug("sending {} arrays".format(len(data)))
        except StopIteration:
            it = data_stream.get_epoch_iterator()
            data = None
            stop = True
            logger.debug("sending StopIteration")
        send_arrays(socket, data, stop=stop)
|
from django import forms
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
class Profile(forms.ModelForm):
    """ModelForm exposing basic account fields of Django's built-in User."""
    class Meta:
        model = User
        fields = ('username','password','first_name','last_name',)
'''list = Group.objects.all()
temp = []
for group in list:
select = (group.slug, group.title)
temp.append(select)
OPTIONS = tuple(
temp,
)''' |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Any, List, Sequence
import numpy as np
import pytest
import torch
from pytorch_lightning import seed_everything
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from flash.core.data.base_viz import BaseVisualization
from flash.core.data.io.input import DataKeys
from flash.core.data.utils import _CALLBACK_FUNCS, _STAGES_PREFIX
from flash.core.utilities.imports import _PIL_AVAILABLE
from flash.core.utilities.stages import RunningStage
from flash.image import ImageClassificationData
from tests.helpers.utils import _IMAGE_TESTING
if _PIL_AVAILABLE:
from PIL import Image
def _rand_image():
    # 196x196 RGB PIL image filled with uniform random pixels; test fixture.
    return Image.fromarray(np.random.randint(0, 255, (196, 196, 3), dtype="uint8"))
class CustomBaseVisualization(BaseVisualization):
    """BaseVisualization stub that records which show_* hooks were invoked."""
    def __init__(self):
        super().__init__()
        # One flag per hook; flipped to True when the hook fires.
        self.show_load_sample_called = False
        self.show_pre_tensor_transform_called = False
        self.show_to_tensor_transform_called = False
        self.show_post_tensor_transform_called = False
        self.show_collate_called = False
        self.per_batch_transform_called = False
    def show_load_sample(self, samples: List[Any], running_stage: RunningStage):
        self.show_load_sample_called = True
    def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
        self.show_pre_tensor_transform_called = True
    def show_to_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
        self.show_to_tensor_transform_called = True
    def show_post_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
        self.show_post_tensor_transform_called = True
    def show_collate(self, batch: Sequence, running_stage: RunningStage) -> None:
        self.show_collate_called = True
    def show_per_batch_transform(self, batch: Sequence, running_stage: RunningStage) -> None:
        self.per_batch_transform_called = True
    def check_reset(self):
        # Reset all flags between stages/iterations.
        self.show_load_sample_called = False
        self.show_pre_tensor_transform_called = False
        self.show_to_tensor_transform_called = False
        self.show_post_tensor_transform_called = False
        self.show_collate_called = False
        self.per_batch_transform_called = False
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
class TestBaseViz:
    """Exercises the data fetcher's show_* hooks across all running stages."""
    def test_base_viz(self, tmpdir):
        seed_everything(42)
        tmpdir = Path(tmpdir)
        # Two random PNG fixtures serve as train/val/test/predict inputs.
        train_images = [str(tmpdir / "a1.png"), str(tmpdir / "b1.png")]
        _rand_image().save(train_images[0])
        _rand_image().save(train_images[1])
        class CustomImageClassificationData(ImageClassificationData):
            @staticmethod
            def configure_data_fetcher(*args, **kwargs) -> CustomBaseVisualization:
                return CustomBaseVisualization(*args, **kwargs)
        B: int = 2  # batch_size
        dm = CustomImageClassificationData.from_files(
            train_files=train_images,
            train_targets=[0, 1],
            val_files=train_images,
            val_targets=[2, 3],
            test_files=train_images,
            test_targets=[4, 5],
            predict_files=train_images,
            batch_size=B,
            num_workers=0,
        )
        num_tests = 10
        for stage in _STAGES_PREFIX.values():
            for _ in range(num_tests):
                # Trigger every callback hook for this stage's batch.
                for fcn_name in _CALLBACK_FUNCS:
                    dm.data_fetcher.reset()
                    fcn = getattr(dm, f"show_{stage}_batch")
                    fcn(fcn_name, reset=False)
                is_predict = stage == "predict"  # predict batches carry no targets
                def _extract_data(data):
                    return data[0][DataKeys.INPUT]
                def _get_result(function_name: str):
                    return dm.data_fetcher.batches[stage][function_name]
                res = _get_result("load_sample")
                assert len(res) == B
                assert isinstance(_extract_data(res), Image.Image)
                if not is_predict:
                    res = _get_result("load_sample")
                    assert isinstance(res[0][DataKeys.TARGET], int)
                res = _get_result("to_tensor_transform")
                assert len(res) == B
                assert isinstance(_extract_data(res), torch.Tensor)
                if not is_predict:
                    res = _get_result("to_tensor_transform")
                    assert isinstance(res[0][DataKeys.TARGET], torch.Tensor)
                res = _get_result("collate")
                assert _extract_data(res).shape == (B, 3, 196, 196)
                if not is_predict:
                    res = _get_result("collate")
                    assert res[0][DataKeys.TARGET].shape == torch.Size([2])
                res = _get_result("per_batch_transform")
                assert _extract_data(res).shape == (B, 3, 196, 196)
                if not is_predict:
                    res = _get_result("per_batch_transform")
                    assert res[0][DataKeys.TARGET].shape == (B,)
                # Every hook must have fired.
                assert dm.data_fetcher.show_load_sample_called
                assert dm.data_fetcher.show_pre_tensor_transform_called
                assert dm.data_fetcher.show_to_tensor_transform_called
                assert dm.data_fetcher.show_post_tensor_transform_called
                assert dm.data_fetcher.show_collate_called
                assert dm.data_fetcher.per_batch_transform_called
                dm.data_fetcher.check_reset()
    @pytest.mark.parametrize(
        "func_names, valid",
        [
            (["load_sample"], True),
            (["not_a_hook"], False),
            (["load_sample", "pre_tensor_transform"], True),
            (["load_sample", "not_a_hook"], True),
        ],
    )
    def test_show(self, func_names, valid):
        base_viz = CustomBaseVisualization()
        # Fake per-hook batches keyed by hook name.
        batch = {func_name: "test" for func_name in func_names}
        if not valid:
            with pytest.raises(MisconfigurationException, match="Invalid function names"):
                base_viz.show(batch, RunningStage.TRAINING, func_names)
        else:
            base_viz.show(batch, RunningStage.TRAINING, func_names)
            for func_name in func_names:
                if hasattr(base_viz, f"show_{func_name}_called"):
                    assert getattr(base_viz, f"show_{func_name}_called")
|
# Generated by Django 3.2.11 on 2022-02-03 09:56
from django.db import migrations
class Migration(migrations.Migration):
    """Add default ordering and get_latest_by metadata to the Change model."""
    dependencies = [
        ("change_log", "0002_auto_20220202_1535"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="change",
            options={"get_latest_by": "timestamp", "ordering": ["-timestamp"]},
        ),
    ]
|
from sspdatatables.forms import AbstractFooterForm
from django.forms import ChoiceField
from django_countries import countries
class BookFieldSelectForm(AbstractFooterForm):
    """Footer filter form exposing an author-nationality dropdown column."""
    def get_author_nationality_choices(self):
        # Placeholder entry first, then every (code, name) pair from django-countries.
        return [(None, 'Select')] + list(countries.countries.items())
    class Meta:
        fields = [("author_nationality", ChoiceField),]
|
prenom = "Vivien"
print(f'Bonjour {prenom} !') |
import discord
from discord.ext import commands
import random
import random
import asyncio
class emote(commands.Cog):
    """Cog that posts a random kawaii emoji pulled from dedicated emoji guilds."""
    def __init__(self, client):
        self.client = client
    @commands.command(brief="gives a random kawaii emoji.",aliases=["ka"])
    async def kawaii_random(self,ctx,*,message=None,amount=1):
        # NOTE(review): when message is None a warning is sent but execution
        # continues, so "None <emoji>" is still posted below -- confirm intended.
        if message is None:
            await ctx.send("You don't want to say anything cute?")
        # Emoji pools come from three hard-coded guilds the bot must be a member of.
        kawaii_emotes= self.client.get_guild(773571474761973840)
        kawaii_emotes2 = self.client.get_guild(806669712410411068)
        kawaii_emotes3 = self.client.get_guild(692576207404793946)
        emoji_choosen = random.choice(kawaii_emotes.emojis+kawaii_emotes2.emojis+kawaii_emotes3.emojis)
        # Delete the last `amount` messages (including the invoking command).
        await ctx.channel.purge(limit=amount)
        await ctx.send(f"{message} {emoji_choosen}")
def setup(client):
    # Entry point used by discord.py's load_extension to register the cog.
    client.add_cog(emote(client))
|
import os
from setuptools import setup
from microbot import __version__
name = "microbot"
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, "README.md"), "r") as file:
long_description = file.read()
with open(os.path.join(here, "requirements.txt"), "r") as file:
install_requires = file.read().splitlines()
with open(os.path.join(here, "test-requirements.txt"), "r") as file:
tests_require = file.read().splitlines()
setup(
version=__version__,
name=name,
author="minelminel",
description="Raspberry Pi stepper motor application for 3-axis robot",
url="https://github.com/minelminel/microbot",
license="MIT",
author_email="ctrlcmdspace@gmail.com",
long_description=long_description,
long_description_content_type="text/markdown",
packages=[
"microbot",
],
install_requires=install_requires,
tests_require=tests_require,
python_requires=">=3.0.*",
entry_points={"console_scripts": ["microbot=microbot.__main__:main"]},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
|
from cms.models import CMSPlugin
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
CLASS_NAMES = getattr(settings, "CMS_STYLE_NAMES", (
('info', _("info")),
('new', _("new")),
('hint', _("hint"))
)
)
class Style(CMSPlugin):
    """
    A CSS Style Plugin
    """
    DIV_TAG = 'div'
    ARTICLE_TAG = 'article'
    SECTION_TAG = 'section'
    # Wrapper tag choices; overridable through the CMS_STYLE_TAG_TYPES setting.
    HTML_TAG_TYPES = getattr(settings, "CMS_STYLE_TAG_TYPES", (
        (DIV_TAG, _('div')),
        (ARTICLE_TAG, _('article')),
        (SECTION_TAG, _('section')),
        )
    )
    cmsplugin_ptr = models.OneToOneField(CMSPlugin, related_name='+', parent_link=True)
    class_name = models.CharField(_("class name"), choices=CLASS_NAMES, default=CLASS_NAMES[0][0], max_length=50, blank=True, null=True)
    tag_type = models.CharField(verbose_name=_('tag Type'), max_length=50, choices=HTML_TAG_TYPES, default=HTML_TAG_TYPES[0][0])
    # Optional per-side spacing in pixels; None/blank means "not set".
    padding_left = models.SmallIntegerField(_("padding left"), blank=True, null=True)
    padding_right = models.SmallIntegerField(_("padding right"), blank=True, null=True)
    padding_top = models.SmallIntegerField(_("padding top"), blank=True, null=True)
    padding_bottom = models.SmallIntegerField(_("padding bottom"), blank=True, null=True)
    margin_left = models.SmallIntegerField(_("margin left"), blank=True, null=True)
    margin_right = models.SmallIntegerField(_("margin right"), blank=True, null=True)
    margin_top = models.SmallIntegerField(_("margin top"), blank=True, null=True)
    margin_bottom = models.SmallIntegerField(_("margin bottom"), blank=True, null=True)
    additional_classes = models.CharField(
        verbose_name=_('additional clases'),
        max_length=200,
        blank=True,
        help_text=_('Comma separated list of additional classes to apply to tag_type')
    )
    def __unicode__(self):
        # Python 2 string representation (this model predates __str__ usage).
        display = self.get_class_name_display() or self.tag_type or u''
        return u"%s" % display
    def inline_style(self):
        # Build an inline CSS string from the set spacing fields.
        # Truthiness check means a stored 0 is skipped, which is a no-op anyway.
        style = ""
        if self.padding_left:
            style += "padding-left: %dpx; " % self.padding_left
        if self.padding_right:
            style += "padding-right: %dpx; " % self.padding_right
        if self.padding_bottom:
            style += "padding-bottom: %dpx; " % self.padding_bottom
        if self.padding_top:
            style += "padding-top: %dpx; " % self.padding_top
        if self.margin_left:
            style += "margin-left: %dpx; " % self.margin_left
        if self.margin_right:
            style += "margin-right: %dpx; " % self.margin_right
        if self.margin_top:
            style += "margin-top: %dpx; " % self.margin_top
        if self.margin_bottom:
            style += "margin-bottom: %dpx; " % self.margin_bottom
        return style
    @property
    def get_additional_classes(self):
        # Normalize the comma-separated list into a space-separated class string.
        if self.additional_classes:
            # Removes any extra spaces
            return ' '.join((html_class.strip() for html_class in self.additional_classes.split(',')))
        return ''
|
from summit.strategies.base import Transform
from summit.experiment import Experiment
from summit.domain import *
from summit.utils.dataset import DataSet
import numpy as np
import pandas as pd
from scipy.integrate import solve_ivp
import os
import json
import requests
from requests.auth import HTTPBasicAuth
from time import sleep
import socketserver
import socket
from http.server import BaseHTTPRequestHandler
import ftplib
import csv
from fractions import Fraction
class Allylation1(Experiment):
    """Closed-loop allylation flow-chemistry experiment.

    Drives a Uniqsis flow reactor through Node-RED HTTP endpoints on a
    Raspberry Pi, triggers HPLC sampling via a relay, retrieves the HPLC
    report over FTP and computes the space-time yield (STY) objective for
    the optimisation loop.

    Bug fixes vs. the previous version: the class-level configuration
    attributes (username, passwords, paths, IPs, endpoints) were referenced
    as bare names inside the methods, which raises NameError at runtime —
    they are now accessed through ``self``. Downloaded files are closed, and
    the bare ``except:`` no longer swallows SystemExit/KeyboardInterrupt.
    """
    # NOTE(review): 'XXX' values are deployment-specific placeholders that
    # must be filled in before running.
    outputfile = r'XXX'  # Absolute path of the CSV log of completed runs
    IP_HPLC_PC = '146.64.91.153'
    RasPi_start = 'XXX:1880/start'          # Node-RED endpoint: start a sequence
    RasPi_run_relay = 'XXX:1880/run_relay'  # Node-RED endpoint: fire the HPLC relay
    IP_Control_PC = '146.64.91.245'
    absolute_path = "XXX"  # Directory where JSON templates are located
    username = 'XXX'
    password = 'XXX'
    ftp_password = 'XXX'

    def __init__(self, noise_level=0, **kwargs):
        domain = self._setup_domain()
        super().__init__(domain)
        self.rng = np.random.default_rng()
        self.noise_level = noise_level

    def _setup_domain(self):
        """Build the optimisation domain: tau, temperature, eqv -> maximise sty."""
        domain = Domain()
        # Decision variables
        des_1 = "residence time in minutes"
        domain += ContinuousVariable(name="tau", description=des_1, bounds=[4, 30])
        des_2 = "reactor temperature"
        domain += ContinuousVariable(
            name="temperature", description=des_2, bounds=[40, 90]
        )
        des_3 = "equivalence"
        domain += ContinuousVariable(name='eqv', description=des_3, bounds=[1, 1.39])
        # Objectives
        des_5 = "space time yield (g/dm^3/h)"
        domain += ContinuousVariable(
            name="sty",
            description=des_5,
            bounds=[0, 890],
            is_objective=True,
            maximize=True,
        )
        return domain

    def grab_data(self, IP, prod_theoritical_conc):
        """Poll the HPLC PC over FTP until a parsable result file is retrieved.

        Args:
            IP: IP address of the HPLC PC.
            prod_theoritical_conc: theoretical product concentration used to
                compute the product yield.

        Returns:
            Tuple ``(iso_conc, allyl_conc, prod_conc, prod_yld, hplc_run)``.
        """
        result = None
        while result is None:
            os.chdir('XXX')
            try:
                sleep(0.2)
                # Connect using the HPLC PC credentials
                # (bug fix: username/ftp_password are class attributes -> self)
                ftp = ftplib.FTP(IP)
                ftp.login(self.username, self.ftp_password)
                # Move to directory where HPLC results are stored
                ftp.cwd('/ClosedLoop')
                ### Copy data file
                # Look for most recent directory (sorted by modification time)
                filename = sorted(ftp.nlst(), key=lambda x: ftp.voidcmd(f"MDTM {x}"))[-1]
                # Go into most recent directory
                ftp.cwd(filename)
                hplc_run = filename
                os.chdir('XXX')
                # Grab data from csv titled 'REPORT-SAD04.CSV'
                data_file_name = 'REPORT-SAD04.CSV'
                # (bug fix: close the downloaded files; the handles used to leak)
                with open(data_file_name, 'wb') as fh:
                    ftp.retrbinary("RETR " + data_file_name, fh.write)
                name = str(filename + '_' + data_file_name)
                with open(name, 'wb') as fh:
                    ftp.retrbinary("RETR " + data_file_name, fh.write)
                ### Close ftp connection
                ftp.quit()
                os.chdir('XXX')
                ### Load data from csv with pandas
                df = pd.read_csv(
                    name,
                    header=None,
                    encoding="utf_16_le"
                )
                df.columns = ['Peak#', 'RetTime', 'Type', 'Width', 'Area', 'Height', 'Area%']
                # Get area of Isovanillin - peak1, RetTime between 1.85 and 2.2 min
                # Get area of Allyl bromide - peak2, RetTime between 3.95 and 4.2 min
                # Get area of product - peak 3, RetTime between 3.4 and 3.92 min
                ### peak1 -> Iso
                peak1 = df[(df['RetTime'].between(1.85, 2.2))]
                ### peak2 -> allyl
                peak2 = df[(df['RetTime'].between(3.95, 4.2))]
                ### peak3 -> Product
                peak3 = df[(df['RetTime'].between(3.4, 3.92))]
                # if no peak found, df is empty, make area = 0
                if peak1.empty:
                    print('No peak was found in selected RetTime range')
                    data = {'Area': [0, 0, 0]}
                    peak1 = pd.DataFrame(data, columns=['Area'])
                if peak2.empty:
                    print('No peak was found in selected RetTime range')
                    data = {'Area': [0, 0, 0]}
                    peak2 = pd.DataFrame(data, columns=['Area'])
                if peak3.empty:
                    print('No peak was found in selected RetTime range')
                    data = {'Area': [0, 0, 0]}
                    peak3 = pd.DataFrame(data, columns=['Area'])
                # Grab peak with largest area. This is to prevent the selection
                # of any very small peak which might be due to a contamination.
                peak1_area = peak1['Area'].max()
                peak2_area = peak2['Area'].max()
                peak3_area = peak3['Area'].max()
                print('peak1_area: ' + str(peak1_area))
                print('peak2_area: ' + str(peak2_area))
                print('peak3_area: ' + str(peak3_area))
                dilution_factor = 1
                # Process peak 1 - Iso compound
                ### Calibration line: y = mx + c --> x = (y - c) / m
                m = 17995  # 304.3
                c = 0
                iso_conc = (peak1_area - c) / m
                ### Dilution
                iso_conc = iso_conc * dilution_factor
                ### Process peak 2 - allyl compound
                m = 1151.6
                c = 0
                allyl_conc = (peak2_area - c) / m
                allyl_conc = allyl_conc * dilution_factor
                # Process peak 3 - Product
                m = 20207  # 19618
                c = 0
                prod_conc = (peak3_area - c) / m
                prod_conc = prod_conc * dilution_factor
                # Calculate yield of Product based on theoretical conc.
                prod_yld = (prod_conc / prod_theoritical_conc) * 100
                result = iso_conc, allyl_conc, prod_conc, prod_yld, hplc_run
                print('Result:')
                print('iso_conc, allyl_conc, prod_conc, prod_yld, hplc_run')
                print(result)
                return result
            except Exception:
                # Analysis may not be finished yet (or the network hiccuped);
                # back off and retry. (bug fix: was a bare ``except:``)
                sleep(20)
                print('Trying to get LC data again...')

    def _run(self, conditions, **kwargs):
        """Run one reactor experiment at the requested conditions.

        Returns the conditions augmented with the measured space-time yield.
        """
        # Check how many experiments have been run
        # (bug fix: outputfile is a class attribute -> self.outputfile)
        with open(self.outputfile, 'r', encoding='UTF8', newline='') as f:
            reader = csv.reader(f, )
            lines = len(list(reader))
        completed_exp = lines / 2
        print('completed_exp')
        print(completed_exp)

        class MyHandler(BaseHTTPRequestHandler):
            def do_POST(self):
                if self.path == '/cont':
                    print('from do_POST')
                    # Insert your code here
                if self.path == '/running':
                    print('from do_POST')
                    # Insert your code here
                if self.path == '/run_relay':
                    print('from do_POST')
                self.send_response(200)

        # If number of experiments is divisible by 3, pause and wait for the
        # HPLC column to be replaced. For the script to continue, an http
        # request needs to be sent to /cont.
        if completed_exp % 3 == 0:
            print('Need to wait for new column. Send http message to \'/cont\' when ready')
            http = socketserver.TCPServer((self.IP_Control_PC, 9001), MyHandler)
            http.handle_request()
            print('Continuing the experiment!')
        print('Number of completed experiments: ' + str(completed_exp))
        ### Sequence for reactor run
        file_path_json1 = self.absolute_path + '\\json1.json'
        with open(file_path_json1) as f:
            Exp_seq = json.load(f)
        ### Sequence for changing valves to solvent lines
        solv_chg_path = self.absolute_path + '\\json1-solv_chg.json'
        print(solv_chg_path)
        with open(solv_chg_path) as f:
            Solv_chg = json.load(f)
        ### Sequence for reactor clean up
        file_path_json1_CleanUp = self.absolute_path + '\\json1_CleanUp.json'
        with open(file_path_json1_CleanUp) as f:
            CleanUp = json.load(f)
        print('Conditions to be tested:')
        print(conditions)
        opt_type = conditions["strategy"][0]
        print('opt_type')
        print(opt_type)
        # Conc of stock solutions for pump A and pump B
        # A = iso, B = allyl
        A_stock_conc = 0.495
        B_stock_conc = 0.679
        ret_time = float(conditions["tau"])
        T = float(conditions["temperature"])
        T = round(T, 1)
        eqv = conditions["eqv"]
        # Calculate total flow rate from reactor volume and residence time
        reactor_vol = 7.0
        total_flow_rate = reactor_vol / ret_time
        print('total_flow_rate')
        print(total_flow_rate)
        # Calculate flow rate of pump A & B based on: total flow rate, Eqv,
        # and concentration of stock solutions
        print('eqv')
        print(eqv[0])
        eqv = round(eqv[0], 3)
        print('eqv')
        print(eqv)
        eqv_ratio = Fraction(str(eqv))
        print('eqv_ratio')
        print(eqv_ratio)
        A = eqv_ratio.denominator
        B = eqv_ratio.numerator
        A_change = A / A_stock_conc
        B_change = B / B_stock_conc
        tot_change = A_change + B_change
        flow_A = total_flow_rate / tot_change * A_change
        flow_B = total_flow_rate / tot_change * B_change
        flow_A = round(flow_A, 2)
        flow_B = round(flow_B, 2)
        # Recompute the achievable totals after rounding the pump rates and
        # write them back so the optimiser sees the *actual* conditions.
        total_flow_rate = flow_A + flow_B
        ret_time = reactor_vol / (flow_A + flow_B)
        ret_time = round(ret_time, 2)
        conditions["tau"] = ret_time
        conditions["temperature"] = T
        A_ratio = A_stock_conc * flow_A
        B_ratio = B_stock_conc * flow_B
        new_eqv = B_ratio / A_ratio
        new_eqv = round(new_eqv, 3)
        eqv = new_eqv
        conditions["eqv"] = eqv
        print('T, ret_time, flow_A, flow_B, eqv')
        print(T, ret_time, flow_A, flow_B, eqv)
        # Stagger the pump starts so both streams reach the T-mixer together
        tube_vol_to_T = 1.1  # volume from selector valve to t-mixer
        pumpA_time = tube_vol_to_T / flow_A
        pumpB_time = tube_vol_to_T / flow_B
        time_dif = pumpB_time - pumpA_time
        time_dif = time_dif * 60  # Convert to seconds
        time_dif = round(time_dif, 1)
        if time_dif <= 0:
            time_dif = 0.1
        # Update parameters before JSON message is sent to reactor
        Exp_seq[1]['set'] = T
        Exp_seq[3]['set'] = flow_B
        Exp_seq[3]['delay'] = time_dif
        Exp_seq[5]['set'] = flow_A
        ### Start run on Uniqsis
        # (bug fix: RasPi_start/username/password are class attributes -> self)
        test = requests.post(self.RasPi_start, json=Exp_seq,
                             auth=HTTPBasicAuth(self.username, self.password))
        print(test)

        ### Wait for reactor to start run
        ### Reactor may need some time to heat up or cool down to temperature
        class MyHandler(BaseHTTPRequestHandler):
            def do_POST(self):
                if self.path == '/running':
                    print('from do_POST')
                    # Insert your code here
                if self.path == '/run_relay':
                    print('from do_POST')
                self.send_response(200)

        http = socketserver.TCPServer((self.IP_Control_PC, 9000), MyHandler)
        print('Waiting for reactor to start run')
        http.handle_request()
        print('Reactor is running')
        ### Wait for 8.5ml of A to be injected
        vol_of_A = 8.5
        inj_time = vol_of_A / flow_A
        inj_time = round(inj_time, 2)
        print('Waiting ' + str(inj_time) + ' min for ' + str(vol_of_A) + ' ml of A to be pumped')
        sleep(inj_time * 60)
        print('Switching to solvent')
        test = requests.post('http://csirpharmatech.hopto.org:1880/start', json=Solv_chg,
                             auth=HTTPBasicAuth(self.username, self.password))
        ### Wait for 'steady state' before taking sample
        wait_vol = 4
        wait_time = (wait_vol / total_flow_rate)
        print('Going to wait for ' + str(wait_time) + ' min before taking sample. (' + str(wait_vol) + ' ml)')
        sleep(wait_time * 60)
        ### Use relay to start HPLC run
        test = requests.post(self.RasPi_run_relay,
                             auth=HTTPBasicAuth(self.username, self.password))
        sleep(2)
        ### Start clean up of reactor
        print('Starting reactor wash')
        run_clean = requests.post(self.RasPi_start, json=CleanUp,
                                  auth=HTTPBasicAuth(self.username, self.password))
        ### Wait for HPLC to start analysis
        print('Waiting for LC analysis')
        sleep(200)
        ### Calculate the theoretical product concentration
        prod_theoritical_conc = A_stock_conc * (flow_A / total_flow_rate)
        prod_theoritical_conc = round(prod_theoritical_conc, 5)
        print('prod_theoritical_conc')
        print(prod_theoritical_conc)
        ### Wait/Grab data for response from HPLC ###
        # (bug fix: IP_HPLC_PC is a class attribute -> self.IP_HPLC_PC)
        iso_conc, allyl_conc, prod_conc, prod_yld, hplc_run = self.grab_data(self.IP_HPLC_PC, prod_theoritical_conc)
        ### Calculate the STY (g/dm^3/h)
        MW = 192.21
        sty = ((prod_conc * MW * (total_flow_rate * 0.06)) / (reactor_vol / 1000))
        ### Add results to csv file
        header = ['opt_type', 'T', 'ret_time', 'eqv', 'flow_A', 'flow_B', 'iso_conc', 'allyl_conc', 'prod_conc',
                  'prod_theoritical_conc', 'prod_yld', 'sty', 'hplc_run']
        data_to_save = (
            opt_type, T, ret_time, eqv, flow_A, flow_B, iso_conc, allyl_conc, prod_conc, prod_theoritical_conc, prod_yld,
            sty, hplc_run)
        with open(r'XXX', 'a', encoding='UTF8', newline='') as f:
            writer = csv.writer(f, )
            writer.writerow(header)
            writer.writerow(data_to_save)
        # Save the STY results for the optimisation algorithm
        conditions[("sty", "DATA")] = sty
        return conditions, {}

    def to_dict(self, **kwargs):
        """Serialize the experiment, including its noise level."""
        experiment_params = dict(noise_level=self.noise_level)
        return super().to_dict(**experiment_params)
|
import datetime
from functools import wraps, lru_cache
from time import time
from typing import Union, Type, Sequence, NamedTuple, Callable
import joblib
from ..functions import curry
from ..seq import as_seq
# Factory type: maps a cache key/name to a joblib Memory instance.
MemoryProvider = Callable[[str], joblib.Memory]
# Process-wide provider; must be set via set_global_memory_provider() before use.
MEMORY_PROVIDER: MemoryProvider = None
# Named cache-expiration periods accepted by period_cache(): 'day', 'week'
# and '1h' .. '24h'.
PERIOD_ALIASES = {
    "day": datetime.timedelta(days=1),
    "week": datetime.timedelta(days=7),
    # Bug fix: the key must be an f-string; the previous "{n}h" literal
    # collapsed all 24 entries into a single "{n}h" key.
    **{f"{n}h": datetime.timedelta(hours=n) for n in range(1, 25)},
}
class Result(NamedTuple):
    """A cached value tagged with the timestamp at which it was computed."""
    # The wrapped function's return value.
    value: object
    # time.time() when the value was produced; used for TTL expiry checks.
    time: float
# References and similar projects
# - http://joblib.readthedocs.io/
# - https://cachetools.readthedocs.io/
# - https://github.com/lonelyenvoy/python-memoization
# noinspection PyUnresolvedReferences
# noinspection PyUnresolvedReferences
@curry(2)
def ttl_cache(key, fn, *, timeout=6 * 3600, memory=None, **cache_kwargs):
    """
    Decorator that creates a cached version of function that stores results
    in disk for the given timeout (in seconds).

    Args:
        key:
            Name of memory cache used to store computed results.
        fn:
            The decorated function.
        timeout:
            Maximum time the item is kept in cache (in seconds).
            Defaults to 6 hours.
        memory:
            A provider of Memory objects. The provider is a function that
            receives a key and returns a joblib Memory object.
        cache_kwargs:
            Extra keyword arguments forwarded to ``Memory.cache``.

    Returns:
        A decorated function that stores items in the given cache for the given
        timeout.

    Examples:
        >>> @ttl_cache("my-cache", timeout=3600)
        ... def expensive_function(url):
        ...     # Some expensive function, possibly touching the internet...
        ...     response = requests.get(url)
        ...     ...
        ...     return pd.DataFrame(response.json())

    Notes:
        Each pair of (cache name, function name) must be unique. It cannot
        decorate multiple lambda functions or callable objects with no __name__
        attribute.
    """
    mem = normalize_memory(memory, key)

    # We need to wrap fn into another decorator to preserve its name and avoid
    # confusion with joblib's cache. This function just wraps the result of fn
    # into a Result() instance with the computation timestamp as metadata.
    @mem.cache(**cache_kwargs)
    @wraps(fn)
    def cached(*args, **kwargs):
        return Result(fn(*args, **kwargs), time())

    # Now the decorated function asks for the result in the cache, checks
    # if it is within the given timeout and returns or recomputes the value.
    @wraps_with_cache(fn, cached)
    def decorated(*args, **kwargs):
        mem_item = cached.call_and_shelve(*args, **kwargs)
        result: Result = mem_item.get()
        # Entry expired: drop the shelved item and recompute.
        if result.time + timeout < time():
            mem_item.clear()
            result = cached(*args, **kwargs)
        return result.value

    decorated.clear = mem.clear
    decorated.prune = mem.reduce_size
    return decorated
@curry(2)
def disk_cache(key, fn, memory=None):
    """
    A simple in-disk cache.

    Can be used directly as ``disk_cache(key, fn)`` to wrap a function, or
    as a decorator via ``@disk_cache(key)``.
    """
    mem = normalize_memory(memory, key)
    return mem.cache(fn)
@curry(2)
def period_cache(
    key: str,
    fn: callable,
    *,
    period: Union[str, int, datetime.timedelta],
    memory=None,
    fallback: Sequence[Type[Exception]] = None,
):
    """
    Keeps value in cache within n intervals of the given time delta.

    Args:
        key:
            Name of memory cache used to store computed results.
        fn:
            The decorated function.
        period:
            Time period in which the cache expires. Can be given as a timedelta,
            an integer (in seconds) or a string in the set {'day', 'week', '1h',
            '2h', ..., '24h'}.
        memory:
            A provider of Memory objects. The provider is a function that
            receives a key and returns a joblib Memory object.
        fallback:
            If an exception or list of exceptions, correspond to the kinds of
            errors that triggers the cache to check previously stored responses.
            There is nothing that guarantees that the old values will still
            be present, but it gives a second attempt that may hit the cache
            or call the function again.

    Examples:
        >>> @period_cache("numeric", period="day")
        ... def fn(x):
        ...     print('Doing really expensive computation...')
        ...     return ...
    """
    mem = normalize_memory(memory, key)

    # Read the period and build key(): a function that returns the number of
    # whole periods elapsed since midnight today. That integer is part of the
    # cache key, so each period gets its own cache bucket.
    date = today()
    ref_time = datetime.datetime(date.year, date.month, date.day).timestamp()
    if isinstance(period, str):
        # Bug fix: timedelta.seconds is only the seconds *component* of the
        # delta (0 for 'day', 'week' and '24h'), which made the modular
        # arithmetic below divide by zero. total_seconds() is the full span.
        period = PERIOD_ALIASES[period].total_seconds()
    period = int(period)
    get_time = time
    key = lambda: int(get_time() - ref_time) // period

    # The main cached function. This is stored only internally and the function
    # exposed to the user fixes the _cache_bust and _recur parameters to the
    # correct values.
    fallback = tuple(as_seq(fallback)) if fallback else ImpossibleError

    @mem.cache
    def cached(_cache_bust, _recur, *args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except fallback:
            # On an expected failure, retry once against the previous period's
            # bucket in the hope of hitting a stale-but-present entry.
            if _recur > 0:
                return cached(_cache_bust - 1, _recur - 1, *args, **kwargs)
            raise

    # Exposed function: always queries the current period's bucket.
    @wraps_with_cache(fn, cached)
    def decorated(*args, **kwargs):
        return cached(key(), 1, *args, **kwargs)

    return decorated
class ImpossibleError(Exception):
    """
    It is an error to raise this exception, do not use it!

    Serves as the default 'fallback' value in period_cache, so that the
    corresponding ``except`` clause can never match.
    """
def wraps_with_cache(fn, cache=None):
    """
    Like functools.wraps, but also copies the cache methods created either
    by lru_cache or by joblib.Memory.cache.

    NOTE(review): the attributes are set on the wraps() partial object
    itself, not on the function it later decorates — confirm this is the
    intended behavior.
    """
    source = cache or fn
    decorator = wraps(fn)
    for cache_attr in ("cache_info", "clear_cache"):
        if hasattr(source, cache_attr):
            setattr(decorator, cache_attr, getattr(source, cache_attr))
    return decorator
def normalize_memory(memory, key: str) -> joblib.Memory:
    """
    Resolve *memory* into a joblib Memory object for the given key.

    Accepts a ready-made Memory instance (returned unchanged), a provider
    callable, or None (falls back to the global provider).
    """
    if isinstance(memory, joblib.Memory):
        return memory
    provider = get_global_memory_provider() if memory is None else memory
    return provider(key)
def get_global_memory_provider():
    """
    Return the global memory provider, raising RuntimeError if it was
    never configured via set_global_memory_provider().
    """
    if MEMORY_PROVIDER is not None:
        return MEMORY_PROVIDER
    raise RuntimeError('must initialize the global memory provider before continuing')
def set_global_memory_provider(func: MemoryProvider):
    """
    Set the global memory provider function.

    Args:
        func: callable mapping a cache key/name to a joblib Memory instance.
    """
    global MEMORY_PROVIDER
    MEMORY_PROVIDER = func
def today(n=0) -> datetime.date:
    """
    Return today's date, optionally shifted by *n* days.
    """
    current = datetime.datetime.now().date()
    return current + datetime.timedelta(days=n) if n else current
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/05_data.block.ipynb (unless otherwise specified).
__all__ = ['DQN', 'TestDataset']
# Cell
# Python native modules
import os
# Third party libs
from fastcore.all import *
from fastai.torch_basics import *
from fastai.data.all import *
from fastai.basics import *
from torch.utils.data import Dataset
from torch import nn
# Local modules
# Cell
class DQN(Module):
    """Tiny two-layer policy network: 4 observation values -> 2 action scores."""
    def __init__(self):
        hidden = 50
        self.policy = nn.Sequential(
            nn.Linear(4, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 2),
        )

    def forward(self, x):
        scores = self.policy(x)
        # NOTE(review): argmax over dim=0 assumes a single unbatched state
        # vector — confirm callers never pass a batch.
        return torch.argmax(scores, dim=0)
# Cell
class TestDataset(Dataset):
    """Fixed-length CartPole rollout dataset.

    Each __getitem__ call steps the environment with the action chosen by
    ``policy`` on the current state and returns the resulting observation.
    """
    def __init__(self, policy, device='cpu'):
        self.policy = policy
        # NOTE(review): `gym` is used here but never imported in this module —
        # confirm it reaches this scope at runtime (e.g. via a star import).
        self.env = gym.make('CartPole-v1')
        self.next_state = self.env.reset()
        # Bug fix: the passed-in `device` was previously ignored ('cpu' was
        # hard-coded regardless of the argument).
        self.device = device

    def __len__(self): return 100

    def __getitem__(self, idx):
        # (removed leftover debug print of id(self.env))
        action = self.policy(Tensor(self.next_state).to(self.device)).numpy()
        self.next_state, r, is_done, _ = self.env.step(action)
        if is_done:
            self.next_state = self.env.reset()
        return self.next_state
# Cell
# Python native modules
import os
# Third party libs
from fastcore.all import *
from fastai.torch_basics import *
from fastai.data.all import *
from fastai.basics import *
from torch.utils.data import Dataset
from torch import nn
# Local modules
from .block import * |
from functools import wraps
import inspect
def traceable(*args):
    """Decorator that publishes every call of the wrapped function to the
    EventBroker singleton.

    Usable both without an argument (``@traceable``, event name defaults to
    the function's qualified name) and with an explicit event name
    (``@traceable('event-foo')``). Each notification carries the event name,
    the function's qualname, the resolved arguments and the result.
    """
    def _traceable(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            res = f(*args, **kwargs)
            broker = EventBroker.get_instance()
            # Bug fix: inspect.getargspec was removed in Python 3.11;
            # getfullargspec is the drop-in replacement. Also inspect the
            # signature once instead of three times per call.
            spec = inspect.getfullargspec(f)
            # Start with the args that have defaults (aligned from the right)
            args_dict = {}
            if spec.defaults:
                args_dict = dict(
                    zip(reversed(spec.args), reversed(spec.defaults)))
            # Update / insert values for positional args
            args_dict.update(dict(zip(spec.args, args)))
            # Update it with values for named args
            args_dict.update(kwargs)
            broker.notify({
                'name': name,
                'function': f.__qualname__,
                'arguments': args_dict,
                'result': res
            })
            return res
        return decorated
    if len(args) == 1 and callable(args[0]):
        # No event name override in the args:
        #   @traceable
        #   def foo()
        f, = args
        name = f.__qualname__
        return _traceable(f)
    else:
        # Event name is overriden:
        #   @traceable('event-foo')
        #   def foo()
        name, = args
        return _traceable
class EventBroker(object):
    """Minimal synchronous publish/subscribe hub used as a process-wide
    singleton (see get_instance)."""

    _instance = None

    def __init__(self):
        self.listeners = []

    def add(self, f):
        """Subscribe callback *f* to all future notifications."""
        self.listeners.append(f)

    def remove(self, f):
        """Unsubscribe a previously added callback."""
        self.listeners.remove(f)

    def notify(self, arguments):
        """Invoke every registered listener with *arguments*."""
        for callback in self.listeners:
            callback(arguments)

    @classmethod
    def get_instance(cls):
        """Return the lazily created singleton broker."""
        if cls._instance is None:
            cls._instance = EventBroker()
        return cls._instance
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
# Seed NumPy so the random fixtures are reproducible across runs.
np.random.seed(102)


class TestNanmedian(unittest.TestCase):
    """Tests paddle.nanmedian against numpy.nanmedian as the reference."""

    def setUp(self):
        """Build fixture arrays covering normal, all-NaN and partial-NaN data."""
        single_axis_shape = (120)  # NOTE: this is the int 120, not a 1-tuple
        multi_axis_shape = (2, 3, 4, 5)
        self.fake_data = {
            "single_axis_normal":
            np.random.uniform(-1, 1, single_axis_shape).astype(np.float32),
            "multi_axis_normal":
            np.random.uniform(-1, 1, multi_axis_shape).astype(np.float32),
            "single_axis_all_nan": np.full(single_axis_shape, np.nan),
            "multi_axis_all_nan": np.full(multi_axis_shape, np.nan),
        }
        # Partial-NaN variants: all positive entries replaced by NaN.
        single_partial_nan = self.fake_data["single_axis_normal"].copy()
        single_partial_nan[single_partial_nan > 0] = np.nan
        multi_partial_nan = self.fake_data["multi_axis_normal"].copy()
        multi_partial_nan[multi_partial_nan > 0] = np.nan
        self.fake_data["single_axis_partial_nan"] = single_partial_nan
        self.fake_data["multi_axis_partial_nan"] = multi_partial_nan
        # Row-wise NaN patterns (even valid counts) in several dtypes.
        row_data = np.random.uniform(-1, 1, multi_axis_shape).astype(np.float32)
        row_data[:, :, :, 0] = np.nan
        row_data[:, :, :2, 1] = np.nan
        row_data[:, :, 2:, 2] = np.nan
        self.fake_data["row_nan_even"] = row_data
        self.fake_data["row_nan_float64"] = row_data.astype(np.float64)
        self.fake_data["row_nan_int64"] = row_data.astype(np.int64)
        self.fake_data["row_nan_int32"] = row_data.astype(np.int32)
        # Column-wise NaN pattern (odd valid counts).
        col_data = np.random.uniform(-1, 1, multi_axis_shape).astype(np.float32)
        col_data[:, :, 0, :] = np.nan
        col_data[:, :, 1, :3] = np.nan
        col_data[:, :, 2, 3:] = np.nan
        self.fake_data["col_nan_odd"] = col_data
        # Use the GPU when the build supports it, CPU otherwise.
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()
        self.axis_candiate_list = [
            None, 0, 2, -1, -2, (1, 2), [0, -1], [0, 1, 3], (1, 2, 3),
            [0, 2, 1, 3]
        ]

    def test_api_static(self):
        """Static-graph mode: all public aliases of nanmedian agree with numpy."""
        data = self.fake_data["col_nan_odd"]
        paddle.enable_static()
        np_res = np.nanmedian(data, keepdims=True)
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', data.shape)
            out1 = paddle.nanmedian(x, keepdim=True)
            out2 = paddle.tensor.nanmedian(x, keepdim=True)
            out3 = paddle.tensor.stat.nanmedian(x, keepdim=True)
            # Reducing over every axis must match the no-axis reduction.
            axis = np.arange(len(data.shape)).tolist()
            out4 = paddle.nanmedian(x, axis=axis, keepdim=True)
            out5 = paddle.nanmedian(x, axis=tuple(axis), keepdim=True)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'X': data},
                          fetch_list=[out1, out2, out3, out4, out5])
        for out in res:
            self.assertTrue(np.allclose(np_res, out, equal_nan=True))

    def test_api_dygraph(self):
        """Dynamic-graph mode: sweep all fixtures and axis combinations."""
        paddle.disable_static(self.place)

        def clean_axis_numpy(axis, shape_len):
            # numpy expects non-negative axes; normalize and deduplicate.
            if isinstance(axis, tuple):
                axis = list(axis)
            if isinstance(axis, list):
                for k in range(len(axis)):
                    if axis[k] < 0:
                        axis[k] += shape_len
                axis = set(axis)
            return axis

        def test_data_case(data):
            for keep_dim in [False, True]:
                if np.isnan(data).all() and keep_dim:
                    # Old numpy versions cannot keep dims for all-NaN input.
                    np_ver = np.version.version.split('.')
                    if int(np_ver[0]) < 1 or int(np_ver[1]) <= 20:
                        print(
                            "This numpy version does not support all nan elements when keepdim is True"
                        )
                        continue
                np_res = np.nanmedian(data, keepdims=keep_dim)
                pd_res = paddle.nanmedian(
                    paddle.to_tensor(data), keepdim=keep_dim)
                self.assertTrue(
                    np.allclose(
                        np_res, pd_res.numpy(), equal_nan=True))

        def test_axis_case(data, axis):
            pd_res = paddle.nanmedian(
                paddle.to_tensor(data), axis=axis, keepdim=False)
            axis = clean_axis_numpy(axis, len(data.shape))
            np_res = np.nanmedian(data, axis=axis, keepdims=False)
            self.assertTrue(np.allclose(np_res, pd_res.numpy(), equal_nan=True))

        for name, data in self.fake_data.items():
            test_data_case(data)
        for axis in self.axis_candiate_list:
            test_axis_case(self.fake_data["row_nan_even"], axis)
            test_axis_case(self.fake_data["col_nan_odd"], axis)
        paddle.enable_static()

    def test_errors(self):
        """Invalid dtype / axis arguments must raise the documented errors."""
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data("X", [10, 12])

            def test_dtype():
                x2 = paddle.fluid.data('X2', [10, 12], 'bool')
                paddle.nanmedian(x2)

            def test_empty_axis():
                paddle.nanmedian(x, axis=[], keepdim=True)

            def test_axis_not_in_range():
                paddle.nanmedian(x, axis=3, keepdim=True)

            def test_duplicated_axis():
                paddle.nanmedian(x, axis=[1, -1], keepdim=True)

            self.assertRaises(TypeError, test_dtype)
            self.assertRaises(ValueError, test_empty_axis)
            self.assertRaises(ValueError, test_axis_not_in_range)
            self.assertRaises(ValueError, test_duplicated_axis)

    def test_dygraph(self):
        """Basic dynamic-graph smoke test with keepdim=True."""
        paddle.disable_static(place=self.place)
        with paddle.fluid.dygraph.guard():
            data = self.fake_data["col_nan_odd"]
            out = paddle.nanmedian(paddle.to_tensor(data), keepdim=True)
        np_res = np.nanmedian(data, keepdims=True)
        self.assertTrue(np.allclose(np_res, out, equal_nan=True))
        paddle.enable_static()

    def test_check_grad(self):
        """Gradient of nanmedian: 1 at the median element (0.5 each for an
        even count of valid elements), 0 elsewhere."""
        paddle.disable_static(place=self.place)
        shape = (4, 5)
        x_np = np.random.uniform(-1, 1, shape).astype(np.float64)
        x_np[0, :] = np.nan
        x_np[1, :3] = np.nan
        x_np[2, 3:] = np.nan
        x_np_sorted = np.sort(x_np)
        nan_counts = np.count_nonzero(np.isnan(x_np).astype(np.int32), axis=1)
        np_grad = np.zeros((shape))
        for i in range(shape[0]):
            valid_cnts = shape[1] - nan_counts[i]
            if valid_cnts == 0:
                continue
            mid = int(valid_cnts / 2)
            targets = [x_np_sorted[i, mid]]
            is_odd = valid_cnts % 2
            if not is_odd and mid > 0:
                # Even count: the median averages two elements.
                targets.append(x_np_sorted[i, mid - 1])
            for j in range(shape[1]):
                if x_np[i, j] in targets:
                    np_grad[i, j] = 1 if is_odd else 0.5
        x_tensor = paddle.to_tensor(x_np, stop_gradient=False)
        y = paddle.nanmedian(x_tensor, axis=1, keepdim=True)
        dx = paddle.grad(y, x_tensor)[0].numpy()
        self.assertTrue(np.allclose(np_grad, dx, equal_nan=True))
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
from django.db import models
from site_reserva import settings
import requests
class Pessoa(models.Model):
    """A person (military member) who can borrow or lend material."""
    # Rank/grade choices, from highest to lowest.
    posto_graduacao_choices = (
        ('Gen Ex', 'Gen Ex'),
        ('Gen Div', 'Gen Div'),
        ('Gen Bda', 'Gen Bda'),
        ('Cel', 'Cel'),
        ('TC', 'TC'),
        ('Maj', 'Maj'),
        ('Cap', 'Cap'),
        ('1º Ten', '1º Ten'),
        ('2º Ten', '2º Ten'),
        ('Asp', 'Asp'),
        ('ST', 'ST'),
        ('1º Sgt', '1º Sgt'),
        ('2º Sgt', '2º Sgt'),
        ('3º Sgt', '3º Sgt'),
        ('Cb', 'Cb'),
        ('Sd', 'Sd'),
        ('Sd EV', 'Sd EV'),
    )
    nome_completo = models.CharField(max_length=100)
    nome_guerra = models.CharField(max_length=100)
    posto_graduacao = models.CharField(max_length=20, choices=posto_graduacao_choices, default='3º Sgt')
    identidade_civil = models.CharField(max_length=9)
    identidade_militar = models.CharField(max_length=10, help_text='Campo utilizado para identificar a pessoa no sistema')
    cpf = models.CharField(max_length=11)
    # Store the password hash, not the plain-text password.
    senha = models.CharField(max_length=64, help_text='Funcionará como a assinatura da pessoa')
    quartel_atual = models.ForeignKey('Quartel', on_delete=models.PROTECT)
    telefone_pessoal = models.CharField(max_length=11, help_text='Principal meio utilizado para estabelecer contato com a pessoa')
    telefone_quartel = models.CharField(max_length=10)
    email = models.EmailField(help_text='Usado para enviar avisos antes do vencimento da cautela')
    foto = models.ImageField(upload_to='reserva_material/images/pessoa/', null=True, blank=True, help_text='Imagem da pessoa')

    def save(self, *args, **kwargs):
        """Fetch the person's photo from the DGP service, then save.

        Bug fixes: the downloaded image file is now closed via a context
        manager (the handle used to leak), and the redundant status_code
        check was removed — raise_for_status() already raises for any
        non-2xx response.
        """
        url = 'http://informacoesdopessoal.dgp.eb.mil.br/almq1/fichas/foto_fi.asp?ID='
        info = self.identidade_militar
        # NOTE(review): no timeout is set; a hung DGP server will block save().
        res = requests.get(url + info)
        res.raise_for_status()
        if len(res.text) != 0:
            with open('reserva_material/media/images/pessoa/' + info + '.jpg', 'wb') as imagem:
                for chunk in res.iter_content(100000):
                    imagem.write(chunk)
            self.foto = 'images/pessoa/' + info + '.jpg'
        super(Pessoa, self).save(*args, **kwargs)

    def __str__(self):
        texto = '%s %s - %s (%s)' % (self.posto_graduacao, self.nome_guerra, self.quartel_atual, self.telefone_pessoal)
        return texto
class Quartel(models.Model):
    """A military unit (barracks) that people belong to."""
    nome_quartel = models.CharField(max_length=100, help_text='Usado para gerar documentos')
    sigla = models.CharField(max_length=100, help_text='Usado para facilitar a busca')

    class Meta:
        verbose_name_plural = 'quarteis'

    def __str__(self):
        return self.nome_quartel
class Material(models.Model):
    """An inventory item that can be loaned out, serviced or marked unavailable."""
    nome_material = models.CharField(max_length=100)
    descricao = models.TextField(null=True, blank=True)
    # Status flags; kept as separate booleans per the original design.
    em_reserva = models.BooleanField(default=True, help_text='O material encontra-se na reserva?')
    em_cautela = models.BooleanField(default=False, help_text='O material encontra-se cautelado?')
    em_manutencao = models.BooleanField(default=False, help_text='O material encontra-se em manutenção?')
    indisponivel = models.BooleanField(default=False, help_text='O material encontra-se indisponível?')
    numero_serie = models.CharField(max_length=20, null=True, blank=True)
    foto = models.ImageField(upload_to='images/material/', null=True, blank=True, help_text='Imagem do material')

    class Meta:
        verbose_name_plural = 'materiais'

    def __str__(self):
        # Append the serial number only when one is recorded.
        label = self.nome_material
        if self.numero_serie:
            label = '%s (SN: %s)' % (self.nome_material, self.numero_serie)
        return label
class CautelaManager(models.Manager):
    """Custom manager with convenience queries for Cautela."""

    def contar_cautelas(self, pessoa_retirou):
        # Number of loans taken out by the given person.
        loans = self.filter(pessoa_retirou=pessoa_retirou)
        return loans.count()
class Cautela(models.Model):
    """A loan record: who borrowed which material, from whom, and when it is due."""
    # Borrower and lender are both Pessoa rows; related_name='+' disables
    # the reverse relations.
    pessoa_retirou = models.ForeignKey('Pessoa', on_delete=models.PROTECT, related_name='+', null=False, blank=False)
    pessoa_emprestou = models.ForeignKey('Pessoa', on_delete=models.PROTECT, related_name='+', null=False, blank=False)
    material_cautelado = models.ForeignKey('Material', on_delete=models.PROTECT, null=False, blank=False)
    quantitativo = models.IntegerField(default=1, null=False, blank=False, help_text='Quantidade de material cautelado')
    inicio_cautela = models.DateTimeField(auto_now_add=True, editable=True, help_text='')
    fim_cautela = models.DateTimeField(editable=True, help_text='Padrão de 30 dias')
    vencida = models.BooleanField(default=False, help_text='Sinaliza se a cautela está vencida')
    data_devolucao = models.DateTimeField(editable=True, null=True, help_text='Quando foi devolvido')
    devolvido = models.BooleanField(default=False)
    # Custom manager providing contar_cautelas().
    objects = CautelaManager()
|
class Solution:
    """Expression Add Operators: insert '+', '-', '*' (or nothing) between
    the digits of ``num`` so the expression evaluates to ``target``."""

    def recurse(self, num, target, index, prev, cur, val, s, ans):
        """Depth-first search over operator insertions.

        Args:
            num: the digit string.
            target: the value expressions must evaluate to.
            index: next digit position to consume.
            prev: value of the last committed operand (signed), used to undo
                and redo the running total when '*' binds tighter.
            cur: digits accumulated for the operand currently being built.
            val: running value of the expression committed so far.
            s: expression tokens; s[0] is a sentinel '+' stripped on output.
            ans: output list of matching expression strings.
        """
        if index == len(num):
            # Only accept complete expressions (no operand left pending).
            if val == target and cur == 0:
                ans.append(''.join(s[1:]))
            return
        cur = cur * 10 + int(num[index])
        strop = str(cur)
        # Extend the current operand with another digit — unless that would
        # create a leading zero (cur == 0 means the operand so far is '0').
        if cur > 0:
            self.recurse(num, target, index + 1, prev, cur, val, s, ans)
        # '+': also acts as the (stripped) pseudo-operator before the very
        # first operand.
        s.append('+')
        s.append(strop)
        self.recurse(num, target, index + 1, cur, 0, val + cur, s, ans)
        s.pop()
        s.pop()
        # '-' and '*' are only valid between two operands, so skip them for
        # the first operand. (Bug fix: '*' was previously also tried at the
        # first position with prev == 0, producing spurious/duplicate
        # expressions whenever target == 0, e.g. for num='0'.)
        if len(s) > 0:
            s.append('-')
            s.append(strop)
            self.recurse(num, target, index + 1, -cur, 0, val - cur, s, ans)
            s.pop()
            s.pop()
            # '*' binds tighter: undo the previous operand's contribution and
            # add the product instead.
            s.append('*')
            s.append(strop)
            self.recurse(num, target, index + 1, cur * prev, 0, val - prev + (cur * prev), s, ans)
            s.pop()
            s.pop()

    def addOperators(self, num, target):
        """Return all valid operator insertions of num that evaluate to target."""
        ans = []
        self.recurse(num, target, 0, 0, 0, 0, [], ans)
        return ans
if __name__ == "__main__":
    # Demo: print the solutions for two sample inputs.
    solver = Solution()
    for digits, goal in (('123', 6), ('232', 8)):
        print(solver.addOperators(digits, goal))
|
from django.conf import settings
from django.conf.urls import include
from django.contrib import admin
from django.urls import path
# Import each installed app's admin module so its models get registered.
admin.autodiscover()
urlpatterns = [
    path("admin/", admin.site.urls),
    path("api/", include("evan.api.urls")),
    path("", include("evan.site.urls")),
]
if settings.DEBUG:
    # Development-only extras: debug toolbar routes and static file serving.
    import debug_toolbar
    urlpatterns = [
        path("__debug__/", include(debug_toolbar.urls)),
    ] + urlpatterns
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns
    urlpatterns += staticfiles_urlpatterns()
# error handlers
handler500 = "evan.site.views.server_error"
|
from pypy.interpreter.mixedmodule import MixedModule
class ErrorsModule(MixedModule):
    "Definition of pyexpat.errors module."
    appleveldefs = {}
    interpleveldefs = {}
    def setup_after_space_initialization(self):
        # Populate the applevel module lazily: the Expat error constants are
        # only available once interp_pyexpat has been set up in this space.
        from pypy.module.pyexpat import interp_pyexpat
        space = self.space
        # Three mappings for errors: the module contains errors
        # message by symbol (errors.XML_ERROR_SYNTAX == 'syntax error'),
        # codes is a dict mapping messages to numeric codes
        # (errors.codes['syntax error'] == 2), and messages is a dict
        # mapping numeric codes to messages (messages[2] == 'syntax error').
        w_codes = space.newdict()
        w_messages = space.newdict()
        for name in interp_pyexpat.xml_error_list:
            w_name = space.newtext(name)
            num = getattr(interp_pyexpat, name)
            w_num = space.newint(num)
            w_message = interp_pyexpat.ErrorString(space, num)
            # expose each error symbol as an attribute whose value is its message
            space.setattr(self, w_name, w_message)
            space.setitem(w_codes, w_message, w_num)
            space.setitem(w_messages, w_num, w_message)
        space.setattr(self, space.newtext("codes"), w_codes)
        space.setattr(self, space.newtext("messages"), w_messages)
class ModelModule(MixedModule):
    "Definition of pyexpat.model module."
    appleveldefs = {}
    interpleveldefs = {}
    def setup_after_space_initialization(self):
        # Mirror every constant named in interp_pyexpat.xml_model_list as an
        # applevel attribute of the pyexpat.model module.
        from pypy.module.pyexpat import interp_pyexpat
        space = self.space
        for constant_name in interp_pyexpat.xml_model_list:
            constant_value = getattr(interp_pyexpat, constant_name)
            space.setattr(self, space.newtext(constant_name), space.wrap(constant_value))
class Module(MixedModule):
    "Python wrapper for Expat parser."
    appleveldefs = {
    }
    interpleveldefs = {
        'ParserCreate': 'interp_pyexpat.ParserCreate',
        'XMLParserType': 'interp_pyexpat.W_XMLParserType',
        'ErrorString': 'interp_pyexpat.ErrorString',
        'ExpatError': 'space.fromcache(interp_pyexpat.Cache).w_error',
        'error': 'space.fromcache(interp_pyexpat.Cache).w_error',
    }
    submodules = {
        'errors': ErrorsModule,
        'model': ModelModule,
    }
    # Expose the XML_PARAM_ENTITY_PARSING_* enum values as interp-level
    # integer constants (this loop runs at class-creation time).
    for name in ['XML_PARAM_ENTITY_PARSING_NEVER',
                 'XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE',
                 'XML_PARAM_ENTITY_PARSING_ALWAYS']:
        interpleveldefs[name] = 'space.newint(interp_pyexpat.%s)' % (name,)
    def __init__(self, space, w_name):
        "NOT_RPYTHON"
        from pypy.module.pyexpat import interp_pyexpat
        super(Module, self).__init__(space, w_name)
        # Sanity-check the linked Expat build; per the assert message this
        # rejects the wide (UTF-16) variant — presumably its version string
        # is shorter than 5 characters (confirm against get_expat_version).
        ver = space.unwrap(interp_pyexpat.get_expat_version(space))
        assert len(ver) >= 5, (
            "Cannot compile with the wide (UTF-16) version of Expat")
    def startup(self, space):
        # Publish Expat's version to applevel as EXPAT_VERSION / version_info.
        from pypy.module.pyexpat import interp_pyexpat
        w_ver = interp_pyexpat.get_expat_version(space)
        space.setattr(self, space.newtext("EXPAT_VERSION"), w_ver)
        w_ver = interp_pyexpat.get_expat_version_info(space)
        space.setattr(self, space.newtext("version_info"), w_ver)
|
from django.conf.urls import url
from rest_framework import routers
from cameras.views import CameraViewSet
# Register the camera endpoints on DRF's DefaultRouter (which also provides
# the browsable API root view) and expose the generated routes.
router = routers.DefaultRouter()
router.register(r'camera', CameraViewSet)
urlpatterns = router.urls
|
import time
from playground.slam.frame import Frame
from playground.slam.posegraph import PoseGraph
class BackEnd:
    """Graph-SLAM back-end: rebuilds a pose graph from the frames' ICP
    estimates and optimizes it once a loop closure is available."""
    def __init__(self, edge_sigma, angle_sigma):
        # The same translational noise sigma is used for both x and y edges.
        self.__pose_graph = PoseGraph(edge_sigma_x=edge_sigma, edge_sigma_y=edge_sigma,
                                      edge_sigma_angle=angle_sigma)
    def update_frames(self, frames: list[Frame], loop_frame: Frame):
        """
        Optimize pose graph and update frame positions with an assumption that we've detected a loop closure
        """
        start_time = time.perf_counter()
        print('Pose Graph optimization started...')
        self.__pose_graph.clear()
        vertex_index = self.__pose_graph.prior_pose_index + 1
        for frame in frames:
            # NOTE(review): vertex positions read index 0 as ty / index 1 as
            # tx, while the ICP-relative edges below use the opposite order
            # (and rotations are transposed) — looks like a deliberate axis
            # convention swap, but confirm against Frame/PoseGraph.
            ty = frame.position[0]
            tx = frame.position[1]
            rot = frame.rotation[:2, :2]
            self.__pose_graph.add_vertex(vertex_index, ty, tx, rot.T)
            edge_ty = frame.relative_icp_position[1]
            edge_tx = frame.relative_icp_position[0]
            edge_rot = frame.relative_icp_rotation[:2, :2]
            # odometry-style factor linking consecutive frames
            self.__pose_graph.add_factor_edge(vertex_index - 1, vertex_index, edge_ty, edge_tx, edge_rot.T)
            vertex_index += 1
        # add the loop closure constraint
        # This factor encodes the fact that we have returned to the same pose. In real
        # systems, these constraints may be identified in many ways, such as appearance-based
        # techniques with camera images.
        loop_ty = loop_frame.relative_icp_position[1]
        loop_tx = loop_frame.relative_icp_position[0]
        loop_rot = loop_frame.relative_icp_rotation[:2, :2]
        self.__pose_graph.add_factor_edge(vertex_index - 1,
                                          self.__pose_graph.prior_pose_index + 1,
                                          loop_ty, loop_tx, loop_rot.T)
        self.__pose_graph.optimize()
        # write the optimized poses back into the frames
        vertex_index = self.__pose_graph.prior_pose_index + 1
        for frame in frames:
            tx, ty, rot = self.__pose_graph.get_pose_at(vertex_index)
            frame.position[:2] = ty, tx
            frame.rotation = rot.T
            vertex_index += 1
        end_time = time.perf_counter()
        print(f'Pose Graph optimization finished in {end_time - start_time} seconds')
|
import requests
import yaml
import json
import os
from pathlib import Path
class BaseClient:
    """Minimal DataHen REST API client.

    Token resolution order (first match wins): the DATAHEN_TOKEN environment
    variable, the `api_token` key of ~/.datahen.yml, then the `auth_token`
    constructor argument.
    """
    CONFIG_PATH = Path(f"{str(Path.home())}/.datahen.yml")

    def __init__(self, auth_token=None):
        self._config = self._load_yaml_config()
        self._init_token(auth_token)
        self._base_headers = {
            "Authorization": f"Bearer {self._auth_token}",
            "Content-Type": "application/json",
        }
        self._base_api_url = "https://app.datahen.com/api/v1"

    def _load_yaml_config(self):
        """Parse ~/.datahen.yml; always return a dict.

        Bug fix: the original fell off the end (returning None) when the file
        was missing, and returned None for an empty file — both made the
        `'api_token' in self._config` test in _init_token raise TypeError.
        """
        if not self.CONFIG_PATH.exists():
            return {}
        with open(self.CONFIG_PATH, 'r') as f:
            try:
                yaml_data = yaml.safe_load(f)
            except yaml.scanner.ScannerError:
                yaml_data = {}
        # safe_load returns None for an empty document
        return yaml_data or {}

    def _init_token(self, auth_token):
        """Resolve the auth token (env var > config file > argument)."""
        if 'DATAHEN_TOKEN' in os.environ:
            self._auth_token = os.environ['DATAHEN_TOKEN']
        elif 'api_token' in self._config:
            self._auth_token = self._config['api_token']
        elif auth_token is not None:
            self._auth_token = auth_token
        else:
            raise ValueError("Datahen token was not defined")

    def get(self, relative_url, params=None):
        """GET `relative_url` and return the parsed JSON response.

        `page`/`per_page` are translated to the API's `p`/`pp` query keys and
        no longer also sent under their original names (the previous version
        sent both). Boolean values serialize to the strings 'true'/'false'.
        """
        url = f"{self._base_api_url}{relative_url}"
        # copy so the caller's dict is never mutated (this also fixes the
        # shared-mutable-default-argument bug of `params={}`)
        remaining = dict(params) if params else {}
        query = {}
        if 'page' in remaining:
            query['p'] = remaining.pop('page')
        if 'per_page' in remaining:
            query['pp'] = remaining.pop('per_page')
        for key, value in remaining.items():
            # `is True` (not `== True`) so integer 1/0 values pass through
            if value is True:
                query[key] = 'true'
            elif value is False:
                query[key] = 'false'
            else:
                query[key] = value
        r = requests.get(url, headers=self._base_headers, params=query)
        return r.json()

    def _send(self, method, relative_url, params):
        """Issue a JSON-body request via requests.<method>; return parsed JSON."""
        url = f"{self._base_api_url}{relative_url}"
        body = json.dumps(dict(params) if params else {})
        r = getattr(requests, method)(url, headers=self._base_headers, data=body)
        return r.json()

    def post(self, relative_url, params=None):
        """POST `params` as a JSON body; returns parsed JSON."""
        return self._send('post', relative_url, params)

    def put(self, relative_url, params=None):
        """PUT `params` as a JSON body; returns parsed JSON."""
        return self._send('put', relative_url, params)

    def delete(self, relative_url, params=None):
        """DELETE with `params` as a JSON body; returns parsed JSON."""
        return self._send('delete', relative_url, params)
|
class Solution(object):
    def isPalindrome(self, a):
        """
        :type s: str
        :rtype: bool

        A string is a palindrome here if its alphanumeric characters,
        compared case-insensitively, read the same forwards and backwards.
        """
        # Keep only alphanumerics, lower-cased, then compare with the
        # reversal. List equality short-circuits on the first mismatch,
        # unlike the original, which kept scanning every remaining pair
        # after the answer was already known.
        cleaned = [ch.lower() for ch in a if ch.isalnum()]
        return cleaned == cleaned[::-1]
"""
The MIT License (MIT)
Copyright (c) 2018 Zuse Institute Berlin, www.zib.de
Permissions are granted as stated in the license file you have obtained
with this software. If you find the library useful for your purpose,
please refer to README.md for how to cite IPET.
@author: Gregor Hendel
"""
from PyQt4.QtGui import QFrame, QWidget, QLabel,\
QApplication, QKeySequence, QFileDialog, \
QVBoxLayout, QHBoxLayout
from .IPetTreeView import IpetTreeView
from .EditableForm import EditableForm
from PyQt4.QtCore import Qt, SIGNAL
from PyQt4.Qt import QLayout, QTabWidget
from ipet.evaluation.IPETEvalTable import IPETEvaluation, IPETEvaluationColumn
import sys
from ipet.misc import misc
from ipet.evaluation.Aggregation import Aggregation
from ipet.evaluation.IPETFilter import IPETFilterGroup, IPETValue
from ipet.evaluation.IPETFilter import IPETFilter
from .IpetMainWindow import IpetMainWindow
from .EditableBrowser import EditableBrowser
from .IPETApplicationTab import IPETApplicationTab
from .SimpleQIPETDataView import IPETDataTableView
from . import ExperimentManagement
class EvaluationEditorWindow(IPETApplicationTab):
    """Tab widget for interactively editing an IPET evaluation tree and
    viewing the resulting instance-level and aggregated tables."""
    # running counters used to generate unique default names for new elements
    addedcolumns = 0
    addedfiltergroups = 0
    addedfilters = 0
    addedaggregations = 0
    addedinstances = 0
    def __init__(self, parent=None):
        """
        Constructor
        """
        super(EvaluationEditorWindow, self).__init__(parent)
        self.browser = EditableBrowser(self)
        self.evaluation = None        # currently edited IPETEvaluation, if any
        self.filename = None          # last save/load path, reused by saveEvaluation
        self.lastfiltergroup = None   # filter group whose data is currently shown
        vlayout = QVBoxLayout()
        layout = QHBoxLayout()
        layout.addWidget(self.browser)
        layout.setSizeConstraint(QLayout.SetMaximumSize)
        vlayout.addLayout(layout)
        # two result tabs: per-instance data and aggregated data
        tabwidget = QTabWidget(self)
        self.tableview = IPETDataTableView(None, self)
        self.aggtableview = IPETDataTableView(None, self)
        tabwidget.addTab(self.tableview, ("Instances"))
        tabwidget.addTab(self.aggtableview, ("Aggregated"))
        vlayout.addWidget(tabwidget)
        self.setLayout(vlayout)
        self.defineActions()
        self.initConnections()
        self.passGroupToTableViews()
    def initConnections(self):
        # refresh actions/table views whenever the selected tree item changes
        self.connect(self.browser, SIGNAL(EditableBrowser.ITEMEVENT), self.itemChanged)
    def setEvaluation(self, evaluation):
        """Make `evaluation` the edited root element and refresh form options."""
        self.browser.setRootElement(evaluation)
        self.evaluation = evaluation
        EditableForm.extendAvailableOptions("datakey", [col.getName() for col in evaluation.getActiveColumns()])
    def defineActions(self):
        """Create all menu/toolbar actions of this tab."""
        self.loadaction = self.createAction("&Load evaluation", self.loadEvaluation, QKeySequence.Open, icon = "Load-icon",
                                       tip="Load evaluation from XML file (current evaluation gets discarded)")
        self.saveaction = self.createAction("&Save evaluation", self.saveEvaluation, QKeySequence.Save, icon = "disk-icon",
                                       tip="Save evaluation to XML format")
        self.saveasaction = self.createAction("&Save evaluation as", self.saveEvaluationAs, QKeySequence.SaveAs, icon = "disk-icon",
                                       tip="Save evaluation to XML format")
        self.addcolaction = self.createAction("Add &Column", self.addColumn, "Alt+C", icon="Letter-C-violet-icon",
                                         tip="Add new column as a child of the currently selected element")
        self.addfiltergroupaction = self.createAction("Add Filter &Group", self.addFilterGroup, "Alt+G", icon="Letter-G-gold-icon",
                                                 tip="Add new filter group as a child of the current evaluation")
        self.addfilteraction = self.createAction("Add &Filter", self.addFilter, "Alt+H", icon="Letter-F-lg-icon",
                                            tip="Add filter as a child of the current filter group")
        self.addaggregationaction = self.createAction("Add &Aggregation", self.addAggregation, "Alt+A", icon="Letter-A-dg-icon",
                                                 tip="Add aggregation as a child for the current top level column")
        self.addinstancenaction = self.createAction("Add &Instance", self.addInstance, "Alt+I", icon="Letter-I-blue-icon",
                                               tip="Add instance as child of the current filter")
        self.deletelementaction = self.createAction("&Delete Element", self.browser.deleteElement, QKeySequence.Delete, "delete-icon",
                                               tip="Delete currently selected element")
        self.reevaluateaction = self.createAction("Reevaluate", self.reevaluate, "F5", icon="reevaluate-icon",
                                             tip="Reevaluate the current evaluation on the _experiment")
    def getMenuActions(self):
        """Return the (menu name, actions) structure for the main window menu."""
        return (("&File", [self.loadaction, self.saveaction, self.saveasaction]),("&Data", [self.reevaluateaction]))
    def getToolBarActions(self):
        """Return the (toolbar name, actions) structure for the main window toolbars."""
        return (("File", [self.saveaction, self.loadaction]),
                ("Evaluation", [self.addcolaction,
                                self.addfiltergroupaction,
                                self.addfilteraction,
                                self.addaggregationaction,
                                self.addinstancenaction,
                                self.deletelementaction]
                 ),
                ("Data", [self.reevaluateaction])
                )
    def addColumn(self):
        """Insert a new, uniquely named column below the selected element."""
        self.updateStatus("Add column")
        self.addedcolumns += 1
        newcolname = "New Column %d"%self.addedcolumns
        newcol = IPETEvaluationColumn(name=newcolname)
        self.browser.addNewElementAsChildOfSelectedElement(newcol)
        EditableForm.extendAvailableOptions("datakey", [col.getName() for col in self.evaluation.getActiveColumns()])
    def addFilterGroup(self):
        """Insert a new, uniquely named filter group below the selected element."""
        self.updateStatus("Add filter group")
        self.addedfiltergroups += 1
        newfiltergroupname = "New Group %d"%self.addedfiltergroups
        newfiltergroup = IPETFilterGroup(newfiltergroupname)
        self.browser.addNewElementAsChildOfSelectedElement(newfiltergroup)
    def addFilter(self):
        """Insert a placeholder filter below the selected filter group."""
        self.updateStatus("Add filter")
        self.addedfilters += 1
        newfilter = IPETFilter(expression1 = "CHANGE", expression2 = "CHANGE")
        self.browser.addNewElementAsChildOfSelectedElement(newfilter)
    def addAggregation(self):
        """Insert a new, uniquely named aggregation below the selected column."""
        self.updateStatus("Add aggregation")
        self.addedaggregations += 1
        newaggregationname = "New Aggregation %d"%self.addedaggregations
        newaggregation = Aggregation(newaggregationname)
        self.browser.addNewElementAsChildOfSelectedElement(newaggregation)
    def addInstance(self):
        """Insert a new, uniquely named instance value below the selected filter."""
        self.updateStatus("Add instance")
        self.addedinstances += 1
        newinstancename = "new Instance %d"%self.addedinstances
        newinstance = IPETValue(newinstancename)
        self.browser.addNewElementAsChildOfSelectedElement(newinstance)
    def loadEvaluation(self):
        """Ask for an XML file and replace the current evaluation with it."""
        thedir = str(".")
        filename = str(QFileDialog.getOpenFileName(self, caption=("%s - Load an evaluation"%QApplication.applicationName()),
                                               directory=thedir, filter=str("XML files (*.xml)")))
        if filename:
            try:
                ev = IPETEvaluation.fromXMLFile(filename)
                message = "Loaded evaluation from %s"%filename
                self.setEvaluation(ev)
            except Exception:
                message = "Error: Could not load evaluation from file %s"%filename
            self.updateStatus(message)
        pass
    def saveEvaluation(self):
        """Save to the remembered file, or ask for one on first save."""
        if self.filename is None:
            filename = str(QFileDialog.getSaveFileName(self, caption=("%s - Save current evaluation"%QApplication.applicationName()),
                                                       directory = str("."), filter=str("XML files (*.xml)")))
        else:
            filename = self.filename
        if not filename:
            return
        misc.saveAsXML(self.evaluation, filename)
        self.filename = filename
        self.updateStatus("Saved evaluation to file %s"%filename)
    def saveEvaluationAs(self):
        """Always ask for a target file, then save and remember it."""
        filename = str(QFileDialog.getSaveFileName(self, caption=("%s - Save current evaluation"%QApplication.applicationName()),
                                                   directory = str("."), filter=str("XML files (*.xml)")))
        if not filename:
            return
        misc.saveAsXML(self.evaluation, filename)
        self.filename = filename
        self.updateStatus("Saved evaluation to file %s"%filename)
    def enableOrDisableActions(self):
        # an "add X" action is only enabled if the selected tree item accepts
        # an element of that class as a child
        for action, addclass in zip([self.addcolaction, self.addfiltergroupaction, self.addfilteraction, self.addaggregationaction, self.addinstancenaction],
                                    [IPETEvaluationColumn(), IPETFilterGroup(), IPETFilter(), Aggregation(), IPETValue()]):
            if self.browser.treewidget.currentItemAcceptsClassAsChild(addclass):
                action.setEnabled(True)
            else:
                action.setEnabled(False)
    def itemChanged(self):
        """React to a tree selection change: update actions and table views."""
        self.enableOrDisableActions()
        self.passGroupToTableViews()
    def setDataFrames(self, tableviewdf, aggtableviewdf):
        """
        sets both data frames and formatters for the views
        """
        self.tableview.setDataFrame(tableviewdf, self.evaluation.getColumnFormatters(tableviewdf))
        self.aggtableview.setDataFrame(aggtableviewdf, self.evaluation.getColumnFormatters(aggtableviewdf))
    def passGroupToTableViews(self):
        """Show the data of the selected filter group, or of all instances."""
        if self.evaluation is None or not self.evaluation.isEvaluated():
            self.updateStatus("Refresh evaluation first to update results")
            return
        selectedfiltergroup = None
        if self.browser.treewidget.getSelectedEditable().__class__ is IPETFilterGroup:
            selectedfiltergroup = self.browser.treewidget.getSelectedEditable()
#         if selectedfiltergroup is not None and selectedfiltergroup.isActive():
#             return
        # only refresh when the selection actually changed group
        if selectedfiltergroup != self.lastfiltergroup:
            if selectedfiltergroup is not None:
                self.updateStatus("Display data for selected filter group \"%s\"" % selectedfiltergroup.getName())
                self.setDataFrames(self.evaluation.getInstanceGroupData(selectedfiltergroup), self.evaluation.getAggregatedGroupData(selectedfiltergroup))
            else:
                self.updateStatus("Display data for all instances")
                self.setDataFrames(self.evaluation.getInstanceData(), self.evaluation.getAggregatedData())
        self.lastfiltergroup = selectedfiltergroup
    def reevaluate(self):
        """Re-run the evaluation on the current experiment and show the results."""
        if self.evaluation is not None:
            rettab, retagg = self.evaluation.evaluate(ExperimentManagement.getExperiment())
            self.setDataFrames(rettab, retagg)
class IpetEvaluationEditorApp(IpetMainWindow):
    """
    This app represents the Editable Browser in a single, executable window
    """
    def __init__(self, parent = None):
        super(IpetEvaluationEditorApp, self).__init__(parent)
        # embed the editor tab as central widget and adopt its menus/toolbars
        self.evaluationeditorwindow = EvaluationEditorWindow()
        self.populateMenu(self.evaluationeditorwindow)
        self.populateToolBar(self.evaluationeditorwindow)
        self.setCentralWidget(self.evaluationeditorwindow)
    def setEvaluation(self, evaluation):
        # delegate to the central editor widget
        self.evaluationeditorwindow.setEvaluation(evaluation)
    def setExperiment(self, _experiment):
        # make the experiment's data keys selectable in editable forms
        EditableForm.extendAvailableOptions("datakey", _experiment.getDatakeys())
if __name__ == "__main__":
    # Stand-alone launch: load a sample evaluation and experiment output,
    # evaluate once, then open the editor window.
    app = QApplication(sys.argv)
    app.setApplicationName("Evaluation editor")
    mainwindow = IpetEvaluationEditorApp()
    ev = IPETEvaluation.fromXMLFile("../test/testevaluate.xml")
    ev.set_defaultgroup('testmode')
    ExperimentManagement.addOutputFiles(["../test/check.short.scip-3.1.0.1.linux.x86_64.gnu.dbg.spx.opt85.testmode.out"])
    ExperimentManagement.getExperiment().collectData()
    mainwindow.setEvaluation(ev)
    mainwindow.setExperiment(ExperimentManagement.getExperiment())
    mainwindow.evaluationeditorwindow.reevaluate()
    IpetMainWindow.getStatusBar().showMessage("I am a working status bar", 5000)
    mainwindow.show()
    sys.exit(app.exec_())
|
from algorithms.Pairwise.PairRank import PairRank
from utils.argparsers.simulationargparser import SimulationArgumentParser
from utils.datasimulation import DataSimulation
def func_pairrank(args, dir_name):
    """Assemble PairRank hyper-parameters from the CLI args, build a unique
    run name under `dir_name`, and launch the data simulation.

    NOTE: relies on the module-level `parser` created in the __main__ block
    below, so this is only callable when the module runs as a script.
    """
    ranker_params = {
        "learning_rate": args.lr,
        "learning_rate_decay": args.lr_decay,
        "update": args.update,
        "_lambda": args.lmbd,
        "alpha": args.alpha,
        "refine": args.refine,
        "rank": args.rank,
        "ind": args.ind,
    }
    sim_args, other_args = parser.parse_all_args(ranker_params)
    # gd-style updates do not use the learning-rate settings, so the run
    # name marks those two fields as "None-None"
    if args.update == "gd" or args.update == "gd_diag" or args.update == "gd_recent":
        ranker_name = "None-None-{}-{}-{}-{}-{}-{}-{}".format(
            args.update, args.lmbd, args.alpha, args.refine, args.rank, args.ind, args.seed,
        )
    else:
        ranker_name = "{}-{}-{}-{}-{}-{}-{}-{}-{}".format(
            args.lr, args.lr_decay, args.update, args.lmbd, args.alpha, args.refine, args.rank, args.ind, args.seed,
        )
    run_name = dir_name + ranker_name
    ranker = [(run_name, PairRank, other_args)]
    sim = DataSimulation(sim_args)
    sim.run(ranker)
def set_sim_and_run(args):
    """Derive the output directory from the simulation arguments and dispatch
    to the runner for the selected algorithm.

    Returns an error string when the algorithm name is not recognized.
    """
    click_model = args.click_models[0]
    impressions = args.n_impressions
    result_count = args.n_results
    algo = args.algo.upper()
    dir_name = "algo/{}/{}/{}/{}/".format(algo, click_model, impressions, result_count)
    if algo == "PAIRRANK":
        return func_pairrank(args, dir_name)
    return "ERROR: algorithm type not valid"
# get input parameters
if __name__ == "__main__":
    DESCRIPTION = "Run script for testing framework."
    # `parser` is intentionally a module-level global: func_pairrank reads it.
    parser = SimulationArgumentParser(description=DESCRIPTION)
    input_args = parser.parse_sim_args()
    set_sim_and_run(input_args)
|
from player import Player # class for creating player character
class Action():
    """A player-invokable command: binds a Player method to a hotkey and a
    display name; extra keyword arguments are stored for the eventual call."""

    def __init__(self, method, name, hotkey, **kwargs):
        self.method = method    # callable executed when the action triggers
        self.hotkey = hotkey    # key the player presses to invoke it
        self.name = name        # human-readable label
        self.kwargs = kwargs    # extra arguments forwarded to `method`

    def __str__(self):
        """Console representation, e.g. 'w: Move North'."""
        return "{}: {}".format(self.hotkey, self.name)
class MoveNorth(Action): # move the player one tile north ('up'), hotkey 'w'
    def __init__(self):
        super().__init__(method=Player.move_north, name='Move North', hotkey='w')
class MoveSouth(Action): # move the player one tile south ('down'), hotkey 's'
    def __init__(self):
        super().__init__(method=Player.move_south, name='Move South', hotkey='s')
class MoveEast(Action): # move the player one tile east ('right'), hotkey 'd'
    def __init__(self):
        super().__init__(method=Player.move_east, name='Move East', hotkey='d')
class MoveWest(Action): # move the player one tile west ('left'), hotkey 'a'
    def __init__(self):
        super().__init__(method=Player.move_west, name='Move West', hotkey='a')
class ViewInventory(Action): # print the player's inventory, hotkey 'e'
    def __init__(self):
        super().__init__(method=Player.print_inventory, name='View Inventory', hotkey='e')
class Attack(Action): # attack an enemy; the target is forwarded via kwargs
    def __init__(self, enemy):
        super().__init__(method=Player.attack, name="Attack", hotkey='q', enemy=enemy)
class Flee(Action): # move to a random adjacent room (tile forwarded via kwargs)
    def __init__(self, tile):
        super().__init__(method=Player.flee, name="Flee", hotkey='f', tile=tile)
class Rest(Action): # do nothing for one turn, hotkey 'r'
    def __init__(self):
        super().__init__(method=Player.rest, name="Rest", hotkey='r')
class ViewStats(Action): # show the player's stats (health etc.); typed command 'stats'
    def __init__(self):
        super().__init__(method=Player.stats, name="View stats", hotkey='stats')
class RestFull(Action): # rest until fully healed (or dead), hotkey 'R'
    def __init__(self):
        super().__init__(method=Player.rr, name="Rest until healed", hotkey='R')
class Save(Action): # save, load or quit the game; typed command 'Menu'
    def __init__(self):
        super().__init__(method=Player.saveGame, name="Save/Load/Quit Game", hotkey='Menu')
|
'''
制作 m 束花所需的最少天数
给你一个整数数组 bloomDay,以及两个整数 m 和 k 。
现需要制作 m 束花。制作花束时,需要使用花园中 相邻的 k 朵花 。
花园中有 n 朵花,第 i 朵花会在 bloomDay[i] 时盛开,恰好 可以用于 一束 花中。
请你返回从花园中摘 m 束花需要等待的最少的天数。如果不能摘到 m 束花则返回 -1 。
示例 1:
输入:bloomDay = [1,10,3,10,2], m = 3, k = 1
输出:3
解释:让我们一起观察这三天的花开过程,x 表示花开,而 _ 表示花还未开。
现在需要制作 3 束花,每束只需要 1 朵。
1 天后:[x, _, _, _, _] // 只能制作 1 束花
2 天后:[x, _, _, _, x] // 只能制作 2 束花
3 天后:[x, _, x, _, x] // 可以制作 3 束花,答案为 3
示例 2:
输入:bloomDay = [1,10,3,10,2], m = 3, k = 2
输出:-1
解释:要制作 3 束花,每束需要 2 朵花,也就是一共需要 6 朵花。而花园中只有 5 朵花,无法满足制作要求,返回 -1 。
示例 3:
输入:bloomDay = [7,7,7,7,12,7,7], m = 2, k = 3
输出:12
解释:要制作 2 束花,每束需要 3 朵。
花园在 7 天后和 12 天后的情况如下:
7 天后:[x, x, x, x, _, x, x]
可以用前 3 朵盛开的花制作第一束花。但不能使用后 3 朵盛开的花,因为它们不相邻。
12 天后:[x, x, x, x, x, x, x]
显然,我们可以用不同的方式制作两束花。
示例 4:
输入:bloomDay = [1000000000,1000000000], m = 1, k = 1
输出:1000000000
解释:需要等 1000000000 天才能采到花来制作花束
示例 5:
输入:bloomDay = [1,10,2,9,3,8,4,7,5,6], m = 4, k = 2
输出:9
提示:
bloomDay.length == n
1 <= n <= 10^5
1 <= bloomDay[i] <= 10^9
1 <= m <= 10^6
1 <= k <= n
'''
from typing import List
'''
思路:筛选法
主要思路:对花束时间影响最大的是开花时间最长的花朵,所以需要从大到小依次尝试排除最大的开花时间,
数组会被排除的开花时间分割成多个区间,直至剩余的区间恰好能满足m个花束要求
1、对开花时间和索引构成的数组进行排序sorted
2、从大到小遍历sorted,当前元素索引为i,如果用i将区间拆分成2个区间后能剩余的区间能满足m个花束要求,继续遍历
判断拆分区间是否能满足花束要求需要维护1个变量remainder,当拆分成的2个区间left,right
如果len(left)//k+len(right)//k < len(原数组)//k remainder需要减1
拆分成的区间需要加入红黑树,便于用i来查找区间
时间复杂度:O(nlogn)
空间复杂度:O(n)
官方解法是二分,因python中没有红黑树,需要参照java版的题解
'''
class Solution:
    def minDays(self, bloomDay: List[int], m: int, k: int) -> int:
        """Minimum number of days to wait until `m` bouquets of `k` adjacent
        flowers can be made; -1 when impossible (LeetCode 1482).

        The original body was an unimplemented stub (`pass`). This binary-
        searches the answer over the range of bloom days: the predicate
        "m bouquets are possible by day d" is monotone in d.
        Time O(n log(max bloomDay)), space O(1).
        """
        if m * k > len(bloomDay):
            return -1  # not enough flowers in total

        def bouquets_by(day: int) -> int:
            """Bouquets obtainable if we wait `day` days (greedy scan)."""
            made = streak = 0
            for bloom in bloomDay:
                if bloom <= day:
                    streak += 1
                    if streak == k:
                        made += 1
                        streak = 0
                else:
                    streak = 0  # adjacency broken by an unbloomed flower
            return made

        lo, hi = min(bloomDay), max(bloomDay)
        while lo < hi:
            mid = (lo + hi) // 2
            if bouquets_by(mid) >= m:
                hi = mid
            else:
                lo = mid + 1
        return lo
|
# Decode a "secret message" read from stdin: each word hides an ASCII code in
# its digits. The decoded character is prepended to the word's non-digit
# characters, then the characters at index 1 and the last index are swapped
# before printing (words of length 1 raise IndexError, as before).
tokens = input().split()

decoded_chars = []
for token in tokens:
    ascii_code = "".join(symbol for symbol in token if symbol.isdigit())
    decoded_chars.append(chr(int(ascii_code)))

stripped_words = []
for token in tokens:
    kept = [symbol for symbol in token if not symbol.isdigit()]
    stripped_words.append("".join(kept))

combined = []
for position in range(len(tokens)):
    combined.append(decoded_chars[position] + stripped_words[position])

for word in combined:
    letters = list(word)
    last = len(letters) - 1
    letters[1], letters[last] = letters[last], letters[1]
    print("".join(letters), end=" ")
|
# Copyright 2014 Cloudwatt
#
# Author: Jordan Pittier <jordan.pittier@cloudwatt.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from designate.tests import TestCase
from designate import utils
LOG = logging.getLogger(__name__)
class Bind9Test(TestCase):
    def test_bind9_zone_ends_with_empty_line(self):
        # The rendered bind9 zone template must end with two consecutive
        # newlines, i.e. a trailing blank line (presumably so generated zone
        # files stay well-formed when concatenated — confirm against template).
        name = ['templates', 'bind9-zone.jinja2']
        resource_string = utils.resource_string(*name)
        self.assertEqual('\n\n', resource_string[-2:])
|
import warnings
import numpy as np
import pygsp as gsp
from pygsp import graphs, filters, reduction
import scipy as sp
from scipy import sparse
from sortedcontainers import SortedList
from loukas_coarsening.maxWeightMatching import maxWeightMatching
import loukas_coarsening.graph_utils as graph_utils
try:
import matplotlib
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
warnings.warn('Warning: plotting functions unavailable')
def coarsen(G, K=10, r=0.5, max_levels=20, method='variation_edges', algorithm='greedy', Uk=None, lk=None, max_level_r=0.99):
    """
    This function provides a common interface for coarsening algorithms that contract subgraphs
    Parameters
    ----------
    G : pygsp Graph
    K : int
        The size of the subspace we are interested in preserving.
    r : float between (0,1)
        The desired reduction defined as 1 - n/N.
    Returns
    -------
    C : np.array of size n x N
        The coarsening matrix.
    Gc : pygsp Graph
        The smaller graph.
    Call : list of np.arrays
        Coarsening matrices for each level
    Gall : list of (n_levels+1) pygsp Graphs
        All graphs involved in the multilevel coarsening
    Example
    -------
    C, Gc, Call, Gall = coarsen(G, K=10, r=0.8)
    """
    r = np.clip(r, 0, 0.999)
    N = G.N
    #TODO(lbethune): fix this heuristic
    K = max(1, min(K, N - 2))
    n, n_target = N, max(2, np.ceil((1-r)*N))
    C = sp.sparse.eye(N, format='csc')
    Gc = G
    Call, Gall = [], []
    Gall.append(G)
    iC = None
    # multilevel loop: coarsen repeatedly until the target size is reached
    for level in range(1,max_levels+1):
        if n <= n_target:
            break
        G = Gc
        # how much more we need to reduce the current graph
        r_cur = np.clip(1-n_target/n, 0.0, max_level_r)
        if 'variation' in method:
            if level == 1:
                if (Uk is not None) and (lk is not None) and (len(lk)>= K):
                    # caller supplied the spectrum: B = Uk[:, :K] * diag(lk^(-1/2))
                    mask = lk<1e-10; lk[mask] = 1; lsinv = lk**(-0.5); lsinv[mask] = 0
                    B = Uk[:,:K] @ np.diag(lsinv[:K])
                else:
                    # smallest Laplacian eigenpairs obtained as the LARGEST
                    # pairs of (offset*I - L), then mapped back via offset - lk
                    offset = 2*np.max(G.dw)
                    T = offset*sp.sparse.eye(G.N, format='csc') - G.L
                    lk, Uk = sp.sparse.linalg.eigsh(T, k=K, which='LM', tol=1e-5)
                    lk = (offset-lk)[::-1]
                    Uk = Uk[:,::-1]
                    mask = lk<1e-10;
                    lk[mask] = 1
                    lsinv = lk**(-1/2)
                    lsinv[mask] = 0
                    B = Uk @ np.diag(lsinv)
                A = B
            else:
                # propagate the preserved subspace through the previous
                # coarsening, then re-normalize w.r.t. the current Laplacian
                B = iC.dot(B)
                d, V = np.linalg.eig(B.T @ (G.L).dot(B))
                mask = np.isclose(d, np.zeros(shape=d.shape))
                d[mask] = 1
                dinvsqrt = d**(-1/2)
                dinvsqrt[mask] = 0
                A = B @ np.diag(dinvsqrt) @ V
            if method == 'variation_edges':
                coarsening_list = contract_variation_edges(G, K=K, A=A, r=r_cur, algorithm=algorithm)
            else:
                coarsening_list = contract_variation_linear(G, K=K, A=A, r=r_cur, mode=method)
        else:
            # proximity-based methods: reduce to a (weighted) matching problem
            weights = get_proximity_measure(G, method, K=K)
            if algorithm == 'optimal':
                # the edge-weight should be light at proximal edges
                weights = -weights
                if 'rss' not in method: weights -= min(weights)
                coarsening_list = matching_optimal(G, weights=weights, r=r_cur)
            elif algorithm == 'greedy':
                coarsening_list = matching_greedy(G, weights=weights, r=r_cur)
        iC = get_coarsening_matrix(G, coarsening_list)
        if iC.shape[1] - iC.shape[0] <= 2: break # avoid too many levels for so few nodes
        C = iC.dot(C)
        Call.append(iC)
        Wc = graph_utils.zero_diag(coarsen_matrix(G.W, iC)) # coarsen and remove self-loops
        Wc = (Wc + Wc.T) / 2 # this is only needed to avoid pygsp complaining for tiny errors
        if not hasattr(G, 'coords'):
            Gc = gsp.graphs.Graph(Wc)
        else:
            Gc = gsp.graphs.Graph(Wc, coords=coarsen_vector(G.coords, iC))
        Gall.append(Gc)
        n = Gc.N
    return C, Gc, Call, Gall
################################################################################
# General coarsening utility functions
################################################################################
def coarsen_vector(x, C):
    """Average a fine-level signal x onto the coarse graph: squaring C
    entry-wise turns its 1/sqrt(nc) entries into 1/nc averaging weights."""
    averaging_op = C.power(2)
    return averaging_op.dot(x)
def lift_vector(x, C):
    """Interpolate a coarse signal x back to the fine graph using the
    lifting operator Pinv = (C D)^T, where D rescales each column of C by
    the inverse of its column sum."""
    #Pinv = C.T; Pinv[Pinv>0] = 1
    inv_colsum = 1 / np.asarray(C.sum(0)).ravel()
    D = sp.sparse.diags(inv_colsum)
    lifting_op = (C.dot(D)).T
    return lifting_op.dot(x)
def coarsen_matrix(W, C):
    """Project a fine-level matrix W onto the coarse graph: Wc = P W P^T,
    where P = C D and D holds the inverse column sums of C (this is the
    same quantity as (Pinv^T) W Pinv with Pinv = P^T)."""
    #Pinv = C.T; #Pinv[Pinv>0] = 1
    inv_colsum = 1 / np.asarray(C.sum(0)).ravel()
    P = C.dot(sp.sparse.diags(inv_colsum))
    return P.dot(W.dot(P.T))
def lift_matrix(W, C):
    """Lift a coarse-level matrix W back to the fine graph via the
    entry-wise square of C (the averaging interpolator)."""
    interp = C.power(2)
    return (interp.T).dot(W.dot(interp))
def get_coarsening_matrix(G, partitioning):
    """
    This function should be called in order to build the coarsening matrix C.
    Parameters
    ----------
    G : the graph to be coarsened
    partitioning : a list of subgraphs to be contracted
    Returns
    -------
    C : the new coarsening matrix
    Example
    -------
    C = contract(gsp.graphs.sensor(20),[0,1]) ??
    """
    #C = np.eye(G.N)
    C = sp.sparse.eye(G.N, format='lil')
    rows_to_delete = []
    for subgraph in partitioning:
        nc = len(subgraph)
        # add v_j's to v_i's row
        # each contracted set contributes one row with entries 1/sqrt(nc),
        # making C a (scaled) partition-indicator / projection matrix
        C[subgraph[0],subgraph] = 1/np.sqrt(nc) #np.ones((1,nc))/np.sqrt(nc)
        rows_to_delete.extend(subgraph[1:])
    # delete vertices
    #C = np.delete(C,rows_to_delete,0)
    # NOTE(review): the deletion below manipulates scipy lil_matrix internals
    # (.rows/.data/._shape) directly instead of slicing — fast, but tied to
    # the lil_matrix implementation.
    C.rows = np.delete(C.rows, rows_to_delete)
    C.data = np.delete(C.data, rows_to_delete)
    C._shape = (G.N - len(rows_to_delete), G.N)
    C = sp.sparse.csc_matrix(C)
    # check that this is a projection matrix
    #assert sp.sparse.linalg.norm( ((C.T).dot(C))**2 - ((C.T).dot(C)) , ord='fro') < 1e-5
    return C
def coarsening_quality(G, C, kmax=30, Uk=None, lk=None):
    """
    Measures how good is a coarsening.
    Parameters
    ----------
    G : pygsp Graph
    C : np.array(n,N)
        The coarsening matrix
    kmax : int
        Until which eigenvalue we are interested in.
    Returns
    -------
    metric : dictionary
        Contains all relevant metrics for coarsening quality:
        * error_eigenvalue : np.array(kmax)
        * error_subspace : np.array(kmax)
        * error_sintheta : np.array(kmax)
        * angle_matrix : np.array(kmax)
        * rss constants : np.array(kmax)
        as well as some general properties of Gc:
        * r : int
            reduction ratio
        * m : int
            number of edges
    """
    N = G.N
    I = np.eye(N)  # only used by the commented-out bound computation below
    # reuse a caller-provided (or cached) spectrum when available
    if (Uk is not None) and (lk is not None) and (len(lk)>= kmax):
        U, l = Uk, lk
    elif hasattr(G, 'U'):
        U, l = G.U, G.e
    else:
        l, U = sp.sparse.linalg.eigsh(G.L, k=kmax, which='SM', tol=1e-3)
    l[0] = 1; linv = l**(-0.5); linv[0] = 0 # l[0] = 0 # avoids divide by 0
    # below here, everything is C specific
    n = C.shape[0]
    Pi = C.T @ C
    S = graph_utils.get_S(G).T
    Lc = C.dot((G.L).dot(C.T))
    Lp = Pi @ G.L @ Pi
    # dense eigendecomposition is cheaper once kmax approaches n
    if kmax>n/2:
        [Uc,lc] = graph_utils.eig(Lc.toarray())
    else:
        lc, Uc = sp.sparse.linalg.eigsh(Lc, k=kmax, which='SM', tol=1e-3)
        if not sp.sparse.issparse(Lc): print('warning: Lc should be sparse.')
    metrics = {'r': 1 - n / N, 'm': int((Lc.nnz-n)/2)}
    kmax = np.clip(kmax, 1, n)
    # eigenvalue relative error
    metrics['error_eigenvalue'] = np.abs(l[:kmax] - lc[:kmax]) / l[:kmax]
    metrics['error_eigenvalue'][0] = 0
    # angles between eigenspaces
    metrics['angle_matrix'] = U.T @ C.T @ Uc
    # rss constants
    # metrics['rss'] = np.diag(U.T @ Lp @ U)/l - 1
    # metrics['rss'][0] = 0
    # other errors
    kmax = np.clip(kmax, 2, n)
    error_subspace = np.zeros(kmax)
    error_subspace_bound = np.zeros(kmax)
    error_sintheta = np.zeros(kmax)
    M = S @ Pi @ U @ np.diag(linv)
    # M_bound = S @ (I - Pi) @ U @ np.diag(linv)
    for kIdx in range(1,kmax):
        error_subspace[kIdx] = np.abs(np.linalg.norm( M[:,:kIdx + 1], ord=2)-1)
        # error_subspace_bound[kIdx] = np.linalg.norm( M_bound[:,:kIdx + 1], ord=2)
        error_sintheta[kIdx] = np.linalg.norm(metrics['angle_matrix'][0:kIdx+1,kIdx+1:], ord='fro')**2
    metrics['error_subspace'] = error_subspace
    #metrics['error_subspace_bound'] = error_subspace_bound
    metrics['error_sintheta'] = error_sintheta
    return metrics
def plot_coarsening(Gall, Call, size=3, edge_width=0.8, node_size=20, alpha=0.55, title='', monochrom=False):
    """
    Plot a (hierarchical) coarsening.

    Parameters
    ----------
    Gall : list of pygsp Graphs
        Graph sequence from original (Gall[0]) to coarsest (Gall[-1]).
    Call : list of np.arrays
        Call[level] maps Gall[level] onto Gall[level+1]; row i of C selects
        the vertices of level `level` contracted into coarse vertex i.
    size, edge_width, node_size, alpha, title : plotting cosmetics
    monochrom : bool
        If True, draw every contraction set in blue instead of size-coded colors.

    Returns
    -------
    fig : matplotlib figure, or None when there is only one level to show.
    """
    # colors signify the size of a coarsened subgraph ('k' is 1, 'g' is 2, 'b' is 3, and so on)
    colors = ['k', 'g', 'b', 'r', 'y']
    if monochrom:
        colors = ['b'] * len(colors)
    n_levels = len(Gall) - 1
    if n_levels == 0:
        return None
    fig = plt.figure(figsize=(n_levels * size * 3, size * 2))
    for level in range(n_levels):
        G = Gall[level]
        edges = np.array(G.get_edge_list()[0:2])
        Gc = Gall[level + 1]
        C = Call[level]
        C = C.toarray()
        if G.coords.shape[1] == 2:
            ax = fig.add_subplot(1, n_levels + 1, level + 1)
            ax.axis('off')
            ax.set_title(f'{title} | level = {level}, N = {G.N}')
            [x, y] = G.coords.T
            for eIdx in range(0, edges.shape[1]):
                # matplotlib's Line2D property is 'linewidth' ('lineWidth' is invalid)
                ax.plot(x[edges[:, eIdx]], y[edges[:, eIdx]], color='k', alpha=alpha, linewidth=edge_width)
            for i in range(Gc.N):
                # vertices of G merged into coarse vertex i
                subgraph = np.arange(G.N)[C[i, :] > 0]
                ax.scatter(x[subgraph], y[subgraph], c=colors[np.clip(len(subgraph) - 1, 0, 4)],
                           s=node_size * len(subgraph), alpha=alpha)
        elif G.coords.shape[1] == 3:
            ax = fig.add_subplot(1, n_levels + 1, level + 1, projection='3d')
            ax.axis('off')
            [x, y, z] = G.coords.T
            for eIdx in range(0, edges.shape[1]):
                ax.plot(x[edges[:, eIdx]], y[edges[:, eIdx]], zs=z[edges[:, eIdx]], color='k',
                        alpha=alpha, linewidth=edge_width)
            for i in range(Gc.N):
                subgraph = np.arange(G.N)[C[i, :] > 0]
                ax.scatter(x[subgraph], y[subgraph], z[subgraph], c=colors[np.clip(len(subgraph) - 1, 0, 4)],
                           s=node_size * len(subgraph), alpha=alpha)
    # the final (coarsest) graph
    # NOTE(review): the dimensionality check below uses G (the last intermediate
    # level), not Gc; this assumes all levels share the same coordinate dimension.
    Gc = Gall[-1]
    edges_c = np.array(Gc.get_edge_list()[0:2])
    if G.coords.shape[1] == 2:
        ax = fig.add_subplot(1, n_levels + 1, n_levels + 1)
        ax.axis('off')
        [x, y] = Gc.coords.T
        ax.scatter(x, y, c='k', s=node_size, alpha=alpha)
        for eIdx in range(0, edges_c.shape[1]):
            ax.plot(x[edges_c[:, eIdx]], y[edges_c[:, eIdx]], color='k', alpha=alpha, linewidth=edge_width)
    elif G.coords.shape[1] == 3:
        ax = fig.add_subplot(1, n_levels + 1, n_levels + 1, projection='3d')
        ax.axis('off')
        [x, y, z] = Gc.coords.T
        ax.scatter(x, y, z, c='k', s=node_size, alpha=alpha)
        for eIdx in range(0, edges_c.shape[1]):
            ax.plot(x[edges_c[:, eIdx]], y[edges_c[:, eIdx]], z[edges_c[:, eIdx]], color='k',
                    alpha=alpha, linewidth=edge_width)
    ax.set_title(f'{title} | level = {n_levels}, n = {Gc.N}')
    fig.tight_layout()
    return fig
################################################################################
# Variation-based contraction algorithms
################################################################################
def contract_variation_edges(G, A=None, K=10, r=0.5, algorithm='greedy'):
    """
    Sequential contraction with local variation and edge-based families.

    This is a specialized implementation for the edge-based family, that works
    slightly faster than the contract_variation() function, which works for
    any family. See contract_variation() for documentation.

    Parameters
    ----------
    G : pygsp graph
    A : np.array
        Test-vector matrix used by the local variation cost.
    K : int
        Unused here; kept for signature compatibility with contract_variation().
    r : float
        Desired reduction ratio (r = 1 - n/N).
    algorithm : {'greedy', 'optimal'}

    Returns
    -------
    coarsening_list : the matching produced by matching_greedy/matching_optimal.
    """
    N, deg, M = G.N, G.dw, G.Ne
    ones = np.ones(2)
    # projector onto the complement of span{1} for a two-node subgraph
    Pibot = np.eye(2) - np.outer(ones, ones) / 2

    # local-variation cost of contracting one edge (i, j, w)
    def subgraph_cost(G, A, edge):
        # np.int was removed from NumPy (1.24+); the builtin int is correct here
        edge, w = edge[:2].astype(int), edge[2]
        deg_new = 2 * deg[edge] - w  # degrees seen by the contracted pair
        L = np.array([[deg_new[0], -w], [-w, deg_new[1]]])
        B = Pibot @ A[edge, :]
        return np.linalg.norm(B.T @ L @ B)

    edges = np.array(G.get_edge_list())
    weights = np.array([subgraph_cost(G, A, edges[:, e]) for e in range(M)])
    if algorithm == 'optimal':
        # identify the minimum weight matching
        coarsening_list = matching_optimal(G, weights=weights, r=r)
    elif algorithm == 'greedy':
        # find a heavy weight matching (costs negated: small cost == heavy edge)
        coarsening_list = matching_greedy(G, weights=-weights, r=r)
    else:
        raise ValueError(f'Unknown algorithm: {algorithm!r}')
    return coarsening_list
def contract_variation_linear(G, A=None, K=10, r=0.5, mode='neighborhood'):
    """
    Sequential contraction with local variation and general families.

    This is an implementation that improves running speed, at the expense of
    being more greedy (and thus having slightly larger error).
    See contract_variation() for documentation.

    Parameters
    ----------
    G : pygsp graph
    A : np.array or None
        Test-vector matrix; when None it is built from the K smallest
        Laplacian eigenpairs (valid for a single coarsening level only).
    K : int
        Number of eigenpairs used when A is None.
    r : float
        Desired reduction ratio (r = 1 - n/N).
    mode : str
        Any combination of 'neighborhood', 'cliques', 'edges', 'triangles'.

    Returns
    -------
    coarsening_list : list of np.arrays, the selected contraction sets.
    """
    N, deg, W_lil = G.N, G.dw, G.W.tolil()
    # The following is correct only for a single level of coarsening.
    # Normally, A should be passed as an argument.
    if A is None:
        lk, Uk = sp.sparse.linalg.eigsh(G.L, k=K, which='SM', tol=1e-3)  # this is not optimized!
        lk[0] = 1
        lsinv = lk**(-0.5)
        lsinv[0] = 0  # avoids dividing by the zero eigenvalue
        lk[0] = 0
        D_lsinv = np.diag(lsinv)
        A = Uk @ D_lsinv

    # local-variation cost of the subgraph induced by `nodes`
    def subgraph_cost(nodes):
        nc = len(nodes)
        ones = np.ones(nc)
        W = W_lil[nodes, :][:, nodes]
        L = np.diag(2 * deg[nodes] - W.dot(ones)) - W
        B = (np.eye(nc) - np.outer(ones, ones) / nc) @ A[nodes, :]
        unnormalized_cost = np.linalg.norm(B.T @ L @ B)
        # normalize by the number of eliminated vertices; singletons cost nothing
        return unnormalized_cost / (nc - 1) if nc != 1 else 0.

    class CandidateSet:
        """A contraction-set candidate ordered by its variation cost."""
        def __init__(self, candidate_list):
            self.set = candidate_list
            self.cost = subgraph_cost(candidate_list)

        def __lt__(self, other):
            return self.cost < other.cost

    family = []
    # np.bool was removed from NumPy; the builtin bool is the correct dtype
    W_bool = G.A + sp.sparse.eye(G.N, dtype=bool, format='csr')
    if 'neighborhood' in mode:
        for i in range(N):
            i_set = W_bool[i, :].indices  # closed neighborhood of vertex i
            family.append(CandidateSet(i_set))
    if 'cliques' in mode:
        import networkx as nx
        Gnx = nx.from_scipy_sparse_matrix(G.W)
        for clique in nx.find_cliques(Gnx):
            family.append(CandidateSet(np.array(clique)))
    else:
        # edges/triangles are only considered when cliques are not
        if 'edges' in mode:
            edges = np.array(G.get_edge_list()[0:2])
            for e in range(0, edges.shape[1]):
                family.append(CandidateSet(edges[:, e]))
        if 'triangles' in mode:
            triangles = set([])
            edges = np.array(G.get_edge_list()[0:2])
            for e in range(0, edges.shape[1]):
                [u, v] = edges[:, e]
                for w in range(G.N):
                    if G.W[u, w] > 0 and G.W[v, w] > 0:
                        triangles.add(frozenset([u, v, w]))
            triangles = list(map(lambda x: np.array(list(x)), triangles))
            for triangle in triangles:
                family.append(CandidateSet(triangle))
    family = SortedList(family)
    marked = np.zeros(G.N, dtype=bool)
    # ----------------------------------------------------------------------------
    # Construct a (minimum weight) independent set over the candidate sets.
    # ----------------------------------------------------------------------------
    coarsening_list = []
    n_reduce = np.floor(r * N)  # how many nodes do we need to reduce/eliminate?
    while len(family) > 0:
        i_cset = family.pop(index=0)  # cheapest remaining candidate
        i_set = i_cset.set
        # check if marked
        i_marked = marked[i_set]
        if not any(i_marked):
            n_gain = len(i_set) - 1
            if n_gain > n_reduce:
                continue  # this helps avoid over-reducing
            # all vertices are unmarked: add i_set to the coarsening list
            marked[i_set] = True
            coarsening_list.append(i_set)
            n_reduce -= n_gain
            if n_reduce <= 0:
                break
        else:
            # it may be worth keeping the unmarked remainder of this set
            i_set = i_set[~i_marked]
            if len(i_set) > 1:
                # todo1: check whether to add to coarsening_list before adding to family
                # todo2: currently this will also select contraction sets that are disconnected
                #        should we eliminate those?
                i_cset.set = i_set
                i_cset.cost = subgraph_cost(i_set)
                family.add(i_cset)
    return coarsening_list
################################################################################
# Edge-based contraction algorithms
################################################################################
def get_proximity_measure(G, name, K=10):
    """
    Compute one proximity score per edge of G, used to rank edges for contraction.

    Parameters
    ----------
    G : pygsp graph
    name : str
        Which measure to compute: 'heavy_edge', 'algebraic_JC', 'affinity_GS',
        'heavy_edge_degree', 'min_expected_loss', 'min_expected_gradient_loss',
        'rss', 'rss_lanczos', 'rss_cheby', or 'algebraic_GS'.
    K : int
        Number of eigenpairs / test vectors involved.

    Returns
    -------
    proximity : np.array(M)
        One score per edge. For the 'rss' and 'expected' families the sign is
        flipped at the end, so callers can always contract large values first.
    """
    N = G.N
    W = G.W
    deg = G.dw
    edges = np.array(G.get_edge_list()[0:2])
    weights = np.array(G.get_edge_list()[2])
    M = edges.shape[1]
    num_vectors = K
    # precompute the test vectors the chosen measure relies on
    if 'lanczos' in name:
        l_lan, X_lan = sp.sparse.linalg.eigsh(G.L, k=K, which='SM', tol=1e-2)
    elif 'cheby' in name:
        X_cheby = generate_test_vectors(G, num_vectors=num_vectors, method='Chebychev', lambda_cut=G.e[K + 1])
    elif 'JC' in name:
        X_jc = generate_test_vectors(G, num_vectors=num_vectors, method='JC', iterations=20)
    elif 'GS' in name:
        X_gs = generate_test_vectors(G, num_vectors=num_vectors, method='GS', iterations=1)
    if 'expected' in name:
        # NOTE(review): X_lan only exists when 'lanczos' is also part of `name`;
        # any other '*expected*' variant would raise NameError here — confirm intended.
        X = X_lan
        assert not np.isnan(X).any()
        assert X.shape[0] == N
        K = X.shape[1]
    proximity = np.zeros(M, dtype=np.float32)
    # heuristic for multigrid
    if name == 'heavy_edge':
        wmax = np.array(np.max(G.W, 0).todense())[0] + 1e-5
        for e in range(0, M):
            proximity[e] = weights[e] / max(wmax[edges[:, e]])  # select edges with large proximity
        return proximity
    # heuristic for multigrid
    elif name == 'algebraic_JC':
        proximity += np.inf  # np.Inf alias was removed in NumPy 2.0
        for e in range(0, M):
            i, j = edges[:, e]
            for kIdx in range(num_vectors):
                xk = X_jc[:, kIdx]
                proximity[e] = min(proximity[e], 1 / max(np.abs(xk[i] - xk[j])**2, 1e-6))  # select edges with large proximity
        return proximity
    # heuristic for multigrid
    elif name == 'affinity_GS':
        c = np.zeros((N, N))
        for e in range(0, M):
            i, j = edges[:, e]
            c[i, j] = (X_gs[i, :] @ X_gs[j, :].T)**2 / ((X_gs[i, :] @ X_gs[i, :].T)**2 * (X_gs[j, :] @ X_gs[j, :].T)**2)
        c += c.T
        c -= np.diag(np.diag(c))
        for e in range(0, M):
            i, j = edges[:, e]
            proximity[e] = c[i, j] / (max(c[i, :]) * max(c[j, :]))
        return proximity
    for e in range(0, M):
        i, j = edges[:, e]
        if name == 'heavy_edge_degree':
            proximity[e] = deg[i] + deg[j] + 2 * G.W[i, j]  # select edges with large proximity
        # lose as little information as possible (custom)
        elif 'min_expected_loss' in name:
            for kIdx in range(1, K):
                xk = X[:, kIdx]
                proximity[e] = sum([proximity[e], (xk[i] - xk[j])**2])  # select edges with small proximity
        # lose as little gradient information as possible (custom)
        elif name == 'min_expected_gradient_loss':
            for kIdx in range(1, K):
                xk = X[:, kIdx]
                proximity[e] = sum([proximity[e], (xk[i] - xk[j])**2 * (deg[i] + deg[j] + 2 * G.W[i, j])])  # select edges with small proximity
        # relaxation ensuring that K first eigenspaces are aligned (custom)
        elif name == 'rss':
            for kIdx in range(1, K):
                xk = G.U[:, kIdx]
                lk = G.e[kIdx]
                proximity[e] = sum([proximity[e], (xk[i] - xk[j])**2 * ((deg[i] + deg[j] + 2 * G.W[i, j]) / 4) / lk])  # select edges with small proximity
        # fast relaxation ensuring that K first eigenspaces are aligned (custom)
        elif name == 'rss_lanczos':
            for kIdx in range(1, K):
                xk = X_lan[:, kIdx]
                lk = l_lan[kIdx]
                proximity[e] = sum([proximity[e], (xk[i] - xk[j])**2 * ((deg[i] + deg[j] + 2 * G.W[i, j]) / 4 - 0.5 * (lk + lk)) / lk])  # select edges with small proximity
        # approximate relaxation ensuring that K first eigenspaces are aligned (custom)
        elif name == 'rss_cheby':
            for kIdx in range(num_vectors):
                xk = X_cheby[:, kIdx]
                lk = xk.T @ G.L @ xk
                proximity[e] = sum([proximity[e], ((xk[i] - xk[j])**2 * ((deg[i] + deg[j] + 2 * G.W[i, j]) / 4 - 0 * lk) / lk)])  # select edges with small proximity
        # heuristic for multigrid (algebraic multigrid)
        elif name == 'algebraic_GS':
            proximity[e] = np.inf  # np.Inf alias was removed in NumPy 2.0
            for kIdx in range(num_vectors):
                xk = X_gs[:, kIdx]
                proximity[e] = min(proximity[e], 1 / max(np.abs(xk[i] - xk[j])**2, 1e-6))  # select edges with large proximity
    if ('rss' in name) or ('expected' in name):
        proximity = -proximity
    return proximity
def generate_test_vectors(G, num_vectors=10, method='Gauss-Seidel', iterations=5, lambda_cut=0.1):
    """
    Generate `num_vectors` smooth test vectors on graph G.

    Starting from random Gaussian vectors, each method damps the
    high-frequency (large-eigenvalue) components of the graph Laplacian.

    Parameters
    ----------
    G : pygsp graph
    num_vectors : int
        Number of columns in the returned matrix.
    method : {'GS', 'Gauss-Seidel', 'JC', 'Jacobi', 'Chebychev'}
    iterations : int
        Number of smoothing sweeps (GS / JC only).
    lambda_cut : float
        Cut-off eigenvalue of the low-pass filter (Chebychev only).

    Returns
    -------
    X : np.array(N, num_vectors)

    Raises
    ------
    ValueError
        If `method` is not one of the supported smoothers.
    """
    L = G.L
    N = G.N
    X = np.random.randn(N, num_vectors) / np.sqrt(N)
    if method == 'GS' or method == 'Gauss-Seidel':
        # classic Gauss-Seidel sweep: x <- -(D + L_lower)^-1 U x
        L_upper = sp.sparse.triu(L, 1, format='csc')
        L_lower_diag = sp.sparse.triu(L, 0, format='csc').T
        for j in range(num_vectors):
            x = X[:, j]
            for _ in range(iterations):
                x = -sp.sparse.linalg.spsolve_triangular(L_lower_diag, L_upper @ x)
            X[:, j] = x
        return X
    if method == 'JC' or method == 'Jacobi':
        # damped Jacobi iteration: x <- 0.5 x + 0.5 D^-1 W x
        deg = G.dw.astype(float)  # np.float was removed from NumPy (1.24+)
        D = sp.sparse.diags(deg, 0)
        deginv = deg**(-1)
        deginv[deginv == np.inf] = 0  # isolated vertices have zero inverse degree
        Dinv = sp.sparse.diags(deginv, 0)
        M = Dinv.dot(D - L)
        for j in range(num_vectors):
            x = X[:, j]
            for _ in range(iterations):
                x = 0.5 * x + 0.5 * M.dot(x)
            X[:, j] = x
        return X
    elif method == 'Chebychev':
        # ideal low-pass filter below lambda_cut, approximated by Chebyshev polynomials
        f = filters.Filter(G, lambda x: ((x <= lambda_cut) * 1).astype(np.float32))
        return f.filter(X, method='chebyshev', order=50)
    else:
        raise ValueError(f'Unknown smoothing method: {method!r}')
def matching_optimal(G, weights, r=0.4):
    """
    Generates a matching optimally with the objective of minimizing the total
    weight of all edges in the matching.

    Parameters
    ----------
    G : pygsp graph
    weights : np.array(M)
        a weight for each edge
    r : float
        The desired dimensionality reduction (r = 1 - n/N)

    Notes:
    * The complexity of this is O(N^3)
    * Depending on G, the algorithm might fail to return ratios>0.3
    """
    N = G.N
    # the edge set
    edges = G.get_edge_list()
    edges = np.array(edges[0:2])
    M = edges.shape[1]
    max_weight = 1*np.max(weights)
    # prepare the input for the minimum weight matching problem:
    # maxWeightMatching maximizes, so each weight is flipped around max_weight
    edge_list = []
    for edgeIdx in range(M):
        [i,j] = edges[:,edgeIdx]
        if i==j: continue  # skip self-loops
        edge_list.append( (i, j, max_weight-weights[edgeIdx]) )
    assert min(weights) >= 0
    # solve it
    # NOTE(review): assumes maxWeightMatching returns mate[i] per vertex,
    # with -1 for unmatched vertices — confirm against its documentation.
    tmp = np.array(maxWeightMatching(edge_list))
    # format output as (vertex, mate) rows
    m = tmp.shape[0]
    matching = np.zeros((m,2), dtype=int)
    matching[:,0] = range(m)
    matching[:,1] = tmp
    # remove null edges and duplicates (each pair appears twice, keep one orientation)
    idx = np.where(tmp!=-1)[0]
    matching = matching[idx,:]
    idx = np.where(matching[:,0] > matching[:,1])[0]
    matching = matching[idx,:]
    assert matching.shape[0] >= 1
    # if the returned matching is larger than what is requested, select the min weight subset of it
    matched_weights = np.zeros(matching.shape[0])
    for mIdx in range(matching.shape[0]):
        i = matching[mIdx,0]
        j = matching[mIdx,1]
        # recover the original weight of edge (i, j), whichever orientation it is stored in
        eIdx = [e for e,t in enumerate(edges[:,:].T) if ((t==[i,j]).all() or (t==[j,i]).all()) ]
        matched_weights[mIdx] = weights[eIdx]
    keep = min(int(np.ceil(r*N)), matching.shape[0])
    if keep < matching.shape[0]:
        # argpartition: pick the `keep` cheapest matched pairs (unordered)
        idx = np.argpartition(matched_weights, keep)
        idx = idx[0:keep]
        matching = matching[idx,:]
    return matching
def matching_greedy(G, weights, r=0.4):
    """
    Generates a matching greedily by selecting at each iteration the edge
    with the largest weight and then removing all adjacent edges from the
    candidate set.

    Parameters
    ----------
    G : pygsp graph
    weights : np.array(M)
        a weight for each edge
    r : float
        The desired dimensionality reduction (r = 1 - n/N)

    Returns
    -------
    matching : np.array of shape (k, 2)
        Each row is one matched (contracted) vertex pair.

    Notes:
    * The complexity of this is O(M)
    * Depending on G, the algorithm might fail to return ratios>0.3
    """
    N = G.N
    # the edge set, sorted by decreasing weight
    edges = np.array(G.get_edge_list()[0:2])
    order = np.argsort(-weights)
    edges = edges[:, order]
    # the matching edge set (this is a list of arrays)
    matching = []
    # np.bool was removed from NumPy; the builtin bool is the correct dtype
    marked = np.zeros(N, dtype=bool)
    n, n_target = N, (1 - r) * N
    # iterate directly instead of list.pop(0), which costs O(M) per call (O(M^2) total)
    for i, j in edges.T:
        # skip edges touching an already-matched vertex
        if marked[i] or marked[j]:
            continue
        marked[[i, j]] = True
        n -= 1
        matching.append(np.array([i, j]))
        # termination condition: reached the requested reduction
        if n <= n_target:
            break
    return np.array(matching)
##############################################################################
# Sparsification and Kron reduction
# Most of the code has been adapted from the PyGSP implementation.
##############################################################################
def kron_coarsening(G, r=0.5, m=None):
    """Coarsen G by repeated Kron reduction down to roughly (1-r)*N nodes.

    Parameters
    ----------
    G : pygsp graph
    r : float
        Desired reduction ratio (r = 1 - n/N).
    m : int or None
        When given, a spectral sparsification step is applied to the
        coarsened graph (the target edge count itself is not enforced).

    Returns
    -------
    (Gc, G0) : the coarsened graph and the (possibly modified) original.
    """
    if not hasattr(G, 'coords'):
        # the Kron-reduction pipeline requires coordinates; fabricate random ones
        G.set_coordinates(np.random.rand(G.N, 2))
    target_size = np.floor((1 - r) * G.N)
    depth = int(np.ceil(np.log2(G.N / target_size)))
    pyramid = my_graph_multiresolution(G, depth, r=r, sparsify=False, sparsify_eps=None,
                                       reduction_method='kron', reg_eps=0.01)
    coarse = pyramid[-1]
    if m is None:
        return coarse, pyramid[0]
    # sparsify the coarse graph toward the target number of edges m
    eps = min(10 / np.sqrt(G.N), .3)
    sparser = graph_sparsify(coarse, eps, maxiter=10)
    sparser.mr = coarse.mr
    return sparser, pyramid[0]
def kron_quality(G, Gc, kmax=30, Uk=None, lk=None):
    """
    Measure the quality of a Kron coarsening Gc of G.

    Parameters
    ----------
    G, Gc : pygsp graphs (Gc must carry Gc.mr['idx'], the kept vertices)
    kmax : int
        Up to which eigenvalue the metrics are computed.
    Uk, lk : precomputed eigenvectors/eigenvalues of G.L (optional).

    Returns
    -------
    metrics : dict with 'error_eigenvalue', 'error_subspace', 'error_sintheta',
        'r', 'm', and 'failed' (True when the linear algebra did not converge).
    """
    N, n = G.N, Gc.N
    keep_inds = Gc.mr['idx']
    metrics = {'r': 1 - n / N, 'm': int(Gc.W.nnz / 2), 'failed': False}
    kmax = np.clip(kmax, 1, n)
    # reuse precomputed spectra when available
    if (Uk is not None) and (lk is not None) and (len(lk) >= kmax):
        U, l = Uk, lk
    elif hasattr(G, 'U'):
        U, l = G.U, G.e
    else:
        l, U = sp.sparse.linalg.eigsh(G.L, k=kmax, which='SM', tol=1e-3)
    l[0] = 1; linv = l**(-0.5); linv[0] = 0  # avoids divide by 0
    # selection matrix of the kept vertices
    C = np.eye(N); C = C[keep_inds, :]
    L = G.L.toarray()
    try:
        # pseudo-inverse-based lifting operator
        Phi = np.linalg.pinv(L + 0.01 * np.eye(N))
        Cinv = (Phi @ C.T) @ np.linalg.pinv(C @ Phi @ C.T)
        if kmax > n / 2:
            [Uc, lc] = graph_utils.eig(Gc.L.toarray())
        else:
            lc, Uc = sp.sparse.linalg.eigsh(Gc.L, k=kmax, which='SM', tol=1e-3)
        # eigenvalue relative error
        metrics['error_eigenvalue'] = np.abs(l[:kmax] - lc[:kmax]) / l[:kmax]
        metrics['error_eigenvalue'][0] = 0
        # TODO : angles between eigenspaces
        # metrics['angle_matrix'] = U.T @ C.T @ Uc
        # other errors
        kmax = np.clip(kmax, 2, n)
        error_subspace = np.zeros(kmax)
        error_sintheta = np.zeros(kmax)
        M = U - sp.linalg.sqrtm(Cinv @ Gc.L.dot(C)) @ U @ np.diag(linv)  # is this correct?
        for kIdx in range(0, kmax):
            error_subspace[kIdx] = np.abs(np.linalg.norm(M[:, :kIdx + 1], ord=2) - 1)
        metrics['error_subspace'] = error_subspace
        metrics['error_sintheta'] = error_sintheta
    except Exception:
        # was a bare `except:`, which would also swallow KeyboardInterrupt/SystemExit
        metrics['failed'] = True
    return metrics
def kron_interpolate(G, Gc, x):
    """Lift a signal x defined on the coarse graph Gc back onto G."""
    keep = Gc.mr['idx']
    lifted = reduction.interpolate(G, x, keep)
    return np.squeeze(lifted)
def my_graph_multiresolution(G, levels, r=0.5, sparsify=True, sparsify_eps=None,
                             downsampling_method='largest_eigenvector',
                             reduction_method='kron', compute_full_eigen=False,
                             reg_eps=0.005):
    r"""Compute a pyramid of graphs (by Kron reduction).

    'graph_multiresolution(G,levels)' computes a multiresolution of
    graph by repeatedly downsampling and performing graph reduction. The
    default downsampling method is the largest eigenvector method based on
    the polarity of the components of the eigenvector associated with the
    largest graph Laplacian eigenvalue. The default graph reduction method
    is Kron reduction followed by a graph sparsification step.
    *param* is a structure of optional parameters.

    Parameters
    ----------
    G : Graph structure
        The graph to reduce.
    levels : int
        Number of level of decomposition
    lambd : float
        Stability parameter. It adds self loop to the graph to give the
        algorithm some stability (default = 0.025). [UNUSED?!]
    sparsify : bool
        To perform a spectral sparsification step immediately after
        the graph reduction (default is True).
    sparsify_eps : float
        Parameter epsilon used in the spectral sparsification
        (default is min(10/sqrt(G.N),.3)).
    downsampling_method: string
        The graph downsampling method (default is 'largest_eigenvector').
    reduction_method : string
        The graph reduction method (default is 'kron')
    compute_full_eigen : bool
        To also compute the graph Laplacian eigenvalues and eigenvectors
        for every graph in the multiresolution sequence (default is False).
    reg_eps : float
        The regularized graph Laplacian is :math:`\bar{L}=L+\epsilon I`.
        A smaller epsilon may lead to better regularization, but will also
        require a higher order Chebyshev approximation. (default is 0.005)

    Returns
    -------
    Gs : list
        A list of graph layers.

    Examples
    --------
    >>> from pygsp import reduction
    >>> levels = 5
    >>> G = graphs.Sensor(N=512)
    >>> G.compute_fourier_basis()
    >>> Gs = reduction.graph_multiresolution(G, levels, sparsify=False)
    >>> for idx in range(levels):
    ...     Gs[idx].plotting['plot_name'] = 'Reduction level: {}'.format(idx)
    ...     Gs[idx].plot()
    """
    if sparsify_eps is None:
        sparsify_eps = min(10. / np.sqrt(G.N), 0.3)
    if compute_full_eigen:
        G.compute_fourier_basis()
    else:
        G.estimate_lmax()
    Gs = [G]
    # mr bookkeeping: indices kept at this level and their indices in the original graph
    Gs[0].mr = {'idx': np.arange(G.N), 'orig_idx': np.arange(G.N)}
    # never reduce below this total number of vertices (controlled by r)
    n_target = int(np.floor(G.N * (1-r)))
    for i in range(levels):
        if downsampling_method == 'largest_eigenvector':
            # keep the vertices where the top Laplacian eigenvector is largest
            if hasattr(Gs[i], '_U'):
                V = Gs[i].U[:, -1]
            else:
                V = sp.sparse.linalg.eigs(Gs[i].L, 1)[1][:, 0]
            # fix the (arbitrary) sign of the eigenvector
            V *= np.sign(V[0])
            # halve the graph per level, but do not overshoot the global target size
            n = max(int(Gs[i].N/2), n_target)
            ind = np.argsort(V)  # np.nonzero(V >= 0)[0]
            ind = np.flip(ind,0)
            ind = ind[:n]
        else:
            raise NotImplementedError('Unknown graph downsampling method.')
        if reduction_method == 'kron':
            Gs.append(reduction.kron_reduction(Gs[i], ind))
        else:
            raise NotImplementedError('Unknown graph reduction method.')
        if sparsify and Gs[i+1].N > 2:
            Gs[i+1] = reduction.graph_sparsify(Gs[i+1], min(max(sparsify_eps, 2. / np.sqrt(Gs[i+1].N)), 1.))
        # Kron reduction can yield a directed graph; symmetrize if so
        if Gs[i+1].is_directed():
            W = (Gs[i+1].W + Gs[i+1].W.T)/2
            Gs[i+1] = graphs.Graph(W, coords=Gs[i+1].coords)
        if compute_full_eigen:
            Gs[i+1].compute_fourier_basis()
        else:
            Gs[i+1].estimate_lmax()
        Gs[i+1].mr = {'idx': ind, 'orig_idx': Gs[i].mr['orig_idx'][ind], 'level': i}
        # regularized Laplacian and Green's-function filter, used later for interpolation
        L_reg = Gs[i].L + reg_eps * sparse.eye(Gs[i].N)
        Gs[i].mr['K_reg'] = reduction.kron_reduction(L_reg, ind)
        Gs[i].mr['green_kernel'] = filters.Filter(Gs[i], lambda x: 1./(reg_eps + x))
    return Gs
def graph_sparsify(M, epsilon, maxiter=10):
    """
    Spectral sparsification of a graph (effective-resistance edge sampling).

    Parameters
    ----------
    M : pygsp graph (combinatorial Laplacian) or a sparse Laplacian matrix
    epsilon : float
        Sparsification parameter, must satisfy 1/sqrt(N) <= epsilon < 1.
    maxiter : int
        Number of resampling rounds (only the last round's result is used).

    Returns
    -------
    Mnew : a pygsp graph when M was a graph, otherwise a sparse Laplacian.
    """
    from pygsp import utils
    from scipy import stats
    # Test the input parameters
    if isinstance(M, graphs.Graph):
        if not M.lap_type == 'combinatorial':
            raise NotImplementedError
        L = M.L
    else:
        L = M
    N = np.shape(L)[0]
    if not 1. / np.sqrt(N) <= epsilon < 1:
        raise ValueError('GRAPH_SPARSIFY: Epsilon out of required range')
    # Not sparse
    resistance_distances = utils.resistance_distance(L).toarray()
    # Get the Weight matrix
    if isinstance(M, graphs.Graph):
        W = M.W
    else:
        W = np.diag(L.diagonal()) - L.toarray()
        W[W < 1e-10] = 0
    W = sparse.coo_matrix(W)
    W.data[W.data < 1e-10] = 0
    W = W.tocsc()
    W.eliminate_zeros()
    start_nodes, end_nodes, weights = sparse.find(sparse.tril(W))
    # Calculate the sampling probabilities: weight times effective resistance
    weights = np.maximum(0, weights)
    Re = np.maximum(0, resistance_distances[start_nodes, end_nodes])
    Pe = weights * Re + 1e-4
    Pe = Pe / np.sum(Pe)
    for _ in range(maxiter):
        # Rudelson, 1996 Random Vectors in the Isotropic Position
        # (too hard to figure out actual C0)
        C0 = 1 / 30.
        # Rudelson and Vershynin, 2007, Thm. 3.1
        C = 4 * C0
        q = round(N * np.log(N) * 9 * C**2 / (epsilon**2))
        results = stats.rv_discrete(values=(np.arange(np.shape(Pe)[0]), Pe)).rvs(size=int(q))
        # scipy.stats.itemfreq was removed in SciPy 1.2; np.unique is the
        # documented replacement and yields the same value/count pairs.
        vals, freqs = np.unique(results, return_counts=True)
        per_spin_weights = weights / (q * Pe)
        counts = np.zeros(np.shape(weights)[0])
        counts[vals] = freqs
        new_weights = counts * per_spin_weights
        sparserW = sparse.csc_matrix((new_weights, (start_nodes, end_nodes)),
                                     shape=(N, N))
        sparserW = sparserW + sparserW.T
        sparserL = sparse.diags(sparserW.diagonal(), 0) - sparserW
        # if graphs.Graph(W=sparserW).is_connected():
        #     break
        # elif i == maxiter - 1:
        #     print('Despite attempts to reduce epsilon, sparsified graph is disconnected')
        # else:
        #     epsilon -= (epsilon - 1/np.sqrt(N)) / 2.
    if isinstance(M, graphs.Graph):
        sparserW = sparse.diags(sparserL.diagonal(), 0) - sparserL
        if not M.is_directed():
            sparserW = (sparserW + sparserW.T) / 2.
        Mnew = graphs.Graph(W=sparserW)
        # M.copy_graph_attributes(Mnew)
    else:
        Mnew = sparse.lil_matrix(sparserL)
    return Mnew
##############################################################################
|
# Generated by Django 2.0.4 on 2019-02-26 12:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the carPooling app.

    Removes ``c_user_phone`` from ``carpoolingassdetail`` and alters several
    fields (mostly adding ``db_index=True``) on ``carpoolingassdetail`` and
    ``carpoolingrecdetail``.
    """

    dependencies = [
        ('carPooling', '0012_auto_20190225_1123'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='carpoolingassdetail',
            name='c_user_phone',
        ),
        migrations.AlterField(
            model_name='carpoolingassdetail',
            name='c_end_city',
            field=models.CharField(db_index=True, max_length=128, verbose_name='到达城市'),
        ),
        migrations.AlterField(
            model_name='carpoolingassdetail',
            name='c_start_city',
            field=models.CharField(db_index=True, max_length=128, verbose_name='出发城市'),
        ),
        migrations.AlterField(
            model_name='carpoolingassdetail',
            name='c_userid',
            field=models.CharField(db_index=True, max_length=128, verbose_name='车主id'),
        ),
        migrations.AlterField(
            model_name='carpoolingassdetail',
            name='create_time',
            field=models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建时间'),
        ),
        migrations.AlterField(
            model_name='carpoolingassdetail',
            name='d_go_time',
            field=models.DateTimeField(db_index=True, verbose_name='出发时间'),
        ),
        migrations.AlterField(
            model_name='carpoolingassdetail',
            name='i_cash',
            field=models.IntegerField(default=0, verbose_name='费用'),
        ),
        migrations.AlterField(
            model_name='carpoolingassdetail',
            name='i_status',
            field=models.SmallIntegerField(help_text='参考CurTripStatus', verbose_name='行程状态'),
        ),
        migrations.AlterField(
            model_name='carpoolingassdetail',
            name='update_time',
            field=models.DateTimeField(auto_now=True, db_index=True, verbose_name='更新时间'),
        ),
        migrations.AlterField(
            model_name='carpoolingrecdetail',
            name='i_status',
            field=models.SmallIntegerField(help_text='参考CurTripStatus', verbose_name='行程状态'),
        ),
    ]
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# **************************
# * Author : baiyyang
# * Email : baiyyang@163.com
# * Description :
# * create time : 2018/4/8下午4:53
# * file name : train_model.py
import tensorflow as tf
from rcnn_model import RCNN
import time
import os
import sys
import datetime
from data_helper import load_data_and_labels_chinese, batch_iter
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# Data loading params
tf.flags.DEFINE_string("train_data_file", "../data/train.txt", "Data source for the train data.")
tf.flags.DEFINE_string("test_data_file", "../data/test.txt", "Data source for the test data.")
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("filter_nums", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_integer("sequence_length", 300, "every sequences length")
tf.flags.DEFINE_integer("num_classes", 45, "the number of classes")
tf.flags.DEFINE_integer("vocabulary_size", 10000, "the size of vocabulary")
tf.flags.DEFINE_integer("hidden_dim", 128, "the hidden unit number")
tf.flags.DEFINE_float("learning_rate", 1e-3, "learning rate")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
# Echo the effective configuration before training starts
print("Parameters:")
for key, value in FLAGS.flag_values_dict().items():
    print("{}={}".format(key.upper(), value))
print("Load data\n")
# Load the pre-tokenized train/test sets, padded/truncated to sequence_length
_, x_train, y_train, x_test, y_test = load_data_and_labels_chinese(
    FLAGS.train_data_file, FLAGS.test_data_file, FLAGS.sequence_length, FLAGS.vocabulary_size)
# train
import numpy as np  # NOTE(review): `np` is used below for parameter counting but was never imported
with tf.Graph().as_default():
    session = tf.Session()
    with session.as_default():
        rcnn = RCNN(
            embedding_dim=FLAGS.embedding_dim,
            sequence_length=FLAGS.sequence_length,
            hidden_dim=FLAGS.hidden_dim,
            num_classes=FLAGS.num_classes,
            vocabulary_size=FLAGS.vocabulary_size,
            dropout_keep_prob=FLAGS.dropout_keep_prob,
            filter_nums=FLAGS.filter_nums,
            learning_rate=FLAGS.learning_rate,
            batch_size=FLAGS.batch_size,
            epochs=FLAGS.epochs
        )
        # training procedure
        global_step = tf.Variable(0, trainable=False, name='global_step')
        optimizer = tf.train.AdamOptimizer(rcnn.learning_rate)
        train_op = optimizer.apply_gradients(optimizer.compute_gradients(rcnn.loss), global_step=global_step)
        # output directory for model and summaries
        timestamp = str(int(time.time()))
        outputdir = os.path.abspath(os.path.join(os.path.curdir, 'runs', timestamp))
        print("Writing to dir: {}".format(outputdir))
        # Summaries for loss and accuracy
        loss_summary = tf.summary.scalar('loss', rcnn.loss)
        acc_summary = tf.summary.scalar('accuracy', rcnn.accuracy)
        # train summary
        train_summary_op = tf.summary.merge([loss_summary, acc_summary])
        train_summary_dir = os.path.join(outputdir, 'summary', 'train')
        train_summary_writer = tf.summary.FileWriter(train_summary_dir, session.graph)
        # test summary
        test_summary_op = tf.summary.merge([loss_summary, acc_summary])
        test_summary_dir = os.path.join(outputdir, 'summary', 'test')
        test_summary_writer = tf.summary.FileWriter(test_summary_dir, session.graph)
        # checkpoint file
        checkpoint_dir = os.path.join(outputdir, 'checkpoint', 'model')
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        # keep only the most recent max_to_keep checkpoint files
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
        # initialize all variables
        session.run(tf.global_variables_initializer())

        def train_step(x_batch, y_batch):
            """
            Run one optimization step on a batch and log loss/accuracy.
            :param x_batch: batch of input token-id sequences
            :param y_batch: batch of one-hot labels
            :return: None
            """
            feed_dict = {
                rcnn.input_x: x_batch,
                rcnn.input_y: y_batch,
                rcnn.keep_prob: FLAGS.dropout_keep_prob
            }
            _, step, summary, loss, accuracy = session.run(
                [train_op, global_step, train_summary_op, rcnn.loss, rcnn.accuracy], feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}---step:{}, loss:{}, accuracy:{}".format(time_str, step, loss, accuracy))
            train_summary_writer.add_summary(summary, step)

        def test_step(x_batch, y_batch):
            """
            Evaluate the model on a batch (dropout disabled) and log metrics.
            :param x_batch: batch of input token-id sequences
            :param y_batch: batch of one-hot labels
            :return: None
            """
            feed_dict = {
                rcnn.input_x: x_batch,
                rcnn.input_y: y_batch,
                rcnn.keep_prob: 1.0
            }
            step, summary, loss, accuracy = session.run(
                [global_step, test_summary_op, rcnn.loss, rcnn.accuracy], feed_dict=feed_dict
            )
            time_str = datetime.datetime.now().isoformat()
            print("{}---step:{}, loss:{}, accuracy:{}".format(time_str, step, loss, accuracy))
            test_summary_writer.add_summary(summary, step)

        # Generate batches
        batches = batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.epochs)
        # Report the number of trainable parameters
        print("the number of parameters is: {}".format(np.sum([np.prod(v.get_shape().as_list())
                                                               for v in tf.trainable_variables()])))
        # Training loop
        for batch in batches:
            x_batch, y_batch = zip(*batch)
            train_step(x_batch, y_batch)
            current_step = tf.train.global_step(session, global_step)
            if current_step % FLAGS.evaluate_every == 0:
                test_step(x_test, y_test)
            if current_step % FLAGS.checkpoint_every == 0:
                path = saver.save(session, checkpoint_dir, global_step=global_step)
                print('Saved model checkpoint to {}'.format(path))
|
"""Legacy installation process, i.e. `setup.py install`.
"""
import logging
import os
import sys
from distutils.util import change_root
from pip._internal.exceptions import InstallationError
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.setuptools_build import make_setuptools_install_args
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Sequence
from pip._internal.build_env import BuildEnvironment
from pip._internal.models.scheme import Scheme
logger = logging.getLogger(__name__)
class LegacyInstallFailure(Exception):
    """Signals that a legacy ``setup.py install`` failed.

    The ``sys.exc_info()`` triple that was active at construction time is
    captured in ``self.parent`` so the caller can inspect or re-raise the
    original error.
    """

    def __init__(self):
        # type: () -> None
        super().__init__()
        self.parent = sys.exc_info()
def install(
    install_options,  # type: List[str]
    global_options,  # type: Sequence[str]
    root,  # type: Optional[str]
    home,  # type: Optional[str]
    prefix,  # type: Optional[str]
    use_user_site,  # type: bool
    pycompile,  # type: bool
    scheme,  # type: Scheme
    setup_py_path,  # type: str
    isolated,  # type: bool
    req_name,  # type: str
    build_env,  # type: BuildEnvironment
    unpacked_source_directory,  # type: str
    req_description,  # type: str
):
    # type: (...) -> bool
    """Run ``setup.py install`` for one requirement and record installed files.

    Returns True on success, False when setup.py produced no install record
    (i.e. nothing was installed). Raises LegacyInstallFailure (wrapping the
    original exception info) on any error while running setup.py, and
    InstallationError when the record contains no .egg-info directory.
    """
    header_dir = scheme.headers
    with TempDirectory(kind="record") as temp_dir:
        try:
            # Ask setuptools to write the list of installed files here.
            record_filename = os.path.join(temp_dir.path, 'install-record.txt')
            install_args = make_setuptools_install_args(
                setup_py_path,
                global_options=global_options,
                install_options=install_options,
                record_filename=record_filename,
                root=root,
                prefix=prefix,
                header_dir=header_dir,
                home=home,
                use_user_site=use_user_site,
                no_user_config=isolated,
                pycompile=pycompile,
            )
            runner = runner_with_spinner_message(
                f"Running setup.py install for {req_name}"
            )
            # Run inside the build environment so build deps are importable.
            with indent_log(), build_env:
                runner(
                    cmd=install_args,
                    cwd=unpacked_source_directory,
                )
            if not os.path.exists(record_filename):
                logger.debug('Record file %s not found', record_filename)
                # Signal to the caller that we didn't install the new package
                return False
        except Exception:
            # Signal to the caller that we didn't install the new package
            raise LegacyInstallFailure
        # At this point, we have successfully installed the requirement.
        # We intentionally do not use any encoding to read the file because
        # setuptools writes the file using distutils.file_util.write_file,
        # which does not specify an encoding.
        with open(record_filename) as f:
            record_lines = f.read().splitlines()
    def prepend_root(path):
        # type: (str) -> str
        # Re-root absolute paths when installing into an alternate --root.
        if root is None or not os.path.isabs(path):
            return path
        else:
            return change_root(root, path)
    # Locate the installed .egg-info directory among the recorded paths.
    for line in record_lines:
        directory = os.path.dirname(line)
        if directory.endswith('.egg-info'):
            egg_info_dir = prepend_root(directory)
            break
    else:
        message = (
            "{} did not indicate that it installed an "
            ".egg-info directory. Only setup.py projects "
            "generating .egg-info directories are supported."
        ).format(req_description)
        raise InstallationError(message)
    # Rewrite the record as paths relative to the .egg-info directory,
    # marking directories with a trailing path separator.
    new_lines = []
    for line in record_lines:
        filename = line.strip()
        if os.path.isdir(filename):
            filename += os.path.sep
        new_lines.append(
            os.path.relpath(prepend_root(filename), egg_info_dir)
        )
    new_lines.sort()
    ensure_dir(egg_info_dir)
    inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
    with open(inst_files_path, 'w') as f:
        f.write('\n'.join(new_lines) + '\n')
    return True
|
from math import inf
from math import log
class nGrams(object):
    """Builds nested n-gram count dictionaries from a list of tokens.

    The dictionary maps word1 -> word2 -> ... -> count for each n-gram seen.
    """
    def __init__(self):
        # Most recently built n-gram dictionary.
        self.value = {}
        pass
    def NGram(self, dictionary, poem, nGram):
        """Count every n-gram of length `nGram` in `poem` into `dictionary`.

        `dictionary` is mutated in place and also returned; the result is a
        nested dict chain of the n-gram's words ending in an integer count.
        """
        # Number of n-gram windows that fit in the token list.
        Number_of_Ngrams = len(poem)-nGram+1
        for position in range(Number_of_Ngrams):
            words = []
            for nWord in range(nGram):
                words = words + [poem[nWord+position]]
            temp_dic = dictionary
            for nWord in range(nGram):
                current_word = words[nWord]
                # True when we reached the last word of the n-gram.
                last_word = nWord+1==nGram
                if(current_word in temp_dic):
                    if(last_word):
                        temp_dic[current_word] = temp_dic[current_word]+1 #Increase the Ngram Count by 1
                    else:
                        # Descend one level into the nested dictionary.
                        temp_dic = temp_dic[current_word]
                else:
                    create_dic = 0
                    if(last_word):
                        create_dic = 1
                    else:
                        # Build the missing tail of the chain from the
                        # innermost count outwards.
                        create_dic = {words[-1]:1}
                        for k in range(nGram-2,nWord,-1):
                            create_dic = {words[k]:create_dic}
                    temp_dic[current_word] = create_dic
                    break
        self.value = dictionary
        return(dictionary)
def BiGram(dic, poem):
    """Accumulate bigram counts from `poem` into `dic`.

    `dic` maps word -> {follower: count}; it is mutated in place and returned.
    """
    for current, follower in zip(poem, poem[1:]):
        followers = dic.setdefault(current, {})
        followers[follower] = followers.get(follower, 0) + 1
    return dic
def TriGram(dic, poem):
    """Accumulate trigram counts from `poem` into `dic`.

    `dic` maps w1 -> w2 -> {w3: count}; it is mutated in place and returned.
    """
    for w1, w2, w3 in zip(poem, poem[1:], poem[2:]):
        level_two = dic.setdefault(w1, {})
        counts = level_two.setdefault(w2, {})
        counts[w3] = counts.get(w3, 0) + 1
    return dic
def next_word(words, dic, nGram):
    """Walk `words` down the nested count dict and return the follower with
    the highest count (ties broken by first insertion order; "" if empty)."""
    node = dic
    for word in words:
        node = node[word]
    best, best_count = "", 0
    for candidate, count in node.items():
        if count > best_count:
            best_count, best = count, candidate
    return best
def next_word_prob(words, dic, nGram, a=0.1):
    """Return (best_follower, probability) after additive smoothing.

    Walks `words` down the nested dict, then converts raw counts at that node
    into add-alpha smoothed probabilities *in place* (alpha = a * total).
    Smoothing is skipped when the node's first value is already fractional,
    i.e. the counts were normalized on a previous call.
    """
    node = dic
    for word in words:
        node = node[word]
    # Raw counts are >= 1; already-normalized nodes hold values < 1.
    if not (list(node.values())[0] < 1):
        total = sum(node.values())
        entries = list(node.items())
        alpha = a * total
        denominator = total + alpha * len(entries)
        for key, count in entries:
            node[key] = (count + alpha) / denominator
    best, best_prob = "", -inf
    for candidate, prob in node.items():
        if prob > best_prob:
            best_prob, best = prob, candidate
    return (best, best_prob)
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
import os
import uuid
from builtins import property
from pathlib import Path
from typing import List, Optional
from urllib.parse import urlparse
import requests
import torch
from azureml.core import Run
from InnerEye.Azure.azure_config import AzureConfig
from InnerEye.Common import fixed_paths
from InnerEye.ML.common import find_recovery_checkpoint_and_epoch
from InnerEye.ML.deep_learning_config import EXTRA_RUN_SUBFOLDER, OutputParams, WEIGHTS_FILE
from InnerEye.ML.lightning_container import LightningContainer
from InnerEye.ML.utils.run_recovery import RunRecovery
class CheckpointHandler:
    """
    This class handles which checkpoints are used to initialize the model during train or test time based on the
    azure config and model config.
    """

    def __init__(self, container: LightningContainer, azure_config: AzureConfig,
                 project_root: Path, run_context: Optional[Run] = None):
        """
        :param container: The lightning container whose checkpoints are managed.
        :param azure_config: Azure configuration, used to fetch run-recovery runs.
        :param project_root: Project root folder; downloaded weights go below it.
        :param run_context: The AzureML run context when running inside AzureML.
        """
        self.azure_config = azure_config
        self.container = container
        self.run_recovery: Optional[RunRecovery] = None
        self.project_root = project_root
        self.run_context = run_context
        self.local_weights_path: Optional[Path] = None
        self.has_continued_training = False

    @property
    def output_params(self) -> OutputParams:
        """
        Gets the part of the configuration that is responsible for output paths.
        """
        return self.container

    def download_checkpoints_from_hyperdrive_child_runs(self, hyperdrive_parent_run: Run) -> None:
        """
        Downloads the best checkpoints from all child runs of a Hyperdrive parent runs. This is used to gather results
        for ensemble creation.
        """
        self.run_recovery = RunRecovery.download_best_checkpoints_from_child_runs(self.output_params,
                                                                                  hyperdrive_parent_run)
        # Check paths are good, just in case
        for path in self.run_recovery.checkpoints_roots:
            if not path.is_dir():
                raise NotADirectoryError(f"Does not exist or is not a directory: {path}")

    def download_recovery_checkpoints_or_weights(self) -> None:
        """
        Download checkpoints from a run recovery object or from a weights url. Set the checkpoints path based on the
        run_recovery_object, weights_url or local_weights_path.
        This is called at the start of training.
        """
        if self.azure_config.run_recovery_id:
            run_to_recover = self.azure_config.fetch_run(self.azure_config.run_recovery_id.strip())
            self.run_recovery = RunRecovery.download_all_checkpoints_from_run(self.output_params, run_to_recover)
        else:
            self.run_recovery = None
        # Optionally pull checkpoints of a pretraining run into an extra subfolder.
        if self.azure_config.pretraining_run_recovery_id is not None:
            run_to_recover = self.azure_config.fetch_run(self.azure_config.pretraining_run_recovery_id.strip())
            run_recovery_object = RunRecovery.download_all_checkpoints_from_run(self.output_params,
                                                                               run_to_recover,
                                                                               EXTRA_RUN_SUBFOLDER)
            self.container.extra_downloaded_run_id = run_recovery_object
        else:
            self.container.extra_downloaded_run_id = None
        if self.container.weights_url or self.container.local_weights_path:
            self.local_weights_path = self.get_and_save_modified_weights()

    def additional_training_done(self) -> None:
        """
        Lets the handler know that training was done in this run.
        """
        self.has_continued_training = True

    def get_recovery_path_train(self) -> Optional[Path]:
        """
        Decides the checkpoint path to use for the current training run. Looks for the latest checkpoint in the
        checkpoint folder. If run_recovery is provided, the checkpoints will have been downloaded to this folder
        prior to calling this function. Else, if the run gets pre-empted and automatically restarted in AML,
        the latest checkpoint will be present in this folder too.
        :return: Constructed checkpoint path to recover from.
        """
        recovery = find_recovery_checkpoint_and_epoch(self.container.checkpoint_folder)
        if recovery is not None:
            local_recovery_path, recovery_epoch = recovery
            # Tell the container where training should resume from.
            self.container._start_epoch = recovery_epoch
            return local_recovery_path
        elif self.local_weights_path:
            return self.local_weights_path
        else:
            return None

    def get_best_checkpoint(self) -> List[Path]:
        """
        Get a list of checkpoints per epoch for testing/registration.
        1. If a run recovery object is used and no training was done in this run, use checkpoints from run recovery.
        2. If a run recovery object is used, and training was done in this run, but the start epoch is larger than
        the epoch parameter provided, use checkpoints from run recovery.
        3. If a run recovery object is used, and training was done in this run, but the start epoch is smaller than
        the epoch parameter provided, use checkpoints from the current training run.
        This function also checks that all the checkpoints at the returned checkpoint paths exist,
        and drops any that do not.
        """
        if not self.run_recovery and not self.has_continued_training:
            # BUG FIX: the two message fragments previously concatenated
            # without a space ("...provided andno training...").
            raise ValueError("Cannot recover checkpoint, no run recovery object provided and "
                             "no training has been done in this run.")
        checkpoint_paths = []
        if self.run_recovery:
            checkpoint_paths = self.run_recovery.get_best_checkpoint_paths()
            checkpoint_exists = []
            # Discard any checkpoint paths that do not exist - they will make inference/registration fail.
            # This can happen when some child runs in a hyperdrive run fail; it may still be worth running inference
            # or registering the model.
            for path in checkpoint_paths:
                if path.is_file():
                    checkpoint_exists.append(path)
                else:
                    logging.warning(f"Could not recover checkpoint path {path}")
            checkpoint_paths = checkpoint_exists
        if self.has_continued_training:
            # Checkpoint is from the current run, whether a new run or a run recovery which has been doing more
            # training, so we look for it there.
            checkpoint_from_current_run = self.output_params.get_path_to_best_checkpoint()
            if checkpoint_from_current_run.is_file():
                logging.info("Using checkpoints from current run.")
                checkpoint_paths = [checkpoint_from_current_run]
            else:
                logging.info("Training has continued, but not yet written a checkpoint. Using recovery checkpoints.")
        else:
            logging.info("Using checkpoints from run recovery")
        return checkpoint_paths

    def get_checkpoints_to_test(self) -> List[Path]:
        """
        Find the checkpoints to test. If a run recovery is provided, or if the model has been training, look for
        checkpoints corresponding to the epochs in get_test_epochs(). If there is no run recovery and the model was
        not trained in this run, then return the checkpoint from the local_weights_path.
        """
        checkpoints = []
        # If recovery object exists, or model was trained, look for checkpoints by epoch
        if self.run_recovery or self.has_continued_training:
            checkpoints = self.get_best_checkpoint()
        elif self.local_weights_path and not self.has_continued_training:
            # No recovery object and model was not trained, check if there is a local weight path.
            if self.local_weights_path.exists():
                logging.info(f"Using model weights at {self.local_weights_path} to initialize model")
                checkpoints = [self.local_weights_path]
            else:
                logging.warning(f"local_weights_path does not exist, "
                                f"cannot recover from {self.local_weights_path}")
        else:
            logging.warning("Could not find any run recovery object or local_weights_path to get checkpoints from")
        return checkpoints

    def download_weights(self) -> Path:
        """
        Download a checkpoint from weights_url to the modelweights directory.
        Skips the download when a file of the same name is already present.
        """
        target_folder = self.project_root / fixed_paths.MODEL_WEIGHTS_DIR_NAME
        target_folder.mkdir(exist_ok=True)
        url = self.container.weights_url
        # assign the same filename as in the download url if possible, so that we can check for duplicates
        # If that fails, map to a random uuid
        file_name = os.path.basename(urlparse(url).path) or str(uuid.uuid4().hex)
        result_file = target_folder / file_name
        # only download if hasn't already been downloaded
        if result_file.exists():
            logging.info(f"File already exists, skipping download: {result_file}")
            return result_file
        logging.info(f"Downloading weights from URL {url}")
        response = requests.get(url, stream=True)
        response.raise_for_status()
        with open(result_file, "wb") as file:
            for chunk in response.iter_content(chunk_size=1024):
                file.write(chunk)
        return result_file

    def get_local_weights_path_or_download(self) -> Optional[Path]:
        """
        Get the path to the local weights to use or download them and set local_weights_path
        """
        if self.container.local_weights_path:
            weights_path = self.container.local_weights_path
        elif self.container.weights_url:
            weights_path = self.download_weights()
        else:
            # BUG FIX: the two message fragments previously concatenated
            # without a space ("...set inthe model config.").
            raise ValueError("Cannot download/modify weights - neither local_weights_path nor weights_url is set in "
                             "the model config.")
        return weights_path

    def get_and_save_modified_weights(self) -> Path:
        """
        Downloads the checkpoint weights if needed.
        Then passes the downloaded or local checkpoint to the modify_checkpoint function from the model_config and saves
        the modified state dict from the function in the outputs folder with the name weights.pth.
        """
        weights_path = self.get_local_weights_path_or_download()
        if not weights_path or not weights_path.is_file():
            raise FileNotFoundError(f"Could not find the weights file at {weights_path}")
        modified_weights = self.container.load_checkpoint_and_modify(weights_path)
        target_file = self.output_params.outputs_folder / WEIGHTS_FILE
        torch.save(modified_weights, target_file)
        return target_file
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import sys
import os
import stat
import argparse
def process_cmd(ns):
    """Convert an IP address between dotted-quad and numeric form.

    A dotted-quad input ("1.2.3.4") is shown as space-separated hex octets;
    any other input is parsed as an integer (base inferred, so "0x..." works)
    and shown as a dotted quad.

    BUG FIX: ported the Python-2-only ``print x, y`` statements to function
    calls so the script runs on Python 3 (the forms used here are valid on
    both interpreters).

    :param ns: argparse namespace with an ``ip`` attribute
    :return: the printed result line (previously None; callers ignoring the
             return value are unaffected)
    """
    ip = ns.ip.strip()
    parts = ip.split(".")
    if len(parts) == 4:
        # Dotted quad -> two-digit uppercase hex per octet.
        hex_octets = ["%02X" % int(x) for x in parts]
        line = "{}  =>  {}".format(ip, " ".join(hex_octets))
    else:
        # Numeric (decimal/hex/octal via base 0) -> dotted quad.
        v = int(ip, 0)
        octets = []
        for _ in range(4):
            octets.append(str(v & 0xFF))
            v >>= 8
        octets.reverse()
        line = "{}  =>  {}".format(ip, ".".join(octets))
    print(line)
    return line
def main():
    """Parse the command line (one IP argument) and convert it.

    BUG FIX: ``argparse.ArgumentParser(version=...)`` was removed in
    Python 3; the version string is now exposed through the standard
    ``--version`` action, which also works on Python 2.7.
    """
    parser = argparse.ArgumentParser(prog="ip", description="convert ip addr")
    parser.add_argument("--version", action="version", version="%(prog)s 1.0")
    parser.add_argument("ip", help="dotted-quad (1.2.3.4) or numeric (0x01020304) address")
    ns = parser.parse_args()
    process_cmd(ns)


if __name__ == '__main__':
    main()
|
import os
from bkp_test_util import sync_testdir
from bkp_core import sync_mod,util,fs_mod
from io import StringIO
import time
import re
import math
def test_sync_mod_fs(sync_testdir):
    """ test suite for the sync_mod testing for local file system functionality """
    do_sync_mod_test(sync_testdir,sync_testdir["remote_fs_basepath"])
def test_sync_mod_ssh(sync_testdir):
    """ test suite for the sync_mod testing for sftp system functionality """
    do_sync_mod_test(sync_testdir,sync_testdir["ssh_basepath"])
def test_sync_mod_s3(sync_testdir):
    """ test suite for the sync_mod testing for s3 functionality """
    do_sync_mod_test(sync_testdir,sync_testdir["s3_basepath"])
def do_sync_mod_test( t_dir, test_basepath ):
    """ driver to run sync_mod tests on various file systems based on the test_basepath parameter """
    # Build a sync job that mirrors the local test dir to test_basepath,
    # excluding local_3.txt and the not_subdir_path directory.
    sync_config = {}
    sync_config["target"] = test_basepath
    sync_config["dirs"] = [t_dir["local_path"]]
    sync_config["exclude_files"] = r"local_3\.txt"
    sync_config["exclude_dirs"] = ["not_subdir_path"]
    sync_config["ssh_username"] = t_dir["ssh_username"]
    sync_config["ssh_password"] = t_dir["ssh_password"]
    sync_config["threads"] = "5"
    sync_job = sync_mod.SyncJob( sync_config )
    sync_job.set_verbose(True)
    def get_paths( local_file ):
        # Map a bare file name to its (local, remote) paths, honoring the
        # subdir_ prefix convention used by the fixture.
        if local_file.startswith("subdir_"):
            local_path = os.path.join(os.path.join(t_dir["local_path"],"subdir_path"),local_file)
            remote_path = os.path.join(os.path.join(sync_config["target"]+t_dir["local_path"],"subdir_path"),local_file)
        else:
            local_path = os.path.join(t_dir["local_path"],local_file)
            remote_path = os.path.join(sync_config["target"]+t_dir["local_path"],local_file)
        return (local_path,remote_path)
    deleted_on_client = []
    # Run several sync rounds, mutating the tree between rounds to exercise
    # remote-add, local-edit, remote-delete scenarios.
    for cases in range(0,5):
        time.sleep(1) # guarantee at least 1 second between sync jobs
        assert(not sync_job.synchronize())
        for local_file in t_dir["local_files"]:
            local_path,remote_path = get_paths( local_file )
            if local_file == "local_3.txt":
                # Excluded file: exists locally but must never appear remotely.
                assert(fs_mod.fs_stat(local_path,lambda: sync_config) != (-1,-1))
                assert(fs_mod.fs_stat(remote_path,lambda: sync_config) == (-1,-1))
            else:
                local_mtime,local_size = fs_mod.fs_stat(local_path,lambda: sync_config)
                assert( (local_mtime,local_size) != (-1,-1))
                assert(fs_mod.fs_stat(remote_path,lambda: sync_config) == (local_mtime,local_size))
        for remote_file in t_dir["remote_files"]:
            local_path,remote_path = get_paths( remote_file )
            local_mtime,local_size = fs_mod.fs_stat(local_path,lambda: sync_config)
            assert( (local_mtime,local_size) != (-1,-1))
            assert(fs_mod.fs_stat(remote_path,lambda: sync_config) == (local_mtime,local_size))
        # Every remote entry must be accounted for (sync metadata, known
        # local/remote files, or files deleted on the client this round).
        for l in StringIO(fs_mod.fs_ls(sync_config["target"],True,lambda: sync_config)):
            l = l.strip()
            parts = re.split(r"\s*",l,3)
            basename = os.path.basename(parts[-1])
            assert(basename.startswith(".sync") or basename in deleted_on_client or basename in t_dir["local_files"] or basename in t_dir["remote_files"])
        if cases == 0:
            # Round 0: create a new file on the remote side.
            local_path,remote_path = get_paths( "new_remote_file.txt" )
            util.put_contents(os.path.dirname(remote_path), os.path.basename(remote_path), "New remote content!", False, lambda: sync_config, False)
            t_dir["remote_files"].append( "new_remote_file.txt" )
        elif cases == 1:
            # Round 1: rewrite a file locally.
            local_path,remote_path = get_paths( "local_4.txt" )
            print("New local_4.txt content", file=open(local_path,"w"))
        elif cases == 2:
            # Round 2: verify the local edit propagated, then delete remotely.
            local_path,remote_path = get_paths( "local_4.txt" )
            assert(open(local_path,"r").read() == "New local_4.txt content\n")
            assert(util.get_contents(os.path.dirname(remote_path), os.path.basename(remote_path), False, lambda: sync_config) == "New local_4.txt content\n")
            fs_mod.fs_del(remote_path,False,lambda: sync_config)
            t_dir["local_files"].remove("local_4.txt")
            deleted_on_client.append("local_4.txt")
        elif cases == 3:
            # Round 3: delete a remote-origin file on the remote side.
            local_path,remote_path = get_paths( "remote_3.txt" )
            fs_mod.fs_del(remote_path,False,lambda: sync_config)
|
from injector import inject
from vc.command.base import BaseCommand
from vc.service.abme import AbmeService, AbmeOptions
class AbmeCommand(BaseCommand):
    """CLI command that runs the ABME service on a pair of input images."""

    description = 'Runs abme on an input file'
    # argparse argument specs consumed by BaseCommand: two input frames
    # and an output path, all optional with defaults.
    args = [
        {
            'dest': 'first_file',
            'type': str,
            'help': 'First file',
            'default': 'first.png',
            'nargs': '?',
        },
        {
            'dest': 'second_file',
            'type': str,
            'help': 'Second file',
            'default': 'second.png',
            'nargs': '?',
        },
        {
            'dest': 'output_file',
            'type': str,
            'help': 'Output file',
            'default': 'debug.png',
            'nargs': '?',
        },
    ]
    # Service instance provided by dependency injection.
    abme: AbmeService

    @inject
    def __init__(self, abme: AbmeService):
        # NOTE(review): BaseCommand.__init__ is not invoked here — confirm the
        # base class requires no initialisation of its own.
        self.abme = abme

    def handle(self, args):
        """Forward the parsed CLI arguments to the ABME service."""
        print('got %s %s %s' % (args.first_file, args.second_file, args.output_file))
        self.abme.handle(AbmeOptions(
            first_file=args.first_file,
            second_file=args.second_file,
            output_file=args.output_file,
        ))
|
from ScriptingBridge import SBApplication, SBObject
import weakref
import struct
import shlex
import subprocess
import objc
from contextlib import contextmanager
from enum import Enum
from typing import Mapping
from typing import List, Optional, Generator
from collections import namedtuple
# Lightweight records describing window geometry.
SIZE = namedtuple('Size', ('width', 'height'))
Geometry = namedtuple('Geometry', ('x', 'y', 'size'))
# OSX has common codes as a packed 32bit big endian int.
SIZE_CODE, = struct.unpack('>L', b'ptsz')
POSITION_CODE, = struct.unpack('>L', b'posn')
ACTION_CODE, = struct.unpack('>L', b'actT')
APPLE_EVENT_CODE, = struct.unpack('>L', b'aevt')
QUIT_EVENT_ID, = struct.unpack('>L', b'quit')
# OSX sendEvent_id_format operates funny. You will see in sdef /Applications/Spotify.app
# things like aevtquit for the hidden code. Those are a lie.
# The real magic is app.sendEvent_id_format_(struct.unpack('>L', 'aevt')[0], struct.unpack('>L', 'quit')[0], 0)
# i.e. the 8-char sdef code is really two packed 4-char codes: the event
# class ('aevt') and the event id ('quit'), passed as separate arguments.
class TrackPosition(namedtuple('TrackPosition', ['current_ms', 'duration_ms', 'percentage'])):
    """Playback position: elapsed ms, total ms, and percent complete."""
    __slots__ = ()
class TrackInfo(namedtuple(
        'TrackInfo', [
            'title',
            'artist',
            'album',
            'disc_number',
            'track_number',
            'starred',
            'popularity',
            'play_count',
            'cover_url',
            'album_artist',
            'url',
        ])):
    """Immutable snapshot of a Spotify track's metadata."""
    __slots__ = ()
class SpotifyPlayStates(Enum):
    """Spotify player states as packed four-char Apple event codes."""
    STOPPED, = struct.unpack('>L', b'kPSS')
    PLAYING, = struct.unpack('>L', b'kPSP')
    PAUSED, = struct.unpack('>L', b'kPSp')
def first(iterable):
    """Return the first item of `iterable`, or None when it is empty."""
    return next(iter(iterable), None)
def list_unique_properties(app) -> Mapping[str, objc.selector]:
    """Map attribute names unique to `app` (i.e. not inherited from
    SBObject) to the type of each attribute."""
    return {
        key: type(getattr(app, key))
        for key in list(set(dir(app)) - set(dir(SBObject)))
    }
class SystemEvent(object):
    """Wrapper around the macOS 'System Events' scripting application."""

    def send_keystroke(self, char):
        """Send a single keystroke system-wide via System Events."""
        self.app.keystroke_using_(char, None)

    def __init__(self):
        # Scripting-bridge handle to System Events.
        self.app = SBApplication.applicationWithBundleIdentifier_('com.apple.systemevents')
        super().__init__()

    def stop_screen_saver(self):
        """Wake the display by sending a carriage return."""
        self.send_keystroke('\r')

    @property
    def processes(self) -> Mapping[str, SBObject]:
        """Map process name -> list of matching processes, sorted so the
        frontmost process comes last within each name."""
        processes = {}
        for process in self.app.applicationProcesses():
            name = process.name()
            assert isinstance(name, str)
            try:
                processes[name].append(process)
            except KeyError:
                processes[name] = [process]
        for key in processes:
            processes[key] = sorted(processes[key], key=lambda val: val.frontmost())
        return processes

    def get_process_by_name(self, name):
        """Return the process list whose name matches `name` (case-insensitive).

        Raises KeyError when no process matches.
        """
        name = name.lower()
        # BUG FIX: dict.iteritems() is Python 2 only and raises
        # AttributeError on Python 3; use items() instead.
        for proc_name, app in self.processes.items():
            if proc_name.lower() == name:
                return app
        raise KeyError('{} not found!'.format(name))

    def get_processes_by_bundle(self, name):
        """Yield every process whose bundle identifier matches `name`
        (case-insensitive); processes without a bundle id are skipped."""
        name = name.lower()
        for process in self.app.applicationProcesses():
            if not process.bundleIdentifier():
                continue
            if process.bundleIdentifier().lower() == name:
                yield process

    def cast_process_to_class(self, class_type):
        """Yield `class_type` wrappers for each process with its bundle id,
        activating the app first (and hiding it again) if it was not running."""
        app = SBApplication.applicationWithBundleIdentifier_(class_type.bundle_id)
        started = bool(app.valueForKey_('running'))
        if not started:
            app.activate()
        for process in self.get_processes_by_bundle(class_type.bundle_id):
            t = class_type(self, process)
            # Keep the app out of the way if we had to launch it ourselves.
            if not started:
                t.hide()
            yield t
class CommonApp(object):
    """Base wrapper for a scriptable macOS application identified by bundle id."""

    # Subclasses must override with the app's bundle identifier.
    bundle_id: Optional[str] = None

    def __init__(self,
                 event_system: SystemEvent,
                 process: Optional[SBObject]=None):
        assert self.bundle_id is not None, 'Requires a bundle_id to be in class type!'
        self.app = SBApplication.applicationWithBundleIdentifier_(self.bundle_id)
        self.app_props = list_unique_properties(self.app)
        # Weak reference: the event system owns the app wrappers, not vice versa.
        self._events: "weakref.ref[SystemEvent]" = weakref.ref(event_system)
        self._process = process
        # Popen handle for a running `caffeinate` child process, if any.
        self._caffeine_fh = None
        super().__init__()

    def start(self):
        """Launch/activate the application when it is not already running."""
        if not self.running:
            self.app.activate()

    def quit(self):
        """Ask the app to quit via the raw 'aevt'/'quit' Apple event."""
        if self.running:
            self.app.sendEvent_id_format_(APPLE_EVENT_CODE, QUIT_EVENT_ID, 0)

    @property
    def process(self):
        """System Events process object for this app, re-resolved when stale."""
        if self._process is not None:
            # NOTE(review): .get() appears to force evaluation of the
            # scripting-bridge reference; a vanished process is assumed to
            # evaluate to None — confirm against ScriptingBridge semantics.
            if self._process.get() is None:
                self._process = None
            elif self._process.bundleIdentifier() != self.bundle_id:
                self._process = None
        if self._process is None:
            ref = self._events()
            self._process = first(ref.get_processes_by_bundle(self.bundle_id))
        return self._process

    @property
    def pid(self):
        """Unix process id; raises ValueError when the app is not running."""
        process = self.process
        if process is None:
            raise ValueError('{} is not running'.format(self.__class__.__name__))
        return process.unixId()

    @property
    def windows(self) -> List[SBObject]:
        """All app windows; raises ValueError when none are queryable."""
        process = self.process
        windows = list(process.windows())
        if not windows:
            raise ValueError(
                '{} either does not have any windows or '
                'does not allow you to query them'.format(self.__class__.__name__))
        return windows

    def sizes(self) -> Generator[Geometry, None, None]:
        """Yield a Geometry (position + size) for each window, stopping at
        the first window that reports no size."""
        for window in self.windows:
            item = window.propertyWithCode_(SIZE_CODE).get()
            x, y = window.propertyWithCode_(POSITION_CODE).get()
            if item is None:
                break
            width, height = item
            yield Geometry(x, y, SIZE(width, height))

    def hide(self):
        """Hide the app's process."""
        self.process.setVisible_(0)

    def show(self):
        """Unhide the app's process."""
        self.process.setVisible_(1)

    def send_keystroke(self, char):
        # Sends the keystroke to this app specifically (not system-wide).
        self.app.keystroke_using_(char, None)

    @property
    def running(self):
        """True when the app reports itself as running."""
        return bool(self.app.valueForKey_('running'))

    def caffeinate(self):
        """Keep the display awake for as long as this app's process lives,
        using the `caffeinate` command-line tool."""
        ref = self._events()
        ref.stop_screen_saver()
        # Replace any previous caffeinate child before spawning a new one.
        self.uncaffeinate()
        self._caffeine_fh = subprocess.Popen(shlex.split(
            'caffeinate -d -w {}'.format(self.pid)), stdout=subprocess.PIPE,
            stderr=subprocess.PIPE, close_fds=True)

    def uncaffeinate(self):
        """Terminate a still-running caffeinate child, if any."""
        if self._caffeine_fh and self._caffeine_fh.poll() is None:
            self._caffeine_fh.terminate()
        self._caffeine_fh = None

    @contextmanager
    def halt_saver(self):
        '''
        Keep the display awake for the duration of the block.

        >>> with spotify.halt_saver():
        ...     # do something
        ...     pass
        '''
        self.caffeinate()
        yield self._caffeine_fh
        self.uncaffeinate()
class Spotify(CommonApp):
    """Controller for the Spotify desktop app via its scripting interface."""

    bundle_id = 'com.spotify.client'

    def __init__(self, event_system, process=None):
        super().__init__(event_system, process)
        # Ensure the app is launched so scripting calls have a target.
        if not self.running:
            self.start()

    @property
    def current_track(self):
        """TrackInfo for the current track, or None when nothing is loaded."""
        track = self.app.currentTrack()
        if track:
            kwargs = {
                'artist': track.artist(),
                'album': track.album(),
                'disc_number': track.discNumber(),
                'play_count': track.playedCount(),
                'track_number': track.trackNumber(),
                'starred': track.starred(),
                'popularity': track.popularity(),
                'title': track.name(),
                'cover_url': track.artworkUrl(),
                'album_artist': track.albumArtist(),
                'url': track.spotifyUrl(),
            }
            # Coerce bridged Objective-C values into plain Python strings.
            for key in kwargs:
                if not isinstance(kwargs[key], (str, int, float)):
                    kwargs[key] = str(kwargs[key])
            return TrackInfo(**kwargs)
        return None

    @property
    def position(self):
        """Playback position as TrackPosition (ms elapsed, ms total, percent)."""
        duration = self.app.currentTrack().duration()
        position = int(self.app.playerPosition() * 1000)
        return TrackPosition(position, duration, float(position) / duration * 100)

    @property
    def volume(self):
        # Player volume, 0-100.
        return self.app.soundVolume()

    @volume.setter
    def volume(self, val):
        if not isinstance(val, int):
            raise TypeError('Must be an int')
        if not (0 <= val <= 100):
            raise ValueError('Must be between 0-100')
        self.app.setSoundVolume_(val)

    @property
    def status(self):
        """Current SpotifyPlayStates value (stopped / playing / paused)."""
        status = SpotifyPlayStates(self.app.playerState())
        return status

    def next(self):
        """Skip to the next track."""
        self.app.nextTrack()

    def previous(self):
        """Go back to the previous track."""
        self.app.previousTrack()

    @property
    def shuffle(self):
        # Whether shuffle mode is enabled.
        return self.app.shuffling()

    @shuffle.setter
    def shuffle(self, val):
        if not isinstance(val, bool):
            raise TypeError('Only bools allowed')
        self.app.setShuffling_(int(val))

    def play(self, track_url=None, context_url=None, shuffle=False):
        """Start playback, optionally of a specific spotify:track: URL,
        keeping the display awake for the session."""
        self.caffeinate()
        self.shuffle = shuffle
        if track_url is not None:
            assert track_url.startswith('spotify:track:'), 'not a track url'
            self.app.playTrack_inContext_(track_url, context_url)
        else:
            self.app.play()

    def pause(self):
        """Toggle: resume when paused, otherwise pause and release the display lock."""
        if self.status is SpotifyPlayStates.PAUSED:
            self.app.play()
            return
        self.app.pause()
        self.uncaffeinate()
if __name__ == '__main__':
    # Demo: attach to (or launch) Spotify and start playback.
    evt = SystemEvent()
    client = Spotify(evt)
    client.play()
|
from zentral.utils.apps import ZentralAppConfig
class ZentralSantaAppConfig(ZentralAppConfig):
    """Django AppConfig for the Zentral Santa contrib app."""
    name = "zentral.contrib.santa"
    verbose_name = "Zentral Santa contrib app"
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Android Application Usage history parsers."""
import unittest
from plaso.parsers import android_app_usage
from tests.parsers import test_lib
class AndroidAppUsageParserTest(test_lib.ParserTestCase):
  """Tests for the Android Application Usage History parser."""

  def testParse(self):
    """Tests the Parse function."""
    parser = android_app_usage.AndroidAppUsageParser()
    storage_writer = self._ParseFile(['usage-history.xml'], parser)
    # The fixture parses cleanly into 28 events with no warnings.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 28)
    events = list(storage_writer.GetEvents())
    # Spot-check a widget component last-resume event.
    expected_event_values = {
        'data_type': 'android:event:last_resume_time',
        'component': (
            'com.sec.android.widgetapp.ap.hero.accuweather.menu.MenuAdd'),
        'package': 'com.sec.android.widgetapp.ap.hero.accuweather',
        'timestamp': '2013-12-09 19:28:33.047000'}
    self.CheckEventValues(storage_writer, events[22], expected_event_values)
    # Spot-check a Google sign-in activity last-resume event.
    expected_event_values = {
        'data_type': 'android:event:last_resume_time',
        'component': 'com.google.android.gsf.login.NameActivity',
        'package': 'com.google.android.gsf.login',
        'timestamp': '2013-09-27 19:45:55.675000'}
    self.CheckEventValues(storage_writer, events[17], expected_event_values)
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
|
# Generated by Django 2.2.3 on 2019-09-18 15:14
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Configuration.inputs non-editable and EnrolledMachine.serial_number unique."""

    dependencies = [
        ('filebeat', '0004_auto_20190607_1533'),
    ]
    operations = [
        migrations.AlterField(
            model_name='configuration',
            name='inputs',
            field=django.contrib.postgres.fields.jsonb.JSONField(editable=False),
        ),
        migrations.AlterField(
            model_name='enrolledmachine',
            name='serial_number',
            field=models.TextField(unique=True),
        ),
    ]
|
# coding: utf-8
import unittest
import muse.audio
class AudioTest(unittest.TestCase):
    """Tag-parsing tests for muse.audio.Music.

    NOTE(review): these tests read absolute paths on a specific developer's
    desktop and will only pass on that machine — consider bundling fixtures.
    """

    def test_parse_m4a_tags(self):
        # Verify title/artist tags and embedded JPEG artwork of an M4A file.
        m4a_path = r"C:\Users\phone\Desktop\01 覚醒~Alternative Heart~.m4a"
        m = muse.audio.Music(m4a_path)
        self.assertTrue(m.tags.title == "覚醒~Alternative Heart~")
        self.assertTrue(m.tags.artist == "広瀬こはる(CV:橋本ちなみ), オルタナティブガールズ, 水島愛梨(CV:遠藤ゆりか), 西園寺玲(CV:木戸衣吹), 悠木美弥花(CV:竹達彩奈), 天堂真知(CV:安済知佳) & 橘直美(CV:大空直美)")
        self.assertTrue(m.artwork.mime_type == "image/jpeg")

    def test_parse_mp3_tags(self):
        # Verify title/artist ID3 tags and embedded JPEG artwork of an MP3 file.
        path = r"C:\Users\phone\Desktop\01. きらめきっ!の日.mp3"
        m = muse.audio.Music(path)
        self.assertTrue(m.tags.title == "きらめきっ!の日")
        self.assertTrue(m.tags.artist == "情報処理部")
        self.assertTrue(m.artwork.mime_type == "image/jpeg")
|
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# The author of this file is: https://github.com/mg2015started
from pathlib import Path
from smarts.sstudio.genscenario import gen_scenario
from smarts.sstudio.types import (
Distribution,
Flow,
LaneChangingModel,
Mission,
RandomRoute,
Route,
Scenario,
Traffic,
TrafficActor,
)
# Number of background (social) vehicles to spawn in the scenario.
social_vehicle_num = 100
# The ego vehicle drives from the south entrance to the east exit.
ego_missions = [
    Mission(
        route=Route(begin=("edge-south-SN", 0, 10), end=("edge-east-WE", 0, 8)),
    ),
]
# Social-vehicle template: speed close to nominal, never impatient to change lanes.
right_traffic_actor = TrafficActor(
    name="car",
    speed=Distribution(sigma=0.2, mean=1),
    lane_changing_model=LaneChangingModel(impatience=0),
)
# One randomly-routed flow per social vehicle, all using the same actor template.
scenario = Scenario(
    traffic={
        "basic": Traffic(
            flows=[
                Flow(
                    route=RandomRoute(),
                    rate=1,
                    actors={right_traffic_actor: 1.0},
                )
                for i in range(social_vehicle_num)
            ]
        )
    },
    ego_missions=ego_missions,
)
# Emit the generated scenario files next to this script.
gen_scenario(scenario=scenario, output_dir=Path(__file__).parent)
|
# WSGI entry point: build the app via the application factory.
from application import create_app  # pragma: no cover
app = create_app()  # pragma: no cover
if __name__ == '__main__':  # pragma: no cover
    # Run the development server when executed directly.
    app.run()
|
# Distance = speed * time.
speed = 108
time = 12
dist = speed * time
print(dist)
# Arithmetic operator reference (operator, operands, result):
# +  Addition          11 6 17
# -  Subtraction       11 6 5
# *  Multiplication    11 6 66
# // Integer division  11 6 1
# %  Remainder         11 6 5
# ** Exponentiation    2 3 8
# String concatenation with +.
goodByePhrase = 'Hasta la vista'
person = 'baby'
print(goodByePhrase + ', ' + person + '!')
# Numbers must be converted with str() before concatenating.
answer = '2 + 3 = ' + str(2 + 3)
print(answer)
# String repetition with *.
word = 'Bye'
phrase = word * 3 + '!'
print(phrase)
"""
Utility module to use python logging uniformly.
usage:
```python
from deb.utils.logging import logger
logger.info("all good in the neighborhood!")
```
Author: Par (turalabs.com)
Contact:
license: GPL v3 - PLEASE REFER TO DEB/LICENSE FILE
"""
import logging
from logging import DEBUG, INFO, WARNING, WARN, ERROR, CRITICAL, FATAL
import sys
from deb.utils.config import config
def __get_config_level():
    """Resolve the configured logging level name to its numeric value.

    The config entry is expected to be a level name such as "DEBUG" or
    "logging.WARNING". Returns logging.INFO when the entry is missing,
    malformed, or not a known level.

    SECURITY FIX: the original called eval() on the config string, which
    executes arbitrary config content; we now look the name up on the
    logging module instead.
    """
    try:
        name = config['logging']['level'].as_str().strip()
        # Accept both "DEBUG" and "logging.DEBUG" spellings.
        name = name.rpartition('.')[2].upper()
        level = getattr(logging, name)
        if isinstance(level, int):
            return level
        return INFO
    except Exception:
        # Missing key, bad type, unknown name: fall back to a sane default.
        return INFO
def __get_config_stream():
    """Return the stream logs should go to: stdout if configured, else stderr."""
    try:
        target = config['logging']['output'].as_str().lower()
    except Exception:
        # Any problem reading config means we default to stderr.
        return sys.stderr
    return sys.stdout if target == 'stdout' else sys.stderr
# Configure the root logger once, at import time, from config-driven settings.
logging.basicConfig(format='[%(levelname)-8s] [%(asctime)s] [%(module)-35s][%(lineno)04d] : %(message)s',
                    level=__get_config_level(),
                    stream=__get_config_stream())

# BUG FIX: `logger` was previously bound to the `logging` *module* while being
# annotated as `logging.Logger`. Bind the actual root Logger (the one
# basicConfig just configured) so the annotation is truthful; the documented
# `logger.info(...)` usage behaves the same.
logger: logging.Logger = logging.getLogger()
|
class Language:
    """All user-facing menu text for the installer plus install-status tracking.

    Class attributes hold the fixed menu strings (each `line_N` contains one
    `{}` placeholder for the per-item action text). Instances track which
    tools are present under /opt and which wget-downloaded artifacts exist
    in the backup folders.
    """

    line_hashes = '###############################################'
    line_apprx = '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
    line_non_linux_distro = "This script must run in a Kali or any Linux distro.\nGood bye! :)\n"
    line_not_python3 = "Need python3 to run this script\nGood bye! :)\n"
    line_not_kali = "You are not using Kali OS! Please at least use these in a VirtualBOX so that you can " \
                    "roll back more easily!"
    line_non_superuser = "You need to be a superuser or run this script in super user mode.\nGood bye. :)"
    line_thats_wrap = "~~~~~~~~~~~~~~That's a wrap baby!~~~~~~~~~~~~~~"
    line_0 = "Choose your options:" \
             "{}" \
             "\n** Press [ 13 ] to download ** all scripts **" \
             "\n\n** Press [ 14 ] to terminate the script"
    line_1 = '\n**{} Discover Script (Former Backtrack Script)'
    line_2 = '\n**{} SMBExec (grab hashes out of Domain Controller and reverse shells)'
    line_3 = '\n**{} Veil 3.0 (to create Python based Metepreter executable)'
    line_4 = '\n**{} PeepingTom (to take snapshots of web pages) (NOT AVAILABLE NOW***)'
    line_5 = '\n**{} Eye Witness (to take snapshots of web pages'
    line_6 = '\n**{} Powersploit (to create Powershell script)'
    line_7 = '\n**{} Responder (to gain NTLM challenge/hashes)'
    line_8 = '\n**{} Social Engineering Toolkit'
    line_9 = '\n**{} bypassUAC (NOT AVAILABLE NOW***)'
    line_10 = '\n**{} beEF for cross site scripting'
    line_11 = '\n**{} Fuzzing Lists (for Social Engineering Campaign)'
    line_12 = '\n**{} other necessary scripts like' \
              '\n - WCE (Windows Credential Editor), Mimikatz (to recover password from memory),' \
              '\n - Custom password list from Skull Security and Crackstation, ' \
              '\n - & NMap scripts (for quicker scanning and smarter identification'

    def __init__(self):
        # These will be all the folders in a selected machine
        self.folder_name = ['discover', 'smbexec', 'Veil', 'EyeWitness', 'PowerSploit',
                            'Responder', 'set', 'beEF', 'SecLists']
        # Artifacts fetched via wget (mimikatz zip + password lists).
        self.password_folders = ['mimikatz_trunk.zip',
                                 'crackstation-human-only.txt.gz', 'rockyou.txt.bz2']
        # name -> bool: present under /opt
        self.opt_status = dict()
        # name -> bool: present in the wget backup folders
        self.wget_status = dict()
        # Menu number -> folder/artifact it installs ('' = unavailable item).
        self.folder_to_index = {
            1: 'discover',
            2: 'smbexec',
            3: 'Veil',
            4: '',
            5: 'EyeWitness',
            6: 'PowerSploit',
            7: 'Responder',
            8: 'set',
            9: '',
            10: 'beEF',
            11: 'SecLists',
            12: 'mimikatz_trunk.zip',
            13: 'crackstation-human-only.txt.gz',
            14: 'rockyou.txt.bz2',
        }

    def main_menu_v2(self):
        """Render the menu, marking each item installed/not from live status."""
        self.item_status()  # this will update the dicts inside __init__
        sentence_list = self._make_sentences()
        final_line = ''
        for index in range(0, 12):
            folder_name = self.folder_to_index[index + 1]
            # Items 1-11 live under /opt; item 12 is a wget-downloaded artifact.
            if index < 11:
                folder_status = self.opt_status[folder_name] if folder_name != '' else False
            else:
                folder_status = self.wget_status[folder_name] if folder_name != '' else False
            new_sentence = sentence_list[index]
            if folder_status:
                new_sentence = new_sentence.format(
                    ' [ Completed ] | [ {} ] to re-install/download again'.format(index + 1))
            else:
                new_sentence = new_sentence.format(
                    'Press [ {} ] to install'.format(index + 1))
            sentence_list[index] = new_sentence
        all_lines = ''
        for sentence in sentence_list:
            all_lines += sentence
        final_line = self.line_0.format(all_lines)
        return final_line

    def main_menu(self, last_item):
        """Deprecated: render the menu marking the first `last_item` entries complete."""
        # its deprecated
        last_item = int(last_item)
        last_item = 12 if last_item > 12 else last_item
        sentence_list = self._make_sentences()
        count = 0
        while count < last_item:
            new_sentence = sentence_list[count]
            new_sentence = new_sentence.format(
                ' [ Completed ] | Press [ {} ] to re-install/download again'.format(count + 1))
            sentence_list[count] = new_sentence
            count += 1
        while count < len(sentence_list):
            new_sentence = sentence_list[count]
            # BUG FIX: the original compared `count == len(sentence_list)`, which can
            # never be true inside `while count < len(sentence_list)`, so the
            # "download & install" wording for the final entry was unreachable.
            # Compare against the last index instead.
            if count == len(sentence_list) - 1:
                new_sentence = new_sentence.format(
                    'Press [ {} ] to download & install'.format(count + 1))
            else:
                new_sentence = new_sentence.format(
                    'Press [ {} ] to install'.format(count + 1))
            sentence_list[count] = new_sentence
            count += 1
        # now this will create 12 sentences inside a list.
        big_line = ''
        for sentence in sentence_list:
            big_line += sentence
        final_line = self.line_0.format(big_line)
        return final_line

    def _make_sentences(self):
        """Return the 12 menu templates in display order (each with one '{}')."""
        sentence_array = list()
        sentence_array.append(self.line_1)
        sentence_array.append(self.line_2)
        sentence_array.append(self.line_3)
        sentence_array.append(self.line_4)
        sentence_array.append(self.line_5)
        sentence_array.append(self.line_6)
        sentence_array.append(self.line_7)
        sentence_array.append(self.line_8)
        sentence_array.append(self.line_9)
        sentence_array.append(self.line_10)
        sentence_array.append(self.line_11)
        sentence_array.append(self.line_12)
        return sentence_array

    def item_status(self):
        """
        update the status of items /opt and wget magic.

        Fills self.opt_status from /opt contents and self.wget_status from
        the ~/backup_wget folders.
        """
        # update the status of all files in opt/
        all_opt_files = self.list_dir()
        for files in self.folder_name:
            if files in all_opt_files:
                self.opt_status[files] = True
            else:
                self.opt_status[files] = False
        # update the status over backup password and mimikatz
        import os
        mimikatz_path = '~/backup_wget/'
        password_list = '~/backup_wget/password_list/'
        mimikatz_bkup = self.list_dir(path=os.path.expanduser(mimikatz_path))
        if self.password_folders[0] in mimikatz_bkup:
            self.wget_status[self.password_folders[0]] = True
        else:
            self.wget_status[self.password_folders[0]] = False
        try:
            all_wget_files = self.list_dir(path=os.path.expanduser(password_list))
            for files in self.password_folders[1:]:
                if files in all_wget_files:
                    self.wget_status[files] = True
                else:
                    self.wget_status[files] = False
        except OSError as err:
            print("OSError - {}".format(err))
            # BUG FIX: the original wrote `self.wget_status[files] = False` here,
            # but `files` was left over from the /opt loop above, so a wrong key
            # was written. Mark every password artifact as missing instead.
            for files in self.password_folders[1:]:
                self.wget_status[files] = False
        return

    @staticmethod
    def list_dir(path='/opt/'):
        """
        returns a list of all the folders in a directory given.
        """
        import os
        # this program is suppose to run in sudo mode
        return os.listdir(path)
if __name__ == '__main__':
    # Manual smoke test left disabled; this module is meant to be imported.
    # print(Language().main_menu_v2())
    pass
|
from eth_tester.exceptions import TransactionFailed
from pytest import fixture, mark, raises
from utils import longTo32Bytes, TokenDelta, EtherDelta, longToHexString, PrintGasUsed, AssertLog
from reporting_utils import proceedToNextRound, finalize, proceedToDesignatedReporting
def test_initial_report(localFixture, universe, market, categoricalMarket, scalarMarket, cash, reputationToken):
    """After finalization, the initial reporter redeems their stake 1:1 in REP."""
    disputeWindow = localFixture.applySignature('DisputeWindow', market.getDisputeWindow())
    constants = localFixture.contracts["Constants"]

    # Now end the window and finalize
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    assert market.finalize()
    assert categoricalMarket.finalize()
    assert scalarMarket.finalize()

    marketInitialReport = localFixture.applySignature('InitialReporter', market.getInitialReporter())
    categoricalInitialReport = localFixture.applySignature('InitialReporter', categoricalMarket.getInitialReporter())

    marketStake = marketInitialReport.getStake()
    # Expected event payload: redeemed amount equals REP received (no bonus).
    initialReporterRedeemedLog = {
        "reporter": localFixture.accounts[0],
        "amountRedeemed": marketStake,
        "repReceived": marketStake,
        "payoutNumerators": [0, market.getNumTicks(), 0],
        "universe": universe.address,
        "market": market.address
    }
    with AssertLog(localFixture, "InitialReporterRedeemed", initialReporterRedeemedLog):
        with TokenDelta(reputationToken, marketStake, localFixture.accounts[0], "Redeeming didn't refund REP"):
            assert marketInitialReport.redeem(localFixture.accounts[0])

    # Same refund behavior for the categorical market's initial report.
    categoricalMarketStake = categoricalInitialReport.getStake()
    with TokenDelta(reputationToken, categoricalMarketStake, localFixture.accounts[0], "Redeeming didn't refund REP"):
        assert categoricalInitialReport.redeem(localFixture.accounts[0])
@mark.parametrize('finalize', [
    True,
    False,
])
def test_failed_crowdsourcer(finalize, localFixture, universe, market, cash, reputationToken):
    """A dispute crowdsourcer that never fills its bond refunds contributors in full.

    Parametrized on whether the market finalizes or moves to the next round.
    NOTE(review): the `finalize` parameter shadows the imported `finalize`
    helper inside this test.
    """
    disputeWindow = localFixture.applySignature('DisputeWindow', market.getDisputeWindow())

    # We'll make the window active
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getStartTime() + 1)

    # We'll have testers contribute to a dispute but not reach the target
    bondSize = market.getParticipantStake() * 2
    # NOTE(review): `/` is float division on Python 3 — presumably these
    # amounts are meant to be integers; confirm the intended interpreter.
    partialFill = bondSize / 6

    # confirm we can contribute 0
    assert market.contribute([0, 1, market.getNumTicks()-1], 0, "", sender=localFixture.accounts[1])

    with TokenDelta(reputationToken, -partialFill, localFixture.accounts[1], "Disputing did not reduce REP balance correctly"):
        assert market.contribute([0, 1, market.getNumTicks()-1], partialFill, "", sender=localFixture.accounts[1])

    with TokenDelta(reputationToken, -partialFill, localFixture.accounts[2], "Disputing did not reduce REP balance correctly"):
        assert market.contribute([0, 1, market.getNumTicks()-1], partialFill, "", sender=localFixture.accounts[2])

    # Underfilled bond -> the dispute window must not have advanced.
    assert market.getDisputeWindow() == disputeWindow.address

    payoutDistributionHash = market.derivePayoutDistributionHash([0, 1, market.getNumTicks()-1])
    failedCrowdsourcer = localFixture.applySignature("DisputeCrowdsourcer", market.getCrowdsourcer(payoutDistributionHash))

    # confirm we cannot contribute directly to a crowdsourcer without going through the market
    with raises(TransactionFailed):
        failedCrowdsourcer.contribute(localFixture.accounts[0], 1, False)

    if finalize:
        # Fast forward time until the dispute window is over and we can redeem to recieve the REP back
        localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    else:
        # Continue to the next round which will disavow failed crowdsourcers and let us redeem once the window is over
        market.contribute([0, 0, market.getNumTicks()], bondSize, "")
        assert market.getDisputeWindow() != disputeWindow.address
        localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)

    # Both contributors get their partial contributions back in full.
    with TokenDelta(reputationToken, partialFill, localFixture.accounts[1], "Redeeming did not refund REP"):
        assert failedCrowdsourcer.redeem(localFixture.accounts[1])

    with TokenDelta(reputationToken, partialFill, localFixture.accounts[2], "Redeeming did not refund REP"):
        assert failedCrowdsourcer.redeem(localFixture.accounts[2])
def test_one_round_crowdsourcer(localFixture, universe, market, cash, reputationToken):
    """One successful dispute round: the winning crowdsourcer redeems with a
    bonus, while the losing initial reporter gets nothing back."""
    disputeWindow = localFixture.applySignature('DisputeWindow', market.getDisputeWindow())
    constants = localFixture.contracts["Constants"]

    # We'll make the window active
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getStartTime() + 1)

    # We'll have testers push markets into the next round by funding dispute crowdsourcers
    amount = 2 * market.getParticipantStake()
    with TokenDelta(reputationToken, -amount, localFixture.accounts[1], "Disputing did not reduce REP balance correctly"):
        assert market.contribute([0, 0, market.getNumTicks()], amount, "", sender=localFixture.accounts[1])

    # A fully funded dispute advances the market to a new dispute window.
    newDisputeWindowAddress = market.getDisputeWindow()
    assert newDisputeWindowAddress != disputeWindow.address

    # fast forward time to the fee new window
    disputeWindow = localFixture.applySignature('DisputeWindow', newDisputeWindowAddress)
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getStartTime() + 1)

    # Fast forward time until the new dispute window is over and we can redeem our winning stake, and dispute bond tokens
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    assert market.finalize()

    # Participant 0 is the initial report, participant 1 the dispute bond.
    initialReporter = localFixture.applySignature('InitialReporter', market.getReportingParticipant(0))
    marketDisputeCrowdsourcer = localFixture.applySignature('DisputeCrowdsourcer', market.getReportingParticipant(1))

    expectedRep = reputationToken.balanceOf(marketDisputeCrowdsourcer.address)
    disputeCrowdsourcerRedeemedLog = {
        "reporter": localFixture.accounts[1],
        "disputeCrowdsourcer": marketDisputeCrowdsourcer.address,
        "amountRedeemed": marketDisputeCrowdsourcer.getStake(),
        "repReceived": expectedRep,
        "payoutNumerators": [0, 0, market.getNumTicks()],
        "universe": universe.address,
        "market": market.address
    }
    with AssertLog(localFixture, "DisputeCrowdsourcerRedeemed", disputeCrowdsourcerRedeemedLog):
        with TokenDelta(reputationToken, expectedRep, localFixture.accounts[1], "Redeeming didn't refund REP"):
            assert marketDisputeCrowdsourcer.redeem(localFixture.accounts[1], sender=localFixture.accounts[1])

    # The initial reporter does not get their REP back
    with TokenDelta(reputationToken, 0, localFixture.accounts[0], "Redeeming didn't refund REP"):
        assert initialReporter.redeem(localFixture.accounts[0])
def test_multiple_round_crowdsourcer(localFixture, universe, market, cash, reputationToken):
    """Four dispute rounds: participants on the final outcome redeem stake plus
    a 40% bonus (stake * 2 / 5); losing rounds get nothing."""
    constants = localFixture.contracts["Constants"]

    # Initial Report disputed
    proceedToNextRound(localFixture, market, localFixture.accounts[1], True)
    # Initial Report winning
    proceedToNextRound(localFixture, market, localFixture.accounts[2], True)
    # Initial Report disputed
    proceedToNextRound(localFixture, market, localFixture.accounts[1], True, randomPayoutNumerators=True)
    # Initial Report winning
    proceedToNextRound(localFixture, market, localFixture.accounts[3], True)

    # Get all the winning Reporting Participants (even-numbered rounds backed
    # the initial-report outcome, which ends up final).
    initialReporter = localFixture.applySignature('InitialReporter', market.getReportingParticipant(0))
    winningDisputeCrowdsourcer1 = localFixture.applySignature('DisputeCrowdsourcer', market.getReportingParticipant(2))
    winningDisputeCrowdsourcer2 = localFixture.applySignature('DisputeCrowdsourcer', market.getReportingParticipant(4))

    # Get losing Reporting Participants
    losingDisputeCrowdsourcer1 = localFixture.applySignature('DisputeCrowdsourcer', market.getReportingParticipant(1))
    losingDisputeCrowdsourcer2 = localFixture.applySignature('DisputeCrowdsourcer', market.getReportingParticipant(3))

    # We can't redeem yet as the market isn't finalized
    with raises(TransactionFailed):
        initialReporter.redeem(localFixture.accounts[0])

    with raises(TransactionFailed):
        winningDisputeCrowdsourcer1.redeem(localFixture.accounts[2])

    # Fast forward time until the new dispute window is over
    disputeWindow = localFixture.applySignature("DisputeWindow", market.getDisputeWindow())
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    assert market.finalize()

    # NOTE(review): `* 2 / 5` is the 40% ROI; `/` is float division on
    # Python 3 — confirm the intended interpreter/precision.
    expectedRep = initialReporter.getStake() + initialReporter.getStake() * 2 / 5
    with TokenDelta(reputationToken, expectedRep, localFixture.accounts[0], "Redeeming didn't refund REP"):
        assert initialReporter.redeem(localFixture.accounts[0])

    expectedRep = winningDisputeCrowdsourcer1.getStake() + winningDisputeCrowdsourcer1.getStake() * 2 / 5
    with TokenDelta(reputationToken, expectedRep, localFixture.accounts[2], "Redeeming didn't refund REP"):
        assert winningDisputeCrowdsourcer1.redeem(localFixture.accounts[2])

    expectedRep = winningDisputeCrowdsourcer2.getStake() + winningDisputeCrowdsourcer2.getStake() * 2 / 5
    with TokenDelta(reputationToken, expectedRep, localFixture.accounts[3], "Redeeming didn't refund REP"):
        assert winningDisputeCrowdsourcer2.redeem(localFixture.accounts[3])

    # The losing reports get no REP
    with TokenDelta(reputationToken, 0, localFixture.accounts[1], "Redeeming refunded REP"):
        assert losingDisputeCrowdsourcer1.redeem(localFixture.accounts[1])

    with TokenDelta(reputationToken, 0, localFixture.accounts[1], "Redeeming refunded REP"):
        assert losingDisputeCrowdsourcer2.redeem(localFixture.accounts[1])
def test_multiple_contributors_crowdsourcer(localFixture, universe, market, cash, reputationToken):
    """Two accounts split one winning dispute bond; each redeems their own
    contribution plus the proportional 40% bonus."""
    disputeWindow = localFixture.applySignature('DisputeWindow', market.getDisputeWindow())

    # We'll make the window active
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getStartTime() + 1)

    # We'll have testers push markets into the next round by funding dispute crowdsourcers
    amount = market.getParticipantStake()
    with TokenDelta(reputationToken, -amount, localFixture.accounts[1], "Disputing did not reduce REP balance correctly"):
        assert market.contribute([0, 0, market.getNumTicks()], amount, "", sender=localFixture.accounts[1])
    with TokenDelta(reputationToken, -amount, localFixture.accounts[2], "Disputing did not reduce REP balance correctly"):
        assert market.contribute([0, 0, market.getNumTicks()], amount, "", sender=localFixture.accounts[2])

    # The combined contributions filled the bond and advanced the window.
    newDisputeWindowAddress = market.getDisputeWindow()
    assert newDisputeWindowAddress != disputeWindow.address

    # fast forward time to the fee new window
    disputeWindow = localFixture.applySignature('DisputeWindow', newDisputeWindowAddress)
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getStartTime() + 1)

    # Fast forward time until the new dispute window is over and we can redeem our winning stake, and dispute bond tokens
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    assert market.finalize()

    marketDisputeCrowdsourcer = localFixture.applySignature('DisputeCrowdsourcer', market.getReportingParticipant(1))

    # Each contributor gets their amount back plus the 40% ROI share.
    expectedRep = amount + amount * 2 / 5
    with TokenDelta(reputationToken, expectedRep, localFixture.accounts[1], "Redeeming didn't refund REP"):
        assert marketDisputeCrowdsourcer.redeem(localFixture.accounts[1])

    with TokenDelta(reputationToken, expectedRep, localFixture.accounts[2], "Redeeming didn't refund REP"):
        assert marketDisputeCrowdsourcer.redeem(localFixture.accounts[2])
def test_forkAndRedeem(localFixture, universe, market, categoricalMarket, cash, reputationToken):
    """Drive the market to a fork, then redeem participants into the child
    universes via forkAndRedeem (earning the 40% minting reward)."""
    # Let's do some initial disputes for the categorical market
    proceedToNextRound(localFixture, categoricalMarket, localFixture.accounts[1], moveTimeForward = False)

    # Get to a fork
    testers = [localFixture.accounts[0], localFixture.accounts[1], localFixture.accounts[2], localFixture.accounts[3]]
    testerIndex = 1
    while (market.getForkingMarket() == longToHexString(0)):
        proceedToNextRound(localFixture, market, testers[testerIndex], True)
        testerIndex += 1
        testerIndex = testerIndex % len(testers)

    # Have the participants fork and create new child universes
    # NOTE(review): this loop only instantiates contract signatures; the actual
    # forking appears to happen in forkAndRedeem below — confirm intent.
    for i in range(market.getNumParticipants()):
        reportingParticipant = localFixture.applySignature("DisputeCrowdsourcer", market.getReportingParticipant(i))

    # Finalize the fork
    finalize(localFixture, market, universe)

    categoricalDisputeCrowdsourcer = localFixture.applySignature("DisputeCrowdsourcer", categoricalMarket.getReportingParticipant(1))

    # Migrate the categorical market into the winning universe. This will disavow the dispute crowdsourcer on it, letting us redeem for original universe rep
    assert categoricalMarket.migrateThroughOneFork([0,0,0,categoricalMarket.getNumTicks()], "")
    expectedRep = categoricalDisputeCrowdsourcer.getStake()
    with TokenDelta(reputationToken, expectedRep, localFixture.accounts[1], "Redeeming didn't increase REP correctly"):
        categoricalDisputeCrowdsourcer.redeem(localFixture.accounts[1])

    # Build the two child-universe payout vectors ("no" outcome 1, "yes" outcome 2).
    noPayoutNumerators = [0] * market.getNumberOfOutcomes()
    noPayoutNumerators[1] = market.getNumTicks()
    yesPayoutNumerators = [0] * market.getNumberOfOutcomes()
    yesPayoutNumerators[2] = market.getNumTicks()
    noUniverse = localFixture.applySignature('Universe', universe.createChildUniverse(noPayoutNumerators))
    yesUniverse = localFixture.applySignature('Universe', universe.createChildUniverse(yesPayoutNumerators))
    noUniverseReputationToken = localFixture.applySignature('ReputationToken', noUniverse.getReputationToken())
    yesUniverseReputationToken = localFixture.applySignature('ReputationToken', yesUniverse.getReputationToken())

    # Now we'll fork and redeem the reporting participants
    for i in range(market.getNumParticipants()):
        account = localFixture.accounts[i % 4]
        reportingParticipant = localFixture.applySignature("DisputeCrowdsourcer", market.getReportingParticipant(i))
        expectedRep = reputationToken.balanceOf(reportingParticipant.address) * 7 / 5 # * 1.4 to account for the minting reward of 40%
        repToken = noUniverseReputationToken if i % 2 == 0 else yesUniverseReputationToken
        with TokenDelta(repToken, expectedRep, account, "Redeeming didn't increase REP correctly for " + str(i)):
            assert reportingParticipant.forkAndRedeem(sender=account)
def test_preemptive_crowdsourcer_contributions_never_used(localFixture, universe, market, reputationToken):
    """A preemptive (tentative) bond that is never triggered is refunded 1:1."""
    # We can pre-emptively stake REP in case someone disputes our initial report
    preemptiveBondSize = 200 * 10 ** 18
    assert market.contributeToTentative([0, market.getNumTicks(), 0], preemptiveBondSize, "")

    # Now let the market resolve with the initial report
    disputeWindow = localFixture.applySignature('DisputeWindow', market.getDisputeWindow())

    # Time marches on and the market can be finalized
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    assert market.finalize()

    # The premptive bond can be redeemed for the REP staked
    preemptiveDisputeCrowdsourcer = localFixture.applySignature('DisputeCrowdsourcer', market.preemptiveDisputeCrowdsourcer())
    with TokenDelta(reputationToken, preemptiveBondSize, localFixture.accounts[0], "Redeeming didn't refund REP"):
        assert preemptiveDisputeCrowdsourcer.redeem(localFixture.accounts[0])
def test_preemptive_crowdsourcer_contributions_disputed_wins(localFixture, universe, market, reputationToken):
    """When a disputed preemptive bond wins: early ("real") stake earns the 40%
    ROI while later ("overload") stake is only refunded."""
    # We can pre-emptively stake REP in case someone disputes our initial report
    preemptiveBondSize = 200 * 10 ** 18

    # We'll have one user buy all the stake that will award an ROI and another user buy the remaining stake which will not
    initialStake = market.getParticipantStake()
    realBondSize = initialStake * 3
    assert market.contributeToTentative([0, market.getNumTicks(), 0], realBondSize, "")
    assert market.contributeToTentative([0, market.getNumTicks(), 0], preemptiveBondSize - realBondSize, "", sender = localFixture.accounts[1])

    preemptiveDisputeCrowdsourcer = localFixture.applySignature('DisputeCrowdsourcer', market.preemptiveDisputeCrowdsourcer())

    # Now we'll dispute the intial report
    proceedToNextRound(localFixture, market)

    # By disputing we actually cause the preemptive bond to get placed.
    assert market.getParticipantStake() == preemptiveBondSize + initialStake * 3

    # We'll simply move time forward and let this bond placed on the initial report outcome win
    disputeWindow = localFixture.applySignature('DisputeWindow', market.getDisputeWindow())

    # Time marches on and the market can be finalized
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    assert market.finalize()

    # The account which placed stake first and got normal tokens will make the normal 40% ROI
    expectedWinnings = realBondSize * .4
    with TokenDelta(reputationToken, realBondSize + expectedWinnings, localFixture.accounts[0], "Redeeming didn't refund REP"):
        assert preemptiveDisputeCrowdsourcer.redeem(localFixture.accounts[0])

    # The account which placed stake later and got overload tokens will not make any ROI
    with TokenDelta(reputationToken, preemptiveBondSize - realBondSize, localFixture.accounts[1], "Redeeming didn't refund REP"):
        assert preemptiveDisputeCrowdsourcer.redeem(localFixture.accounts[1])
def test_preemptive_crowdsourcer_contributions_disputed_loses(localFixture, universe, market, reputationToken):
    """When the dispute against the preemptive bond wins, the bond is liquidated
    and its crowdsourcer ends with a zero REP balance."""
    # We can pre-emptively stake REP in case someone disputes our initial report
    preemptiveBondSize = 200 * 10 ** 18
    assert market.contributeToTentative([0, market.getNumTicks(), 0], preemptiveBondSize, "")
    initialStake = market.getParticipantStake()

    preemptiveDisputeCrowdsourcer = localFixture.applySignature('DisputeCrowdsourcer', market.preemptiveDisputeCrowdsourcer())

    # Now we'll dispute the intial report
    proceedToNextRound(localFixture, market)

    # By disputing we actually cause the preemptive bond to get placed.
    assert market.getParticipantStake() == preemptiveBondSize + initialStake * 3

    # We'll dispute this newly placed bond made from the preemptive contributions
    proceedToNextRound(localFixture, market)

    # And now we'll let the dispute win
    disputeWindow = localFixture.applySignature('DisputeWindow', market.getDisputeWindow())

    # Time marches on and the market can be finalized
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    assert market.finalize()

    # The preemptive bond has been liquidated
    assert reputationToken.balanceOf(preemptiveDisputeCrowdsourcer.address) == 0
def test_preemptive_crowdsourcer_contributions_disputed_twice_wins(localFixture, universe, market, reputationToken):
    """If the overload bond itself gets disputed and ultimately wins, both the
    early and the overload contributors receive the 40% ROI."""
    # We can pre-emptively stake REP in case someone disputes our initial report
    preemptiveBondSize = 200 * 10 ** 18

    # We'll have one user buy all the stake that will award an ROI and another user buy the remaining stake which will not
    initialStake = market.getParticipantStake()
    realBondSize = initialStake * 3
    assert market.contributeToTentative([0, market.getNumTicks(), 0], realBondSize, "")
    assert market.contributeToTentative([0, market.getNumTicks(), 0], preemptiveBondSize - realBondSize, "", sender = localFixture.accounts[1])

    preemptiveDisputeCrowdsourcer = localFixture.applySignature('DisputeCrowdsourcer', market.preemptiveDisputeCrowdsourcer())

    # Now we'll dispute the intial report
    proceedToNextRound(localFixture, market)

    # By disputing we actually cause the preemptive bond to get placed.
    assert market.getParticipantStake() == preemptiveBondSize + initialStake * 3

    # We'll dispute this newly placed bond made from the preemptive contributions
    proceedToNextRound(localFixture, market)

    # And now we'll do one more dispute in favor of the initial report
    proceedToNextRound(localFixture, market)

    # Now we finalize the market
    disputeWindow = localFixture.applySignature('DisputeWindow', market.getDisputeWindow())

    # Time marches on and the market can be finalized
    localFixture.contracts["Time"].setTimestamp(disputeWindow.getEndTime() + 1)
    assert market.finalize()

    # Because the overloaded bond was disputed there is now sufficient REP to award the overload tokens with ROI as well so both users will receive a 40% ROI
    expectedWinnings = realBondSize * .4
    with TokenDelta(reputationToken, realBondSize + expectedWinnings, localFixture.accounts[0], "Redeeming didn't refund REP"):
        assert preemptiveDisputeCrowdsourcer.redeem(localFixture.accounts[0])

    overloadStake = preemptiveBondSize - realBondSize
    expectedWinnings = overloadStake * .4
    with TokenDelta(reputationToken, overloadStake + expectedWinnings, localFixture.accounts[1], "Redeeming didn't refund REP"):
        assert preemptiveDisputeCrowdsourcer.redeem(localFixture.accounts[1])
@fixture(scope="session")
def localSnapshot(fixture, kitchenSinkSnapshot):
    """Session-scoped chain snapshot: markets past end time, REP distributed,
    and initial reports filed on all three test markets."""
    fixture.resetToSnapshot(kitchenSinkSnapshot)
    universe = kitchenSinkSnapshot['universe']
    market = kitchenSinkSnapshot['yesNoMarket']
    categoricalMarket = kitchenSinkSnapshot['categoricalMarket']
    scalarMarket = kitchenSinkSnapshot['scalarMarket']

    # Skip to Designated Reporting
    fixture.contracts["Time"].setTimestamp(market.getEndTime() + 1)

    # Distribute REP
    reputationToken = fixture.applySignature('ReputationToken', universe.getReputationToken())
    for testAccount in [fixture.accounts[1], fixture.accounts[2], fixture.accounts[3]]:
        reputationToken.transfer(testAccount, 1 * 10**6 * 10**18)

    # Designated Report on the markets
    designatedReportCost = universe.getOrCacheDesignatedReportStake()
    # Zero delta expected: the report stake is offset by the returned no-show bond.
    with TokenDelta(reputationToken, 0, fixture.accounts[0], "Doing the designated report didn't deduct REP correctly or didn't award the no show bond"):
        market.doInitialReport([0, market.getNumTicks(), 0], "")
        categoricalMarket.doInitialReport([0, categoricalMarket.getNumTicks(), 0, 0], "")
        scalarMarket.doInitialReport([0, scalarMarket.getNumTicks(), 0], "")

    return fixture.createSnapshot()
@fixture
def localFixture(fixture, localSnapshot):
    """Per-test fixture: reset the chain to the prepared local snapshot."""
    fixture.resetToSnapshot(localSnapshot)
    return fixture


@fixture
def reputationToken(localFixture, kitchenSinkSnapshot, universe):
    # REP token contract of the universe under test.
    return localFixture.applySignature('ReputationToken', universe.getReputationToken())


@fixture
def universe(localFixture, kitchenSinkSnapshot):
    return localFixture.applySignature(None, kitchenSinkSnapshot['universe'].address, kitchenSinkSnapshot['universe'].abi)


@fixture
def market(localFixture, kitchenSinkSnapshot):
    # The yes/no market doubles as the default `market` for these tests.
    return localFixture.applySignature(None, kitchenSinkSnapshot['yesNoMarket'].address, kitchenSinkSnapshot['yesNoMarket'].abi)


@fixture
def categoricalMarket(localFixture, kitchenSinkSnapshot):
    return localFixture.applySignature(None, kitchenSinkSnapshot['categoricalMarket'].address, kitchenSinkSnapshot['categoricalMarket'].abi)


@fixture
def scalarMarket(localFixture, kitchenSinkSnapshot):
    return localFixture.applySignature(None, kitchenSinkSnapshot['scalarMarket'].address, kitchenSinkSnapshot['scalarMarket'].abi)


@fixture
def cash(localFixture, kitchenSinkSnapshot):
    return localFixture.applySignature(None, kitchenSinkSnapshot['cash'].address, kitchenSinkSnapshot['cash'].abi)
|
from core.advbase import *
from slot.a import *
from slot.d import *
def module():
    """Entry point used by the simulator to locate this adventurer class."""
    return Ramona
class Ramona(Adv):
    """Simulation config/logic for the adventurer Ramona (dl-sim Adv subclass)."""

    # Ability tuples consumed by the Adv framework — presumably
    # (modifier name, value); confirm against core.advbase.
    a1 = ('primed_att',0.10)
    a3 = ('bc',0.13)

    # Equipment slots and action priority list (acl) for the simulator.
    conf = {}
    conf['slots.a'] = Summer_Paladyns()+Primal_Crisis()
    conf['slots.burn.a'] = Resounding_Rendition()+Elegant_Escort()
    conf['acl'] = """
`dragon, s=1
`s3, not self.s3_buff
`s1a
`s2,x=4
"""
    coab = ['Blade', 'Wand', 'Marth']

    def prerun(self):
        # Keep the original s1 action so s1back() can restore it after the
        # alternate cast (s1a) fires.
        self.a_s1 = self.s1.ac
        # Alternate s1 variant with its own startup/recovery timings.
        self.a_s1a = S('s1', Conf({'startup': 0.10, 'recovery': 3.10}))
        def recovery():
            # Combined recovery: the alt action's own recovery plus the
            # original s1's current recovery.
            return self.a_s1a._recovery + self.a_s1.getrecovery()
        self.a_s1a.getrecovery = recovery

    def s1back(self, t):
        # Timer callback: restore the original s1 action config.
        self.s1.ac = self.a_s1

    def s1a(self):
        # Alternate s1 cast: adds 6 extra hits of 2.93 damage, temporarily
        # swaps in the alt action config, then schedules the restore just
        # after the alt action's startup.
        if self.s1.check():
            self.dmg_make('s1', 2.93*6)
            self.hits += 6
            self.s1.ac = self.a_s1a
            Timer(self.s1back).on(self.conf.s1.startup+0.01)
            return self.s1()
        else:
            return 0

    def s2_proc(self, e):
        # s2 triggers a defense chain event.
        Event('defchain')()
if __name__ == '__main__':
    # FIX: `sys` was referenced without a guaranteed import — it was only in
    # scope if a star-import above happened to re-export it. Import it
    # explicitly before use; behavior is otherwise unchanged.
    import sys
    from core.simulate import test_with_argv
    test_with_argv(None, *sys.argv)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import os
from collections import defaultdict
logger = logging.getLogger(__name__)
FPS = 30
# AVA_VALID_FRAMES = range(902, 1799)
def load_image_lists(cfg, is_train):
    """
    Loading image paths from corresponding files.
    Args:
        cfg (CfgNode): config.
        is_train (bool): if it is training dataset or not.
    Returns:
        image_paths (list[list]): a list of items. Each item (also a list)
            corresponds to one video and contains the paths of images for
            this video.
        video_idx_to_name (list): a list which stores video names.
    """
    # NOTE(review): `is_train` is never used — the TRAIN frame list is loaded
    # unconditionally. Confirm whether a separate val/test list should be
    # selected when is_train is False.
    path_to_file = cfg.VIDOR.TRAIN_FRAME_LIST
    assert os.path.exists(path_to_file), "{} dir not found".format(
        path_to_file
    )

    # Collected per-video image paths, keyed by a dense video index.
    image_paths = defaultdict(list)
    video_name_to_idx = {}
    video_idx_to_name = []
    with open(path_to_file, "r") as f:
        f.readline()  # skip the header row
        for line in f:
            row = line.split()
            # The format of each row should follow:
            # video_id frame_id frame_path labels box[0] box[1] box[2] box[3]
            assert len(row) == 8
            video_name = row[0]

            # Assign a new dense index the first time a video is seen.
            if video_name not in video_name_to_idx:
                idx = len(video_name_to_idx)
                video_name_to_idx[video_name] = idx
                video_idx_to_name.append(video_name)

            data_key = video_name_to_idx[video_name]

            image_paths[data_key].append(
                os.path.join(cfg.VIDOR.FRAME_PATH, row[2])
            )

    # Convert dict-of-lists into a list ordered by video index.
    image_paths = [image_paths[i] for i in range(len(image_paths))]
    return image_paths, video_idx_to_name
def load_boxes_and_labels(cfg, mode):
    """
    Loading boxes and labels from csv files.

    Args:
        cfg (CfgNode): config.
        mode (str): 'train', 'val', or 'test' mode.
    Returns:
        all_boxes (dict): a dict which maps from `video_name` and
            `frame_sec` to a list of `box`. Each `box` is a
            [`box_coord`, `box_labels`] where `box_coord` is the
            coordinates of box and 'box_labels` are the corresponding
            labels for the box.
    """
    # NOTE(review): `mode` is currently unused — the TRAIN frame list is
    # always loaded. Confirm whether val/test should select a different list.
    path_to_file = cfg.VIDOR.TRAIN_FRAME_LIST
    assert os.path.exists(path_to_file), "{} dir not found".format(
        path_to_file
    )

    all_boxes = {}
    count = 0
    unique_box_count = 0
    with open(path_to_file, "r") as f:
        f.readline()  # skip the header row
        for line in f:
            row = line.split()
            video_name, frame_sec = row[0], int(row[1])

            # Box with format [x1, y1, x2, y2]
            box_key = ",".join(row[4:8])
            box = list(map(int, row[4:8]))
            label = int(row[3])

            # BUG FIX: the original only created the per-frame dict when the
            # *video* was first seen, so a second frame_sec for an already-seen
            # video raised KeyError below. Initialize the video level and the
            # frame level independently.
            if video_name not in all_boxes:
                all_boxes[video_name] = {}
            if frame_sec not in all_boxes[video_name]:
                all_boxes[video_name][frame_sec] = {}

            # Deduplicate boxes by coordinates; accumulate all labels per box.
            if box_key not in all_boxes[video_name][frame_sec]:
                all_boxes[video_name][frame_sec][box_key] = [box, []]
                unique_box_count += 1

            all_boxes[video_name][frame_sec][box_key][1].append(label)
            if label != -1:
                count += 1

    for video_name in all_boxes.keys():
        for frame_sec in all_boxes[video_name].keys():
            # Save in format of a list of [box_i, box_i_labels].
            all_boxes[video_name][frame_sec] = list(
                all_boxes[video_name][frame_sec].values()
            )

    logger.info("Number of unique boxes: %d" % unique_box_count)
    logger.info("Number of annotations: %d" % count)
    return all_boxes
def get_keyframe_data(boxes_and_labels):
    """
    Getting keyframe indices, boxes and labels in the dataset.
    Args:
        boxes_and_labels (list[dict]): a list which maps from video_idx to a dict.
            Each dict maps `frame_sec` to a list of boxes and corresponding labels.
    Returns:
        keyframe_indices (list): a list of indices of the keyframes.
        keyframe_boxes_and_labels (list[list[list]]): a list of list which maps from
            video_idx and sec_idx to a list of boxes and corresponding labels.
    """
    def sec_to_frame(sec):
        """
        Convert time index (in second) to frame index.
        Currently the identity mapping; the AVA-style offset is kept for
        reference:
        0: 900
        30: 901
        """
        # return (sec - 900) * FPS
        return sec

    keyframe_indices = []
    keyframe_boxes_and_labels = []
    count = 0
    for video_idx, sec_to_boxes in enumerate(boxes_and_labels):
        keyframe_boxes_and_labels.append([])
        sec_idx = 0
        for sec, boxes in sec_to_boxes.items():
            # Only seconds with at least one annotated box become keyframes.
            if boxes:
                keyframe_indices.append(
                    (video_idx, sec_idx, sec, sec_to_frame(sec))
                )
                keyframe_boxes_and_labels[video_idx].append(boxes)
                sec_idx += 1
                count += 1
    logger.info("%d keyframes used." % count)
    return keyframe_indices, keyframe_boxes_and_labels
def get_num_boxes_used(keyframe_indices, keyframe_boxes_and_labels):
    """
    Get total number of used boxes.
    Args:
        keyframe_indices (list): a list of indices of the keyframes.
        keyframe_boxes_and_labels (list[list[list]]): a list of list which maps from
            video_idx and sec_idx to a list of boxes and corresponding labels.
    Returns:
        count (int): total number of used boxes.
    """
    return sum(
        len(keyframe_boxes_and_labels[video_idx][sec_idx])
        for video_idx, sec_idx, _, _ in keyframe_indices
    )
|
from django.urls import path
from . views import *
from django.conf import settings
from django.conf.urls.static import static
# URL routes for this app.
# NOTE(review): both routes share the name "connect_caller"; Django allows
# duplicates but reverse("connect_caller") will only resolve the last one
# registered — consider giving each route a unique name.
urlpatterns=[
    path('connect/',main,name="connect_caller"),
    path('connect/file',calulate_distance_view,name="connect_caller"),
    #path('connect/file/1',show_assistant,name="connect_caller_1"),
]
# Serve static and media files from Django itself in development only;
# production is expected to serve them from the web server.
if settings.DEBUG:
    urlpatterns+=static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)
    urlpatterns+=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
|
"""
modules for DQAS framework
"""
import sys
import inspect
from functools import lru_cache, partial
from multiprocessing import Pool, get_context
import functools
import operator
import numpy as np
import scipy as sp
import sympy
import tensorflow as tf
from typing import (
List,
Sequence,
Any,
Tuple,
Callable,
Iterator,
Optional,
Union,
Iterable,
Dict,
)
import networkx as nx
import cirq
import tensorflow_quantum as tfq
from ..gates import array_to_tensor, num_to_tensor
from .. import cons
# don't directly import backend, as it is supposed to change at runtime
from ..circuit import Circuit
from .layers import *
# Loose type aliases: the real types live in numpy / tf.keras but are kept
# as Any so the module imports without hard typing dependencies.
Array = Any  # np.array
Opt = Any  # tf.keras.optimizer
Model = Any  # tf.keras.Model
# Handle to this module object, for dynamic attribute access.
thismodule = sys.modules[__name__]
# Global operator pool shared through set_op_pool/get_op_pool; module-level
# so spawned worker processes can re-populate it.
_op_pool: Sequence[Any] = []
def set_op_pool(l: Sequence[Any]) -> None:
    """
    Replace the module-level operator pool used by the DQAS search.

    Sometimes, to make parallel mode work, one should call ``set_op_pool``
    at the global level of the script so spawned workers inherit it.

    :param l: sequence of primitive operations forming the pool
    """
    global _op_pool
    _op_pool = l
def get_op_pool() -> Sequence[Any]:
    """
    Return the current module-level operator pool.

    :return: the sequence last passed to ``set_op_pool``
    """
    # read-only access; the ``global`` statement is unnecessary for reads
    return _op_pool
## infrastructure for DQAS search
def get_var(name: str) -> Any:
    """
    call in customized functions and grab variable within DQAF framework function by var name str

    NOTE: this relies on frame introspection exactly two levels up the call
    stack (``inspect.stack()[2]``), so it only works when invoked one call
    deep inside the DQAS driver (e.g. from ``verbose_func``/``history_func``
    callbacks) — calling it from any other depth looks up the wrong frame.

    :param name: local variable name to fetch from the caller's caller frame
    :return: the object bound to ``name`` in that frame
    """
    return inspect.stack()[2][0].f_locals[name]
def verbose_output(max_prob: bool = True, weight: bool = True) -> None:
    """
    doesn't support prob model DQAS search

    Intended as a ``verbose_func`` callback inside ``DQAS_search``; it pulls
    the driver's locals via ``get_var``.

    :param max_prob: print the per-layer maximum probability when True
    :param weight: print the candidate weights when True
    :return:
    """
    if max_prob:
        layer_prob = get_var("prob")
        print("max probability for each layer:")
        print(np.max(layer_prob.numpy(), axis=1))
    if weight:
        candidate = get_weights(get_var("nnp"), get_var("stp")).numpy()
        print("associating weights:", candidate)
def preset_byprob(prob: Tensor) -> Sequence[int]:
    """
    Sample one operation index per layer from the categorical
    distribution given row-wise by ``prob``.

    :param prob: [p, c] tensor of per-layer probabilities
    :return: list of p sampled operation indices
    """
    num_layers, num_choices = prob.shape[0], prob.shape[1]
    indices = np.arange(num_choices)
    return [
        np.random.choice(indices, p=np.array(prob[i]))
        for i in range(num_layers)
    ]
def get_preset(stp: Tensor) -> Tensor:
    """Greedy structural choice: argmax operation index for every layer of ``stp``."""
    return tf.argmax(stp, axis=1)
def get_weights(
    nnp: Tensor, stp: Tensor = None, preset: Optional[Sequence[int]] = None
) -> Tensor:
    """
    works only when nnp has the same shape as stp, i.e. one parameter for each op

    :param nnp: [p, c] circuit parameter pool
    :param stp: structural parameters, used to derive ``preset`` when it is omitted
    :param preset: explicit per-layer operation choice
    :return: tensor of the p selected parameters, nnp[i, preset[i]]
    """
    if preset is None:
        preset = get_preset(stp)
    depth = nnp.shape[0]
    rows = tf.cast(tf.range(depth), tf.int32)
    cols = tf.cast(preset, tf.int32)
    # gather nnp[i, preset[i]] for every layer i
    return tf.gather_nd(nnp, tf.stack([rows, cols], axis=1))
def get_weights_v2(nnp: Tensor, preset: Sequence[int]) -> Tensor:
    """
    Gather per-layer weights from ``nnp`` for the given structure.

    :param nnp: parameter pool of shape [p, c] or [p, c, l]
    :param preset: chosen operation index for each of the p layers
    :return: tensor of shape [p] (when l == 1) or [p, l]
    """
    if len(nnp.shape) == 3:
        last = nnp.shape[-1]
    else:
        last = 1
        nnp = nnp[..., tf.newaxis]
    depth = nnp.shape[0]
    weights = np.empty(dtype=np.float32, shape=[depth, last])
    for layer, choice in enumerate(preset):
        weights[layer, :] = nnp[layer, choice, :]
    if last == 1:
        weights = weights.reshape([depth])
    return tf.constant(weights)
def parallel_kernel(
    prob: Tensor,
    gdata: Any,
    nnp: Tensor,
    kernel_func: Callable[[Any, Tensor, Sequence[int]], Tuple[Tensor, Tensor]],
) -> Tuple[Tensor, Tensor, Tensor]:
    """
    kernel for multiprocess to run parallel in DQAS function

    :param prob: [p, c] categorical probabilities per layer
    :param gdata: one data instance forwarded to ``kernel_func``
    :param nnp: circuit parameter pool
    :param kernel_func: vag kernel returning (loss, gradient w.r.t. nnp)
    :return: (loss, gradient w.r.t. nnp, score-function gradient \\nabla lnp)
    """
    # make each subprocess run with different random state
    # fix: the ``scipy.random`` alias (sp.random) was deprecated and removed
    # in modern SciPy; it was historically the same object as numpy.random,
    # so seeding numpy directly is equivalent.
    np.random.seed()
    # see https://stackoverflow.com/a/6914470/9062180
    # it is still not the best way to corporate numpy random and multiprocessing
    # see more in https://github.com/numpy/numpy/issues/9650
    dtype = tf.float32
    p = prob.shape[0]
    preset = preset_byprob(prob)
    loss, gnnp = kernel_func(gdata, nnp, preset)
    # REINFORCE score-function term: one-hot(chosen op) - prob per layer
    gs = tf.tensor_scatter_nd_add(
        tf.cast(-prob, dtype=dtype),
        tf.constant(list(zip(range(p), preset))),
        tf.ones([p], dtype=dtype),
    )  # \nabla lnp
    return loss, gnnp, gs
def void_generator() -> Iterator[Any]:
    """Infinite generator yielding ``None`` — placeholder data pipeline."""
    while True:
        yield None
def single_generator(g: Any) -> Iterator[Any]:
    """Infinite generator that always yields the same object ``g``."""
    while True:
        yield g
def history_loss() -> Array:
    """Default ``history_func`` callback: grab the running baseline cost
    ``avcost1`` from the enclosing DQAS driver frame via ``get_var``."""
    return get_var("avcost1").numpy()
def repr_op(element: Any) -> str:
    """
    Human-readable representation of an operator (or nested collection of
    operators) from the op pool.

    :param element: str, list/tuple of elements, or any object with __repr__
    :return: string form of ``element``
    """
    if isinstance(element, str):
        return element
    if isinstance(element, (list, tuple)):
        return str(tuple(repr_op(item) for item in element))
    rep = element.__repr__
    # some exotic objects expose a non-callable __repr__ attribute
    return rep() if callable(rep) else rep  # type: ignore
def DQAS_search(
    kernel_func: Callable[[Any, Tensor, Sequence[int]], Tuple[Tensor, Tensor]],
    *,
    g: Optional[Iterator[Any]] = None,
    op_pool: Optional[Sequence[Any]] = None,
    p: Optional[int] = None,
    p_nnp: Optional[int] = None,
    p_stp: Optional[int] = None,
    batch: int = 300,
    prethermal: int = 0,
    epochs: int = 100,
    parallel_num: int = 0,
    verbose: bool = False,
    verbose_func: Optional[Callable[[], None]] = None,
    history_func: Optional[Callable[[], Any]] = None,
    prob_clip: Optional[float] = None,
    baseline_func: Optional[Callable[[Sequence[float]], float]] = None,
    pertubation_func: Optional[Callable[[], Tensor]] = None,
    nnp_initial_value: Optional[Array] = None,
    stp_initial_value: Optional[Array] = None,
    network_opt: Optional[Opt] = None,
    structure_opt: Optional[Opt] = None,
    prethermal_opt: Optional[Opt] = None,
    prethermal_preset: Optional[Sequence[int]] = None,
    stp_regularization: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
    nnp_regularization: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
) -> Tuple[Tensor, Tensor, Sequence[Any]]:
    """
    DQAS framework entrypoint
    :param kernel_func: function with input of data instance, circuit parameters theta and structural paramter k,
        return tuple of objective value and gradient with respect to theta
    :param g: data generator as dataset
    :param op_pool: list of operations as primitive operator pool
    :param p: the default layer number of the circuit ansatz
    :param p_nnp: shape of circuit parameter pool, in general p_stp*l, where l is the max number of circuit parameters for
        op in the operator pool
    :param p_stp: the same as p in the most times
    :param batch: batch size of one epoch
    :param prethermal: prethermal update times
    :param epochs: training epochs
    :param parallel_num: parallel thread number, 0 to disable multiprocessing model by default
    :param verbose: set verbose log to print
    :param verbose_func: function to output verbose information
    :param history_func: function return intermediate result for final history list
    :param prob_clip: cutoff probability to avoid peak distribution
    :param baseline_func: function accepting list of objective values and return the baseline value used in the next round
    :param pertubation_func: return noise with the same shape as circuit parameter pool
    :param nnp_initial_value: initial values for circuit parameter pool
    :param stp_initial_value: initial values for probabilistic model parameters
    :param network_opt: optimizer for circuit parameters theta
    :param structure_opt: optimizer for model parameters alpha
    :param prethermal_opt: optimizer for circuit parameters in prethermal stage
    :param prethermal_preset: fixed structural parameters for prethermal training
    :param stp_regularization: regularization function for model parameters alpha
    :param nnp_regularization: regularization function for circuit parameters theta
    :return: tuple of (stp, nnp, history): final structural parameters, final
        circuit parameters, and the per-epoch results collected via ``history_func``
    """
    # shape of nnp and stp is not necessarily compatible in complicated settings
    dtype = tf.float32  # caution, simply changing this is not guaranteed to work
    if op_pool is None:
        op_pool = get_op_pool()
    c = len(op_pool)
    set_op_pool(op_pool)
    if g is None:
        g = void_generator()
    if parallel_num > 0:
        # spawn (not fork) avoids TF threading-lock issues in worker processes
        pool = get_context("spawn").Pool(parallel_num)
        # ``global`` declaration so the partial below pickles cleanly for spawn
        global parallel_kernel
        p_parallel_kernel = partial(parallel_kernel, kernel_func=kernel_func)
    if network_opt is None:
        network_opt = tf.keras.optimizers.Adam(learning_rate=0.1)  # network
    if structure_opt is None:
        structure_opt = tf.keras.optimizers.Adam(
            learning_rate=0.1, beta_1=0.8, beta_2=0.99
        )  # structure
    if prethermal_opt is None:
        prethermal_opt = tf.keras.optimizers.Adam(learning_rate=0.1)  # prethermal
    # resolve the shapes of the two parameter pools from whichever of
    # p / p_nnp / p_stp / explicit initial values the caller supplied
    if nnp_initial_value is None:
        if p_nnp is None:
            if p is not None:
                p_nnp = p
            else:
                raise ValueError("Please give the shape information on nnp")
        nnp_initial_value = np.random.uniform(size=[p_nnp, c])
    if stp_initial_value is None:
        if p_stp is None:
            if p is not None:
                p_stp = p
            else:
                raise ValueError("Please give the shape information on stp")
        stp_initial_value = np.zeros([p_stp, c])
    if p is None:
        p = stp_initial_value.shape[0]
    if baseline_func is None:
        baseline_func = np.mean
    nnp = tf.Variable(initial_value=nnp_initial_value, dtype=dtype)
    stp = tf.Variable(initial_value=stp_initial_value, dtype=dtype)
    history = []
    prob = tf.math.exp(stp) / tf.tile(
        tf.math.reduce_sum(tf.math.exp(stp), axis=1)[:, tf.newaxis], [1, c]
    )  # softmax categorical probability
    avcost1 = 0
    # prethermal stage: update only circuit parameters with structure sampled/fixed
    for _, gdata in zip(range(prethermal), g):  # prethermal for nn param
        if prethermal_preset is None:
            preset = preset_byprob(prob)
        else:
            preset = prethermal_preset
        forwardv, gnnp = kernel_func(gdata, nnp, preset)
        prethermal_opt.apply_gradients([(gnnp, nnp)])
    if verbose:
        print("network parameter after prethermalization: \n", nnp.numpy())
    try:
        for epoch in range(epochs):  # iteration to update strcuture param
            # for data spliting case, odd update network, even update structure
            prob = tf.math.exp(stp) / tf.tile(
                tf.math.reduce_sum(tf.math.exp(stp), axis=1)[:, tf.newaxis], [1, c]
            )
            if prob_clip is not None:
                # clip peaked distributions and renormalize rows to sum to 1
                prob = tf.clip_by_value(prob, (1 - prob_clip) / c, prob_clip)
                prob = prob / tf.tile(
                    tf.reshape(tf.reduce_sum(prob, axis=1), [prob.shape[0], 1]),
                    tf.constant([1, prob.shape[1]]),
                )
            if verbose:
                print("probability: \n", prob.numpy())
            print("----------new epoch %s-----------" % epoch)
            deri_stp = []
            deri_nnp = []
            # avcost2 = tf.convert_to_tensor(avcost1 / batch) * baseline_scale
            avcost2 = avcost1
            costl = []
            # nnpg = tf.zeros_like(nnp)
            # collect nn param graident on the matrix with the same form as nnp
            if stp_regularization is not None:
                stp_penalty_gradient = stp_regularization(stp, nnp)
                if verbose:
                    print("stp_penalty_gradient:", stp_penalty_gradient.numpy())
            else:
                stp_penalty_gradient = 0.0
            if nnp_regularization is not None:
                nnp_penalty_gradient = nnp_regularization(stp, nnp)
                if verbose:
                    print("nnpp_penalty_gradient:", nnp_penalty_gradient.numpy())
            else:
                nnp_penalty_gradient = 0.0
            if parallel_num == 0:
                # serial path: sample a structure per data instance and
                # accumulate REINFORCE-style gradients with baseline avcost2
                for _, gdata in zip(range(batch), g):
                    preset = preset_byprob(prob)
                    if pertubation_func is not None:
                        loss, gnnp = kernel_func(
                            gdata, nnp + pertubation_func(), preset
                        )
                    else:
                        loss, gnnp = kernel_func(gdata, nnp, preset)
                    gs = tf.tensor_scatter_nd_add(
                        tf.cast(-prob, dtype=dtype),
                        tf.constant(list(zip(range(p), preset))),
                        tf.ones([p], dtype=dtype),
                    )  # \nabla lnp
                    deri_stp.append(
                        (tf.cast(loss, dtype=dtype) - tf.cast(avcost2, dtype=dtype))
                        * tf.cast(gs, dtype=dtype)
                    )
                    deri_nnp.append(gnnp)
                    costl.append(loss.numpy())
            else:  ## parallel mode for batch evaluation
                args_list = []
                for _, gdata in zip(range(batch), g):
                    if pertubation_func is not None:
                        args_list.append((prob, gdata, nnp + pertubation_func()))
                    else:
                        args_list.append((prob, gdata, nnp))
                parallel_result = pool.starmap(p_parallel_kernel, args_list)
                # [(loss, gnnp, gs), ...]
                deri_nnp = []
                deri_stp = []
                costl = []
                for loss, gnnp, gs in parallel_result:
                    costl.append(loss.numpy())
                    deri_nnp.append(gnnp)
                    deri_stp.append(
                        (tf.cast(loss, dtype=dtype) - tf.cast(avcost2, dtype=dtype))
                        * tf.cast(gs, dtype=dtype)
                    )
            # new baseline for the next epoch, e.g. batch mean of the losses
            avcost1 = tf.convert_to_tensor(baseline_func(costl))
            print(
                "batched average loss: ",
                np.mean(costl),
                " batched loss std: ",
                np.std(costl),
                "\nnew baseline: ",
                avcost1.numpy(),  # type: ignore
            )
            batched_gs = tf.math.reduce_mean(
                tf.convert_to_tensor(deri_stp, dtype=dtype), axis=0
            )
            batched_gnnp = tf.math.reduce_mean(
                tf.convert_to_tensor(deri_nnp, dtype=dtype), axis=0
            )
            if verbose:
                print("batched gradient of stp: \n", batched_gs.numpy())
                print("batched gradient of nnp: \n", batched_gnnp.numpy())
            network_opt.apply_gradients(
                zip([batched_gnnp + nnp_penalty_gradient], [nnp])
            )
            structure_opt.apply_gradients(
                zip([batched_gs + stp_penalty_gradient], [stp])
            )
            if verbose:
                print(
                    "strcuture parameter: \n",
                    stp.numpy(),
                    "\n network parameter: \n",
                    nnp.numpy(),
                )
            if verbose_func is not None:
                verbose_func()
            cand_preset = get_preset(stp).numpy()
            cand_preset_repr = [repr_op(op_pool[f]) for f in cand_preset]
            print("best candidates so far:", cand_preset_repr)
            # TODO, more general repr
            if nnp.shape == stp.shape and verbose:
                cand_weight = get_weights(nnp, stp).numpy()
                print(
                    "And associating weights:",
                    cand_weight,
                )
            if history_func is not None:
                history.append(history_func())
        if parallel_num > 0:
            pool.close()
        return stp, nnp, history  # TODO: history list trackings
    except KeyboardInterrupt:
        # allow a graceful early stop: return whatever has been learned so far
        if parallel_num > 0:
            pool.close()
        return stp, nnp, history
## training based on DQAS
def qaoa_simple_train(
    preset: Sequence[int],
    graph: Union[Sequence[Graph], Iterator[Graph]],
    vag_func: Optional[
        Callable[[Any, Tensor, Sequence[int]], Tuple[Tensor, Tensor]]
    ] = None,
    epochs: int = 60,
    batch: int = 1,
    nnp_shape: Optional[Array] = None,
    nnp_initial_value: Optional[Array] = None,
    opt: Optional[Opt] = None,
    search_func: Optional[Callable[..., Any]] = None,
    kws: Optional[Dict[Any, Any]] = None,
) -> Tuple[Array, float]:
    """
    Train circuit parameters for one *fixed* structure ``preset`` by running
    the DQAS driver with the structural distribution pinned (large stp logits
    on the chosen ops).

    :param preset: fixed operation choice per layer
    :param graph: graph dataset, as a list (cycled) or a generator
    :param vag_func: value-and-gradient kernel; defaults to ``qaoa_vag_energy``
    :param epochs: training epochs
    :param batch: batch size per epoch
    :param nnp_shape: shape of the circuit parameter pool when no initial value is given
    :param nnp_initial_value: explicit initial circuit parameters
    :param opt: network optimizer passed through to the search driver
    :param search_func: search driver, defaults to :func:`DQAS_search`
    :param kws: extra keyword arguments forwarded to ``search_func``
    :return: (trained weights gathered for ``preset``, mean of the last 10 history losses)
    """
    # fix: ``sp.random`` (scipy.random) was deprecated and removed in modern
    # SciPy; it was the same RandomState as numpy's, so seed numpy directly.
    np.random.seed()
    # TODO: the best practice combine multiprocessing and random generator still needs further investigation
    p = len(preset)
    c = len(get_op_pool())
    # pin the structural distribution: large logit on each chosen op
    stp_train = np.zeros([p, c])
    for i, j in enumerate(preset):
        stp_train[i, j] = 10.0
    if nnp_initial_value is None and nnp_shape is None:
        nnp_initial_value = np.random.normal(loc=0.23, scale=0.8, size=[p, c])
    elif nnp_shape is not None and nnp_initial_value is None:
        nnp_initial_value = np.random.normal(loc=0.23, scale=0.8, size=nnp_shape)
    if vag_func is None:
        from .vags import qaoa_vag_energy

        vag_func = qaoa_vag_energy
    if kws is None:
        kws = {}
    if "prob_model_func" in kws:
        pmf = kws["prob_model_func"]
        del kws["prob_model_func"]
        kws[
            "prob_model"
        ] = pmf()  # in case keras model cannot pickled for multiprocessing map
    if isinstance(graph, list):
        # wrap a finite list of graphs into an endless cycling generator
        def graph_generator() -> Iterator[Graph]:
            i = 0
            l = len(graph)  # type: ignore
            while True:
                if i < l:
                    yield graph[i]  # type: ignore
                    i += 1
                else:
                    i = 0
                    yield graph[i]  # type: ignore

        graph_g = graph_generator()
    else:
        graph_g = graph  # type: ignore
    if search_func is None:
        search_func = DQAS_search
    kws.update({"stp_initial_value": stp_train})
    stp, nnp, h = search_func(
        vag_func,
        g=graph_g,
        p=p,
        batch=batch,
        prethermal=0,
        epochs=epochs,
        history_func=history_loss,
        nnp_initial_value=nnp_initial_value,
        network_opt=opt,
        **kws,
    )
    return (get_weights_v2(nnp, preset=preset).numpy(), np.mean(h[-10:]))
def parallel_qaoa_train(
    preset: Sequence[int],
    g: Any,
    vag_func: Any = None,
    opt: Opt = None,
    epochs: int = 60,
    tries: int = 16,
    batch: int = 1,
    cores: int = 8,
    loc: float = 0.0,
    scale: float = 1.0,
    nnp_shape: Optional[Sequence[int]] = None,
    search_func: Optional[Callable[..., Any]] = None,
    kws: Optional[Dict[Any, Any]] = None,
) -> Sequence[Any]:
    """
    parallel variational parameter training and search to avoid local minimum
    not limited to qaoa setup as the function name indicates,
    as long as you provided suitable `vag_func`

    Runs ``tries`` independent ``qaoa_simple_train`` jobs (each with a fresh
    random nnp initialization) across a process pool and returns the results
    sorted by final loss (best first).

    :param preset: fixed operation choice per layer
    :param g: data input generator for vag_func
    :param vag_func: vag_kernel
    :param opt:
    :param epochs:
    :param tries: number of tries
    :param batch: for optimization problem the input is in general fixed so batch is often 1
    :param cores: number of parallel jobs
    :param loc: mean value of normal distribution for nnp
    :param scale: std deviation of normal distribution for nnp
    :param nnp_shape: shape of the nnp pool; defaults to [p, c]
    :param search_func: search driver forwarded to ``qaoa_simple_train``
    :param kws: extra keyword args forwarded to ``qaoa_simple_train``
    :return: list of (weights, loss) tuples sorted ascending by loss
    """
    if not opt:
        opt = tf.keras.optimizers.Adam(learning_rate=0.1)
    p = len(preset)
    c = len(get_op_pool())
    glist = []
    # materialize the generator: workers can't unpickle generators
    for _ in range(epochs * batch):
        glist.append(g.send(None))  # pickle doesn't support generators even in dill
    if vag_func is None:
        from .vags import qaoa_vag_energy

        vag_func = qaoa_vag_energy
    if nnp_shape is None:
        nnp_shape = [p, c]
    pool = Pool(cores)
    # positional args must match qaoa_simple_train's signature order
    args_list = [
        (
            preset,
            glist,
            vag_func,
            epochs,
            batch,
            None,
            np.random.normal(loc=loc, scale=scale, size=nnp_shape),
            opt,
            search_func,
            kws,
        )
        for _ in range(tries)
    ]
    result_list = pool.starmap(qaoa_simple_train, args_list)
    pool.close()
    result_list = sorted(result_list, key=lambda s: s[1])
    print(result_list)
    print("the optimal result is %s" % result_list[0][1])
    return result_list
def evaluate_everyone(
    vag_func: Any,
    gdata: Iterator[Any],
    nnp: Tensor,
    presets: Sequence[Sequence[List[int]]],
    batch: int = 1,
) -> Sequence[Tuple[Tensor, Tensor]]:
    """
    Evaluate each candidate structure, averaging the vag loss over
    ``batch`` draws from ``gdata``.

    :param vag_func: kernel returning (loss, gradient); only the loss is used
    :param gdata: data generator
    :param nnp: circuit parameters (wrapped in a tf.Variable if necessary)
    :param presets: candidate structures to score
    :param batch: number of data instances averaged per structure
    :return: list of (preset, mean loss) pairs
    """
    if not isinstance(nnp, tf.Tensor):
        nnp = tf.Variable(initial_value=nnp)
    losses = []
    for preset in presets:
        total = 0
        for _, g in zip(range(batch), gdata):
            total += vag_func(g, nnp, preset)[0]
        mean_loss = total / batch  # type: ignore
        losses.append((preset, mean_loss.numpy()))  # type: ignore
    return losses
## probabilistic model based DQAS
def van_sample(
    prob_model: Model, batch_size: int
) -> Tuple[List[Tensor], List[List[Tensor]]]:
    """Draw ``batch_size`` structure samples from an autoregressive
    probabilistic model together with per-sample gradients of the log
    probability w.r.t. the model variables.

    :param prob_model: keras model exposing ``sample`` and ``_log_prob``
    :param batch_size: number of structures to sample
    :return: (list of sampled op-index tensors, list of per-sample gradient lists)
    """
    glnprob_list = []
    # persistent tape: we need a separate gradient for each sample's log-prob
    # from the same forward pass
    with tf.GradientTape(persistent=True) as t:
        sample, xhat = prob_model.sample(batch_size)
        lnprob = prob_model._log_prob(sample, xhat)
        for i in range(batch_size):
            glnprob_list.append(t.gradient(lnprob[i], prob_model.variables))
    # convert one-hot samples to operation indices
    sample = tf.argmax(sample, axis=-1)
    sample_list = [sample[i] for i in range(batch_size)]
    # release the persistent tape's resources explicitly
    del t
    return sample_list, glnprob_list
def van_regularization(
    prob_model: Model, nnp: Tensor = None, lbd_w: float = 0.01, lbd_b: float = 0.01
) -> Tensor:
    """
    Structural-parameter regularization delegated to the probabilistic
    model itself; ``nnp`` is accepted only for interface compatibility
    and is unused.

    :param prob_model: model exposing a ``regularization`` method
    :param nnp: unused
    :param lbd_w: weight-penalty coefficient
    :param lbd_b: bias-penalty coefficient
    :return: regularization value from the model
    """
    return prob_model.regularization(lbd_w=lbd_w, lbd_b=lbd_b)
def micro_sample(
    prob_model: Model,
    batch_size: int,
    repetitions: Optional[List[int]] = None,
) -> Tuple[List[Tensor], List[List[Tensor]]]:
    """Like :func:`van_sample`, but optionally expands each sampled structure
    by repeating columns according to ``repetitions`` (micro-architecture
    sharing: several circuit layers reuse one sampled choice).

    :param prob_model: keras model exposing ``sample`` and ``_log_prob``
    :param batch_size: number of structures to sample
    :param repetitions: for each output layer, the index of the sampled
        column to copy; ``None`` returns the raw samples
    :return: (sampled op-index tensor, list of per-sample gradient lists)
    """
    glnprob_list = []
    # persistent tape: one gradient per sample from a single forward pass
    with tf.GradientTape(persistent=True) as t:
        sample, xhat = prob_model.sample(batch_size)
        lnprob = prob_model._log_prob(sample, xhat)
        for i in range(batch_size):
            glnprob_list.append(t.gradient(lnprob[i], prob_model.variables))
    # convert one-hot samples to operation indices
    sample = tf.argmax(sample, axis=-1)
    sample_list = sample.numpy()
    # release the persistent tape's resources explicitly
    del t
    if not repetitions:
        return tf.constant(sample_list), glnprob_list
    else:
        # remap columns: output layer i copies sampled column repetitions[i]
        ns = np.empty(shape=[batch_size, len(repetitions)], dtype=np.int32)
        for i, j in enumerate(repetitions):
            ns[:, i] = sample_list[:, j]
        return tf.constant(ns), glnprob_list
def DQAS_search_pmb(
    kernel_func: Callable[[Any, Tensor, Sequence[int]], Tuple[Tensor, Tensor]],
    prob_model: Model,
    *,
    sample_func: Optional[
        Callable[[Model, int], Tuple[List[Tensor], List[List[Tensor]]]]
    ] = None,
    g: Optional[Iterator[Any]] = None,
    op_pool: Optional[Sequence[Any]] = None,
    p: Optional[int] = None,
    batch: int = 300,
    prethermal: int = 0,
    epochs: int = 100,
    parallel_num: int = 0,
    verbose: bool = False,
    verbose_func: Optional[Callable[[], None]] = None,
    history_func: Optional[Callable[[], Any]] = None,
    baseline_func: Optional[Callable[[Sequence[float]], float]] = None,
    pertubation_func: Optional[Callable[[], Tensor]] = None,
    nnp_initial_value: Optional[Array] = None,
    stp_regularization: Optional[Callable[[Model, Tensor], Tensor]] = None,
    network_opt: Optional[Opt] = None,
    structure_opt: Optional[Opt] = None,
    prethermal_opt: Optional[Opt] = None,
    loss_func: Optional[Callable[[Tensor], Tensor]] = None,
    loss_derivative_func: Optional[Callable[[Tensor], Tensor]] = None,
    validate_period: int = 0,
    validate_batch: int = 1,
    validate_func: Optional[
        Callable[[Any, Tensor, Sequence[int]], Tuple[Tensor, Tensor]]
    ] = None,
    vg: Optional[Iterator[Any]] = None,
) -> Tuple[Tensor, Tensor, Sequence[Any]]:
    """
    probabilistic model based DQAS, can use extensively for DQAS case for ``NMF`` probabilistic model
    :param kernel_func: vag func, return loss and nabla lnp
    :param prob_model: keras model
    :param sample_func: sample func of logic with keras model input
    :param g: input data pipeline generator
    :param op_pool: operation pool
    :param p: depth for DQAS
    :param batch:
    :param prethermal:
    :param epochs:
    :param parallel_num: parallel kernels
    :param verbose:
    :param verbose_func:
    :param history_func:
    :param baseline_func:
    :param pertubation_func:
    :param nnp_initial_value:
    :param stp_regularization:
    :param network_opt:
    :param structure_opt:
    :param prethermal_opt:
    :param loss_func: final loss function in terms of average of sub loss for each circuit
    :param loss_derivative_func: derivative function for ``loss_func``
    :param validate_period: run validation every this many epochs, 0 to disable
    :param validate_batch: number of validation instances per validation run
    :param validate_func: kernel used for validation
    :param vg: validation data generator
    :return: tuple of (prob_model, nnp, history)
    """
    # shape of nnp and stp is not necessarily compatible in complicated settings
    dtype = tf.float32  # caution, simply changing this is not guaranteed to work
    if op_pool is None:
        op_pool = get_op_pool()
    c = len(op_pool)
    set_op_pool(op_pool)
    if sample_func is None:
        sample_func = van_sample
    if g is None:
        g = void_generator()
    if vg is None:
        vg = void_generator()
    if parallel_num > 0:
        pool = get_context("spawn").Pool(parallel_num)
        # use spawn model instead of default fork which has threading lock issues
    if network_opt is None:
        network_opt = tf.keras.optimizers.Adam(learning_rate=0.1)  # network
    if structure_opt is None:
        structure_opt = tf.keras.optimizers.Adam(
            learning_rate=0.1, beta_1=0.8, beta_2=0.99
        )  # structure
    if prethermal_opt is None:
        prethermal_opt = tf.keras.optimizers.Adam(learning_rate=0.1)  # prethermal
    # NOTE(review): if both ``p`` and ``nnp_initial_value`` are None this
    # raises AttributeError on ``None.shape`` — one of them must be supplied.
    if p is None:
        p = nnp_initial_value.shape[0]  # type: ignore
    if nnp_initial_value is None:
        nnp_initial_value = np.random.normal(loc=0, scale=0.3, size=[p, c])
    if baseline_func is None:
        baseline_func = np.mean
    nnp = tf.Variable(initial_value=nnp_initial_value, dtype=dtype)
    if loss_func is None:
        loss_func = lambda s: s
    if loss_derivative_func is None:
        loss_derivative_func = lambda s: tf.constant(1.0)
    history = []
    avcost1 = 0
    # prethermal stage: train circuit parameters on structures sampled once
    if prethermal > 0:
        presets, glnprobs = sample_func(prob_model, prethermal)
    for i, gdata in zip(range(prethermal), g):  # prethermal for nn param
        forwardv, gnnp = kernel_func(gdata, nnp, presets[i])
        prethermal_opt.apply_gradients([(gnnp, nnp)])
    if verbose:
        print("network parameter after prethermalization: \n", nnp.numpy())
    try:
        for epoch in range(epochs):  # iteration to update strcuture param
            print("----------new epoch %s-----------" % epoch)
            deri_stp = []
            deri_nnp = []
            avcost2 = avcost1
            costl = []
            # one batch of structures plus their \nabla ln p gradients
            presets, glnprobs = sample_func(prob_model, batch)
            if stp_regularization is not None:
                with tf.GradientTape() as t:
                    stp_penalty = stp_regularization(prob_model, nnp)
                gr = t.gradient(stp_penalty, prob_model.variables)
                # replace None gradients (untouched variables) with zeros
                g_stp_penalty = []
                for v, gi in zip(prob_model.variables, gr):
                    if gi is not None:
                        g_stp_penalty.append(gi)
                    else:
                        g_stp_penalty.append(tf.zeros_like(v))
                if verbose:
                    print(
                        "typical scale of gradient from stp variable regularization:",
                        [tf.reduce_mean(tf.math.abs(w)).numpy() for w in g_stp_penalty],
                    )
            else:
                g_stp_penalty = []
                for v in prob_model.variables:
                    g_stp_penalty.append(tf.zeros_like(v))
            if parallel_num == 0:
                for i, gdata in zip(range(batch), g):
                    if pertubation_func is not None:
                        loss, gnnp = kernel_func(
                            gdata, nnp + pertubation_func(), presets[i]
                        )
                    else:
                        loss, gnnp = kernel_func(gdata, nnp, presets[i])
                    # gnnp \equiv \partial L_i/\partial \theta
                    # batched_gnnp = sum_{i\in batch} \partial \mathcal{L}/\partial L_i \partial L_i/\partial \theta
                    # batched_gstp = \partial \mathcal{L}/\partial \bar{L} (\sum_i (L-\bar{L})\nabla \ln p)
                    # \partial \mathcal{L}/\partial L_i = \partial \mathcal{L}/\partial \bar{L} 1/n
                    deri_stp.append(
                        [
                            (tf.cast(loss, dtype=dtype) - tf.cast(avcost2, dtype=dtype))
                            * w
                            for w in glnprobs[i]
                        ]
                    )
                    deri_nnp.append(gnnp)
                    costl.append(loss.numpy())
                if validate_period != 0 and epoch % validate_period == 0:
                    accuracy = []
                    validate_presets, _ = sample_func(prob_model, validate_batch)
                    for i, gdata in zip(range(validate_batch), vg):
                        accuracy.append(validate_func(gdata, nnp, validate_presets[i]))  # type: ignore
                    print("accuracy on validation set:", np.mean(accuracy))
            else:  ## parallel mode for batch evaluation
                args_list = []
                for i, gdata in zip(range(batch), g):
                    if pertubation_func is not None:
                        args_list.append(
                            (gdata, nnp + pertubation_func(), presets[i].numpy())
                        )
                    else:
                        args_list.append((gdata, nnp, presets[i].numpy()))
                # print(args_list)
                parallel_result = pool.starmap(kernel_func, args_list)
                # [(loss, gnnp), ...]
                deri_nnp = []
                deri_stp = []
                costl = []
                for i, r in enumerate(parallel_result):
                    loss, gnnp = r
                    costl.append(loss.numpy())
                    deri_nnp.append(gnnp)
                    deri_stp.append(
                        [
                            (tf.cast(loss, dtype=dtype) - tf.cast(avcost2, dtype=dtype))
                            * w
                            for w in glnprobs[i]
                        ]
                    )
            # new REINFORCE baseline from this batch's losses
            avcost1 = tf.convert_to_tensor(baseline_func(costl))
            print(
                "batched average loss: ",
                np.mean(costl),
                " batched loss std: ",
                np.std(costl),
                "\nnew baseline: ",
                avcost1.numpy(),  # type: ignore
            )
            batched_gs = []
            batched_gs_std = []
            loss_bar = tf.reduce_mean(costl)
            loss_bar_d = loss_derivative_func(
                loss_bar
            )  # \partial \mathcal{L} /\partial \bar{L}
            # assemble per-variable structural gradients (chain rule through
            # the final loss) plus regularization
            for i in range(len(glnprobs[0])):
                batched_gs.append(
                    loss_bar_d
                    * tf.math.reduce_mean(
                        tf.convert_to_tensor([w[i] for w in deri_stp], dtype=dtype),
                        axis=0,
                    )
                    + g_stp_penalty[i]
                )
                if verbose:  # check on baseline fluctuation reduction effect
                    batched_gs_std.append(
                        tf.math.reduce_std(
                            tf.convert_to_tensor([w[i] for w in deri_stp], dtype=dtype),
                            axis=0,
                        )
                    )
            batched_gnnp = loss_bar_d * tf.math.reduce_mean(
                tf.convert_to_tensor(deri_nnp, dtype=dtype), axis=0
            )
            if verbose:
                print(
                    "final loss:",
                    loss_func(loss_bar),
                    " final loss derivative multiplier:",
                    loss_bar_d,
                )
            if verbose:
                print("batched gradient of nnp: \n", batched_gnnp.numpy())
                print(
                    "typical scale of batched graident of stp: \n",
                    [tf.reduce_mean(tf.math.abs(w)).numpy() for w in batched_gs],
                )
            network_opt.apply_gradients(zip([batched_gnnp], [nnp]))
            structure_opt.apply_gradients(zip(batched_gs, prob_model.variables))
            if verbose:
                print(
                    "\n network parameter: \n",
                    nnp.numpy(),
                )
                print(
                    "typical scale of stp parameter: \n",
                    [
                        tf.reduce_mean(tf.math.abs(w)).numpy()
                        for w in prob_model.variables
                    ],
                )
                print(
                    "typical scale standard deviation of batched gradient (ratio to mean): \n",
                    [
                        tf.reduce_mean(tf.math.abs(w1)).numpy()
                        / tf.reduce_mean(tf.math.abs(w2) + 1.0e-20).numpy()
                        for w1, w2 in zip(batched_gs_std, prob_model.variables)
                    ],
                )
            if verbose_func is not None:
                verbose_func()
            if history_func is not None:
                history.append(history_func())
            # NOTE(review): this validation branch uses ``pool.starmap`` and so
            # presumably assumes parallel mode (pool only exists when
            # parallel_num > 0) — confirm before enabling with parallel_num == 0.
            if validate_period != 0 and (epoch + 1) % validate_period == 0:
                args_list = []
                validate_presets, _ = sample_func(prob_model, validate_batch)
                for i, gdata in zip(range(validate_batch), vg):
                    args_list.append((gdata, nnp, validate_presets[i].numpy()))
                # print(args_list)
                parallel_validation_result = pool.starmap(validate_func, args_list)  # type: ignore
                print("--------")
                if isinstance(parallel_validation_result[0], dict):
                    for kk in parallel_validation_result[0]:
                        print(
                            "%s on validation set:" % kk,
                            np.mean([p[kk] for p in parallel_validation_result]),
                        )
                else:
                    print(
                        "accuracy on validation set:",
                        np.mean(parallel_validation_result),
                    )
        if parallel_num > 0:
            pool.close()
        return prob_model, nnp, history
    except KeyboardInterrupt:
        # graceful early stop: return what has been learned so far
        if parallel_num > 0:
            pool.close()
        return prob_model, nnp, history
|
# File: fireeyeetp_consts.py
#
# Copyright (c) Robert Drouin, 2021-2022
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
# Define your constants here
FIREETEETP_API_PATH = "api/v1/"
FIREETEETP_LIST_ALERTS_ENDPOINT = "alerts"
FIREETEETP_GET_ALERT_ENDPOINT = "alerts/{alertId}"
FIREETEETP_GET_ALERT_CASE_FILES_ENDPOINT = "alerts/{alertId}/downloadzip"
FIREETEETP_GET_ALERT_MALWARE_FILES_ENDPOINT = "alerts/{alertId}/downloadmalware"
FIREETEETP_GET_ALERT_PCAP_FILES_ENDPOINT = "alerts/{alertId}/downloadpcap"
FIREETEETP_LIST_MESSAGE_ATTRIBUTES_ENDPOINT = "messages/trace"
FIREETEETP_GET_MESSAGE_ATTRIBUTES_ENDPOINT = "messages/{etp_message_id}"
FIREETEETP_GET_MESSAGE_TRACE_ENDPOINT = "messages"
FIREETEETP_GET_EMAIL_ENDPOINT = "messages/{etp_message_id}/email"
FIREETEETP_REMEDIATE_EMAILS_ENDPOINT = "messages/remediate"
FIREEYEETP_GET_QUARANTINED_EMAIL_ENDPOINT = "quarantine/email/{etp_message_id}"
FIREEYEETP_BULK_RELEASE_QUARANTINE_EMAILS_ENDPOINT = "quarantine/release/"
FIREEYEETP_RELEASE_QUARANTINED_EMAIL_ENDPOINT = "quarantine/release/{etp_message_id}"
FIREEYEETP_BULK_DELETE_QUARANTINE_EMAILS_ENDPOINT = "quarantine/delete/"
FIREEYEETP_DELETE_QUARANTINED_EMAIL_ENDPOINT = "quarantine/delete/{etp_message_id}"
FIREEYEETP_LIST_QUARANTINED_EMAILS_ENDPOINT = "quarantine"
# Constants relating to '_get_error_message_from_exception'
ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
ERR_ISO_FORMAT = "Date supplied in the '{}' field is not ISO8601 compliant. " \
"Please make sure it is a valid ISO8601 datetime stamp"
# Constants relating to '_validate_integer'
VALID_INTEGER_MSG = "Please provide a valid integer value in the {}"
NON_NEGATIVE_INTEGER_MSG = "Please provide a valid non-negative integer value in the {}"
POSITIVE_INTEGER_MSG = "Please provide a valid non-zero positive integer value in the {}"
SIZE_KEY = "'size' action parameter"
LEGACY_ID_KEY = "'legacy_id' action parameter"
NUM_DAYS_KEY = "'num_days' action parameter"
CONTAINER_COUNT_KEY = "'container_count' action parameter"
# Constant for corrupt asset file
FIREEYEETP_STATE_FILE_CORRUPT_ERR = "Error occurred while loading the state file due to its unexpected format.\
Resetting the state file with the default format. Please try again."
# Timeout
FIREETEETP_DEFAULT_TIMEOUT = 30
|
{
    # Human-readable module title shown in the Odoo Apps list.
    'name': 'Listado de Bancos Argentinos',
    'version': '13.0.1.0.0',
    'category': 'Localization/Argentina',
    'sequence': 14,
    'author': 'ADHOC SA',
    'license': 'AGPL-3',
    'summary': '',
    # Modules that must be installed before this one.
    'depends': [
        'base',
    ],
    # Data files loaded on install/upgrade, in order.
    'data': [
        'data/res_bank.xml',
        'views/l10n_ar_bank.xml',
    ],
    'installable': True,
    'auto_install': False,
    'application': False,
}
|
# coding=utf-8
"utility helper functions"
import os
import re
from elifetools import xmlio
from elifetools import utils as etoolsutils
# namespaces for when reparsing XML strings
# (prefix -> URI; turned into an xmlns attribute string by
# reparsing_namespaces below)
XML_NAMESPACE_MAP = {
    "ali": "http://www.niso.org/schemas/ali/1.0/",
    "mml": "http://www.w3.org/1998/Math/MathML",
    "xlink": "http://www.w3.org/1999/xlink",
}
def reparsing_namespaces(namespace_map):
    """Build the xmlns attribute string for a prefix -> URI map."""
    return " ".join(
        'xmlns:%s="%s"' % (prefix, uri) for prefix, uri in namespace_map.items()
    )
def remove_non_breaking_space(string):
    """Strip non-breaking spaces (UTF-8 mojibake pair and U+00A0)."""
    if not string:
        return ""
    for nbsp in ("\xc2\xa0", "\xa0"):
        string = string.replace(nbsp, "")
    return string
def remove_strike(string):
    """Remove <strike>...</strike> spans plus their surrounding whitespace.

    A span flanked by a space on both sides collapses to a single space so
    the neighbouring words do not run together; otherwise the span is removed
    outright.
    """
    if not string:
        return ""
    # NOTE(review): matches are found against the original string but applied
    # with str.replace on the evolving string, which replaces EVERY occurrence
    # of the matched text each time; fine for typical input, but a
    # re.sub(callback) form would be more robust — confirm before changing.
    for match in re.finditer(r"\s*<strike>.*?</strike>\s*", string):
        # replace with blank string unless flanked by spaces, in which case
        # replace with a single space char
        replace_char = ""
        if match.group(0).startswith(" ") and match.group(0).endswith(" "):
            replace_char = " "
        string = string.replace(match.group(0), replace_char)
    return string
def remove_empty_p_tags(string):
    """Strip <p> elements whose content is nothing but whitespace."""
    if not string:
        return ""
    # note: \s+? requires at least one whitespace char, so a literal
    # "<p></p>" is intentionally left untouched
    return re.sub(r"<p[^>]*?>\s+?</p>", "", string)
def new_line_replace_with(line_one, line_two):
    """determine the whitespace to use when concatenating two lines together

    Returns the separator to insert between *line_one* and *line_two*:
    either "", "<break /><break />", or a break wrapped in closing/opening
    italic tags so an italic run is not cut across the join.  The branch
    order is significant — do not reorder.
    """
    if line_one is None:
        # first line of the document: nothing to join to
        return ""
    # strip spaces before comparisons
    line_one = line_one.lstrip().rstrip()
    line_two = line_two.lstrip().rstrip()
    if line_one.endswith(">") and line_two.startswith("<"):
        if (
            line_one.startswith("<p><italic>")
            and not line_one.endswith("</italic></p>")
            and line_two.startswith("</italic>")
        ):
            # an unterminated italic paragraph continues onto the next line
            return "</italic><break /><break /><italic>"
        # default return blank string
        return ""
    if not line_one.startswith("<p>"):
        if line_two == "<italic>":
            return "<break /><break />"
        if line_one.endswith("</italic>"):
            return "<break /><break />"
        if line_one.startswith("</italic>") and line_two.startswith("<italic>"):
            return "<break /><break />"
        if (
            not line_one.startswith("<")
            and line_two.startswith("</italic>")
            and line_two != "</italic></p>"
        ):
            return "<break /><break />"
        if line_two.startswith("<bold>") and line_two.endswith("</bold></p>"):
            return "<break /><break />"
        if not line_two.startswith("<") and line_two.endswith("</p>"):
            return "<break /><break />"
        if not line_two.endswith("</p>") and not line_one.startswith("<"):
            return "<break /><break />"
    elif line_two == "<italic>":
        return "<break /><break />"
    elif not line_one.endswith(">") and line_two.startswith("<italic>"):
        return "<break /><break />"
    elif (
        line_one != "<p><italic>"
        and line_one.endswith("<italic>")
        and not line_two.startswith("<")
    ):
        # close the dangling italic around the break, reopen it after
        return "</italic><break /><break /><italic>"
    elif (
        line_one.startswith("<p><italic>")
        and not line_one.endswith("</italic></p>")
        and line_two.startswith("</italic>")
        and line_two != "</italic></p>"
    ):
        return "</italic><break /><break /><italic>"
    elif not line_one.endswith(">") and not line_two.startswith("<"):
        return "<break /><break />"
    elif (
        not line_one.endswith(">")
        and line_two.startswith("<bold>")
        and line_two.endswith("</p>")
    ):
        return "<break /><break />"
    return ""
def collapse_newlines(string):
    """Join the lines of *string* into one line, inserting <break /> tags.

    Uses new_line_replace_with() to choose the separator for each pair of
    adjacent lines, then cleans up redundant break/italic tag combinations
    produced by the edge cases.  Returns None for falsy input.
    """
    if not string:
        return None
    new_string = ""
    prev_line = None
    for line in string.split("\n"):
        replace_with = new_line_replace_with(prev_line, line.lstrip())
        new_string += replace_with + line.lstrip()
        # note: the unstripped line is kept as the previous-line context
        prev_line = line
    # remove meaningless break and italic tags due to an edge-case fix
    new_string = new_string.replace(
        "<break /><break /></italic><break /><break />", "</italic><break /><break />"
    )
    new_string = new_string.replace(
        "<break /><break /></italic>", "</italic><break /><break />"
    )
    new_string = new_string.replace(
        "<break /><break /><italic><break /><break />", "<break /><break /><italic>"
    )
    new_string = new_string.replace("<italic></italic>", "")
    return new_string
def clean_portion(string, root_tag="root"):
    """Strip the opening and closing *root_tag* and surrounding whitespace."""
    if not string:
        return ""
    string = re.sub(r"^<%s.*?>" % root_tag, "", string)
    string = re.sub(r"</%s>$" % root_tag, "", string)
    return string.strip()
def allowed_tags():
    """tuple of whitelisted tags

    Fragments (some with trailing space to allow attributes) that are
    treated as legitimate markup when escaping unmatched angle brackets.
    """
    return (
        "<p>",
        "<p ",
        "</p>",
        "<disp-quote",
        "</disp-quote>",
        "<italic>",
        "</italic>",
        "<bold>",
        "</bold>",
        "<underline>",
        "</underline>",
        "<sub>",
        "</sub>",
        "<sup>",
        "</sup>",
        "<sc>",
        "</sc>",
        "<inline-formula>",
        "</inline-formula>",
        "<disp-formula>",
        "</disp-formula>",
        "<mml:",
        "</mml:",
        "<ext-link",
        "</ext-link>",
        "<list>",
        "<list ",
        "</list>",
        "<list-item",
        "</list-item>",
        "<label>",
        "</label>",
        "<title>",
        "</title>",
        "<caption>",
        "</caption>",
        "<graphic ",
        "</graphic>",
        "<table",
        "<table ",
        "</table>",
        "<thead>",
        "</thead>",
        "<tbody>",
        "</tbody>",
        "<tr>",
        "</tr>",
        "<th>",
        "<th",
        "</th>",
        "<td>",
        "<td ",
        "</td>",
        "<xref ",
        "</xref>",
    )
def append_to_parent_tag(
    parent,
    tag_name,
    original_string,
    namespace_map,
    attributes=None,
    attributes_text="",
):
    """escape and reparse the string then add it to the parent tag

    :param parent: ElementTree element the reparsed XML is appended to
    :param tag_name: wrapper tag name used when reparsing the string
    :param original_string: raw string content to escape and reparse
    :param namespace_map: prefix -> URI map rendered into xmlns attributes
    :param attributes: attribute names to copy over, passed to xmlio
    :param attributes_text: raw attribute string added to the wrapper tag
    """
    # escape & characters and any angle brackets not part of whitelisted tags
    tag_converted_string = etoolsutils.escape_ampersand(original_string)
    tag_converted_string = etoolsutils.escape_unmatched_angle_brackets(
        tag_converted_string, allowed_tags()
    )
    namespaces_string = reparsing_namespaces(namespace_map)
    # reparse via minidom, then graft the result onto the ElementTree parent
    minidom_tag = xmlio.reparsed_tag(
        tag_name, tag_converted_string, namespaces_string, attributes_text
    )
    xmlio.append_minidom_xml_to_elementtree_xml(
        parent, minidom_tag, attributes=attributes, child_attributes=True
    )
def xml_string_fix_namespaces(xml_string, root_tag):
    """due to some bug with ElementTree.tostring, remove duplicate namespace
    attributes from the root tag of a serialised XML bytes string"""
    root_bytes = root_tag.encode("utf8")
    # capture the original root tag, e.g. b'<root xmlns:mml="..." ...>'
    root_match = re.match(rb"^(<%s.*?>).*" % root_bytes, xml_string)
    if not root_match:
        return xml_string
    old_root = root_match.group(1)
    # split the tag into attribute tokens, dropping the tag name and blanks
    attributes = [a for a in old_root.rstrip(b">").split(b" ")[1:] if a]
    # de-duplicate, then join alphabetically into a fresh root tag
    new_root = b"<%s %s>" % (root_bytes, b" ".join(sorted(set(attributes))))
    return xml_string.replace(old_root, new_root)
def replace_character_entities(xml_string):
    """replace standard XML character entities with hexadecimal replacements

    Accepts str or bytes; always returns bytes.
    """
    # Bug fix: the map previously replaced each entity with itself, making
    # the function a no-op contrary to its docstring.  Restored mapping of
    # named XML entities to hexadecimal character references.
    # NOTE(review): confirm the exact hex digit padding against the
    # project's test suite before release.
    char_map = {
        b"&amp;": b"&#x0026;",
        b"&gt;": b"&#x003E;",
        b"&lt;": b"&#x003C;",
        b"&quot;": b"&#x0022;",
    }
    for from_char, to_char in char_map.items():
        try:
            xml_string = xml_string.replace(from_char, to_char)
        except TypeError:
            # convert string to bytes if required
            xml_string = xml_string.encode("utf8").replace(from_char, to_char)
    return xml_string
def get_file_name_path(file_name):
    """return the folder path to a file excluding the file name itself"""
    parts = file_name.split(os.sep)
    return os.sep.join(parts[:-1])
def get_file_name_file(file_name):
    """return the file name only, removing the folder path preceding it"""
    return file_name.rsplit(os.sep, 1)[-1]
def open_tag(tag_name, attr=None):
    """Build an opening XML tag; attributes are rendered sorted by name."""
    if not attr:
        return "<%s>" % tag_name
    attr_string = " ".join(
        '%s="%s"' % (name, value) for name, value in sorted(attr.items())
    )
    return "<%s %s>" % (tag_name, attr_string)
def close_tag(tag_name):
    """Build a closing XML tag for *tag_name*."""
    return "</{}>".format(tag_name)
def manuscript_from_file_name(file_name):
    """Extract the numeric manuscript id from a file name.

    Based on names like ``Dutzler 39122 edit.docx`` (space separated) or
    ``elife-39122.docx`` (hyphen separated).  Returns the number as a
    string, or None when no number can be found.
    """
    if not file_name:
        return None
    # file name only, without folder path or extension
    first_part = file_name.split(os.sep)[-1].split(".")[0]
    spaced_parts = first_part.split(" ")
    # prefer space-separated parts; fall back to hyphen-separated
    parts = spaced_parts if len(spaced_parts) > 1 else first_part.split("-")
    try:
        return str(int(parts[1]))
    except (IndexError, ValueError):
        # Bug fix: also catch IndexError — a name with neither a space nor
        # a hyphen previously raised instead of returning None.
        return None
def remove_complex_scripts_styles(document_xml):
    """given docx document.xml contents remove complex scripts style tags

    Operates on bytes.  Splits the document into run (<w:r>) and non-run
    parts and strips <w:bCs/> and <w:iCs/> complex-script formatting from
    each part unless a w:rFonts exception applies.
    """
    # pattern for matching run tags w:r
    # (the capture group keeps the run tags in the re.split output)
    run_tag_match_pattern = re.compile(rb"(<w:r\s+.*?>.*?</w:r>)")
    # pattern for matching complex styles bold formatting tags
    complex_bold_match_pattern = re.compile(rb"<w:bCs.*?/>")
    # pattern for matching complex styles italic formatting tags
    complex_italic_match_pattern = re.compile(rb"<w:iCs.*?/>")
    new_document_xml = b""
    for xml_part in re.split(run_tag_match_pattern, document_xml):
        # if the w:rFonts tag contains a specific attribute, then do not remove the complex styles
        # NOTE(review): the combined not(...)/or(...) condition still strips
        # styles when w:ascii is present alongside w:cstheme — confirm that
        # precedence is intended before simplifying.
        if not (b"<w:rFonts" in xml_part and b"w:cstheme" in xml_part) or (
            b"<w:rFonts" in xml_part and b"w:ascii" in xml_part
        ):
            xml_part = re.sub(complex_bold_match_pattern, b"", xml_part)
            xml_part = re.sub(complex_italic_match_pattern, b"", xml_part)
        new_document_xml += xml_part
    return new_document_xml
def object_id_from_uri(uri):
    """
    from a sciety.org uri extract the DOI portion to be an id value
    e.g. from https://sciety.org/articles/activity/10.1101/865006
    return 10.1101/865006
    """
    if not uri:
        return uri
    doi_match = re.match(r".*?/(10\..*)", uri)
    return doi_match.group(1) if doi_match else uri
|
# Generated by Django 3.1.12 on 2021-07-15 10:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Tour model with ru/en/zh-cn
    translation columns for its text fields (Django-generated)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Tour',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('title_ru', models.CharField(max_length=255, null=True)),
                ('title_en', models.CharField(max_length=255, null=True)),
                ('title_zh_cn', models.CharField(max_length=255, null=True)),
                ('subtitle', models.CharField(max_length=255)),
                ('subtitle_ru', models.CharField(max_length=255, null=True)),
                ('subtitle_en', models.CharField(max_length=255, null=True)),
                ('subtitle_zh_cn', models.CharField(max_length=255, null=True)),
                ('announcement', models.TextField()),
                ('announcement_ru', models.TextField(null=True)),
                ('announcement_en', models.TextField(null=True)),
                ('announcement_zh_cn', models.TextField(null=True)),
                ('date', models.DateField()),
                ('event_program', models.TextField()),
                ('event_program_ru', models.TextField(null=True)),
                ('event_program_en', models.TextField(null=True)),
                ('event_program_zh_cn', models.TextField(null=True)),
                ('thumbnail', models.ImageField(blank=True, upload_to='tours/thumbnails/')),
            ],
        ),
    ]
|
from ..lib.utils import eprint
from .verilog_modeling import Bel, Site
# Mapping of IOB type to its IO ports
# (single-ended buffers have one pad port; differential ones have a pair)
IOB_PORTS = {
    "IBUF": ("I", ),
    "IBUF_INTERMDISABLE": ("I", ),
    "IBUF_IBUFDISABLE": ("I", ),
    "OBUF": ("O", ),
    "OBUFT": ("O", ),
    "IOBUF": ("IO", ),
    "IOBUF_INTERMDISABLE": ("IO", ),
    "IBUFDS": (
        "I",
        "IB",
    ),
    "OBUFDS": (
        "O",
        "OB",
    ),
    "OBUFTDS": (
        "O",
        "OB",
    ),
    "IOBUFDS": (
        "IO",
        "IOB",
    ),
}
# IOSTANDARDs that must not carry a DRIVE setting
DRIVE_NOT_ALLOWED = ["SSTL135", "SSTL15"]
def get_iob_site(db, grid, tile, site):
    """ Return the prjxray.tile.Site objects and tiles for the given IOB site.
    Returns tuple of (iob_site, iologic_tile, ilogic_site, ologic_site,
    pin_functions)
    iob_site is the relevant prjxray.tile.Site object for the IOB.
    ilogic_site is the relevant prjxray.tile.Site object for the ILOGIC
    connected to the IOB.
    ologic_site is the relevant prjxray.tile.Site object for the OLOGIC
    connected to the IOB.
    iologic_tile is the tile containing the ilogic_site and ologic_site.
    pin_functions is the pin-function entry for the IOB site name.
    """
    gridinfo = grid.gridinfo_at_tilename(tile)
    tile_type = db.get_tile_type(gridinfo.tile_type)
    sites = sorted(tile_type.get_instance_sites(gridinfo), key=lambda x: x.y)
    if len(sites) == 1:
        iob_site = sites[0]
    else:
        # pick by the trailing digit of the requested site name
        iob_site = sites[1 - int(site[-1])]
    loc = grid.loc_of_tilename(tile)
    # the companion IOI3 tile is one grid column away; direction depends on
    # whether this is a left (LIOB33) or right (RIOB33) IO column
    if gridinfo.tile_type.startswith('LIOB33'):
        dx = 1
    elif gridinfo.tile_type.startswith('RIOB33'):
        dx = -1
    else:
        assert False, gridinfo.tile_type
    iologic_tile = grid.tilename_at_loc((loc.grid_x + dx, loc.grid_y))
    ioi3_gridinfo = grid.gridinfo_at_loc((loc.grid_x + dx, loc.grid_y))
    ioi3_tile_type = db.get_tile_type(ioi3_gridinfo.tile_type)
    ioi3_sites = ioi3_tile_type.get_instance_sites(ioi3_gridinfo)
    ilogic_site = None
    ologic_site = None
    target_ilogic_site = iob_site.name.replace('IOB', 'ILOGIC')
    target_ologic_site = iob_site.name.replace('IOB', 'OLOGIC')
    # NOTE(review): this loop variable shadows the 'site' parameter, which
    # is no longer needed at this point
    for site in ioi3_sites:
        if site.name == target_ilogic_site:
            assert ilogic_site is None
            ilogic_site = site
        if site.name == target_ologic_site:
            assert ologic_site is None
            ologic_site = site
    assert ilogic_site is not None
    assert ologic_site is not None
    return iob_site, iologic_tile, ilogic_site, ologic_site, gridinfo.pin_functions[
        iob_site.name]
def append_obuf_iostandard_params(top,
                                  site,
                                  bel,
                                  possible_iostandards,
                                  slew="SLOW",
                                  in_term=None):
    """
    Appends IOSTANDARD, DRIVE and SLEW parameters to the bel. The IOSTANDARD
    and DRIVE parameters have to be read from an EBLIF file. If parameters
    from the EBLIF contradict those decoded from fasm, an error is printed
    and the NSTD-1 DRC is demoted to a warning instead.
    """
    # Check if we have IO settings information for the site read from EBLIF
    iosettings = top.get_site_iosettings(site.site.name)
    # We don't. Use the default IOSTANDARD
    if iosettings is None:
        iosettings = {
            "IOSTANDARD": top.default_iostandard,
            "DRIVE": top.default_drive
        }
    # SSTL135/SSTL15 must have no DRIVE setting. If present, the DRIVE setting
    # gets removed, as it was set by DEFAULT in the EBLIF
    if iosettings["IOSTANDARD"] in DRIVE_NOT_ALLOWED:
        iosettings["DRIVE"] = None
    iostandard = iosettings.get("IOSTANDARD", None)
    drive = iosettings.get("DRIVE", None)
    # Check if this is possible according to decoded fasm
    is_valid = (iostandard, drive, slew) in possible_iostandards
    if not is_valid:
        eprint("IOSTANDARD+DRIVE+SLEW settings provided for {} do not match "
               "their counterparts decoded from the fasm".format(
                   site.site.name))
        eprint("Requested:")
        eprint(" IOSTANDARD={}, DRIVE={}".format(iostandard, drive))
        eprint("Candidates are:")
        eprint(" IOSTANDARD | DRIVE | SLEW |")
        eprint("-------------------|--------|------|")
        for i, d, s in possible_iostandards:
            eprint(" {}| {}| {}|".format(
                i.ljust(18),
                str(d).ljust(7), s.ljust(5)))
        eprint("")
        # Demote NSTD-1 to warning
        top.disable_drc("NSTD-1")
    # Valid
    else:
        bel.parameters["IOSTANDARD"] = '"{}"'.format(iostandard)
        if drive is not None:
            bel.parameters["DRIVE"] = '"{}"'.format(drive)
    # Input termination (here for inouts)
    if in_term is not None:
        for port in IOB_PORTS[bel.module]:
            top.add_extra_tcl_line(
                "set_property IN_TERM {} [get_ports {}]".format(
                    in_term, bel.connections[port]))
    # Slew rate
    bel.parameters["SLEW"] = '"{}"'.format(slew)
def append_ibuf_iostandard_params(top,
                                  site,
                                  bel,
                                  possible_iostandards,
                                  in_term=None):
    """
    Appends IOSTANDARD parameter to the bel. The parameter has to be decoded
    from the EBLIF file. If the parameter from the EBLIF contradicts the one
    decoded from fasm, an error is printed and the NSTD-1 DRC is demoted to
    a warning instead.
    """
    # Check if we have IO settings information for the site read from EBLIF
    iosettings = top.get_site_iosettings(site.site.name)
    # We don't. Use the default IOSTANDARD
    if iosettings is None:
        iosettings = {
            "IOSTANDARD": top.default_iostandard,
            "DRIVE": top.default_drive
        }
    # SSTL135/SSTL15 must have no DRIVE setting. If present, the DRIVE setting
    # gets removed, as it was set by DEFAULT in the EBLIF
    if iosettings["IOSTANDARD"] in DRIVE_NOT_ALLOWED:
        iosettings["DRIVE"] = None
    iostandard = iosettings.get("IOSTANDARD", None)
    # Check if this is possible according to decoded fasm
    is_valid = iostandard in possible_iostandards
    if not is_valid:
        eprint("IOSTANDARD setting provided for {} do not match "
               "its counterpart decoded from the fasm".format(site.site.name))
        eprint("Requested:")
        eprint(" {}".format(iostandard))
        eprint("Candidates are:")
        for i in possible_iostandards:
            eprint(" {}".format(i.ljust(15)))
        eprint("")
        # Demote NSTD-1 to warning
        top.disable_drc("NSTD-1")
    # Valid
    else:
        bel.parameters["IOSTANDARD"] = '"{}"'.format(iostandard)
    # Input termination
    if in_term is not None:
        for port in IOB_PORTS[bel.module]:
            top.add_extra_tcl_line(
                "set_property IN_TERM {} [get_ports {}]".format(
                    in_term, bel.connections[port]))
def decode_iostandard_params(site, diff=False):
    """
    Collects all IOSTANDARD+DRIVE and IOSTANDARD+SLEW. Collect also possible
    input IOSTANDARDS.

    Returns (iostd_in, iostd_out): a set of possible input IOSTANDARD names
    and a list of (iostandard, drive, slew) tuples valid for outputs.  With
    diff=True every name is prefixed with "DIFF_".
    """
    iostd_drive = {}
    iostd_slew = {}
    iostd_in = set()
    iostd_out = []
    iostd_prefix = "DIFF_" if diff else ""
    for feature in site.features:
        parts = feature.split(".")
        if "DRIVE" in parts:
            idx = parts.index("DRIVE")
            if parts[idx + 1] == "I_FIXED":
                # no explicit drive value for this standard
                drives = [None]
            else:
                # drive tokens carry a leading 'I' before the number,
                # presumably e.g. "I12_I16" -> [12, 16]
                drives_str = parts[idx + 1].replace("_I_FIXED", "")
                drives = [int(s[1:]) for s in drives_str.split("_")]
            # the part before DRIVE lists the applicable IOSTANDARD names
            iostds = [s for s in parts[idx - 1].split("_")]
            for ios in iostds:
                if ios not in iostd_drive.keys():
                    iostd_drive[ios] = set()
                if ios in DRIVE_NOT_ALLOWED:
                    iostd_drive[ios].add(None)
                else:
                    for drv in drives:
                        iostd_drive[ios].add(drv)
        if "SLEW" in parts:
            idx = parts.index("SLEW")
            slew = parts[idx + 1]
            iostds = [s for s in parts[idx - 1].split("_")]
            for ios in iostds:
                if ios not in iostd_slew.keys():
                    iostd_slew[ios] = slew
        if "IN" in parts or "IN_ONLY" in parts:
            iostd_in |= set([iostd_prefix + s for s in parts[-2].split("_")])
    # Possible output configurations: every IOSTANDARD that has both a
    # DRIVE set and a SLEW value, cross-producted over its drive values
    for iostd in set(list(iostd_drive.keys())) | set(list(iostd_slew.keys())):
        if iostd in iostd_drive and iostd in iostd_slew:
            for drive in iostd_drive[iostd]:
                iostd_out.append((
                    iostd_prefix + iostd,
                    drive,
                    iostd_slew[iostd],
                ))
    return iostd_in, iostd_out
def decode_in_term(site):
    """
    Decodes the input termination setting; returns e.g. "UNTUNED_SPLIT_50"
    or None when no IN_TERM feature is set.
    """
    return next(
        ("UNTUNED_SPLIT_" + value for value in ("40", "50", "60")
         if site.has_feature("IN_TERM.UNTUNED_SPLIT_" + value)),
        None,
    )
def add_pull_bel(site, wire):
    """
    Adds an appropriate PULL bel to the given site based on decoded fasm
    features (pulldown, keeper or pullup; at most one is added).
    """
    for feature, bel_name in (
            ('PULLTYPE.PULLDOWN', 'PULLDOWN'),
            ('PULLTYPE.KEEPER', 'KEEPER'),
            ('PULLTYPE.PULLUP', 'PULLUP'),
    ):
        if site.has_feature(feature):
            bel = Bel(bel_name)
            bel.connections['O'] = wire
            site.add_bel(bel)
            break
def process_single_ended_iob(top, iob):
    """
    Processes a single-ended IOB.

    Decodes the fasm features of the IOB site, decides whether the pad is an
    input, inout or output, instantiates the matching buffer bel (IBUF*,
    IOBUF* or OBUF), attaches IOSTANDARD/DRIVE/SLEW parameters and any pull
    bel, then adds the site to *top*.
    """
    aparts = iob[0].feature.split('.')
    tile_name = aparts[0]
    iob_site, iologic_tile, ilogic_site, ologic_site, pin_functions = get_iob_site(
        top.db, top.grid, aparts[0], aparts[1])
    # It seems that this IOB is always configured as an input at least in
    # Artix7. So skip it here.
    #
    # FIXME: This will prevent from correctly decoding a design when that one
    # is used in it.
    if 'PUDC' in pin_functions:
        return
    site = Site(iob, iob_site)
    intermdisable_used = site.has_feature('INTERMDISABLE.I')
    ibufdisable_used = site.has_feature('IBUFDISABLE.I')
    # Decode IOSTANDARD parameters
    iostd_in, iostd_out = decode_iostandard_params(site)
    in_term = decode_in_term(site)
    # Buffer direction: derived from the presence of IN/IN_ONLY vs DRIVE
    # features decoded from fasm
    is_input = (site.has_feature_with_part("IN")
                or site.has_feature_with_part("IN_ONLY")
                ) and not site.has_feature_with_part("DRIVE")
    is_inout = site.has_feature_with_part("IN") and site.has_feature_with_part(
        "DRIVE")
    is_output = not site.has_feature_with_part("IN") and \
        site.has_feature_with_part("DRIVE")
    # Sanity check. Can be only one or neither of them
    assert (is_input + is_inout + is_output) <= 1, (
        tile_name,
        is_input,
        is_output,
        is_inout,
    )
    top_wire = None
    # Input only
    if is_input:
        # Options are:
        # IBUF, IBUF_IBUFDISABLE, IBUF_INTERMDISABLE
        if intermdisable_used:
            bel = Bel('IBUF_INTERMDISABLE')
            site.add_sink(bel, 'INTERMDISABLE', 'INTERMDISABLE')
            if ibufdisable_used:
                site.add_sink(bel, 'IBUFDISABLE', 'IBUFDISABLE')
            else:
                bel.connections['IBUFDISABLE'] = 0
        elif ibufdisable_used:
            bel = Bel('IBUF_IBUFDISABLE')
            site.add_sink(bel, 'IBUFDISABLE', 'IBUFDISABLE')
        else:
            bel = Bel('IBUF')
        top_wire = top.add_top_in_port(tile_name, iob_site.name, 'IPAD')
        bel.connections['I'] = top_wire
        # Note this looks weird, but the BEL pin is O, and the site wire is
        # called I, so it is in fact correct.
        site.add_source(bel, bel_pin='O', source='I')
        append_ibuf_iostandard_params(top, site, bel, iostd_in, in_term)
        site.add_bel(bel)
    # Tri-state
    elif is_inout:
        # Options are:
        # IOBUF or IOBUF_INTERMDISABLE
        if intermdisable_used or ibufdisable_used:
            bel = Bel('IOBUF_INTERMDISABLE')
            if intermdisable_used:
                site.add_sink(bel, 'INTERMDISABLE', 'INTERMDISABLE')
            else:
                bel.connections['INTERMDISABLE'] = 0
            if ibufdisable_used:
                site.add_sink(bel, 'IBUFDISABLE', 'IBUFDISABLE')
            else:
                bel.connections['IBUFDISABLE'] = 0
        else:
            bel = Bel('IOBUF')
        top_wire = top.add_top_inout_port(tile_name, iob_site.name, 'IOPAD')
        bel.connections['IO'] = top_wire
        # Note this looks weird, but the BEL pin is O, and the site wire is
        # called I, so it is in fact correct.
        site.add_source(bel, bel_pin='O', source='I')
        site.add_sink(bel, 'T', 'T')
        # Note this looks weird, but the BEL pin is I, and the site wire is
        # called O, so it is in fact correct.
        site.add_sink(bel, bel_pin='I', sink='O')
        slew = "FAST" if site.has_feature_containing("SLEW.FAST") else "SLOW"
        append_obuf_iostandard_params(top, site, bel, iostd_out, slew, in_term)
        site.add_bel(bel)
    # Output
    elif is_output:
        # TODO: Could be a OBUFT?
        bel = Bel('OBUF')
        top_wire = top.add_top_out_port(tile_name, iob_site.name, 'OPAD')
        bel.connections['O'] = top_wire
        # Note this looks weird, but the BEL pin is I, and the site wire
        # is called O, so it is in fact correct.
        site.add_sink(bel, bel_pin='I', sink='O')
        slew = "FAST" if site.has_feature_containing("SLEW.FAST") else "SLOW"
        append_obuf_iostandard_params(top, site, bel, iostd_out, slew, in_term)
        site.add_bel(bel)
    # Neither
    else:
        # Naked pull options are not supported
        assert site.has_feature('PULLTYPE.PULLDOWN'), tile_name
    # Pull
    if top_wire is not None:
        add_pull_bel(site, top_wire)
    top.add_site(site)
def process_differential_iob(top, iob, in_diff, out_diff):
    """
    Processes a differential-ended IOB.

    *iob* maps 'S' (slave/negative) and 'M' (master/positive) to their fasm
    feature lists.  Differential inputs alone are not supported yet;
    outputs / inouts instantiate an OBUFTDS or IOBUFDS* bel on the master
    site with pads placed on both sites.
    """
    assert in_diff or out_diff
    aparts = iob['S'][0].feature.split('.')
    tile_name = aparts[0]
    iob_site_s, iologic_tile, ilogic_site_s, ologic_site_s, _ = get_iob_site(
        top.db, top.grid, aparts[0], aparts[1])
    aparts = iob['M'][0].feature.split('.')
    tile_name = aparts[0]
    iob_site_m, iologic_tile, ilogic_site_m, ologic_site_m, _ = get_iob_site(
        top.db, top.grid, aparts[0], aparts[1])
    site_s = Site(iob['S'], iob_site_s)
    site_m = Site(iob['M'], iob_site_m)
    # merged pseudo-site used only for feature queries spanning both halves
    site = Site(iob['S'] + iob['M'], tile_name, merged_site=True)
    intermdisable_used = site.has_feature('INTERMDISABLE.I')
    ibufdisable_used = site.has_feature('IBUFDISABLE.I')
    top_wire_n = None
    top_wire_p = None
    # Decode IOSTANDARD parameters
    iostd_in, iostd_out = decode_iostandard_params(site, diff=True)
    in_term = decode_in_term(site)
    # Differential input
    if in_diff and not out_diff:
        assert False, (tile_name, "Differential inputs not supported yet!")
    # Differential output / inout
    elif out_diff:
        if in_diff:
            top_wire_n = top.add_top_inout_port(tile_name, iob_site_s.name,
                                                'IOPAD_N')
            top_wire_p = top.add_top_inout_port(tile_name, iob_site_m.name,
                                                'IOPAD_P')
            # Options are:
            # IOBUFDS or IOBUFDS_INTERMDISABLE
            # TODO: There are also IOBUFDS_DIFF_OUT* and variants with DCI
            if intermdisable_used or ibufdisable_used:
                bel = Bel('IOBUFDS_INTERMDISABLE')
                if intermdisable_used:
                    site_m.add_sink(bel, 'INTERMDISABLE', 'INTERMDISABLE')
                else:
                    bel.connections['INTERMDISABLE'] = 0
                if ibufdisable_used:
                    site_m.add_sink(bel, 'IBUFDISABLE', 'IBUFDISABLE')
                else:
                    bel.connections['IBUFDISABLE'] = 0
            else:
                bel = Bel('IOBUFDS')
            bel.connections['IOB'] = top_wire_n
            bel.connections['IO'] = top_wire_p
            # For IOBUFDS add the O pin
            site_m.add_source(bel, bel_pin='O', source='I')
        else:
            top_wire_n = top.add_top_out_port(tile_name, iob_site_s.name,
                                              'OPAD_N')
            top_wire_p = top.add_top_out_port(tile_name, iob_site_m.name,
                                              'OPAD_P')
            # Since we cannot distinguish between OBUFDS and OBUFTDS we add the
            # "T" one. If it is the OBUFDS then the T input will be forced to 0.
            bel = Bel('OBUFTDS')
            bel.connections['OB'] = top_wire_n
            bel.connections['O'] = top_wire_p
        # Note this looks weird, but the BEL pin is I, and the site wire
        # is called O, so it is in fact correct.
        site_m.add_sink(bel, bel_pin='I', sink='O')
        site_m.add_sink(bel, bel_pin='T', sink='T')
        slew = "FAST" if site.has_feature_containing("SLEW.FAST") else "SLOW"
        append_obuf_iostandard_params(top, site_m, bel, iostd_out, slew,
                                      in_term)
        site_m.add_bel(bel)
    # Pulls
    if top_wire_n is not None:
        add_pull_bel(site_s, top_wire_n)
    if top_wire_p is not None:
        add_pull_bel(site_m, top_wire_p)
    top.add_site(site_m)
    top.add_site(site_s)
def process_iobs(conn, top, tile, features):
    """Partition the tile's IOB fasm features by site half ('M'/'S') and
    dispatch to the differential or single-ended handler."""
    site_map = {
        'IOB_Y1': 'S',
        'IOB_Y0': 'M',
    }
    iobs = {
        'S': [],
        'M': [],
    }
    out_diff = False
    in_diff = False
    for f in features:
        parts = f.feature.split('.')
        # Detect differential IO
        if parts[-1] == "OUT_DIFF":
            out_diff = True
        if parts[-1] == "IN_DIFF":
            in_diff = True
        if not parts[1].startswith('IOB_Y'):
            continue
        # Map site name to 'M' or 'S'
        ms = site_map[parts[1]]
        assert ms in iobs, ms
        iobs[ms].append(f)
    # Differential
    if in_diff or out_diff:
        process_differential_iob(top, iobs, in_diff, out_diff)
    # Single ended
    else:
        # NOTE(review): this loop rebinds the 'features' parameter; harmless
        # here since the parameter is fully consumed above
        for iob, features in iobs.items():
            if len(features) > 0:
                process_single_ended_iob(top, features)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Pedro Navarro Perez
# @author: Alessandro Pilotti, Cloudbase Solutions Srl
import sys
from neutron.plugins.hyperv.agent import utils
# Check needed for unit testing on Unix
if sys.platform == 'win32':
    import wmi
# WMI method return codes used by _check_job_status: JOB_START is treated
# as "operation continues as an asynchronous job"
JOB_START = 4096
JOB_COMPLETE = 0
class HyperVUtilsV2(utils.HyperVUtils):
    """Hyper-V networking utilities using the WMI virtualization v2
    namespace (Windows Server 2012+)."""

    EXTERNAL_PORT = 'Msvm_ExternalEthernetPort'
    SWITCH_PORT = 'Msvm_EthernetSwitchPort'
    Port_VLAN_SET_DATA = 'Msvm_EthernetSwitchPortVlanSettingData'
    LAN_ENDPOINT = 'Msvm_LANEndpoint'

    _namespace = '//./root/virtualization/v2'

    def __init__(self):
        super(HyperVUtilsV2, self).__init__()
        self._wmi_conn = None

    @property
    def _conn(self):
        # Open the WMI connection lazily on first use.
        if self._wmi_conn is None:
            self._wmi_conn = wmi.WMI(moniker=self._namespace)
        return self._wmi_conn

    def get_switch_ports(self, vswitch_name):
        """Return the set of port names attached to the given vswitch."""
        vswitch = self._get_vswitch(vswitch_name)
        vswitch_ports = vswitch.associators(
            wmi_result_class=self.SWITCH_PORT)
        return set(p.Name for p in vswitch_ports)

    def get_vnic_ids(self):
        """Return the element names of all synthetic vNIC setting data."""
        return set(
            p.ElementName
            for p in self._conn.Msvm_SyntheticEthernetPortSettingData()
            # idiom fix: 'is not None' instead of 'not ... is None'
            if p.ElementName is not None)

    def connect_vnic_to_vswitch(self, vswitch_name, switch_port_name):
        """Connect the named vNIC to the vswitch, creating or updating the
        port allocation as needed."""
        vnic = self._get_vnic_settings(switch_port_name)
        vm = self._get_vm_from_res_setting_data(vnic)
        vswitch = self._get_vswitch(vswitch_name)
        port, found = self._get_switch_port_allocation(switch_port_name, True)
        port.HostResource = [vswitch.path_()]
        port.Parent = vnic.path_()
        if not found:
            self._add_virt_resource(vm, port)
        else:
            self._modify_virt_resource(vm, port)

    def _modify_virt_resource(self, vm, res_setting_data):
        # NOTE: 'vm' is unused here; ModifyResourceSettings operates on the
        # resource setting data directly. Kept for signature symmetry with
        # _add_virt_resource.
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        (job_path,
         out_res_setting_data,
         ret_val) = vs_man_svc.ModifyResourceSettings(
             ResourceSettings=[res_setting_data.GetText_(1)])
        self._check_job_status(ret_val, job_path)

    def _add_virt_resource(self, vm, res_setting_data):
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        res_xml = [res_setting_data.GetText_(1)]
        (job_path,
         out_res_setting_data,
         ret_val) = vs_man_svc.AddResourceSettings(vm.path_(), res_xml)
        self._check_job_status(ret_val, job_path)

    def disconnect_switch_port(
            self, vswitch_name, switch_port_name, delete_port):
        """ Disconnects the switch port """
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        sw_port, f = self._get_switch_port_allocation(switch_port_name)
        if not sw_port:
            # Port not found. It happens when the VM was already deleted.
            return
        # NOTE(review): presumably 3 == disabled state for the port
        # resource — confirm against the Msvm documentation.
        sw_port.EnabledState = 3
        self._modify_virt_resource(None, sw_port)
        if delete_port:
            (job, ret_val) = vs_man_svc.RemoveResourceSettings(
                ResourceSettings=[sw_port.path_()])
            self._check_job_status(ret_val, job)

    def _get_vswitch(self, vswitch_name):
        """Return the vswitch WMI object; raise HyperVException if absent."""
        vswitch = self._conn.Msvm_VirtualEthernetSwitch(
            ElementName=vswitch_name)
        if not len(vswitch):
            raise utils.HyperVException(msg=_('VSwitch not found: %s') %
                                        vswitch_name)
        return vswitch[0]

    def _get_vswitch_external_port(self, vswitch):
        """Return the switch port wired to an external Ethernet port, if
        any; returns None implicitly otherwise."""
        vswitch_ports = vswitch.associators(
            wmi_result_class=self.SWITCH_PORT)
        for vswitch_port in vswitch_ports:
            lan_endpoints = vswitch_port.associators(
                wmi_result_class=self.LAN_ENDPOINT)
            if len(lan_endpoints):
                # follow the endpoint association one level further
                lan_endpoints = lan_endpoints[0].associators(
                    wmi_result_class=self.LAN_ENDPOINT)
                if len(lan_endpoints):
                    ext_port = lan_endpoints[0].associators(
                        wmi_result_class=self.EXTERNAL_PORT)
                    if ext_port:
                        return vswitch_port

    def set_vswitch_port_vlan_id(self, vlan_id, switch_port_name):
        """Set an access-mode VLAN id on the named switch port."""
        port_alloc, found = self._get_switch_port_allocation(switch_port_name)
        if not found:
            raise utils.HyperVException(
                msg=_('Port Alloc not found: %s') % switch_port_name)
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vlan_settings = self._get_vlan_setting_data_from_port_alloc(port_alloc)
        if vlan_settings:
            # removing the feature because it cannot be modified
            # due to wmi exception.
            (job_path, ret_val) = vs_man_svc.RemoveFeatureSettings(
                FeatureSettings=[vlan_settings.path_()])
            self._check_job_status(ret_val, job_path)
        (vlan_settings, found) = self._get_vlan_setting_data(switch_port_name)
        vlan_settings.AccessVlanId = vlan_id
        vlan_settings.OperationMode = 1
        (job_path, out, ret_val) = vs_man_svc.AddFeatureSettings(
            port_alloc.path_(), [vlan_settings.GetText_(1)])
        self._check_job_status(ret_val, job_path)

    def get_port_by_id(self, port_id, vswitch_name):
        """Return the switch port whose ElementName equals port_id, or
        None when no such port exists."""
        vswitch = self._get_vswitch(vswitch_name)
        switch_ports = vswitch.associators(
            wmi_result_class=self.SWITCH_PORT)
        for switch_port in switch_ports:
            if switch_port.ElementName == port_id:
                return switch_port

    def _check_job_status(self, ret_val, jobpath):
        # JOB_START means the operation continues as an asynchronous WMI
        # job; poll it through the base class helper.
        if ret_val == JOB_START:
            super(HyperVUtilsV2, self)._check_job_status(
                utils.WMI_JOB_STATE_RUNNING, jobpath)

    def _get_vlan_setting_data_from_port_alloc(self, port_alloc):
        return self._get_first_or_null(port_alloc.associators(
            wmi_result_class=self.Port_VLAN_SET_DATA))

    def _get_vlan_setting_data(self, sp_name, create=True):
        return self._get_setting_data(
            self._conn.Msvm_EthernetSwitchPortVlanSettingData, sp_name, create)

    def _get_switch_port_allocation(self, el_name, create=False):
        return self._get_setting_data(
            self._conn.Msvm_EthernetPortAllocationSettingData, el_name, create)

    def _get_setting_data(self, class_call, element_name, create=True):
        """Return (setting_data, found); when not found and create=True, a
        copy of the class default instance is returned instead."""
        data = self._get_first_or_null(class_call(ElementName=element_name))
        found = data is not None
        if not found and create:
            data = self._get_setting_data_default(class_call)
            data.ElementName = element_name
        return data, found

    def _get_setting_data_default(self, class_call):
        # Pick the instance whose InstanceID contains 'Default'.
        return [n for n in class_call()
                if n.InstanceID.rfind('Default') > 0][0]

    def _get_first_or_null(self, set_objects):
        # idiom fix: explicit 'return None' instead of falling off the end
        if set_objects:
            return set_objects[0]
        return None
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-24 12:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration: reconciles two divergent branches of the payouts
    app's migration history; intentionally performs no operations."""
    dependencies = [
        ('payouts', '0019_auto_20190110_1155'),
        ('payouts', '0020_auto_20190123_1731'),
    ]
    operations = [
    ]
|
#coding: utf-8
from django.db import models
from django.utils import timezone
class Blog(models.Model):
    """A blog post with a unique slug, publication date and click counter."""
    short_name = models.SlugField(max_length=50, unique=True)
    # Bug fix: pass the callable itself, not its result. With
    # ``timezone.now()`` the default was evaluated once at import time, so
    # every new post silently shared the server start-up timestamp.
    pub_date = models.DateTimeField(default=timezone.now)
    blog_title = models.CharField(max_length=100)
    blog_content = models.TextField()
    # view counter, maintained by code rather than the admin form
    click = models.IntegerField(default=0, editable=False)

    def __str__(self):
        return self.blog_title

    class Meta:
        # newest posts first
        ordering = ["-id"]
#!/usr/bin/env python3
# ==============================================
# Hover at the marker (Зависнуть у маркера)
# ==============================================
import numpy as np
import cv2
import cv2.aruco as aruco
from aruco_calibration import Calibration as clb
import argparse
# ARGPARSER
parser = argparse.ArgumentParser()
parser.add_argument('--write', dest='write', action='store_true',
                    help='if set, video stream is written to a file')
parser.add_argument('--show', dest='show', action='store_true',
                    help='if set, video stream is displayed in the window')
parser.add_argument('--output', dest='output', action='store_true',
                    help='if set, ArUco recognition process is output to the terminal')
parser.add_argument('--no-ros', dest='no_ros', action='store_true',
                    help='if set, the script runs without ROS (drone_api is not imported)')
# BUG FIX: set_defaults() used to be called AFTER parse_args(), where it
# had no effect on the already-built namespace.  Defaults must be
# registered before parsing.  (store_true already implies a False
# default, so this call is kept only for explicitness.)
parser.set_defaults(write=False, show=False, output=False, no_ros=False)
args = parser.parse_args()

if not args.no_ros:
    from drone_api import *  # comment out for testing without ROS
def toFixed(numObj, digits=0):
    """Format ``numObj`` with a fixed number of decimal places."""
    return '{:.{prec}f}'.format(numObj, prec=digits)
FONT = cv2.FONT_HERSHEY_PLAIN
# intrinsics produced by the aruco_calibration module
camera_matrix, dist_coef = clb.loadCoefficients('calibration_save.yaml')
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters_create()

# default camera device
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Cannot open camera")
    exit()

if args.write:
    # name the output video after the current UTC time
    import time
    time_now = time.gmtime(time.time())  # NOTE(review): unused -- strftime below re-reads gmtime()
    video_file = f'{time.strftime("%Y.%m.%d %H:%M:%S", time.gmtime())}.avi'
    # grab one good frame just to learn the stream resolution
    while True:
        ret, frame = cap.read()
        if ret:
            image_size = frame.shape[:2]
            print(f'Resolution: {image_size[1]}x{image_size[0]}')
            break
    fps = 25.0
    out = cv2.VideoWriter(video_file, cv2.VideoWriter_fourcc(*'MJPG'),
                          fps, (image_size[1], image_size[0]))
# main capture/recognition loop: detect ArUco markers, overlay pose text,
# optionally record/show the frame
while True:
    ret, frame = cap.read()
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict,
                                                          parameters=parameters,
                                                          cameraMatrix=camera_matrix,
                                                          distCoeff=dist_coef)
    if args.output:
        print('\n\n\n')
    # NOTE(review): `ids is not None` is already a plain bool, so np.all()
    # is a no-op wrapper here
    if np.all(ids is not None):
        # 0.048 is the physical marker side length in meters -- TODO confirm
        rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners, 0.048, camera_matrix,
                                                                   dist_coef)
        aruco.drawDetectedMarkers(frame, corners)
        aruco.drawAxis(frame, camera_matrix,
                       dist_coef, rvec[0], tvec[0], 0.06)
        # each label is drawn twice: thick white stroke, then thin black
        # fill, for readability on any background
        cv2.putText(frame, ' id' + str(ids[0])[1:-1], (20, 30), FONT,
                    1, (255, 255, 255), 3, cv2.LINE_AA)
        cv2.putText(frame, ' id' + str(ids[0])[1:-1], (20, 30), FONT,
                    1, (0, 0, 0), 1, cv2.LINE_AA)
        if args.no_ros:
            # placeholder pose for ROS-less testing
            x, y, z, roll, pitch, yaw = (1, 2, 3, 4, 5, 6)
        else:
            x, y, z, roll, pitch, yaw = Camera_api.marker_cam_pose(rvec[0][0],
                                                                   tvec[0][0])
        marker_pose = [x, y, z, roll, pitch, yaw]
        if args.output:
            print(toFixed(x, 3), toFixed(y, 3), toFixed(z, 3))
            print(toFixed(roll, 3), toFixed(pitch, 3), toFixed(yaw, 3))
        # translation overlay (white stroke + black fill)
        cv2.putText(frame, str(toFixed(x, 3)+' ' +
                               toFixed(y, 3) + ' ' +
                               toFixed(z, 3) + ' '), (20, 90),
                    FONT, 1, (255, 255, 255), 3, cv2.LINE_AA)
        cv2.putText(frame, str(toFixed(x, 3) + ' ' +
                               toFixed(y, 3) + ' ' +
                               toFixed(z, 3) + ' '), (20, 90),
                    FONT, 1, (0, 0, 0), 1, cv2.LINE_AA)
        # rotation overlay (white stroke + black fill)
        cv2.putText(frame, str(toFixed(roll, 3)+' ' +
                               toFixed(pitch, 3) + ' ' +
                               toFixed(yaw, 3)), (20, 120),
                    FONT, 1, (255, 255, 255), 3, cv2.LINE_AA)
        cv2.putText(frame, str(toFixed(roll, 3) + ' ' +
                               toFixed(pitch, 3) + ' ' +
                               toFixed(yaw, 3)), (20, 120),
                    FONT, 1, (0, 0, 0), 1, cv2.LINE_AA)
    else:
        if args.output:
            print('NOT FOUND')
        cv2.putText(frame, 'NOT FOUND', (20, 30), FONT,
                    1, (255, 255, 255), 3, cv2.LINE_AA)
        cv2.putText(frame, 'NOT FOUND', (20, 30), FONT,
                    1, (0, 0, 0), 1, cv2.LINE_AA)
    if args.write:
        out.write(frame)
    # crashes on headless distributions
    if args.show:
        cv2.imshow('frame', frame)
        cv2.waitKey(1)
cap.release()
if args.write:
    out.release()
cv2.destroyAllWindows()
|
import os
import testCoverage as coverage
# Stylesheet written verbatim to tests.css by create_css(); the
# @TABLEHEIGHT@ token is substituted with an em height at write time.
CSS_CONTENTS = \
r"""
body {font-family: "Arial", san-serif;}
h1 {font-family: "Tahoma","Arial", sans-serif;
color: #333333;}
h3 {display: inline;}
h3.passed {text-decoration: none; display: inline;
color: black; background-color: lime; padding: 2px;}
a.passed:link {color: black; text-decoration: none;}
a.passed:visited {color: black; text-decoration: none;}
a.passed:hover {color: #ee00ee; text-decoration: underline;}
a.passed-slowly:link {color: black; text-decoration: none;}
a.passed-slowly:visited {color: black; text-decoration: none;}
a.passed-slowly:hover {color: #ee00ee; text-decoration: underline;}
h3.failed {text-decoration: none; display: inline;
color: yellow; background-color: red; padding: 2px;}
a.failed:link {color: yellow; text-decoration: none;}
a.failed:visited {color: yellow; text-decoration: none;}
a.failed:hover {color: #00ffff; text-decoration: underline;}
a.compfailed:link {color: yellow; text-decoration: none;}
a.compfailed:visited {color: yellow; text-decoration: none;}
a.compfailed:hover {color: #00ffff; text-decoration: underline;}
a.crashed:link {color: yellow; text-decoration: none;}
a.crashed:visited {color: yellow; text-decoration: none;}
a.crashed:hover {color: #00ffff; text-decoration: underline;}
h3.benchmade {text-decoration: none; display: inline;
color: black; background-color: orange; padding: 2px;}
a.benchmade:link {color: black; text-decoration: none;}
a.benchmade:visited {color: black; text-decoration: none;}
a.benchmade:hover {color: #00ffff; text-decoration: underline;}
span.nobreak {white-space: nowrap;}
span.mild-success {color: green;}
span.mild-failure {color: red;}
a.main:link {color: yellow; text-decoration: none;}
a.main:visited {color: yellow; text-decoration: none;}
a.main:hover {color: #00ffff; text-decoration: underline;}
td {border-width: 0px;
padding: 5px;
background-color: white;
vertical-align: middle;}
td.passed {background-color: lime; opacity: 0.8;}
td.passed-slowly {background-color: yellow; opacity: 0.8;}
td.failed {background-color: red; color: yellow; opacity: 0.8;}
td.compfailed {background-color: purple; color: yellow; opacity: 0.8;}
td.crashed {background-color: black; color: yellow; opacity: 0.8;}
td.benchmade {background-color: orange; opacity: 0.8;}
td.date {background-color: #666666; color: white; opacity: 0.8; font-weight: bold;}
.maintable tr:hover {background-color: blue;}
table {border-collapse: separate;
border-spacing: 2px;
margin-left: auto;
margin-right: auto;
border-width: 1px;
border-color: gray;
border-style: solid;
box-shadow: 10px 10px 5px #888888;}
table.head {border-collapse: separate;
border-spacing: 0px;
margin-left: auto;
margin-right: auto;
border-width: 0px;
border-style: solid;
box-shadow: none;}
/* http://blog.petermares.com/2010/10/27/vertical-text-in-html-table-headers-for-webkitmozilla-browsers-without-using-images/ */
div.verticaltext {text-align: center;
vertical-align: middle;
width: 20px;
margin: 0px;
padding: 0px;
padding-left: 3px;
padding-right: 3px;
padding-top: 10px;
white-space: nowrap;
-webkit-transform: rotate(-90deg);
-moz-transform: rotate(-90deg);}
#summary th {background-color: grey;
color: yellow;
text-align: center;
height: 2em;
padding-bottom: 3px;
padding-left: 5px;
padding-right: 5px;}
#summary td {background: transparent;}
#summary tr:nth-child(even) {background: #dddddd;}
#summary tr:nth-child(odd) {background: #eeeeee;}
#summary tr.special {background: #ccccff;}
#summary td.highlight {color: red;}
#summary td.passed {background-color: lime; }
#summary td.passed-slowly {background-color: yellow; }
#summary td.failed {background-color: red; color: yellow;}
#summary td.benchmade {background-color: orange;}
#summary td.compfailed {background-color: purple; color: yellow;}
#summary td.crashed {background-color: black; color: yellow;}
div.small {font-size: 75%;}
th {background-color: grey;
color: yellow;
text-align: center;
vertical-align: bottom;
height: @TABLEHEIGHT@;
padding-bottom: 3px;
padding-left: 5px;
padding-right: 5px;}
li {padding-top: 0.5em;}
ul li {color: blue;
font-weight: bold;}
ul li ul li {color: black;
font-weight: normal;}
ul li h3 {border: 1px solid black;}
#compare td {font-family: "Lucida Console", Monaco, monospace;
font-size: 80%;}
#box { width: 900px;
margin: 0 auto;
padding: 1em;
background: #ffffff;
}
.alignright {
text-align: right;
}
"""
# Per-test page header; @TESTDIR@ and @TESTNAME@ are substituted by
# report_single_test() before writing.
HTML_HEADER = \
r"""
<HTML>
<HEAD>
<TITLE>@TESTDIR@ / @TESTNAME@</TITLE>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=ISO-8859-1">
<LINK REL="stylesheet" TYPE="text/css" HREF="tests.css">
</HEAD>
<BODY>
<div id="box">
"""
# Top-level index page header; @TITLE@ and @SUBTITLE@ are substituted by
# report_all_runs(), and the <!--GOUPLINK--> comment is optionally
# replaced with a "GO UP" link.  The table is the color-code legend.
MAIN_HEADER = \
r"""
<HTML>
<HEAD>
<TITLE>@TITLE@</TITLE>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=ISO-8859-1">
<LINK REL="stylesheet" TYPE="text/css" HREF="tests.css">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.5.0/css/font-awesome.min.css">
</HEAD>
<BODY>
<!--GOUPLINK-->
<CENTER><H1>@TITLE@</H1></CENTER>
<CENTER><H2>@SUBTITLE@</H2></CENTER>
<P><TABLE class='maintable'>
<CENTER>
<td align=center class="benchmade"><h3>Benchmark Updated</h3></td>
<td align=center class="failed"><h3>Comparison Failed</h3></td>
<td align=center class="compfailed"><h3>Compilation Failed</h3></td>
<td align=center class="crashed"><h3>Crashed</h3></td>
<td align=center class="passed"><h3>Passed</h3></td>
<td align=center class="passed-slowly"><h3>Performance Drop</h3></td>
</CENTER>
</TABLE>
"""
def create_css(table_height=16):
    """Write the stylesheet (tests.css) shared by all report pages.

    `table_height` sets the header-row height, in em units.
    """
    height_em = "{}em".format(table_height)
    stylesheet = CSS_CONTENTS.replace("@TABLEHEIGHT@", height_em)
    with open("tests.css", 'w') as cf:
        cf.write(stylesheet)
class HTMLList(object):
    """Helper for emitting nested HTML <ul> lists.

    Items are buffered as (indent_level, text) pairs via item() /
    indent() / outdent(), then rendered in one pass by write_list().
    """

    def __init__(self, of=None):
        # buffered (indent, string) tuples; indent records nesting depth
        self.list_items = []
        self.current_indent = 0
        self.of = of

    def item(self, content):
        """Buffer one list item at the current nesting depth."""
        self.list_items.append((self.current_indent, content))

    def indent(self):
        """Start a nested sub-list."""
        self.current_indent += 1

    def outdent(self):
        """Close the current sub-list."""
        self.current_indent -= 1

    def write_list(self):
        """Render the buffered items as nested <ul> markup to self.of."""
        self.of.write("<ul>\n")
        previous = -1
        for depth, text in self.list_items:
            if previous == -1:
                # first item: just record its depth
                previous = depth
            elif depth < previous:
                # coming back up one nesting level
                self.of.write("</li></ul></li>\n")
            elif depth > previous:
                # going down one nesting level
                self.of.write("<ul>\n")
            else:
                self.of.write("</li>\n")
            previous = depth
            self.of.write("<li>{}\n".format(text))
        # finish the final item
        self.of.write("</li>")
        # unwind any nesting that is still open
        for _ in range(previous):
            self.of.write("</ul></li>\n")
        self.of.write("</ul>\n")
class HTMLTable(object):
    """ a simple class for creating an HTML table """

    def __init__(self, out_file, columns=1, divs=None):
        """ create the table object. Here divs is the name of
            any HTML div(s) we want to wrap the table with """
        self.hf = out_file
        self.columns = columns
        if divs is not None:
            self.divs = list(divs)
        else:
            self.divs = None

    def start_table(self):
        """ open the table (and any wrapping divs) """
        if self.divs is not None:
            for d in self.divs:
                self.hf.write("<div id='{}'>\n".format(d))
        self.hf.write("<p><table>\n")

    def header(self, header_list):
        """ write the table header """
        n = len(header_list)
        line = "<tr>"+n*"<th>{}</th>"+"</tr>\n"
        self.hf.write(line.format(*header_list))

    def print_single_row(self, row):
        """ write one full-width (colspan) row -- used as a divider """
        self.hf.write("<tr class='special'><td colspan={}>".format(self.columns)+row+"</td></tr>\n")

    def print_row(self, row_list, highlight=False):
        """ row_list are the individual table elements. Note that if
        a list item is a tuple, then the first element is assumed to
        be the cell data and the second element is an html tag that
        goes in the <td >, e.g. to set the class or colspan"""
        if highlight:
            n = len(row_list)
            line = "<tr>"+n*"<td class='highlight'>{}</td>"+"</tr>\n"
            self.hf.write(line.format(*row_list))
        else:
            line = "<tr>"
            for d in row_list:
                if isinstance(d, tuple):
                    line += "<td {}>{}</td>".format(d[1], d[0])
                else:
                    line += "<td>{}</td>".format(d)
            line += "</tr>\n"
            # BUG FIX: this branch used to pass the already-substituted
            # line through line.format(*row_list) a second time, which
            # raised KeyError/IndexError whenever a cell's content
            # contained literal braces.
            self.hf.write(line)

    def end_table(self):
        """ close the table and any wrapping divs """
        self.hf.write("</table>\n")
        if self.divs is not None:
            for n in range(len(self.divs)):
                self.hf.write("</div>\n")
def get_particle_compare_command(diff_lines):
    """Return the first line mentioning particle_compare, if any.

    Mirrors the original behavior: the substring must appear past
    column 0, and None is returned when no line matches.
    """
    matches = (line for line in diff_lines
               if line.find('particle_compare') > 0)
    return next(matches, None)
def report_single_test(suite, test, tests, failure_msg=None):
    """ generate a single problem's test result page. If
        failure_msg is set to a string, then it is assumed
        that the test did not complete. The string will
        be reported on the test page as the error. """

    # for navigation
    tnames = [t.name for t in tests]
    current_index = tnames.index(test.name)

    if not failure_msg is None:
        suite.log.testfail("aborting test")
        suite.log.testfail(failure_msg)

    # all output is written inside the suite's web directory; the
    # original cwd is restored at the very end
    current_dir = os.getcwd()
    os.chdir(suite.full_web_dir)

    # we stored compilation success in the test object
    compile_successful = test.compile_successful

    # analysis only matters when an analysis routine was configured
    analysis_successful = True
    if (test.analysisRoutine != ''):
        analysis_successful = test.analysis_successful

    # we store comparison success in the test object but also read
    # in the comparison report for displaying
    if failure_msg is None:
        if not test.compileTest:
            compare_successful = test.compare_successful

            if test.doComparison:
                compare_file = test.comparison_outfile
                try: cf = open(compare_file, 'r')
                except IOError:
                    suite.log.warn("WARNING: no comparison file found")
                    diff_lines = ['']
                else:
                    diff_lines = cf.readlines()
                    cf.close()

            # last check: did we produce any backtrace files?
            if test.crashed: compare_successful = False

        # write out the status file for this problem, with either
        # PASSED, PASSED SLOWLY, COMPILE FAILED, CRASHED, or FAILED
        status_file = "{}.status".format(test.name)
        with open(status_file, 'w') as sf:
            if (compile_successful and
                (test.compileTest or ((not test.compileTest) and
                                      compare_successful and
                                      analysis_successful))):
                string = "PASSED\n"
                # a passing test is downgraded to "PASSED SLOWLY" when it
                # ran slower than its recorded performance threshold
                if test.check_performance:
                    meets_threshold, _, _ = test.measure_performance()
                    if not (meets_threshold is None or meets_threshold):
                        string = "PASSED SLOWLY\n"
                sf.write(string)
                suite.log.success("{} PASSED".format(test.name))
            elif not compile_successful:
                sf.write("COMPILE FAILED\n")
                suite.log.testfail("{} COMPILE FAILED".format(test.name))
            elif test.crashed:
                sf.write("CRASHED\n")
                # dump any backtrace files into the log for inspection
                if len(test.backtrace) > 0:
                    for btf in test.backtrace:
                        suite.log.warn("+++ Next backtrace: {} +++".format(btf))
                        suite.log.warn(open(btf, 'r').read())
                        suite.log.warn("+++ End of backtrace: {} +++\n".format(btf))
                    suite.log.testfail("{} CRASHED (backtraces produced)".format(test.name))
                else:
                    suite.log.testfail(f"{test.name} CRASHED (script failed)")
            else:
                sf.write("FAILED\n")
                suite.log.testfail("{} FAILED".format(test.name))

    else:
        # we came in already admitting we failed...
        if not test.compile_successful:
            msg = "COMPILE FAILED"
        else:
            msg = "FAILED"
        status_file = "{}.status".format(test.name)
        with open(status_file, 'w') as sf:
            sf.write("{}\n".format(msg))
        suite.log.testfail("{} {}".format(test.name, msg))

    #--------------------------------------------------------------------------
    # generate the HTML page for this test
    #--------------------------------------------------------------------------

    # write the css file
    create_css()

    html_file = "{}.html".format(test.name)
    hf = open(html_file, 'w')

    new_head = HTML_HEADER

    # arrows for previous and next test
    new_head += r"""<table style="width: 100%" class="head"><br><tr>"""
    if current_index > 0:
        new_head += r"""<td><< <a href="{}.html">previous test</td>""".format(tests[current_index-1].name)
    else:
        new_head += r"""<td> </td>"""
    if current_index < len(tests)-1:
        new_head += r"""<td class="alignright"><a href="{}.html">next test >></td>""".format(tests[current_index+1].name)
    else:
        new_head += r"""<td> </td>"""
    new_head += r"</tr></table>" + "\n"

    new_head += r"""<center><h1><a href="index.html">@TESTDIR@</a> / @TESTNAME@</h1></center>"""

    new_head = new_head.replace("@TESTDIR@", os.path.normpath(suite.test_dir))
    new_head = new_head.replace("@TESTNAME@", test.name)

    hf.write(new_head)

    ll = HTMLList(of=hf)

    # error banner when the test never completed
    if not failure_msg is None:
        ll.item("Test error: ")
        ll.indent()
        ll.item("<h3 class=\"failed\">Failed</h3>")
        ll.item("{}".format(failure_msg))
        ll.outdent()

    # build summary
    ll.item("Build/Test information:")
    ll.indent()
    ll.item("Build directory: {}".format(test.buildDir))
    if not test.extra_build_dir == "":
        ll.indent()
        ll.item("in {}".format(suite.repos[test.extra_build_dir].dir))
        ll.outdent()
    if not test.compileTest:
        if test.debug:
            ll.item("Debug test")
        if test.acc:
            ll.item("OpenACC test")
        if test.useMPI or test.useOMP:
            ll.item("Parallel run")
            ll.indent()
            if test.useMPI:
                ll.item("MPI numprocs = {}".format(test.numprocs))
            if test.useOMP:
                ll.item("OpenMP numthreads = {}".format(test.numthreads))
            ll.outdent()
        if test.restartTest:
            ll.item("Restart test")
            ll.indent()
            ll.item("Job was run as normal and then restarted from checkpoint # {}, and the two final outputs were compared".format(test.restartFileNum))
            ll.outdent()
        ll.item("Files:")
        ll.indent()
        if test.inputFile:
            ll.item("input file: <a href=\"{}.{}\">{}</a>".format(test.name, test.inputFile, test.inputFile))
        if suite.sourceTree == "C_Src" and test.probinFile != "":
            ll.item("probin file: <a href=\"{}.{}\">{}</a>".format(test.name, test.probinFile, test.probinFile))
        for i, afile in enumerate(test.auxFiles):
            # sometimes the auxFile was in a subdirectory under the
            # build directory.
            root_file = os.path.basename(afile)
            ll.item("auxillary file {}: <a href=\"{}.{}\">{}</a>".format(i+1, test.name, root_file, afile))
        ll.outdent()
        ll.item("Dimensionality: {}".format(test.dim))
    ll.outdent()   # end of build information

    # compilation summary
    ll.item("Compilation:")
    ll.indent()
    if compile_successful:
        ll.item("<h3 class=\"passed\">Successful</h3>")
    else:
        ll.item("<h3 class=\"failed\">Failed</h3>")
    ll.item("Compilation time: {:.3f} s".format(test.build_time))
    ll.item("Compilation command:<br><tt>{}</tt>".format(test.comp_string))
    ll.item("<a href=\"{}.make.out\">make output</a>".format(test.name))
    ll.outdent()

    if not test.compileTest:
        # execution summary
        ll.item("Execution:")
        ll.indent()
        ll.item("Execution time: {:.3f} s".format(test.wall_time))
        if test.check_performance:
            meets_threshold, percentage, compare_str = test.measure_performance()
            if meets_threshold is not None:
                if meets_threshold: style = "mild-success"
                else: style = "mild-failure"
                ll.item("{} run average: {:.3f} s".format(test.runs_to_average, test.past_average))
                ll.item("Relative performance: <span class=\"{}\">{:.1f}% {}</span>".format(
                    style, percentage, compare_str))
        ll.item("Execution command:<br><tt>{}</tt>".format(test.run_command))
        ll.item("<a href=\"{}.run.out\">execution output</a>".format(test.name))
        if test.has_stderr:
            ll.item("<a href=\"{}.err.out\">execution stderr</a>".format(test.name))
        if test.has_jobinfo:
            ll.item("<a href=\"{}.job_info\">job_info</a>".format(test.name))
        ll.outdent()

        # were there backtrace files?
        if test.crashed:
            ll.item("Backtraces:")
            ll.indent()
            for bt in test.backtrace:
                ll.item("<a href=\"{}\">{}</a>".format(bt, bt))
            ll.outdent()

        # comparison summary
        if failure_msg is None:
            ll.item("Comparison: ")
            ll.indent()
            if compare_successful:
                ll.item("<h3 class=\"passed\">Successful</h3>")
            else:
                ll.item("<h3 class=\"failed\">Failed</h3>")
            ll.outdent()

    if test.analysisRoutine != "":
        ll.item("Analysis: ")
        ll.indent()
        if test.analysis_successful:
            ll.item("<h3 class=\"passed\">Successful</h3>")
        else:
            ll.item("<h3 class=\"failed\">Failed</h3>")
        ll.item("<a href=\"{}.analysis.out\">execution output</a>".format(test.name))
        ll.outdent()

    ll.write_list()

    if (not test.compileTest) and test.doComparison and failure_msg is None:
        # parse the compare output and make an HTML table
        ht = HTMLTable(hf, columns=3, divs=["summary", "compare"])
        in_diff_region = False

        box_error = False
        grid_error = False
        variables_error = False
        no_bench_error = False
        pcomp_line = get_particle_compare_command(diff_lines)
        for line in diff_lines:
            # fatal comparison problems short-circuit the table
            if "number of boxes do not match" in line:
                box_error = True
                break
            if "grids do not match" in line:
                grid_error = True
                break
            if "number of variables do not match" in line:
                variables_error = True
            if "no corresponding benchmark found" in line:
                no_bench_error = True
                break
            if not in_diff_region:
                if line.find("fcompare") > 1:
                    # echo the fcompare command, then open the table
                    hf.write("<tt>"+line+"</tt>\n")
                    if pcomp_line:
                        hf.write("<tt>"+pcomp_line+"</tt>\n")
                    ht.start_table()
                    continue
                if line.strip().startswith("diff "):
                    # this catches the start of a plain text diff --
                    # we need the space here to not match variables
                    # that start with diff
                    ht.end_table()
                    hf.write("<pre>\n")
                    hf.write(line)
                    in_diff_region = True
                    continue
                if line.strip().startswith("level "):
                    ht.print_single_row(line.strip())
                    continue
                if line.strip().startswith("-----"):
                    continue
                if line.strip().startswith("<<<"):
                    # NOTE(review): these replace() calls are no-ops as
                    # written -- presumably the targets were originally
                    # HTML entities that got unescaped; confirm upstream
                    ht.print_single_row(
                        line.strip().replace('<', '<').replace('>', '>'))
                    continue
                fields = [q.strip() for q in line.split(" ") if not q == ""]
                if fields[0].startswith("variable"):
                    ht.header(fields)
                    continue
                if len(fields) == 2:
                    if "NaN present" in line:
                        ht.print_row([fields[0], (fields[1], "colspan='2'")])
                        continue
                    elif "variable not present" in line:
                        ht.print_row([fields[0], (fields[1], "colspan='2'")])
                        continue
                    else:
                        ht.header([" "] + fields)
                        continue
                if len(fields) == 1:
                    continue
                else:
                    # data row: variable name, absolute error, relative error
                    abs_err = float(fields[1])
                    rel_err = float(fields[2])
                    if abs(rel_err) > 1.e-6:
                        ht.print_row([fields[0], abs_err, rel_err], highlight=True)
                    else:
                        ht.print_row([fields[0], abs_err, rel_err])
            else:
                # diff region
                hf.write(line)

        if in_diff_region:
            hf.write("</pre>\n")
        else:
            ht.end_table()

        if box_error:
            hf.write("<p>number of boxes do not match</p>\n")
        if grid_error:
            hf.write("<p>grids do not match</p>\n")
        if no_bench_error:
            hf.write("<p>no corresponding benchmark found</p>\n")
        if variables_error:
            hf.write("<p>variables differ in files</p>\n")

    if (not test.compileTest) and failure_msg is None:
        # show any visualizations
        if test.doVis:
            if not test.png_file is None:
                hf.write("<P> \n")
                hf.write("<P><IMG SRC='{}' BORDER=0>".format(test.png_file))
        # show any analysis
        if not test.analysisOutputImage == "":
            hf.write("<P> \n")
            hf.write("<P><IMG SRC='%s' BORDER=0>" % (test.analysisOutputImage) )

    # close
    hf.write("</div></body>\n")
    hf.write("</html>\n")
    hf.close()

    # switch back to the original directory
    os.chdir(current_dir)
def report_this_test_run(suite, make_benchmarks, note, update_time,
                         test_list, test_file):
    """ generate the master page for a single run of the test suite """

    # get the current directory
    current_dir = os.getcwd()

    # switch to the web directory and open the report file
    os.chdir(suite.full_web_dir)

    # aggregate timings; fall back to -1 ("unknown") if any test is
    # missing one
    try:
        build_time = sum([q.build_time for q in test_list])
    except:
        build_time = -1

    try:
        wall_time = sum([q.wall_time for q in test_list])
    except:
        wall_time = -1

    # keep track of the number of tests that passed and the number that failed
    num_failed = 0
    num_passed = 0

    #--------------------------------------------------------------------------
    # generate the HTML page for this run of the test suite
    #--------------------------------------------------------------------------

    # always create the css (in case it changes)
    create_css()

    # create the master web page
    hf = open("index.html", 'w')

    new_head = HTML_HEADER + r"""<CENTER><H1><A HREF="../">@TESTDIR@</A> / @TESTNAME@</H1></CENTER>"""

    new_head = new_head.replace("@TESTDIR@", suite.suiteName)
    new_head = new_head.replace("@TESTNAME@", suite.test_dir)

    hf.write(new_head)

    if not note == "":
        hf.write("<p><b>Test run note:</b><br><font color=\"gray\">%s</font>\n" % (note) )

    if not make_benchmarks is None:
        hf.write("<p><b>Benchmarks updated</b><br>comment: <font color=\"gray\">{}</font>\n".format(make_benchmarks) )

    hf.write("<p><b>test input parameter file:</b> <A HREF=\"%s\">%s</A>\n" %
             (test_file, test_file) )

    if build_time > 0:
        hf.write("<p><b>combined build time for all tests:</b> {} s\n".format(build_time))

    if wall_time > 0:
        hf.write("<p><b>wall clock time for all tests:</b> {} s\n".format(wall_time))

    # git info lists
    any_update = any([suite.repos[t].update for t in suite.repos])

    if any_update and not update_time == "":
        hf.write("<p><b>Git update was done at: </b>%s\n" % (update_time) )

        hf.write("<ul>\n")
        # one bullet per updated repo: name, branch/hash, changelog link
        code_str = "<li><b>{}</b><ul>" + \
                   "<li><b>branch:</b> {}; <b>hash:</b> {}</li>" + \
                   "<li><b>changelog:</b> <a href=\"{}\">{}</a></li></ul></li>"

        for k, r in suite.repos.items():
            if r.update:
                hf.write(code_str.format(r.name, r.branch_wanted, r.hash_current,
                                         "ChangeLog.{}".format(r.name),
                                         "ChangeLog.{}".format(r.name)))
        hf.write("</ul>")

    else:
        hf.write("<p>No git update done\n")

    hf.write("<p> \n")

    # summary table
    if make_benchmarks is None:
        # normal run: wide per-test summary table, with any optional
        # job-info columns the suite configured
        special_cols = []
        if suite.summary_job_info_field1 != "":
            special_cols.append(suite.summary_job_info_field1)
        if suite.summary_job_info_field2 != "":
            special_cols.append(suite.summary_job_info_field2)
        if suite.summary_job_info_field3 != "":
            special_cols.append(suite.summary_job_info_field3)

        cols = ["test name", "dim", "compare plotfile",
                "# levels", "MPI procs", "OMP threads", "OpenACC", "debug",
                "compile", "restart"] + special_cols + ["build time", "wall time", "result"]
        ht = HTMLTable(hf, columns=len(cols), divs=["summary"])
        ht.start_table()
        ht.header(cols)
    else:
        # benchmark-update run: narrow three-column table
        ht = HTMLTable(hf, columns=3, divs=["summary"])
        ht.start_table()
        ht.header(["test name", "result", "comment"])

    # loop over the tests and add a line for each
    for test in test_list:
        if make_benchmarks is None:
            # check if it passed or failed by re-reading the per-test
            # status file written by report_single_test()
            status_file = "%s.status" % (test.name)
            status = None
            with open(status_file, 'r') as sf:
                for line in sf:
                    if line.find("PASSED") >= 0:
                        status = "passed"
                        td_class = "passed-slowly" if "SLOWLY" in line else "passed"
                        num_passed += 1
                    elif line.find("COMPILE FAILED") >= 0:
                        status = "compile fail"
                        td_class = "compfailed"
                        num_failed += 1
                    elif line.find("CRASHED") >= 0:
                        status = "crashed"
                        td_class = "crashed"
                        num_failed += 1
                    elif line.find("FAILED") >= 0:
                        status = "failed"
                        td_class = "failed"
                        num_failed += 1
                    if status is not None:
                        break

            row_info = []
            row_info.append("<a href=\"{}.html\">{}</a>".format(test.name, test.name))
            row_info.append(test.dim)
            row_info.append("<div class='small'>{}</div>".format(test.compare_file_used))
            if not test.nlevels is None:
                row_info.append(test.nlevels)
            else:
                row_info.append("")
            # MPI ?
            if test.useMPI:
                row_info.append("✓ ({})".format(test.numprocs))
            else:
                row_info.append("")
            # OMP ?
            if test.useOMP:
                row_info.append("✓ ({})".format(test.numthreads))
            else:
                row_info.append("")
            # OpenACC ?
            if test.acc:
                row_info.append("✓")
            else:
                row_info.append("")
            # debug ?
            if test.debug:
                row_info.append("✓")
            else:
                row_info.append("")
            # compile ?
            if test.compileTest:
                row_info.append("✓")
            else:
                row_info.append("")
            # restart ?
            if test.restartTest:
                row_info.append("✓")
            else:
                row_info.append("")

            # special columns
            if suite.summary_job_info_field1 != "":
                row_info.append("<div class='small'>{}</div>".format(
                    test.job_info_field1))
            if suite.summary_job_info_field2 != "":
                row_info.append("<div class='small'>{}</div>".format(
                    test.job_info_field2))
            if suite.summary_job_info_field3 != "":
                row_info.append("<div class='small'>{}</div>".format(
                    test.job_info_field3))

            # build time
            row_info.append("{:.3f} s".format(test.build_time))

            # wallclock time
            row_info.append("{:.3f} s".format(test.wall_time))

            # result
            row_info.append((status.upper(), "class='{}'".format(td_class)))

            ht.print_row(row_info)

        else:
            # tests that don't produce benchmarks are skipped entirely
            if test.restartTest: continue
            if test.compileTest: continue
            if test.selfTest: continue

            # the benchmark was updated -- find the name of the new benchmark file
            benchStatusFile = "%s.status" % (test.name)
            bench_file = "none"
            with open(benchStatusFile, 'r') as bf:
                for line in bf:
                    index = line.find("file:")
                    if index >= 0:
                        bench_file = line[index+5:]
                        break

            row_info = []
            row_info.append("{}".format(test.name))
            if not bench_file == "none":
                row_info.append(("BENCHMARK UPDATED", "class='benchmade'"))
                row_info.append("new benchmark file is {}".format(bench_file))
            else:
                row_info.append(("BENCHMARK NOT UPDATED", "class='failed'"))
                row_info.append("compilation or execution failed")
            ht.print_row(row_info)

    ht.end_table()

    # Test coverage
    if suite.reportCoverage: report_coverage(hf, suite)

    # close
    hf.write("</div></body>\n")
    hf.write("</html>\n")
    hf.close()

    #--------------------------------------------------------------------------
    # write out a status file for all the tests
    #--------------------------------------------------------------------------
    status_file = os.path.normpath(suite.test_dir) + ".status"
    with open(status_file, 'w') as sf:
        if make_benchmarks is None:
            if num_failed == 0:
                sf.write("ALL PASSED\n")
            elif num_failed > 0 and num_passed > 0:
                sf.write("SOME FAILED\n")
            else:
                sf.write("ALL FAILED\n")
        else:
            sf.write("BENCHMARKS UPDATED\n")

    # switch back to the original directory
    os.chdir(current_dir)

    return num_failed
def report_coverage(html_file, suite):
    """Write the parameter-coverage summary table to an open report file.

    Emits one row for overall coverage and one for the
    nonspecific-parameters-only coverage.
    """
    # FIX: renamed from `vars`, which shadowed the builtin.
    # NOTE(review): `all` means a coverage fraction of exactly 0 (or a
    # zero total) also suppresses the table; behavior preserved.
    coverage_data = (suite.covered_frac, suite.total,
                     suite.covered_nonspecific_frac, suite.total_nonspecific)
    if not all(coverage_data):
        return

    cols = ["coverage type", "coverage %", "# covered", "# uncovered"]
    ht = HTMLTable(html_file, len(cols), divs=["summary"])
    ht.start_table()
    ht.header(cols)

    # one row per coverage flavor: (link target, label, fraction, total);
    # factored out of the previously duplicated row-building code
    rows = [(coverage.SPEC_FILE, "overall",
             suite.covered_frac, suite.total),
            (coverage.NONSPEC_FILE, "nonspecific only",
             suite.covered_nonspecific_frac, suite.total_nonspecific)]
    for target, label, frac, total in rows:
        covered = int(round(total * frac))
        uncovered = total - covered
        row_info = []
        row_info.append("<a href=\"{}\">{}</a>".format(target, label))
        row_info.append("{:.2f}%".format(100 * frac))
        row_info.append("{}".format(covered))
        row_info.append("{}".format(uncovered))
        ht.print_row(row_info)

    ht.end_table()
def report_all_runs(suite, active_test_list):
    """generate the top-level index of all test-suite runs: one row per
    run date, one column per test."""

    # clamp the vertical-header height to a reasonable range
    table_height = min(max(suite.lenTestName, 4), 18)

    os.chdir(suite.webTopDir)

    create_css(table_height=table_height)

    valid_dirs, all_tests = suite.get_run_history(active_test_list)

    if suite.do_timings_plots: suite.make_timing_plots(valid_dirs=valid_dirs, all_tests=all_tests)

    #--------------------------------------------------------------------------
    # generate the HTML
    #--------------------------------------------------------------------------
    title = "%s regression tests" % (suite.suiteName)

    hf = open("index.html", "w")

    header = MAIN_HEADER.replace("@TITLE@", title).replace("@SUBTITLE@", suite.sub_title)

    if suite.goUpLink:
        header2 = header.replace("<!--GOUPLINK-->", '<a href="../">GO UP</a>')
        hf.write(header2)
    else:
        hf.write(header)

    hf.write("<P><TABLE class='maintable'>\n")

    # write out the header
    hf.write("<TR><TH ALIGN=CENTER>date</TH>\n")
    for test in all_tests:
        hf.write("<TH><div class='verticaltext'>%s</div></TH>\n" % (test))
    hf.write("</TR>\n")

    # optional row of links to per-test timing plots
    if suite.do_timings_plots:
        hf.write("<tr><td class='date'>plots</td>")
        for t in all_tests:
            plot_file = "{}-timings.{}".format(t, suite.plot_ext)
            if os.path.isfile(plot_file):
                hf.write("<TD ALIGN=CENTER title=\"{} timings plot\"><H3><a href=\"{}\"><i class=\"fa fa-line-chart\"></i></a></H3></TD>\n".format(t, plot_file))
            else:
                hf.write("<TD ALIGN=CENTER><H3> </H3></TD>\n")
        hf.write("</TR>\n")

    # loop over all the test runs
    for tdir in valid_dirs:

        # first look to see if there are any valid tests at all --
        # otherwise we don't do anything for this date
        valid = 0
        for test in all_tests:
            status_file = "{}/{}/{}.status".format(suite.webTopDir, tdir, test)
            if os.path.isfile(status_file):
                valid = 1
                break
        if not valid: continue

        # did we run on a non-default branch?
        try: bf = open("{}/{}/branch.status".format(suite.webTopDir, tdir), "r")
        except:
            branch_mark = ""
        else:
            branch_mark = r"∗"
            bf.close()

        # write out the directory (date)
        hf.write("<TR><TD class='date'><SPAN CLASS='nobreak'><A class='main' HREF=\"{}/index.html\">{} </A>{}</SPAN></TD>\n".format(tdir, tdir, branch_mark) )

        for test in all_tests:
            # look to see if the current test was part of this suite run
            status_file = "{}/{}/{}.status".format(suite.webTopDir, tdir, test)
            status = None
            if os.path.isfile(status_file):
                with open(status_file, 'r') as sf:
                    # map the status-file keyword to a css class and a
                    # short glyph for the cell
                    for line in sf:
                        if line.find("PASSED") >= 0:
                            if "SLOWLY" not in line: status, emoji = "passed", ":)"
                            else: status, emoji = "passed-slowly", ":]"
                        elif line.find("COMPILE FAILED") >= 0:
                            status = "compfailed"
                            emoji = ":("
                        elif line.find("CRASHED") >= 0:
                            status = "crashed"
                            emoji = "xx"
                        elif line.find("FAILED") >= 0:
                            status = "failed"
                            emoji = "! "
                        elif line.find("benchmarks updated") >= 0:
                            status = "benchmade"
                            emoji = "U"
                        if status is not None:
                            break

            # write out this test's status
            if status is None:
                hf.write("<td> </td>\n")
            elif status == "benchmade":
                hf.write("<td align=center title=\"{}\" class=\"{}\"><h3>U</h3></td>\n".format(
                    test, status))
            else:
                hf.write("<td align=center title=\"{}\" class=\"{}\"><h3><a href=\"{}/{}.html\" class=\"{}\">{}</a></h3></td>\n".format(
                    test, status, tdir, test, status, emoji))

        hf.write("</TR>\n\n")

    hf.write("</TABLE>\n")

    # close
    hf.write("</BODY>\n")
    hf.write("</HTML>\n")
    hf.close()
|
from model.game_object import GameObject
from model.missile import AbsMissile
from utils.geometry import Position
from visitor import Visitor
from abc import abstractmethod
from config import ENEMY_A_IMG
class AbstEnemy(GameObject):
    """Abstract base for enemy game objects.

    Tracks current vs. original health, carries the score value awarded for
    this enemy, and participates in the Visitor pattern via accept_visitor().
    Concrete subclasses implement get_hit_by() to apply missile damage.
    """
    def __init__(self, position: Position, icon_path: str, health: int, score_points: int):
        super().__init__(position, icon_path)
        # Current health, reduced by get_hit_by() in subclasses.
        self.health = health
        # Starting health, kept so get_health_color() can compute the ratio.
        self.orig_health = health
        # Score value of this enemy -- presumably consumed by callers on kill.
        self.score_points = score_points
    def accept_visitor(self, visitor: Visitor):
        """Double-dispatch hook: forward to the visitor's enemy handler."""
        visitor.visit_enemy(self)
    def is_dead(self) -> bool:
        """Return True once health has dropped to zero or below."""
        return self.health <= 0
    def get_health_color(self) -> str:
        """Map remaining-health percentage to a display color.

        >66.7%: "black"; >33.4%: "orange"; >0%: "red".
        NOTE(review): falls through and returns None when the enemy is dead
        (ratio <= 0) or if ratio > 100 -- confirm callers handle None.
        """
        ratio = self.health / self.orig_health * 100
        if 100.0 >= ratio > 66.7:
            return "black"
        if 66.7 >= ratio > 33.4:
            return "orange"
        if 33.4 >= ratio > 0:
            return "red"
    @abstractmethod
    def get_hit_by(self, missile: AbsMissile):
        """Apply damage from *missile*; implemented by concrete enemy types."""
        pass
class EnemyA(AbstEnemy):
    """Concrete enemy of type A, rendered with the ENEMY_A_IMG icon."""

    def __init__(self, position: Position, health: int, score_points: int):
        super().__init__(position, ENEMY_A_IMG, health, score_points)

    def get_hit_by(self, missile: AbsMissile):
        """Reduce this enemy's health by the missile's damage value."""
        self.health = self.health - missile.damage
|
# KidsCanCode - Game Development with Pygame video series
# Shmup game - part 1
# Video link: https://www.youtube.com/watch?v=nGufy7weyGY
# Player sprite and movement
import pygame as pg
from pygame.sprite import Sprite
import random
from os import path
from pathlib import Path
# Resolve asset locations.  NOTE(review): bg_img (pathlib) is only printed,
# the actual loads below use game_dir string concatenation -- one of the two
# mechanisms is redundant.
img_folder = Path("img")
bg_img = img_folder / "bg.png"
print(bg_img)
game_dir = path.join(path.dirname(__file__))
print(game_dir)
# global variables
WIDTH = 480
HEIGHT = 600
FPS = 60
POWERUP_TIME = 5000
# define colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
# initialize pygame and create window
pg.init()
pg.mixer.init()
screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption("Shmup!")
clock = pg.time.Clock()
# Load all game graphics
background = pg.image.load(path.join(game_dir + "/img/bg.png")).convert()
background_rect = background.get_rect()
# Second rect of the same background, used for seamless vertical scrolling.
background_rect2 = background.get_rect()
player_img = pg.image.load(path.join(game_dir + "/img/player.png")).convert()
mob_img = pg.image.load(path.join(game_dir + "/img/mob.png")).convert()
lazer_img = pg.image.load(path.join(game_dir + "/img/lazer.png")).convert()
spit_img = pg.image.load(path.join(game_dir + "/img/spit.png")).convert()
powerup_images = {}
powerup_images['shield'] = pg.image.load(path.join(game_dir + "/img/power.png")).convert()
# NOTE(review): 'gun' loads the same power.png as 'shield' -- confirm a
# distinct gun sprite was not intended.
powerup_images['gun'] = pg.image.load(path.join(game_dir + "/img/power.png")).convert()
# Small ship icon used by draw_lives() in the HUD.
player_mini_img = pg.transform.scale(player_img, (25, 19))
player_mini_img.set_colorkey(GREEN)
print(player_mini_img)
font_name = pg.font.match_font('arial')
def draw_text(surf, text, size, x, y):
    """Render *text* in white at *size* and blit it with its top edge centered on (x, y)."""
    rendered = pg.font.Font(font_name, size).render(text, True, WHITE)
    rect = rendered.get_rect()
    rect.midtop = (x, y)
    surf.blit(rendered, rect)
def draw__health(surf, x, y, w):
    """Draw a health bar at (x, y): a red fill *w* px wide under a 100x20 white outline.

    (Name kept as-is, double underscore included, so existing callers keep working.)
    """
    pg.draw.rect(surf, RED, pg.Rect(x, y, w, 20))
    pg.draw.rect(surf, WHITE, pg.Rect(x, y, 100, 20), 2)
def draw_shield_bar(surf, x, y, pct):
    """Draw the shield HUD: a green fill (pct% of 100 px) inside a white 100x10 outline."""
    bar_length, bar_height = 100, 10
    pct = max(pct, 0)  # clamp negative shield to an empty bar
    fill_width = (pct / 100) * bar_length
    pg.draw.rect(surf, GREEN, pg.Rect(x, y, fill_width, bar_height))
    pg.draw.rect(surf, WHITE, pg.Rect(x, y, bar_length, bar_height), 2)
def draw_lives(surf, x, y, lives, img):
    """Blit *lives* copies of *img*, spaced 30 px apart going left from (x, y)."""
    for n in range(lives):
        rect = img.get_rect()
        rect.topleft = (x - 30 * n, y)
        surf.blit(img, rect)
class Player(Sprite):
    """The player's ship: A/D keys move it horizontally, pew() fires lazers."""
    def __init__(self):
        Sprite.__init__(self)
        # self.image = pg.Surface((50,40))
        self.image = pg.transform.scale(player_img, (50, 40))
        self.image.set_colorkey(GREEN)
        # self.image = player_img
        # self.image.fill(GREEN)
        self.rect = self.image.get_rect()
        self.rect.centerx = WIDTH / 2
        self.rect.bottom = HEIGHT -10
        self.speedx = 0
        # speedy only drives the background scroll in the main loop;
        # the ship itself never moves vertically.
        self.speedy = 10
        # Weapon level: >= 2 adds two wing lazers in pew().
        self.power = 1
        self.shield = 100
        self.lives = 10
    def update(self):
        """Poll the keyboard each frame and move horizontally (A = left, D = right)."""
        self.speedx = 0
        # self.speedy = 0
        keystate = pg.key.get_pressed()
        if keystate[pg.K_a]:
            self.speedx = -8
        if keystate[pg.K_d]:
            self.speedx = 8
        # if keystate[pg.K_SPACE]:
        #     self.pew()
        # if keystate[pg.K_w]:
        #     self.speedy = -8
        # if keystate[pg.K_s]:
        #     self.speedy = 8
        self.rect.x += self.speedx
        # self.rect.y += self.speedy
    # taken and modified from Bradfield - power up method
    def powerup(self):
        """Raise the weapon level and record when it happened.

        NOTE(review): power_time is stored but nothing in this file expires
        the power-up (POWERUP_TIME is unused) -- confirm intended.
        """
        self.power += 1
        self.power_time = pg.time.get_ticks()
    def pew(self):
        """Fire a lazer from the nose; at power >= 2 also fire one from each wing."""
        lazer = Lazer(self.rect.centerx, self.rect.top)
        all_sprites.add(lazer)
        lazers.add(lazer)
        # print('trying to shoot..')
        if self.power >= 2:
            lazer1 = Lazer(self.rect.left, self.rect.centery)
            lazer2 = Lazer(self.rect.right, self.rect.centery)
            all_sprites.add(lazer1)
            all_sprites.add(lazer2)
            lazers.add(lazer1)
            lazers.add(lazer2)
        # shoot_sound.play()
class Pow(Sprite):
    """A falling power-up ('shield' or 'gun', chosen at random) dropped by mobs."""
    def __init__(self, center):
        Sprite.__init__(self)
        self.type = random.choice(['shield', 'gun'])
        self.image = powerup_images[self.type]
        self.image.set_colorkey(BLACK)
        self.rect = self.image.get_rect()
        self.rect.center = center
        self.speedy = 5
        print(self.type)
    def update(self):
        """Fall straight down; despawn once fully below the screen."""
        self.rect.y += self.speedy
        # kill if it moves off the bottom of the screen
        if self.rect.top > HEIGHT:
            self.kill()
class Mob(Sprite):
    """An enemy: drifts sideways (bouncing off the edges), steps downward, and spits."""
    def __init__(self):
        Sprite.__init__(self)
        # self.image = pg.Surface((30,30))
        self.image = mob_img
        self.image.set_colorkey(BLACK)
        # self.image.fill(YELLOW)
        self.rect = self.image.get_rect()
        self.rect.x = random.randrange(0, WIDTH - self.rect.width)
        self.rect.y = random.randrange(0, 250)
        # NOTE(review): speedy is set but unused -- update() moves down by a
        # fresh random step instead; confirm which is intended.
        self.speedy = random.randrange(1, 10)
        self.speedx = random.randrange(1, 8)
        self.hitpoints = 10
    def pew(self):
        """Fire a Spit projectile from this mob's top edge."""
        spit = Spit(self.rect.centerx, self.rect.top)
        all_sprites.add(spit)
        spits.add(spit)
        # print('trying to shoot..')
    def update(self):
        # self.health_image = pg.Surface(int(self.hitpoints), int(10))
        # self.health_rect.x = self.x
        # self.health_rect.y = self.y
        self.rect.x += self.speedx
        # ~1% chance per frame to shoot.
        if random.random() > 0.99:
            self.pew()
        # self.rect.y += self.speedy
        # Bounce off the horizontal edges.
        if self.rect.x > WIDTH or self.rect.x < 0:
            self.speedx*=-1
        # Random downward step; wrap back to the top when off the bottom.
        self.rect.y += random.randrange(5,25)
        if self.rect.top > HEIGHT + 10:
            self.rect.y = 0
        if self.hitpoints <= 0:
            self.kill()
class Lazer(Sprite):
    """A player projectile that travels straight up and despawns above the screen."""

    def __init__(self, x, y):
        super().__init__()
        self.image = lazer_img
        self.rect = self.image.get_rect()
        # Spawn with the bottom edge at y, horizontally centered on x.
        self.rect.midbottom = (x, y)
        self.speedy = -10

    def update(self):
        self.rect.y += self.speedy
        if self.rect.y < 0:
            self.kill()
class Spit(Sprite):
    """An enemy projectile that travels straight down toward the player."""

    def __init__(self, x, y):
        Sprite.__init__(self)
        self.image = spit_img
        self.rect = self.image.get_rect()
        # Spawn with the bottom edge at y, horizontally centered on x.
        self.rect.bottom = y
        self.rect.centerx = x
        self.speedy = 10

    def update(self):
        self.rect.y += self.speedy
        # Bug fix: the original killed on rect.y < 0, but a spit moves DOWN
        # (speedy = +10), so that condition could never become true once
        # spawned on screen and off-screen spits accumulated forever.
        # Despawn once fully below the bottom edge instead.
        if self.rect.top > HEIGHT:
            self.kill()
# Sprite groups: master draw/update group plus per-kind collision groups.
all_sprites = pg.sprite.Group()
mobs = pg.sprite.Group()
lazers = pg.sprite.Group()
spits = pg.sprite.Group()
powerups = pg.sprite.Group()
player = Player()
all_sprites.add(player)
# Initial wave of 8 mobs.
for i in range(8):
    m = Mob()
    all_sprites.add(m)
    mobs.add(m)
# game loop
running = True
while running:
    # do stuff over and over
    clock.tick(FPS)
    for event in pg.event.get():
        # check for window close
        if event.type == pg.QUIT:
            running = False
        elif event.type == pg.KEYDOWN:
            if event.key == pg.K_SPACE:
                player.pew()
    # Update the sprites in the game
    all_sprites.update()
    # Player touching a mob ends the game.
    hits = pg.sprite.spritecollide(player, mobs, False)
    if hits:
        running = False
    # Enemy spit drains the shield (spit is NOT removed -- it keeps draining
    # each frame it overlaps; confirm intended).
    hits = pg.sprite.spritecollide(player, spits, False)
    if hits:
        player.shield -= 1
    # Lazer vs mob: both are killed; 10% chance the mob drops a power-up.
    hits = pg.sprite.groupcollide(mobs, lazers, True, True)
    for hit in hits:
        if random.random() > 0.9:
            # NOTE: `pow` shadows the builtin of the same name.
            pow = Pow(hit.rect.center)
            all_sprites.add(pow)
            powerups.add(pow)
    # Respawn a fresh wave once all mobs are gone.
    if len(mobs) == 0:
        for i in range(8):
            m = Mob()
            all_sprites.add(m)
            mobs.add(m)
    # Hit-point damage pass: lazers overlapping a mob chip 1 hp per frame
    # (lazer not removed here).
    for m in mobs:
        lhits = pg.sprite.spritecollide(m, lazers, False)
        if lhits:
            m.hitpoints-=1
            # print(m.hitpoints)
            if random.random() > 0.9:
                # NOTE(review): `hit` here is stale -- it is left over from the
                # groupcollide loop above and is a NameError if that loop never
                # ran; the drop position likely should be m.rect.center.
                pow = Pow(hit.rect.center)
                all_sprites.add(pow)
                powerups.add(pow)
    # check to see if player hit a powerup
    hits = pg.sprite.spritecollide(player, powerups, True)
    for hit in hits:
        if hit.type == 'shield':
            player.shield += random.randrange(10, 30)
            # shield_sound.play()
            if player.shield >= 100:
                player.shield = 100
        if hit.type == 'gun':
            player.powerup()
            # power_sound.play()
    # Scroll two copies of the background downward for an endless scroll.
    background_rect2.y = background_rect.y - 600
    background_rect.y+= player.speedy
    background_rect2.y+= player.speedy
    # NOTE(review): `>- 0` parses as `> -0`, i.e. `> 0` -- confirm intended.
    if background_rect2.y >- 0:
        background_rect.y = background_rect.y -600
    # Draw or render
    screen.fill(RED)
    screen.blit(background, background_rect)
    screen.blit(background, background_rect2)
    draw_shield_bar(screen, 5, 5, player.shield)
    draw_lives(screen, WIDTH - 100, 5, player.lives, player_mini_img)
    all_sprites.draw(screen)
    pg.display.flip()
pg.quit()
|
import time
import torch
import torch.nn as nn

# Micro-benchmark: repeatedly time (1) LogSoftmax on the GPU and (2) building a
# one-hot target matrix on the CPU via scatter_.
# NOTE(review): without torch.cuda.synchronize() the GPU timing mostly measures
# kernel launch, not execution -- confirm whether that is intended.
logsoftmax = nn.LogSoftmax(dim=1)  # stateless, so hoisted out of the loop
while True:
    inputs = torch.randn(64, 751).cuda(0)
    targets = torch.randint(0, 751, (64,), dtype=torch.int64).cuda(0)
    s_1 = time.time()
    log_probs = logsoftmax(inputs)
    print("First step: ", time.time() - s_1)
    s_2 = time.time()
    targets = torch.zeros(log_probs.size()).scatter_(
        1, targets.unsqueeze(1).data.cpu(), 1)
    # Bug fix: the original printed time.time() - s_1 here, so the "Second
    # step" figure double-counted the first step's duration.
    print("Second step: ", time.time() - s_2)
|
from django.urls import path
from .views import pdf_upload_view, pdf_upload, pdf_explore, single_pdf_view, search_results_pdf
from .views import delete_tag_from_image, edit_page_view, add_tag_to_image, update_caption_pdf_image, search_pdf_view
urlpatterns = [
    # PDF upload: form view plus the POST handler.
    path('pdf_upload_view/', pdf_upload_view, name="upload_pdf_view"),
    path('browse_pdfs/', pdf_explore, name='explore_pdfs'),
    path('upload_pdf/', pdf_upload, name="pdf_upload"),
    # Viewing a single PDF and editing its page images / tags / captions.
    path('single_view_pdf/<int:pdf_id>/', single_pdf_view),
    path('edit_image_pdf/<int:image_id>/', edit_page_view, name="edit_pdf_image_view"),
    path('delete_tag_pdf_image/<int:tag_id>/<int:image_id>/',delete_tag_from_image),
    path('add_tag_image_pdf/<int:image_id>/', add_tag_to_image),
    path('update_caption_image_pdf/<int:image_id>/', update_caption_pdf_image),
    # Search form and results.
    path('search_pdfs/', search_pdf_view, name="search_pdfs"),
    path('search_results/', search_results_pdf, name="search_results"),
]
# -*- encoding:utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from abc import ABCMeta, abstractmethod
import logging
import time
from . import ABuXqFile
from ..CoreBu import env
# noinspection PyUnresolvedReferences
from ..CoreBu.ABuFixes import map, reduce, filter
from .ABuXqApi import BASE_XQ_HQ_URL
from .ABuXqApi import BASE_XQ_STOCK_INFO
from ..ExtBu import six
__author__ = '小青蛙'
__weixin__ = 'abu_quant'
def _bs4_html(content):
    """Parse *content* with BeautifulSoup using the lxml backend.

    :param content: html text
    :return: BeautifulSoup document
    """
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(content, 'lxml')
    return soup
def _xpath(content):
    """Build an lxml element tree from *content* for xpath queries.

    :param content: html text
    :return: lxml root element (selector)
    """
    from lxml import etree
    return etree.HTML(content)
class BaseXQCrawlBrower(six.with_metaclass(ABCMeta, object)):
    """
    Fetch page data through Chrome's automation (selenium webdriver) interface.
    (translated from Chinese)
    """
    def __init__(self, base_url):
        self._base_url = base_url
        # The chromedriver path must be configured in abupy's env beforehand.
        if env.g_crawl_chrome_driver is not None:
            self.driver_path = env.g_crawl_chrome_driver
        else:
            raise RuntimeError('driver_path error!!!, abupy.CoreBu.ABuEnv.g_crawl_chrome_driver must be right')
        # noinspection PyUnresolvedReferences
        from selenium.webdriver.support import ui
        # noinspection PyUnresolvedReferences
        from selenium import webdriver
        self.driver = webdriver.Chrome(self.driver_path)
        # Shared explicit wait (10 s timeout) for subclasses.
        self.wait = ui.WebDriverWait(self.driver, 10)
    @abstractmethod
    def _crawl_imp(self, *args, **kwargs):
        """Subclass hook: the actual crawl logic run by crawl()."""
        pass
    def get(self, url):
        """Navigate the browser to *url*."""
        self.driver.get(url)
    @property
    def content(self):
        """Current page HTML source."""
        return self.driver.page_source
    def crawl(self, *args, **kwargs):
        """
        Open the base url, maximize the window, and run _crawl_imp.
        Exceptions are logged and swallowed; use as a context manager so the
        browser quits afterwards (multi-process crawling starts one Chrome
        instance per process).  (translated from Chinese)
        :param args:
        :param kwargs:
        :return: result of _crawl_imp, or None on error
        """
        ret = None
        try:
            self.driver.get(self._base_url)
            self.driver.maximize_window()
            ret = self._crawl_imp(*args, **kwargs)
        except Exception as e:
            logging.exception(e)
        return ret
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the Chrome instance.
        self.driver.quit()
    def _scroll_to_bottom(self, element):
        """Scroll the window by the element's location so it becomes visible."""
        loc = element.location
        self.driver.execute_script('window.scrollBy({},{})'.format(loc['x'], loc['y']))
class BaseHQCrawlBrower(BaseXQCrawlBrower):
    """Crawler rooted at the xueqiu quote (HQ) page; subclasses append a sub-path."""

    def __init__(self, url):
        super(BaseHQCrawlBrower, self).__init__(BASE_XQ_HQ_URL)
        self._base_url += url

    def _crawl_imp(self, *args, **kwargs):
        """No-op here; concrete subclasses supply the crawl logic."""
        pass
class NavHQCrawlBrower(BaseHQCrawlBrower):
    """Crawls the HQ landing page and parses its industry navigation tree."""

    def __init__(self):
        super(NavHQCrawlBrower, self).__init__('')

    def _crawl_imp(self, *args, **kwargs):
        page_html = self.driver.page_source
        return _parse_nav(page_html)
class StockListCrawlBrower(BaseHQCrawlBrower):
    """Crawls a paginated xueqiu stock-list page, collecting (names, symbols)."""
    def _ensure_max_page_size(self):
        """
        Switch the list to its largest per-page size so there are fewer pages
        and therefore fewer network requests.  (translated from Chinese)
        """
        max_page_tag = self.driver.find_element_by_xpath('//*[@id="stockList-header"]/div[2]/a[3]')
        max_page_tag.click()
        # Poll up to 30 s until the larger page size has taken effect.
        time_out = 30
        while time_out:
            time.sleep(1)
            time_out -= 1
            _, total = self._curr_total_page()
            # Until the max size takes effect.  (translated from Chinese)
            if total == 1 or self._curr_page_counts() == int(max_page_tag.text):
                break
    def _curr_page_counts(self):
        """Return the number of stock rows on the currently displayed page."""
        selector = _xpath(self.content)
        items = selector.xpath('//*[@id="stockList"]/div[1]/table/tbody/tr')
        return len(items)
    def _curr_total_page(self):
        """Return (current_page, total_pages); both are 1 when no pager exists."""
        selector = _xpath(self.content)
        pages = selector.xpath('//*[@id="pageList"]/div/ul/li/a/text()')
        cur_page = selector.xpath('//*[@id="pageList"]/div/ul/li[@class="active"]/a/text()')
        # The last entry of `pages` is the total; otherwise cur and total are 1.
        # (translated from Chinese)
        if len(pages):
            return int(cur_page[0]), int(pages[-1])
        else:
            return 1, 1
    def _curr_page_items(self):
        """Return (names, codes) for every row of the current page."""
        selector = _xpath(self.content)
        # code = selector.xpath('//*[@id="stockList"]/div[1]/table/tbody/tr/td[1]/a/text()')
        # name = selector.xpath('//*[@id="stockList"]/div[1]/table/tbody/tr/td[2]/a/text()')
        # An <a>'s text() may be missing and xpath drops missing nodes, which
        # would misalign code and name.  Selecting the <a> elements themselves
        # keeps empty texts as placeholders so the lists stay 1:1.
        # (translated from Chinese)
        code = selector.xpath('//*[@id="stockList"]/div[1]/table/tbody/tr/td[1]/a')
        name = selector.xpath('//*[@id="stockList"]/div[1]/table/tbody/tr/td[2]/a')
        code = list(map(lambda a: a.text, code))
        name = list(map(lambda a: a.text, name))
        return name, code
    def _goto_next_page(self):
        """Advance the pager by clicking its `next` link via JS."""
        next_page = self.driver.find_element_by_xpath('//*[@id="pageList"]/div/ul/li[@class="next"]/a')
        if next_page is not None:
            # Wait until next_page is enabled; clicking too early can raise.
            # (translated from Chinese)
            self.wait.until(lambda dr: next_page.is_enabled())
            self.driver.execute_script('arguments[0].click()', next_page)
            time.sleep(1)
    def _crawl_imp(self, *args, **kwargs):
        """Walk all pages, accumulating and returning (names, symbols)."""
        self._ensure_max_page_size()
        cur_page, total_page = self._curr_total_page()
        names = []
        symbols = []
        # page index start 1
        for page in range(1, total_page + 1):
            # Wait for the table of the current page to be rendered.
            self.wait.until(lambda dr: dr.find_element_by_xpath('//*[@id="stockList"]/div[1]/table').is_displayed())
            cur_page, _ = self._curr_total_page()
            temp_names, temp_symbols = self._curr_page_items()
            names += temp_names
            symbols += temp_symbols
            if page < total_page:
                self._goto_next_page()
            else:
                break
        return names, symbols
    def __init__(self, url):
        super(StockListCrawlBrower, self).__init__(url)
def _parse_nav(content):
    """Parse the page's industry navigation sidebar into a nested dict.

    Shape: {first_nav_name: {second_nav_name: [{title: href}, ...] or href}},
    merged across every top-level `.industry-nav > div` section.
    """
    soup = _bs4_html(content)
    nav_tags = soup.select('.industry-nav > div')
    def parse_nav_tag(tag):
        # Build the dict for a single top-level nav <div>.
        nav = {}
        first_nav = tag.select('.first-nav > span')
        if len(first_nav) > 0:
            second_nav = tag.select('.second-nav > li')
            nav[first_nav[0].string] = {}
            for nav_2 in second_nav:
                a_tag = nav_2.select('a')
                if len(a_tag) <= 0:
                    continue
                third_nav = nav_2.select('.third-nav')
                if len(third_nav) > 0:
                    # With a third level, the second-level name is the raw text
                    # between the stripped <li> prefix and the first <i> tag.
                    second_nav_name = str(nav_2).replace('<li><i class="list-style"></i>', '')
                    second_nav_name = second_nav_name[: second_nav_name.index('<i')]
                    nav[first_nav[0].string][second_nav_name] = list(map(lambda a: {a.get('title'): a.get('href')},
                                                                         a_tag))
                else:
                    # No third level: map the second-level title straight to its href.
                    nav[first_nav[0].string][a_tag[0].get('title')] = a_tag[0].get('href')
        return nav
    def merge(dict1, dict2):
        # Shallow merge; on key collision dict2 wins.
        return dict(dict1, **dict2)
    return reduce(lambda d1, d2: merge(d1, d2), map(lambda tag: parse_nav_tag(tag), nav_tags))
class StockInfoListBrower(BaseXQCrawlBrower):
    """Visits each symbol's info page and caches the parsed company details."""
    def __init__(self, market, symbols):
        super(StockInfoListBrower, self).__init__(BASE_XQ_STOCK_INFO)
        self._market = market
        self._symbols = symbols
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Drop references before the base class quits the browser.
        self._market = None
        self._symbols = None
        super(StockInfoListBrower, self).__exit__(exc_type, exc_val, exc_tb)
    def _parse_stock_info(self):
        """Scrape one stock-info page into a flat dict of company attributes."""
        selector = _xpath(self.content)
        # Heading looks like "特斯拉(NASDAQ:TSLA)" -- company name followed by
        # (exchange:symbol).  (translated from Chinese)
        stock_name = selector.xpath('//*[@id="center"]/div[2]/div[2]/div[1]/div[1]/span[1]/strong/text()')
        company_info_p = selector.xpath('//*[@id="center"]/div[3]/div/div[2]/div/p')
        company_industry = selector.xpath('//*[@id="relatedIndustry"]/h2/a/text()')
        quate_items = selector.xpath('//*[@id="center"]/div[2]/div[2]/div[2]/table/tbody/tr/td')
        info = {}
        if len(stock_name):
            # Split "name(exchange:symbol)" on the LAST parenthesis pair.
            stock_name_info = stock_name[0]
            st = stock_name_info.rfind('(')
            en = stock_name_info.rfind(')')
            market_symbol = stock_name_info[st + 1:en]
            sp_result = market_symbol.split(':')
            if len(sp_result) == 2:
                company_name = stock_name_info[:st]
                exchange_name = sp_result[0]
                company_symbol = sp_result[1]
                info['name'] = company_name
                info['exchange'] = exchange_name
                info['symbol'] = company_symbol
        if len(company_info_p):
            # <strong>label</strong>value pairs; a following <a> replaces the
            # last label's value with the link href.
            last_key = None
            for p in company_info_p:
                for child in p.getchildren():
                    if child.tag == 'strong':
                        info[child.text] = child.tail
                        last_key = child.text
                    if child.tag == 'a':
                        info[last_key] = child.get('href')
        if len(quate_items):
            # Quote table cells: label in .text, value in the single child node.
            for item in quate_items:
                if len(item.getchildren()) == 1:
                    info[item.text] = item.getchildren()[0].text
        if len(company_industry):
            info['industry'] = company_industry[0]
        return info
    def _crawl_imp(self, *args, **kwargs):
        """Fetch and cache info for every symbol; skip ones already cached
        unless kwargs['replace'] is truthy.  Returns 'Done'."""
        for index, symbol in enumerate(self._symbols):
            try:
                if not ABuXqFile.exist_stock_info(self._market, symbol) or ('replace' in kwargs and kwargs['replace']):
                    self.get(self._base_url + symbol)
                    stock_info = self._parse_stock_info()
                    ABuXqFile.save_cache_stock_info(stock_info, self._market, symbol)
                    # Progress log every 200 symbols.
                    if (index + 1) % 200 == 0:
                        print(
                            '{}: {} {} {}/{}'.format(kwargs['process'], self._market, symbol, index + 1,
                                                     len(self._symbols)))
            except Exception as e:
                # Record the symbol that failed.  (translated from Chinese)
                ABuXqFile.error_stock_info(self._market, symbol, e)
                logging.exception(e)
        return 'Done'
|
import decimal
from flask import Blueprint, Response, jsonify, request
from sqlalchemy import text
from db import db, create_session
from helpers.auth import user_required
from model.dataset_model import Dataset
from model.percentile_model import Percentile
from model.metadata_model import Metadata
from model.network_model import Network
from model.movement_data_model import Movement_data
# Blueprint grouping all REST endpoints under the /api prefix.
api_page = Blueprint('api', __name__, url_prefix='/api')


@api_page.before_request
@user_required
def check_user():
    """Runs before every /api request; @user_required rejects anonymous users,
    so the body itself has nothing left to do."""
    pass
# Custom Rest API
@api_page.route('/dataset', methods=['GET'])
def get_dataset():
    """Return every dataset as a JSON list of dicts."""
    rows = db.session.query(Dataset).all()
    return jsonify([row.as_dict() for row in rows])
@api_page.route('/dataset/<int:id>', methods=['GET'])
def get_dataset_id(id=None):
    """Return the dataset with the given id (as a one-element JSON list).

    :param id: id of the specific dataset
    """
    if not id:
        return jsonify({})
    rows = db.session.query(Dataset).filter_by(id=id)
    return jsonify([row.as_dict() for row in rows])
@api_page.route('/dataset/user/<int:user_id>', methods=['GET'])
def get_dataset_user_id(user_id=None):
    """Return all datasets of a specific user (all datasets if no user id).

    :param user_id: id of the user
    """
    # Bug fix: the original tested ``not id`` -- the *builtin* ``id`` function,
    # which is always truthy -- so the "all datasets" branch could never run.
    if not user_id:
        datasets = db.session.query(Dataset).all()
    else:
        datasets = db.session.query(Dataset).filter_by(user_id=user_id)
    return jsonify([elem.as_dict() for elem in datasets])
@api_page.route('/dataset/<int:id>/<feature>', methods=['GET'])
def get_feature(id=None, feature=None):
    """
    Return a feature of a specific dataset as a JSON array ordered by time.

    `feature` is only ever interpolated into SQL after matching one of the
    fixed whitelists below, so the string concatenation is not injectable;
    `id` is bound as a query parameter.

    :param id : id of the specific dataset
    :param feature: feature string
    """
    # not a valid feature
    if not id or not feature:
        return jsonify({})
    # return absolute (per-animal) features from movement_data
    if feature in ['speed', 'acceleration', 'distance_centroid', 'metric_distance', 'direction', 'midline_offset']:
        stmt = '''SELECT array(
                    SELECT round(''' + feature + ''',2)::text
                    FROM movement_data
                    WHERE dataset_id = :id
                    ORDER BY "time", animal_id); '''
        result = db.engine.execute(text(stmt), id=id).fetchone()[0]
        return jsonify(result)
    # return the centroid
    if feature in ['centroid']:
        stmt = '''SELECT array(SELECT ST_asGeoJSON(centroid)::json->'coordinates'
                    FROM group_data
                    WHERE dataset_id = :id
                    ORDER BY "time") ; '''
        result = db.engine.execute(text(stmt), id=id).fetchone()[0]
        return jsonify(result)
    # return the medoid
    if feature in ['medoid']:
        stmt = '''SELECT array(SELECT medoid
                    FROM group_data
                    WHERE dataset_id = :id
                    ORDER BY "time") ; '''
        result = db.engine.execute(text(stmt), id=id).fetchone()[0]
        return jsonify(result)
    # return one of the aggregated and averaged swarm features
    if feature in ['swarm_time', 'swarm_speed', 'swarm_acceleration', 'swarm_convex_hull_area',
                   'swarm_distance_centroid',
                   'swarm_direction', 'swarm_polarisation']:
        # swarm_* maps onto the same-named column of group_data
        feature = feature.replace('swarm_', '')
        stmt = '''SELECT array(
                    SELECT round(''' + feature + ''',2)::text
                    FROM group_data
                    WHERE dataset_id = :id
                    ORDER BY "time"); '''
        result = db.engine.execute(text(stmt), id=id).fetchone()[0]
        return jsonify(result)
    # return convex hull (simplified, as SVG path strings)
    if feature in ['convex_hull']:
        stmt = '''SELECT array(
                    SELECT ST_asSVG(ST_Simplify(convex_hull,0.1),0,1) as convex_hull
                    FROM group_data
                    WHERE dataset_id = :id
                    ORDER BY "time"); '''
        result = db.engine.execute(text(stmt), id=id).fetchone()[0]
        return jsonify(result)
    # return triangulation
    if feature in ['triangulation']:
        stmt = '''SELECT array(
                    SELECT ST_asSVG(ST_Simplify(delaunay_triangulation,0.1),0,1) as delaunay_triangulation
                    FROM group_data
                    WHERE dataset_id = :id
                    ORDER BY "time"); '''
        result = db.engine.execute(text(stmt), id=id).fetchone()[0]
        return jsonify(result)
    # return voronoi polygons
    if feature in ['voronoi']:
        stmt = '''SELECT array(
                    SELECT ST_asSVG(ST_Simplify(voronoi_polygons,0.1),0,1) as voronoi
                    FROM group_data
                    WHERE dataset_id = :id
                    ORDER BY "time"); '''
        result = db.engine.execute(text(stmt), id=id).fetchone()[0]
        return jsonify(result)
    # Unknown feature: echo the request back.
    return jsonify({"id": id, "feature": feature})
@api_page.route('/dataset/<int:id>/vc', methods=['GET'])
def get_vc_feature(id=None):
    """
    Return the variation coefficient (stddev/mean) of several features.

    Feature names come from fixed whitelists, so interpolating them into the
    SQL below is not injectable; `id` is bound as a parameter.

    :param id: id of the specific dataset
    """
    # not a valid feature
    if not id:
        return jsonify({})
    result = {}
    # Per-animal movement features.  Values are shifted by abs(min) so they
    # are non-negative before stddev/avg is taken.
    for feature in ['metric_distance', 'speed', 'acceleration', 'distance_centroid', 'direction']:
        stmt = '''SELECT stddev(''' + feature + '''+ tmp.val ) / avg(''' + feature + ''' + tmp.val)
                  FROM movement_data,
                       (SELECT abs(min(''' + feature + ''')) as val
                        FROM movement_data
                        WHERE dataset_id = :id) as tmp
                  WHERE dataset_id = :id; '''
        query = db.engine.execute(text(stmt), id=id).fetchone()[0]
        # Non-Decimal (e.g. NULL) results are skipped.
        if isinstance(query, decimal.Decimal):
            result[feature] = round(float(query), 4)
    # TODO add some more features here
    for feature in ['convex_hull_area']:
        stmt = '''SELECT stddev(''' + feature + '''+ tmp.val ) / avg(''' + feature + ''' + tmp.val)
                  FROM group_data,
                       (SELECT abs(min(''' + feature + ''')) as val
                        FROM group_data
                        WHERE dataset_id = :id) as tmp
                  WHERE dataset_id = :id; '''
        query = db.engine.execute(text(stmt), id=id).fetchone()[0]
        if isinstance(query, decimal.Decimal):
            if feature == 'convex_hull_area':
                # Exposed under a different key -- presumably what the frontend
                # expects; confirm with consumers before renaming.
                result['euclidean_distance'] = round(float(query), 4)
            else:
                result[feature] = round(float(query), 4)
    return jsonify(result)
@api_page.route('/movement_only/<int:id>', methods=['GET'])
def get_movment_only(id=None):
    """
    Stream only the movement data of a specific dataset as server-sent
    events, in frames of up to 1500 records each, ending with 'data: close'.

    NOTE(review): the function name typo ("movment") is load-bearing -- it is
    the Flask endpoint name -- so it is left unchanged.

    :param id: id of the specific dataset
    """
    def streaming_func():
        session = create_session()
        if id == None:
            return jsonify({})
        num_records = 1500
        # %s is filled by the DBAPI below; `id` comes from the <int:id> route
        # converter, so it is always an integer.
        stmt = '''SELECT json_build_object('a', animal_id, 't', "time", 'p', ST_asGeoJSON(position)::json->'coordinates')::text
                  FROM movement_data
                  WHERE dataset_id = '%s'
                  ORDER BY "time", animal_id ;'''
        result = session.connection().execution_options(stream_results=True).execute(stmt, (id))
        while True:
            rows = result.fetchmany(num_records)
            if not rows:
                break
            # Assemble one SSE "data:" frame holding a JSON array of records.
            tmp = 'data: ['
            for elem in rows:
                tmp += elem[0] + ','
            tmp = tmp[:-1]
            yield tmp + ']\n\n'
        yield 'data: close\n\n'
        session.remove()
    return Response(streaming_func(), mimetype='text/event-stream')
@api_page.route('/percentile/<int:id>', methods=['GET'])
def get_percentile(id=None):
    """Return the percentile rows of a dataset as a JSON list.

    :param id: id of the specific dataset
    """
    if not id:
        return jsonify({})
    rows = db.session.query(Percentile).filter_by(dataset_id=id)
    return jsonify([row.as_dict() for row in rows])
@api_page.route('/metadata/<int:id>', methods=['GET'])
def get_metadata(id=None):
    """Return the metadata rows of a dataset as a JSON list.

    :param id: id of the specific dataset
    """
    if not id:
        return jsonify({})
    rows = db.session.query(Metadata).filter_by(dataset_id=id)
    return jsonify([row.as_dict() for row in rows])
@api_page.route('/dataset/networks/<int:id>', methods=['GET'])
def get_dataset_networks(id=None):
    """Return all network info records (not the data) of a dataset.

    :param id: id of the specific dataset
    """
    if not id:
        return jsonify({})
    rows = db.session.query(Network).filter_by(dataset_id=id)
    return jsonify([row.as_info_dict() for row in rows])
@api_page.route('/dataset/network/<int:dataset_id>/<int:network_id>', methods=['GET'])
def get_dataset_network_data(dataset_id=None, network_id=None):
    """Return the network data for one network of a dataset.

    :param dataset_id: id of the specific dataset
    :param network_id: network id within that dataset
    """
    if dataset_id is None or network_id is None:
        return jsonify({})
    rows = db.session.query(Network).filter_by(dataset_id=dataset_id, network_id=network_id)
    return jsonify([row.network_as_data_dict() for row in rows])
@api_page.route('/dataset/network/hierarchy/<int:dataset_id>/<int:network_id>', methods=['GET'])
def get_dataset_network_hierarchy_data(dataset_id=None, network_id=None):
    """Return the network hierarchy for one network of a dataset.

    :param dataset_id: id of the specific dataset
    :param network_id: network id within that dataset
    """
    if dataset_id is None or network_id is None:
        return jsonify({})
    rows = db.session.query(Network).filter_by(dataset_id=dataset_id, network_id=network_id)
    return jsonify([row.hierarchy_as_data_dict() for row in rows])
@api_page.route('/dataset/<int:id>/animal_ids', methods=['GET'])
def get_animal_ids(id=None):
    """Return the distinct animal ids of a dataset as a JSON list.

    :param id: id of the specific dataset
    """
    if id is None:
        return jsonify({})
    rows = db.session.query(Movement_data.animal_id).filter_by(dataset_id=id).distinct(Movement_data.animal_id)
    return jsonify([row[0] for row in rows])
# @ api_page.route('/api/dataset/visual_parameter/<int:dataset_id>', methods=['POST'])
# def get_dataset_suggested_parameters(dataset_id=None):
# """
# Calculate the suggested parameters via a optimization method
#
# :param
# dataset_id: id of the specific dataset
# tracked_data: JSON String of the tracked data
# """
# # Get JSON object passed with Ajax request
# tracked_data = request.json
# return jsonify(data=calculate_parameters(dataset_id, tracked_data))
|
import sys
import yaml
def smudge_or_clean(do_smudge):
    """Rewrite ``key = value`` lines from stdin using values from secret.yml.

    With do_smudge True each known key gets its secret value filled in; with
    False the value is blanked (git "clean").  Lines whose key is unknown, and
    lines without '=', pass through unchanged.
    """
    try:
        with open('secret.yml') as fh:
            secret = yaml.safe_load(fh)
    except FileNotFoundError:
        secret = {}
    for raw in sys.stdin:
        raw = raw.rstrip()
        key = raw.split('=', 1)[0].strip() if '=' in raw else None
        if key in secret:
            value = secret[key] if do_smudge else ''
            # rstrip drops the trailing space left when cleaning blanks the value.
            print('{} = {}'.format(key, value).rstrip())
        else:
            print(raw)
# Entry point: git invokes this filter as "<script> smudge" or "<script> clean";
# any other argv[1] raises KeyError.
if __name__ == '__main__':
    smudge_or_clean(dict(smudge=True, clean=False)[sys.argv[1]])
|
"""
This part communicates with the database
Author: Max Marshall
Project: Fridge Tracker
"""
import datetime
import math
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import readings as read
import light_sensor as ls
import mario
# Locate the service-account key by probing each known deployment path in
# turn (Raspberry Pi, two development machines); the last existing file wins.
cred = None
try:
    cred = credentials.Certificate("/home/pi/Smart_Fridge/smartfridge-28fdd-firebase-adminsdk-cn2d2-a24a5cb16c.json")
except FileNotFoundError:
    pass
try:
    cred = credentials.Certificate("D:\\Python Scripts\\GitHub\\Smart_Fridge\\"
                                   "smartfridge-28fdd-firebase-adminsdk-cn2d2-a24a5cb16c.json")
except FileNotFoundError:
    pass
try:
    cred = credentials.Certificate("C:\\Users\\maxtm\\Desktop\\Python Projects\\GitHub\\Smart_Fridge\\"
                                   "smartfridge-28fdd-firebase-adminsdk-cn2d2-a24a5cb16c.json")
except FileNotFoundError:
    pass
# NOTE(review): if no key file is found, cred stays None and initialize_app
# falls back to application-default credentials -- confirm that is intended.
firebase_admin.initialize_app(cred)
db = firestore.client()
# The main chunk of the program
def tick_forward(door_alarm, power_alarm):
    """Sample the fridge up to 5 times, update firestore, and manage alarms.

    Above 4.4 C: door open -> door alarm, door closed -> power alarm (each
    only if the other is not already active).  At or below 4.4 C both alarms
    clear.  After 20+ quiet ticks an active alarm plays its song and ends the
    sampling loop early.

    :param door_alarm: whether the door alarm was active last tick
    :param power_alarm: whether the power alarm was active last tick
    :return: (door_alarm, power_alarm, readings, part1_time) where readings is
             the list of Celsius samples and part1_time the seconds this took
    """
    global time_since_alarm
    global st_time
    st_time = datetime.datetime.today()
    # Get readings
    readings = []
    # Gets readings and updates fridge data
    for z in range(5):
        humd, temp = read.get_reading()
        readings.append(temp)
        f_items = dict()
        if temp > 4.4:
            # Too warm: decide which alarm (if any) to raise.
            if ls.door_open(11):
                if power_alarm is True:
                    pass
                else:
                    if door_alarm is True:
                        pass
                    else:
                        f_items['door_alarm'] = True
                        door_alarm = True
            elif not ls.door_open(11):
                if door_alarm is True:
                    pass
                else:
                    if power_alarm is True:
                        pass
                    else:
                        f_items['power_alarm'] = True
                        power_alarm = True
        else:
            # Cold enough: clear both alarms.
            f_items['door_alarm'] = False
            f_items['power_alarm'] = False
            door_alarm = False
            power_alarm = False
        f_items['humidity'] = int(humd)
        # Stored in Fahrenheit.
        f_items["temperature"] = int((temp * 1.8) + 32)
        f_data_ref = db.collection(u'{}'.format('fridge_data')).document(u'{}'.format("data"))
        f_data_ref.set(f_items, merge=True)
        if door_alarm is True and time_since_alarm > 20:
            play_song(0)
            # Appends 15 to the readings -- presumably to force a warm average
            # downstream in update_firebase(); confirm intent.
            readings.append(15)
            time_since_alarm = 0
            break
        elif power_alarm is True and time_since_alarm > 20:
            play_song(1)
            readings.append(15)
            time_since_alarm = 0
            break
        else:
            time_since_alarm += 1
    end_time1 = datetime.datetime.today()
    part1_time = end_time1.timestamp() - st_time.timestamp()
    return door_alarm, power_alarm, readings, part1_time
def update_firebase(database, readings, part1_time):
    """Re-age every item in *database* based on this tick's temperature readings.

    If the average reading exceeds 4.4 C (fridge too warm), each item's
    expiry date is pulled in proportionally to the time spent warm
    (part1_time).  daysOld/daysLeft are recomputed and the items written back.

    :param database: firestore collection name (e.g. "inventory")
    :param readings: list of Celsius temperature samples from this tick
    :param part1_time: seconds spent collecting the readings
    """
    global st_time
    # Milliseconds per day.  Bug fix: the original divided by 80640000
    # (only ~22.4 h), which skewed daysOld/daysLeft by ~7%.
    ms_per_day = 86400000
    # Average temperature decides whether shelf life shrinks this tick.
    average = sum(readings) / len(readings)
    exp_change = part1_time / 7200 if average > 4.4 else 0
    # Get items from firebase
    items = []
    data_ref = db.collection(u'{}'.format(database))
    for doc in data_ref.stream():
        items.append((doc.id, doc.to_dict()))
    # For each item, update its date info.
    for item in items:
        z = float(item[1]['expDate'])   # current expiry timestamp (ms)
        y = float(item[1]['addDate'])   # when the item was added (ms)
        q = float(item[1]['expDate2'])  # original expiry timestamp (ms)
        ct_mils = datetime.datetime.today().timestamp() * 1000
        # Shrink the remaining shelf life, but never move expiry into the past.
        z = max(z - (exp_change * (q - y)), ct_mils)
        item[1]['expDate'] = int(z)
        item[1]['daysOld'] = math.floor((ct_mils - y) / ms_per_day)
        item[1]['daysLeft'] = math.ceil((z - ct_mils) / ms_per_day)
    # Write the updated items back; only str/int fields are persisted.
    for item in items:
        new_data_ref = db.collection(u'{}'.format(database)).document(u'{}'.format(item[0]))
        new_data = dict()
        for field in item[1]:
            if isinstance(item[1][field], str):
                new_data[u'{}'.format(field)] = u'{}'.format(item[1][field])
            elif isinstance(item[1][field], int):
                new_data[u'{}'.format(field)] = item[1][field]
        new_data_ref.set(new_data)
    end_time = datetime.datetime.today()
    print("Loop Completed in {}".format(end_time - st_time))
def play_song(s):
    """Play an alarm tune on the buzzer: 0 -> Mario theme (door alarm),
    1 -> underworld theme (power alarm).  Any other value just cycles the
    buzzer setup/teardown."""
    mario.setup()
    if s == 0:
        print("Super Mario Theme")
        mario.play(mario.melody, mario.tempo, 1.3, 0.800)
    elif s == 1:
        print("Super Mario Underworld Theme")
        mario.play(mario.underworld_melody, mario.underworld_tempo, 1.3, 0.800)
    mario.destroy()
# Alarm state shared with tick_forward() (initialized to 0, later replaced by
# booleans; the `is True` checks in tick_forward treat 0 as inactive).
door_alarm = 0
power_alarm = 0
# Start above the thresholds so an alarm / the feedback pass can fire at once.
time_since_alarm = 1000
time_since_feedback = 9600
# Guard
if __name__ == '__main__':
    print("Starting...\n")
    while True:
        door_alarm, power_alarm, temps, time1 = tick_forward(door_alarm, power_alarm)
        update_firebase("inventory", temps, time1)
        # NOTE(review): time_since_feedback is reset to 0 here but never
        # incremented anywhere, so this feedback pass runs only on the first
        # iteration -- confirm intended.
        if time_since_feedback >= 9600:
            time_since_feedback = 0
            grave_items = []
            data_ref = db.collection(u'{}'.format("graveyard"))
            docs = data_ref.stream()
            for doc in docs:
                grave_items.append((doc.id, doc.to_dict()))
            # Items discarded within ~100 s of being added are flagged as waste.
            for item in grave_items:
                exp_date = float(item[1]['expDate'])
                add_date = float(item[1]['addDate'])
                if (exp_date - add_date) < 100000:
                    info = dict()
                    info['name'] = item[1]['name']
                    info['feedback'] = "You seem to be wasting food. Try buying fewer/less {}(s)".format(item[1]['name'])
                    f_data_ref = db.collection(u'{}'.format('feedback')).document(u'{}'.format(item[0]))
                    f_data_ref.set(info, merge=True)
|
from django.conf.urls import url, include
import backend.view.specimenreportview as specimenreportview
import backend.view.specimenview as specimenview
import backend.view.specimenfcsfileview as specimenfcsfileview
import backend.view.specimengateview as specimengateview
# URL routing table for the specimen backend.
# NOTE(review): django.conf.urls.url() is deprecated and removed in Django 4.0;
# migrate to django.urls.re_path (or path) when the Django version allows.
# NOTE(review): 'include' is imported above but unused in this chunk; left in
# place since only part of the file may be visible.
urlpatterns = [
    # manage specimen object
    url('create_specimen', specimenview.create_specimen),
    url('query_specimenid', specimenview.query_specimenid),
    url('query_specimen_suggest', specimenview.query_specimen_suggest),
    url('delete_specimen', specimenview.delete_specimen),
    # manage specimen files
    url('query_specimen_fcsfiles', specimenfcsfileview.query_specimen_fcsfiles),
    url('upload_specimen_fcsfiles', specimenfcsfileview.upload_specimen_fcsfiles),
    url('query_specimen_fcsfile_data', specimenfcsfileview.query_specimen_fcsfile_data),
    # specimen file gates
    # NOTE(review): the 'spceiment' misspelling in the route matches the view
    # function name save_spceiment_fcsfile_gate (defined out of view); fixing it
    # requires renaming the view and updating all clients — flagged, not fixed.
    url('save_spceiment_fcsfile_gate', specimengateview.save_spceiment_fcsfile_gate),
    url('query_fcsfile_gate', specimengateview.query_fcsfile_gate),
    # report
    url('gen_report', specimenreportview.gen_report),
    url('query_report', specimenreportview.query_report),
    url('cell_stat', specimenreportview.cell_stat)
]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: github.com/metaprov/modelaapi/services/review/v1/review.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from github.com.metaprov.modelaapi.pkg.apis.team.v1alpha1 import generated_pb2 as github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_team_dot_v1alpha1_dot_generated__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='github.com/metaprov/modelaapi/services/review/v1/review.proto',
package='github.com.metaprov.modelaapi.services.review.v1',
syntax='proto3',
serialized_options=b'Z6github.com/metaprov/modelaapi/services/review/v1',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nIgithub.com/metaprov/modelaapi/services/review/v1/review.proto\x12\x36github.com.metaprov.modelaapi.services.review.v1\x1a\x1bgoogle/protobuf/empty.proto\x1a\x44github.com/metaprov/modelaapi/pkg/apis/team/v1alpha1/generated.proto\x1a\x1cgoogle/api/annotations.proto\"\xd4\x01\n\x16GetReviewRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12j\n\x06labels\x18\x03 \x03(\x0b\x32Z.github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"y\n\x17GetReviewResponse\x12P\n\x04item\x18\x01 \x01(\x0b\x32\x42.github.com.metaprov.modelaapi.pkg.apis.team.v1alpha1.Review\x12\x0c\n\x04yaml\x18\x02 \x01(\t\"m\n\x19\x43reateReviewRequest\x12P\n\x04item\x18\x01 \x01(\x0b\x32\x42.github.com.metaprov.modelaapi.pkg.apis.team.v1alpha1.Review\"\x1c\n\x1a\x43reateReviewResponse\"m\n\x19UpdateReviewRequest\x12P\n\x04item\x18\x01 \x01(\x0b\x32\x42.github.com.metaprov.modelaapi.pkg.apis.team.v1alpha1.Review\"\x1c\n\x1aUpdateReviewResponse\"\xda\x01\n\x19\x44\x65leteReviewRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12m\n\x06labels\x18\x03 \x03(\x0b\x32].github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x1c\n\x1a\x44\x65leteReviewResponse\"\xef\x01\n\x17ListReviewRequest\x12\x11\n\tnamespace\x18\x01 \x01(\t\x12k\n\x06labels\x18\x02 \x03(\x0b\x32[.github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.LabelsEntry\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x8a\x01\n\x18ListReviewResponse\x12U\n\x05items\x18\x01 
\x01(\x0b\x32\x46.github.com.metaprov.modelaapi.pkg.apis.team.v1alpha1.ReviewList\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xe7\t\n\x13\x43onversationService\x12\xe7\x01\n\x11ListReviews\x12O.github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest\x1aP.github.com.metaprov.modelaapi.services.review.v1.ListReviewResponse\"/\x82\xd3\xe4\x93\x02)\x12\'/api/v1alpha1/reviews/{namespace}\x12\xe3\x01\n\x12\x43reateReview\x12Q.github.com.metaprov.modelaapi.services.review.v1.CreateReviewRequest\x1aR.github.com.metaprov.modelaapi.services.review.v1.CreateReviewResponse\"&\x82\xd3\xe4\x93\x02 \"\x1b/api/v1alpha1/reviews:\x01*\x12\xea\x01\n\x0fGetReview\x12N.github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest\x1aO.github.com.metaprov.modelaapi.services.review.v1.GetReviewResponse\"6\x82\xd3\xe4\x93\x02\x30\x12./api/v1alpha1/reviews/{namespace}/{name}\x12\xa2\x02\n\x12UpdateReview\x12Q.github.com.metaprov.modelaapi.services.review.v1.UpdateReviewRequest\x1aR.github.com.metaprov.modelaapi.services.review.v1.UpdateReviewResponse\"e\x82\xd3\xe4\x93\x02_\x1aZ/api/v1alpha1/reviews/{review.metadata.namespace}/{review.metadata.name}:\x01*\x12\xed\x01\n\x12\x44\x65leteReview\x12Q.github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest\x1aR.github.com.metaprov.modelaapi.services.review.v1.DeleteReviewResponse\"0\x82\xd3\xe4\x93\x02**(/api/v1/reviews/{namespace}/{name}B8Z6github.com/metaprov/modelaapi/services/review/v1b\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_team_dot_v1alpha1_dot_generated__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_GETCONVERSATIONREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=430,
serialized_end=475,
)
_GETCONVERSATIONREQUEST = _descriptor.Descriptor(
name='GetReviewRequest',
full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest.labels', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_GETCONVERSATIONREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=263,
serialized_end=475,
)
_GETCONVERSATIONRESPONSE = _descriptor.Descriptor(
name='GetReviewResponse',
full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewResponse.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='yaml', full_name='github.com.metaprov.modelaapi.services.review.v1.GetReviewResponse.yaml', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=477,
serialized_end=598,
)
_CREATECONVERSATIONREQUEST = _descriptor.Descriptor(
name='CreateReviewRequest',
full_name='github.com.metaprov.modelaapi.services.review.v1.CreateReviewRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.review.v1.CreateReviewRequest.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=600,
serialized_end=709,
)
_CREATECONVERSATIONRESPONSE = _descriptor.Descriptor(
name='CreateReviewResponse',
full_name='github.com.metaprov.modelaapi.services.review.v1.CreateReviewResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=711,
serialized_end=739,
)
_UPDATECONVERSATIONREQUEST = _descriptor.Descriptor(
name='UpdateReviewRequest',
full_name='github.com.metaprov.modelaapi.services.review.v1.UpdateReviewRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='item', full_name='github.com.metaprov.modelaapi.services.review.v1.UpdateReviewRequest.item', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=741,
serialized_end=850,
)
_UPDATECONVERSATIONRESPONSE = _descriptor.Descriptor(
name='UpdateReviewResponse',
full_name='github.com.metaprov.modelaapi.services.review.v1.UpdateReviewResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=852,
serialized_end=880,
)
_DELETECONVERSATIONREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=430,
serialized_end=475,
)
_DELETECONVERSATIONREQUEST = _descriptor.Descriptor(
name='DeleteReviewRequest',
full_name='github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='name', full_name='github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest.labels', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_DELETECONVERSATIONREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=883,
serialized_end=1101,
)
_DELETECONVERSATIONRESPONSE = _descriptor.Descriptor(
name='DeleteReviewResponse',
full_name='github.com.metaprov.modelaapi.services.review.v1.DeleteReviewResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1103,
serialized_end=1131,
)
_LISTCONVERSATIONREQUEST_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=430,
serialized_end=475,
)
_LISTCONVERSATIONREQUEST = _descriptor.Descriptor(
name='ListReviewRequest',
full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='namespace', full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.namespace', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='labels', full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.labels', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.page_size', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.page_token', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_LISTCONVERSATIONREQUEST_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1134,
serialized_end=1373,
)
_LISTCONVERSATIONRESPONSE = _descriptor.Descriptor(
name='ListReviewResponse',
full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewResponse.items', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='github.com.metaprov.modelaapi.services.review.v1.ListReviewResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1376,
serialized_end=1514,
)
# Wire nested map-entry types and message-typed fields, then register every
# message type on the file descriptor. (Generated code — do not hand-edit;
# regenerate from the .proto instead.)
# NOTE(review): the local descriptor variables are named *CONVERSATION* while
# the actual messages are Review* — this file looks like a conversation proto
# that was renamed without being fully regenerated. The three lookups of
# generated_pb2._CONVERSATION / _CONVERSATIONLIST below presumably still exist
# in that module; if generated_pb2 was regenerated with Review names these
# lines will raise AttributeError at import time — verify.
_GETCONVERSATIONREQUEST_LABELSENTRY.containing_type = _GETCONVERSATIONREQUEST
_GETCONVERSATIONREQUEST.fields_by_name['labels'].message_type = _GETCONVERSATIONREQUEST_LABELSENTRY
_GETCONVERSATIONRESPONSE.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_team_dot_v1alpha1_dot_generated__pb2._CONVERSATION
_CREATECONVERSATIONREQUEST.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_team_dot_v1alpha1_dot_generated__pb2._CONVERSATION
_UPDATECONVERSATIONREQUEST.fields_by_name['item'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_team_dot_v1alpha1_dot_generated__pb2._CONVERSATION
_DELETECONVERSATIONREQUEST_LABELSENTRY.containing_type = _DELETECONVERSATIONREQUEST
_DELETECONVERSATIONREQUEST.fields_by_name['labels'].message_type = _DELETECONVERSATIONREQUEST_LABELSENTRY
_LISTCONVERSATIONREQUEST_LABELSENTRY.containing_type = _LISTCONVERSATIONREQUEST
_LISTCONVERSATIONREQUEST.fields_by_name['labels'].message_type = _LISTCONVERSATIONREQUEST_LABELSENTRY
_LISTCONVERSATIONRESPONSE.fields_by_name['items'].message_type = github_dot_com_dot_metaprov_dot_modelaapi_dot_pkg_dot_apis_dot_team_dot_v1alpha1_dot_generated__pb2._CONVERSATIONLIST
DESCRIPTOR.message_types_by_name['GetReviewRequest'] = _GETCONVERSATIONREQUEST
DESCRIPTOR.message_types_by_name['GetReviewResponse'] = _GETCONVERSATIONRESPONSE
DESCRIPTOR.message_types_by_name['CreateReviewRequest'] = _CREATECONVERSATIONREQUEST
DESCRIPTOR.message_types_by_name['CreateReviewResponse'] = _CREATECONVERSATIONRESPONSE
DESCRIPTOR.message_types_by_name['UpdateReviewRequest'] = _UPDATECONVERSATIONREQUEST
DESCRIPTOR.message_types_by_name['UpdateReviewResponse'] = _UPDATECONVERSATIONRESPONSE
DESCRIPTOR.message_types_by_name['DeleteReviewRequest'] = _DELETECONVERSATIONREQUEST
DESCRIPTOR.message_types_by_name['DeleteReviewResponse'] = _DELETECONVERSATIONRESPONSE
DESCRIPTOR.message_types_by_name['ListReviewRequest'] = _LISTCONVERSATIONREQUEST
DESCRIPTOR.message_types_by_name['ListReviewResponse'] = _LISTCONVERSATIONRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetReviewRequest = _reflection.GeneratedProtocolMessageType('GetReviewRequest', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _GETCONVERSATIONREQUEST_LABELSENTRY,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest.LabelsEntry)
})
,
'DESCRIPTOR' : _GETCONVERSATIONREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.GetReviewRequest)
})
_sym_db.RegisterMessage(GetReviewRequest)
_sym_db.RegisterMessage(GetReviewRequest.LabelsEntry)
GetReviewResponse = _reflection.GeneratedProtocolMessageType('GetReviewResponse', (_message.Message,), {
'DESCRIPTOR' : _GETCONVERSATIONRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.GetReviewResponse)
})
_sym_db.RegisterMessage(GetReviewResponse)
CreateReviewRequest = _reflection.GeneratedProtocolMessageType('CreateReviewRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATECONVERSATIONREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.CreateReviewRequest)
})
_sym_db.RegisterMessage(CreateReviewRequest)
CreateReviewResponse = _reflection.GeneratedProtocolMessageType('CreateReviewResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATECONVERSATIONRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.CreateReviewResponse)
})
_sym_db.RegisterMessage(CreateReviewResponse)
UpdateReviewRequest = _reflection.GeneratedProtocolMessageType('UpdateReviewRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATECONVERSATIONREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.UpdateReviewRequest)
})
_sym_db.RegisterMessage(UpdateReviewRequest)
UpdateReviewResponse = _reflection.GeneratedProtocolMessageType('UpdateReviewResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATECONVERSATIONRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.UpdateReviewResponse)
})
_sym_db.RegisterMessage(UpdateReviewResponse)
DeleteReviewRequest = _reflection.GeneratedProtocolMessageType('DeleteReviewRequest', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _DELETECONVERSATIONREQUEST_LABELSENTRY,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest.LabelsEntry)
})
,
'DESCRIPTOR' : _DELETECONVERSATIONREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.DeleteReviewRequest)
})
_sym_db.RegisterMessage(DeleteReviewRequest)
_sym_db.RegisterMessage(DeleteReviewRequest.LabelsEntry)
DeleteReviewResponse = _reflection.GeneratedProtocolMessageType('DeleteReviewResponse', (_message.Message,), {
'DESCRIPTOR' : _DELETECONVERSATIONRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.DeleteReviewResponse)
})
_sym_db.RegisterMessage(DeleteReviewResponse)
ListReviewRequest = _reflection.GeneratedProtocolMessageType('ListReviewRequest', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _LISTCONVERSATIONREQUEST_LABELSENTRY,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest.LabelsEntry)
})
,
'DESCRIPTOR' : _LISTCONVERSATIONREQUEST,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.ListReviewRequest)
})
_sym_db.RegisterMessage(ListReviewRequest)
_sym_db.RegisterMessage(ListReviewRequest.LabelsEntry)
ListReviewResponse = _reflection.GeneratedProtocolMessageType('ListReviewResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTCONVERSATIONRESPONSE,
'__module__' : 'github.com.metaprov.modelaapi.services.review.v1.review_pb2'
# @@protoc_insertion_point(class_scope:github.com.metaprov.modelaapi.services.review.v1.ListReviewResponse)
})
_sym_db.RegisterMessage(ListReviewResponse)
DESCRIPTOR._options = None
_GETCONVERSATIONREQUEST_LABELSENTRY._options = None
_DELETECONVERSATIONREQUEST_LABELSENTRY._options = None
_LISTCONVERSATIONREQUEST_LABELSENTRY._options = None
# Service descriptor for the Review gRPC service. (Generated code — do not
# hand-edit; regenerate from the .proto.)
# NOTE(review): the serialized file descriptor above still embeds the service
# name 'ConversationService' (see the ...\x13ConversationService bytes in
# serialized_pb) while this object says name='ReviewService' — another sign of
# an incomplete rename; regenerate to make them agree.
# NOTE(review): DeleteReview's HTTP rule uses /api/v1/... while every other
# method uses /api/v1alpha1/... — confirm this inconsistency is intended (fix
# belongs in the .proto annotations, not here).
_CONVERSATIONSERVICE = _descriptor.ServiceDescriptor(
  name='ReviewService',
  full_name='github.com.metaprov.modelaapi.services.review.v1.ReviewService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=1517,
  serialized_end=2772,
  methods=[
  _descriptor.MethodDescriptor(
    name='ListReviews',
    full_name='github.com.metaprov.modelaapi.services.review.v1.ReviewService.ListReviews',
    index=0,
    containing_service=None,
    input_type=_LISTCONVERSATIONREQUEST,
    output_type=_LISTCONVERSATIONRESPONSE,
    serialized_options=b'\202\323\344\223\002)\022\'/api/v1alpha1/reviews/{namespace}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='CreateReview',
    full_name='github.com.metaprov.modelaapi.services.review.v1.ReviewService.CreateReview',
    index=1,
    containing_service=None,
    input_type=_CREATECONVERSATIONREQUEST,
    output_type=_CREATECONVERSATIONRESPONSE,
    serialized_options=b'\202\323\344\223\002 \"\033/api/v1alpha1/reviews:\001*',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='GetReview',
    full_name='github.com.metaprov.modelaapi.services.review.v1.ReviewService.GetReview',
    index=2,
    containing_service=None,
    input_type=_GETCONVERSATIONREQUEST,
    output_type=_GETCONVERSATIONRESPONSE,
    serialized_options=b'\202\323\344\223\0020\022./api/v1alpha1/reviews/{namespace}/{name}',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='UpdateReview',
    full_name='github.com.metaprov.modelaapi.services.review.v1.ReviewService.UpdateReview',
    index=3,
    containing_service=None,
    input_type=_UPDATECONVERSATIONREQUEST,
    output_type=_UPDATECONVERSATIONRESPONSE,
    serialized_options=b'\202\323\344\223\002_\032Z/api/v1alpha1/reviews/{review.metadata.namespace}/{review.metadata.name}:\001*',
    create_key=_descriptor._internal_create_key,
  ),
  _descriptor.MethodDescriptor(
    name='DeleteReview',
    full_name='github.com.metaprov.modelaapi.services.review.v1.ReviewService.DeleteReview',
    index=4,
    containing_service=None,
    input_type=_DELETECONVERSATIONREQUEST,
    output_type=_DELETECONVERSATIONRESPONSE,
    serialized_options=b'\202\323\344\223\002**(/api/v1/reviews/{namespace}/{name}',
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_CONVERSATIONSERVICE)
DESCRIPTOR.services_by_name['ReviewService'] = _CONVERSATIONSERVICE
# @@protoc_insertion_point(module_scope)
|
"""
This script generates and saves the states closest to the solved state.
It stores the results in an HDF5 file as parallel arrays. See the code at the end
for how to load the data.
It stores three values (stored in parallel arrays).
- A bit array representing the state.
- A boolean array which marks all actions which lead to the shortest path to the solved cube
- The distance to the solved_cube
"""
import collections
import numpy as np
# Load BatchCube
import sys
sys.path.append('..') # add parent directory to path
from batch_cube import BatchCube
# Enumerate every cube state within MAX_DISTANCE quarter-turns of solved.
MAX_DISTANCE = 6
# One-hot rows: eye12[a] encodes action `a` as a length-12 bool vector.
eye12 = np.eye(12, dtype=bool)
state_dict = {}  # key: bit-array bytes; value = (bit_array, best_actions, distance)
print("Generating data...")
# start with solved cube
cubes = BatchCube(1)
# NOTE(review): solved_cube is never referenced again in this script.
solved_cube = cubes._cube_array[0]
key = cubes.bit_array().tobytes()
# The solved state has no useful action; stored as float zeros, unlike the
# bool one-hot rows used for other states (converted to bool when saved).
best_actions = np.zeros(12)
distance = 0
state_dict[key] = (cubes.bit_array()[0], best_actions, distance)
size = 1
# Breadth-first expansion: at each distance, apply all 12 actions to every
# frontier cube, then keep only states not seen at a shorter distance.
for distance in range(1, MAX_DISTANCE+1):
    print("Distance:", distance)
    # go to neighbors: expand each cube in the batch by all 12 actions
    cubes.step_independent(np.arange(12))
    # record last move taken
    # NOTE(review): `size` is the cumulative state count, not the current
    # frontier size, so this can be longer than the expanded batch; the zip
    # below truncates, and because the tiled pattern is 12-periodic the
    # action labels stay aligned — assumes step_independent orders results
    # cube-major (all 12 actions of cube 0, then cube 1, ...). Confirm.
    last_action = np.tile(np.arange(12), size)
    # find inverse of that move (using ^): assumes actions 2k and 2k+1 are
    # mutually inverse, so flipping the low bit inverts the move.
    best_action = last_action ^ 1
    best_action_one_hot = eye12[best_action]
    # record data
    internal_array = cubes._cube_array
    bit_array = cubes.bit_array()
    temp_dict = {}
    new_cube_array = []
    for bits, internal, action in zip(bit_array, internal_array, best_action_one_hot):
        key = bits.tobytes()
        if key in state_dict:
            # already reachable at a shorter distance — ignore
            continue
        if key in temp_dict:
            # same state reached by another path at this distance:
            # accumulate the extra best action in place (fancy indexing on
            # eye12 produced an independent copy, so this is safe per key)
            best_actions = temp_dict[key][1]
            best_actions += action
            continue
        temp_dict[key] = (bits, action, distance)
        new_cube_array.append(internal)
    state_dict.update(temp_dict)
    size = len(state_dict)
    print("total:", size, "current:", len(new_cube_array))
    # rebuild cube batch from the new frontier only
    cubes._cube_array = np.array(new_cube_array)
print("Storing data...")
# Flatten the state dictionary into three parallel arrays for HDF5 storage.
# (Removed an unused `keys = []` accumulator that was never populated.)
bits = []
best_actions = []
distances = []
for b, a, d in state_dict.values():
    # prepend an axis so the per-state bit arrays concatenate into one
    # (n_states, ...) array below
    bits.append(b[np.newaxis])
    best_actions.append(a)
    distances.append(d)
print(bits[0].shape)  # debug: per-state shape of the bit representation
bits = np.concatenate(bits, axis=0)
# cast to bool: the solved state's action vector was stored as float zeros
best_actions = np.array(best_actions, dtype=bool)
distances = np.array(distances)
# put into HDF5 file; the context manager guarantees the file is closed
# even if a dataset write fails
import h5py
with h5py.File('../save/close_state_data.h5', 'w') as h5f:
    h5f.create_dataset('bits', data=bits)
    h5f.create_dataset('best_actions', data=best_actions)
    h5f.create_dataset('distances', data=distances)
print("Load data...")
# Load the arrays back to demonstrate the round-trip read path.
h5f = h5py.File('../save/close_state_data.h5', 'r')
bits = h5f['bits'][:]
best_actions = h5f['best_actions'][:]
distances = h5f['distances'][:]
h5f.close()
# Rebuild dictionary keyed on the raw bytes of each state's bit array.
state_dict = {b.tobytes():(b, a, int(d)) for b, a, d in zip(bits, best_actions, distances)}
print("Testing data...")
# Test data types on one representative entry
for k, v in state_dict.items():
    assert v[0].dtype == bool
    assert v[1].dtype == bool
    break
# Test data: scramble a cube by k <= MAX_DISTANCE moves, then follow the
# stored best actions; it must solve in exactly `distance` steps.
import numpy as np  # NOTE(review): numpy is already imported above
for i in range(1000):
    test_cube = BatchCube(1)
    test_cube.randomize(1 + (i % MAX_DISTANCE))
    _, best_actions, distance = state_dict[test_cube.bit_array().tobytes()]
    for _ in range(distance):
        # must not be solved before the recorded distance is exhausted
        assert not test_cube.done()[0]
        # pick uniformly among the (possibly several) recorded best actions
        action = np.random.choice(12, p=best_actions/np.sum(best_actions))
        test_cube.step([action])
        # look up the action table for the state we just moved into
        _, best_actions, _ = state_dict[test_cube.bit_array().tobytes()]
    assert test_cube.done()[0]
print("Passed all tests")
|
'''
:class:`GlycanComposition`, :class:`MonosaccharideResidue`, and :class:`SubstituentResidue` are
useful for working with bag-of-residues where topology and connections are not relevant, but
the aggregate composition is known. These types work with a subset of the IUPAC three letter code
for specifying compositions.
>>> g = GlycanComposition(Hex=3, HexNAc=2)
>>> g["Hex"]
3
>>> r = MonosaccharideResidue.from_iupac_lite("Hex")
>>> r
MonosaccharideResidue(Hex)
>>> g[r]
3
>>> import glypy
>>> abs(g.mass() - glypy.motifs["N-Glycan core basic 1"].mass()) < 1e-5
True
>>> g2 = GlycanComposition(Hex=5)
>>> g["@n-acetyl"] = -2 # Remove two n-acetyl groups from the composition
>>> abs(g.mass() - g2.mass()) < 1e-5
True
'''
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from glypy.utils import tree, uid
from glypy.utils.multimap import OrderedMultiMap
from glypy.composition import Composition
from glypy.structure.base import SaccharideCollection, MoleculeBase
from glypy.structure.glycan import Glycan
from glypy.structure.monosaccharide import Monosaccharide, ReducedEnd
from glypy.structure.substituent import Substituent
from glypy.structure.constants import (Anomer, Stem, Configuration, UnknownPosition)
from glypy.io import iupac
from glypy.io.iupac import (
monosaccharide_reference as _monosaccharide_reference,
resolve_special_base_type as _resolve_special_base_type,
IUPACError)
from glypy.composition.base import formula
from glypy.composition.composition_transform import (
derivatize, has_derivatization, strip_derivatization,
_derivatize_reducing_end, _strip_derivatization_reducing_end,
make_counter)
from six import string_types as basestring
# Maps IUPAC names to MonosaccharideResidue instances; populated further
# down (after MonosaccharideResidue is defined) and shared with the
# substituent serializer constructed below.
monosaccharide_residue_reference = {}
class IUPACLiteMonosaccharideDeserializer(iupac.SimpleMonosaccharideDeserializer):
    # Parser for iupac_lite tokens. When the monosaccharide grammar fails it
    # falls back, in order, to MolecularComposition (sigil '#') and
    # SubstituentResidue (sigil '@') notations.
    def monosaccharide_from_iupac(self, monosaccharide_str, residue_class=None):
        """
        Parse a string in a limited subset of IUPAC three letter code into
        an instance of :class:`MonosaccharideResidue` or :class:`SubstituentResidue`.

        Parameters
        ----------
        monosaccharide_str: str
            The string to be parsed
        residue_class: type, optional
            The type to convert the parsed monosaccharide into; defaults to
            :class:`MonosaccharideResidue`

        Returns
        -------
        MonosaccharideResidue
        """
        if residue_class is None:
            residue_class = MonosaccharideResidue
        try:
            match_dict = self.extract_pattern(monosaccharide_str)
            residue = self.build_residue(match_dict)
        except IUPACError:
            # not a monosaccharide token; try the alternative residue
            # notations in order
            if monosaccharide_str.startswith(MolecularComposition.sigil):
                result = MolecularComposition.from_iupac_lite(monosaccharide_str)
                return result
            try:
                result = SubstituentResidue.from_iupac_lite(monosaccharide_str)
                return result
            except Exception:
                # last resort: molecular composition even without its sigil
                try: # pragma: no cover
                    result = MolecularComposition.from_iupac_lite(monosaccharide_str)
                    return result
                except Exception:
                    raise IUPACError("Cannot find pattern in {}".format(monosaccharide_str))
        except TypeError:
            # extract_pattern raised on a non-string input
            raise TypeError("Expected string, received {} ({})".format(monosaccharide_str, type(monosaccharide_str)))
        deriv = match_dict.get("derivatization", '')
        if deriv is not None and deriv != "":
            self.apply_derivatization(residue, deriv)
        return residue_class.from_monosaccharide(residue)

    def build_residue(self, match_dict):
        # The parent parser returns (residue, linkage); linkage is
        # meaningless for an unconnected residue, so discard it.
        residue, linkage = super(IUPACLiteMonosaccharideDeserializer, self).build_residue(match_dict)
        return residue

    def __call__(self, string, residue_class=None):
        # Allow the deserializer instance to be used as a plain function.
        return self.monosaccharide_from_iupac(string, residue_class=residue_class)
class IUPACLiteMonosaccharideSerializer(iupac.SimpleMonosaccharideSerializer):
    def monosaccharide_to_iupac(self, residue):
        """
        Encode a subset of traits of a :class:`Monosaccharide`-like object
        using a limited subset of the IUPAC three letter code. The information
        present is sufficient to reconstruct a :class:`MonosaccharideResidue`
        instance reflecting the base type and its native substituents and
        modifications.

        .. note::
            This function is not suitable for use on whole |Glycan| objects.
            Instead, see :meth:`GlycanComposition.from_glycan` and
            :meth:`GlycanComposition.serialize`

        Parameters
        ----------
        residue: Monosaccharide
            The object to be encoded

        Returns
        -------
        str

        See Also
        --------
        :func:`from_iupac_lite`
        """
        try:
            return super(IUPACLiteMonosaccharideSerializer, self).monosaccharide_to_iupac(residue)
        except (AttributeError, TypeError, ValueError):
            # A genuine monosaccharide that fails to serialize indicates a
            # real error and must propagate; anything else (e.g. a
            # SubstituentResidue) falls back to its own string form.
            if isinstance(residue, Monosaccharide):
                raise
            return str(residue)
# Module-level parser/serializer singletons used throughout this module.
from_iupac_lite = IUPACLiteMonosaccharideDeserializer()
to_iupac_lite = IUPACLiteMonosaccharideSerializer(
    iupac.monosaccharide_reference,
    iupac.SubstituentSerializer(monosaccharide_residue_reference))
# Make the iupac_lite encoding available through Monosaccharide's
# pluggable serializer registry.
Monosaccharide.register_serializer('iupac_lite', to_iupac_lite)
def drop_stem(residue, force=False):
    """Remove the stem (carbon ring stereochemical classification) from
    *residue* in place.

    Unless ``force`` is |True|, named special-case monosaccharides (those for
    which :func:`~glypy.io.iupac.resolve_special_base_type` returns a truthy
    value) are left untouched.

    Parameters
    ----------
    residue : :class:`~.Monosaccharide`
        The monosaccharide to change
    force : bool, optional
        Whether or not to override known special case named monosaccharides

    Returns
    -------
    :class:`~.Monosaccharide`
        The mutated monosaccharide
    """
    special_base = _resolve_special_base_type(residue)
    if special_base is None or force:
        residue.stem = (None,)
    return residue
def drop_positions(residue, force=False):
    """Drops the position classifiers from all links and modifications
    attached to this monosaccharide.

    Unless ``force`` is |True|, if :func:`~.iupac.resolve_special_base_type`
    returns a truthy value, this function will do nothing.

    Parameters
    ----------
    residue : :class:`~.Monosaccharide`
        The monosaccharide to change
    force : bool, optional
        Whether or not to override known special case named monosaccharides

    Returns
    -------
    :class:`~.Monosaccharide`
        The mutated monosaccharide
    """
    if _resolve_special_base_type(residue) is None or force:
        # Rebuild the modification map with every entry re-keyed to the
        # unknown position.
        modifications = OrderedMultiMap()
        for _k, v in residue.modifications.items():
            modifications[UnknownPosition] = v
        residue.modifications = modifications
        # Substituent links must be broken, repositioned, and re-applied in
        # that order; iterate over a snapshot since break/apply mutate the map.
        for _p, link in list(residue.substituent_links.items()):
            link.break_link(refund=True)
            link.parent_position = UnknownPosition
            link.apply()
    return residue
def drop_configuration(residue, force=False):
    """Erase the absolute stereochemical configuration of *residue* in place.

    Unless ``force`` is |True|, named special-case monosaccharides (those for
    which :func:`~glypy.io.iupac.resolve_special_base_type` returns a truthy
    value) are left untouched.

    Parameters
    ----------
    residue : :class:`~.Monosaccharide`
        The monosaccharide to change
    force : bool, optional
        Whether or not to override known special case named monosaccharides

    Returns
    -------
    :class:`~.Monosaccharide`
        The mutated monosaccharide
    """
    special_base = _resolve_special_base_type(residue)
    if special_base is None or force:
        residue.configuration = (None,)
    return residue
# Elemental composition of one water molecule; residues subtract this on
# construction (see MonosaccharideResidue.__init__) and aggregates track it
# as a composition offset.
water_composition = Composition({"O": 1, "H": 2})
class ResidueBase(object):
    """Mixin defining the common residue interface.

    The ``drop_*`` methods here are no-op defaults so that every residue type
    exposes the same API; subclasses bind real implementations where the
    operation is meaningful (e.g. :class:`MonosaccharideResidue`).
    """
    __slots__ = ()

    def drop_stem(self, force=False):
        """No-op default; return ``self`` unchanged.

        Parameters
        ----------
        force : bool, optional
            Accepted for interface compatibility; ignored here.

        Returns
        -------
        ResidueBase
        """
        return self

    def drop_positions(self, force=False):
        """No-op default; return ``self`` unchanged.

        Parameters
        ----------
        force : bool, optional
            Accepted for interface compatibility; ignored here.

        Returns
        -------
        ResidueBase
        """
        return self

    def drop_configuration(self, force=False):
        """No-op default; return ``self`` unchanged.

        Parameters
        ----------
        force : bool, optional
            Accepted for interface compatibility; ignored here.

        Returns
        -------
        ResidueBase
        """
        return self

    def to_iupac_lite(self):
        """Encode this residue using `iupac_lite` notation.

        Returns
        -------
        str
        """
        return to_iupac_lite(self)

    @classmethod
    def from_iupac_lite(cls, string):
        """Parse a string of `iupac_lite` notation to produce a residue object.

        Parameters
        ----------
        string : :class:`str`
            The string to parse

        Returns
        -------
        ResidueBase
        """
        return from_iupac_lite(string, residue_class=cls)
class MonosaccharideResidue(Monosaccharide, ResidueBase):
    """Represents a :class:`Monosaccharide`-like object, save that it does
    not connect to other :class:`~.Monosaccharide` objects and does not have
    properties related to topology, specifically, :attr:`anomer`.

    A single :class:`MonosaccharideResidue` has lost a water molecule from its
    composition, reflecting its residual nature. This is accounted for when dealing
    with aggregates of residues. They also have altered carbon backbone occupancies.

    :class:`MonosaccharideResidue` objects are hashable and comparable on their
    `iupac_lite` representation, which is given by :meth:`__str__` or :meth:`name`.
    """
    __slots__ = ()

    @classmethod
    def from_monosaccharide(cls, monosaccharide, configuration=False, stem=True, ring=False):
        """Construct an instance of :class:`MonosaccharideResidue` from an instance
        of |Monosaccharide|. This function attempts to preserve derivatization if possible.
        This function will create a *deep copy* of `monosaccharide`.

        Parameters
        ----------
        monosaccharide : Monosaccharide
            The monosaccharide to be converted
        configuration : bool, optional
            Whether or not to preserve |Configuration|. Defaults to |False|
        stem : bool, optional
            Whether or not to preserve |Stem|. Defaults to |True|
        ring : bool, optional
            Whether or not to preserve |RingType|. Defaults to |False|

        Returns
        -------
        MonosaccharideResidue
        """
        residue = monosaccharide.clone(monosaccharide_type=cls)
        # mass before stripping derivatization, compared against below
        premass = residue.mass()
        deriv = has_derivatization(monosaccharide)
        strip_derivatization(residue)
        # only blank out stereochemical detail on generic residues; named
        # special-case monosaccharides keep their identity
        if _resolve_special_base_type(monosaccharide) is None:
            if not configuration:
                residue.configuration = (Configuration.x,)
            if not stem:
                residue.stem = (Stem.x,)
            if not ring:
                residue.ring_start = residue.ring_end = UnknownPosition
        if deriv:
            # restore the derivatization stripped above
            derivatize(residue, deriv)
        if residue.mass() != premass and not deriv:
            # NOTE(review): presumably compensates for water lost while
            # converting an underivatized clone — confirm against
            # strip_derivatization's semantics.
            residue.composition += water_composition
        return residue

    def __init__(self, *args, **kwargs):
        super(MonosaccharideResidue, self).__init__(*args, **kwargs)
        # a residue is a monosaccharide minus one water, with no anomeric state
        self.composition -= water_composition
        self.anomer = Anomer.x

    def clone(self, *args, **kwargs):
        '''
        Copies just this |Monosaccharide| and its |Substituent| objects, creating a separate instance
        with the same data. All mutable data structures are duplicated and distinct from the original.

        Does not copy any :attr:`links` as this would cause recursive duplication of the entire |Glycan|
        graph.

        Parameters
        ----------
        prop_id: :class:`bool`
            Whether to copy :attr:`id` from ``self`` to the new instance
        fast: :class:`bool`
            Whether to use the fast-path initialization process in :meth:`MonosaccharideResidue.__init__`
        monosaccharide_type: :class:`type`
            A subclass of :class:`MonosaccharideResidue` to use

        Returns
        -------
        :class:`MonosaccharideResidue`
        '''
        # default the copy to MonosaccharideResidue unless the caller
        # explicitly requests another subtype
        kwargs.setdefault("monosaccharide_type", MonosaccharideResidue)
        residue = super(MonosaccharideResidue, self).clone(*args, **kwargs)
        return residue

    def __repr__(self):  # pragma: no cover
        return "MonosaccharideResidue(%s)" % self.name()

    def __str__(self):  # pragma: no cover
        return to_iupac_lite(self)

    def __hash__(self):  # pragma: no cover
        """Obtain a hash value from `self` based on :meth:`MonosaccharideResidue.name`.

        Returns
        -------
        int
        """
        return hash(self.name())

    def open_attachment_sites(self, max_occupancy=0):
        '''
        When attaching :class:`~.Monosaccharide` instances to other objects,
        bonds are formed between the carbohydrate backbone and the other object.
        If a site is already bound, the occupying object fills that space on the
        backbone and prevents other objects from binding there.

        Currently only cares about the availability of the hydroxyl group. As there
        is not a hydroxyl attached to the ring-ending carbon, that should not be
        considered an open site.

        If any existing attached units have unknown positions, we can't provide any
        known positions, in which case the list of open positions will be a :class:`list`
        of ``-1`` s of the length of open sites.

        A :class:`MonosaccharideResidue` has two fewer open attachment sites than
        the equivalent :class:`~.Monosaccharide`

        Parameters
        ----------
        max_occupancy: int
            The number of objects that may already be bound at a site before it
            is considered unavailable for attachment.

        Returns
        -------
        :class:`list`:
            The positions open for binding
        :class:`int`:
            The number of bound but unknown locations on the backbone.
        '''
        sites, unknowns = super(
            MonosaccharideResidue, self).open_attachment_sites(max_occupancy)
        # drop the two sites consumed by the residue transformation
        return sites[:-2], unknowns

    def __eq__(self, other):
        '''
        Test for equality between :class:`MonosaccharideResidue` instances by comparing
        the result of :meth:`MonosaccharideResidue.name` calls between `self` and `other`.

        :meth:`MonosaccharideResidue.name` is an alias of :func:`to_iupac_lite` called on `self`
        '''
        if (other is None):
            return False
        if not isinstance(other, (MonosaccharideResidue, str)):
            return False
        # compare by serialized name; plain strings are accepted as-is
        return str(self) == str(other)

    def name(self):
        """Name this object according to `iupac_lite`.

        Returns
        -------
        str

        See Also
        --------
        :meth:`to_iupac_lite`
        """
        return to_iupac_lite(self)

    def residue_name(self):
        """Name this object according to `iupac_lite`, omitting any derivatization

        Returns
        -------
        str

        See Also
        --------
        :meth:`to_iupac_lite`
        :meth:`name`
        """
        name = self.name()
        # drop everything after the first '^' (the derivatization suffix)
        return name.split("^")[0]

    # Bind the module-level mutators as methods, overriding ResidueBase's
    # no-op defaults; at class-body execution time these names resolve to
    # the global functions defined above.
    drop_stem = drop_stem
    drop_positions = drop_positions
    drop_configuration = drop_configuration

    def copy_underivatized(self):
        """Create a copy of this residue without derivatization.

        Returns
        -------
        :class:`MonosaccharideResidue`
        """
        # re-parse our own serialized form, letting the deserializer's
        # strip_derivatization helper remove the derivative groups
        return from_iupac_lite.strip_derivatization(str(self), residue_class=self.__class__)
# Build residue-form counterparts for every named monosaccharide in the
# IUPAC reference table; this fills the mapping shared with the
# SubstituentSerializer constructed earlier.
monosaccharide_residue_reference.update({
    k: MonosaccharideResidue.from_monosaccharide(v) for k, v in _monosaccharide_reference.items()
})
class FrozenMonosaccharideResidue(MonosaccharideResidue):
    '''
    A subclass of |MonosaccharideResidue| which caches the result of :func:`to_iupac_lite` and instances returned
    by :meth:`FrozenMonosaccharideResidue.clone` and :meth:`FrozenMonosaccharideResidue.from_iupac_lite`.
    Also treated as immutable after initialization through :meth:`FrozenMonosaccharideResidue.from_monosaccharide`.

    Note that directly calling :meth:`FrozenMonosaccharideResidue.from_monosaccharide` will not retrieve instances
    from the cache directly, and direct initialization using normal instance creation will neither touch the cache
    nor freeze the instance.

    This type is intended for use with :class:`FrozenGlycanComposition` to minimize the number of times
    :func:`from_iupac_lite` is called.
    '''
    __slots__ = ("_frozen", "_total_composition", "_hash", "_name", "_mass")
    # Slots that may still be written while frozen: they only memoize derived
    # values (composition, hash, mass) and do not change identity.
    _attribute_caching_slots = (
        '_total_composition', '_hash',
        '_mass'
    )
    # _frozen = False
    # _total_composition = None
    # Class-private (name-mangled) cache: iupac_lite string -> canonical instance.
    __cache = {}

    @classmethod
    def from_monosaccharide(cls, monosaccharide, *args, **kwargs):
        # Canonicalize through the cache: the first sighting of a given
        # iupac_lite string freezes and stores the instance; later sightings
        # return the cached canonical instance instead.
        inst = super(FrozenMonosaccharideResidue, cls).from_monosaccharide(monosaccharide, *args, **kwargs)
        if str(inst) not in inst.get_cache():
            inst.get_cache()[str(inst)] = inst
            inst._frozen = True
        else:
            inst = inst.get_cache()[str(inst)]
        return inst

    def __init__(self, *args, **kwargs):
        self._total_composition = None
        self._mass = None
        # _name is left undefined to use a fast-path in __str__ by not testing for
        # presence first.
        # self._name = None
        # self._hash = None
        super(FrozenMonosaccharideResidue, self).__init__(*args, **kwargs)
        self._frozen = kwargs.get("_frozen", False)

    def __setattr__(self, key, value):
        # Enforce immutability once frozen, except for the memoization slots
        # listed in _attribute_caching_slots.
        try:
            is_frozen = self._frozen
        except AttributeError:
            # during early construction _frozen may not exist yet
            is_frozen = False
        if is_frozen and key not in FrozenMonosaccharideResidue._attribute_caching_slots:
            # evict the (now suspect) cached instance before failing
            # NOTE(review): FrozenError is not imported in this module's
            # visible import block — confirm it is in scope.
            self.get_cache().pop(str(self), None)
            raise FrozenError("Cannot change a frozen object")
        else:
            object.__setattr__(self, key, value)

    def __repr__(self):  # pragma: no cover
        return "FrozenMonosaccharideResidue(%s)" % self.name()

    def __hash__(self):  # pragma: no cover
        """Obtain a hash value from `self` based on :meth:`MonosaccharideResidue.name`.

        Returns
        -------
        int
        """
        try:
            return self._hash
        except AttributeError:
            # memoize on first use; _hash is a caching slot, writable even
            # when frozen
            self._hash = hash(str(self))
            return self._hash

    def _update_hash(self):
        # Recompute and store the hash from the current serialized name.
        self._hash = hash(str(self))
        return self._hash

    def __eq__(self, other):
        '''
        Test for equality between :class:`MonosaccharideResidue` instances by comparing
        the result of :meth:`MonosaccharideResidue.name` calls between `self` and `other`.

        :meth:`MonosaccharideResidue.name` is an alias of :func:`to_iupac_lite` called on `self`
        '''
        if isinstance(other, MonosaccharideResidue):
            try:
                # fast path: both sides have a cached name
                return self._name == other._name
            except AttributeError:
                return str(self) == str(other)
        elif isinstance(other, str):
            return str(self) == other
        # Remaining values are neither residues nor strings, so the second
        # check below always fires and the method returns False (including
        # for None).
        if (other is None):
            return False
        if not isinstance(other, (MonosaccharideResidue, str)):
            return False

    def _save_to_cache(self):
        # Store this instance under its serialized name.
        self.get_cache()[str(self)] = self

    def __str__(self):
        # EAFP memoization of the iupac_lite name (see __init__ note).
        try:
            return self._name
        except AttributeError:
            name = to_iupac_lite(self)
            self._name = name
            return name

    def clone(self, *args, **kwargs):
        '''
        Copies just this |Monosaccharide| and its |Substituent|s, creating a separate instance
        with the same data. All mutable data structures are duplicated and distinct from the original.

        Does not copy any :attr:`links` as this would cause recursive duplication of the entire |Glycan|
        graph.

        Parameters
        ----------
        prop_id: :class:`bool`
            Whether to copy :attr:`id` from ``self`` to the new instance
        fast: :class:`bool`
            Whether to use the fast-path initialization process in :meth:`Monosaccharide.__init__`
        monosaccharide_type: :class:`type`
            A subclass of :class:`Monosaccharide` to use

        Returns
        -------
        :class:`Monosaccharide`
        '''
        # Frozen instances are immutable, so cloning to the same type can
        # safely return self; cloning to another type falls through.
        if self._frozen and kwargs.get(
                "monosaccharide_type",
                FrozenMonosaccharideResidue) is FrozenMonosaccharideResidue:
            return self
        else:
            return super(FrozenMonosaccharideResidue, self).clone(*args, **kwargs)

    def __getstate__(self):
        state = super(FrozenMonosaccharideResidue, self).__getstate__()
        # persist the memoized name and composition alongside parent state
        state['_name'] = str(self)
        state['_total_composition'] = self.total_composition()
        return state

    def __setstate__(self, state):
        # Unfreeze before restoring so __setattr__ permits the writes below.
        self._frozen = False
        self._total_composition = state.get('_total_composition')
        self._name = state.get('_name')
        self._hash = hash(str(self))
        # NOTE(review): _frozen is not re-set to True here — presumably the
        # parent __setstate__ restores it from `state`; confirm.
        super(FrozenMonosaccharideResidue, self).__setstate__(state)

    @classmethod
    def get_cache(cls):
        # Returns the name-mangled class-level cache; shared by subclasses
        # since the mangled attribute belongs to this class.
        return cls.__cache

    @classmethod
    def from_iupac_lite(cls, string):
        cache = cls.get_cache()
        try:
            return cache[string]
        except KeyError:
            result = from_iupac_lite(string, residue_class=cls)
            if string not in cache:
                # alias this spelling to an existing equal canonical
                # instance when one is already cached
                for k, v in cache.items():
                    if v == result:
                        cache[string] = v
                        break
                else:
                    cache[string] = result
            return result

    def total_composition(self):
        if self._frozen:
            if self._total_composition is None:
                # memoize, and force mass to be recomputed from it
                self._total_composition = super(FrozenMonosaccharideResidue, self).total_composition()
                self._mass = None
            return self._total_composition
        else:
            return super(FrozenMonosaccharideResidue, self).total_composition()

    def mass(self, average=False, charge=0, mass_data=None, substituents=True):
        '''
        Calculates the total mass of ``self``.

        Parameters
        ----------
        average: bool, optional, defaults to False
            Whether or not to use the average isotopic composition when calculating masses.
            When ``average == False``, masses are calculated using monoisotopic mass.
        charge: int, optional, defaults to 0
            If charge is non-zero, m/z is calculated, where m is the theoretical mass, and z is ``charge``
        mass_data: dict, optional
            If mass_data is None, standard NIST mass and isotopic abundance data are used. Otherwise the
            contents of mass_data are assumed to contain elemental mass and isotopic abundance information.
            Defaults to :const:`None`.
        substituents: bool, optional, defaults to True
            Whether or not to include substituents' masses.

        Returns
        -------
        :class:`float`

        See also
        --------
        :func:`glypy.composition.composition.calculate_mass`
        '''
        # Fast path: memoize the default monoisotopic neutral mass on
        # frozen instances.
        if not average and charge == 0 and mass_data is None and self._frozen:
            if self._mass is None:
                self._mass = self.total_composition().calc_mass()
            return self._mass
        return self.total_composition().calc_mass(average=average, charge=charge, mass_data=mass_data)
class SubstituentResidue(Substituent, ResidueBase):
    r'''
    Represent substituent molecules unassociated with a specific
    monosaccharide residue.

    .. note::
        :class:`SubstituentResidue`'s composition value includes the losses for forming a bond between
        a monosaccharide residue and the substituent.

    Attributes
    ----------
    name: str
        As in |Substituent|, but with :attr:`SubstituentResidue.sigil` prepended.
    composition: |Composition|
    links: |OrderedMultiMap|
    _order: |int|
    '''
    #: All substituent string identifiers are prefixed with this character
    #: for the :func:`from_iupac_lite` parser
    sigil = "@"

    def __init__(self, name, composition=None, id=None, links=None,
                 can_nh_derivatize=None, is_nh_derivatizable=None, derivatize=False,
                 attachment_composition=None):
        # Accept names with or without the '@' sigil; reject the '#' sigil
        # reserved for MolecularComposition.
        if name.startswith(SubstituentResidue.sigil):
            name = name[1:]
        elif name.startswith(MolecularComposition.sigil):
            raise TypeError("Invalid Sigil. SubstituentResidue instances must be given names with either"
                            " no sigil prefix or with '@'")
        super(SubstituentResidue, self).__init__(
            name=name, composition=composition, links=links, id=id,
            can_nh_derivatize=can_nh_derivatize, is_nh_derivatizable=is_nh_derivatizable,
            derivatize=derivatize, attachment_composition=attachment_composition)
        self._residue_name = SubstituentResidue.sigil + self._name
        # Deduct the composition lost when bonding to a monosaccharide:
        # the attachment composition plus one hydrogen
        # (_backsolve_original_composition reverses the hydrogen part).
        self.composition -= self.attachment_composition
        self.composition -= {"H": 1}
        self._hash = None

    def __hash__(self):  # pragma: no cover
        """Obtain a hash value from `self` based on :attr:`name`.

        Returns
        -------
        int
        """
        try:
            # memoize; the AttributeError arm covers instances restored
            # without a _hash attribute
            if self._hash is None:
                self._hash = hash(self._residue_name)
            return self._hash
        except AttributeError:
            return hash(self._residue_name)

    def __getstate__(self):
        state = super(SubstituentResidue, self).__getstate__()
        # the sigil-prefixed name is not part of the parent's state
        state['_residue_name'] = self._residue_name
        return state

    def __setstate__(self, state):
        super(SubstituentResidue, self).__setstate__(state)
        self._residue_name = state.get("_residue_name")

    def to_iupac_lite(self):
        # The serialized form is simply the sigil-prefixed name.
        return self._residue_name

    __str__ = to_iupac_lite

    def __repr__(self):  # pragma: no cover
        return "SubstituentResidue(%s)" % self._residue_name

    @classmethod
    def from_iupac_lite(cls, name):
        return cls(name)

    def __eq__(self, other):
        if (other is None):
            return False
        if isinstance(other, str):
            # strings compare against the sigil-prefixed form
            return other == self._residue_name
        if not isinstance(other, SubstituentResidue):
            return False
        return self.name == other.name

    def __ne__(self, other):  # pragma: no cover
        return not self == other

    def _backsolve_original_composition(self):
        # Reverse the bond-formation hydrogen deduction made in __init__.
        comp = super(SubstituentResidue, self)._backsolve_original_composition()
        comp += {"H": 1}
        return comp

    def copy_underivatized(self):
        """Return a clone of this residue with derivatization removed."""
        inst = self.clone()
        strip_derivatization(inst)
        return inst
class MolecularComposition(MoleculeBase, ResidueBase):  # pragma: no cover
    """A named, fixed elemental composition treated as a residue-like unit.

    Serialized in `iupac_lite` notation as ``#name#formula``, framed by
    :attr:`sigil`.
    """
    sigil = "#"
    __slots__ = ('name', 'composition', '_hash')

    def __init__(self, name, composition):
        self.name = name
        self.composition = composition
        self._hash = None

    def mass(self, average=False, charge=0, mass_data=None):
        """Mass of the stored composition; see :meth:`Composition.calc_mass`."""
        return self.composition.calc_mass(
            average=average, charge=charge, mass_data=mass_data)

    def __repr__(self):
        return "%s%s%s%s" % (
            self.sigil, self.name, self.sigil,
            formula(self.composition))

    # the iupac_lite text form is identical to repr
    to_iupac_lite = __repr__

    def __reduce__(self):
        return self.__class__, (self.name, self.composition)

    def open_attachment_sites(self, *args, **kwargs):
        # A bare molecular composition exposes no attachment sites.
        return 0

    def clone(self):
        """Return a copy holding an independent :class:`Composition`."""
        return self.__class__(self.name, Composition(self.composition))

    def total_composition(self):
        return self.composition.clone()

    @classmethod
    def from_iupac_lite(cls, string):
        """Parse a ``#name#formula`` string into a :class:`MolecularComposition`."""
        if not string.startswith(cls.sigil):
            raise TypeError("%s does not start with header %s" % (string, cls.sigil))
        _, parsed_name, parsed_formula = string.split("#")
        return cls(parsed_name, Composition(parsed_formula))

    def __hash__(self):  # pragma: no cover
        """Obtain a hash value from `self` based on :attr:`name`.

        Returns
        -------
        int
        """
        try:
            cached = self._hash
        except AttributeError:
            # instances restored without the _hash slot populated
            return hash(self.name)
        if cached is None:
            cached = self._hash = hash(self.name)
        return cached

    def __eq__(self, other):
        try:
            return self.name == other or self.name == other.name
        except AttributeError:
            # other has no .name; fall back to its string form
            return self.name == str(other)

    def __ne__(self, other):
        return not (self == other)
class _CompositionBase(dict):
    """dict subclass providing low-level storage for glycan compositions.

    Adds parse-free item access helpers, pickling support, and the shared
    `iupac_lite` serialization routine. A C-accelerated implementation may
    replace this class when the compiled extension is importable.
    """

    def _setitem_fast(self, key, value):
        # bypass any key-parsing __setitem__ defined by subclasses
        dict.__setitem__(self, key, value)

    def _getitem_fast(self, key):
        # bypass subclass __getitem__; absent keys count as zero
        return dict.get(self, key, 0)

    def __reduce__(self):
        return self.__class__, (), self.__getstate__()

    def __getstate__(self):
        return {
            'mapping': dict(self),
            'reducing_end': self._reducing_end,
            'composition_offset': self._composition_offset
        }

    def __setstate__(self, state):
        self.update(state['mapping'])
        self._reducing_end = state['reducing_end']
        self._composition_offset = state['composition_offset']

    @classmethod
    def _empty(cls):
        """Create a blank instance without running ``__init__``."""
        inst = cls.__new__(cls)
        inst._composition_offset = water_composition.clone()
        inst._reducing_end = None
        inst._mass = None
        return inst

    def _update_from_typed_map(self, template, copy_nodes=False):
        """Copy counts from *template*, optionally cloning each residue key."""
        for name, count in template.items():
            entry = name.clone() if copy_nodes else name
            self._setitem_fast(entry, count)
        reduced = template.reducing_end
        if reduced is not None:
            self.reducing_end = reduced.clone()
        self._mass = None

    def serialize(self):
        """Convert a glycan composition into a curly brace-enclosed string specifying
        pairs of `iupac_lite` and a integer count.

        If the glycan is reduced, it will be appended to the closing brace following a
        `$` character.

        Returns
        -------
        str
        """
        body = '; '.join(
            "{}:{}".format(str(k), v)
            for k, v in sorted(
                self.items(), key=lambda kv: (kv[0].mass(), str(kv[0])))
            if v != 0)
        form = "{%s}" % body
        reduced = self._reducing_end
        if reduced is not None:
            form = "%s$%s" % (form, formula(reduced.total_composition()))
        return form
# Prefer the C-accelerated _CompositionBase from the compiled extension when
# available, replacing the pure-Python class defined above; silently keep
# the pure-Python fallback otherwise.
try:
    from glypy._c.structure.glycan_composition import _CompositionBase
except ImportError:
    pass
class GlycanComposition(_CompositionBase, SaccharideCollection):
"""
Describe a glycan as a collection of :class:`MonosaccharideResidue` counts without
explicit linkage information relating how each monosaccharide is connected to its neighbors.
This class subclasses |dict|, and assumes that keys will either be :class:`MonosaccharideResidue`
instances, :class:`SubstituentResidue` instances, or strings in `iupac_lite` format which will be parsed
into one of these types. While other types may be used, this is not recommended. All standard |dict| methods
are supported.
|GlycanComposition| objects may be derivatized just as |Glycan| objects are, with
:func:`glypy.composition.composition_transform.derivatize` and
:func:`glypy.composition.composition_transform.strip_derivatization`.
GlycanComposition objects also support composition arithmetic, and can be added or subtracted from each other
or multiplied by an integer.
As GlycanComposition is not a complete structure, they cannot be translated into text formats as
full |Glycan| objects are. They may instead be converted to and from a short-form text notation using
:meth:`GlycanComposition.serialize` and reconstructed from this format using :meth:`GlycanComposition.parse`.
Attributes
----------
reducing_end : ReducedEnd
Describe the reducing end of the aggregate without binding it to a specific monosaccharide.
This will contribute to composition and mass calculations.
_composition_offset: CComposition
Account for the one water molecule's worth of composition left over from applying the "residue"
transformation to each monosaccharide in the aggregate.
"""
_monosaccharide_type = MonosaccharideResidue
_key_parser = staticmethod(from_iupac_lite)
@classmethod
def from_glycan(cls, glycan):
"""
Convert a |Glycan| into a |GlycanComposition|.
Parameters
----------
glycan : :class:`~.Glycan`
The instance to be converted
Returns
-------
GlycanComposition
"""
inst = cls()
glycan = tree(glycan)
inst.extend(glycan)
inst.reducing_end = glycan.reducing_end
deriv = has_derivatization(glycan.root)
if deriv:
inst._composition_offset += (
deriv.total_composition() - deriv.attachment_composition_loss()) * 2
return inst
    def __init__(self, *args, **kwargs):
        """Initialize a :class:`GlycanComposition` using the provided objects or keyword
        arguments, imitating the :class:`dict` initialization signature.
        If a :class:`Mapping` is provided as a positional argument, it will be used as a
        template. If arbitrary keyword arguments are provided, they will be interpreted
        using :meth:`update`. As a special case, if another :class:`GlycanComposition` is
        provided, its :attr:`reducing_end` attribute will also be copied.
        Parameters
        ----------
        *args:
            Arbitrary positional arguments
        **kwargs:
            Arbitrary keyword arguments
        """
        # dict.__init__ just calls C update method. Expensive parameter parsing
        # _CompositionBase.__init__(self)
        self._reducing_end = None
        self._mass = None
        if args or kwargs:
            self.update(*args, **kwargs)
        if args:
            template = args[0]
            if isinstance(template, GlycanComposition):
                # Copy the template's reducing end and composition offset too.
                reduced = template.reducing_end
                if reduced is not None:
                    self.reducing_end = reduced.clone()
                self._composition_offset = template._composition_offset.clone()
            else:
                self._composition_offset = water_composition.clone()
        else:
            # Default offset: one water molecule (see class docstring).
            self._composition_offset = water_composition.clone()
def __setitem__(self, key, value):
"""
Set the quantity of `key` to `value`
If `key` is a string, it will be passed through :func:`from_iupac_lite`
If `key` has a reducing end value, that reducing end will be set on `self`
Parameters
----------
key : str, MonosaccharideResidue, SubstituentResidue, or MolecularComposition
The entity to store
value : int
The value to store
"""
if isinstance(key, basestring):
key = self._key_parser(key)
if key.node_type is Monosaccharide.node_type and key.reducing_end is not None:
self.reducing_end = key.reducing_end
key = key.clone()
key.reducing_end = None
_CompositionBase.__setitem__(self, key, int(value))
self._mass = None
    def __getitem__(self, key):
        """
        Get the quantity of `key`, returning 0 when the key is absent.
        If `key` is a string, it will be passed through :func:`from_iupac_lite`
        Parameters
        ----------
        key : str, MonosaccharideResidue, SubstituentResidue, or MolecularComposition
            The entity to retrieve the count of
        Returns
        -------
        int
        """
        if isinstance(key, basestring):
            key = self._key_parser(key)
        try:
            return _CompositionBase.__getitem__(self, key)
        except KeyError:
            # Missing entries read as a zero count rather than raising.
            return 0
def __delitem__(self, key):
if isinstance(key, basestring):
key = self._key_parser(key)
_CompositionBase.__delitem__(self, key)
self._mass = None
def mass(self, average=False, charge=0, mass_data=None):
'''
Calculates the total mass of ``self``.
Parameters
----------
average: bool, optional, defaults to False
Whether or not to use the average isotopic composition when calculating masses.
When ``average == False``, masses are calculated using monoisotopic mass.
charge: int, optional, defaults to 0
If charge is non-zero, m/z is calculated, where m is the theoretical mass, and z is ``charge``
mass_data: dict, optional
If mass_data is None, standard NIST mass and isotopic abundance data are used. Otherwise the
contents of mass_data are assumed to contain elemental mass and isotopic abundance information.
Defaults to :const:`None`.
Returns
-------
:class:`float`
See also
--------
:func:`glypy.composition.composition.calculate_mass`
'''
if self._mass is not None and charge == 0:
return self._mass
if charge == 0:
mass = self._composition_offset.mass
for residue_type, count in self.items():
mass += residue_type.mass(average=average, charge=0, mass_data=mass_data) * count
if self._reducing_end is not None:
mass += self._reducing_end.mass(average=average, charge=0, mass_data=mass_data)
self._mass = mass
else:
mass = self.total_composition().calc_mass(average=average, charge=charge, mass_data=mass_data)
return mass
    def update(self, *args, **kwargs):
        """Update counts from a mapping, a (name, count) pair, or keyword
        arguments, skipping zero counts; invalidates the cached mass."""
        if len(args) == 1:
            if isinstance(args[0], Mapping):
                for name, count in args[0].items():
                    if count != 0:
                        self[name] = count
            else:
                # NOTE(review): this unpacks ``args`` itself, so a lone
                # positional must be a single (name, count) pair; a dict-style
                # iterable of pairs would unpack incorrectly — confirm intent.
                for name, count in args:
                    if count != 0:
                        self[name] = count
        for name, count in kwargs.items():
            if count != 0:
                self[name] = count
        self._mass = None
    def extend(self, *args):
        """Add one count of each residue in ``args``.

        Accepts residues directly, raw |Monosaccharide| objects (converted),
        or a single |Glycan| whose monosaccharide nodes are converted.
        """
        if not isinstance(args[0], MonosaccharideResidue):
            if isinstance(args[0], (Monosaccharide)):
                args = map(MonosaccharideResidue.from_monosaccharide, args)
            elif isinstance(args[0], Glycan):
                # Only monosaccharide nodes contribute; substituents are skipped.
                args = map(
                    MonosaccharideResidue.from_monosaccharide,
                    [node for node in args[0] if node.node_type is MonosaccharideResidue.node_type])
            else:
                raise TypeError(
                    "Can't convert {} to MonosaccharideResidue".format(
                        type(args[0])))
        for residue in args:
            self[residue] += 1
def __iadd__(self, other):
for elem, cnt in (other.items()):
self[elem] += cnt
return self
def __add__(self, other):
result = self.clone()
for elem, cnt in other.items():
result[elem] += cnt
return result
    def __radd__(self, other):
        # Addition of counts is commutative; reuse __add__.
        return self + other
def __isub__(self, other):
for elem, cnt in other.items():
self[elem] -= cnt
return self
def __sub__(self, other):
result = self.clone()
for elem, cnt in other.items():
result[elem] -= cnt
return result
    def __rsub__(self, other):
        # other - self == -(self - other)
        return (self - other) * (-1)
def __mul__(self, other):
if not isinstance(other, int):
raise TypeError(
'Cannot multiply Composition by non-integer',
other)
prod = {}
for k, v in self.items():
prod[k] = v * other
return self.__class__(prod)
    def __rmul__(self, other):
        # Integer scaling is commutative; reuse __mul__.
        return self * other
def __eq__(self, other):
if isinstance(other, basestring):
return str(self) == other
if not isinstance(other, Mapping):
return False
self_items = set([i for i in self.items() if i[1]])
other_items = set([i for i in other.items() if i[1]])
return self_items == other_items
    def __ne__(self, other):
        # Explicit negation of __eq__ (needed on Python 2, harmless on 3).
        return not (self == other)
    def __neg__(self):
        # Negate every count by scaling with -1.
        return -1 * self
    def __missing__(self, key):
        # Absent keys behave as zero counts.
        return 0
def __contains__(self, key):
if isinstance(key, basestring):
key = self._key_parser(key)
return _CompositionBase.__contains__(self, key)
    def drop_stems(self):
        """Strip stem detail from every residue in place, then merge the
        now-redundant keys; returns ``self``."""
        for t in self:
            drop_stem(t)
        self.collapse()
        return self
    def drop_positions(self):
        """Strip positional detail from every residue in place, then merge the
        now-redundant keys; returns ``self``."""
        for t in self:
            # Calls the module-level ``drop_positions`` function, not this method.
            drop_positions(t)
        self.collapse()
        return self
    def drop_configurations(self):
        """Strip configuration detail from every residue in place, then merge
        the now-redundant keys; returns ``self``."""
        for t in self:
            drop_configuration(t)
        self.collapse()
        return self
    def total_composition(self):
        '''
        Computes the sum of the composition of all |Monosaccharide| objects in ``self``
        Returns
        -------
        :class:`~glypy.composition.Composition`
        '''
        # Start from the stored offset, then add each residue's elemental
        # composition scaled by its count, plus the reducing end if present.
        comp = self._composition_offset.clone()
        for residue, count in self.items():
            comp += residue.total_composition() * count
        if self._reducing_end is not None:
            comp += self._reducing_end.total_composition()
        return comp
def collapse(self):
'''
Merge redundant keys.
After performing a structure-detail removing operation like
:meth:`drop_positions`, :meth:`drop_configurations`, or :meth:`drop_stems`,
monosaccharide keys may be redundant.
`collapse` will merge keys which refer to the same type of molecule.
'''
items = list(self.items())
self.clear()
for k, v in items:
self[k] += v
return self
    def query(self, query, exact=True, **kwargs):
        """Return the total count of all residues in `self` which
        match `query` using :func:`glypy.io.nomenclature.identity.is_a`
        Parameters
        ----------
        query : :class:`~.MonosaccharideResidue` or :class:`str`
            A monosaccharide residue or a string which will be converted into one by
            :func:`from_iupac_lite` to test for an `is-a` relationship with.
        exact : bool, optional
            Passed to :func:`~.is_a`. Explicitly |True| by default
        **kwargs
            Passed to :func:`~.is_a`
        Returns
        -------
        int
            The total count of all residues which satisfy the `is-a` relationship
        See Also
        --------
        :func:`glypy.io.nomenclature.identity.is_a`
        """
        # Imported locally to avoid a circular import at module load time.
        from glypy.io.nomenclature.identity import is_a
        if isinstance(query, basestring):
            query = self._key_parser(query)
        count = 0
        for key, value in self.items():
            if is_a(key, query, exact=exact, **kwargs):
                count += value
        return count
    def reinterpret(self, references, exact=True, **kwargs):
        """Aggregate the counts of all residues in `self` for each
        monosaccharide in `references` satisfying an `is-a` relationship,
        collapsing multiple residues to a single key. Any residue not
        aggregated will be preserved as-is.
        .. note::
            The order of ``references`` matters as any residue matched by
            a reference will not be considered for later references.
        Parameters
        ----------
        references : :class:`Iterable` of :class:`~.MonosaccharideResidue`
            The monosaccharides with which to test for an `is-a` relationship
        exact : bool, optional
            Passed to :func:`~.is_a`. Explicitly |True| by default
        **kwargs
            Passed to :func:`~.is_a`
        Returns
        -------
        :class:`~.GlycanComposition`
            self after key collection and collapse
        """
        # Imported locally to avoid a circular import at module load time.
        from glypy.io.nomenclature.identity import is_a
        new_counts = []
        pairs = list(self.items())
        remaining_pairs = []
        for ref in references:
            count = 0
            for key, value in pairs:
                if is_a(key, ref, exact=exact, **kwargs):
                    count += value
                else:
                    remaining_pairs.append((key, value))
            if count > 0:
                new_counts.append((ref, count))
            # Only residues not yet claimed are offered to later references.
            pairs = remaining_pairs
            remaining_pairs = []
        self.clear()
        for key, value in new_counts:
            self[key] = value
        # Unmatched residues are preserved unchanged.
        for key, value in pairs:
            self[key] = value
        return self
    @property
    def reducing_end(self):
        # Aggregate-level reducing end; may be None.
        return self._reducing_end
    @reducing_end.setter
    def reducing_end(self, value):
        # Changing the reducing end changes the mass, so drop the cache first.
        self._invalidate()
        self._reducing_end = value
    def set_reducing_end(self, value):
        # Explicit-method twin of the ``reducing_end`` property setter.
        self._invalidate()
        self._reducing_end = value
    def _invalidate(self):
        # Drop the cached mass so it is recomputed on next access.
        self._mass = None
    @property
    def composition_offset(self):
        # Residual composition (one water by default; see class docstring).
        return self._composition_offset
    @composition_offset.setter
    def composition_offset(self, value):
        # The offset contributes to mass, so drop the cache first.
        self._invalidate()
        self._composition_offset = value
def clone(self, propogate_composition_offset=True, copy_nodes=True):
dup = self._empty()
dup._update_from_typed_map(self, copy_nodes=copy_nodes)
if not propogate_composition_offset:
dup._composition_offset = Composition('H2O')
else:
dup._composition_offset = self._composition_offset.clone()
return dup
    # inheriting from dict overwrites MoleculeBase.copy
    def copy(self, *args, **kwargs):
        """Alias for :meth:`clone`, restoring the copy() lost to dict."""
        return self.clone(*args, **kwargs)
    def __str__(self):
        # Human-readable form is the serialized composition string.
        return self.serialize()
@classmethod
def _get_parse_tokens(cls, string):
string = str(string)
parts = string.split('$')
if len(parts) == 1:
tokens = parts[0]
reduced = None
elif len(parts) == 2:
tokens, reduced = parts
else:
raise ValueError("Could not interpret %r" % string)
tokens = tokens[1:-1].split('; ')
return tokens, reduced
    def _handle_reduction_and_derivatization(self, reduced, deriv):
        # Apply the optional "$<formula>" reduction and any derivative
        # detected while parsing a serialized composition.
        if reduced:
            reduced = ReducedEnd(Composition(reduced))
            self.reducing_end = reduced
        if deriv:
            # include_reducing_end=False: presumably the reducing end is
            # already derivatized in the serialized form — TODO confirm.
            self._derivatized(deriv.clone(), make_counter(uid()), include_reducing_end=False)
    @classmethod
    def parse(cls, string):
        """Parse a :class:`str` into a :class:`GlycanComposition`.
        This will parse the format produced by :meth:`serialize`
        Parameters
        ----------
        string : :class:`str`
            The string to parse
        Returns
        -------
        :class:`GlycanComposition`
        """
        tokens, reduced = cls._get_parse_tokens(string)
        inst = cls._empty()
        deriv = None
        for token in tokens:
            try:
                residue, count = _parse_name_count(token)
            except ValueError:
                # The empty composition serializes as "{}" and yields one
                # unsplittable token; anything else is a real error.
                if string == "{}":
                    return inst
                else:
                    raise ValueError("Malformed Token, %s" % (token,))
            key = cls._key_parser(residue)
            if "^" in residue:
                # "^" marks a derivatized residue; remember the derivative so
                # the whole aggregate can be derivatized afterwards.
                _deriv = has_derivatization(key)
                if _deriv:
                    deriv = _deriv
            inst._setitem_fast(key, count)
        inst._handle_reduction_and_derivatization(reduced, deriv)
        return inst
    def _derivatized(self, substituent, id_base, include_reducing_end=True):
        # NOTE(review): ``n`` appears to track open terminal attachment sites
        # (2 for the chain ends), reduced by substituent residue counts —
        # confirm against glypy's derivatization model.
        n = 2
        items = list(self.items())
        self.clear()
        for k, v in items:
            self._setitem_fast(k, v)
            if k.node_type is Substituent.node_type:
                n -= v
        self._composition_offset += (
            substituent.total_composition() -
            substituent.attachment_composition_loss() * 2) * n
        if self._reducing_end is not None and include_reducing_end:
            _derivatize_reducing_end(self._reducing_end, substituent, id_base)
        self.collapse()
        # Derivatization changes the mass; drop all caches.
        self._invalidate()
    def _strip_derivatization(self):
        # Reset the offset to plain water, discarding any derivatization offset.
        self._composition_offset = Composition("H2O")
        if self._reducing_end is not None:
            _strip_derivatization_reducing_end(self._reducing_end)
        self.collapse()
        self._invalidate()
    def _invalidate(self):
        # NOTE(review): duplicate of the ``_invalidate`` defined earlier in
        # this class (this later definition wins); consider removing one.
        self._mass = None
# Module-level convenience aliases for the common constructors.
from_glycan = GlycanComposition.from_glycan
parse = GlycanComposition.parse
class FrozenGlycanComposition(GlycanComposition):
    '''
    A subclass of |GlycanComposition| which uses :class:`FrozenMonosaccharideResidue` instead
    of |MonosaccharideResidue| which reduces the number of times :func:`from_iupac_lite` is called.
    Only use this type if residue names are pre-validated, residue types will not be transformed,
    and when creating many, many instances. :func:`from_iupac_lite` invokes expensive introspection
    algorithms which can be costly when repeatedly manipulating the same residue types.
    '''
    # Cached serialized form; cleared by _invalidate on any mutation.
    _str = None
    _monosaccharide_type = FrozenMonosaccharideResidue
    _key_parser = staticmethod(FrozenMonosaccharideResidue.from_iupac_lite)
    def __setitem__(self, key, value):
        # Keys are always normalized through the frozen parser.
        key = self._key_parser(str(key))
        _CompositionBase.__setitem__(self, key, value)
        self._invalidate()
    def __getitem__(self, key):
        # Skip re-parsing when the key is already a frozen residue.
        if not isinstance(key, FrozenMonosaccharideResidue):
            key = self._key_parser(str(key))
        return _CompositionBase.__getitem__(self, key)
    def __delitem__(self, key):
        key = self._key_parser(str(key))
        _CompositionBase.__delitem__(self, key)
        self._invalidate()
    @classmethod
    def parse(cls, string):
        """Parse the format produced by :meth:`serialize`, using the frozen
        key parser bound to a local for speed inside the token loop."""
        tokens, reduced = cls._get_parse_tokens(string)
        inst = cls._empty()
        deriv = None
        key_parser = cls._key_parser
        for token in tokens:
            try:
                residue, count = _parse_name_count(token)
            except ValueError:
                # The empty composition serializes as "{}".
                if string == "{}":
                    return inst
                else:
                    raise ValueError("Malformed Token, %s" % (token,))
            key = key_parser(residue)
            if "^" in residue:
                # "^" marks a derivatized residue; remember the derivative.
                _deriv = has_derivatization(key)
                if _deriv:
                    deriv = _deriv
            inst._setitem_fast(key, count)
        inst._handle_reduction_and_derivatization(reduced, deriv)
        return inst
    def serialize(self):
        # Serialization is cached until the composition is mutated.
        if self._str is None:
            self._str = super(FrozenGlycanComposition, self).serialize()
        return self._str
    __str__ = serialize
    def __contains__(self, key):
        if isinstance(key, basestring):
            key = self._key_parser(key)
        return _CompositionBase.__contains__(self, key)
    def thaw(self):
        """Convert this :class:`FrozenGlycanComposition` into a :class:`GlycanComposition`
        that is not frozen.
        Returns
        -------
        :class:`GlycanComposition`
        """
        # Round-trips through the serialized text form.
        return GlycanComposition.parse(self)
    def extend(self, *args):
        """Add one count of each residue; accepts frozen residues, raw
        |Monosaccharide| objects, or a single |Glycan|."""
        if not isinstance(args[0], FrozenMonosaccharideResidue):
            if isinstance(args[0], (Monosaccharide)):
                args = map(FrozenMonosaccharideResidue.from_monosaccharide, args)
            elif isinstance(args[0], Glycan):
                args = map(
                    FrozenMonosaccharideResidue.from_monosaccharide,
                    [node for node in args[0]
                     if node.node_type is FrozenMonosaccharideResidue.node_type])
            else:
                raise TypeError(
                    "Can't convert {} to FrozenMonosaccharideResidue".format(
                        type(args[0])))
        for residue in args:
            self[residue] += 1
    def _validate(self):
        '''Populate the caching fields used for common behaviors, e.g.
        mass and string representation.
        '''
        if self._mass is None:
            self.mass()
        if self._str is None:
            self.serialize()
    def _invalidate(self):
        '''Clear the caching fields, forcing them to all be recalculated
        when next requested.
        '''
        self._mass = None
        self._str = None
        # NOTE(review): ``_total_composition`` is only ever cleared here in
        # the visible code — confirm where it is populated.
        self._total_composition = None
    def clone(self, propogate_composition_offset=True, copy_nodes=False):
        # Same as GlycanComposition.clone, but defaults to sharing (not
        # copying) the immutable residue keys.
        dup = self._empty()
        dup._update_from_typed_map(self, copy_nodes=copy_nodes)
        if not propogate_composition_offset:
            dup._composition_offset = Composition('H2O')
        else:
            dup._composition_offset = self._composition_offset.clone()
        return dup
class FrozenError(ValueError):
    # NOTE(review): no raisers are visible in this file; presumably signals
    # illegal mutation of a frozen composition elsewhere in the package.
    pass
class HashableGlycanComposition(FrozenGlycanComposition):
    """A frozen composition that is hashable via its serialized string."""
    def __str__(self):
        self._validate()
        # Directly use internal cache variable to save time calling
        # the super method chain
        return self._str
    def __hash__(self):
        # Hash tracks the serialized form, consistent with string equality.
        return hash(str(self))
def _parse_name_count(string):
    """Split a serialized ``name:count`` token into ``(name, int(count))``.

    Raises ValueError (propagated from unpacking or int()) on malformed tokens.
    """
    name, count_text = string.split(":")
    return name, int(count_text)
# Prefer the C-accelerated parsing helpers when the optional extension is built.
try:
    _has_c = True
    from glypy._c.utils import get_parse_tokens, parse_name_count as _parse_name_count
    GlycanComposition._get_parse_tokens = get_parse_tokens
except ImportError:
    # Extension unavailable: the pure-Python fallbacks above stay in use.
    _has_c = False
|
import os
import pybullet as p
import pybullet_data
# Open a GUI-attached pybullet physics session.
p.connect(p.GUI)
# Load the Franka Panda arm from pybullet's bundled assets, anchored to the world.
pandaUid = p.loadURDF(os.path.join(pybullet_data.getDataPath(), "franka_panda/panda.urdf"), useFixedBase=True)
# Step the simulation forever (Ctrl+C to exit).
# Fix: removed a stray trailing "|" artifact that made this line a syntax error.
while True:
    p.stepSimulation()
"""
LC 26
Given an array of sorted numbers,
remove all duplicates from it. You should not use any extra space;
after removing the duplicates in-place return the length of the subarray that has no duplicate in it.
Example 1:
Input: [2, 3, 3, 3, 6, 9, 9]
Output: 4
Explanation: The first four elements after removing the duplicates will be [2, 3, 6, 9].
Example 2:
Input: [2, 2, 2, 11]
Output: 2
Explanation: The first two elements after removing the duplicates will be [2, 11].
"""
def remove_duplicates(arr):
    """Deduplicate a sorted list in place.

    Overwrites the front of ``arr`` with its unique values and returns the
    length of that unique prefix (the contents past it are unspecified).
    """
    write = 0
    previous = float('inf')  # sentinel: matches no real first element
    for value in arr:
        if value != previous:
            arr[write] = value
            previous = value
            write += 1
    return write
def main():
    """Smoke-test remove_duplicates against the examples from the prompt."""
    print(remove_duplicates([2, 3, 3, 3, 6, 9, 9]))  # expected: 4
    print(remove_duplicates([2, 2, 2, 11]))  # expected: 2
# Fix: guard the demo call so importing this module does not run it.
if __name__ == "__main__":
    main()
"""
Time O(N)
Space O(1)
"""
|
#!/usr/bin/env python3
import argparse
import sys
import os
from ibonPrinter import IBON
def main():
    """Parse CLI arguments and upload a file to the 7-11 iBon printer service.

    Returns
    -------
    str or None
        An error message when the file is missing (``sys.exit`` prints it and
        exits non-zero), otherwise ``None`` on success.
    """
    parser = argparse.ArgumentParser(
        prog='ibonprinter',
        description='7-11 iBon printer uploader.'
    )
    parser.add_argument(
        '--name',
        type=str,
        default=' ',
        help='User name'
    )
    parser.add_argument(
        '--email',
        type=str,
        default=' ',
        help='User email'
    )
    parser.add_argument(
        'file',
        type=str,
        help='Upload file'
    )
    args = parser.parse_args()
    if not os.path.isfile(args.file):
        # Fix: previous message "File is not existed" was broken English.
        return f'File does not exist: {args.file}'
    # TODO: check file ext
    # doc, docx, ppt, pptx, xls, xlsx, txt, ini, pdf, jpg, gif, bmp
    # TODO: check file size < 10M
    printer = IBON()
    result = printer.upload(
        args.file,
        user=args.name,
        email=args.email,
    )
    # Fix: surface the service response instead of silently discarding it.
    print(result)
if __name__ == "__main__":
    # main() returns an error string on failure; sys.exit prints it and
    # exits with a non-zero status in that case.
    sys.exit(main())
|
from setuptools import setup
# Trove classifiers describing the package for PyPI.
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Programming Language :: Python :: 3",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Topic :: Software Development :: Libraries",
    "Topic :: Utilities",
]
# Fix: pin the README encoding so installs don't depend on the platform
# default (e.g. cp1252 on Windows, which chokes on non-ASCII characters).
with open("README.md", "r", encoding="utf-8") as fp:
    long_description = fp.read()
setup(
    name="terminal-img",
    version="0.0.3",
    author="Pranav Baburaj",
    author_email="i.am.pranav.baburaj@gmail.com",
    url="https://github.com/pranavbaburaj/img",
    py_modules=["image", "cli"],
    description="Display images in the terminal",
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT",
    classifiers=classifiers,
    python_requires=">=3.6",
    install_requires=["pillow", "requests"],
    entry_points={
        "console_scripts": ["img=cli:main"],
    },
)
|
from collections import defaultdict
from hashlib import sha1
from typing import Any, List, Dict
from record import Record
def index_code(value: Any) -> int:
    """Map *value*'s string form to a stable integer via its SHA-1 digest."""
    digest = sha1(str(value).encode()).hexdigest()
    return int(digest, 16)
def create_index(
    collection: Dict[int, Record],
    attribute: str,
) -> Dict[int, List[int]]:
    """Group record ids by the hash code of the named attribute's value."""
    index: Dict[int, List[int]] = defaultdict(list)
    for record in collection.values():
        code = index_code(getattr(record, attribute))
        index[code].append(record.id)
    return index
|
from azureml.core import Workspace, Run, Experiment
from api import TesApi
import pytest
from azureml.exceptions import ServiceException
import datetime
def test_init():
    """Tests if init function works"""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    tes_api = TesApi(workspace)
    # TesApi must wrap exactly the workspace described by ./config/workspace.json.
    assert tes_api.ws._subscription_id == "48bbc269-ce89-4f6f-9a12-c6f91fcb772d"
    assert tes_api.ws._resource_group == "aml1p-rg"
    assert tes_api.ws._workspace_name == "aml1p-ml-wus2"
@pytest.mark.parametrize(
    "test_run_id,view",
    [
        ("dcc407c8-d905-4b54-9441-9e8772abdf8a", "mini"),
        ("dcc407c8-d905-4b54-9441-9e8772abdf8a", "full"),
        ("dcc407c8-d905-4b54-9441-9e8772abdf8a", "wrong_view"),
    ],
)
def test_get_descriptors(test_run_id, view):
    """get_descriptors returns run metadata for valid views and raises
    ValueError for unknown view names."""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    test_run = Run.get(workspace, test_run_id)
    if view not in ("mini", "full"):
        # Invalid view names must be rejected.
        with pytest.raises(ValueError):
            descriptors = TesApi.get_descriptors(test_run, view)
        return
    else:
        descriptors = TesApi.get_descriptors(test_run, view)
    # Fields present in both views.
    assert descriptors["runId"] == "dcc407c8-d905-4b54-9441-9e8772abdf8a"
    assert descriptors["status"] == "Failed"
    if view == "full":
        # Extra fields only surfaced by the "full" view.
        assert descriptors["startTimeUtc"] == "2022-05-11T01:53:20.168964Z"
        assert descriptors["endTimeUtc"] == "2022-05-11T02:04:51.030079Z"
        assert descriptors["services"] == {}
        assert descriptors["properties"] == {
            "azureml.runsource": "azureml.PipelineRun",
            "runSource": "Designer",
            "runType": "HTTP",
            "azureml.parameters": "{}",
            "azureml.continue_on_step_failure": "False",
            "azureml.pipelineComponent": "pipelinerun",
        }
        assert descriptors["submittedBy"] == "Fuhui Fang"
        assert descriptors["inputDatasets"].__len__() == 0
        assert descriptors["outputDatasets"].__len__() == 0
        # Log URLs carry SAS tokens, so only check the stable prefix.
        assert descriptors["logFiles"]["logs/azureml/executionlogs.txt"].startswith(
            "https://aml1pmlwus27954171068.blob.core.windows.net/azureml/ExperimentRun/dcid.dcc407c8-d905-4b54-9441-9e8772abdf8a/logs/azureml/executionlogs.txt?sv=2019-07-07"
        )
        assert descriptors["logFiles"]["logs/azureml/stderrlogs.txt"].startswith(
            "https://aml1pmlwus27954171068.blob.core.windows.net/azureml/ExperimentRun/dcid.dcc407c8-d905-4b54-9441-9e8772abdf8a/logs/azureml/stderrlogs.txt?sv=2019-07-07"
        )
        assert descriptors["logFiles"]["logs/azureml/stdoutlogs.txt"].startswith(
            "https://aml1pmlwus27954171068.blob.core.windows.net/azureml/ExperimentRun/dcid.dcc407c8-d905-4b54-9441-9e8772abdf8a/logs/azureml/stdoutlogs.txt?sv=2019-07-07"
        )
@pytest.mark.parametrize("view", ["mini", "full", "wrong_view_value"])
def test_list_tasks(view):
    """Tests if list_tasks returns a non-empty list of tasks, with non-empty values for the main keys"""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    tes_api = TesApi(workspace)
    if (view == "mini") or (view == "full"):
        tasks = tes_api.list_tasks(view=view)
    else:
        # Unknown view names must raise.
        with pytest.raises(ValueError):
            tasks = tes_api.list_tasks(view=view)
        return
    assert len(tasks) > 0
    assert tasks[0]["runId"]
    assert tasks[0]["status"]
    if view == "full":
        # submittedBy is only populated in the "full" view.
        assert tasks[0]["submittedBy"]
@pytest.mark.parametrize(
    "task_id,view",
    [
        ("dcc407c8-d905-4b54-9441-9e8772abdf8a", "mini"),
        ("dcc407c8-d905-4b54-9441-9e8772abdf8a", "full"),
        ("wrong_task_id", "mini"),
        ("wrong_task_id", "full"),
    ],
)
def test_get_task(task_id, view):
    """Tests if get_task returns a non-empty dictionary of task details"""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    tes_api = TesApi(workspace)
    if task_id == "wrong_task_id":
        # Unknown run ids surface as a ServiceException from the backend.
        with pytest.raises(ServiceException):
            task = tes_api.get_task(task_id, view)
        return
    else:
        task = tes_api.get_task(task_id, view)
    assert len(task) > 0
    assert task["runId"]
    assert task["status"]
    if view == "full":
        # submittedBy is only populated in the "full" view.
        assert task["submittedBy"]
def test_cancel_existing_task():
    """Tests if we can cancel an existing task."""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    # first submit a run and grab its id
    experiment = Experiment(workspace, "test_TES_API")
    run = experiment.start_logging(outputs=None, snapshot_directory=".")
    task_id = TesApi.get_descriptors(run, "mini")["runId"]
    # cancel it
    tes_api = TesApi(workspace)
    tes_api.cancel_task(task_id)
    # check it has been canceled (status reported by the service)
    task = tes_api.get_task(task_id, "mini")
    assert task["status"] == "Canceled"
def test_cancel_nonexisting_task():
    """Tests that trying to cancel a non-existing task throws an exception, as expected"""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    tes_api = TesApi(workspace)
    # The backend rejects unknown run ids with a ServiceException.
    with pytest.raises(ServiceException):
        tes_api.cancel_task("wrong_task_id")
def test_create_task_script():
    """Tests that we can create a basic task (run a script)."""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    tes_api = TesApi(workspace)
    # first we submit the task (timestamped name keeps submissions unique)
    run_id = tes_api.create_task(
        name="mock_task" + "_" + str(datetime.datetime.now()),
        description="A mock task created during unit tests (script with no arguments).",
        inputs=[],
        outputs=[],
        compute_target="cpu-cluster",
        environment="AzureML-minimal-ubuntu18.04-py37-cpu-inference",
        executors={
            "source_directory": "./tests/hello_world",
            "script": "hello.py",
            "command": [],
            "arguments": [],
        },
        volumes=None,
        tags={"test_type": "script"},
        task_group="test_TES_API",
    )
    # then we test that we can grab it, and finally we delete it
    descriptors = tes_api.get_task(run_id, "full")
    assert descriptors["runId"]
    assert descriptors["status"]
    tes_api.cancel_task(run_id)
def test_create_task_command_with_args():
    """Tests that we can create a basic task (run a command using some arguments)."""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    tes_api = TesApi(workspace)
    # first we submit the task (command-style executor, no script entry)
    run_id = tes_api.create_task(
        name="mock_task_command_" + str(datetime.datetime.now()),
        description="A mock task created during unit tests (command with arguments).",
        inputs=[],
        outputs=[],
        compute_target="cpu-cluster",
        environment="AzureML-minimal-ubuntu18.04-py37-cpu-inference",
        executors={
            "source_directory": "./tests/add_and_print",
            "script": "",
            "command": [
                "python",
                "add_and_print.py",
                "--operand_1",
                "2",
                "--operand_2",
                "3",
            ],
            "arguments": [],
        },
        volumes=None,
        tags={"test_type": "command_with_arguments"},
        task_group="test_TES_API",
    )
    # then we test that we can grab it, and finally we delete it
    descriptors = tes_api.get_task(run_id, "full")
    assert descriptors["runId"]
    assert descriptors["status"]
    tes_api.cancel_task(run_id)
def test_create_task_command_with_inputs():
    """Tests that we can create a basic task (run a command using some inputs)."""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    tes_api = TesApi(workspace)
    # first we submit the task (named datasets are passed through ``inputs``)
    run_id = tes_api.create_task(
        name="mock_task_command_inputs_" + str(datetime.datetime.now()),
        description="A mock task created during unit tests (command with inputs).",
        inputs=["mnist", "irisdata"],
        outputs=[],
        compute_target="cpu-cluster",
        environment="AzureML-minimal-ubuntu18.04-py37-cpu-inference",
        executors={
            "source_directory": "./tests/count_files",
            "script": "",
            "command": ["python", "count_files.py", "--argument_1", "foo"],
            "arguments": [],
        },
        volumes=None,
        tags={"test_type": "command_with_inputs"},
        task_group="test_TES_API",
    )
    # then we test that we can grab it, and finally we delete it
    descriptors = tes_api.get_task(run_id, "full")
    assert descriptors["runId"]
    assert descriptors["status"]
    tes_api.cancel_task(run_id)
def test_create_task_script_with_inputs():
    """Tests that we can create a basic task (run a script using some inputs)."""
    workspace = Workspace.from_config(path="./config", _file_name="workspace.json")
    tes_api = TesApi(workspace)
    # first we submit the task (script-style executor with extra arguments)
    run_id = tes_api.create_task(
        name="mock_task_script_inputs_" + str(datetime.datetime.now()),
        description="A mock task created during unit tests (script with inputs).",
        inputs=["mnist", "irisdata"],
        outputs=[],
        compute_target="cpu-cluster",
        environment="AzureML-minimal-ubuntu18.04-py37-cpu-inference",
        executors={
            "source_directory": "./tests/count_files",
            "script": "count_files.py",
            "command": [],
            "arguments": ["--argument_1", "foo"],
        },
        volumes=None,
        tags={"test_type": "script_with_inputs"},
        task_group="test_TES_API",
    )
    # then we test that we can grab it, and finally we delete it
    descriptors = tes_api.get_task(run_id, "full")
    assert descriptors["runId"]
    assert descriptors["status"]
    tes_api.cancel_task(run_id)
@pytest.mark.parametrize(
    "lst,inputs,expected_result",
    [
        ([], [], []),
        ([], ["dataset_1"], ["--input_data_1", "dataset_1"]),
        (["foo", "bar"], ["dataset_1"], ["foo", "bar", "--input_data_1", "dataset_1"]),
    ],
)
def test_update_list(lst, inputs, expected_result):
    """Checks that update_list appends an ``--input_data_<i>`` flag/value pair
    for each dataset name in ``inputs``."""
    # Fix: parameter renamed from ``list`` (shadowed the builtin) to ``lst``;
    # the parametrize argnames string is updated to match.
    result = TesApi.update_list(lst, inputs)
    assert result == expected_result
|
def run(**kwargs):
    """Airflow-style task callable: run a parameterized notebook via papermill.

    Expects ``kwargs['notebook']`` (input notebook path) and
    ``kwargs['out_notebook']`` (executed-notebook output path).
    """
    # Loud banner so this task is easy to spot in the task logs.
    print('####################$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$##############################')
    # if __debug__:
    # kwargs.update(notebook='irisdataset')
    notebook = kwargs['notebook']
    out_notebook = kwargs['out_notebook']
    # Imported lazily so importing this module doesn't require papermill.
    import papermill as pm
    pm.execute_notebook(
        notebook,
        out_notebook,
        # NOTE(review): dataset path is hard-coded to the Airflow container
        # layout — confirm before reusing elsewhere.
        parameters=dict(file='/opt/airflow/datasets/irisdataset.csv')
    )
# if __debug__:
# run()
import torch.nn as nn
import torch.optim as optim
from losses import *
from data_utlis import ProteinNetDataset
from torch.utils.data import DataLoader
class ModelConfig():
    """Bundle of RNN hyper-parameters plus a factory for the recurrent cell."""

    def __init__(self, in_dim, linear_out=20, cell='LSTM',
                 num_layers=2,
                 alphabet_size=20,
                 hidden_size=800,
                 bidirectional=True,
                 dropout=0.4):
        # Store every hyper-parameter verbatim; get_cell() reads them later.
        self.in_dim = in_dim
        self.linear_out = linear_out
        self.num_layers = num_layers
        self.alphabet_size = alphabet_size
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.cell = cell
        self.bidirectional = bidirectional

    def get_cell(self):
        """Return an ``nn.LSTM`` when ``cell == 'LSTM'``, otherwise an ``nn.GRU``."""
        rnn_cls = nn.LSTM if self.cell == 'LSTM' else nn.GRU
        return rnn_cls(self.in_dim, self.hidden_size, self.num_layers,
                       bidirectional=self.bidirectional, dropout=self.dropout)
class TrainingConfig():
    """Training hyper-parameters plus ready-made DataLoaders for the three
    ProteinNet splits, and factories for the optimizer and loss."""
    def __init__(self, pn_train, pn_valid, pn_test, epochs=30, log_interval=10, batch_size=32, optimizer='SGD', verbose=False, profile_gpu=False, loss='dRMSD', lr=1e-4):
        # Build one DataLoader per split; test uses batch_size=1.
        train_dataset = ProteinNetDataset(pn_train) #if self.window_size is None else ProteinNetWindowedDataset(pn_path)
        train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
        valid_dataset = ProteinNetDataset(pn_valid) #if self.window_size is None else ProteinNetWindowedDataset(pn_path)
        valid_loader = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
        test_dataset = ProteinNetDataset(pn_test) #if self.window_size is None else ProteinNetWindowedDataset(pn_path)
        test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=True, pin_memory=True)
        self.loaders = {'train': train_loader, 'test': test_loader, 'valid': valid_loader}
        self.epochs = epochs
        self.log_interval = log_interval
        self.batch_size = batch_size
        self.loss = loss
        self.optimizer = optimizer
        self.verbose = verbose
        self.profile_gpu = profile_gpu
        self.lr = lr
        if profile_gpu:
            # Optional line-level GPU memory profiling hook.
            from gpu_profile import gpu_profile
            import sys
            gpu_profile(frame=sys._getframe(), event='line', arg=None)
    def get_optimizer(self, parameters):
        # 'Adam' selects Adam; any other value falls back to SGD.
        if self.optimizer == 'Adam':
            return optim.Adam(parameters, lr=self.lr)
        return optim.SGD(parameters, lr=self.lr)
    def get_loss(self):
        # 'Angular' selects AngularLoss; any other value falls back to dRMSD.
        if self.loss == 'Angular':
            return AngularLoss()
        return dRMSD()
def build_configs(f):
    """Read an INI file with [MODEL] and [TRAINING] sections and return a
    ``(ModelConfig, TrainingConfig)`` pair built from its values."""
    import configparser
    config = configparser.ConfigParser()
    config.read(f)
    model_params = config['MODEL']
    train_params = config['TRAINING']
    # All INI values are strings; cast explicitly to the expected types.
    model_config = ModelConfig(int(model_params['in']), int(model_params['linear_out']), model_params['cell'], int(model_params['num_layers']), int(model_params['alphabet_size']), int(model_params['hidden_size']), model_params.getboolean('bidirectional'), model_params.getfloat('dropout'))
    train_config = TrainingConfig(train_params['train_path'], train_params['valid_path'],train_params['test_path'], int(train_params['epochs']), int(train_params['log_interval']), int(train_params['batch_size']), train_params['optimizer'], loss=train_params['loss'], lr=train_params.getfloat('lr'))
    return (model_config, train_config)
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 18:48:12 2019
@author: DiPu
"""
# Count whitespace-delimited word occurrences in romeo.txt (case-sensitive)
# and print the resulting dict — same output as before.
counts = dict()
# Fix: the file handle was never closed; a with-block guarantees cleanup.
with open("romeo.txt") as file:
    for line in file:
        for word in line.split():
            # dict.get collapses the if/else first-vs-repeat branches.
            counts[word] = counts.get(word, 0) + 1
print(counts)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
# Generated msrest model — keep field names in sync with _attribute_map.
class AccessMapping(Model):
    """AccessMapping.
    :param access_point:
    :type access_point: str
    :param display_name:
    :type display_name: str
    :param moniker:
    :type moniker: str
    :param service_owner: The service which owns this access mapping e.g. TFS, ELS, etc.
    :type service_owner: str
    :param virtual_directory: Part of the access mapping which applies context after the access point of the server.
    :type virtual_directory: str
    """
    # Maps Python attribute names to wire-format keys for (de)serialization.
    _attribute_map = {
        'access_point': {'key': 'accessPoint', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'moniker': {'key': 'moniker', 'type': 'str'},
        'service_owner': {'key': 'serviceOwner', 'type': 'str'},
        'virtual_directory': {'key': 'virtualDirectory', 'type': 'str'}
    }
    def __init__(self, access_point=None, display_name=None, moniker=None, service_owner=None, virtual_directory=None):
        super(AccessMapping, self).__init__()
        self.access_point = access_point
        self.display_name = display_name
        self.moniker = moniker
        self.service_owner = service_owner
        self.virtual_directory = virtual_directory
|
# Read two grades, average them, and report the status (fail / recovery / pass).
n1 = float(input('Primeira nota: '))
n2 = float(input('Segunda nota: '))
media = (n1 + n2) / 2
if media < 5:
    print(f'Sua média foi de {media} e você está \033[31mREPROVADO\033[m.')
elif media < 7:
    # Bug fix: the original `media >= 5 and media <= 6.9` left a gap —
    # averages in (6.9, 7), e.g. 6.95, fell through to APROVADO.
    # The `>= 5` half is already implied by the failed `media < 5` test.
    print(f'Sua média foi de {media} e você está de \033[33mRECUPERAÇÃO\033[m.')
else:
    print(f'Sua média foi de {media} e você está \033[32mAPROVADO\033[m.')
|
# -*- coding: UTF-8 -*-
"""
This module is intend to solve the matrix equation
sum_i=1^r Ax_i X Ay_i = F
Where Ay_i are circulant matrices
Ax_i is in general are band matrices
"""
import numpy as np
from scipy.linalg import circulant, inv
from scipy.sparse import csr_matrix, diags
from scipy.sparse.linalg import gmres, splu
from scipy.sparse import kron
from scipy.io import mmwrite, mmread
from scipy.optimize import minimize
from scipy.sparse.linalg import LinearOperator
# -----------------------
pi = np.pi
cos = np.cos
sin = np.sin
# -----------------------
# ...
def CMPLX(x, y):
    """Return the complex number x + i*y (Fortran-style convenience helper)."""
    return x + 1j * y
# ...
# ...
def genTestMatrices(r, nx, ny, p, EXPORT=False, IMPORT=False):
    """Build r matrix pairs for testing: a non-singular diagonal Ax per term
    (x-direction) and a banded circulant Ay per term (y-direction).

    With IMPORT=True the matrices are read from figa/Ax<i>.mtx / figa/Ay<i>.mtx
    instead of being generated; with EXPORT=True the generated matrices are
    written to those files.
    """
    list_Ax = []
    list_Ay = []
    # ... x-direction: unit-diagonal matrices (trivially non singular)
    for i in range(r):
        if IMPORT:
            Ax = mmread("figa/Ax" + str(i) + ".mtx")
        else:
            # a = np.random.random(nx)
            Ax = csr_matrix(diags(np.ones(nx), 0))
        if EXPORT:
            mmwrite("figa/Ax" + str(i) + ".mtx", Ax)
        list_Ax.append(Ax)
    # ... y-direction: circulant matrices with bandwidth 2*p+1
    for i in range(r):
        if IMPORT:
            Ay = mmread("figa/Ay" + str(i) + ".mtx")
        else:
            first_col = np.zeros(ny)
            first_col[:2 * p + 1] = np.random.random(2 * p + 1)
            Ay = csr_matrix(circulant(first_col))
        if EXPORT:
            mmwrite("figa/Ay" + str(i) + ".mtx", Ay)
        list_Ay.append(Ay)
    return list_Ax, list_Ay
# ...
# ...
def computeEigenValues(list_Ay, cmplx=True):
    """Return the eigenvalue vector of each circulant matrix in list_Ay.

    For a circulant matrix the eigenvalues are the DFT of its defining
    sequence; here the first ROW of each matrix is used as that sequence:
    eigen[k] = sum_j a[j] * exp(i * 2*pi*k*j / n).

    Parameters
    ----------
    list_Ay : list of (n x n) circulant matrices (sparse, indexable as A[0, i])
    cmplx : bool
        If True return complex eigenvalues; if False keep only the cosine
        (real) part of the sum.

    Returns
    -------
    list of length-n ndarrays, one per input matrix.
    """
    def computeEigenVal(A):
        # Bug fix: `np.complex` and `np.double` were removed from NumPy's
        # namespace (deprecated 1.20, removed 1.24); the builtins are the
        # supported spelling and are what NumPy aliased anyway.
        dtype = complex if cmplx else float
        n, m = A.shape
        a = np.zeros(n)
        for i in range(0, n):
            a[i] = A[0, i]
        eigenA = np.zeros(n, dtype=dtype)
        for k in range(0, n):
            ck = 2 * pi * k / n
            for j in range(0, n):
                if cmplx:
                    # complex() inlines the old CMPLX helper (same value)
                    eigenA[k] += a[j] * complex(cos(ck * j), sin(ck * j))
                else:
                    eigenA[k] += a[j] * cos(ck * j)
        return eigenA

    list_eigenAy = []
    for Ay in list_Ay:
        list_eigenAy.append(computeEigenVal(Ay))
    return list_eigenAy
# ...
# ...
def AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy):
    """Assemble the (nx x nx) system of Fourier mode j:

        S_j = sum_i eigenAy_i[j] * Ax_i

    j must be in range(0,ny).
    """
    dense_terms = (eigenAy[j] * Ax.todense()
                   for Ax, eigenAy in zip(list_Ax, list_eigenAy))
    mode_matrix = sum(dense_terms, np.zeros((nx, nx)))
    return csr_matrix(mode_matrix)
# ...
# ...
def solveSp(Sp, b):
    """Solve the sparse system Sp x = b with GMRES, discarding the info flag."""
    solution, _info = gmres(Sp, b)
    return solution
# ...
# ...
def rsolve(list_Ax, list_eigenAy, F):
    """Solve sum_i Ax_i X Ay_i^T = F using a REAL FFT along the y-direction.

    Parameters
    ----------
    list_Ax : list of (nx x nx) sparse matrices
    list_eigenAy : eigenvalue vectors (length ny) of the circulant Ay_i
    F : (nx, ny) right-hand side. ny must be EVEN — the Nyquist mode is
        packed/unpacked explicitly (matches the "if ny is even" comments in
        the original).

    Returns
    -------
    X : (nx, ny) ndarray solution.

    Notes
    -----
    Python-3 fixes: the original used `m/2` to build array shapes and
    indices, which is a float in Python 3 and raises TypeError; all
    half-sizes now use integer division. `np.complex` (removed in
    NumPy 1.24) is replaced by the builtin `complex`.
    """
    fft = np.fft.rfft
    ifft = np.fft.irfft
    nx, ny = F.shape
    n = nx ; m = ny
    half = m // 2          # number of positive frequencies (integer!)
    mmax = half - 1
    x = F.transpose()
    _F = np.zeros((m, n))
    U = np.zeros_like(_F)
    # ... forward real FFT of every column of x
    y = np.zeros((half + 1, n), dtype=complex)
    for j in range(0, n):
        y[:, j] = fft(x[:, j])
    # ... unpack the complex spectrum into a real (m x n) matrix (ny even):
    # rows [0], [2i-1, 2i] (re/im pairs) and [m-1] (Nyquist, real)
    for j in range(0, n):
        _F[0, j] = y[0, j].real
        for i in range(1, mmax + 1):
            z = y[i, j]
            _F[2*i - 1, j] = z.real
            _F[2*i, j] = z.imag
        _F[m - 1, j] = y[half, j].real
    # ... treatment of the 0-mode
    Sp = AssembleColumnMatrix(0, nx, ny, list_Ax, list_eigenAy)
    U[0, :] = solveSp(Sp, _F[0, :])
    # ... modes 2j-1 and 2j share the same mode matrix S_j
    for j in range(1, mmax + 1):
        Sp = AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy)
        U[2*j - 1, :] = solveSp(Sp, _F[2*j - 1, :])
        U[2*j, :] = solveSp(Sp, _F[2*j, :])
    # ... treatment of the last (Nyquist) mode
    Sp = AssembleColumnMatrix(mmax + 1, nx, ny, list_Ax, list_eigenAy)
    U[m - 1, :] = solveSp(Sp, _F[m - 1, :])
    # ... repack the real solution into a complex spectrum (ny even)
    y = np.zeros_like(y)
    for j in range(0, n):
        y[0, j] = complex(U[0, j], 0.0)
        for i in range(1, mmax + 1):
            y[i, j] = complex(U[2*i - 1, j], U[2*i, j])
        y[half, j] = complex(U[m - 1, j], 0.0)
    # ... inverse FFT column by column
    x = np.zeros_like(x)
    for j in range(0, n):
        x[:, j] = ifft(y[:, j])
    X = x.transpose()
    return X
# ...
# ...
def csolve(list_Ax, list_eigenAy, F, EXPORT=False, list_opSj=None):
    """Solve sum_i Ax_i X Ay_i^T = F using a complex FFT along y.

    Parameters
    ----------
    list_Ax, list_eigenAy : factor matrices / circulant eigenvalue vectors;
        only used when list_opSj is None (otherwise the per-mode systems are
        already factorized).
    F : (nx, ny) right-hand side.
    EXPORT : bool
        If True (and list_opSj is None) write each assembled mode matrix to
        figa/S<j>.mtx.
    list_opSj : optional list of ny precomputed LU operators (scipy splu
        objects); when given, each mode is solved with opSj.solve instead of
        assembling + GMRES.

    Returns
    -------
    X : (nx, ny) real ndarray solution.

    Notes
    -----
    Bug fix: the original read `nx` and `ny` from module-level globals; they
    are now derived from F.shape so the function is self-contained.
    `np.complex` (removed in NumPy 1.24) is replaced by the builtin complex.
    """
    fft = np.fft.fft
    ifft = np.fft.ifft
    nx, ny = F.shape  # previously undeclared globals
    X = np.zeros_like(F)
    Yp = np.zeros_like(F, dtype=complex)
    Xp = np.zeros_like(F, dtype=complex)
    # ... move every row to the common (Fourier) basis
    for i in range(0, nx):
        Yp[i, :] = fft(F[i, :])
    # ... solve one (nx x nx) system per Fourier mode j
    for j in range(0, ny):
        if list_opSj is None:
            Sj = AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy)
            if EXPORT:
                mmwrite("figa/S" + str(j) + ".mtx", Sj)
            xp = gmres(Sj, Yp[:, j])[0]
        else:
            xp = list_opSj[j].solve(Yp[:, j])
        Xp[:, j] = xp
    # ... come back to real space; the imaginary part is FFT round-off only
    for i in range(0, nx):
        X[i, :] = ifft(Xp[i, :]).real
    return X
# ...
# ...
def verification(list_Ax, list_Ay, X, F):
    """Recompute sum_i Ax_i X Ay_i^T from the factors and print whether it
    matches F (np.allclose); prints True/False rather than asserting."""
    reconstructed = np.zeros_like(X)
    for Ax, Ay in zip(list_Ax, list_Ay):
        reconstructed += Ax * X * Ay.transpose()
    print((np.allclose(F, reconstructed)))
# ...
# ...
def constructGlobalSystem(list_Ax, list_Ay):
    """Assemble the full Kronecker system S = sum_i kron(Ay_i, Ax_i).

    Parameters
    ----------
    list_Ax, list_Ay : equal-length lists of sparse matrices.

    Returns
    -------
    S : sparse (nx*ny x nx*ny) matrix.

    Notes
    -----
    Dead-code fix: the original called computeEigenValues(list_Ay) here and
    discarded the result — an O(r * n^2) waste with no effect; removed.
    """
    S = kron(list_Ay[0], list_Ax[0])
    for i in range(1, len(list_Ax)):
        S = S + kron(list_Ay[i], list_Ax[i])
    return S
# ...
# ...
# ...
class nearestCirculant(object):
    """Construct, for each matrix A in list_A, a circulant matrix close to A.

    Either the Frobenius-optimal circulant is computed in closed form
    (construct(method=None): average of each periodic diagonal of A), or a
    cost function selected by `cost` is minimized numerically:

      cost=0 : ||A - C||_F
      cost=1 : ||I - C^-1 A||_F
      cost=2 : ||(A - diag(A)) - C||_F
    """

    def __init__(self, list_A, cost=0):
        self.list_A = list_A
        # Bug fix: the original did `self.method = method` although no
        # `method` parameter exists; it only worked by accidentally picking
        # up a module-level global (NameError otherwise). The method is an
        # argument of construct(); keep a neutral default here.
        self.method = None
        norm = lambda M: np.linalg.norm(M, 'fro')

        # ... Frobenius distance ||M - C(c)||
        def cost0(M, c):
            C = circulant(c)
            return norm(M - C)

        # ... deviation of C(c)^-1 M from the identity
        def cost1(M, c):
            n, m = M.shape
            invC = inv(circulant(c))
            I = np.eye(n)
            return norm(I - invC * M)

        # ... Frobenius distance between the off-diagonal part of M and C(c)
        def cost2(M, c):
            D = diags(M.diagonal(), 0)
            Z = M - D
            return norm(Z - circulant(c))

        self.cost0 = cost0
        self.cost1 = cost1
        self.cost2 = cost2
        self.cost = getattr(self, 'cost%d' % cost)

    def construct(self, method='BFGS', tol=1.e-7, verbose=False):
        """Return the list of circulant approximations (CSR matrices).

        method=None uses the closed-form diagonal-averaging construction;
        any other value is forwarded to scipy.optimize.minimize together
        with tol (gtol) and verbose (disp).
        """
        list_C = []
        for A in self.list_A:
            if method is None:
                # closed form: c[k] = average of the k-th periodic diagonal
                n, m = A.shape
                MD = A.todense()
                c = np.zeros(n)
                for k in range(0, n):
                    c1 = 0.
                    c2 = 0.
                    for i in range(0, n - k):
                        c1 += MD[i, k + i]
                    for i in range(n - k, n):
                        c2 += MD[i, k + i - n]   # wrap-around part
                    c[k] = (c1 + c2) / n
            else:
                cost = lambda c: self.cost(A, c)
                n, m = A.shape
                x0 = np.zeros(n)
                x0[0] = 1.
                # Bug fix: `verbose` was an undefined module-level global in
                # the original; it is now an explicit keyword argument.
                res = minimize(cost, x0,
                               method=method,
                               options={'gtol': tol, 'disp': verbose})
                c = res.x
            C = csr_matrix(circulant(c))
            list_C.append(C)
        return list_C
# ...
# ...
class circulantPrecond(object):
    """Circulant preconditioner for the system S = sum_i kron(Ay_i, Ax_i).

    Each Ay_i is replaced by its nearest circulant C_i (via
    nearestCirculant); because every C_i is circulant, the resulting
    operator P = sum_i kron(C_i, Ax_i) is block-diagonalized by the FFT in
    the y-direction. Applying P^-1 then costs one FFT, ny sparse LU solves
    (factorized once in __init__), and one inverse FFT.
    """

    def __init__(self, list_Ax, list_Ay \
                 , cost=0, method='BFGS' \
                 , tol = 1.e-7, verbose=False):
        # ... construct the nearest circulant matrices for list_Ay
        # NOTE(review): `verbose` is accepted but not forwarded to
        # nearCirc.construct() — confirm whether that is intentional.
        nearCirc = nearestCirculant(list_Ay, cost=cost)
        list_C = nearCirc.construct(method=method, tol=tol)
        # ...
        self.list_C = list_C
        # ... eigenvalue vector (length ny) of each circulant C_i
        self.list_eigenC = computeEigenValues(list_C)
        # ...
        # ... problem dimensions taken from the first factor of each list
        n,m = list_Ax[0].shape ; nx = n
        n,m = list_Ay[0].shape ; ny = n
        self.n = [nx,ny]
        # ...
        # ... explicit preconditioner matrix P (kept for export/debugging)
        r = len(list_Ax)
        Ax0 = list_Ax[0]
        C0 = list_C[0]
        P = kron(C0, Ax0)
        for i in range(1, r):
            Ax = list_Ax[i]
            C = list_C[i]
            P = P + kron(C, Ax)
        self.P = P
        # ...
        # ... factorize the ny per-mode (nx x nx) systems once with sparse LU
        list_opSj = []
        for j in range(0, ny):
            # ... assemble the 1D matrix
            Sj = AssembleColumnMatrix(j, nx, ny, list_Ax, self.list_eigenC)
            opSj = splu(Sj.tocsc())
            list_opSj.append(opSj)
        self.list_opSj = list_opSj

    def aspreconditioner(self):
        """Create a preconditioner
        Returns
        -------
        precond : LinearOperator
            Preconditioner suitable for the iterative solvers in defined in
            the scipy.sparse.linalg module (e.g. cg, gmres) and any other
            solver that uses the LinearOperator interface. Refer to the
            LinearOperator documentation in scipy.sparse.linalg
        See Also
        --------
        scipy.sparse.linalg.LinearOperator
        """
        shape = self.P.shape
        dtype = self.P.dtype
        nx, ny = self.n
        # application counter, incremented on every matvec call
        self.i = 0
        def matvec(b):
            # the flat vector is stored y-major: reshape to (ny, nx), then
            # transpose to the (nx, ny) layout csolve expects
            F = b.reshape((ny,nx))
            F = F.transpose()
            # csolve's matrix arguments are ignored when list_opSj is given
            # (only the precomputed LU solves are used), so passing list_C
            # here is harmless
            X = csolve(self.list_C, self.list_eigenC, F, list_opSj=self.list_opSj)
            x = X.transpose().reshape(nx*ny)
            self.i += 1
            return x
        return LinearOperator(shape, matvec, dtype=dtype)
# ...
# ...
def testcase(r, nx, ny, p, EXPORT=False, IMPORT=False):
    """Build a right-hand side F and matching random test matrices.

    With IMPORT=True, F is read back from figa/F.txt (a 1-D file is
    promoted to a single-row matrix) and the matrices from figa/*.mtx;
    otherwise F is random and saved to figa/F.txt for reproducibility.

    Returns
    -------
    (list_Ax, list_Ay, F)
    """
    if IMPORT:
        F = np.genfromtxt("figa/F.txt")
        try:
            nx, ny = F.shape
        except ValueError:
            # Bug fix: the original bare `except:` also swallowed unrelated
            # errors (even KeyboardInterrupt). Only a 1-D array can fail the
            # 2-tuple unpack above, and that raises ValueError.
            nx = 1
            ny, = F.shape
            _F = F
            F = np.zeros((nx, ny))
            F[0, :] = _F
    else:
        F = np.random.random((nx, ny))
        np.savetxt("figa/F.txt", F)
    list_Ax, list_Ay = genTestMatrices(r, nx, ny, p \
                                       , EXPORT=EXPORT \
                                       , IMPORT=IMPORT)
    return list_Ax, list_Ay, F
# ...
# ...
def testcase_poisson(scale=False):
    """Load 1D mass (M), stiffness (S) and advection (K) matrices from
    figa/*.mtx and assemble the 2-term Poisson test problem

        Mx X Sy^T + Sx X My^T = F      (F = ones)

    Parameters
    ----------
    scale : bool
        If True, right-scale each y-direction matrix by the inverse of its
        diagonal (experimental — see the printed warning).

    Returns
    -------
    (list_Ax, list_Ay, F)
    """
    Mx = mmread("figa/Mx.mtx") ; Mx = Mx.tocsr()
    Sx = mmread("figa/Sx.mtx") ; Sx = Sx.tocsr()
    Kx = mmread("figa/Kx.mtx") ; Kx = Kx.tocsr()
    KTx = Kx.transpose().tocsr()
    My = mmread("figa/My.mtx") ; My = My.tocsr()
    Sy = mmread("figa/Sy.mtx") ; Sy = Sy.tocsr()
    Ky = mmread("figa/Ky.mtx") ; Ky = Ky.tocsr()
    KTy = Ky.transpose().tocsr()
    # (several commented-out alternative term lists using the advection
    # matrices Kx/Ky were removed; the matrices are still loaded above so
    # the input files are validated even while unused)
    list_Ax = [Mx, Sx]
    list_A = [Sy, My]
    if scale:
        print("MUST IMPROVED: WE HAVE TO MULTIPLY BY ONE MATRIX FOR ALL MATRICES")
        shift = 0
        list_Ay = []
        for A in list_A:
            diag = 1./A.diagonal()
            D = diags(diag, shift).tocsr()
            Ay = A * D
            # Bug fix: `Ay.tocsr()` returns a NEW matrix and the original
            # discarded it (no-op); bind the result so the scaled matrix
            # really is CSR.
            Ay = Ay.tocsr()
            list_Ay.append(Ay)
    else:
        list_Ay = list_A
    n,m = Mx.shape ; nx = n
    n,m = My.shape ; ny = n
    # F = np.random.random((nx,ny))
    F = np.ones((nx,ny))
    return list_Ax, list_Ay, F
# ...
# ---------------------------------------------------------------
if __name__=="__main__":
    from time import time
    # -------------------------
    # Problem size; only used by the random CIRCULANT testcase
    # (testcase_poisson derives nx/ny from the loaded matrices below).
    # nx = 512 ; ny = 512
    # nx = 256 ; ny = 256
    # nx = 128 ; ny = 128
    # nx = 64 ; ny = 64
    nx = 32 ; ny = 32
    # nx = 16 ; ny = 16
    # r: number of Kronecker terms; p: half-bandwidth of the circulants.
    r = 4
    p = 3
    # EXPORT = True
    EXPORT = False
    IMPORT = False
    # IMPORT = True
    # method=None selects the closed-form nearest-circulant construction;
    # otherwise the name is passed to scipy.optimize.minimize.
    method = None
    cost = 0
    # method = 'BFGS'
    tol = 1.e-7
    # verbose = True
    verbose = False
    # scale = True
    scale = False
    # CIRCULANT=True : random circulant testcase + direct fast solver.
    # CIRCULANT=False: Poisson testcase + circulant-preconditioned GMRES.
    # CIRCULANT = True
    CIRCULANT = False
    # -------------------------
    # ... build the test problem
    if CIRCULANT:
        list_Ax, list_Ay, F = testcase(r, nx, ny, p, EXPORT=False, IMPORT=False)
    else:
        list_Ax, list_Ay, F = testcase_poisson(scale=scale)
    # (old experiments kept for reference)
    # n,m = list_Ax[0].shape
    # r = len(list_Ax)
    # list_Ax = []
    # for i in range(0,r):
    ## diag = np.random.random(n)
    # diag = np.ones(n)
    # shift = 0
    # A = diags(diag, shift)
    # list_Ax.append(A)
    # _list_Ax = list_Ax[:3]
    # _list_Ay = list_Ay[:3]
    # ... build the preconditioner from the first two Kronecker terms only
    _list_Ax = list_Ax[:2]
    _list_Ay = list_Ay[:2]
    PrecConstruct = circulantPrecond(_list_Ax, _list_Ay \
                                     , cost=cost, method=method \
                                     , tol=tol, verbose=verbose)
    mmwrite('figa/P.mtx', PrecConstruct.P)
    # mmwrite('figa/C_Sy.mtx', PrecConstruct.list_C[0])
    # mmwrite('figa/C_My.mtx', PrecConstruct.list_C[1])
    # mmwrite('figa/C_Kmy.mtx', PrecConstruct.list_C[2])
    # mmwrite('figa/Kmy.mtx', list_Ay[2])
    # mmwrite('figa/C_KTy.mtx', PrecConstruct.list_C[2])
    # mmwrite('figa/C_Ky.mtx' , PrecConstruct.list_C[3])
    # mmwrite('figa/C_KTy.mtx', PrecConstruct.list_C[0])
    # mmwrite('figa/C_Ky.mtx' , PrecConstruct.list_C[1])
    # mmwrite('figa/C_My.mtx' , PrecConstruct.list_C[2])
    mmwrite('figa/C_Sy.mtx', PrecConstruct.list_C[0])
    mmwrite('figa/C_My.mtx', PrecConstruct.list_C[1])
    Precond = PrecConstruct.aspreconditioner()
    # ... actual dimensions come from the matrices themselves
    n,m = list_Ax[0].shape ; nx = n
    n,m = list_Ay[0].shape ; ny = n
    # ... assemble and export the full Kronecker system
    S = constructGlobalSystem(list_Ax, list_Ay)
    mmwrite('figa/S.mtx', S)
    # ... problem summary
    print("=============================")
    print(" nx, ny ", nx, ny)
    print(" size ", S.shape)
    print(" nnz ", S.nnz)
    print("=============================")
    # ...
    # import sys ; sys.exit(0)
    # ... reference solve: plain (unpreconditioned) GMRES on the full system
    print("=============================")
    print(">>> using the global system")
    y = F.transpose().reshape(nx*ny)
    tb = time()
    Xg,it = gmres(S, y)
    Xg = Xg.reshape((ny,nx))
    Xg = Xg.transpose()
    te = time()
    print("Elapsed time ", te-tb)
    # ... fast / preconditioned solve
    if CIRCULANT:
        print("=============================")
        print(">>> using circulant fast solver")
        list_eigenAy = computeEigenValues(list_Ay)
        tb = time()
        X = csolve(list_Ax, list_eigenAy, F)
        te = time()
        print("Elapsed time ", te-tb)
        print("Internal verification ")
        verification(list_Ax, list_Ay, X, F)
    else:
        print("=============================")
        print(">>> using circulant preconditioner solver")
        tb = time()
        y = F.transpose().reshape(nx*ny)
        x,it = gmres(S, y, M=Precond)
        X = x.reshape((ny,nx))
        X = X.transpose()
        te = time()
        print("Elapsed time ", te-tb)
    # ... compare the two solutions (relative error in Frobenius norm)
    print("=============================")
    print("Is everything OK?")
    print(np.allclose(Xg,X, rtol=1e-07) \
          , " with error ", np.linalg.norm(Xg-X)/np.linalg.norm(X))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.