# Generated by Django 3.0.5 on 2020-05-04 12:17
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('books', '0002_auto_20200504_1032'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ['-date_create'], 'verbose_name': 'Заказ', 'verbose_name_plural': 'Заказы'},
),
migrations.AddField(
model_name='order',
name='date_create',
field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2020, 5, 4, 15, 17, 45, 640613), verbose_name='Дата создания'),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='name',
field=models.CharField(default='default', max_length=127, verbose_name='Название'),
preserve_default=False,
),
]
|
import sys
import numpy as np
import pyqtgraph as pg
from PyQt5.QtCore import Qt, QTimer, QElapsedTimer
from PyQt5.QtWidgets import (
QApplication, QCheckBox, QGridLayout, QGroupBox, QMenu, QPushButton, QRadioButton, QVBoxLayout, QWidget, QSlider)
from bg_gurney import BasalGanglia
class Window(QWidget):
def __init__(self, parent=None):
super(Window, self).__init__(parent)
# Initialise data, input and plot structures
self.data = {}
self.inputs = {}
self.plot = {}
self.BG_PLOTS = ['Inp', 'dMSN', 'iMSN', 'PPn', 'VTA_SNc', 'DA', 'Ctx']
self.BG_REGIONS = ['Ventral']
# BG_PLOTS = ['Input', 'NAc', 'STNv', 'SNr', 'DM', 'PL']
self.PLOT_LENGTH = 1000
self.PLOT_COLOURS = ('r', 'g', 'b', 'c', 'm', 'y', 'w')
def configure( self, model ):
# Initialise Qt objects
grid = QGridLayout()
self.slider = {}
# Initialise time pointer
self.ptr = 0
for n in range(model.BG_CHANNELS):
grid.addWidget(self.create_input_sliders(n), 0, n)
for x in range(len(self.BG_REGIONS)):
for y in range(len(self.BG_PLOTS)):
pop = self.BG_REGIONS[x] + '_' + self.BG_PLOTS[y]
self.data[pop] = {}
self.data[pop]['Region'] = self.BG_REGIONS[x]
self.data[pop]['Population'] = self.BG_PLOTS[y]
# TODO: Tidy this up
if x == 0:
col = 0
else:
col = 3
grid.addWidget(self.create_plots(model, x, y), y + 1, col, 1, 2) # TODO: Tidy up these columns a bit
for i in range(model.BG_CHANNELS):
self.data[pop][i] = np.zeros(self.PLOT_LENGTH)
# Set window layout
self.setLayout(grid)
self.setWindowTitle('Basal Ganglia')
def create_input_sliders(self, ch):
ch_id = 'CH ' + str(ch + 1)
group_box = QGroupBox(ch_id)
# groupBox.setAlignment(Qt.AlignCenter)
self.slider[ch_id] = QSlider(Qt.Vertical)
self.slider[ch_id].setTickPosition(QSlider.TicksBothSides)
self.slider[ch_id].setMinimum(0)
self.slider[ch_id].setMaximum(100)
self.slider[ch_id].setMinimumSize(5, 100)
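# Bind the channel index as a default argument so each slider's callback reports
# its own channel (a plain closure would capture the loop variable by reference
# and every slider would end up reporting the last channel).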
self.slider[ch_id].valueChanged.connect(lambda value, ch=ch: self.change_inputs(value, ch))
vbox = QVBoxLayout()
vbox.addWidget(self.slider[ch_id])
vbox.addStretch(1)
group_box.setLayout(vbox)
return group_box
def create_plots(self, model, region, pop):
plot_id = self.BG_REGIONS[region] + '_' + self.BG_PLOTS[pop]
group_box = QGroupBox(plot_id)
# Create and configure PlotWidget for each population
plt = pg.PlotWidget()
# TODO: Set xRange so 0 is at right and PLOT_LENGTH is at left
plt.setRange(yRange=[0, 1])
# Configure PlotItem for each channel
self.plot[plot_id] = {}
# self.plot[plot_id]['Region'] = BG_REGIONS[region]
# self.plot[plot_id]['Population'] = BG_PLOTS[pop]
for n in range(model.BG_CHANNELS):
self.plot[plot_id][n] = plt.plot([], pen=pg.mkPen(self.PLOT_COLOURS[n], width=2))
vbox = QVBoxLayout()
vbox.addWidget(plt)
# vbox.addStretch(1)
group_box.setLayout(vbox)
return group_box
def change_inputs(self, val, ch):
self.inputs[ch] = float(val) / 100
def notify(self, model):
if model.FIXED_INPUTS:
curr_time = self.ptr / 100
for c in model.BG_INPUTS.keys():
if model.BG_INPUTS[c]['Onset'] <= curr_time < model.BG_INPUTS[c]['Offset']:
self.inputs[c] = model.BG_INPUTS[c]['Size']
# Transient
if 'Transient' in model.BG_INPUTS[c] and model.BG_INPUTS[c]['Transient']['Onset'] <= curr_time < model.BG_INPUTS[c]['Transient']['Offset']:
self.inputs[c] = model.BG_INPUTS[c]['Size'] + model.BG_INPUTS[c]['Transient']['Size']
else:
self.inputs[c] = 0
# Increment time pointer
self.ptr += 1
for p in self.plot.keys():
reg = self.data[p]['Region']
pop = self.data[p]['Population']
for n in range(model.BG_CHANNELS):
# Shift all data along by one
self.data[p][n][:-1] = self.data[p][n][1:]
if pop == 'Inp':
self.data[p][n][-1] = model.inputs[n].item()
else:
# item() needed to convert numpy.float64 to native Python float
self.data[p][n][-1] = model.bg.pop[reg][pop]['o'][n].item()
self.plot[p][n].setData(self.data[p][n])
# self.plot[pop][n].setPos(self.ptr, 0)
# Main run loop
if __name__ == '__main__':
app = QApplication([])
clock = Window()
clock.show()
sys.exit(app.exec_())
|
# Generated by Django 3.0.3 on 2020-05-07 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project_first_app', '0003_auto_20200507_1248'),
]
operations = [
migrations.AlterField(
model_name='user',
name='passport',
field=models.CharField(default='default', max_length=30),
),
]
|
#!/usr/bin/env python
# Load firmware onto the Digital Bitbox.
#
# The Digital Bitbox must be in bootloader mode to use this script:
# 1- Unlock the bootloader using send_command.py to send '{"bootloader":"unlock"}'
# 2- Hold the touch button 3 seconds to permit unlocking.
# 3- Replug the device, and briefly touch the touch button within 3 seconds.
# The LED will flash a few times quickly when entering bootloader mode.
#
# Firmware signatures are valid for deterministically built firmware releases (refer to the github readme for building).
# Invalid firmware cannot be run.
#
# After loading new firmware, re-lock the bootloader using send_command.py to send '{"bootloader":"lock"}'
import sys
import struct
import binascii
from dbb_utils import *
if len(sys.argv) != 3:
print('\n\nUsage:\n\tpython load_firmware.py firmware_name.bin firmware_version\n\n')
sys.exit()
else:
fn = sys.argv[1]
version = sys.argv[2]
# Private key signatures (order is important)
if 'signed' in fn:
print('\n\nPlease load the unsigned firmware binfile. Signatures are added within this script.\n\n')
sys.exit()
elif '2.0.0' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'302731115cafd4eb0d25747e604fe2a45f541c5e238dd5e946a34d608be104575b781b06f6b629e9debdfa1fe9cd27615fb0613bd90ccc527f5c9b838459c36e'
'20b6aa64e7f1dfce652cf69966abdda71a76560011159620d6704036ee96705e019e5bc8de2ddfa1656879744611b6909568f07deec7cfc6b6a967431b9ce81a'
'f82b0f23ebf8cfec971150580343327801a6a4f4a30473929ff681e9791f79bb5d645157378acdeaa1fdce6f3fea418829a04a2c6c5a4c27b3707b77a134f5d2'
'4c9b22dbc81d5765b6d9bc008777dae96df90162b54b7802699f4d197d8eb28c27323bcf218b0f2437f9fdd1e1f06ccfabca6a26605115c131fb5bbd9195a11e'
)
elif '2.1.1' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'713b243546825f155bc6527d27dd53331c963def45249fcce07079b13b95264f43889ac3a895621925d0a014fea9dc06fac25472c679ace3604a22e9b8a0bbd7'
'e47e909617f401064b579665961e0535c9618ea525e0dd325623834e451e1bb63eec6fd7ea3d259d42ca776bac992d86933e89b589c04322d253a18080122c9f'
'5d080a6cbbdceed080c13721bdd093eb3ad60881abf8b03146e28086e8f9b40f0a3921f0796079f196527cc037fe7451a426815f9c85043e0776e85975492b3a'
'ca225002e2cf45d5580187d6564ab4f664a480867fa6f767a999c065a829e3c5599f21c06a26b473f9b303e2aca245ea899f67b7b156935b384ccfabc1069669'
)
elif '2.2.2' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'ab62cdc84efe891dac91f5632fcfe57115cf4fc6769f0a1cddb5268294dac38371207c616d7ac123bb075d042c8e0ee3f2e036ac200348156baf831ad5d2d1af'
'f7d757c994a1c422fd4cb7adf589360231979dd1f1bb5dcd3fa28bc80eeb66882c7977df66d4f97e7761094f3f6f9748cd6f2c77eb22799212d154d2307031db'
'170a6d1e5d511aa07d588d72e18481d3286dc583b12f2d22a7a35ee4a5d955d66f1aa76979305ff8ed002744a851159436e87645e3b021dd69231b9f57a033bf'
'd293e93c78128fd6a4996961c34273c044cb120dd1c9a50d6b1db01577fd2a7a2644ec2ddb9e96f814082b5abc193da0e43c23e61eed6baa631a7f6ff67d3b77'
)
elif '2.2.3' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'a62edd2d91e565c4c3aeaace17943c097938ae7b30f8338b8937239dae55d6ba33b8dbc29a17d444bbccc6ca7a00cb717387a7bcb7688aaa0ad69d8ede143555'
'ece868577966cad79be858908db5a5f2e780ae0d5b0f6d197a677fc9a66e70a075c948ba11562533407c4f66401bb03454df99349569f13ba534fb2877b1a671'
'45c3964e3e720c9e78388ba8555275377448b564c55a3689cc0f0312be362e25273dc7f96f491a910707185718ceb3372ada9924eba8ced8fb42ab6f7ba416c1'
'5aacc1ab96f4bf67bd423c855686fd8385ac874bcc2195c8d3df36a43b3dc7ab7d5ae5d938d4b275e308642c9e1d083e9d0ceeec9915c823073a766e0fde996b'
)
elif '3.0.0' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'82bab51c67dbb4ac5ee46686cc10b485aa0868cf577c6a58e9706a156f9e0a5e0fb0032af50ae8b60a5a9e90c0814c0ab05a643ac28eb068524e1ad18683a395'
'b12cd81632caf0e1a5dd51bd33172f11ef8fe14fa17c49c4a60146225fea629922509e23fafe53b3dcf4b8865a7b87187b557bbdb2aea3eef77ca8ec3e9b4658'
'2fb4e401896eb81e53a7d8e659c118f721e8e4fd127b3243b135054e1111ad067d088c028517cc8515d8c43c44dd8865288eb04f1756021233e42ac99462daa2'
'8f4a6af6123f33b222212eed67c21904e947c8967b72cf2a6ec77a69bebae93e5d145065fac7bd1d53929ffeb0275a5e7df1b856c02b0f58e8d2f594d2be5b3e'
)
elif '4.0.0' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'0104628d9c67537a8ef79bff375afacf78c35fad647a090f05d45811e23c4a96539a9f9b3ca465f5af9e6691e518d816fa8e73c67896625be68de2621d22b5e8'
'091d6e389ed384bcaf5d7ba16a8af1a34bd48084b911a5685f41ff3340cb11616cc1d06f8c558fac31c38afb95f1c30e42bd9da204002ba757b9d97263301676'
'0f06409c24dc497d60524bb1275394de5df57981b485622d341e209d99b3e13854b21d7459abd0e3872011765b53e211069bc6b0438e18a4bed774ca2ac82048'
'9dc82dae4bb7e6093e888e4dcdfebee068af79f255c5d78b9eb1118a752491740023aea8924944f213fb5733a62a82d8d5a2706ea163cecf83df8aac0711cdf1'
)
elif '4.0.1' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'f89c9481d32e49e3dd3770d881d711e048964657e1532efba759fbfd067ef60f39568e8411766d47932d87da2c9ce33e53eabcc4b1ddd230506658084ba5544a'
'0416cf3aad238a30d94ce4884ac9e3e350f807402f6b6dd204e8ba5a8cad5d0179e6c7f1503d665c3be41dcc437eb68dd0d956f11f5c6d5ff4d45892b0f7179a'
'fb22728ed783fef1cac48e5ebd1160a503baec0076adf963088717d48ba0d31a7f01445382196f66b71ee08e2c504a4e7d7a7972464aa3c27eb61668303ff643'
'2d4983c0628424a63f9aa37acaf1faedb1b3ca69dd176161115ba6caf18b96c417322c4509325ff2d0945bcb95233db8da35804eb4f80fbfa20588d85b205794'
)
elif '5.0.0' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'fb8f3f271869872b9fd7ee7985cb65434f8c2b24e0cc95b23d2e35ac94b10f44394889d44398498586f18a09ecb6ec1a0bd2c760a1e14288151cc0f96cbb99b2'
'44acefdafb734091f9ea4bda8165cc9aac2c9ceb0e5cd33b8b1d0761c980dedd4e02a88510ab1eb6ace128dc32f64c926118289e4b1a54f62b55ef1b754a201c'
'6ca3f4264c85db8b2f8f24ecf38efee60ad4117e5a293fc01adf7f1c445d896323fb9ecd386074b0bcbd9d120c88f09f3c801adcba9171a5553e68e5deb8e1cb'
'8774a39d8d9ba34c1f47209f869bd9ea7806f3b584e5fcbc5531fc6a31b2b79c519d9c1b14b07db72d390a633dc4e55494b13e66be49c09cf2032aff7a6a5f7f'
)
elif '6.0.0' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'76edbb3aec7bb595d93114cefb9062808790e631ccf7727a434c5865c71199fc0b6680a1cb6de8eb747d122a6232de475a2c6034cb11121e28a4de4987c9789f'
'5608619e65c633ab6dac32c5fd4365591afbbd3890be66940b428f183e4fa4d56a81a18599ddc305c2285bf054283e57aa96bb2bf927c74dea41a39d0af20dff'
'9678367ce39d3acec4d2d1de6518ab85bf06870d027e0501e292f6bc759dd2bc60b564f3d8ad5e3768fb23f3cef2ca839781f50781e42aa47fcd969eb79201d5'
'95d75b7b6820317b33e007b46ded2984638d7321bcfe230b581cb638bc38592e6391b1ceeb2cddaa4ed5cfe19cd50c87031ad0f7cb93e9c375501730f15986ad'
)
elif '6.0.1' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'a31ce24e6b9127a09b10d6869139f6a5846d1b93cdb3855907534d2f701eb19d306fb3a99519148ef8a6b2bf5e6f588b11753b6ad1ff6f29a48cc7ee470ebfe3'
'8271ecedc4a7968be0d935f68a54e4e85cb1792193033039114c541c4d8ea83c3e0419e2b9aac90376a803074f362673845716824da4d52694a87aa364560afc'
'28417cbfaecbbe6f3329d89192348cbb4674d15c8457944f675ea85df13d40f44630ec92629b7a422d0b152fe8dd028932a1cf389d7d108becb60af3504605e9'
'11dcc79225db4be8bf8e3fae2689665c52151fefa3be183e6945d87e104c9e4d74a732e55f6705c5b9448689c571f66d015149445acaf54ddc3a96cf360bfcd6'
)
elif '6.0.2' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'f32c81811d9fcee78b011a74d29d96068afd6366ef79f6599054e0ba46f3d5517b8ad38c34d0117bfb137cc29b3b390555b8c0eaa13c8ff18ec8d82dd07c3e1d'
'b53c45cc28c03f6e678e5ee68c533cdea1dcd66193578f03310c86f85a6a1d87541a9d430df36351d253a1c8eae6017ccff535ee678a5806b2c55534ca59eec5'
'dd3a96de0e274da0431e98c0a40968c61be64eb5af7c8a73cefdc5b321e771f54a7d1cd7b20c68e16c087a31aaf703c398e761b92f619bceab7e8a49c4368edd'
'41a0e5d3a09d79f83cb17c00faf06425f1f230a30379951f3bd96aac0740b93e188819a388f46c629a60d52f1721be35a7f7bff55a4a1b476f6d5a11029ab10f'
)
elif '6.0.3' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'5932861985f702f1103173ae772ae9aed4db74a23ca9ba590827856466fd89c300ff0b910b3badfd7ffb46f4a84375f81cc8632d3496e7b8950fb907c40969fe'
'dd0539315a7e3aa7743142d95aa4ae2c3dfa477aba40f8f53d724f25083ebc126a807d13e4eb6332184bc775a368f4d46aebc21d781cc7e697b1b3cf76f3b03e'
'b7c2108d404de7b40c30d772cd668f361a8174369e5c33add1dfebd3e1bc222f296d1dda936ac6b4ff6e66d48e0d8df58bc99f5119079b5008a41e63203df6d2'
'af85433087dbb3501aa7148bc9ee8209943070ab110d99512a843c2df753ffda45c4e0a40609d252d8cc53e56f866faf4895067d45f39ca4f91ab6559a1c02c7'
)
elif '6.0.4' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'94a319fa4e208a3703615ab1a1d5b91c0c60fa06ce11cf1f3b95715231ce05c266fb7b92ffda64e7781152048ba5f24350eb886d2bb2ad203aa268584e531a9b'
'a8f80f6d85a8d2f88d552a6543949f949eccd360449b0a81f8065c38a3aa163d67c6254078114f85d910062fc8e51c20e461a1c4219c6911dfb2d5242b39c739'
'30f12c64b516380ab1adaceb4f3f4008b003ce6c3d0f51cfe4509990b568c9c5252748099a95d0b2127ccb7b3c92c9b19fc32a26df100761fe0aacb52233afb7'
'34f59a71d7d46c22439700804cf02898f7e9c592f8c3c3c533da234e04201be2112dd33c0574eb1befeda3ea70d6a5c4690bcbc7ea02a38fd67427f8ceab1d4f'
)
elif '6.1.0' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'50c1a2b8ca106dbab2acd4eec2193af35dbd1890af199b2dba091eb1ce65c96a3d0058e104626f1a060bb59c00c74699b3656e2885557d2baec8bb17967b25ec'
'c514ed71d60486469e0b47afe93aafbc6f0b91818e98baa770692a3897be56f36eb82ea139e263bac8760192d97a39d3b48f61ac00c02d36f96d0aedb6168c85'
'77da95464c5647fd8f88a4a37400959f7bfce19dcf288efb1ea2396b84499b7b1e43fb184ba3df3e41a3e1433bf563b8e3a7b6fc825b81695399351ea3e3ee66'
'97b7b7861e929237c638690edc363ed8db1c85831e15114aaf1f103b97af7e3061fb85503f50104f3d36489e5e0b8b0313d750c114bd2eb80df9a3a0d02e12cc'
)
elif '6.1.1' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'1e2e802701edf7b0ccc0e9e50e3d6afbbca9325df46005bd815d30dc7a9cd2607221cf0f93b76b81506a7cf5d79d35d4c67db84b52cb19a379dd83aa07f482d5'
'7abdfbb497d9f23aa00b3c186fea27c1d791909d1937ce42e5dd5c4abcf62af863ce9e2b4fac914e5b4f96642631c7adae2bcfce53a0aab894ebf5496010968a'
'29b381102cab0cb104dd57353630c1d0544808d15d3bf5810a7619ead8a00548294308fb63abbca048439fb9df2de412696544d940fafec819fc176b73beff73'
'1b8c4505b246469cfa083e18c8fc039b203d8b97927658676058e57757a4ca750cbb442f30964f5ab1372b1a577d311707376e43325e117a4eedcbe85d0048c6'
)
elif '7.0.0' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'80aa3c367bab0266aa6770d1b1d6980e30c0fd18e07258dab7af60676045a5e13bd4679cf9cd6831286811f7d2173e62c7f5f37419d9cdf87013520aec4ec642'
'8ce42a69f03ea71af4f07841d5f9d417630d90ac9e5112a7f45f23801389e5812cc3dfb037e9866481ccceb203c2143a2e85286a9db1c4e5822d1b5b7d461495'
'91cec9da81c9b45e03ddb9813c0d3c4e229fc0d12cbca42c00a29914021c62544e917cf979bbac5bb5fac310fb4f0165599d32d004ef5d26795566c2f12fae0c'
'bf3b1e65840b842887bac9d7d48a3787eb32af1532bb16dd5d92357d0ea5ef374b2c68328f27ecee95b2b0bb34ed9bb9705761b36cfe93a550b8543260a2dd5c'
)
elif '7.0.1' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'e34772e2462db37425712aadb3d9d1059ec3a55375fe1d1a37e20d0069dc7b1765b1ca52da3c230f5c2a4325a7fa14c1f9ec55ebd364ba86443c2b7a13b74596'
'7c3d40d7dec84451866c829cff10a10401ecaa4e9a4d33ffa5425a099bd9df2355059b962a0beeec667bada36de2484b42f98b86fbd507f392405217f5941c3f'
'134eb617521a203b21a4495c06006c07cbdad2681f10169b6bee66e37cf798a801095c19918cbcc0403d626e47b1936286fa8566c4923da4933a1e035a8a9d23'
'f3b26d44eee48de2d9e99fcdcd158fe33ac6a484e65267ac5e025369115828e724f8e30e784e2b7d5eaf60094e931e9a3c410d0890f280240c069d55e59776e6'
)
elif 'debug' in version:
sig = (
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
)
else:
print('\n\nError: invalid firmware version ({}). Use the form \'vX.X.X\'\n\n'.format(version))
sys.exit()
def printFirmwareHash(filename):
with open(filename, "rb") as f:
data = bytearray()
while True:
d = f.read(chunksize)
if len(d) == 0:
break
data = data + bytearray(d)
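# Pad with 0xFF up to the application area length so the hash covers the whole
# firmware region exactly as it will sit in flash.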
data = data + b'\xFF' * (applen - len(data))
print('\nHashed firmware', binascii.hexlify(double_hash((data))))
# ----------------------------------------------------------------------------------
try:
openHid()
printFirmwareHash(fn)
sendPlainBoot("b") # blink led
sendPlainBoot("v") # bootloader version
sendPlainBoot("e") # erase existing firmware (required)
sendBin(fn) # send new firmware
# upload sigs and verify new firmware
load_result = sendPlainBoot("s" + "0" + sig)
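# The first status byte distinguishes outcomes: 'V' means the bootloader refused a
# version downgrade (it reports the installed and offered versions), while '0' means
# the signature check passed.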
if load_result[1] == 'V':
latest_version, = struct.unpack('>I', binascii.unhexlify(load_result[2+64:][:8]))
app_version, = struct.unpack('>I', binascii.unhexlify(load_result[2+64+8:][:8]))
print('ERROR: firmware downgrade not allowed. Got version %d, but must be equal to or higher than %d' % (app_version, latest_version))
elif load_result[1] != '0':
print('ERROR: invalid firmware signature\n\n')
else:
print('SUCCESS: valid firmware signature\n\n')
sendPlainBoot("b") # blink led
except IOError as ex:
print(ex)
except (KeyboardInterrupt, SystemExit):
print('Exiting code')
|
"""
This is a script to scrape COVID19 data from the PowerBI visualization for Wake County NC:
- https://covid19.wakegov.com/
- https://app.powerbigov.us/view?r=eyJrIjoiNTIwNTg4NzktNjEzOC00NmVhLTg0OWMtNDEzNGEyM2I4MzhlIiwidCI6ImM1YTQxMmQxLTNhYmYtNDNhNC04YzViLTRhNTNhNmNjMGYyZiJ9
This script:
- scrapes infection and death data for all Wake County municipalities
- appends the results to a Google Sheet
Create cron jobs to run this script every hour (infections on the hour, deaths at half past):
0 * * * * /path/to/python3 scrapeWakeCovidData.py infections
30 * * * * /path/to/python3 scrapeWakeCovidData.py deaths
"""
import sys
import requests
import json
import gspread
from datetime import datetime
from time import sleep
url = 'https://wabi-us-gov-virginia-api.analysis.usgovcloudapi.net/public/reports/querydata?synchronous=true'
infection_post_data = "{\"version\":\"1.0.0\",\"queries\":[{\"Query\":{\"Commands\":[{\"SemanticQueryDataShapeCommand\":{\"Query\":{\"Version\":2,\"From\":[{\"Name\":\"c1\",\"Entity\":\"COVID19 Cases\",\"Type\":0}],\"Select\":[{\"Column\":{\"Expression\":{\"SourceRef\":{\"Source\":\"c1\"}},\"Property\":\"City (groups)\"},\"Name\":\"COVID19 Cases.City (groups)\"},{\"Measure\":{\"Expression\":{\"SourceRef\":{\"Source\":\"c1\"}},\"Property\":\"Confirmed Cases\"},\"Name\":\"COVID19 Cases.Confirmed Cases\"}],\"OrderBy\":[{\"Direction\":1,\"Expression\":{\"Column\":{\"Expression\":{\"SourceRef\":{\"Source\":\"c1\"}},\"Property\":\"City (groups)\"}}}]},\"Binding\":{\"Primary\":{\"Groupings\":[{\"Projections\":[0,1]}]},\"DataReduction\":{\"DataVolume\":4,\"Primary\":{\"Window\":{\"Count\":1000}}},\"Version\":1},\"ExecutionMetricsKind\":1}}]},\"QueryId\":\"\",\"ApplicationContext\":{\"DatasetId\":\"01040b2e-ab5a-4cce-ba48-5c38b411737f\",\"Sources\":[{\"ReportId\":\"dfd4d7a2-ad2e-44de-8fa3-2f7d6676ac6a\",\"VisualId\":\"ba759968923b8db91b65\"}]}}],\"cancelQueries\":[],\"modelId\":429669}"
death_post_data = "{\"version\":\"1.0.0\",\"queries\":[{\"Query\":{\"Commands\":[{\"SemanticQueryDataShapeCommand\":{\"Query\":{\"Version\":2,\"From\":[{\"Name\":\"c1\",\"Entity\":\"COVID19 Cases\",\"Type\":0}],\"Select\":[{\"Column\":{\"Expression\":{\"SourceRef\":{\"Source\":\"c1\"}},\"Property\":\"City (groups)\"},\"Name\":\"COVID19 Cases.City (groups)\"},{\"Measure\":{\"Expression\":{\"SourceRef\":{\"Source\":\"c1\"}},\"Property\":\"Total Deaths\"},\"Name\":\"COVID19 Cases.Total Deaths\"}],\"OrderBy\":[{\"Direction\":1,\"Expression\":{\"Column\":{\"Expression\":{\"SourceRef\":{\"Source\":\"c1\"}},\"Property\":\"City (groups)\"}}}]},\"Binding\":{\"Primary\":{\"Groupings\":[{\"Projections\":[0,1]}]},\"DataReduction\":{\"DataVolume\":4,\"Primary\":{\"Window\":{\"Count\":1000}}},\"Version\":1},\"ExecutionMetricsKind\":1}}]},\"QueryId\":\"\",\"ApplicationContext\":{\"DatasetId\":\"01040b2e-ab5a-4cce-ba48-5c38b411737f\",\"Sources\":[{\"ReportId\":\"dfd4d7a2-ad2e-44de-8fa3-2f7d6676ac6a\",\"VisualId\":\"77b42cab00058b4402e0\"}]}}],\"cancelQueries\":[],\"modelId\":429669}"
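# These request bodies are PowerBI DSR queries captured from the dashboard's network traffic;
# the DatasetId/ReportId/VisualId values pin each one to a specific visual, and the two bodies
# differ only in the measure they select ("Confirmed Cases" vs "Total Deaths").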
sheet_name = 'Wake county municipality COVID19'
service_account_file = '/opt/nc-covid-scrape/usdr-nc-covid-7afc8b3be71c.json'
def scrape_powerbi_data(url, post_data):
try:
r = requests.post(url,data=post_data)
data_status_code = r.status_code
j = json.loads(r.text)
data_timestamp = ''
#data_title = j['results'][0]['result']['data']['descriptor']['Select'][1]['Name']
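# Drill into the PowerBI response: results -> data shape result ('dsr') -> first
# dataset ('DS') -> first primary hierarchy ('PH') -> 'DM0', a list of row objects
# whose 'C' field holds the [municipality, count] pairs consumed below.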
data = j['results'][0]['result']['data']['dsr']['DS'][0]['PH'][0]['DM0']
except json.decoder.JSONDecodeError:
data = []
data_timestamp = ''
return [data, data_timestamp, data_status_code]
def send_data_to_gsheet(sheet, data, data_timestamp, data_status_code):
city_data_list = [ item['C'] for item in data]
sheet_header = sheet.get('A1:1')[0]
scrape_data = [
['timestamp', str(datetime.now())],
['success', bool(data_status_code == 200)],
['data_timestamp', data_timestamp]
]
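# Pre-fill every column with -1 so municipalities missing from this scrape still get a value.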
data_row = [-1] * len(sheet_header)
#for column, value in scrape_data + city_data_list:
for item in scrape_data + city_data_list:
try:
# column = town
# value = infections/deaths
column = item[0]
value = item[1]
index = sheet_header.index(column)
data_row[index] = value
except ValueError: # if the town doesn't exist in the current sheet
sheet_header.append(column)
data_row.append(value)
except IndexError: # if only a town is provided; no value
index = sheet_header.index(column)
data_row[index] = -1
# write data to gsheets
sheet.append_row(data_row)
# replace header
sheet.insert_row(sheet_header, index=1)
sheet.delete_row(index=2)
sheet.format(range_name='A1:1', cell_format={'textFormat': {'bold': True}})
def help():
print('\nScrape COVID19 data from Wake county PowerBI visualization' + \
'\nUsage: \n\tpython3 scrapeWakeCovidData.py [infections|deaths]')
#sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) != 2:
help()
sys.exit(1)
report = sys.argv[1]
if report == 'infections':
data = scrape_powerbi_data(url, infection_post_data)
gc = gspread.service_account(filename=service_account_file)
sheet = gc.open(sheet_name).worksheet('Scrape Infections')
send_data_to_gsheet(sheet, *data)
elif report == 'deaths':
data = scrape_powerbi_data(url, death_post_data)
gc = gspread.service_account(filename=service_account_file)
sheet = gc.open(sheet_name).worksheet('Scrape Deaths')
send_data_to_gsheet(sheet, *data)
else:
help()
|
import os
import glob
from catsndogs.data import get_training_data
folder = get_training_data()
cats = glob.glob(os.path.join(folder, "cat", "*.jpg"))
dogs = glob.glob(os.path.join(folder, "dog", "*.jpg"))
|
import json
import os
import urllib.request, urllib.parse, urllib.error
from io import StringIO
import csv
from tests.fixtures.base_test import BasePlenarioTest, fixtures_path
# Filters
# =======
# Constants holding query string values, helps to have them all in one place.
# The row counts come from executing equivalents of these queries on postgres.
FLU_BASE = 'flu_shot_clinics__filter='
# Returns 4 rows for this condition.
FLU_FILTER_SIMPLE = '{"op": "eq", "col": "zip", "val": 60620}'
# Returns 10 rows.
FLU_FILTER_SIMPLE2 = '{"op": "eq", "col": "day", "val": "Wednesday"}'
# Returns 1 row.
FLU_FILTER_COMPOUND_AND = FLU_BASE + '{"op": "and", "val": [' + \
FLU_FILTER_SIMPLE + ', ' + \
FLU_FILTER_SIMPLE2 + ']}'
# Returns 13 rows.
FLU_FILTER_COMPOUND_OR = FLU_BASE + '{"op": "or", "val": [' + \
FLU_FILTER_SIMPLE + ', ' + \
FLU_FILTER_SIMPLE2 + ']}'
# Returns 4 rows.
FLU_FILTER_NESTED = '{"op": "and", "val": [' \
' {"op": "ge", "col": "date", "val": "2013-11-01"},' \
' {"op": "or", "val": [' + \
FLU_FILTER_SIMPLE + ', ' + \
FLU_FILTER_SIMPLE2 + \
' ]' \
' }' \
']}'
def get_escaped_geojson(fname):
pwd = os.path.dirname(os.path.realpath(__file__))
rect_path = os.path.join(pwd, '../fixtures', fname)
with open(rect_path, 'r') as rect_json:
query_rect = rect_json.read()
escaped_query_rect = urllib.parse.quote(query_rect)
return escaped_query_rect
def get_loop_rect():
return get_escaped_geojson('loop_rectangle.json')
class PointAPITests(BasePlenarioTest):
@classmethod
def setUpClass(cls):
super(PointAPITests, cls).setUpClass()
super(PointAPITests, cls).ingest_points()
super(PointAPITests, cls).ingest_shapes()
def get_api_response(self, query_string):
"""This bit of code seems to be repeated alot."""
query = '/v1/api/' + query_string
response = self.app.get(query)
return json.loads(response.data.decode("utf-8"))
# ========
# datasets
# ========
def test_metadata_no_args(self):
r = self.get_api_response('datasets')
self.assertEqual(len(r), 2)
self.assertEqual(len(r['objects']), 3)
def test_metadata_big_lower_bound(self):
r = self.get_api_response('datasets?obs_date__ge=1000-01-01')
self.assertEqual(len(r), 2)
self.assertEqual(len(r['objects']), 3)
def test_metadata_big_upper_bound(self):
r = self.get_api_response('datasets?obs_date__le=2016-01-01')
self.assertEqual(len(r), 2)
self.assertEqual(len(r['objects']), 3)
def test_metadata_both_bounds(self):
r = self.get_api_response('datasets?obs_date__le=2016-01-01&obs_date__ge=2000-01-01')
self.assertEqual(len(r), 2)
self.assertEqual(len(r['objects']), 3)
def test_metadata_single_dataset(self):
r = self.get_api_response('datasets?dataset_name=crimes')
self.assertEqual(len(r['objects']), 1)
self.assertEqual(r['objects'][0]['view_url'],
"http://data.cityofchicago.org/api/views/ijzp-q8t2/rows")
def test_metadata_filter(self):
escaped_query_rect = get_loop_rect()
query = 'datasets?location_geom__within={}'\
'&obs_date__ge={}&obs_date__le={}'\
.format(escaped_query_rect, '2015-1-1', '2016-1-1')
r = self.get_api_response(query)
self.assertEqual(len(r['objects']), 1)
dataset_found = r['objects'][0]
self.assertEqual(dataset_found['dataset_name'], 'crimes')
def test_included_fields(self):
r = self.get_api_response('datasets/?dataset_name=flu_shot_clinics'
'&include_columns=true')
self.assertEqual(len(r['objects'][0]['columns']), 17)
''' /fields '''
def test_fields(self):
r = self.get_api_response('fields/flu_shot_clinics')
# Should be the same length
# as the number of columns in the source dataset
self.assertEqual(len(r['objects']), 17)
# ====================
# /detail tree filters
# ====================
def test_detail_with_simple_flu_filter(self):
r = self.get_api_response('detail?obs_date__ge=2000&dataset_name=flu_shot_clinics&' + FLU_BASE + FLU_FILTER_SIMPLE)
self.assertEqual(r['meta']['total'], 4)
def test_detail_with_compound_flu_filters_and(self):
r = self.get_api_response('detail?obs_date__ge=2000&dataset_name=flu_shot_clinics&' + FLU_FILTER_COMPOUND_AND)
self.assertEqual(r['meta']['total'], 1)
def test_detail_with_compound_flu_filters_or(self):
r = self.get_api_response('detail?obs_date__ge=2000&dataset_name=flu_shot_clinics&' + FLU_FILTER_COMPOUND_OR)
self.assertEqual(r['meta']['total'], 13)
def test_detail_with_nested_flu_filters(self):
r = self.get_api_response('detail?obs_date__ge=2000&dataset_name=flu_shot_clinics&' + FLU_BASE + FLU_FILTER_NESTED)
self.assertEqual(r['meta']['total'], 4)
# ============================
# /detail query string filters
# ============================
def test_time_filter(self):
r = self.get_api_response('detail?dataset_name=flu_shot_clinics'
'&obs_date__ge=2013-09-22'
'&obs_date__le=2013-10-1')
self.assertEqual(r['meta']['total'], 5)
def test_detail_with_0_hour_filter(self):
endpoint = 'detail'
dataset_arg = '?dataset_name=flu_shot_clinics'
date_args = '&obs_date__ge=2013-09-22&obs_date__le=2013-10-1'
hour_arg = '&date__time_of_day_ge=0'
r = self.get_api_response(endpoint + dataset_arg + date_args + hour_arg)
self.assertEqual(r['meta']['total'], 5)
def test_detail_with_both_hour_filters(self):
endpoint = 'detail'
dataset_arg = '?dataset_name=crimes'
date_args = '&obs_date__ge=2000'
lower_hour_arg = '&date__time_of_day_ge=5'
upper_hour_arg = '&date__time_of_day_le=17'
r = self.get_api_response(endpoint + dataset_arg + date_args +
upper_hour_arg + lower_hour_arg)
self.assertEqual(r['meta']['total'], 3)
def test_csv_response(self):
query = '/v1/api/detail/?dataset_name=flu_shot_clinics&obs_date__ge=2013-09-22&obs_date__le=2013-10-1&data_type=csv'
resp = self.app.get(query)
mock_csv_file = StringIO(resp.data.decode("utf-8"))
reader = csv.reader(mock_csv_file)
lines = [line for line in reader]
# One header line, 5 data lines
self.assertEqual(len(lines), 6)
for line in lines:
self.assertEqual(len(line), len(lines[0]))
self.assertTrue('date' in lines[0])
self.assertTrue('latitude' in lines[0])
self.assertTrue('longitude' in lines[0])
def test_geojson_response(self):
r = self.get_api_response('detail/?dataset_name=flu_shot_clinics'
'&obs_date__ge=2013-09-22'
'&obs_date__le=2013-10-1&data_type=geojson')
points = r['features']
self.assertEqual(len(points), 5)
attributes = points[0]
self.assertTrue('geometry' in attributes)
self.assertTrue('latitude' in attributes['properties'])
self.assertTrue('longitude' in attributes['properties'])
def test_space_filter(self):
escaped_query_rect = get_loop_rect()
r = self.get_api_response('detail/?dataset_name=flu_shot_clinics'
'&obs_date__ge=2013-01-01'
'&obs_date__le=2013-12-31'
'&location_geom__within=' + escaped_query_rect)
self.assertEqual(r['meta']['total'], 5)
def test_time_of_day(self):
r = self.get_api_response('detail/?dataset_name=crimes'
'&obs_date__ge=2015-01-01'
'&date__time_of_day_ge=6')
self.assertEqual(r['meta']['total'], 2)
def test_in_operator(self):
r = self.get_api_response('detail/?obs_date__le=2016%2F01%2F19'
'&event_type__in=Alderman,CPD'
'&obs_date__ge=2012%2F10%2F21'
'&dataset_name=flu_shot_clinics')
self.assertEqual(r['meta']['total'], 53)
def test_multipolygon(self):
multipolygon = get_escaped_geojson('loop_and_near_southeast.json')
r = self.get_api_response('detail/?dataset_name=flu_shot_clinics'
'&obs_date__ge=2013-01-01'
'&obs_date__le=2013-12-31'
'&location_geom__within=' + multipolygon)
self.assertEqual(r['meta']['total'], 11)
# ==================
# /grid tree filters
# ==================
def test_grid_with_simple_tree_filter(self):
filter_ = 'crimes__filters={"op": "eq", "col": "description", "val": "CREDIT CARD FRAUD"}'
r = self.get_api_response('grid?obs_date__ge=2000&dataset_name=crimes&{}'.format(filter_))
self.assertEqual(len(r['features']), 2)
def test_space_and_time(self):
escaped_query_rect = get_loop_rect()
r = self.get_api_response('grid/'
'?obs_date__ge=2013-1-1&obs_date__le=2014-1-1'
'&dataset_name=flu_shot_clinics'
'&location_geom__within=' + escaped_query_rect)
self.assertEqual(len(r['features']), 4)
# Each feature should have an associated square geometry with 5 points
# (4 points to a square, then repeat the first to close it)
squares = [feat['geometry']['coordinates'][0] for feat in r['features']]
self.assertTrue(all([len(square) == 5 for square in squares]))
# Each feature also needs a count of items found in that square.
# We expect 3 squares with 1 and 1 square with 2
counts = [feat['properties']['count'] for feat in r['features']]
self.assertEqual(counts.count(1), 3)
self.assertEqual(counts.count(2), 1)
def test_grid_column_filter(self):
r = self.get_api_response(
'grid/?obs_date__ge=2013-1-1&obs_date_le=2014-1-1'
'&dataset_name=flu_shot_clinics&event_type=Church'
)
# 6 Church-led flu shot clinics.
# And they were far enough apart to each get their own square.
self.assertEqual(len(r['features']), 6)
# ===========
# /timeseries
# ===========
def flu_agg(self, agg_type, expected_counts):
# Always query from 9-22 to 10-1
r = self.get_api_response(
'timeseries/?obs_date__ge=2013-09-22'
'&obs_date__le=2013-10-1&agg=' + agg_type
)
# Only the flu dataset should have records in this range
self.assertEqual(len(r['objects']), 1)
timeseries = r['objects'][0]
self.assertEqual(timeseries['dataset_name'], 'flu_shot_clinics')
# Extract the number of flu clinics per time unit
counts = [time_unit['count'] for time_unit in timeseries['items']]
self.assertEqual(expected_counts, counts)
def test_day_agg(self):
# 1 clinic on the 22nd. No clinics on the 23rd...
expected_counts = [1, 0, 0, 0, 0, 0, 1, 0, 1, 2]
self.flu_agg('day', expected_counts)
def test_week_agg(self):
# Weeks start from the beginning of the year, not the date specified in the query.
# So even though we're only asking for 10 days,
# we intersect with 3 weeks.
expected_counts = [1, 1, 3]
self.flu_agg('week', expected_counts)
def test_month_agg(self):
# 3 clinics in the range we specified in September, 2 in October.
expected_counts = [3, 2]
self.flu_agg('month', expected_counts)
def test_year_agg(self):
# 5 clinics when grouping by year.
expected_counts = [5]
self.flu_agg('year', expected_counts)
def test_year_agg_csv(self):
# Extend the test reach into the csv part of timeseries.
query = '/v1/api/timeseries?obs_date__ge=2013-09-22&obs_date__le=2013-10-1&agg=year&data_type=csv'
resp = self.app.get(query)
# Assert a count of 5 in the year 2013.
self.assertEqual(
resp.data.decode("utf-8"),
'temporal_group,flu_shot_clinics\r\n2013-01-01,5\r\n'
)
def test_two_datasets(self):
# Query over all of 2012 and 2013, aggregating by year.
r = self.get_api_response(
'timeseries/?obs_date__ge=2012-01-01'
'&obs_date__le=2013-12-31&agg=year'
)
# The order of the datasets isn't guaranteed, so preprocess the response
# so we can grab each dataset's timeseries by name.
name_to_series = {}
for obj in r['objects']:
timeseries = [year['count'] for year in obj['items']]
name_to_series[obj['dataset_name']] = timeseries
# 7 landmarks declared in 2012, 0 in 2013.
self.assertEqual(name_to_series['landmarks'], [7, 0])
# No flu shot clinics in 2012, 65 in 2013.
self.assertEqual(name_to_series['flu_shot_clinics'], [0, 65])
def test_geo_filter(self):
escaped_query_rect = get_loop_rect()
r = self.get_api_response(
'timeseries/?obs_date__ge=2013-01-01&obs_date__le=2013-12-31'
'&agg=year&location_geom__within=' + escaped_query_rect
)
self.assertEqual(len(r['objects']), 1)
timeseries = r['objects'][0]
self.assertEqual(timeseries['dataset_name'], 'flu_shot_clinics')
# Extract the number of flu clinics per time unit
counts = [time_unit['count'] for time_unit in timeseries['items']]
self.assertEqual([5], counts)
def test_timeseries_with_multiple_datasets(self):
endpoint = 'timeseries'
query = '?obs_date__ge=2000-08-01&agg=year&dataset_name__in=flu_shot_clinics,landmarks'
resp_data = self.get_api_response(endpoint + query)
print(resp_data)
self.assertEqual(resp_data['objects'][0]['count'], 65)
self.assertEqual(resp_data['objects'][1]['count'], 149)
def test_timeseries_with_multiple_datasets_but_one_is_bad(self):
endpoint = 'timeseries'
query = '?obs_date__ge=2000&agg=year&dataset_name__in=flu_shot_clinics,landmarkz'
resp_data = self.get_api_response(endpoint + query)
self.assertIn('landmarkz', resp_data['meta']['message']['dataset_name__in']['1'][0])
# ================================
# /timeseries with condition trees
# ================================
def test_timeseries_with_a_tree_filter(self):
endpoint = 'timeseries'
query = '?obs_date__ge=2005-01-01&agg=year'
qfilter = '&crimes__filter={"op": "eq", "col": "iucr", "val": 1150}'
resp_data = self.get_api_response(endpoint + query + qfilter)
# Crimes is the only one that gets a filter applied.
self.assertEqual(resp_data['objects'][0]['count'], 2)
self.assertEqual(resp_data['objects'][1]['count'], 65)
self.assertEqual(resp_data['objects'][2]['count'], 88)
def test_timeseries_with_multiple_filters(self):
endpoint = 'timeseries'
query = '?obs_date__ge=2005&agg=year'
cfilter = '&crimes__filter={"op": "eq", "col": "iucr", "val": 1150}'
lfilter = '&landmarks__filter={"op": "eq", "col": "architect", "val": "Frommann and Jebsen"}'
resp_data = self.get_api_response(endpoint + query + cfilter + lfilter)
# Crime filter gets applied.
self.assertEqual(resp_data['objects'][0]['count'], 2)
# Flu shots gets no filter applied.
self.assertEqual(resp_data['objects'][1]['count'], 65)
# Landmark filter gets applied.
self.assertEqual(resp_data['objects'][2]['count'], 3)
# =================
# /detail-aggregate
# =================
def test_detail_aggregate_with_just_lower_time_bound(self):
resp = self.get_api_response('detail-aggregate?dataset_name=crimes'
'&obs_date__ge=2015-01-01')
self.assertEqual(resp['count'], 7)
def test_aggregate(self):
# Use same params as for timeseries
query = '/v1/api/detail-aggregate/?dataset_name=flu_shot_clinics' \
'&obs_date__ge=2013-09-22&obs_date__le=2013-10-1&agg=week'
resp = self.app.get(query)
response_data = json.loads(resp.data.decode("utf-8"))
expected_counts = [1, 1, 3]
observed_counts = [obj['count'] for obj in response_data['objects']]
self.assertEqual(expected_counts, observed_counts)
def test_polygon_filter(self):
query = '/v1/api/detail/?dataset_name=flu_shot_clinics' \
'&obs_date__ge=2013-09-22&obs_date__le=2013-10-1' \
'&shape=chicago_neighborhoods'
resp = self.app.get(query)
response_data = json.loads(resp.data.decode("utf-8"))
self.assertEqual(response_data['meta']['total'], 5)
def test_aggregate_column_filter(self):
query = 'v1/api/detail-aggregate/' \
'?obs_date__ge=2013-1-1&obs_date__le=2014-1-1' \
'&dataset_name=flu_shot_clinics&event_type=Church&agg=year'
resp = self.app.get(query)
response_data = json.loads(resp.data.decode("utf-8"))
# 6 Church-led flu shot clinics.
self.assertEqual(response_data['objects'][0]['count'], 6)
def test_bad_column_condition(self):
query = 'v1/api/detail/?dataset_name=flu_shot_clinics&fake_column=fake'
resp = self.app.get(query)
response_data = json.loads(resp.data.decode("utf-8"))
self.assertTrue("Unused parameter value fake_column='fake'" in response_data['meta']['message'], response_data['meta']['message'])
def test_bad_column_condition_with_shape(self):
query = 'v1/api/detail/?dataset_name=flu_shot_clinics&shape=chicago_neighborhoods&fake_column=fake'
resp = self.app.get(query)
response_data = json.loads(resp.data.decode("utf-8"))
self.assertTrue("Unused parameter value fake_column='fake'" in response_data['meta']['message'])
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''A simple tool that may be used to compare font faces.
Use the left/right cursor keys to change font faces.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
FONTS = ['Andale Mono', 'Consolas', 'Inconsolata', 'Inconsolata-dz', 'Monaco',
'Menlo']
SAMPLE = '''class Spam(object):
def __init__(self):
# The quick brown fox
self.spam = {"jumped": 'over'}
@the
def lazy(self, *dog):
self.dog = [lazy, lazy]'''
class Window(pyglet.window.Window):
font_num = 0
def on_text_motion(self, motion):
if motion == pyglet.window.key.MOTION_RIGHT:
self.font_num += 1
if self.font_num == len(FONTS):
self.font_num = 0
elif motion == pyglet.window.key.MOTION_LEFT:
self.font_num -= 1
if self.font_num < 0:
self.font_num = len(FONTS) - 1
face = FONTS[self.font_num]
self.head = pyglet.text.Label(face, font_size=24, y=0,
anchor_y='bottom')
self.text = pyglet.text.Label(SAMPLE, font_name=face, font_size=18,
y=self.height, anchor_y='top', width=self.width, multiline=True)
def on_draw(self):
self.clear()
self.head.draw()
self.text.draw()
window = Window()
window.on_text_motion(None)
pyglet.app.run()
|
from github import Github, GithubException
import time
from server import app, celery, logger
from server.utils.utils import getAllFilesWPathsInDirectory
from server.utils.githubUtils import createNewRepo
from server.api.messageApi import postEvent
@celery.task
def add(x, y):
return x + y
@celery.task(bind=True)
def timer(self, n):
results = []
for i in range(n):
data = {
'message': 'On number {} of {}'.format(i+1, n),
}
time.sleep(1)
resp = postEvent('STATUS_UPDATE', data, room=self.request.id)
results.append(i+1)
message = {
'message': 'Completed timer.',
'data': results,
}
resp = postEvent('COMPLETED', message, room=self.request.id)
return results
@celery.task(bind=True)
def copyAppToNewRepo(self, github_token, repo_name):
task_id = self.request.id
try:
DEFAULT_DIRS_TO_AVOID = set(['./.git', './env', './node_modules', './server/static/javascript', './.profile.d', './.heroku'])
DEFAULT_EXTENSIONS_TO_AVOID = set(['pyc', 'log', 'python_history', 'rdb', 'env'])
g = Github(github_token)
# Since creating a repo happens on the user object, we must fetch the user first.
user = g.get_user()
user_login = user.login
# Create a new repo for the user. Will fail if repo name already exists
repo = createNewRepo(user, repo_name)
new_repo_name = repo.name
# If we successfully created the repo, then we can prep all files in this app to add to the repo.
files = getAllFilesWPathsInDirectory('.', dirsToAvoid=DEFAULT_DIRS_TO_AVOID, extensionsToAvoid=DEFAULT_EXTENSIONS_TO_AVOID)
files_added_successfully = []
files_failed = []
for i, file_path in enumerate(files):
# Strip the leading './' so paths are relative to the repo root.
file_path_formatted = file_path[2:]
# Try to read the file's content.
try:
with open(file_path, 'rb') as file:
file_content = file.read()
except IOError as e:
files_failed.append(file_path_formatted)
continue
try:
# Ideally Github would allow us to add our files in batches, rather than one at a time,
# so that we can reduce the number of API calls required. However, based on this
# discussion, it does not appear to be possible. https://github.com/isaacs/github/issues/199
debug_message = 'Committing file {file_num} of {num_files} for {user_login} to {repo_name}: {file_path}'.format(file_num=i+1, num_files=len(files), user_login=user_login, repo_name=new_repo_name, file_path=file_path_formatted)
logger.debug(debug_message)
commit_message = 'File {file_num} of {num_files}: {file_path}'.format(file_num=i+1, num_files=len(files), user_login=user_login, repo_name=new_repo_name, file_path=file_path_formatted)
repo.create_file(file_path_formatted, commit_message, file_content)
files_added_successfully.append(file_path_formatted)
event_message = 'Committed file {file_num} of {num_files}: {file_path}'.format(file_num=i+1, num_files=len(files), user_login=user_login, repo_name=new_repo_name, file_path=file_path_formatted)
resp = postEvent('STATUS_UPDATE', {'message': event_message}, room=task_id)
except GithubException as e:
errorMessage = e.args[1].get('message')
files_failed.append(file_path_formatted)
results = {
'repoName': new_repo_name,
'successfullyAdded': files_added_successfully,
'failed': files_failed,
}
resp = postEvent('COMPLETED', results, room=task_id)
return resp
except GithubException as e:
error = {
'status': e.status,
'data': e.data,
}
error['data']['repoName'] = repo_name
logger.debug(error)
resp = postEvent('FAILED', error, room=task_id)
return
except Exception as e:
error = {
'status': 500,
'data': {
'message': str(e),
'repoName': repo_name
}
}
resp = postEvent('FAILED', error, room=task_id)
return
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 27 08:49:25 2017
Recursive execution
@author: laci
"""
import multiprocessing as mp
import time
import sparse_methods as m
import sparse_move as sm
import sparse_contract as sc
import graph
startTime = time.time()
lo = 0
up = 15000
g = graph.BAGraph(up, 0.7)
uf = m.recurse(sm.independent, sc.one, m.iterated, lo, up, g)
# print("all:", uf)
endTime = time.time()
#calculate the total time it took to complete the work
workTime = endTime - startTime
#print results
print("The job took " + str(workTime) + " seconds to complete")
|
from .boxes import *
from .demo_utils import *
from .logger import WandbLogger, setup_logger
from .model_utils import *
from .visualize import *
|
# -*- coding:utf-8 -*-
import turbo.log
from base import BaseHandler
from helpers import dc as dc_helper
from db.conn import dc_files
logger = turbo.log.getLogger(__file__)
db_dc = dc_helper.dc
class HomeHandler(BaseHandler):
_get_params = {
'option': [
('skip', int, 0),
('limit', int, 0)
]
}
def get(self, *args, **kwargs):
skip = self._params['skip']
limit = self._params['limit']
dcs = db_dc.find(limit=limit, skip=skip, sort=[('atime', -1)])
self.render('index.html', dcs=dcs, limit=limit, skip=skip)
class CreateHandler(BaseHandler):
def get(self, tp):
limit = self._limit
skip = self._skip
if tp != 'dc':
return
self.render('dccreate.html', message='', limit=limit, skip=skip)
_post_params = {
'option': [
('name', basestring, ''),
('desc', basestring, ''),
('used', list, []),
('spec', basestring, ''),
('expiration', basestring, ''),
]
}
def post(self, tp):
# print self.request.files['file'][0]['body']
limit = self._limit
skip = self._skip
if tp != 'dc':
return
name = self._params['name']
desc = self._params['desc']
used = self._params['used']
spec = self._params['spec']
expiration = self._params['expiration']
fid = None
try:
fid = dc_files.put(self.request.files['file'][0]['body'])
except Exception as e:
print(e)
db_dc.create({
'name': name,
'desc': desc,
'used': used,
'spec': spec,
'expiration': expiration,
'file': fid,
'file_name': self.request.files['file'][0]['filename']
})
message = 'success! add %s, %s' % (name, desc)
self.render('dccreate.html', message=message, limit=limit, skip=skip)
class EditHandler(BaseHandler):
def get(self, tp, objid):
skip = self._skip
limit = self._limit
if tp != 'dc':
return
dc = db_dc.find_by_id(objid)
if not dc:
return
dc['used'] = ','.join(dc['used'])
self.render('dcedit.html', message='', dc=dc, limit=limit, skip=skip)
_post_params = {
'option': [
('name', basestring, ''),
('desc', basestring, ''),
('used', list, []),
('spec', basestring, ''),
('expiration', basestring, ''),
]
}
def post(self, tp, objid):
limit = self._limit
skip = self._skip
if tp != 'dc':
return
dc = db_dc.find_by_id(objid)
if not dc:
return
dc['name'] = self._params['name']
dc['desc'] = self._params['desc']
dc['used'] = self._params['used']
dc['spec'] = self._params['spec']
dc['expiration'] = self._params['expiration']
if self.request.files.get('file', None):
dc['file'] = dc_files.put(self.request.files['file'][0]['body'])
dc['file_name'] = self.request.files['file'][0]['filename']
db_dc.save(dc)
message = 'success! update %s, %s' % (dc['name'], dc['desc'])
dc['used'] = ','.join(dc['used'])
self.render('dcedit.html', message=message, dc=dc, limit=limit, skip=skip)
class DelHandler(BaseHandler):
pass
class FileHandler(BaseHandler):
def get(self, objid):
dc = db_dc.find_one({'file': self.to_objectid(objid)})
self.set_header('Content-Type', 'application/octet-stream')
self.set_header('Content-Disposition', 'attachment; filename=%s' % dc['file_name'])
self.write(dc_files.get(self.to_objectid(objid)).read())
|
import numpy as np
import pandas as pd
from glob import glob
import os
import sys
from morphomnist import io
def get_data(path):
df_train = pd.read_csv(os.path.join(path, 'train-morpho.csv'))
df_test = pd.read_csv(os.path.join(path, 't10k-morpho.csv'))
return df_train, df_test
def uniform_resampling(df, sample_size = 5300):
pd_uniform = df[df['thickness'] < 10.5]
# Round the value to discretize the domain
pd_uniform['round'] = round(pd_uniform["thickness"])
# Group the rows by the discretized thickness values
grouped = pd_uniform.groupby('round')
print()
print(f'Real distribution : ')
print(pd_uniform.groupby('round')['round'].count())
# Resample each group
grouped_uniform = grouped.apply(lambda x: x.sample(n=sample_size)).reset_index(drop=True)
print()
print(f'Artificial uniform distribution : ')
print(grouped_uniform.groupby('round')['round'].count())
return grouped_uniform
def load_manipulate_save(input_path, out_path, train_index, test_index):
train_paths = glob(os.path.join(input_path, 'train*'))
test_paths = glob(os.path.join(input_path, 't10k*'))
for path in train_paths:
name = path.split('/')[-1]
if name.split('.')[-1] == 'gz':
data_new = io.load_idx(path)[train_index.values]
io.save_idx(data_new, os.path.join(out_path, name))
if name.split('.')[-1] == 'csv':
data_new = pd.read_csv(path).loc[train_index.values]
data_new.to_csv(os.path.join(out_path, name), index=False)
print(' ------------ CHECK ------------ ')
data_new['round'] = round(data_new["thickness"])
print(data_new.groupby('round')['round'].count())
for path in test_paths:
name = path.split('/')[-1]
if name.split('.')[-1] == 'gz':
data_new = io.load_idx(path)[test_index.values]
io.save_idx(data_new, os.path.join(out_path, name))
if name.split('.')[-1] == 'csv':
data_new = pd.read_csv(path).loc[test_index.values]
data_new.to_csv(os.path.join(out_path, name), index=False)
print(' ------------ CHECK ------------ ')
data_new['round'] = round(data_new["thickness"])
print(data_new.groupby('round')['round'].count())
def data_resampling(input_path, out_path):
# Get data
df_train, df_test = get_data(input_path)
# Manipulate the thickness distribution
train_uniform = uniform_resampling(df_train)
test_uniform = uniform_resampling(df_test, sample_size = 697)
# Load the rest of the data and keep only the selected indexes
load_manipulate_save(input_path, out_path, train_uniform['index'], test_uniform['index'])
# Absolute path
FOLDER = '/data/processed/'
INPUT_NAME = 'original_thic'
OUTPUT_NAME = 'original_thic_resample'
INPUT_PATH = os.path.join(FOLDER, INPUT_NAME)
OUTPUT_PATH = os.path.join(FOLDER, OUTPUT_NAME)
# Create the output directory if it does not already exist
os.makedirs(OUTPUT_PATH, exist_ok=True)
data_resampling(INPUT_PATH, OUTPUT_PATH)
|
from django.urls import include, re_path
urlpatterns = [
re_path(r"^", include("dpaste.urls.dpaste_api")),
re_path(r"^", include("dpaste.urls.dpaste")),
re_path(r"^i18n/", include("django.conf.urls.i18n")),
]
# Custom error handlers which load `dpaste/<code>.html` instead of `<code>.html`
handler404 = "dpaste.views.handler404"
handler500 = "dpaste.views.handler500"
|
import itertools
def chunks( iterable, size=1000 ):
"""
Make digestible chunks of something that can be iterated
Arguments
---------
    iterable: list, tuple, generator
        Anything that supports iter(); it will be broken
        into digestible chunks
    size: size of each digestible chunk
Examples
--------
>>> [ list( chunk ) for chunk in chunks( range( 10 ), size=5 ) ]
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
"""
iterator = iter( iterable )
for i in iterator:
yield itertools.chain( [i], itertools.islice( iterator, size - 1 ) )
class ___DUMP:
def __init__( self, dump ):
self.dump = dump
def chunk_each( l, f ):
inner_result = []
iterator = iter( l )
for i in iterator:
if f( i ):
if inner_result:
yield inner_result
inner_result = []
inner_result.append( i )
if inner_result:
yield inner_result
def cut_until( iterator, until ):
for i in iterator:
if until( i ):
break
yield i
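# Illustrative examples for the helpers above (doctest-style comments only):
#
#   >>> list(chunk_each([1, 2, 2, 3, 1, 4, 5], lambda x: x == 1))
#   [[1, 2, 2, 3], [1, 4, 5]]
#   >>> list(cut_until(iter([1, 2, 2, 3, 1, 4, 5]), lambda x: x == 3))
#   [1, 2, 2]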
|
DATABASES = {'default': {'ENGINE': 'django.db.backends.', 'NAME': '', 'HOST': '', 'USER': '', 'PASSWORD': '', 'PORT': ''}}
DEBUG = True
INSTALLED_APPS = ('django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles')
ROOT_URLCONF = 'project.urls' ###
SECRET_KEY = '01234567890123456789012345678901234567890123456789'
SETTINGS_MODULE = 'project.settings' ###
SITE_ID = 1 ###
STATIC_URL = '/static/'
TEMPLATE_DEBUG = True
USE_L10N = True
USE_TZ = True
WSGI_APPLICATION = 'project.wsgi.application'
|
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
from Crypto.Random import new
class Client:
"""!
Client is created with a 4096 bit RSA key pair.
The public key is exported then hashed with sha256.
The hex output from that gives you the 'client_id'
"""
def __init__(self):
self.__private_key = RSA.generate(4096, new().read)
self.public_key = self.__private_key.publickey()
self.signer = PKCS1_v1_5.new(self.__private_key)
self.communicator = PKCS1_OAEP.new(self.__private_key)
@property
def client_id(self):
return SHA256.new(self.public_key.exportKey('DER')).hexdigest()
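# Minimal usage sketch (illustrative, not part of the original module). It
# assumes the PyCrypto/PyCryptodome APIs imported above; generating a
# 4096-bit key can take several seconds.
if __name__ == '__main__':
    client = Client()
    print(client.client_id)  # sha256 hex digest of the DER-encoded public key

    # Sign with the private key; the verification API differs between PyCrypto
    # (returns a bool) and PyCryptodome (raises on failure), so just call it.
    digest = SHA256.new(b'hello')
    signature = client.signer.sign(digest)
    PKCS1_v1_5.new(client.public_key).verify(digest, signature)

    # Encrypt to the public key; the client decrypts with its OAEP cipher.
    ciphertext = PKCS1_OAEP.new(client.public_key).encrypt(b'secret')
    assert client.communicator.decrypt(ciphertext) == b'secret'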
|
__copyright__ = 'Copyright 2018-2021, The RADICAL-Cybertools Team'
__license__ = 'MIT'
from .base import RMInfo, ResourceManager
# ------------------------------------------------------------------------------
#
class Debug(ResourceManager):
# --------------------------------------------------------------------------
#
def _init_from_scratch(self, rm_info: RMInfo) -> RMInfo:
nodes = [('localhost', rm_info.cores_per_node)
for idx in range(rm_info.requested_nodes)]
rm_info.node_list = self._get_node_list(nodes, rm_info)
# UIDs need to be made unique
for idx, node in enumerate(rm_info.node_list):
node['node_id'] = '%s_%04d' % (node['node_name'], idx)
return rm_info
# ------------------------------------------------------------------------------
|
import os
from yUMItools.datasim import *
import numpy as np
import matplotlib.pyplot as plt
def test__read_fasta_file():
import Bio.Seq
# filepath
test_data_fasta_filepath = 'test_data/reference_sequence/Rp0-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
# test loading one file
assert type(s.reference_sequence_dict['Rp0-reference']) is Bio.Seq.Seq
del s
def test__read_fasta_file_multiple():
# filepath
test_data_fasta_filepath = 'test_data/reference_sequence/mixed-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
# test loading multiple files
assert len(s.reference_sequence_dict.keys()) == 2
del s
def test_find_barcodes():
# filepath
test_data_fasta_filepath = 'test_data/reference_sequence/mixed-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
library_sequence = s.reference_sequence_dict['Rp0-reference']
s.find_barcodes(library_sequence=library_sequence)
assert s.barcode_dict['barcode0'] == (265, 280)
assert s.barcode_dict['barcode5'] == (3149, 3164)
del s
def test_generate_barcode_library():
# filepath
test_data_fasta_filepath = 'test_data/reference_sequence/Rp0-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
library_sequence = s.reference_sequence_dict['Rp0-reference']
s.find_barcodes(library_sequence=library_sequence)
s.generate_barcode_library(clones=10)
assert len(s.barcode_library_list) == 10
assert hamming(s.barcode_library_list[0], s.barcode_library_list[1]) > 0
del s
def test_reverse_transcribe_library():
# filepath
test_data_fasta_filepath = 'test_data/reference_sequence/Rp0-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
library_sequence = s.reference_sequence_dict['Rp0-reference']
s.find_barcodes(library_sequence=library_sequence)
# only generate one clone - this allows us to easily count mutations
s.generate_barcode_library(clones=1)
# test with no mutations
rt_library = reverse_transcribe_library(s.barcode_library_list,
clones=2,
mut_type='random',
mutation_rate=0)
assert hamming(rt_library[0], rt_library[1]) == 0
# test with low mutation rate
rt_library = reverse_transcribe_library(s.barcode_library_list,
clones=2,
mut_type='random',
mutation_rate=1. / 10000)
assert hamming(rt_library[0], rt_library[1]) <= 10
# test with high mutation rate
rt_library = reverse_transcribe_library(s.barcode_library_list,
clones=2,
mut_type='random',
mutation_rate=1. / 100)
assert hamming(rt_library[0], rt_library[1]) >= 10
def test_resolve_barcode():
# filepath
test_data_fasta_filepath = 'test_data/reference_sequence/Rp0-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
library_sequence = s.reference_sequence_dict['Rp0-reference']
s.find_barcodes(library_sequence=library_sequence)
umi_set = resolve_barcode(s.reference_sequence_dict[s.record_id],
s.barcode_dict)
assert umi_set[0:10] == s.reference_sequence_dict['Rp0-reference'][0:10]
def test_generate_barcode():
# test for correct length
assert len(generate_barcode(10)) == 10
# test for sufficient randomness
result_list = []
for i in range(10):
result_list.append(hamming(generate_barcode(10), generate_barcode(10)))
assert sum(result_list) / len(result_list) >= 5
def test_mutation_random():
test_data_fasta_filepath = 'test_data/reference_sequence/Rp0-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
library_sequence = s.reference_sequence_dict['Rp0-reference']
s.find_barcodes(library_sequence=library_sequence)
s.generate_barcode_library(clones=1)
template = s.barcode_library_list[0]
assert mutation_random(template[0], mutation_rate=0) == 'G'
sequence = Seq("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
test_output = []
for i in range(len(sequence)):
if mutation_random(sequence[i], mutation_rate=0.5) == "A":
test_output.append(1)
else:
test_output.append(0)
def test_mutation_indel():
test_data_fasta_filepath = 'test_data/reference_sequence/Rp0-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
library_sequence = s.reference_sequence_dict['Rp0-reference']
s.find_barcodes(library_sequence=library_sequence)
s.generate_barcode_library(clones=1)
template = s.barcode_library_list[0]
assert mutation_indel(template[0], mutation_rate=0) == 'G'
sequence = Seq("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
test_output = []
for i in range(len(sequence)):
mutation = mutation_indel(sequence[i], mutation_rate=0.9)
if mutation == "A":
test_output.append(1)
print(mutation)
else:
test_output.append(0)
print(mutation)
print(test_output)
def test_data_generation():
test_data_fasta_filepath = 'test_data/reference_sequence/Rp0-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
library_sequence = s.reference_sequence_dict['Rp0-reference']
s.find_barcodes(library_sequence=library_sequence)
# generated barcode library diversity
s.generate_barcode_library(clones=500)
# amplify barcode library
amp_barcode_lib = library_amp(s.barcode_library_list, cycles=15, p=0.5)
# tagment barcoded library
tagment_amp_barcode_lib = tagment(amp_barcode_lib, ave_size=700, std=50)
# deep sequence library and write output files
deep_sequence(tagment_amp_barcode_lib, 'test_data/fastq_files/library-deep-sequence', read_length=300, coverage=5000)
#os.remove("test_data/fastq_files/library-deep-sequence_S00_L001_R1_001.fastq.gz")
#os.remove("test_data/fastq_files/library-deep-sequence_S00_L001_R2_001.fastq.gz")
def test_data_generation_indel():
test_data_fasta_filepath = 'test_data/reference_sequence/Rp0-reference.fa'
# create sequence object and load fasta file
s = TestTube(test_data_fasta_filepath)
library_sequence = s.reference_sequence_dict['Rp0-reference']
s.find_barcodes(library_sequence=library_sequence)
# generated barcode library diversity
s.generate_barcode_library(clones=500)
# reverse transcribe
high_rt_lib = reverse_transcribe_library(s.barcode_library_list,
mut_type='indel',
clones=50,
mutation_rate=1./100)
# amplify barcode library
amp_barcode_lib = library_amp(high_rt_lib, cycles=15, p=0.5)
# tagment barcoded library
tagment_amp_barcode_lib = tagment(amp_barcode_lib, ave_size=700, std=50)
# deep sequence library and write output files
deep_sequence(tagment_amp_barcode_lib,
'test_data/fastq_files/library-deep-sequence-high_rt_lib',
read_length=300,
coverage=5000)
#os.remove("test_data/fastq_files/library-deep-sequence_S00_L001_R1_001.fastq.gz")
#os.remove("test_data/fastq_files/library-deep-sequence_S00_L001_R2_001.fastq.gz")
|
import os
import random
import numpy as np
from decord import VideoReader
from decord.base import DECORDError
def _get_default_test_video():
return VideoReader(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'examples', 'flipping_a_pancake.mkv')))
def _get_corrupted_test_video():
return VideoReader(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'test_data', 'corrupted.mp4')))
def _get_rotated_test_video(rot, height=-1, width=-1):
return VideoReader(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'test_data', f'video_{rot}.mov')), height=height, width=width)
def _get_unordered_test_video():
# video with frames not ordered by pts
return VideoReader(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'test_data', 'unordered.mov')))
def test_video_reader_len():
vr = _get_default_test_video()
assert len(vr) == 310
def test_video_reader_read_sequential():
vr = _get_default_test_video()
for i in range(len(vr)):
frame = vr[i]
def test_video_reader_read_slice():
vr = _get_default_test_video()
frames = vr[:]
assert frames.shape[0] == len(vr)
vr = _get_default_test_video()
frames = vr[:10]
assert frames.shape[0] == 10
def test_video_reader_read_random():
vr = _get_default_test_video()
lst = list(range(len(vr)))
random.shuffle(lst)
num = min(len(lst), 10)
rand_lst = lst[:num]
for i in rand_lst:
frame = vr[i]
def test_video_get_batch():
vr = _get_default_test_video()
lst = list(range(len(vr)))
random.shuffle(lst)
num = min(len(lst), 10)
rand_lst = lst[:num]
frames = vr.get_batch(rand_lst)
def test_video_corrupted_get_batch():
from nose.tools import assert_raises
vr = _get_corrupted_test_video()
assert_raises(DECORDError, vr.get_batch, range(40))
def test_rotated_video():
# Input videos are all h=320 w=568, but
# with rotation metadata.
for rot in [0, 180]:
# shot in landscape; correct orientation has
# same shape as "original" frame
vr = _get_rotated_test_video(rot)
assert vr[0].shape == (320, 568, 3)
assert vr[:].shape == (3, 320, 568, 3)
for rot in [90, 270]:
# shot in portrait mode; correct orientation has
# swapped width and height
vr = _get_rotated_test_video(rot)
assert vr[0].shape == (568, 320, 3), vr[0].shape
assert vr[:].shape == (3, 568, 320, 3)
vr = _get_rotated_test_video(rot, height=200, width=300)
assert vr[0].shape == (300, 200, 3), vr[0].shape
def test_frame_timestamps():
vr = _get_default_test_video()
frame_ts = vr.get_frame_timestamp(range(5))
assert np.allclose(frame_ts[:,0], [0.0, 0.033, 0.067, 0.1, 0.133])
vr = _get_unordered_test_video()
'''ffprobe output:
pts_time=0.000000 dts_time=-0.062500
pts_time=0.093750 dts_time=-0.031250
pts_time=0.031250 dts_time=0.000000
pts_time=0.062500 dts_time=0.031250
'''
frame_ts = vr.get_frame_timestamp(range(4))
assert np.allclose(frame_ts[:,0], [0.0, 0.03125, 0.0625, 0.09375]), frame_ts[:,0]
if __name__ == '__main__':
import nose
nose.runmodule()
|
# -*- coding: utf-8 -*-
import sqlite3
from .cost_breakdown import CostBreakdown
from .stock import Stock
from .manufacturing_expense import ManufacturingExpense
from .me2 import ManufacturingExpense2
from .product_price import ProductPrice
from .product_cost import ProductCost
class TableAdaptorFactory:
shortcut_mapping = {
's': Stock,
'cb': CostBreakdown,
'me': ManufacturingExpense,
'me2': ManufacturingExpense2,
'pp': ProductPrice,
'pc': ProductCost,
}
def __init__(self, db_path):
if db_path is None:
raise Exception("path to DB is not provided")
self.conn = sqlite3.connect(db_path)
self.conn.isolation_level = None
self.conn.row_factory = sqlite3.Row
def stock(self):
return Stock(self.conn)
def cost_breakdown(self):
return CostBreakdown(self.conn)
def manufacturing_expense(self):
return ManufacturingExpense(self.conn)
def manufacturing_expense2(self):
return ManufacturingExpense2(self.conn)
def product_price(self):
return ProductPrice(self.conn)
def product_cost(self):
return ProductCost(self.conn)
def by_shortcut(self, shortcut):
t = TableAdaptorFactory.shortcut_mapping.get(shortcut, None)
if t is None:
raise Exception('unknown shortcut: {}'.format(shortcut))
return t(self.conn)
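# Illustrative usage sketch (the database path below is hypothetical):
#
#   factory = TableAdaptorFactory('costs.db')
#   stock = factory.stock()          # explicit accessor
#   cb = factory.by_shortcut('cb')   # equivalent to factory.cost_breakdown()
#
# Passing an unmapped shortcut such as 'xyz' raises "unknown shortcut: xyz".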
|
import frappe
import latte
from frappe.utils import formatdate, format_datetime
from frappe.core.doctype.data_export.exporter import DataExporter
from frappe.core.doctype.data_export.exporter import export_data
def add_data_row(self, rows, dt, parentfield, doc, rowidx):
d = doc.copy()
meta = frappe.get_meta(dt)
if self.all_doctypes:
d.name = f'"{d.name}"'
if len(rows) < rowidx + 1:
rows.append([""] * (len(self.columns) + 1))
row = rows[rowidx]
_column_start_end = self.column_start_end.get((dt, parentfield))
if _column_start_end:
for i, c in enumerate(self.columns[_column_start_end.start:_column_start_end.end]):
df = meta.get_field(c)
fieldtype = df.fieldtype if df else "Data"
value = d.get(c, "")
if value:
if fieldtype == "Date":
value = formatdate(value)
elif fieldtype == "Datetime":
value = format_datetime(value)
row[_column_start_end.start + i + 1] = value
DataExporter.add_data_row = add_data_row
@frappe.whitelist()
@latte.read_only()
def read_only_export_data(doctype=None, parent_doctype=None, all_doctypes=True, with_data=False,
select_columns=None, file_type='CSV', template=False, filters=None):
export_data(
doctype=doctype,
parent_doctype=parent_doctype,
all_doctypes=all_doctypes,
with_data=with_data,
select_columns=select_columns,
file_type=file_type,
template=template,
filters=filters
)
|
import numpy as np
def compute_homography(src, dst):
"""computes the homography from src, to dst using inversion method."""
if src.shape[1] == 2 :
p1 = np.ones((len(src),3),'float64')
p1[:,:2] = src
elif src.shape[1] == 3 : p1 = src
if dst.shape[1] == 2 :
p2 = np.ones((len(dst),3),'float64')
p2[:,:2] = dst
elif dst.shape[1] == 3 : p2 = dst
npoints = len(src)
count = 2*npoints +1
A = np.zeros((count,9),'float32')
#populating the matrix A
for i in range(npoints):
p1i = p1[i]
x2i,y2i,w2i = p2[i]
xpi = x2i*p1i
ypi = y2i*p1i
wpi = w2i*p1i
A[i*2+1,3:6] = -wpi
A[i*2+1,6:9] = ypi
A[i*2 ,0:3] = -wpi
A[i*2 ,6:9] = xpi
A[8,8] = 1
B = np.zeros((9,1))
B[8,0] = 1
    A_inv = np.linalg.inv(A)
    h = A_inv @ B
H = h.reshape(3,3)
return H
def find_homography(src,dst):
"""computes the homography from src, to dst using singular value decomposition method."""
if src.shape[1] == 2 :
p1 = np.ones((len(src),3),'float64')
p1[:,:2] = src
elif src.shape[1] == 3 : p1 = src
if dst.shape[1] == 2 :
p2 = np.ones((len(dst),3),'float64')
p2[:,:2] = dst
elif dst.shape[1] == 3 : p2 = dst
npoints = len(src)
count = 3*npoints
A = np.zeros((count,9),'float32')
#populating the matrix A (TO BE DECOMPOSED).
#least squares fitting algorithm/ SVD algorithm.
for i in range(npoints):
p1i = p1[i]
x2i,y2i,w2i = p2[i]
xpi = x2i*p1i
ypi = y2i*p1i
wpi = w2i*p1i
A[i*3 ,3:6] = -wpi
A[i*3 ,6:9] = ypi
A[i*3+1,0:3] = wpi
A[i*3+1,6:9] = -xpi
A[i*3+2,0:3] = -ypi
A[i*3+2,3:6] = xpi
U,s,V = np.linalg.svd(A)
    # take the right-singular vector associated with the smallest singular value (last row of V)
h = V[-1]
H = h.reshape(3,3)
return H
def find_homography_2(src,dst):
"""computes the homography from src, to dst using singular value decomposition method."""
if src.shape[1] == 2 :
p1 = np.ones((len(src),3),'float64')
p1[:,:2] = src
elif src.shape[1] == 3 : p1 = src
if dst.shape[1] == 2 :
p2 = np.ones((len(dst),3),'float64')
p2[:,:2] = dst
elif dst.shape[1] == 3 : p2 = dst
npoints = len(src)
count = 2*npoints +1
A = np.zeros((count,9),'float32')
#populating the matrix A (TO BE DECOMPOSED).
#least squares fitting algorithm/ SVD algorithm.
for i in range(npoints):
p1i = p1[i]
x2i,y2i,w2i = p2[i]
xpi = x2i*p1i
ypi = y2i*p1i
wpi = w2i*p1i
A[i*2+1,3:6] = -wpi
A[i*2+1,6:9] = ypi
A[i*2 ,0:3] = -wpi
A[i*2 ,6:9] = xpi
U,s,V = np.linalg.svd(A)
    # take the right-singular vector associated with the smallest singular value (last row of V)
h = V[-1]
H = h.reshape(3,3)
return H
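if __name__ == '__main__':
    # Quick sanity check (illustrative): recover a known homography from four
    # exact point correspondences. A homography is only defined up to scale,
    # so the estimate is normalised by H[2, 2] before comparison.
    H_true = np.array([[1.2, 0.1, 5.0],
                       [0.0, 0.9, -3.0],
                       [0.001, 0.0, 1.0]])
    src = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    src_h = np.hstack([src, np.ones((4, 1))])
    dst_h = (H_true @ src_h.T).T
    dst = dst_h[:, :2] / dst_h[:, 2:]
    H_est = find_homography(src, dst)
    H_est = H_est / H_est[2, 2]
    print(np.allclose(H_est, H_true, atol=1e-3))  # expected: True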
|
# coding=utf-8
# @Time : 2021/1/6 10:45
# @Auto : zzf-jeff
import torch
from tqdm import tqdm
import time
import numpy as np
def eval(model, valid_dataloader, post_process_class, metric_class):
if isinstance(model, torch.nn.DataParallel):
# TypeError: expected sequence object with len >= 0 or a single integer
model.device_ids = [model.gpu_ids[0]]
model.eval()
with torch.no_grad():
total_frame = 0.0
total_time = 0.0
pbar = tqdm(total=len(valid_dataloader), desc='eval model:')
for idx, data_batch in enumerate(valid_dataloader):
if idx >= len(valid_dataloader):
break
imgs = data_batch['image'].to(model.device)
start = time.time()
preds = model(imgs)
# Obtain usable results from post-processing methods
post_result = post_process_class(preds, data_batch)
total_time += time.time() - start
# Evaluate the results of the current batch
metric_class(post_result, data_batch)
pbar.update(1)
total_frame += len(imgs)
metirc = metric_class.get_metric()
pbar.close()
model.train()
if isinstance(model, torch.nn.DataParallel):
# TypeError: expected sequence object with len >= 0 or a single integer
model.device_ids = model.gpu_ids
metirc['fps'] = total_frame / total_time
return metirc
def engine_eval(model, valid_dataloader, post_process_class, metric_class):
total_frame = 0.0
total_time = 0.0
pbar = tqdm(total=len(valid_dataloader), desc='eval model:')
for idx, data_batch in enumerate(valid_dataloader):
if idx >= len(valid_dataloader):
break
imgs = data_batch['image']
input_data = np.array(imgs, dtype=np.float32, order='C')
start = time.time()
preds = model.run(input_data)
preds = torch.Tensor(preds)
# Obtain usable results from post-processing methods
post_result = post_process_class(preds, data_batch)
total_time += time.time() - start
# Evaluate the results of the current batch
metric_class(post_result, data_batch)
pbar.update(1)
total_frame += len(imgs)
metirc = metric_class.get_metric()
pbar.close()
metirc['fps'] = total_frame / total_time
return metirc
|
import json
class App(object):
def __init__(self):
self.routers = dict()
def router(self, path):
def decorator(fun):
self.routers[path] = fun
return fun
return decorator
def server(self, path, arg = None):
fun = self.routers.get(path)
if fun:
return fun(arg)
def load(self, data_str):
data_dict = json.loads(data_str)
for k, v in data_dict.items():
self.server(k, v)
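# Minimal usage sketch (illustrative): register a route, then dispatch either
# directly via server() or in bulk from a JSON string via load().
if __name__ == '__main__':
    app = App()

    @app.router('/echo')
    def echo(arg):
        print('echo:', arg)
        return arg

    app.server('/echo', 'hello')        # prints "echo: hello"
    app.load('{"/echo": "from json"}')  # prints "echo: from json"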
|
import tensorflow as tf
import keras.backend as K
import numpy as np
from config import get_config
def log_normals_loss(y_true, y_pred):
    # Get batch size
config, unparsed = get_config()
batch_size = config.batch_size
# Print batch
y_true = tf.Print(y_true, [y_true], message='y_true', summarize=30)
y_pred = tf.Print(y_pred, [y_pred], message='y_pred', summarize=30)
y_true_clipped = y_true
y_pred_clipped = y_pred
# aux filter
w_x = K.variable(np.array([[-1.0, 0.0, 1.0],
[-1.0, 0.0, 1.0],
[-1.0, 0.0, 1.0]]).reshape(3, 3, 1, 1))
w_y = K.variable(np.array([[-1.0, -1.0, -1.0],
[0.0, 0.0, 0.0],
[1.0, 1.0, 1.0]]).reshape(3, 3, 1, 1))
# true
dzdx = K.conv2d(y_true_clipped, w_x, padding='same')
dzdy = K.conv2d(y_true_clipped, w_y, padding='same')
dzdx_ = dzdx * -1.0
dzdy_ = dzdy * -1.0
mag_norm = K.pow(dzdx,2) + K.pow(dzdy,2) + 1.0
mag_norm = K.sqrt(mag_norm)
# Normals
N3 = 1.0 / mag_norm
N1 = dzdx_ / mag_norm
N2 = dzdy_ / mag_norm
normals = K.concatenate(tensors=[N1,N2,N3],axis=-1)
# pred
dzdx_pred = K.conv2d(y_pred_clipped, w_x, padding='same')
dzdy_pred = K.conv2d(y_pred_clipped, w_y, padding='same')
mag_norm_pred_x = K.pow(dzdx_pred,2) + 1.0
mag_norm_pred_x = K.sqrt(mag_norm_pred_x)
mag_norm_pred_y = K.pow(dzdy_pred, 2) + 1.0
mag_norm_pred_y = K.sqrt(mag_norm_pred_y)
#
grad_x = K.concatenate(tensors=[K.constant(1.0, shape=[batch_size, K.int_shape(y_pred)[1], K.int_shape(y_pred)[2], K.int_shape(y_pred)[3]])/ mag_norm_pred_x,
K.constant(0.0, shape=[batch_size, K.int_shape(y_pred)[1], K.int_shape(y_pred)[2], K.int_shape(y_pred)[3]])/ mag_norm_pred_x, dzdx_pred/ mag_norm_pred_x],axis=-1)
grad_y = K.concatenate(tensors=[K.constant(0.0, shape=[batch_size, K.int_shape(y_pred)[1], K.int_shape(y_pred)[2], K.int_shape(y_pred)[3]])/ mag_norm_pred_y,
K.constant(1.0, shape=[batch_size, K.int_shape(y_pred)[1], K.int_shape(y_pred)[2], K.int_shape(y_pred)[3]])/ mag_norm_pred_y, dzdy_pred/ mag_norm_pred_y],axis=-1)
# compute d_i
first_log = K.log(y_pred_clipped + 1.)
second_log = K.log(y_true_clipped + 1.)
log_term = K.mean(K.square(first_log - second_log), axis=-1)
# dot prod
dot_term_x = K.sum(normals[:,:,:,:] * grad_x[:,:,:,:], axis=-1, keepdims=True)
dot_term_y = K.sum(normals[:,:,:,:] * grad_y[:,:,:,:], axis=-1, keepdims=True)
dot_term_x = tf.Print(dot_term_x, [dot_term_x], message='dot_term_x', summarize=30)
dot_term_y = tf.Print(dot_term_y, [dot_term_y], message='dot_term_y', summarize=30)
# second term
sc_inv_term = K.square(K.mean((first_log - second_log), axis=-1))
# first term + dy term
norm_term = K.mean(K.square(dot_term_x), axis=-1) + K.mean(K.square(dot_term_y), axis=-1)
diff_x = dzdx_pred - dzdx
diff_y = dzdy_pred - dzdy
grad_loss = K.mean(K.square(diff_x) + K.square(diff_y), axis=-1)
loss = log_term - (0.5 * sc_inv_term) + norm_term
return loss
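# Illustrative usage sketch: this loss can be passed to Keras like any custom
# loss, assuming the model predicts single-channel depth maps whose shape
# matches y_true and the configured batch_size matches the data pipeline.
#
#   model.compile(optimizer='adam', loss=log_normals_loss)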
|
## module midpoint
''' yStop = integrate (F,x,y,xStop,tol=1.0e-6)
Modified midpoint method for solving the
    initial value problem y' = F(x,y).
x,y = initial conditions
xStop = terminal value of x
yStop = y(xStop)
F = user-supplied function that returns the
array F(x,y) = {y'[0],y'[1],...,y'[n-1]}.
'''
import numpy as np
import math
def integrate(F,x,y,xStop,tol=1.0e-6):
def midpoint(F,x,y,xStop,nSteps):
# Midpoint formulas
h = (xStop - x)/nSteps
y0 = y
y1 = y0 + h*F(x,y0)
for i in range(nSteps-1):
x = x + h
y2 = y0 + 2.0*h*F(x,y1)
y0 = y1
y1 = y2
return 0.5*(y1 + y0 + h*F(x,y2))
def richardson(r,k):
# Richardson's extrapolation
for j in range(k-1,0,-1):
const = (k/(k - 1.0))**(2.0*(k-j))
r[j] = (const*r[j+1] - r[j])/(const - 1.0)
return
kMax = 51
n = len(y)
r = np.zeros((kMax,n))
# Start with two integration steps
nSteps = 2
r[1] = midpoint(F,x,y,xStop,nSteps)
r_old = r[1].copy()
# Increase the number of integration points by 2
# and refine result by Richardson extrapolation
for k in range(2,kMax):
nSteps = 2*k
r[k] = midpoint(F,x,y,xStop,nSteps)
richardson(r,k)
# Compute RMS change in solution
e = math.sqrt(np.sum((r[1] - r_old)**2)/n)
# Check for convergence
if e < tol: return r[1]
r_old = r[1].copy()
print("Midpoint method did not converge")
|
"""
frpy
A simple reverse proxy to help you expose a local server behind a NAT or firewall to the internet (an imitator of frp).
[Architecture]
frpy_server —— worker_server —— user_side
|
frpy_client —— local_server_side
[Usage]
-- server side --
# start frpy.py server on 8000 port
$ python frpy.py server 0.0.0.0 8000
-- client side --
# start a website on 8080 port (or existing tcp service)
$ python -m http.server 8080
# start frpy.py client connecting to server, and expose the local tcp service to server
$ python frpy.py client server.domain.com 8000 127.0.0.1 8080 60080
# view the website on server with 60080 port
$ curl http://server.domain.com:60080
todo:
- improve concurrent performance and speed
- client can restart at will
- handle user disconnected
- test mysql
- test tslow.cn website
- ctrl + c to break server
"""
import logging
import random
import select
import socket
import sys
import threading
import time
import traceback
from _thread import start_new_thread
# =========== Conf ===========
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 8000
LOCAL_HOST = '127.0.0.1'
LOCAL_PORT = 8080
REMOTE_PORT = 60080
BUFFER_SIZE = 1024
# =============================
logging.basicConfig(level=logging.DEBUG)
server_logger = logging.getLogger('Server')
client_logger = logging.getLogger('Client')
state = {}
# ---------- for server side ---------
class EasyTcpServer:
"""
[server example]
server = EasyTcpServer('127.0.0.1', 8000)
server.run()
[client example]
import socket
import time
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 8000))
s.sendall(b'123')
data = s.recv(1024)
print(data)
time.sleep(3)
s.close()
"""
def __init__(self, host='0.0.0.0', port=8000, buffer_size=1024):
self.host = host
self.port = port
self.buffer_size = buffer_size
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)  # allow the address to be reused
self.server_socket.bind((host, port))
self.server_socket.listen(10)
self.client_sockets = []
self.stopped = False
def run(self):
server_logger.info('[start tcp server] on %s:%d', self.host, self.port)
try:
while not self.stopped:
client_socket, client_address = self.server_socket.accept()
start_new_thread(self.handle_client, (client_socket, client_address), {})
self.client_sockets.append(client_socket)
except OSError as e:
server_logger.debug(e)
server_logger.info('[finish tcp server] on %s:%d', self.host, self.port)
def shutdown(self):
self.stopped = True
for s in self.client_sockets:
s.close()
self.server_socket.close()
server_logger.info('[shutdown tcp server] on %s:%d', self.host, self.port)
def handle_client(self, client_socket, client_address):
# handle a new client connection
server_logger.info('[new client] on %s:%d, client address: %s', self.host, self.port, client_address)
while True:
# synchronous blocking
data = client_socket.recv(self.buffer_size)
self.handle_recv(client_socket, data)
if data == b'':
break
self.client_sockets.remove(client_socket)
client_socket.close()
server_logger.info('[close client] on %s:%d, client address: %s', self.host, self.port, client_address)
def handle_recv(self, client_socket, data):
server_logger.warning('handle_recv should be override!')
server_logger.debug(data)
client_socket.sendall(data[::-1])
if data == b'shutdown server':
self.shutdown()
class WorkerServer(EasyTcpServer):
def __init__(self, host='0.0.0.0', port=8000, buffer_size=1024, parent_socket=None, main_server=None):
        assert parent_socket, 'parent_socket must be set'
        assert main_server, 'main_server must be set'
self.parent_socket = parent_socket
self.main_server = main_server
super().__init__(host, port, buffer_size)
def handle_recv(self, client_socket, data):
server_logger.debug('data from user: %s', data)
user_socket = client_socket
# generate a random id (bytes, length: 8)
        # don't worry about it repeating; collisions are unlikely enough for this demo
n = random.randint(10000000, 99999999)
user_id = str(n).encode()
data = b'<frpy>' + user_id + b'|' + data + b'</frpy>'
self.parent_socket.sendall(data)
self.main_server.state[user_id] = {
'parent_socket': self.parent_socket,
'user_socket': user_socket,
}
class MainServer(EasyTcpServer):
workers = []
state = {}
def make_new_worker(self, port, client_socket):
# make a new server in a new thread. [async]
kwargs = {
'remote_port': port,
'parent_socket': client_socket,
}
start_new_thread(self.start_worker_server, (), kwargs)
server_logger.debug(threading.enumerate())
def start_worker_server(self, remote_port, parent_socket):
# parent_socket is a socket between frpy server and frpy client
worker_server = WorkerServer(port=remote_port, buffer_size=self.buffer_size, parent_socket=parent_socket, main_server=self)
self.workers.append(worker_server)
worker_server.run()
def handle_client(self, client_socket, client_address):
# handle a new frpy client connection
server_logger.info('[new client] on %s:%d, client address: %s', self.host, self.port, client_address)
buffer = b''
while True:
data = client_socket.recv(self.buffer_size + 22)
server_logger.debug('data from client: %s', data)
if data == b'':
break
buffer += data
index = buffer.find(b'</frpy>')
if index == -1:
continue
assert buffer.startswith(b'<frpy>'), buffer
data = buffer[6:index]
buffer = buffer[index + 7:]
# synchronous blocking
self.handle_recv(client_socket, data)
self.client_sockets.remove(client_socket)
client_socket.close()
server_logger.info('[close client] on %s:%d, client address: %s', self.host, self.port, client_address)
def split_data(self, data):
assert b'|' in data, 'recv error data: %s' % data
user_id, data = data.split(b'|', 1)
assert len(user_id) == 8, 'recv error data: %s' % data
return user_id, data
def handle_recv(self, client_socket, data):
# recv message from frpy client
if not data:
server_logger.warning('recv empty data')
try:
user_id, data = self.split_data(data)
# if the first message, make a worker server
if user_id == b'00000000':
port = int(data.decode())
self.make_new_worker(port, client_socket)
return
# other message will send to user
user_socket = self.state.get(user_id, {}).get('user_socket')
if not user_socket:
server_logger.warning('#%s %s not found!', user_id, client_socket)
server_logger.warning('state: %s', self.state)
return
user_socket.sendall(data)
except Exception as e:
server_logger.error('----- handle recv from client error -----')
server_logger.error(e)
server_logger.debug(traceback.format_exc())
server_logger.error('-----------------------------------------')
# ---------- for client side ---------
def write_to_local(client_socket):
buffer = b''
while True:
s_read, _, _ = select.select([client_socket], [], [], 0.5)
if not s_read:
continue
s = s_read[0]
data = s.recv(BUFFER_SIZE + 22)
client_logger.debug('data from server %s', data)
buffer += data
index = buffer.find(b'</frpy>')
if index == -1:
continue
assert buffer.startswith(b'<frpy>'), buffer
data = buffer[6:index]
buffer = buffer[index+7:]
user_id, data = data.split(b'|', 1)
# length of user_id: 8
assert len(user_id) == 8, user_id
if user_id in state:
local_socket = state[user_id]['local_socket']
else:
local_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
local_socket.connect((LOCAL_HOST, LOCAL_PORT))
state[user_id] = {
'local_socket': local_socket,
}
client_logger.info('send to %s %s', local_socket, data)
client_logger.debug('state %s', state)
local_socket.sendall(data)
def read_from_local(client_socket):
while True:
umap = {}
local_sockets = []
for key, value in state.items():
user_id = key
local_socket = value['local_socket']
umap[id(local_socket)] = user_id
local_sockets.append(local_socket)
if not local_sockets:
time.sleep(0.5)
continue
s_read, _, _ = select.select(local_sockets, [], [], 0.5)
for s in s_read:
data = s.recv(BUFFER_SIZE)
if data == b'':
continue
client_logger.info('recv from %s: %s', s, data)
user_id = umap[id(s)]
# <frpy>12345678|abcdef1234</frpy>
# external length: 22
data = b'<frpy>' + user_id + b'|' + data + b'</frpy>'
client_socket.sendall(data)
# -------------------------------
print('======== frpy v0.0.1 ========')
args = sys.argv[1:]
if not args:
    print('Please choose the mode (1 or 2):')
print('1. Server')
print('2. Client')
mode = input()
if mode not in ['1', '2']:
        input('Just enter 1 or 2!')
sys.exit()
args = [mode]
if args[0] in ['1', 'server']:
mode = 'server'
else:
mode = 'client'
if len(args) > 1:
SERVER_HOST = args[1]
if len(args) > 2:
SERVER_PORT = int(args[2])
if len(args) > 3:
LOCAL_HOST = args[3]
if len(args) > 4:
LOCAL_PORT = int(args[4])
if len(args) > 5:
REMOTE_PORT = int(args[5])
if len(args) > 6:
BUFFER_SIZE = int(args[6])
if mode == 'server':
if len(args) > 3:
BUFFER_SIZE = int(args[3])
client_logger.info('frpy server')
client_logger.info('SERVER_HOST: %s', SERVER_HOST)
client_logger.info('SERVER_PORT: %s', SERVER_PORT)
client_logger.info('BUFFER_SIZE: %s', BUFFER_SIZE)
server = MainServer(host=SERVER_HOST, port=SERVER_PORT, buffer_size=BUFFER_SIZE)
server.run()
else:
client_logger.info('======== frpy client ========')
client_logger.info('SERVER_HOST: %s', SERVER_HOST)
client_logger.info('SERVER_PORT: %s', SERVER_PORT)
client_logger.info('LOCAL_HOST: %s', LOCAL_HOST)
client_logger.info('LOCAL_PORT: %s', LOCAL_PORT)
client_logger.info('REMOTE_PORT: %s', REMOTE_PORT)
client_logger.info('BUFFER_SIZE: %s', BUFFER_SIZE)
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((SERVER_HOST, SERVER_PORT))
msg = '<frpy>00000000|%d</frpy>' % REMOTE_PORT
client_socket.sendall(msg.encode())
start_new_thread(write_to_local, (client_socket,), {})
start_new_thread(read_from_local, (client_socket,), {})
while True:
client_logger.debug(threading.enumerate())
time.sleep(10)
|
import pickle
import operator
def explore_model(standardize, model_type='rf', for_submission=True):
submission = "_submission" if for_submission else ""
standardized = "_standardized" if standardize else ""
model_file_name = "08_reg_model_" + model_type + standardized + submission + ".p"
print("Opening " + model_file_name + "...")
with open("../../dataset/" + model_file_name, 'rb') as h:
model = pickle.load(h)
if model_type == 'rf':
feature_importances = model.feature_importances_
print(feature_importances)
with open("../../dataset/04_tr_filled_data.p", 'rb') as h:
xtr = pickle.load(h)
del (xtr['ID'])
del (xtr['target'])
feature_names = list(xtr.columns.values)
feat_imps = dict(zip(feature_names, feature_importances))
feat_imps = sorted(feat_imps.items(), key=operator.itemgetter(1), reverse=True)
print(feat_imps)
if __name__ == "__main__":
# Model options:
# "log_reg",
# "rf"(Random Forest),
# "svc"(Support Vector Classification)
# "extra_trees_classifier" (Extra Decision Trees Classifier)
model_option = "rf"
explore_model(standardize=True, model_type=model_option, for_submission=False)
|
"""MongoDB queries for titanic database."""
import pymongo
if __name__ == "__main__":
# Username and password to be set by user.
username = "TODO"
cluster = "TODO"
password = "TODO"
group = "TODO"
    # Create access to the MongoDB titanic database.
s = "mongodb+srv://{}:{}@{}-{}.mongodb.net/titanic".format(username,
password,
cluster,
group)
client = pymongo.MongoClient(s)
titanic_db = client["titanic"]
outputs = {}
# Find the count of passengers aboard titanic who survived/didn't survive.
agg_dict = [
{
"$group":
{
"_id":
{
"survived": "$Survived"
},
"count": {"$sum": 1}
}
},
{
"$project":
{
"_id": 0,
"survived": "$_id.survived",
"count": "$count"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["survived_count"] = list(agg_res)
# Find the count of passengers for each class aboard titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"pclass": "$Pclass"
},
"count": {"$sum": 1}
}
},
{
"$project":
{
"_id": 0,
"pclass": "$_id.pclass",
"count": "$count"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["pclass_count"] = list(agg_res)
# Find the count of passengers for each class aboard titanic who
# survived/didn't survive.
agg_dict = [
{
"$group":
{
"_id":
{
"pclass": "$Pclass",
"survived": "$Survived"
},
"count": {"$sum": 1}
}
},
{
"$project":
{
"_id": 0,
"pclass": "$_id.pclass",
"survived": "$_id.survived",
"count": "$count"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["pclass+survived_count"] = list(agg_res)
# Find the average age for those that survived/didn't survive the titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"survived": "$Survived"
},
"avgAge": {"$avg": {"$toDecimal": "$Age"}}
}
},
{
"$project":
{
"_id": 0,
"survived": "$_id.survived",
"avgAge": "$avgAge"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["survived_avg(age)"] = list(agg_res)
# Find the average age for each class aboard titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"pclass": "$Pclass"
},
"avgAge": {"$avg": {"$toDecimal": "$Age"}}
}
},
{
"$project":
{
"_id": 0,
"pclass": "$_id.pclass",
"avgAge": "$avgAge"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["pclass_avg(age)"] = list(agg_res)
# Find the average fare per passenger for those that survived/didn't
# survive the titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"survived": "$Survived"
},
"avgFare": {"$avg": {"$toDecimal": "$Fare"}}
}
},
{
"$project":
{
"_id": 0,
"survived": "$_id.survived",
"avgFare": "$avgFare"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["survived_avg(fare)"] = list(agg_res)
    # Find the average fare per passenger for each class aboard titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"pclass": "$Pclass"
},
"avgFare": {"$avg": {"$toDecimal": "$Fare"}}
}
},
{
"$project":
{
"_id": 0,
"pclass": "$_id.pclass",
"avgFare": "$avgFare"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["pclass_avg(fare)"] = list(agg_res)
# Find the average number of siblings/spouses per passenger for those that
# survived/didn't survive the titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"survived": "$Survived"
},
"avgSibSpouse": {"$avg": {"$toInt": "$SiblingsSpousesAboard"}}
}
},
{
"$project":
{
"_id": 0,
"survived": "$_id.survived",
"avgSibSpouse": "$avgSibSpouse"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["survived_avg(sib_spouse)"] = list(agg_res)
# Find the average number of siblings/spouses per passenger for each class
# aboard titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"pclass": "$Pclass"
},
"avgSibSpouse": {"$avg": {"$toInt": "$SiblingsSpousesAboard"}}
}
},
{
"$project":
{
"_id": 0,
"pclass": "$_id.pclass",
"avgSibSpouse": "$avgSibSpouse"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["pclass_avg(sib_spouse)"] = list(agg_res)
# Find the average number of parents/children per passenger for those that
# survived/didn't survive the titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"survived": "$Survived"
},
"avgParChild": {"$avg": {"$toInt": "$ParentsChildrenAboard"}}
}
},
{
"$project":
{
"_id": 0,
"survived": "$_id.survived",
"avgParChild": "$avgParChild"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["survived_avg(par_child)"] = list(agg_res)
# Find the average number of parents/children per passenger for each class
# aboard titanic.
agg_dict = [
{
"$group":
{
"_id":
{
"pclass": "$Pclass"
},
"avgParChild": {"$avg": {"$toInt": "$ParentsChildrenAboard"}}
}
},
{
"$project":
{
"_id": 0,
"survived": "$_id.pclass",
"avgParChild": "$avgParChild"
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["pclass_avg(par_child)"] = list(agg_res)
# Find the number of repeating names aboard titanic.
agg_dict = [
{
"$group":
{
"_id": "$Name",
"count": {"$sum": 1}
}
},
{
"$match":
{
"count": {"$gt": 1}
}
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["matchName_count"] = len(list(agg_res))
# Find number of couples aboard titanic.
split_name = [{"$split": ["$Name", " "]}, 0]
agg_dict = [
{
"$project":
{
"_id": 0,
"Name": "$Name",
"SiblingsSpousesAboard": {"$toInt": "$SiblingsSpousesAboard"}
}
},
{
"$match":
{
"SiblingsSpousesAboard": {"$gt": 0}
}
},
{
"$project":
{
"_id": 0,
"Name": "$Name",
"mr": {"$eq": [{"$arrayElemAt": split_name}, "Mr."]},
"mrs": {"$eq": [{"$arrayElemAt": split_name}, "Mrs."]}
}
},
{
"$group":
{
"_id": {"$arrayElemAt": [{"$split": ["$Name", " "]}, -1]},
"mr_count": {"$sum": {"$cond": ["$mr", 1, 0]}},
"mrs_count": {"$sum": {"$cond": ["$mrs", 1, 0]}}
}
},
{
"$match":
{
"mr_count": {"$gt": 0},
"mrs_count": {"$gt": 0}
}
},
{
"$count": "count"
}
]
agg_res = titanic_db["titanic"].aggregate(agg_dict)
outputs["marriedCouple_count"] = list(agg_res)[0]["count"]
# Print results.
for o in outputs:
print(o)
print(outputs[o])
print()
|
valor = int(input('How much would you like to withdraw? R$'))
total = valor
dinheiro = 50
totaldinheiro = 0
while True:
if total >= dinheiro:
total -= dinheiro
totaldinheiro += 1
else:
if totaldinheiro > 0:
            print(f'Total of {totaldinheiro} banknote(s) of R${dinheiro}')
if dinheiro == 50:
dinheiro = 20
elif dinheiro == 20:
dinheiro = 10
elif dinheiro == 10:
dinheiro = 1
totaldinheiro = 0
if total == 0:
break
print('-' * 40)
print('Program finished')
|
# Generated by Django 3.1.7 on 2021-08-04 19:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('experiments', '0003_auto_20210804_1906'),
]
operations = [
migrations.RenameField(
model_name='battery',
old_name='insturctions',
new_name='instructions',
),
]
|
from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager,
)
class DoubleMaStrategy(CtaTemplate):
author = " use Python traders "
fast_window = 10
slow_window = 20
fast_ma0 = 0.0
fast_ma1 = 0.0
slow_ma0 = 0.0
slow_ma1 = 0.0
parameters = ["fast_window", "slow_window"]
variables = ["fast_ma0", "fast_ma1", "slow_ma0", "slow_ma1"]
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super(DoubleMaStrategy, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.bg = BarGenerator(self.on_bar)
self.am = ArrayManager()
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log(" strategy initialization ")
self.load_bar(10)
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log(" policy startup ")
self.put_event()
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log(" stop strategy ")
self.put_event()
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.bg.update_tick(tick)
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
am = self.am
am.update_bar(bar)
if not am.inited:
return
fast_ma = am.sma(self.fast_window, array=True)
self.fast_ma0 = fast_ma[-1]
self.fast_ma1 = fast_ma[-2]
slow_ma = am.sma(self.slow_window, array=True)
self.slow_ma0 = slow_ma[-1]
self.slow_ma1 = slow_ma[-2]
cross_over = self.fast_ma0 > self.slow_ma0 and self.fast_ma1 < self.slow_ma1
cross_below = self.fast_ma0 < self.slow_ma0 and self.fast_ma1 > self.slow_ma1
if cross_over:
if self.pos == 0:
self.buy(bar.close_price, 1)
elif self.pos < 0:
self.cover(bar.close_price, 1)
self.buy(bar.close_price, 1)
elif cross_below:
if self.pos == 0:
self.short(bar.close_price, 1)
elif self.pos > 0:
self.sell(bar.close_price, 1)
self.short(bar.close_price, 1)
self.put_event()
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
pass
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
|
"""
Create a 2-dimensional array, arr, of n empty arrays. All arrays are zero indexed.
Create an integer, lastAnswer, and initialize it to 0.
There are 2 types of queries:
Query: 1 x y
Find the list within arr at index idx = ((x ^ lastAnswer) % n).
Append the integer y to arr[idx].
Query: 2 x y
Find the list within arr at index idx = ((x ^ lastAnswer) % n).
Find the value of the element at index (y % size) in arr[idx], where size is the number of elements in arr[idx], and assign it to lastAnswer.
Print the new value of lastAnswer on a new line.
Note: ^ is the bitwise XOR operation, which corresponds to the ^ operator in most languages. Learn more about it on Wikipedia. % is the modulo operator.
Function Description
Complete the dynamicArray function below.
dynamicArray has the following parameters:
- int n: the number of empty arrays to initialize in arr
- string queries[q]: an array of query strings
Returns
int[]: the results of each type 2 query in the order they are presented
Input Format
The first line contains two space-separated integers, n, the number of empty arrays to create in arr, and q, the number of queries, respectively.
Each of the subsequent q lines contains a query string in the format defined above.
Constraints
It is guaranteed that a type 2 query will never query an empty array or index.
Sample Input
2 5
1 0 5
1 1 7
1 0 3
2 1 0
2 1 1
Sample Output
7
3
"""
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'dynamicArray' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts following parameters:
# 1. INTEGER n
# 2. 2D_INTEGER_ARRAY queries
#
def dynamicArray(n, queries):
# Write your code here
arr = [[] for i in range(n)]
lastAnswer = 0
result = []
    # process each query in order
for q in queries:
if q[0] == 1:
seq=(q[1] ^ lastAnswer) %n
arr[seq].append(q[2])
else:
seq=(q[1] ^ lastAnswer) %n
lastAnswer = arr[seq][q[2]%len(arr[seq])]
result.append(lastAnswer)
return result
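# Worked example (from the sample input in the problem statement above):
#
#   >>> dynamicArray(2, [[1, 0, 5], [1, 1, 7], [1, 0, 3], [2, 1, 0], [2, 1, 1]])
#   [7, 3]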
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
q = int(first_multiple_input[1])
queries = []
for _ in range(q):
queries.append(list(map(int, input().rstrip().split())))
result = dynamicArray(n, queries)
fptr.write('\n'.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
default_app_config = 'django_sms_toolkit.apps.DjangoSMSToolkitConfig'
|
"""
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Author: V. Michel, A. Gramfort
# License: BSD 3 clause
import numpy as np
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted
from scipy.sparse import issparse
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
"""
A class for feature agglomeration via the transform interface.
"""
def transform(self, X):
"""
Transform a new matrix using the built clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
A M by N array of M observations in N dimensions or a length
M array of M one-dimensional observations.
Returns
-------
Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
The pooled values for each feature cluster.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
if self.pooling_func == np.mean and not issparse(X):
size = np.bincount(self.labels_)
n_samples = X.shape[0]
# a fast way to compute the mean of grouped features
nX = np.array(
[np.bincount(self.labels_, X[i, :]) / size for i in range(n_samples)]
)
else:
nX = [
self.pooling_func(X[:, self.labels_ == l], axis=1)
for l in np.unique(self.labels_)
]
nX = np.array(nX).T
return nX
def inverse_transform(self, Xred):
"""
Inverse the transformation and return a vector of size `n_features`.
Parameters
----------
Xred : array-like of shape (n_samples, n_clusters) or (n_clusters,)
The values to be assigned to each cluster of samples.
Returns
-------
X : ndarray of shape (n_samples, n_features) or (n_features,)
A vector of size `n_samples` with the values of `Xred` assigned to
each of the cluster of samples.
"""
check_is_fitted(self)
unil, inverse = np.unique(self.labels_, return_inverse=True)
return Xred[..., inverse]
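# Illustrative usage via sklearn.cluster.FeatureAgglomeration, which mixes in
# this transform (shapes assume the default pooling_func=np.mean):
#
#   >>> import numpy as np
#   >>> from sklearn.cluster import FeatureAgglomeration
#   >>> X = np.array([[0., 1., 3.], [2., 1., 5.]])
#   >>> agglo = FeatureAgglomeration(n_clusters=2).fit(X)
#   >>> agglo.transform(X).shape
#   (2, 2)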
|
# Copyright 2011 Branan Purvine-Riley and Adam Johnson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import SensEngine
from SensEngine import EntityFactory
from SensEngine import Entity
from SensEngine import Components
def register_factory(name, factory):
try:
SensEngine.client.manager.add_factory(factory(), name)
return None
except AttributeError: # we want to forward any errors that might come from the manager
pass
# registration functions for other situations besides the default client go here
raise NotImplementedError("Don't understand this context; can't register factories")
def register_get_datamgr():
try:
return SensEngine.client.loader
except AttributeError:
pass
raise NotImplementedError("Don't understand this context; can't fetch datamgr")
|
"""Definitions of the segment classes."""
# flake8: noqa: F401
from sqlfluff.core.parser.segments.base import BaseSegment, UnparsableSegment
from sqlfluff.core.parser.segments.generator import SegmentGenerator
from sqlfluff.core.parser.segments.raw import (
RawSegment,
CodeSegment,
UnlexableSegment,
CommentSegment,
WhitespaceSegment,
NewlineSegment,
)
from sqlfluff.core.parser.segments.ephemeral import EphemeralSegment
from sqlfluff.core.parser.segments.meta import Indent, Dedent, TemplateSegment
from sqlfluff.core.parser.segments.keyword import (
KeywordSegment,
SymbolSegment,
ReSegment,
NamedSegment,
)
|
"""
vodka data handlers, allows to modify data retrieved by
vodka data plugins
"""
import vodka.config
import vodka.component
import vodka.storage
import vodka.data.data_types
import vodka.util
handlers = {}
class register(vodka.util.register):
class Meta:
objects = handlers
name = "data handler"
def get(handle):
if handle not in handlers:
raise KeyError("Data handler with handle %s does not exist" % handle)
return handlers.get(handle)
def instantiate(cfg, data_id):
cls = get(cfg.get("type"))
return cls(cfg, data_id)
def instantiate_for_data_type(name, data_id=None):
data_type = vodka.data.data_types.get(name)
if not data_id:
data_id = data_type
r = []
for h in data_type.handlers:
r.append(instantiate(h, data_id))
return r
class Handler(vodka.component.Component):
"""
Base data handler class. A data handler can be attached to a data type
to manipulate data of that type as it enters vodka.
    Attributes:
        config (dict or MungeConfig): configuration collection
data_id (str): data id for this handler
Classes:
Configuration: Configuration Handler
"""
class Configuration(vodka.config.ComponentHandler):
pass
def __init__(self, config, data_id):
"""
Args:
config (dict or MungeConfig): configuration collection
data_id (str): data id for this handler, needs to be unique
"""
super().__init__(config)
self.data_id = data_id
self.init()
def __call__(self, data, caller=None):
pass
def init(self):
pass
@register("index")
class IndexHandler(Handler):
"""
    Re-indexes the data rows into a dictionary, keyed by the
    field specified in the config
"""
class Configuration(Handler.Configuration):
index = vodka.config.Attribute(str, help_text="the field to use for indexing")
def __call__(self, data, caller=None):
if "data" in data:
r = {}
for d in data["data"]:
if isinstance(d, dict):
r[d[self.get_config("index")]] = d
elif d:
self.log.debug(
"Only dictionary type data rows may be re-indexed, row ignored"
)
else:
self.log.debug("Empty data row ignored.")
data["data"] = r
else:
self.log.debug("Empty data object ignored")
return data
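# Hedged before/after sketch for IndexHandler (values are illustrative): with
# "index" configured as "id", a payload
#   {"data": [{"id": "a", "v": 1}, {"id": "b", "v": 2}]}
# is rewritten in place to
#   {"data": {"a": {"id": "a", "v": 1}, "b": {"id": "b", "v": 2}}}
# while non-dict rows and empty rows are logged at debug level and dropped.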
@register("store")
class StorageHandler(Handler):
"""
Will store the data in the vodka storage.
Data will be stored using data type and data id as keys
"""
class Configuration(Handler.Configuration):
container = vodka.config.Attribute(
str,
help_text="specify how to store data",
choices=["list", "dict"],
default="list",
)
limit = vodka.config.Attribute(
int,
default=500,
help_text="Limit the maximum amount of items to keep; only applies to list storage",
)
def validate_limit(self, value):
if value < 1:
return False, "Needs to be greater than 1"
return True, ""
def __call__(self, data, caller=None):
if type(self.storage) == list:
self.storage.append(data)
l = len(self.storage)
while l > self.get_config("limit"):
self.storage.pop(0)
l -= 1
elif type(self.storage) == dict:
self.storage.update(**data["data"])
return data
def init(self):
if self.get_config("container") == "list":
self.storage = vodka.storage.get_or_create(self.data_id, [])
elif self.get_config("container") == "dict":
self.storage = vodka.storage.get_or_create(self.data_id, {})
else:
raise ValueError(
"Unknown storage container type: %s" % self.get_config("container")
)
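# Hedged sketch of wiring up a custom handler (the "uppercase" handle and the
# config dict are illustrative, not part of vodka):
#   @register("uppercase")
#   class UppercaseHandler(Handler):
#       def __call__(self, data, caller=None):
#           data["data"] = [str(row).upper() for row in data.get("data", [])]
#           return data
#   handler = instantiate({"type": "uppercase"}, "my_data_id")
# Handlers listed in a data type's `handlers` config are built the same way via
# instantiate_for_data_type().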
|
def search_and_set(mem_blocks,block_id,thread_id):
if block_id not in mem_blocks:
mem_blocks[block_id] = [0]*8
mem_blocks[block_id][thread_id] = 1
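# Hedged usage sketch: mem_blocks maps a block id to an 8-slot bitmap of the
# threads that have touched it (thread_id is assumed to be in range(8)):
#   blocks = {}
#   search_and_set(blocks, block_id=3, thread_id=5)
#   # blocks == {3: [0, 0, 0, 0, 0, 1, 0, 0]}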
|
# Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrap representation of Spanner keys / ranges."""
from google.cloud.spanner_v1.proto.keys_pb2 import KeyRange as KeyRangePB
from google.cloud.spanner_v1.proto.keys_pb2 import KeySet as KeySetPB
from google.cloud.spanner_v1._helpers import _make_list_value_pb
from google.cloud.spanner_v1._helpers import _make_list_value_pbs
class KeyRange(object):
"""Identify range of table rows via start / end points.
    Specify either a `start_open` or `start_closed` key; if neither is given,
    `start_closed = []` is used. Likewise, specify either an `end_open` or
    `end_closed` key; if neither is given, `end_closed = []` is used. At least
    one key must be specified, otherwise a ValueError is raised.
:type start_open: list of scalars
:param start_open: keys identifying start of range (this key excluded)
:type start_closed: list of scalars
:param start_closed: keys identifying start of range (this key included)
:type end_open: list of scalars
:param end_open: keys identifying end of range (this key excluded)
:type end_closed: list of scalars
:param end_closed: keys identifying end of range (this key included)
:raises ValueError: if no keys are specified
"""
def __init__(self, start_open=None, start_closed=None,
end_open=None, end_closed=None):
if not any([start_open, start_closed, end_open, end_closed]):
raise ValueError("Must specify at least a start or end row.")
if start_open and start_closed:
raise ValueError("Specify one of 'start_open' / 'start_closed'.")
elif start_open is None and start_closed is None:
start_closed = []
if end_open and end_closed:
raise ValueError("Specify one of 'end_open' / 'end_closed'.")
elif end_open is None and end_closed is None:
end_closed = []
self.start_open = start_open
self.start_closed = start_closed
self.end_open = end_open
self.end_closed = end_closed
def to_pb(self):
"""Construct a KeyRange protobuf.
:rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeyRange`
:returns: protobuf corresponding to this instance.
"""
kwargs = {}
if self.start_open is not None:
kwargs['start_open'] = _make_list_value_pb(self.start_open)
if self.start_closed is not None:
kwargs['start_closed'] = _make_list_value_pb(self.start_closed)
if self.end_open is not None:
kwargs['end_open'] = _make_list_value_pb(self.end_open)
if self.end_closed is not None:
kwargs['end_closed'] = _make_list_value_pb(self.end_closed)
return KeyRangePB(**kwargs)
class KeySet(object):
"""Identify table rows via keys / ranges.
:type keys: list of list of scalars
:param keys: keys identifying individual rows within a table.
:type ranges: list of :class:`KeyRange`
:param ranges: ranges identifying rows within a table.
:type all_: boolean
:param all_: if True, identify all rows within a table
"""
def __init__(self, keys=(), ranges=(), all_=False):
if all_ and (keys or ranges):
raise ValueError("'all_' is exclusive of 'keys' / 'ranges'.")
self.keys = list(keys)
self.ranges = list(ranges)
self.all_ = all_
def to_pb(self):
"""Construct a KeySet protobuf.
:rtype: :class:`~google.cloud.spanner_v1.proto.keys_pb2.KeySet`
:returns: protobuf corresponding to this instance.
"""
if self.all_:
return KeySetPB(all=True)
kwargs = {}
if self.keys:
kwargs['keys'] = _make_list_value_pbs(self.keys)
if self.ranges:
kwargs['ranges'] = [krange.to_pb() for krange in self.ranges]
return KeySetPB(**kwargs)
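# Hedged usage sketch (values are illustrative; the calls mirror the
# constructors and to_pb() methods above):
#   single_row = KeySet(keys=[["user-1"]])
#   id_range   = KeyRange(start_closed=["a"], end_open=["m"])
#   everything = KeySet(all_=True)
#   keyset_pb  = KeySet(ranges=[id_range]).to_pb()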
|
__author__ = "Gordon Ball <gordon@chronitis.net>"
__version__ = "0.4.3"
from .ipyrmd import ipynb_to_rmd, rmd_to_ipynb, ipynb_to_spin, spin_to_ipynb
|
import numpy as np
from numpy.testing import assert_allclose
import unittest
from pb_bss_eval.distribution import CACGMMTrainer
from pb_bss_eval.distribution import ComplexAngularCentralGaussian
from pb_bss_eval.distribution import sample_cacgmm
import itertools
from pb_bss_eval.utils import labels_to_one_hot
def solve_permutation(estimated_covariance, covariance):
K = estimated_covariance.shape[0]
permutations = list(itertools.permutations(range(K)))
best_permutation, best_cost = None, np.inf
for p in permutations:
cost = np.linalg.norm(estimated_covariance[p, :, :] - covariance)
if cost < best_cost:
best_permutation, best_cost = p, cost
return best_permutation
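# Hedged note: with K=2 sources the candidate orderings are (0, 1) and (1, 0);
# the helper returns whichever permutation of estimated_covariance minimises the
# norm of the difference to covariance, and the tests then index with it, e.g.
#   p = solve_permutation(model.cacg.covariance, covariance)
#   aligned = model.cacg.covariance[p, :, :]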
class TestCACGMM(unittest.TestCase):
def test_cacgmm(self):
np.random.seed(0)
samples = 10000
weight = np.array([0.3, 0.7])
covariance = np.array(
[
[[10, 1 + 1j, 1 + 1j], [1 - 1j, 5, 1], [1 - 1j, 1, 2]],
[[2, 0, 0], [0, 3, 0], [0, 0, 2]],
]
)
covariance /= np.trace(covariance, axis1=-2, axis2=-1)[..., None, None]
x = sample_cacgmm(samples, weight, covariance)
model = CACGMMTrainer().fit(
x,
num_classes=2,
covariance_norm='trace',
)
# Permutation invariant testing
best_permutation = solve_permutation(model.cacg.covariance[:, :, :], covariance)
assert_allclose(
model.cacg.covariance[best_permutation, :], covariance, atol=0.1
)
model.weight = model.weight[best_permutation,]
assert model.weight[0] < model.weight[1], model.weight
assert_allclose(model.weight, weight[:, None], atol=0.15)
# model = CACGMMTrainer().fit(
# x,
# num_classes=2,
# covariance_norm='trace',
# dirichlet_prior_concentration=np.inf
# )
# assert_allclose(np.squeeze(model.weight, axis=-1), [0.5, 0.5])
#
# model = CACGMMTrainer().fit(
# x,
# num_classes=2,
# covariance_norm='trace',
# dirichlet_prior_concentration=1_000_000_000
# )
# assert_allclose(np.squeeze(model.weight, axis=-1), [0.5, 0.5])
def test_cacgmm_independent_dimension(self):
samples = 10000
weight = np.array([0.3, 0.7])
covariance = np.array(
[
[[10, 1 + 1j, 1 + 1j], [1 - 1j, 5, 1], [1 - 1j, 1, 2]],
[[2, 0, 0], [0, 3, 0], [0, 0, 2]],
]
)
covariance /= np.trace(covariance, axis1=-2, axis2=-1)[..., None, None]
x = sample_cacgmm(samples, weight, covariance)
model = CACGMMTrainer().fit(
x[None, ...],
num_classes=2,
covariance_norm='trace',
)
# Permutation invariant testing
best_permutation = solve_permutation(model.cacg.covariance[0, :, :, :], covariance)
assert_allclose(
np.squeeze(model.weight, axis=(0, -1))[best_permutation,],
weight,
atol=0.15
)
assert_allclose(
model.cacg.covariance[0, best_permutation, :], covariance, atol=0.1
)
model = CACGMMTrainer().fit(
np.array([x, x]),
num_classes=2,
covariance_norm='trace',
)
for f in range(model.weight.shape[0]):
# Permutation invariant testing
best_permutation = solve_permutation(model.cacg.covariance[f, :, :, :], covariance)
assert_allclose(
np.squeeze(model.weight, axis=-1)[f, best_permutation,],
weight,
atol=0.15,
)
assert_allclose(
model.cacg.covariance[f, best_permutation, :],
covariance,
atol=0.1,
)
def test_cacgmm_sad_init(self):
samples = 10000
weight = np.array([0.3, 0.7])
num_classes, = weight.shape
covariance = np.array(
[
[[10, 1 + 1j, 1 + 1j], [1 - 1j, 5, 1], [1 - 1j, 1, 2]],
[[2, 0, 0], [0, 3, 0], [0, 0, 2]],
]
)
covariance /= np.trace(covariance, axis1=-2, axis2=-1)[..., None, None]
x, labels = sample_cacgmm(samples, weight, covariance, return_label=True)
affiliations = labels_to_one_hot(labels, num_classes, axis=-2)
# test initialization
model = CACGMMTrainer().fit(
x,
initialization=affiliations,
covariance_norm='trace',
)
# test initialization with independent
model = CACGMMTrainer().fit(
np.array([x]),
initialization=np.array([affiliations]),
covariance_norm='trace',
)
# test initialization with independent and broadcasted initialization
model = CACGMMTrainer().fit(
np.array([x, x, x]),
initialization=np.array([affiliations]),
covariance_norm='trace',
)
# test initialization with independent
model = CACGMMTrainer().fit(
np.array([x, x]),
initialization=np.array([affiliations, affiliations]),
covariance_norm='trace',
)
def test_sample_cacgmm():
np.random.seed(0)
samples = 10000
weight = np.array([0.3, 0.7])
num_classes = weight.shape[0]
labels = np.random.choice(
range(num_classes), size=(samples,), p=weight
)
covariance = np.array(
[
[[10, 1 + 1j, 1 + 1j], [1 - 1j, 5, 1], [1 - 1j, 1, 2]],
[[2, 0, 0], [0, 3, 0], [0, 0, 2]],
]
)
covariance /= np.trace(covariance, axis1=-2, axis2=-1)[..., None, None]
dimension = covariance.shape[-1]
x_ref = np.zeros((samples, dimension), dtype=np.complex128)
for l in range(num_classes):
cacg = ComplexAngularCentralGaussian.from_covariance(
covariance=covariance[l, :, :]
)
x_ref[labels == l, :] = cacg.sample(size=(np.sum(labels == l),))
np.random.seed(0)
x = sample_cacgmm(samples, weight, covariance)
np.testing.assert_equal(x, x_ref)
|
import torch
import torch.nn as nn
import numpy as np
import os
import glob
from libs.utils.data_utils import extract_length_angles
class RnnDetector(nn.Module):
def __init__(self):
super(RnnDetector, self).__init__()
def forward(self, model, inputs):
outs = model(inputs)
return outs
class Detect(object):
def __init__(self, model, detector, device):
super(Detect, self).__init__()
self.model = model
self.detector = detector
self.device = device
def save_labels(self, npy_folder, save_folder):
"""Save predicted labels of all test video to .cvs file.
Params:
pred: predicted labels
save_folder: path to save .csv file
Returns:
None
"""
coor_files = glob.glob(os.path.join(npy_folder, "*.npy"))
for file in coor_files:
csv_name = os.path.basename(file).replace(".npy", ".csv")
csv_path = os.path.join(save_folder, csv_name)
tjc = np.load(file) # tjc means timestep joints coordinates
btjc = tjc[np.newaxis] # btjc means batch timestep joints coordinates
btf = extract_length_angles(btjc) # btf means batch timestep feature
# tbf = btf.transpose(1, 0, 2) # as default "batch_first=False"
btf = torch.from_numpy(btf).to(self.device)
pred_labels = self.detector(self.model, btf)
# pred_btc = pred_labels.permute(1, 0, 2) # batch_first=False
pred_bt = torch.argmax(pred_labels, dim=2)
self._save_labels(pred_bt[0], csv_path)
print("All files are detected and saved!")
def _save_labels(self, pred, csv_file):
"""Save predicted labels of one test video to .cvs file.
Params:
pred: predicted labels
csv_file: path to save .csv file
Returns:
None
"""
pred_labels = ["{}".format(label) for label in pred]
line = ",".join(pred_labels)
with open(csv_file, "w") as f:
f.write(line)
print("Predicted labels have been saved to {}.".format(csv_file))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os;
import sys;
import datetime;
import string;
import shutil;
EPSILON = 1e-10;
Product_volume_dict = {};
START_TIME='20:55:00';
END_TIME='02:45:00';
if len(sys.argv) != 3:
print 'Usage: ./cmd src_md_dir dest_dir';
quit();
src_md_path = sys.argv[1];
dest_dir = sys.argv[2];
from_date_dir = datetime.date.today().strftime('%Y%m%d');
to_date_dir = from_date_dir;
date_dir = from_date_dir;
while date_dir <= to_date_dir:
if not os.path.exists(src_md_path+'/'+date_dir):
date_dir = (datetime.datetime.strptime(date_dir,'%Y%m%d')+datetime.timedelta(1)).strftime('%Y%m%d');
continue;
Product_volume_dict = {};
dest_night_dir = dest_dir+'/'+date_dir;
if not os.path.exists(dest_night_dir):
os.mkdir(dest_night_dir);
night_md_path = src_md_path+'/'+date_dir;
night_md_list = os.listdir(night_md_path);
for night_md_file in night_md_list:
if night_md_file.find('_n.csv') == -1:
continue;
night_md_file_path = night_md_path+'/'+night_md_file;
dest_night_md_path = dest_night_dir+'/'+night_md_file;
        shutil.copyfile(night_md_file_path, dest_night_md_path);
os.system('gzip -d '+dest_night_md_path);
dest_night_md_path = dest_night_md_path[0:len(dest_night_md_path)-3];
digit_index=0;
        for c in night_md_file:
if c>='0' and c<='9':
break;
digit_index = digit_index+1;
        if digit_index == len(night_md_file):
os.remove(dest_night_md_path);
continue;
        product_name = night_md_file[0:digit_index];
#print product_name;
#continue;
dest_fp = open(dest_night_md_path, 'r');
md_lines = dest_fp.readlines();
dest_fp.close();
#print dest_night_md_path,':',len(md_lines);
if len(md_lines) == 0:
os.remove(dest_night_md_path);
continue;
begin_line = md_lines[0];
begin_line_list = begin_line.split(',');
close_line = md_lines[-1];
close_line_list = close_line.split(',');
if begin_line_list[1]>START_TIME or close_line_list[1]<END_TIME:
os.remove(dest_night_md_path);
continue;
close_volume = string.atof(close_line_list[3]);
volumes_list = Product_volume_dict.get(product_name);
if volumes_list == None:
volumes_list = [];
volumes_list.append(close_volume);
Product_volume_dict[product_name] = volumes_list;
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'.csv');
elif len(volumes_list)==1:
if close_volume>volumes_list[0]:
volumes_list.append(volumes_list[0]);
volumes_list[0] = close_volume;
os.rename(dest_night_dir+'/'+product_name+'.csv',dest_night_dir+'/'+product_name+'2.csv');
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'.csv');
else:
volumes_list.append(close_volume);
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'2.csv');
else:
if close_volume>volumes_list[0]:
volumes_list[1] = volumes_list[0];
volumes_list[0] = close_volume;
os.rename(dest_night_dir+'/'+product_name+'.csv',dest_night_dir+'/'+product_name+'2.csv');
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'.csv');
elif close_volume>volumes_list[1]:
volumes_list[1] = close_volume;
os.rename(dest_night_md_path,dest_night_dir+'/'+product_name+'2.csv');
else:
os.remove(dest_night_md_path);
print 'Finish processing',date_dir;
date_dir = (datetime.datetime.strptime(date_dir,'%Y%m%d')+datetime.timedelta(1)).strftime('%Y%m%d');
|
import datajoint as dj
# -------------- group_shared_topopaper_horst_imaging --------------
schema = dj.Schema('group_shared_topopaper_horst_imaging')
vmod0 = dj.VirtualModule('vmod0', 'group_shared_topopaper_main_imaging')
@schema
class AlignmentPoints(dj.Manual):
definition = """
# User specified alignment points between two FOVs
-> vmod0.MetaSession.proj(metasession_ref="metasession_name")
-> vmod0.MetaSession.proj(metasession_align="metasession_name")
---
    projection_table : enum('Projection','ProjectionCorr') # Projection table the alignment images were retrieved from
num_points : int # Number of user defined alignment points
points_ref : longblob # List of points specified for the reference session
points_align : longblob # List of points specified for the aligned session
transform_method : varchar(255) # Which transform method to use, e.g. "Euclidean" or "Affine"
entry_time_align_points=CURRENT_TIMESTAMP : timestamp
"""
@schema
class FOVAlignmentParams(dj.Lookup):
definition = """
# Parameters for FOV alignment
fov_align_param_id : char(1) # Parameter set ID, starting with A
---
vector_y : float # How much to nudge in y
vector_x : float # How much to nudge in x
padding : float # Overall padding
"""
@schema
class FOVProjParam(dj.Lookup):
definition = """
# Name of Projection/ProjectionCorr key used for calculating image alignment metrics
projection_short : char(8) # Projection key short name
---
projection_key : varchar(50) # Projection key (minus "_corr") used for calculating FOV alignment metrics
"""
@schema
class FilteredCellsParams(dj.Lookup):
definition = """
# Parameters saved as parameter_dict_cells and restriction_dict_cell and are uniquely identified by parameter_hash_cell
param_hash_id_cell="standard" : enum('standard','ov_cutoff_D','ov_cutoff_H','ov_cutoff_I')
---
param_hash_cell : char(16) # 16 character hash of used restrictions AND parameters dict
restriction_dict_cell : longblob # Dictionary of filters used in query (Restrictions)
parameter_dict_cell : longblob # Dictionary of parameters used in query (Parameters)
"""
@schema
class ScoremapFOVParams(dj.Lookup):
definition = """
# Parameters for score maps
binning_param_short : char(6) # Parameter short name
---
binning_param_set : varchar(100) # Score map binning parameters set (identifies parameter set under BinningParamsFOV)
bin_size_microns : float # Bin size in microns
"""
@schema
class ScoremapFOVProjParams(dj.Lookup):
definition = """
# FOV Score map projection parameters
proj_param_id : char(1) # Parameter ID
---
proj_statistic : varchar(100) # Score map projection statistic (as in BinningsStats())
proj_sigma : float # Smoothing sigma
"""
@schema
class FilteredSessionsParams(dj.Lookup):
definition = """
# Restrictions saved as restriction_dict_session and uniquely identified by parameter_hash_session
param_hash_session : char(16) # 16 character hash of used restrictions
---
restriction_dict_session : longblob # Dictionary of filters used in query
"""
@schema
class PairwDistParams(dj.Lookup):
definition = """
# Parameters for pairwise distance analysis
pairwise_dist_param : char(1) # Param ID
---
score : varchar(50) # Score (column) name
score_cutoff : varchar(50) # Score cutoff value (defines starter cell population)
scoretables : varchar(1000) # Datajoint tables that 'score' can be found in (e.g. 'GridScore')
"""
@schema
class AnatomicalMaskParams(dj.Lookup):
definition = """
# LUT for anatomical masks drawn in Napari to identify subregions in FOV
timestamp_mask_lookup=CURRENT_TIMESTAMP : timestamp # Auto created timestamp
---
mec_label : tinyint # Medial entorhinal cortex (MEC) label name (integer)
pas_label=null : tinyint # Parasubiculum label name (integer)
rsa_label=null : tinyint # Retrosplenial agranular cortex (integer)
prh_label=null : tinyint # Perirhinal cortex
"""
@schema
class AlignmentFOV(dj.Computed):
definition = """
# Aligned FOV data
-> AlignmentPoints
-> FOVAlignmentParams
-> FOVProjParam
---
padded_ref : longblob # Padded / Shifted reference FOVs over planes (dictionary over planes)
padded_align : longblob # Padded / Shifted aligned FOV (warped) (dictionary over planes)
padded_align_raw : longblob # Padded / Shifted but non-warped FOV (dictionary over planes)
ssim_original=null : double # SSIM: Structural Similarity Index - original (average over planes)
ssim_warped=null : double # SSIM: Structural Similarity Index - after warping / alignment (average over planes)
mse_original=null : double # MSE: Mean squared error - original (average over planes)
mse_warped=null : double # MSE: Mean squared error - after warping / alignment (average over planes)
transform : longblob # Skimage transform object (same for all planes)
"""
@schema
class ScoremapFOV(dj.Computed):
definition = """
# Aligned score maps
-> vmod0.MetaSession.proj(metasession_ref="metasession_name")
-> ScoremapFOVParams
-> FilteredCellsParams
-> FilteredSessionsParams
---
bins_x=null : longblob # Bins in x
bins_y=null : longblob # Bins in y
aligned_metas : longblob # Dictionary containing all aligned sessions
aligned_metas_shuff : longblob # Dictionary containing shuffled + aligned sessions
no_sessions : int # Number of aligned sessions
no_shuffles : longblob # Number of shuffles
min_no_shuffles : int # Minimum number of shuffles across sessions
"""
@schema
class ScoremapFOVMoran(dj.Computed):
definition = """
# Calculate Moran's I (spat. autocorr measure) for FOV score maps
-> ScoremapFOV
-> ScoremapFOVProjParams
---
moran_i=null : double # Moran's I of current FOV score map
moran_i_shuffles=null : longblob # Moran's I shuffling distribution
moran_i_95=null : double # 95th percentile of Moran's I shuffling distribution
moran_i_99=null : double # 99th percentile of Moran's I shuffling distribution
moran_i_5=null : double # 5th percentile of Moran's I shuffling distribution
moran_i_1=null : double # 1st percentile of Moran's I shuffling distribution
"""
@schema
class ScoremapCorr(dj.Computed):
definition = """
# Correlation of FOV score maps
-> ScoremapFOV.proj(binning_param_A="binning_param_short")
-> ScoremapFOV.proj(binning_param_B="binning_param_short")
-> ScoremapFOVProjParams
---
xcorr=null : double # Cross correlation
xcorr_shuffles=null : longblob # Cross correlation shuffling distribution
xcorr_95=null : double # 95th percentile shuffling distribution xcorr
xcorr_99=null : double # 99th percentile shuffling distribution xcorr
xcorr_5=null : double # 5th percentile shuffling distribution xcorr
xcorr_1=null : double # 1st percentile shuffling distribution xcorr
"""
@schema
class FilteredSessions(dj.Manual):
definition = """
# Filtered sessions (manually selected by user)
-> vmod0.Tracking.proj(tracking_dataset="dataset_name")
-> FilteredSessionsParams
---
entry_time_filt_sessions=CURRENT_TIMESTAMP : timestamp
"""
@schema
class FilteredCells(dj.Manual):
definition = """
# Filtered cells (manually selected by user)
-> FilteredSessions
-> vmod0.Cell
-> FilteredCellsParams
---
entry_time_filt_cells=CURRENT_TIMESTAMP : timestamp
"""
@schema
class PairwDist(dj.Computed):
definition = """
# Pairwise distance statistic
-> FilteredSessions
-> FilteredCellsParams
-> PairwDistParams
-> vmod0.ProjectionCorr
"""
class Cells(dj.Part):
definition = """
# Cell numbers
-> PairwDist
region : char(3) # Brain region (3 letter abbreviation)
---
n_all : smallint # How many cells in total were considered?
n_startr : smallint # How many starter cells was the average calculated over (population A)?
"""
class NN(dj.Part):
definition = """
# Nearest neighbour (NN) distance results per region
-> PairwDist
region : char(3) # Brain region (3 letter abbreviation)
---
mean_nn : longblob # Mean NN over 1-10 neighbours
mean_nn_shuff_all : longblob # Shuffled mean NN over 1-10 neighbours taking all cells as start population
mean_nn_shuff_ref=null : longblob # Shuffled mean NN over 1-10 neighbours taking only reference cells as start population
mean_nn_csr : longblob # Shuffled mean NN over 1-10 neighbours taking CSR as start population
"""
class PairwD(dj.Part):
definition = """
# Pairw. distance results per region
-> PairwDist
region : char(3) # Brain region (3 letter abbreviation)
---
med_pairw_dist : double # Median pairwise distance
mean_pairw_dist=null : double # Mean pairwise distance
med_pairw_dist_shuffall : double # Shuffled median pairwise distance taking all cells as start population
mean_pairw_dist_shuffall : double # Shuffled mean pairwise distance taking all cells as start population
med_pairw_dist_shuffref=null : double # Shuffled median pairwise distance taking only reference cells as start population
mean_pairw_dist_shuffref=null : double # Shuffled mean pairwise distance taking only reference cells as start population
med_pairw_dist_csr : double # Shuffled median pairwise distance taking CSR as start population
mean_pairw_dist_csr : double # Shuffled mean pairwise distance taking CSR as start population
"""
@schema
class AnatomicalMask(dj.Manual):
definition = """
# Anatomical mask identifying anatomical regions in FOV
-> vmod0.ProjectionCorr
---
-> AnatomicalMaskParams
anat_mask : longblob # Anatomical mask for regions in FOV
"""
@schema
class RoisCorrBrainLoc(dj.Computed):
definition = """
# Cell IDs and anatomical location
-> AnatomicalMask
-> vmod0.RoisCorr
"""
class MEC(dj.Part):
definition = """
# Cells in MEC
-> RoisCorrBrainLoc
"""
class PAS(dj.Part):
definition = """
# Cells in Parasubiculum
-> RoisCorrBrainLoc
"""
class PRH(dj.Part):
definition = """
# Cells in perirhinal cortex
-> RoisCorrBrainLoc
"""
class RSA(dj.Part):
definition = """
# Cells in Retrosplenial / Agranular cortex
-> RoisCorrBrainLoc
"""
class Undefined(dj.Part):
definition = """
# Cells elsewhere (not defined by any anatomical label)
-> RoisCorrBrainLoc
"""
@schema
class NumberCellTypes(dj.Computed):
definition = """
    # Number of (pure) cell types per session and brain region (chosen: MEC and PAS)
-> FilteredSessions
-> FilteredCellsParams
---
n_cells : int # Total number of (filtered) cells in this session
"""
class MEC(dj.Part):
definition = """
# Cell numbers MEC
-> NumberCellTypes
---
mec_n_cells : int # MEC Total number of (filtered) cells
mec_ovc=null : int # MEC Number of OV cells
mec_n_grid_95 : int # MEC Number of grid cells > 95th
mec_n_info_95 : int # MEC Number of cells w info content > 95th
mec_n_hd_95 : int # MEC Number of HD cells > 95th
mec_n_border_95 : int # MEC Number of Border cells > 95th
mec_n_bv_95 : int # MEC Number of BV cells > 95th
mec_n_grid_99 : int # MEC Number of grid cells > 99th
mec_n_info_99 : int # MEC Number of cells w info content > 99th
mec_n_hd_99 : int # MEC Number of HD cells > 99th
mec_n_border_99 : int # MEC Number of Border cells > 99th
mec_n_bv_99 : int # MEC Number of BV cells > 99th
"""
class PAS(dj.Part):
definition = """
# Cell numbers PAS
-> NumberCellTypes
---
pas_n_cells : int # PAS Total number of (filtered) cells
pas_ovc=null : int # PAS Number of OV cells
pas_n_grid_95 : int # PAS Number of grid cells > 95th
pas_n_info_95 : int # PAS Number of cells w info content > 95th
pas_n_hd_95 : int # PAS Number of HD cells > 95th
pas_n_border_95 : int # PAS Number of Border cells > 95th
pas_n_bv_95 : int # PAS Number of BV cells > 95th
pas_n_grid_99 : int # PAS Number of grid cells > 99th
pas_n_info_99 : int # PAS Number of cells w info content > 99th
pas_n_hd_99 : int # PAS Number of HD cells > 99th
pas_n_border_99 : int # PAS Number of Border cells > 99th
pas_n_bv_99 : int # PAS Number of BV cells > 99th
"""
@schema
class OVParams(dj.Lookup):
definition = """
# Object centered map parameters
ov_params_id : char(1) # Parameter set ID, starting with A
---
bin_size_dist_ov : float # Bin size for distance binning in mm
bins_angular_ov : int # Number of bins in 360 degrees
sigma_time_ov : float # 2D gaussian smoothing of occupancy
    sigma_signal_ov : float # 2D gaussian smoothing of binned signal
"""
@schema
class OVOccupancy(dj.Computed):
definition = """
# Object centered occupancy
-> vmod0.Tracking.OpenField
-> vmod0.SignalTrackingParams
-> OVParams
-> vmod0.ArenaObjectPos
---
occupancy_ov : longblob # Smoothed 2D occupancy map [seconds], x: angles, y: distance
mask_occ_ov : longblob # Mask (where time = 0), x: angles, y: distance
occupancy_raw_ov : longblob # Raw, non-smoothed 2D occupancy map, x: angles, y: distance
explor_ratio_ov : double # Exploration ratio (visited bins over all bins)
explor_std_ov : double # Exploration standard deviation (of visited bins)
radial_edges_ov : longblob # Histogram edges in y (distance)
angular_edges_ov : longblob # Histogram edges in x (angles)
occupancy_time_ov : double # Time in seconds in occupancy
fraction_occupancy_ov : double # Fraction of time in occupancy map
"""
@schema
class OVMap(dj.Computed):
definition = """
# Object centered ratemap (vector map)
-> vmod0.SignalTracking
-> OVOccupancy.proj(tracking_dataset="dataset_name")
---
ovmap : longblob # Object centered ratemap ("vector map")
ovmap_raw : longblob # Unsmoothed (raw) 2D ratemap
mask_ovmap : longblob # Mask (where time = 0)
binned_raw_ov : longblob # Raw, binned signal
bin_max_ov : longblob # Bin with maximum signal (ovmap(bin_max) = max(ovmap))
max_ov : double # Maximum
"""
@schema
class OVCFields(dj.Computed):
definition = """
# Object vector cell (OVC) field based calculations
-> vmod0.Ratemap.proj(base_session="session_name")
-> vmod0.ShuffleParams
---
object1_session : varchar(16) # Object session 1
object2_session : varchar(16) # Object session 2
"""
class Fields(dj.Part):
definition = """
# OVC calculated field statistics (all fields)
-> OVCFields
object1_field_id : int # Field ID of field in object session 1
object2_field_id : int # Field ID of closest field to object1_field_id in object session 2
---
    dist_fields : double # Euclidean distance between fields with object1_field_id and object2_field_id - object centered
dist_to_object=null : double # Distance of field from object [average of field in object session 1 and 2]
angle_to_object=null : double # Angle of field to object [average of field in object session 1 and 2]
object1_field_base=null : double # (Object session 1 field mean rate in object session 1) / (Field mean rate in base session)
object2_field_base=null : double # (Object session 2 field mean rate in object session 2) / (Field mean rate in base session)
"""
@schema
class OVCScores(dj.Computed):
definition = """
# Object vector cell (OVC) vector map score based calculations
-> vmod0.SignalTracking.proj(base_session="session_name")
-> OVParams
-> vmod0.ShuffleParams
---
object1_session : varchar(16) # Object session 1
object2_session : varchar(16) # Object session 2
ovscore : double # Object vector score (2D correlation between OV maps)
"""
class ShuffledOVScore(dj.Part):
definition = """
# Shuffled Object vector (OV) score and shuffling
-> OVCScores
---
shuffled_ovscores_95perc : double # Object vector score shuffling for cell: 95th percentile
shuffled_ovscores_99perc : double # Object vector score shuffling for cell: 99th percentile
shuffled_ovscores : longblob # Object vector score shuffling for cell
"""
@schema
class OVCutoffs(dj.Lookup):
definition = """
# Object vector cell cutoffs
ov_cutoff_id : char(1) # Parameter set ID, starting with A
---
info_content_cutoff : varchar(100) # Information content cutoff (>)
ovscore_cutoff : varchar(100) # Object vector score cutoff (>)
dist_fields_cutoff : float # Distance [mm] (object centered field distances) (<)
dist_to_object_cutoff : float # Distance [mm] to object (>)
object1_field_base_cutoff : float # Relative rate of field in object session 1 compared to base session (>)
    object2_field_base_cutoff : float # Relative rate of field in object session 2 compared to base session (>)
"""
@schema
class OVC(dj.Computed):
definition = """
# Object vector cell (OVC) summary table
-> OVCScores
-> OVCutoffs
-> OVCFields
---
object1_session : varchar(16) # Object session 1
object2_session : varchar(16) # Object session 2
ovscore : double # Object vector score (2D correlation between OV maps)
is_ovc : tinyint # 0 - not an OVC according to cutoffs, 1 - putative OVC
no_fields : int # Number of filtered fields (matching cutoff criteria)
mean_dist_to_object=null : double # Average distance of (filtered) fields to object [mm]
mean_dist_fields=null : double # Average distance between fields [mm]
mean_angle_to_object=null : double # Circular mean of field angles to object [0, 2*pi]
std_angle_to_object=null : double # Circular standard deviation for field angles to object [radians]
field_ids=null : longblob # Field IDs list of dictionaries ('object1_field_id', 'object2_field_id')
angles_to_object=null : longblob # Field angles [0, 2*pi]
dists_to_object=null : longblob # Distances of (filtered) fields to object [mm]
    dists_fields=null : longblob # Distances between (filtered) fields [mm]
"""
@schema
class CutoffsInfoContent(dj.Computed):
definition = """
# Session level info content cutoffs
-> FilteredSessions
-> FilteredCellsParams
---
n_shuffles : int # Number of shuffles (total)
info_content_90=null : double # Spatial information content 90th cutoff
info_content_95=null : double # Spatial information content 95th cutoff
info_content_99=null : double # Spatial information content 99th cutoff
"""
@schema
class CutoffsGridscore(dj.Computed):
definition = """
# Session level gridscore cutoffs
-> FilteredSessions
-> FilteredCellsParams
---
n_shuffles : int # Number of shuffles (total)
gridscore_90=null : double # GridScore 90th cutoff
gridscore_95=null : double # GridScore 95th cutoff
gridscore_99=null : double # GridScore 99th cutoff
"""
@schema
class CutoffsOVScore(dj.Computed):
definition = """
# Session level OV score cutoffs
-> FilteredSessions.proj(base_session="session_name")
-> FilteredCellsParams
---
n_shuffles : int # Number of shuffles (total)
ovscore_90=null : double # OV Score 90th cutoff
ovscore_95=null : double # OV Score 95th cutoff
ovscore_99=null : double # OV Score 99th cutoff
"""
@schema
class CutoffsMVL(dj.Computed):
definition = """
# Session level MVL (head direction tuning) cutoffs
-> FilteredSessions
-> FilteredCellsParams
---
n_shuffles : int # Number of shuffles (total)
mvl_90=null : double # MVL 90th cutoff
mvl_95=null : double # MVL 95th cutoff
mvl_99=null : double # MVL 99th cutoff
"""
@schema
class CutoffsBorderScore(dj.Computed):
definition = """
# Session level Borderscore cutoffs
-> FilteredSessions
-> FilteredCellsParams
---
n_shuffles : int # Number of shuffles (total)
borderscore_90=null : double # Borderscore 90th cutoff
borderscore_95=null : double # Borderscore 95th cutoff
borderscore_99=null : double # Borderscore 99th cutoff
"""
@schema
class CutoffsBVScore(dj.Computed):
definition = """
# Session level boundary vector score cutoffs
-> FilteredSessions
-> FilteredCellsParams
---
n_shuffles : int # Number of shuffles (total)
bvs_90=null : double # BVS 90th cutoff
bvs_95=null : double # BVS 95th cutoff
bvs_99=null : double # BVS 99th cutoff
"""
@schema
class NNeighbourInterIntra(dj.Computed):
definition = """
# NN scores inter vs. intra score
-> FilteredSessions
-> FilteredCellsParams
-> PairwDistParams.proj(pairwise_dist_param_A="pairwise_dist_param")
-> PairwDistParams.proj(pairwise_dist_param_B="pairwise_dist_param")
-> vmod0.ProjectionCorr
"""
class Cells(dj.Part):
definition = """
# Cell numbers
-> NNeighbourInterIntra
region : char(3) # Brain region (3 letter abbreviation)
---
n_all : smallint # How many cells in total were considered?
n_startr_a : smallint # How many starter cells was the average calculated over (population A)?
n_startr_b : smallint # How many starter cells was the average calculated over (population B)?
"""
class DistAll(dj.Part):
definition = """
# NN distance results per region all cells - raw results
-> NNeighbourInterIntra
region : char(3)
---
nns_ab : longblob
nns_ba : longblob
nns_ab_shuffab : longblob
nns_ba_shuffab : longblob
nns_ab_shuffall : longblob
nns_ba_shuffall : longblob
nns_ab_csr : longblob
nns_ba_csr : longblob
"""
class DistSub(dj.Part):
definition = """
# NN distance results per region subsampled populations (size matched) - raw results
-> NNeighbourInterIntra
region : char(3)
---
nns_ab : longblob
nns_ba : longblob
nns_ab_shuffab : longblob
nns_ba_shuffab : longblob
nns_ab_shuffall : longblob
nns_ba_shuffall : longblob
nns_ab_csr : longblob
nns_ba_csr : longblob
"""
class NNAll(dj.Part):
definition = """
# Mean NN distance results per region all cells
-> NNeighbourInterIntra
region : char(3)
---
nns_ab : double
nns_ba : double
nns_ab_shuffab : double
nns_ba_shuffab : double
nns_ab_shuffall : double
nns_ba_shuffall : double
nns_ab_csr : double
nns_ba_csr : double
"""
class NNSub(dj.Part):
definition = """
# Mean NN distance results per region subsampled populations (size matched)
-> NNeighbourInterIntra
region : char(3)
---
nns_ab : double
nns_ba : double
nns_ab_shuffab : double
nns_ba_shuffab : double
nns_ab_shuffall : double
nns_ba_shuffall : double
nns_ab_csr : double
nns_ba_csr : double
"""
class RatioAll(dj.Part):
definition = """
# Inter to intra NN distances per region all cells
-> NNeighbourInterIntra
region : char(3)
---
ratio_ab : double
ratio_ba : double
ratio_ab_shuffab : double
ratio_ba_shuffab : double
ratio_ab_shuffall : double
ratio_ba_shuffall : double
ratio_ab_csr : double
ratio_ba_csr : double
"""
class RatioSub(dj.Part):
definition = """
# Inter to intra NN distances per region subsampled populations (size matched)
-> NNeighbourInterIntra
region : char(3)
---
ratio_ab : double
ratio_ba : double
ratio_ab_shuffab : double
ratio_ba_shuffab : double
ratio_ab_shuffall : double
ratio_ba_shuffall : double
ratio_ab_csr : double
ratio_ba_csr : double
"""
class Unprocessed(dj.Part):
definition = """
# Inter to intra NN distances per region (and cell numbers)
-> NNeighbourInterIntra
"""
|
from mitmproxy.web import master
__all__ = ["master"]
|
#!/usr/bin/env python
"""
Unittests for the kuralib middleware
"""
import unittest, sys, string, codecs, time
import kuraapp
class KuraAppTestCase(unittest.TestCase):
def testCreateRepository(self):
kuraapp.initApp("boud", "andal", "", "localhost")
app = kuraapp.app
assert len(app.tables) > 0, "Tables not filled."
assert len(app.relations) > 0, "Relations not filled."
assert len(app.objects) > 0, "Objects not defined."
assert app.sql
def suite():
s1 = unittest.makeSuite(KuraAppTestCase, "test")
testSuite=unittest.TestSuite((s1,))
return testSuite
def main():
runner = unittest.TextTestRunner(sys.stderr, 1, 2)
runner.run(suite())
if __name__=="__main__":
main()
__copyright__="""
/***************************************************************************
copyright : (C) 2002 by Boudewijn Rempt
see copyright notice for license
email : boud@valdyas.org
Revision : $Revision: 1.6 $
Last edited : $Date: 2002/11/16 13:43:59 $
***************************************************************************/
"""
|
import unittest
from buzzer import QuestionDataset, RNNBuzzer, create_feature_vecs_and_labels
import numpy as np
import torch
import torch.nn as nn
torch.set_printoptions(precision=10)
ex1 = {'feature_vec':torch.FloatTensor([[[0.1334, 0.1011, 0.0932], [0.1501, 0.1001, 0.0856], [0.1647, 0.0987, 0.0654]]]).view(1, 3, 3), 'len': torch.FloatTensor([3])}
ex2 = {'feature_vec':torch.FloatTensor([[[0.1234, 0.1111, 0.0934], [0.1301, 0.1041, 0.0898], [0.1447, 0.0981, 0.0723], [0.1596, 0.0901, 0.0657]],
[[0.1034, 0.0983, 0.0679], [0.1555, 0.1144, 0.0882], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.1132, 0.0932, 0.0813], [0.1404, 0.1001, 0.0831], [0.1696, 0.0777, 0.0593], [0.0, 0.0, 0.0]]]),
'len': torch.FloatTensor([4, 2, 3])}
class TestSeq2Seq(unittest.TestCase):
def setUp(self):
self.toy_rnn_model = RNNBuzzer(n_input=3, n_hidden=2)
self.toy_rnn_model.eval()
lstm_weight_input_hidden = torch.tensor([[0.4, -0.2, 0.1],
[-0.4, 0.5, 0.2],
[0.3, 0.2, 0.1],
[0.4, 0.9, -0.1],
[0.8, -0.7, -0.5],
[0.7, 0.1, -0.1],
[0.0, 0.1, 0.0],
[-0.9, -0.8, -0.7]])
lstm_weight_hidden_hidden = torch.tensor([[0.5, -0.1],
[-0.4, 0.3],
[0.3, 0.6],
[0.4, -0.2],
[0.8, -0.9],
[-0.7, 0.0],
[0.5, 0.2],
[0.0, -0.5]])
self.toy_rnn_model.lstm.weight_ih_l0.data.copy_(lstm_weight_input_hidden)
self.toy_rnn_model.lstm.weight_hh_l0.data.copy_(lstm_weight_hidden_hidden)
self.toy_rnn_model.lstm.bias_ih_l0.data.fill_(1.0)
self.toy_rnn_model.lstm.bias_hh_l0.data.fill_(1.0)
hidden_linear_layer_weight = torch.tensor([[0.4, -0.2], [-0.9, 0.8]])
self.toy_rnn_model.hidden_to_label.weight.data.copy_(hidden_linear_layer_weight)
nn.init.ones_(self.toy_rnn_model.hidden_to_label.bias.data)
def test_forward(self):
logits = self.toy_rnn_model(ex1['feature_vec'], ex1['len'])
self.assertAlmostEqual(logits[0][0].item(), 1.126254796981)
self.assertAlmostEqual(logits[0][1].item(), 0.922435641288757)
self.assertAlmostEqual(logits[1][0].item(), 1.193930149078369)
self.assertAlmostEqual(logits[1][1].item(), 0.8235720992088)
self.assertAlmostEqual(logits[2][0].item(), 1.2111276388168)
self.assertAlmostEqual(logits[2][1].item(), 0.796994566917)
def test_minibatch(self):
logits = self.toy_rnn_model(ex2['feature_vec'], ex2['len'])
self.assertAlmostEqual(logits[0][0].item(), 1.1259287596)
self.assertAlmostEqual(logits[0][1].item(), 0.9232868552)
self.assertAlmostEqual(logits[1][0].item(), 1.1934133768)
self.assertAlmostEqual(logits[1][1].item(), 0.8253083229)
self.assertAlmostEqual(logits[2][0].item(), 1.2106758356)
self.assertAlmostEqual(logits[2][1].item(), 0.7986904979)
self.assertAlmostEqual(logits[3][0].item(), 1.214038729)
self.assertAlmostEqual(logits[3][1].item(), 0.7943208218)
self.assertAlmostEqual(logits[4][0].item(), 1.1251035929)
self.assertAlmostEqual(logits[4][1].item(), 0.9263896942)
self.assertAlmostEqual(logits[5][0].item(), 1.1943942308)
self.assertAlmostEqual(logits[5][1].item(), 0.8215977550)
self.assertAlmostEqual(logits[6][0].item(), 1.2029464245)
self.assertAlmostEqual(logits[6][1].item(), 0.8289564848)
self.assertAlmostEqual(logits[7][0].item(), 1.2067799568)
self.assertAlmostEqual(logits[7][1].item(), 0.8231585622)
self.assertAlmostEqual(logits[8][0].item(), 1.1255118847)
self.assertAlmostEqual(logits[8][1].item(), 0.9250283241)
self.assertAlmostEqual(logits[9][0].item(), 1.1935989857)
self.assertAlmostEqual(logits[9][1].item(), 0.8247293830)
self.assertAlmostEqual(logits[10][0].item(), 1.2105900049)
self.assertAlmostEqual(logits[10][1].item(), 0.7990440130)
self.assertAlmostEqual(logits[11][0].item(), 1.2060568333)
self.assertAlmostEqual(logits[11][1].item(), 0.8258087635)
def test_feature_and_label_vectorizer(self):
guesses_and_scores1 = [[[('Little_Brown_Foxes', 0.1435), ('Jerry_Seinfeld', 0.1332), ('India', 0.1198)],
[('United_States', 0.1335), ('England', 0.1212), ('Canada', 0.1011)],
[('England', 0.1634), ('United_States', 0.1031), ('France', 0.0821)]]]
ans1 = [['England', 'England', 'England']]
exs = create_feature_vecs_and_labels(guesses_and_scores1, ans1, 3)
self.assertEqual(exs[0][0][0][0], 0.1435)
self.assertEqual(exs[0][0][0][1], 0.1332)
self.assertEqual(exs[0][0][1][1], 0.1212)
self.assertEqual(exs[0][0][1][2], 0.1011)
self.assertEqual(exs[0][0][2][0], 0.1634)
self.assertEqual(exs[0][0][2][2], 0.0821)
self.assertEqual(exs[0][1][0], 0)
self.assertEqual(exs[0][1][1], 0)
self.assertEqual(exs[0][1][2], 1)
if __name__ == '__main__':
unittest.main()
|
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as log
import os.path
import time
import mender.bootstrap.bootstrap as bootstrap
from mender.client import HTTPUnathorized
import mender.client.authorize as authorize
import mender.client.deployments as deployments
import mender.client.inventory as client_inventory
import mender.config.config as config
import mender.scripts.aggregator.identity as identity
import mender.scripts.aggregator.inventory as inventory
import mender.scripts.artifactinfo as artifactinfo
import mender.scripts.devicetype as devicetype
import mender.scripts.runner as installscriptrunner
import mender.settings.settings as settings
from mender.log.log import DeploymentLogHandler
class Context:
"""Class for storing the state-machine context"""
def __init__(self):
self.private_key = None
class State:
def __init__(self):
pass
def run(self, context):
pass
class Init(State):
def run(self, context, force_bootstrap=False):
log.debug("InitState: run()")
context.config = config.Config({}, {})
try:
context.config = config.load(
local_path=settings.PATHS.local_conf,
global_path=settings.PATHS.global_conf,
)
log.info(f"Loaded configuration: {context.config}")
except config.NoConfigurationFileError:
log.error(
"No configuration files found for the device."
"Most likely, the device will not be functional."
)
identity_data = identity.aggregate(path=settings.PATHS.identity_scripts)
context.identity_data = identity_data
private_key = bootstrap.now(
force_bootstrap=force_bootstrap, private_key_path=settings.PATHS.key
)
context.private_key = private_key
log.debug(f"Init set context to: {context}")
return context
##########################################
def run():
while os.path.exists(settings.PATHS.lockfile_path):
log.info(
"A deployment is currently in progress, the client will go to sleep for 60 seconds"
)
time.sleep(settings.SLEEP_INTERVAL)
StateMachine().run()
class StateMachine:
def __init__(self):
log.info("Initializing the state-machine")
self.context = Context()
self.context.authorized = False
log.info(f"ctx: {self.context}")
self.unauthorized_machine = UnauthorizedStateMachine()
self.authorized_machine = AuthorizedStateMachine()
log.info("Finished setting up the state-machine")
def run(self, force_bootstrap=False):
self.context = Init().run(self.context, force_bootstrap)
log.debug(f"Initialized context: {self.context}")
deployment_log_handler = DeploymentLogHandler()
logger = log.getLogger("")
logger.addHandler(deployment_log_handler)
self.context.deployment_log_handler = deployment_log_handler
self.context.deployment_log_handler.disable()
while True:
self.unauthorized_machine.run(self.context)
self.authorized_machine.run(self.context)
#
# Hierarchical - Yes!
#
# i.e., Authorized, and Unauthorized state-machine
#
class Authorize(State):
def run(self, context):
log.info("Authorizing...")
log.debug(f"Current context: {context}")
time.sleep(3)
return authorize.request(
context.config.ServerURL,
context.config.TenantToken,
context.identity_data,
context.private_key,
context.config.ServerCertificate,
)
class Idle(State):
def run(self, context):
log.info("Idling...")
time.sleep(10)
return True
class UnauthorizedStateMachine(StateMachine):
"""Handle Wait, and Authorize attempts"""
def __init__(self):
pass
def run(self, context):
while True:
JWT = Authorize().run(context)
if JWT:
context.JWT = JWT
context.authorized = True
return
Idle().run(context)
class AuthorizedStateMachine(StateMachine):
"""Handle Inventory update, and update check"""
def __init__(self):
self.idle_machine = IdleStateMachine()
self.update_machine = UpdateStateMachine()
def run(self, context):
while context.authorized:
try:
self.idle_machine.run(context) # Idle returns when an update is ready
UpdateStateMachine().run(
context
) # Update machine runs when idle detects an update
except HTTPUnathorized:
context.authorized = False
return
# Should transitions always go through the external state-machine, to verify and
# catch de-authorizations (?)
#
# Second layered machine (below Authorized)
#
# Idling - Or, just pushing inventory and identity data and looking for updates
class SyncInventory(State):
def run(self, context):
log.info("Syncing the inventory...")
inventory_data = inventory.aggregate(
settings.PATHS.inventory_scripts,
settings.PATHS.device_type,
settings.PATHS.artifact_info,
)
if inventory_data:
log.debug(f"aggreated inventory data: {inventory_data}")
client_inventory.request(
context.config.ServerURL,
context.JWT,
inventory_data,
context.config.ServerCertificate,
)
else:
log.info("No inventory data found")
time.sleep(1)
class SyncUpdate(State):
def run(self, context):
log.info("Checking for updates...")
device_type = devicetype.get(settings.PATHS.device_type)
artifact_name = artifactinfo.get(settings.PATHS.artifact_info)
deployment = deployments.request(
context.config.ServerURL,
context.JWT,
device_type=device_type,
artifact_name=artifact_name,
server_certificate=context.config.ServerCertificate,
)
if deployment:
context.deployment = deployment
context.deployment_log_handler.enable()
return True
time.sleep(2)
return False
class IdleStateMachine(AuthorizedStateMachine):
def __init__(self):
pass
def run(self, context):
while context.authorized:
SyncInventory().run(context)
if SyncUpdate().run(context):
# Update available
return
#
# Updating - Run the update state-machine
#
class Download(State):
def run(self, context):
log.info("Running the Download state...")
if deployments.download(
context.deployment,
artifact_path=os.path.join(
settings.PATHS.artifact_download, "artifact.mender"
),
server_certificate=context.config.ServerCertificate,
):
if not deployments.report(
context.config.ServerURL,
deployments.STATUS_DOWNLOADING,
context.deployment.ID,
context.config.ServerCertificate,
context.JWT,
):
log.error(
"Failed to report the deployment status 'downloading' to the Mender server"
)
return ArtifactInstall()
return ArtifactFailure()
class ArtifactInstall(State):
def run(self, context):
log.info("Running the ArtifactInstall state...")
if installscriptrunner.run_sub_updater(context.deployment.ID):
return ArtifactReboot()
return ArtifactFailure()
class ArtifactReboot(State):
def run(self, context):
log.info("Running the ArtifactReboot state...")
return ArtifactCommit()
class ArtifactCommit(State):
def run(self, context):
log.info("Running the ArtifactCommit state...")
return ArtifactRollback()
class ArtifactRollback(State):
def run(self, context):
log.info("Running the ArtifactRollback state...")
return ArtifactRollbackReboot()
class ArtifactRollbackReboot(State):
def run(self, context):
log.info("Running the ArtifactRollbackReboot state...")
return ArtifactFailure()
class ArtifactFailure(State):
def run(self, context):
log.info("Running the ArtifactFailure state...")
return _UpdateDone()
class _UpdateDone(State):
def __str__(self):
return "done"
def __eq__(self, other):
return isinstance(other, _UpdateDone)
def run(self, context):
assert False
# The update state-machine is the most advanced machine we need
class UpdateStateMachine(AuthorizedStateMachine):
def __init__(self):
self.current_state = Download()
def run(self, context):
while self.current_state != _UpdateDone():
self.current_state = self.current_state.run(context)
time.sleep(1)
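# Hedged sketch of how the update states chain together (MyCustomState is
# hypothetical, not part of this module): each state's run() returns the next
# State instance, and UpdateStateMachine loops until it reaches _UpdateDone().
#   class MyCustomState(State):
#       def run(self, context):
#           log.info("Running MyCustomState...")
#           return _UpdateDone()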
|
import bisect
import functools
import itertools
import logging
import time
from uuid import UUID
from abc import (
ABCMeta,
abstractmethod
)
from typing import (
cast,
Dict,
Iterable,
List,
Set,
Tuple,
Type,
TYPE_CHECKING,
Union,
Optional,
)
from hvm.types import Timestamp
import rlp_cython as rlp
from trie import (
HexaryTrie,
)
from eth_typing import (
BlockNumber,
Hash32,
Address
)
from eth_utils import (
to_list,
to_tuple,
)
from eth_hash.auto import keccak
from hvm.constants import (
GENESIS_PARENT_HASH,
MIN_GAS_PRICE_CALCULATION_AVERAGE_DELAY,
MIN_GAS_PRICE_CALCULATION_AVERAGE_WINDOW_LENGTH,
MIN_GAS_PRICE_CALCULATION_MIN_TIME_BETWEEN_CHANGE_IN_MIN_GAS_PRICE,
MAX_NUM_HISTORICAL_MIN_GAS_PRICE_TO_KEEP,
ZERO_HASH32,
BLANK_REWARD_HASH, MIN_GAS_PRICE_CALCULATION_GOAL_TX_PER_CENTISECOND_MUTIPLIER)
from hvm.exceptions import (
CanonicalHeadNotFound,
HeaderNotFound,
ParentNotFound,
TransactionNotFound,
JournalDbNotActivated,
HistoricalNetworkTPCMissing,
HistoricalMinGasPriceError,
NotEnoughDataForHistoricalMinGasPriceCalculation,
)
from hvm.db.backends.base import (
BaseDB
)
from hvm.db.schema import SchemaV1
from hvm.rlp.headers import (
BlockHeader,
)
from hvm.rlp.receipts import (
Receipt
)
from hvm.utils.hexadecimal import (
encode_hex,
)
from hvm.validation import (
validate_uint256,
validate_is_integer,
validate_word,
validate_canonical_address,
validate_centisecond_timestamp,
validate_is_bytes,
)
from hvm.rlp.consensus import StakeRewardBundle, BaseRewardBundle, NodeStakingScore
from hvm.rlp import sedes as evm_rlp_sedes
from hvm.rlp.sedes import(
trie_root,
address,
hash32,
)
from rlp_cython.sedes import(
big_endian_int,
CountableList,
binary,
)
from hvm.db.journal import (
JournalDB,
)
from sortedcontainers import (
SortedList,
SortedDict,
)
from hvm.utils.numeric import (
are_items_in_list_equal,
)
from hvm.utils.padding import de_sparse_timestamp_item_list, propogate_timestamp_item_list_to_present
if TYPE_CHECKING:
from hvm.rlp.blocks import ( # noqa: F401
BaseBlock
)
from hvm.rlp.transactions import ( # noqa: F401
BaseTransaction,
BaseReceiveTransaction
)
class TransactionKey(rlp.Serializable):
fields = [
('block_hash', hash32),
('index', rlp.sedes.big_endian_int),
('is_receive', rlp.sedes.boolean),
]
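# Hedged usage sketch (values are illustrative): TransactionKey is an rlp
# Serializable, so lookup entries can be round-tripped with encode/decode.
#   key = TransactionKey(block_hash=ZERO_HASH32, index=0, is_receive=False)
#   encoded = rlp.encode(key)
#   decoded = rlp.decode(encoded, sedes=TransactionKey)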
class BaseChainDB(metaclass=ABCMeta):
db = None # type: BaseDB
@abstractmethod
def __init__(self, db: BaseDB) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Canonical Chain API
#
@abstractmethod
def remove_block_from_canonical_block_hash_lookup(self, block_number: BlockNumber, chain_address: Address) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_canonical_block_header_by_number(self, block_number: BlockNumber, wallet_address: Address) -> BlockHeader:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_canonical_block_hash(self, block_number: BlockNumber, chain_address: Address) -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_canonical_block_number_before_timestamp(self, before_timestamp: Timestamp, chain_address: Address) -> BlockNumber:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_canonical_head(self, wallet_address: Address) -> BlockHeader:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_canonical_head_hash(self, wallet_address: Address) -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_all_block_hashes_on_chain(self, chain_address: Address) -> List[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_all_block_hashes_on_chain_by_head_block_hash(self, chain_head_hash: Hash32) -> List[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def delete_canonical_chain(self, chain_address: Address) -> List[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def is_in_canonical_chain(self, block_hash: Hash32) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def delete_block_from_canonical_chain(self, block_hash: Hash32) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Header API
#
@abstractmethod
def header_exists(self, block_hash: Hash32) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_header(self, header: BlockHeader) -> Tuple[BlockHeader, ...]:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Block API
#
@abstractmethod
def persist_block(self, block: 'BaseBlock') -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_non_canonical_block(self, block: 'BaseBlock'):
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_block_as_unprocessed(self, block: 'BaseBlock') -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def remove_block_from_unprocessed(self, block: 'BaseBlock') -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_unprocessed_block_lookup(self, block_hash: Hash32, block_number: BlockNumber, chain_address: Address) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_unprocessed_children_block_lookup(self, block_hash: Hash32) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_unprocessed_children_block_lookup_to_transaction_parents(self, block: 'BaseBlock') -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_unprocessed_children_block_lookup_to_reward_proof_parents(self, block: 'BaseBlock') -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def delete_unprocessed_children_block_lookup_to_transaction_parents_if_nessissary(self, block: 'BaseBlock') -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def has_unprocessed_children(self, block_hash: Hash32) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_chain_wallet_address_for_block_hash(self, block_hash: Hash32) -> Address:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
    def get_number_of_send_tx_in_block(self, block_hash: Hash32) -> int:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_number_of_total_tx_in_block(self, block_hash: Hash32) -> int:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Chronologically consistent blockchain db API
#
@abstractmethod
def get_block_chronological_consistency_keys(self, chain_address: Address, block_number: BlockNumber) -> List[Tuple[Timestamp, Hash32]]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_block_chronological_consistency_keys(self, chain_address: Address, block_number: BlockNumber, keys: List[Tuple[Timestamp, Hash32]]) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def add_block_consistency_key(self, chain_address: Address, block_number: BlockNumber, key: Tuple[Timestamp, Hash32]) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def delete_block_consistency_key(self, chain_address: Address, block_number: BlockNumber, key: Tuple[Timestamp, Hash32]) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Transaction API
#
@abstractmethod
def add_receipt(self, block_header: BlockHeader, index_key: int, receipt: Receipt, send_or_receive) -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def add_transaction(self,
block_header: BlockHeader,
index_key: int, transaction: 'BaseTransaction') -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def add_receive_transaction(self,
block_header: BlockHeader,
index_key: int,
transaction: 'BaseReceiveTransaction') -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_transactions(
self,
block_header: BlockHeader,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_receive_transactions(
self,
header: BlockHeader,
transaction_class: Type['BaseReceiveTransaction']) -> Iterable['BaseReceiveTransaction']:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_transaction_hashes(self, block_header: BlockHeader) -> Iterable[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_receive_transaction_hashes(self, block_header: BlockHeader) -> Iterable[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_receipts(self,
header: BlockHeader,
receipt_class: Type[Receipt]) -> Iterable[Receipt]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_receipts_by_block_hash(self,
hash: Hash32) -> Tuple[Receipt]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_receipt(self, tx_hash: Hash32) -> Receipt:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_cumulative_gas_used(self, tx_hash: Hash32) -> int:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_receipt_by_idx(self,
header: BlockHeader,
receipt_idx: int,
receipt_class: Type[Receipt] = Receipt) -> Optional[Receipt]:
raise NotImplementedError("ChainDB classes must implement this method")
# @abstractmethod
# def get_transaction_by_index(
# self,
# block_number: BlockNumber,
# transaction_index: int,
# transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
# raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_by_index_and_block_hash(
self,
block_hash: Hash32,
transaction_index: int,
transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_receive_transaction_by_index_and_block_hash(
self,
block_hash: Hash32,
transaction_index: int,
transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_by_hash(self,
tx_hash: Hash32,
send_tx_class: Type['BaseTransaction'],
receive_tx_class: Type['BaseReceiveTransaction']) -> Union['BaseTransaction', 'BaseReceiveTransaction']:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_chain_address(self, transaction_hash: Hash32) -> Address:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_transaction_block_hash(self, transaction_hash: Hash32) -> Hash32:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
    def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[Hash32, int, bool]:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Unprocessed block API
#
@abstractmethod
def is_block_unprocessed(self, block_hash: Hash32) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_unprocessed_block_hash_by_block_number(self, chain_address: Address, block_number: BlockNumber) -> Optional[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_unprocessed_block_header_by_block_number(self, chain_address: Address, block_number: BlockNumber) -> BlockHeader:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def delete_unprocessed_block_lookup(self, block_hash: Hash32, block_number: BlockNumber) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def delete_unprocessed_children_blocks_lookup(self, block_hash: Hash32) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def check_all_children_blocks_to_see_if_any_unprocessed(self, block_hash: Hash32) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Block children and Stake API
#
@abstractmethod
def add_block_receive_transactions_to_parent_child_lookup(self, block_header: 'BlockHeader',
transaction_class: Type[
'BaseReceiveTransaction']) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def add_block_rewards_to_parent_child_lookup(self, block_header: 'BlockHeader',
reward_bundle: BaseRewardBundle) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def remove_block_receive_transactions_to_parent_child_lookup(self, block_header: 'BlockHeader',
transaction_class: Type['BaseReceiveTransaction']) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def remove_block_child(self,
parent_block_hash: Hash32,
child_block_hash: Hash32) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def remove_block_from_all_parent_child_lookups(self, block_header: 'BlockHeader',
receive_transaction_class: Type['BaseReceiveTransaction']) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def add_block_child(self,
parent_block_hash: Hash32,
child_block_hash: Hash32) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_children(self, parent_block_hash: Hash32) -> List[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_all_descendant_block_hashes(self, block_hash: Hash32) -> List[Hash32]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_block_children(self, parent_block_hash: Hash32,
block_children: List[Hash32]) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def delete_all_block_children_lookups(self, parent_block_hash: Hash32) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_children_chains(self, block_hash: Hash32, exclude_chains:Set = None) -> Set[Address]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_stake_from_children(self, block_hash: Hash32, coin_mature_time_for_staking: Timestamp, exclude_chains: Set = None) -> int:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_total_block_stake_of_block_hashes(self, block_hashes: List[Hash32], coin_mature_time_for_staking: Timestamp, timestamp_for_stake = None) -> int:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_mature_stake(self, wallet_address: Address, coin_mature_time_for_staking: Timestamp,
timestamp: Timestamp = None,
raise_canonical_head_not_found_error: bool = False) -> int:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Historical minimum allowed gas price API for throttling the network
#
@abstractmethod
def save_historical_minimum_gas_price(self,
historical_minimum_gas_price: List[List[Union[Timestamp, int]]]) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def load_historical_minimum_gas_price(self, sort: bool = False) -> Optional[List[List[Union[Timestamp, int]]]]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_historical_tx_per_centisecond(self, historical_tx_per_centisecond: List[List[int]], de_sparse=True) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def load_historical_tx_per_centisecond(self, sort=False) -> Optional[List[List[int]]]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_historical_network_tpc_capability(self, historical_tpc_capability: List[List[Union[Timestamp, int]]],
de_sparse: bool = False) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def save_current_historical_network_tpc_capability(self, current_tpc_capability: int) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def load_historical_network_tpc_capability(self, sort: bool = False) -> Optional[List[List[Union[Timestamp, int]]]]:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def initialize_historical_minimum_gas_price_at_genesis(self, min_gas_price: int, net_tpc_cap: int,
tpc: int = None) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def propogate_historical_min_gas_price_parameters_to_present(self) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_required_block_min_gas_price(self, block_timestamp: Timestamp = None) -> int:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def min_gas_system_initialization_required(self) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Raw Database API
#
@abstractmethod
def exists(self, key: bytes) -> bool:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_trie_data_dict(self, trie_data_dict: Dict[bytes, bytes]) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
#
# Reward bundle processing
#
@abstractmethod
def get_latest_reward_block_number(self, wallet_address: Address) -> BlockNumber:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def set_latest_reward_block_number(self, wallet_address: Address, block_number: BlockNumber) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_block_number_of_latest_reward_block(self, wallet_address: Address) -> BlockNumber:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def persist_reward_bundle(self, reward_bundle: BaseRewardBundle) -> None:
raise NotImplementedError("ChainDB classes must implement this method")
@abstractmethod
def get_reward_bundle(self, reward_bundle_hash: Hash32,
reward_bundle_class: Type[BaseRewardBundle]) -> BaseRewardBundle:
raise NotImplementedError("ChainDB classes must implement this method")
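# Usage sketch (assumption: some BaseDB-compatible key-value backend, e.g. an in-memory
# dict-backed db, is available in this codebase; the name MemoryDB below is illustrative):
#
#   underlying_db = MemoryDB()          # any BaseDB implementation
#   chaindb = ChainDB(underlying_db)    # concrete subclass defined below
#   chaindb.persist_header(genesis_header)
#
# BaseChainDB itself cannot be instantiated; it only defines the abstract interface that
# ChainDB (below) and any alternative storage backends must implement.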
class ChainDB(BaseChainDB):
logger = logging.getLogger('hvm.db.chain_db.ChainDB')
_journaldb = None
def __init__(self, db: BaseDB) -> None:
self.db = db
#
# Canonical Chain API
#
def get_canonical_block_hash(self, block_number: BlockNumber, chain_address: Address) -> Hash32:
"""
Return the block hash for the given block number.
"""
validate_uint256(block_number, title="Block Number")
number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(chain_address, block_number)
try:
return rlp.decode(
self.db[number_to_hash_key],
sedes=rlp.sedes.binary,
)
except KeyError:
            self.logger.debug("No canonical block hash found for chain {} at block number {}".format(chain_address, block_number))
raise HeaderNotFound(
"No header found on the canonical chain {} with number {}".format(chain_address, block_number)
)
def remove_block_from_canonical_block_hash_lookup(self, block_number: BlockNumber, chain_address: Address) -> None:
'''
Deletes the block number from the get_canonical_block_hash lookup
:param block_number:
:param chain_address:
:return:
'''
validate_uint256(block_number, title="Block Number")
number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(chain_address, block_number)
try:
del(self.db[number_to_hash_key])
except KeyError:
pass
def get_canonical_block_header_by_number(self, block_number: BlockNumber, chain_address: Address) -> BlockHeader:
"""
Returns the block header with the given number in the canonical chain.
Raises HeaderNotFound if there's no block header with the given number in the
canonical chain.
"""
validate_uint256(block_number, title="Block Number")
return self.get_block_header_by_hash(self.get_canonical_block_hash(block_number, chain_address))
def get_canonical_block_number_before_timestamp(self, before_timestamp: Timestamp, chain_address: Address) -> BlockNumber:
"""
Returns the block header with the given number in the canonical chain.
Raises HeaderNotFound if there's no block header with the given number in the
canonical chain.
"""
head = self.get_canonical_head(chain_address)
if head.timestamp <= before_timestamp:
return head.block_number
else:
for i in range(head.block_number-1, -1, -1):
header = self.get_canonical_block_header_by_number(i, chain_address)
if header.timestamp <= before_timestamp:
return header.block_number
raise HeaderNotFound("No blocks before the timestamp {} were found.".format(before_timestamp))
def get_canonical_head(self, chain_address: Address) -> BlockHeader:
"""
Returns the current block header at the head of the chain.
Raises CanonicalHeadNotFound if no canonical head has been set.
"""
canonical_head_hash = self.get_canonical_head_hash(chain_address)
return self.get_block_header_by_hash(
cast(Hash32, canonical_head_hash),
)
def get_canonical_head_hash(self, chain_address: Address) -> Hash32:
try:
return self.db[SchemaV1.make_canonical_head_hash_lookup_key(chain_address)]
except KeyError:
raise CanonicalHeadNotFound("No canonical head set for this chain")
def get_all_block_hashes_on_chain(self, chain_address: Address) -> List[Hash32]:
chain_hashes = []
for block_number in itertools.count():
try:
chain_hashes.append(self.get_canonical_block_hash(block_number, chain_address))
except HeaderNotFound:
break
return chain_hashes
def get_all_block_hashes_on_chain_by_head_block_hash(self, chain_head_hash: Hash32) -> List[Hash32]:
chain_head_header = self.get_block_header_by_hash(chain_head_hash)
chain_address = chain_head_header.chain_address
chain_block_hashes = self.get_all_block_hashes_on_chain(chain_address)
return chain_block_hashes
def delete_canonical_chain(self, chain_address: Address) -> List[Hash32]:
'''
returns a list of deleted block hashes
:param chain_address:
:return:
'''
try:
canonical_header = self.get_canonical_head(chain_address= chain_address)
except CanonicalHeadNotFound:
canonical_header = None
deleted_hashes = []
if canonical_header is not None:
for i in range(0, canonical_header.block_number+1):
header_to_remove = self.get_canonical_block_header_by_number(i, chain_address= chain_address)
deleted_hashes.append(header_to_remove.hash)
for transaction_hash in self.get_block_transaction_hashes(header_to_remove):
self._remove_transaction_from_canonical_chain(transaction_hash)
for transaction_hash in self.get_block_receive_transaction_hashes(header_to_remove):
self._remove_transaction_from_canonical_chain(transaction_hash)
self.remove_block_from_canonical_block_hash_lookup(BlockNumber(i), chain_address=chain_address)
del(self.db[SchemaV1.make_canonical_head_hash_lookup_key(chain_address)])
return deleted_hashes
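    # Effect sketch: delete_canonical_chain walks block 0..head on the given chain, removes
    # each block's transaction lookups and its number->hash entry, then deletes the canonical
    # head pointer, so a subsequent get_canonical_head() raises CanonicalHeadNotFound. The
    # raw headers themselves remain in the database.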
#
# Header API
#
def header_exists(self, block_hash: Hash32) -> bool:
"""
Returns True if the header with the given hash is in our DB.
"""
return self.db.exists(block_hash)
@functools.lru_cache(maxsize=128)
def get_block_header_by_hash(self, block_hash: Hash32) -> BlockHeader:
"""
Returns the requested block header as specified by block hash.
Raises HeaderNotFound if it is not present in the db.
"""
validate_word(block_hash, title="Block Hash")
try:
header_rlp = self.db[block_hash]
except KeyError:
raise HeaderNotFound(
"No header with hash {0} found".format(encode_hex(block_hash))
)
return _decode_block_header(header_rlp)
# TODO: This method should take a chain of headers as that's the most common use case
# and it'd be much faster than inserting each header individually.
def persist_header(self, header: BlockHeader) -> Tuple[BlockHeader, ...]:
"""
Returns iterable of headers newly on the canonical chain
"""
is_genesis = header.parent_hash == GENESIS_PARENT_HASH
if not is_genesis and not self.header_exists(header.parent_hash):
raise ParentNotFound(
"Cannot persist block header ({}) with unknown parent ({})".format(
encode_hex(header.hash), encode_hex(header.parent_hash)))
self._save_header_to_db(header)
new_headers = self._set_as_canonical_chain_head(header)
return new_headers
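    # Example flow (sketch; header objects here are placeholders for real BlockHeader
    # instances whose parent_hash fields already link them into a chain):
    #
    #   new_on_canonical = chaindb.persist_header(header_1)   # parent must be genesis or already known
    #   new_on_canonical = chaindb.persist_header(header_2)
    #   chaindb.get_canonical_head(header_2.chain_address).hash == header_2.hash   # True
    #
    # persist_header both stores the raw header and, via _set_as_canonical_chain_head,
    # updates the block-number-to-hash and canonical-head lookups for that chain.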
def _save_header_to_db(self, header: BlockHeader) -> None:
self.db.set(
header.hash,
rlp.encode(header),
)
def delete_block_from_canonical_chain(self, block_hash: Hash32) -> None:
        '''
        Warning: this will only delete the block's transaction lookups and its number-to-hash
        entry; it will not update the current head of the chain.
        '''
try:
header_to_remove = self.get_block_header_by_hash(block_hash)
# first check to see if it is in the canonical chain
canonical_block_hash = self.get_canonical_block_hash(header_to_remove.block_number, header_to_remove.chain_address)
            #if the block doesn't match the canonical block, then it has already been removed from the canonical chain.
if block_hash == canonical_block_hash:
for transaction_hash in self.get_block_transaction_hashes(header_to_remove):
self._remove_transaction_from_canonical_chain(transaction_hash)
for transaction_hash in self.get_block_receive_transaction_hashes(header_to_remove):
self._remove_transaction_from_canonical_chain(transaction_hash)
self.remove_block_from_canonical_block_hash_lookup(header_to_remove.block_number, chain_address= header_to_remove.chain_address)
except HeaderNotFound:
pass
#todo: check if you can look up block by number once canonical chain is deleted below
def is_in_canonical_chain(self, block_hash: Hash32) -> bool:
try:
header = self.get_block_header_by_hash(block_hash)
except HeaderNotFound:
return False
block_number = header.block_number
chain_address = header.chain_address
try:
existing_header = self.get_canonical_block_header_by_number(block_number, chain_address)
except HeaderNotFound:
return False
if header.hash == existing_header.hash:
return True
else:
return False
#this also accepts a header that has a smaller block number than the current header
#in which case it will truncate the chain.
def _set_as_canonical_chain_head(self, header: BlockHeader) -> Tuple[BlockHeader, ...]:
"""
Returns iterable of headers newly on the canonical head
"""
try:
self.get_block_header_by_hash(header.hash)
except HeaderNotFound:
raise ValueError("Cannot use unknown block hash as canonical head: {}".format(
header.hash))
try:
canonical_header = self.get_canonical_head(chain_address= header.chain_address)
except CanonicalHeadNotFound:
canonical_header = None
if canonical_header is not None and header.block_number <= canonical_header.block_number:
for i in range(header.block_number +1, canonical_header.block_number+1):
header_to_remove = self.get_canonical_block_header_by_number(i, chain_address= header.chain_address)
for transaction_hash in self.get_block_transaction_hashes(header_to_remove):
self._remove_transaction_from_canonical_chain(transaction_hash)
for transaction_hash in self.get_block_receive_transaction_hashes(header_to_remove):
self._remove_transaction_from_canonical_chain(transaction_hash)
self.remove_block_from_canonical_block_hash_lookup(BlockNumber(i), chain_address= header.chain_address)
new_canonical_headers = tuple()
else:
new_canonical_headers = tuple(reversed(self._find_new_ancestors(header)))
# remove transaction lookups for blocks that are no longer canonical
for h in new_canonical_headers:
try:
old_hash = self.get_canonical_block_hash(h.block_number, header.chain_address)
except HeaderNotFound:
# no old block, and no more possible
break
else:
old_header = self.get_block_header_by_hash(old_hash)
                for transaction_hash in self.get_block_transaction_hashes(old_header):
                    self._remove_transaction_from_canonical_chain(transaction_hash)
for transaction_hash in self.get_block_receive_transaction_hashes(old_header):
self._remove_transaction_from_canonical_chain(transaction_hash)
for h in new_canonical_headers:
self._add_block_number_to_hash_lookup(h)
self.db.set(SchemaV1.make_canonical_head_hash_lookup_key(header.chain_address), header.hash)
return new_canonical_headers
@to_tuple
def _find_new_ancestors(self, header: BlockHeader) -> Iterable[BlockHeader]:
"""
Returns the chain leading up from the given header until (but not including)
the first ancestor it has in common with our canonical chain.
If D is the canonical head in the following chain, and F is the new header,
then this function returns (F, E).
A - B - C - D
\
E - F
"""
h = header
while True:
try:
orig = self.get_canonical_block_header_by_number(h.block_number, h.chain_address)
except HeaderNotFound:
# This just means the block is not on the canonical chain.
pass
else:
if orig.hash == h.hash:
# Found the common ancestor, stop.
break
# Found a new ancestor
yield h
if h.parent_hash == GENESIS_PARENT_HASH:
break
else:
h = self.get_block_header_by_hash(h.parent_hash)
def _add_block_number_to_hash_lookup(self, header: BlockHeader) -> None:
"""
Sets a record in the database to allow looking up this header by its
block number.
"""
block_number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(
header.chain_address,
header.block_number
)
self.db.set(
block_number_to_hash_key,
rlp.encode(header.hash, sedes=rlp.sedes.binary),
)
#
# Block API
#
@functools.lru_cache(maxsize=128)
def get_number_of_send_tx_in_block(self, block_hash: Hash32) -> int:
'''
returns the number of send tx in a block
'''
#get header
header = self.get_block_header_by_hash(block_hash)
return self._get_block_transaction_count(header.transaction_root)
@functools.lru_cache(maxsize=128)
def get_number_of_total_tx_in_block(self, block_hash: Hash32) -> int:
        '''
        Returns the total number of send and receive transactions in a block
        '''
# get header
header = self.get_block_header_by_hash(block_hash)
tx_count = self._get_block_transaction_count(header.transaction_root)
tx_count += self._get_block_transaction_count(header.receive_transaction_root)
return tx_count
def get_chain_wallet_address_for_block_hash(self, block_hash: Hash32) -> Address:
block_header = self.get_block_header_by_hash(block_hash)
return block_header.chain_address
def persist_block(self, block: 'BaseBlock') -> None:
'''
Persist the given block's header and uncles.
Assumes all block transactions have been persisted already.
'''
new_canonical_headers = self.persist_header(block.header)
if not (block.reward_bundle.reward_type_1.amount == 0 and block.reward_bundle.reward_type_2.amount == 0):
self.persist_reward_bundle(block.reward_bundle)
self.set_latest_reward_block_number(block.sender, block.number)
for header in new_canonical_headers:
for index, transaction_hash in enumerate(self.get_block_transaction_hashes(header)):
self._add_transaction_to_canonical_chain(transaction_hash, header, index)
for index, transaction_hash in enumerate(self.get_block_receive_transaction_hashes(header)):
self._add_receive_transaction_to_canonical_chain(transaction_hash, header, index)
#add all receive transactions as children to the sender block
self.add_block_receive_transactions_to_parent_child_lookup(header, block.receive_transaction_class)
self.add_block_rewards_to_parent_child_lookup(block.header, block.reward_bundle)
#we also have to save this block as the child of the parent block in the same chain
if block.header.parent_hash != GENESIS_PARENT_HASH:
self.add_block_child(block.header.parent_hash, block.header.hash)
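    # Usage sketch (assumption: `block` is a fully-validated BaseBlock whose transactions were
    # already written into the transaction tries via add_transaction/add_receive_transaction):
    #
    #   chaindb.persist_block(block)
    #   chaindb.get_canonical_head(block.header.chain_address).hash == block.hash   # True
    #
    # persist_block also wires up the parent->child lookups that the stake and
    # descendant-hash queries below rely on.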
def persist_non_canonical_block(self, block: 'BaseBlock') -> None:
self._save_header_to_db(block.header)
if not (block.reward_bundle.reward_type_1.amount == 0 and block.reward_bundle.reward_type_2.amount == 0):
self.persist_reward_bundle(block.reward_bundle)
#add all receive transactions as children to the sender block
self.add_block_receive_transactions_to_parent_child_lookup(block.header, block.receive_transaction_class)
self.add_block_rewards_to_parent_child_lookup(block.header, block.reward_bundle)
#we also have to save this block as the child of the parent block in the same chain
if block.header.parent_hash != GENESIS_PARENT_HASH:
self.add_block_child(block.header.parent_hash, block.header.hash)
#
# Chronologically consistent blockchain db API
#
# returns consistency keys list starting with newest timestamp to oldest.
def get_block_chronological_consistency_keys(self, chain_address: Address, block_number: BlockNumber) -> List[Tuple[Timestamp, Hash32]]:
lookup_key = SchemaV1.make_block_hash_requirement_for_staking_rewards_consistency(chain_address, block_number)
# keys are lists of [min_allowed_timestamp, block_hash_causing_this]
try:
consistency_keys = rlp.decode(self.db[lookup_key], sedes=rlp.sedes.FCountableList(rlp.sedes.FList([rlp.sedes.f_big_endian_int, hash32])), use_list=True)
except KeyError:
return []
consistency_keys.sort(reverse=True)
return consistency_keys
def save_block_chronological_consistency_keys(self, chain_address: Address, block_number: BlockNumber, keys: List[Tuple[Timestamp, Hash32]]) -> None:
# keys are lists of [min_allowed_timestamp, block_hash_causing_this]
lookup_key = SchemaV1.make_block_hash_requirement_for_staking_rewards_consistency(chain_address, block_number)
self.db[lookup_key] = rlp.encode(keys, sedes=rlp.sedes.FCountableList(rlp.sedes.FList([rlp.sedes.f_big_endian_int, hash32])))
def add_block_consistency_key(self, chain_address: Address, block_number: BlockNumber, key: Tuple[Timestamp, Hash32]) -> None:
consistency_keys = self.get_block_chronological_consistency_keys(chain_address, block_number)
consistency_keys_set = set(tuple(x) for x in consistency_keys)
consistency_keys_set.add(tuple(key))
self.save_block_chronological_consistency_keys(chain_address, block_number, tuple(consistency_keys_set))
def delete_block_consistency_key(self, chain_address: Address, block_number: BlockNumber, key: Tuple[Timestamp, Hash32]) -> None:
consistency_keys = self.get_block_chronological_consistency_keys(chain_address, block_number)
consistency_keys_set = set(tuple(x) for x in consistency_keys)
try:
consistency_keys_set.remove(tuple(key))
self.logger.debug("Removed key with tuple lookup")
except KeyError:
pass
self.save_block_chronological_consistency_keys(chain_address, block_number, list(consistency_keys_set))
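    # Data-shape sketch for the chronological consistency keys (values below are placeholders):
    # each entry is a [min_allowed_timestamp, block_hash_causing_this] pair, stored RLP-encoded
    # and returned by get_block_chronological_consistency_keys sorted newest timestamp first.
    #
    #   chaindb.add_block_consistency_key(chain_address, BlockNumber(5), (1600000000, b'\x22' * 32))
    #   chaindb.get_block_chronological_consistency_keys(chain_address, BlockNumber(5))
    #   # -> [[1600000000, b'\x22' * 32]]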
#
# Unprocessed Block API
#
def save_block_as_unprocessed(self, block: 'BaseBlock') -> None:
        '''
        This saves the block as unprocessed, and adds unprocessed-children lookups to any unprocessed
        parents, including the parent on its own chain and the parents referenced by its receive transactions.
        '''
self.logger.debug("saving block number {} as unprocessed on chain {}. the block hash is {}".format(block.number, encode_hex(block.header.chain_address), encode_hex(block.hash)))
self.save_unprocessed_block_lookup(block.hash, block.number, block.header.chain_address)
if self.is_block_unprocessed(block.header.parent_hash):
self.save_unprocessed_children_block_lookup(block.header.parent_hash)
self.save_unprocessed_children_block_lookup_to_transaction_parents(block)
self.save_unprocessed_children_block_lookup_to_reward_proof_parents(block)
def remove_block_from_unprocessed(self, block: 'BaseBlock') -> None:
'''
This removes any unprocessed lookups for this block.
'''
if self.is_block_unprocessed(block.hash):
#delete the two unprocessed lookups for this block
self.delete_unprocessed_block_lookup(block.hash, block.number)
            #delete all unprocessed lookups for transaction parents if necessary
self.delete_unprocessed_children_block_lookup_to_transaction_parents_if_nessissary(block)
            #delete all unprocessed lookups for the chain parent if necessary
if not self.check_all_children_blocks_to_see_if_any_unprocessed(block.header.parent_hash):
self.delete_unprocessed_children_blocks_lookup(block.header.parent_hash)
def save_unprocessed_block_lookup(self, block_hash: Hash32, block_number: BlockNumber, chain_address: Address) -> None:
lookup_key = SchemaV1.make_unprocessed_block_lookup_key(block_hash)
self.db[lookup_key] = b'1'
lookup_key = SchemaV1.make_unprocessed_block_lookup_by_number_key(chain_address, block_number)
self.db[lookup_key] = rlp.encode(block_hash, sedes=rlp.sedes.binary)
def save_unprocessed_children_block_lookup(self, block_hash: Hash32) -> None:
lookup_key = SchemaV1.make_has_unprocessed_block_children_lookup_key(block_hash)
self.db[lookup_key] = b'1'
def save_unprocessed_children_block_lookup_to_transaction_parents(self, block: 'BaseBlock') -> None:
for receive_transaction in block.receive_transactions:
#or do we not even have the block
if not self.is_in_canonical_chain(receive_transaction.sender_block_hash):
self.logger.debug("saving parent children unprocessed block lookup for block hash {}".format(encode_hex(receive_transaction.sender_block_hash)))
self.save_unprocessed_children_block_lookup(receive_transaction.sender_block_hash)
def save_unprocessed_children_block_lookup_to_reward_proof_parents(self, block: 'BaseBlock') -> None:
for node_staking_score in block.reward_bundle.reward_type_2.proof:
if not self.is_in_canonical_chain(node_staking_score.head_hash_of_sender_chain):
self.logger.debug("saving parent children unprocessed block lookup for reward proof parents block hash {}".format(encode_hex(node_staking_score.head_hash_of_sender_chain)))
self.save_unprocessed_children_block_lookup(node_staking_score.head_hash_of_sender_chain)
def delete_unprocessed_children_block_lookup_to_transaction_parents_if_nessissary(self, block: 'BaseBlock') -> None:
for receive_transaction in block.receive_transactions:
#or do we not even have the block
if not self.check_all_children_blocks_to_see_if_any_unprocessed(receive_transaction.sender_block_hash) :
self.delete_unprocessed_children_blocks_lookup(receive_transaction.sender_block_hash)
def has_unprocessed_children(self, block_hash: Hash32) -> bool:
'''
Returns True if the block has unprocessed children
'''
lookup_key = SchemaV1.make_has_unprocessed_block_children_lookup_key(block_hash)
try:
self.db[lookup_key]
return True
except KeyError:
return False
def is_block_unprocessed(self, block_hash: Hash32) -> bool:
'''
Returns True if the block is unprocessed
'''
#if block_hash == GENESIS_PARENT_HASH:
# return True
lookup_key = SchemaV1.make_unprocessed_block_lookup_key(block_hash)
try:
self.db[lookup_key]
return True
except KeyError:
return False
def get_unprocessed_block_hash_by_block_number(self, chain_address: Address, block_number: BlockNumber) -> Optional[Hash32]:
        '''
        Returns the block hash if an unprocessed block exists for this block number on this chain, otherwise returns None
        '''
lookup_key = SchemaV1.make_unprocessed_block_lookup_by_number_key(chain_address, block_number)
try:
return rlp.decode(self.db[lookup_key], sedes = rlp.sedes.binary)
except KeyError:
return None
def get_unprocessed_block_header_by_block_number(self, chain_address: Address, block_number: BlockNumber) -> BlockHeader:
hash = self.get_unprocessed_block_hash_by_block_number(chain_address, block_number)
if hash is None:
raise HeaderNotFound("No unprocessed block exists on chain {} with block number {}".format(encode_hex(chain_address), block_number))
else:
return self.get_block_header_by_hash(hash)
def delete_unprocessed_block_lookup(self, block_hash: Hash32, block_number: BlockNumber) -> None:
lookup_key = SchemaV1.make_unprocessed_block_lookup_key(block_hash)
try:
del(self.db[lookup_key])
except KeyError:
pass
wallet_address = self.get_chain_wallet_address_for_block_hash(block_hash)
lookup_key = SchemaV1.make_unprocessed_block_lookup_by_number_key(wallet_address, block_number)
try:
del(self.db[lookup_key])
except KeyError:
pass
def delete_unprocessed_children_blocks_lookup(self, block_hash: Hash32) -> None:
'''
removes the lookup that says if this block has unprocessed children
'''
lookup_key = SchemaV1.make_has_unprocessed_block_children_lookup_key(block_hash)
try:
del(self.db[lookup_key])
except KeyError:
pass
def check_all_children_blocks_to_see_if_any_unprocessed(self, block_hash: Hash32) -> bool:
        '''
        Manually goes through all children blocks instead of relying only on the lookup table.
        Returns True if any children are unprocessed, otherwise False.
        '''
if not self.has_unprocessed_children(block_hash):
return False
children_block_hashes = self.get_block_children(block_hash)
        if children_block_hashes is None:
return False
for child_block_hash in children_block_hashes:
if self.is_block_unprocessed(child_block_hash):
return True
return False
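    # Bookkeeping sketch for the unprocessed-block lookups: two kinds of keys are kept, one
    # flagging a block hash itself as unprocessed, and one flagging a block hash as having
    # unprocessed children. A typical import-out-of-order flow looks like:
    #
    #   chaindb.save_block_as_unprocessed(block)          # parent not processed yet
    #   chaindb.is_block_unprocessed(block.hash)          # -> True
    #   # ...the parent gets processed and imported...
    #   chaindb.remove_block_from_unprocessed(block)      # clears both kinds of lookups when possible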
#
# Transaction API
#
def add_receipt(self, block_header: BlockHeader, index_key: int, receipt: Receipt, send_or_receive) -> Hash32:
"""
        Adds the given receipt to the provided block header.
        Returns the updated `receipt_root` for the updated block header.
"""
receipt_db = HexaryTrie(db=self.db, root_hash=block_header.receipt_root)
receipt_db[index_key] = rlp.encode(receipt)
return receipt_db.root_hash
def add_transaction(self,
block_header: BlockHeader,
index_key: int,
transaction: 'BaseTransaction') -> Hash32:
"""
        Adds the given transaction to the provided block header.
        Returns the updated `transaction_root` for the updated block header.
"""
transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
transaction_db[index_key] = rlp.encode(transaction)
return transaction_db.root_hash
def add_receive_transaction(self,
block_header: BlockHeader,
index_key: int,
transaction: 'BaseReceiveTransaction') -> Hash32:
"""
        Adds the given receive transaction to the provided block header.
        Returns the updated `receive_transaction_root` for the updated block header.
"""
transaction_db = HexaryTrie(self.db, root_hash=block_header.receive_transaction_root)
transaction_db[index_key] = rlp.encode(transaction)
return transaction_db.root_hash
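    # Pattern note (sketch): add_receipt, add_transaction and add_receive_transaction all follow
    # the same trie-update shape; the caller is responsible for writing the returned root back
    # into the header. For example (the index and objects are placeholders):
    #
    #   new_tx_root = chaindb.add_transaction(header, rlp.encode(0), tx)
    #   header = header.copy(transaction_root=new_tx_root)
    #
    # header.copy(...) assumes the immutable-copy API of rlp.Serializable-style headers; if
    # headers are mutable in this codebase, assign the field directly instead.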
def get_block_transactions(
self,
header: BlockHeader,
transaction_class: Type['BaseTransaction']) -> Iterable['BaseTransaction']:
"""
        Returns an iterable of transactions for the block specified by the
given block header.
"""
return self._get_block_transactions(header.transaction_root, transaction_class)
def get_block_receive_transactions(
self,
header: BlockHeader,
transaction_class: Type['BaseReceiveTransaction']) -> List['BaseReceiveTransaction']:
"""
        Returns an iterable of receive transactions for the block specified by the
given block header.
"""
return self._get_block_transactions(header.receive_transaction_root, transaction_class)
@to_list
def get_block_transaction_hashes(self, block_header: BlockHeader) -> Iterable[Hash32]:
"""
        Returns an iterable of the transaction hashes from the block specified
by the given block header.
"""
all_encoded_transactions = self._get_block_transaction_data(
block_header.transaction_root,
)
for encoded_transaction in all_encoded_transactions:
yield keccak(encoded_transaction)
@to_list
def get_block_receive_transaction_hashes(self, block_header: BlockHeader) -> Iterable[Hash32]:
"""
        Returns an iterable of the receive transaction hashes from the block specified
by the given block header.
"""
all_encoded_transactions = self._get_block_transaction_data(
block_header.receive_transaction_root,
)
for encoded_transaction in all_encoded_transactions:
yield keccak(encoded_transaction)
def get_transaction_receipt(self, tx_hash: Hash32) -> Receipt:
block_hash, index, is_receive = self.get_transaction_index(tx_hash)
block_header = self.get_block_header_by_hash(block_hash)
if is_receive:
num_send_transactions = self.get_number_of_send_tx_in_block(block_hash)
index += num_send_transactions
return self.get_receipt_by_idx(block_header, index)
def get_cumulative_gas_used(self, tx_hash: Hash32) -> int:
block_hash, index, is_receive = self.get_transaction_index(tx_hash)
block_header = self.get_block_header_by_hash(block_hash)
receipts = self.get_receipts(block_header)
cumulative = 0
for i in range(index+1):
cumulative += receipts[i].gas_used
return cumulative
def get_receipt_by_idx(self,
header: BlockHeader,
receipt_idx: int,
receipt_class: Type[Receipt] = Receipt) -> Optional[Receipt]:
receipt_db = HexaryTrie(db=self.db, root_hash=header.receipt_root)
receipt_key = rlp.encode(receipt_idx)
try:
receipt_data = receipt_db[receipt_key]
return rlp.decode(receipt_data, sedes=receipt_class)
except KeyError:
return None
@to_tuple
def get_receipts(self,
header: BlockHeader,
receipt_class: Type[Receipt] = Receipt) -> Tuple[Receipt]:
"""
Returns an iterable of receipts for the block specified by the given
block header.
"""
receipt_db = HexaryTrie(db=self.db, root_hash=header.receipt_root)
for receipt_idx in itertools.count():
receipt_key = rlp.encode(receipt_idx)
if receipt_key in receipt_db:
receipt_data = receipt_db[receipt_key]
yield rlp.decode(receipt_data, sedes=receipt_class)
else:
break
def get_receipts_by_block_hash(self,
hash: Hash32) -> Tuple[Receipt]:
block_header = self.get_block_header_by_hash(hash)
return self.get_receipts(block_header)
# def get_transaction_by_index(
# self,
# block_number: BlockNumber,
# transaction_index: int,
# transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
# """
# Returns the transaction at the specified `transaction_index` from the
# block specified by `block_number` from the canonical chain.
#
# Raises TransactionNotFound if no block
# """
# try:
# block_header = self.get_canonical_block_header_by_number(block_number)
# except HeaderNotFound:
# raise TransactionNotFound("Block {} is not in the canonical chain".format(block_number))
# transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
# encoded_index = rlp.encode(transaction_index)
# if encoded_index in transaction_db:
# encoded_transaction = transaction_db[encoded_index]
# return rlp.decode(encoded_transaction, sedes=transaction_class)
# else:
# raise TransactionNotFound(
# "No transaction is at index {} of block {}".format(transaction_index, block_number))
def get_transaction_by_index_and_block_hash(
self,
block_hash: Hash32,
transaction_index: int,
transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
"""
        Returns the transaction at the specified `transaction_index` from the
        block specified by `block_hash`.
        Raises TransactionNotFound if the block or transaction cannot be found.
"""
try:
block_header = self.get_block_header_by_hash(block_hash)
except HeaderNotFound:
raise TransactionNotFound("Block {} is not in the canonical chain".format(block_hash))
transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
encoded_index = rlp.encode(transaction_index)
if encoded_index in transaction_db:
encoded_transaction = transaction_db[encoded_index]
return rlp.decode(encoded_transaction, sedes=transaction_class)
else:
raise TransactionNotFound(
"No transaction is at index {} of block {}".format(transaction_index, block_header))
def get_receive_transaction_by_index_and_block_hash(
self,
block_hash: Hash32,
transaction_index: int,
transaction_class: Type['BaseReceiveTransaction']) -> 'BaseReceiveTransaction':
"""
        Returns the receive transaction at the specified `transaction_index` from the
        block specified by `block_hash`.
        Raises TransactionNotFound if the block or transaction cannot be found.
"""
try:
block_header = self.get_block_header_by_hash(block_hash)
except HeaderNotFound:
raise TransactionNotFound("Block {} is not in the canonical chain".format(block_hash))
transaction_db = HexaryTrie(self.db, root_hash=block_header.receive_transaction_root)
encoded_index = rlp.encode(transaction_index)
if encoded_index in transaction_db:
encoded_transaction = transaction_db[encoded_index]
return rlp.decode(encoded_transaction, sedes=transaction_class)
else:
raise TransactionNotFound(
"No transaction is at index {} of block {}".format(transaction_index, block_header))
def get_transaction_by_hash(self,
tx_hash: Hash32,
send_tx_class: Type['BaseTransaction'],
receive_tx_class: Type['BaseReceiveTransaction']) -> Union['BaseTransaction', 'BaseReceiveTransaction']:
block_hash, index, is_receive = self.get_transaction_index(tx_hash)
if is_receive:
transaction = self.get_receive_transaction_by_index_and_block_hash(
block_hash,
index,
receive_tx_class,
)
else:
transaction = self.get_transaction_by_index_and_block_hash(
block_hash,
index,
send_tx_class,
)
return transaction
# def get_receive_transaction_by_index(
# self,
# block_number: BlockNumber,
# transaction_index: int,
# transaction_class: 'BaseReceiveTransaction') -> 'BaseReceiveTransaction':
# """
# Returns the transaction at the specified `transaction_index` from the
# block specified by `block_number` from the canonical chain.
#
# Raises TransactionNotFound if no block
# """
# try:
# block_header = self.get_canonical_block_header_by_number(block_number, chain_address)
# except HeaderNotFound:
# raise TransactionNotFound("Block {} is not in the canonical chain".format(block_number))
# transaction_db = HexaryTrie(self.db, root_hash=block_header.receive_transaction_root)
# encoded_index = rlp.encode(transaction_index)
# if encoded_index in transaction_db:
# encoded_transaction = transaction_db[encoded_index]
# return rlp.decode(encoded_transaction, sedes=transaction_class)
# else:
# raise TransactionNotFound(
# "No transaction is at index {} of block {}".format(transaction_index, block_number))
def get_transaction_chain_address(self, transaction_hash: Hash32) -> Address:
block_hash, _, _ = self.get_transaction_index(transaction_hash)
return self.get_chain_wallet_address_for_block_hash(block_hash)
def get_transaction_block_hash(self, transaction_hash: Hash32) -> Hash32:
block_hash, _, _ = self.get_transaction_index(transaction_hash)
return block_hash
def get_transaction_index(self, transaction_hash: Hash32) -> Tuple[Hash32, int, bool]:
"""
        Returns a 3-tuple of (block_hash, transaction_index, is_receive) indicating which
        block the given transaction can be found in, at what index in the block's
        transactions, and whether it is a receive transaction.
Raises TransactionNotFound if the transaction_hash is not found in the
canonical chain.
"""
key = SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash)
try:
encoded_key = self.db[key]
except KeyError:
raise TransactionNotFound(
"Transaction {} not found in canonical chain".format(encode_hex(transaction_hash)))
transaction_key = rlp.decode(encoded_key, sedes=TransactionKey)
return (transaction_key.block_hash, transaction_key.index, transaction_key.is_receive)
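    # Lookup sketch: the index entry round-trips through TransactionKey, so callers get back
    # (block_hash, index, is_receive) and can dispatch to the send or receive transaction
    # getter, exactly as get_transaction_by_hash does above.
    #
    #   block_hash, index, is_receive = chaindb.get_transaction_index(tx_hash)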
    def _get_block_transaction_data(self, transaction_root: Hash32) -> Iterable[bytes]:
        '''
        Returns an iterable of the RLP encoded transactions stored under the given transaction trie root
        '''
transaction_db = HexaryTrie(self.db, root_hash=transaction_root)
for transaction_idx in itertools.count():
transaction_key = rlp.encode(transaction_idx)
if transaction_key in transaction_db:
yield transaction_db[transaction_key]
else:
break
@functools.lru_cache(maxsize=32)
    def _get_block_transaction_count(self, transaction_root: Hash32) -> int:
'''
Returns the transaction count
'''
count = 0
transaction_db = HexaryTrie(self.db, root_hash=transaction_root)
for transaction_idx in itertools.count():
transaction_key = rlp.encode(transaction_idx)
if transaction_key not in transaction_db:
return count
count += 1
@functools.lru_cache(maxsize=32)
@to_list
def _get_block_transactions(
self,
transaction_root: Hash32,
transaction_class: Union[Type['BaseTransaction'], Type['BaseReceiveTransaction']]) -> List[Union['BaseTransaction', 'BaseReceiveTransaction']]:
"""
Memoizable version of `get_block_transactions`
"""
for encoded_transaction in self._get_block_transaction_data(transaction_root):
yield rlp.decode(encoded_transaction, sedes=transaction_class)
def _remove_transaction_from_canonical_chain(self, transaction_hash: Hash32) -> None:
"""
Removes the transaction specified by the given hash from the canonical
chain.
"""
self.db.delete(SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash))
def _add_transaction_to_canonical_chain(self,
transaction_hash: Hash32,
block_header: BlockHeader,
index: int
) -> None:
"""
:param bytes transaction_hash: the hash of the transaction to add the lookup for
:param block_header: The header of the block with the txn that is in the canonical chain
:param int index: the position of the transaction in the block
- add lookup from transaction hash to the block number and index that the body is stored at
- remove transaction hash to body lookup in the pending pool
"""
transaction_key = TransactionKey(block_header.hash, index, False)
self.db.set(
SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash),
rlp.encode(transaction_key),
)
def _add_receive_transaction_to_canonical_chain(self,
transaction_hash: Hash32,
block_header: BlockHeader,
index: int
) -> None:
"""
:param bytes transaction_hash: the hash of the transaction to add the lookup for
:param block_header: The header of the block with the txn that is in the canonical chain
:param int index: the position of the transaction in the block
- add lookup from transaction hash to the block number and index that the body is stored at
- remove transaction hash to body lookup in the pending pool
"""
transaction_key = TransactionKey(block_header.hash, index, True)
self.db.set(
SchemaV1.make_transaction_hash_to_block_lookup_key(transaction_hash),
rlp.encode(transaction_key),
)
#
# Block children and Stake API
#
def add_block_receive_transactions_to_parent_child_lookup(self, block_header: 'BlockHeader', transaction_class: Type['BaseReceiveTransaction']) -> None:
block_receive_transactions = self.get_block_receive_transactions(block_header,
transaction_class)
for receive_transaction in block_receive_transactions:
self.add_block_child(
receive_transaction.sender_block_hash,
block_header.hash)
def add_block_rewards_to_parent_child_lookup(self, block_header: 'BlockHeader', reward_bundle: BaseRewardBundle) -> None:
for node_staking_score in reward_bundle.reward_type_2.proof:
self.logger.debug("saving parent child lookup for reward bundle proof")
self.add_block_child(node_staking_score.head_hash_of_sender_chain, block_header.hash)
def remove_block_receive_transactions_to_parent_child_lookup(self, block_header: 'BlockHeader', transaction_class: Type['BaseReceiveTransaction']) -> None:
block_receive_transactions = self.get_block_receive_transactions(block_header,
transaction_class)
for receive_transaction in block_receive_transactions:
self.remove_block_child(
receive_transaction.sender_block_hash,
block_header.hash)
def remove_block_child(self,
parent_block_hash: Hash32,
child_block_hash: Hash32) -> None:
validate_word(parent_block_hash, title="Block_hash")
validate_word(child_block_hash, title="Block_hash")
block_children = self.get_block_children(parent_block_hash)
if block_children is None or child_block_hash not in block_children:
self.logger.debug("tried to remove a block child that doesnt exist. It was likely already deleted when that block was purged.")
else:
block_children.remove(child_block_hash)
self.save_block_children(parent_block_hash, block_children)
def remove_block_from_all_parent_child_lookups(self, block_header: 'BlockHeader', receive_transaction_class: Type['BaseReceiveTransaction']) -> None:
'''
Removes block from parent child lookups coming from transactions, and from within the chain.
'''
self.remove_block_receive_transactions_to_parent_child_lookup(block_header, receive_transaction_class)
self.remove_block_child(block_header.parent_hash, block_header.hash)
def add_block_child(self,
parent_block_hash: Hash32,
child_block_hash: Hash32) -> None:
validate_word(parent_block_hash, title="Block_hash")
validate_word(child_block_hash, title="Block_hash")
block_children = self.get_block_children(parent_block_hash)
if block_children is None:
self.save_block_children(parent_block_hash, [child_block_hash])
elif child_block_hash in block_children:
self.logger.debug("tried adding a child block that was already added")
else:
block_children.append(child_block_hash)
self.save_block_children(parent_block_hash, block_children)
def get_block_children(self, parent_block_hash: Hash32) -> List[Hash32]:
validate_word(parent_block_hash, title="Block_hash")
block_children_lookup_key = SchemaV1.make_block_children_lookup_key(parent_block_hash)
try:
to_return = rlp.decode(self.db[block_children_lookup_key], sedes=rlp.sedes.FCountableList(hash32), use_list=True)
if to_return == []:
return None
return to_return
except KeyError:
return None
def get_all_descendant_block_hashes(self, block_hash: Hash32) -> List[Hash32]:
validate_word(block_hash, title="Block_hash")
        descendant_blocks = self._get_all_descendant_block_hashes(block_hash)
        return descendant_blocks
def _get_all_descendant_block_hashes(self, block_hash: Hash32, exclude_block_hashes: Set[Hash32] = set()) -> List[Hash32]:
# Needed to add exclude_block_hashes to stop circular infinite loops. Seems like it is possible for parents to be their own children
#self.logger.debug('test1')
#lookup children
children = self.get_block_children(block_hash)
#self.logger.debug('test2 {}'.format(children))
        if children is None:
return None
else:
child_blocks = set()
for child_block_hash in children:
if child_block_hash not in exclude_block_hashes:
#self.logger.debug('test3 {}'.format(child_block_hash))
child_blocks.add(child_block_hash)
exclude_block_hashes_current = exclude_block_hashes | child_blocks
sub_children_blocks = self._get_all_descendant_block_hashes(child_block_hash, exclude_block_hashes= exclude_block_hashes_current)
if sub_children_blocks is not None:
child_blocks.update(sub_children_blocks)
return child_blocks
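    # Traversal sketch: given parent->child lookups A->[B] and B->[C], calling
    # _get_all_descendant_block_hashes(A) returns the set {B, C}. The exclude_block_hashes set
    # is threaded through the recursion purely as a cycle guard, since (as noted above) a block
    # can end up recorded as its own ancestor through the parent-child lookup table.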
def save_block_children(self, parent_block_hash: Hash32,
block_children: List[Hash32]) -> None:
validate_word(parent_block_hash, title="Block_hash")
block_children_lookup_key = SchemaV1.make_block_children_lookup_key(parent_block_hash)
self.db[block_children_lookup_key] = rlp.encode(block_children, sedes=rlp.sedes.FCountableList(hash32))
def delete_all_block_children_lookups(self, parent_block_hash: Hash32) -> None:
validate_word(parent_block_hash, title="Block_hash")
block_children_lookup_key = SchemaV1.make_block_children_lookup_key(parent_block_hash)
try:
del(self.db[block_children_lookup_key])
except KeyError:
pass
def get_block_children_chains(self, block_hash: Hash32, exclude_chains:Set = None) -> Set[Address]:
validate_word(block_hash, title="Block_hash")
child_chains = self._get_block_children_chains(block_hash)
if child_chains is None:
return set()
if exclude_chains is not None:
child_chains = child_chains - exclude_chains
return child_chains
def _get_block_children_chains(self, block_hash: Hash32) -> Set[Address]:
#lookup children
children = self.get_block_children(block_hash)
        if children is None:
return set()
else:
child_chains = set()
for child_block_hash in children:
chain_wallet_address = self.get_chain_wallet_address_for_block_hash(child_block_hash)
child_chains.add(chain_wallet_address)
sub_children_chain_wallet_addresses = self._get_block_children_chains(child_block_hash)
child_chains.update(sub_children_chain_wallet_addresses)
return child_chains
#This doesnt include stake from this block
def get_block_stake_from_children(self, block_hash: Hash32, coin_mature_time_for_staking: Timestamp, exclude_chains: Set = None) -> int:
validate_word(block_hash, title="Block Hash")
children_chain_wallet_addresses = self.get_block_children_chains(block_hash, exclude_chains)
origin_wallet_address = self.get_chain_wallet_address_for_block_hash(block_hash)
try:
children_chain_wallet_addresses.remove(origin_wallet_address)
except KeyError:
pass
except AttributeError:
pass
self.logger.debug(
"get_block_stake_from_children. children wallet addresses: {}".format(children_chain_wallet_addresses))
total_stake = 0
for wallet_address in children_chain_wallet_addresses:
total_stake += self.get_mature_stake(wallet_address, coin_mature_time_for_staking)
return total_stake
#this includes children and blocks corresponding to these hashes
def get_total_block_stake_of_block_hashes(self, block_hashes: List[Hash32], coin_mature_time_for_staking: Timestamp, timestamp_for_stake = None) -> int:
        '''
        This will not double count any addresses that the blocks might have in common.
        timestamp_for_stake is the time at which stake is calculated, so balances must be at least
        coin_mature_time_for_staking seconds older than timestamp_for_stake to count as mature.
        :param block_hashes:
        :return:
        '''
children_chain_wallet_addresses = set()
for block_hash in block_hashes:
children_chain_wallet_addresses.update(self.get_block_children_chains(block_hash))
origin_wallet_address = self.get_chain_wallet_address_for_block_hash(block_hash)
try:
children_chain_wallet_addresses.add(origin_wallet_address)
except KeyError:
pass
except AttributeError:
pass
total_stake = 0
for wallet_address in children_chain_wallet_addresses:
total_stake += self.get_mature_stake(wallet_address, coin_mature_time_for_staking, timestamp_for_stake)
return total_stake
def get_mature_stake(self, wallet_address: Address, coin_mature_time_for_staking: Timestamp, timestamp: Timestamp = None,
raise_canonical_head_not_found_error: bool = False) -> int:
if timestamp is None:
timestamp = int(time.time())
validate_uint256(timestamp, 'timestamp')
validate_canonical_address(wallet_address, title="Wallet Address")
# get account balance
return self._get_balance_at_time(wallet_address,
timestamp - coin_mature_time_for_staking,
raise_canonical_head_not_found_error=raise_canonical_head_not_found_error)
def _get_balance_at_time(self, wallet_address: Address, timestamp: Timestamp = None,
raise_canonical_head_not_found_error: bool = False) -> int:
if timestamp is None:
timestamp = int(time.time())
try:
canonical_head = self.get_canonical_head(chain_address=wallet_address)
except CanonicalHeadNotFound as e:
if raise_canonical_head_not_found_error:
raise e
else:
return 0
if canonical_head.timestamp <= timestamp:
return canonical_head.account_balance
else:
if canonical_head.block_number > 0:
for i in range(canonical_head.block_number - 1, -1, -1):
header = self.get_canonical_block_header_by_number(i, wallet_address)
if header.timestamp <= timestamp:
return header.account_balance
return 0
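    # Worked sketch (numbers are placeholders): with coin_mature_time_for_staking = 3600 and
    # timestamp = now, get_mature_stake(addr, 3600) returns the account_balance recorded in the
    # newest canonical header on addr's chain whose timestamp is <= now - 3600, or 0 if the
    # chain has no canonical head (unless raise_canonical_head_not_found_error) or no block
    # that old.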
#
# Historical minimum allowed gas price API for throttling the network
#
def save_historical_minimum_gas_price(self, historical_minimum_gas_price: List[List[Union[Timestamp, int]]]) -> None:
'''
This takes list of timestamp, gas_price. The timestamps are every 100 seconds
'''
lookup_key = SchemaV1.make_historical_minimum_gas_price_lookup_key()
encoded_data = rlp.encode(historical_minimum_gas_price[-MAX_NUM_HISTORICAL_MIN_GAS_PRICE_TO_KEEP:],sedes=rlp.sedes.FCountableList(rlp.sedes.FList([rlp.sedes.f_big_endian_int, rlp.sedes.f_big_endian_int])))
self.db.set(
lookup_key,
encoded_data,
)
def load_historical_minimum_gas_price(self, sort:bool = True) -> Optional[List[List[Union[Timestamp, int]]]]:
'''
saved as timestamp, min gas price
'''
lookup_key = SchemaV1.make_historical_minimum_gas_price_lookup_key()
try:
data = rlp.decode(self.db[lookup_key], sedes=rlp.sedes.FCountableList(rlp.sedes.FList([rlp.sedes.f_big_endian_int, rlp.sedes.f_big_endian_int])), use_list = True)
if sort:
if len(data) > 0:
data.sort()
return data
except KeyError:
return None
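    # The stored value is a list of [timestamp, min_gas_price] pairs on 100-second
    # boundaries, for example (illustrative values only):
    #   [[1554000000, 1], [1554000100, 1], [1554000200, 2], ...]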
def save_historical_tx_per_centisecond(self, historical_tx_per_centisecond: List[List[int]], de_sparse = True) -> None:
'''
This takes list of timestamp, tx_per_centisecond.
this one is naturally a sparse list because some 100 second intervals might have no tx. So we can de_sparse it.
'''
if de_sparse:
historical_tx_per_centisecond = de_sparse_timestamp_item_list(historical_tx_per_centisecond, 100, filler = 0)
lookup_key = SchemaV1.make_historical_tx_per_centisecond_lookup_key()
encoded_data = rlp.encode(historical_tx_per_centisecond[-MAX_NUM_HISTORICAL_MIN_GAS_PRICE_TO_KEEP:],sedes=rlp.sedes.FCountableList(rlp.sedes.FList([rlp.sedes.f_big_endian_int, rlp.sedes.f_big_endian_int])))
self.db.set(
lookup_key,
encoded_data,
)
def load_historical_tx_per_centisecond(self, sort = True) -> Optional[List[List[int]]]:
'''
returns a list of [timestamp, tx/centisecond]
'''
lookup_key = SchemaV1.make_historical_tx_per_centisecond_lookup_key()
try:
data = rlp.decode(self.db[lookup_key], sedes=rlp.sedes.FCountableList(rlp.sedes.FList([rlp.sedes.f_big_endian_int, rlp.sedes.f_big_endian_int])), use_list=True)
if sort:
if len(data) > 0:
data.sort()
return data
except KeyError:
return None
def save_historical_network_tpc_capability(self, historical_tpc_capability: List[List[Union[Timestamp, int]]], de_sparse: bool = False) -> None:
'''
        This takes a list of [timestamp, historical_tpc_capability] pairs. The timestamps fall on
        100 second boundaries (the same windows used by the other historical metrics), and
        historical_tpc_capability must be an integer.
'''
if de_sparse:
historical_tpc_capability = de_sparse_timestamp_item_list(historical_tpc_capability, 100, filler = None)
lookup_key = SchemaV1.make_historical_network_tpc_capability_lookup_key()
encoded_data = rlp.encode(historical_tpc_capability[-MAX_NUM_HISTORICAL_MIN_GAS_PRICE_TO_KEEP:],sedes=rlp.sedes.FCountableList(rlp.sedes.FList([rlp.sedes.f_big_endian_int, rlp.sedes.f_big_endian_int])))
self.db.set(
lookup_key,
encoded_data,
)
def save_current_historical_network_tpc_capability(self, current_tpc_capability: int) -> None:
validate_uint256(current_tpc_capability, title="current_tpc_capability")
existing = self.load_historical_network_tpc_capability()
current_centisecond = int(time.time()/100) * 100
if existing is None:
existing = [[current_centisecond, current_tpc_capability]]
else:
existing.append([current_centisecond, current_tpc_capability])
self.save_historical_network_tpc_capability(existing, de_sparse = True)
def load_historical_network_tpc_capability(self, sort:bool = True) -> Optional[List[List[Union[Timestamp, int]]]]:
'''
Returns a list of [timestamp, transactions per second]
:param sort:
:return:
'''
lookup_key = SchemaV1.make_historical_network_tpc_capability_lookup_key()
try:
data = rlp.decode(self.db[lookup_key], sedes=rlp.sedes.FCountableList(rlp.sedes.FList([rlp.sedes.f_big_endian_int, rlp.sedes.f_big_endian_int])), use_list = True)
if sort:
if len(data) > 0:
data.sort()
return data
except KeyError:
return None
    def _calculate_next_centisecond_minimum_gas_price(self, historical_minimum_allowed_gas: List[int], historical_tx_per_centisecond: List[int], goal_tx_per_centisecond: int) -> int:
goal_tx_per_centisecond = int(goal_tx_per_centisecond*MIN_GAS_PRICE_CALCULATION_GOAL_TX_PER_CENTISECOND_MUTIPLIER)
average_centisecond_delay = MIN_GAS_PRICE_CALCULATION_AVERAGE_DELAY
average_centisecond_window_length = MIN_GAS_PRICE_CALCULATION_AVERAGE_WINDOW_LENGTH
min_centisecond_time_between_change_in_minimum_gas = MIN_GAS_PRICE_CALCULATION_MIN_TIME_BETWEEN_CHANGE_IN_MIN_GAS_PRICE
if not len(historical_minimum_allowed_gas) >= min_centisecond_time_between_change_in_minimum_gas:
            raise NotEnoughDataForHistoricalMinGasPriceCalculation('historical_minimum_allowed_gas is too short. It has a length of {}, but should have a length of at least {}'.format(len(historical_minimum_allowed_gas), min_centisecond_time_between_change_in_minimum_gas))
if not len(historical_tx_per_centisecond) > average_centisecond_delay+average_centisecond_window_length:
raise NotEnoughDataForHistoricalMinGasPriceCalculation('historical_tx_per_centisecond too short. it is a length of {}, but should be a length of {}'.format(len(historical_tx_per_centisecond),average_centisecond_delay+average_centisecond_window_length))
if not are_items_in_list_equal(historical_minimum_allowed_gas[-1*min_centisecond_time_between_change_in_minimum_gas:]):
#we have to wait longer to change minimum gas
return historical_minimum_allowed_gas[-1]
else:
my_sum = sum(historical_tx_per_centisecond[-average_centisecond_delay-average_centisecond_window_length:-average_centisecond_delay])
average = my_sum/average_centisecond_window_length
error = average - goal_tx_per_centisecond
if error > 1:
new_minimum_allowed_gas = historical_minimum_allowed_gas[-1] + 1
elif error < -1:
new_minimum_allowed_gas = historical_minimum_allowed_gas[-1] -1
else:
new_minimum_allowed_gas = historical_minimum_allowed_gas[-1]
if new_minimum_allowed_gas < 1:
new_minimum_allowed_gas = 1
return new_minimum_allowed_gas
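    # In effect this is a simple feedback controller: average the transactions per
    # 100-second window over a delayed window, compare against the (scaled) goal, and,
    # once the price has been stable for the required number of windows, nudge the
    # minimum gas price by at most +/-1 per step, never below 1.
    # Worked example (illustrative): goal = 10, windowed average = 14 -> error = 4 > 1,
    # so the new minimum gas price is the previous value + 1.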
def initialize_historical_minimum_gas_price_at_genesis(self, min_gas_price: int, net_tpc_cap: int, tpc: int = None) -> None:
# we need to initialize the entire additive and fast sync region in time because that is where we check
# that blocks have enough gas
current_centisecond = int(time.time()/100) * 100
historical_minimum_gas_price = []
historical_tx_per_centisecond = []
historical_tpc_capability = []
earliest_required_centisecond = int(time.time()/100)*100-MAX_NUM_HISTORICAL_MIN_GAS_PRICE_TO_KEEP*100
for timestamp in range(earliest_required_centisecond, current_centisecond+100, 100):
historical_minimum_gas_price.append([timestamp, min_gas_price])
if tpc is not None:
historical_tx_per_centisecond.append([timestamp, tpc])
else:
if min_gas_price <= 1:
historical_tx_per_centisecond.append([timestamp, 0])
else:
historical_tx_per_centisecond.append([timestamp, int(net_tpc_cap*0.94)])
historical_tpc_capability.append([timestamp, net_tpc_cap])
self.save_historical_minimum_gas_price(historical_minimum_gas_price)
self.save_historical_tx_per_centisecond(historical_tx_per_centisecond, de_sparse = False)
self.save_historical_network_tpc_capability(historical_tpc_capability, de_sparse = False)
def propogate_historical_min_gas_price_parameters_to_present(self) -> None:
hist_min_gas_price = self.load_historical_minimum_gas_price()
hist_tpc_cap = self.load_historical_network_tpc_capability()
hist_tx_per_centisecond = self.load_historical_tx_per_centisecond()
current_centisecond = int(time.time() / 100) * 100
hist_min_gas_price = propogate_timestamp_item_list_to_present(hist_min_gas_price, 100, current_centisecond)
hist_tpc_cap = propogate_timestamp_item_list_to_present(hist_tpc_cap, 100, current_centisecond)
hist_tx_per_centisecond = propogate_timestamp_item_list_to_present(hist_tx_per_centisecond, 100, current_centisecond)
self.save_historical_minimum_gas_price(hist_min_gas_price)
        # Keep each dataset matched with its own store (tx counts vs. tpc capability).
        self.save_historical_tx_per_centisecond(hist_tx_per_centisecond, de_sparse=False)
        self.save_historical_network_tpc_capability(hist_tpc_cap, de_sparse=False)
def _recalculate_historical_mimimum_gas_price(self, start_timestamp: Timestamp, end_timestamp: Timestamp = None) -> None:
#we just have to delete the ones in front of this time and update
self._delete_newer_historical_mimimum_gas_price(start_timestamp)
#then update the missing items:
self._update_historical_mimimum_gas_price(end_timestamp=end_timestamp)
def _delete_newer_historical_mimimum_gas_price(self, start_timestamp: Timestamp) -> None:
self.logger.debug("deleting historical min gas price newer than {}".format(start_timestamp))
hist_min_gas_price = self.load_historical_minimum_gas_price()
if (hist_min_gas_price is None
or len(hist_min_gas_price) < MIN_GAS_PRICE_CALCULATION_MIN_TIME_BETWEEN_CHANGE_IN_MIN_GAS_PRICE):
#there is no data for calculating min gas price
raise HistoricalMinGasPriceError("tried to update historical minimum gas price but historical minimum gas price has not been initialized")
sorted_hist_min_gas_price = SortedDict(hist_min_gas_price)
# if sorted_hist_min_gas_price.peekitem(0)[0] > start_timestamp:
# raise HistoricalMinGasPriceError("tried to recalculate historical minimum gas price at timestamp {}, however that timestamp doesnt exist".format(start_timestamp))
#
        # Make sure we leave at least the minimum amount needed to calculate future min gas prices; otherwise we can't do anything.
if MIN_GAS_PRICE_CALCULATION_MIN_TIME_BETWEEN_CHANGE_IN_MIN_GAS_PRICE > (MIN_GAS_PRICE_CALCULATION_AVERAGE_DELAY + MIN_GAS_PRICE_CALCULATION_AVERAGE_WINDOW_LENGTH):
min_required_centiseconds_remaining = MIN_GAS_PRICE_CALCULATION_MIN_TIME_BETWEEN_CHANGE_IN_MIN_GAS_PRICE + 3
else:
min_required_centiseconds_remaining = (MIN_GAS_PRICE_CALCULATION_AVERAGE_DELAY + MIN_GAS_PRICE_CALCULATION_AVERAGE_WINDOW_LENGTH) + 3
#we assume we have hist_net_tpc_capability back to at least as early as the earliest hist_min_gas_price, which should always be the case
try:
earliest_allowed_timestamp = sorted_hist_min_gas_price.keys()[min_required_centiseconds_remaining]
except IndexError:
return
if start_timestamp < earliest_allowed_timestamp:
start_timestamp = earliest_allowed_timestamp
if sorted_hist_min_gas_price.peekitem(-1)[0] > start_timestamp:
end_timestamp = sorted_hist_min_gas_price.peekitem(-1)[0]+100
for timestamp in range(start_timestamp, end_timestamp):
try:
del(sorted_hist_min_gas_price[timestamp])
except KeyError:
pass
hist_min_gas_price = list(sorted_hist_min_gas_price.items())
        # Save the list with the newer items removed.
self.save_historical_minimum_gas_price(hist_min_gas_price)
def _update_historical_mimimum_gas_price(self, end_timestamp: Timestamp=None) -> None:
'''
        Needs to be called any time the chains are modified, and any time we look up the required gas price.
        It saves the historical minimum gas price, lagging the present by up to MIN_GAS_PRICE_CALCULATION_AVERAGE_DELAY,
        using all of the information in our database.
'''
hist_min_gas_price = self.load_historical_minimum_gas_price()
if (hist_min_gas_price is None
or len(hist_min_gas_price) < MIN_GAS_PRICE_CALCULATION_MIN_TIME_BETWEEN_CHANGE_IN_MIN_GAS_PRICE):
#there is no data for calculating min gas price
raise NotEnoughDataForHistoricalMinGasPriceCalculation("tried to update historical minimum gas price but historical minimum gas price has not been initialized")
sorted_hist_min_gas_price = SortedList(hist_min_gas_price)
current_centisecond = int(time.time()/100) * 100
if sorted_hist_min_gas_price[-1][0] != current_centisecond:
hist_tx_per_centi = self.load_historical_tx_per_centisecond()
if hist_tx_per_centi is None:
#there is no data for calculating min gas price
raise NotEnoughDataForHistoricalMinGasPriceCalculation("tried to update historical minimum gas price but historical transactions per centisecond is empty")
if len(hist_tx_per_centi) < (MIN_GAS_PRICE_CALCULATION_AVERAGE_DELAY + MIN_GAS_PRICE_CALCULATION_AVERAGE_WINDOW_LENGTH + 1):
raise NotEnoughDataForHistoricalMinGasPriceCalculation("tried to update historical minimum gas price but there are not enough entries of historical tx per centisecond")
sorted_hist_tx_per_centi = SortedList(hist_tx_per_centi)
#only update if there is a newer entry in hist tx per centi
if sorted_hist_tx_per_centi[-1][0] <= sorted_hist_min_gas_price[-1][0]:
self.logger.debug("No need to update historical minimum gas price because there have been no newer transactions")
return
hist_network_tpc_cap = self.load_historical_network_tpc_capability()
if hist_network_tpc_cap is None:
#there is no data for calculating min gas price
raise NotEnoughDataForHistoricalMinGasPriceCalculation("tried to update historical minimum gas price but historical network tpc capability is empty")
hist_network_tpc_cap = dict(hist_network_tpc_cap)
#now lets do the updating:
start_timestamp = sorted_hist_min_gas_price[-1][0]+100
if not end_timestamp:
end_timestamp = current_centisecond+100
else:
if end_timestamp > current_centisecond:
end_timestamp = current_centisecond+100
else:
end_timestamp = int(end_timestamp/100) * 100+100
historical_minimum_allowed_gas = [i[1] for i in sorted_hist_min_gas_price]
for timestamp in range(start_timestamp, end_timestamp, 100):
historical_tx_per_centisecond = [i[1] for i in sorted_hist_tx_per_centi if i[0] < timestamp]
try:
goal_tx_per_centisecond = hist_network_tpc_cap[timestamp]
except KeyError:
if len(hist_network_tpc_cap) > 0:
timestamps = list(hist_network_tpc_cap.keys())
index = bisect.bisect_right(timestamps, timestamp)
goal_tx_per_centisecond = hist_network_tpc_cap[timestamps[index-1]]
else:
raise HistoricalNetworkTPCMissing
next_centisecond_min_gas_price = self._calculate_next_centisecond_minimum_gas_price(historical_minimum_allowed_gas,
historical_tx_per_centisecond,
goal_tx_per_centisecond)
#first make sure we append it to historical_minimum_allowed_gas
historical_minimum_allowed_gas.append(next_centisecond_min_gas_price)
#now add it to the sortedList
sorted_hist_min_gas_price.add([timestamp, next_centisecond_min_gas_price])
#now lets change it into a list
hist_min_gas_price = list(sorted_hist_min_gas_price)
        # Now remove any entries that are too old.
if len(hist_min_gas_price) > MAX_NUM_HISTORICAL_MIN_GAS_PRICE_TO_KEEP:
hist_min_gas_price = hist_min_gas_price[-MAX_NUM_HISTORICAL_MIN_GAS_PRICE_TO_KEEP:]
#and finally save it
self.save_historical_minimum_gas_price(hist_min_gas_price)
def get_required_block_min_gas_price(self, block_timestamp: Timestamp = None) -> int:
'''
it is important that this doesn't run until our blockchain is up to date. If it is run before that,
it will give the wrong number.
'''
if block_timestamp is None:
block_timestamp = int(time.time())
centisecond_window = int(block_timestamp/100) * 100
hist_min_gas_price = self.load_historical_minimum_gas_price()
if hist_min_gas_price is None or len(hist_min_gas_price) == 0:
#there is no data for calculating min gas price
raise HistoricalMinGasPriceError("tried to get required block minimum gas price but historical minimum gas price has not been initialized")
dict_hist_min_gas_price = dict(hist_min_gas_price)
#self.logger.debug('get_required_block_min_gas_price, centisecond_window = {}, dict_hist_min_gas_price = {}'.format(centisecond_window, dict_hist_min_gas_price))
try:
return dict_hist_min_gas_price[centisecond_window]
except KeyError:
pass
sorted_list = list(hist_min_gas_price)
sorted_list.sort()
        # If we don't have an entry for this centisecond window, fall back to the most recent one we have.
return sorted_list[-1][1]
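    # Illustrative usage (assumed): when validating a newly received block, a node
    # could call
    #   required = chaindb.get_required_block_min_gas_price(block.header.timestamp)
    # and reject the block if its transactions pay less than `required`.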
def min_gas_system_initialization_required(self) -> bool:
test_1 = self.load_historical_minimum_gas_price()
        test_3 = self.load_historical_network_tpc_capability()
if test_1 is None or test_3 is None:
return True
earliest_required_centisecond = int(time.time()) - MAX_NUM_HISTORICAL_MIN_GAS_PRICE_TO_KEEP
newest_required_centisecond = int(time.time()/100) * 100-100*15
test_3.sort()
if test_3[-1][0] < newest_required_centisecond or test_3[0][0] > earliest_required_centisecond:
return True
return False
#
# Reward bundle persisting
#
def persist_reward_bundle(self, reward_bundle: BaseRewardBundle) -> None:
lookup_key = SchemaV1.make_reward_bundle_hash_lookup_key(reward_bundle.hash)
self.db[lookup_key] = rlp.encode(reward_bundle, sedes=BaseRewardBundle)
def get_reward_bundle(self, reward_bundle_hash: Hash32, reward_bundle_class: Type[BaseRewardBundle]) -> BaseRewardBundle:
validate_is_bytes(reward_bundle_hash, 'reward_bundle_hash')
lookup_key = SchemaV1.make_reward_bundle_hash_lookup_key(reward_bundle_hash)
try:
encoded = self.db[lookup_key]
return rlp.decode(encoded, sedes=reward_bundle_class)
except KeyError:
return reward_bundle_class()
def get_block_number_of_latest_reward_block(self, chain_address: Address) -> BlockNumber:
validate_canonical_address(chain_address, title="Wallet Address")
canonical_head = self.get_canonical_head(chain_address)
canonical_block_number = canonical_head.block_number
if canonical_head.reward_hash != BLANK_REWARD_HASH:
return canonical_block_number
if canonical_block_number == 0:
return BlockNumber(0)
        for i in range(canonical_block_number, -1, -1):
            header = self.get_canonical_block_header_by_number(BlockNumber(i), chain_address)
            if header.reward_hash != BLANK_REWARD_HASH:
                return BlockNumber(i)
        # No block with a reward was found; fall back to the genesis block number.
        return BlockNumber(0)
def get_latest_reward_block_number(self, wallet_address: Address) -> BlockNumber:
validate_canonical_address(wallet_address, title="wallet_address")
key = SchemaV1.make_latest_reward_block_number_lookup(wallet_address)
try:
rlp_latest_block_number = self.db.get(key)
except KeyError:
rlp_latest_block_number = None
if rlp_latest_block_number is not None:
# in order to save some headache elsewhere, if a block is deleted for any reason, we won't reset this number
# so lets also check to make sure the block with this number has a reward
block_number = rlp.decode(rlp_latest_block_number, sedes=rlp.sedes.f_big_endian_int)
try:
block_header = self.get_canonical_block_header_by_number(block_number, wallet_address)
except HeaderNotFound:
# need to find previous reward block and save new one
latest_reward_block_number = self.get_block_number_of_latest_reward_block(wallet_address)
self.set_latest_reward_block_number(wallet_address, latest_reward_block_number)
return latest_reward_block_number
if block_header.reward_hash == BLANK_REWARD_HASH:
# need to find previous reward block and save new one
latest_reward_block_number = self.get_block_number_of_latest_reward_block(wallet_address)
self.set_latest_reward_block_number(wallet_address, latest_reward_block_number)
return latest_reward_block_number
return block_number
else:
return BlockNumber(0)
def set_latest_reward_block_number(self, wallet_address: Address, block_number: BlockNumber) -> None:
validate_canonical_address(wallet_address, title="wallet_address")
key = SchemaV1.make_latest_reward_block_number_lookup(wallet_address)
self.db[key] = rlp.encode(block_number, sedes=rlp.sedes.f_big_endian_int)
#
# Raw Database API
#
def exists(self, key: bytes) -> bool:
"""
Returns True if the given key exists in the database.
"""
return self.db.exists(key)
def persist_trie_data_dict(self, trie_data_dict: Dict[bytes, bytes]) -> None:
"""
Store raw trie data to db from a dict
"""
for key, value in trie_data_dict.items():
self.db[key] = value
# When performing a chain sync (either fast or regular modes), we'll very often need to look
# up recent block headers to validate the chain, and decoding their RLP representation is
# relatively expensive so we cache that here, but use a small cache because we *should* only
# be looking up recent blocks.
@functools.lru_cache(128)
def _decode_block_header(header_rlp: bytes) -> BlockHeader:
return rlp.decode(header_rlp, sedes=BlockHeader)
|
import time
import uuid
from minifw.db.orm import Model, StringField, BooleanField, FloatField, TextField
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
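# next_id() yields a 50-character key that fits the varchar(50) id column below:
# a zero-padded millisecond timestamp (15 digits) + a uuid4 hex string (32 chars)
# + a fixed '000' suffix, so ids sort roughly by creation time.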
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, column_type='varchar(50)')
email = StringField(column_type='varchar(50)')
passwd = StringField(column_type='varchar(50)')
admin = BooleanField()
name = StringField(column_type='varchar(50)')
image = StringField(column_type='varchar(500)')
created_at = FloatField(default=time.time)
|
import hashlib
# Specify your full name.
name = 'Стасенко Дмитрий Сергеевич'
reviewers = [
'lodthe',
'darkkeks',
'danlark1'
]
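# The md5 digest of the name, taken as an integer modulo len(reviewers), picks the
# same reviewer deterministically for the same name on every run.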
print(reviewers[int(hashlib.md5(name.encode('utf-8')).hexdigest(), 16) % len(reviewers)])
|
import gettext
import os.path
from climsoft_api.api.stationelement.schema import StationElementWithStation
from climsoft_api.utils.response import translate_schema
ROOT_DIR = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
)
)
LOCALE_DIR = os.path.join(
ROOT_DIR,
"src/climsoft_api/locale"
)
def test_should_translate_schema_successfully():
language = gettext.translation(
domain="climsoft_messages",
localedir=LOCALE_DIR,
languages=["fr"],
)
language.install()
translated_schema = translate_schema(
language.gettext,
StationElementWithStation.schema()
)
    station_name_title = (
        translated_schema["definitions"]["Station"]["properties"]["station_name"]["title"]
    )
    assert station_name_title == "Nom de la station"
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs a test repeatedly to measure its flakiness. The return code is non-zero
if the failure rate is higher than the specified threshold, but is not 100%."""
from __future__ import print_function
import argparse
import multiprocessing.dummy
import subprocess
import sys
import time
def load_options():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--retries', default=1000, type=int,
help='Number of test retries to measure flakiness.')
parser.add_argument('--threshold', default=0.05, type=float,
help='Minimum flakiness level at which test is '
'considered flaky.')
parser.add_argument('--jobs', '-j', type=int, default=1,
help='Number of parallel jobs to run tests.')
parser.add_argument('command', nargs='+', help='Command to run test.')
return parser.parse_args()
def run_test(job):
print('Starting retry attempt %d out of %d' % (job['index'] + 1,
job['retries']))
  # Use subprocess.call (not check_call) so that a failing test contributes its
  # non-zero exit code to the results instead of raising CalledProcessError.
  return subprocess.call(job['cmd'], stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
def main():
options = load_options()
num_passed = num_failed = 0
running = []
pool = multiprocessing.dummy.Pool(processes=options.jobs)
args = [{'index': index, 'retries': options.retries, 'cmd': options.command}
for index in range(options.retries)]
results = pool.map(run_test, args)
num_passed = len([retcode for retcode in results if retcode == 0])
num_failed = len(results) - num_passed
if num_passed == 0:
flakiness = 0
else:
flakiness = num_failed / float(len(results))
print('Flakiness is %.2f' % flakiness)
if flakiness > options.threshold:
return 1
else:
return 0
if __name__ == '__main__':
sys.exit(main())
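# Example invocation (illustrative; the script and test binary names are assumed):
#   ./flakiness_checker.py --retries 200 --jobs 8 --threshold 0.05 out/Default/unit_tests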
|
# -*- coding: utf-8 -*-
import configparser
import os
cf = configparser.ConfigParser()
cf.read("/etc/face_recognition/config/db.ini")
db_host = cf.get("database", "host")
db = cf.get("database", "db")
db_user = cf.get("database", "user")
db_pass = cf.get("database", "pass")
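# Expected layout of /etc/face_recognition/config/db.ini (illustrative values):
#   [database]
#   host = localhost
#   db = face_recognition
#   user = face_user
#   pass = secret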
def get_db_host():
return db_host
def get_db_name():
return db
def get_db_user():
return db_user
def get_db_pass():
return db_pass
|
"""
List Events -
Small program used to list all events in a match and give examples to those events.
"""
import datetime
import json
from gamelocker import wrapper
api_key = "API_KEY_HERE" # VG API Key
api = wrapper.Vainglory(api_key)
# Vainglory game modes
game_modes = {"casual": "casual", "ranked": "ranked", "royale": "casual_aral", "blitz": "blitz_pvp_ranked"}
game_modes_string = " - any - "
for key in game_modes:
game_modes_string += str(key) + " - "
# Vainglory regions
regions = ["na", "eu", "se", "sa", "sg"]
regions_string = " - "
for key in regions:
regions_string += str(key) + " - "
ans = str(input("Debug Mode?(y/n)")).lower()
if ans == "y":
api.debugging = True
ign = input("In-game Name:")
repeat = True
while repeat == True:
region = str(input("In-game Region:" + regions_string)).lower()
if region == "":
region = "na"
repeat = False
elif region in regions:
repeat = False
else:
print(str(region) + " isn't a valid region!")
repeat = True
# Reset the loop flag; the previous loop left it False once a valid region was chosen.
repeat = True
while repeat == True:
game_mode = str(input("Game Mode:" + game_modes_string)).lower()
if game_mode == "" or game_mode == "any":
game_mode = False
repeat = False
elif game_mode in game_modes:
game_mode = game_modes[game_mode]
repeat = False
else:
print(str(game_mode) + " isn't a valid game mode!")
# Args to fetch data with
args = {'filter[createdAt-start]': str(datetime.date.today() - datetime.timedelta(days=28)) + "T00:00:00Z", 'page[limit]': 50, 'filter[playerNames]': ign, "sort": "-createdAt"}
if game_mode != False:
args["filter[gameMode]"] = game_mode
print("ARGS USED:\n" + str(args))
print("\n")
data = api.matches(args, region)
if "error" in data:
print("Something went wrong!")
if api.debugging == False:
print(data)
exit()
print("Data Info -\nMatches: " + str(len(data)))
print("\n")
def process(match):
try:
url = match["telemetry"]["URL"]
print("Telemetry URL:\n" + str(url))
telemetry = api.telemetry(url)
print("Telemetry Info -\nEvents: " + str(len(telemetry)))
print("\n")
# Variables
hickups = 0
eventTypes = {}
for event in telemetry:
try:
if event["type"] not in eventTypes:
eventTypes[event["type"]] = event
except Exception as e:
print("hick-up!\n" + str(e))
print("\n")
print("Process Info:\nHick Ups: " + str(hickups))
print("\n")
for eventType in eventTypes:
print("Event Type: " + str(eventType) + "\nExample:\n" + str(eventTypes[eventType]) + "\n")
ans = input("Save to json as result.json in working directory? (y/n)")
if ans == "y":
with open("result.json", "w") as handler:
json.dump(eventTypes, handler)
print("Saved!")
else:
print("Not saved!")
print("\n")
except Exception as e:
print("An error has occurred while processing match telemetry!\n" + str(e))
def processAll(data):
try:
# Variables
hickups = 0
eventTypes = {}
for match in data:
try:
url = match["telemetry"]["URL"]
print("Telemetry URL:\n" + str(url))
telemetry = api.telemetry(url)
print("Telemetry Info -\nEvents: " + str(len(telemetry)))
print("\n")
for event in telemetry:
try:
if event["type"] not in eventTypes:
eventTypes[event["type"]] = event
except Exception as e:
print("hick-up!\n" + str(e))
except Exception as e:
print("An error has occurred while processing a match!\n" + str(e))
print("\n")
print("Process Info:\nHick Ups: " + str(hickups))
print("\n")
for eventType in eventTypes:
print("Event Type: " + str(eventType) + "\nExample:\n" + str(eventTypes[eventType]) + "\n")
ans = input("Save to json as result.json in working directory? (y/n)")
if ans == "y":
with open("result.json", "w") as handler:
json.dump(eventTypes, handler)
print("Saved!")
else:
print("Not saved!")
print("\n")
except Exception as e:
print("An error has occurred while processing match telemetry!\n" + str(e))
# Variables
current = 0
repeat = True
while repeat == True:
ans = input("On " + str(current + 1) + ", a " + str(data[current]["gameMode"]) + " match, of " + str(len(data)) + " matches; what would you like to do?\n~ next - back - process - processAll - exit ~")
if ans == "next":
if (current + 1) == len(data):
current = 0
else:
current += 1
elif ans == "back":
if current == 0:
current = (len(data) - 1)
else:
current -= 1
elif ans == "process":
process(data[current])
elif ans == "processAll":
ans = input("Processing all the matches may take a while, continue? (y/n)")
if ans == "y":
processAll(data)
else:
print("Cancelling...\n")
elif ans == "exit":
print("bye")
exit()
else:
print(str(ans) + " isn't a command!")
|
import numpy as np
import matplotlib.pyplot as plt
import csv
#PATH1 = '/Users/alihanks/Google Drive/NQUAKE_analysis/PERM/PERM_data/lbnl_sensor_60.csv'
PATH1 = '/Users/alihanks/k40_test_2019-02-06_D3S.csv'
def make_int(lst):
'''
Makes all entries of a list an integer
'''
y = []
for i in lst:
y.append(int(i))
return y
def make_array(lst):
'''
Makes list into an array. Also splices out the irrelevant stuff
for a spectra
'''
y = np.asarray(make_int(lst[12:]))
return y
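# Each CSV row is assumed to carry metadata in its first 12 fields; make_array()
# keeps only the channel counts from index 12 onward and casts them to integers.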
def main_potassium(number, n=1, lower_limit=270, upper_limit=292):
'''
    Main function.
    `number` is the number of spectra to go through (right now the total number is used).
    `n` is the number of hours each spectrum is integrated over. Default is 1.
    It was experimentally determined that the channel of the K-40 centroid lies between 270 and 292 for the current
    peak finder. If a centroid falls outside that range (lower_limit to upper_limit), it is put in the anomaly list
    and plotted.
    To plot individual spectra, tune lower_limit and upper_limit (only spectra outside the range are plotted).
'''
entries = 300*n
anomaly = []
days = (24/n)
i = 0
counter = 0
indexes = []
day = 1
while i < number:
if counter < days:
first_integration = rows[(i*entries)+1:((i+1)*entries)+1]
array_lst = []
for j in first_integration:
array_lst.append(make_array(j))
integrated = sum(array_lst)
fig, ax = plt.subplots()
fig.patch.set_facecolor('white')
plt.title('PERM Spectra')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(1,1000)
#plt.ylim()
x = range(0,len(integrated))
ax.plot(x, integrated, 'bo-', label="CPM")
ax.errorbar(x, integrated, yerr=np.sqrt(integrated), fmt='bo', ecolor='b')
plt.yscale('log')
plt.show()
i+=1
counter +=1
else:
#plt.title('1460 Centroid versus Time for Day {}'.format(day))
#plt.xlabel('hours')
#plt.ylabel('1460 Centroid')
#plt.plot(indexes, 'ro')
#plt.ylim(260, 300)
#plt.show()
print('plotted', day)
counter = 0
indexes = []
day += 1
#plt.title('1460 Centroid versus Time for Day {}'.format(day))
#plt.xlabel('Hour of the Day')
#plt.ylabel('1460 Centroid')
#plt.plot(indexes, 'ro')
#plt.ylim(260, 300)
#plt.show()
print('plotted', day)
counter = 0
indexes = []
day += 1
if anomaly:
print(anomaly)
else:
print('There are no anomalies')
if __name__ == '__main__':
with open(PATH1) as f:
reader = csv.reader(f)
rows = [r for r in reader]
print('This data is taken from the {} csv'.format(PATH1))
main_potassium(len(rows), n=1, lower_limit=0, upper_limit=4096)
|
import numpy as np
import torch
import torch.nn.functional as functional
from grasp_det_seg.modules.losses import smooth_l1
from grasp_det_seg.utils.bbx import ious, calculate_shift, bbx_overlap, mask_overlap
from grasp_det_seg.utils.misc import Empty
from grasp_det_seg.utils.nms import nms
from grasp_det_seg.utils.parallel import PackedSequence
class PredictionGenerator:
"""Perform NMS-based selection of detections
Parameters
----------
nms_threshold : float
IoU threshold for the class-specific NMS
score_threshold : float
Minimum class probability for a detection to be kept
max_predictions : int
Maximum number of detections to keep for each image
"""
def __init__(self,
nms_threshold,
score_threshold,
max_predictions):
self.nms_threshold = nms_threshold
self.score_threshold = score_threshold
self.max_predictions = max_predictions
@staticmethod
def _proposals_for_img(proposals, proposals_idx, roi_cls_logits, roi_bbx_logits, img_it):
relevant = proposals_idx == img_it
if relevant.any():
return proposals[relevant], roi_cls_logits[relevant], roi_bbx_logits[relevant]
else:
return None, None, None
def __call__(self, boxes, scores):
"""Perform NMS-based selection of detections
Parameters
----------
boxes : sequence of torch.Tensor
Sequence of N tensors of class-specific bounding boxes with shapes M_i x C x 4, entries can be None
scores : sequence of torch.Tensor
Sequence of N tensors of class probabilities with shapes M_i x (C + 1), entries can be None
Returns
-------
bbx_pred : PackedSequence
A sequence of N tensors of bounding boxes with shapes S_i x 4, entries are None for images in which no
detection can be kept according to the selection parameters
cls_pred : PackedSequence
A sequence of N tensors of thing class predictions with shapes S_i, entries are None for images in which no
detection can be kept according to the selection parameters
obj_pred : PackedSequence
A sequence of N tensors of detection confidences with shapes S_i, entries are None for images in which no
detection can be kept according to the selection parameters
"""
bbx_pred, cls_pred, obj_pred = [], [], []
for bbx_i, obj_i in zip(boxes, scores):
try:
if bbx_i is None or obj_i is None:
raise Empty
# Do NMS separately for each class
bbx_pred_i, cls_pred_i, obj_pred_i = [], [], []
for cls_id, (bbx_cls_i, obj_cls_i) in enumerate(zip(torch.unbind(bbx_i, dim=1),
torch.unbind(obj_i, dim=1)[1:])):
# Filter out low-scoring predictions
idx = obj_cls_i > self.score_threshold
if not idx.any().item():
continue
bbx_cls_i = bbx_cls_i[idx]
obj_cls_i = obj_cls_i[idx]
# Filter out empty predictions
idx = (bbx_cls_i[:, 2] > bbx_cls_i[:, 0]) & (bbx_cls_i[:, 3] > bbx_cls_i[:, 1])
if not idx.any().item():
continue
bbx_cls_i = bbx_cls_i[idx]
obj_cls_i = obj_cls_i[idx]
# Do NMS
idx = nms(bbx_cls_i.contiguous(), obj_cls_i.contiguous(), threshold=self.nms_threshold, n_max=-1)
if idx.numel() == 0:
continue
bbx_cls_i = bbx_cls_i[idx]
obj_cls_i = obj_cls_i[idx]
# Save remaining outputs
bbx_pred_i.append(bbx_cls_i)
cls_pred_i.append(bbx_cls_i.new_full((bbx_cls_i.size(0),), cls_id, dtype=torch.long))
obj_pred_i.append(obj_cls_i)
# Compact predictions from the classes
if len(bbx_pred_i) == 0:
raise Empty
bbx_pred_i = torch.cat(bbx_pred_i, dim=0)
cls_pred_i = torch.cat(cls_pred_i, dim=0)
obj_pred_i = torch.cat(obj_pred_i, dim=0)
# Do post-NMS selection (if needed)
if bbx_pred_i.size(0) > self.max_predictions:
_, idx = obj_pred_i.topk(self.max_predictions)
bbx_pred_i = bbx_pred_i[idx]
cls_pred_i = cls_pred_i[idx]
obj_pred_i = obj_pred_i[idx]
# Save results
bbx_pred.append(bbx_pred_i)
cls_pred.append(cls_pred_i)
obj_pred.append(obj_pred_i)
except Empty:
bbx_pred.append(None)
cls_pred.append(None)
obj_pred.append(None)
return PackedSequence(bbx_pred), PackedSequence(cls_pred), PackedSequence(obj_pred)
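# Illustrative usage (threshold values assumed, not taken from any config):
#   prediction_generator = PredictionGenerator(nms_threshold=0.5, score_threshold=0.05, max_predictions=100)
#   bbx_pred, cls_pred, obj_pred = prediction_generator(boxes, scores)
# where `boxes` and `scores` are the per-image sequences described in __call__'s docstring.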
class ProposalMatcher:
"""Match proposals to ground truth boxes
Parameters
----------
classes : dict
Dictionary with the number of classes in the dataset -- expected keys: "total", "stuff", "thing"
num_samples : int
Maximum number of non-void proposals to keep for each image
pos_ratio : float
Fraction of `num_samples` reserved for positive proposals
pos_threshold : float
Minimum IoU threshold to mark a proposal as positive
neg_threshold_hi : float
Maximum IoU threshold to mark a proposal as negative / background
neg_threshold_lo : float
Minimum IoU threshold to mark a proposal as negative / background
void_threshold : float
If not zero, minimum overlap threshold with void regions to mark a proposal as void
"""
def __init__(self,
classes,
num_samples=128,
pos_ratio=0.25,
pos_threshold=0.5,
neg_threshold_hi=0.5,
neg_threshold_lo=0.0,
void_threshold=0.):
self.num_stuff = classes["stuff"]
self.num_samples = num_samples
self.pos_ratio = pos_ratio
self.pos_threshold = pos_threshold
self.neg_threshold_hi = neg_threshold_hi
self.neg_threshold_lo = neg_threshold_lo
self.void_threshold = void_threshold
def _subsample(self, pos_idx, neg_idx):
num_pos = int(self.num_samples * self.pos_ratio)
pos_idx = torch.nonzero(pos_idx).view(-1)
if pos_idx.numel() > 0:
rand_selection = np.random.permutation(pos_idx.numel()).astype(np.int64)
rand_selection = torch.from_numpy(rand_selection).to(pos_idx.device)
num_pos = min(num_pos, pos_idx.numel())
pos_idx = pos_idx[rand_selection[:num_pos]]
else:
num_pos = 0
pos_idx = torch.tensor((), dtype=torch.long, device=pos_idx.device)
num_neg = self.num_samples - num_pos
neg_idx = torch.nonzero(neg_idx).view(-1)
if neg_idx.numel() > 0:
rand_selection = np.random.permutation(neg_idx.numel()).astype(np.int64)
rand_selection = torch.from_numpy(rand_selection).to(neg_idx.device)
num_neg = min(num_neg, neg_idx.numel())
neg_idx = neg_idx[rand_selection[:num_neg]]
else:
neg_idx = torch.tensor((), dtype=torch.long, device=neg_idx.device)
return pos_idx, neg_idx
def __call__(self,
proposals,
bbx,
cat,
iscrowd):
"""Match proposals to ground truth boxes
Parameters
----------
proposals : PackedSequence
A sequence of N tensors with shapes P_i x 4 containing bounding box proposals, entries can be None
bbx : sequence of torch.Tensor
A sequence of N tensors with shapes K_i x 4 containing ground truth bounding boxes, entries can be None
cat : sequence of torch.Tensor
A sequence of N tensors with shapes K_i containing ground truth instance -> category mappings, entries can
be None
iscrowd : sequence of torch.Tensor
Sequence of N tensors of ground truth crowd regions (shapes H_i x W_i), or ground truth crowd bounding boxes
(shapes K_i x 4), entries can be None
Returns
-------
out_proposals : PackedSequence
A sequence of N tensors with shapes S_i x 4 containing the non-void bounding box proposals, entries are None
for images that do not contain any non-void proposal
match : PackedSequence
A sequence of matching results with shape S_i, with the following semantic:
- match[i, j] == -1: the j-th anchor in image i is negative
- match[i, j] == k, k >= 0: the j-th anchor in image i is matched to bbx[i][k] (bbox = gt)
"""
out_proposals = []
match = []
for proposals_i, bbx_i_ in zip(proposals, bbx):
bbx_i = bbx_i_[:, [0, 1, 3, 4]]
try:
# Append proposals to ground truth bounding boxes before proceeding
if bbx_i is not None and proposals_i is not None:
proposals_i = torch.cat([bbx_i, proposals_i], dim=0)
elif bbx_i is not None:
proposals_i = bbx_i
else:
raise Empty
if proposals_i.size(0) == 0:
raise Empty
# Find positives and negatives based on IoU
if bbx_i is not None:
iou = ious(proposals_i, bbx_i)
best_iou, best_gt = iou.max(dim=1)
pos_idx = best_iou >= self.pos_threshold
neg_idx = (best_iou >= self.neg_threshold_lo) & (best_iou < self.neg_threshold_hi)
else:
# No ground truth boxes: all proposals that are non-void are negative
pos_idx = proposals_i.new_zeros(proposals_i.size(0), dtype=torch.uint8)
neg_idx = proposals_i.new_ones(proposals_i.size(0), dtype=torch.uint8)
# Check that there are still some non-voids and do sub-sampling
if not pos_idx.any().item() and not neg_idx.any().item():
raise Empty
pos_idx, neg_idx = self._subsample(pos_idx, neg_idx)
# Gather selected proposals
out_proposals_i = proposals_i[torch.cat([pos_idx, neg_idx])]
# Save matching
match_i = out_proposals_i.new_full((out_proposals_i.size(0),), -1, dtype=torch.long)
match_i[:pos_idx.numel()] = best_gt[pos_idx]
# Save to output
out_proposals.append(out_proposals_i)
match.append(match_i)
except Empty:
out_proposals.append(None)
match.append(None)
return PackedSequence(out_proposals), PackedSequence(match)
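# Illustrative usage (values assumed): with a dataset exposing a `classes` dict,
#   proposal_matcher = ProposalMatcher(classes, num_samples=128, pos_ratio=0.25)
#   out_proposals, match = proposal_matcher(proposals, bbx, cat, iscrowd)
# returns the sub-sampled proposals and their ground-truth matching indices.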
class DetectionLoss:
"""Detection loss"""
def __init__(self, sigma):
self.sigma = sigma
def bbx_loss(self, bbx_logits, bbx_lbl, num_non_void):
bbx_logits = bbx_logits.view(-1, 4)
bbx_lbl = bbx_lbl.view(-1, 4)
bbx_loss = smooth_l1(bbx_logits, bbx_lbl, self.sigma).sum(dim=-1).sum()
bbx_loss /= num_non_void
return bbx_loss
def __call__(self, cls_logits, bbx_logits, cls_lbl, bbx_lbl):
"""Detection loss
"""
# Get contiguous view of the labels
cls_lbl, _ = cls_lbl.contiguous
bbx_lbl, _ = bbx_lbl.contiguous
# Classification loss
cls_loss = functional.cross_entropy(cls_logits, cls_lbl)
# Regression loss
positives = cls_lbl > 0
num_non_void = cls_lbl.numel()
if positives.any().item():
cls_lbl = cls_lbl[positives]
bbx_logits = bbx_logits[positives]
idx = torch.arange(0, bbx_logits.size(0), dtype=torch.long, device=bbx_logits.device)
bbx_loss = self.bbx_loss(bbx_logits[idx, cls_lbl - 1], bbx_lbl, num_non_void)
else:
bbx_loss = bbx_logits.sum() * 0
return cls_loss, bbx_loss
class DetectionAlgo:
"""Base class for detection algorithms
"""
def __init__(self, classes, bbx_reg_weights):
self.num_stuff = classes["stuff"]
self.bbx_reg_weights = bbx_reg_weights
@staticmethod
def _split_and_clip(boxes, scores, index, valid_size):
boxes_out, scores_out = [], []
for img_id, valid_size_i in enumerate(valid_size):
idx = index == img_id
if idx.any().item():
boxes_i = boxes[idx]
boxes_i[:, :, [0, 2]] = torch.clamp(boxes_i[:, :, [0, 2]], min=0, max=valid_size_i[0])
boxes_i[:, :, [1, 3]] = torch.clamp(boxes_i[:, :, [1, 3]], min=0, max=valid_size_i[1])
boxes_out.append(boxes_i)
scores_out.append(scores[idx])
else:
boxes_out.append(None)
scores_out.append(None)
return boxes_out, scores_out
def _match_to_lbl(self, proposals, bbx, cat, match):
cls_lbl = []
bbx_lbl = []
for i, (proposals_i, bbx_i_, match_i) in enumerate(zip(proposals, bbx, match)):
bbx_i = bbx_i_[:, [0, 1, 3, 4]]
cat_i = bbx_i_[:, 5].long()
if match_i is not None:
pos = match_i >= 0
# Objectness labels
cls_lbl_i = proposals_i.new_zeros(proposals_i.size(0), dtype=torch.long)
cls_lbl_i[pos] = cat_i[match_i[pos]] + 1 - self.num_stuff
# Bounding box regression labels
if pos.any().item():
bbx_lbl_i = calculate_shift(proposals_i[pos], bbx_i[match_i[pos]])
bbx_lbl_i *= bbx_lbl_i.new(self.bbx_reg_weights)
else:
bbx_lbl_i = None
cls_lbl.append(cls_lbl_i)
bbx_lbl.append(bbx_lbl_i)
else:
cls_lbl.append(None)
bbx_lbl.append(None)
return PackedSequence(cls_lbl), PackedSequence(bbx_lbl)
def training(self, head, x, proposals, bbx, cat, iscrowd, img_size):
"""Given input features, proposals and ground truth compute detection losses
"""
raise NotImplementedError()
def inference(self, head, x, proposals, valid_size, img_size):
"""Given input features compute detection predictions
"""
raise NotImplementedError()
|
from django.urls import path
from . import views
app_name = "accounts"
urlpatterns = [
path("logg-inn/", views.login_user, name="login"),
path("registrer/", views.register, name="register"),
path("logg-ut/", views.logout_user, name="logout"),
]
""" accounts/ login/ [name='login']
accounts/ logout/ [name='logout']
accounts/ password_change/ [name='password_change']
accounts/ password_change/done/ [name='password_change_done']
accounts/ password_reset/ [name='password_reset']
accounts/ password_reset/done/ [name='password_reset_done']
accounts/ reset/<uidb64>/<token>/ [name='password_reset_confirm']
accounts/ reset/done/ [name='password_reset_complete']"""
|
from peewee import *
from playhouse.fields import CompressedField
from playhouse.fields import PickleField
from .base import db
from .base import ModelTestCase
from .base import TestModel
class Comp(TestModel):
key = TextField()
data = CompressedField()
class Pickled(TestModel):
key = TextField()
data = PickleField()
class TestCompressedField(ModelTestCase):
requires = [Comp]
def test_compressed_field(self):
a = b'a' * 1024
b = b'b' * 1024
Comp.create(data=a, key='a')
Comp.create(data=b, key='b')
a_db = Comp.get(Comp.key == 'a')
self.assertEqual(a_db.data, a)
b_db = Comp.get(Comp.key == 'b')
self.assertEqual(b_db.data, b)
# Get at the underlying data.
CompTbl = Table('comp', ('id', 'data', 'key')).bind(self.database)
obj = CompTbl.select().where(CompTbl.key == 'a').get()
self.assertEqual(obj['key'], 'a')
# Ensure that the data actually was compressed.
self.assertTrue(len(obj['data']) < 1024)
class TestPickleField(ModelTestCase):
requires = [Pickled]
def test_pickle_field(self):
a = {'k1': 'v1', 'k2': [0, 1, 2], 'k3': None}
b = 'just a string'
Pickled.create(data=a, key='a')
Pickled.create(data=b, key='b')
a_db = Pickled.get(Pickled.key == 'a')
self.assertEqual(a_db.data, a)
b_db = Pickled.get(Pickled.key == 'b')
self.assertEqual(b_db.data, b)
|
from .models import Notification_Id, Analytics_event, Notification
from rest_framework import generics, permissions
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from rest_framework import status
from django.http import JsonResponse
from django.db import IntegrityError
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from django.contrib.auth import authenticate
from .serializers import Notification_IdSerializer, analytics_eventSerializer, NotificationSerializer
from datetime import date, timedelta, datetime
# Expo Notifications stuff
from exponent_server_sdk import (
DeviceNotRegisteredError,
PushClient,
PushMessage,
PushServerError,
PushTicketError,
)
from requests.exceptions import ConnectionError, HTTPError
def send_push_message(token, title, message=None, extra=None):
try:
response = PushClient().publish(
PushMessage(to=token,
title=title,
body=message,
data=extra))
    except PushServerError:
        # Encountered some likely formatting/validation error.
        # (The upstream example reports this to rollbar and retries via Celery;
        # neither rollbar nor a task context is available in this module, so we
        # simply re-raise.)
        raise
    except (ConnectionError, HTTPError):
        # Encountered a connection or HTTP error - likely transient, but with no
        # Celery task context here we just re-raise and let the caller retry.
        raise
try:
# We got a response back, but we don't know whether it's an error yet.
# This call raises errors so we can handle them with normal exception
# flows.
response.validate_response()
except DeviceNotRegisteredError:
# Mark the push token as inactive
Notification_Id.objects.filter(key=token).delete()
    except PushTicketError:
        # Encountered some other per-notification error; re-raise it
        # (no rollbar reporting or Celery retry is wired up in this module).
        raise
# End of Expo Push notifications stuff
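# Illustrative usage (token value assumed):
#   send_push_message('ExponentPushToken[xxxxxxxx]', 'New event', message='Hello!', extra={'id': 1})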
class Notifications(generics.CreateAPIView):
serializer_class = Notification_IdSerializer
permission_classes = [permissions.IsAuthenticated]
def perform_create(self, serializer):
obj = serializer.save()
class Call_Notifications(generics.CreateAPIView):
serializer_class = NotificationSerializer
permission_classes = [permissions.IsAuthenticated]
def perform_create(self, serializer):
if 'body' in self.request.data:
for e in Notification_Id.objects.all():
send_push_message(e.key, self.request.data["title"], self.request.data["body"])
else:
for e in Notification_Id.objects.all():
send_push_message(e.key, self.request.data["title"])
serializer.save()
class Analytics(generics.ListCreateAPIView):
serializer_class = analytics_eventSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
Analytics_event.objects.filter(time__lte=datetime.now()-timedelta(days=10)).delete()
ok = self.kwargs.get('date')
type = self.kwargs.get('type')
if(not type):
type = "login"
today = date.today()
if(ok == 1):
return Analytics_event.objects.filter(time__day = today.day, name=type)
elif(ok == 2):
return Analytics_event.objects.filter(time__month = today.month, name=type)
else :
return Analytics_event.objects.all()
def perform_create(self, serializer):
obj = serializer.save()
@csrf_exempt
def login(request):
if request.method == 'POST':
data = JSONParser().parse(request)
user = authenticate(request, username=data['username'], password=data['password'], email = data['email'])
if user is None:
return JsonResponse({'error':'Could not login. Please check username and password'}, status=400)
else:
try:
token = Token.objects.get(user=user)
except:
token = Token.objects.create(user=user)
return JsonResponse({'token':str(token)}, status=200)
else:
return JsonResponse({'Error': 'No Get'}, status=400)
|
n = int(input())
for i in range(1,n+1):
start=(i*5)-4
lst=[e for e in range(start,start+5)]
if i%2==1:
print(*lst)
if i%2==0:
lst.reverse()
print(*lst)
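# Example: for n = 3 this prints the alternating pattern
#   1 2 3 4 5
#   10 9 8 7 6
#   11 12 13 14 15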
|
from flask import Flask, render_template, session, url_for, escape, request, redirect
from app import app
import json, random
from os import listdir, system, chdir
from os.path import isfile, join, abspath, dirname
jspath = abspath(join(dirname( __file__ ), '..', '..','javascript'))
@app.route('/group1', methods=['POST'])
def group1():
user_data=request.form
chdir(jspath)
user_data = str(json.dumps(user_data))
print(user_data)
print(session['wallet'])
system("node setRequest.js " + session['wallet'].strip() + " " + user_data.replace(" ",""))
return json.dumps({'status':'OK'})
@app.route('/group2', methods=['POST'])
def group2():
user_data=request.form
chdir(jspath)
user_data = str( json.dumps(user_data))
system("node setWork.js " + session['wallet'].strip() + " " + user_data.replace(" ",""))
return json.dumps({'status':'OK'})
@app.route('/fbsignup', methods=['POST'])
def fbsignup():
email=request.form['email_id']
passwd=request.form['passwd']
user_name=request.form['user-name']
user_group=request.form['user-group']
s=SignUp()
s.signup_user(email,passwd,user_name,user_group)
return redirect(url_for('signin'))
@app.route('/login', methods=['POST'])
def login():
error_msg='Wrong Email/Password'
user_login=request.form
wallet=user_login['wallet']
try:
session['wallet'] = wallet
print(session['wallet'])
except:
return render_template('login.html',error_msg=error_msg)
if session['wallet'] == 'admin':
return redirect(url_for('admin'))
return redirect(url_for('forms'))
@app.route('/vote', methods=['POST'])
def vote():
poll_data=request.form
g1=Poll_Vote()
result=g1.submit_vote(poll_data['choice-radio'], poll_data['poll-id'],session['group'],session['username'])
return json.dumps({'status':result,'poll-data':poll_data})
@app.route('/logout')
def logout():
session.pop('wallet', None)
return redirect(url_for('index'))
|
""" Testing module for the DDoS implementation
of the blockchain client.
"""
import hashlib
import time
from queue import Queue
import nacl.encoding
import nacl.signing
import chains
import utils
VERSION = 0.7
class TestDDos(object):
""" Testcase used to bundle all tests for the
DDoS blockchain
"""
def setup(self):
""" Setup of the blockchain for the tests.
"""
self.counter = 0
self.sends = Queue()
self.gui_queue = Queue()
self.chain = chains.DDosChain(
VERSION, self.sends, self.gui_queue)
        self.sender_sign = nacl.signing.SigningKey(
            ('1973224f51c2e798f6ab3bcf5f8a2a28'
             '5b1832ea16439bae9c26d9da8256a7ef').encode('utf-8'),
            nacl.encoding.HexEncoder)
self.sender_verify = self.sender_sign.verify_key.encode(
nacl.encoding.HexEncoder)
self.receiver_sign = nacl.signing.SigningKey(seed=b'bb' * 16)
self.receiver_verify = self.receiver_sign.verify_key.encode(
nacl.encoding.HexEncoder)
def test_ip_blocking(self, capsys):
""" Test that blocking and unblocking of IPs works.
"""
# Initial IPs
ips = ['1.1.1.1',
'2.2.2.2',
'3.3.3.3',
'4.4.4.4',
'5.5.5.5'
]
# Block IPs
for ip in ips:
self.process_transaction(
capsys, ip, 'b', self.sender_sign, self.sender_verify)
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert captured.out == f'{ips}\n'
# Unblock the last 3 IPs
for _ in range(3):
ip = ips.pop()
self.process_transaction(
capsys, ip, 'ub', self.sender_sign, self.sender_verify)
# Add 2 new IPs
for ip in ['6.6.6.6', '7.7.7.7']:
self.process_transaction(
capsys, ip, 'b', self.sender_sign, self.sender_verify)
ips.append(ip)
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert captured.out == f'{ips}\n'
def test_invites(self, capsys):
""" Test that invites and uninvites of users works.
"""
# Invite
self.process_transaction(
capsys, self.receiver_verify, 'i',
self.sender_sign, self.sender_verify)
def inner(ip):
""" Block IP
Args:
ip: IP that should be blocked
"""
self.fill_block(capsys, 4)
self.process_transaction(
capsys, ip, 'b',
self.receiver_sign, self.receiver_verify)
self.fill_block(capsys, 4)
# Verify that new user can block IPs
inner('1.1.1.1')
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert '1.1.1.1' in captured.out
# Uninvite user
self.process_transaction(
capsys, self.receiver_verify, 'ui',
self.sender_sign, self.sender_verify)
# Verify that uninvited user can no longer block IPs
inner('2.2.2.2')
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert '2.2.2.2' not in captured.out
def test_children(self, capsys):
""" Test the user hierarchy
"""
self.process_transaction(
capsys, self.receiver_verify, 'i',
self.sender_sign, self.sender_verify)
self.fill_block(capsys, 4)
self.chain.process_message(
('show_children', self.sender_verify, 'local'))
captured = capsys.readouterr()
assert self.sender_verify.decode('utf-8') in captured.out
assert self.receiver_verify.decode('utf-8') in captured.out
def test_purge(self, capsys):
""" Test the purging of a user-account
"""
# Invite
self.process_transaction(
capsys, self.receiver_verify, 'i',
self.sender_sign, self.sender_verify)
self.fill_block(capsys, 4)
# Verify that new user can block IPs
self.process_transaction(
capsys, '3.3.3.3', 'b',
self.receiver_sign, self.receiver_verify)
self.fill_block(capsys, 4)
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert '3.3.3.3' in captured.out
# Purge new user
self.process_transaction(
capsys, self.receiver_verify, 'p',
self.sender_sign, self.sender_verify)
self.fill_block(capsys, 4)
# Verify that IPs are now unblocked
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert '3.3.3.3' not in captured.out
def test_ancestors(self, capsys):
""" Test blocking/unblocking of IPs through ancestors
"""
# Invite
self.process_transaction(
capsys, self.receiver_verify, 'i',
self.sender_sign, self.sender_verify)
self.fill_block(capsys, 4)
# New user blocks IP
self.process_transaction(
capsys, '1.2.3.4', 'b',
self.receiver_sign, self.receiver_verify)
self.fill_block(capsys, 4)
# Ancestor(initial user) takes over block
self.process_transaction(
capsys, '1.2.3.4', 'b',
self.sender_sign, self.sender_verify)
self.fill_block(capsys, 4)
# Verify IP is blocked
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert '1.2.3.4' in captured.out
# Try unblocking with new user
self.process_transaction(
capsys, '1.2.3.4', 'ub',
self.receiver_sign, self.receiver_verify)
self.fill_block(capsys, 4)
# Verify that new user cannot unblock the IP of ancestor
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert '1.2.3.4' in captured.out
# Unblock IP from ancestor (initial user)
self.process_transaction(
capsys, '1.2.3.4', 'ub',
self.sender_sign, self.sender_verify)
self.fill_block(capsys, 4)
# Verify that IP is now unblocked
self.chain.process_message(('get_ips', '', 'local'))
captured = capsys.readouterr()
assert '1.2.3.4' not in captured.out
def test_invalid_transaction(self, capsys):
""" Test that the chain rejects invalid transaction
"""
utils.set_debug()
# Verify rejection of duplicate transaction
self.process_transaction(
capsys, self.receiver_verify, 'i',
self.sender_sign, self.sender_verify)
time.sleep(0.00000001) # new timestamp
self.process_transaction(
capsys, self.receiver_verify, 'i',
self.sender_sign, self.sender_verify, False)
captured = capsys.readouterr()
assert 'This operation is already in the pool' in captured.out
self.fill_block(capsys, 4)
# Verify rejection of invite for members of the blockchain
self.process_transaction(
capsys, self.receiver_verify, 'i',
self.sender_sign, self.sender_verify, False)
captured = capsys.readouterr()
assert 'Client is already invited!' in captured.out
# Verify rejection for uninvite of someone that is not a member
self.process_transaction(
capsys, 'Not a client', 'ui',
self.sender_sign, self.sender_verify, False)
captured = capsys.readouterr()
assert 'Client could not be found!' in captured.out
# Verify rejection of not-permissioned uninvites
self.process_transaction(
capsys, self.sender_verify, 'ui',
self.receiver_sign, self.receiver_verify, False)
captured = capsys.readouterr()
assert 'No permission to delete this node!' in captured.out
# Verify rejection of unblocking of not-blocked IPs
self.process_transaction(
capsys, '66.77.88.99', 'ub',
self.receiver_sign, self.receiver_verify, False)
captured = capsys.readouterr()
assert 'Trying to unblock IP that was not blocked' in captured.out
# Verify rejection of blocking of already blocked IPs
self.process_transaction(
capsys, '255.255.255.0', 'b',
self.sender_sign, self.sender_verify, False)
captured = capsys.readouterr()
assert 'IP was already blocked' in captured.out
# ####################### HELPER FUNCTIONS ###########################
def fill_block(self, capsys, amount):
""" Fill block with additional transactions
This is needed because the DDoS-chain creates
a new block every 5 transactions.
The transactions are blocking-transactions of IPs with this format:
255.255.255.X
with X increasing over time and is never the same.
Uses self.counter for this.
This works only for up to 255 transactions,
if more are needed change the function.
Args:
amount: Number of transactions missing to 5
"""
for i in range(amount):
self.process_transaction(
capsys,
f'255.255.255.{i+self.counter}',
'b',
self.sender_sign,
self.sender_verify)
self.counter += amount
def process_transaction(self,
capsys,
data,
action,
s_sign,
s_ver,
disable_out=True):
""" Create + Process a transaction.
Args:
capsys: capsys of caller.
data: DDos data (e.g. IP, key)
action: DDos action (e.g. 'b', 'ui')
s_sign: Signing key of sender
s_ver: Verify key of sender
disabled_out: Disable output (default: True)
"""
timestamp = time.time()
transaction = self.create_transaction(s_ver,
timestamp,
chains.DDosData(
action, data),
s_sign)
if disable_out:
with capsys.disabled():
self.chain.process_message(('new_transaction',
transaction,
'local'
))
else:
self.chain.process_message(('new_transaction',
transaction,
'local'
))
def create_transaction(self,
sender: str,
                           timestamp: float,
data: chains.DDosData,
signing_key: nacl.signing.SigningKey) \
-> chains.DDosTransaction:
""" Create a transaction.
Args:
sender: Verify key of sender.
timestamp: Timestamp of transaction.
data: DDoS data (action, data)
signing_key: Signing key of sender
Returns:
Created transaction
"""
hash_str = (str(sender) +
str(data) +
str(timestamp))
transaction_hash = chains.DDosChain.hash(hash_str)
transaction = chains.DDosTransaction(
sender,
timestamp,
data,
signing_key.sign(transaction_hash.encode())
)
return transaction
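# Hedged illustration (not part of the test suite): create_transaction() above signs the
# transaction hash with PyNaCl; a receiving node could verify that signature roughly like
# this, assuming plain nacl.signing keys:
#
#     import nacl.signing
#     sk = nacl.signing.SigningKey.generate()
#     signed = sk.sign(b"transaction-hash")
#     sk.verify_key.verify(signed)  # raises nacl.exceptions.BadSignatureError if tampered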
|
from shortest_path import gen_data, solve_model, solve_all_pairs, solve_tree_model, critical_tasks
def main():
import sys
import random
import tableutils
n=13
header = ['P'+str(i) for i in range(n)]
if len(sys.argv)<=1:
print('Usage is main [data|run|all|tree|pm] [seed]')
return
elif len(sys.argv)>2:
random.seed(int(sys.argv[2]))
C=gen_data(n)
if sys.argv[1]=='data':
for i in range(n):
C[i].insert(0,'P'+str(i))
C.insert(0,['']+header)
tableutils.printmat(C)
elif sys.argv[1]=='run':
rc,Value,Path,Cost,Cumul=solve_model(C)
Path.insert(0,'Points')
Cost.insert(0,'Distance')
Cumul.insert(0,'Cumulative')
T=[Path,Cost,Cumul]
tableutils.printmat(T,True)
elif sys.argv[1]=='all':
Paths, Costs = solve_all_pairs(C)
tableutils.printmat(tableutils.wrapmat(Costs,header,header))
elif sys.argv[1]=='tree':
rc, Val,Tree = solve_tree_model(C)
if rc != 0:
print('Infeasible')
else:
tableutils.printmat(tableutils.wrapmat(Tree,[],['From','To','Distance']),True,0)
elif sys.argv[1]=='pm':
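        # Hedged note: "pm" presumably stands for the project-management / critical-path demo;
        # D looks like (task, duration) pairs and t like earliest start times for critical_tasks().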
D=[[0,3],[1,6],[2,3],[3,2],[4,2],[5,7],[6,7],[7,5],[8,2],[9,7],[10,4],[11,5]]
t=[0,3,0,3,9,0,9,16,21,21,21,3]
rc,Path = critical_tasks(D,t)
if rc != 0:
print('Infeasible')
else:
print(Path)
main()
|
import transaction
import zeit.cms.checkout.interfaces
import zeit.content.article.testing
import zope.component
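# Note: _p_changed is ZODB's "object was modified" persistence flag; the tests below
# assert that merely reading the recension container never flips it to True.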
class RecensionTest(zeit.content.article.testing.FunctionalTestCase):
def setUp(self):
super(RecensionTest, self).setUp()
repository = zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
repository['article'] = zeit.content.article.article.Article()
manager = zeit.cms.checkout.interfaces.ICheckoutManager(
repository['article'])
self.article = manager.checkout()
transaction.commit()
def test_sanity_check_that_we_can_set_p_changed(self):
self.assertFalse(self.article._p_changed)
self.article._p_changed = True
self.assertTrue(self.article._p_changed)
def test_accessing_recension_container_should_not_write(self):
self.assertFalse(self.article._p_changed)
zeit.content.article.interfaces.IBookRecensionContainer(self.article)
self.assertFalse(self.article._p_changed)
def test_accessing_recension_should_not_write(self):
recensions = zeit.content.article.interfaces.IBookRecensionContainer(
self.article)
recensions.append(zeit.content.article.recension.BookRecension())
self.article._p_changed = False
self.assertFalse(self.article._p_changed)
list(recensions)
self.assertFalse(self.article._p_changed)
|
'''Example of autoencoder model on MNIST dataset using 2dim latent
The autoencoder forces the encoder to discover a 2-dim latent vector
from which the decoder can recover the original input. The 2-dim latent
vector is projected on 2D space to analyze the distribution of codes
in the latent space. The latent space can be navigated by varying the
values of latent vector to produce new MNIST digits.
This autoencoder has modular design. The encoder, decoder and autoencoder
are 3 models that share weights. For example, after training the
autoencoder, the encoder can be used to generate latent vectors
of input data for low-dim visualization like PCA or TSNE.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.layers import Dense, Input
from keras.layers import Conv2D, Flatten
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.datasets import mnist
from keras.utils import plot_model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import os
def plot_results(models,
data,
batch_size=32,
model_name="autoencoder_2dim"):
"""Plots 2-dim latent values as color gradient
then, plot MNIST digits as function of 2-dim latent vector
Arguments:
models (list): encoder and decoder models
data (list): test data and label
batch_size (int): prediction batch size
model_name (string): which model is using this function
"""
encoder, decoder = models
x_test, y_test = data
os.makedirs(model_name, exist_ok=True)
filename = os.path.join(model_name, "latent_2dim.png")
# display a 2D plot of the digit classes in the latent space
z = encoder.predict(x_test,
batch_size=batch_size)
plt.figure(figsize=(12, 10))
plt.scatter(z[:, 0], z[:, 1], c=y_test)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.savefig(filename)
plt.show()
filename = os.path.join(model_name, "digits_over_latent.png")
# display a 30x30 2D manifold of the digits
n = 30
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates corresponding to the 2D plot
# of digit classes in the latent space
grid_x = np.linspace(-4, 4, n)
grid_y = np.linspace(-4, 4, n)[::-1]
for i, yi in enumerate(grid_y):
for j, xi in enumerate(grid_x):
z = np.array([[xi, yi]])
x_decoded = decoder.predict(z)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
start_range = digit_size // 2
    end_range = (n - 1) * digit_size + start_range + 1
pixel_range = np.arange(start_range, end_range, digit_size)
sample_range_x = np.round(grid_x, 1)
sample_range_y = np.round(grid_y, 1)
plt.xticks(pixel_range, sample_range_x)
plt.yticks(pixel_range, sample_range_y)
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.imshow(figure, cmap='Greys_r')
plt.savefig(filename)
plt.show()
# load MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape to (28, 28, 1) and normalize input images
image_size = x_train.shape[1]
x_train = np.reshape(x_train, [-1, image_size, image_size, 1])
x_test = np.reshape(x_test, [-1, image_size, image_size, 1])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# network parameters
input_shape = (image_size, image_size, 1)
batch_size = 32
kernel_size = 3
latent_dim = 2
# encoder/decoder number of CNN layers and filters per layer
layer_filters = [32, 64]
# build the autoencoder model
# first build the encoder model
inputs = Input(shape=input_shape, name='encoder_input')
x = inputs
# stack of Conv2D(32)-Conv2D(64)
for filters in layer_filters:
x = Conv2D(filters=filters,
kernel_size=kernel_size,
activation='relu',
strides=2,
padding='same')(x)
# shape info needed to build decoder model so we don't do hand computation
# the input to the decoder's first Conv2DTranspose will have this shape
# shape is (7, 7, 64) which is processed by the decoder back to (28, 28, 1)
shape = K.int_shape(x)
# generate latent vector
x = Flatten()(x)
latent = Dense(latent_dim, name='latent_vector')(x)
# instantiate encoder model
encoder = Model(inputs, latent, name='encoder')
encoder.summary()
plot_model(encoder, to_file='encoder.png', show_shapes=True)
# build the decoder model
latent_inputs = Input(shape=(latent_dim,), name='decoder_input')
# use the shape (7, 7, 64) that was earlier saved
x = Dense(shape[1] * shape[2] * shape[3])(latent_inputs)
# from vector to suitable shape for transposed conv
x = Reshape((shape[1], shape[2], shape[3]))(x)
# stack of Conv2DTranspose(64)-Conv2DTranspose(32)
for filters in layer_filters[::-1]:
x = Conv2DTranspose(filters=filters,
kernel_size=kernel_size,
activation='relu',
strides=2,
padding='same')(x)
# reconstruct the input
outputs = Conv2DTranspose(filters=1,
kernel_size=kernel_size,
activation='sigmoid',
padding='same',
name='decoder_output')(x)
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='decoder.png', show_shapes=True)
# autoencoder = encoder + decoder
# instantiate autoencoder model
autoencoder = Model(inputs, decoder(encoder(inputs)), name='autoencoder')
autoencoder.summary()
plot_model(autoencoder, to_file='autoencoder.png', show_shapes=True)
# Mean Square Error (MSE) loss function, Adam optimizer
autoencoder.compile(loss='mse', optimizer='adam')
# train the autoencoder
autoencoder.fit(x_train,
x_train,
validation_data=(x_test, x_test),
epochs=20,
batch_size=batch_size)
# predict the autoencoder output from test data
x_decoded = autoencoder.predict(x_test)
# display the 1st 8 test input and decoded images
imgs = np.concatenate([x_test[:8], x_decoded[:8]])
imgs = imgs.reshape((4, 4, image_size, image_size))
imgs = np.vstack([np.hstack(i) for i in imgs])
plt.figure()
plt.axis('off')
plt.title('Input: 1st 2 rows, Decoded: last 2 rows')
plt.imshow(imgs, interpolation='none', cmap='gray')
plt.savefig('input_and_decoded.png')
plt.show()
# project the 2-dim latent on 2D space
models = (encoder, decoder)
data = (x_test, y_test)
plot_results(models, data,
batch_size=batch_size,
model_name="autonencoder-2dim")
|
import unittest
import asyncio
import canopy
class RequestWithRetryTest(unittest.TestCase):
def test_it_should_return_result_if_no_error(self):
request = DummyRequest(0)
result = canopy.run(canopy.request_with_retry(lambda: request.execute('abc'), 'request', True))
self.assertEqual(result, 'abc')
def test_it_should_return_result_if_one_error(self):
request = DummyRequest(1)
result = canopy.run(canopy.request_with_retry(lambda: request.execute('abc'), 'request', True))
self.assertEqual(result, 'abc')
def test_it_should_raise_error_if_two_errors(self):
request = DummyRequest(2)
try:
canopy.run(canopy.request_with_retry(lambda: request.execute('abc'), 'request', True))
self.fail()
except asyncio.TimeoutError:
pass
def test_it_should_return_none_if_two_errors_and_suppressed(self):
request = DummyRequest(2)
result = canopy.run(canopy.request_with_retry(lambda: request.execute('abc'), 'request', False))
self.assertIsNone(result)
class DummyRequest:
def __init__(self, error_count: int):
self.error_count = error_count
async def execute(self, result: str) -> str:
self.error_count -= 1
await asyncio.sleep(0.1)
if self.error_count >= 0:
raise asyncio.TimeoutError('Timed out')
else:
return result
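# Hedged sketch (not the canopy implementation): DummyRequest above fails `error_count`
# times before succeeding, which is the behaviour the retry helper has to absorb. A
# bare-bones single-retry wrapper in plain asyncio would look roughly like:
#
#     async def retry_once(make_coro):
#         try:
#             return await make_coro()
#         except asyncio.TimeoutError:
#             return await make_coro()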
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
@LOSSES.register_module()
class AdaptiveWingLoss(nn.Module):
"""Adaptive wing loss. paper ref: 'Adaptive Wing Loss for Robust Face
Alignment via Heatmap Regression' Wang et al. ICCV'2019.
Args:
alpha (float), omega (float), epsilon (float), theta (float)
are hyper-parameters.
use_target_weight (bool): Option to use weighted MSE loss.
Different joint types may have different target weights.
loss_weight (float): Weight of the loss. Default: 1.0.
"""
def __init__(self,
alpha=2.1,
omega=14,
epsilon=1,
theta=0.5,
use_target_weight=False,
loss_weight=1.):
super().__init__()
self.alpha = float(alpha)
self.omega = float(omega)
self.epsilon = float(epsilon)
self.theta = float(theta)
self.use_target_weight = use_target_weight
self.loss_weight = loss_weight
def criterion(self, pred, target):
"""Criterion of wingloss.
Note:
batch_size: N
num_keypoints: K
Args:
pred (torch.Tensor[NxKxHxW]): Predicted heatmaps.
target (torch.Tensor[NxKxHxW]): Target heatmaps.
"""
H, W = pred.shape[2:4]
delta = (target - pred).abs()
A = self.omega * (
1 / (1 + torch.pow(self.theta / self.epsilon, self.alpha - target))
) * (self.alpha - target) * (torch.pow(
self.theta / self.epsilon,
self.alpha - target - 1)) * (1 / self.epsilon)
C = self.theta * A - self.omega * torch.log(
1 + torch.pow(self.theta / self.epsilon, self.alpha - target))
losses = torch.where(
delta < self.theta,
self.omega *
torch.log(1 +
torch.pow(delta / self.epsilon, self.alpha - target)),
A * delta - C)
return torch.mean(losses)
def forward(self, output, target, target_weight):
"""Forward function.
Note:
batch_size: N
num_keypoints: K
Args:
output (torch.Tensor[NxKxHxW]): Output heatmaps.
target (torch.Tensor[NxKxHxW]): Target heatmaps.
target_weight (torch.Tensor[NxKx1]):
Weights across different joint types.
"""
if self.use_target_weight:
loss = self.criterion(output * target_weight.unsqueeze(-1),
target * target_weight.unsqueeze(-1))
else:
loss = self.criterion(output, target)
return loss * self.loss_weight
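# Hedged sanity-check sketch (shown as a comment because the relative import above keeps
# this module from running standalone); it only illustrates the expected tensor shapes:
#
#     pred = torch.rand(2, 4, 16, 16)        # N=2, K=4, H=W=16 heatmaps
#     target = torch.rand(2, 4, 16, 16)
#     target_weight = torch.ones(2, 4, 1)    # per-keypoint weights
#     loss = AdaptiveWingLoss(use_target_weight=True)(pred, target, target_weight)
#     print(loss.item())                     # a single finite scalar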
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
"""
tf2onnx.rewriter.rnn_unit_base - lstm support
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from onnx import onnx_pb
from tf2onnx import utils
from tf2onnx.rewriter.rnn_utils import get_pattern, RnnProperties, \
check_is_timemajor_transpose, REWRITER_RESULT
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher # pylint: disable=unused-import
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("tf2onnx.rewriter.rnn_unit_writer_base")
# pylint: disable=invalid-name,unused-argument,missing-docstring
# dynamic_rnn or bidirectional_dynamic_rnn related logic will be mapped to this base class.
class UnitRewriterBase(object):
def __init__(self, g):
self.g = g
self.all_nodes = self.g.get_nodes()
# checker signature : func_name(enter_target_node_input_id, identity_consumers, match)
# exit connector signature: func_name(rnn_node, exit_node, rnn_props)
self.switch_checkers = {}
def run(self, unit_type):
"""
main procedures:
1 use cell op pattern to find cell >> the found cell is the start pointer of the procedures below
2 find needed info from tensorflow graph:
1 rnn scope name
2 input_x
3 weight
4 sequence node
5 initializer
6 state output & hidden output
        3 process the found info according to ONNX requirements
        remember: the op pattern and the scope name are what we use
        to get the needed info from the tensorflow graph;
        the raw info found then has to be formatted to meet ONNX requirements
"""
# allow_reorder must be true. because LSTMCell and BasicLSTMCell's call function
# are defining the calculation with different orders. Then we can share the same
# pattern.
cell_pattern = get_pattern(unit_type)
matcher = GraphMatcher(cell_pattern, allow_reorder=True)
match_results = list(matcher.match_ops(self.g.get_nodes()))
if match_results:
for match in match_results:
self.run_single_match(match)
self.g.delete_unused_nodes(self.g.output_names)
self.g.update_proto()
self.print_step("finish handling")
return self.g.get_nodes()
def run_single_match(self, match):
"""
methods to get needed info from tf graph:
1 input_x: specific node in found cell, then trace TensorArrayReadV >..>input of "TensorArrayScatterV",
if "Transpose" found under rnn scope, then input of "Transpose" is "input_x"
2 weight: specific node in cell computation graph and specific op pattern as input_x
3 sequence node: "Identity" op with name "sequence_length", the name is hard code in tensorflow code
4 state initializer: "LoopCond" and then specific op pattern >> LoopCond > Switch > Switch usage checker
5 hidden output and state output: find switch and use switch checker to distinguish different switch nodes
6 scope name of rnn and gru/lstm cell: specific node in cell computation graph,
and use found convention in tensorflow code to split name of node to get needed scooe name
most found info is stored in "rnn_props"
"""
log.debug("=========================")
self.print_step("start handling a new potential rnn cell")
self.all_nodes = self.g.get_nodes()
# FIXME:
# pylint: disable=assignment-from-none,assignment-from-no-return
# when bi-directional, node in while will be rnnxx/fw/fw/while/... >> scope name is rnnxx/fw/fw
# when single direction, node in while will be rnnxx/while/... >> scope name is rnnxx
# and rnnxx can be assigned by users but not "fw", though maybe "FW" in another tf version
rnn_scope_name = self.get_rnn_scope_name(match)
if not rnn_scope_name:
log.debug("unable to find rnn scope name, skip")
return REWRITER_RESULT.SKIP
log.debug("rnn scope name is %s", rnn_scope_name)
self.print_step("get_weight_and_bias starts")
rnn_weights = self.get_weight_and_bias(match)
if not rnn_weights:
log.debug("rnn weights check failed, skip")
return REWRITER_RESULT.SKIP
rnn_props = RnnProperties()
res = self.get_var_initializers(match, rnn_props, rnn_scope_name)
        if not res or not rnn_props.var_initializers:
log.debug("no cell variable initializers found, skip")
return REWRITER_RESULT.SKIP
seq_len_input_node = self.find_sequence_length_node(rnn_scope_name)
input_filter = self.get_rnn_input_blacklist(rnn_weights, rnn_props)
if seq_len_input_node:
input_filter.append(seq_len_input_node)
self.find_inputs(rnn_scope_name, rnn_props, match, input_filter)
if not rnn_props.is_valid():
log.debug("rnn properties are not valid, skip")
return REWRITER_RESULT.SKIP
if not self.process_input_x(rnn_props, rnn_scope_name):
log.debug("rnn input x not found, skip")
return REWRITER_RESULT.SKIP
self.print_step("process the weights/bias/ft_bias, to fit onnx weights/bias requirements")
self.process_weights_and_bias(rnn_weights, rnn_props)
_, batch_size_node = self.process_seq_length(rnn_props, seq_len_input_node)
rnn_props.batch_size_node = batch_size_node
self.process_var_init_nodes(rnn_props)
self.print_step("start to build new rnn node")
rnn_props.activation = self.get_rnn_activation(match)
rnn_node = self.create_rnn_node(rnn_props)
self.all_nodes.append(rnn_node)
self.print_step("start to handle outputs")
# format of ONNX output is different with tf
self.process_outputs(match, rnn_node, rnn_props, rnn_scope_name)
# FIXME:
# pylint: enable=assignment-from-none,assignment-from-no-return
return REWRITER_RESULT.OK
# find needed info from graph
def get_rnn_scope_name(self, match):
pass
def get_cell_scope_name(self, match):
return None
@staticmethod
def get_rnn_activation(match):
return None
def get_weight_and_bias(self, match):
pass
def get_var_initializers(self, match, rnn_props, rnn_scope_name):
"""
        the initializer op can be found by tracing from a Switch node. since the rnn has multiple Switch nodes,
        we have to discriminate between them with a checker.
        the Switch nodes themselves can be found by tracing from LoopCond
"""
loop_cond_op = None
for n in self.g.get_nodes():
if n.type == 'LoopCond' and n.name.startswith(rnn_scope_name):
if not loop_cond_op:
loop_cond_op = n
else:
log.debug("only a LoopCond is expected, rnn scope name:%s", rnn_scope_name)
return None
if loop_cond_op is None:
log.debug("No LoopCond op is found, skip")
return None
switch_nodes = self.g.find_output_consumers(loop_cond_op.output[0])
for n in switch_nodes:
if n.type != 'Switch':
raise ValueError("LoopCond's output node should be followed with a Switch node")
for var_name, funcs in self.switch_checkers.items():
var_checker = funcs[0]
if not funcs[2]:
continue
enter_target_input_id = self.check_switch_by_usage_pattern(n, match, var_checker)
if enter_target_input_id:
log.debug("found initializer node for " + var_name + ": " + enter_target_input_id)
rnn_props.var_initializers[var_name] = enter_target_input_id
break
return rnn_props.var_initializers
def find_sequence_length_node(self, rnn_scope_name):
# "sequence_length" under current rnn scope is the seq len node (if there is).
# this is hardcoded in dynamic_rnn().
seq_len_nodes = []
for n in self.g.get_nodes():
if not n.name.startswith(rnn_scope_name):
continue
if n.name.endswith("sequence_length") and n.type == "Identity":
log.debug("find non-const sequence length node")
elif "CheckSeqLen" in n.name and n.is_const():
# if seq length is const, the node might be const folded,
# so we check this way.
log.debug("find const sequence length node")
else:
continue
seq_len_nodes.append(n)
seq_len_node_cnt = len(seq_len_nodes)
if seq_len_node_cnt == 0:
return None
if seq_len_node_cnt == 1:
seq_len_node = seq_len_nodes[0]
if seq_len_node.is_const():
return seq_len_node
# input of the "identity" node may be a "cast"
# if so, then we have to keep it
# sentence "math_ops.to_int32(sequence_length)" in tf results in the "cast" op
if seq_len_node.inputs[0].type == "Cast":
cast_node = seq_len_node.inputs[0]
if not cast_node.inputs[0].name.startswith(rnn_scope_name):
return seq_len_node.inputs[0]
raise ValueError("sequence length node should be outside of rnn scope")
if not seq_len_node.inputs[0].name.startswith(rnn_scope_name):
return seq_len_node.inputs[0]
raise ValueError("sequence length node should be outside of rnn scope")
raise ValueError("there are more sequence length nodes than expected")
def get_rnn_input_blacklist(self, rnn_weights, rnn_props):
var_init_nodes = []
for _, init_input_id in rnn_props.var_initializers.items():
init_node = self.g.get_node_by_output(init_input_id)
var_init_nodes.append(init_node)
# weight/bias inputs, and c/h initializers are dynamic_rnn/LSTMCell's parameters.
# we will use them to filter out the dynamic_rnn's input tensor.
blacklist_inputs = [rnn_weights.kernel.node, rnn_weights.bias.node, rnn_weights.forget_bias.node]
blacklist_inputs.extend(var_init_nodes)
return blacklist_inputs
def find_inputs(self, rnn_scope_name, rnn_props, match, input_blacklist=None):
rnn_input_nodes = []
for n in self.g.get_nodes():
if n.name.startswith(rnn_scope_name):
# find input node that are not within rnn scope
for input_id, input_node in zip(n.input, n.inputs):
if not input_node.name.startswith(rnn_scope_name):
if input_node not in input_blacklist:
rnn_input_nodes.append([input_node, input_id])
if len(rnn_input_nodes) != 1:
log.debug("found %d inputs for the dynamic_run, unexpected. They are %s",
len(rnn_input_nodes), rnn_input_nodes)
return rnn_props
input_node_candidate = rnn_input_nodes[0][0]
input_id_candidate = rnn_input_nodes[0][1]
# we should not limit the rnn_input_nodes' type be Placeholder or Const,
# because there might some Reshape/etc. ops after the Placeholder
rnn_props.input_node = input_node_candidate
rnn_props.input_id = input_id_candidate
return rnn_props
# process found info according to ONNX requirement
def process_input_x(self, rnn_props, rnn_scope_name):
self.print_step("look for possible transpose following RNN input node")
        # todo: peepholes P is not considered now
input_consumers = self.g.find_output_consumers(rnn_props.input_id)
consumers_in_rnn_scope = []
for consumer in input_consumers:
if consumer.name.startswith(rnn_scope_name):
consumers_in_rnn_scope.append(consumer)
if len(consumers_in_rnn_scope) != 1:
log.warning("RNN input node has %d onsumers in current rnn scope %s skip",
len(consumers_in_rnn_scope), rnn_scope_name)
return None
possible_transpose_after_input = consumers_in_rnn_scope[0]
self.print_step("convert the transpose to onnx node if there is one found.")
# check whether time_major is enabled or not
# in TF, if time_major is not enabled, input format is [batch, time, ...]
# but, during TF handling, at the beginning, the data will be transposed to [time, batch, ...]
# after processing, the format is changed back before returning result.
# So here, we judge the time_major by checking the transpose operator existence.
converted_transpose = self._convert_timemajor_transpose(possible_transpose_after_input)
if converted_transpose:
log.debug("detect batch-major inputs")
rnn_props.time_major = False
rnn_props.x_input_id = converted_transpose.output[0]
self.all_nodes.extend([converted_transpose])
else:
log.debug("detect timer-major inputs")
rnn_props.time_major = True
rnn_props.x_input_id = rnn_props.input_id
rnn_props.onnx_input_ids["X"] = rnn_props.x_input_id
return rnn_props
def process_weights_and_bias(self, rnn_weights, rnn_props):
pass
def process_var_init_nodes(self, rnn_props):
pass
def process_seq_length(self, rnn_props, seq_length_node):
# output: [time step, batch size, input size]
shape_node = self.g.make_node("Shape", [rnn_props.x_input_id])
        # LSTMCell only allows inputs of [batch size, input_size], so we assume dynamic_rnn has 3 dims.
# Slice cannot support Int64 in OPSET 7, so we cast here.
cast_shape_node = self.g.make_node("Cast", [shape_node.output[0]],
attr={"to": onnx_pb.TensorProto.FLOAT},
shapes=[self.g.get_shape(shape_node.output[0])])
batchsize_node = self.g.make_node("Slice", [cast_shape_node.output[0]],
attr={"axes": [0], "starts": [1], "ends": [2]})
# Tile's repeats must be INT64
repeat_node = self.g.make_node("Cast", [batchsize_node.output[0]],
attr={"to": onnx_pb.TensorProto.INT64})
self.all_nodes.extend([shape_node, cast_shape_node, batchsize_node, repeat_node])
if not seq_length_node:
timestep_node = self.g.make_node("Slice", [cast_shape_node.output[0]],
attr={"axes": [0], "starts": [0], "ends": [1]})
tile_node = self.g.make_node("Tile", [timestep_node.output[0], repeat_node.output[0]])
# LSTM sequence_lens needs to be int32
seq_length_node = self.g.make_node('Cast', [tile_node.output[0]],
attr={"to": onnx_pb.TensorProto.INT32})
self.all_nodes.extend([timestep_node, tile_node, seq_length_node])
rnn_props.onnx_input_ids["sequence_lens"] = seq_length_node.output[0]
return seq_length_node, batchsize_node
def process_outputs(self, match, rnn_node, rnn_props, rnn_scope_name):
# There are 2 kinds of output nodes for dynamic_rnn
# 1. output node, which ends with "Exit" followed
# either Transpose (when time_major is False),
# or TensorArrayGather
# 2. cell_state node,
# 2.1 if state_is_tuple is true:
# 2.1.1 which ends with "Exit" followed by a Pack<C, H> whose name is out of rnn scope.
# 2.1.2 which ends with "Exit" for c and h respectively, when cell_state.c/h is used.
# 2.2 which ends with "Exit" if state_is_tuple is false
for n in self.g.get_nodes():
if n.type == "Exit" and n.name.startswith(rnn_scope_name):
if len(n.input) != 1:
raise ValueError("exit's input count is " + str(len(n.input)) + " instead of 1")
switch = n.inputs[0]
if switch.type != "Switch":
log.debug("Exit has non-Switch input, skip.")
continue
for var_name, funcs in self.switch_checkers.items():
var_checker = funcs[0]
var_exit_connector = funcs[1]
enter_target_input_id = self.check_switch_by_usage_pattern(switch, match, var_checker)
if enter_target_input_id:
log.debug("this is %s exit node", var_name)
var_exit_connector(rnn_node, n, rnn_props)
break
def create_rnn_node(self, rnn_props):
pass
# helper function
def check_switch_by_usage_pattern(self, switch_node, match, check_func):
if switch_node.type != 'Switch':
return None
# the first input is data
merge_node = switch_node.inputs[0]
if merge_node.type != "Merge":
return None
target_node_input_id = None
for merge_input in merge_node.inputs:
if merge_input.type == 'Enter':
target_node_input_id = merge_input.input[0]
log.debug("a Switch >> Merge >> Enter is found called %s", merge_input.inputs[0].name)
break
else:
log.debug("skip the non-Enter input node of the merge_node")
continue
# check whether it is c_initialize or h_initialize
if target_node_input_id:
switch_consumers = self.g.find_output_consumers(switch_node.output[1])
assert len(switch_consumers) == 1
if switch_consumers[0].type == "Identity":
identity_consumers = self.g.find_output_consumers(switch_consumers[0].output[0])
return check_func(target_node_input_id, identity_consumers, match)
log.error("not expected, skip ")
log.warning("is_switch_used_by found no merge>>Enter node")
return None
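    # Hedged summary of the pattern traced above (not taken from the original docs):
    #
    #     Enter(init value) --> Merge --> Switch --(output 1)--> Identity --> consumers
    #                                        ^
    #                                     LoopCond
    #
    # check_func then decides, from the Identity consumers, which rnn variable
    # (c state, h state, ...) the traced Enter input actually initializes.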
@staticmethod
def print_step(level_2, level_1="find_dynamic_run_unit"):
log.debug(level_1 + " >> " + level_2)
def _workaround_fill_ch_init_node(self, initializer_input_id, rnn_props):
node = self.g.get_node_by_output(initializer_input_id)
if node.type != "Fill":
return None
fill_val = node.inputs[1].get_tensor_value()[0]
fill_val_dtype = utils.ONNX_TO_NUMPY_DTYPE[node.inputs[1].dtype]
        # keep these as float32 so all of Concat's inputs share a dtype; the result is cast to int64 below for Tile.
num_direction_node = self.g.make_const(utils.make_name("Const"), np.array([1], dtype=np.float32))
h_node = self.g.make_const(utils.make_name("Const"), np.array([rnn_props.hidden_size], dtype=np.float32))
b_node = rnn_props.batch_size_node
# Concat in OPSET7 does not support int64.
tile_shape = self.g.make_node("Concat", [num_direction_node.output[0], b_node.output[0], h_node.output[0]],
attr={"axis": 0})
# Tile's repeats must be INT64
attr = {"to": onnx_pb.TensorProto.INT64}
tile_shape_int64 = self.g.make_node("Cast", [tile_shape.output[0]], attr)
const_node = self.g.make_const(utils.make_name("Const"), np.array([[[fill_val]]], dtype=fill_val_dtype))
tile_node = self.g.make_node("Tile", [const_node.output[0], tile_shape_int64.output[0]])
self.all_nodes.extend([tile_shape, tile_shape_int64, tile_node])
return tile_node
def _convert_timemajor_transpose(self, node):
if not check_is_timemajor_transpose(node):
log.debug("not found timemajor transpose")
return None
log.debug("found timemajor transpose")
attr = {"perm": np.array([1, 0, 2], dtype=np.int64)}
new_trans = self.g.make_node("Transpose", [node.input[0]], attr=attr,
shapes=[self.g.get_shape(node.output[0])],
dtypes=[self.g.get_dtype(node.input[0])])
self.g.replace_all_inputs(self.g.get_nodes(), node.output[0], new_trans.output[0])
return new_trans
|
import unittest
import unishark
import os
import shutil
from unishark.util import get_interpreter
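# Hedged note: the exit_code == 1 assertions below presume the bundled tests.mock1 /
# tests.mock2 suites contain intentionally failing tests; exit_code == 0 is only expected
# when no suites (or no matching test names) are actually run.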
class TestProgramTestCase(unittest.TestCase):
def setUp(self):
super(TestProgramTestCase, self).setUp()
self.dest = 'results'
if os.path.exists(self.dest):
shutil.rmtree(self.dest)
def tearDown(self):
if os.path.exists(self.dest):
shutil.rmtree(self.dest)
class DefaultTestProgramTestCase(TestProgramTestCase):
def test_sequential_run(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit']
}
}
program = unishark.DefaultTestProgram(dict_conf)
self.assertDictEqual(program.concurrency, {'type': 'threads', 'max_workers': 1, 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_multithreading_on_suites(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2},
}
}
program = unishark.DefaultTestProgram(dict_conf)
self.assertEqual(program.concurrency, {'max_workers': 2, 'type': 'threads', 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_multithreading_on_classes(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 0},
}
}
program = unishark.DefaultTestProgram(dict_conf)
self.assertEqual(program.concurrency, {'max_workers': 0, 'type': 'threads', 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_multithreading_on_suites_and_within_suite(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 2, 'level': 'module'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 8, 'level': 'method'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2, 'type': 'threads'},
}
}
program = unishark.DefaultTestProgram(dict_conf)
self.assertEqual(program.concurrency, {'max_workers': 2, 'type': 'threads', 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_multiprocessing_on_suites(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 2, 'level': 'module'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 8, 'level': 'method'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2, 'type': 'processes'},
}
}
if get_interpreter().startswith('jython'):
with self.assertRaises(ValueError) as cm:
unishark.DefaultTestProgram(dict_conf)
self.assertEqual(cm.exception.message, 'Jython does not support multiprocessing.')
else:
program = unishark.DefaultTestProgram(dict_conf)
self.assertEqual(program.concurrency, {'max_workers': 2, 'type': 'processes', 'timeout': None})
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_illegal_suites_concurrency_type(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 2, 'level': 'module'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 8, 'level': 'method'},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2, 'type': 'processing'},
}
}
with self.assertRaises(ValueError):
unishark.DefaultTestProgram(dict_conf)
self.assertFalse(os.path.exists(self.dest))
def test_program_with_no_suites(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': [],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 2},
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 0)
exp_filenames = ['index.html', 'overview.html', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_program_with_no_reporters_1(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': [],
'concurrency': {'max_workers': 2},
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 1)
self.assertFalse(os.path.exists(self.dest))
def test_program_with_no_reporters_2(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'concurrency': {'max_workers': 2},
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 1)
self.assertFalse(os.path.exists(self.dest))
def test_program_with_name_pattern(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'concurrency': {'max_workers': 2},
                'name_pattern': r'^no_such_prefix\w*'
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 0)
def test_default_suites_concurrency(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
}
}
program = unishark.DefaultTestProgram(dict_conf)
exit_code = program.run()
self.assertEqual(exit_code, 1)
exp_filenames = ['index.html', 'overview.html', 'my_suite_1_result.html', 'my_suite_2_result.html',
'my_suite_1_xunit_result.xml', 'my_suite_2_xunit_result.xml', 'summary_xunit_result.xml']
filenames = os.listdir(os.path.join(self.dest))
self.assertSetEqual(set(filenames), set(exp_filenames))
def test_missing_max_workers(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {}
}
}
with self.assertRaises(KeyError):
program = unishark.DefaultTestProgram(dict_conf)
program.run()
self.assertFalse(os.path.exists(self.dest))
def test_illegal_max_workers_type(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'concurrency': {'max_workers': 'a'}
}
}
with self.assertRaises(ValueError):
program = unishark.DefaultTestProgram(dict_conf)
program.run()
self.assertFalse(os.path.exists(self.dest))
def test_misplacing_max_workers(self):
dict_conf = {
'suites': {
'my_suite_1': {
'package': 'tests.mock1',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module1', 'test_module2']
}
}
},
'my_suite_2': {
'package': 'tests.mock2',
'concurrency': {'max_workers': 4},
'groups': {
'g1': {
'granularity': 'module',
'modules': ['test_module3']
}
}
}
},
'reporters': {
'html': {
'class': 'unishark.HtmlReporter',
'kwargs': {
'dest': self.dest
}
},
'xunit': {
'class': 'unishark.XUnitReporter',
'kwargs': {
'dest': self.dest
}
}
},
'test': {
'suites': ['my_suite_1', 'my_suite_2'],
'reporters': ['html', 'xunit'],
'max_workers': 1
}
}
with self.assertRaises(KeyError) as cm:
program = unishark.DefaultTestProgram(dict_conf)
program.run()
        self.assertIn('Please set "max_workers" in the "concurrency" sub-dict instead.', str(cm.exception))
self.assertFalse(os.path.exists(self.dest))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
# -*- coding: utf-8 -*-
import time
from collections import namedtuple
from application import Application
from middleware import Middleware
from render import render_basic
# TODO: what are some sane-default intervals?
Hit = namedtuple('Hit', 'start_time url pattern status_code '
' elapsed_time content_type')
class StatsMiddleware(Middleware):
def __init__(self):
self.hits = []
self.route_hits = {}
self.url_hits = {}
def request(self, next, request, _route):
start_time = time.time()
try:
resp = next()
resp_status = repr(getattr(resp, 'status_code', type(resp)))
resp_mime_type = resp.content_type.partition(';')[0]
except Exception as e:
# see Werkzeug #388
resp_status = repr(getattr(e, 'code', type(e)))
resp_mime_type = getattr(e, 'content_type', '').partition(';')[0]
raise
finally:
end_time = time.time()
elapsed_time = end_time - start_time
hit = Hit(start_time,
request.path,
_route.rule,
resp_status,
elapsed_time,
resp_mime_type)
self.hits.append(hit)
self.route_hits.setdefault(_route, []).append(hit)
self.url_hits.setdefault(request.path, []).append(hit)
return resp
from math import floor, ceil
import itertools
def hits_minutes_ago(hit_list, minutes=None):
if minutes is None:
minutes = 0
start_time = time.time() - (minutes * 60)
return itertools.dropwhile(lambda h: h.start_time < start_time, hit_list)
def hits_by_status(hit_list):
ret = {}
for hit in hit_list:
try:
ret[hit.status_code].append(hit)
except KeyError:
ret[hit.status_code] = [hit]
return ret
def percentile(unsorted_data, ptile=50):
if ptile > 100:
raise ValueError("it's percentile, not something-else-tile")
if not unsorted_data:
return 0.0 # TODO: hrm, lazy
data = sorted(unsorted_data)
idx = (float(ptile) / 100) * len(data)
    idx_f, idx_c = min(int(floor(idx)), len(data) - 1), min(int(ceil(idx)), len(data) - 1)
return (data[idx_f] + data[idx_c]) / 2.0
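# Worked example (illustrative comment, not part of the original module):
# percentile([3, 1, 4, 2], 50) sorts to [1, 2, 3, 4], computes idx = 0.5 * 4 = 2.0,
# so idx_f == idx_c == 2 and the result is (3 + 3) / 2.0 == 3.0.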
def mean(vals):
if vals:
return sum(vals, 0.0) / len(vals)
else:
return 0.0
def float_round(n):
return n - (n % 2 ** -6)
def get_route_stats(rt_hits):
ret = {}
hbs = hits_by_status(rt_hits)
for status, hits in hbs.items():
ret[status] = cur = {}
durs = [round(h.elapsed_time * 1000, 2) for h in hits]
cur['min'] = min(durs)
cur['max'] = max(durs)
cur['mean'] = mean(durs)
cur['count'] = len(durs)
cur['median'] = percentile(durs, 50)
cur['ninefive'] = percentile(durs, 95)
return ret
def _get_stats_dict(_application):
try:
stats_mw = [mw for mw in _application.middlewares
if isinstance(mw, StatsMiddleware)][0]
except IndexError:
return {'error': "StatsMiddleware doesn't seem to be installed"}
rt_hits = stats_mw.route_hits
return {'resp_counts': dict([(url, len(rh)) for url, rh
in stats_mw.url_hits.items()]),
'route_stats': dict([(rt.rule, get_route_stats(rh)) for rt, rh
in rt_hits.items() if rh])}
def _create_app():
routes = [('/', _get_stats_dict, render_basic)]
mws = [StatsMiddleware()]
app = Application(routes, middlewares=mws)
return app
if __name__ == '__main__':
sapp = _create_app()
sapp.serve()
|
import datetime
from datetime import date
import pytest
from resources.models import Period, Day
from .utils import assert_hours
def daterange(start_date, end_date):
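    """Yield each date from start_date (inclusive) up to end_date (exclusive)."""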
for n in range((end_date - start_date).days):
yield start_date
start_date += datetime.timedelta(days=1)
@pytest.mark.django_db
def test_opening_hours(resource_in_unit):
unit = resource_in_unit.unit
tz = unit.get_tz()
# Regular hours for the whole year
p1 = Period.objects.create(start=date(2015, 1, 1), end=date(2015, 12, 31),
unit=unit, name='regular hours')
for weekday in range(0, 7):
Day.objects.create(period=p1, weekday=weekday,
opens=datetime.time(8, 0),
closes=datetime.time(18, 0))
begin = tz.localize(datetime.datetime(2015, 6, 1))
end = begin + datetime.timedelta(days=30)
hours = resource_in_unit.get_opening_hours(begin, end)
for d in daterange(date(2015, 6, 1), date(2015, 6, 7)):
assert_hours(tz, hours, d, '08:00', '18:00')
# Summer hours
p2 = Period.objects.create(start=date(2015, 6, 1), end=date(2015, 9, 1),
unit=unit, name='summer hours')
for weekday in range(0, 5):
Day.objects.create(period=p2, weekday=weekday,
opens=datetime.time(10, 0),
closes=datetime.time(16, 0))
Day.objects.create(period=p2, weekday=5, closed=True)
Day.objects.create(period=p2, weekday=6, closed=True)
hours = resource_in_unit.get_opening_hours(begin, end)
assert_hours(tz, hours, date(2015, 6, 1), '10:00', '16:00')
assert_hours(tz, hours, date(2015, 6, 2), '10:00', '16:00')
assert_hours(tz, hours, date(2015, 6, 6), None)
assert_hours(tz, hours, date(2015, 6, 7), None)
# Closed June 9
p3 = Period.objects.create(start=date(2015, 6, 9), end=date(2015, 6, 9),
unit=unit, name='closed june9')
Day.objects.create(period=p3, weekday=1, closed=True)
hours = resource_in_unit.get_opening_hours(begin, end)
assert_hours(tz, hours, date(2015, 6, 8), '10:00', '16:00')
assert_hours(tz, hours, date(2015, 6, 9), None)
assert_hours(tz, hours, date(2015, 6, 10), '10:00', '16:00')
# Re-opened the week of June 8
p4 = Period.objects.create(start=date(2015, 6, 8), end=date(2015, 6, 14),
unit=unit, name='re-opened')
for d in range(0, 7):
Day.objects.create(period=p4, weekday=d, opens=datetime.time(12, 0), closes=datetime.time(14, 0))
hours = resource_in_unit.get_opening_hours(begin, end)
assert_hours(tz, hours, date(2015, 6, 8), '12:00', '14:00')
assert_hours(tz, hours, date(2015, 6, 9), None)
assert_hours(tz, hours, date(2015, 6, 10), '12:00', '14:00')
# Dayless period; is closed
Period.objects.create(start=date(2015, 6, 10), end=date(2015, 6, 14),
unit=unit, name='dayless')
hours = resource_in_unit.get_opening_hours(begin, end)
assert_hours(tz, hours, date(2015, 6, 10), None)
assert_hours(tz, hours, date(2015, 6, 11), None)
# Period that overlaps the parent but is not fully contained in it
p6 = Period.objects.create(start=date(2014, 12, 30), end=date(2015, 1, 10),
unit=unit, name='overlapping')
Day.objects.create(period=p6, weekday=1, opens=datetime.time(10, 0), closes=datetime.time(14, 0))
Day.objects.create(period=p6, weekday=3, opens=datetime.time(10, 0), closes=datetime.time(14, 0))
Day.objects.create(period=p6, weekday=4, opens=datetime.time(10, 0), closes=datetime.time(14, 0))
begin = tz.localize(datetime.datetime(2014, 12, 29))
end = begin + datetime.timedelta(days=30)
hours = resource_in_unit.get_opening_hours(begin, end)
from pprint import pprint
pprint(hours)
assert_hours(tz, hours, date(2014, 12, 29), None)
assert_hours(tz, hours, date(2014, 12, 30), '10:00', '14:00')
assert_hours(tz, hours, date(2014, 12, 31), None)
assert_hours(tz, hours, date(2015, 1, 1), '10:00', '14:00')
assert_hours(tz, hours, date(2015, 1, 2), '10:00', '14:00')
assert_hours(tz, hours, date(2015, 1, 3), None)
|
import os
import numpy as np
import pandas as pd
import pytest
from scvi.data import synthetic_iid
from scvi.model import PEAKVI, SCANVI, SCVI, TOTALVI
def single_pass_for_online_update(model):
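    """Run one forward/backward pass on the first 10 cells so that parameter .grad tensors are populated for inspection."""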
dl = model._make_data_loader(model.adata, indices=range(0, 10))
for i_batch, tensors in enumerate(dl):
_, _, scvi_loss = model.module(tensors)
scvi_loss.loss.backward()
def test_scvi_online_update(save_path):
n_latent = 5
adata1 = synthetic_iid()
model = SCVI(adata1, n_latent=n_latent)
model.train(1, check_val_every_n_epoch=1)
dir_path = os.path.join(save_path, "saved_model/")
model.save(dir_path, overwrite=True)
# also test subset var option
adata2 = synthetic_iid(run_setup_anndata=False, n_genes=110)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
model2 = SCVI.load_query_data(adata2, dir_path, inplace_subset_query_vars=True)
model2.train(max_epochs=1, plan_kwargs=dict(weight_decay=0.0))
model2.get_latent_representation()
# encoder linear layer equal
one = (
model.module.z_encoder.encoder.fc_layers[0][0]
.weight.detach()
.cpu()
.numpy()[:, : adata1.shape[1]]
)
two = (
model2.module.z_encoder.encoder.fc_layers[0][0]
.weight.detach()
.cpu()
.numpy()[:, : adata1.shape[1]]
)
np.testing.assert_equal(one, two)
assert (
np.sum(
model2.module.z_encoder.encoder.fc_layers[0][0]
.weight.grad.cpu()
.numpy()[:, : adata1.shape[1]]
)
== 0
)
# dispersion
assert model2.module.px_r.requires_grad is False
# library encoder linear layer
assert model2.module.l_encoder.encoder.fc_layers[0][0].weight.requires_grad is True
# 5 for n_latent, 4 for batches
assert model2.module.decoder.px_decoder.fc_layers[0][0].weight.shape[1] == 9
# test options
adata1 = synthetic_iid()
model = SCVI(
adata1,
n_latent=n_latent,
n_layers=2,
encode_covariates=True,
use_batch_norm="encoder",
use_layer_norm="none",
)
model.train(1, check_val_every_n_epoch=1)
dir_path = os.path.join(save_path, "saved_model/")
model.save(dir_path, overwrite=True)
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
model2 = SCVI.load_query_data(adata2, dir_path, freeze_expression=True)
model2.train(max_epochs=1, plan_kwargs=dict(weight_decay=0.0))
# deactivate no grad decorator
model2.get_latent_representation()
# pytorch lightning zeros the grad, so this will get a grad to inspect
single_pass_for_online_update(model2)
grad = model2.module.z_encoder.encoder.fc_layers[0][0].weight.grad.cpu().numpy()
# expression part has zero grad
assert np.sum(grad[:, :-4]) == 0
# categorical part has non-zero grad
assert np.sum(grad[:, -4:]) != 0
grad = model2.module.decoder.px_decoder.fc_layers[0][0].weight.grad.cpu().numpy()
    # expression part of the decoder's first linear layer has zero grad (it is frozen)
assert np.sum(grad[:, :-4]) == 0
# do not freeze expression
model3 = SCVI.load_query_data(
adata2,
dir_path,
freeze_expression=False,
freeze_batchnorm_encoder=True,
freeze_decoder_first_layer=False,
)
model3.train(max_epochs=1)
model3.get_latent_representation()
assert model3.module.z_encoder.encoder.fc_layers[0][1].momentum == 0
# batch norm weight in encoder layer
assert model3.module.z_encoder.encoder.fc_layers[0][1].weight.requires_grad is False
single_pass_for_online_update(model3)
grad = model3.module.z_encoder.encoder.fc_layers[0][0].weight.grad.cpu().numpy()
# linear layer weight in encoder layer has non-zero grad
assert np.sum(grad[:, :-4]) != 0
grad = model3.module.decoder.px_decoder.fc_layers[0][0].weight.grad.cpu().numpy()
# linear layer weight in decoder layer has non-zero grad
assert np.sum(grad[:, :-4]) != 0
# do not freeze batchnorm
model3 = SCVI.load_query_data(adata2, dir_path, freeze_batchnorm_encoder=False)
model3.train(max_epochs=1)
model3.get_latent_representation()
def test_scvi_library_size_update(save_path):
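    """Check that the per-batch library-size priors grow from the 2 reference batches to 4 after load_query_data, keeping the reference columns intact."""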
n_latent = 5
adata1 = synthetic_iid()
model = SCVI(adata1, n_latent=n_latent, use_observed_lib_size=False)
assert (
getattr(model.module, "library_log_means", None) is not None
and model.module.library_log_means.shape == (1, 2)
and model.module.library_log_means.count_nonzero().item() == 2
)
assert getattr(
model.module, "library_log_vars", None
) is not None and model.module.library_log_vars.shape == (1, 2)
model.train(1, check_val_every_n_epoch=1)
dir_path = os.path.join(save_path, "saved_model/")
model.save(dir_path, overwrite=True)
# also test subset var option
adata2 = synthetic_iid(run_setup_anndata=False, n_genes=110)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
model2 = SCVI.load_query_data(adata2, dir_path, inplace_subset_query_vars=True)
assert (
getattr(model2.module, "library_log_means", None) is not None
and model2.module.library_log_means.shape == (1, 4)
and model2.module.library_log_means[:, :2].equal(model.module.library_log_means)
and model2.module.library_log_means.count_nonzero().item() == 4
)
assert (
getattr(model2.module, "library_log_vars", None) is not None
and model2.module.library_log_vars.shape == (1, 4)
and model2.module.library_log_vars[:, :2].equal(model.module.library_log_vars)
)
def test_scanvi_online_update(save_path):
# ref has semi-observed labels
n_latent = 5
adata1 = synthetic_iid(run_setup_anndata=False)
new_labels = adata1.obs.labels.to_numpy()
new_labels[0] = "Unknown"
adata1.obs["labels"] = pd.Categorical(new_labels)
SCANVI.setup_anndata(adata1, batch_key="batch", labels_key="labels")
model = SCANVI(
adata1,
"Unknown",
n_latent=n_latent,
encode_covariates=True,
)
model.train(max_epochs=1, check_val_every_n_epoch=1)
dir_path = os.path.join(save_path, "saved_model/")
model.save(dir_path, overwrite=True)
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
adata2.obs["labels"] = "Unknown"
model = SCANVI.load_query_data(adata2, dir_path, freeze_batchnorm_encoder=True)
model.train(max_epochs=1)
model.get_latent_representation()
model.predict()
# ref has fully-observed labels
n_latent = 5
adata1 = synthetic_iid(run_setup_anndata=False)
new_labels = adata1.obs.labels.to_numpy()
adata1.obs["labels"] = pd.Categorical(new_labels)
SCANVI.setup_anndata(adata1, batch_key="batch", labels_key="labels")
model = SCANVI(adata1, "Unknown", n_latent=n_latent, encode_covariates=True)
model.train(max_epochs=1, check_val_every_n_epoch=1)
dir_path = os.path.join(save_path, "saved_model/")
model.save(dir_path, overwrite=True)
# query has one new label
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
new_labels = adata2.obs.labels.to_numpy()
new_labels[0] = "Unknown"
adata2.obs["labels"] = pd.Categorical(new_labels)
model2 = SCANVI.load_query_data(adata2, dir_path, freeze_batchnorm_encoder=True)
model2._unlabeled_indices = np.arange(adata2.n_obs)
model2._labeled_indices = []
model2.train(max_epochs=1, plan_kwargs=dict(weight_decay=0.0))
model2.get_latent_representation()
model2.predict()
# test classifier frozen
class_query_weight = (
model2.module.classifier.classifier[0]
.fc_layers[0][0]
.weight.detach()
.cpu()
.numpy()
)
class_ref_weight = (
model.module.classifier.classifier[0]
.fc_layers[0][0]
.weight.detach()
.cpu()
.numpy()
)
    # with weight decay disabled above, the frozen classifier weights stay unchanged
np.testing.assert_allclose(class_query_weight, class_ref_weight, atol=1e-07)
# test classifier unfrozen
model2 = SCANVI.load_query_data(adata2, dir_path, freeze_classifier=False)
model2._unlabeled_indices = np.arange(adata2.n_obs)
model2._labeled_indices = []
model2.train(max_epochs=1)
class_query_weight = (
model2.module.classifier.classifier[0]
.fc_layers[0][0]
.weight.detach()
.cpu()
.numpy()
)
class_ref_weight = (
model.module.classifier.classifier[0]
.fc_layers[0][0]
.weight.detach()
.cpu()
.numpy()
)
with pytest.raises(AssertionError):
np.testing.assert_allclose(class_query_weight, class_ref_weight, atol=1e-07)
# test saving and loading of online scanvi
a = synthetic_iid(run_setup_anndata=False)
ref = a[a.obs["labels"] != "label_2"].copy() # only has labels 0 and 1
SCANVI.setup_anndata(ref, batch_key="batch", labels_key="labels")
m = SCANVI(ref, "label_2")
m.train(max_epochs=1)
m.save(save_path, overwrite=True)
query = a[a.obs["labels"] != "label_0"].copy()
query = synthetic_iid() # has labels 0 and 2. 2 is unknown
m_q = SCANVI.load_query_data(query, save_path)
m_q.save(save_path, overwrite=True)
m_q = SCANVI.load(save_path, adata=query)
m_q.predict()
m_q.get_elbo()
def test_totalvi_online_update(save_path):
# basic case
n_latent = 5
adata1 = synthetic_iid()
model = TOTALVI(adata1, n_latent=n_latent, use_batch_norm="decoder")
model.train(1, check_val_every_n_epoch=1)
dir_path = os.path.join(save_path, "saved_model/")
model.save(dir_path, overwrite=True)
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
model2 = TOTALVI.load_query_data(adata2, dir_path)
assert model2.module.background_pro_alpha.requires_grad is True
model2.train(max_epochs=1)
model2.get_latent_representation()
# batch 3 has no proteins
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
adata2.obsm["protein_expression"][adata2.obs.batch == "batch_3"] = 0
# load from model in memory
model3 = TOTALVI.load_query_data(adata2, model)
model3.module.protein_batch_mask[2]
model3.module.protein_batch_mask[3]
model3.train(max_epochs=1)
model3.get_latent_representation()
def test_peakvi_online_update(save_path):
n_latent = 5
adata1 = synthetic_iid()
model = PEAKVI(adata1, n_latent=n_latent)
model.train(1, save_best=False)
dir_path = os.path.join(save_path, "saved_model/")
model.save(dir_path, overwrite=True)
# also test subset var option
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
model2 = PEAKVI.load_query_data(adata2, dir_path)
model2.train(max_epochs=1, weight_decay=0.0, save_best=False)
model2.get_latent_representation()
# encoder linear layer equal for peak features
one = (
model.module.z_encoder.encoder.fc_layers[0][0]
.weight.detach()
.cpu()
.numpy()[:, : adata1.shape[1]]
)
two = (
model2.module.z_encoder.encoder.fc_layers[0][0]
.weight.detach()
.cpu()
.numpy()[:, : adata1.shape[1]]
)
np.testing.assert_equal(one, two)
assert (
np.sum(
model2.module.z_encoder.encoder.fc_layers[0][0]
.weight.grad.cpu()
.numpy()[:, : adata1.shape[1]]
)
== 0
)
# test options
adata1 = synthetic_iid()
model = PEAKVI(
adata1,
n_latent=n_latent,
encode_covariates=True,
)
model.train(1, check_val_every_n_epoch=1, save_best=False)
dir_path = os.path.join(save_path, "saved_model/")
model.save(dir_path, overwrite=True)
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["batch"] = adata2.obs.batch.cat.rename_categories(["batch_2", "batch_3"])
model2 = PEAKVI.load_query_data(adata2, dir_path, freeze_expression=True)
model2.train(max_epochs=1, weight_decay=0.0, save_best=False)
# deactivate no grad decorator
model2.get_latent_representation()
# pytorch lightning zeros the grad, so this will get a grad to inspect
single_pass_for_online_update(model2)
grad = model2.module.z_encoder.encoder.fc_layers[0][0].weight.grad.cpu().numpy()
# expression part has zero grad
assert np.sum(grad[:, :-4]) == 0
# categorical part has non-zero grad
assert np.count_nonzero(grad[:, -4:]) > 0
# do not freeze expression
model3 = PEAKVI.load_query_data(
adata2,
dir_path,
freeze_expression=False,
freeze_decoder_first_layer=False,
)
model3.train(max_epochs=1, save_best=False, weight_decay=0.0)
model3.get_latent_representation()
single_pass_for_online_update(model3)
grad = model3.module.z_encoder.encoder.fc_layers[0][0].weight.grad.cpu().numpy()
# linear layer weight in encoder layer has non-zero grad
assert np.count_nonzero(grad[:, :-4]) != 0
grad = model3.module.z_decoder.px_decoder.fc_layers[0][0].weight.grad.cpu().numpy()
# linear layer weight in decoder layer has non-zero grad
assert np.count_nonzero(grad[:, :-4]) != 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
import getpaid.abstract_mixin
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.GETPAID_ORDER_MODEL),
]
operations = [
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('amount', models.DecimalField(max_digits=20, decimal_places=4, verbose_name='amount')),
('currency', models.CharField(verbose_name='currency', max_length=3)),
('status', models.CharField(choices=[('new', 'new'), ('in_progress', 'in progress'), ('partially_paid', 'partially paid'), ('paid', 'paid'), ('failed', 'failed')], default='new', verbose_name='status', db_index=True, max_length=20)),
('backend', models.CharField(verbose_name='backend', max_length=50)),
('created_on', models.DateTimeField(db_index=True, auto_now_add=True, verbose_name='created on')),
('paid_on', models.DateTimeField(blank=True, default=None, verbose_name='paid on', db_index=True, null=True)),
('amount_paid', models.DecimalField(max_digits=20, default=0, decimal_places=4, verbose_name='amount paid')),
('external_id', models.CharField(blank=True, null=True, verbose_name='external id', max_length=64)),
('description', models.CharField(blank=True, null=True, verbose_name='description', max_length=128)),
('order', models.ForeignKey(related_name='payments', to=settings.GETPAID_ORDER_MODEL)),
],
options={
'verbose_name_plural': 'Payments',
'ordering': ('-created_on',),
'verbose_name': 'Payment',
},
bases=(models.Model, getpaid.abstract_mixin.AbstractMixin),
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
file="lines.txt"
f = open(file,"r")
for line in f:
print line
|
import sys
import time
import frida
import random
import traceback
from time import sleep
from bLib.const import *
from bLib.Mutator import Mutator
from bLib.Executor import Executor
from bLib.FuzzServer import FuzzServer
from bLib.Cov import BreakPointCoverage
from bLib.FuzzClient import BreakpointClient
inp_path = sys.argv[1]
args = ['test.exe', inp_path, 'loop']
options = {
'id': sys.argv[1],
'idir': 'in_test',
'odir': 'out_test',
'target_module': 'test.exe',
'target_offset': 0x1000,
'cov_modules': ['test.exe'],
'module_info_files': ['test.bbs'],
'inp_path': inp_path
}
class Server(FuzzServer):
def __init__(self, args, **options):
super().__init__(args, **options)
self.client = BreakpointClient(args, **options)
self.mutator = Mutator(self.client, **options)
def prepare_inp(self, buf):
try:
f = open(inp_path, 'wb')
f.write(buf)
f.close()
        except Exception:
            # report why the testcase could not be written, but keep fuzzing
            traceback.print_exc()
def _dry_run(self):
'''
TODO
        handle crash and timeout
'''
self.logger.info('dryrun')
for testcase in self.queue:
self.logger.info(testcase)
self.prepare_inp(testcase.read())
fault = self.client.exec_one(INFINITE)
if fault == FAULT_CRASH or fault == FAULT_ERROR:
self.logger.info('testcase cause crash')
return FUZZER_STOP
elif fault == FAULT_TMOUT:
self.logger.info('testcase cause timeout')
return FUZZER_STOP
if not self.running:
break
self.logger.info('dryrun finished')
self.logger.info('hitcount: %d' % self.client.get_hitcount())
def _fuzz_loop(self):
self.logger.info('fuzz loop')
self.nexecs = 0
self.starttime = time.monotonic()
while self.running:
testcase = random.choice(self.queue)
self.logger.info('mutating: %s' % testcase.fname)
orig_bytes = testcase.read()
for i in range(2000):
if not self.running:
break
buf = self.mutator.havoc(orig_bytes[:])
fault = self.fuzz_one(buf)
if fault == FAULT_TMOUT:
break
self.nexecs += 1
if (self.nexecs % 1000 == 0):
self.nexecs = 0
self.endtime = time.monotonic()
interval = self.endtime-self.starttime
self.starttime = self.endtime
print ('exec/s: ', 1000 / interval)
self.logger.info('splice')
buf = self.mutator.splice(orig_bytes[:], self.queue)
self.prepare_inp(buf)
self.fuzz_one(buf)
self.sync()
fuzzserver = Server(args, **options)
fuzzserver.start()
|
import sys
import itertools as it
from collections import defaultdict
from random import random
import numpy as np
import numpy.linalg as la
from numpy.random import choice
import mdtraj as mdj
from wepy.boundary_conditions.boundary import BoundaryConditions
from wepy.resampling.distances.openmm import OpenMMRebindingDistance
class RebindingBC(BoundaryConditions):
WARP_INSTRUCT_DTYPE = np.dtype([('target', int)])
    WARP_AUX_DTYPES = {'cycle' : np.int_, 'passage_time' : np.float64, 'warped_walker_weight' : np.float64}
WARP_AUX_SHAPES = {'cycle' : (1,), 'passage_time' : (1,), 'warped_walker_weight' : (1,)}
def __init__(self, initial_states=None,
initial_weights=None,
cutoff_distance=0.2,
topology=None,
ligand_idxs=None,
binding_site_idxs=None,
comp_xyz=None,
alternative_maps=None):
# test input
assert initial_states is not None, "Must give a set of initial states"
assert topology is not None, "Must give a reference topology"
assert comp_xyz is not None, "Must give coordinates for bound state"
assert ligand_idxs is not None
assert binding_site_idxs is not None
assert type(cutoff_distance) is float
self.initial_states = initial_states
if initial_weights is None:
self.initial_weights = np.array([1] * len(initial_states))
else:
self.initial_weights = initial_weights
self.cutoff_distance = cutoff_distance
self.topology = topology
self.native_distance = OpenMMRebindingDistance(topology=topology,
ligand_idxs=ligand_idxs,
binding_site_idxs=binding_site_idxs,
alt_maps=alternative_maps,
comp_xyz=comp_xyz)
def check_boundaries(self, nat_rmsd):
# test to see if the ligand is re-bound
rebound = False
if nat_rmsd <= self.cutoff_distance:
rebound = True
boundary_data = {'nat_rmsd' : nat_rmsd}
return rebound, boundary_data
def warp(self, walker, cycle):
# choose a state randomly from the set of initial states
warped_state = choice(self.initial_states, 1, p=self.initial_weights/np.sum(self.initial_weights))[0]
# set the initial state into a new walker object with the same weight
warped_walker = type(walker)(state=warped_state, weight=walker.weight)
# thus there is only one record
warp_record = (0,)
# collect the passage time
# time is returned as an array because it is a feature of the
# walker, and domain specific. I.e. domain specific values are
# of type `array` while weights will always be floats in all
# applications.
time = walker.time_value()
warp_data = {'cycle' : np.array([cycle]), 'passage_time' : time,
'warped_walker_weight' : np.array([walker.weight])}
# make the warp data mapping
return warped_walker, warp_record, warp_data
def warp_walkers(self, walkers, cycle, debug_prints=False):
new_walkers = []
warped_walkers_records = []
cycle_bc_records = []
# boundary data is collected for each walker every cycle
cycle_boundary_data = defaultdict(list)
# warp data is collected each time a warp occurs
cycle_warp_data = defaultdict(list)
native_rmsds = self.native_distance.get_rmsd_native(walkers)
for walker_idx, walker in enumerate(walkers):
            # check whether this walker has re-bound, using its native RMSD as the boundary datum
rebound, boundary_data = self.check_boundaries(native_rmsds[walker_idx])
# add boundary data for this walker
for key, value in boundary_data.items():
cycle_boundary_data[key].append(value)
            # if the walker has re-bound we need to warp it
if rebound:
# warp the walker
warped_walker, warp_record, warp_data = self.warp(walker,cycle)
# save warped_walker in the list of new walkers to return
new_walkers.append(warped_walker)
# save the record of the walker
warped_walkers_records.append( (walker_idx, warp_record) )
# save warp data
for key, value in warp_data.items():
cycle_warp_data[key].append(value)
# DEBUG
if debug_prints:
sys.stdout.write('REBINDING observed at {} \n'.format(
warp_data['passage_time']))
sys.stdout.write('Warped Walker Weight = {} \n'.format(
warp_data['warped_walker_weight']))
# no warping so just return the original walker
else:
new_walkers.append(walker)
# convert aux datas to np.arrays
for key, value in cycle_warp_data.items():
cycle_warp_data[key] = np.array(value)
for key, value in cycle_boundary_data.items():
cycle_boundary_data[key] = np.array(value)
return new_walkers, warped_walkers_records, cycle_warp_data, \
cycle_bc_records, cycle_boundary_data
|
import os, sys
def get_token():
config_file = os.path.join(os.path.expanduser('~'), '.todoist')
token = None
if os.path.isfile(config_file):
with open(config_file) as f:
token = f.read().strip()
if not token:
sys.exit('Put your Todoist API token in ~/.todoist')
else:
return token
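# Hedged usage sketch (not part of the original module): it only demonstrates
# calling get_token(); the ~/.todoist file and its contents are assumed to exist.
if __name__ == '__main__':
    token = get_token()
    print('Loaded Todoist token ({} characters)'.format(len(token)))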
|
import urllib2
from BeautifulSoup import BeautifulSoup
from win32com.client import Dispatch
import csv, sys, os
import codecs
import time
import datetime
import win32file
import random
import re
def get_data(majors):
for con in majors:
print con['href']
url=con['href']
url = 'https://servicios.dgae.unam.mx/Febrero2009/resultados/' + url
request = urllib2.Request( url )
page = urllib2.urlopen( request )
soup = BeautifulSoup(page)
#write a copy of the contents to a local file
filename = re.search('[0-9]+\.html',url)
f = open(filename.group(0), 'w')
f.write(str(soup))
        f.close()
data = []
for i in soup.body:
if i.string is not None:
data.append(i.string.encode('utf-8'))
soup.html.body.center.h1.contents
regmajor = re.compile('[ \n]+$')
major = regmajor.split(soup.html.body.center.h1.contents[0])[0].encode('ISO-8859-1')
faculty = regmajor.split(soup.html.body.center.h1.contents[2])[0].encode('ISO-8859-1')
i=1
folio = re.compile('([0-9]+) +([A-Z])')
a = re.compile('([0-9]+) +(A)')
while i<len(data):
m = folio.search(data[i])
if m :
row = [m.group(1),m.group(2),'R',major,faculty]
writer.writerow(row)
print 'yes'
i=i+1
else:
if i<len(data)-1:
m = a.search(data[i+1])
if m:
row = [data[i],m.group(1).encode('utf-8'),m.group(2).encode('utf-8'),major,faculty]
writer.writerow(row)
else:
row = [data[i],data[i+1],'R',major,faculty]
writer.writerow(row)
i=i+2
else:
i=i+2
time.sleep(20)
return
results=open('results.csv','wb')
writer = csv.writer(results,dialect='excel')
#'https://servicios.dgae.unam.mx/Febrero2009/resultados/15.html'
#
#'https://servicios.dgae.unam.mx/Febrero2009/resultados/35.html'
#'https://servicios.dgae.unam.mx/Febrero2009/resultados/45.html'
url='https://servicios.dgae.unam.mx/Febrero2009/resultados/25.html'
request = urllib2.Request( url )
page = urllib2.urlopen( request )
soup = BeautifulSoup(page)
majors=soup.findAll('a',href=re.compile("^[1234]/*"))
for i in majors:
print i['href']
get_data(majors)
results.close()
|
"""User auth token columns
Revision ID: bdb195ed95bb
Revises: 534c4594ed29
Create Date: 2016-07-28 15:38:18.889151
"""
# revision identifiers, used by Alembic.
revision = 'bdb195ed95bb'
down_revision = '534c4594ed29'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('auth_token', sa.Unicode(length=255), nullable=True))
op.add_column('users', sa.Column('auth_token_created', sa.DateTime(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'auth_token_created')
op.drop_column('users', 'auth_token')
### end Alembic commands ###
|
from __future__ import print_function
from . import utils
from .utils import *
from . import asp
from .asp import *
from . import nn
from .nn import *
__all__ = utils.__all__
__all__ += asp.__all__
__all__ += nn.__all__
|
import sqlite3
class db (object):
def __init__(self, filename):
self._ctx = sqlite3.connect(filename)
self._ctx.cursor().execute('''
create table if not exists guild (
guildid integer primary key,
channel integer null
)
''')
self._ctx.commit()
def set_channel(self, guildid, channelid):
self._ctx.cursor().execute('''
insert into guild (guildid, channel) values (?, ?)
on conflict (guildid) do update set channel = ?
''', [guildid, channelid, channelid])
self._ctx.commit()
def get_guilds(self):
cur = self._ctx.cursor()
cur.execute('''
select guildid, channel from guild
''')
return cur.fetchall()
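# Hedged usage sketch (not part of the original module): the guild and channel
# ids below are made up, and ':memory:' is used so nothing is written to disk.
if __name__ == '__main__':
    store = db(':memory:')
    store.set_channel(1234, 5678)
    store.set_channel(1234, 9999)  # upsert: the stored channel is replaced
    print(store.get_guilds())      # expected: [(1234, 9999)]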
|
from typing import Tuple, Union, Dict, List, Iterable, Optional
from torch.utils.data import Dataset
from .common import LABEL_KEY, SENT_KEY, ANTI_KEY, form_sentence, chunks_from_iterable
class LevyHolt(Dataset):
def __init__(
self, txt_file: str, num_patterns: int = 1,
num_tokens_per_pattern: int = 1, only_sep: bool = True,
use_antipatterns: bool = False,
training: bool = False, pattern_chunk_size: int = 5
):
self.training = training
self.pattern_chunk_size = pattern_chunk_size
self.num_patterns = num_patterns
self.num_tokens_per_pattern = num_tokens_per_pattern
self.only_sep = only_sep
self.use_antipatterns = use_antipatterns
self.data = self.load_dataset(txt_file)
def load_dataset(self, txt_file):
data = []
with open(txt_file) as f:
for line in f:
hypo, prem, label = line.strip().split('\t')
hypo = tuple(h.strip() for h in hypo.split(','))
prem = tuple(p.strip() for p in prem.split(','))
label = label == 'True'
data.extend(self.create_instances(prem, hypo, label))
return data
def create_sentence(self, pattern_idx: int, prem: Tuple[str, str, str],
hypo: Tuple[str, str, str]) -> str:
sentence = form_sentence(
' '.join(prem), ' '.join(hypo),
pattern_idx, self.num_tokens_per_pattern, self.only_sep
)
return sentence
def create_single_instance(
self,
prem: Tuple[str, str, str],
hypo: Tuple[str, str, str],
label: bool,
pattern_indices: Iterable[int],
antipattern_indices: Optional[Iterable[int]]
) -> Dict[str, Union[bool, str]]:
inst = {}
inst[SENT_KEY] = [
self.create_sentence(pattern_idx, prem, hypo)
for pattern_idx in pattern_indices
]
if self.use_antipatterns:
assert antipattern_indices is not None, "Internal Error"
inst[ANTI_KEY] = [
self.create_sentence(pattern_idx, prem, hypo)
for pattern_idx in antipattern_indices
]
inst[LABEL_KEY] = 1 if label else 0
return inst
def create_instances(
self,
prem: Tuple[str, str, str],
hypo: Tuple[str, str, str],
label: bool
) -> List[Dict[str, Union[bool, str]]]:
instances = []
if self.training:
chunked = [
chunks_from_iterable(
range(self.num_patterns), self.pattern_chunk_size),
chunks_from_iterable(range(self.num_patterns, 2*self.num_patterns),
self.pattern_chunk_size)
]
for pattern_chunk, antipattern_chunk in zip(*chunked):
inst = self.create_single_instance(
prem, hypo, label, pattern_chunk, antipattern_chunk)
instances.append(inst)
else:
inst = self.create_single_instance(
prem, hypo, label,
range(self.num_patterns), range(self.num_patterns, 2*self.num_patterns))
instances.append(inst)
return instances
def __getitem__(self, index):
inst = self.data[index]
if self.use_antipatterns:
anti = inst[ANTI_KEY]
else:
anti = None
return inst[SENT_KEY], anti, inst[LABEL_KEY]
def __len__(self):
return len(self.data)
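# Hedged usage sketch (not part of the original module): it writes a tiny file in
# the expected "hypothesis<TAB>premise<TAB>True/False" format, where each side is a
# comma-separated (subject, predicate, object) triple. The example line and the
# pattern settings are made up; form_sentence from .common does the real formatting.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write('google, acquired, youtube\tgoogle, bought, youtube\tTrue\n')
        path = tmp.name
    dataset = LevyHolt(path, num_patterns=1, training=False)
    print(len(dataset), dataset[0])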
|
"""Load all entry poins"""
import os
import sys
from logging import basicConfig
from argparse import ArgumentParser, SUPPRESS, REMAINDER
from wmc import __version__
from wmc.dispatch import load_entry_points
def help_commands():
"""Print the command help."""
commands = load_entry_points()
for cls in commands.values():
cmd = cls()
text = '{:>14} v{:.5} - {}'.format(cmd.__class__.__name__, cmd.__version__, cmd.help)
print(text)
def create_parse(commands):
"""Create the main parser"""
parser = ArgumentParser(
prog='wmc',
description='Watch me coding, a toolbox',
epilog='Copyright 2021 AxJu | WMCv{}'.format(__version__),
)
parser.add_argument(
'-V', '--version', action='version',
version='%(prog)s v{}'.format(__version__),
)
parser.add_argument(
'-v', '--verbose', action='count', default=0,
help='verbosity (-v, -vv, etc)'
)
parser.add_argument(
'-s', '--settings', default='data.json',
help='the settings file'
)
parser.add_argument(
'-H', '--help-commands', action='store_true',
help='some command infos'
)
parser.add_argument(
'command', nargs='?', choices=commands,
help='select one command'
)
parser.add_argument(
'path', nargs='?', default=os.getcwd(),
help='path to the project'
)
parser.add_argument('args', help=SUPPRESS, nargs=REMAINDER)
return parser
def main(argv=None):
"""Create parser und run the dispatch"""
commands = load_entry_points()
parser = create_parse(commands.keys())
args = parser.parse_args(argv or sys.argv[1:])
if args.verbose:
level = args.verbose * 10 if args.verbose <= 5 else 50
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
basicConfig(level=level, format=log_format)
if args.help_commands:
return help_commands()
if args.command:
cmd = commands[args.command](args.path, args.settings)
try:
return cmd.run(args.args)
except Exception as exc:
if args.verbose:
raise
            print('Oh no, an error :(')
print('Error:', exc)
print('Run with --verbose for more information.')
return 0
return parser.print_help()
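# Hedged CLI sketch derived from the parser above (commands come from entry points,
# so concrete command names depend on what is installed):
#   wmc --help-commands          # list available commands and their versions
#   wmc -v <command> <path>      # run a command on a project directory with logging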
|
#!/usr/bin/env python
import pandas as pd
import sys
from argparse import ArgumentParser
def read_df(f):
df = pd.read_csv(f, header=0, sep="\t", index_col=0)
return df
def calc_means(dfsum, sample_info, info_col):
# Create dictionary to rename columns to their corresponding samplegroup
rename_dict = sample_info.loc[:,info_col].to_dict()
sample_means = pd.DataFrame()
for samplegroup in set(rename_dict.values()):
# Rename columns on the fly then calculate mean across axis 1
_ = pd.DataFrame(dfsum.rename(columns=rename_dict).loc[:, samplegroup].mean(axis=1))
_.columns=[samplegroup]
sample_means = pd.concat([sample_means,_], axis=1)
return sample_means
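# Hedged illustration (not part of the original script): a tiny in-memory example of
# what calc_means expects -- dfsum with samples as columns and sample_info mapping each
# sample to a group via `info_col`. The sample and group names here are made up.
def _example_calc_means():
    dfsum = pd.DataFrame({"s1": [1, 2], "s2": [3, 4]}, index=["k1", "k2"])
    sample_info = pd.DataFrame({"group": ["g1", "g1"]}, index=["s1", "s2"])
    # both samples belong to group g1, so the result is their row-wise mean
    return calc_means(dfsum, sample_info, "group")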
def main(args):
sample_info = read_df(args.sample_info)
annot = read_df(args.annot)
annot_col = annot.columns[0]
tax = read_df(args.tax)
abund = read_df(args.abund)
# Merge annotation with taxonomy
df = pd.merge(annot, tax, left_index=True, right_index=True, how="left")
# Merge with abundance
df = pd.merge(df, abund, left_index=True, right_index=True)
# Sum to annotation and taxonomy
dfsum = df.groupby([args.rank,annot_col]).sum()
# Calculate mean across sample groups
sample_means = calc_means(dfsum, sample_info, args.info_col)
# Write to file
sample_means.to_csv(sys.stdout, sep="\t")
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("sample_info", type=str,
help="Sample info file")
parser.add_argument("info_col", type=str,
help="Column to groupby in sample info file")
parser.add_argument("annot", type=str,
help="Annotation file (e.g. kos.parsed.tsv")
parser.add_argument("tax", type=str,
help="Taxonomic classification file")
parser.add_argument("abund", type=str,
help="Abundance file")
parser.add_argument("-r", "--rank", default="kingdom",
help="Taxonomic rank to groupby")
args = parser.parse_args()
main(args)
|
import random
import time
f = open("python-basic-project/unit09/data.txt")
lines = f.readlines()
f.close()
random.shuffle(lines)
for line in lines:
line = line.strip()
print(line)
start = time.time()
user = input("")
end = time.time()
if line.strip() == user:
elapsed = end - start
speed = (len(line) / elapsed) * 60
print("타수: ", speed)
|
import PySAM.BatteryStateful as bt
def test_stateful():
b = bt.new()
params = {"control_mode": 0, "input_current": 1, "chem": 1, "nominal_energy": 10, "nominal_voltage": 500,
"initial_SOC": 50.000, "maximum_SOC": 95.000, "minimum_SOC": 5.000, "dt_hr": 1.000, "leadacid_tn": 0.000,
"leadacid_qn": 0.000, "leadacid_q10": 0.000, "leadacid_q20": 0.000, "voltage_choice": 0,
"Vnom_default": 3.600, "resistance": 0.0001, "Vfull": 4.100, "Vexp": 4.050, "Vnom": 3.400, "Qfull": 2.250,
"Qexp": 0.040, "Qnom": 2.000, "C_rate": 0.200, "mass": 507.000, "surface_area": 2.018, "Cp": 1004.000,
"h": 20.000, "cap_vs_temp": [[-10, 60], [0, 80], [25, 1E+2], [40, 1E+2]], "T_room_init": 20,
"cycling_matrix": [[20, 0, 1E+2], [20, 5E+3, 80], [20, 1E+4, 60], [80, 0, 1E+2], [80, 1E+3, 80],
[80, 2E+3, 60]], "calendar_choice": 1, "calendar_q0": 1.020, "calendar_a": 0.003,
"calendar_b": -7280.000, "calendar_c": 930.000, "calendar_matrix": [[-3.1E+231]], "loss_choice": 0,
"monthly_charge_loss": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
"monthly_discharge_loss": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
"monthly_idle_loss": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], "schedule_loss": [], "replacement_option": 0,
"replacement_capacity": 0.000}
for k, v in params.items():
b.value(k, v)
b.setup()
b.execute(0)
print(b.StatePack.export())
b.execute(0)
print(b.StatePack.export())
b.setup()
b.execute(0)
print(b.StatePack.export())
b.execute(0)
print(b.StatePack.export())
|
nome = str(input('Enter your full name: '))
print('Your name in upper case letters looks like this: ', nome.upper())
print('Your name in lower case letters looks like this: ', nome.lower())
print('Number of letters in your name: ', len(nome.replace(' ', '')))
nome = nome.split(' ')
print('Your first name has {} letters'.format(len(nome[0])))
print('''shorter code to compute the length of the whole name:
nome = str(input('your name: ')).strip()
print('your name has {} letters'.format(len(''.join(nome.split()))))''')
|
#!/usr/bin/python
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import datetime
import dateutil.relativedelta
# PUT YOUR DEVELOPER KEY HERE
DEVELOPER_KEY = "## PUT YOUR KEY HERE"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def youtube_search(searchString, maxResults):
youtube = build(YOUTUBE_API_SERVICE_NAME,
YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
    # Only consider videos between one and nine months old.
notAfter = datetime.datetime.now() + dateutil.relativedelta.relativedelta(months=-1)
notBefore = datetime.datetime.now() + dateutil.relativedelta.relativedelta(months=-9)
parameters = { 'q': searchString,
'type': "video",
'part': "id",
'maxResults': maxResults,
'videoEmbeddable': "true",
'videoSyndicated': "true",
'order': "viewCount",
'videoDefinition': "high",
'videoDimension': "2d",
'publishedBefore': notAfter.isoformat("T").split(".")[0] + "Z",
'publishedAfter': notBefore.isoformat("T").split(".")[0] + "Z"}
print(parameters)
search_response = youtube.search().list(**parameters).execute()
videos = []
# Add each result to the appropriate list, and then display the lists of
# matching videos, channels, and playlists.
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
videos.append(search_result["id"]["videoId"])
for videoId in videos:
video_response = youtube.videos().list(id=videoId, part='contentDetails').execute()
print("Video: %s, length: %s" % (videoId, video_response["items"][0]["contentDetails"]["duration"]))
if __name__ == "__main__":
argparser.add_argument("--query", help="Search string", default="music video")
argparser.add_argument("--max-results", help="Max YouTube results", default=25)
args = argparser.parse_args()
try:
youtube_search(args.query, args.max_results)
except HttpError as e:
print("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
|
import ujson
from typing import Union, List, Optional
from waio.keyboard.list import ListMainButton
class KeyboardButton(ListMainButton):
def __init__(self, title: str):
super().__init__(title)
class QuickReplyContentBase:
def __init__(self, text: str, caption: str):
self.text = text
self.caption = caption
class QuickReplyContentText(QuickReplyContentBase):
def __init__(self, header: str, text: str, caption: str):
self.type = "text"
self.header = header
super().__init__(text, caption)
def json(self):
return {
"type": self.type,
"header": self.header,
"text": self.text,
"caption": self.caption
}
class QuickReplyContentImage(QuickReplyContentBase):
def __init__(self, url: str, text: str, caption: str):
self.type = "image"
self.url = url
super().__init__(text, caption)
def json(self):
return {
"type": self.type,
"url": self.url,
"text": self.text,
"caption": self.caption
}
class QuickReplyContentDocument(QuickReplyContentBase):
def __init__(self, url: str, filename: str, text: str, caption: str):
self.type = "file"
self.url = url
self.filename = filename
super().__init__(text, caption)
def json(self):
return {
"type": self.type,
"url": self.url,
"text": self.text,
"caption": self.caption,
"filename": self.filename
}
class QuickReplyContentVideo(QuickReplyContentBase):
def __init__(self, url: str, text: str, caption: str):
self.type = "video"
self.url = url
super().__init__(text, caption)
def json(self):
return {
"type": self.type,
"url": self.url,
"text": self.text,
"caption": self.caption,
}
class QuickReply:
def __init__(
self,
callback_data: str,
content: Union[
QuickReplyContentText,
QuickReplyContentImage,
QuickReplyContentDocument,
QuickReplyContentVideo
],
options: Optional[List[KeyboardButton]] = None
):
self.type = "quick_reply"
self.callback_data = callback_data
self.content = content
if options is None:
self.options = []
else:
self.options = options
def add(self, element: KeyboardButton) -> 'QuickReply':
self.options.append(element)
return self
def dict(self):
return {
"type": self.type,
"msgid": self.callback_data,
"content": self.content.json(),
"options": [element.json() for element in self.options]
}
def json(self):
return ujson.dumps(self.dict(), indent=2)
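# Hedged usage sketch (not part of the original module): callback data and texts are
# placeholders, and it assumes ListMainButton (the KeyboardButton base class) provides
# the json() method that QuickReply.dict() already relies on.
if __name__ == '__main__':
    content = QuickReplyContentText(header='Menu', text='Pick an option', caption='demo')
    reply = QuickReply(callback_data='menu-1', content=content)
    reply.add(KeyboardButton('Yes')).add(KeyboardButton('No'))
    print(reply.json())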
|
from pyot.utils import loop_run
from pyot.models import lol
def test_status():
status = loop_run(lol.Status(platform="na1").get())
status.dict(recursive=True)
|
#!/usr/bin/python
# requires
# urllib3, python-pip
# pip install boto BeautifulSoup
import sys
import os
from Config import Config
config = Config()
config.create_html = True
config.create_movie = True
config.create_snapshot = True
config.data_file = "sample.csv"
config.data_definition_file = "data.definition"
config.data_seperator = '|'
config.data_has_headers = True
config.html_output_file = "index.html"
config.html_template = "template.html"
config.s3_materials = "video-transcodes-justgiving-com/config/materials"
config.max_rows = 1
config.terminate_on_completion = False
config.s3_destination = "video-transcodes-justgiving-com/my-story/%_PageShortName_%/"
config.script_file = "template.json"
template = config.to_JSON()
print (template)
|
class Solution:
def myAtoi(self, s: str) -> int:
s = s.strip()
if not s:
return 0
if s[0] == '-':
is_negative = True
start_index = 1
elif s[0] == '+':
is_negative = False
start_index = 1
else:
is_negative = False
start_index = 0
str_len = len(s)
value = 0
for index in range(start_index, str_len):
if not s[index].isdigit():
break
digit_val = ord(s[index]) - ord('0')
value = (value * 10) + digit_val
if is_negative:
value *= -1
        # Python 3 ints do not overflow, so clamp explicitly to the signed 32-bit range
        # (sys.maxint no longer exists in Python 3, and it referred to the platform word size anyway)
max_val = 2**31 - 1
min_val = -2**31
if value < min_val:
value = min_val
if value > max_val:
value = max_val
return value
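# A few illustrative checks (inputs chosen here, not taken from the problem statement):
if __name__ == '__main__':
    solution = Solution()
    print(solution.myAtoi('   -42'))            # -42
    print(solution.myAtoi('4193 with words'))   # 4193
    print(solution.myAtoi('-91283472332'))      # clamped to -2**31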
|
import tempfile
import numpy as np
import pandas as pd
from paysage import batch
from paysage import backends as be
import pytest
def test_hdf_table_batch():
# the temporary storage file
store_file = tempfile.NamedTemporaryFile()
# create data
num_rows = 10000
num_cols = 10
df_A = pd.DataFrame(np.arange(num_rows*num_cols).reshape(num_rows, num_cols))
# save it
with pd.HDFStore(store_file.name, mode="w", format="table") as store:
store.append("train", df_A)
# read it back with the HDFtable
batch_size = 1000
num_train_batches = num_rows // batch_size
data = batch.HDFtable(store_file.name, "train", batch_size)
# loop through thrice, checking the data
for i_loop in range(3):
i_batch = 0
while True:
# get the data
try:
batch_data = data.get()
except StopIteration:
assert i_batch == num_train_batches
i_batch = 0
break
# check it
assert np.all(be.to_numpy_array(batch_data) == \
df_A.values[i_batch * batch_size: (i_batch + 1) * batch_size])
i_batch += 1
def test_hdf_batch():
# the temporary storage file
store_file = tempfile.NamedTemporaryFile()
# create data
num_rows = 10000
num_cols = 10
df_A = pd.DataFrame(np.arange(num_rows*num_cols).reshape(num_rows, num_cols))
df_B = df_A + num_rows*num_cols
# save it
with pd.HDFStore(store_file.name, mode="w", format="table") as store:
store.append("train", df_A)
store.append("validate", df_B)
# read it back with the HDFtable
batch_size = 1000
num_train_batches = num_rows // batch_size
data = batch.Batch(
{"train": batch.HDFtable(store_file.name, "train", batch_size),
"validate": batch.HDFtable(store_file.name, "validate", batch_size)})
# loop through thrice, checking the data
for i_loop in range(3):
i_batch = 0
while True:
# get the data
try:
batch_data_train = data.get("train")
batch_data_validate = data.get("validate")
except StopIteration:
assert i_batch == num_train_batches
i_batch = 0
data.reset_generator("all")
break
# check it
assert np.all(be.to_numpy_array(batch_data_train) == \
df_A.values[i_batch * batch_size: (i_batch + 1) * batch_size])
assert np.all(be.to_numpy_array(batch_data_validate) == \
df_B.values[i_batch * batch_size: (i_batch + 1) * batch_size])
i_batch += 1
if __name__ == "__main__":
pytest.main([__file__])
|
#-----------------------------------------------------------------------------
# Runtime: 128ms
# Memory Usage:
# Link:
#-----------------------------------------------------------------------------
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
word1_length, word2_length = len(word1), len(word2)
if word1_length == 0: return word2_length
if word2_length == 0: return word1_length
if word1_length < word2_length:
return self.minDistance(word2, word1)
dp = list(range(word2_length + 1))
for i in range(1, word1_length + 1):
upper_left = dp[0]
dp[0] = i
for j in range(1, word2_length + 1):
upper = dp[j]
if word1[i-1] == word2[j-1]:
dp[j] = upper_left
else:
left = dp[j-1]
dp[j] = min(upper_left, upper, left) + 1
upper_left = upper
return dp[-1]
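# Illustrative checks (examples chosen here to exercise the rolling 1-D DP):
if __name__ == '__main__':
    solution = Solution()
    print(solution.minDistance('horse', 'ros'))             # 3
    print(solution.minDistance('intention', 'execution'))   # 5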
|
#coding:utf-8
#
# id: bugs.core_1009
# title: Restoring RDB$BASE_FIELD for expression
# decription: RDB$BASE_FIELD for expression have to be NULL
# tracker_id: CORE-1009
# min_versions: []
# versions: 2.1
# qmid: bugs.core_1009
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(from_backup='core1009.fbk', init=init_script_1)
test_script_1 = """
set list on;
select rdb$field_name, rdb$base_field from rdb$relation_fields where rdb$relation_name = 'TEST_VIEW';
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
RDB$FIELD_NAME ID
RDB$BASE_FIELD ID
RDB$FIELD_NAME EXPR
RDB$BASE_FIELD <null>
"""
@pytest.mark.version('>=2.1')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
import falcon
import simplejson as json
import mysql.connector
import config
from anytree import Node, AnyNode, LevelOrderIter
import excelexporters.equipmenttracking
class Reporting:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
####################################################################################################################
# PROCEDURES
# Step 1: valid parameters
# Step 2: build a space tree
# Step 3: query all equipments in the space tree
# Step 4: construct the report
####################################################################################################################
@staticmethod
def on_get(req, resp):
print(req.params)
space_id = req.params.get('spaceid')
################################################################################################################
# Step 1: valid parameters
################################################################################################################
if space_id is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_SPACE_ID')
else:
space_id = str.strip(space_id)
if not space_id.isdigit() or int(space_id) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_SPACE_ID')
else:
space_id = int(space_id)
cnx = mysql.connector.connect(**config.myems_system_db)
cursor = cnx.cursor(dictionary=True)
cursor.execute(" SELECT name "
" FROM tbl_spaces "
" WHERE id = %s ", (space_id,))
row = cursor.fetchone()
if row is None:
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
description='API.SPACE_NOT_FOUND')
else:
space_name = row['name']
################################################################################################################
# Step 2: build a space tree
################################################################################################################
query = (" SELECT id, name, parent_space_id "
" FROM tbl_spaces "
" ORDER BY id ")
cursor.execute(query)
rows_spaces = cursor.fetchall()
node_dict = dict()
if rows_spaces is not None and len(rows_spaces) > 0:
for row in rows_spaces:
parent_node = node_dict[row['parent_space_id']] if row['parent_space_id'] is not None else None
node_dict[row['id']] = AnyNode(id=row['id'], parent=parent_node, name=row['name'])
################################################################################################################
# Step 3: query all equipments in the space tree
################################################################################################################
equipment_list = list()
space_dict = dict()
for node in LevelOrderIter(node_dict[space_id]):
space_dict[node.id] = node.name
cursor.execute(" SELECT e.id, e.name AS equipment_name, s.name AS space_name, "
" cc.name AS cost_center_name, e.description "
" FROM tbl_spaces s, tbl_spaces_equipments se, tbl_equipments e, tbl_cost_centers cc "
" WHERE s.id IN ( " + ', '.join(map(str, space_dict.keys())) + ") "
" AND se.space_id = s.id "
" AND se.equipment_id = e.id "
" AND e.cost_center_id = cc.id ", )
rows_equipments = cursor.fetchall()
if rows_equipments is not None and len(rows_equipments) > 0:
for row in rows_equipments:
equipment_list.append({"id": row['id'],
"equipment_name": row['equipment_name'],
"space_name": row['space_name'],
"cost_center_name": row['cost_center_name'],
"description": row['description']})
if cursor:
cursor.close()
if cnx:
cnx.disconnect()
################################################################################################################
# Step 4: construct the report
################################################################################################################
result = {'equipments': equipment_list}
# export result to Excel file and then encode the file to base64 string
result['excel_bytes_base64'] = \
excelexporters.equipmenttracking.export(result,
space_name)
resp.body = json.dumps(result)
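# Hedged wiring sketch (not part of this module): the route path below is illustrative;
# the actual routing for this resource is configured elsewhere in the application.
# app = falcon.API()
# app.add_route('/reports/equipmenttracking', Reporting())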
|
"""
Created on Mon Jun 14 15:24:21 2021
Represents a transaction with its inputs and outputs
@author: tj
"""
import hashlib
import struct
from binascii import hexlify, unhexlify
from script import BTCScript
from script import DEFAULT_TX_SEQUENCE
DEFAULT_TX_VERSION = b'\x02\x00\x00\x00'
DEFAULT_TX_LOCKTIME = b'\x00\x00\x00\x00'
SIGHASH_ALL = 0x01
class InputTx:
def __init__(self, tx_id, tx_idx):
self.tx_id = tx_id
self.tx_idx = tx_idx
self.script_sig = BTCScript([])
self.sequence = DEFAULT_TX_SEQUENCE
def stream(self):
tx_id_bytes = unhexlify(self.tx_id)[::-1]
tx_idx_bytes = struct.pack('<L', self.tx_idx)
script_sig_bytes = self.script_sig.to_bytes()
data = tx_id_bytes + tx_idx_bytes + struct.pack('B', len(script_sig_bytes)) + script_sig_bytes + self.sequence
return data
@classmethod
def deep_copy(cls, tx_in):
return cls(tx_in.tx_id, tx_in.tx_idx)
class OutputTx:
def __init__(self, amount, unlock_script: BTCScript):
self.amount = amount
self.unlock_script = unlock_script
def stream(self):
amount_bytes = struct.pack('<q', self.amount)
script_bytes = self.unlock_script.to_bytes()
data = amount_bytes + struct.pack('B', len(script_bytes)) + script_bytes
return data
@classmethod
def deep_copy(cls, tx_out):
return cls(tx_out.amount, tx_out.unlock_script)
class Transaction:
def __init__(self, inputs, outputs, with_segwit=False, witnesses=None):
self.inputs = inputs
self.outputs = outputs
self.time_lock = DEFAULT_TX_LOCKTIME
self.version = DEFAULT_TX_VERSION
self.with_segwit = with_segwit
if witnesses is None:
self.witnesses = []
else:
self.witnesses = witnesses
def stream(self):
data = self.version
if self.with_segwit and self.witnesses:
data += b'\x00'
data += b'\x01'
txin_count_bytes = chr(len(self.inputs)).encode()
txout_count_bytes = chr(len(self.outputs)).encode()
data += txin_count_bytes
for txin in self.inputs:
data += txin.stream()
data += txout_count_bytes
for txout in self.outputs:
data += txout.stream()
if self.with_segwit:
for witness in self.witnesses:
witnesses_count_bytes = chr(len(witness.script)).encode()
data += witnesses_count_bytes
data += witness.to_bytes(True)
data += self.time_lock
return data
def serialize(self):
data = self.stream()
return hexlify(data).decode('utf-8')
@classmethod
def deep_copy(cls, tx):
ins = [InputTx.deep_copy(tx_in) for tx_in in tx.inputs]
outs = [OutputTx.deep_copy(tx_out) for tx_out in tx.outputs]
return cls(ins, outs)
def digest(self, tx_in_index, script):
tmp_tx = Transaction.deep_copy(self)
for tx_in in tmp_tx.inputs:
tx_in.script_sig = BTCScript([])
tmp_tx.inputs[tx_in_index].script_sig = script
tx_for_signing = tmp_tx.stream()
tx_for_signing += struct.pack('<i', SIGHASH_ALL)
tx_digest = hashlib.sha256(hashlib.sha256(tx_for_signing).digest()).digest()
return tx_digest
def segwit_digest(self, tx_in_index, script, amount):
tmp_tx = Transaction.deep_copy(self)
hash_prevouts = b''
for txin in tmp_tx.inputs:
hash_prevouts += unhexlify(txin.tx_id)[::-1] + \
struct.pack('<L', txin.tx_idx)
hash_prevouts = hashlib.sha256(hashlib.sha256(hash_prevouts).digest()).digest()
hash_sequence = b''
for txin in tmp_tx.inputs:
hash_sequence += txin.sequence
hash_sequence = hashlib.sha256(hashlib.sha256(hash_sequence).digest()).digest()
hash_outputs = b''
for txout in tmp_tx.outputs:
amount_bytes = struct.pack('<q', txout.amount)
script_bytes = txout.unlock_script.to_bytes()
hash_outputs += amount_bytes + struct.pack('B', len(script_bytes)) + script_bytes
hash_outputs = hashlib.sha256(hashlib.sha256(hash_outputs).digest()).digest()
tx_for_signing = self.version
tx_for_signing += hash_prevouts + hash_sequence
txin = self.inputs[tx_in_index]
tx_for_signing += unhexlify(txin.tx_id)[::-1] + struct.pack('<L', txin.tx_idx)
tx_for_signing += struct.pack('B', len(script.to_bytes()))
tx_for_signing += script.to_bytes()
tx_for_signing += struct.pack('<q', amount)
tx_for_signing += txin.sequence
tx_for_signing += hash_outputs
tx_for_signing += self.time_lock
tx_for_signing += struct.pack('<i', SIGHASH_ALL)
return hashlib.sha256(hashlib.sha256(tx_for_signing).digest()).digest()
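# Hedged usage sketch (not part of the original module): the txid, amount and the empty
# scripts below are placeholders purely to show how the pieces fit together; a real
# transaction needs a proper locking script and a signed script_sig.
if __name__ == '__main__':
    txin = InputTx('aa' * 32, 0)
    txout = OutputTx(50000, BTCScript([]))
    tx = Transaction([txin], [txout])
    print(tx.serialize())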
|