def hello_world():
    print("Hello World!")


def hallo_welt():
    print("Hallo Welt!")


def hola_mundo():
    print("Hola Mundo!")
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim  # Optimizer used to update weights.
import numpy as np
import matplotlib.pyplot as plt

torch.set_printoptions(linewidth=120)


def get_num_correct(preds, labels):
    return preds.argmax(dim=1).eq(labels).sum().item()


# Show images and labels
'''
batch = next(iter(train_loader))
images, labels = batch
grid = torchvision.utils.make_grid(images, nrow=10)
plt.figure(figsize=(15, 15))
plt.imshow(np.transpose(grid, (1, 2, 0)))
print('labels:', labels)
plt.show()
'''


class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=12, kernel_size=5)
        self.fc1 = nn.Linear(in_features=12 * 4 * 4, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=60)
        self.out = nn.Linear(in_features=60, out_features=10)

    def forward(self, t):
        # Implement the forward pass.
        # (1) input layer
        t = t  # t.shape = (1, 28, 28)
        # (2) hidden conv layer
        t = self.conv1(t)  # t.shape (1, 28, 28) -> (6, 24, 24)
        t = F.relu(t)
        t = F.max_pool2d(t, kernel_size=2, stride=2)
        # (3) hidden conv layer
        t = self.conv2(t)
        t = F.relu(t)
        t = F.max_pool2d(t, kernel_size=2, stride=2)
        # (4) hidden linear layer
        t = t.reshape(-1, 12 * 4 * 4)
        t = self.fc1(t)
        t = F.relu(t)
        # (5) hidden linear layer
        t = self.fc2(t)
        t = F.relu(t)
        # (6) output layer
        t = self.out(t)
        # t = F.softmax(t, dim=1)  # not needed: F.cross_entropy applies log-softmax itself
        return t


# Download / load the dataset.
train_set = torchvision.datasets.FashionMNIST(
    root='./data/FashionMNIST',
    train=True,
    # download=True,
    transform=transforms.Compose([
        transforms.ToTensor()
    ])
)

# /--------------- Training (full training set, 5 epochs) ---------------/
# Step 1: Initialize a network.
network = Network()
# Step 2: General setup: load data, set up the optimizer, initialize total loss and correct counts.
train_loader = DataLoader(train_set, batch_size=100)
optimizer = optim.Adam(network.parameters(), lr=0.01)  # lr: learning rate.
batch_num = 0
# Step 3: Get batches from the train_loader.
# batch = next(iter(train_loader))  # Train with a single batch.
for epoch in range(5):
    total_loss = 0
    total_correct = 0
    for batch in train_loader:
        batch_num += 1
        images, labels = batch
        # Step 4: Calculate the loss by predicting and comparing to the labels.
        preds = network(images)
        loss = F.cross_entropy(preds, labels)
        # print('Num correct before:' + str(get_num_correct(preds, labels)))
        # Step 5: Backward propagation.
        optimizer.zero_grad()  # Zero out gradients. Otherwise they would accumulate.
        loss.backward()        # Calculate gradients.
        optimizer.step()       # Update weights using the gradients.
        total_loss += loss.item()
        total_correct += get_num_correct(preds, labels)
        # print(
        #     'epoch', epoch, 'batch', batch_num, '| correct:', get_num_correct(preds, labels),
        #     'loss:', loss.item())
    print('Finished epoch', epoch, '| total_correct:', total_correct, 'loss:', total_loss)
'''
print('Loss before: ' + str(loss.item()))
print('Num correct before:' + str(get_num_correct(preds, labels)))
preds = network(images)
loss = F.cross_entropy(preds, labels)  # New loss here.
print('Loss after: ' + str(loss.item()))
print('Num correct after:' + str(get_num_correct(preds, labels)))
'''
# /------------------------------------------------------------------/
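
# A minimal evaluation sketch (not part of the original script): after training,
# accuracy over the training set can be estimated without tracking gradients,
# reusing `network`, `train_loader`, and `get_num_correct` defined above.
with torch.no_grad():
    correct = 0
    for images, labels in train_loader:
        preds = network(images)
        correct += get_num_correct(preds, labels)
print('train accuracy:', correct / len(train_set))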
|
# Generated by Django 3.0.6 on 2020-05-30 10:27
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('incident', '0010_auto_20200528_2355'),
    ]

    operations = [
        migrations.AddField(
            model_name='incident',
            name='latitude',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='incident',
            name='longitude',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
|
from labtex.measurement import Measurement, MeasurementList
import matplotlib.pyplot as plt

plt.style.use('seaborn-whitegrid')


class LinearRegression:
    "Linearly regress two MeasurementLists."

    def __init__(self, x: MeasurementList, y: MeasurementList):
        self.x = x
        self.y = y
        assert len(x) == len(y)

        # TODO Support weighted regression
        n = len(x)
        w = [1 / n] * n

        xmean = sum(w * x) / sum(w)
        ymean = sum(w * y) / sum(w)

        D = sum(w * (x - xmean) ** 2)
        m = 1 / D * sum(w * (x - xmean) * y)
        c = ymean - m * xmean

        d = y - x * m - c.value
        Delta_m = (1 / D * sum(w * d ** 2) / (n - 2)) ** 0.5
        Delta_c = ((1 / sum(w) + xmean ** 2 / D) * sum(w * d ** 2) / (n - 2)) ** 0.5

        # Line of best fit parameters
        self.lobf = {
            "m": Measurement(m.value, Delta_m.value, m.unit),
            "c": Measurement(c.value, Delta_c.value, c.unit)
        }

    def __repr__(self):
        return f"m = {self.lobf['m']}\nc = {self.lobf['c']}"

    def savefig(self, filename: str = "figure", title: str = "", xlabel: str = "", ylabel: str = "",
                showline: bool = True, graphnumber: int = 0):
        plt.figure(graphnumber)
        plt.errorbar(self.x.tolist(), self.y.tolist(), yerr=self.y.uncertainty, fmt='o')
        if showline:
            plt.plot(self.x.tolist(), (self.x * self.lobf["m"].value + self.lobf["c"].value).tolist())
        plt.title(title)
        plt.xlabel(xlabel + f", (${self.x.unit}$)")
        plt.ylabel(ylabel + f", (${self.y.unit}$)")
        plt.savefig(filename)
|
import numpy as np
import unittest
import os
from openmdao.api import Problem
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from pycycle.constants import AIR_ELEMENTS
from pycycle.mp_cycle import Cycle
from pycycle.thermo.cea.species_data import janaf
from pycycle.elements.compressor import Compressor
from pycycle.elements.flow_start import FlowStart
from pycycle import constants
fpath = os.path.dirname(os.path.realpath(__file__))
ref_data = np.loadtxt(fpath + "/reg_data/compressor.csv",
delimiter=",", skiprows=1)
header = [
'start.W',
'start.Pt',
'start.Tt',
'start.Fl_O.ht',
'start.Fl_O.s',
'start.Fl_O.MN',
'comp.PRdes',
'comp.effDes',
'comp.Fl_O.MN',
'shaft.Nmech',
'comp.Fl_O.Pt',
'comp.Fl_O.Tt',
'comp.Fl_O.ht',
'comp.Fl_O.s',
'comp.pwr',
'Fl_O.Ps',
'Fl_O.Ts',
'Fl_O.hs',
'Fl_O.rhos',
'Fl_O.gams',
'comp.effPoly']
h_map = dict(((v_name, i) for i, v_name in enumerate(header)))
class CompressorTestCase(unittest.TestCase):
def setUp(self):
self.prob = Problem()
cycle = self.prob.model = Cycle()
cycle.add_subsystem('flow_start', FlowStart(thermo_data=janaf, elements=AIR_ELEMENTS))
cycle.add_subsystem('compressor', Compressor(design=True, elements=AIR_ELEMENTS))
cycle.set_input_defaults('flow_start.P', 17., units='psi')
cycle.set_input_defaults('flow_start.T', 500., units='degR')
cycle.set_input_defaults('compressor.MN', 0.5)
cycle.set_input_defaults('flow_start.W', 10., units='lbm/s')
cycle.set_input_defaults('compressor.PR', 6.)
cycle.set_input_defaults('compressor.eff', 0.9)
cycle.pyc_connect_flow("flow_start.Fl_O", "compressor.Fl_I")
self.prob.set_solver_print(level=-1)
self.prob.setup(check=False, force_alloc_complex=True)
def test_case1(self):
np.seterr(divide='raise')
# 6 cases to check against
for i, data in enumerate(ref_data):
self.prob['compressor.PR'] = data[h_map['comp.PRdes']]
self.prob['compressor.eff'] = data[h_map['comp.effDes']]
self.prob['compressor.MN'] = data[h_map['comp.Fl_O.MN']]
# input flowstation
self.prob['flow_start.P'] = data[h_map['start.Pt']]
self.prob['flow_start.T'] = data[h_map['start.Tt']]
self.prob['flow_start.W'] = data[h_map['start.W']]
self.prob.run_model()
tol = 1e-3
npss = data[h_map['comp.Fl_O.Pt']]
pyc = self.prob['compressor.Fl_O:tot:P'][0]
print('Pt out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['comp.Fl_O.Tt']]
pyc = self.prob['compressor.Fl_O:tot:T'][0]
print('Tt out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['comp.Fl_O.ht']] - data[h_map['start.Fl_O.ht']]
pyc = self.prob['compressor.Fl_O:tot:h'] - self.prob['flow_start.Fl_O:tot:h']
print('delta h:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['start.Fl_O.s']]
pyc = self.prob['flow_start.Fl_O:tot:S'][0]
print('S in:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['comp.Fl_O.s']]
pyc = self.prob['compressor.Fl_O:tot:S'][0]
print('S out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['comp.pwr']]
pyc = self.prob['compressor.power'][0]
print('Power:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['Fl_O.Ps']]
pyc = self.prob['compressor.Fl_O:stat:P'][0]
print('Ps out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['Fl_O.Ts']]
pyc = self.prob['compressor.Fl_O:stat:T'][0]
print('Ts out:', npss, pyc)
assert_near_equal(pyc, npss, tol)
npss = data[h_map['comp.effPoly']]
pyc = self.prob['compressor.eff_poly'][0]
print('effPoly:', npss, pyc)
assert_near_equal(pyc, npss, tol)
print("")
partial_data = self.prob.check_partials(out_stream=None, method='cs',
includes=['compressor.*'], excludes=['*.base_thermo.*',])
assert_check_partials(partial_data, atol=1e-8, rtol=1e-8)
if __name__ == "__main__":
unittest.main()
|
import discord
from asyncio import sleep


class Fun:
    def __init__(self):
        self.bot = None

    def init(self, bot):
        self.bot = bot

    async def loop(self, ctx):
        '''Infinite Loop'''
        await sleep(0.5)
        await self.bot.delete_message(ctx.message)
        stop_loop = False
        while not stop_loop:
            msg = await self.bot.send_message(ctx.message.channel, "ping")
            msg = await self.bot.send_message(ctx.message.channel, "pong")
            response = await self.bot.wait_for_message(timeout=0.5)
            if response is not None:
                # Compare string contents with == ; identity comparison with `is` is unreliable here.
                if response.content == "%stop":
                    stop_loop = True
|
"""
This code comes from https://github.com/rubenvillegas/cvpr2018nkn/blob/master/datasets/fbx2bvh.py
"""
import bpy
import numpy as np
from os import listdir
data_path = './Mixamo/'
directories = sorted([f for f in listdir(data_path) if not f.startswith(".")])
for d in directories:
files = sorted([f for f in listdir(data_path + d) if f.endswith(".fbx")])
for f in files:
sourcepath = data_path + d + "/" + f
dumppath = data_path+d + "/" + f.split(".fbx")[0] + ".bvh"
bpy.ops.import_scene.fbx(filepath=sourcepath)
frame_start = 9999
frame_end = -9999
action = bpy.data.actions[-1]
if action.frame_range[1] > frame_end:
frame_end = action.frame_range[1]
if action.frame_range[0] < frame_start:
frame_start = action.frame_range[0]
frame_end = np.max([60, frame_end])
bpy.ops.export_anim.bvh(filepath=dumppath,
frame_start=frame_start,
frame_end=frame_end, root_transform_only=True)
bpy.data.actions.remove(bpy.data.actions[-1])
print(data_path + d + "/" + f + " processed.")
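
# Usage note (added): this script relies on `bpy`, so it must be run inside Blender's
# bundled Python interpreter, e.g. `blender -b -P fbx2bvh.py` (the filename is assumed).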
|
from fastapi import APIRouter, Depends
from sqlmodel import Session, select, SQLModel, or_

from ..utils import engine, string_to_datetime, get_session
from ..models.user import User
from ..models.timelog import TimeLog

router = APIRouter(prefix="/api/timelogs", tags=["timelog"])


@router.post("/")
async def timelog(*, timelog: TimeLog, session: Session = Depends(get_session)):
    """
    Post timelog

    example: timelog.start_time = "2022-01-19T08:30:00.000Z"
    """
    statement1 = (
        select(TimeLog)
        .where(TimeLog.user_id == timelog.user_id)
        .where(TimeLog.start_time >= timelog.start_time)
        .where(TimeLog.start_time < timelog.end_time)
    )
    statement2 = (
        select(TimeLog)
        .where(TimeLog.user_id == timelog.user_id)
        .where(TimeLog.end_time > timelog.start_time)
        .where(TimeLog.end_time <= timelog.end_time)
    )
    statement3 = (
        select(TimeLog)
        .where(TimeLog.user_id == timelog.user_id)
        .where(TimeLog.start_time >= timelog.start_time)
        .where(TimeLog.end_time <= timelog.end_time)
    )
    statement4 = (
        select(TimeLog)
        .where(TimeLog.user_id == timelog.user_id)
        .where(TimeLog.start_time < timelog.start_time)
        .where(TimeLog.end_time > timelog.end_time)
    )
    results1 = session.exec(statement1).all()
    results2 = session.exec(statement2).all()
    results3 = session.exec(statement3).all()
    results4 = session.exec(statement4).all()
    if results1 or results2 or results3 or results4:
        return "currently posted timelog overlaps another timelog"
    else:
        session.add(timelog)
        session.commit()
        session.refresh(timelog)
        return timelog


@router.get("/")
async def get_timelogs_all(session: Session = Depends(get_session)):
    """Get all timelogs"""
    statement = select(TimeLog)
    results = session.exec(statement).all()
    return results


@router.get("/{timelog_id}")
async def get_timelog_by_id(timelog_id: int, session: Session = Depends(get_session)):
    """Get timelog by id"""
    statement = select(TimeLog).where(TimeLog.id == timelog_id)
    result = session.exec(statement).one()
    return result


@router.get("/users/{user_id}/months/{month}/years/{year}")
async def get_timelog_user_id(
    *,
    user_id: str,
    month: int,
    year: int,
    session: Session = Depends(get_session),
):
    """Get list of timelogs by user_id, month and year"""
    statement = (
        select(TimeLog)
        .where(TimeLog.user_id == user_id)
        .where(TimeLog.month == month)
        .where(TimeLog.year == year)
    )
    results = session.exec(statement).all()
    return results


@router.put("/{timelog_id}/new-start-time")
async def update_timelogs(
    *,
    timelog_id: int = None,
    timelog_new_start_time: str = None,
    session: Session = Depends(get_session),
):
    """Update a timelog's start time"""
    statement = select(TimeLog).where(TimeLog.id == timelog_id)
    timelog_to_update = session.exec(statement).one()
    timelog_to_update.start_time = timelog_new_start_time
    session.add(timelog_to_update)
    session.commit()
    session.refresh(timelog_to_update)
    return timelog_to_update


@router.delete("/{timelog_id}")
async def delete_timelogs(
    *,
    timelog_id: int,
    session: Session = Depends(get_session),
):
    """Delete a timelog by id"""
    statement = select(TimeLog).where(TimeLog.id == timelog_id)
    result = session.exec(statement).one()
    timelog_to_delete = result
    session.delete(timelog_to_delete)
    session.commit()
    return True
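

# A possible simplification (a sketch, not part of the original router): the four
# overlap queries above reduce to the standard interval-overlap test
# existing.start_time < new.end_time AND existing.end_time > new.start_time.
# The helper name `has_overlap` is hypothetical.
def has_overlap(session: Session, new: TimeLog) -> bool:
    statement = (
        select(TimeLog)
        .where(TimeLog.user_id == new.user_id)
        .where(TimeLog.start_time < new.end_time)
        .where(TimeLog.end_time > new.start_time)
    )
    return session.exec(statement).first() is not None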
|
"""
@brief test log(time=1s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG, noLOG
from pyquickhelper.pycode import get_temp_folder
from tkinterquickhelper.funcwin.default_functions import test_regular_expression, is_empty_string, IsEmptyString, file_head, file_grep
class TestWindows (unittest.TestCase):
def test_test_regular_expression(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
test_regular_expression(fLOG=noLOG)
def test_is_empty_string(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
assert is_empty_string(None)
assert is_empty_string("")
assert not is_empty_string(".")
assert not IsEmptyString(".")
def test_file_head(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_" + self._testMethodName)
out = os.path.join(temp, "out.5.py")
assert not os.path.exists(out)
head = file_head(__file__.replace(".pyc", ".py"), out=out, head=5)
assert os.path.exists(head)
def test_file_grep(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_" + self._testMethodName)
out = os.path.join(temp, "out.grep.py")
assert not os.path.exists(out)
head = file_grep(
__file__.replace(
".pyc",
".py"),
out=out,
regex="test_.*")
assert os.path.exists(head)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-

from simmate.workflow_engine import s3task_to_workflow
from simmate.calculators.vasp.tasks.relaxation import (
    Quality00Relaxation as Quality00RelaxationTask,
)
from simmate.calculators.vasp.database.relaxation import (
    Quality00Relaxation as Quality00RelaxationResults,
)

workflow = s3task_to_workflow(
    name="relaxation/quality00",
    module=__name__,
    project_name="Simmate-Relaxation",
    s3task=Quality00RelaxationTask,
    calculation_table=Quality00RelaxationResults,
    register_kwargs=["prefect_flow_run_id", "structure", "source"],
)
|
from setuptools import setup

with open('requirements.txt') as f:
    requirements = f.read().splitlines()

setup(
    name='term-forecast',
    version='0.1.dev0',
    author='Kevin Midboe',
    author_email='support@kevinmidboe.com',
    description='Terminal Forecast is an easily accessible terminal-based weather forecaster',
    url='https://github.com/KevinMidboe/termWeather/',
    license='MIT',
    packages=['term_forecast'],
    classifiers=[
        'Environment :: Console',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3 :: Only',
        'License :: OSI Approved :: MIT License',
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
    ],
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            'forecast = term_forecast.term_weather:main',
        ],
    }
)
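
# Installation note (added): with the entry point above, `pip install .` installs the
# package and exposes a `forecast` console command that dispatches to
# term_forecast.term_weather:main.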
|
#!/usr/bin/env python
import hashlib
import os
import sys
import subprocess
from os.path import isdir, isfile
from os import listdir

import pandas as pd
from nose.tools import assert_equal, assert_true, assert_almost_equal, nottest

file_path = os.path.realpath(__file__)
test_dir_path = os.path.dirname(file_path)
data_path = os.path.abspath(os.path.join(test_dir_path, "test_data", "map"))
tmp_dir_path = os.path.join(test_dir_path, 'nose_tmp_output')
tmp_basename_dir = os.path.join(tmp_dir_path, '1')
script_path = os.path.join(test_dir_path, '..', 'scripts', 'gen_input_table.py')
CWD = os.getcwd()


class TestCMD(object):
    def setUp(self):
        """Create temporary dir if necessary,
        otherwise clear contents of it"""
        if not isdir(tmp_dir_path):
            os.mkdir(tmp_dir_path)
        self.tearDown()
        os.mkdir(tmp_basename_dir)
        os.chdir(test_dir_path)

    def tearDown(self):
        """remove temporary output files"""
        for d in os.listdir(tmp_dir_path):
            d_path = os.path.join(tmp_dir_path, d)
            try:
                os.remove(d_path)
            except OSError:
                for f in os.listdir(d_path):
                    f_path = os.path.join(d_path, f)
                    os.remove(f_path)
                os.rmdir(d_path)
        assert os.listdir(tmp_dir_path) == []

    def run_command(self, sample_names=None, contigs_file=None, bam_files=None, bed_files=None, output_file=None):
        assert not (bam_files and bed_files)
        call = ["python", script_path,
                "--samplenames", sample_names,
                contigs_file]
        if bam_files:
            call.append(bam_files)
        if bed_files:
            call += ["--isbedfiles", bed_files]
        call += ['>', output_file]
        self.c = 0
        # try:
        self.op = subprocess.check_output(
            " ".join(call) + " 2> /dev/null",
            shell=True)
        # except subprocess.CalledProcessError as exc:
        #     self.c = exc.returncode

    def file_len(self, fh):
        i = 0
        with open(fh) as f:
            for i, l in enumerate(f):
                pass
        return i + 1

    def md5sum(self, filename):
        with open(filename, 'rb') as fh:
            content = fh.read()
        m = hashlib.md5()
        m.update(content)
        return m.hexdigest()

    def test_with_bamfiles(self):
        self.run_command(bam_files=data_path + '/*/*-s.bam',
                         contigs_file=os.path.join(data_path, 'two_contigs.fa'),
                         output_file=os.path.join(tmp_dir_path, 'inputtable.tsv'),
                         sample_names=os.path.join(data_path, 'sample_names'))
        assert_equal(self.c, 0,
                     msg="Command exited with nonzero status")
        new_output = os.path.join(tmp_dir_path, 'inputtable.tsv')
        df = pd.read_csv(new_output, sep='\t', index_col=0)
        assert_almost_equal(df['cov_mean_sample_ten_reads'].loc['contig-75000034'], 10 * 100.0 / 1615, 5)
        assert_almost_equal(df['cov_mean_sample_ten_reads'].loc['contig-21000001'], 10 * 100.0 / 9998, 5)
        assert_almost_equal(df['cov_mean_sample_twenty_reads'].loc['contig-75000034'], 20 * 100.0 / 1615, 5)
        assert_almost_equal(df['cov_mean_sample_twenty_reads'].loc['contig-21000001'], 20 * 100.0 / 9998, 5)
        # assert_equal(new_output, old_output,
        #              msg="Output not the same as reference")

    def test_with_bedfiles(self):
        self.run_command(bed_files=data_path + '/*/*.coverage',
                         contigs_file=os.path.join(data_path, 'two_contigs.fa'),
                         output_file=os.path.join(tmp_dir_path, 'inputtable.tsv'),
                         sample_names=os.path.join(data_path, 'sample_names'))
        assert_equal(self.c, 0,
                     msg="Command exited with nonzero status")
        new_output = os.path.join(tmp_dir_path, 'inputtable.tsv')
        df = pd.read_csv(new_output, sep='\t', index_col=0)
        assert_almost_equal(df['cov_mean_sample_ten_reads'].loc['contig-75000034'], 10 * 100.0 / 1615, 5)
        assert_almost_equal(df['cov_mean_sample_ten_reads'].loc['contig-21000001'], 10 * 100.0 / 9998, 5)
        assert_almost_equal(df['cov_mean_sample_twenty_reads'].loc['contig-75000034'], 20 * 100.0 / 1615, 5)
        assert_almost_equal(df['cov_mean_sample_twenty_reads'].loc['contig-21000001'], 20 * 100.0 / 9998, 5)
|
from botZap import Validar
validar = Validar(
'numerosUsados.json',
'numerosValidos.json'
)
validar.interface()
validar.gerarNumeros()
validar.apagarCookies()
validar.cookies()
validar.chrome()
validar.conectar()
validar.quit()
validar.iniciarNavegadorInvisivel()
validar.mudo()
validar.cookies()
validar.chromeWaCrx('wa.crx')
validar.conectar()
validar.butaoContato()
validar.validar()
validar.desconectar()
validar.quit()
validar.finalizarNavegadorInvisivel()
validar.apagarCookies()
validar.imprimaResultado()
|
#!/usr/bin/env python
# coding: utf-8
from functools import wraps
import numpy as np


def nsample(shape):
    def _1(function):
        @wraps(function)
        def _(*args, **kw):
            mat = np.zeros(shape)
            if 1 == mat.ndim:
                vlen = shape
            else:
                vlen = shape[0]
            for i in range(vlen):
                mat[i] = function(*args, **kw)
            return mat
        return _
    return _1


def vec_sample(vlen, dist_params):
    def _1(function):
        @wraps(function)
        def _(*args, **kw):
            vec = np.zeros(vlen)
            for i in range(vlen):
                vec[i] = function(dist_params[i])
            return vec
        return _
    return _1


def nvec_sample(shape, dist_params):
    def _1(function):
        @nsample(shape)
        @vec_sample(shape[1], dist_params)
        @wraps(function)
        def _(*args, **kw):
            return function(*args, **kw)
        return _
    return _1
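

# A small usage sketch (not part of the original module): `nsample` repeats a sampling
# function and stacks the draws into an array of the given shape. The example function
# `standard_normal_rows` is hypothetical.
@nsample((5, 3))
def standard_normal_rows():
    # each call returns one row of three i.i.d. standard-normal draws
    return np.random.normal(size=3)


if __name__ == "__main__":
    draws = standard_normal_rows()
    print(draws.shape)  # (5, 3): five independently sampled rows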
|
""" views
Created at 10/07/20
"""
import logging
from django.utils.translation import ugettext, ugettext_lazy as _
from rest_framework import mixins, generics, status, viewsets, permissions
from rest_framework.response import Response
from email_builder.api.serializers import EmailBuilderTxtSerializer, EmailBuilderHtmlSerializer
from email_builder.utils import get_email_builder_handler
logger = logging.getLogger(__name__)
class EmailPreviewTxtViewSet(viewsets.GenericViewSet):
serializer_class = EmailBuilderTxtSerializer
permission_classes = [permissions.AllowAny]
def get_preview(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
return Response(
serializer.validated_data, status=status.HTTP_200_OK
)
class EmailPreviewHtmlViewSet(viewsets.GenericViewSet):
serializer_class = EmailBuilderHtmlSerializer
permission_classes = [permissions.AllowAny]
def get_preview(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
return Response(
serializer.validated_data, status=status.HTTP_200_OK
)
class EmailPreviewAvailableVariablesViewSet(viewsets.GenericViewSet):
permission_classes = [permissions.AllowAny]
def get_available_vars(self, request, *args, **kwargs):
email_code = request.query_params.get("email_code")
variables = {}
if email_code:
try:
variables = get_email_builder_handler().get_available_variables_by_email_code(email_code=email_code)
except Exception as ex:
raise ex
else:
raise Exception("No email code passed")
return Response(status=status.HTTP_200_OK, data=variables)
|
#!/usr/bin/env python
"""
s3-mix topology
"""
from mininet.cli import CLI
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.node import Node
from mininet.log import setLogLevel, info
from utils import IP, MAC, NETMASK
from utils import IP_SWAT, MAC_SWAT, NETMASK_SWAT
from subprocess import call
class AttackerNode(Node):
"""AttackerNode starts an OpenVPN server."""
def config(self, **params):
super(AttackerNode, self).config(**params)
self.cmd('ifconfig attacker-eth1 10.0.0.1')
self.cmd('sh bridge-start.sh')
self.cmd('openvpn openvpn-server.conf &')
def terminate(self):
self.cmd('pkill openvpn')
self.cmd('sh bridge-stop.sh')
super(AttackerNode, self).terminate()
class AttackerNode2(Node):
"""AttackerNode2 starts an OpenVPN server2."""
def config(self, **params):
super(AttackerNode2, self).config(**params)
self.cmd('ifconfig attacker2-eth1 10.0.0.2')
self.cmd('sh bridge-start2.sh')
self.cmd('openvpn openvpn-server2.conf &')
def terminate(self):
self.cmd('pkill openvpn')
self.cmd('sh bridge-stop2.sh')
super(AttackerNode2, self).terminate()
class ClientNode(Node):
"""ClientNode starts an OpenVPN client."""
def config(self, **params):
super(ClientNode, self).config(**params)
self.cmd('openvpn openvpn-client.conf &')
def terminate(self):
super(ClientNode, self).terminate()
class ClientNode2(Node):
"""ClientNode starts an OpenVPN client2."""
def config(self, **params):
super(ClientNode2, self).config(**params)
self.cmd('openvpn openvpn-client2.conf &')
def terminate(self):
super(ClientNode2, self).terminate()
class MixTopo(Topo):
"""No subnets."""
def build(self):
# NOTE: swat
switch = self.addSwitch('s1')
plc2 = self.addHost(
'plc2',
ip=IP_SWAT['plc2'] + NETMASK_SWAT,
mac=MAC_SWAT['plc2'])
self.addLink(plc2, switch)
plc3 = self.addHost(
'plc3',
ip=IP_SWAT['plc3'] + NETMASK_SWAT,
mac=MAC_SWAT['plc3'])
self.addLink(plc3, switch)
attacker = self.addNode(
'attacker',
cls=AttackerNode,
ip=IP_SWAT['attacker'] + NETMASK_SWAT,
mac=MAC_SWAT['attacker'])
self.addLink(attacker, switch)
# NOTE: swat dumb nodes
plc1 = self.addHost(
'plc1',
ip=IP_SWAT['plc1'] + NETMASK_SWAT,
mac=MAC_SWAT['plc1'])
self.addLink(plc1, switch)
plc4 = self.addHost(
'plc4',
ip=IP_SWAT['plc4'] + NETMASK_SWAT,
mac=MAC_SWAT['plc4'])
self.addLink(plc4, switch)
plc5 = self.addHost(
'plc5',
ip=IP_SWAT['plc5'] + NETMASK_SWAT,
mac=MAC_SWAT['plc5'])
self.addLink(plc5, switch)
plc6 = self.addHost(
'plc6',
ip=IP_SWAT['plc6'] + NETMASK_SWAT,
mac=MAC_SWAT['plc6'])
self.addLink(plc6, switch)
shmi = self.addHost(
'shmi',
ip=IP_SWAT['shmi'] + NETMASK_SWAT,
mac=MAC_SWAT['shmi'])
self.addLink(shmi, switch)
# NOTE: wadi
switch2 = self.addSwitch('s2')
scada = self.addHost(
'scada',
ip=IP['scada'] + NETMASK,
mac=MAC['scada'])
self.addLink(scada, switch2)
rtu2a = self.addHost(
'rtu2a',
ip=IP['rtu2a'] + NETMASK,
mac=MAC['rtu2a'])
self.addLink(rtu2a, switch2)
rtu2b = self.addHost(
'rtu2b',
ip=IP['rtu2b'] + NETMASK,
mac=MAC['rtu2b'])
self.addLink(rtu2b, switch2)
attacker2 = self.addHost(
'attacker2',
cls=AttackerNode2,
ip=IP['attacker2'] + NETMASK,
mac=MAC['attacker2'])
self.addLink(attacker2, switch2)
# NOTE: wadi dumb nodes
hmi = self.addHost(
'hmi',
ip=IP['hmi'] + NETMASK,
mac=MAC['hmi'])
self.addLink(hmi, switch2)
hist = self.addHost(
'hist',
ip=IP['hist'] + NETMASK,
mac=MAC['hist'])
self.addLink(hist, switch2)
ids = self.addHost(
'ids',
ip=IP['ids'] + NETMASK,
mac=MAC['ids'])
self.addLink(ids, switch2)
switch3 = self.addSwitch('s3')
self.addLink(switch3, attacker)
self.addLink(switch3, attacker2)
# NOTE: remove when done with testing
# client = self.addNode(
# 'client',
# cls=ClientNode,
# ip=IP_SWAT['client'] + NETMASK_SWAT,
# mac=MAC_SWAT['client'])
# self.addLink(switch3, client)
# client2 = self.addHost(
# 'client2',
# cls=ClientNode2,
# ip=IP['client2'] + NETMASK,
# mac=MAC['client2'])
# self.addLink(switch3, client2)
if __name__ == '__main__':
"""Test MixTopo."""
setLogLevel( 'info' )
topo = MixTopo()
net = Mininet(topo=topo)
net.start()
CLI(net)
net.stop()
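
# Topology note (added comment): switch s1 carries the SWaT hosts (plc1-plc6, shmi,
# attacker), switch s2 carries the WADI hosts (scada, rtu2a, rtu2b, hmi, hist, ids,
# attacker2), and s3 links the two attacker nodes, which run the OpenVPN servers.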
|
import abc
import numpy as np
from src.graphic_interface.image_standardizer import ImageAligner, ImageCropper
class DataSet:
"""
Wrapper around both Dataset, which can apply a transformation prior to sending data, or apply
a reverse transformation after receiving data.
"""
def __init__(self, dataset_path=None):
self.aligner = None
self.cropper = None
self._align = False
self.crop = False
self.coarse_seg_mode = False # MB added
self.only_NN_mask_mode = False # MB
self.use_seg_for_feature = False #MB added
self.point_data = None
# list of abstract attributes, that should be defined by inheriting classes:
# self.seg_params = None
# self.cluster_params = None
# self.frames = None # the iterable of frames
# self.frame_num = None # the number of frames
# self.name = None
# self.path_from_GUI = None
# self.nb_channels = None
# self.frame_shape = None
# self.nb_neurons = None
# self.h5raw_filename = None
# self.pointdat = None # Todo: get rid of calls in nd2??
# self.pointdat is a self.frame_num * (self.nb_neurons+1) * 3 array with:
# self.pointdat[t][n] = [x,y,z] where x,y,z are the coordinates of neuron n in time frame t (neurons start
# at n>=1, 0 is for background and contains np.nans)
# self.NN_pointdat = None
# self.neuron_presence = None a self.frame_num * (self.nb_neurons+1) array of booleans indicating presence of
# each neuron at each time frame
@classmethod
def load_dataset(cls, dataset_path):
if dataset_path.endswith(".nd2"):
from .nd2Data import nd2Data
return nd2Data(dataset_path)
else:
from .h5Data import h5Data
return h5Data(dataset_path)
@classmethod
def create_dataset(cls, dataset_path):
if dataset_path.endswith(".nd2"):
from .nd2Data import nd2Data
return nd2Data._create_dataset(dataset_path)
else:
from .h5Data import h5Data
return h5Data._create_dataset(dataset_path)
@property
def align(self):
return self._align
@align.setter
def align(self, value):
if value and self.aligner is None:
self.aligner = ImageAligner(self)
self._align = value
@property
def crop(self):
return self._crop
@crop.setter
def crop(self, value):
if value and self.cropper is None:
orig_shape = self.get_frame(0, force_original=True).shape
self.cropper = ImageCropper(self, orig_shape)
self._crop = value
@abc.abstractmethod
def close(self):
"""Close and/or save dataset properly""" # TODO
raise NotImplementedError
@abc.abstractmethod
def save(self):
"""Save what??""" # TODO
raise NotImplementedError
@classmethod
@abc.abstractmethod
def _create_dataset(cls, dataset_path):
"""
Creates new empty dataset at given path.
:return: new DataSet instance
"""
raise NotImplementedError
@abc.abstractmethod
def copy_properties(self, other_ds, except_frame_num=False):
"""
Copies all properties (such as number of frames, number of channels...) from other_ds into self.
:param other_ds: DataSet (of same type as self)
:param except_frame_num: if True, will not copy the frame number
"""
raise NotImplementedError
@abc.abstractmethod
def segmented_times(self, force_regular_seg=False):
"""
Gets the list of times for which a segmentation is defined.
Respects self.coarse_seg_mode, unless force_regular_seg is True
:param force_regular_seg: If True, will return the regular- (as opposed to coarse-) segmented frames, regardless of self.coarse_seg_mode
:return: list of time frames for which a segmentation is defined.
"""
raise NotImplementedError
@abc.abstractmethod
def ground_truth_frames(self):
"""Gets the list of frames marked as ground truth"""
raise NotImplementedError
@abc.abstractmethod
def get_transformation_keys(self):
"""Gets the list of frames for which a transformation is defined."""
raise NotImplementedError
####################################################################################
# reading the data
@abc.abstractmethod
def _get_frame(self, t, col="red"):
"""Gets the original frame"""
raise NotImplementedError
def get_frame(self, t, col="red", force_original=False):
frame = self._get_frame(t, col=col)
if force_original:
return frame
return self._transform(t, frame)
@abc.abstractmethod
def _get_mask(self, t):
"""
Gets the original mask of neurons
Raises KeyError if no mask exists for time t.
"""
raise NotImplementedError
def get_mask(self, t, force_original=False):
"""
Gets the segmented frame. Returns False if mask not present.
:param t: time frame
:param force_original: forces to return mask corresponding to original image, without transformation, and from non-coarse segmentation (if applicable)
:return segmented: 3D numpy array with segmented[x,y,z] = segment_id, or 0 for background
Returns False if mask not present.
"""
orig_segmented = self._get_mask(t)
if force_original or orig_segmented is False:
return orig_segmented
return self._transform(t, orig_segmented, True)
def get_NN_mask(self, t: int, NN_key: str):
"""
Gets the mask predicted by the network designated by NN_key for time t.
:return mask: 3D numpy array with mask[x,y,z] = segment_id, or 0 for background
Returns False if mask not present.
"""
# No transform is applied because the mask is saved with the transformation already applied.
raise NotImplementedError
@abc.abstractmethod
def segmented_frame(self, t, coarse=None):
"""
Gets the segmentation mask (NOT the neurons mask) for frame t.
Never applies a transformation.
"""
raise NotImplementedError
@abc.abstractmethod
def get_validation_set(self, NNname): # MB added this
"""
gets the frames that are validation set in NN
"""
raise NotImplementedError
@abc.abstractmethod
def feature_array(self):
"""
Returns features as numpy array (to be used for clustering/classification).
:param times: which times to include in the feature array (all if None). Overriden by segments.
:param segments: [(t1, s1), (t2, s2), ...] list of segments for which to return the features (in same order).
Overrides times if given; all segments in given times if None.
:param rotation_invariant: if True, use only rotation invariant parameters
:param segs_list: whether to also return list of corresponding (t,s)
:return ftrs[, segs]: ftrs the numpy array of features (one line per (time, segment));
no columns for Time and Segment in array.
Optionally also segs, the list of (t,s) segment corresponding to each line in ftrs, if segs_list
"""
raise NotImplementedError
@abc.abstractmethod
def get_segs_and_assignments(self, times):
"""
Returns list of all segments for given times and corresponding list of assigned neurites.
:param times: iterable of time frames for which to get segments
:return segments, neurites: segments = [(t1, s1), (t1, s2), ..., (t2, s), ...] list of segments for given frames
neurites = [n1, n2, ...] neurite assigned to each segment in segments
"""
raise NotImplementedError
@abc.abstractmethod
def get_transformation(self, t):
"""
Gets the matrix of affine transformation to be applied to align given frame.
:param t: time frame
:return: linear transformation matrix as output by Register_rotate.composite_transform
"""
raise NotImplementedError
@abc.abstractmethod
def ref_frames(self):
"""Gets the set of frames used as rotation references."""
raise NotImplementedError
@abc.abstractmethod
def base_ref_frame(self):
"""
Finds the original_reference for the Register_Rotate class, i.e. the reference frame against which all frames
are aligned.
"""
raise NotImplementedError
# Todo: get_ref(t)??
@abc.abstractmethod
def get_score(self, t):
"""
Gets the rotation score of frame t.
"""
raise NotImplementedError
@abc.abstractmethod
def get_ROI_params(self):
"""
Gets the boundaries of the Region Of Interest (minimum region to include when cropping)
:return: xleft, xright, yleft, yright: the boundaries of the ROI.
"""
@abc.abstractmethod
def available_NNdats(self):
"""Gets iterable of NN ids for which pointdat is available"""
raise NotImplementedError
@abc.abstractmethod
def get_frame_match(self, t):
"""
For a dataset A that was built from dataset B. Matches frame t of A to corresponding time frame in the original
dataset B.
:param t: time frame (in self)
:return: orig: time frame
"""
raise NotImplementedError
@abc.abstractmethod
def original_intervals(self, which_dim=None):
"""
The intervals of the original video frame that the frames of this dataset include.
More precisely, a frame of self will correspond to
orig_frame[x_interval[0]:x_interval[1], y_interval[0]:y_interval[1], z_interval[0]:z_interval[1]]
if orig_frame is the video frame of the original dataset.
:param which_dim: None or one of 'x', 'y', 'z'. Which dimension you want the interval for. If None, will return
all three intervals.
:return: interval or (x_int, y_int, z_int). Each single interval is an array of size 2.
"""
raise NotImplementedError
@abc.abstractmethod
def get_real_time(self, t):
"""
This is the time stamp in the original nd2 file of the time frame t (in self).
:param t: time frame in self
:return: t (int), the time stamp in the original nd2 file
"""
raise NotImplementedError
def get_existing_neurons(self, t):
"""
:param t: time
:return existing_neurons: boolean array of len self.nb_neurons+1 with existing_neurons[neu] is True iff
neurite neu exists at time t
"""
if self.point_data:
existing_neurons = np.logical_not(np.isnan(self.pointdat[t][:, 0]))
elif self.point_data is None:
existing_neurons = np.full(self.nb_neurons+1, False)
else:
try:
mask = self.get_mask(t)
neurons = np.unique(mask)[0:]
existing_neurons = np.array([False] + [n in neurons for n in range(1, self.nb_neurons + 1)])
except KeyError:
existing_neurons = np.full(self.nb_neurons + 1, False)
return existing_neurons
@abc.abstractmethod
def ci_int(self):
"""Raise KeyError if they are not defined.""" # Todo: in what conditions?? useful to have KeyError?
raise NotImplementedError
####################################################################################
# editing the data
@abc.abstractmethod
def replace_frame(self, t, img_red, img_green):
"""
Replaces the videoframe of time frame t by the provided images img_red and img_green, which should be of same
size as original image. Stores the original image into another field, for memory.
Only supposed to work for two channels (though that can be easily changed)
:param t: time frame
:param img_red: new frame (3D numpy array) for red channel
:param img_green: new frame (3D numpy array) for green channel
"""
raise NotImplementedError
@abc.abstractmethod
def _save_frame(self, t, frameR, frameG=0, mask=0):#MB added
raise NotImplementedError
def save_frame(self, t, frameR, frameG = 0, mask = 0, force_original=False):#MB added
"""
Stores (or replaces if existing?) the segmentation for time t.
Saves the dimension of the frame as the dimensions for the dataset, and saves the number of channels or checks
that it is consistent. Also updates self.frame_num if t >= self.frame_num.
:param t: time frame
:param mask: segmented frame (3D numpy array with segmented[x,y,z] = segment (0 if background)
:param force_original: if True, does not apply inverse transform (otherwise, respects self.crop and self.align)
"""
if not force_original:
frameR = self._reverse_transform(t, frameR)
if np.any(frameG):
frameG = self._reverse_transform(t, frameG)
if mask:
mask = self._reverse_transform(t, mask)
if np.any(mask):
self._save_frame(t,frameR,frameG,mask)
else:
self._save_frame(t,frameR,frameG, 0)
@abc.abstractmethod
def _save_mask(self, t, mask):
raise NotImplementedError
def save_mask(self, t, mask, force_original=False):
"""
Stores (or replaces if existing?) the segmentation for time t.
:param t: time frame
:param mask: segmented frame (3D numpy array with segmented[x,y,z] = segment (0 if background)
:param force_original: if True, does not apply inverse transform (otherwise, respects self.crop and self.align)
"""
if not force_original:
mask = self._reverse_transform(t, mask)
self._save_mask(t, mask)
def _save_green_mask(self, t, mask):
raise NotImplementedError
def save_green_mask(self, t, mask, force_original=False):
"""
Stores (or replaces if existing?) the segmentation for time t.
:param t: time frame
:param mask: segmented frame (3D numpy array with segmented[x,y,z] = segment (0 if background)
:param force_original: if True, does not apply inverse transform (otherwise, respects self.crop and self.align)
"""
if not force_original:
mask = self._reverse_transform(t, mask)
self._save_green_mask(t, mask)
@abc.abstractmethod
def save_NN_mask(self, t, NN_key, mask):
"""
Stores (or replaces if existing) the mask predicted by the NN.
The NN-predicted mask is usually for the transformed movie (i.e. the mask is transformed).
:param t: time frame
:param NN_key: str, an identifier for the NN
:param mask: 3D numpy array with segmented[x,y,z] = segment (0 if background)
"""
raise NotImplementedError
@abc.abstractmethod
def flag_as_gt(self, frames):
"""Flag all given frames as ground truth"""
raise NotImplementedError
@abc.abstractmethod
def save_features(self, t, s, ftr_dict):
"""Saves the given features for segment s of time t."""
raise NotImplementedError
@abc.abstractmethod
def assign(self, assignment_dict):
"""
Assigns segments to neurites according to given dictionary.
:param assignment_dict: dictionary (t, s) -> neurite
"""
raise NotImplementedError
@abc.abstractmethod
def save_transformation_matrix(self, t, matrix):
"""Saves given matrix as the matrix of affine transfo to be applied to align given frame."""
raise NotImplementedError
@abc.abstractmethod
def save_ref(self, t, ref):
"""Saves that frame ref is to be used as reference for the alignment of frame t."""
raise NotImplementedError
@abc.abstractmethod
def save_score(self, t, score):
"""Saves the rotation score of frame t."""
raise NotImplementedError
@abc.abstractmethod
def save_ROI_params(self, xleft, xright, yleft, yright):
"""
Saves given parameters as the boundaries of the Region Of Interest (minimum region to include when cropping)
"""
raise NotImplementedError
def save_frame_match(self, orig, new):
"""
For a dataset A that was built from dataset B. Matches frame t of A to corresponding time frame orig in the
original dataset B.
:param orig: time frame
:param new: time frame (in self)
"""
raise NotImplementedError
@abc.abstractmethod
def save_original_intervals(self, x_interval, y_interval, z_interval):
"""
Stores the intervals of the original video frame that the frames of this dataset include.
More precisely, a frame of self will correspond to
orig_frame[x_interval[0]:x_interval[1], y_interval[0]:y_interval[1], z_interval[0]:z_interval[1]]
if orig_frame is the video frame of the original dataset.
:param x_interval, y_interval, z_interval: each is an iterable of size 2
"""
raise NotImplementedError
@abc.abstractmethod
def save_original_size(self, shape):
"""
Stores the shape of the original video frame (the frames of self can be subsamples of the original frames).
:param shape: size 3
"""
raise NotImplementedError
@abc.abstractmethod
def save_real_time(self, t, real_time):
"""
Saves the time stamp in the original nd2 file of the time frame t (in self).
:param t: time in the dataset
:param real_time: time stamp in the original nd2 file
"""
raise NotImplementedError
@abc.abstractmethod
def set_poindat(self, pointdat):
raise NotImplementedError
@abc.abstractmethod
def set_NN_pointdat(self, key):
'''
set NN pointdat
:param key: key
:return:
'''
raise NotImplementedError
@abc.abstractmethod
def pull_NN_results(self, NetName, runname, newpath):
"""
Reads the NN pointdat results from h5 file named by key.
:param key: designates an NN run from which to get the data.
:return:
"""
raise NotImplementedError
def get_method_results(self, method_name:str):
"""
:param method_name: str, the name of the method instance (such as NN)
:return: method_pointdat, the assignments made by the method, in the same format as pointdat
"""
raise NotImplementedError
def get_available_methods(self):
"""
:return: list of method instances available in the dataset
"""
raise NotImplementedError
@abc.abstractmethod
def set_calcium(self, ci_int):
raise NotImplementedError
####################################################################################
# defining the transformations
def _transform(self, t, img, is_mask=False):
if self.align:
img = self.aligner.align(img, t, is_mask)
if self.crop: # TODO: crop and/or resize??
img = self.cropper.crop(img)
return img
def _reverse_transform(self, t, img):
"""
Depending on current transformation mode, applies the necessary reverse transformations to img which is assumed
to be a mask.
"""
if self.crop:
img = self.cropper.inverse_crop(img)
if self.align:
img = self.aligner.dealign(img, t)
return img
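# Note (added comment): get_frame()/get_mask() apply _transform (align, then crop),
# while save_frame()/save_mask() undo it via _reverse_transform (inverse crop, then
# dealign), so edits made on the transformed view are stored in original coordinates.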
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2021 Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from logging import getLogger

from elasticsearch import Elasticsearch, ConnectionError, AuthenticationException, TransportError

from escli.services import Client, ClientConnectionError, ClientAuthError, ClientAPIError

log = getLogger(__name__)


class ElasticsearchClient(Client):
    """ Client for use with Elasticsearch.
    """

    def __init__(self):
        with ElasticsearchExceptionWrapper():
            self._client = Elasticsearch(**self.get_settings_from_env())

    def get_indexes(self, include_all=False):
        pattern = "*" if include_all else "*,-.*"
        return self._client.indices.get(index=pattern)

    def create_index(self, name):
        self._client.indices.create(index=name)

    def delete_index(self, name):
        self._client.indices.delete(index=name)

    def search(self, target, query, fields=None, sort=None, page_size=10, page_number=1):
        with ElasticsearchExceptionWrapper():
            if query is None:
                query = {"match_all": {}}
            else:
                field, _, value = query.partition("=")
                query = {"match": {field: value}}
            if sort:
                if sort.startswith("~"):
                    sort = {sort[1:]: "desc"}
                else:
                    sort = {sort: "asc"}
            else:
                sort = None
            res = self._client.search(index=target, query=query, _source_includes=fields or "*",
                                      sort=sort, from_=(page_size * (page_number - 1)), size=page_size)
            return [hit["_source"] for hit in res["hits"]["hits"]]

    def ingest(self, target, document):
        with ElasticsearchExceptionWrapper():
            res = self._client.index(index=target, document=document)
            return res  # TODO: something more intelligent


class ElasticsearchExceptionWrapper:
    """ Wrapper to catch and promote exceptions to the appropriate level
    of abstraction.
    """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if not exc_type:
            return
        try:
            raise exc_val
        except ConnectionError as ex:
            log.debug(ex.info)
            raise ClientConnectionError("Connection error: %s" % ex) from ex
        except AuthenticationException as ex:
            log.debug(ex.info)
            raise ClientAuthError("Auth error: %s" % ex) from ex
        except TransportError as ex:
            log.debug(ex.info)
            raise ClientAPIError("API error: %s" % ex) from ex
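

# A minimal usage sketch (not part of the original module). It assumes that connection
# settings are supplied through the environment, as read by get_settings_from_env()
# in escli.services, and that an index named "articles" exists; both are assumptions
# made purely for illustration.
if __name__ == "__main__":
    client = ElasticsearchClient()
    for hit in client.search("articles", "title=elasticsearch", sort="~published", page_size=5):
        print(hit)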
|
from scipy.stats import mode
import numpy as np
from imblearn.combine import SMOTEENN

from . import AbstractClassifier


class OversampleClassifier(AbstractClassifier):
    """Classifier which uses minority oversampling to balance minority classes"""

    def __init__(self, classifier, **kwargs):
        super(OversampleClassifier, self).__init__(
            "Oversampled ({})".format(classifier), **kwargs)
        self.classifier = classifier

    # balance the dataset, then train on it
    def _train(self, X, Y):
        # combine features with outputs for simpler row manipulation
        data = np.hstack((X, Y.reshape((-1, 1))))
        # unique classes, class index of each point, class counts
        labels, label_counts = np.unique(Y, return_counts=True)
        num_each = max(label_counts)
        diffs = [num_each - count for count in label_counts]
        # add diff samples of each class to train_data
        for label, diff in zip(labels, diffs):
            if diff:
                subset = data[data[:, -1] == label]
                sample_idxs = np.random.choice(subset.shape[0], size=diff)
                # print subset
                # print sample_idxs
                # print subset[sample_idxs]
                # if it's a minority class, take a random oversample
                data = np.vstack((data, subset[sample_idxs]))
        # train the classifier on equally-distributed samples
        self.classifier.train(data[:, :-1], data[:, -1])

    # classify a set of test points
    def _classify(self, test_X):
        return self.classifier.classify(test_X)


sm = SMOTEENN()


class SMOTEClassifier(OversampleClassifier):
    """Classifier which uses SMOTE resampling to account for class distribution skew"""

    def __init__(self, classifier, **kwargs):
        super(OversampleClassifier, self).__init__(
            "SMOTE ({})".format(classifier), **kwargs)
        self.classifier = classifier

    # balance the dataset, then train on it
    def _train(self, X, Y):
        # train the classifier on SMOTE-balanced samples
        self.classifier.train(*sm.fit_sample(X, Y))


# from . import SVMClassifier, RandomForestClassifier, LogisticRegressionClassifier, KNNClassifier, test_classifier
'''
for kernel in ['rbf', 'poly']:
    for c in range(1, 10, 1):
        if kernel == poly:
            test_classifier(SMOTEClassifier(SVMClassifier(C=c, kernel=kernel, degree=3)), folds=10)
        test_classifier(SMOTEClassifier(SVMClassifier(C=c, kernel=kernel, degree=3)), folds=10)
'''
'''
for t in [4, 8, 16, 25, 32, 50, 64, 75, 100, 128]:
    test_classifier(SMOTEClassifier(RandomForestClassifier(t)))
'''
'''
for c in [0.1, 0.5, 1, 1.5, 2.5]:
    for degree in range(2, 4):
        test_classifier(SMOTEClassifier(LogisticRegressionClassifier(C=c, degree=degree)))
raise Exception('done for now')
'''
'''
for k in [1, 2, 4, 8, 16, 25, 32, 64]:
    test_classifier(SMOTEClassifier(KNNClassifier(k)))
raise Exception('done for now')
'''
# for t in [4, 8, 16, 25, 32, 50, 64, 75, 100, 128]:
#     test_classifier(RandomForestClassifier(t))
# test_classifier(SMOTEClassifier(SVMClassifier(C=6, kernel='rbf')), folds=10)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/4/25 11:47
# @Author : Darren
# @Site :
# @File : config.py
# @Software: PyCharm
real_image_path = './data/train_images/'
fake_tps_image_path = './data/fake_images/tps_file/'
fake_transform_image_path = './data/fake_images/transform_file/'
|
minha_idade = 32
print('I am', minha_idade, 'years old')

taxaUSD = 3.20
arroz = 17.30
feijao = 8.99

# Find the prices in USD
arrozUSA = arroz * taxaUSD
print(round(arrozUSA))
|
"""Top-level package for damitalia."""
__author__ = """Manuel Capel"""
__email__ = 'manuel.capel82@gmail.com'
__version__ = '0.1.0'
|
s = [2,4,3,1]
s.sort()
print(s)
s.reverse()
print(s)
s = ['1', '2', '12']
print(s)
s.sort()
print(s)
s.sort(key=lambda x:int(x))
print(s)
s.sort(reverse=True)
print(s)
s.sort(key=lambda x:int(x), reverse=True)
print(s)
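# Note (added comment): plain s.sort() on strings orders lexicographically ('12' < '2'),
# while key=int (or the equivalent lambda above) compares the underlying numbers instead.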
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-09 20:13
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('golf', '0018_auto_20170809_1155'),
    ]

    operations = [
        migrations.CreateModel(
            name='PlayerPlugin',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('class_package', models.CharField(blank=True, help_text='Name of the module (filename with the .py) containing the class of your plugin', max_length=200, null=True)),
            ],
        ),
        migrations.RemoveField(
            model_name='club',
            name='club_class_name',
        ),
        migrations.RemoveField(
            model_name='club',
            name='club_class_package',
        ),
        migrations.AlterField(
            model_name='club',
            name='data',
            field=models.CharField(blank=True, help_text='Data such as username and password used to login to your clubs player data store (used by your plugin)', max_length=516, null=True),
        ),
        migrations.AddField(
            model_name='club',
            name='player_plugin',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='golf.PlayerPlugin', verbose_name='Player Plugin Id'),
        ),
    ]
|
name = "proc"
import QNLP.proc.basis_check
import QNLP.proc.load_basis
import QNLP.proc.process_corpus
import QNLP.proc.DisCoCat
import QNLP.proc.VectorSpaceModel
import QNLP.proc.VerbGraph
import QNLP.proc.HammingDistance
__all__ = ["basis_check", "load_basis", "process_corpus", "DisCoCat", "VectorSpaceModel", "VerbGraph", "HammingDistance"]
|
import os
from tqdm import tqdm
import warnings
from multiprocessing import Queue, Process
from artm.wrapper.exceptions import ArtmException
from .strategy import BaseStrategy
from ..models.base_model import padd_model_name
from ..routine import get_timestamp_in_str_format
NUM_MODELS_ERROR = "Failed to retrieve number of trained models"
MODEL_RETRIEVE_ERROR = "Retrieved only {0} models out of {1}"
STRATEGY_RETRIEVE_ERROR = 'Failed to retrieve strategy parameters'
WARNINGS_RETRIEVE_ERROR = 'Failed to return warnings'
SCORE_ERROR_MESSAGE = "Can't find a score ''{0}''. Please add a score with that name to the model."
def check_experiment_existence(topic_model):
"""
Checks if topic_model has experiment.
Parameters
----------
topic_model : TopicModel
topic model
Returns
-------
bool
True if experiment exists, in other case False.
"""
is_experiment = topic_model.experiment is not None
return is_experiment
def retrieve_score_for_strategy(score_name=None):
if not score_name:
score_name = 'PerplexityScore@all'
def last_score(model):
try:
return model.scores[score_name][-1]
except KeyError:
raise KeyError(SCORE_ERROR_MESSAGE.format(score_name))
return last_score
# exists for multiprocessing debug
def put_to_queue(queue, puttable):
queue.put(puttable)
# exists for multiprocessing debug
def get_from_queue_till_fail(queue, error_message='',):
return queue.get()
class BaseCube:
"""
Abstract class for all cubes.
"""
def __init__(self, num_iter, action=None, reg_search="grid",
strategy=None, tracked_score_function=None,
verbose=False, separate_thread=True):
"""
Initialize stage.
Checks params and update .parameters attribute.
Parameters
----------
num_iter : int
number of iterations or method
action : str
stage of creation
reg_search : str
"grid" or "pair". "pair" for elementwise grid search in the case
of several regularizers, "grid" for the fullgrid search in the
case of several regularizers
strategy : BaseStrategy
optimization approach
tracked_score_function : str or callable
optimizable function for strategy
verbose : bool
visualization flag
separate_thread : bool
will train models inside a separate thread if True
"""
self.num_iter = num_iter
self.parameters = []
self.action = action
self.reg_search = reg_search
if not strategy:
strategy = BaseStrategy()
self.strategy = strategy
self.verbose = verbose
self.separate_thread = separate_thread
if isinstance(tracked_score_function, str):
tracked_score_function = retrieve_score_for_strategy(tracked_score_function)
self.tracked_score_function = tracked_score_function
def apply(self, topic_model, one_cube_parameter, dictionary=None, model_id=None):
"""
"apply" method changes topic_model in way that is defined by one_cube_parameter.
Parameters
----------
topic_model : TopicModel
topic model
one_cube_parameter : optional
parameters of one experiment
dictionary : dict
dictionary so that it can be used
on the basis of the model (Default value = None)
model_id : str
id of created model if necessary (Default value = None)
Returns
-------
"""
raise NotImplementedError('must be implemented in subclass')
# TODO: because of the get_description method, more requirements are placed on this function than are stated here
def get_jsonable_from_parameters(self):
"""
Transform self.parameters to something that can be downloaded as json.
Parameters
----------
Returns
-------
optional
something jsonable
"""
return self.parameters
def _train_models(self, experiment, topic_model, dataset, search_space):
"""
This function trains models
"""
dataset_trainable = dataset._transform_data_for_training()
dataset_dictionary = dataset.get_dictionary()
returned_paths = []
experiment_save_path = experiment.save_path
experiment_id = experiment.experiment_id
save_folder = os.path.join(experiment_save_path, experiment_id)
for search_point in search_space:
candidate_name = get_timestamp_in_str_format()
new_model_id = padd_model_name(candidate_name)
new_model_save_path = os.path.join(save_folder, new_model_id)
model_index = 0
while os.path.exists(new_model_save_path):
model_index += 1
new_model_id = padd_model_name("{0}{1:_>5}".format(candidate_name, model_index))
new_model_save_path = os.path.join(save_folder, new_model_id)
model_cube = {
"action": self.action,
"num_iter": self.num_iter,
"params": repr(search_point)
}
try:
# alter the model according to cube parameters
new_model = self.apply(topic_model, search_point, dataset_dictionary, new_model_id)
# train new model for a number of iterations (might be zero)
new_model._fit(
dataset_trainable=dataset_trainable,
num_iterations=self.num_iter
)
except ArtmException as e:
error_message = repr(e)
raise ValueError(
f'Cannot alter and fit artm model with parameters {search_point}.\n'
"ARTM failed with following: " + error_message
)
# add cube description to the model history
new_model.add_cube(model_cube)
new_model.experiment = experiment
new_model.save()
assert os.path.exists(new_model.model_default_save_path)
returned_paths.append(new_model.model_default_save_path)
# some strategies depend on previous train results, therefore scores must be updated
if self.tracked_score_function:
current_score = self.tracked_score_function(new_model)
self.strategy.update_scores(current_score)
# else:
# we return number of iterations as a placeholder
# current_score = len(returned_paths)
return returned_paths
def _retrieve_results_from_process(self, queue, experiment):
from ..models import DummyTopicModel
models_num = get_from_queue_till_fail(queue, NUM_MODELS_ERROR)
topic_models = []
for _ in range(models_num):
path = get_from_queue_till_fail(queue,
MODEL_RETRIEVE_ERROR.format(_, models_num))
topic_models.append(DummyTopicModel.load(path, experiment=experiment))
strategy_parameters = get_from_queue_till_fail(queue, STRATEGY_RETRIEVE_ERROR)
caught_warnings = get_from_queue_till_fail(queue, WARNINGS_RETRIEVE_ERROR)
self.strategy._set_strategy_parameters(strategy_parameters)
for (warning_message, warning_class) in caught_warnings:
# if issubclass(warning_class, UserWarning):
warnings.warn(warning_message)
return topic_models
def _train_models_and_report_results(self, queue, experiment, topic_model, dataset,
search_space, search_length):
"""
        This function trains models in a separate process, saves them
        and puts their save paths into the queue in training order.
        The number of models is put into the queue first so that training order can be preserved.
"""
with warnings.catch_warnings(record=True) as caught_warnings:
returned_paths = self._train_models(experiment, topic_model, dataset, search_space)
put_to_queue(queue, len(returned_paths))
for path in returned_paths:
put_to_queue(queue, path)
# to work with strategy we recover consistency by sending important parameters
strategy_parameters = self.strategy._get_strategy_parameters(saveable_only=True)
put_to_queue(queue, strategy_parameters)
caught_warnings = [(warning.message, warning.category)
for warning in caught_warnings]
put_to_queue(queue, caught_warnings)
def _run_cube(self, topic_model, dataset):
"""
        Apply cube to topic_model. Get new models and fit them on the dataset.
Return list of all trained models.
Parameters
----------
topic_model : TopicModel
dataset : Dataset
Returns
-------
        list of TopicModel
"""
from ..models import DummyTopicModel
if isinstance(topic_model, DummyTopicModel):
topic_model = topic_model.restore()
# create log
        # TODO: will behave strangely if the parameter list is infinite
parameter_description = self.get_jsonable_from_parameters()
cube_description = {
'action': self.action,
'params': parameter_description
}
        # only one cube can be applied at any given level
if not check_experiment_existence(topic_model):
raise ValueError("TopicModel has no experiment. You should create Experiment.")
experiment = topic_model.experiment
topic_model_depth_in_tree = topic_model.depth
if topic_model_depth_in_tree < len(experiment.cubes):
existed_cube = experiment.cubes[topic_model_depth_in_tree]
if existed_cube['params'] != cube_description['params'] or \
existed_cube['action'] != cube_description['action']:
                error_message = (
                    "\nYou cannot apply a different cube at this level of "
                    "this experiment.\n"
                    "If you want to, you can create another experiment with this "
                    "model using the parameter new_experiment=True.\n"
                    f"The existing cube is \n {existed_cube['params']} \n, "
                    f"but the proposed cube is \n {cube_description['params']} \n"
)
raise ValueError(error_message)
is_new_exp_cube = False
else:
is_new_exp_cube = True
# perform all experiments
self.strategy.prepare_grid(self.parameters, self.reg_search)
search_space = self.strategy.grid_visit_generator(self.parameters, self.reg_search)
search_length = getattr(self.strategy, 'grid_len', None)
if self.verbose:
search_space = tqdm(search_space, total=search_length)
if self.separate_thread:
queue = Queue()
process = Process(
target=self._train_models_and_report_results,
args=(queue, experiment, topic_model, dataset,
search_space, search_length),
daemon=True
)
process.start()
topic_models = self._retrieve_results_from_process(queue, experiment)
else:
returned_paths = self._train_models(experiment, topic_model, dataset, search_space)
topic_models = [
DummyTopicModel.load(path, experiment=experiment)
for path in returned_paths
]
for topic_model in topic_models:
topic_model.data_path = dataset._data_path
experiment.add_model(topic_model)
if is_new_exp_cube:
experiment.add_cube(cube_description)
return topic_models
def __call__(self, topic_model_input, dataset):
"""
        Apply cube to topic_model. Get new models and fit them on the dataset.
Return list of all trained models.
Parameters
----------
topic_model_input: TopicModel or list of TopicModel
dataset: Dataset
Returns
-------
list of TopicModel
"""
if isinstance(topic_model_input, (list, set)):
results = [
self._run_cube(topic_model, dataset)
for topic_model in topic_model_input
]
return results
return self._run_cube(topic_model_input, dataset)
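# Hedged, self-contained sketch (not used by the class above): the save-path
# collision handling in _train_models appends an increasing index to a candidate
# model id until the target directory does not exist yet. The helper below mirrors
# that pattern with plain `os` calls; the naming helpers used above
# (get_timestamp_in_str_format, padd_model_name) belong to the library and are not
# reproduced here.
def _unique_save_path_sketch(save_folder, candidate_name):
    import os

    new_model_id = candidate_name
    new_model_save_path = os.path.join(save_folder, new_model_id)
    model_index = 0
    while os.path.exists(new_model_save_path):
        model_index += 1
        new_model_id = "{0}{1:_>5}".format(candidate_name, model_index)
        new_model_save_path = os.path.join(save_folder, new_model_id)
    return new_model_id, new_model_save_path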
|
import multiprocessing
import os
import string
import dask
import numpy as np
import pytest
import xarray as xr
import xpartition
@pytest.mark.parametrize(
("block_indexers", "expected", "exception"),
[
({"x": slice(0, 3)}, {"x": slice(0, 6)}, None),
({"x": slice(1, 2)}, {"x": slice(2, 5)}, None),
({"x": slice(-3, -2)}, {"x": slice(0, 2)}, None),
({"x": slice(-3, -1)}, {"x": slice(0, 5)}, None),
({"x": slice(-3, None)}, {"x": slice(0, 6)}, None),
({"x": slice(None, 1)}, {"x": slice(0, 2)}, None),
({"x": slice(0, 10)}, {"x": slice(0, 6)}, None),
({"x": slice(-10, None)}, {"x": slice(0, 6)}, None),
({"x": slice(None, None)}, {"x": slice(0, 6)}, None),
({"x": slice(10, 12)}, {"x": slice(6, 6)}, None),
({"x": slice(2, 1)}, {"x": slice(5, 2)}, None),
({"x": 1}, {"x": slice(2, 5)}, None),
({"x": -1}, {"x": slice(5, 6)}, None),
({"x": -2}, {"x": slice(2, 5)}, None),
({"x": np.int32(2)}, {"x": slice(5, 6)}, None),
({"x": slice(0, 3), "y": 1}, {"x": slice(0, 6), "y": slice(3, 4)}, None),
({"x": 4}, None, IndexError),
({"x": -4}, None, IndexError),
({"z": 1}, None, KeyError),
({"x": slice(None, None, 2)}, None, NotImplementedError),
({"x": 2.0}, None, ValueError),
],
ids=lambda x: f"{x}",
)
def test_indexers(block_indexers, expected, exception):
data = dask.array.zeros((6, 4), chunks=((2, 3, 1), (3, 1)))
da = xr.DataArray(data, dims=["x", "y"])
if exception is None:
result = da.blocks.indexers(**block_indexers)
assert result == expected
else:
with pytest.raises(exception):
da.blocks.indexers(**block_indexers)
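# Hedged illustration (not part of the test suite): how a block slice along one
# dimension maps to an element slice, mirroring the expectations tabulated in
# test_indexers above. `chunks` is the per-block size tuple for one dimension.
def _block_slice_to_element_slice(chunks, block_slice):
    """For chunks (2, 3, 1) along x, slice(1, 2) covers elements slice(2, 5),
    matching the ({"x": slice(1, 2)} -> {"x": slice(2, 5)}) case above."""
    offsets = np.concatenate([[0], np.cumsum(chunks)])
    start, stop, _ = block_slice.indices(len(chunks))
    return slice(int(offsets[start]), int(offsets[stop]))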
def test_isel():
data = dask.array.random.random((6, 4), chunks=((2, 3, 1), (3, 1)))
da = xr.DataArray(data, dims=["x", "y"])
result = da.blocks.isel(x=slice(1, 2), y=1).data.compute()
expected = data.blocks[1:2, 1].compute()
np.testing.assert_array_equal(result, expected)
@pytest.mark.filterwarnings("ignore:Specified Dask chunks")
@pytest.mark.parametrize("ranks", [1, 2, 3, 5, 10, 11])
def test_dataarray_mappable_write(tmpdir, da, ranks):
store = os.path.join(tmpdir, "test.zarr")
ds = da.to_dataset()
ds.to_zarr(store, compute=False)
with multiprocessing.get_context("spawn").Pool(ranks) as pool:
pool.map(da.partition.mappable_write(store, ranks, da.dims), range(ranks))
result = xr.open_zarr(store)
xr.testing.assert_identical(result, ds)
SHAPE_AND_CHUNK_PAIRS = [
((5,), (1,)),
((5,), (2,)),
((5,), (5,)),
((2, 5), (1, 1)),
((2, 5), (2, 1)),
((2, 5), (2, 2)),
((2, 5), (2, 4)),
((2, 5), (2, 5)),
((2, 1, 6), (1, 1, 1)),
((2, 1, 6), (1, 1, 2)),
((2, 1, 6), (2, 1, 2)),
((2, 1, 6), (2, 1, 5)),
((2, 3, 4, 5), (1, 1, 1, 1)),
((2, 3, 4, 5), (2, 1, 3, 3)),
]
@pytest.fixture(params=SHAPE_AND_CHUNK_PAIRS, ids=lambda x: str(x))
def da(request):
shape, chunks = request.param
name = "foo"
return _construct_dataarray(shape, chunks, name)
def _construct_dataarray(shape, chunks, name):
dims = list(string.ascii_lowercase[: len(shape)])
data = np.random.random(shape)
da = xr.DataArray(data, dims=dims, name=name)
if chunks is not None:
chunks = {dim: chunk for dim, chunk in zip(dims, chunks)}
da = da.chunk(chunks)
return da
ALIGNED_SHAPE_AND_CHUNK_PAIRS = [
((5,), (1,)),
((5,), (2,)),
((5,), (5,)),
((5, 2), (1, 1)),
((5, 2), (1, 2)),
((5, 2), (2, 2)),
((5, 2), (4, 2)),
((5, 2), (5, 2)),
((5, 2, 6), (1, 1, 1)),
((5, 2, 6), (1, 1, 2)),
((5, 2, 6), (2, 1, 2)),
((5, 2, 6), (2, 2, 5)),
]
@pytest.fixture
def ds():
unchunked_dataarrays = []
for i, (shape, chunks) in enumerate(ALIGNED_SHAPE_AND_CHUNK_PAIRS):
da = _construct_dataarray(shape, None, f"unchunked_{i}")
unchunked_dataarrays.append(da)
chunked_dataarrays = []
for i, (shape, chunks) in enumerate(ALIGNED_SHAPE_AND_CHUNK_PAIRS):
da = _construct_dataarray(shape, chunks, f"chunked_{i}")
chunked_dataarrays.append(da)
return xr.merge(unchunked_dataarrays + chunked_dataarrays)
@pytest.mark.filterwarnings("ignore:Specified Dask chunks")
@pytest.mark.parametrize("ranks", [1, 2, 3, 5, 10, 11])
def test_dataset_mappable_write(tmpdir, ds, ranks):
store = os.path.join(tmpdir, "test.zarr")
ds.partition.initialize_store(store)
with multiprocessing.get_context("spawn").Pool(ranks) as pool:
pool.map(ds.partition.mappable_write(store, ranks, ds.dims), range(ranks))
result = xr.open_zarr(store)
xr.testing.assert_identical(result, ds)
@pytest.mark.parametrize("has_coord", [True, False])
@pytest.mark.parametrize(
"original_chunks", [{"x": 2}, {"x": 2, "y": 5}], ids=lambda x: f"{x}"
)
def test_PartitionMapper_integration(tmpdir, has_coord, original_chunks):
def func(ds):
return ds.rename(z="new_name").assign_attrs(dataset_attr="fun")
ds = xr.Dataset({"z": (["x", "y"], np.ones((5, 10)), {"an": "attr"})}).chunk(
original_chunks
)
if has_coord:
ds = ds.assign_coords(x=range(5))
store = str(tmpdir)
mapper = ds.z.partition.map(store, ranks=3, dims=["x"], func=func, data=ds)
for rank in mapper:
mapper.write(rank)
written = xr.open_zarr(store)
xr.testing.assert_identical(func(ds), written)
def test_partition_partition():
# Partitions have two qualities which we test using a DataArray that
# has all unique values
ds = xr.Dataset({"z": (["x", "y"], np.arange(50).reshape((5, 10)))}).chunk({"x": 2})
arr = ds["z"]
n = 3
regions = arr.partition.partition(n, dims=["x"])
assert n == len(regions)
def to_set(arr):
return set(arr.values.ravel().tolist())
# These are the properties of a partition
# 1. sets in a partition are disjoint
intersection = set.intersection(*[to_set(arr.isel(region)) for region in regions])
assert intersection == set()
# assert that the values cover the set
# 2. the sets cover the original set
union = set.union(*[to_set(arr.isel(region)) for region in regions])
assert union == to_set(arr)
@pytest.mark.parametrize(
("original_chunks", "override_chunks", "expected_chunks"),
[
({"x": 5, "y": 2}, None, ((5, 5), (2, 2, 2))),
({"x": 5, "y": 2}, {"y": 3}, ((5, 5), (3, 3))),
({"x": 5, "y": 2}, {"y": 3, "z": 1}, ((5, 5), (3, 3))),
],
ids=lambda x: f"{x}",
)
@pytest.mark.parametrize("dtype", [float, int])
def test__zeros_like_dataarray(
original_chunks, override_chunks, expected_chunks, dtype
):
da = xr.DataArray(np.zeros((10, 6), dtype=dtype), dims=["x", "y"]).chunk(
original_chunks
)
result = xpartition._zeros_like_dataarray(da, override_chunks)
result_chunks = result.chunks
assert result_chunks == expected_chunks
assert result.dtype == da.dtype
def test_zeros_like():
shape = (2, 4)
dims = ["x", "y"]
attrs = {"foo": "bar"}
data1 = dask.array.random.random(shape)
data2 = dask.array.random.randint(0, size=shape)
data3 = dask.array.random.random(shape, chunks=(1, 1))
da1 = xr.DataArray(data1, dims=dims, name="a", attrs=attrs)
da2 = xr.DataArray(data2, dims=dims, name="b", attrs=attrs)
da3 = xr.DataArray(data3, dims=dims, name="c", attrs=attrs)
ds = xr.merge([da1, da2, da3])
zeros1_data = dask.array.zeros(shape)
zeros2_data = dask.array.zeros(shape, dtype=int)
zeros3_data = dask.array.zeros(shape, chunks=(1, 1))
zeros1 = xr.DataArray(zeros1_data, dims=dims, name="a", attrs=attrs)
zeros2 = xr.DataArray(zeros2_data, dims=dims, name="b", attrs=attrs)
zeros3 = xr.DataArray(zeros3_data, dims=dims, name="c", attrs=attrs)
expected = xr.merge([zeros1, zeros2, zeros3])
result = xpartition.zeros_like(ds)
xr.testing.assert_identical(result, expected)
for var in result:
# assert_identical does not check dtype or chunks
assert result[var].dtype == expected[var].dtype
assert result[var].chunks == expected[var].chunks
def test_partition_indexers_invalid_rank_error():
data = dask.array.zeros((6, 4), chunks=((6, 4)))
da = xr.DataArray(data, dims=["x", "y"])
with pytest.raises(ValueError, match="greater than maximum rank"):
da.partition.indexers(1, 1, ["x"])
|
# -*- coding: utf-8 -*-
from blinker import Namespace
_namespace = Namespace()
add_blueprints = _namespace.signal("add-blueprints")
add_extensions = _namespace.signal("add-extensions")
|
#Julie Chang and Chris Metzler 2020
import abc
import tensorflow as tf
import numpy as np
from numpy.fft import ifftshift
import fractions
import poppy
##############################
# Helper functions
##############################
def get_zernike_volume(resolution, n_terms, scale_factor=1e-6):
zernike_volume = poppy.zernike.zernike_basis(nterms=n_terms, npix=resolution, outside=0.0)
return zernike_volume * scale_factor
def fspecial(shape=(3, 3), sigma=0.5):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2. * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
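# Hedged usage sketch (illustrative only, never called): fspecial returns a
# normalized Gaussian kernel, so its entries sum to 1 and the kernel is symmetric
# for square shapes.
def _fspecial_example():
    kernel = fspecial(shape=(5, 5), sigma=1.0)
    assert np.isclose(kernel.sum(), 1.0)
    assert np.allclose(kernel, kernel.T)
    return kernel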
def zoom(image_batch, zoom_fraction):
"""Get central crop of batch
"""
images = tf.unstack(image_batch, axis=0)
crops = []
for image in images:
crop = tf.image.central_crop(image, zoom_fraction)
crops.append(crop)
return tf.stack(crops, axis=0)
def transp_fft2d(a_tensor, dtype=tf.complex64):
"""Takes images of shape [batch_size, x, y, channels] and transposes them
correctly for tensorflows fft2d to work.
"""
# Tensorflow's fft only supports complex64 dtype
a_tensor = tf.cast(a_tensor, tf.complex64)
# Tensorflow's FFT operates on the two innermost (last two!) dimensions
a_tensor_transp = tf.transpose(a_tensor, [0, 3, 1, 2])
a_fft2d = tf.fft2d(a_tensor_transp)
a_fft2d = tf.cast(a_fft2d, dtype)
a_fft2d = tf.transpose(a_fft2d, [0, 2, 3, 1])
return a_fft2d
def transp_ifft2d(a_tensor, dtype=tf.complex64):
a_tensor = tf.transpose(a_tensor, [0, 3, 1, 2])
a_tensor = tf.cast(a_tensor, tf.complex64)
a_ifft2d_transp = tf.ifft2d(a_tensor)
# Transpose back to [batch_size, x, y, channels]
a_ifft2d = tf.transpose(a_ifft2d_transp, [0, 2, 3, 1])
a_ifft2d = tf.cast(a_ifft2d, dtype)
return a_ifft2d
def compl_exp_tf(phase, dtype=tf.complex64, name='complex_exp'):
    """Complex exponent via Euler's formula, since CUDA doesn't have a GPU kernel for that.
Casts to *dtype*.
"""
phase = tf.cast(phase, tf.float64)
return tf.add(tf.cast(tf.cos(phase), dtype=dtype),
1.j * tf.cast(tf.sin(phase), dtype=dtype),
name=name)
def laplacian_filter_tf(img_batch):
"""Laplacian filter. Also considers diagonals.
"""
laplacian_filter = tf.constant([[1, 1, 1], [1, -8, 1], [1, 1, 1]], dtype=tf.float32)
laplacian_filter = tf.reshape(laplacian_filter, [3, 3, 1, 1])
filter_input = tf.cast(img_batch, tf.float32)
filtered_batch = tf.nn.convolution(filter_input, filter=laplacian_filter, padding="SAME")
return filtered_batch
def laplace_l1_regularizer(scale):
if np.allclose(scale, 0.):
print("Scale of zero disables the laplace_l1_regularizer.")
def laplace_l1(a_tensor):
with tf.name_scope('laplace_l1_regularizer'):
laplace_filtered = laplacian_filter_tf(a_tensor)
laplace_filtered = laplace_filtered[:, 1:-1, 1:-1, :]
attach_summaries("Laplace_filtered", tf.abs(laplace_filtered), image=True, log_image=True)
return scale * tf.reduce_mean(tf.abs(laplace_filtered))
return laplace_l1
def laplace_l2_regularizer(scale):
if np.allclose(scale, 0.):
        print("Scale of zero disables the laplace_l2_regularizer.")
def laplace_l2(a_tensor):
with tf.name_scope('laplace_l2_regularizer'):
laplace_filtered = laplacian_filter_tf(a_tensor)
laplace_filtered = laplace_filtered[:, 1:-1, 1:-1, :]
attach_summaries("Laplace_filtered", tf.abs(laplace_filtered), image=True, log_image=True)
return scale * tf.reduce_mean(tf.square(laplace_filtered))
return laplace_l2
def phaseshifts_from_height_map(height_map, wave_lengths, refractive_idcs):
    '''Calculates the phase shifts created by a height map with a given
    refractive index for light with a specific wavelength.
'''
# refractive index difference
delta_N = refractive_idcs.reshape([1, 1, 1, -1]) - 1.
# wave number
wave_nos = 2. * np.pi / wave_lengths
wave_nos = wave_nos.reshape([1, 1, 1, -1])
    # phase delay induced by the height field
phi = wave_nos * delta_N * height_map
phase_shifts = compl_exp_tf(phi)
return phase_shifts
def get_one_phase_shift_thickness(wave_lengths, refractive_index):
    """Calculate the thickness (in meters) that produces a phase shift of 2*pi.
"""
# refractive index difference
delta_N = refractive_index - 1.
# wave number
wave_nos = 2. * np.pi / wave_lengths
two_pi_thickness = (2. * np.pi) / (wave_nos * delta_N)
return two_pi_thickness
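# Hedged worked example (illustrative only, never called): the 2*pi-phase-shift
# thickness reduces to wavelength / (n - 1); for a 550 nm wavelength and a
# refractive index of 1.5 this is roughly 1.1 micrometers.
def _one_phase_shift_thickness_example():
    thickness = get_one_phase_shift_thickness(np.array([550e-9]), 1.5)
    assert np.allclose(thickness, 550e-9 / 0.5)
    return thickness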
def attach_summaries(name, var, image=False, log_image=False):
if image:
tf.summary.image(name, var, max_outputs=3)
if log_image and image:
tf.summary.image(name + '_log', tf.log(var + 1e-12), max_outputs=3)
tf.summary.scalar(name + '_mean', tf.reduce_mean(var))
tf.summary.scalar(name + '_max', tf.reduce_max(var))
tf.summary.scalar(name + '_min', tf.reduce_min(var))
tf.summary.histogram(name + '_histogram', var)
def fftshift2d_tf(a_tensor):
input_shape = a_tensor.shape.as_list()
new_tensor = a_tensor
for axis in range(1, 3):
split = (input_shape[axis] + 1) // 2
mylist = np.concatenate((np.arange(split, input_shape[axis]), np.arange(split)))
new_tensor = tf.gather(new_tensor, mylist, axis=axis)
return new_tensor
def ifftshift2d_tf(a_tensor):
input_shape = a_tensor.shape.as_list()
new_tensor = a_tensor
for axis in range(1, 3):
n = input_shape[axis]
split = n - (n + 1) // 2
mylist = np.concatenate((np.arange(split, n), np.arange(split)))
new_tensor = tf.gather(new_tensor, mylist, axis=axis)
return new_tensor
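# Hedged cross-check (illustrative only, never called, no TensorFlow needed): the
# gather indices built in ifftshift2d_tf above, [split, ..., n-1, 0, ..., split-1]
# with split = n - (n + 1) // 2, reproduce numpy's ifftshift along one axis.
def _ifftshift_indices_example(n=5):
    split = n - (n + 1) // 2
    idx = np.concatenate((np.arange(split, n), np.arange(split)))
    assert np.array_equal(np.arange(n)[idx], np.fft.ifftshift(np.arange(n)))
    return idx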
def psf2otf(input_filter, output_size):
'''Convert 4D tensorflow filter into its FFT.
:param input_filter: PSF. Shape (height, width, num_color_channels, num_color_channels)
:param output_size: Size of the output OTF.
:return: The otf.
'''
# pad out to output_size with zeros
# circularly shift so center pixel is at 0,0
fh, fw, _, _ = input_filter.shape.as_list()
if output_size[0] != fh:
pad = (output_size[0] - fh) / 2
if (output_size[0] - fh) % 2 != 0:
pad_top = pad_left = int(np.ceil(pad))
pad_bottom = pad_right = int(np.floor(pad))
else:
pad_top = pad_left = int(pad) + 1
pad_bottom = pad_right = int(pad) - 1
padded = tf.pad(input_filter, [[pad_top, pad_bottom],
[pad_left, pad_right], [0, 0], [0, 0]], "CONSTANT")
else:
padded = input_filter
padded = tf.transpose(padded, [2, 0, 1, 3])
padded = ifftshift2d_tf(padded)
padded = tf.transpose(padded, [1, 2, 0, 3])
## Take FFT
tmp = tf.transpose(padded, [2, 3, 0, 1])
tmp = tf.fft2d(tf.complex(tmp, 0.))
return tf.transpose(tmp, [2, 3, 0, 1])
def next_power_of_two(number):
closest_pow = np.power(2, np.ceil(np.math.log(number, 2)))
return closest_pow
def img_psf_conv(img, psf, otf=None, adjoint=False, circular=False):
'''Performs a convolution of an image and a psf in frequency space.
:param img: Image tensor.
:param psf: PSF tensor.
:param otf: If OTF is already computed, the otf.
:param adjoint: Whether to perform an adjoint convolution or not.
:param circular: Whether to perform a circular convolution or not.
:return: Image convolved with PSF.
'''
img = tf.convert_to_tensor(img, dtype=tf.float32)
psf = tf.convert_to_tensor(psf, dtype=tf.float32)
img_shape = img.shape.as_list()
if not circular:
target_side_length = 2 * img_shape[1]
height_pad = (target_side_length - img_shape[1]) / 2
width_pad = (target_side_length - img_shape[1]) / 2
pad_top, pad_bottom = int(np.ceil(height_pad)), int(np.floor(height_pad))
pad_left, pad_right = int(np.ceil(width_pad)), int(np.floor(width_pad))
img = tf.pad(img, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]], "CONSTANT")
img_shape = img.shape.as_list()
img_fft = transp_fft2d(img)
if otf is None:
otf = psf2otf(psf, output_size=img_shape[1:3])
otf = tf.transpose(otf, [2, 0, 1, 3])
otf = tf.cast(otf, tf.complex64)
img_fft = tf.cast(img_fft, tf.complex64)
if adjoint:
result = transp_ifft2d(img_fft * tf.conj(otf))
else:
result = transp_ifft2d(img_fft * otf)
result = tf.cast(tf.real(result), tf.float32)
if not circular:
result = result[:, pad_top:-pad_bottom, pad_left:-pad_right, :]
return result
def depth_dep_convolution(img, psfs, disc_depth_map):
"""Convolves an image with different psfs at different depths as determined by a discretized depth map.
Args:
img: image with shape (batch_size, height, width, num_img_channels)
psfs: filters with shape (kernel_height, kernel_width, num_img_channels, num_filters)
disc_depth_map: Discretized depth map.
"""
# TODO: only convolve with PSFS that are necessary.
img = tf.cast(img, dtype=tf.float32)
input_shape = img.shape.as_list()
zeros_tensor = tf.zeros_like(img, dtype=tf.float32)
disc_depth_map = tf.tile(tf.cast(disc_depth_map, tf.int16),
multiples=[1, 1, 1, input_shape[3]])
blurred_imgs = []
for depth_idx, psf in enumerate(psfs):
psf = tf.cast(psf, dtype=tf.float32)
condition = tf.equal(disc_depth_map, tf.convert_to_tensor(depth_idx, tf.int16))
blurred_img = img_psf_conv(img, psf)
blurred_imgs.append(tf.where(condition,
blurred_img,
zeros_tensor))
result = tf.reduce_sum(blurred_imgs, axis=0)
return result
def get_spherical_wavefront_phase(resolution,
physical_size,
wave_lengths,
source_distance):
source_distance = tf.cast(source_distance, tf.float64)
physical_size = tf.cast(physical_size, tf.float64)
wave_lengths = tf.cast(wave_lengths, tf.float64)
N, M = resolution
[x, y] = np.mgrid[-N // 2:N // 2,
-M // 2:M // 2].astype(np.float64)
x = x / N * physical_size
y = y / M * physical_size
# Assume distance to source is approx. constant over wave
curvature = tf.sqrt(x ** 2 + y ** 2 + source_distance ** 2)
wave_nos = 2. * np.pi / wave_lengths
phase_shifts = compl_exp_tf(wave_nos * curvature)
phase_shifts = tf.expand_dims(tf.expand_dims(phase_shifts, 0), -1)
return phase_shifts
def least_common_multiple(a, b):
return abs(a * b) / fractions.gcd(a, b) if a and b else 0
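# Hedged compatibility note (illustrative only, not wired into the code above):
# fractions.gcd was removed in Python 3.9, so on newer interpreters an equivalent
# helper would use math.gcd instead.
def _least_common_multiple_math(a, b):
    import math
    return abs(a * b) // math.gcd(a, b) if a and b else 0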
def area_downsampling_tf(input_image, target_side_length):
input_shape = input_image.shape.as_list()
input_image = tf.cast(input_image, tf.float32)
if not input_shape[1] % target_side_length:
factor = int(input_shape[1] / target_side_length)
output_img = tf.nn.avg_pool(input_image,
[1, factor, factor, 1],
strides=[1, factor, factor, 1],
padding="VALID")
else:
# We upsample the image and then average pool
lcm_factor = least_common_multiple(target_side_length, input_shape[1]) / target_side_length
if lcm_factor > 10:
            print(
                "Warning: area downsampling is very expensive and not precise if source and target side lengths have a large least common multiple")
upsample_factor = 10
else:
upsample_factor = int(lcm_factor)
img_upsampled = tf.image.resize_nearest_neighbor(input_image,
size=2 * [upsample_factor * target_side_length])
output_img = tf.nn.avg_pool(img_upsampled,
[1, upsample_factor, upsample_factor, 1],
strides=[1, upsample_factor, upsample_factor, 1],
padding="VALID")
return output_img
def get_intensities(input_field):
return tf.square(tf.abs(input_field), name='intensities')
##################################
# Optical elements & Propagation
##################################
class Propagation(abc.ABC):
def __init__(self,
input_shape,
distance,
discretization_size,
wave_lengths):
self.input_shape = input_shape
self.distance = distance
self.wave_lengths = wave_lengths
self.wave_nos = 2. * np.pi / wave_lengths
self.discretization_size = discretization_size
@abc.abstractmethod
def _propagate(self, input_field):
"""Propagate an input field through the medium
"""
def __call__(self, input_field):
return self._propagate(input_field)
class FresnelPropagation(Propagation):
def _propagate(self, input_field):
_, M_orig, N_orig, _ = self.input_shape
# zero padding.
Mpad = M_orig // 4
Npad = N_orig // 4
M = M_orig + 2 * Mpad
N = N_orig + 2 * Npad
padded_input_field = tf.pad(input_field,
[[0, 0], [Mpad, Mpad], [Npad, Npad], [0, 0]])
[x, y] = np.mgrid[-N // 2:N // 2,
-M // 2:M // 2]
# Spatial frequency
fx = x / (self.discretization_size * N) # max frequency = 1/(2*pixel_size)
fy = y / (self.discretization_size * M)
# We need to ifftshift fx and fy here, because ifftshift doesn't exist in TF.
fx = ifftshift(fx)
fy = ifftshift(fy)
fx = fx[None, :, :, None]
fy = fy[None, :, :, None]
squared_sum = np.square(fx) + np.square(fy)
# We create a non-trainable variable so that this computation can be reused
# from call to call.
if tf.contrib.framework.is_tensor(self.distance):
tmp = np.float64(self.wave_lengths * np.pi * -1. * squared_sum)
constant_exp_part_init = tf.constant_initializer(tmp)
constant_exponent_part = tf.get_variable("Fresnel_kernel_constant_exponent_part",
initializer=constant_exp_part_init,
shape=padded_input_field.shape,
dtype=tf.float64,
trainable=False)
H = compl_exp_tf(self.distance * constant_exponent_part, dtype=tf.complex64,
name='fresnel_kernel')
else: # Save some memory
tmp = np.float64(self.wave_lengths * np.pi * -1. * squared_sum * self.distance)
constant_exp_part_init = tf.constant_initializer(tmp)
constant_exponent_part = tf.get_variable("Fresnel_kernel_constant_exponent_part",
initializer=constant_exp_part_init,
shape=padded_input_field.shape,
dtype=tf.float64,
trainable=False)
H = compl_exp_tf(constant_exponent_part, dtype=tf.complex64,
name='fresnel_kernel')
objFT = transp_fft2d(padded_input_field)
out_field = transp_ifft2d(objFT * H)
return out_field[:, Mpad:-Mpad, Npad:-Npad, :]
class PhasePlate():
def __init__(self,
wave_lengths,
height_map,
refractive_idcs,
height_tolerance=None,
lateral_tolerance=None):
self.wave_lengths = wave_lengths
self.height_map = height_map
self.refractive_idcs = refractive_idcs
self.height_tolerance = height_tolerance
self.lateral_tolerance = lateral_tolerance
self._build()
def _build(self):
# Add manufacturing tolerances in the form of height map noise
if self.height_tolerance is not None:
self.height_map += tf.random_uniform(shape=self.height_map.shape,
minval=-self.height_tolerance,
maxval=self.height_tolerance,
dtype=self.height_map.dtype)
print("Phase plate with manufacturing tolerance %0.2e" % self.height_tolerance)
self.phase_shifts = phaseshifts_from_height_map(self.height_map,
self.wave_lengths,
self.refractive_idcs)
def __call__(self, input_field):
input_field = tf.cast(input_field, tf.complex64)
return tf.multiply(input_field, self.phase_shifts, name='phase_plate_shift')
def propagate_exact(input_field,
distance,
input_sample_interval,
wave_lengths):
_, M_orig, N_orig, _ = input_field.shape.as_list()
# zero padding.
Mpad = M_orig // 4
Npad = N_orig // 4
M = M_orig + 2 * Mpad
N = N_orig + 2 * Npad
padded_input_field = tf.pad(input_field,
[[0, 0], [Mpad, Mpad], [Npad, Npad], [0, 0]])
[x, y] = np.mgrid[-N // 2:N // 2,
-M // 2:M // 2]
# Spatial frequency
fx = x / (input_sample_interval * N) # max frequency = 1/(2*pixel_size)
fy = y / (input_sample_interval * M)
# We need to ifftshift fx and fy here, because ifftshift doesn't exist in TF.
fx = ifftshift(fx)
fy = ifftshift(fy)
fx = fx[None, :, :, None]
fy = fy[None, :, :, None]
# We create a non-trainable variable so that this computation can be reused
# from call to call.
if tf.contrib.framework.is_tensor(distance):
tmp = np.float64(
2 * np.pi * (1 / wave_lengths) * np.sqrt(1. - (wave_lengths * fx) ** 2 - (wave_lengths * fy) ** 2))
constant_exp_part_init = tf.constant_initializer(tmp)
constant_exponent_part = tf.get_variable("Fresnel_kernel_constant_exponent_part",
initializer=constant_exp_part_init,
shape=padded_input_field.shape,
dtype=tf.float64,
trainable=False)
H = compl_exp_tf(distance * constant_exponent_part, dtype=tf.complex64,
name='fresnel_kernel')
else: # Save some memory
tmp = np.float64(
2 * np.pi * (distance / wave_lengths) * np.sqrt(1. - (wave_lengths * fx) ** 2 - (wave_lengths * fy) ** 2))
constant_exp_part_init = tf.constant_initializer(tmp)
constant_exponent_part = tf.get_variable("Fresnel_kernel_constant_exponent_part",
initializer=constant_exp_part_init,
shape=padded_input_field.shape,
dtype=tf.float64,
trainable=False)
H = compl_exp_tf(constant_exponent_part, dtype=tf.complex64,
name='fresnel_kernel')
objFT = transp_fft2d(padded_input_field)
out_field = transp_ifft2d(objFT * H)
return out_field[:, Mpad:-Mpad, Npad:-Npad, :]
def propagate_fresnel(input_field,
distance,
sampling_interval,
wave_lengths):
input_shape = input_field.shape.as_list()
propagation = FresnelPropagation(input_shape,
distance=distance,
discretization_size=sampling_interval,
wave_lengths=wave_lengths)
return propagation(input_field)
def circular_aperture(input_field, r_cutoff=None):
input_shape = input_field.shape.as_list()
[x, y] = np.mgrid[-input_shape[1] // 2: input_shape[1] // 2,
-input_shape[2] // 2: input_shape[2] // 2].astype(np.float64)
max_val = np.amax(x)
if r_cutoff is None:
r_cutoff = np.amax(x)
r = np.sqrt(x ** 2 + y ** 2)[None, :, :, None]
aperture = (r < r_cutoff).astype(np.float64)
return aperture * input_field
def height_map_element(input_field,
name,
wave_lengths,
refractive_idcs,
block_size=1,
height_map_sqrt_initializer=None,
height_map_regularizer=None,
height_tolerance=None, # Default height tolerance is 2 nm.
):
_, height, width, _ = input_field.shape.as_list()
height_map_shape = [1, height // block_size, width // block_size, 1]
if height_map_sqrt_initializer is None:
init_height_map_value = np.ones(shape=height_map_shape, dtype=np.float64) * 1e-4
height_map_sqrt_initializer = tf.constant_initializer(init_height_map_value)
with tf.variable_scope(name, reuse=False):
height_map_var = tf.get_variable(name="height_map_sqrt",
shape=height_map_shape,
dtype=tf.float64,
trainable=True,
initializer=height_map_sqrt_initializer)
height_map_full = tf.image.resize_images(height_map_var, height_map_shape[1:3],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
height_map = tf.square(height_map_full, name='height_map')
if height_map_regularizer is not None:
tf.contrib.layers.apply_regularization(height_map_regularizer, weights_list=[height_map])
attach_summaries("Height_map", height_map, image=True, log_image=True)
element = PhasePlate(wave_lengths=wave_lengths,
height_map=height_map,
refractive_idcs=refractive_idcs,
height_tolerance=height_tolerance)
return element(input_field)
def my_height_map_element(input_field,
name,
wave_lengths,
refractive_idcs,
block_size=1,
height_map_sqrt_initializer=None,
height_map_regularizer=None,
height_tolerance=None, # Default height tolerance is 2 nm.
height_max = np.inf,
):
_, height, width, _ = input_field.shape.as_list()
height_map_shape = [1, height // block_size, width // block_size, 1]
if height_map_sqrt_initializer is None:
init_height_map_value = np.ones(shape=height_map_shape, dtype=np.float64) * 1e-4
height_map_sqrt_initializer = tf.constant_initializer(init_height_map_value)
with tf.variable_scope(name, reuse=False):
height_map_var = tf.get_variable(name="height_map_sqrt",
shape=height_map_shape,
dtype=tf.float64,
trainable=True,
initializer=height_map_sqrt_initializer,
constraint=lambda x: tf.clip_by_value(x, -np.sqrt(height_max), np.sqrt(height_max)))#clip to square root of 1.55e-6
height_map_full = tf.image.resize_images(height_map_var, height_map_shape[1:3],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
height_map = tf.square(height_map_full, name='height_map')
if height_map_regularizer is not None:
tf.contrib.layers.apply_regularization(height_map_regularizer, weights_list=[height_map])
attach_summaries("Height_map", height_map, image=True, log_image=True)
element = PhasePlate(wave_lengths=wave_lengths,
height_map=height_map,
refractive_idcs=refractive_idcs,
height_tolerance=height_tolerance)
return [element(input_field),height_map]
def fourier_element(input_field,
name,
wave_lengths,
refractive_idcs,
frequency_range=0.5,
height_map_regularizer=None,
height_tolerance=None, # Default height tolerance is 2 nm.
):
_, height, width, _ = input_field.shape.as_list()
height_map_shape = [1, height, width, 1]
fourier_initializer = tf.zeros_initializer()
with tf.variable_scope(name, reuse=False):
fourier_vars_real = tf.get_variable('fourier_coeffs_real',
shape=[1, int(height * frequency_range), int(width * frequency_range), 1],
dtype=tf.float32,
trainable=True,
initializer=fourier_initializer)
fourier_vars_cplx = tf.get_variable('fourier_coeffs_cmplx',
shape=[1, int(height * frequency_range), int(width * frequency_range), 1],
dtype=tf.float32,
trainable=True,
initializer=fourier_initializer)
fourier_coeffs = tf.complex(fourier_vars_real, fourier_vars_cplx)
attach_summaries("Fourier_coeffs", tf.abs(fourier_coeffs), image=True, log_image=False)
padding_width = int((1 - frequency_range) * height) // 2
fourier_coeffs_padded = tf.pad(fourier_coeffs,
[[0, 0], [padding_width, padding_width], [padding_width, padding_width], [0, 0]])
print(fourier_coeffs_padded.shape.as_list())
height_map = tf.real(transp_ifft2d(ifftshift2d_tf(fourier_coeffs_padded)))
if height_map_regularizer is not None:
tf.contrib.layers.apply_regularization(height_map_regularizer, weights_list=[height_map])
attach_summaries("Height_map", height_map, image=True, log_image=True)
element = PhasePlate(wave_lengths=wave_lengths,
height_map=height_map,
refractive_idcs=refractive_idcs,
height_tolerance=height_tolerance)
return element(input_field)
def zernike_element(input_field,
zernike_volume,
name,
wave_lengths,
refractive_idcs,
zernike_initializer=None,
height_map_regularizer=None,
height_tolerance=None, # Default height tolerance is 2 nm.
zernike_scale=1e5,
):
_, height, width, _ = input_field.shape.as_list()
height_map_shape = [1, height, width, 1]
num_zernike_coeffs = zernike_volume.shape.as_list()[0]
if zernike_initializer is None:
zernike_initializer = tf.zeros_initializer() # tf.random_normal_initializer(stddev=1e-6)
with tf.variable_scope(name, reuse=False):
zernike_coeffs = tf.get_variable('zernike_coeffs',
shape=[num_zernike_coeffs, 1, 1],
dtype=tf.float32,
trainable=True,
initializer=zernike_initializer)
mask = np.ones([num_zernike_coeffs, 1, 1])
mask[0] = 0.
zernike_coeffs *= mask / zernike_scale
for i in range(num_zernike_coeffs):
tf.summary.scalar('zernike_coeff_%d' % i, tf.squeeze(zernike_coeffs[i, :, :]))
height_map = tf.reduce_sum(zernike_coeffs * zernike_volume, axis=0)
height_map = tf.expand_dims(tf.expand_dims(height_map, 0), -1, name='height_map')
if height_map_regularizer is not None:
tf.contrib.layers.apply_regularization(height_map_regularizer, weights_list=[height_map])
height_map_summary = (height_map - tf.reduce_min(height_map)) / (
tf.reduce_max(height_map) - tf.reduce_min(height_map))
attach_summaries("Height_map", height_map_summary, image=True, log_image=True)
element = PhasePlate(wave_lengths=wave_lengths,
height_map=height_map,
refractive_idcs=refractive_idcs,
height_tolerance=height_tolerance)
return element(input_field)
def gaussian_noise(image, stddev=0.001):
dtype = image.dtype
return image + tf.random_normal(image.shape, 0.0, stddev, dtype=dtype)
def get_vanilla_height_map(side_length,
height_map_regularizer=None,
name='height_map'):
height_map_shape = [1, side_length, side_length, 1]
init_height_map_value = np.ones(shape=height_map_shape, dtype=np.float64) * 1e-4
height_map_initializer = tf.constant_initializer(init_height_map_value)
with tf.variable_scope(name, reuse=False):
height_map_sqrt = tf.get_variable(name="height_map_sqrt",
shape=height_map_shape,
dtype=tf.float64,
trainable=True,
initializer=height_map_initializer)
height_map = tf.square(height_map_sqrt, name='height_map')
if height_map_regularizer is not None:
tf.contrib.layers.apply_regularization(height_map_regularizer, weights_list=[height_map])
attach_summaries("Height_map", height_map, image=True, log_image=True)
return tf.cast(height_map, tf.float64)
def get_fourier_height_map(side_length,
frequency_range=0.5,
height_map_regularizer=None,
name='fourier_height_map'):
height_map_shape = [1, side_length, side_length, 1]
fourier_initializer = tf.zeros_initializer()
with tf.variable_scope(name, reuse=False):
fourier_vars_real = tf.get_variable('fourier_coeffs_real',
shape=[1, int(side_length * frequency_range),
int(side_length * frequency_range), 1],
dtype=tf.float32,
trainable=True,
initializer=fourier_initializer)
fourier_vars_cplx = tf.get_variable('fourier_coeffs_cmplx',
shape=[1, int(side_length * frequency_range),
int(side_length * frequency_range), 1],
dtype=tf.float32,
trainable=True,
initializer=fourier_initializer)
fourier_coeffs = tf.complex(fourier_vars_real, fourier_vars_cplx)
attach_summaries("Fourier_coeffs", tf.abs(fourier_coeffs), image=True, log_image=False)
padding_width = int((1 - frequency_range) * side_length) // 2
fourier_coeffs_padded = tf.pad(fourier_coeffs,
[[0, 0], [padding_width, padding_width], [padding_width, padding_width], [0, 0]])
print(fourier_coeffs_padded.shape.as_list())
height_map = tf.real(transp_ifft2d(ifftshift2d_tf(fourier_coeffs_padded)))
if height_map_regularizer is not None:
tf.contrib.layers.apply_regularization(height_map_regularizer, weights_list=[height_map])
attach_summaries("Height_map", height_map, image=True, log_image=True)
return height_map
class SingleLensSetup():
def __init__(self,
height_map,
wave_resolution,
wave_lengths,
sensor_distance,
sensor_resolution,
input_sample_interval,
refractive_idcs,
height_tolerance,
noise_model=gaussian_noise,
psf_resolution=None,
target_distance=None,
use_planar_incidence=True,
upsample=True,
depth_bins=None):
self.wave_lengths = wave_lengths
self.refractive_idcs = refractive_idcs
self.wave_resolution = wave_resolution
if psf_resolution is None:
psf_resolution = wave_resolution
self.psf_resolution = psf_resolution
self.sensor_distance = sensor_distance
self.noise_model = noise_model
self.sensor_resolution = sensor_resolution
self.input_sample_interval = input_sample_interval
self.use_planar_incidence = use_planar_incidence
self.upsample = upsample
self.target_distance = target_distance
self.depth_bins = depth_bins
self.height_tolerance = height_tolerance
self.height_map = height_map
self.physical_size = float(self.wave_resolution[0] * self.input_sample_interval)
self.pixel_size = self.input_sample_interval * np.array(wave_resolution) / np.array(sensor_resolution)
print("Physical size is %0.2e.\nWave resolution is %d." % (self.physical_size, self.wave_resolution[0]))
self.optical_element = PhasePlate(wave_lengths=self.wave_lengths,
height_map=self.height_map,
refractive_idcs=self.refractive_idcs,
height_tolerance=self.height_tolerance)
self.get_psfs()
def get_psfs(self):
# Sort the point source distances in increasing order
if self.use_planar_incidence:
input_fields = [tf.ones(self.wave_resolution, dtype=tf.float32)[None, :, :, None]]
else:
distances = self.depth_bins
if self.target_distance is not None:
distances += [self.target_distance]
N, M = self.wave_resolution
[x, y] = np.mgrid[-N // 2:N // 2,
-M // 2:M // 2].astype(np.float64)
x = x / N * self.physical_size
y = y / M * self.physical_size
squared_sum = x ** 2 + y ** 2
wave_nos = 2. * np.pi / self.wave_lengths
wave_nos = wave_nos.reshape([1, 1, 1, -1])
input_fields = []
for distance in distances:
# Assume distance to source is approx. constant over wave
curvature = tf.sqrt(squared_sum + tf.cast(distance, tf.float64) ** 2)
curvature = tf.expand_dims(tf.expand_dims(curvature, 0), -1)
spherical_wavefront = compl_exp_tf(wave_nos * curvature, dtype=tf.complex64)
input_fields.append(spherical_wavefront)
psfs = []
with tf.variable_scope("Forward_model") as scope:
for depth_idx, input_field in enumerate(input_fields):
field = self.optical_element(input_field)
field = circular_aperture(field)
sensor_incident_field = propagate_fresnel(field,
distance=self.sensor_distance,
sampling_interval=self.input_sample_interval,
wave_lengths=self.wave_lengths)
psf = get_intensities(sensor_incident_field)
if not self.upsample:
psf = area_downsampling_tf(psf, self.psf_resolution[0])
psf = tf.div(psf, tf.reduce_sum(psf, axis=[1, 2], keep_dims=True), name='psf_depth_idx_%d' % depth_idx)
attach_summaries('PSF_depth_idx_%d' % depth_idx, psf, image=True, log_image=True)
psfs.append(tf.transpose(psf, [1, 2, 0, 3])) # (Height, width, 1, channels)
scope.reuse_variables()
if self.target_distance is not None:
self.target_psf = psfs.pop()
attach_summaries('target_psf', tf.transpose(self.target_psf, [2, 0, 1, 3]), image=True)
self.psfs = psfs
def get_sensor_img(self,
input_img,
noise_sigma=0.001,
depth_dependent=False,
depth_map=None,
                       otfs=None):
        """Calculate the noisy sensor image: convolve the (optionally upsampled) input
        image with the PSF(s), downsample to the sensor resolution if needed, and apply
        self.noise_model.
        """
# Upsample input_img to match wave resolution.
if self.upsample:
print("Images are upsampled to wave resolution")
input_img = tf.image.resize_images(input_img, self.wave_resolution,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
else:
print("Images are not upsampled to wave resolution")
if depth_dependent:
if self.upsample:
depth_map = tf.image.resize_images(depth_map, self.wave_resolution,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
sensor_img = depth_dep_convolution(input_img, self.psfs, disc_depth_map=depth_map)
else:
sensor_img = img_psf_conv(input_img, self.psfs[0], otf=otfs)
# Down sample measured image to match sensor resolution.
if self.upsample:
sensor_img = area_downsampling_tf(sensor_img, self.sensor_resolution[0])
noisy_img = self.noise_model(sensor_img, noise_sigma)
# print("Additive noise of %0.2e"%noise_sigma)
attach_summaries("Sensor_img", noisy_img, image=True, log_image=False)
return noisy_img
def get_sensor_img_with_clipping(self,
input_img,
noise_sigma=0.001,
depth_dependent=False,
depth_map=None,
                                     otfs=None):
        """Same pipeline as get_sensor_img above: convolve with the PSF(s), downsample
        to the sensor resolution if needed, and apply self.noise_model.
        """
# Upsample input_img to match wave resolution.
if self.upsample:
print("Images are upsampled to wave resolution")
input_img = tf.image.resize_images(input_img, self.wave_resolution,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
else:
print("Images are not upsampled to wave resolution")
if depth_dependent:
if self.upsample:
depth_map = tf.image.resize_images(depth_map, self.wave_resolution,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
sensor_img = depth_dep_convolution(input_img, self.psfs, disc_depth_map=depth_map)
else:
sensor_img = img_psf_conv(input_img, self.psfs[0], otf=otfs)
# Down sample measured image to match sensor resolution.
if self.upsample:
sensor_img = area_downsampling_tf(sensor_img, self.sensor_resolution[0])
noisy_img = self.noise_model(sensor_img, noise_sigma)
# print("Additive noise of %0.2e"%noise_sigma)
attach_summaries("Sensor_img", noisy_img, image=True, log_image=False)
return noisy_img
class ZernikeSystem():
def __init__(self,
zernike_volume,
wave_resolution,
wave_lengths,
sensor_distance,
sensor_resolution,
input_sample_interval,
refractive_idcs,
height_tolerance,
target_distance=None,
upsample=True,
depth_bins=None):
'''Simulates a one-lens system with a zernike-parameterized lens.
:param zernike_volume: Zernike basis functions.
Tensor of shape (num_basis_functions, wave_resolution[0], wave_resolution[1]).
:param wave_resolution: Resolution of the simulated wavefront. Shape wave_resolution.
:param wave_lengths: Wavelengths to be simulated. Shape (num_wavelengths).
:param sensor_distance: Distance of sensor to optical element.
:param sensor_resolution: Resolution of simulated sensor.
:param input_sample_interval: Sampling interval of aperture. Scalar.
:param refractive_idcs: Refractive idcs of simulated material at wave_lengths.
:param height_tolerance: Manufacturing tolerance of element. Adds the respective level of noise to be robust to
manufacturing imperfections.
:param target_distance: Allows to define the depth of a PSF that will *always* be evaluated. That can then be
used for instance for EDOF deconvolution.
:param upsample: Whether the image should be upsampled to the PSF resolution or the PSF should be downsampled
to the sensor resolution.
:param depth_bins: Depths at which PSFs should be simulated.
'''
self.sensor_distance = sensor_distance
self.zernike_volume = zernike_volume
self.wave_resolution = wave_resolution
self.wave_lengths = wave_lengths
self.depth_bins = depth_bins
self.sensor_resolution = sensor_resolution
self.upsample = upsample
self.target_distance = target_distance
self.zernike_volume = zernike_volume
self.height_tolerance = height_tolerance
self.input_sample_interval = input_sample_interval
self.refractive_idcs = refractive_idcs
self.psf_resolution = self.sensor_resolution
self.physical_size = float(self.wave_resolution[0] * self.input_sample_interval)
print("Physical size is %0.2e.\nWave resolution is %d." % (self.physical_size, self.wave_resolution[0]))
self._build_height_map()
self._get_psfs()
def _build_height_map(self):
'''Generates a zernike height map for optimization (residing in self.element after function call.)
:return: None.
'''
num_zernike_coeffs = self.zernike_volume.shape.as_list()[0]
zernike_inits = np.zeros((num_zernike_coeffs, 1, 1))
zernike_inits[3] = -51. # This sets the defocus value to approximately focus the image for a distance of 1m.
zernike_initializer = tf.constant_initializer(zernike_inits)
self.zernike_coeffs = tf.get_variable('zernike_coeffs',
shape=[num_zernike_coeffs, 1, 1],
dtype=tf.float32,
trainable=True,
initializer=zernike_initializer)
for i in range(num_zernike_coeffs):
tf.summary.scalar('zernike_coeff_%d' % i, tf.squeeze(self.zernike_coeffs[i, :, :]))
self.height_map = tf.reduce_sum(self.zernike_coeffs * self.zernike_volume, axis=0)
self.height_map = tf.expand_dims(tf.expand_dims(self.height_map, 0), -1, name='height_map')
attach_summaries("Height_map", self.height_map, image=True, log_image=False)
self.element = PhasePlate(wave_lengths=self.wave_lengths,
height_map=self.height_map,
refractive_idcs=self.refractive_idcs,
height_tolerance=self.height_tolerance)
def _get_psfs(self):
'''Builds the graph to generate psfs for depths in self.depth_bins, residing in self.psfs after function call.
:return: None.
'''
# Sort the point source distances in increasing order
distances = self.depth_bins
if self.target_distance is not None:
distances += [self.target_distance]
N, M = self.wave_resolution
[x, y] = np.mgrid[-N // 2:N // 2,
-M // 2:M // 2].astype(np.float64)
x = x / N * self.physical_size
y = y / M * self.physical_size
squared_sum = x ** 2 + y ** 2
wave_nos = 2. * np.pi / self.wave_lengths
wave_nos = wave_nos.reshape([1, 1, 1, -1])
input_fields = []
for distance in distances:
# Assume distance to source is approx. constant over wave
curvature = tf.sqrt(squared_sum + tf.cast(distance, tf.float64) ** 2)
curvature = tf.expand_dims(tf.expand_dims(curvature, 0), -1)
spherical_wavefront = compl_exp_tf(wave_nos * curvature, dtype=tf.complex64)
input_fields.append(spherical_wavefront)
psfs = []
with tf.variable_scope("Forward_model") as scope:
for depth_idx, input_field in enumerate(input_fields):
field = self.element(input_field)
field = circular_aperture(field)
sensor_incident_field = propagate_fresnel(field,
distance=self.sensor_distance,
sampling_interval=self.input_sample_interval,
wave_lengths=self.wave_lengths)
psf = get_intensities(sensor_incident_field)
if not self.upsample:
psf = area_downsampling_tf(psf, self.psf_resolution[0])
psf = tf.div(psf, tf.reduce_sum(psf, axis=[1, 2], keep_dims=True), name='psf_depth_idx_%d' % depth_idx)
attach_summaries('PSF_depth_idx_%d' % depth_idx, psf, image=True, log_image=True)
psfs.append(tf.transpose(psf, [1, 2, 0, 3])) # (Height, width, 1, channels)
scope.reuse_variables()
if self.target_distance is not None:
self.target_psf = psfs.pop()
attach_summaries('target_psf', tf.transpose(self.target_psf, [2, 0, 1, 3]), image=True)
self.psfs = psfs
def get_sensor_img(self,
input_img,
noise_sigma,
depth_dependent=False,
depth_map=None):
"""Calculates the sensor image.
:param input_img: Imaged scene.
:param noise_sigma: Sigma of gaussian sensor noise. Scalar.
:param depth_dependent: Whether to use a depth_map.
:param depth_map: A discretized depth map, where every pixel is an index into self.depth_bins.
Shape (batch_size, self.sensor_resolution[0], self.sensor_resolution[1])
:return: Sensor image.
"""
# Upsample input_img to match wave resolution.
if self.upsample:
print("Images are upsampled to wave resolution")
input_img = tf.image.resize_images(input_img, self.wave_resolution,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
else:
print("Images are not upsampled to wave resolution")
if depth_dependent:
if self.upsample:
depth_map = tf.image.resize_images(depth_map, self.wave_resolution,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
sensor_img = depth_dep_convolution(input_img, self.psfs, disc_depth_map=depth_map)
else:
sensor_img = img_psf_conv(input_img, self.psfs[0])
# Downsample measured image to match sensor resolution.
if self.upsample:
sensor_img = area_downsampling_tf(sensor_img, self.sensor_resolution[0])
noisy_img = gaussian_noise(sensor_img, noise_sigma)
attach_summaries("Sensor_img", noisy_img, image=True, log_image=False)
return noisy_img
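# Hedged numpy-only illustration (never called): the height map in zernike_element
# and ZernikeSystem._build_height_map is a weighted sum of basis functions over the
# first axis; the shapes below are made up for the example.
def _weighted_basis_sum_example():
    basis = np.random.random((4, 8, 8))  # (num_basis_functions, H, W)
    coeffs = np.array([0.0, 1.0, -0.5, 2.0]).reshape(4, 1, 1)
    height_map = np.sum(coeffs * basis, axis=0)
    assert height_map.shape == (8, 8)
    return height_map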
|
from django.apps import AppConfig
class ClsConfig(AppConfig):
name = 'cls'
|
from traceback import format_exc
import cv2
from pathlib import Path
from PIL import Image
import numpy as np
def ImageToMatrix(file):
im = Image.open(file)
width, height = im.size
im = im.convert("L")
data = im.getdata()
data = np.matrix(data, dtype='float') / 255.0
new_data = np.reshape(data, (height, width))
return new_data
def Brenner(img):
x, y = img.shape
D = 0
for i in range(x - 2):
for j in range(y - 2):
D += (img[i + 2, j] - img[i, j]) ** 2
return D
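# Hedged sketch (illustrative only): a vectorized equivalent of the double loop in
# Brenner above, using array slicing instead of per-pixel Python iteration. The
# column range mirrors the original `range(y - 2)` bound.
def brenner_vectorized(img):
    diff = img[2:, :-2] - img[:-2, :-2]
    return (diff ** 2).sum()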
def variance_of_laplacian(image):
return cv2.Laplacian(image, cv2.CV_64F).var()
def main(input_path):
for file in Path(input_path).rglob("*.jpg"):
        # Laplacian operator (variance-of-Laplacian focus measure)
image = cv2.imdecode(np.fromfile(file, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fm = variance_of_laplacian(gray)
        # Brenner focus measure
frame = ImageToMatrix(file)
score = Brenner(frame)
print(fm, score)
if __name__ == '__main__':
print("Start...")
input_Folder = './orgin/'
#output_Folder = r"D:\SJTU Courses\3-1\sources\result"
try:
main(input_Folder)
print("finished")
except Exception as err:
        print(f"Program run failed!!! Please contact the data processing center: {err}")
print(format_exc())
        input("Press any key to exit!!!")
|
# print('\n'.join(is_not_a_pkg_name('^.+py.+$')))
# print(is_not_a_pkg_name('.*py$'))
from pipoke.pkg_vs_words import *
def is_from_module(obj, module):
"""Check if an object "belongs" to a module.
>>> import collections
>>> is_from_module(collections.ChainMap, collections)
True
>>> is_from_module(is_from_module, collections)
False
"""
return getattr(obj, '__module__', '').startswith(module.__name__)
def second_party_names(module, obj_filt=None):
    """Generator of module attribute names that point to objects the module actually defines.
:param module: Module (object)
:param obj_filt: Boolean function applied to object to filter it in
:return:
>>> from tec import modules # pip install tec
>>> sorted(second_party_names(modules))[:5]
['DOTPATH', 'FILEPATH', 'FOLDERPATH', 'LOADED', 'ModuleSpecKind']
>>> sorted(second_party_names(modules, callable))[:5]
['ModuleSpecKind', 'coerce_module_spec', 'get_imported_module_paths', 'is_from_module', 'is_module_dotpath']
>>> sorted(second_party_names(modules, lambda obj: isinstance(obj, type)))
['ModuleSpecKind']
"""
obj_filt = obj_filt or (lambda x: x)
for attr in filter(lambda a: not a.startswith('_'), dir(module)):
obj = getattr(module, attr)
if is_from_module(obj, module) and obj_filt(obj):
yield attr
n_words = len(simple_words)
n_pkgs = len(pkg_names)
def words_containing_py_free_for_pkg():
return is_not_a_pkg_name('^.+py.+$')
def words_starting_with_py_free_for_pkg():
return is_not_a_pkg_name('py.*$')
def words_ending_with_py_free_for_pkg():
return is_not_a_pkg_name('.*py$')
def word_vs_pkgs_regex_stats(regex):
words, pkgs, pkgs_words = words_and_pkg_names_satisfying_regex(regex)
return {'words': len(words) / n_words, 'pkgs': len(pkgs) / n_pkgs}
def multiple_word_vs_pkgs_regex_stats(patterns):
"""
    Get proportions of English words and pkg names that satisfy a regex pattern
:param patterns:
:return:
"""
if isinstance(patterns, str):
patterns = [patterns]
if not isinstance(patterns, dict):
patterns = {p: p for p in patterns}
return [dict(pattern=name, **word_vs_pkgs_regex_stats(pattern)) for name, pattern in patterns.items()]
def subsequence_counts(n=2, n_of_top_counts=10):
"""
    Get counts of letter subsequences in English words and PyPI package names
:param n:
:param n_of_top_counts:
:return:
"""
from collections import Counter
from itertools import islice
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
word_subseqs = Counter()
for w in simple_words:
word_subseqs.update(window(w, n))
pkg_subseqs = Counter()
for w in pkg_names:
pkg_subseqs.update(window(w, n))
t = [(''.join(x[0]), x[1]) for x in word_subseqs.most_common(n_of_top_counts)]
tt = [(''.join(x[0]), x[1]) for x in pkg_subseqs.most_common(n_of_top_counts)]
return {'words': t, 'pkgs': tt}
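# Hedged illustration (not used by the CLI below): the sliding-window counting done
# inside subsequence_counts, shown on a plain string so it does not depend on the
# pipoke word lists.
def _bigram_count_example(word='banana'):
    from collections import Counter
    from itertools import islice

    def window(seq, n=2):
        it = iter(seq)
        result = tuple(islice(it, n))
        if len(result) == n:
            yield result
        for elem in it:
            result = result[1:] + (elem,)
            yield result

    # For 'banana' this gives Counter({'an': 2, 'na': 2, 'ba': 1}).
    return Counter(''.join(w) for w in window(word, 2))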
if __name__ == '__main__':
import argh
from functools import wraps
parser = argh.ArghParser()
def mk_postproc_deco(postproc_func, func_rename=None):
def decorator(func):
@wraps(func)
def wrapped_func(*args, **kwargs):
return postproc_func(func(*args, **kwargs))
if func_rename is not None:
wrapped_func.__name__ = func_rename(func)
return wrapped_func
return decorator
column_disp = mk_postproc_deco(lambda x: '\n'.join(x))
counts = mk_postproc_deco(lambda x: '\n'.join(x))
funcs = []
funcs += list(map(column_disp,
[words_containing_py_free_for_pkg,
words_starting_with_py_free_for_pkg,
words_ending_with_py_free_for_pkg]))
funcs += [multiple_word_vs_pkgs_regex_stats, subsequence_counts]
parser.add_commands([words_and_pkg_names_satisfying_regex, is_not_a_pkg_name, allwords, pkgnames])
parser.dispatch()
# print(multiple_word_vs_pkgs_regex_stats({'contains "py"': '.*py.*',
# 'starts with py': 'py.*$',
# 'ends with py': '.*py$'
# }))
#
# print()
# print(subsequence_counts(n=3))
#
# print(len([w for w in pkg_names if 'django-' in w]))
# print(len(pkg_names))
|
from apps.lean.filters.basicinfor_filters import *
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# ./create_JSON_file_of_sections_in_your_courses.py
#
# with the option "-v" or "--verbose" you get lots of output - showing in detail the operations of the program
#
# Can also be called with an alternative configuration file:
# ./create_JSON_file_of_sections_in_your_courses --config config-test.json
#
# Purpose:
# Create a JSON file with information about courses where the user is enrolled as a 'TeacherEnrollment', 'Examiner', or 'TaEnrollment'
#
# The JSON file contains a course_info dict
# courses_to_ignore=course_info['courses_to_ignore'] - courses that the user wants to ignore
# courses_without_specific_sections=course_info['courses_without_specific_sections'] - courses where the user is responsible for all the students
# courses_with_sections=course_info['courses_with_sections'] - courses where the user has a specific section
# the specific section's name may be the user's name or some other unique string (such as "Chip's section")
# Because the name of the relevant section can be arbitrary, this file is necessary to know which section belongs to a given user
#
# Examples:
# create file for only exjobb courses:
# ./create_JSON_file_of_sections_in_your_courses.py -s fee.json -X
#
# update an existing file (possibly adding new courses)
# ./create_JSON_file_of_sections_in_your_courses.py -s foo.json -U
#
# G. Q. Maguire Jr.
#
# 2020.02.04
# based on earlier list_your_courses_JSON.py
#
import requests, time
import pprint
import optparse
import sys
import json
#############################
###### EDIT THIS STUFF ######
#############################
global baseUrl # the base URL used for access to Canvas
global header # the header for all HTTP requests
global payload # place to store additional payload when needed for options to HTTP requests
# Based upon the options to the program, initialize the variables used to access Canvas via HTTP requests
def initialize(options):
global baseUrl, header, payload
# styled based upon https://martin-thoma.com/configuration-files-in-python/
if options.config_filename:
config_file=options.config_filename
else:
config_file='config.json'
try:
with open(config_file) as json_data_file:
configuration = json.load(json_data_file)
access_token=configuration["canvas"]["access_token"]
baseUrl="https://"+configuration["canvas"]["host"]+"/api/v1"
header = {'Authorization' : 'Bearer ' + access_token}
payload = {}
except:
print("Unable to open configuration file named {}".format(config_file))
print("Please create a suitable configuration file, the default name is config.json")
sys.exit()
def list_your_courses():
courses_found_thus_far=[]
# Use the Canvas API to get the list of all of your courses
# GET /api/v1/courses
url = "{0}/courses".format(baseUrl)
if Verbose_Flag:
print("url: {}".format(url))
r = requests.get(url, headers = header)
if Verbose_Flag:
print("result of getting courses: {}".format(r.text))
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
courses_found_thus_far.append(p_response)
        # the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of modules
# see "Handling Pagination" - Discussion created by tyler.clair@usu.edu on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
if Verbose_Flag:
print("result of getting courses for a paginated response: {}".format(r.text))
page_response = r.json()
for p_response in page_response:
courses_found_thus_far.append(p_response)
return courses_found_thus_far
def your_user_info():
# Use the Canvas API to get your own user information
# GET /api/v1/users/self
url = "{0}/users/self".format(baseUrl)
if Verbose_Flag:
print("url: {}".format(url))
r = requests.get(url, headers = header)
if Verbose_Flag:
print("result of getting your own user information: {}".format(r.text))
if r.status_code == requests.codes.ok:
page_response=r.json()
return page_response
return False
def sections_in_course(course_id):
sections_found_thus_far=[]
# Use the Canvas API to get the list of sections for this course
#GET /api/v1/courses/:course_id/sections
url = "{0}/courses/{1}/sections".format(baseUrl,course_id)
if Verbose_Flag:
print("url: {}".format(url))
r = requests.get(url, headers = header)
if Verbose_Flag:
print("result of getting sections: {}".format(r.text))
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
sections_found_thus_far.append(p_response)
# the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of sections
# see "Handling Pagination" - Discussion created by tyler.clair@usu.edu on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
page_response = r.json()
for p_response in page_response:
sections_found_thus_far.append(p_response)
return sections_found_thus_far
def students_in_course(course_id):
users_found_thus_far=[]
# Use the Canvas API to get the list of users enrolled in this course
#GET /api/v1/courses/:course_id/enrollments
url = "{0}/courses/{1}/enrollments".format(baseUrl,course_id)
if Verbose_Flag:
print("url: {}".format(url))
extra_parameters={'per_page': '100',
'type': ['StudentEnrollment']
}
r = requests.get(url, params=extra_parameters, headers = header)
if Verbose_Flag:
print("result of getting enrollments: {}".format(r.text))
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
users_found_thus_far.append(p_response)
# the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of enrollments
# see "Handling Pagination" - Discussion created by tyler.clair@usu.edu on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
page_response = r.json()
for p_response in page_response:
users_found_thus_far.append(p_response)
return users_found_thus_far
def list_assignments(course_id):
assignments_found_thus_far=[]
# Use the Canvas API to get the list of assignments for the course
#GET /api/v1/courses/:course_id/assignments
url = "{0}/courses/{1}/assignments".format(baseUrl, course_id)
if Verbose_Flag:
print("url: {}".format(url))
r = requests.get(url, headers = header)
if Verbose_Flag:
print("result of getting assignments: {}".format(r.text))
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
assignments_found_thus_far.append(p_response)
# the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of assignments
# see "Handling Pagination" - Discussion created by tyler.clair@usu.edu on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
if Verbose_Flag:
print("result of getting assignments for a paginated response: {}".format(r.text))
page_response = r.json()
for p_response in page_response:
assignments_found_thus_far.append(p_response)
return assignments_found_thus_far
def submission_for_assignment_by_user(course_id, assignment_id, user_id):
# return the submission information for a single user's assignment for a specific course as a dict
#
# Use the Canvas API to get a user's submission for a course for a specific assignment
# GET /api/v1/courses/:course_id/assignments/:assignment_id/submissions/:user_id
url = "{0}/courses/{1}/assignments/{2}/submissions/{3}".format(baseUrl, course_id, assignment_id, user_id)
if Verbose_Flag:
print("url: {}".format(url))
#extra_parameters={'student_ids[]': 'all'}
#r = requests.get(url, params=extra_parameters, headers = header)
r = requests.get(url, headers = header)
if Verbose_Flag:
print("result of getting submissions: {}".format(r.text))
if r.status_code == requests.codes.ok:
page_response=r.json()
if Verbose_Flag:
print("page_response: " + str(page_response))
return page_response
else:
return dict()
def cleanup_sections(users_name, courses_with_sections):
# if there is a section with a name == users_name, then eliminate all of the other sections
for c in courses_with_sections:
section_for_user=False
sections=courses_with_sections[c].get('sections', [])
for s in sections:
if courses_with_sections[c]['sections'][s] == users_name:
section_for_user=s
if section_for_user:
courses_with_sections[c]['sections']={section_for_user: users_name}
return courses_with_sections
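# Illustration (made-up data): if one of a course's sections is named after the user,
# cleanup_sections() keeps only that section and drops the rest.
#
# courses_with_sections = {'103': {'name': 'Degree project', 'course_code': 'XY123X',
#                                  'sections': {11: 'Ann Teacher', 12: 'Someone Else'}}}
# cleanup_sections('Ann Teacher', courses_with_sections)
# => {'103': {'name': 'Degree project', 'course_code': 'XY123X', 'sections': {11: 'Ann Teacher'}}}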
def remove_courses_to_be_ignored(course_list, courses_to_ignore):
new_course_list=[]
for course in course_list:
if Verbose_Flag:
print("course['id']={}".format(course['id']))
# note that the course['id'] is an integer in course_list, but a string in courses_to_ignore
ci=courses_to_ignore.get(str(course['id']), False)
if ci:
print("ignoring course['id']={}".format(course['id']))
else:
new_course_list.append(course)
return new_course_list
def remove_courses_to_be_ignored_dict(course_dict, courses_to_ignore):
new_course_dict=dict()
for course in course_dict:
if Verbose_Flag:
print("course['id']={}".format(course['id']))
ci=courses_to_ignore.get(course, False)
if ci:
print("ignoring course with id={}".format(course))
else:
new_course_dict[course]=course_dict[course]
return new_course_dict
def main():
global Verbose_Flag
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose',
dest="verbose",
default=False,
action="store_true",
help="Print lots of output to stdout"
)
parser.add_option('-s', '--sectionnames',
dest="course_info_file",
help="use JSON FILE giving section names for a user in each course",
metavar="FILE"
)
parser.add_option('-U', '--update',
dest="update",
default=False,
action="store_true",
help="update existing JSON file"
)
parser.add_option('-X', '--exjobs',
dest="exjobs",
default=False,
action="store_true",
help="only include degree project courses"
)
parser.add_option("--config", dest="config_filename",
help="read configuration from FILE", metavar="FILE")
options, remainder = parser.parse_args()
Verbose_Flag=options.verbose
if Verbose_Flag:
print("ARGV : {}".format(sys.argv[1:]))
print("VERBOSE : {}".format(options.verbose))
print("REMAINING : {}".format(remainder))
print("Configuration file : {}".format(options.config_filename))
initialize(options)
user_info=your_user_info()
if user_info:
if Verbose_Flag:
pprint.pprint(user_info, indent=4)
user_id=user_info['id']
users_name=user_info['name']
else:
print("No user information")
sys.exit()
course_info=dict()
if options.course_info_file:
course_info_file=options.course_info_file
else:
course_info_file="sections_in_courses_for_{0}.json".format(users_name)
if Verbose_Flag:
print("course_info_file={}".format(course_info_file))
if options.update:
try:
with open(course_info_file) as json_data_file:
try:
course_info = json.load(json_data_file)
if Verbose_Flag:
print("course_info={}".format(course_info))
courses_to_ignore=course_info.get('courses_to_ignore',{})
courses_without_specific_sections=course_info.get('courses_without_specific_sections', {})
courses_with_sections=course_info.get('courses_with_sections', {})
except json.JSONDecodeError as e:
print("Unable to load JSON file named {}".format(course_info_file))
sys.exit()
except OSError as e:
print(e)
print("Unable to open JSON file named {}".format(course_info_file))
sys.exit()
else: # otherwise create empty dictionaries
courses_to_ignore=dict()
courses_without_specific_sections=dict()
courses_with_sections=dict()
course_list=list_your_courses()
if len(course_list) == 0:
print("User is not in any courses")
sys.exit()
if Verbose_Flag:
pprint.pprint(course_list, indent=4)
# remove courses that are to be ignored
if len(courses_to_ignore) > 0:
if Verbose_Flag:
print("courses_to_ignore={}".format(courses_to_ignore))
# remove the courses to be ignored from the list of the user's courses
course_list=remove_courses_to_be_ignored(course_list, courses_to_ignore)
# also remove courses to be ignored from the courses_with_sections dict
courses_without_specific_sections=remove_courses_to_be_ignored_dict(courses_without_specific_sections, courses_to_ignore)
# Note: removing entries from courses_with_sections is not strictly necessary - they will be recomputed from the reduced course_list
courses_with_sections=remove_courses_to_be_ignored_dict(courses_with_sections, courses_to_ignore)
# if only including degree project courses (course code of the form cc1ddX* or cc2ddX), then skip other courses
if options.exjobs:
exjobb_courses=[]
for course in course_list:
if (len(course['course_code']) > 6) and (course['course_code'][5] == 'X') and (course['course_code'][2] == '1' or (course['course_code'][2] == '2')):
exjobb_courses.append(course)
course_list=exjobb_courses
if len(course_list) == 0:
print("No courses to process")
sys.exit()
# create a dictionary so one can lookup course details by course id
course_dict=dict()
for course in course_list:
course_dict[course['id']]=course
list_of_course_ids=[]
for course in course_list:
enrolments=course['enrollments']
for e in enrolments:
if e['user_id'] == user_id:
if (e['role'] == 'TeacherEnrollment') or (e['role'] == 'Examiner') or (e['role'] == 'TaEnrollment'):
# only put the course into the list once
if not course['id'] in list_of_course_ids:
list_of_course_ids.append(course['id'])
if len(list_of_course_ids) == 0:
print("user is not a teacher or examiner in any courses")
sys.exit()
if Verbose_Flag:
print("courses where user is teacher or examiner={}".format(list_of_course_ids))
for c_id in list_of_course_ids:
# first check to see if this is a course that should be without specific sections
c1=courses_without_specific_sections.get(str(c_id), [])
if c1:
print("course {0} indicated as having no specific sections".format(c_id))
continue
# if there are existing explicit sections, then do not add additional sections
c2=courses_with_sections.get(str(c_id), [])
if c2:
s0=courses_with_sections[str(c_id)].get('sections', [])
if s0 and isinstance(s0, dict): # s0 will be a dict
continue
# otherwise add the section information
sections=sections_in_course(c_id)
if sections:
courses_with_sections[c_id]={'name': course_dict[c_id]['name'],
'course_code': course_dict[c_id]['course_code'],
'sections': dict()}
for s in sections:
courses_with_sections[c_id]['sections'][s['id']]=s['name']
else:
c3=courses_without_specific_sections.get(c_id, [])
if not c3: # if not already in courses_without_specific_sections, then add it
courses_without_specific_sections[c_id]={'name': course_dict[c_id]['name'],
'course_code': course_dict[c_id]['course_code']
}
courses_with_sections=cleanup_sections(users_name, courses_with_sections)
course_info['courses_to_ignore']=courses_to_ignore
course_info['courses_without_specific_sections']=courses_without_specific_sections
course_info['courses_with_sections']=courses_with_sections
try:
with open(course_info_file, 'w') as json_data_file:
json.dump(course_info, json_data_file)
print("created output file {}".format(course_info_file))
except:
print("Unable to write JSON file named {}".format(course_info_file))
sys.exit()
if __name__ == "__main__": main()
|
from qfengine.asset.equity import Equity
from qfengine.asset.cash import Cash
from typing import Union
assetClasses = Union[Equity,Cash]
|
# Copyright 2014 Julia Eskew
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sqlite3
MODULE_DIR = os.path.dirname(__file__)
class TimingDataStorage(object):
SCHEMA_NAME = 'schema.sql'
SCHEMA_PATH = '%s/%s' % (MODULE_DIR, SCHEMA_NAME)
DEFAULT_DB_NAME = 'block_times.db'
def __init__(self, **kwargs):
# Verify that the sqlite DB and schema exists.
db_name = self.DEFAULT_DB_NAME
if 'db_name' in kwargs:
db_name = kwargs['db_name']
if not os.path.exists(db_name):
self._createDB(db_name)
self.conn = sqlite3.connect(db_name)
def _createDB(self, db_name):
# Create the sqlite DB file.
with open(db_name, "w") as f:
conn = sqlite3.connect(db_name)
with open(self.SCHEMA_PATH, "r") as schema_file:
schema = schema_file.read()
cur = conn.cursor()
conn.executescript(schema)
conn.commit()
def run_id(self, desc=""):
"""
Creates a new run ID and returns it.
A single run ID can be used for multiple store()s for times in the same test.
"""
cur = self.conn.cursor()
cur.execute('insert into test_run (run_desc) values (?)', (desc,))
self.conn.commit()
cur.execute('select max(id) from test_run')
return cur.fetchone()[0]
def store(self, run_id, desc, elapsed):
"""
Store the description and elapsed time in the DB, under the passed-in run_id.
"""
cur = self.conn.cursor()
cur.execute(
'insert into block_times (run_id, block_desc, elapsed)'
'values (?, ?, ?)', (run_id, desc, elapsed)
)
self.conn.commit()
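# Minimal usage sketch (database name and descriptions are made up):
#
# storage = TimingDataStorage(db_name='block_times.db')
# run = storage.run_id(desc='nightly test run')
# storage.store(run, 'render course block', 1.23)
# storage.store(run, 'render problem block', 0.45)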
|
class HelloWorld(object):
def __init__(self):
self.hello_world = 'HelloWorld'
def gen_hello_world_msg(self, name: str):
return self.hello_world + ' ' + name
|
from ..core import Field, CapacityType, MillisecondsDatetimeType
from ..core.api.special_values import Autogenerate
from ..core.translators_and_types import MunchListType
from .system_object import InfiniBoxObject
from ..core.bindings import RelatedObjectNamedBinding, RelatedObjectBinding
class _Field(Field):
def __init__(self, *args, **kwargs):
kwargs.setdefault('is_sortable', True)
kwargs.setdefault('is_filterable', True)
super(_Field, self).__init__(*args, **kwargs)
class Export(InfiniBoxObject):
FIELDS = [
_Field("id", is_identity=True, type=int),
_Field("export_path", creation_parameter=True, default=Autogenerate("/{prefix}export_{uuid}")),
_Field("inner_path", creation_parameter=True, optional=True),
Field("filesystem", api_name="filesystem_id", creation_parameter=True, cached=True, type=int,
binding=RelatedObjectNamedBinding()),
_Field("enabled", type=bool, mutable=True, creation_parameter=True, optional=True),
_Field("make_all_users_anonymous", type=bool, mutable=True, creation_parameter=True, optional=True),
_Field("anonymous_gid", type=int, mutable=True, creation_parameter=True, optional=True),
_Field("anonymous_uid", type=int, mutable=True, creation_parameter=True, optional=True),
_Field("privileged_port", type=bool, mutable=True, creation_parameter=True, optional=True),
_Field("transport_protocols", creation_parameter=True, optional=True, mutable=True),
_Field("32bit_file_id", type=bool, mutable=True, creation_parameter=True, optional=True),
_Field("pref_readdir", creation_parameter=True, optional=True, type=CapacityType, mutable=True),
_Field("pref_read", creation_parameter=True, optional=True, type=CapacityType, mutable=True),
_Field("pref_write", creation_parameter=True, optional=True, type=CapacityType, mutable=True),
_Field("max_read", creation_parameter=True, optional=True, type=CapacityType, mutable=True),
_Field("max_write", creation_parameter=True, optional=True, type=CapacityType, mutable=True),
Field("permissions", type=MunchListType, creation_parameter=True, optional=True, mutable=True),
_Field("created_at", type=MillisecondsDatetimeType),
_Field("updated_at", type=MillisecondsDatetimeType),
_Field("snapdir_visible", type=bool, creation_parameter=True, optional=True, mutable=True,
feature_name="dot_snapshot"),
_Field("tenant", api_name="tenant_id", binding=RelatedObjectBinding('tenants'),
type='infinisdk.infinibox.tenant:Tenant', feature_name='tenants'),
]
@classmethod
def is_supported(cls, system):
return system.compat.has_nas()
|
import os, sys
from errno import *
from stat import *
import fcntl
try:
import _find_fuse_parts
except ImportError:
pass
import fuse
from fuse import Fuse
from deeputil import Dummy
DUMMY_LOG = Dummy()
if not hasattr(fuse, '__version__'):
raise RuntimeError("your fuse-py doesn't know of fuse.__version__, probably it's too old.")
fuse.fuse_python_api = (0, 2)
fuse.feature_assert('stateful_files', 'has_init')
def flag2mode(flags):
md = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
m = md[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
if flags & os.O_APPEND:
m = m.replace('w', 'a', 1)
return m
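# For example (assuming the bitmask checks above):
# flag2mode(os.O_RDONLY)               -> 'r'
# flag2mode(os.O_WRONLY | os.O_APPEND) -> 'a'
# flag2mode(os.O_RDWR)                 -> 'w+'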
def logit(fn):
def _fn(*args, **kwargs):
self = args[0]
fnname = fn.__name__
self.log.debug(fnname, args=args, kwargs=kwargs)
try:
r = fn(*args, **kwargs)
except Exception:
self.log.exception('logit_exception_{}'.format(fnname))
raise
return r
return _fn
class MirrorFSFile(object):
@logit
def __init__(self, path, flags, *mode):
self.frompath = path
self.path = path = self.log_cache_dir + '/mirror' + path
self.file = os.fdopen(os.open(path, flags, *mode),
flag2mode(flags))
self.fd = self.file.fileno()
@logit
def read(self, length, offset):
self.file.seek(offset)
return self.file.read(length)
@logit
def write(self, buf, offset):
self.file.seek(offset)
self.file.write(buf)
return len(buf)
@logit
def release(self, flags):
self.file.close()
def _fflush(self):
if 'w' in self.file.mode or 'a' in self.file.mode:
self.file.flush()
@logit
def fsync(self, isfsyncfile):
self._fflush()
if isfsyncfile and hasattr(os, 'fdatasync'):
os.fdatasync(self.fd)
else:
os.fsync(self.fd)
@logit
def flush(self):
self._fflush()
# cf. xmp_flush() in fusexmp_fh.c
os.close(os.dup(self.fd))
@logit
def fgetattr(self):
return os.fstat(self.fd)
@logit
def ftruncate(self, len):
self.file.truncate(len)
@logit
def lock(self, cmd, owner, **kw):
# The code here is really just a demonstration of the locking
# API rather than something which has actually been seen to be useful.
# Advisory file locking is pretty messy in Unix, and the Python
# interface to this doesn't make it better.
# We can't do fcntl(2)/F_GETLK from Python in a platform independent
# way. The following implementation *might* work under Linux.
#
# if cmd == fcntl.F_GETLK:
# import struct
#
# lockdata = struct.pack('hhQQi', kw['l_type'], os.SEEK_SET,
# kw['l_start'], kw['l_len'], kw['l_pid'])
# ld2 = fcntl.fcntl(self.fd, fcntl.F_GETLK, lockdata)
# flockfields = ('l_type', 'l_whence', 'l_start', 'l_len', 'l_pid')
# uld2 = struct.unpack('hhQQi', ld2)
# res = {}
# for i in xrange(len(uld2)):
# res[flockfields[i]] = uld2[i]
#
# return fuse.Flock(**res)
# Convert fcntl-ish lock parameters to Python's weird
# lockf(3)/flock(2) medley locking API...
op = { fcntl.F_UNLCK : fcntl.LOCK_UN,
fcntl.F_RDLCK : fcntl.LOCK_SH,
fcntl.F_WRLCK : fcntl.LOCK_EX }[kw['l_type']]
if cmd == fcntl.F_GETLK:
return -EOPNOTSUPP
elif cmd == fcntl.F_SETLK:
if op != fcntl.LOCK_UN:
op |= fcntl.LOCK_NB
elif cmd == fcntl.F_SETLKW:
pass
else:
return -EINVAL
fcntl.lockf(self.fd, op, kw['l_start'], kw['l_len'])
class MirrorFS(Fuse):
def __init__(self,
file_class=None,
*args,
**kw):
Fuse.__init__(self, *args, **kw)
self.log = DUMMY_LOG
self._log_cache_dir = None
self._mirror_dir = None
self.file_class = file_class or MirrorFSFile
@property
def log_cache_dir(self):
return self._log_cache_dir
@log_cache_dir.setter
def log_cache_dir(self, v):
self._log_cache_dir = v
self._mirror_dir = v + '/mirror'
if not os.path.exists(self._mirror_dir):
os.makedirs(self._mirror_dir)
def _mappath(self, path):
_path = self._mirror_dir + path
self.log.debug('_mappath', fromp=path, top=_path,
log_cache_dir=self._log_cache_dir)
return _path
@logit
def getattr(self, path):
path = self._mappath(path)
return os.lstat(path)
@logit
def readlink(self, path):
path = self._mappath(path)
return os.readlink(path)
@logit
def readdir(self, path, offset):
path = self._mappath(path)
self.log.debug('readdir', path=path, offset=offset)
for e in os.listdir(path):
yield fuse.Direntry(e)
@logit
def unlink(self, path):
path = self._mappath(path)
os.unlink(path)
@logit
def rmdir(self, path):
path = self._mappath(path)
os.rmdir(path)
@logit
def symlink(self, path, path1):
path = self._mappath(path)
path1 = self._mappath(path1)
os.symlink(path, path1)
@logit
def rename(self, path, path1):
path = self._mappath(path)
path1 = self._mappath(path1)
os.rename(path, path1)
@logit
def link(self, path, path1):
path = self._mappath(path)
path1 = self._mappath(path1)
os.link(path, path1)
@logit
def chmod(self, path, mode):
path = self._mappath(path)
os.chmod(path, mode)
@logit
def chown(self, path, user, group):
path = self._mappath(path)
os.chown(path, user, group)
@logit
def truncate(self, path, len):
path = self._mappath(path)
f = open(path, "a")
f.truncate(len)
f.close()
@logit
def mknod(self, path, mode, dev):
path = self._mappath(path)
os.mknod(path, mode, dev)
@logit
def mkdir(self, path, mode):
path = self._mappath(path)
os.mkdir(path, mode)
@logit
def utime(self, path, times):
path = self._mappath(path)
os.utime(path, times)
# The following utimens method would do the same as the above utime method.
# We can't make it better though as the Python stdlib doesn't know of
# subsecond precision in access/modify times.
#
# def utimens(self, path, ts_acc, ts_mod):
# os.utime(path, (ts_acc.tv_sec, ts_mod.tv_sec))
@logit
def access(self, path, mode):
path = self._mappath(path)
if not os.access(path, mode):
return -EACCES
# This is how we could add stub extended attribute handlers...
# (We can't have ones which aptly delegate requests to the underlying fs
# because Python lacks a standard xattr interface.)
#
# def getxattr(self, path, name, size):
# val = name.swapcase() + '@' + path
# if size == 0:
# # We are asked for size of the value.
# return len(val)
# return val
#
# def listxattr(self, path, size):
# # We use the "user" namespace to please XFS utils
# aa = ["user." + a for a in ("foo", "bar")]
# if size == 0:
# # We are asked for size of the attr list, ie. joint size of attrs
# # plus null separators.
# return len("".join(aa)) + len(aa)
# return aa
@logit
def statfs(self):
"""
Should return an object with statvfs attributes (f_bsize, f_frsize...).
Eg., the return value of os.statvfs() is such a thing (since py 2.2).
If you are not reusing an existing statvfs object, start with
fuse.StatVFS(), and define the attributes.
To provide usable information (i.e., if you want sensible df(1)
output), you are suggested to specify the following attributes:
- f_bsize - preferred size of file blocks, in bytes
- f_frsize - fundamental size of file blocks, in bytes
[if you have no idea, use the same as blocksize]
- f_blocks - total number of blocks in the filesystem
- f_bfree - number of free blocks
- f_files - total number of file inodes
- f_ffree - number of free file inodes
"""
return os.statvfs(self._mirror_dir)
@logit
def fsinit(self):
os.chdir(self._mirror_dir)
def main(self, *a, **kw):
return Fuse.main(self, *a, **kw)
|
import numpy as np
from vg.compat import v2 as vg
def indices_of_original_elements_after_applying_mask(mask):
"""
Given a mask that represents which of the original elements should be kept,
produce an array containing the new indices of the original elements. Returns
-1 as the index of the removed elements.
"""
result = np.repeat(-1, len(mask)) # plain -1 keeps the same integer dtype; np.int has been removed from NumPy
result[mask] = np.arange(np.count_nonzero(mask))
return result
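# Example (hypothetical mask):
# indices_of_original_elements_after_applying_mask(np.array([True, False, True]))
# => array([ 0, -1,  1])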
def create_submesh(
mesh, vertex_mask, face_mask, ret_indices_of_original_faces_and_vertices=False
):
"""
Apply the requested mask to the vertices and faces to create a submesh,
discarding the face groups.
"""
from .._mesh import Mesh
new_v = mesh.v[vertex_mask]
indices_of_original_vertices = indices_of_original_elements_after_applying_mask(
vertex_mask
)
new_f = indices_of_original_vertices[mesh.f[face_mask]]
submesh = Mesh(v=new_v, f=new_f)
if ret_indices_of_original_faces_and_vertices:
indices_of_original_faces = indices_of_original_elements_after_applying_mask(
face_mask
)
return submesh, indices_of_original_faces, indices_of_original_vertices
else:
return submesh
def reindex_vertices(mesh, ordering):
"""
Reorder the vertices of the given mesh, returning a new mesh.
Args:
mesh (lacecore.Mesh): The mesh on which to operate.
ordering (np.arraylike): An array specifying the order in which
the original vertices should be arranged.
Returns:
lacecore.Mesh: The reindexed mesh.
"""
from .._mesh import Mesh
vg.shape.check(locals(), "ordering", (mesh.num_v,))
unique_values, inverse = np.unique(ordering, return_index=True)
if not np.array_equal(unique_values, np.arange(mesh.num_v)):
raise ValueError(
"Expected new vertex indices to be unique, and range from 0 to {}".format(
mesh.num_v - 1
)
)
return Mesh(v=mesh.v[ordering], f=inverse[mesh.f], face_groups=mesh.face_groups)
def reindex_faces(mesh, ordering):
"""
Reorder the faces of the given mesh, returning a new mesh.
Args:
mesh (lacecore.Mesh): The mesh on which to operate.
ordering (np.arraylike): An array specifying the order in which
the original faces should be arranged.
Returns:
lacecore.Mesh: The reindexed mesh.
"""
from .._mesh import Mesh
vg.shape.check(locals(), "ordering", (mesh.num_f,))
unique_values = np.unique(ordering)
if not np.array_equal(unique_values, np.arange(mesh.num_f)):
raise ValueError(
"Expected new face indices to be unique, and range from 0 to {}".format(
mesh.num_f - 1
)
)
return Mesh(
v=mesh.v,
f=mesh.f[ordering],
face_groups=None
if mesh.face_groups is None
else mesh.face_groups.reindexed(ordering),
)
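# Usage sketch (a made-up 4-vertex, 2-face mesh; the Mesh import path mirrors the
# relative import used above and is an assumption):
#
# import numpy as np
# from .._mesh import Mesh
# mesh = Mesh(v=np.zeros((4, 3)), f=np.array([[0, 1, 2], [0, 2, 3]]))
# flipped = reindex_vertices(mesh, np.array([3, 2, 1, 0]))
# shuffled = reindex_faces(mesh, np.array([1, 0]))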
|
import pandas as pd
from pandas.io import gbq
import logging
_LOGGER = logging.getLogger(__name__)
def test_vasopressor_units(dataset, project_id):
# verify vasopressors in expected units
units = {
'milrinone': 'mcg/kg/min',
'dobutamine': 'mcg/kg/min',
'dopamine': 'mcg/kg/min',
'epinephrine': 'mcg/kg/min',
'norepinephrine': 'mcg/kg/min',
'phenylephrine': 'mcg/kg/min',
'vasopressin': 'units/hour',
}
itemids = {
'milrinone': 221986,
'dobutamine': 221653,
'dopamine': 221662,
'epinephrine': 221289,
'norepinephrine': 221906,
'phenylephrine': 221749,
'vasopressin': 222315,
}
hadm_id = {
'norepinephrine': [21898267],
'phenylephrine': [26809360],
'vasopressin': [26272149]
}
# verify we always have a unit of measure for the rate
query = f"""
select itemid, COUNT(*) AS n
FROM mimic_icu.inputevents
WHERE itemid IN ({", ".join([str(x) for x in itemids.values()])})
AND rateuom IS NULL
GROUP BY itemid
"""
df = gbq.read_gbq(query, project_id=project_id, dialect="standard")
assert df.shape[0] == 0, 'found vasopressors with null units'
# norepinephrine has two rows in mg/kg/min
# these are actually supposed to be mcg/kg/min - and the patient weight has been set to 1 to make it work
# phenylephrine has one row in mcg/min - looks fine, within expected dose
# vasopressin three rows in units/min - these look OK
for drug, hadm_id_list in hadm_id.items():
query = f"""
select hadm_id, rate, rateuom
FROM mimic_icu.inputevents
WHERE itemid = {itemids[drug]}
AND rateuom != '{units[drug]}'
LIMIT 10
"""
df = gbq.read_gbq(query, project_id=project_id, dialect="standard")
# if we find new uninspected rows, raise a warning. this will only happen when mimic-iv is updated.
if (~df['hadm_id'].isin(hadm_id_list)).any():
_LOGGER.warning(f"""New data found with non-standard unit. Inspect the data with this query:
select *
from `physionet-data.mimic_icu.inputevents`
where itemid = {itemids['vasopressin']}
and stay_id in (
select stay_id from `physionet-data.mimic_icu.inputevents`
where itemid = {itemids['vasopressin']}
and rateuom != '{units['vasopressin']}'
)
order by starttime
""")
assert df.shape[0] != 10, f'many rows found with non-standard unit for {drug}'
def test_vasopressor_doses(dataset, project_id):
# verify vasopressors have reasonable doses
# based on uptodate graphic 99963 version 19.0
# double the maximum dose used in refractory shock is the upper limit used
itemids = {
'milrinone': 221986,
'dobutamine': 221653,
'dopamine': 221662,
'epinephrine': 221289,
'norepinephrine': 221906,
'phenylephrine': 221749,
'vasopressin': 222315,
}
max_dose = {
'milrinone': 1.5,
'dobutamine': 40,
'dopamine': 40,
'epinephrine': 4,
'norepinephrine': 6.6,
'phenylephrine': 18.2,
'vasopressin': 0.08,
}
for vaso, dose in max_dose.items():
query = f"""
select COUNT(vaso_rate) AS n_above_rate
FROM mimic_derived.{vaso}
WHERE vaso_rate >= {dose}
"""
df = gbq.read_gbq(query, project_id=project_id, dialect="standard")
n_above_rate = df.loc[0, 'n_above_rate']
assert n_above_rate == 0, f'found {vaso} rows with dose above {dose}, potentially incorrect'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import requests_cache
from pydeflate import config
# Create session
expire_after = datetime.timedelta(days=3)
wb_session = requests_cache.CachedSession(
cache_name=config.paths.data + r"/wb_cache",
backend="sqlite",
expire_after=expire_after,
)
import pandas as pd
from pandas_datareader import wb
from pydeflate.utils import emu, value_index, update_update_date
def get_iso3c():
countries = wb.get_countries(session=wb_session)
return countries[["name", "iso3c"]].set_index("name")["iso3c"].to_dict()
def _download_wb_indicator(indicator: str, start: int, end: int) -> None:
"""Download an indicator from WB (caching if applicable)"""
df = (
wb.WorldBankReader(
symbols=indicator,
countries="all",
start=start,
end=end,
session=wb_session,
)
.read()
.reset_index(drop=False)
)
df.to_feather(config.paths.data + rf"/{indicator}_{start}_{end}.feather")
print(f"Successfully updated {indicator} for {start}-{end}")
update_update_date("wb")
def _read_wb_indicator(indicator: str, start: int, end: int) -> pd.DataFrame:
"""Read an indicator from WB"""
return pd.read_feather(config.paths.data + rf"/{indicator}_{start}_{end}.feather")
def _clean_wb_indicator(
data: pd.DataFrame,
indicator: str,
) -> pd.DataFrame:
"""Add iso_code, change value name to value and sort"""
return (
data.assign(
iso_code=lambda d: d.country.map(get_iso3c()),
year=lambda d: pd.to_datetime(d.year, format="%Y"),
)
.rename(columns={indicator: "value"})
.dropna(subset=["iso_code"])
.sort_values(["iso_code", "year"])
.reset_index(drop=True)
.filter(["year", "iso_code", "value"], axis=1)
)
def wb_indicator(indicator: str, start: int = 1950, end: int = 2025) -> pd.DataFrame:
"""Download and clean an indicator from the WB.
- indicator: string like 'PA.NUS.FCRF'
- start: integer with starting year (or closest available)
- end: integer with ending year (or closest available)
"""
# Get data object
data = _read_wb_indicator(indicator=indicator, start=start, end=end)
# Convert to dataframe and clean
df = _clean_wb_indicator(data, indicator)
return df
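# Usage sketch (assumes the indicator was downloaded first, e.g. via
# update_indicators() below, so the cached feather file exists):
#
# xe = wb_indicator(indicator="PA.NUS.FCRF")
# xe.head()  # columns: year, iso_code, value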
def update_indicators() -> None:
"""Update data for all WB indicators"""
indicators = [
"NY.GDP.DEFL.ZS",
"NY.GDP.DEFL.ZS.AD",
"FP.CPI.TOTL",
"PA.NUS.FCRF",
"PX.REX.REER",
]
_ = [_download_wb_indicator(i, 1950, 2025) for i in indicators]
def get_gdp_deflator() -> pd.DataFrame:
"""The GDP implicit deflator is the ratio of GDP in current local currency
to GDP in constant local currency. The base year varies by country."""
return wb_indicator(indicator="NY.GDP.DEFL.ZS")
def get_gdp_deflator_linked() -> pd.DataFrame:
"""The GDP implicit deflator is calculated as the ratio of GDP in current
local currency to GDP in constant local currency. This series has been
linked to produce a consistent time series to counteract breaks in
series over time due to changes in base years, source data and
methodologies. Thus, it may not be comparable with other national
accounts series in the database for historical years. The base year
varies by country."""
return wb_indicator(indicator="NY.GDP.DEFL.ZS.AD")
def get_consumer_price_index() -> pd.DataFrame:
"""Consumer price index reflects changes in the cost to the average
consumer of acquiring a basket of goods and services that may be fixed
or changed at specified intervals, such as yearly. The Laspeyres formula
is generally used. Data are period averages."""
return wb_indicator(indicator="FP.CPI.TOTL")
def get_euro2usd() -> dict:
"""Dictionary of EUR to USD exchange rates"""
return get_exchange2usd_dict("EMU")
def get_can2usd() -> dict:
"""Dictionary of CAN to USD exchange rates"""
return get_exchange2usd_dict("CAN")
def get_gbp2usd() -> dict:
"""Dictionary of GBP to USD exchange rates"""
return get_exchange2usd_dict("GBR")
def get_usd_exchange() -> pd.DataFrame:
"""Official exchange rate refers to the exchange rate determined by
national authorities or to the rate determined in the legally
sanctioned exchange market. It is calculated as an annual average based on
monthly averages (local currency units relative to the U.S. dollar)."""
# get exchange rates
df = wb_indicator(indicator="PA.NUS.FCRF")
eur = df.loc[df.iso_code == "EMU"].dropna().set_index("year")["value"].to_dict()
# Euro area countries without exchange rates
eur_mask = (df.iso_code.isin(emu)) & (df.value.isna())
# Assign the EUR exchange rate to euro area countries from the year the euro was adopted
df.loc[eur_mask, "value"] = df.year.map(eur)
return df
def get_exchange2usd_dict(currency_iso: str) -> dict:
"""Dictionary of currency_iso to USD"""
df = get_usd_exchange()
return (
df.loc[df.iso_code == currency_iso]
.dropna()
.set_index("year")["value"]
.to_dict()
)
def get_currency_exchange(currency_iso: str) -> pd.DataFrame:
"""Get exchange rates based on a given currency/country (from LCU)"""
# Get WB exchange rates
df = get_usd_exchange()
target_xe = get_exchange2usd_dict(currency_iso)
df.value = df.value / df.year.map(target_xe)
return df
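# Sketch: re-express the official exchange rates relative to another currency,
# e.g. pounds sterling (requires the PA.NUS.FCRF data to be available locally):
#
# gbp_rates = get_currency_exchange("GBR")  # LCU per GBP instead of LCU per USD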
def get_real_effective_exchange_index() -> pd.DataFrame:
"""Real effective exchange rate is the nominal effective exchange rate
(a measure of the value of a currency against a weighted average of several
foreign currencies) divided by a price deflator or index of costs."""
return wb_indicator(indicator="PX.REX.REER")
def get_xe_deflator(currency_iso: str = "USA", base_year: int = 2010) -> pd.DataFrame:
"""get exchange rate deflator based on OECD base year and exchange rates"""
from datetime import datetime
# get exchange rates
xe = get_currency_exchange(currency_iso=currency_iso)
# get deflators and base year
base = {iso: datetime(base_year, 1, 1) for iso in xe.iso_code.unique()}
# get the exchange rate as an index based on the base year
xe.value = value_index(xe, base)
return xe
def available_methods() -> dict:
return {
"gdp": get_gdp_deflator,
"gdp_linked": get_gdp_deflator_linked,
"cpi": get_consumer_price_index,
}
if __name__ == "__main__":
pass
|
import logging
from typing import Optional
import shared.infrastructure.environment.globalvars as glob
from shared.domain.service.logging.logger import Logger
class FileLogger(Logger):
def __init__(self, name: Optional[str] = None, logfile: Optional[str] = None):
settings = glob.settings
if not name:
name = settings.site()
self._name = name
logger = self._logger
if not logfile:
logfile = self._name
level = logging.DEBUG
if settings.is_production():
level = logging.WARNING
logger.setLevel(level)
formatter = logging.Formatter("%(asctime)s :: %(levelname)s :: %(message)s")
file_handler = logging.FileHandler(f"{settings.logs_dir()}/{logfile}.log")
logging.StreamHandler()
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
@property
def _logger(self):
return logging.getLogger(self._name)
def debug(self, message: str, *args) -> None:
self._logger.debug(message, *args)
def info(self, message: str, *args) -> None:
self._logger.info(message, *args)
def warning(self, message: str, *args) -> None:
self._logger.warning(message, *args)
def error(self, message: str, *args) -> None:
self._logger.error(message, *args)
def critical(self, message: str, *args) -> None:
self._logger.critical(message, *args)
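# Minimal usage sketch (assumes glob.settings is already configured with
# site(), is_production() and logs_dir()):
#
# logger = FileLogger(name="app", logfile="app")
# logger.info("application started")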
|
import sys
from os import getgid, getuid
from pathlib import Path
from pwd import getpwuid
from click import INT
from typer import confirm, prompt
from kolombo.console import debug, enable_debug, finished, info, started, step
from kolombo.util import run
_virtual_configs = {
"addresses": "bob@example.com bob@example.com",
"domains": "example.com",
"mailbox": "bob@example.com bob@example.com/",
"ssl_map": (
"example.com /etc/letsencrypt/live/example.com/privkey.pem "
"/etc/letsencrypt/live/example.com/fullchain.pem"
),
}
def init() -> None:
from kolombo import __version__ as version
username = getpwuid(getuid()).pw_name
started(f"Setting up Kolombo for current user [b]{username}[/]")
step("Creating /etc/kolombo folder ([u]need root privileges[/])")
info("Creating /etc/kolombo folder (as root)")
run(["mkdir", "-p", "-m", "750", "/etc/kolombo"], as_root=True)
info(f"Changing /etc/kolombo owner to {username} (as root)")
run(["chown", f"{getuid()}:{getgid()}", "/etc/kolombo"], as_root=True)
step("Writing configuration to /etc/kolombo/kolombo.conf")
debug_mode = confirm("Enable debug mode?", default=False, show_default=True)
if debug_mode:
enable_debug()
nginx_secret_key: str = prompt(
"Enter secret key for communication between NginX and auth API",
default="changeme",
show_default=True,
hide_input=True,
confirmation_prompt=True,
)
max_auth_attempts: int = prompt(
"Enter maximum auth attempts per one session",
default="3",
show_default=True,
type=INT,
)
passwords_salt: str = prompt(
"Enter secret key to be used as salt for passwords hashing",
default="changeme",
show_default=True,
hide_input=True,
confirmation_prompt=True,
)
configuration = (
f"### Generated by kolombo v{version}\n\n"
"# Whether debug mode is enabled (0 - disabled, 1 - enabled)\n"
f"DEBUG={int(debug_mode)}\n"
"# Secret key that is used to determine that nginx is using API\n"
f"NGINX_SECRET_KEY={nginx_secret_key}\n"
"# Maximum auth attempts per one session\n"
f"MAX_ATTEMPTS={max_auth_attempts}\n"
"# Salt used for passwords hashing\n"
f"SALT={passwords_salt}\n"
)
with open("/etc/kolombo/kolombo.conf", "w") as config_file:
config_file.write(configuration)
step("Populating /etc/kolombo with default folders and files")
debug("Creating /etc/kolombo folders for volumes")
folders = ("maildirs", "mail-enabled", "virtual", "dkim_keys")
for folder in folders:
Path(f"/etc/kolombo/{folder}").mkdir(mode=0o770, exist_ok=True)
for file in ("addresses", "domains", "mailbox", "ssl_map"):
debug(f"Writing default file to /etc/kolombo/virtual/{file}")
with open(f"/etc/kolombo/virtual/{file}", "w") as virtual_file:
virtual_file.write(f"# {_virtual_configs[file]}\n")
step("Installing auto-completion ([u]restart current shell session to use[/])")
run([sys.argv[0], "--install-completion"])
finished("Kolombo is set up!")
|
'''
codeml2tsv.py - analyze results from codeml kaks run
=============================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
Usage
-----
Example::
python codeml2tsv.py --help
Type::
python codeml2tsv.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import string
import re
import tempfile
import subprocess
import optparse
from types import *
import CGAT.Genomics as Genomics
import CGAT.Experiment as E
import CGAT.WrapperCodeML as WrapperCodeML
import scipy
import scipy.stats
import CGAT.TreeTools as TreeTools
import CGAT.Stats as Stats
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id: codeml2tsv.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-m", "--methods", dest="methods", type="string",
help="""methods for analysis.
write-ks-tree: write out ks tree(s).
write-ka-tree: write out ka tree(s).
""" )
parser.add_option("--column-prefix", dest="prefix", type="string",
help="prefix for rows.")
parser.add_option("--input-pattern", dest="pattern_input_filenames", type="string",
help="input pattern.")
parser.add_option("--filter-probability", dest="filter_probability", type="float",
help="threshold for probability above which to include positive sites.")
parser.add_option("--filter-omega", dest="filter_omega", type="float",
help="threshold for omega above which to include positive sites.")
parser.add_option("--models", dest="models", type="string",
help="restrict output to set of site specific models.")
parser.add_option("--significance-threshold", dest="significance_threshold", type="float",
help="significance threshold for log-likelihood test.")
parser.add_option("--mode", dest="mode", type="choice",
choices=("pairs", "1xn"),
help="analysis mode.")
parser.set_defaults(
methods="",
prefix=None,
filter_probability=0,
filter_omega=0,
models="",
significance_threshold=0.05,
mode="pairs",
)
(options, args) = E.Start(parser)
options.methods = options.methods.split(",")
options.models = options.models.split(",")
codeml = WrapperCodeML.CodeML()
results = []
if len(args) == 0:
# read from stdin, if no arguments are given
results.append(codeml.parseOutput(sys.stdin.readlines()))
else:
# read multiple results
for f in args:
try:
results.append(codeml.parseOutput(open(f, "r").readlines()))
except WrapperCodeML.ParsingError as msg:
options.stdlog.write(
"# parsing error in file %s: %s.\n" % (f, msg))
continue
if options.prefix:
prefix_tree = ">%s\n" % options.prefix
prefix_header = "prefix\t"
prefix_row = "%s\t" % options.prefix
else:
prefix_tree = ""
prefix_header = ""
prefix_row = ""
for method in options.methods:
if method == "write-ks-tree":
for result in results:
options.stdout.write(
prefix_tree + TreeTools.Tree2Newick(result.mTreeKs) + "\n")
elif method == "write-ka-tree":
for result in results:
options.stdout.write(
prefix_tree + TreeTools.Tree2Newick(result.mTreeKa) + "\n")
elif method == "write-kaks-tree":
for result in results:
options.stdout.write(
prefix_tree + TreeTools.Tree2Newick(result.mTreeKaks) + "\n")
elif method == "lrt":
# perform log-likelihood ratio test between successive models
# Assumption is that the models are nested with the previous model
# being the less complex model.
first_result = results[0]
last_result = results[0]
x = 1
options.stdout.write(
"%sm1\tm2\tstatus\tlnL1\tnp1\tlnl2\tnp2\tP-value\n" % prefix_header)
for result in results[1:]:
if options.mode == "pairs":
reference_result = last_result
reference_id = x - 1
elif options.mode == "1xn":
reference_result = first_result
reference_id = 0
if reference_result.mNumParameters >= result.mNumParameters:
if options.loglevel >= 1:
options.stdlog.write("number of parameters of full model not increased (null=%i, full=%i).\n" % (
reference_result.mNumParameters, result.mNumParameters))
continue
lrt = Stats.doLogLikelihoodTest(
result.mLogLikelihood, result.mNumParameters,
reference_result.mLogLikelihood, reference_result.mNumParameters,
options.significance_threshold)
if lrt.mPassed:
c = "passed"
else:
c = "failed"
options.stdout.write("%s%i\t%i\t%s\t%f\t%i\t%f\t%i\t%5.2e\n" % (prefix_row, reference_id, x, c,
lrt.mFullLogLikelihood, lrt.mFullNumParameters,
lrt.mNullLogLikelihood, lrt.mNullNumParameters,
lrt.mProbability,
))
last_result = result
x += 1
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
from .franchise import Franchise
from .player import Player
from .season import Season
from .league import League
from .friv import Frivolities
|
import pytest
from kopf import ActivityRegistry
from kopf import OperatorRegistry
from kopf import ResourceWatchingRegistry, ResourceChangingRegistry
from kopf import SimpleRegistry, GlobalRegistry # deprecated, but tested
from kopf.structs.handlers import HandlerId, ResourceChangingHandler
@pytest.fixture(params=[
pytest.param(ActivityRegistry, id='activity-registry'),
pytest.param(ResourceWatchingRegistry, id='resource-watching-registry'),
pytest.param(ResourceChangingRegistry, id='resource-changing-registry'),
pytest.param(SimpleRegistry, id='simple-registry'), # deprecated
])
def generic_registry_cls(request):
return request.param
@pytest.fixture(params=[
pytest.param(ActivityRegistry, id='activity-registry'),
])
def activity_registry_cls(request):
return request.param
@pytest.fixture(params=[
pytest.param(ResourceWatchingRegistry, id='resource-watching-registry'),
pytest.param(ResourceChangingRegistry, id='resource-changing-registry'),
pytest.param(SimpleRegistry, id='simple-registry'), # deprecated
])
def resource_registry_cls(request):
return request.param
@pytest.fixture(params=[
pytest.param(OperatorRegistry, id='operator-registry'),
pytest.param(GlobalRegistry, id='global-registry'), # deprecated
])
def operator_registry_cls(request):
return request.param
@pytest.fixture()
def parent_handler():
def parent_fn(**_):
pass
return ResourceChangingHandler(
fn=parent_fn, id=HandlerId('parent_fn'),
errors=None, retries=None, timeout=None, backoff=None, cooldown=None,
labels=None, annotations=None, when=None,
initial=None, deleted=None, requires_finalizer=None,
reason=None, field=None,
)
|
from django.shortcuts import render,redirect
from django.contrib.auth.hashers import make_password,check_password
# Create your views here.
from user.forms import RegisterForm
from user.models import User
def register(request):
if request.method == "POST":
form = RegisterForm(request.POST,request.FILES)
if form.is_valid():
user = form.save(commit=False)
user.password = make_password(user.password)
user.save()
return redirect("/user/login/")
else:
return render(request, "register.html", {"error" : form.errors})
else:
return render(request, "register.html")
def login(request):
if request.method == "POST":
nickname = request.POST.get("nickname")
password = request.POST.get("password")
try:
user = User.objects.get(nickname=nickname)
except User.DoesNotExist:
return render(request, 'login.html', {'error' : 'User account does not exist'})
if check_password(password, user.password):
request.session['uid'] = user.id
request.session['nickname'] = user.nickname
request.session['acatar'] = user.icon.url
return redirect("/user/info/")
return render(request, "login.html")
def logout(request):
request.session.flush()
return redirect("/")
def user_info(request):
uid = request.session.get("uid")
user = User.objects.get(pk=uid)
return render(request, "user_info.html", {'user' : user})
|
import logging
from django.http import (HttpResponseBadRequest, HttpResponseForbidden,
HttpResponseRedirect)
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.views.generic import CreateView, DeleteView, UpdateView
from rdmo.accounts.utils import is_site_manager
from rdmo.core.views import ObjectPermissionMixin, RedirectViewMixin
from ..forms import MembershipCreateForm
from ..models import Membership, Project
from ..utils import is_last_owner
logger = logging.getLogger(__name__)
class MembershipCreateView(ObjectPermissionMixin, RedirectViewMixin, CreateView):
model = Membership
form_class = MembershipCreateForm
permission_required = 'projects.add_membership_object'
def dispatch(self, *args, **kwargs):
self.project = get_object_or_404(Project.objects.all(), pk=self.kwargs['project_id'])
return super(MembershipCreateView, self).dispatch(*args, **kwargs)
def get_permission_object(self):
return self.project
def get_form_kwargs(self):
kwargs = super(MembershipCreateView, self).get_form_kwargs()
kwargs['project'] = self.project
return kwargs
class MembershipUpdateView(ObjectPermissionMixin, RedirectViewMixin, UpdateView):
model = Membership
queryset = Membership.objects.all()
fields = ('role', )
permission_required = 'projects.change_membership_object'
def get_permission_object(self):
return self.get_object().project
class MembershipDeleteView(ObjectPermissionMixin, RedirectViewMixin, DeleteView):
model = Membership
queryset = Membership.objects.all()
permission_required = 'projects.delete_membership_object'
def delete(self, *args, **kwargs):
self.obj = self.get_object()
if (self.request.user in self.obj.project.owners) or is_site_manager(self.request.user):
# user is owner or site manager
if is_last_owner(self.obj.project, self.obj.user):
logger.info('User "%s" not allowed to remove last user "%s"', self.request.user.username, self.obj.user.username)
return HttpResponseBadRequest()
else:
logger.info('User "%s" deletes user "%s"', self.request.user.username, self.obj.user.username)
success_url = reverse('project', args=[self.get_object().project.id])
self.obj.delete()
return HttpResponseRedirect(success_url)
elif self.request.user == self.obj.user:
# user wants to remove him/herself
logger.info('User "%s" deletes himself.', self.request.user.username)
success_url = reverse('projects')
self.obj.delete()
return HttpResponseRedirect(success_url)
else:
logger.info('User "%s" not allowed to remove user "%s"', self.request.user.username, self.obj.user.username)
return HttpResponseForbidden()
def get_permission_object(self):
return self.get_object().project
|
import frappe
import logging
import logging.config
import os
import json
from pprint import pformat
class ContextFilter(logging.Filter):
"""
This is a filter which injects request information (if available) into the log.
"""
def filter(self, record):
record.form_dict = pformat(getattr(frappe.local, 'form_dict', None))
record.site = getattr(frappe.local, 'site', None)
record.tb = frappe.utils.get_traceback()
return True
def setup_logging():
conf = frappe.get_site_config(sites_path=os.environ.get('SITES_PATH', '.'))
if conf.logging_conf:
logging_conf = conf.logging_conf
else:
logging_conf = {
"version": 1,
"disable_existing_loggers": True,
"filters": {
"context_filter": {
"()": "frappe.setup_logging.ContextFilter"
}
},
"formatters": {
"site_wise": {
"format": "\n%(asctime)s %(message)s \n site: %(site)s\n form: %(form_dict)s\n\n%(tb)s\n--------------"
}
},
"loggers": {
"frappe": {
"level": "INFO",
"propagate": False,
"filters": ["context_filter"],
"handlers": ["request_exception"]
}
},
"handlers": {
"request_exception": {
"level": "ERROR",
"formatter": "site_wise",
"class": "logging.StreamHandler",
}
}
}
if conf.request_exception_log_file:
logging_conf.update({
"handlers": {
"request_exception": {
"level": "ERROR",
"formatter": "site_wise",
"class": "logging.handlers.WatchedFileHandler",
"filename": conf.request_exception_log_file
}
}
})
logging.config.dictConfig(logging_conf)
|
#
# @lc app=leetcode id=478 lang=python3
#
# [478] Generate Random Point in a Circle
#
# @lc code=start
import random
from typing import List # needed for the List[float] annotation outside the LeetCode runtime
class Solution:
def __init__(self, radius: float, x_center: float, y_center: float):
self.x = x_center
self.y = y_center
self.r = radius
def randPoint(self) -> List[float]:
res = [random.random() * 2 - 1, random.random() * 2 - 1]
while res[0] ** 2 + res[1] ** 2 >= 1:
res = [random.random() * 2 - 1, random.random() * 2 - 1]
return [res[0] * self.r + self.x, res[1] * self.r + self.y]
# Your Solution object will be instantiated and called as such:
# obj = Solution(radius, x_center, y_center)
# param_1 = obj.randPoint()
# @lc code=end
|
import unittest
from linty_fresh.linters import android
from linty_fresh.problem import Problem
test_string = """\
<?xml version="1.0" encoding="UTF-8"?>
<issues format="4" by="lint 25.1.6">
<issue
id="ScrollViewSize"
severity="Error"
message="This LinearLayout should use `android:layout_height="wrap_content"`"
category="Correctness"
priority="7"
summary="ScrollView size validation"
explanation="ScrollView children must set their `layout_width` or `layout_height` attributes to `wrap_content` rather than `fill_parent` or `match_parent` in the scrolling dimension"
errorLine1=" android:layout_height="match_parent""
errorLine2=" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
quickfix="studio,adt">
<location
file="scripts/run_tests.sh"
line="15"
column="13"/>
</issue>
<issue
id="DefaultLocale"
severity="Warning"
message="Implicitly using the default locale is a common source of bugs: Use `toLowerCase(Locale)` instead"
category="Correctness"
priority="6"
summary="Implied default locale in case conversion"
explanation="Calling `String#toLowerCase()` or `#toUpperCase()` *without specifying an explicit locale* is a common source of bugs. The reason for that is that those methods will use the current locale on the user's device, and even though the code appears to work correctly when you are developing the app, it will fail in some locales. For example, in the Turkish locale, the uppercase replacement for `i` is *not* `I`.
If you want the methods to just perform ASCII replacement, for example to convert an enum name, call `String#toUpperCase(Locale.US)` instead. If you really want to use the current locale, call `String#toUpperCase(Locale.getDefault())` instead."
url="http://developer.android.com/reference/java/util/Locale.html#default_locale"
urls="http://developer.android.com/reference/java/util/Locale.html#default_locale"
errorLine1=" final String filterPattern = constraint.toString().toLowerCase().trim();"
errorLine2=" ~~~~~~~~~~~">
<location
file="scripts/setup.sh"
line="238"
column="68"/>
</issue>
</issues>
"""
class AndroidLintTest(unittest.TestCase):
def test_empty_parse(self):
self.assertEqual(set(), android.parse('', pass_warnings=False))
def test_parse_all(self):
result = android.parse(test_string, pass_warnings=False)
self.assertEqual(2, len(result))
self.assertIn(Problem('scripts/run_tests.sh',
15,
'ScrollView size validation: This LinearLayout '
'should use '
'`android:layout_height="wrap_content"`'),
result)
self.assertIn(Problem('scripts/setup.sh',
238,
'Implied default locale in case conversion: '
'Implicitly using the default locale is a '
'common source of bugs: Use '
'`toLowerCase(Locale)` instead'),
result)
def test_parse_errors_only(self):
result = android.parse(test_string, pass_warnings=True)
self.assertEqual(1, len(result))
self.assertIn(Problem('scripts/run_tests.sh',
15,
'ScrollView size validation: This LinearLayout '
'should use '
'`android:layout_height="wrap_content"`'),
result)
|
import os
import random
import numpy as np
import pandas as pd
from matplotlib import image
import skimage.color
from skimage.exposure import cumulative_distribution
from copy import deepcopy
from debiasmedimg.cyclegan.util import get_sample_from_path, get_filenames, normalize_for_evaluation, \
ssim_score, get_fid, get_all_samples, save_to_csv, get_filtered_filenames
import debiasmedimg.settings as settings
class ColorTransfer:
"""
Encapsulates the color transfer approach, which is based on histogram equalization in the LAB color space
"""
def __init__(self, csv_file, seed):
"""
Initialize random seed and folder to load from and save to
:param csv_file: File to read image paths from
:param seed: Random seed
"""
self.csv_file = csv_file
# Set random seed
self.seed = seed
random.seed(seed)
def apply_transfer(self, domain_to_transfer, target_domain):
"""
Load in images, color transfer them and export them
:param domain_to_transfer: Which domain to transform
:param target_domain: Domain to transfer to
"""
files_to_transfer = get_filtered_filenames(self.csv_file, domain_to_transfer)
target_files = get_filtered_filenames(self.csv_file, target_domain)
for path in files_to_transfer:
img = image.imread(path)
if img.shape[2] == 4:
# Cut off alpha channel
img = img[:, :, :-1]
print("Cutting off alpha channel")
# Read in random target image
target_img_file = random.choice(list(target_files))
target_img = image.imread(target_img_file)
if target_img.shape[2] == 4:
# Cut off alpha channel
target_img = target_img[:, :, :-1]
# Color transfer images
color_transferred_img = self.lab_color_transfer(img, target_img)
filename = path.split(settings.DB_DIR)[1]
# Cut off filename
path_to_file, filename = filename.rsplit('/', 1)
path_sample_out = settings.OUTPUT_DIR + "/generated_images/color_transfer/" + "to_" + \
target_domain + "/" + str(self.seed) + "/" + path_to_file + "/"
if not os.path.exists(path_sample_out):
os.makedirs(path_sample_out)
image.imsave(path_sample_out + filename, color_transferred_img)
print("Exported:", path_sample_out + filename)
def lab_color_transfer(self, source, target):
"""
Transfer color to a source image given a target
:param source: Image to change
:param target: Image to use for target colours
:return: Color transferred image
"""
# Convert the RGB images to the LAB color space
lab_source = skimage.color.rgb2lab(source)
lab_target = skimage.color.rgb2lab(target)
# CDFs require image values as ints
lab_source_int = self.lab_to_lab_int(lab_source)
lab_target_int = self.lab_to_lab_int(lab_target)
# Calculate the CDFs of the source and target imgs
cdf_lab_source = self.cdf(lab_source_int)
cdf_lab_target = self.cdf(lab_target_int)
# Perform histogram matching
lab_result_int = self.hist_matching(cdf_lab_source, cdf_lab_target, deepcopy(lab_source_int))
lab_result_int = np.clip(lab_result_int, 0, 255)
# Convert LAB to RGB
lab_result = self.lab_int_to_lab(lab_result_int)
result = skimage.color.lab2rgb(lab_result)
return result
@staticmethod
def lab_to_lab_int(img):
"""
Convert an image from regular lab to integer lab representation for histogram matching
:param img: Image to transform
"""
img[:, :, 0] = img[:, :, 0] * 255 / 100
img[:, :, 1] = img[:, :, 1] + 127
img[:, :, 2] = img[:, :, 2] + 127
img = img.astype(np.uint8)
return img
@staticmethod
def lab_int_to_lab(img):
"""
Convert an image from integer lab representation to regular lab representation
:param img: Image to transform
"""
        img = img.astype(np.float64)
img[:, :, 0] = img[:, :, 0] * 100 / 255
img[:, :, 1] = img[:, :, 1] - 127
img[:, :, 2] = img[:, :, 2] - 127
return img
@staticmethod
def cdf(im):
"""
        Computes the per-channel CDFs of an image im given as a numpy ndarray of shape (height, width, 3)
:param im: Image to calculate the CDF of
"""
cdf_rgb = []
for i in range(3):
c, b = cumulative_distribution(im[:, :, i])
# pad the beginning and ending pixels and their CDF values
c = np.insert(c, 0, [0] * b[0])
c = np.append(c, [1] * (255 - b[-1]))
cdf_rgb.append(c)
cdf_rgb = np.array(cdf_rgb)
return cdf_rgb
@staticmethod
def hist_matching(c, c_t, im):
"""
Match the histograms via closest pixel-matches of the given CDFs
:param c: CDF of input image computed with the function cdf()
:param c_t: CDF of target image computed with the function cdf()
        :param im: input image as a numpy ndarray of shape (height, width, 3)
:return: modified pixel values
"""
for ix, layer in enumerate(c):
pixels = np.arange(256)
# find closest pixel-matches corresponding to the CDF of the input image, given the value of the CDF H of
# the template image at the corresponding pixels, s.t. c_t = H(pixels) <=> pixels = H-1(c_t)
new_pixels = np.interp(c[ix], c_t[ix], pixels)
im[:, :, ix] = (np.reshape(new_pixels[im[:, :, ix].ravel()], (im.shape[0], im.shape[1])).astype(np.uint8))
return im
def evaluate(self, validate, csv_file, domain_a, domain_b, dataset):
"""
Evaluate the color transferred images regarding SSIM and FID
:param validate: Whether we are validating or testing
:param csv_file: CSV files containing info on all images used during the evaluation
:param domain_a: Name of domain A
:param domain_b: Name of domain b
:param dataset: Name of the dataset
"""
run_id = "Baseline"
files_a = get_filtered_filenames(csv_file, domain_a)
files_b = get_filtered_filenames(csv_file, domain_b)
print("Evaluating set a")
ssims_a = []
for ix, path in enumerate(files_a):
# Read in images in full size, remove batch dimension
original_fullsize = np.squeeze(get_sample_from_path(path)[0])
filename = path.split(settings.DB_DIR)[1]
# Cut off filename
path_to_file, filename = filename.rsplit('/', 1)
path_sample_out = settings.OUTPUT_DIR + "/generated_images/color_transfer/" + "to_" + \
domain_b + "/" + str(self.seed) + "/" + path_to_file + "/"
transformed_upsampled = np.squeeze(get_sample_from_path(path_sample_out + filename)[0])
# Evaluate ssim
original_fullsize = normalize_for_evaluation(original_fullsize)
transformed_upsampled = normalize_for_evaluation(transformed_upsampled)
# Get the SSIM scores between input and output of the generator
ssim_inout = ssim_score(original_fullsize, transformed_upsampled)
ssims_a.append(ssim_inout)
print("Completed {}/{}".format(ix + 1, len(files_a)))
ssim_a = sum(ssims_a) / len(files_a)
# Read in all images again for FID and wasserstein distance on histograms
a_fullsize = get_all_samples(files_a)
transformed_files = [path_sample_out + f for f in os.listdir(path_sample_out)]
to_b_upsampled = get_all_samples(transformed_files)
b_fullsize = get_all_samples(files_b)
# FID score:
print("Calculating FID score between real domains and generated domains")
fid_b = get_fid(b_fullsize, to_b_upsampled)
fid_original = get_fid(a_fullsize, b_fullsize)
# Add summary to csv file
values = [ssim_a, fid_original, fid_b]
save_to_csv(run_id, self.seed, domain_a, domain_b, values, "color transfer", dataset,
validate=validate, only_ab=True)
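# Illustrative sketch (not part of the original class): the core idea behind
# hist_matching above, shown on a single uint8 channel with plain numpy.
# The arrays below are randomly generated purely for demonstration.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _src = _rng.integers(0, 256, size=(8, 8)).astype(np.uint8)
    _ref = _rng.integers(0, 256, size=(8, 8)).astype(np.uint8)
    # Empirical CDFs over the 256 possible intensity values
    _src_cdf = np.cumsum(np.bincount(_src.ravel(), minlength=256)) / _src.size
    _ref_cdf = np.cumsum(np.bincount(_ref.ravel(), minlength=256)) / _ref.size
    # Map every source intensity to the reference intensity with the closest CDF value
    _lookup = np.interp(_src_cdf, _ref_cdf, np.arange(256))
    _matched = _lookup[_src].astype(np.uint8)
    print("histogram matching sketch, matched range:", _matched.min(), "-", _matched.max())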
|
""" Basic cross-platform utility that launches HTTP server from the current directory.
Useful when sending static web apps/prototypes to non-technical people.
v1 """
import webbrowser, random, sys, os
from http.server import SimpleHTTPRequestHandler, HTTPServer
PORT = URL = httpd = None
# Get the path to the executable's directory that will be served -----------------------
path_to_executable = sys.executable if getattr(sys, 'frozen', False) else \
os.path.abspath(__file__)
CWD = os.path.dirname(path_to_executable)
# Set current working directory to the exec's one --------------------------------------
os.chdir(CWD)
# Create a server instance on an available port ----------------------------------------
while httpd is None:
try_new_port = random.randint(6000, 9999)
try:
PORT = try_new_port
httpd = HTTPServer(('', PORT), SimpleHTTPRequestHandler)
URL = 'http://localhost:{}'.format(PORT)
    except OSError:
print('Port {} is already occupied, trying another one...'.format(try_new_port))
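# Note (suggestion, not part of the original script): passing port 0 lets the OS
# pick a free port, avoiding the retry loop above, e.g.:
#   httpd = HTTPServer(('', 0), SimpleHTTPRequestHandler)
#   PORT = httpd.server_address[1]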
# Print out some information and start the simple server -------------------------------
print('\n\n=====================================')
print('Running app from:\n{}\n'.format(CWD))
print('The app is available at:\n{}'.format(URL))
print('=====================================\n\n')
webbrowser.open(URL)
httpd.serve_forever()
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from rapidsms.log.mixin import LoggerMixin
class BaseHandler(object, LoggerMixin):
def _logger_name(self):
app_label = self.__module__.split(".")[-3]
return "app/%s/%s" % (app_label, self.__class__.__name__)
@classmethod
def dispatch(cls, router, msg):
return False
def __init__(self, router, msg):
self.router = router
self.msg = msg
def respond(self, template=None, **kwargs):
return self.msg.respond(template, **kwargs)
def respond_error(self, template=None, **kwargs):
return self.msg.error(template, **kwargs)
@classmethod
def test(cls, text, identity=None):
"""
Test this handler by dispatching an IncomingMessage containing
``text``, as sent by ``identity`` via a mock backend. Return a
list containing the ``text`` property of each response, in the
        order in which they were sent::
>>> class AlwaysHandler(BaseHandler):
...
... @classmethod
... def dispatch(cls, router, msg):
... msg.respond("xxx")
... msg.respond("yyy")
... return True
>>> AlwaysHandler.test('anything')
['xxx', 'yyy']
Return False if the handler ignored the message (ie, the
``dispatch`` method returned False or None).
>>> class NeverHandler(BaseHandler):
... pass
>>> NeverHandler.test('anything')
False
This is intended to test the handler in complete isolation. To
test the interaction between multiple apps and/or handlers, see
the rapidsms.tests.scripted module.
"""
# avoid setting the default identity to "mock" in the signature,
# to avoid exposing it in the public API. it's not important.
if identity is None:
identity = "mock"
# models can't be loaded until the django ORM is ready.
from rapidsms.models import Backend, Connection
from rapidsms.messages import IncomingMessage
# create a mock backend and connection, so tests can create and
# manipulate linked objects without raising IntegrityError.
bknd = Backend.objects.create(name='mock')
conn = Connection.objects.create(backend=bknd, identity=identity)
msg = IncomingMessage(connection=conn, text=text)
try:
accepted = cls.dispatch(None, msg)
return [m.text for m in msg.responses]\
if accepted else False
# clean up the mock objects, to avoid causing side-effects.
finally:
conn.delete()
bknd.delete()
|
from flask import request, render_template, jsonify
from flask import Flask
from flask_cors import CORS
import numpy as np
import cv2
from custom_inference import run
import json
from utils.box_computations import corners_to_wh
import time
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
inferer = run.Custom_Infernce()
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
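# Example (illustrative only): NumpyEncoder lets json.dumps serialize numpy arrays,
# which the default encoder rejects, e.g.:
#   json.dumps({'boxes': np.array([[1, 2], [3, 4]])}, cls=NumpyEncoder)
#   -> '{"boxes": [[1, 2], [3, 4]]}'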
app = Flask(__name__)
CORS(app)
app.secret_key = "secret key"
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def upload_form():
return render_template('file-upload.html')
@app.route('/process_image', methods=['POST'])
def upload_file():
# check if the post request has the file part
if 'files[]' not in request.files:
resp = jsonify({'message': 'No file part in the request'})
resp.status_code = 400
return resp
files = request.files.getlist('files[]')
nms_thresh, conf, device = float(request.form['nms_thresh']), float(request.form['conf_thresh']), request.form['device']
print(nms_thresh, conf, device)
errors = {}
success = False
file = files[0]
if file and allowed_file(file.filename):
filestr = file.read()
        npimg = np.frombuffer(filestr, np.uint8)
img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
start = time.time()
boxes = inferer.run_inference(img, modify_image=False,
custom_settings=(nms_thresh, conf, device))
boxes = corners_to_wh(boxes)
total = time.time() - start
total = "{:.3f}".format(total)
json_dump = json.dumps({'boxes': boxes}, cls=NumpyEncoder)
boxes = json.loads(json_dump)
success = True
else:
errors[file.filename] = 'File type is not allowed'
if success and errors:
errors['message'] = 'File(s) successfully uploaded'
resp = jsonify(errors)
resp.status_code = 206
return resp
if success:
resp = jsonify({'data': boxes, 'time_taken': total})
resp.status_code = 201
return resp
else:
resp = jsonify(errors)
resp.status_code = 400
return resp
if __name__ == "__main__":
app.run()
|
import os
import platform
import pandas as pd
import pickle
import dill
def loadPickledData(filePath, default=["default"]):
"""
load an existing pickle file or make a pickle with default data
then return the pickled data
Parameters:
- filePath: the absolute path or the relative path
- default: default value if the path is invalid for some reason
"""
try:
with open(filePath, "rb") as f:
content = pickle.load(f)
except Exception:
content = default
with open(filePath, "wb") as f:
pickle.dump(content, f)
return content
def saveObjUsingPickle(filePath, content):
"""
save the content as a byte file using pickle to a specific location
use the alternate dill function for complex objects
Parameters:
- filePath: The absolute or relative path of the pickle file
- Ex for win: C:\\Users\\....\\filename.pkl
- Ex for linux and mac: /home/username/.../filename.pkl
- content: object to be saved"""
with open(filePath, "wb") as f:
pickle.dump(content, f)
def savedf2Pickle(filePath, content):
"""
save a panda dataframe as a .pkl file
Parameters:
- filePath: either the relative path or the absolute path
- content: the df to be saved
"""
content.to_pickle(filePath)
def loadUsingDill(filePath):
"""
open and retrieve the contents of a file
use this when opening a file that contains a complex Class object,
if the object is "simple" use the function loadPickledData()
Parameters:
- filePath: either the relative path or the absolute path
"""
with open(filePath, "rb") as f:
print("unpickling content in {filePath}")
return dill.load(f)
def saveUsingDill(filePath, content):
"""
same as pickle version, save the content in the provided location
Parameters:
- filePath: either the relative path or the absolute path
- content: the content to be saved, allows complex class instance
"""
with open(filePath, "wb") as f:
dill.dump(content, f)
print(f"successfully saved {content} at {filePath}")
def fullPath(fileName, folder=""):
"""
    given the folder and the file name, returns a path string that uses the right type of slash for the computer's OS
Parameters:
- fileName: the name of the file
- folder: the folder where the file is located in, if it's in the same directory, then use an empty string
"""
_, filePath = get_cd()
# we need the os name because different OS uses / or \ to navigate the file system
osName = platform.system()
# get the full path to the file that we're trying to open, and depending on the OS, the slashes changes
fullLocName = filePath + folder + "\\" + fileName
if osName == "Windows": pass
else:
# for OS' like linux and mac(Darwin)
fullLocName = fullLocName.replace("\\", "/")
return fullLocName
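# Note (illustrative alternative, not part of the original module): os.path.join
# picks the correct separator for the current OS automatically, so a sketch of an
# equivalent helper would be:
def fullPathAlternative(fileName, folder=""):
    """Illustrative alternative to fullPath() based on os.path.join"""
    _, filePath = get_cd()
    return os.path.join(filePath, folder, fileName)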
def loadConfig(folder, fileName):
"""load config information from a txt file
Parameters:
    - folder: the folder where the file is located, empty string if it's not in any folder
- fileName: the file name
"""
fullName = fullPath(fileName, folder)
# get the content of the file and convert it to a list
with open(fullName) as f:
content = [line.strip() for line in f.readlines()]
return content
def openCsv(filePath, default = ["new df here"]):
"""
returns the content of the csv file if it exists.
Parameters:
- filePath: the absolute or relative path to the .csv file
- default: default value to load if the file is not located
"""
try:
content = pd.read_csv(filePath, error_bad_lines=False)
except Exception:
print(f"exception, the filename {filePath} you requested to open was not found.")
if (int(input("do you want to make a new file? 1 for yes, 0 for no")) == 1):
            content = pd.DataFrame(default)
            content.to_csv(filePath, index=False, header=False)
return content
def formatData(folder, fileName):
"""
get the relevant data from the file with the corresponding filename, then make a dictionary out of it
Parameters:
    - folder: the folder where the file is located; use an empty string, "", if the file isn't nested
- fileName: the name of the file
"""
fullName = fullPath(fileName, folder)
# get the content of the file and convert it to a panda dataframe
content = openCsv(fullName, [])
df_list = [content.columns.values.tolist()]+content.values.tolist()
# remove white spaces in font and back of all entries
df_list = [[txt.strip() if type(txt) == str else txt for txt in lst] for lst in df_list]
# make a new dataframe from the list, also use the first line in the dataframe as the new header
new_df = pd.DataFrame(df_list)
header = new_df.iloc[0]
new_df = new_df[1:]
new_df.columns = header
return new_df
def make_df(folder, fileName, debug=True):
"""
creates a panda dataframe from the contents in a csv file
Parameters:
    - folder: the folder where the file is located; use an empty string, "", if the file isn't nested
- fileName: the name of the file
"""
a = formatData(folder, fileName)
a.fillna(0, inplace =True)
if debug:
print("this is a preview of the data that you're loading:")
print(a.head(3))
return a
def save_df_to_csv(filepath, content):
content.to_csv(filepath)
def get_cd():
"""
uses the os.path function to get the filename and the absolute path to the current directory
    Also does a primitive check to see if the path is correct; there have been instances where the current directory was different, hence the check.
return Value(s):
- scriptPath: the full directory path
- filePath: the full path that includes the current file
"""
# Get the path to this file
scriptPath, filePath = os.path.realpath(__file__), ""
# get the os name and the backslash or forward slash depending on the OS
os_name = platform.system()
path_slash = "/" if os_name in ["Linux", 'Darwin'] else "\\"
# remove the file name from the end of the path
for i in range(1,len(scriptPath)+1):
if scriptPath[-i] == path_slash:
            scriptPath = scriptPath[0:-i]  # current path, relative to the root directory or C drive
break
if os.getcwd() != scriptPath: filePath = scriptPath + path_slash
return scriptPath, filePath
def mergeR0(pastR0, R0_location):
pastdata = openCsv(R0_location)
    for k, v in pastdata.items():
if k != "Unnamed: 0":
datastring = v[0]
datastring = "".join([i for i in datastring if i not in ["[", "]"]])
datastring = datastring.split(",")
datastring = [float(i) for i in datastring]
pastR0[k]= (datastring, '(npMean, stdev, rangeVal, median)',)
"""
request_name = "request_7"
saveName = "R0"
output_dir = fileRelated.fullPath(request_name, "outputs")
Path(output_dir).mkdir(parents=False, exist_ok=True)
output_folder = "outputs/"+ request_name
statfile.comparingBoxPlots(dict_A, plottedData="R0", saveName=saveName, outputDir=output_folder)
"""
def main():
# run this to check if the files can be extracted
a = formatData("configuration", "agents.csv")
print(a)
if __name__ == "__main__":
main()
|
# --------------------------------------------------------
# This file is used to test all the algorithms performed
# on the graph's
# --------------------------------------------------------
from Graph import *
from colorama import Fore, Style
print("Depth First Search")
print(Fore.BLUE)
print("-----------------------")
print("Graph:")
print("1 -> 2 -> 3")
print("↓ ↗ ↗")
print("5 -> 4")
print()
print("Source:")
print("1")
print("-----------------------")
print(Style.RESET_ALL)
graph = DirectedGraph()  # prints the vertices of a directed graph in
# depth-first-search order, starting from the source
graph.add_vertex(1)
graph.add_vertex(2)
graph.add_vertex(3)
graph.add_vertex(4)
graph.add_vertex(5)
graph.connect(1, 2, 0) # connects vertex 1 to 2
graph.connect(2, 3, 0) # connects vertex 2 to 3
graph.connect(4, 3, 0) # connects vertex 4 to 3
graph.connect(5, 4, 0) # connects vertex 5 to 4
graph.connect(1, 5, 0) # connects vertex 1 to 5
graph.connect(5, 2, 0) # connects vertex 5 to 2
print_in_dfs(graph, 1)
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
print("Breadth First Search")
print(Fore.BLUE)
print("-----------------------")
print("Graph:")
print("1 -> 2 -> 3")
print("↓ ↗ ↗")
print("5 -> 4")
print()
print("Source:")
print("1")
print("-----------------------")
print(Style.RESET_ALL)
graph = DirectedGraph()  # prints the vertices of a directed graph in
# breadth-first-search order, starting from the source
graph.add_vertex(1)
graph.add_vertex(2)
graph.add_vertex(3)
graph.add_vertex(4)
graph.add_vertex(5)
graph.connect(1, 2, 0) # connects vertex 1 to 2
graph.connect(2, 3, 0) # connects vertex 2 to 3
graph.connect(3, 4, 0) # connects vertex 3 to 4
graph.connect(4, 5, 0) # connects vertex 4 to 5
graph.connect(1, 5, 0) # connects vertex 1 to 5
graph.connect(5, 2, 0) # connects vertex 5 to 2
print_in_bfs(graph, 1)
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
print("Shortest Path Breadth First Search")
print(Fore.BLUE)
print("-----------------------")
print("Graph:")
print("1 -> 2 -> 3")
print("↓ ↗ ↗")
print("5 <- 4")
print()
print("Source:")
print("1")
print()
print("Target")
print("2")
print("-----------------------")
print(Style.RESET_ALL)
graph = DirectedGraph() # prints the shortest path from source to target in a directed graph using
# breadth-first-search algorithm
graph.add_vertex(1)
graph.add_vertex(2)
graph.add_vertex(3)
graph.add_vertex(4)
graph.add_vertex(5)
graph.connect(1, 2, 0) # connects vertex 1 to 2
graph.connect(2, 3, 0) # connects vertex 2 to 3
graph.connect(3, 4, 0) # connects vertex 3 to 4
graph.connect(4, 5, 0) # connects vertex 4 to 5
graph.connect(1, 5, 0) # connects vertex 1 to 5
graph.connect(5, 2, 0) # connects vertex 5 to 2
print_shortest_path_bfs(graph, 1, 2)
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
print("Shortest Path Dikstra")
print(Fore.BLUE)
print("-----------------------")
print("Weights")
print("1 -> 2: 6")
print("2 -> 3: 5")
print("3 -> 4: 2")
print("4 -> 5: 3")
print("1 -> 5: 1")
print("5 -> 2: 1")
print()
print("Graph:")
print("1 -> 2 -> 3")
print("↓ ↗ ↗")
print("5 -> 4")
print()
print("Source:")
print("1")
print()
print("Target")
print("2")
print("-----------------------")
print(Style.RESET_ALL)
graph = WeightedGraph()  # prints the shortest path from source to target in a weighted graph using Dijkstra's algorithm
graph.add_vertex(1)
graph.add_vertex(2)
graph.add_vertex(3)
graph.add_vertex(4)
graph.add_vertex(5)
graph.connect(1, 2, 6) # connects vertex 1 to 2 with weight of 6
graph.connect(2, 3, 5) # connects vertex 2 to 3 with weight of 5
graph.connect(3, 4, 2) # connects vertex 3 to 4 with weight of 2
graph.connect(4, 5, 3) # connects vertex 4 to 5 with weight of 3
graph.connect(1, 5, 1) # connects vertex 1 to 5 with weight of 1
graph.connect(5, 2, 1) # connects vertex 5 to 2 with weight of 1
print_shortest_path_dikstra(graph, 1, 2)
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
print("Shortest Path Bellman Ford")
print(Fore.BLUE)
print("-----------------------")
print("Weights")
print("1 -> 2: 6")
print("2 -> 3: 5")
print("3 -> 4: 2")
print("4 -> 5: 3")
print("1 -> 5: 1")
print("5 -> 2: 1")
print()
print("Graph:")
print("1 -> 2 -> 3")
print("↓ ↗ ↗")
print("5 -> 4")
print()
print("Source:")
print("1")
print()
print("Target")
print("2")
print("-----------------------")
print(Style.RESET_ALL)
graph = WeightedGraph() # prints the shortest path from source to target in a weighted graph using bellman ford
# algorithm
graph.add_vertex(1)
graph.add_vertex(2)
graph.add_vertex(3)
graph.add_vertex(4)
graph.add_vertex(5)
graph.connect(1, 2, 6) # connects vertex 1 to 2 with weight of 6
graph.connect(2, 3, 5) # connects vertex 2 to 3 with weight of 5
graph.connect(3, 4, 2) # connects vertex 3 to 4 with weight of 2
graph.connect(4, 5, 3) # connects vertex 4 to 5 with weight of 3
graph.connect(1, 5, 1) # connects vertex 1 to 5 with weight of 1
graph.connect(5, 2, 1) # connects vertex 5 to 2 with weight of 1
print_shortest_path_bellman_ford(graph, 1, 2)
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
print("Shortest Path Bellman Ford Optimized Using Depth First Search")
print(Fore.BLUE)
print("-----------------------")
print("Weights")
print("1 -> 2: 6")
print("2 -> 3: 5")
print("3 -> 4: 2")
print("4 -> 5: 3")
print("1 -> 5: 1")
print("5 -> 2: 1")
print()
print("Graph:")
print("1 -> 2 -> 3")
print("↓ ↗ ↗")
print("5 -> 4")
print()
print("Source:")
print("1")
print()
print("Target")
print("2")
print("-----------------------")
print(Style.RESET_ALL)
graph = WeightedGraph() # prints the shortest path from source to target in a weighted graph using bellman ford
# algorithm
graph.add_vertex(1)
graph.add_vertex(2)
graph.add_vertex(3)
graph.add_vertex(4)
graph.add_vertex(5)
graph.connect(1, 2, 6) # connects vertex 1 to 2 with weight of 6
graph.connect(2, 3, 5) # connects vertex 2 to 3 with weight of 5
graph.connect(3, 4, 2) # connects vertex 3 to 4 with weight of 2
graph.connect(4, 5, 3) # connects vertex 4 to 5 with weight of 3
graph.connect(1, 5, 1) # connects vertex 1 to 5 with weight of 1
graph.connect(5, 2, 1) # connects vertex 5 to 2 with weight of 1
print_shortest_path_bellman_ford_optimized_dfs(graph, 1, 2)
print("-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------")
|
from rest_framework.response import Response
from assessment.middlewares.validators.errors import raises_error
from re import sub
from assessment.middlewares.validators.constants import database_types, related_mapper
class QueryParser():
valid_include = ['children']
excluded_fields = ['score', 'questions', 'answers','correct_choices']
included_params = ['limit', 'offset','include', 'order_by', 'assessment_id', 'question_id', 'page', 'user_id', 'assessment_name_id']
related_mapper = related_mapper
@classmethod
def parse_all(cls, *args):
self, model, query, schema, eagerLoadSchema=args
querySet = cls.build_queryset(model, query)
page = self.paginate_queryset(querySet)# if query.get('page') else None
if page is not None:
querySet = page
schema_data = schema(querySet, many=True).data
if eagerLoadSchema:
schema_data = cls.include_children(schema, eagerLoadSchema, query, querySet, True)
return cls.pagination_parser(self, page, schema_data)
@classmethod
def pagination_parser(cls, self, page, data):
if page is not None:
return self.get_paginated_response(data)
return Response(data)
@classmethod
def include_children(cls, *args):
schema, eagerLoadSchema, query, querySet, many = args
include = query.get('include')
if include:
cls.validate_include(include)
return eagerLoadSchema(querySet, many=many).data
return schema(querySet, many=many).data
@classmethod
def validate_include(cls, include):
        if include not in cls.valid_include:
raises_error('include_error', 400, include, cls.valid_include)
@classmethod
def build_queryset(cls, model, query):
url_queries ={}
order_by = query.get('orderBy')
for key in query.keys():
url_queries.update(cls.query_to_dict(key, query, model))
url_queries = cls.filter_by_related_id(model.__name__, key, query, url_queries)
if order_by:
order = cls.validate_order_by(order_by, model)
return model.objects.filter(**url_queries).order_by(order)
return model.objects.filter(**url_queries)
@classmethod
def filter_by_related_id(cls, *args):
model_name, key, query, url_query = args
related_data = cls.related_mapper.get(key)
if related_data and related_data.get(model_name):
related_id_value = query.get(key)
cls.validate_field_type('IntegerField', related_id_value)
url_query.update({related_data[model_name]: related_id_value})
elif related_data:
raises_error('url_query_error', 400, key, model_name)
return url_query
@classmethod
def get_model_fields(cls, model):
valid_fields = {field.name:field.get_internal_type() for field in model._meta.get_fields() if field.name not in cls.excluded_fields}
return valid_fields
@classmethod
def query_to_dict(cls, key, query, model):
snake_case_key = cls.snake_case(key)
model_fields = cls.get_model_fields(model)
url_query_value = query.get(key)
if snake_case_key in model_fields:
model_type = model_fields[snake_case_key]
cls.validate_field_type(model_type,url_query_value)
return { snake_case_key: url_query_value }
if snake_case_key[6:] in model_fields or snake_case_key[4:] in model_fields:
return cls.start_end_query_to_dict(snake_case_key, url_query_value)
if snake_case_key in cls.included_params:
return {}
raises_error('url_query_error', 400, key, model.__name__)
@classmethod
def snake_case(cls, string):
return sub(r'(.)([A-Z])', r'\1_\2', string).lower()
@classmethod
def validate_field_type(cls, model_type, value):
try:
get_type = database_types.get(str(model_type))
if get_type:
(get_type.get('cast')(value))
except ValueError:
raises_error('invalid_value', 400, value, model_type, )
@classmethod
def start_end_query_to_dict(cls, key, value):
if key.startswith('start'):
return { f'{key[6:]}__gte': value }
elif key.startswith('end'):
return { f'{key[4:]}__lte': value }
return {}
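    # Illustrative examples (not part of the original class):
    #   snake_case('createdAt')                              -> 'created_at'
    #   start_end_query_to_dict('start_created_at', '2020')  -> {'created_at__gte': '2020'}
    #   start_end_query_to_dict('end_created_at', '2020')    -> {'created_at__lte': '2020'}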
@classmethod
def validate_order_by(cls, order_by, model):
        order_by_column = cls.snake_case(order_by)
        column = order_by_column[4:]
        if column not in cls.get_model_fields(model):
            raises_error('order_by_error', 400, order_by, 'orderBy')
        if order_by.startswith('dec'):
            return f'-{column}'
        if order_by.startswith('asc'):
            return column
        raises_error('order_by_error', 400, order_by, 'orderBy')
|
# Copyright 2019 Akiomi Kamakura
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from pilgram import util
def hue_rotate(im, deg=0):
"""Applies hue rotation.
A hue rotate operation is equivalent to the following matrix operation:
    | R' |     | a00 a01 a02 0 0 |   | R |
    | G' |     | a10 a11 a12 0 0 |   | G |
    | B' |  =  | a20 a21 a22 0 0 | * | B |
    | A' |     |  0   0   0  1 0 |   | A |
    | 1  |     |  0   0   0  0 1 |   | 1 |
    where
    | a00 a01 a02 |   [+0.213 +0.715 +0.072]
    | a10 a11 a12 | = [+0.213 +0.715 +0.072] +
    | a20 a21 a22 |   [+0.213 +0.715 +0.072]
                             [+0.787 -0.715 -0.072]
    cos(hueRotate value) *   [-0.213 +0.285 -0.072] +
                             [-0.213 -0.715 +0.928]
                             [-0.213 -0.715 +0.928]
    sin(hueRotate value) *   [+0.143 +0.140 -0.283]
                             [-0.787 +0.715 +0.072]
See the W3C document:
https://www.w3.org/TR/SVG11/filters.html#feColorMatrixValuesAttribute
Arguments:
im: An input image.
deg: An optional integer/float. The hue rotate value (degrees).
Defaults to 0.
Returns:
The output image.
"""
cos_hue = math.cos(math.radians(deg))
sin_hue = math.sin(math.radians(deg))
matrix = [
.213 + cos_hue * .787 - sin_hue * .213,
.715 - cos_hue * .715 - sin_hue * .715,
.072 - cos_hue * .072 + sin_hue * .928,
0,
.213 - cos_hue * .213 + sin_hue * .143,
.715 + cos_hue * .285 + sin_hue * .140,
.072 - cos_hue * .072 - sin_hue * .283,
0,
.213 - cos_hue * .213 - sin_hue * .787,
.715 - cos_hue * .715 + sin_hue * .715,
.072 + cos_hue * .928 + sin_hue * .072,
0,
]
rotated = util.or_convert(im, 'RGB').convert('RGB', matrix)
return util.or_convert(rotated, im.mode)
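# Example usage (illustrative; 'photo.jpg' is a placeholder path):
#   from PIL import Image
#   im = Image.open('photo.jpg')
#   hue_rotate(im, deg=90).save('photo_hue_rotated.jpg')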
|
"""A collection of models which outline the scope and options of a particular project.
"""
import abc
from typing import TYPE_CHECKING, List, Optional, Union
import requests
from pydantic import Field, conlist, root_validator, validator
from typing_extensions import Literal
from nonbonded.library.config import settings
from nonbonded.library.models import BaseREST
from nonbonded.library.models.authors import Author
from nonbonded.library.models.engines import ForceBalance
from nonbonded.library.models.exceptions import MutuallyExclusiveError
from nonbonded.library.models.exceptions.exceptions import DuplicateItemsError
from nonbonded.library.models.forcefield import ForceField, Parameter
from nonbonded.library.models.models import BaseRESTCollection
from nonbonded.library.models.targets import OptimizationTarget
from nonbonded.library.models.validators.string import IdentifierStr, NonEmptyStr
from nonbonded.library.utilities.environments import ChemicalEnvironment
if TYPE_CHECKING:
PositiveInt = int
else:
from pydantic import PositiveInt
class SubStudy(BaseREST, abc.ABC):
"""A base class for optimization and benchmark sub-studies, which share largely the
same fields.
"""
id: IdentifierStr = Field(
..., description="The unique id assigned to this sub-study."
)
study_id: IdentifierStr = Field(..., description="The id of the parent study.")
project_id: IdentifierStr = Field(..., description="The id of the parent project.")
name: NonEmptyStr = Field(..., description="The name of the sub-study.")
description: NonEmptyStr = Field(
..., description="A description of this sub-study."
)
force_field: Optional[ForceField] = Field(
None,
description="The force field which will be used in this sub-study. If this is "
"a force field produced by an optimization from the parent study, the "
"``optimization_id`` input should be used instead. This option is mutually "
"exclusive with `optimization_id`.",
)
optimization_id: Optional[IdentifierStr] = Field(
None,
description="The id of the optimization which produced the force field to use "
"in this sub-study. This must be the id of an optimization which is part of "
"the same study and project. This option is mutually exclusive with "
"``force_field``.",
)
analysis_environments: List[ChemicalEnvironment] = Field(
...,
description="The chemical environments to consider when analysing the results "
"of this sub-study.",
)
@root_validator
def _validate_mutually_exclusive(cls, values):
optimization_id = values.get("optimization_id")
force_field = values.get("force_field")
if (optimization_id is None and force_field is None) or (
optimization_id is not None and force_field is not None
):
raise MutuallyExclusiveError("optimization_id", "force_field")
return values
class SubStudyCollection(BaseRESTCollection, abc.ABC):
@classmethod
@abc.abstractmethod
def sub_study_type(cls):
"""The type of sub-study stored in this collection."""
@classmethod
def _get_endpoint(cls, *, project_id: str, study_id: str):
return (
f"{settings.API_URL}/projects/"
f"{project_id}"
f"/studies/"
f"{study_id}"
f"/{cls.sub_study_type().__name__.lower()}s/"
)
class Optimization(SubStudy):
model_version: Literal[0] = Field(
0,
description="The current version of this model. Models with different version "
"numbers are incompatible.",
)
engine: Union[ForceBalance] = Field(
...,
description="The engine to use to drive the optimization.",
)
targets: conlist(OptimizationTarget, min_items=1) = Field(
...,
description="A list of the fitting targets to include in the optimization. "
"These represent different kinds of contributions to the objective function, "
"such as deviations from experimental measurements or from computed QM data.",
)
max_iterations: PositiveInt = Field(
...,
description="The maximum number of optimization iterations to perform. The "
"number actually performed may be less depending on if the optimization engine "
"supports automatically detecting whether the optimization has converged.",
)
parameters_to_train: conlist(Parameter, min_items=1) = Field(
..., description="The force field parameters to be optimized."
)
@validator("parameters_to_train")
def _validate_unique_parameters(cls, value: List[Parameter]) -> List[Parameter]:
unique_parameters = set()
duplicate_parameters = set()
for parameter in value:
if parameter in unique_parameters:
duplicate_parameters.add(parameter)
unique_parameters.add(parameter)
if len(duplicate_parameters) > 0:
raise DuplicateItemsError("parameters_to_train", duplicate_parameters)
return value
@validator("targets")
def _validate_unique_target_names(
cls, value: List[OptimizationTarget]
) -> List[OptimizationTarget]:
names = {target.id for target in value}
assert len(names) == len(value)
return value
@classmethod
def _get_endpoint(cls, *, project_id: str, study_id: str, sub_study_id: str):
return (
f"{settings.API_URL}/projects/"
f"{project_id}"
f"/studies/"
f"{study_id}"
f"/optimizations/"
f"{sub_study_id}"
)
def _post_endpoint(self):
return (
f"{settings.API_URL}/projects/"
f"{self.project_id}"
f"/studies/"
f"{self.study_id}"
f"/optimizations/"
)
def _put_endpoint(self):
return (
f"{settings.API_URL}/projects/"
f"{self.project_id}"
f"/studies/"
f"{self.study_id}"
f"/optimizations/"
)
def _delete_endpoint(self):
return (
f"{settings.API_URL}/projects/"
f"{self.project_id}"
f"/studies/"
f"{self.study_id}"
f"/optimizations/"
f"{self.id}"
)
@root_validator
def _validate_self_reference(cls, values):
identifier = values.get("id")
optimization_id = values.get("optimization_id")
assert optimization_id is None or optimization_id != identifier
return values
@classmethod
def from_rest(
cls,
*,
project_id: str,
study_id: str,
sub_study_id: str,
requests_class=requests,
) -> "Optimization":
# noinspection PyTypeChecker
return super(Optimization, cls).from_rest(
project_id=project_id,
study_id=study_id,
sub_study_id=sub_study_id,
requests_class=requests_class,
)
class OptimizationCollection(SubStudyCollection):
@classmethod
def sub_study_type(cls):
return Optimization
optimizations: List[Optimization] = Field(
default_factory=list,
description="A collection of optimizations.",
)
class Benchmark(SubStudy):
model_version: Literal[0] = Field(
0,
description="The current version of this model. Models with different version "
"numbers are incompatible.",
)
test_set_ids: conlist(IdentifierStr, min_items=1) = Field(
...,
description="The unique identifiers of the data sets to use as part of the "
"benchmarking.",
)
@classmethod
def _get_endpoint(cls, *, project_id: str, study_id: str, sub_study_id: str):
return (
f"{settings.API_URL}/projects/"
f"{project_id}"
f"/studies/"
f"{study_id}"
f"/benchmarks/"
f"{sub_study_id}"
)
def _post_endpoint(self):
return (
f"{settings.API_URL}/projects/"
f"{self.project_id}"
f"/studies/"
f"{self.study_id}"
f"/benchmarks/"
)
def _put_endpoint(self):
return (
f"{settings.API_URL}/projects/"
f"{self.project_id}"
f"/studies/"
f"{self.study_id}"
f"/benchmarks/"
)
def _delete_endpoint(self):
return (
f"{settings.API_URL}/projects/"
f"{self.project_id}"
f"/studies/"
f"{self.study_id}"
f"/benchmarks/"
f"{self.id}"
)
@classmethod
def from_rest(
cls,
*,
project_id: str,
study_id: str,
sub_study_id: str,
requests_class=requests,
) -> "Benchmark":
# noinspection PyTypeChecker
return super(Benchmark, cls).from_rest(
project_id=project_id,
study_id=study_id,
sub_study_id=sub_study_id,
requests_class=requests_class,
)
class BenchmarkCollection(SubStudyCollection):
@classmethod
def sub_study_type(cls):
return Benchmark
benchmarks: List[Benchmark] = Field(
default_factory=list,
description="A collection of benchmarks.",
)
class Study(BaseREST):
model_version: Literal[0] = Field(
0,
description="The current version of this model. Models with different version "
"numbers are incompatible.",
)
id: IdentifierStr = Field(..., description="The unique id assigned to this study.")
project_id: IdentifierStr = Field(..., description="The id of the parent project.")
name: NonEmptyStr = Field(..., description="The name of the study.")
description: NonEmptyStr = Field(..., description="A description of this study.")
optimizations: List[Optimization] = Field(
default_factory=list,
description="The optimizations to perform as part of this study.",
)
benchmarks: List[Benchmark] = Field(
default_factory=list,
description="The benchmarks to perform as part of this study.",
)
@root_validator
def _validate_studies(cls, values):
study_id = values.get("id")
optimizations: List[Optimization] = values.get("optimizations")
benchmarks: List[Benchmark] = values.get("benchmarks")
assert all(optimization.study_id == study_id for optimization in optimizations)
assert all(benchmark.study_id == study_id for benchmark in benchmarks)
optimization_ids = set(x.id for x in optimizations)
assert len(optimization_ids) == len(optimizations)
assert len(set(x.id for x in benchmarks)) == len(benchmarks)
assert all(
benchmark.optimization_id is None
or benchmark.optimization_id in optimization_ids
for benchmark in benchmarks
)
return values
@classmethod
def _get_endpoint(cls, *, project_id: str, study_id: str):
return f"{settings.API_URL}/projects/{project_id}/studies/{study_id}"
def _post_endpoint(self):
return f"{settings.API_URL}/projects/{self.project_id}/studies/"
def _put_endpoint(self):
return f"{settings.API_URL}/projects/{self.project_id}/studies/"
def _delete_endpoint(self):
return (
f"{settings.API_URL}/projects/"
f"{self.project_id}"
f"/studies/"
f"{self.id}"
)
@classmethod
def from_rest(
cls, *, project_id: str, study_id: str, requests_class=requests
) -> "Study":
# noinspection PyTypeChecker
return super(Study, cls).from_rest(
project_id=project_id, study_id=study_id, requests_class=requests_class
)
class StudyCollection(BaseRESTCollection):
studies: List[Study] = Field(
default_factory=list,
description="A collection of studies.",
)
@classmethod
def _get_endpoint(cls, *, project_id: str):
return f"{settings.API_URL}/projects/{project_id}/studies/"
class Project(BaseREST):
model_version: Literal[0] = Field(
0,
description="The current version of this model. Models with different version "
"numbers are incompatible.",
)
id: IdentifierStr = Field(..., description="The unique id assigned to the project.")
name: NonEmptyStr = Field(..., description="The name of the project.")
description: NonEmptyStr = Field(..., description="A description of the project.")
authors: conlist(Author, min_items=1) = Field(
..., description="The authors of the project."
)
studies: List[Study] = Field(
default_factory=list,
description="The studies conducted as part of the project.",
)
@root_validator
def _validate_studies(cls, values):
project_id = values.get("id")
studies = values.get("studies")
assert len(set(x.id for x in studies)) == len(studies)
assert all(
(
study.project_id == project_id
and all(opt.project_id == project_id for opt in study.optimizations)
and all(bench.project_id == project_id for bench in study.benchmarks)
)
for study in studies
)
return values
@classmethod
def _get_endpoint(cls, *, project_id: str):
return f"{settings.API_URL}/projects/{project_id}"
def _post_endpoint(self):
return f"{settings.API_URL}/projects/"
def _put_endpoint(self):
return f"{settings.API_URL}/projects/"
def _delete_endpoint(self):
return f"{settings.API_URL}/projects/{self.id}"
@classmethod
def from_rest(cls, *, project_id: str, requests_class=requests) -> "Project":
# noinspection PyTypeChecker
return super(Project, cls).from_rest(
project_id=project_id, requests_class=requests_class
)
class ProjectCollection(BaseRESTCollection):
projects: List[Project] = Field(
default_factory=list,
description="A collection of projects.",
)
@classmethod
def _get_endpoint(cls, **kwargs):
return f"{settings.API_URL}/projects/"
|
from django.apps import AppConfig
class PetfinderConfig(AppConfig):
name = 'petfinder'
|
from collections import defaultdict
from itertools import product
from multiprocessing import Pool
def count_neighbors_3(cube, x, y, z):
count = 0
for dx, dy, dz in product(range(-1, 2), repeat=3):
if dx == 0 and dy == 0 and dz == 0:
continue
count += cube[x+dx,y+dy,z+dz]
return count
def count_neighbors_4(cube, x, y, z, w):
count = 0
root = (x, y, z, w)
for d in product(range(x-1,x+2), range(y-1,y+2),range(z-1,z+2),range(w-1,w+2)):
count += d != root and cube[d]
return count
def d17(inp):
cube = None
next_cube = defaultdict(bool)
four_cube = defaultdict(bool)
transition = [defaultdict(bool), defaultdict(bool)]
transition[False][3] = True
transition[True][2] = True
transition[True][3] = True
ix, ax = 0, 0
iy, ay = 0, 0
iz, az = 0, 0
for y, line in enumerate(inp.split('\n')):
for x, char in enumerate(line):
if char == '#':
next_cube[x,y,0] = True
four_cube[x,y,0,0] = True
iax = ax = x
iay = ay = y
for i in range(6):
cube = next_cube.copy()
next_cube.clear()
for x, y, z in product(range(ix-1, ax+2), range(iy-1, ay+2), range(iz-1, az+2)):
nbrs = count_neighbors_3(cube, x, y, z)
if transition[cube[x,y,z]][nbrs]:
next_cube[x,y,z] = True
ix, ax = min(ix, x), max(ax, x)
iy, ay = min(iy, y), max(ay, y)
iz, az = min(iz, z), max(az, z)
p1 = sum(next_cube.values())
ix, ax = 0, iax
iy, ay = 0, iay
iz, az = 0, 0
iw, aw = 0, 0
next_cube = four_cube
for i in range(5):
cube = next_cube.copy()
next_cube.clear()
for x, y, z, w in product(range(ix-1, ax+2), range(iy-1, ay+2), range(iz-1, az+2), range(iw-1, aw+2)):
nbrs = count_neighbors_4(cube, x, y, z, w)
if transition[cube[x,y,z,w]][nbrs]:
next_cube[x,y,z,w] = True
ix, ax = min(ix, x), max(ax, x)
iy, ay = min(iy, y), max(ay, y)
iz, az = min(iz, z), max(az, z)
iw, aw = min(iw, w), max(aw, w)
p2 = 0
for x, y, z, w in product(range(ix-1, ax+2), range(iy-1, ay+2), range(iz-1, az+2), range(iw-1, aw+2)):
nbrs = count_neighbors_4(next_cube, x, y, z, w)
if transition[next_cube[x,y,z,w]][nbrs]:
p2 += 1
return p1, p2
def validate_test(case_id, inp=None, want_p1=None, want_p2=None):
#print(f"validate_test({case_id}, {inp}, {want_p1}, {want_p2})")
got_p1, got_p2 = d17(inp)
if want_p1 is not None:
assert want_p1 == got_p1, f"{case_id=} p1:\n\t{want_p1=}\n\t{got_p1=}"
if want_p2 is not None:
assert want_p2 == got_p2, f"{case_id=} p2:\n\t{want_p2=}\n\t{got_p2=}"
return True
def main():
with open('../inputs/d17.txt') as f:
inp = f.read().strip()
return d17(inp)
if __name__ == '__main__':
cases = [
#(id, inp, p1, p2),
('sample', '.#.\n..#\n###', 112, 848),
]
"""
for case in cases:
validate_test(*case)
p1, p2 = main()
print(f"p1 = {p1}\np2 = {p2}")
"""
with Pool(processes=min(8, len(cases) + 1)) as pool:
#import time
main_res = pool.apply_async(main)
#p1, p2 = main_res.get(60)
test_res = [pool.apply_async(validate_test, case) for case in cases]
#time.sleep(3)
#assert main_res.ready()
if all(test.get(30) for test in test_res):
p1, p2 = main_res.get(60)
print(f"p1 = {p1}\np2 = {p2}")
|
import torch.nn as nn
import scipy.ndimage
import torch
import numpy as np
class GaussianFilter(nn.Module):
def __init__(self, channels, kernel_size, sigma, peak_to_one = False):
super().__init__()
padding = int(kernel_size/2)
self.pad = nn.ZeroPad2d(padding)
kernel = self._make_gaussian_kernel(kernel_size, sigma, peak_to_one)
self.kernel_max = kernel.max()
self.conv = self._define_conv(channels, kernel, kernel_size)
self.conv.weight.requires_grad = False
def forward(self, x):
x = self.pad(x)
x = self.conv(x)
return x
def _define_conv(self, channels, kernel, kernel_size):
conv = nn.Conv2d(channels, channels, groups = channels, kernel_size = kernel_size, padding = 0, stride = 1, bias = False)
conv.weight.data.copy_(kernel)
return conv
def _make_gaussian_kernel(self, kernel_size, sigma, peak_to_one):
g_kernel = np.zeros((kernel_size, kernel_size)).astype(np.float64)
center = int(kernel_size / 2)
g_kernel[center, center] = 1
g_kernel = scipy.ndimage.gaussian_filter(g_kernel, sigma)
if peak_to_one:
g_kernel = g_kernel / g_kernel.max()
return torch.from_numpy(g_kernel)
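# Minimal usage sketch (not part of the original module): blur a random
# single-channel batch with a 5x5 Gaussian kernel.
if __name__ == "__main__":
    blur = GaussianFilter(channels=1, kernel_size=5, sigma=1.0)
    x = torch.rand(2, 1, 32, 32)
    y = blur(x)
    print(y.shape)  # expected: torch.Size([2, 1, 32, 32])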
|
#This file contains the configuration variables for your app, such as database details.
import os
|
#/mpcfit/mpcfit/transformations.py
"""
--------------------------------------------------------------
Oct 2018
Payne
Functions for transformation between coordinate frames
To include covariance transformation capabilities
See Bernstein's orbit/transforms.c code for comparison
See B&K (2000) and/or Farnocchia et al (2015) for math
--------------------------------------------------------------
"""
#Import third-party packages
# --------------------------------------------------------------
import numpy as np
import collections
#Import neighboring packages
#--------------------------------------------------------------
#import mpcutilities.phys_const as PHYS
#Fitting functions & classes
#--------------------------------------------------------------
class TRANSFORMS:
'''
To hold functions for transformation between coordinate frames
'''
def __init__(self, ):
'''
...
'''
self.inputCov =np.array( [ [1,2,3,4], [5,6,7,8], [9,10,11,12] ,[13,14,15,16] ])
self.partial =np.array( [ [1,2,], [3,4], [5,6] , [7,8]])
def covarianceRemap(self, inputCov, partial):
'''
Remap covariance matrix from one basis to another, using
the partial deriv matrix
\Gamma_{out} = A \Gamma_{in} A^T
A = \frac{\partial X_{out}}{\partial X_{in}}
X_{in}, X_{out} = input & output coordinate systems
Compare to Bernstein orbit/orbfit1.c/covar_map
kin = dimension on input, kout = dimension on output.
'''
# Need to have numpy arrays / matricees
assert isinstance(inputCov, np.ndarray)
assert isinstance(partial, np.ndarray)
# Do the matrix multiplication in the correct order
print("inputCov", inputCov)
print("partial", partial)
pT = np.transpose(partial)
print("pT",pT )
tmp = np.matmul(inputCov, pT)
print("tmp", tmp)
outputCov = np.matmul(partial, tmp)
print("outputCov", outputCov)
# Check on mat-mult calc:
'''
for i in range(
for (i=1; i<=kout; i++)
for (j=1; j<=kout; j++) {
sum = 0.;
for (m=1; m<=kin; m++)
for (n=1; n<=kin; n++)
sum += derivs[i][m]*derivs[j][n]*covar_in[m][n];
covar_out[i][j] = sum;
}
'''
return outputCov
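# Quick numerical check (illustrative, not part of the original module): compare
# covarianceRemap against the explicit double sum in Bernstein's covar_map,
# assuming the partial-derivative matrix A has shape (kout, kin).
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _cov_in = _rng.normal(size=(4, 4))
    _cov_in = _cov_in @ _cov_in.T                 # symmetric input covariance
    _partials = _rng.normal(size=(2, 4))          # hypothetical 4 -> 2 coordinate map
    _remapped = TRANSFORMS().covarianceRemap(_cov_in, _partials)
    _explicit = np.einsum('im,jn,mn->ij', _partials, _partials, _cov_in)
    assert np.allclose(_remapped, _explicit)
    print("covarianceRemap agrees with the explicit double sum")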
|
# -*- coding: utf-8 -*-
# Copyright 2016-2018, Alexis de Lattre <alexis.delattre@akretion.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of the authors may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO list:
# - have both python2 and python3 support
# - add automated tests (currently, we only have tests at odoo module level)
# - keep original metadata by copy of pdf_tailer[/Info] ?
from ._version import __version__
from io import BytesIO
from lxml import etree
from tempfile import NamedTemporaryFile
from datetime import datetime
from PyPDF2 import PdfFileWriter, PdfFileReader
from PyPDF2.generic import DictionaryObject, DecodedStreamObject,\
NameObject, createStringObject, ArrayObject
from PyPDF2.utils import b_
from pkg_resources import resource_filename
import os.path
import mimetypes
import hashlib
import logging
FORMAT = '%(asctime)s [%(levelname)s] %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('factur-x')
logger.setLevel(logging.INFO)
FACTURX_FILENAME = 'factur-x.xml'
FACTURX_LEVEL2xsd = {
'minimum': 'FACTUR-X_BASIC-WL.xsd',
'basicwl': 'FACTUR-X_BASIC-WL.xsd',
'basic': 'FACTUR-X_EN16931.xsd',
'en16931': 'FACTUR-X_EN16931.xsd', # comfort
}
FACTURX_LEVEL2xmp = {
'minimum': 'MINIMUM',
'basicwl': 'BASIC WL',
'basic': 'BASIC',
'en16931': 'EN 16931',
}
def check_facturx_xsd(
facturx_xml, flavor='autodetect', facturx_level='autodetect'):
"""
Validate the XML file against the XSD
:param facturx_xml: the Factur-X XML
:type facturx_xml: string, file or etree object
:param flavor: possible values: 'factur-x', 'zugferd' or 'autodetect'
:type flavor: string
:param facturx_level: the level of the Factur-X XML file. Default value
    is 'autodetect'. The only advantage to specifying a particular value instead
    of using the autodetection is a small performance improvement.
Possible values: minimum, basicwl, basic, en16931.
:return: True if the XML is valid against the XSD
raise an error if it is not valid against the XSD
"""
if not facturx_xml:
raise ValueError('Missing facturx_xml argument')
if not isinstance(flavor, (str, unicode)):
raise ValueError('Wrong type for flavor argument')
if not isinstance(facturx_level, (type(None), str, unicode)):
raise ValueError('Wrong type for facturx_level argument')
facturx_xml_etree = None
if isinstance(facturx_xml, str):
xml_string = facturx_xml
elif isinstance(facturx_xml, unicode):
xml_string = facturx_xml.encode('utf8')
elif isinstance(facturx_xml, type(etree.Element('pouet'))):
facturx_xml_etree = facturx_xml
xml_string = etree.tostring(
facturx_xml, pretty_print=True, encoding='UTF-8',
xml_declaration=True)
elif isinstance(facturx_xml, file):
facturx_xml.seek(0)
xml_string = facturx_xml.read()
facturx_xml.close()
if flavor not in ('factur-x', 'facturx', 'zugferd'): # autodetect
if facturx_xml_etree is None:
try:
facturx_xml_etree = etree.fromstring(xml_string)
except Exception as e:
raise Exception(
"The XML syntax is invalid: %s." % unicode(e))
flavor = get_facturx_flavor(facturx_xml_etree)
if flavor in ('factur-x', 'facturx'):
if facturx_level not in FACTURX_LEVEL2xsd:
if facturx_xml_etree is None:
try:
facturx_xml_etree = etree.fromstring(xml_string)
except Exception as e:
raise Exception(
"The XML syntax is invalid: %s." % unicode(e))
facturx_level = get_facturx_level(facturx_xml_etree)
if facturx_level not in FACTURX_LEVEL2xsd:
raise ValueError(
"Wrong level '%s' for Factur-X invoice." % facturx_level)
xsd_filename = FACTURX_LEVEL2xsd[facturx_level]
xsd_file = resource_filename(
__name__, 'xsd/factur-x/%s' % xsd_filename)
elif flavor == 'zugferd':
xsd_file = resource_filename(
__name__, 'xsd/xsd-zugferd/ZUGFeRD1p0.xsd')
xsd_etree_obj = etree.parse(open(xsd_file))
official_schema = etree.XMLSchema(xsd_etree_obj)
try:
t = etree.parse(BytesIO(xml_string))
official_schema.assertValid(t)
logger.info('Factur-X XML file successfully validated against XSD')
except Exception as e:
# if the validation of the XSD fails, we arrive here
logger.error(
"The XML file is invalid against the XML Schema Definition")
logger.error('XSD Error: %s', e)
raise Exception(
"The %s XML file is not valid against the official "
"XML Schema Definition. "
"Here is the error, which may give you an idea on the "
"cause of the problem: %s." % (flavor.capitalize(), unicode(e)))
return True
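# Example usage (illustrative; 'factur-x.xml' is a placeholder path):
#   with open('factur-x.xml') as f:
#       check_facturx_xsd(f.read(), flavor='factur-x', facturx_level='en16931')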
def get_facturx_xml_from_pdf(pdf_invoice, check_xsd=True):
if not pdf_invoice:
raise ValueError('Missing pdf_invoice argument')
if not isinstance(check_xsd, bool):
        raise ValueError('check_xsd argument must be a boolean')
if isinstance(pdf_invoice, str):
pdf_file = BytesIO(pdf_invoice)
elif isinstance(pdf_invoice, file):
pdf_file = pdf_invoice
else:
raise TypeError(
"The first argument of the method get_facturx_xml_from_pdf must "
"be either a string or a file (it is a %s)." % type(pdf_invoice))
xml_string = xml_filename = False
try:
pdf = PdfFileReader(pdf_file)
pdf_root = pdf.trailer['/Root']
logger.debug('pdf_root=%s', pdf_root)
embeddedfiles = pdf_root['/Names']['/EmbeddedFiles']['/Names']
logger.debug('embeddedfiles=%s', embeddedfiles)
# embeddedfiles must contain an even number of elements
if len(embeddedfiles) % 2 != 0:
            raise ValueError('Invalid EmbeddedFiles structure in the PDF')
embeddedfiles_by_two = zip(embeddedfiles, embeddedfiles[1:])[::2]
logger.debug('embeddedfiles_by_two=%s', embeddedfiles_by_two)
for (filename, file_obj) in embeddedfiles_by_two:
logger.debug('found filename=%s', filename)
if filename in (FACTURX_FILENAME, 'ZUGFeRD-invoice.xml'):
xml_file_dict = file_obj.getObject()
logger.debug('xml_file_dict=%s', xml_file_dict)
tmp_xml_string = xml_file_dict['/EF']['/F'].getData()
xml_root = etree.fromstring(tmp_xml_string)
logger.info(
'A valid XML file %s has been found in the PDF file',
filename)
if check_xsd:
check_facturx_xsd(xml_root)
xml_string = tmp_xml_string
xml_filename = filename
else:
xml_string = tmp_xml_string
xml_filename = filename
break
except:
logger.error('No valid XML file found in the PDF')
return (None, None)
logger.info('Returning an XML file %s', xml_filename)
logger.debug('Content of the XML file: %s', xml_string)
return (xml_filename, xml_string)
def _get_pdf_timestamp(date=None):
if date is None:
date = datetime.now()
# example date format: "D:20141006161354+02'00'"
pdf_date = date.strftime("D:%Y%m%d%H%M%S+00'00'")
return pdf_date
def _get_metadata_timestamp():
now_dt = datetime.now()
# example format : 2014-07-25T14:01:22+02:00
meta_date = now_dt.strftime('%Y-%m-%dT%H:%M:%S+00:00')
return meta_date
def _prepare_pdf_metadata_txt(pdf_metadata):
pdf_date = _get_pdf_timestamp()
info_dict = {
'/Author': pdf_metadata.get('author', ''),
'/CreationDate': pdf_date,
'/Creator':
u'factur-x Python lib v%s by Alexis de Lattre' % __version__,
'/Keywords': pdf_metadata.get('keywords', ''),
'/ModDate': pdf_date,
'/Subject': pdf_metadata.get('subject', ''),
'/Title': pdf_metadata.get('title', ''),
}
return info_dict
def _prepare_pdf_metadata_xml(facturx_level, pdf_metadata):
nsmap_x = {'x': 'adobe:ns:meta/'}
nsmap_rdf = {'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'}
nsmap_dc = {'dc': 'http://purl.org/dc/elements/1.1/'}
nsmap_pdf = {'pdf': 'http://ns.adobe.com/pdf/1.3/'}
nsmap_xmp = {'xmp': 'http://ns.adobe.com/xap/1.0/'}
nsmap_pdfaid = {'pdfaid': 'http://www.aiim.org/pdfa/ns/id/'}
nsmap_fx = {
'fx': 'urn:factur-x:pdfa:CrossIndustryDocument:invoice:1p0#'}
ns_x = '{%s}' % nsmap_x['x']
ns_dc = '{%s}' % nsmap_dc['dc']
ns_rdf = '{%s}' % nsmap_rdf['rdf']
ns_pdf = '{%s}' % nsmap_pdf['pdf']
ns_xmp = '{%s}' % nsmap_xmp['xmp']
ns_pdfaid = '{%s}' % nsmap_pdfaid['pdfaid']
ns_fx = '{%s}' % nsmap_fx['fx']
ns_xml = '{http://www.w3.org/XML/1998/namespace}'
root = etree.Element(ns_x + 'xmpmeta', nsmap=nsmap_x)
rdf = etree.SubElement(
root, ns_rdf + 'RDF', nsmap=nsmap_rdf)
desc_pdfaid = etree.SubElement(
rdf, ns_rdf + 'Description', nsmap=nsmap_pdfaid)
desc_pdfaid.set(ns_rdf + 'about', '')
etree.SubElement(
desc_pdfaid, ns_pdfaid + 'part').text = '3'
etree.SubElement(
desc_pdfaid, ns_pdfaid + 'conformance').text = 'B'
desc_dc = etree.SubElement(
rdf, ns_rdf + 'Description', nsmap=nsmap_dc)
desc_dc.set(ns_rdf + 'about', '')
dc_title = etree.SubElement(desc_dc, ns_dc + 'title')
dc_title_alt = etree.SubElement(dc_title, ns_rdf + 'Alt')
dc_title_alt_li = etree.SubElement(
dc_title_alt, ns_rdf + 'li')
dc_title_alt_li.text = pdf_metadata.get('title', '')
dc_title_alt_li.set(ns_xml + 'lang', 'x-default')
dc_creator = etree.SubElement(desc_dc, ns_dc + 'creator')
dc_creator_seq = etree.SubElement(dc_creator, ns_rdf + 'Seq')
etree.SubElement(
dc_creator_seq, ns_rdf + 'li').text = pdf_metadata.get('author', '')
dc_desc = etree.SubElement(desc_dc, ns_dc + 'description')
dc_desc_alt = etree.SubElement(dc_desc, ns_rdf + 'Alt')
dc_desc_alt_li = etree.SubElement(
dc_desc_alt, ns_rdf + 'li')
dc_desc_alt_li.text = pdf_metadata.get('subject', '')
dc_desc_alt_li.set(ns_xml + 'lang', 'x-default')
desc_adobe = etree.SubElement(
rdf, ns_rdf + 'Description', nsmap=nsmap_pdf)
desc_adobe.set(ns_rdf + 'about', '')
producer = etree.SubElement(
desc_adobe, ns_pdf + 'Producer')
producer.text = 'PyPDF2'
desc_xmp = etree.SubElement(
rdf, ns_rdf + 'Description', nsmap=nsmap_xmp)
desc_xmp.set(ns_rdf + 'about', '')
creator = etree.SubElement(
desc_xmp, ns_xmp + 'CreatorTool')
creator.text = 'factur-x python lib v%s by Alexis de Lattre' % __version__
timestamp = _get_metadata_timestamp()
etree.SubElement(desc_xmp, ns_xmp + 'CreateDate').text = timestamp
etree.SubElement(desc_xmp, ns_xmp + 'ModifyDate').text = timestamp
xmp_file = resource_filename(
__name__, 'xmp/Factur-X_extension_schema.xmp')
# Reason for defining a parser below:
# http://lxml.de/FAQ.html#why-doesn-t-the-pretty-print-option-reformat-my-xml-output
parser = etree.XMLParser(remove_blank_text=True)
facturx_ext_schema_root = etree.parse(open(xmp_file), parser)
# The Factur-X extension schema must be embedded into each PDF document
facturx_ext_schema_desc_xpath = facturx_ext_schema_root.xpath(
'//rdf:Description', namespaces=nsmap_rdf)
rdf.append(facturx_ext_schema_desc_xpath[1])
# Now is the Factur-X description tag
facturx_desc = etree.SubElement(
rdf, ns_rdf + 'Description', nsmap=nsmap_fx)
facturx_desc.set(ns_rdf + 'about', '')
fx_doc_type = etree.SubElement(
facturx_desc, ns_fx + 'DocumentType', nsmap=nsmap_fx)
fx_doc_type.text = 'INVOICE'
fx_doc_filename = etree.SubElement(
facturx_desc, ns_fx + 'DocumentFileName', nsmap=nsmap_fx)
fx_doc_filename.text = FACTURX_FILENAME
fx_doc_version = etree.SubElement(
facturx_desc, ns_fx + 'Version', nsmap=nsmap_fx)
fx_doc_version.text = '1.0'
fx_conformance_level = etree.SubElement(
facturx_desc, ns_fx + 'ConformanceLevel', nsmap=nsmap_fx)
fx_conformance_level.text = FACTURX_LEVEL2xmp[facturx_level]
# TODO: should be UTF-16be ??
xml_str = etree.tostring(
root, pretty_print=True, encoding="UTF-8", xml_declaration=False)
head = u'<?xpacket begin="\ufeff" id="W5M0MpCehiHzreSzNTczkc9d"?>'.encode(
'utf-8')
tail = u'<?xpacket end="w"?>'.encode('utf-8')
xml_final_str = head + xml_str + tail
logger.debug('metadata XML:')
logger.debug(xml_final_str)
return xml_final_str
# def createByteObject(string):
# string_to_encode = u'\ufeff' + string
# x = string_to_encode.encode('utf-16be')
# return ByteStringObject(x)
def _filespec_additional_attachments(
pdf_filestream, name_arrayobj_cdict, file_dict, file_bin):
filename = file_dict['filename']
logger.debug('_filespec_additional_attachments filename=%s', filename)
mod_date_pdf = _get_pdf_timestamp(file_dict['mod_date'])
md5sum = hashlib.md5(file_bin).hexdigest()
md5sum_obj = createStringObject(md5sum)
params_dict = DictionaryObject({
NameObject('/CheckSum'): md5sum_obj,
NameObject('/ModDate'): createStringObject(mod_date_pdf),
NameObject('/Size'): NameObject(str(len(file_bin))),
})
file_entry = DecodedStreamObject()
file_entry.setData(file_bin)
file_mimetype = mimetypes.guess_type(filename)[0]
if not file_mimetype:
file_mimetype = 'application/octet-stream'
file_mimetype_insert = '/' + file_mimetype.replace('/', '#2f')
file_entry.update({
NameObject("/Type"): NameObject("/EmbeddedFile"),
NameObject("/Params"): params_dict,
NameObject("/Subtype"): NameObject(file_mimetype_insert),
})
file_entry_obj = pdf_filestream._addObject(file_entry)
ef_dict = DictionaryObject({
NameObject("/F"): file_entry_obj,
})
fname_obj = createStringObject(filename)
filespec_dict = DictionaryObject({
NameObject("/AFRelationship"): NameObject("/Unspecified"),
NameObject("/Desc"): createStringObject(file_dict.get('desc', '')),
NameObject("/Type"): NameObject("/Filespec"),
NameObject("/F"): fname_obj,
NameObject("/EF"): ef_dict,
NameObject("/UF"): fname_obj,
})
filespec_obj = pdf_filestream._addObject(filespec_dict)
name_arrayobj_cdict[fname_obj] = filespec_obj
def _facturx_update_metadata_add_attachment(
pdf_filestream, facturx_xml_str, pdf_metadata, facturx_level,
output_intents=[], additional_attachments={}):
'''This method is inspired from the code of the addAttachment()
method of the PyPDF2 lib'''
# The entry for the file
md5sum = hashlib.md5(facturx_xml_str).hexdigest()
md5sum_obj = createStringObject(md5sum)
params_dict = DictionaryObject({
NameObject('/CheckSum'): md5sum_obj,
NameObject('/ModDate'): createStringObject(_get_pdf_timestamp()),
NameObject('/Size'): NameObject(str(len(facturx_xml_str))),
})
file_entry = DecodedStreamObject()
file_entry.setData(facturx_xml_str) # here we integrate the file itself
file_entry.update({
NameObject("/Type"): NameObject("/EmbeddedFile"),
NameObject("/Params"): params_dict,
# 2F is '/' in hexadecimal
NameObject("/Subtype"): NameObject("/text#2Fxml"),
})
file_entry_obj = pdf_filestream._addObject(file_entry)
# The Filespec entry
ef_dict = DictionaryObject({
NameObject("/F"): file_entry_obj,
NameObject('/UF'): file_entry_obj,
})
fname_obj = createStringObject(FACTURX_FILENAME)
filespec_dict = DictionaryObject({
NameObject("/AFRelationship"): NameObject("/Data"),
NameObject("/Desc"): createStringObject("Factur-X Invoice"),
NameObject("/Type"): NameObject("/Filespec"),
NameObject("/F"): fname_obj,
NameObject("/EF"): ef_dict,
NameObject("/UF"): fname_obj,
})
filespec_obj = pdf_filestream._addObject(filespec_dict)
name_arrayobj_cdict = {fname_obj: filespec_obj}
for attach_bin, attach_dict in additional_attachments.items():
_filespec_additional_attachments(
pdf_filestream, name_arrayobj_cdict, attach_dict, attach_bin)
logger.debug('name_arrayobj_cdict=%s', name_arrayobj_cdict)
name_arrayobj_content_sort = list(
sorted(name_arrayobj_cdict.items(), key=lambda x: x[0]))
logger.debug('name_arrayobj_content_sort=%s', name_arrayobj_content_sort)
name_arrayobj_content_final = []
af_list = []
for (fname_obj, filespec_obj) in name_arrayobj_content_sort:
name_arrayobj_content_final += [fname_obj, filespec_obj]
af_list.append(filespec_obj)
embedded_files_names_dict = DictionaryObject({
NameObject("/Names"): ArrayObject(name_arrayobj_content_final),
})
# Then create the entry for the root, as it needs a
# reference to the Filespec
embedded_files_dict = DictionaryObject({
NameObject("/EmbeddedFiles"): embedded_files_names_dict,
})
res_output_intents = []
logger.debug('output_intents=%s', output_intents)
for output_intent_dict, dest_output_profile_dict in output_intents:
dest_output_profile_obj = pdf_filestream._addObject(
dest_output_profile_dict)
# TODO detect if there are no other objects in output_intent_dest_obj
# than /DestOutputProfile
output_intent_dict.update({
NameObject("/DestOutputProfile"): dest_output_profile_obj,
})
output_intent_obj = pdf_filestream._addObject(output_intent_dict)
res_output_intents.append(output_intent_obj)
# Update the root
metadata_xml_str = _prepare_pdf_metadata_xml(facturx_level, pdf_metadata)
metadata_file_entry = DecodedStreamObject()
metadata_file_entry.setData(metadata_xml_str)
metadata_file_entry.update({
NameObject('/Subtype'): NameObject('/XML'),
NameObject('/Type'): NameObject('/Metadata'),
})
metadata_obj = pdf_filestream._addObject(metadata_file_entry)
af_value_obj = pdf_filestream._addObject(ArrayObject(af_list))
pdf_filestream._root_object.update({
NameObject("/AF"): af_value_obj,
NameObject("/Metadata"): metadata_obj,
NameObject("/Names"): embedded_files_dict,
# show attachments when opening PDF
NameObject("/PageMode"): NameObject("/UseAttachments"),
})
logger.debug('res_output_intents=%s', res_output_intents)
if res_output_intents:
pdf_filestream._root_object.update({
NameObject("/OutputIntents"): ArrayObject(res_output_intents),
})
metadata_txt_dict = _prepare_pdf_metadata_txt(pdf_metadata)
pdf_filestream.addMetadata(metadata_txt_dict)
def _extract_base_info(facturx_xml_etree):
namespaces = facturx_xml_etree.nsmap
date_xpath = facturx_xml_etree.xpath(
'//rsm:ExchangedDocument/ram:IssueDateTime/udt:DateTimeString',
namespaces=namespaces)
date = date_xpath[0].text
date_dt = datetime.strptime(date, '%Y%m%d')
inv_num_xpath = facturx_xml_etree.xpath(
'//rsm:ExchangedDocument/ram:ID', namespaces=namespaces)
inv_num = inv_num_xpath[0].text
seller_xpath = facturx_xml_etree.xpath(
'//ram:ApplicableHeaderTradeAgreement/ram:SellerTradeParty/ram:Name',
namespaces=namespaces)
seller = seller_xpath[0].text
doc_type_xpath = facturx_xml_etree.xpath(
'//rsm:ExchangedDocument/ram:TypeCode', namespaces=namespaces)
doc_type = doc_type_xpath[0].text
base_info = {
'seller': seller,
'number': inv_num,
'date': date_dt,
'doc_type': doc_type,
}
logger.debug('Extraction of base_info: %s', base_info)
return base_info
def _base_info2pdf_metadata(base_info):
if base_info['doc_type'] == '381':
doc_type_name = u'Refund'
else:
doc_type_name = u'Invoice'
date_str = datetime.strftime(base_info['date'], '%Y-%m-%d')
title = '%s: %s %s' % (
base_info['seller'], doc_type_name, base_info['number'])
subject = 'Factur-X %s %s dated %s issued by %s' % (
doc_type_name, base_info['number'], date_str, base_info['seller'])
pdf_metadata = {
'author': base_info['seller'],
'keywords': u'%s, Factur-X' % doc_type_name,
'title': title,
'subject': subject,
}
logger.debug('Converted base_info to pdf_metadata: %s', pdf_metadata)
return pdf_metadata
def get_facturx_level(facturx_xml_etree):
if not isinstance(facturx_xml_etree, type(etree.Element('pouet'))):
raise ValueError('facturx_xml_etree must be an etree.Element() object')
namespaces = facturx_xml_etree.nsmap
doc_id_xpath = facturx_xml_etree.xpath(
"//rsm:ExchangedDocumentContext"
"/ram:GuidelineSpecifiedDocumentContextParameter"
"/ram:ID", namespaces=namespaces)
if not doc_id_xpath:
raise ValueError(
"This XML is not a Factur-X XML because it misses the XML tag "
"ExchangedDocumentContext/"
"GuidelineSpecifiedDocumentContextParameter/ID.")
doc_id = doc_id_xpath[0].text
level = doc_id.split(':')[-1]
if level not in FACTURX_LEVEL2xmp:
level = doc_id.split(':')[-2]
if level not in FACTURX_LEVEL2xmp:
raise ValueError(
"Invalid Factur-X URN: '%s'" % doc_id)
logger.info('Factur-X level is %s (autodetected)', level)
return level
def get_facturx_flavor(facturx_xml_etree):
if not isinstance(facturx_xml_etree, type(etree.Element('pouet'))):
raise ValueError('facturx_xml_etree must be an etree.Element() object')
if facturx_xml_etree.tag.startswith('{urn:un:unece:uncefact:'):
flavor = 'factur-x'
elif facturx_xml_etree.tag.startswith('{urn:ferd:'):
flavor = 'zugferd'
else:
raise Exception(
"Could not detect if the invoice is a Factur-X or ZUGFeRD "
"invoice.")
logger.info('Factur-X flavor is %s (autodetected)', flavor)
return flavor
def _get_original_output_intents(original_pdf):
output_intents = []
try:
pdf_root = original_pdf.trailer['/Root']
ori_output_intents = pdf_root['/OutputIntents']
logger.debug('output_intents_list=%s', ori_output_intents)
for ori_output_intent in ori_output_intents:
ori_output_intent_dict = ori_output_intent.getObject()
logger.debug('ori_output_intents_dict=%s', ori_output_intent_dict)
dest_output_profile_dict =\
ori_output_intent_dict['/DestOutputProfile'].getObject()
output_intents.append(
(ori_output_intent_dict, dest_output_profile_dict))
except:
pass
return output_intents
def generate_facturx_from_binary(
pdf_invoice, facturx_xml, facturx_level='autodetect',
check_xsd=True, pdf_metadata=None):
"""
Generate a Factur-X invoice from a regular PDF invoice and a factur-X XML
file. The method uses a binary as input (the regular PDF invoice) and
returns a binary as output (the Factur-X PDF invoice).
:param pdf_invoice: the regular PDF invoice as binary string
:type pdf_invoice: string
:param facturx_xml: the Factur-X XML
:type facturx_xml: string, file or etree object
:param facturx_level: the level of the Factur-X XML file. Default value
    is 'autodetect'. The only advantage of specifying a particular value
    instead of using autodetection is a very small performance improvement.
Possible values: minimum, basicwl, basic, en16931.
:type facturx_level: string
    :param check_xsd: if enabled, checks the Factur-X XML file against the XSD
(XML Schema Definition). If this step has already been performed
beforehand, you should disable this feature to avoid a double check
and get a small performance improvement.
:type check_xsd: boolean
:param pdf_metadata: Specify the metadata of the generated Factur-X PDF.
If pdf_metadata is None (default value), this lib will generate some
metadata in English by extracting relevant info from the Factur-X XML.
Here is an example for the pdf_metadata argument:
pdf_metadata = {
'author': 'Akretion',
'keywords': 'Factur-X, Invoice',
'title': 'Akretion: Invoice I1242',
'subject':
'Factur-X invoice I1242 dated 2017-08-17 issued by Akretion',
}
    If you pass the pdf_metadata argument, the automatic generation based on
    the content of the Factur-X XML file is skipped, which brings a very small
    performance improvement.
:type pdf_metadata: dict
:return: The Factur-X PDF file as binary string.
:rtype: string
"""
if not isinstance(pdf_invoice, str):
raise ValueError('pdf_invoice argument must be a string')
facturx_pdf_invoice = False
with NamedTemporaryFile(prefix='invoice-facturx-', suffix='.pdf') as f:
f.write(pdf_invoice)
generate_facturx_from_file(
f, facturx_xml, facturx_level=facturx_level,
check_xsd=check_xsd, pdf_metadata=pdf_metadata)
f.seek(0)
facturx_pdf_invoice = f.read()
return facturx_pdf_invoice
def generate_facturx_from_file(
pdf_invoice, facturx_xml, facturx_level='autodetect',
check_xsd=True, pdf_metadata=None, output_pdf_file=None,
additional_attachments=None):
"""
Generate a Factur-X invoice from a regular PDF invoice and a factur-X XML
file. The method uses a file as input (regular PDF invoice) and re-writes
the file (Factur-X PDF invoice).
:param pdf_invoice: the regular PDF invoice as file path
(type string) or as file object
:type pdf_invoice: string or file
:param facturx_xml: the Factur-X XML
:type facturx_xml: string, file or etree object
:param facturx_level: the level of the Factur-X XML file. Default value
    is 'autodetect'. The only advantage of specifying a particular value
    instead of using autodetection is a very small performance improvement.
Possible values: minimum, basicwl, basic, en16931.
:type facturx_level: string
    :param check_xsd: if enabled, checks the Factur-X XML file against the XSD
(XML Schema Definition). If this step has already been performed
beforehand, you should disable this feature to avoid a double check
and get a small performance improvement.
:type check_xsd: boolean
:param pdf_metadata: Specify the metadata of the generated Factur-X PDF.
If pdf_metadata is None (default value), this lib will generate some
metadata in English by extracting relevant info from the Factur-X XML.
Here is an example for the pdf_metadata argument:
pdf_metadata = {
'author': 'Akretion',
'keywords': 'Factur-X, Invoice',
'title': 'Akretion: Invoice I1242',
'subject':
'Factur-X invoice I1242 dated 2017-08-17 issued by Akretion',
}
    If you pass the pdf_metadata argument, the automatic generation based on
    the content of the Factur-X XML file is skipped, which brings a very small
    performance improvement.
:type pdf_metadata: dict
:param output_pdf_file: File Path to the output Factur-X PDF file
:type output_pdf_file: string or unicode
    :param additional_attachments: Specify the other files that you want to
    embed in the PDF file. It is a dict where each key is a file path and each
    value is the description of the file (as unicode or string).
:type additional_attachments: dict
    :return: Returns True. This method re-writes the input PDF invoice file,
    unless output_pdf_file is provided.
:rtype: bool
"""
start_chrono = datetime.now()
logger.debug('1st arg pdf_invoice type=%s', type(pdf_invoice))
logger.debug('2nd arg facturx_xml type=%s', type(facturx_xml))
logger.debug('optional arg facturx_level=%s', facturx_level)
logger.debug('optional arg check_xsd=%s', check_xsd)
logger.debug('optional arg pdf_metadata=%s', pdf_metadata)
logger.debug(
'optional arg additional_attachments=%s', additional_attachments)
if not pdf_invoice:
raise ValueError('Missing pdf_invoice argument')
if not facturx_xml:
raise ValueError('Missing facturx_xml argument')
if not isinstance(facturx_level, (str, unicode)):
raise ValueError('Wrong facturx_level argument')
if not isinstance(check_xsd, bool):
raise ValueError('check_xsd argument must be a boolean')
    if not isinstance(pdf_metadata, (dict, type(None))):
        raise ValueError('pdf_metadata argument must be a dict or None')
if not isinstance(additional_attachments, (dict, type(None))):
raise ValueError(
'additional_attachments argument must be a dict or None')
if not isinstance(output_pdf_file, (type(None), str, unicode)):
raise ValueError('output_pdf_file argument must be a string or None')
if isinstance(pdf_invoice, (str, unicode)):
file_type = 'path'
else:
file_type = 'file'
xml_root = None
if isinstance(facturx_xml, str):
xml_string = facturx_xml
elif isinstance(facturx_xml, unicode):
xml_string = facturx_xml.encode('utf8')
elif isinstance(facturx_xml, type(etree.Element('pouet'))):
xml_root = facturx_xml
xml_string = etree.tostring(
xml_root, pretty_print=True, encoding='UTF-8',
xml_declaration=True)
elif isinstance(facturx_xml, file):
facturx_xml.seek(0)
xml_string = facturx_xml.read()
facturx_xml.close()
else:
raise TypeError(
"The second argument of the method generate_facturx must be "
"either a string, an etree.Element() object or a file "
"(it is a %s)." % type(facturx_xml))
additional_attachments_read = {}
if additional_attachments:
for attach_filepath, attach_desc in additional_attachments.items():
filename = os.path.basename(attach_filepath)
mod_timestamp = os.path.getmtime(attach_filepath)
mod_dt = datetime.fromtimestamp(mod_timestamp)
            with open(attach_filepath, 'rb') as fa:
                additional_attachments_read[fa.read()] = {
                    'filename': filename,
                    'desc': attach_desc,
                    'mod_date': mod_dt,
                }
if pdf_metadata is None:
if xml_root is None:
xml_root = etree.fromstring(xml_string)
base_info = _extract_base_info(xml_root)
pdf_metadata = _base_info2pdf_metadata(base_info)
else:
# clean-up pdf_metadata dict
for key, value in pdf_metadata.iteritems():
if not isinstance(value, (str, unicode)):
pdf_metadata[key] = ''
facturx_level = facturx_level.lower()
if facturx_level not in FACTURX_LEVEL2xsd:
if xml_root is None:
xml_root = etree.fromstring(xml_string)
logger.debug('Factur-X level will be autodetected')
facturx_level = get_facturx_level(xml_root)
if check_xsd:
check_facturx_xsd(
xml_string, flavor='factur-x', facturx_level=facturx_level)
original_pdf = PdfFileReader(pdf_invoice)
# Extract /OutputIntents obj from original invoice
output_intents = _get_original_output_intents(original_pdf)
new_pdf_filestream = PdfFileWriter()
new_pdf_filestream._header = b_("%PDF-1.6")
new_pdf_filestream.appendPagesFromReader(original_pdf)
original_pdf_id = original_pdf.trailer.get('/ID')
logger.debug('original_pdf_id=%s', original_pdf_id)
if original_pdf_id:
new_pdf_filestream._ID = original_pdf_id
# else : generate some ?
_facturx_update_metadata_add_attachment(
new_pdf_filestream, xml_string, pdf_metadata, facturx_level,
output_intents=output_intents,
additional_attachments=additional_attachments_read)
if output_pdf_file:
with open(output_pdf_file, 'wb') as output_f:
new_pdf_filestream.write(output_f)
else:
if file_type == 'path':
with open(pdf_invoice, 'wb') as f:
new_pdf_filestream.write(f)
elif file_type == 'file':
new_pdf_filestream.write(pdf_invoice)
logger.info('%s file added to PDF invoice', FACTURX_FILENAME)
end_chrono = datetime.now()
logger.info(
'Factur-X invoice generated in %s seconds',
(end_chrono - start_chrono).total_seconds())
return True
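def _example_generate_facturx(pdf_path='invoice.pdf', xml_path='factur-x.xml'):
    # Illustrative helper only (both paths are placeholders, not part of the
    # library API): embeds the Factur-X XML into the regular PDF invoice in
    # place, relying on the level autodetection and XSD validation documented
    # above.
    with open(xml_path, 'rb') as xml_file:
        generate_facturx_from_file(pdf_path, xml_file.read())
    return True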
|
from brownie import network, config, accounts, MockV3Aggregator
from web3 import Web3
FORKED_LOCAL_ENVIRONMENT = ["mainnet-fork", "mainnet-fork-dev"]
LOCAL_BLOCKCHAIN_ENVIRONMENTS = ["development", "ganache-local"]
DECIMALS = 8
STARTING_PRICE = 200000000000
def get_account():
if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS or network.show_active() in FORKED_LOCAL_ENVIRONMENT:
return accounts[0]
else:
return accounts.add(config["wallets"]["from_key"])
def deploy_mocks():
print(f"Active network is {network.show_active()}")
print("Deploying Mocks...")
if len(MockV3Aggregator) <= 0:
        mock_aggregator = MockV3Aggregator.deploy(
            DECIMALS,
            STARTING_PRICE,  # already scaled to 8 decimals (2000 * 10**8), so no toWei conversion
            {"from": get_account()}
        )
print("Mocks Deployed!")
|
from setuptools import find_packages, setup
with open("README.md", encoding="utf-8", mode="r") as file:
long_desc = file.read()
setup(
name='perspective.py',
packages=find_packages(include=['perspective']),
version='0.3.4',
description='An easy-to-use API wrapper for Perspective API written in Python.',
long_description=long_desc,
author='Yilmaz04',
author_email="ymzymz2007@gmail.com",
license='MIT',
install_requires=['google-api-python-client', 'pycountry', 'httplib2', 'typing', "matplotlib"],
url="https://github.com/Yilmaz4/perspective.py/",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
download_url="https://github.com/Yilmaz4/perspective.py/archive/refs/tags/v0.3.0.tar.gz",
keywords=["perspective-api", "api-wrapper", "python", "api"],
long_description_content_type='text/markdown'
)
|
import numpy as np
import matplotlib.pyplot as plt
import sounddevice
from scipy.io import wavfile as wav
fs, wave = wav.read('piano1.wav')
#fs is sampling frequency
# fs = 20000
length = wave.shape[0] / fs
time = np.linspace(0,length,int(length*fs),endpoint=False)
# freq_signal = 1000
#wave is the sum of sine wave(1Hz) and cosine wave(10 Hz)
# wave = np.sin(np.pi*time)+ np.cos(np.pi*time)
# wave = [np.sin(2 * np.pi * freq_signal * x1) for x1 in time]
wave = np.array(wave)
wave = wave[:, 1]  # keep only one channel of the stereo recording
print(wave.shape)
# sounddevice.play(wave, fs)
#wave = np.exp(2j * np.pi * time )
plt.plot(time, wave)
plt.xlim(0, length)
plt.xlabel("time (second)")
plt.title('Original Signal in Time Domain')
plt.show()
# Compute the one-dimensional discrete Fourier Transform.
fft_wave = np.fft.fft(wave)
cur_max = np.abs(fft_wave).max()  # largest magnitude in the spectrum
# print(np.int(cur_max))
fft_wave = fft_wave / cur_max  # normalise so both parts fit in [-1, 1]
# Compute the Discrete Fourier Transform sample frequencies.
fft_fre = np.fft.fftfreq(n=wave.shape[0], d=1/fs)
plt.subplot(211)
plt.plot(fft_fre, fft_wave.real, label="Real part")
plt.xlim(-20000,20000)
plt.ylim(-1,1)
plt.legend(loc=1)
plt.title("FFT in Frequency Domain")
plt.subplot(212)
plt.plot(fft_fre, fft_wave.imag,label="Imaginary part")
plt.xlim(-20000,20000)
plt.ylim(-1,1)
plt.legend(loc=1)
plt.xlabel("frequency (Hz)")
plt.show()
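# A short verification sketch (not in the original script): the inverse FFT of
# the raw spectrum should reproduce the time-domain signal up to rounding error.
reconstructed = np.fft.ifft(np.fft.fft(wave))
print("Max reconstruction error:", np.max(np.abs(reconstructed.real - wave)))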
|
from ToonHood import ToonHood
from panda3d.core import *
from direct.directnotify.DirectNotifyGlobal import directNotify
from toontown.toonbase import ToontownGlobals
from toontown.safezone.TFSafeZoneLoader import TFSafeZoneLoader
from toontown.town.TFTownLoader import TFTownLoader
import SkyUtil
from direct.interval.IntervalGlobal import *
from toontown.toonfest.DayAndNightGlobals import *
class TFHood(ToonHood):
notify = directNotify.newCategory('TFHood')
def __init__(self, parentFSM, doneEvent, dnaStore, hoodId):
ToonHood.__init__(self, parentFSM, doneEvent, dnaStore, hoodId)
self.id = ToontownGlobals.ToonFest
self.townLoaderClass = TFTownLoader
self.safeZoneLoaderClass = TFSafeZoneLoader
self.storageDNAFile = 'phase_6/dna/storage_TF.jazz'
self.skyFile = 'phase_3.5/models/props/TT_sky'
self.spookySkyFile = 'phase_3.5/models/props/HW_2016_Sky'
self.titleColor = (1.0, 0.5, 0.4, 1.0)
self.sunriseTrack = None
self.sunsetTrack = None
self.FIX_TOD_DURATION = 1
return
def load(self):
ToonHood.load(self)
self.parentFSM.getStateNamed('TFHood').addChild(self.fsm)
def enter(self, *args):
ToonHood.enter(self, *args)
base.camLens.setNearFar(ToontownGlobals.SpeedwayCameraNear, ToontownGlobals.SpeedwayCameraFar)
def exit(self):
base.camLens.setNearFar(ToontownGlobals.DefaultCameraNear, ToontownGlobals.DefaultCameraFar)
ToonHood.exit(self)
def skyTrack(self, task):
return SkyUtil.cloudSkyTrack(task)
def setSunrise(self):
self.sunriseTrack = Sequence(render.colorScaleInterval(SUNRISE_TIME, Vec4(0.29, 0.56, 1.0, 1)))
if config.GetBool('toonfest-day-night', False):
self.sunriseTrack.start()
def setMorning(self):
base.cr.playGame.hood.sky.setTransparency(1)
SkyUtil.startCloudSky(self)
self.sky.setTransparency(TransparencyAttrib.MDual, 1)
self.sky.setScale(3)
def setSunset(self):
self.sunsetTrack = Sequence(render.colorScaleInterval(SUNSET_TIME, Vec4(0.22, 0.16, 0.76, 1)))
if config.GetBool('toonfest-day-night', False):
self.sunsetTrack.start()
def setNight(self):
pass
def unload(self):
self.parentFSM.getStateNamed('TFHood').removeChild(self.fsm)
if self.sunriseTrack is not None:
if self.sunriseTrack.isPlaying():
self.sunriseTrack.finish()
self.sunriseTrack = None
del self.sunriseTrack
if self.sunsetTrack is not None:
if self.sunsetTrack.isPlaying():
self.sunsetTrack.finish()
self.sunsetTrack = None
del self.sunsetTrack
Sequence(render.colorScaleInterval(0, Vec4(1, 1, 1, 1))).start()
ToonHood.unload(self)
return
def startSpookySky(self):
if hasattr(self, 'sky') and self.sky:
self.stopSky()
self.sky = loader.loadModel(self.spookySkyFile)
self.sky.setTag('sky', 'Halloween')
self.sky.setScale(5.2)
self.sky.setDepthTest(0)
self.sky.setDepthWrite(0)
self.sky.setColor(0.5, 0.5, 0.5, 1)
self.sky.setBin('background', 100)
self.sky.setFogOff()
self.sky.reparentTo(camera)
self.sky.setTransparency(TransparencyAttrib.MDual, 1)
fadeIn = self.sky.colorScaleInterval(1.5, Vec4(1, 1, 1, 1), startColorScale=Vec4(1, 1, 1, 0.25), blendType='easeInOut')
fadeIn.start()
self.sky.setZ(0.0)
self.sky.setHpr(0.0, 0.0, 0.0)
ce = CompassEffect.make(NodePath(), CompassEffect.PRot | CompassEffect.PZ)
self.sky.node().setEffect(ce)
|
#!/usr/bin/env python
"""
Util script to handle instance creation over Nova using HTTP APIs for
Nova Agent CI tasks.
"""
__author__ = "Shivaling Sannalli, abhishekkr"
import urllib2
import json
import time
import ConfigParser
import logging
import os
def _http_requests_json(url, headers=None, body=None):
    """ General function for completing HTTP Requests over JSON. """
    print("HTTP Request: %s" % url)
    headers = dict(headers or {})
    headers["Content-Type"] = "application/json"
headers["Accept"] = "application/json"
request = urllib2.Request(url, body, headers)
response_body = urllib2.urlopen(request)
return json.load(response_body)
def get_auth_token(username, api_key, identity_url):
""" Get HTTP AuthToken for RAX Actions. """
body = """{
"auth": {
"RAX-KSKEY:apiKeyCredentials": {
"username": "%s", "apiKey": "%s"
}
}
}""" % (username, api_key)
data = _http_requests_json(identity_url, body=body)
return data["access"]["token"]["id"]
def wait_server_to_active(url, auth_token, wait=20, timeout=600):
""" Waits for the created server to activate for "wait_time". """
time.sleep(wait)
resp = _http_requests_json(url, {"X-Auth-Token": auth_token})
status = resp["server"]["status"]
print("Server status: %s" % status)
if str(status) == "BUILD" and wait < timeout:
wait += wait/2
return wait_server_to_active(url, auth_token, wait, timeout)
return resp
def create_configfile(IPv4, admin_pass):
""" Dump information of created server to a file. """
config = ConfigParser.RawConfigParser()
config.add_section("credentials")
config.set("credentials", "IPv4", "%s" % IPv4)
config.set("credentials", "username", "root")
config.set("credentials", "adminpass", "%s" % admin_pass)
filename = "fabfile-%s.cfg" % os.getpid()
with open(filename, "wb") as configfile:
config.write(configfile)
print("Configuration for %s have been dumped to file %s" % (IPv4, filename))
return filename
def load_configurations():
image_name = os.getenv("IMAGE_NAME")
if not image_name:
image_name = "CentOS60"
    print(image_name)
config = ConfigParser.RawConfigParser()
nova_agent_configuration = os.getenv("NOVA_AGENT_CONFIGURATION")
if not nova_agent_configuration:
nova_agent_configuration = os.path.join(os.getcwd(), "server_configurations.cfg")
config.read(nova_agent_configuration)
env = os.getenv("ENV_NAME")
if not env:
env = config.get("environment", "name")
return {
"tenant_id": config.get(env, "tenant_id"),
"username": config.get(env, "username"),
"api_key": config.get(env, "api_key"),
"identity_url": config.get(env, "identity_url"),
"cloud_url": config.get(env, "cloud_url"),
"image_id": config.get(env, image_name),
"flavor_id": config.get(env, "flavor_id"),
"server_name": "testagent"+image_name
}
def create_server(initial_wait=120):
""" Create an instance over Nova using HTTP API. """
rax_config = load_configurations()
auth_token = get_auth_token(rax_config["username"], rax_config["api_key"], rax_config["identity_url"])
print("Auth Token: %s" % auth_token)
url = "%s" % rax_config["cloud_url"] + "%s/servers" % rax_config["tenant_id"]
headers = {"X-Auth-Token": auth_token}
body = """{
"server": {
"imageRef": "%(image_id)s",
"flavorRef": "%(flavor_id)s",
"name": "%(server_name)s"
}
}""" % rax_config
resp = _http_requests_json(url, headers, body)
admin_pass = resp["server"]["adminPass"]
server_id = resp["server"]["id"]
server_url = resp["server"]["links"][0]["href"]
print("Server Details\nID:%s\nURL: %s" % (server_url, server_id))
print("Initial wait time to get server status: %s sec" % initial_wait)
time.sleep(initial_wait)
resp = wait_server_to_active(server_url, auth_token)
print(resp)
ipv4 = resp["server"]["accessIPv4"]
print("IPv4 : %s" % ipv4)
return create_configfile(ipv4, admin_pass)
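if __name__ == "__main__":
    # Entry-point sketch (not in the original script): create a server and
    # report where its connection credentials were dumped.
    config_file = create_server()
    print("Server credentials written to %s" % config_file)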
|
from abc import ABCMeta, abstractmethod
from typing import Sequence
from misc.base_types import Controller
from environments.abstract_environments import GroundTruthSupportEnv
from .abstract_models import ForwardModelWithDefaults
from environments import env_from_string
import numpy as np
from misc.rolloutbuffer import RolloutBuffer, Rollout
from misc.seeding import Seeding
class AbstractGroundTruthModel(ForwardModelWithDefaults, metaclass=ABCMeta):
@abstractmethod
def set_state(self, state):
pass
@abstractmethod
def get_state(self, observation):
pass
class GroundTruthModel(AbstractGroundTruthModel):
simulated_env: GroundTruthSupportEnv
def __init__(self, **kwargs):
super().__init__(**kwargs)
if isinstance(self.env, GroundTruthSupportEnv):
self.simulated_env = env_from_string(self.env.name, **self.env.init_kwargs)
self.simulated_env.reset()
self.is_trained = True
else:
raise NotImplementedError("Environment does not support ground truth forward model")
def close(self):
self.simulated_env.close()
def train(self, buffer):
pass
def reset(self, observation):
self.simulated_env.set_state_from_observation(observation)
return self.get_state(observation)
def got_actual_observation_and_env_state(self, *, observation, env_state=None, model_state=None):
if env_state is None:
self.simulated_env.set_state_from_observation(observation)
return self.simulated_env.get_GT_state()
else:
return env_state
def set_state(self, state):
self.simulated_env.set_GT_state(state)
def get_state(self, observation):
return self.simulated_env.get_GT_state()
def predict(self, *, observations, states, actions):
def state_to_use(observation, state):
if state is None:
# This is an inefficiency as we set the state twice (typically not using state=None for GT models)
self.simulated_env.set_state_from_observation(observation)
return self.simulated_env.get_GT_state()
else:
return state
if observations.ndim == 1:
return self.simulated_env.simulate(state_to_use(observations, states), actions)
elif states is None:
states = [None] * len(observations)
next_obs, next_states, rs = zip(*[self.simulated_env.simulate(state_to_use(obs, state), action)
for obs, state, action in zip(observations, states, actions)])
return np.asarray(next_obs), next_states, np.asarray(rs)
def predict_n_steps(self, *, start_observations: np.ndarray, start_states: Sequence,
policy: Controller, horizon) -> (RolloutBuffer, Sequence):
# here we want to step through the envs in the direction of time
if start_observations.ndim != 2:
raise AttributeError(f"call predict_n_steps with a batches (shape: {start_observations.shape})")
if len(start_observations) != len(start_states):
raise AttributeError("number of observations and states have to be the same")
def perform_rollout(start_obs, start_state):
self.simulated_env.set_GT_state(start_state)
obs = start_obs
for h in range(horizon):
action = policy.get_action(obs, None)
next_obs, r, _, _ = self.simulated_env.step(action)
yield obs, next_obs, action, r
obs = next_obs
fields = self.rollout_field_names()
def rollouts_generator():
for obs_state in zip(start_observations, start_states):
trans = perform_rollout(*obs_state)
yield Rollout(field_names=fields, transitions=trans), self.simulated_env.get_GT_state()
rollouts, states = zip(*rollouts_generator())
return RolloutBuffer(rollouts=rollouts), states
def save(self, path):
pass
def load(self, path):
pass
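# Usage sketch (hedged): the keyword arguments accepted by ForwardModelWithDefaults
# are not visible in this module, so `env=...` below is an assumption, and
# `some_policy` stands for any Controller exposing get_action(obs, state).
#
# model = GroundTruthModel(env=my_ground_truth_env)
# start_obs = np.stack([my_ground_truth_env.reset() for _ in range(4)])
# start_states = [model.got_actual_observation_and_env_state(observation=o)
#                 for o in start_obs]
# rollouts, end_states = model.predict_n_steps(
#     start_observations=start_obs, start_states=start_states,
#     policy=some_policy, horizon=10)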
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import random
import re
import unicodedata
import codecs
import math
import itertools
from io import open
import torch
import torchvision
# import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch.nn as nn
import csv
from cytoolz import take
USE_CUDA = torch.cuda.is_available()
device = torch.device('cuda' if USE_CUDA else 'cpu')
corpus = r'/Users/oneai/ai/data/cmdc/'
def printLines(file, n=10):
"""
打印文件的函数。相当于head命令
:param file: 要读入的文件
:param n: 打印的最大长度
:return:
"""
with open(file, 'rb') as datafile:
lines = datafile.readlines()
for line in lines[:n]:
print(line)
def fileFilter(file, n=10):
with open(file, 'rb') as datafile:
lines = datafile.readlines()
return list(take(n, lines))
printLines(os.path.join(corpus, "movie_lines.txt"))
fileFilter(os.path.join(corpus, "movie_lines.txt"))
# Splits each line of the file into a dictionary of fields
def loadLines(fileName, fields):
"""
把文件按照关键字,组成二级字典
:param fileName: 要处理的文件
:param fields: 文件中的字典关键字
:return:
"""
lines = {}
with open(fileName, 'r', encoding='iso-8859-1') as f:
for line in f:
            value = line.split(" +++$+++ ")
# Extract fields
lineObj = {}
for i, field in enumerate(fields):
lineObj[field] = value[i]
lines[lineObj['lineID']] = lineObj
return lines
# Groups fields of lines from loadLines into conversations based on movie_conversations.txt
def loadConversations(fileName, lines, fields):
conversations = []
with open(fileName, 'r', encoding='iso-8859-1') as f:
for line in f:
values = line.split(" +++$+++ ")
# Extract fields
convObj = {}
for i, field in enumerate(fields):
convObj[field] = values[i]
lineIds = eval(convObj['utteranceIDs'])
# Reassemble lines
convObj["lines"] = []
for lineId in lineIds:
convObj["lines"].append(lines[lineId])
conversations.append(convObj)
return conversations
# Extracts pairs of sentences from conversations
def extractSentencePairs(conversations):
qa_pairs = []
for conversation in conversations:
# Iterate over all the line of the conversation
for i in range(len(conversation["lines"]) - 1): #
inputLine = conversation["lines"][i]["text"].strip()
targetLine = conversation["lines"][i + 1]["text"].strip()
if inputLine and targetLine:
qa_pairs.append([inputLine, targetLine])
return qa_pairs
# Define path to new file
datafile = os.path.join(corpus, "formatted_movie_lines.txt")
delimiter = '\t'
# Unescape the delimiter
delimiter = str(codecs.decode(delimiter, "unicode_escape"))
# Initialize lines dict, conversations list, and field ids
lines = {}
conversations = []
MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"]
MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"]
# Load lines and process conversations
print("\nProcessing corpus...")
lines = loadLines(os.path.join(corpus, 'movie_lines.txt'), MOVIE_LINES_FIELDS)
print("\nLoading conversations...")
conversations = loadConversations(os.path.join(corpus, "movie_conversations.txt"), lines, MOVIE_CONVERSATIONS_FIELDS)
# Write new csv file
print("\nWriting newly formatted file..")
with open(datafile, "w", encoding='utf-8') as outputfile:
writer = csv.writer(outputfile, delimiter=delimiter)
    for pair in extractSentencePairs(conversations):
        writer.writerow(pair)
# Print a sample of lines from the newly written file
print("\nSample lines from file:")
printLines(datafile)
def shave_marks(txt):
"""去掉全部变音符号"""
norm_txt = unicodedata.normalize('NFD', txt) # 把所有字符分解成基字符和组合记号
shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c)) # 过滤掉所有组合记号。
return unicodedata.normalize('NFC', shaved) # 重组所有字符
if __name__ == '__main__':
print("test")
|
"""
Code for recording and logging stack traces of training scripts to help determine
how Python interfaces with native C libraries.
This code was useful for determining how Python calls into PyTorch; PyTorch
has multiple native shared libraries it calls into.
"""
import textwrap
import traceback
import contextlib
from io import StringIO
import typing
# import tensorflow as tf
from rlscope.profiler.rlscope_logging import logger
from rlscope.profiler import wrap_util
# Intercept tf.Session.run(...) calls to see when calls to TensorFlow graph computations are made.
#
# Never called with --python-mode...
def print_indent(ss, indent):
if indent == 0 or indent is None:
return
ss.write(' '*indent)
def with_indent(txt, indent):
if indent == 0 or indent is None:
return txt
return textwrap.indent(txt, prefix=' '*indent)
class LoggedStackTrace:
def __init__(self, name, format_stack):
self.name = name
self.format_stack = format_stack
self.num_calls = 0
self.printed = False
def add_call(self):
self.num_calls += 1
self.printed = False
def print(self, ss, skip_last=0, indent=0):
keep_stack = self.format_stack[:len(self.format_stack)-skip_last]
ss.write(with_indent(''.join(keep_stack), indent))
self.printed = True
class _LoggedStackTraces:
def __init__(self):
        # Maps tuple(traceback.format_stack()) -> LoggedStackTrace
        self.stacktraces = dict()
def _key(self, name, format_stack):
return tuple(format_stack)
def log_call(self, name, format_stack):
key = self._key(name, format_stack)
stacktrace = self.stacktraces.get(key, None)
if stacktrace is None:
stacktrace = LoggedStackTrace(name, format_stack)
self.stacktraces[key] = stacktrace
stacktrace.add_call()
def num_to_print(self):
n = 0
for st in self.stacktraces.values():
if not st.printed:
n += 1
return n
def print(self, ss, skip_last=0, indent=0):
# Only print stacktraces for functions that have been called since we last printed.
stacktraces = [st for st in self.stacktraces.values() if not st.printed]
# Sort by number of calls
stacktraces.sort(key=lambda st: (st.num_calls, st.name))
print_indent(ss, indent)
ss.write("Stacktraces ordered by number of calls (since last printed)\n")
for i, st in enumerate(stacktraces):
print_indent(ss, indent+1)
ss.write("Stacktrace[{i}] num_calls={num_calls}: {name}\n".format(
i=i,
num_calls=st.num_calls,
name=st.name,
))
st.print(ss, indent=indent+2, skip_last=skip_last)
def wrap_module(self, module, should_wrap=None):
wrap_util.wrap_module(LoggedCall, module, should_wrap=should_wrap)
def unwrap_module(self, module):
wrap_util.unwrap_module(LoggedCall, module)
def wrap_func(self, module, name, should_wrap=None):
wrap_util.wrap_func(LoggedCall, module, name, should_wrap=should_wrap)
def unwrap_func(self, module, name):
wrap_util.unwrap_func(LoggedCall, module, name)
def log_call(func, name, *args, **kwargs):
if LoggedStackTraces is not None:
stack = traceback.format_stack()
LoggedStackTraces.log_call(name, stack)
return func(*args, **kwargs)
class LoggedCall:
def __init__(self, func, name=None):
self.func = func
if name is None:
name = self.func.__name__
self.name = name
# -> typing.Any:
def __call__(self, *args, **kwargs):
if LoggedStackTraces is not None:
stack = traceback.format_stack()
LoggedStackTraces.log_call(self.name, stack)
ret = self.func(*args, **kwargs)
return ret
LoggedStackTraces = _LoggedStackTraces()
# LoggedStackTraces = None
# def setup_logging_stack_traces(FLAGS):
# global LoggedStackTraces
# WRAP_TF_SESSION_RUN = FLAGS.log_stacktrace_freq is not None
# if WRAP_TF_SESSION_RUN:
# LoggedStackTraces = _LoggedStackTraces()
#
# original_tf_Session_run = tf.compat.v1.Session.run
# def wrapped_tf_Session_run(self, fetches, feed_dict=None, options=None, run_metadata=None):
# return log_call(original_tf_Session_run, "tf.Session.run", self, fetches, feed_dict=feed_dict, options=options, run_metadata=run_metadata)
# tf.compat.v1.Session.run = wrapped_tf_Session_run
#
# from tensorflow.python import pywrap_tfe
#
# original_pywrap_tfe_TFE_Py_Execute = pywrap_tfe.TFE_Py_Execute
# def wrapped_pywrap_tfe_TFE_Py_Execute(*args, **kwargs):
# return log_call(original_pywrap_tfe_TFE_Py_Execute, "TFE_Py_Execute", *args, **kwargs)
# pywrap_tfe.TFE_Py_Execute = wrapped_pywrap_tfe_TFE_Py_Execute
#
# original_pywrap_tfe_TFE_Py_FastPathExecute = pywrap_tfe.TFE_Py_FastPathExecute
# def wrapped_pywrap_tfe_TFE_Py_FastPathExecute(*args, **kwargs):
# return log_call(original_pywrap_tfe_TFE_Py_FastPathExecute, "TFE_Py_FastPathExecute", *args, **kwargs)
# pywrap_tfe.TFE_Py_FastPathExecute = wrapped_pywrap_tfe_TFE_Py_FastPathExecute
@contextlib.contextmanager
def with_log_stacktraces():
"""Context manager for soft device placement, allowing summaries on CPU.
Eager and graph contexts have different default device placements. See
b/148408921 for details. This context manager should be used whenever using
summary writers contexts to make sure summaries work when executing on TPUs.
Yields:
Sets `tf.config.set_soft_device_placement(True)` within the context
"""
try:
yield
finally:
log_stacktraces()
def log_stacktraces():
if LoggedStackTraces is not None and LoggedStackTraces.num_to_print() > 0:
ss = StringIO()
# stack[-1] = Call to "traceback.format_stack()"
# stack[-2] = Call to "return log_call(...)"
# LoggedStackTraces.print(ss, skip_last=2, indent=0)
LoggedStackTraces.print(ss, skip_last=1, indent=0)
logger.info(ss.getvalue().rstrip())
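def _demo_logged_call():
    # A minimal sketch (not part of the original module): wrap a local function
    # in LoggedCall so every call records its Python stack trace, then flush
    # the accumulated traces through the rlscope logger on exit.
    def add(a, b):
        return a + b
    wrapped_add = LoggedCall(add, name='demo.add')
    with with_log_stacktraces():
        wrapped_add(1, 2)
        wrapped_add(3, 4)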
|
from telegram import ReplyKeyboardMarkup, InlineKeyboardButton, InlineKeyboardMarkup
import os
def hr_keyboard():
return ReplyKeyboardMarkup(
[
['Просмотр резюме', 'Просмотр пользователей'],
['Создать ключ кандидата и сотрудника'],
['Вернуться в главное меню']
])
def show_cv(update, context):
files = os.listdir('downloads')
for fl in files:
update.message.reply_document(document=open('downloads/'+fl,'rb'))
update.message.reply_text('', reply_markup=hr_keyboard())
def roles_inline_keyboard():
keyboard = [
[InlineKeyboardButton("Внешний пользователь", callback_data='r1')],
[InlineKeyboardButton("Кандидат", callback_data='r2')],
[InlineKeyboardButton("Сотрудник", callback_data='r3')],
]
return InlineKeyboardMarkup(keyboard)
def show_user(update, context):
update.message.reply_text('Выберите роль', reply_markup=roles_inline_keyboard())
|
import os, sys
import gc
import pandas as pd
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
import dill
import requests
import lightgbm as lgb
import traceback
from joblib import Parallel, delayed
import re
from utils import *
X_train, X_test, _ = read_train_test_data()
X = pd.concat([X_train, X_test])
del X_train, X_test; gc.collect()
train_columns = list(X.columns)
X_nonnan = X.dropna(axis=1)
X_nonnan = X_nonnan.replace(np.inf, 9999.999)
X_nonnan = X_nonnan.replace(-np.inf, -9999.999)
not_nan_columns = list(X_nonnan.columns)
nan_columns = [col for col in train_columns if col not in not_nan_columns]
print("Not Nan Columns: {} columns.".format(len(not_nan_columns)))
print(not_nan_columns)
print("Nan Columns: {} columns.".format(len(nan_columns)))
print(nan_columns)
print("PCA...")
from sklearn.decomposition import PCA
pca = PCA(n_components=len(not_nan_columns)-1, whiten=True)
X_nonnan_pca = pca.fit_transform(X_nonnan)
print("Explained Variance Ratio...")
print(pca.explained_variance_ratio_)
print("Explained Variance Ratio Cumulative sum...")
print(np.cumsum(pca.explained_variance_ratio_))
print("Correlation")
X_corr = X_nonnan.corr()
to_parquet(X_corr, "../tmp/X_corr.parquet")
|
#!/usr/bin/env python3
import json
import time
import subprocess
import telegram_send
MAC_MONITORING_PATH = '/etc/hostapd/mac-monitoring.json'
STA = 'wlan0'
def get_connections(sta, monitoring):
""" Returns MACs is sta that are being monitored """
list_sta = subprocess.check_output(
["sudo", "hostapd_cli", "-i"+sta, "list_sta"]).decode('utf-8').splitlines()
return [line for line in list_sta if line in monitoring]
def send(connections, disconnections):
msg = f"<b>CONNECTED</b>\n{', '.join(connections)}\n\n<b>DISSCONECTED</b>\n{', '.join(disconnections)}"
telegram_send.send(messages=[msg], parse_mode='html')
if __name__ == '__main__':
with open(MAC_MONITORING_PATH, 'r') as file:
monitoring = json.load(file)
prev_connected = get_connections(STA, monitoring)
while True:
connected = get_connections(STA, monitoring)
# List new connections
connections = [mac for mac in connected if mac not in prev_connected]
# List new disconnections
disconnections = [
p_mac for p_mac in prev_connected if p_mac not in connected]
        prev_connected = connected  # Save connections for the next iteration
if connections != [] or disconnections != []:
send(connections, disconnections)
time.sleep(5)
|
from typing import List
class Solution:
    def nextGreaterElements(self, nums: List[int]) -> List[int]:
res = [-1] * len(nums)
s = []
n = len(nums)
for i in range(2 * len(nums) - 1, -1, -1):
while len(s) != 0 and s[-1] <= nums[i % n]:
s.pop()
res[i % n] = -1 if len(s) == 0 else s[-1]
s.append(nums[i % n])
return res
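if __name__ == '__main__':
    # Quick sanity check (illustrative, not part of the LeetCode submission):
    # for the circular array [1, 2, 1] the next greater elements are [2, -1, 2].
    print(Solution().nextGreaterElements([1, 2, 1]))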
|
from imread_from_url.imread_from_url import *
__version__ = "0.1.0"
|
#
# Copyright 2020 The FLEX Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flex.crypto.fpe.api import generate_fpe_encryptor
import numpy as np
def test_encryptor_decryptor():
key = [np.random.bytes(16), np.random.bytes(32)]
n = [7, 20]
t = [b'', b'123']
for i in range(2):
encryptor = generate_fpe_encryptor(key[i], n[i], t[i], method='ff1', encrypt_algo='aes')
x = np.linspace(0, 2 ** n[i] - 1, 2 ** n[i]).astype(int)
y = encryptor.encrypt(x)
z = encryptor.decrypt(y)
assert np.all(x == np.unique(y))
assert np.all(x == z)
|
ix.enable_command_history()
ix.application.select_next_inputs(False)
ix.disable_command_history()
|
from datetime import datetime, date, time
from typing import List
from uuid import uuid4
from PIL.Image import Image
from qrcode import QRCode
from qrcode.constants import ERROR_CORRECT_H
from rawa.commands.exceptions import CommandError
from rawa.models import db, Station, Token, UsedToken, User
def find_token(token_id: int) -> Token:
# todo - token_id should be a UUID, not integer (security)
return Token.query.filter_by(id=token_id).first()
def _generate_token(station: Station, timestamp: datetime) -> Token:
token = Token(
station=station,
timestamp=timestamp,
value=uuid4().hex,
)
db.session.add(token)
return token
def generate_tokens_for_one_day(station: Station, day: date) -> List[Token]:
hours = range(24)
tokens = [
_generate_token(
station=station,
timestamp=datetime.combine(day, time(hour))
)
for hour in hours
]
db.session.commit()
return tokens
def _compute_score_from_token(token: Token) -> int:
return 100
def use_token(user: User, token_value: str) -> UsedToken:
token = Token.query.filter_by(value=token_value).first()
if not token:
raise CommandError({'token': 'token jest niepoprawny'})
used_token_before = UsedToken.query.filter_by(user=user, token=token).first()
if used_token_before:
raise CommandError({'token': 'token został już zużyty'})
used_token = UsedToken(
user=user,
token=token,
timestamp=datetime.now(),
score=_compute_score_from_token(token)
)
db.session.add(used_token)
db.session.commit()
return used_token
def generate_qr_code(token: Token, prefix: str = '') -> Image:
qrcode = QRCode(error_correction=ERROR_CORRECT_H)
qrcode.add_data(prefix + token.value)
qrcode.make()
return qrcode.make_image()
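# Usage sketch (hedged: `station` must be an existing Station row and the calls
# must run inside an application context with a configured db session; the URL
# prefix and file name below are placeholders):
#
# tokens = generate_tokens_for_one_day(station, date.today())
# img = generate_qr_code(tokens[0], prefix='https://example.org/token/')
# img.save('token-0.png')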
|
"""
Tests for Admin Template Tags
"""
from django.contrib.auth.models import User
from django.template import Template, Context
from django.test import TestCase, override_settings
class TemplateTagAdminTestCase(TestCase):
"""
Test Template Tags
"""
# |-------------------------------------------------------------------------
# | Test index_app_display
# |-------------------------------------------------------------------------
@override_settings(ADMINLTE2_ADMIN_INDEX_USE_APP_LIST=False)
def test_index_app_display_shows_box_when_setting_is_false(self):
"""Test index app display shows box when setting is false"""
app = {
'app_url': '/foo',
'app_name': 'foo',
'name': 'foo',
'app_label': 'foo',
'models': [],
}
context = Context({'app': app})
template_to_render = Template(
"{% load admin.admin_index %}"
"{% index_app_display %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'<div class="box-body">',
rendered_template
)
self.assertNotIn(
'<table class="table table-hover table-striped">',
rendered_template
)
@override_settings(ADMINLTE2_ADMIN_INDEX_USE_APP_LIST=True)
def test_index_app_display_shows_list_when_setting_is_true(self):
"""Test index app display shows list when setting is true"""
app = {
'app_url': '/foo',
'app_name': 'foo',
'name': 'foo',
'app_label': 'foo',
'models': [],
}
context = Context({'app': app})
template_to_render = Template(
"{% load admin.admin_index %}"
"{% index_app_display %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'<table class="table table-hover table-striped">',
rendered_template
)
self.assertNotIn(
'<div class="box-body">',
rendered_template
)
# |-------------------------------------------------------------------------
# | Test show_control_sidebar_button
# |-------------------------------------------------------------------------
def test_show_control_sidebar_button_shows_up_when_settings_are_left_as_default_tabs_to_show(self):
"""Test show control sidebar button shows up when settings are left as
default tabs to show"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_header %}"
"{% show_control_sidebar_button %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'<a href="#" data-toggle="control-sidebar"><i class="fa fa-gears"></i></a>',
rendered_template
)
@override_settings(
ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={
'SHOW_SETTINGS_TAB': True,
'SHOW_EXTRA_TABS': True
}
)
def test_show_control_sidebar_button_shows_up_when_settings_defines_all_tabs_to_show(self):
"""Test show control sidebar button shows up when settings defines all
tabs to show"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_header %}"
"{% show_control_sidebar_button %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'<a href="#" data-toggle="control-sidebar"><i class="fa fa-gears"></i></a>',
rendered_template
)
@override_settings(ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={'SHOW_RECENT_ACTIVITY_TAB': False})
def test_show_control_sidebar_button_is_missing_when_settings_hides_all_tabs_to_show(self):
"""Test show control sidebar button is missing when settings hides all tabs to show"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_header %}"
"{% show_control_sidebar_button %}"
)
rendered_template = template_to_render.render(context)
self.assertNotIn(
'<a href="#" data-toggle="control-sidebar"><i class="fa fa-gears"></i></a>',
rendered_template
)
# |-------------------------------------------------------------------------
# | Test show_control_sidebar_recent_activity_tab_pane
# |-------------------------------------------------------------------------
def test_show_control_sidebar_recent_activity_tab_pane_displays_when_setting_is_default(self):
"""Test show control sidebar recent activity tab pane displays when
setting is default"""
user = User()
context = Context({'user': user})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_recent_activity_tab_pane %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'None available',
rendered_template
)
@override_settings(ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={'SHOW_RECENT_ACTIVITY_TAB': True})
def test_show_control_sidebar_recent_activity_tab_pane_displays_when_setting_is_true(self):
"""Test show control sidebar recent activity tab pane displays when
setting is true"""
user = User()
context = Context({'user': user})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_recent_activity_tab_pane %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'None available',
rendered_template
)
@override_settings(ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={'SHOW_RECENT_ACTIVITY_TAB': False})
def test_show_control_sidebar_recent_activity_tab_pane_is_hidden_when_setting_is_false(self):
"""Tests show control sidebar recent activity tab pane is hidden when
setting is false"""
user = User()
context = Context({'user': user})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_recent_activity_tab_pane %}"
)
rendered_template = template_to_render.render(context)
self.assertNotIn(
'None available',
rendered_template
)
# |-------------------------------------------------------------------------
# | Test show_control_sidebar_settings_tab_pane
# |-------------------------------------------------------------------------
def test_show_control_sidebar_settings_tab_pane_is_hidden_when_setting_is_default(self):
"""Test show control sidebar settings tab pane is hidden when setting
is default"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_settings_tab_pane %}"
)
rendered_template = template_to_render.render(context)
self.assertNotIn(
'<h3 class="control-sidebar-heading">General Settings</h3>',
rendered_template
)
@override_settings(ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={'SHOW_SETTINGS_TAB': True})
def test_show_control_sidebar_settings_tab_pane_displays_when_setting_is_true(self):
"""Test show control sidebar settings tab pane displays when setting
is true"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_settings_tab_pane %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'<h3 class="control-sidebar-heading">General Settings</h3>',
rendered_template
)
@override_settings(ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={'SHOW_SETTINGS_TAB': False})
def test_show_control_sidebar_settings_tab_pane_is_hidden_when_setting_is_false(self):
"""Test show control sidebar settings tab pane is hidden when setting
is false"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_settings_tab_pane %}"
)
rendered_template = template_to_render.render(context)
self.assertNotIn(
'<h3 class="control-sidebar-heading">General Settings</h3>',
rendered_template
)
# |-------------------------------------------------------------------------
# | Test show_control_sidebar_extra_tab_panes
# |-------------------------------------------------------------------------
def test_show_control_sidebar_extra_tab_panes_is_hidden_when_setting_is_default(self):
"""Test show control sidebar extra tab panes is hidden when setting
is default"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_extra_tab_panes %}"
)
rendered_template = template_to_render.render(context)
self.assertNotIn(
'<h3 class="control-sidebar-heading">Extra Tab</h3>',
rendered_template
)
@override_settings(ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={'SHOW_EXTRA_TABS': True})
def test_show_control_sidebar_extra_tab_panes_displays_when_setting_is_true(self):
"""Test show control sidebar extra tab panes displays when setting is true"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_extra_tab_panes %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'<h3 class="control-sidebar-heading">Extra Tab</h3>',
rendered_template
)
@override_settings(ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={'SHOW_EXTRA_TABS': False})
def test_show_control_sidebar_extra_tab_panes_is_hidden_when_setting_is_false(self):
"""Test show control sidebar extra tab panes is hidden when setting is false"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_extra_tab_panes %}"
)
rendered_template = template_to_render.render(context)
self.assertNotIn(
'<h3 class="control-sidebar-heading">Extra Tab</h3>',
rendered_template
)
# |-------------------------------------------------------------------------
# | Test show_control_sidebar_tabs
# |-------------------------------------------------------------------------
@override_settings(
ADMINLTE2_ADMIN_CONTROL_SIDEBAR_TABS={
'SHOW_SETTINGS_TAB': True,
'SHOW_EXTRA_TABS': True
}
)
def test_show_control_sidebar_tabs_displays_the_enabled_tabs_as_tabs_when_there_is_more_than_one(self):
"""Test show control sidebar tabs displays the enabled tabs as tabs when
there is more than one"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_tabs %}"
)
rendered_template = template_to_render.render(context)
self.assertIn(
'<ul class="nav nav-tabs nav-justified control-sidebar-tabs">',
rendered_template
)
def test_show_control_sidebar_tabs_displays_the_enabled_tab_by_itself_and_not_as_a_tab_when_there_is_only_one(self):
"""Test show control sidebar tabs displays the enabled tab by itself and
not as a tab when there is only one"""
context = Context({})
template_to_render = Template(
"{% load admin.admin_control_sidebar %}"
"{% show_control_sidebar_tabs %}"
)
rendered_template = template_to_render.render(context)
self.assertNotIn(
'<ul class="nav nav-tabs nav-justified control-sidebar-tabs">',
rendered_template
)
|
# Problem description: Convert a number into a linked list of its digits.
# Solution time complexity: O(n)
# Comments: Each extracted digit node is prepended to the head of the result list.
# Appending to the tail instead would produce the digits in reverse, i.e. f(123) => 3->2->1.
#
import LinkedList
# Linked List Node inside the LinkedList module is defined as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def ConvertPositiveNumToLinkedList(val: int) -> LinkedList.Node:
node = None
while True:
dig = val % 10
val //= 10
prev = LinkedList.Node(dig, node)
node = prev
if val == 0:
break
return node
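# Illustrative usage (a sketch relying on the LinkedList module imported above):
# traversing the returned head yields the digits in their original order.
if __name__ == "__main__":
    head = ConvertPositiveNumToLinkedList(123)
    digits = []
    while head is not None:
        digits.append(str(head.val))
        head = head.nxt
    print(" -> ".join(digits))  # expected output: 1 -> 2 -> 3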
|
import tempfile
import nox
nox.options.sessions = "lint", "tests"
locations = "microservice_demo", "tests", "noxfile.py"
def install_with_constraints(session, *args, **kwargs):
with tempfile.NamedTemporaryFile() as requirements:
session.run(
"poetry",
"export",
"--dev",
"--format=requirements.txt",
f"--output={requirements.name}",
external=True,
)
session.install(f"--constraint={requirements.name}", *args, **kwargs)
@nox.session(python=["3.8"])
def lint(session):
args = session.posargs or locations
install_with_constraints(session, "flake8")
session.run("flake8", *args)
@nox.session(python="3.8")
def black(session):
args = session.posargs or locations
install_with_constraints(session, "black")
session.run("black", *args)
@nox.session(python=["3.8"])
def tests(session):
args = session.posargs
session.run("poetry", "install", "--no-dev", external=True)
install_with_constraints(session, "pytest")
session.run("pytest", *args)
|
import matplotlib.pyplot as plt
from pydicom import dcmread
import pandas as pd
import numpy as np
import os
import csv
remove_tags = [
'PatientBirthDate',
'PatientID',
'PatientName',
'PatientSex', ]
save_info = {
# 'ImplementationVersionName': [0x0002,0x0013], #'OFFIS_DCMTK_360'
'ImageType': [0x0008, 0x0008], # ['ORIGINAL', 'PRIMARY', 'COLOR']
'SOPClassUID': [0x0008, 0x0016], # VL Photographic Image Storage
'StudyTime': [0x0008, 0x0030], # 093540
'ContentTime': [0x0008, 0x0033], # 'Proofsheet'
'Modality': [0x0008, 0x0060], # 'OP' or 'XC'?
'ConversionType': [0x0008, 0x0064], # 'SI'
'Manufacturer': [0x0008, 0x0070], # Zeiss
'StationName': [0x0008, 0x1010], # VISU-CAPTURE1
    'SeriesDescription': [0x0008, 0x103e],  # 'Single FA 5:14.10 55° ART'
'PatientOrientation': [0x0020, 0x0020], #
'Laterality': [0x0020, 0x0060], # L or R?
'ImageLaterality': [0x0020, 0x0062], # L or R?
'PhotometricInterpretation': [0x0028, 0x0004], # RGB?
'ManufacturersModelName': [0x0008, 0x1090], # "FF450 Plus"
}
def dcm_deid(ds):
for tag in remove_tags:
if tag in ds:
ds[tag].value = ''
ds.PatientIdentityRemoved = 'YES'
def dcm_info(ds):
rt = {}
for k, v in save_info.items():
if v in ds:
rt[k] = ds[v].repval
return rt
def main(output_folder):
freport = '/media/hdd/data/imcaption/FAsample/sample.csv'
folder_path = '/media/hdd/data/imcaption/FAsample' # the folder storing csv
reportlist = pd.read_csv(freport, encoding='gb18030')
table = []
for index, row in reportlist.iterrows():
id = row['id']
# print(id)
finding = row['Findings']
# print('finding', finding)
impression = row['Impression']
# print('impression', impression)
id_folder_path = os.path.join(folder_path, id)
folder = os.walk(id_folder_path)
print(id_folder_path)
id_folder_path_deid = id_folder_path + '_deid'
print(id_folder_path_deid)
if not os.path.exists(id_folder_path_deid):
os.makedirs(id_folder_path_deid)
for path, dir_list, file_list in folder:
# print(path, dir_list, file_list)
save_to_path = os.path.join(output_folder, path)
if not os.path.exists(save_to_path):
os.makedirs(save_to_path)
for idx, dcm_file in enumerate(file_list):
dcm_filepath = os.path.join(path, dcm_file)
ds = dcmread(dcm_filepath)
                before_deidentify = dcm_info(ds)
                # print(before_deidentify)
dcm_deid(ds)
dcm_dict = dcm_info(ds)
dcm_dict['studyid'] = id
dcm_dict['imgid'] = dcm_file.replace('.dcm', '')
dcm_dict['filepath'] = dcm_filepath
table.append(dcm_dict)
# print(dcm_dict)
# save_to_path = os.path.join(output_folder,path)
# ds.save_as(save_to_file)
dicom_save_path = id_folder_path_deid + os.sep + dcm_file
ds.save_as(dicom_save_path)
if index == 500:
break
print(ds)
csv_file = "logs/infos.csv"
with open(csv_file, 'w', newline='', encoding='utf-8') as csvfile:
cols = list(save_info.keys())
cols.extend(['studyid', 'imgid', 'filepath'])
writer = csv.DictWriter(csvfile, fieldnames=cols)
writer.writeheader()
writer.writerows(table)
if __name__ == "__main__":
output_folder = "output"
main(output_folder)
print("Finished")
|
"""
Author: David O'Callaghan
"""
import numpy as np
import solver_utils
def solve(grid_in):
"""
This function contains the hand-coded solution for the data in
2dc579da.json of the Abstraction and Reasoning Corpus (ARC)
Transformation Description: The center row and center column of the input
grid are a different colour to the rest of the grid, effectively dividing
the grid into 4 quadrants. One of the 4 quadrants contains an element with
a different colour to every other element. The transformation is to select
this quadrant as the output grid.
Inputs: grid_in - A python list of lists containing the unsolved grid data
Returns: grid_out - A python list of lists containing the solved grid data
"""
# Convert to numpy array
grid_in_np = np.array(grid_in)
# Find the center index
midpoint = grid_in_np.shape[0] // 2
# Source : https://stackoverflow.com/questions/6252280/find-the-most-frequent-number-in-a-numpy-vector
# [Accessed: 14/11/2019]
(values,counts) = np.unique(grid_in_np, return_counts=True)
ind = np.argmin(counts)
minority_colour = values[ind]
squares = [
grid_in_np[0:midpoint,0:midpoint], # Top-left
grid_in_np[midpoint+1:,0:midpoint], # Bottom-left
grid_in_np[0:midpoint,midpoint+1:], # Top-right
grid_in_np[midpoint+1:,midpoint+1:] # Bottom-right
]
for square in squares:
if minority_colour in square:
grid_out_np = square
break
# Convert back to list of lists
grid_out = grid_out_np.tolist()
return grid_out
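# Illustrative example (the grid values below are made up): with a 5x5 input the
# midpoint is 2, colour 8 fills the divider row and column, colour 4 is the
# background, and the single cell of colour 3 marks the bottom-right quadrant,
# so that quadrant is returned:
#
#   solve([[4, 4, 8, 4, 4],
#          [4, 4, 8, 4, 4],
#          [8, 8, 8, 8, 8],
#          [4, 4, 8, 4, 3],
#          [4, 4, 8, 4, 4]])   # -> [[4, 3], [4, 4]]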
if __name__=='__main__':
# Get the data for the associated JSON file
data = solver_utils.parse_json_file()
# Iterate through training grids and test grids
for data_train in data['train']:
solver_utils.solve_wrapper(data_train['input'], solve)
for data_test in data['test']:
solver_utils.solve_wrapper(data_test['input'], solve)
|
#! /usr/bin/env python
"""
This script can be used to produce a standalone executable from
arbitrary Python code. You supply the name of the starting Python
file to import, and this script attempts to generate an executable
that will produce the same results as "python startfile.py".
This script is actually a wrapper around Panda's FreezeTool.py, which
is itself a tool to use Python's built-in "freeze" utility to compile
Python code into a standalone executable. It also uses Python's
built-in modulefinder module, which it uses to find all of the modules
imported directly or indirectly by the original startfile.py.
Usage::
pfreeze.py [opts] [startfile]
Options::
-o output
Specifies the name of the resulting executable file to produce.
If this ends in ".mf", a multifile is written instead of a frozen
binary. If it ends in ".dll", ".pyd", or ".so", a shared library
is written.
-x module[,module...]
Specifies a comma-separated list of Python modules to exclude from
the resulting file, even if they appear to be referenced. You
may also repeat the -x command for each module.
-i module[,module...]
Specifies a comma-separated list of Python modules to include in
the resulting file, even if they do not appear to be referenced.
You may also repeat the -i command for each module.
-p module[,module...]
Specifies a list of Python modules that do run-time manipulation
of the __path__ variable, and thus must be actually imported to
determine the true value of __path__.
-P path
Specifies an additional directory in which we should search for
Python modules. This is equivalent to setting the PYTHONPATH
environment variable. May be repeated.
-s
Adds the standard set of modules that are necessary for embedding
the Python interpreter. Implicitly set if an executable is
generated.
-k
Keeps temporary files generated by pfreeze. Useful when debugging
FreezeTool itself.
"""
import getopt
import sys
import os
from direct.showutil import FreezeTool
def usage(code, msg = ''):
if __doc__:
sys.stderr.write(__doc__ + '\n')
sys.stderr.write(str(msg) + '\n')
sys.exit(code)
def main(args=None):
if args is None:
args = sys.argv[1:]
freezer = FreezeTool.Freezer()
basename = None
addStartupModules = False
try:
opts, args = getopt.getopt(args, 'o:i:x:p:P:slkh')
except getopt.error as msg:
usage(1, msg)
for opt, arg in opts:
if opt == '-o':
basename = arg
elif opt == '-i':
for module in arg.split(','):
freezer.addModule(module)
elif opt == '-x':
for module in arg.split(','):
freezer.excludeModule(module)
elif opt == '-p':
for module in arg.split(','):
freezer.handleCustomPath(module)
elif opt == '-P':
sys.path.append(arg)
elif opt == '-s':
addStartupModules = True
elif opt == '-l':
freezer.linkExtensionModules = True
elif opt == '-k':
freezer.keepTemporaryFiles = True
elif opt == '-h':
usage(0)
else:
            print('illegal option: ' + opt)
sys.exit(1)
if not basename:
usage(1, 'You did not specify an output file.')
if len(args) > 1:
usage(1, 'Only one main file may be specified.')
outputType = 'exe'
bl = basename.lower()
if bl.endswith('.mf'):
outputType = 'mf'
elif bl.endswith('.c'):
outputType = 'c'
elif bl.endswith('.dll') or bl.endswith('.pyd') or bl.endswith('.so'):
basename = os.path.splitext(basename)[0]
outputType = 'dll'
elif bl.endswith('.exe'):
basename = os.path.splitext(basename)[0]
compileToExe = False
if args:
startfile = args[0]
startmod = startfile
if startfile.endswith('.py') or startfile.endswith('.pyw') or \
startfile.endswith('.pyc') or startfile.endswith('.pyo'):
startmod = os.path.splitext(startfile)[0]
if outputType == 'dll' or outputType == 'c':
freezer.addModule(startmod, filename = startfile)
else:
freezer.addModule('__main__', filename = startfile)
compileToExe = True
addStartupModules = True
elif outputType == 'exe':
# We must have a main module when making an executable.
usage(1, 'A main file needs to be specified when creating an executable.')
freezer.done(addStartupModules = addStartupModules)
if outputType == 'mf':
freezer.writeMultifile(basename)
elif outputType == 'c':
freezer.writeCode(basename)
else:
freezer.generateCode(basename, compileToExe = compileToExe)
return 0
if __name__ == '__main__':
sys.exit(main())
|
# TestSwiftRangeTypes.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test the Swift.Range<T> type
"""
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftRangeType(TestBase):
mydir = TestBase.compute_mydir(__file__)
@decorators.swiftTest
@decorators.add_test_categories(["swiftpr"])
def test_swift_range_type(self):
"""Test the Swift.Range<T> type"""
self.build()
self.do_test()
def setUp(self):
TestBase.setUp(self)
self.main_source = "main.swift"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
def do_test(self):
"""Test the Swift.Range<T> type"""
(target, process, self.thread, breakpoint) = lldbutil.run_to_source_breakpoint(
self, 'Set breakpoint here', self.main_source_spec)
self.frame = self.thread.frames[0]
self.assertTrue(self.frame, "Frame 0 is valid.")
self.expect("frame variable a", substrs=[
'(ClosedRange<Int>) a = 1...100'])
self.expect("frame variable b", substrs=['(Range<Int>) b = 1..<100'])
self.expect("frame variable c", substrs=[
'(ClosedRange<Int>) c = 1...100'])
self.expect("frame variable d", substrs=[
'(Range<Int>) d = 1..<100'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lldb.SBDebugger.Terminate)
unittest2.main()
|
"""
Run this example to check if the GEKKO optimization package has been installed correctly
"""
from gekko import GEKKO
m = GEKKO(remote=False) # Initialize gekko
m.options.SOLVER = 1 # APOPT is an MINLP solver
# optional solver settings with APOPT
m.solver_options = ['minlp_maximum_iterations 500',
# minlp iterations with integer solution
'minlp_max_iter_with_int_sol 10',
# treat minlp as nlp
'minlp_as_nlp 0',
# nlp sub-problem max iterations
'nlp_maximum_iterations 50',
# 1 = depth first, 2 = breadth first
'minlp_branch_method 1',
# maximum deviation from whole number
'minlp_integer_tol 0.05',
                    # convergence tolerance
'minlp_gap_tol 0.01']
# Initialize variables
x1 = m.Var(value=1, lb=1, ub=5)
x2 = m.Var(value=5, lb=1, ub=5)
# Integer constraints for x3 and x4
x3 = m.Var(value=5, lb=1, ub=5, integer=True)
x4 = m.Var(value=1, lb=1, ub=5, integer=True)
# Equations
m.Equation(x1*x2*x3*x4 >= 25)
m.Equation(x1**2+x2**2+x3**2+x4**2 == 40)
m.Obj(x1*x4*(x1+x2+x3)+x3) # Objective
m.solve(disp=True) # Solve
print('Results')
print('x1: ' + str(x1.value))
print('x2: ' + str(x2.value))
print('x3: ' + str(x3.value))
print('x4: ' + str(x4.value))
print('Objective: ' + str(m.options.objfcnval))
|
import random
import pygame.draw as draw
import math
from pygame.sprite import Sprite
settings = None
screen = None
stats = None
def set_global_var(setts, scr, statistics):
global settings
global screen
global stats
settings = setts
screen = scr
stats = statistics
Ball.settings = setts
Ball.screen = scr
Ball.stats = statistics
class Ball(Sprite):
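    # Class-level references; these are None at import time and are rebound by set_global_var().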
screen = screen
settings = settings
stats = stats
bullets = None
def __init__(self):
"""
Initialize the ball and set its starting position.
"""
super().__init__()
self.screen_rect = screen.get_rect()
# set ball moving parameters
self.radius = settings.hero_radius
self.color = None
# Place each new ball at the left center of the screen.
self.cx = 0
self.cy = 0
self.speed_x = 0
self.speed_y = 0
self.direction = -1
self.move_to_default_position()
        # life settings
        self.life = self.radius * self.radius * 3.14
        # battle attributes
self.shield_thickness = 0
self.shield_life = 0
self.bullet_type = None
self.shoot_freq = None
self.shoot_frame_cnt = None
        # instance identity: 'Enemy' for an enemy, 'Hero' for the hero
self.instance = 'Ball'
def update(self):
self.check_edges()
self.update_coords()
self.fire_bullet(self.bullets)
def move_to_default_position(self):
self.cx = float(self.radius) # x coordinate of center
self.cy = float(self.screen_rect.centery) # y coordinate of center
def check_edges(self):
raise Exception
def update_coords(self):
"""
Funny thing:
speed_x is speed on x coord (can be negative)
speed_y is abs (can't be negative)
"""
self.cx += self.speed_x
self.cy += self.speed_y * self.direction
def draw(self):
"""Draw ball on field."""
coordinates = (int(self.cx), int(self.cy))
draw.circle(screen, settings.enemy_shield_color, coordinates,
self.radius + self.shield_thickness)
draw.circle(screen, self.color, coordinates, self.radius)
def receive_damage(self, bullets, bullet):
"""Control how balls receive damage from bullets"""
if bullet.father != self.instance:
damage = bullet.damage / ((not stats.single_player) + 1)
if self.shield_life > 0:
self.shield_life -= damage
if self.shield_life <= 0:
self.life += self.shield_life
self.shield_life = 0
else:
self.life -= damage
self.set_radiuses()
bullet.kill()
def set_radiuses(self):
"""
        Set ball shield and body radii (used after a bullet hits the ball)
"""
if self.life > 0:
self.radius = int(math.sqrt(self.life / 3.14))
if self.shield_life > 0:
self.shield_thickness = int(math.sqrt(self.shield_life / 20))
else:
self.shield_thickness = 0
else:
self.radius = 0
self.shield_life = 0
self.shield_thickness = 0
def set_lifes(self):
"""
        Set ball shield and body life values
        (derived from the current radius and shield thickness)
"""
if self.radius > 0:
self.life = self.radius ** 2 * 3.14
if self.shield_thickness > 0:
self.shield_life = self.shield_thickness * \
self.shield_thickness * 20
assert self.shield_life >= 0
else:
self.life = 0
self.shield_life = 0
self.shield_thickness = 0
def fire_bullet(self, bullets):
""""
Create an bullet if frame number is big enough.
"""
if self.bullet_type is not None and self.alive:
assert self.shoot_freq >= 1
if self.shoot_frame_cnt >= self.shoot_freq:
self.shoot_frame_cnt -= self.shoot_freq
bullets.add(
settings.bullet_constructors[self.bullet_type](self)
)
self.shoot_frame_cnt += 1
def change_bullets(self, bullet_type: str) -> None:
self.bullet_type = bullet_type
self.shoot_freq = settings.innerFPS / \
settings.BulletPerSecond[self.bullet_type]
self.shoot_frame_cnt = random.randint(0, 100) % self.shoot_freq
|
#from asyncio.windows_events import NULL
from PIL import Image, ImageDraw, ImageFilter, ImageFont
import os
import glob
frame ={
"width": 150,
"height": 150,
"crops": True,
}
icon = {
"jpg":{ "isImage": True},
"gif":{ "isImage": True},
"png":{ "isImage": True},
"zip":{
"isImage": False,
"b_r" : 0x87,
"b_g" : 0xCE,
"b_b" : 0xEB,
"f_sz": 48,
"f_x": 15,
"f_y": 50,
"title": "ZIP",
},
"doc":{
"isImage": False,
"b_r" : 0x7F,
"b_g" : 0xFF,
"b_b" : 0xD4,
"f_sz": 24,
"f_x": 15,
"f_y": 50,
"title": "Word\nDocument",
},
"pdf":{
"isImage": False,
"b_r" : 0xFF,
"b_g" : 0xC0,
"b_b" : 0xCB,
"f_sz": 48,
"f_x": 15,
"f_y": 50,
"title": "PDF",
},
"csv":{
"isImage": False,
"b_r" : 0xFF,
"b_g" : 0xC0,
"b_b" : 0xCB,
"f_sz": 48,
"f_x": 15,
"f_y": 50,
"title": "CSV",
},
"unknown":{
"isImage": False,
"b_r" : 0xC0,
"b_g" : 0xC0,
"b_b" : 0xC0,
"f_sz": 24,
"f_x": 15,
"f_y": 50,
"title": "Unknown",
},
}
def crop_center(pil_img, crop_width, crop_height):
img_width, img_height = pil_img.size
return pil_img.crop(((img_width - crop_width) // 2,
(img_height - crop_height) // 2,
(img_width + crop_width) // 2,
(img_height + crop_height) // 2))
def crop_max_square(pil_img):
return crop_center(pil_img, min(pil_img.size), min(pil_img.size))
def chext(filename):
#print("chext",filename,os.path.isfile(filename))
#if os.path.isfile(filename) == False: return
basename = os.path.basename(filename)
dirname = os.path.dirname(filename)
fname = basename.split('.')[0].lower()
ext = basename.split('.')[1].lower()
if icon.get(ext):
if icon[ext]['isImage']:
return filename
else:
return f"{dirname}/{fname}.png"
else:
return f"{dirname}/{fname}.png"
def make_thumb(filename,save_dir="./thumbs"):
if os.path.isfile(filename) == False: return
basename = os.path.basename(filename)
dirname = os.path.dirname(filename)
fname = basename.split('.')[0].lower()
ext = basename.split('.')[1].lower()
print(ext)
if ext in icon.keys():
pass
else:
ext = "unknown"
if icon[ext]['isImage']:
#print("FILE:",filename)
#print("SAVE_DIR:",save_dir)
#print("BASE NAME:",basename)
im = Image.open(filename)
thumb_width = frame['width']
im_crop_maxsq = crop_max_square(im)
im_thumb = im_crop_maxsq.resize((thumb_width,thumb_width))
os.makedirs(save_dir, exist_ok=True)
im_thumb.save(f'{save_dir}/{basename}', quality=95)
else:
im = Image.new("RGB", (frame['width'], frame['height']), (icon[ext]['b_r'], icon[ext]['b_g'], icon[ext]['b_b']))
draw = ImageDraw.Draw(im)
font = ImageFont.truetype('ipaexg.ttf', icon[ext]['f_sz'])
draw.multiline_text((icon[ext]['f_x'], icon[ext]['f_y']), icon[ext]['title'], fill=(0, 0, 0), font=font)
os.makedirs(save_dir, exist_ok=True)
im.save(f'{save_dir}/{fname}.png', quality=95)
return
def save_file(id,dir_path,f):
os.makedirs(f'{dir_path}/{id}', exist_ok=True)
file_path = f'{dir_path}/{id}/{f.filename}'
print("path:",file_path)
f.save(file_path)
make_thumb(file_path,f"{dir_path}/{id}/thumbs")
response = {
"text":"OK",
"fileId": id,
"filename": f.filename,
"mimetype": f.mimetype,
}
return response
def remove_files(dict_list):
for f in dict_list['files']:
os.remove(f)
for f in dict_list['thumbs']:
os.remove(f)
return {"result":"OK"}
def remove_files2(dict_files, base_dir):  # receives the file list as-is
for f in dict_files:
if f.get('isSelect'):
            if f.get('path'): os.remove(f['base_dir'] + "/" + f['sub_path'])
            if f.get('thumbPath'): os.remove(f['base_dir'] + "/" + f['sub_thumbPath'])
return {"result":"OK"}
def get_flist(base_dir,dir_path):
print("get_flist:",base_dir,dir_path)
file_path_list = glob.glob(f'{base_dir}/{dir_path}/*')
arry = []
for f in file_path_list:
#f = os.path.normpath(f)
if os.path.isfile(f):
f_0 = os.path.split(f)[0]
f_1 = os.path.split(f)[1]
f_sub = f_0.replace(f"{base_dir}/",'')
dict_flist = {
"path": f_0+"/"+f_1,
"filename" : f_1,
"base_dir": base_dir,
"sub_path": f_sub+"/"+f_1,
"dir" : f_0,
"thumbPath": chext(f_0+"/thumbs/"+f_1),
"sub_thumbPath": chext(f_sub+"/thumbs/"+f_1),
"type": f_1.split('.')[-1].lower(),
"isfile": os.path.isfile(f),
"isdir": os.path.isdir(f),
"status": os.stat(f),
"isSelect":False,
"url": "/get-file/"+f_sub+"/"+f_1,
"urlThumb": "/get-file/"+chext(f_sub+"/thumbs/"+f_1)
}
#app.logger.debug(dict_flist)
arry.append(dict_flist)
return arry
def get_dir_info(base_dir,dir_path):
print("get_flist:",base_dir,dir_path)
file_path_list = glob.glob(f'{base_dir}/{dir_path}/*')
arry = []
for f in file_path_list:
if os.path.isdir(f):
f_0 = os.path.split(f)[0]
f_1 = os.path.split(f)[1]
f_2 = os.path.split(f)[-1]
f_sub = f_0.replace(f"{base_dir}/",'')
count = [os.path.isfile(ff) for ff in glob.glob(f+'/*')].count(True)
dict_flist = {
"path": f_0+"/"+f_1,
"base_dir": base_dir,
"sub_path": f_sub+"/"+f_1,
"dir" : f_0,
"key": f_2,
"count_files": count
}
arry.append(dict_flist)
return arry
if __name__ == "__main__":
#files = ["./test1.Doc","./test2.Zip","./test3.pdf","./test4.xyz","./test5.jpg"]
#for f in files:
# #make_thumb(f,'./thumbs')
# make_thumb(f)
d_dict={}
for d in get_dir_info('../static','upload/invoices'):
print(d['key'],d['count_files'])
d_dict[d['key']] = d['count_files']
print (d_dict)
|
from matplotlib import rc
rc('text', usetex=True) # this is if you want to use latex to print text. If you do you can create strings that go on labels or titles like this for example (with an r in front): r"$n=$ " + str(int(n))
from numpy import *
from pylab import *
import random
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import matplotlib.lines as lns
from scipy import stats
from matplotlib.patches import Polygon, Circle
import matplotlib.font_manager as fm
import math
def latex_float(f):
float_str = "{0:.2g}".format(f)
if "e" in float_str:
base, exponent = float_str.split("e")
return r"{0} \times 10^{{{1}}}".format(base, int(exponent))
else:
return float_str
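# For example (illustrative): latex_float(1.5e10) returns "1.5 \times 10^{10}",
# while latex_float(0.25) simply returns "0.25".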
class EffPt( object ):
def __init__( self, elp ):
self.cols = ["varname","bxf","flops","ai","rts"]
self.varname = elp[4][6:-1]
self.bxf = float(elp[6])
self.flops = float(elp[7])
self.ai = float(elp[8])
self.rts = float(elp[9])
self.opinfo = elp[0:3] + elp[5:6] # for checking if two pts are the same operation
self.comp = None # point to compare this point against (if any)
def __str__( self ):
return " ".join( str(col)+"="+str(getattr(self,col)) for col in self.cols )
class varinfo( object ):
def __init__( self, name, color, mark='o', mark_comp='d' ):
self.name = name
self.color = color
self.mark = mark
self.mark_comp = mark_comp
self.art = plt.Line2D((0,0),(0,0), color=self.color, marker=self.mark, linestyle='')
self.art_comp = plt.Line2D((0,0),(0,0), color=self.color, marker=self.mark_comp, linestyle='')
self.num_use = 0
self.num_use_comp = 0
def clear_use( self ):
self.num_use = 0
self.num_use_comp = 0
def inc_use( self, is_comp ):
if is_comp:
self.num_use_comp += 1
else:
self.num_use += 1
def get_mark( self, is_comp ):
return self.mark_comp if is_comp else self.mark
def get_leg( self, leg_art, leg_lab ):
verb_name = "\\verb|"+self.name+"|"
if self.num_use:
leg_art.append( self.art)
leg_lab.append( verb_name )
if self.num_use_comp:
leg_art.append( self.art_comp)
leg_lab.append( verb_name[:-1] + " (Comp)|" )
self.clear_use()
vis = [
varinfo( "conv", "cornflowerblue" ),
varinfo( "conv_simd", "cornflowerblue" ),
varinfo( "k1conv", "green" ),
varinfo( "k1conv_simd", "green" ),
varinfo( "tconv", "purple" ),
varinfo( "cudnn_conv", "red" ),
]
vis_map = { vi.name:vi for vi in vis }
def inc_comp( epts ):
for ept in epts:
yield ept
if ept.comp: yield ept.comp
def read_eff_file( epts, fn ):
els = open( fn ).readlines()
for el in els:
elps = el.split("&")
elps = [ elp.strip() for elp in elps ]
#print len(elps), elps
assert len(elps) == 12
epts.append( EffPt( elps ) )
if math.isnan(epts[-1].rts): epts.pop()
def adj_tick_lab( lab ):
lt = lab.get_text()
if not lt: return ""
if lt[0] == "$": lt = lt[1:-1]
neg = 1.0
if lt[0] == u'\u2212': lt = lt[1:]; neg = -1.0
return "$%s$" % latex_float(10**(neg*float(lt)))
class EffPlot( object ):
def __init__( self, args ):
self.args = args
self.epts = []
self.epts_comp = []
read_eff_file( self.epts, self.args.eff_fn )
if self.args.eff_comp_fn:
read_eff_file( self.epts_comp, self.args.eff_comp_fn )
assert len(self.epts) == len(self.epts_comp)
for ept,ept_comp in zip(self.epts,self.epts_comp):
assert ept.opinfo == ept_comp.opinfo
ept.comp = ept_comp
self.do_plots()
if self.args.do_zooms:
for zl in [1,2]:
self.args.out_fn += "-zoom"
max_flops = max( ept.flops for ept in self.epts )
self.epts = [ ept for ept in self.epts if ept.flops < (max_flops/10.0) ]
self.do_plots()
def skip_plot_check_flops_vs_time( self, ept ):
if not ept.comp: return 0 # no comp? if so, never skip.
delta = abs( ept.rts - ept.comp.rts )
rel_delta = delta * 2.0 / (ept.rts + ept.comp.rts)
        # if rel_delta < self.args.min_rel_delta_to_show: return 1 # we're really trying to show variant differences, so skip this check
        if ept.varname == ept.comp.varname: return 1 # FIXME: skip when comp is the same variant. not right in general, but okay for now
# FIXME: a few data points have the same variant, but sig. diff runtimes. there is certainly some noise in the runtimes, or the code might have shifted a bit between the two runs, or it's possible the tuning params were a little different between the two runs. for now, we'll skip such points, but we should investigate more.
return 0
def plot_flops_vs_time_pt( self, ax, ept, is_comp ):
vi = vis_map[ept.varname]
vi.inc_use( is_comp )
x,y = math.log(ept.flops,10), math.log(ept.rts,10)
ax.plot(x, y, color=vi.color, markersize=4, alpha=.7, marker=vi.get_mark(is_comp), linestyle=' ' )
return x,y
def plot_fps_vs_ai_pt( self, ax, ept, is_comp ):
vi = vis_map[ept.varname]
vi.inc_use( is_comp )
x = ept.ai
y = ept.flops/ept.rts
ax.plot( x,y, color=vi.color, markersize=2*max(1,math.log(ept.flops,10)-6), alpha=.7, marker=vi.get_mark(is_comp), linestyle=' ' )
return x,y
def do_plots( self ):
# flops vs runtime plot with 60GF/s line
background_color =(0.85,0.85,0.85) #'#C0C0C0'
grid_color = 'white' #FAFAF7'
rc('axes', facecolor = background_color)
rc('axes', edgecolor = grid_color)
rc('axes', linewidth = 1.2)
rc('axes', grid = True )
rc('axes', axisbelow = True)
rc('grid',color = grid_color)
rc('grid',linestyle='-' )
rc('grid',linewidth=0.7 )
#rc('xtick.major',size =0 )
#rc('xtick.minor',size =0 )
#rc('ytick.major',size =0 )
#rc('ytick.minor',size =0 )
# filter data based on skip check
self.epts = [ ept for ept in self.epts if not self.skip_plot_check_flops_vs_time( ept ) ]
fig = plt.figure()
ax = fig.add_subplot(111)
#formatting:
ax.set_title("RUNTIME (seconds) vs \\#-of-FLOPS [log/log scale]",fontsize=12,fontweight='bold')
ax.set_xlabel("\\#-of-FLOPS", fontsize=12) # ,fontproperties = font)
ax.set_ylabel("RUNTIME (seconds)", fontsize=12) # ,fontproperties = font)
x = [ math.log(ept.flops,10) for ept in inc_comp(self.epts) ]
y = [ math.log(ept.rts,10) for ept in inc_comp(self.epts) ]
self.set_bnds( ax, x, y )
# print matplotlib.lines.Line2D.filled_markers
# --> (u'o', u'v', u'^', u'<', u'>', u'8', u's', u'p', u'*', u'h', u'H', u'D', u'd')
for ept in self.epts:
x,y = self.plot_flops_vs_time_pt( ax, ept, 0 )
if ept.comp:
xc,yc = self.plot_flops_vs_time_pt( ax, ept.comp, 1 )
ax.plot( [x,xc], [y,yc], linewidth=0.5, color='black' )
leg_art = []; leg_lab = []
for vi in vis: vi.get_leg( leg_art, leg_lab )
legend = ax.legend(leg_art,leg_lab,loc='lower right', shadow=True, fontsize='small',numpoints=1,ncol=1)
legend.get_frame().set_facecolor('#eeddcc')
max_fps = max( ept.flops/ept.rts for ept in inc_comp(self.epts) )
log10_max_fps = int(math.ceil(math.log(max_fps,10)))
if 1:
fps_bnd = 10**log10_max_fps
self.add_fps_line( ax, fps_bnd / 10.0 )
self.add_fps_line( ax, fps_bnd / 5.0 )
self.add_fps_line( ax, fps_bnd / 2.0 )
self.add_fps_line( ax, fps_bnd )
self.adj_ticks(ax,fig)
fig.savefig( self.args.out_fn + "." + self.args.out_fmt, dpi=600, bbox_inches='tight')
fig = plt.figure()
ax = fig.add_subplot(111)
#formatting:
ax.set_title("F/s vs Arithmetic Intensity",fontsize=12,fontweight='bold')
ax.set_xlabel("Arithmetic Intensity", fontsize=12) # ,fontproperties = font)
ax.set_ylabel("F/s", fontsize=12) # ,fontproperties = font)
x = [ ept.ai for ept in inc_comp(self.epts) ]
y = [ ept.flops/ept.rts for ept in inc_comp(self.epts) ]
self.set_bnds( ax, x, y )
# print matplotlib.lines.Line2D.filled_markers
# --> (u'o', u'v', u'^', u'<', u'>', u'8', u's', u'p', u'*', u'h', u'H', u'D', u'd')
for ept in self.epts:
x,y = self.plot_fps_vs_ai_pt( ax, ept, 0 )
if ept.comp:
xc,yc = self.plot_fps_vs_ai_pt( ax, ept.comp, 1 )
ax.plot( [x,xc], [y,yc], linewidth=0.5, color='black' )
leg_art = []; leg_lab = []
for vi in vis: vi.get_leg( leg_art, leg_lab )
max_flops = max( ept.flops for ept in inc_comp(self.epts) )
mfl = int(math.ceil(math.log(max_flops,10)))
for ls in range(max(mfl-5,1),mfl):
ms=2*max(1,ls-6)
leg_art += [plt.Line2D((0,0),(0,0), color="black", marker='o', linestyle='', markersize=ms)]
leg_lab += ["$10^{"+str(ls)+"}$ Flops"]
legend = ax.legend(leg_art,leg_lab,loc='upper right', shadow=True, fontsize='small',numpoints=1,ncol=1)
legend.get_frame().set_facecolor('#eeddcc')
fig.canvas.draw()
fig.savefig( self.args.out_fn + "-ai" + "." + self.args.out_fmt, dpi=600, bbox_inches='tight')
# ai vs GF/s plot
def set_bnds( self, ax, x, y ):
self.x_min = min(x)
self.x_max = max(x)*1.05
self.y_min = min(y)
self.y_max = max(y)*1.05
ax.axis([self.x_min,self.x_max,self.y_min,self.y_max])
self.data_aspect = float(self.x_max - self.x_min ) / (self.y_max - self.y_min)
#self.axis_aspect_rat = .618
self.axis_aspect_rat = 1
self.axis_aspect = self.axis_aspect_rat * self.data_aspect
ax.set_aspect(self.axis_aspect)
def adj_ticks( self, ax, fig ):
fig.canvas.draw()
tls = ax.get_xticklabels()
tls = [ adj_tick_lab(lab) for lab in tls ]
ax.set_xticklabels( tls )
tls = ax.get_yticklabels()
tls = [ adj_tick_lab(lab) for lab in tls ]
ax.set_yticklabels( tls )
def add_fps_line( self, ax, fps ): self.add_fps_line_log( ax, fps )
def add_fps_line_lin( self, ax, fps ):
#Peak performance line and text
x = [self.x_min,(self.x_min+self.x_max)*0.5,self.x_max]
y = [ v/fps for v in x ]
y_mid = (self.y_min+self.y_max)/2
if y[1] > y_mid: # high slope case; use target y val
y[1] = y_mid
x[1] = y[1]*fps
ax.plot(x,y, linewidth=1.0, color='black', linestyle=':' )
label_string = "%.1fGF/s" % (fps/1e9)
rot=np.arctan(y[1]/x[1]*self.axis_aspect) * 180 / np.pi
ax.text(x[1], y[1], label_string, fontsize=8, rotation=rot, ha="left", va="bottom")
def add_fps_line_log( self, ax, fps ):
#Peak performance line and text
x = [self.x_min,self.x_min*0.2+self.x_max*0.8,self.x_max]
y = [ v - math.log(fps,10) for v in x ]
y_mid = self.y_min*0.2+self.y_max*0.8
if y[1] > y_mid: # high slope case; use target y val
y[1] = y_mid
x[1] = y[1] + math.log(fps,10)
ax.plot(x,y, linewidth=1.0, color='black', linestyle=':' )
label_string = "%.1fGF/s" % (fps/1e9)
rot=np.arctan(self.data_aspect) * 180 / np.pi
ax.text(x[1], y[1], label_string, fontsize=12, rotation=rot, ha="left", va="bottom")
import argparse
parser = argparse.ArgumentParser(description='Create eff plots.')
parser.add_argument('--eff-fn', metavar="FN", type=str, default="eff-tab.raw", help="filename of eff values in latex table format" )
parser.add_argument('--eff-comp-fn', metavar="FN", type=str, default="", help="filename of eff values in latex table format for comparison to those from the file specified by --eff-fn" )
parser.add_argument('--out-fn', metavar="FN", type=str, default="eff", help="base filename of output plot image" )
parser.add_argument('--out-fmt', metavar="EXT", type=str, default="png", help="extention/format for output plot image" )
parser.add_argument('--do-zooms', metavar="BOOL", type=bool, default=0, help="if true, output zoomed and 2X zoomed graphs" )
parser.add_argument('--min-rel-delta-to-show', metavar="FLOAT", type=float, default=0.05, help="if true, skip showing points where delta/avg is < this value in comparison mode" )
args = parser.parse_args()
ep = EffPlot(args)
# example command lines for generating inputs to this script:
# boda on titan-X, optimized variants enabled
# boda cnn_op_info --cnn-func-sigs-fn='%(boda_test_dir)'/conv-ops-1-5-20-nin-alex-gn.txt --op-eff-tab-fn=conv-1-5-20-nin-alex-gn-titanX-boda.raw --rtc='(be=nvrtc)' --gen-data='(type=foo,str_vals=(vi=0.0f,mode=5))' --op-tune='(tconv=1,k1conv=1)' --rtc-comp='(be=nvrtc)' --max-err=10 --show-rtc-calls=1 --mad-toler=3e-3 --print-format=1 --inc-op-info-in-eff=1
# run on SD820, optimizations enabled, no comparison:
# export SD820_RTC="rtc=(be=ipc,remote_rtc=(be=ocl,gen_src=1,gen_src_output_dir=/data/local/rtc-gen-src),spawn_str=adb shell LD_LIBRARY_PATH=/data/local/lib /data/local/bin/boda,spawn_shell_escape_args=1,boda_parent_addr=tcp:10.0.0.100:12791)"
# export OP_TUNE="op_tune=(use_culibs=0,MNt=8:8,MNb=16:16,k1conv=1,tconv=0,Kb=1,vw=8,use_local_mem=2)"
# boda cnn_op_info --cnn-func-sigs-fn='%(boda_test_dir)'/conv-ops-1-5-20-nin-alex-gn.txt --op-eff-tab-fn=conv-1-5-20-nin-alex-gn-SD820-boda.raw --"${SD820_RTC}" --"${OP_TUNE}" --show-rtc-calls=1 --peak-flops=320e9 --print-format=1 --inc-op-info-in-eff=1
|
import unittest
from lib import constants
class TestCaseForTestnet(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
constants.set_testnet()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
constants.set_mainnet()
|
import os
from fnmatch import fnmatch
def find_files(pattern: str, root: str):
paths = []
for path, subdirs, files in os.walk(root):
del subdirs
for name in files:
if fnmatch(name, pattern):
paths.append(os.path.join(path, name))
return paths
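# Illustrative usage (pattern and root directory are examples only):
if __name__ == "__main__":
    for path in find_files("*.py", "."):
        print(path)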
|
# Copyright (c) Yugabyte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import os
import glob
import subprocess
from sys_detection import is_macos
from yugabyte_db_thirdparty.custom_logging import log
def fix_shared_library_references(
install_prefix: str,
lib_name_prefix: str) -> None:
if not is_macos():
return
lib_dir = os.path.realpath(os.path.join(install_prefix, "lib"))
lib_path_glob = os.path.join(lib_dir, lib_name_prefix + "*.dylib")
lib_paths = glob.glob(lib_path_glob)
bin_dir = os.path.realpath(os.path.join(install_prefix, "sbin"))
bin_path_glob = os.path.join(bin_dir, "*")
bin_paths = glob.glob(bin_path_glob)
log("Using these glob patterns to look for libraries and executables to fix RPATHs in: %s",
(lib_path_glob, bin_path_glob))
if not lib_paths:
log("Warning: no library paths found using glob %s", lib_path_glob)
if not bin_paths:
log("Warning: no executables found using glob %s", bin_path_glob)
for lib in lib_paths + bin_paths:
log("Ensuring %s uses @rpath correctly", lib)
if os.path.islink(lib):
log("%s is a link, skipping", lib)
continue
otool_output = subprocess.check_output(['otool', '-L', lib]).decode('utf-8')
lib_basename = os.path.basename(lib)
for line in otool_output.split('\n'):
if line.startswith('\t' + lib_name_prefix):
dependency_name = line.strip().split()[0]
dependency_real_name = os.path.relpath(
os.path.realpath(os.path.join(lib_dir, dependency_name)),
lib_dir)
if lib_basename in [dependency_name, dependency_real_name]:
log("Making %s refer to itself using @rpath", lib)
subprocess.check_call([
'install_name_tool',
'-id',
'@rpath/' + dependency_name,
lib
])
else:
log("Making %s refer to %s using @loader_path",
lib, dependency_name)
subprocess.check_call([
'install_name_tool',
'-change',
dependency_name,
'@loader_path/' + dependency_name,
lib
])
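# Illustrative call (the install prefix and library prefix are examples only):
# rewrite install names for every "libfoo*.dylib" under <prefix>/lib and for the
# executables under <prefix>/sbin so they resolve via @rpath / @loader_path.
#
#   fix_shared_library_references("/opt/yb-build/thirdparty/foo-1.0", "libfoo")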
|
"""
Objects emitted whilst a deprecated object is being used.
"""
import attr
def _qualname(obj):
"""
Return the (non-fully-)qualified name of the given object.
"""
return obj.__qualname__
@attr.s(eq=True, frozen=True, hash=True)
class Deprecation(object):
"""
A single emitted deprecation.
"""
_kind = attr.ib()
_name_of = attr.ib(default=_qualname, repr=False)
_replacement = attr.ib(default=None, repr=False)
_removal_date = attr.ib(default=None, repr=False)
_addendum = attr.ib(default=None, repr=False)
def message(self):
parts = [self._kind.message(name_of=self._name_of)]
if self._removal_date is not None:
parts.append(
"It will be removed on or after {}.".format(self._removal_date)
)
if self._replacement is not None:
parts.append(
"Please use {} instead.".format(
self._name_of(self._replacement),
),
)
if self._addendum is not None:
parts.append(self._addendum)
return " ".join(parts)
# --* Representations of deprecated things *--
@attr.s(eq=True, frozen=True, hash=True)
class Callable(object):
"""
    A deprecated callable.
"""
_object = attr.ib()
def message(self, name_of):
return "{} is deprecated.".format(name_of(self._object))
@attr.s(eq=True, frozen=True, hash=True)
class Inheritance(object):
"""
The subclassing of a given parent type.
"""
_type = attr.ib()
def message(self, name_of):
return "Subclassing from {} is deprecated.".format(
name_of(self._type),
)
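# Illustrative usage (a sketch, not part of the original module): wrap a
# deprecated callable in a Deprecation and render its message.
if __name__ == "__main__":
    def old_api():
        pass
    deprecation = Deprecation(
        kind=Callable(object=old_api),
        replacement=print,
        removal_date="2024-01-01",
    )
    print(deprecation.message())
    # -> old_api is deprecated. It will be removed on or after 2024-01-01.
    #    Please use print instead.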
|
# https://scikit-learn.org/stable/modules/feature_selection.html#
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import mutual_info_regression
import warnings
warnings.filterwarnings('ignore')
def selector(
X_train, X_test, y_train, y_test, n_features,
method='f-regression'
):
"""[summary]
Args:
X_train (array): n_X_train_samples x None
X_test (array): n_test_samples x None
y_train (array): n_y_train_samples x None
y_test (array): n_y_test_samples x None
n_features (int): number of features
method (str, optional): [description]. Defaults to 'f-classif'.
Raises:
Exception: [description]
Returns:
tuple: X_train_reduce, X_test_reduced, y_train_reduced, y_test_reduced
"""
if method == 'f-classif':
raise Exception("f-classif cannot be used for regression purpose")
elif method == 'f-regression':
return _f_regression_selector(X_train, X_test, y_train, y_test,
n_features)
elif method == 'mutual-info-regression':
return _mutual_info_regression_selector(X_train, X_test,
y_train, y_test,
n_features)
else:
raise Exception("Not supported feature selectio method"
"Do not use underscore _")
def _f_classif_square_selector(X_train, X_test, y_train, y_test, n_features):
    kbest = SelectKBest(f_classif, k=n_features)
kbest = kbest.fit(X_train, y_train)
X_train = kbest.transform(X_train)
X_test = kbest.transform(X_test)
return X_train, X_test, y_train, y_test
def _f_regression_selector(X_train, X_test, y_train, y_test, n_features):
    kbest = SelectKBest(f_regression, k=n_features)
kbest.fit(X_train, y_train)
X_train = kbest.transform(X_train)
X_test = kbest.transform(X_test)
return X_train, X_test, y_train, y_test
def _mutual_info_regression_selector(X_train, X_test, y_train, y_test,
n_features):
    kbest = SelectKBest(mutual_info_regression, k=n_features)
kbest.fit(X_train, y_train)
X_train = kbest.transform(X_train)
X_test = kbest.transform(X_test)
return X_train, X_test, y_train, y_test
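# Illustrative usage with synthetic data (the array shapes and the simple
# train/test split below are made up purely for demonstration):
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 10)
    y = 3.0 * X[:, 0] + 0.1 * rng.rand(100)
    X_train, X_test = X[:80], X[80:]
    y_train, y_test = y[:80], y[80:]
    X_train_red, X_test_red, _, _ = selector(
        X_train, X_test, y_train, y_test, n_features=3, method='f-regression')
    print(X_train_red.shape, X_test_red.shape)  # (80, 3) (20, 3)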
|
"""Helper module for handling network related commands."""
import os
import requests
from phytoolkit.exception.installationexception import InstallationException
class NetHelper:
"""Runs network commands."""
def __init__(self, config):
self.config = config
self.console = config.console
def download_file(self, url, target_file):
"""Downloads file from URL to target file."""
if not str.startswith(target_file, self.config.dest_dir):
target_file = os.path.join(self.config.dest_dir, target_file)
self.console.verbose_info("Downloading file %s from url %s." % (target_file, url))
response = requests.get(url)
if response.status_code not in [200, "200"]:
self.console.verbose_error("Download failed with status code %s" % response.status_code)
raise InstallationException("Download of file %s failed." % target_file)
with open(target_file, "wb") as file_handle:
self.console.verbose_info("Download completed. Writing to file %s." % target_file)
file_handle.write(response.content)
self.console.verbose_success("Download of %s completed." % target_file)
|