Dataset schema (one record per source file):

- hexsha: string (length 40)
- size: int64 (4 to 1.02M)
- ext: string (8 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 4 to 209)
- max_stars_repo_name: string (length 5 to 121)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 4 to 209)
- max_issues_repo_name: string (length 5 to 121)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 4 to 209)
- max_forks_repo_name: string (length 5 to 121)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 4 to 1.02M)
- avg_line_length: float64 (1.07 to 66.1k)
- max_line_length: int64 (4 to 266k)
- alphanum_fraction: float64 (0.01 to 1)
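This dump has the shape of a Hugging Face datasets viewer export of a Stack-style source-code corpus, with one record per file and the code carried in the `content` column. Assuming that provenance, records with this schema could be streamed as below; the dataset id is a placeholder, not something stated in this document:

```python
# Hypothetical loading sketch; "org/source-code-dataset" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("org/source-code-dataset", split="train", streaming=True)
for record in ds:
    # Column names match the schema listed above.
    print(record["max_stars_repo_name"], record["size"], record["alphanum_fraction"])
    break
```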
hexsha: 6e2c2c7b7815793a7f95c3b44334d1d26f04fcb1 | size: 219 | ext: py | lang: Python
max_stars: path=ex11.py, repo=EiEiKyaw/python-exercise, head=63dcc073f55f125de784eb61aa7c82a50ca706ed, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_issues: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
max_forks: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
content:
print("How old are you?", end='')
age=input()
print("How tall are you?", end='')
height=input()
print("How much do you weigh?", end='')
weight=input()
print(f"So, you're {age} old, {height} tall and {weight} heavy.")
avg_line_length: 21.9 | max_line_length: 65 | alphanum_fraction: 0.634703
hexsha: c8a88c03f8c023b08a5d043032d47ea9703409ed | size: 1,414 | ext: py | lang: Python
max_stars: path=tests/rules/test_fab_command_not_found.py, repo=benmonro/therandy, head=b7c7c4ffc8f82b27f284ba90621a47baa5dfcb03, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_issues: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
max_forks: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
content:
import pytest

from therandy.rules.fab_command_not_found import match, get_new_command
from therandy.types import Command

output = '''
Warning: Command(s) not found:
extenson
deloyp
Available commands:
update_config
prepare_extension
Template A string class for supporting $-substitutions.
deploy
glob Return a list of paths matching a pathname pattern.
install_web
set_version
'''


@pytest.mark.parametrize('command', [
    Command('fab extenson', output),
    Command('fab deloyp', output),
    Command('fab extenson deloyp', output)])
def test_match(command):
    assert match(command)


@pytest.mark.parametrize('command', [
    Command('gulp extenson', output),
    Command('fab deloyp', '')])
def test_not_match(command):
    assert not match(command)


@pytest.mark.parametrize('script, result', [
    ('fab extenson', 'fab prepare_extension'),
    ('fab extenson:version=2016',
     'fab prepare_extension:version=2016'),
    ('fab extenson:version=2016 install_web set_version:val=0.5.0',
     'fab prepare_extension:version=2016 install_web set_version:val=0.5.0'),
    ('fab extenson:version=2016 deloyp:beta=true -H the.randy',
     'fab prepare_extension:version=2016 deploy:beta=true -H the.randy'),
])
def test_get_new_command(script, result):
    command = Command(script, output)
    assert get_new_command(command) == result
avg_line_length: 28.857143 | max_line_length: 78 | alphanum_fraction: 0.701556
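The rule module under test is not included in this record. As a reading aid, here is a minimal hedged sketch of logic that would satisfy the tests above, assuming `Command` exposes `script` and `output` attributes and using `difflib` for the closest-task lookup; the real `therandy.rules.fab_command_not_found` may differ:

```python
# Hypothetical sketch of therandy/rules/fab_command_not_found.py -- the real
# module is not part of this record, so names and details are assumptions.
from difflib import get_close_matches


def _available_commands(output):
    # Task names are the first token of each line after "Available commands:".
    lines = output.split('Available commands:', 1)[1].splitlines()
    return [line.split()[0] for line in lines if line.strip()]


def match(command):
    return (command.script.startswith('fab')
            and 'Warning: Command(s) not found:' in command.output)


def get_new_command(command):
    available = _available_commands(command.output)
    script = command.script.split(' ')
    for i, part in enumerate(script[1:], start=1):
        task = part.split(':', 1)[0]  # strip fab task args like :version=2016
        if task not in available and not task.startswith('-'):
            matches = get_close_matches(task, available, n=1)
            if matches:
                script[i] = part.replace(task, matches[0], 1)
    return ' '.join(script)
```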
hexsha: fd881fda86582169d7365e1513f0cd66a317951b | size: 12,622 | ext: py | lang: Python
max_stars: path=src/train_new.py, repo=AlexHeffner/DropEdge, head=d981b779be330d5ff4dd557d6db1615391bfc249, licenses=["MIT"], count=380, min_datetime=2019-10-09T16:18:52.000Z, max_datetime=2022-03-23T11:47:18.000Z
max_issues: (same path/repo/head/licenses as max_stars), count=16, min_datetime=2019-10-17T16:47:50.000Z, max_datetime=2022-03-04T13:02:32.000Z
max_forks: (same path/repo/head/licenses as max_stars), count=72, min_datetime=2019-10-13T08:03:37.000Z, max_datetime=2022-03-24T21:42:24.000Z
content:
from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from tensorboardX import SummaryWriter
from earlystopping import EarlyStopping
from sample import Sampler
from metric import accuracy, roc_auc_compute_fn
# from deepgcn.utils import load_data, accuracy
# from deepgcn.models import GCN
from metric import accuracy
from utils import load_citation, load_reddit_data
from models import *
from earlystopping import EarlyStopping
from sample import Sampler
# Training settings
parser = argparse.ArgumentParser()
# Training parameter
parser.add_argument('--no_cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
help='Disable validation during training.')
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
parser.add_argument('--epochs', type=int, default=800,
help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.02,
help='Initial learning rate.')
parser.add_argument('--lradjust', action='store_true',
                    default=False, help='Enable learning rate adjustment (ReduceLROnPlateau or linear reduce).')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument("--mixmode", action="store_true",
default=False, help="Enable CPU GPU mixing mode.")
parser.add_argument("--warm_start", default="",
help="The model name to be loaded for warm start.")
parser.add_argument('--debug', action='store_true',
                    default=False, help="Enable the detailed training output.")
parser.add_argument('--dataset', default="cora", help="The data set")
parser.add_argument('--datapath', default="data/", help="The data path.")
parser.add_argument("--early_stopping", type=int,
default=0, help="The patience of earlystopping. Do not adopt the earlystopping when it equals 0.")
parser.add_argument("--no_tensorboard", default=False, help="Disable writing logs to tensorboard")
# Model parameter
parser.add_argument('--type',
                    help="Choose the model to be trained (mutigcn, resgcn, densegcn, inceptiongcn).")
parser.add_argument('--inputlayer', default='gcn',
help="The input layer of the model.")
parser.add_argument('--outputlayer', default='gcn',
help="The output layer of the model.")
parser.add_argument('--hidden', type=int, default=128,
help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--withbn', action='store_true', default=False,
                    help='Enable Batch Norm GCN')
parser.add_argument('--withloop', action="store_true", default=False,
help="Enable loop layer GCN")
parser.add_argument('--nhiddenlayer', type=int, default=1,
help='The number of hidden layers.')
parser.add_argument("--normalization", default="AugNormAdj",
help="The normalization on the adj matrix.")
parser.add_argument("--sampling_percent", type=float, default=1.0,
help="The percent of the preserve edges. If it equals 1, no sampling is done on adj matrix.")
# parser.add_argument("--baseblock", default="res", help="The base building block (resgcn, densegcn, mutigcn, inceptiongcn).")
parser.add_argument("--nbaseblocklayer", type=int, default=1,
help="The number of layers in each baseblock")
parser.add_argument("--aggrmethod", default="default",
help="The aggrmethod for the layer aggreation. The options includes add and concat. Only valid in resgcn, densegcn and inecptiongcn")
parser.add_argument("--task_type", default="full", help="The node classification task type (full and semi). Only valid for cora, citeseer and pubmed dataset.")
args = parser.parse_args()
if args.debug:
print(args)
# pre setting
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.mixmode = args.no_cuda and args.mixmode and torch.cuda.is_available()
if args.aggrmethod == "default":
if args.type == "resgcn":
args.aggrmethod = "add"
else:
args.aggrmethod = "concat"
if args.fastmode and args.early_stopping > 0:
args.early_stopping = 0
print("In the fast mode, early_stopping is not valid option. Setting early_stopping = 0.")
if args.type == "mutigcn":
print("For the multi-layer gcn model, the aggrmethod is fixed to nores and nhiddenlayers = 1.")
args.nhiddenlayer = 1
args.aggrmethod = "nores"
# random seed setting
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda or args.mixmode:
torch.cuda.manual_seed(args.seed)
# should we need fix random seed here?
sampler = Sampler(args.dataset, args.datapath, args.task_type)
# get labels and indexes
labels, idx_train, idx_val, idx_test = sampler.get_label_and_idxes(args.cuda)
nfeat = sampler.nfeat
nclass = sampler.nclass
print("nclass: %d\tnfea:%d" % (nclass, nfeat))
# The model
model = GCNModel(nfeat=nfeat,
nhid=args.hidden,
nclass=nclass,
nhidlayer=args.nhiddenlayer,
dropout=args.dropout,
baseblock=args.type,
inputlayer=args.inputlayer,
outputlayer=args.outputlayer,
nbaselayer=args.nbaseblocklayer,
activation=F.relu,
withbn=args.withbn,
withloop=args.withloop,
aggrmethod=args.aggrmethod,
mixmode=args.mixmode)
optimizer = optim.Adam(model.parameters(),
lr=args.lr, weight_decay=args.weight_decay)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=50, factor=0.618)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200, 300, 400, 500, 600, 700], gamma=0.5)
# convert to cuda
if args.cuda:
model.cuda()
# For the mix mode, lables and indexes are in cuda.
if args.cuda or args.mixmode:
labels = labels.cuda()
idx_train = idx_train.cuda()
idx_val = idx_val.cuda()
idx_test = idx_test.cuda()
if args.warm_start is not None and args.warm_start != "":
early_stopping = EarlyStopping(fname=args.warm_start, verbose=False)
print("Restore checkpoint from %s" % (early_stopping.fname))
model.load_state_dict(early_stopping.load_checkpoint())
# set early_stopping
if args.early_stopping > 0:
early_stopping = EarlyStopping(patience=args.early_stopping, verbose=False)
print("Model is saving to: %s" % (early_stopping.fname))
if args.no_tensorboard is False:
tb_writer = SummaryWriter(
comment=f"-dataset_{args.dataset}-type_{args.type}"
)
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
# define the training function.
def train(epoch, train_adj, train_fea, idx_train, val_adj=None, val_fea=None):
if val_adj is None:
val_adj = train_adj
val_fea = train_fea
t = time.time()
model.train()
optimizer.zero_grad()
output = model(train_fea, train_adj)
# special for reddit
if sampler.learning_type == "inductive":
loss_train = F.nll_loss(output, labels[idx_train])
acc_train = accuracy(output, labels[idx_train])
else:
loss_train = F.nll_loss(output[idx_train], labels[idx_train])
acc_train = accuracy(output[idx_train], labels[idx_train])
loss_train.backward()
optimizer.step()
train_t = time.time() - t
val_t = time.time()
# We can not apply the fastmode for the reddit dataset.
# if sampler.learning_type == "inductive" or not args.fastmode:
if args.early_stopping > 0 and sampler.dataset != "reddit":
loss_val = F.nll_loss(output[idx_val], labels[idx_val]).item()
early_stopping(loss_val, model)
if not args.fastmode:
# # Evaluate validation set performance separately,
# # deactivates dropout during validation run.
model.eval()
output = model(val_fea, val_adj)
loss_val = F.nll_loss(output[idx_val], labels[idx_val]).item()
acc_val = accuracy(output[idx_val], labels[idx_val]).item()
if sampler.dataset == "reddit":
early_stopping(loss_val, model)
else:
loss_val = 0
acc_val = 0
if args.lradjust:
scheduler.step()
val_t = time.time() - val_t
return (loss_train.item(), acc_train.item(), loss_val, acc_val, get_lr(optimizer), train_t, val_t)
def test(test_adj, test_fea):
model.eval()
output = model(test_fea, test_adj)
loss_test = F.nll_loss(output[idx_test], labels[idx_test])
acc_test = accuracy(output[idx_test], labels[idx_test])
auc_test = roc_auc_compute_fn(output[idx_test], labels[idx_test])
if args.debug:
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"auc= {:.4f}".format(auc_test),
"accuracy= {:.4f}".format(acc_test.item()))
print("accuracy=%.5f" % (acc_test.item()))
return (loss_test.item(), acc_test.item())
# Train model
t_total = time.time()
loss_train = np.zeros((args.epochs,))
acc_train = np.zeros((args.epochs,))
loss_val = np.zeros((args.epochs,))
acc_val = np.zeros((args.epochs,))
sampling_t = 0
for epoch in range(args.epochs):
input_idx_train = idx_train
sampling_t = time.time()
# no sampling
# randomedge sampling if args.sampling_percent >= 1.0, it behaves the same as stub_sampler.
(train_adj, train_fea) = sampler.randomedge_sampler(percent=args.sampling_percent, normalization=args.normalization,
cuda=args.cuda)
if args.mixmode:
train_adj = train_adj.cuda()
sampling_t = time.time() - sampling_t
# The validation set is controlled by idx_val
# if sampler.learning_type == "transductive":
if False:
outputs = train(epoch, train_adj, train_fea, input_idx_train)
else:
(val_adj, val_fea) = sampler.get_test_set(normalization=args.normalization, cuda=args.cuda)
if args.mixmode:
val_adj = val_adj.cuda()
outputs = train(epoch, train_adj, train_fea, input_idx_train, val_adj, val_fea)
if args.debug and epoch % 1 == 0:
print('Epoch: {:04d}'.format(epoch + 1),
'loss_train: {:.4f}'.format(outputs[0]),
'acc_train: {:.4f}'.format(outputs[1]),
'loss_val: {:.4f}'.format(outputs[2]),
'acc_val: {:.4f}'.format(outputs[3]),
'cur_lr: {:.5f}'.format(outputs[4]),
's_time: {:.4f}s'.format(sampling_t),
't_time: {:.4f}s'.format(outputs[5]),
'v_time: {:.4f}s'.format(outputs[6]))
if args.no_tensorboard is False:
tb_writer.add_scalars('Loss', {'train': outputs[0], 'val': outputs[2]}, epoch)
tb_writer.add_scalars('Accuracy', {'train': outputs[1], 'val': outputs[3]}, epoch)
tb_writer.add_scalar('lr', outputs[4], epoch)
tb_writer.add_scalars('Time', {'train': outputs[5], 'val': outputs[6]}, epoch)
loss_train[epoch], acc_train[epoch], loss_val[epoch], acc_val[epoch] = outputs[0], outputs[1], outputs[2], outputs[
3]
if args.early_stopping > 0 and early_stopping.early_stop:
print("Early stopping.")
model.load_state_dict(early_stopping.load_checkpoint())
break
if args.early_stopping > 0:
model.load_state_dict(early_stopping.load_checkpoint())
if args.debug:
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# Testing
(test_adj, test_fea) = sampler.get_test_set(normalization=args.normalization, cuda=args.cuda)
if args.mixmode:
test_adj = test_adj.cuda()
(loss_test, acc_test) = test(test_adj, test_fea)
print("%.6f\t%.6f\t%.6f\t%.6f\t%.6f\t%.6f" % (
loss_train[-1], loss_val[-1], loss_test, acc_train[-1], acc_val[-1], acc_test))
avg_line_length: 41.794702 | max_line_length: 160 | alphanum_fraction: 0.647599
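For context on the record above: the training loop calls `sampler.randomedge_sampler(...)` every epoch, and that per-epoch random subsetting of edges is the core DropEdge idea. The `Sampler` class is not part of this record, so the following is a hedged sketch of just the sampling step on a SciPy COO adjacency matrix, with the normalization step (e.g. AugNormAdj) left out:

```python
# Hypothetical sketch of DropEdge-style random edge sampling; the project's
# real Sampler lives elsewhere in the repo, so details here are assumptions.
import numpy as np
import scipy.sparse as sp


def randomedge_sampler(train_adj, percent):
    """Keep `percent` of the nonzero edges of a COO adjacency matrix."""
    if percent >= 1.0:
        return train_adj  # no sampling; behaves like a stub sampler
    nnz = train_adj.nnz
    perm = np.random.permutation(nnz)
    keep = perm[:int(nnz * percent)]
    sampled = sp.coo_matrix(
        (train_adj.data[keep], (train_adj.row[keep], train_adj.col[keep])),
        shape=train_adj.shape,
    )
    return sampled  # adjacency normalization would be applied here


# Example: drop 30% of edges from a toy 3-node graph (int(6 * 0.7) = 4 kept).
adj = sp.coo_matrix(np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float))
print(randomedge_sampler(adj, percent=0.7).nnz)
```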
hexsha: 37e6d298051012d51666f79b2be000c3221dd38b | size: 815 | ext: py | lang: Python
max_stars: path=root/os/DSAA/DataStructuresAndAlgorithms/python/chutils/chutils/chsocket/__init__.py, repo=chyidl/chyidlTutorial, head=a033e0a57abf84fdbb61e57736822f9126db6ff7, licenses=["MIT"], count=5, min_datetime=2018-10-17T05:57:39.000Z, max_datetime=2021-07-05T15:38:24.000Z
max_issues: (same path/repo/head/licenses as max_stars), count=2, min_datetime=2021-04-14T00:48:43.000Z, max_datetime=2021-04-14T02:20:50.000Z
max_forks: (same path/repo/head/licenses as max_stars), count=3, min_datetime=2019-03-02T14:36:19.000Z, max_datetime=2022-03-18T10:12:09.000Z
content:
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# __init__.py
# chsocket
#
# 🎂"Here's to the crazy ones. The misfits. The rebels.
# The troublemakers. The round pegs in the square holes.
# The ones who see things differently. They're not fond
# of rules. And they have no respect for the status quo.
# You can quote them, disagree with them, glorify or vilify
# them. About the only thing you can't do is ignore them.
# Because they change things. They push the human race forward.
# And while some may see them as the crazy ones, we see genius.
# Because the people who are crazy enough to think they can change
# the world, are the ones who do."
#
# Created by Chyi Yaqing on 03/06/19 09:48.
# Copyright © 2019. Chyi Yaqing.
# All rights reserved.
#
# Distributed under terms of the
# MIT
"""
Network socket programming
"""
avg_line_length: 29.107143 | max_line_length: 66 | alphanum_fraction: 0.716564
hexsha: 6e3a434dba5b6c794fe419e74c035a1677db95b6 | size: 287 | ext: py | lang: Python
max_stars: path=ex011 - area parede.py, repo=fblaz/Python-ex---curso-em-video, head=794d1f7b9fa0803b168aaf973007906b66a02e2d, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_issues: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
max_forks: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
content:
larg = float(input('wall width in meters: '))
alt = float(input('wall height in meters: '))
rend = 2  # coverage of 2 m2 per liter of paint
area = larg * alt
tinta = area / rend
print(f'the area is {area} m2 \n'
      f'you will need to buy {tinta:.2f} liters of paint')
avg_line_length: 41 | max_line_length: 61 | alphanum_fraction: 0.672474
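Since the script's arithmetic is just paint = area / coverage, here is a worked example with invented inputs:

```python
# Worked example with made-up numbers: a 5 m by 3 m wall at 2 m2 per liter.
area = 5.0 * 3.0    # 15.0 m2
tinta = area / 2    # 7.5 liters of paint
print(f'{tinta:.2f} liters')  # -> 7.50 liters
```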
hexsha: 0414778e36eb738ff3c83eb64b793b974e632049 | size: 21,642 | ext: py | lang: Python
max_stars: path=libopenzwave/_global.py, repo=Julian/libopenzwave-cffi, head=7683e5e4e08270dd7d780ab6a0ccd048343b08e1, licenses=["MIT"], count=1, min_datetime=2018-03-17T18:44:11.000Z, max_datetime=2018-03-17T18:44:11.000Z
max_issues: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
max_forks: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
content:
# -*- coding: utf-8 -*-
"""
.. module:: libopenzwave
This file is part of **python-openzwave** project https://github.com/OpenZWave/python-openzwave.
:platform: Unix, Windows, MacOS X
:synopsis: openzwave C++
.. moduleauthor: bibi21000 aka Sebastien GALLET <bibi21000@gmail.com>
.. moduleauthor: Maarten Damen <m.damen@gmail.com>
License : GPL(v3)
**python-openzwave** is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
**python-openzwave** is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with python-openzwave. If not, see http://www.gnu.org/licenses.
"""
class PyOptions(object):
"""
Manage options manager
"""
def __init__(self, config_path=None, user_path=".", cmd_line=""):
"""
Create an option object and check that parameters are valid.
:param device: The device to use
:type device: str
:param config_path: The openzwave config directory. If None, try to configure automatically.
:type config_path: str
:param user_path: The user directory
:type user_path: str
:param cmd_line: The "command line" options of the openzwave library
:type cmd_line: str
"""
if config_path is None:
config_path = self.getConfigPath()
if config_path is None:
raise LibZWaveException("Can't autoconfigure path to config")
self._config_path = config_path
if user_path is None:
user_path = "."
self._user_path = user_path
if cmd_line is None:
cmd_line=""
self._cmd_line = cmd_line
self.create(self._config_path, self._user_path, self._cmd_line)
def create(self, a, b, c):
"""
.. _createoptions:
Create an option object used to start the manager
:param a: The path of the config directory
:type a: str
:param b: The path of the user directory
:type b: str
:param c: The "command line" options of the openzwave library
:type c: str
:see: destroyoptions_
"""
self.options = CreateOptions(
str_to_cppstr(a), str_to_cppstr(b), str_to_cppstr(c))
return True
def destroy(self):
"""
.. _destroyoptions:
Deletes the Options and cleans up any associated objects.
The application is responsible for destroying the Options object,
but this must not be done until after the Manager object has been
destroyed.
:return: The result of the operation.
:rtype: bool
:see: createoptions_
"""
return self.options.Destroy()
def lock(self):
"""
.. _lock:
Lock the options. Needed to start the manager
:return: The result of the operation.
:rtype: bool
:see: areLocked_
"""
return self.options.Lock()
def areLocked(self):
'''
.. _areLocked:
Test whether the options have been locked.
:return: true if the options have been locked.
:rtype: boolean
:see: lock_
'''
return self.options.AreLocked()
def addOptionBool(self, name, value):
"""
.. _addOptionBool:
Add a boolean option.
:param name: The name of the option.
:type name: str
:param value: The value of the option.
:type value: boolean
:return: The result of the operation.
:rtype: bool
:see: addOption_, addOptionInt_, addOptionString_
"""
return self.options.AddOptionBool(str_to_cppstr(name), value)
def addOptionInt(self, name, value):
"""
.. _addOptionInt:
Add an integer option.
:param name: The name of the option.
:type name: str
:param value: The value of the option.
:type value: boolean
:return: The result of the operation.
:rtype: bool
:see: addOption_, addOptionBool_, addOptionString_
"""
return self.options.AddOptionInt(str_to_cppstr(name), value)
def addOptionString(self, name, value, append=False):
"""
.. _addOptionString:
Add a string option.
:param name: The name of the option. Option names are case insensitive and must be unique.
:type name: str
:param value: The value of the option.
:type value: str
:param append: Setting append to true will cause values read from the command line
or XML file to be concatenated into a comma delimited set. If _append is false,
newer values will overwrite older ones.
:type append: boolean
:return: The result of the operation.
:rtype: bool
:see: addOption_, addOptionBool_, addOptionInt_
"""
return self.options.AddOptionString(
str_to_cppstr(name), str_to_cppstr(value), append)
def addOption(self, name, value):
"""
.. _addOption:
Add an option.
:param name: The name of the option.
:type name: string
:param value: The value of the option.
:type value: boolean, integer, string
:return: The result of the operation.
:rtype: bool
:see: addOptionBool_, addOptionInt_, addOptionString_
"""
if name not in PyOptionList:
return False
if PyOptionList[name]['type'] == "String":
return self.addOptionString(name, value)
elif PyOptionList[name]['type'] == "Bool":
return self.addOptionBool(name, value)
elif PyOptionList[name]['type'] == "Int":
return self.addOptionInt(name, value)
return False
def getOption(self, name):
"""
.. _getOption:
Retrieve option of a value.
:param name: The name of the option.
:type name: string
:return: The value
:rtype: boolean, integer, string or None
:see: getOptionAsBool_, getOptionAsInt_, getOptionAsString_
"""
if name not in PyOptionList:
return None
if PyOptionList[name]['type'] == "String":
return self.getOptionAsString(name)
elif PyOptionList[name]['type'] == "Bool":
return self.getOptionAsBool(name)
elif PyOptionList[name]['type'] == "Int":
return self.getOptionAsInt(name)
return False
# def getOptionAsBool(self, name):
# """
# .. _getOptionAsBool:
#
# Retrieve boolean value of an option.
#
# :param name: The name of the option.
# :type name: string
# :return: The value or None
# :rtype: boolean or None
#
# :see: getOption_, getOptionAsInt_, getOptionAsString_
#
# """
# cdef bool type_bool
# cret = self.options.GetOptionAsBool(str_to_cppstr(name), &type_bool)
# ret = type_bool if cret==True else None
# return ret
#
# def getOptionAsInt(self, name):
# """
# .. _getOptionAsInt:
#
# Retrieve integer value of an option.
#
# :param name: The name of the option.
# :type name: string
# :return: The value or None
# :rtype: Integer or None
#
# :see: getOption_, getOptionAsBool_, getOptionAsString_
#
# """
# cdef int32_t type_int
# cret = self.options.GetOptionAsInt(str_to_cppstr(name), &type_int)
# ret = type_int if cret==True else None
# return ret
#
# def getOptionAsString(self, name):
# """
# .. _getOptionAsString:
#
# Retrieve string value of an option.
#
# :param name: The name of the option.
# :type name: string
# :return: The value or None
# :rtype: String or None
#
# :see: getOption_, getOptionAsBool_, getOptionAsInt_
#
# """
# cdef string type_string
# cret = self.options.GetOptionAsString(str_to_cppstr(name), &type_string)
# ret = cstr_to_str(type_string.c_str()) if cret==True else None
# return ret
def getConfigPath(self):
'''
.. _getConfigPath:
Retrieve the config path. This directory hold the xml files.
:return: A string containing the library config path or None.
:rtype: str
'''
return configPath()
PyStatDriver = {
'SOFCnt' : "Number of SOF bytes received",
'ACKWaiting' : "Number of unsolicited messages while waiting for an ACK",
'readAborts' : "Number of times read were aborted due to timeouts",
'badChecksum' : "Number of bad checksums",
'readCnt' : "Number of messages successfully read",
'writeCnt' : "Number of messages successfully sent",
'CANCnt' : "Number of CAN bytes received",
'NAKCnt' : "Number of NAK bytes received",
'ACKCnt' : "Number of ACK bytes received",
'OOFCnt' : "Number of bytes out of framing",
'dropped' : "Number of messages dropped & not delivered",
'retries' : "Number of messages retransmitted",
'callbacks' : "Number of unexpected callbacks",
'badroutes' : "Number of failed messages due to bad route response",
'noack' : "Number of no ACK returned errors",
'netbusy' : "Number of network busy/failure messages",
'nondelivery' : "Number of messages not delivered to network",
'routedbusy' : "Number of messages received with routed busy status",
'broadcastReadCnt' : "Number of broadcasts read",
'broadcastWriteCnt' : "Number of broadcasts sent",
}
PyLogLevels = {
'Invalid' : {'doc':'Invalid Log Status', 'value':0},
'None' : {'doc':'Disable all logging', 'value':1},
'Always' : {'doc':'These messages should always be shown', 'value':2},
'Fatal' : {'doc':'A likely fatal issue in the library', 'value':3},
'Error' : {'doc':'A serious issue with the library or the network', 'value':4},
'Warning' : {'doc':'A minor issue from which the library should be able to recover', 'value':5},
'Alert' : {'doc':'Something unexpected by the library about which the controlling application should be aware', 'value':6},
'Info' : {'doc':"Everything's working fine...these messages provide streamlined feedback on each message", 'value':7},
'Detail' : {'doc':'Detailed information on the progress of each message', 'value':8},
'Debug' : {'doc':'Very detailed information on progress that will create a huge log file quickly but this level (as others) can be queued and sent to the log only on an error or warning', 'value':9},
'StreamDetail' : {'doc':'Will include low-level byte transfers from controller to buffer to application and back', 'value':10},
    'Internal' : {'doc':'Used only within the log class (uses existing timestamp, etc.)', 'value':11},
}
class EnumWithDoc(str):
"""Enum helper"""
def setDoc(self, doc):
self.doc = doc
return self
PyControllerState = [
EnumWithDoc('Normal').setDoc("No command in progress."),
EnumWithDoc('Starting').setDoc("The command is starting."),
EnumWithDoc('Cancel').setDoc("The command was cancelled."),
EnumWithDoc('Error').setDoc("Command invocation had error(s) and was aborted."),
EnumWithDoc('Waiting').setDoc("Controller is waiting for a user action."),
EnumWithDoc('Sleeping').setDoc("Controller command is on a sleep queue wait for device."),
EnumWithDoc('InProgress').setDoc("The controller is communicating with the other device to carry out the command."),
EnumWithDoc('Completed').setDoc("The command has completed successfully."),
EnumWithDoc('Failed').setDoc("The command has failed."),
EnumWithDoc('NodeOK').setDoc("Used only with ControllerCommand_HasNodeFailed to indicate that the controller thinks the node is OK."),
EnumWithDoc('NodeFailed').setDoc("Used only with ControllerCommand_HasNodeFailed to indicate that the controller thinks the node has failed."),
]
PyNotifications = [
EnumWithDoc('ValueAdded').setDoc("A new node value has been added to OpenZWave's set. These notifications occur after a node has been discovered, and details of its command classes have been received. Each command class may generate one or more values depending on the complexity of the item being represented."),
EnumWithDoc('ValueRemoved').setDoc("A node value has been removed from OpenZWave's set. This only occurs when a node is removed."),
EnumWithDoc('ValueChanged').setDoc("A node value has been updated from the Z-Wave network and it is different from the previous value."),
EnumWithDoc('ValueRefreshed').setDoc("A node value has been updated from the Z-Wave network."),
EnumWithDoc('Group').setDoc("The associations for the node have changed. The application should rebuild any group information it holds about the node."),
EnumWithDoc('NodeNew').setDoc("A new node has been found (not already stored in zwcfg*.xml file)."),
EnumWithDoc('NodeAdded').setDoc("A new node has been added to OpenZWave's set. This may be due to a device being added to the Z-Wave network, or because the application is initializing itself."),
EnumWithDoc('NodeRemoved').setDoc("A node has been removed from OpenZWave's set. This may be due to a device being removed from the Z-Wave network, or because the application is closing."),
    EnumWithDoc('NodeProtocolInfo').setDoc("Basic node information has been received, such as whether the node is a listening device, a routing device and its baud rate and basic, generic and specific types. It is after this notification that you can call Manager::GetNodeType to obtain a label containing the device description."),
EnumWithDoc('NodeNaming').setDoc("One of the node names has changed (name, manufacturer, product)."),
EnumWithDoc('NodeEvent').setDoc("A node has triggered an event. This is commonly caused when a node sends a Basic_Set command to the controller. The event value is stored in the notification."),
EnumWithDoc('PollingDisabled').setDoc("Polling of a node has been successfully turned off by a call to Manager::DisablePoll."),
EnumWithDoc('PollingEnabled').setDoc("Polling of a node has been successfully turned on by a call to Manager::EnablePoll."),
EnumWithDoc('SceneEvent').setDoc("Scene Activation Set received."),
EnumWithDoc('CreateButton').setDoc("Handheld controller button event created."),
EnumWithDoc('DeleteButton').setDoc("Handheld controller button event deleted."),
EnumWithDoc('ButtonOn').setDoc("Handheld controller button on pressed event."),
EnumWithDoc('ButtonOff').setDoc("Handheld controller button off pressed event."),
EnumWithDoc('DriverReady').setDoc("A driver for a PC Z-Wave controller has been added and is ready to use. The notification will contain the controller's Home ID, which is needed to call most of the Manager methods."),
EnumWithDoc('DriverFailed').setDoc("Driver failed to load."),
EnumWithDoc('DriverReset').setDoc("All nodes and values for this driver have been removed. This is sent instead of potentially hundreds of individual node and value notifications."),
EnumWithDoc('EssentialNodeQueriesComplete').setDoc("The queries on a node that are essential to its operation have been completed. The node can now handle incoming messages."),
EnumWithDoc('NodeQueriesComplete').setDoc("All the initialisation queries on a node have been completed."),
    EnumWithDoc('AwakeNodesQueried').setDoc("All awake nodes have been queried, so the client application can expect complete data for these nodes."),
EnumWithDoc('AllNodesQueriedSomeDead').setDoc("All nodes have been queried but some dead nodes found."),
    EnumWithDoc('AllNodesQueried').setDoc("All nodes have been queried, so the client application can expect complete data."),
EnumWithDoc('Notification').setDoc("A manager notification report."),
EnumWithDoc('DriverRemoved').setDoc("The Driver is being removed."),
EnumWithDoc('ControllerCommand').setDoc("When Controller Commands are executed, Notifications of Success/Failure etc are communicated via this Notification."),
]
COMMAND_CLASS_DESC = {
0x00: 'COMMAND_CLASS_NO_OPERATION',
0x20: 'COMMAND_CLASS_BASIC',
0x21: 'COMMAND_CLASS_CONTROLLER_REPLICATION',
0x22: 'COMMAND_CLASS_APPLICATION_STATUS',
0x23: 'COMMAND_CLASS_ZIP_SERVICES',
0x24: 'COMMAND_CLASS_ZIP_SERVER',
0x25: 'COMMAND_CLASS_SWITCH_BINARY',
0x26: 'COMMAND_CLASS_SWITCH_MULTILEVEL',
0x27: 'COMMAND_CLASS_SWITCH_ALL',
0x28: 'COMMAND_CLASS_SWITCH_TOGGLE_BINARY',
0x29: 'COMMAND_CLASS_SWITCH_TOGGLE_MULTILEVEL',
0x2A: 'COMMAND_CLASS_CHIMNEY_FAN',
0x2B: 'COMMAND_CLASS_SCENE_ACTIVATION',
0x2C: 'COMMAND_CLASS_SCENE_ACTUATOR_CONF',
0x2D: 'COMMAND_CLASS_SCENE_CONTROLLER_CONF',
0x2E: 'COMMAND_CLASS_ZIP_CLIENT',
0x2F: 'COMMAND_CLASS_ZIP_ADV_SERVICES',
0x30: 'COMMAND_CLASS_SENSOR_BINARY',
0x31: 'COMMAND_CLASS_SENSOR_MULTILEVEL',
0x32: 'COMMAND_CLASS_METER',
0x33: 'COMMAND_CLASS_COLOR',
0x34: 'COMMAND_CLASS_ZIP_ADV_CLIENT',
0x35: 'COMMAND_CLASS_METER_PULSE',
0x3C: 'COMMAND_CLASS_METER_TBL_CONFIG',
0x3D: 'COMMAND_CLASS_METER_TBL_MONITOR',
0x3E: 'COMMAND_CLASS_METER_TBL_PUSH',
0x38: 'COMMAND_CLASS_THERMOSTAT_HEATING',
0x40: 'COMMAND_CLASS_THERMOSTAT_MODE',
0x42: 'COMMAND_CLASS_THERMOSTAT_OPERATING_STATE',
0x43: 'COMMAND_CLASS_THERMOSTAT_SETPOINT',
0x44: 'COMMAND_CLASS_THERMOSTAT_FAN_MODE',
0x45: 'COMMAND_CLASS_THERMOSTAT_FAN_STATE',
0x46: 'COMMAND_CLASS_CLIMATE_CONTROL_SCHEDULE',
0x47: 'COMMAND_CLASS_THERMOSTAT_SETBACK',
0x4c: 'COMMAND_CLASS_DOOR_LOCK_LOGGING',
0x4E: 'COMMAND_CLASS_SCHEDULE_ENTRY_LOCK',
0x50: 'COMMAND_CLASS_BASIC_WINDOW_COVERING',
0x51: 'COMMAND_CLASS_MTP_WINDOW_COVERING',
0x56: 'COMMAND_CLASS_CRC_16_ENCAP',
0x5A: 'COMMAND_CLASS_DEVICE_RESET_LOCALLY',
0x5E: 'COMMAND_CLASS_ZWAVE_PLUS_INFO',
0x60: 'COMMAND_CLASS_MULTI_CHANNEL_V2',
0x61: 'COMMAND_CLASS_DISPLAY',
0x62: 'COMMAND_CLASS_DOOR_LOCK',
0x63: 'COMMAND_CLASS_USER_CODE',
0x64: 'COMMAND_CLASS_GARAGE_DOOR',
0x70: 'COMMAND_CLASS_CONFIGURATION',
0x71: 'COMMAND_CLASS_ALARM',
0x72: 'COMMAND_CLASS_MANUFACTURER_SPECIFIC',
0x73: 'COMMAND_CLASS_POWERLEVEL',
0x75: 'COMMAND_CLASS_PROTECTION',
0x76: 'COMMAND_CLASS_LOCK',
0x77: 'COMMAND_CLASS_NODE_NAMING',
0x78: 'COMMAND_CLASS_ACTUATOR_MULTILEVEL',
0x79: 'COMMAND_CLASS_KICK',
0x7A: 'COMMAND_CLASS_FIRMWARE_UPDATE_MD',
0x7B: 'COMMAND_CLASS_GROUPING_NAME',
0x7C: 'COMMAND_CLASS_REMOTE_ASSOCIATION_ACTIVATE',
0x7D: 'COMMAND_CLASS_REMOTE_ASSOCIATION',
0x80: 'COMMAND_CLASS_BATTERY',
0x81: 'COMMAND_CLASS_CLOCK',
0x82: 'COMMAND_CLASS_HAIL',
0x83: 'COMMAND_CLASS_NETWORK_STAT',
0x84: 'COMMAND_CLASS_WAKE_UP',
0x85: 'COMMAND_CLASS_ASSOCIATION',
0x86: 'COMMAND_CLASS_VERSION',
0x87: 'COMMAND_CLASS_INDICATOR',
0x88: 'COMMAND_CLASS_PROPRIETARY',
0x89: 'COMMAND_CLASS_LANGUAGE',
0x8A: 'COMMAND_CLASS_TIME',
0x8B: 'COMMAND_CLASS_TIME_PARAMETERS',
0x8C: 'COMMAND_CLASS_GEOGRAPHIC_LOCATION',
0x8D: 'COMMAND_CLASS_COMPOSITE',
0x8E: 'COMMAND_CLASS_MULTI_INSTANCE_ASSOCIATION',
0x8F: 'COMMAND_CLASS_MULTI_CMD',
0x90: 'COMMAND_CLASS_ENERGY_PRODUCTION',
0x91: 'COMMAND_CLASS_MANUFACTURER_PROPRIETARY',
0x92: 'COMMAND_CLASS_SCREEN_MD',
0x93: 'COMMAND_CLASS_SCREEN_ATTRIBUTES',
0x94: 'COMMAND_CLASS_SIMPLE_AV_CONTROL',
0x95: 'COMMAND_CLASS_AV_CONTENT_DIRECTORY_MD',
0x96: 'COMMAND_CLASS_AV_RENDERER_STATUS',
0x97: 'COMMAND_CLASS_AV_CONTENT_SEARCH_MD',
0x98: 'COMMAND_CLASS_SECURITY',
0x99: 'COMMAND_CLASS_AV_TAGGING_MD',
0x9A: 'COMMAND_CLASS_IP_CONFIGURATION',
0x9B: 'COMMAND_CLASS_ASSOCIATION_COMMAND_CONFIGURATION',
0x9C: 'COMMAND_CLASS_SENSOR_ALARM',
0x9D: 'COMMAND_CLASS_SILENCE_ALARM',
0x9E: 'COMMAND_CLASS_SENSOR_CONFIGURATION',
0xEF: 'COMMAND_CLASS_MARK',
0xF0: 'COMMAND_CLASS_NON_INTEROPERABLE'
}
avg_line_length: 41.860735 | max_line_length: 331 | alphanum_fraction: 0.681776
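A short usage sketch for the `PyOptions` wrapper in the record above. This is a hedged illustration: `PyOptionList`, `CreateOptions`, and `configPath` are defined elsewhere in the module, and the option names shown ("ConsoleOutput", "PollInterval") are standard OpenZWave options assumed to be present in `PyOptionList`:

```python
# Hypothetical usage of PyOptions, assuming the module-level helpers
# (CreateOptions, PyOptionList, configPath) are importable alongside it, and
# that the OpenZWave config directory can be auto-detected.
opts = PyOptions(config_path=None, user_path=".", cmd_line="")
opts.addOption("ConsoleOutput", False)  # dispatches to addOptionBool via PyOptionList
opts.addOptionInt("PollInterval", 500)
opts.lock()                             # options must be locked before starting the manager
assert opts.areLocked()
```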
hexsha: a600d7c8d8c6f43756f0cea6f0b34463826860e7 | size: 2,671 | ext: py | lang: Python
max_stars: path=tests/test_agent.py, repo=DavidSlayback/gym-gridverse, head=b8916576a92489c030fb2c2060598c7f473f19f2, licenses=["MIT"], count=6, min_datetime=2020-10-15T14:45:11.000Z, max_datetime=2021-02-20T12:11:32.000Z
max_issues: (same path/repo/head/licenses as max_stars), count=5, min_datetime=2021-01-20T15:34:29.000Z, max_datetime=2022-03-18T18:52:53.000Z
max_forks: (same path/repo/head/licenses as max_stars), count=2, min_datetime=2021-01-11T18:48:49.000Z, max_datetime=2021-01-26T15:45:38.000Z
content:
"""Tests agent related functionality"""
import pytest
from gym_gridverse.agent import Agent
from gym_gridverse.geometry import Area, Orientation, Position
@pytest.mark.parametrize(
'position,orientation,expected',
[
(Position(0, 0), Orientation.N, Area((-6, 0), (-3, 3))),
(Position(0, 0), Orientation.S, Area((0, 6), (-3, 3))),
(Position(0, 0), Orientation.E, Area((-3, 3), (0, 6))),
(Position(0, 0), Orientation.W, Area((-3, 3), (-6, 0))),
(Position(1, 2), Orientation.N, Area((-5, 1), (-1, 5))),
(Position(1, 2), Orientation.S, Area((1, 7), (-1, 5))),
(Position(1, 2), Orientation.E, Area((-2, 4), (2, 8))),
(Position(1, 2), Orientation.W, Area((-2, 4), (-4, 2))),
],
)
def test_get_pov_area(
position: Position, orientation: Orientation, expected: Area
):
relative_area = Area((-6, 0), (-3, 3))
agent = Agent(position, orientation)
assert agent.get_pov_area(relative_area) == expected
@pytest.mark.parametrize(
'position,orientation,delta_position,expected',
[
(Position(0, 0), Orientation.N, Position(1, -1), Position(1, -1)),
(Position(0, 0), Orientation.S, Position(1, -1), Position(-1, 1)),
(Position(0, 0), Orientation.E, Position(1, -1), Position(-1, -1)),
(Position(0, 0), Orientation.W, Position(1, -1), Position(1, 1)),
(Position(1, 2), Orientation.N, Position(2, -2), Position(3, 0)),
(Position(1, 2), Orientation.S, Position(2, -2), Position(-1, 4)),
(Position(1, 2), Orientation.E, Position(2, -2), Position(-1, 0)),
(Position(1, 2), Orientation.W, Position(2, -2), Position(3, 4)),
],
)
def test_agent_position_relative(
position: Position,
orientation: Orientation,
delta_position: Position,
expected: Position,
):
agent = Agent(position, orientation)
assert agent.position_relative(delta_position) == expected
@pytest.mark.parametrize(
'position,orientation,expected',
[
(Position(0, 0), Orientation.N, Position(-1, 0)),
(Position(0, 0), Orientation.S, Position(1, 0)),
(Position(0, 0), Orientation.E, Position(0, 1)),
(Position(0, 0), Orientation.W, Position(0, -1)),
(Position(1, 2), Orientation.N, Position(0, 2)),
(Position(1, 2), Orientation.S, Position(2, 2)),
(Position(1, 2), Orientation.E, Position(1, 3)),
(Position(1, 2), Orientation.W, Position(1, 1)),
],
)
def test_agent_position_in_front(
position: Position,
orientation: Orientation,
expected: Position,
):
agent = Agent(position, orientation)
assert agent.position_in_front() == expected
avg_line_length: 36.094595 | max_line_length: 75 | alphanum_fraction: 0.60614
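The parametrized cases above fully pin down how a (row, col) delta rotates with the agent's orientation: N leaves it unchanged, S flips it, and E/W are quarter turns. A hedged standalone sketch of that transform (not gym_gridverse's actual implementation, which may use rotation matrices or Orientation methods):

```python
# Hypothetical sketch of the orientation-relative delta rotation implied by
# the test cases above; positions are (row, col).
def rotate_delta(orientation, dy, dx):
    return {
        'N': (dy, dx),    # facing north: delta unchanged
        'S': (-dy, -dx),  # facing south: delta flipped
        'E': (dx, -dy),   # quarter turns, per the parametrized cases
        'W': (-dx, dy),
    }[orientation]


def position_relative(position, orientation, delta):
    dy, dx = rotate_delta(orientation, delta[0], delta[1])
    return (position[0] + dy, position[1] + dx)


# Matches the (Position(1, 2), Orientation.E, Position(2, -2)) case above:
print(position_relative((1, 2), 'E', (2, -2)))  # -> (-1, 0)
```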
hexsha: 53ece3482bd612b9e86dd34ddffbfb2623d7f344 | size: 710 | ext: py | lang: Python
max_stars: path=rest_api/config/urls.py, repo=Razz21/Nuxt-Django-E-Commerce-Demo, head=24834007f7554f9e59758b611c73ea0da85c841e, licenses=["MIT"], count=1, min_datetime=2020-10-31T12:46:17.000Z, max_datetime=2020-10-31T12:46:17.000Z
max_issues: (same path/repo/head/licenses as max_stars), count=2, min_datetime=2020-07-25T11:01:46.000Z, max_datetime=2022-01-22T10:39:02.000Z
max_forks: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
content:
from django.urls import path, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# todo api-root-url
urlpatterns = [
path("admin/", admin.site.urls),
path("api/auth/", include("project.users.urls")),
path("api/", include("project.core.urls")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
avg_line_length: 29.583333 | max_line_length: 85 | alphanum_fraction: 0.735211
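The `static()` wiring in this URLconf presumes a handful of settings. A minimal hypothetical settings fragment that would make it work; the setting names follow Django convention, the values are invented for illustration:

```python
# Hypothetical settings.py fragment assumed by the URLconf above.
from pathlib import Path

BASE_DIR = Path(__file__).resolve().parent.parent

MEDIA_URL = "/media/"                # static() maps this URL prefix...
MEDIA_ROOT = BASE_DIR / "media"      # ...to files under this directory
STATIC_URL = "/static/"
STATIC_ROOT = BASE_DIR / "staticfiles"
DEBUG = True                         # serving files this way is debug-only
```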
hexsha: b25e8d05359813a7e9389c5bf5860101614a2bc0 | size: 9,413 | ext: py | lang: Python
max_stars: path=guild/op_cmd.py, repo=timt51/guildai, head=8d3aa9b902e29eb60ebbb408b3a1cbd3f40fcaec, licenses=["Apache-2.0"], count=694, min_datetime=2018-11-30T01:06:30.000Z, max_datetime=2022-03-31T14:46:26.000Z
max_issues: (same path/repo/head/licenses as max_stars), count=323, min_datetime=2018-11-05T17:44:34.000Z, max_datetime=2022-03-31T16:56:41.000Z
max_forks: (same path/repo/head/licenses as max_stars), count=68, min_datetime=2019-04-01T04:24:47.000Z, max_datetime=2022-02-24T17:22:04.000Z
content:
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import pprint
import six
from guild import flag_util
from guild import util
from guild import yaml_util
log = logging.getLogger("guild")
###################################################################
# State
###################################################################
class OpCmd(object):
def __init__(self, cmd_args, cmd_env, cmd_flags, flags_dest):
self.cmd_args = cmd_args
self.cmd_env = cmd_env
self.cmd_flags = cmd_flags
self.flags_dest = flags_dest
class CmdFlag(object):
def __init__(
self,
arg_name=None,
arg_skip=False,
arg_switch=None,
arg_split=None,
env_name=None,
):
self.arg_name = arg_name
self.arg_skip = arg_skip
self.arg_switch = arg_switch
self.arg_split = arg_split
self.env_name = env_name
###################################################################
# Generate command
###################################################################
def generate(op_cmd, flag_vals, resolve_params):
return (_gen_args(op_cmd, flag_vals, resolve_params), _gen_env(op_cmd, flag_vals))
def _gen_args(op_cmd, flag_vals, resolve_params):
encoded_resolve_params = _encode_arg_params(resolve_params)
args = []
for arg in op_cmd.cmd_args:
if arg == "__flag_args__":
args.extend(
_flag_args(flag_vals, op_cmd.flags_dest, op_cmd.cmd_flags, args)
)
else:
args.append(util.resolve_refs(arg, encoded_resolve_params))
return args
def _encode_arg_params(params):
return {name: _encode_general_arg(val) for name, val in params.items()}
def _encode_general_arg(val):
# Use same encoding used for env vals.
return _encode_env_val(val, arg_split=None)
def _flag_args(flag_vals, flag_dest, cmd_flags, cmd_args):
args = []
for name, val in sorted(flag_vals.items()):
cmd_flag = cmd_flags.get(name)
args.extend(_args_for_flag(name, val, cmd_flag, flag_dest, cmd_args))
return args
def _args_for_flag(name, val, cmd_flag, flag_dest, cmd_args):
cmd_flag = cmd_flag or CmdFlag()
if cmd_flag.arg_skip:
return []
arg_name = cmd_flag.arg_name or name
if "--%s" % arg_name in cmd_args:
log.warning(
"ignoring flag '%s=%s' because it's shadowed "
"in the operation cmd as --%s",
name,
flag_util.encode_flag_val(val),
arg_name,
)
return []
elif cmd_flag.arg_switch is not None:
if cmd_flag.arg_switch == val:
return ["--%s" % arg_name]
else:
return []
elif val is not None:
if _splittable(val, cmd_flag):
encoded = _encode_split_args(val, flag_dest, cmd_flag.arg_split)
return ["--%s" % arg_name] + encoded if encoded else []
else:
return [
"--%s" % arg_name,
_encode_flag_arg(val, flag_dest, cmd_flag.arg_split),
]
else:
return []
def _splittable(val, cmd_flag):
return not isinstance(val, list) and cmd_flag.arg_split is not None
def _encode_split_args(val, dest, arg_split):
encoded = _encode_flag_val_for_split(val, dest, arg_split)
parts = flag_util.split_encoded_flag_val(encoded, arg_split)
return _split_args_for_dest(parts, dest)
def _encode_flag_val_for_split(val, dest, arg_split):
if isinstance(val, six.string_types):
return val
return _encode_flag_arg(val, dest, arg_split)
def _split_args_for_dest(parts, dest):
if dest == "globals":
return [_encode_yaml_list_for_globals_arg(parts)]
return parts
def _encode_yaml_list_for_globals_arg(parts):
return yaml_util.encode_yaml(
[yaml_util.decode_yaml(part) for part in parts],
default_flow_style=True,
)
def _encode_flag_arg(val, dest, arg_split):
if (
dest == "globals"
or dest.startswith("global:")
or dest.startswith("dict:")
or dest.startswith("namespace:")
):
return _encode_flag_arg_for_globals(val)
else:
return _encode_flag_arg_for_argparse(val, arg_split)
def _encode_flag_arg_for_globals(val):
"""Returns an encoded flag value for Python globals interface.
Flags destined for globals within a Python module are encoded
using standard YAML encoding. Decoding must be handled using
standard YAML decoding.
"""
return yaml_util.encode_yaml(val, default_flow_style=True)
def _encode_flag_arg_for_argparse(val, arg_split):
"""Returns an encoded flag val for use by Python argparse.
argparse generally uses type functions (e.g. int, float, etc.) to
decode string args. We use `pprint.pformat` to encode here with
exceptions for boolean values. Boolean values decode any non-empty
string as True so we encode here accordingly using the arbitrarily
    chosen non-empty string '1' to represent True along with the empty
string '' to represent False.
`arg_split` is used to encode lists of values to a single string
argument.
"""
if val is True:
return "1"
elif val is False or val is None:
return ""
elif isinstance(val, six.string_types):
return val
elif isinstance(val, list):
return flag_util.join_splittable_flag_vals(val, arg_split)
else:
return pprint.pformat(val)
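# Illustrative examples of the encoding above (a hedged sketch of expected
# results, not taken from the original module or its test suite):
#
#   _encode_flag_arg_for_argparse(True, None)    # -> "1"
#   _encode_flag_arg_for_argparse(False, None)   # -> ""
#   _encode_flag_arg_for_argparse("abc", None)   # -> "abc"
#   _encode_flag_arg_for_argparse(1.5, None)     # -> "1.5" (pprint.pformat)
#   _encode_flag_arg_for_argparse([1, 2], " ")   # -> "1 2" (assuming flag_util
#                                                #    joins on the split char)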
def _gen_env(op_cmd, flag_vals):
env = _encoded_cmd_env(op_cmd)
_resolve_env_flag_refs(flag_vals, env)
_apply_flag_env(flag_vals, op_cmd, env)
return env
def _encoded_cmd_env(op_cmd):
return {name: _encode_env_val(val) for name, val in op_cmd.cmd_env.items()}
def _encode_env_val(val, arg_split=None):
"""Returns an encoded flag val for use as env values.
Uses the same encoding scheme as _encode_flag_arg_for_argparse
under the assumption that the same logic is used to decode env
    values as command arguments.
"""
return _encode_flag_arg_for_argparse(val, arg_split)
def _resolve_env_flag_refs(flag_vals, env):
for env_name, env_val in env.items():
env[env_name] = util.resolve_refs(env_val, flag_vals)
def _apply_flag_env(flag_vals, op_cmd, env):
env.update(
{
_flag_env_name(name, op_cmd): _encode_env_val(val)
for name, val in flag_vals.items()
}
)
def _flag_env_name(flag_name, op_cmd):
cmd_flag = op_cmd.cmd_flags.get(flag_name)
if cmd_flag and cmd_flag.env_name:
return cmd_flag.env_name
return _default_flag_env_name(flag_name)
def _default_flag_env_name(flag_name):
return "FLAG_%s" % util.env_var_name(flag_name)
###################################################################
# Data IO
###################################################################
def for_data(data):
cmd_args = data.get("cmd-args") or []
cmd_env = data.get("cmd-env") or {}
cmd_flags = _cmd_flags_for_data(data.get("cmd-flags"))
flags_dest = data.get("flags-dest")
return OpCmd(cmd_args, cmd_env, cmd_flags, flags_dest)
def _cmd_flags_for_data(data):
if not data:
return {}
if not isinstance(data, dict):
raise ValueError(data)
return {
flag_name: _cmd_flag_for_data(cmd_flag_data)
for flag_name, cmd_flag_data in data.items()
}
def _cmd_flag_for_data(data):
if not isinstance(data, dict):
raise ValueError(data)
return CmdFlag(
arg_name=data.get("arg-name"),
arg_skip=data.get("arg-skip"),
arg_switch=data.get("arg-switch"),
arg_split=data.get("arg-split"),
env_name=data.get("env-name"),
)
def as_data(op_cmd):
data = {
"cmd-args": op_cmd.cmd_args,
}
if op_cmd.cmd_env:
data["cmd-env"] = op_cmd.cmd_env
cmd_flags_data = _cmd_flags_as_data(op_cmd.cmd_flags)
if cmd_flags_data:
data["cmd-flags"] = cmd_flags_data
if op_cmd.flags_dest:
data["flags-dest"] = op_cmd.flags_dest
return data
def _cmd_flags_as_data(cmd_flags):
data = {}
for flag_name, cmd_flag in cmd_flags.items():
cmd_flag_data = _cmd_flag_as_data(cmd_flag)
if cmd_flag_data:
data[flag_name] = cmd_flag_data
return data
def _cmd_flag_as_data(cmd_flag):
data = {}
if cmd_flag.arg_name:
data["arg-name"] = cmd_flag.arg_name
if cmd_flag.arg_skip:
data["arg-skip"] = cmd_flag.arg_skip
if cmd_flag.arg_switch:
data["arg-switch"] = cmd_flag.arg_switch
if cmd_flag.arg_split is not None:
data["arg-split"] = cmd_flag.arg_split
if cmd_flag.env_name:
data["env-name"] = cmd_flag.env_name
return data
avg_line_length: 29.142415 | max_line_length: 86 | alphanum_fraction: 0.645065
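The Data IO helpers at the bottom of this record round-trip an `OpCmd` through a plain dict. A small hedged example of the expected shape; the key names come from the accessors in the code above, while the values are invented:

```python
# Hypothetical round-trip through for_data/as_data from the module above;
# all values are invented for illustration.
data = {
    "cmd-args": ["python", "-m", "train", "__flag_args__"],
    "cmd-env": {"PYTHONPATH": "src"},
    "flags-dest": "args",
    "cmd-flags": {
        "lr": {"arg-name": "learning-rate"},
        "quiet": {"arg-switch": True},
    },
}
op_cmd = for_data(data)
assert as_data(op_cmd)["cmd-args"] == data["cmd-args"]
```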
hexsha: 039b76ad98bd636c2b42ba7b44acbd5d4c85d682 | size: 1,649 | ext: py | lang: Python
max_stars: path=dkist/utils/sysinfo.py, repo=DKISTDC/dkist, head=3b97d7c0db144a717cfbe648b7402b8b8f9f2da2, licenses=["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], count=21, min_datetime=2018-05-18T13:43:59.000Z, max_datetime=2022-03-16T21:17:39.000Z
max_issues: path=dkist/utils/sysinfo.py, repo=Cadair/dkist, head=2f4d930ea0e002db40e8ef17a79b0b4fb2e6d3f3, licenses=["BSD-3-Clause"], count=134, min_datetime=2017-12-07T16:09:24.000Z, max_datetime=2022-03-17T16:13:55.000Z
max_forks: (same path/repo/head/licenses as max_issues), count=4, min_datetime=2017-12-04T10:49:49.000Z, max_datetime=2022-01-10T12:20:46.000Z
content:
import platform
from pkg_resources import get_distribution
from sunpy.extern.distro import linux_distribution
from sunpy.util.sysinfo import find_dependencies
__all__ = ['system_info']
def system_info():
"""
Display information about your system for submitting bug reports.
"""
base_reqs = get_distribution("dkist").requires()
base_reqs = {base_req.name.lower() for base_req in base_reqs}
missing_packages, installed_packages = find_dependencies(package="dkist")
extra_prop = {"System": platform.system(),
"Arch": f"{platform.architecture()[0]}, ({platform.processor()})",
"Python": platform.python_version(),
"SunPy": get_distribution("dkist").version}
sys_prop = {**installed_packages, **missing_packages, **extra_prop}
print("==============================")
print("DKIST Installation Information")
print("==============================")
print()
print("General")
print("#######")
if sys_prop['System'] == "Linux":
distro = " ".join(linux_distribution())
print(f"OS: {distro} (Linux {platform.release()})")
elif sys_prop['System'] == "Darwin":
print(f"OS: Mac OS {platform.mac_ver()[0]}")
elif sys_prop['System'] == "Windows":
print(f"OS: Windows {platform.release()} {platform.version()}")
else:
print("Unknown OS")
for sys_info in ['Arch', 'SunPy']:
print('{} : {}'.format(sys_info, sys_prop[sys_info]))
print()
print("Required Dependices")
print("###################")
for req in base_reqs:
print('{}: {}'.format(req, sys_prop[req]))
avg_line_length: 35.085106 | max_line_length: 84 | alphanum_fraction: 0.597332
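Usage is a single call that prints the report to stdout:

```python
# Minimal usage of the module above.
from dkist.utils.sysinfo import system_info

system_info()
```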
hexsha: 5b8932362dae9cb287b2a635972f66f141fcf3fd | size: 3,541 | ext: py | lang: Python
max_stars: path=python/oneflow/test/modules/test_nonzero.py, repo=butterluo/oneflow, head=cf2ce575d80f89642b71bee2248e69b09213007d, licenses=["Apache-2.0"], count=null, min_datetime=null, max_datetime=null
max_issues: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
max_forks: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
content:
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def np_nonzero(input, as_tuple):
if as_tuple:
return np.nonzero(input)
else:
return np.transpose(np.nonzero(input))
def _test_nonzero(test_case, shape, as_tuple, device):
np_input = np.random.randn(*shape)
input = flow.tensor(np_input, dtype=flow.float32, device=flow.device(device))
of_out = flow.nonzero(input, as_tuple)
np_out = np_nonzero(np_input, as_tuple)
if as_tuple:
test_case.assertTrue(
np.allclose(tuple(x.numpy() for x in of_out), np_out, 0.0001, 0.0001)
)
else:
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
@flow.unittest.skip_unless_1n1d()
class TestNonzero(flow.unittest.TestCase):
def test_nonzero(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [_test_nonzero]
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6), (2, 3, 0, 4)]
arg_dict["as_tuple"] = [True, False]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
    # Graph mode is not checked for one reason:
    # Reason 1: a lazy tensor cannot call .numpy(); tensor.numpy() is not allowed to be called in nn.Graph.build(*args) or on a lazy tensor.
# Please refer to File "python/oneflow/nn/modules/nonzero.py", line 29, in nonzero_op.
@autotest(auto_backward=False, check_graph="ValidatedFlase")
def test_nonzero_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=random(2, 5).to(int)).to(device)
y = torch.nonzero(x)
return y
    # Graph mode is not checked for one reason:
    # Reason 1: a lazy tensor cannot call .numpy(); tensor.numpy() is not allowed to be called in nn.Graph.build(*args) or on a lazy tensor.
# Please refer to File "python/oneflow/nn/modules/nonzero.py", line 29, in nonzero_op.
@autotest(auto_backward=False, check_graph="ValidatedFlase")
def test_nonzero_bool_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=random(2, 5).to(int)).to(device=device, dtype=torch.bool)
y = torch.nonzero(x)
return y
    # Graph mode is not checked for one reason:
    # Reason 1: a lazy tensor cannot call .numpy(); tensor.numpy() is not allowed to be called in nn.Graph.build(*args) or on a lazy tensor.
# Please refer to File "python/oneflow/nn/modules/nonzero.py", line 29, in nonzero_op.
@autotest(auto_backward=False, check_graph="ValidatedFlase")
def test_nonzero_with_0dim_data(test_case):
device = random_device()
x = random_tensor(ndim=0).to(device)
y = torch.nonzero(x)
return y
if __name__ == "__main__":
unittest.main()
avg_line_length: 38.075269 | max_line_length: 140 | alphanum_fraction: 0.694719
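`np_nonzero` in the record above mirrors the two NumPy/PyTorch conventions: `as_tuple=True` yields one index array per dimension, while `as_tuple=False` yields an (n, ndim) matrix of coordinates. A quick NumPy check:

```python
import numpy as np

x = np.array([[1, 0], [0, 2]])
print(np.nonzero(x))                 # (array([0, 1]), array([0, 1]))  <- as_tuple=True
print(np.transpose(np.nonzero(x)))   # [[0 0]
                                     #  [1 1]]                         <- as_tuple=False
```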
hexsha: 228161ea7cc2cf2d04f4dbe753d2c23160c414ee | size: 3,096 | ext: py | lang: Python
max_stars: path=main.py, repo=CuteFwan/b_test, head=8aca45673b0c95238a72a14c466b77a06458a887, licenses=["MIT"], count=null, min_datetime=null, max_datetime=null
max_issues: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
max_forks: (same path/repo/head/licenses as max_stars), count=null, min_datetime=null, max_datetime=null
content:
from b_test import *
import random
def setupgamerandomly(game):
"""
Randomly places boats on a game's board.
Keeps trying until all boats fit.
Need to add a timeout if the board gets into a configuration
where no more boats can fit, but this isn't necessary for standard
battleships.
Parameters
----------
game: :class:`Battleship`
The game to populate ships upon.
Returns
-------
:class:`Battleship`
The populated game.
"""
for boat, size in boats.items():
coords = None
while coords is None:
rotated = bool(random.randint(0,1))
pos = (random.randint(0, (9 - size) if not rotated else 9), random.randint(0, (9 - size) if rotated else 9))
#print(f'trying to place {boat} on {pos}')
coords = game.placeboat(boat, pos, rotated)
return game
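# Hypothetical usage (Battleship and drawboard come from b_test, as above):
#   game = setupgamerandomly(Battleship())
#   print(game.drawboard())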
def setupgame(game):
"""
Manually calls input() to populate a given game's board.
Parameters
----------
game: :class:`Battleship`
The game to populate ships upon.
Returns
-------
:class:`Battleship`
The populated game.
"""
for boat, size in boats.items():
coords = None
while coords is None:
res = input(f"Where to put {boat}? (A4... etc) ")
pos = lettertonum(res[0]), int(res[1:]) - 1
rotated = bool(int(input("Rotated? (0 or 1) ")))
coords = game.placeboat(boat, pos, rotated)
print(game.drawboard())
return game
def main():
"""
Human vs AI battleships game.
Human game is set up manually and AI game is set up randomly.
Could be improved with a better system to select which AI to use.
"""
mygame = setupgame(Battleship())
AIgame = setupgamerandomly(Battleship())
me = manually(mygame)
AI = history(AIgame)
myturn = True
while True:
if myturn:
myturn = False
spot = me.nextturn(AI.game)
print(f"You attack {numtoletter(spot[0][0])}{spot[0][1]+1}{f' and hit opponent {spot[1]}' if spot[1] != ' ' else ''}")
if AI.game.checkwin():
print("You win!")
break
else:
spot = AI.nextturn(me.game)
myturn = True
print(f"Opponent attacks {numtoletter(spot[0][0])}{spot[0][1]+1}{f' and hit your {spot[1]}' if spot[1] != ' ' else ''}")
if me.game.checkwin():
print("You lose!")
break
AI.save()
def aigame():
"""
Purely AI vs AI battleships game.
"""
AI1game = setupgamerandomly(Battleship())
AI2game = setupgamerandomly(Battleship())
AI1 = stats(AI1game)
AI2 = history(AI2game)
players = [AI1, AI2]
turn = False
while True:
players[turn].nextturn(players[not turn].game)
# check the board that was just attacked, as main() does
if players[not turn].game.checkwin():
print(f'AI{int(turn)+1} wins')
break
turn = not turn
print(AI1.game.drawboard())
print(AI2.game.drawboard())
AI1.save()
AI2.save()
main()
| 29.207547
| 132
| 0.561047
|
282073aa1c4c6bed4139f59d2344df6d9bf53642
| 785
|
py
|
Python
|
main.py
|
barthap/BPMN
|
43dae78ba9e47015267b382d596cb2afbe7d6a35
|
[
"MIT"
] | null | null | null |
main.py
|
barthap/BPMN
|
43dae78ba9e47015267b382d596cb2afbe7d6a35
|
[
"MIT"
] | null | null | null |
main.py
|
barthap/BPMN
|
43dae78ba9e47015267b382d596cb2afbe7d6a35
|
[
"MIT"
] | null | null | null |
import examples
import matplotlib.pyplot as plt
if __name__ == '__main__':
print('Hello!')
# examples.lab1_repair_example() # OK
# examples.lab2_example() # OK
# examples.lab2_ex1() # OK
# examples.lab2_ex2() # OK
for i in range(1, 10):
# examples.lab2_setA(i)
examples.lab3_setB(i)
examples.lab3_setB_nofilter(i)
# examples.loop1()
# examples.loop2()
# examples.loop3()
"""
Zestaw A:
1 - OK
2 - OK
3 - dwie bramy obok siebie
4 - podobno OK
5 - OK
6 - OK
7 - totanla porazka
8 - OK
9 - OK
"""
"""
Zestaw B:
1 -
2 -
3 -
4 -
5 -
6 -
7 -
8 -
9 -
"""
plt.show()
| 16.354167
| 48
| 0.464968
|
e5ead09d1ff6100d64e6861f41eb66b0f5e4b482
| 10,561
|
py
|
Python
|
Thirdparty/libuv/build.py
|
stinvi/dava.engine
|
2b396ca49cdf10cdc98ad8a9ffcf7768a05e285e
|
[
"BSD-3-Clause"
] | 26
|
2018-09-03T08:48:22.000Z
|
2022-02-14T05:14:50.000Z
|
Thirdparty/libuv/build.py
|
ANHELL-blitz/dava.engine
|
ed83624326f000866e29166c7f4cccfed1bb41d4
|
[
"BSD-3-Clause"
] | null | null | null |
Thirdparty/libuv/build.py
|
ANHELL-blitz/dava.engine
|
ed83624326f000866e29166c7f4cccfed1bb41d4
|
[
"BSD-3-Clause"
] | 45
|
2018-05-11T06:47:17.000Z
|
2022-02-03T11:30:55.000Z
|
import os
import shutil
import build_utils
import build_config
def get_supported_targets(platform):
if platform == 'win32':
return ['win32']
elif platform == 'darwin':
return ['macos', 'ios', 'android']
elif platform == 'linux':
return ['android', 'linux']
else:
return []
def get_dependencies_for_target(target):
return []
def build_for_target(target, working_directory_path, root_project_path):
if target == 'win32':
_build_win32(working_directory_path, root_project_path)
elif target == 'macos':
_build_macos(working_directory_path, root_project_path)
elif target == 'ios':
_build_ios(working_directory_path, root_project_path)
elif target == 'android':
_build_android(working_directory_path, root_project_path)
elif target == 'linux':
_build_linux(working_directory_path, root_project_path)
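# A table-driven alternative to the if/elif dispatch above (a sketch,
# behavior-equivalent; all names are taken from this file):
#   _BUILDERS = {'win32': _build_win32, 'macos': _build_macos, 'ios': _build_ios,
#                'android': _build_android, 'linux': _build_linux}
#   _BUILDERS[target](working_directory_path, root_project_path)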
def get_download_info():
return 'https://github.com/kkdaemon/libuv.git'
def _download(working_directory_path):
source_folder_path = os.path.join(working_directory_path, 'libuv')
build_utils.run_process(
['git', 'clone', '-b', 'winuap_support', get_download_info()],
process_cwd=working_directory_path,
shell=True)
return source_folder_path
def _build_win32(working_directory_path, root_project_path):
source_folder_path = _download(working_directory_path)
build_folder_path = os.path.join(working_directory_path, 'gen/build_win32')
build_folder_path_x86 = os.path.join(build_folder_path, 'x86')
build_folder_path_x86_debug = os.path.join(build_folder_path_x86, 'Debug')
build_folder_path_x86_release = os.path.join(build_folder_path_x86, 'Release')
build_folder_path_x64 = os.path.join(build_folder_path, 'x64')
build_folder_path_x64_debug = os.path.join(build_folder_path_x64, 'Debug')
build_folder_path_x64_release = os.path.join(build_folder_path_x64, 'Release')
os.makedirs(build_folder_path_x86_debug)
os.makedirs(build_folder_path_x86_release)
os.makedirs(build_folder_path_x64_debug)
os.makedirs(build_folder_path_x64_release)
vc_solution_file=os.path.join(source_folder_path, 'uv.sln')
override_props_file=os.path.abspath('override_win32.props')
toolset=build_config.get_msvc_toolset_ver_win32()
msbuild_args=[
"/p:ForceImportBeforeCppTargets={}".format(override_props_file),
"/p:WindowsTargetPlatformVersion={}".format(build_config.get_msvc_sdk_version_win32())
]
# x86
x86_env = build_utils.get_win32_vs_x86_env()
x86_env['GYP_MSVS_VERSION'] = build_config.get_gyp_msvs_version()
build_utils.run_process(
['vcbuild.bat', 'x86', 'nobuild'],
process_cwd=source_folder_path,
environment=x86_env,
shell=True)
build_utils.build_vs(vc_solution_file, 'Debug', 'Win32', 'libuv', toolset, msbuild_args=msbuild_args)
build_utils.build_vs(vc_solution_file, 'Release', 'Win32', 'libuv', toolset, msbuild_args=msbuild_args)
lib_path_x86_debug = os.path.join(build_folder_path_x86_debug, 'libuv.lib')
lib_path_x86_release = os.path.join(build_folder_path_x86_release, 'libuv.lib')
shutil.copyfile(
os.path.join(source_folder_path, 'Debug/lib/libuv.lib'),
lib_path_x86_debug)
shutil.copyfile(
os.path.join(source_folder_path, 'Release/lib/libuv.lib'),
lib_path_x86_release)
build_utils.run_process(
['vcbuild.bat', 'clean'],
process_cwd=source_folder_path,
environment=x86_env,
shell=True)
# x64
x64_env = build_utils.get_win32_vs_x64_env()
x64_env['GYP_MSVS_VERSION'] = build_config.get_gyp_msvs_version()
build_utils.run_process(
['vcbuild.bat', 'x64', 'nobuild'],
process_cwd=source_folder_path,
environment=x64_env,
shell=True)
build_utils.build_vs(vc_solution_file, 'Debug', 'x64', 'libuv', toolset, msbuild_args=msbuild_args)
build_utils.build_vs(vc_solution_file, 'Release', 'x64', 'libuv', toolset, msbuild_args=msbuild_args)
lib_path_x64_debug = os.path.join(build_folder_path_x64_debug, 'libuv.lib')
lib_path_x64_release = os.path.join(build_folder_path_x64_release, 'libuv.lib')
shutil.copyfile(
os.path.join(source_folder_path, 'Debug/lib/libuv.lib'),
lib_path_x64_debug)
shutil.copyfile(
os.path.join(source_folder_path, 'Release/lib/libuv.lib'),
lib_path_x64_release)
# copy libs
libs_win_root = os.path.join(root_project_path, 'Libs/lib_CMake/win')
shutil.copyfile(
lib_path_x86_debug,
os.path.join(libs_win_root, 'x86/Debug/libuv.lib'))
shutil.copyfile(
lib_path_x86_release,
os.path.join(libs_win_root, 'x86/Release/libuv.lib'))
shutil.copyfile(
lib_path_x64_debug,
os.path.join(libs_win_root, 'x64/Debug/libuv.lib'))
shutil.copyfile(
lib_path_x64_release,
os.path.join(libs_win_root, 'x64/Release/libuv.lib'))
_copy_headers(source_folder_path, root_project_path)
def _build_macos(working_directory_path, root_project_path):
source_folder_path = _download(working_directory_path)
env = build_utils.get_autotools_macos_env()
install_dir_macos = os.path.join(
working_directory_path, 'gen/install_macos')
build_utils.run_process(
['sh', 'autogen.sh'], process_cwd=source_folder_path, environment=env)
build_utils.build_with_autotools(
source_folder_path,
['--host=x86_64-apple-darwin', '--disable-shared', '--enable-static'],
install_dir_macos,
env=env)
lib_path = os.path.join(install_dir_macos, 'lib/libuv.a')
shutil.copyfile(
lib_path,
os.path.join(root_project_path, 'Libs/lib_CMake/mac/libuv_macos.a'))
_copy_headers_from_install(install_dir_macos, root_project_path)
def _build_ios(working_directory_path, root_project_path):
source_folder_path = _download(working_directory_path)
env = build_utils.get_autotools_ios_env()
install_dir_ios = os.path.join(working_directory_path, 'gen/install_ios')
build_utils.run_process(
['sh', 'autogen.sh'],
process_cwd=source_folder_path,
environment=env)
build_utils.build_with_autotools(
source_folder_path,
['--host=armv7-apple-darwin', '--disable-shared', '--enable-static'],
install_dir_ios,
env=env)
lib_path = os.path.join(install_dir_ios, 'lib/libuv.a')
shutil.copyfile(
lib_path,
os.path.join(root_project_path, 'Libs/lib_CMake/ios/libuv_ios.a'))
_copy_headers_from_install(install_dir_ios, root_project_path)
def _build_android(working_directory_path, root_project_path):
source_folder_path = _download(working_directory_path)
additional_defines = ' -D__ANDROID__ -DHAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC=1'
# ARM
toolchain_path_arm = build_utils.android_ndk_get_toolchain_arm()
env_arm = build_utils.get_autotools_android_arm_env(toolchain_path_arm)
env_arm['CFLAGS'] = env_arm['CFLAGS'] + additional_defines
env_arm['CPPFLAGS'] = env_arm['CPPFLAGS'] + additional_defines
install_dir_android_arm = os.path.join(
working_directory_path, 'gen/install_android_arm')
build_utils.run_process(
['sh', 'autogen.sh'],
process_cwd=source_folder_path,
environment=env_arm)
build_utils.build_with_autotools(
source_folder_path,
['--host=arm-linux-androideabi',
'--disable-shared',
'--enable-static'],
install_dir_android_arm, env=env_arm)
# x86
toolchain_path_x86 = build_utils.android_ndk_get_toolchain_x86()
env_x86 = build_utils.get_autotools_android_x86_env(toolchain_path_x86)
env_x86['CFLAGS'] = env_x86['CFLAGS'] + additional_defines
env_x86['CPPFLAGS'] = env_x86['CPPFLAGS'] + additional_defines
install_dir_android_x86 = os.path.join(
working_directory_path, 'gen/install_android_x86')
build_utils.run_process(
['sh', 'autogen.sh'],
process_cwd=source_folder_path,
environment=env_x86)
build_utils.build_with_autotools(
source_folder_path,
['--host=i686-linux-android', '--disable-shared', '--enable-static'],
install_dir_android_x86,
env=env_x86)
libs_android_root = os.path.join(root_project_path, 'Libs/lib_CMake/android')
lib_path_arm = os.path.join(install_dir_android_arm, 'lib/libuv.a')
shutil.copyfile(
lib_path_arm, os.path.join(libs_android_root, 'armeabi-v7a/libuv.a'))
lib_path_x86 = os.path.join(install_dir_android_x86, 'lib/libuv.a')
shutil.copyfile(
lib_path_x86, os.path.join(libs_android_root, 'x86/libuv.a'))
_copy_headers_from_install(install_dir_android_arm, root_project_path)
def _build_linux(working_directory_path, root_project_path):
source_folder_path = _download(working_directory_path)
# Clone gyp
build_utils.run_process(
['git clone https://chromium.googlesource.com/external/gyp.git build/gyp'],
process_cwd=source_folder_path,
shell=True)
# Generate makefile using gyp
env = build_utils.get_autotools_linux_env()
build_utils.run_process(
['./gyp_uv.py -f make'],
process_cwd=source_folder_path,
environment=env,
shell=True)
# Build release library: only libuv.a, skipping tests
build_utils.run_process(
['BUILDTYPE=Release make libuv -C out'],
process_cwd=source_folder_path,
environment=env,
shell=True)
# Copy binary files to dava.engine's library folder
source_dir = os.path.join(source_folder_path, 'out/Release')
target_dir = os.path.join(root_project_path, 'Libs/lib_CMake/linux')
shutil.copyfile(os.path.join(source_dir, 'libuv.a'),
os.path.join(target_dir, 'libuv.a'))
# Copy headers to dava.engine's include folder
_copy_headers_from_install(source_folder_path, root_project_path)
def _copy_headers_from_install(install_folder_path, root_project_path):
include_path = os.path.join(root_project_path, 'Libs/include/libuv')
build_utils.copy_folder_recursive(
os.path.join(install_folder_path, 'include'), include_path)
def _copy_headers(source_folder_path, root_project_path):
include_path = os.path.join(root_project_path, 'Libs/include/libuv')
build_utils.copy_files(
os.path.join(source_folder_path, 'include'), include_path, '*.h')
| 37.05614
| 107
| 0.719061
|
01d39ab22e2a89bb3379226cd92c2224a127080f
| 462
|
py
|
Python
|
openapi_schema_validator/__init__.py
|
jparise/openapi-schema-validator
|
a1e3459d2013b622b10fcdcd743fff01f0b67fad
|
[
"BSD-3-Clause"
] | 1
|
2021-05-20T12:52:56.000Z
|
2021-05-20T12:52:56.000Z
|
openapi_schema_validator/__init__.py
|
jparise/openapi-schema-validator
|
a1e3459d2013b622b10fcdcd743fff01f0b67fad
|
[
"BSD-3-Clause"
] | 3
|
2021-08-30T16:30:50.000Z
|
2022-03-01T23:15:44.000Z
|
openapi_schema_validator/__init__.py
|
jparise/openapi-schema-validator
|
a1e3459d2013b622b10fcdcd743fff01f0b67fad
|
[
"BSD-3-Clause"
] | 3
|
2021-05-21T21:26:34.000Z
|
2021-10-05T16:57:57.000Z
|
# -*- coding: utf-8 -*-
from openapi_schema_validator._format import oas30_format_checker
from openapi_schema_validator.shortcuts import validate
from openapi_schema_validator.validators import OAS30Validator
__author__ = 'Artur Maciag'
__email__ = 'maciag.artur@gmail.com'
__version__ = '0.1.4'
__url__ = 'https://github.com/p1c2u/openapi-schema-validator'
__license__ = 'BSD 3-Clause License'
__all__ = ['validate', 'OAS30Validator', 'oas30_format_checker']
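# Minimal usage sketch (the schema and instance below are illustrative):
#   from openapi_schema_validator import validate
#   validate({"name": "teapot"},
#            {"type": "object", "properties": {"name": {"type": "string"}}})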
| 35.538462
| 65
| 0.796537
|
24fe389318d44980ad45b766875022a99635b325
| 614
|
py
|
Python
|
src/main.py
|
Yasushi-Shinohara/CrystalLewenstein
|
5dde921f067269b41fbef31f2422c92eb8e2301b
|
[
"MIT"
] | null | null | null |
src/main.py
|
Yasushi-Shinohara/CrystalLewenstein
|
5dde921f067269b41fbef31f2422c92eb8e2301b
|
[
"MIT"
] | null | null | null |
src/main.py
|
Yasushi-Shinohara/CrystalLewenstein
|
5dde921f067269b41fbef31f2422c92eb8e2301b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# coding: UTF-8
# This was created 2020/06/08 by Y. Shinohara
# This was last modified 2020/06/08 by Y. Shinohara  # The modification date is dubious because of my laziness
import time
ts = time.time()
from modules.print_funcs import print_header, print_footer, print_midtime, print_endtime
from modules.functions import *
from modules.parameters import parameter_class
print_header()
import sys
import numpy as np
import math
import ctypes as ct
from modules.constants import *
tt = time.time()
print_midtime(ts,tt)
te = time.time()
#print_endtime(ts,tt,te,param.Nt)
print_footer()
sys.exit()
| 21.928571
| 106
| 0.773616
|
33a85dc564074ead71cba7fa188bd0cc3b6cc96f
| 669
|
py
|
Python
|
examples/discover.py
|
nilsbeck/pytheos
|
de4f3a03330ddb28e68ddcaa7b4888ea9a25e238
|
[
"MIT"
] | null | null | null |
examples/discover.py
|
nilsbeck/pytheos
|
de4f3a03330ddb28e68ddcaa7b4888ea9a25e238
|
[
"MIT"
] | 1
|
2021-10-30T16:31:41.000Z
|
2021-10-30T16:31:41.000Z
|
examples/discover.py
|
nilsbeck/pytheos
|
de4f3a03330ddb28e68ddcaa7b4888ea9a25e238
|
[
"MIT"
] | 1
|
2021-10-30T14:24:58.000Z
|
2021-10-30T14:24:58.000Z
|
#!/usr/bin/env python
"""
This example demonstrates how to use SSDP to discover HEOS devices on your network.
"""
import os
import sys
import asyncio
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pytheos
import pytheos.utils
DISCOVERY_TIMEOUT = 3
async def main():
services = await pytheos.discover(DISCOVERY_TIMEOUT)
if services:
print("Discovered these HEOS services:")
for svc in services:
print(f'- {pytheos.utils.extract_host(svc.location)}')
else:
print("No HEOS services detected!")
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
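# On Python 3.7+, asyncio.run(main()) is an equivalent, simpler entry point.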
| 21.580645
| 83
| 0.692078
|
e8967d6e439b1daa3735932ea5fc72c0d3652bd5
| 3,817
|
py
|
Python
|
Source/Python/imuDataManipulator.py
|
JonathanSolvesProblems/Motion-Capture-Hand
|
b4352d23196d5eabb3df0e97cd0b03eb0fe093ad
|
[
"MIT"
] | null | null | null |
Source/Python/imuDataManipulator.py
|
JonathanSolvesProblems/Motion-Capture-Hand
|
b4352d23196d5eabb3df0e97cd0b03eb0fe093ad
|
[
"MIT"
] | null | null | null |
Source/Python/imuDataManipulator.py
|
JonathanSolvesProblems/Motion-Capture-Hand
|
b4352d23196d5eabb3df0e97cd0b03eb0fe093ad
|
[
"MIT"
] | null | null | null |
import serial
import bpy
import mathutils
from time import *
class ImuDataManipulator:
def __init__(self, port = 'com3', baud = 115200, scene_mode = '', rot_mode = '', armatureName = '', boneName = '', boneFingerNames = []):
self.port = port
self.baud = baud
self.scene_mode = scene_mode
self.rot_mode = rot_mode
self.armatureName = armatureName
self.boneName = boneName
self.boneFingerNames = boneFingerNames
self.setModes()
self.setBone()
def readSerial(self):
imuData = serial.Serial(self.port, self.baud)
sleep(1) # give time to read serial port
return imuData
def setModes(self):
# enter pose mode
bpy.ops.object.mode_set(mode = self.scene_mode)
# set rotation mode
bpy.ops.pose.rotation_mode_set(type = self.rot_mode)
def setBone(self):
# accesses the armature for the hand
armature = bpy.data.objects[self.armatureName]
# gets the quaternion values for the middle joint bone called hand
targetBone = armature.pose.bones[self.boneName].rotation_quaternion
pinkyBone = armature.pose.bones[self.boneFingerNames[0]].rotation_quaternion
ringBone = armature.pose.bones[self.boneFingerNames[1]].rotation_quaternion
middleFingerBone = armature.pose.bones[self.boneFingerNames[2]].rotation_quaternion
indexFingerBone = armature.pose.bones[self.boneFingerNames[3]].rotation_quaternion
thumbFingerBone = armature.pose.bones[self.boneFingerNames[4]].rotation_quaternion
# armature = bpy.data.objects[self.armatureName]
# armatureWorld = armature.matrix_world
# targetBone = armatureWorld.to_quaternion()
# self.targetBone = aramatureWorldQuaternion
self.targetBone = targetBone
self.pinkyBone = pinkyBone
self.ringBone = ringBone
self.middleFingerBone = middleFingerBone
self.indexFingerBone = indexFingerBone
self.thumbFingerBone = thumbFingerBone
def retargetQuaternionData(self, q0, q1, q2, q3):
self.targetBone[0] = q0
self.targetBone[1] = -q2
self.targetBone[2] = q1
self.targetBone[3] = q3
def retargetFingerBones(self, fingerAngles):
quatDownScale = -100 # scale into a suitable range for quaternion rotation.
pinkyFingerBend = fingerAngles[0] / quatDownScale # get the angle in a corresponding quaternion representation.
ringFingerBend = fingerAngles[1] / quatDownScale
middleFingerBend = fingerAngles[2] / quatDownScale
indexFingerBone = fingerAngles[3] / quatDownScale
thumbFingerBone = fingerAngles[4] / quatDownScale
self.pinkyBone[1] = pinkyFingerBend
self.ringBone[1] = ringFingerBend
self.middleFingerBone[1] = middleFingerBend
self.indexFingerBone[1] = indexFingerBone
self.thumbFingerBone[1] = thumbFingerBone
def readAndTargetQuaternionData(self):
imuData = self.readSerial()
while True: # continuously read data from IMU
while (imuData.inWaiting() == 0): # do nothing if no data being read
pass
packet = imuData.readline()
packet = str(packet,'utf-8')
packet = packet.split(',')
self.retargetQuaternionData(float(packet[5]), float(packet[6]), float(packet[7]), float(packet[8]))
self.retargetFingerBones([float(packet[0]), float(packet[1]), float(packet[2]), float(packet[3]), float(packet[4])])
# allows for live viewing of the updating scene, while reading the data
bpy.ops.wm.redraw_timer(type = 'DRAW_WIN_SWAP', iterations = 1)
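# Hypothetical usage inside Blender (armature and bone names are assumptions):
#   manip = ImuDataManipulator(port='com3', baud=115200, scene_mode='POSE',
#       rot_mode='QUATERNION', armatureName='Armature', boneName='hand',
#       boneFingerNames=['pinky', 'ring', 'middle', 'index', 'thumb'])
#   manip.readAndTargetQuaternionData()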
| 43.375
| 141
| 0.650511
|
4a32fe4a0fe811b44e915b06de1a8de6ca673b16
| 2,108
|
py
|
Python
|
test_lib/pseudo_files_db.py
|
amotus/oe-pseudo-test-env
|
b9ae7f7ceb265f33b4f70938389a11bf2af41aed
|
[
"Apache-2.0"
] | null | null | null |
test_lib/pseudo_files_db.py
|
amotus/oe-pseudo-test-env
|
b9ae7f7ceb265f33b4f70938389a11bf2af41aed
|
[
"Apache-2.0"
] | null | null | null |
test_lib/pseudo_files_db.py
|
amotus/oe-pseudo-test-env
|
b9ae7f7ceb265f33b4f70938389a11bf2af41aed
|
[
"Apache-2.0"
] | null | null | null |
import sqlite3
from contextlib import closing
from dataclasses import dataclass
from typing import Iterator, Tuple, Any, Optional
from pathlib import Path
@dataclass
class PseudoFilesDbRow:
id: int
path: Path
dev: int
ino: int
uid: int
gid: int
mode: int
rdev: int
deleting: int
class PseudoFilesDbError(Exception):
pass
class PseudoFilesDbRowParseError(PseudoFilesDbError):
pass
def _parse_db_int(db_v: Any) -> int:
if not isinstance(db_v, int):
raise PseudoFilesDbRowParseError(
f"Unexpected db value '{db_v}' of type "
f"'{db_v.__class__.__name__}'. Expected an 'int'."
)
return db_v
def _parse_db_str(db_v: Any) -> str:
if not isinstance(db_v, str):
raise PseudoFilesDbRowParseError(
f"Unexpected db value '{db_v}' of type "
f"'{db_v.__class__.__name__}'. Expected a 'str'."
)
return db_v
def _parse_db_path(db_v: Any) -> Path:
return Path(_parse_db_str(db_v))
def _parse_files_db_row(row: Tuple[Any, ...]) -> PseudoFilesDbRow:
try:
return PseudoFilesDbRow(
id=_parse_db_int(row[0]),
path=_parse_db_path(row[1]),
dev=_parse_db_int(row[2]),
ino=_parse_db_int(row[3]),
uid=_parse_db_int(row[4]),
gid=_parse_db_int(row[5]),
mode=_parse_db_int(row[6]),
rdev=_parse_db_int(row[7]),
deleting=_parse_db_int(row[8])
)
except IndexError as e:
raise PseudoFilesDbRowParseError(
f"Unexpected db row size. Original error: {str(e)}"
)
def iter_pseudo_files_db_rows(
db_path: Path,
timeout_s: Optional[float] = None
) -> Iterator[PseudoFilesDbRow]:
if timeout_s is None:
timeout_s = 5.0
with closing(sqlite3.connect(
str(db_path),
timeout=timeout_s
)) as con:
with closing(con.cursor()) as cur:
for row in cur.execute(
"SELECT * FROM files ORDER BY id"):
yield _parse_files_db_row(row)
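# Example sketch (the db path is illustrative):
#   for row in iter_pseudo_files_db_rows(Path('files.db'), timeout_s=1.0):
#       print(row.path, oct(row.mode))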
| 23.954545
| 66
| 0.610057
|
ebb5f52b866500805274837d4adcf73883b69e8c
| 613
|
py
|
Python
|
torch/_C/CudaDoubleStorageBase.py
|
binh-vu/pytorch-stub
|
2067c75457c66fd4fd40fde091b5da971f8ea838
|
[
"MIT"
] | 21
|
2019-01-25T08:53:51.000Z
|
2021-05-29T05:19:57.000Z
|
torch/_C/CudaDoubleStorageBase.py
|
binh-vu/pytorch-stub
|
2067c75457c66fd4fd40fde091b5da971f8ea838
|
[
"MIT"
] | 1
|
2020-11-19T03:22:24.000Z
|
2021-03-22T14:48:55.000Z
|
torch/_C/CudaDoubleStorageBase.py
|
binh-vu/pytorch-stub
|
2067c75457c66fd4fd40fde091b5da971f8ea838
|
[
"MIT"
] | 4
|
2019-03-02T05:59:19.000Z
|
2019-10-05T11:38:55.000Z
|
# encoding: utf-8
# module torch._C
# from /Users/rook/anaconda/lib/python3.6/site-packages/torch/_C.cpython-36m-darwin.so
# by generator 1.145
# no doc
# imports
import torch._C._functions as _functions # <module 'torch._C._functions'>
from .object import object
class CudaDoubleStorageBase(object):
# no doc
def __init__(self): # reliably restored by inspect
# no doc
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
| 24.52
| 101
| 0.686786
|
757cec314b6d3d1bf5d7c19eed131a52116ed2aa
| 482
|
py
|
Python
|
algernon/keyboard.py
|
ZeroMaxinumXZ/albert
|
ac6eca4a9422a87d0a09c0d2a51278cf51949b90
|
[
"MIT"
] | 7
|
2019-03-03T14:41:44.000Z
|
2019-03-06T18:37:50.000Z
|
algernon/keyboard.py
|
ZeroMaxinumXZ/reinfinity-mouse
|
ac6eca4a9422a87d0a09c0d2a51278cf51949b90
|
[
"MIT"
] | 5
|
2019-03-03T17:55:39.000Z
|
2019-03-06T05:37:01.000Z
|
algernon/keyboard.py
|
ZeroMaxinumXZ/albert
|
ac6eca4a9422a87d0a09c0d2a51278cf51949b90
|
[
"MIT"
] | null | null | null |
def keys():
keys = ['\t', '\n', '\r', ' ', '!', '"', '#', '$', '%', '&', "'", '(',
')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']', '^', '_', '`',
'a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~',
'alt', 'ctrl', 'down', 'enter', 'left','right', 'shift', 'space', 'tab', 'up']
return keys
| 53.555556
| 78
| 0.209544
|
92cc8d487f67cd5889ea41187f4b62de90cf9838
| 486
|
py
|
Python
|
extra_tests/cffi_tests/embedding/add3.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
extra_tests/cffi_tests/embedding/add3.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
extra_tests/cffi_tests/embedding/add3.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
# Generated by pypy/tool/import_cffi.py
import cffi
ffi = cffi.FFI()
ffi.embedding_api("""
int add3(int, int, int, int);
""")
ffi.embedding_init_code(r"""
from _add3_cffi import ffi
import sys
@ffi.def_extern()
def add3(x, y, z, t):
sys.stdout.write("adding %d, %d, %d, %d\n" % (x, y, z, t))
sys.stdout.flush()
return x + y + z + t
""")
ffi.set_source("_add3_cffi", """
""")
fn = ffi.compile(verbose=True)
print('FILENAME: %s' % (fn,))
| 18.692308
| 66
| 0.578189
|
1f722dce6800b64deeaa36e4f51a706ac5ad0b14
| 2,042
|
py
|
Python
|
fastapi_contrib/permissions.py
|
mumtozvalijonov/fastapi_contrib
|
e35b4fd7c135380f885c364e4d4b992fb55f687e
|
[
"MIT"
] | 504
|
2019-08-26T18:14:03.000Z
|
2022-03-25T13:49:50.000Z
|
fastapi_contrib/permissions.py
|
mumtozvalijonov/fastapi_contrib
|
e35b4fd7c135380f885c364e4d4b992fb55f687e
|
[
"MIT"
] | 100
|
2019-08-23T07:52:30.000Z
|
2022-03-20T06:13:10.000Z
|
fastapi_contrib/permissions.py
|
mumtozvalijonov/fastapi_contrib
|
e35b4fd7c135380f885c364e4d4b992fb55f687e
|
[
"MIT"
] | 32
|
2019-10-01T12:46:14.000Z
|
2022-02-01T13:44:53.000Z
|
from abc import ABC, abstractmethod
from starlette import status
from starlette.requests import Request
from fastapi_contrib.exceptions import HTTPException
class BasePermission(ABC):
"""
Abstract permission that all other Permissions must inherit from.
Defines basic error message, status & error codes.
Upon initialization, calls abstract method `has_required_permissions`
which will be specific to concrete implementation of Permission class.
You would write your permissions like this:
.. code-block:: python
class TeapotUserAgentPermission(BasePermission):
def has_required_permissions(self, request: Request) -> bool:
return request.headers.get('User-Agent') == "Teapot v1.0"
"""
error_msg = "Forbidden."
status_code = status.HTTP_403_FORBIDDEN
error_code = status.HTTP_403_FORBIDDEN
@abstractmethod
def has_required_permissions(self, request: Request) -> bool:
...
def __init__(self, request: Request):
if not self.has_required_permissions(request):
raise HTTPException(
status_code=self.status_code,
detail=self.error_msg,
error_code=self.error_code
)
class PermissionsDependency(object):
"""
Permission dependency that is used to define and check all the permission
classes from one place inside route definition.
Use it as an argument to FastAPI's `Depends` as follows:
.. code-block:: python
app = FastAPI()
@app.get(
"/teapot/",
dependencies=[Depends(
PermissionsDependency([TeapotUserAgentPermission]))]
)
async def teapot() -> dict:
return {"teapot": True}
"""
def __init__(self, permissions_classes: list):
self.permissions_classes = permissions_classes
def __call__(self, request: Request):
for permission_class in self.permissions_classes:
permission_class(request=request)
| 28.760563
| 77
| 0.669442
|
191c403529a93807d0ec4731a43ab31cfe53ccca
| 17,286
|
py
|
Python
|
tests/test_loader.py
|
vergeml/VergeML
|
3dc30ba4e0f3d038743b6d468860cbcf3681acc6
|
[
"MIT"
] | 324
|
2018-10-28T19:29:47.000Z
|
2020-01-24T20:22:07.000Z
|
tests/test_loader.py
|
mme/vergeml
|
3dc30ba4e0f3d038743b6d468860cbcf3681acc6
|
[
"MIT"
] | 8
|
2018-10-30T10:57:19.000Z
|
2019-06-05T10:21:30.000Z
|
tests/test_loader.py
|
mme/vergeml
|
3dc30ba4e0f3d038743b6d468860cbcf3681acc6
|
[
"MIT"
] | 19
|
2018-10-29T18:43:03.000Z
|
2019-02-21T15:08:41.000Z
|
"""
Tests data loading (cached + direct).
"""
import random
from pathlib import Path
from vergeml.loader import MemoryCachedLoader, LiveLoader, FileCachedLoader
from vergeml.io import SourcePlugin, source, Sample
from vergeml.operation import OperationPlugin, operation
from vergeml.operations.augment import AugmentOperation
# pylint: disable=C0111
# -------------------------------------------------
def test_live_loader_meta(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = LiveLoader(cache_dir, src)
_test_loader_meta(loader)
def test_mem_loader_meta(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = MemoryCachedLoader(cache_dir, src)
_test_loader_meta(loader)
def test_disk_loader_meta(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = FileCachedLoader(cache_dir, src)
_test_loader_meta(loader)
# -------------------------------------------------
def test_live_loader_with_ops_meta(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = LiveLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_meta(loader)
def test_mem_out_loader_with_ops_meta(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = MemoryCachedLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_meta(loader)
def test_disk_out_loader_with_ops_meta(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = FileCachedLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_meta(loader)
def test_mem_loader_with_ops_meta(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = MemoryCachedLoader(cache_dir, src)
loader2 = LiveLoader(cache_dir, loader, ops=[AppendStringOperation()], output=src)
_test_loader_meta(loader2)
def test_disk_loader_with_ops_meta(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = FileCachedLoader(cache_dir, src)
loader2 = LiveLoader(cache_dir, loader, ops=[AppendStringOperation()], output=src)
_test_loader_meta(loader2)
# -------------------------------------------------
def test_live_loader_num_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = LiveLoader(cache_dir, src)
_test_loader_num_samples(loader)
def test_mem_loader_num_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = MemoryCachedLoader(cache_dir, src)
_test_loader_num_samples(loader)
def test_disk_loader_num_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = FileCachedLoader(cache_dir, src)
_test_loader_num_samples(loader)
# -------------------------------------------------
def test_live_loader_ops_num_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = LiveLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_num_samples(loader)
def test_mem_out_loader_ops_num_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = MemoryCachedLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_num_samples(loader)
def test_disk_out_loader_ops_num_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = FileCachedLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_num_samples(loader)
def test_mem_loader_ops_num_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = MemoryCachedLoader(cache_dir, src)
loader2 = LiveLoader(cache_dir, loader, ops=[AppendStringOperation()], output=src)
_test_loader_num_samples(loader2)
def test_disk_loader_ops_num_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir)})
loader = FileCachedLoader(cache_dir, src)
loader2 = LiveLoader(cache_dir, loader, ops=[AppendStringOperation()], output=src)
_test_loader_num_samples(loader2)
# --------------------------------------------------
def test_mem_loader_read_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = MemoryCachedLoader(cache_dir, src)
_test_loader_read_samples(loader)
def test_live_loader_read_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = LiveLoader(cache_dir, src)
_test_loader_read_samples(loader)
def test_disk_loader_read_samples(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = FileCachedLoader(cache_dir, src)
_test_loader_read_samples(loader)
# --------------------------------------------------
def test_live_loader_with_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = LiveLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_read_samples_transformed(loader)
def test_mem_loader_with_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = MemoryCachedLoader(cache_dir, src)
loader2 = LiveLoader(cache_dir, loader, ops=[AppendStringOperation()], output=src)
_test_loader_read_samples_transformed(loader2)
def test_mem_out_loader_with_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = MemoryCachedLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_read_samples_transformed(loader)
def test_disk_loader_with_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = FileCachedLoader(cache_dir, src)
loader2 = LiveLoader(cache_dir, loader, ops=[AppendStringOperation()], output=src)
_test_loader_read_samples_transformed(loader2)
def test_disk_out_loader_with_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = FileCachedLoader(cache_dir, src, ops=[AppendStringOperation()], output=src)
_test_loader_read_samples_transformed(loader)
# --------------------------------------------------
def test_live_loader_with_multiplier_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader = LiveLoader(cache_dir, src, ops=ops, output=src)
_test_loader_read_samples_x2(loader)
def test_mem_out_loader_with_multiplier_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader = MemoryCachedLoader(cache_dir, src, ops=ops, output=src)
_test_loader_read_samples_x2(loader)
def test_disk_out_loader_with_multiplier_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader = FileCachedLoader(cache_dir, src, ops=ops, output=src)
_test_loader_read_samples_x2(loader)
def test_mem_loader_with_multiplier_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = MemoryCachedLoader(cache_dir, src)
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader2 = LiveLoader(cache_dir, loader, ops=ops, output=src)
_test_loader_read_samples_x2(loader2)
def test_disk_loader_with_multiplier_ops(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = FileCachedLoader(cache_dir, src)
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader2 = LiveLoader(cache_dir, loader, ops=ops, output=src)
_test_loader_read_samples_x2(loader2)
# --------------------------------------------------
def test_live_loader_with_multiplier_ops_between(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader = LiveLoader(cache_dir, src, ops=ops, output=src)
_test_loader_read_samples_x2_between(loader)
def test_mem_out_loader_with_multiplier_ops_between(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader = MemoryCachedLoader(cache_dir, src, ops=ops, output=src)
_test_loader_read_samples_x2_between(loader)
def test_disk_out_loader_with_multiplier_ops_between(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader = FileCachedLoader(cache_dir, src, ops=ops, output=src)
_test_loader_read_samples_x2_between(loader)
def test_mem_loader_with_multiplier_ops_between(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = MemoryCachedLoader(cache_dir, src)
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader2 = LiveLoader(cache_dir, loader, ops=ops, output=src)
_test_loader_read_samples_x2_between(loader2)
def test_disk_loader_with_multiplier_ops_between(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = FileCachedLoader(cache_dir, src)
ops = [AugmentOperation(variants=2), AppendStringOperation()]
loader2 = LiveLoader(cache_dir, loader, ops=ops, output=src)
_test_loader_read_samples_x2_between(loader2)
# --------------------------------------------------
def test_mem_loader_with_rng_no_reset(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = MemoryCachedLoader(cache_dir, src)
_test_loader_with_rng_no_reset(loader)
def test_live_loader_with_rng_no_reset(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = LiveLoader(cache_dir, src)
_test_loader_with_rng_no_reset(loader)
def test_disk_loader_with_rng_no_reset(tmpdir):
cache_dir = _prepare_dir(tmpdir)
src = SourceTest({'samples-dir': str(tmpdir), 'test-split': 2, 'val-split': 2})
loader = FileCachedLoader(cache_dir, src)
_test_loader_with_rng_no_reset(loader)
# ---------------------------------------------------------------------------------
def _test_loader_meta(loader):
loader.begin_read_samples()
assert loader.meta['some-meta'] == 'meta-value'
loader.end_read_samples()
def _test_loader_num_samples(loader):
loader.begin_read_samples()
assert loader.num_samples('train') == 8
assert loader.num_samples('test') == 1
assert loader.num_samples('val') == 1
loader.end_read_samples()
def _test_loader_read_samples(loader):
loader.begin_read_samples()
train_samples = loader.read_samples('train', 0, loader.num_samples('train'))
assert list(map(lambda s: s.x, train_samples)) == \
['content8', 'content2', 'content9', 'content3', 'content5', 'content7']
assert len(train_samples) == 6
val_samples = loader.read_samples('val', 0, loader.num_samples('val'))
assert list(map(lambda s: s.x, val_samples)) == \
['content0', 'content1']
assert len(val_samples) == 2
test_samples = loader.read_samples('test', 0, loader.num_samples('test'))
assert list(map(lambda s: s.x, test_samples)) == \
['content4', 'content6']
assert len(test_samples) == 2
loader.end_read_samples()
def _test_loader_read_samples_transformed(loader):
loader.begin_read_samples()
train_samples = loader.read_samples('train', 0, loader.num_samples('train'))
assert list(map(lambda s: s.x, train_samples)) == \
['content8-hello-transformed', 'content2-hello-transformed', 'content9-hello-transformed',
'content3-hello-transformed', 'content5-hello-transformed', 'content7-hello-transformed']
assert len(train_samples) == 6
val_samples = loader.read_samples('val', 0, loader.num_samples('val'))
assert list(map(lambda s: s.x, val_samples)) == \
['content0-hello-transformed', 'content1-hello-transformed']
assert len(val_samples) == 2
test_samples = loader.read_samples('test', 0, loader.num_samples('test'))
assert list(map(lambda s: s.x, test_samples)) == \
['content4-hello-transformed', 'content6-hello-transformed']
assert len(test_samples) == 2
loader.end_read_samples()
def _test_loader_read_samples_x2(loader):
loader.begin_read_samples()
train_samples = loader.read_samples('train', 0, loader.num_samples('train'))
assert list(map(lambda s: s.x, train_samples)) == \
['content8-hello-transformed', 'content8-hello-transformed',
'content2-hello-transformed', 'content2-hello-transformed',
'content9-hello-transformed', 'content9-hello-transformed',
'content3-hello-transformed', 'content3-hello-transformed',
'content5-hello-transformed', 'content5-hello-transformed',
'content7-hello-transformed', 'content7-hello-transformed', ]
assert len(train_samples) == 12
val_samples = loader.read_samples('val', 0, loader.num_samples('val'))
assert list(map(lambda s: s.x, val_samples)) == \
['content0-hello-transformed', 'content0-hello-transformed',
'content1-hello-transformed', 'content1-hello-transformed']
assert len(val_samples) == 4
test_samples = loader.read_samples('test', 0, loader.num_samples('test'))
assert list(map(lambda s: s.x, test_samples)) == \
['content4-hello-transformed', 'content4-hello-transformed',
'content6-hello-transformed', 'content6-hello-transformed']
assert len(test_samples) == 4
loader.end_read_samples()
def _test_loader_read_samples_x2_between(loader):
loader.begin_read_samples()
train_samples = loader.read_samples('train', 1, 6)
assert list(map(lambda s: s.x, train_samples)) == \
['content8-hello-transformed',
'content2-hello-transformed', 'content2-hello-transformed',
'content9-hello-transformed', 'content9-hello-transformed',
'content3-hello-transformed']
def _test_loader_with_rng_no_reset(loader):
loader.begin_read_samples()
sample = loader.read_samples('train', 0)[0]
rnum = sample.rng.randint(1, 1000000)
sample = loader.read_samples('train', 0)[0]
assert rnum != sample.rng.randint(1, 1000000)
loader.end_read_samples()
def _prepare_dir(tmpdir):
for i in range(0, 10):
path = tmpdir.join(f"file{i}.test")
path.write("content" + str(i))
cache_dir = tmpdir.mkdir('.cache')
return str(cache_dir)
@source('test-source', 'A test source.', input_patterns="**/*.test") # pylint: disable=W0223
class SourceTest(SourcePlugin):
def __init__(self, args=None):
self.files = None
super().__init__(args or {})
def begin_read_samples(self):
if self.files:
return
self.meta['some-meta'] = 'meta-value'
self.files = self.scan_and_split_files()
def num_samples(self, split: str) -> int:
return len(self.files[split])
def read_samples(self, split, index, n=1):
items = self.files[split][index:index+n]
items = [(Path(filename).read_text(), meta) for filename, meta in items]
res = []
for item, meta in items:
rng = random.Random(str(self.random_seed) + meta['filename'])
res.append(Sample(item, None, meta.copy(), rng))
return res
def transform(self, sample):
sample.x = sample.x + '-transformed'
sample.y = None
return sample
def hash(self, state: str) -> str:
return super().hash(state + self.hash_files(self.files))
@operation('append')
class AppendStringOperation(OperationPlugin):
type = str
def transform(self, data, rng):
return data + "-hello"
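# Sketch of the pipeline these tests exercise (values follow the asserts
# above): AugmentOperation(variants=2) yields each sample twice, then
# AppendStringOperation appends '-hello' and SourceTest.transform appends
# '-transformed', e.g. 'content8' -> 'content8-hello-transformed' (x2).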
| 40.672941
| 98
| 0.695476
|
be16002996752adf86c45119f92a9f2237354d26
| 15,626
|
py
|
Python
|
django_backend/backend/apps.py
|
holg/django_backend
|
6cef76a378664e6621619862e6db476788a58992
|
[
"BSD-3-Clause"
] | null | null | null |
django_backend/backend/apps.py
|
holg/django_backend
|
6cef76a378664e6621619862e6db476788a58992
|
[
"BSD-3-Clause"
] | null | null | null |
django_backend/backend/apps.py
|
holg/django_backend
|
6cef76a378664e6621619862e6db476788a58992
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import OrderedDict
from django.conf.urls import include, url
from django.contrib.auth import get_permission_codename
try: # htr dj3 fix
from django.urls import reverse
except Exception:
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django_viewset import ViewSet, ModelViewSet
from floppyforms.__future__.models import ModelForm, modelform_factory
from ..forms import formfield_callback
from ..group import Group
from .preview import Preview
from .inline_related import InlineRelatedObject
from .columns import BackendColumn
from .forms import ActionForm, SortForm
from .urlname_helper import URLNames
from .utils import TemplateHintProvider
DEFAULT_REGISTRY = 'default'
class BaseBackend(TemplateHintProvider, ViewSet):
# Import the branch and language globals here to make them available as
# class attributes. This is useful, so that view instances that have a
# ``self.backend`` variable attached don't need to import from django_backend.
# This is needed in some circumstances to prevent circular imports.
from django_backend.state import language
from django_backend.state import site
namespace = 'django_backend'
verbose_name = None
verbose_name_plural = None
template_hint = None
def __init__(self, id, verbose_name=None, verbose_name_plural=None,
parent=None, registry=DEFAULT_REGISTRY, group=None):
self.id = id
if verbose_name:
self.verbose_name = verbose_name
if verbose_name_plural:
self.verbose_name_plural = verbose_name_plural
self.parent = parent
self.registry = registry
self.group = group
self._registries = {}
self._groups = {}
self.base = self
while self.base and self.base.parent:
self.base = self.base.parent
urlname_prefix = self.get_urlname_prefix()
super(BaseBackend, self).__init__(urlname_prefix=urlname_prefix)
def __repr__(self):
id = self.id
parent = self.parent
while parent:
id = parent.id + '.' + id
parent = parent.parent
return '<{0}: {1}>'.format(
self.__class__.__name__,
id)
def reverse(self, viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):
from django.contrib.sites.models import Site
if kwargs is None:
kwargs = {}
kwargs.setdefault('site', Site.objects.get_current().pk)
kwargs.setdefault('language', self.language.active)
return reverse(viewname, urlconf=urlconf, args=args, kwargs=kwargs, prefix=prefix, current_app=current_app)
def get_template_hints(self, name_provider, hint_providers):
return [self.template_hint] + list(self.FEATURES.keys())
def register(self, backend_class, registry=DEFAULT_REGISTRY,
group=None, **kwargs):
"""
Enables nested backends. There are two different registries in use. The
argument `registry` is refering to a internal categorization. Usually a
backend goes into the 'default' registry, but there are special cases
like backends only used for inline-edit modals. Those should have a
extra registry, so that they live in a extra url-namespace etc.
The other registry is the called `group`. This is only used in the
frontend to categorize backends by there relations. It's for example
used to group the backends into the different boxes in the sidebar.
"""
backend = backend_class(
parent=self,
registry=registry,
group=group,
**kwargs)
self._registries.setdefault(registry, []).append(backend)
if group is not None:
group = self.get_group(group)
group.append(backend)
return backend
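# Hypothetical usage (site_backend, PageBackend and the id are assumptions):
#   backend = site_backend.register(PageBackend, id='page', group='content')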
def get_group(self, id):
"""
Get a registered group by id. If it is not yet registered, a new group
with the given id will be created and registered. If an existing
group is given, it will be reused and registered if it is not
already.
"""
if isinstance(id, Group):
group = id
elif id in self._groups:
group = self._groups[id]
else:
group = Group(id)
if group.id not in self._groups:
self._groups[group.id] = group
return group
def get_registered(self, registry=DEFAULT_REGISTRY, include_children=True):
backends = []
if registry in self._registries:
backends.extend(self._registries[registry])
if include_children:
for child in self._registries.get(registry, []):
backends.extend(
child.get_registered(registry, include_children=include_children))
return backends
def find(self, id=None, model=None, registry=DEFAULT_REGISTRY):
"""
Find a backend with the given ``id`` which is registered in
``registry``.
The logic is to first look inside the backend this method is called on
for a backend that matches. If none is found, go up to the
parent and try at that level.
"""
assert id or model, 'Either provide ``id`` or ``model``.'
if registry in self._registries:
backend_list = self._registries[registry]
for backend in backend_list:
id_matches = id and backend.id == id
backend_model = getattr(backend, 'model', None)
model_matches = model and backend_model == model
if id_matches or model_matches:
return backend
if self.parent:
try:
return self.parent.find(
id=id,
model=model,
registry=registry)
except ValueError:
pass
raise ValueError(
'Cannot find a backend with id `{0}` or model `{1}` in the '
'registry `{2}`'.format(
id,
model,
registry))
def __getitem__(self, key):
'''
Allow lookups by backend id:
>>> backend['menu']
<MenuBackend: menu>
'''
try:
return self.find(id=key)
except ValueError as e:
raise KeyError(e.args[0])
@property
def urlnames(self):
return URLNames(self)
def get_urlname_prefix(self):
prefix = self.id
if self.registry != DEFAULT_REGISTRY:
prefix = (
self.registry +
self.urlname_separator +
prefix)
if self.parent and self.parent.urlname_prefix:
prefix = (
self.parent.urlname_prefix +
self.urlname_separator +
prefix)
return prefix
def get_view_kwargs(self, viewset_view):
kwargs = super(BaseBackend, self).get_view_kwargs(viewset_view)
if hasattr(viewset_view.view, 'backend'):
kwargs['backend'] = self
return kwargs
@property
def groups(self):
return sorted(
self._groups.values(),
key=lambda g: g.position)
def get_children_urls(self):
'''
Return the urls of all registered backends.
'''
urls = []
for registry, backends in self._registries.items():
for backend in backends:
regex = r'^{0}{1}/'.format(
registry + '/' if registry != DEFAULT_REGISTRY else '',
backend.id)
urls.append(
url(regex, include(backend.get_urls())))
return urls
def get_urls(self):
viewset_urls = super(BaseBackend, self).get_urls()
urls = viewset_urls + self.get_children_urls()
return urls
def get_features(self):
return {} # none (so far)
@property
def FEATURES(self):
try:
return self._FEATURES
except AttributeError:
self._FEATURES = self.get_features()
return self._FEATURES
def has_perm(self, user, perm, obj=None):
# If the permission name contains no '.' (like 'list'), the caller
# wants to check a model permission. So we use the access_backend
# permission instead, since the index has no 'list' permission assigned.
if '.' not in perm:
return user.has_perm('django_backend.access_backend')
else:
return user.has_perm(perm, obj)
class BaseModelBackend(ModelViewSet, BaseBackend):
form_class = ModelForm
filter_form_class = None
action_form_class = ActionForm
sort_form_class = SortForm
paginate_by = 12
order_by = None
readonly_fields = ()
preview = Preview()
inline_related_object = InlineRelatedObject()
def __init__(self, *args, **kwargs):
ModelViewSet.__init__(self, model=kwargs.pop('model'))
BaseBackend.__init__(self, *args, **kwargs)
self.model_opts = self.model._meta
if not self.verbose_name:
self.verbose_name = self.model._meta.verbose_name
if not self.verbose_name_plural:
self.verbose_name_plural = self.model._meta.verbose_name_plural
def get_queryset(self):
return self.model._default_manager.all()
def prepare_origin(self, origin):
return origin
def prepare_object(self, origin, object):
return object
def save_object(self, origin, object):
object.save()
return object
def dismiss_object(self, origin, object):
return object
def delete_object(self, object):
object.delete()
def get_to_be_deleted_objects(self, objects, user):
# Most of this code is adapted from
# ``django.contrib.admin.util.get_deleted_objects``, with some changes
# to integrate it better with django_backend.
from django_backend.compat import NestedObjects
using = 'default'
collector = NestedObjects(using=using)
collector.collect(objects)
perms_needed = set()
backend = self
def format_callback(obj):
try:
obj_backend = backend.find(model=obj.__class__)
except ValueError:
obj_backend = None
if obj_backend:
if not obj_backend.has_perm(user=user, perm='delete', obj=obj):
perms_needed.add(obj._meta.verbose_name)
return {
'backend': obj_backend,
'object': obj,
'user': user
}
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
def has_perm(self, user, perm, obj=None):
if '.' not in perm:
perm = '{app_label}.{permission_name}'.format(
app_label=self.model_opts.app_label,
permission_name=get_permission_codename(perm, self.model_opts))
return user.has_perm(perm, obj)
def get_form_initial(self, object):
return {}
def get_form_class(self, object=None, form_class=None):
return modelform_factory(
self.model,
form=form_class or self.form_class,
formfield_callback=formfield_callback)
def get_preview(self, object):
from django.utils import translation
with translation.override(self.language.active):
return self.preview.render({'object': object})
def get_list_actions(self):
return {}
def get_available_list_actions(self, user):
'''
Returns the actions defined by ``get_list_actions``, sorted by position
and filtered by permission.
'''
available_actions = []
for name, action in self.get_list_actions().items():
if action.check_permission(backend=self, user=user):
available_actions.append((name, action))
return OrderedDict(
list(sorted(
available_actions,
key=lambda action_tuple: action_tuple[1].position)))
def get_action_form_class(self):
return self.action_form_class
def get_filter_form_class(self):
return self.filter_form_class
def get_sort_form_class(self):
return self.sort_form_class
def get_list_columns(self):
return {
'name': BackendColumn(
_('Name'),
'django_backend/columns/_name.html',
position=0),
'buttons': BackendColumn(
'',
'django_backend/columns/_buttons.html', position=1000),
}
@property
def list_columns(self):
return OrderedDict(sorted(self.get_list_columns().items(), key=lambda item: item[1].position))
def get_select_columns(self):
return {
'name': BackendColumn(
_('Name'),
'django_backend/columns/_plain_name.html',
position=0),
'select': BackendColumn(
'',
'django_backend/columns/_select.html', position=1000),
}
@property
def select_columns(self):
return OrderedDict(sorted(self.get_select_columns().items(), key=lambda item: item[1].position))
def get_form_tab_definition(self):
return {}
def get_form_tabs(self, form):
tab_definition = self.get_form_tab_definition()
self.add_fallback_form_fields(tab_definition, form)
tab_definition_tuples = (
(key, tab)
for key, tab in tab_definition.items()
if tab.rows)
return OrderedDict(
sorted(
tab_definition_tuples,
key=lambda t: t[1].position))
def add_fallback_form_fields(self, tab_definition, form):
from .form_tabs import FormTab
if 'content' not in tab_definition:
tab_definition['content'] = FormTab(
_('Content'),
[])
self.add_fallback_form_fields_to_tab(tab_definition['content'], tab_definition, form)
def add_fallback_form_fields_to_tab(self, tab, tab_definition, form):
from .form_tabs import FormRow, FormField
tabs_fields = []
for _tab in tab_definition.values():
tabs_fields = tabs_fields + _tab.fields
tabs_fieldnames = [f.field for f in tabs_fields if hasattr(f, 'field')]
for form_fieldname, form_field in list(form.fields.items()) + list(form.composite_fields.items()):
if form_fieldname in tabs_fieldnames:
continue
if form_field.widget.is_hidden:
continue
tab._rows.append(FormRow(form_field.label, [
FormField(form_fieldname)
]))
tabs_fieldnames.append(form_fieldname)
def get_readonly_fields(self, form, object):
"""
Return the readonly fields. You can override this to mark some fields
as readonly depending on the form or object. By default it returns the
list given in the ``readonly_fields`` attribute.
For example you can use it to make fields editable only during
creation; afterwards they can be marked as readonly.
"""
return list(self.readonly_fields)
| 34.801782
| 126
| 0.614233
|
955bc47138927807e0f96a4bf22c1e0aa2c664f6
| 7,916
|
py
|
Python
|
kubernetes_asyncio/client/models/apps_v1beta1_deployment_rollback.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/apps_v1beta1_deployment_rollback.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/apps_v1beta1_deployment_rollback.py
|
aK0nshin/kubernetes_asyncio
|
aef9edcc1f8671a5b1bba9f4684bde890176b19c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class AppsV1beta1DeploymentRollback(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'name': 'str',
'rollback_to': 'AppsV1beta1RollbackConfig',
'updated_annotations': 'dict(str, str)'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'name': 'name',
'rollback_to': 'rollbackTo',
'updated_annotations': 'updatedAnnotations'
}
def __init__(self, api_version=None, kind=None, name=None, rollback_to=None, updated_annotations=None): # noqa: E501
"""AppsV1beta1DeploymentRollback - a model defined in OpenAPI""" # noqa: E501
self._api_version = None
self._kind = None
self._name = None
self._rollback_to = None
self._updated_annotations = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
self.name = name
self.rollback_to = rollback_to
if updated_annotations is not None:
self.updated_annotations = updated_annotations
@property
def api_version(self):
"""Gets the api_version of this AppsV1beta1DeploymentRollback. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:return: The api_version of this AppsV1beta1DeploymentRollback. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this AppsV1beta1DeploymentRollback.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this AppsV1beta1DeploymentRollback. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this AppsV1beta1DeploymentRollback. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this AppsV1beta1DeploymentRollback. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this AppsV1beta1DeploymentRollback.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this AppsV1beta1DeploymentRollback. # noqa: E501
:type: str
"""
self._kind = kind
@property
def name(self):
"""Gets the name of this AppsV1beta1DeploymentRollback. # noqa: E501
Required: This must match the Name of a deployment. # noqa: E501
:return: The name of this AppsV1beta1DeploymentRollback. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AppsV1beta1DeploymentRollback.
Required: This must match the Name of a deployment. # noqa: E501
:param name: The name of this AppsV1beta1DeploymentRollback. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def rollback_to(self):
"""Gets the rollback_to of this AppsV1beta1DeploymentRollback. # noqa: E501
:return: The rollback_to of this AppsV1beta1DeploymentRollback. # noqa: E501
:rtype: AppsV1beta1RollbackConfig
"""
return self._rollback_to
@rollback_to.setter
def rollback_to(self, rollback_to):
"""Sets the rollback_to of this AppsV1beta1DeploymentRollback.
:param rollback_to: The rollback_to of this AppsV1beta1DeploymentRollback. # noqa: E501
:type: AppsV1beta1RollbackConfig
"""
if rollback_to is None:
raise ValueError("Invalid value for `rollback_to`, must not be `None`") # noqa: E501
self._rollback_to = rollback_to
@property
def updated_annotations(self):
"""Gets the updated_annotations of this AppsV1beta1DeploymentRollback. # noqa: E501
The annotations to be updated to a deployment # noqa: E501
:return: The updated_annotations of this AppsV1beta1DeploymentRollback. # noqa: E501
:rtype: dict(str, str)
"""
return self._updated_annotations
@updated_annotations.setter
def updated_annotations(self, updated_annotations):
"""Sets the updated_annotations of this AppsV1beta1DeploymentRollback.
The annotations to be updated to a deployment # noqa: E501
:param updated_annotations: The updated_annotations of this AppsV1beta1DeploymentRollback. # noqa: E501
:type: dict(str, str)
"""
self._updated_annotations = updated_annotations
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AppsV1beta1DeploymentRollback):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
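# A hedged usage sketch (not part of the generated module): build a rollback
# request and serialize it. The import path below mirrors the package layout,
# and AppsV1beta1RollbackConfig is assumed to accept a `revision` argument.
if __name__ == "__main__":
    from kubernetes_asyncio.client.models.apps_v1beta1_rollback_config import \
        AppsV1beta1RollbackConfig
    rollback = AppsV1beta1DeploymentRollback(
        api_version="apps/v1beta1",
        kind="DeploymentRollback",
        name="my-deployment",  # hypothetical deployment name
        rollback_to=AppsV1beta1RollbackConfig(revision=2),
    )
    print(rollback.to_dict())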
| 34.872247
| 295
| 0.640222
|
b1d70f829e2394c3e3accf2c7dab8d2b55a06f92
| 1,524
|
py
|
Python
|
insynth/perturbators/__init__.py
|
mlxyz/insynth
|
0d2ad6d6177944978e6d85990b9991a614d75b68
|
[
"MIT"
] | null | null | null |
insynth/perturbators/__init__.py
|
mlxyz/insynth
|
0d2ad6d6177944978e6d85990b9991a614d75b68
|
[
"MIT"
] | 1
|
2021-12-06T20:46:20.000Z
|
2021-12-06T20:48:37.000Z
|
insynth/__init__.py
|
mlxyz/insynth
|
0d2ad6d6177944978e6d85990b9991a614d75b68
|
[
"MIT"
] | 1
|
2021-12-06T20:45:50.000Z
|
2021-12-06T20:45:50.000Z
|
# Copyright (c) 2022, Chair of Software Technology
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# - Neither the name of the University Mannheim nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 117.230769
| 758
| 0.803806
|
ccfac00df7279754dbbd869e46e7c2d840fa8dc8
| 1,717
|
py
|
Python
|
pos-directory.py
|
lothelanor/actib
|
c19ba559f2d5810e505ce39abf31e21ba212c8e9
|
[
"MIT"
] | 7
|
2021-03-23T12:28:59.000Z
|
2022-03-02T18:03:34.000Z
|
pos-directory.py
|
lothelanor/actib
|
c19ba559f2d5810e505ce39abf31e21ba212c8e9
|
[
"MIT"
] | 2
|
2021-01-08T13:08:47.000Z
|
2021-01-12T20:19:07.000Z
|
pos-directory.py
|
lothelanor/actib
|
c19ba559f2d5810e505ce39abf31e21ba212c8e9
|
[
"MIT"
] | null | null | null |
import pathlib
from datetime import datetime
import glob
import os
import sys
import actibpos
from multiprocessing import Pool
if len(sys.argv) < 2:
print("error: you must pass a directory as argument to the script")
sys.exit(1)
indir = sys.argv[1]
now = datetime.now()
dt = now.strftime("%Y%m%d_%H-%M-%S")
segoutpath = pathlib.Path("output-"+dt+"/seg/")
posoutpath = pathlib.Path("output-"+dt+"/pos/")
segoutpath.mkdir(parents=True, exist_ok=True)
posoutpath.mkdir(parents=True, exist_ok=True)
POOL = Pool(processes=3)
def main():
filenames = pathlib.Path(indir).rglob('*.xml')
for filename in sorted(filenames, key=lambda fn: str(fn)):
print("treating %s" % filename)
basename = os.path.basename(filename)
if basename.startswith("_"):
continue
noext = os.path.splitext(basename)[0]
posoutfilename = os.path.join(posoutpath, noext+".txt")
segoutfilename = os.path.join(segoutpath, noext+".txt")
print("apply_async")
POOL.apply_async(actibpos.processfiles, args=(filename, segoutfilename, posoutfilename, "seg:pos", "bdrc-tei"))
filenames = pathlib.Path(indir).rglob('*.txt')
for filename in sorted(filenames, key=lambda fn: str(fn)):
print("treating %s" % filename)
basename = os.path.basename(filename)
if basename.startswith("_"):
continue
noext = os.path.splitext(basename)[0]
posoutfilename = os.path.join(posoutpath, noext+".txt")
segoutfilename = os.path.join(segoutpath, noext+".txt")
POOL.apply_async(actibpos.processfiles, args=(filename, segoutfilename, posoutfilename, "seg:pos", "txt"))
POOL.close()
POOL.join()
main()
| 33.666667
| 119
| 0.665696
|
dfb88ca196f51ff6ce1e08080069487f8855b315
| 393
|
py
|
Python
|
hitchike/wsgi.py
|
tgy/hitchike
|
b73c8714f584eeeb432c7c8df706dd8b944d632c
|
[
"MIT"
] | 4
|
2016-02-17T21:30:23.000Z
|
2016-02-20T11:10:05.000Z
|
hitchike/wsgi.py
|
tgy/hitchike
|
b73c8714f584eeeb432c7c8df706dd8b944d632c
|
[
"MIT"
] | 10
|
2020-09-14T07:56:53.000Z
|
2020-09-14T07:56:55.000Z
|
hitchike/wsgi.py
|
tgy/hitchike
|
b73c8714f584eeeb432c7c8df706dd8b944d632c
|
[
"MIT"
] | null | null | null |
"""
WSGI config for hitchike project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hitchike.settings")
application = get_wsgi_application()
| 23.117647
| 78
| 0.78626
|
04b07f507d1328bfdb06f27e0620cacfebeadc2d
| 1,478
|
py
|
Python
|
test/knxip_tests/connectionstate_request_test.py
|
Trance-Paradox/xknx
|
d5603361080f96aafd19c14d17fb1ff391064b3f
|
[
"MIT"
] | null | null | null |
test/knxip_tests/connectionstate_request_test.py
|
Trance-Paradox/xknx
|
d5603361080f96aafd19c14d17fb1ff391064b3f
|
[
"MIT"
] | null | null | null |
test/knxip_tests/connectionstate_request_test.py
|
Trance-Paradox/xknx
|
d5603361080f96aafd19c14d17fb1ff391064b3f
|
[
"MIT"
] | null | null | null |
"""Unit test for KNX/IP ConnectionStateRequests."""
import pytest
from xknx import XKNX
from xknx.exceptions import CouldNotParseKNXIP
from xknx.knxip import HPAI, ConnectionStateRequest, KNXIPFrame
class TestKNXIPConnectionStateRequest:
"""Test class for KNX/IP ConnectionStateRequests."""
def test_connection_state_request(self):
"""Test parsing and streaming connection state request KNX/IP packet."""
raw = bytes.fromhex("06 10 02 07 00 10 15 00 08 01 C0 A8 C8 0C C3 B4")
xknx = XKNX()
knxipframe = KNXIPFrame(xknx)
knxipframe.from_knx(raw)
assert isinstance(knxipframe.body, ConnectionStateRequest)
assert knxipframe.body.communication_channel_id == 21
assert knxipframe.body.control_endpoint == HPAI(
ip_addr="192.168.200.12", port=50100
)
connectionstate_request = ConnectionStateRequest(
xknx,
communication_channel_id=21,
control_endpoint=HPAI(ip_addr="192.168.200.12", port=50100),
)
knxipframe2 = KNXIPFrame.init_from_body(connectionstate_request)
assert knxipframe2.to_knx() == raw
def test_from_knx_wrong_info(self):
"""Test parsing and streaming wrong ConnectionStateRequest."""
raw = bytes((0x06, 0x10, 0x02, 0x07, 0x00, 0x010))
xknx = XKNX()
knxipframe = KNXIPFrame(xknx)
with pytest.raises(CouldNotParseKNXIP):
knxipframe.from_knx(raw)
| 35.190476
| 80
| 0.68065
|
8d224c8754b6b73fd7030601793c536af6b05063
| 1,251
|
py
|
Python
|
activitysim/abm/models/summarize.py
|
mxndrwgrdnr/activitysim
|
722d6e36b2210d5d24dfa2ac4a3504c1e8f75336
|
[
"BSD-3-Clause"
] | 85
|
2018-02-16T15:08:13.000Z
|
2022-03-23T15:08:08.000Z
|
activitysim/abm/models/summarize.py
|
mxndrwgrdnr/activitysim
|
722d6e36b2210d5d24dfa2ac4a3504c1e8f75336
|
[
"BSD-3-Clause"
] | 311
|
2018-01-16T01:59:47.000Z
|
2022-03-29T00:46:40.000Z
|
activitysim/abm/models/summarize.py
|
mxndrwgrdnr/activitysim
|
722d6e36b2210d5d24dfa2ac4a3504c1e8f75336
|
[
"BSD-3-Clause"
] | 63
|
2018-02-05T15:27:51.000Z
|
2022-03-04T20:36:33.000Z
|
# ActivitySim
# See full license in LICENSE.txt.
import logging
import sys
import pandas as pd
from activitysim.core import pipeline
from activitysim.core import inject
from activitysim.core import config
from activitysim.core.config import setting
logger = logging.getLogger(__name__)
@inject.step()
def write_summaries(output_dir):
summary_settings_name = 'output_summaries'
summary_file_name = 'summaries.txt'
summary_settings = setting(summary_settings_name)
if summary_settings is None:
logger.info("No {summary_settings_name} specified in settings file. Nothing to write.")
return
summary_dict = summary_settings
mode = 'wb' if sys.version_info < (3,) else 'w'
with open(config.output_file_path(summary_file_name), mode) as output_file:
for table_name, column_names in summary_dict.items():
df = pipeline.get_table(table_name)
for c in column_names:
n = 100
empty = (df[c] == '') | df[c].isnull()
print(f"\n### {table_name}.{c} type: {df.dtypes[c]} rows: {len(df)} ({empty.sum()} empty)\n\n",
file=output_file)
print(df[c].value_counts().nlargest(n), file=output_file)
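# A hedged sketch of the `output_summaries` setting that write_summaries
# expects, expressed as the Python mapping setting(summary_settings_name)
# returns (it normally lives in the model's YAML settings); the table and
# column names below are illustrative only.
EXAMPLE_OUTPUT_SUMMARIES = {
    'persons': ['ptype', 'cdap_activity'],  # hypothetical table and columns
    'households': ['auto_ownership'],
}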
| 28.431818
| 111
| 0.665867
|
99d7abd44129a6ae699dc943bb9e0c88fa7866a6
| 1,219
|
py
|
Python
|
examples/holoclean_repair_example.py
|
cgebest/holoclean
|
f3032819d5c5353cfcd340fab9d94f1a0cf63913
|
[
"Apache-2.0"
] | null | null | null |
examples/holoclean_repair_example.py
|
cgebest/holoclean
|
f3032819d5c5353cfcd340fab9d94f1a0cf63913
|
[
"Apache-2.0"
] | null | null | null |
examples/holoclean_repair_example.py
|
cgebest/holoclean
|
f3032819d5c5353cfcd340fab9d94f1a0cf63913
|
[
"Apache-2.0"
] | null | null | null |
import holoclean
from detect import *
from repair.featurize import *
# 1. Setup a HoloClean session.
hc = holoclean.HoloClean(
db_name='holo',
domain_thresh_1=0.0,
domain_thresh_2=0.0,
weak_label_thresh=0.99,
max_domain=10000,
cor_strength=0.6,
nb_cor_strength=0.8,
weight_decay=0.01,
learning_rate=0.001,
threads=1,
batch_size=1,
verbose=True,
timeout=3 * 60000,
print_fw=True,
).session
# 2. Load training data and denial constraints.
hc.load_data('hospital', '../testdata/hospital/hospital.csv')
hc.load_dcs('../testdata/hospital/hospital_constraints.txt')
hc.ds.set_constraints(hc.get_dcs())
# 3. Detect erroneous cells using these two detectors.
detectors = [NullDetector(), ViolationDetector()]
hc.detect_errors(detectors)
# 4. Repair errors utilizing the defined features.
hc.generate_domain()
hc.run_estimator()
featurizers = [
OccurAttrFeaturizer(),
FreqFeaturizer(),
ConstraintFeaturizer(),
]
hc.repair_errors(featurizers)
# 5. Evaluate the correctness of the results.
report = hc.evaluate(fpath='../testdata/hospital/hospital_clean.csv',
tid_col='tid',
attr_col='attribute',
val_col='correct_val')
| 25.395833
| 69
| 0.709598
|
72d4c064d32319798f10ed1ce27829fa13c0611f
| 11,482
|
py
|
Python
|
models.py
|
kovrov/wakebreaker
|
cc9b38351dad5b71d0be913a31202d031e3ff65b
|
[
"BSD-3-Clause"
] | null | null | null |
models.py
|
kovrov/wakebreaker
|
cc9b38351dad5b71d0be913a31202d031e3ff65b
|
[
"BSD-3-Clause"
] | null | null | null |
models.py
|
kovrov/wakebreaker
|
cc9b38351dad5b71d0be913a31202d031e3ff65b
|
[
"BSD-3-Clause"
] | null | null | null |
import math
import random
import pyglet
from pyglet.gl import *
from util import Vector3
import renderer
import scene
import fx
WORLD_WIDTH = 175
WORLD_HEIGHT = 175
MAX_SPEED = 1.0 # boat's maximum speed
MAX_CHECKPOINTS = 16
class Racer:
def __init__(self, model_manager, model):
# get the 3d model data
if model == scene.BOAT1:
self.ri = renderer.RenderInstance(model_manager.getBoat1())
else: # model == BOAT2:
self.ri = renderer.RenderInstance(model_manager.getBoat2())
# rotate so he's always right side up
self.ri.rotation[:] = -90.0, 0.0, 90.0
self.ri.scale[:] = 0.5, 0.5, 0.5
self.ri.position[:] = WORLD_WIDTH / 2.0, 0.0, WORLD_HEIGHT / 2.0
# update his rotation and direction
rad = math.radians(self.ri.rotation.y)
self.dir = Vector3(math.cos(rad), 0.0, math.sin(rad))
self.nextCPPos = Vector3()
        self.up = True # used in making the boat bob slightly
self.finished = False # whether or not we are done with the race
self.speed = 0 # ship's current speed
self.nextCheckPoint = 0 # which check point he's aiming for
self.currLap = 0 # which lap he's on
self.hasRotated = True # for rotation optimization
# the water spray that shoots out behind the boat
self.spray = fx.ParticleSystem(200, 15, self.ri.position, Vector3(0.0, 1.0, 0.0))
# Keeps the racer inside the seascape
def boundsCheck(self):
        # check the player against each part of the world, slow him down if he hit one
pos = self.ri.position.copy()
if pos.x > WORLD_WIDTH:
pos.x = WORLD_WIDTH
self.speed = self.speed * 0.5
if pos.x < 0:
pos.x = 0
self.speed = self.speed * 0.5
if pos.z > WORLD_HEIGHT:
pos.z = WORLD_HEIGHT
self.speed = self.speed * 0.5
if pos.z < 0:
pos.z = 0
self.speed = self.speed * 0.5
# make the racer bob up and down
if self.up and self.speed > 0:
pos.y += 0.02 * self.speed
elif self.speed > 0:
pos.y -= 0.02 * self.speed
if pos.y >= 0.2:
self.up = False
if pos.y < -0.05:
self.up = True
self.ri.position[:] = pos
def rotate(self, r):
self.hasRotated = True
# this makes sure the rotation doesn't exceed 360 degrees
if self.ri.rotation.y + r > 360.0 or self.ri.rotation.y + r < -360.0:
self.ri.rotation.y = 0
self.ri.rotation.y += r
def update(self):
if self.hasRotated:
# rotate the ship if needed
rad = math.radians(self.ri.rotation.y)
self.dir.x = math.cos(rad)
self.dir.z = math.sin(rad)
self.hasRotated = False
        # move him along his current direction
self.ri.translate((self.speed * self.dir.x, 0, -self.dir.z * self.speed))
# Bounds check him against the world
self.boundsCheck()
# slow him down
if self.speed - 0.01474 > 0:
self.speed -= 0.01474
elif self.speed - 0.01474 < 0:
self.speed += 0.03
# halt him if too slow
if self.speed <= 0.04 and self.speed >= -0.04:
self.speed = 0
# keep the particles with us
self.updateSpray()
def updateAI(self, player):
# we hit an island, this on most cases, corrects the problem
if self.speed < 0.3:
self.increaseSpeed(0.025)
# go around it
self.dir.x = math.cos(math.radians(90.0))
self.dir.z = math.sin(math.radians(90.0))
self.hasRotated = False
            # move him along his current direction
self.ri.translate((self.speed * self.dir.x, 0.0, -self.dir.z * self.speed))
else:
self.increaseSpeed(0.025)
# build a normalized direction vector
desiredDir = self.ri.position - self.nextCPPos
desiredDir.y = 0.0
mag = math.sqrt(desiredDir.x * desiredDir.x + desiredDir.z * desiredDir.z)
n = 1.0 / mag
desiredDir.x *= n
desiredDir.z *= n
# slow the AI down a little
randFac = (8.0 + random.uniform(0.0, 0.00005)) / 10.0
finalX = -desiredDir.x * self.speed * randFac
finalZ = -desiredDir.z * self.speed * randFac
# make the boat "look" forward!!
if finalZ < 0: k = 90.0
else: k = -90.0
self.ri.rotation.y = math.degrees(math.atan(finalX / finalZ)) + k
# move the boat
self.ri.translate((finalX, 0.0, finalZ))
# keep him in the water
self.boundsCheck()
# move the spray trail with him
self.updateSpray()
def updateSpray(self):
        # keep the spray trail right with the boat
self.spray.move(self.ri.position)
# also keep it spraying in the right direction
if self.speed > 0:
newDir = Vector3(self.dir.x, 0.1, -self.dir.z)
else:
newDir = Vector3(0.0, 0.0, 0.0)
self.spray.redirect(newDir)
self.spray.update()
def increaseSpeed(self, s):
if self.speed < MAX_SPEED:
self.speed += s
if self.speed > MAX_SPEED:
self.speed = MAX_SPEED
# Renders the ship
def render(self, renderer):
# draw the ship
renderer.render(self.ri)
# draw the spray trail
self.spray.render()
class RaceCourse:
# 1. Generates a random race course within the donut described by min and
# max radius
# 2. Adds racers to the race course and sets up the race course
def __init__(self, center, minRadius, maxRadius, racers, model_manager):
        self.checkPoints = [renderer.RenderInstance(model_manager.getCheckPoint()) for i in range(MAX_CHECKPOINTS)]
# calculate the angle apart each checkpoint has to be
interval = math.pi * 2.0 / MAX_CHECKPOINTS
angle = 0.0
for cp in self.checkPoints:
x = math.cos(angle) * random.uniform(minRadius, maxRadius) + center.x
z = math.sin(angle) * random.uniform(minRadius, maxRadius) + center.z
cp.position[:] = x, 1.0, z
cp.scale[:] = 1.0, 1.0, 1.0
cp.rotation[:] = -90.0, 0.0, 0.0
# advance the angle
angle -= interval
# set the racers and the amount of them
self.racers = racers
# place all the racers at the first checkpoint, each racer a bit behind
# the one before him
for racer in self.racers:
# set everyone at the starting checkpoint
racer.nextCheckPoint = 0
            racer.currLap = 0
racer.ri.position[:] = self.checkPoints[0].position
racer.nextCPPos[:] = self.checkPoints[0].position
self.racers[0].rotate(90.0)
# load the textures for the checkpoints
self.cpOnTex = pyglet.image.load('checkpointon.png').get_texture()
self.cpOffTex = pyglet.image.load('checkpointoff.png').get_texture()
self.playerNextCP = 0
def render(self, ri):
glPushMatrix()
self.checkPoints[self.playerNextCP].renderData.texture = self.cpOnTex
ri.render(self.checkPoints[self.playerNextCP])
self.checkPoints[self.playerNextCP].renderData.texture = None
if self.playerNextCP + 1 == MAX_CHECKPOINTS:
self.checkPoints[0].renderData.texture = self.cpOffTex
ri.render(self.checkPoints[0])
self.checkPoints[0].renderData.texture = None
else:
self.checkPoints[self.playerNextCP + 1].renderData.texture = self.cpOffTex
ri.render(self.checkPoints[self.playerNextCP + 1])
self.checkPoints[self.playerNextCP + 1].renderData.texture = None
glPopMatrix()
# Updates the racers, returns 1 if player won, -1 if player lost, and 0
# if race is still in progress
def update(self):
# loop through each racer
for racer in self.racers:
# see if he has collided with the next checkpoint
nextCP = self.checkPoints[racer.nextCheckPoint].position
pos = racer.ri.position
dist = (nextCP.x - pos.x) * (nextCP.x - pos.x) + (nextCP.z - pos.z) * (nextCP.z - pos.z)
radii = 3.0 * 3.0
if dist < radii:
# the player has reached the next checkpoint
# assign him to the next checkpoint
CP = racer.nextCheckPoint + 1
# he has reached the last checkpoint
if CP == MAX_CHECKPOINTS:
# increment his lap count
racer.currLap += 1
if racer.currLap == 3:
# we have a winner
racer.finished = True
CP = 0
# assign him his new checkpoint
racer.nextCheckPoint = CP
racer.nextCPPos[:] = self.checkPoints[CP].position
                if self.racers[0] is racer:
                    self.playerNextCP = CP
                # race over: 1 if the player (racers[0]) won, -1 if an AI won
                if racer.finished:
                    return 1 if self.racers[0] is racer else -1
        return 0
class Seascape:
def __init__(self, mm):
# used in water animation
self.texTranslate = 0
self.waterMoved = True
        self.models = [renderer.RenderInstance(mm.getRandomSeascapeModel()) for i in range(15)]
for model in self.models:
# generate a random x and z
model.position[:] = random.uniform(0.0, WORLD_WIDTH), 0.0, random.uniform(0.0, WORLD_HEIGHT)
# generate a random rotation
model.rotation[:] = -90.0, random.uniform(0.0, 360.0), 0.0
# set up the renderInstance
# Set up the sea floor
vertices = (
(-2, 0.0, -2.0),
(-2, 0.0, WORLD_HEIGHT / 2.0 + 2.0),
(WORLD_WIDTH / 2.0 + 2.0, 0.0, -2.0),
(WORLD_WIDTH / 2.0 + 2.0, 0.0, WORLD_HEIGHT / 2.0 + 2.0))
indices = (0, 1, 2, 2, 1, 3)
uvmap = ((0.0, 0.0), (15.0, 0.0), (0.0, 15.0), (15.0, 15.0))
texture = pyglet.image.load('watertex.png').get_texture()
self.sea = renderer.RenderInstance(renderer.RenderData(vertices, indices, uvmap, texture))
self.sea.position[:] = 0.0, 0.0, 0.0
# checks if anything collided with the islands
def collided(self, pos, radius):
for model in self.models:
# if the distance between the two points is more than the two radii,
# no collision calculate the distance squared
dist = (model.position.x - pos.x) * (model.position.x - pos.x) + (model.position.z - pos.z) * (model.position.z - pos.z)
radii = (radius + 1.5) * (radius + 1.5)
if dist < radii:
return True
return False
# renders the seascape
def render(self, renderer):
# render all the models first
for model in self.models:
renderer.render(model)
# draw reflection
model.scale[:] = 2.0, -2.0, 2.0
renderer.render(model)
model.scale[:] = 2.0, 2.0, 2.0
        # make sure texTranslate never goes out of bounds
if self.waterMoved:
self.texTranslate -= 0.005
else:
self.texTranslate += 0.005
if self.texTranslate > 15.0:
self.waterMoved = True
if self.texTranslate < -15.0:
self.waterMoved = False
# Now render the water plane
# We render it 4 times, so that it forms a giant block
glPushMatrix()
# set up blending
glEnable(GL_BLEND)
glColor4f(1.0, 1.0, 1.0, 0.6)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.renderWater(renderer)
glTranslatef(0.0, 0.0, WORLD_HEIGHT / 2 + 4)
self.renderWater(renderer)
glTranslatef(WORLD_HEIGHT / 2.0 + 4.0, 0.0, 0.0)
self.renderWater(renderer)
glTranslatef(0.0, 0.0, -(WORLD_HEIGHT / 2.0 + 4.0))
self.renderWater(renderer)
        # turn off blending and restore the original color
glDisable(GL_BLEND)
glColor4f(1.0, 1.0, 1.0, 1.0)
glPopMatrix()
# Renders the water
def renderWater(self, renderer):
glMatrixMode(GL_TEXTURE)
# shift the texture coords to simulate motion
glTranslatef(self.texTranslate, self.texTranslate, 0.0)
glRotatef(35.0, 0.0, 0.0, 1.0)
glColor4f(1.0, 1.0, 1.0, 0.6)
# render the first sea quad
renderer.render(self.sea)
# reset the texture matrix
glLoadIdentity()
# now scale and move the tex coords
glScalef(0.7, 0.7, 0.7)
glTranslatef(-self.texTranslate, 0.0, 0.0)
# change the transparency
glColor4f(1.0, 1.0, 1.0, 0.35)
# render another water quad just slightly above the previous one
glMatrixMode(GL_MODELVIEW)
self.sea.translate((0.0, 0.1, 0.0))
renderer.render(self.sea)
        self.sea.translate((0.0, -0.1, 0.0))
glMatrixMode(GL_TEXTURE)
# reset the texture matrix again
glLoadIdentity()
# change back to modelview
glMatrixMode(GL_MODELVIEW)
| 33.770588
| 124
| 0.657986
|
c49547479022ec05e10bc4631bee5db686a7a732
| 1,922
|
py
|
Python
|
benchmark/startPyquil2694.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil2694.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startPyquil2694.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
prog = Program() # circuit begin
prog += H(3) # number=16
prog += CZ(0,3) # number=17
prog += RX(-0.5686282702997527,3) # number=32
prog += H(3) # number=18
prog += H(3) # number=26
prog += CZ(0,3) # number=27
prog += H(3) # number=28
prog += X(3) # number=21
prog += RX(0.4241150082346221,2) # number=33
prog += CNOT(0,3) # number=22
prog += CNOT(0,3) # number=12
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=23
prog += CZ(1,2) # number=24
prog += H(2) # number=25
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=34
prog += CZ(2,0) # number=35
prog += H(0) # number=36
prog += H(0) # number=40
prog += CZ(2,0) # number=41
prog += H(0) # number=42
prog += Z(2) # number=38
prog += CNOT(2,0) # number=39
prog += CNOT(2,0) # number=31
prog += H(0) # number=9
prog += Y(0) # number=14
prog += Y(0) # number=15
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2694.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
| 25.626667
| 64
| 0.566077
|
2d2d5197872f08ac2a198546d6ea64299b7664e0
| 605
|
py
|
Python
|
users/models.py
|
DeepinSC/Pikachu-Housing
|
453201e19812e356106c071bbf9a306931d14fa7
|
[
"Apache-2.0"
] | 1
|
2019-03-23T18:49:31.000Z
|
2019-03-23T18:49:31.000Z
|
users/models.py
|
DeepinSC/Pikachu-Housing
|
453201e19812e356106c071bbf9a306931d14fa7
|
[
"Apache-2.0"
] | null | null | null |
users/models.py
|
DeepinSC/Pikachu-Housing
|
453201e19812e356106c071bbf9a306931d14fa7
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from department.models import Department
from housing.models import House
class UserProfile(models.Model):
    # on_delete made explicit for Django 2.0+ compatibility
    # (CASCADE was the implicit default before)
    user = models.OneToOneField(User, related_name="profile",
                                on_delete=models.CASCADE)
    department = models.ForeignKey(Department, null=True, blank=True,
                                   related_name="user",
                                   on_delete=models.CASCADE)
viewed_houses = models.ManyToManyField(House, blank=True, related_name="viewed_user")
def __str__(self):
if self.user.first_name or self.user.last_name:
return self.user.first_name + " " + self.user.last_name
return self.user.email
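# A hedged usage sketch: a helper showing how the relations above are meant to
# be used; `user` and `house` are assumed to be saved instances.
def mark_house_viewed(user, house):
    """Record that `user` viewed `house` and return all houses they viewed."""
    profile = user.profile  # reverse accessor of the OneToOneField
    profile.viewed_houses.add(house)  # M2M add is idempotent
    return profile.viewed_houses.all()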
| 35.588235
| 90
| 0.745455
|
8b40b3b3cefebdbae9f6ed0d96c7e32cb8b862a8
| 5,667
|
py
|
Python
|
samples/sample_kinesis_wordputter.py
|
rconroy293/amazon-kinesis-client-python
|
66659655e31cec25ca0cc76c397478bdd5bcfcc8
|
[
"Apache-2.0"
] | 338
|
2015-01-08T00:39:31.000Z
|
2022-03-28T07:17:27.000Z
|
samples/sample_kinesis_wordputter.py
|
rconroy293/amazon-kinesis-client-python
|
66659655e31cec25ca0cc76c397478bdd5bcfcc8
|
[
"Apache-2.0"
] | 110
|
2015-01-06T01:22:16.000Z
|
2022-03-28T07:26:07.000Z
|
samples/sample_kinesis_wordputter.py
|
rconroy293/amazon-kinesis-client-python
|
66659655e31cec25ca0cc76c397478bdd5bcfcc8
|
[
"Apache-2.0"
] | 221
|
2015-01-05T10:56:45.000Z
|
2022-02-23T15:40:21.000Z
|
#!env python
'''
Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
'''
from __future__ import print_function
import sys, random, time, argparse
from boto import kinesis
def get_stream_status(conn, stream_name):
'''
Query this provided connection object for the provided stream's status.
:type conn: boto.kinesis.layer1.KinesisConnection
:param conn: A connection to Amazon Kinesis
:type stream_name: str
:param stream_name: The name of a stream.
:rtype: str
:return: The stream's status
'''
r = conn.describe_stream(stream_name)
description = r.get('StreamDescription')
return description.get('StreamStatus')
def wait_for_stream(conn, stream_name):
'''
Wait for the provided stream to become active.
:type conn: boto.kinesis.layer1.KinesisConnection
:param conn: A connection to Amazon Kinesis
:type stream_name: str
:param stream_name: The name of a stream.
'''
SLEEP_TIME_SECONDS = 3
status = get_stream_status(conn, stream_name)
while status != 'ACTIVE':
print('{stream_name} has status: {status}, sleeping for {secs} seconds'.format(
stream_name = stream_name,
status = status,
secs = SLEEP_TIME_SECONDS))
time.sleep(SLEEP_TIME_SECONDS) # sleep for 3 seconds
status = get_stream_status(conn, stream_name)
def put_words_in_stream(conn, stream_name, words):
'''
Put each word in the provided list of words into the stream.
:type conn: boto.kinesis.layer1.KinesisConnection
:param conn: A connection to Amazon Kinesis
:type stream_name: str
:param stream_name: The name of a stream.
:type words: list
:param words: A list of strings to put into the stream.
'''
for w in words:
try:
conn.put_record(stream_name, w, w)
print("Put word: " + w + " into stream: " + stream_name)
except Exception as e:
sys.stderr.write("Encountered an exception while trying to put a word: "
+ w + " into stream: " + stream_name + " exception was: " + str(e))
def put_words_in_stream_periodically(conn, stream_name, words, period_seconds):
'''
Puts words into a stream, then waits for the period to elapse then puts the words in again. There is no strict
guarantee about how frequently we put each word into the stream, just that we will wait between iterations.
:type conn: boto.kinesis.layer1.KinesisConnection
:param conn: A connection to Amazon Kinesis
:type stream_name: str
:param stream_name: The name of a stream.
:type words: list
:param words: A list of strings to put into the stream.
:type period_seconds: int
:param period_seconds: How long to wait, in seconds, between iterations over the list of words.
'''
while True:
put_words_in_stream(conn, stream_name, words)
print("Sleeping for {period_seconds} seconds".format(period_seconds=period_seconds))
time.sleep(period_seconds)
if __name__ == '__main__':
parser = argparse.ArgumentParser('''
Puts words into a stream.
# Using the -w option multiple times
sample_wordputter.py -s STREAM_NAME -w WORD1 -w WORD2 -w WORD3 -p 3
# Passing input from STDIN
echo "WORD1\\nWORD2\\nWORD3" | sample_wordputter.py -s STREAM_NAME -p 3
''')
parser.add_argument("-s", "--stream", dest="stream_name", required=True,
help="The stream you'd like to create.", metavar="STREAM_NAME",)
parser.add_argument("-r", "--regionName", "--region", dest="region", default="us-east-1",
help="The region you'd like to make this stream in. Default is 'us-east-1'", metavar="REGION_NAME",)
parser.add_argument("-w", "--word", dest="words", default=[], action="append",
help="A word to add to the stream. Can be specified multiple times to add multiple words.", metavar="WORD",)
parser.add_argument("-p", "--period", dest="period", type=int,
help="If you'd like to repeatedly put words into the stream, this option provides the period for putting "
+ "words into the stream in SECONDS. If no period is given then the words are put once.",
metavar="SECONDS",)
args = parser.parse_args()
stream_name = args.stream_name
'''
Getting a connection to Amazon Kinesis will require that you have your credentials available to
one of the standard credentials providers.
'''
print("Connecting to stream: {s} in {r}".format(s=stream_name, r=args.region))
conn = kinesis.connect_to_region(region_name = args.region)
try:
status = get_stream_status(conn, stream_name)
if 'DELETING' == status:
print('The stream: {s} is being deleted, please rerun the script.'.format(s=stream_name))
sys.exit(1)
elif 'ACTIVE' != status:
wait_for_stream(conn, stream_name)
    except Exception:
# We'll assume the stream didn't exist so we will try to create it with just one shard
conn.create_stream(stream_name, 1)
wait_for_stream(conn, stream_name)
# Now the stream should exist
if len(args.words) == 0:
print('No -w options provided. Waiting on input from STDIN')
words = [l.strip() for l in sys.stdin.readlines() if l.strip() != '']
else:
words = args.words
    if args.period is not None:
put_words_in_stream_periodically(conn, stream_name, words, args.period)
else:
put_words_in_stream(conn, stream_name, words)
| 40.478571
| 130
| 0.662079
|
fe81645ece5b67fbe9c2c30f88db9128d4da2172
| 1,614
|
py
|
Python
|
buffer.py
|
Casperinous/Lena
|
202ed791ecf8e632b00fea8373d81df9b1f54d2c
|
[
"MIT"
] | 2
|
2021-06-04T07:32:35.000Z
|
2021-06-07T01:41:37.000Z
|
buffer.py
|
Casperinous/Lena
|
202ed791ecf8e632b00fea8373d81df9b1f54d2c
|
[
"MIT"
] | null | null | null |
buffer.py
|
Casperinous/Lena
|
202ed791ecf8e632b00fea8373d81df9b1f54d2c
|
[
"MIT"
] | null | null | null |
'''
Code originally used by Androguard in https://github.com/androguard/androguard/blob/master/androguard/core/bytecode.py#L714,
where it represented a different entity. Due to our simpler logic, we combine those three classes into one.
'''
class Buffer:
def __init__(self, buff):
"""
Badly, the stable version of Androguard is v2.0 which was
published 2-3 years ago. That being said, the source code
of the stable version is different of the one in the master
branch.
---------------------------------------------------------
if isinstance(buff,bytearray):
self.__buff = buff
else:
self.__buff = bytearray(buff)
"""
self.__buff = buff
self.__idx = 0
def __getitem__(self, item):
return self.__buff[item]
def __len__(self):
return len(self.__buff)
def read(self, size):
buff = self.__buff[self.__idx:self.__idx + size]
self.__idx += size
return buff
def readat(self, off):
return self.__buff[off:]
def read_b(self, size):
return self.__buff[self.__idx:self.__idx + size]
def set_idx(self, idx):
self.__idx = idx
def get_idx(self):
return self.__idx
def add_idx(self, idx):
self.__idx += idx
def get_buff(self):
return self.__buff
def length_buff(self):
return len(self.__buff)
def set_buff(self, buff):
self.__buff = buff
    def save(self, filename):
        # write the underlying buffer out to disk
        with open(filename, "wb") as fd:
            fd.write(self.__buff)
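# A minimal usage sketch of Buffer over an in-memory bytestring.
if __name__ == '__main__':
    buf = Buffer(b'dex\n035\x00rest-of-file')
    header = buf.read(8)  # consume the first 8 bytes, cursor moves to 8
    peek = buf.read_b(4)  # look at the next 4 bytes without advancing
    buf.set_idx(0)  # rewind to the start
    assert buf.readat(0) == buf.get_buff()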
| 25.619048
| 123
| 0.581784
|
50770a815b4fd42fc50087cdb288930a12ecc702
| 1,274
|
py
|
Python
|
data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py
|
dongboyan77/quay
|
8018e5bd80f17e6d855b58b7d5f2792d92675905
|
[
"Apache-2.0"
] | null | null | null |
data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py
|
dongboyan77/quay
|
8018e5bd80f17e6d855b58b7d5f2792d92675905
|
[
"Apache-2.0"
] | null | null | null |
data/migrations/versions/eafdeadcebc7_remove_blob_index_from_manifestblob_.py
|
dongboyan77/quay
|
8018e5bd80f17e6d855b58b7d5f2792d92675905
|
[
"Apache-2.0"
] | null | null | null |
"""Remove blob_index from ManifestBlob table
Revision ID: eafdeadcebc7
Revises: 9093adccc784
Create Date: 2018-08-07 15:57:54.001225
"""
# revision identifiers, used by Alembic.
revision = "eafdeadcebc7"
down_revision = "9093adccc784"
from alembic import op as original_op
from data.migrations.progress import ProgressWrapper
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade(tables, tester, progress_reporter):
op = ProgressWrapper(original_op, progress_reporter)
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("manifestblob_manifest_id_blob_index", table_name="manifestblob")
op.drop_column("manifestblob", "blob_index")
# ### end Alembic commands ###
def downgrade(tables, tester, progress_reporter):
op = ProgressWrapper(original_op, progress_reporter)
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"manifestblob",
sa.Column(
"blob_index", mysql.INTEGER(display_width=11), autoincrement=False, nullable=True
),
)
op.create_index(
"manifestblob_manifest_id_blob_index",
"manifestblob",
["manifest_id", "blob_index"],
unique=True,
)
# ### end Alembic commands ###
| 29.627907
| 93
| 0.708006
|
ace97fc8f70b4eb93ae272c5575e89074715483c
| 2,586
|
py
|
Python
|
nipype/interfaces/camino/tests/test_auto_TrackDT.py
|
vferat/nipype
|
536c57da150d157dcb5c121af43aaeab71cdbd5f
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/camino/tests/test_auto_TrackDT.py
|
vferat/nipype
|
536c57da150d157dcb5c121af43aaeab71cdbd5f
|
[
"Apache-2.0"
] | 2
|
2018-04-17T19:18:16.000Z
|
2020-03-04T22:05:02.000Z
|
nipype/interfaces/camino/tests/test_auto_TrackDT.py
|
oesteban/nipype
|
c14f24eba1da08711bbb894e049ee858ed740096
|
[
"Apache-2.0"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..dti import TrackDT
def test_TrackDT_inputs():
input_map = dict(
anisfile=dict(
argstr='-anisfile %s',
extensions=None,
),
anisthresh=dict(argstr='-anisthresh %f', ),
args=dict(argstr='%s', ),
curveinterval=dict(
argstr='-curveinterval %f',
requires=['curvethresh'],
),
curvethresh=dict(argstr='-curvethresh %f', ),
data_dims=dict(
argstr='-datadims %s',
units='voxels',
),
environ=dict(
nohash=True,
usedefault=True,
),
gzip=dict(argstr='-gzip', ),
in_file=dict(
argstr='-inputfile %s',
extensions=None,
position=1,
),
inputdatatype=dict(argstr='-inputdatatype %s', ),
inputmodel=dict(
argstr='-inputmodel %s',
usedefault=True,
),
interpolator=dict(argstr='-interpolator %s', ),
ipthresh=dict(argstr='-ipthresh %f', ),
maxcomponents=dict(
argstr='-maxcomponents %d',
units='NA',
),
numpds=dict(
argstr='-numpds %d',
units='NA',
),
out_file=dict(
argstr='-outputfile %s',
extensions=None,
genfile=True,
position=-1,
),
output_root=dict(
argstr='-outputroot %s',
extensions=None,
position=-1,
),
outputtracts=dict(argstr='-outputtracts %s', ),
seed_file=dict(
argstr='-seedfile %s',
extensions=None,
position=2,
),
stepsize=dict(
argstr='-stepsize %f',
requires=['tracker'],
),
tracker=dict(
argstr='-tracker %s',
usedefault=True,
),
voxel_dims=dict(
argstr='-voxeldims %s',
units='mm',
),
)
inputs = TrackDT.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_TrackDT_outputs():
output_map = dict(tracked=dict(extensions=None, ), )
outputs = TrackDT.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 28.733333
| 67
| 0.509667
|
bddef33302092b39deffbbf8e304e62f2f161184
| 5,695
|
py
|
Python
|
invenio_requests/customizations/base/request_types.py
|
Pineirin/invenio-requests
|
7eb85365128e4189e4c81d154e8918b09aae033d
|
[
"MIT"
] | null | null | null |
invenio_requests/customizations/base/request_types.py
|
Pineirin/invenio-requests
|
7eb85365128e4189e4c81d154e8918b09aae033d
|
[
"MIT"
] | null | null | null |
invenio_requests/customizations/base/request_types.py
|
Pineirin/invenio-requests
|
7eb85365128e4189e4c81d154e8918b09aae033d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 TU Wien.
# Copyright (C) 2021 CERN.
#
# Invenio-Requests is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Base class for creating custom types of requests.
The `RequestType` classes are the most important part of the customization and
extension mechanism for custom types of requests.
TODO explain what can be done here, and how!
"""
import base32_lib as base32
import marshmallow as ma
from ...proxies import current_requests
class RequestType:
"""Base class for custom request types."""
type_id = "base-request"
"""The unique and constant identifier for this type of requests.
Since this property is used to map generic chunks of data from the database
(i.e. the request model entries) to their correct `RequestType`, this should
be a globally unique value.
By convention, this would be the name of the package in which the custom
`RequestType` is defined as prefix, together with a suffix related to the
`RequestType`.
Further, it should be constant after the first release of the package
(otherwise, requests created with the old value will no longer be able to be
mapped to their `RequestType`).
"""
name = "Base Request"
"""The human-readable name for this type of requests."""
available_statuses = {}
"""Available statuses for the Request.
    The keys of this dictionary form the set of available statuses, and
    their values indicate whether a request in that state is still
    considered to be "open".
"""
default_status = None
"""The default status for new requests of this type.
This must be set to one of the available statuses for the custom request type.
"""
available_actions = {}
"""Available actions for this Request.
The keys are the internal identifiers for the actions, the values are
the actual RequestAction classes (not objects).
Whenever an action is looked up, a new object of the registered
RequestAction class is instantiated with the current Request object as
argument.
"""
creator_can_be_none = True
"""Determines if the ``created_by`` reference accepts ``None``."""
receiver_can_be_none = False
"""Determines if the ``receiver`` reference accepts ``None``."""
topic_can_be_none = True
"""Determines if the ``topic`` reference accepts ``None``."""
allowed_creator_ref_types = ["user"]
"""A list of allowed TYPE keys for ``created_by`` reference dicts."""
allowed_receiver_ref_types = ["user"]
"""A list of allowed TYPE keys for ``receiver`` reference dicts."""
allowed_topic_ref_types = []
"""A list of allowed TYPE keys for ``topic`` reference dicts."""
payload_schema = None
"""Schema for supported payload fields.
Define it as a dictionary of fields mapping:
.. code-block:: python
payload_schema = {
"content": fields.String(),
# ...
}
"""
@classmethod
def _create_marshmallow_schema(cls):
"""Create a marshmallow schema for this request type."""
# Avoid circular imports
from invenio_requests.services.schemas import (
EntityReferenceBaseSchema as RefBaseSchema,
)
from invenio_requests.services.schemas import RequestSchema
# The reference fields always need to be added
additional_fields = {
"created_by": ma.fields.Nested(
RefBaseSchema.create_from_dict(cls.allowed_creator_ref_types),
allow_none=cls.creator_can_be_none,
),
"receiver": ma.fields.Nested(
RefBaseSchema.create_from_dict(cls.allowed_receiver_ref_types),
allow_none=cls.receiver_can_be_none,
),
"topic": ma.fields.Nested(
RefBaseSchema.create_from_dict(cls.allowed_topic_ref_types),
allow_none=cls.topic_can_be_none,
),
}
# Raise on invalid payload keys
class PayloadBaseSchema(ma.Schema):
class Meta:
unknown = ma.RAISE
# If a payload schema is defined, add it to the request schema
if cls.payload_schema is not None:
additional_fields["payload"] = ma.fields.Nested(
PayloadBaseSchema.from_dict(cls.payload_schema),
)
# Dynamically create a schema from the fields defined
# by the payload schema dict.
return RequestSchema.from_dict(additional_fields)
@classmethod
def marshmallow_schema(cls):
"""Create a schema for the entire request including payload."""
type_id = cls.type_id
if type_id not in current_requests._schema_cache:
current_requests._schema_cache[type_id] = cls._create_marshmallow_schema()
return current_requests._schema_cache[type_id]
def generate_request_number(self, request, **kwargs):
"""Generate a new request number identifier.
This method can be overridden in subclasses to create external identifiers
according to a custom schema, using the information associated with the request
(e.g. topic, receiver, creator).
"""
from invenio_requests.records.models import RequestNumber
return base32.encode(RequestNumber.next())
def __str__(self):
"""Return str(self)."""
# Value used by marshmallow schemas to represent the type.
return self.type_id
def __repr__(self):
"""Return repr(self)."""
return f"<RequestType '{self.type_id}'>"
| 34.515152
| 87
| 0.668832
|
05ef8120697688582671622a511c15e2bccef1cd
| 6,570
|
py
|
Python
|
src/garage/torch/policies/categorical_cnn_policy.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 1,500
|
2018-06-11T20:36:24.000Z
|
2022-03-31T08:29:01.000Z
|
src/garage/torch/policies/categorical_cnn_policy.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 2,111
|
2018-06-11T04:10:29.000Z
|
2022-03-26T14:41:32.000Z
|
src/garage/torch/policies/categorical_cnn_policy.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 309
|
2018-07-24T11:18:48.000Z
|
2022-03-30T16:19:48.000Z
|
"""CategoricalCNNPolicy."""
import akro
import torch
from torch import nn
from garage import InOutSpec
from garage.torch.modules import CNNModule, MultiHeadedMLPModule
from garage.torch.policies.stochastic_policy import StochasticPolicy
class CategoricalCNNPolicy(StochasticPolicy):
"""CategoricalCNNPolicy.
    A policy that contains a CNN and an MLP to make predictions based on
    a categorical distribution.
It only works with akro.Discrete action space.
Args:
env_spec (garage.EnvSpec): Environment specification.
image_format (str): Either 'NCHW' or 'NHWC'. Should match env_spec. Gym
uses NHWC by default, but PyTorch uses NCHW by default.
kernel_sizes (tuple[int]): Dimension of the conv filters.
For example, (3, 5) means there are two convolutional layers.
The filter for first layer is of dimension (3 x 3)
and the second one is of dimension (5 x 5).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
hidden_channels (tuple[int]): Number of output channels for CNN.
For example, (3, 32) means there are two convolutional layers.
            The filter for the first conv layer outputs 3 channels
            and the second one outputs 32 channels.
hidden_sizes (list[int]): Output dimension of dense layer(s) for
the MLP for mean. For example, (32, 32) means the MLP consists
of two hidden layers, each with 32 hidden units.
hidden_nonlinearity (callable): Activation function for intermediate
dense layer(s). It should return a torch.Tensor. Set it to
None to maintain a linear activation.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
paddings (tuple[int]): Zero-padding added to both sides of the input
padding_mode (str): The type of padding algorithm to use,
either 'SAME' or 'VALID'.
max_pool (bool): Bool for using max-pooling or not.
pool_shape (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
shape (2, 2).
pool_stride (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
output_w_init (callable): Initializer function for the weight
of output dense layer(s). The function should return a
torch.Tensor.
output_b_init (callable): Initializer function for the bias
of output dense layer(s). The function should return a
torch.Tensor.
layer_normalization (bool): Bool for using layer normalization or not.
name (str): Name of policy.
"""
def __init__(self,
env_spec,
image_format,
kernel_sizes,
*,
hidden_channels,
strides=1,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
paddings=0,
padding_mode='zeros',
max_pool=False,
pool_shape=None,
pool_stride=1,
output_w_init=nn.init.xavier_uniform_,
output_b_init=nn.init.zeros_,
layer_normalization=False,
name='CategoricalCNNPolicy'):
if not isinstance(env_spec.action_space, akro.Discrete):
            raise ValueError('CategoricalCNNPolicy only works '
'with akro.Discrete action space.')
if isinstance(env_spec.observation_space, akro.Dict):
            raise ValueError('CNN policies do not support '
                             'akro.Dict observation spaces.')
super().__init__(env_spec, name)
self._cnn_module = CNNModule(InOutSpec(
self._env_spec.observation_space, None),
image_format=image_format,
kernel_sizes=kernel_sizes,
strides=strides,
hidden_channels=hidden_channels,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
hidden_nonlinearity=hidden_nonlinearity,
paddings=paddings,
padding_mode=padding_mode,
max_pool=max_pool,
pool_shape=pool_shape,
pool_stride=pool_stride,
layer_normalization=layer_normalization)
self._mlp_module = MultiHeadedMLPModule(
n_heads=1,
input_dim=self._cnn_module.spec.output_space.flat_dim,
output_dims=[self._env_spec.action_space.flat_dim],
hidden_sizes=hidden_sizes,
hidden_w_init=hidden_w_init,
hidden_b_init=hidden_b_init,
hidden_nonlinearity=hidden_nonlinearity,
output_w_inits=output_w_init,
output_b_inits=output_b_init)
def forward(self, observations):
"""Compute the action distributions from the observations.
Args:
observations (torch.Tensor): Observations to act on.
Returns:
torch.distributions.Distribution: Batch distribution of actions.
dict[str, torch.Tensor]: Additional agent_info, as torch Tensors.
Do not need to be detached, and can be on any device.
"""
# We're given flattened observations.
observations = observations.reshape(
-1, *self._env_spec.observation_space.shape)
cnn_output = self._cnn_module(observations)
mlp_output = self._mlp_module(cnn_output)[0]
        probs = torch.softmax(mlp_output, dim=1)
        dist = torch.distributions.Categorical(probs=probs)
return dist, {}
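# A hedged construction sketch; `EnvSpec` is assumed importable from `garage`,
# and the observation/action spaces below are illustrative only.
if __name__ == '__main__':
    import numpy as np
    from garage import EnvSpec
    spec = EnvSpec(
        observation_space=akro.Box(low=0, high=255, shape=(3, 32, 32),
                                   dtype=np.uint8),
        action_space=akro.Discrete(4))
    policy = CategoricalCNNPolicy(env_spec=spec,
                                  image_format='NCHW',
                                  kernel_sizes=(3, 3),
                                  hidden_channels=(32, 64),
                                  strides=(1, 1))
    # run a batch of one flattened, all-zero observation through the policy
    dist, _ = policy(torch.zeros(1, 3 * 32 * 32))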
| 46.595745
| 79
| 0.598021
|
50f2cff4c54720f8d305a83ec151cc2473cd0de0
| 2,825
|
py
|
Python
|
demo.py
|
iallabs/inception-web
|
ace76bbffe4dba242eb7b61fe9ce36ad0fb5fee7
|
[
"Apache-2.0"
] | null | null | null |
demo.py
|
iallabs/inception-web
|
ace76bbffe4dba242eb7b61fe9ce36ad0fb5fee7
|
[
"Apache-2.0"
] | null | null | null |
demo.py
|
iallabs/inception-web
|
ace76bbffe4dba242eb7b61fe9ce36ad0fb5fee7
|
[
"Apache-2.0"
] | null | null | null |
import os
from flask import (
Flask,
redirect,
url_for,
render_template,
request,
session,
make_response,
jsonify,
)
from inception import (
create_graph,
download_and_extract,
run_inference_on_image,
)
from shutil import copyfile
from werkzeug.utils import secure_filename
# Constants
UPLOAD_FOLDER = './static/images'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
# Flask app
app = Flask(__name__, static_url_path='/static')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
progInterface = [
{"ID": 1, "Str": "one"},
{"ID": 2, "Str": "two"},
{"ID": 3, "Str": "3"},
{"ID": 4, "Str": "4"},
{"ID": 5, "Str": "5"},
]
def uploadedImage(image):
return os.path.join(UPLOAD_FOLDER, image)
def sendJSON(obj):
return make_response(jsonify(obj))
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/run_inference/<image>', methods=["POST", "GET"])
def api_run_inference(image):
try:
# Create graph
create_graph()
result = run_inference_on_image(uploadedImage(image), 5)
return sendJSON({
"success" : True,
"response" : result,
}), 200
    except Exception:
return sendJSON({
"success" : False,
"error_code" : 500,
}), 200
@app.route('/upload_image', methods=["POST"])
def form_upload_image():
if 'file' not in request.files:
return redirect('upload')
file = request.files['file']
if file.filename == '':
return redirect('upload')
if file and allowed_file(file.filename):
        if request.form['name']:
            ext = secure_filename(file.filename).rsplit('.', 1)[1]
            filename = request.form['name'] + '.' + ext
else:
filename = secure_filename(file.filename)
file.save(os.path.join(uploadedImage(filename)))
return redirect('upload')
@app.route('/delete_image/<image>', methods=["POST"])
def delete_image(image):
    if image == '' or not os.path.exists(uploadedImage(image)):
        return sendJSON({
            "success": False,
            "error": "file not found",
        })
os.remove(uploadedImage(image))
return sendJSON({
"success" : True,
})
@app.route('/upload')
def upload():
return render_template("upload.html")
@app.route('/demo')
def demo():
    ls = []
for file in os.listdir(UPLOAD_FOLDER):
r={
"Name" : file,
"Namebase" : file.split('.')[0],
}
ls.append(r)
return render_template("demo.html", uploads=ls, proginterface=progInterface)
if __name__ == "__main__":
# Download inception model
download_and_extract()
# Run server
app.run(port=8002, debug=True)
| 24.780702
| 80
| 0.593628
|
16eddc1eab8628eec7e38d27b1f18df13dd480d7
| 4,283
|
py
|
Python
|
contrib/ACE2P/infer.py
|
LielinJiang/PaddleSeg
|
7c4d39da3d0ff635cac066aeb61e23dfada5d0d7
|
[
"Apache-2.0"
] | null | null | null |
contrib/ACE2P/infer.py
|
LielinJiang/PaddleSeg
|
7c4d39da3d0ff635cac066aeb61e23dfada5d0d7
|
[
"Apache-2.0"
] | null | null | null |
contrib/ACE2P/infer.py
|
LielinJiang/PaddleSeg
|
7c4d39da3d0ff635cac066aeb61e23dfada5d0d7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import cv2
import numpy as np
from utils.util import get_arguments
from utils.palette import get_palette
from PIL import Image as PILImage
import importlib
args = get_arguments()
config = importlib.import_module('config')
cfg = getattr(config, 'cfg')
# Paddle garbage-collection strategy flag; the ACE2P model is large, so enable this when GPU memory is insufficient
os.environ['FLAGS_eager_delete_tensor_gb']='0.0'
import paddle.fluid as fluid
# Dataset class for inference
class TestDataSet():
def __init__(self):
self.data_dir = cfg.data_dir
self.data_list_file = cfg.data_list_file
self.data_list = self.get_data_list()
self.data_num = len(self.data_list)
def get_data_list(self):
        # Build the list of image paths to run inference on
data_list = []
data_file_handler = open(self.data_list_file, 'r')
for line in data_file_handler:
img_name = line.strip()
name_prefix = img_name.split('.')[0]
if len(img_name.split('.')) == 1:
img_name = img_name + '.jpg'
img_path = os.path.join(self.data_dir, img_name)
data_list.append(img_path)
return data_list
def preprocess(self, img):
        # Image preprocessing
if cfg.example == 'ACE2P':
reader = importlib.import_module('reader')
ACE2P_preprocess = getattr(reader, 'preprocess')
img = ACE2P_preprocess(img)
else:
img = cv2.resize(img, cfg.input_size).astype(np.float32)
img -= np.array(cfg.MEAN)
img /= np.array(cfg.STD)
img = img.transpose((2, 0, 1))
img = np.expand_dims(img, axis=0)
return img
def get_data(self, index):
        # Load the image and its metadata
img_path = self.data_list[index]
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
if img is None:
            return img, img, img_path, None
img_name = img_path.split(os.sep)[-1]
name_prefix = img_name.replace('.'+img_name.split('.')[-1],'')
img_shape = img.shape[:2]
img_process = self.preprocess(img)
return img, img_process, name_prefix, img_shape
def infer():
if not os.path.exists(cfg.vis_dir):
os.makedirs(cfg.vis_dir)
palette = get_palette(cfg.class_num)
    # Display threshold for the human segmentation result
thresh = 120
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
    # Load the inference model
test_prog, feed_name, fetch_list = fluid.io.load_inference_model(
dirname=cfg.model_path, executor=exe, params_filename='__params__')
    # Load the inference dataset
test_dataset = TestDataSet()
data_num = test_dataset.data_num
for idx in range(data_num):
        # Fetch the data
ori_img, image, im_name, im_shape = test_dataset.get_data(idx)
if image is None:
print(im_name, 'is None')
continue
        # Run inference
if cfg.example == 'ACE2P':
            # The ACE2P model uses multi-scale inference
reader = importlib.import_module('reader')
multi_scale_test = getattr(reader, 'multi_scale_test')
parsing, logits = multi_scale_test(exe, test_prog, feed_name, fetch_list, image, im_shape)
else:
            # The HumanSeg and RoadLine models use single-scale inference
result = exe.run(program=test_prog, feed={feed_name[0]: image}, fetch_list=fetch_list)
parsing = np.argmax(result[0][0], axis=0)
parsing = cv2.resize(parsing.astype(np.uint8), im_shape[::-1])
        # Save the inference result
result_path = os.path.join(cfg.vis_dir, im_name + '.png')
if cfg.example == 'HumanSeg':
logits = result[0][0][1]*255
logits = cv2.resize(logits, im_shape[::-1])
ret, logits = cv2.threshold(logits, thresh, 0, cv2.THRESH_TOZERO)
logits = 255 *(logits - thresh)/(255 - thresh)
            # Write the segmentation result into the alpha channel
rgba = np.concatenate((ori_img, np.expand_dims(logits, axis=2)), axis=2)
cv2.imwrite(result_path, rgba)
else:
output_im = PILImage.fromarray(np.asarray(parsing, dtype=np.uint8))
output_im.putpalette(palette)
output_im.save(result_path)
if (idx + 1) % 100 == 0:
            print('%d processed' % (idx + 1))
    print('%d processed, done' % (idx + 1))
return 0
if __name__ == "__main__":
infer()
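A quick numeric check of the HumanSeg alpha rescaling above: after THRESH_TOZERO, the rescale maps the interval [thresh, 255] linearly onto [0, 255] (the sample values are illustrative).

import numpy as np

thresh = 120
vals = np.array([120.0, 187.5, 255.0])
print(255 * (vals - thresh) / (255 - thresh))  # -> [  0.   127.5  255. ]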
| 32.694656
| 102
| 0.599113
|
64646d3bb53ccafdad1561309c2f0be1e0ecca6c
| 3,996
|
py
|
Python
|
sysID/fix_gearing.py
|
Team2470/FRC-2022-robot
|
bbc6d6277000be1929f4b746c73d06ca8afe926f
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2022-01-24T02:16:28.000Z
|
2022-01-24T02:16:28.000Z
|
sysID/fix_gearing.py
|
Team2470/2022-robot
|
bbc6d6277000be1929f4b746c73d06ca8afe926f
|
[
"BSD-3-Clause",
"MIT"
] | 6
|
2022-01-24T05:06:39.000Z
|
2022-02-19T23:44:29.000Z
|
sysID/fix_gearing.py
|
Team2470/FRC-2022-robot
|
bbc6d6277000be1929f4b746c73d06ca8afe926f
|
[
"BSD-3-Clause",
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import json
import sys
from pathlib import Path
if len(sys.argv) == 1:
print("Usage: {} sysid_data.json".format(sys.argv[0]))
sys.exit(1)
# Load in SysID data
print(f"sysid data file correct: {sys.argv[1]}")
sysid_data = None
with open(sys.argv[1]) as f:
sysid_data = json.load(f)
# Data format: https://github.com/wpilibsuite/sysid/blob/main/docs/data-collection.md#drivetrain
# | Index | Value |
# | ----- | ------------ |
# | 0 | timestamp |
# | 1 | l voltage |
# | 2 | r voltage |
# | 3 | l position |
# | 4 | r position |
# | 5 | l velocity |
# | 6 | r velocity |
# | 7 | angle |
# | 8 | angular rate |
#
# Note that all positions and velocities should be in rotations of the output and rotations/sec of the output
# respectively. If there is a gearing between the encoder and the output, that should be taken into account.
SYSID_TIMESTAMP = 0
SYSID_L_VOLTAGE = 1
SYSID_R_VOLTAGE = 2
SYSID_L_POSITION = 3
SYSID_R_POSITION = 4
SYSID_L_VELOCITY = 5
SYSID_R_VELOCITY = 6
SYSID_ANGLE = 7
SYSID_ANGULAR_RATE = 8
# This is how SysID processes
# https://github.com/wpilibsuite/sysid/blob/main/sysid-projects/drive/src/main/cpp/Robot.cpp#L50-L53
# double cpr = m_json.at("counts per rotation").get<double>();
# double gearingNumerator = m_json.at("gearing numerator").get<double>();
# double gearingDenominator = m_json.at("gearing denominator").get<double>();
# double gearing = gearingNumerator / gearingDenominator;
# This is how we configured our encoder gearing
# 1:26.04
# From config.json
# {
# ...
# "gearing denominator": 26.04,
# "gearing numerator": 1,
# ...
# }
# When encoders are set up, this is how the overall CPR (counts per revolution) is calculated:
# https://github.com/wpilibsuite/sysid/blob/main/sysid-library/src/main/cpp/generation/SysIdSetup.cpp#L167
# double combinedCPR = cpr * gearing;
# With the gearing ratio flipped, the config understated how many encoder counts occur for each turn of the wheel.
# In reality the encoder turns 26.04 times per wheel revolution, so the combinedCPR needs to be
# 2048 * (26.04/1) = 53,329.92, and not 2048 * (1/26.04) = 78.65.
# Position is measured in rotations
# Velocity is measured in rotations per second
# This is how the SysID converts the encoder value to rotations
# https://github.com/wpilibsuite/sysid/blob/main/sysid-library/src/main/cpp/generation/SysIdSetup.cpp#L144
# position = [=] { return talonController->GetSelectedSensorPosition() / cpr; };
# rate = [=] {
# return talonController->GetSelectedSensorVelocity() / cpr /
# 0.1; // Conversion factor from 100 ms to seconds
# };
# What the calculation should be:
# cpr = 2048 * (26.04/1) = 53,329.92
# rotations = position / (53,329.92)
# What we ended up with the incorrect gear ratio:
# cpr = 2048 * (1/26.04) = 78.65
# rotations = position / (78.65)
# So, in order to correct the incorrect gearing we need to divide the gearing by 26.04 twice. Basically we want to make
# the denominator 78.65 turn into 53,329.92.
# 1. The first divide gets our position/velocity measurements in reference to the builtin Falcon encoder
# 2. The second divide gets our position/velocity measures in reference to the rotations of the robot wheels.
gearing = 26.04
for test_name in ["fast-backward", "fast-forward", "slow-backward", "slow-forward"]:
for datapoint in sysid_data[test_name]:
# Fix up the metrics from this datapoint
for metric in [SYSID_L_POSITION, SYSID_R_POSITION, SYSID_L_VELOCITY, SYSID_R_VELOCITY]:
datapoint[metric] = datapoint[metric] / gearing / gearing
# Write out correct sysid data file
output_file_path = Path(sys.argv[1])
output_file_path = output_file_path.with_stem(f"{output_file_path.stem}-fixed")
print(f"Writing fixed sysid data file to {output_file_path}")
with open(output_file_path, 'w') as f:
json.dump(sysid_data, f, indent=4, sort_keys=True)
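A small sanity check of the correction derived in the comments above: dividing the miscomputed rotations by the gearing twice is equivalent to having used the intended CPR from the start (raw_counts is an illustrative value).

cpr = 2048
gearing = 26.04
wrong_cpr = cpr * (1 / gearing)   # 78.65, what the flipped config produced
right_cpr = cpr * gearing         # 53,329.92, what it should have been
raw_counts = 1_000_000.0
rotations_wrong = raw_counts / wrong_cpr
rotations_fixed = rotations_wrong / gearing / gearing
assert abs(rotations_fixed - raw_counts / right_cpr) < 1e-9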
| 37.698113
| 119
| 0.698699
|
c0fd5e8f1435d8f36ee39f0b50e54a2a5717b107
| 471
|
py
|
Python
|
api-rest/apps/core/migrations/0003_auto_20200921_1323.py
|
zomars/koopers
|
57208f936926bd9e8b08947495d62c25f1168939
|
[
"MIT"
] | null | null | null |
api-rest/apps/core/migrations/0003_auto_20200921_1323.py
|
zomars/koopers
|
57208f936926bd9e8b08947495d62c25f1168939
|
[
"MIT"
] | 8
|
2021-04-08T20:06:41.000Z
|
2022-03-12T00:49:28.000Z
|
api-rest/apps/core/migrations/0003_auto_20200921_1323.py
|
zomars/koopers
|
57208f936926bd9e8b08947495d62c25f1168939
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-09-21 18:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20200921_1135'),
]
operations = [
migrations.AlterField(
model_name='screenshot',
name='screen_size',
field=models.CharField(choices=[('Mobile', 'Mobile'), ('Tablet', 'Tablet'), ('Desktop', 'Desktop')], max_length=200),
),
]
| 24.789474
| 129
| 0.600849
|
7d7e0daea4887832a0e84bfe81ad6858af5710ba
| 236
|
py
|
Python
|
src/backend/web/handlers/eventwizard.py
|
bovlb/the-blue-alliance
|
29389649d96fe060688f218d463e642dcebfd6cc
|
[
"MIT"
] | null | null | null |
src/backend/web/handlers/eventwizard.py
|
bovlb/the-blue-alliance
|
29389649d96fe060688f218d463e642dcebfd6cc
|
[
"MIT"
] | null | null | null |
src/backend/web/handlers/eventwizard.py
|
bovlb/the-blue-alliance
|
29389649d96fe060688f218d463e642dcebfd6cc
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
from flask import render_template
from backend.common.decorators import cached_public
@cached_public(ttl=timedelta(seconds=61))
def eventwizard() -> str:
return render_template("eventwizard.html")
| 21.454545
| 51
| 0.805085
|
ceb9cb1e1e812869675cc399d00fec56b869fc8e
| 241
|
py
|
Python
|
cablegate/urls.py
|
h3/django-cablegate
|
bffa2970a1fb21717a48cfce76b8a24f909acab0
|
[
"BSD-3-Clause"
] | 1
|
2016-04-03T03:15:48.000Z
|
2016-04-03T03:15:48.000Z
|
cablegate/urls.py
|
h3/django-cablegate
|
bffa2970a1fb21717a48cfce76b8a24f909acab0
|
[
"BSD-3-Clause"
] | null | null | null |
cablegate/urls.py
|
h3/django-cablegate
|
bffa2970a1fb21717a48cfce76b8a24f909acab0
|
[
"BSD-3-Clause"
] | 1
|
2019-07-31T06:02:12.000Z
|
2019-07-31T06:02:12.000Z
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^', include('cablegate.cable.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| 24.1
| 60
| 0.713693
|
447d7d6ece6feb5623c6e671da630aa4560e19aa
| 7,355
|
py
|
Python
|
server.py
|
miuho/Blockchain-Query-API
|
ba027081b16e9a9fdfc8493feae3f9ab28a970f9
|
[
"MIT"
] | null | null | null |
server.py
|
miuho/Blockchain-Query-API
|
ba027081b16e9a9fdfc8493feae3f9ab28a970f9
|
[
"MIT"
] | null | null | null |
server.py
|
miuho/Blockchain-Query-API
|
ba027081b16e9a9fdfc8493feae3f9ab28a970f9
|
[
"MIT"
] | null | null | null |
# server.py
# Run http server for Bitcoin blockchain Query API
#
# HingOn Miu
# https://docs.python.org/3/library/http.server.html
import io
import random
import string
import json
import time
import socket
import threading
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urlparse import urlparse
import blockchain
# sample http GET url for blockchain query request
# http://127.0.0.1:9000/blockheight?000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
# http://127.0.0.1:9000/blockheader?0000000082b5015589a3fdf2d4baff403e6f0be035a5d9742c1cae6295464449
# http://127.0.0.1:9000/latestheight
# API endpoint to get block height of the block
blockheight_endpoint = "/blockheight"
# API endpoint to check if block is in main chain
mainchain_endpoint = "/mainchain"
# API endpoint to get block header of the block
blockheader_endpoint = "/blockheader"
# API endpoint to get latest block
latestblock_endpoint = "/latestblock"
# API endpoint to get latest block height
latestheight_endpoint = "/latestheight"
# API endpoint to get transactions of the block
blocktransactions_endpoint = "/blocktransactions"
# API endpoint to get information of the transaction
transactioninfo_endpoint = "/transactioninfo"
# API endpoint to get input transactions of the transaction
transactioninputs_endpoint = "/transactioninputs"
# API endpoint to get output transactions of the transaction
transactionoutputs_endpoint = "/transactionoutputs"
API_endpoints = {
blockheight_endpoint, mainchain_endpoint,
blockheader_endpoint, latestblock_endpoint,
latestheight_endpoint, blocktransactions_endpoint,
transactioninfo_endpoint, transactioninputs_endpoint,
transactionoutputs_endpoint
}
class Handler(BaseHTTPRequestHandler):
# handle http GET requests
def do_GET(self):
print("GET: " + self.path)
# parse url path
parsed_path = urlparse(self.path)
# check if API endpoint correct
endpoint = parsed_path.path
if endpoint not in API_endpoints:
self.send_error(404)
return
# parse query
hash_big_endian = parsed_path.query
# check if hash has proper format
if (endpoint == blockheight_endpoint or
endpoint == mainchain_endpoint or
endpoint == blockheader_endpoint or
endpoint == blocktransactions_endpoint or
endpoint == transactioninfo_endpoint or
endpoint == transactioninputs_endpoint or
endpoint == transactionoutputs_endpoint):
# check if string length is 64
if len(hash_big_endian) != 64:
self.send_error(400)
return
# check if it is proper hex string
try:
int(hash_big_endian, 16)
except ValueError:
self.send_error(400)
return
else:
# other endpoints do not have parameter
if hash_big_endian != "":
self.send_error(400)
return
if endpoint == blockheight_endpoint:
# get block height
blockheight = blockchain.get_block_height(hash_big_endian)
# check if block hash is invalid
if blockheight == -1:
message = json.dumps({"error": "Invalid Block Hash"})
else:
message = json.dumps({"height": blockheight})
elif endpoint == mainchain_endpoint:
# check if block is in main chain (longest blockchain)
mainchain = blockchain.get_main_chain(hash_big_endian)
message = json.dumps({"main_chain": mainchain})
elif endpoint == blockheader_endpoint:
# get block header fields
ver_num, prev_hash, merk_hash, start_time, nBits, nonce = \
blockchain.get_block_header(hash_big_endian)
# check if block hash is invalid
if ver_num == -1:
message = json.dumps({"error": "Invalid Block Hash"})
else:
message = json.dumps({"version": ver_num, "prev_block": prev_hash,
"mrkl_root": merk_hash, "time": start_time,
"bits": nBits, "nonce": nonce})
elif endpoint == latestblock_endpoint:
# get the latest block of main chain
latestblock = blockchain.get_latest_block()
message = json.dumps({"hash": latestblock})
elif endpoint == latestheight_endpoint:
# get the latest block height of main chain
latestheight = blockchain.get_latest_height()
message = json.dumps({"height": latestheight})
elif endpoint == blocktransactions_endpoint:
# get block transactions
count, transactions = blockchain.get_block_transactions(hash_big_endian)
# check if block hash is invalid
if count == -1:
message = json.dumps({"error": "Invalid Block Hash"})
else:
txs = []
# traverse all transactions
for i in range(0, count):
txid, btc_amount = transactions[i]
txs += [{"tx_hash": txid, "value": btc_amount}]
message = json.dumps({"tx_count": count, "transactions": txs})
elif endpoint == transactioninfo_endpoint:
# get transaction info
block_hash, ver, input_count, output_count, btc_amount, locktime = \
blockchain.get_transaction_info(hash_big_endian)
# check if tx hash is invalid
if ver == -1:
message = json.dumps({"error": "Invalid Transaction Hash"})
else:
message = json.dumps({"block_hash": block_hash, "version": ver,
"input_tx_count": input_count,
"output_tx_count": output_count,
"value": btc_amount, "lock_time": locktime})
elif endpoint == transactioninputs_endpoint:
# get input transactions
count, input_transactions = blockchain.get_transaction_inputs(hash_big_endian)
# check if tx hash is invalid
if count == -1:
message = json.dumps({"error": "Invalid Transaction Hash"})
else:
input_txs = []
# traverse all input transactions
for i in range(0, count):
prev_txid, script, seq = input_transactions[i]
input_txs += [{"prev_hash": prev_txid, "sig_script": script, "seq_num": seq}]
message = json.dumps({"input_tx_count": count, "input_transactions": input_txs})
elif endpoint == transactionoutputs_endpoint:
# get output transactions
count, output_transactions = blockchain.get_transaction_outputs(hash_big_endian)
# check if tx hash is invalid
if count == -1:
message = json.dumps({"error": "Invalid Transaction Hash"})
else:
output_txs = []
# traverse all input transactions
for i in range(0, count):
satoshi, script = output_transactions[i]
output_txs += [{"value": satoshi, "sig_script": script}]
message = json.dumps({"output_tx_count": count, "output_transactions": output_txs})
else:
# should not get here
message = json.dumps({"error": "Invalid Request"})
self.send_response(200)
self.send_header("Content-Type", "text/plain; charset=utf-8")
self.end_headers()
self.wfile.write(message.encode('utf-8'))
self.wfile.write(b'\n')
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
pass
if __name__ == "__main__":
HOST, PORT = "localhost", 9000
# create server
server = ThreadedHTTPServer((HOST, PORT), Handler)
# start server thread to handle requests and server thread starts new thread for each new request
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
print("Server thread ready to handle http requests...")
# parse blockchain files
#blockchain.setup("Bitcoin/blocks/")
blockchain.setup("")
print("Blockchain setup done.")
    # block the main thread so the server thread keeps serving; Ctrl-C exits
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
# clean up server
server.shutdown()
server.server_close()
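A minimal client sketch for the endpoints above, shown with Python 3's urllib for convenience; the block hash is the genesis-block example from the comments near the top of the file.

import urllib.request

url = ("http://127.0.0.1:9000/blockheight?"
       "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")
with urllib.request.urlopen(url) as resp:
    print(resp.read().decode("utf-8"))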
| 30.26749
| 100
| 0.729436
|
c342ee22145631cc6b7285c012b4d51c5c032051
| 19,647
|
py
|
Python
|
up42/viztools.py
|
andher1802/dummy_0532
|
fb953c2785652c583d1147a2b688997e260e6afa
|
[
"MIT"
] | 72
|
2020-04-06T16:36:36.000Z
|
2022-03-30T23:39:14.000Z
|
up42/viztools.py
|
j-tr/up42-py
|
163b5a324775998987fc646afe8d5c257dd559ee
|
[
"MIT"
] | 59
|
2020-04-08T14:50:40.000Z
|
2022-03-23T22:14:46.000Z
|
up42/viztools.py
|
j-tr/up42-py
|
163b5a324775998987fc646afe8d5c257dd559ee
|
[
"MIT"
] | 40
|
2020-04-07T22:34:05.000Z
|
2022-03-23T10:57:15.000Z
|
"""
Visualization tools available in various objects
"""
# pylint: disable=dangerous-default-value
from typing import Tuple, List, Union, Optional
import math
from pathlib import Path
import warnings
import numpy as np
from shapely.geometry import box
import geopandas as gpd
from geopandas import GeoDataFrame
import matplotlib.pyplot as plt
import rasterio
from rasterio.plot import show
from rasterio.vrt import WarpedVRT
import folium
from folium.plugins import Draw
from up42.utils import (
get_logger,
)
# Folium map styling constants
VECTOR_STYLE = {
"fillColor": "#5288c4",
"color": "blue",
"weight": 2.5,
"dashArray": "5, 5",
}
HIGHLIGHT_STYLE = {
"fillColor": "#ffaf00",
"color": "red",
"weight": 3.5,
"dashArray": "5, 5",
}
# ignore warnings
warnings.filterwarnings("ignore", category=rasterio.errors.NotGeoreferencedWarning)
try:
from IPython import get_ipython
get_ipython().run_line_magic("matplotlib", "inline")
except (ImportError, AttributeError):
# No Ipython installed, Installed but run in shell
pass
logger = get_logger(__name__)
# pylint: disable=no-member, duplicate-code
class VizTools:
def __init__(self):
"""
Visualization functionality
"""
self.quicklooks = None
self.results: Union[list, dict, None] = None
def plot_results(
self,
figsize: Tuple[int, int] = (14, 8),
bands: List[int] = [1, 2, 3],
titles: Optional[List[str]] = None,
filepaths: Union[List[Union[str, Path]], dict, None] = None,
plot_file_format: List[str] = [".tif"],
**kwargs,
) -> None:
# pylint: disable=line-too-long
"""
Plots image data (quicklooks or results)
Args:
figsize: matplotlib figure size.
bands: Image bands and order to plot, default [1,2,3]. First band is 1.
titles: Optional list of titles for the subplots.
filepaths: Paths to images to plot. Optional, by default picks up the last
downloaded results.
plot_file_format: List of accepted image file formats e.g. [".tif"]
kwargs: Accepts any additional args and kwargs of
[rasterio.plot.show](https://rasterio.readthedocs.io/en/latest/api/rasterio.plot.html#rasterio.plot.show),
e.g. matplotlib cmap etc.
"""
if filepaths is None:
if self.results is None:
raise ValueError("You first need to download the results!")
filepaths = self.results
# Unpack results path dict in case of jobcollection.
if isinstance(filepaths, dict):
filepaths_lists = list(filepaths.values())
filepaths = [item for sublist in filepaths_lists for item in sublist]
if not isinstance(filepaths, list):
filepaths = [filepaths] # type: ignore
filepaths = [Path(path) for path in filepaths]
imagepaths = [
path for path in filepaths if str(path.suffix) in plot_file_format # type: ignore
]
if not imagepaths:
raise ValueError(
f"This function only plots files of format {plot_file_format}."
)
if not titles:
titles = [Path(fp).stem for fp in imagepaths]
if not isinstance(titles, list):
titles = [titles] # type: ignore
if len(imagepaths) < 2:
nrows, ncols = 1, 1
else:
ncols = 3
nrows = int(math.ceil(len(imagepaths) / float(ncols)))
_, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
if len(imagepaths) > 1:
axs = axs.ravel()
else:
axs = [axs]
if len(bands) != 3:
if len(bands) == 1:
if "cmap" not in kwargs:
kwargs["cmap"] = "gray"
else:
raise ValueError("Parameter bands can only contain one or three bands.")
for idx, (fp, title) in enumerate(zip(imagepaths, titles)):
with rasterio.open(fp) as src:
img_array = src.read(bands)
show(
img_array,
transform=src.transform,
title=title,
ax=axs[idx],
aspect="auto",
**kwargs,
)
axs[idx].set_axis_off()
plt.axis("off")
plt.tight_layout()
plt.show()
def plot_quicklooks(
self,
figsize: Tuple[int, int] = (8, 8),
titles: Optional[List[str]] = None,
filepaths: Optional[list] = None,
) -> None:
"""
Plots the downloaded quicklooks (filepaths saved to self.quicklooks of the
respective object, e.g. job, catalog).
Args:
figsize: matplotlib figure size.
filepaths: Paths to images to plot. Optional, by default picks up the last
downloaded results.
titles: List of titles for the subplots, optional.
"""
if filepaths is None:
if self.quicklooks is None:
raise ValueError("You first need to download the quicklooks!")
filepaths = self.quicklooks
self.plot_results(
plot_file_format=[".jpg", ".jpeg", ".png"],
figsize=figsize,
filepaths=filepaths,
titles=titles,
)
@staticmethod
def _map_images(
plot_file_format: List[str],
result_df: GeoDataFrame,
filepaths: List[Union[str, Path]],
bands: List[int] = [1, 2, 3],
aoi: Optional[GeoDataFrame] = None,
show_images=True,
show_features=False,
name_column: str = "id",
save_html: Optional[Path] = None,
) -> folium.Map:
"""
Displays data.json, and if available, one or multiple results geotiffs.
Args:
plot_file_format: List of accepted image file formats e.g. [".png"]
result_df: GeoDataFrame with scene geometries.
aoi: GeoDataFrame of aoi.
filepaths: Paths to images to plot. Optional, by default picks up the last
downloaded results.
show_images: Shows images if True (default).
show_features: Show features if True. For quicklooks maps is set to False.
name_column: Name of the feature property that provides the Feature/Layer name.
save_html: The path for saving folium map as html file. With default None, no file is saved.
"""
if result_df.shape[0] > 100:
result_df = result_df.iloc[:100]
logger.info(
"Only the first 100 results will be displayed to avoid memory "
"issues."
)
centroid = box(*result_df.total_bounds).centroid
m = folium_base_map(
lat=centroid.y,
lon=centroid.x,
)
df_bounds = result_df.bounds
list_bounds = df_bounds.values.tolist()
raster_filepaths = [
path for path in filepaths if Path(path).suffix in plot_file_format
]
try:
feature_names = result_df[name_column].to_list()
except KeyError:
feature_names = [""] * len(result_df.index)
if aoi is not None:
aoi_style = VECTOR_STYLE.copy()
aoi_style["color"] = "red"
folium.GeoJson(
aoi,
name="aoi",
style_function=lambda x: aoi_style,
highlight_function=lambda x: HIGHLIGHT_STYLE,
).add_to(m)
if show_features:
for idx, row in result_df.iterrows(): # type: ignore
try:
feature_name = row.loc[name_column]
except KeyError:
feature_name = ""
layer_name = f"Feature {idx + 1} - {feature_name}"
f = folium.GeoJson(
row["geometry"],
name=layer_name,
style_function=lambda x: VECTOR_STYLE,
highlight_function=lambda x: HIGHLIGHT_STYLE,
)
folium.Popup(
f"{layer_name}: {row.drop('geometry', axis=0).to_json()}"
).add_to(f)
f.add_to(m)
if show_images and raster_filepaths:
if len(bands) != 3:
if len(bands) == 1:
bands = bands * 3 # plot as grayband
else:
raise ValueError(
"Parameter bands can only contain one or three bands."
)
for idx, (raster_fp, feature_name) in enumerate(
zip(raster_filepaths, feature_names)
):
with rasterio.open(raster_fp) as src:
if src.meta["crs"] is None:
dst_array = src.read(bands)
minx, miny, maxx, maxy = list_bounds[idx]
else:
# Folium requires 4326, streaming blocks are 3857
with WarpedVRT(src, crs="EPSG:4326") as vrt:
dst_array = vrt.read(bands)
minx, miny, maxx, maxy = vrt.bounds
m.add_child(
folium.raster_layers.ImageOverlay(
np.moveaxis(np.stack(dst_array), 0, 2),
bounds=[[miny, minx], [maxy, maxx]], # different order.
name=f"Image {idx + 1} - {feature_name}",
)
)
# Collapse layer control with too many features.
collapsed = bool(result_df.shape[0] > 4)
folium.LayerControl(position="bottomleft", collapsed=collapsed).add_to(m)
if save_html:
save_html = Path(save_html)
if not save_html.exists():
save_html.mkdir(parents=True, exist_ok=True)
filepath = save_html / "final_map.html"
with filepath.open("w") as f:
f.write(m._repr_html_())
return m
def map_results(
self,
bands=[1, 2, 3],
aoi: GeoDataFrame = None,
show_images: bool = True,
show_features: bool = True,
name_column: str = "uid",
save_html: Path = None,
) -> folium.Map:
"""
Displays data.json, and if available, one or multiple results geotiffs.
Args:
bands: Image bands and order to plot, default [1,2,3]. First band is 1.
aoi: Optional visualization of aoi boundaries when given GeoDataFrame of aoi.
show_images: Shows images if True (default).
show_features: Shows features if True (default).
name_column: Name of the feature property that provides the Feature/Layer name.
save_html: The path for saving folium map as html file. With default None, no file is saved.
"""
        # TODO: Surface optional filepaths? Or remove the option altogether?
if self.results is None:
raise ValueError(
"You first need to download the results via job.download_results()!"
)
f_paths = []
if isinstance(self.results, list):
# Add features to map.
# Some blocks store vector results in an additional geojson file.
# pylint: disable=not-an-iterable
json_fp = [fp for fp in self.results if fp.endswith(".geojson")]
if json_fp:
json_fp = json_fp[0] # why only one element is selected?
else:
# pylint: disable=not-an-iterable
json_fp = [fp for fp in self.results if fp.endswith(".json")][0]
f_paths = self.results
elif isinstance(self.results, dict):
# pylint: disable=unsubscriptable-object
json_fp = self.results["merged_result"][0]
f_paths = []
for k, v in self.results.items():
if k != "merged_result":
f_paths.append([i for i in v if i.endswith(".tif")][0])
df: GeoDataFrame = gpd.read_file(json_fp)
# Add image to map.
m = self._map_images(
bands=bands,
plot_file_format=[".tif"],
result_df=df,
filepaths=f_paths,
aoi=aoi,
show_images=show_images,
show_features=show_features,
name_column=name_column,
save_html=save_html,
)
return m
def map_quicklooks(
self,
scenes: GeoDataFrame,
aoi: Optional[GeoDataFrame] = None,
show_images: bool = True,
show_features: bool = False,
filepaths: Optional[list] = None,
name_column: str = "id",
save_html: Optional[Path] = None,
) -> folium.Map:
"""
TODO: Currently only implemented for catalog!
Plots the downloaded quicklooks (filepaths saved to self.quicklooks of the
respective object, e.g. job, catalog).
Args:
scenes: GeoDataFrame of scenes, results of catalog.search()
aoi: GeoDataFrame of aoi.
show_images: Shows images if True (default).
            show_features: Shows features if True; default is False.
filepaths: Paths to images to plot. Optional, by default picks up the last
downloaded results.
name_column: Name of the feature property that provides the Feature/Layer name.
save_html: The path for saving folium map as html file. With default None, no file is saved.
"""
if filepaths is None:
if self.quicklooks is None:
raise ValueError("You first need to download the quicklooks!")
filepaths = self.quicklooks
m = self._map_images(
plot_file_format=[".jpg", ".jpeg", ".png"],
result_df=scenes,
filepaths=filepaths,
aoi=aoi,
show_images=show_images,
show_features=show_features,
name_column=name_column,
save_html=save_html,
)
return m
@staticmethod
def plot_coverage(
scenes: GeoDataFrame,
aoi: Optional[GeoDataFrame] = None,
legend_column: str = "sceneId",
figsize=(12, 16),
) -> None:
"""
Plots a coverage map of a dataframe with geometries e.g. the results of catalog.search())
Args:
scenes: GeoDataFrame of scenes, results of catalog.search()
aoi: GeoDataFrame of aoi.
legend_column: Dataframe column set to legend, default is "sceneId".
Legend entries are sorted and this determines plotting order.
figsize: Matplotlib figure size.
"""
if legend_column not in scenes.columns:
legend_column = None # type: ignore
logger.info(
"Given legend_column name not in scene dataframe, "
"plotting without legend."
)
try:
ax = scenes.plot(
legend_column,
categorical=True,
figsize=figsize,
cmap="Set3",
legend=True,
alpha=0.7,
legend_kwds=dict(loc="upper left", bbox_to_anchor=(1, 1)),
)
if aoi is not None:
aoi.plot(color="r", ax=ax, fc="None", edgecolor="r", lw=1)
except AttributeError as e:
raise TypeError(
"'scenes' and 'aoi' (optional) have to be a GeoDataFrame."
) from e
ax.set_axis_off()
plt.show()
def folium_base_map(
lat: float = 52.49190032214706,
lon: float = 13.39117252959244,
zoom_start: int = 14,
width_percent: str = "95%",
layer_control: bool = False,
) -> folium.Map:
"""Provides a folium map with basic features and UP42 logo."""
mapfigure = folium.Figure(width=width_percent)
m = folium.Map(location=[lat, lon], zoom_start=zoom_start, crs="EPSG3857").add_to(
mapfigure
)
tiles = (
"https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery"
"/MapServer/tile/{z}/{y}/{x}.png"
)
attr = (
"Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, "
"AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the "
"GIS User Community"
)
folium.TileLayer(tiles=tiles, attr=attr, name="Satellite - ESRI").add_to(m)
formatter = "function(num) {return L.Util.formatNum(num, 4) + ' ';};"
folium.plugins.MousePosition(
position="bottomright",
separator=" | ",
empty_string="NaN",
lng_first=True,
num_digits=20,
prefix="lon/lat:",
lat_formatter=formatter,
lng_formatter=formatter,
).add_to(m)
folium.plugins.MiniMap(
tile_layer="OpenStreetMap", position="bottomright", zoom_level_offset=-6
).add_to(m)
folium.plugins.Fullscreen().add_to(m)
folium.plugins.FloatImage(
image="https://cdn-images-1.medium.com/max/140/1*XJ_B7ur_c8bYKniXpKVpWg@2x.png",
bottom=90,
left=88,
).add_to(m)
if layer_control:
folium.LayerControl(position="bottomleft", collapsed=False, zindex=100).add_to(
m
)
    # If additional layers are added outside of this base-map function, don't
    # enable the layer control here as well; doing both yields an empty map.
return m
class DrawFoliumOverride(Draw):
def render(self, **kwargs):
# pylint: disable=import-outside-toplevel
from branca.element import CssLink, Element, Figure, JavascriptLink
super().render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), (
"You cannot render this Element " "if it is not in a Figure."
)
figure.header.add_child(
JavascriptLink(
"https://cdnjs.cloudflare.com/ajax/libs/leaflet.draw/1.0.2/"
"leaflet.draw.js"
)
) # noqa
figure.header.add_child(
CssLink(
"https://cdnjs.cloudflare.com/ajax/libs/leaflet.draw/1.0.2/"
"leaflet.draw.css"
)
) # noqa
export_style = """
<style>
#export {
position: absolute;
top: 270px;
left: 11px;
z-index: 999;
padding: 6px;
border-radius: 3px;
box-sizing: border-box;
color: #333;
background-color: #fff;
border: 2px solid rgba(0,0,0,0.5);
box-shadow: None;
font-family: 'Helvetica Neue';
cursor: pointer;
font-size: 17px;
text-decoration: none;
text-align: center;
font-weight: bold;
}
</style>
"""
# TODO: How to change hover color?
export_button = """<a href='#' id='export'>Export as<br/>GeoJson</a>"""
if self.export:
figure.header.add_child(Element(export_style), name="export")
figure.html.add_child(Element(export_button), name="export_button")
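A short usage sketch for folium_base_map; the coordinates are the Berlin defaults from the signature and the output filename is illustrative.

# Sketch: render the base map and write it to a standalone HTML file.
m = folium_base_map(lat=52.4919, lon=13.3912, zoom_start=12)
m.save("base_map.html")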
| 35.209677
| 122
| 0.548226
|
d79692f2207a4efd11ce732e5f576391cb3195fa
| 4,416
|
py
|
Python
|
nova/api/openstack/compute/image_metadata.py
|
bopopescu/trusted-nova
|
b440afb89f6f170c0831f5d6318a08ec41bc8c0a
|
[
"Apache-2.0"
] | 1
|
2015-07-15T08:51:16.000Z
|
2015-07-15T08:51:16.000Z
|
nova/api/openstack/compute/image_metadata.py
|
bopopescu/trusted-nova
|
b440afb89f6f170c0831f5d6318a08ec41bc8c0a
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/image_metadata.py
|
bopopescu/trusted-nova
|
b440afb89f6f170c0831f5d6318a08ec41bc8c0a
|
[
"Apache-2.0"
] | 2
|
2019-06-12T00:52:15.000Z
|
2020-07-24T10:35:29.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import exception
from nova import flags
from nova import image
FLAGS = flags.FLAGS
class Controller(object):
"""The image metadata API controller for the OpenStack API"""
def __init__(self):
self.image_service = image.get_default_image_service()
def _get_image(self, context, image_id):
try:
return self.image_service.show(context, image_id)
except exception.NotFound:
msg = _("Image not found.")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.serializers(xml=common.MetadataTemplate)
def index(self, req, image_id):
"""Returns the list of metadata for a given instance"""
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
return dict(metadata=metadata)
@wsgi.serializers(xml=common.MetaItemTemplate)
def show(self, req, image_id, id):
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
if id in metadata:
return {'meta': {id: metadata[id]}}
else:
raise exc.HTTPNotFound()
@wsgi.serializers(xml=common.MetadataTemplate)
@wsgi.deserializers(xml=common.MetadataDeserializer)
def create(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
if 'metadata' in body:
for key, value in body['metadata'].iteritems():
image['properties'][key] = value
common.check_img_metadata_quota_limit(context, image['properties'])
self.image_service.update(context, image_id, image, None)
return dict(metadata=image['properties'])
@wsgi.serializers(xml=common.MetaItemTemplate)
@wsgi.deserializers(xml=common.MetaItemDeserializer)
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
try:
meta = body['meta']
except KeyError:
expl = _('Incorrect request body format')
raise exc.HTTPBadRequest(explanation=expl)
        if id not in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(meta) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
image = self._get_image(context, image_id)
image['properties'][id] = meta[id]
common.check_img_metadata_quota_limit(context, image['properties'])
self.image_service.update(context, image_id, image, None)
return dict(meta=meta)
@wsgi.serializers(xml=common.MetadataTemplate)
@wsgi.deserializers(xml=common.MetadataDeserializer)
def update_all(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
metadata = body.get('metadata', {})
common.check_img_metadata_quota_limit(context, metadata)
image['properties'] = metadata
self.image_service.update(context, image_id, image, None)
return dict(metadata=metadata)
@wsgi.response(204)
def delete(self, req, image_id, id):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
        if id not in image['properties']:
msg = _("Invalid metadata key")
raise exc.HTTPNotFound(explanation=msg)
image['properties'].pop(id)
self.image_service.update(context, image_id, image, None)
def create_resource():
return wsgi.Resource(Controller())
| 37.109244
| 78
| 0.668025
|
e3635627346d8e7dde0a425f155ab70ec59e7d90
| 12,414
|
py
|
Python
|
sdk/python/pulumi_azure_native/web/v20150801/list_site_backup_status_secrets.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/web/v20150801/list_site_backup_status_secrets.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/web/v20150801/list_site_backup_status_secrets.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = [
'ListSiteBackupStatusSecretsResult',
'AwaitableListSiteBackupStatusSecretsResult',
'list_site_backup_status_secrets',
]
@pulumi.output_type
class ListSiteBackupStatusSecretsResult:
"""
Backup description
"""
def __init__(__self__, blob_name=None, correlation_id=None, created=None, databases=None, finished_time_stamp=None, id=None, kind=None, last_restore_time_stamp=None, location=None, log=None, name=None, scheduled=None, size_in_bytes=None, status=None, storage_account_url=None, tags=None, type=None, website_size_in_bytes=None):
if blob_name and not isinstance(blob_name, str):
raise TypeError("Expected argument 'blob_name' to be a str")
pulumi.set(__self__, "blob_name", blob_name)
if correlation_id and not isinstance(correlation_id, str):
raise TypeError("Expected argument 'correlation_id' to be a str")
pulumi.set(__self__, "correlation_id", correlation_id)
if created and not isinstance(created, str):
raise TypeError("Expected argument 'created' to be a str")
pulumi.set(__self__, "created", created)
if databases and not isinstance(databases, list):
raise TypeError("Expected argument 'databases' to be a list")
pulumi.set(__self__, "databases", databases)
if finished_time_stamp and not isinstance(finished_time_stamp, str):
raise TypeError("Expected argument 'finished_time_stamp' to be a str")
pulumi.set(__self__, "finished_time_stamp", finished_time_stamp)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if last_restore_time_stamp and not isinstance(last_restore_time_stamp, str):
raise TypeError("Expected argument 'last_restore_time_stamp' to be a str")
pulumi.set(__self__, "last_restore_time_stamp", last_restore_time_stamp)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if log and not isinstance(log, str):
raise TypeError("Expected argument 'log' to be a str")
pulumi.set(__self__, "log", log)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if scheduled and not isinstance(scheduled, bool):
raise TypeError("Expected argument 'scheduled' to be a bool")
pulumi.set(__self__, "scheduled", scheduled)
if size_in_bytes and not isinstance(size_in_bytes, float):
raise TypeError("Expected argument 'size_in_bytes' to be a float")
pulumi.set(__self__, "size_in_bytes", size_in_bytes)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if storage_account_url and not isinstance(storage_account_url, str):
raise TypeError("Expected argument 'storage_account_url' to be a str")
pulumi.set(__self__, "storage_account_url", storage_account_url)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if website_size_in_bytes and not isinstance(website_size_in_bytes, float):
raise TypeError("Expected argument 'website_size_in_bytes' to be a float")
pulumi.set(__self__, "website_size_in_bytes", website_size_in_bytes)
@property
@pulumi.getter(name="blobName")
def blob_name(self) -> Optional[str]:
"""
Name of the blob which contains data for this backup
"""
return pulumi.get(self, "blob_name")
@property
@pulumi.getter(name="correlationId")
def correlation_id(self) -> Optional[str]:
"""
Unique correlation identifier. Please use this along with the timestamp while communicating with Azure support.
"""
return pulumi.get(self, "correlation_id")
@property
@pulumi.getter
def created(self) -> Optional[str]:
"""
Timestamp of the backup creation
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def databases(self) -> Optional[Sequence['outputs.DatabaseBackupSettingResponse']]:
"""
List of databases included in the backup
"""
return pulumi.get(self, "databases")
@property
@pulumi.getter(name="finishedTimeStamp")
def finished_time_stamp(self) -> Optional[str]:
"""
Timestamp when this backup finished.
"""
return pulumi.get(self, "finished_time_stamp")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="lastRestoreTimeStamp")
def last_restore_time_stamp(self) -> Optional[str]:
"""
Timestamp of a last restore operation which used this backup.
"""
return pulumi.get(self, "last_restore_time_stamp")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource Location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def log(self) -> Optional[str]:
"""
Details regarding this backup. Might contain an error message.
"""
return pulumi.get(self, "log")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Resource Name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def scheduled(self) -> Optional[bool]:
"""
True if this backup has been created due to a schedule being triggered.
"""
return pulumi.get(self, "scheduled")
@property
@pulumi.getter(name="sizeInBytes")
def size_in_bytes(self) -> Optional[float]:
"""
Size of the backup in bytes
"""
return pulumi.get(self, "size_in_bytes")
@property
@pulumi.getter
def status(self) -> str:
"""
Backup status
"""
return pulumi.get(self, "status")
@property
@pulumi.getter(name="storageAccountUrl")
def storage_account_url(self) -> Optional[str]:
"""
SAS URL for the storage account container which contains this backup
"""
return pulumi.get(self, "storage_account_url")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="websiteSizeInBytes")
def website_size_in_bytes(self) -> Optional[float]:
"""
Size of the original web app which has been backed up
"""
return pulumi.get(self, "website_size_in_bytes")
class AwaitableListSiteBackupStatusSecretsResult(ListSiteBackupStatusSecretsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListSiteBackupStatusSecretsResult(
blob_name=self.blob_name,
correlation_id=self.correlation_id,
created=self.created,
databases=self.databases,
finished_time_stamp=self.finished_time_stamp,
id=self.id,
kind=self.kind,
last_restore_time_stamp=self.last_restore_time_stamp,
location=self.location,
log=self.log,
name=self.name,
scheduled=self.scheduled,
size_in_bytes=self.size_in_bytes,
status=self.status,
storage_account_url=self.storage_account_url,
tags=self.tags,
type=self.type,
website_size_in_bytes=self.website_size_in_bytes)
def list_site_backup_status_secrets(backup_id: Optional[str] = None,
backup_schedule: Optional[pulumi.InputType['BackupScheduleArgs']] = None,
databases: Optional[Sequence[pulumi.InputType['DatabaseBackupSettingArgs']]] = None,
enabled: Optional[bool] = None,
id: Optional[str] = None,
kind: Optional[str] = None,
location: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
storage_account_url: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListSiteBackupStatusSecretsResult:
"""
Backup description
:param str backup_id: Id of backup
:param pulumi.InputType['BackupScheduleArgs'] backup_schedule: Schedule for the backup if it is executed periodically
:param Sequence[pulumi.InputType['DatabaseBackupSettingArgs']] databases: Databases included in the backup
:param bool enabled: True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled
:param str id: Resource Id
:param str kind: Kind of resource
:param str location: Resource Location
:param str name: Resource Name
:param str resource_group_name: Name of resource group
:param str storage_account_url: SAS URL to the container
:param Mapping[str, str] tags: Resource tags
:param str type: Resource type
"""
__args__ = dict()
__args__['backupId'] = backup_id
__args__['backupSchedule'] = backup_schedule
__args__['databases'] = databases
__args__['enabled'] = enabled
__args__['id'] = id
__args__['kind'] = kind
__args__['location'] = location
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['storageAccountUrl'] = storage_account_url
__args__['tags'] = tags
__args__['type'] = type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20150801:listSiteBackupStatusSecrets', __args__, opts=opts, typ=ListSiteBackupStatusSecretsResult).value
return AwaitableListSiteBackupStatusSecretsResult(
blob_name=__ret__.blob_name,
correlation_id=__ret__.correlation_id,
created=__ret__.created,
databases=__ret__.databases,
finished_time_stamp=__ret__.finished_time_stamp,
id=__ret__.id,
kind=__ret__.kind,
last_restore_time_stamp=__ret__.last_restore_time_stamp,
location=__ret__.location,
log=__ret__.log,
name=__ret__.name,
scheduled=__ret__.scheduled,
size_in_bytes=__ret__.size_in_bytes,
status=__ret__.status,
storage_account_url=__ret__.storage_account_url,
tags=__ret__.tags,
type=__ret__.type,
website_size_in_bytes=__ret__.website_size_in_bytes)
| 38.79375
| 331
| 0.637506
|
4ace6a685e6d4b0452b5ef260810b53a2b01e6a1
| 399
|
py
|
Python
|
python/django/experiments/experiments/wsgi.py
|
romanthekat/experiments
|
b37125e54e846cfa6027319d724f4028e81b0ad6
|
[
"MIT"
] | null | null | null |
python/django/experiments/experiments/wsgi.py
|
romanthekat/experiments
|
b37125e54e846cfa6027319d724f4028e81b0ad6
|
[
"MIT"
] | null | null | null |
python/django/experiments/experiments/wsgi.py
|
romanthekat/experiments
|
b37125e54e846cfa6027319d724f4028e81b0ad6
|
[
"MIT"
] | null | null | null |
"""
WSGI config for experiments project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'experiments.settings')
application = get_wsgi_application()
| 23.470588
| 78
| 0.789474
|
fab3409b42334a3025712e9728e5a5095f090cb9
| 9,673
|
py
|
Python
|
scripts/reduce_sql.py
|
hannes/duckdb
|
4a24d71edecc7c0018eb3860d2e104cfe90462b6
|
[
"MIT"
] | null | null | null |
scripts/reduce_sql.py
|
hannes/duckdb
|
4a24d71edecc7c0018eb3860d2e104cfe90462b6
|
[
"MIT"
] | null | null | null |
scripts/reduce_sql.py
|
hannes/duckdb
|
4a24d71edecc7c0018eb3860d2e104cfe90462b6
|
[
"MIT"
] | null | null | null |
import re
import subprocess
import time
import os
import fuzzer_helper
import multiprocessing
import sqlite3
multiprocessing.set_start_method('fork')
get_reduced_query = '''
SELECT * FROM reduce_sql_statement('${QUERY}');
'''
def sanitize_error(err):
    err = re.sub(r'Error: near line \d+: ', '', err)
err = err.replace(os.getcwd() + '/', '')
err = err.replace(os.getcwd(), '')
return err
def run_shell_command(shell, cmd):
command = [shell, '-csv', '--batch', '-init', '/dev/null']
res = subprocess.run(command, input=bytearray(cmd, 'utf8'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = res.stdout.decode('utf8').strip()
stderr = res.stderr.decode('utf8').strip()
return (stdout, stderr, res.returncode)
def get_reduced_sql(shell, sql_query):
reduce_query = get_reduced_query.replace('${QUERY}', sql_query.replace("'", "''"))
(stdout, stderr, returncode) = run_shell_command(shell, reduce_query)
if returncode != 0:
print(stdout)
print(stderr)
raise Exception("Failed to reduce query")
reduce_candidates = []
for line in stdout.split('\n'):
reduce_candidates.append(line.strip('"').replace('""', '"'))
return reduce_candidates[1:]
def reduce(sql_query, data_load, shell, error_msg, max_time_seconds=300):
start = time.time()
while True:
found_new_candidate = False
reduce_candidates = get_reduced_sql(shell, sql_query)
for reduce_candidate in reduce_candidates:
if reduce_candidate == sql_query:
continue
current_time = time.time()
if current_time - start > max_time_seconds:
break
(stdout, stderr, returncode) = run_shell_command(shell, data_load + reduce_candidate)
new_error = sanitize_error(stderr)
if new_error == error_msg:
sql_query = reduce_candidate
found_new_candidate = True
print("Found new reduced query")
print("=======================")
print(sql_query)
print("=======================")
break
if not found_new_candidate:
break
return sql_query
def is_ddl_query(query):
query = query.lower()
if 'create' in query or 'insert' in query or 'update' in query or 'delete' in query:
return True
return False
def initial_cleanup(query_log):
query_log = query_log.replace('SELECT * FROM pragma_version()\n', '')
return query_log
def run_queries_until_crash_mp(queries, result_file):
import duckdb
con = duckdb.connect()
sqlite_con = sqlite3.connect(result_file)
sqlite_con.execute('CREATE TABLE queries(id INT, text VARCHAR)')
sqlite_con.execute('CREATE TABLE result(text VARCHAR)')
sqlite_con.execute("INSERT INTO result VALUES ('__CRASH__')")
id = 1
is_internal_error = False
for q in queries:
# insert the current query into the database
# we do this pre-emptively in case the program crashes
sqlite_con.execute('INSERT INTO queries VALUES (?, ?)', (id, q))
sqlite_con.commit()
keep_query = False
try:
con.execute(q)
keep_query = is_ddl_query(q)
except Exception as e:
exception_error = str(e)
is_internal_error = fuzzer_helper.is_internal_error(exception_error)
if is_internal_error:
keep_query = True
sqlite_con.execute('UPDATE result SET text=?', (exception_error,))
if not keep_query:
sqlite_con.execute('DELETE FROM queries WHERE id=?', (id, ))
if is_internal_error:
# found internal error: no need to try further queries
break
id += 1
if not is_internal_error:
# failed to reproduce: delete result
sqlite_con.execute('DELETE FROM result')
sqlite_con.commit()
sqlite_con.close()
def run_queries_until_crash(queries):
sqlite_file = 'cleaned_queries.db'
if os.path.isfile(sqlite_file):
os.remove(sqlite_file)
# run the queries in a separate process because it might crash
p = multiprocessing.Process(target=run_queries_until_crash_mp, args=(queries, sqlite_file))
p.start()
p.join()
# read the queries back from the file
sqlite_con = sqlite3.connect(sqlite_file)
queries = sqlite_con.execute('SELECT text FROM queries ORDER BY id').fetchall()
results = sqlite_con.execute('SELECT text FROM result').fetchall()
sqlite_con.close()
if len(results) == 0:
# no internal error or crash found
return (None, None)
assert len(results) == 1
return ([x[0] for x in queries], results[0][0])
def cleanup_irrelevant_queries(query_log):
query_log = initial_cleanup(query_log)
queries = [x for x in query_log.split(';\n') if len(x) > 0]
return run_queries_until_crash(queries)
# def reduce_internal(start, sql_query, data_load, queries_final, shell, error_msg, max_time_seconds=300):
def reduce_query_log_query(start, shell, queries, query_index, max_time_seconds):
new_query_list = queries[:]
sql_query = queries[query_index]
while True:
found_new_candidate = False
reduce_candidates = get_reduced_sql(shell, sql_query)
for reduce_candidate in reduce_candidates:
if reduce_candidate == sql_query:
continue
current_time = time.time()
if current_time - start > max_time_seconds:
break
new_query_list[query_index] = reduce_candidate
(_, error) = run_queries_until_crash(new_query_list)
if error is not None:
sql_query = reduce_candidate
found_new_candidate = True
print("Found new reduced query")
print("=======================")
print(sql_query)
print("========ERROR==========")
print(error)
print("=======================")
print("")
break
if not found_new_candidate:
break
return sql_query
def reduce_query_log(queries, shell, max_time_seconds=300):
start = time.time()
current_index = 0
# first try to remove as many queries as possible
while current_index < len(queries):
print("Attempting to remove query at position %d (of %d total queries)" % (current_index, len(queries)))
current_time = time.time()
if current_time - start > max_time_seconds:
break
# remove the query at "current_index"
new_queries = queries[:current_index] + queries[current_index + 1:]
# try to run the queries and check if we still get the same error
        (_, current_error) = run_queries_until_crash(new_queries)
if current_error is None:
# cannot remove this query without invalidating the test case
current_index += 1
else:
# we can remove this query
queries = new_queries
# now try to reduce individual queries
for i in range(len(queries)):
if is_ddl_query(queries[i]):
continue
current_time = time.time()
if current_time - start > max_time_seconds:
break
queries[i] = reduce_query_log_query(start, shell, queries, i, max_time_seconds)
return queries
# Example usage:
# error_msg = 'INTERNAL Error: Assertion triggered in file "/Users/myth/Programs/duckdb-bugfix/src/common/types/data_chunk.cpp" on line 41: !types.empty()'
# shell = 'build/debug/duckdb'
# data_load = 'create table all_types as select * from test_all_types();'
# sql_query = '''
# select
# subq_0.c0 as c0,
# contains(
# cast(cast(nullif(
# argmax(
# cast(case when 0 then (select varchar from main.all_types limit 1 offset 5)
# else (select varchar from main.all_types limit 1 offset 5)
# end
# as varchar),
# cast(decode(
# cast(cast(null as blob) as blob)) as varchar)) over (partition by subq_0.c1 order by subq_0.c1),
# current_schema()) as varchar) as varchar),
# cast(cast(nullif(cast(null as varchar),
# cast(null as varchar)) as varchar) as varchar)) as c1,
# (select min(time) from main.all_types)
# as c2,
# subq_0.c1 as c3,
# subq_0.c1 as c4,
# cast(nullif(subq_0.c1,
# subq_0.c1) as decimal(4,1)) as c5
# from
# (select
# ref_0.timestamp_ns as c0,
# case when (EXISTS (
# select
# ref_0.timestamp_ns as c0,
# ref_0.timestamp_ns as c1,
# (select timestamp_tz from main.all_types limit 1 offset 4)
# as c2,
# ref_1.int_array as c3,
# ref_1.dec_4_1 as c4,
# ref_0.utinyint as c5,
# ref_1.int as c6,
# ref_0.double as c7,
# ref_0.medium_enum as c8,
# ref_1.array_of_structs as c9,
# ref_1.varchar as c10
# from
# main.all_types as ref_1
# where ref_1.varchar ~~~ ref_1.varchar
# limit 28))
# or (ref_0.varchar ~~~ ref_0.varchar) then ref_0.dec_4_1 else ref_0.dec_4_1 end
# as c1
# from
# main.all_types as ref_0
# where (0)
# and (ref_0.varchar ~~ ref_0.varchar)) as subq_0
# where writefile() !~~* writefile()
# limit 88
# '''
#
# print(reduce(sql_query, data_load, shell, error_msg))
| 37.203846 | 155 | 0.608808 |
a2edcee42da1b4e0eda75acfa762ce38acac522e | 608 | py | Python | vault_cli/metadata.py | irvansemestanya/vault-cli | 220989a336e999ba770761fcdc4e6d829d644230 | ["Apache-2.0"] | 52 | 2018-04-11T12:51:42.000Z | 2022-03-29T12:51:54.000Z | vault_cli/metadata.py | irvansemestanya/vault-cli | 220989a336e999ba770761fcdc4e6d829d644230 | ["Apache-2.0"] | 186 | 2018-04-19T13:12:43.000Z | 2022-01-27T08:39:45.000Z | vault_cli/metadata.py | irvansemestanya/vault-cli | 220989a336e999ba770761fcdc4e6d829d644230 | ["Apache-2.0"] | 15 | 2018-04-19T09:40:48.000Z | 2021-08-24T14:42:36.000Z |
import io
from distutils import dist
from typing import Mapping, Optional
import pkg_resources
def extract_metadata() -> Mapping[str, Optional[str]]:
    """Read author/license/version info for the installed vault-cli package."""
distribution = pkg_resources.get_distribution("vault-cli")
metadata_str = distribution.get_metadata(distribution.PKG_INFO)
metadata_obj = dist.DistributionMetadata()
metadata_obj.read_pkg_file(io.StringIO(metadata_str))
return {
"author": metadata_obj.author,
"email": metadata_obj.author_email,
"license": metadata_obj.license,
"url": metadata_obj.url,
"version": metadata_obj.version,
}
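# Hedged usage sketch (not part of the original module); it assumes vault-cli
# is installed in the current environment. On Python 3.8+ the same fields are
# also available without the deprecated pkg_resources/distutils pair via
# importlib.metadata, e.g. importlib.metadata.metadata("vault-cli")["Version"].
if __name__ == "__main__":
    for field, value in extract_metadata().items():
        print(f"{field}: {value}")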
| 27.636364 | 67 | 0.720395 |
4ae42e4f3b92e40f20f44c3232ebf4a1e8e6989b | 149,145 | py | Python | solver.py | LigninTools/lignet2.0 | 62ebc786b2a4c85cf60f59b1e3347d170fa5081a | ["BSD-2-Clause"] | null | null | null | solver.py | LigninTools/lignet2.0 | 62ebc786b2a4c85cf60f59b1e3347d170fa5081a | ["BSD-2-Clause"] | null | null | null | solver.py | LigninTools/lignet2.0 | 62ebc786b2a4c85cf60f59b1e3347d170fa5081a | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-C", "--PLIGC", type=float,
help="mol number of C in input", required=True)
parser.add_argument("-H", "--PLIGH", type=float,
help="mol number of H in input", required=True)
parser.add_argument("-O", "--PLIGO", type=float,
help="mol number of O in input", required=True)
parser.add_argument("-t", "--T0", type=float, default=298.15,
help="starting temperature", required=False)
parser.add_argument("-m", "--max_temperature", type=float,
help="maximum pyrolysis temperature", default=float('nan'))
parser.add_argument("-r", "--heating_rate", type=float, default=2.7,
help="heating rate in K/s")
parser.add_argument("-s", "--end_time", type=float, default=600,
help="end_time in seconds")
args = parser.parse_args()
print("Solver arguments are:")
print("\tPLIGC\t%f" % args.PLIGC)
print("\tPLIGH\t%f" % args.PLIGH)
print("\tPLIGO\t%f" % args.PLIGO)
print("\ttemperature\t%f" % args.T0)
print("\tmax temperature\t%f" % args.max_temperature)
print("\theating rate\t%f" % args.heating_rate)
print("\tend time (s)\t%f\n" % args.end_time)
T0 = args.T0
PLIGC = args.PLIGC
PLIGH = args.PLIGH
PLIGO = args.PLIGO
alpha = args.heating_rate
T_max = args.max_temperature
end_time = args.end_time
from scipy.integrate import odeint
import time
import numpy as np

# Stop integrating either at end_time or once the linear temperature ramp
# would reach T_max, whichever comes first (T_max defaults to NaN when no
# maximum temperature is given, in which case only end_time applies).
if np.isnan(T_max):
    stoptime = end_time
else:
    stoptime = min((T_max - T0) / alpha, end_time)
def ODEs(y, t, p):
T, ADIO, ADIOM2, ALD3, C10H2, C10H2M2, C10H2M4, C2H6, C3H4O, C3H4O2, C3H6, C3H6O2, C3H8O2, CH2CO, CH3CHO, CH3OH, CH4, CHAR, CO, CO2, COUMARYL, ETOH, H2, H2O, KET, KETD, KETDM2, KETM2, LIG, LIGC, LIGH, LIGM2, LIGO, MGUAI, OH, PADIO, PADIOM2, PC2H2, PCH2OH, PCH2P, PCH3, PCHO, PCHOHP, PCHP2, PCOH, PCOHP2, PCOS, PFET3, PFET3M2, PH2, PHENOL, PKETM2, PLIG, PLIGC, PLIGH, PLIGM2, PLIGO, PRADIO, PRADIOM2, PRFET3, PRFET3M2, PRKETM2, PRLIGH, PRLIGH2, PRLIGM2A, RADIO, RADIOM2, RC3H3O, RC3H5O2, RC3H7O2, RCH3, RCH3O, RKET, RKETM2, RLIGA, RLIGB, RLIGH, RLIGM2A, RLIGM2B, RMGUAI, RPHENOL, RPHENOX, RPHENOXM2, SYNAPYL, VADIO, VADIOM2, VCOUMARYL, VKET, VKETD, VKETDM2, VKETM2, VMGUAI, VPHENOL, VSYNAPYL = y
    alpha, R, A0, n0, E0, A1, n1, E1, A2, n2, E2, A3, n3, E3, A4, n4, E4, A5, n5, E5, A6, n6, E6, A7, n7, E7, A8, n8, E8, A9, n9, E9, A10, n10, E10, A11, n11, E11, A12, n12, E12, A13, n13, E13, A14, n14, E14, A15, n15, E15, A16, n16, E16, A17, n17, E17, A18, n18, E18, A19, n19, E19, A20, n20, E20, A21, n21, E21, A22, n22, E22, A23, n23, E23, A24, n24, E24, A25, n25, E25, A26, n26, E26, A27, n27, E27, A28, n28, E28, A29, n29, E29, A30, n30, E30, A31, n31, E31, A32, n32, E32, A33, n33, E33, A34, n34, E34, A35, n35, E35, A36, n36, E36, A37, n37, E37, A38, n38, E38, A39, n39, E39, A40, n40, E40, A41, n41, E41, A42, n42, E42, A43, n43, E43, A44, n44, E44, A45, n45, E45, A46, n46, E46, A47, n47, E47, A48, n48, E48, A49, n49, E49, A50, n50, E50, A51, n51, E51, A52, n52, E52, A53, n53, E53, A54, n54, E54, A55, n55, E55, A56, n56, E56, A57, n57, E57, A58, n58, E58, A59, n59, E59, A60, n60, E60, A61, n61, E61, A62, n62, E62, A63, n63, E63, A64, n64, E64, A65, n65, E65, A66, n66, E66, A67, n67, E67, A68, n68, E68, A69, n69, E69, A70, n70, E70, A71, n71, E71, A72, n72, E72, A73, n73, E73, A74, n74, E74, A75, n75, E75, A76, n76, E76, A77, n77, E77, A78, n78, E78, A79, n79, E79, A80, n80, E80, A81, n81, E81, A82, n82, E82, A83, n83, E83, A84, n84, E84, A85, n85, E85, A86, n86, E86, A87, n87, E87, A88, n88, E88, A89, n89, E89, A90, n90, E90, A91, n91, E91, A92, n92, E92, A93, n93, E93, A94, n94, E94, A95, n95, E95, A96, n96, E96, A97, n97, E97, A98, n98, E98, A99, n99, E99, A100, n100, E100, A101, n101, E101, A102, n102, E102, A103, n103, E103, A104, n104, E104, A105, n105, E105, A106, n106, E106, A107, n107, E107, A108, n108, E108, A109, n109, E109, A110, n110, E110, A111, n111, E111, A112, n112, E112, A113, n113, E113, A114, n114, E114, A115, n115, E115, A116, n116, E116, A117, n117, E117, A118, n118, E118, A119, n119, E119, A120, n120, E120, A121, n121, E121, A122, n122, E122, A123, n123, E123, A124, n124, E124, A125, n125, E125, A126, n126, E126, A127, n127, E127, A128, n128, E128, A129, n129, E129, A130, n130, E130, A131, n131, E131, A132, n132, E132, A133, n133, E133, A134, n134, E134, A135, n135, E135, A136, n136, E136, A137, n137, E137, A138, n138, E138, A139, n139, E139, A140, n140, E140, A141, n141, E141, A142, n142, E142, A143, n143, E143, A144, n144, E144, A145, n145, E145, A146, n146, E146, A147, n147, E147, A148, n148, E148, A149, n149, E149, A150, n150, E150, A151, n151, E151, A152, n152, E152, A153, n153, E153, A154, n154, E154, A155, n155, E155, A156, n156, E156, A157, n157, E157, A158, n158, E158, A159, n159, E159, A160, n160, E160, A161, n161, E161, A162, n162, E162, A163, n163, E163, A164, n164, E164, A165, n165, E165, A166, n166, E166, A167, n167, E167, A168, n168, E168, A169, n169, E169, A170, n170, E170, A171, n171, E171, A172, n172, E172, A173, n173, E173, A174, n174, E174, A175, n175, E175, A176, n176, E176, A177, n177, E177, A178, n178, E178, A179, n179, E179, A180, n180, E180, A181, n181, E181, A182, n182, E182, A183, n183, E183, A184, n184, E184, A185, n185, E185, A186, n186, E186, A187, n187, E187, A188, n188, E188, A189, n189, E189, A190, n190, E190, A191, n191, E191, A192, n192, E192, A193, n193, E193, A194, n194, E194, A195, n195, E195, A196, n196, E196, A197, n197, E197, A198, n198, E198, A199, n199, E199, A200, n200, E200, A201, n201, E201, A202, n202, E202, A203, n203, E203, A204, n204, E204, A205, n205, E205, A206, n206, E206, A207, n207, E207, A208, n208, E208, A209, n209, E209, A210, n210, E210, A211, n211, E211, A212, n212, E212, A213, n213, E213, A214, n214, E214, \
A215, n215, E215, A216, n216, E216, A217, n217, E217, A218, n218, E218, A219, n219, E219, A220, n220, E220, A221, n221, E221, A222, n222, E222, A223, n223, E223, A224, n224, E224, A225, n225, E225, A226, n226, E226, A227, n227, E227, A228, n228, E228, A229, n229, E229, A230, n230, E230, A231, n231, E231, A232, n232, E232, A233, n233, E233, A234, n234, E234, A235, n235, E235, A236, n236, E236, A237, n237, E237, A238, n238, E238, A239, n239, E239, A240, n240, E240, A241, n241, E241, A242, n242, E242, A243, n243, E243, A244, n244, E244, A245, n245, E245, A246, n246, E246, A247, n247, E247, A248, n248, E248, A249, n249, E249, A250, n250, E250, A251, n251, E251, A252, n252, E252, A253, n253, E253, A254, n254, E254, A255, n255, E255, A256, n256, E256, A257, n257, E257, A258, n258, E258, A259, n259, E259, A260, n260, E260, A261, n261, E261, A262, n262, E262, A263, n263, E263, A264, n264, E264, A265, n265, E265, A266, n266, E266, A267, n267, E267, A268, n268, E268, A269, n269, E269, A270, n270, E270, A271, n271, E271, A272, n272, E272, A273, n273, E273, A274, n274, E274, A275, n275, E275, A276, n276, E276, A277, n277, E277, A278, n278, E278, A279, n279, E279, A280, n280, E280, A281, n281, E281, A282, n282, E282, A283, n283, E283, A284, n284, E284, A285, n285, E285, A286, n286, E286, A287, n287, E287, A288, n288, E288, A289, n289, E289, A290, n290, E290, A291, n291, E291, A292, n292, E292, A293, n293, E293, A294, n294, E294, A295, n295, E295, A296, n296, E296, A297, n297, E297, A298, n298, E298, A299, n299, E299, A300, n300, E300, A301, n301, E301, A302, n302, E302, A303, n303, E303, A304, n304, E304, A305, n305, E305, A306, n306, E306, A307, n307, E307, A308, n308, E308, A309, n309, E309, A310, n310, E310, A311, n311, E311, A312, n312, E312, A313, n313, E313, A314, n314, E314, A315, n315, E315, A316, n316, E316, A317, n317, E317, A318, n318, E318, A319, n319, E319, A320, n320, E320, A321, n321, E321, A322, n322, E322, A323, n323, E323, A324, n324, E324, A325, n325, E325, A326, n326, E326, A327, n327, E327, A328, n328, E328, A329, n329, E329, A330, n330, E330, A331, n331, E331, A332, n332, E332, A333, n333, E333, A334, n334, E334, A335, n335, E335, A336, n336, E336, A337, n337, E337, A338, n338, E338, A339, n339, E339, A340, n340, E340, A341, n341, E341, A342, n342, E342, A343, n343, E343, A344, n344, E344, A345, n345, E345, A346, n346, E346, A347, n347, E347, A348, n348, E348, A349, n349, E349, A350, n350, E350, A351, n351, E351, A352, n352, E352, A353, n353, E353, A354, n354, E354, A355, n355, E355, A356, n356, E356, A357, n357, E357, A358, n358, E358, A359, n359, E359, A360, n360, E360, A361, n361, E361, A362, n362, E362, A363, n363, E363, A364, n364, E364, A365, n365, E365, A366, n366, E366, A367, n367, E367, A368, n368, E368, A369, n369, E369, A370, n370, E370, A371, n371, E371, A372, n372, E372, A373, n373, E373, A374, n374, E374, A375, n375, E375, A376, n376, E376, A377, n377, E377, A378, n378, E378, A379, n379, E379, A380, n380, E380, A381, n381, E381, A382, n382, E382, A383, n383, E383, A384, n384, E384, A385, n385, E385, A386, n386, E386, A387, n387, E387, A388, n388, E388, A389, n389, E389, A390, n390, E390, A391, n391, E391, A392, n392, E392, A393, n393, E393, A394, n394, E394, A395, n395, E395, A396, n396, E396, A397, n397, E397, A398, n398, E398, A399, n399, E399, A400, n400, E400, A401, n401, E401, A402, n402, E402, A403, n403, E403, A404, n404, E404, A405, n405, E405 = p
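    # Every entry below is a net mass-action production rate: each term uses a
    # modified-Arrhenius rate constant k_i = A_i * T**n_i * exp(-E_i / (R * T))
    # multiplied by the reactant concentrations; the first entry is simply
    # dT/dt = alpha, the constant heating rate.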
dydt = [alpha,
-1*A40 * T**n40 * np.exp(-1*E40/R/T) * ADIO**1.0 * RPHENOX**1.0 -1*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 -1*A90 * T**n90 * np.exp(-1*E90/R/T) * ADIO**1.0 +1*A132 * T**n132 * np.exp(-1*E132/R/T) * RADIO**1.0 * LIGH**1.0 +1*A153 * T**n153 * np.exp(-1*E153/R/T) * RADIO**1.0 * PLIGH**1.0 +1*A174 * T**n174 * np.exp(-1*E174/R/T) * RADIO**1.0 * PLIGM2**1.0 +1*A195 * T**n195 * np.exp(-1*E195/R/T) * RADIO**1.0 * LIGM2**1.0 +1*A216 * T**n216 * np.exp(-1*E216/R/T) * RADIO**1.0 * LIGM2**1.0 +1*A237 * T**n237 * np.exp(-1*E237/R/T) * RADIO**1.0 * PFET3M2**1.0 +1*A258 * T**n258 * np.exp(-1*E258/R/T) * RADIO**1.0 * ADIOM2**1.0 +1*A279 * T**n279 * np.exp(-1*E279/R/T) * RADIO**1.0 * KETM2**1.0 +1*A300 * T**n300 * np.exp(-1*E300/R/T) * RADIO**1.0 * C10H2**1.0 +1*A321 * T**n321 * np.exp(-1*E321/R/T) * RADIO**1.0 * LIG**1.0 +1*A342 * T**n342 * np.exp(-1*E342/R/T) * RADIO**1.0 * LIG**1.0 +1*A363 * T**n363 * np.exp(-1*E363/R/T) * RADIO**1.0 * PFET3**1.0 -1*A364 * T**n364 * np.exp(-1*E364/R/T) * RC3H5O2**1.0 * ADIO**1.0 -1*A365 * T**n365 * np.exp(-1*E365/R/T) * PRFET3**1.0 * ADIO**1.0 -1*A366 * T**n366 * np.exp(-1*E366/R/T) * RC3H7O2**1.0 * ADIO**1.0 -1*A367 * T**n367 * np.exp(-1*E367/R/T) * RADIOM2**1.0 * ADIO**1.0 -1*A368 * T**n368 * np.exp(-1*E368/R/T) * PRFET3M2**1.0 * ADIO**1.0 -1*A369 * T**n369 * np.exp(-1*E369/R/T) * PRLIGH**1.0 * ADIO**1.0 -1*A370 * T**n370 * np.exp(-1*E370/R/T) * RLIGM2B**1.0 * ADIO**1.0 -1*A371 * T**n371 * np.exp(-1*E371/R/T) * RLIGM2A**1.0 * ADIO**1.0 -1*A372 * T**n372 * np.exp(-1*E372/R/T) * RCH3**1.0 * ADIO**1.0 -1*A373 * T**n373 * np.exp(-1*E373/R/T) * PRKETM2**1.0 * ADIO**1.0 -1*A374 * T**n374 * np.exp(-1*E374/R/T) * RKET**1.0 * ADIO**1.0 -1*A375 * T**n375 * np.exp(-1*E375/R/T) * PRADIO**1.0 * ADIO**1.0 -1*A376 * T**n376 * np.exp(-1*E376/R/T) * RC3H3O**1.0 * ADIO**1.0 -1*A377 * T**n377 * np.exp(-1*E377/R/T) * RLIGB**1.0 * ADIO**1.0 -1*A378 * T**n378 * np.exp(-1*E378/R/T) * RLIGA**1.0 * ADIO**1.0 -1*A379 * T**n379 * np.exp(-1*E379/R/T) * PRADIOM2**1.0 * ADIO**1.0 -1*A380 * T**n380 * np.exp(-1*E380/R/T) * RMGUAI**1.0 * ADIO**1.0 -1*A381 * T**n381 * np.exp(-1*E381/R/T) * OH**1.0 * ADIO**1.0 -1*A382 * T**n382 * np.exp(-1*E382/R/T) * RCH3O**1.0 * ADIO**1.0 -1*A383 * T**n383 * np.exp(-1*E383/R/T) * RPHENOL**1.0 * ADIO**1.0 -1*A384 * T**n384 * np.exp(-1*E384/R/T) * RADIO**1.0 * ADIO**1.0 +1*A384 * T**n384 * np.exp(-1*E384/R/T) * RADIO**1.0 * ADIO**1.0 +1*A405 * T**n405 * np.exp(-1*E405/R/T) * RADIO**1.0 * KET**1.0,
-1*A32 * T**n32 * np.exp(-1*E32/R/T) * ADIOM2**1.0 * RPHENOXM2**1.0 -1*A36 * T**n36 * np.exp(-1*E36/R/T) * ADIOM2**1.0 * RPHENOX**1.0 -1*A84 * T**n84 * np.exp(-1*E84/R/T) * ADIOM2**1.0 +1*A115 * T**n115 * np.exp(-1*E115/R/T) * RADIOM2**1.0 * LIGH**1.0 +1*A136 * T**n136 * np.exp(-1*E136/R/T) * RADIOM2**1.0 * PLIGH**1.0 +1*A157 * T**n157 * np.exp(-1*E157/R/T) * RADIOM2**1.0 * PLIGM2**1.0 +1*A178 * T**n178 * np.exp(-1*E178/R/T) * RADIOM2**1.0 * LIGM2**1.0 +1*A199 * T**n199 * np.exp(-1*E199/R/T) * RADIOM2**1.0 * LIGM2**1.0 +1*A220 * T**n220 * np.exp(-1*E220/R/T) * RADIOM2**1.0 * PFET3M2**1.0 -1*A238 * T**n238 * np.exp(-1*E238/R/T) * RC3H5O2**1.0 * ADIOM2**1.0 -1*A239 * T**n239 * np.exp(-1*E239/R/T) * PRFET3**1.0 * ADIOM2**1.0 -1*A240 * T**n240 * np.exp(-1*E240/R/T) * RC3H7O2**1.0 * ADIOM2**1.0 -1*A241 * T**n241 * np.exp(-1*E241/R/T) * RADIOM2**1.0 * ADIOM2**1.0 +1*A241 * T**n241 * np.exp(-1*E241/R/T) * RADIOM2**1.0 * ADIOM2**1.0 -1*A242 * T**n242 * np.exp(-1*E242/R/T) * PRFET3M2**1.0 * ADIOM2**1.0 -1*A243 * T**n243 * np.exp(-1*E243/R/T) * PRLIGH**1.0 * ADIOM2**1.0 -1*A244 * T**n244 * np.exp(-1*E244/R/T) * RLIGM2B**1.0 * ADIOM2**1.0 -1*A245 * T**n245 * np.exp(-1*E245/R/T) * RLIGM2A**1.0 * ADIOM2**1.0 -1*A246 * T**n246 * np.exp(-1*E246/R/T) * RCH3**1.0 * ADIOM2**1.0 -1*A247 * T**n247 * np.exp(-1*E247/R/T) * PRKETM2**1.0 * ADIOM2**1.0 -1*A248 * T**n248 * np.exp(-1*E248/R/T) * RKET**1.0 * ADIOM2**1.0 -1*A249 * T**n249 * np.exp(-1*E249/R/T) * PRADIO**1.0 * ADIOM2**1.0 -1*A250 * T**n250 * np.exp(-1*E250/R/T) * RC3H3O**1.0 * ADIOM2**1.0 -1*A251 * T**n251 * np.exp(-1*E251/R/T) * RLIGB**1.0 * ADIOM2**1.0 -1*A252 * T**n252 * np.exp(-1*E252/R/T) * RLIGA**1.0 * ADIOM2**1.0 -1*A253 * T**n253 * np.exp(-1*E253/R/T) * PRADIOM2**1.0 * ADIOM2**1.0 -1*A254 * T**n254 * np.exp(-1*E254/R/T) * RMGUAI**1.0 * ADIOM2**1.0 -1*A255 * T**n255 * np.exp(-1*E255/R/T) * OH**1.0 * ADIOM2**1.0 -1*A256 * T**n256 * np.exp(-1*E256/R/T) * RCH3O**1.0 * ADIOM2**1.0 -1*A257 * T**n257 * np.exp(-1*E257/R/T) * RPHENOL**1.0 * ADIOM2**1.0 -1*A258 * T**n258 * np.exp(-1*E258/R/T) * RADIO**1.0 * ADIOM2**1.0 +1*A262 * T**n262 * np.exp(-1*E262/R/T) * RADIOM2**1.0 * KETM2**1.0 +1*A283 * T**n283 * np.exp(-1*E283/R/T) * RADIOM2**1.0 * C10H2**1.0 +1*A304 * T**n304 * np.exp(-1*E304/R/T) * RADIOM2**1.0 * LIG**1.0 +1*A325 * T**n325 * np.exp(-1*E325/R/T) * RADIOM2**1.0 * LIG**1.0 +1*A346 * T**n346 * np.exp(-1*E346/R/T) * RADIOM2**1.0 * PFET3**1.0 +1*A367 * T**n367 * np.exp(-1*E367/R/T) * RADIOM2**1.0 * ADIO**1.0 +1*A388 * T**n388 * np.exp(-1*E388/R/T) * RADIOM2**1.0 * KET**1.0,
+1*A12 * T**n12 * np.exp(-1*E12/R/T) * RLIGH**1.0 +1*A13 * T**n13 * np.exp(-1*E13/R/T) * PRLIGH2**1.0,
+0.5*A11 * T**n11 * np.exp(-1*E11/R/T) * RPHENOX**1.0 +0.5*A29 * T**n29 * np.exp(-1*E29/R/T) * C10H2M2**1.0 +0.5*A36 * T**n36 * np.exp(-1*E36/R/T) * ADIOM2**1.0 * RPHENOX**1.0 +0.5*A37 * T**n37 * np.exp(-1*E37/R/T) * KETM2**1.0 * RPHENOX**1.0 +0.5*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 +0.5*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 +1*A40 * T**n40 * np.exp(-1*E40/R/T) * ADIO**1.0 * RPHENOX**1.0 +1*A41 * T**n41 * np.exp(-1*E41/R/T) * KET**1.0 * RPHENOX**1.0 +1*A42 * T**n42 * np.exp(-1*E42/R/T) * KETD**1.0 * RPHENOX**1.0 +1*A43 * T**n43 * np.exp(-1*E43/R/T) * COUMARYL**1.0 * RPHENOX**1.0 +0.5*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 +0.5*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 +0.5*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 +0.5*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 +0.5*A49 * T**n49 * np.exp(-1*E49/R/T) * C10H2M2**1.0 * RPHENOXM2**1.0 +0.5*A50 * T**n50 * np.exp(-1*E50/R/T) * C10H2M4**1.0 * RPHENOX**1.0 +1*A51 * T**n51 * np.exp(-1*E51/R/T) * C10H2M2**1.0 * RPHENOX**1.0 +0.5*A52 * T**n52 * np.exp(-1*E52/R/T) * RCH3O**1.0 * RPHENOX**1.0 +0.5*A55 * T**n55 * np.exp(-1*E55/R/T) * RPHENOX**1.0 * RCH3**1.0 +1.5*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 +1*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 +2*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 +2*A76 * T**n76 * np.exp(-1*E76/R/T) * RLIGA**2.0 +1*A77 * T**n77 * np.exp(-1*E77/R/T) * RKET**2.0 +1*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 +1*A80 * T**n80 * np.exp(-1*E80/R/T) * RPHENOX**1.0 * RPHENOL**1.0 +0.5*A81 * T**n81 * np.exp(-1*E81/R/T) * RPHENOX**1.0 * RC3H3O**1.0 +0.5*A82 * T**n82 * np.exp(-1*E82/R/T) * RPHENOX**1.0 * CHAR**1.0 +1*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0 -1*A280 * T**n280 * np.exp(-1*E280/R/T) * RC3H5O2**1.0 * C10H2**1.0 +0.5*A280 * T**n280 * np.exp(-1*E280/R/T) * RC3H5O2**1.0 * C10H2**1.0 -1*A281 * T**n281 * np.exp(-1*E281/R/T) * PRFET3**1.0 * C10H2**1.0 +0.5*A281 * T**n281 * np.exp(-1*E281/R/T) * PRFET3**1.0 * C10H2**1.0 -1*A282 * T**n282 * np.exp(-1*E282/R/T) * RC3H7O2**1.0 * C10H2**1.0 +0.5*A282 * T**n282 * np.exp(-1*E282/R/T) * RC3H7O2**1.0 * C10H2**1.0 -1*A283 * T**n283 * np.exp(-1*E283/R/T) * RADIOM2**1.0 * C10H2**1.0 +0.5*A283 * T**n283 * np.exp(-1*E283/R/T) * RADIOM2**1.0 * C10H2**1.0 -1*A284 * T**n284 * np.exp(-1*E284/R/T) * PRFET3M2**1.0 * C10H2**1.0 +0.5*A284 * T**n284 * np.exp(-1*E284/R/T) * PRFET3M2**1.0 * C10H2**1.0 -1*A285 * T**n285 * np.exp(-1*E285/R/T) * PRLIGH**1.0 * C10H2**1.0 +0.5*A285 * T**n285 * np.exp(-1*E285/R/T) * PRLIGH**1.0 * C10H2**1.0 -1*A286 * T**n286 * np.exp(-1*E286/R/T) * RLIGM2B**1.0 * C10H2**1.0 +0.5*A286 * T**n286 * np.exp(-1*E286/R/T) * RLIGM2B**1.0 * C10H2**1.0 -1*A287 * T**n287 * np.exp(-1*E287/R/T) * RLIGM2A**1.0 * C10H2**1.0 +0.5*A287 * T**n287 * np.exp(-1*E287/R/T) * RLIGM2A**1.0 * C10H2**1.0 -1*A288 * T**n288 * np.exp(-1*E288/R/T) * RCH3**1.0 * C10H2**1.0 +0.5*A288 * T**n288 * np.exp(-1*E288/R/T) * RCH3**1.0 * C10H2**1.0 -1*A289 * T**n289 * np.exp(-1*E289/R/T) * PRKETM2**1.0 * C10H2**1.0 +0.5*A289 * T**n289 * np.exp(-1*E289/R/T) * PRKETM2**1.0 * C10H2**1.0 -1*A290 * T**n290 * np.exp(-1*E290/R/T) * RKET**1.0 * C10H2**1.0 +0.5*A290 * T**n290 * np.exp(-1*E290/R/T) * RKET**1.0 * C10H2**1.0 -1*A291 * T**n291 * np.exp(-1*E291/R/T) * PRADIO**1.0 * C10H2**1.0 +0.5*A291 * T**n291 * np.exp(-1*E291/R/T) * PRADIO**1.0 * C10H2**1.0 -1*A292 * T**n292 * np.exp(-1*E292/R/T) * RC3H3O**1.0 * 
C10H2**1.0 +0.5*A292 * T**n292 * np.exp(-1*E292/R/T) * RC3H3O**1.0 * C10H2**1.0 -1*A293 * T**n293 * np.exp(-1*E293/R/T) * RLIGB**1.0 * C10H2**1.0 +0.5*A293 * T**n293 * np.exp(-1*E293/R/T) * RLIGB**1.0 * C10H2**1.0 -1*A294 * T**n294 * np.exp(-1*E294/R/T) * RLIGA**1.0 * C10H2**1.0 +0.5*A294 * T**n294 * np.exp(-1*E294/R/T) * RLIGA**1.0 * C10H2**1.0 -1*A295 * T**n295 * np.exp(-1*E295/R/T) * PRADIOM2**1.0 * C10H2**1.0 +0.5*A295 * T**n295 * np.exp(-1*E295/R/T) * PRADIOM2**1.0 * C10H2**1.0 -1*A296 * T**n296 * np.exp(-1*E296/R/T) * RMGUAI**1.0 * C10H2**1.0 +0.5*A296 * T**n296 * np.exp(-1*E296/R/T) * RMGUAI**1.0 * C10H2**1.0 -1*A297 * T**n297 * np.exp(-1*E297/R/T) * OH**1.0 * C10H2**1.0 +0.5*A297 * T**n297 * np.exp(-1*E297/R/T) * OH**1.0 * C10H2**1.0 -1*A298 * T**n298 * np.exp(-1*E298/R/T) * RCH3O**1.0 * C10H2**1.0 +0.5*A298 * T**n298 * np.exp(-1*E298/R/T) * RCH3O**1.0 * C10H2**1.0 -1*A299 * T**n299 * np.exp(-1*E299/R/T) * RPHENOL**1.0 * C10H2**1.0 +0.5*A299 * T**n299 * np.exp(-1*E299/R/T) * RPHENOL**1.0 * C10H2**1.0 -1*A300 * T**n300 * np.exp(-1*E300/R/T) * RADIO**1.0 * C10H2**1.0 +0.5*A300 * T**n300 * np.exp(-1*E300/R/T) * RADIO**1.0 * C10H2**1.0,
-1*A29 * T**n29 * np.exp(-1*E29/R/T) * C10H2M2**1.0 +0.5*A48 * T**n48 * np.exp(-1*E48/R/T) * C10H2M4**1.0 * RPHENOXM2**1.0 -1*A49 * T**n49 * np.exp(-1*E49/R/T) * C10H2M2**1.0 * RPHENOXM2**1.0 +0.5*A49 * T**n49 * np.exp(-1*E49/R/T) * C10H2M2**1.0 * RPHENOXM2**1.0 +0.5*A50 * T**n50 * np.exp(-1*E50/R/T) * C10H2M4**1.0 * RPHENOX**1.0 -1*A51 * T**n51 * np.exp(-1*E51/R/T) * C10H2M2**1.0 * RPHENOX**1.0 +0.5*A51 * T**n51 * np.exp(-1*E51/R/T) * C10H2M2**1.0 * RPHENOX**1.0,
+0.5*A10 * T**n10 * np.exp(-1*E10/R/T) * RPHENOXM2**1.0 -1*A28 * T**n28 * np.exp(-1*E28/R/T) * C10H2M4**1.0 +0.5*A28 * T**n28 * np.exp(-1*E28/R/T) * C10H2M4**1.0 +1*A32 * T**n32 * np.exp(-1*E32/R/T) * ADIOM2**1.0 * RPHENOXM2**1.0 +1*A33 * T**n33 * np.exp(-1*E33/R/T) * KETM2**1.0 * RPHENOXM2**1.0 +1*A34 * T**n34 * np.exp(-1*E34/R/T) * KETDM2**1.0 * RPHENOXM2**1.0 +1*A35 * T**n35 * np.exp(-1*E35/R/T) * SYNAPYL**1.0 * RPHENOXM2**1.0 +0.5*A36 * T**n36 * np.exp(-1*E36/R/T) * ADIOM2**1.0 * RPHENOX**1.0 +0.5*A37 * T**n37 * np.exp(-1*E37/R/T) * KETM2**1.0 * RPHENOX**1.0 +0.5*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 +0.5*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 +0.5*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 +0.5*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 +0.5*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 +0.5*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 -1*A48 * T**n48 * np.exp(-1*E48/R/T) * C10H2M4**1.0 * RPHENOXM2**1.0 +1*A48 * T**n48 * np.exp(-1*E48/R/T) * C10H2M4**1.0 * RPHENOXM2**1.0 +0.5*A49 * T**n49 * np.exp(-1*E49/R/T) * C10H2M2**1.0 * RPHENOXM2**1.0 -1*A50 * T**n50 * np.exp(-1*E50/R/T) * C10H2M4**1.0 * RPHENOX**1.0 +0.5*A50 * T**n50 * np.exp(-1*E50/R/T) * C10H2M4**1.0 * RPHENOX**1.0 +0.5*A53 * T**n53 * np.exp(-1*E53/R/T) * RCH3O**1.0 * RPHENOXM2**1.0 +0.5*A54 * T**n54 * np.exp(-1*E54/R/T) * RPHENOXM2**1.0 * RCH3**1.0 +1*A60 * T**n60 * np.exp(-1*E60/R/T) * RADIOM2**2.0 +2*A61 * T**n61 * np.exp(-1*E61/R/T) * RLIGM2B**2.0 +2*A62 * T**n62 * np.exp(-1*E62/R/T) * RLIGM2A**2.0 +1*A63 * T**n63 * np.exp(-1*E63/R/T) * RMGUAI**2.0 +1*A64 * T**n64 * np.exp(-1*E64/R/T) * RKETM2**2.0 +1*A65 * T**n65 * np.exp(-1*E65/R/T) * PRFET3M2**1.0 * PRFET3M2**1.0 +2*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 +0.5*A83 * T**n83 * np.exp(-1*E83/R/T) * RPHENOXM2**1.0 * CHAR**1.0 +1*A110 * T**n110 * np.exp(-1*E110/R/T) * PRADIOM2**2.0,
+1*A70 * T**n70 * np.exp(-1*E70/R/T) * RCH3**2.0,
+1*A124 * T**n124 * np.exp(-1*E124/R/T) * RC3H3O**1.0 * LIGH**1.0 +1*A145 * T**n145 * np.exp(-1*E145/R/T) * RC3H3O**1.0 * PLIGH**1.0 +1*A166 * T**n166 * np.exp(-1*E166/R/T) * RC3H3O**1.0 * PLIGM2**1.0 +1*A187 * T**n187 * np.exp(-1*E187/R/T) * RC3H3O**1.0 * LIGM2**1.0 +1*A208 * T**n208 * np.exp(-1*E208/R/T) * RC3H3O**1.0 * LIGM2**1.0 +1*A229 * T**n229 * np.exp(-1*E229/R/T) * RC3H3O**1.0 * PFET3M2**1.0 +1*A250 * T**n250 * np.exp(-1*E250/R/T) * RC3H3O**1.0 * ADIOM2**1.0 +1*A271 * T**n271 * np.exp(-1*E271/R/T) * RC3H3O**1.0 * KETM2**1.0 +1*A292 * T**n292 * np.exp(-1*E292/R/T) * RC3H3O**1.0 * C10H2**1.0 +1*A313 * T**n313 * np.exp(-1*E313/R/T) * RC3H3O**1.0 * LIG**1.0 +1*A334 * T**n334 * np.exp(-1*E334/R/T) * RC3H3O**1.0 * LIG**1.0 +1*A355 * T**n355 * np.exp(-1*E355/R/T) * RC3H3O**1.0 * PFET3**1.0 +1*A376 * T**n376 * np.exp(-1*E376/R/T) * RC3H3O**1.0 * ADIO**1.0 +1*A397 * T**n397 * np.exp(-1*E397/R/T) * RC3H3O**1.0 * KET**1.0,
+1*A18 * T**n18 * np.exp(-1*E18/R/T) * PRFET3M2**1.0 +1*A22 * T**n22 * np.exp(-1*E22/R/T) * PRFET3**1.0,
+1*A1 * T**n1 * np.exp(-1*E1/R/T) * LIGH**1.0,
+1*A14 * T**n14 * np.exp(-1*E14/R/T) * RADIOM2**1.0 +1*A19 * T**n19 * np.exp(-1*E19/R/T) * RADIO**1.0 +1*A112 * T**n112 * np.exp(-1*E112/R/T) * RC3H5O2**1.0 * LIGH**1.0 +1*A133 * T**n133 * np.exp(-1*E133/R/T) * RC3H5O2**1.0 * PLIGH**1.0 +1*A154 * T**n154 * np.exp(-1*E154/R/T) * RC3H5O2**1.0 * PLIGM2**1.0 +1*A175 * T**n175 * np.exp(-1*E175/R/T) * RC3H5O2**1.0 * LIGM2**1.0 +1*A196 * T**n196 * np.exp(-1*E196/R/T) * RC3H5O2**1.0 * LIGM2**1.0 +1*A217 * T**n217 * np.exp(-1*E217/R/T) * RC3H5O2**1.0 * PFET3M2**1.0 +1*A238 * T**n238 * np.exp(-1*E238/R/T) * RC3H5O2**1.0 * ADIOM2**1.0 +1*A259 * T**n259 * np.exp(-1*E259/R/T) * RC3H5O2**1.0 * KETM2**1.0 +1*A280 * T**n280 * np.exp(-1*E280/R/T) * RC3H5O2**1.0 * C10H2**1.0 +1*A301 * T**n301 * np.exp(-1*E301/R/T) * RC3H5O2**1.0 * LIG**1.0 +1*A322 * T**n322 * np.exp(-1*E322/R/T) * RC3H5O2**1.0 * LIG**1.0 +1*A343 * T**n343 * np.exp(-1*E343/R/T) * RC3H5O2**1.0 * PFET3**1.0 +1*A364 * T**n364 * np.exp(-1*E364/R/T) * RC3H5O2**1.0 * ADIO**1.0 +1*A385 * T**n385 * np.exp(-1*E385/R/T) * RC3H5O2**1.0 * KET**1.0,
+1*A114 * T**n114 * np.exp(-1*E114/R/T) * RC3H7O2**1.0 * LIGH**1.0 +1*A135 * T**n135 * np.exp(-1*E135/R/T) * RC3H7O2**1.0 * PLIGH**1.0 +1*A156 * T**n156 * np.exp(-1*E156/R/T) * RC3H7O2**1.0 * PLIGM2**1.0 +1*A177 * T**n177 * np.exp(-1*E177/R/T) * RC3H7O2**1.0 * LIGM2**1.0 +1*A198 * T**n198 * np.exp(-1*E198/R/T) * RC3H7O2**1.0 * LIGM2**1.0 +1*A219 * T**n219 * np.exp(-1*E219/R/T) * RC3H7O2**1.0 * PFET3M2**1.0 +1*A240 * T**n240 * np.exp(-1*E240/R/T) * RC3H7O2**1.0 * ADIOM2**1.0 +1*A261 * T**n261 * np.exp(-1*E261/R/T) * RC3H7O2**1.0 * KETM2**1.0 +1*A282 * T**n282 * np.exp(-1*E282/R/T) * RC3H7O2**1.0 * C10H2**1.0 +1*A303 * T**n303 * np.exp(-1*E303/R/T) * RC3H7O2**1.0 * LIG**1.0 +1*A324 * T**n324 * np.exp(-1*E324/R/T) * RC3H7O2**1.0 * LIG**1.0 +1*A345 * T**n345 * np.exp(-1*E345/R/T) * RC3H7O2**1.0 * PFET3**1.0 +1*A366 * T**n366 * np.exp(-1*E366/R/T) * RC3H7O2**1.0 * ADIO**1.0 +1*A387 * T**n387 * np.exp(-1*E387/R/T) * RC3H7O2**1.0 * KET**1.0,
+1*A30 * T**n30 * np.exp(-1*E30/R/T) * PLIGC**1.0 +1*A108 * T**n108 * np.exp(-1*E108/R/T) * LIGC**1.0,
+1*A27 * T**n27 * np.exp(-1*E27/R/T) * RC3H7O2**1.0 +1*A57 * T**n57 * np.exp(-1*E57/R/T) * RCH3O**2.0,
+1*A69 * T**n69 * np.exp(-1*E69/R/T) * OH**1.0 * RCH3**1.0 +1*A130 * T**n130 * np.exp(-1*E130/R/T) * RCH3O**1.0 * LIGH**1.0 +1*A151 * T**n151 * np.exp(-1*E151/R/T) * RCH3O**1.0 * PLIGH**1.0 +1*A172 * T**n172 * np.exp(-1*E172/R/T) * RCH3O**1.0 * PLIGM2**1.0 +1*A193 * T**n193 * np.exp(-1*E193/R/T) * RCH3O**1.0 * LIGM2**1.0 +1*A214 * T**n214 * np.exp(-1*E214/R/T) * RCH3O**1.0 * LIGM2**1.0 +1*A235 * T**n235 * np.exp(-1*E235/R/T) * RCH3O**1.0 * PFET3M2**1.0 +1*A256 * T**n256 * np.exp(-1*E256/R/T) * RCH3O**1.0 * ADIOM2**1.0 +1*A277 * T**n277 * np.exp(-1*E277/R/T) * RCH3O**1.0 * KETM2**1.0 +1*A298 * T**n298 * np.exp(-1*E298/R/T) * RCH3O**1.0 * C10H2**1.0 +1*A319 * T**n319 * np.exp(-1*E319/R/T) * RCH3O**1.0 * LIG**1.0 +1*A340 * T**n340 * np.exp(-1*E340/R/T) * RCH3O**1.0 * LIG**1.0 +1*A361 * T**n361 * np.exp(-1*E361/R/T) * RCH3O**1.0 * PFET3**1.0 +1*A382 * T**n382 * np.exp(-1*E382/R/T) * RCH3O**1.0 * ADIO**1.0 +1*A403 * T**n403 * np.exp(-1*E403/R/T) * RCH3O**1.0 * KET**1.0,
+1*A120 * T**n120 * np.exp(-1*E120/R/T) * RCH3**1.0 * LIGH**1.0 +1*A141 * T**n141 * np.exp(-1*E141/R/T) * RCH3**1.0 * PLIGH**1.0 +1*A162 * T**n162 * np.exp(-1*E162/R/T) * RCH3**1.0 * PLIGM2**1.0 +1*A183 * T**n183 * np.exp(-1*E183/R/T) * RCH3**1.0 * LIGM2**1.0 +1*A204 * T**n204 * np.exp(-1*E204/R/T) * RCH3**1.0 * LIGM2**1.0 +1*A225 * T**n225 * np.exp(-1*E225/R/T) * RCH3**1.0 * PFET3M2**1.0 +1*A246 * T**n246 * np.exp(-1*E246/R/T) * RCH3**1.0 * ADIOM2**1.0 +1*A267 * T**n267 * np.exp(-1*E267/R/T) * RCH3**1.0 * KETM2**1.0 +1*A288 * T**n288 * np.exp(-1*E288/R/T) * RCH3**1.0 * C10H2**1.0 +1*A309 * T**n309 * np.exp(-1*E309/R/T) * RCH3**1.0 * LIG**1.0 +1*A330 * T**n330 * np.exp(-1*E330/R/T) * RCH3**1.0 * LIG**1.0 +1*A351 * T**n351 * np.exp(-1*E351/R/T) * RCH3**1.0 * PFET3**1.0 +1*A372 * T**n372 * np.exp(-1*E372/R/T) * RCH3**1.0 * ADIO**1.0 +1*A393 * T**n393 * np.exp(-1*E393/R/T) * RCH3**1.0 * KET**1.0,
+0.2*A61 * T**n61 * np.exp(-1*E61/R/T) * RLIGM2B**2.0 +0.2*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 -1*A82 * T**n82 * np.exp(-1*E82/R/T) * RPHENOX**1.0 * CHAR**1.0 +1*A82 * T**n82 * np.exp(-1*E82/R/T) * RPHENOX**1.0 * CHAR**1.0 -1*A83 * T**n83 * np.exp(-1*E83/R/T) * RPHENOXM2**1.0 * CHAR**1.0 +1*A83 * T**n83 * np.exp(-1*E83/R/T) * RPHENOXM2**1.0 * CHAR**1.0 +0.2*A97 * T**n97 * np.exp(-1*E97/R/T) * PC2H2**1.0 +0.1*A100 * T**n100 * np.exp(-1*E100/R/T) * PCOHP2**1.0 +0.1*A103 * T**n103 * np.exp(-1*E103/R/T) * PCHP2**1.0 +0.5*A280 * T**n280 * np.exp(-1*E280/R/T) * RC3H5O2**1.0 * C10H2**1.0 +0.5*A281 * T**n281 * np.exp(-1*E281/R/T) * PRFET3**1.0 * C10H2**1.0 +0.5*A282 * T**n282 * np.exp(-1*E282/R/T) * RC3H7O2**1.0 * C10H2**1.0 +0.5*A283 * T**n283 * np.exp(-1*E283/R/T) * RADIOM2**1.0 * C10H2**1.0 +0.5*A284 * T**n284 * np.exp(-1*E284/R/T) * PRFET3M2**1.0 * C10H2**1.0 +0.5*A285 * T**n285 * np.exp(-1*E285/R/T) * PRLIGH**1.0 * C10H2**1.0 +0.5*A286 * T**n286 * np.exp(-1*E286/R/T) * RLIGM2B**1.0 * C10H2**1.0 +0.5*A287 * T**n287 * np.exp(-1*E287/R/T) * RLIGM2A**1.0 * C10H2**1.0 +0.5*A288 * T**n288 * np.exp(-1*E288/R/T) * RCH3**1.0 * C10H2**1.0 +0.5*A289 * T**n289 * np.exp(-1*E289/R/T) * PRKETM2**1.0 * C10H2**1.0 +0.5*A290 * T**n290 * np.exp(-1*E290/R/T) * RKET**1.0 * C10H2**1.0 +0.5*A291 * T**n291 * np.exp(-1*E291/R/T) * PRADIO**1.0 * C10H2**1.0 +0.5*A292 * T**n292 * np.exp(-1*E292/R/T) * RC3H3O**1.0 * C10H2**1.0 +0.5*A293 * T**n293 * np.exp(-1*E293/R/T) * RLIGB**1.0 * C10H2**1.0 +0.5*A294 * T**n294 * np.exp(-1*E294/R/T) * RLIGA**1.0 * C10H2**1.0 +0.5*A295 * T**n295 * np.exp(-1*E295/R/T) * PRADIOM2**1.0 * C10H2**1.0 +0.5*A296 * T**n296 * np.exp(-1*E296/R/T) * RMGUAI**1.0 * C10H2**1.0 +0.5*A297 * T**n297 * np.exp(-1*E297/R/T) * OH**1.0 * C10H2**1.0 +0.5*A298 * T**n298 * np.exp(-1*E298/R/T) * RCH3O**1.0 * C10H2**1.0 +0.5*A299 * T**n299 * np.exp(-1*E299/R/T) * RPHENOL**1.0 * C10H2**1.0 +0.5*A300 * T**n300 * np.exp(-1*E300/R/T) * RADIO**1.0 * C10H2**1.0,
+1*A10 * T**n10 * np.exp(-1*E10/R/T) * RPHENOXM2**1.0 +1*A11 * T**n11 * np.exp(-1*E11/R/T) * RPHENOX**1.0 +1*A94 * T**n94 * np.exp(-1*E94/R/T) * PCOS**1.0 +1*A95 * T**n95 * np.exp(-1*E95/R/T) * PCOH**1.0 +1*A111 * T**n111 * np.exp(-1*E111/R/T) * PCHO**1.0,
+1*A31 * T**n31 * np.exp(-1*E31/R/T) * PLIGO**1.0 +1*A52 * T**n52 * np.exp(-1*E52/R/T) * RCH3O**1.0 * RPHENOX**1.0 +1*A53 * T**n53 * np.exp(-1*E53/R/T) * RCH3O**1.0 * RPHENOXM2**1.0 +1*A107 * T**n107 * np.exp(-1*E107/R/T) * LIGO**1.0,
+1*A25 * T**n25 * np.exp(-1*E25/R/T) * RADIO**1.0 -1*A43 * T**n43 * np.exp(-1*E43/R/T) * COUMARYL**1.0 * RPHENOX**1.0 -1*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 -1*A89 * T**n89 * np.exp(-1*E89/R/T) * COUMARYL**1.0,
+1*A56 * T**n56 * np.exp(-1*E56/R/T) * RCH3O**1.0 * RCH3**1.0,
+0.5*A10 * T**n10 * np.exp(-1*E10/R/T) * RPHENOXM2**1.0 +1.5*A11 * T**n11 * np.exp(-1*E11/R/T) * RPHENOX**1.0 +0.5*A32 * T**n32 * np.exp(-1*E32/R/T) * ADIOM2**1.0 * RPHENOXM2**1.0 +0.5*A33 * T**n33 * np.exp(-1*E33/R/T) * KETM2**1.0 * RPHENOXM2**1.0 +0.5*A34 * T**n34 * np.exp(-1*E34/R/T) * KETDM2**1.0 * RPHENOXM2**1.0 +1*A35 * T**n35 * np.exp(-1*E35/R/T) * SYNAPYL**1.0 * RPHENOXM2**1.0 +0.5*A37 * T**n37 * np.exp(-1*E37/R/T) * KETM2**1.0 * RPHENOX**1.0 +0.5*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 +1.5*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 +1.5*A40 * T**n40 * np.exp(-1*E40/R/T) * ADIO**1.0 * RPHENOX**1.0 +1.5*A41 * T**n41 * np.exp(-1*E41/R/T) * KET**1.0 * RPHENOX**1.0 +1.5*A42 * T**n42 * np.exp(-1*E42/R/T) * KETD**1.0 * RPHENOX**1.0 +2.5*A43 * T**n43 * np.exp(-1*E43/R/T) * COUMARYL**1.0 * RPHENOX**1.0 +1.5*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 +1.5*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 +1.5*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 +1.5*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 +0.5*A48 * T**n48 * np.exp(-1*E48/R/T) * C10H2M4**1.0 * RPHENOXM2**1.0 +0.5*A49 * T**n49 * np.exp(-1*E49/R/T) * C10H2M2**1.0 * RPHENOXM2**1.0 +1.5*A50 * T**n50 * np.exp(-1*E50/R/T) * C10H2M4**1.0 * RPHENOX**1.0 +1.5*A51 * T**n51 * np.exp(-1*E51/R/T) * C10H2M2**1.0 * RPHENOX**1.0 +1.5*A52 * T**n52 * np.exp(-1*E52/R/T) * RCH3O**1.0 * RPHENOX**1.0 +0.5*A53 * T**n53 * np.exp(-1*E53/R/T) * RCH3O**1.0 * RPHENOXM2**1.0 +2*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 +1*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 +2*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 +2*A76 * T**n76 * np.exp(-1*E76/R/T) * RLIGA**2.0 +2*A77 * T**n77 * np.exp(-1*E77/R/T) * RKET**2.0 +1*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 +1*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 +1.5*A80 * T**n80 * np.exp(-1*E80/R/T) * RPHENOX**1.0 * RPHENOL**1.0 +1*A81 * T**n81 * np.exp(-1*E81/R/T) * RPHENOX**1.0 * RC3H3O**1.0 +1.5*A82 * T**n82 * np.exp(-1*E82/R/T) * RPHENOX**1.0 * CHAR**1.0 +0.5*A83 * T**n83 * np.exp(-1*E83/R/T) * RPHENOXM2**1.0 * CHAR**1.0 +1*A96 * T**n96 * np.exp(-1*E96/R/T) * PH2**1.0 +1*A97 * T**n97 * np.exp(-1*E97/R/T) * PC2H2**1.0 +0.5*A102 * T**n102 * np.exp(-1*E102/R/T) * PCH2P**1.0 +0.5*A103 * T**n103 * np.exp(-1*E103/R/T) * PCHP2**1.0 +1*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0,
+1*A57 * T**n57 * np.exp(-1*E57/R/T) * RCH3O**2.0 +2*A62 * T**n62 * np.exp(-1*E62/R/T) * RLIGM2A**2.0 +2*A66 * T**n66 * np.exp(-1*E66/R/T) * RC3H7O2**2.0 +2*A67 * T**n67 * np.exp(-1*E67/R/T) * RC3H5O2**2.0 +1*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 +2*A76 * T**n76 * np.exp(-1*E76/R/T) * RLIGA**2.0 +4*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 +1*A129 * T**n129 * np.exp(-1*E129/R/T) * OH**1.0 * LIGH**1.0 +1*A150 * T**n150 * np.exp(-1*E150/R/T) * OH**1.0 * PLIGH**1.0 +1*A171 * T**n171 * np.exp(-1*E171/R/T) * OH**1.0 * PLIGM2**1.0 +1*A192 * T**n192 * np.exp(-1*E192/R/T) * OH**1.0 * LIGM2**1.0 +1*A213 * T**n213 * np.exp(-1*E213/R/T) * OH**1.0 * LIGM2**1.0 +1*A234 * T**n234 * np.exp(-1*E234/R/T) * OH**1.0 * PFET3M2**1.0 +1*A255 * T**n255 * np.exp(-1*E255/R/T) * OH**1.0 * ADIOM2**1.0 +1*A276 * T**n276 * np.exp(-1*E276/R/T) * OH**1.0 * KETM2**1.0 +1*A297 * T**n297 * np.exp(-1*E297/R/T) * OH**1.0 * C10H2**1.0 +1*A318 * T**n318 * np.exp(-1*E318/R/T) * OH**1.0 * LIG**1.0 +1*A339 * T**n339 * np.exp(-1*E339/R/T) * OH**1.0 * LIG**1.0 +1*A360 * T**n360 * np.exp(-1*E360/R/T) * OH**1.0 * PFET3**1.0 +1*A381 * T**n381 * np.exp(-1*E381/R/T) * OH**1.0 * ADIO**1.0 +1*A402 * T**n402 * np.exp(-1*E402/R/T) * OH**1.0 * KET**1.0,
+1*A20 * T**n20 * np.exp(-1*E20/R/T) * RLIGA**1.0 -1*A41 * T**n41 * np.exp(-1*E41/R/T) * KET**1.0 * RPHENOX**1.0 -1*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 -1*A91 * T**n91 * np.exp(-1*E91/R/T) * KET**1.0 +1*A122 * T**n122 * np.exp(-1*E122/R/T) * RKET**1.0 * LIGH**1.0 +1*A143 * T**n143 * np.exp(-1*E143/R/T) * RKET**1.0 * PLIGH**1.0 +1*A164 * T**n164 * np.exp(-1*E164/R/T) * RKET**1.0 * PLIGM2**1.0 +1*A185 * T**n185 * np.exp(-1*E185/R/T) * RKET**1.0 * LIGM2**1.0 +1*A206 * T**n206 * np.exp(-1*E206/R/T) * RKET**1.0 * LIGM2**1.0 +1*A227 * T**n227 * np.exp(-1*E227/R/T) * RKET**1.0 * PFET3M2**1.0 +1*A248 * T**n248 * np.exp(-1*E248/R/T) * RKET**1.0 * ADIOM2**1.0 +1*A269 * T**n269 * np.exp(-1*E269/R/T) * RKET**1.0 * KETM2**1.0 +1*A290 * T**n290 * np.exp(-1*E290/R/T) * RKET**1.0 * C10H2**1.0 +1*A311 * T**n311 * np.exp(-1*E311/R/T) * RKET**1.0 * LIG**1.0 +1*A332 * T**n332 * np.exp(-1*E332/R/T) * RKET**1.0 * LIG**1.0 +1*A353 * T**n353 * np.exp(-1*E353/R/T) * RKET**1.0 * PFET3**1.0 +1*A374 * T**n374 * np.exp(-1*E374/R/T) * RKET**1.0 * ADIO**1.0 -1*A385 * T**n385 * np.exp(-1*E385/R/T) * RC3H5O2**1.0 * KET**1.0 -1*A386 * T**n386 * np.exp(-1*E386/R/T) * PRFET3**1.0 * KET**1.0 -1*A387 * T**n387 * np.exp(-1*E387/R/T) * RC3H7O2**1.0 * KET**1.0 -1*A388 * T**n388 * np.exp(-1*E388/R/T) * RADIOM2**1.0 * KET**1.0 -1*A389 * T**n389 * np.exp(-1*E389/R/T) * PRFET3M2**1.0 * KET**1.0 -1*A390 * T**n390 * np.exp(-1*E390/R/T) * PRLIGH**1.0 * KET**1.0 -1*A391 * T**n391 * np.exp(-1*E391/R/T) * RLIGM2B**1.0 * KET**1.0 -1*A392 * T**n392 * np.exp(-1*E392/R/T) * RLIGM2A**1.0 * KET**1.0 -1*A393 * T**n393 * np.exp(-1*E393/R/T) * RCH3**1.0 * KET**1.0 -1*A394 * T**n394 * np.exp(-1*E394/R/T) * PRKETM2**1.0 * KET**1.0 -1*A395 * T**n395 * np.exp(-1*E395/R/T) * RKET**1.0 * KET**1.0 +1*A395 * T**n395 * np.exp(-1*E395/R/T) * RKET**1.0 * KET**1.0 -1*A396 * T**n396 * np.exp(-1*E396/R/T) * PRADIO**1.0 * KET**1.0 -1*A397 * T**n397 * np.exp(-1*E397/R/T) * RC3H3O**1.0 * KET**1.0 -1*A398 * T**n398 * np.exp(-1*E398/R/T) * RLIGB**1.0 * KET**1.0 -1*A399 * T**n399 * np.exp(-1*E399/R/T) * RLIGA**1.0 * KET**1.0 -1*A400 * T**n400 * np.exp(-1*E400/R/T) * PRADIOM2**1.0 * KET**1.0 -1*A401 * T**n401 * np.exp(-1*E401/R/T) * RMGUAI**1.0 * KET**1.0 -1*A402 * T**n402 * np.exp(-1*E402/R/T) * OH**1.0 * KET**1.0 -1*A403 * T**n403 * np.exp(-1*E403/R/T) * RCH3O**1.0 * KET**1.0 -1*A404 * T**n404 * np.exp(-1*E404/R/T) * RPHENOL**1.0 * KET**1.0 -1*A405 * T**n405 * np.exp(-1*E405/R/T) * RADIO**1.0 * KET**1.0,
+1*A26 * T**n26 * np.exp(-1*E26/R/T) * RKET**1.0 -1*A42 * T**n42 * np.exp(-1*E42/R/T) * KETD**1.0 * RPHENOX**1.0 -1*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 -1*A92 * T**n92 * np.exp(-1*E92/R/T) * KETD**1.0,
+1*A9 * T**n9 * np.exp(-1*E9/R/T) * PRKETM2**1.0 +1*A24 * T**n24 * np.exp(-1*E24/R/T) * RKETM2**1.0 -1*A34 * T**n34 * np.exp(-1*E34/R/T) * KETDM2**1.0 * RPHENOXM2**1.0 -1*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 -1*A86 * T**n86 * np.exp(-1*E86/R/T) * KETDM2**1.0,
+1*A15 * T**n15 * np.exp(-1*E15/R/T) * RLIGM2A**1.0 -1*A33 * T**n33 * np.exp(-1*E33/R/T) * KETM2**1.0 * RPHENOXM2**1.0 -1*A37 * T**n37 * np.exp(-1*E37/R/T) * KETM2**1.0 * RPHENOX**1.0 -1*A85 * T**n85 * np.exp(-1*E85/R/T) * KETM2**1.0 -1*A259 * T**n259 * np.exp(-1*E259/R/T) * RC3H5O2**1.0 * KETM2**1.0 -1*A260 * T**n260 * np.exp(-1*E260/R/T) * PRFET3**1.0 * KETM2**1.0 -1*A261 * T**n261 * np.exp(-1*E261/R/T) * RC3H7O2**1.0 * KETM2**1.0 -1*A262 * T**n262 * np.exp(-1*E262/R/T) * RADIOM2**1.0 * KETM2**1.0 -1*A263 * T**n263 * np.exp(-1*E263/R/T) * PRFET3M2**1.0 * KETM2**1.0 -1*A264 * T**n264 * np.exp(-1*E264/R/T) * PRLIGH**1.0 * KETM2**1.0 -1*A265 * T**n265 * np.exp(-1*E265/R/T) * RLIGM2B**1.0 * KETM2**1.0 -1*A266 * T**n266 * np.exp(-1*E266/R/T) * RLIGM2A**1.0 * KETM2**1.0 -1*A267 * T**n267 * np.exp(-1*E267/R/T) * RCH3**1.0 * KETM2**1.0 -1*A268 * T**n268 * np.exp(-1*E268/R/T) * PRKETM2**1.0 * KETM2**1.0 -1*A269 * T**n269 * np.exp(-1*E269/R/T) * RKET**1.0 * KETM2**1.0 -1*A270 * T**n270 * np.exp(-1*E270/R/T) * PRADIO**1.0 * KETM2**1.0 -1*A271 * T**n271 * np.exp(-1*E271/R/T) * RC3H3O**1.0 * KETM2**1.0 -1*A272 * T**n272 * np.exp(-1*E272/R/T) * RLIGB**1.0 * KETM2**1.0 -1*A273 * T**n273 * np.exp(-1*E273/R/T) * RLIGA**1.0 * KETM2**1.0 -1*A274 * T**n274 * np.exp(-1*E274/R/T) * PRADIOM2**1.0 * KETM2**1.0 -1*A275 * T**n275 * np.exp(-1*E275/R/T) * RMGUAI**1.0 * KETM2**1.0 -1*A276 * T**n276 * np.exp(-1*E276/R/T) * OH**1.0 * KETM2**1.0 -1*A277 * T**n277 * np.exp(-1*E277/R/T) * RCH3O**1.0 * KETM2**1.0 -1*A278 * T**n278 * np.exp(-1*E278/R/T) * RPHENOL**1.0 * KETM2**1.0 -1*A279 * T**n279 * np.exp(-1*E279/R/T) * RADIO**1.0 * KETM2**1.0,
-1*A4 * T**n4 * np.exp(-1*E4/R/T) * LIG**1.0 +1*A108 * T**n108 * np.exp(-1*E108/R/T) * LIGC**1.0 +1*A125 * T**n125 * np.exp(-1*E125/R/T) * RLIGB**1.0 * LIGH**1.0 +1*A126 * T**n126 * np.exp(-1*E126/R/T) * RLIGA**1.0 * LIGH**1.0 +1*A146 * T**n146 * np.exp(-1*E146/R/T) * RLIGB**1.0 * PLIGH**1.0 +1*A147 * T**n147 * np.exp(-1*E147/R/T) * RLIGA**1.0 * PLIGH**1.0 +1*A167 * T**n167 * np.exp(-1*E167/R/T) * RLIGB**1.0 * PLIGM2**1.0 +1*A168 * T**n168 * np.exp(-1*E168/R/T) * RLIGA**1.0 * PLIGM2**1.0 +1*A188 * T**n188 * np.exp(-1*E188/R/T) * RLIGB**1.0 * LIGM2**1.0 +1*A189 * T**n189 * np.exp(-1*E189/R/T) * RLIGA**1.0 * LIGM2**1.0 +1*A209 * T**n209 * np.exp(-1*E209/R/T) * RLIGB**1.0 * LIGM2**1.0 +1*A210 * T**n210 * np.exp(-1*E210/R/T) * RLIGA**1.0 * LIGM2**1.0 +1*A230 * T**n230 * np.exp(-1*E230/R/T) * RLIGB**1.0 * PFET3M2**1.0 +1*A231 * T**n231 * np.exp(-1*E231/R/T) * RLIGA**1.0 * PFET3M2**1.0 +1*A251 * T**n251 * np.exp(-1*E251/R/T) * RLIGB**1.0 * ADIOM2**1.0 +1*A252 * T**n252 * np.exp(-1*E252/R/T) * RLIGA**1.0 * ADIOM2**1.0 +1*A272 * T**n272 * np.exp(-1*E272/R/T) * RLIGB**1.0 * KETM2**1.0 +1*A273 * T**n273 * np.exp(-1*E273/R/T) * RLIGA**1.0 * KETM2**1.0 +1*A293 * T**n293 * np.exp(-1*E293/R/T) * RLIGB**1.0 * C10H2**1.0 +1*A294 * T**n294 * np.exp(-1*E294/R/T) * RLIGA**1.0 * C10H2**1.0 -1*A301 * T**n301 * np.exp(-1*E301/R/T) * RC3H5O2**1.0 * LIG**1.0 -1*A302 * T**n302 * np.exp(-1*E302/R/T) * PRFET3**1.0 * LIG**1.0 -1*A303 * T**n303 * np.exp(-1*E303/R/T) * RC3H7O2**1.0 * LIG**1.0 -1*A304 * T**n304 * np.exp(-1*E304/R/T) * RADIOM2**1.0 * LIG**1.0 -1*A305 * T**n305 * np.exp(-1*E305/R/T) * PRFET3M2**1.0 * LIG**1.0 -1*A306 * T**n306 * np.exp(-1*E306/R/T) * PRLIGH**1.0 * LIG**1.0 -1*A307 * T**n307 * np.exp(-1*E307/R/T) * RLIGM2B**1.0 * LIG**1.0 -1*A308 * T**n308 * np.exp(-1*E308/R/T) * RLIGM2A**1.0 * LIG**1.0 -1*A309 * T**n309 * np.exp(-1*E309/R/T) * RCH3**1.0 * LIG**1.0 -1*A310 * T**n310 * np.exp(-1*E310/R/T) * PRKETM2**1.0 * LIG**1.0 -1*A311 * T**n311 * np.exp(-1*E311/R/T) * RKET**1.0 * LIG**1.0 -1*A312 * T**n312 * np.exp(-1*E312/R/T) * PRADIO**1.0 * LIG**1.0 -1*A313 * T**n313 * np.exp(-1*E313/R/T) * RC3H3O**1.0 * LIG**1.0 -1*A314 * T**n314 * np.exp(-1*E314/R/T) * RLIGB**1.0 * LIG**1.0 +1*A314 * T**n314 * np.exp(-1*E314/R/T) * RLIGB**1.0 * LIG**1.0 -1*A315 * T**n315 * np.exp(-1*E315/R/T) * RLIGA**1.0 * LIG**1.0 +1*A315 * T**n315 * np.exp(-1*E315/R/T) * RLIGA**1.0 * LIG**1.0 -1*A316 * T**n316 * np.exp(-1*E316/R/T) * PRADIOM2**1.0 * LIG**1.0 -1*A317 * T**n317 * np.exp(-1*E317/R/T) * RMGUAI**1.0 * LIG**1.0 -1*A318 * T**n318 * np.exp(-1*E318/R/T) * OH**1.0 * LIG**1.0 -1*A319 * T**n319 * np.exp(-1*E319/R/T) * RCH3O**1.0 * LIG**1.0 -1*A320 * T**n320 * np.exp(-1*E320/R/T) * RPHENOL**1.0 * LIG**1.0 -1*A321 * T**n321 * np.exp(-1*E321/R/T) * RADIO**1.0 * LIG**1.0 -1*A322 * T**n322 * np.exp(-1*E322/R/T) * RC3H5O2**1.0 * LIG**1.0 -1*A323 * T**n323 * np.exp(-1*E323/R/T) * PRFET3**1.0 * LIG**1.0 -1*A324 * T**n324 * np.exp(-1*E324/R/T) * RC3H7O2**1.0 * LIG**1.0 -1*A325 * T**n325 * np.exp(-1*E325/R/T) * RADIOM2**1.0 * LIG**1.0 -1*A326 * T**n326 * np.exp(-1*E326/R/T) * PRFET3M2**1.0 * LIG**1.0 -1*A327 * T**n327 * np.exp(-1*E327/R/T) * PRLIGH**1.0 * LIG**1.0 -1*A328 * T**n328 * np.exp(-1*E328/R/T) * RLIGM2B**1.0 * LIG**1.0 -1*A329 * T**n329 * np.exp(-1*E329/R/T) * RLIGM2A**1.0 * LIG**1.0 -1*A330 * T**n330 * np.exp(-1*E330/R/T) * RCH3**1.0 * LIG**1.0 -1*A331 * T**n331 * np.exp(-1*E331/R/T) * PRKETM2**1.0 * LIG**1.0 -1*A332 * T**n332 * np.exp(-1*E332/R/T) * RKET**1.0 * LIG**1.0 -1*A333 * T**n333 * np.exp(-1*E333/R/T) * 
PRADIO**1.0 * LIG**1.0 -1*A334 * T**n334 * np.exp(-1*E334/R/T) * RC3H3O**1.0 * LIG**1.0 -1*A335 * T**n335 * np.exp(-1*E335/R/T) * RLIGB**1.0 * LIG**1.0 +1*A335 * T**n335 * np.exp(-1*E335/R/T) * RLIGB**1.0 * LIG**1.0 -1*A336 * T**n336 * np.exp(-1*E336/R/T) * RLIGA**1.0 * LIG**1.0 +1*A336 * T**n336 * np.exp(-1*E336/R/T) * RLIGA**1.0 * LIG**1.0 -1*A337 * T**n337 * np.exp(-1*E337/R/T) * PRADIOM2**1.0 * LIG**1.0 -1*A338 * T**n338 * np.exp(-1*E338/R/T) * RMGUAI**1.0 * LIG**1.0 -1*A339 * T**n339 * np.exp(-1*E339/R/T) * OH**1.0 * LIG**1.0 -1*A340 * T**n340 * np.exp(-1*E340/R/T) * RCH3O**1.0 * LIG**1.0 -1*A341 * T**n341 * np.exp(-1*E341/R/T) * RPHENOL**1.0 * LIG**1.0 -1*A342 * T**n342 * np.exp(-1*E342/R/T) * RADIO**1.0 * LIG**1.0 +1*A356 * T**n356 * np.exp(-1*E356/R/T) * RLIGB**1.0 * PFET3**1.0 +1*A357 * T**n357 * np.exp(-1*E357/R/T) * RLIGA**1.0 * PFET3**1.0 +1*A377 * T**n377 * np.exp(-1*E377/R/T) * RLIGB**1.0 * ADIO**1.0 +1*A378 * T**n378 * np.exp(-1*E378/R/T) * RLIGA**1.0 * ADIO**1.0 +1*A398 * T**n398 * np.exp(-1*E398/R/T) * RLIGB**1.0 * KET**1.0 +1*A399 * T**n399 * np.exp(-1*E399/R/T) * RLIGA**1.0 * KET**1.0,
+1*A105 * T**n105 * np.exp(-1*E105/R/T) * PLIGC**1.0 -1*A108 * T**n108 * np.exp(-1*E108/R/T) * LIGC**1.0,
-1*A1 * T**n1 * np.exp(-1*E1/R/T) * LIGH**1.0 +1*A104 * T**n104 * np.exp(-1*E104/R/T) * PLIGH**1.0 -1*A112 * T**n112 * np.exp(-1*E112/R/T) * RC3H5O2**1.0 * LIGH**1.0 -1*A113 * T**n113 * np.exp(-1*E113/R/T) * PRFET3**1.0 * LIGH**1.0 -1*A114 * T**n114 * np.exp(-1*E114/R/T) * RC3H7O2**1.0 * LIGH**1.0 -1*A115 * T**n115 * np.exp(-1*E115/R/T) * RADIOM2**1.0 * LIGH**1.0 -1*A116 * T**n116 * np.exp(-1*E116/R/T) * PRFET3M2**1.0 * LIGH**1.0 -1*A117 * T**n117 * np.exp(-1*E117/R/T) * PRLIGH**1.0 * LIGH**1.0 -1*A118 * T**n118 * np.exp(-1*E118/R/T) * RLIGM2B**1.0 * LIGH**1.0 -1*A119 * T**n119 * np.exp(-1*E119/R/T) * RLIGM2A**1.0 * LIGH**1.0 -1*A120 * T**n120 * np.exp(-1*E120/R/T) * RCH3**1.0 * LIGH**1.0 -1*A121 * T**n121 * np.exp(-1*E121/R/T) * PRKETM2**1.0 * LIGH**1.0 -1*A122 * T**n122 * np.exp(-1*E122/R/T) * RKET**1.0 * LIGH**1.0 -1*A123 * T**n123 * np.exp(-1*E123/R/T) * PRADIO**1.0 * LIGH**1.0 -1*A124 * T**n124 * np.exp(-1*E124/R/T) * RC3H3O**1.0 * LIGH**1.0 -1*A125 * T**n125 * np.exp(-1*E125/R/T) * RLIGB**1.0 * LIGH**1.0 -1*A126 * T**n126 * np.exp(-1*E126/R/T) * RLIGA**1.0 * LIGH**1.0 -1*A127 * T**n127 * np.exp(-1*E127/R/T) * PRADIOM2**1.0 * LIGH**1.0 -1*A128 * T**n128 * np.exp(-1*E128/R/T) * RMGUAI**1.0 * LIGH**1.0 -1*A129 * T**n129 * np.exp(-1*E129/R/T) * OH**1.0 * LIGH**1.0 -1*A130 * T**n130 * np.exp(-1*E130/R/T) * RCH3O**1.0 * LIGH**1.0 -1*A131 * T**n131 * np.exp(-1*E131/R/T) * RPHENOL**1.0 * LIGH**1.0 -1*A132 * T**n132 * np.exp(-1*E132/R/T) * RADIO**1.0 * LIGH**1.0,
-1*A2 * T**n2 * np.exp(-1*E2/R/T) * LIGM2**1.0 +1*A107 * T**n107 * np.exp(-1*E107/R/T) * LIGO**1.0 +1*A118 * T**n118 * np.exp(-1*E118/R/T) * RLIGM2B**1.0 * LIGH**1.0 +1*A119 * T**n119 * np.exp(-1*E119/R/T) * RLIGM2A**1.0 * LIGH**1.0 +1*A139 * T**n139 * np.exp(-1*E139/R/T) * RLIGM2B**1.0 * PLIGH**1.0 +1*A140 * T**n140 * np.exp(-1*E140/R/T) * RLIGM2A**1.0 * PLIGH**1.0 +1*A160 * T**n160 * np.exp(-1*E160/R/T) * RLIGM2B**1.0 * PLIGM2**1.0 +1*A161 * T**n161 * np.exp(-1*E161/R/T) * RLIGM2A**1.0 * PLIGM2**1.0 -1*A175 * T**n175 * np.exp(-1*E175/R/T) * RC3H5O2**1.0 * LIGM2**1.0 -1*A176 * T**n176 * np.exp(-1*E176/R/T) * PRFET3**1.0 * LIGM2**1.0 -1*A177 * T**n177 * np.exp(-1*E177/R/T) * RC3H7O2**1.0 * LIGM2**1.0 -1*A178 * T**n178 * np.exp(-1*E178/R/T) * RADIOM2**1.0 * LIGM2**1.0 -1*A179 * T**n179 * np.exp(-1*E179/R/T) * PRFET3M2**1.0 * LIGM2**1.0 -1*A180 * T**n180 * np.exp(-1*E180/R/T) * PRLIGH**1.0 * LIGM2**1.0 -1*A181 * T**n181 * np.exp(-1*E181/R/T) * RLIGM2B**1.0 * LIGM2**1.0 +1*A181 * T**n181 * np.exp(-1*E181/R/T) * RLIGM2B**1.0 * LIGM2**1.0 -1*A182 * T**n182 * np.exp(-1*E182/R/T) * RLIGM2A**1.0 * LIGM2**1.0 +1*A182 * T**n182 * np.exp(-1*E182/R/T) * RLIGM2A**1.0 * LIGM2**1.0 -1*A183 * T**n183 * np.exp(-1*E183/R/T) * RCH3**1.0 * LIGM2**1.0 -1*A184 * T**n184 * np.exp(-1*E184/R/T) * PRKETM2**1.0 * LIGM2**1.0 -1*A185 * T**n185 * np.exp(-1*E185/R/T) * RKET**1.0 * LIGM2**1.0 -1*A186 * T**n186 * np.exp(-1*E186/R/T) * PRADIO**1.0 * LIGM2**1.0 -1*A187 * T**n187 * np.exp(-1*E187/R/T) * RC3H3O**1.0 * LIGM2**1.0 -1*A188 * T**n188 * np.exp(-1*E188/R/T) * RLIGB**1.0 * LIGM2**1.0 -1*A189 * T**n189 * np.exp(-1*E189/R/T) * RLIGA**1.0 * LIGM2**1.0 -1*A190 * T**n190 * np.exp(-1*E190/R/T) * PRADIOM2**1.0 * LIGM2**1.0 -1*A191 * T**n191 * np.exp(-1*E191/R/T) * RMGUAI**1.0 * LIGM2**1.0 -1*A192 * T**n192 * np.exp(-1*E192/R/T) * OH**1.0 * LIGM2**1.0 -1*A193 * T**n193 * np.exp(-1*E193/R/T) * RCH3O**1.0 * LIGM2**1.0 -1*A194 * T**n194 * np.exp(-1*E194/R/T) * RPHENOL**1.0 * LIGM2**1.0 -1*A195 * T**n195 * np.exp(-1*E195/R/T) * RADIO**1.0 * LIGM2**1.0 -1*A196 * T**n196 * np.exp(-1*E196/R/T) * RC3H5O2**1.0 * LIGM2**1.0 -1*A197 * T**n197 * np.exp(-1*E197/R/T) * PRFET3**1.0 * LIGM2**1.0 -1*A198 * T**n198 * np.exp(-1*E198/R/T) * RC3H7O2**1.0 * LIGM2**1.0 -1*A199 * T**n199 * np.exp(-1*E199/R/T) * RADIOM2**1.0 * LIGM2**1.0 -1*A200 * T**n200 * np.exp(-1*E200/R/T) * PRFET3M2**1.0 * LIGM2**1.0 -1*A201 * T**n201 * np.exp(-1*E201/R/T) * PRLIGH**1.0 * LIGM2**1.0 -1*A202 * T**n202 * np.exp(-1*E202/R/T) * RLIGM2B**1.0 * LIGM2**1.0 +1*A202 * T**n202 * np.exp(-1*E202/R/T) * RLIGM2B**1.0 * LIGM2**1.0 -1*A203 * T**n203 * np.exp(-1*E203/R/T) * RLIGM2A**1.0 * LIGM2**1.0 +1*A203 * T**n203 * np.exp(-1*E203/R/T) * RLIGM2A**1.0 * LIGM2**1.0 -1*A204 * T**n204 * np.exp(-1*E204/R/T) * RCH3**1.0 * LIGM2**1.0 -1*A205 * T**n205 * np.exp(-1*E205/R/T) * PRKETM2**1.0 * LIGM2**1.0 -1*A206 * T**n206 * np.exp(-1*E206/R/T) * RKET**1.0 * LIGM2**1.0 -1*A207 * T**n207 * np.exp(-1*E207/R/T) * PRADIO**1.0 * LIGM2**1.0 -1*A208 * T**n208 * np.exp(-1*E208/R/T) * RC3H3O**1.0 * LIGM2**1.0 -1*A209 * T**n209 * np.exp(-1*E209/R/T) * RLIGB**1.0 * LIGM2**1.0 -1*A210 * T**n210 * np.exp(-1*E210/R/T) * RLIGA**1.0 * LIGM2**1.0 -1*A211 * T**n211 * np.exp(-1*E211/R/T) * PRADIOM2**1.0 * LIGM2**1.0 -1*A212 * T**n212 * np.exp(-1*E212/R/T) * RMGUAI**1.0 * LIGM2**1.0 -1*A213 * T**n213 * np.exp(-1*E213/R/T) * OH**1.0 * LIGM2**1.0 -1*A214 * T**n214 * np.exp(-1*E214/R/T) * RCH3O**1.0 * LIGM2**1.0 -1*A215 * T**n215 * np.exp(-1*E215/R/T) * RPHENOL**1.0 * LIGM2**1.0 -1*A216 * T**n216 * 
np.exp(-1*E216/R/T) * RADIO**1.0 * LIGM2**1.0 +1*A223 * T**n223 * np.exp(-1*E223/R/T) * RLIGM2B**1.0 * PFET3M2**1.0 +1*A224 * T**n224 * np.exp(-1*E224/R/T) * RLIGM2A**1.0 * PFET3M2**1.0 +1*A244 * T**n244 * np.exp(-1*E244/R/T) * RLIGM2B**1.0 * ADIOM2**1.0 +1*A245 * T**n245 * np.exp(-1*E245/R/T) * RLIGM2A**1.0 * ADIOM2**1.0 +1*A265 * T**n265 * np.exp(-1*E265/R/T) * RLIGM2B**1.0 * KETM2**1.0 +1*A266 * T**n266 * np.exp(-1*E266/R/T) * RLIGM2A**1.0 * KETM2**1.0 +1*A286 * T**n286 * np.exp(-1*E286/R/T) * RLIGM2B**1.0 * C10H2**1.0 +1*A287 * T**n287 * np.exp(-1*E287/R/T) * RLIGM2A**1.0 * C10H2**1.0 +1*A307 * T**n307 * np.exp(-1*E307/R/T) * RLIGM2B**1.0 * LIG**1.0 +1*A308 * T**n308 * np.exp(-1*E308/R/T) * RLIGM2A**1.0 * LIG**1.0 +1*A328 * T**n328 * np.exp(-1*E328/R/T) * RLIGM2B**1.0 * LIG**1.0 +1*A329 * T**n329 * np.exp(-1*E329/R/T) * RLIGM2A**1.0 * LIG**1.0 +1*A349 * T**n349 * np.exp(-1*E349/R/T) * RLIGM2B**1.0 * PFET3**1.0 +1*A350 * T**n350 * np.exp(-1*E350/R/T) * RLIGM2A**1.0 * PFET3**1.0 +1*A370 * T**n370 * np.exp(-1*E370/R/T) * RLIGM2B**1.0 * ADIO**1.0 +1*A371 * T**n371 * np.exp(-1*E371/R/T) * RLIGM2A**1.0 * ADIO**1.0 +1*A391 * T**n391 * np.exp(-1*E391/R/T) * RLIGM2B**1.0 * KET**1.0 +1*A392 * T**n392 * np.exp(-1*E392/R/T) * RLIGM2A**1.0 * KET**1.0,
+1*A106 * T**n106 * np.exp(-1*E106/R/T) * PLIGO**1.0 -1*A107 * T**n107 * np.exp(-1*E107/R/T) * LIGO**1.0,
-1*A88 * T**n88 * np.exp(-1*E88/R/T) * MGUAI**1.0 +1*A128 * T**n128 * np.exp(-1*E128/R/T) * RMGUAI**1.0 * LIGH**1.0 +1*A149 * T**n149 * np.exp(-1*E149/R/T) * RMGUAI**1.0 * PLIGH**1.0 +1*A170 * T**n170 * np.exp(-1*E170/R/T) * RMGUAI**1.0 * PLIGM2**1.0 +1*A191 * T**n191 * np.exp(-1*E191/R/T) * RMGUAI**1.0 * LIGM2**1.0 +1*A212 * T**n212 * np.exp(-1*E212/R/T) * RMGUAI**1.0 * LIGM2**1.0 +1*A233 * T**n233 * np.exp(-1*E233/R/T) * RMGUAI**1.0 * PFET3M2**1.0 +1*A254 * T**n254 * np.exp(-1*E254/R/T) * RMGUAI**1.0 * ADIOM2**1.0 +1*A275 * T**n275 * np.exp(-1*E275/R/T) * RMGUAI**1.0 * KETM2**1.0 +1*A296 * T**n296 * np.exp(-1*E296/R/T) * RMGUAI**1.0 * C10H2**1.0 +1*A317 * T**n317 * np.exp(-1*E317/R/T) * RMGUAI**1.0 * LIG**1.0 +1*A338 * T**n338 * np.exp(-1*E338/R/T) * RMGUAI**1.0 * LIG**1.0 +1*A359 * T**n359 * np.exp(-1*E359/R/T) * RMGUAI**1.0 * PFET3**1.0 +1*A380 * T**n380 * np.exp(-1*E380/R/T) * RMGUAI**1.0 * ADIO**1.0 +1*A401 * T**n401 * np.exp(-1*E401/R/T) * RMGUAI**1.0 * KET**1.0,
+1*A1 * T**n1 * np.exp(-1*E1/R/T) * LIGH**1.0 +1*A9 * T**n9 * np.exp(-1*E9/R/T) * PRKETM2**1.0 +1*A23 * T**n23 * np.exp(-1*E23/R/T) * RADIOM2**1.0 +1*A24 * T**n24 * np.exp(-1*E24/R/T) * RKETM2**1.0 +1*A25 * T**n25 * np.exp(-1*E25/R/T) * RADIO**1.0 +1*A26 * T**n26 * np.exp(-1*E26/R/T) * RKET**1.0 -1*A69 * T**n69 * np.exp(-1*E69/R/T) * OH**1.0 * RCH3**1.0 +1*A98 * T**n98 * np.exp(-1*E98/R/T) * PCH2OH**1.0 +1*A99 * T**n99 * np.exp(-1*E99/R/T) * PCHOHP**1.0 +1*A100 * T**n100 * np.exp(-1*E100/R/T) * PCOHP2**1.0 -1*A129 * T**n129 * np.exp(-1*E129/R/T) * OH**1.0 * LIGH**1.0 -1*A150 * T**n150 * np.exp(-1*E150/R/T) * OH**1.0 * PLIGH**1.0 -1*A171 * T**n171 * np.exp(-1*E171/R/T) * OH**1.0 * PLIGM2**1.0 -1*A192 * T**n192 * np.exp(-1*E192/R/T) * OH**1.0 * LIGM2**1.0 -1*A213 * T**n213 * np.exp(-1*E213/R/T) * OH**1.0 * LIGM2**1.0 -1*A234 * T**n234 * np.exp(-1*E234/R/T) * OH**1.0 * PFET3M2**1.0 -1*A255 * T**n255 * np.exp(-1*E255/R/T) * OH**1.0 * ADIOM2**1.0 -1*A276 * T**n276 * np.exp(-1*E276/R/T) * OH**1.0 * KETM2**1.0 -1*A297 * T**n297 * np.exp(-1*E297/R/T) * OH**1.0 * C10H2**1.0 -1*A318 * T**n318 * np.exp(-1*E318/R/T) * OH**1.0 * LIG**1.0 -1*A339 * T**n339 * np.exp(-1*E339/R/T) * OH**1.0 * LIG**1.0 -1*A360 * T**n360 * np.exp(-1*E360/R/T) * OH**1.0 * PFET3**1.0 -1*A381 * T**n381 * np.exp(-1*E381/R/T) * OH**1.0 * ADIO**1.0 -1*A402 * T**n402 * np.exp(-1*E402/R/T) * OH**1.0 * KET**1.0,
-1*A7 * T**n7 * np.exp(-1*E7/R/T) * PADIO**1.0 +1*A123 * T**n123 * np.exp(-1*E123/R/T) * PRADIO**1.0 * LIGH**1.0 +1*A144 * T**n144 * np.exp(-1*E144/R/T) * PRADIO**1.0 * PLIGH**1.0 +1*A165 * T**n165 * np.exp(-1*E165/R/T) * PRADIO**1.0 * PLIGM2**1.0 +1*A186 * T**n186 * np.exp(-1*E186/R/T) * PRADIO**1.0 * LIGM2**1.0 +1*A207 * T**n207 * np.exp(-1*E207/R/T) * PRADIO**1.0 * LIGM2**1.0 +1*A228 * T**n228 * np.exp(-1*E228/R/T) * PRADIO**1.0 * PFET3M2**1.0 +1*A249 * T**n249 * np.exp(-1*E249/R/T) * PRADIO**1.0 * ADIOM2**1.0 +1*A270 * T**n270 * np.exp(-1*E270/R/T) * PRADIO**1.0 * KETM2**1.0 +1*A291 * T**n291 * np.exp(-1*E291/R/T) * PRADIO**1.0 * C10H2**1.0 +1*A312 * T**n312 * np.exp(-1*E312/R/T) * PRADIO**1.0 * LIG**1.0 +1*A333 * T**n333 * np.exp(-1*E333/R/T) * PRADIO**1.0 * LIG**1.0 +1*A354 * T**n354 * np.exp(-1*E354/R/T) * PRADIO**1.0 * PFET3**1.0 +1*A375 * T**n375 * np.exp(-1*E375/R/T) * PRADIO**1.0 * ADIO**1.0 +1*A396 * T**n396 * np.exp(-1*E396/R/T) * PRADIO**1.0 * KET**1.0,
-1*A6 * T**n6 * np.exp(-1*E6/R/T) * PADIOM2**1.0 +1*A58 * T**n58 * np.exp(-1*E58/R/T) * RCH3O**1.0 * PRADIOM2**1.0 +1*A71 * T**n71 * np.exp(-1*E71/R/T) * RCH3**1.0 * PRADIOM2**1.0 +1*A127 * T**n127 * np.exp(-1*E127/R/T) * PRADIOM2**1.0 * LIGH**1.0 +1*A148 * T**n148 * np.exp(-1*E148/R/T) * PRADIOM2**1.0 * PLIGH**1.0 +1*A169 * T**n169 * np.exp(-1*E169/R/T) * PRADIOM2**1.0 * PLIGM2**1.0 +1*A190 * T**n190 * np.exp(-1*E190/R/T) * PRADIOM2**1.0 * LIGM2**1.0 +1*A211 * T**n211 * np.exp(-1*E211/R/T) * PRADIOM2**1.0 * LIGM2**1.0 +1*A232 * T**n232 * np.exp(-1*E232/R/T) * PRADIOM2**1.0 * PFET3M2**1.0 +1*A253 * T**n253 * np.exp(-1*E253/R/T) * PRADIOM2**1.0 * ADIOM2**1.0 +1*A274 * T**n274 * np.exp(-1*E274/R/T) * PRADIOM2**1.0 * KETM2**1.0 +1*A295 * T**n295 * np.exp(-1*E295/R/T) * PRADIOM2**1.0 * C10H2**1.0 +1*A316 * T**n316 * np.exp(-1*E316/R/T) * PRADIOM2**1.0 * LIG**1.0 +1*A337 * T**n337 * np.exp(-1*E337/R/T) * PRADIOM2**1.0 * LIG**1.0 +1*A358 * T**n358 * np.exp(-1*E358/R/T) * PRADIOM2**1.0 * PFET3**1.0 +1*A379 * T**n379 * np.exp(-1*E379/R/T) * PRADIOM2**1.0 * ADIO**1.0 +1*A400 * T**n400 * np.exp(-1*E400/R/T) * PRADIOM2**1.0 * KET**1.0,
+2*A62 * T**n62 * np.exp(-1*E62/R/T) * RLIGM2A**2.0 +2*A66 * T**n66 * np.exp(-1*E66/R/T) * RC3H7O2**2.0 +2*A67 * T**n67 * np.exp(-1*E67/R/T) * RC3H5O2**2.0 +2*A68 * T**n68 * np.exp(-1*E68/R/T) * RC3H3O**2.0 +1*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 +2*A76 * T**n76 * np.exp(-1*E76/R/T) * RLIGA**2.0 +2*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 +1*A81 * T**n81 * np.exp(-1*E81/R/T) * RPHENOX**1.0 * RC3H3O**1.0 -1*A97 * T**n97 * np.exp(-1*E97/R/T) * PC2H2**1.0 +0.5*A99 * T**n99 * np.exp(-1*E99/R/T) * PCHOHP**1.0,
+1*A58 * T**n58 * np.exp(-1*E58/R/T) * RCH3O**1.0 * PRADIOM2**1.0 +1*A59 * T**n59 * np.exp(-1*E59/R/T) * RCH3O**1.0 * PRKETM2**1.0 +2*A60 * T**n60 * np.exp(-1*E60/R/T) * RADIOM2**2.0 +2*A61 * T**n61 * np.exp(-1*E61/R/T) * RLIGM2B**2.0 +2*A64 * T**n64 * np.exp(-1*E64/R/T) * RKETM2**2.0 +2*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 +2*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 +2*A77 * T**n77 * np.exp(-1*E77/R/T) * RKET**2.0 -1*A98 * T**n98 * np.exp(-1*E98/R/T) * PCH2OH**1.0 +2*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0 +2*A110 * T**n110 * np.exp(-1*E110/R/T) * PRADIOM2**2.0,
+2*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 +1*A98 * T**n98 * np.exp(-1*E98/R/T) * PCH2OH**1.0 -1*A102 * T**n102 * np.exp(-1*E102/R/T) * PCH2P**1.0,
+1*A54 * T**n54 * np.exp(-1*E54/R/T) * RPHENOXM2**1.0 * RCH3**1.0 +1*A55 * T**n55 * np.exp(-1*E55/R/T) * RPHENOX**1.0 * RCH3**1.0 +1*A71 * T**n71 * np.exp(-1*E71/R/T) * RCH3**1.0 * PRADIOM2**1.0 +1*A72 * T**n72 * np.exp(-1*E72/R/T) * RCH3**1.0 * PRKETM2**1.0 +2*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 -1*A101 * T**n101 * np.exp(-1*E101/R/T) * PCH3**1.0,
+2*A65 * T**n65 * np.exp(-1*E65/R/T) * PRFET3M2**1.0 * PRFET3M2**1.0 +2*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 -1*A111 * T**n111 * np.exp(-1*E111/R/T) * PCHO**1.0,
+2*A60 * T**n60 * np.exp(-1*E60/R/T) * RADIOM2**2.0 +2*A61 * T**n61 * np.exp(-1*E61/R/T) * RLIGM2B**2.0 +2*A65 * T**n65 * np.exp(-1*E65/R/T) * PRFET3M2**1.0 * PRFET3M2**1.0 +2*A66 * T**n66 * np.exp(-1*E66/R/T) * RC3H7O2**2.0 +2*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 +2*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 +2*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 -1*A99 * T**n99 * np.exp(-1*E99/R/T) * PCHOHP**1.0 +2*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0 +2*A110 * T**n110 * np.exp(-1*E110/R/T) * PRADIOM2**2.0,
+2*A60 * T**n60 * np.exp(-1*E60/R/T) * RADIOM2**2.0 +2*A64 * T**n64 * np.exp(-1*E64/R/T) * RKETM2**2.0 +2*A65 * T**n65 * np.exp(-1*E65/R/T) * PRFET3M2**1.0 * PRFET3M2**1.0 +2*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 +2*A77 * T**n77 * np.exp(-1*E77/R/T) * RKET**2.0 +2*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 +2*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 +1*A102 * T**n102 * np.exp(-1*E102/R/T) * PCH2P**1.0 -1*A103 * T**n103 * np.exp(-1*E103/R/T) * PCHP2**1.0 +2*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0 +2*A110 * T**n110 * np.exp(-1*E110/R/T) * PRADIOM2**2.0,
+2*A32 * T**n32 * np.exp(-1*E32/R/T) * ADIOM2**1.0 * RPHENOXM2**1.0 +2*A33 * T**n33 * np.exp(-1*E33/R/T) * KETM2**1.0 * RPHENOXM2**1.0 +2*A34 * T**n34 * np.exp(-1*E34/R/T) * KETDM2**1.0 * RPHENOXM2**1.0 +2*A35 * T**n35 * np.exp(-1*E35/R/T) * SYNAPYL**1.0 * RPHENOXM2**1.0 +1*A36 * T**n36 * np.exp(-1*E36/R/T) * ADIOM2**1.0 * RPHENOX**1.0 +1*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 +1*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 +1*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 +1*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 +1*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 +1*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 +1*A48 * T**n48 * np.exp(-1*E48/R/T) * C10H2M4**1.0 * RPHENOXM2**1.0 +1*A49 * T**n49 * np.exp(-1*E49/R/T) * C10H2M2**1.0 * RPHENOXM2**1.0 +1*A54 * T**n54 * np.exp(-1*E54/R/T) * RPHENOXM2**1.0 * RCH3**1.0 +1*A55 * T**n55 * np.exp(-1*E55/R/T) * RPHENOX**1.0 * RCH3**1.0 +2*A60 * T**n60 * np.exp(-1*E60/R/T) * RADIOM2**2.0 +4*A61 * T**n61 * np.exp(-1*E61/R/T) * RLIGM2B**2.0 +4*A62 * T**n62 * np.exp(-1*E62/R/T) * RLIGM2A**2.0 +2*A63 * T**n63 * np.exp(-1*E63/R/T) * RMGUAI**2.0 +4*A64 * T**n64 * np.exp(-1*E64/R/T) * RKETM2**2.0 +2*A65 * T**n65 * np.exp(-1*E65/R/T) * PRFET3M2**1.0 * PRFET3M2**1.0 +2*A67 * T**n67 * np.exp(-1*E67/R/T) * RC3H5O2**2.0 +2*A68 * T**n68 * np.exp(-1*E68/R/T) * RC3H3O**2.0 +1*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 +3*A77 * T**n77 * np.exp(-1*E77/R/T) * RKET**2.0 +4*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 +1*A81 * T**n81 * np.exp(-1*E81/R/T) * RPHENOX**1.0 * RC3H3O**1.0 -1*A95 * T**n95 * np.exp(-1*E95/R/T) * PCOH**1.0 +1*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0 +2*A110 * T**n110 * np.exp(-1*E110/R/T) * PRADIOM2**2.0,
+2*A62 * T**n62 * np.exp(-1*E62/R/T) * RLIGM2A**2.0 +1*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 +2*A76 * T**n76 * np.exp(-1*E76/R/T) * RLIGA**2.0 +2*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 -1*A100 * T**n100 * np.exp(-1*E100/R/T) * PCOHP2**1.0,
+1*A36 * T**n36 * np.exp(-1*E36/R/T) * ADIOM2**1.0 * RPHENOX**1.0 +2*A37 * T**n37 * np.exp(-1*E37/R/T) * KETM2**1.0 * RPHENOX**1.0 +1*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 +1*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 +2*A40 * T**n40 * np.exp(-1*E40/R/T) * ADIO**1.0 * RPHENOX**1.0 +2*A41 * T**n41 * np.exp(-1*E41/R/T) * KET**1.0 * RPHENOX**1.0 +2*A42 * T**n42 * np.exp(-1*E42/R/T) * KETD**1.0 * RPHENOX**1.0 +2*A43 * T**n43 * np.exp(-1*E43/R/T) * COUMARYL**1.0 * RPHENOX**1.0 +1*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 +1*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 +1*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 +1*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 +1*A50 * T**n50 * np.exp(-1*E50/R/T) * C10H2M4**1.0 * RPHENOX**1.0 +1*A51 * T**n51 * np.exp(-1*E51/R/T) * C10H2M2**1.0 * RPHENOX**1.0 +3*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 +1*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 +1*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 +3*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 +4*A76 * T**n76 * np.exp(-1*E76/R/T) * RLIGA**2.0 +1*A77 * T**n77 * np.exp(-1*E77/R/T) * RKET**2.0 +2*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 +2*A80 * T**n80 * np.exp(-1*E80/R/T) * RPHENOX**1.0 * RPHENOL**1.0 +1*A81 * T**n81 * np.exp(-1*E81/R/T) * RPHENOX**1.0 * RC3H3O**1.0 +1*A82 * T**n82 * np.exp(-1*E82/R/T) * RPHENOX**1.0 * CHAR**1.0 +1*A83 * T**n83 * np.exp(-1*E83/R/T) * RPHENOXM2**1.0 * CHAR**1.0 -1*A94 * T**n94 * np.exp(-1*E94/R/T) * PCOS**1.0 +1*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0,
+1*A21 * T**n21 * np.exp(-1*E21/R/T) * RLIGB**1.0 +1*A113 * T**n113 * np.exp(-1*E113/R/T) * PRFET3**1.0 * LIGH**1.0 +1*A134 * T**n134 * np.exp(-1*E134/R/T) * PRFET3**1.0 * PLIGH**1.0 +1*A155 * T**n155 * np.exp(-1*E155/R/T) * PRFET3**1.0 * PLIGM2**1.0 +1*A176 * T**n176 * np.exp(-1*E176/R/T) * PRFET3**1.0 * LIGM2**1.0 +1*A197 * T**n197 * np.exp(-1*E197/R/T) * PRFET3**1.0 * LIGM2**1.0 +1*A218 * T**n218 * np.exp(-1*E218/R/T) * PRFET3**1.0 * PFET3M2**1.0 +1*A239 * T**n239 * np.exp(-1*E239/R/T) * PRFET3**1.0 * ADIOM2**1.0 +1*A260 * T**n260 * np.exp(-1*E260/R/T) * PRFET3**1.0 * KETM2**1.0 +1*A281 * T**n281 * np.exp(-1*E281/R/T) * PRFET3**1.0 * C10H2**1.0 +1*A302 * T**n302 * np.exp(-1*E302/R/T) * PRFET3**1.0 * LIG**1.0 +1*A323 * T**n323 * np.exp(-1*E323/R/T) * PRFET3**1.0 * LIG**1.0 -1*A343 * T**n343 * np.exp(-1*E343/R/T) * RC3H5O2**1.0 * PFET3**1.0 -1*A344 * T**n344 * np.exp(-1*E344/R/T) * PRFET3**1.0 * PFET3**1.0 +1*A344 * T**n344 * np.exp(-1*E344/R/T) * PRFET3**1.0 * PFET3**1.0 -1*A345 * T**n345 * np.exp(-1*E345/R/T) * RC3H7O2**1.0 * PFET3**1.0 -1*A346 * T**n346 * np.exp(-1*E346/R/T) * RADIOM2**1.0 * PFET3**1.0 -1*A347 * T**n347 * np.exp(-1*E347/R/T) * PRFET3M2**1.0 * PFET3**1.0 -1*A348 * T**n348 * np.exp(-1*E348/R/T) * PRLIGH**1.0 * PFET3**1.0 -1*A349 * T**n349 * np.exp(-1*E349/R/T) * RLIGM2B**1.0 * PFET3**1.0 -1*A350 * T**n350 * np.exp(-1*E350/R/T) * RLIGM2A**1.0 * PFET3**1.0 -1*A351 * T**n351 * np.exp(-1*E351/R/T) * RCH3**1.0 * PFET3**1.0 -1*A352 * T**n352 * np.exp(-1*E352/R/T) * PRKETM2**1.0 * PFET3**1.0 -1*A353 * T**n353 * np.exp(-1*E353/R/T) * RKET**1.0 * PFET3**1.0 -1*A354 * T**n354 * np.exp(-1*E354/R/T) * PRADIO**1.0 * PFET3**1.0 -1*A355 * T**n355 * np.exp(-1*E355/R/T) * RC3H3O**1.0 * PFET3**1.0 -1*A356 * T**n356 * np.exp(-1*E356/R/T) * RLIGB**1.0 * PFET3**1.0 -1*A357 * T**n357 * np.exp(-1*E357/R/T) * RLIGA**1.0 * PFET3**1.0 -1*A358 * T**n358 * np.exp(-1*E358/R/T) * PRADIOM2**1.0 * PFET3**1.0 -1*A359 * T**n359 * np.exp(-1*E359/R/T) * RMGUAI**1.0 * PFET3**1.0 -1*A360 * T**n360 * np.exp(-1*E360/R/T) * OH**1.0 * PFET3**1.0 -1*A361 * T**n361 * np.exp(-1*E361/R/T) * RCH3O**1.0 * PFET3**1.0 -1*A362 * T**n362 * np.exp(-1*E362/R/T) * RPHENOL**1.0 * PFET3**1.0 -1*A363 * T**n363 * np.exp(-1*E363/R/T) * RADIO**1.0 * PFET3**1.0 +1*A365 * T**n365 * np.exp(-1*E365/R/T) * PRFET3**1.0 * ADIO**1.0 +1*A386 * T**n386 * np.exp(-1*E386/R/T) * PRFET3**1.0 * KET**1.0,
+1*A17 * T**n17 * np.exp(-1*E17/R/T) * RLIGM2B**1.0 +1*A116 * T**n116 * np.exp(-1*E116/R/T) * PRFET3M2**1.0 * LIGH**1.0 +1*A137 * T**n137 * np.exp(-1*E137/R/T) * PRFET3M2**1.0 * PLIGH**1.0 +1*A158 * T**n158 * np.exp(-1*E158/R/T) * PRFET3M2**1.0 * PLIGM2**1.0 +1*A179 * T**n179 * np.exp(-1*E179/R/T) * PRFET3M2**1.0 * LIGM2**1.0 +1*A200 * T**n200 * np.exp(-1*E200/R/T) * PRFET3M2**1.0 * LIGM2**1.0 -1*A217 * T**n217 * np.exp(-1*E217/R/T) * RC3H5O2**1.0 * PFET3M2**1.0 -1*A218 * T**n218 * np.exp(-1*E218/R/T) * PRFET3**1.0 * PFET3M2**1.0 -1*A219 * T**n219 * np.exp(-1*E219/R/T) * RC3H7O2**1.0 * PFET3M2**1.0 -1*A220 * T**n220 * np.exp(-1*E220/R/T) * RADIOM2**1.0 * PFET3M2**1.0 -1*A221 * T**n221 * np.exp(-1*E221/R/T) * PRFET3M2**1.0 * PFET3M2**1.0 +1*A221 * T**n221 * np.exp(-1*E221/R/T) * PRFET3M2**1.0 * PFET3M2**1.0 -1*A222 * T**n222 * np.exp(-1*E222/R/T) * PRLIGH**1.0 * PFET3M2**1.0 -1*A223 * T**n223 * np.exp(-1*E223/R/T) * RLIGM2B**1.0 * PFET3M2**1.0 -1*A224 * T**n224 * np.exp(-1*E224/R/T) * RLIGM2A**1.0 * PFET3M2**1.0 -1*A225 * T**n225 * np.exp(-1*E225/R/T) * RCH3**1.0 * PFET3M2**1.0 -1*A226 * T**n226 * np.exp(-1*E226/R/T) * PRKETM2**1.0 * PFET3M2**1.0 -1*A227 * T**n227 * np.exp(-1*E227/R/T) * RKET**1.0 * PFET3M2**1.0 -1*A228 * T**n228 * np.exp(-1*E228/R/T) * PRADIO**1.0 * PFET3M2**1.0 -1*A229 * T**n229 * np.exp(-1*E229/R/T) * RC3H3O**1.0 * PFET3M2**1.0 -1*A230 * T**n230 * np.exp(-1*E230/R/T) * RLIGB**1.0 * PFET3M2**1.0 -1*A231 * T**n231 * np.exp(-1*E231/R/T) * RLIGA**1.0 * PFET3M2**1.0 -1*A232 * T**n232 * np.exp(-1*E232/R/T) * PRADIOM2**1.0 * PFET3M2**1.0 -1*A233 * T**n233 * np.exp(-1*E233/R/T) * RMGUAI**1.0 * PFET3M2**1.0 -1*A234 * T**n234 * np.exp(-1*E234/R/T) * OH**1.0 * PFET3M2**1.0 -1*A235 * T**n235 * np.exp(-1*E235/R/T) * RCH3O**1.0 * PFET3M2**1.0 -1*A236 * T**n236 * np.exp(-1*E236/R/T) * RPHENOL**1.0 * PFET3M2**1.0 -1*A237 * T**n237 * np.exp(-1*E237/R/T) * RADIO**1.0 * PFET3M2**1.0 +1*A242 * T**n242 * np.exp(-1*E242/R/T) * PRFET3M2**1.0 * ADIOM2**1.0 +1*A263 * T**n263 * np.exp(-1*E263/R/T) * PRFET3M2**1.0 * KETM2**1.0 +1*A284 * T**n284 * np.exp(-1*E284/R/T) * PRFET3M2**1.0 * C10H2**1.0 +1*A305 * T**n305 * np.exp(-1*E305/R/T) * PRFET3M2**1.0 * LIG**1.0 +1*A326 * T**n326 * np.exp(-1*E326/R/T) * PRFET3M2**1.0 * LIG**1.0 +1*A347 * T**n347 * np.exp(-1*E347/R/T) * PRFET3M2**1.0 * PFET3**1.0 +1*A368 * T**n368 * np.exp(-1*E368/R/T) * PRFET3M2**1.0 * ADIO**1.0 +1*A389 * T**n389 * np.exp(-1*E389/R/T) * PRFET3M2**1.0 * KET**1.0,
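# (inferred) The next element is the molecular-hydrogen balance, d[PH2]/dt
# (consumption term -1*A96*...*PH2); its fractional coefficients (0.5, 1.5)
# presumably encode lumped hydrogen stoichiometry in the reduced mechanism.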
+1*A32 * T**n32 * np.exp(-1*E32/R/T) * ADIOM2**1.0 * RPHENOXM2**1.0 +1*A33 * T**n33 * np.exp(-1*E33/R/T) * KETM2**1.0 * RPHENOXM2**1.0 +1*A34 * T**n34 * np.exp(-1*E34/R/T) * KETDM2**1.0 * RPHENOXM2**1.0 +1.5*A35 * T**n35 * np.exp(-1*E35/R/T) * SYNAPYL**1.0 * RPHENOXM2**1.0 +1*A36 * T**n36 * np.exp(-1*E36/R/T) * ADIOM2**1.0 * RPHENOX**1.0 +2*A37 * T**n37 * np.exp(-1*E37/R/T) * KETM2**1.0 * RPHENOX**1.0 +2*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 +2*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 +2*A40 * T**n40 * np.exp(-1*E40/R/T) * ADIO**1.0 * RPHENOX**1.0 +2*A41 * T**n41 * np.exp(-1*E41/R/T) * KET**1.0 * RPHENOX**1.0 +2*A42 * T**n42 * np.exp(-1*E42/R/T) * KETD**1.0 * RPHENOX**1.0 +2*A43 * T**n43 * np.exp(-1*E43/R/T) * COUMARYL**1.0 * RPHENOX**1.0 +1*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 +1*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 +1*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 +2*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 +0.5*A54 * T**n54 * np.exp(-1*E54/R/T) * RPHENOXM2**1.0 * RCH3**1.0 +1.5*A55 * T**n55 * np.exp(-1*E55/R/T) * RPHENOX**1.0 * RCH3**1.0 +2*A60 * T**n60 * np.exp(-1*E60/R/T) * RADIOM2**2.0 +3*A61 * T**n61 * np.exp(-1*E61/R/T) * RLIGM2B**2.0 +3*A62 * T**n62 * np.exp(-1*E62/R/T) * RLIGM2A**2.0 +2*A63 * T**n63 * np.exp(-1*E63/R/T) * RMGUAI**2.0 +2*A64 * T**n64 * np.exp(-1*E64/R/T) * RKETM2**2.0 +1*A65 * T**n65 * np.exp(-1*E65/R/T) * PRFET3M2**1.0 * PRFET3M2**1.0 +1*A66 * T**n66 * np.exp(-1*E66/R/T) * RC3H7O2**2.0 +1*A67 * T**n67 * np.exp(-1*E67/R/T) * RC3H5O2**2.0 +1*A68 * T**n68 * np.exp(-1*E68/R/T) * RC3H3O**2.0 +3*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 +3*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 +5*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 +5*A76 * T**n76 * np.exp(-1*E76/R/T) * RLIGA**2.0 +2*A77 * T**n77 * np.exp(-1*E77/R/T) * RKET**2.0 +2*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 +2*A80 * T**n80 * np.exp(-1*E80/R/T) * RPHENOX**1.0 * RPHENOL**1.0 +1*A81 * T**n81 * np.exp(-1*E81/R/T) * RPHENOX**1.0 * RC3H3O**1.0 -1*A96 * T**n96 * np.exp(-1*E96/R/T) * PH2**1.0 +2*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0 +1*A110 * T**n110 * np.exp(-1*E110/R/T) * PRADIOM2**2.0,
-1*A93 * T**n93 * np.exp(-1*E93/R/T) * PHENOL**1.0 +1*A131 * T**n131 * np.exp(-1*E131/R/T) * RPHENOL**1.0 * LIGH**1.0 +1*A152 * T**n152 * np.exp(-1*E152/R/T) * RPHENOL**1.0 * PLIGH**1.0 +1*A173 * T**n173 * np.exp(-1*E173/R/T) * RPHENOL**1.0 * PLIGM2**1.0 +1*A194 * T**n194 * np.exp(-1*E194/R/T) * RPHENOL**1.0 * LIGM2**1.0 +1*A215 * T**n215 * np.exp(-1*E215/R/T) * RPHENOL**1.0 * LIGM2**1.0 +1*A236 * T**n236 * np.exp(-1*E236/R/T) * RPHENOL**1.0 * PFET3M2**1.0 +1*A257 * T**n257 * np.exp(-1*E257/R/T) * RPHENOL**1.0 * ADIOM2**1.0 +1*A278 * T**n278 * np.exp(-1*E278/R/T) * RPHENOL**1.0 * KETM2**1.0 +1*A299 * T**n299 * np.exp(-1*E299/R/T) * RPHENOL**1.0 * C10H2**1.0 +1*A320 * T**n320 * np.exp(-1*E320/R/T) * RPHENOL**1.0 * LIG**1.0 +1*A341 * T**n341 * np.exp(-1*E341/R/T) * RPHENOL**1.0 * LIG**1.0 +1*A362 * T**n362 * np.exp(-1*E362/R/T) * RPHENOL**1.0 * PFET3**1.0 +1*A383 * T**n383 * np.exp(-1*E383/R/T) * RPHENOL**1.0 * ADIO**1.0 +1*A404 * T**n404 * np.exp(-1*E404/R/T) * RPHENOL**1.0 * KET**1.0,
-1*A8 * T**n8 * np.exp(-1*E8/R/T) * PKETM2**1.0 +1*A16 * T**n16 * np.exp(-1*E16/R/T) * PRLIGM2A**1.0 +1*A59 * T**n59 * np.exp(-1*E59/R/T) * RCH3O**1.0 * PRKETM2**1.0 +1*A72 * T**n72 * np.exp(-1*E72/R/T) * RCH3**1.0 * PRKETM2**1.0 +1*A121 * T**n121 * np.exp(-1*E121/R/T) * PRKETM2**1.0 * LIGH**1.0 +1*A142 * T**n142 * np.exp(-1*E142/R/T) * PRKETM2**1.0 * PLIGH**1.0 +1*A163 * T**n163 * np.exp(-1*E163/R/T) * PRKETM2**1.0 * PLIGM2**1.0 +1*A184 * T**n184 * np.exp(-1*E184/R/T) * PRKETM2**1.0 * LIGM2**1.0 +1*A205 * T**n205 * np.exp(-1*E205/R/T) * PRKETM2**1.0 * LIGM2**1.0 +1*A226 * T**n226 * np.exp(-1*E226/R/T) * PRKETM2**1.0 * PFET3M2**1.0 +1*A247 * T**n247 * np.exp(-1*E247/R/T) * PRKETM2**1.0 * ADIOM2**1.0 +1*A268 * T**n268 * np.exp(-1*E268/R/T) * PRKETM2**1.0 * KETM2**1.0 +1*A289 * T**n289 * np.exp(-1*E289/R/T) * PRKETM2**1.0 * C10H2**1.0 +1*A310 * T**n310 * np.exp(-1*E310/R/T) * PRKETM2**1.0 * LIG**1.0 +1*A331 * T**n331 * np.exp(-1*E331/R/T) * PRKETM2**1.0 * LIG**1.0 +1*A352 * T**n352 * np.exp(-1*E352/R/T) * PRKETM2**1.0 * PFET3**1.0 +1*A373 * T**n373 * np.exp(-1*E373/R/T) * PRKETM2**1.0 * ADIO**1.0 +1*A394 * T**n394 * np.exp(-1*E394/R/T) * PRKETM2**1.0 * KET**1.0,
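# (inferred) Balances for the lignin polymer species follow, in order:
# PLIG, PLIGC, PLIGH, PLIGM2, PLIGO.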
-1*A5 * T**n5 * np.exp(-1*E5/R/T) * PLIG**1.0 +1*A30 * T**n30 * np.exp(-1*E30/R/T) * PLIGC**1.0,
-1*A30 * T**n30 * np.exp(-1*E30/R/T) * PLIGC**1.0 -1*A105 * T**n105 * np.exp(-1*E105/R/T) * PLIGC**1.0,
-1*A0 * T**n0 * np.exp(-1*E0/R/T) * PLIGH**1.0 -1*A104 * T**n104 * np.exp(-1*E104/R/T) * PLIGH**1.0 +1*A117 * T**n117 * np.exp(-1*E117/R/T) * PRLIGH**1.0 * LIGH**1.0 -1*A133 * T**n133 * np.exp(-1*E133/R/T) * RC3H5O2**1.0 * PLIGH**1.0 -1*A134 * T**n134 * np.exp(-1*E134/R/T) * PRFET3**1.0 * PLIGH**1.0 -1*A135 * T**n135 * np.exp(-1*E135/R/T) * RC3H7O2**1.0 * PLIGH**1.0 -1*A136 * T**n136 * np.exp(-1*E136/R/T) * RADIOM2**1.0 * PLIGH**1.0 -1*A137 * T**n137 * np.exp(-1*E137/R/T) * PRFET3M2**1.0 * PLIGH**1.0 -1*A138 * T**n138 * np.exp(-1*E138/R/T) * PRLIGH**1.0 * PLIGH**1.0 +1*A138 * T**n138 * np.exp(-1*E138/R/T) * PRLIGH**1.0 * PLIGH**1.0 -1*A139 * T**n139 * np.exp(-1*E139/R/T) * RLIGM2B**1.0 * PLIGH**1.0 -1*A140 * T**n140 * np.exp(-1*E140/R/T) * RLIGM2A**1.0 * PLIGH**1.0 -1*A141 * T**n141 * np.exp(-1*E141/R/T) * RCH3**1.0 * PLIGH**1.0 -1*A142 * T**n142 * np.exp(-1*E142/R/T) * PRKETM2**1.0 * PLIGH**1.0 -1*A143 * T**n143 * np.exp(-1*E143/R/T) * RKET**1.0 * PLIGH**1.0 -1*A144 * T**n144 * np.exp(-1*E144/R/T) * PRADIO**1.0 * PLIGH**1.0 -1*A145 * T**n145 * np.exp(-1*E145/R/T) * RC3H3O**1.0 * PLIGH**1.0 -1*A146 * T**n146 * np.exp(-1*E146/R/T) * RLIGB**1.0 * PLIGH**1.0 -1*A147 * T**n147 * np.exp(-1*E147/R/T) * RLIGA**1.0 * PLIGH**1.0 -1*A148 * T**n148 * np.exp(-1*E148/R/T) * PRADIOM2**1.0 * PLIGH**1.0 -1*A149 * T**n149 * np.exp(-1*E149/R/T) * RMGUAI**1.0 * PLIGH**1.0 -1*A150 * T**n150 * np.exp(-1*E150/R/T) * OH**1.0 * PLIGH**1.0 -1*A151 * T**n151 * np.exp(-1*E151/R/T) * RCH3O**1.0 * PLIGH**1.0 -1*A152 * T**n152 * np.exp(-1*E152/R/T) * RPHENOL**1.0 * PLIGH**1.0 -1*A153 * T**n153 * np.exp(-1*E153/R/T) * RADIO**1.0 * PLIGH**1.0 +1*A159 * T**n159 * np.exp(-1*E159/R/T) * PRLIGH**1.0 * PLIGM2**1.0 +1*A180 * T**n180 * np.exp(-1*E180/R/T) * PRLIGH**1.0 * LIGM2**1.0 +1*A201 * T**n201 * np.exp(-1*E201/R/T) * PRLIGH**1.0 * LIGM2**1.0 +1*A222 * T**n222 * np.exp(-1*E222/R/T) * PRLIGH**1.0 * PFET3M2**1.0 +1*A243 * T**n243 * np.exp(-1*E243/R/T) * PRLIGH**1.0 * ADIOM2**1.0 +1*A264 * T**n264 * np.exp(-1*E264/R/T) * PRLIGH**1.0 * KETM2**1.0 +1*A285 * T**n285 * np.exp(-1*E285/R/T) * PRLIGH**1.0 * C10H2**1.0 +1*A306 * T**n306 * np.exp(-1*E306/R/T) * PRLIGH**1.0 * LIG**1.0 +1*A327 * T**n327 * np.exp(-1*E327/R/T) * PRLIGH**1.0 * LIG**1.0 +1*A348 * T**n348 * np.exp(-1*E348/R/T) * PRLIGH**1.0 * PFET3**1.0 +1*A369 * T**n369 * np.exp(-1*E369/R/T) * PRLIGH**1.0 * ADIO**1.0 +1*A390 * T**n390 * np.exp(-1*E390/R/T) * PRLIGH**1.0 * KET**1.0,
-1*A3 * T**n3 * np.exp(-1*E3/R/T) * PLIGM2**1.0 +1*A31 * T**n31 * np.exp(-1*E31/R/T) * PLIGO**1.0 -1*A154 * T**n154 * np.exp(-1*E154/R/T) * RC3H5O2**1.0 * PLIGM2**1.0 -1*A155 * T**n155 * np.exp(-1*E155/R/T) * PRFET3**1.0 * PLIGM2**1.0 -1*A156 * T**n156 * np.exp(-1*E156/R/T) * RC3H7O2**1.0 * PLIGM2**1.0 -1*A157 * T**n157 * np.exp(-1*E157/R/T) * RADIOM2**1.0 * PLIGM2**1.0 -1*A158 * T**n158 * np.exp(-1*E158/R/T) * PRFET3M2**1.0 * PLIGM2**1.0 -1*A159 * T**n159 * np.exp(-1*E159/R/T) * PRLIGH**1.0 * PLIGM2**1.0 -1*A160 * T**n160 * np.exp(-1*E160/R/T) * RLIGM2B**1.0 * PLIGM2**1.0 -1*A161 * T**n161 * np.exp(-1*E161/R/T) * RLIGM2A**1.0 * PLIGM2**1.0 -1*A162 * T**n162 * np.exp(-1*E162/R/T) * RCH3**1.0 * PLIGM2**1.0 -1*A163 * T**n163 * np.exp(-1*E163/R/T) * PRKETM2**1.0 * PLIGM2**1.0 -1*A164 * T**n164 * np.exp(-1*E164/R/T) * RKET**1.0 * PLIGM2**1.0 -1*A165 * T**n165 * np.exp(-1*E165/R/T) * PRADIO**1.0 * PLIGM2**1.0 -1*A166 * T**n166 * np.exp(-1*E166/R/T) * RC3H3O**1.0 * PLIGM2**1.0 -1*A167 * T**n167 * np.exp(-1*E167/R/T) * RLIGB**1.0 * PLIGM2**1.0 -1*A168 * T**n168 * np.exp(-1*E168/R/T) * RLIGA**1.0 * PLIGM2**1.0 -1*A169 * T**n169 * np.exp(-1*E169/R/T) * PRADIOM2**1.0 * PLIGM2**1.0 -1*A170 * T**n170 * np.exp(-1*E170/R/T) * RMGUAI**1.0 * PLIGM2**1.0 -1*A171 * T**n171 * np.exp(-1*E171/R/T) * OH**1.0 * PLIGM2**1.0 -1*A172 * T**n172 * np.exp(-1*E172/R/T) * RCH3O**1.0 * PLIGM2**1.0 -1*A173 * T**n173 * np.exp(-1*E173/R/T) * RPHENOL**1.0 * PLIGM2**1.0 -1*A174 * T**n174 * np.exp(-1*E174/R/T) * RADIO**1.0 * PLIGM2**1.0,
-1*A31 * T**n31 * np.exp(-1*E31/R/T) * PLIGO**1.0 -1*A106 * T**n106 * np.exp(-1*E106/R/T) * PLIGO**1.0,
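# (inferred) Balances for the P-prefixed radical species follow, in order:
# PRADIO, PRADIOM2, PRFET3, PRFET3M2, PRKETM2, PRLIGH, PRLIGH2, PRLIGM2A.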
+1*A5 * T**n5 * np.exp(-1*E5/R/T) * PLIG**1.0 +1*A7 * T**n7 * np.exp(-1*E7/R/T) * PADIO**1.0 -2*A109 * T**n109 * np.exp(-1*E109/R/T) * PRADIO**2.0 -1*A123 * T**n123 * np.exp(-1*E123/R/T) * PRADIO**1.0 * LIGH**1.0 -1*A144 * T**n144 * np.exp(-1*E144/R/T) * PRADIO**1.0 * PLIGH**1.0 -1*A165 * T**n165 * np.exp(-1*E165/R/T) * PRADIO**1.0 * PLIGM2**1.0 -1*A186 * T**n186 * np.exp(-1*E186/R/T) * PRADIO**1.0 * LIGM2**1.0 -1*A207 * T**n207 * np.exp(-1*E207/R/T) * PRADIO**1.0 * LIGM2**1.0 -1*A228 * T**n228 * np.exp(-1*E228/R/T) * PRADIO**1.0 * PFET3M2**1.0 -1*A249 * T**n249 * np.exp(-1*E249/R/T) * PRADIO**1.0 * ADIOM2**1.0 -1*A270 * T**n270 * np.exp(-1*E270/R/T) * PRADIO**1.0 * KETM2**1.0 -1*A291 * T**n291 * np.exp(-1*E291/R/T) * PRADIO**1.0 * C10H2**1.0 -1*A312 * T**n312 * np.exp(-1*E312/R/T) * PRADIO**1.0 * LIG**1.0 -1*A333 * T**n333 * np.exp(-1*E333/R/T) * PRADIO**1.0 * LIG**1.0 -1*A354 * T**n354 * np.exp(-1*E354/R/T) * PRADIO**1.0 * PFET3**1.0 -1*A375 * T**n375 * np.exp(-1*E375/R/T) * PRADIO**1.0 * ADIO**1.0 -1*A396 * T**n396 * np.exp(-1*E396/R/T) * PRADIO**1.0 * KET**1.0,
+1*A3 * T**n3 * np.exp(-1*E3/R/T) * PLIGM2**1.0 +1*A6 * T**n6 * np.exp(-1*E6/R/T) * PADIOM2**1.0 -1*A58 * T**n58 * np.exp(-1*E58/R/T) * RCH3O**1.0 * PRADIOM2**1.0 -1*A71 * T**n71 * np.exp(-1*E71/R/T) * RCH3**1.0 * PRADIOM2**1.0 -2*A110 * T**n110 * np.exp(-1*E110/R/T) * PRADIOM2**2.0 -1*A127 * T**n127 * np.exp(-1*E127/R/T) * PRADIOM2**1.0 * LIGH**1.0 -1*A148 * T**n148 * np.exp(-1*E148/R/T) * PRADIOM2**1.0 * PLIGH**1.0 -1*A169 * T**n169 * np.exp(-1*E169/R/T) * PRADIOM2**1.0 * PLIGM2**1.0 -1*A190 * T**n190 * np.exp(-1*E190/R/T) * PRADIOM2**1.0 * LIGM2**1.0 -1*A211 * T**n211 * np.exp(-1*E211/R/T) * PRADIOM2**1.0 * LIGM2**1.0 -1*A232 * T**n232 * np.exp(-1*E232/R/T) * PRADIOM2**1.0 * PFET3M2**1.0 -1*A253 * T**n253 * np.exp(-1*E253/R/T) * PRADIOM2**1.0 * ADIOM2**1.0 -1*A274 * T**n274 * np.exp(-1*E274/R/T) * PRADIOM2**1.0 * KETM2**1.0 -1*A295 * T**n295 * np.exp(-1*E295/R/T) * PRADIOM2**1.0 * C10H2**1.0 -1*A316 * T**n316 * np.exp(-1*E316/R/T) * PRADIOM2**1.0 * LIG**1.0 -1*A337 * T**n337 * np.exp(-1*E337/R/T) * PRADIOM2**1.0 * LIG**1.0 -1*A358 * T**n358 * np.exp(-1*E358/R/T) * PRADIOM2**1.0 * PFET3**1.0 -1*A379 * T**n379 * np.exp(-1*E379/R/T) * PRADIOM2**1.0 * ADIO**1.0 -1*A400 * T**n400 * np.exp(-1*E400/R/T) * PRADIOM2**1.0 * KET**1.0,
-1*A22 * T**n22 * np.exp(-1*E22/R/T) * PRFET3**1.0 -1*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 -1*A78 * T**n78 * np.exp(-1*E78/R/T) * PRFET3**1.0 * PRFET3**1.0 -1*A113 * T**n113 * np.exp(-1*E113/R/T) * PRFET3**1.0 * LIGH**1.0 -1*A134 * T**n134 * np.exp(-1*E134/R/T) * PRFET3**1.0 * PLIGH**1.0 -1*A155 * T**n155 * np.exp(-1*E155/R/T) * PRFET3**1.0 * PLIGM2**1.0 -1*A176 * T**n176 * np.exp(-1*E176/R/T) * PRFET3**1.0 * LIGM2**1.0 -1*A197 * T**n197 * np.exp(-1*E197/R/T) * PRFET3**1.0 * LIGM2**1.0 -1*A218 * T**n218 * np.exp(-1*E218/R/T) * PRFET3**1.0 * PFET3M2**1.0 -1*A239 * T**n239 * np.exp(-1*E239/R/T) * PRFET3**1.0 * ADIOM2**1.0 -1*A260 * T**n260 * np.exp(-1*E260/R/T) * PRFET3**1.0 * KETM2**1.0 -1*A281 * T**n281 * np.exp(-1*E281/R/T) * PRFET3**1.0 * C10H2**1.0 -1*A302 * T**n302 * np.exp(-1*E302/R/T) * PRFET3**1.0 * LIG**1.0 -1*A323 * T**n323 * np.exp(-1*E323/R/T) * PRFET3**1.0 * LIG**1.0 +1*A343 * T**n343 * np.exp(-1*E343/R/T) * RC3H5O2**1.0 * PFET3**1.0 -1*A344 * T**n344 * np.exp(-1*E344/R/T) * PRFET3**1.0 * PFET3**1.0 +1*A344 * T**n344 * np.exp(-1*E344/R/T) * PRFET3**1.0 * PFET3**1.0 +1*A345 * T**n345 * np.exp(-1*E345/R/T) * RC3H7O2**1.0 * PFET3**1.0 +1*A346 * T**n346 * np.exp(-1*E346/R/T) * RADIOM2**1.0 * PFET3**1.0 +1*A347 * T**n347 * np.exp(-1*E347/R/T) * PRFET3M2**1.0 * PFET3**1.0 +1*A348 * T**n348 * np.exp(-1*E348/R/T) * PRLIGH**1.0 * PFET3**1.0 +1*A349 * T**n349 * np.exp(-1*E349/R/T) * RLIGM2B**1.0 * PFET3**1.0 +1*A350 * T**n350 * np.exp(-1*E350/R/T) * RLIGM2A**1.0 * PFET3**1.0 +1*A351 * T**n351 * np.exp(-1*E351/R/T) * RCH3**1.0 * PFET3**1.0 +1*A352 * T**n352 * np.exp(-1*E352/R/T) * PRKETM2**1.0 * PFET3**1.0 +1*A353 * T**n353 * np.exp(-1*E353/R/T) * RKET**1.0 * PFET3**1.0 +1*A354 * T**n354 * np.exp(-1*E354/R/T) * PRADIO**1.0 * PFET3**1.0 +1*A355 * T**n355 * np.exp(-1*E355/R/T) * RC3H3O**1.0 * PFET3**1.0 +1*A356 * T**n356 * np.exp(-1*E356/R/T) * RLIGB**1.0 * PFET3**1.0 +1*A357 * T**n357 * np.exp(-1*E357/R/T) * RLIGA**1.0 * PFET3**1.0 +1*A358 * T**n358 * np.exp(-1*E358/R/T) * PRADIOM2**1.0 * PFET3**1.0 +1*A359 * T**n359 * np.exp(-1*E359/R/T) * RMGUAI**1.0 * PFET3**1.0 +1*A360 * T**n360 * np.exp(-1*E360/R/T) * OH**1.0 * PFET3**1.0 +1*A361 * T**n361 * np.exp(-1*E361/R/T) * RCH3O**1.0 * PFET3**1.0 +1*A362 * T**n362 * np.exp(-1*E362/R/T) * RPHENOL**1.0 * PFET3**1.0 +1*A363 * T**n363 * np.exp(-1*E363/R/T) * RADIO**1.0 * PFET3**1.0 -1*A365 * T**n365 * np.exp(-1*E365/R/T) * PRFET3**1.0 * ADIO**1.0 -1*A386 * T**n386 * np.exp(-1*E386/R/T) * PRFET3**1.0 * KET**1.0,
-1*A18 * T**n18 * np.exp(-1*E18/R/T) * PRFET3M2**1.0 -1*A65 * T**n65 * np.exp(-1*E65/R/T) * PRFET3M2**1.0 * PRFET3M2**1.0 -1*A65 * T**n65 * np.exp(-1*E65/R/T) * PRFET3M2**1.0 * PRFET3M2**1.0 -1*A116 * T**n116 * np.exp(-1*E116/R/T) * PRFET3M2**1.0 * LIGH**1.0 -1*A137 * T**n137 * np.exp(-1*E137/R/T) * PRFET3M2**1.0 * PLIGH**1.0 -1*A158 * T**n158 * np.exp(-1*E158/R/T) * PRFET3M2**1.0 * PLIGM2**1.0 -1*A179 * T**n179 * np.exp(-1*E179/R/T) * PRFET3M2**1.0 * LIGM2**1.0 -1*A200 * T**n200 * np.exp(-1*E200/R/T) * PRFET3M2**1.0 * LIGM2**1.0 +1*A217 * T**n217 * np.exp(-1*E217/R/T) * RC3H5O2**1.0 * PFET3M2**1.0 +1*A218 * T**n218 * np.exp(-1*E218/R/T) * PRFET3**1.0 * PFET3M2**1.0 +1*A219 * T**n219 * np.exp(-1*E219/R/T) * RC3H7O2**1.0 * PFET3M2**1.0 +1*A220 * T**n220 * np.exp(-1*E220/R/T) * RADIOM2**1.0 * PFET3M2**1.0 -1*A221 * T**n221 * np.exp(-1*E221/R/T) * PRFET3M2**1.0 * PFET3M2**1.0 +1*A221 * T**n221 * np.exp(-1*E221/R/T) * PRFET3M2**1.0 * PFET3M2**1.0 +1*A222 * T**n222 * np.exp(-1*E222/R/T) * PRLIGH**1.0 * PFET3M2**1.0 +1*A223 * T**n223 * np.exp(-1*E223/R/T) * RLIGM2B**1.0 * PFET3M2**1.0 +1*A224 * T**n224 * np.exp(-1*E224/R/T) * RLIGM2A**1.0 * PFET3M2**1.0 +1*A225 * T**n225 * np.exp(-1*E225/R/T) * RCH3**1.0 * PFET3M2**1.0 +1*A226 * T**n226 * np.exp(-1*E226/R/T) * PRKETM2**1.0 * PFET3M2**1.0 +1*A227 * T**n227 * np.exp(-1*E227/R/T) * RKET**1.0 * PFET3M2**1.0 +1*A228 * T**n228 * np.exp(-1*E228/R/T) * PRADIO**1.0 * PFET3M2**1.0 +1*A229 * T**n229 * np.exp(-1*E229/R/T) * RC3H3O**1.0 * PFET3M2**1.0 +1*A230 * T**n230 * np.exp(-1*E230/R/T) * RLIGB**1.0 * PFET3M2**1.0 +1*A231 * T**n231 * np.exp(-1*E231/R/T) * RLIGA**1.0 * PFET3M2**1.0 +1*A232 * T**n232 * np.exp(-1*E232/R/T) * PRADIOM2**1.0 * PFET3M2**1.0 +1*A233 * T**n233 * np.exp(-1*E233/R/T) * RMGUAI**1.0 * PFET3M2**1.0 +1*A234 * T**n234 * np.exp(-1*E234/R/T) * OH**1.0 * PFET3M2**1.0 +1*A235 * T**n235 * np.exp(-1*E235/R/T) * RCH3O**1.0 * PFET3M2**1.0 +1*A236 * T**n236 * np.exp(-1*E236/R/T) * RPHENOL**1.0 * PFET3M2**1.0 +1*A237 * T**n237 * np.exp(-1*E237/R/T) * RADIO**1.0 * PFET3M2**1.0 -1*A242 * T**n242 * np.exp(-1*E242/R/T) * PRFET3M2**1.0 * ADIOM2**1.0 -1*A263 * T**n263 * np.exp(-1*E263/R/T) * PRFET3M2**1.0 * KETM2**1.0 -1*A284 * T**n284 * np.exp(-1*E284/R/T) * PRFET3M2**1.0 * C10H2**1.0 -1*A305 * T**n305 * np.exp(-1*E305/R/T) * PRFET3M2**1.0 * LIG**1.0 -1*A326 * T**n326 * np.exp(-1*E326/R/T) * PRFET3M2**1.0 * LIG**1.0 -1*A347 * T**n347 * np.exp(-1*E347/R/T) * PRFET3M2**1.0 * PFET3**1.0 -1*A368 * T**n368 * np.exp(-1*E368/R/T) * PRFET3M2**1.0 * ADIO**1.0 -1*A389 * T**n389 * np.exp(-1*E389/R/T) * PRFET3M2**1.0 * KET**1.0,
+1*A8 * T**n8 * np.exp(-1*E8/R/T) * PKETM2**1.0 -1*A9 * T**n9 * np.exp(-1*E9/R/T) * PRKETM2**1.0 -1*A59 * T**n59 * np.exp(-1*E59/R/T) * RCH3O**1.0 * PRKETM2**1.0 -1*A72 * T**n72 * np.exp(-1*E72/R/T) * RCH3**1.0 * PRKETM2**1.0 -1*A121 * T**n121 * np.exp(-1*E121/R/T) * PRKETM2**1.0 * LIGH**1.0 -1*A142 * T**n142 * np.exp(-1*E142/R/T) * PRKETM2**1.0 * PLIGH**1.0 -1*A163 * T**n163 * np.exp(-1*E163/R/T) * PRKETM2**1.0 * PLIGM2**1.0 -1*A184 * T**n184 * np.exp(-1*E184/R/T) * PRKETM2**1.0 * LIGM2**1.0 -1*A205 * T**n205 * np.exp(-1*E205/R/T) * PRKETM2**1.0 * LIGM2**1.0 -1*A226 * T**n226 * np.exp(-1*E226/R/T) * PRKETM2**1.0 * PFET3M2**1.0 -1*A247 * T**n247 * np.exp(-1*E247/R/T) * PRKETM2**1.0 * ADIOM2**1.0 -1*A268 * T**n268 * np.exp(-1*E268/R/T) * PRKETM2**1.0 * KETM2**1.0 -1*A289 * T**n289 * np.exp(-1*E289/R/T) * PRKETM2**1.0 * C10H2**1.0 -1*A310 * T**n310 * np.exp(-1*E310/R/T) * PRKETM2**1.0 * LIG**1.0 -1*A331 * T**n331 * np.exp(-1*E331/R/T) * PRKETM2**1.0 * LIG**1.0 -1*A352 * T**n352 * np.exp(-1*E352/R/T) * PRKETM2**1.0 * PFET3**1.0 -1*A373 * T**n373 * np.exp(-1*E373/R/T) * PRKETM2**1.0 * ADIO**1.0 -1*A394 * T**n394 * np.exp(-1*E394/R/T) * PRKETM2**1.0 * KET**1.0,
+1*A0 * T**n0 * np.exp(-1*E0/R/T) * PLIGH**1.0 -1*A117 * T**n117 * np.exp(-1*E117/R/T) * PRLIGH**1.0 * LIGH**1.0 -1*A138 * T**n138 * np.exp(-1*E138/R/T) * PRLIGH**1.0 * PLIGH**1.0 -1*A159 * T**n159 * np.exp(-1*E159/R/T) * PRLIGH**1.0 * PLIGM2**1.0 -1*A180 * T**n180 * np.exp(-1*E180/R/T) * PRLIGH**1.0 * LIGM2**1.0 -1*A201 * T**n201 * np.exp(-1*E201/R/T) * PRLIGH**1.0 * LIGM2**1.0 -1*A222 * T**n222 * np.exp(-1*E222/R/T) * PRLIGH**1.0 * PFET3M2**1.0 -1*A243 * T**n243 * np.exp(-1*E243/R/T) * PRLIGH**1.0 * ADIOM2**1.0 -1*A264 * T**n264 * np.exp(-1*E264/R/T) * PRLIGH**1.0 * KETM2**1.0 -1*A285 * T**n285 * np.exp(-1*E285/R/T) * PRLIGH**1.0 * C10H2**1.0 -1*A306 * T**n306 * np.exp(-1*E306/R/T) * PRLIGH**1.0 * LIG**1.0 -1*A327 * T**n327 * np.exp(-1*E327/R/T) * PRLIGH**1.0 * LIG**1.0 -1*A348 * T**n348 * np.exp(-1*E348/R/T) * PRLIGH**1.0 * PFET3**1.0 -1*A369 * T**n369 * np.exp(-1*E369/R/T) * PRLIGH**1.0 * ADIO**1.0 -1*A390 * T**n390 * np.exp(-1*E390/R/T) * PRLIGH**1.0 * KET**1.0,
-1*A13 * T**n13 * np.exp(-1*E13/R/T) * PRLIGH2**1.0 +1*A133 * T**n133 * np.exp(-1*E133/R/T) * RC3H5O2**1.0 * PLIGH**1.0 +1*A134 * T**n134 * np.exp(-1*E134/R/T) * PRFET3**1.0 * PLIGH**1.0 +1*A135 * T**n135 * np.exp(-1*E135/R/T) * RC3H7O2**1.0 * PLIGH**1.0 +1*A136 * T**n136 * np.exp(-1*E136/R/T) * RADIOM2**1.0 * PLIGH**1.0 +1*A137 * T**n137 * np.exp(-1*E137/R/T) * PRFET3M2**1.0 * PLIGH**1.0 +1*A138 * T**n138 * np.exp(-1*E138/R/T) * PRLIGH**1.0 * PLIGH**1.0 +1*A139 * T**n139 * np.exp(-1*E139/R/T) * RLIGM2B**1.0 * PLIGH**1.0 +1*A140 * T**n140 * np.exp(-1*E140/R/T) * RLIGM2A**1.0 * PLIGH**1.0 +1*A141 * T**n141 * np.exp(-1*E141/R/T) * RCH3**1.0 * PLIGH**1.0 +1*A142 * T**n142 * np.exp(-1*E142/R/T) * PRKETM2**1.0 * PLIGH**1.0 +1*A143 * T**n143 * np.exp(-1*E143/R/T) * RKET**1.0 * PLIGH**1.0 +1*A144 * T**n144 * np.exp(-1*E144/R/T) * PRADIO**1.0 * PLIGH**1.0 +1*A145 * T**n145 * np.exp(-1*E145/R/T) * RC3H3O**1.0 * PLIGH**1.0 +1*A146 * T**n146 * np.exp(-1*E146/R/T) * RLIGB**1.0 * PLIGH**1.0 +1*A147 * T**n147 * np.exp(-1*E147/R/T) * RLIGA**1.0 * PLIGH**1.0 +1*A148 * T**n148 * np.exp(-1*E148/R/T) * PRADIOM2**1.0 * PLIGH**1.0 +1*A149 * T**n149 * np.exp(-1*E149/R/T) * RMGUAI**1.0 * PLIGH**1.0 +1*A150 * T**n150 * np.exp(-1*E150/R/T) * OH**1.0 * PLIGH**1.0 +1*A151 * T**n151 * np.exp(-1*E151/R/T) * RCH3O**1.0 * PLIGH**1.0 +1*A152 * T**n152 * np.exp(-1*E152/R/T) * RPHENOL**1.0 * PLIGH**1.0 +1*A153 * T**n153 * np.exp(-1*E153/R/T) * RADIO**1.0 * PLIGH**1.0,
+1*A13 * T**n13 * np.exp(-1*E13/R/T) * PRLIGH2**1.0 -1*A16 * T**n16 * np.exp(-1*E16/R/T) * PRLIGM2A**1.0 +1*A154 * T**n154 * np.exp(-1*E154/R/T) * RC3H5O2**1.0 * PLIGM2**1.0 +1*A155 * T**n155 * np.exp(-1*E155/R/T) * PRFET3**1.0 * PLIGM2**1.0 +1*A156 * T**n156 * np.exp(-1*E156/R/T) * RC3H7O2**1.0 * PLIGM2**1.0 +1*A157 * T**n157 * np.exp(-1*E157/R/T) * RADIOM2**1.0 * PLIGM2**1.0 +1*A158 * T**n158 * np.exp(-1*E158/R/T) * PRFET3M2**1.0 * PLIGM2**1.0 +1*A159 * T**n159 * np.exp(-1*E159/R/T) * PRLIGH**1.0 * PLIGM2**1.0 +1*A160 * T**n160 * np.exp(-1*E160/R/T) * RLIGM2B**1.0 * PLIGM2**1.0 +1*A161 * T**n161 * np.exp(-1*E161/R/T) * RLIGM2A**1.0 * PLIGM2**1.0 +1*A162 * T**n162 * np.exp(-1*E162/R/T) * RCH3**1.0 * PLIGM2**1.0 +1*A163 * T**n163 * np.exp(-1*E163/R/T) * PRKETM2**1.0 * PLIGM2**1.0 +1*A164 * T**n164 * np.exp(-1*E164/R/T) * RKET**1.0 * PLIGM2**1.0 +1*A165 * T**n165 * np.exp(-1*E165/R/T) * PRADIO**1.0 * PLIGM2**1.0 +1*A166 * T**n166 * np.exp(-1*E166/R/T) * RC3H3O**1.0 * PLIGM2**1.0 +1*A167 * T**n167 * np.exp(-1*E167/R/T) * RLIGB**1.0 * PLIGM2**1.0 +1*A168 * T**n168 * np.exp(-1*E168/R/T) * RLIGA**1.0 * PLIGM2**1.0 +1*A169 * T**n169 * np.exp(-1*E169/R/T) * PRADIOM2**1.0 * PLIGM2**1.0 +1*A170 * T**n170 * np.exp(-1*E170/R/T) * RMGUAI**1.0 * PLIGM2**1.0 +1*A171 * T**n171 * np.exp(-1*E171/R/T) * OH**1.0 * PLIGM2**1.0 +1*A172 * T**n172 * np.exp(-1*E172/R/T) * RCH3O**1.0 * PLIGM2**1.0 +1*A173 * T**n173 * np.exp(-1*E173/R/T) * RPHENOL**1.0 * PLIGM2**1.0 +1*A174 * T**n174 * np.exp(-1*E174/R/T) * RADIO**1.0 * PLIGM2**1.0,
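# (inferred) Balances for the remaining radical species follow, in order:
# RADIO, RADIOM2, RC3H3O, RC3H5O2, RC3H7O2, RCH3, RCH3O, RKET, RKETM2, RLIGA,
# RLIGB, RLIGH, RLIGM2A, RLIGM2B, RMGUAI, RPHENOL, RPHENOX.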
+1*A4 * T**n4 * np.exp(-1*E4/R/T) * LIG**1.0 -1*A19 * T**n19 * np.exp(-1*E19/R/T) * RADIO**1.0 -1*A25 * T**n25 * np.exp(-1*E25/R/T) * RADIO**1.0 -2*A74 * T**n74 * np.exp(-1*E74/R/T) * RADIO**2.0 -1*A132 * T**n132 * np.exp(-1*E132/R/T) * RADIO**1.0 * LIGH**1.0 -1*A153 * T**n153 * np.exp(-1*E153/R/T) * RADIO**1.0 * PLIGH**1.0 -1*A174 * T**n174 * np.exp(-1*E174/R/T) * RADIO**1.0 * PLIGM2**1.0 -1*A195 * T**n195 * np.exp(-1*E195/R/T) * RADIO**1.0 * LIGM2**1.0 -1*A216 * T**n216 * np.exp(-1*E216/R/T) * RADIO**1.0 * LIGM2**1.0 -1*A237 * T**n237 * np.exp(-1*E237/R/T) * RADIO**1.0 * PFET3M2**1.0 -1*A258 * T**n258 * np.exp(-1*E258/R/T) * RADIO**1.0 * ADIOM2**1.0 -1*A279 * T**n279 * np.exp(-1*E279/R/T) * RADIO**1.0 * KETM2**1.0 -1*A300 * T**n300 * np.exp(-1*E300/R/T) * RADIO**1.0 * C10H2**1.0 -1*A321 * T**n321 * np.exp(-1*E321/R/T) * RADIO**1.0 * LIG**1.0 -1*A342 * T**n342 * np.exp(-1*E342/R/T) * RADIO**1.0 * LIG**1.0 -1*A363 * T**n363 * np.exp(-1*E363/R/T) * RADIO**1.0 * PFET3**1.0 +1*A364 * T**n364 * np.exp(-1*E364/R/T) * RC3H5O2**1.0 * ADIO**1.0 +1*A365 * T**n365 * np.exp(-1*E365/R/T) * PRFET3**1.0 * ADIO**1.0 +1*A366 * T**n366 * np.exp(-1*E366/R/T) * RC3H7O2**1.0 * ADIO**1.0 +1*A367 * T**n367 * np.exp(-1*E367/R/T) * RADIOM2**1.0 * ADIO**1.0 +1*A368 * T**n368 * np.exp(-1*E368/R/T) * PRFET3M2**1.0 * ADIO**1.0 +1*A369 * T**n369 * np.exp(-1*E369/R/T) * PRLIGH**1.0 * ADIO**1.0 +1*A370 * T**n370 * np.exp(-1*E370/R/T) * RLIGM2B**1.0 * ADIO**1.0 +1*A371 * T**n371 * np.exp(-1*E371/R/T) * RLIGM2A**1.0 * ADIO**1.0 +1*A372 * T**n372 * np.exp(-1*E372/R/T) * RCH3**1.0 * ADIO**1.0 +1*A373 * T**n373 * np.exp(-1*E373/R/T) * PRKETM2**1.0 * ADIO**1.0 +1*A374 * T**n374 * np.exp(-1*E374/R/T) * RKET**1.0 * ADIO**1.0 +1*A375 * T**n375 * np.exp(-1*E375/R/T) * PRADIO**1.0 * ADIO**1.0 +1*A376 * T**n376 * np.exp(-1*E376/R/T) * RC3H3O**1.0 * ADIO**1.0 +1*A377 * T**n377 * np.exp(-1*E377/R/T) * RLIGB**1.0 * ADIO**1.0 +1*A378 * T**n378 * np.exp(-1*E378/R/T) * RLIGA**1.0 * ADIO**1.0 +1*A379 * T**n379 * np.exp(-1*E379/R/T) * PRADIOM2**1.0 * ADIO**1.0 +1*A380 * T**n380 * np.exp(-1*E380/R/T) * RMGUAI**1.0 * ADIO**1.0 +1*A381 * T**n381 * np.exp(-1*E381/R/T) * OH**1.0 * ADIO**1.0 +1*A382 * T**n382 * np.exp(-1*E382/R/T) * RCH3O**1.0 * ADIO**1.0 +1*A383 * T**n383 * np.exp(-1*E383/R/T) * RPHENOL**1.0 * ADIO**1.0 -1*A384 * T**n384 * np.exp(-1*E384/R/T) * RADIO**1.0 * ADIO**1.0 +1*A384 * T**n384 * np.exp(-1*E384/R/T) * RADIO**1.0 * ADIO**1.0 -1*A405 * T**n405 * np.exp(-1*E405/R/T) * RADIO**1.0 * KET**1.0,
+1*A2 * T**n2 * np.exp(-1*E2/R/T) * LIGM2**1.0 -1*A14 * T**n14 * np.exp(-1*E14/R/T) * RADIOM2**1.0 -1*A23 * T**n23 * np.exp(-1*E23/R/T) * RADIOM2**1.0 -2*A60 * T**n60 * np.exp(-1*E60/R/T) * RADIOM2**2.0 -1*A115 * T**n115 * np.exp(-1*E115/R/T) * RADIOM2**1.0 * LIGH**1.0 -1*A136 * T**n136 * np.exp(-1*E136/R/T) * RADIOM2**1.0 * PLIGH**1.0 -1*A157 * T**n157 * np.exp(-1*E157/R/T) * RADIOM2**1.0 * PLIGM2**1.0 -1*A178 * T**n178 * np.exp(-1*E178/R/T) * RADIOM2**1.0 * LIGM2**1.0 -1*A199 * T**n199 * np.exp(-1*E199/R/T) * RADIOM2**1.0 * LIGM2**1.0 -1*A220 * T**n220 * np.exp(-1*E220/R/T) * RADIOM2**1.0 * PFET3M2**1.0 +1*A238 * T**n238 * np.exp(-1*E238/R/T) * RC3H5O2**1.0 * ADIOM2**1.0 +1*A239 * T**n239 * np.exp(-1*E239/R/T) * PRFET3**1.0 * ADIOM2**1.0 +1*A240 * T**n240 * np.exp(-1*E240/R/T) * RC3H7O2**1.0 * ADIOM2**1.0 -1*A241 * T**n241 * np.exp(-1*E241/R/T) * RADIOM2**1.0 * ADIOM2**1.0 +1*A241 * T**n241 * np.exp(-1*E241/R/T) * RADIOM2**1.0 * ADIOM2**1.0 +1*A242 * T**n242 * np.exp(-1*E242/R/T) * PRFET3M2**1.0 * ADIOM2**1.0 +1*A243 * T**n243 * np.exp(-1*E243/R/T) * PRLIGH**1.0 * ADIOM2**1.0 +1*A244 * T**n244 * np.exp(-1*E244/R/T) * RLIGM2B**1.0 * ADIOM2**1.0 +1*A245 * T**n245 * np.exp(-1*E245/R/T) * RLIGM2A**1.0 * ADIOM2**1.0 +1*A246 * T**n246 * np.exp(-1*E246/R/T) * RCH3**1.0 * ADIOM2**1.0 +1*A247 * T**n247 * np.exp(-1*E247/R/T) * PRKETM2**1.0 * ADIOM2**1.0 +1*A248 * T**n248 * np.exp(-1*E248/R/T) * RKET**1.0 * ADIOM2**1.0 +1*A249 * T**n249 * np.exp(-1*E249/R/T) * PRADIO**1.0 * ADIOM2**1.0 +1*A250 * T**n250 * np.exp(-1*E250/R/T) * RC3H3O**1.0 * ADIOM2**1.0 +1*A251 * T**n251 * np.exp(-1*E251/R/T) * RLIGB**1.0 * ADIOM2**1.0 +1*A252 * T**n252 * np.exp(-1*E252/R/T) * RLIGA**1.0 * ADIOM2**1.0 +1*A253 * T**n253 * np.exp(-1*E253/R/T) * PRADIOM2**1.0 * ADIOM2**1.0 +1*A254 * T**n254 * np.exp(-1*E254/R/T) * RMGUAI**1.0 * ADIOM2**1.0 +1*A255 * T**n255 * np.exp(-1*E255/R/T) * OH**1.0 * ADIOM2**1.0 +1*A256 * T**n256 * np.exp(-1*E256/R/T) * RCH3O**1.0 * ADIOM2**1.0 +1*A257 * T**n257 * np.exp(-1*E257/R/T) * RPHENOL**1.0 * ADIOM2**1.0 +1*A258 * T**n258 * np.exp(-1*E258/R/T) * RADIO**1.0 * ADIOM2**1.0 -1*A262 * T**n262 * np.exp(-1*E262/R/T) * RADIOM2**1.0 * KETM2**1.0 -1*A283 * T**n283 * np.exp(-1*E283/R/T) * RADIOM2**1.0 * C10H2**1.0 -1*A304 * T**n304 * np.exp(-1*E304/R/T) * RADIOM2**1.0 * LIG**1.0 -1*A325 * T**n325 * np.exp(-1*E325/R/T) * RADIOM2**1.0 * LIG**1.0 -1*A346 * T**n346 * np.exp(-1*E346/R/T) * RADIOM2**1.0 * PFET3**1.0 -1*A367 * T**n367 * np.exp(-1*E367/R/T) * RADIOM2**1.0 * ADIO**1.0 -1*A388 * T**n388 * np.exp(-1*E388/R/T) * RADIOM2**1.0 * KET**1.0,
+1*A34 * T**n34 * np.exp(-1*E34/R/T) * KETDM2**1.0 * RPHENOXM2**1.0 +1*A35 * T**n35 * np.exp(-1*E35/R/T) * SYNAPYL**1.0 * RPHENOXM2**1.0 +1*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 +1*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 +1*A42 * T**n42 * np.exp(-1*E42/R/T) * KETD**1.0 * RPHENOX**1.0 +1*A43 * T**n43 * np.exp(-1*E43/R/T) * COUMARYL**1.0 * RPHENOX**1.0 +1*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 +1*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 -2*A68 * T**n68 * np.exp(-1*E68/R/T) * RC3H3O**2.0 -1*A81 * T**n81 * np.exp(-1*E81/R/T) * RPHENOX**1.0 * RC3H3O**1.0 -1*A124 * T**n124 * np.exp(-1*E124/R/T) * RC3H3O**1.0 * LIGH**1.0 -1*A145 * T**n145 * np.exp(-1*E145/R/T) * RC3H3O**1.0 * PLIGH**1.0 -1*A166 * T**n166 * np.exp(-1*E166/R/T) * RC3H3O**1.0 * PLIGM2**1.0 -1*A187 * T**n187 * np.exp(-1*E187/R/T) * RC3H3O**1.0 * LIGM2**1.0 -1*A208 * T**n208 * np.exp(-1*E208/R/T) * RC3H3O**1.0 * LIGM2**1.0 -1*A229 * T**n229 * np.exp(-1*E229/R/T) * RC3H3O**1.0 * PFET3M2**1.0 -1*A250 * T**n250 * np.exp(-1*E250/R/T) * RC3H3O**1.0 * ADIOM2**1.0 -1*A271 * T**n271 * np.exp(-1*E271/R/T) * RC3H3O**1.0 * KETM2**1.0 -1*A292 * T**n292 * np.exp(-1*E292/R/T) * RC3H3O**1.0 * C10H2**1.0 -1*A313 * T**n313 * np.exp(-1*E313/R/T) * RC3H3O**1.0 * LIG**1.0 -1*A334 * T**n334 * np.exp(-1*E334/R/T) * RC3H3O**1.0 * LIG**1.0 -1*A355 * T**n355 * np.exp(-1*E355/R/T) * RC3H3O**1.0 * PFET3**1.0 -1*A376 * T**n376 * np.exp(-1*E376/R/T) * RC3H3O**1.0 * ADIO**1.0 -1*A397 * T**n397 * np.exp(-1*E397/R/T) * RC3H3O**1.0 * KET**1.0,
+1*A33 * T**n33 * np.exp(-1*E33/R/T) * KETM2**1.0 * RPHENOXM2**1.0 +1*A37 * T**n37 * np.exp(-1*E37/R/T) * KETM2**1.0 * RPHENOX**1.0 +1*A41 * T**n41 * np.exp(-1*E41/R/T) * KET**1.0 * RPHENOX**1.0 +1*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 -2*A67 * T**n67 * np.exp(-1*E67/R/T) * RC3H5O2**2.0 -1*A112 * T**n112 * np.exp(-1*E112/R/T) * RC3H5O2**1.0 * LIGH**1.0 -1*A133 * T**n133 * np.exp(-1*E133/R/T) * RC3H5O2**1.0 * PLIGH**1.0 -1*A154 * T**n154 * np.exp(-1*E154/R/T) * RC3H5O2**1.0 * PLIGM2**1.0 -1*A175 * T**n175 * np.exp(-1*E175/R/T) * RC3H5O2**1.0 * LIGM2**1.0 -1*A196 * T**n196 * np.exp(-1*E196/R/T) * RC3H5O2**1.0 * LIGM2**1.0 -1*A217 * T**n217 * np.exp(-1*E217/R/T) * RC3H5O2**1.0 * PFET3M2**1.0 -1*A238 * T**n238 * np.exp(-1*E238/R/T) * RC3H5O2**1.0 * ADIOM2**1.0 -1*A259 * T**n259 * np.exp(-1*E259/R/T) * RC3H5O2**1.0 * KETM2**1.0 -1*A280 * T**n280 * np.exp(-1*E280/R/T) * RC3H5O2**1.0 * C10H2**1.0 -1*A301 * T**n301 * np.exp(-1*E301/R/T) * RC3H5O2**1.0 * LIG**1.0 -1*A322 * T**n322 * np.exp(-1*E322/R/T) * RC3H5O2**1.0 * LIG**1.0 -1*A343 * T**n343 * np.exp(-1*E343/R/T) * RC3H5O2**1.0 * PFET3**1.0 -1*A364 * T**n364 * np.exp(-1*E364/R/T) * RC3H5O2**1.0 * ADIO**1.0 -1*A385 * T**n385 * np.exp(-1*E385/R/T) * RC3H5O2**1.0 * KET**1.0,
-1*A27 * T**n27 * np.exp(-1*E27/R/T) * RC3H7O2**1.0 +1*A32 * T**n32 * np.exp(-1*E32/R/T) * ADIOM2**1.0 * RPHENOXM2**1.0 +1*A36 * T**n36 * np.exp(-1*E36/R/T) * ADIOM2**1.0 * RPHENOX**1.0 +1*A40 * T**n40 * np.exp(-1*E40/R/T) * ADIO**1.0 * RPHENOX**1.0 +1*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 -2*A66 * T**n66 * np.exp(-1*E66/R/T) * RC3H7O2**2.0 -1*A114 * T**n114 * np.exp(-1*E114/R/T) * RC3H7O2**1.0 * LIGH**1.0 -1*A135 * T**n135 * np.exp(-1*E135/R/T) * RC3H7O2**1.0 * PLIGH**1.0 -1*A156 * T**n156 * np.exp(-1*E156/R/T) * RC3H7O2**1.0 * PLIGM2**1.0 -1*A177 * T**n177 * np.exp(-1*E177/R/T) * RC3H7O2**1.0 * LIGM2**1.0 -1*A198 * T**n198 * np.exp(-1*E198/R/T) * RC3H7O2**1.0 * LIGM2**1.0 -1*A219 * T**n219 * np.exp(-1*E219/R/T) * RC3H7O2**1.0 * PFET3M2**1.0 -1*A240 * T**n240 * np.exp(-1*E240/R/T) * RC3H7O2**1.0 * ADIOM2**1.0 -1*A261 * T**n261 * np.exp(-1*E261/R/T) * RC3H7O2**1.0 * KETM2**1.0 -1*A282 * T**n282 * np.exp(-1*E282/R/T) * RC3H7O2**1.0 * C10H2**1.0 -1*A303 * T**n303 * np.exp(-1*E303/R/T) * RC3H7O2**1.0 * LIG**1.0 -1*A324 * T**n324 * np.exp(-1*E324/R/T) * RC3H7O2**1.0 * LIG**1.0 -1*A345 * T**n345 * np.exp(-1*E345/R/T) * RC3H7O2**1.0 * PFET3**1.0 -1*A366 * T**n366 * np.exp(-1*E366/R/T) * RC3H7O2**1.0 * ADIO**1.0 -1*A387 * T**n387 * np.exp(-1*E387/R/T) * RC3H7O2**1.0 * KET**1.0,
+1*A52 * T**n52 * np.exp(-1*E52/R/T) * RCH3O**1.0 * RPHENOX**1.0 +1*A53 * T**n53 * np.exp(-1*E53/R/T) * RCH3O**1.0 * RPHENOXM2**1.0 -1*A54 * T**n54 * np.exp(-1*E54/R/T) * RPHENOXM2**1.0 * RCH3**1.0 -1*A55 * T**n55 * np.exp(-1*E55/R/T) * RPHENOX**1.0 * RCH3**1.0 -1*A56 * T**n56 * np.exp(-1*E56/R/T) * RCH3O**1.0 * RCH3**1.0 -1*A69 * T**n69 * np.exp(-1*E69/R/T) * OH**1.0 * RCH3**1.0 -2*A70 * T**n70 * np.exp(-1*E70/R/T) * RCH3**2.0 -1*A71 * T**n71 * np.exp(-1*E71/R/T) * RCH3**1.0 * PRADIOM2**1.0 -1*A72 * T**n72 * np.exp(-1*E72/R/T) * RCH3**1.0 * PRKETM2**1.0 +1*A101 * T**n101 * np.exp(-1*E101/R/T) * PCH3**1.0 -1*A120 * T**n120 * np.exp(-1*E120/R/T) * RCH3**1.0 * LIGH**1.0 -1*A141 * T**n141 * np.exp(-1*E141/R/T) * RCH3**1.0 * PLIGH**1.0 -1*A162 * T**n162 * np.exp(-1*E162/R/T) * RCH3**1.0 * PLIGM2**1.0 -1*A183 * T**n183 * np.exp(-1*E183/R/T) * RCH3**1.0 * LIGM2**1.0 -1*A204 * T**n204 * np.exp(-1*E204/R/T) * RCH3**1.0 * LIGM2**1.0 -1*A225 * T**n225 * np.exp(-1*E225/R/T) * RCH3**1.0 * PFET3M2**1.0 -1*A246 * T**n246 * np.exp(-1*E246/R/T) * RCH3**1.0 * ADIOM2**1.0 -1*A267 * T**n267 * np.exp(-1*E267/R/T) * RCH3**1.0 * KETM2**1.0 -1*A288 * T**n288 * np.exp(-1*E288/R/T) * RCH3**1.0 * C10H2**1.0 -1*A309 * T**n309 * np.exp(-1*E309/R/T) * RCH3**1.0 * LIG**1.0 -1*A330 * T**n330 * np.exp(-1*E330/R/T) * RCH3**1.0 * LIG**1.0 -1*A351 * T**n351 * np.exp(-1*E351/R/T) * RCH3**1.0 * PFET3**1.0 -1*A372 * T**n372 * np.exp(-1*E372/R/T) * RCH3**1.0 * ADIO**1.0 -1*A393 * T**n393 * np.exp(-1*E393/R/T) * RCH3**1.0 * KET**1.0,
+1*A27 * T**n27 * np.exp(-1*E27/R/T) * RC3H7O2**1.0 +1*A28 * T**n28 * np.exp(-1*E28/R/T) * C10H2M4**1.0 +1*A29 * T**n29 * np.exp(-1*E29/R/T) * C10H2M2**1.0 +1*A48 * T**n48 * np.exp(-1*E48/R/T) * C10H2M4**1.0 * RPHENOXM2**1.0 +1*A49 * T**n49 * np.exp(-1*E49/R/T) * C10H2M2**1.0 * RPHENOXM2**1.0 +1*A50 * T**n50 * np.exp(-1*E50/R/T) * C10H2M4**1.0 * RPHENOX**1.0 +1*A51 * T**n51 * np.exp(-1*E51/R/T) * C10H2M2**1.0 * RPHENOX**1.0 -1*A52 * T**n52 * np.exp(-1*E52/R/T) * RCH3O**1.0 * RPHENOX**1.0 -1*A53 * T**n53 * np.exp(-1*E53/R/T) * RCH3O**1.0 * RPHENOXM2**1.0 -1*A56 * T**n56 * np.exp(-1*E56/R/T) * RCH3O**1.0 * RCH3**1.0 -2*A57 * T**n57 * np.exp(-1*E57/R/T) * RCH3O**2.0 -1*A58 * T**n58 * np.exp(-1*E58/R/T) * RCH3O**1.0 * PRADIOM2**1.0 -1*A59 * T**n59 * np.exp(-1*E59/R/T) * RCH3O**1.0 * PRKETM2**1.0 -1*A130 * T**n130 * np.exp(-1*E130/R/T) * RCH3O**1.0 * LIGH**1.0 -1*A151 * T**n151 * np.exp(-1*E151/R/T) * RCH3O**1.0 * PLIGH**1.0 -1*A172 * T**n172 * np.exp(-1*E172/R/T) * RCH3O**1.0 * PLIGM2**1.0 -1*A193 * T**n193 * np.exp(-1*E193/R/T) * RCH3O**1.0 * LIGM2**1.0 -1*A214 * T**n214 * np.exp(-1*E214/R/T) * RCH3O**1.0 * LIGM2**1.0 -1*A235 * T**n235 * np.exp(-1*E235/R/T) * RCH3O**1.0 * PFET3M2**1.0 -1*A256 * T**n256 * np.exp(-1*E256/R/T) * RCH3O**1.0 * ADIOM2**1.0 -1*A277 * T**n277 * np.exp(-1*E277/R/T) * RCH3O**1.0 * KETM2**1.0 -1*A298 * T**n298 * np.exp(-1*E298/R/T) * RCH3O**1.0 * C10H2**1.0 -1*A319 * T**n319 * np.exp(-1*E319/R/T) * RCH3O**1.0 * LIG**1.0 -1*A340 * T**n340 * np.exp(-1*E340/R/T) * RCH3O**1.0 * LIG**1.0 -1*A361 * T**n361 * np.exp(-1*E361/R/T) * RCH3O**1.0 * PFET3**1.0 -1*A382 * T**n382 * np.exp(-1*E382/R/T) * RCH3O**1.0 * ADIO**1.0 -1*A403 * T**n403 * np.exp(-1*E403/R/T) * RCH3O**1.0 * KET**1.0,
-1*A26 * T**n26 * np.exp(-1*E26/R/T) * RKET**1.0 -2*A77 * T**n77 * np.exp(-1*E77/R/T) * RKET**2.0 -1*A122 * T**n122 * np.exp(-1*E122/R/T) * RKET**1.0 * LIGH**1.0 -1*A143 * T**n143 * np.exp(-1*E143/R/T) * RKET**1.0 * PLIGH**1.0 -1*A164 * T**n164 * np.exp(-1*E164/R/T) * RKET**1.0 * PLIGM2**1.0 -1*A185 * T**n185 * np.exp(-1*E185/R/T) * RKET**1.0 * LIGM2**1.0 -1*A206 * T**n206 * np.exp(-1*E206/R/T) * RKET**1.0 * LIGM2**1.0 -1*A227 * T**n227 * np.exp(-1*E227/R/T) * RKET**1.0 * PFET3M2**1.0 -1*A248 * T**n248 * np.exp(-1*E248/R/T) * RKET**1.0 * ADIOM2**1.0 -1*A269 * T**n269 * np.exp(-1*E269/R/T) * RKET**1.0 * KETM2**1.0 -1*A290 * T**n290 * np.exp(-1*E290/R/T) * RKET**1.0 * C10H2**1.0 -1*A311 * T**n311 * np.exp(-1*E311/R/T) * RKET**1.0 * LIG**1.0 -1*A332 * T**n332 * np.exp(-1*E332/R/T) * RKET**1.0 * LIG**1.0 -1*A353 * T**n353 * np.exp(-1*E353/R/T) * RKET**1.0 * PFET3**1.0 -1*A374 * T**n374 * np.exp(-1*E374/R/T) * RKET**1.0 * ADIO**1.0 +1*A385 * T**n385 * np.exp(-1*E385/R/T) * RC3H5O2**1.0 * KET**1.0 +1*A386 * T**n386 * np.exp(-1*E386/R/T) * PRFET3**1.0 * KET**1.0 +1*A387 * T**n387 * np.exp(-1*E387/R/T) * RC3H7O2**1.0 * KET**1.0 +1*A388 * T**n388 * np.exp(-1*E388/R/T) * RADIOM2**1.0 * KET**1.0 +1*A389 * T**n389 * np.exp(-1*E389/R/T) * PRFET3M2**1.0 * KET**1.0 +1*A390 * T**n390 * np.exp(-1*E390/R/T) * PRLIGH**1.0 * KET**1.0 +1*A391 * T**n391 * np.exp(-1*E391/R/T) * RLIGM2B**1.0 * KET**1.0 +1*A392 * T**n392 * np.exp(-1*E392/R/T) * RLIGM2A**1.0 * KET**1.0 +1*A393 * T**n393 * np.exp(-1*E393/R/T) * RCH3**1.0 * KET**1.0 +1*A394 * T**n394 * np.exp(-1*E394/R/T) * PRKETM2**1.0 * KET**1.0 -1*A395 * T**n395 * np.exp(-1*E395/R/T) * RKET**1.0 * KET**1.0 +1*A395 * T**n395 * np.exp(-1*E395/R/T) * RKET**1.0 * KET**1.0 +1*A396 * T**n396 * np.exp(-1*E396/R/T) * PRADIO**1.0 * KET**1.0 +1*A397 * T**n397 * np.exp(-1*E397/R/T) * RC3H3O**1.0 * KET**1.0 +1*A398 * T**n398 * np.exp(-1*E398/R/T) * RLIGB**1.0 * KET**1.0 +1*A399 * T**n399 * np.exp(-1*E399/R/T) * RLIGA**1.0 * KET**1.0 +1*A400 * T**n400 * np.exp(-1*E400/R/T) * PRADIOM2**1.0 * KET**1.0 +1*A401 * T**n401 * np.exp(-1*E401/R/T) * RMGUAI**1.0 * KET**1.0 +1*A402 * T**n402 * np.exp(-1*E402/R/T) * OH**1.0 * KET**1.0 +1*A403 * T**n403 * np.exp(-1*E403/R/T) * RCH3O**1.0 * KET**1.0 +1*A404 * T**n404 * np.exp(-1*E404/R/T) * RPHENOL**1.0 * KET**1.0 +1*A405 * T**n405 * np.exp(-1*E405/R/T) * RADIO**1.0 * KET**1.0,
-1*A24 * T**n24 * np.exp(-1*E24/R/T) * RKETM2**1.0 -2*A64 * T**n64 * np.exp(-1*E64/R/T) * RKETM2**2.0 +1*A259 * T**n259 * np.exp(-1*E259/R/T) * RC3H5O2**1.0 * KETM2**1.0 +1*A260 * T**n260 * np.exp(-1*E260/R/T) * PRFET3**1.0 * KETM2**1.0 +1*A261 * T**n261 * np.exp(-1*E261/R/T) * RC3H7O2**1.0 * KETM2**1.0 +1*A262 * T**n262 * np.exp(-1*E262/R/T) * RADIOM2**1.0 * KETM2**1.0 +1*A263 * T**n263 * np.exp(-1*E263/R/T) * PRFET3M2**1.0 * KETM2**1.0 +1*A264 * T**n264 * np.exp(-1*E264/R/T) * PRLIGH**1.0 * KETM2**1.0 +1*A265 * T**n265 * np.exp(-1*E265/R/T) * RLIGM2B**1.0 * KETM2**1.0 +1*A266 * T**n266 * np.exp(-1*E266/R/T) * RLIGM2A**1.0 * KETM2**1.0 +1*A267 * T**n267 * np.exp(-1*E267/R/T) * RCH3**1.0 * KETM2**1.0 +1*A268 * T**n268 * np.exp(-1*E268/R/T) * PRKETM2**1.0 * KETM2**1.0 +1*A269 * T**n269 * np.exp(-1*E269/R/T) * RKET**1.0 * KETM2**1.0 +1*A270 * T**n270 * np.exp(-1*E270/R/T) * PRADIO**1.0 * KETM2**1.0 +1*A271 * T**n271 * np.exp(-1*E271/R/T) * RC3H3O**1.0 * KETM2**1.0 +1*A272 * T**n272 * np.exp(-1*E272/R/T) * RLIGB**1.0 * KETM2**1.0 +1*A273 * T**n273 * np.exp(-1*E273/R/T) * RLIGA**1.0 * KETM2**1.0 +1*A274 * T**n274 * np.exp(-1*E274/R/T) * PRADIOM2**1.0 * KETM2**1.0 +1*A275 * T**n275 * np.exp(-1*E275/R/T) * RMGUAI**1.0 * KETM2**1.0 +1*A276 * T**n276 * np.exp(-1*E276/R/T) * OH**1.0 * KETM2**1.0 +1*A277 * T**n277 * np.exp(-1*E277/R/T) * RCH3O**1.0 * KETM2**1.0 +1*A278 * T**n278 * np.exp(-1*E278/R/T) * RPHENOL**1.0 * KETM2**1.0 +1*A279 * T**n279 * np.exp(-1*E279/R/T) * RADIO**1.0 * KETM2**1.0,
-1*A20 * T**n20 * np.exp(-1*E20/R/T) * RLIGA**1.0 -2*A76 * T**n76 * np.exp(-1*E76/R/T) * RLIGA**2.0 -1*A126 * T**n126 * np.exp(-1*E126/R/T) * RLIGA**1.0 * LIGH**1.0 -1*A147 * T**n147 * np.exp(-1*E147/R/T) * RLIGA**1.0 * PLIGH**1.0 -1*A168 * T**n168 * np.exp(-1*E168/R/T) * RLIGA**1.0 * PLIGM2**1.0 -1*A189 * T**n189 * np.exp(-1*E189/R/T) * RLIGA**1.0 * LIGM2**1.0 -1*A210 * T**n210 * np.exp(-1*E210/R/T) * RLIGA**1.0 * LIGM2**1.0 -1*A231 * T**n231 * np.exp(-1*E231/R/T) * RLIGA**1.0 * PFET3M2**1.0 -1*A252 * T**n252 * np.exp(-1*E252/R/T) * RLIGA**1.0 * ADIOM2**1.0 -1*A273 * T**n273 * np.exp(-1*E273/R/T) * RLIGA**1.0 * KETM2**1.0 -1*A294 * T**n294 * np.exp(-1*E294/R/T) * RLIGA**1.0 * C10H2**1.0 +1*A301 * T**n301 * np.exp(-1*E301/R/T) * RC3H5O2**1.0 * LIG**1.0 +1*A302 * T**n302 * np.exp(-1*E302/R/T) * PRFET3**1.0 * LIG**1.0 +1*A303 * T**n303 * np.exp(-1*E303/R/T) * RC3H7O2**1.0 * LIG**1.0 +1*A304 * T**n304 * np.exp(-1*E304/R/T) * RADIOM2**1.0 * LIG**1.0 +1*A305 * T**n305 * np.exp(-1*E305/R/T) * PRFET3M2**1.0 * LIG**1.0 +1*A306 * T**n306 * np.exp(-1*E306/R/T) * PRLIGH**1.0 * LIG**1.0 +1*A307 * T**n307 * np.exp(-1*E307/R/T) * RLIGM2B**1.0 * LIG**1.0 +1*A308 * T**n308 * np.exp(-1*E308/R/T) * RLIGM2A**1.0 * LIG**1.0 +1*A309 * T**n309 * np.exp(-1*E309/R/T) * RCH3**1.0 * LIG**1.0 +1*A310 * T**n310 * np.exp(-1*E310/R/T) * PRKETM2**1.0 * LIG**1.0 +1*A311 * T**n311 * np.exp(-1*E311/R/T) * RKET**1.0 * LIG**1.0 +1*A312 * T**n312 * np.exp(-1*E312/R/T) * PRADIO**1.0 * LIG**1.0 +1*A313 * T**n313 * np.exp(-1*E313/R/T) * RC3H3O**1.0 * LIG**1.0 +1*A314 * T**n314 * np.exp(-1*E314/R/T) * RLIGB**1.0 * LIG**1.0 -1*A315 * T**n315 * np.exp(-1*E315/R/T) * RLIGA**1.0 * LIG**1.0 +1*A315 * T**n315 * np.exp(-1*E315/R/T) * RLIGA**1.0 * LIG**1.0 +1*A316 * T**n316 * np.exp(-1*E316/R/T) * PRADIOM2**1.0 * LIG**1.0 +1*A317 * T**n317 * np.exp(-1*E317/R/T) * RMGUAI**1.0 * LIG**1.0 +1*A318 * T**n318 * np.exp(-1*E318/R/T) * OH**1.0 * LIG**1.0 +1*A319 * T**n319 * np.exp(-1*E319/R/T) * RCH3O**1.0 * LIG**1.0 +1*A320 * T**n320 * np.exp(-1*E320/R/T) * RPHENOL**1.0 * LIG**1.0 +1*A321 * T**n321 * np.exp(-1*E321/R/T) * RADIO**1.0 * LIG**1.0 -1*A336 * T**n336 * np.exp(-1*E336/R/T) * RLIGA**1.0 * LIG**1.0 -1*A357 * T**n357 * np.exp(-1*E357/R/T) * RLIGA**1.0 * PFET3**1.0 -1*A378 * T**n378 * np.exp(-1*E378/R/T) * RLIGA**1.0 * ADIO**1.0 -1*A399 * T**n399 * np.exp(-1*E399/R/T) * RLIGA**1.0 * KET**1.0,
-1*A21 * T**n21 * np.exp(-1*E21/R/T) * RLIGB**1.0 -1*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 -1*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 -1*A75 * T**n75 * np.exp(-1*E75/R/T) * RLIGB**1.0 * RLIGB**1.0 -1*A125 * T**n125 * np.exp(-1*E125/R/T) * RLIGB**1.0 * LIGH**1.0 -1*A146 * T**n146 * np.exp(-1*E146/R/T) * RLIGB**1.0 * PLIGH**1.0 -1*A167 * T**n167 * np.exp(-1*E167/R/T) * RLIGB**1.0 * PLIGM2**1.0 -1*A188 * T**n188 * np.exp(-1*E188/R/T) * RLIGB**1.0 * LIGM2**1.0 -1*A209 * T**n209 * np.exp(-1*E209/R/T) * RLIGB**1.0 * LIGM2**1.0 -1*A230 * T**n230 * np.exp(-1*E230/R/T) * RLIGB**1.0 * PFET3M2**1.0 -1*A251 * T**n251 * np.exp(-1*E251/R/T) * RLIGB**1.0 * ADIOM2**1.0 -1*A272 * T**n272 * np.exp(-1*E272/R/T) * RLIGB**1.0 * KETM2**1.0 -1*A293 * T**n293 * np.exp(-1*E293/R/T) * RLIGB**1.0 * C10H2**1.0 -1*A314 * T**n314 * np.exp(-1*E314/R/T) * RLIGB**1.0 * LIG**1.0 +1*A322 * T**n322 * np.exp(-1*E322/R/T) * RC3H5O2**1.0 * LIG**1.0 +1*A323 * T**n323 * np.exp(-1*E323/R/T) * PRFET3**1.0 * LIG**1.0 +1*A324 * T**n324 * np.exp(-1*E324/R/T) * RC3H7O2**1.0 * LIG**1.0 +1*A325 * T**n325 * np.exp(-1*E325/R/T) * RADIOM2**1.0 * LIG**1.0 +1*A326 * T**n326 * np.exp(-1*E326/R/T) * PRFET3M2**1.0 * LIG**1.0 +1*A327 * T**n327 * np.exp(-1*E327/R/T) * PRLIGH**1.0 * LIG**1.0 +1*A328 * T**n328 * np.exp(-1*E328/R/T) * RLIGM2B**1.0 * LIG**1.0 +1*A329 * T**n329 * np.exp(-1*E329/R/T) * RLIGM2A**1.0 * LIG**1.0 +1*A330 * T**n330 * np.exp(-1*E330/R/T) * RCH3**1.0 * LIG**1.0 +1*A331 * T**n331 * np.exp(-1*E331/R/T) * PRKETM2**1.0 * LIG**1.0 +1*A332 * T**n332 * np.exp(-1*E332/R/T) * RKET**1.0 * LIG**1.0 +1*A333 * T**n333 * np.exp(-1*E333/R/T) * PRADIO**1.0 * LIG**1.0 +1*A334 * T**n334 * np.exp(-1*E334/R/T) * RC3H3O**1.0 * LIG**1.0 -1*A335 * T**n335 * np.exp(-1*E335/R/T) * RLIGB**1.0 * LIG**1.0 +1*A335 * T**n335 * np.exp(-1*E335/R/T) * RLIGB**1.0 * LIG**1.0 +1*A336 * T**n336 * np.exp(-1*E336/R/T) * RLIGA**1.0 * LIG**1.0 +1*A337 * T**n337 * np.exp(-1*E337/R/T) * PRADIOM2**1.0 * LIG**1.0 +1*A338 * T**n338 * np.exp(-1*E338/R/T) * RMGUAI**1.0 * LIG**1.0 +1*A339 * T**n339 * np.exp(-1*E339/R/T) * OH**1.0 * LIG**1.0 +1*A340 * T**n340 * np.exp(-1*E340/R/T) * RCH3O**1.0 * LIG**1.0 +1*A341 * T**n341 * np.exp(-1*E341/R/T) * RPHENOL**1.0 * LIG**1.0 +1*A342 * T**n342 * np.exp(-1*E342/R/T) * RADIO**1.0 * LIG**1.0 -1*A356 * T**n356 * np.exp(-1*E356/R/T) * RLIGB**1.0 * PFET3**1.0 -1*A377 * T**n377 * np.exp(-1*E377/R/T) * RLIGB**1.0 * ADIO**1.0 -1*A398 * T**n398 * np.exp(-1*E398/R/T) * RLIGB**1.0 * KET**1.0,
-1*A12 * T**n12 * np.exp(-1*E12/R/T) * RLIGH**1.0 -1*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 -1*A79 * T**n79 * np.exp(-1*E79/R/T) * RLIGH**1.0 * RLIGH**1.0 +1*A112 * T**n112 * np.exp(-1*E112/R/T) * RC3H5O2**1.0 * LIGH**1.0 +1*A113 * T**n113 * np.exp(-1*E113/R/T) * PRFET3**1.0 * LIGH**1.0 +1*A114 * T**n114 * np.exp(-1*E114/R/T) * RC3H7O2**1.0 * LIGH**1.0 +1*A115 * T**n115 * np.exp(-1*E115/R/T) * RADIOM2**1.0 * LIGH**1.0 +1*A116 * T**n116 * np.exp(-1*E116/R/T) * PRFET3M2**1.0 * LIGH**1.0 +1*A117 * T**n117 * np.exp(-1*E117/R/T) * PRLIGH**1.0 * LIGH**1.0 +1*A118 * T**n118 * np.exp(-1*E118/R/T) * RLIGM2B**1.0 * LIGH**1.0 +1*A119 * T**n119 * np.exp(-1*E119/R/T) * RLIGM2A**1.0 * LIGH**1.0 +1*A120 * T**n120 * np.exp(-1*E120/R/T) * RCH3**1.0 * LIGH**1.0 +1*A121 * T**n121 * np.exp(-1*E121/R/T) * PRKETM2**1.0 * LIGH**1.0 +1*A122 * T**n122 * np.exp(-1*E122/R/T) * RKET**1.0 * LIGH**1.0 +1*A123 * T**n123 * np.exp(-1*E123/R/T) * PRADIO**1.0 * LIGH**1.0 +1*A124 * T**n124 * np.exp(-1*E124/R/T) * RC3H3O**1.0 * LIGH**1.0 +1*A125 * T**n125 * np.exp(-1*E125/R/T) * RLIGB**1.0 * LIGH**1.0 +1*A126 * T**n126 * np.exp(-1*E126/R/T) * RLIGA**1.0 * LIGH**1.0 +1*A127 * T**n127 * np.exp(-1*E127/R/T) * PRADIOM2**1.0 * LIGH**1.0 +1*A128 * T**n128 * np.exp(-1*E128/R/T) * RMGUAI**1.0 * LIGH**1.0 +1*A129 * T**n129 * np.exp(-1*E129/R/T) * OH**1.0 * LIGH**1.0 +1*A130 * T**n130 * np.exp(-1*E130/R/T) * RCH3O**1.0 * LIGH**1.0 +1*A131 * T**n131 * np.exp(-1*E131/R/T) * RPHENOL**1.0 * LIGH**1.0 +1*A132 * T**n132 * np.exp(-1*E132/R/T) * RADIO**1.0 * LIGH**1.0,
+1*A1 * T**n1 * np.exp(-1*E1/R/T) * LIGH**1.0 +1*A12 * T**n12 * np.exp(-1*E12/R/T) * RLIGH**1.0 -1*A15 * T**n15 * np.exp(-1*E15/R/T) * RLIGM2A**1.0 -2*A62 * T**n62 * np.exp(-1*E62/R/T) * RLIGM2A**2.0 -1*A119 * T**n119 * np.exp(-1*E119/R/T) * RLIGM2A**1.0 * LIGH**1.0 -1*A140 * T**n140 * np.exp(-1*E140/R/T) * RLIGM2A**1.0 * PLIGH**1.0 -1*A161 * T**n161 * np.exp(-1*E161/R/T) * RLIGM2A**1.0 * PLIGM2**1.0 +1*A175 * T**n175 * np.exp(-1*E175/R/T) * RC3H5O2**1.0 * LIGM2**1.0 +1*A176 * T**n176 * np.exp(-1*E176/R/T) * PRFET3**1.0 * LIGM2**1.0 +1*A177 * T**n177 * np.exp(-1*E177/R/T) * RC3H7O2**1.0 * LIGM2**1.0 +1*A178 * T**n178 * np.exp(-1*E178/R/T) * RADIOM2**1.0 * LIGM2**1.0 +1*A179 * T**n179 * np.exp(-1*E179/R/T) * PRFET3M2**1.0 * LIGM2**1.0 +1*A180 * T**n180 * np.exp(-1*E180/R/T) * PRLIGH**1.0 * LIGM2**1.0 +1*A181 * T**n181 * np.exp(-1*E181/R/T) * RLIGM2B**1.0 * LIGM2**1.0 -1*A182 * T**n182 * np.exp(-1*E182/R/T) * RLIGM2A**1.0 * LIGM2**1.0 +1*A182 * T**n182 * np.exp(-1*E182/R/T) * RLIGM2A**1.0 * LIGM2**1.0 +1*A183 * T**n183 * np.exp(-1*E183/R/T) * RCH3**1.0 * LIGM2**1.0 +1*A184 * T**n184 * np.exp(-1*E184/R/T) * PRKETM2**1.0 * LIGM2**1.0 +1*A185 * T**n185 * np.exp(-1*E185/R/T) * RKET**1.0 * LIGM2**1.0 +1*A186 * T**n186 * np.exp(-1*E186/R/T) * PRADIO**1.0 * LIGM2**1.0 +1*A187 * T**n187 * np.exp(-1*E187/R/T) * RC3H3O**1.0 * LIGM2**1.0 +1*A188 * T**n188 * np.exp(-1*E188/R/T) * RLIGB**1.0 * LIGM2**1.0 +1*A189 * T**n189 * np.exp(-1*E189/R/T) * RLIGA**1.0 * LIGM2**1.0 +1*A190 * T**n190 * np.exp(-1*E190/R/T) * PRADIOM2**1.0 * LIGM2**1.0 +1*A191 * T**n191 * np.exp(-1*E191/R/T) * RMGUAI**1.0 * LIGM2**1.0 +1*A192 * T**n192 * np.exp(-1*E192/R/T) * OH**1.0 * LIGM2**1.0 +1*A193 * T**n193 * np.exp(-1*E193/R/T) * RCH3O**1.0 * LIGM2**1.0 +1*A194 * T**n194 * np.exp(-1*E194/R/T) * RPHENOL**1.0 * LIGM2**1.0 +1*A195 * T**n195 * np.exp(-1*E195/R/T) * RADIO**1.0 * LIGM2**1.0 -1*A203 * T**n203 * np.exp(-1*E203/R/T) * RLIGM2A**1.0 * LIGM2**1.0 -1*A224 * T**n224 * np.exp(-1*E224/R/T) * RLIGM2A**1.0 * PFET3M2**1.0 -1*A245 * T**n245 * np.exp(-1*E245/R/T) * RLIGM2A**1.0 * ADIOM2**1.0 -1*A266 * T**n266 * np.exp(-1*E266/R/T) * RLIGM2A**1.0 * KETM2**1.0 -1*A287 * T**n287 * np.exp(-1*E287/R/T) * RLIGM2A**1.0 * C10H2**1.0 -1*A308 * T**n308 * np.exp(-1*E308/R/T) * RLIGM2A**1.0 * LIG**1.0 -1*A329 * T**n329 * np.exp(-1*E329/R/T) * RLIGM2A**1.0 * LIG**1.0 -1*A350 * T**n350 * np.exp(-1*E350/R/T) * RLIGM2A**1.0 * PFET3**1.0 -1*A371 * T**n371 * np.exp(-1*E371/R/T) * RLIGM2A**1.0 * ADIO**1.0 -1*A392 * T**n392 * np.exp(-1*E392/R/T) * RLIGM2A**1.0 * KET**1.0,
-1*A17 * T**n17 * np.exp(-1*E17/R/T) * RLIGM2B**1.0 -2*A61 * T**n61 * np.exp(-1*E61/R/T) * RLIGM2B**2.0 -1*A118 * T**n118 * np.exp(-1*E118/R/T) * RLIGM2B**1.0 * LIGH**1.0 -1*A139 * T**n139 * np.exp(-1*E139/R/T) * RLIGM2B**1.0 * PLIGH**1.0 -1*A160 * T**n160 * np.exp(-1*E160/R/T) * RLIGM2B**1.0 * PLIGM2**1.0 -1*A181 * T**n181 * np.exp(-1*E181/R/T) * RLIGM2B**1.0 * LIGM2**1.0 +1*A196 * T**n196 * np.exp(-1*E196/R/T) * RC3H5O2**1.0 * LIGM2**1.0 +1*A197 * T**n197 * np.exp(-1*E197/R/T) * PRFET3**1.0 * LIGM2**1.0 +1*A198 * T**n198 * np.exp(-1*E198/R/T) * RC3H7O2**1.0 * LIGM2**1.0 +1*A199 * T**n199 * np.exp(-1*E199/R/T) * RADIOM2**1.0 * LIGM2**1.0 +1*A200 * T**n200 * np.exp(-1*E200/R/T) * PRFET3M2**1.0 * LIGM2**1.0 +1*A201 * T**n201 * np.exp(-1*E201/R/T) * PRLIGH**1.0 * LIGM2**1.0 -1*A202 * T**n202 * np.exp(-1*E202/R/T) * RLIGM2B**1.0 * LIGM2**1.0 +1*A202 * T**n202 * np.exp(-1*E202/R/T) * RLIGM2B**1.0 * LIGM2**1.0 +1*A203 * T**n203 * np.exp(-1*E203/R/T) * RLIGM2A**1.0 * LIGM2**1.0 +1*A204 * T**n204 * np.exp(-1*E204/R/T) * RCH3**1.0 * LIGM2**1.0 +1*A205 * T**n205 * np.exp(-1*E205/R/T) * PRKETM2**1.0 * LIGM2**1.0 +1*A206 * T**n206 * np.exp(-1*E206/R/T) * RKET**1.0 * LIGM2**1.0 +1*A207 * T**n207 * np.exp(-1*E207/R/T) * PRADIO**1.0 * LIGM2**1.0 +1*A208 * T**n208 * np.exp(-1*E208/R/T) * RC3H3O**1.0 * LIGM2**1.0 +1*A209 * T**n209 * np.exp(-1*E209/R/T) * RLIGB**1.0 * LIGM2**1.0 +1*A210 * T**n210 * np.exp(-1*E210/R/T) * RLIGA**1.0 * LIGM2**1.0 +1*A211 * T**n211 * np.exp(-1*E211/R/T) * PRADIOM2**1.0 * LIGM2**1.0 +1*A212 * T**n212 * np.exp(-1*E212/R/T) * RMGUAI**1.0 * LIGM2**1.0 +1*A213 * T**n213 * np.exp(-1*E213/R/T) * OH**1.0 * LIGM2**1.0 +1*A214 * T**n214 * np.exp(-1*E214/R/T) * RCH3O**1.0 * LIGM2**1.0 +1*A215 * T**n215 * np.exp(-1*E215/R/T) * RPHENOL**1.0 * LIGM2**1.0 +1*A216 * T**n216 * np.exp(-1*E216/R/T) * RADIO**1.0 * LIGM2**1.0 -1*A223 * T**n223 * np.exp(-1*E223/R/T) * RLIGM2B**1.0 * PFET3M2**1.0 -1*A244 * T**n244 * np.exp(-1*E244/R/T) * RLIGM2B**1.0 * ADIOM2**1.0 -1*A265 * T**n265 * np.exp(-1*E265/R/T) * RLIGM2B**1.0 * KETM2**1.0 -1*A286 * T**n286 * np.exp(-1*E286/R/T) * RLIGM2B**1.0 * C10H2**1.0 -1*A307 * T**n307 * np.exp(-1*E307/R/T) * RLIGM2B**1.0 * LIG**1.0 -1*A328 * T**n328 * np.exp(-1*E328/R/T) * RLIGM2B**1.0 * LIG**1.0 -1*A349 * T**n349 * np.exp(-1*E349/R/T) * RLIGM2B**1.0 * PFET3**1.0 -1*A370 * T**n370 * np.exp(-1*E370/R/T) * RLIGM2B**1.0 * ADIO**1.0 -1*A391 * T**n391 * np.exp(-1*E391/R/T) * RLIGM2B**1.0 * KET**1.0,
+1*A14 * T**n14 * np.exp(-1*E14/R/T) * RADIOM2**1.0 +1*A17 * T**n17 * np.exp(-1*E17/R/T) * RLIGM2B**1.0 -2*A63 * T**n63 * np.exp(-1*E63/R/T) * RMGUAI**2.0 -1*A128 * T**n128 * np.exp(-1*E128/R/T) * RMGUAI**1.0 * LIGH**1.0 -1*A149 * T**n149 * np.exp(-1*E149/R/T) * RMGUAI**1.0 * PLIGH**1.0 -1*A170 * T**n170 * np.exp(-1*E170/R/T) * RMGUAI**1.0 * PLIGM2**1.0 -1*A191 * T**n191 * np.exp(-1*E191/R/T) * RMGUAI**1.0 * LIGM2**1.0 -1*A212 * T**n212 * np.exp(-1*E212/R/T) * RMGUAI**1.0 * LIGM2**1.0 -1*A233 * T**n233 * np.exp(-1*E233/R/T) * RMGUAI**1.0 * PFET3M2**1.0 -1*A254 * T**n254 * np.exp(-1*E254/R/T) * RMGUAI**1.0 * ADIOM2**1.0 -1*A275 * T**n275 * np.exp(-1*E275/R/T) * RMGUAI**1.0 * KETM2**1.0 -1*A296 * T**n296 * np.exp(-1*E296/R/T) * RMGUAI**1.0 * C10H2**1.0 -1*A317 * T**n317 * np.exp(-1*E317/R/T) * RMGUAI**1.0 * LIG**1.0 -1*A338 * T**n338 * np.exp(-1*E338/R/T) * RMGUAI**1.0 * LIG**1.0 -1*A359 * T**n359 * np.exp(-1*E359/R/T) * RMGUAI**1.0 * PFET3**1.0 -1*A380 * T**n380 * np.exp(-1*E380/R/T) * RMGUAI**1.0 * ADIO**1.0 -1*A401 * T**n401 * np.exp(-1*E401/R/T) * RMGUAI**1.0 * KET**1.0,
+1*A19 * T**n19 * np.exp(-1*E19/R/T) * RADIO**1.0 +1*A21 * T**n21 * np.exp(-1*E21/R/T) * RLIGB**1.0 -1*A80 * T**n80 * np.exp(-1*E80/R/T) * RPHENOX**1.0 * RPHENOL**1.0 -1*A131 * T**n131 * np.exp(-1*E131/R/T) * RPHENOL**1.0 * LIGH**1.0 -1*A152 * T**n152 * np.exp(-1*E152/R/T) * RPHENOL**1.0 * PLIGH**1.0 -1*A173 * T**n173 * np.exp(-1*E173/R/T) * RPHENOL**1.0 * PLIGM2**1.0 -1*A194 * T**n194 * np.exp(-1*E194/R/T) * RPHENOL**1.0 * LIGM2**1.0 -1*A215 * T**n215 * np.exp(-1*E215/R/T) * RPHENOL**1.0 * LIGM2**1.0 -1*A236 * T**n236 * np.exp(-1*E236/R/T) * RPHENOL**1.0 * PFET3M2**1.0 -1*A257 * T**n257 * np.exp(-1*E257/R/T) * RPHENOL**1.0 * ADIOM2**1.0 -1*A278 * T**n278 * np.exp(-1*E278/R/T) * RPHENOL**1.0 * KETM2**1.0 -1*A299 * T**n299 * np.exp(-1*E299/R/T) * RPHENOL**1.0 * C10H2**1.0 -1*A320 * T**n320 * np.exp(-1*E320/R/T) * RPHENOL**1.0 * LIG**1.0 -1*A341 * T**n341 * np.exp(-1*E341/R/T) * RPHENOL**1.0 * LIG**1.0 -1*A362 * T**n362 * np.exp(-1*E362/R/T) * RPHENOL**1.0 * PFET3**1.0 -1*A383 * T**n383 * np.exp(-1*E383/R/T) * RPHENOL**1.0 * ADIO**1.0 -1*A404 * T**n404 * np.exp(-1*E404/R/T) * RPHENOL**1.0 * KET**1.0,
+1*A4 * T**n4 * np.exp(-1*E4/R/T) * LIG**1.0 +1*A5 * T**n5 * np.exp(-1*E5/R/T) * PLIG**1.0 -1*A11 * T**n11 * np.exp(-1*E11/R/T) * RPHENOX**1.0 +1*A20 * T**n20 * np.exp(-1*E20/R/T) * RLIGA**1.0 +1*A22 * T**n22 * np.exp(-1*E22/R/T) * PRFET3**1.0 +1*A28 * T**n28 * np.exp(-1*E28/R/T) * C10H2M4**1.0 +1*A29 * T**n29 * np.exp(-1*E29/R/T) * C10H2M2**1.0 -1*A36 * T**n36 * np.exp(-1*E36/R/T) * ADIOM2**1.0 * RPHENOX**1.0 -1*A37 * T**n37 * np.exp(-1*E37/R/T) * KETM2**1.0 * RPHENOX**1.0 -1*A38 * T**n38 * np.exp(-1*E38/R/T) * KETDM2**1.0 * RPHENOX**1.0 -1*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 -1*A40 * T**n40 * np.exp(-1*E40/R/T) * ADIO**1.0 * RPHENOX**1.0 -1*A41 * T**n41 * np.exp(-1*E41/R/T) * KET**1.0 * RPHENOX**1.0 -1*A42 * T**n42 * np.exp(-1*E42/R/T) * KETD**1.0 * RPHENOX**1.0 -1*A43 * T**n43 * np.exp(-1*E43/R/T) * COUMARYL**1.0 * RPHENOX**1.0 -1*A50 * T**n50 * np.exp(-1*E50/R/T) * C10H2M4**1.0 * RPHENOX**1.0 -1*A51 * T**n51 * np.exp(-1*E51/R/T) * C10H2M2**1.0 * RPHENOX**1.0 -1*A52 * T**n52 * np.exp(-1*E52/R/T) * RCH3O**1.0 * RPHENOX**1.0 -1*A55 * T**n55 * np.exp(-1*E55/R/T) * RPHENOX**1.0 * RCH3**1.0 -1*A73 * T**n73 * np.exp(-1*E73/R/T) * RPHENOX**1.0 * RLIGB**1.0 -1*A80 * T**n80 * np.exp(-1*E80/R/T) * RPHENOX**1.0 * RPHENOL**1.0 -1*A81 * T**n81 * np.exp(-1*E81/R/T) * RPHENOX**1.0 * RC3H3O**1.0 -1*A82 * T**n82 * np.exp(-1*E82/R/T) * RPHENOX**1.0 * CHAR**1.0,
+1*A2 * T**n2 * np.exp(-1*E2/R/T) * LIGM2**1.0 +1*A3 * T**n3 * np.exp(-1*E3/R/T) * PLIGM2**1.0 -1*A10 * T**n10 * np.exp(-1*E10/R/T) * RPHENOXM2**1.0 +1*A15 * T**n15 * np.exp(-1*E15/R/T) * RLIGM2A**1.0 +1*A16 * T**n16 * np.exp(-1*E16/R/T) * PRLIGM2A**1.0 +1*A18 * T**n18 * np.exp(-1*E18/R/T) * PRFET3M2**1.0 -1*A32 * T**n32 * np.exp(-1*E32/R/T) * ADIOM2**1.0 * RPHENOXM2**1.0 -1*A33 * T**n33 * np.exp(-1*E33/R/T) * KETM2**1.0 * RPHENOXM2**1.0 -1*A34 * T**n34 * np.exp(-1*E34/R/T) * KETDM2**1.0 * RPHENOXM2**1.0 -1*A35 * T**n35 * np.exp(-1*E35/R/T) * SYNAPYL**1.0 * RPHENOXM2**1.0 -1*A44 * T**n44 * np.exp(-1*E44/R/T) * ADIO**1.0 * RPHENOXM2**1.0 -1*A45 * T**n45 * np.exp(-1*E45/R/T) * KET**1.0 * RPHENOXM2**1.0 -1*A46 * T**n46 * np.exp(-1*E46/R/T) * KETD**1.0 * RPHENOXM2**1.0 -1*A47 * T**n47 * np.exp(-1*E47/R/T) * COUMARYL**1.0 * RPHENOXM2**1.0 -1*A48 * T**n48 * np.exp(-1*E48/R/T) * C10H2M4**1.0 * RPHENOXM2**1.0 -1*A49 * T**n49 * np.exp(-1*E49/R/T) * C10H2M2**1.0 * RPHENOXM2**1.0 -1*A53 * T**n53 * np.exp(-1*E53/R/T) * RCH3O**1.0 * RPHENOXM2**1.0 -1*A54 * T**n54 * np.exp(-1*E54/R/T) * RPHENOXM2**1.0 * RCH3**1.0 -1*A83 * T**n83 * np.exp(-1*E83/R/T) * RPHENOXM2**1.0 * CHAR**1.0,
+1*A23 * T**n23 * np.exp(-1*E23/R/T) * RADIOM2**1.0 -1*A35 * T**n35 * np.exp(-1*E35/R/T) * SYNAPYL**1.0 * RPHENOXM2**1.0 -1*A39 * T**n39 * np.exp(-1*E39/R/T) * SYNAPYL**1.0 * RPHENOX**1.0 -1*A87 * T**n87 * np.exp(-1*E87/R/T) * SYNAPYL**1.0,
+1*A90 * T**n90 * np.exp(-1*E90/R/T) * ADIO**1.0,
+1*A84 * T**n84 * np.exp(-1*E84/R/T) * ADIOM2**1.0,
+1*A89 * T**n89 * np.exp(-1*E89/R/T) * COUMARYL**1.0,
+1*A91 * T**n91 * np.exp(-1*E91/R/T) * KET**1.0,
+1*A92 * T**n92 * np.exp(-1*E92/R/T) * KETD**1.0,
+1*A86 * T**n86 * np.exp(-1*E86/R/T) * KETDM2**1.0,
+1*A85 * T**n85 * np.exp(-1*E85/R/T) * KETM2**1.0,
+1*A88 * T**n88 * np.exp(-1*E88/R/T) * MGUAI**1.0,
+1*A93 * T**n93 * np.exp(-1*E93/R/T) * PHENOL**1.0,
+1*A87 * T**n87 * np.exp(-1*E87/R/T) * SYNAPYL**1.0]
return dydt
def run():
start = time.time()
R = 8.314
# A, n, E values
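    # Every rate constant in the ODE right-hand sides above follows the
    # modified Arrhenius form k(T) = A * T**n * exp(-E / (R*T)), with E in
    # J/mol and R in J/(mol*K). A minimal helper, shown for illustration only
    # (the generated code inlines the expression rather than calling one):
    #
    #   def arrhenius(A, n, E, T):
    #       return A * T**n * np.exp(-E / (R * T))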
A0 = 1.00E+13
n0 = 0
E0 = 163254
A1 = 1.00E+13
n1 = 0
E1 = 163254
A2 = 1.00E+13
n2 = 0
E2 = 163254
A3 = 1.00E+13
n3 = 0
E3 = 163254
A4 = 1.00E+13
n4 = 0
E4 = 184184
A5 = 1.00E+13
n5 = 0
E5 = 188370
A6 = 1.00E+13
n6 = 0
E6 = 171626
A7 = 1.00E+13
n7 = 0
E7 = 179998
A8 = 1.00E+13
n8 = 0
E8 = 167440
A9 = 1.00E+13
n9 = 0
E9 = 121394
A10 = 4.00E+10
n10 = 0
E10 = 209300
A11 = 4.00E+10
n11 = 0
E11 = 209300
A12 = 1.00E+13
n12 = 0
E12 = 133952
A13 = 1.00E+13
n13 = 0
E13 = 133952
A14 = 1.00E+13
n14 = 0
E14 = 133952
A15 = 5.00E+12
n15 = 0
E15 = 133952
A16 = 5.00E+12
n16 = 0
E16 = 133952
A17 = 1.00E+13
n17 = 0
E17 = 163254
A18 = 1.00E+13
n18 = 0
E18 = 133952
A19 = 1.00E+13
n19 = 0
E19 = 163254
A20 = 1.00E+13
n20 = 0
E20 = 138138
A21 = 1.00E+13
n21 = 0
E21 = 163254
A22 = 1.00E+13
n22 = 0
E22 = 138138
A23 = 3.00E+11
n23 = 0
E23 = 104650
A24 = 3.00E+11
n24 = 0
E24 = 104650
A25 = 3.00E+11
n25 = 0
E25 = 104650
A26 = 3.00E+11
n26 = 0
E26 = 113022
A27 = 1.00E+13
n27 = 0
E27 = 129766
A28 = 1.00E+13
n28 = 0
E28 = 196742
A29 = 1.00E+13
n29 = 0
E29 = 196742
A30 = 1.00E+08
n30 = 0
E30 = 121394
A31 = 1.00E+09
n31 = 0
E31 = 108836
A32 = 1.00E+09
n32 = 0
E32 = 121394
A33 = 1.00E+09
n33 = 0
E33 = 121394
A34 = 1.00E+09
n34 = 0
E34 = 121394
A35 = 1.00E+09
n35 = 0
E35 = 121394
A36 = 1.00E+09
n36 = 0
E36 = 121394
A37 = 1.00E+09
n37 = 0
E37 = 121394
A38 = 1.00E+09
n38 = 0
E38 = 121394
A39 = 1.00E+09
n39 = 0
E39 = 121394
A40 = 1.00E+09
n40 = 0
E40 = 121394
A41 = 1.00E+09
n41 = 0
E41 = 121394
A42 = 1.00E+09
n42 = 0
E42 = 121394
A43 = 1.00E+09
n43 = 0
E43 = 121394
A44 = 1.00E+09
n44 = 0
E44 = 121394
A45 = 1.00E+09
n45 = 0
E45 = 121394
A46 = 1.00E+09
n46 = 0
E46 = 121394
A47 = 1.00E+09
n47 = 0
E47 = 121394
A48 = 1.00E+09
n48 = 0
E48 = 115115
A49 = 1.00E+09
n49 = 0
E49 = 115115
A50 = 1.00E+09
n50 = 0
E50 = 117208
A51 = 1.00E+09
n51 = 0
E51 = 117208
A52 = 1.00E+08
n52 = 0
E52 = 54418
A53 = 1.00E+08
n53 = 0
E53 = 54418
A54 = 1.00E+08
n54 = 0
E54 = 48139
A55 = 1.00E+08
n55 = 0
E55 = 48139
A56 = 1.00E+08
n56 = 0
E56 = 12558
A57 = 1.00E+08
n57 = 0
E57 = 12558
A58 = 1.00E+08
n58 = 0
E58 = 52325
A59 = 1.00E+08
n59 = 0
E59 = 52325
A60 = 3.16E+07
n60 = 0
E60 = 83720
A61 = 3.16E+07
n61 = 0
E61 = 83720
A62 = 3.16E+07
n62 = 0
E62 = 83720
A63 = 3.16E+07
n63 = 0
E63 = 83720
A64 = 3.16E+07
n64 = 0
E64 = 83720
A65 = 3.16E+07
n65 = 0
E65 = 83720
A66 = 1.00E+08
n66 = 0
E66 = 12558
A67 = 1.00E+08
n67 = 0
E67 = 12558
A68 = 1.00E+08
n68 = 0
E68 = 12558
A69 = 1.00E+08
n69 = 0
E69 = 12558
A70 = 1.00E+08
n70 = 0
E70 = 12558
A71 = 1.00E+08
n71 = 0
E71 = 48139
A72 = 1.00E+08
n72 = 0
E72 = 48139
A73 = 3.16E+07
n73 = 0
E73 = 83720
A74 = 3.16E+07
n74 = 0
E74 = 92092
A75 = 3.16E+07
n75 = 0
E75 = 83720
A76 = 3.16E+07
n76 = 0
E76 = 83720
A77 = 3.16E+07
n77 = 0
E77 = 83720
A78 = 3.16E+07
n78 = 0
E78 = 83720
A79 = 3.16E+07
n79 = 0
E79 = 83720
A80 = 5.00E+07
n80 = 0
E80 = 83720
A81 = 1.00E+08
n81 = 0
E81 = 48139
A82 = 3.00E+07
n82 = 0
E82 = 104650
A83 = 3.00E+07
n83 = 0
E83 = 104650
A84 = 1
n84 = 1
E84 = 62790
A85 = 1
n85 = 1
E85 = 62790
A86 = 1
n86 = 1
E86 = 58604
A87 = 1
n87 = 1
E87 = 62790
A88 = 1
n88 = 1
E88 = 66976
A89 = 1
n89 = 1
E89 = 83720
A90 = 1
n90 = 1
E90 = 66976
A91 = 1
n91 = 1
E91 = 66976
A92 = 1
n92 = 1
E92 = 83720
A93 = 1
n93 = 1
E93 = 58604
A94 = 2.00E+08
n94 = 0
E94 = 209300
A95 = 1.00E+07
n95 = 0
E95 = 138138
A96 = 1.00E+10
n96 = 0
E96 = 209300
A97 = 5.00E+08
n97 = 0
E97 = 205114
A98 = 5.00E+08
n98 = 0
E98 = 0
A99 = 5.00E+08
n99 = 0
E99 = 0
A100 = 5.00E+08
n100 = 0
E100 = 0
A101 = 1.00E+13
n101 = 0
E101 = 125580
A102 = 5.00E+08
n102 = 0
E102 = 209300
A103 = 5.00E+08
n103 = 0
E103 = 209300
A104 = 1E13
n104 = 0
E104 = 154812
A105 = 1E13
n105 = 0
E105 = 168458
A106 = 1E13
n106 = 0
E106 = 154812
A107 = 1E9
n107 = 0
E107 = 108836
A108 = 1E8
n108 = 0
E108 = 121394
A109 = 3.16E7
n109 = 0
E109 = 92092
A110 = 3.16E7
n110 = 0
E110 = 83720
A111 = 1E7
n111 = 0
E111 = 138138
A112 = 2*10**8
n112 = 0
E112 = 71162-4186
A113 = 2*10**8
n113 = 0
E113 = 54418-4186
A114 = 2*10**8
n114 = 0
E114 = 62790-4186
A115 = 2*10**8
n115 = 0
E115 = 54418-4186
A116 = 2*10**8
n116 = 0
E116 = 54418-4186
A117 = 2*10**8.5
n117 = 0
E117 = 41860-4186
A118 = 2*10**8
n118 = 0
E118 = 54418-4186
A119 = 2*10**8
n119 = 0
E119 = 54418-4186
A120 = 2*10**8.5
n120 = 0
E120 = 48139-4186
A121 = 2*10**8
n121 = 0
E121 = 54418-4186
A122 = 2*10**8.5
n122 = 0
E122 = 54418-4186
A123 = 2*10**8
n123 = 0
E123 = 46046-4186
A124 = 2*10**8
n124 = 0
E124 = 83720-4186
A125 = 2*10**8
n125 = 0
E125 = 56511-4186
A126 = 2*10**8
n126 = 0
E126 = 56511-4186
A127 = 2*10**8.5
n127 = 0
E127 = 41860-4186
A128 = 2*10**8
n128 = 0
E128 = 54418-4186
A129 = 2*10**9.5
n129 = 0
E129 = 14651-4186
A130 = 2*10**8.5
n130 = 0
E130 = 33069-4186
A131 = 2*10**8.5
n131 = 0
E131 = 54418-4186
A132 = 2*10**8
n132 = 0
E132 = 54418-4186
A133 = 2*10**8
n133 = 0
E133 = 71162-4186
A134 = 2*10**8
n134 = 0
E134 = 54418-4186
A135 = 2*10**8
n135 = 0
E135 = 62790-4186
A136 = 2*10**8
n136 = 0
E136 = 54418-4186
A137 = 2*10**8
n137 = 0
E137 = 54418-4186
A138 = 2*10**8.5
n138 = 0
E138 = 41860-4186
A139 = 2*10**8
n139 = 0
E139 = 54418-4186
A140 = 2*10**8
n140 = 0
E140 = 54418-4186
A141 = 2*10**8.5
n141 = 0
E141 = 48139-4186
A142 = 2*10**8
n142 = 0
E142 = 54418-4186
A143 = 2*10**8.5
n143 = 0
E143 = 54418-4186
A144 = 2*10**8
n144 = 0
E144 = 46046-4186
A145 = 2*10**8
n145 = 0
E145 = 83720-4186
A146 = 2*10**8
n146 = 0
E146 = 56511-4186
A147 = 2*10**8
n147 = 0
E147 = 56511-4186
A148 = 2*10**8.5
n148 = 0
E148 = 41860-4186
A149 = 2*10**8
n149 = 0
E149 = 54418-4186
A150 = 2*10**9.5
n150 = 0
E150 = 14651-4186
A151 = 2*10**8.5
n151 = 0
E151 = 33069-4186
A152 = 2*10**8.5
n152 = 0
E152 = 54418-4186
A153 = 2*10**8
n153 = 0
E153 = 54418-4186
A154 = 2*10**8
n154 = 0
E154 = 71162-4186
A155 = 2*10**8
n155 = 0
E155 = 54418-4186
A156 = 2*10**8
n156 = 0
E156 = 62790-4186
A157 = 2*10**8
n157 = 0
E157 = 54418-4186
A158 = 2*10**8
n158 = 0
E158 = 54418-4186
A159 = 2*10**8.5
n159 = 0
E159 = 41860-4186
A160 = 2*10**8
n160 = 0
E160 = 54418-4186
A161 = 2*10**8
n161 = 0
E161 = 54418-4186
A162 = 2*10**8.5
n162 = 0
E162 = 48139-4186
A163 = 2*10**8
n163 = 0
E163 = 54418-4186
A164 = 2*10**8.5
n164 = 0
E164 = 54418-4186
A165 = 2*10**8
n165 = 0
E165 = 46046-4186
A166 = 2*10**8
n166 = 0
E166 = 83720-4186
A167 = 2*10**8
n167 = 0
E167 = 56511-4186
A168 = 2*10**8
n168 = 0
E168 = 56511-4186
A169 = 2*10**8.5
n169 = 0
E169 = 41860-4186
A170 = 2*10**8
n170 = 0
E170 = 54418-4186
A171 = 2*10**9.5
n171 = 0
E171 = 14651-4186
A172 = 2*10**8.5
n172 = 0
E172 = 33069-4186
A173 = 2*10**8.5
n173 = 0
E173 = 54418-4186
A174 = 2*10**8
n174 = 0
E174 = 54418-4186
A175 = 1*10**8
n175 = 0
E175 = 71162-4186
A176 = 1*10**8
n176 = 0
E176 = 54418-4186
A177 = 1*10**8
n177 = 0
E177 = 62790-4186
A178 = 1*10**8
n178 = 0
E178 = 54418-4186
A179 = 1*10**8
n179 = 0
E179 = 54418-4186
A180 = 1*10**8.5
n180 = 0
E180 = 41860-4186
A181 = 1*10**8
n181 = 0
E181 = 54418-4186
A182 = 1*10**8
n182 = 0
E182 = 54418-4186
A183 = 1*10**8.5
n183 = 0
E183 = 48139-4186
A184 = 1*10**8
n184 = 0
E184 = 54418-4186
A185 = 1*10**8.5
n185 = 0
E185 = 54418-4186
A186 = 1*10**8
n186 = 0
E186 = 46046-4186
A187 = 1*10**8
n187 = 0
E187 = 83720-4186
A188 = 1*10**8
n188 = 0
E188 = 56511-4186
A189 = 1*10**8
n189 = 0
E189 = 56511-4186
A190 = 1*10**8.5
n190 = 0
E190 = 41860-4186
A191 = 1*10**8
n191 = 0
E191 = 54418-4186
A192 = 1*10**9.5
n192 = 0
E192 = 14651-4186
A193 = 1*10**8.5
n193 = 0
E193 = 33069-4186
A194 = 1*10**8.5
n194 = 0
E194 = 54418-4186
A195 = 1*10**8
n195 = 0
E195 = 54418-4186
A196 = 1*10**8
n196 = 0
E196 = 71162-4186
A197 = 1*10**8
n197 = 0
E197 = 54418-4186
A198 = 1*10**8
n198 = 0
E198 = 62790-4186
A199 = 1*10**8
n199 = 0
E199 = 54418-4186
A200 = 1*10**8
n200 = 0
E200 = 54418-4186
A201 = 1*10**8.5
n201 = 0
E201 = 41860-4186
A202 = 1*10**8
n202 = 0
E202 = 54418-4186
A203 = 1*10**8
n203 = 0
E203 = 54418-4186
A204 = 1*10**8.5
n204 = 0
E204 = 48139-4186
A205 = 1*10**8
n205 = 0
E205 = 54418-4186
A206 = 1*10**8.5
n206 = 0
E206 = 54418-4186
A207 = 1*10**8
n207 = 0
E207 = 46046-4186
A208 = 1*10**8
n208 = 0
E208 = 83720-4186
A209 = 1*10**8
n209 = 0
E209 = 56511-4186
A210 = 1*10**8
n210 = 0
E210 = 56511-4186
A211 = 1*10**8.5
n211 = 0
E211 = 41860-4186
A212 = 1*10**8
n212 = 0
E212 = 54418-4186
A213 = 1*10**9.5
n213 = 0
E213 = 14651-4186
A214 = 1*10**8.5
n214 = 0
E214 = 33069-4186
A215 = 1*10**8.5
n215 = 0
E215 = 54418-4186
A216 = 1*10**8
n216 = 0
E216 = 54418-4186
A217 = 2*10**8
n217 = 0
E217 = 71162-4186
A218 = 2*10**8
n218 = 0
E218 = 54418-4186
A219 = 2*10**8
n219 = 0
E219 = 62790-4186
A220 = 2*10**8
n220 = 0
E220 = 54418-4186
A221 = 2*10**8
n221 = 0
E221 = 54418-4186
A222 = 2*10**8.5
n222 = 0
E222 = 41860-4186
A223 = 2*10**8
n223 = 0
E223 = 54418-4186
A224 = 2*10**8
n224 = 0
E224 = 54418-4186
A225 = 2*10**8.5
n225 = 0
E225 = 48139-4186
A226 = 2*10**8
n226 = 0
E226 = 54418-4186
A227 = 2*10**8.5
n227 = 0
E227 = 54418-4186
A228 = 2*10**8
n228 = 0
E228 = 46046-4186
A229 = 2*10**8
n229 = 0
E229 = 83720-4186
A230 = 2*10**8
n230 = 0
E230 = 56511-4186
A231 = 2*10**8
n231 = 0
E231 = 56511-4186
A232 = 2*10**8.5
n232 = 0
E232 = 41860-4186
A233 = 2*10**8
n233 = 0
E233 = 54418-4186
A234 = 2*10**9.5
n234 = 0
E234 = 14651-4186
A235 = 2*10**8.5
n235 = 0
E235 = 33069-4186
A236 = 2*10**8.5
n236 = 0
E236 = 54418-4186
A237 = 2*10**8
n237 = 0
E237 = 54418-4186
A238 = 1*10**8
n238 = 0
E238 = 71162-4186
A239 = 1*10**8
n239 = 0
E239 = 54418-4186
A240 = 1*10**8
n240 = 0
E240 = 62790-4186
A241 = 1*10**8
n241 = 0
E241 = 54418-4186
A242 = 1*10**8
n242 = 0
E242 = 54418-4186
A243 = 1*10**8.5
n243 = 0
E243 = 41860-4186
A244 = 1*10**8
n244 = 0
E244 = 54418-4186
A245 = 1*10**8
n245 = 0
E245 = 54418-4186
A246 = 1*10**8.5
n246 = 0
E246 = 48139-4186
A247 = 1*10**8
n247 = 0
E247 = 54418-4186
A248 = 1*10**8.5
n248 = 0
E248 = 54418-4186
A249 = 1*10**8
n249 = 0
E249 = 46046-4186
A250 = 1*10**8
n250 = 0
E250 = 83720-4186
A251 = 1*10**8
n251 = 0
E251 = 56511-4186
A252 = 1*10**8
n252 = 0
E252 = 56511-4186
A253 = 1*10**8.5
n253 = 0
E253 = 41860-4186
A254 = 1*10**8
n254 = 0
E254 = 54418-4186
A255 = 1*10**9.5
n255 = 0
E255 = 14651-4186
A256 = 1*10**8.5
n256 = 0
E256 = 33069-4186
A257 = 1*10**8.5
n257 = 0
E257 = 54418-4186
A258 = 1*10**8
n258 = 0
E258 = 54418-4186
A259 = 1*10**8
n259 = 0
E259 = 71162-4186
A260 = 1*10**8
n260 = 0
E260 = 54418-4186
A261 = 1*10**8
n261 = 0
E261 = 62790-4186
A262 = 1*10**8
n262 = 0
E262 = 54418-4186
A263 = 1*10**8
n263 = 0
E263 = 54418-4186
A264 = 1*10**8.5
n264 = 0
E264 = 41860-4186
A265 = 1*10**8
n265 = 0
E265 = 54418-4186
A266 = 1*10**8
n266 = 0
E266 = 54418-4186
A267 = 1*10**8.5
n267 = 0
E267 = 48139-4186
A268 = 1*10**8
n268 = 0
E268 = 54418-4186
A269 = 1*10**8.5
n269 = 0
E269 = 54418-4186
A270 = 1*10**8
n270 = 0
E270 = 46046-4186
A271 = 1*10**8
n271 = 0
E271 = 83720-4186
A272 = 1*10**8
n272 = 0
E272 = 56511-4186
A273 = 1*10**8
n273 = 0
E273 = 56511-4186
A274 = 1*10**8.5
n274 = 0
E274 = 41860-4186
A275 = 1*10**8
n275 = 0
E275 = 54418-4186
A276 = 1*10**9.5
n276 = 0
E276 = 14651-4186
A277 = 1*10**8.5
n277 = 0
E277 = 33069-4186
A278 = 1*10**8.5
n278 = 0
E278 = 54418-4186
A279 = 1*10**8
n279 = 0
E279 = 54418-4186
    # Reactions 280-300 (radical reactions with C10H2): each activation
    # energy is the base value plus 20930 J/mol.
    A280 = 1*10**8
    n280 = 0
    E280 = 71162 + 20930
    A281 = 1*10**8
    n281 = 0
    E281 = 54418 + 20930
    A282 = 1*10**8
    n282 = 0
    E282 = 62790 + 20930
    A283 = 1*10**8
    n283 = 0
    E283 = 54418 + 20930
    A284 = 1*10**8
    n284 = 0
    E284 = 54418 + 20930
    A285 = 1*10**8.5
    n285 = 0
    E285 = 41860 + 20930
    A286 = 1*10**8
    n286 = 0
    E286 = 54418 + 20930
    A287 = 1*10**8
    n287 = 0
    E287 = 54418 + 20930
    A288 = 1*10**8.5
    n288 = 0
    E288 = 48139 + 20930
    A289 = 1*10**8
    n289 = 0
    E289 = 54418 + 20930
    A290 = 1*10**8.5
    n290 = 0
    E290 = 54418 + 20930
    A291 = 1*10**8
    n291 = 0
    E291 = 46046 + 20930
    A292 = 1*10**8
    n292 = 0
    E292 = 83720 + 20930
    A293 = 1*10**8
    n293 = 0
    E293 = 56511 + 20930
    A294 = 1*10**8
    n294 = 0
    E294 = 56511 + 20930
    A295 = 1*10**8.5
    n295 = 0
    E295 = 41860 + 20930
    A296 = 1*10**8
    n296 = 0
    E296 = 54418 + 20930
    A297 = 1*10**9.5
    n297 = 0
    E297 = 14651 + 20930
    A298 = 1*10**8.5
    n298 = 0
    E298 = 33069 + 20930
    A299 = 1*10**8.5
    n299 = 0
    E299 = 54418 + 20930
    A300 = 1*10**8
    n300 = 0
    E300 = 54418 + 20930
A301 = 1*10**8
n301 = 0
E301 = 71162-4186
A302 = 1*10**8
n302 = 0
E302 = 54418-4186
A303 = 1*10**8
n303 = 0
E303 = 62790-4186
A304 = 1*10**8
n304 = 0
E304 = 54418-4186
A305 = 1*10**8
n305 = 0
E305 = 54418-4186
A306 = 1*10**8.5
n306 = 0
E306 = 41860-4186
A307 = 1*10**8
n307 = 0
E307 = 54418-4186
A308 = 1*10**8
n308 = 0
E308 = 54418-4186
A309 = 1*10**8.5
n309 = 0
E309 = 48139-4186
A310 = 1*10**8
n310 = 0
E310 = 54418-4186
A311 = 1*10**8.5
n311 = 0
E311 = 54418-4186
A312 = 1*10**8
n312 = 0
E312 = 46046-4186
A313 = 1*10**8
n313 = 0
E313 = 83720-4186
A314 = 1*10**8
n314 = 0
E314 = 56511-4186
A315 = 1*10**8
n315 = 0
E315 = 56511-4186
A316 = 1*10**8.5
n316 = 0
E316 = 41860-4186
A317 = 1*10**8
n317 = 0
E317 = 54418-4186
A318 = 1*10**9.5
n318 = 0
E318 = 14651-4186
A319 = 1*10**8.5
n319 = 0
E319 = 33069-4186
A320 = 1*10**8.5
n320 = 0
E320 = 54418-4186
A321 = 1*10**8
n321 = 0
E321 = 54418-4186
A322 = 1*10**8
n322 = 0
E322 = 71162-4186
A323 = 1*10**8
n323 = 0
E323 = 54418-4186
A324 = 1*10**8
n324 = 0
E324 = 62790-4186
A325 = 1*10**8
n325 = 0
E325 = 54418-4186
A326 = 1*10**8
n326 = 0
E326 = 54418-4186
A327 = 1*10**8.5
n327 = 0
E327 = 41860-4186
A328 = 1*10**8
n328 = 0
E328 = 54418-4186
A329 = 1*10**8
n329 = 0
E329 = 54418-4186
A330 = 1*10**8.5
n330 = 0
E330 = 48139-4186
A331 = 1*10**8
n331 = 0
E331 = 54418-4186
A332 = 1*10**8.5
n332 = 0
E332 = 54418-4186
A333 = 1*10**8
n333 = 0
E333 = 46046-4186
A334 = 1*10**8
n334 = 0
E334 = 83720-4186
A335 = 1*10**8
n335 = 0
E335 = 56511-4186
A336 = 1*10**8
n336 = 0
E336 = 56511-4186
A337 = 1*10**8.5
n337 = 0
E337 = 41860-4186
A338 = 1*10**8
n338 = 0
E338 = 54418-4186
A339 = 1*10**9.5
n339 = 0
E339 = 14651-4186
A340 = 1*10**8.5
n340 = 0
E340 = 33069-4186
A341 = 1*10**8.5
n341 = 0
E341 = 54418-4186
A342 = 1*10**8
n342 = 0
E342 = 54418-4186
A343 = 2*10**8
n343 = 0
E343 = 71162-4186
A344 = 2*10**8
n344 = 0
E344 = 54418-4186
A345 = 2*10**8
n345 = 0
E345 = 62790-4186
A346 = 2*10**8
n346 = 0
E346 = 54418-4186
A347 = 2*10**8
n347 = 0
E347 = 54418-4186
A348 = 2*10**8.5
n348 = 0
E348 = 41860-4186
A349 = 2*10**8
n349 = 0
E349 = 54418-4186
A350 = 2*10**8
n350 = 0
E350 = 54418-4186
A351 = 2*10**8.5
n351 = 0
E351 = 48139-4186
A352 = 2*10**8
n352 = 0
E352 = 54418-4186
A353 = 2*10**8.5
n353 = 0
E353 = 54418-4186
A354 = 2*10**8
n354 = 0
E354 = 46046-4186
A355 = 2*10**8
n355 = 0
E355 = 83720-4186
A356 = 2*10**8
n356 = 0
E356 = 56511-4186
A357 = 2*10**8
n357 = 0
E357 = 56511-4186
A358 = 2*10**8.5
n358 = 0
E358 = 41860-4186
A359 = 2*10**8
n359 = 0
E359 = 54418-4186
A360 = 2*10**9.5
n360 = 0
E360 = 14651-4186
A361 = 2*10**8.5
n361 = 0
E361 = 33069-4186
A362 = 2*10**8.5
n362 = 0
E362 = 54418-4186
A363 = 2*10**8
n363 = 0
E363 = 54418-4186
A364 = 1*10**8
n364 = 0
E364 = 71162-4186
A365 = 1*10**8
n365 = 0
E365 = 54418-4186
A366 = 1*10**8
n366 = 0
E366 = 62790-4186
A367 = 1*10**8
n367 = 0
E367 = 54418-4186
A368 = 1*10**8
n368 = 0
E368 = 54418-4186
A369 = 1*10**8.5
n369 = 0
E369 = 41860-4186
A370 = 1*10**8
n370 = 0
E370 = 54418-4186
A371 = 1*10**8
n371 = 0
E371 = 54418-4186
A372 = 1*10**8.5
n372 = 0
E372 = 48139-4186
A373 = 1*10**8
n373 = 0
E373 = 54418-4186
A374 = 1*10**8.5
n374 = 0
E374 = 54418-4186
A375 = 1*10**8
n375 = 0
E375 = 46046-4186
A376 = 1*10**8
n376 = 0
E376 = 83720-4186
A377 = 1*10**8
n377 = 0
E377 = 56511-4186
A378 = 1*10**8
n378 = 0
E378 = 56511-4186
A379 = 1*10**8.5
n379 = 0
E379 = 41860-4186
A380 = 1*10**8
n380 = 0
E380 = 54418-4186
A381 = 1*10**9.5
n381 = 0
E381 = 14651-4186
A382 = 1*10**8.5
n382 = 0
E382 = 33069-4186
A383 = 1*10**8.5
n383 = 0
E383 = 54418-4186
A384 = 1*10**8
n384 = 0
E384 = 54418-4186
A385 = 1*10**8
n385 = 0
E385 = 71162-4186
A386 = 1*10**8
n386 = 0
E386 = 54418-4186
A387 = 1*10**8
n387 = 0
E387 = 62790-4186
A388 = 1*10**8
n388 = 0
E388 = 54418-4186
A389 = 1*10**8
n389 = 0
E389 = 54418-4186
A390 = 1*10**8.5
n390 = 0
E390 = 41860-4186
A391 = 1*10**8
n391 = 0
E391 = 54418-4186
A392 = 1*10**8
n392 = 0
E392 = 54418-4186
A393 = 1*10**8.5
n393 = 0
E393 = 48139-4186
A394 = 1*10**8
n394 = 0
E394 = 54418-4186
A395 = 1*10**8.5
n395 = 0
E395 = 54418-4186
A396 = 1*10**8
n396 = 0
E396 = 46046-4186
A397 = 1*10**8
n397 = 0
E397 = 83720-4186
A398 = 1*10**8
n398 = 0
E398 = 56511-4186
A399 = 1*10**8
n399 = 0
E399 = 56511-4186
A400 = 1*10**8.5
n400 = 0
E400 = 41860-4186
A401 = 1*10**8
n401 = 0
E401 = 54418-4186
A402 = 1*10**9.5
n402 = 0
E402 = 14651-4186
A403 = 1*10**8.5
n403 = 0
E403 = 33069-4186
A404 = 1*10**8.5
n404 = 0
E404 = 54418-4186
A405 = 1*10**8
n405 = 0
E405 = 54418-4186
# Initial conditions
ADIO = ADIOM2 = ALD3 = C10H2 = C10H2M2 = C10H2M4 = C2H6 = C3H4O = C3H4O2 = C3H6 = C3H6O2 = C3H8O2 = CH2CO = CH3CHO = CH3OH = CH4 = CHAR = CO = CO2 = COUMARYL = ETOH = H2 = H2O = KET = KETD = KETDM2 = KETM2 = LIG = LIGC = LIGH = LIGM2 = LIGO = MGUAI = OH = PADIO = PADIOM2 = PC2H2 = PCH2OH = PCH2P = PCH3 = PCHO = PCHOHP = PCHP2 = PCOH = PCOHP2 = PCOS = PFET3 = PFET3M2 = PH2 = PHENOL = PKETM2 = PLIG = PLIGM2 = PRADIO = PRADIOM2 = PRFET3 = PRFET3M2 = PRKETM2 = PRLIGH = PRLIGH2 = PRLIGM2A = RADIO = RADIOM2 = RC3H3O = RC3H5O2 = RC3H7O2 = RCH3 = RCH3O = RKET = RKETM2 = RLIGA = RLIGB = RLIGH = RLIGM2A = RLIGM2B = RMGUAI = RPHENOL = RPHENOX = RPHENOXM2 = SYNAPYL = VADIO = VADIOM2 = VCOUMARYL = VKET = VKETD = VKETDM2 = VKETM2 = VMGUAI = VPHENOL = VSYNAPYL = 0
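    # NOTE: alpha, T0, stoptime and the feed-species initial values referenced
    # below (e.g. PLIGC, PLIGH, PLIGO) are assumed to be defined earlier in
    # this file; only the species initialised to zero appear in the line above.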
# ODE solver parameters
abserr = 1e-11
relerr = 1e-09
numpoints = int(np.ceil(stoptime))+1
    t = [stoptime * i / (numpoints - 1) for i in range(numpoints)]
y0 = [T0, ADIO, ADIOM2, ALD3, C10H2, C10H2M2, C10H2M4, C2H6, C3H4O, C3H4O2, C3H6, C3H6O2, C3H8O2, CH2CO, CH3CHO, CH3OH, CH4, CHAR, CO, CO2, COUMARYL, ETOH, H2, H2O, KET, KETD, KETDM2, KETM2, LIG, LIGC, LIGH, LIGM2, LIGO, MGUAI, OH, PADIO, PADIOM2, PC2H2, PCH2OH, PCH2P, PCH3, PCHO, PCHOHP, PCHP2, PCOH, PCOHP2, PCOS, PFET3, PFET3M2, PH2, PHENOL, PKETM2, PLIG, PLIGC, PLIGH, PLIGM2, PLIGO, PRADIO, PRADIOM2, PRFET3, PRFET3M2, PRKETM2, PRLIGH, PRLIGH2, PRLIGM2A, RADIO, RADIOM2, RC3H3O, RC3H5O2, RC3H7O2, RCH3, RCH3O, RKET, RKETM2, RLIGA, RLIGB, RLIGH, RLIGM2A, RLIGM2B, RMGUAI, RPHENOL, RPHENOX, RPHENOXM2, SYNAPYL, VADIO, VADIOM2, VCOUMARYL, VKET, VKETD, VKETDM2, VKETM2, VMGUAI, VPHENOL, VSYNAPYL]
p = [alpha, R, A0, n0, E0, A1, n1, E1, A2, n2, E2, A3, n3, E3, A4, n4, E4, A5, n5, E5, A6, n6, E6, A7, n7, E7, A8, n8, E8, A9, n9, E9, A10, n10, E10, A11, n11, E11, A12, n12, E12, A13, n13, E13, A14, n14, E14, A15, n15, E15, A16, n16, E16, A17, n17, E17, A18, n18, E18, A19, n19, E19, A20, n20, E20, A21, n21, E21, A22, n22, E22, A23, n23, E23, A24, n24, E24, A25, n25, E25, A26, n26, E26, A27, n27, E27, A28, n28, E28, A29, n29, E29, A30, n30, E30, A31, n31, E31, A32, n32, E32, A33, n33, E33, A34, n34, E34, A35, n35, E35, A36, n36, E36, A37, n37, E37, A38, n38, E38, A39, n39, E39, A40, n40, E40, A41, n41, E41, A42, n42, E42, A43, n43, E43, A44, n44, E44, A45, n45, E45, A46, n46, E46, A47, n47, E47, A48, n48, E48, A49, n49, E49, A50, n50, E50, A51, n51, E51, A52, n52, E52, A53, n53, E53, A54, n54, E54, A55, n55, E55, A56, n56, E56, A57, n57, E57, A58, n58, E58, A59, n59, E59, A60, n60, E60, A61, n61, E61, A62, n62, E62, A63, n63, E63, A64, n64, E64, A65, n65, E65, A66, n66, E66, A67, n67, E67, A68, n68, E68, A69, n69, E69, A70, n70, E70, A71, n71, E71, A72, n72, E72, A73, n73, E73, A74, n74, E74, A75, n75, E75, A76, n76, E76, A77, n77, E77, A78, n78, E78, A79, n79, E79, A80, n80, E80, A81, n81, E81, A82, n82, E82, A83, n83, E83, A84, n84, E84, A85, n85, E85, A86, n86, E86, A87, n87, E87, A88, n88, E88, A89, n89, E89, A90, n90, E90, A91, n91, E91, A92, n92, E92, A93, n93, E93, A94, n94, E94, A95, n95, E95, A96, n96, E96, A97, n97, E97, A98, n98, E98, A99, n99, E99, A100, n100, E100, A101, n101, E101, A102, n102, E102, A103, n103, E103, A104, n104, E104, A105, n105, E105, A106, n106, E106, A107, n107, E107, A108, n108, E108, A109, n109, E109, A110, n110, E110, A111, n111, E111, A112, n112, E112, A113, n113, E113, A114, n114, E114, A115, n115, E115, A116, n116, E116, A117, n117, E117, A118, n118, E118, A119, n119, E119, A120, n120, E120, A121, n121, E121, A122, n122, E122, A123, n123, E123, A124, n124, E124, A125, n125, E125, A126, n126, E126, A127, n127, E127, A128, n128, E128, A129, n129, E129, A130, n130, E130, A131, n131, E131, A132, n132, E132, A133, n133, E133, A134, n134, E134, A135, n135, E135, A136, n136, E136, A137, n137, E137, A138, n138, E138, A139, n139, E139, A140, n140, E140, A141, n141, E141, A142, n142, E142, A143, n143, E143, A144, n144, E144, A145, n145, E145, A146, n146, E146, A147, n147, E147, A148, n148, E148, A149, n149, E149, A150, n150, E150, A151, n151, E151, A152, n152, E152, A153, n153, E153, A154, n154, E154, A155, n155, E155, A156, n156, E156, A157, n157, E157, A158, n158, E158, A159, n159, E159, A160, n160, E160, A161, n161, E161, A162, n162, E162, A163, n163, E163, A164, n164, E164, A165, n165, E165, A166, n166, E166, A167, n167, E167, A168, n168, E168, A169, n169, E169, A170, n170, E170, A171, n171, E171, A172, n172, E172, A173, n173, E173, A174, n174, E174, A175, n175, E175, A176, n176, E176, A177, n177, E177, A178, n178, E178, A179, n179, E179, A180, n180, E180, A181, n181, E181, A182, n182, E182, A183, n183, E183, A184, n184, E184, A185, n185, E185, A186, n186, E186, A187, n187, E187, A188, n188, E188, A189, n189, E189, A190, n190, E190, A191, n191, E191, A192, n192, E192, A193, n193, E193, A194, n194, E194, A195, n195, E195, A196, n196, E196, A197, n197, E197, A198, n198, E198, A199, n199, E199, A200, n200, E200, A201, n201, E201, A202, n202, E202, A203, n203, E203, A204, n204, E204, A205, n205, E205, A206, n206, E206, A207, n207, E207, A208, n208, E208, A209, n209, E209, A210, n210, E210, A211, n211, E211, A212, n212, E212, A213, n213, E213, A214, n214, E214, 
A215, n215, E215, A216, n216, E216, A217, n217, E217, A218, n218, E218, A219, n219, E219, A220, n220, E220, A221, n221, E221, A222, n222, E222, A223, n223, E223, A224, n224, E224, A225, n225, E225, A226, n226, E226, A227, n227, E227, A228, n228, E228, A229, n229, E229, A230, n230, E230, A231, n231, E231, A232, n232, E232, A233, n233, E233, A234, n234, E234, A235, n235, E235, A236, n236, E236, A237, n237, E237, A238, n238, E238, A239, n239, E239, A240, n240, E240, A241, n241, E241, A242, n242, E242, A243, n243, E243, A244, n244, E244, A245, n245, E245, A246, n246, E246, A247, n247, E247, A248, n248, E248, A249, n249, E249, A250, n250, E250, A251, n251, E251, A252, n252, E252, A253, n253, E253, A254, n254, E254, A255, n255, E255, A256, n256, E256, A257, n257, E257, A258, n258, E258, A259, n259, E259, A260, n260, E260, A261, n261, E261, A262, n262, E262, A263, n263, E263, A264, n264, E264, A265, n265, E265, A266, n266, E266, A267, n267, E267, A268, n268, E268, A269, n269, E269, A270, n270, E270, A271, n271, E271, A272, n272, E272, A273, n273, E273, A274, n274, E274, A275, n275, E275, A276, n276, E276, A277, n277, E277, A278, n278, E278, A279, n279, E279, A280, n280, E280, A281, n281, E281, A282, n282, E282, A283, n283, E283, A284, n284, E284, A285, n285, E285, A286, n286, E286, A287, n287, E287, A288, n288, E288, A289, n289, E289, A290, n290, E290, A291, n291, E291, A292, n292, E292, A293, n293, E293, A294, n294, E294, A295, n295, E295, A296, n296, E296, A297, n297, E297, A298, n298, E298, A299, n299, E299, A300, n300, E300, A301, n301, E301, A302, n302, E302, A303, n303, E303, A304, n304, E304, A305, n305, E305, A306, n306, E306, A307, n307, E307, A308, n308, E308, A309, n309, E309, A310, n310, E310, A311, n311, E311, A312, n312, E312, A313, n313, E313, A314, n314, E314, A315, n315, E315, A316, n316, E316, A317, n317, E317, A318, n318, E318, A319, n319, E319, A320, n320, E320, A321, n321, E321, A322, n322, E322, A323, n323, E323, A324, n324, E324, A325, n325, E325, A326, n326, E326, A327, n327, E327, A328, n328, E328, A329, n329, E329, A330, n330, E330, A331, n331, E331, A332, n332, E332, A333, n333, E333, A334, n334, E334, A335, n335, E335, A336, n336, E336, A337, n337, E337, A338, n338, E338, A339, n339, E339, A340, n340, E340, A341, n341, E341, A342, n342, E342, A343, n343, E343, A344, n344, E344, A345, n345, E345, A346, n346, E346, A347, n347, E347, A348, n348, E348, A349, n349, E349, A350, n350, E350, A351, n351, E351, A352, n352, E352, A353, n353, E353, A354, n354, E354, A355, n355, E355, A356, n356, E356, A357, n357, E357, A358, n358, E358, A359, n359, E359, A360, n360, E360, A361, n361, E361, A362, n362, E362, A363, n363, E363, A364, n364, E364, A365, n365, E365, A366, n366, E366, A367, n367, E367, A368, n368, E368, A369, n369, E369, A370, n370, E370, A371, n371, E371, A372, n372, E372, A373, n373, E373, A374, n374, E374, A375, n375, E375, A376, n376, E376, A377, n377, E377, A378, n378, E378, A379, n379, E379, A380, n380, E380, A381, n381, E381, A382, n382, E382, A383, n383, E383, A384, n384, E384, A385, n385, E385, A386, n386, E386, A387, n387, E387, A388, n388, E388, A389, n389, E389, A390, n390, E390, A391, n391, E391, A392, n392, E392, A393, n393, E393, A394, n394, E394, A395, n395, E395, A396, n396, E396, A397, n397, E397, A398, n398, E398, A399, n399, E399, A400, n400, E400, A401, n401, E401, A402, n402, E402, A403, n403, E403, A404, n404, E404, A405, n405, E405]
ysol = odeint(ODEs, y0, t, args=(p,), atol=abserr, rtol=relerr, mxstep=5000)
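    # ysol has shape (len(t), len(y0)): row i is the state vector at t[i],
    # and column 0 is the temperature (y0 begins with T0 above).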
with open('sol_check.dat', 'a+') as f:
        #data_format = '{:15.10f}' * 95
        # write only the final (time, state) pair
        result = str((t[-1], ysol[-1]))
        f.write(result)
        #for tt, yy in zip(t, ysol):
        #    print(data_format.format(tt, *yy), file=f)
#print(result)
#end = time.time()
#run_time = end - start
#data_format = '{:15d}' * 95
#col = list(range(0, 95))
#print(data_format.format(*col), file=f)
#print(run_time, file=f)
if __name__ == '__main__':
run()
| 106.456103
| 6,993
| 0.555366
|
ca04b72a3d017ae8c18ff0a576282d95ee324e79
| 1,946
|
py
|
Python
|
digproj/persona/views.py
|
mans-18/digest
|
2c62ab33b8647b6e3da93a714da614ff201eee67
|
[
"MIT"
] | null | null | null |
digproj/persona/views.py
|
mans-18/digest
|
2c62ab33b8647b6e3da93a714da614ff201eee67
|
[
"MIT"
] | null | null | null |
digproj/persona/views.py
|
mans-18/digest
|
2c62ab33b8647b6e3da93a714da614ff201eee67
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets, mixins
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from core.models import Kollege, Event, Persona
from persona import serializers
class BasePersonaAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
"""Base viewset for user owned persona attr"""
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def perform_create(self, serializer):
"""Create a new object"""
serializer.save(user=self.request.user)
# The base viewset above uses only the list and create mixins; update and delete mixins also exist.
class KollegeViewSet(BasePersonaAttrViewSet):
"""Manage kollegen in the db"""
    # ListModelMixin requires a queryset to be set
queryset = Kollege.objects.all()
serializer_class = serializers.KollegeSerializer
def get_queryset(self):
"""Return objects for current auth user only"""
return self.queryset.filter(user=self.request.user).order_by('-name')
class EventViewSet(BasePersonaAttrViewSet):
"""Manage events in the db"""
queryset = Event.objects.all()
serializer_class = serializers.EventSerializer
def get_queryset(self):
"""Return objects for the current authenticated user"""
return self.queryset.filter(user=self.request.user).order_by('title')
# ModelViewSet provides full CRUD functionality here, not just listing
class PersonaViewSet(viewsets.ModelViewSet):
"""Manage persona in the db"""
serializer_class = serializers.PersonaSerializer
queryset = Persona.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Retrieve the personas for the auth user"""
return self.queryset.filter(user=self.request.user).order_by('name')
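
# Illustrative only (not from the original project): these viewsets would
# typically be exposed through a DRF router in the app's urls.py, e.g.
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register('kollegen', KollegeViewSet)
#   router.register('events', EventViewSet)
#   router.register('personas', PersonaViewSet)
#   urlpatterns = router.urls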
| 35.381818
| 77
| 0.723535
|
d3cc9d444271665a2b443ba7670d10a3209623a5
| 1,929
|
py
|
Python
|
src/mpd_reader.py
|
dlesz/recsyschallenge2018
|
4f06333412c1146d1f3d0784e55c776cfd657a98
|
[
"Apache-2.0"
] | null | null | null |
src/mpd_reader.py
|
dlesz/recsyschallenge2018
|
4f06333412c1146d1f3d0784e55c776cfd657a98
|
[
"Apache-2.0"
] | null | null | null |
src/mpd_reader.py
|
dlesz/recsyschallenge2018
|
4f06333412c1146d1f3d0784e55c776cfd657a98
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import argparse
import logging
import time
import pandas as pd
from pandas import json_normalize  # moved out of pandas.io.json (deprecated since pandas 1.0)
import os
import json
def read_data(folderpath):
start = time.time()
logging.debug("reading data from %s", folderpath)
    # collect one frame per slice; concatenated once after the loop
    # (DataFrame.append was removed in pandas 2.0)
    tracks_frames = []
path_to_json = folderpath
json_files = [pos_json for pos_json in os.listdir(path_to_json) if pos_json.endswith('.json')]
json_files.sort()
# we need both the json and an index number so use enumerate()
for index, js in enumerate(json_files):
s = time.time()
with open(os.path.join(path_to_json, js)) as json_file:
j = json.load(json_file)
# extracting tracks from playlists in each slice
tracks = json_normalize(j['playlists'], record_path='tracks',
meta=['pid'])
            # keep only the columns we need and stash this slice's tracks
            tracks = tracks[['pid', 'track_uri']]
            tracks_frames.append(tracks)
            #print('reading slice #' + str(index) + ' in: ' + str(time.time() - s))
logging.debug("read data file in %s", time.time() - start)
start = time.time()
logging.debug("writing data to file")
tracks_df.to_csv('../my_data/mpd.tsv', sep='\t', index=False)
logging.debug("wrote data file in %s", time.time() - start)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Reads the MPD, trims and saves it to a .tsv file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', type=str,
dest='folderpath', help='specify path to folder with spotify-mpd json slices', required=True)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
read_data(args.folderpath)
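
# Illustrative only (not part of the original script): the written TSV can be
# loaded back with pandas for downstream processing, e.g.
#
#   df = pd.read_csv('../my_data/mpd.tsv', sep='\t')
#   print(df.groupby('pid').size().head())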
| 32.694915
| 117
| 0.648004
|
006fe43e33f4db4cfe4f891db82071b43879bfe4
| 2,142
|
py
|
Python
|
toyota_na/vehicle/entity_types/ToyotaRemoteStart.py
|
MrReBot/toyota-na
|
4afd28660c1f964ce52d3c3bc5b985221b89534e
|
[
"MIT"
] | 8
|
2021-09-21T16:53:58.000Z
|
2022-01-19T02:01:24.000Z
|
toyota_na/vehicle/entity_types/ToyotaRemoteStart.py
|
MrReBot/toyota-na
|
4afd28660c1f964ce52d3c3bc5b985221b89534e
|
[
"MIT"
] | 7
|
2022-01-30T03:08:57.000Z
|
2022-03-21T19:04:48.000Z
|
toyota_na/vehicle/entity_types/ToyotaRemoteStart.py
|
MrReBot/toyota-na
|
4afd28660c1f964ce52d3c3bc5b985221b89534e
|
[
"MIT"
] | 3
|
2022-01-19T08:03:56.000Z
|
2022-01-22T03:47:29.000Z
|
from datetime import datetime, timedelta
from typing import Optional
import pytz
class ToyotaRemoteStart:
_on: bool
_start_time: Optional[datetime]
_timer: Optional[float]
def __init__(self, date: Optional[str], on: bool, timer: Optional[float]):
if date is not None:
self._start_time = datetime.strptime(
f"{date}", "%Y-%m-%dT%H:%M:%SZ"
).replace(tzinfo=pytz.UTC)
else:
self._start_time = None
self._on = on
self._timer = timer
@property
def end_time(self) -> Optional[datetime]:
if self._start_time is not None and self._timer is not None:
            return self._start_time + timedelta(minutes=self._timer)
@property
def on(self) -> bool:
"""Returns whether or not the vehicle is remote started"""
return self._on
@property
def start_time(self) -> Optional[datetime]:
return self._start_time
@property
def time_left(self) -> Optional[float]:
"""Returns the time left in minutes if the vehicle is on"""
        if self.end_time is not None and self._timer is not None:
            # subtracting two timezone-aware datetimes yields a timedelta,
            # whose seconds we convert to minutes
            return (self.end_time - datetime.now(pytz.UTC)).total_seconds() / 60
@property
def timer(self) -> Optional[float]:
"""Returns the total time the vehicle will run in minutes when remote started"""
return self._timer
@on.setter
def on(self, value: bool) -> None:
self._on = value
@start_time.setter
def start_time(self, value: Optional[datetime]):
self._start_time = value
@timer.setter
def timer(self, value: float):
self._timer = value
def __repr__(self) -> str:
return f"{self.__class__.__name__}(end_time={self.end_time}, on={self._on}, start_time={self.start_time}, time_left={self.time_left})"
| 31.5
| 142
| 0.609711
|
aeb7d83c5bd7c98070696fd4899386de6bcd1e1e
| 7,099
|
py
|
Python
|
dymos/transcriptions/common/timeseries_output_comp.py
|
Kenneth-T-Moore/dymos
|
0ae11aab9cb69ac9dd1d784616d1dfe35a6e5b11
|
[
"Apache-2.0"
] | null | null | null |
dymos/transcriptions/common/timeseries_output_comp.py
|
Kenneth-T-Moore/dymos
|
0ae11aab9cb69ac9dd1d784616d1dfe35a6e5b11
|
[
"Apache-2.0"
] | null | null | null |
dymos/transcriptions/common/timeseries_output_comp.py
|
Kenneth-T-Moore/dymos
|
0ae11aab9cb69ac9dd1d784616d1dfe35a6e5b11
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import openmdao.api as om
from scipy.linalg import block_diag
from dymos.transcriptions.grid_data import GridData
from dymos.utils.lagrange import lagrange_matrices
class TimeseriesOutputCompBase(om.ExplicitComponent):
"""
TimeseriesOutputComp collects variable values from the phase and provides them in chronological
order as outputs. Some phase types don't internally have access to a contiguous array of all
values of a given variable in the phase. For instance, the GaussLobatto pseudospectral has
separate arrays of variable values at discretization and collocation nodes. These values
need to be interleaved to provide a time series. Pseudospectral techniques provide timeseries
data at 'all' nodes, while ExplicitPhase provides values at the step boundaries.
"""
def initialize(self):
self._timeseries_outputs = []
self._vars = []
self.options.declare('input_grid_data',
types=GridData,
desc='Container object for grid on which inputs are provided.')
self.options.declare('output_grid_data',
types=GridData,
allow_none=True,
default=None,
desc='Container object for grid on which outputs are interpolated.')
self.options.declare('output_subset',
types=str,
default='all',
desc='Name of the node subset at which outputs are desired.')
def _add_timeseries_output(self, name, var_class, shape=(1,), units=None, desc='',
distributed=False):
"""
        Add a timeseries output to this component
Parameters
----------
name : str
name of the variable in this component's namespace.
var_class : str
The 'class' of the variable as given by phase.classify_var. One of 'time', 'state',
'indep_control', 'input_control', 'design_parameter', 'input_parameter',
'control_rate', 'control_rate2', or 'ode'.
shape : int or tuple or list or None
Shape of this variable, only required if val is not an array.
Default is None.
units : str or None
Units in which the output variables will be provided to the component during execution.
Default is None, which means it has no units.
desc : str
description of the timeseries output variable.
distributed : bool
If True, this variable is distributed across multiple processes.
"""
src_all = var_class in ['time', 'time_phase', 'indep_control', 'input_control',
'control_rate', 'control_rate2', 'indep_polynomial_control',
'input_polynomial_control', 'polynomial_control_rate',
'polynomial_control_rate2', 'design_parameter', 'input_parameter']
kwargs = {'shape': shape, 'units': units, 'desc': desc, 'src_all': src_all,
'distributed': distributed}
self._timeseries_outputs.append((name, kwargs))
class PseudospectralTimeseriesOutputComp(TimeseriesOutputCompBase):
def setup(self):
"""
Define the independent variables as output variables.
"""
igd = self.options['input_grid_data']
ogd = self.options['output_grid_data']
output_subset = self.options['output_subset']
if ogd is None:
ogd = igd
input_num_nodes = igd.num_nodes
output_num_nodes = ogd.subset_num_nodes[output_subset]
# Build the interpolation matrix which maps from the input grid to the output grid.
# Rather than a single phase-wide interpolating polynomial, map each segment.
# To do this, find the nodes in the output grid which fall in each segment of the input
# grid. Then build a Lagrange interpolating polynomial for that segment
L_blocks = []
output_nodes_ptau = ogd.node_ptau[ogd.subset_node_indices[output_subset]].tolist()
for iseg in range(igd.num_segments):
i1, i2 = igd.segment_indices[iseg]
iptau_segi = igd.node_ptau[i1:i2]
istau_segi = igd.node_stau[i1:i2]
# The indices of the output grid that fall within this segment of the input grid
if ogd is igd:
optau_segi = iptau_segi
else:
ptau_hi = igd.segment_ends[iseg+1]
if iseg < igd.num_segments - 1:
idxs_in_iseg = np.where(output_nodes_ptau <= ptau_hi)[0]
else:
idxs_in_iseg = np.arange(len(output_nodes_ptau))
optau_segi = np.asarray(output_nodes_ptau)[idxs_in_iseg]
# Remove the captured nodes so we don't accidentally include them again
output_nodes_ptau = output_nodes_ptau[len(idxs_in_iseg):]
            # Now get the output nodes which fall in iseg, in iseg's segment tau space.
ostau_segi = 2.0 * (optau_segi - iptau_segi[0]) / (iptau_segi[-1] - iptau_segi[0]) - 1
# Create the interpolation matrix and add it to the blocks
L, _ = lagrange_matrices(istau_segi, ostau_segi)
L_blocks.append(L)
self.interpolation_matrix = block_diag(*L_blocks)
for (name, kwargs) in self._timeseries_outputs:
input_kwargs = {k: kwargs[k] for k in ('units', 'desc')}
input_name = 'input_values:{0}'.format(name)
shape = kwargs['shape']
self.add_input(input_name,
shape=(input_num_nodes,) + shape,
**input_kwargs)
output_name = name
output_kwargs = {k: kwargs[k] for k in ('units', 'desc')}
output_kwargs['shape'] = (output_num_nodes,) + kwargs['shape']
self.add_output(output_name, **output_kwargs)
self._vars.append((input_name, output_name, shape))
size = np.prod(shape)
val_jac = np.zeros((output_num_nodes, size, input_num_nodes, size))
for i in range(size):
val_jac[:, i, :, i] = self.interpolation_matrix
val_jac = val_jac.reshape((output_num_nodes * size, input_num_nodes * size),
order='C')
val_jac_rows, val_jac_cols = np.where(val_jac != 0)
rs, cs = val_jac_rows, val_jac_cols
self.declare_partials(of=output_name,
wrt=input_name,
rows=rs, cols=cs, val=val_jac[rs, cs])
def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
for (input_name, output_name, _) in self._vars:
outputs[output_name] = np.tensordot(self.interpolation_matrix, inputs[input_name],
axes=(1, 0))
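

if __name__ == '__main__':
    # Illustrative sketch only (not part of dymos): how the block-diagonal
    # matrix built in setup() maps stacked per-segment input values to
    # output values. The two 2x2 blocks below are hypothetical Lagrange
    # interpolation matrices for two segments with two nodes each.
    L1 = np.array([[1.0, 0.0], [0.5, 0.5]])
    L2 = np.array([[1.0, 0.0], [0.0, 1.0]])
    interp = block_diag(L1, L2)
    values = np.array([0.0, 2.0, 3.0, 5.0])  # inputs at the four nodes
    print(interp.dot(values))  # -> [0. 1. 3. 5.]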
| 44.647799
| 99
| 0.599239
|
c42f51519e649b5edeffc6cc4ceac5299b064c4c
| 2,238
|
py
|
Python
|
Python/Sonstige_Uebungen/create_and_parse_ip_adresses.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | null | null | null |
Python/Sonstige_Uebungen/create_and_parse_ip_adresses.py
|
Apop85/Scripts
|
e71e1c18539e67543e3509c424c7f2d6528da654
|
[
"MIT"
] | 6
|
2020-12-24T15:15:09.000Z
|
2022-01-13T01:58:35.000Z
|
Python/Sonstige_Uebungen/create_and_parse_ip_adresses.py
|
Apop85/Scripts
|
1d8dad316c55e1f1343526eac9e4b3d0909e4873
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
####
# File: create_and_parse_ip_adresses.py
# Project: Sonstige_Uebungen
#-----
# Created Date: Saturday 16.01.2021, 16:54
# Author: Apop85
#-----
# Last Modified: Saturday 16.01.2021, 19:10
#-----
# Copyright (c) 2021 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
#-----
# Description: Parsing string numbers to ip-addresses
####
def generateIp(startValue=0):
    # Generator yielding dotted IPv4 strings for consecutive integers;
    # 256**4 - 1 (i.e. 255.255.255.255) is the largest address, so the
    # range runs up to 256**4.
    for i in range(startValue, 256**4):
        o1 = i // 2 ** 24 % 256
        o2 = i // 2 ** 16 % 256
        o3 = i // 2 ** 8 % 256
        o4 = i % 256
        yield f"{o1}.{o2}.{o3}.{o4}"
def parseIp(ip, ips=None):
    # Recursive function to generate valid IPs from digit strings with no dots
    # Input:
    #   ip = string | example: 1921681125
    # Output:
    #   returnArray = list | example: 192.168.112.5, 192.168.11.25, 192.168.1.125, 192.16.81.125, 19.216.81.125
    # A None default avoids the shared-mutable-default pitfall.
    if ips is None:
        ips = []
    returnArray = []
if len(ips) == 4:
if ip != "":
# Return empty array if there are numbers left
return []
# return IP-Address
return [".".join(ips)]
if not ip:
# return empty array if ip is not set
return []
    # Take the first 3 digits when they form an octet between 100 and 255
if len(ip) > 2 and 99 < int(ip[:3]) < 256:
# Analyze reduced ip
returnArray += parseIp(ip[3:], ips + [ip[:3]])
    # Take the first 2 digits when they form an octet of at least 10 (no leading zero)
if len(ip) > 1 and int(ip[:2]) > 9:
# Analyze reduced ip
returnArray += parseIp(ip[2:], ips + [ip[:2]])
# Analyze reduced ip
returnArray += parseIp(ip[1:], ips + [ip[0]])
return returnArray
startValue = 3232235776  # equals 192.168.1.0
ipAmount = 5
ipAddrGenerator = generateIp(startValue)
for i in range(ipAmount):
ip = next(ipAddrGenerator)
stringToParse = ip.split(".")
stringToParse = "".join(ip.split("."))
parsed = parseIp(stringToParse)
print("█" * (len(parsed[0]) + 4))
for ip in parsed:
print(f"█ {ip} █")
print("█" * (len(parsed[0]) + 4))
| 29.447368
| 112
| 0.592046
|
5ccb6aa7133da67aa99b774d5129cd85b05853af
| 2,319
|
py
|
Python
|
tests/test_util.py
|
AFCYBER-DREAM/piedpiper-gman
|
fb4c36b0bdf17161c52e5b37c3216139fe40a99a
|
[
"MIT"
] | null | null | null |
tests/test_util.py
|
AFCYBER-DREAM/piedpiper-gman
|
fb4c36b0bdf17161c52e5b37c3216139fe40a99a
|
[
"MIT"
] | 3
|
2019-05-23T02:04:47.000Z
|
2019-06-10T20:30:21.000Z
|
tests/test_util.py
|
AFCYBER-DREAM/piperci-gman
|
fb4c36b0bdf17161c52e5b37c3216139fe40a99a
|
[
"MIT"
] | 3
|
2019-06-04T19:29:49.000Z
|
2019-06-24T14:21:14.000Z
|
import uuid
import json
import pytest
from piperci import sri
from piperci_gman.util import (GManJSONEncoder, Api)
from piperci_gman.gman import GMan
from piperci_gman.artman import ArtMan
from pytest import raises
def test_jsonencoder_uuid():
json.dumps({'uuid': uuid.uuid4()}, cls=GManJSONEncoder)
def test_jsonencoder_default_encoder():
j = json.dumps({'uuid': 'stringxyz'}, cls=GManJSONEncoder)
assert json.loads(j)['uuid'] == 'stringxyz'
def test_jsonencoder_hash():
j = json.dumps(
{'uuid':
sri.sri_to_hash('sha256-sCDaaxdshXhK4sA/v4dMHiMWhtGyQwA1fP8PgrN0O5g=')},
cls=GManJSONEncoder)
assert 'sha256-sCDaaxdshXhK4sA/v4dMHiMWhtGyQwA1fP8PgrN0O5g=' in j
def test_jsonencoder_not_uuid():
class SomethingNotSupported(object):
propx = None
with raises(TypeError):
json.dumps({'notsupported': SomethingNotSupported()}, cls=GManJSONEncoder)
def test_handle_error_404(client):
resp = client.get('/artifact/sri/mal-formedsri')
assert resp.status_code == 404
def test_handle_error_500(monkeypatch, client):
class DumbyError(Exception):
def __init__(self, *args, **kwargs):
self.code = 500
err = Api.handle_error
def error_wrapper(self, e):
return err(self, DumbyError())
monkeypatch.setattr(Api, 'handle_error', error_wrapper)
assert client.get('/artifact/sri/mal-formedsri').status_code == 500
hashes = [
sri.sri_to_hash('sha256-vFatceyWaE9Aks3N9ouRUtba1mwrIHdEVLti88atIvc='),
'sha256-vFatceyWaE9Aks3N9ouRUtba1mwrIHdEVLti88atIvc='
]
@pytest.mark.parametrize('hash', hashes)
def test_to_url_sri(client, api, hash):
url = api.url_for(ArtMan, sri=hash)
assert 'c2hhMjU2LXZGYXRjZXlXYUU5QWtzM045b3VSVXRiYTFtd3JJSGRFVkx0aTg4YXRJdmM9' in url
def test_to_url_sri_error(client, api):
url = api.url_for(ArtMan, sri={'asdfasdfsad'})
assert 'sri=' in url
def test_unhandled_error(client, api, monkeypatch):
h_errors = Api.handle_error
def handle_error_stub(self, e):
e = ValueError('testing a non .status_code error that is not TypeError')
return h_errors(self, e)
monkeypatch.setattr('piperci_gman.util.Api.handle_error', handle_error_stub)
resp = client.put(api.url_for(GMan))
assert resp.status_code == 500
| 24.15625
| 88
| 0.721432
|
bd1cd41bfcc4a0f457fefb0da410f63d438a15bb
| 13,851
|
py
|
Python
|
PySide2/QtQml.py
|
arjun-namdeo/py_stubs
|
605bb167e239978f5417f3f1fc1f5c12e2a243cc
|
[
"MIT"
] | null | null | null |
PySide2/QtQml.py
|
arjun-namdeo/py_stubs
|
605bb167e239978f5417f3f1fc1f5c12e2a243cc
|
[
"MIT"
] | null | null | null |
PySide2/QtQml.py
|
arjun-namdeo/py_stubs
|
605bb167e239978f5417f3f1fc1f5c12e2a243cc
|
[
"MIT"
] | null | null | null |
from PySide2.QtCore import QObject as _QObject
class _Property(object):
def __call__(*args, **kwargs):
"""
x.__call__(...) <==> x(...)
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def getter(*args, **kwargs):
pass
def read(*args, **kwargs):
pass
def setter(*args, **kwargs):
pass
def write(*args, **kwargs):
pass
__new__ = None
class _Object(object):
__dict__ = None
class QQmlListReference(_Object):
def __copy__(*args, **kwargs):
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def append(*args, **kwargs):
pass
def at(*args, **kwargs):
pass
def canAppend(*args, **kwargs):
pass
def canAt(*args, **kwargs):
pass
def canClear(*args, **kwargs):
pass
def canCount(*args, **kwargs):
pass
def clear(*args, **kwargs):
pass
def count(*args, **kwargs):
pass
def isManipulable(*args, **kwargs):
pass
def isReadable(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def listElementType(*args, **kwargs):
pass
def object(*args, **kwargs):
pass
__new__ = None
class QJSEngine(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def collectGarbage(*args, **kwargs):
pass
def evaluate(*args, **kwargs):
pass
def globalObject(*args, **kwargs):
pass
def installTranslatorFunctions(*args, **kwargs):
pass
def newArray(*args, **kwargs):
pass
def newObject(*args, **kwargs):
pass
def newQObject(*args, **kwargs):
pass
def toScriptValue(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
class QQmlFileSelector(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def setExtraSelectors(*args, **kwargs):
pass
def get(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
class QJSValue(_Object):
def __copy__(*args, **kwargs):
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __nonzero__(*args, **kwargs):
"""
x.__nonzero__() <==> x != 0
"""
pass
def call(*args, **kwargs):
pass
def callAsConstructor(*args, **kwargs):
pass
def callWithInstance(*args, **kwargs):
pass
def deleteProperty(*args, **kwargs):
pass
def engine(*args, **kwargs):
pass
def equals(*args, **kwargs):
pass
def hasOwnProperty(*args, **kwargs):
pass
def hasProperty(*args, **kwargs):
pass
def isArray(*args, **kwargs):
pass
def isBool(*args, **kwargs):
pass
def isCallable(*args, **kwargs):
pass
def isDate(*args, **kwargs):
pass
def isError(*args, **kwargs):
pass
def isNull(*args, **kwargs):
pass
def isNumber(*args, **kwargs):
pass
def isObject(*args, **kwargs):
pass
def isQObject(*args, **kwargs):
pass
def isRegExp(*args, **kwargs):
pass
def isString(*args, **kwargs):
pass
def isUndefined(*args, **kwargs):
pass
def isVariant(*args, **kwargs):
pass
def property(*args, **kwargs):
pass
def prototype(*args, **kwargs):
pass
def setProperty(*args, **kwargs):
pass
def setPrototype(*args, **kwargs):
pass
def strictlyEquals(*args, **kwargs):
pass
def toBool(*args, **kwargs):
pass
def toDateTime(*args, **kwargs):
pass
def toInt(*args, **kwargs):
pass
def toNumber(*args, **kwargs):
pass
def toQObject(*args, **kwargs):
pass
def toString(*args, **kwargs):
pass
def toUInt(*args, **kwargs):
pass
def toVariant(*args, **kwargs):
pass
NullValue = None
SpecialValue = None
UndefinedValue = None
__new__ = None
class QQmlExpression(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def clearError(*args, **kwargs):
pass
def columnNumber(*args, **kwargs):
pass
def context(*args, **kwargs):
pass
def engine(*args, **kwargs):
pass
def error(*args, **kwargs):
pass
def evaluate(*args, **kwargs):
pass
def expression(*args, **kwargs):
pass
def hasError(*args, **kwargs):
pass
def lineNumber(*args, **kwargs):
pass
def notifyOnValueChanged(*args, **kwargs):
pass
def scopeObject(*args, **kwargs):
pass
def setExpression(*args, **kwargs):
pass
def setNotifyOnValueChanged(*args, **kwargs):
pass
def setSourceLocation(*args, **kwargs):
pass
def sourceFile(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
valueChanged = None
class ListProperty(_Property):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
class QQmlProperty(_Object):
def __copy__(*args, **kwargs):
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getattribute__(*args, **kwargs):
"""
x.__getattribute__('name') <==> x.name
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def connectNotifySignal(*args, **kwargs):
pass
def hasNotifySignal(*args, **kwargs):
pass
def index(*args, **kwargs):
pass
def isDesignable(*args, **kwargs):
pass
def isProperty(*args, **kwargs):
pass
def isResettable(*args, **kwargs):
pass
def isSignalProperty(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def isWritable(*args, **kwargs):
pass
def method(*args, **kwargs):
pass
def name(*args, **kwargs):
pass
def needsNotifySignal(*args, **kwargs):
pass
def object(*args, **kwargs):
pass
def property(*args, **kwargs):
pass
def propertyType(*args, **kwargs):
pass
def propertyTypeCategory(*args, **kwargs):
pass
def propertyTypeName(*args, **kwargs):
pass
def reset(*args, **kwargs):
pass
def type(*args, **kwargs):
pass
def read(*args, **kwargs):
pass
def write(*args, **kwargs):
pass
Invalid = None
InvalidCategory = None
List = None
Normal = None
Object = None
Property = None
PropertyTypeCategory = None
SignalProperty = None
Type = None
__new__ = None
class QQmlError(_Object):
def __copy__(*args, **kwargs):
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def column(*args, **kwargs):
pass
def description(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def line(*args, **kwargs):
pass
def object(*args, **kwargs):
pass
def setColumn(*args, **kwargs):
pass
def setDescription(*args, **kwargs):
pass
def setLine(*args, **kwargs):
pass
def setObject(*args, **kwargs):
pass
def setUrl(*args, **kwargs):
pass
def toString(*args, **kwargs):
pass
def url(*args, **kwargs):
pass
__new__ = None
class QQmlContext(_QObject):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def baseUrl(*args, **kwargs):
pass
def contextObject(*args, **kwargs):
pass
def contextProperty(*args, **kwargs):
pass
def engine(*args, **kwargs):
pass
def isValid(*args, **kwargs):
pass
def nameForObject(*args, **kwargs):
pass
def parentContext(*args, **kwargs):
pass
def resolvedUrl(*args, **kwargs):
pass
def setBaseUrl(*args, **kwargs):
pass
def setContextObject(*args, **kwargs):
pass
def setContextProperty(*args, **kwargs):
pass
__new__ = None
staticMetaObject = None
class QQmlEngine(QJSEngine):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def addImportPath(*args, **kwargs):
pass
def addNamedBundle(*args, **kwargs):
pass
def addPluginPath(*args, **kwargs):
pass
def baseUrl(*args, **kwargs):
pass
def clearComponentCache(*args, **kwargs):
pass
def event(*args, **kwargs):
pass
def importPathList(*args, **kwargs):
pass
def importPlugin(*args, **kwargs):
pass
def networkAccessManager(*args, **kwargs):
pass
def offlineStoragePath(*args, **kwargs):
pass
def outputWarningsToStandardError(*args, **kwargs):
pass
def pluginPathList(*args, **kwargs):
pass
def removeImageProvider(*args, **kwargs):
pass
def rootContext(*args, **kwargs):
pass
def setBaseUrl(*args, **kwargs):
pass
def setImportPathList(*args, **kwargs):
pass
def setOfflineStoragePath(*args, **kwargs):
pass
def setOutputWarningsToStandardError(*args, **kwargs):
pass
def setPluginPathList(*args, **kwargs):
pass
def trimComponentCache(*args, **kwargs):
pass
def contextForObject(*args, **kwargs):
pass
def objectOwnership(*args, **kwargs):
pass
def setContextForObject(*args, **kwargs):
pass
def setObjectOwnership(*args, **kwargs):
pass
CppOwnership = None
JavaScriptOwnership = None
ObjectOwnership = None
__new__ = None
quit = None
staticMetaObject = None
warnings = None
class QQmlApplicationEngine(QQmlEngine):
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def load(*args, **kwargs):
pass
def loadData(*args, **kwargs):
pass
def rootObjects(*args, **kwargs):
pass
__new__ = None
objectCreated = None
staticMetaObject = None
def qmlRegisterType(*args, **kwargs):
pass
QML_HAS_ATTACHED_PROPERTIES = 1
| 15.121179
| 70
| 0.456646
|
b65c93fa80afd8062496ebb369c4c5ad1020162f
| 405
|
py
|
Python
|
pygram/filters/sepia.py
|
achillesrasquinha/PyGram
|
2ca49c8ac3287cb0b722e1086f0c55418e197a28
|
[
"MIT"
] | 18
|
2017-02-02T05:33:20.000Z
|
2022-02-11T05:31:44.000Z
|
pygram/filters/sepia.py
|
Rutvikbk/PyGram
|
2ca49c8ac3287cb0b722e1086f0c55418e197a28
|
[
"MIT"
] | null | null | null |
pygram/filters/sepia.py
|
Rutvikbk/PyGram
|
2ca49c8ac3287cb0b722e1086f0c55418e197a28
|
[
"MIT"
] | 11
|
2017-02-03T09:39:27.000Z
|
2018-03-26T18:59:37.000Z
|
import numpy as np
from PIL import Image
def sepia(image):
    arr = np.copy(image)
    r, g, b = arr[:,:,0], arr[:,:,1], arr[:,:,2]
    # Compute every output channel before writing back: r, g and b are
    # views into arr, so assigning arr[:,:,0] first would corrupt the
    # inputs used for the green and blue channels.
    new_r = np.minimum(r * 0.393 + g * 0.769 + b * 0.189, 255)
    new_g = np.minimum(r * 0.349 + g * 0.686 + b * 0.168, 255)
    new_b = np.minimum(r * 0.272 + g * 0.534 + b * 0.131, 255)
    arr[:,:,0], arr[:,:,1], arr[:,:,2] = new_r, new_g, new_b
    copy = Image.fromarray(arr)
    return copy
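# Minimal usage sketch (hypothetical file names):
#   img = Image.open('input.jpg').convert('RGB')
#   sepia(img).save('output_sepia.jpg')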
| 27
| 67
| 0.491358
|
894a701c0cd644b5e3e237e8bca282f76a92312b
| 153
|
py
|
Python
|
pyjac/__main__.py
|
stgeke/pyJac-v2
|
c2716a05df432efd8e5f6cc5cc3d46b72c24c019
|
[
"MIT"
] | 9
|
2018-10-08T07:49:20.000Z
|
2021-06-26T15:28:30.000Z
|
pyjac/__main__.py
|
stgeke/pyJac-v2
|
c2716a05df432efd8e5f6cc5cc3d46b72c24c019
|
[
"MIT"
] | 13
|
2018-09-01T14:17:51.000Z
|
2021-07-30T16:19:33.000Z
|
pyjac/__main__.py
|
arghdos/spyJac
|
bdd6940c27681e3b19ee41efb31abd20a89d1be8
|
[
"MIT"
] | 7
|
2018-09-08T11:57:34.000Z
|
2022-02-14T13:57:20.000Z
|
import sys
from pyjac import utils
def main(args=None):
if args is None:
utils.create()
if __name__ == '__main__':
sys.exit(main())
| 11.769231
| 26
| 0.627451
|
9dbf3c44398528f585cb44329e01eb1173640b72
| 8,881
|
py
|
Python
|
office/outlook.py
|
ktr/office
|
27b502aae6875aca57a41adc8b3f63236c3cb2e0
|
[
"MIT"
] | null | null | null |
office/outlook.py
|
ktr/office
|
27b502aae6875aca57a41adc8b3f63236c3cb2e0
|
[
"MIT"
] | null | null | null |
office/outlook.py
|
ktr/office
|
27b502aae6875aca57a41adc8b3f63236c3cb2e0
|
[
"MIT"
] | null | null | null |
"""
outlook.py - utilities to help with Microsoft Outlook (TM)
Example:
outlook = Outlook()
tbl = outlook.list_to_tbl([[1, 2, 3], [4, 5, 6]])
with open(r'H:\test1.png', 'rb') as io:
img = outlook.inline_img(io)
outlook.create_mail('test@example.com', 'Hello!', f'Hello!<br><br>{tbl}<br><br>{img}<br><br>Goodbye!', show=True)
"""
import base64
import datetime
import pytz
from win32com.client import Dispatch
class Outlook:
def __init__(self):
self.ns = self.outlook = self.inbox = None
def _connect(self):
# https://docs.microsoft.com/en-us/office/vba/api/outlook.oldefaultfolders
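        # Per the OlDefaultFolders enum: 9 = olFolderCalendar, 6 = olFolderInbox.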
if not self.outlook:
self.outlook = Dispatch("Outlook.Application")
self.ns = self.outlook.GetNamespace("MAPI")
self.calendar = self.ns.GetDefaultFolder(9)
self.inbox = self.ns.GetDefaultFolder(6)
    def create_mail(self, to: str, subject: str, body: str, cc: str='', attachments: list=None, show: bool=False, send: bool=False):
self._connect()
msg = self.outlook.CreateItem(0x0)
msg.Subject = subject
        for path in (attachments or []):
msg.Attachments.Add(path)
msg.To = to
msg.CC = cc
msg.HTMLBody = body
if show:
msg.Display()
if send:
msg.Send()
def inline_img(self, io) -> str:
"""
Returns html snippet to embed an image in an email.
`io` should be an open file (e.g., open(<path>, 'rb')).
"""
encoded_image = base64.b64encode(io.read()).decode("utf-8")
return '<img src="data:image/png;base64,%s"/>' % encoded_image
    def tbl_style(self, styles=None):
        styles = dict(styles or {})
styles.setdefault('header-bg', '#1F77B4')
styles.setdefault('header-fg', '#FFFFFF')
styles.setdefault('th-border-color', '#222222')
styles.setdefault('td-border-color', '#222222')
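        # The CSS template below is filled in via %-style mapping substitution,
        # e.g. %(header-bg)s expands to styles['header-bg'].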
return '''\
<style type="text/css">
table {
border-collapse:collapse;
border-spacing:0;
}
table td {
font-family:Arial, sans-serif;
font-size:14px;
padding:5px 10px;
border-style:solid;
border-width:1px;
overflow:hidden;
word-break:normal;
border-color: %(td-border-color)s;
text-align: right;
width: 100px;
}
table th {
font-family:Arial, Helvetica, sans-serif !important;
font-size:14px;
font-weight:bold;
padding:5px 10px;
border-style:solid;
border-width:1px;
overflow:hidden;
word-break:normal;
background-color:%(header-bg)s;
color:%(header-fg)s;
vertical-align:top;
border-color: %(th-border-color)s;
width: 100px;
}
</style>''' % styles
def list_to_tbl(self, lst: list, first_is_hdr: bool=True) -> str:
"""
Returns html table of `lst`.
"""
head = '<table class="tg">\n'
rowg = lambda row, mk='td': '<tr>\n' + '\n'.join([f'<{mk}>{_}</{mk}>' for _ in row]) + '\n</tr>'
row1 = rowg(lst[0], 'th') + '\n'
rows = row1 + '\n'.join([rowg(_) for _ in lst[1:]])
foot = '\n</table>'
return self.tbl_style() + head + rows + foot
def find_open_slots(self, appts, duration=None):
# appts should be list of start/end times
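        # Approach, in brief: bracket the appointments with zero-length
        # sentinel slots at the working-day bounds, then walk consecutive
        # pairs and carve each gap into `duration`-sized open slots.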
start = appts[0][0]
end = appts[-1][1]
# find open slots between 9am–6pm
utc = pytz.UTC
hours = (utc.localize(datetime.datetime(start.year, start.month, start.day, 9)),
utc.localize(datetime.datetime(end.year, end.month, end.day, 18)))
if duration is None:
duration = datetime.timedelta(minutes=30)
slots = sorted([(hours[0], hours[0])] + appts + [(hours[1], hours[1])])
open_slots = []
for start, end in ((slots[i][1], slots[i+1][0]) for i in range(len(slots)-1)):
while start + duration <= end:
open_slots.append([start, start + duration])
start += duration
this = open_slots[0]
print(f'\n{this[0]:%Y-%m-%d}')
for slot in open_slots[1:]:
# only offer up times after default start time
if slot[0].hour < hours[0].hour or slot[0].hour > hours[1].hour:
continue
# or before the default end time
if slot[1].hour < hours[0].hour or slot[1].hour > hours[1].hour:
continue
# otherwise, check if we should combine consecutive time sequences
if slot[0] <= this[1]:
this[1] = slot[1]
# if not, print them out and move on to the next one
else:
print(f' {this[0]:%I:%M %p} to {this[1]:%I:%M %p}')
if this[0].day != slot[0].day:
print(f'\n{this[0]:%Y-%m-%d}')
this = slot
print(f' {this[0]:%I:%M %p} to {this[1]:%I:%M %p}')
def appointments(self, begin=None, end=None):
self._connect()
if begin is None:
begin = datetime.date.today() + datetime.timedelta(days=1) # tomorrow
if end is None:
end = begin + datetime.timedelta(days=2) # duration of 1 day
# http://msdn.microsoft.com/en-us/library/office/aa210899(v=office.11).aspx
appts = self.calendar.Items
appts.IncludeRecurrences = "True"
# Need the following call to 'Sort', otherwise will include all
# recurrences (whether they are in the list or not!)
appts.Sort("[Start]")
where = f"[Start] >= '{begin.strftime('%m/%d/%Y')}' AND [End] <= '{end.strftime('%m/%d/%Y')}'"
return [_ for _ in appts.Restrict(where)]
    def show_appts(self, begin=None, end=None):
        msg = "{1:%H:%M}–{2:%H:%M}, {0} (Organizer: {3})"
        appt_lst = []
        for item in self.appointments(begin, end):
            print(msg.format(item.Subject, item.Start, item.End, item.Organizer))
            appt_lst.append((item.Start, item.End))
        print("\nOpen Slots:")
        self.find_open_slots(appt_lst)
def messages(self):
"""
Return all messages from the inbox sorted by ReceivedTime (descending).
"""
self._connect()
        messages = self.inbox.Items
messages.Sort("ReceivedTime", True)
message = messages.GetFirst()
while message:
yield message
message = messages.GetNext()
def filter_messages(self, after=None, before=None, subject=None, done=None):
"""
Return pipeline reports in Outlook sent after `after` and before `before`.
If `after` is None, return any pipeline report since the last one was
downloaded (according to `last_time_downloaded`). If `before` is None (the
default), any win report after `after` is extracted.
subject, if provided, should be a function taking a message subject as
its only argument and returning True if you want to keep the message,
False otherwise.
done, if provided, should be a function that takes a message object and
returns True if you want to stop iterating over messages or False if
you want to keep going. If not provided, this function will go through
every message in your inbox.
"""
has_recd_time = lambda msg: hasattr(msg, 'ReceivedTime')
always_true = lambda _: True
before = before or always_true
after = after or always_true
subject = subject or always_true
done = done or always_true
for msg in self.messages():
if has_recd_time(msg) and before(msg.ReceivedTime) and after(msg.ReceivedTime) and subject(msg.Subject):
yield msg
if done(msg):
break
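    # Usage sketch (hypothetical predicates):
    #   recent = lambda t: t.date() >= datetime.date(2021, 1, 1)
    #   for msg in Outlook().filter_messages(after=recent,
    #                                        subject=lambda s: 'Report' in s):
    #       print(msg.Subject, msg.ReceivedTime)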
if __name__ == "__main__":
outlook = Outlook()
create_html_sample = 0
show_appts = 1
if create_html_sample:
tbl = outlook.list_to_tbl([[1, 2, 3], [4, 5, 6]])
with open(r'C:\test1.png', 'rb') as io:
img = outlook.inline_img(io)
outlook.create_mail('test@example.com', 'Hello!', f'Hello!<br><br>{tbl}<br><br>{img}<br><br>Goodbye!', show=True)
if show_appts:
outlook.show_appts()
| 36.397541
| 130
| 0.581691
|
08008ca212f21a36d762e9da25b5d6dff0bff6ee
| 1,631
|
py
|
Python
|
resources/rides.py
|
mikenthiwa/Ride-My-Way
|
2ad702903f971e0cd474d9e7339174bbe37c47d8
|
[
"MIT"
] | null | null | null |
resources/rides.py
|
mikenthiwa/Ride-My-Way
|
2ad702903f971e0cd474d9e7339174bbe37c47d8
|
[
"MIT"
] | 2
|
2018-06-12T15:29:44.000Z
|
2018-07-04T09:59:12.000Z
|
resources/rides.py
|
mikenthiwa/Ride-My-Way
|
2ad702903f971e0cd474d9e7339174bbe37c47d8
|
[
"MIT"
] | 4
|
2018-06-24T19:05:33.000Z
|
2018-10-17T15:05:43.000Z
|
from flask_restplus import Resource, Namespace, reqparse, fields
from app.models import Rides
from resources.auth import token_required
rides = Rides()
api = Namespace("Rides", description="Passenger related operations")
request_model = api.model('Request Model', {'username': fields.String,
"pickup_point": fields.String,
"time": fields.String})
class RideList(Resource):
"""Contain GET methods"""
@api.doc(security='apikey')
@token_required
def get(self):
"""Get all rides endpoint"""
response = rides.get_rides()
return response
class Ride(Resource):
"""Contains GET method"""
@api.doc(security='apikey')
@token_required
def get(self, ride_id):
"""get a ride(passenger)"""
response = rides.get_ride(ride_id=ride_id)
return response
class RequestRide(Resource):
"""Contain PATCH method"""
@api.expect(request_model)
@api.doc(security='apikey')
@token_required
def post(self, ride_id):
"""Request ride"""
parser = reqparse.RequestParser()
parser.add_argument('pickup_point', required=True, type=str, help='Pickup_point is required', location=['json'])
args = parser.parse_args()
pickup_point = args['pickup_point']
res = rides.request_ride(ride_id=ride_id, pickup_point=pickup_point)
return res
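# Example POST body for RequestRide (hypothetical values):
#   {"pickup_point": "CBD, Nairobi"}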
api.add_resource(RideList, '/rides', endpoint='ridelist')
api.add_resource(Ride, '/rides/<int:ride_id>')
api.add_resource(RequestRide, '/users/rides/<int:ride_id>/request')
| 28.12069
| 120
| 0.642551
|
52a74e260824f9aea1b54ce683427d3a0b3ec4ed
| 7,709
|
py
|
Python
|
app/users/views.py
|
jingshu-fk/FXTest_copy
|
150012f87021b6b8204fd342c62538c10d8dfa85
|
[
"MIT"
] | null | null | null |
app/users/views.py
|
jingshu-fk/FXTest_copy
|
150012f87021b6b8204fd342c62538c10d8dfa85
|
[
"MIT"
] | 2
|
2021-03-26T00:24:28.000Z
|
2022-03-22T22:06:39.000Z
|
app/users/views.py
|
jingshu-fk/FXTest_copy
|
150012f87021b6b8204fd342c62538c10d8dfa85
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author : lileilei
# @File : views.py
# @Time : 2017/12/7 12:25
from flask import Blueprint
user = Blueprint('user', __name__)
from flask import redirect, request, \
session, url_for, flash, jsonify
from app.models import *
from flask.views import View, MethodView
from common.decorators import chckuserpermisson
from flask_login import login_required
from config import OneAdminCount
from error_message import *
class SetadView(View):  # set project administrator
methods = ['GET', "POST"]
@login_required
def dispatch_request(self):
if chckuserpermisson() == False:
return jsonify({'code': 13, 'msg': permiss_is_ness, 'data': ''})
projec = request.get_json()
try:
username = projec['username']
por = projec['url']
if por == '':
                return jsonify({'code': 14, 'msg': 'Please select a project', 'data': ''})
pan_user = User.query.filter_by(username=username).first()
if not pan_user:
return jsonify({'code': 15, 'msg': login_user_not_exict_message, 'data': ''})
if pan_user.is_sper is True:
                return jsonify({'code': 16, 'msg': 'A super administrator does not need a project assignment', 'data': ''})
pand_por = Project.query.filter_by(project_name=por).first()
if not pand_por:
                return jsonify({'code': 17, 'msg': 'The specified project does not exist', 'data': ''})
pro_per = Quanxian.query.filter_by(project=pand_por.id).all()
oneadmin = []
for i in pro_per:
if i.rose == 2:
oneadmin.append(i.user.all())
if [pan_user] in oneadmin:
                return jsonify({'code': 18, 'msg': 'You are already a project administrator; no need to set it again'})
if (len(oneadmin)) > OneAdminCount:
                return jsonify({'code': 19, 'msg': 'This project has reached the configured limit on administrators'})
for roses in pan_user.quanxians:
if roses.project == pand_por.id:
roses.rose = 2
try:
db.session.commit()
                return jsonify({'code': 200, 'msg': 'Administrator set successfully'})
except:
db.session.rollback()
                return jsonify({'code': 20, 'msg': 'Failed to set administrator', 'data': ''})
except Exception as e:
            return jsonify({'code': 21, 'msg': 'An exception occurred while setting the administrator: %s' % e, 'data': ''})
class DeladView(View):  # revoke administrator
    methods = ['GET', "POST"]
    @login_required
    def dispatch_request(self, id):
        if chckuserpermisson() is False:
            flash(permiss_is_ness)
            return redirect(request.headers.get('Referer'))
        user = User.query.filter_by(username=session.get('username')).first()
        new_ad = User.query.filter_by(id=id, status=False).first()
        if not new_ad:
            flash(login_user_not_exict_message)
            return redirect(url_for('home.adminuser'))
        if new_ad == user:
            flash(admin_cannot_use)
            return redirect(url_for('home.adminuser'))
        return redirect(url_for('home.adminuser'))
class FreadView(View):  # freeze (deactivate) a user
methods = ['GET', "POST"]
@login_required
def dispatch_request(self, id):
if chckuserpermisson() == False:
flash(permiss_is_ness)
return redirect(request.headers.get('Referer'))
user = User.query.filter_by(username=session.get('username')).first()
if user.is_sper != 1:
flash(permiss_is_ness)
return redirect(request.headers.get('Referer'))
new_ad = User.query.filter_by(id=id).first()
if new_ad.status == True:
flash(free_is_again)
return redirect(url_for('home.adminuser'))
if new_ad == user:
flash(ower_cannot_free_me)
return redirect(url_for('home.adminuser'))
new_ad.status = True
try:
db.session.commit()
flash(free_is_success)
return redirect(url_for('home.adminuser'))
except Exception as e:
db.session.rollback()
flash(free_user_error)
return redirect(url_for('home.adminuser'))
class FrereView(View):  # unfreeze (reactivate) a user
methods = ['GET', "POST"]
@login_required
def dispatch_request(self, id):
if chckuserpermisson() == False:
flash(permiss_is_ness)
return redirect(request.headers.get('Referer'))
user = User.query.filter_by(username=session.get('username')).first()
new_ad = User.query.filter_by(id=id).first()
if new_ad.status == False:
flash(user_is_not_free)
return redirect(url_for('home.adminuser'))
if new_ad != user:
new_ad.status = False
try:
db.session.commit()
flash(user_is_un_free)
return redirect(url_for('home.adminuser'))
except Exception as e:
db.session.rollback()
flash(user_is_unfree_success)
return redirect(url_for('home.adminuser'))
flash(ower_not_free_me)
return redirect(url_for('home.adminuser'))
class Acivauserview(View):
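    # Activates a user by assigning a job number.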
methods = ['GET', "POST"]
@login_required
def dispatch_request(self):
if chckuserpermisson() == False:
return jsonify({'code': 13, 'msg': permiss_is_ness, 'data': ''})
userjobnum = request.get_json()
try:
id = int(userjobnum['id'])
job_num = int(userjobnum['jobnum'])
except Exception as e:
return jsonify({'code': 13, 'msg': activ_is_int})
user = User.query.filter_by(id=id, status=False).first()
if not user:
return jsonify({'code': 13, 'msg': login_user_not_exict_message})
try:
user_job = User.query.filter_by(jobnum=job_num).first()
if user_job:
return jsonify({'code': 13, 'msg': activi_user_jobnum})
except Exception as e:
pass
        if user.jobnum is None or user.jobnum == "None":
user.jobnum = job_num
db.session.add(user)
db.session.commit()
            return jsonify({'code': 20, 'msg': 'Activation succeeded', 'data': ''})
        return jsonify({'code': 13, 'msg': 'Activation failed', 'data': activi_user_jobnum_is})
class RedpassView(View):  # reset password
methods = ['GET', "POST"]
@login_required
def dispatch_request(self, id):
if chckuserpermisson() is False:
flash(permiss_is_ness)
return redirect(request.headers.get('Referer'))
user = User.query.filter_by(username=session.get('username')).first()
new_ad = User.query.filter_by(id=id).first()
if new_ad != user:
if user.is_sper == 1:
new_ad.set_password('111111')
try:
db.session.commit()
flash(reset_success_message)
return redirect(url_for('home.adminuser'))
except Exception as e:
db.session.rollback()
flash(user_reset_error)
return redirect(url_for('home.adminuser'))
flash(user_reset_isnot_amin)
return redirect(url_for('home.adminuser'))
flash(user_reset_owner)
return redirect(url_for('home.adminuser'))
class ChangePassword(MethodView):
@login_required
def post(self):
password = request.data.decode('utf-8')
user = User.query.filter_by(username=session.get('username')).first()
user.set_password(password)
try:
db.session.commit()
return jsonify({'code': 1, 'data': change_password_success})
except Exception as e:
db.session.rollback()
return jsonify({'code': 2, 'data': change_password_error})
| 37.604878
| 93
| 0.569983
|
f25d7fca5b5fdbeba8c9fb4be7f65eea5ff9cab5
| 154
|
py
|
Python
|
scrapers/AGY-isle-of-anglesey/councillors.py
|
DemocracyClub/LGSF
|
21c2a049db08575e03db2fb63a8bccc8de0c636b
|
[
"MIT"
] | 4
|
2018-10-17T13:30:08.000Z
|
2021-06-22T13:29:43.000Z
|
scrapers/AGY-isle-of-anglesey/councillors.py
|
DemocracyClub/LGSF
|
21c2a049db08575e03db2fb63a8bccc8de0c636b
|
[
"MIT"
] | 46
|
2018-10-15T13:47:48.000Z
|
2022-03-23T10:26:18.000Z
|
scrapers/AGY-isle-of-anglesey/councillors.py
|
DemocracyClub/LGSF
|
21c2a049db08575e03db2fb63a8bccc8de0c636b
|
[
"MIT"
] | 1
|
2018-10-15T13:36:03.000Z
|
2018-10-15T13:36:03.000Z
|
from lgsf.councillors.scrapers import ModGovCouncillorScraper
class Scraper(ModGovCouncillorScraper):
base_url = "http://democracy.anglesey.gov.uk"
| 25.666667
| 61
| 0.811688
|
dd87e6475eab3d287734190ac09a3edb4c191ccd
| 47,888
|
py
|
Python
|
evaluate_3dpw_mine.py
|
akashsengupta1997/ProHMR
|
7015a3d070c79b4571d43abdf5e522468091a94d
|
[
"BSD-3-Clause"
] | null | null | null |
evaluate_3dpw_mine.py
|
akashsengupta1997/ProHMR
|
7015a3d070c79b4571d43abdf5e522468091a94d
|
[
"BSD-3-Clause"
] | null | null | null |
evaluate_3dpw_mine.py
|
akashsengupta1997/ProHMR
|
7015a3d070c79b4571d43abdf5e522468091a94d
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import math
import my_config
from prohmr.datasets.pw3d_eval_dataset import PW3DEvalDataset
from prohmr.configs import get_config, prohmr_config
from prohmr.models import ProHMR
from prohmr.models.smpl_mine import SMPL
from prohmr.utils.pose_utils import compute_similarity_transform_batch_numpy, scale_and_translation_transform_batch
from prohmr.utils.geometry import undo_keypoint_normalisation, orthographic_project_torch, convert_weak_perspective_to_camera_translation
from prohmr.utils.renderer import Renderer
from prohmr.utils.sampling_utils import compute_vertex_uncertainties_from_samples
import subsets
def evaluate_3dpw(model,
model_cfg,
eval_dataset,
metrics_to_track,
device,
save_path,
num_pred_samples,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000,
num_samples_to_visualise=10,
save_per_frame_uncertainty=True):
eval_dataloader = DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
drop_last=True,
num_workers=num_workers,
pin_memory=pin_memory)
smpl_neutral = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1).to(device)
smpl_male = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1, gender='male').to(device)
smpl_female = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1, gender='female').to(device)
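    # Ground-truth meshes are posed with the gendered SMPL models above;
    # predictions always use the neutral model.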
metric_sums = {'num_datapoints': 0}
per_frame_metrics = {}
for metric in metrics_to_track:
metric_sums[metric] = 0.
per_frame_metrics[metric] = []
if metric == 'joints3D_coco_invis_samples_dist_from_mean':
metric_sums['num_invis_joints3Dsamples'] = 0
elif metric == 'hrnet_joints2D_l2es':
metric_sums['num_vis_hrnet_joints2D'] = 0
elif metric == 'hrnet_joints2Dsamples_l2es':
metric_sums['num_vis_hrnet_joints2Dsamples'] = 0
fname_per_frame = []
pose_per_frame = []
shape_per_frame = []
cam_per_frame = []
if save_per_frame_uncertainty:
vertices_uncertainty_per_frame = []
renderer = Renderer(model_cfg, faces=model.smpl.faces)
reposed_cam_wp = np.array([0.85, 0., -0.2])
reposed_cam_t = convert_weak_perspective_to_camera_translation(cam_wp=reposed_cam_wp,
focal_length=model_cfg.EXTRA.FOCAL_LENGTH,
resolution=model_cfg.MODEL.IMAGE_SIZE)
model.eval()
for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)):
# if batch_num == 2:
# break
# ------------------------------- TARGETS and INPUTS -------------------------------
input = samples_batch['input'].to(device)
target_pose = samples_batch['pose'].to(device)
target_shape = samples_batch['shape'].to(device)
target_gender = samples_batch['gender'][0]
hrnet_joints2D_coco = samples_batch['hrnet_kps'].cpu().detach().numpy()
hrnet_joints2D_coco_vis = samples_batch['hrnet_kps_vis'].cpu().detach().numpy()
fname = samples_batch['fname']
if target_gender == 'm':
target_smpl_output = smpl_male(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_reposed_smpl_output = smpl_male(betas=target_shape)
elif target_gender == 'f':
target_smpl_output = smpl_female(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_reposed_smpl_output = smpl_female(betas=target_shape)
target_vertices = target_smpl_output.vertices
target_joints_h36mlsp = target_smpl_output.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :]
target_reposed_vertices = target_reposed_smpl_output.vertices
# ------------------------------- PREDICTIONS -------------------------------
out = model({'img': input})
"""
out is a dict with keys:
- pred_cam: (1, num_samples, 3) tensor, camera is same for all samples
- pred_cam_t: (1, num_samples, 3) tensor, camera is same for all samples
- This is just pred_cam converted from weak-perspective (i.e. [s, tx, ty]) to
full-perspective (i.e. [tx, ty, tz] and focal_length = 5000 --> this is basically just weak-perspective anyway)
- pred_smpl_params: dict with keys:
- global_orient: (1, num_samples, 1, 3, 3) tensor
- body_pose: (1, num_samples, 23, 3, 3) tensor
- betas: (1, num_samples, 10) tensor, betas are same for all samples
- pred_pose_6d: (1, num_samples, 144) tensor
- pred_vertices: (1, num_samples, 6890, 3) tensor
- pred_keypoints_3d: (1, num_samples, 44, 3) tensor
- pred_keypoints_2d: (1, num_samples, 44, 2) tensor
- log_prob: (1, num_samples) tensor
- conditioning_feats: (1, 2047) tensor
"""
pred_cam_wp = out['pred_cam'][:, 0, :]
pred_pose_rotmats_mode = out['pred_smpl_params']['body_pose'][:, 0, :, :, :]
pred_glob_rotmat_mode = out['pred_smpl_params']['global_orient'][:, 0, :, :, :]
pred_shape_mode = out['pred_smpl_params']['betas'][:, 0, :]
pred_pose_rotmats_samples = out['pred_smpl_params']['body_pose'][0, 1:, :, :, :]
pred_glob_rotmat_samples = out['pred_smpl_params']['global_orient'][0, 1:, :, :, :]
pred_shape_samples = out['pred_smpl_params']['betas'][0, 1:, :]
assert pred_pose_rotmats_samples.shape[0] == num_pred_samples
pred_smpl_output_mode = smpl_neutral(body_pose=pred_pose_rotmats_mode,
global_orient=pred_glob_rotmat_mode,
betas=pred_shape_mode,
pose2rot=False)
pred_vertices_mode = pred_smpl_output_mode.vertices # (1, 6890, 3)
pred_joints_h36mlsp_mode = pred_smpl_output_mode.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :] # (1, 14, 3)
pred_joints_coco_mode = pred_smpl_output_mode.joints[:, my_config.ALL_JOINTS_TO_COCO_MAP, :] # (1, 17, 3)
pred_vertices2D_mode = orthographic_project_torch(pred_vertices_mode, pred_cam_wp, scale_first=False)
pred_vertices2D_mode = undo_keypoint_normalisation(pred_vertices2D_mode, input.shape[-1])
pred_joints2D_coco_mode = orthographic_project_torch(pred_joints_coco_mode, pred_cam_wp) # (1, 17, 2)
pred_joints2D_coco_mode = undo_keypoint_normalisation(pred_joints2D_coco_mode, input.shape[-1])
pred_reposed_vertices_mean = smpl_neutral(betas=pred_shape_mode).vertices # (1, 6890, 3)
pred_smpl_output_samples = smpl_neutral(body_pose=pred_pose_rotmats_samples,
global_orient=pred_glob_rotmat_samples,
betas=pred_shape_samples,
pose2rot=False)
pred_vertices_samples = pred_smpl_output_samples.vertices # (num_pred_samples, 6890, 3)
pred_joints_h36mlsp_samples = pred_smpl_output_samples.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :] # (num_samples, 14, 3)
pred_joints_coco_samples = pred_smpl_output_samples.joints[:, my_config.ALL_JOINTS_TO_COCO_MAP, :] # (num_pred_samples, 17, 3)
pred_joints2D_coco_samples = orthographic_project_torch(pred_joints_coco_samples, pred_cam_wp) # (num_pred_samples, 17, 2)
pred_joints2D_coco_samples = undo_keypoint_normalisation(pred_joints2D_coco_samples, input.shape[-1])
pred_reposed_vertices_samples = smpl_neutral(body_pose=torch.zeros(num_pred_samples, 69, device=device, dtype=torch.float32),
global_orient=torch.zeros(num_pred_samples, 3, device=device, dtype=torch.float32),
betas=pred_shape_samples).vertices # (num_pred_samples, 6890, 3)
# ------------------------------------------------ METRICS ------------------------------------------------
# Numpy-fying targets
target_vertices = target_vertices.cpu().detach().numpy()
target_joints_h36mlsp = target_joints_h36mlsp.cpu().detach().numpy()
target_reposed_vertices = target_reposed_vertices.cpu().detach().numpy()
# Numpy-fying preds
pred_vertices_mode = pred_vertices_mode.cpu().detach().numpy()
pred_joints_h36mlsp_mode = pred_joints_h36mlsp_mode.cpu().detach().numpy()
pred_joints_coco_mode = pred_joints_coco_mode.cpu().detach().numpy()
pred_vertices2D_mode = pred_vertices2D_mode.cpu().detach().numpy()
pred_joints2D_coco_mode = pred_joints2D_coco_mode.cpu().detach().numpy()
pred_reposed_vertices_mean = pred_reposed_vertices_mean.cpu().detach().numpy()
pred_vertices_samples = pred_vertices_samples.cpu().detach().numpy()
pred_joints_h36mlsp_samples = pred_joints_h36mlsp_samples.cpu().detach().numpy()
pred_joints_coco_samples = pred_joints_coco_samples.cpu().detach().numpy()
pred_joints2D_coco_samples = pred_joints2D_coco_samples.cpu().detach().numpy()
pred_reposed_vertices_samples = pred_reposed_vertices_samples.cpu().detach().numpy()
# -------------- 3D Metrics with Mode and Minimum Error Samples --------------
if 'pves' in metrics_to_track:
pve_batch = np.linalg.norm(pred_vertices_mode - target_vertices,
axis=-1) # (bs, 6890)
metric_sums['pves'] += np.sum(pve_batch) # scalar
per_frame_metrics['pves'].append(np.mean(pve_batch, axis=-1))
if 'pves_samples_min' in metrics_to_track:
pve_per_sample = np.linalg.norm(pred_vertices_samples - target_vertices, axis=-1) # (num samples, 6890)
min_pve_sample = np.argmin(np.mean(pve_per_sample, axis=-1))
pve_samples_min_batch = pve_per_sample[min_pve_sample] # (6890,)
metric_sums['pves_samples_min'] += np.sum(pve_samples_min_batch)
per_frame_metrics['pves_samples_min'].append(np.mean(pve_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Scale and translation correction
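        # (Predictions are aligned to targets with a best-fit scale and
        # translation before per-vertex errors are computed.)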
if 'pves_sc' in metrics_to_track:
pred_vertices_sc = scale_and_translation_transform_batch(
pred_vertices_mode,
target_vertices)
pve_sc_batch = np.linalg.norm(
pred_vertices_sc - target_vertices,
axis=-1) # (bs, 6890)
metric_sums['pves_sc'] += np.sum(pve_sc_batch) # scalar
per_frame_metrics['pves_sc'].append(np.mean(pve_sc_batch, axis=-1))
if 'pves_sc_samples_min' in metrics_to_track:
target_vertices_tiled = np.tile(target_vertices, (num_pred_samples, 1, 1)) # (num samples, 6890, 3)
pred_vertices_samples_sc = scale_and_translation_transform_batch(
pred_vertices_samples,
target_vertices_tiled)
pve_sc_per_sample = np.linalg.norm(pred_vertices_samples_sc - target_vertices_tiled, axis=-1) # (num samples, 6890)
min_pve_sc_sample = np.argmin(np.mean(pve_sc_per_sample, axis=-1))
pve_sc_samples_min_batch = pve_sc_per_sample[min_pve_sc_sample] # (6890,)
metric_sums['pves_sc_samples_min'] += np.sum(pve_sc_samples_min_batch)
per_frame_metrics['pves_sc_samples_min'].append(np.mean(pve_sc_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Procrustes analysis
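        # (Similarity alignment: a best-fit rotation, scale and translation
        # is removed before per-vertex errors are computed.)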
if 'pves_pa' in metrics_to_track:
pred_vertices_pa = compute_similarity_transform_batch_numpy(pred_vertices_mode, target_vertices)
pve_pa_batch = np.linalg.norm(pred_vertices_pa - target_vertices, axis=-1) # (bs, 6890)
metric_sums['pves_pa'] += np.sum(pve_pa_batch) # scalar
per_frame_metrics['pves_pa'].append(np.mean(pve_pa_batch, axis=-1))
if 'pves_pa_samples_min' in metrics_to_track:
target_vertices_tiled = np.tile(target_vertices, (num_pred_samples, 1, 1)) # (num samples, 6890, 3)
pred_vertices_samples_pa = compute_similarity_transform_batch_numpy(
pred_vertices_samples,
target_vertices_tiled)
pve_pa_per_sample = np.linalg.norm(pred_vertices_samples_pa - target_vertices_tiled, axis=-1) # (num samples, 6890)
min_pve_pa_sample = np.argmin(np.mean(pve_pa_per_sample, axis=-1))
pve_pa_samples_min_batch = pve_pa_per_sample[min_pve_pa_sample] # (6890,)
metric_sums['pves_pa_samples_min'] += np.sum(pve_pa_samples_min_batch)
per_frame_metrics['pves_pa_samples_min'].append(np.mean(pve_pa_samples_min_batch, axis=-1, keepdims=True)) # (1,)
if 'pve-ts' in metrics_to_track:
pvet_batch = np.linalg.norm(pred_reposed_vertices_mean - target_reposed_vertices, axis=-1)
metric_sums['pve-ts'] += np.sum(pvet_batch) # scalar
per_frame_metrics['pve-ts'].append(np.mean(pvet_batch, axis=-1))
if 'pve-ts_samples_min' in metrics_to_track:
pvet_per_sample = np.linalg.norm(pred_reposed_vertices_samples - target_reposed_vertices, axis=-1) # (num samples, 6890)
min_pvet_sample = np.argmin(np.mean(pvet_per_sample, axis=-1))
pvet_samples_min_batch = pvet_per_sample[min_pvet_sample] # (6890,)
metric_sums['pve-ts_samples_min'] += np.sum(pvet_samples_min_batch)
per_frame_metrics['pve-ts_samples_min'].append(np.mean(pvet_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Scale and translation correction
if 'pve-ts_sc' in metrics_to_track:
pred_reposed_vertices_sc = scale_and_translation_transform_batch(
pred_reposed_vertices_mean,
target_reposed_vertices)
pvet_scale_corrected_batch = np.linalg.norm(
pred_reposed_vertices_sc - target_reposed_vertices,
axis=-1) # (bs, 6890)
metric_sums['pve-ts_sc'] += np.sum(pvet_scale_corrected_batch) # scalar
per_frame_metrics['pve-ts_sc'].append(np.mean(pvet_scale_corrected_batch, axis=-1))
if 'pve-ts_sc_samples_min' in metrics_to_track:
target_reposed_vertices_tiled = np.tile(target_reposed_vertices, (num_pred_samples, 1, 1)) # (num samples, 6890, 3)
pred_reposed_vertices_samples_sc = scale_and_translation_transform_batch(
pred_reposed_vertices_samples,
target_reposed_vertices_tiled)
pvet_sc_per_sample = np.linalg.norm(pred_reposed_vertices_samples_sc - target_reposed_vertices_tiled, axis=-1) # (num samples, 6890)
min_pvet_sc_sample = np.argmin(np.mean(pvet_sc_per_sample, axis=-1))
pvet_sc_samples_min_batch = pvet_sc_per_sample[min_pvet_sc_sample] # (6890,)
metric_sums['pve-ts_sc_samples_min'] += np.sum(pvet_sc_samples_min_batch)
per_frame_metrics['pve-ts_sc_samples_min'].append(np.mean(pvet_sc_samples_min_batch, axis=-1, keepdims=True)) # (1,)
if 'mpjpes' in metrics_to_track:
mpjpe_batch = np.linalg.norm(pred_joints_h36mlsp_mode - target_joints_h36mlsp, axis=-1) # (bs, 14)
metric_sums['mpjpes'] += np.sum(mpjpe_batch) # scalar
per_frame_metrics['mpjpes'].append(np.mean(mpjpe_batch, axis=-1))
if 'mpjpes_samples_min' in metrics_to_track:
mpjpe_per_sample = np.linalg.norm(pred_joints_h36mlsp_samples - target_joints_h36mlsp, axis=-1) # (num samples, 14)
min_mpjpe_sample = np.argmin(np.mean(mpjpe_per_sample, axis=-1))
mpjpe_samples_min_batch = mpjpe_per_sample[min_mpjpe_sample] # (14,)
metric_sums['mpjpes_samples_min'] += np.sum(mpjpe_samples_min_batch)
per_frame_metrics['mpjpes_samples_min'].append(np.mean(mpjpe_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Scale and translation correction
if 'mpjpes_sc' in metrics_to_track:
pred_joints_h36mlsp_sc = scale_and_translation_transform_batch(
pred_joints_h36mlsp_mode,
target_joints_h36mlsp)
mpjpe_sc_batch = np.linalg.norm(
pred_joints_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
metric_sums['mpjpes_sc'] += np.sum(mpjpe_sc_batch) # scalar
per_frame_metrics['mpjpes_sc'].append(np.mean(mpjpe_sc_batch, axis=-1))
if 'mpjpes_sc_samples_min' in metrics_to_track:
target_joints_h36mlsp_tiled = np.tile(target_joints_h36mlsp, (num_pred_samples, 1, 1)) # (num samples, 14, 3)
pred_joints_h36mlsp_samples_sc = scale_and_translation_transform_batch(
pred_joints_h36mlsp_samples,
target_joints_h36mlsp_tiled)
mpjpe_sc_per_sample = np.linalg.norm(pred_joints_h36mlsp_samples_sc - target_joints_h36mlsp_tiled, axis=-1) # (num samples, 14)
min_mpjpe_sc_sample = np.argmin(np.mean(mpjpe_sc_per_sample, axis=-1))
mpjpe_sc_samples_min_batch = mpjpe_sc_per_sample[min_mpjpe_sc_sample] # (14,)
metric_sums['mpjpes_sc_samples_min'] += np.sum(mpjpe_sc_samples_min_batch)
per_frame_metrics['mpjpes_sc_samples_min'].append(np.mean(mpjpe_sc_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# Procrustes analysis
if 'mpjpes_pa' in metrics_to_track:
pred_joints_h36mlsp_pa = compute_similarity_transform_batch_numpy(pred_joints_h36mlsp_mode, target_joints_h36mlsp)
mpjpe_pa_batch = np.linalg.norm(pred_joints_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
metric_sums['mpjpes_pa'] += np.sum(mpjpe_pa_batch) # scalar
per_frame_metrics['mpjpes_pa'].append(np.mean(mpjpe_pa_batch, axis=-1))
if 'mpjpes_pa_samples_min' in metrics_to_track:
target_joints_h36mlsp_tiled = np.tile(target_joints_h36mlsp, (num_pred_samples, 1, 1)) # (num samples, 14, 3)
pred_joints_h36mlsp_samples_pa = compute_similarity_transform_batch_numpy(
pred_joints_h36mlsp_samples,
target_joints_h36mlsp_tiled)
mpjpe_pa_per_sample = np.linalg.norm(pred_joints_h36mlsp_samples_pa - target_joints_h36mlsp_tiled, axis=-1) # (num samples, 14)
min_mpjpe_pa_sample = np.argmin(np.mean(mpjpe_pa_per_sample, axis=-1))
mpjpe_pa_samples_min_batch = mpjpe_pa_per_sample[min_mpjpe_pa_sample] # (14,)
metric_sums['mpjpes_pa_samples_min'] += np.sum(mpjpe_pa_samples_min_batch)
per_frame_metrics['mpjpes_pa_samples_min'].append(np.mean(mpjpe_pa_samples_min_batch, axis=-1, keepdims=True)) # (1,)
# ---------------- 3D Sample Distance from Mean (i.e. Variance) Metrics -----------
if 'verts_samples_dist_from_mean' in metrics_to_track:
verts_samples_mean = pred_vertices_samples.mean(axis=0) # (6890, 3)
verts_samples_dist_from_mean = np.linalg.norm(pred_vertices_samples - verts_samples_mean, axis=-1) # (num samples, 6890)
metric_sums['verts_samples_dist_from_mean'] += verts_samples_dist_from_mean.sum()
per_frame_metrics['verts_samples_dist_from_mean'].append(verts_samples_dist_from_mean.mean()[None]) # (1,)
if 'joints3D_coco_samples_dist_from_mean' in metrics_to_track:
joints3D_coco_samples_mean = pred_joints_coco_samples.mean(axis=0) # (17, 3)
joints3D_coco_samples_dist_from_mean = np.linalg.norm(pred_joints_coco_samples - joints3D_coco_samples_mean, axis=-1) # (num samples, 17)
metric_sums['joints3D_coco_samples_dist_from_mean'] += joints3D_coco_samples_dist_from_mean.sum() # scalar
per_frame_metrics['joints3D_coco_samples_dist_from_mean'].append(joints3D_coco_samples_dist_from_mean.mean()[None]) # (1,)
if 'joints3D_coco_invis_samples_dist_from_mean' in metrics_to_track:
# (In)visibility of specific joints determined by HRNet 2D joint predictions and confidence scores.
hrnet_joints2D_coco_invis = np.logical_not(hrnet_joints2D_coco_vis[0]) # (17,)
if np.any(hrnet_joints2D_coco_invis):
joints3D_coco_invis_samples = pred_joints_coco_samples[:, hrnet_joints2D_coco_invis, :] # (num samples, num invis joints, 3)
joints3D_coco_invis_samples_mean = joints3D_coco_invis_samples.mean(axis=0) # (num_invis_joints, 3)
joints3D_coco_invis_samples_dist_from_mean = np.linalg.norm(joints3D_coco_invis_samples - joints3D_coco_invis_samples_mean,
axis=-1) # (num samples, num_invis_joints)
metric_sums['joints3D_coco_invis_samples_dist_from_mean'] += joints3D_coco_invis_samples_dist_from_mean.sum() # scalar
metric_sums['num_invis_joints3Dsamples'] += np.prod(joints3D_coco_invis_samples_dist_from_mean.shape)
per_frame_metrics['joints3D_coco_invis_samples_dist_from_mean'].append(joints3D_coco_invis_samples_dist_from_mean.mean()[None]) # (1,)
else:
per_frame_metrics['joints3D_coco_invis_samples_dist_from_mean'].append(np.zeros(1))
# -------------------------------- 2D Metrics ---------------------------
        # Using HRNet 2D joints as target, rather than GT
if 'hrnet_joints2D_l2es' in metrics_to_track:
hrnet_joints2D_l2e_batch = np.linalg.norm(pred_joints2D_coco_mode[:, hrnet_joints2D_coco_vis[0], :] - hrnet_joints2D_coco[:, hrnet_joints2D_coco_vis[0], :],
axis=-1) # (1, num vis joints)
assert hrnet_joints2D_l2e_batch.shape[1] == hrnet_joints2D_coco_vis.sum()
metric_sums['hrnet_joints2D_l2es'] += np.sum(hrnet_joints2D_l2e_batch) # scalar
metric_sums['num_vis_hrnet_joints2D'] += hrnet_joints2D_l2e_batch.shape[1]
per_frame_metrics['hrnet_joints2D_l2es'].append(np.mean(hrnet_joints2D_l2e_batch, axis=-1)) # (1,)
# -------------------------------- 2D Metrics after Averaging over Samples ---------------------------
if 'hrnet_joints2Dsamples_l2es' in metrics_to_track:
hrnet_joints2Dsamples_l2e_batch = np.linalg.norm(pred_joints2D_coco_samples[:, hrnet_joints2D_coco_vis[0], :] - hrnet_joints2D_coco[:, hrnet_joints2D_coco_vis[0], :],
axis=-1) # (num_samples, num vis joints)
assert hrnet_joints2Dsamples_l2e_batch.shape[1] == hrnet_joints2D_coco_vis.sum()
metric_sums['hrnet_joints2Dsamples_l2es'] += np.sum(hrnet_joints2Dsamples_l2e_batch) # scalar
metric_sums['num_vis_hrnet_joints2Dsamples'] += np.prod(hrnet_joints2Dsamples_l2e_batch.shape)
per_frame_metrics['hrnet_joints2Dsamples_l2es'].append(np.mean(hrnet_joints2Dsamples_l2e_batch)[None]) # (1,)
metric_sums['num_datapoints'] += target_pose.shape[0]
fname_per_frame.append(fname)
pose_per_frame.append(np.concatenate([pred_glob_rotmat_mode.cpu().detach().numpy(),
pred_pose_rotmats_mode.cpu().detach().numpy()],
axis=1))
shape_per_frame.append(pred_shape_mode.cpu().detach().numpy())
cam_per_frame.append(pred_cam_wp.cpu().detach().numpy())
# ------------------------------- VISUALISE -------------------------------
if vis_every_n_batches is not None and batch_num % vis_every_n_batches == 0:
vis_img = samples_batch['vis_img'].numpy()
vis_img = np.transpose(vis_img, [0, 2, 3, 1])
pred_cam_t = out['pred_cam_t'][0, 0, :].cpu().detach().numpy()
# Uncertainty Computation
# Uncertainty computed by sampling + average distance from mean
avg_vertices_distance_from_mean, avg_vertices_sc_distance_from_mean = compute_vertex_uncertainties_from_samples(
vertices_samples=pred_vertices_samples,
target_vertices=target_vertices)
if save_per_frame_uncertainty:
vertices_uncertainty_per_frame.append(avg_vertices_distance_from_mean)
# Render predicted meshes
body_vis_rgb_mode = renderer(vertices=pred_vertices_mode[0],
camera_translation=pred_cam_t.copy(),
image=vis_img[0],
unnormalise_img=False)
body_vis_rgb_mode_rot = renderer(vertices=pred_vertices_mode[0],
camera_translation=pred_cam_t.copy(),
image=np.zeros_like(vis_img[0]),
unnormalise_img=False,
angle=np.pi/2.,
axis=[0., 1., 0.])
reposed_body_vis_rgb_mean = renderer(vertices=pred_reposed_vertices_mean[0],
camera_translation=reposed_cam_t.copy(),
image=np.zeros_like(vis_img[0]),
unnormalise_img=False,
flip_updown=False)
reposed_body_vis_rgb_mean_rot = renderer(vertices=pred_reposed_vertices_mean[0],
camera_translation=reposed_cam_t.copy(),
image=np.zeros_like(vis_img[0]),
unnormalise_img=False,
angle=np.pi / 2.,
axis=[0., 1., 0.],
flip_updown=False)
body_vis_rgb_samples = []
body_vis_rgb_rot_samples = []
for i in range(num_samples_to_visualise):
body_vis_rgb_samples.append(renderer(vertices=pred_vertices_samples[i],
camera_translation=pred_cam_t.copy(),
image=vis_img[0],
unnormalise_img=False))
body_vis_rgb_rot_samples.append(renderer(vertices=pred_vertices_samples[i],
camera_translation=pred_cam_t.copy(),
image=np.zeros_like(vis_img[0]),
unnormalise_img=False,
angle=np.pi / 2.,
axis=[0., 1., 0.]))
# Save samples
samples_save_path = os.path.join(save_path, os.path.splitext(fname[0])[0] + '_samples.npy')
np.save(samples_save_path, pred_vertices_samples)
# ------------------ Model Prediction, Error and Uncertainty Figure ------------------
num_row = 5
num_col = 6
subplot_count = 1
plt.figure(figsize=(20, 20))
# Plot image and mask vis
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(vis_img[0])
subplot_count += 1
# Plot pred vertices 2D and body render overlaid over input
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(vis_img[0])
plt.scatter(pred_vertices2D_mode[0, :, 0],
pred_vertices2D_mode[0, :, 1],
c='r', s=0.01)
subplot_count += 1
# Plot body render overlaid on vis image
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(body_vis_rgb_mode)
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(body_vis_rgb_mode_rot)
subplot_count += 1
# Plot reposed body render
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(reposed_body_vis_rgb_mean)
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(reposed_body_vis_rgb_mean_rot)
subplot_count += 1
if 'pves_sc' in metrics_to_track:
# Plot PVE-SC pred vs target comparison
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.text(0.5, 0.5, s='PVE-SC')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
plt.scatter(target_vertices[0, :, 0],
target_vertices[0, :, 1],
s=0.02,
c='blue')
plt.scatter(pred_vertices_sc[0, :, 0],
pred_vertices_sc[0, :, 1],
s=0.01,
c='red')
plt.gca().set_aspect('equal', adjustable='box')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
norm = plt.Normalize(vmin=0.0, vmax=0.2, clip=True)
plt.scatter(pred_vertices_sc[0, :, 0],
pred_vertices_sc[0, :, 1],
s=0.05,
c=pve_sc_batch[0],
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
plt.text(-0.6, -0.9, s='PVE-SC: {:.4f}'.format(per_frame_metrics['pves_sc'][batch_num][0]))
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
norm = plt.Normalize(vmin=0.0, vmax=0.2, clip=True)
plt.scatter(pred_vertices_sc[0, :, 2], # Equivalent to Rotated 90° about y axis
pred_vertices_sc[0, :, 1],
s=0.05,
c=pve_sc_batch[0],
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
plt.text(-0.6, -0.9, s='PVE-SC: {:.4f}'.format(per_frame_metrics['pves_sc'][batch_num][0]))
subplot_count += 1
if 'pves_pa' in metrics_to_track:
# Plot PVE-PA pred vs target comparison
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.text(0.5, 0.5, s='PVE-PA')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
plt.scatter(target_vertices[0, :, 0],
target_vertices[0, :, 1],
s=0.02,
c='blue')
plt.scatter(pred_vertices_pa[0, :, 0],
pred_vertices_pa[0, :, 1],
s=0.01,
c='red')
plt.gca().set_aspect('equal', adjustable='box')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
norm = plt.Normalize(vmin=0.0, vmax=0.2, clip=True)
plt.scatter(pred_vertices_pa[0, :, 0],
pred_vertices_pa[0, :, 1],
s=0.05,
c=pve_pa_batch[0],
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
plt.text(-0.6, -0.9, s='PVE-PA: {:.4f}'.format(per_frame_metrics['pves_pa'][batch_num][0]))
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
norm = plt.Normalize(vmin=0.0, vmax=0.2, clip=True)
plt.scatter(pred_vertices_pa[0, :, 2], # Equivalent to Rotated 90° about y axis
pred_vertices_pa[0, :, 1],
s=0.05,
c=pve_pa_batch[0],
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
plt.text(-0.6, -0.9, s='PVE-PA: {:.4f}'.format(per_frame_metrics['pves_pa'][batch_num][0]))
subplot_count += 1
if 'pve-ts_sc' in metrics_to_track:
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.text(0.5, 0.5, s='PVE-T-SC')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.scatter(target_reposed_vertices[0, :, 0],
target_reposed_vertices[0, :, 1],
s=0.02,
c='blue')
plt.scatter(pred_reposed_vertices_sc[0, :, 0],
pred_reposed_vertices_sc[0, :, 1],
s=0.01,
c='red')
plt.gca().set_aspect('equal', adjustable='box')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
norm = plt.Normalize(vmin=0.0, vmax=0.03, clip=True)
plt.scatter(pred_reposed_vertices_sc[0, :, 0],
pred_reposed_vertices_sc[0, :, 1],
s=0.05,
c=pvet_scale_corrected_batch[0],
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
plt.text(-0.6, -0.9, s='PVE-T-SC: {:.4f}'.format(per_frame_metrics['pve-ts_sc'][batch_num][0]))
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
norm = plt.Normalize(vmin=0.0, vmax=0.03, clip=True)
plt.scatter(pred_reposed_vertices_sc[0, :, 2], # Equivalent to Rotated 90° about y axis
pred_reposed_vertices_sc[0, :, 1],
s=0.05,
c=pvet_scale_corrected_batch[0],
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
plt.text(-0.6, -0.9, s='PVE-T-SC: {:.4f}'.format(per_frame_metrics['pve-ts_sc'][batch_num][0]))
subplot_count += 1
# Plot per-vertex uncertainties
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.text(0.5, 0.5, s='Uncertainty for\nPVE')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
norm = plt.Normalize(vmin=0.0, vmax=0.2, clip=True)
plt.scatter(pred_vertices_sc[0, :, 0],
pred_vertices_sc[0, :, 1],
s=0.05,
c=avg_vertices_distance_from_mean,
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
norm = plt.Normalize(vmin=0.0, vmax=0.2, clip=True)
plt.scatter(pred_vertices_sc[0, :, 2],
pred_vertices_sc[0, :, 1],
s=0.05,
c=avg_vertices_sc_distance_from_mean,
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.text(0.5, 0.5, s='Uncertainty for\nPVE-SC')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
norm = plt.Normalize(vmin=0.0, vmax=0.2, clip=True)
plt.scatter(pred_vertices_pa[0, :, 0],
pred_vertices_pa[0, :, 1],
s=0.05,
c=avg_vertices_sc_distance_from_mean,
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.gca().invert_yaxis()
norm = plt.Normalize(vmin=0.0, vmax=0.2, clip=True)
plt.scatter(pred_vertices_pa[0, :, 2],
# Equivalent to Rotated 90° about y axis
pred_vertices_pa[0, :, 1],
s=0.05,
c=avg_vertices_sc_distance_from_mean,
cmap='jet',
norm=norm)
plt.gca().set_aspect('equal', adjustable='box')
subplot_count += 1
plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
hspace=0, wspace=0)
plt.margins(0, 0)
save_fig_path = os.path.join(save_path, fname[0])
plt.savefig(save_fig_path, bbox_inches='tight')
plt.close()
# ------------------ Samples from Predicted Distribution Figure ------------------
num_subplots = num_samples_to_visualise * 2 + 2
num_row = 4
num_col = math.ceil(num_subplots / float(num_row))
subplot_count = 1
plt.figure(figsize=(20, 20))
# Plot mode prediction
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(body_vis_rgb_mode)
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(body_vis_rgb_mode_rot)
subplot_count += 1
# Plot samples from predicted distribution
for i in range(num_samples_to_visualise):
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(body_vis_rgb_samples[i])
subplot_count += 1
plt.subplot(num_row, num_col, subplot_count)
plt.gca().axis('off')
plt.imshow(body_vis_rgb_rot_samples[i])
subplot_count += 1
plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
hspace=0, wspace=0)
plt.margins(0, 0)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
save_fig_path = os.path.join(save_path, os.path.splitext(fname[0])[0] + '_samples.png')
plt.savefig(save_fig_path, bbox_inches='tight')
plt.close()
# ------------------------------- DISPLAY METRICS AND SAVE PER-FRAME METRICS -------------------------------
print('\n--- Check Pred save Shapes ---')
fname_per_frame = np.concatenate(fname_per_frame, axis=0)
np.save(os.path.join(save_path, 'fname_per_frame.npy'), fname_per_frame)
print(fname_per_frame.shape)
pose_per_frame = np.concatenate(pose_per_frame, axis=0)
np.save(os.path.join(save_path, 'pose_per_frame.npy'), pose_per_frame)
print(pose_per_frame.shape)
shape_per_frame = np.concatenate(shape_per_frame, axis=0)
np.save(os.path.join(save_path, 'shape_per_frame.npy'), shape_per_frame)
print(shape_per_frame.shape)
cam_per_frame = np.concatenate(cam_per_frame, axis=0)
np.save(os.path.join(save_path, 'cam_per_frame.npy'), cam_per_frame)
print(cam_per_frame.shape)
if vis_every_n_batches is not None and save_per_frame_uncertainty:
vertices_uncertainty_per_frame = np.stack(vertices_uncertainty_per_frame, axis=0)
np.save(os.path.join(save_path, 'vertices_uncertainty_per_frame.npy'), vertices_uncertainty_per_frame)
print(vertices_uncertainty_per_frame.shape)
final_metrics = {}
for metric_type in metrics_to_track:
if metric_type == 'hrnet_joints2D_l2es':
joints2D_l2e = metric_sums['hrnet_joints2D_l2es'] / metric_sums['num_vis_hrnet_joints2D']
final_metrics[metric_type] = joints2D_l2e
print('Check total samples:', metric_type, metric_sums['num_vis_hrnet_joints2D'])
elif metric_type == 'hrnet_joints2D_l2es_best_j2d_sample':
joints2D_l2e_best_j2d_sample = metric_sums['hrnet_joints2D_l2es_best_j2d_sample'] / metric_sums['num_vis_hrnet_joints2D']
final_metrics[metric_type] = joints2D_l2e_best_j2d_sample
elif metric_type == 'hrnet_joints2Dsamples_l2es':
joints2Dsamples_l2e = metric_sums['hrnet_joints2Dsamples_l2es'] / metric_sums['num_vis_hrnet_joints2Dsamples']
final_metrics[metric_type] = joints2Dsamples_l2e
print('Check total samples:', metric_type, metric_sums['num_vis_hrnet_joints2Dsamples'])
elif metric_type == 'verts_samples_dist_from_mean':
final_metrics[metric_type] = metric_sums[metric_type] / (metric_sums['num_datapoints'] * num_pred_samples * 6890)
elif metric_type == 'joints3D_coco_samples_dist_from_mean':
final_metrics[metric_type] = metric_sums[metric_type] / (metric_sums['num_datapoints'] * num_pred_samples * 17)
elif metric_type == 'joints3D_coco_invis_samples_dist_from_mean':
if metric_sums['num_invis_joints3Dsamples'] > 0:
final_metrics[metric_type] = metric_sums[metric_type] / metric_sums['num_invis_joints3Dsamples']
else:
print('No invisible 3D COCO joints!')
else:
            if 'pve' in metric_type:  # covers the pves* and pve-ts* variants (6890 vertices)
num_per_sample = 6890
elif 'mpjpes' in metric_type:
num_per_sample = 14
# print('Check total samples:', metric_type, num_per_sample, self.total_samples)
final_metrics[metric_type] = metric_sums[metric_type] / (metric_sums['num_datapoints'] * num_per_sample)
print('\n---- Metrics ----')
for metric in final_metrics.keys():
if final_metrics[metric] > 0.3:
mult = 1
else:
mult = 1000
print(metric, '{:.2f}'.format(final_metrics[metric] * mult)) # Converting from metres to millimetres
print('\n---- Check metric save shapes ----')
for metric_type in metrics_to_track:
per_frame = np.concatenate(per_frame_metrics[metric_type], axis=0)
print(metric_type, per_frame.shape)
np.save(os.path.join(save_path, metric_type + '_per_frame.npy'), per_frame)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', type=str, default='data/checkpoint.pt', help='Path to pretrained model checkpoint')
parser.add_argument('--model_cfg', type=str, default=None, help='Path to config file. If not set use the default (prohmr/configs/prohmr.yaml)')
parser.add_argument('--gpu', default='0', type=str, help='GPU')
parser.add_argument('--num_samples', '-N', type=int, default=25, help='Number of test samples to evaluate with')
    parser.add_argument('--use_subset', '-S', action='store_true')
args = parser.parse_args()
# Set seeds
np.random.seed(0)
torch.manual_seed(0)
# Device
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Model
if args.model_cfg is None:
model_cfg = prohmr_config()
else:
model_cfg = get_config(args.model_cfg)
model = ProHMR.load_from_checkpoint(args.checkpoint, strict=False, cfg=model_cfg).to(device)
model.eval()
model_cfg.defrost()
model_cfg.TRAIN.NUM_TEST_SAMPLES = args.num_samples + 1
model_cfg.freeze()
# Setup evaluation dataset
if args.use_subset:
selected_fnames = subsets.PW3D_OCCLUDED_JOINTS
vis_every_n_batches = 1
vis_joints_threshold = 0.8
else:
selected_fnames = None
vis_every_n_batches = 1000
vis_joints_threshold = 0.6
dataset_path = '/scratches/nazgul_2/as2562/datasets/3DPW/test'
dataset = PW3DEvalDataset(dataset_path,
img_wh=model_cfg.MODEL.IMAGE_SIZE,
selected_fnames=selected_fnames,
visible_joints_threshold=vis_joints_threshold)
print("Eval examples found:", len(dataset))
# Metrics
metrics = ['pves', 'pves_sc', 'pves_pa', 'pve-ts', 'pve-ts_sc', 'mpjpes', 'mpjpes_sc', 'mpjpes_pa']
    metrics.extend([metric + '_samples_min' for metric in metrics])
metrics.extend(['verts_samples_dist_from_mean', 'joints3D_coco_samples_dist_from_mean', 'joints3D_coco_invis_samples_dist_from_mean'])
metrics.append('hrnet_joints2D_l2es')
metrics.append('hrnet_joints2Dsamples_l2es')
save_path = '/scratch/as2562/ProHMR/evaluations/3dpw_{}_samples'.format(args.num_samples)
if args.use_subset:
save_path += '_selected_fnames_occluded_joints'
if not os.path.exists(save_path):
os.makedirs(save_path)
print("Saving to:", save_path)
# Run evaluation
evaluate_3dpw(model=model,
model_cfg=model_cfg,
eval_dataset=dataset,
metrics_to_track=metrics,
device=device,
save_path=save_path,
num_pred_samples=args.num_samples,
num_workers=4,
pin_memory=True,
vis_every_n_batches=vis_every_n_batches,
num_samples_to_visualise=10,
save_per_frame_uncertainty=True)
| 53.927928
| 178
| 0.589187
|
1ab6542953d11f1d2d8d5391f55e0e28f9721d42
| 13,117
|
py
|
Python
|
adjutant/common/quota.py
|
CCI-MOC/adjutant
|
032db3124ea0b0632afdfc27afc60b6c66cf5f66
|
[
"Apache-2.0"
] | null | null | null |
adjutant/common/quota.py
|
CCI-MOC/adjutant
|
032db3124ea0b0632afdfc27afc60b6c66cf5f66
|
[
"Apache-2.0"
] | null | null | null |
adjutant/common/quota.py
|
CCI-MOC/adjutant
|
032db3124ea0b0632afdfc27afc60b6c66cf5f66
|
[
"Apache-2.0"
] | 1
|
2019-04-18T12:21:59.000Z
|
2019-04-18T12:21:59.000Z
|
# Copyright (C) 2015 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from adjutant.common import openstack_clients
from django.conf import settings
class QuotaManager(object):
"""
A manager to allow easier updating and access to quota information
across all services.
"""
default_size_diff_threshold = 0.2
class ServiceQuotaHelper(object):
def set_quota(self, values):
self.client.quotas.update(self.project_id, **values)
class ServiceQuotaCinderHelper(ServiceQuotaHelper):
def __init__(self, region_name, project_id):
self.client = openstack_clients.get_cinderclient(
region=region_name)
self.project_id = project_id
def get_quota(self):
return self.client.quotas.get(self.project_id).to_dict()
def get_usage(self):
volumes = self.client.volumes.list(
search_opts={'all_tenants': 1, 'project_id': self.project_id})
snapshots = self.client.volume_snapshots.list(
search_opts={'all_tenants': 1, 'project_id': self.project_id})
# gigabytesUsed should be a total of volumes and snapshots
gigabytes = sum([getattr(volume, 'size', 0) for volume
in volumes])
gigabytes += sum([getattr(snap, 'size', 0) for snap
in snapshots])
return {'gigabytes': gigabytes,
'volumes': len(volumes),
'snapshots': len(snapshots)
}
class ServiceQuotaNovaHelper(ServiceQuotaHelper):
def __init__(self, region_name, project_id):
self.client = openstack_clients.get_novaclient(
region=region_name)
self.project_id = project_id
def get_quota(self):
return self.client.quotas.get(self.project_id).to_dict()
def get_usage(self):
nova_usage = self.client.limits.get(
tenant_id=self.project_id).to_dict()['absolute']
nova_usage_keys = [
('instances', 'totalInstancesUsed'),
('floating_ips', 'totalFloatingIpsUsed'),
('ram', 'totalRAMUsed'),
('cores', 'totalCoresUsed'),
('security_groups', 'totalSecurityGroupsUsed')
]
nova_usage_dict = {}
for key, usage_key in nova_usage_keys:
nova_usage_dict[key] = nova_usage[usage_key]
return nova_usage_dict
class ServiceQuotaNeutronHelper(ServiceQuotaHelper):
def __init__(self, region_name, project_id):
self.client = openstack_clients.get_neutronclient(
region=region_name)
self.project_id = project_id
def set_quota(self, values):
body = {
'quota': values
}
self.client.update_quota(self.project_id, body)
def get_usage(self):
networks = self.client.list_networks(
tenant_id=self.project_id)['networks']
routers = self.client.list_routers(
tenant_id=self.project_id)['routers']
floatingips = self.client.list_floatingips(
tenant_id=self.project_id)['floatingips']
ports = self.client.list_ports(
tenant_id=self.project_id)['ports']
subnets = self.client.list_subnets(
tenant_id=self.project_id)['subnets']
security_groups = self.client.list_security_groups(
tenant_id=self.project_id)['security_groups']
security_group_rules = self.client.list_security_group_rules(
tenant_id=self.project_id)['security_group_rules']
return {'network': len(networks),
'router': len(routers),
'floatingip': len(floatingips),
'port': len(ports),
'subnet': len(subnets),
'security_group': len(security_groups),
'security_group_rule': len(security_group_rules)
}
def get_quota(self):
return self.client.show_quota(self.project_id)['quota']
class ServiceQuotaOctaviaHelper(ServiceQuotaNeutronHelper):
def __init__(self, region_name, project_id):
self.client = openstack_clients.get_octaviaclient(
region=region_name)
self.project_id = project_id
def get_quota(self):
project_quota = self.client.quota_show(
project_id=self.project_id)
# NOTE(amelia): Instead of returning the default quota if ANY
# of the quotas are the default, the endpoint
# returns None
default_quota = None
for name, quota in project_quota.items():
if quota is None:
if not default_quota:
default_quota = self.client.quota_defaults_show()[
'quota']
project_quota[name] = default_quota[name]
return project_quota
def set_quota(self, values):
self.client.quota_set(self.project_id, json={'quota': values})
def get_usage(self):
usage = {}
usage['load_balancer'] = len(self.client.load_balancer_list(
project_id=self.project_id)['loadbalancers'])
usage['listener'] = len(self.client.listener_list(
project_id=self.project_id)['listeners'])
pools = self.client.pool_list(
project_id=self.project_id)['pools']
usage['pool'] = len(pools)
members = []
for pool in pools:
members += pool['members']
usage['member'] = len(members)
usage['health_monitor'] = len(self.client.health_monitor_list(
project_id=self.project_id)['healthmonitors'])
return usage
_quota_updaters = {
'cinder': ServiceQuotaCinderHelper,
'nova': ServiceQuotaNovaHelper,
'neutron': ServiceQuotaNeutronHelper,
'octavia': ServiceQuotaOctaviaHelper,
}
def __init__(self, project_id, size_difference_threshold=None):
# TODO(amelia): Try to find out which endpoints are available and get
# the non enabled ones out of the list
self.default_helpers = dict(self._quota_updaters)
self.helpers = {}
if settings.QUOTA_SERVICES:
quota_services = dict(settings.QUOTA_SERVICES)
all_regions = quota_services.pop('*', None)
if all_regions:
self.default_helpers = {}
for service in all_regions:
if service in self._quota_updaters:
self.default_helpers[service] = \
self._quota_updaters[service]
for region, services in quota_services.items():
self.helpers[region] = {}
for service in services:
if service in self._quota_updaters:
self.helpers[region][service] = \
self._quota_updaters[service]
self.project_id = project_id
self.size_diff_threshold = (size_difference_threshold
or self.default_size_diff_threshold)
def get_current_region_quota(self, region_id):
current_quota = {}
region_helpers = self.helpers.get(region_id, self.default_helpers)
for name, service in region_helpers.items():
helper = service(region_id, self.project_id)
current_quota[name] = helper.get_quota()
return current_quota
def get_quota_differences(self, current_quota):
""" Gets the closest matching quota size for a given quota """
quota_differences = {}
for size, setting in settings.PROJECT_QUOTA_SIZES.items():
match_percentages = []
for service_name, values in setting.items():
if service_name not in current_quota:
continue
for name, value in values.items():
if name not in current_quota[service_name]:
continue
if value > 0:
current = current_quota[service_name][name]
dividend = float(min(current, value))
divisor = float(max(current, value))
match_percentages.append(dividend / divisor)
elif value < 0:
# NOTE(amelia): Sub-zero quota means unlimited
if current_quota[service_name][name] < 0:
match_percentages.append(1.0)
else:
match_percentages.append(0.0)
elif current_quota[service_name][name] == 0:
match_percentages.append(1.0)
else:
match_percentages.append(0.0)
# Calculate the average of how much it matches the setting
difference = abs(
(sum(match_percentages) / float(len(match_percentages))) - 1)
quota_differences[size] = difference
return quota_differences
def get_quota_size(self, current_quota, difference_threshold=None):
""" Gets the closest matching quota size for a given quota """
quota_differences = self.get_quota_differences(current_quota)
diff_threshold = difference_threshold or self.size_diff_threshold
quota_differences_pruned = {}
for size, difference in quota_differences.items():
if difference <= diff_threshold:
quota_differences_pruned[size] = difference
if len(quota_differences_pruned) > 0:
return min(
quota_differences_pruned, key=quota_differences_pruned.get)
# If we don't get a match return custom which means the project will
# need admin approval for any change
return 'custom'
def get_quota_change_options(self, quota_size):
""" Get's the pre-approved quota change options for a given size """
quota_list = settings.QUOTA_SIZES_ASC
try:
list_position = quota_list.index(quota_size)
except ValueError:
return []
quota_change_list = quota_list[:list_position]
if list_position + 1 < len(quota_list):
quota_change_list.append(quota_list[list_position + 1])
return quota_change_list
def get_smaller_quota_options(self, quota_size):
""" Get the quota sizes smaller than the current size."""
quota_list = settings.QUOTA_SIZES_ASC
try:
list_position = quota_list.index(quota_size)
except ValueError:
return []
return quota_list[:list_position]
def get_region_quota_data(self, region_id, include_usage=True):
current_quota = self.get_current_region_quota(region_id)
current_quota_size = self.get_quota_size(current_quota)
change_options = self.get_quota_change_options(current_quota_size)
region_data = {
'region': region_id,
"current_quota": current_quota,
"current_quota_size": current_quota_size,
"quota_change_options": change_options,
}
if include_usage:
region_data['current_usage'] = self.get_current_usage(region_id)
return region_data
def get_current_usage(self, region_id):
current_usage = {}
region_helpers = self.helpers.get(region_id, self.default_helpers)
for name, service in region_helpers.items():
helper = service(region_id, self.project_id)
current_usage[name] = helper.get_usage()
return current_usage
def set_region_quota(self, region_id, quota_dict):
notes = []
for service_name, values in quota_dict.items():
updater_class = self.helpers.get(
region_id, self.default_helpers).get(service_name)
if not updater_class:
notes.append("No quota updater found for %s. Ignoring" %
service_name)
continue
service_helper = updater_class(region_id, self.project_id)
service_helper.set_quota(values)
return notes
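# Illustrative sketch (editor's addition, not part of adjutant):
# get_quota_differences() scores each configured size by averaging the
# per-value ratios min(current, setting) / max(current, setting) and
# reporting |average - 1| as the distance. With a hypothetical size
#   'small' = {'nova': {'instances': 10, 'cores': 20}}
# and a current quota of {'nova': {'instances': 8, 'cores': 20}}:
#   ratios     = [8 / 10.0, 20 / 20.0]        # [0.8, 1.0]
#   difference = abs(sum(ratios) / 2 - 1)     # 0.1
# which is under the default 0.2 threshold, so get_quota_size() reports
# 'small'; a quota beyond every size's threshold falls back to 'custom'.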
| 39.272455
| 78
| 0.593733
|
b1c9bd63ff9d9946a0583df2997c2d43c8e6f9a6
| 3,563
|
py
|
Python
|
drag5a.py
|
jwcraftsman/kivytest
|
e1271239560d6f81d18dc15ca8ba0ce08163b800
|
[
"MIT"
] | null | null | null |
drag5a.py
|
jwcraftsman/kivytest
|
e1271239560d6f81d18dc15ca8ba0ce08163b800
|
[
"MIT"
] | null | null | null |
drag5a.py
|
jwcraftsman/kivytest
|
e1271239560d6f81d18dc15ca8ba0ce08163b800
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Use touch events to drag a rectangle around the screen with borders,
keeping the rectangle inside a specified widget and keeping the
rectangle from sliding away from the touch position when going in and
out of the specified widget's screen area. The rectangle is also only
allowed to be dropped in the area of a specified widget.
"""
from kivy.properties import ListProperty, ObjectProperty
from kivy.uix.label import Label
from kivy.uix.floatlayout import FloatLayout
from kivy.app import App
from kivy.lang import Builder
kv = """
<Rect>:
canvas.before:
Color:
rgba: self.background_color
Rectangle:
size: self.size
pos: self.x,self.y
<DragRect>:
drag_rectangle: self.x, self.y, self.width, self.height
<MyLabel>:
background_color: 0.5,0.5,0.5,1
color: 0,0,0,1
padding: 10, 10
<Box>:
pos: 100, 100
size_hint: None, None
size: 50,50
background_color: 0,1,0,0.5
BoxLayout:
orientation: "horizontal"
MyLabel:
text: "Left"
size_hint_x: None
width: self.texture_size[0]
BoxLayout:
orientation: "vertical"
BoxLayout:
id: drag_area
orientation: "vertical"
MyLabel:
text: "Drag over me"
size_hint_y: None
height: 100
background_color: 1.0,0,0,1
RelativeLayout:
id: page
Box:
drag_area: drag_area
pos: 100, 200
background_color: 0,0,1,0.5
MyLabel:
text: "Bottom"
size_hint_y: None
height: self.texture_size[1]
MyLabel:
text: "Right"
size_hint_x: None
width: self.texture_size[0]
"""
# mixin class
class Rect:
background_color = ListProperty([1, 1, 1, 1])
# Similar to DragBehavior
class DragRect(Rect, FloatLayout):
drag_area = ObjectProperty(None)
drop_area = ObjectProperty(None)
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._x_offset = 0
self._y_offset = 0
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
touch.grab(self)
self._x_offset = self.x - touch.x
self._y_offset = self.y - touch.y
return True # Don't allow simultaneous grabs
def on_touch_move(self, touch):
if touch.grab_current is not self:
return
self.x = touch.x + self._x_offset
self.y = touch.y + self._y_offset
# Stay inside drag_area widget
self.move_inside(self.drag_area)
def on_touch_up(self, touch):
if touch.grab_current is self:
# Stay inside drop_area widget
self.move_inside(self.drop_area)
touch.ungrab(self)
def move_inside(self, widget):
if widget is None:
widget = self.parent
if self.x < 0:
self.x = 0
elif self.right > widget.width:
self.right = widget.width
if self.y < 0:
self.y = 0
elif self.top > widget.height:
self.top = widget.height
class RectLabel(Rect, Label):
pass
class MyLabel(RectLabel):
pass
class Box(DragRect):
pass
class TestApp(App):
def build(self):
root = Builder.load_string(kv)
page = root.ids.page
page.add_widget(Box())
return root
TestApp().run()
| 25.45
| 70
| 0.580971
|
03c85b8b62da14c0d751ec7e388acf0ea092f514
| 6,146
|
py
|
Python
|
xmind2testlink/sharedparser.py
|
WinterDing/xmind2testlink
|
ae84ae173afd24dcd0519cd501506f2650fd337b
|
[
"MIT"
] | null | null | null |
xmind2testlink/sharedparser.py
|
WinterDing/xmind2testlink
|
ae84ae173afd24dcd0519cd501506f2650fd337b
|
[
"MIT"
] | null | null | null |
xmind2testlink/sharedparser.py
|
WinterDing/xmind2testlink
|
ae84ae173afd24dcd0519cd501506f2650fd337b
|
[
"MIT"
] | 1
|
2020-12-14T06:00:25.000Z
|
2020-12-14T06:00:25.000Z
|
from xmindparser import xmind_to_dict, config
from .datatype import *
config['hideEmptyValue'] = False
_config = {'sep': ' ',
'valid_sep': '/>-+',
'precondition_sep': '\n----\n',
'summary_sep': '\n----\n'}
def ignore_filter(topics):
"""filter topics starts with !"""
result = [t for t in topics if t['title'] and not t['title'].startswith('!')]
for topic in result:
more_topics = topic.get('topics', [])
topic['topics'] = ignore_filter(more_topics)
return result
def open_and_cache_xmind(xmind_file):
if not cache:
cache['sheet'] = xmind_to_dict(xmind_file)
cache['root'] = get_default_sheet(cache['sheet'])['topic']
root_topics = cache['root'].get('topics', [])
assert len(root_topics) > 0, "Invalid Xmind, should have at least 1 topic!"
cache['root']['topics'] = ignore_filter(root_topics)
cache['name'] = xmind_file
get_logger().debug('Cached xmind: {}'.format(cache))
def get_default_sheet(sheets):
"""First sheet is the default sheet."""
assert len(sheets) >= 1, 'Invalid xmind: should have at least 1 sheet!'
return sheets[0]
def get_logger():
from xmindparser import logger
return logger
def flat_suite(suite):
"""Convert a suite object into flat testcase list."""
tests = []
for sub_suite in suite.sub_suites:
for test in sub_suite.testcase_list:
d = test.to_dict()
d['suite'] = sub_suite.name
tests.append(d)
return tests
def is_v2_format(d):
"""v2 xmind root dict will have a star maker, or sub node of testcase marked with priority."""
if _is_v2_by_marker(d) or _is_v2_by_guess(d):
_get_v2_sep(d)
return True
def _get_v2_sep(d):
"""v2 sep is this last char of title."""
last_char = d['title'][-1:]
if last_char in _config['valid_sep']:
cache['sep'] = last_char
def _is_v2_by_marker(d, maker_prefix='star'):
"""check if a node have a star maker"""
if isinstance(d['makers'], list):
for m in d['makers']:
if m.startswith(maker_prefix):
return True
def _is_v2_by_guess(d):
"""if any sub topic from testcase node mark with priority, this can be guessed as v2 xmind. """
for suite_node in d['topics']:
for testcase_node in suite_node['topics']:
sub_topics = testcase_node['topics']
while sub_topics:
for _ in sub_topics:
temp_topics = []
if _is_v2_by_marker(_, maker_prefix='priority'):
return True
else:
temp_topics.extend(_['topics'])
sub_topics = temp_topics
def get_priority(d):
if isinstance(d['makers'], list):
for m in d['makers']:
if m.startswith('priority'):
return int(m[-1])
def get_execution_type(d):
"""
support testcase option automation/manual by using "flag-green"
:param d: testcase topic
:return: 2 is automation, 1 is manual
"""
# winter: added to read the automation flag "flag-green"
if isinstance(d['makers'], list):
if 'flag-green' in d['makers']:
return 2
return 1
def _filter_empty_value(values):
result = [v for v in values if v]
for r in result:
if not isinstance(r, str):
get_logger().error('Expected string but not: {}'.format(r))
return [v.strip() for v in result] # remove blank char in leading and trailing
def _filter_empty_comments(comment_values):
"""comment value like: [[{content:comment1},{content:comment2}],[...]]"""
for comments in comment_values:
for comment in comments:
if comment.get('content'):
yield comment['content']
def is_testcase_topic(d):
priority = get_priority(d)
if priority:
return True
child_node = d.get('topics', [])
# if only one child topic and it is image or blank, consider parent is a test
if len(child_node) == 1 and child_node[0]['title'] in ('[Image]', '[Blank]'):
return True
if child_node:
return False
return True
def build_testcase_title(nodes):
values = [n['title'] for n in nodes]
values = _filter_empty_value(values)
# when sep is not blank, will add space around sep, e.g. '/' will be changed to ' / '
sep = cache.get('sep', _config['sep'])
if sep != ' ':
sep = ' {} '.format(sep)
return sep.join(values)
def build_testcase_precondition(nodes):
values = (n['comment'] for n in nodes if n.get('comment', None))
values = list(_filter_empty_comments(values))
if not values: # try to get from callout
for n in nodes:
for _ in n.get('callout', None) or []:
values.append(_)
comments = _filter_empty_value(values)
return _config['precondition_sep'].join(comments)
def build_testcase_summary(nodes):
values = [n['note'] for n in nodes]
values = _filter_empty_value(values)
return _config['summary_sep'].join(values)
def parse_step(step_dict):
step = TestStep()
step.action = step_dict['title']
expected_node = step_dict.get('topics', None)
if expected_node:
step.expected = expected_node[0]['title']
return step
def parse_steps(steps_dict):
steps = []
for step_number, step_node in enumerate(steps_dict, 1):
step = parse_step(step_node)
step.number = step_number
steps.append(step)
return steps
def parse_testcase(testcase_dict, parent=None):
testcase = TestCase()
nodes = parent + [testcase_dict] if parent else [testcase_dict]
testcase.name = build_testcase_title(nodes)
testcase.summary = build_testcase_summary(nodes)
testcase.preconditions = build_testcase_precondition(nodes)
testcase.importance = get_priority(testcase_dict)
testcase.execution_type = get_execution_type(testcase_dict)
steps_node = testcase_dict.get('topics', None)
if steps_node:
testcase.steps = parse_steps(steps_node)
return testcase
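# Worked example (editor's note): build_testcase_title() joins the titles of
# the suite/testcase node chain. With cache['sep'] detected as '/' (v2 xmind)
# and node titles ['Login', 'Valid user'], the generated name is
# 'Login / Valid user'; with the default blank sep it is 'Login Valid user'.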
| 27.809955
| 99
| 0.622356
|
bd558656981b3f712358d03ca088cf37aacf0aae
| 565
|
py
|
Python
|
LeetCode/0452_Minimum_Number_of_Arrows_to_Burst_Balloons.py
|
Achyut-sudo/PythonAlgorithms
|
21fb6522510fde7a0877b19a8cedd4665938a4df
|
[
"MIT"
] | 144
|
2020-09-13T22:54:57.000Z
|
2022-02-24T21:54:25.000Z
|
LeetCode/0452_Minimum_Number_of_Arrows_to_Burst_Balloons.py
|
Achyut-sudo/PythonAlgorithms
|
21fb6522510fde7a0877b19a8cedd4665938a4df
|
[
"MIT"
] | 587
|
2020-05-06T18:55:07.000Z
|
2021-09-20T13:14:53.000Z
|
LeetCode/0452_Minimum_Number_of_Arrows_to_Burst_Balloons.py
|
Achyut-sudo/PythonAlgorithms
|
21fb6522510fde7a0877b19a8cedd4665938a4df
|
[
"MIT"
] | 523
|
2020-09-09T12:07:13.000Z
|
2022-02-24T21:54:31.000Z
|
from typing import List
class Solution:
def findMinArrowShots(self, points: List[List[int]]) -> int:
if points == []:
return 0
points.sort(key = lambda x: x[0])
start = points[0][0]
end = points[0][1]
ans = len(points)
for i in range(1, len(points)):
if start <= points[i][0] <= end:
ans -= 1
if points[i][1] < end:
end = points[i][1]
else:
start = points[i][0]
end = points[i][1]
return ans
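# Quick check (editor's sketch): the four balloons below need two arrows --
# one through the overlap of [1,6] and [2,8], one through [7,12] and [10,16]:
#   Solution().findMinArrowShots([[10, 16], [2, 8], [1, 6], [7, 12]])  # -> 2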
| 29.736842
| 64
| 0.414159
|
86306a66bd617f64631f7112508129b41aaa7b54
| 570
|
py
|
Python
|
backend/home/api/v1/urls.py
|
crowdbotics-apps/virtual-match-29867
|
9c2a092e49db5aa5c09704c957d616390a4e5543
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/home/api/v1/urls.py
|
crowdbotics-apps/virtual-match-29867
|
9c2a092e49db5aa5c09704c957d616390a4e5543
|
[
"FTL",
"AML",
"RSA-MD"
] | 15
|
2021-08-23T01:13:44.000Z
|
2022-03-13T17:38:13.000Z
|
backend/home/api/v1/urls.py
|
crowdbotics-apps/virtual-match-29867
|
9c2a092e49db5aa5c09704c957d616390a4e5543
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import MessageViewSet, CustomTextViewSet, HomePageViewSet
from home.api.v1.viewsets import (
SignupViewSet,
LoginViewSet,
)
router = DefaultRouter()
router.register("signup", SignupViewSet, basename="signup")
router.register("login", LoginViewSet, basename="login")
router.register("customtext", CustomTextViewSet)
router.register("homepage", HomePageViewSet)
router.register("message", MessageViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| 28.5
| 72
| 0.782456
|
aec598d37e499a71afb9ff4482b2213048230ba9
| 997
|
py
|
Python
|
sites/mysites/tools/checkers.py
|
cmwaura/Final_Red_Scrap
|
6b1b78de7d1129cda787e9f4688ddd409af39eb5
|
[
"MIT"
] | null | null | null |
sites/mysites/tools/checkers.py
|
cmwaura/Final_Red_Scrap
|
6b1b78de7d1129cda787e9f4688ddd409af39eb5
|
[
"MIT"
] | null | null | null |
sites/mysites/tools/checkers.py
|
cmwaura/Final_Red_Scrap
|
6b1b78de7d1129cda787e9f4688ddd409af39eb5
|
[
"MIT"
] | null | null | null |
from dynamic_scraper.spiders.django_checker import DjangoChecker
from mysites.models import JobAd
class JobChecker(DjangoChecker):
'''
JobChecker inherits from DjangoChecker in the dynamic-scraper module
(hence the import above). Its main aim is to check the objects that we
scrape, mainly via ref_object.url, for recurrences; if a recurrence is
found, the new object is deleted to keep the integrity of the data. A
scheduler_runtime is also configured to set the times at which the checker
runs, which is important once cron jobs have been scheduled for the scraper.
'''
name = 'job_checker'
def __init__(self, *args, **kwargs):
self._set_ref_object(JobAd, **kwargs)
self.scraper = self.ref_object.job_website.scraper
self.scheduler_runtime = self.ref_object.checker_runtime
super(JobChecker, self).__init__(*args, **kwargs)
| 47.47619
| 103
| 0.790371
|
dc41e0e960525b53a902f0f2949214041e8e4935
| 778
|
py
|
Python
|
Tests/test_Calculator.py
|
axisonoid/calculator
|
5eb63b6f0fb544feeb50ad596520eb6dbad62be8
|
[
"MIT"
] | 1
|
2021-06-26T11:22:19.000Z
|
2021-06-26T11:22:19.000Z
|
Tests/test_Calculator.py
|
axisonoid/calculator
|
5eb63b6f0fb544feeb50ad596520eb6dbad62be8
|
[
"MIT"
] | null | null | null |
Tests/test_Calculator.py
|
axisonoid/calculator
|
5eb63b6f0fb544feeb50ad596520eb6dbad62be8
|
[
"MIT"
] | null | null | null |
import unittest
from Calculator.Calculator import Calculator
from CsvReader.CsvReader import CsvReader
class MyTestCase(unittest.TestCase):
def setUp(self) -> None:
self.calculator = Calculator()
def test_instantiate_calculator(self):
self.assertIsInstance(self.calculator, Calculator)
def test_subtraction(self):
test_data = CsvReader("Tests/Data/subtraction.csv").data
for row in test_data:
result = float(row['Result'])
self.assertEqual(self.calculator.subtract(row['Value 1'], row['Value 2']), result)
self.assertEqual(self.calculator.result, result)
def test_results_property(self):
self.assertEqual(self.calculator.result, 0)
if __name__ == '__main__':
unittest.main()
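# The subtraction fixture (Tests/Data/subtraction.csv) is assumed to provide
# 'Value 1', 'Value 2' and 'Result' columns; e.g. a row of 5,2,3 drives
# assertEqual(calculator.subtract('5', '2'), 3.0) via the loop above.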
| 31.12
| 95
| 0.695373
|
1f14bab1dc1508f74fb790ff4c8b9e1a5119dfe0
| 2,125
|
py
|
Python
|
src/exif/_app1_create.py
|
TNThieding/exif
|
2e59701aec7416fbb3b2db76e7d090f166f1f132
|
[
"MIT"
] | 51
|
2018-12-28T19:48:40.000Z
|
2021-12-10T00:35:41.000Z
|
src/exif/_app1_create.py
|
TNThieding/exif
|
2e59701aec7416fbb3b2db76e7d090f166f1f132
|
[
"MIT"
] | 33
|
2019-02-08T10:15:25.000Z
|
2022-02-11T18:37:45.000Z
|
src/exif/_app1_create.py
|
TNThieding/exif
|
2e59701aec7416fbb3b2db76e7d090f166f1f132
|
[
"MIT"
] | 11
|
2019-10-24T14:03:02.000Z
|
2020-12-10T04:07:20.000Z
|
"""Utility to create empty APP1 metadata bytes."""
from plum.bigendian import uint16
from exif._constants import ATTRIBUTE_ID_MAP, ExifMarkers
from exif._datatypes import ExifType, Ifd, IfdTag, TiffByteOrder, TiffHeader
HEADER_BYTES_EXCLUDED_FROM_LENGTH = (
2 # IMPORTANT: APP1 marker is excluded from the length of field.
)
def generate_empty_app1_bytes():
"""Generate an empty APP1 segment with IFDs 0, EXIF, and GPS.
:returns: big endian APP1 segment with 3 IFDs
:rtype: bytes
"""
header_bytes = bytearray(ExifMarkers.APP1)
header_bytes += b"\x00\x00" # APP1 length (touched up later at end)
header_bytes += b"\x45\x78\x69\x66\x00\x00" # EXIF word, NULL, and padding
tiff_header = TiffHeader(
byte_order=TiffByteOrder.BIG, reserved=0x2A, ifd_offset=0x8
)
default_tags = [
# Note: These pointers are touched up later.
IfdTag(
tag_id=ATTRIBUTE_ID_MAP["_exif_ifd_pointer"],
type=ExifType.LONG,
value_count=1,
value_offset=0,
),
IfdTag(
tag_id=ATTRIBUTE_ID_MAP["_gps_ifd_pointer"],
type=ExifType.LONG,
value_count=1,
value_offset=0,
),
]
ifd0 = Ifd(
tags=default_tags, next=0
) # leave pointer to IFD 1 as 0 since there isn't a thumbnail
exif_ifd = Ifd(tags=[], next=0)
gps_ifd = Ifd(tags=[], next=0)
# pylint: disable=unsubscriptable-object
ifd0.tags[0].value_offset = tiff_header.nbytes + ifd0.nbytes # IFD 0 --> EXIF
ifd0.tags[1].value_offset = (
tiff_header.nbytes + ifd0.nbytes + exif_ifd.nbytes
) # IFD 0 --> GPS
# pylint: enable=unsubscriptable-object
body_bytes = bytearray(tiff_header.ipack())
body_bytes += ifd0.ipack()
body_bytes += exif_ifd.ipack()
body_bytes += gps_ifd.ipack()
# Adjust the APP1 length (2 bytes into header).
app1_length_view = uint16.view(header_bytes, offset=2)
app1_length_view.set(
len(header_bytes + body_bytes) - HEADER_BYTES_EXCLUDED_FROM_LENGTH
)
return header_bytes + body_bytes
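# Usage sketch (editor's addition; file names are hypothetical): the returned
# segment is complete big-endian APP1 bytes, so it can be spliced into a JPEG
# immediately after the 2-byte SOI marker:
#     app1 = generate_empty_app1_bytes()
#     with open("photo.jpg", "rb") as f:
#         soi, rest = f.read(2), f.read()
#     with open("photo_with_exif.jpg", "wb") as f:
#         f.write(soi + app1 + rest)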
| 30.797101
| 82
| 0.657412
|
a37e8ce9569e2f9a3498fc7b5610383ba38d5a2b
| 17,615
|
py
|
Python
|
gm/rebaseline_server/imagediffdb.py
|
Acidburn0zzz/tampllate-skia
|
7d5d73cfaee397788788765238725a5058ee7f08
|
[
"BSD-3-Clause"
] | 11
|
2015-01-19T22:09:14.000Z
|
2019-10-03T21:45:31.000Z
|
gm/rebaseline_server/imagediffdb.py
|
Acidburn0zzz/tampllate-skia
|
7d5d73cfaee397788788765238725a5058ee7f08
|
[
"BSD-3-Clause"
] | null | null | null |
gm/rebaseline_server/imagediffdb.py
|
Acidburn0zzz/tampllate-skia
|
7d5d73cfaee397788788765238725a5058ee7f08
|
[
"BSD-3-Clause"
] | 9
|
2015-02-23T12:59:43.000Z
|
2022-01-27T10:30:45.000Z
|
#!/usr/bin/python
"""
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Calculate differences between image pairs, and store them in a database.
"""
import contextlib
import csv
import logging
import os
import re
import shutil
import sys
import tempfile
import urllib
try:
from PIL import Image, ImageChops
except ImportError:
raise ImportError('Requires PIL to be installed; see '
+ 'http://www.pythonware.com/products/pil/')
# Set the PYTHONPATH to include the tools directory.
sys.path.append(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir,
'tools'))
import find_run_binary
SKPDIFF_BINARY = find_run_binary.find_path_to_program('skpdiff')
DEFAULT_IMAGE_SUFFIX = '.png'
DEFAULT_IMAGES_SUBDIR = 'images'
DISALLOWED_FILEPATH_CHAR_REGEX = re.compile(r'[^\w\-]')
DIFFS_SUBDIR = 'diffs'
WHITEDIFFS_SUBDIR = 'whitediffs'
VALUES_PER_BAND = 256
# Keys used within DiffRecord dictionary representations.
# NOTE: Keep these in sync with static/constants.js
KEY__DIFFERENCES__MAX_DIFF_PER_CHANNEL = 'maxDiffPerChannel'
KEY__DIFFERENCES__NUM_DIFF_PIXELS = 'numDifferingPixels'
KEY__DIFFERENCES__PERCENT_DIFF_PIXELS = 'percentDifferingPixels'
KEY__DIFFERENCES__PERCEPTUAL_DIFF = 'perceptualDifference'
KEY__DIFFERENCES__WEIGHTED_DIFF = 'weightedDiffMeasure'
class DiffRecord(object):
""" Record of differences between two images. """
def __init__(self, storage_root,
expected_image_url, expected_image_locator,
actual_image_url, actual_image_locator,
expected_images_subdir=DEFAULT_IMAGES_SUBDIR,
actual_images_subdir=DEFAULT_IMAGES_SUBDIR,
image_suffix=DEFAULT_IMAGE_SUFFIX):
"""Download this pair of images (unless we already have them on local disk),
and prepare a DiffRecord for them.
TODO(epoger): Make this asynchronously download images, rather than blocking
until the images have been downloaded and processed.
Args:
storage_root: root directory on local disk within which we store all
images
expected_image_url: file or HTTP url from which we will download the
expected image
expected_image_locator: a unique ID string under which we will store the
expected image within storage_root (probably including a checksum to
guarantee uniqueness)
actual_image_url: file or HTTP url from which we will download the
actual image
actual_image_locator: a unique ID string under which we will store the
actual image within storage_root (probably including a checksum to
guarantee uniqueness)
expected_images_subdir: the subdirectory expected images are stored in.
actual_images_subdir: the subdirectory actual images are stored in.
image_suffix: the suffix of images.
"""
expected_image_locator = _sanitize_locator(expected_image_locator)
actual_image_locator = _sanitize_locator(actual_image_locator)
# Download the expected/actual images, if we don't have them already.
# TODO(rmistry): Add a parameter that makes _download_and_open_image raise
# an exception if images are not found locally (instead of trying to
# download them).
expected_image_file = os.path.join(
storage_root, expected_images_subdir,
str(expected_image_locator) + image_suffix)
actual_image_file = os.path.join(
storage_root, actual_images_subdir,
str(actual_image_locator) + image_suffix)
try:
expected_image = _download_and_open_image(
expected_image_file, expected_image_url)
except Exception:
logging.exception('unable to download expected_image_url %s to file %s' %
(expected_image_url, expected_image_file))
raise
try:
actual_image = _download_and_open_image(
actual_image_file, actual_image_url)
except Exception:
logging.exception('unable to download actual_image_url %s to file %s' %
(actual_image_url, actual_image_file))
raise
# Generate the diff image (absolute diff at each pixel) and
# max_diff_per_channel.
diff_image = _generate_image_diff(actual_image, expected_image)
diff_histogram = diff_image.histogram()
(diff_width, diff_height) = diff_image.size
self._weighted_diff_measure = _calculate_weighted_diff_metric(
diff_histogram, diff_width * diff_height)
self._max_diff_per_channel = _max_per_band(diff_histogram)
# Generate the whitediff image (any differing pixels show as white).
# This is tricky, because when you convert color images to grayscale or
# black & white in PIL, it has its own ideas about thresholds.
# We have to force it: if a pixel has any color at all, it's a '1'.
bands = diff_image.split()
graydiff_image = ImageChops.lighter(ImageChops.lighter(
bands[0], bands[1]), bands[2])
whitediff_image = (graydiff_image.point(lambda p: p > 0 and VALUES_PER_BAND)
.convert('1', dither=Image.NONE))
# Calculate the perceptual difference percentage.
skpdiff_csv_dir = tempfile.mkdtemp()
try:
skpdiff_csv_output = os.path.join(skpdiff_csv_dir, 'skpdiff-output.csv')
expected_img = os.path.join(storage_root, expected_images_subdir,
str(expected_image_locator) + image_suffix)
actual_img = os.path.join(storage_root, actual_images_subdir,
str(actual_image_locator) + image_suffix)
find_run_binary.run_command(
[SKPDIFF_BINARY, '-p', expected_img, actual_img,
'--csv', skpdiff_csv_output, '-d', 'perceptual'])
with contextlib.closing(open(skpdiff_csv_output)) as csv_file:
for row in csv.DictReader(csv_file):
perceptual_similarity = float(row[' perceptual'].strip())
if not 0 <= perceptual_similarity <= 1:
# skpdiff outputs -1 if the images are different sizes. Treat any
# output that does not lie in [0, 1] as having 0% perceptual
# similarity.
perceptual_similarity = 0
# skpdiff returns the perceptual similarity, convert it to get the
# perceptual difference percentage.
self._perceptual_difference = 100 - (perceptual_similarity * 100)
finally:
shutil.rmtree(skpdiff_csv_dir)
# Final touches on diff_image: use whitediff_image as an alpha mask.
# Unchanged pixels are transparent; differing pixels are opaque.
diff_image.putalpha(whitediff_image)
# Store the diff and whitediff images generated above.
diff_image_locator = _get_difference_locator(
expected_image_locator=expected_image_locator,
actual_image_locator=actual_image_locator)
basename = str(diff_image_locator) + image_suffix
_save_image(diff_image, os.path.join(
storage_root, DIFFS_SUBDIR, basename))
_save_image(whitediff_image, os.path.join(
storage_root, WHITEDIFFS_SUBDIR, basename))
# Calculate difference metrics.
(self._width, self._height) = diff_image.size
self._num_pixels_differing = (
whitediff_image.histogram()[VALUES_PER_BAND - 1])
def get_num_pixels_differing(self):
"""Returns the absolute number of pixels that differ."""
return self._num_pixels_differing
def get_percent_pixels_differing(self):
"""Returns the percentage of pixels that differ, as a float between
0 and 100 (inclusive)."""
return ((float(self._num_pixels_differing) * 100) /
(self._width * self._height))
def get_perceptual_difference(self):
"""Returns the perceptual difference percentage."""
return self._perceptual_difference
def get_weighted_diff_measure(self):
"""Returns a weighted measure of image diffs, as a float between 0 and 100
(inclusive).
TODO(epoger): Delete this function, now that we have perceptual diff?
"""
return self._weighted_diff_measure
def get_max_diff_per_channel(self):
"""Returns the maximum difference between the expected and actual images
for each R/G/B channel, as a list."""
return self._max_diff_per_channel
def as_dict(self):
"""Returns a dictionary representation of this DiffRecord, as needed when
constructing the JSON representation."""
return {
KEY__DIFFERENCES__NUM_DIFF_PIXELS: self._num_pixels_differing,
KEY__DIFFERENCES__PERCENT_DIFF_PIXELS:
self.get_percent_pixels_differing(),
KEY__DIFFERENCES__WEIGHTED_DIFF: self.get_weighted_diff_measure(),
KEY__DIFFERENCES__MAX_DIFF_PER_CHANNEL: self._max_diff_per_channel,
KEY__DIFFERENCES__PERCEPTUAL_DIFF: self._perceptual_difference,
}
class ImageDiffDB(object):
""" Calculates differences between image pairs, maintaining a database of
them for download."""
def __init__(self, storage_root):
"""
Args:
storage_root: string; root path within the DB will store all of its stuff
"""
self._storage_root = storage_root
# Dictionary of DiffRecords, keyed by (expected_image_locator,
# actual_image_locator) tuples.
self._diff_dict = {}
def add_image_pair(self,
expected_image_url, expected_image_locator,
actual_image_url, actual_image_locator):
"""Download this pair of images (unless we already have them on local disk),
and prepare a DiffRecord for them.
TODO(epoger): Make this asynchronously download images, rather than blocking
until the images have been downloaded and processed.
When we do that, we should probably add a new method that will block
until all of the images have been downloaded and processed. Otherwise,
we won't know when it's safe to start calling get_diff_record().
jcgregorio notes: maybe just make ImageDiffDB thread-safe and create a
thread-pool/worker queue at a higher level that just uses ImageDiffDB?
Args:
expected_image_url: file or HTTP url from which we will download the
expected image
expected_image_locator: a unique ID string under which we will store the
expected image within storage_root (probably including a checksum to
guarantee uniqueness)
actual_image_url: file or HTTP url from which we will download the
actual image
actual_image_locator: a unique ID string under which we will store the
actual image within storage_root (probably including a checksum to
guarantee uniqueness)
"""
expected_image_locator = _sanitize_locator(expected_image_locator)
actual_image_locator = _sanitize_locator(actual_image_locator)
key = (expected_image_locator, actual_image_locator)
if key not in self._diff_dict:
try:
new_diff_record = DiffRecord(
self._storage_root,
expected_image_url=expected_image_url,
expected_image_locator=expected_image_locator,
actual_image_url=actual_image_url,
actual_image_locator=actual_image_locator)
except Exception:
# If we can't create a real DiffRecord for this (expected, actual) pair,
# store None and the UI will show whatever information we DO have.
# Fixes http://skbug.com/2368 .
logging.exception(
'got exception while creating a DiffRecord for '
'expected_image_url=%s , actual_image_url=%s; returning None' % (
expected_image_url, actual_image_url))
new_diff_record = None
self._diff_dict[key] = new_diff_record
def get_diff_record(self, expected_image_locator, actual_image_locator):
"""Returns the DiffRecord for this image pair.
Raises a KeyError if we don't have a DiffRecord for this image pair.
"""
key = (_sanitize_locator(expected_image_locator),
_sanitize_locator(actual_image_locator))
return self._diff_dict[key]
# Utility functions
def _calculate_weighted_diff_metric(histogram, num_pixels):
"""Given the histogram of a diff image (per-channel diff at each
pixel between two images), calculate the weighted diff metric (a
stab at how different the two images really are).
TODO(epoger): Delete this function, now that we have perceptual diff?
Args:
histogram: PIL histogram of a per-channel diff between two images
num_pixels: integer; the total number of pixels in the diff image
Returns: a weighted diff metric, as a float between 0 and 100 (inclusive).
"""
# TODO(epoger): As a wild guess at an appropriate metric, weight each
# different pixel by the square of its delta value. (The more different
# a pixel is from its expectation, the more we care about it.)
assert(len(histogram) % VALUES_PER_BAND == 0)
num_bands = len(histogram) / VALUES_PER_BAND
max_diff = num_pixels * num_bands * (VALUES_PER_BAND - 1)**2
total_diff = 0
for index in xrange(len(histogram)):
total_diff += histogram[index] * (index % VALUES_PER_BAND)**2
return float(100 * total_diff) / max_diff
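# Worked example (editor's note): for a 1-pixel RGB diff image whose only
# non-zero delta is 255 in a single channel,
#   max_diff   = 1 * 3 * 255**2
#   total_diff = 255**2
# so the weighted metric is 100 * total_diff / max_diff ~= 33.3.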
def _max_per_band(histogram):
"""Given the histogram of an image, return the maximum value of each band
(a.k.a. "color channel", such as R/G/B) across the entire image.
Args:
histogram: PIL histogram
Returns the maximum value of each band within the image histogram, as a list.
"""
max_per_band = []
assert(len(histogram) % VALUES_PER_BAND == 0)
num_bands = len(histogram) / VALUES_PER_BAND
for band in xrange(num_bands):
# Assuming that VALUES_PER_BAND is 256...
# the 'R' band makes up indices 0-255 in the histogram,
# the 'G' band makes up indices 256-511 in the histogram,
# etc.
min_index = band * VALUES_PER_BAND
index = min_index + VALUES_PER_BAND
while index > min_index:
index -= 1
if histogram[index] > 0:
max_per_band.append(index - min_index)
break
return max_per_band
def _generate_image_diff(image1, image2):
"""Wrapper for ImageChops.difference(image1, image2) that will handle some
errors automatically, or at least yield more useful error messages.
TODO(epoger): Currently, some of the images generated by the bots are RGBA
and others are RGB. I'm not sure why that is. For now, to avoid confusion
within the UI, convert all to RGB when diffing.
Args:
image1: a PIL image object
image2: a PIL image object
Returns: per-pixel diffs between image1 and image2, as a PIL image object
"""
try:
return ImageChops.difference(image1.convert('RGB'), image2.convert('RGB'))
except ValueError:
logging.error('Error diffing image1 [%s] and image2 [%s].' % (
repr(image1), repr(image2)))
raise
def _download_and_open_image(local_filepath, url):
"""Open the image at local_filepath; if there is no file at that path,
download it from url to that path and then open it.
Args:
local_filepath: path on local disk where the image should be stored
url: URL from which we can download the image if we don't have it yet
Returns: a PIL image object
"""
if not os.path.exists(local_filepath):
_mkdir_unless_exists(os.path.dirname(local_filepath))
with contextlib.closing(urllib.urlopen(url)) as url_handle:
with open(local_filepath, 'wb') as file_handle:
shutil.copyfileobj(fsrc=url_handle, fdst=file_handle)
return _open_image(local_filepath)
def _open_image(filepath):
"""Wrapper for Image.open(filepath) that yields more useful error messages.
Args:
filepath: path on local disk to load image from
Returns: a PIL image object
"""
try:
return Image.open(filepath)
except IOError:
# If we are unable to load an image from the file, delete it from disk
# and we will try to fetch it again next time. Fixes http://skbug.com/2247
logging.error('IOError loading image file %s ; deleting it.' % filepath)
os.remove(filepath)
raise
def _save_image(image, filepath, format='PNG'):
"""Write an image to disk, creating any intermediate directories as needed.
Args:
image: a PIL image object
filepath: path on local disk to write image to
format: one of the PIL image formats, listed at
http://effbot.org/imagingbook/formats.htm
"""
_mkdir_unless_exists(os.path.dirname(filepath))
image.save(filepath, format)
def _mkdir_unless_exists(path):
"""Unless path refers to an already-existing directory, create it.
Args:
path: path on local disk
"""
if not os.path.isdir(path):
os.makedirs(path)
def _sanitize_locator(locator):
"""Returns a sanitized version of a locator (one in which we know none of the
characters will have special meaning in filenames).
Args:
locator: string, or something that can be represented as a string
"""
return DISALLOWED_FILEPATH_CHAR_REGEX.sub('_', str(locator))
def _get_difference_locator(expected_image_locator, actual_image_locator):
"""Returns the locator string used to look up the diffs between expected_image
and actual_image.
We must keep this function in sync with getImageDiffRelativeUrl() in
static/loader.js
Args:
expected_image_locator: locator string pointing at expected image
actual_image_locator: locator string pointing at actual image
Returns: already-sanitized locator where the diffs between expected and
actual images can be found
"""
return "%s-vs-%s" % (_sanitize_locator(expected_image_locator),
_sanitize_locator(actual_image_locator))
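# e.g. (editor's note) _get_difference_locator('abc/123', 'def 456') returns
# 'abc_123-vs-def_456', since _sanitize_locator() maps every character
# outside [\w-] to '_'.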
| 38.971239
| 80
| 0.71723
|
cfecba810e36331947d77c035996417aa2a118ab
| 8,617
|
py
|
Python
|
inkind/contribution.py
|
rubin-observatory/in-kind
|
af837ff8bfb9f5f643b0f11f92defeed58846958
|
[
"MIT"
] | null | null | null |
inkind/contribution.py
|
rubin-observatory/in-kind
|
af837ff8bfb9f5f643b0f11f92defeed58846958
|
[
"MIT"
] | null | null | null |
inkind/contribution.py
|
rubin-observatory/in-kind
|
af837ff8bfb9f5f643b0f11f92defeed58846958
|
[
"MIT"
] | null | null | null |
import sys
# ======================================================================
class Contribution():
def __init__(self, vb=False, program=None):
self.vb = vb
self.PROGRAM_CODE = program
self.current = None
self.ID = None
self.URL = None
self.TITLE = None
self.EXCEPTION = None
self.LOI_CODE = None
self.LEAD = None
self.EMAIL = None
self.RECIPIENTS = None
self.VALUE = None
self.CATEGORY = None
self.text = {}
return
def read(self, data):
# Check the contribution ID:
if "Statement of Work and Detailed Plan" in data:
THIS_ID = data.split(".")[0].strip()
if THIS_ID != self.ID[8:11]:
print(" WARNING: Modifying contribution ID to match value in proposal "+self.PROGRAM_CODE+". Proposal value cf sequential ID: ", THIS_ID, self.ID[8:11],"New ID:",self.PROGRAM_CODE+"-"+THIS_ID, file=sys.stderr)
self.ID = self.PROGRAM_CODE+"-"+THIS_ID
return
# Get the contribution title:
if "TITLE:" in data[0:20]:
if self.vb: print(" Data: ", data[0:20])
self.TITLE = ":".join(data.split(":")[1:])[1:].strip()
if self.vb: print(" Contribution Title: ", self.TITLE)
return
# Check for exception requests. Format: "Exception requested: please begin review on November 6"
if "Exception requested:" in data[0:20]:
request = data.split(":")[1][1:]
self.EXCEPTION = " ".join(request.split(" ")[-2:]).strip()
if self.vb: print(" Contribution Due Date: ", self.EXCEPTION)
return
# Get the LOI Code:
if "LOI Code:" in data[0:20]:
self.LOI_CODE = data.split(":")[1][1:].strip()
if self.vb: print(" Contribution LOI Code: ", self.LOI_CODE)
return
# Get the Contribution Lead:
if "Contribution Lead:" in data[0:20]:
self.LEAD = data.split(":")[1][1:].strip()
if self.vb: print(" Contribution Lead: ", self.LEAD)
return
# Get the Contribution Recipients:
if "Contribution Recipients:" in data[0:40]:
self.RECIPIENTS = data.split(":")[1][1:].strip()
if self.vb: print(" Contribution Recipients: ", self.RECIPIENTS)
# The recipients are the last thing of interest in the section, so ignore everything else from here.
self.current = None
return
# Ignore the main subsection headings:
if "PLANNED ACTIVITIES" in data: return
if "TECHNICAL OBJECTIVES" in data: return
if "EXPECTED RIGHTS" in data: return
if "KEY PERSONNEL" in data: return
# Set the current subsection:
if "Background: Description" in data:
self.current = "BACKGROUND_DESCRIPTION"
self.text[self.current] = ""
return
if "Background: One" in data:
self.current = "BACKGROUND_SUMMARY"
self.text[self.current] = ""
return
if "Activity: Description" in data:
self.current = "ACTIVITY_DESCRIPTION"
self.text[self.current] = ""
return
if "Activity: One" in data:
self.current = "ACTIVITY_SUMMARY"
self.text[self.current] = ""
return
if "Deliverables: Description" in data:
self.current = "DELIVERABLES_DESCRIPTION"
self.text[self.current] = ""
return
if "Deliverables: One" in data:
self.current = "DELIVERABLES_SUMMARY"
self.text[self.current] = ""
return
if "Deliverables: Timeline" in data:
self.current = "DELIVERABLES_TIMELINE"
self.text[self.current] = ""
return
if "Data Rights: Description" in data:
self.current = "DATA_RIGHTS_DESCRIPTION"
self.text[self.current] = ""
return
if "Data Rights: One" in data:
self.current = "DATA_RIGHTS_SUMMARY"
self.text[self.current] = ""
return
# Now append the data chunk:
if self.current is not None:
self.text[self.current] = self.text[self.current] + data.replace('"',"'")
return
def print_SOW(self):
print("Title: "+self.TITLE)
print("Background: "+self.text["BACKGROUND_SUMMARY"])
print("Activities: "+self.text["ACTIVITY_SUMMARY"])
print("Deliverables: "+self.text["DELIVERABLES_SUMMARY"])
print("Data Rights: "+self.text["DATA_RIGHTS_SUMMARY"])
print("Contribution Lead: "+str(self.LEAD))
print("Contribution Recipients: "+str(self.RECIPIENTS))
return
def one_line_SOW(self):
try:
line = "Background: "+self.text["BACKGROUND_SUMMARY"] + \
" Activities: "+self.text["ACTIVITY_SUMMARY"] + \
" Deliverables: "+self.text["DELIVERABLES_SUMMARY"] + \
" Data Rights: "+self.text["DATA_RIGHTS_SUMMARY"]
except Exception:
line = "Not yet available"
return line
def timeline(self):
try:
line = self.text["DELIVERABLES_TIMELINE"]
except Exception:
line = "Not yet available"
return line
def extract_PI_value(self):
try:
N = []
for word in self.text["DATA_RIGHTS_SUMMARY"].split():
try:
N.append(float(word))
except ValueError:
pass
# PJM 2021-03-29 Switched to non-integer floats for PI value
# Return integer if possible:
# if int(N[0]) == int(round(N[0]+0.01)):
# self.VALUE = int(N[0])
# else:
self.VALUE = N[0]
except Exception:
self.VALUE = "Not yet available"
return self.VALUE
def estimate_category(self):
try:
self.CATEGORY = "Unknown"
if "ataset" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "1.1 - Old complementary dataset"
if "dded" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "1.2 - Reprocessed/analyzed LSST data"
if "arget" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "1.3 - New complementary targeted data"
if "urvey" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "1.4 - New complementary survey"
if "IDAC" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "2.2 - Lite IDAC"
if "Full IDAC" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "2.1 - Full IDAC"
if "Scientific Processing Center" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "2.3 - Computing resources for SCs"
if "elescope time" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "3.1 - Open telescope time"
if "ollow-up" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "3.2 - Active Follow-up Program"
if "directable" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "4.2 - Directable SW dev"
if "non-directable" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "4.3 - Non-directable SW dev"
if "eneral pool" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "4.1 - General pooled SW dev"
if "onstruction" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "5.1 - Construction"
if "ommissioning" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "5.2 - Commissioning"
if "ffset" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "5.3 - Operations Cost Offset"
if "nhancement" in self.text["ACTIVITY_SUMMARY"]:
self.CATEGORY = "5.4 - Non-SW Facility Enhancement"
except Exception:
self.CATEGORY = "Not yet available"
return self.CATEGORY
def match_email(self, directory):
# print(directory.people)
if self.LEAD is None:
self.EMAIL = None
else:
# Attempt to extract their surname
# print("self.LEAD = ",self.LEAD)
surname = self.LEAD.split()[-1]
for name in directory.people:
if surname in name:
self.EMAIL = directory.people[name]["EMAIL"]
return self.EMAIL
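# Example (editor's note): extract_PI_value() returns the first number found
# in the data-rights summary, so a summary containing "valued at 2.5 PI
# units" yields VALUE == 2.5; estimate_category() then keys off substrings of
# the activity summary, e.g. "Full IDAC" maps to "2.1 - Full IDAC".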
| 42.448276
| 228
| 0.543577
|
c6900a2b165728df482de4a9373ac81b3fcfc3a0
| 3,020
|
py
|
Python
|
gtrxl.py
|
stefanosantaris/sar
|
c011089d81699c2950b005e4d7d7d86fc5a95667
|
[
"MIT"
] | 3
|
2021-09-27T03:10:01.000Z
|
2021-12-29T07:18:06.000Z
|
gtrxl.py
|
stefanosantaris/sar
|
c011089d81699c2950b005e4d7d7d86fc5a95667
|
[
"MIT"
] | null | null | null |
gtrxl.py
|
stefanosantaris/sar
|
c011089d81699c2950b005e4d7d7d86fc5a95667
|
[
"MIT"
] | null | null | null |
import os
import torch as T
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
from typing import Optional
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch import Tensor
import math
'''
Positional Encoding: takes a 2d tensor --> 3d tensor.
Injects information about the relative position of each item in the sequence.
'''
class PositionalEncoding(nn.Module):
def __init__(self, d_model,dropout=0.1, max_len=1024):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = T.zeros(max_len, d_model)
position = T.arange(0, max_len, dtype=T.float).unsqueeze(1)
div_term = T.exp(T.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = T.sin(position * div_term)
pe[:, 1::2] = T.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0,1)
self.register_buffer('pe',pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
'''
Recreates the transformer layers described in the following paper:
https://arxiv.org/pdf/1910.06764.pdf
'''
class TEL(TransformerEncoderLayer):
def __init__(self, d_model, nhead, n_layers=1, dim_feedforward=256, activation="relu", dropout=0):
super().__init__(d_model, nhead, dim_feedforward, dropout,activation)
# 2 GRUs are needed - 1 for the beginning / 1 at the end
self.gru_1 = nn.GRU(d_model, d_model, num_layers=n_layers, batch_first=True)
self.gru_2 = nn.GRU(input_size=d_model, hidden_size=d_model, num_layers=n_layers, batch_first=True)
def forward(self, src: Tensor, src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
h = src.sum(dim=1).unsqueeze(dim=0)
src = self.norm1(src)
out = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
out,h = self.gru_1(out,h)
out = self.norm2(out)
out = self.activation(self.linear1(out))
out = self.activation(self.linear2(out))
out,h = self.gru_2(out,h)
return out
'''
Implementation of a transformer model using GRUs
'''
class GTrXL(nn.Module):
def __init__(self, d_model, nheads, transformer_layers,hidden_dims=256, n_layers=1 ,chkpt_dir="models",activation='relu',network_name='network.pt'):
super(GTrXL, self).__init__()
# Module layers
self.embed = PositionalEncoding(d_model)
encoded = TEL(d_model, nheads, n_layers, dim_feedforward = hidden_dims, activation=activation)
self.transformer = TransformerEncoder(encoded, transformer_layers)
self.file = os.path.join(chkpt_dir, network_name)
def forward(self, x):
x = self.embed(x)
x = self.transformer(x)
return x
def save(self):
T.save(self.state_dict() , self.file)
def load(self):
self.load_state_dict(T.load(self.file))
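# Shape sketch (editor's addition; values are illustrative):
#     model = GTrXL(d_model=64, nheads=4, transformer_layers=2)
#     x = T.zeros(8, 32, 64)   # (batch, seq_len, d_model); the GRUs are batch_first
#     y = model(x)             # same shape: (8, 32, 64)
# Each TEL seeds its GRU hidden state from src.sum(dim=1), so the gated
# residual path sees one summary vector per sequence.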
| 38.227848
| 153
| 0.664238
|
137837915daf470736bdad1def33eb5f5a3e04c4
| 7,529
|
py
|
Python
|
flumine/streams/historicalstream.py
|
betcode-org/flumine
|
b33d82b75175106b2de0d0c4f9851599b085e389
|
[
"MIT"
] | 2
|
2022-03-21T11:42:56.000Z
|
2022-03-26T08:36:18.000Z
|
flumine/streams/historicalstream.py
|
betcode-org/flumine
|
b33d82b75175106b2de0d0c4f9851599b085e389
|
[
"MIT"
] | 4
|
2022-03-25T09:49:37.000Z
|
2022-03-25T10:18:13.000Z
|
flumine/streams/historicalstream.py
|
betcode-org/flumine
|
b33d82b75175106b2de0d0c4f9851599b085e389
|
[
"MIT"
] | null | null | null |
import logging
import datetime
from typing import Optional
from betfairlightweight.streaming import StreamListener, HistoricalGeneratorStream
from betfairlightweight.streaming.stream import MarketStream, RaceStream
from betfairlightweight.streaming.cache import MarketBookCache, RaceCache
from betfairlightweight.resources.baseresource import BaseResource
from betfairlightweight.compat import json
from .basestream import BaseStream
from ..exceptions import ListenerError
from ..utils import create_time
logger = logging.getLogger(__name__)
class FlumineMarketStream(MarketStream):
"""
Custom bflw stream to speed up processing
by limiting to inplay/not inplay or limited
seconds to start.
`_process` updated to not call `on_process`
which reduces some function calls.
"""
def _process(self, data: list, publish_time: int) -> bool:
active = False
for market_book in data:
if "id" not in market_book:
continue
market_id = market_book["id"]
full_image = market_book.get("img", False)
market_book_cache = self._caches.get(market_id)
if (
full_image or market_book_cache is None
): # historic data does not contain img
if "marketDefinition" not in market_book:
logger.warning(
"[%s: %s]: Missing marketDefinition on market %s resulting "
"in potential missing data in the MarketBook (make sure "
"EX_MARKET_DEF is requested)"
% (self, self.unique_id, market_id)
)
market_book_cache = MarketBookCache(
market_id,
publish_time,
self._lightweight,
self._calculate_market_tv,
self._cumulative_runner_tv,
)
self._caches[market_id] = market_book_cache
logger.info(
"[%s: %s]: %s added, %s markets in cache"
% (self, self.unique_id, market_id, len(self._caches))
)
# listener_kwargs filtering
active = True
if "marketDefinition" in market_book:
_definition_status = market_book["marketDefinition"].get("status")
_definition_in_play = market_book["marketDefinition"].get("inPlay")
_definition_market_time = market_book["marketDefinition"].get(
"marketTime"
)
else:
_definition_status = market_book_cache._definition_status
_definition_in_play = market_book_cache._definition_in_play
_definition_market_time = market_book_cache.market_definition[
"marketTime"
]
# if market is not open (closed/suspended) process regardless
if _definition_status == "OPEN":
if self._listener.inplay:
if not _definition_in_play:
active = False
elif self._listener.seconds_to_start:
_now = datetime.datetime.utcfromtimestamp(publish_time / 1e3)
_market_time = BaseResource.strip_datetime(_definition_market_time)
seconds_to_start = (_market_time - _now).total_seconds()
if seconds_to_start > self._listener.seconds_to_start:
active = False
if self._listener.inplay is False:
if _definition_in_play:
active = False
# check if refresh required
if active and not market_book_cache.active:
market_book_cache.refresh_cache()
market_book_cache.update_cache(market_book, publish_time, active=active)
self._updates_processed += 1
return active
class FlumineRaceStream(RaceStream):
"""
`_process` updated to not call `on_process`
which reduces some function calls.
"""
def _process(self, race_updates: list, publish_time: int) -> bool:
active = False
for update in race_updates:
market_id = update["mid"]
race_cache = self._caches.get(market_id)
if race_cache is None:
race_id = update.get("id")
race_cache = RaceCache(
market_id, publish_time, race_id, self._lightweight
)
race_cache.start_time = create_time(publish_time, race_id)
self._caches[market_id] = race_cache
logger.info(
"[%s: %s]: %s added, %s markets in cache"
% (self, self.unique_id, market_id, len(self._caches))
)
# filter after start time
diff = (
race_cache.start_time
- datetime.datetime.utcfromtimestamp(publish_time / 1e3)
).total_seconds()
if diff <= 0:
race_cache.update_cache(update, publish_time)
self._updates_processed += 1
active = True
return active
class HistoricListener(StreamListener):
"""
Custom listener to restrict processing by
inplay or seconds_to_start.
"""
def __init__(self, inplay: bool = None, seconds_to_start: float = None, **kwargs):
super(HistoricListener, self).__init__(**kwargs)
self.inplay = inplay
self.seconds_to_start = seconds_to_start
def _add_stream(self, unique_id: int, operation: str):
if operation == "marketSubscription":
return FlumineMarketStream(self, unique_id)
elif operation == "orderSubscription":
raise ListenerError("Unable to process order stream")
elif operation == "raceSubscription":
return FlumineRaceStream(self, unique_id)
def on_data(self, raw_data: str) -> Optional[bool]:
try:
data = json.loads(raw_data)
except ValueError:
logger.error("value error: %s" % raw_data)
return
# remove error handler / operation check
# skip on_change / on_update as we know it is always an update
publish_time = data["pt"]
return self.stream._process(data[self.stream._lookup], publish_time)
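# Example sketch (the values below are assumptions for illustration only):
# restrict a simulation to pre-play updates in the final ten minutes before
# the scheduled off:
#
#   listener = HistoricListener(inplay=False, seconds_to_start=600)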
class FlumineHistoricalGeneratorStream(HistoricalGeneratorStream):
"""Super fast historical stream"""
def _read_loop(self) -> dict:
self.listener.register_stream(self.unique_id, self.operation)
listener_on_data = self.listener.on_data # cache functions
stream_snap = self.listener.stream.snap
with open(self.file_path, "r") as f:
for update in f:
if listener_on_data(update):
yield stream_snap()
class HistoricalStream(BaseStream):
LISTENER = HistoricListener
MAX_LATENCY = None
def run(self) -> None:
pass
def handle_output(self) -> None:
pass
def create_generator(self):
self._listener.update_clk = (
False # do not update clk on updates (not required when simulating)
)
stream = FlumineHistoricalGeneratorStream(
file_path=self.market_filter,
listener=self._listener,
operation=self.operation,
unique_id=self.stream_id,
)
return stream.get_generator()
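# Minimal usage sketch (assumes a flumine simulation context; the stream's
# constructor arguments come from BaseStream and are omitted here):
#
#   gen = historical_stream.create_generator()
#   for market_books in gen():
#       ...  # each yield is a snap of the cached MarketBook(s)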
| 37.834171
| 87
| 0.59822
|
768aaed0c43e30592ad75c47e47f304261cf7d97
| 38,011
|
py
|
Python
|
torcharrow/test/lib_test/test_column.py
|
Nayef211/torcharrow
|
28b8ca4bf1d91878daf087a0030790a8161b5f1f
|
[
"BSD-3-Clause"
] | null | null | null |
torcharrow/test/lib_test/test_column.py
|
Nayef211/torcharrow
|
28b8ca4bf1d91878daf087a0030790a8161b5f1f
|
[
"BSD-3-Clause"
] | null | null | null |
torcharrow/test/lib_test/test_column.py
|
Nayef211/torcharrow
|
28b8ca4bf1d91878daf087a0030790a8161b5f1f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import unittest
from dataclasses import dataclass
from typing import Any, List, Union
import pyarrow as pa # @manual=@/third-party:apache-arrow:apache-arrow-py
import torcharrow._torcharrow as ta
from pyarrow.cffi import ffi # @manual=@/third-party:python-cffi:python-cffi-py
class BaseTestColumns(unittest.TestCase):
# pyre-fixme[11]: Annotation `BaseColumn` is not defined as a type.
def assert_Column(self, col: ta.BaseColumn, val: List[Any]) -> None:
self.assertEqual(len(col), len(val))
self.assertEqual(col.get_null_count(), sum(x is None for x in val))
for i in range(len(val)):
if val[i] is None:
self.assertTrue(col.is_null_at(i))
else:
self.assertFalse(col.is_null_at(i))
if isinstance(val[i], list):
self.assert_Column(col[i], val[i])
elif isinstance(val[i], float):
self.assertAlmostEqual(col[i], val[i], places=6)
else:
self.assertEqual(col[i], val[i])
class TestSimpleColumns(BaseTestColumns):
def test_SimpleColumnInt64(self) -> None:
data = [1, 2, None, 3, 4, None]
col = infer_column(data)
self.assertEqual(col[0], 1)
self.assertEqual(col[1], 2)
self.assertEqual(col[3], 3)
self.assertEqual(col[4], 4)
self.assertEqual(len(col), 6)
with self.assertRaises(TypeError):
# TypeError: an integer is required (got type NoneType)
col.append(None)
with self.assertRaises(TypeError):
# TypeError: an integer is required (got type String)
col.append("hello")
self.assertEqual(col.is_null_at(0), False)
self.assertEqual(col.is_null_at(1), False)
self.assertEqual(col.is_null_at(2), True)
self.assertEqual(col.is_null_at(3), False)
self.assertEqual(col.is_null_at(4), False)
self.assertEqual(col.is_null_at(5), True)
self.assertEqual(col.get_null_count(), 2)
sliced_col = col.slice(1, 3)
self.assertEqual(len(sliced_col), 3)
self.assertEqual(sliced_col[0], 2)
self.assertEqual(sliced_col[2], 3)
self.assertEqual(sliced_col.get_null_count(), 1)
def test_SimpleColumnInt64_unary(self) -> None:
data = [1, -2, None, 3, -4, None]
col = infer_column(data)
self.assertEqual(col.type().kind(), ta.TypeKind.BIGINT)
neg_col = col.neg()
self.assert_Column(neg_col, [-1, 2, None, -3, 4, None])
self.assertEqual(neg_col.type().kind(), ta.TypeKind.BIGINT)
neg_col2 = neg_col.neg()
self.assert_Column(neg_col2, [1, -2, None, 3, -4, None])
self.assertEqual(neg_col2.type().kind(), ta.TypeKind.BIGINT)
neg_col3 = neg_col2.neg()
self.assert_Column(neg_col3, [-1, 2, None, -3, 4, None])
self.assertEqual(neg_col3.type().kind(), ta.TypeKind.BIGINT)
abs_col = col.abs()
self.assert_Column(abs_col, [1, 2, None, 3, 4, None])
self.assertEqual(abs_col.type().kind(), ta.TypeKind.BIGINT)
def test_SimpleColumnInt64_binary(self) -> None:
data1 = [1, -2, None, 3, -4, None]
col1 = infer_column(data1)
data2 = [None, 1, 2, 3, 4, 5]
col2 = infer_column(data2)
# column ops
sum_col = col1.add(col2)
self.assert_Column(sum_col, [None, -1, None, 6, 0, None])
self.assertEqual(sum_col.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(col1.sub(col2), [None, -3, None, 0, -8, None])
self.assert_Column(col1.mul(col2), [None, -2, None, 9, -16, None])
self.assert_Column(col1.mod(col2), [None, 0, None, 0, 0, None])
# type promotion
data3 = [None, 1.0, 2.0, 3.0, 4.0, 5.0]
col3 = infer_column(data3)
self.assertEqual(col3.type().kind(), ta.TypeKind.REAL)
sum_col = col1.add(col3)
self.assertEqual(sum_col.type().kind(), ta.TypeKind.REAL)
self.assert_Column(sum_col, [None, -1.0, None, 6.0, 0.0, None])
sum_col2 = col3.add(col1)
self.assertEqual(sum_col2.type().kind(), ta.TypeKind.REAL)
self.assert_Column(sum_col2, [None, -1.0, None, 6.0, 0.0, None])
# scalar ops
add_scalar = col1.add(1)
self.assertEqual(add_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(add_scalar, [2, -1, None, 4, -3, None])
add_scalar = col1.add(0.1)
self.assertEqual(add_scalar.type().kind(), ta.TypeKind.REAL)
self.assert_Column(add_scalar, [1.1, -1.9, None, 3.1, -3.9, None])
add_scalar = col1.radd(1)
self.assertEqual(add_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(add_scalar, [2, -1, None, 4, -3, None])
add_scalar = col1.radd(0.1)
self.assertEqual(add_scalar.type().kind(), ta.TypeKind.REAL)
self.assert_Column(add_scalar, [1.1, -1.9, None, 3.1, -3.9, None])
sub_scalar = col1.sub(2)
self.assertEqual(sub_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(sub_scalar, [-1, -4, None, 1, -6, None])
sub_scalar = col1.sub(0.1)
self.assertEqual(sub_scalar.type().kind(), ta.TypeKind.REAL)
self.assert_Column(sub_scalar, [0.9, -2.1, None, 2.9, -4.1, None])
sub_scalar = col1.rsub(2)
self.assertEqual(sub_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(sub_scalar, [1, 4, None, -1, 6, None])
sub_scalar = col1.rsub(0.1)
self.assertEqual(sub_scalar.type().kind(), ta.TypeKind.REAL)
self.assert_Column(sub_scalar, [-0.9, 2.1, None, -2.9, 4.1, None])
mul_scalar = col1.mul(2)
self.assertEqual(mul_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(mul_scalar, [2, -4, None, 6, -8, None])
mul_scalar = col1.mul(-2.0)
self.assertEqual(mul_scalar.type().kind(), ta.TypeKind.REAL)
self.assert_Column(mul_scalar, [-2.0, 4.0, None, -6.0, 8.0, None])
mul_scalar = col1.rmul(2)
self.assertEqual(mul_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(mul_scalar, [2, -4, None, 6, -8, None])
mul_scalar = col1.rmul(-2.0)
self.assertEqual(mul_scalar.type().kind(), ta.TypeKind.REAL)
self.assert_Column(mul_scalar, [-2.0, 4.0, None, -6.0, 8.0, None])
mod_scalar = col1.mod(3)
self.assertEqual(mod_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(mod_scalar, [1, 1, None, 0, 2, None])
mod_scalar = col1.mod(-3.0)
self.assertEqual(mod_scalar.type().kind(), ta.TypeKind.REAL)
self.assert_Column(mod_scalar, [-2.0, -2.0, None, 0.0, -1.0, None])
mod_scalar = col1.rmod(3)
self.assertEqual(mod_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(mod_scalar, [0, -1, None, 0, -1, None])
mod_scalar = col1.rmod(-3.0)
self.assertEqual(mod_scalar.type().kind(), ta.TypeKind.REAL)
self.assert_Column(mod_scalar, [0.0, -1.0, None, 0.0, -3.0, None])
        # It's debatable whether this (adding BIGINT to BOOLEAN) should be
        # supported, but since PyTorch supports it for NumPy compatibility,
        # TorchArrow supports it as well.
add_scalar = col1.add(True)
self.assertEqual(add_scalar.type().kind(), ta.TypeKind.BIGINT)
self.assert_Column(add_scalar, [2, -1, None, 4, -3, None])
def test_SimpleColumnFloat32_unary(self) -> None:
data = [1.2, -2.3, None, 3.4, -4.6, None]
col = infer_column(data)
self.assertEqual(col.type().kind(), ta.TypeKind.REAL)
neg_col = col.neg()
self.assert_Column(neg_col, [-1.2, 2.3, None, -3.4, 4.6, None])
self.assertEqual(neg_col.type().kind(), ta.TypeKind.REAL)
abs_col = col.abs()
self.assert_Column(abs_col, [1.2, 2.3, None, 3.4, 4.6, None])
self.assertEqual(abs_col.type().kind(), ta.TypeKind.REAL)
round_col = col.round()
self.assert_Column(round_col, [1.0, -2.0, None, 3.0, -5.0, None])
self.assertEqual(round_col.type().kind(), ta.TypeKind.REAL)
def test_SimpleColumnBoolean(self) -> None:
data = [True, True, True, True]
col = infer_column(data)
for i in range(4):
self.assertEqual(col[i], True)
self.assertEqual(len(col), 4)
with self.assertRaises(TypeError):
# TypeError: a boolean is required (got type NoneType)
col.append(None)
with self.assertRaises(TypeError):
# TypeError: a boolean is required (got type String)
col.append("hello")
col.append_null()
self.assertEqual(col.is_null_at(0), False)
self.assertEqual(col.is_null_at(1), False)
self.assertEqual(col.is_null_at(2), False)
self.assertEqual(col.is_null_at(3), False)
self.assertEqual(col.is_null_at(4), True)
def test_SimpleColumnBoolean_unary(self) -> None:
data = [True, False, None, True, False, None]
col = infer_column(data)
self.assertEqual(col.type().kind(), ta.TypeKind.BOOLEAN)
inv_col = col.invert()
self.assertEqual(inv_col.type().kind(), ta.TypeKind.BOOLEAN)
self.assert_Column(inv_col, [False, True, None, False, True, None])
def test_SimpleColumnString(self) -> None:
data = ["0", "1", "2", "3"]
col = infer_column(data)
for i in range(4):
self.assertEqual(col[i], str(i))
self.assertEqual(len(col), 4)
with self.assertRaises(TypeError):
# TypeError: a string is required (got type NoneType)
col.append(None)
with self.assertRaises(TypeError):
# TypeError: a string is required (got type int)
col.append(1)
col.append_null()
self.assertEqual(col.is_null_at(0), False)
self.assertEqual(col.is_null_at(1), False)
self.assertEqual(col.is_null_at(2), False)
self.assertEqual(col.is_null_at(3), False)
self.assertEqual(col.is_null_at(4), True)
def test_SimpleColumnUTF(self) -> None:
s = ["hello.this", "is.interesting.", "this.is_24", "paradise"]
col = infer_column(s)
for i in range(4):
self.assertEqual(col[i], s[i])
self.assertEqual(len(col), 4)
def test_ConstantColumn(self) -> None:
# INTEGER
col = ta.ConstantColumn(42, 6, ta.VeloxType_INTEGER())
self.assertTrue(isinstance(col.type(), ta.VeloxType_INTEGER))
self.assert_Column(col, [42] * 6)
###########
# BIGINT
col = ta.ConstantColumn(42, 6)
self.assertTrue(isinstance(col.type(), ta.VeloxType_BIGINT))
self.assert_Column(col, [42] * 6)
# Test use constant column for normal add
data = [1, -2, None, 3, -4, None]
num_column = infer_column(data)
add_result = num_column.add(col)
self.assertTrue(isinstance(add_result.type(), ta.VeloxType_BIGINT))
self.assert_Column(add_result, [43, 40, None, 45, 38, None])
add_result = col.add(num_column)
self.assertTrue(isinstance(add_result.type(), ta.VeloxType_BIGINT))
self.assert_Column(add_result, [43, 40, None, 45, 38, None])
###########
# REAL
col = ta.ConstantColumn(4.2, 6)
self.assertTrue(isinstance(col.type(), ta.VeloxType_REAL))
self.assert_Column(col, [4.2] * 6)
# Test use constant column for normal add
data = [1.2, -2.3, None, 3.4, -4.6, None]
num_column = infer_column(data)
add_result = num_column.add(col)
self.assertTrue(isinstance(add_result.type(), ta.VeloxType_REAL))
self.assert_Column(add_result, [5.4, 1.9, None, 7.6, -0.4, None])
add_result = col.add(num_column)
self.assertTrue(isinstance(add_result.type(), ta.VeloxType_REAL))
self.assert_Column(add_result, [5.4, 1.9, None, 7.6, -0.4, None])
###########
# VARCHAR
col = ta.ConstantColumn("abc", 6)
self.assertTrue(isinstance(col.type(), ta.VeloxType_VARCHAR))
self.assert_Column(col, ["abc"] * 6)
def test_FromPyList(self) -> None:
# BIGINT
col = ta.Column(ta.VeloxType_BIGINT(), [1, 2, None, 4])
self.assertTrue(isinstance(col.type(), ta.VeloxType_BIGINT))
self.assert_Column(col, [1, 2, None, 4])
# INTEGER
col = ta.Column(ta.VeloxType_INTEGER(), [1, 2, None, 4])
self.assertTrue(isinstance(col.type(), ta.VeloxType_INTEGER))
self.assert_Column(col, [1, 2, None, 4])
# SMALLINT
col = ta.Column(ta.VeloxType_SMALLINT(), [1, 2, None, 4])
self.assertTrue(isinstance(col.type(), ta.VeloxType_SMALLINT))
self.assert_Column(col, [1, 2, None, 4])
# TINYINT
col = ta.Column(ta.VeloxType_TINYINT(), [1, 2, None, 4])
self.assertTrue(isinstance(col.type(), ta.VeloxType_TINYINT))
self.assert_Column(col, [1, 2, None, 4])
# REAL
col = ta.Column(ta.VeloxType_REAL(), [1, 2, None, 4])
self.assertTrue(isinstance(col.type(), ta.VeloxType_REAL))
self.assert_Column(col, [1.0, 2.0, None, 4.0])
# DOUBLE
col = ta.Column(ta.VeloxType_DOUBLE(), [1, 2, None, 4])
self.assertTrue(isinstance(col.type(), ta.VeloxType_DOUBLE))
self.assert_Column(col, [1.0, 2.0, None, 4.0])
# BOOLEAN
col = ta.Column(ta.VeloxType_BOOLEAN(), [True, False, None, True])
self.assertTrue(isinstance(col.type(), ta.VeloxType_BOOLEAN))
self.assert_Column(col, [True, False, None, True])
# VARCHAR
col = ta.Column(ta.VeloxType_VARCHAR(), ["foo", "bar", None, "abc"])
self.assertTrue(isinstance(col.type(), ta.VeloxType_VARCHAR))
self.assert_Column(col, ["foo", "bar", None, "abc"])
# ARRAY of scalar element
col = ta.Column(
ta.VeloxArrayType(ta.VeloxType_VARCHAR()),
[["foo", "bar"], None, ["abc", None]],
)
self.assertTrue(isinstance(col.type(), ta.VeloxArrayType))
self.assertTrue(isinstance(col.type().element_type(), ta.VeloxType_VARCHAR))
self.assert_Column(col, [["foo", "bar"], None, ["abc", None]])
# ARRAY of ROW element
# col = ta.Column(
# ta.VeloxArrayType(
# ta.VeloxRowType(
# ["f1", "f2"], [ta.VeloxType_VARCHAR(), ta.VeloxType_BIGINT()]
# )
# ),
# [[("foo", 1), ("bar", 2)], None, [("abc", 3), ("def", 4)]],
# )
# self.assertTrue(isinstance(col.type(), ta.VeloxArrayType))
# self.assertTrue(isinstance(col.type().element_type(), ta.VeloxRowType))
# self.assert_Column(
# col, [[("foo", 1), ("bar", 2)], None, [("abc", 3), ("def", 4)]]
# )
def test_NullCount(self) -> None:
col = infer_column([None, 1, 2, None])
self.assertEqual(col.get_null_count(), 2)
colSlice = col.slice(col.offset, col.length)
self.assertEqual(colSlice.get_null_count(), 2)
colSlice = col.slice(1, col.length - 1)
self.assertEqual(colSlice.get_null_count(), 1)
self.assertEqual(col.get_null_count(), 2)
colSlice = col.slice(1, 2)
self.assertEqual(colSlice.get_null_count(), 0)
self.assertEqual(col.get_null_count(), 2)
def test_ToArrow_Numerical(self) -> None:
c_array = ffi.new("struct ArrowArray*")
ptr_array = int(ffi.cast("uintptr_t", c_array))
col = infer_column([None, 1, 2, None])
col._export_to_arrow(ptr_array)
self.assertEqual(c_array.length, 4)
self.assertEqual(c_array.null_count, 2)
self.assertEqual(c_array.n_buffers, 2)
vals = ffi.cast("int64_t*", c_array.buffers[1])
self.assertEqual(vals[1], 1)
self.assertEqual(vals[2], 2)
self.assertEqual(c_array.n_children, 0)
self.assertNotEqual(c_array.release, ffi.NULL)
c_array_slice = ffi.new("struct ArrowArray*")
ptr_array_slice = int(ffi.cast("uintptr_t", c_array_slice))
col_slice = col.slice(1, 3)
col_slice._export_to_arrow(ptr_array_slice)
self.assertEqual(c_array_slice.length, 3)
self.assertEqual(c_array_slice.null_count, 1)
self.assertEqual(c_array_slice.n_buffers, 2)
vals_slice = ffi.cast("int64_t*", c_array_slice.buffers[1])
self.assertEqual(vals_slice[0], 1)
self.assertEqual(vals_slice[1], 2)
self.assertEqual(c_array_slice.n_children, 0)
self.assertNotEqual(c_array_slice.release, ffi.NULL)
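    # Note (sketch, not part of the original tests): this exercises the Arrow
    # C Data Interface; for an int64 array, buffers[0] is the validity bitmap
    # and buffers[1] holds the values, hence n_buffers == 2 above.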
def test_ToArrow_Struct(self) -> None:
c_array = ffi.new("struct ArrowArray*")
ptr_array = int(ffi.cast("uintptr_t", c_array))
col = ta.Column(
ta.VeloxRowType(
["f1", "f2"],
[ta.VeloxType_INTEGER(), ta.VeloxType_INTEGER()],
)
)
col.child_at(0).append(1)
col.child_at(1).append(10)
col.set_length(1)
col.child_at(0).append(2)
col.child_at(1).append(20)
col.set_length(2)
col._export_to_arrow(ptr_array)
self.assertEqual(c_array.length, 2)
self.assertEqual(c_array.null_count, 0)
self.assertEqual(c_array.n_buffers, 1)
self.assertEqual(c_array.n_children, 2)
self.assertNotEqual(c_array.release, ffi.NULL)
# pyre-fixme[16]: `pa.StructArray` has no attribute `_import_from_c`.
s = pa.StructArray._import_from_c(
ptr_array,
pa.struct(
[
pa.field("f1", pa.int32(), nullable=False),
pa.field("f2", pa.int32(), nullable=False),
]
),
)
self.assertTrue(isinstance(s, pa.StructArray))
self.assertEqual(len(s), len(col))
self.assertEqual(pa.StructArray.field(s, 0).to_pylist(), [1, 2])
self.assertEqual(pa.StructArray.field(s, 1).to_pylist(), [10, 20])
def test_FromArrow_Numerical(self) -> None:
c_schema = ffi.new("struct ArrowSchema*")
ptr_schema = int(ffi.cast("uintptr_t", c_schema))
c_array = ffi.new("struct ArrowArray*")
ptr_array = int(ffi.cast("uintptr_t", c_array))
a = pa.array([None, 1, 2, None])
# pyre-fixme[16]: Item `Array` of `Union[Array[typing.Any], ChunkedArray]`
# has no attribute `_export_to_c`.
a._export_to_c(ptr_array, ptr_schema)
col = ta._import_from_arrow(ta.VeloxType_BIGINT(), ptr_array, ptr_schema)
self.assertEqual(len(col), 4)
self.assertEqual(col.get_null_count(), 2)
self.assertTrue(col.is_null_at(0))
self.assertEqual(col[1], 1)
self.assertEqual(col[2], 2)
self.assertTrue(col.is_null_at(3))
self.assertEqual(c_array.release, ffi.NULL)
self.assertEqual(c_schema.release, ffi.NULL)
def test_FromArrow_Struct(self) -> None:
c_schema = ffi.new("struct ArrowSchema*")
ptr_schema = int(ffi.cast("uintptr_t", c_schema))
c_array = ffi.new("struct ArrowArray*")
ptr_array = int(ffi.cast("uintptr_t", c_array))
f1 = pa.array([1, 2, 3], type=pa.int64())
f2 = pa.array([True, False, None], type=pa.bool_())
s = pa.StructArray.from_arrays(
# pyre-fixme[6]: In call `pa.StructArray.from_arrays`, for 1st positional only parameter expected `Iterable[Array[typing.Any]]` but got `Iterable[Union[Array[typing.Any], ChunkedArray]]`
[f1, f2],
fields=[
# pyre-fixme[16]: Item `pa.Array` of `typing.Union[pa.Array[typing.Any], pa.ChunkedArray]` has no attribute `type`.
pa.field("f1", f1.type, nullable=False),
# pyre-fixme[16]: Item `pa.Array` of `typing.Union[pa.Array[typing.Any], pa.ChunkedArray]` has no attribute `type`.
pa.field("f2", f2.type, nullable=True),
],
)
# pyre-fixme[16]: Item `Array` of `Union[Array[typing.Any], ChunkedArray]`
# has no attribute `_export_to_c`.
s._export_to_c(ptr_array, ptr_schema)
col = ta._import_from_arrow(
ta.VeloxRowType(
["f1", "f2"],
[ta.VeloxType_INTEGER(), ta.VeloxType_BOOLEAN()],
),
ptr_array,
ptr_schema,
)
self.assertEqual(len(col), 3)
self.assertEqual(col.get_null_count(), 0)
self.assertEqual(col.child_at(0).get_null_count(), 0)
self.assertEqual(col.child_at(1).get_null_count(), 1)
self.assertEqual(col.type().name_of(0), "f1")
self.assertEqual(col.type().name_of(1), "f2")
self.assert_Column(col.child_at(0), [1, 2, 3])
self.assert_Column(col.child_at(1), [True, False, None])
self.assertEqual(c_array.release, ffi.NULL)
self.assertEqual(c_schema.release, ffi.NULL)
def is_same_type(a, b) -> bool:
if isinstance(a, ta.VeloxType_BIGINT):
return isinstance(b, ta.VeloxType_BIGINT)
if isinstance(a, ta.VeloxType_VARCHAR):
return isinstance(b, ta.VeloxType_VARCHAR)
if isinstance(a, ta.VeloxType_BOOLEAN):
return isinstance(b, ta.VeloxType_BOOLEAN)
if isinstance(a, ta.VeloxArrayType):
return isinstance(b, ta.VeloxArrayType) and is_same_type(
a.element_type(), b.element_type()
)
raise NotImplementedError()
# inference result placeholders: Unresolved marks a type not yet determined
@dataclass(frozen=True)
class Unresolved:
def union(self, other: Unresolved) -> Unresolved:
return other
@dataclass(frozen=True)
class UnresolvedArray(Unresolved):
element_type: Unresolved
def infer_column(data) -> ta.BaseColumn:
inferred_column = _infer_column(data)
if isinstance(inferred_column, Unresolved):
return resolve_column_with_arbitrary_type(inferred_column)
else:
return inferred_column
def resolve_column_with_arbitrary_type(unresolved: Unresolved) -> ta.BaseColumn:
if isinstance(unresolved, UnresolvedArray):
element = resolve_column_with_arbitrary_type(unresolved.element_type)
col = ta.Column(ta.VeloxArrayType(element.type()))
col.append(element)
return col
else:
return ta.Column(ta.VeloxType_BIGINT())
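# Illustration of the fallback above (mirrored by the tests further down):
# when no concrete element is ever observed, unresolved types default to
# BIGINT at every nesting level, e.g.
#   infer_column([])   -> Column of VeloxType_BIGINT
#   infer_column([[]]) -> Column of VeloxArrayType(VeloxType_BIGINT)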
def get_union_type(inferred_columns: List[Union[ta.BaseColumn, Unresolved, None]]):
unresolved_item_type = None
resolved_item_type = None
for item_col in inferred_columns:
if item_col is None:
pass
elif isinstance(item_col, Unresolved):
if unresolved_item_type is None:
unresolved_item_type = item_col
else:
unresolved_item_type = unresolved_item_type.union(item_col)
elif resolved_item_type is None:
resolved_item_type = item_col.type()
else:
assert is_same_type(resolved_item_type, item_col.type())
if resolved_item_type is None:
if unresolved_item_type is None:
return None
else:
return unresolved_item_type
else:
return resolved_item_type
def _infer_column(data) -> Union[ta.BaseColumn, Unresolved, None]:
if data is None:
return None
assert isinstance(data, list)
non_null_item = next((item for item in data if item is not None), None)
if non_null_item is None:
return Unresolved()
else:
if isinstance(non_null_item, list):
inferred_columns = [_infer_column(item) for item in data]
union_type = get_union_type(inferred_columns)
if union_type is None:
return Unresolved()
elif isinstance(union_type, Unresolved):
return UnresolvedArray(union_type)
else:
resolved_item_type = union_type
col = ta.Column(ta.VeloxArrayType(resolved_item_type))
for item_col, item in zip(inferred_columns, data):
if item is None:
resolved_item_col = None
elif isinstance(item_col, Unresolved):
resolved_item_col = resolve_column(item, resolved_item_type)
else:
resolved_item_col = item_col
if resolved_item_col is None:
col.append_null()
else:
col.append(resolved_item_col)
return col
elif isinstance(non_null_item, dict):
keys_array = []
values_array = []
for item in data:
if item is None:
keys_array.append(None)
values_array.append(None)
elif isinstance(item, dict):
keys_array.append(list(item.keys()))
values_array.append(list(item.values()))
else:
raise ValueError("non-dict item in dict list")
inferred_keys_array_columns = _infer_column(keys_array)
inferred_values_array_columns = _infer_column(values_array)
keys_array_type = inferred_keys_array_columns.type()
values_array_type = inferred_values_array_columns.type()
if isinstance(keys_array_type, ta.VeloxArrayType) and isinstance(
values_array_type,
ta.VeloxArrayType,
):
col = ta.Column(
ta.VeloxMapType(
keys_array_type.element_type(), values_array_type.element_type()
)
)
for item in data:
if item is None:
col.append_null()
else:
key_col = ta.Column(keys_array_type.element_type())
value_col = ta.Column(values_array_type.element_type())
for key, value in item.items():
key_col.append(key)
if value is None:
value_col.append_null()
else:
value_col.append(value)
col.append(key_col, value_col)
return col
else:
raise NotImplementedError()
else:
type_ = {
int: ta.VeloxType_BIGINT(),
float: ta.VeloxType_REAL(),
str: ta.VeloxType_VARCHAR(),
bool: ta.VeloxType_BOOLEAN(),
}.get(type(non_null_item))
if type_ is None:
raise NotImplementedError(f"Cannot infer {type(non_null_item)}")
else:
col = ta.Column(type_)
for item in data:
if item is None:
col.append_null()
else:
col.append(item)
return col
def resolve_column(item, type_) -> ta.BaseColumn:
col = ta.Column(type_)
for value in item:
if value is None:
col.append_null()
else:
if type(type_) in (
ta.VeloxType_INTEGER,
ta.VeloxType_VARCHAR,
ta.VeloxType_BOOLEAN,
):
col.append(value)
elif type(type_) == ta.VeloxArrayType:
col.append(resolve_column(value, type_.element_type()))
else:
raise NotImplementedError(f"{type(type_)}")
return col
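# Example (sketch): infer_column([{"a": 1}, None]) yields a
# MAP(VARCHAR, BIGINT) column with one null row -- keys and values are
# inferred separately via the two synthesized array columns above.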
class TestInferColumn(unittest.TestCase):
def test_infer_simple(self) -> None:
data = [1, 2, 3]
type_ = infer_column(data).type()
self.assertTrue(is_same_type(type_, ta.VeloxType_BIGINT()))
def test_infer_array(self) -> None:
data = [[1], [2], [3]]
type_ = infer_column(data).type()
self.assertTrue(is_same_type(type_, ta.VeloxArrayType(ta.VeloxType_BIGINT())))
def test_infer_nested_array(self) -> None:
data = [[[1]], [[2], [5]], [[3, 4]]]
type_ = infer_column(data).type()
self.assertTrue(
is_same_type(
type_,
ta.VeloxArrayType(ta.VeloxArrayType(ta.VeloxType_BIGINT())),
)
)
def test_unresolved(self) -> None:
data = []
type_ = infer_column(data).type()
self.assertTrue(is_same_type(type_, ta.VeloxType_BIGINT()))
def test_nested_unresolved1(self) -> None:
data = [[]]
type_ = infer_column(data).type()
self.assertTrue(is_same_type(type_, ta.VeloxArrayType(ta.VeloxType_BIGINT())))
def test_nested_unresolved2(self) -> None:
data = [None]
type_ = infer_column(data).type()
self.assertTrue(is_same_type(type_, ta.VeloxType_BIGINT()))
def test_nested_unresolved3(self) -> None:
data = [[None]]
type_ = infer_column(data).type()
self.assertTrue(is_same_type(type_, ta.VeloxArrayType(ta.VeloxType_BIGINT())))
def test_propagate_unresolved(self) -> None:
data = [None, [], [1], [1, None, 2], None]
type_ = infer_column(data).type()
self.assertTrue(is_same_type(type_, ta.VeloxArrayType(ta.VeloxType_BIGINT())))
class TestArrayColumns(BaseTestColumns):
def test_ArrayColumnInt64(self) -> None:
data = [None, [], [1], [1, None, 2], None]
col = infer_column(data)
self.assert_Column(col.elements(), [1, 1, None, 2])
for sliced_col, sliced_data in (
(col, data),
(col.slice(2, 2), data[2:4]),
(col.slice(1, 4), data[1:5]),
):
self.assertEqual(len(sliced_col), len(sliced_data))
for i, item in enumerate(sliced_data):
if item is None:
self.assertTrue(sliced_col.is_null_at(i))
else:
self.assertFalse(sliced_col.is_null_at(i))
self.assertEqual(len(sliced_col[i]), len(item))
for j, value in enumerate(item):
if value is None:
self.assertTrue(sliced_col[i].is_null_at(j))
else:
self.assertFalse(sliced_col[i].is_null_at(j))
# pyre-fixme[16]: Item `None` of `Union[None,
# List[typing.Any], List[int],
# List[typing.Optional[int]]]` has no attribute
# `__getitem__`.
self.assertEqual(sliced_col[i][j], sliced_data[i][j])
def test_NestedArrayColumnInt64(self) -> None:
data = [[[1, 2], None, [3, 4]], [[4], [5]]]
col = infer_column(data)
self.assertEqual(col[0][0][0], 1)
self.assertEqual(col[0][0][1], 2)
self.assertTrue(col[0].is_null_at(1))
self.assertEqual(col[0][2][0], 3)
self.assertEqual(col[0][2][1], 4)
self.assertEqual(col[1][0][0], 4)
self.assertEqual(col[1][1][0], 5)
def test_NestedArrayColumnString(self) -> None:
data = [[], [[]], [["a"]], [["b", "c"], ["d", "e", "f"]]]
col = infer_column(data)
self.assertEqual(len(col[0]), 0)
self.assertEqual(len(col[1]), 1)
self.assertEqual(len(col[1][0]), 0)
self.assertEqual(col[2][0][0], "a")
self.assertEqual(col[3][0][0], "b")
self.assertEqual(col[3][0][1], "c")
self.assertEqual(col[3][1][0], "d")
self.assertEqual(col[3][1][1], "e")
self.assertEqual(col[3][1][2], "f")
class TestMapColumns(unittest.TestCase):
def test_MapColumnInt64(self) -> None:
data = [{"a": 1, "b": 2}, {"c": 3, "d": 4, "e": 5}]
col = infer_column(data)
self.assertEqual(len(col), 2)
keys = col.keys()
self.assertEqual(len(keys), 2)
self.assertEqual(len(keys[0]), 2)
self.assertEqual(keys[0][0], "a")
self.assertEqual(keys[0][1], "b")
self.assertEqual(len(keys[1]), 3)
self.assertEqual(keys[1][0], "c")
self.assertEqual(keys[1][1], "d")
self.assertEqual(keys[1][2], "e")
values = col.values()
self.assertEqual(len(values), 2)
self.assertEqual(len(values[0]), 2)
self.assertEqual(values[0][0], 1)
self.assertEqual(values[0][1], 2)
self.assertEqual(len(values[1]), 3)
self.assertEqual(values[1][0], 3)
self.assertEqual(values[1][1], 4)
self.assertEqual(values[1][2], 5)
sliced_col = col.slice(1, 1)
self.assertEqual(len(sliced_col), 1)
keys = sliced_col.keys()
self.assertEqual(len(keys), 1)
self.assertEqual(len(keys[0]), 3)
self.assertEqual(keys[0][0], "c")
self.assertEqual(keys[0][1], "d")
self.assertEqual(keys[0][2], "e")
values = sliced_col.values()
self.assertEqual(len(values), 1)
self.assertEqual(len(values[0]), 3)
self.assertEqual(values[0][0], 3)
self.assertEqual(values[0][1], 4)
self.assertEqual(values[0][2], 5)
def test_MapColumnInt64_with_none(self) -> None:
data = [None, {"a": 1, "b": 2}, {"c": None, "d": 4, "e": 5}]
col = infer_column(data)
self.assertEqual(len(col), 3)
self.assertTrue(col.is_null_at(0))
keys = col.keys()
self.assertEqual(len(keys), 3)
self.assertEqual(len(keys[1]), 2)
self.assertEqual(keys[1][0], "a")
self.assertEqual(keys[1][1], "b")
self.assertEqual(len(keys[2]), 3)
self.assertEqual(keys[2][0], "c")
self.assertEqual(keys[2][1], "d")
self.assertEqual(keys[2][2], "e")
values = col.values()
self.assertEqual(len(values), 3)
self.assertEqual(len(values[1]), 2)
self.assertEqual(values[1][0], 1)
self.assertEqual(values[1][1], 2)
self.assertEqual(len(values[2]), 3)
self.assertTrue(values[2].is_null_at(0))
self.assertEqual(values[2][1], 4)
self.assertEqual(values[2][2], 5)
sliced_col = col.slice(1, 1)
self.assertEqual(len(sliced_col), 1)
keys = sliced_col.keys()
self.assertEqual(len(keys), 1)
self.assertEqual(len(keys[0]), 2)
self.assertEqual(keys[0][0], "a")
self.assertEqual(keys[0][1], "b")
values = sliced_col.values()
self.assertEqual(len(values), 1)
self.assertEqual(len(values[0]), 2)
self.assertEqual(values[0][0], 1)
self.assertEqual(values[0][1], 2)
class TestRowColumns(unittest.TestCase):
def test_RowColumn1(self) -> None:
col = ta.Column(
ta.VeloxRowType(
["a", "b"],
[ta.VeloxType_INTEGER(), ta.VeloxType_VARCHAR()],
)
)
col.child_at(0).append(1)
col.child_at(1).append("x")
col.set_length(1)
col.child_at(0).append(2)
col.child_at(1).append("y")
col.set_length(2)
self.assertEqual(col.type().name_of(0), "a")
self.assertEqual(col.type().name_of(1), "b")
self.assertEqual(col.child_at(col.type().get_child_idx("a"))[0], 1)
self.assertEqual(col.child_at(col.type().get_child_idx("b"))[0], "x")
self.assertEqual(col.child_at(col.type().get_child_idx("a"))[1], 2)
self.assertEqual(col.child_at(col.type().get_child_idx("b"))[1], "y")
sliced_col = col.slice(1, 1)
self.assertEqual(
sliced_col.child_at(sliced_col.type().get_child_idx("a"))[0], 2
)
self.assertEqual(
sliced_col.child_at(sliced_col.type().get_child_idx("b"))[0], "y"
)
def test_set_child(self) -> None:
col = ta.Column(
ta.VeloxRowType(
["a", "b"],
[ta.VeloxType_INTEGER(), ta.VeloxType_VARCHAR()],
)
)
col.child_at(0).append(1)
col.child_at(1).append("x")
col.set_length(1)
col.child_at(0).append(2)
col.child_at(1).append("y")
col.set_length(2)
new_child = infer_column([3, 4])
col.set_child(0, new_child)
self.assertEqual(col.type().name_of(0), "a")
self.assertEqual(col.type().name_of(1), "b")
self.assertEqual(col.child_at(col.type().get_child_idx("a"))[0], 3)
self.assertEqual(col.child_at(col.type().get_child_idx("b"))[0], "x")
self.assertEqual(col.child_at(col.type().get_child_idx("a"))[1], 4)
self.assertEqual(col.child_at(col.type().get_child_idx("b"))[1], "y")
def test_nested_row(self) -> None:
col = ta.Column(
ta.VeloxRowType(
["a", "b"],
[
ta.VeloxType_INTEGER(),
ta.VeloxRowType(
["b1", "b2"],
[ta.VeloxType_VARCHAR(), ta.VeloxType_INTEGER()],
),
],
)
)
col.child_at(0).append(1)
col.child_at(1).child_at(0).append("21")
col.child_at(1).child_at(1).append(22)
self.assertEqual(col.type().get_child_idx("a"), 0)
self.assertEqual(col.type().get_child_idx("b"), 1)
self.assertEqual(col.child_at(1).type().get_child_idx("b1"), 0)
self.assertEqual(col.child_at(1).type().get_child_idx("b2"), 1)
self.assertEqual(col.child_at(0)[0], 1)
self.assertEqual(col.child_at(1).child_at(0)[0], "21")
self.assertEqual(col.child_at(1).child_at(1)[0], 22)
if __name__ == "__main__":
unittest.main()
| 38.985641
| 198
| 0.580122
|
53f978bd7c2f2f08ac846d0e0272334b6fb10bb0
| 4,019
|
py
|
Python
|
com.ppc.Bot/devices/lock/lock.py
|
slrobertson1/botlab
|
fef6005c57010a30ed8d1d599d15644dd7c870d8
|
[
"Apache-2.0"
] | null | null | null |
com.ppc.Bot/devices/lock/lock.py
|
slrobertson1/botlab
|
fef6005c57010a30ed8d1d599d15644dd7c870d8
|
[
"Apache-2.0"
] | null | null | null |
com.ppc.Bot/devices/lock/lock.py
|
slrobertson1/botlab
|
fef6005c57010a30ed8d1d599d15644dd7c870d8
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on February 8, 2018
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
from devices.device import Device
class LockDevice(Device):
"""Lock Device"""
# List of Device Types this class is compatible with
DEVICE_TYPES = [9010]
# Measurement name for the lock status
MEASUREMENT_NAME_LOCK_STATUS = 'lockStatus'
MEASUREMENT_PARAMETERS_LIST = [
MEASUREMENT_NAME_LOCK_STATUS
]
# Possible lock states
STATUS_PARTIALLY_LOCKED = 0
STATUS_LOCKED = 1
STATUS_UNLOCKED = 2
# Goals
GOAL_INTELLIGENT_AUTO_LOCK = 101
GOAL_STATIC_AUTO_LOCK = 102
def get_device_type_name(self):
"""
:return: the name of this device type in the given language, for example, "Entry Sensor"
"""
# NOTE: Abstract device type name, doesn't show up in end user documentation
return _("Lock")
def get_image_name(self):
"""
:return: the font icon name of this device type
"""
return "lock"
def did_unlock(self, botengine=None):
"""
Did the door just unlock
:param botengine:
:return: True if the door just unlocked
"""
if self.MEASUREMENT_NAME_LOCK_STATUS in self.last_updated_params:
return self.is_unlocked(botengine)
return False
def did_lock(self, botengine=None):
"""
Did the door just lock
:param botengine:
:return: True if the door just locked
"""
if self.MEASUREMENT_NAME_LOCK_STATUS in self.last_updated_params:
return self.is_fully_locked(botengine)
return False
def did_partially_lock(self, botengine=None):
"""
Did the door just partially lock
:param botengine:
        :return: True if the door just partially locked
"""
if self.MEASUREMENT_NAME_LOCK_STATUS in self.last_updated_params:
return self.is_partially_locked(botengine)
return False
def is_fully_locked(self, botengine=None):
"""
Note that just because a door is not fully locked doesn't mean it's actually unlocked. It could be partially locked.
Test conditions explicitly.
:param botengine:
:return: True if the door is fully locked; None if the measurement doesn't exist
"""
if self.MEASUREMENT_NAME_LOCK_STATUS in self.measurements:
return self.measurements[self.MEASUREMENT_NAME_LOCK_STATUS][0][0] == self.STATUS_LOCKED
return None
def is_partially_locked(self, botengine=None):
"""
:param botengine:
:return: True if the door is partially but not fully locked; None if the measurement doesn't exist
"""
if self.MEASUREMENT_NAME_LOCK_STATUS in self.measurements:
return self.measurements[self.MEASUREMENT_NAME_LOCK_STATUS][0][0] == self.STATUS_PARTIALLY_LOCKED
return None
def is_unlocked(self, botengine=None):
"""
Note that just because a door is not unlocked doesn't mean it is fully locked. It could be partially locked.
Test conditions explicitly.
:param botengine:
:return: True if the door is unlocked; None if the measurement doesn't exist
"""
if self.MEASUREMENT_NAME_LOCK_STATUS in self.measurements:
return self.measurements[self.MEASUREMENT_NAME_LOCK_STATUS][0][0] == self.STATUS_UNLOCKED
return None
def lock(self, botengine):
"""
Lock the door
:param botengine: BotEngine environment
"""
botengine.send_command(self.device_id, self.MEASUREMENT_NAME_LOCK_STATUS, self.STATUS_LOCKED)
def unlock(self, botengine):
"""
Unlock the door
:param botengine: BotEngine environment
"""
botengine.send_command(self.device_id, self.MEASUREMENT_NAME_LOCK_STATUS, self.STATUS_UNLOCKED)
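# Usage sketch (illustrative; assumes a botengine environment and a
# LockDevice instance named `lock_device` -- neither is part of this module):
#
#   if lock_device.did_unlock(botengine):
#       ...  # e.g. start an auto-relock timer
#   elif lock_device.is_partially_locked(botengine):
#       lock_device.lock(botengine)  # drive the bolt fully home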
| 30.915385
| 124
| 0.657129
|
92dab6b800ffa79a99ab39d22c9202555d66fba6
| 93
|
py
|
Python
|
src/friendapp/apps.py
|
Darshee-m/Friend_Finder_WebApp
|
98c30386df9780b7fc0225f1df777aa0be704f3f
|
[
"bzip2-1.0.6"
] | 15
|
2020-12-23T13:56:49.000Z
|
2021-12-10T11:04:23.000Z
|
src/friendapp/apps.py
|
Darshee-m/Friend_Finder_WebApp
|
98c30386df9780b7fc0225f1df777aa0be704f3f
|
[
"bzip2-1.0.6"
] | 41
|
2021-03-19T07:51:48.000Z
|
2021-11-22T09:45:46.000Z
|
src/friendapp/apps.py
|
Darshee-m/Friend_Finder_WebApp
|
98c30386df9780b7fc0225f1df777aa0be704f3f
|
[
"bzip2-1.0.6"
] | 3
|
2021-03-24T15:18:24.000Z
|
2021-09-11T14:51:35.000Z
|
from django.apps import AppConfig
class FriendappConfig(AppConfig):
name = 'friendapp'
| 15.5
| 33
| 0.763441
|
73f1ee0d5cd868e51b13a348211443fb5e4f8cbd
| 4,467
|
py
|
Python
|
learn_stem/python/utilities/pyprogrammer.py
|
wgong/open_source_learning
|
ccf819626bf17125abcee8edf66401e4a0d2dc20
|
[
"Apache-2.0"
] | 1
|
2019-04-11T13:27:29.000Z
|
2019-04-11T13:27:29.000Z
|
learn_stem/python/utilities/pyprogrammer.py
|
wgong/open_source_learning
|
ccf819626bf17125abcee8edf66401e4a0d2dc20
|
[
"Apache-2.0"
] | 4
|
2017-03-09T18:42:19.000Z
|
2017-03-19T21:06:23.000Z
|
learn_stem/python/utilities/pyprogrammer.py
|
wgong/open_source_learning
|
ccf819626bf17125abcee8edf66401e4a0d2dc20
|
[
"Apache-2.0"
] | null | null | null |
# Evolution of a Python programmer.py
#Newbie programmer
def factorial_newbie(x):
if x == 0:
return 1
else:
        return x * factorial_newbie(x - 1)
print factorial_newbie(6)
#First year programmer, studied Pascal
def factorial_pascal(x):
result = 1
i = 2
while i <= x:
result = result * i
i = i + 1
return result
print factorial_pascal(6)
#First year programmer, studied C
def factorial_c(x):
result = i = 1;
while (i <= x):
result *= i;
i += 1;
return result;
print(factorial_c(6))
#First year programmer, SICP
@tailcall
def factorial_sicp(x, acc=1):
if (x > 1):
        return factorial_sicp(x - 1, x * acc)
else:
return acc
print(factorial_sicp(6))
#First year programmer, Python
def factorial(x):
res = 1
for i in xrange(2, x + 1):
res *= i
return res
print factorial(6)
#Lazy Python programmer
def factorial_lazy(x):
    return x > 1 and x * factorial_lazy(x - 1) or 1
print factorial_lazy(6)
#Lazier Python programmer
f = lambda x: x and x * f(x - 1) or 1
print f(6)
#Python expert programmer
# does not work using Canopy
import operator as op
import functional as f
fact = lambda x: f.foldl(op.mul, 1, xrange(2, x + 1))
print fact(6)
#Python hacker
import sys
#@tailcall
def fact(x, acc=1):
if x: return fact(x.__sub__(1), acc.__mul__(x))
return acc
sys.stdout.write(str(fact(6)) + '\n')
#EXPERT PROGRAMMER
import c_math # ImportError: No module named c_math
fact = c_math.fact
print fact(6)
#ENGLISH EXPERT PROGRAMMER
import c_maths
fact = c_maths.fact
print fact(6)
#Web designer
def factorial(x):
#-------------------------------------------------
#--- Code snippet from The Math Vault ---
#--- Calculate factorial (C) Arthur Smith 1999 ---
#-------------------------------------------------
result = str(1)
i = 1 #Thanks Adam
while i <= x:
#result = result * i #It's faster to use *=
#result = str(result * result + i)
#result = int(result *= i) #??????
        result = str(int(result) * i)
#result = int(str(result) * i)
i = i + 1
return result
print factorial(6)
#Unix programmer
# incomplete
import os
def fact(x):
os.system('factorial ' + str(x))
fact(6)
#Windows programmer
NULL = None
def CalculateAndPrintFactorialEx(dwNumber,
hOutputDevice,
lpLparam,
lpWparam,
lpsscSecurity,
*dwReserved):
if lpsscSecurity != NULL:
return NULL #Not implemented
dwResult = dwCounter = 1
while dwCounter <= dwNumber:
dwResult *= dwCounter
dwCounter += 1
hOutputDevice.write(str(dwResult))
hOutputDevice.write('\n')
return 1
import sys
CalculateAndPrintFactorialEx(6, sys.stdout, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)
#Enterprise programmer
def new(cls, *args, **kwargs):
return cls(*args, **kwargs)
class Number(object):
pass
class IntegralNumber(int, Number):
def toInt(self):
return new (int, self)
class InternalBase(object):
def __init__(self, base):
self.base = base.toInt()
def getBase(self):
return new (IntegralNumber, self.base)
class MathematicsSystem(object):
def __init__(self, ibase):
Abstract
@classmethod
def getInstance(cls, ibase):
try:
cls.__instance
except AttributeError:
cls.__instance = new (cls, ibase)
return cls.__instance
class StandardMathematicsSystem(MathematicsSystem):
def __init__(self, ibase):
if ibase.getBase() != new (IntegralNumber, 2):
raise NotImplementedError
self.base = ibase.getBase()
def calculateFactorial(self, target):
result = new (IntegralNumber, 1)
i = new (IntegralNumber, 2)
while i <= target:
result = result * i
i = i + new (IntegralNumber, 1)
return result
print StandardMathematicsSystem.getInstance(new (InternalBase, new (IntegralNumber, 2))).calculateFactorial(new (IntegralNumber, 6))
| 24.145946
| 140
| 0.565704
|
f8f212f258c8b009406970eb693d0b3788589c5a
| 384
|
py
|
Python
|
sign.py
|
abinash-boruah/Spam-SMS-classifier
|
4c57872c3c924fefad34c4d82fe7fa4122f6aa4a
|
[
"Unlicense"
] | null | null | null |
sign.py
|
abinash-boruah/Spam-SMS-classifier
|
4c57872c3c924fefad34c4d82fe7fa4122f6aa4a
|
[
"Unlicense"
] | null | null | null |
sign.py
|
abinash-boruah/Spam-SMS-classifier
|
4c57872c3c924fefad34c4d82fe7fa4122f6aa4a
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 10:06:08 2019
@author: abinash boruah
"""
import numpy as np
import matplotlib.pyplot as plt
def sign(x):
l=[]
for i in x:
if i>0:
l1=1
l.append(l1)
else:
l2=0
l.append(l2)
return l
x = np.arange(-8,8,1)
y = sign(x)
plt.plot(x,y)
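# Equivalent vectorized form (a sketch; produces the same 0/1 step values
# as sign() above, so the assertion holds):
y_vec = np.where(x > 0, 1, 0)
assert list(y_vec) == y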
| 15.36
| 36
| 0.466146
|
bad7b13c5c6da7879bb27515660990d401cec8b0
| 2,733
|
py
|
Python
|
src/mlpro/rl/pool/envs/multigeorobot/multigeorobot.py
|
fhswf/MLPro
|
e944b69bed9c2d5548677711270e4a4fe868aea9
|
[
"Apache-2.0"
] | 5
|
2022-01-31T15:52:19.000Z
|
2022-03-21T18:34:27.000Z
|
src/mlpro/rl/pool/envs/multigeorobot/multigeorobot.py
|
fhswf/MLPro
|
e944b69bed9c2d5548677711270e4a4fe868aea9
|
[
"Apache-2.0"
] | 61
|
2021-12-17T13:03:59.000Z
|
2022-03-31T10:24:37.000Z
|
src/mlpro/rl/pool/envs/multigeorobot/multigeorobot.py
|
fhswf/MLPro
|
e944b69bed9c2d5548677711270e4a4fe868aea9
|
[
"Apache-2.0"
] | null | null | null |
## -------------------------------------------------------------------------------------------------
## -- Project : MLPro - A Synoptic Framework for Standardized Machine Learning Tasks
## -- Package : mlpro
## -- Module : multigeorobot.py
## -------------------------------------------------------------------------------------------------
## -- History :
## -- yyyy-mm-dd Ver. Auth. Description
## -- 2021-12-19 0.0.0 MRD Creation
## -- 2021-12-19 1.0.0 MRD Released first version
## -- 2022-02-25 1.0.1 SY Refactoring due to auto generated ID in class Dimension
## -- 2022-04-29 1.0.2 MRD Wrap the environment with WrEnvGYM2MLPro
## -------------------------------------------------------------------------------------------------
"""
Ver. 1.0.2 (2022-04-29)
This module provides an environment for multi geometry robot.
"""
from mlpro.rl.models import *
import mlpro
import rospy
import subprocess
from mlpro.wrappers.openai_gym import WrEnvGYM2MLPro
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
class MultiGeo(WrEnvGYM2MLPro):
"""
    Multi geometry robot environment, wrapped as an MLPro environment via WrEnvGYM2MLPro.
"""
C_NAME = 'MultiGeo'
C_LATENCY = timedelta(0,5,0)
C_INFINITY = np.finfo(np.float32).max
def __init__(self, p_seed=0, p_logging=True):
roscore = subprocess.Popen('roscore')
rospy.init_node('multi_geo_robot_training', anonymous=True, log_level=rospy.WARN)
LoadYamlFileParamsTest(rospackage_name="multi_geo_robot_training",
rel_path_from_package_to_file="config",
yaml_file_name="multi_geo_robot.yaml")
ros_ws_path = mlpro.rl.pool.envs.multigeorobot.__file__.replace("/__init__.py", "")
rospy.set_param('ros_ws_path', ros_ws_path)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param(
'/multi_geo_robot/task_and_robot_environment_name')
max_step_episode = rospy.get_param(
'/multi_geo_robot/max_iterations')
env = StartOpenAI_ROS_Environment(task_and_robot_environment_name, max_step_episode)
env.seed(p_seed)
super().__init__(p_gym_env=env)
## -------------------------------------------------------------------------------------------------
def compute_success(self, p_state: State) -> bool:
obs = p_state.get_values()
close = np.allclose(a=obs[:3],
b=obs[3:],
atol=0.1)
if close:
self._state.set_terminal(True)
return close
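# Note (sketch; the observation layout is an assumption inherited from the
# underlying openai_ros task environment): success is declared once the
# end-effector position (obs[:3]) is within 0.1 of the target (obs[3:]) on
# every axis, via np.allclose with atol=0.1.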
| 39.042857
| 100
| 0.555434
|
35a046f8d8366225d17fd3244dffb297cbc03c8f
| 337
|
py
|
Python
|
test/__init__.py
|
elwirth/cgavi
|
ceeba31bfc28ea3a32476677041c4b9e9ed59dbc
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
elwirth/cgavi
|
ceeba31bfc28ea3a32476677041c4b9e9ed59dbc
|
[
"MIT"
] | null | null | null |
test/__init__.py
|
elwirth/cgavi
|
ceeba31bfc28ea3a32476677041c4b9e9ed59dbc
|
[
"MIT"
] | null | null | null |
# To run all the tests, run: python -m unittest in the terminal in the project directory.
from os.path import dirname, basename, isfile, join
import glob
# Collect every sibling .py module so that `from test import *` loads them all.
modules = glob.glob(join(dirname(__file__), "*.py"))
__all__ = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
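# Example (illustrative names): with test_foo.py and test_bar.py beside this
# file, __all__ becomes ["test_foo", "test_bar"] (glob order), so
# `from test import *` imports every test module.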
| 42.125
| 92
| 0.735905
|
e7d7cd028d97a95271462da815ef9d0eba6b3d7a
| 2,121
|
py
|
Python
|
pyqtgraph/graphicsItems/GraphicsWidget.py
|
hishizuka/pyqtgraph
|
4820625d93ffb41f324431d0d29b395cf91f339e
|
[
"MIT"
] | 2,762
|
2015-01-02T14:34:10.000Z
|
2022-03-30T14:06:07.000Z
|
pyqtgraph/graphicsItems/GraphicsWidget.py
|
hishizuka/pyqtgraph
|
4820625d93ffb41f324431d0d29b395cf91f339e
|
[
"MIT"
] | 1,901
|
2015-01-12T03:20:30.000Z
|
2022-03-31T16:33:36.000Z
|
pyqtgraph/graphicsItems/GraphicsWidget.py
|
hishizuka/pyqtgraph
|
4820625d93ffb41f324431d0d29b395cf91f339e
|
[
"MIT"
] | 1,038
|
2015-01-01T04:05:49.000Z
|
2022-03-31T11:57:51.000Z
|
# -*- coding: utf-8 -*-
from ..Qt import QtGui
from .GraphicsItem import GraphicsItem
__all__ = ['GraphicsWidget']
class GraphicsWidget(GraphicsItem, QtGui.QGraphicsWidget):
_qtBaseClass = QtGui.QGraphicsWidget
def __init__(self, *args, **kargs):
"""
**Bases:** :class:`GraphicsItem <pyqtgraph.GraphicsItem>`, :class:`QtGui.QGraphicsWidget`
Extends QGraphicsWidget with several helpful methods and workarounds for PyQt bugs.
Most of the extra functionality is inherited from :class:`GraphicsItem <pyqtgraph.GraphicsItem>`.
"""
QtGui.QGraphicsWidget.__init__(self, *args, **kargs)
GraphicsItem.__init__(self)
## done by GraphicsItem init
#GraphicsScene.registerObject(self) ## workaround for pyqt bug in graphicsscene.items()
# Removed due to https://bugreports.qt-project.org/browse/PYSIDE-86
#def itemChange(self, change, value):
## BEWARE: Calling QGraphicsWidget.itemChange can lead to crashing!
##ret = QtGui.QGraphicsWidget.itemChange(self, change, value) ## segv occurs here
## The default behavior is just to return the value argument, so we'll do that
## without calling the original method.
#ret = value
#if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]:
#self._updateView()
#return ret
def setFixedHeight(self, h):
self.setMaximumHeight(h)
self.setMinimumHeight(h)
def setFixedWidth(self, h):
self.setMaximumWidth(h)
self.setMinimumWidth(h)
def height(self):
return self.geometry().height()
def width(self):
return self.geometry().width()
def boundingRect(self):
br = self.mapRectFromParent(self.geometry()).normalized()
#print "bounds:", br
return br
def shape(self): ## No idea why this is necessary, but rotated items do not receive clicks otherwise.
p = QtGui.QPainterPath()
p.addRect(self.boundingRect())
#print "shape:", p.boundingRect()
return p
| 36.568966
| 106
| 0.649222
|
e4841314effe514e33d22b8ba23cd0f37b1e78c6
| 301
|
py
|
Python
|
data/multilingual/Latn.YAO/Mono_8/pdf_to_json_test_Latn.YAO_Mono_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.YAO/Mono_8/pdf_to_json_test_Latn.YAO_Mono_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.YAO/Mono_8/pdf_to_json_test_Latn.YAO_Mono_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.YAO/Mono_8/udhr_Latn.YAO_Mono_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.1
| 71
| 0.810631
|
a09c741f358fcfeccbe544463ce71221fb5c298f
| 11,857
|
py
|
Python
|
vpc_core_infra/vpcx_cdk/vpc_stack.py
|
aws-samples/amz-vpc-provisioning-api-sls
|
d6779d24f8f0aa46e53cee17b7d94af401daeb18
|
[
"MIT-0"
] | null | null | null |
vpc_core_infra/vpcx_cdk/vpc_stack.py
|
aws-samples/amz-vpc-provisioning-api-sls
|
d6779d24f8f0aa46e53cee17b7d94af401daeb18
|
[
"MIT-0"
] | null | null | null |
vpc_core_infra/vpcx_cdk/vpc_stack.py
|
aws-samples/amz-vpc-provisioning-api-sls
|
d6779d24f8f0aa46e53cee17b7d94af401daeb18
|
[
"MIT-0"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# pylint: disable=line-too-long
"""Class to handle the Cloudformatiom stack template creation"""
from aws_cdk import (
aws_ec2 as ec2,
core
)
from typing import List
from .vpc_context import VpcContext, VpcType, VpcConnectivity
class VpcStack(core.Stack):
"""
VPC generated using the CDK
"""
    # These are used in Subnet / RouteTable names; aside from inferring it from
    # the configuration, the name is the only way to determine the intended usage.
PRIVATE_TYPE_NAME = "Private"
PUBLIC_TYPE_NAME = "Public"
def __init__(self, scope: core.Construct, construct_id: str,
vpc_context: VpcContext, external_configs: dict, **kwargs) -> None:
# Initialize VPC
super().__init__(scope, construct_id, **kwargs)
# Collect input args
self.net_config = external_configs["net_config"]
self.vpc_context = vpc_context
vpc = self.make_vpc()
#self.make_vpc_flow_logs(vpc.ref)
private_route_table, private_subnets = self.make_subnets(vpc.ref,
vpc_context.vpc_private_subnet_cidrs,
self.PRIVATE_TYPE_NAME)
public_route_table, public_subnets = self.make_subnets(vpc.ref,
vpc_context.vpc_public_subnet_cidrs,
self.PUBLIC_TYPE_NAME,
len(private_subnets))
self.make_s3_vpc_gateway_endpoint(vpc.ref, private_route_table.ref)
#self.make_vpc_peering(vpc.ref, private_route_table, public_route_table)
has_public_setup = (public_route_table and public_subnets)
# Public Specific Config
if has_public_setup:
self.make_internet_gateway(vpc.ref, public_route_table.ref)
# Non-HPC Specific Config
if vpc_context.vpc_type != VpcType.hpc:
self.add_dhcp_options(vpc.ref)
        # When no connectivity type is provided a VGW is created, in addition to explicit requests.
if vpc_context.connectivity_type == VpcConnectivity.vgw:
self.make_vgw(vpc.ref, private_route_table.ref)
# HPC Specific Config (HPC should always have public set up)
# if vpc_context.vpc_type == VpcType.hpc and has_public_setup:
# self.make_nat_gateway(public_subnets[0].ref, private_route_table.ref)
@classmethod
def _get_subnet_id_per_az(cls, subnets: List[ec2.CfnSubnet]):
"""
Returns one subnet from each AZ that is in use
Args:
subnets: list of CfnSubnets
        Returns: list of CfnSubnet ids (refs), one per availability zone in use
"""
one_subnet_per_az = []
used_azs = []
for subnet in subnets:
if subnet.availability_zone not in used_azs:
one_subnet_per_az.append(subnet.ref)
used_azs.append(subnet.availability_zone)
return one_subnet_per_az
def make_vgw(self, vpc_id, private_route_table_id):
"""
Generate VGW connection which will be manually connected elsewhere to a DCG
Args:
vpc_id: VPC CDK ID
private_route_table_id: Private route table id
Returns: nothing
"""
name = f"{self.vpc_context.vpcx_name}VGW"
vgw = ec2.CfnVPNGateway(self, id=name, type="ipsec.1", tags=self.make_tags(("Name", name)))
vgw_attach = ec2.CfnVPCGatewayAttachment(self, id=f"{name}Attachment", vpc_id=vpc_id, vpn_gateway_id=vgw.ref)
vgw_route = ec2.CfnRoute(self,
id=f"{name}Route",
route_table_id=private_route_table_id,
destination_cidr_block=self.net_config["global"]["InternetCidr"],
gateway_id=vgw.ref)
# Needs depends on or will be created prematurely in Cloudformation
vgw_route.add_depends_on(target=vgw_attach)
def make_s3_vpc_gateway_endpoint(self, vpc_id, private_route_table_id):
"""
Generate VPC endpoint
Args:
vpc_id: VPC CDK ID
private_route_table_id: Private route table id
Returns: nothing
"""
ec2.CfnVPCEndpoint(self,
id=f"{self.vpc_context.vpcx_name}S3Gateway",
vpc_id=vpc_id,
route_table_ids=[private_route_table_id],
service_name=f"com.amazonaws.{self.vpc_context.region}.s3")
def make_internet_gateway(self, vpc_id, public_rt_id):
"""
Generate IGW resources
Args:
vpc_id: VPC CDK ID
public_rt_id: public route table id to add IGW routes to
Returns: nothing
"""
igw_name = f"{self.vpc_context.vpcx_name}IGW"
igw = ec2.CfnInternetGateway(self,
id=igw_name,
tags=self.make_tags(("Name", igw_name)))
ec2.CfnVPCGatewayAttachment(self,
id=f"{igw_name}Attachment",
vpc_id=vpc_id,
internet_gateway_id=igw.ref)
ec2.CfnRoute(self,
id=f"{igw_name}Route",
route_table_id=public_rt_id,
destination_cidr_block=self.net_config["global"]["InternetCidr"],
gateway_id=igw.ref)
def make_nat_gateway(self, public_subnet_id, private_rt_id):
"""
Generate NAT GW resources
Args:
public_subnet_id: a public subnet id to use for hosting NAT
private_rt_id: private route table id to add NAT routes to
Returns: nothing
"""
nat_name = f"{self.vpc_context.vpcx_name}NatGW"
eip_name = f"{nat_name}EIP"
eip = ec2.CfnEIP(self, id=eip_name, domain="vpc", tags=self.make_tags(("Name", eip_name)))
nat_gw = ec2.CfnNatGateway(self,
id=nat_name,
allocation_id=eip.attr_allocation_id,
subnet_id=public_subnet_id,
tags=self.make_tags(("Name", nat_name)))
ec2.CfnRoute(self,
id=f"{nat_name}Route",
route_table_id=private_rt_id,
destination_cidr_block=self.net_config["global"]["InternetCidr"],
nat_gateway_id=nat_gw.ref)
@staticmethod
def make_tags(*specific_tags):
"""
Make tags
Args:
specific_tags: specific tags as tuples of key values to be added to resource
Returns: cfnTag list containing all common tags plus any specific tags added
"""
common_tags = []
tags_all = list(specific_tags) + common_tags
return [core.CfnTag(key=key, value=val) for key, val in tags_all]
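    # Example (sketch): VpcStack.make_tags(("Name", "MyVpc")) returns
    # [core.CfnTag(key="Name", value="MyVpc")] (common_tags is currently empty).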
def make_vpc(self):
"""
Generate a VPC template
Returns: CDK VPC object
"""
# Create initial VPC
vpc = ec2.CfnVPC(self,
id=self.vpc_context.vpcx_name,
cidr_block=self.vpc_context.vpc_top_level_cidr[0],
tags=self.make_tags(("Name", self.vpc_context.vpcx_name),
("vpcx", str(self.vpc_context.vpc_type)))
)
# Extend VPC top-level CIDR
if len(self.vpc_context.vpc_top_level_cidr) > 1:
for i, additional_cidr in enumerate(self.vpc_context.vpc_top_level_cidr[1:]):
name = f"{self.vpc_context.vpcx_name}AddtionalVpcCidr{i + 1}"
ec2.CfnVPCCidrBlock(self,
id=name,
vpc_id=vpc.ref,
cidr_block=additional_cidr)
return vpc
def make_subnets(self, vpc_id, subnet_cidrs, subnet_type, subnet_count_offset=0):
"""
Generate VPC subnets
Args:
vpc_id: VPC CDK ID
subnet_cidrs: block of subnets cidrs to create
subnet_type: type of subnet created (for naming, eg Private / Public)
subnet_count_offset: number of subnets to offset during AZ allocation
Returns: list of CDK subnet objects
"""
subnets = list()
route_table = None
# Grab AZs in region
az_length = len(self.vpc_context.availability_zones)
# Generate subnet in alternating AZs
if subnet_cidrs:
name = f"{self.vpc_context.vpcx_name}{subnet_type}RouteTable"
route_table = ec2.CfnRouteTable(self,
id=name,
vpc_id=vpc_id,
tags=self.make_tags(("Name", name)))
for i, cidr in enumerate(subnet_cidrs):
name = f"{self.vpc_context.vpcx_name}{subnet_type}Subnet{i + 1}"
subnet = ec2.CfnSubnet(self,
id=name,
vpc_id=vpc_id,
cidr_block=cidr,
availability_zone=self.vpc_context.availability_zones[
(subnet_count_offset + i) % az_length],
tags=self.make_tags(("Name", name)))
# Associate route table
ec2.CfnSubnetRouteTableAssociation(self,
id=f"{name}RouteTableAssoc",
route_table_id=route_table.ref,
subnet_id=subnet.ref)
subnets.append(subnet)
return route_table, subnets
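    # Example (sketch): with availability_zones == ["us-east-1a", "us-east-1b"]
    # and three private CIDRs, the private subnets land in 1a, 1b, 1a; a
    # subsequent public call with subnet_count_offset=3 resumes the round-robin
    # at us-east-1b, per (subnet_count_offset + i) % az_length above.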
def add_dhcp_options(self, vpc_id):
"""
Generate VPC DHCP options
Args:
vpc_id: VPC CDK ID
Returns: CDK DHCP objects
"""
# Generate DHCP options
dhcp_options = ec2.CfnDHCPOptions(self,
id=f"{self.vpc_context.vpcx_name}DHCPOptions",
domain_name=self.net_config["global"]["DomainName"],
domain_name_servers=self.net_config["TopLevel"]["DomainNameServers"],
netbios_name_servers=self.net_config["TopLevel"]["NetbiosNameServers"],
netbios_node_type=8,
ntp_servers=self.net_config["TopLevel"]["NetworkTimeServers"])
# Associate route table
ec2.CfnVPCDHCPOptionsAssociation(self,
id=f"{self.vpc_context.vpcx_name}DHCPOptionsAssoc",
dhcp_options_id=dhcp_options.ref,
vpc_id=vpc_id)
def make_vpc_flow_logs(self, vpc_id):
"""
Generate VPC flow logs
Args:
vpc_id: VPC CDK ID
Returns: nothing
"""
ec2.CfnFlowLog(self,
id=f"{self.vpc_context.vpcx_name}FlowLog",
resource_id=vpc_id,
resource_type="VPC",
traffic_type="ALL",
log_destination=f"arn:aws:s3:::{self.vpc_context.log_bucket}/vpc-flow-log/{vpc_id}/",
log_destination_type="s3")
| 41.313589
| 117
| 0.540609
|
4055cdfa21aa11ab634b64d4e57757b53359d115
| 4,038
|
py
|
Python
|
pysnmp/NETSCREEN-SMI.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/NETSCREEN-SMI.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/NETSCREEN-SMI.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module NETSCREEN-SMI (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NETSCREEN-SMI
# Produced by pysmi-0.3.4 at Mon Apr 29 20:10:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, NotificationType, Integer32, ObjectIdentity, MibIdentifier, Unsigned32, enterprises, TimeTicks, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Gauge32, iso, Counter64, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "NotificationType", "Integer32", "ObjectIdentity", "MibIdentifier", "Unsigned32", "enterprises", "TimeTicks", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Gauge32", "iso", "Counter64", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
netscreen = ModuleIdentity((1, 3, 6, 1, 4, 1, 3224))
netscreen.setRevisions(('2004-08-31 00:00', '2004-05-03 00:00', '2004-03-03 00:00', '2001-09-28 00:00', '2000-08-02 00:00',))
if mibBuilder.loadTexts: netscreen.setLastUpdated('200408310000Z')
if mibBuilder.loadTexts: netscreen.setOrganization('Juniper Networks, Inc.')
netscreenTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 0))
netscreenProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 1))
netscreenTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 2))
netscreenIDS = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 3))
netscreenVpn = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 4))
netscreenQos = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 5))
netscreenNsrp = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 6))
netscreenSetting = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 7))
netscreenZone = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 8))
netscreenInterface = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 9))
netscreenPolicy = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 10))
netscreenNAT = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 11))
netscreenAddr = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 12))
netscreenService = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 13))
netscreenSchedule = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 14))
netscreenVsys = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 15))
netscreenResource = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 16))
netscreenIp = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 17))
netscreenVR = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 18))
netscreenChassis = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 21))
netscreenSettingMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 7, 0))
netscreenVpnMibModule = MibIdentifier((1, 3, 6, 1, 4, 1, 3224, 4, 0))
mibBuilder.exportSymbols("NETSCREEN-SMI", netscreenIDS=netscreenIDS, netscreenSchedule=netscreenSchedule, netscreenResource=netscreenResource, netscreenVpn=netscreenVpn, netscreen=netscreen, PYSNMP_MODULE_ID=netscreen, netscreenVR=netscreenVR, netscreenProducts=netscreenProducts, netscreenIp=netscreenIp, netscreenVpnMibModule=netscreenVpnMibModule, netscreenTrapInfo=netscreenTrapInfo, netscreenAddr=netscreenAddr, netscreenSettingMibModule=netscreenSettingMibModule, netscreenNAT=netscreenNAT, netscreenQos=netscreenQos, netscreenChassis=netscreenChassis, netscreenPolicy=netscreenPolicy, netscreenNsrp=netscreenNsrp, netscreenService=netscreenService, netscreenVsys=netscreenVsys, netscreenSetting=netscreenSetting, netscreenTrap=netscreenTrap, netscreenInterface=netscreenInterface, netscreenZone=netscreenZone)
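# Illustrative lookup sketch (a separate script, not part of this generated
# module; assumes pysnmp is installed and the compiled NETSCREEN-SMI module
# is on the builder's search path):
#
#     from pysnmp.smi import builder
#     mib = builder.MibBuilder()
#     mib.loadModules('NETSCREEN-SMI')
#     netscreenVpn, = mib.importSymbols('NETSCREEN-SMI', 'netscreenVpn')
#     print(netscreenVpn.getName())  # (1, 3, 6, 1, 4, 1, 3224, 4)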
| 98.487805
| 816
| 0.757553
|
6edbedce54cf297b59ed831a082e27c67444f149
| 16,107
|
py
|
Python
|
xarray/coding/times.py
|
plogerais/xarray
|
0f70a876759197388d32d6d9f0317f0fe63e0336
|
[
"Apache-2.0"
] | 2
|
2019-03-11T12:37:15.000Z
|
2021-07-16T15:09:41.000Z
|
xarray/coding/times.py
|
plogerais/xarray
|
0f70a876759197388d32d6d9f0317f0fe63e0336
|
[
"Apache-2.0"
] | 2
|
2018-10-04T06:56:42.000Z
|
2018-10-04T07:11:13.000Z
|
xarray/coding/times.py
|
plogerais/xarray
|
0f70a876759197388d32d6d9f0317f0fe63e0336
|
[
"Apache-2.0"
] | 1
|
2019-12-02T09:29:55.000Z
|
2019-12-02T09:29:55.000Z
|
from __future__ import absolute_import, division, print_function
import re
import traceback
import warnings
from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
from ..core.common import contains_cftime_datetimes
from ..core import indexing
from ..core.formatting import first_n_items, format_timestamp, last_item
from ..core.options import OPTIONS
from ..core.pycompat import PY3
from ..core.variable import Variable
from .variables import (
SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to,
safe_setitem, unpack_for_decoding, unpack_for_encoding)
try:
from pandas.errors import OutOfBoundsDatetime
except ImportError:
# pandas < 0.20
from pandas.tslib import OutOfBoundsDatetime
# standard calendars recognized by cftime
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
_NS_PER_TIME_DELTA = {'us': int(1e3),
'ms': int(1e6),
's': int(1e9),
'm': int(1e9) * 60,
'h': int(1e9) * 60 * 60,
'D': int(1e9) * 60 * 60 * 24}
TIME_UNITS = frozenset(['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'])
def _import_cftime():
'''
helper function to handle the transition to netcdftime/cftime
as a stand-alone package
'''
try:
import cftime
except ImportError:
# in netCDF4 the num2date/date2num function are top-level api
try:
import netCDF4 as cftime
except ImportError:
raise ImportError("Failed to import cftime")
return cftime
def _require_standalone_cftime():
"""Raises an ImportError if the standalone cftime is not found"""
try:
import cftime # noqa: F401
except ImportError:
raise ImportError('Using a CFTimeIndex requires the standalone '
'version of the cftime library.')
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith('s'):
units = '%ss' % units
return {'microseconds': 'us', 'milliseconds': 'ms', 'seconds': 's',
'minutes': 'm', 'hours': 'h', 'days': 'D'}[units]
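# Examples (illustrative): the unit is lower-cased and pluralised before the
# lookup, so both 'day' and 'Days' map to numpy's 'D', and 'Seconds' maps
# to 's'.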
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace.
matches = re.match('(.+) since (.+)', units)
if not matches:
raise ValueError('invalid time units: %s' % units)
delta_units, ref_date = [s.strip() for s in matches.groups()]
return delta_units, ref_date
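# Example (illustrative):
#   >>> _unpack_netcdf_time_units('days since 1970-01-01')
#   ('days', '1970-01-01')
# Extra whitespace around either part is stripped.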
def _decode_datetime_with_cftime(num_dates, units, calendar,
enable_cftimeindex):
cftime = _import_cftime()
if enable_cftimeindex:
_require_standalone_cftime()
dates = np.asarray(cftime.num2date(num_dates, units, calendar,
only_use_cftime_datetimes=True))
else:
dates = np.asarray(cftime.num2date(num_dates, units, calendar))
if (dates[np.nanargmin(num_dates)].year < 1678 or
dates[np.nanargmax(num_dates)].year >= 2262):
if not enable_cftimeindex or calendar in _STANDARD_CALENDARS:
warnings.warn(
'Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using dummy '
'cftime.datetime objects instead, reason: dates out '
'of range', SerializationWarning, stacklevel=3)
else:
if enable_cftimeindex:
if calendar in _STANDARD_CALENDARS:
dates = cftime_to_nptime(dates)
else:
try:
dates = cftime_to_nptime(dates)
except ValueError as e:
warnings.warn(
'Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using '
'dummy cftime.datetime objects instead, reason:'
'{0}'.format(e), SerializationWarning, stacklevel=3)
return dates
def _decode_cf_datetime_dtype(data, units, calendar, enable_cftimeindex):
# Verify that at least the first and last date can be decoded
# successfully. Otherwise, tracebacks end up swallowed by
# Dataset.__repr__ when users try to view their lazily decoded array.
values = indexing.ImplicitToExplicitIndexingAdapter(
indexing.as_indexable(data))
example_value = np.concatenate([first_n_items(values, 1) or [0],
last_item(values) or [0]])
try:
result = decode_cf_datetime(example_value, units, calendar,
enable_cftimeindex)
except Exception:
calendar_msg = ('the default calendar' if calendar is None
else 'calendar %r' % calendar)
msg = ('unable to decode time units %r with %s. Try '
'opening your dataset with decode_times=False.'
% (units, calendar_msg))
if not PY3:
msg += ' Full traceback:\n' + traceback.format_exc()
raise ValueError(msg)
else:
dtype = getattr(result, 'dtype', np.dtype('object'))
return dtype
def decode_cf_datetime(num_dates, units, calendar=None,
enable_cftimeindex=False):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than cftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that the time unit in `units` must not be smaller than microseconds and
not larger than days.
See also
--------
cftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using cftime
raise OutOfBoundsDatetime
# fixes: https://github.com/pydata/pandas/issues/14068
# these lines check if the lowest or the highest value in dates
# causes an OutOfBoundsDatetime (Overflow) error
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'invalid value encountered',
RuntimeWarning)
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
# Cast input dates to integers of nanoseconds because `pd.to_datetime`
# works much faster when dealing with integers
# make _NS_PER_TIME_DELTA an array to ensure type upcasting
flat_num_dates_ns_int = (flat_num_dates.astype(np.float64) *
_NS_PER_TIME_DELTA[delta]).astype(np.int64)
dates = (pd.to_timedelta(flat_num_dates_ns_int, 'ns') +
ref_date).values
except (OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(np.float), units, calendar,
enable_cftimeindex)
return dates.reshape(num_dates.shape)
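# Minimal sketch of the vectorized fast path above (illustrative values):
#   >>> decode_cf_datetime(np.array([0, 1.5]), 'days since 2000-01-01')
#   array(['2000-01-01T00:00:00.000000000', '2000-01-02T12:00:00.000000000'],
#         dtype='datetime64[ns]')
# i.e. each value is scaled to integer nanoseconds and added to the
# reference Timestamp via pd.to_timedelta.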
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
shape = num_timedeltas.shape
num_timedeltas = num_timedeltas.ravel()
result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
# NaT is returned unboxed with wrong units; this should be fixed in pandas
if result.dtype != 'timedelta64[ns]':
result = result.astype('timedelta64[ns]')
return result.reshape(shape)
def _infer_time_units_from_diff(unique_timedeltas):
for time_unit in ['days', 'hours', 'minutes', 'seconds']:
delta_ns = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(time_unit)]
unit_delta = np.timedelta64(delta_ns, 'ns')
diffs = unique_timedeltas / unit_delta
if np.all(diffs == diffs.astype(int)):
return time_unit
return 'seconds'
def infer_calendar_name(dates):
"""Given an array of datetimes, infer the CF calendar name"""
if np.asarray(dates).dtype == 'datetime64[ns]':
return 'proleptic_gregorian'
else:
return np.asarray(dates).ravel()[0].calendar
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = np.asarray(dates).ravel()
if np.asarray(dates).dtype == 'datetime64[ns]':
dates = pd.to_datetime(dates, box=False)
dates = dates[pd.notnull(dates)]
reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
reference_date = pd.Timestamp(reference_date)
else:
reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
reference_date = format_cftime_datetime(reference_date)
unique_timedeltas = np.unique(np.diff(dates))
if unique_timedeltas.dtype == np.dtype('O'):
# Convert to np.timedelta64 objects using pandas to work around a
# NumPy casting bug: https://github.com/numpy/numpy/issues/11096
unique_timedeltas = pd.to_timedelta(unique_timedeltas, box=False)
units = _infer_time_units_from_diff(unique_timedeltas)
return '%s since %s' % (units, reference_date)
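# Example (illustrative): for datetime64[ns] dates 2000-01-01, 2000-01-03
# and 2000-01-05 the only unique delta is 2 days, which 'days' divides
# evenly, so this returns 'days since 2000-01-01 00:00:00'.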
def format_cftime_datetime(date):
"""Converts a cftime.datetime object to a string with the format:
YYYY-MM-DD HH:MM:SS.UUUUUU
"""
return '{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06d}'.format(
date.year, date.month, date.day, date.hour, date.minute, date.second,
date.microsecond)
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
{'days', 'hours', 'minutes', 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units
def cftime_to_nptime(times):
"""Given an array of cftime.datetime objects, return an array of
numpy.datetime64 objects of the same size"""
times = np.asarray(times)
new = np.empty(times.shape, dtype='M8[ns]')
for i, t in np.ndenumerate(times):
dt = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
new[i] = np.datetime64(dt)
return new
def _cleanup_netcdf_time_units(units):
delta, ref_date = _unpack_netcdf_time_units(units)
try:
units = '%s since %s' % (delta, format_timestamp(ref_date))
except OutOfBoundsDatetime:
# don't worry about reifying the units if they're out of bounds
pass
return units
def _encode_datetime_with_cftime(dates, units, calendar):
"""Fallback method for encoding dates using cftime.
This method is more flexible than xarray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
cftime = _import_cftime()
if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
dates = dates.astype('M8[us]').astype(datetime)
def encode_datetime(d):
return np.nan if d is None else cftime.date2num(d, units, calendar)
return np.vectorize(encode_datetime)(dates)
def cast_to_int_if_safe(num):
int_num = np.array(num, dtype=np.int64)
if (num == int_num).all():
num = int_num
return num
def encode_cf_datetime(dates, units=None, calendar=None):
"""Given an array of datetime objects, returns the tuple `(num, units,
calendar)` suitable for a CF compliant time variable.
Unlike `date2num`, this function can handle datetime64 arrays.
See also
--------
cftime.date2num
"""
dates = np.asarray(dates)
if units is None:
units = infer_datetime_units(dates)
else:
units = _cleanup_netcdf_time_units(units)
if calendar is None:
calendar = infer_calendar_name(dates)
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == 'O':
# parse with cftime instead
raise OutOfBoundsDatetime
assert dates.dtype == 'datetime64[ns]'
delta_units = _netcdf_to_numpy_timeunit(delta)
time_delta = np.timedelta64(1, delta_units).astype('timedelta64[ns]')
ref_date = np.datetime64(pd.Timestamp(ref_date))
num = (dates - ref_date) / time_delta
except (OutOfBoundsDatetime, OverflowError):
num = _encode_datetime_with_cftime(dates, units, calendar)
num = cast_to_int_if_safe(num)
return (num, units, calendar)
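# Example (illustrative round trip): encoding datetime64 values for
# 2000-01-01 and 2000-01-02 with units='days since 2000-01-01' yields
# num == array([0, 1]) and an inferred calendar of 'proleptic_gregorian';
# decode_cf_datetime() inverts the transformation.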
def encode_cf_timedelta(timedeltas, units=None):
if units is None:
units = infer_timedelta_units(timedeltas)
np_unit = _netcdf_to_numpy_timeunit(units)
num = 1.0 * timedeltas / np.timedelta64(1, np_unit)
num = np.where(pd.isnull(timedeltas), np.nan, num)
num = cast_to_int_if_safe(num)
return (num, units)
class CFDatetimeCoder(VariableCoder):
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if (np.issubdtype(data.dtype, np.datetime64) or
contains_cftime_datetimes(variable)):
(data, units, calendar) = encode_cf_datetime(
data,
encoding.pop('units', None),
encoding.pop('calendar', None))
safe_setitem(attrs, 'units', units, name=name)
safe_setitem(attrs, 'calendar', calendar, name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
enable_cftimeindex = OPTIONS['enable_cftimeindex']
if 'units' in attrs and 'since' in attrs['units']:
units = pop_to(attrs, encoding, 'units')
calendar = pop_to(attrs, encoding, 'calendar')
dtype = _decode_cf_datetime_dtype(
data, units, calendar, enable_cftimeindex)
transform = partial(
decode_cf_datetime, units=units, calendar=calendar,
enable_cftimeindex=enable_cftimeindex)
data = lazy_elemwise_func(data, transform, dtype)
return Variable(dims, data, attrs, encoding)
class CFTimedeltaCoder(VariableCoder):
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if np.issubdtype(data.dtype, np.timedelta64):
data, units = encode_cf_timedelta(
data, encoding.pop('units', None))
safe_setitem(attrs, 'units', units, name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
if 'units' in attrs and attrs['units'] in TIME_UNITS:
units = pop_to(attrs, encoding, 'units')
transform = partial(decode_cf_timedelta, units=units)
dtype = np.dtype('timedelta64[ns]')
data = lazy_elemwise_func(data, transform, dtype=dtype)
return Variable(dims, data, attrs, encoding)
| 36.773973
| 79
| 0.65276
|
28ee980144cf40d4b09a15d28fedf4bfd93fb3d4
| 5,201
|
py
|
Python
|
appGUI/VisPyPatches.py
|
DannyPol/flatcam
|
25a8634d0658e98b7fae31a095f8bef40c1b3067
|
[
"MIT"
] | 1
|
2022-02-11T06:19:34.000Z
|
2022-02-11T06:19:34.000Z
|
appGUI/VisPyPatches.py
|
MRemy2/FlatCam
|
d4f941335ca8a8d5351aab23b396f99da06a9029
|
[
"MIT"
] | null | null | null |
appGUI/VisPyPatches.py
|
MRemy2/FlatCam
|
d4f941335ca8a8d5351aab23b396f99da06a9029
|
[
"MIT"
] | null | null | null |
# ##########################################################
# FlatCAM: 2D Post-processing for Manufacturing #
# http://flatcam.org #
# File Author: Dennis Hayrullin #
# Date: 2/5/2016 #
# MIT Licence #
# ##########################################################
from vispy.visuals import markers, LineVisual, InfiniteLineVisual
from vispy.visuals.axis import Ticker, _get_ticks_talbot
from vispy.scene.widgets import Grid
import numpy as np
def apply_patches():
# Patch MarkersVisual to have crossed lines marker
cross_lines = """
float cross(vec2 pointcoord, float size)
{
//vbar
float r1 = abs(pointcoord.x - 0.5)*size;
float r2 = abs(pointcoord.y - 0.5)*size - $v_size/2;
float vbar = max(r1,r2);
//hbar
float r3 = abs(pointcoord.y - 0.5)*size;
float r4 = abs(pointcoord.x - 0.5)*size - $v_size/2;
float hbar = max(r3,r4);
return min(vbar, hbar);
}
"""
markers._marker_dict['++'] = cross_lines
markers.marker_types = tuple(sorted(list(markers._marker_dict.copy().keys())))
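# With the patch applied, the new symbol can be requested like any built-in
# marker (illustrative sketch; `pos` stands for an (N, 2) float array that
# the caller supplies):
#   vis = markers.MarkersVisual()
#   vis.set_data(pos, symbol='++', size=10)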
# # Add a clear_data method to LineVisual to allow clearing its data
# def clear_data(self):
# self._bounds = None
# self._pos = None
# self._changed['pos'] = True
# self.update()
#
# LineVisual.clear_data = clear_data
# Patch VisPy Grid to prevent updating the layout on PaintGL, which causes low FPS
def _prepare_draw(self, view):
pass
def _update_clipper(self):
super(Grid, self)._update_clipper()
try:
self._update_child_widget_dim()
except Exception as e:
print("VisPyPatches.apply_patches._update_clipper() -> %s" % str(e))
Grid._prepare_draw = _prepare_draw
Grid._update_clipper = _update_clipper
# Patch InfiniteLine visual to draw with a fixed line width
def _prepare_draw(self, view=None):
"""This method is called immediately before each draw.
The *view* argument indicates which view is about to be drawn.
"""
GL = None
from vispy.app._default_app import default_app
if default_app is not None and \
default_app.backend_name != 'ipynb_webgl':
try:
import OpenGL.GL as GL
except Exception: # can be other than ImportError sometimes
pass
if GL:
GL.glDisable(GL.GL_LINE_SMOOTH)
GL.glLineWidth(2.0)
if self._changed['pos']:
self.pos_buf.set_data(self._pos)
self._changed['pos'] = False
if self._changed['color']:
self._program.vert['color'] = self._color
self._changed['color'] = False
InfiniteLineVisual._prepare_draw = _prepare_draw
# Patch AxisVisual to have less axis labels
def _get_tick_frac_labels(self):
"""Get the major ticks, minor ticks, and major labels"""
minor_num = 4 # number of minor ticks per major division
if self.axis.scale_type == 'linear':
domain = self.axis.domain
if domain[1] < domain[0]:
flip = True
domain = domain[::-1]
else:
flip = False
offset = domain[0]
scale = domain[1] - domain[0]
transforms = self.axis.transforms
length = self.axis.pos[1] - self.axis.pos[0] # in logical coords
n_inches = np.sqrt(np.sum(length ** 2)) / transforms.dpi
# major = np.linspace(domain[0], domain[1], num=11)
# major = MaxNLocator(10).tick_values(*domain)
major = _get_ticks_talbot(domain[0], domain[1], n_inches, 1)
labels = ['%g' % x for x in major]
majstep = major[1] - major[0]
minor = []
minstep = majstep / (minor_num + 1)
minstart = 0 if self.axis._stop_at_major[0] else -1
minstop = -1 if self.axis._stop_at_major[1] else 0
for i in range(minstart, len(major) + minstop):
maj = major[0] + i * majstep
minor.extend(np.linspace(maj + minstep,
maj + majstep - minstep,
minor_num))
major_frac = (major - offset) / scale
major_frac = major_frac[::-1] if flip else major_frac
use_mask = (major_frac > -0.0001) & (major_frac < 1.0001)
major_frac = major_frac[use_mask]
labels = [l for li, l in enumerate(labels) if use_mask[li]]
minor_frac = (np.array(minor) - offset) / scale
use_minor_mask = (minor_frac > -0.0001) & (minor_frac < 1.0001)
minor_frac = minor_frac[use_minor_mask]
return major_frac, minor_frac, labels
elif self.axis.scale_type == 'logarithmic':
raise NotImplementedError
elif self.axis.scale_type == 'power':
raise NotImplementedError
Ticker._get_tick_frac_labels = _get_tick_frac_labels
| 37.688406
| 82
| 0.553355
|
bb7b326477a776068c3c1b9d08b2057ede1c776c
| 3,773
|
py
|
Python
|
test_ariac/test_example_node.py
|
ashfaqfarooqui/ariac
|
6f36a992bec947a9be1b91673969e3a1b3369fd8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test_ariac/test_example_node.py
|
ashfaqfarooqui/ariac
|
6f36a992bec947a9be1b91673969e3a1b3369fd8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test_ariac/test_example_node.py
|
ashfaqfarooqui/ariac
|
6f36a992bec947a9be1b91673969e3a1b3369fd8
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import sys
import time
import unittest
from ariac_example import ariac_example
from std_msgs.msg import Float32
import rospy
import rostest
class ExampleNodeTester(unittest.TestCase):
def comp_score_callback(self, msg):
self.current_comp_score = msg.data
def prepare_tester(self):
self.comp_class = ariac_example.MyCompetitionClass()
ariac_example.connect_callbacks(self.comp_class)
self.current_comp_score = None
self.comp_state_sub = rospy.Subscriber(
"/ariac/current_score", Float32, self.comp_score_callback)
# Pre-defined initial pose because sometimes the arm starts "droopy"
self._send_arm_to_initial_pose()
def test(self):
self.prepare_tester()
self._test_send_arm_to_zero_state()
# Starting the competition will cause parts from the order to be spawned on AGV1
self._test_start_comp()
time.sleep(1.0)
self._test_order_reception()
self._test_agv_control()
time.sleep(5.0)
self._test_comp_end()
def _test_start_comp(self):
success = ariac_example.start_competition()
self.assertTrue(success, 'Failed to start the competition')
time.sleep(0.5)
self.assertTrue(
self.comp_class.current_comp_state == 'go', 'Competition not in "go" state')
def _test_order_reception(self):
self.assertEqual(len(self.comp_class.received_orders), 1)
def _send_arm_to_initial_pose(self):
positions = [1.51, 0.0, -1.12, 3.14, 3.77, -1.51, 0.0]
self.comp_class.send_arm_to_state(positions)
time.sleep(1.0)
def _test_send_arm_to_zero_state(self):
self.comp_class.send_arm_to_state([0] * len(self.comp_class.arm_joint_names))
# This can be slow if there are a lot of models in the environment
time.sleep(5.0)
error = 0
for position in self.comp_class.current_joint_state.position:
error += abs(position - 0.0)
self.assertTrue(error < 0.5, 'Arm was not properly sent to zero state')
def _test_agv_control(self, index=1, kit_id='order_0_kit_0'):
success = ariac_example.control_agv(index, kit_id)
self.assertTrue(success, 'Failed to control AGV')
def _test_comp_end(self):
num_received_orders = len(self.comp_class.received_orders)
num_kits = len(self.comp_class.received_orders[0].kits)
if num_received_orders == 1 and num_kits == 1:
self.assertTrue(
self.comp_class.current_comp_state == 'done', 'Competition not in "done" state')
else:
# If there were more kits expected, the order won't be done
self.assertTrue(
self.comp_class.current_comp_state == 'go', 'Competition not in "go" state')
num_parts_in_order = len(self.comp_class.received_orders[0].kits[0].objects)
self.assertTrue(
# Expect to have a point for each part, the all parts bonus, and a point for each part's pose
self.current_comp_score == 3 * num_parts_in_order,
'Something went wrong in the scoring. Current score: ' + str(self.current_comp_score))
if __name__ == '__main__':
rospy.init_node('test_example_node', anonymous=True)
# Wait until /clock is being published; this can take an unpredictable
# amount of time when we're downloading models.
while rospy.Time.now().to_sec() == 0.0:
print('Waiting for Gazebo to start...')
time.sleep(1.0)
# Take an extra nap, to allow plugins to be loaded
time.sleep(10.0)
print('OK, starting test.')
rostest.run('osrf_gear', 'test_example_node', ExampleNodeTester, sys.argv)
| 36.990196
| 105
| 0.67506
|
cee12bfb287aeefc4e9835fea35cdbc3322230a9
| 648
|
py
|
Python
|
backend/contest/views.py
|
cjc7373/hackergame
|
86971b4cf8a2761044d417b4c8bd934c3309d6fd
|
[
"MIT"
] | 2
|
2020-07-12T13:11:43.000Z
|
2020-07-14T08:12:17.000Z
|
backend/contest/views.py
|
cjc7373/hackergame
|
86971b4cf8a2761044d417b4c8bd934c3309d6fd
|
[
"MIT"
] | 1
|
2020-08-13T13:56:18.000Z
|
2020-09-29T12:39:08.000Z
|
backend/contest/views.py
|
cjc7373/hackergame
|
86971b4cf8a2761044d417b4c8bd934c3309d6fd
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from contest.models import Stage, Pause
from contest.serializer import StageSerializer, PauseSerializer
class StageAPI(APIView):
def get(self, request):
stage = Stage.objects.get()
stage_serializer = StageSerializer(stage)
pause_serializer = PauseSerializer(Pause.objects.all(), many=True)
data = stage_serializer.data
data['pause'] = pause_serializer.data
return Response(data)
class CurrentStageAPI(APIView):
def get(self, request):
return Response({"status": Stage.objects.current_status})
| 30.857143
| 74
| 0.731481
|
14bb8c23d430174e99292b327533383c8b25ea0b
| 20,476
|
py
|
Python
|
official/nlp/modeling/layers/transformer_scaffold_test.py
|
lorynebissuel/models
|
7f597cf851c793ce1b8db7a93a94894b04424d4c
|
[
"Apache-2.0"
] | 2
|
2021-04-02T12:21:35.000Z
|
2021-12-14T07:29:38.000Z
|
official/nlp/modeling/layers/transformer_scaffold_test.py
|
lorynebissuel/models
|
7f597cf851c793ce1b8db7a93a94894b04424d4c
|
[
"Apache-2.0"
] | null | null | null |
official/nlp/modeling/layers/transformer_scaffold_test.py
|
lorynebissuel/models
|
7f597cf851c793ce1b8db7a93a94894b04424d4c
|
[
"Apache-2.0"
] | 3
|
2019-11-12T11:18:11.000Z
|
2021-12-29T09:14:37.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based transformer block layer."""
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.layers import attention
from official.nlp.modeling.layers import transformer_scaffold
# Test class that wraps a standard attention layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package='TestOnlyAttention')
class ValidatedAttentionLayer(attention.MultiHeadAttention):
def __init__(self, call_list, **kwargs):
super(ValidatedAttentionLayer, self).__init__(**kwargs)
self.list = call_list
def call(self, query, value, attention_mask=None):
self.list.append(True)
return super(ValidatedAttentionLayer, self).call(
query, value, attention_mask=attention_mask)
def get_config(self):
config = super(ValidatedAttentionLayer, self).get_config()
config['call_list'] = []
return config
# Test class implements a simple feedforward layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package='TestOnlyFeedforward')
class ValidatedFeedforwardLayer(tf.keras.layers.Layer):
def __init__(self, call_list, activation, **kwargs):
super(ValidatedFeedforwardLayer, self).__init__(**kwargs)
self.list = call_list
self.activation = activation
def build(self, input_shape):
hidden_size = input_shape.as_list()[-1]
self._feedforward_dense = tf.keras.layers.experimental.EinsumDense(
'...x,xy->...y',
output_shape=hidden_size,
bias_axes='y',
activation=self.activation,
name='feedforward')
def call(self, inputs):
self.list.append(True)
return self._feedforward_dense(inputs)
def get_config(self):
config = super(ValidatedFeedforwardLayer, self).get_config()
config['call_list'] = []
config['activation'] = self.activation
return config
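# Shape note (illustrative): the '...x,xy->...y' einsum above contracts the
# last input axis against an (x, y) kernel, so a (batch, seq, hidden) input
# maps back to (batch, seq, hidden) because output_shape == hidden_size.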
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class TransformerLayerTest(keras_parameterized.TestCase):
def tearDown(self):
super(TransformerLayerTest, self).tearDown()
tf.keras.mixed_precision.experimental.set_policy('float32')
def test_layer_creation(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_creation_with_feedforward_cls(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=ValidatedFeedforwardLayer,
feedforward_cfg=feedforward_layer_cfg,
num_attention_heads=10,
intermediate_size=None,
intermediate_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertNotEmpty(feedforward_call_list)
self.assertTrue(feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_creation_with_mask(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output_tensor.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output_tensor = test_layer(data_tensor)
# Create a model from the test layer.
model = tf.keras.Model(data_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
_ = model.predict(input_data)
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation_with_feedforward_cls(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
feedforward_layer = ValidatedFeedforwardLayer(**feedforward_layer_cfg)
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=feedforward_layer,
num_attention_heads=10,
intermediate_size=None,
intermediate_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
self.assertNotEmpty(feedforward_call_list)
self.assertTrue(feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_invocation_with_mask(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_layer_invocation_with_float16_dtype(self):
tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = (10 * np.random.random_sample(
(batch_size, sequence_length, width)))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
_ = model.predict([input_data, mask_data])
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")
def test_transform_with_initializer(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu',
kernel_initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
output = test_layer(data_tensor)
# The default output of a transformer layer should be the same as the input.
self.assertEqual(data_tensor.shape.as_list(), output.shape.as_list())
# If call_list[0] exists and is True, the passed layer class was
# instantiated from the given config properly.
self.assertNotEmpty(call_list)
self.assertTrue(call_list[0])
def test_layer_restoration_from_config(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
'name': 'test_layer',
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
num_attention_heads=10,
intermediate_size=2048,
intermediate_activation='relu')
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
pre_serialization_output = model.predict([input_data, mask_data])
# Serialize the model config. Pass the serialized data through json to
# ensure that we can serialize this layer to disk.
serialized_data = model.get_config()
# Create a new model from the old config, and copy the weights. These models
# should have identical outputs.
new_model = tf.keras.Model.from_config(serialized_data)
new_model.set_weights(model.get_weights())
output = new_model.predict([input_data, mask_data])
self.assertAllClose(pre_serialization_output, output)
# If the layer was configured correctly, it should have a list attribute
# (since it should have the custom class and config passed to it).
new_model.summary()
new_call_list = new_model.get_layer(
name='transformer_scaffold')._attention_layer.list
self.assertNotEmpty(new_call_list)
self.assertTrue(new_call_list[0],
"The passed layer class wasn't instantiated.")
def test_layer_with_feedforward_cls_restoration_from_config(self):
sequence_length = 21
width = 80
call_list = []
attention_layer_cfg = {
'num_heads': 10,
'key_dim': 8,
'call_list': call_list,
'name': 'test_layer',
}
feedforward_call_list = []
feedforward_layer_cfg = {
'activation': 'relu',
'call_list': feedforward_call_list,
}
test_layer = transformer_scaffold.TransformerScaffold(
attention_cls=ValidatedAttentionLayer,
attention_cfg=attention_layer_cfg,
feedforward_cls=ValidatedFeedforwardLayer,
feedforward_cfg=feedforward_layer_cfg,
num_attention_heads=10,
intermediate_size=None,
intermediate_activation=None)
# Create a 3-dimensional input (the first dimension is implicit).
data_tensor = tf.keras.Input(shape=(sequence_length, width))
# Create a 2-dimensional input (the first dimension is implicit).
mask_tensor = tf.keras.Input(shape=(sequence_length, sequence_length))
output_tensor = test_layer([data_tensor, mask_tensor])
# Create a model from the test layer.
model = tf.keras.Model([data_tensor, mask_tensor], output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 6
input_data = 10 * np.random.random_sample(
(batch_size, sequence_length, width))
# The attention mask should be of shape (batch, from_seq_len, to_seq_len),
# which here is (batch, sequence_length, sequence_length)
mask_data = np.random.randint(
2, size=(batch_size, sequence_length, sequence_length))
pre_serialization_output = model.predict([input_data, mask_data])
serialized_data = model.get_config()
# Create a new model from the old config, and copy the weights. These models
# should have identical outputs.
new_model = tf.keras.Model.from_config(serialized_data)
new_model.set_weights(model.get_weights())
output = new_model.predict([input_data, mask_data])
self.assertAllClose(pre_serialization_output, output)
# If the layer was configured correctly, it should have a list attribute
# (since it should have the custom class and config passed to it).
new_model.summary()
new_call_list = new_model.get_layer(
name='transformer_scaffold')._attention_layer.list
self.assertNotEmpty(new_call_list)
self.assertTrue(new_call_list[0],
"The passed layer class wasn't instantiated.")
new_feedforward_call_list = new_model.get_layer(
name='transformer_scaffold')._feedforward_block.list
self.assertNotEmpty(new_feedforward_call_list)
self.assertTrue(new_feedforward_call_list[0],
"The passed layer class wasn't instantiated.")
if __name__ == '__main__':
tf.test.main()
| 40.14902
| 101
| 0.712883
|
c2c26ec98703599334d84658ce985159b5931b89
| 4,316
|
py
|
Python
|
data/split_data.py
|
MattAshman/pvi
|
74322ac23efd2674e46c953e5573d19d8d6bce4c
|
[
"MIT"
] | 1
|
2022-03-17T00:41:27.000Z
|
2022-03-17T00:41:27.000Z
|
data/split_data.py
|
MattAshman/pvi
|
74322ac23efd2674e46c953e5573d19d8d6bce4c
|
[
"MIT"
] | null | null | null |
data/split_data.py
|
MattAshman/pvi
|
74322ac23efd2674e46c953e5573d19d8d6bce4c
|
[
"MIT"
] | 1
|
2022-03-17T10:53:03.000Z
|
2022-03-17T10:53:03.000Z
|
import numpy as np
def homogenous(x, y, m, dataset_seed):
"""
Homogeneous split of the data into m groups.
"""
random_state = np.random.get_state()
if dataset_seed is not None:
np.random.seed(dataset_seed)
if m == 1:
client_data = [{"x": x, "y": y}]
return client_data
perm = np.random.permutation(len(x))
client_data = []
for i in range(m):
client_data.append({
"x": x[perm[i::m]],
"y": y[perm[i::m]],
})
np.random.set_state(random_state)
return client_data
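if __name__ == "__main__":
    # Illustrative check (not part of the original module): ten points split
    # across m=3 clients are dealt out round-robin into sizes 4, 3 and 3.
    _x = np.arange(10).reshape(-1, 1)
    _y = np.arange(10) % 2
    _clients = homogenous(_x, _y, 3, dataset_seed=0)
    print([len(c["x"]) for c in _clients])  # [4, 3, 3]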
def inhomogenous1(x, y, m, client_size_factor, class_balance_factor,
dataset_seed):
"""
Splits the data into m groups, half of which are small clients and the
other half are large clients. The split is based on the output distribution.
"""
random_state = np.random.get_state()
if dataset_seed is not None:
np.random.seed(dataset_seed)
if m == 1:
client_data = [{"x": x, "y": y}]
return client_data
if m % 2 != 0:
raise ValueError('Num clients should be even for nice maths')
n = x.shape[0]
small_client_size = int(np.floor((1 - client_size_factor) * n / m))
big_client_size = int(np.floor((1 + client_size_factor) * n / m))
class_balance = np.mean(y == 0)
small_client_class_balance = class_balance + (
1 - class_balance) * class_balance_factor
small_client_negative_class_size = int(
np.floor(small_client_size * small_client_class_balance))
small_client_positive_class_size = int(
small_client_size - small_client_negative_class_size)
if small_client_negative_class_size < 0:
raise ValueError('Small_client_negative_class_size is negative, '
'invalid settings.')
if small_client_positive_class_size < 0:
raise ValueError('Small_client_positive_class_size is negative, '
'invalid settings.')
if small_client_negative_class_size * m / 2 > class_balance * n:
raise ValueError(
f'Not enough negative class instances to fill the small clients. '
f'Client size factor:{client_size_factor}, class balance '
f'factor:{class_balance_factor}')
if small_client_positive_class_size * m / 2 > (1 - class_balance) * n:
raise ValueError(
f'Not enough positive class instances to fill the small clients. '
f'Client size factor:{client_size_factor}, class balance '
f'factor:{class_balance_factor}')
pos_inds = np.where(y > 0)
zero_inds = np.where(y == 0)
assert (len(pos_inds[0]) + len(zero_inds[0])) == len(y), \
"Some indeces missed."
y_pos = y[pos_inds]
y_neg = y[zero_inds]
x_pos = x[pos_inds]
x_neg = x[zero_inds]
client_data = []
# Populate small classes.
for i in range(int(m / 2)):
client_x_pos = x_pos[:small_client_positive_class_size]
x_pos = x_pos[small_client_positive_class_size:]
client_y_pos = y_pos[:small_client_positive_class_size]
y_pos = y_pos[small_client_positive_class_size:]
client_x_neg = x_neg[:small_client_negative_class_size]
x_neg = x_neg[small_client_negative_class_size:]
client_y_neg = y_neg[:small_client_negative_class_size]
y_neg = y_neg[small_client_negative_class_size:]
client_x = np.concatenate([client_x_pos, client_x_neg])
client_y = np.concatenate([client_y_pos, client_y_neg])
shuffle_inds = np.random.permutation(client_x.shape[0])
client_x = client_x[shuffle_inds, :]
client_y = client_y[shuffle_inds]
client_data.append({'x': client_x, 'y': client_y})
# Recombine remaining data and shuffle.
x = np.concatenate([x_pos, x_neg])
y = np.concatenate([y_pos, y_neg])
shuffle_inds = np.random.permutation(x.shape[0])
x = x[shuffle_inds]
y = y[shuffle_inds]
# Distribute among large clients.
for i in range(int(m / 2)):
client_x = x[:big_client_size]
client_y = y[:big_client_size]
x = x[big_client_size:]
y = y[big_client_size:]
client_data.append({'x': client_x, 'y': client_y})
np.random.set_state(random_state)
return client_data
| 30.828571
| 78
| 0.641566
|
4e6877fc69eea22a5935206f580611d15b564930
| 149
|
py
|
Python
|
HackerRank/list2.py
|
SomuSysAdmin/python
|
7707a6ebe54d8ad73aabe64836ec898c5fd7725d
|
[
"MIT"
] | null | null | null |
HackerRank/list2.py
|
SomuSysAdmin/python
|
7707a6ebe54d8ad73aabe64836ec898c5fd7725d
|
[
"MIT"
] | null | null | null |
HackerRank/list2.py
|
SomuSysAdmin/python
|
7707a6ebe54d8ad73aabe64836ec898c5fd7725d
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
n = int(input())
integer_list = map(int, input().split())
t = tuple(list(integer_list)[:n])
print(hash(t))
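# Example run (illustrative): for stdin "2" followed by "1 2", the tuple is
# (1, 2) and the script prints hash((1, 2)).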
| 29.8
| 44
| 0.590604
|
844db8a8a3e7cc28f5862ac5b7855fc985e74af6
| 14,241
|
py
|
Python
|
src/borg/testsuite/__init__.py
|
alfredo08154711/borg
|
303c11f245e0a23e522a53aaad8cbeaaa2074551
|
[
"BSD-3-Clause"
] | 1
|
2020-10-24T09:47:20.000Z
|
2020-10-24T09:47:20.000Z
|
src/borg/testsuite/__init__.py
|
alfredo08154711/borg
|
303c11f245e0a23e522a53aaad8cbeaaa2074551
|
[
"BSD-3-Clause"
] | 1
|
2020-11-01T15:50:16.000Z
|
2020-11-01T15:50:16.000Z
|
src/borg/testsuite/__init__.py
|
alfredo08154711/borg
|
303c11f245e0a23e522a53aaad8cbeaaa2074551
|
[
"BSD-3-Clause"
] | null | null | null |
from contextlib import contextmanager
import filecmp
import functools
import os
try:
import posix
except ImportError:
posix = None
import stat
import sys
import sysconfig
import tempfile
import time
import uuid
import unittest
from ..xattr import get_all
from ..platform import get_flags
from ..helpers import umount
from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from .. import platform
# Note: this is used by borg.selftest, do not use or import py.test functionality here.
try:
import llfuse
# Does this version of llfuse support ns precision?
have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
except ImportError:
have_fuse_mtime_ns = False
try:
from pytest import raises
except: # noqa
raises = None
has_lchflags = hasattr(os, 'lchflags') or sys.platform.startswith('linux')
try:
with tempfile.NamedTemporaryFile() as file:
platform.set_flags(file.name, stat.UF_NODUMP)
except OSError:
has_lchflags = False
try:
import llfuse
has_llfuse = True or llfuse # avoids "unused import"
except ImportError:
has_llfuse = False
# The mtime get/set precision varies on different OS and Python versions
if posix and 'HAVE_FUTIMENS' in getattr(posix, '_have_functions', []):
st_mtime_ns_round = 0
elif 'HAVE_UTIMES' in sysconfig.get_config_vars():
st_mtime_ns_round = -6
else:
st_mtime_ns_round = -9
if sys.platform.startswith('netbsd'):
st_mtime_ns_round = -4 # only >1 microsecond resolution here?
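# Example (illustrative): with st_mtime_ns_round == -6, a nanosecond
# timestamp keeps microsecond precision, e.g.
#   round(1234567891, st_mtime_ns_round) == 1235000000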
@contextmanager
def unopened_tempfile():
with tempfile.TemporaryDirectory() as tempdir:
yield os.path.join(tempdir, "file")
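# Usage sketch (illustrative): the context manager yields a path inside a
# fresh temporary directory without creating the file itself, e.g.
#   with unopened_tempfile() as path:
#       open(path, 'w').close()  # the caller decides how to create it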
@functools.lru_cache()
def are_symlinks_supported():
with unopened_tempfile() as filepath:
try:
os.symlink('somewhere', filepath)
if os.stat(filepath, follow_symlinks=False) and os.readlink(filepath) == 'somewhere':
return True
except OSError:
pass
return False
@functools.lru_cache()
def are_hardlinks_supported():
if not hasattr(os, 'link'):
# some pythons do not have os.link
return False
with unopened_tempfile() as file1path, unopened_tempfile() as file2path:
open(file1path, 'w').close()
try:
os.link(file1path, file2path)
stat1 = os.stat(file1path)
stat2 = os.stat(file2path)
if stat1.st_nlink == stat2.st_nlink == 2 and stat1.st_ino == stat2.st_ino:
return True
except OSError:
pass
return False
@functools.lru_cache()
def are_fifos_supported():
with unopened_tempfile() as filepath:
try:
os.mkfifo(filepath)
return True
except OSError:
pass
except NotImplementedError:
pass
except AttributeError:
pass
return False
@functools.lru_cache()
def is_utime_fully_supported():
with unopened_tempfile() as filepath:
# Some filesystems (such as SSHFS) don't support utime on symlinks
if are_symlinks_supported():
os.symlink('something', filepath)
else:
open(filepath, 'w').close()
try:
os.utime(filepath, (1000, 2000), follow_symlinks=False)
new_stats = os.stat(filepath, follow_symlinks=False)
if new_stats.st_atime == 1000 and new_stats.st_mtime == 2000:
return True
except OSError:
pass
except NotImplementedError:
pass
return False
@functools.lru_cache()
def is_birthtime_fully_supported():
if not hasattr(os.stat_result, 'st_birthtime'):
return False
with unopened_tempfile() as filepath:
# Some filesystems (such as SSHFS) don't support utime on symlinks
if are_symlinks_supported():
os.symlink('something', filepath)
else:
open(filepath, 'w').close()
try:
birthtime, mtime, atime = 946598400, 946684800, 946771200
os.utime(filepath, (atime, birthtime), follow_symlinks=False)
os.utime(filepath, (atime, mtime), follow_symlinks=False)
new_stats = os.stat(filepath, follow_symlinks=False)
if new_stats.st_birthtime == birthtime and new_stats.st_mtime == mtime and new_stats.st_atime == atime:
return True
except OSError:
pass
except NotImplementedError:
pass
return False
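# Hedged usage sketch: these cached feature probes are typically consumed as
# skip conditions; unittest.skipUnless is a standard-library API, while the
# test method below is purely hypothetical.
# @unittest.skipUnless(are_symlinks_supported(), 'symlinks not supported')
# def test_symlink_roundtrip(self):
#     ...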
def no_selinux(x):
# selinux fails our FUSE tests, thus ignore selinux xattrs
SELINUX_KEY = b'security.selinux'
if isinstance(x, dict):
return {k: v for k, v in x.items() if k != SELINUX_KEY}
if isinstance(x, list):
return [k for k in x if k != SELINUX_KEY]
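# Minimal runnable illustration (inputs are assumptions, not from this module):
# only the security.selinux entry is filtered out, everything else passes through.
def _example_no_selinux():
    xattrs = {b'security.selinux': b'ctx', b'user.comment': b'keep'}
    return no_selinux(xattrs)  # -> {b'user.comment': b'keep'}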
class BaseTestCase(unittest.TestCase):
"""
"""
assert_in = unittest.TestCase.assertIn
assert_not_in = unittest.TestCase.assertNotIn
assert_equal = unittest.TestCase.assertEqual
assert_not_equal = unittest.TestCase.assertNotEqual
assert_true = unittest.TestCase.assertTrue
if raises:
assert_raises = staticmethod(raises)
else:
assert_raises = unittest.TestCase.assertRaises
@contextmanager
def assert_creates_file(self, path):
self.assert_true(not os.path.exists(path), '{} should not exist'.format(path))
yield
self.assert_true(os.path.exists(path), '{} should exist'.format(path))
def assert_dirs_equal(self, dir1, dir2, **kwargs):
diff = filecmp.dircmp(dir1, dir2)
self._assert_dirs_equal_cmp(diff, **kwargs)
def _assert_dirs_equal_cmp(self, diff, ignore_flags=False, ignore_xattrs=False, ignore_ns=False):
self.assert_equal(diff.left_only, [])
self.assert_equal(diff.right_only, [])
self.assert_equal(diff.diff_files, [])
self.assert_equal(diff.funny_files, [])
for filename in diff.common:
path1 = os.path.join(diff.left, filename)
path2 = os.path.join(diff.right, filename)
s1 = os.stat(path1, follow_symlinks=False)
s2 = os.stat(path2, follow_symlinks=False)
# Assume path2 is on FUSE if st_dev is different
fuse = s1.st_dev != s2.st_dev
attrs = ['st_uid', 'st_gid', 'st_rdev']
if not fuse or not os.path.isdir(path1):
# dir nlink is always 1 on our FUSE filesystem
attrs.append('st_nlink')
d1 = [filename] + [getattr(s1, a) for a in attrs]
d2 = [filename] + [getattr(s2, a) for a in attrs]
d1.insert(1, oct(s1.st_mode))
d2.insert(1, oct(s2.st_mode))
if not ignore_flags:
d1.append(get_flags(path1, s1))
d2.append(get_flags(path2, s2))
# ignore st_rdev if file is not a block/char device, fixes #203
if not stat.S_ISCHR(s1.st_mode) and not stat.S_ISBLK(s1.st_mode):
d1[4] = None
if not stat.S_ISCHR(s2.st_mode) and not stat.S_ISBLK(s2.st_mode):
d2[4] = None
# If utime isn't fully supported, borg can't set mtime.
# Therefore, we shouldn't test it in that case.
if is_utime_fully_supported():
# Older versions of llfuse do not support ns precision properly
if ignore_ns:
d1.append(int(s1.st_mtime_ns / 1e9))
d2.append(int(s2.st_mtime_ns / 1e9))
elif fuse and not have_fuse_mtime_ns:
d1.append(round(s1.st_mtime_ns, -4))
d2.append(round(s2.st_mtime_ns, -4))
else:
d1.append(round(s1.st_mtime_ns, st_mtime_ns_round))
d2.append(round(s2.st_mtime_ns, st_mtime_ns_round))
if not ignore_xattrs:
d1.append(no_selinux(get_all(path1, follow_symlinks=False)))
d2.append(no_selinux(get_all(path2, follow_symlinks=False)))
self.assert_equal(d1, d2)
for sub_diff in diff.subdirs.values():
self._assert_dirs_equal_cmp(sub_diff, ignore_flags=ignore_flags, ignore_xattrs=ignore_xattrs, ignore_ns=ignore_ns)
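    # Hedged usage sketch: a test typically extracts an archive and compares the
    # resulting tree with the original input, relaxing checks the target
    # filesystem cannot satisfy; paths and flags here are assumptions.
    # self.assert_dirs_equal('input', 'output/input', ignore_xattrs=True)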
@contextmanager
def fuse_mount(self, location, mountpoint=None, *options, fork=True, os_fork=False, **kwargs):
# For a successful mount, `fork = True` is required for
# the borg mount daemon to work properly or the tests
# will just freeze. Therefore, if argument `fork` is not
# specified, the default value is `True`, regardless of
        # `FORK_DEFAULT`. However, leaving the possibility to run
# the command with `fork = False` is still necessary for
# testing for mount failures, for example attempting to
# mount a read-only repo.
# `os_fork = True` is needed for testing (the absence of)
# a race condition of the Lock during lock migration when
# borg mount (local repo) is daemonizing (#4953). This is another
# example where we need `fork = False`, because the test case
# needs an OS fork, not a spawning of the fuse mount.
# `fork = False` is implied if `os_fork = True`.
if mountpoint is None:
mountpoint = tempfile.mkdtemp()
else:
os.mkdir(mountpoint)
args = ['mount', location, mountpoint] + list(options)
if os_fork:
# Do not spawn, but actually (OS) fork.
if os.fork() == 0:
# The child process.
# Decouple from parent and fork again.
# Otherwise, it becomes a zombie and pretends to be alive.
os.setsid()
if os.fork() > 0:
os._exit(0)
# The grandchild process.
try:
self.cmd(*args, fork=False, **kwargs) # borg mount not spawning.
finally:
# This should never be reached, since it daemonizes,
# and the grandchild process exits before cmd() returns.
# However, just in case...
print('Fatal: borg mount did not daemonize properly. Force exiting.',
file=sys.stderr, flush=True)
os._exit(0)
else:
self.cmd(*args, fork=fork, **kwargs)
if kwargs.get('exit_code', EXIT_SUCCESS) == EXIT_ERROR:
# If argument `exit_code = EXIT_ERROR`, then this call
# is testing the behavior of an unsuccessful mount and
# we must not continue, as there is no mount to work
# with. The test itself has already failed or succeeded
# with the call to `self.cmd`, above.
yield
return
self.wait_for_mountstate(mountpoint, mounted=True)
yield
umount(mountpoint)
self.wait_for_mountstate(mountpoint, mounted=False)
os.rmdir(mountpoint)
# Give the daemon some time to exit
time.sleep(0.2)
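    # Hypothetical usage sketch (the repository location attribute is an
    # assumption): the manager blocks until the mount is live, then unmounts
    # and waits for the daemon on exit.
    # with self.fuse_mount(self.repository_location):
    #     ...  # inspect the mounted archive contents here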
def wait_for_mountstate(self, mountpoint, *, mounted, timeout=5):
"""Wait until a path meets specified mount point status"""
timeout += time.time()
while timeout > time.time():
if os.path.ismount(mountpoint) == mounted:
return
time.sleep(0.1)
message = 'Waiting for %s of %s' % ('mount' if mounted else 'umount', mountpoint)
raise TimeoutError(message)
@contextmanager
def read_only(self, path):
"""Some paths need to be made read-only for testing
If the tests are executed inside a fakeroot environment, the
changes from chmod won't affect the real permissions of that
folder. This issue is circumvented by temporarily disabling
fakeroot with `LD_PRELOAD=`.
Using chmod to remove write permissions is not enough if the
tests are running with root privileges. Instead, the folder is
rendered immutable with chattr or chflags, respectively.
"""
if sys.platform.startswith('linux'):
cmd_immutable = 'chattr +i "%s"' % path
cmd_mutable = 'chattr -i "%s"' % path
elif sys.platform.startswith(('darwin', 'freebsd', 'netbsd', 'openbsd')):
cmd_immutable = 'chflags uchg "%s"' % path
cmd_mutable = 'chflags nouchg "%s"' % path
elif sys.platform.startswith('sunos'): # openindiana
cmd_immutable = 'chmod S+vimmutable "%s"' % path
cmd_mutable = 'chmod S-vimmutable "%s"' % path
else:
message = 'Testing read-only repos is not supported on platform %s' % sys.platform
self.skipTest(message)
try:
os.system('LD_PRELOAD= chmod -R ugo-w "%s"' % path)
os.system(cmd_immutable)
yield
finally:
# Restore permissions to ensure clean-up doesn't fail
os.system(cmd_mutable)
os.system('LD_PRELOAD= chmod -R ugo+w "%s"' % path)
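    # Hedged usage sketch for read_only (the path and command are assumptions):
    # with self.read_only('repository'):
    #     self.cmd('create', ...)  # expected to fail against the read-only repo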
class changedir:
def __init__(self, dir):
self.dir = dir
def __enter__(self):
self.old = os.getcwd()
os.chdir(self.dir)
def __exit__(self, *args, **kw):
os.chdir(self.old)
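# Minimal usage sketch (the directory name is an assumption): the working
# directory is switched on entry and restored on exit, even if the body raises.
# with changedir('output'):
#     ...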
class environment_variable:
def __init__(self, **values):
self.values = values
self.old_values = {}
def __enter__(self):
for k, v in self.values.items():
self.old_values[k] = os.environ.get(k)
if v is None:
os.environ.pop(k, None)
else:
os.environ[k] = v
def __exit__(self, *args, **kw):
for k, v in self.old_values.items():
if v is None:
os.environ.pop(k, None)
else:
os.environ[k] = v
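# Usage sketch (variable names are assumptions): each value is set on entry and
# the previous state restored on exit; passing None unsets a variable.
# with environment_variable(BORG_PASSPHRASE='secret', TZ=None):
#     ...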
class FakeInputs:
"""Simulate multiple user inputs, can be used as input() replacement"""
def __init__(self, inputs):
self.inputs = inputs
def __call__(self, prompt=None):
if prompt is not None:
print(prompt, end='')
try:
return self.inputs.pop(0)
except IndexError:
raise EOFError from None
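# Minimal runnable sketch (not part of the upstream module): FakeInputs pops
# queued answers in order and raises EOFError once they are exhausted.
def _example_fake_inputs():
    fake = FakeInputs(['yes', 'no'])
    assert fake('continue? ') == 'yes'  # the prompt is echoed without a newline
    assert fake() == 'no'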
| 36.703608
| 126
| 0.607261
|