# Generated by Django 2.2.11 on 2021-09-17 10:14
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facility', '0274_auto_20210910_1647'),
]
operations = [
migrations.AlterField(
model_name='dailyround',
name='resp',
field=models.IntegerField(default=None, null=True, validators=[django.core.validators.MinValueValidator(10), django.core.validators.MaxValueValidator(70)]),
),
]
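# Note: MinValueValidator/MaxValueValidator only run when the model is validated
# (e.g. via full_clean() or a ModelForm), not on a bare save(). A minimal sketch,
# assuming the model class behind 'dailyround' is DailyRound (class and import
# path are assumptions, not taken from this migration):
#
#   round = DailyRound(resp=80)
#   round.full_clean()  # raises ValidationError, since 80 > 70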
|
#!/usr/bin/python
import sys
import string
import re
import subprocess
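# Reads LLVM assembly on stdin, assembles it to bitcode via llvm-as, and prints a
# C source fragment on stdout: an `unsigned char builtins_bitcode_<target>[]` array
# holding the bitcode bytes plus an int with its length. <target> is derived from
# the builtins-* source path passed as argv[1].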
length=0
src=str(sys.argv[1])
target = re.sub(".*builtins-", "", src)
target = re.sub("\.ll$", "", target)
target = re.sub("\.c$", "", target)
target = re.sub("-", "_", target)
try:
as_out=subprocess.Popen([ "llvm-as", "-", "-o", "-"], stdout=subprocess.PIPE)
except IOError:
print >> sys.stderr, "Couldn't open " + src
sys.exit(1)
print "unsigned char builtins_bitcode_" + target + "[] = {"
for line in as_out.stdout.readlines():
length = length + len(line)
for c in line:
print ord(c)
print ", "
print " 0 };\n\n"
print "int builtins_bitcode_" + target + "_length = " + str(length) + ";\n"
as_out.wait()
sys.exit(as_out.returncode)
|
from .pyaabb import AABB
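# Package entry point: re-exports AABB so it can be imported directly from the package root.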
|
expected_output = {
'global_mdns_gateway': {
'active_query_timer': 'Enabled',
'active_query_timer_minutes': 30,
'active_query_timer_mode': 'default',
'active_response_timer': '20 Seconds',
'any_query_forward': 'Disabled',
'airprint_helper': 'Disabled',
'cache_sync_periodicity_minutes': 30,
'cache_sync_periodicity_mode': 'default',
'mdns_gateway': 'Enabled',
'mdns_query_type': 'PTR only',
'mode': 'SDG-Agent',
'rate_limit_mode': 'default',
'rate_limit_pps': 60,
'sdg_agent_ip': '40.1.3.1',
'service_eumeration_period': 'Default',
'source_interface': 'Vl1301',
'service_receiver_purge_timer': '60 Seconds',
'ingress_client_query_suppression': 'Disabled',
'service_record_ttl': 'original',
'next_advertisement_to_sdg': '00:00:06',
'next_query_to_sdg': '00:00:06',
'query_response_mode': 'Recurring (default)',
'sso': 'Inactive'
}
}
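# A minimal usage sketch for this golden output (the parser class name and device
# fixture are assumptions, not taken from this file):
#
#   parsed = ShowMdnsSdSummary(device=self.device).parse()
#   self.assertEqual(parsed, expected_output)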
|
import os
import pandas as pd
import sklearn
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, OrdinalEncoder, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split, cross_val_score, KFold, GridSearchCV
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C, WhiteKernel
dataset = pd.read_excel(r'data/regression/concrete-compressive-strength/Concrete_Data.xls', sheet_name=0)
y = dataset.pop('Concrete compressive ')
cat_si_step = ('si', SimpleImputer(strategy='constant', fill_value=-99))  # impute missing categoricals with a constant sentinel
ohe_step = ('ohe', OneHotEncoder(sparse=False, handle_unknown='ignore'))  # one-hot encode, ignoring categories unseen at fit time
num_si_step = ('si', SimpleImputer(strategy='constant'))
sc_step = ('sc', StandardScaler())
oe_step = ('le', OrdinalEncoder())
bin_si_step = ('si', SimpleImputer(strategy='most_frequent'))
cat_pipe = Pipeline([cat_si_step, ohe_step])
num_pipe = Pipeline([num_si_step, sc_step])
bin_pipe = Pipeline([bin_si_step, oe_step])
transformers = [
('cat', cat_pipe, []),
('num', num_pipe, ['Cement (component 1)(kg in a m^3 mixture)',
'Blast Furnace Slag (component 2)(kg in a m^3 mixture)',
'Fly Ash (component 3)(kg in a m^3 mixture)',
'Water (component 4)(kg in a m^3 mixture)',
'Superplasticizer (component 5)(kg in a m^3 mixture)',
'Coarse Aggregate (component 6)(kg in a m^3 mixture)',
'Fine Aggregate (component 7)(kg in a m^3 mixture)',
'Age (day)',
]),
('bin', bin_pipe, []),
]
ct = ColumnTransformer(transformers=transformers)
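# Note: the 'cat' and 'bin' pipelines are wired to empty column lists, so only the
# numeric pipeline acts on this all-numeric dataset; they are kept as placeholders
# for datasets that also have categorical/binary columns.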
# X_transformed = ct.fit_transform(dataset)
# print(X_transformed.shape)
linear_r_pipe = Pipeline([
('X_transform', ct),
('linear_r', LinearRegression(fit_intercept=True)),
])
kf = KFold(n_splits=5, shuffle=True)
# cv_score = cross_val_score(ml_pipe, dataset, y, cv=kf).mean()
linear_r_param_grid = {
'X_transform__num__si__strategy': ['mean', 'median'],
# 'mlp__solver': ['sgd', 'adam', 'lbfgs'],
# 'mlp__alpha': [1e-1, 1e-3, 1e-5],
# 'mlp__hidden_layer_sizes': [(10,), (20,), (5, 2), (4, 3), (4, 4)],
# 'mlp__activation': ['identity', 'logistic', 'tanh', 'relu'],
}
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
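# Note: this kernel is constructed but not passed to GaussianProcessRegressor below,
# so the regressor uses its default ConstantKernel * RBF kernel unless 'gp__kernel'
# is added to the parameter grid.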
gp_pipe = Pipeline([
('X_transform', ct),
('gp', GaussianProcessRegressor()),
])
gp_param_grid = {
'X_transform__num__si__strategy': ['mean', 'median'],
'gp__alpha': [0.1],
# 'gp__kernel': [1**2 * RBF(length_scale=100) + WhiteKernel(noise_level=1)],
# 'rf__n_estimators': range(1, 20),
# 'rf__criterion': ['mse', 'mae'],
}
scoring = 'r2'
gs_linear_r = GridSearchCV(linear_r_pipe, linear_r_param_grid, cv=kf, scoring=scoring)
gs_linear_r.fit(dataset, y)
print("##################### Linear Regression for ConcreteCS Dataset ############################")
print('CV Score', gs_linear_r.best_score_)
print('Training R^2 score', gs_linear_r.score(dataset, y))
print("##################### GaussianProcessRegressor for ConcreteCS Dataset #####################")
gs_gp = GridSearchCV(gp_pipe, gp_param_grid, cv=kf, scoring=scoring)
gs_gp.fit(dataset, y)
print('CV Score', gs_gp.best_score_)
print('Training R^2 score', gs_gp.score(dataset, y))
#
# file_name = os.path.splitext(os.path.basename(__file__))[0]
# with open(f'{file_name}-result.txt', mode='a', encoding='utf-8') as handler:
# handler.write(f'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ LINEAR REGRESSION FOR CONCRETE_C_STRENGTH DATA @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n')
# handler.write(f'The CV best {scoring} score: {gs_linear_r.best_score_:.4f}\n')
# handler.write(f'The train set {scoring} score: {gs_linear_r.score(dataset, y):.4f}\n')
# handler.write(f'{gs_linear_r.best_params_}\n')
# handler.write(f'{gs_linear_r.best_estimator_}\n')
# handler.write(f'@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ GAUSSIAN PROCESS FOR CONCRETE_C_STRENGTH DATA @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n')
# handler.write(f'The CV best {scoring} score: {gs_gp.best_score_:.4f}\n')
# handler.write(f'The train set {scoring} score: {gs_gp.score(dataset, y):.4f}\n')
# handler.write(f'{gs_gp.best_params_}\n')
# handler.write(f'{gs_gp.best_estimator_}\n')
# handler.write('#' * 120)
# handler.write('\n')
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from ecl.identity import identity_service
from ecl import service_filter
class TestValidVersion(testtools.TestCase):
def test_constructor(self):
sot = service_filter.ValidVersion('v1.0', 'v1')
self.assertEqual('v1.0', sot.module)
self.assertEqual('v1', sot.path)
class TestServiceFilter(testtools.TestCase):
def test_init(self):
sot = service_filter.ServiceFilter(
'ServiceType', region='REGION1', service_name='ServiceName',
version='1', api_version='1.23', requires_project_id=True)
self.assertEqual('servicetype', sot.service_type)
self.assertEqual('REGION1', sot.region)
self.assertEqual('ServiceName', sot.service_name)
self.assertEqual('1', sot.version)
self.assertEqual('1.23', sot.api_version)
self.assertTrue(sot.requires_project_id)
def test_get_module(self):
sot = identity_service.IdentityService()
self.assertEqual('ecl.identity.v3', sot.get_module())
self.assertEqual('identity', sot.get_service_module())
|
""" Tests of the knowledge base schema for prokaryotes
:Author: Yin Hoon Chew <yinhoon.chew@mssm.edu>
:Date: 2018-09-18
:Copyright: 2018, Karr Lab
:License: MIT
"""
from wc_kb import core, eukaryote
from wc_utils.util import chem
import Bio.Alphabet
import Bio.Seq
import Bio.SeqIO
import Bio.SeqUtils
import mendeleev
import os
import shutil
import tempfile
import unittest
class CellTestCase(unittest.TestCase):
def test_constructor(self):
cell = core.Cell(taxon=9606)
self.assertEqual(cell.knowledge_base, None)
self.assertEqual(cell.taxon, 9606)
self.assertEqual(cell.observables, [])
self.assertEqual(cell.species_types, [])
self.assertEqual(cell.compartments, [])
self.assertEqual(cell.reactions, [])
self.assertEqual(cell.loci, [])
class GenericLocusTestCase(unittest.TestCase):
def test_serialize(self):
gen_locus1 = eukaryote.GenericLocus(start=102, end=137)
self.assertEqual(gen_locus1.serialize(), '102:137')
class LocusAttributeTestCase(unittest.TestCase):
def test_LocusAttribute(self):
gen_locus1 = eukaryote.GenericLocus(start=102, end=137)
gen_locus2 = eukaryote.GenericLocus(start=285, end=379)
self.assertEqual(eukaryote.LocusAttribute().serialize(
coordinates=[gen_locus1, gen_locus2]), '102:137, 285:379')
objects = {
eukaryote.GenericLocus:
{
'102:137': gen_locus1,
'285:379': gen_locus2,
}
}
result = eukaryote.LocusAttribute().deserialize(
value='102:137, 285:379', objects=objects)
self.assertEqual(result[0][0].start, 102)
self.assertEqual(result[0][0].end, 137)
self.assertEqual(result[0][1].start, 285)
self.assertEqual(result[0][1].end, 379)
self.assertEqual(result[1], None)
class TranscriptSpeciesTypeTestCase(unittest.TestCase):
def setUp(self):
self.tmp_dirname = tempfile.mkdtemp()
self.sequence_path = os.path.join(self.tmp_dirname, 'test_seq.fasta')
with open(self.sequence_path, 'w') as f:
f.write('>dna1\nACGTACGTACGTACGTTTT\n'
'>dna2\nAcTGAGTTACGTACGTTTT\n'
'>dna3\nACGT\n'
'>dna4\nATAT\n'
'>dna5\nAAAA\n'
'>dna6\nAACCGGTT\n')
def tearDown(self):
shutil.rmtree(self.tmp_dirname)
def test_constructor(self):
dna1 = core.DnaSpeciesType(id='dna1', sequence_path=self.sequence_path)
gene1 = eukaryote.GeneLocus(polymer=dna1, start=1, end=15)
transcript1 = eukaryote.TranscriptSpeciesType(
id='t1', name='transcript1', gene=gene1, type=eukaryote.TranscriptType.mRna)
self.assertEqual(transcript1.id, 't1')
self.assertEqual(transcript1.name, 'transcript1')
self.assertEqual(transcript1.gene, gene1)
self.assertEqual(transcript1.type.name, 'mRna')
self.assertEqual(transcript1.exons, [])
self.assertEqual(transcript1.comments, '')
self.assertEqual(transcript1.references, [])
self.assertEqual(transcript1.identifiers, [])
exon1 = eukaryote.GenericLocus(start=1, end=1)
exon2 = eukaryote.GenericLocus(start=2, end=2)
transcript1.exons = [exon1, exon2]
transcript2 = eukaryote.TranscriptSpeciesType(
id='t2', name='transcript2', gene=gene1, exons=[exon2])
self.assertEqual(transcript1.exons, [exon1, exon2])
self.assertEqual(transcript2.exons, [exon2])
def test_get_seq(self):
dna1 = core.DnaSpeciesType(id='dna2', sequence_path=self.sequence_path)
gene1 = eukaryote.GeneLocus(
polymer=dna1, start=1, end=15, strand=core.PolymerStrand.positive)
exon1_1 = eukaryote.GenericLocus(start=1, end=4)
exon1_2 = eukaryote.GenericLocus(start=7, end=8)
transcript1 = eukaryote.TranscriptSpeciesType(
gene=gene1, exons=[exon1_1, exon1_2])
gene2 = eukaryote.GeneLocus(
polymer=dna1, start=4, end=18, strand=core.PolymerStrand.negative)
exon2_1 = eukaryote.GenericLocus(start=4, end=10)
exon2_2 = eukaryote.GenericLocus(start=14, end=16)
transcript2 = eukaryote.TranscriptSpeciesType(
gene=gene2, exons=[exon2_1, exon2_2])
self.assertEqual(transcript1.get_seq(), 'AcUGUU')
self.assertEqual(transcript2.get_seq(), 'ACGGUAACUC')
def test_get_empirical_formula(self):
dna1 = core.DnaSpeciesType(id='dna3', sequence_path=self.sequence_path)
gene1 = eukaryote.GeneLocus(polymer=dna1, start=1, end=4)
exon1 = eukaryote.GenericLocus(start=1, end=1)
transcript1 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon1])
self.assertEqual(transcript1.get_empirical_formula(),
chem.EmpiricalFormula('C10H12N5O7P'))
exon2 = eukaryote.GenericLocus(start=2, end=2)
transcript2 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon2])
self.assertEqual(transcript2.get_empirical_formula(),
chem.EmpiricalFormula('C9H12N3O8P'))
exon3 = eukaryote.GenericLocus(start=3, end=3)
transcript3 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon3])
self.assertEqual(transcript3.get_empirical_formula(),
chem.EmpiricalFormula('C10H12N5O8P'))
exon4 = eukaryote.GenericLocus(start=4, end=4)
transcript4 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon4])
self.assertEqual(transcript4.get_empirical_formula(),
chem.EmpiricalFormula('C9H11N2O9P'))
dna2 = core.DnaSpeciesType(id='dna4', sequence_path=self.sequence_path)
gene2 = eukaryote.GeneLocus(polymer=dna2, start=1, end=4)
exon5_1 = eukaryote.GenericLocus(start=1, end=1)
exon5_2 = eukaryote.GenericLocus(start=3, end=3)
transcript5 = eukaryote.TranscriptSpeciesType(gene=gene2, exons=[exon5_1, exon5_2])
self.assertEqual(transcript5.get_empirical_formula(),
chem.EmpiricalFormula('C20H23N10O13P2'))
# Test using input sequence
test_trans = eukaryote.TranscriptSpeciesType()
self.assertEqual(test_trans.get_empirical_formula(seq_input=Bio.Seq.Seq('AA')),
chem.EmpiricalFormula('C20H23N10O13P2'))
def test_get_charge(self):
dna1 = core.DnaSpeciesType(id='dna5', sequence_path=self.sequence_path)
gene1 = eukaryote.GeneLocus(polymer=dna1, start=1, end=1)
exon1 = eukaryote.GenericLocus(start=1, end=1)
transcript1 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon1])
self.assertEqual(transcript1.get_charge(), -2)
gene2 = eukaryote.GeneLocus(polymer=dna1, start=2, end=4)
exon2_1 = eukaryote.GenericLocus(start=2, end=2)
exon2_2 = eukaryote.GenericLocus(start=4, end=4)
transcript2 = eukaryote.TranscriptSpeciesType(gene=gene2, exons=[exon2_1, exon2_2])
self.assertEqual(transcript2.get_charge(), -3)
# Test using input sequence
test_trans = eukaryote.TranscriptSpeciesType()
self.assertEqual(test_trans.get_charge(seq_input=Bio.Seq.Seq('CG')), -3)
def test_get_mol_wt(self):
dna1 = core.DnaSpeciesType(id='dna6', sequence_path=self.sequence_path)
gene1 = eukaryote.GeneLocus(polymer=dna1, start=1, end=6)
exon1 = eukaryote.GenericLocus(start=1, end=1)
transcript1 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon1])
exp_mol_wt = \
+ Bio.SeqUtils.molecular_weight(transcript1.get_seq()) \
- (transcript1.get_len() + 1) * mendeleev.element('H').atomic_weight
self.assertAlmostEqual(transcript1.get_mol_wt(), exp_mol_wt, places=1)
exon2 = eukaryote.GenericLocus(start=3, end=3)
transcript2 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon2])
exp_mol_wt = \
+ Bio.SeqUtils.molecular_weight(transcript2.get_seq()) \
- (transcript2.get_len() + 1) * mendeleev.element('H').atomic_weight
self.assertAlmostEqual(transcript2.get_mol_wt(), exp_mol_wt, places=1)
exon3 = eukaryote.GenericLocus(start=5, end=5)
transcript3 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon3])
exp_mol_wt = \
+ Bio.SeqUtils.molecular_weight(transcript3.get_seq()) \
- (transcript3.get_len() + 1) * mendeleev.element('H').atomic_weight
self.assertAlmostEqual(transcript3.get_mol_wt(), exp_mol_wt, places=1)
# Test using input sequence
test_trans = eukaryote.TranscriptSpeciesType()
exp_mol_wt = \
+ Bio.SeqUtils.molecular_weight(transcript1.get_seq()) \
- (transcript1.get_len() + 1) * mendeleev.element('H').atomic_weight
self.assertAlmostEqual(test_trans.get_mol_wt(seq_input=Bio.Seq.Seq('A')), exp_mol_wt, places=1)
class ProteinSpeciesTypeTestCase(unittest.TestCase):
def setUp(self):
self.tmp_dirname = tempfile.mkdtemp()
sequence_path = os.path.join(self.tmp_dirname, 'test_seq.fasta')
with open(sequence_path, 'w') as f:
f.write('>dna1\nTTTATGAARGTNCTCATHAAYAARAAYGARCTCTAGTTTATGAARTTYAARTTYCTCCTCACNCCNCTCTAATTT\n')
dna1 = core.DnaSpeciesType(id='dna1', sequence_path=sequence_path)
cell1 = dna1.cell = core.Cell()
gene1 = eukaryote.GeneLocus(polymer=dna1, start=1, end=36)
exon1 = eukaryote.GenericLocus(start=4, end=36)
transcript1 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon1])
cds1 = eukaryote.GenericLocus(start=4, end=36)
self.prot1 = eukaryote.ProteinSpeciesType(id='prot1', name='protein1',
uniprot='Q12X34', transcript=transcript1, coding_regions=[cds1])
gene2 = eukaryote.GeneLocus(polymer=dna1,
start=30, end=75, strand=core.PolymerStrand.positive)
exon2_1 = eukaryote.GenericLocus(start=32, end=35)
exon2_2 = eukaryote.GenericLocus(start=38, end=45)
exon2_3 = eukaryote.GenericLocus(start=49, end=54)
exon2_4 = eukaryote.GenericLocus(start=55, end=72)
exon2_5 = eukaryote.GenericLocus(start=73, end=74)
transcript2 = eukaryote.TranscriptSpeciesType(
gene=gene2, exons=[exon2_1, exon2_2, exon2_3, exon2_4, exon2_5])
cds2_2 = eukaryote.GenericLocus(start=40, end=45)
cds2_3 = eukaryote.GenericLocus(start=49, end=54)
cds2_4 = eukaryote.GenericLocus(start=55, end=72)
self.prot2 = eukaryote.ProteinSpeciesType(id='prot2', name='protein2',
uniprot='P12345', cell=cell1, transcript=transcript2,
coding_regions=[cds2_4, cds2_2, cds2_3])
gene3 = eukaryote.GeneLocus(polymer=dna1, start=70, end=75)
exon3 = eukaryote.GenericLocus(start=70, end=75)
transcript3 = eukaryote.TranscriptSpeciesType(gene=gene3, exons=[exon3])
cds3 = eukaryote.GenericLocus(start=70, end=75)
self.prot3 = eukaryote.ProteinSpeciesType(id='prot3', name='protein3',
uniprot='Q12121', transcript=transcript3, coding_regions=[cds3])
def tearDown(self):
shutil.rmtree(self.tmp_dirname)
def test_constructor(self):
self.assertEqual(self.prot1.id, 'prot1')
self.assertEqual(self.prot1.name, 'protein1')
self.assertEqual(self.prot1.uniprot, 'Q12X34')
self.assertEqual(self.prot1.coding_regions[0].serialize(), '4:36')
self.assertEqual(self.prot1.comments, '')
self.assertEqual(self.prot1.references, [])
self.assertEqual(self.prot1.identifiers, [])
self.assertEqual(self.prot1.cell, None)
def test_get_seq(self):
# Default translation table used is 1 (standard)
self.assertEqual(self.prot1.get_seq(), 'MKVLINKNEL')
self.assertEqual(self.prot2.get_seq(), 'MKKFLLTPL')
def test_get_seq_and_start_codon(self):
# Default translation table used is 1 (standard)
coding_rna_seq, aa_seq, start_codon = self.prot1.get_seq_and_start_codon()
self.assertEqual(coding_rna_seq, 'AUGAARGUNCUCAUHAAYAARAAYGARCUCUAG')
self.assertEqual(aa_seq, 'MKVLINKNEL')
self.assertEqual(start_codon, 'AUG')
coding_rna_seq, aa_seq, start_codon = self.prot2.get_seq_and_start_codon()
self.assertEqual(coding_rna_seq, 'AUGAARAARUUYCUCCUCACNCCNCUCUAA')
self.assertEqual(aa_seq, 'MKKFLLTPL')
self.assertEqual(start_codon, 'AUG')
coding_rna_seq, aa_seq, start_codon = self.prot3.get_seq_and_start_codon(cds=False)
self.assertEqual(coding_rna_seq, 'UAAUUU')
self.assertEqual(aa_seq, '*F')
self.assertEqual(start_codon, 'UUU')
def test_get_empirical_formula(self):
# Default translation table used is 1 (standard)
self.assertEqual(self.prot1.get_empirical_formula(),
chem.EmpiricalFormula('C53H96N14O15S1'))
self.assertEqual(self.prot2.get_empirical_formula(),
chem.EmpiricalFormula('C53H91N11O11S1'))
# Test using input sequence
test_prot = eukaryote.ProteinSpeciesType()
self.assertEqual(test_prot.get_empirical_formula(seq_input=Bio.Seq.Seq('MKVLINKNEL')),
chem.EmpiricalFormula('C53H96N14O15S1'))
self.assertEqual(test_prot.get_empirical_formula(seq_input=Bio.Seq.Seq('MKKFLLTPL')),
chem.EmpiricalFormula('C53H91N11O11S1'))
def test_get_mol_wt(self):
# Default translation table used is 1 (standard)
self.assertAlmostEqual(self.prot1.get_mol_wt(), 1201.49, delta=0.3)
self.assertAlmostEqual(self.prot2.get_mol_wt(), 1090.43, delta=0.3)
# Test using input sequence
test_prot = eukaryote.ProteinSpeciesType()
self.assertAlmostEqual(test_prot.get_mol_wt(seq_input=Bio.Seq.Seq('MKVLINKNEL')), 1201.49, delta=0.3)
self.assertAlmostEqual(test_prot.get_mol_wt(seq_input=Bio.Seq.Seq('MKKFLLTPL')), 1090.43, delta=0.3)
def test_get_charge(self):
# Default translation table used is 1 (standard)
self.assertEqual(self.prot1.get_charge(), 1)
self.assertEqual(self.prot2.get_charge(), 2)
# Test using input sequence
test_prot = eukaryote.ProteinSpeciesType()
self.assertEqual(test_prot.get_charge(seq_input=Bio.Seq.Seq('MKVLINKNEL')), 1)
self.assertEqual(test_prot.get_charge(seq_input=Bio.Seq.Seq('MKKFLLTPL')), 2)
class ComplexSpeciesTypeTestCase(unittest.TestCase):
def test_ComplexSpeciesType(self):
self.tmp_dirname = tempfile.mkdtemp()
sequence_path = os.path.join(self.tmp_dirname, 'test_seq.fasta')
with open(sequence_path, 'w') as f:
f.write('>dna1\nTTTATGAARGTNCTCATHAAYAARAAYGARCTCTAGTTTATGAARTTYAARTTYCTCCTCACNCCNCTCTAATTT\n')
dna1 = core.DnaSpeciesType(id='dna1', sequence_path=sequence_path)
# Protein subunit 1
gene1 = eukaryote.GeneLocus(polymer=dna1, start=1, end=36)
exon1 = eukaryote.GenericLocus(start=4, end=36)
transcript1 = eukaryote.TranscriptSpeciesType(gene=gene1, exons=[exon1])
cds1 = eukaryote.GenericLocus(start=4, end=36)
prot1 = eukaryote.ProteinSpeciesType(transcript=transcript1, coding_regions=[cds1])
# Protein subunit 2
gene2 = eukaryote.GeneLocus(polymer=dna1, start=37, end=75)
exon2 = eukaryote.GenericLocus(start=40, end=72)
transcript2 = eukaryote.TranscriptSpeciesType(gene=gene2, exons=[exon2])
cds2 = eukaryote.GenericLocus(start=40, end=72)
prot2 = eukaryote.ProteinSpeciesType(transcript=transcript2, coding_regions=[cds2])
species_coeff1 = core.SpeciesTypeCoefficient(
species_type=prot1, coefficient=2)
species_coeff2 = core.SpeciesTypeCoefficient(
species_type=prot2, coefficient=3)
complex1 = core.ComplexSpeciesType(
subunits = [species_coeff1, species_coeff2])
self.assertEqual(complex1.get_charge(), 8)
self.assertAlmostEqual(complex1.get_mol_wt(),
(2*prot1.get_mol_wt() + 3 * prot2.get_mol_wt()))
self.assertEqual(complex1.get_empirical_formula(),
chem.EmpiricalFormula('C292H492N64O66S5'))
shutil.rmtree(self.tmp_dirname)
class GeneLocusTestCase(unittest.TestCase):
def test_constructor(self):
gene = eukaryote.GeneLocus(id='gene1', name='gene1', symbol='gene_1',
strand=core.PolymerStrand.negative, start=1, end=2)
self.assertEqual(gene.id, 'gene1')
self.assertEqual(gene.name, 'gene1')
self.assertEqual(gene.symbol, 'gene_1')
self.assertEqual(gene.strand, core.PolymerStrand.negative)
self.assertEqual(gene.start, 1)
self.assertEqual(gene.end, 2)
self.assertEqual(gene.comments, '')
self.assertEqual(gene.references, [])
self.assertEqual(gene.identifiers, [])
class TranscriptionFactorRegulationTestCase(unittest.TestCase):
def test_constructor(self):
tf1 = eukaryote.ProteinSpeciesType(id='tf1')
tf2 = eukaryote.ProteinSpeciesType(id='tf2')
tf_reg1 = eukaryote.TranscriptionFactorRegulation(
transcription_factor=tf1,
direction=eukaryote.RegulatoryDirection.repression)
tf_reg2 = eukaryote.TranscriptionFactorRegulation(
transcription_factor=tf2,
direction=eukaryote.RegulatoryDirection.activation)
tf_reg3 = eukaryote.TranscriptionFactorRegulation(
transcription_factor=tf2,
direction=eukaryote.RegulatoryDirection.repression)
self.assertEqual(tf_reg1.transcription_factor, tf1)
self.assertEqual(tf_reg1.direction.name, 'repression')
self.assertEqual(tf_reg2.direction.name, 'activation')
self.assertEqual(tf_reg3.transcription_factor, tf2)
def test_serialize(self):
tf1 = eukaryote.ProteinSpeciesType(id='tf1')
tf_reg1 = eukaryote.TranscriptionFactorRegulation(
transcription_factor=tf1,
direction=eukaryote.RegulatoryDirection.repression)
tf_reg2 = eukaryote.TranscriptionFactorRegulation(
transcription_factor=tf1,
direction=eukaryote.RegulatoryDirection.activation)
self.assertEqual(tf_reg1.serialize(), 'tf1:repression')
self.assertEqual(tf_reg2.serialize(), 'tf1:activation')
def test_deserialize(self):
tf1 = eukaryote.ProteinSpeciesType(id='tf1')
objects = {
eukaryote.ProteinSpeciesType: {
'tf1': tf1,
},
}
result = eukaryote.TranscriptionFactorRegulation.deserialize('tf1:activation', objects)
self.assertEqual(result[0].transcription_factor, tf1)
self.assertEqual(result[0].direction, eukaryote.RegulatoryDirection.activation)
self.assertEqual(result[1], None)
result = eukaryote.TranscriptionFactorRegulation.deserialize('tf1:repression', objects)
self.assertEqual(result[0].transcription_factor, tf1)
self.assertEqual(result[0].direction, eukaryote.RegulatoryDirection.repression)
self.assertEqual(result[1], None)
result = eukaryote.TranscriptionFactorRegulation.deserialize('tf1:unknown', objects)
self.assertEqual(result[0].transcription_factor, tf1)
self.assertEqual(result[0].direction, eukaryote.RegulatoryDirection.unknown)
self.assertEqual(result[1], None)
result = eukaryote.TranscriptionFactorRegulation.deserialize('tf:activation', objects)
self.assertEqual(result[0], None)
result = eukaryote.TranscriptionFactorRegulation.deserialize('tf1:1', objects)
self.assertEqual(result[0], None)
result = eukaryote.TranscriptionFactorRegulation.deserialize('tf1:error', objects)
self.assertEqual(result[0], None)
result = eukaryote.TranscriptionFactorRegulation.deserialize('tf1:3.6', objects)
self.assertEqual(result[0], None)
class RegulatoryModuleTestCase(unittest.TestCase):
def test_constructor(self):
dna1 = core.DnaSpeciesType(circular=False, double_stranded=False)
gene1 = eukaryote.GeneLocus(polymer=dna1, start=9, end=15)
gene2 = eukaryote.GeneLocus(polymer=dna1, start=17, end=18)
promoter1 = 'ENSR00000172399'
promoter2 = 'ENSR00000309980'
tf1 = eukaryote.ProteinSpeciesType(id='tf1')
tf2 = eukaryote.ProteinSpeciesType(id='tf2')
reg_module1 = eukaryote.RegulatoryModule(
gene=gene1,
promoter=promoter1,
activity=eukaryote.ActivityLevel.active,
type=eukaryote.RegulationType.proximal,
transcription_factor_regulation=[
eukaryote.TranscriptionFactorRegulation(
transcription_factor=tf1,
direction=eukaryote.RegulatoryDirection.activation),
eukaryote.TranscriptionFactorRegulation(
transcription_factor=tf2,
direction=eukaryote.RegulatoryDirection.repression)
])
reg_module2 = eukaryote.RegulatoryModule(
gene=gene1,
promoter=promoter1,
activity=eukaryote.ActivityLevel.active,
type=eukaryote.RegulationType.distal,
transcription_factor_regulation=[eukaryote.TranscriptionFactorRegulation(
transcription_factor=tf1,
direction=eukaryote.RegulatoryDirection.repression)])
reg_module3 = eukaryote.RegulatoryModule(
id='rm3',
name='reg_module3',
activity=eukaryote.ActivityLevel.inactive,
gene=gene2,
promoter=promoter2)
self.assertEqual(reg_module1.gene, gene1)
self.assertEqual(reg_module1.promoter, promoter1)
self.assertEqual(reg_module1.activity.name, 'active')
self.assertEqual(reg_module1.type.value, 1)
self.assertEqual(sorted([i.transcription_factor.id for i in reg_module1.transcription_factor_regulation]),
['tf1', 'tf2'])
self.assertEqual(sorted([i.direction.name for i in reg_module1.transcription_factor_regulation]),
['activation', 'repression'])
self.assertEqual(reg_module2.gene, gene1)
self.assertEqual(reg_module2.promoter, promoter1)
self.assertEqual(reg_module2.activity.name, 'active')
self.assertEqual(reg_module2.type.value, 2)
self.assertEqual(reg_module2.transcription_factor_regulation[0].transcription_factor, tf1)
self.assertEqual(reg_module2.transcription_factor_regulation[0].direction.name, 'repression')
self.assertEqual(reg_module3.id, 'rm3')
self.assertEqual(reg_module3.activity.name, 'inactive')
self.assertEqual(reg_module3.name, 'reg_module3')
self.assertEqual(reg_module3.promoter, promoter2)
class PtmSiteTestCase(unittest.TestCase):
def test_constructor(self):
# defining modified protein name
mp = eukaryote.ProteinSpeciesType(id='mp')
# testing example of modification: one protein one modified site
ptm1 = eukaryote.PtmSite(id='protptm1', name='ptm1', modified_protein=mp,
type='phosphorylation', modified_residue='s145', fractional_abundance='0.5', comments='oneprot_onesite')
self.assertEqual(ptm1.id, 'protptm1')
self.assertEqual(ptm1.name, 'ptm1')
self.assertEqual(ptm1.modified_protein, mp)
self.assertEqual(ptm1.type, 'phosphorylation')
self.assertEqual(ptm1.modified_residue, 's145')
self.assertEqual(ptm1.fractional_abundance, '0.5')
self.assertEqual(ptm1.comments, 'oneprot_onesite')
self.assertEqual(ptm1.references, [])
self.assertEqual(ptm1.identifiers, [])
|
import os
from collections import OrderedDict, defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.optim import AdamW
from meta_neural_network_architectures import VGGActivationNormNetwork, \
VGGActivationNormNetworkWithAttention
from meta_optimizer import LSLRGradientDescentLearningRule
from pytorch_utils import int_to_one_hot
from standard_neural_network_architectures import TaskRelationalEmbedding, \
SqueezeExciteDenseNetEmbeddingSmallNetwork, CriticNetwork, VGGEmbeddingNetwork
def set_torch_seed(seed):
"""
Sets the pytorch seeds for current experiment run
:param seed: The seed (int)
:return: A random number generator to use
"""
rng = np.random.RandomState(seed=seed)
torch_seed = rng.randint(0, 999999)
torch.manual_seed(seed=torch_seed)
return rng
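# Usage sketch: rng = set_torch_seed(seed=0) seeds torch from the given seed and
# returns a NumPy RandomState for any additional sampling in the experiment.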
class MAMLFewShotClassifier(nn.Module):
def __init__(self, batch_size, seed, num_classes_per_set, num_samples_per_support_class,
num_samples_per_target_class, image_channels,
num_filters, num_blocks_per_stage, num_stages, dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps, init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate, total_epochs, weight_decay, meta_learning_rate, **kwargs):
"""
Initializes a MAML few shot learning system
:param im_shape: The images input size, in batch, c, h, w shape
:param device: The device to run the model on.
:param args: A namedtuple of arguments specifying various hyperparameters.
"""
super(MAMLFewShotClassifier, self).__init__()
self.batch_size = batch_size
self.current_epoch = -1
self.rng = set_torch_seed(seed=seed)
self.num_classes_per_set = num_classes_per_set
self.num_samples_per_support_class = num_samples_per_support_class
self.num_samples_per_target_class = num_samples_per_target_class
self.image_channels = image_channels
self.num_filters = num_filters
self.num_blocks_per_stage = num_blocks_per_stage
self.num_stages = num_stages
self.dropout_rate = dropout_rate
self.output_spatial_dimensionality = output_spatial_dimensionality
self.image_height = image_height
self.image_width = image_width
self.num_support_set_steps = num_support_set_steps
self.init_learning_rate = init_learning_rate
self.num_target_set_steps = num_target_set_steps
self.conditional_information = conditional_information
self.min_learning_rate = min_learning_rate
self.total_epochs = total_epochs
self.weight_decay = weight_decay
self.meta_learning_rate = meta_learning_rate
for key, value in kwargs.items():
setattr(self, key, value)
self.device = torch.device('cpu')
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.clip_grads = True
self.rng = set_torch_seed(seed=seed)
self.build_module()
def build_module(self):
raise NotImplementedError
def setup_optimizer(self):
exclude_param_string = None if "none" in self.exclude_param_string else self.exclude_param_string
self.optimizer = optim.Adam(self.trainable_parameters(exclude_params_with_string=exclude_param_string),
lr=0.001,
weight_decay=self.weight_decay, amsgrad=False)
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer=self.optimizer,
T_max=self.total_epochs,
eta_min=0.001)
print('min learning rate', self.min_learning_rate)
self.to(self.device)
print("Inner Loop parameters")
num_params = 0
for key, value in self.inner_loop_optimizer.named_parameters():
print(key, value.shape)
num_params += np.prod(value.shape)
print('Total inner loop parameters', num_params)
print("Outer Loop parameters")
num_params = 0
for name, param in self.named_parameters():
if param.requires_grad:
print(name, param.shape)
num_params += np.prod(param.shape)
print('Total outer loop parameters', num_params)
print("Memory parameters")
num_params = 0
for name, param in self.get_params_that_include_strings(included_strings=['classifier']):
if param.requires_grad:
print(name, param.shape)
num_params += np.prod(param.shape)
print('Total Memory parameters', num_params)
def get_params_that_include_strings(self, included_strings, include_all=False):
for name, param in self.named_parameters():
if any([included_string in name for included_string in included_strings]) and not include_all:
yield name, param
if all([included_string in name for included_string in included_strings]) and include_all:
yield name, param
def get_per_step_loss_importance_vector(self):
"""
Generates a tensor of dimensionality (num_inner_loop_steps) indicating the importance of each step's target
loss towards the optimization loss.
:return: A tensor to be used to compute the weighted average of the loss, useful for
the MSL (Multi Step Loss) mechanism.
"""
loss_weights = np.ones(shape=(self.number_of_training_steps_per_iter)) * (
1.0 / self.number_of_training_steps_per_iter)
decay_rate = 1.0 / self.number_of_training_steps_per_iter / self.multi_step_loss_num_epochs
min_value_for_non_final_losses = self.minimum_per_task_contribution / self.number_of_training_steps_per_iter
for i in range(len(loss_weights) - 1):
curr_value = np.maximum(loss_weights[i] - (self.current_epoch * decay_rate), min_value_for_non_final_losses)
loss_weights[i] = curr_value
curr_value = np.minimum(
loss_weights[-1] + (self.current_epoch * (self.number_of_training_steps_per_iter - 1) * decay_rate),
1.0 - ((self.number_of_training_steps_per_iter - 1) * min_value_for_non_final_losses))
loss_weights[-1] = curr_value
loss_weights = torch.Tensor(loss_weights).to(device=self.device)
return loss_weights
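# Worked example of the schedule above (values assumed): with
# number_of_training_steps_per_iter=5 the weights start uniform at [0.2]*5; every
# epoch moves decay_rate of weight from each non-final step onto the final step,
# so the vector anneals towards [min, min, min, min, 1 - 4*min], i.e. effectively
# the plain last-step target loss.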
def apply_inner_loop_update(self, loss, names_weights_copy, use_second_order, current_step_idx):
"""
Applies an inner loop update given current step's loss, the weights to update, a flag indicating whether to use
second order derivatives and the current step's index.
:param loss: Current step's loss with respect to the support set.
:param names_weights_copy: A dictionary with names to parameters to update.
:param use_second_order: A boolean flag of whether to use second order derivatives.
:param current_step_idx: Current step's index.
:return: A dictionary with the updated weights (name, param)
"""
self.classifier.zero_grad(params=names_weights_copy)
grads = torch.autograd.grad(loss, names_weights_copy.values(),
create_graph=use_second_order, allow_unused=True)
names_grads_copy = dict(zip(names_weights_copy.keys(), grads))
for key, grad in names_grads_copy.items():
if grad is None:
print('NOT FOUND INNER LOOP', key)
names_weights_copy = self.inner_loop_optimizer.update_params(names_weights_dict=names_weights_copy,
names_grads_wrt_params_dict=names_grads_copy,
num_step=current_step_idx)
return names_weights_copy
def get_inner_loop_parameter_dict(self, params, exclude_strings=None):
"""
Returns a dictionary with the parameters to use for inner loop updates.
:param params: A dictionary of the network's parameters.
:return: A dictionary of the parameters to use for the inner loop optimization process.
"""
param_dict = dict()
if exclude_strings is None:
exclude_strings = []
for name, param in params:
if param.requires_grad:
if all([item not in name for item in exclude_strings]):
if "norm_layer" not in name and 'bn' not in name and 'prelu' not in name:
param_dict[name] = param.to(device=self.device)
return param_dict
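# Note: parameters whose names contain 'norm_layer', 'bn' or 'prelu' (and anything
# matching exclude_strings) are filtered out here, so they are not adapted in the
# inner loop.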
def net_forward(self, x, y, weights, backup_running_statistics, training, num_step,
return_features=False):
"""
A base model forward pass on some data points x, using the parameters in the weights dictionary. Also requires
a boolean flag indicating whether to reset the running statistics at the end of the run (if at evaluation phase),
a flag indicating whether this is the training session, and an int indicating the current step's number in the
inner loop.
:param x: A data batch of shape b, c, h, w
:param y: A data targets batch of shape b, n_classes
:param weights: A dictionary containing the weights to pass to the network.
:param backup_running_statistics: A flag indicating whether to reset the batch norm running statistics to their
previous values after the run (only for evaluation)
:param training: A flag indicating whether the current phase is training or evaluation.
:param num_step: An integer indicating the number of the step in the inner loop.
:return: the crossentropy losses with respect to the given y, the predictions of the base model.
"""
if return_features:
preds, features = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step,
return_features=return_features)
loss = F.cross_entropy(preds, y)
return loss, preds, features
else:
preds = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step)
loss = F.cross_entropy(preds, y)
return loss, preds
def trainable_parameters(self, exclude_params_with_string=None):
"""
Returns an iterator over the trainable parameters of the model.
"""
for name, param in self.named_parameters():
if exclude_params_with_string is not None:
if param.requires_grad and all(
list([exclude_string not in name for exclude_string in exclude_params_with_string])):
yield param
else:
if param.requires_grad:
yield param
def trainable_names_parameters(self, exclude_params_with_string=None):
"""
Returns an iterator over the trainable parameters of the model.
"""
for name, param in self.named_parameters():
if exclude_params_with_string is not None:
if param.requires_grad and all(
list([exclude_string not in name for exclude_string in exclude_params_with_string])):
yield (name, param)
else:
if param.requires_grad:
yield (name, param)
def train_forward_prop(self, data_batch, epoch):
"""
Runs an outer loop forward prop using the meta-model and base-model.
:param data_batch: A data batch containing the support set and the target set input, output pairs.
:param epoch: The index of the current epoch.
:return: A dictionary of losses for the current step.
"""
losses, per_task_preds = self.forward(data_batch=data_batch, epoch=epoch,
use_second_order=self.second_order and
epoch > self.first_order_to_second_order_epoch,
use_multi_step_loss_optimization=self.use_multi_step_loss_optimization,
num_steps=self.number_of_training_steps_per_iter,
training_phase=True)
return losses, per_task_preds
def evaluation_forward_prop(self, data_batch, epoch):
"""
Runs an outer loop evaluation forward prop using the meta-model and base-model.
:param data_batch: A data batch containing the support set and the target set input, output pairs.
:param epoch: The index of the current epoch.
:return: A dictionary of losses for the current step.
"""
losses, per_task_preds = self.forward(data_batch=data_batch, epoch=epoch, use_second_order=False,
use_multi_step_loss_optimization=self.use_multi_step_loss_optimization,
num_steps=self.number_of_evaluation_steps_per_iter,
training_phase=False)
return losses, per_task_preds
def meta_update(self, loss, exclude_string_list=None, retain_graph=False):
"""
Applies an outer loop update on the meta-parameters of the model.
:param loss: The current crossentropy loss.
"""
self.optimizer.zero_grad()
loss.backward(retain_graph=retain_graph)
if 'imagenet' in self.dataset_name:
for name, param in self.trainable_names_parameters(exclude_params_with_string=exclude_string_list):
#
if self.clip_grads and param.grad is None and param.requires_grad:
print(name, 'no grad information computed')
# else:
# print("passed", name)
else:
if param.grad is None:
print('no grad information computed', name)
# print('No Grad', name, param.shape)
if self.clip_grads and param.grad is not None and param.requires_grad:
param.grad.data.clamp_(-10, 10)
self.optimizer.step()
class EmbeddingMAMLFewShotClassifier(MAMLFewShotClassifier):
def __init__(self, batch_size, seed, num_classes_per_set, num_samples_per_support_class,
num_samples_per_target_class, image_channels,
num_filters, num_blocks_per_stage, num_stages, dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps, init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate, total_epochs, weight_decay, meta_learning_rate, **kwargs):
"""
Initializes a MAML few shot learning system
:param im_shape: The images input size, in batch, c, h, w shape
:param device: The device to run the model on.
:param args: A namedtuple of arguments specifying various hyperparameters.
"""
super(EmbeddingMAMLFewShotClassifier, self).__init__(batch_size, seed, num_classes_per_set,
num_samples_per_support_class,
num_samples_per_target_class, image_channels,
num_filters, num_blocks_per_stage, num_stages,
dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps,
init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate, total_epochs,
weight_decay, meta_learning_rate, **kwargs)
def param_dict_to_vector(self, param_dict):
param_list = []
for name, param in param_dict.items():
param_list.append(param.view(-1, 1))
param_as_vector = torch.cat(param_list, dim=0)
return param_as_vector
def param_vector_to_param_dict(self, param_vector, names_params_dict):
new_names_params_dict = dict()
cur_idx = 0
for name, param in names_params_dict.items():
new_names_params_dict[name] = param_vector[cur_idx:cur_idx + param.view(-1).shape[0]].view(param.shape)
cur_idx += param.view(-1).shape[0]
return new_names_params_dict
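# Round-trip sketch: for a parameter dict d,
#   vec = self.param_dict_to_vector(d)
#   d2 = self.param_vector_to_param_dict(vec, d)
# rebuilds tensors with the original names and shapes.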
def build_module(self):
support_set_shape = (
self.num_classes_per_set * self.num_samples_per_support_class,
self.image_channels,
self.image_height, self.image_width)
target_set_shape = (
self.num_classes_per_set * self.num_samples_per_target_class,
self.image_channels,
self.image_height, self.image_width)
x_support_set = torch.ones(support_set_shape)
x_target_set = torch.ones(target_set_shape)
# task_size = x_target_set.shape[0]
x_target_set = x_target_set.view(-1, x_target_set.shape[-3], x_target_set.shape[-2], x_target_set.shape[-1])
x_support_set = x_support_set.view(-1, x_support_set.shape[-3], x_support_set.shape[-2],
x_support_set.shape[-1])
num_target_samples = x_target_set.shape[0]
num_support_samples = x_support_set.shape[0]
self.dense_net_embedding = SqueezeExciteDenseNetEmbeddingSmallNetwork(
im_shape=torch.cat([x_support_set, x_target_set], dim=0).shape, num_filters=self.num_filters,
num_blocks_per_stage=self.num_blocks_per_stage,
num_stages=self.num_stages, average_pool_outputs=False, dropout_rate=self.dropout_rate,
output_spatial_dimensionality=self.output_spatial_dimensionality, use_channel_wise_attention=True)
task_features = self.dense_net_embedding.forward(
x=torch.cat([x_support_set, x_target_set], dim=0), dropout_training=True)
task_features = task_features.squeeze()
encoded_x = task_features
support_set_features = F.avg_pool2d(encoded_x[:num_support_samples], encoded_x.shape[-1]).squeeze()
self.current_iter = 0
output_units = int(self.num_classes_per_set if self.overwrite_classes_in_each_task else \
(self.num_classes_per_set * self.num_support_sets) / self.class_change_interval)
self.classifier = VGGActivationNormNetworkWithAttention(input_shape=encoded_x.shape,
num_output_classes=output_units,
num_stages=1, use_channel_wise_attention=True,
num_filters=48,
num_support_set_steps=2 *
self.num_support_sets
* self.num_support_set_steps,
num_target_set_steps=self.num_support_set_steps + 1,
num_blocks_per_stage=1)
print("init learning rate", self.init_learning_rate)
names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters())
if self.num_target_set_steps > 0:
preds, penultimate_features_x = self.classifier.forward(x=encoded_x, num_step=0, return_features=True)
self.task_relational_network = None
relational_embedding_shape = None
x_support_set_task = F.avg_pool2d(
encoded_x[:self.num_classes_per_set * (self.num_samples_per_support_class)],
encoded_x.shape[-1]).squeeze()
x_target_set_task = F.avg_pool2d(
encoded_x[self.num_classes_per_set * (self.num_samples_per_support_class):],
encoded_x.shape[-1]).squeeze()
x_support_set_classifier_features = F.avg_pool2d(penultimate_features_x[
:self.num_classes_per_set * (
self.num_samples_per_support_class)],
penultimate_features_x.shape[-2]).squeeze()
x_target_set_classifier_features = F.avg_pool2d(
penultimate_features_x[self.num_classes_per_set * (self.num_samples_per_support_class):],
penultimate_features_x.shape[-2]).squeeze()
self.critic_network = CriticNetwork(
task_embedding_shape=relational_embedding_shape,
num_classes_per_set=self.num_classes_per_set,
support_set_feature_shape=x_support_set_task.shape,
target_set_feature_shape=x_target_set_task.shape,
support_set_classifier_pre_last_features=x_support_set_classifier_features.shape,
target_set_classifier_pre_last_features=x_target_set_classifier_features.shape,
num_target_samples=self.num_samples_per_target_class,
num_support_samples=self.num_samples_per_support_class,
logit_shape=preds[self.num_classes_per_set * (self.num_samples_per_support_class):].shape,
conditional_information=self.conditional_information,
support_set_label_shape=(
self.num_classes_per_set * (self.num_samples_per_support_class), self.num_classes_per_set))
self.inner_loop_optimizer = LSLRGradientDescentLearningRule(
total_num_inner_loop_steps=2 * (
self.num_support_sets * self.num_support_set_steps) + self.num_target_set_steps + 1,
learnable_learning_rates=self.learnable_learning_rates,
init_learning_rate=self.init_learning_rate)
self.inner_loop_optimizer.initialise(names_weights_dict=names_weights_copy)
print("Inner Loop parameters")
num_params = 0
for key, value in self.inner_loop_optimizer.named_parameters():
print(key, value.shape)
num_params += np.prod(value.shape)
print('Total inner loop parameters', num_params)
print("Outer Loop parameters")
num_params = 0
for name, param in self.named_parameters():
if param.requires_grad:
print(name, param.shape)
num_params += np.prod(param.shape)
print('Total outer loop parameters', num_params)
print("Memory parameters")
num_params = 0
for name, param in self.get_params_that_include_strings(included_strings=['classifier']):
if param.requires_grad:
print(name, param.shape)
product = 1
for item in param.shape:
product = product * item
num_params += product
print('Total Memory parameters', num_params)
self.exclude_list = None
self.switch_opt_params(exclude_list=self.exclude_list)
self.device = torch.device('cpu')
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
if torch.cuda.device_count() > 1:
self.to(self.device)
self.dense_net_embedding = nn.DataParallel(module=self.dense_net_embedding)
else:
self.to(self.device)
def switch_opt_params(self, exclude_list):
print("current trainable params")
for name, param in self.trainable_names_parameters(exclude_params_with_string=exclude_list):
print(name, param.shape)
self.optimizer = optim.Adam(self.trainable_parameters(exclude_list), lr=self.meta_learning_rate,
weight_decay=self.weight_decay, amsgrad=False)
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer=self.optimizer, T_max=self.total_epochs,
eta_min=self.min_learning_rate)
def net_forward(self, x, y, weights, backup_running_statistics, training, num_step,
return_features=False):
"""
A base model forward pass on some data points x, using the parameters in the weights dictionary. Also requires
a boolean flag indicating whether to reset the running statistics at the end of the run (if at evaluation phase),
a flag indicating whether this is the training session, and an int indicating the current step's number in the
inner loop.
:param x: A data batch of shape b, c, h, w
:param y: A data targets batch of shape b, n_classes
:param weights: A dictionary containing the weights to pass to the network.
:param backup_running_statistics: A flag indicating whether to reset the batch norm running statistics to their
previous values after the run (only for evaluation)
:param training: A flag indicating whether the current phase is training or evaluation.
:param num_step: An integer indicating the number of the step in the inner loop.
:return: the crossentropy losses with respect to the given y, the predictions of the base model.
"""
outputs = {"loss": 0., "preds": 0, "features": 0.}
if return_features:
outputs['preds'], outputs['features'] = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step,
return_features=return_features)
if type(outputs['preds']) == tuple:
if len(outputs['preds']) == 2:
outputs['preds'] = outputs['preds'][0]
outputs['loss'] = F.cross_entropy(outputs['preds'], y)
else:
outputs['preds'] = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step)
if type(outputs['preds']) == tuple:
if len(outputs['preds']) == 2:
outputs['preds'] = outputs['preds'][0]
outputs['loss'] = F.cross_entropy(outputs['preds'], y)
return outputs
def get_per_step_loss_importance_vector(self, current_epoch):
"""
Generates a tensor of dimensionality (num_inner_loop_steps) indicating the importance of each step's target
loss towards the optimization loss.
:return: A tensor to be used to compute the weighted average of the loss, useful for
the MSL (Multi Step Loss) mechanism.
"""
loss_weights = torch.ones(size=(self.number_of_training_steps_per_iter * self.num_support_sets,),
device=self.device) / (
self.number_of_training_steps_per_iter * self.num_support_sets)
early_steps_decay_rate = (1. / (
self.number_of_training_steps_per_iter * self.num_support_sets)) / 100.
loss_weights = loss_weights - (early_steps_decay_rate * current_epoch)
loss_weights = torch.max(input=loss_weights,
other=torch.ones(loss_weights.shape, device=self.device) * 0.001)
loss_weights[-1] = 1. - torch.sum(loss_weights[:-1])
return loss_weights
def forward(self, data_batch, epoch, use_second_order, use_multi_step_loss_optimization, num_steps, training_phase):
"""
Runs a forward outer loop pass on the batch of tasks using the MAML/++ framework.
:param data_batch: A data batch containing the support and target sets.
:param epoch: Current epoch's index
:param use_second_order: A boolean saying whether to use second order derivatives.
:param use_multi_step_loss_optimization: Whether to use the multi step loss, which improves the stability of
the system (True), or to optimize the outer loop using just the last step's target loss (False)
:param num_steps: Number of inner loop steps.
:param training_phase: Whether this is a training phase (True) or an evaluation phase (False)
:return: A dictionary with the collected losses of the current outer forward propagation.
"""
x_support_set, x_target_set, y_support_set, y_target_set, _, _ = data_batch
self.classifier.zero_grad()
total_per_step_losses = []
total_per_step_accuracies = []
per_task_preds = []
num_losses = 2
importance_vector = torch.Tensor([1.0 / num_losses for i in range(num_losses)]).to(self.device)
step_magnitude = (1.0 / num_losses) / self.total_epochs
current_epoch_step_magnitude = torch.ones(1).to(self.device) * (step_magnitude * (epoch + 1))
importance_vector[0] = importance_vector[0] - current_epoch_step_magnitude
importance_vector[1] = importance_vector[1] + current_epoch_step_magnitude
pre_target_loss_update_loss = []
pre_target_loss_update_acc = []
post_target_loss_update_loss = []
post_target_loss_update_acc = []
for task_id, (x_support_set_task, y_support_set_task, x_target_set_task, y_target_set_task) in \
enumerate(zip(x_support_set,
y_support_set,
x_target_set,
y_target_set)):
names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters())
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
names_weights_copy = {
name.replace('module.', ''): value.unsqueeze(0).repeat(
[num_devices] + [1 for i in range(len(value.shape))]) for
name, value in names_weights_copy.items()}
c, h, w = x_target_set_task.shape[-3:]
x_target_set_task = x_target_set_task.view(-1, c, h, w).to(self.device)
y_target_set_task = y_target_set_task.view(-1).to(self.device)
x_support_set_task = x_support_set_task.view(-1, c, h, w).to(self.device)
y_support_set_task = y_support_set_task.to(self.device)
image_embedding = self.dense_net_embedding.forward(
x=torch.cat([x_support_set_task, x_target_set_task], dim=0), dropout_training=True)
x_support_set_task = image_embedding[:x_support_set_task.shape[0]]
x_target_set_task = image_embedding[x_support_set_task.shape[0]:]
x_support_set_task = x_support_set_task.view(
(self.num_support_sets, self.num_classes_per_set, self.num_samples_per_support_class,
x_support_set_task.shape[-3],
x_support_set_task.shape[-2], x_support_set_task.shape[-1]))
target_set_per_step_loss = []
importance_weights = self.get_per_step_loss_importance_vector(current_epoch=self.current_epoch)
step_idx = 0
for sub_task_id, (x_support_set_sub_task, y_support_set_sub_task) in enumerate(zip(x_support_set_task,
y_support_set_task)):
x_support_set_sub_task = x_support_set_sub_task.view(-1, x_support_set_task.shape[-3],
x_support_set_task.shape[-2],
x_support_set_task.shape[-1])
y_support_set_sub_task = y_support_set_sub_task.view(-1)
if self.num_target_set_steps > 0:
x_support_set_sub_task_features = F.avg_pool2d(x_support_set_sub_task,
x_support_set_sub_task.shape[-1]).squeeze()
x_target_set_task_features = F.avg_pool2d(x_target_set_task,
x_target_set_task.shape[-1]).squeeze()
task_embedding = None
else:
task_embedding = None
# print(x_target_set_task.shape, x_target_set_task_features.shape)
for num_step in range(self.num_support_set_steps):
support_outputs = self.net_forward(x=x_support_set_sub_task,
y=y_support_set_sub_task,
weights=names_weights_copy,
backup_running_statistics=
True if (num_step == 0) else False,
training=True,
num_step=step_idx,
return_features=True)
names_weights_copy = self.apply_inner_loop_update(loss=support_outputs['loss'],
names_weights_copy=names_weights_copy,
use_second_order=use_second_order,
current_step_idx=step_idx)
step_idx += 1
if self.use_multi_step_loss_optimization:
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
target_set_per_step_loss.append(target_outputs['loss'])
step_idx += 1
if not self.use_multi_step_loss_optimization:
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
target_set_loss = target_outputs['loss']
step_idx += 1
else:
target_set_loss = torch.sum(
torch.stack(target_set_per_step_loss, dim=0) * importance_weights)
for num_step in range(self.num_target_set_steps):
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
predicted_loss = self.critic_network.forward(logits=target_outputs['preds'],
task_embedding=task_embedding)
names_weights_copy = self.apply_inner_loop_update(loss=predicted_loss,
names_weights_copy=names_weights_copy,
use_second_order=use_second_order,
current_step_idx=step_idx)
step_idx += 1
if self.num_target_set_steps > 0:
post_update_outputs = self.net_forward(
x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
post_update_loss, post_update_target_preds, post_updated_target_features = post_update_outputs[
'loss'], \
post_update_outputs[
'preds'], \
post_update_outputs[
'features']
step_idx += 1
else:
post_update_loss, post_update_target_preds, post_updated_target_features = target_set_loss, \
target_outputs['preds'], \
target_outputs[
'features']
pre_target_loss_update_loss.append(target_set_loss)
pre_softmax_target_preds = F.softmax(target_outputs['preds'], dim=1).argmax(dim=1)
pre_update_accuracy = torch.eq(pre_softmax_target_preds,
y_target_set_task).data.cpu().float().mean()
pre_target_loss_update_acc.append(pre_update_accuracy)
post_target_loss_update_loss.append(post_update_loss)
post_softmax_target_preds = F.softmax(post_update_target_preds, dim=1).argmax(dim=1)
post_update_accuracy = torch.eq(post_softmax_target_preds,
y_target_set_task).data.cpu().float().mean()
post_target_loss_update_acc.append(post_update_accuracy)
loss = target_outputs['loss'] * importance_vector[0] + post_update_loss * importance_vector[1]
total_per_step_losses.append(loss)
total_per_step_accuracies.append(post_update_accuracy)
per_task_preds.append(post_update_target_preds.detach().cpu().numpy())
if not training_phase:
self.classifier.restore_backup_stats()
x_support_set_sub_task = x_support_set_sub_task.to(torch.device('cpu'))
y_support_set_sub_task = y_support_set_sub_task.to(torch.device('cpu'))
x_target_set_task = x_target_set_task.to(torch.device('cpu'))
y_target_set_task = y_target_set_task.to(torch.device('cpu'))
loss_metric_dict = dict()
        loss_metric_dict['pre_target_loss_update_loss'] = pre_target_loss_update_loss
loss_metric_dict['pre_target_loss_update_acc'] = pre_target_loss_update_acc
loss_metric_dict['post_target_loss_update_loss'] = post_target_loss_update_loss
loss_metric_dict['post_target_loss_update_acc'] = post_target_loss_update_acc
losses = self.get_across_task_loss_metrics(total_losses=total_per_step_losses,
total_accuracies=total_per_step_accuracies,
loss_metrics_dict=loss_metric_dict)
return losses, per_task_preds
def load_model(self, model_save_dir, model_name, model_idx):
"""
Load checkpoint and return the state dictionary containing the network state params and experiment state.
:param model_save_dir: The directory from which to load the files.
        :param model_name: The model_name to be loaded from the directory.
:param model_idx: The index of the model (i.e. epoch number or 'latest' for the latest saved model of the current
experiment)
:return: A dictionary containing the experiment state and the saved model parameters.
"""
filepath = os.path.join(model_save_dir, "{}_{}".format(model_name, model_idx))
state = torch.load(filepath, map_location='cpu')
net = dict(state['network'])
state['network'] = OrderedDict(net)
state_dict_loaded = state['network']
self.load_state_dict(state_dict=state_dict_loaded)
self.starting_iter = state['current_iter']
return state
def run_train_iter(self, data_batch, epoch, current_iter):
"""
Runs an outer loop update step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
:param epoch: the index of the current epoch
        :return: The losses of the iteration that was run.
"""
epoch = int(epoch)
self.scheduler.step(epoch=epoch)
if self.current_epoch != epoch:
self.current_epoch = epoch
if not self.training:
self.train()
losses, per_task_preds = self.train_forward_prop(data_batch=data_batch, epoch=epoch)
exclude_string = None
self.meta_update(loss=losses['loss'], exclude_string_list=exclude_string)
losses['opt:learning_rate'] = self.scheduler.get_lr()[0]
losses['opt:weight_decay'] = self.weight_decay
self.zero_grad()
self.current_iter += 1
return losses, per_task_preds
def run_validation_iter(self, data_batch):
"""
Runs an outer loop evaluation step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
        :return: The losses of the iteration that was run.
"""
if self.training:
self.eval()
losses, per_task_preds = self.evaluation_forward_prop(data_batch=data_batch, epoch=self.current_epoch)
return losses, per_task_preds
def save_model(self, model_save_dir, state):
"""
Save the network parameter state and experiment state dictionary.
:param model_save_dir: The directory to store the state at.
:param state: The state containing the experiment state and the network. It's in the form of a dictionary
object.
"""
state['network'] = self.state_dict()
torch.save(state, f=model_save_dir)
def get_across_task_loss_metrics(self, total_losses, total_accuracies, loss_metrics_dict):
losses = dict()
losses['loss'] = torch.mean(torch.stack(total_losses), dim=(0,))
losses['accuracy'] = torch.mean(torch.stack(total_accuracies), dim=(0,))
if 'saved_logits' in loss_metrics_dict:
losses['saved_logits'] = loss_metrics_dict['saved_logits']
del loss_metrics_dict['saved_logits']
for name, value in loss_metrics_dict.items():
losses[name] = torch.stack(value).mean()
for idx_num_step, (name, learning_rate_num_step) in enumerate(self.inner_loop_optimizer.named_parameters()):
for idx, learning_rate in enumerate(learning_rate_num_step.mean().view(1)):
losses['task_learning_rate_num_step_{}_{}'.format(idx_num_step,
name)] = learning_rate.detach().cpu().numpy()
return losses
class VGGMAMLFewShotClassifier(MAMLFewShotClassifier):
def __init__(self, batch_size, seed, num_classes_per_set, num_samples_per_support_class, image_channels,
num_filters, num_blocks_per_stage, num_stages, dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps, init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate, total_epochs, weight_decay, meta_learning_rate,
num_samples_per_target_class, **kwargs):
"""
Initializes a MAML few shot learning system
:param im_shape: The images input size, in batch, c, h, w shape
        :param device: The device on which to run the model.
:param args: A namedtuple of arguments specifying various hyperparameters.
"""
super(VGGMAMLFewShotClassifier, self).__init__(batch_size, seed, num_classes_per_set,
num_samples_per_support_class,
num_samples_per_target_class, image_channels,
num_filters, num_blocks_per_stage, num_stages,
dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps,
init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate, total_epochs,
weight_decay, meta_learning_rate, **kwargs)
self.batch_size = batch_size
self.current_epoch = -1
self.rng = set_torch_seed(seed=seed)
self.num_classes_per_set = num_classes_per_set
self.num_samples_per_support_class = num_samples_per_support_class
self.image_channels = image_channels
self.num_filters = num_filters
self.num_blocks_per_stage = num_blocks_per_stage
self.num_stages = num_stages
self.dropout_rate = dropout_rate
self.output_spatial_dimensionality = output_spatial_dimensionality
self.image_height = image_height
self.image_width = image_width
self.num_support_set_steps = num_support_set_steps
self.init_learning_rate = init_learning_rate
self.num_target_set_steps = num_target_set_steps
self.conditional_information = conditional_information
self.min_learning_rate = min_learning_rate
self.total_epochs = total_epochs
self.weight_decay = weight_decay
self.meta_learning_rate = meta_learning_rate
self.current_epoch = -1
for key, value in kwargs.items():
setattr(self, key, value)
self.rng = set_torch_seed(seed=seed)
def param_dict_to_vector(self, param_dict):
param_list = []
for name, param in param_dict.items():
param_list.append(param.view(-1, 1))
param_as_vector = torch.cat(param_list, dim=0)
return param_as_vector
def param_vector_to_param_dict(self, param_vector, names_params_dict):
new_names_params_dict = dict()
cur_idx = 0
for name, param in names_params_dict.items():
new_names_params_dict[name] = param_vector[cur_idx:cur_idx + param.view(-1).shape[0]].view(param.shape)
cur_idx += param.view(-1).shape[0]
return new_names_params_dict
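    # Hedged usage sketch (editor's addition, not original code): the two helpers above
    # are intended to round-trip a parameter dictionary through a flat column vector:
    #
    #   params = {'w': torch.randn(3, 4), 'b': torch.randn(3)}        # illustrative names
    #   vec = model.param_dict_to_vector(params)                      # shape (15, 1)
    #   restored = model.param_vector_to_param_dict(vec, params)
    #   assert all(torch.equal(params[k], restored[k]) for k in params)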
def build_module(self):
support_set_shape = (
self.num_classes_per_set * self.num_samples_per_support_class,
self.image_channels,
self.image_height, self.image_width)
target_set_shape = (
self.num_classes_per_set * self.num_samples_per_target_class,
self.image_channels,
self.image_height, self.image_width)
x_support_set = torch.ones(support_set_shape)
x_target_set = torch.ones(target_set_shape)
# task_size = x_target_set.shape[0]
x_target_set = x_target_set.view(-1, x_target_set.shape[-3], x_target_set.shape[-2], x_target_set.shape[-1])
x_support_set = x_support_set.view(-1, x_support_set.shape[-3], x_support_set.shape[-2],
x_support_set.shape[-1])
num_target_samples = x_target_set.shape[0]
num_support_samples = x_support_set.shape[0]
output_units = int(self.num_classes_per_set if self.overwrite_classes_in_each_task else \
(self.num_classes_per_set * self.num_support_sets) / self.class_change_interval)
self.current_iter = 0
self.classifier = VGGActivationNormNetwork(input_shape=torch.cat([x_support_set, x_target_set], dim=0).shape,
num_output_classes=output_units,
num_stages=4, use_channel_wise_attention=True,
num_filters=48,
num_support_set_steps=2 * self.num_support_sets * self.num_support_set_steps,
num_target_set_steps=self.num_target_set_steps + 1,
)
print("init learning rate", self.init_learning_rate)
names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters())
task_name_params = self.get_inner_loop_parameter_dict(self.named_parameters())
self.inner_loop_optimizer = LSLRGradientDescentLearningRule(
total_num_inner_loop_steps=2 * (
self.num_support_sets * self.num_support_set_steps) + self.num_target_set_steps + 1,
learnable_learning_rates=self.learnable_learning_rates,
init_learning_rate=self.init_learning_rate)
self.inner_loop_optimizer.initialise(names_weights_dict=names_weights_copy)
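        # Editor's note: the total inner-loop step budget handed to the LSLR optimizer is
        # 2 * (num_support_sets * num_support_set_steps) + num_target_set_steps + 1; the
        # factor of 2 leaves room for a target-set evaluation after every support-set
        # update (used when multi-step loss optimization is on), plus the critic-driven
        # target-set steps and one final post-update evaluation. step_idx in forward()
        # indexes the per-step learnable learning rates with this same ordering.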
print("Inner Loop parameters")
for key, value in self.inner_loop_optimizer.named_parameters():
print(key, value.shape)
print("Outer Loop parameters")
for name, param in self.named_parameters():
if param.requires_grad:
print(name, param.shape)
print("Memory parameters")
num_params = 0
for name, param in self.get_params_that_include_strings(included_strings=['classifier']):
if param.requires_grad:
print(name, param.shape)
product = 1
for item in param.shape:
product = product * item
num_params += product
print('Total Memory parameters', num_params)
self.exclude_list = None
self.switch_opt_params(exclude_list=self.exclude_list)
self.device = torch.device('cpu')
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
if torch.cuda.device_count() > 1:
self.to(self.device)
self.classifier = nn.DataParallel(module=self.classifier)
else:
self.to(self.device)
def switch_opt_params(self, exclude_list):
print("current trainable params")
for name, param in self.trainable_names_parameters(exclude_params_with_string=exclude_list):
print(name, param.shape)
self.optimizer = AdamW(self.trainable_parameters(exclude_list), lr=self.meta_learning_rate,
weight_decay=self.weight_decay, amsgrad=False)
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer=self.optimizer, T_max=self.total_epochs,
eta_min=self.min_learning_rate)
def net_forward(self, x, y, weights, backup_running_statistics, training, num_step,
return_features=False):
"""
        Performs a base model forward pass on some data points x, using the parameters in the weights dictionary.
        Also requires a flag indicating whether to reset the batch norm running statistics at the end of the run
        (used at evaluation time), a flag indicating whether this is a training pass, and an int indicating the
        current step's number in the inner loop.
:param x: A data batch of shape b, c, h, w
:param y: A data targets batch of shape b, n_classes
:param weights: A dictionary containing the weights to pass to the network.
:param backup_running_statistics: A flag indicating whether to reset the batch norm running statistics to their
previous values after the run (only for evaluation)
:param training: A flag indicating whether the current process phase is a training or evaluation.
:param num_step: An integer indicating the number of the step in the inner loop.
:return: the crossentropy losses with respect to the given y, the predictions of the base model.
"""
outputs = {"loss": 0., "preds": 0, "features": 0.}
if return_features:
outputs['preds'], outputs['features'] = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step,
return_features=return_features)
if type(outputs['preds']) == tuple:
if len(outputs['preds']) == 2:
outputs['preds'] = outputs['preds'][0]
outputs['loss'] = F.cross_entropy(outputs['preds'], y)
else:
outputs['preds'] = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step)
if type(outputs['preds']) == tuple:
if len(outputs['preds']) == 2:
outputs['preds'] = outputs['preds'][0]
outputs['loss'] = F.cross_entropy(outputs['preds'], y)
return outputs
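    # Hedged usage sketch (editor's addition; shapes are illustrative): for a 5-way task
    # with 3x84x84 images and a fast-weights dict from get_inner_loop_parameter_dict,
    #
    #   out = self.net_forward(x=torch.randn(25, 3, 84, 84),
    #                          y=torch.randint(0, 5, (25,)),
    #                          weights=names_weights_copy,
    #                          backup_running_statistics=True, training=True,
    #                          num_step=0, return_features=True)
    #
    # returns a dict where out['preds'] has shape (25, num_output_classes), out['loss'] is
    # a scalar cross-entropy tensor and out['features'] holds the penultimate activations.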
def get_per_step_loss_importance_vector(self, current_epoch):
"""
Generates a tensor of dimensionality (num_inner_loop_steps) indicating the importance of each step's target
loss towards the optimization loss.
:return: A tensor to be used to compute the weighted average of the loss, useful for
the MSL (Multi Step Loss) mechanism.
"""
loss_weights = torch.ones(size=(self.number_of_training_steps_per_iter * self.num_support_sets,),
device=self.device) / (
self.number_of_training_steps_per_iter * self.num_support_sets)
early_steps_decay_rate = (1. / (
self.number_of_training_steps_per_iter * self.num_support_sets)) / 100.
loss_weights = loss_weights - (early_steps_decay_rate * current_epoch)
loss_weights = torch.max(input=loss_weights,
other=torch.ones(loss_weights.shape, device=self.device) * 0.001)
loss_weights[-1] = 1. - torch.sum(loss_weights[:-1])
return loss_weights
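    # Editor's illustration (not original code): with number_of_training_steps_per_iter *
    # num_support_sets = 5, the weights start uniform at 0.2 each; every epoch the earlier
    # weights shrink by (1/5)/100 = 0.002 (floored at 0.001) and the last weight absorbs
    # the difference, e.g. at epoch 10 the vector is [0.18, 0.18, 0.18, 0.18, 0.28], so
    # optimization gradually concentrates on the final inner-loop step (MAML++-style MSL).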
def forward(self, data_batch, epoch, use_second_order, use_multi_step_loss_optimization, num_steps, training_phase):
"""
Runs a forward outer loop pass on the batch of tasks using the MAML/++ framework.
:param data_batch: A data batch containing the support and target sets.
:param epoch: Current epoch's index
:param use_second_order: A boolean saying whether to use second order derivatives.
        :param use_multi_step_loss_optimization: Whether to optimize the outer loop with the multi step loss,
        which improves the stability of the system (True), or with just the last step's target loss (False)
:param num_steps: Number of inner loop steps.
:param training_phase: Whether this is a training phase (True) or an evaluation phase (False)
:return: A dictionary with the collected losses of the current outer forward propagation.
"""
x_support_set, x_target_set, y_support_set, y_target_set, _, _ = data_batch
self.classifier.zero_grad()
total_per_step_losses = []
total_per_step_accuracies = []
per_task_preds = []
num_losses = 2
importance_vector = torch.Tensor([1.0 / num_losses for i in range(num_losses)]).to(self.device)
step_magnitude = (1.0 / num_losses) / self.total_epochs
current_epoch_step_magnitude = torch.ones(1).to(self.device) * (step_magnitude * (epoch + 1))
importance_vector[0] = importance_vector[0] - current_epoch_step_magnitude
importance_vector[1] = importance_vector[1] + current_epoch_step_magnitude
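        # Editor's note: importance_vector linearly shifts weight from the pre-update target
        # loss towards the post-update (critic-adapted) target loss as training progresses;
        # both entries start near 0.5. In this class the per-task loss below uses only
        # target_outputs['loss'], so the vector is computed but not applied.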
pre_target_loss_update_loss = []
pre_target_loss_update_acc = []
post_target_loss_update_loss = []
post_target_loss_update_acc = []
for task_id, (x_support_set_task, y_support_set_task, x_target_set_task, y_target_set_task) in \
enumerate(zip(x_support_set,
y_support_set,
x_target_set,
y_target_set)):
c, h, w = x_target_set_task.shape[-3:]
x_target_set_task = x_target_set_task.view(-1, c, h, w).to(self.device)
y_target_set_task = y_target_set_task.view(-1).to(self.device)
target_set_per_step_loss = []
importance_weights = self.get_per_step_loss_importance_vector(current_epoch=self.current_epoch)
step_idx = 0
names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters())
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
names_weights_copy = {
name.replace('module.', ''): value.unsqueeze(0).repeat(
[num_devices] + [1 for i in range(len(value.shape))]) for
name, value in names_weights_copy.items()}
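            # Editor's note: each fast weight gains a leading "device" dimension and is
            # repeated once per visible GPU so that, under nn.DataParallel, every replica
            # indexes its own copy of the parameter dict; the 'module.' prefix that
            # DataParallel adds to parameter names is stripped so keys match the base net.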
for sub_task_id, (x_support_set_sub_task, y_support_set_sub_task) in \
enumerate(zip(x_support_set_task,
y_support_set_task)):
# in the future try to adapt the features using a relational component
x_support_set_sub_task = x_support_set_sub_task.view(-1, c, h, w).to(self.device)
y_support_set_sub_task = y_support_set_sub_task.view(-1).to(self.device)
if self.num_target_set_steps > 0 and 'task_embedding' in self.conditional_information:
image_embedding = self.dense_net_embedding.forward(
x=torch.cat([x_support_set_sub_task, x_target_set_task], dim=0), dropout_training=True)
x_support_set_task_features = image_embedding[:x_support_set_sub_task.shape[0]]
x_target_set_task_features = image_embedding[x_support_set_sub_task.shape[0]:]
x_support_set_task_features = F.avg_pool2d(x_support_set_task_features,
x_support_set_task_features.shape[-1]).squeeze()
x_target_set_task_features = F.avg_pool2d(x_target_set_task_features,
x_target_set_task_features.shape[-1]).squeeze()
task_embedding = None
else:
task_embedding = None
for num_step in range(self.num_support_set_steps):
support_outputs = self.net_forward(x=x_support_set_sub_task,
y=y_support_set_sub_task,
weights=names_weights_copy,
backup_running_statistics=
True if (num_step == 0) else False,
training=True,
num_step=step_idx,
return_features=True)
names_weights_copy = self.apply_inner_loop_update(loss=support_outputs['loss'],
names_weights_copy=names_weights_copy,
use_second_order=use_second_order,
current_step_idx=step_idx)
step_idx += 1
if self.use_multi_step_loss_optimization:
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
target_set_per_step_loss.append(target_outputs['loss'])
step_idx += 1
if not self.use_multi_step_loss_optimization:
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
target_set_loss = target_outputs['loss']
step_idx += 1
else:
target_set_loss = torch.sum(
torch.stack(target_set_per_step_loss, dim=0) * importance_weights)
for num_step in range(self.num_target_set_steps):
predicted_loss = self.critic_network.forward(logits=target_outputs['preds'],
task_embedding=task_embedding)
names_weights_copy = self.apply_inner_loop_update(loss=predicted_loss,
names_weights_copy=names_weights_copy,
use_second_order=use_second_order,
current_step_idx=step_idx)
step_idx += 1
post_update_loss, post_update_target_preds, post_updated_target_features = target_set_loss, \
target_outputs['preds'], \
target_outputs[
'features']
pre_target_loss_update_loss.append(target_set_loss)
pre_softmax_target_preds = F.softmax(target_outputs['preds'], dim=1).argmax(dim=1)
pre_update_accuracy = torch.eq(pre_softmax_target_preds, y_target_set_task).data.cpu().float().mean()
pre_target_loss_update_acc.append(pre_update_accuracy)
post_target_loss_update_loss.append(post_update_loss)
post_softmax_target_preds = F.softmax(post_update_target_preds, dim=1).argmax(dim=1)
post_update_accuracy = torch.eq(post_softmax_target_preds, y_target_set_task).data.cpu().float().mean()
post_target_loss_update_acc.append(post_update_accuracy)
loss = target_outputs['loss'] # * importance_vector[0] + post_update_loss * importance_vector[1]
total_per_step_losses.append(loss)
total_per_step_accuracies.append(post_update_accuracy)
per_task_preds.append(post_update_target_preds.detach().cpu().numpy())
if not training_phase:
self.classifier.restore_backup_stats()
loss_metric_dict = dict()
        loss_metric_dict['pre_target_loss_update_loss'] = pre_target_loss_update_loss
loss_metric_dict['pre_target_loss_update_acc'] = pre_target_loss_update_acc
loss_metric_dict['post_target_loss_update_loss'] = post_target_loss_update_loss
loss_metric_dict['post_target_loss_update_acc'] = post_target_loss_update_acc
losses = self.get_across_task_loss_metrics(total_losses=total_per_step_losses,
total_accuracies=total_per_step_accuracies,
loss_metrics_dict=loss_metric_dict)
return losses, per_task_preds
def load_model(self, model_save_dir, model_name, model_idx):
"""
Load checkpoint and return the state dictionary containing the network state params and experiment state.
:param model_save_dir: The directory from which to load the files.
        :param model_name: The model_name to be loaded from the directory.
:param model_idx: The index of the model (i.e. epoch number or 'latest' for the latest saved model of the current
experiment)
:return: A dictionary containing the experiment state and the saved model parameters.
"""
filepath = os.path.join(model_save_dir, "{}_{}".format(model_name, model_idx))
state = torch.load(filepath, map_location='cpu')
net = dict(state['network'])
state['network'] = OrderedDict(net)
state_dict_loaded = state['network']
self.load_state_dict(state_dict=state_dict_loaded)
self.starting_iter = state['current_iter']
return state
def run_train_iter(self, data_batch, epoch, current_iter):
"""
Runs an outer loop update step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
:param epoch: the index of the current epoch
        :return: The losses of the iteration that was run.
"""
epoch = int(epoch)
self.scheduler.step(epoch=epoch)
if self.current_epoch != epoch:
self.current_epoch = epoch
if not self.training:
self.train()
losses, per_task_preds = self.train_forward_prop(data_batch=data_batch, epoch=epoch)
exclude_string = None
self.meta_update(loss=losses['loss'], exclude_string_list=exclude_string)
losses['learning_rate'] = self.scheduler.get_lr()[0]
self.zero_grad()
self.current_iter += 1
return losses, per_task_preds
def run_validation_iter(self, data_batch):
"""
Runs an outer loop evaluation step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
        :return: The losses of the iteration that was run.
"""
if self.training:
self.eval()
losses, per_task_preds = self.evaluation_forward_prop(data_batch=data_batch, epoch=self.current_epoch)
return losses, per_task_preds
def save_model(self, model_save_dir, state):
"""
Save the network parameter state and experiment state dictionary.
:param model_save_dir: The directory to store the state at.
:param state: The state containing the experiment state and the network. It's in the form of a dictionary
object.
"""
state['network'] = self.state_dict()
torch.save(state, f=model_save_dir)
def get_across_task_loss_metrics(self, total_losses, total_accuracies, loss_metrics_dict):
losses = dict()
losses['loss'] = torch.mean(torch.stack(total_losses), dim=(0,))
losses['accuracy'] = torch.mean(torch.stack(total_accuracies), dim=(0,))
if 'saved_logits' in loss_metrics_dict:
losses['saved_logits'] = loss_metrics_dict['saved_logits']
del loss_metrics_dict['saved_logits']
for name, value in loss_metrics_dict.items():
losses[name] = torch.stack(value).mean()
for idx_num_step, (name, learning_rate_num_step) in enumerate(self.inner_loop_optimizer.named_parameters()):
for idx, learning_rate in enumerate(learning_rate_num_step.mean().view(1)):
losses['task_learning_rate_num_step_{}_{}'.format(idx_num_step,
name)] = learning_rate.detach().cpu().numpy()
return losses
def calculate_cosine_distance(support_set_embeddings, support_set_labels, target_set_embeddings):
eps = 1e-10
per_task_similarities = []
for support_set_embedding_task, target_set_embedding_task in zip(support_set_embeddings, target_set_embeddings):
target_set_embedding_task = target_set_embedding_task # sb, f
support_set_embedding_task = support_set_embedding_task # num_classes, f
dot_product = torch.stack(
[torch.matmul(target_set_embedding_task, support_vector) for support_vector in support_set_embedding_task],
dim=1)
cosine_similarity = dot_product
cosine_similarity = cosine_similarity.squeeze()
per_task_similarities.append(cosine_similarity)
similarities = torch.stack(per_task_similarities)
preds = similarities
return preds, similarities
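# Editor's note: despite its name, calculate_cosine_distance returns raw (unnormalised)
# dot products between target and support embeddings, and the eps constant is unused.
# A normalised cosine similarity, if that were desired, could be sketched as:
#
#   a = F.normalize(target_set_embedding_task, dim=-1, eps=eps)    # (sb, f)
#   b = F.normalize(support_set_embedding_task, dim=-1, eps=eps)   # (num_classes, f)
#   cosine_similarity = torch.matmul(a, b.t())                     # (sb, num_classes)
#
# This is an illustrative alternative, not the behaviour of the function above.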
class MatchingNetworkFewShotClassifier(nn.Module):
def __init__(self, **kwargs):
"""
        Initializes a Matching Network few shot learning system
:param im_shape: The images input size, in batch, c, h, w shape
        :param device: The device on which to run the model.
:param args: A namedtuple of arguments specifying various hyperparameters.
"""
super(MatchingNetworkFewShotClassifier, self).__init__()
for key, value in kwargs.items():
setattr(self, key, value)
self.input_shape = (2, self.image_channels, self.image_height, self.image_width)
self.current_epoch = -1
self.rng = set_torch_seed(seed=self.seed)
self.classifier = VGGEmbeddingNetwork(im_shape=self.input_shape)
self.device = torch.device('cpu')
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
if torch.cuda.device_count() > 1:
self.classifier = nn.DataParallel(self.classifier)
print("Outer Loop parameters")
num_params = 0
for name, param in self.named_parameters():
if param.requires_grad:
print(name, param.shape)
print("Memory parameters")
num_params = 0
for name, param in self.trainable_names_parameters(exclude_params_with_string=None):
if param.requires_grad:
print(name, param.shape)
product = 1
for item in param.shape:
product = product * item
num_params += product
print('Total Memory parameters', num_params)
self.optimizer = optim.Adam(self.trainable_parameters(exclude_list=[]),
lr=self.meta_learning_rate,
weight_decay=self.weight_decay, amsgrad=False)
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer=self.optimizer, T_max=self.total_epochs,
eta_min=self.min_learning_rate)
self.to(self.device)
def trainable_names_parameters(self, exclude_params_with_string=None):
"""
Returns an iterator over the trainable parameters of the model.
"""
for name, param in self.named_parameters():
if exclude_params_with_string is not None:
if param.requires_grad and all(
list([exclude_string not in name for exclude_string in exclude_params_with_string])):
yield (name, param)
else:
if param.requires_grad:
yield (name, param)
def forward(self, data_batch, training_phase):
"""
        Runs a Matching Networks forward pass on a batch of tasks, producing losses and summary statistics.
        :return: A dictionary of losses and the reshaped per-task predictions.
"""
data_batch = [item.to(self.device) for item in data_batch]
x_support_set, x_target_set, y_support_set, y_target_set, _, _ = data_batch
x_support_set = x_support_set.view(-1, x_support_set.shape[-3], x_support_set.shape[-2],
x_support_set.shape[-1])
x_target_set = x_target_set.view(-1, x_target_set.shape[-3], x_target_set.shape[-2], x_target_set.shape[-1])
y_support_set = y_support_set.view(-1)
y_target_set = y_target_set.view(-1)
output_units = int(self.num_classes_per_set if self.overwrite_classes_in_each_task else \
(self.num_classes_per_set * self.num_support_sets) / self.class_change_interval)
y_support_set_one_hot = int_to_one_hot(y_support_set)
g_encoded_images = []
        c, h, w = x_support_set.shape[-3:]
        x_support_set = x_support_set.view(size=(self.batch_size, -1, c, h, w))
        x_target_set = x_target_set.view(size=(self.batch_size, -1, c, h, w))
y_support_set = y_support_set.view(size=(self.batch_size, -1))
y_target_set = y_target_set.view(self.batch_size, -1)
for x_support_set_task, y_support_set_task in zip(x_support_set,
y_support_set): # produce embeddings for support set images
support_set_cnn_embed, _ = self.classifier.forward(x=x_support_set_task) # nsc * nc, h, w, c
per_class_embeddings = torch.zeros(
(output_units, int(np.prod(support_set_cnn_embed.shape) / (self.num_classes_per_set
* support_set_cnn_embed.shape[-1])),
support_set_cnn_embed.shape[-1])).to(x_support_set_task.device)
counter_dict = defaultdict(lambda: 0)
for x, y in zip(support_set_cnn_embed, y_support_set_task):
counter_dict[y % output_units] += 1
per_class_embeddings[y % output_units][counter_dict[y % output_units] - 1] = x
per_class_embeddings = per_class_embeddings.mean(1)
g_encoded_images.append(per_class_embeddings)
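            # Editor's note: support embeddings are bucketed by class index (modulo the
            # number of output units) and mean-pooled, yielding one prototype vector per
            # class for the similarity computation below.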
        f_encoded_image, _ = self.classifier.forward(x=x_target_set.view(-1, c, h, w))
f_encoded_image = f_encoded_image.view(self.batch_size, -1, f_encoded_image.shape[-1])
g_encoded_images = torch.stack(g_encoded_images, dim=0)
preds, similarities = calculate_cosine_distance(support_set_embeddings=g_encoded_images,
support_set_labels=y_support_set_one_hot.float(),
target_set_embeddings=f_encoded_image)
y_target_set = y_target_set.view(-1)
preds = preds.view(-1, preds.shape[-1])
loss = F.cross_entropy(input=preds, target=y_target_set)
softmax_target_preds = F.softmax(preds, dim=1).argmax(dim=1)
accuracy = torch.eq(softmax_target_preds, y_target_set).data.cpu().float().mean()
losses = dict()
losses['loss'] = loss
losses['accuracy'] = accuracy
return losses, preds.view(self.batch_size,
self.num_support_sets * self.num_classes_per_set *
self.num_samples_per_target_class,
output_units)
def trainable_parameters(self, exclude_list):
"""
Returns an iterator over the trainable parameters of the model.
"""
for name, param in self.named_parameters():
if all([entry not in name for entry in exclude_list]):
if param.requires_grad:
yield param
def trainable_named_parameters(self, exclude_list):
"""
Returns an iterator over the trainable parameters of the model.
"""
for name, param in self.named_parameters():
if all([entry not in name for entry in exclude_list]):
if param.requires_grad:
yield name, param
def train_forward_prop(self, data_batch, epoch, current_iter):
"""
Runs an outer loop forward prop using the meta-model and base-model.
:param data_batch: A data batch containing the support set and the target set input, output pairs.
        :param epoch: The index of the current epoch.
:return: A dictionary of losses for the current step.
"""
losses, per_task_preds = self.forward(data_batch=data_batch, training_phase=True)
return losses, per_task_preds.detach().cpu().numpy()
def evaluation_forward_prop(self, data_batch, epoch):
"""
Runs an outer loop evaluation forward prop using the meta-model and base-model.
:param data_batch: A data batch containing the support set and the target set input, output pairs.
        :param epoch: The index of the current epoch.
:return: A dictionary of losses for the current step.
"""
losses, per_task_preds = self.forward(data_batch=data_batch, training_phase=False)
return losses, per_task_preds.detach().cpu().numpy()
def meta_update(self, loss):
"""
Applies an outer loop update on the meta-parameters of the model.
:param loss: The current crossentropy loss.
"""
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def run_train_iter(self, data_batch, epoch, current_iter):
"""
Runs an outer loop update step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
:param epoch: the index of the current epoch
        :return: The losses of the iteration that was run.
"""
epoch = int(epoch)
self.scheduler.step(epoch=epoch)
if self.current_epoch != epoch:
self.current_epoch = epoch
# print(epoch, self.optimizer)
if not self.training:
self.train()
losses, per_task_preds = self.train_forward_prop(data_batch=data_batch, epoch=epoch, current_iter=current_iter)
self.meta_update(loss=losses['loss'])
losses['learning_rate'] = self.scheduler.get_lr()[0]
self.zero_grad()
return losses, per_task_preds
def run_validation_iter(self, data_batch):
"""
Runs an outer loop evaluation step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
        :return: The losses of the iteration that was run.
"""
if self.training:
self.eval()
losses, per_task_preds = self.evaluation_forward_prop(data_batch=data_batch, epoch=self.current_epoch)
return losses, per_task_preds
def save_model(self, model_save_dir, state):
"""
Save the network parameter state and experiment state dictionary.
:param model_save_dir: The directory to store the state at.
:param state: The state containing the experiment state and the network. It's in the form of a dictionary
object.
"""
state['network'] = self.state_dict()
torch.save(state, f=model_save_dir)
def load_model(self, model_save_dir, model_name, model_idx):
"""
Load checkpoint and return the state dictionary containing the network state params and experiment state.
:param model_save_dir: The directory from which to load the files.
        :param model_name: The model_name to be loaded from the directory.
:param model_idx: The index of the model (i.e. epoch number or 'latest' for the latest saved model of the current
experiment)
:return: A dictionary containing the experiment state and the saved model parameters.
"""
filepath = os.path.join(model_save_dir, "{}_{}".format(model_name, model_idx))
state = torch.load(filepath, map_location='cpu')
net = dict(state['network'])
state['network'] = OrderedDict(net)
state_dict_loaded = state['network']
self.load_state_dict(state_dict=state_dict_loaded)
self.starting_iter = state['current_iter']
return state
class FineTuneFromPretrainedFewShotClassifier(MAMLFewShotClassifier):
def __init__(self, batch_size, seed, num_classes_per_set, num_samples_per_support_class, image_channels,
num_filters, num_blocks_per_stage, num_stages, dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps, init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate, total_epochs, weight_decay, meta_learning_rate,
num_samples_per_target_class, **kwargs):
"""
Initializes a MAML few shot learning system
:param im_shape: The images input size, in batch, c, h, w shape
        :param device: The device on which to run the model.
:param args: A namedtuple of arguments specifying various hyperparameters.
"""
super(FineTuneFromPretrainedFewShotClassifier, self).__init__(batch_size, seed, num_classes_per_set,
num_samples_per_support_class,
num_samples_per_target_class, image_channels,
num_filters, num_blocks_per_stage, num_stages,
dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps,
init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate,
total_epochs,
weight_decay, meta_learning_rate, **kwargs)
self.batch_size = batch_size
self.current_epoch = -1
self.rng = set_torch_seed(seed=seed)
self.num_classes_per_set = num_classes_per_set
self.num_samples_per_support_class = num_samples_per_support_class
self.image_channels = image_channels
self.num_filters = num_filters
self.num_blocks_per_stage = num_blocks_per_stage
self.num_stages = num_stages
self.dropout_rate = dropout_rate
self.output_spatial_dimensionality = output_spatial_dimensionality
self.image_height = image_height
self.image_width = image_width
self.num_support_set_steps = num_support_set_steps
self.init_learning_rate = init_learning_rate
self.num_target_set_steps = num_target_set_steps
self.conditional_information = conditional_information
self.min_learning_rate = min_learning_rate
self.total_epochs = total_epochs
self.weight_decay = weight_decay
self.meta_learning_rate = meta_learning_rate
self.current_epoch = -1
for key, value in kwargs.items():
setattr(self, key, value)
self.rng = set_torch_seed(seed=seed)
def param_dict_to_vector(self, param_dict):
param_list = []
for name, param in param_dict.items():
param_list.append(param.view(-1, 1))
param_as_vector = torch.cat(param_list, dim=0)
return param_as_vector
def param_vector_to_param_dict(self, param_vector, names_params_dict):
new_names_params_dict = dict()
cur_idx = 0
for name, param in names_params_dict.items():
new_names_params_dict[name] = param_vector[cur_idx:cur_idx + param.view(-1).shape[0]].view(param.shape)
cur_idx += param.view(-1).shape[0]
return new_names_params_dict
def build_module(self):
support_set_shape = (
self.num_classes_per_set * self.num_samples_per_support_class,
self.image_channels,
self.image_height, self.image_width)
target_set_shape = (
self.num_classes_per_set * self.num_samples_per_target_class,
self.image_channels,
self.image_height, self.image_width)
x_support_set = torch.ones(support_set_shape)
x_target_set = torch.ones(target_set_shape)
# task_size = x_target_set.shape[0]
x_target_set = x_target_set.view(-1, x_target_set.shape[-3], x_target_set.shape[-2], x_target_set.shape[-1])
x_support_set = x_support_set.view(-1, x_support_set.shape[-3], x_support_set.shape[-2],
x_support_set.shape[-1])
num_target_samples = x_target_set.shape[0]
num_support_samples = x_support_set.shape[0]
output_units = int(self.num_classes_per_set if self.overwrite_classes_in_each_task else \
(self.num_classes_per_set * self.num_support_sets) / self.class_change_interval)
self.current_iter = 0
self.classifier = VGGActivationNormNetwork(input_shape=torch.cat([x_support_set, x_target_set], dim=0).shape,
num_output_classes=[output_units, 2000],
num_stages=4, use_channel_wise_attention=True,
num_filters=48,
num_support_set_steps=2 * self.num_support_sets * self.num_support_set_steps,
num_target_set_steps=self.num_target_set_steps + 1,
)
print("init learning rate", self.init_learning_rate)
names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters(),
exclude_strings=['linear_1'])
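        # Editor's note: 'linear_1' (presumably the auxiliary 2000-way pretraining head
        # declared above) is excluded from the inner-loop parameter dict, so only the
        # few-shot head and the shared backbone are adapted inside the inner loop.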
task_name_params = self.get_inner_loop_parameter_dict(self.named_parameters())
if self.num_target_set_steps > 0:
self.dense_net_embedding = SqueezeExciteDenseNetEmbeddingSmallNetwork(
im_shape=torch.cat([x_support_set, x_target_set], dim=0).shape, num_filters=self.num_filters,
num_blocks_per_stage=self.num_blocks_per_stage,
num_stages=self.num_stages, average_pool_outputs=False, dropout_rate=self.dropout_rate,
output_spatial_dimensionality=self.output_spatial_dimensionality, use_channel_wise_attention=True)
task_features = self.dense_net_embedding.forward(
x=torch.cat([x_support_set, x_target_set], dim=0), dropout_training=True)
task_features = task_features.squeeze()
encoded_x = task_features
support_set_features = F.avg_pool2d(encoded_x[:num_support_samples], encoded_x.shape[-1]).squeeze()
preds, penultimate_features_x = self.classifier.forward(x=torch.cat([x_support_set, x_target_set], dim=0),
num_step=0, return_features=True)
if 'task_embedding' in self.conditional_information:
self.task_relational_network = TaskRelationalEmbedding(input_shape=support_set_features.shape,
num_samples_per_support_class=self.num_samples_per_support_class,
num_classes_per_set=self.num_classes_per_set)
relational_encoding_x = self.task_relational_network.forward(x_img=support_set_features)
relational_embedding_shape = relational_encoding_x.shape
else:
self.task_relational_network = None
relational_embedding_shape = None
x_support_set_task = F.avg_pool2d(
encoded_x[:self.num_classes_per_set * (self.num_samples_per_support_class)],
encoded_x.shape[-1]).squeeze()
x_target_set_task = F.avg_pool2d(
encoded_x[self.num_classes_per_set * (self.num_samples_per_support_class):],
encoded_x.shape[-1]).squeeze()
x_support_set_classifier_features = F.avg_pool2d(penultimate_features_x[
:self.num_classes_per_set * (
self.num_samples_per_support_class)],
penultimate_features_x.shape[-2]).squeeze()
x_target_set_classifier_features = F.avg_pool2d(
penultimate_features_x[self.num_classes_per_set * (self.num_samples_per_support_class):],
penultimate_features_x.shape[-2]).squeeze()
self.critic_network = CriticNetwork(
task_embedding_shape=relational_embedding_shape,
num_classes_per_set=self.num_classes_per_set,
support_set_feature_shape=x_support_set_task.shape,
target_set_feature_shape=x_target_set_task.shape,
support_set_classifier_pre_last_features=x_support_set_classifier_features.shape,
target_set_classifier_pre_last_features=x_target_set_classifier_features.shape,
num_target_samples=self.num_samples_per_target_class,
num_support_samples=self.num_samples_per_support_class,
logit_shape=preds[self.num_classes_per_set * (self.num_samples_per_support_class):].shape,
support_set_label_shape=(
self.num_classes_per_set * (self.num_samples_per_support_class), self.num_classes_per_set),
conditional_information=self.conditional_information)
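        # Editor's note: the critic network takes the base model's target-set logits (and,
        # when available, a relational task embedding) and predicts a surrogate loss that
        # drives the label-free target-set adaptation steps in forward(); its input shapes
        # are inferred from the dummy forward pass above.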
self.inner_loop_optimizer = LSLRGradientDescentLearningRule(
total_num_inner_loop_steps=2 * (
self.num_support_sets * self.num_support_set_steps) + self.num_target_set_steps + 1,
learnable_learning_rates=self.learnable_learning_rates,
init_learning_rate=self.init_learning_rate)
self.inner_loop_optimizer.initialise(names_weights_dict=names_weights_copy)
print("Inner Loop parameters")
for key, value in self.inner_loop_optimizer.named_parameters():
print(key, value.shape)
print("Outer Loop parameters")
for name, param in self.named_parameters():
if param.requires_grad:
print(name, param.shape)
self.exclude_list = None
self.switch_opt_params(exclude_list=self.exclude_list)
self.device = torch.device('cpu')
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
if torch.cuda.device_count() > 1:
self.to(self.device)
self.dense_net_embedding = nn.DataParallel(module=self.dense_net_embedding)
else:
self.to(self.device)
def switch_opt_params(self, exclude_list):
print("current trainable params")
for name, param in self.trainable_names_parameters(exclude_params_with_string=exclude_list):
print(name, param.shape)
self.optimizer = AdamW(self.trainable_parameters(exclude_list), lr=self.meta_learning_rate,
weight_decay=self.weight_decay, amsgrad=False)
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer=self.optimizer, T_max=self.total_epochs,
eta_min=self.min_learning_rate)
def net_forward(self, x, y, weights, backup_running_statistics, training, num_step,
return_features=False):
"""
        Performs a base model forward pass on some data points x, using the parameters in the weights dictionary.
        Also requires a flag indicating whether to reset the batch norm running statistics at the end of the run
        (used at evaluation time), a flag indicating whether this is a training pass, and an int indicating the
        current step's number in the inner loop.
:param x: A data batch of shape b, c, h, w
:param y: A data targets batch of shape b, n_classes
:param weights: A dictionary containing the weights to pass to the network.
:param backup_running_statistics: A flag indicating whether to reset the batch norm running statistics to their
previous values after the run (only for evaluation)
:param training: A flag indicating whether the current process phase is a training or evaluation.
:param num_step: An integer indicating the number of the step in the inner loop.
:return: the crossentropy losses with respect to the given y, the predictions of the base model.
"""
outputs = {"loss": 0., "preds": 0, "features": 0.}
if return_features:
outputs['preds'], outputs['features'] = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step,
return_features=return_features)
if type(outputs['preds']) == list:
outputs['preds'] = outputs['preds'][0]
outputs['loss'] = F.cross_entropy(outputs['preds'], y)
else:
outputs['preds'] = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step)
if type(outputs['preds']) == list:
outputs['preds'] = outputs['preds'][0]
outputs['loss'] = F.cross_entropy(outputs['preds'], y)
return outputs
def get_per_step_loss_importance_vector(self, current_epoch):
"""
Generates a tensor of dimensionality (num_inner_loop_steps) indicating the importance of each step's target
loss towards the optimization loss.
:return: A tensor to be used to compute the weighted average of the loss, useful for
the MSL (Multi Step Loss) mechanism.
"""
loss_weights = torch.ones(size=(self.number_of_training_steps_per_iter * self.num_support_sets,),
device=self.device) / (
self.number_of_training_steps_per_iter * self.num_support_sets)
early_steps_decay_rate = (1. / (
self.number_of_training_steps_per_iter * self.num_support_sets)) / 100.
loss_weights = loss_weights - (early_steps_decay_rate * current_epoch)
loss_weights = torch.max(input=loss_weights,
other=torch.ones(loss_weights.shape, device=self.device) * 0.001)
loss_weights[-1] = 1. - torch.sum(loss_weights[:-1])
return loss_weights
def forward(self, data_batch, epoch, use_second_order, use_multi_step_loss_optimization, num_steps, training_phase):
"""
Runs a forward outer loop pass on the batch of tasks using the MAML/++ framework.
:param data_batch: A data batch containing the support and target sets.
:param epoch: Current epoch's index
:param use_second_order: A boolean saying whether to use second order derivatives.
        :param use_multi_step_loss_optimization: Whether to optimize the outer loop with the multi step loss,
        which improves the stability of the system (True), or with just the last step's target loss (False)
:param num_steps: Number of inner loop steps.
:param training_phase: Whether this is a training phase (True) or an evaluation phase (False)
:return: A dictionary with the collected losses of the current outer forward propagation.
"""
x_support_set, x_target_set, y_support_set, y_target_set, x, y = data_batch
self.classifier.zero_grad()
total_per_step_losses = []
total_per_step_accuracies = []
per_task_preds = []
num_losses = 2
importance_vector = torch.Tensor([1.0 / num_losses for i in range(num_losses)]).to(self.device)
step_magnitude = (1.0 / num_losses) / self.total_epochs
current_epoch_step_magnitude = torch.ones(1).to(self.device) * (step_magnitude * (epoch + 1))
importance_vector[0] = importance_vector[0] - current_epoch_step_magnitude
importance_vector[1] = importance_vector[1] + current_epoch_step_magnitude
pre_target_loss_update_loss = []
pre_target_loss_update_acc = []
post_target_loss_update_loss = []
post_target_loss_update_acc = []
for task_id, (x_support_set_task, y_support_set_task, x_target_set_task, y_target_set_task) in \
enumerate(zip(x_support_set,
y_support_set,
x_target_set,
y_target_set)):
c, h, w = x_target_set_task.shape[-3:]
x_target_set_task = x_target_set_task.view(-1, c, h, w).to(self.device)
y_target_set_task = y_target_set_task.view(-1).to(self.device)
target_set_per_step_loss = []
importance_weights = self.get_per_step_loss_importance_vector(current_epoch=self.current_epoch)
step_idx = 0
names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters(),
exclude_strings=['linear_1'])
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
names_weights_copy = {
name.replace('module.', ''): value.unsqueeze(0).repeat(
[num_devices] + [1 for i in range(len(value.shape))]) for
name, value in names_weights_copy.items()}
for sub_task_id, (x_support_set_sub_task, y_support_set_sub_task) in \
enumerate(zip(x_support_set_task,
y_support_set_task)):
# in the future try to adapt the features using a relational component
x_support_set_sub_task = x_support_set_sub_task.view(-1, c, h, w).to(self.device)
y_support_set_sub_task = y_support_set_sub_task.view(-1).to(self.device)
if self.num_target_set_steps > 0 and 'task_embedding' in self.conditional_information:
image_embedding = self.dense_net_embedding.forward(
x=torch.cat([x_support_set_sub_task, x_target_set_task], dim=0), dropout_training=True)
x_support_set_task_features = image_embedding[:x_support_set_sub_task.shape[0]]
x_target_set_task_features = image_embedding[x_support_set_sub_task.shape[0]:]
x_support_set_task_features = F.avg_pool2d(x_support_set_task_features,
x_support_set_task_features.shape[-1]).squeeze()
x_target_set_task_features = F.avg_pool2d(x_target_set_task_features,
x_target_set_task_features.shape[-1]).squeeze()
if self.task_relational_network is not None:
task_embedding = self.task_relational_network.forward(x_img=x_support_set_task_features)
else:
task_embedding = None
else:
task_embedding = None
for num_step in range(self.num_support_set_steps):
support_outputs = self.net_forward(x=x_support_set_sub_task,
y=y_support_set_sub_task,
weights=names_weights_copy,
backup_running_statistics=
True if (num_step == 0) else False,
training=True,
num_step=step_idx,
return_features=True)
names_weights_copy = self.apply_inner_loop_update(loss=support_outputs['loss'],
names_weights_copy=names_weights_copy,
use_second_order=use_second_order,
current_step_idx=step_idx)
step_idx += 1
if self.use_multi_step_loss_optimization:
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
target_set_per_step_loss.append(target_outputs['loss'])
step_idx += 1
if not self.use_multi_step_loss_optimization:
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
target_set_loss = target_outputs['loss']
step_idx += 1
else:
target_set_loss = torch.sum(
torch.stack(target_set_per_step_loss, dim=0) * importance_weights)
# print(target_set_loss, target_set_per_step_loss, importance_weights)
# if self.save_preds:
# if saved_logits_list is None:
# saved_logits_list = []
#
# saved_logits_list.extend(target_outputs['preds'])
for num_step in range(self.num_target_set_steps):
predicted_loss = self.critic_network.forward(logits=target_outputs['preds'],
task_embedding=task_embedding)
names_weights_copy = self.apply_inner_loop_update(loss=predicted_loss,
names_weights_copy=names_weights_copy,
use_second_order=use_second_order,
current_step_idx=step_idx)
step_idx += 1
if self.num_target_set_steps > 0:
post_update_outputs = self.net_forward(
x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
post_update_loss, post_update_target_preds, post_updated_target_features = post_update_outputs[
'loss'], \
post_update_outputs[
'preds'], \
post_update_outputs[
'features']
else:
post_update_loss, post_update_target_preds, post_updated_target_features = target_set_loss, \
target_outputs['preds'], \
target_outputs[
'features']
pre_target_loss_update_loss.append(target_set_loss)
pre_softmax_target_preds = F.softmax(target_outputs['preds'], dim=1).argmax(dim=1)
pre_update_accuracy = torch.eq(pre_softmax_target_preds, y_target_set_task).data.cpu().float().mean()
pre_target_loss_update_acc.append(pre_update_accuracy)
post_target_loss_update_loss.append(post_update_loss)
post_softmax_target_preds = F.softmax(post_update_target_preds, dim=1).argmax(dim=1)
post_update_accuracy = torch.eq(post_softmax_target_preds, y_target_set_task).data.cpu().float().mean()
post_target_loss_update_acc.append(post_update_accuracy)
loss = target_outputs['loss'] # * importance_vector[0] + post_update_loss * importance_vector[1]
total_per_step_losses.append(loss)
total_per_step_accuracies.append(post_update_accuracy)
per_task_preds.append(post_update_target_preds.detach().cpu().numpy())
if not training_phase:
self.classifier.restore_backup_stats()
loss_metric_dict = dict()
        loss_metric_dict['pre_target_loss_update_loss'] = pre_target_loss_update_loss
loss_metric_dict['pre_target_loss_update_acc'] = pre_target_loss_update_acc
loss_metric_dict['post_target_loss_update_loss'] = post_target_loss_update_loss
loss_metric_dict['post_target_loss_update_acc'] = post_target_loss_update_acc
losses = self.get_across_task_loss_metrics(total_losses=total_per_step_losses,
total_accuracies=total_per_step_accuracies,
loss_metrics_dict=loss_metric_dict)
return losses, per_task_preds
def load_model(self, model_save_dir, model_name, model_idx):
"""
Load checkpoint and return the state dictionary containing the network state params and experiment state.
:param model_save_dir: The directory from which to load the files.
        :param model_name: The model_name to be loaded from the directory.
:param model_idx: The index of the model (i.e. epoch number or 'latest' for the latest saved model of the current
experiment)
:return: A dictionary containing the experiment state and the saved model parameters.
"""
filepath = os.path.join(model_save_dir, "{}_{}".format(model_name, model_idx))
state = torch.load(filepath, map_location='cpu')
net = dict(state['network'])
state['network'] = OrderedDict(net)
state_dict_loaded = state['network']
self.load_state_dict(state_dict=state_dict_loaded)
self.starting_iter = state['current_iter']
return state
def run_train_iter(self, data_batch, epoch, current_iter):
"""
Runs an outer loop update step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
:param epoch: the index of the current epoch
        :return: The losses of the iteration that was run.
"""
epoch = int(epoch)
self.scheduler.step(epoch=epoch)
if self.current_epoch != epoch:
self.current_epoch = epoch
if not self.training:
self.train()
x_support_set, x_target_set, y_support_set, y_target_set, x, y = data_batch
x = x.view(-1, x.shape[-3], x.shape[-2], x.shape[-1]).to(self.device)
y = y.view(-1).to(self.device).long()
preds = self.classifier.forward(x=x, num_step=0)
loss = F.cross_entropy(input=preds[1], target=y)
preds = preds[1].argmax(dim=1)
accuracy = torch.eq(preds, y).data.cpu().float().mean()
losses = dict()
losses['loss'] = loss
losses['accuracy'] = accuracy
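        # Editor's note: unlike the meta-learning classes above, this training step ignores
        # the support/target split and performs plain supervised training on the flat (x, y)
        # batch, using the second output head (preds[1], the large pretraining head) for the
        # cross-entropy loss.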
exclude_string = None
self.meta_update(loss=losses['loss'], exclude_string_list=exclude_string)
losses['learning_rate'] = self.scheduler.get_lr()[0]
self.zero_grad()
self.current_iter += 1
return losses, None
def run_validation_iter(self, data_batch):
"""
Runs an outer loop evaluation step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
        :return: The losses of the iteration that was run.
"""
if self.training:
self.eval()
losses, per_task_preds = self.evaluation_forward_prop(data_batch=data_batch, epoch=self.current_epoch)
return losses, per_task_preds
def save_model(self, model_save_dir, state):
"""
Save the network parameter state and experiment state dictionary.
:param model_save_dir: The directory to store the state at.
:param state: The state containing the experiment state and the network. It's in the form of a dictionary
object.
"""
state['network'] = self.state_dict()
torch.save(state, f=model_save_dir)
def get_across_task_loss_metrics(self, total_losses, total_accuracies, loss_metrics_dict):
losses = dict()
losses['loss'] = torch.mean(torch.stack(total_losses), dim=(0,))
losses['accuracy'] = torch.mean(torch.stack(total_accuracies), dim=(0,))
if 'saved_logits' in loss_metrics_dict:
losses['saved_logits'] = loss_metrics_dict['saved_logits']
del loss_metrics_dict['saved_logits']
for name, value in loss_metrics_dict.items():
losses[name] = torch.stack(value).mean()
for idx_num_step, (name, learning_rate_num_step) in enumerate(self.inner_loop_optimizer.named_parameters()):
for idx, learning_rate in enumerate(learning_rate_num_step.mean().view(1)):
losses['task_learning_rate_num_step_{}_{}'.format(idx_num_step,
name)] = learning_rate.detach().cpu().numpy()
return losses
class FineTuneFromScratchFewShotClassifier(MAMLFewShotClassifier):
def __init__(self, batch_size, seed, num_classes_per_set, num_samples_per_support_class, image_channels,
num_filters, num_blocks_per_stage, num_stages, dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps, init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate, total_epochs, weight_decay, meta_learning_rate,
num_samples_per_target_class, **kwargs):
"""
Initializes a MAML few shot learning system
:param im_shape: The images input size, in batch, c, h, w shape
        :param device: The device on which to run the model.
:param args: A namedtuple of arguments specifying various hyperparameters.
"""
super(FineTuneFromScratchFewShotClassifier, self).__init__(batch_size, seed, num_classes_per_set,
num_samples_per_support_class,
num_samples_per_target_class, image_channels,
num_filters, num_blocks_per_stage, num_stages,
dropout_rate, output_spatial_dimensionality,
image_height, image_width, num_support_set_steps,
init_learning_rate, num_target_set_steps,
conditional_information, min_learning_rate,
total_epochs,
weight_decay, meta_learning_rate, **kwargs)
self.batch_size = batch_size
self.current_epoch = -1
self.rng = set_torch_seed(seed=seed)
self.num_classes_per_set = num_classes_per_set
self.num_samples_per_support_class = num_samples_per_support_class
self.image_channels = image_channels
self.num_filters = num_filters
self.num_blocks_per_stage = num_blocks_per_stage
self.num_stages = num_stages
self.dropout_rate = dropout_rate
self.output_spatial_dimensionality = output_spatial_dimensionality
self.image_height = image_height
self.image_width = image_width
self.num_support_set_steps = num_support_set_steps
self.init_learning_rate = init_learning_rate
self.num_target_set_steps = num_target_set_steps
self.conditional_information = conditional_information
self.min_learning_rate = min_learning_rate
self.total_epochs = total_epochs
self.weight_decay = weight_decay
self.meta_learning_rate = meta_learning_rate
self.current_epoch = -1
for key, value in kwargs.items():
setattr(self, key, value)
self.rng = set_torch_seed(seed=seed)
def param_dict_to_vector(self, param_dict):
param_list = []
for name, param in param_dict.items():
param_list.append(param.view(-1, 1))
param_as_vector = torch.cat(param_list, dim=0)
return param_as_vector
def param_vector_to_param_dict(self, param_vector, names_params_dict):
new_names_params_dict = dict()
cur_idx = 0
for name, param in names_params_dict.items():
new_names_params_dict[name] = param_vector[cur_idx:cur_idx + param.view(-1).shape[0]].view(param.shape)
cur_idx += param.view(-1).shape[0]
return new_names_params_dict
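# Illustrative round trip for the two helpers above (hedged sketch, not part of
# the original code; 'classifier' stands for an instance of this class):
#
#   >>> params = {'w': torch.ones(2, 3), 'b': torch.zeros(3)}
#   >>> vec = classifier.param_dict_to_vector(params)        # shape (9, 1)
#   >>> restored = classifier.param_vector_to_param_dict(vec, params)
#   >>> restored['w'].shape, restored['b'].shape
#   (torch.Size([2, 3]), torch.Size([3]))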
def build_module(self):
support_set_shape = (
self.num_classes_per_set * self.num_samples_per_support_class,
self.image_channels,
self.image_height, self.image_width)
target_set_shape = (
self.num_classes_per_set * self.num_samples_per_target_class,
self.image_channels,
self.image_height, self.image_width)
x_support_set = torch.ones(support_set_shape)
x_target_set = torch.ones(target_set_shape)
# task_size = x_target_set.shape[0]
x_target_set = x_target_set.view(-1, x_target_set.shape[-3], x_target_set.shape[-2], x_target_set.shape[-1])
x_support_set = x_support_set.view(-1, x_support_set.shape[-3], x_support_set.shape[-2],
x_support_set.shape[-1])
num_target_samples = x_target_set.shape[0]
num_support_samples = x_support_set.shape[0]
output_units = int(self.num_classes_per_set if self.overwrite_classes_in_each_task else \
(self.num_classes_per_set * self.num_support_sets) / self.class_change_interval)
self.current_iter = 0
self.classifier = VGGActivationNormNetwork(input_shape=torch.cat([x_support_set, x_target_set], dim=0).shape,
num_output_classes=output_units,
num_stages=4, use_channel_wise_attention=True,
num_filters=48,
num_support_set_steps=2 * self.num_support_sets * self.num_support_set_steps,
num_target_set_steps=self.num_target_set_steps + 1,
)
print("init learning rate", self.init_learning_rate)
names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters())
task_name_params = self.get_inner_loop_parameter_dict(self.named_parameters())
if self.num_target_set_steps > 0:
self.dense_net_embedding = SqueezeExciteDenseNetEmbeddingSmallNetwork(
im_shape=torch.cat([x_support_set, x_target_set], dim=0).shape, num_filters=self.num_filters,
num_blocks_per_stage=self.num_blocks_per_stage,
num_stages=self.num_stages, average_pool_outputs=False, dropout_rate=self.dropout_rate,
output_spatial_dimensionality=self.output_spatial_dimensionality, use_channel_wise_attention=True)
task_features = self.dense_net_embedding.forward(
x=torch.cat([x_support_set, x_target_set], dim=0), dropout_training=True)
task_features = task_features.squeeze()
encoded_x = task_features
support_set_features = F.avg_pool2d(encoded_x[:num_support_samples], encoded_x.shape[-1]).squeeze()
preds, penultimate_features_x = self.classifier.forward(x=torch.cat([x_support_set, x_target_set], dim=0),
num_step=0, return_features=True)
if 'task_embedding' in self.conditional_information:
self.task_relational_network = TaskRelationalEmbedding(input_shape=support_set_features.shape,
num_samples_per_support_class=self.num_samples_per_support_class,
num_classes_per_set=self.num_classes_per_set)
relational_encoding_x = self.task_relational_network.forward(x_img=support_set_features)
relational_embedding_shape = relational_encoding_x.shape
else:
self.task_relational_network = None
relational_embedding_shape = None
x_support_set_task = F.avg_pool2d(
encoded_x[:self.num_classes_per_set * (self.num_samples_per_support_class)],
encoded_x.shape[-1]).squeeze()
x_target_set_task = F.avg_pool2d(
encoded_x[self.num_classes_per_set * (self.num_samples_per_support_class):],
encoded_x.shape[-1]).squeeze()
x_support_set_classifier_features = F.avg_pool2d(penultimate_features_x[
:self.num_classes_per_set * (
self.num_samples_per_support_class)],
penultimate_features_x.shape[-2]).squeeze()
x_target_set_classifier_features = F.avg_pool2d(
penultimate_features_x[self.num_classes_per_set * (self.num_samples_per_support_class):],
penultimate_features_x.shape[-2]).squeeze()
self.critic_network = CriticNetwork(
task_embedding_shape=relational_embedding_shape,
num_classes_per_set=self.num_classes_per_set,
support_set_feature_shape=x_support_set_task.shape,
target_set_feature_shape=x_target_set_task.shape,
support_set_classifier_pre_last_features=x_support_set_classifier_features.shape,
target_set_classifier_pre_last_features=x_target_set_classifier_features.shape,
num_target_samples=self.num_samples_per_target_class,
num_support_samples=self.num_samples_per_support_class,
logit_shape=preds[self.num_classes_per_set * (self.num_samples_per_support_class):].shape,
support_set_label_shape=(
self.num_classes_per_set * (self.num_samples_per_support_class), self.num_classes_per_set),
conditional_information=self.conditional_information)
self.inner_loop_optimizer = LSLRGradientDescentLearningRule(
total_num_inner_loop_steps=2 * (
self.num_support_sets * self.num_support_set_steps) + self.num_target_set_steps + 1,
learnable_learning_rates=self.learnable_learning_rates,
init_learning_rate=self.init_learning_rate)
self.inner_loop_optimizer.initialise(names_weights_dict=names_weights_copy)
print("Inner Loop parameters")
for key, value in self.inner_loop_optimizer.named_parameters():
print(key, value.shape)
print("Outer Loop parameters")
for name, param in self.named_parameters():
if param.requires_grad:
print(name, param.shape)
self.exclude_list = ['classifier', 'inner_loop']
# self.switch_opt_params(exclude_list=self.exclude_list)
self.device = torch.device('cpu')
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
if torch.cuda.device_count() > 1:
self.to(self.device)
self.dense_net_embedding = nn.DataParallel(module=self.dense_net_embedding)
else:
self.to(self.device)
def switch_opt_params(self, exclude_list):
print("current trainable params")
for name, param in self.trainable_names_parameters(exclude_params_with_string=exclude_list):
print(name, param.shape)
self.optimizer = AdamW(self.trainable_parameters(exclude_list), lr=self.meta_learning_rate,
weight_decay=self.weight_decay, amsgrad=False)
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer=self.optimizer, T_max=self.total_epochs,
eta_min=self.min_learning_rate)
def net_forward(self, x, y, weights, backup_running_statistics, training, num_step,
return_features=False):
"""
A base model forward pass on some data points x, using the parameters in the weights dictionary. Also takes
a flag indicating whether to restore the batch norm running statistics at the end of the run (used at the
evaluation phase), a flag indicating whether this is a training pass, and an int giving the current step
number in the inner loop.
:param x: A data batch of shape b, c, h, w
:param y: A data targets batch of shape b, n_classes
:param weights: A dictionary containing the weights to pass to the network.
:param backup_running_statistics: A flag indicating whether to reset the batch norm running statistics to their
previous values after the run (only for evaluation)
:param training: A flag indicating whether the current process phase is a training or evaluation.
:param num_step: An integer indicating the number of the step in the inner loop.
:return: the crossentropy losses with respect to the given y, the predictions of the base model.
"""
outputs = {"loss": 0., "preds": 0, "features": 0.}
if return_features:
outputs['preds'], outputs['features'] = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step,
return_features=return_features)
if type(outputs['preds']) == tuple:
if len(outputs['preds']) == 2:
outputs['preds'] = outputs['preds'][0]
outputs['loss'] = F.cross_entropy(outputs['preds'], y)
else:
outputs['preds'] = self.classifier.forward(x=x, params=weights,
training=training,
backup_running_statistics=backup_running_statistics,
num_step=num_step)
if type(outputs['preds']) == tuple:
if len(outputs['preds']) == 2:
outputs['preds'] = outputs['preds'][0]
outputs['loss'] = F.cross_entropy(outputs['preds'], y)
return outputs
def get_per_step_loss_importance_vector(self, current_epoch):
"""
Generates a tensor of dimensionality (num_inner_loop_steps) indicating the importance of each step's target
loss towards the optimization loss.
:return: A tensor to be used to compute the weighted average of the loss, useful for
the MSL (Multi Step Loss) mechanism.
"""
loss_weights = torch.ones(size=(self.number_of_training_steps_per_iter * self.num_support_sets,),
device=self.device) / (
self.number_of_training_steps_per_iter * self.num_support_sets)
early_steps_decay_rate = (1. / (
self.number_of_training_steps_per_iter * self.num_support_sets)) / 100.
loss_weights = loss_weights - (early_steps_decay_rate * current_epoch)
loss_weights = torch.max(input=loss_weights,
other=torch.ones(loss_weights.shape, device=self.device) * 0.001)
loss_weights[-1] = 1. - torch.sum(loss_weights[:-1])
return loss_weights
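# Worked example of the annealing above (illustrative comment, not original
# code): with number_of_training_steps_per_iter=5 and num_support_sets=2 there
# are 10 weights, each starting at 1/10 = 0.1, and the decay rate is
# (1/10)/100 = 0.001 per epoch. At current_epoch=10 every non-final weight has
# shrunk to 0.1 - 0.001*10 = 0.09 (floored at 0.001), and the final step's
# weight is raised to 1 - 9*0.09 = 0.19, so later inner-loop steps gradually
# dominate the outer-loop objective.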
def forward(self, data_batch, epoch, use_second_order, use_multi_step_loss_optimization, num_steps, training_phase):
"""
Runs a forward outer loop pass on the batch of tasks using the MAML/++ framework.
:param data_batch: A data batch containing the support and target sets.
:param epoch: Current epoch's index
:param use_second_order: A boolean saying whether to use second order derivatives.
:param use_multi_step_loss_optimization: Whether to optimize the outer loop with the multi step loss, which
improves the stability of the system (True), or with just the last step's target loss (False)
:param num_steps: Number of inner loop steps.
:param training_phase: Whether this is a training phase (True) or an evaluation phase (False)
:return: A dictionary with the collected losses of the current outer forward propagation.
"""
x_support_set, x_target_set, y_support_set, y_target_set, x, y = data_batch
self.classifier.zero_grad()
total_per_step_losses = []
total_per_step_accuracies = []
per_task_preds = []
num_losses = 2
importance_vector = torch.Tensor([1.0 / num_losses for i in range(num_losses)]).to(self.device)
step_magnitude = (1.0 / num_losses) / self.total_epochs
current_epoch_step_magnitude = torch.ones(1).to(self.device) * (step_magnitude * (epoch + 1))
importance_vector[0] = importance_vector[0] - current_epoch_step_magnitude
importance_vector[1] = importance_vector[1] + current_epoch_step_magnitude
pre_target_loss_update_loss = []
pre_target_loss_update_acc = []
post_target_loss_update_loss = []
post_target_loss_update_acc = []
for task_id, (x_support_set_task, y_support_set_task, x_target_set_task, y_target_set_task) in \
enumerate(zip(x_support_set,
y_support_set,
x_target_set,
y_target_set)):
c, h, w = x_target_set_task.shape[-3:]
x_target_set_task = x_target_set_task.view(-1, c, h, w).to(self.device)
y_target_set_task = y_target_set_task.view(-1).to(self.device)
target_set_per_step_loss = []
importance_weights = self.get_per_step_loss_importance_vector(current_epoch=self.current_epoch)
step_idx = 0
names_weights_copy = self.get_inner_loop_parameter_dict(self.classifier.named_parameters())
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
names_weights_copy = {
name.replace('module.', ''): value.unsqueeze(0).repeat(
[num_devices] + [1 for i in range(len(value.shape))]) for
name, value in names_weights_copy.items()}
for sub_task_id, (x_support_set_sub_task, y_support_set_sub_task) in \
enumerate(zip(x_support_set_task,
y_support_set_task)):
# in the future try to adapt the features using a relational component
x_support_set_sub_task = x_support_set_sub_task.view(-1, c, h, w).to(self.device)
y_support_set_sub_task = y_support_set_sub_task.view(-1).to(self.device)
if self.num_target_set_steps > 0 and 'task_embedding' in self.conditional_information:
image_embedding = self.dense_net_embedding.forward(
x=torch.cat([x_support_set_sub_task, x_target_set_task], dim=0), dropout_training=True)
x_support_set_task_features = image_embedding[:x_support_set_sub_task.shape[0]]
x_target_set_task_features = image_embedding[x_support_set_sub_task.shape[0]:]
x_support_set_task_features = F.avg_pool2d(x_support_set_task_features,
x_support_set_task_features.shape[-1]).squeeze()
x_target_set_task_features = F.avg_pool2d(x_target_set_task_features,
x_target_set_task_features.shape[-1]).squeeze()
if self.task_relational_network is not None:
task_embedding = self.task_relational_network.forward(x_img=x_support_set_task_features)
else:
task_embedding = None
else:
task_embedding = None
for num_step in range(self.num_support_set_steps):
support_outputs = self.net_forward(x=x_support_set_sub_task,
y=y_support_set_sub_task,
weights=names_weights_copy,
backup_running_statistics=
True if (num_step == 0) else False,
training=True,
num_step=step_idx,
return_features=True)
names_weights_copy = self.apply_inner_loop_update(loss=support_outputs['loss'],
names_weights_copy=names_weights_copy,
use_second_order=use_second_order,
current_step_idx=step_idx)
step_idx += 1
if self.use_multi_step_loss_optimization:
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
target_set_per_step_loss.append(target_outputs['loss'])
step_idx += 1
if not self.use_multi_step_loss_optimization:
target_outputs = self.net_forward(x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
target_set_loss = target_outputs['loss']
step_idx += 1
else:
target_set_loss = torch.sum(
torch.stack(target_set_per_step_loss, dim=0) * importance_weights)
# print(target_set_loss, target_set_per_step_loss, importance_weights)
# if self.save_preds:
# if saved_logits_list is None:
# saved_logits_list = []
#
# saved_logits_list.extend(target_outputs['preds'])
for num_step in range(self.num_target_set_steps):
predicted_loss = self.critic_network.forward(logits=target_outputs['preds'],
task_embedding=task_embedding)
names_weights_copy = self.apply_inner_loop_update(loss=predicted_loss,
names_weights_copy=names_weights_copy,
use_second_order=use_second_order,
current_step_idx=step_idx)
step_idx += 1
if self.num_target_set_steps > 0:
post_update_outputs = self.net_forward(
x=x_target_set_task,
y=y_target_set_task, weights=names_weights_copy,
backup_running_statistics=False, training=True,
num_step=step_idx,
return_features=True)
post_update_loss, post_update_target_preds, post_updated_target_features = (
post_update_outputs['loss'], post_update_outputs['preds'], post_update_outputs['features'])
else:
post_update_loss, post_update_target_preds, post_updated_target_features = (
target_set_loss, target_outputs['preds'], target_outputs['features'])
pre_target_loss_update_loss.append(target_set_loss)
pre_softmax_target_preds = F.softmax(target_outputs['preds'], dim=1).argmax(dim=1)
pre_update_accuracy = torch.eq(pre_softmax_target_preds, y_target_set_task).data.cpu().float().mean()
pre_target_loss_update_acc.append(pre_update_accuracy)
post_target_loss_update_loss.append(post_update_loss)
post_softmax_target_preds = F.softmax(post_update_target_preds, dim=1).argmax(dim=1)
post_update_accuracy = torch.eq(post_softmax_target_preds, y_target_set_task).data.cpu().float().mean()
post_target_loss_update_acc.append(post_update_accuracy)
loss = target_outputs['loss'] # * importance_vector[0] + post_update_loss * importance_vector[1]
total_per_step_losses.append(loss)
total_per_step_accuracies.append(post_update_accuracy)
per_task_preds.append(post_update_target_preds.detach().cpu().numpy())
if not training_phase:
self.classifier.restore_backup_stats()
loss_metric_dict = dict()
loss_metric_dict['pre_target_loss_update_loss'] = pre_target_loss_update_loss
loss_metric_dict['pre_target_loss_update_acc'] = pre_target_loss_update_acc
loss_metric_dict['post_target_loss_update_loss'] = post_target_loss_update_loss
loss_metric_dict['post_target_loss_update_acc'] = post_target_loss_update_acc
losses = self.get_across_task_loss_metrics(total_losses=total_per_step_losses,
total_accuracies=total_per_step_accuracies,
loss_metrics_dict=loss_metric_dict)
return losses, per_task_preds
def load_model(self, model_save_dir, model_name, model_idx):
"""
Load checkpoint and return the state dictionary containing the network state params and experiment state.
:param model_save_dir: The directory from which to load the files.
:param model_name: The model_name to be loaded from the directory.
:param model_idx: The index of the model (i.e. epoch number or 'latest' for the latest saved model of the current
experiment)
:return: A dictionary containing the experiment state and the saved model parameters.
"""
filepath = os.path.join(model_save_dir, "{}_{}".format(model_name, model_idx))
state = torch.load(filepath, map_location='cpu')
net = dict(state['network'])
state['network'] = OrderedDict(net)
state_dict_loaded = state['network']
self.load_state_dict(state_dict=state_dict_loaded)
self.starting_iter = state['current_iter']
return state
def run_train_iter(self, data_batch, epoch, current_iter):
"""
Runs an outer loop update step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
:param epoch: the index of the current epoch
:return: The losses of the iteration that was run.
"""
epoch = int(epoch)
# self.scheduler.step(epoch=epoch)
if self.current_epoch != epoch:
self.current_epoch = epoch
if not self.training:
self.train()
losses, per_task_preds = self.train_forward_prop(data_batch=data_batch, epoch=epoch)
exclude_string = None
# self.meta_update(loss=losses['loss'], exclude_string_list=exclude_string)
# losses['learning_rate'] = self.scheduler.get_lr()[0]
self.zero_grad()
self.current_iter += 1
return losses, per_task_preds
def run_validation_iter(self, data_batch):
"""
Runs an outer loop evaluation step on the meta-model's parameters.
:param data_batch: input data batch containing the support set and target set input, output pairs
:return: The losses of the iteration that was run.
"""
if self.training:
self.eval()
losses, per_task_preds = self.evaluation_forward_prop(data_batch=data_batch, epoch=self.current_epoch)
return losses, per_task_preds
def save_model(self, model_save_dir, state):
"""
Save the network parameter state and experiment state dictionary.
:param model_save_dir: The directory to store the state at.
:param state: The state containing the experiment state and the network. It's in the form of a dictionary
object.
"""
state['network'] = self.state_dict()
torch.save(state, f=model_save_dir)
def get_across_task_loss_metrics(self, total_losses, total_accuracies, loss_metrics_dict):
losses = dict()
losses['loss'] = torch.mean(torch.stack(total_losses), dim=(0,))
losses['accuracy'] = torch.mean(torch.stack(total_accuracies), dim=(0,))
if 'saved_logits' in loss_metrics_dict:
losses['saved_logits'] = loss_metrics_dict['saved_logits']
del loss_metrics_dict['saved_logits']
for name, value in loss_metrics_dict.items():
losses[name] = torch.stack(value).mean()
for idx_num_step, (name, learning_rate_num_step) in enumerate(self.inner_loop_optimizer.named_parameters()):
for idx, learning_rate in enumerate(learning_rate_num_step.mean().view(1)):
losses['task_learning_rate_num_step_{}_{}'.format(idx_num_step,
name)] = learning_rate.detach().cpu().numpy()
return losses
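# Hedged usage sketch for this baseline (comments only, not executed): a
# training driver is assumed to construct the classifier with the experiment
# hyperparameters, call build_module(), and then drive it roughly as below.
# 'model', 'data_batch', 'epoch' and 'current_iter' are illustrative
# placeholders, and save_model() expects a full file path since it passes
# model_save_dir straight to torch.save().
#
#   >>> losses, preds = model.run_train_iter(data_batch, epoch, current_iter)
#   >>> val_losses, val_preds = model.run_validation_iter(data_batch)
#   >>> model.save_model(model_save_dir='snapshot_latest', state={'current_iter': current_iter})
#   >>> state = model.load_model(model_save_dir='.', model_name='snapshot', model_idx='latest')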
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from scipy.optimize import brentq
from . import amino_acid_properties
class protein:
"""
Does basic calculations on proteins, enzymes and polypeptides.
Calculations include e.g. the isoelectric point similar to the "ExPASy
Compute pI/Mw tool".
"""
def __init__(self, mode, aa_abundances, pka_data='pka_bjellqvist',
**kwargs):
"""
Initialize a protein instance.
Parameters
----------
mode : str
Mode defining how amino acid abundances are passed to the class.
Currently can be 'mmol_g', 'sequence', 'absolute' or
'res_per_1000'.
aa_abundances : list or string
Gives the amino acid abundances in the unit defined by mode.
Format is as follows:
mode == 'mmol_g' : list of float or ndarray
Contains the abundance of the amino acid residues in mmol/g in
the order ['D', 'N', 'T', 'S', 'E', 'Q', 'G', 'A', 'C', 'V',
'M', 'I', 'L', 'Y', 'F', 'H', 'K', 'R', 'P', 'W', 'Hyp', 'Hyl'].
mode == 'sequence' : str
Alternative to mmol_g. Has to be a string consisting of one
letter codes for amino acids.
mode == 'absolute' : list of float or ndarray
Alternative to mmol_g and sequence. Is a list in the same order
as for mmol_g, but with absolute abundances of amino acids,
e.g. deduced from the sequence.
mode == 'res_per_1000' : list of float or ndarray
List in the same order as mmol_g, but with abundances
normalized to 1000 amino acid residues.
pka_data : string, optional
Gives the pKa dataset used for charge and IEP calculations. Allowed
values are 'pka_ipc_protein', 'pka_emboss' or 'pka_bjellqvist'.
Default is 'pka_bjellqvist'.
**kwargs in case of modifications are present
mod_types : list of strings
Allowed values are the index values of
amino_acid_properties.chain_modifications, defaults to empty
list. Currently only 'N_term' and 'C_term' are allowed in order
to account for the N and C terminus. Will be expanded in the
future.
mod_abundances : list of float
The abundances of the modifications given by mod_types. Must be
given in the same units as the input data for the main chain
and contain the same number of elements as mod_types. Default
is an empty list. Usually, it is [1, 1] to account for one C
terminus and one N terminus.
mod_sites : list of strings
Currently unused. The plan is:
The sites on the main chain which are modified, so allowed
values are the index values in self.main_chain or 'any'. 'any'
means that the modification site is undefined. In this case,
the modification is only important e.g. for molar mass
calculations, but irrelevant e.g. for IEP calculations. Must
contain the same number of elements as mod_types. Default is
a list the same size as mod_types with only 'any' entries.
pka_scales : list of strings
The pKa scales used for the different modifications. Must
contain the same number of entries as mod_types. Default is
a list with the same size as mod_types with only
'pka_other' entries.
Returns
-------
None.
"""
self.mode = mode
self.amino_acid_sequence = None
self.number_of_residues = None
allowed_modes = ['mmol_g', 'sequence', 'absolute', 'res_per_1000']
if self.mode == allowed_modes[0]: # 'mmol_g'
self.abundance_unit = 'mmol_g'
self.abundance = aa_abundances
elif self.mode == allowed_modes[1]: # 'sequence'
self.abundance_unit = 'sequence'
self.amino_acid_sequence = aa_abundances
self.abundance = []
elif self.mode == allowed_modes[2]: # 'absolute'
self.abundance_unit = 'absolute'
self.abundance = aa_abundances
elif self.mode == allowed_modes[3]: # 'res_per_1000'
self.abundance_unit = 'res_per_1000'
self.abundance = aa_abundances
else:
raise ValueError('No valid option for mode given. Allowed values '
'are {}'.format(allowed_modes))
if isinstance(self.abundance, np.ndarray):
self.abundance = self.abundance.tolist()
self.mod_types = kwargs.get('mod_types', [])
self.mod_abundances = kwargs.get('mod_abundances', [])
# self.mod_sites = kwargs.get('mod_sites',
# ['any']*len(self.mod_types))
self.pka_scales = kwargs.get(
'pka_scales', ['pka_other']*len(self.mod_types))
# Make sure that kwargs contain equal numbers of elements.
assert (
len(self.mod_types) == len(self.mod_abundances) ==
len(self.pka_scales)), (
'Length mismatch, mod_types, mod_abundances and pka_scales must '
'contain an equal number of elements, but contain {}, {} and {} '
'elements.'.format(len(self.mod_types), len(self.mod_abundances),
len(self.pka_scales)))
self.pka_data = pka_data
self.initialize_main_chain()
self.initialize_modifications()
# self.combine_main_chain_modifications()
self.initialize_pka_dataset()
def initialize_main_chain(self):
"""
Transform input data into DataFrame.
Resulting DataFrame contains amino acid abundances, pKa values and
charge_indicator of relevant groups.
"""
self.main_chain = pd.DataFrame(
[], index=amino_acid_properties.amino_acids.index)
self.main_chain['molar_mass_residue'] = (
amino_acid_properties.amino_acids['molar_mass_residue'])
self.main_chain['N_content_residue'] = (
amino_acid_properties.amino_acids['N_content_residue'])
if self.abundance_unit in ['mmol_g', 'absolute', 'res_per_1000']:
# if less abundance values than entries in amino acid table are
# given, remaining abundances are set to zero
self.abundance.extend(
(len(self.main_chain.index) -
len(self.abundance)) * [0])
self.main_chain[
'abundance_' + self.abundance_unit] = self.abundance
elif self.abundance_unit == 'sequence':
# occurrences of the first 20 amino acids in the sequence are
# counted, needs to be adapted if more than 20 amino acids such
# as hydroxylysine are added
self.number_of_residues = len(self.amino_acid_sequence)
for key in self.main_chain.index[:20]:
self.abundance.append(self.amino_acid_sequence.count(key))
# Hyp, Hyl abundances
self.abundance.extend(
(len(self.main_chain.index) - len(self.abundance)) * [0])
self.main_chain[
'abundance_' + self.abundance_unit] = self.abundance
# Normalize abundances to their sum to make different inputs comparable.
self.main_chain['abundance_norm'] = (
self.main_chain['abundance_' + self.abundance_unit].values /
np.sum(self.main_chain['abundance_' +
self.abundance_unit].values))
def initialize_modifications(self):
self.modifications = pd.DataFrame(
[], index=amino_acid_properties.chain_modifications.index)
self.modifications['molar_mass_residue'] = (
amino_acid_properties.chain_modifications['molar_mass_residue'])
self.modifications['N_content_residue'] = (
amino_acid_properties.chain_modifications['N_content_residue'])
self.modifications['abundance_' + self.abundance_unit] = np.nan
# for mod_type, abundance, site, pka_scale in zip(self.mod_types,
# self.mod_abundances,
# self.mod_sites,
# self.pka_scales):
for mod_type, abundance, pka_scale in zip(self.mod_types,
self.mod_abundances,
self.pka_scales):
self.modifications.at[
mod_type, 'abundance_' + self.abundance_unit] = abundance
# self.modifications.at[mod_type, 'modified_residues'] = site
self.modifications.at[mod_type, 'pka_scale'] = pka_scale
self.modifications.dropna(subset=['abundance_' + self.abundance_unit],
inplace=True)
# Modification abundances are normalized on the basis of the input data
# of the main chain because the modifications do not introduce any
# additional residues/repeating units
self.modifications['abundance_norm'] = (
self.modifications['abundance_' + self.abundance_unit].values /
np.sum(self.main_chain['abundance_' + self.abundance_unit].values))
def initialize_pka_dataset(self):
self.IEP_dataset = pd.DataFrame([], index=self.main_chain.index)
self.IEP_dataset['abundance_norm'] = self.main_chain['abundance_norm']
self.IEP_dataset['charge_indicator'] = (
amino_acid_properties.amino_acids['charge_indicator'])
self.IEP_dataset['pka_data'] = amino_acid_properties.amino_acids[
self.pka_data]
for idx in self.modifications.index:
# mod_idx = self.modifications.at[idx, 'modified_residues']
mod_pka_scale = self.modifications.at[idx, 'pka_scale']
# if mod_idx != 'any':
# self.IEP_dataset.at[mod_idx, 'abundance_norm'] -= (
# self.modifications.at[idx, 'abundance_norm'])
self.IEP_dataset.at[idx, 'abundance_norm'] = self.modifications.at[
idx, 'abundance_norm']
self.IEP_dataset.at[idx, 'charge_indicator'] = (
amino_acid_properties.chain_modifications.at[
idx, 'charge_indicator'])
self.IEP_dataset.at[idx, 'pka_data'] = (
amino_acid_properties.chain_modifications.at[
idx, mod_pka_scale])
self.IEP_dataset.dropna(subset=['pka_data'], inplace=True)
def charge(self, pH):
"""
Calculate the net charge of the protein at a given pH.
Parameters
----------
pH : float
The pH used for net charge calculation.
Returns
-------
charge : float
The net charge of the protein at the given pH.
"""
# There might be a problem with the normalized abundances, because
# C-term and N_term abundances are not included in the denominator
# of normalization. Therefore, the sum of normalized abundances of all
# amino acids and the termini is a little larger than one. Must be
# checked in the future, also relevant for charge_curve. Probably
# especially important for small peptides. Is irrelevant if no amino
# acid sequence is given because then the termini are and must be
# ignored anyway in the calculations.
charge = np.sum(self.IEP_dataset['charge_indicator'].values *
self.IEP_dataset['abundance_norm'].values /
(1+10**(self.IEP_dataset['charge_indicator'].values *
(pH-self.IEP_dataset['pka_data'].values))))
return charge
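# The expression above evaluates, for every ionizable group i with charge
# indicator c_i (typically +1 for basic and -1 for acidic groups) and
# normalized abundance x_i, the Henderson-Hasselbalch-derived partial charge
#
#     Z(pH) = sum_i c_i * x_i / (1 + 10**(c_i * (pH - pKa_i)))
#
# This explanatory comment is read off the code above; it is the same
# per-group model used by ExPASy-style pI tools that this class emulates.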
def charge_curve(self, ph_range=[0, 14], data_points=100):
"""
Calculate the charge curve of the protein in a given pH range.
Parameters
----------
ph_range : list of floats, optional
First value is the lower pH limit and second value the upper pH
limit used for calculations. The default is [0, 14].
data_points : int, optional
Number of data points calculated. The default is 100.
Returns
-------
curve : ndarray
2D array with shape (2,data_points) containing the pH values
in the first row and the net charges in the second row.
"""
pH = np.linspace(ph_range[0], ph_range[1], data_points)
curve = np.sum(self.IEP_dataset['charge_indicator'].values *
self.IEP_dataset['abundance_norm'].values /
(1+10**(self.IEP_dataset['charge_indicator'].values *
(pH[:, np.newaxis] -
self.IEP_dataset['pka_data'].values
))), axis=1)
return np.array([pH, curve])
def IEP(self, ph_range=[0, 14]):
"""
Calculate the isoelectric point (IEP) of the protein.
The IEP is the pH value at which the protein has net charge of zero.
Parameters
----------
ph_range : list of floats, optional
First value is the lower pH limit and second value the upper pH
limit used for calculations. The default is [0, 14].
Returns
-------
IEP : float or np.nan
The calculated IEP is returned as float. If no IEP was found np.nan
is returned.
"""
try:
IEP = brentq(self.charge, ph_range[0], ph_range[1])
except ValueError:
IEP = np.nan
return IEP
def molar_mass(self, molecule_part='all'):
"""
Calculate the molar mass of the protein.
Works only if absolute abundances of the residues/repeating units
are known, i.e. when the sequence or absolute abundances are given.
Parameters
----------
molecule_part : str, optional
Defines if the molar mass is calculated for the full modified
polymer ('all'), only the main chain ('main_chain'), or only the
modifications ('mods'). Default is 'all'.
Returns
-------
float
The molar mass of the protein.
"""
if self.abundance_unit in ['absolute', 'sequence']:
main_chain_contribution = np.sum(
self.main_chain['abundance_' + self.abundance_unit].values *
self.main_chain['molar_mass_residue'].values)
modification_contribution = np.sum(
self.modifications['abundance_' + self.abundance_unit].values *
self.modifications['molar_mass_residue'].values)
molar_mass = main_chain_contribution + modification_contribution
allowed_parts = ['all', 'main_chain', 'mods']
if molecule_part == allowed_parts[0]: # 'all'
return molar_mass
elif molecule_part == allowed_parts[1]: # 'main_chain'
return main_chain_contribution
elif molecule_part == allowed_parts[2]: # 'mods'
return modification_contribution
else:
raise ValueError('No valid value given for molecule_part. '
'Allowed values are {}, but \'{}\' was given'
'.'.format(allowed_parts, molecule_part))
else:
raise Exception('Molar mass can only be calculated if mode is '
'\'absolute\' or \'sequence\', however mode is '
'\'{}\'.'.format(self.abundance_unit))
def mean_residue_molar_mass(self, molecule_part='all'):
"""
Calculate the mean residue molecular weight of proteins.
Calculation is done without taking C and N terminus into account if
only relative abundances are known and not the entire sequence.
Parameters
----------
molecule_part : str, optional
Defines if the mean residue molecular mass is calculated for the
full modified polymer ('all'), only the main chain ('main_chain'),
or only the modifications ('mods'). Default is 'all'.
Returns
-------
mean_residue_mass : float
The mean residue molecular weight based on the abundances of amino
acids of the molecule part given with molecule_part.
"""
main_chain_contribution = np.sum(
self.main_chain['molar_mass_residue'].values *
self.main_chain['abundance_norm'].values)
modification_contribution = np.sum(
self.modifications['molar_mass_residue'].values *
self.modifications['abundance_norm'].values)
mean_residue_molar_mass = (
main_chain_contribution + modification_contribution)
allowed_parts = ['all', 'main_chain', 'mods']
if molecule_part == allowed_parts[0]: # 'all'
return mean_residue_molar_mass
elif molecule_part == allowed_parts[1]: # 'main_chain'
return main_chain_contribution
elif molecule_part == allowed_parts[2]: # 'mods'
return modification_contribution
else:
raise ValueError('No valid value given for molecule_part. '
'Allowed values are {}, but \'{}\' was given'
'.'.format(allowed_parts, molecule_part))
def n_content(self, molecule_part='all'):
"""
Calculate the nitrogen mass fraction of polymers.
Parameters
----------
molecule_part : str, optional
Defines if the n content is calculated for the full modified
polymer ('all'), only the main chain ('main_chain'), or only the
modifications ('mods'). Default is 'all'.
Returns
-------
n_content : float
The n_content based on the abundances of amino acids of the
molecule part given with molecule_part.
"""
mean_chain = self.mean_residue_molar_mass(molecule_part='main_chain')
mean_mods = self.mean_residue_molar_mass(molecule_part='mods')
main_chain_contribution = (
np.sum(
self.main_chain['N_content_residue'] *
self.main_chain['abundance_norm'] *
self.main_chain['molar_mass_residue']) /
mean_chain
)
modification_contribution = (
np.sum(
self.modifications['N_content_residue'] *
self.modifications['abundance_norm'] *
self.modifications['molar_mass_residue']) /
mean_mods
) if mean_mods != 0 else 0
n_content = main_chain_contribution + modification_contribution
allowed_parts = ['all', 'main_chain', 'mods']
if molecule_part == allowed_parts[0]: # 'all'
return n_content
elif molecule_part == allowed_parts[1]: # 'main_chain'
return main_chain_contribution
elif molecule_part == allowed_parts[2]: # 'mods'
return modification_contribution
else:
raise ValueError('No valid value given for molecule_part. '
'Allowed values are {}, but \'{}\' was given'
'.'.format(allowed_parts, molecule_part))
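# Illustrative usage sketch (not part of the original module). It assumes the
# bundled amino_acid_properties tables index the first 20 amino acids by their
# one-letter codes (as initialize_main_chain() relies on) and that a
# 'pka_other' scale exists for the 'N_term'/'C_term' modifications. Because of
# the relative import above, run it as a module, e.g.
# `python -m <your_package>.protein` (package name is a placeholder).
if __name__ == '__main__':
    demo = protein(mode='sequence',
                   aa_abundances='ACDEFGHIKLMNPQRSTVWY',
                   mod_types=['N_term', 'C_term'],
                   mod_abundances=[1, 1])
    print('Isoelectric point:', demo.IEP())
    print('Molar mass [g/mol]:', demo.molar_mass())
    print('Nitrogen mass fraction:', demo.n_content())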
|
## -*- coding: utf-8 -*-
##
## Jonathan Salwan - 2014-05-17 - ROPgadget tool
##
## http://twitter.com/JonathanSalwan
## http://shell-storm.org/project/ROPgadget/
##
import re
import codecs
from capstone import CS_MODE_32
from struct import pack
class Options:
def __init__(self, options, binary, gadgets):
self.__options = options
self.__gadgets = gadgets
self.__binary = binary
if options.filter: self.__filterOption()
if options.only: self.__onlyOption()
if options.range: self.__rangeOption()
if options.re: self.__reOption()
if options.badbytes: self.__deleteBadBytes()
def __filterOption(self):
new = []
if not self.__options.filter:
return
filt = self.__options.filter.split("|")
if not len(filt):
return
for gadget in self.__gadgets:
flag = 0
insts = gadget["gadget"].split(" ; ")
for ins in insts:
if ins.split(" ")[0] in filt:
flag = 1
break
if not flag:
new += [gadget]
self.__gadgets = new
def __onlyOption(self):
new = []
if not self.__options.only:
return
only = self.__options.only.split("|")
if not len(only):
return
for gadget in self.__gadgets:
flag = 0
insts = gadget["gadget"].split(" ; ")
for ins in insts:
if ins.split(" ")[0] not in only:
flag = 1
break
if not flag:
new += [gadget]
self.__gadgets = new
def __rangeOption(self):
new = []
rangeS = int(self.__options.range.split('-')[0], 16)
rangeE = int(self.__options.range.split('-')[1], 16)
if rangeS == 0 and rangeE == 0:
return
for gadget in self.__gadgets:
vaddr = gadget["vaddr"]
if vaddr >= rangeS and vaddr <= rangeE:
new += [gadget]
self.__gadgets = new
def __reOption(self):
new = []
re_strs = []
if not self.__options.re:
return
if '|' in self.__options.re:
re_strs = self.__options.re.split(' | ')
if 1 == len(re_strs):
re_strs = self.__options.re.split('|')
else:
re_strs.append(self.__options.re)
patterns = []
for __re_str in re_strs:
pattern = re.compile(__re_str)
patterns.append(pattern)
for gadget in self.__gadgets:
flag = 1
insts = gadget["gadget"].split(" ; ")
for pattern in patterns:
for ins in insts:
res = pattern.search(ins)
if res:
flag = 1
break
else:
flag = 0
if not flag:
break
if flag:
new += [gadget]
self.__gadgets = new
def __deleteBadBytes(self):
if not self.__options.badbytes:
return
new = []
# Filter out empty badbytes (i.e. if badbytes was set to 00|ff| there's an empty badbyte after the last '|')
# and convert each one to the corresponding byte
bbytes = [codecs.decode(bb.encode("ascii"), "hex") for bb in self.__options.badbytes.split("|") if bb]
archMode = self.__binary.getArchMode()
for gadget in self.__gadgets:
gadAddr = pack("<L", gadget["vaddr"]) if archMode == CS_MODE_32 else pack("<Q", gadget["vaddr"])
if not any(x in gadAddr for x in bbytes):
    new += [gadget]
self.__gadgets = new
def getGadgets(self):
return self.__gadgets
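# Illustrative, self-contained sketch (not part of upstream ROPgadget): filter a
# toy gadget list with the 'only' and 'badbytes' options. The _BinaryStub class
# and the gadget/option values below are made up for the example; only
# getArchMode() is required of the binary object by this class.
if __name__ == '__main__':
    from argparse import Namespace

    class _BinaryStub(object):
        def getArchMode(self):
            return CS_MODE_32

    toy_gadgets = [{"gadget": "pop eax ; ret", "vaddr": 0x080486a3},
                   {"gadget": "int 0x80", "vaddr": 0x08048500}]
    toy_options = Namespace(filter=None, only="pop|ret", range=None, re=None,
                            badbytes="00")
    # Keeps only gadgets built purely from 'pop'/'ret' instructions whose
    # packed address contains no 0x00 byte.
    print(Options(toy_options, _BinaryStub(), toy_gadgets).getGadgets())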
|
#
#
# =================================================================
# =================================================================
"""
IBM PowerVC Storage utilities for retrieving and parsing HMC (K2)
response data for topology resources.
"""
import time
from nova.openstack.common import log as logging
from powervc_k2.k2operator import K2Error
from powervc_nova import _
from oslo.config.cfg import CONF
import powervc_nova.objects.storage.dom as storage_dom
from powervc_nova.virt.ibmpowervm.vif.hmc import K2_RMC_READ_SEC
from powervc_nova.virt.ibmpowervm.vif.hmc import K2_MNG_SYS_READ_SEC
LOG = logging.getLogger(__name__)
CONF.import_opt('ibmpowervm_register_ssps', 'powervc_nova.virt.ibmpowervm.hmc')
CONF.import_opt('ibmpowervm_ssp_hmc_version_check',
'powervc_nova.virt.ibmpowervm.hmc')
VFCM_PORTELEMT_LEN = 2
XAG_DEFAULT = [None]
XAG_VIOS = ['ViosStorage']
def k2_read(operator, rootType, rootId=None, childType=None,
suffixType=None, suffixParm=None, timeout=-1, age=-1, xag=[]):
"""
We provide our own read abstraction interface on top of K2's here.
This can be enhanced as needed, but it is mainly for our own custom
logging.
NOTES: This method will not be called if cached K2 feeds are being used
from the compute driver.
If this is called from the compute periodic task, then the
operator should encapsulate a common session that is always
re-used.
The operator may do its own internal caching so an over-the-wire
request may not be performed.
"""
# Log the call
LOG.debug("\nK2_OPER_REQ - rootType=%(rootType)s, rootId=%(rootId)s, "
"childType=%(childType)s, suffixType=%(suffixType)s, "
"suffixParm=%(suffixParm)s, timeout=%(timeout)d, age=%(age)d, "
"xag=%(xag)s, ...operator_obj=%(operator)s" % locals())
# Make the K2 call.
ticks = time.time()
resp = operator.read(rootType, rootId=rootId, childType=childType,
suffixType=suffixType, suffixParm=suffixParm,
timeout=timeout, age=age, xag=xag)
# Set up timing object
global logTiming
try:
logTiming
except NameError:
logTiming = restTiming() # init if it does not exist yet
# Log that the response was received. Note there is a special logger
# that can be enabled for the operator to log the response itself.
logTiming.log_return(time.time() - ticks, operator, rootType, rootId,
childType, suffixType)
# return
return resp
class restTiming():
""" Simple class to track and log the timing of a K2 rest call """
def __init__(self):
self.hashdict = {}
def log_return(self, seconds, operator, rootType,
rootId=None, childType=None, suffixType=None):
# Create a hash key from a few pieces of info so that we can
# compare query timings between 'types' of calls. Note that
# operator.session.host will contain the HMC IP/hostname.
pieces = [operator.session.host, rootType, rootId, childType,
suffixType]
key = ''.join(map(lambda x: str(x) + "_" if x is not None else "*",
pieces))
if key not in self.hashdict:
self.hashdict[key] = {'samples': 1, 'sum': seconds}
else:
self.hashdict[key]['samples'] += 1
self.hashdict[key]['sum'] += seconds
avg = self.hashdict[key]['sum'] / self.hashdict[key]['samples']
LOG.debug("\nK2_OPER_Timing=%.3f Call_hash=%s, average=%.3f, "
"avg_delta=%+.3f, entry=%s.\n" %
(seconds, key, avg, seconds - avg, self.hashdict[key]))
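# Example of the hash key produced above (explanatory comment): for a call with
# operator.session.host='hmc1', rootType='ManagedSystem', rootId=None,
# childType='VirtualIOServer', suffixType=None the key becomes
# 'hmc1_ManagedSystem_*VirtualIOServer_*', so timings are aggregated per
# "shape" of K2 request rather than per individual URI.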
def _find_managed_system(oper, host_dict):
"""
Finds the ManagedSystem entry for this specific host that the HMC is
managing.
:param oper: The HMC operator for interacting with the HMC.
:param host_dict: has the machine, model, and serial info for
managed system to look up
:return: The K2 object for the managed system
"""
# Get the ManagedSystem entry we want by Machine type, model, and serial
# from the K2 API. We use the 'search' suffix to achieve this.
k2resp = None
try:
suffix = '(MachineType==%s&&Model==%s&&SerialNumber==%s)' % \
(host_dict['machine'], host_dict['model'], host_dict['serial'])
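# For example (illustrative values), a host_dict of {'machine': '8286',
# 'model': '42A', 'serial': '123456X'} yields the search suffix
# '(MachineType==8286&&Model==42A&&SerialNumber==123456X)'.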
k2resp = k2_read(oper, 'ManagedSystem', suffixType='search',
suffixParm=suffix, timeout=K2_MNG_SYS_READ_SEC,
xag=XAG_DEFAULT)
except Exception as ex:
LOG.exception(ex)
if k2resp is None:
LOG.error(_("Managed System K2 response was none or failed."))
return None
xpath = './MachineTypeModelAndSerialNumber/SerialNumber'
entries = k2resp.feed.findentries(xpath, host_dict['serial'])
if entries is None:
LOG.warn(_("Managed System HMC response did not have any entries "
"for host '%s'.") % host_dict)
return None
# Confirm same model and type
machine_xpath = './MachineTypeModelAndSerialNumber/MachineType'
model_xpath = './MachineTypeModelAndSerialNumber/Model'
for entry in entries:
entry_machine = entry.element.findtext(machine_xpath)
entry_model = entry.element.findtext(model_xpath)
if (entry_machine == host_dict['machine'] and
entry_model == host_dict['model']):
host_dict['uuid'] = entry.properties['id']
return entry
LOG.warn(_("Managed System HMC response did not have an 'entry' "
"element for host '%s'.") % host_dict)
return None
def collect_host_vios_feeds(oper, topology):
"""
This is an alternate method to the collect_vios_topology() method below.
Instead of looping over the associated VIOS partitions for a
ManagedSystem, we get the feed for all its VIOSes, then loop over the
feed entries. This flow is for when we know the uuid of the host.
The topology dictionary passed is updated with the collected and parsed
information.
"""
host_ref = topology['nova_host'].mtm_serial
LOG.debug('Obtaining VIOS feed for Managed System: %s' % host_ref)
k2resp = None
try:
k2resp = k2_read(oper, 'ManagedSystem',
rootId=topology['nova_host'].uuid,
childType='VirtualIOServer',
timeout=K2_MNG_SYS_READ_SEC, xag=XAG_VIOS,
age=topology['max_cache_age'])
except Exception as ex:
log_k2ex_and_get_msg(ex, _("Cannot get VIOSs for host %s.") % host_ref,
topology)
return topology
if k2resp is None:
# should not get here
msg = _("Managed System K2 response was none for host: "
"%s.") % host_ref
LOG.error(msg)
topology['error'] = [msg]
return topology
if not k2resp.feed or not k2resp.feed.entries:
LOG.info(_("No VIOS entries for host: %s") % host_ref)
return topology
# Loop over all vioses found
num = 0
for vios_entry in k2resp.feed.entries:
num += 1
# call the helper to get a dict for this vios entry
vios_info = get_vios_info(vios_entry)
vios_info['id'] = get_vios_id(host_ref, vios_info)
# Add the vios entry to both indexes
topology['vios_by_id'][vios_info['id']] = vios_info
topology['vios_by_uuid'][vios_info['uuid']] = vios_info
if 'state' in vios_info and vios_info['state'] == "running" and\
'rmc_state' in vios_info and\
vios_info['rmc_state'] == "active":
topology['vios_num_trusted'] += 1
vios_info['trusted'] = True
else:
# Some VIOS information like cluster membership and FC ports
# should not be trusted if state info is not all OK.
vios_info['trusted'] = False
# Note the trust state marking below only makes a difference if the
# vios is being tracked as a prior cluster member.
if not topology['skip-update-clusters']:
# The static_topo structure is not synchronized, so we skip
# changing it if the 'skip' value is present, which deploy
# processing will always set. This avoids periodic task
# collections colliding with deploy collections.
topology['static_topo'].mark_vios_trust_state(vios_info['id'],
vios_info['trusted'])
LOG.debug("Added VIOS dict entry to topology: %s" % vios_info)
LOG.debug("Returning %d trusted VIOS entries out of %d collected for "
"compute host %s." % (topology['vios_num_trusted'], num,
host_ref))
return topology
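# Note on the shared 'topology' dictionary used above and below (explanatory
# comment): the collectors read 'nova_host', 'max_cache_age', 'static_topo',
# 'seq' and 'skip-update-clusters', and populate 'vios_by_id', 'vios_by_uuid',
# 'vios_num_trusted' and, on failure, an 'error' message list, so callers can
# reconcile VIOS and cluster state from a single structure.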
def collect_vios_topology(oper, topology, host_dict):
"""
Connect to the HMC K2 API and request the Managed System information.
The Managed System is represented by K2 Response, K2 Entry, and K2 Element
class objects. The data for the VIOS can be obtained by traversing
the object hierarchy. The topology dictionary passed is updated with the
obtained and parsed info.
"""
LOG.debug('Listing VIOSes by Managed System.')
# call the helper to get the K2 managed system
managedSysEntry = _find_managed_system(oper, host_dict)
if managedSysEntry is None:
return topology # Nothing to do
associatedVirtualIOServers = \
managedSysEntry.element.find('./AssociatedVirtualIOServers')
if associatedVirtualIOServers is None:
LOG.debug("There are no AssociatedVirtualIOServers from K2 for host: "
"%s." % host_dict)
return topology
vios_links = associatedVirtualIOServers.findall('./link')
if vios_links is None:
LOG.warn(_("HMC Problem: Associated VIOSes were returned, but the "
"links are missing."))
return topology
# LOOP over the VIOS links
for vios_link in vios_links:
href = vios_link.get('href')
if href is None:
LOG.error(_('vios_link \'%s\' has no href element.')
% vios_link.gettext())
continue
LOG.debug('Found href property: %s.' % href)
# Split the URL into elements.
href_elements = href.split('/')
# Ensure the list has at least one element.
if len(href_elements) == 0:
LOG.error(_("VIOS href property '%s'"
" does not have any elements." % href))
continue
# We only care about the last element in
# the list, which should be the uuid.
if href_elements[-1] is not None:
vios_uuid = href_elements[-1]
LOG.debug("Found vios uuid: %s" % vios_uuid)
# Request the VIOS data from the K2 operator
viosResponse = k2_read(oper, 'VirtualIOServer',
rootId=vios_uuid,
timeout=K2_RMC_READ_SEC, xag=XAG_VIOS,
age=topology['max_cache_age'])
if viosResponse is None:
LOG.warn(_("Unexpected HMC response condition: No VIOS "
"Element found for VIOS '%s'.") % vios_uuid)
continue
# Get the rest of the VIOS information
vios_info = get_vios_info(viosResponse.entry)
else:
LOG.error(_("Error parsing VIOS uuid from href '%s'.") % href)
continue # Don't append if vios props aren't set
# Add current vios dict to the mapping by the 'id' impl we define.
vios_info['id'] = get_vios_id(host_dict['name'], vios_info)
if vios_info['id'] in topology['vios_by_id']:
LOG.debug("VIOS %s already collected by another HMC. Skip." %
vios_info)
continue
# Add the vios entry to both indexes
topology['vios_by_id'][vios_info['id']] = vios_info
topology['vios_by_uuid'][vios_info['uuid']] = vios_info
if 'state' in vios_info and vios_info['state'] == "running" and\
'rmc_state' in vios_info and\
vios_info['rmc_state'] == "active":
topology['vios_num_trusted'] += 1
vios_info['trusted'] = True
else:
# Mirror collect_host_vios_feeds(): state info is not all OK, so
# cluster membership and FC ports should not be trusted.
vios_info['trusted'] = False
# Note the trust state marking below only makes a difference if the
# vios is being tracked as a prior cluster member.
topology['static_topo'].mark_vios_trust_state(vios_info['id'],
vios_info['trusted'])
LOG.debug("Added VIOS dict entry to topology: %s" % vios_info)
# End for vios loop
return topology
def get_vios_info(vios_entry):
"""
Get the specific data for the VIOS from the K2 VIOS entry.
"""
# Initialize the empty dictionary.
vios_info = {}
if not vios_entry:
LOG.warn(_("No Virtual I/O Servers returned in HMC response."))
return vios_info
vios_info['k2element'] = vios_entry # track for later logging
# Get the Partition Name Element
partitionNameElement = \
vios_entry.element.find('./PartitionName')
# If the Partition Name element exists
if partitionNameElement is not None:
# Save the partition name into the vios info object.
vios_info['name'] = partitionNameElement.gettext()
else:
vios_info['name'] = None
vios_info['uuid'] = uuid = vios_entry.properties['id']
LOG.debug("Processing VIOS partition '%s' with uuid: %s" %
(vios_info['name'], uuid))
#Get the Partition State Element
partitionStateElement = \
vios_entry.element.find('./PartitionState')
if partitionStateElement is not None:
#Save the partition State into the vios info object.
vios_info['state'] = partitionStateElement.gettext()
#Get the RMC State Element
RMCStateElement = \
vios_entry.element.find('./ResourceMonitoringControlState')
if RMCStateElement is not None:
#Save the RMC State into the vios info object.
vios_info['rmc_state'] = RMCStateElement.gettext()
# Get the Partition ID Element.
partitionIDElement = vios_entry.element.find('./PartitionID')
# If the partition ID Element exists
if partitionIDElement is not None:
# Save the partition id to the vios info object.
vios_info['lpar_id'] = partitionIDElement.gettext()
# For each VIOS, also call the helper method to extract
# the FC Port info from call already done.
LOG.debug("Getting FC ports for vios '%s'." % uuid)
vios_info['fcports'] = parse_fcports(vios_entry, vios_info)
else:
LOG.warn(_("HMC Problem: No PartitionID element for VIOS '%s'.") %
uuid)
return vios_info
def parse_fcports(vios, vios_info):
"""
Get a list of the FC Ports related to a specific VIOS.
'vios' passed in is the K2 response VIOS entry of a previous call.
We traverse the hierarchy of objects to find the desired
information.
"""
LOG.debug('Extracting FC Ports from VIOS K2 entry.')
# Create an empty dictionary for the FC port mapping (UDID --> port info)
fcports = {}
# Traverse the hierarchy to find the FC port information.
# First, check under PhysicalFibreChannelAdapter
adapterPath = str('./PartitionIOConfiguration/ProfileIOSlots/'
'ProfileIOSlot/AssociatedIOSlot/RelatedIOAdapter/'
'PhysicalFibreChannelAdapter')
fcAdapters = vios.element.findall(adapterPath)
if fcAdapters:
LOG.debug('Found element(s): %s.' % adapterPath)
for fcAdapter in fcAdapters:
portElements = fcAdapter.findall(
'./PhysicalFibreChannelPorts/PhysicalFibreChannelPort')
if portElements:
for portElement in portElements:
LOG.debug('Found PhysicalFibreChannelPort element.')
_parse_single_fc_port(portElement, fcports, fcAdapter)
else:
LOG.debug("PhysicalFibreChannelPort elements not found for "
"vios '%s'." % vios_info['name'])
else:
LOG.debug("PhysicalFibreChannelAdapter elements not found for vios "
"'%s'." % vios_info['name'])
# Check for the parent RelatedIOAdapters as we have seen cases
# where these exist for the FC adapters, but the FC adapter children
# elements are not returned. We can log this case.
ioAdaptPath = str('./PartitionIOConfiguration/ProfileIOSlots/'
'ProfileIOSlot/AssociatedIOSlot/RelatedIOAdapter/'
'IOAdapter')
ioAdapters = vios.element.findall(ioAdaptPath)
if ioAdapters:
vios_info['IOAdapters'] = []
for ioAdapter in ioAdapters:
if ioAdapter.find('./DeviceName') is not None:
adapterID = ioAdapter.find('./DeviceName').gettext()
desc = ioAdapter.find('./Description').gettext()
vios_info['IOAdapters'].append(adapterID)
if 'Fibre' in desc:
LOG.debug("No FC adapters reported, but found "
"IOAdapter %s with description: %s." %
(adapterID, desc))
# Removed code that scans through VirtualFibreChannelMappings
# if no physical adapter elements are found as this code is
# obsolete. With the current K2 schema we cannot fall back on this
# since there may be no virtual machines and thus no mappings (but
# we still need to collect the port inventory).
# Return the mapping of all the ports for the VIOS.
LOG.debug("Finished parsing FC ports on VIOS: %s" % vios_info['name'])
return fcports
def _parse_single_fc_port(portElement, fcports, adapterElement=None):
"""
If portElement is a valid, NPIV-capable FCPort, add its information
to fcports as a new dictionary port entry.
NOTE: This function modifies 'fcports' by adding an element for a
supported FC port, rather than returning a dictionary to add later.
Could refactor in the future to be more openstack-esque.
"""
uniqueDeviceIDElement = portElement.find('./UniqueDeviceID')
portNameElement = portElement.find('./PortName')
# AdapterUUID has been removed from the schema. Keep it
# as a placeholder.
# adapter_uuidElement = portElement.find('./AdapterUUID')
wwpnElement = portElement.find('./WWPN')
total_portsElement = portElement.find('./TotalPorts')
avail_portsElement = portElement.find('./AvailablePorts')
adapterID = '[unknown]'
status = "Down"
# use exception to find invalid attribute.
try:
if adapterElement is not None:
# adapter id is not needed for Paxes at this time,
# it is part of the DB DTO for the fcport object, and if the
# parent adapter element is passed, we use the DeviceName from
# that.
adapterID = adapterElement.find('./DeviceName').gettext()
# The portElement may describe a non-NPIV port, which
# has no TotalPorts and AvailablePorts attributes.
# Filter out non-NPIV ports, otherwise the VFC mapping
# will fail.
fcport = {'id': uniqueDeviceIDElement.gettext(),
'name': portNameElement.gettext(),
'adapter_id': adapterID,
'wwpn': wwpnElement.gettext()
}
# WARNING! We need to be very careful in checking k2 elements.
# The following code line is not equivalent to:
# if total_portsElement and avail_portsElement
# This is because these K2 elements have a __len__() method that
# evaluates to 0, meaning they have no nested entries. But the
# objects still exist!
if total_portsElement is not None and avail_portsElement is not None:
fcport['total_vports'] = int(total_portsElement.gettext())
fcport['available_vports'] = int(avail_portsElement.gettext())
if fcport['available_vports'] > 0:
# put number of VFCs left on the port for OK status as
# this info can be split out and used for API info.
status = "OK:%d" % fcport['available_vports']
else:
status = "Full"
else:
LOG.debug("Not a currently supported FC Port since "
"TotalPorts and AvailablePorts are missing properties"
"from the element: %s" % portElement.toxmlstring())
status = "Unsupported/Offline"
fcport['status'] = status
if None not in fcport.values():
LOG.debug("Adding FC port: %(fcport)s" % locals())
fcports[fcport['id']] = fcport
else:
LOG.info("HMC Problem: Not adding FC Port, which has a 'None' "
"value: %(fcport)s") % dict(fcport=fcport)
except Exception:
# One of the required port elements are missing.
LOG.warn(_("HMC Problem: Found non-NPIV capable port due to "
"unexpected format. Skip portElement: %(portelem)s") %
{'portelem': portElement.toxmlstring()})
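# Shape of each entry added to 'fcports' above (illustrative values): e.g.
# {'id': '<UniqueDeviceID>', 'name': 'fcs0', 'adapter_id': '<DeviceName>',
#  'wwpn': '10000090FA1B2C3D', 'total_vports': 64, 'available_vports': 60,
#  'status': 'OK:60'}
# keyed by the port's UniqueDeviceID; ports without TotalPorts/AvailablePorts
# get status 'Unsupported/Offline' and no vport counts.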
def log_k2ex_and_get_msg(ex, prefix, topology):
""" LOG K2 exception and extracted message. Return NLS message """
LOG.exception(ex)
detail = {}
k2msg = _("None")
if isinstance(ex, K2Error) and ex.k2response:
detail['Request_headers'] = ex.k2response.reqheaders
detail['Response_headers'] = ex.k2response.headers
detail['Response_body'] = ex.k2response.body
detail['Response_status'] = ex.k2response.status
if hasattr(ex.k2response, 'k2err'):
m = ex.k2response.k2err.find('./Message')
if m is not None:
k2msg = m.text
msg = _("%(prefix)s ***K2 Operator Error***: %(ex_msg)s [K2 Error body "
"Message: %(k2msg)s]") %\
dict(prefix=prefix, ex_msg=ex, k2msg=k2msg)
LOG.error(msg)
if detail:
LOG.error(_("Error details: %s") % detail)
if topology is not None:
if 'error' in topology:
topology['error'].append(msg)
else:
topology['error'] = [msg]
return msg
def map_vios_to_cluster(oper, topology):
"""
Given a K2 operator connection to an HMC, retrieve all the VIOS
Clusters and loop over the member VIOSes, add the membership info
to the passed topology data structure. This mapping
will subsequently be used in the DB reconciliation to update the
cluster membership field on the VIOS DB resource if needed.
"""
if topology['skip-update-clusters']:
LOG.debug("Skip getting Cluster feed info. Could be during deploy.")
return
if topology['vios_num_trusted'] == 0:
LOG.debug("No VIOSes are reporting a trusted state for collection "
"sequence %d. Skip getting Cluster feed." % topology['seq'])
topology['skip-update-clusters'] = True
return
# See if registering clusters is disabled in this environment; if it
# is, then it isn't worth the performance hit to query clusters from K2.
if not CONF.ibmpowervm_register_ssps or \
cluster_registration_verification(oper) is False:
LOG.debug("Cluster registration is disabled, so no HMC cluster "
"retrieval.")
topology['skip-update-clusters'] = True
return
LOG.debug("Retrieving all clusters and VIOS members for current hmc.")
# Get the root cluster object from the K2 API.
# NOTE: In HMC 810, the Cluster info does not have events and so the
# caching is not event based. The K2 operator will just employ
# a small fixed timeout for the Cluster cache.
try:
k2resp = k2_read(oper, 'Cluster', xag=XAG_DEFAULT,
age=topology['max_cache_age'])
except Exception as ex:
log_k2ex_and_get_msg(ex, _("Cannot retrieve any VIOS clusters."),
topology)
# We want to skip updating the cluster providers if there is an error
topology['skip-update-clusters'] = True
return
if len(k2resp.feed.entries) == 0:
LOG.debug("No cluster entries returned from HMC.")
cluster_members = {}
topo_factory = storage_dom.HostStorageTopologyFactory.get_factory()
static_topo = topo_factory.get_static_topology()
# Traverse the hierarchy structure to find the VIOS objects.
# The structure is:
# k2resp --> entry --> Node --> Node --> VirtualIOServer
for i in range(len(k2resp.feed.entries)):
cluster = k2resp.feed.entries[i]
cluster_id = cluster.element.findtext('./ClusterID')
if cluster_id is None or len(cluster_id) == 0:
LOG.warn(_("HMC Problem: Cluster entry does not have a ClusterID. "
"Skipping. Entry is '%s'." % cluster.gettext()))
continue
cluster_members[cluster_id] = set()
cluster_name = cluster.element.findtext('./ClusterName')
LOG.debug("Cluster entry #%d: id=%s, display_name=%s." %
(i, cluster_id, cluster_name))
backend_id = cluster.properties['id']
viosElements = cluster.element.findall('./Node/Node/VirtualIOServer')
if not viosElements:
# This may not be possible.
LOG.info(_("The cluster '%s' has no member VIOSs reporting.") %
cluster_name)
for j in range(len(viosElements)):
vios_elmt = viosElements[j]
href = vios_elmt.get('href')
if href is None:
LOG.warn(_("HMC Problem: VirtualIOServer element has no "
"'href': %s." % str(vios_elmt)))
continue
LOG.debug("Found href for VirtualIOServer member #%d: %s."
% (j, str(href)))
# Split the URL into elements.
href_components = href.split('/')
# Ensure the list has at least one element.
if len(href_components) > 0:
# We only care about the last element in
# the list, which should be the uuid.
vios_uuid = href_components[-1]
LOG.debug("Found uuid: '%s'." % vios_uuid)
vios_cluster = {'cluster_id': cluster_id,
'cluster_name': cluster_name,
'backend_id': backend_id}
if vios_uuid in topology['vios_by_uuid']:
vios = topology['vios_by_uuid'][vios_uuid]
vios['vios_cluster'] = vios_cluster
static_topo.set_cluster_for_vios(vios['id'], cluster_id,
cluster_name)
if not vios['trusted']:
# For debug purposes only
LOG.debug("VIOS is reporting cluster membership "
"even though its state information in a "
"separate K2 response has it marked not "
"trusted: %s." % vios)
else:
LOG.debug("Clustered VIOS uuid %s not under host for this "
"topology collection. Cluster = %s" %
(vios_uuid, vios_cluster))
# end for each vios element
# Log a missing cluster.
for cluster in static_topo.cluster_keyed.values():
if cluster['set_cluster_seq'] != static_topo.sequence_num:
LOG.info(_("The cluster %(cluster_name)s, inventoried during "
"prior topology collection sequence %(old)d, did not "
"report members this time [%(new)d].") %
dict(cluster_name=cluster['display_name'],
old=cluster['set_cluster_seq'],
new=static_topo.sequence_num))
LOG.debug("Tracked cluster membership: %s." % static_topo.cluster_keyed)
def cluster_registration_verification(oper):
"""Method to verify the HMC level is good for cluster reg. support"""
if not CONF.ibmpowervm_ssp_hmc_version_check:
return True
else:
try:
#Get the ManagementConsole information from the K2 API.
mc_info = oper.read('ManagementConsole')
#Parse the feed for the HMC version info
ver_info = mc_info.feed.entries[0].element.find('VersionInfo')
#Get the maintenance number from the version info
maintenance = ver_info.findtext('Maintenance')
#Get the minor number from the version info
minor = ver_info.findtext('Minor')
#Get the version number from the version info
version = ver_info.findtext('Version')
#Combine to get the HMC release version
release = version + maintenance + minor
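            # Illustrative example (assumed values): Version '8', Maintenance '1' and
            # Minor '0' concatenate to '810', and int('810') >= 810 passes the check.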
LOG.info(_("Management Console release is '%s'.") % release)
return int(release) >= 810
except Exception as exc:
LOG.exception(exc)
LOG.error("There was an error getting the HMC release version")
return True
def get_vios_id(host_name, vios_dict):
""" Generate the VIOS ID for REST API consumption based on K2 data """
# When the VIOS uuid gets persisted with the vios, that could be used
# by itself and this impl can change to address that field.
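    # For illustration (hypothetical values): get_vios_id("hmc-host-1", {'lpar_id': 2})
    # returns "hmc-host-1##2".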
return host_name + "##" + str(vios_dict['lpar_id'])
|
from discord.ext import commands, tasks
from discord.ext.commands import CommandNotFound, MissingPermissions
from utilities.Drive import Drive
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.color = int('f03c3c', 16)
self.drive_object = Drive()
print('Authenticating Services')
self.drive_object.auth()
self.GDrive_Refresh.start()
@commands.Cog.listener()
async def on_ready(self):
print(f'Logged In {self.bot.user}')
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if isinstance(error, CommandNotFound):
await ctx.channel.send(
'That command was not found and/or does not exist. Please use &help to get a list of commands!')
return
if isinstance(error, MissingPermissions):
            await ctx.channel.send('Hey! >:( you don\'t have permission to do that!')
raise error
@commands.Cog.listener()
async def on_message(self, message):
if message.author == self.bot.user:
return
@tasks.loop(hours=6)
async def GDrive_Refresh(self):
self.drive_object.get_files_from_id('1c5T1EHN0aWefDiV7c_zEPICQDXcLgCBX')
self.drive_object.download_files_from_json('assets/Palettes/downloaded.json')
@commands.command()
async def logout(self, ctx):
if ctx.author.id == 693089171002097724 or ctx.author.id == 214935867859402752:
await ctx.channel.send('Going Offline')
await self.bot.logout()
else:
await ctx.channel.send('You do not have the permission to do that!')
# raise MissingPermissions() ?? Need to figure out what to pass in.
def setup(bot):
bot.add_cog(Events(bot))
|
"""
Dirty wrapping of the Mrtrix3 command necessary but not available in Nipype
Commands are wrapped using python function and the Function variant interface of nipype
TO DO : use the Mrtrix3Base or CommandLine class of Nipype to perform a cleaner wrap
"""
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function
def mrregister_rigid(image, template, transform):
"""
Dirty wrapping of the Mrtrix3 mrregister command that estimate rigid transformation between
image and template (reference image)
:param image: path of the image to register
:param template: path of the reference image
:param transform: path of the text file containing the estimated transform
:return:
"""
import subprocess
from distutils import spawn
mrregister = spawn.find_executable("mrregister")
cmd = mrregister + ' -type rigid ' + ' -rigid ' + transform + ' ' + image + ' ' + template
    subprocess.run(cmd, shell=True)  # shell=True because cmd is built as a single command string
pass
def mrtransform_linear(input, output, transform):
"""
Dirty wrapping of the mrtransform command to apply linear transform to a volume
:param input:
:param output:
:param transform:
:return:
"""
import subprocess
from distutils import spawn
mrtransform = spawn.find_executable("mrtransform")
# inverse option is passed to take into account reverse convention (see Mrtrix doc)
cmd = mrtransform + ' -linear ' + transform + ' -inverse ' + input + ' ' + output
    subprocess.run(cmd, shell=True)  # shell=True because cmd is built as a single command string
pass
def tcksift(input_tracks, wm_fod, filtered_tracks):
"""
:param input_tracks:
:param wm_fod:
:param filtered_tracks:
:return:
"""
import subprocess
from distutils import spawn
sift = spawn.find_executable("tcksift")
cmd = sift + ' ' + input_tracks + ' ' + wm_fod + ' ' + filtered_tracks
    subprocess.run(cmd, shell=True)  # shell=True because cmd is built as a single command string
pass
rigid_transform_estimation = pe.Node(name='rigid_transform_estimation', interface=Function(input_names=['image', 'template'], output_names=['transform'], function=mrregister_rigid))
apply_linear_transform = pe.Node(name="apply_linear_transform", interface=Function(input_names=["input","transform"], output_names=["output"],function=mrtransform_linear))
rigid_registration = pe.Workflow(name="rigid_registration")
rigid_registration.connect(rigid_transform_estimation,'transform', apply_linear_transform, 'transform')
sift_filtering = pe.Node(name="sift_filtering", interface=Function(input_names=["input_tracks", "wm_fod"], output_names=["filtered_tracks"], function=tcksift))
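# Minimal usage sketch for the workflow above (the file names are illustrative assumptions,
# not part of this module; nodes are configured by setting their Function inputs directly):
# rigid_transform_estimation.inputs.image = 'subject_T1.nii.gz'
# rigid_transform_estimation.inputs.template = 'template_T1.nii.gz'
# apply_linear_transform.inputs.input = 'subject_T1.nii.gz'
# rigid_registration.run()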
|
from django.template import Template, Context
from requests_toolbelt.utils import dump
def render_with_context(template, context):
template = Template(template)
context = Context(context)
return template.render(context)
def parse_dump_result(fun, obj):
prefixes = dump.PrefixSettings('', '')
try:
result = bytearray()
fun(obj, prefixes, result)
return result.decode('utf-8')
except Exception:
return "Could not parse request as a string"
|
'''
@author: Kittl
'''
def exportStorageTypes(dpf, exportProfile, tables, colHeads):
# Get the index in the list of worksheets
    if exportProfile == 2:
cmpStr = "StorageType"
    elif exportProfile == 3:
cmpStr = ""
idxWs = [idx for idx,val in enumerate(tables[exportProfile-1]) if val == cmpStr]
if not idxWs:
dpf.PrintPlain('')
dpf.PrintInfo('There is no worksheet '+( cmpStr if not cmpStr == "" else "for storage types" )+' defined. Skip this one!')
return (None, None)
elif len(idxWs) > 1:
dpf.PrintError('There is more than one table with the name '+cmpStr+' defined. Cancel this script.')
exit(1)
else:
idxWs = idxWs[0]
    colHead = list()
# Index 11 indicates the storageType-Worksheet
for cHead in colHeads[exportProfile-1][idxWs]:
colHead.append(str(cHead.name))
dpf.PrintPlain('')
dpf.PrintPlain('#####################################')
dpf.PrintPlain('# Starting to export storage types. #')
dpf.PrintPlain('#####################################')
expMat = list()
expMat.append(colHead)
# ~~~~~~~~ Implement storage export! ~~~~~~~~
# reses = dpf.GetCalcRelevantObjects('*.ElmGenstat')
# for res in reses:
# if exportProfile is 2:
# expMat.append([
# res.loc_name, # id
# res.bus1.cterm.loc_name, # node
# res.cCategory, # type
# "", # pvType
# "", # wecType
# "", # bmType#
# res.pgini, # pload
# res.cosgini, # cosphi
# res.sgn, # sR
# res.cpArea.loc_name if res.cpArea is not None else "", # subnet
# res.cpZone.loc_name if res.cpZone is not None else "" # voltLvl
# ])
# else:
# dpf.PrintError("This export profile isn't implemented yet.")
# exit(1)
return (idxWs, expMat)
|
# main.py
from flask import Flask
from flask import request
from flask import jsonify
import hashlib
import inspect
app = Flask(__name__)
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
_usage = '<h1>Welcome to a URLShortener demo </h1>' \
'<h2>You can use the APIs to generate a shorturl by URL, APIs as following</h2>' \
'<h3>/create?url=URL</h3>' \
'<h3>/preview?shorturl=shorturl</h3>' \
'<h3>/list</h3>'
# class URLShortener Definition
class URLShortener():
def __init__(self, ServiceHost='http://localhost/', debug=False):
self.ServiceHost = ServiceHost
self.debug = debug
self.ShortURL_dict = {}
self.AlnumList = []
for char in '0123456789ABCDEFGHIJKLNMOPQRSTUVWXYZabcdefghijklnmopqrstuvwxyz':
self.AlnumList.append(char)
self.DP(lineno(), 'Debug Mode Enable')
# Debug Print
def DP(self, line, msg):
if self.debug == True:
print('[DEBUG:%3s] %s' % (line, msg))
def GenShortURL(self, URL):
        if not URL.startswith(('http://', 'https://')):
            self.DP(lineno(), 'The URL(%s) is missing http:// or https://' % URL)
URL = 'http://' + URL
hexstr = hashlib.sha256(URL.encode('UTF-8')).hexdigest()[0:16]
self.DP(lineno(), 'Get first 16 bytes of sha256(%s)=%s' % (URL, hexstr))
ShortURL = ''.join(self.AlnumList[int(hexstr[i:i+2], 16) % len(self.AlnumList)] for i in range(0, len(hexstr), 2))
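        # Illustrative mapping (hash prefix assumed): the hex pair 'a1' is int 161,
        # 161 % 62 == 37, and self.AlnumList[37] == 'b', so every two hex digits
        # collapse into one alphanumeric character of the short URL.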
self.DP(lineno(), 'Convert(%s) to ShortURL(%s) ' % (hexstr, ShortURL))
self.ShortURL_dict[ShortURL] = URL
return ShortURL
def GetURL(self, ShortURL):
return self.ShortURL_dict.get(ShortURL, None)
@app.route("/")
def index():
return _usage
# Convert from URL to shorturl
@app.route("/create")
def create():
print(request.args)
param = request.args.get('url', default=None)
URL = str(param)
if param is None:
return '<h1>Please give a url to shorturl, format is<br/> /create?url=http://your_url/</h1>'
else:
# Bugfix: the parameters of GET method will be split by '&'
# so we need to combine it together
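        # For example (illustrative): /create?url=http://a.com/?x=1&y=2 arrives as
        # url='http://a.com/?x=1' plus an extra arg y='2', which the loop below re-joins.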
for key in request.args:
if key != 'url':
URL += '&' + key + '=' +request.args[key]
return us.ServiceHost + us.GenShortURL(URL)
# Convert shorturl to URL
@app.route("/<string:shorturl>")
def forward(shorturl):
URL = us.GetURL(shorturl)
if URL is None:
return '<h1>The shorturl is not found</h1>'
else:
return '<html><script> window.location.replace("' + URL + '");</script>'
# Don't forward URL immediately, show URL to user
@app.route("/preview")
def preview():
shorturl = request.args.get('shorturl', default=None)
if shorturl is None:
return '<h1>Please give a shorturl</h1>'
URL = us.GetURL(shorturl)
if URL is None:
return '<h1>The shorturl is not found</h1>'
else:
return URL
# Return the full shorturl-to-URL mapping as JSON
@app.route("/list")
def list():
return jsonify(us.ShortURL_dict)
if __name__ == "__main__":
print(' * URLShortener Demo start!')
us = URLShortener(ServiceHost='http://localhost:5000/', debug=True)
# us = URLShortener(ServiceHost='http://localhost:5000/')
# test data
us.GenShortURL('https://www.google.com')
us.GenShortURL('https://twitter.com')
us.GenShortURL('http://www.reddit.com')
us.GenShortURL('www.facebook.com')
us.GenShortURL('www.yahoo.com.tw/')
us.GenShortURL('testdata')
us.GenShortURL('https://github.com/tobypusheen')
us.GenShortURL('http://localhost:5000/create?url=https://www.google.com.tw/search?q=%E7%B8%AE%E7%B6%B2%E5%9D%80%E6%9C%8D%E5%8B%99&oq=%E7%B8%AE%E7%B6%B2%E5%9D%80%E6%9C%8D%E5%8B%99&aqs=chrome..69i57.200j0j4&sourceid=chrome&ie=UTF-8')
# shorturl is a hash like njwhgE6i, b1C4Vhse...etc,
# it is generated by URL
toby_linkedin = 'https://www.linkedin.com/in/toby-lin-b72025119/'
shorturl = us.GenShortURL(toby_linkedin)
us.DP(lineno(), 'Generate a shorturl ' + shorturl + ' by ' + toby_linkedin)
# URL is a real website address can be accessed,
# it may can be found by shorturl if it had generated
URL = us.GetURL(shorturl)
us.DP(lineno(), 'Find a URL ' + URL + ' by ' + shorturl)
print(' * Show test data ')
us.DP(lineno(), us.ShortURL_dict)
app.run(debug=True, host='0.0.0.0', port='5000')
'''
Todo
Documentation
TEST data
curl http://localhost:5000/create?url=http://www.google.com/
curl http://localhost:5000/create?url=http://www.yahoo.com.tw/
curl http://localhost:5000/create?url=https://www.ldoceonline.com/
curl http://localhost:5000/create?url=https://www.google.com.tw/search?q=%E7%B8%AE%E7%B6%B2%E5%9D%80%E6%9C%8D%E5%8B%99&oq=%E7%B8%AE%E7%B6%B2%E5%9D%80%E6%9C%8D%E5%8B%99&aqs=chrome..69i57.200j0j4&sourceid=chrome&ie=UTF-8
curl http://localhost:5000/list | jq
'''
|
import boto3
import hashlib
import sys
import argparse
import uuid
def parse_arguments():
parser = argparse.ArgumentParser(
description="calculate the streaming md5 sum for s3 objects"
)
parser.add_argument("-b", "--bucketName", required=True, help="s3 bucket")
parser.add_argument(
"-o", "--objectName", required=True, help="object key in s3 bucket"
)
args = parser.parse_args()
return args
s3 = boto3.resource("s3")
client = boto3.client("s3")
def md5ObjectSum(bucketName, objectName):
bucket = s3.Bucket(bucketName)
obj = bucket.Object(objectName)
size = obj.content_length
hash_md5 = hashlib.md5()
step = 5000000
front = 0
back = 0 + step
while front != (size + 1):
byteRange = "bytes=" + str(front) + "-" + str(back)
response = client.get_object(Bucket=bucketName, Key=objectName, Range=byteRange)
body = response["Body"]
length = response["ResponseMetadata"]["HTTPHeaders"]["content-length"]
chunk = body.read()
hash_md5.update(chunk)
front = back + 1
if (back + step) > size:
back = size
else:
back = back + step
guid = str(uuid.uuid4())
line = (
guid
+ "\t"
+ hash_md5.hexdigest()
+ "\t"
+ str(size)
+ "\t"
+ "PROJECT-ID"
+ "\t"
+ "s3://"
+ bucketName
+ "/"
+ objectName
)
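    # The resulting manifest line is tab-separated:
    # <uuid>\t<md5 hexdigest>\t<object size>\tPROJECT-ID\ts3://<bucket>/<object key>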
fileName = "output/" + objectName + ".tsv"
print "putting the manifest file for object back into s3 without making file first"
bucket.put_object(Body=line.encode(), Key=fileName)
    print(line)
return hash_md5.hexdigest()
if __name__ == "__main__":
args = parse_arguments()
md5 = md5ObjectSum(args.bucketName, args.objectName)
|
"""Module contenant les fonctions de l'interface graphique du projet monstercat_media_player"""
import tkinter as tk
import tkinter.ttk as ttk
from PIL import ImageTk, Image
class App(tk.Frame):
"""Classe principale de l'interface graphique"""
def __init__(self, master:tk.Tk, sorties, monstercat_media_player):
super().__init__(master)
self.configure(background="red")
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
b=tk.Button(self.master, text="Quitter", command=self.destroy_window_object)
b.grid(row=0, column=0, sticky="nsew")
i=0
for sortie in sorties:
image=ImageTk.PhotoImage(Image.open(sortie[2]).resize((300,300)))
button_sortie=tk.Button(self.master, borderwidth=0, image=image, height=300, width=300, command=lambda sortie=sortie:monstercat_media_player.jouer(sortie[1]))
button_sortie.grid(row=i//3, column=i%3, sticky="wn")
i+=1
self.grid(row=0, column=0, sticky="nsew")
def destroy_window_object(self):
"""Destruction de l'interface graphique"""
self.destroy()
super().destroy()
class Chargement(tk.Tk):
"""Fenêtre de chargement au démarrage"""
def __init__(self, message:str, geometry="300x80", title="Chargement",callback=None):
super().__init__()
self.title(title)
self.geometry(geometry)
self.resizable(False, False)
self.label = tk.Label(self, text=message)
self.label.pack(side="top", pady=10)
self.progressbar=ttk.Progressbar(self, orient="horizontal", length=260, mode="determinate")
self.progressbar.pack(side="top", padx=20)
self.after(100, lambda: callback(self))
def add_progress(self, value:int):
"""Ajoute une valeur à la progressbar"""
self.progressbar.step(value)
def set_progress(self, value:int):
"""Met à jour la progressbar"""
self.progressbar.config(value=value)
def start_progress(self, interval:int):
"""Démarre la progressbar"""
self.progressbar.start(interval)
def start(self):
"""Démarre la fenêtre de chargement"""
self.mainloop()
def destroy_window_object(self):
"""Destruction de la fenêtre de chargement"""
self.destroy()
super().destroy()
|
"""
Example showing a 3D scene with a 2D overlay.
The idea is to render both scenes, but clear the depth before rendering
the overlay, so that it's always on top.
"""
import numpy as np
from wgpu.gui.auto import WgpuCanvas, run
import pygfx as gfx
# Create a canvas and renderer
canvas = WgpuCanvas(size=(500, 300))
renderer = gfx.renderers.WgpuRenderer(canvas)
# Compose a 3D scene
scene1 = gfx.Scene()
geometry1 = gfx.box_geometry(200, 200, 200)
material1 = gfx.MeshPhongMaterial(color=(1, 1, 0, 1.0))
cube1 = gfx.Mesh(geometry1, material1)
scene1.add(cube1)
camera1 = gfx.OrthographicCamera(300, 300)
# Compose another scene, a 2D overlay
scene2 = gfx.Scene()
positions = np.array(
[
[-1, -1, 0.5],
[-1, +1, 0.5],
[+1, +1, 0.5],
[+1, -1, 0.5],
[-1, -1, 0.5],
[+1, +1, 0.5],
],
np.float32,
)
geometry2 = gfx.Geometry(positions=positions * 0.9)
material2 = gfx.LineMaterial(thickness=5.0, color=(0.8, 0.0, 0.2, 1.0))
line2 = gfx.Line(geometry2, material2)
scene2.add(line2)
camera2 = gfx.NDCCamera()
def animate():
rot = gfx.linalg.Quaternion().set_from_euler(gfx.linalg.Euler(0.005, 0.01))
cube1.rotation.multiply(rot)
renderer.render(scene1, camera1, flush=False)
renderer.render(scene2, camera2)
canvas.request_draw()
if __name__ == "__main__":
canvas.request_draw(animate)
run()
|
from setuptools import setup
with open('requirements.txt', 'r') as f:
requirements = [line.strip() for line in f]
discription = "qwgc is a quantum walk graph classifier \
for classification for Graph data."
setup(
name="qwgc",
version="0.0.3",
description="Graph classifier based on quantum walk",
    long_description=description,
url="https://Chibikuri.github.io/qwgc",
author="Ryosuke Satoh",
author_email="ryosuke.satoh.wk@gmail.com",
license="Apache 2.0",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="quantum walk machine learning",
install_requires=requirements,
include_package_data=True,
python_requires=">=3.5",
)
|
# fib(n) = fib(n - 1) + fib(n - 2)
# 0 1 1 2 3 5 8 13 21...
def fibonacci(n):
if n == 0:
return n
last = 0
next = 1
for _ in range(1, n):
last, next = next, last + next
return next
if __name__ == "__main__":
for i in range(0, 10):
print("fibonacci: ", fibonacci(i))
|
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.stella import dashboard
class ConfigPanel(horizon.Panel):
name = _("Configuration")
slug = "SLAConfigPanel"
dashboard.StellaDashboard.register(ConfigPanel)
|
# -*- coding: utf-8 -*-
##--------------------------------------#
## Kvasir Scheduler functions
##
## (c) 2010-2014 Cisco Systems, Inc.
## (c) 2015 Kurt Grutzmacher
##
## Scheduler functions for long running processes
##
## Author: Kurt Grutzmacher <grutz@jingojango.net>
##--------------------------------------#
import os
import socket
from skaldship.hosts import get_host_record
from gluon.scheduler import Scheduler
import logging
logger = logging.getLogger("web2py.app.kvasir")
##----------------------------------------------------------------------------
def launch_terminal(record=None, launch_cmd=None):
"""
Opens a terminal on the Web Server. This only works if the
web2py server is running on the user's workstation.
The command to execute is stored in the user's settings db
under auth_user.f_launch_cmd. Variables translated:
_IP_ -- The current IP Address (v4 by default, v6 if exists)
_LOGFILE_ -- Session logfile name (we prepend the path)
If an IPv6 address is used then ':' is changed to '_'
Example:
xterm -sb -sl 1500 -vb -T 'manual hacking: _IP_' -n 'manual hacking: _IP_' -e script _LOGFILE_
"""
record = get_host_record(record)
# only execute launch on requests from localhost!
if request.env['remote_addr'] != '127.0.0.1':
logger.error("Can only launch from localhost! remote_addr = %s" % (request.env['remote_addr']))
return "Can only launch from localhost"
if record is None:
return "No record found"
import string, os, subprocess
import time
from gluon.validators import IS_IPADDRESS
# if no launch command use the default
if not launch_cmd:
launch_cmd = "xterm -sb -sl 1500 -vb -T 'manual hacking: _IP_' -n 'manual hacking: _IP_' -e 'script _LOGFILE_'"
# check ip address
ip = record.f_ipaddr
logip = ip
if IS_IPADDRESS(is_ipv6=True)(ip)[0] == None:
logip = ip.replace(":", "_")
logdir = "session-logs"
logfilename = "%s-%s.log" % (logip, time.strftime("%Y%m%d%H%M%S", time.localtime(time.time())))
logfile = os.path.join(logdir, logfilename)
launch_cmd = launch_cmd.replace("_IP_", ip)
launch_cmd = launch_cmd.replace("_LOGFILE_", logfile)
from skaldship.general import check_datadir
# Check to see if data directories exist, create otherwise
check_datadir(request.folder)
datadir = os.path.join(os.getcwd(), request.folder, "data")
# chdir to datadir!
launch_cmd = launch_cmd.replace("_DATADIR_", datadir)
os.chdir(datadir)
# set environment variables
os.environ['IP'] = ip
os.environ['HOSTNAME'] = record.f_hostname or ""
os.environ['DATADIR'] = datadir
try:
logger.info("Spawning: %s\n" % (launch_cmd))
print("Spawning: %s" % (launch_cmd))
subprocess.Popen(launch_cmd, shell=True)#, stdout=None, stdin=None, stderr=None)
except Exception, e:
logger.error("Error spawning launch cmd (%s): %s\n" % (launch_cmd, e))
print("Error spawning launch cmd (%s): %s\n" % (launch_cmd, e))
return False
##----------------------------------------------------------------------------
def run_scanner(
scanner=None,
asset_group=None,
engineer=None,
target_list=None,
blacklist=None,
scan_options=None,
addnoports=False,
update_hosts=False,
**kwargs
):
'''
Schedule handler to process nmap scan
'''
from skaldship.log import log
if not isinstance(scanner, str):
return False
scanner = scanner.upper()
logger.info(" [*] Processing Nmap scan ")
if scanner == 'NMAP':
from skaldship.nmap import run_scan
nmap_xml_file = run_scan(
blacklist=blacklist,
target_list=target_list,
scan_options=scan_options,
)
if nmap_xml_file:
from skaldship.nmap import process_xml
log("Processing nmap xml file: %s" % (nmap_xml_file))
process_xml(
filename=nmap_xml_file,
addnoports=addnoports,
asset_group=asset_group,
engineer=engineer,
msf_settings={},
ip_ignore_list=None,
ip_include_list=None,
update_hosts=update_hosts,
)
##----------------------------------------------------------------------------
def canvas_exploit_xml(filename=None):
"""
Process ImmunitySec CANVAS Exploits.xml file into the database
"""
from skaldship.canvas import process_exploits
from skaldship.exploits import connect_exploits
process_exploits(filename)
connect_exploits()
return True
##----------------------------------------------------------------------------
def nexpose_exploit_xml(filename=None):
"""
Process Nexpose exploits.xml file into the database
"""
from skaldship.nexpose import process_exploits
from skaldship.exploits import connect_exploits
process_exploits(filename)
connect_exploits()
return True
##----------------------------------------------------------------------------
def scanner_import(
scanner=None,
filename=None,
addnoports=False,
asset_group=None,
engineer=None,
msf_settings={},
ip_ignore_list=None,
ip_include_list=None,
update_hosts=False,
**kwargs
):
"""
Imports a Scanner XML file to Kvasir
"""
if not isinstance(scanner, str):
return False
scanner = scanner.upper()
if scanner == 'NMAP':
from skaldship.nmap import process_xml
logger.info("Processing nmap file: %s" % (filename))
process_xml(
filename=filename,
addnoports=addnoports,
asset_group=asset_group,
engineer=engineer,
msf_settings=msf_settings,
ip_ignore_list=ip_ignore_list,
ip_include_list=ip_include_list,
update_hosts=update_hosts,
)
elif scanner == 'NEXPOSE':
from skaldship.nexpose import process_xml
logger.info("Processing Nexpose file: %s" % (filename))
process_xml(
filename=filename,
asset_group=asset_group,
engineer=engineer,
msf_settings=msf_settings,
ip_ignore_list=ip_ignore_list,
ip_include_list=ip_include_list,
update_hosts=update_hosts,
)
elif scanner == 'NESSUS':
from skaldship.nessus.processor import process_scanfile
logger.info("Processing Nessus file: %s" % (filename))
process_scanfile(
filename=filename,
asset_group=asset_group,
engineer=engineer,
msf_settings=msf_settings,
ip_ignore_list=ip_ignore_list,
ip_include_list=ip_include_list,
update_hosts=update_hosts,
)
elif scanner == 'METASPLOIT':
from skaldship.metasploit.pro import process_report_xml
logger.info("Processing Metasploit Pro file: %s" % filename)
process_report_xml(
filename=filename,
asset_group=asset_group,
engineer=engineer,
ip_ignore_list=ip_ignore_list,
ip_include_list=ip_include_list,
update_hosts=update_hosts,
)
elif scanner == 'SHODANHQ':
from skaldship.shodanhq import process_report
logger.info("Processing ShodanHQ file: %s" % (filename))
process_report(
filename=filename,
host_list=kwargs.get('hosts') or [],
query=kwargs.get('query') or None,
asset_group=asset_group,
engineer=engineer,
ip_ignore_list=ip_ignore_list,
ip_include_list=ip_include_list,
#update_hosts=update_hosts,
)
return True
##----------------------------------------------------------------------------
def do_host_status(records=[], query=None, asset_group=None, hosts=[]):
"""
Runs through the t_hosts table and updates the *_count entries.
Can also run through a specific list of record IDs instead.
"""
from skaldship.hosts import do_host_status
do_host_status(records=records, query=query, asset_group=asset_group, hosts=hosts)
return True
##------------------------------------------------------------------------
def accounts_import_file(filename=None, service=['info', '0'], f_type=None, f_source=None):
"""
Processes an Imported password file to the accounts table
"""
print("Processing password file: %s" % (filename))
from skaldship.passwords.utils import process_password_file, insert_or_update_acct
account_data = process_password_file(pw_file=filename, file_type=f_type, source=f_source)
resp_text = insert_or_update_acct(service, account_data)
print(resp_text)
return True
##------------------------------------------------------------------------
def cpe_import_xml(filename=None, download=False, wipe=False):
"""
Process the CPE data through an uploaded file or have it download directly
from the MITRE webserver
"""
from skaldship.cpe import process_xml
process_xml(filename, download, wipe)
return True
##------------------------------------------------------------------------
def run_valkyrie(valkyrie_type=None, services=None):
"""
Run a valkyrie
"""
if valkyrie_type == 'webshot':
from skaldship.valkyries.webimaging import do_screenshot
do_screenshot(services)
elif valkyrie_type == 'vncshot':
from skaldship.valkyries.vncscreenshot import do_screenshot
do_screenshot(services)
return True
##-------------------------------------------------------
def import_all_nexpose_vulndata(overwrite=False, nexpose_server={}):
"""
Import all vulnerability data from Nexpose
"""
from skaldship.nexpose import import_all_vulndata
import_all_vulndata(overwrite=overwrite, nexpose_server=nexpose_server)
return True
##-------------------------------------------------------
def connect_exploits():
"""
    Connect imported exploits to existing data in the database
"""
from skaldship.exploits import connect_exploits
connect_exploits()
return True
##----------------------------------------------------------------------------
scheduler = Scheduler(
db=db,
migrate=settings.migrate,
group_names=[settings.scheduler_group_name],
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('warehouse', '0011_auto_20141223_0744'),
]
operations = [
migrations.AlterModelOptions(
name='importjob',
options={'ordering': ('-created_at',)},
),
migrations.RemoveField(
model_name='session',
name='status',
),
migrations.AddField(
model_name='importjob',
name='status',
field=models.CharField(max_length=255, null=True, choices=[(b'started', b'Started'), (b'completed', b'Completed'), (b'failed', b'Failed')]),
preserve_default=True,
),
]
|
from ..utils import validate_response
class DocumentRevealing():
"""Manage revealing related profile calls."""
def __init__(self, api):
"""Init."""
self.client = api
def post(self, text):
"""
Retrieve revealing.
Args:
text: <string>
text
Returns
Revealing
"""
payload = {
"text": text
}
response = self.client.post('document/revealing', json=payload)
return validate_response(response)
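# Usage sketch (the API client and text below are illustrative assumptions):
# revealing = DocumentRevealing(api=some_http_client)
# result = revealing.post("Plain text to analyse")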
|
# polls.py module, v1
# ///---- Imports ----///
import re
import os
import logging
from discord import Embed
from discord.ext.commands import Cog, group
# from discord.ext.commands import has_permissions, MissingPermissions
# Database
from libs.database import Database as DB
# ///---- Log ----///
log = logging.getLogger(__name__)
# ///---- Class ----///
class Polls(Cog):
'''
    Poll creation
'''
def __init__(self, bot):
'''
        Bot __init__ (imports this code into the bot as a module)
'''
secret = os.getenv("FAUNADB_SECRET_KEY")
self.bot = bot
self.db = DB(secret)
@staticmethod
def colour():
# return Colour.from_rgb(0, 235, 188).value
return 0x00ebbc
#! poll
    #! poll command
@group()
async def poll(self, ctx):
'''
        poll command
'''
PREFIX = os.getenv("DISCORD_PREFIX")
if ctx.invoked_subcommand is None:
await ctx.send(f"Este comando no existe! Tipea `{PREFIX}poll help` para ver los comandos disponibles :D")
    #! help subcommand
@poll.command()
async def help(self, ctx):
'''
        Description: Poll help
        Precondition: Type `poll help` in a channel
        Postcondition: The bot replies with the list of commands and their descriptions
'''
PREFIX = os.getenv("DISCORD_PREFIX")
lines = f"""
```md
### COMANDO {PREFIX}poll ###
- {PREFIX}poll help: Muestra la ayuda.
- {PREFIX}poll add: Agregar encuesta.
- {PREFIX}poll close: Finalizar encuesta.
Ejemplos:
{PREFIX}poll add "Pregunta" -> (Encuesta simple, Sí o No)
{PREFIX}poll add "¿Te gusta la comunidad de FrontendCafé?"
{PREFIX}poll add "Pregunta" "Opción 1" "Opción 2" "Opción 3" (Encuesta personalizada, máximo 10 respuestas)
{PREFIX}poll add "¿Participas de alguno de los grupos de estudio, cuál?" "Python-Study-Group" "JS-Study-Group" "PHP-Study-Group" "Algorithms-Group"
{PREFIX}poll close ID
{PREFIX}poll close 123456789654687651233
```
"""
await ctx.send(lines)
    #! add subcommand
@poll.command()
# @has_permissions(manage_messages=False)
async def add(self, ctx, question, *args):
'''
        Add a poll
'''
def db_create(self, id, poll_type, author, avatar, question, votes):
self.db.create('Polls', {
"id": id,
"type": poll_type,
"author": str(author),
"avatar_url": str(avatar),
"question": question,
"is_active": True,
"users_voted": [],
"votes_count": votes
})
PREFIX = os.getenv("DISCORD_PREFIX")
try:
await ctx.message.delete()
# Embed message
pollEmbed = Embed(
title=(f":clipboard: {question}"), color=self.colour())
pollEmbed.set_thumbnail(
url="https://res.cloudinary.com/sebasec/image/upload/v1614807768/Fec_with_Shadow_jq8ll8.png")
pollEmbed.set_author(name="Encuesta")
pollEmbed.set_footer(
text=ctx.author, icon_url=ctx.author.avatar_url)
# Verifies if no answers were provided, and creates a yes/no poll
if not args:
pollEmbed.add_field(
name="\u200b", value="**Opciones (voto único):**\n✅ Sí: 0 \n❎ No: 0", inline=False)
msg = await ctx.channel.send(embed=pollEmbed)
votes_count = {
'Si': 0,
'No': 0
}
# Add poll to database
db_create(self, msg.id, "normal", ctx.author,
ctx.author.avatar_url, question, votes_count)
# Add BOT reactions
emojis = ['✅', '❎']
for emoji in emojis:
await msg.add_reaction(emoji)
# Verifies if the amount of answers provided are more than 1 and equal or less than 10
elif (len(args) > 1) and (len(args) <= 10):
emoji_number_list = ["1️⃣", "2️⃣", "3️⃣", "4️⃣",
"5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣", "🔟"]
try:
# Format and add answers to embed
poll_text = ""
for idx, answer in enumerate(args):
poll_text += (
f"\n{emoji_number_list[idx]} {answer}: 0")
pollEmbed.add_field(
name="**Opciones (voto único):**", value=poll_text, inline=False)
# Dict of answers for DB
votes_count = {}
for answer in args:
votes_count[answer] = 0
# Send poll message to discord
msg = await ctx.channel.send(embed=pollEmbed)
# Add poll to database
db_create(self, msg.id, "custom", ctx.author,
ctx.author.avatar_url, question, votes_count)
# Add BOT reactions
for i in range(len(args)):
await msg.add_reaction(emoji_number_list[i])
except Exception as e:
print(e)
else:
await ctx.channel.send("❌Cantidad de respuestas no válidas (mínimo 2 respuestas | máximo 10 respuestas)", delete_after=15)
user = self.bot.get_user(ctx.author.id)
await user.send(f"Para cerrar la votación de la encuesta '{question}' usar el siguiente comando: \n``` {PREFIX}poll close {msg.id} ```")
except Exception as e:
print(e)
# @add.error
# async def add_error(self, ctx, error):
# if isinstance(error, MissingPermissions):
# await ctx.message.delete()
# await ctx.channel.send("No tienes permiso para crear encuestas :slight_frown:", delete_after=15)
@poll.command()
# @has_permissions(manage_messages=False)
async def close(self, ctx, poll_id):
emoji_number_list = ["1️⃣", "2️⃣", "3️⃣", "4️⃣",
"5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣", "🔟"]
try:
poll = self.db.get_poll_by_discord_id(int(poll_id))
votes = poll['data']['votes_count']
self.db.update_with_ref(
poll['ref'],
{
"is_active": False,
}
)
# Send finish message
pollEmbed = Embed(
title=f":clipboard: {poll['data']['question']}", color=self.colour())
pollEmbed.set_thumbnail(
url="https://res.cloudinary.com/sebasec/image/upload/v1614807768/Fec_with_Shadow_jq8ll8.png")
pollEmbed.set_author(name="Encuesta Finalizada")
pollEmbed.set_footer(
text=poll['data']['author'], icon_url=poll['data']['avatar_url'])
poll_text = ""
# Sort votes by greater to lower
for idx, (k, v) in enumerate(sorted(votes.items(), key=lambda vote: vote[1], reverse=True)):
poll_text += (
f"\n{emoji_number_list[idx]} {k}: {v}")
pollEmbed.add_field(
name="\u200b", value=f"**Votos:**{poll_text}", inline=False)
await ctx.message.delete()
await ctx.channel.send(embed=pollEmbed)
except Exception as e:
print(e)
# @close.error
# async def close_error(self, ctx, error):
# if isinstance(error, MissingPermissions):
# await ctx.message.delete()
# await ctx.channel.send("No tienes permiso para cerrar encuestas :slight_frown:", delete_after=15)
# On poll reaction:
@Cog.listener()
async def on_raw_reaction_add(self, payload):
# Obtain reacted message by id
channel = self.bot.get_channel(payload.channel_id)
msg = await channel.fetch_message(payload.message_id)
# Check if reacted message was sended by the bot
colour = msg.embeds[0].colour if len(msg.embeds) == 1 else Embed.Empty
if colour != Embed.Empty:
colour = colour.value
if msg.author == self.bot.user and colour == self.colour():
# Check if the reaction was added by the bot
if (payload.user_id != self.bot.user.id):
# Search poll in DB
try:
poll = self.db.get_poll_by_discord_id(payload.message_id)
ref = poll['ref']
is_active = poll['data']['is_active']
p_type = poll['data']['type']
users = poll['data']['users_voted']
votes = poll['data']['votes_count']
# Check if poll is "active"
if is_active:
# Check if user has voted
if (payload.user_id not in users):
users.append(payload.user_id)
# Check if poll type is "normal" (yes/no poll)
if (p_type == "normal"):
if(payload.emoji.name == '✅'):
votes['Si'] += 1
elif(payload.emoji.name == '❎'):
votes['No'] += 1
# Update users and votes to DB
try:
self.db.update_with_ref(
ref,
{
"users_voted": users,
"votes_count": votes
}
)
except Exception as e:
print(e)
# Edit message
pollEmbed = Embed(
title=f":clipboard: {poll['data']['question']}", color=self.colour())
pollEmbed.set_thumbnail(
url="https://res.cloudinary.com/sebasec/image/upload/v1614807768/Fec_with_Shadow_jq8ll8.png")
pollEmbed.set_author(name="Encuesta")
pollEmbed.set_footer(
text=poll['data']['author'], icon_url=poll['data']['avatar_url'])
pollEmbed.add_field(
name="\u200b", value=f"**Opciones (voto único):**\n:white_check_mark: Sí: {votes['Si']} \n:negative_squared_cross_mark: No: {votes['No']}", inline=False)
await msg.edit(embed=pollEmbed)
# Check if poll type is "custom" (personalized answers poll)
elif (p_type == "custom"):
emoji_number_list = ["1️⃣", "2️⃣", "3️⃣", "4️⃣",
"5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣", "🔟"]
for idx, emoji in enumerate(emoji_number_list):
if (emoji == payload.emoji.name):
listVotes = list(votes)
votes[listVotes[idx]] += 1
# Update users and votes to DB
try:
self.db.update_with_ref(
ref,
{
"users_voted": users,
"votes_count": votes
}
)
pollEmbed = Embed(
title=f":clipboard: {poll['data']['question']}", color=self.colour())
pollEmbed.set_thumbnail(
url="https://res.cloudinary.com/sebasec/image/upload/v1614807768/Fec_with_Shadow_jq8ll8.png")
pollEmbed.set_author(
name="Encuesta")
pollEmbed.set_footer(
text=poll['data']['author'], icon_url=poll['data']['avatar_url'])
poll_text = ""
for idx, answer in enumerate(votes):
poll_text += (
f"\n{emoji_number_list[idx]} {answer}: {votes[listVotes[idx]]}")
pollEmbed.add_field(
name="**Opciones (voto único):**", value=poll_text, inline=False)
await msg.edit(embed=pollEmbed)
except Exception as e:
print(e)
else:
# Send DM if the user has voted
user = self.bot.get_user(payload.user_id)
await user.send(f"Ya has votado en la encuesta '{poll['data']['question']}'")
else:
# Send DM if the poll has finished
user = self.bot.get_user(payload.user_id)
await user.send(f"La votación de la encuesta '{poll['data']['question']}' ha finalizado")
except Exception as e:
print(e)
|
"""Test for the memoryone strategies."""
import random
import axelrod
from test_player import TestPlayer
class TestWinStayLostShift(TestPlayer):
name = "Win-Stay Lose-Shift"
player = axelrod.WinStayLoseShift
def test_strategy(self):
"""Starts by cooperating"""
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
def test_effect_of_strategy(self):
"""Check that switches if does not get best payoff."""
P1 = self.player()
P2 = axelrod.Player()
P1.history = ['C']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'C')
class TestGTFT(TestPlayer):
name = "Generous Tit-For-Tat"
player = axelrod.GTFT
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
class TestStochasticCooperator(TestPlayer):
name = "Stochastic Cooperator"
player = axelrod.StochasticCooperator
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(15)
# With probability .065 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(1)
# With probability .229 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(3)
# With probability .266 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
random.seed(13)
# With probability .42 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
class TestStochasticWSLS(TestPlayer):
name = "Stochastic WSLS"
player = axelrod.StochasticWSLS
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
        # But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
class TestZDChi(TestPlayer):
name = "ZDChi"
player = axelrod.ZDChi
stochastic = True
def test_four_vector(self):
P1 = self.player()
expected_dictionary = {('C', 'D'): 0.5, ('D', 'C'): 0.75, ('D', 'D'): 0.0, ('C', 'C'): 1.1666666666666667}
for key in sorted(expected_dictionary.keys()):
self.assertAlmostEqual(P1._four_vector[key],
expected_dictionary[key])
def test_strategy(self):
# Testing the expected value is difficult here so these just ensure that
# future changes that break these tests will be examined carefully.
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
|
#!/usr/bin/env python3
# coding: UTF-8
import time
import sys
import csv
from PIL import Image, ImageDraw,ImageFont
from rgbmatrix import RGBMatrix, RGBMatrixOptions
# LED matrix parameters (mostly copied from the GitHub samples)
options = RGBMatrixOptions()
options.rows = 32
options.chain_length = 4
options.parallel = 1
options.brightness = 100
options.gpio_slowdown = 2
options.hardware_mapping = 'regular' # If you have an Adafruit HAT: 'adafruit-hat'
matrix = RGBMatrix(options = options)
images = []
for num in range (1,26,1):
file_name = 'nex/pics/nex_' + str(num) + '.png'
img = Image.open(file_name)
images.append(img)
anime = []
for flame in range (0,49):
ims_base = Image.new("RGB",(128,32),(0,0,0))
ims_base.paste(images[0])
anime.append(ims_base)
for flame in range (50,79):
ims_base = Image.new("RGB",(128,32),(0,0,0))
ims_base.paste(images[1])
anime.append(ims_base)
for flame in range (80,89):
ims_base = Image.new("RGB",(128,32),(0,0,0))
anime.append(ims_base)
for komasu in range (4,16):
ims_base = Image.new("RGB",(128,32),(0,0,0))
ims_base.paste(images[komasu])
anime.append(ims_base)
for flame in range (93,95):
ims_base = Image.new("RGB",(128,32),(0,0,0))
ims_base.paste(images[17])
anime.append(ims_base)
for komasu in range (18,25):
ims_base = Image.new("RGB",(128,32),(0,0,0))
ims_base.paste(images[komasu])
anime.append(ims_base)
for flame in range(10):
ims_base = Image.new("RGB",(128,32),(0,0,0))
anime.append(ims_base)
while True:
for im_scroll in anime:
matrix.SetImage(im_scroll)
time.sleep(0.1)
|
print("简易记账本(三月)")
March=[]
for date in range(31):
March.append([])
while True:
day=int(input("请问输入几号的开销?结束请输入0:"))
if day==0:
break
else:
print("请输入每一笔开销,结束请输入0:")
n=1
while True:
each = float(input("第"+str(n)+"笔:"))
if each == 0:
break
else:
March[day-1].append(each)
n = n + 1
print("记录成功")
total=0
for each_day in March:
if each_day:
total = total + sum(each_day)
print("本月支出汇总报告")
print("总支出:" + str(total))
|
#!/usr/bin/python3
import shamir_server
import client_handler
import settings
#If the node is listed as an auth node then start the auth server
#Otherwise start the client node program
if settings.ID == 'auth':
shamir_server.run()
else:
client_handler.run()
|
#from .pomdp.basics.state import State
#from .pomdp.basics.observation import Observation
|
#!/usr/bin/env python
# encoding: utf-8
"""
Advent of Code 2019 - Day 14 - Challenge 2
https://adventofcode.com/2019/day/14
Solution: 2267486
PEP 8 compliant
"""
__author__ = "Filippo Corradino"
__email__ = "filippo.corradino@gmail.com"
from day14_1 import main as cost
def cost_function(n_target):
return cost(n_target=n_target, printout=False)
def main(n_source=1000000000000):
n_target = 1
# The function n_source = f(n_target) is surely concave
# This is because for higher n_target, we can optimize some reactions
# We'll apply the secant method to reach an approximate solution
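    # Illustrative first pass (assumed numbers): if cost_function(1) were 1000 ORE,
    # the estimate would be 1000 ORE per FUEL and n_target would jump to
    # 10**12 // 1000 = 10**9, after which the loop re-estimates using the cheaper
    # large-batch cost.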
while cost_function(n_target=n_target) < n_source:
est_target_to_source = cost_function(n_target=n_target) // n_target
n_target = n_source // est_target_to_source
# Refine the solution
while cost_function(n_target=n_target) > n_source:
n_target = n_target - 1
print("\nNumber of FUEL achievable with {0} ORE: {1}\n"
.format(n_source, n_target))
return n_target
if __name__ == "__main__":
main()
|
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kernel.base.params.base_param import BaseParam
class FeatureStandardizedParam(BaseParam):
"""
method: Standardized method,
min-max: (x - x_min)/(x_max - x_min)
z-score: (x - x_mean)/std
"""
def __init__(self, method='z-score', save_dataset=False, with_label=False, fields=None):
self.save_dataset = save_dataset
self.with_label = with_label
self.method = method
self.fields = fields
def check(self):
pass
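# Worked example of the two methods documented above (plain numbers, independent of this class):
# values = [1.0, 3.0, 5.0]
#   min-max for x = 3.0: (3.0 - 1.0) / (5.0 - 1.0) = 0.5
#   z-score for x = 5.0 (population std): mean = 3.0, std = sqrt(8/3) ~= 1.63,
#   so (5.0 - 3.0) / 1.63 ~= 1.22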
|
"""The signal processor for Review Board search."""
from __future__ import unicode_literals
import threading
from functools import partial
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_delete, post_save, m2m_changed
from django.utils import six
from djblets.siteconfig.models import SiteConfiguration
from haystack.signals import BaseSignalProcessor
from reviewboard.accounts.models import Profile
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.reviews.signals import review_request_published
from reviewboard.search import search_backend_registry
class SignalProcessor(BaseSignalProcessor):
""""Listens for signals and updates the search index.
This will listen for any signals that would affect the search index, and
invokes a suitable Haystack callback to immediately update the data stored
in the index.
This only updates the search index if:
1) Search is enabled.
2) The current search engine backend supports on-the-fly indexing.
"""
save_signals = [
(ReviewRequest, review_request_published, 'review_request'),
(User, post_save, 'instance'),
(Profile, post_save, 'instance'),
]
delete_signals = [
(ReviewRequest, post_delete),
(User, post_delete),
]
def __init__(self, *args, **kwargs):
"""Initialize the signal processor.
Args:
*args (tuple):
Positional arguments to pass to the parent constructor.
**kwargs (dict):
Keyword arguments to pass to the parent constructor.
"""
self.is_setup = False
self._can_process_signals = False
self._handlers = {}
self._pending_user_changes = threading.local()
super(SignalProcessor, self).__init__(*args, **kwargs)
@property
def can_process_signals(self):
"""Whether the signal processor can currently process signals."""
if not self._can_process_signals:
try:
SiteConfiguration.objects.get_current()
self._can_process_signals = True
except ObjectDoesNotExist:
pass
return self._can_process_signals
def setup(self):
"""Register the signal handlers for this processor."""
# We define this here instead of at the class level because we cannot
# reference class members during the class' definition.
m2m_changed_signals = [
(Group.users.through, self._handle_group_m2m_changed),
]
if not self.is_setup:
for cls, signal, instance_kwarg in self.save_signals:
handler = partial(self.check_handle_save,
instance_kwarg=instance_kwarg)
self._handlers[(cls, signal)] = handler
for cls, signal in self.delete_signals:
self._handlers[(cls, signal)] = self.check_handle_delete
for cls, handler in m2m_changed_signals:
self._handlers[(cls, m2m_changed)] = handler
for (cls, signal), handler in six.iteritems(self._handlers):
signal.connect(handler, sender=cls)
self.is_setup = True
def teardown(self):
"""Unregister all signal handlers for this processor."""
if self.is_setup:
for (cls, signal), handler in six.iteritems(self._handlers):
signal.disconnect(handler, sender=cls)
self.is_setup = False
def check_handle_save(self, instance_kwarg, **kwargs):
"""Conditionally update the search index when an object is updated.
Args:
instance_kwarg (unicode):
The name of the instance parameter.
**kwargs (dict):
Signal arguments. These will be passed to
:py:meth:`handle_save`.
"""
if not self.can_process_signals:
return
instance = kwargs.pop(instance_kwarg)
backend = search_backend_registry.current_backend
if backend and search_backend_registry.on_the_fly_indexing_enabled:
if isinstance(instance, Profile):
# When we save a Profile, we want to update the User index.
kwargs['sender'] = User
instance = instance.user
self.handle_save(instance=instance, **kwargs)
def check_handle_delete(self, **kwargs):
"""Conditionally update the search index when an object is deleted.
Args:
**kwargs (dict):
Signal arguments. These will be passed to
:py:meth:`handle_delete`.
"""
if not self.can_process_signals:
return
backend = search_backend_registry.current_backend
if backend and search_backend_registry.on_the_fly_indexing_enabled:
self.handle_delete(**kwargs)
def _handle_group_m2m_changed(self, instance, action, pk_set, reverse,
**kwargs):
"""Handle a Group.users relation changing.
When the :py:attr:`Group.users
<reviewboard.reviews.models.group.Group.users>` field changes, we don't
get a corresponding :py:data:`~django.db.signals.post_save` signal
(because the related model wasn't saved). Instead, we will get multiple
:py:data:`~django.db.signals.m2m_changed` signals that indicate how the
relation is changing. This method will handle those signals and
call the correct save method so that they can be re-indexed.
Args:
instance (django.contrib.auth.models.User or reviewboward.reviews.models.group.Group):
The model that updated.
action (unicode):
The update action. This will be one of:
* ``'pre_add'``
* ``'post_add'``
* ``'pre_remove'``
* ``'post_remove'``
* ``'pre_clear'``
* ``'post_clear'``
pk_set (set of int):
The primary keys of the related objects that changed.
When the action is ``'pre_clear'`` or ``'post_clear'``,
this argument will be an empty set.
reverse (bool):
Whether or not the reverse relation was modified. If
true, this indicated that ``instance`` is a
:py:class:`~django.contrib.auth.models.User` object and
``pk_set`` is the set of primary keys of the added or removed
groups.
When this argument is false, ``instance`` is a
:py:class:`~reviewboard.reviews.models.group.Group`
object and ``pk_set`` is the set of primary keys of the added
or removed users.
**kwargs (dict):
Additional keyword arguments.
"""
backend = search_backend_registry.current_backend
if not (backend and
search_backend_registry.on_the_fly_indexing_enabled):
return
if not hasattr(self._pending_user_changes, 'data'):
self._pending_user_changes.data = {}
if action in ('post_add', 'post_remove'):
if reverse:
# When using the reverse relation, the instance is the User and
# the pk_set is the PKs of the groups being added or removed.
users = [instance]
else:
# Otherwise the instance is the Group and the pk_set is the set
# of User primary keys.
users = User.objects.filter(pk__in=pk_set)
for user in users:
self.handle_save(instance=user, instance_kwarg='instance',
sender=User)
elif action == 'pre_clear':
# When ``reverse`` is ``True``, a User is having their groups
# cleared so we don't need to worry about storing any state in the
# pre_clear phase.
#
# Otherwise, a ReviewGroup is having their users cleared. In both
# the pre_clear and post_clear phases, the ``pk_set`` argument will
# be empty, so we cache the PKs of the current members of the
# groups so we know to reindex them.
if not reverse:
self._pending_user_changes.data[instance.pk] = list(
instance.users.values_list('pk', flat=True))
elif action == 'post_clear':
if reverse:
# When ``reverse`` is ``True``, we just have to reindex a
# single user.
self.handle_save(instance=instance, instance_kwarg='instance',
sender=User)
else:
# Here, we are reindexing every user that got removed from the
# group via clearing.
pks = self._pending_user_changes.data.pop(instance.pk)
for user in User.objects.filter(pk__in=pks):
self.handle_save(instance=user, instance_kwarg='instance',
sender=User)
|
import os
import shutil
from subprocess import call, Popen, PIPE
import logging
from lib.typecheck import *
from .. import util
from ..sample import Sample
from . import is_event
cur_dir = os.path.dirname(__file__)
root_dir = os.path.join(cur_dir, "..", "..")
lib_dir = os.path.join(root_dir, "lib")
agent_jar = os.path.join(lib_dir, "loggeragent.jar")
smpl_dir = os.path.join(root_dir, "sample")
sim = "Simulator"
@takes(str, str, str)
@returns(nothing)
def gen_aux(cmd, demo, java_dir):
# extract events from the demo's samples
smpl_path = os.path.join(smpl_dir, cmd, demo)
smpl_files = util.get_files_from_path(smpl_path, "txt")
smpls = []
for fname in smpl_files:
smpl = Sample(fname, is_event)
smpls.append(smpl)
def evt_to_str(evt): return "\"{}\"".format(evt)
def smpl_to_arr(smpl):
return '{' + ", ".join(map(evt_to_str, smpl.evts)) + '}'
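  # For example (illustrative only): a sample whose events are ["click", "drag"]
  # is rendered as the Java array literal {"click", "drag"}.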
evtss = [ smpl_to_arr(smpl) for smpl in smpls ]
# generate Simulator.java
sim_java = sim+".java"
sim_path = os.path.join(cur_dir, sim_java)
with open(sim_path, 'r') as f1:
scenarios = """
{{
{}
}}""".format(",\n".join(evtss))
raw_body = f1.read()
sim_body = raw_body.format(**locals())
sim_path_tgt = os.path.join(java_dir, sim_java)
with open(sim_path_tgt, 'w') as f2:
f2.write(sim_body)
logging.info("generating " + sim_java)
# generate adapted demo file
adp_name = "Adapted{demo}".format(**locals())
# TODO: read the demo;
# TODO: identify internal elements and make them public (or add getters)
# generate EventHandler.java
evt_hdl_java = "EventHandler"+".java"
evt_hdl_path = os.path.join(cur_dir, evt_hdl_java)
with open(evt_hdl_path, 'r') as f1:
# TODO: interpret the given line; generate and feed events to the demo
handle_code = ''
raw_body = f1.read()
impl_body = raw_body.format(**locals())
evt_hdl_path_tgt = os.path.join(java_dir, evt_hdl_java)
with open(evt_hdl_path_tgt, 'w') as f2:
f2.write(impl_body)
logging.info("generating " + evt_hdl_java)
@takes(str, str, list_of(str), str, str)
@returns(int)
def run(cmd, demo, patterns, out_dir, log_fname):
java_dir = os.path.join(out_dir, "java")
# generate auxiliary java files
gen_aux(cmd, demo, java_dir)
# rename packages
cwd = os.getcwd()
os.chdir(out_dir)
logging.info("renaming package")
res = call(["./rename-"+cmd+".sh"])
if res: return res
# build
logging.info("building the synthesized model, along with " + demo)
res = call(["ant"])
os.chdir(cwd)
if res: return res
# run, along with logger agent, and capture logs
opts = []
opts.append("-javaagent:{}=time".format(agent_jar))
opts.extend(["-cp", "bin", sim])
info = "INFO: "
p1 = Popen(["java"] + opts, stderr=PIPE)
p2 = Popen(["grep", info], stdin=p1.stderr, stdout=PIPE)
f = open(os.path.join(out_dir, log_fname), 'w')
indent = -2
while True:
try:
line = p2.stdout.readline()
if len(line) == 0: break # EOF
if not line.startswith(info): continue
line = line.rstrip("\r\n")
# "INFO: (>|<) ..."
if line[6] == '>':
indent += 2
#print "%*s%s" % (indent, "", line[6:])
f.write("%*s%s\n" % (indent, "", line[6:]))
elif line[6] == '<':
#print "%*s%s" % (indent, "", line[6:])
f.write("%*s%s\n" % (indent, "", line[6:]))
indent -= 2
except KeyboardInterrupt: break
f.close()
return 0
|
from deephyper.nas.preprocessing.preprocessing import minmaxstdscaler, stdscaler
|
from fastapi import Depends, APIRouter, Response
from fastapi.responses import JSONResponse
from dependency_injector.wiring import Provide, inject
from http import HTTPStatus
from pydantic.networks import HttpUrl
from application.container import ApplicationContainer
from application.service.device import DeviceService
from .model.event import EventStatus, EventModelResponse
from .model import ErrorResponseModel
event_api_router = APIRouter()
@event_api_router.post(
path="/device/{device_id}/event",
tags=["device","event"],
summary="Create Event for a dedicated Device",
description="",
responses={
HTTPStatus.OK.value: {"model": EventModelResponse},
HTTPStatus.NOT_FOUND.value: {"model": ErrorResponseModel},
HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponseModel}
}
)
@inject
async def post_event_to_device(
response: Response,
device_id: str,
device_service: DeviceService = Depends(Provide[ApplicationContainer.device_service])
):
_device = device_service.get_device_by_device_id(device_id=device_id)
if _device is None:
response.status_code = HTTPStatus.NOT_FOUND.value
return ErrorResponseModel()
else:
if await device_service.send_event(device=_device, message="Force"):
_status = EventStatus.OK
else:
_status = EventStatus.ERROR
response.status_code = HTTPStatus.OK.value
return EventModelResponse(
device=_device,
event_status=_status
)
|
import sys
from collections import defaultdict
def evolve_sequence(s0, inserts, steps):
# just count the pairs at each iteration
counts = defaultdict(lambda: 0)
# edge cases for later
edges = (s0[0], s0[-1])
for i in range(len(s0)-1):
counts[s0[i:i+2]] += 1
for _ in range(steps):
new_counts = defaultdict(lambda: 0)
for pair in counts:
a, b = pair
c = inserts[pair]
new_counts[a+c] += counts[pair]
new_counts[c+b] += counts[pair]
counts = new_counts
letters = set(''.join(pair for pair in counts))
totals = {}
for letter in letters:
total = sum(counts[pair] for pair in counts if letter in pair)
# also need to add doubles again to avoid undercounting
total += sum(
counts[pair] for pair in counts if pair == letter + letter)
# now divide by 2 since a single element belongs to 2 pairs
total = int(total / 2)
        # in the case of the edges, we've undercounted by 1
if letter in edges:
total += 1
totals[letter] = total
# sort totals by value
frequencies = sorted(totals.values())
return frequencies[-1] - frequencies[0]
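# Quick sanity check on a tiny hypothetical input (not from the puzzle): applying
# the single rule "AB" -> "C" once to "AB" gives "ACB", where every letter occurs
# exactly once, so the most-common minus least-common count is 0.
assert evolve_sequence("AB", {"AB": "C"}, 1) == 0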
def main(input_file):
with open(input_file, 'r') as f:
content = f.read()
s0, inserts_s = content.split('\n\n')
inserts = {}
for s in inserts_s.splitlines():
pair, insert = s.split(' -> ')
inserts[pair] = insert
val1 = evolve_sequence(s0, inserts, 10)
print('Part 1:', val1)
val2 = evolve_sequence(s0, inserts, 40)
print('Part 2:', val2)
if __name__ == '__main__':
input_file = sys.argv[-1] if len(sys.argv) > 1 else 'input.txt'
main(input_file)
|
from kivy.properties import StringProperty
from kivymd.theming import ThemableBehavior
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.screen import MDScreen
class FortnightlyRootScreen(MDScreen):
pass
class FortnightlyListItem(ThemableBehavior, MDBoxLayout):
title = StringProperty()
secondary_text = StringProperty()
image = StringProperty()
|
from scipy.integrate import odeint
from lmfit import minimize, Parameters
import numpy as np
from ..loss_common import msse, rmsse, wrap_reduce
from ...data import cols as DataCol
import pandas as pd
import optuna
from xlrd import XLRDError
msse = wrap_reduce(msse)
rmsse = wrap_reduce(rmsse)
def dpsird(y, t, n, beta, gamma, delta):
s, i, r, d = y
i_flow = beta * s * i / n
r_flow = gamma * i * 1
d_flow = delta * i * 1
dSdt = -i_flow
dIdt = i_flow - r_flow - d_flow
dRdt = r_flow
dDdt = d_flow
dPdt = dSdt + dIdt + dRdt + dDdt
assert abs(dPdt) <= 1e-6
return dSdt, dIdt, dRdt, dDdt
def pred(t, y0, n, beta, gamma, delta):
    # Integrate the SIRD equations over the time grid, t.
y0 = np.array([n - y0[0], *y0])
ret = odeint(dpsird, y0, t, args=(
n, beta, gamma, delta
))
retT = ret.T
s, i, r, d = retT
return s, i, r, d
def pred_full(days, n, beta, gamma, delta, first=1):
if not (isinstance(first, tuple) or isinstance(first, list) or isinstance(first, np.ndarray)):
first = (first, 0, 0)
t = np.linspace(0, days - 1, days) # days
y0 = first
return pred(t, y0, n, beta, gamma, delta)
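def _example_pred_full():
    """A minimal usage sketch with hypothetical (not fitted) parameters: simulate
    a 30-day SIRD trajectory for a population of 1,000, starting from one case."""
    s, i, r, d = pred_full(30, 1000, beta=0.3, gamma=0.1, delta=0.01, first=1)
    return s, i, r, d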
def make_objective(data, n, x=None):
def objective(params):
s, i, r, d = pred_full(len(data), n, first=data[0][0], **params)
if x is not None:
i, r, d = i[x], r[x], d[x]
ret = np.concatenate((i, r, d))
ret = ret - data.flatten("F") # Test flatten
return ret
return objective
def make_params(params):
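    # `params` maps each parameter name to an (initial, min, max) tuple, e.g. a
    # hypothetical hint like {"beta": (0.2, 0.0, 1.0), "gamma": (0.1, 0.0, 1.0),
    # "delta": (0.01, 0.0, 1.0)}.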
params_1 = Parameters()
for kwarg, (init, mini, maxi) in params.items():
params_1.add(str(kwarg), value=init, min=mini, max=maxi, vary=True)
return params_1
def fit(objective, params):
result = minimize(objective, params, calc_covar=True)
return result
class SIRDModel:
def __init__(self, params_hint, n, loss_fn=rmsse, limit_fit=None, reduction="mean"):
self.params_hint = params_hint
self.n = n
self.loss_fn = loss_fn
self.loss = None
self.limit_fit = limit_fit
self.reduction = reduction
self.clear()
@property
def fit_params(self):
if not self.fit_result:
raise Exception("Please fit the model first")
return {k: self.fit_result.params[k].value for k in self.fit_result.params.keys()}
def clear(self):
self.fit_result = None
self.prev = None
self.loss = None
self.pred_start = None
self.first = 1
def fit(self, past, limit_fit=None):
self.clear()
limit_fit = limit_fit or self.limit_fit
if limit_fit:
past = past[-limit_fit:]
objective = make_objective(past, self.n)
self.fit_result = fit(objective, self.params_hint)
self.first = past[0]
self.prev = past[-1]
self.pred_start = len(past)
return self.fit_result
def pred(self, days):
if not self.fit_result:
raise Exception("Please fit the model first!")
full_len = self.pred_start + days
s, i, r, d = pred(
np.linspace(self.pred_start, full_len - 1, days),
self.prev,
self.n,
**self.fit_params
)
return np.array([i, r, d]).T
def pred_full(self, days):
if not self.fit_result:
raise Exception("Please fit the model first!")
s, i, r, d = pred_full(
days,
self.n,
first=self.first,
**self.fit_params
)
return np.array([i, r, d]).T
def test(self, past, future, loss_fn=None):
loss_fn = loss_fn or self.loss_fn
pred = self.pred(len(future))
self.loss = loss_fn(past, future, pred)
return self.loss
def eval(self, past, future, loss_fn=rmsse, limit_fit=None):
self.fit(past, limit_fit=limit_fit)
return self.test(past, future, loss_fn=loss_fn)
def eval_dataset(self, dataset, loss_fn=rmsse, reduction=None, limit_fit=None):
reduction = reduction or self.reduction
losses = [
self.eval(past, future, loss_fn=loss_fn, limit_fit=limit_fit)
for past, future, indices in dataset
]
sum_loss = sum(losses)
count = len(losses)
if reduction == "sum":
loss = sum_loss
elif reduction in ("mean", "avg"):
loss = sum_loss / count
else:
raise Exception(f"Invalid reduction \"{reduction}\"")
self.loss = loss
return loss
def search_optuna(params_hint, n, dataset, loss_fn=msse, reduction="mean", limit_fit_min=7, limit_fit_max=366, no_limit=False, n_trials=None):
def objective(trial):
no_limit_1 = no_limit
if no_limit_1 is None:
no_limit_1 = trial.suggest_categorical("no_limit", (False, True))
if no_limit_1:
limit_fit = None
else:
limit_fit = trial.suggest_int("limit_fit", limit_fit_min, limit_fit_max)
model = SIRDModel(params_hint=params_hint, n=n, loss_fn=loss_fn, reduction=reduction, limit_fit=limit_fit)
return model.eval_dataset(dataset)
if n_trials is None:
n_trials = (limit_fit_max - limit_fit_min + 1)
study = optuna.create_study()
study.optimize(objective, n_trials=n_trials, n_jobs=1)
return study
def search_greedy(params_hint, n, dataset, loss_fn=rmsse, reduction="mean", limit_fit_min=7, limit_fit_max=366):
best_model = None
best_loss = np.inf
for limit_fit in [*range(limit_fit_min, limit_fit_max + 1), None]:
model = SIRDModel(params_hint=params_hint, n=n, loss_fn=loss_fn, reduction=reduction, limit_fit=limit_fit)
loss = model.eval_dataset(dataset)
if loss < best_loss:
best_model = model
best_loss = loss
return best_model, best_loss
class SIRDSearchLog:
def __init__(self, log_path, log_sheet_name="SIRD"):
self.log_path = log_path
self.log_sheet_name = log_sheet_name
self.load_log()
def load_log(self, log_path=None, log_sheet_name=None):
log_path = log_path or self.log_path
log_sheet_name = log_sheet_name or self.log_sheet_name
try:
self.log_df = pd.read_excel(log_path, sheet_name=log_sheet_name)
except (FileNotFoundError, ValueError, XLRDError):
self.log_df = pd.DataFrame([], columns=["group", "cluster", "kabko", "limit_fit", "loss"])
self.save_log(log_path=log_path, log_sheet_name=log_sheet_name)
return self.log_df
def save_log(self, log_path=None, log_sheet_name=None):
log_path = log_path or self.log_path
log_sheet_name = log_sheet_name or self.log_sheet_name
self.log_df.to_excel(log_path, sheet_name=log_sheet_name, index=False)
def is_search_done(self, group, cluster, kabko):
df = self.log_df
try:
return ((df["group"] == group) & (df["cluster"] == cluster) & (df["kabko"] == kabko)).any()
except (ValueError, XLRDError) as ex:
if "No sheet" in str(ex) or "is not in list" in str(ex):
return False
raise
def log(self, group, cluster, kabko, limit_fit, loss):
df = self.load_log()
df.loc[df.shape[0]] = {
"group": group,
"cluster": cluster,
"kabko": kabko,
"limit_fit": limit_fit,
"loss": loss
}
self.save_log()
class SIRDEvalLog:
def __init__(self, source_path, log_path, source_sheet_name="SIRD", log_sheet_name="Eval"):
self.source_path = source_path
self.log_path = log_path
self.source_sheet_name = source_sheet_name
self.log_sheet_name = log_sheet_name
self.source_df = pd.read_excel(source_path, sheet_name=source_sheet_name)
self.load_log()
def load_log(self, log_path=None, log_sheet_name=None):
log_path = log_path or self.log_path
log_sheet_name = log_sheet_name or self.log_sheet_name
try:
self.log_df = pd.read_excel(log_path, sheet_name=log_sheet_name)
except (FileNotFoundError, ValueError, XLRDError):
self.log_df = pd.DataFrame([], columns=["group", "cluster", "kabko", "limit_fit", "i", "r", "d"])
self.save_log(log_path=log_path, log_sheet_name=log_sheet_name)
return self.log_df
def save_log(self, log_path=None, log_sheet_name=None):
log_path = log_path or self.log_path
log_sheet_name = log_sheet_name or self.log_sheet_name
self.log_df.to_excel(log_path, sheet_name=log_sheet_name, index=False)
def is_search_done(self, group, cluster, kabko, df=None):
df = self.source_df if df is None else df
try:
return ((df["group"] == group) & (df["cluster"] == cluster) & (df["kabko"] == kabko)).any()
except (ValueError, XLRDError) as ex:
if "No sheet" in str(ex) or "is not in list" in str(ex):
return False
raise
def is_eval_done(self, group, cluster, kabko):
df = self.log_df
return self.is_search_done(group, cluster, kabko, df=df)
def log(self, group, cluster, kabko, limit_fit, loss, log_path=None, log_sheet_name=None):
assert len(loss) == 3
df = self.load_log()
df.loc[df.shape[0]] = {
"group": group,
"cluster": cluster,
"kabko": kabko,
"limit_fit": limit_fit,
"i": loss[0],
"r": loss[1],
"d": loss[2]
}
self.save_log(log_path=log_path, log_sheet_name=log_sheet_name)
def read_sird(self, group, cluster, kabko):
df = self.source_df
cond = ((df["group"] == group) & (df["cluster"] == cluster) & (df["kabko"] == kabko))
return df[cond]
|
#!/usr/bin/python
# Copyright 2014 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
import sys
t = BoostBuild.Tester(pass_toolset=False, pass_d0=False)
t.write("file.jam", """
actions run {
$(ACTION)
}
# Raw commands only work on Windows
if $(OS) = NT
{
JAMSHELL on test-raw = % ;
JAMSHELL on test-raw-fail = % ;
}
ACTION on test-raw = "\"$(PYTHON)\" -V" ;
run test-raw ;
ACTION on test-raw-fail = missing-executable ;
run test-raw-fail ;
# On Windows, the command is stored in a temporary
# file. On other systems it is passed directly.
if $(OS) = NT
{
JAMSHELL on test-py = $(PYTHON) ;
}
else
{
JAMSHELL on test-py = $(PYTHON) -c ;
}
ACTION on test-py = "
print \\\",\\\".join([str(x) for x in range(3)])
" ;
run test-py ;
DEPENDS all : test-raw test-raw-fail test-py ;
""")
t.run_build_system(["-ffile.jam", "-d1", "-sPYTHON=" + sys.executable], status=1)
t.expect_output_lines([
"...failed run test-raw-fail...",
"0,1,2",
"...failed updating 1 target...",
"...updated 2 targets..."])
t.cleanup()
|
import _tkinter
import tkinter as tk
from tkinter import ttk
from DataPoint import DataPoint
from RGB import RGB
import random
import time
from SortingAlgorithms.SortingAlgorithms import *
class Screen(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
# Dataset
self.data_set = list()
# Parameter
self.active_rgb = RGB(0, 0, 255)
self.show_as_bars = False
self.redraw_step_size_gui = tk.IntVar()
self.redraw_step_size_gui.trace_add("write", self.set_redraw_step_size)
self.redraw_step_size_gui.set(1)
# Status
self.is_sorting = False
# Dimensions ==
self.window_width = 1000
self.window_height = 700
self.control_width = 100
self.canvas_data_point_width = 3
self.canvas_data_point_space = 1
# Dimensions ==
# Sorting Algorithms
self.sorting_algorithms = SortingAlgorithms()
# Setting the window title
self.wm_title("Sorting Visualization")
# Make window not resizable
self.resizable(0, 0)
self.minsize(width=self.window_width, height=self.window_height)
# Init window_frame
self.window_frame = tk.Frame(self, width=self.window_width, height=self.window_height)
self.window_frame.grid(row=0, column=0, padx=0, pady=0)
# Setting up the GUI frames
self.application_status_frame = tk.Frame(self.window_frame, width=self.control_width, height=self.window_height)
self.application_status_frame.grid(row=0, column=0, padx=0, pady=0)
self.application_status_frame.pack_propagate(0)
self.application_main_frame = tk.Frame(self.window_frame, width=self.window_width - self.control_width,
height=self.window_height)
self.application_main_frame.grid(row=0, column=1, padx=0, pady=0)
# Setting up Canvas
self.canvas = tk.Canvas(self.application_main_frame, width=self.window_width - self.control_width,
height=self.window_height)
self.canvas.pack()
# Setting up control elements
toggle_view_button = ttk.Button(self.application_status_frame, text="Toggle view", command=self.toggle_view)
toggle_view_button.grid(row=0, column=0, padx=0, pady=5)
shuffle_button = ttk.Button(self.application_status_frame, text="Shuffle Single", command=self.shuffle_single)
shuffle_button.grid(row=1, column=0, padx=0, pady=5)
shuffle_button = ttk.Button(self.application_status_frame, text="Shuffle", command=self.shuffle)
shuffle_button.grid(row=2, column=0, padx=0, pady=5)
# Setting up Listbox
self.sorting_algorithms_select_box = tk.Listbox(self.application_status_frame, selectmode='browse')
self.sorting_algorithms_select_box.grid(row=3, column=0, padx=0, pady=5)
for i in self.sorting_algorithms.algorithms:
self.sorting_algorithms_select_box.insert('end', i.name())
sort_button = ttk.Button(self.application_status_frame, text="Sort", command=self.button_sort_pressed)
sort_button.grid(row=4, column=0, padx=0, pady=5)
# Setting up status label
self.status_label = ttk.Label(self.application_status_frame, text="")
self.status_label.grid(row=5, column=0, padx=0, pady=5)
redraw_step_size_label_text = tk.StringVar()
redraw_step_size_label_text.set("Enter redraw step size:")
redraw_step_size_label = tk.Label(self.application_status_frame, textvariable=redraw_step_size_label_text,
height=1)
redraw_step_size_label.grid(row=6, column=0, padx=0, pady=0)
redraw_step_size_input = tk.Entry(self.application_status_frame, textvariable=self.redraw_step_size_gui)
redraw_step_size_input.grid(row=7, column=0, padx=0, pady=0)
# Generate random dataset
self.generate_data_set(
int((self.window_width - self.control_width) / (
self.canvas_data_point_width + self.canvas_data_point_space)))
# Shuffle dataset
self.shuffle()
self.mainloop()
def shuffle(self):
if not self.is_sorting:
random.shuffle(self.data_set)
self.update_canvas()
def shuffle_single(self):
if not self.is_sorting:
index1 = random.randint(0, len(self.data_set) - 1)
index2 = random.randint(0, len(self.data_set) - 1)
self.data_set[index1], self.data_set[index2] = self.data_set[index2], self.data_set[index1]
self.update_canvas()
def generate_data_set(self, length: int):
self.data_set = list()
for _ in range(length):
value = random.randint(10, self.window_height)
green_portion = int(value / self.window_height * 255)
red_portion = 255 - green_portion
rgb = RGB(red_portion, green_portion, 0)
self.data_set.append(DataPoint(value, rgb))
def update_canvas(self):
pos = self.canvas_data_point_width
self.canvas.delete("all")
for i in self.data_set:
color = i.color
if i.active:
color = self.active_rgb
if self.show_as_bars:
self.canvas.create_line(pos + (self.canvas_data_point_width / 2), self.window_height,
pos + (self.canvas_data_point_width / 2), self.window_height - i.value,
fill=self.get_hex_code(color.r, color.g, color.b),
width=self.canvas_data_point_width)
else:
self.canvas.create_rectangle(pos, self.window_height - i.value, pos + self.canvas_data_point_width,
self.window_height - i.value + self.canvas_data_point_width,
fill=self.get_hex_code(color.r, color.g, color.b), width=0)
pos += self.canvas_data_point_width + self.canvas_data_point_space
self.canvas.update()
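    # get_hex_code turns an RGB triple into a Tk colour string,
    # e.g. (255, 0, 128) -> "#ff0080".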
def get_hex_code(self, r, g, b):
return "#" + '{:02x}'.format(r) + '{:02x}'.format(g) + '{:02x}'.format(b)
def set_info(self, msg):
self.status_label.config(text=msg)
def set_redraw_step_size(self, n, m, x):
try:
tmp = int(self.redraw_step_size_gui.get())
if tmp < 1:
tmp = 1
self.redraw_step_size = tmp
self.redraw_step_size_gui.set(tmp)
except _tkinter.TclError:
self.redraw_step_size = 1
self.redraw_step_size_gui.set(1)
def toggle_view(self):
self.show_as_bars = not self.show_as_bars
self.update_canvas()
def sort(self, sorting_algorithm: SortingAlgorithm):
self.set_info("Sorting with " + sorting_algorithm.name())
start = time.time()
self.is_sorting = True
sort = sorting_algorithm(self.data_set, self.update_canvas, self.redraw_step_size)
sort.sort()
self.is_sorting = False
self.set_info("Done in " + "{:.{}f}".format(time.time() - start, 2) + "sec")
def get_selected_sorting_algorithm(self):
curselection = self.sorting_algorithms_select_box.curselection()
if len(curselection) > 0:
selected_item = curselection[0]
return self.sorting_algorithms_select_box.get(selected_item)
else:
return None
def button_sort_pressed(self):
if not self.is_sorting:
selected_sorting_algorithm = self.get_selected_sorting_algorithm()
found = False
for sorting_algorithm in self.sorting_algorithms.algorithms:
if selected_sorting_algorithm == sorting_algorithm.name():
found = True
self.sort(sorting_algorithm)
break
if not found:
self.set_info("Please select a valid Algorithm.")
else:
self.set_info("Please wait till it's sorted.")
|
import pytest
import six
import tensorflow as tf
from tensorflow.keras.mixed_precision import experimental as mixed_precision
from models import simplenet_tf
def tf_context(func):
@six.wraps(func)
def wrapped(*args, **kwargs):
# Run tests only on the gpu as grouped convs are not supported on cpu
with tf.device('gpu:0'):
out = func(*args, **kwargs)
return out
return wrapped
@pytest.mark.parametrize("groups", [1, 32, 64])
@pytest.mark.parametrize("channel_deconv_loc", ['pre', 'post'])
@pytest.mark.parametrize("blocks", [1, 32, 64])
@tf_context
def test_fastdeconv_1d(groups, channel_deconv_loc, blocks):
""" Test 1D variant """
x = tf.zeros([16, 24, 3])
model2 = simplenet_tf.SimpleNet1D(num_classes=10, num_channels=64, groups=groups,
channel_deconv_loc=channel_deconv_loc, blocks=blocks)
# trace the model
model_traced2 = tf.function(model2)
out = model_traced2(x, training=True)
assert out.shape == [16, 10]
out = model_traced2(x, training=False)
assert out.shape == [16, 10]
@pytest.mark.parametrize("groups", [1, 32, 64])
@pytest.mark.parametrize("channe_deconv_loc", ['pre', 'post'])
@pytest.mark.parametrize("blocks", [1, 32, 64])
@tf_context
def test_fastdeconv_2d(groups, channe_deconv_loc, blocks):
""" Test 1D variant """
x = tf.zeros([16, 32, 32, 3])
model2 = simplenet_tf.SimpleNet2D(num_classes=10, num_channels=64, groups=groups,
channel_deconv_loc=channe_deconv_loc, blocks=blocks)
# trace the model
model_traced2 = tf.function(model2)
out = model_traced2(x, training=True)
assert out.shape == [16, 10]
out = model_traced2(x, training=False)
assert out.shape == [16, 10]
if __name__ == '__main__':
pytest.main([__file__])
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
from azure.eventhub import EventPosition, EventHubClient
@pytest.mark.liveTest
def test_iothub_receive_sync(iot_connection_str, device_id):
client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False)
receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events')
try:
received = receiver.receive(timeout=10)
assert len(received) == 0
finally:
receiver.close()
@pytest.mark.liveTest
def test_iothub_get_properties_sync(iot_connection_str, device_id):
client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False)
properties = client.get_properties()
assert properties["partition_ids"] == ["0", "1", "2", "3"]
@pytest.mark.liveTest
def test_iothub_get_partition_ids_sync(iot_connection_str, device_id):
client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False)
partitions = client.get_partition_ids()
assert partitions == ["0", "1", "2", "3"]
@pytest.mark.liveTest
def test_iothub_get_partition_properties_sync(iot_connection_str, device_id):
client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False)
partition_properties = client.get_partition_properties("0")
assert partition_properties["id"] == "0"
@pytest.mark.liveTest
def test_iothub_receive_after_mgmt_ops_sync(iot_connection_str, device_id):
client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False)
partitions = client.get_partition_ids()
assert partitions == ["0", "1", "2", "3"]
receiver = client.create_consumer(consumer_group="$default", partition_id=partitions[0], event_position=EventPosition("-1"), operation='/messages/events')
with receiver:
received = receiver.receive(timeout=10)
assert len(received) == 0
@pytest.mark.liveTest
def test_iothub_mgmt_ops_after_receive_sync(iot_connection_str, device_id):
client = EventHubClient.from_connection_string(iot_connection_str, network_tracing=False)
receiver = client.create_consumer(consumer_group="$default", partition_id="0", event_position=EventPosition("-1"), operation='/messages/events')
with receiver:
received = receiver.receive(timeout=10)
assert len(received) == 0
partitions = client.get_partition_ids()
assert partitions == ["0", "1", "2", "3"]
|
"""Added constraints for col status in user
Revision ID: fb92d381ab6a
Revises: 180046a31cb3
Create Date: 2019-11-02 23:15:43.686061
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fb92d381ab6a'
down_revision = '180046a31cb3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('user', 'status',
existing_type=sa.String(length=30),
nullable=False)
op.create_index(op.f('ix_user_status'), 'user', ['status'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_status'), table_name='user')
op.alter_column('user', 'status',
existing_type=sa.String(length=30),
nullable=True)
# ### end Alembic commands ###
|
# -*- coding: utf-8 -*-
import logging
import json
import requests
import pkg_resources
from django import forms
from sentry.plugins.bases import NotificationPlugin
TOKEN_ENDPOINT = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
NOTIFICATION_ENDPOINT = "https://qyapi.weixin.qq.com/cgi-bin/message/send"
MESSAGE_TEMPLATE = '''Sentry {team_name}/{project_name}\t{level}\n
{message}\n
{url}
'''
logger = logging.getLogger("sentry.plugins.wechat")
dist = pkg_resources.get_distribution("sentry_wechat")
class WechatOptionsForm(forms.Form):
agent_id = forms.IntegerField(help_text="Agent ID")
access_key = forms.CharField(help_text="Access key")
secret_key = forms.CharField(help_text="Secret key")
    target_users = forms.CharField(
        help_text="Target users (multiple values should be separated with \"|\")",
        required=False)
    target_parties = forms.CharField(
        help_text="Target parties (multiple values should be separated with \"|\")",
        required=False)
    target_tags = forms.CharField(
        help_text="Target tags (multiple values should be separated with \"|\")",
        required=False)
is_safe = forms.BooleanField(help_text="Safe or not", required=False)
class WechatMessage(NotificationPlugin):
title = "WeChat"
slug = "wechat"
conf_title = title
conf_key = "wechat"
version = dist.version
author = "Aaron Jheng"
project_conf_form = WechatOptionsForm
def is_configured(self, project):
return all((self.get_option(k, project)
for k in ("agent_id", "access_key", "secret_key")))
def notify_users(self, group, event, fail_silently=False):
project = event.project
team = project.team
agent_id = self.get_option("agent_id", project)
access_key = self.get_option("access_key", project)
secret_key = self.get_option("secret_key", project)
target_users = self.get_option("target_users", project)
target_parties = self.get_option("target_parties", project)
target_tags = self.get_option("target_tags", project)
is_safe = self.get_option("is_safe", project)
message = {
"msgtype": "text",
"touser": target_users if target_users else "@all",
"toparty": target_parties if target_parties else "@all",
"totag": target_tags if target_tags else "@all",
"agentid": agent_id,
"text": {
"content":
MESSAGE_TEMPLATE.format(
**{
"team_name": team.name,
"project_name": project.name,
"level": event.get_tag('level').capitalize(),
"message": event.get_legacy_message(),
"url": group.get_absolute_url(),
}),
},
"safe": "1" if is_safe else "0"
}
return self._push_notification(access_key, secret_key, message)
def _get_token(self, access_key, secret_key):
resp = requests.get(
TOKEN_ENDPOINT,
params={
"corpid": access_key,
"corpsecret": secret_key
})
token = None
try:
token = resp.json().get("access_token")
        except Exception:
pass
return token
def _push_notification(self, access_key, secret_key, message):
info = "Failed"
access_token = self._get_token(access_key, secret_key)
if access_token is None:
info = "No valid token"
else:
resp = requests.post(
NOTIFICATION_ENDPOINT,
params={"access_token": access_token},
data=json.dumps(message))
info = resp.text
return info
|
# BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
HDFS core implementation.
"""
import os
def init():
import pydoop.utils.jvm as jvm
jvm.load_jvm_lib()
try:
# NOTE: JVM must be already instantiated
import pydoop.native_core_hdfs
except ImportError:
return None # should only happen at compile time
else:
return pydoop.native_core_hdfs
def core_hdfs_fs(host, port, user):
_CORE_MODULE = init()
if _CORE_MODULE is None:
if os.path.isdir("pydoop"):
msg = "Trying to import from the source directory?"
else:
msg = "Check that Pydoop is correctly installed"
raise RuntimeError("Core module unavailable. %s" % msg)
return _CORE_MODULE.CoreHdfsFs(host, port, user)
|
import tensorflow as tf
from utils import bbox_utils
def decode_predictions(
y_pred,
input_size,
nms_max_output_size=400,
confidence_threshold=0.01,
iou_threshold=0.45,
num_predictions=10
):
    # decode bounding box predictions
df_boxes = y_pred[..., -8:-4]
variances = y_pred[..., -4:]
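    # Each offset below is scaled by the square root of its variance term and by
    # the matched default box's width/height, then shifted by the default box
    # centre; width and height offsets are decoded through an exponential.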
cx = (y_pred[..., -20] * tf.sqrt(variances[..., 0]) * df_boxes[..., 2]) + df_boxes[..., 0]
cy = (y_pred[..., -19] * tf.sqrt(variances[..., 1]) * df_boxes[..., 3]) + df_boxes[..., 1]
w = tf.exp(y_pred[..., -18] * tf.sqrt(variances[..., 2])) * df_boxes[..., 2]
h = tf.exp(y_pred[..., -17] * tf.sqrt(variances[..., 3])) * df_boxes[..., 3]
x1 = y_pred[..., -16] * tf.sqrt(variances[..., 0]) * df_boxes[..., 2] + (df_boxes[..., 0] - df_boxes[..., 2]/2)
y1 = y_pred[..., -15] * tf.sqrt(variances[..., 1]) * df_boxes[..., 3] + (df_boxes[..., 1] - df_boxes[..., 3]/2)
x2 = y_pred[..., -14] * tf.sqrt(variances[..., 0]) * df_boxes[..., 2] + (df_boxes[..., 0] + df_boxes[..., 2]/2)
y2 = y_pred[..., -13] * tf.sqrt(variances[..., 1]) * df_boxes[..., 3] + (df_boxes[..., 1] - df_boxes[..., 3]/2)
x3 = y_pred[..., -12] * tf.sqrt(variances[..., 0]) * df_boxes[..., 2] + (df_boxes[..., 0] + df_boxes[..., 2]/2)
y3 = y_pred[..., -11] * tf.sqrt(variances[..., 1]) * df_boxes[..., 3] + (df_boxes[..., 1] + df_boxes[..., 3]/2)
x4 = y_pred[..., -10] * tf.sqrt(variances[..., 0]) * df_boxes[..., 2] + (df_boxes[..., 0] - df_boxes[..., 2]/2)
y4 = y_pred[..., -9] * tf.sqrt(variances[..., 1]) * df_boxes[..., 3] + (df_boxes[..., 1] + df_boxes[..., 3]/2)
# convert bboxes to corners format (xmin, ymin, xmax, ymax) and scale to fit input size
xmin = (cx - (0.5 * w)) * input_size
ymin = (cy - (0.5 * h)) * input_size
xmax = (cx + (0.5 * w)) * input_size
ymax = (cy + (0.5 * h)) * input_size
x1 = x1*input_size
y1 = y1*input_size
x2 = x2*input_size
y2 = y2*input_size
x3 = x3*input_size
y3 = y3*input_size
x4 = x4*input_size
y4 = y4*input_size
# concat class predictions and bbox predictions together
y_pred = tf.concat([
y_pred[..., :-20],
tf.expand_dims(xmin, axis=-1),
tf.expand_dims(ymin, axis=-1),
tf.expand_dims(xmax, axis=-1),
tf.expand_dims(ymax, axis=-1),
tf.expand_dims(x1, axis=-1),
tf.expand_dims(y1, axis=-1),
tf.expand_dims(x2, axis=-1),
tf.expand_dims(y2, axis=-1),
tf.expand_dims(x3, axis=-1),
tf.expand_dims(y3, axis=-1),
tf.expand_dims(x4, axis=-1),
tf.expand_dims(y4, axis=-1),
], -1)
#
batch_size = tf.shape(y_pred)[0] # Output dtype: tf.int32
num_boxes = tf.shape(y_pred)[1]
num_classes = y_pred.shape[2] - 12
class_indices = tf.range(1, num_classes)
# Create a function that filters the predictions for the given batch item. Specifically, it performs:
# - confidence thresholding
# - non-maximum suppression (NMS)
# - top-k filtering
def filter_predictions(batch_item):
# Create a function that filters the predictions for one single class.
def filter_single_class(index):
            # From a tensor of shape (n_boxes, n_classes + 12 coordinates), build
            # a tensor of shape (n_boxes, 2 + 12) holding the class ID, the
            # confidence values for just one class (determined by `index`), and
            # the box coordinates.
confidences = tf.expand_dims(batch_item[..., index], axis=-1)
class_id = tf.fill(dims=tf.shape(confidences), value=tf.cast(index, tf.float32))
box_coordinates = batch_item[..., -12:]
single_class = tf.concat([class_id, confidences, box_coordinates], -1)
# Apply confidence thresholding with respect to the class defined by `index`.
threshold_met = single_class[:, 1] > confidence_threshold
single_class = tf.boolean_mask(tensor=single_class,
mask=threshold_met)
# If any boxes made the threshold, perform NMS.
def perform_nms():
scores = single_class[..., 1]
# `tf.image.non_max_suppression()` needs the box coordinates in the format `(ymin, xmin, ymax, xmax)`.
xmin = tf.expand_dims(single_class[..., -12], axis=-1)
ymin = tf.expand_dims(single_class[..., -11], axis=-1)
xmax = tf.expand_dims(single_class[..., -10], axis=-1)
ymax = tf.expand_dims(single_class[..., -9], axis=-1)
boxes = tf.concat([ymin, xmin, ymax, xmax], -1)
maxima_indices = tf.image.non_max_suppression(boxes=boxes,
scores=scores,
max_output_size=nms_max_output_size,
iou_threshold=iou_threshold,
                                                              name='non_maximum_suppression')
maxima = tf.gather(params=single_class,
indices=maxima_indices,
axis=0)
return maxima
def no_confident_predictions():
return tf.constant(value=0.0, shape=(1, 14))
single_class_nms = tf.cond(tf.equal(tf.size(single_class), 0), no_confident_predictions, perform_nms)
            # Make sure the per-class result is exactly `nms_max_output_size` elements long.
padded_single_class = tf.pad(tensor=single_class_nms,
paddings=[[0, nms_max_output_size - tf.shape(single_class_nms)[0]], [0, 0]],
mode='CONSTANT',
constant_values=0.0)
return padded_single_class
# Iterate `filter_single_class()` over all class indices.
filtered_single_classes = tf.nest.map_structure(
tf.stop_gradient,
tf.map_fn(fn=lambda i: filter_single_class(i),
elems=tf.range(1, num_classes),
# dtype=tf.float32,
parallel_iterations=128,
# back_prop=False,
swap_memory=False,
# infer_shape=True,
fn_output_signature=tf.TensorSpec((None, 14), dtype=tf.float32),
name='loop_over_classes'))
# Concatenate the filtered results for all individual classes to one tensor.
filtered_predictions = tf.reshape(tensor=filtered_single_classes, shape=(-1, 14))
        # Perform top-k filtering for this batch item, or pad it in case there are
        # fewer than `num_predictions` boxes left at this point. Either way, produce
        # a tensor of length `num_predictions`. By the time we return the final
        # results tensor for the whole batch, all batch items must have the same
        # number of predicted boxes so that the tensor dimensions are homogeneous.
        # If fewer than `num_predictions` predictions are left after the filtering
        # process above, we pad the missing predictions with zeros as dummy entries.
def top_k():
return tf.gather(params=filtered_predictions,
indices=tf.nn.top_k(filtered_predictions[:, 1], k=num_predictions, sorted=True).indices,
axis=0)
def pad_and_top_k():
padded_predictions = tf.pad(tensor=filtered_predictions,
paddings=[[0, num_predictions - tf.shape(filtered_predictions)[0]], [0, 0]],
mode='CONSTANT',
constant_values=0.0)
return tf.gather(params=padded_predictions,
indices=tf.nn.top_k(padded_predictions[:, 1], k=num_predictions, sorted=True).indices,
axis=0)
top_k_boxes = tf.cond(tf.greater_equal(tf.shape(filtered_predictions)[0], num_predictions), top_k, pad_and_top_k)
return top_k_boxes
# Iterate `filter_predictions()` over all batch items.
output_tensor = tf.nest.map_structure(
tf.stop_gradient,
tf.map_fn(fn=lambda x: filter_predictions(x),
elems=y_pred,
# dtype=None,
parallel_iterations=128,
# back_prop=False,
swap_memory=False,
# infer_shape=True,
fn_output_signature=tf.TensorSpec((num_predictions, 14), dtype=tf.float32),
name='loop_over_batch'))
return output_tensor
|
from utils import *
from darknet import Darknet
import cv2
from DroneVision import DroneVision
from Mambo import Mambo
import threading
import time
import shutil
from subprocess import check_output, CalledProcessError
import signal
import os
# set this to true if you want to fly for the demo
testFlying = False
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
#print("in save pictures")
img = self.vision.get_latest_valid_picture()
filename = "test_image_%06d.png" % self.index
cv2.imwrite(filename, img)
self.index +=1
#print(self.index)
def demo_cam(cfgfile, weightfile):
m = Darknet(cfgfile)
m.print_network()
m.load_weights(weightfile)
print('Loading weights from %s... Done!' % (weightfile))
if m.num_classes == 20:
namesfile = 'data/voc.names'
elif m.num_classes == 80:
namesfile = 'data/coco.names'
else:
namesfile = 'data/names'
class_names = load_class_names(namesfile)
use_cuda = 1
if use_cuda:
m.cuda()
cap = cv2.VideoCapture("rtsp://192.168.10.36")
if not cap.isOpened():
print("Unable to open camera")
exit(-1)
cv2.namedWindow("YoloV2", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("YoloV2", cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
while True:
res, img = cap.read()
if res:
sized = cv2.resize(img, (m.width, m.height))
bboxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
print('------')
draw_img = plot_boxes_cv2(img, bboxes, None, class_names)
cv2.imshow("YoloV2", draw_img)
cv2.waitKey(1)
else:
print("Unable to read image")
exit(-1)
def demo(cfgfile, weightfile):
m = Darknet(cfgfile)
m.print_network()
m.load_weights(weightfile)
print('Loading weights from %s... Done!' % (weightfile))
if m.num_classes == 20:
namesfile = 'data/voc.names'
elif m.num_classes == 80:
namesfile = 'data/coco.names'
else:
namesfile = 'data/names'
class_names = load_class_names(namesfile)
use_cuda = 1
if use_cuda:
m.cuda()
# cap = cv2.VideoCapture("rtsp://192.168.10.1:554/onvif1")
# cap = cv2.VideoCapture("rtsp://192.168.99.1/media/stream2")
#---------------------FOR Mambo------------------------------------
# you will need to change this to the address of YOUR mambo; For BLE use only
mamboAddr = "C8:3A:35:CE:87:B2"
# make my mambo object
# remember to set True/False for the wifi depending on if you are using the wifi or the BLE to connect
mambo = Mambo(mamboAddr, use_wifi=True)
print("trying to connect to mambo now")
success = mambo.connect(num_retries=3)
print("connected: %s" % success)
if (success):
# get the state information
print("sleeping")
mambo.smart_sleep(1)
mambo.ask_for_state_update()
mambo.smart_sleep(1)
# wait for next step
nextstep = raw_input("Any key to continue:")
print("Preparing to open vision")
mamboVision = DroneVision(mambo, is_bebop=False, buffer_size=30)
# userVision = UserVision(mamboVision)
# mamboVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
success = mamboVision.open_video()
#img = mamboVision.get_latest_valid_picture()
print("Success in opening vision is %s" % success)
cv2.namedWindow("YoloV2", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("YoloV2", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
# ------------------------------------------------------------------
if (success):
print("Vision successfully started!")
mambo.smart_sleep(1)
# removed the user call to this function (it now happens in open_video())
# mamboVision.start_video_buffering()
count = 30
while True:
imgName = "./images/image_" + str(count).zfill(5) + ".png"
img = cv2.imread(imgName)
# img = mamboVision.get_latest_valid_picture()
if img is None:
print("Unable to read image")
else:
sized = cv2.resize(img, (m.width, m.height))
bboxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
print('------')
draw_img = plot_boxes_cv2(img, bboxes, None, class_names)
cv2.imshow("YoloV2", draw_img)
count = count + 1
if cv2.waitKey(1) == ord('q'):
break
# done doing vision demo
print("Quiting...")
mamboVision.close_video()
mambo.smart_sleep(10)
# grep the ffmpeg process
try:
pidlist = map(int, check_output(["pidof", "ffmpeg"]).split())
except CalledProcessError:
print("Cannot grep the pid")
pidlist = []
for pid in pidlist:
os.kill(pid,signal.SIGTERM)
print("Killing the ffmpeg process...")
time.sleep(2)
# print("disconnecting")
# mambo.disconnect()
print("Cleaning up...")
for file in os.listdir('./images'):
while True:
try:
if os.path.isfile(os.path.join('./images',file)):
os.unlink(os.path.join('./images',file))
break
except Exception:
                print('Trying to delete images')
print("disconnecting")
mambo.disconnect()
# print("Done!")
# shutil.rmtree("./images",ignore_errors=True)
# os.mkdir("./images")
# cap = cv2.VideoCapture("rtsp://192.168.10.36")
# if not cap.isOpened():
# print("Unable to open camera")
# exit(-1)
# while True:
# res, img = cap.read()
# if res:
# sized = cv2.resize(img, (m.width, m.height))
# bboxes = do_detect(m, sized, 0.5, 0.4, use_cuda)
# print('------')
# draw_img = plot_boxes_cv2(img, bboxes, None, class_names)
# cv2.imshow(cfgfile, draw_img)
# cv2.waitKey(1)
# else:
# print("Unable to read image")
# exit(-1)
############################################
if __name__ == '__main__':
demo("cfg/yolo.cfg", "yolo.weights")
# demo_cam("cfg/yolov2.cfg", "yolov2.weights")
# if sys.argv[1] == "Drone":
# demo("cfg/yolo.cfg", "yolo.weights")
# elif sys.argv[1] == "Camera":
# demo_cam("cfg/yolo.cfg", "yolo.weights")
# if len(sys.argv) == 3:
# cfgfile = sys.argv[1]
# weightfile = sys.argv[2]
# demo(cfgfile, weightfile)
# #demo('cfg/tiny-yolo-voc.cfg', 'tiny-yolo-voc.weights')
# else:
# print('Usage:')
# print(' python demo.py cfgfile weightfile')
# print('')
# print(' perform detection on camera')
|
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from tornado.web import StaticFileHandler
from tornado.web import Application, RequestHandler
from tornado.options import define, options
import socket
import os.path
import uuid
import ast
import time
from sysmpy.config import gui_server_address
#=================================================================================================#
# Main Handler #
#=================================================================================================#
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render('gui_index.html')
#=================================================================================================#
# Property Chart Handler #
#=================================================================================================#
class PropertyChartHandler(tornado.web.RequestHandler):
def get(self):
"""
# http://127.0.0.1:9191/pc/?g={'x':0, 'y':0}
When the chart_view is initialized, multiple charts are created.
e.g.,)
<div class="grid-container">
<div class="grid-item">
<div id="linechart" name="size" style="width: 400; height: 300"></div>
</div>
<div class="grid-item">
<div id="linechart2" name="speed" style="width: 400; height: 300"></div>
</div>
</div>
"""
ids = ''
chart_html = ''
properties = self.get_arguments("g")[0]
properties = ast.literal_eval(properties)
# print(properties)
ids += 'var ids = ['
chart_html += '<div class="grid-container">'
for k, v in properties.items():
chart_html += '<div class="grid-item">'
chart_html += f'<div id="{k}" name="{k}" style="width: 200; height: 150"></div>'
chart_html += '</div>'
ids += f'"{k}", '
chart_html += '</div>'
ids += ']'
self.render("chart_line_multi_vars.html", ids=ids, chart_html=chart_html)
#=================================================================================================#
# Property Table Handler #
#=================================================================================================#
class PropertyTableHandler(tornado.web.RequestHandler):
def get(self):
"""
# http://127.0.0.1:9191/pt/?g={'x':0, 'y':'0', 'z':'true'}
e.g.,)
columns =
"data.addColumn('string', 'Name');
data.addColumn('number', 'Salary');
data.addColumn('boolean', 'Full Time Employee');"
"""
columns = ''
properties = self.get_arguments("g")[0]
properties = ast.literal_eval(properties)
# print(properties)
for k, v in properties.items():
if isinstance(v, str):
if v == 'true':
type_val = 'boolean'
else:
type_val = 'string'
else:
type_val = 'number'
columns += f'data.addColumn("{type_val}", "{k}"); '
self.render('table_multi_vars.html', columns=columns)
#=================================================================================================#
# Simulation Update Handler #
#=================================================================================================#
class SimUpdateHandler(tornado.web.RequestHandler):
def get(self):
"""
# http://127.0.0.1:9191/sim_updated?g={'x':0, 'y':0}
"""
# print('Simulation data was updated')
data = self.get_arguments("g")[0]
GuiSocketHandler.updated_events.append(data)
GuiSocketHandler.send_to_clients()
#=================================================================================================#
# Action Diagram Handler #
#=================================================================================================#
class ActionDiagramHandler(RequestHandler):
def get(self):
my_graph = self.get_arguments("g")
if len(my_graph) == 0:
my_graph = """var A_process = graph.insertVertex(parent, 'A process', '', 105.0, 42.0, 30, 30, 'Process') """
else:
my_graph = str(my_graph[0])
my_graph = my_graph.replace("/n", "\n")
my_graph = my_graph.replace("/8/", ";")
mxClient_js = f"<script type='text/javascript' src='http://{gui_server_address}:9191/src/js/mxClient.js'></script>"
self.render("mx_ad_view.html", ad_model=my_graph, mxClient_js=mxClient_js)
#=================================================================================================#
# Block Diagram Handler #
#=================================================================================================#
class BlockDiagramHandler(RequestHandler):
def get(self):
# print('BlockDiagramHandler')
my_graph = self.get_arguments("g")
if len(my_graph) == 0:
my_graph = """var c3 = graph.insertVertex(parent, 'idc3','Process 1', 150,30,100,200,'Process;');
var c31 = graph.insertVertex(c3,null,'', 0,0,100,50,'ProcessImage;image=images/img3.png;');"""
else:
my_graph = str(my_graph[0])
my_graph = my_graph.replace("/n", "\n")
my_graph = my_graph.replace("/8/", ";")
# print(my_graph)
mxClient_js = f"<script type='text/javascript' src='http://{gui_server_address}:9191/src/js/mxClient.js'></script>"
self.render("mx_bd_view.html", bd_model=my_graph, mxClient_js=mxClient_js)
#=================================================================================================#
# Hierarchy Diagram Handler #
#=================================================================================================#
class HierarchyDiagramHandler(RequestHandler):
def get(self):
my_graph = self.get_arguments("g")
if len(my_graph) == 0:
my_graph = """var A_process = graph.insertVertex(parent, 'A process', '', 105.0, 42.0, 30, 30, 'Process') """
else:
my_graph = str(my_graph[0])
my_graph = my_graph.replace("/n", "\n")
my_graph = my_graph.replace("/8/", ";")
mxClient_js = f"<script type='text/javascript' src='http://{gui_server_address}:9191/src/js/mxClient.js'></script>"
self.render("mx_hd_view.html", hd_model=my_graph, mxClient_js=mxClient_js)
#=================================================================================================#
# Gui Socket Handler #
#=================================================================================================#
class GuiSocketHandler(tornado.websocket.WebSocketHandler):
waiters = set()
updated_events = []
def get_compression_options(self):
return {}
def open(self):
GuiSocketHandler.waiters.add(self)
def on_close(self):
GuiSocketHandler.waiters.remove(self)
@classmethod
def send_to_clients(cls):
while len(cls.updated_events) > 0:
evt = cls.updated_events.pop(0)
for waiter in cls.waiters:
time.sleep(0.1)
try:
waiter.write_message(evt)
                except Exception:
print("Update error!")
def on_message(self, message):
parsed = tornado.escape.json_decode(message)
chat = {"id": str(uuid.uuid4()), "body": parsed["body"]}
chat["html"] = tornado.escape.to_basestring(
self.render_string("message.html", message=chat)
)
# ChatSocketHandler.update_cache(chat)
GuiSocketHandler.send_to_clients()
#=================================================================================================#
# Tornado Application #
#=================================================================================================#
class Application(tornado.web.Application):
def __init__(self, images_path=None):
define("port", default=9191, help="run on the given port", type=int)
if images_path is None:
images_path = '/examples/Jupyter_notebook_examples/CyberFactory/images'
handlers = [(r"/", MainHandler),
(r'/src/js/(.*)', StaticFileHandler, {'path': './src/js'}),
(r'/src/css/(.*)', StaticFileHandler, {'path': './src/css'}),
(r'/src/images/(.*)', StaticFileHandler, {'path': './src/images'}),
(r'/images/(.*)', StaticFileHandler, {'path': images_path}),
(r"/pt/", PropertyTableHandler),
(r"/pc/", PropertyChartHandler),
(r"/ad/", ActionDiagramHandler),
(r"/bd/", BlockDiagramHandler),
(r"/hd/", HierarchyDiagramHandler),
(r"/sim_updated", SimUpdateHandler),
(r"/guisocket", GuiSocketHandler)]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
)
super(Application, self).__init__(handlers, **settings)
def TornadoGuiServer(images_path=None):
app = Application(images_path)
http_server = tornado.httpserver.HTTPServer(app, max_header_size=1024 ** 3)
try:
http_server.listen(options.port)
except socket.error as e:
print(e)
current = tornado.ioloop.IOLoop.current()
if current.asyncio_loop.is_running() is False:
print('Tornado Server runs!')
current.start()
else:
print('We use the existing tornado server!')
def RunServer():
    notebook_path = os.path.join(os.path.abspath(''), 'images')
print(notebook_path)
TornadoGuiServer(images_path=notebook_path)
# if __name__ == "__main__":
# TornadoGuiServer(images_path=None)
# RunServer()
|
"""
SoftLayer.tests.managers.object_storage_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import SoftLayer
from SoftLayer import fixtures
from SoftLayer import testing
class ObjectStorageTests(testing.TestCase):
def set_up(self):
self.object_storage = SoftLayer.ObjectStorageManager(self.client)
def test_list_accounts(self):
accounts = self.object_storage.list_accounts()
self.assertEqual(accounts,
fixtures.SoftLayer_Account.getHubNetworkStorage)
def test_list_endpoints(self):
accounts = self.set_mock('SoftLayer_Account', 'getHubNetworkStorage')
accounts.return_value = {
'storageNodes': [{
'datacenter': {'name': 'dal05'},
'frontendIpAddress': 'https://dal05/auth/v1.0/',
'backendIpAddress': 'https://dal05/auth/v1.0/'}
],
}
endpoints = self.object_storage.list_endpoints()
self.assertEqual(endpoints,
[{'datacenter': {'name': 'dal05'},
'private': 'https://dal05/auth/v1.0/',
'public': 'https://dal05/auth/v1.0/'}])
def test_list_endpoints_no_results(self):
accounts = self.set_mock('SoftLayer_Account', 'getHubNetworkStorage')
accounts.return_value = {
'storageNodes': [],
}
endpoints = self.object_storage.list_endpoints()
self.assertEqual(endpoints,
[])
def test_create_credential(self):
accounts = self.set_mock('SoftLayer_Network_Storage_Hub_Cleversafe_Account', 'credentialCreate')
accounts.return_value = {
"id": 1103123,
"password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXMkAF",
"username": "XfHhBNBPlPdlWyaP",
"type": {
"name": "S3 Compatible Signature"
}
}
credential = self.object_storage.create_credential(100)
self.assertEqual(credential,
{
"id": 1103123,
"password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXMkAF",
"username": "XfHhBNBPlPdlWyaP",
"type": {
"name": "S3 Compatible Signature"
}
})
def test_delete_credential(self):
accounts = self.set_mock('SoftLayer_Network_Storage_Hub_Cleversafe_Account', 'credentialDelete')
accounts.return_value = True
credential = self.object_storage.delete_credential(100)
self.assertEqual(credential, True)
def test_limit_credential(self):
accounts = self.set_mock('SoftLayer_Network_Storage_Hub_Cleversafe_Account', 'getCredentialLimit')
accounts.return_value = 2
credential = self.object_storage.limit_credential(100)
self.assertEqual(credential, 2)
def test_list_credential(self):
accounts = self.set_mock('SoftLayer_Network_Storage_Hub_Cleversafe_Account', 'getCredentials')
accounts.return_value = [
{
"id": 1103123,
"password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXsf4sf",
"username": "XfHhBNBPlPdlWyaP3fsd",
"type": {
"name": "S3 Compatible Signature"
}
},
{
"id": 1102341,
"password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXMkAF",
"username": "XfHhBNBPlPdlWyaP",
"type": {
"name": "S3 Compatible Signature"
}
}
]
credential = self.object_storage.list_credential(100)
self.assertEqual(credential,
[
{
"id": 1103123,
"password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXsf4sf",
"username": "XfHhBNBPlPdlWyaP3fsd",
"type": {
"name": "S3 Compatible Signature"
}
},
{
"id": 1102341,
"password": "nwUEUsx6PiEoN0B1Xe9z9hUCyXMkAF",
"username": "XfHhBNBPlPdlWyaP",
"type": {
"name": "S3 Compatible Signature"
}
}
]
)
|
#! /usr/bin/env python
#
# Copyright (C) Hideto Mori
DESCRIPTION = "QUEEN (a Python module to universally program, QUinE, and Edit Nucleotide sequences)"
DISTNAME = 'python-queen'
MAINTAINER = 'Hideto Mori'
MAINTAINER_EMAIL = 'hidto7592@gmail.com'
URL = 'https://github.com/yachielab/QUEEN'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/yachielab/QUEEN'
VERSION = '1.1.0'
PYTHON_REQUIRES = ">=3.7"
INSTALL_REQUIRES = [
'numpy>=1.2',
'biopython>=1.78',
'matplotlib>=3.2',
'requests~=2.23.0',
'regex>=2.5',
'graphviz==0.17',
'beautifulsoup4>=4.4'
]
PACKAGES = [
'QUEEN'
]
CLASSIFIERS = [
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'License :: OSI Approved :: MIT License',
]
with open('README.md', 'r', encoding='utf-8') as fp:
readme = fp.read()
LONG_DESCRIPTION = readme
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
if __name__ == "__main__":
from setuptools import setup
import sys
if sys.version_info[:2] < (3, 7):
raise RuntimeError("QUEEN requires python >= 3.7.")
setup(
name=DISTNAME,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
python_requires=PYTHON_REQUIRES,
install_requires=INSTALL_REQUIRES,
packages=PACKAGES,
classifiers=CLASSIFIERS
)
|
from typing import Dict
# The rest of the codebase uses storoshis everywhere.
# Only use these units for user facing interfaces.
units: Dict[str, int] = {
"stor": 10 ** 12, # 1 stor (STOR) is 1,000,000,000,000 storoshi (1 trillion)
"storoshi": 1,
"colouredcoin": 10 ** 3, # 1 coloured coin is 1000 colouredcoin storoshis
}
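# Hedged illustration (not part of the original module): a minimal helper
# showing the intended direction of conversion for user-facing display.
# The function name below is an assumption made for this sketch only.
def to_display_amount(storoshi_amount: int, unit: str = "stor") -> float:
    """Convert an internal storoshi amount into the given display unit."""
    return storoshi_amount / units[unit]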
|
"""
Day 13 - find the earliest bus departing after the given timestamp
"""
import utils
if __name__ == '__main__':
lines = utils.read_strings_from_lines("in/day_13.txt")
departure = int(lines[0])
numbers = [int(x) for x in lines[1].split(",") if x != "x"]
print(numbers)
real_departures = []
for number in numbers:
times = departure // number
if times * number == departure:
print("Exactly: ", number)
        else:
            # the bus departs exactly at `departure`; no extra wait needed
            times += 1
result = times * number
real_departures.append(result)
best_bus_line_index = real_departures.index(min(real_departures))
print(f"Best departure has ID {numbers[best_bus_line_index]}")
print(f"The departure is at time {real_departures[best_bus_line_index]}")
print(f"Solution: {numbers[best_bus_line_index] * (real_departures[best_bus_line_index] - departure)}")
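# Hand-checked example using the sample values from the puzzle statement
# (not this repository's input file): with departure = 939 and buses
# 7, 13, 59, 31, 19 the next departures are 945, 949, 944, 961, 950,
# so the best bus is 59 leaving at 944 (a 5 minute wait) and the
# printed solution would be 59 * 5 = 295.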
|
#!/usr/bin/env python3
# -*- Coding: UTF-8 -*- #
# -*- System: Linux -*- #
# -*- Usage: *.py -*- #
# Owner: Jacob B. Sanders
# Source: code.cloud-technology.io
# License: BSD 2-Clause License
"""
Awaitable Endpoint Interface (GET/POST/PUT/DELETE routes that block for a configurable number of seconds)
"""
import asyncio
from . import *
class Interface(Request):
"""
    Router interface exposing the Awaitable GET/POST/PUT/DELETE endpoints.
"""
Route = "Awaitable"
Application = ASGI.Application
Generator = Request.Generator(
prefix = "{0}/{1}".format(
Request.Prefix, Route
), tags = Request.Tags + [Route]
)
Methods = [
"GET"
]
def __init__(self, *argv, **kwargs):
super(Interface, self).__init__()
@staticmethod
async def Waiter(Time: Integer, Lock: asyncio.Semaphore):
Internal = asyncio.Semaphore()
await Internal.acquire()
await asyncio.sleep(float(Time))
Internal.release()
Lock.release()
@staticmethod
@Generator.get("",
name = "Awaitable (GET)",
responses = Request.Responses,
response_model = Boolean
)
async def Generate(Time: Integer = 5):
"""
# Spawn an Awaitable #
*Waiter will block for 5 seconds by default.*
"""
Internal = asyncio.Semaphore()
await Internal.acquire()
await Interface.Waiter(Time, Internal)
return True
@staticmethod
@Generator.post("",
name = "Awaitable (POST)",
responses = Request.Responses,
response_model = Dictionary
)
async def Generate(Time: Integer = 5,
Data: Optional[Union[String, Dictionary]] = Body(default = {})
):
"""
# Spawn an Awaitable #
*Waiter will block for 5 seconds by default.*
"""
Internal = asyncio.Semaphore()
await Internal.acquire()
await Interface.Waiter(Time, Internal)
return {
"Response": True,
"Body": Data
}
@staticmethod
@Generator.put("",
name = "Awaitable (PUT)",
responses = Request.Responses,
response_model = Dictionary
)
async def Generate(Time: Integer = 5,
Data: Optional[Union[String, Dictionary]] = Body(default = {})
):
"""
# Spawn an Awaitable #
*Waiter will block for 5 seconds by default.*
"""
Internal = asyncio.Semaphore()
await Internal.acquire()
await Interface.Waiter(Time, Internal)
return {
"Response": True,
"Body": Data
}
@staticmethod
@Generator.delete("",
name = "Awaitable (DELETE)",
responses = Request.Responses,
response_model = Boolean
)
async def Generate(Time: Integer = 5):
"""
# Spawn an Awaitable #
*Waiter will block for 5 seconds by default.*
"""
Internal = asyncio.Semaphore()
await Internal.acquire()
await Interface.Waiter(Time, Internal)
return True
Interface.Application.API.include_router(Interface.Generator)
__all__ = [
"Interface"
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 19:26:07 2017
@author: raimundoabrillopez
This file reads the exported csv from typeform and transforms it into files with the same structure
we use to generate calendars. You need to have your temporada-alimentos-report.csv in the raw folder.
Writes different calendar files to ../data/calendar/, one per answer.
To be improved: Add header with geodata.
"""
# Do the imports
import os
import pandas as pd
import locale
locale.setlocale(locale.LC_TIME, 'es_ES.UTF-8')
# Get paths
currentPWD = os.getcwd()
dwd = currentPWD[:-7]+'data/raw/'
aux = currentPWD[:-7]+'data/aux/'
cwd = currentPWD[:-7]+'data/calendar/'
hwd = currentPWD[:-7]+'data/'
#Get typeform file
os.chdir(dwd)
files = os.listdir(dwd)
data = pd.read_csv('temporada-alimentos-report.csv', encoding ='utf-8',index_col=1)
data.fillna(0,inplace=True)
data.drop('#',axis = 1,inplace=True)
data.drop('Fecha de entrada (UTC)',axis = 1,inplace=True)
data.drop('Fecha de envío (UTC)',axis = 1,inplace=True)
data.drop('Network ID',axis = 1,inplace=True)
data.drop('Puntúa esta web',axis = 1,inplace=True)
data.drop('¿Tienes comentarios?',axis = 1,inplace=True)
data.index.name='Producto'
# Generate valid column names
col_names = []
for i in range(int(len(data.columns)/12)):
col_names.append('Ene.'+str(i))
col_names.append('Feb.'+str(i))
col_names.append('Mar.'+str(i))
col_names.append('Abr.'+str(i))
col_names.append('May.'+str(i))
col_names.append('Jun.'+str(i))
col_names.append('Jul.'+str(i))
col_names.append('Ago.'+str(i))
col_names.append('Sep.'+str(i))
col_names.append('Oct.'+str(i))
col_names.append('Nov.'+str(i))
col_names.append('Dic.'+str(i))
# Transpose and map names to soydetemporada
data.columns = col_names
data = data.transpose()
names = pd.read_csv(aux+'typeform_names.csv',encoding ='utf-8',header=0,index_col=0)
data['_PRODUCTO']= data.index.map(lambda x: x.split('.')[1])
data['_Mes']= data.index.map(lambda x: x.split('.')[0])
# Assign X to answers
for column in data.columns.tolist()[:-2]:
data[column] = data[column].apply(lambda x: 'X' if x else None)
data['_PRODUCTO'] = data['_PRODUCTO'].astype('int64')
data['_PRODUCTO'] = data['_PRODUCTO'].apply(lambda x: names.loc[x])
names.set_index('Producto',inplace=True)
os.chdir(cwd)
# Generate answer file per user, filtering and pivoting table.
for column in data.columns.tolist()[:-2]:
datos = pd.DataFrame(index=data.index, data=data._Mes)
datos['_PRODUCTO'] = data['_PRODUCTO']
datos['Valor']= data[column]
#datos['ID'] = datos['TIPO DE PRODUCTO'].apply(lambda x: names.loc[x])
datos = datos.pivot(index='_PRODUCTO', columns='_Mes', values='Valor')
datos.columns = ['ABR','AGO','DIC','ENE','FEB','JUL','JUN','MAR','MAY','NOV','OCT','SEP']
datos['ID'] = names.CAL
datos['TIPO DE PRODUCTO'] = names.index
datos.set_index('ID',inplace=True)
cols = datos.columns.tolist()
cols = cols[-1:] + cols[:-1]
datos = datos[cols]
cols = ['TIPO DE PRODUCTO', 'ENE','FEB','MAR','ABR','MAY','JUN','JUL','AGO','SEP','OCT','NOV','DIC']
datos = datos[cols]
datos.to_csv(column+'.csv', encoding='utf-8')
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: widevine_pssh_data.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='widevine_pssh_data.proto',
package='shaka.media',
syntax='proto2',
serialized_pb=_b('\n\x18widevine_pssh_data.proto\x12\x0bshaka.media\"\x8f\x02\n\x10WidevinePsshData\x12:\n\talgorithm\x18\x01 \x01(\x0e\x32\'.shaka.media.WidevinePsshData.Algorithm\x12\x0e\n\x06key_id\x18\x02 \x03(\x0c\x12\x10\n\x08provider\x18\x03 \x01(\t\x12\x12\n\ncontent_id\x18\x04 \x01(\x0c\x12\x0e\n\x06policy\x18\x06 \x01(\t\x12\x1b\n\x13\x63rypto_period_index\x18\x07 \x01(\r\x12\x17\n\x0fgrouped_license\x18\x08 \x01(\x0c\x12\x19\n\x11protection_scheme\x18\t \x01(\r\"(\n\tAlgorithm\x12\x0f\n\x0bUNENCRYPTED\x10\x00\x12\n\n\x06\x41\x45SCTR\x10\x01\"G\n\x0eWidevineHeader\x12\x0f\n\x07key_ids\x18\x02 \x03(\t\x12\x10\n\x08provider\x18\x03 \x01(\t\x12\x12\n\ncontent_id\x18\x04 \x01(\x0c')
)
_WIDEVINEPSSHDATA_ALGORITHM = _descriptor.EnumDescriptor(
name='Algorithm',
full_name='shaka.media.WidevinePsshData.Algorithm',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNENCRYPTED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AESCTR', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=273,
serialized_end=313,
)
_sym_db.RegisterEnumDescriptor(_WIDEVINEPSSHDATA_ALGORITHM)
_WIDEVINEPSSHDATA = _descriptor.Descriptor(
name='WidevinePsshData',
full_name='shaka.media.WidevinePsshData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='algorithm', full_name='shaka.media.WidevinePsshData.algorithm', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key_id', full_name='shaka.media.WidevinePsshData.key_id', index=1,
number=2, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='provider', full_name='shaka.media.WidevinePsshData.provider', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='content_id', full_name='shaka.media.WidevinePsshData.content_id', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='policy', full_name='shaka.media.WidevinePsshData.policy', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crypto_period_index', full_name='shaka.media.WidevinePsshData.crypto_period_index', index=5,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='grouped_license', full_name='shaka.media.WidevinePsshData.grouped_license', index=6,
number=8, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='protection_scheme', full_name='shaka.media.WidevinePsshData.protection_scheme', index=7,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_WIDEVINEPSSHDATA_ALGORITHM,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=42,
serialized_end=313,
)
_WIDEVINEHEADER = _descriptor.Descriptor(
name='WidevineHeader',
full_name='shaka.media.WidevineHeader',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key_ids', full_name='shaka.media.WidevineHeader.key_ids', index=0,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='provider', full_name='shaka.media.WidevineHeader.provider', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='content_id', full_name='shaka.media.WidevineHeader.content_id', index=2,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=315,
serialized_end=386,
)
_WIDEVINEPSSHDATA.fields_by_name['algorithm'].enum_type = _WIDEVINEPSSHDATA_ALGORITHM
_WIDEVINEPSSHDATA_ALGORITHM.containing_type = _WIDEVINEPSSHDATA
DESCRIPTOR.message_types_by_name['WidevinePsshData'] = _WIDEVINEPSSHDATA
DESCRIPTOR.message_types_by_name['WidevineHeader'] = _WIDEVINEHEADER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
WidevinePsshData = _reflection.GeneratedProtocolMessageType('WidevinePsshData', (_message.Message,), dict(
DESCRIPTOR = _WIDEVINEPSSHDATA,
__module__ = 'widevine_pssh_data_pb2'
# @@protoc_insertion_point(class_scope:shaka.media.WidevinePsshData)
))
_sym_db.RegisterMessage(WidevinePsshData)
WidevineHeader = _reflection.GeneratedProtocolMessageType('WidevineHeader', (_message.Message,), dict(
DESCRIPTOR = _WIDEVINEHEADER,
__module__ = 'widevine_pssh_data_pb2'
# @@protoc_insertion_point(class_scope:shaka.media.WidevineHeader)
))
_sym_db.RegisterMessage(WidevineHeader)
# @@protoc_insertion_point(module_scope)
|
import asyncio
from collections import deque
__all__ = ["SendCtrl"]
class SendCtrl:
    """
    Send-side flow control: senders await `flowing()` before writing; the owner
    calls `obstruct()` to pause them, `unleash()` to resume them, and
    `shutdown()` to fail any still-pending waiters.
    """
__slots__ = "_flowing", "_waiters", "_sending"
def __init__(self):
self._flowing = True
self._waiters = deque()
self._sending = False
def qlen(self):
return len(self._waiters)
def startup(self):
if self._waiters:
raise Exception("Unclean startup", {"waiters": self._waiters})
self._flowing = True
self._sending = True
def shutdown(self, exc=None):
self._sending = False
self._flowing = False
waiters = self._waiters
self._waiters = deque()
for fut in waiters:
if fut.done():
continue
if exc is None: # init BPE as late as possible
exc = BrokenPipeError("transport closed")
fut.set_exception(exc)
async def flowing(self):
"""
awaitable for this ctrl object to be in `flowing` state.
"""
if not self._sending:
raise asyncio.InvalidStateError("not in sending state")
# is in flowing state, return fast
if self._flowing:
return
# in non-flowing state, await unleash notification
        fut = asyncio.get_event_loop().create_future()
self._waiters.append(fut)
try:
await fut
        except asyncio.CancelledError:
            # remove earlier to conserve some RAM, or it'll be removed from the deque at next unleash
            try:
                self._waiters.remove(fut)
            except ValueError:
                pass
            raise  # re-raise
def obstruct(self):
"""
put this ctrl object into `non-flowing` state
"""
self._flowing = False
def unleash(self):
"""
put this ctrl object into `flowing` state and awake as many pending coroutines awaiting `flowing` state as
possible as this ctrl object is still in `flowing` state
"""
self._flowing = True
self._unleash_one()
def _unleash_one(self):
# stop unleashing if not in flowing state anymore
if not self._flowing:
return
# trigger 1st non-canceled waiter
while self._waiters:
fut = self._waiters.popleft()
if fut.done():
# just throw away canceled waiters
continue
# trigger this waiter now
fut.set_result(None)
            # schedule one more unleash to run later.
            # as this future's awaiter will send more data in next tick, and it'll run before next _unleash_one()
            # here scheduled after the set_result() call, it's fairly possible `obstruct` has been called by then.
            # python default loop and uvloop are confirmed working this way
            asyncio.get_event_loop().call_soon(self._unleash_one)
return
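# Hedged usage sketch (not part of the original module): a minimal, runnable
# demonstration of the intended flow -- a sender parks on `flowing()` while
# the ctrl is obstructed and resumes once `unleash()` is called.  The demo
# names below are illustration only.
if __name__ == "__main__":
    async def _demo():
        ctrl = SendCtrl()
        ctrl.startup()
        ctrl.obstruct()                  # simulate a full write buffer

        async def sender():
            await ctrl.flowing()         # parked until unleash()
            print("buffer drained, sending now")

        task = asyncio.ensure_future(sender())
        await asyncio.sleep(0.1)
        ctrl.unleash()                   # wake the parked sender
        await task
        ctrl.shutdown()

    asyncio.run(_demo())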
|
import numpy as np
def predict_boxes_near(heatmap,T,threshold=0.85): #bounding box usually large in size
threshold = threshold*np.amax(heatmap)
t_area = T.shape[0]*T.shape[1]
def explore(i,j,cnt):
if heatmap[i][j]<threshold or cnt>100:
return [[],[]]
heatmap[i][j]=0
coords = [[i],[j]]
if i>=1:
res1 = explore(i-1,j,cnt+1)
coords[0]+=res1[0]
coords[1]+=res1[1]
if i<len(heatmap)-1:
res2 = explore(i+1,j,cnt+1)
coords[0]+=res2[0]
coords[1]+=res2[1]
if j>=1:
res3 = explore(i,j-1,cnt+1)
coords[0]+=res3[0]
coords[1]+=res3[1]
if j<len(heatmap[0])-1:
res4 = explore(i,j+1,cnt+1)
coords[0]+=res4[0]
coords[1]+=res4[1]
return coords
boxes = []
for i in range(len(heatmap)):
for j in range(len(heatmap[0])):
if heatmap[i][j]>threshold:
coords = explore(i,j,0)
tl_row,tl_col,br_row,br_col=min(coords[0]), min(coords[1]), max(coords[0]), max(coords[1])
                box_h, box_w = br_row-tl_row, br_col-tl_col
                if 0.3*t_area < box_h*box_w < 1.5*t_area and 0.6 <= box_h/box_w <= 1.6:
                    score = np.mean(np.array(heatmap)[tl_row:br_row+1, tl_col:br_col+1])
boxes.append([tl_row,tl_col,br_row,br_col,score])
return boxes
def predict_boxes(heatmap,T,threshold=0.91): #easy to cause a series of overlapped false alarms on the edge of an object
'''
This function takes heatmap and returns the bounding boxes and associated
confidence scores.
'''
'''
BEGIN YOUR CODE
'''
threshold = threshold*np.amax(heatmap)
temp_h = int(T.shape[0]//2)
temp_w = int(T.shape[1]//2)
boxes = []
origin_map=np.copy(heatmap)
center_r, center_c =-1, -1
while True:
max_val = np.amax(heatmap)
if max_val<threshold:
break
center_posi = np.where(origin_map==max_val)
if center_r!= center_posi[0][0] and center_c!= center_posi[1][0]:
center_r, center_c=center_posi[0][0],center_posi[1][0]
else:
break
print(center_r,center_c)
tl_row = max(center_r-temp_h,0)
tl_col = max(center_c-temp_w,0)
br_row = min(center_r+temp_h,len(heatmap))
br_col = min(center_c+temp_w,len(heatmap[0]))
for row in range(tl_row,br_row+1):
for col in range(tl_col,br_col+1):
heatmap[row][col] = 0
score = origin_map[center_r][center_c] ##### score: score of conv / temp conv temp
boxes.append([tl_row,tl_col,br_row,br_col,score])
return boxes
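# Hedged demo (not part of the original file): run the detector on a tiny
# synthetic heatmap containing one bright blob roughly the size of the
# template.  The values below are made up for illustration only.
if __name__ == "__main__":
    demo_T = np.ones((5, 5))                 # only the template's shape is used
    demo_heatmap = np.zeros((40, 40))
    demo_heatmap[10:15, 20:25] = 1.0         # a single 5x5 "detection"
    demo_boxes = predict_boxes(demo_heatmap.copy(), demo_T)
    # each entry is [tl_row, tl_col, br_row, br_col, score]
    print(demo_boxes)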
|
# tests grabbed from:
# https://github.com/tidyverse/dplyr/blob/master/tests/testthat/test-if-else.R
# and
# https://github.com/tidyverse/dplyr/blob/master/tests/testthat/test-case-when.R
import pytest
import pandas
import numpy as np
from datar.core.backends.pandas import Series
from datar import f
from datar.base import NA, c
from datar.datar import get
from datar.dplyr import if_else, case_when, mutate, pull
from datar.datasets import mtcars
from ..conftest import assert_iterable_equal
def test_scalar_true_false_are_vectorized():
x = c(True, True, False, False)
out = if_else(x, 1, 2)
assert list(out) == [1, 1, 2, 2]
# Series
x = Series(c(True, True, False, False))
out = if_else(x, 1, 2)
assert isinstance(out, Series)
assert list(out) == [1, 1, 2, 2]
def test_vector_true_false_ok():
x = np.array([-1, 0, 1])
out = if_else(x < 0, x, 0)
assert list(out) == [-1, 0, 0]
out = if_else(x > 0, x, 0)
assert list(out) == [0, 0, 1]
def test_missing_values_are_missing():
out = if_else(c(True, NA, False), -1, 1)
# assert_iterable_equal(out, [-1, NA, 1])
# NA as false
assert_iterable_equal(out, [-1, 1, 1])
out = if_else(c(True, NA, False), -1, 1, 0)
assert_iterable_equal(out, [-1, 0, 1])
def test_if_else_errors():
# ok, numbers are able to be converted to booleans
out = if_else(range(1, 11), 1, 2)
assert list(out) == [1.0] * 10
data = np.array([1, 2, 3])
with pytest.raises(ValueError, match="size"):
if_else(data < 2, [1, 2], [1, 2, 3])
with pytest.raises(ValueError, match="size"):
if_else(data < 2, [1, 2, 3], [1, 2])
# case_when ------------------
def test_matches_values_in_order():
x = np.array([1, 2, 3])
out = case_when(x <= 1, 1, x <= 2, 2, x <= 3, 3)
assert list(out) == [1, 2, 3]
def test_unmatched_gets_missing_value():
x = np.array([1, 2, 3])
out = case_when(x <= 1, 1, x <= 2, 2)
assert_iterable_equal(out, [1, 2, NA])
def test_missing_values_can_be_replaced():
x = np.array([1, 2, 3, NA])
out = case_when(x <= 1, 1, x <= 2, 2, pandas.isna(x), 0)
assert_iterable_equal(out, [1, 2, NA, 0])
def test_na_conditions():
out = case_when([True, False, NA], [1, 2, 3], True, 4)
assert list(out) == [1, 4, 4]
def test_atomic_conditions():
import warnings
warnings.filterwarnings("error")
out = case_when(True, [1, 2, 3], False, [4, 5, 6])
assert list(out) == [1, 2, 3]
out = case_when(NA, [1, 2, 3], True, [4, 5, 6])
assert list(out) == [4, 5, 6]
def test_0len_conditions_and_values():
out = case_when(True, [], False, [])
assert list(out) == []
out = case_when([], 1, [], 2)
assert list(out) == []
def test_inside_mutate():
out = (
mtcars
>> get(f[:4])
>> mutate(out=case_when(f.cyl == 4, 1, f["am"] == 1, 2, True, 0))
>> pull(to="list")
)
assert out == [2, 2, 1, 0]
def test_errors():
x = np.array([NA] * 10)
with pytest.raises(ValueError):
# condition has to be the same length as data
case_when(x, [True, False], [1, 2, 3], [False, True], [1, 2])
with pytest.raises(ValueError):
case_when()
with pytest.raises(ValueError):
case_when("a")
# ok
case_when([], 1)
|
#!/usr/bin/env python3
import sys
assert sys.version_info[:2] >= (3,0), "This is Python 3 code"
def generate():
import hashlib
print("""\
# DO NOT EDIT DIRECTLY! Autogenerated by agenttestgen.py
#
# To regenerate, run
# python3 agenttestgen.py > agenttestdata.py
#
# agenttestgen.py depends on the testcrypt system, so you must also
# have built testcrypt in the parent directory, or else set
# PUTTY_TESTCRYPT to point at a working implementation of it.
""")
from testcrypt import (rsa_generate, dsa_generate, ecdsa_generate,
eddsa_generate, random_clear, random_queue,
ssh_key_public_blob, ssh_key_openssh_blob,
ssh_key_sign, rsa1_generate, rsa_ssh1_encrypt,
rsa_ssh1_public_blob, rsa_ssh1_private_blob_agent,
mp_from_bytes_be)
from agenttest import (Key2, TestSig2, test_message_to_sign,
Key1, test_session_id)
import ssh
keygen2 = [
('RSA-1024', lambda: rsa_generate(1024, False),
(ssh.SSH_AGENT_RSA_SHA2_256, ssh.SSH_AGENT_RSA_SHA2_512)),
('DSA-1024', lambda: dsa_generate(1024)),
('ECDSA-p256', lambda: ecdsa_generate(256)),
('Ed25519', lambda: eddsa_generate(256)),
]
keys2 = []
for record in keygen2:
if len(record) == 2:
record += ((),)
comment, genfn, flaglist = record
flaglist = (0,) + flaglist
random_clear()
random_queue(b''.join(hashlib.sha512('{}{:d}'.format(comment, j)
.encode('ASCII')).digest()
for j in range(1000)))
key = genfn()
sigs = [TestSig2(flags, ssh_key_sign(key, test_message_to_sign, flags))
for flags in flaglist]
keys2.append(Key2(comment.encode("ASCII"),
ssh_key_public_blob(key),
sigs,
ssh_key_openssh_blob(key)))
print("def key2examples(Key2, TestSig2):\n return {!r}".format(keys2))
keygen1 = [
('RSA-1024a', 1024),
('RSA-1024b', 1024),
('RSA-768c', 768),
('RSA-768d', 768),
]
keys1 = []
for comment, bits in keygen1:
random_clear()
random_queue(b''.join(hashlib.sha512('{}{:d}'.format(comment, j)
.encode('ASCII')).digest()
for j in range(1000)))
key = rsa1_generate(bits)
preimage = b'Test128BitRSA1ChallengeCleartext'
assert len(preimage) == 32
challenge_bytes = rsa_ssh1_encrypt(preimage, key)
assert len(challenge_bytes) > 0
challenge = int(mp_from_bytes_be(challenge_bytes))
response = hashlib.md5(preimage + test_session_id).digest()
keys1.append(Key1(comment.encode("ASCII"),
rsa_ssh1_public_blob(key, 'exponent_first'),
challenge, response,
rsa_ssh1_private_blob_agent(key)))
print("def key1examples(Key1):\n return {!r}".format(keys1))
if __name__ == "__main__":
generate()
|
# -*- coding: utf-8 -*-
"""
Tests for chronos ClusterCatalog and Cluster queries.
"""
from chronos.cluster import ClusterCatalog, Cluster
CATALOG = "CantatGaudin2020"
CLUSTER = "IC_2602"
def test_cluster_catalog():
# catalog
cc = ClusterCatalog(catalog_name=CATALOG, verbose=False)
df = cc.query_catalog(return_members=False)
assert len(df) > 0
df_mem = cc.query_catalog(return_members=True)
assert len(df_mem) > 0
def test_cluster():
c = Cluster(CLUSTER, catalog_name=CATALOG, verbose=False)
df_gaia_mem = c.query_cluster_members_gaia_params()
assert len(df_gaia_mem) > 0
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from . import _ortmodule_utils as _utils, _ortmodule_io as _io
from ._ortmodule_graph_execution_manager import GraphExecutionManager, _run_forward
import copy
import onnx
import onnxruntime
import torch
class InferenceManager(GraphExecutionManager):
"""Concrete instance of GraphExecutionManager that is able to manage the inference model
    InferenceManager is responsible for building and running the forward graph of the inference model
"""
def __init__(self, model):
super().__init__(model)
self._export_mode = torch.onnx.TrainingMode.EVAL
def forward(self, *inputs, **kwargs):
'''Forward pass of the inference model
ONNX model is exported the first time this method is executed.
Next, we build an optimized inference graph with module_graph_builder.
Finally, we instantiate the ONNX Runtime InferenceSession through the InferenceAgent.
'''
# Exporting module to ONNX for the first time
build_graph = self._export_model(*inputs, **kwargs)
if build_graph:
# If model was exported, then initialize the graph builder
self._initialize_graph_builder(training=False)
# Save the onnx model if the model was exported
if self._save_onnx:
onnx.save(self._onnx_model, self._save_onnx_prefix + '_exported_inference_model.onnx')
# Build the inference graph
if build_graph:
self._build_graph()
module_device = _utils.get_device_from_module(self._original_module)
# The inference session should be created every time
# the graph was built or if the device changed between calls to forward
create_execution_session = build_graph or self._device != module_device
if self._device != module_device:
self._device = module_device
if create_execution_session:
# Create execution session creates the inference_session
self._create_execution_agent()
user_outputs, _ = _run_forward(self._execution_agent,
self._optimized_onnx_model,
self._device,
*_io._convert_input_to_list(self._flattened_module.named_parameters(),
self._graph_info.user_input_names,
self._flattened_module.named_buffers(),
inputs,
kwargs))
return _io.populate_user_output_from_schema_and_outputs(self._module_output_schema,
self._graph_info.user_output_names,
user_outputs)
def _build_graph(self):
"""Build an optimized inference graph using the module_graph_builder"""
super()._build_graph()
if self._save_onnx:
onnx.save(self._optimized_onnx_model, self._save_onnx_prefix + '_inference.onnx')
def _create_execution_agent(self):
"""Creates an InferenceAgent that can run forward graph on an inference model"""
session_options, providers, provider_options = self._get_session_config()
self._execution_agent = onnxruntime.training.InferenceAgent(self._optimized_onnx_model.SerializeToString(),
session_options, providers, provider_options)
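# Hedged usage sketch (illustration only; normally this class is driven
# indirectly through ORTModule rather than instantiated by hand):
#
#     manager = InferenceManager(model)   # `model` is the module wrapper expected by GraphExecutionManager
#     output = manager.forward(*inputs)   # first call exports to ONNX, builds the graph and creates the session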
|
# -*- coding: utf-8 -*-
"""q1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oYCd94tF34ZvCbTrcYwOoeVidp_WppBw
"""
import numpy as np
from math import *
import random
from bayes_opt import BayesianOptimization
a = 30
b = 20
len = 606#6060
wi = 216#2160
def get_theta(theta,a=a,b=b,l=len,w=wi):
    theta = theta/180*pi  # convert degrees to radians
x = sqrt(4*a*b/(b*sin(theta)*sin(theta)+a*cos(theta)*cos(theta)))
i,j=0,0
ww = w
num=0
detal = x-x*cos(theta)
while j != int(ww / (2 * b)) + 1:
for i in range(int(l/(2*x*cos(theta)))):
num = num+1
# print(j,int(ww / (2 * b)))
j+=1
ww=w+j*1
return num
# get_theta(60)
#
rf_bo = BayesianOptimization(
get_theta,
{'theta': (30, 60),}
)
print(rf_bo.maximize())
import matplotlib.pyplot as plt
import numpy.random as rnd
from matplotlib.patches import Ellipse
detal = a-sqrt(3)*a/2
ells = []
j=0
h = wi
while j != int(h/(2*b))+1:
for i in range(int(len/(2*a))):
if j%2==0:
e = Ellipse(xy=[i*2*a+a,j*2*b+b-detal*j], width=a*2, height=b*2, angle=0)
else:
e = Ellipse(xy=[i*2*a+2*a,j*2*b+b-detal*j], width=a*2, height=b*2, angle=0)
ells.append(e)
print(j,int(h/(2*b)))
j = j+1
h = wi+detal*j
fig = plt.figure(figsize=(10,8))
ax = fig.add_subplot(111, aspect='equal')
for e in ells:
ax.add_artist(e)
e.set_clip_box(ax.bbox)
alf = rnd.rand()+0.1
alf = 1 if alf>1 else alf
e.set_alpha(alf)
# e.set_facecolor(rnd.rand(3))
ax.set_xlim(0, len)
ax.set_ylim(0, wi)
plt.show()
# plt.savefig("demo.png")
|
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import io
import re
import sys
import glob
from .messages import *
statusStyle = {
'accepted' : 'a',
'retracted' : 'a',
'rejected' : 'r',
'objection' : 'fo',
'deferred' : 'd',
'invalid' : 'oi',
'outofscope': 'oi',
};
def printIssueList(infilename=None, outfilename=None):
if infilename is None:
if glob.glob("issues*.txt"):
# Look for digits in the filename, and use the one with the largest number if it's unique.
def extractNumber(filename):
number = re.sub("\D", "", filename)
                return int(number) if number else None
filenames = [(extractNumber(fn), fn) for fn in glob.glob("issues*.txt") if extractNumber(fn) is not None]
filenames.sort(reverse=True)
if len(filenames) > 1 and filenames[0][0] == filenames[1][0]:
die("Can't tell which issues-list file is the most recent. Explicitly pass a filename.")
return
infilename = filenames[0][1]
else:
printHelpMessage();
return
if infilename == "-":
infile = sys.stdin
else:
for suffix in [".txt", "txt", ""]:
try:
infile = io.open(infilename + suffix, 'r', encoding="utf-8")
infilename += suffix
break
            except Exception as e:
                readError = e
        else:
            die("Couldn't read from the infile:\n {0}", str(readError))
return
lines = infile.readlines()
headerInfo = extractHeaderInfo(lines, infilename)
if outfilename is None:
if infilename == "-":
outfilename = "issues-{status}-{cdate}.html".format(**headerInfo).lower()
elif infilename.endswith(".txt"):
outfilename = infilename[:-4] + ".html"
else:
outfilename = infilename + ".html"
if outfilename == "-":
outfile = sys.stdout
else:
try:
outfile = io.open(outfilename, 'w', encoding="utf-8")
        except Exception as e:
die("Couldn't write to outfile:\n{0}", str(e))
return
printHeader(outfile, headerInfo)
printIssues(outfile, lines)
printScript(outfile);
def extractHeaderInfo(lines, infilename):
title = None
url = None
status = None
for line in lines:
match = re.match("(Draft|Title|Status):\s*(.*)", line)
if match:
if match.group(1) == "Draft":
url = match.group(2)
elif match.group(1) == "Title":
title = match.group(2)
elif match.group(1) == "Status":
status = match.group(2).upper()
if url is None:
die("Missing 'Draft' metadata.")
return
if title is None:
die("Missing 'Title' metadata.")
return
match = re.search("([A-Z]{2,})-([a-z0-9-]+)-(\d{8})", url)
if match:
if status is None:
# Auto-detect from the URL and filename.
status = match.group(1)
if status == "WD" and re.search("LC", infilename, re.I):
status = "LCWD"
shortname = match.group(2)
cdate = match.group(3)
date = "{0}-{1}-{2}".format(*re.match("(\d{4})(\d\d)(\d\d)", cdate).groups())
else:
die("Draft url needs to have the format /status-shortname-date/. Got:\n{0}", url)
return
return {
'title': title,
'date': date,
'cdate': cdate,
'shortname': shortname,
'status': status,
'url': url
}
def printHelpMessage():
say('''Draft: http://www.w3.org/TR/2013/WD-css-foo-3-20130103/
Title: CSS Foo Level 3
... anything else you want here, except 4 dashes ...
----
Issue 1.
Summary: [summary]
From: [name]
Comment: [url]
Response: [url]
Closed: [Accepted/OutOfScope/Invalid/Rejected/Retracted ... or replace this "Closed" line with "Open"]
Verified: [url]
Resolved: Editorial/Bugfix (for obvious fixes)/Editors' discretion/[url to minutes]
----
Issue 2.
...''')
def printHeader(outfile, headerInfo):
outfile.write('''<!DOCTYPE html>
<meta charset="utf-8">
<title>{title} Disposition of Comments for {date} {status}</title>
<style type="text/css">
.a {{ background: lightgreen }}
.d {{ background: lightblue }}
.r {{ background: orange }}
.fo {{ background: red }}
.open {{ border: solid red; padding: 0.2em; }}
:target {{ box-shadow: 0.25em 0.25em 0.25em; }}
</style>
<h1>{title} Disposition of Comments for {date} {status}</h1>
<p>Review document: <a href="{url}">{url}</a>
<p>Editor's draft: <a href="http://dev.w3.org/csswg/{shortname}/">http://dev.w3.org/csswg/{shortname}/</a>
<p>The following color coding convention is used for comments:</p>
<ul>
<li class="a">Accepted or Rejected and positive response
<li class="r">Rejected and no response
<li class="fo">Rejected and negative response
<li class="d">Deferred
<li class="oi">Out-of-Scope or Invalid and not verified
</ul>
<p class=open>Open issues are marked like this</p>
<p>An issue can be closed as <code>Accepted</code>, <code>OutOfScope</code>,
<code>Invalid</code>, <code>Rejected</code>, or <code>Retracted</code>.
<code>Verified</code> indicates commentor's acceptance of the response.</p>
'''.format(**headerInfo))
def printIssues(outfile, lines):
text = ''.join(lines)
issues = text.split('----\n')[1:]
for issue in issues:
        issue = issue.strip().replace("&", "&amp;").replace("<", "&lt;")
if issue == "":
continue
originaltext = issue[:]
# Issue number
issue = re.sub(r"Issue (\d+)\.", r"Issue \1. <a href='#issue-\1'>#</a>", issue)
match = re.search(r"Issue (\d+)\.", issue)
if match:
index = match.group(1)
else:
die("Issues must contain a line like 'Issue 1.'. Got:\n{0}", originaltext)
# Color coding
if re.search(r"\nVerified:\s*\S+", issue):
code = 'a'
elif re.search(r"\n(Closed|Open):\s+\S+", issue):
match = re.search(r"\n(Closed|Open):\s+(\S+)", issue)
code = match.group(2)
if code.lower() in statusStyle:
code = statusStyle[code.lower()]
else:
code = ''
if match.group(1) == "Closed":
warn("Unknown status value found for issue #{num}: “{code}”", code=code, num=index)
else:
code = ''
if re.search(r"\nOpen", issue):
code += " open"
# Linkify urls
issue = re.sub(r"((http|https):\S+)", r"<a href='\1'>\1</a>", issue)
# And print it
outfile.write("<pre class='{0}' id='issue-{1}'>\n".format(code, index))
outfile.write(issue)
outfile.write("</pre>\n")
def printScript(outfile):
outfile.write('''<script>
(function () {
var sheet = document.styleSheets[0];
function addCheckbox(className) {
var element = document.querySelector('*.' + className);
var label = document.createElement('label');
label.innerHTML = element.innerHTML;
element.innerHTML = null;
var check = document.createElement('input');
check.type = 'checkbox';
if (className == 'open') {
check.checked = false;
sheet.insertRule('pre:not(.open)' + '{}', sheet.cssRules.length);
check.onchange = function (e) {
rule.style.display = this.checked ? 'none' : 'block';
}
}
else {
check.checked = true;
sheet.insertRule('pre.' + className + '{}', sheet.cssRules.length);
check.onchange = function (e) {
rule.style.display = this.checked ? 'block' : 'none';
}
}
var rule = sheet.cssRules[sheet.cssRules.length - 1];
element.appendChild(label);
label.insertBefore(check, label.firstChild);
}
['a', 'd', 'fo', 'oi', 'r', 'open'].forEach(addCheckbox);
}());
</script>
''');
|
import logging
import numpy as np
import pandas as pd
from typing import Optional
from timeit import default_timer as timer
logger = logging.getLogger(__name__)
class DefaultWriter:
"""
Default writer to be used by the agents.
Can be used in the fit() method of the agents, so
that training data can be handled by AgentManager and RemoteAgentManager.
Parameters
----------
name : str
Name of the writer.
log_interval : int
Minimum number of seconds between consecutive logs.
metadata : dict
Extra information to be logged.
"""
def __init__(self, name: str, log_interval: int = 3, metadata: Optional[dict] = None):
self._name = name
self._log_interval = log_interval
self._metadata = metadata or dict()
self._data = None
self._time_last_log = None
self._log_time = True
self.reset()
def reset(self):
"""Clear all data."""
self._data = dict()
self._initial_time = timer()
self._time_last_log = timer()
@property
def data(self):
df = pd.DataFrame(columns=('name', 'tag', 'value', 'global_step'))
for tag in self._data:
df = df.append(pd.DataFrame(self._data[tag]), ignore_index=True)
return df
def add_scalar(self, tag: str, scalar_value: float, global_step: Optional[int] = None):
"""
Store scalar value.
Note: the tag 'dw_time_elapsed' is reserved and updated internally.
It logs automatically the number of seconds elapsed
Parameters
----------
tag : str
Tag for the scalar.
scalar_value : float
Value of the scalar.
global_step : int
Step where scalar was added. If None, global steps will no longer be stored for the current tag.
"""
# Update data structures
if tag not in self._data:
self._data[tag] = dict()
self._data[tag]['name'] = []
self._data[tag]['tag'] = []
self._data[tag]['value'] = []
self._data[tag]['global_step'] = []
self._data[tag]['name'].append(self._name) # used in plots, when aggregating several writers
self._data[tag]['tag'].append(tag) # useful to convert all data to a single DataFrame
self._data[tag]['value'].append(scalar_value)
if global_step is None:
self._data[tag]['global_step'].append(np.nan)
else:
self._data[tag]['global_step'].append(global_step)
# Append time interval corresponding to global_step
if global_step is not None and self._log_time:
assert tag != 'dw_time_elapsed', 'The tag dw_time_elapsed is reserved.'
self._log_time = False
self.add_scalar(tag='dw_time_elapsed', scalar_value=timer() - self._initial_time, global_step=global_step)
self._log_time = True
# Log
if not self._log_time:
self._log()
def _log(self):
# time since last log
t_now = timer()
time_elapsed = t_now - self._time_last_log
# log if enough time has passed since the last log
max_global_step = 0
if time_elapsed > self._log_interval:
self._time_last_log = t_now
message = ''
for tag in self._data:
val = self._data[tag]['value'][-1]
gstep = self._data[tag]['global_step'][-1]
message += f'{tag} = {val} | '
if not np.isnan(gstep):
max_global_step = max(max_global_step, gstep)
header = self._name
if self._metadata:
header += f' | {self._metadata}'
message = f'[{header}] | max_global_step = {max_global_step} | ' + message
logger.info(message)
def __getattr__(self, attr):
"""
Avoid raising exceptions when invalid method is called, so
that DefaultWriter does not raise exceptions when
the code expects a tensorboard writer.
"""
if attr[:2] == '__':
raise AttributeError(attr)
def method(*args, **kwargs):
pass
return method
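# Hedged example (not part of the original module): record a few scalars and
# inspect the aggregated DataFrame.  Note that the `data` property relies on
# `DataFrame.append`, so this runs with pandas versions that still provide it.
if __name__ == "__main__":
    writer = DefaultWriter(name="demo_agent", log_interval=0)
    for step in range(3):
        writer.add_scalar("episode_rewards", 2.0 * step, global_step=step)
    print(writer.data)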
|
from django.forms import ChoiceField, ModelChoiceField, RadioSelect
from django.contrib.admin import site, ModelAdmin, StackedInline
from django.contrib.auth import admin, forms, models
from .models import Affiliation, FinprintUser, Team
class UserCreationForm(forms.UserCreationForm):
affiliation = ModelChoiceField(
queryset=Affiliation.objects.all(),
empty_label=None
)
role = ChoiceField(
choices=[('superuser', 'Superuser'), ('lead', 'Lead'), ('annotator', 'Annotator')],
widget=RadioSelect()
)
class Meta:
model = models.User
fields = ('username', 'first_name', 'last_name', 'email')
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
for field in ['first_name', 'last_name', 'email', 'affiliation', 'role']:
self.fields[field].required = True
self.fields['role'].initial = 'annotator'
self.fields['affiliation'].initial = 0
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=True)
role = self.cleaned_data.get('role')
user.groups = [2] if role == 'annotator' else [1, 2]
if role == 'superuser':
user.is_superuser = True
user.is_staff = True
user.save()
FinprintUser(
user=user,
affiliation=self.cleaned_data.get('affiliation')
).save()
return user
class FinprintUserInline(StackedInline):
actions = None
model = FinprintUser
fields = ('affiliation',)
# disable the delete button and remove delete from actions
def has_delete_permission(self, request, obj=None):
return False
class UserAdmin(admin.UserAdmin):
actions = None
fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'first_name', 'last_name', 'email', 'is_active', 'groups', 'password')
}),
)
inlines = (FinprintUserInline,)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'password1', 'password2', 'first_name', 'last_name', 'email', 'affiliation', 'role'),
}),
)
add_form = UserCreationForm
list_display = ('username', 'last_name', 'first_name', 'email', 'is_active')
list_filter = ['finprintuser__affiliation', 'groups', 'is_active', 'is_superuser']
search_fields = ['last_name', 'first_name', 'username', 'email', 'finprintuser__affiliation__name']
def get_formsets_with_inlines(self, request, obj=None):
for inline in self.get_inline_instances(request, obj):
# hide FinprintUserInline in the add view
if isinstance(inline, FinprintUserInline) and obj is None:
continue
yield inline.get_formset(request, obj), inline
# disable the delete button and remove delete from actions
def has_delete_permission(self, request, obj=None):
return False
def save_model(self, request, obj, form, change):
# if user is being set to "inactive", remove any assignments that are not complete
if 'is_active' in form.changed_data and not obj.is_active:
for assignment in obj.finprintuser.assignment_set.all():
assignment.remove(unfinished_only=True)
obj.save()
class FinprintUserAdmin(ModelAdmin):
actions = None
fields = ('user', 'affiliation')
ordering = ['affiliation__name', 'user__last_name', 'user__first_name']
list_filter = ['affiliation__name']
search_fields = ['affiliation__name', 'user__last_name', 'user__first_name']
# disable the delete button and remove delete from actions
def has_delete_permission(self, request, obj=None):
return False
site.unregister(models.User)
site.register(models.User, UserAdmin)
site.register(Affiliation)
site.register(Team)
site.register(FinprintUser, FinprintUserAdmin)
|
__author__ = "Jerry Overton"
__copyright__ = "Copyright (C) 2022 appliedAIstudio LLC"
__version__ = "0.0.1"
# needed to read the ai server host and port environment variables
import os
# needed to read the laundry schedule from a file
import json
# the Highcliff ai_actions we are going to implement
from highcliff_sdk.laundry import ConfirmLaundryMaintained
class LaundryMaintenanceConfirmation(ConfirmLaundryMaintained):
def behavior(self):
# update the context
new_context = self._smart_appliance_settings()
self.update_context(new_context)
print("laundry maintenance is complete")
@staticmethod
def _smart_appliance_settings():
        with open("context/sdfSmartApplianceSettings.json") as file:
            return json.load(file)
def start_confirmation():
LaundryMaintenanceConfirmation(server=os.environ['ai_server'], port=os.environ['ai_server_port'])
if __name__ == "__main__":
start_confirmation()
|
from flask import Flask
from flask_wtf.csrf import CSRFProtect
app = Flask(__name__)
csrf = CSRFProtect(app)
@app.route("/")
def pagina_inicial():
return "Pipeline-DevOps-video"
if __name__ == '__main__':
app.run(debug=True)
|
# coding: utf-8
import requests
from bs4 import BeautifulSoup
import re
import json
import os
from xml.etree import ElementTree
import time
import io
import pandas as pd
import math
from gotoeat_map.module import getLatLng, checkRemovedMerchant
def main():
merchantFilePath = os.path.dirname(
os.path.abspath(__file__)) + "/merchants.json"
if os.path.exists(merchantFilePath):
json_open = open(merchantFilePath, "r", encoding="utf8")
merchants = json.load(json_open)
else:
merchants = {
"data": [],
"names": []
}
findMerchants = []
response = requests.get(
"https://gotoeat-fukuoka.jp/csv/fk_gotoeat_UTF-8.csv")
response.encoding = response.apparent_encoding
reader = pd.read_csv(io.BytesIO(response.content),
names=("merchant_id", "merchant_name", "merchant_name_kana", "merchant_type", "merchant_postal_code", "merchant_prefecture", "merchant_area", "merchant_address", "merchant_building_name", "merchant_tel", "merchant_url", "merchant_addedDate"))
for row in reader.iterrows():
if row[1]["merchant_id"] == "id":
continue
merchant_name = str(row[1]["merchant_name"])
merchant_type = str(row[1]["merchant_type"])
merchant_postal_code = str(row[1]["merchant_postal_code"])
merchant_area = str(row[1]["merchant_area"])
merchant_address = str(row[1]["merchant_prefecture"]) + \
str(row[1]["merchant_area"]) + \
str(row[1]["merchant_address"])
if type(row[1]["merchant_building_name"]) is str:
merchant_address += row[1]["merchant_building_name"]
merchant_tel = str(row[1]["merchant_tel"])
print(merchant_name + " - " + merchant_address)
findMerchants.append(merchant_name)
if merchant_name in merchants["names"]:
continue
lat, lng = getLatLng(merchant_address)
print(str(lat) + " " + str(lng))
merchants["data"].append({
"name": merchant_name,
"type": merchant_type,
"area": merchant_area,
"address": merchant_address,
"postal_code": merchant_postal_code,
"tel": merchant_tel,
"lat": lat,
"lng": lng
})
merchants["names"].append(merchant_name)
with open(merchantFilePath, mode="w", encoding="utf8") as f:
f.write(json.dumps(merchants, indent=4, ensure_ascii=False))
merchants = checkRemovedMerchant(merchants, findMerchants)
with open(merchantFilePath, mode="w", encoding="utf8") as f:
f.write(json.dumps(merchants, indent=4, ensure_ascii=False))
if __name__ == "__main__":
    main()
|
# Django settings for qatrack project.
import django.conf.global_settings as DEFAULT_SETTINGS
import os
#-----------------------------------------------------------------------------
# Debug settings - remember to set both DEBUG & TEMPLATE_DEBUG to false when
# deploying (either here or in local_settings.py)
DEBUG = True
TEMPLATE_DEBUG = True
# Who to email when server errors occur
ADMINS = (
('Admin Name', 'YOUR_EMAIL_ADDRESS_GOES_HERE'),
)
MANAGERS = ADMINS
SEND_BROKEN_LINK_EMAILS = True
#-----------------------------------------------------------------------------
# misc settings
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
VERSION = "0.2.8.1"
BUG_REPORT_URL = "https://bitbucket.org/tohccmedphys/qatrackplus/issues/new"
FEATURE_REQUEST_URL = BUG_REPORT_URL
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'qatrack.wsgi.application'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '78kj_s=rqh46bsv10eb-)uyy02kr35jy19pp*7u$4-te=x0^86'
ROOT_URLCONF = 'qatrack.urls'
SITE_ID = 1
SITE_NAME = "QATrack+"
#-----------------------------------------------------------------------------
# Database settings
# if you wish to override the database settings below (e.g. for deployment),
# please do so here or in a local_settings.py file
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_ROOT, '..', 'db/default.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
        'PASSWORD': '',                  # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
#----------------------------------------------------------------------------
# Default local settings
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Toronto'
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
FORMAT_MODULE_PATH = "qatrack.formats"
INPUT_DATE_FORMATS = (
"%d-%m-%Y %H:%M", "%d/%m/%Y %H:%M",
"%d-%m-%y %H:%M", "%d/%m/%y %H:%M",
)
SIMPLE_DATE_FORMAT = "%d-%m-%Y"
DATETIME_HELP = "Format DD-MM-YY hh:mm (hh:mm is 24h time e.g. 31-05-12 14:30)"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
CONSTANT_PRECISION = 8
#----------------------------------------------------------------------------
# static media settings
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "media")
UPLOAD_ROOT = os.path.join(MEDIA_ROOT, "uploads")
TMP_UPLOAD_ROOT = os.path.join(UPLOAD_ROOT, "tmp")
for d in (MEDIA_ROOT, UPLOAD_ROOT, TMP_UPLOAD_ROOT):
if not os.path.isdir(d):
os.mkdir(d)
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
UPLOADS_URL = MEDIA_URL + 'uploads/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, "admin_media"),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# add a site specific css file if one doesn't already exist
SITE_SPECIFIC_CSS_PATH = os.path.join(PROJECT_ROOT, "qa", "static", "css", "site.css")
if not os.path.isfile(SITE_SPECIFIC_CSS_PATH):
with open(SITE_SPECIFIC_CSS_PATH, 'w') as f:
f.write("/* You can place any site specific css in this file*/\n")
#------------------------------------------------------------------------------
# Middleware
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'qatrack.middleware.login_required.LoginRequiredMiddleware',
'qatrack.middleware.maintain_filters.FilterPersistMiddleware',
)
# for django-debug-toolbar
INTERNAL_IPS = ('127.0.0.1',)
# login required middleware settings
LOGIN_EXEMPT_URLS = [r"^accounts/", ]
ACCOUNT_ACTIVATION_DAYS = 7
LOGIN_REDIRECT_URL = '/qa/unit/'
LOGIN_URL = "/accounts/login/"
#------------------------------------------------------------------------------
# Template settings
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
#('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#)),
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, "templates"),
os.path.join(PROJECT_ROOT, "theme_bootstrap", "templates"),
"genericdropdown/templates",
)
TEMPLATE_CONTEXT_PROCESSORS = list(DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS)
TEMPLATE_CONTEXT_PROCESSORS += [
'django.core.context_processors.request',
"qatrack.context_processors.site",
]
#------------------------------------------------------------------------------
# Fixtures
# you can add more default fixture locations here
FIXTURE_DIRS = (
'fixtures/defaults/qa',
'fixtures/defaults/units',
)
#------------------------------------------------------------------------------
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.formtools',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'tastypie',
'genericdropdown',
'qatrack.cache',
'qatrack.accounts',
'qatrack.units',
'qatrack.qa',
'qatrack.theme_bootstrap',
'qatrack.data_tables',
'qatrack.notifications',
'qatrack.contacts',
'south',
'admin_views',
]
#-----------------------------------------------------------------------------
# Cache settings
CACHE_UNREVIEWED_COUNT = 'unreviewed-count'
CACHE_QA_FREQUENCIES = 'qa-frequencies'
MAX_CACHE_TIMEOUT = 24 * 60 * 60 # 24hours
CACHE_LOCATION = os.path.join(PROJECT_ROOT, "cache", "cache_data")
if not os.path.isdir(CACHE_LOCATION):
os.mkdir(CACHE_LOCATION)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': CACHE_LOCATION,
'TIMEOUT': MAX_CACHE_TIMEOUT,
}
}
#-----------------------------------------------------------------------------
# Session Settings
SESSION_COOKIE_AGE = 14 * 24 * 60 * 60
#-----------------------------------------------------------------------------
# Email and notification settings
EMAIL_NOTIFICATION_USER = None
EMAIL_NOTIFICATION_PWD = None
EMAIL_NOTIFICATION_TEMPLATE = "notification_email.txt"
EMAIL_NOTIFICATION_SENDER = "qatrack"
# use either a static subject or a customizable template
#EMAIL_NOTIFICATION_SUBJECT = "QATrack+ Test Status Notification"
EMAIL_NOTIFICATION_SUBJECT_TEMPLATE = "notification_email_subject.txt"
EMAIL_FAIL_SILENTLY = True
EMAIL_HOST = "" # e.g. 'smtp.gmail.com'
EMAIL_HOST_USER = '' # e.g. "randle.taylor@gmail.com"
EMAIL_HOST_PASSWORD = 'your_password_here'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
#-----------------------------------------------------------------------------
# Account settings
# a list of group names to automatically add users to when they sign up
DEFAULT_GROUP_NAMES = [] # eg ["Therapists"]
#-----------------------------------------------------------------------------
# Authentication backend settings
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
#'qatrack.accounts.backends.ActiveDirectoryGroupMembershipSSLBackend',
#'qatrack.accounts.backends.WindowsIntegratedAuthenticationBackend',
)
# active directory settings (not required if only using ModelBackend
AD_DNS_NAME = '' # e.g. ad.civic1.ottawahospital.on.ca
# If using non-SSL use these
AD_LDAP_PORT = 389
AD_LDAP_URL = 'ldap://%s:%s' % (AD_DNS_NAME, AD_LDAP_PORT)
AD_LDAP_USER = ''
AD_LDAP_PW = ''
AD_LU_ACCOUNT_NAME = "sAMAccountName"
AD_LU_MAIL = "mail"
AD_LU_SURNAME = "sn"
AD_LU_GIVEN_NAME = "givenName"
AD_LU_MEMBER_OF = "memberOf"
# If using SSL use these:
# AD_LDAP_PORT=636
# AD_LDAP_URL='ldaps://%s:%s' % (AD_DNS_NAME,AD_LDAP_PORT)
AD_SEARCH_DN = "" # eg "dc=ottawahospital,dc=on,dc=ca"
AD_NT4_DOMAIN = "" # Network domain that AD server is part of
AD_SEARCH_FIELDS = [AD_LU_MAIL, AD_LU_SURNAME, AD_LU_GIVEN_NAME, AD_LU_ACCOUNT_NAME, AD_LU_MEMBER_OF]
AD_MEMBERSHIP_REQ = [] # eg ["*TOHCC - All Staff | Tout le personnel - CCLHO"]
# AD_CERT_FILE='/path/to/your/cert.txt'
AD_DEBUG_FILE = None
AD_DEBUG = False
CLEAN_USERNAME_STRING = ''
#------------------------------------------------------------------------------
# Logging Settings
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'qatrack.console': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
FORCE_SCRIPT_NAME = None
#------------------------------------------------------------------------------
# QA Settings
PAGINATE_DEFAULT = 50 # remember to change iDisplayLength in unittestcollection.js and testlistinstance.js if you change this
NHIST = 5 # number of historical test results to show when reviewing/performing qa
ICON_SETTINGS = {
'SHOW_STATUS_ICONS_PERFORM': True,
'SHOW_STATUS_ICONS_LISTING': True,
'SHOW_STATUS_ICONS_REVIEW': True,
'SHOW_STATUS_ICONS_HISTORY': False,
'SHOW_REVIEW_ICONS': True,
'SHOW_DUE_ICONS': True,
}
# Display ordering on the "Choose Unit" page. (Use "name" or "number")
ORDER_UNITS_BY = "number"
#------------------------------------------------------------------------------
# Testing settings
TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
COVERAGE_ADDITIONAL_MODULES = ["qatrack.tests"]
#------------------------------------------------------------------------------
# local_settings contains anything that should be overridden
# based on site specific requirements (e.g. deployment, development etc)
try:
from local_settings import * # NOQA
except ImportError:
pass
|
################
# Dependencies #
################
# Sci
import pandas as pd
import numpy as np
from scipy import stats
# General
import math
import os
import string
import pickle
# Workflow
from sklearn.model_selection import GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
# Preprocessing
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
# Trees
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
# Ensemble
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier
# Support vector machines
from sklearn.svm import SVC
# Other classifiers
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
# Metrics
from sklearn.metrics import confusion_matrix
################
# Master train #
################
def getTransformPipeline():
imputer = SimpleImputer(missing_values = np.nan, strategy = "mean")
numberPipeline = Pipeline([
("numberFilter", GetNumbers()),
("imputer", imputer)
])
textPipeline = Pipeline([
("textFilter", GetText()),
("vectoriser", MixedDict())
])
transformPipeline = [
("feats", FeatureUnion([
("numberPipeline", numberPipeline),
("textPipeline", textPipeline)
])),
("scaler", StandardScaler()),
]
return transformPipeline
def train(X, y, projectName, scoring):
print ("\nIdentifying type of problem...")
#transformPipeline = getTransformPipeline()
if isClf(y):
models, names = trainClf(X, y, projectName, scoring)
else:
models, names = trainReg(X, y, projectName, scoring)
for i in range(len(models)):
# Save model
path = os.path.join("models", projectName, names[i] + ".sav")
os.makedirs(os.path.dirname(path), exist_ok=True)
f = open(path, "wb")
pickle.dump(models[i], f)
f.close()
def stackedTrain(X, y, projectName, scoring):
print ("\nTraining stacked...")
model_dir = "models"
basePath = os.path.join(model_dir, projectName)
models = os.listdir(basePath)
df = pd.DataFrame()
#y = pd.DataFrame(data = y, columns = ["y"])
skipName = "ensemble"
for model_name in models:
model_name_base = model_name.split(".")[0]
suffix = model_name.split(".")[1]
if model_name_base != skipName and suffix == "sav":
print ("\n" + model_name_base)
path = os.path.join(basePath, model_name)
model = pickle.load(open(path, "rb"))
y_hat = model.predict(X)
df[model_name_base] = y_hat
tn, fp, fn, tp = confusion_matrix(y, y_hat).ravel()
n = tn + fp + fn + tp
precision = tp / (tp + fp)
recall = tp / (tp + fn)
accuracy = (tp + tn) / n
f1 = stats.hmean([precision, recall])
print (accuracy)
path = os.path.join("models", projectName, model_name_base + ".txt")
f = open(path, "w")
f.write("N:\t\t" + str(n))
f.write("\n\nTrue positive:\t" + str(tp) + "\t(" + str(tp/n) + ")")
f.write("\nTrue negative:\t" + str(tn) + "\t(" + str(tn/n) + ")")
f.write("\nFalse positive:\t" + str(fp) + "\t(" + str(fp/n) + ")")
f.write("\nFalse negative:\t" + str(fn) + "\t(" + str(fn/n) + ")")
f.write("\n\nAccuracy:\t" + str(accuracy))
f.write("\n\nPrecision:\t" + str(precision))
f.write("\nRecall:\t\t" + str(recall))
f.write("\nF1:\t\t" + str(f1))
f.close()
kSplits = 2
param_grid = {}
model = RandomForestClassifier()
#transformPipeline = getTransformPipeline()
#pipelineArray = transformPipeline[:]
#pipelineArray.append(("clf", model))
#pipeline = Pipeline(pipelineArray)
grid_search = GridSearchCV(model, param_grid = param_grid, cv = kSplits, verbose = 2, scoring = scoring)
grid_search.fit(df, y)
bestParameters = grid_search.best_params_
model.set_params(**bestParameters)
model.fit(df, y)
path = os.path.join("models", projectName, skipName + ".sav")
f = open(path, "wb")
pickle.dump(model, f)
f.close()
return
################
# Transformers #
################
def isNumber(cType):
if cType != np.float64 and cType != np.int64:
return False
return True
class GetText(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def transform(self, X, *_):
for column in X.columns:
cType = X[column].dtype
if isNumber(cType):
X = X.drop([column], axis = 1)
return X
def fit(self, X, *_):
return self
class GetNumbers(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def transform(self, X, *_):
for column in X.columns:
cType = X[column].dtype
if not isNumber(cType):
X = X.drop([column], axis = 1)
return X
def fit(self, X, *_):
return self
def text_process(text):
text = str(text)
text = [char for char in text if char not in string.punctuation]
text = "".join(text)
text = text.lower()
text = [word for word in text.split()]# if word not in stopWords]
return text
def textExtraction(df, series):
vectorizer = CountVectorizer(analyzer = text_process, min_df = 0.1)
df[series] = df[series].replace(np.nan, '', regex=True)
vectorizer.fit_transform(df[series])
vocab = vectorizer.get_feature_names()
return vocab
class MixedDict(BaseEstimator, TransformerMixin):
def __init__(self):
self.vocabDict = {}
def transform(self, X, *_):
for column in X.columns:
if column in self.vocabDict:
vectorizer = CountVectorizer(analyzer = text_process, vocabulary = self.vocabDict[column])
if len(vectorizer.vocabulary) > 0:
vector = vectorizer.fit_transform(X[column])
i = 0
vector = vector.toarray()
for each in vector.T:
new_name = column + "_" + str(i)
X[new_name] = vector.T[i]
i = i + 1
X = X.drop([column], axis = 1)
return X
def fit(self, X, *_):
for column in X.columns:
try:
vocab = textExtraction(X, column)
self.vocabDict[column] = vocab
#print ("- \"" + column + "\" has a vocabulary\n--\t"+ str(vocab))
except Exception:
self.vocabDict[column] = []
#print ("- \"" + column+ "\" does not have a vocabulary")
return self
##############
# Predicting #
##############
def predict(X, json_data, index):
print ("\nPredicting...")
regressors = []
model_dir = "models"
basePath = os.path.join(model_dir, json_data["projectName"])
models = os.listdir(basePath)
skipName = "ensemble.sav"
for model_name in models:
suffix = model_name.split(".")[1]
if model_name != skipName and suffix == "sav":
print (model_name.split(".")[0])
path = os.path.join(basePath, model_name)
model_name_base = model_name.split(".")[0]
model = pickle.load(open(path, "rb"))
y = model.predict(X)
output = pd.DataFrame(y, columns = [json_data["outputY"]], index = index.index)
output[json_data["indexCol"]] = output.index
output = output[[json_data["indexCol"], json_data["outputY"]]]
writeCSV(json_data["outputFile"] + "_" + model_name_base + ".csv", output, json_data["projectName"])
return
def stackedPredict(X, json_data, index):
print ("Stacked predicting...")
projectName = json_data["projectName"]
model_dir = "models"
csv_dir = "output"
basePath = os.path.join(csv_dir, projectName)
modelPath = os.path.join(model_dir, projectName)
CSVs = os.listdir(basePath)
df = pd.DataFrame()
baseFileName = json_data["outputFile"].split(".")[0]
skipName = baseFileName + "_ensemble.csv"
for csv_name in CSVs:
if csv_name != skipName:
path = os.path.join(basePath, csv_name)
csv = pd.read_csv(path)
#print (csv)
#y = model.predict(X)
df[csv_name] = csv[json_data["outputY"]]
model_name = "ensemble.sav"
path = os.path.join(modelPath, model_name)
model = pickle.load(open(path, "rb"))
y = model.predict(df)
output = pd.DataFrame(y, columns = [json_data["outputY"]], index = index.index)
output[json_data["indexCol"]] = output.index
output = output[[json_data["indexCol"], json_data["outputY"]]]
writeCSV(skipName, output, projectName)
return
###############################
# Classification / Regression #
###############################
def isClf(y):
cutOff = 0.1
sampleSize = len(y)
print ("Sample size: " + str(sampleSize))
uniques = len(np.unique(y))
print ("Unique y values: " + str(uniques))
ratio = uniques / sampleSize
if ratio < cutOff:
return True
return False
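# Worked example (illustrative values only): 1000 samples with 3 distinct target
# values give a ratio of 0.003 < 0.1, so isClf() returns True (classification);
# a continuous target with ~1000 distinct values gives a ratio near 1.0 and the
# problem is treated as regression instead.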
####################
# Train classifier #
####################
def trainClf(X, y, projectName, scoring):
transformPipeline = getTransformPipeline()
print ("Type: Classification")
names = []
classifiers = []
hyperParameters = []
####
# Decision tree
####
clf = DecisionTreeClassifier()
maxDepthArray = [20]
minSamplesSplitArray = [4]
parameters = [{"max_depth":maxDepthArray, "min_samples_split": minSamplesSplitArray}]
names.append("Decision tree")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Bagging
####
clf = BaggingClassifier()
nEstimatorsArray = [10]
parameters = [{"n_estimators": nEstimatorsArray}]
names.append("Bagging")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Random Forest (bagging+)
####
clf = RandomForestClassifier()
maxDepthArray = [20]
minSamplesSplitArray = [2]
parameters = [{"max_depth":maxDepthArray, "min_samples_split": minSamplesSplitArray}]
names.append("Random forest")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Adaboost (boosting)
####
clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth = 10, min_samples_split = 5))
n_estimatorsArray = [1]
parameters = [{"n_estimators": n_estimatorsArray}]
names.append("Adaboost")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Gradient boosting
####
clf = GradientBoostingClassifier()
nEstimatorsArray = [10]
parameters = [{"n_estimators": nEstimatorsArray}]
names.append("Gradient boosting")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Logistic regression
####
clf = LogisticRegression()
penaltyArray = ["l1", "l2"]
parameters = [{}]
names.append("Logistic regression")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Naive bayes
####
clf = GaussianNB()
parameters = [{}]
names.append("Naive bayes")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# K-nearest neighbours
####
clf = KNeighborsClassifier()
nNeighborsArray = [5]
parameters = [{"n_neighbors": nNeighborsArray}]
names.append("K-nearest neighbours")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Support Vector Classifier
####
clf = SVC()
cArray = [1]
degreeArray = [1]
gammaArray = [0.3]
kernelArray = ["poly"]
parameters = [{"kernel": kernelArray, "degree": degreeArray, "gamma": gammaArray, "C": cArray}]
names.append("Support vector classifier")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Multi-Layer Perceptron
####
clf = MLPClassifier(hidden_layer_sizes=(100,50,50))
alphaArray = [1e-05]
parameters = [{"alpha": alphaArray}]
names.append("Multi-layer perceptron")
classifiers.append(clf)
hyperParameters.append(parameters)
####
# Train
####
pipelines = []
for i in range(len(classifiers)):
print ("\nTraining: " + str(classifiers[i]))
# Get pipeline
pipelineArray = transformPipeline[:]
pipelineArray.append(("clf", classifiers[i]))
pipeline = Pipeline(pipelineArray)
kSplits = 2
param_grid = {}
for parameter in hyperParameters[i][0]:
param_grid["clf__" + parameter] = hyperParameters[i][0][parameter]
grid_search = GridSearchCV(pipeline, param_grid = param_grid, cv = kSplits, verbose = 2, scoring = scoring)
grid_search.fit(X, y)
bestParameters = grid_search.best_params_
pipeline.set_params(**bestParameters)
pipeline.fit(X, y)
pipelines.append(pipeline)
return pipelines, names
####################
# Train regression #
####################
def trainReg(X, y, projectName, scoring):
transformPipeline = getTransformPipeline()
print ("Type: Regression")
names = []
regressors = []
hyperParameters = []
####
# Train
####
# No regressors are registered yet; when they are, mirror trainClf: wrap each one
# in the transform pipeline and grid-search its hyper-parameters.
pipelines = []
for i in range(len(regressors)):
print ("\nTraining: " + str(regressors[i]))
pipelineArray = transformPipeline[:]
pipelineArray.append(("clf", regressors[i]))
pipeline = Pipeline(pipelineArray)
param_grid = {}
for parameter in hyperParameters[i][0]:
param_grid["clf__" + parameter] = hyperParameters[i][0][parameter]
grid_search = GridSearchCV(pipeline, param_grid = param_grid, cv = 2, verbose = 2, scoring = scoring)
grid_search.fit(X, y)
pipeline.set_params(**grid_search.best_params_)
pipeline.fit(X, y)
pipelines.append(pipeline)
return pipelines, names
################
# Read / Write #
################
def readCSV(projectName, fileName):
print ("\nReading CSV...")
path = os.path.join("raw", projectName, fileName)
df = pd.read_csv(path)
return df
def writeCSV(fileName, df, projectName):
print ("\nWriting CSV...")
path = os.path.join("output", projectName, fileName)
os.makedirs(os.path.dirname(path), exist_ok=True)
df.to_csv(path, index = False, header = True)
##################
# Select columns #
##################
def getX(df, ignore):
print ("\nSelect columns (X)...")
# Exclude columns
for column in ignore:
if column in df:
df = df.drop([column], axis = 1)
return df
def getY(df, yColumn):
print ("\nSelect columns (Y)...")
y = df[yColumn]
y = y.values.ravel()
return y
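# End-to-end sketch (illustrative only; the project, file and column names below
# are made up):
#
#   df = readCSV("myProject", "train.csv")
#   X = getX(df, ignore=["id", "target"])
#   y = getY(df, "target")
#   train(X, y, "myProject", scoring="accuracy")
#   stackedTrain(X, y, "myProject", scoring="accuracy")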
|
# -*- coding: utf-8 -*-
"""This file is part of the TPOT library.
TPOT was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Weixuan Fu (weixuanf@upenn.edu)
- Daniel Angell (dpa34@drexel.edu)
- and many more generous open source contributors
TPOT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
TPOT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with TPOT. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.feature_selection.base import SelectorMixin
import inspect
class Operator(object):
"""Base class for operators in TPOT."""
root = False # Whether this operator type can be the root of the tree
import_hash = None
sklearn_class = None
arg_types = None
class ARGType(object):
"""Base class for parameter specifications."""
pass
def source_decode(sourcecode, verbose=0):
"""Decode operator source and import operator class.
Parameters
----------
sourcecode: string
a string of operator source (e.g 'sklearn.feature_selection.RFE')
verbose: int, optional (default: 0)
How much information TPOT communicates while it's running.
0 = none, 1 = minimal, 2 = high, 3 = all.
if verbose > 2 then an ImportError is raised during initialization
Returns
-------
import_str: string
a string of operator class source (e.g. 'sklearn.feature_selection')
op_str: string
a string of operator class (e.g. 'RFE')
op_obj: object
operator class (e.g. RFE)
"""
tmp_path = sourcecode.split('.')
op_str = tmp_path.pop()
import_str = '.'.join(tmp_path)
try:
if sourcecode.startswith('tpot.'):
exec('from {} import {}'.format(import_str[4:], op_str))
else:
exec('from {} import {}'.format(import_str, op_str))
op_obj = eval(op_str)
except Exception as e:
if verbose > 2:
raise ImportError('Error: could not import {}.\n{}'.format(sourcecode, e))
else:
print('Warning: {} is not available and will not be used by TPOT.'.format(sourcecode))
op_obj = None
return import_str, op_str, op_obj
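# Example (illustrative only): decoding a scikit-learn estimator path returns the
# module path, the class name and the imported class object, e.g.
#
#   import_str, op_str, op_obj = source_decode('sklearn.linear_model.LogisticRegression')
#   # import_str == 'sklearn.linear_model', op_str == 'LogisticRegression',
#   # op_obj is the LogisticRegression class (or None if the import fails).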
def set_sample_weight(pipeline_steps, sample_weight=None):
"""Recursively iterates through all objects in the pipeline and sets sample weight.
Parameters
----------
pipeline_steps: array-like
List of (str, obj) tuples from a scikit-learn pipeline or related object
sample_weight: array-like
List of sample weight
Returns
-------
sample_weight_dict:
A dictionary of sample_weight
"""
sample_weight_dict = {}
if not isinstance(sample_weight, type(None)):
for (pname, obj) in pipeline_steps:
if inspect.getargspec(obj.fit).args.count('sample_weight'):
step_sw = pname + '__sample_weight'
sample_weight_dict[step_sw] = sample_weight
if sample_weight_dict:
return sample_weight_dict
else:
return None
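# Example (illustrative only): for pipeline_steps = [('sel', selector), ('clf', clf)]
# where only clf.fit() accepts a sample_weight argument, the function returns
# {'clf__sample_weight': sample_weight}, ready to be passed on as **fit_params.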
def ARGTypeClassFactory(classname, prange, BaseClass=ARGType):
"""Dynamically create parameter type class.
Parameters
----------
classname: string
parameter name in an operator
prange: list
list of values for the parameter in an operator
BaseClass: Class
inherited BaseClass for parameter
Returns
-------
Class
parameter class
"""
return type(classname, (BaseClass,), {'values': prange})
def TPOTOperatorClassFactory(opsourse, opdict, BaseClass=Operator, ArgBaseClass=ARGType, verbose=0):
"""Dynamically create operator class.
Parameters
----------
opsourse: string
operator source in config dictionary (key)
opdict: dictionary
operator params in config dictionary (value)
BaseClass: Class
inherited BaseClass for operator
ArgBaseClass: Class
inherited BaseClass for parameter
verbose: int, optional (default: 0)
How much information TPOT communicates while it's running.
0 = none, 1 = minimal, 2 = high, 3 = all.
if verbose > 2 then an ImportError is raised during initialization
Returns
-------
op_class: Class
a new class for an operator
arg_types: list
a list of parameter class
"""
class_profile = {}
dep_op_list = {} # list of nested estimator/callable function
dep_op_type = {} # type of nested estimator/callable function
import_str, op_str, op_obj = source_decode(opsourse, verbose=verbose)
if not op_obj:
return None, None
else:
# define if the operator can be the root of a pipeline
if issubclass(op_obj, ClassifierMixin):
class_profile['root'] = True
optype = "Classifier"
elif issubclass(op_obj, RegressorMixin):
class_profile['root'] = True
optype = "Regressor"
if issubclass(op_obj, TransformerMixin):
optype = "Transformer"
if issubclass(op_obj, SelectorMixin):
optype = "Selector"
@classmethod
def op_type(cls):
"""Return the operator type.
Possible values:
"Classifier", "Regressor", "Selector", "Transformer"
"""
return optype
class_profile['type'] = op_type
class_profile['sklearn_class'] = op_obj
import_hash = {}
import_hash[import_str] = [op_str]
arg_types = []
for pname in sorted(opdict.keys()):
prange = opdict[pname]
if not isinstance(prange, dict):
classname = '{}__{}'.format(op_str, pname)
arg_types.append(ARGTypeClassFactory(classname, prange, ArgBaseClass))
else:
for dkey, dval in prange.items():
dep_import_str, dep_op_str, dep_op_obj = source_decode(dkey, verbose=verbose)
if dep_import_str in import_hash:
import_hash[import_str].append(dep_op_str)
else:
import_hash[dep_import_str] = [dep_op_str]
dep_op_list[pname] = dep_op_str
dep_op_type[pname] = dep_op_obj
if dval:
for dpname in sorted(dval.keys()):
dprange = dval[dpname]
classname = '{}__{}__{}'.format(op_str, dep_op_str, dpname)
arg_types.append(ARGTypeClassFactory(classname, dprange, ArgBaseClass))
class_profile['arg_types'] = tuple(arg_types)
class_profile['import_hash'] = import_hash
class_profile['dep_op_list'] = dep_op_list
class_profile['dep_op_type'] = dep_op_type
@classmethod
def parameter_types(cls):
"""Return the argument and return types of an operator.
Parameters
----------
None
Returns
-------
parameter_types: tuple
Tuple of the DEAP parameter types and the DEAP return type for the
operator
"""
return ([np.ndarray] + arg_types, np.ndarray) # (input types, return types)
class_profile['parameter_types'] = parameter_types
@classmethod
def export(cls, *args):
"""Represent the operator as a string so that it can be exported to a file.
Parameters
----------
args
Arbitrary arguments to be passed to the operator
Returns
-------
export_string: str
String representation of the sklearn class with its parameters in
the format:
SklearnClassName(param1="val1", param2=val2)
"""
op_arguments = []
if dep_op_list:
dep_op_arguments = {}
for dep_op_str in dep_op_list.values():
dep_op_arguments[dep_op_str] = []
for arg_class, arg_value in zip(arg_types, args):
aname_split = arg_class.__name__.split('__')
if isinstance(arg_value, str):
arg_value = '\"{}\"'.format(arg_value)
if len(aname_split) == 2: # simple parameter
op_arguments.append("{}={}".format(aname_split[-1], arg_value))
# Parameter of internal operator as a parameter in the
# operator, usually in Selector
else:
dep_op_arguments[aname_split[1]].append("{}={}".format(aname_split[-1], arg_value))
tmp_op_args = []
if dep_op_list:
# To make sure the initial operator is the first parameter, just
# for better presentation
for dep_op_pname, dep_op_str in dep_op_list.items():
arg_value = dep_op_str # a callable function, e.g. a scoring function
doptype = dep_op_type[dep_op_pname]
if inspect.isclass(doptype): # an estimator
if issubclass(doptype, BaseEstimator) or \
issubclass(doptype, ClassifierMixin) or \
issubclass(doptype, RegressorMixin) or \
issubclass(doptype, TransformerMixin):
arg_value = "{}({})".format(dep_op_str, ", ".join(dep_op_arguments[dep_op_str]))
tmp_op_args.append("{}={}".format(dep_op_pname, arg_value))
op_arguments = tmp_op_args + op_arguments
return "{}({})".format(op_obj.__name__, ", ".join(op_arguments))
class_profile['export'] = export
op_classname = 'TPOT_{}'.format(op_str)
op_class = type(op_classname, (BaseClass,), class_profile)
op_class.__name__ = op_str
return op_class, arg_types
|
#!/usr/bin/python
"""
Description: The DUT CA stands between the Test Manager and WICED, converting CAPI commands from TCP/IP link to WICED console
commands through the serial link.
"""
import serial
import io
import getopt
import sys
import time
import glob
import os
import select
import termios
import fcntl
import traceback
import socket
import string
# define global variables
HOST = ''
PORT = 9000
endl = "\r\n"
connlist = []
dut_current_command = None
last_command = ''
DEBUG_PRINTS = False
INTERFACE_ID = "wlan0"
MODEL_NAME = "WICED"
VENDOR_NAME = "Broadcom"
CA_VERSION = "4.2" # DUT_CA Version
HARDWARE_VERSION = "1.0" # DUT Hardware Version
SOFTWARE_VERSION = "1.1" # SDK Version
try:
import warnings
warnings.filterwarnings("ignore", "", FutureWarning, "", 0)
except NameError:
pass
class UUartTimestampError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class WicedError(Exception):
pass
class uuart_timestamp:
def __init__(self, tty, format, dump_file, verbose, baud=115200):
self.debug_print = True
self.dump_file = dump_file
self.format = format
#self.interactive_mode = interactive_mode
self.ser = serial.Serial()
self.ser.baudrate = baud
self.ser.port = tty
self.ser.timeout = 0 # Read timeout.
self.ser.writeTimeout = 0 # Write timeout.
self.stdin_fd = sys.stdin.fileno()
self.stdout_fd = sys.stdout.fileno()
self.termios_settings = []
# Buffer that contains the data read from the serial port and is to be written to stdout
self.ser_in_buf = ""
# Buffer that contains the data read from stdin and is to be written to the serial port
self.ser_out_buf = ""
self.start()
def start(self):
# Open the tty port.
try:
self.ser.open()
except serial.SerialException, x:
# Repackage the exception as a UUartTimestampError.
raise UUartTimestampError(x)
# Clear the receive buffer.
self.ser.flushInput()
self.ser.flushOutput()
# Write some sync bytes to the port.
#self.ser.write('\x55\x55\x55\x55')
# Get a copy of the terminal settings.
new = termios.tcgetattr(self.stdin_fd)
self.termios_settings = termios.tcgetattr(self.stdin_fd)
# Set the terminal to non-canonical (unbuffered) input handling.
new[3] = new[3] & ~termios.ICANON
new[3] = new[3] & ~termios.ECHO
termios.tcsetattr(self.stdin_fd, termios.TCSANOW, new)
new = termios.tcgetattr(self.stdout_fd)
new[3] = new[3] & ~termios.ICANON
termios.tcsetattr(self.stdout_fd, termios.TCSANOW, new)
# Currently we don't change the serial's termios settings. Just leaving this code here in case we need
# to in the future.
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = termios.tcgetattr(self.ser.fd)
termios.tcsetattr(self.ser.fd, termios.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
def stop(self):
""" This method closes the tty and cleans up the lock file.
"""
termios.tcsetattr(self.stdin_fd, termios.TCSANOW, self.termios_settings)
# Close the serial port.
self.ser.close()
# Non blocking receive_line, return '' immediately if no new line received
def receive_line(self):
rx_string = ''
in_fd_list = []
out_fd_list = []
# If there is any serial out data, try to write it before we attempt another read
if len(self.ser_in_buf) > 0:
if self.ser_in_buf.find('\n') != -1:
n = 0
'''
if self.interactive_mode:
n = os.write(self.stdout_fd, self.ser_in_buf[0:self.ser_in_buf.index('\n') + 1])
else:
n = len(self.ser_in_buf[0:self.ser_in_buf.index('\n') + 1])
'''
# FIXME
#n = os.write(self.stdout_fd, self.ser_in_buf[0:self.ser_in_buf.index('\n') + 1])
n = len(self.ser_in_buf[0:self.ser_in_buf.index('\n') + 1])
rx_string += self.ser_in_buf[0:n]
# Shorten buffer
self.ser_in_buf = self.ser_in_buf[n:]
'''
if self.interactive_mode:
n = os.write(self.stdout_fd, self.ser_in_buf)
rx_string += self.ser_in_buf[0:n]
# Shorten buffer
self.ser_in_buf = self.ser_in_buf[n:]
'''
# There is data to write to the serial port, check if we can write it
if len(self.ser_out_buf) > 0:
out_fd_list.append(self.ser.fd)
# Check to see if our output buffer isn't too full. If it isn't we can attempt to read from the keyboard.
#if len(self.ser_out_buf) < 1024:
# in_fd_list.append(self.stdin_fd)
# Check to see if our input buffer isn't too full. If not, we can attempt to read some more.
if len(self.ser_in_buf) < 1024:
in_fd_list.append(self.ser.fd)
# Don't bother testing to see if we can write to the stdout - just assume we always can.
#print "DEBUG: fd lists:", in_fd_list, out_fd_list
# Await for 0.1 seconds
(in_fds, out_fds, except_fds) = select.select(in_fd_list, out_fd_list, [], 0.1)
#print "DEBUG: after lists:", in_fds, out_fds
if self.ser.fd in in_fds:
self.ser_in_buf += os.read(self.ser.fd, 1024)
'''
if interactive_mode:
if self.stdin_fd in in_fds:
self.ser_out_buf += os.read(self.stdin_fd, 1024)
'''
if self.ser.fd in out_fds:
self.ser_out_buf = self.ser_out_buf.replace('\n', '\r')
self.ser_out_buf = self.ser_out_buf.replace('\x7f', '\b')
n = os.write(self.ser.fd, self.ser_out_buf)
self.ser_out_buf = self.ser_out_buf[n:]
termios.tcflush(self.ser.fd, termios.TCOFLUSH)
self.ser.flushOutput()
#print "DEBUG: ser_in_buf", [self.ser_in_buf]
#print "DEBUG: ser_out_buf", [self.ser_out_buf]
#else:
# FIXME:
'''
if self.stdin_fd in in_fds:
self.ser_out_buf += os.read(self.stdin_fd, 1024)
n = os.write(self.stdout_fd, self.ser_out_buf)
rx_string += self.ser_out_buf[0:n]
self.ser_out_buf = self.ser_out_buf[n:]
'''
return rx_string
def send(self, msg):
self.ser_out_buf += msg
def usage(exec_name):
print "Usage:"
print " %s -l <IP address of local interface> -p <port number> -t <terminal> [-b <baud>] [-h] [--help]"%exec_name
print " -l <interface IP address>"
print " The IP address of a specific netwrok interface"
print " -p <port number>"
print " The port number to listen on"
print " -t <terminal>"
print " Path to a uart terminal device for connecting to the user UART."
print " -b <baud>"
print " Optional bit rate parameter for configuring the serial port."
print " -i"
print " Interactive mode. Use this mode with console applications."
print " No timestamping of screen output occurs in this mode. File output can be timestamped."
print " -o, --output=FILE"
print " Optional output file."
print " -r"
print " Overwrite output file if it already exists."
print " -a"
print " Append to output file if it already exists."
print " -f [h|f|i|d|b|n]"
print " Format of timestamp: human, float, integer, diff, float+diff(b), none."
print " -q"
print " Don't prepend output with a brief banner."
print " --help | -h"
print " This help message."
def parse_args():
try:
opts, args = getopt.getopt(sys.argv[1:], 't:b:f:o:qiharl:p:v', ['help','listen','port'])
except getopt.GetoptError:
# print help information and exit:
usage(sys.argv[0])
sys.exit(2)
# The local IP address to listen on
host = ''
# The CAPI server port number
port = PORT
terminal = "/dev/ttyUSB0"
baud = None
dump_file = "dump_file.txt"
format = 'n'
verbose = True
banner = 'n'
overwrite = '-a'
# Decode the options.
for o, a in opts:
if o == "-t":
terminal = a
elif o == "-b":
baud = a
elif o == "-f":
if a in ('h', 'f', 'i', 'd', 'b', 'n'):
format = a
elif o in ('-o', '--output'):
dump_file = a
elif o in ("-q"):
banner = 'n'
elif o in ("-a", "-r"):
overwrite = o
elif o in ("-l", "--listen"):
host = a
elif o in ("-p", "--port"):
port = int(a)
elif o in ("-h", "--help"):
usage(sys.argv[0])
sys.exit(0)
# Error checking on command line parameters.
if terminal == None:
sys.stderr.write("Error: required parameter 'terminal' not provided.\n")
usage(sys.argv[0])
sys.exit(2)
return (host, port, terminal, baud, format, dump_file, verbose, overwrite)
class CAPI:
"""class to process CAPI commands from Testing Engine
"""
def __init__(self, conn, addr, uart, outfile):
self.conn, self.addr, self.uart, self.outfile = conn, addr, uart, outfile
# The supported commands
self.command_list = (
('sta_verify_ip_connection', self.sta_verify_ip_connection),
# CAPI commands accomplished by CA
('ca_get_version', self.ca_get_version),
# Unsupported CAPI commands
('sta_get_stats', self.not_implemented),
('sta_set_ibss', self.not_implemented),
('sta_set_uapsd', self.not_implemented),
)
# Processing CAPI commands (coming from UCC)
def UCC_Read(self, wiced_cmd):
processed = None
cmd = wiced_cmd.strip()
print "From UCC> %s" % cmd
if self.outfile:
self.outfile.write("%s%s From UCC> %s\n" % (time.strftime("%b %d %H:%M:%S", time.localtime()), (".%03d" % ((time.time()-int(time.time()))*1000)), cmd))
parts = cmd.split(",")
if not len(parts) % 2:
self.UCC_Write("status,INVALID,errorCode,syntax_error")
return
for command, function in self.command_list:
capi_cmd = parts[0].lower()
if capi_cmd == command:
try:
function(parts[1:])
except WicedError:
self.UCC_Write("status,ERROR,errorCode,Wiced_watchdogged")
processed = True
break
if not processed:
#wiced_cmd = wiced_cmd
self.UCC_Write("status,RUNNING")
self.WicedWrite(cmd)
wiced_reply = self.WicedRead() # Throw away the echo
# time.sleep(1)
wiced_reply = self.WicedRead()
self.UCC_Write(wiced_reply)
# self.UCC_Write("status,INVALID,errorCode,unknown_command")
# Send message to UCC
def UCC_Write(self, msg):
print "To UCC<", msg
if self.outfile:
self.outfile.write("%s%s To UCC< %s\n" % (time.strftime("%b %d %H:%M:%S", time.localtime()), (".%03d" % ((time.time()-int(time.time()))*1000)), msg))
self.conn.send(msg + endl)
# Blocking while receiving Wiced's status reply, debug message will be filtered
def WicedRead(self):
Wiced_reply = ''
while len(Wiced_reply) == 0:
Wiced_reply = self.uart.receive_line()
Wiced_reply = Wiced_reply.strip()
if Wiced_reply == '':
time.sleep(0.01)
continue
# Get rid of prompt
if len(Wiced_reply) > 2:
if Wiced_reply[0:2] == "> ":
Wiced_reply = Wiced_reply[2:]
print "From WICED>", Wiced_reply
else:
print Wiced_reply
if self.outfile:
self.outfile.write("%s%s From WICED> %s\n" % (time.strftime("%b %d %H:%M:%S", time.localtime()), (".%03d" % ((time.time()-int(time.time()))*1000)), Wiced_reply))
return Wiced_reply
def WicedWrite(self, msg):
# CA makes sure that all commands sent to DUT are in lowercase
# msg = msg.lower()
print "To WICED<", msg
if self.outfile:
self.outfile.write("%s%s To WICED< %s" % (time.strftime("%b %d %H:%M:%S", time.localtime()), (".%03d" % ((time.time()-int(time.time()))*1000)), msg))
# Our Wiced's maximum uart receive buffer is set to 256
if len(msg) > 256:
print "Error: Due to limited buffer size, Wiced can not receive more than 256 bytes at once"
else:
msg = msg + "\n"
self.uart.send(msg)
def capi_parse(self, param_name_list, args):
pairs = []
Wiced_cmd = ''
param_dict = {} # Used to validate input values
ignored_args = ["interface", "streamid"]
for i in xrange(len(args)/2):
pairs.append((args[i*2], args[i*2+1]))
for param in param_name_list:
# Initialize the parameter dictionary with empty string
param_dict[param] = ''
for name, value in pairs:
name = name.lower()
# Don't lowercase SSID and Passphrase as they may be case sensitive
if name not in ("ssid", "passphrase"):
value = value.lower()
if name == param:
# Add the parameter into parameter dictionary
param_dict[param] = value
# Ignore these arguments as we do not support them
if name in ignored_args:
continue
# STA_SET_ENCRYPTION
if name == "encptype":
if value == "none" or value == "open":
value = "0"
elif value == "wep":
value = "1"
# STA_SET_PSK
if name == "keymgmttype":
if value == "wpa1" or value == "wpa":
value = "2"
elif value == "wpa_mixed": # this is not in the spec
value == "3"
elif value == "wpa2":
value = "4"
if name == "flag": # Debug command: echo
if value in ("on", "1"):
value = "1"
elif value in ("off", "0"):
value = "0"
else:
value = "0"
Wiced_cmd += ",%s" % value
return Wiced_cmd, param_dict
def capi_format(self, return_value_list, Wiced_reply):
tm_reply = ''
if Wiced_reply == "$PowerON":
tm_reply = "===== Wiced Rebooted ====="
# FIXME: multiple returns
return tm_reply
Wiced_reply = Wiced_reply.upper()
values = Wiced_reply.split(",")
if return_value_list is None:
return_value_list = []
if values[0] != '$COMPLETE':
tm_reply = "status," + values[0][1:] # Get rid of the $ sign
else:
tm_reply = "status,COMPLETE"
# Ignore status
values = values[1:]
# The offset of skipped parameters
skip = 0
for i in xrange(len(return_value_list)):
name = return_value_list[i]
if len(values) >= i-skip+1:
value = values[i-skip]
else:
value = None
# Filling in those parameters with fixed values
if name == 'interface':
value = INTERFACE_ID
skip += 1
# StreamID is fixed as we support only one stream at this moment
elif name == 'streamID':
value = '1'
skip += 1
# secondary-dns is not supported
elif name == 'secondary-dns':
value = '0.0.0.0'
skip += 1
if value == None:
print ">>>>>>>>>>>>Error: The last Wiced response doesn't comply to CA control protocol"
break
tm_reply = tm_reply + ',' + name + ',' + value
return tm_reply
# CAPI commands implementation
def ca_get_version(self, args):
self.UCC_Write("status,RUNNING")
self.UCC_Write("status,COMPLETE,version,%s" % (CA_VERSION))
def sta_verify_ip_connection(self, args):
self.UCC_Write("status,RUNNING")
input_param_list = ["interface", "destination", "timeout"]
return_value_list = ["connected"]
Wiced_cmd, param_dict = self.capi_parse(input_param_list, args)
Wiced_cmd = "verify_ip_conn" + Wiced_cmd
self.WicedWrite(Wiced_cmd + "\n")
Wiced_reply = self.WicedRead()
tm_reply = self.capi_format(return_value_list, Wiced_reply)
self.UCC_Write(tm_reply)
def not_implemented(self, args):
self.UCC_Write("status,ERROR,errorCode,not_implemented")
pass
class CA:
def __init__(self):
self.outfile = None
pass
def pollNewConn(self, s, uart):
"""Routine to check for incoming connections. Accepts a socket object 's'
as argument and returns a User object 'user'
"""
try:
conn, addr = s.accept()
except socket.error:
return None
out_str = "Connection from %s\n" % str(addr)
print "="*40
print out_str,
print "="*40
if self.outfile:
#self.outfile.write("%s%s CA: %s\n" % (time.strftime("%b %d %H:%M:%S", time.localtime()), (".%03d" % ((time.time()-int(time.time()))*1000)), out_str))
self.outfile.write(out_str)
conn.setblocking(0) # check this
client = CAPI(conn, addr, uart, self.outfile)
return client
def main(self):
global dut_current_command
'''pycom main() start from here'''
(host, port, terminal, baud, format, dumpfile, verbose, overwrite) = parse_args()
if baud == None:
baud = 115200
if port == None:
port = 4000
if dumpfile != "":
if glob.glob(dumpfile):
if overwrite not in ('-a', '-r'):
overwrite = raw_input("%s already exists. y[es] to overwrite, n[o] to append? [yN] "%dumpfile)
if overwrite in ('-r', 'y', 'yes', 'Y', 'o', 'overwrite', 'O'):
print("Overwriting %s" % (dumpfile,))
os.remove(dumpfile)
self.outfile = open(dumpfile,'w')
elif overwrite in ('-a', 'N', 'n', 'no', 'a', 'A', 'append', ''):
self.outfile = open(dumpfile,'a')
else:
print("Invalid input. Aborting...")
sys.exit(1)
else:
self.outfile = open(dumpfile,'w')
# set up the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.setblocking(0)
s.listen(5)
client = None # The telnet client, Test Manager
try:
uart = uuart_timestamp(terminal, format, dumpfile, verbose, baud)
except UUartTimestampError:
print "Could not open serial port"
sys.exit(1)
last_time = time.time()
first_time = last_time
last_dump_time = last_time
if verbose != 'n':
print ("\n============== %s ==============")%(time.strftime("%b %d %H:%M:%S ", time.localtime()))
if self.outfile:
self.outfile.write("\n============== %s ==============\n"%(time.strftime("%b %d %H:%M:%S ", time.localtime())))
try:
while True:
time.sleep(0.01) # WARNING: big sleep time will affect response time
if client is None:
client = self.pollNewConn(s, uart) # check for incoming connections
else:
try:
data = client.conn.recv(2048) # The maximum length of a CAPI command is 2048
if not data:
client = None
else:
client.UCC_Read(data)
except socket.error, e:
if e[0] == 32: # Broken pipe
print "Disconnected with Test Manager"
elif e[0] == 11: # Resource temporarily unavailable (no data available)
pass
else:
print "Socket error::", e
client = None
read_line = uart.receive_line().strip() # May encounter the I/O error exception here
if read_line != '':
global DEBUG_PRINTS
if DEBUG_PRINTS:
print "From WICED> %s" % read_line
pass
out_str = ""
this_time = time.time()
#print out_str
if dumpfile != "":
#outfile.write(out_str + "\n")
# I get bored waiting for the output file to flush.
# So manually flush it every 10 seconds...
if (this_time-last_dump_time) > 10:
self.outfile.flush() # flush every 10 seconds
last_dump_time = this_time
except KeyboardInterrupt:
if verbose != 'n':
print ("=========== Exit: %s ===========\n\n")%(time.strftime("%b %d %H:%M:%S ", time.localtime()))
if self.outfile:
self.outfile.write("\n=========== Exit: %s ===========\n\n"%(time.strftime("%b %d %H:%M:%S ", time.localtime())))
print "Shutting down ..."
uart.stop()
s.shutdown(2)
s.close()
if self.outfile:
self.outfile.flush()
if client: # Close the connection
client.conn.close()
client = None
if __name__ == "__main__":
ca = CA()
ca.main()
|
import pyhomogenize as pyh
from ._consts import _bounds, _fmt
class PreProcessing:
def __init__(
self,
ds=None,
var_name=None,
freq="year",
time_range=None,
crop_time_axis=True,
check_time_axis=True,
**kwargs,
):
if ds is None:
raise ValueError("Please select an input xarray dataset. 'ds=...'")
self.ds = ds
self.var_name = var_name
self.freq = freq
self.fmt = _fmt[freq]
self.afmt = _fmt[ds.frequency]
self.time_range = time_range
self.crop_time_axis = crop_time_axis
self.check_time_axis = check_time_axis
self.preproc = self.preprocessing()
def preprocessing(self):
def get_time_range_as_str(time, fmt):
basics = pyh.basics()
ts = basics.date_to_str(time[0], fmt)
te = basics.date_to_str(time[-1], fmt)
return [ts, te]
time_control = pyh.time_control(self.ds)
if not self.var_name:
self.var_name = time_control.get_var_name()
avail_time = get_time_range_as_str(time_control.time, self.afmt)
if self.time_range:
time_control.select_time_range(self.time_range)
if self.crop_time_axis:
time_control.select_limited_time_range(
smonth=_bounds[self.freq]["start"],
emonth=_bounds[self.freq]["end"],
)
req_time = get_time_range_as_str(time_control.time, self.fmt)
if self.check_time_axis:
time_control.check_timestamps(correct=True)
self.TimeRange = req_time
self.ATimeRange = avail_time
return time_control.ds
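# Usage sketch (illustrative only; assumes an xarray dataset `ds` whose
# `frequency` attribute is a key of _fmt, and made-up variable/range names):
#
#   pre = PreProcessing(ds=ds, var_name="tas", freq="year", time_range=["2000", "2010"])
#   cropped = pre.preproc              # cropped, timestamp-checked dataset
#   print(pre.TimeRange, pre.ATimeRange)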
|
from dataclasses import dataclass
from .InvoiceOperationList import InvoiceOperationList
from .BasicOnlineInvoiceRequest import BasicOnlineInvoiceRequest
@dataclass
class ManageInvoiceRequest(BasicOnlineInvoiceRequest):
"""Request type of the POST /manageInvoice REST operation
:param exchange_token: The decoded unique token issued for the current transaction
:param invoice_operations: Batch invoice operations of the request
"""
exchange_token: str
invoice_operations: InvoiceOperationList
|
"""
Asserts for system tests.
These are meant to be run by the sys_test.py script.
"""
import unittest
import apsw
import os
class SysTest(unittest.TestCase):
testPath = os.path.join(os.path.dirname(__file__), "__test__")
dbPath = os.path.join(testPath, "systest.db")
axPacket = b'\x9a\x92\x86\xa8\xa4\x98`\x8a\xa6\xa8\x86\xaa\x84w\x03\xf0\x01\x074\xf7\x9a\xbaZ\x03${]\xa8_,\x01\x0f\x00\x00\x00\t\x05\x00\x00\x00\x9a\x00\x00\x00\x03\xd2\x04\x00\x00\xea\x04\x00\x00\xb0\x04b\xbd\x0190_\xd6\xff\xff\xff\xff\x0f\xfe\xc8\xfe\xc8d\x08\x0c4:\x05\x05\xe7x~'
def test_checkDBEntry(self):
conn = apsw.Connection(self.dbPath)
cur = conn.cursor()
cur.execute("select count(*) from ax_frame;")
data = cur.fetchone()[0]
self.assertGreaterEqual(data, 1)
cur.execute("select count(*) from telemetry_packet;")
data = cur.fetchone()[0]
self.assertGreaterEqual(data, 1)
if __name__ == "__main__":
unittest.main()
|
from math import sqrt
import torch
def gaussian_radius(det_size, min_overlap=0.7):
# Inherit from CenterTrack
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):
"""Generate 2D gaussian kernel.
Args:
radius (int): Radius of gaussian kernel.
sigma (int): Sigma of gaussian function. Default: 1.
dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.
device (str): Device of gaussian tensor. Default: 'cpu'.
Returns:
h (Tensor): Gaussian kernel with a
``(2 * radius + 1) * (2 * radius + 1)`` shape.
"""
x = torch.arange(
-radius, radius + 1, dtype=dtype, device=device).view(1, -1)
y = torch.arange(
-radius, radius + 1, dtype=dtype, device=device).view(-1, 1)
h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()
h[h < torch.finfo(h.dtype).eps * h.max()] = 0
return h
def gen_gaussian_target(heatmap, center, radius, k=1):
"""Generate 2D gaussian heatmap.
Args:
heatmap (Tensor): Input heatmap, the gaussian kernel will cover on
it and maintain the max value.
center (list[int]): Coord of gaussian kernel's center.
radius (int): Radius of gaussian kernel.
k (int): Coefficient of gaussian kernel. Default: 1.
Returns:
out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
"""
diameter = 2 * radius + 1
gaussian_kernel = gaussian2D(
radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device)
x, y = center
height, width = heatmap.shape[:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian_kernel[radius - top:radius + bottom,
radius - left:radius + right]
out_heatmap = heatmap
torch.max(
masked_heatmap,
masked_gaussian * k,
out=out_heatmap[y - top:y + bottom, x - left:x + right])
return out_heatmap
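# Minimal usage sketch (illustrative sizes and centre coordinates):
#
#   heatmap = torch.zeros((64, 64))
#   radius = max(1, int(gaussian_radius((20, 20), min_overlap=0.7)))
#   heatmap = gen_gaussian_target(heatmap, [32, 32], radius)
#   # heatmap.max() is now 1.0 at the gaussian centre (32, 32)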
|
"""
Tools to create programs-related data for use in bok choy tests.
"""
from common.test.acceptance.fixtures.config import ConfigModelFixture
class ProgramsConfigMixin:
"""Mixin providing a method used to configure the programs feature."""
def set_programs_api_configuration(self, is_enabled=False):
"""Dynamically adjusts the Programs config model during tests."""
ConfigModelFixture('/config/programs', {
'enabled': is_enabled,
'marketing_path': '/foo',
}).install()
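# Usage sketch (illustrative only; AcceptanceTest stands in for whatever base
# test class the suite actually uses):
#
#   class ProgramsPageTest(ProgramsConfigMixin, AcceptanceTest):
#       def setUp(self):
#           super().setUp()
#           self.set_programs_api_configuration(is_enabled=True)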
|
"""
TODO: is newline on windows different for python?
TODO: dry-run? use logging for printing
TODO: treat PART as a custom command
http://click.pocoo.org/6/commands/#custom-multi-commands ?
"""
import logging
import logging.config
import click
from bamp.config import add_config, get_root_path
from bamp.engine import bamp_version
from bamp.helpers import docs
from bamp.helpers.callbacks import enable_debug, read_config, required
from bamp.helpers.ui import machine_out, verify_response
from bamp.persistence import bamp_files
from bamp.vcs import (
create_commit,
create_tag,
is_tree_clean,
make_message,
make_tag_name,
)
logger = logging.getLogger("bamp")
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--dry-run",
help=docs.DRY_RUN_HELP,
is_flag=True,
)
@click.option("-n", "new_line", help="Don't print new line", is_flag=True, default=True)
@click.option(
"--debug",
help=docs.DEBUG_OPTION_HELP,
is_flag=True,
expose_value=False,
callback=enable_debug,
is_eager=True,
)
@click.option(
"--config",
type=click.Path(exists=True, dir_okay=False),
help=docs.CONFIG_OPTION_HELP,
callback=read_config,
)
@click.option("-v", "--version", help=docs.VERSION_OPTION_HELP, callback=required)
@click.option(
"files",
"-f",
"--file",
help=docs.FILES_OPTION_HELP,
type=click.Path(exists=True),
multiple=True,
)
@click.option("vcs", "-V", "--vcs", help=docs.VCS_OPTION_HELP)
@click.option("allow_dirty", "-a", "--allow-dirty", is_flag=True)
@click.option(
"commit", "-c", "--commit", is_flag=True, help=docs.COMMIT_FLAG_OPTION_HELP
)
@click.option("message", "-m", "--message", help=docs.MESSAGE_OPTION_HELP)
@click.option("tag", "-t", "--tag", is_flag=True, help=docs.TAG_FLAG_OPTION_HELP)
@click.option(
"tag_name",
"-T",
"--tag-name",
help=docs.TAG_NAME_OPTION_HELP,
metavar=docs.TAG_NAME_OPTION_METAVAR,
)
@click.argument(
"part", nargs=1, type=click.Choice(["patch", "minor", "major", "current"])
)
@add_config
def bamp(
dry_run,
new_line,
version,
part,
files,
vcs,
allow_dirty,
commit,
message,
config,
tag,
tag_name,
):
root_path = get_root_path()
sanity_checks(root_path)
if part == "current":
return machine_out(version)
new_version = bamp_version(version, part)
if dry_run:
return machine_out(new_version)
bamp_files(version, new_version, files)
if commit:
commit_message = make_message(message, version, new_version)
commit_sha1 = create_commit(vcs, files, commit_message)
if tag and commit_sha1:
tag_message = make_tag_name(tag_name, new_version)
create_tag(vcs, commit_sha1, tag_message)
machine_out(new_version)
@verify_response
def sanity_checks(root_path):
"""Run environment and configuration sanity checks
:param root_path: path to the vcs repo dir
:type root_path: str
:returns: True, [] if env is sane, False and error message otherwise
:rtype: tuple(bool, str)
"""
ctx = click.get_current_context()
if ctx.params.get("commit"):
is_tree_clean(ctx.params.get("vcs"), root_path)
return True, []
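# Example invocations (illustrative only; flags follow the options declared above):
#
#   bamp current                # print the configured version and exit
#   bamp patch --dry-run        # show what the bumped version would be
#   bamp minor -c -t            # bump, commit the changed files and tag the commit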
|
# benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
def write_netcdf(filename,zlib=False,least_significant_digit=None,format='NETCDF4',closeit=False):
file = netCDF4.Dataset(filename,'w',format=format,diskless=True,persist=True)
file.createDimension('n1', n1dim)
file.createDimension('n2', n2dim)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
'f8',('n1','n2','n3','n4'),zlib=zlib,least_significant_digit=least_significant_digit)
foo.testme="hi I am an attribute"
foo.testme1="hi I am an attribute"
foo.testme2="hi I am an attribute"
foo.testme3="hi I am an attribute"
foo.testme4="hi I am an attribute"
foo.testme5="hi I am an attribute"
foo[:] = array
if closeit: file.close()
return file
def read_netcdf(ncfile):
data = ncfile.variables['data'][:]
for format in ['NETCDF4','NETCDF3_CLASSIC','NETCDF3_64BIT']:
sys.stdout.write('testing file format %s ...\n' % format)
# writing, no compression.
t = Timer("write_netcdf('test1.nc',closeit=True,format='%s')" % format,"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
ncfile = write_netcdf('test1.nc',format=format)
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# test diskless=True in nc_open
format='NETCDF3_CLASSIC'
trials=50
sys.stdout.write('test caching of file in memory on open for %s\n' % format)
sys.stdout.write('testing file format %s ...\n' % format)
write_netcdf('test1.nc',format=format,closeit=True)
ncfile = netCDF4.Dataset('test1.nc',diskless=False)
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading (from disk) took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
ncfile.close()
ncfile = netCDF4.Dataset('test1.nc',diskless=True)
# setting diskless=True should cache the file in memory,
# resulting in faster reads.
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading (cached in memory) took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
ncfile.close()
|
from django.db.models.base import Model
from django.db.models.deletion import CASCADE
from django.db.models.fields import CharField, DateTimeField, PositiveIntegerField, TextField, BooleanField
from django.db.models.fields.related import ForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.conf import settings
from mptt.models import MPTTModel, TreeForeignKey
User = settings.AUTH_USER_MODEL
class Tag(Model):
label = CharField(max_length=50)
class TaggedItem(Model):
tag = ForeignKey(to=Tag, on_delete=CASCADE)
content_type = ForeignKey(to=ContentType, on_delete=CASCADE)
object_id = PositiveIntegerField()
content_object = GenericForeignKey()
class Like(Model):
user = ForeignKey(to=User, on_delete=CASCADE)
content_type = ForeignKey(to=ContentType, on_delete=CASCADE)
object_id = PositiveIntegerField()
content_object = GenericForeignKey()
class Comment(MPTTModel):
text = TextField(max_length=300)
hidden = BooleanField(default=False, blank=True)
created_at = DateTimeField(auto_now_add=True)
updated_at = DateTimeField(auto_now=True)
user = ForeignKey(to=User, on_delete=CASCADE)
reply_to = TreeForeignKey(to='self', on_delete=CASCADE, related_name="reply", null=True, blank=True)
content_type = ForeignKey(to=ContentType, on_delete=CASCADE)
object_id = PositiveIntegerField()
content_object = GenericForeignKey()
class MPTTMeta:
order_insertion_by = ['created_at']
parent_attr = 'reply_to'
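# Usage sketch (illustrative only; Product is a hypothetical model):
#
#   tag = Tag.objects.create(label="django")
#   TaggedItem.objects.create(
#       tag=tag,
#       content_type=ContentType.objects.get_for_model(Product),
#       object_id=product.pk,
#   )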
|
import pytest
import pizdyuk.pzd_errors as errors
from pizdyuk.market.core import MarketObjectBase, Action
def test_market_object_base():
object_base = MarketObjectBase()
with pytest.raises(Exception) as e:
object_base.update()
assert isinstance(e.value, errors.PzdNotImplementedError)
with pytest.raises(Exception) as e:
object_base.get_object_info()
assert isinstance(e.value, errors.PzdNotImplementedError)
def test_action(mocker):
mock = mocker.Mock()
action = Action(mock.function, "mock_value")
action.execute()
mock.function.assert_called_once()
mock.function.assert_called_with("mock_value")
|
from django.contrib import admin
from myApp import models
from aip import AipFace
from .views import get_en_name
# Register your models here.
class DetailGradeSubInline(admin.TabularInline):
model = models.UserInformation
extra = 0
@admin.register(models.IdentityInformation)
class IdentityAdmin(admin.ModelAdmin):
list_per_page = 50
@admin.register(models.CollegeInformation)
class CollegeAdmin(admin.ModelAdmin):
list_per_page = 50
list_display = ['pk', 'college_name']
@admin.register(models.MajorInformation)
class MajorAdmin(admin.ModelAdmin):
list_per_page = 50
list_display = ['pk', 'major_name']
@admin.register(models.GradeInformation)
class GradeAdmin(admin.ModelAdmin):
list_per_page = 50
list_display = ['pk', 'grade_name']
@admin.register(models.DetailGradeInformation)
class DetailGradeAdmin(admin.ModelAdmin):
def show_info(self):
return "%s-%s-%s" % (self.college_id.college_name, self.major_id.major_name, self.grade_id.grade_name)
list_per_page = 50
inlines = [DetailGradeSubInline]
actions_on_top = False
actions_on_bottom = False
list_filter = ['college_id__college_name']
search_fields = ['college_id__college_name', 'major_id__major_name', 'grade_id__grade_name']
list_display = [show_info, 'student_num']
list_editable = ['student_num']
def save_model(self, request, obj, form, change):
# Whenever a specific class is created, add the corresponding group to the face library
college_id = str(obj.college_id.id)
major_id = str(obj.major_id.id)
grade_id = str(obj.grade_id.id)
app_id = '14807296'
api_key = 'HrRWN5CIoqfr2Xje4SwUdKdK'
secret_key = 'fGupsKW4qtIrqYW3bA5ToiLk19oO483X'
client = AipFace(appId = app_id, apiKey = api_key, secretKey = secret_key)
group_id_list = college_id + major_id + grade_id
client.groupAdd(group_id_list)
super(DetailGradeAdmin, self).save_model(request, obj, form, change)
def delete_model(self, request, obj):
# Whenever a specific class is deleted, remove the corresponding group from the face library
college_id = str(obj.college_id.id)
major_id = str(obj.major_id.id)
grade_id = str(obj.grade_id.id)
app_id = '14807296'
api_key = 'HrRWN5CIoqfr2Xje4SwUdKdK'
secret_key = 'fGupsKW4qtIrqYW3bA5ToiLk19oO483X'
client = AipFace(appId = app_id, apiKey = api_key, secretKey = secret_key)
group_id_list = college_id + major_id + grade_id
client.groupDelete(group_id_list)
super(DetailGradeAdmin, self).delete_model(request, obj)
@admin.register(models.SignInformation)
class SignAdmin(admin.ModelAdmin):
list_per_page = 50
@admin.register(models.UserInformation)
class UserAdmin(admin.ModelAdmin):
def gender(self):
if self.gender is True:
return '女'
else:
return '男'
def show_identity(self):
if self.identity.role_name == 'Teacher':
return '教师'
else:
return '学生'
list_per_page = 40
list_display = ['name', gender, show_identity, 'grade', 'account', 'password']
list_filter = ['identity']
search_fields = ['name']
list_editable = ['grade']
fields = ['account', 'password', 'name', 'gender', 'identity', 'grade', 'new_sign', 'isDelete']
def delete_model(self, request, obj):
if obj.identity.role_name == 'Student':
app_id = '14807296'
api_key = 'HrRWN5CIoqfr2Xje4SwUdKdK'
secret_key = 'fGupsKW4qtIrqYW3bA5ToiLk19oO483X'
client = AipFace(appId = app_id, apiKey = api_key, secretKey = secret_key)
user_id = get_en_name(obj.name)
college_id = str(obj.college_id.id)
major_id = str(obj.major_id.id)
grade_id = str(obj.grade_id.id)
group_id_list = college_id + major_id + grade_id
client.deleteUser(user_id = user_id, group_id = group_id_list)
super(UserAdmin, self).delete_model(request, obj)
|
# https://leetcode.com/problems/convert-sorted-list-to-binary-search-tree/
#
# Given a singly linked list where elements are sorted in ascending order,
# convert it to a height balanced BST.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sortedListToBST(self, head):
"""
:type head: ListNode
:rtype: TreeNode
"""
if head == None:
return None
array = []
while head is not None:
array.append(head.val)
head = head.next
root = self.sortedArrayToBST(array)
return root
def sortedArrayToBST(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
numsSize = len(nums)
if numsSize == 0:
return None
if numsSize == 1:
return TreeNode(nums[0])
root = TreeNode(nums[numsSize // 2])
root.left = self.sortedArrayToBST(nums[:numsSize // 2])
root.right = self.sortedArrayToBST(nums[numsSize // 2 + 1:])
return root
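# Worked example: the sorted list 1 -> 2 -> 3 -> 4 -> 5 becomes the array
# [1, 2, 3, 4, 5]; nums[len(nums) // 2] == 3 is the root, [1, 2] builds the left
# subtree and [4, 5] the right, giving a height-balanced BST.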
|
import unittest
from pgdrive import PGDriveEnv
class TestObsActionSpace(unittest.TestCase):
def setUp(self):
self.env = PGDriveEnv()
def test_obs_space(self):
obs = self.env.reset()
assert self.env.observation_space.contains(obs), (self.env.observation_space, obs.shape)
obs, _, _, _ = self.env.step(self.env.action_space.sample())
assert self.env.observation_space.contains(obs), (self.env.observation_space, obs.shape)
def tearDown(self):
self.env.close()
if __name__ == '__main__':
unittest.main()
|
from unittest.mock import patch
import google.api_core.exceptions
from octue.cloud.pub_sub.service import Service
from octue.cloud.pub_sub.topic import Topic
from octue.resources.service_backends import GCPPubSubBackend
from tests.base import BaseTestCase
class TestTopic(BaseTestCase):
service = Service(backend=GCPPubSubBackend(project_name="my-project"))
topic = Topic(name="world", namespace="hello", service=service)
def test_namespace_only_in_name_once(self):
"""Test that the topic's namespace only appears in its name once, even if it is repeated."""
self.assertEqual(self.topic.name, "hello.world")
topic_with_repeated_namespace = Topic(name="hello.world", namespace="hello", service=self.service)
self.assertEqual(topic_with_repeated_namespace.name, "hello.world")
def test_repr(self):
"""Test that Topics are represented correctly."""
self.assertEqual(repr(self.topic), "<Topic(hello.world)>")
def test_error_raised_when_creating_without_allowing_existing_when_topic_already_exists(self):
"""Test that an error is raised when trying to create a topic that already exists and `allow_existing` is
`False`.
"""
with patch(
"octue.cloud.pub_sub.service.pubsub_v1.PublisherClient.create_topic",
side_effect=google.api_core.exceptions.AlreadyExists(""),
):
with self.assertRaises(google.api_core.exceptions.AlreadyExists):
self.topic.create(allow_existing=False)
def test_create_with_allow_existing_when_already_exists(self):
"""Test that trying to create a topic that already exists when `allow_existing` is `True` results in no error."""
with patch(
"octue.cloud.pub_sub.service.pubsub_v1.PublisherClient.create_topic",
side_effect=google.api_core.exceptions.AlreadyExists(""),
):
self.topic.create(allow_existing=True)
def test_exists(self):
"""Test that topics can be tested for existence."""
with patch("octue.cloud.pub_sub.service.pubsub_v1.PublisherClient.get_topic"):
self.assertTrue(self.topic.exists())
with patch(
"octue.cloud.pub_sub.service.pubsub_v1.PublisherClient.get_topic",
side_effect=google.api_core.exceptions.NotFound(""),
):
self.assertFalse(self.topic.exists())
|
from django.views import View
from django.shortcuts import render, redirect
from json import load
from django.conf import settings
from datetime import datetime
with open(settings.NEWS_JSON_PATH, 'r') as f:
posts = load(f)
class CreateNews(View):
def get(self, request):
return render(request, "news/create_news.html")
def post(self, request):
created = datetime.now()
posts.append({"created": created.strftime("%d/%m/%Y %H:%M:%S"),
"title": request.POST.get("title"),
"text": request.POST.get("text"),
"link": created.strftime("d%m%Y%H%M%S")})
return redirect("/news/")
class NewsView(View):
def get(self, request):
if request.GET.get("q"):
searched_posts = [post for post in posts if request.GET.get("q").lower() in post["title"].lower()]
context = {
"articles": searched_posts
}
else:
context = {
"articles": posts
}
return render(request, "news/all_news.html", context)
class PostView(View):
def get(self, request, link):
post = next(post for post in posts if post["link"] == link)
return render(request, "news/news.html", context=post)
class MainIndexView(View):
def get(self, request, *args, **kwargs):
return redirect("/news/")
|
def ln(t=1):
    # Function that, when called, prints a separator line according to the value received in t.
    # Created as an exercise in using functions, to cut down on repeated print calls on screen
    # and to avoid rewriting the same print statement over and over (call it laziness :p ).
    if t == 0:
        print('=' * 30)
    elif t == 1:
        print('-' * 30)
    elif t == 2:
        print('-=' * 30)
# Suppose we want to write out a message (one kind of data). For that we need special delimiters
# for messages; Python's default delimiter here is quotes [" (double); ' (single)], and to actually
# write the message out we use the print() function.
ln(2)
# Example:
print('Olá, mundo!')
# Hands-on part
print('Olá, ' + 'Mundo!')
# Using plus (+) we can join one message (string data) to another; when the data type is numeric,
# such as integers, plus performs the mathematical operation instead (when no quotes are used).
# Example
print(1 + 1)  # Result -> 2 (int data)
print('1' + '1')  # Result -> '11' (str data)
ln(2)
# We can introduce variables so information can be used in more elaborate ways: for example,
# we can store any kind of data in them and then manipulate that data so it is displayed
# however we want.
# Example
nome = 'Luiz'
# The variable (nome) receives (=) 'Luiz' (str data)
type(nome)  # <class 'str'>
idade = 24
# The variable (idade) receives (=) 24 (int data)
type(idade)  # <class 'int'>
peso = 62.5
# The variable (peso) receives (=) 62.5 (float data)
type(peso)  # <class 'float'>
print(nome, idade, peso)
ln()
# Now, for more interactivity, we can have the program capture data typed by the user,
# using the input() function
nome = input('Qual o seu nome? ')
# Reads a 'str' value from the keyboard
idade = input('Qual sua idade? ')
# Also reads a 'str' value (even if a number is typed), because when the data type that input()
# should return is not specified [example: int(input())], it automatically returns str
# (whether the text is numeric or not)
peso = input('Qual o seu peso? ')
print(nome, idade, peso)
ln(2)
# Lesson challenges ->
print('Desafio 01')
Nome = str(input('Digite seu nome? '))
print('Seja muito bem vindo ao mundo da programação', Nome, '!')
print('Seja muito bem vindo {0}, ao mundo da programação!'.format(Nome))
print(f'Seja muito bem vindo {Nome}, ao mundo da programação!')
ln(0)
print('Desafio 02')
dia = int(input('Qual dia você nasceu? '))
mes = int(input('De que mês? '))
ano = int(input('E de que ano? '))
print('Data de nascimento: {0}/{1}/{2}'.format(dia, mes, ano))
print(f'Data de nascimento: {dia}/{mes}/{ano}')
ln(0)
print('Desafio 03')
n0 = int(input('Digite um valor: '))
n1 = int(input('Digite outro valor: '))
print('A soma de {0} e {1} é {2}'.format(n0, n1, (n0 + n1)))
print(f'{n0} + {n1} = {n0 + n1}')
ln()
# Or -->
n2 = int(input('Type a value: '))
n3 = int(input('Type another value: '))
s = n2 + n3
print('{0} + {1} = {2}'.format(n2, n3, s))
print(f'{n2} + {n3} = {s}')
|
import os
import numpy as np
from PIL import Image
from sklearn.metrics import confusion_matrix
# name_list = ['19: Hat', '18: Hair', '17: Glove', '16: Sunglasses', '15: UpperClothes', '14: Dress', '13: Coat', '12: Socks', '11: Pants',
# '10: Torso-skin', '9: Scarf', '8: Skirt', '7: Face', '6: Left-arm', '5: Right-arm', '4: Left-leg', '3: Right-leg', '2: Left-shoe',
# '1: Right-shoe', '0: BG'
# ]
name_list = ['0: BG', '1: Hat', '2: Hair', '3: Glove', '4: Sunglasses', '5: UpperClothes', '6: Dress', '7: Coat', '8: Socks', '9: Pants',
'10: Torso-skin', '11: Scarf', '12: Skirt', '13: Face', '14: Left-arm', '15: Right-arm', '16: Left-leg', '17: Right-leg', '18: Left-shoe',
'19: Right-shoe'
]
def main():
image_paths, label_paths = init_path()
hist = compute_hist(image_paths, label_paths)
show_result(hist)
def init_path():
list_file = './human/list/val_id.txt'
file_names = []
    with open(list_file, 'r') as f:
for fn in f:
file_names.append(fn.strip())
image_dir = './human/features/attention/val/results/'
label_dir = './human/data/labels/'
image_paths = []
label_paths = []
for file_name in file_names:
image_paths.append(os.path.join(image_dir, file_name + '.png'))
label_paths.append(os.path.join(label_dir, file_name + '.png'))
return image_paths, label_paths
def fast_hist(lbl, pred, n_cls):
    '''
    Accumulate the confusion matrix used for mIoU computation.
    :param lbl: ground-truth label map
    :param pred: predicted label map
    :param n_cls: number of classes
    :return: (n_cls, n_cls) confusion matrix; rows are labels, columns are predictions
    '''
# print(n_cls)
k = (lbl >= 0) & (lbl < n_cls)
# print(lbl.shape)
# print(k)
# print(lbl[k].shape)
# print(np.bincount(n_cls * lbl[k].astype(int) + pred[k], minlength=n_cls ** 2).shape)
return np.bincount(n_cls * lbl[k].astype(int) + pred[k], minlength=n_cls ** 2).reshape(n_cls, n_cls)
def compute_hist(images, labels,n_cls=20):
hist = np.zeros((n_cls, n_cls))
for img_path, label_path in zip(images, labels):
print(img_path)
label = Image.open(label_path)
label_array = np.array(label, dtype=np.int32)
image = Image.open(img_path)
image_array = np.array(image, dtype=np.int32)
if '105047_427469' in img_path:
continue
gtsz = label_array.shape
imgsz = image_array.shape
if not gtsz == imgsz:
image = image.resize((gtsz[1], gtsz[0]), Image.ANTIALIAS)
image_array = np.array(image, dtype=np.int32)
print(label_array.shape, image_array.shape)
hist += fast_hist(label_array, image_array, n_cls)
return hist
def show_result(hist):
f = open('cihp_iou.txt', 'w+')
classes = ['background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe',
'rightShoe']
# num of correct pixels
num_cor_pix = np.diag(hist)
# num of gt pixels
num_gt_pix = hist.sum(1)
print('=' * 50)
print('=' * 50, file=f)
# @evaluation 1: overall accuracy
acc = num_cor_pix.sum() / hist.sum()
print('>>>', 'overall accuracy', acc)
print('>>>', 'overall accuracy', acc, file=f)
print('-' * 50)
print('=' * 50, file=f)
# @evaluation 2: mean accuracy & per-class accuracy
print('Accuracy for each class (pixel accuracy):')
for i in range(20):
print('%-15s: %f' % (classes[i], num_cor_pix[i] / num_gt_pix[i]))
acc = num_cor_pix / num_gt_pix
temp = np.nanmean(acc)
print('>>>', 'mean accuracy', temp)
print('>>>', 'mean accuracy', temp, file=f)
print('-' * 50)
print('=' * 50, file=f)
# @evaluation 3: mean IU & per-class IU
print('Per class miou:')
union = num_gt_pix + hist.sum(0) - num_cor_pix
for i in range(20):
print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i]))
print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i]), file=f)
iu = num_cor_pix / (num_gt_pix + hist.sum(0) - num_cor_pix)
temp = np.nanmean(iu)
print('>>>', 'mean IU', temp)
print('>>>', 'mean IU', temp, file=f)
print('-' * 50)
# @evaluation 4: frequency weighted IU
freq = num_gt_pix / hist.sum()
print('>>>', 'fwavacc', (freq[freq > 0] * iu[freq > 0]).sum())
print('=' * 50)
f.close()
def show_result_pascal(hist):
f = open('pascal_iou.txt', 'w+')
classes = ['background', 'head', 'torso', 'upper-arm', 'lower-arm', 'upper-leg',
'lower-leg']
# num of correct pixels
num_cor_pix = np.diag(hist)
# num of gt pixels
num_gt_pix = hist.sum(1)
print('=' * 50)
print('=' * 50, file=f)
# @evaluation 1: overall accuracy
acc = num_cor_pix.sum() / hist.sum()
print('>>>', 'overall accuracy', acc)
print('>>>', 'overall accuracy', acc, file=f)
print('-' * 50)
print('=' * 50, file=f)
# @evaluation 2: mean accuracy & per-class accuracy
print('Accuracy for each class (pixel accuracy):')
for i in range(7):
print('%-15s: %f' % (classes[i], num_cor_pix[i] / num_gt_pix[i]))
acc = num_cor_pix / num_gt_pix
temp = np.nanmean(acc)
print('>>>', 'mean accuracy', temp)
print('>>>', 'mean accuracy', temp, file=f)
print('-' * 50)
print('=' * 50, file=f)
# @evaluation 3: mean IU & per-class IU
print('Per class miou:')
union = num_gt_pix + hist.sum(0) - num_cor_pix
for i in range(7):
print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i]))
print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i]), file=f)
iu = num_cor_pix / (num_gt_pix + hist.sum(0) - num_cor_pix)
temp = np.nanmean(iu)
print('>>>', 'mean IU', temp)
print('>>>', 'mean IU', temp, file=f)
print('-' * 50)
# @evaluation 4: frequency weighted IU
freq = num_gt_pix / hist.sum()
print('>>>', 'fwavacc', (freq[freq > 0] * iu[freq > 0]).sum())
print('=' * 50)
f.close()
def show_result_atr(hist):
f = open('atr_iou.txt', 'w+')
classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17"]
# num of correct pixels
num_cor_pix = np.diag(hist)
# num of gt pixels
num_gt_pix = hist.sum(1)
print('=' * 50)
print('=' * 50, file=f)
# @evaluation 1: overall accuracy
acc = num_cor_pix.sum() / hist.sum()
print('>>>', 'overall accuracy', acc)
print('>>>', 'overall accuracy', acc, file=f)
print('-' * 50)
print('=' * 50, file=f)
# @evaluation 2: mean accuracy & per-class accuracy
print('Accuracy for each class (pixel accuracy):')
for i in range(18):
print('%-15s: %f' % (classes[i], num_cor_pix[i] / num_gt_pix[i]))
acc = num_cor_pix / num_gt_pix
temp = np.nanmean(acc)
print('>>>', 'mean accuracy', temp)
print('>>>', 'mean accuracy', temp, file=f)
print('-' * 50)
print('=' * 50, file=f)
# @evaluation 3: mean IU & per-class IU
print('Per class miou:')
union = num_gt_pix + hist.sum(0) - num_cor_pix
for i in range(18):
print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i]))
print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i]), file=f)
iu = num_cor_pix / (num_gt_pix + hist.sum(0) - num_cor_pix)
temp = np.nanmean(iu)
print('>>>', 'mean IU', temp)
print('>>>', 'mean IU', temp, file=f)
print('-' * 50)
# @evaluation 4: frequency weighted IU
freq = num_gt_pix / hist.sum()
print('>>>', 'fwavacc', (freq[freq > 0] * iu[freq > 0]).sum())
print('=' * 50)
f.close()
def get_iou(pred,lbl,n_cls):
'''
need tensor cpu
:param pred:
:param lbl:
:param n_cls:
:return:
'''
hist = np.zeros((n_cls,n_cls))
for i,j in zip(range(pred.size(0)),range(lbl.size(0))):
pred_item = pred[i].data.numpy()
lbl_item = lbl[j].data.numpy()
hist += fast_hist(lbl_item, pred_item, n_cls)
# num of correct pixels
num_cor_pix = np.diag(hist)
# num of gt pixels
num_gt_pix = hist.sum(1)
union = num_gt_pix + hist.sum(0) - num_cor_pix
# for i in range(20):
# print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i]))
iu = num_cor_pix / (num_gt_pix + hist.sum(0) - num_cor_pix)
print('>>>', 'mean IU', np.nanmean(iu))
miou = np.nanmean(iu)
print('-' * 50)
return miou
def get_iou_from_list(pred,lbl,n_cls):
'''
need tensor cpu
:param pred: list
:param lbl: list
:param n_cls:
:return:
'''
hist = np.zeros((n_cls,n_cls))
for i,j in zip(range(len(pred)),range(len(lbl))):
pred_item = pred[i].data.numpy()
lbl_item = lbl[j].data.numpy()
# print(pred_item.shape,lbl_item.shape)
hist += fast_hist(lbl_item, pred_item, n_cls)
# num of correct pixels
num_cor_pix = np.diag(hist)
# num of gt pixels
num_gt_pix = hist.sum(1)
union = num_gt_pix + hist.sum(0) - num_cor_pix
# for i in range(20):
acc = num_cor_pix.sum() / hist.sum()
print('>>>', 'overall accuracy', acc)
print('-' * 50)
# print('%-15s: %f' % (classes[i], num_cor_pix[i] / union[i]))
iu = num_cor_pix / (num_gt_pix + hist.sum(0) - num_cor_pix)
print('>>>', 'mean IU', np.nanmean(iu))
miou = np.nanmean(iu)
print('-' * 50)
acc = num_cor_pix / num_gt_pix
print('>>>', 'mean accuracy', np.nanmean(acc))
print('-' * 50)
return miou
def get_acc(pred,lbl,n_cls):
'''
need tensor cpu
:param pred: list
:param lbl: list
:param n_cls:
:return:
'''
hist = np.zeros((n_cls,n_cls))
for i,j in zip(range(len(pred)),range(len(lbl))):
pred_item = pred[i].data.numpy()
lbl_item = lbl[j].data.numpy()
# print(pred_item.shape,lbl_item.shape)
hist += fast_hist(lbl_item, pred_item, n_cls)
# num of correct pixels
num_cor_pix = np.diag(hist)
# num of gt pixels
num_gt_pix = hist.sum(1)
union = num_gt_pix + hist.sum(0) - num_cor_pix
# for i in range(20):
acc = num_cor_pix.sum() / hist.sum()
print('>>>', 'classification overall accuracy', acc)
return acc
def per_class_iu(hist):
return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
def get_iou_from_list_cat_iou(pred,lbl,n_cls, dataset='cihp'):
'''
need tensor cpu
:param pred: list
:param lbl: list
:param n_cls:
:return:
'''
hist = compute_hist(pred, lbl, n_cls=n_cls)
if dataset == 'cihp':
show_result(hist)
elif dataset == 'atr':
show_result_atr(hist)
else:
show_result_pascal(hist)
print("\n\n\n\n\n")
print(hist)
print("\n\n\n\n\n")
return
def cal_tb_(lbl, pred, n_cls):
tp = np.zeros((n_cls,1))
precision_m = np.zeros((n_cls,1))
recall_m = np.zeros((n_cls,1))
for i in range(n_cls):
pred_cls = (pred == i)
lbl_cls = lbl == i
pred_sum = pred_cls.sum()
lbl_sum = lbl_cls.sum()
true_pred = ((2*pred_cls - lbl_cls) == 1).sum()
tp[i] = true_pred
precision_m[i] = pred_sum
recall_m[i] = lbl_sum
return tp,precision_m,recall_m
# def confusion_matrix(pred, lbl, n_cls):
# '''
# need tensor cpu
# :param pred: list
# :param lbl: list
# :param n_cls:
# :return:
# '''
#
# return
if __name__ == '__main__':
import torch
pred = torch.autograd.Variable(torch.ones((2,1,32,32)).int())*20
pred2 = torch.autograd.Variable(torch.zeros((2,1, 32, 32)).int())
# lbl = [torch.zeros((32,32)).int() for _ in range(len(pred))]
get_iou(pred,pred2,7)
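    # A small, hedged sanity check of fast_hist itself on made-up NumPy arrays:
    # rows of the matrix are ground-truth classes, columns are predicted classes.
    toy_lbl = np.array([0, 0, 1, 1, 2, 2])
    toy_pred = np.array([0, 1, 1, 1, 2, 0])
    toy_hist = fast_hist(toy_lbl, toy_pred, 3)
    print(toy_hist)                # [[1 1 0] [0 2 0] [1 0 1]]
    print(per_class_iu(toy_hist))  # per-class IoU: [0.333..., 0.666..., 0.5]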
|
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Simulation/ParamExplorerInterval.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Simulation/ParamExplorerInterval
"""
from os import linesep
from sys import getsizeof
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ..Functions.copy import copy
from ..Functions.load import load_init_dict
from ..Functions.Load.import_class import import_class
from .ParamExplorer import ParamExplorer
# Import all class methods
# Try/catch to remove unnecessary dependencies in unused methods
try:
from ..Methods.Simulation.ParamExplorerInterval.get_value import get_value
except ImportError as error:
get_value = error
try:
from ..Methods.Simulation.ParamExplorerInterval.get_min import get_min
except ImportError as error:
get_min = error
try:
from ..Methods.Simulation.ParamExplorerInterval.get_max import get_max
except ImportError as error:
get_max = error
try:
from ..Methods.Simulation.ParamExplorerInterval.get_N import get_N
except ImportError as error:
get_N = error
from ntpath import basename
from os.path import isfile
from ._check import CheckTypeError
import numpy as np
import random
from ._check import InitUnKnowClassError
class ParamExplorerInterval(ParamExplorer):
"""Define a set of value (for parameter sweep) on interval"""
VERSION = 1
    # Check ImportError to remove unnecessary dependencies in unused methods
# cf Methods.Simulation.ParamExplorerInterval.get_value
if isinstance(get_value, ImportError):
get_value = property(
fget=lambda x: raise_(
ImportError(
"Can't use ParamExplorerInterval method get_value: "
+ str(get_value)
)
)
)
else:
get_value = get_value
# cf Methods.Simulation.ParamExplorerInterval.get_min
if isinstance(get_min, ImportError):
get_min = property(
fget=lambda x: raise_(
ImportError(
"Can't use ParamExplorerInterval method get_min: " + str(get_min)
)
)
)
else:
get_min = get_min
# cf Methods.Simulation.ParamExplorerInterval.get_max
if isinstance(get_max, ImportError):
get_max = property(
fget=lambda x: raise_(
ImportError(
"Can't use ParamExplorerInterval method get_max: " + str(get_max)
)
)
)
else:
get_max = get_max
# cf Methods.Simulation.ParamExplorerInterval.get_N
if isinstance(get_N, ImportError):
get_N = property(
fget=lambda x: raise_(
ImportError(
"Can't use ParamExplorerInterval method get_N: " + str(get_N)
)
)
)
else:
get_N = get_N
    # save and copy methods are available in all objects
save = save
copy = copy
    # get_logger method is available in all objects
get_logger = get_logger
def __init__(
self,
min_value=0,
max_value=1,
N=4,
type_value_gen=0,
type_value=0,
name="",
symbol="",
unit="",
setter=None,
getter=None,
init_dict=None,
init_str=None,
):
"""Constructor of the class. Can be use in three ways :
- __init__ (arg1 = 1, arg3 = 5) every parameters have name and default values
for pyleecan type, -1 will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with property names as keys
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Load from a file
init_dict = load_init_dict(init_str)[1]
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "min_value" in list(init_dict.keys()):
min_value = init_dict["min_value"]
if "max_value" in list(init_dict.keys()):
max_value = init_dict["max_value"]
if "N" in list(init_dict.keys()):
N = init_dict["N"]
if "type_value_gen" in list(init_dict.keys()):
type_value_gen = init_dict["type_value_gen"]
if "type_value" in list(init_dict.keys()):
type_value = init_dict["type_value"]
if "name" in list(init_dict.keys()):
name = init_dict["name"]
if "symbol" in list(init_dict.keys()):
symbol = init_dict["symbol"]
if "unit" in list(init_dict.keys()):
unit = init_dict["unit"]
if "setter" in list(init_dict.keys()):
setter = init_dict["setter"]
if "getter" in list(init_dict.keys()):
getter = init_dict["getter"]
        # Set the properties (value check and conversion are done in setter)
self.min_value = min_value
self.max_value = max_value
self.N = N
self.type_value_gen = type_value_gen
self.type_value = type_value
# Call ParamExplorer init
super(ParamExplorerInterval, self).__init__(
name=name, symbol=symbol, unit=unit, setter=setter, getter=getter
)
# The class is frozen (in ParamExplorer init), for now it's impossible to
# add new properties
def __str__(self):
"""Convert this object in a readeable string (for print)"""
ParamExplorerInterval_str = ""
# Get the properties inherited from ParamExplorer
ParamExplorerInterval_str += super(ParamExplorerInterval, self).__str__()
ParamExplorerInterval_str += "min_value = " + str(self.min_value) + linesep
ParamExplorerInterval_str += "max_value = " + str(self.max_value) + linesep
ParamExplorerInterval_str += "N = " + str(self.N) + linesep
ParamExplorerInterval_str += (
"type_value_gen = " + str(self.type_value_gen) + linesep
)
ParamExplorerInterval_str += "type_value = " + str(self.type_value) + linesep
return ParamExplorerInterval_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
# Check the properties inherited from ParamExplorer
if not super(ParamExplorerInterval, self).__eq__(other):
return False
if other.min_value != self.min_value:
return False
if other.max_value != self.max_value:
return False
if other.N != self.N:
return False
if other.type_value_gen != self.type_value_gen:
return False
if other.type_value != self.type_value:
return False
return True
def compare(self, other, name="self", ignore_list=None):
"""Compare two objects and return list of differences"""
if ignore_list is None:
ignore_list = list()
if type(other) != type(self):
return ["type(" + name + ")"]
diff_list = list()
# Check the properties inherited from ParamExplorer
diff_list.extend(super(ParamExplorerInterval, self).compare(other, name=name))
if other._min_value != self._min_value:
diff_list.append(name + ".min_value")
if other._max_value != self._max_value:
diff_list.append(name + ".max_value")
if other._N != self._N:
diff_list.append(name + ".N")
if other._type_value_gen != self._type_value_gen:
diff_list.append(name + ".type_value_gen")
if other._type_value != self._type_value:
diff_list.append(name + ".type_value")
# Filter ignore differences
diff_list = list(filter(lambda x: x not in ignore_list, diff_list))
return diff_list
def __sizeof__(self):
"""Return the size in memory of the object (including all subobject)"""
S = 0 # Full size of the object
# Get size of the properties inherited from ParamExplorer
S += super(ParamExplorerInterval, self).__sizeof__()
S += getsizeof(self.min_value)
S += getsizeof(self.max_value)
S += getsizeof(self.N)
S += getsizeof(self.type_value_gen)
S += getsizeof(self.type_value)
return S
def as_dict(self, **kwargs):
"""
        Convert this object into a json serializable dict (can be used in __init__).
Optional keyword input parameter is for internal use only
and may prevent json serializability.
"""
# Get the properties inherited from ParamExplorer
ParamExplorerInterval_dict = super(ParamExplorerInterval, self).as_dict(
**kwargs
)
ParamExplorerInterval_dict["min_value"] = self.min_value
ParamExplorerInterval_dict["max_value"] = self.max_value
ParamExplorerInterval_dict["N"] = self.N
ParamExplorerInterval_dict["type_value_gen"] = self.type_value_gen
ParamExplorerInterval_dict["type_value"] = self.type_value
# The class name is added to the dict for deserialisation purpose
# Overwrite the mother class name
ParamExplorerInterval_dict["__class__"] = "ParamExplorerInterval"
return ParamExplorerInterval_dict
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
self.min_value = None
self.max_value = None
self.N = None
self.type_value_gen = None
self.type_value = None
# Set to None the properties inherited from ParamExplorer
super(ParamExplorerInterval, self)._set_None()
def _get_min_value(self):
"""getter of min_value"""
return self._min_value
def _set_min_value(self, value):
"""setter of min_value"""
check_var("min_value", value, "float")
self._min_value = value
min_value = property(
fget=_get_min_value,
fset=_set_min_value,
doc=u"""Minumum value of the interval
:Type: float
""",
)
def _get_max_value(self):
"""getter of max_value"""
return self._max_value
def _set_max_value(self, value):
"""setter of max_value"""
check_var("max_value", value, "float")
self._max_value = value
max_value = property(
fget=_get_max_value,
fset=_set_max_value,
doc=u"""Maximum value of the interval
:Type: float
""",
)
def _get_N(self):
"""getter of N"""
return self._N
def _set_N(self, value):
"""setter of N"""
check_var("N", value, "int", Vmin=2)
self._N = value
N = property(
fget=_get_N,
fset=_set_N,
doc=u"""Number of value to take in the interval
:Type: int
:min: 2
""",
)
def _get_type_value_gen(self):
"""getter of type_value_gen"""
return self._type_value_gen
def _set_type_value_gen(self, value):
"""setter of type_value_gen"""
check_var("type_value_gen", value, "int", Vmin=0, Vmax=1)
self._type_value_gen = value
type_value_gen = property(
fget=_get_type_value_gen,
fset=_set_type_value_gen,
doc=u"""How to generate the value list. 0: linspace, 1: random (Not available yet)
:Type: int
:min: 0
:max: 1
""",
)
def _get_type_value(self):
"""getter of type_value"""
return self._type_value
def _set_type_value(self, value):
"""setter of type_value"""
check_var("type_value", value, "int", Vmin=0, Vmax=1)
self._type_value = value
type_value = property(
fget=_get_type_value,
fset=_set_type_value,
doc=u"""Type of the value: 0:float, 1:int
:Type: int
:min: 0
:max: 1
""",
)
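# A hedged usage sketch (not executed here; the setter path below is a made-up example of the
# "simu.xxx" attribute strings typically used for pyleecan parameter sweeps):
#
#     sweep = ParamExplorerInterval(
#         name="Stator slot opening",
#         symbol="W0",
#         unit="m",
#         setter="simu.machine.stator.slot.W0",
#         min_value=0.001,
#         max_value=0.005,
#         N=5,
#     )
#     values = sweep.get_value()  # expected: 5 linearly spaced values between min_value and max_value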
|
"""
How about writing a queue that holds the last 5 items?
Queue follows First-In-First-Out methodology, i.e., the data item
stored first will be accessed first. A real-world example of queue
can be a single-lane one-way road, where the vehicle enters first,
exits first. More real-world examples can be seen as queues at the
ticket windows and bus-stops. [source]
Complete the my_queue function to return a queue-like data type that
keeps the last n items.
Check the standard library to see how you can do this in the
shortest/most efficient way.
See an example output below and the tests that check for various
values of n. Have fun!
"""
from collections import deque
def my_queue(n=5):
return deque(maxlen=n)
if __name__ == "__main__":
mq = my_queue()
for i in range(10):
mq.append(i)
print((i, list(mq)))
"""Queue size does not go beyond n int, this outputs:
(0, [0])
(1, [0, 1])
(2, [0, 1, 2])
(3, [0, 1, 2, 3])
(4, [0, 1, 2, 3, 4])
(5, [1, 2, 3, 4, 5])
(6, [2, 3, 4, 5, 6])
(7, [3, 4, 5, 6, 7])
(8, [4, 5, 6, 7, 8])
(9, [5, 6, 7, 8, 9])
"""
|
# Copyright 2019 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from asyncio import Future
from contextlib import suppress
from functools import wraps
from typing import Any, Awaitable, Callable, Dict, Optional, Tuple, Union
from structlog import get_logger
from twisted.internet.defer import Deferred, ensureDeferred
logger = get_logger()
class Periodic:
""" Create an asyncio task that calls an async function periodically.
Adapted from:
- https://stackoverflow.com/a/37514633/947511
- https://stackoverflow.com/a/55505152/947511
"""
def __init__(self,
afunc: Callable[..., Awaitable[None]],
interval: Union[int, float],
args: Tuple = (),
kwargs: Dict = {}):
""" Create Periodic instance from async function, `interval` is in seconds.
"""
self.afunc = afunc
self.args = args
self.kwargs = kwargs
self.interval = interval
self.is_started = False
self._task: Optional[Future[None]] = None
async def start(self) -> None:
if not self.is_started:
self.is_started = True
# Start task to call func periodically:
self._task = asyncio.ensure_future(self._run())
async def stop(self) -> None:
if self.is_started:
assert self._task is not None
self.is_started = False
# Stop task and await it stopped:
self._task.cancel()
with suppress(asyncio.CancelledError):
await self._task
async def _run(self) -> None:
assert self._task is not None
while self.is_started:
try:
await asyncio.gather(
self.afunc(*self.args, **self.kwargs),
asyncio.sleep(self.interval),
)
except asyncio.CancelledError:
raise
except Exception:
logger.exception('periodic call failed')
break
def as_future(d: Deferred) -> Callable[..., Awaitable[Any]]:
"""Convert twisted deferred to asyncio future."""
return d.asFuture(asyncio.get_event_loop())
def as_deferred(f: Awaitable[Any]) -> Deferred:
"""Convert asyncio future to twisted deferred."""
return Deferred.fromFuture(asyncio.ensure_future(f))
def ensure_deferred(f):
@wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
return ensureDeferred(result)
return wrapper
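if __name__ == '__main__':
    # A minimal, hedged usage sketch of Periodic; the coroutine, interval and timings are made up.
    async def heartbeat() -> None:
        print('tick')
    async def demo() -> None:
        periodic = Periodic(heartbeat, interval=1.0)
        await periodic.start()
        await asyncio.sleep(3.5)  # let it tick a few times
        await periodic.stop()
    asyncio.run(demo())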
|
from rest_framework import permissions
class UpdateOwnProifle(permissions.BasePermission):
    """Allow users to update only their own profile."""
    def has_object_permission(self, request, view, obj):
        """Check whether the user is trying to update their own profile."""
        if request.method in permissions.SAFE_METHODS:
            return False
        return request.user.id == obj.id
class UpdateOwnFeed(permissions.BasePermission):
    """Allow users to update only their own feed items."""
    def has_object_permission(self, request, view, obj):
        """Check whether the user is trying to update their own feed item."""
        if request.method in permissions.SAFE_METHODS:
            return False
        return request.user.id == obj.user_profile.id
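# A hedged usage sketch (hypothetical viewset; assumes a matching model and serializer exist):
#
#     class UserProfileViewSet(viewsets.ModelViewSet):
#         queryset = models.UserProfile.objects.all()
#         serializer_class = serializers.UserProfileSerializer
#         permission_classes = (UpdateOwnProifle,)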
|
from django.test import TestCase
from modeltree.tree import ModelTree
from .models import TargetProxy
class ProxyModelTestCase(TestCase):
def setUp(self):
self.tree = ModelTree(model='proxy.Root')
def test_without_model(self):
f = TargetProxy._meta.pk
qs = self.tree.query_string_for_field(f)
self.assertEqual(qs, 'standard_path__id')
def test_with_model(self):
f = TargetProxy._meta.pk
qs = self.tree.query_string_for_field(f, model=TargetProxy)
self.assertEqual(qs, 'proxy_path__id')
|
"""Feature elimination methods."""
from xfeat.base import SelectorMixin
from xfeat.types import XDataFrame
from xfeat.utils import analyze_columns, cudf_is_available
try:
import cudf # NOQA
except ImportError:
cudf = None
class DuplicatedFeatureEliminator(SelectorMixin):
"""Remove duplicated features."""
def __init__(self):
"""[summary]."""
self._selected_cols = []
def fit_transform(self, input_df: XDataFrame) -> XDataFrame:
"""Fit to data frame, then transform it.
Args:
input_df (XDataFrame): Input data frame.
Returns:
XDataFrame : Output data frame.
"""
if cudf_is_available() and isinstance(input_df, cudf.DataFrame):
self._selected_cols = (
input_df.to_pandas()
.T.drop_duplicates(keep="first")
.index.values.tolist()
)
else:
self._selected_cols = input_df.T.drop_duplicates(
keep="first"
).index.values.tolist()
return input_df[self._selected_cols]
def transform(self, input_df: XDataFrame) -> XDataFrame:
"""Transform data frame.
Args:
input_df (XDataFrame): Input data frame.
Returns:
XDataFrame : Output data frame.
"""
return input_df[self._selected_cols]
class ConstantFeatureEliminator(SelectorMixin):
"""Remove constant features."""
def __init__(self):
"""[summary]."""
self._selected_cols = []
def fit_transform(self, input_df: XDataFrame) -> XDataFrame:
"""Fit to data frame, then transform it.
Args:
input_df (XDataFrame): Input data frame.
Returns:
XDataFrame : Output data frame.
"""
num_cols, cat_cols = analyze_columns(input_df)
constant_cols = []
for col in input_df.columns:
if col in num_cols:
if input_df[col].std() > 0:
continue
value_count = input_df[col].count()
if value_count == len(input_df) or value_count == 0:
constant_cols.append(col)
elif col in cat_cols:
value_count = input_df[col].count()
if input_df[col].unique().shape[0] == 1 or value_count == 0:
constant_cols.append(col)
else:
# All nan values, like as [np.nan, np.nan, np.nan, np.nan, ...]
constant_cols.append(col)
self._selected_cols = [
col for col in input_df.columns if col not in constant_cols
]
return input_df[self._selected_cols]
def transform(self, input_df: XDataFrame) -> XDataFrame:
"""Transform data frame.
Args:
input_df (XDataFrame): Input data frame.
Returns:
XDataFrame : Output data frame.
"""
return input_df[self._selected_cols]
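if __name__ == "__main__":
    # A small, hedged demo on a made-up pandas frame: drop the constant column,
    # then the duplicated one.
    import pandas as pd
    df = pd.DataFrame(
        {
            "a": [1, 2, 3, 4],
            "a_copy": [1, 2, 3, 4],  # exact duplicate of "a"
            "const": [7, 7, 7, 7],   # constant column
        }
    )
    df = ConstantFeatureEliminator().fit_transform(df)
    df = DuplicatedFeatureEliminator().fit_transform(df)
    print(df.columns.tolist())  # expected: ['a']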
|
"""短信第三方接口"""
import random
import requests
from qcloudsms_py import SmsSingleSender
from wsc_django.apps.settings import (
TENCENT_SMS_APPID,
TENCENT_SMS_APPKEY,
YUNPIAN_SYSTEM_APIKEY,
)
# SMS signatures
yunpian_sms_common_sign = "【志浩web开发】"
tencent_sms_common_sign = "【志浩web开发】"
class YunPianSms:
"""云片短信"""
    # Service hosts
sms_host = "sms.yunpian.com"
voice_host = "voice.yunpian.com"
    # API version
version = "v2"
    # URIs of the template / plain-text SMS endpoints
sms_tpl_send_uri = "/{}/sms/tpl_single_send.json".format(version)
sms_text_send_uri = "/{}/sms/single_send.json".format(version)
    # URI of the short-URL endpoint
sms_short_url_uri = "/{}/short_url/shorten.json".format(version)
    # URI for bulk marketing SMS
sms_marketing_group_send_uri = "/{}/sms/tpl_batch_send.json".format(version)
@classmethod
def tpl_send_sms(cls, tpl_id, tpl_value, mobile, apikey=YUNPIAN_SYSTEM_APIKEY):
"""
模板接口发短信(模版id传入)
"""
# 短信中不能包含【 和】,发送前进行替换
tpl_value = tpl_value.replace("【", "[")
tpl_value = tpl_value.replace("】", "]")
params = {
"apikey": apikey,
"tpl_id": tpl_id,
"tpl_value": tpl_value,
"mobile": mobile,
}
try:
res = requests.post(
"http://" + cls.sms_host + cls.sms_tpl_send_uri,
data=params,
timeout=(1, 5),
)
except:
return False, "短信发送接口超时或异常, 请稍后重试"
response = res.json()
if response.get("code", 1) == 0:
return True, ""
else:
return False, response.get("detail", "验证码发送失败,请稍后再试")
@classmethod
def tpl_send_sms_with_text(
cls, tpl_value, mobile, sign_type=yunpian_sms_common_sign, apikey=YUNPIAN_SYSTEM_APIKEY
):
"""
模板接口发短信(文本传入)
"""
# 短信中不能包含【 和】,发送前进行替换
tpl_value = tpl_value.replace("【", "[")
tpl_value = tpl_value.replace("】", "]")
params = {
"apikey": apikey,
"mobile": mobile,
"text": "{}{}".format(sign_type, tpl_value),
}
try:
res = requests.post(
"http://" + cls.sms_host + cls.sms_text_send_uri,
data=params,
timeout=(1, 5),
)
except:
return False, "短信发送接口超时或异常, 请稍后重试"
response = res.json()
if response.get("code", 1) == 0:
return True, ""
else:
return False, response.get("detail", "验证码发送失败,请稍后再试")
@classmethod
def tpl_short_url(cls, long_url, apikey=YUNPIAN_SYSTEM_APIKEY):
"""获取短链接"""
params = {"apikey": apikey, "long_url": long_url}
try:
res = requests.post(
"http://" + cls.sms_host + cls.sms_short_url_uri,
data=params,
timeout=(1, 5),
)
except:
return False, "短信发送接口超时或异常, 请稍后重试"
response = res.json()
if response.get("code", 1) == 0:
return True, response["short_url"]["short_url"]
else:
return False, long_url
@classmethod
def tpl_send_sms_ret(cls, tpl_id, mobile, tpl_value, apikey=YUNPIAN_SYSTEM_APIKEY):
"""
单条发送接口,返回实际发送消耗的短信条数或发送失败原因
"""
params = {
"apikey": apikey,
"mobile": mobile,
"tpl_value": tpl_value,
"tpl_id": tpl_id,
}
try:
res = requests.post(
"https://" + cls.sms_host + cls.sms_tpl_send_uri,
data=params,
timeout=(1, 5),
)
except:
return False, "短信发送接口超时或返回异常,请稍后再试"
response = res.json()
if response.get("code", 1) == 0:
return True, response.get("count", 1)
else:
return False, response.get("detail") or response.get("msg", "短信发送失败,原因未知")
@classmethod
def send_sms_branch_ret(
cls, tpl_id, mobiles, tpl_value, callback_url=None, apikey=YUNPIAN_SYSTEM_APIKEY
):
"""
群发接口,返回所有结果
:param apikey: 用户唯一标识,在管理控制台获取
:param tpl_id: 模板id
:param mobiles: 单号码:15205201314 多号码:15205201314,15205201315
:param text: 已审核短信模板
:param callback_url: 短信发送后将向这个地址推送发送报告, 这个接口好像是同步接口,直接返回结果。。。异步回调还有必要吗?
:return: total_count, total_fee, unit, data
"""
params = {
"apikey": apikey,
"tpl_id": tpl_id,
"mobile": mobiles,
"tpl_value": tpl_value,
}
if callback_url:
params["callback_url"] = callback_url
headers = {
"Content-type": "application/x-www-form-urlencoded;charset=utf-8;",
"Accept": "application/json;charset=utf-8;",
"Connection": "keep-alive",
}
try:
res = requests.post(
"https://" + cls.sms_host + cls.sms_marketing_group_send_uri,
data=params,
headers=headers,
)
except:
return False, "短信发送接口超时或返回异常,请稍后再试"
response = res.json()
total_count = response.get("total_count", 0)
data = response.get("data", [])
return True, (total_count, data)
@classmethod
def send_yunpian_verify_code(cls, mobile, code, use, mode="text"):
"""发送短信验证码,模版内容:
【微商城助手】您的验证码是#code#。此验证码用于绑定手机号,5分钟内有效。
ps:这个单独用了一个不一样的签名,现在审核模版必须要加图形验证码,狗带 2018-07-19 by yy
"""
if mode == "text":
tpl_value = "您的验证码是{code}。此验证码用于绑定手机号".format(code=code)
return cls.tpl_send_sms_with_text(tpl_value, mobile)
else:
tpl_id = 4460930
tpl_value = "#code#={}&#use#={}".format(code, use)
return cls.tpl_send_sms(tpl_id, tpl_value, mobile)
class TencentSms:
"""腾讯短信"""
    # Create the API client object
ssender = SmsSingleSender(TENCENT_SMS_APPID, TENCENT_SMS_APPKEY)
@classmethod
def tpl_send_sms(cls, sms_text, mobile, smsType=0, smsSign=tencent_sms_common_sign):
""" 单发短信接口
:param text string 短信内容
:param mobile string 手机号
:param smsType int 签名类型 0: 普通短信, 1: 营销短信
:param smsSign string 签名内容
:rtype True or errmsg
"""
        # SMS content must not contain 【 or 】, so replace them before sending
sms_text = sms_text.replace("【", "[")
sms_text = sms_text.replace("】", "]")
        # Prepend the signature to the SMS content
sms_text = "{}{}".format(smsSign, sms_text)
try:
            # Example response: {'result': 1014, 'errmsg': 'package format error, sdkappid not have this tpl_id', 'ext': ''}
result = cls.ssender.send(
smsType, 86, mobile, sms_text, extend="", ext=""
            )  # if the signature argument is missing or empty, the default signature is used
except:
return False, "短信发送接口超时或返回异常,请稍后再试"
result_code = result["result"]
if result_code == 0:
return True, ""
else:
return False, result["errmsg"]
@classmethod
def send_tencent_verify_code(cls, mobile, code, use):
""" 发送短信验证码
模版内容:您的验证码是#code#。此验证码用于#use#,5分钟内有效。
param: mobile 手机号
param: code 验证码
param: use 验证码用途
"""
sms_text = "您的验证码是{code}。此验证码用于绑定手机号,5分钟内有效。".format(code=code)
return cls.tpl_send_sms(sms_text, mobile)
def gen_sms_code():
population_seq = "0123456789" # 组成验证码元素的序列
code_length = 4 # 验证码长度
code = "".join([random.choice(population_seq) for i in range(code_length)])
return code
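# A hedged usage sketch (not executed; it needs a valid Yunpian apikey, and the phone number is made up):
#
#     code = gen_sms_code()  # e.g. "4821"
#     ok, err = YunPianSms.send_yunpian_verify_code("13800138000", code, use="绑定手机号")
#     if not ok:
#         print(err)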
|
import json
class Config:
def __init__(self, json_path=None):
self.reddit_client_id = ''
self.reddit_client_secret = ''
self.reddit_user_agent = ''
self.reddit_username = ''
self.reddit_password = ''
self.reddit_commentsPerCheck = 100
self.bot_footer = ''
self.bot_commentpath = ''
self.bot_logfolder = ''
self.bot_subreddits = ['test']
self.bot_sleep = 60
self.bot_dbname = ''
if json_path:
self.load_json(path=json_path)
def load_json(self, path):
"""
Load configuration from a JSON file.
:param path: Path of the JSON file.
"""
with open(path, 'r') as file:
config = json.load(file)
for item in config:
self.__setattr__(item, config[item])
def save_json(self, path):
"""
Save configuration to a JSON file.
:param path: Path of the JSON file.
:return:
"""
with open(path, 'w+') as file:
json.dump(self.__dict__, file)
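if __name__ == '__main__':
    # A small, hedged round-trip demo using a throwaway file in the working directory.
    cfg = Config()
    cfg.bot_subreddits = ['python', 'learnpython']
    cfg.save_json('config_example.json')
    loaded = Config(json_path='config_example.json')
    print(loaded.bot_subreddits)  # ['python', 'learnpython']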
|
from exterminate.Utilities import builtins
_range = range
def alt_range(start, stop, step=1):
return _range(start-2, stop+2, max(1, int(step/2)))
builtins.range = alt_range
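# Hedged illustration of the effect: after this module is imported, a call like
# list(range(0, 5)) actually runs range(-2, 7, 1) and yields [-2, -1, 0, 1, 2, 3, 4, 5, 6].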
|