content stringlengths 5 1.05M |
|---|
"""Language class for gruut"""
import logging
import os
import typing
from pathlib import Path
import pydash
import yaml
from gruut_ipa import IPA, Phonemes
from .phonemize import Phonemizer
from .toksen import PostTokenizeFunc, Token, TokenizeFunc, Tokenizer
from .utils import env_constructor
# -----------------------------------------------------------------------------
_LOGGER = logging.getLogger("gruut")
_DIR = Path(__file__).parent
__version__ = (_DIR / "VERSION").read_text().strip()
# -----------------------------------------------------------------------------
class Language:
    """Configuration, tokenizer, and phonemizer for a language"""

    def __init__(
        self,
        config,
        language: typing.Optional[str] = None,
        preload_lexicon: bool = False,
        custom_tokenize: typing.Optional[TokenizeFunc] = None,
        custom_post_tokenize: typing.Optional[PostTokenizeFunc] = None,
    ):
        # Language code may be passed explicitly; otherwise it is taken
        # from the configuration ("language.code").
        if language is None:
            self.language = pydash.get(config, "language.code")
        else:
            self.language = language
        self.config = config
        # Tokenizer and phonemizer share the same configuration dict
        self.tokenizer = Tokenizer(
            config,
            custom_tokenize=custom_tokenize,
            custom_post_tokenize=custom_post_tokenize,
        )
        self.phonemizer = Phonemizer(config, preload_lexicon=preload_lexicon)
        # Phonemizer reuses the tokenizer's notion of what counts as a word
        self.phonemizer.is_word = self.tokenizer.is_word  # type: ignore
        self.phonemes = Phonemes.from_language(self.language)
        # accent language -> (from phoneme -> list of replacement phonemes)
        self.accents: typing.Dict[str, typing.Dict[str, typing.List[str]]] = {}
        # If True, primary/secondary stress should be kept during phonemization
        self.keep_stress = bool(pydash.get(self.config, "language.keep_stress", False))
        # If True, acute/grave accents should be kept during phonemization
        self.keep_accents = bool(
            pydash.get(self.config, "language.keep_accents", False)
        )
        # Allowable tones in the language
        self.tones: typing.List[str] = pydash.get(self.config, "language.tones", [])
        # Load language-specific "accents" (different than acute/grave)
        accents = self.config.get("accents", {})
        for accent_lang, accent_map in accents.items():
            final_map = {}
            for from_phoneme, to_phonemes in accent_map.items():
                # Normalize scalar replacements to single-element lists
                if isinstance(to_phonemes, str):
                    to_phonemes = [to_phonemes]
                final_map[from_phoneme] = to_phonemes
            self.accents[accent_lang] = final_map
def id_to_phonemes(
    self, pad="_", no_pad=False, no_word_break=False
) -> typing.List[str]:
    """Return an ordered list of phoneme symbols; list index is the integer id.

    Args:
        pad: padding symbol, always placed at index 0 (unless no_pad)
        no_pad: if True, do not include the pad symbol
        no_word_break: if True, do not include the IPA word-break symbol
    """
    # BUGFIX: previously the pad parameter was unconditionally overwritten
    # with "_", silently ignoring the caller's value.
    # Acute/grave accents (' and ²)
    accents = []
    if self.keep_accents:
        accents = [IPA.ACCENT_ACUTE.value, IPA.ACCENT_GRAVE.value]
    # Primary/secondary stress (ˈ and ˌ)
    # NOTE: Acute accent (0x0027) != primary stress (0x02C8)
    stresses = []
    if self.keep_stress:
        stresses = [IPA.STRESS_PRIMARY.value, IPA.STRESS_SECONDARY.value]
    # Tones
    tones = self.tones
    # Word break
    word_break = [IPA.BREAK_WORD.value]
    if no_word_break:
        word_break = []
    # Pad symbol must always be first (index 0)
    phonemes_list = [pad]
    if no_pad:
        phonemes_list = []
    # Always include pad and break symbols.
    # In the future, intonation/tones should also be added.
    phonemes_list = (
        phonemes_list
        + [IPA.BREAK_MINOR.value, IPA.BREAK_MAJOR.value]
        + word_break
        + accents
        + stresses
        + tones
        + sorted([p.text for p in self.phonemes])
    )
    return phonemes_list
# -------------------------------------------------------------------------
@staticmethod
def load(
    language: str,
    lang_dir: typing.Optional[typing.Union[str, Path]] = None,
    data_dirs: typing.Optional[typing.List[typing.Union[str, Path]]] = None,
    preload_lexicon: bool = False,
    custom_tokenizers: bool = True,
) -> typing.Optional["Language"]:
    """Load language from code.

    Searches the data directories for <dir>/<language>/language.yml when
    lang_dir is not given. Returns None if no configuration file is found.
    """
    if not lang_dir:
        if not data_dirs:
            data_dirs = Language.get_data_dirs()
        assert data_dirs is not None
        # Pick the first data directory that contains a language.yml
        for data_dir in data_dirs:
            lang_dir = Path(data_dir) / language
            if lang_dir and lang_dir.is_dir():
                config_path = lang_dir / "language.yml"
                if config_path.is_file():
                    break
    # BUGFIX: message was a plain string literal, so the {language} and
    # {data_dirs} placeholders were never interpolated.
    assert lang_dir, f"Language '{language}' not found in {data_dirs}"
    # Expand environment variables in string values
    yaml.SafeLoader.add_constructor("!env", env_constructor)
    # Load configuration
    lang_dir = Path(lang_dir)
    assert isinstance(lang_dir, Path)
    config_path = lang_dir / "language.yml"
    if not config_path.is_file():
        _LOGGER.warning("Missing %s", config_path)
        return None
    # Set environment variable for config loading (used by !env values)
    os.environ["config_dir"] = str(config_path.parent)
    with open(config_path, "r") as config_file:
        config = yaml.safe_load(config_file)
    # Language-specific loading
    custom_tokenize: typing.Optional[TokenizeFunc] = None
    custom_post_tokenize: typing.Optional[PostTokenizeFunc] = None
    if custom_tokenizers:
        if language == "fa":
            # Use hazm for text normalization and POS tagging.
            custom_tokenize = Language.make_fa_tokenize(lang_dir)
        elif language in ("en-us", "en-gb"):
            # Use the Stanford POS tagger.
            # Requires java, so don't bother if it's not available.
            custom_post_tokenize = Language.make_en_post_tokenize(lang_dir)
        elif language == "fr-fr":
            # Use crfsuite model for POS tagging.
            custom_post_tokenize = Language.make_fr_post_tokenize(lang_dir)
    return Language(
        config=config,
        language=language,
        preload_lexicon=preload_lexicon,
        custom_tokenize=custom_tokenize,
        custom_post_tokenize=custom_post_tokenize,
    )
# -------------------------------------------------------------------------
@staticmethod
def get_data_dirs(
    first_data_dirs: typing.Optional[typing.List[typing.Union[str, Path]]] = None
):
    """Return the language data directories to search, in priority order."""
    # Caller-supplied directories take highest priority
    search_dirs = [Path(p) for p in first_data_dirs] if first_data_dirs else []
    # Environment-variable override
    env_data_dir = os.environ.get("GRUUT_DATA_DIR")
    if env_data_dir:
        search_dirs.append(Path(env_data_dir))
    # ${XDG_CONFIG_HOME}/gruut or ${HOME}/.config/gruut
    xdg_config_home = os.environ.get("XDG_CONFIG_HOME")
    if xdg_config_home:
        search_dirs.append(Path(xdg_config_home) / "gruut")
    else:
        search_dirs.append(Path.home() / ".config" / "gruut")
    # Data directory *next to* gruut
    search_dirs.append(_DIR.parent / "data")
    # Data directory *inside* gruut
    search_dirs.append(_DIR / "data")
    return search_dirs
# -------------------------------------------------------------------------
@staticmethod
def make_en_post_tokenize(lang_dir: Path) -> typing.Optional[PostTokenizeFunc]:
    """Tokenization post-processing (POS tagging) for English.

    Returns None when the POS model file is missing.
    """
    from .pos import load_model, predict

    # Load part of speech tagger
    pos_dir = lang_dir / "pos"
    model_path = pos_dir / "model.pkl"
    if not model_path.is_file():
        _LOGGER.warning("Missing POS model: %s", model_path)
        return None
    _LOGGER.debug("Loading POS model from %s", model_path)
    pos_model = load_model(model_path)
    # Collapse fine-grained Penn Treebank tags into coarser classes
    pos_map = {
        "NNS": "NN",
        "NNP": "NN",
        "NNPS": "NN",
        "PRP$": "PRP",
        "RBR": "RB",
        "RBS": "RB",
        "VBG": "VB",
        "VBN": "VBD",
        "VBP": "VB",
        "VBZ": "VB",
        "JJR": "JJ",
        "JJS": "JJ",
    }

    def do_post_tokenize(
        sentence_tokens: typing.List[Token], **kwargs
    ) -> typing.List[Token]:
        """Tag part of speech for sentence tokens"""
        guess_pos = kwargs.get("guess_pos", True)
        if not guess_pos:
            # Don't run tagger if POS isn't needed
            return sentence_tokens
        words = [t.text for t in sentence_tokens]
        sents = [words]
        sents_pos = predict(pos_model, sents)
        assert sents_pos, "No POS predictions"
        words_pos = sents_pos[0]
        # BUGFIX: message was an f-string with no placeholders; make it
        # actually report the mismatching lengths.
        assert len(words_pos) == len(words), (
            f"Length mismatch for words/pos: {len(words)} != {len(words_pos)}"
        )
        for i, pos in enumerate(words_pos):
            sentence_tokens[i].pos = pos_map.get(pos, pos)
        return sentence_tokens

    return do_post_tokenize
@staticmethod
def make_fa_tokenize(lang_dir: Path) -> typing.Optional[TokenizeFunc]:
    """Tokenize Persian/Farsi"""
    try:
        import hazm
    except ImportError:
        _LOGGER.warning("hazm is highly recommended for language 'fa'")
        _LOGGER.warning("pip install 'hazm>=0.7.0'")
        return None

    normalizer = hazm.Normalizer()
    # Load part of speech tagger
    model_path = lang_dir / "postagger.model"
    if not model_path.is_file():
        _LOGGER.warning("Missing model: %s", model_path)
        return None
    _LOGGER.debug("Using hazm tokenizer (model=%s)", model_path)
    tagger = hazm.POSTagger(model=str(model_path))

    def do_tokenize(text: str, **kwargs) -> typing.List[typing.List[Token]]:
        """Normalize, tokenize, and recognize part of speech"""
        all_sentences = []
        for raw_sentence in hazm.sent_tokenize(normalizer.normalize(text)):
            tagged_words = tagger.tag(hazm.word_tokenize(raw_sentence))
            all_sentences.append(
                [Token(text=word, pos=pos) for word, pos in tagged_words]
            )
        return all_sentences

    return do_tokenize
@staticmethod
def make_fr_post_tokenize(lang_dir: Path) -> typing.Optional[PostTokenizeFunc]:
    """Tokenization post-processing (POS tagging) for French.

    Returns None when the POS model file is missing.
    """
    from .pos import load_model, predict

    # Load part of speech tagger
    pos_dir = lang_dir / "pos"
    model_path = pos_dir / "model.pkl"
    if not model_path.is_file():
        _LOGGER.warning("Missing POS model: %s", model_path)
        return None
    _LOGGER.debug("Loading POS model from %s", model_path)
    pos_model = load_model(model_path)

    def do_post_tokenize(
        sentence_tokens: typing.List[Token], **kwargs
    ) -> typing.List[Token]:
        """Tag part of speech for sentence tokens"""
        guess_pos = kwargs.get("guess_pos", True)
        if not guess_pos:
            # Don't run tagger if POS isn't needed
            return sentence_tokens
        words = [t.text for t in sentence_tokens]
        sents = [words]
        sents_pos = predict(pos_model, sents)
        assert sents_pos, "No POS predictions"
        words_pos = sents_pos[0]
        # BUGFIX: message was an f-string with no placeholders; report the
        # mismatching lengths (consistent with make_en_post_tokenize).
        assert len(words_pos) == len(words), (
            f"Length mismatch for words/pos: {len(words)} != {len(words_pos)}"
        )
        for i, pos in enumerate(words_pos):
            sentence_tokens[i].pos = pos
        return sentence_tokens

    return do_post_tokenize
|
"""This file contains an implementation of the Morse wavelet"""
import copy
import numpy as np
from scipy.special import gamma, gammaln, comb
from .wavelet import Wavelet
from . import morseutils
__all__ = ['Morse']
class Morse(Wavelet):
    def __init__(self, *, fs=None, freq=None, gamma=None, beta=None):
        """Initializes a Morse wavelet class

        Parameters
        ----------
        fs : float, optional
            The sampling rate in Hz.
            Default is 1
        freq : float, optional
            Frequency at which the wavelet reaches its peak value
            in the frequency domain, in units of Hz.
            Default is 0.25 * fs
        gamma : float, optional
            Gamma parameter of Morse wavelet
            Default is 3
        beta : float, optional
            Beta parameter of Morse wavelet
            Default is 20
        """
        super().__init__()
        if fs is None:
            fs = 1
        self.fs = fs
        if freq is None:
            freq = 0.25 * self.fs
        # BUGFIX: assign through the validating 'frequency' property so that
        # both _freq and _norm_radian_freq are set; 'self.freq = freq' only
        # created a plain attribute and left _norm_radian_freq undefined.
        # NOTE(review): if the Wavelet base class already provides a 'freq'
        # property with the same behavior, the original line was fine —
        # confirm against wavelet.py.
        self.frequency = freq  # will also set norm_radian_freq
        # MATLAB has default time bandwidth of 60 so we have similar
        # defaults
        if gamma is None:
            gamma = 3
        if beta is None:
            beta = 20
        self.gamma = gamma
        self.beta = beta
def __call__(self, length, *, normalization=None):
    """Gets wavelet representation

    Parameters
    ----------
    length : int
        Length of wavelet (defaults to 16384 when None)
    normalization : string, optional
        The type of normalization to use, 'bandpass' or 'energy'
        If 'bandpass', the DFT of the wavelet has a peak value of 2
        If 'energy', the time-domain wavelet energy sum(abs(psi)**2)
        is 1.

    Returns
    -------
    A tuple of (psi, psif), where psi is the time-domain representation
    of the wavelet and psif is frequency-domain
    """
    if length is None:
        length = 16384
    if length < 1:
        raise ValueError("length must at least 1 but got {}".
                         format(length))
    if normalization is None:
        normalization = 'bandpass'
    if normalization not in ('bandpass', 'energy'):
        raise ValueError("normalization must be 'bandpass' or"
                         " 'energy' but got {}".format(normalization))
    # Delegate to morseutils; n_wavelets=1 produces a single wavelet whose
    # singleton axis is removed by squeeze() below.
    psi, psif = morseutils.morsewave(length,
                                     self._gamma,
                                     self._beta,
                                     self._norm_radian_freq,
                                     n_wavelets=1,
                                     normalization=normalization)
    return psi.squeeze(), psif.squeeze()
def compute_freq_bounds(self, N, *, p=None, **kwargs):
    """Return [wl, wh]: usable low/high normalized radian frequencies
    for a signal of N samples.

    p bounds how long a wavelet may be relative to the signal
    (max wavelet length = N / p); default p is 5. Extra kwargs are
    forwarded to morseutils.morsehigh.
    """
    if p is None:
        p = 5
    # Highest usable frequency, from morseutils
    wh = morseutils.morsehigh(self._gamma, self._beta, **kwargs)
    # Peak frequency of the mother wavelet
    w0 = morseutils.morsefreq(self._gamma, self._beta)
    # 4x the mother-wavelet footprint (see compute_lengths)
    base_length = (2 * np.sqrt(2) * np.sqrt(self._gamma * self._beta)) / w0 * 4
    max_length = int(np.floor(N / p))
    # Largest scale whose wavelet still fits in max_length samples
    max_scale = max_length / base_length
    wl = w0 / max_scale
    return [wl, wh]
def compute_lengths(self, norm_radian_freqs):
    """Return the wavelet length (in samples) needed at each of the
    given normalized radian frequencies."""
    # Peak frequency of mother wavelet
    w0 = morseutils.morsefreq(self._gamma, self._beta)
    # 4 times the mother wavelet footprint to be safe
    # see Lilly 2017 for reference
    base_length = (2 * np.sqrt(2) * np.sqrt(self._gamma * self._beta)
                   / w0 * 4)
    scale_fact = w0 / norm_radian_freqs
    # lower frequencies require more samples
    # and higher frequencies fewer
    return np.ceil(scale_fact * base_length).astype(int)
def copy(self):
    """Return an independent deep copy of this wavelet."""
    duplicate = copy.deepcopy(self)
    return duplicate
def _norm_radians_to_hz(self, val):
    """Convert normalized radian frequency (pi == Nyquist) to Hz."""
    return val / np.pi * self._fs / 2

def _hz_to_norm_radians(self, val):
    """Convert a frequency in Hz to normalized radians (pi == Nyquist)."""
    return val / (self._fs / 2) * np.pi
@property
def fs(self):
    """Sampling rate in Hz."""
    return self._fs

@fs.setter
def fs(self, val):
    # 'not val > 0' (rather than 'val <= 0') also rejects NaN
    if not val > 0:
        raise ValueError("fs must be positive but got {}"
                         .format(val))
    self._fs = val
@property
def frequency(self):
    """Peak frequency of the wavelet, in Hz."""
    return self._freq

@frequency.setter
def frequency(self, val):
    # BUGFIX: 'not val > 0 and val <= nyq' parsed as
    # '(not val > 0) and (val <= nyq)', so frequencies above Nyquist were
    # silently accepted; parenthesize to validate the whole range.
    if not (val > 0 and val <= self._fs / 2):
        raise ValueError("The frequency must be between 0"
                         " and the Nyquist frequency {} Hz"
                         " but got {}".format(self._fs / 2, val))
    self._freq = val
    # Keep the normalized-radian view in sync
    self._norm_radian_freq = self._hz_to_norm_radians(val)
@property
def norm_radian_freq(self):
    """Peak frequency in normalized radians (pi == Nyquist)."""
    return self._norm_radian_freq

@norm_radian_freq.setter
def norm_radian_freq(self, val):
    # BUGFIX: same precedence bug as the frequency setter (values above pi
    # were silently accepted), plus the malformed '{]' placeholder in the
    # error message.
    if not (val > 0 and val <= np.pi):
        raise ValueError("The normalized radian frequency must"
                         " be between 0 and the Nyquist frequency"
                         " pi but got {}".format(val))
    self._norm_radian_freq = val
    # Keep the Hz view in sync
    self._freq = self._norm_radians_to_hz(val)
@property
def gamma(self):
    """Gamma parameter of the Morse wavelet."""
    return self._gamma

@gamma.setter
def gamma(self, val):
    # 'not val > 0' also rejects NaN
    if not val > 0:
        raise ValueError("gamma must be positive")
    self._gamma = val
@property
def beta(self):
    """Beta parameter of the Morse wavelet."""
    return self._beta

@beta.setter
def beta(self, val):
    # 'not val > 0' also rejects NaN
    if not val > 0:
        raise ValueError("beta must be positive")
    self._beta = val
@property
def time_bandwidth(self):
    """Time-bandwidth product, gamma * beta."""
    return self._gamma * self._beta
|
from views import *
from lookups import *
import requests
import re
from utils import *
import itertools
from config import config
if config.IMPORT_PYSAM_PRIMER3:
import pysam
import csv
#hpo lookup
import orm
from pprint import pprint
import os
import json
import pymongo
import sys
import re
import itertools
from urllib2 import HTTPError, URLError
import csv
from collections import defaultdict, Counter
#import rest as annotation
from optparse import OptionParser
import mygene
import lookups
from orm import Patient
import requests
from neo4j.v1 import GraphDatabase, basic_auth
def individuals_update(external_ids):
    """Rebuild the cached per-individual summaries for the given external ids
    and store them on the current session user's record.

    Returns the merged list (fresh summaries for external_ids plus the user's
    existing summaries for other individuals).
    """
    patients_db=get_db(app.config['DB_NAME_PATIENTS'])
    users_db=get_db(app.config['DB_NAME_USERS'])
    def f(eid):
        # Build a trimmed summary document for one patient
        p=patients_db.patients.find_one({'external_id':eid},{'_id':False})
        print p['external_id']
        # Keep only the observed phenotype features
        p['features']=[f for f in p.get('features',[]) if f['observed']=='yes']
        # Flatten 'solved' down to a list of gene names
        if 'solved' in p:
            if 'gene' in p['solved']:
                p['solved']=[p['solved']['gene']]
            else:
                p['solved']=[]
        else: p['solved']=[]
        if 'genes' in p: p['genes']=[x['gene'] for x in p['genes'] if 'gene' in x]
        else: p['genes']=[]
        # Candidate genes = union of candidate and solved genes
        p['genes']=list(frozenset(p['genes']+p['solved']))
        # Pull variant counts from the main variant database, if present
        p2=get_db().patients.find_one({'external_id':p['external_id']},{'rare_homozygous_variants_count':1,'rare_compound_hets_count':1, 'rare_variants_count':1,'total_variant_count':1})
        if not p2: return p
        p['rare_homozygous_variants_count']=p2.get('rare_homozygous_variants_count','')
        p['rare_compound_hets_count']=p2.get('rare_compound_hets_count','')
        p['rare_variants_count']=p2.get('rare_variants_count','')
        p['total_variant_count']=p2.get('total_variant_count','')
        if '_id' in p: del p['_id']
        return p
    new_individuals=[f(eid) for eid in external_ids]
    # Drop stale entries for these ids, then prepend the fresh summaries
    old_individuals=users_db.users.find_one({'user':session['user']}).get('individuals',[])
    old_individuals=[ind for ind in old_individuals if ind['external_id'] not in external_ids]
    individuals=new_individuals+old_individuals
    users_db.users.update_one({'user':session['user']},{'$set':{'individuals':individuals}})
    return individuals
@app.route('/update_patient_data/<individual>',methods=['POST'])
@requires_auth
def update_patient_data(individual):
    """Update sex, candidate genes, observed features, and consanguinity for
    a patient from the submitted edit form, then refresh the cached summary.
    """
    # Demo account is read-only
    if session['user']=='demo': return 'not permitted'
    print(request.form)
    consanguinity=request.form.getlist('consanguinity_edit[]')[0]
    gender=request.form.getlist('gender_edit[]')[0]
    genes=request.form.getlist('genes[]')
    features=request.form.getlist('feature[]')
    print('INDIVIDUAL',individual)
    print('GENDER',gender)
    print('CONSANGUINITY',consanguinity)
    print('GENES',genes)
    print('FEATURES',features)
    external_id=individual
    individual=get_db(app.config['DB_NAME_PATIENTS']).patients.find_one({'external_id':external_id})
    print('edit patient gender')
    print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'sex':{'female':'F','male':'M','unknown':'U'}[gender]}}))
    print('edit patient genes')
    individual['genes']=[]
    for g in genes:
        gene=get_db(app.config['DB_NAME']).genes.find_one({'gene_name_upper':g})
        print(gene)
        # Skip names not present in the gene database
        # (BUGFIX: this check previously ran after the gene doc was used)
        if not gene: continue
        # BUGFIX: de-duplicate by gene NAME; the old check compared the full
        # Mongo gene document against a list of names, so it never matched.
        if g in [x['gene'] for x in individual['genes']]: continue
        individual['genes'].append({'gene':g, 'status':'candidate'})
    print(individual['genes'])
    print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'genes':individual['genes']}}))
    print('edit patient features')
    individual['features']=[]
    for f in features:
        # BUGFIX: escape the user-supplied feature name before embedding it
        # in a regex (names containing regex metacharacters broke matching)
        hpo=get_db(app.config['DB_NAME_HPO']).hpo.find_one({'name':re.compile('^'+re.escape(f)+'$',re.IGNORECASE)})
        if not hpo: continue
        # BUGFIX: de-duplicate by label; the old check compared the full HPO
        # document against a list of labels, so it never matched.
        if hpo['name'][0] in [h['label'] for h in individual['features']]: continue
        individual['features'].append({'id':hpo['id'][0], 'label':hpo['name'][0], 'observed':'yes'})
    print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'features':individual['features']}}))
    print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'observed_features':[f for f in individual['features'] if f['observed']=='yes']}}))
    print('edit patient consanguinity')
    individual['family_history']=individual.get('family_history',{})
    # Normalize case consistently across all three branches
    if consanguinity.lower()=='unknown':
        individual['family_history']['consanguinity']=None
    elif consanguinity.lower()=='yes':
        individual['family_history']['consanguinity']=True
    elif consanguinity.lower()=='no':
        individual['family_history']['consanguinity']=False
    print(get_db(app.config['DB_NAME_PATIENTS']).patients.update_one({'external_id':external_id},{'$set':{'family_history':individual['family_history']}}))
    # also trigger refresh of that individual for individuals summary page
    patient=Patient(external_id,get_db(app.config['DB_NAME_PATIENTS']))
    print(patient.consanguinity)
    print(patient.observed_features)
    print(patient.genes)
    print(patient.gender)
    individuals_update([external_id])
    return jsonify({'success': True}), 200
@app.route('/individual_json/<individual>')
@requires_auth
def individual_json(individual):
    """Return the patient record as JSON."""
    patients_db = get_db(app.config['DB_NAME_PATIENTS'])
    patient = Patient(individual, patient_db=patients_db)
    return patient.json()
@app.route('/individual/<individual>')
@requires_auth
#@cache.cached(timeout=24*3600)
def individual_page(individual):
    """Render the main page for one individual.

    Loads the patient, checks access for the session user, minimizes the
    HPO term set, gathers gene/RetNet lookups and the variant-table column
    order, and renders 'individual.html'.
    """
    patient=Patient(individual,patient_db=get_db(app.config['DB_NAME_PATIENTS']),variant_db=get_db(app.config['DB_NAME']),hpo_db=get_db(app.config['DB_NAME_HPO']))
    # make sure that individual is accessible by user
    if not lookup_patient(db=get_db(app.config['DB_NAME_USERS']),user=session['user'],external_id=individual): return 'Sorry you are not permitted to see this patient, please get in touch with us to access this information.'
    db=get_db()
    hpo_db=get_db(app.config['DB_NAME_HPO'])
    # Reduce the patient's HPO terms to a minimal non-redundant set
    patient.__dict__['hpo_ids']=lookups.hpo_minimum_set(get_db(app.config['DB_NAME_HPO']), patient.hpo_ids)
    hpo_gene=get_hpo_gene(patient.hpo_ids)
    # get pubmedbatch scores (currently not populated — stays empty)
    pubmedbatch = {}
    genes = {}
    # is this still updating?
    if type(pubmedbatch) is dict:
        update_status = pubmedbatch.get('status', 0)
    else:
        update_status=0
    # get known and retnet genes
    known_genes=[x['gene_name'] for x in db.retnet.find()]
    RETNET = dict([(i['gene_name'],i) for i in db.retnet.find({},projection={'_id':False})])
    print 'get pubmed score and RETNET'
    gene_info=dict()
    individuals=dict()
    genes=[]
    # figure out the order of columns from the variant row template
    # (parses the td class names out of the template source)
    table_headers=re.findall("<td class='?\"?(.*)-cell'?\"?.*>",file('templates/individual-page-tabs/individual_variant_row.tmpl','r').read())
    # demo users do not get the last column
    if session['user']=='demo': table_headers=table_headers[:-1]
    print table_headers
    # get a list of genes related to retinal dystrophy. only relevant to subset group of ppl. talk to Jing or Niko for other cohorts. Note that dominant p value only counts paitents with 1 qualified variant on the gene.
    # current setting: unrelated, exac_af 0.01 for recessive, 0.001 for dominant, cadd_phred 15
    print 'get phenogenon genes'
    retinal_genes = {}
    # NOTE(review): several template arguments below are passed as empty
    # placeholders (gene_info, update_status, retinal_genes, feature_venn)
    # rather than the locals computed above — confirm this is intentional.
    return render_template('individual.html',
            patient=patient,
            table_headers=table_headers,
            pubmedbatch=pubmedbatch,
            pubmed_db=get_db('pubmed_cache'),
            genes = genes,
            individuals=individuals,
            hpo_gene = hpo_gene,
            gene_info={},
            update_status = 0,
            retinal_genes = {},
            feature_venn = [])
def get_feature_venn(patient):
    """Build Venn-diagram data for a patient's observed HPO terms.

    Queries neo4j for (term, gene) pairs, then for every combination of the
    first (up to 5) terms computes the intersection of the terms' gene sets.
    Returns a list of {'key': [term names], 'value': [gene names]} entries.
    """
    # NOTE(review): the patient id is interpolated directly into the Cypher
    # query string — injection-prone; parameterized queries would be safer.
    s="""
    MATCH (p:Person)-[:PersonToObservedTerm]->(t:Term)--(g:Gene)
    WHERE p.personId='%s'
    RETURN t.termId, t.name, g.gene_id, g.gene_name
    """ % patient
    print(s)
    with neo4j_driver.session() as neo4j_session:
        result=neo4j_session.run(s)
        data = []
        for r in result:
            data.append({
                'hpo_id': r['t.termId'],
                'hpo_term': r['t.name'],
                'gene_id': r['g.gene_id'],
                'gene_name': r['g.gene_name']
            })
    # Unique (hpo_id, hpo_term) pairs
    hpo_terms=[(k,v,) for k, v, in dict([(x['hpo_id'],x['hpo_term'],) for x in data]).items()]
    # hpo_id -> list of gene names
    hpo_gene=dict()
    for x in data:
        hpo_gene[x['hpo_id']]=hpo_gene.get(x['hpo_id'],[])+[x['gene_name']]
    genes = {}
    feature_combo = []
    feature_venn = []
    print "get combinatorics of features to draw venn diagram"
    # All non-empty combinations, capped at 5 terms
    for i in range(len(hpo_terms[:5])):
        feature_combo.extend(itertools.combinations(range(len(hpo_terms)), i+1))
    print 'calculate Venn diagram'
    for combo in feature_combo:
        # construct features_venn key
        dic_key = [hpo_terms[i][1] for i in combo]
        for ind in range(len(combo)):
            if ind == 0:
                # Seed the entry with the first term's gene set
                x=hpo_terms[combo[ind]][0]
                feature_venn.append({'key': dic_key, 'value':list(frozenset(hpo_gene.get(x,"")))})
            else:
                # Intersect with each subsequent term's gene set
                tem = feature_venn[-1]['value']
                feature_venn[-1]['value'] = list(frozenset(feature_venn[-1]['value']) & frozenset(hpo_gene[hpo_terms[combo[ind]][0]]))
    return feature_venn
@app.route('/venn_json/<individual>')
@requires_auth
def venn_json(individual):
    """Return the feature/gene Venn-diagram data for an individual."""
    return jsonify(result=get_feature_venn(individual))
def patient_variants():
    """Annotate a patient's variant lists with known-gene / RetNet info and
    adjust pubmed scores.

    NOTE(review): this function references names that are not defined in any
    visible scope (patient, gene_info, known_genes, RETNET, pubmedbatch,
    individuals). It looks like code lifted out of individual_page() without
    its surrounding context and will raise NameError if called as-is —
    confirm before use.
    """
    # add known gene and retnet gene labels, and re-calculate pubmed_score
    for mm in ['rare_variants','homozygous_variants','compound_het_variants']:
        for v in patient.__dict__[mm]:
            if 'canonical_gene_name_upper' not in v: v['canonical_gene_name_upper']=v['Gene']
            gene=v['canonical_gene_name_upper']
            pubmed_key = '_'.join([gene,patient.get('pubmed_key','')])
            gene_info[gene]=dict()
            if gene in known_genes:
                gene_info[gene]['known']=True
                # NOTE(review): .get('pubmed_key', 0) looks up the literal
                # string rather than the pubmed_key variable — confirm.
                pubmedbatch[pubmed_key] = max(1,pubmedbatch.get('pubmed_key',0))
            if gene not in RETNET: continue
            gene_info[gene]['disease'] = RETNET[gene]['disease']
            gene_info[gene]['omim'] = RETNET[gene]['omim']
            gene_info[gene]['mode'] = RETNET[gene]['mode']
            pubmedbatch[pubmed_key] = max(1,pubmedbatch.get('pubmed_key',0))
            # Boost score for non-rare lists, or dominant-mode genes
            if mm != 'rare_variants' or ('d' in gene_info[gene]['mode'] and mm == 'rare_variants') :
                pubmedbatch[pubmed_key] = max(100,pubmedbatch[pubmed_key])
            if gene=='DRAM2':
                print pubmed_key
                print pubmedbatch[pubmed_key]
            if 'het_samples' not in v: print(v)
            # Collect per-sample variant lists for rarely-het variants
            for s in v['het_samples']:
                if v['HET_COUNT'] < 10:
                    individuals[s]=individuals.get(s,[])+[v]
def get_hpo_gene(hpo_ids):
    """Map each HPO id to the de-duplicated list of gene names linked to it
    in the phenotype_to_genes collection."""
    hpo_db=get_db(app.config['DB_NAME_HPO'])
    hpo_terms = [(i, hpo_db.hpo.find_one({'id':i})['name'][0]) for i in hpo_ids]
    # this has missing HPO ids. see IRDC_batch2_OXF_3001 and #HP:0000593
    hpo_gene=dict()
    for hpo_id,hpo_term, in hpo_terms:
        hpo_gene[hpo_id] = []
        for gene_name in [x['Gene-Name'] for x in hpo_db.ALL_SOURCES_ALL_FREQUENCIES_phenotype_to_genes.find({'HPO-ID':hpo_id},{'Gene-Name':1,'_id':0})]:
            hpo_gene[hpo_id]=hpo_gene.get(hpo_id,[])+[gene_name]
    # De-duplicate the gene lists
    for k in hpo_gene: hpo_gene[k]=list(frozenset(list(hpo_gene[k])))
    return hpo_gene
def find_item(obj, key):
    """Depth-first search nested dicts/lists for `key`; return its first value.

    Returns None when the key is absent. Strings inside lists are skipped
    (they are iterable but cannot contain the key).
    """
    if isinstance(obj, dict):
        # BUGFIX: the membership test must be restricted to dicts. Previously
        # 'key in obj' ran before the isinstance check, so a list containing
        # an element equal to `key` hit 'obj[key]' and raised TypeError
        # (list indices must be integers).
        if key in obj:
            return obj[key]
        for k in obj:
            if isinstance(obj[k], dict):
                item = find_item(obj[k], key)
                if item is not None:
                    return item
            elif isinstance(obj[k], list):
                for i in obj[k]:
                    if isinstance(i, str):
                        continue
                    item = find_item(i, key)
                    if item is not None:
                        return item
    elif isinstance(obj, list):
        for k in obj:
            if isinstance(k, dict):
                item = find_item(k, key)
                if item is not None:
                    return item
            elif isinstance(k, list):
                for i in k:
                    if isinstance(i, str):
                        continue
                    item = find_item(i, key)
                    if item is not None:
                        return item
    # Explicit "not found" (was an implicit None)
    return None
def exomiser(individual):
    """Score an individual's genes with the Monarch Exomiser hiPHIVE API.

    NOTE(review): this function references undefined names ('x', 'db',
    'hpo_db', 'patient_db') and will raise NameError if called as-is —
    it appears to be dead or partially-migrated code; confirm before use.
    """
    patient_hpo_terms=lookups.get_patient_hpo(hpo_db, patient_db, individual, ancestors=False)
    patient_hpo_terms = dict([(hpo['id'][0],{'id':hpo['id'][0],'name':hpo['name'][0], 'is_a':hpo.get('is_a',[])}) for hpo in patient_hpo_terms])
    patient_hpo_ids=patient_hpo_terms.keys()
    x['exomiser']=[]
    for g in list(set(x['genes'])):
        # Translate Ensembl gene id -> Entrez id (required by the API)
        r=db.ensembl_entrez.find_one({'Ensembl Gene ID':g})
        if not r or not r['EntrezGene ID']: continue
        x['entrezgeneid']=r['EntrezGene ID']
        url='http://monarch-exomiser-prod.monarchinitiative.org/exomiser/api/prioritise/?phenotypes=%s&prioritiser=hiphive&genes=%s&prioritiser-params=human,mouse,fish'%(','.join(patient_hpo_terms.keys()), x['entrezgeneid'])
        print(url)
        r=requests.get(url)
        # The API may return either a list of result sets or a single one
        if isinstance(r.json(),list):
            x['exomiser']+=r.json()[0]['results']
        else:
            x['exomiser']+=r.json()['results']
    # Sentinel score when the API returned nothing
    if len(x['exomiser'])<1: x['exomiser']=[{'score':-1}]
    # Keep only the highest-scoring result
    exomiser_scores=[xx['score'] for xx in x['exomiser']]
    i=exomiser_scores.index(max(exomiser_scores))
    x['exomiser']=x['exomiser'][i]
@app.route('/homozygous_variants_json/<individual>')
@requires_auth
def homozgous_variants(individual):
    """Return the patient's homozygous variants as JSON."""
    loaded_patient = Patient(individual,
                             patient_db=get_db(app.config['DB_NAME_PATIENTS']),
                             variant_db=get_db(app.config['DB_NAME']),
                             hpo_db=get_db(app.config['DB_NAME_HPO']))
    return jsonify(result=loaded_patient.homozygous_variants)
def merge_dicts(*dict_args):
    """Shallow-merge any number of dicts into a new dict.

    Later dicts win on key conflicts; the inputs are never mutated.
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
@app.route('/homozygous_variants_json2/<individual>')
@requires_auth
def homozygous_variants2(individual):
    """Neo4j-backed endpoint: rare homozygous variants for an individual.

    Query-string parameters allele_freq and kaviar_AF (default 0.001) bound
    the variant frequencies.
    """
    allele_freq=float(request.args.get('allele_freq',0.001))
    kaviar_AF=float(request.args.get('kaviar_AF',0.001))
    # NOTE(review): the individual id is interpolated directly into the
    # Cypher string — injection-prone; parameterized queries would be safer.
    s=""" MATCH
    (p)-[:PersonToObservedTerm]-(t:Term),
    (t)--(g:Gene)--(gv:GeneticVariant)-[:HomVariantToPerson]-(p:Person),
    (gv)--(tv:TranscriptVariant)
    WHERE p.personId='%s' AND gv.kaviar_AF < %f AND gv.allele_freq < %f
    WITH gv, g, t, tv
    OPTIONAL
    MATCH
    (gv)-[:HetVariantToPerson]-(p2:Person)
    OPTIONAL
    MATCH
    (gv)-[:HomVariantToPerson]-(p3:Person)
    RETURN gv,
    collect(distinct g),
    collect(distinct t),
    collect(distinct tv),
    collect(distinct p2),
    collect(distinct p3)
    """ % (individual,kaviar_AF,allele_freq,)
    print(s)
    with neo4j_driver.session() as neo4j_session:
        result=neo4j_session.run(s)
        # Merge the variant node with its collected genes/terms/transcripts
        # and the het/hom carrier lists into one flat JSON object per row
        return jsonify(result=[merge_dicts(dict(r[0]),
            {'genes':[dict(x) for x in r[1]]},
            {'terms':[dict(x) for x in r[2]]},
            {'transcript_variants':[dict(x) for x in r[3]]},
            {'het_individuals':[dict(x) for x in r[4]]},
            {'hom_individuals':[dict(x) for x in r[5]]}
            ) for r in result])
@app.route('/compound_het_variants_json2/<individual>',methods=['GET','POST'])
@requires_auth
def compound_het_variants2(individual):
    """Neo4j-backed endpoint: candidate compound-het variants (genes with
    more than one rare heterozygous variant in this individual).

    Query-string parameters allele_freq and kaviar_AF (default 0.01) bound
    the variant frequencies.
    """
    kaviar_AF=float(request.args.get('kaviar_AF',0.01))
    allele_freq=float(request.args.get('allele_freq',0.01))
    # NOTE(review): id interpolated directly into Cypher — injection-prone.
    s="""
    MATCH
    (p)-[:PersonToObservedTerm]-(t:Term),
    (g:Gene)--(gv:GeneticVariant)-[:HetVariantToPerson]-(p:Person)
    WHERE p.personId='%s' AND gv.kaviar_AF<%f and gv.allele_freq < %f
    WITH g, collect(distinct gv) AS cgv
    WHERE length(cgv) > 1
    UNWIND cgv as v
    OPTIONAL
    MATCH
    (v)-[:HetVariantToPerson]-(p2:Person)
    OPTIONAL
    MATCH
    (v)-[:HomVariantToPerson]-(p3:Person)
    RETURN v,
    collect(distinct g),
    collect(distinct p2),
    collect(distinct p3)
    """ % (individual,kaviar_AF,allele_freq)
    print(s)
    with neo4j_driver.session() as neo4j_session:
        result=neo4j_session.run(s)
        # terms/transcript_variants are returned empty here for a response
        # shape consistent with the sibling variant endpoints
        return jsonify(result=[ merge_dicts(
            dict(r[0]),
            {'terms':[]},
            {'genes':[dict(x) for x in r[1]]},
            {'transcript_variants':[]},
            {'het_individuals':[dict(x) for x in r[2]]},
            {'hom_individuals':[dict(x) for x in r[3]]}
            ) for r in result])
@app.route('/compound_het_variants_json/<individual>')
@requires_auth
def compound_het_variants(individual):
    """Return the patient's compound-het variants as JSON."""
    loaded_patient = Patient(individual,
                             patient_db=get_db(app.config['DB_NAME_PATIENTS']),
                             variant_db=get_db(app.config['DB_NAME']),
                             hpo_db=get_db(app.config['DB_NAME_HPO']))
    return jsonify(result=loaded_patient.compound_het_variants)
@app.route('/rare_variants_json2/<individual>')
@requires_auth
def rare_variants2(individual):
    """Neo4j-backed endpoint: rare heterozygous variants for an individual.

    Query-string parameters allele_freq and kaviar_AF (default 0.01) bound
    the variant frequencies.
    """
    kaviar_AF=float(request.args.get('kaviar_AF',0.01))
    allele_freq=float(request.args.get('allele_freq',0.01))
    # NOTE(review): id interpolated directly into Cypher — injection-prone.
    s=""" MATCH
    (p)-[:PersonToObservedTerm]-(t:Term),
    (t)--(g:Gene)--(gv:GeneticVariant)-[:HetVariantToPerson]-(p:Person),
    (gv)--(tv:TranscriptVariant)
    WHERE p.personId='%s' AND gv.kaviar_AF < %f AND gv.allele_freq < %f
    WITH gv, g, t, tv
    OPTIONAL
    MATCH
    (gv)-[:HetVariantToPerson]-(p2:Person)
    OPTIONAL
    MATCH
    (gv)-[:HomVariantToPerson]-(p3:Person)
    RETURN gv,
    collect(distinct g),
    collect(distinct t),
    collect(distinct tv),
    collect(distinct p2),
    collect(distinct p3)
    """ % (individual,kaviar_AF,allele_freq,)
    print(s)
    with neo4j_driver.session() as neo4j_session:
        result=neo4j_session.run(s)
        # One flat JSON object per variant row, matching the sibling endpoints
        return jsonify(result=[ merge_dicts(
            dict(r[0]),
            {'genes':[dict(x) for x in r[1]]},
            {'terms':[dict(x) for x in r[2]]},
            {'transcript_variants':[dict(x) for x in r[3]]},
            {'het_individuals':[dict(x) for x in r[4]]},
            {'hom_individuals':[dict(x) for x in r[5]]}
            ) for r in result])
def load_patient(individual,auth,pubmed_key,hpo='HP:0000001'):
    """Build/refresh a patient document with defaults and pubmed metadata.

    NOTE(review): 'p' is referenced but never defined, and 'patient' is
    accessed both as a dict (patient['rare_variants']) and as an object
    (patient.rare_variants). This function will raise NameError/AttributeError
    as written — it appears to be dead or partially-migrated code; confirm
    before use. The pubmed_key argument is also unconditionally overwritten
    below.
    """
    hpo_db=get_db(app.config['DB_NAME_HPO'])
    db = get_db()
    patient_db=get_db(app.config['DB_NAME_PATIENTS'])
    patient_id=individual
    # Skeleton PhenoTips-style patient document used as defaults
    patient={u'features': {u'observed': u'yes', u'type': u'phenotype', u'id': hpo}, 'clinicalStatus': {u'clinicalStatus': u'affected'}, u'ethnicity': {u'maternal_ethnicity': [], u'paternal_ethnicity': []}, u'family_history': {}, u'disorders': [], u'life_status': u'alive', u'reporter': u'', u'genes': [], u'prenatal_perinatal_phenotype': {u'prenatal_phenotype': [], u'negative_prenatal_phenotype': []}, u'prenatal_perinatal_history': {u'twinNumber': u''}, u'sex': u'U', u'solved': {u'status': u'unsolved'}}
    eid=patient_id
    if p: patient.update(p)
    # Count how many rare variants share each gene, and annotate each variant
    gene_counter=Counter([var['canonical_gene_name_upper'] for var in patient.rare_variants])
    for var in patient['rare_variants']: var['gene_count']=gene_counter[var['canonical_gene_name_upper']]
    patient["pubmedbatch_status"]=0
    pubmed_key="blindness-macula-macular-pigmentosa-retina-retinal-retinitis-stargardt"
    patient["pubmed_key"]=pubmed_key
@app.route('/individual_update/<individual>')
@requires_auth
def individual_update(individual):
    # Apply an update payload to a patient document, then redirect back to
    # the referring page (or return 'done' when there is no referrer).
    # NOTE(review): 'p' is not defined in this function nor visibly at module
    # level in this chunk -- presumably the parsed update payload; as written
    # this would raise NameError. Confirm where 'p' is bound.
    print 'UPDATE'
    print p
    print get_db(app.config['DB_NAME_PATIENTS']).patients.update({'external_id':individual},{'$set':p})
    print 'DB'
    print get_db(app.config['DB_NAME_PATIENTS']).patients.find_one({'external_id':individual})
    if request.referrer:
        referrer=request.referrer
        u = urlparse(referrer)
        # Rebuild scheme://host[:port] so the path can be swapped for the
        # individual's page.
        referrer='%s://%s' % (u.scheme,u.hostname,)
        if u.port: referrer='%s:%s' % (referrer,u.port,)
        return redirect(referrer+'/individual/'+individual)
    else:
        return 'done'
# Progress-bar query endpoint.
@app.route('/pubmedbatch_progress_bar/<id>')
def pubmedbatch_progress(id):
    """Return the JSON progress state of the current user's pubmedbatch job."""
    who = session.get('user')
    if not who:
        who = app.config['DEFAULT_USER']
    return jsonify(PROGRESS_BAR[who + id])
# Fetch cached pubmedbatch results for a given pubmed key.
@app.route('/pubmedbatch-cache/<pubmedkey>')
def pubmedbatch_getcache(pubmedkey):
    """Return the cached result document for *pubmedkey*, or '' when absent."""
    cached = get_db('pubmedbatch').cache.find_one({'key': pubmedkey}, {'_id': False})
    return jsonify(cached) if cached else jsonify('')
@app.route('/homozygous_individuals_json/<variant_id>')
@requires_auth
def get_homozygous_individuals(variant_id):
    # Return, as JSON, every Person connected to the variant by a
    # HomVariantToPerson relationship.
    # SECURITY NOTE(review): variant_id is interpolated straight into the
    # Cypher text -- injection risk; prefer driver query parameters.
    s="""
    MATCH
    (v)-[:HomVariantToPerson]-(p:Person)
    WHERE v.variantId='%s'
    RETURN p
    """ % variant_id
    with neo4j_driver.session() as neo4j_session:
        result=neo4j_session.run(s)
        # merge_dicts with a single dict: effectively a copy of each record.
        return jsonify(result=[ merge_dicts(
            dict(r[0])) for r in result])
@app.route('/heterozygous_individuals_json/<variant_id>')
@requires_auth
def get_heterozygous_individuals(variant_id):
    """Return, as JSON, every Person linked to *variant_id* heterozygously."""
    # SECURITY NOTE(review): variant_id is interpolated into the Cypher
    # text -- injection risk; prefer driver query parameters.
    s="""
    MATCH
    (v)-[:HetVariantToPerson]-(p:Person)
    WHERE v.variantId='%s'
    RETURN p
    """ % variant_id
    # FIX: the session was previously opened without ever being closed
    # (resource leak); use a context manager, matching the homozygous
    # endpoint.
    with neo4j_driver.session() as neo4j_session:
        result = neo4j_session.run(s)
        return jsonify(result=[merge_dicts(dict(r[0])) for r in result])
|
from django.core.management.base import BaseCommand
from usaspending_api.transactions.agnostic_transaction_loader import AgnosticTransactionLoader
from usaspending_api.transactions.models.source_assistance_transaction import SourceAssistanceTransaction
class Command(AgnosticTransactionLoader, BaseCommand):
    """Upsert Broker assistance transactions into a USAspending database."""

    help = "Upsert assistance transactions from a Broker database into an USAspending database"

    # Source (Broker) and destination (USAspending) table names come from the model.
    broker_source_table_name = SourceAssistanceTransaction().broker_source_table
    delete_management_command = "delete_assistance_records"
    destination_table_name = SourceAssistanceTransaction().table_name
    # Only active records are copied.
    extra_predicate = [{"field": "is_active", "op": "EQUAL", "value": "true"}]
    # Key under which the timestamp of the last successful load is tracked.
    last_load_record = "source_assistance_transaction"
    # Overlap window so records updated around the last load aren't missed.
    lookback_minutes = 15
    shared_pk = "afa_generated_unique"
    working_file_prefix = "assistance_load_ids"

    # Full reload: ids of every active record.
    broker_full_select_sql = 'SELECT "{id}" FROM "{table}" WHERE "is_active" IS TRUE'

    # Incremental load: active records in published/updated D2 submissions.
    broker_incremental_select_sql = """
    SELECT "{id}"
    FROM "{table}"
    WHERE
    "is_active" IS TRUE
    AND
    "submission_id" IN (
    SELECT "submission_id"
    FROM "submission"
    WHERE
    "d2_submission" IS TRUE
    AND "publish_status_id" IN (2, 3)
    {optional_predicate}
    )
    """
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaScene
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class VertexFormat(object):
    """Generated flatbuffers accessor for the DeepSeaScene VertexFormat table."""
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        """Return a VertexFormat wrapping the root table of *buf* at *offset*."""
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = VertexFormat()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsVertexFormat(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    # VertexFormat
    def Init(self, buf, pos):
        # Attach this accessor to the underlying flatbuffer table.
        self._tab = flatbuffers.table.Table(buf, pos)

    # VertexFormat
    def Attributes(self, j):
        # Return the j-th element of the attributes vector (8-byte structs),
        # or None when the vector is absent from the buffer.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            x = self._tab.Vector(o)
            x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 8
            from DeepSeaScene.VertexAttribute import VertexAttribute
            obj = VertexAttribute()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # VertexFormat
    def AttributesLength(self):
        # Number of elements in the attributes vector (0 when absent).
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # VertexFormat
    def AttributesIsNone(self):
        # True when the attributes vector is not present in the buffer at all.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        return o == 0

    # VertexFormat
    def Instanced(self):
        # Bool field; defaults to False when not written.
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False
# Module-level builder helpers (flatbuffers codegen layout).

def Start(builder): builder.StartObject(2)

def VertexFormatStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)

def AddAttributes(builder, attributes): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(attributes), 0)

def VertexFormatAddAttributes(builder, attributes):
    """This method is deprecated. Please switch to AddAttributes."""
    return AddAttributes(builder, attributes)

def StartAttributesVector(builder, numElems): return builder.StartVector(8, numElems, 4)

def VertexFormatStartAttributesVector(builder, numElems):
    """This method is deprecated. Please switch to Start."""
    return StartAttributesVector(builder, numElems)

def AddInstanced(builder, instanced): builder.PrependBoolSlot(1, instanced, 0)

def VertexFormatAddInstanced(builder, instanced):
    """This method is deprecated. Please switch to AddInstanced."""
    return AddInstanced(builder, instanced)

def End(builder): return builder.EndObject()

def VertexFormatEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
import six
import timeit
def do(setup_statements, statement, times):
    """Return the average runtime in seconds of *statement* over *times* runs.

    *setup_statements* is a list of setup lines, joined with newlines
    (pattern extracted from timeit.py).
    """
    timer = timeit.Timer(stmt=statement, setup="\n".join(setup_statements))
    return timer.timeit(times) / times
def test_importaddress():
    """Benchmark HD-wallet serialisation and key generation for several paths."""
    entropy = "b3d45565178cbc13b91a52db39ff5ef6b2d9b464ee6c250c84bb1b63459970a4"
    for path in ["m/44'", "m/44'/0'", "m/44'/0'/0'", "m/44'/0'/0'/0"]:
        stmt_import = "from importaddress.hdprotocol import serialize"
        stmt_serialize = """bip = serialize(path="{path}", entropy="{entropy}")""".format(path=path, entropy=entropy)
        # default poolsize is 8.
        stmt_generate = "key = bip.generate(20)"
        stmt_generate_single_pool = "key = bip.generate(20, poolsize=1)"  # kept for reference; unused
        init = do([stmt_import], stmt_serialize, 4)
        genkey = do([stmt_import, stmt_serialize], stmt_generate, 7)
        print("{:<13s}: init:{:7f}s, generate 20 keys:{:7f}s.".format(path, init, genkey))
if __name__ == '__main__':
test_importaddress() |
import csv
from fpdf import FPDF
def convert(source:str, destination:str, orientation="P", delimiter=",",
    font=None, headerfont=None, align="C", size=8, headersize=10) -> None:
    """
    Convert a CSV file to a PDF file ( .csv -> .pdf ).

    :param source: str, required. Path of the CSV file to convert.
    :param destination: str, required. Path of the PDF file to generate.
    :param orientation: str, optional. "P" (portrait) or "L" (landscape). Default "P".
    :param delimiter: str, optional. Delimiter used to read the CSV file. Default ",".
    :param font: str, optional. Path of a Unicode font for the CSV data rows.
    :param headerfont: str, optional. Path of a Unicode font for the CSV header row.
    :param align: str, optional. Cell alignment: "J", "C", "L" or "R". Default "C".
    :param size: int, optional. Font size for the CSV data. Default 8.
    :param headersize: int, optional. Font size for the CSV header. Default 10.
    """
    if orientation not in ["P", "L"]:
        # FIX: message typos ('Potrait' -> 'Portrait'; missing quote after 'L').
        raise Exception("Orientation Error: Invalid orientation parameter!\
        \nExpected values: 'P' ➣ Portrait | 'L' ➣ Landscape")
    if align not in ["J", "C", "L", "R"]:
        raise Exception("Alignment Error: Invalid alignment parameter!\
        \nExpected values: 'J' ➣ Justify | 'C' ➣ Center | 'L' ➣ Left | 'R' ➣ Right")
    if not (isinstance(size, int) and isinstance(headersize, int)):
        raise Exception("Type Error: Font Size should be of int data type")
    PDF = FPDF(orientation)
    PDF.add_page()
    with open(source) as CSV:
        data = [row for row in csv.reader(CSV, delimiter=delimiter)]
    header = data[0]
    rows = data[1:]
    # BUG FIX: the column-count variable was named 'max', shadowing the builtin.
    column_count = len(header)
    for row in rows:
        if len(row) > column_count:
            column_count = len(row)
    # Pad header and every row to a uniform column count with blank cells.
    header.extend(list(" " * (column_count - len(header))))
    for row in rows:
        row.extend(list(" " * (column_count - len(row))))
    if headerfont is None:
        # BUG FIX: the header previously used 'size' instead of 'headersize'.
        PDF.set_font("Courier", "B", size=headersize)
    else:
        # BUG FIX: the custom header font previously loaded 'font' instead of
        # 'headerfont', so a header-only font could never be applied.
        PDF.add_font("header-font", "", headerfont, uni=True)
        PDF.set_font("header-font", size=headersize)
    line_height = PDF.font_size * 2.5
    col_width = PDF.epw / column_count
    for cell in header:
        PDF.multi_cell(col_width, line_height, cell, align=align, border=1,
                       ln=3, max_line_height=PDF.font_size)
    PDF.ln(line_height)
    if font is None:
        PDF.set_font("Courier", size=size)
    else:
        PDF.add_font("normal-font", "", font, uni=True)
        PDF.set_font("normal-font", size=size)
    line_height = PDF.font_size * 2.5
    for cells in rows:
        for cell_value in cells:
            PDF.multi_cell(col_width, line_height, cell_value, align=align, border=1,
                           ln=3, max_line_height=PDF.font_size)
        PDF.ln(line_height)
    PDF.output(destination)
|
import time
class MemCache(object):
    """A tiny in-process key/value cache with an optional global timeout.

    A timeout <= 0 means entries never expire.
    """

    class Record:
        # One cached entry: the stored value plus its insertion timestamp.
        def __init__(self, value):
            self.time = time.time()
            self.value = value

        def clear(self):
            # Drop the payload only; the key stays in the cache dict.
            self.value = None

        def expired(self, timeout):
            # Expired only when a positive timeout has fully elapsed.
            return timeout > 0 and time.time() - self.time >= timeout

    def __init__(self, timeout=-1):
        self._timeout = timeout
        self._data = {}

    def set(self, key, value):
        self._data[key] = MemCache.Record(value)

    def get(self, key):
        record = self._data.get(key)
        if record is None:
            return None
        if not record.expired(self._timeout):
            return record.value
        # Expired: blank the payload but keep the key (faster than deleting).
        record.clear()
        return None
if __name__ == '__main__':
    import unittest

    class TestMemCacheTest(unittest.TestCase):
        def setUp(self):
            self.cache = MemCache(timeout=1)

        def expire(self):
            # NOTE(review): not named test_*, so unittest never runs this.
            # It also calls MemCache.expired(), which does not exist
            # (expired lives on MemCache.Record) -- would raise if executed.
            self.cache.set("message", "hello")
            self.assertFalse(self.cache.expired(1))
            time.sleep(4)
            self.assertTrue(self.cache.expired(1))

        def test_have_key(self):
            # Entry is readable before the 1s timeout and gone afterwards.
            self.cache.set('message', 'hello')
            self.assertTrue(self.cache.get('message'))
            time.sleep(1.1)
            self.assertFalse(self.cache.get('message'))
            self.assertTrue(self.cache.get("message") is None)

    unittest.main()
|
def solution(sequence):
    """Classify each consecutive pair as rising/falling and decide by parity.

    Returns True unless the number of falling steps is even and non-zero.
    """
    trend = []
    decreases = 0
    increases = 0
    for idx in range(len(sequence)):
        if (idx + 1) < len(sequence):
            left = sequence[idx]
            right = sequence[idx + 1]
            print(left, right)
            trend.append(-1 if left > right else 1)  # -1 decrease, 1 increase
    for step in trend:
        if step == -1:
            decreases += 1
        else:
            increases += 1
    print(trend)
    print("\n", decreases, increases)
    if decreases % 2 == 0:
        return decreases == 0
    return True

print(solution(sequence=[1, 2, 1, 2]))
|
# -*- coding: utf-8 -*-
"""Constants for BEL Commons."""
from pybel.constants import METADATA_CONTACT, METADATA_DESCRIPTION, METADATA_LICENSES
# Names of environment/config keys read by the application.
BEL_COMMONS_STARTUP_NOTIFY = 'BEL_COMMONS_STARTUP_NOTIFY'
SENTRY_DSN = 'SENTRY_DSN'
SWAGGER = 'SWAGGER'
SQLALCHEMY_DATABASE_URI = 'SQLALCHEMY_DATABASE_URI'
SQLALCHEMY_TRACK_MODIFICATIONS = 'SQLALCHEMY_TRACK_MODIFICATIONS'
MAIL_DEFAULT_SENDER = 'MAIL_DEFAULT_SENDER'
SERVER_NAME = 'SERVER_NAME'

# Shown when uploading a graph whose name/version pair already exists.
integrity_message = "A graph with the same name ({}) and version ({}) already exists. If there have been changes " \
                    "since the last version, try bumping the version number."

#: Label for nodes' differential gene expression values
LABEL = 'dgxa'

# Reserved query-string argument names.
NETWORK_ID = 'network_id'
SOURCE_NODE = 'source'
TARGET_NODE = 'target'
UNDIRECTED = 'undirected'
FORMAT = 'format'
PATHOLOGY_FILTER = 'pathology_filter'
PATHS_METHOD = 'paths_method'
QUERY = 'query'
AND = 'and'
RANDOM_PATH = 'random'

# Argument names excluded from generic filter handling.
BLACK_LIST = {
    NETWORK_ID,
    SOURCE_NODE,
    TARGET_NODE,
    UNDIRECTED,
    FORMAT,
    PATHOLOGY_FILTER,
    PATHS_METHOD,
    QUERY,
    AND,
}

# Configuration passed to flasgger/Swagger UI.
SWAGGER_CONFIG = {
    'title': 'BEL Commons API',
    'description': 'This exposes the functions of PyBEL as a RESTful API',
    'contact': {
        'responsibleDeveloper': 'Charles Tapley Hoyt',
        'email': 'cthoyt@gmail.com',
    },
    'version': '0.1.0',
}

# Fallback metadata for uploaded BEL documents.
# NOTE(review): the values are set literals ({'...'}), not plain strings --
# confirm downstream consumers really expect sets here.
DEFAULT_METADATA = {
    METADATA_DESCRIPTION: {'Document description'},
    METADATA_CONTACT: {'your@email.com'},
    METADATA_LICENSES: {'Document license'},
}
|
from .models import Entry
def latest_entries():
    """Return up to the two newest published entries.

    If the two newest entries are both full (non-micro) posts, only the
    single newest one is kept.
    """
    try:
        entries = (
            Entry.objects.filter(status=Entry.PUBLISHED_STATUS)
            .order_by("-created_at")
            .select_related()[:2]
        )
        if entries.count() > 1 and not entries[0].is_micro and not entries[1].is_micro:
            entries = entries[:1]
    except Entry.DoesNotExist:
        # NOTE(review): filter()/slicing never raise DoesNotExist (only
        # get() does), so this branch is believed dead -- confirm and drop.
        entries = None
    return entries
|
from __future__ import absolute_import, print_function
import os
import pandas as pd
import numpy as np
from .BaseStructProtocol import BaseStructProtocol
from codifyComplexes.CodifyComplexException import CodifyComplexException
from computeFeatures.seqStep.seqToolManager import SeqToolManager
AA_CODE_ELEMENTS= SeqToolManager.AA_CODE_ELEMENTS
'''
(feature_name, path_to_dir, columns ). If columns==None, all columns will be used
Structural features must come first, as sequential single-chain features may contain more aminoacids
(e.g. non 3D-solved residues)
'''
FEATURES_TO_INCLUDE_CHAIN= [
("psaia", ("structStep/PSAIA/procPSAIA", None)),
("halfSphereExpos", ("structStep/halfSphereExpos", None)),
("dssp", ("structStep/DSSP", [3])),
("al2co", ("seqStep/conservation/al2co",None)),
("winPssms", ("seqStep/conservation/pssms/windowedPSSMs/wSize11", None)),
("winSeq", ("seqStep/slidingWinSeq11", None))
]
FEATURES_TO_INCLUDE_PAIR= [
("corrMut", ("seqStep/conservation/corrMut", None)),
]
class StructProtocol(BaseStructProtocol):
    '''
    This class implements structural voronoi environment codification
    '''
    def __init__(self, dataRootPath, cMapPath, prevStepPaths=None,
                 singleChainfeatsToInclude=FEATURES_TO_INCLUDE_CHAIN,
                 pairfeatsToInclude=FEATURES_TO_INCLUDE_PAIR, verbose=False):
        '''
        :param dataRootPath: str. A path to computedFeatures directory that contains needed features. Example:
                      computedFeatures/
                        common/
                          contactMaps/
                        seqStep/
                          conservation/
                          ...
                        structStep/
                          PSAIA/
                          VORONOI/
                          ...
        :param cMapPath: str. A path to a dir that contains the contact map of the protein complex
        :param prevStepPaths: str or str[]. A path to previous results files directory. If it is None, contactMaps files will be used
                              to define which residue pairs are in contact. Can also be a str[] if multiple feedback_path's
                              wanted
        :param singleChainfeatsToInclude: list. (feature_name, (path, columns)) specs for single-chain features.
        :param pairfeatsToInclude: list. (feature_name, (path, columns)) specs for pairwise features.
        :param verbose: bool. Print progress information.
        '''
        # BUG FIX: the two feature-list arguments were previously ignored --
        # the module-level constants were always forwarded to the parent, so
        # callers could never customise the features. Pass the actual
        # arguments through (the defaults preserve the old behaviour).
        BaseStructProtocol.__init__(self, dataRootPath, cMapPath, prevStepPaths,
                                    singleChainfeatsToInclude=singleChainfeatsToInclude,
                                    pairfeatsToInclude=pairfeatsToInclude, verbose=verbose)

    def loadSingleChainFeatures(self, prefixOneChainType, chainType):
        '''
        @overrides BaseStructProtocol method to make use of sequence profiles (loaded directly) and struct
        neighbour but not computing struct neighbours on non central residue features of sliding window

        Loads all features files computed for ligand or receptor chains. Returns a pandas.DataFrame
        that contains in each row all features from all files for each amino acid. Just amino acids
        that appears in each file will be included. Others will be ruled out (intersection)

        :param prefixOneChainType: str. A prefixOneChainType that identifies the receptor or ligand
        :param chainType: str. "l" for ligand and "r" for receptor
        :return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
                    one amino acid
                    Column names are:
                    'chainId%s', 'resId%s', 'resName%s', [properties] #no defined order for properties
                    %s is L if chainType=="l" and R if chainType=="r"
        '''
        #super (BaseStructProtocol,self) is AbstractProtocol
        singleChainFeats= super(BaseStructProtocol,self).loadSingleChainFeatures( prefixOneChainType, chainType) #Load with no aggregation
        chainType= chainType.upper()
        # Window size recovered from pssm window column names (e.g. "pssmWin.0.5L").
        winSize= max([ int(elem.split(".")[-1][:-1]) for elem in singleChainFeats.columns if elem.startswith("pssmWin") ])+1
        centralRes= winSize//2
        #find variables that will not be considered for structural aggreation: sliding window features of non central amino acids
        selectedSeqEntr= set([ 'informationWin.%d.%d%s'%(i, centralRes, chainType) for i in range(2)])
        selectedPssm= set([ 'pssmWin.%d.%d%s'%(i, centralRes, chainType) for i in range(20)])
        selectedPsfm= set([ 'psfmWin.%d.%d%s'%(i, centralRes, chainType) for i in range(20)])
        selectedWinAA= set([ 'aaWin.0.%d_dummy_%s%s'%(centralRes,letter, chainType) for letter in AA_CODE_ELEMENTS ])
        #this variables will be aggregated
        centralResCols= selectedSeqEntr.union(selectedPssm).union(selectedPsfm).union(selectedWinAA)
        winCols= set([col for col in singleChainFeats.columns if not "ggr" in col and "Win" in col ])
        #this variables will not be aggreaged
        allWinButCentralCols= winCols.difference(centralResCols)
        allButWinData= singleChainFeats[ [col for col in singleChainFeats.columns if not col in allWinButCentralCols] ]
        winData= singleChainFeats[ list(singleChainFeats.columns[:3])+[col for col in singleChainFeats.columns if col in allWinButCentralCols] ]
        # print( list( allButWinData.columns) );raw_input("enter")
        singleChainFeats= self.addSingleChainAggregation(allButWinData, chainType)
        mergeOn= [ elem%chainType.upper() for elem in ["chainId%s", "resId%s", "resName%s"] ]
        singleChainFeats= pd.merge(singleChainFeats, winData, how='inner', on=mergeOn)
        return singleChainFeats
# uncomment to use product terms
# def addProductTerms(self, df):
# winSize= max([ int(elem.split(".")[-1][:-1]) for elem in df.columns if elem.startswith("pssmWin") ])+1
# centralRes= winSize//2
# selectedColsL= sorted(['pssmWin.%d.%d%s'%(i, centralRes, "L") for i in range(20)] +
# [ 'total_RASAL', 'average_DPXL', 'HydrophobicityL'])
# selectedColsR= sorted(['pssmWin.%d.%d%s'%(i, centralRes, "R") for i in range(20)] +
# [ 'total_RASAR', 'average_DPXR', 'HydrophobicityR'])
# print(selectedColsL)
# for colL in selectedColsL:
# for colR in selectedColsR:
# df[ colL+colR+"_P"]= df[colL]*df[colR]
# return df
# def applyProtocol( self, prefixComplex, prefixL, prefixR):
# '''
# This method is the basic skeleton for applyProtocol of subclasses
# Given a prefix that identifies the complex and prefixes that identifies
# the ligand and the receptor, this method integrates the information that
# is contained in self.dataRootPath and is described in self.singleChainfeatsToInclude
#
# :param prefixComplex: str. A prefix that identifies a complex
# :param prefixL: str. A prefix that identifies the ligand of the complex
# :param prefixR: str. A prefix that identifies the receptor of the complex
# :return df: pandas.DataFrame. A pandas.Dataframe in which each row represents
# a pair of amino acids in direct form (L to R).
# Column names are:
# 'chainIdL', 'resIdL', 'resNameL', 'chainIdR', 'resIdR', 'resNameR', 'categ'
# [ propertiesL .... propertiesR .... propertiesP]
# '''
# allPairsCodified= super(StructProtocol,self).applyProtocol( prefixComplex, prefixL, prefixR)
# allPairsCodified= self.addProductTerms(allPairsCodified)
# allPairsCodified= self.reorderColumns(allPairsCodified)
# return allPairsCodified
|
import typing as t
from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
from .tests import TESTS as DEFAULT_TESTS # noqa: F401
from .utils import Cycler
from .utils import generate_lorem_ipsum
from .utils import Joiner
from .utils import Namespace
# defaults for the parser / lexer
BLOCK_START_STRING = "{%"
BLOCK_END_STRING = "%}"
VARIABLE_START_STRING = "{{"
VARIABLE_END_STRING = "}}"
COMMENT_START_STRING = "{#"
COMMENT_END_STRING = "#}"
# Line-statement / line-comment syntax is disabled unless configured.
LINE_STATEMENT_PREFIX: t.Optional[str] = None
LINE_COMMENT_PREFIX: t.Optional[str] = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = "\n"
KEEP_TRAILING_NEWLINE = False

# default filters, tests and namespace
DEFAULT_NAMESPACE = {
    "range": range,
    "dict": dict,
    "lipsum": generate_lorem_ipsum,
    "cycler": Cycler,
    "joiner": Joiner,
    "namespace": Namespace,
}

# default policies
DEFAULT_POLICIES = {
    "compiler.ascii_str": True,
    "urlize.rel": "noopener",
    "urlize.target": None,
    "urlize.extra_schemes": None,
    "truncate.leeway": 5,
    "json.dumps_function": None,
    "json.dumps_kwargs": {"sort_keys": True},
    "ext.i18n.trimmed": False,
}
|
from collections import namedtuple
# One fabric claim, parsed from lines like "#id @ left,top: widthxheight".
Order = namedtuple("Order", "id shiftleft shifttop width height")

# NOTE: runs at import time and reads data.txt from the working directory.
# Strip all punctuation so each line becomes five whitespace-separated ints.
with open("data.txt") as f:
    orders = [
        Order(*list(map(int, x.split())))
        for x in (
            f.read()
            .replace("#", "")
            .replace("@ ", "")
            .replace(",", " ")
            .replace(":", "")
            .replace("x", " ")
            .split("\n")
        )
    ]

# fabric[row][col] is the set of claim ids covering that square inch.
fabric = list(list(set() for _ in range(1002)) for x in range(1002))
for o in orders:
    for i in range(o.shifttop, o.shifttop + o.height):
        for j in range(o.shiftleft, o.shiftleft + o.width):
            fabric[i][j].add(o.id)
def part1():
    """Count the square inches claimed by two or more orders."""
    contested = 0
    for row in fabric:
        for claims in row:
            if len(claims) >= 2:
                contested += 1
    return contested
def part2():
    """Return the id of the only order that overlaps no other order."""
    overlapping = set()
    for row in fabric:
        for claims in row:
            if len(claims) >= 2:
                overlapping |= claims
    intact = {o.id for o in orders} - overlapping
    return list(intact)[0]
print("1)", part1(), "\n2)", part2())
|
from setuptools import setup, find_packages
# https://docs.djangoproject.com/en/1.11/intro/reusable-apps/
version = "0.3.8"

setup(
    name="kwikapi-tornado",
    version=version,
    packages=["kwikapi.tornado"],
    include_package_data=True,
    license="MIT License",  # example license
    description="Quickest way to build powerful HTTP APIs in Python",
    url="https://github.com/deep-compute/kwikapi.tornado",
    download_url="https://github.com/deep-compute/kwikapi.tornado/tarball/%s" % version,
    author="Deep Compute, LLC",
    author_email="contact@deepcompute.com",
    # NOTE(review): dependencies are hard-pinned (==); consider version
    # ranges if this package is meant to be consumed as a library.
    install_requires=["tornado==5.0.2", "deeputil==0.2.9", "requests==2.20.0"],
    classifiers=[
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
    ],
)
|
#!/usr/bin/python3
# Zadani:
# V tomto prikladu budeme pracovat se stromy aritmetickych vyrazu, pojmenujme
# si je ATree. ATree je binarni strom obsahujici v listech cisla a v jinych
# uzlech operatory. Operatory jsou nasledujici:
# '+' je binarni plus, ktere secte prvni a druhy argument,
# '*' je binarni krat, ktere vynasobi prvni a druhy argument, a
# '-' je unarni minus, ktere vraci zapornou hodnotu jedineho argumentu.
# Arita uzlu je dana aritou operace, podle operace bude mit uzel korektni pocet
# potomku.
# Testy implementovanych funkci generuji stromy v graphvizu.
# Soubory .dot vykreslite napr. na http://www.webgraphviz.com/.
class Node:
    """A node in an arithmetic-expression tree (ATree).

    Attributes:
        value     a number (leaf) or an operator string
        children  list of child nodes, ordered left to right
    """
    def __init__(self):
        # Callers fill these in after construction.
        self.value = None
        self.children = []
class ATree:
    """An arithmetic-expression tree.

    Attributes:
        root  the root node of the expression tree
    """
    def __init__(self, root=None):
        self.root = root
# Ukol 1. (10 bodu)
# Implementujte funkci eval_atree(tree), ktera vyhodnoti vyraz zadany vstupnim
# stromem 'tree'.
#
# Muzete pocitat s tim, ze na vstupu je korektni ATree obsahujici alespon jeden
# uzel. Napriklad nasledujici strom se vyhodnoti na hodnotu -8.
# '-'
# |
# '*'
# / \
# 2 '+'
# / \
# 1 3
#
# Strom na vstupu nijak nemodifikujte.
def evaluation(operator, lhs, rhs):
    """Apply *operator*: '+' and '*' are binary; '-' is unary (negates lhs)."""
    if operator == '+':
        return lhs + rhs
    elif operator == '*':
        return lhs * rhs
    elif operator == '-':
        # Unary minus ignores rhs entirely.
        return (-1) * lhs
def is_operator(character):
    """Return True when *character* is one of the supported operators."""
    return character in ("*", "+", "-")
def eval_atree_recursion(node):
    """Recursively evaluate the subtree rooted at *node*."""
    if not node.children:
        # Leaf: a constant.
        return node.value
    left = eval_atree_recursion(node.children[0])
    # Unary operators have a single child; pass None as the right operand.
    right = eval_atree_recursion(node.children[1]) if len(node.children) == 2 else None
    return evaluation(node.value, left, right)
def eval_atree(tree):
    """
    input:  'tree' -- a valid ATree with at least one node; do not modify it
    output: the integer value the arithmetic expression evaluates to
    time complexity: O(n), where 'n' is the number of nodes in 'tree'
    """
    return eval_atree_recursion(tree.root)
# Ukol 2. (10 bodu)
# Implementujte funkci constants_to_array(tree), ktera vrati pole vsech
# cisel ve strome 'tree'. Poradi cisel v poli odpovida poradi zleva
# doprava ve vstupnim stromu. Funkce pro strom nize vrati pole [2, 1, 3].
# '-'
# |
# '*'
# / \
# 2 '+'
# / \
# 1 3
#
# Strom na vstupu nijak nemodifikujte.
def constants_to_array_recursion(node, result):
    """Append the leaf constants of *node*'s subtree to *result*, left to right."""
    if not is_operator(node.value):
        result.append(node.value)
    for child in node.children:
        constants_to_array_recursion(child, result)
def constants_to_array(tree):
    """
    input:  'tree' -- a valid ATree with at least one node; do not modify it
    output: list of the tree's constants in left-to-right order
    time complexity: O(n), where 'n' is the number of nodes in 'tree'
    """
    result = []
    constants_to_array_recursion(tree.root, result)
    return result
# Ukol 3. (15 bodu)
# Vybudujte strom aritmetickych vyrazu ze vstupniho pole, ktere popisuje vyraz
# v prefixove notaci. Prefixova notace je zapis aritmetickeho vyrazu, kde
# operator predchazi sve operandy. Napr. vyraz 1 + 3 by se prefixove zapsal
# jako + 1 3. V poli jsou ulozeny hodnoty jako retezce operaci (napr.
# '+'), nebo jako cela cisla. Pro vyraz v poli ['-', '*', 2, '+', 1, 3]
# vygenerujete nasledujici strom:
#
# '-'
# |
# '*'
# / \
# 2 '+'
# / \
# 1 3
#
# V poli dostanete vzdy validni vyraz (se spravnym poctem cisel), pouze ze
# tri zminenych operaci '+', '*' a '-'. Pokud presto chcete testovat, zda je
# v poli cislo, muzete pouzit test type(x) == int. Pripominame, ze operace
# array.pop(0) ma linearni slozitost k delce pole. Za reseni nesplnujici
# zadanou celkovou slozitosti nemuze ziskat plny pocet bodu. Vstupni pole nijak
# nemodifikujte.
def create_node(value):
    """Return a freshly constructed Node holding *value*."""
    fresh = Node()
    fresh.value = value
    return fresh
def get_number_of_ascendents(value):
    """Arity of operator *value*: unary minus takes 1 operand, others take 2."""
    return 1 if value == "-" else 2
def build_atree_recursion(array):
    """Build a subtree by popping tokens off the end of *array*.

    *array* holds the prefix expression reversed, so pop() yields tokens in
    prefix order. The list is consumed (mutated) by this function.
    """
    token = array.pop()
    if not is_operator(token):
        # Constant leaf.
        return create_node(token)
    node = create_node(token)
    # First operand always exists; binary operators take a second one.
    node.children.append(build_atree_recursion(array))
    if token != "-":
        node.children.append(build_atree_recursion(array))
    return node
def build_atree(array):
    """Build an ATree from *array* (prefix notation) without modifying it."""
    tokens = list(reversed(array))  # private copy; recursion consumes it
    return ATree(build_atree_recursion(tokens))
"""
def build_atree(array):
# vstup: 'array' pole obsahujici vyraz v prefixove notaci (obsahuje
alespon jeden prvek), pole nijak nemodifikujte
# output: korektni strom typu ATree popisujici stejny vyraz jako 'array'
# casova slozitost: O(n), kde 'n' je pocet prvku v poli 'array'
tree = ATree()
parent = create_node(array[0])
tree.root, ascendents = parent, get_number_of_ascendents(parent.value)
for i in range(1, len(array)):
node = Node()
node.value = array[i]
parent.children.append(node)
ascendents -= 1
if ascendents == 0:
ascendents = get_number_of_ascendents(node.value)
parent = node
return tree
"""
# Ukol 4. (15 bodu)
# Implementujte funkci simplify_atree(tree), ktera ze zadaneho stromu odstrani
# redundantni uzly podle nasledujicich pravidel. V tomto ukolu upravujte primo
# strom na vstupu.
# Pravidla:
# a) pricitani 0: pokud je jeden z potomku scitani cislo 0, pak tento
# operand spolu s uzlem scitani odstrante:
# '-' '-'
# | |
# '+' -> '+'
# / \ / \
# 0 '+' 1 3
# / \
# 1 3
# b) nasobeni -1: prevedte na unarni minus pro druhy z operandu:
# '*' '-'
# / \ |
# -1 '+' -> '+'
# / \ / \
# 1 3 1 3
# Obe operace funguji symetricky pro oba podstromy.
def simplify_atree_recursion(node):
    # Simplify bottom-up and return the (possibly replaced) subtree root.
    node.children = [simplify_atree_recursion(child) for child in node.children]
    if node.value == '+':
        # Rule a) adding 0: drop the addition node and the 0 operand.
        if node.children[0].value == 0:
            return node.children[1]
        if node.children[1].value == 0:
            return node.children[0]
    if node.value == '*':
        # Rule b) multiplication by -1: replace with unary minus of the
        # other operand.
        if node.children[0].value == -1:
            minus = Node()
            minus.value = '-'
            minus.children = [node.children[1]]
            return minus
        if node.children[1].value == -1:
            minus = Node()
            minus.value = '-'
            minus.children = [node.children[0]]
            return minus
    return node

def simplify_atree(tree):
    """
    input:  'tree' -- a valid ATree with at least one node
    output: none; the tree is simplified in place (rules a and b above)
    time complexity: O(n), where 'n' is the number of nodes in 'tree'
    """
    # The root itself may be eliminated (e.g. a top-level '+ 0 x'), so
    # rebind it to whatever the recursion returns.
    tree.root = simplify_atree_recursion(tree.root)
"""
Soubory .dot z testu vykreslite napr. na http://www.webgraphviz.com/.
"""
# how many errors are displayed
MaxErrCount = 2
########################################################################
# Nasleduje kod testu, NEMODIFIKUJTE JEJ #
########################################################################
def make_graph(tree, file_name):
    """
    Write a Graphviz (.dot) representation of *tree* into *file_name*.

    Graphviz is a tool for visualising data structures, which is useful
    mainly for debugging. To render the output you can use e.g.
    http://www.webgraphviz.com/
    """
    def make_node(n_id, label):
        # Emit one node declaration.
        f.write("{} [label=\"{}\"]\n".format(n_id, label))

    def make_edge(n1, n2):
        # Emit one parent -> child edge.
        f.write("{} -> {}\n".format(n1, n2))

    def make_graphviz(node):
        # Pre-order walk; id() of each node serves as its unique identifier.
        make_node(id(node), node.value)
        for n in node.children:
            make_edge(id(node), id(n))
            make_graphviz(n)

    with open(file_name, 'w') as f:
        f.write("digraph Tree {\n")
        f.write("node [color=black, ordering=\"out\"];\n")
        if tree is not None:
            if tree.root is not None:
                make_graphviz(tree.root)
        f.write("}\n")
class Ib002BuiltTrees:
    # Test fixture: one input expression plus the expected outputs of the
    # exercises (evaluation, constants, built tree, simplified tree).
    def __init__(self, array, res=None, constants=None, tree=None,
                 simplified=None):
        self.array = array            # input expression in prefix notation
        self.result = res             # expected eval_atree value
        self.constants = constants    # expected constants_to_array output
        self.tree = tree              # expected built tree (serialised form)
        self.simplified = simplified  # expected simplified tree (serialised form)
def ib002_deserialise_tree(array):
    # Rebuild an ATree from its serialised array form (test helper).
    # Tokens are consumed from the end; completed subtrees wait on 'nodes'
    # until an operator token claims them as children.
    stack = list(array)
    nodes = []
    while stack:
        n = Node()
        n.value = stack.pop()
        if n.value == '-':
            # Unary minus: one child.
            n.children.append(nodes.pop())
        if n.value in ['+', '*']:
            # Binary operator: two children (order restored via n1).
            n1 = nodes.pop()
            n.children.append(nodes.pop())
            n.children.append(n1)
        nodes.append(n)
    t = ATree(nodes.pop())
    return t
def ib002_trees_equality(t1, t2):
    # Structural equality of two ATrees.
    # NOTE(review): returns None (falsy) when either argument is None --
    # including when both are None; callers appear to rely on truthiness only.
    if t1 is not None and t2 is not None:
        return ib002_trees_equality_rec(t1.root, t2.root)
def ib002_trees_equality_rec(n1, n2):
    """
    Recursively decide whether the subtrees rooted at `n1` and `n2`
    are identical: same value, same number of children and pairwise
    equal child subtrees.
    """
    if n1.value != n2.value:
        return False
    if len(n1.children) != len(n2.children):
        return False
    if not n1.children:
        return True
    # compare operands position by position, stopping at the first
    # mismatch (unary '-' has one child, binary '+'/'*' have two)
    if n1.value in ('-', '+', '*'):
        if not ib002_trees_equality_rec(n1.children[0], n2.children[0]):
            return False
    if n1.value in ('+', '*'):
        if not ib002_trees_equality_rec(n1.children[1], n2.children[1]):
            return False
    return True
testing_dataset = [
Ib002BuiltTrees([1], 1, [1], [1], [1]),
Ib002BuiltTrees([0], 0, [0], [0], [0]),
Ib002BuiltTrees([4], 4, [4], [4], [4]),
Ib002BuiltTrees([2], 2, [2], [2], [2]),
Ib002BuiltTrees([3], 3, [3], [3], [3]),
Ib002BuiltTrees([-3], -3, [-3], [-3], [-3]),
Ib002BuiltTrees([-1], -1, [-1], [-1], [-1]),
Ib002BuiltTrees([-2], -2, [-2], [-2], [-2]),
Ib002BuiltTrees(['-', 0], 0, [0], ['-', 0], ['-', 0]),
Ib002BuiltTrees(['-', 1], -1, [1], ['-', 1], ['-', 1]),
Ib002BuiltTrees(['-', 2], -2, [2], ['-', 2], ['-', 2]),
Ib002BuiltTrees(['-', -1], 1, [-1], ['-', -1], ['-', -1]),
Ib002BuiltTrees(['-', -2], 2, [-2], ['-', -2], ['-', -2]),
Ib002BuiltTrees(['*', 0, 1], 0, [0, 1], ['*', 1, 0], ['*', 1, 0]),
Ib002BuiltTrees(['+', 2, 1], 3, [2, 1], ['+', 1, 2], ['+', 1, 2]),
Ib002BuiltTrees(['*', 0, -1], 0, [0, -1], ['*', -1, 0], ['-', 0]),
Ib002BuiltTrees(['+', 3, 4], 7, [3, 4], ['+', 4, 3], ['+', 4, 3]),
Ib002BuiltTrees(['*', -1, 4], -4, [-1, 4], ['*', 4, -1], ['-', 4]),
Ib002BuiltTrees(['-', '-', 4], 4, [4], ['-', '-', 4], ['-', '-', 4]),
Ib002BuiltTrees(['+', -2, 2], 0, [-2, 2], ['+', 2, -2], ['+', 2, -2]),
Ib002BuiltTrees(['+', -2, 4], 2, [-2, 4], ['+', 4, -2], ['+', 4, -2]),
Ib002BuiltTrees(['*', 1, -3], -3, [1, -3], ['*', -3, 1], ['*', -3, 1]),
Ib002BuiltTrees(['*', -3, 3], -9, [-3, 3], ['*', 3, -3], ['*', 3, -3]),
Ib002BuiltTrees(['-', '-', -1], -1, [-1], ['-', '-', -1], ['-', '-', -1]),
Ib002BuiltTrees(['+', -3, -3], -6, [-3, -3], ['+', -3, -3], ['+', -3, -3]),
Ib002BuiltTrees(['+', 3, '-', 3], 0, [3, 3], ['+', '-', 3, 3],
['+', '-', 3, 3]),
Ib002BuiltTrees(['*', 4, '-', 4], -16, [4, 4], ['*', '-', 4, 4],
['*', '-', 4, 4]),
Ib002BuiltTrees(['+', '-', -1, 3], 4, [-1, 3], ['+', 3, '-', -1],
['+', 3, '-', -1]),
Ib002BuiltTrees(['+', 3, '-', -2], 5, [3, -2], ['+', '-', -2, 3],
['+', '-', -2, 3]),
Ib002BuiltTrees(['-', '-', '-', -1], 1, [-1], ['-', '-', '-', -1],
['-', '-', '-', -1]),
Ib002BuiltTrees(['*', '+', 1, 2, 3], 9, [1, 2, 3], ['*', 3, '+', 2, 1],
['*', 3, '+', 2, 1]),
Ib002BuiltTrees(['+', 1, '*', 2, -1], -1, [1, 2, -1], ['+', '*', -1, 2, 1],
['+', '-', 2, 1]),
Ib002BuiltTrees(['-', '*', '-', 3, 1], 3, [3, 1], ['-', '*', 1, '-', 3],
['-', '*', 1, '-', 3]),
Ib002BuiltTrees(['-', '*', -2, '-', 2], -4, [-2, 2],
['-', '*', '-', 2, -2], ['-', '*', '-', 2, -2]),
Ib002BuiltTrees(['-', '+', '-', '-', -3, 0], 3, [-3, 0],
['-', '+', 0, '-', '-', -3], ['-', '-', '-', -3]),
Ib002BuiltTrees(['*', '-', '-', '-', 0, 0], 0, [0, 0],
['*', 0, '-', '-', '-', 0], ['*', 0, '-', '-', '-', 0]),
Ib002BuiltTrees(['-', '*', -3, '*', 1, 0], 0, [-3, 1, 0],
['-', '*', '*', 0, 1, -3], ['-', '*', '*', 0, 1, -3]),
Ib002BuiltTrees(['-', '+', '-', -3, '-', 1], -2, [-3, 1],
['-', '+', '-', 1, '-', -3], ['-', '+', '-', 1, '-', -3]),
Ib002BuiltTrees(['-', '-', '+', '+', 3, 0, -1], 2, [3, 0, -1],
['-', '-', '+', -1, '+', 0, 3], ['-', '-', '+', -1, 3]),
Ib002BuiltTrees(['-', '-', '*', -2, '-', -3], -6, [-2, -3],
['-', '-', '*', '-', -3, -2],
['-', '-', '*', '-', -3, -2]),
Ib002BuiltTrees(['+', '+', '+', 1, 2, 3, 4], 10, [1, 2, 3, 4],
['+', 4, '+', 3, '+', 2, 1], ['+', 4, '+', 3, '+', 2, 1]),
Ib002BuiltTrees(['+', '+', 1, 2, '+', 3, 4], 10, [1, 2, 3, 4],
['+', '+', 4, 3, '+', 2, 1], ['+', '+', 4, 3, '+', 2, 1]),
Ib002BuiltTrees(['*', '*', '*', 1, 2, 3, 4], 24, [1, 2, 3, 4],
['*', 4, '*', 3, '*', 2, 1], ['*', 4, '*', 3, '*', 2, 1]),
Ib002BuiltTrees(['*', 1, '*', 2, '*', 3, 4], 24, [1, 2, 3, 4],
['*', '*', '*', 4, 3, 2, 1], ['*', '*', '*', 4, 3, 2, 1]),
Ib002BuiltTrees(['+', 0, '+', 0, '+', 0, '+', 2, 3], 5, [0, 0, 0, 2, 3],
['+', '+', '+', '+', 3, 2, 0, 0, 0], ['+', 3, 2]),
Ib002BuiltTrees(['+', 4, '-', '+', '+', 0, -1, 0], 5, [4, 0, -1, 0],
['+', '-', '+', 0, '+', -1, 0, 4], ['+', '-', -1, 4]),
Ib002BuiltTrees(['*', 4, '*', -3, '*', 1, 0], 0, [4, -3, 1, 0],
['*', '*', '*', 0, 1, -3, 4],
['*', '*', '*', 0, 1, -3, 4]),
Ib002BuiltTrees(['+', '-', '*', 0, 2, '-', 2], -2, [0, 2, 2],
['+', '-', 2, '-', '*', 2, 0],
['+', '-', 2, '-', '*', 2, 0]),
Ib002BuiltTrees(['-', '+', 3, '-', '*', -1, 2], -5, [3, -1, 2],
['-', '+', '-', '*', 2, -1, 3],
['-', '+', '-', '-', 2, 3]),
Ib002BuiltTrees(['+', '-', '*', '-', 3, -2, 1], -5, [3, -2, 1],
['+', 1, '-', '*', -2, '-', 3],
['+', 1, '-', '*', -2, '-', 3]),
Ib002BuiltTrees(['*', -1, '+', 0, '*', -1, '+', 2, 3], 5,
[-1, 0, -1, 2, 3], ['*', '+', '*', '+', 3, 2, -1, 0, -1],
['-', '-', '+', 3, 2]),
Ib002BuiltTrees(['*', '+', '-', 2, '-', 1, '-', -3], -9, [2, 1, -3],
['*', '-', -3, '+', '-', 1, '-', 2],
['*', '-', -3, '+', '-', 1, '-', 2]),
Ib002BuiltTrees(['+', '-', '+', '-', -2, -2, '-', 3], -3, [-2, -2, 3],
['+', '-', 3, '-', '+', -2, '-', -2],
['+', '-', 3, '-', '+', -2, '-', -2]),
Ib002BuiltTrees(['-', '*', 5, '*', '-', 2, '+', 1, 1], 20, [5, 2, 1, 1],
['-', '*', '*', '+', 1, 1, '-', 2, 5],
['-', '*', '*', '+', 1, 1, '-', 2, 5]),
Ib002BuiltTrees(['+', '*', '-', '+', 3, '-', 3, -3, -3], -3,
[3, 3, -3, -3], ['+', -3, '*', -3, '-', '+', '-', 3, 3],
['+', -3, '*', -3, '-', '+', '-', 3, 3]),
Ib002BuiltTrees(['+', '-', '*', -2, '-', 0, '+', 1, -3], -2,
[-2, 0, 1, -3], ['+', '+', -3, 1, '-', '*', '-', 0, -2],
['+', '+', -3, 1, '-', '*', '-', 0, -2]),
Ib002BuiltTrees(['-', '+', '-', '+', '-', '-', -1, 3, -2], 4, [-1, 3, -2],
['-', '+', -2, '-', '+', 3, '-', '-', -1],
['-', '+', -2, '-', '+', 3, '-', '-', -1]),
Ib002BuiltTrees(['-', '+', '+', -2, '*', 1, '-', -3, -2], 1,
[-2, 1, -3, -2], ['-', '+', -2, '+', '*', '-', -3, 1, -2],
['-', '+', -2, '+', '*', '-', -3, 1, -2]),
Ib002BuiltTrees(['*', '-', 3, '-', '-', '*', '-', 3, '+', 0, '-', 0], 0,
[3, 3, 0, 0],
['*', '-', '-', '*', '+', '-', 0, 0, '-', 3, '-', 3],
['*', '-', '-', '*', '-', 0, '-', 3, '-', 3]),
Ib002BuiltTrees(['-', '+', '*', '-', '-', '+', -3, 2, '-', 1, -2], 1,
[-3, 2, 1, -2],
['-', '+', -2, '*', '-', 1, '-', '-', '+', 2, -3],
['-', '+', -2, '*', '-', 1, '-', '-', '+', 2, -3]),
Ib002BuiltTrees(['*', '-', -1, '*', 2, '-', '+', '*', -3, 0, '+', 0, 0], 0,
[-1, 2, -3, 0, 0, 0],
['*', '*', '-', '+', '+', 0, 0, '*', 0, -3, 2, '-', -1],
['*', '*', '-', '*', 0, -3, 2, '-', -1]),
Ib002BuiltTrees(['-', '-', '+', '*', -3, '-', '+', '-', '-', -2, 0, -1],
-7, [-3, -2, 0, -1],
['-', '-', '+', -1, '*', '-', '+', 0, '-', '-', -2, -3],
['-', '-', '+', -1, '*', '-', '-', '-', -2, -3]),
Ib002BuiltTrees(
['+', -3, '+', 2, '*', '+', 0, '-', '*', '-', '-', 0, 4, 3], -1,
[-3, 2, 0, 0, 4, 3],
['+', '+', '*', 3, '+', '-', '*', 4, '-', '-', 0, 0, 2, -3],
['+', '+', '*', 3, '-', '*', 4, '-', '-', 0, 2, -3]),
Ib002BuiltTrees(
['+', 1, '*', '-', '*', '+', -2, '*', '-', 3, '-', 2, -3, -3], -35,
[1, -2, 3, 2, -3, -3],
['+', '*', -3, '-', '*', -3, '+', '*', '-', 2, '-', 3, -2, 1],
['+', '*', -3, '-', '*', -3, '+', '*', '-', 2, '-', 3, -2, 1]),
Ib002BuiltTrees(
['*', '+', '+', '+', 2, '+', '-', '+', 4, 0, '-', '-', 2, '*', -3, 4,
-1, -3], 39, [2, 4, 0, 2, -3, 4, -1, -3],
['*', -3, '+', -1, '+', '*', 4, -3, '+', '+', '-', '-', 2, '-', '+', 0,
4, 2],
['*', -3, '+', -1, '+', '*', 4, -3, '+', '+', '-', '-', 2, '-', 4, 2]),
Ib002BuiltTrees(
['-', '+', '*', '*', -1, '+', '*', '-', '*', -3, '-', '-', 3, '*', -1,
3, '-', 0, -3, 0], 81, [-1, -3, 3, -1, 3, 0, -3, 0],
['-', '+', 0, '*', -3, '*', '+', '-', 0, '*', '*', 3, -1, '-', '*',
'-', '-', 3, -3, -1],
['-', '*', -3, '-', '+', '-', 0, '*', '-', 3, '-', '*', '-', '-', 3,
-3]),
Ib002BuiltTrees(
['*', '+', '*', 1, '+', '-', 3, 4, '*', '*', '*', '-', '+', '-', '-',
2, -2, 2, -1, '*', 2, '*', '-', -3, '-', -2, 2], 2,
[1, 3, 4, 2, -2, 2, -1, 2, -3, -2, 2],
['*', 2, '+', '*', '*', '*', '-', -2, '-', -3, 2, '*', -1, '*', 2, '-',
'+', -2, '-', '-', 2, '*', '+', 4, '-', 3, 1],
['*', 2, '+', '*', '*', '*', '-', -2, '-', -3, 2, '-', '*', 2, '-',
'+', -2, '-', '-', 2, '*', '+', 4, '-', 3, 1]),
Ib002BuiltTrees(
['+', '-', '+', '+', -1, '*', '+', '-', '-', -2, '*', '+', -2, '+', 1,
'-', 2, '-', '*', 2, -1, '+', '-', '-', 3, 1, -2, '-', 0], 35,
[-1, -2, -2, 1, 2, 2, -1, 3, 1, -2, 0],
['+', '-', 0, '-', '+', -2, '+', '*', '+', 1, '-', '-', 3, '+', '*',
'-', '*', -1, 2, '+', '+', '-', 2, 1, -2, '-', '-', -2, -1],
['+', '-', 0, '-', '+', -2, '+', '*', '+', 1, '-', '-', 3, '+', '*',
'-', '-', 2, '+', '+', '-', 2, 1, -2, '-', '-', -2, -1]),
Ib002BuiltTrees(
['*', 0, '-', '*', '+', -2, '-', '-', '-', '-', 3, '*', '+', '*', '-',
'-', '-', '*', '-', 3, '-', '+', '-', '-', -3, 2, -1, -1, 4], 0,
[0, -2, 3, 3, -3, 2, -1, -1, 4],
['*', '-', '*', '*', 4, '+', -1, '*', -1, '-', '-', '-', '*', '-', '+',
2, '-', '-', -3, '-', 3, '+', '-', '-', '-', '-', 3, -2, 0],
['*', '-', '*', '*', 4, '+', -1, '-', '-', '-', '-', '*', '-', '+', 2,
'-', '-', -3, '-', 3, '+', '-', '-', '-', '-', 3, -2, 0]),
Ib002BuiltTrees(
['-', '*', '*', '-', '*', '+', '+', '-', '-', '-', '+', 0, '*', '+',
'-', 4, -3, 0, 0, '-', '+', '-', '-', '+', '+', 3, 0, 0, -3, -1, 1,
2], 0, [0, 4, -3, 0, 0, 3, 0, 0, -3, -1, 1, 2],
['-', '*', 2, '*', 1, '-', '*', -1, '+', '-', '+', -3, '-', '-', '+',
0, '+', 0, 3, '+', 0, '-', '-', '-', '+', '*', 0, '+', -3, '-', 4, 0],
['-', '*', 2, '*', 1, '-', '-', '+', '-', '+', -3, '-', '-', 3, '-',
'-', '-', '*', 0, '+', -3, '-', 4]),
Ib002BuiltTrees(
['-', '+', 3, '-', '+', '-', '+', '+', '-', -1, '*', '*', 1, '+', '+',
'*', '-', 4, '-', '-', 3, 2, '-', '+', '-', '-', 1, 0, 0, -2, 3], 1,
[3, -1, 1, 4, 3, 2, 1, 0, 0, -2, 3],
['-', '+', '-', '+', 3, '-', '+', -2, '+', '*', 0, '*', '+', '-', '+',
0, '-', '-', 1, '+', 2, '*', '-', '-', 3, '-', 4, 1, '-', -1, 3],
['-', '+', '-', '+', 3, '-', '+', -2, '+', '*', 0, '*', '+', '-', '-',
'-', 1, '+', 2, '*', '-', '-', 3, '-', 4, 1, '-', -1, 3]),
Ib002BuiltTrees(
['+', '*', '-', '+', '+', '-', -3, '-', '+', '*', '-', '+', '-', '-',
'-', '+', '+', '-', '-', 2, '-', 2, '-', 4, -2, -2, 1, 1, 3, 2], 5,
[-3, 2, 2, 4, -2, -2, 1, 1, 3, 2],
['+', 2, '*', 3, '-', '+', 1, '+', '-', '+', 1, '*', -2, '-', '+', -2,
'-', '-', '-', '+', '-', 4, '+', '-', 2, '-', '-', 2, '-', -3],
['+', 2, '*', 3, '-', '+', 1, '+', '-', '+', 1, '*', -2, '-', '+', -2,
'-', '-', '-', '+', '-', 4, '+', '-', 2, '-', '-', 2, '-', -3]),
Ib002BuiltTrees(
['*', '*', '*', '+', '-', '-', '+', '+', -2, '*', '*', '+', 4, '-',
'+', '-', '-', '-', '-', '-', '-', -1, '-', 2, 1, 4, 2, 0, -1, 4, -1],
112, [-2, 4, -1, 2, 1, 4, 2, 0, -1, 4, -1],
['*', -1, '*', 4, '*', -1, '+', 0, '-', '-', '+', 2, '+', '*', 4, '*',
1, '+', '-', '+', '-', 2, '-', '-', '-', '-', '-', '-', -1, 4, -2],
['-', '*', 4, '-', '-', '-', '+', 2, '+', '*', 4, '*', 1, '+', '-',
'+', '-', 2, '-', '-', '-', '-', '-', '-', -1, 4, -2]),
Ib002BuiltTrees(
['+', '+', '-', -2, 2, '+', '-', '-', '-', '-', '-', '*', '+', '*',
'-', -3, '*', '-', '+', '-', -2, '*', '+', '-', -2, 0, -2, 0, 0, 0,
2], 6, [-2, 2, -3, -2, -2, 0, -2, 0, 0, 0, 2],
['+', '+', 2, '-', '-', '-', '-', '-', '*', 0, '+', 0, '*', '*', 0,
'-', '+', '*', -2, '+', 0, '-', -2, '-', -2, '-', -3, '+', 2, '-',
-2],
['+', '+', 2, '-', '-', '-', '-', '-', '*', 0, '*', '*', 0, '-', '+',
'*', -2, '-', -2, '-', -2, '-', -3, '+', 2, '-', -2]),
Ib002BuiltTrees(
['+', '+', '*', '*', 3, '-', '*', '-', -3, '-', 2, 4, '*', '-', -3,
'-', '-', -3, '-', '-', '*', '*', '-', 2, '-', 4, '-', '+', -3, 2],
71, [3, -3, 2, 4, -3, -3, 2, 4, -3, 2],
['+', '-', '-', '*', '-', '+', 2, -3, '*', '-', 4, '-', 2, '+', '*',
'-', '-', -3, '-', -3, '*', 4, '*', '-', '*', '-', 2, '-', -3, 3],
['+', '-', '-', '*', '-', '+', 2, -3, '*', '-', 4, '-', 2, '+', '*',
'-', '-', -3, '-', -3, '*', 4, '*', '-', '*', '-', 2, '-', -3, 3]),
Ib002BuiltTrees(
['-', '-', '*', '*', 0, '-', '+', '*', '-', '*', '-', -1, 1, '+', '-',
'-', '*', '-', '-', '+', '*', '-', '*', 1, 3, -3, 4, 0, 3, 1, 3], 0,
[0, -1, 1, 1, 3, -3, 4, 0, 3, 1, 3],
['-', '-', '*', 3, '*', '-', '+', 1, '*', '+', 3, '-', '-', '*', 0,
'-', '-', '+', 4, '*', -3, '-', '*', 3, 1, '-', '*', 1, '-', -1, 0],
['-', '-', '*', 3, '*', '-', '+', 1, '*', '+', 3, '-', '-', '*', 0,
'-', '-', '+', 4, '*', -3, '-', '*', 3, 1, '-', '*', 1, '-', -1, 0]),
Ib002BuiltTrees(
['+', '+', -2, '+', '-', '-', '*', '-', '-', '+', '-', '*', '+', '*',
'+', 0, '-', '-', '-', -3, '-', '*', 4, 1, '-', 0, 0, 2, -2, -2, 1],
-7, [-2, 0, -3, 4, 1, 0, 0, 2, -2, -2, 1],
['+', 1, '+', '+', -2, '-', '-', '*', -2, '-', '-', '+', 2, '-', '*',
0, '+', '-', 0, '*', '-', '*', 1, 4, '+', '-', '-', '-', -3, 0, -2],
['+', 1, '+', '+', -2, '-', '-', '*', -2, '-', '-', '+', 2, '-', '*',
0, '+', '-', 0, '*', '-', '*', 1, 4, '-', '-', '-', -3, -2]),
Ib002BuiltTrees(
['*', 0, '+', '*', '+', '-', '-', -1, '+', -2, '*', '*', '-', '+', '-',
'+', 1, '-', '*', '-', 3, -3, 2, 1, '+', -3, -1, '-', -1, -1], 0,
[0, -1, -2, 1, 3, -3, 2, 1, -3, -1, -1, -1],
['*', '+', -1, '*', '-', -1, '+', '+', '*', '+', -1, -3, '*', 1, '-',
'+', 2, '-', '+', '-', '*', -3, '-', 3, 1, -2, '-', '-', -1, 0],
['*', '+', -1, '*', '-', -1, '+', '+', '*', '+', -1, -3, '*', 1, '-',
'+', 2, '-', '+', '-', '*', -3, '-', 3, 1, -2, '-', '-', -1, 0]),
Ib002BuiltTrees(
['+', '-', '-', -2, '*', '-', '+', '+', '+', 0, '+', 2, '-', '-', '-',
1, '+', '*', '*', '-', -2, -1, '+', -2, 1, '-', '-', '+', 0, 1, 2, 3],
-20, [-2, 0, 2, 1, -2, -1, -2, 1, 0, 1, 2, 3],
['+', '*', 3, '-', '+', 2, '+', '+', '-', '-', '+', 1, 0, '*', '+', 1,
-2, '*', -1, '-', -2, '+', '+', '-', '-', '-', 1, 2, 0, '-', '-', -2],
['+', '*', 3, '-', '+', 2, '+', '+', '-', '-', 1, '*', '+', 1, -2, '-',
'-', -2, '+', '-', '-', '-', 1, 2, '-', '-', -2]),
Ib002BuiltTrees(
['+', '-', '+', '*', -2, 0, '+', '+', 3, '-', '*', '-', 2, '-', '-',
'-', -2, 4, '+', '-', '+', 0, '+', '-', '-', '+', '+', 3, 3, 4, 2, 4],
-19, [-2, 0, 3, 2, -2, 4, 0, 3, 3, 4, 2, 4],
['+', '+', 4, '-', '+', '+', 2, '-', '-', '+', 4, '+', 3, 3, 0, '-',
'+', '+', 4, '+', '-', '*', '-', '-', '-', -2, '-', 2, 3, '*', 0, -2],
['+', '+', 4, '-', '+', 2, '-', '-', '+', 4, '+', 3, 3, '-', '+', '+',
4, '+', '-', '*', '-', '-', '-', -2, '-', 2, 3, '*', 0, -2]),
Ib002BuiltTrees(
['*', '+', '-', '+', -1, 2, '+', '*', '-', '+', 0, '-', '+', -1, '-',
'+', 0, '+', '-', '-', '+', '-', '+', '*', '*', -1, 3, 2, 0, 0, 2, 0,
4, -3], -9, [-1, 2, 0, -1, 0, -1, 3, 2, 0, 0, 2, 0, 4, -3],
['*', -3, '+', '+', 4, '*', 0, '-', '+', '-', '+', '-', '+', '+', 2,
'-', '-', '+', 0, '-', '+', 0, '*', 2, '*', 3, -1, 0, -1, 0, '-', '+',
2, -1],
['*', -3, '+', '+', 4, '*', 0, '-', '-', '+', '-', '+', 2, '-', '-',
'-', '*', 2, '-', 3, -1, '-', '+', 2, -1]),
Ib002BuiltTrees(
['*', 1, '*', '-', '+', '*', '-', '+', '+', -3, '-', '-', -2, 1, '-',
'-', '-', '-', '*', '*', '-', '*', '*', '-', -1, -2, -1, -1, 1, 1, 1],
-9, [1, -3, -2, 1, -1, -2, -1, -1, 1, 1, 1],
['*', '*', 1, '-', '+', 1, '*', '-', '-', '-', '-', '*', 1, '*', -1,
'-', '*', -1, '*', -2, '-', -1, '-', '+', 1, '+', '-', '-', -2, -3,
1],
['*', '*', 1, '-', '+', 1, '*', '-', '-', '-', '-', '*', 1, '-', '-',
'-', '*', -2, '-', -1, '-', '+', 1, '+', '-', '-', -2, -3, 1]),
Ib002BuiltTrees(
['+', '+', -2, '*', '-', '*', '*', '*', 3, '-', '-', '+', 0, '-', '+',
'-', 3, '+', 3, '+', 0, '-', '-', '+', '-', 3, '*', 1, -3, -1, 1, 2,
2], 36, [-2, 3, 0, 3, 3, 0, 3, 1, -3, -1, 1, 2, 2],
['+', 2, '+', '*', 2, '-', '*', 1, '*', -1, '*', '-', '-', '+', '-',
'+', '+', '+', '-', '-', '+', '*', -3, 1, '-', 3, 0, 3, '-', 3, 0, 3,
-2],
['+', 2, '+', '*', 2, '-', '*', 1, '-', '*', '-', '-', '-', '+', '+',
'-', '-', '+', '*', -3, 1, '-', 3, 3, '-', 3, 3, -2]),
Ib002BuiltTrees(
['-', '*', '*', '*', '-', '+', '-', '*', 3, '+', '*', '-', '-', '-',
-1, 1, -1, '-', '-', '+', '-', '*', '-', '*', -2, 4, 0, 1, -3, 4, 4],
-48, [3, -1, 1, -1, -2, 4, 0, 1, -3, 4, 4],
['-', '*', 4, '*', 4, '*', -3, '-', '+', '-', '-', '+', 1, '-', '*', 0,
'-', '*', 4, -2, '-', '*', '+', -1, '*', 1, '-', '-', '-', -1, 3],
['-', '*', 4, '*', 4, '*', -3, '-', '+', '-', '-', '+', 1, '-', '*', 0,
'-', '*', 4, -2, '-', '*', '+', -1, '*', 1, '-', '-', '-', -1, 3]),
Ib002BuiltTrees(
['-', '-', '*', '+', -1, '-', 1, '*', '-', '-', '-', '+', '+', 1, '-',
'-', -2, '+', '+', '-', -1, 2, '-', '*', '+', 1, '+', 4, 4, -2, 3],
120, [-1, 1, 1, -2, -1, 2, 1, 4, 4, -2, 3],
['-', '-', '*', '*', 3, '-', '-', '-', '+', '+', '-', '*', -2, '+',
'+', 4, 4, 1, '+', 2, '-', -1, '+', '-', '-', -2, 1, '+', '-', 1, -1],
['-', '-', '*', '*', 3, '-', '-', '-', '+', '+', '-', '*', -2, '+',
'+', 4, 4, 1, '+', 2, '-', -1, '+', '-', '-', -2, 1, '+', '-', 1,
-1]),
Ib002BuiltTrees(
['-', '+', 3, '*', '*', '-', '-', '+', '-', '-', '+', '-', '-', '+',
'+', '*', '*', 1, '-', '*', -1, '-', '-', 0, -1, -1, 4, -1, -3, -3,
3], -12, [3, 1, -1, 0, -1, -1, 4, -1, -3, -3, 3],
['-', '+', '*', 3, '*', -3, '-', '-', '+', -3, '-', '-', '+', -1, '-',
'-', '+', 4, '+', -1, '*', -1, '*', '-', '*', '-', '-', 0, -1, 1, 3],
['-', '+', '*', 3, '*', -3, '-', '-', '+', -3, '-', '-', '+', -1, '-',
'-', '+', 4, '+', -1, '-', '*', '-', '-', '-', '-', 0, 1, 3]),
Ib002BuiltTrees(
['+', '-', '-', '-', '+', '+', '-', 4, '*', '+', '-', 4, '-', '*', '-',
4, '-', '-', 1, '*', '+', '*', '+', '*', 0, -1, 3, 4, 2, 4, -1, -3],
2, [4, 4, 4, 1, 0, -1, 3, 4, 2, 4, -1, -3],
['+', -3, '-', '-', '-', '+', -1, '+', '*', '*', 4, '+', 2, '*', 4,
'+', 3, '*', -1, 0, '+', '-', '*', '-', '-', 1, '-', 4, '-', 4, '-',
4],
['+', -3, '-', '-', '-', '+', -1, '+', '*', '*', 4, '+', 2, '*', 4,
'+', 3, '-', 0, '+', '-', '*', '-', '-', 1, '-', 4, '-', 4, '-', 4]),
Ib002BuiltTrees(
['-', '-', '*', '*', '*', '-', '-', -2, '*', 1, '-', '+', '*', '+',
'-', 1, '-', '+', '+', '*', '-', '*', '-', -3, 4, -1, 0, 2, 0, -2, -3,
3], 36, [-2, 1, 1, -3, 4, -1, 0, 2, 0, -2, -3, 3],
['-', '-', '*', 3, '*', -3, '*', '*', '-', '+', -2, '*', 0, '+', '-',
'+', 2, '+', 0, '*', -1, '-', '*', 4, '-', -3, '-', 1, 1, '-', '-',
-2],
['-', '-', '*', 3, '*', -3, '*', '*', '-', '+', -2, '*', 0, '+', '-',
'+', 2, '-', '-', '*', 4, '-', -3, '-', 1, 1, '-', '-', -2]),
Ib002BuiltTrees(
['+', '-', '-', '+', '*', '*', '*', '+', '*', '-', '*', '+', 4, '*',
'-', '-', '*', '-', '-', '-', '+', 1, 0, 2, 4, 0, 2, -1, 2, 0, -1, 1,
-1], 0, [4, 1, 0, 2, 4, 0, 2, -1, 2, 0, -1, 1, -1],
['+', -1, '-', '-', '+', 1, '*', -1, '*', 0, '*', 2, '+', -1, '*', 2,
'-', '*', 0, '+', '*', 4, '-', '-', '*', 2, '-', '-', '-', '+', 0, 1,
4],
['+', -1, '-', '-', '+', 1, '-', '*', 0, '*', 2, '+', -1, '*', 2, '-',
'*', 0, '+', '*', 4, '-', '-', '*', 2, '-', '-', '-', 1, 4]),
Ib002BuiltTrees(
['*', 2, '+', '+', 3, '+', '*', '+', '*', '*', '-', '-', '+', '+', 0,
'+', '-', '-', -1, '-', '+', 1, '*', 4, '-', 3, '-', 3, 2, 3, 2, 2, 0,
1], 184, [2, 3, 0, -1, 1, 4, 3, 3, 2, 3, 2, 2, 0, 1],
['*', '+', 1, '+', '+', 0, '*', 2, '+', 2, '*', 3, '*', 2, '-', '-',
'+', '-', 3, '+', '+', '-', '+', '*', '-', 3, 4, 1, '-', '-', -1, 0,
3, 2],
['*', '+', 1, '+', '*', 2, '+', 2, '*', 3, '*', 2, '-', '-', '+', '-',
3, '+', '-', '+', '*', '-', 3, 4, 1, '-', '-', -1, 3, 2]),
Ib002BuiltTrees(
['*', '*', '+', '+', '+', '*', '-', '-', '-', '+', '-', '*', '-', '-',
'-', '-', '*', '-', '-', '*', -3, 1, -3, 4, -3, 3, -1, -3, -3, 1, -1],
-110, [-3, 1, -3, 4, -3, 3, -1, -3, -3, 1, -1],
['*', -1, '*', 1, '+', -3, '+', -3, '+', -1, '*', 3, '-', '-', '-',
'+', -3, '-', '*', 4, '-', '-', '-', '-', '*', -3, '-', '-', '*', 1,
-3],
['-', '*', 1, '+', -3, '+', -3, '+', -1, '*', 3, '-', '-', '-', '+',
-3, '-', '*', 4, '-', '-', '-', '-', '*', -3, '-', '-', '*', 1, -3]),
Ib002BuiltTrees(
['-', '+', '-', '-', '*', '*', 0, '*', -3, '-', 2, '-', '*', '-', '+',
'-', 2, '*', '+', '+', '*', '*', '+', '-', 2, 0, -3, -3, -3, -1, 0, 4,
0], 0, [0, -3, 2, 2, 2, 0, -3, -3, -3, -1, 0, 4, 0],
['-', '+', 0, '-', '-', '*', '-', '*', 4, '-', '+', '*', 0, '+', -1,
'+', -3, '*', -3, '*', -3, '+', 0, '-', 2, '-', 2, '*', '*', '-', 2,
-3, 0],
['-', '-', '-', '*', '-', '*', 4, '-', '+', '*', 0, '+', -1, '+', -3,
'*', -3, '*', -3, '-', 2, '-', 2, '*', '*', '-', 2, -3, 0]),
Ib002BuiltTrees(
['-', '*', -2, '-', '+', '-', '-', '-', '-', '*', '*', '+', 2, 3, '*',
'-', '*', '+', 3, '+', '+', '-', '*', '+', 1, -3, -1, 0, 2, 2, 0, 4,
-2], 4, [-2, 2, 3, 3, 1, -3, -1, 0, 2, 2, 0, 4, -2],
['-', '*', '-', '+', -2, '-', '-', '-', '-', '*', 4, '*', '*', 0, '-',
'*', 2, '+', '+', 2, '+', 0, '-', '*', -1, '+', -3, 1, 3, '+', 3, 2,
-2],
['-', '*', '-', '+', -2, '-', '-', '-', '-', '*', 4, '*', '*', 0, '-',
'*', 2, '+', '+', 2, '-', '-', '+', -3, 1, 3, '+', 3, 2, -2]),
Ib002BuiltTrees(
['+', -1, '-', '-', '*', '-', '-', '-', '-', '-', '*', 0, '-', '-',
'+', '+', '*', '+', '*', '*', '*', '+', 4, 0, 3, -1, -2, -3, -3, -2,
-2, -1], -1, [-1, 0, 4, 0, 3, -1, -2, -3, -3, -2, -2, -1],
['+', '-', '-', '*', -1, '-', '-', '-', '-', '-', '*', '-', '-', '+',
-2, '+', -2, '*', -3, '+', -3, '*', -2, '*', -1, '*', 3, '+', 0, 4, 0,
-1],
['+', '-', '-', '-', '-', '-', '-', '-', '-', '*', '-', '-', '+', -2,
'+', -2, '*', -3, '+', -3, '*', -2, '-', '*', 3, 4, 0, -1]),
Ib002BuiltTrees(
['-', '+', 2, '-', '*', 0, '+', '-', '-', 1, '+', 2, '+', '*', '+',
'-', '-', '-', '*', '+', '-', '+', 2, 3, '+', '+', 3, 2, -2, -3, 0, 1,
-2], -2, [2, 0, 1, 2, 2, 3, 3, 2, -2, -3, 0, 1, -2],
['-', '+', '-', '*', '+', '+', '+', -2, '*', 1, '+', 0, '-', '-', '-',
'*', -3, '+', '+', -2, '+', 2, 3, '-', '+', 3, 2, 2, '-', '-', 1, 0,
2],
['-', '+', '-', '*', '+', '+', '+', -2, '*', 1, '-', '-', '-', '*', -3,
'+', '+', -2, '+', 2, 3, '-', '+', 3, 2, 2, '-', '-', 1, 0, 2]),
Ib002BuiltTrees(
['-', '*', '*', '+', '-', '+', '-', 1, -3, '+', '+', '-', '-', '-',
'-', 1, 3, '*', -2, '+', '*', '*', '+', '-', 2, '-', 1, -3, -1, -3,
-1, 1], 32, [1, -3, 1, 3, -2, 2, 1, -3, -1, -3, -1, 1],
['-', '*', 1, '*', -1, '+', '+', '*', '+', -3, '*', -1, '*', -3, '+',
'-', 1, '-', 2, -2, '+', 3, '-', '-', '-', '-', 1, '-', '+', -3, '-',
1],
['-', '*', 1, '-', '+', '+', '*', '+', -3, '-', '*', -3, '+', '-', 1,
'-', 2, -2, '+', 3, '-', '-', '-', '-', 1, '-', '+', -3, '-', 1]),
Ib002BuiltTrees(
['+', '+', '*', 1, '-', '+', '*', '-', '*', '+', '-', -3, -1, '+', '*',
-1, 3, '*', '*', '-', '-', -2, '-', '-', '+', 0, '+', 0, 2, 0, 0, 1,
1, 1], 1, [1, -3, -1, -1, 3, -2, 0, 0, 2, 0, 0, 1, 1, 1],
['+', 1, '+', 1, '*', '-', '+', 1, '*', 0, '-', '*', '+', '*', 0, '*',
'-', '-', '+', '+', 2, 0, 0, '-', '-', -2, '*', 3, -1, '+', -1, '-',
-3, 1],
['+', 1, '+', 1, '*', '-', '+', 1, '*', 0, '-', '*', '+', '*', 0, '*',
'-', '-', 2, '-', '-', -2, '-', 3, '+', -1, '-', -3, 1]),
Ib002BuiltTrees(
['-', '+', '*', 1, '-', '-', '+', 4, '+', 0, '+', 0, '*', '-', '-',
'*', '+', '*', 0, '*', '-', '+', '-', '+', '-', -2, -1, -3, 2, -3, -1,
-2, -2], 4, [1, 4, 0, 0, 0, -2, -1, -3, 2, -3, -1, -2, -2],
['-', '+', -2, '*', '-', '-', '+', '+', '+', '*', -2, '-', '-', '*',
-1, '+', -3, '*', '*', 2, '-', '+', -3, '-', '+', -1, '-', -2, 0, 0,
0, 4, 1],
['-', '+', -2, '*', '-', '-', '+', '*', -2, '-', '-', '-', '+', -3,
'*', '*', 2, '-', '+', -3, '-', '+', -1, '-', -2, 0, 4, 1]),
Ib002BuiltTrees(
['*', '*', '-', -2, '-', '-', '*', '*', '*', '+', '-', 0, '+', '-',
'+', '*', '*', -2, '+', 0, '-', '-', '*', 2, 4, '-', 0, 3, -3, 2, 2,
-1, -2], -96, [-2, 0, -2, 0, 2, 4, 0, 3, -3, 2, 2, -1, -2],
['*', -2, '*', '-', '-', '*', -1, '*', 2, '*', 2, '+', '+', -3, '-',
'+', 3, '*', '-', 0, '*', '+', '-', '-', '*', 4, 2, 0, -2, '-', 0,
'-', -2],
['*', -2, '*', '-', '-', '-', '*', 2, '*', 2, '+', '+', -3, '-', '+',
3, '*', '-', 0, '*', '-', '-', '*', 4, 2, -2, '-', 0, '-', -2]),
Ib002BuiltTrees(
['-', '+', 0, '+', '+', '*', '*', 3, '*', '-', '-', -3, '+', '-', -3,
'+', 0, '*', 2, '*', '-', -2, -2, '-', '*', '-', '-', -1, '+', 2, -1,
3, 2], -50, [0, 3, -3, -3, 0, 2, -2, -2, -1, 2, -1, 3, 2],
['-', '+', '+', 2, '+', 3, '*', '-', '*', '+', -1, 2, '-', '-', -1,
'*', '*', '+', '+', '*', '*', -2, '-', -2, 2, 0, '-', -3, '-', '-',
-3, 3, 0],
['-', '+', 2, '+', 3, '*', '-', '*', '+', -1, 2, '-', '-', -1, '*',
'*', '+', '*', '*', -2, '-', -2, 2, '-', -3, '-', '-', -3, 3]),
Ib002BuiltTrees(
['*', '-', '+', 3, '-', '*', '+', '*', '+', '+', '-', '+', '*', '*',
'+', '+', 0, '-', '-', '-', 2, '-', '*', 4, -3, 1, 3, -3, 0, -2, 0, 1,
0, -3], 9, [3, 0, 2, 4, -3, 1, 3, -3, 0, -2, 0, 1, 0, -3],
['*', -3, '-', '+', '-', '*', 0, '+', 1, '*', 0, '+', -2, '+', 0, '-',
'+', -3, '*', 3, '*', 1, '+', '-', '*', -3, 4, '+', '-', '-', '-', 2,
0, 3],
['*', -3, '-', '+', '-', '*', 0, '+', 1, '*', 0, '+', -2, '-', '+', -3,
'*', 3, '*', 1, '+', '-', '*', -3, 4, '-', '-', '-', 2, 3]),
Ib002BuiltTrees(
['*', '-', '*', '*', 3, '*', '*', '+', '-', '*', '*', '-', '+', '*',
'-', '+', '-', '-', '*', '+', 1, '+', -2, 0, 3, -1, 2, 0, 3, 4, 0, 1,
2, 3, -2], 3456, [3, 1, -2, 0, 3, -1, 2, 0, 3, 4, 0, 1, 2, 3, -2],
['*', -2, '-', '*', 3, '*', '*', 2, '*', 1, '+', 0, '-', '*', 4, '*',
3, '-', '+', 0, '*', 2, '-', '+', -1, '-', '-', '*', 3, '+', '+', 0,
-2, 1, 3],
['*', -2, '-', '*', 3, '*', '*', 2, '*', 1, '-', '*', 4, '*', 3, '-',
'*', 2, '-', '+', -1, '-', '-', '*', 3, '+', -2, 1, 3]),
Ib002BuiltTrees(
['+', '-', '+', 3, -2, '+', 4, '*', '+', '-', '+', '+', 2, 2, '+', 0,
'+', '+', 0, -3, '-', 0, '+', '+', '-', '+', 0, '-', '-', '*', 2, -1,
-2, -2, 1], 0, [3, -2, 4, 2, 2, 0, 0, -3, 0, 0, 2, -1, -2, -2, 1],
['+', '+', '*', 1, '+', '+', -2, '+', -2, '-', '+', '-', '-', '*', -1,
2, 0, '-', '+', '+', '+', '-', 0, '+', -3, 0, 0, '+', 2, 2, 4, '-',
'+', -2, 3],
['+', '+', '*', 1, '+', '+', -2, '+', -2, '-', '-', '-', '-', 2, '-',
'+', '+', '-', 0, -3, '+', 2, 2, 4, '-', '+', -2, 3]),
Ib002BuiltTrees(
['+', '+', '-', -2, '-', '-', '+', '+', -3, '-', '-', '+', 2, '*', '+',
'-', '*', '-', '+', '*', '*', '*', '-', 3, 1, 3, -1, -1, 4, -2, 0, 2,
1], 4, [-2, -3, 2, 3, 1, 3, -1, -1, 4, -2, 0, 2, 1],
['+', 1, '+', '-', '-', '+', 2, '+', '-', '-', '+', '*', 0, '+', -2,
'-', '*', 4, '-', '+', -1, '*', -1, '*', 3, '*', 1, '-', 3, 2, -3,
'-', -2],
['+', 1, '+', '-', '-', '+', 2, '+', '-', '-', '+', '*', 0, '+', -2,
'-', '*', 4, '-', '+', -1, '-', '*', 3, '*', 1, '-', 3, 2, -3, '-',
-2]),
Ib002BuiltTrees(
['-', '-', '+', '-', '+', 1, '-', '*', '*', 3, '-', '*', '+', 0, '+',
'*', '+', '+', '-', -1, -3, '-', '-', '+', -2, 1, '*', -3, 2, -2, 2,
0, -2], -3, [1, 3, 0, -1, -3, -2, 1, -3, 2, -2, 2, 0, -2],
['-', '-', '+', -2, '-', '+', '-', '*', 0, '*', '-', '*', 2, '+', '+',
-2, '*', '*', 2, -3, '+', '-', '-', '+', 1, -2, '+', -3, '-', -1, 0,
3, 1],
['-', '-', '+', -2, '-', '+', '-', '*', 0, '*', '-', '*', 2, '+', -2,
'*', '*', 2, -3, '+', '-', '-', '+', 1, -2, '+', -3, '-', -1, 3, 1]),
Ib002BuiltTrees(
['*', '*', '*', '+', -3, '-', '-', '*', '-', 3, -3, '+', '-', '-', '-',
'+', '*', '*', '*', '+', 3, -1, 3, '-', '*', 2, '-', 1, 4, 4, 2, 1,
-2], 600, [-3, 3, -3, 3, -1, 3, 2, 1, 4, 4, 2, 1, -2],
['*', -2, '*', 1, '*', '+', 2, '-', '-', '-', '+', 4, '*', 4, '*', '-',
'*', '-', 1, 2, '*', 3, '+', -1, 3, '+', '-', '-', '*', -3, '-', 3,
-3],
['*', -2, '*', 1, '*', '+', 2, '-', '-', '-', '+', 4, '*', 4, '*', '-',
'*', '-', 1, 2, '*', 3, '+', -1, 3, '+', '-', '-', '*', -3, '-', 3,
-3]),
Ib002BuiltTrees(
['+', '-', '*', '-', 1, '+', -1, '*', '-', '+', '-', '*', '*', '+',
'+', '*', '+', '-', 2, '-', 3, '+', '*', '-', 0, -1, -3, 1, -2, 3, 1,
0, 4, 4], 171, [1, -1, 2, 3, 0, -1, -3, 1, -2, 3, 1, 0, 4, 4],
['+', 4, '-', '*', '+', '*', 4, '-', '+', 0, '-', '*', 1, '*', 3, '+',
-2, '+', 1, '*', '+', -3, '*', -1, '-', 0, '+', '-', 3, '-', 2, -1,
'-', 1],
['+', 4, '-', '*', '+', '*', 4, '-', '-', '*', 1, '*', 3, '+', -2, '+',
1, '*', '+', -3, '-', '-', 0, '+', '-', 3, '-', 2, -1, '-', 1]),
Ib002BuiltTrees(
['*', '+', '-', '*', 0, '-', -3, '-', '*', '-', '+', 0, '*', '-', '*',
'-', '-', '*', -2, '+', -3, '*', '+', '*', 3, 1, '-', 1, -2, 2, -3,
-1, -3], 252, [0, -3, 0, -2, -3, 3, 1, 1, -2, 2, -3, -1, -3],
['*', -3, '+', '-', '*', -1, '-', '+', '*', -3, '-', '*', 2, '-', '-',
'*', '+', '*', -2, '+', '-', 1, '*', 1, 3, -3, -2, 0, '-', '*', '-',
-3, 0],
['*', -3, '+', '-', '-', '-', '*', -3, '-', '*', 2, '-', '-', '*', '+',
'*', -2, '+', '-', 1, '*', 1, 3, -3, -2, '-', '*', '-', -3, 0]),
Ib002BuiltTrees(
['-', '*', '-', '+', '*', '-', -3, '-', '+', '+', 4, '-', '*', '+', 0,
'-', -2, '-', '-', '+', '*', '*', 4, '+', '*', 4, -2, 1, -2, 2, -3,
-1, 1], 344, [-3, 4, 0, -2, 4, 4, -2, 1, -2, 2, -3, -1, 1],
['-', '*', 1, '-', '+', -1, '*', '-', '+', -3, '+', '-', '*', '-', '-',
'+', 2, '*', -2, '*', '+', 1, '*', -2, 4, 4, '+', '-', -2, 0, 4, '-',
-3],
['-', '*', 1, '-', '+', -1, '*', '-', '+', -3, '+', '-', '*', '-', '-',
'+', 2, '*', -2, '*', '+', 1, '*', -2, 4, 4, '-', -2, 4, '-', -3]),
Ib002BuiltTrees(
['+', 4, '+', '+', '-', 2, '*', '-', '-', '*', '+', -2, 0, '-', 4, -1,
'*', '*', '+', '+', '-', '+', 2, '-', -1, '+', 1, '-', '*', 0, 3, 4,
-3, 1], -12, [4, 2, -2, 0, 4, -1, 2, -1, 1, 0, 3, 4, -3, 1],
['+', '+', '*', 1, '*', -3, '+', 4, '+', '+', '-', '*', 3, 0, 1, '-',
'+', '-', -1, 2, '+', '*', -1, '-', '-', '*', '-', 4, '+', 0, -2, '-',
2, 4],
['+', '+', '*', 1, '*', -3, '+', 4, '+', '+', '-', '*', 3, 0, 1, '-',
'+', '-', -1, 2, '+', '-', '-', '-', '*', '-', 4, -2, '-', 2, 4]),
Ib002BuiltTrees(
['*', '+', -2, '+', '*', '*', '+', '-', '-', '*', '+', 4, '*', '-',
'-', '+', '-', '-', '-', '*', '*', '+', 3, 0, 1, -1, -1, 4, 2, 1, 3,
1, 4, -3], -231, [-2, 4, 3, 0, 1, -1, -1, 4, 2, 1, 3, 1, 4, -3],
['*', -3, '+', '+', 4, '*', 1, '*', 3, '+', 1, '-', '-', '*', 2, '+',
'*', 4, '-', '-', '+', -1, '-', '-', '-', '*', -1, '*', 1, '+', 0, 3,
4, -2],
['*', -3, '+', '+', 4, '*', 1, '*', 3, '+', 1, '-', '-', '*', 2, '+',
'*', 4, '-', '-', '+', -1, '-', '-', '-', '-', '*', 1, 3, 4, -2]),
Ib002BuiltTrees(
['*', '+', '+', '*', 0, '+', '-', '*', '*', '-', -1, -1, '+', -3, '+',
1, '-', '+', '*', '-', '-', '*', '+', '-', '+', 2, 0, -3, -2, -3, 3,
2, 0, 0, 2], 0, [0, -1, -1, -3, 1, 2, 0, -3, -2, -3, 3, 2, 0, 0, 2],
['*', 2, '+', 0, '+', 0, '*', '+', 2, '-', '*', '+', '+', '-', '+', 3,
'*', -3, '-', '-', '*', -2, '+', -3, '-', '+', 0, 2, 1, -3, '*', -1,
'-', -1, 0],
['*', 2, '*', '+', 2, '-', '*', '+', '+', '-', '+', 3, '*', -3, '-',
'-', '*', -2, '+', -3, '-', 2, 1, -3, '-', '-', -1, 0]),
Ib002BuiltTrees(
['-', '-', '*', '*', '-', '+', '*', '-', -2, '-', 2, '-', '*', '-',
'+', -2, '*', 4, '+', '*', 0, '+', '*', '+', 2, '+', 1, 1, -2, 2, 0,
1, 4, -2], -48, [-2, 2, -2, 4, 0, 2, 1, 1, -2, 2, 0, 1, 4, -2],
['-', '-', '*', -2, '*', 4, '-', '+', '-', '*', 1, '-', '+', '*', '+',
0, '*', '+', 2, '*', -2, '+', '+', 1, 1, 2, 0, 4, -2, '*', '-', 2,
'-', -2],
['-', '-', '*', -2, '*', 4, '-', '+', '-', '*', 1, '-', '+', '*', '*',
'+', 2, '*', -2, '+', '+', 1, 1, 2, 0, 4, -2, '*', '-', 2, '-', -2]),
Ib002BuiltTrees(
['*', 0, '-', '+', '*', '*', '+', '-', '*', '-', '-', '*', 3, '+', '-',
'*', '*', '-', '+', -1, '*', '-', '+', 3, -1, 3, 4, -3, 1, 4, 2, 1, 3,
3], 0, [0, 3, -1, 3, -1, 3, 4, -3, 1, 4, 2, 1, 3, 3],
['*', '-', '+', 3, '*', 3, '*', 1, '+', 2, '-', '*', 4, '-', '-', '*',
'+', 1, '-', '*', -3, '*', 4, '-', '+', '*', 3, '-', '+', -1, 3, -1,
3, 0],
['*', '-', '+', 3, '*', 3, '*', 1, '+', 2, '-', '*', 4, '-', '-', '*',
'+', 1, '-', '*', -3, '*', 4, '-', '+', '*', 3, '-', '+', -1, 3, -1,
3, 0]),
Ib002BuiltTrees(
['*', '-', '+', '*', '*', '-', '*', '-', -1, '+', '-', '*', '-', -3,
'+', 3, '*', '*', -1, '-', '*', -2, '+', '-', '-', -2, -3, 0, -3, 2,
-1, 4, 1], 20, [-1, -3, 3, -1, -2, -2, -3, 0, -3, 2, -1, 4, 1],
['*', 1, '-', '+', 4, '*', -1, '*', 2, '-', '*', '+', -3, '-', '*',
'+', '*', 0, '*', '-', '*', '+', -3, '-', '-', -2, -2, -1, 3, '-', -3,
'-', -1],
['*', 1, '-', '+', 4, '-', '*', 2, '-', '*', '+', -3, '-', '*', '+',
'*', 0, '-', '-', '*', '+', -3, '-', '-', -2, -2, 3, '-', -3, '-',
-1]),
Ib002BuiltTrees(
['+', '-', '+', '+', '-', '-', '+', '-', -2, -1, '+', '-', '*', '-',
'*', 1, '*', '+', '+', 4, '-', 3, '*', '+', '*', -3, 3, 2, -2, 2, -3,
4, 0, 1], 86, [-2, -1, 1, 4, 3, -3, 3, 2, -2, 2, -3, 4, 0, 1],
['+', 1, '-', '+', 0, '+', '+', 4, '-', '*', -3, '-', '*', '*', 2, '+',
'*', -2, '+', 2, '*', 3, -3, '+', '-', 3, 4, 1, '-', '-', '+', -1,
'-', -2],
['+', 1, '-', '+', '+', 4, '-', '*', -3, '-', '*', '*', 2, '+', '*',
-2, '+', 2, '*', 3, -3, '+', '-', 3, 4, 1, '-', '-', '+', -1, '-',
-2]),
Ib002BuiltTrees(
['-', '+', '*', '*', '+', '*', '-', '*', '*', 0, '+', '-', '-', '+',
'+', '-', '*', '-', 2, '*', '-', '*', 4, -2, 3, 3, 1, 2, 4, 2, 3, 4,
-2, -1], 25, [0, 2, 4, -2, 3, 3, 1, 2, 4, 2, 3, 4, -2, -1],
['-', '+', -1, '*', -2, '*', 4, '+', 3, '*', 2, '-', '*', 4, '*', '+',
2, '-', '-', '+', 1, '+', 3, '-', '*', '*', 3, '-', '*', -2, 4, '-',
2, 0],
['-', '+', -1, '*', -2, '*', 4, '+', 3, '*', 2, '-', '*', 4, '*', '+',
2, '-', '-', '+', 1, '+', 3, '-', '*', '*', 3, '-', '*', -2, 4, '-',
2, 0]),
Ib002BuiltTrees(
['+', '*', '-', '+', '+', '-', -2, '*', -3, '-', '-', 1, '-', '*', -3,
1, '*', '+', '+', 3, '-', '*', '*', 3, '*', '-', '-', -1, -1, -2, -2,
3, 3], -39, [-2, -3, 1, -3, 1, 3, 3, -1, -1, -2, -2, 3, 3],
['+', 3, '*', '*', 3, '+', -2, '+', '-', '*', -2, '*', '*', -1, '-',
'-', -1, 3, 3, '-', '+', '-', '*', 1, -3, '+', '*', '-', '-', 1, -3,
'-', -2],
['+', 3, '*', '*', 3, '+', -2, '+', '-', '*', -2, '*', '-', '-', '-',
-1, 3, 3, '-', '+', '-', '*', 1, -3, '+', '*', '-', '-', 1, -3, '-',
-2]),
Ib002BuiltTrees(
['*', '*', '+', '-', '+', '-', '-', '+', '+', '-', 1, '*', '*', 4, '-',
2, '*', -3, '*', '+', '*', '-', 4, '+', '-', 4, 3, -1, 1, -2, 2, 2,
-1, 2], 138, [1, 4, 2, -3, 4, 4, 3, -1, 1, -2, 2, 2, -1, 2],
['*', 2, '*', -1, '+', 2, '-', '+', 2, '-', '-', '+', -2, '+', '*',
'*', '*', 1, '+', -1, '*', '+', 3, '-', 4, '-', 4, -3, '*', '-', 2, 4,
'-', 1],
['*', 2, '-', '+', 2, '-', '+', 2, '-', '-', '+', -2, '+', '*', '*',
'*', 1, '+', -1, '*', '+', 3, '-', 4, '-', 4, -3, '*', '-', 2, 4, '-',
1]),
Ib002BuiltTrees(
['+', 4, '-', '-', '-', '*', '+', -1, 2, '*', '-', '-', 2, '*', '*',
'*', '*', -1, '+', '-', '*', 1, '-', '*', 4, '*', '+', -3, 1, -2, 3,
2, 3, 2], 460, [4, -1, 2, 2, -1, 1, 4, -3, 1, -2, 3, 2, 3, 2],
['+', '-', '-', '-', '*', '*', '*', 2, '*', 3, '*', 2, '*', '+', 3,
'-', '*', '-', '*', '*', -2, '+', 1, -3, 4, 1, -1, '-', '-', 2, '+',
2, -1, 4],
['+', '-', '-', '-', '*', '*', '*', 2, '*', 3, '*', 2, '-', '+', 3,
'-', '*', '-', '*', '*', -2, '+', 1, -3, 4, 1, '-', '-', 2, '+', 2,
-1, 4]),
Ib002BuiltTrees(
['*', '+', '-', -3, 0, '*', '-', 4, '*', '-', '+', '*', '+', '+', '-',
'-', 2, -3, '+', '*', '-', '*', '-', 4, '+', '+', -2, 1, 3, 2, 1, 3,
-2, -2], -1104, [-3, 0, 4, 2, -3, 4, -2, 1, 3, 2, 1, 3, -2, -2],
['*', '*', '*', -2, '-', '+', -2, '*', 3, '+', '+', 1, '*', 2, '-',
'*', '+', 3, '+', 1, -2, '-', 4, '+', -3, '-', '-', 2, '-', 4, '+', 0,
'-', -3],
['*', '*', '*', -2, '-', '+', -2, '*', 3, '+', '+', 1, '*', 2, '-',
'*', '+', 3, '+', 1, -2, '-', 4, '+', -3, '-', '-', 2, '-', 4, '-',
-3]),
Ib002BuiltTrees(
['*', 2, '+', '-', '*', '*', 2, '*', 3, '-', '+', '*', 4, '+', '+',
'-', '+', 0, '-', '*', -1, '*', '*', 0, '-', '+', '+', 4, 2, 0, -3,
-2, 2, 4, 0, 1], 2,
[2, 2, 3, 4, 0, -1, 0, 4, 2, 0, -3, -2, 2, 4, 0, 1],
['*', '+', 1, '-', '*', 0, '*', '*', '-', '+', 4, '*', '+', 2, '+', -2,
'-', '+', '-', '*', '*', -3, '*', '-', '+', 0, '+', 2, 4, 0, -1, 0, 4,
3, 2, 2],
['*', '+', 1, '-', '*', 0, '*', '*', '-', '+', 4, '*', '+', 2, '+', -2,
'-', '-', '-', '*', -3, '*', '-', '+', 2, 4, 0, 4, 3, 2, 2]),
Ib002BuiltTrees(
['*', 1, '*', '*', '*', '+', '*', '-', '+', '*', '-', '-', '-', '*', 3,
'-', -1, '*', '-', '*', 4, '*', '*', '+', 0, 4, 3, 2, 0, -3, -3, 1, 3,
-1, 0], 0, [1, 3, -1, 4, 0, 4, 3, 2, 0, -3, -3, 1, 3, -1, 0],
['*', '*', 0, '*', -1, '*', 3, '+', 1, '*', -3, '-', '+', -3, '*', '*',
0, '-', '*', '*', 2, '*', 3, '+', 4, 0, 4, '-', '-', '-', '*', '-',
-1, 3, 1],
['*', '*', 0, '-', '*', 3, '+', 1, '*', -3, '-', '+', -3, '*', '*', 0,
'-', '*', '*', 2, '*', 3, 4, 4, '-', '-', '-', '*', '-', -1, 3, 1]),
Ib002BuiltTrees(
['-', '*', '*', -3, '*', -3, '*', 4, '+', '+', 0, '*', 0, -1, 0, '*',
'*', 4, '-', 4, '-', '+', '-', 2, '+', '+', '*', '-', '-', '*', -2, 3,
-2, 2, -3], 0, [-3, -3, 4, 0, 0, -1, 0, 4, 4, 2, -2, 3, -2, 2, -3],
['-', '*', '*', '-', '+', '+', -3, '+', 2, '*', -2, '-', '-', '*', 3,
-2, '-', 2, '*', '-', 4, 4, '*', '*', '*', '+', 0, '+', '*', -1, 0, 0,
4, -3, -3],
['-', '*', '*', '-', '+', '+', -3, '+', 2, '*', -2, '-', '-', '*', 3,
-2, '-', 2, '*', '-', 4, 4, '*', '*', '*', '-', 0, 4, -3, -3]),
Ib002BuiltTrees(
['+', '-', '*', 0, '-', 1, '*', '+', '+', '+', '*', '+', '+', '*', 2,
'+', '+', '-', '*', -1, '+', 3, '*', 1, -3, '-', '-', 0, 0, -3, 0, 4,
4, 2, 2, 2], -8, [0, 1, 2, -1, 3, 1, -3, 0, 0, -3, 0, 4, 4, 2, 2, 2],
['+', '*', 2, '+', 2, '+', 2, '+', 4, '*', 4, '+', 0, '+', -3, '*',
'+', 0, '+', '-', '-', 0, '-', '*', '+', '*', -3, 1, 3, -1, 2, '-',
'*', '-', 1, 0],
['+', '*', 2, '+', 2, '+', 2, '+', 4, '*', 4, '+', -3, '*', '+', '-',
'-', 0, '-', '-', '+', '*', -3, 1, 3, 2, '-', '*', '-', 1, 0]),
Ib002BuiltTrees(
['+', '-', '+', '+', '-', '-', '*', 1, '*', -1, '*', '*', '*', '*',
'+', '-', -2, '+', '-', '+', '-', '*', '+', -3, 0, 4, 2, -1, 3, 4, 4,
0, 4, 3, 1], -6, [1, -1, -2, -3, 0, 4, 2, -1, 3, 4, 4, 0, 4, 3, 1],
['+', 1, '-', '+', 3, '+', 4, '-', '-', '*', '*', '*', 0, '*', 4, '*',
4, '*', 3, '+', '+', -1, '-', '+', 2, '-', '*', 4, '+', 0, -3, '-',
-2, -1, 1],
['+', 1, '-', '+', 3, '+', 4, '-', '-', '*', '-', '*', 0, '*', 4, '*',
4, '*', 3, '+', '+', -1, '-', '+', 2, '-', '*', 4, -3, '-', -2, 1]),
Ib002BuiltTrees(
['+', '*', '+', 4, '+', '*', -1, '-', 4, '-', -2, '+', -3, '*', '+',
'+', '-', -3, '*', '*', 1, '-', '*', 0, 3, '-', '-', '*', -3, 4, '+',
0, 0, -2, 3], -87,
[4, -1, 4, -2, -3, -3, 1, 0, 3, -3, 4, 0, 0, -2, 3],
['+', 3, '*', '+', '*', -2, '+', '+', 0, 0, '+', '*', '-', '-', '*', 4,
-3, '*', '-', '*', 3, 0, 1, '-', -3, -3, '+', '+', '-', -2, '*', '-',
4, -1, 4],
['+', 3, '*', '+', '*', -2, '+', '*', '-', '-', '*', 4, -3, '*', '-',
'*', 3, 0, 1, '-', -3, -3, '+', '+', '-', -2, '-', '-', 4, 4]),
Ib002BuiltTrees(
['+', '+', '+', '-', '+', 3, '-', '*', '*', '*', '-', '-', -2, '-',
'+', '+', 3, 1, '*', '+', -1, '+', '+', 3, -1, 0, 4, '+', '+', -1, 0,
0, 3, 3, 4, -3], -47,
[3, -2, 3, 1, -1, 3, -1, 0, 4, -1, 0, 0, 3, 3, 4, -3],
['+', -3, '+', 4, '+', 3, '-', '+', '-', '*', 3, '*', '+', 0, '+', 0,
-1, '*', '-', '+', '*', 4, '+', '+', 0, '+', -1, 3, -1, '+', 1, 3,
'-', '-', -2, 3],
['+', -3, '+', 4, '+', 3, '-', '+', '-', '*', 3, '-', '*', '-', '+',
'*', 4, '+', '+', -1, 3, -1, '+', 1, 3, '-', '-', -2, 3]),
Ib002BuiltTrees(
['*', '-', '+', '-', '-', '-', '*', '-', '+', '+', '*', '+', '-', '+',
'-', '+', '+', '+', -1, '*', '-', -1, -3, 1, 2, -2, 3, 1, 2, 1, -2,
-3, -3], -51, [-1, -1, -3, 1, 2, -2, 3, 1, 2, 1, -2, -3, -3],
['*', -3, '-', '+', -3, '-', '-', '-', '*', -2, '-', '+', 1, '+', 2,
'*', 1, '+', 3, '-', '+', -2, '-', '+', 2, '+', 1, '+', '*', -3, '-',
-1, -1],
['*', -3, '-', '+', -3, '-', '-', '-', '*', -2, '-', '+', 1, '+', 2,
'*', 1, '+', 3, '-', '+', -2, '-', '+', 2, '+', 1, '+', '*', -3, '-',
-1, -1]),
Ib002BuiltTrees(
['+', '+', '+', '*', '-', '*', '*', 1, -2, '+', '*', '-', '*', '+',
'*', -2, '+', '-', '-', '+', '-', '+', '+', 4, 0, 2, 4, 3, -3, 4, -3,
3, 4, 0, 3, 0], -453,
[1, -2, -2, 4, 0, 2, 4, 3, -3, 4, -3, 3, 4, 0, 3, 0],
['+', 0, '+', 3, '+', 0, '*', 4, '-', '*', '+', 3, '*', -3, '-', '*',
4, '+', -3, '*', '+', 3, '-', '-', '+', 4, '-', '+', 2, '+', 0, 4, -2,
'*', -2, 1],
['+', 3, '*', 4, '-', '*', '+', 3, '*', -3, '-', '*', 4, '+', -3, '*',
'+', 3, '-', '-', '+', 4, '-', '+', 2, 4, -2, '*', -2, 1]),
Ib002BuiltTrees(
['+', '-', -2, '*', '*', '*', -1, '*', '*', '+', 0, 3, '*', '*', '-',
'*', -1, '-', '-', '+', -3, '-', '*', '+', 4, '*', '-', 1, -1, -3, -1,
2, 2, 0, -2], 2,
[-2, -1, 0, 3, -1, -3, 4, 1, -1, -3, -1, 2, 2, 0, -2],
['+', '*', -2, '*', 0, '*', '*', 2, '*', '*', 2, '*', -1, '-', '*',
'-', '-', '+', '-', '*', -3, '+', '*', -1, '-', 1, 4, -3, -1, '+', 3,
0, -1, '-', -2],
['+', '*', -2, '*', 0, '-', '*', 2, '*', '*', 2, '-', '-', '-', '-',
'-', '+', '-', '*', -3, '+', '-', '-', 1, 4, -3, 3, '-', -2]),
Ib002BuiltTrees(
['+', '+', '*', '-', -1, '*', -3, -2, '-', '*', '*', '*', -3, 3, '-',
'-', 2, '*', '*', '*', '*', 3, '*', 0, '-', '*', '+', 0, 1, '-', 1,
-1, -1, 3, 1], 7, [-1, -3, -2, -3, 3, 2, 3, 0, 0, 1, 1, -1, -1, 3, 1],
['+', 1, '+', '-', '*', '*', 3, '*', -1, '*', -1, '*', '*', '-', '*',
'-', 1, '+', 1, 0, 0, 3, '*', '-', '-', 2, '*', 3, -3, '*', '*', -2,
-3, '-', -1],
['+', 1, '+', '-', '*', '*', 3, '-', '-', '*', '*', '-', '*', '-', 1,
1, 0, 3, '*', '-', '-', 2, '*', 3, -3, '*', '*', -2, -3, '-', -1]),
Ib002BuiltTrees(
['*', '*', '-', '*', '*', '*', '+', '-', 0, '-', -3, -2, '*', '*', '*',
-2, '-', '-', 1, '-', '+', '*', 4, '+', '*', 1, 3, '-', 2, -1, 3, 4,
-1, 3], -1296, [0, -3, -2, -2, 1, 4, 1, 3, 2, -1, 3, 4, -1, 3],
['*', 3, '*', -1, '-', '*', 4, '*', '*', 3, '*', '-', '+', -1, '*',
'+', '-', 2, '*', 3, 1, 4, '*', '-', '-', 1, -2, '*', -2, '+', '-',
-3, '-', 0],
['*', 3, '-', '-', '*', 4, '*', '*', 3, '*', '-', '+', -1, '*', '+',
'-', 2, '*', 3, 1, 4, '*', '-', '-', 1, -2, '*', -2, '+', '-', -3,
'-', 0]),
Ib002BuiltTrees(
['*', '*', '+', '+', '-', '+', 2, 2, '*', -2, '+', '-', '+', -1, -2,
'*', -1, '*', -2, '*', '-', '-', '*', '+', 1, '-', '-', '-', 1, -3,
-1, 1, 0, 1], 0, [2, 2, -2, -1, -2, -1, -2, 1, 1, -3, -1, 1, 0, 1],
['*', 1, '*', 0, '+', 1, '+', '*', '+', '*', '*', '*', -1, '-', '-',
'*', -3, '+', '-', '-', '-', 1, 1, -2, -1, '-', '+', -2, -1, -2, '-',
'+', 2, 2],
['*', 1, '*', 0, '+', 1, '+', '*', '+', '-', '*', '-', '-', '-', '*',
-3, '+', '-', '-', '-', 1, 1, -2, '-', '+', -2, -1, -2, '-', '+', 2,
2]),
Ib002BuiltTrees(
['+', '-', -2, '*', '+', '-', '+', 1, '-', '*', 2, '*', '*', '*', '*',
1, '*', '+', '+', '+', 3, 4, '+', '+', '+', '-', -2, 0, 0, 0, 4, -2,
0, -2, -1, 0, 3], -1,
[-2, 1, 2, 1, 3, 4, -2, 0, 0, 0, 4, -2, 0, -2, -1, 0, 3],
['+', '*', 3, '+', 0, '-', '+', '-', '*', '*', -1, '*', -2, '*', 0,
'*', '*', -2, '+', 4, '+', '+', 0, '+', 0, '+', 0, '-', -2, '+', 4, 3,
1, 2, 1, '-', -2],
['+', '*', 3, '-', '+', '-', '*', '-', '*', -2, '*', 0, '*', '*', -2,
'+', 4, '+', '-', -2, '+', 4, 3, 1, 2, 1, '-', -2]),
Ib002BuiltTrees(
['*', '*', '*', 2, '+', 0, '+', '+', '+', '-', 1, '-', '*', '*', -2,
'-', 0, '-', '+', '-', '*', '*', '+', '-', 2, '+', 1, 3, 3, 3, -3, -1,
4, -2, 1], -8, [2, 0, 1, -2, 0, 2, 1, 3, 3, 3, -3, -1, 4, -2, 1],
['*', 1, '*', -2, '*', '+', '+', 4, '+', -1, '+', '-', '*', '-', '+',
-3, '-', '*', 3, '*', 3, '+', '+', 3, 1, '-', 2, '*', '-', 0, -2, '-',
1, 0, 2],
['*', 1, '*', -2, '*', '+', 4, '+', -1, '+', '-', '*', '-', '+', -3,
'-', '*', 3, '*', 3, '+', '+', 3, 1, '-', 2, '*', '-', 0, -2, '-', 1,
2]),
Ib002BuiltTrees(
['+', '*', 0, '*', '*', 2, '-', '+', '-', '-', '+', -1, '-', 2, '+',
'+', '+', '*', '-', '*', '-', -1, '+', 1, '+', -3, '*', 3, 3, 2, 2, 0,
2, 4, -1], -1, [0, 2, -1, 2, -1, 1, -3, 3, 3, 2, 2, 0, 2, 4, -1],
['+', -1, '*', '*', 4, '*', '-', '+', '+', 2, '+', 0, '+', 2, '*', 2,
'-', '*', '+', '+', '*', 3, 3, -3, 1, '-', -1, '-', '-', '+', '-', 2,
-1, 2, 0],
['+', -1, '*', '*', 4, '*', '-', '+', '+', 2, '+', 2, '*', 2, '-', '*',
'+', '+', '*', 3, 3, -3, 1, '-', -1, '-', '-', '+', '-', 2, -1, 2,
0]),
Ib002BuiltTrees(
['-', '*', '+', '*', '+', '-', '+', '+', 2, '+', '-', '-', -2, '-',
'+', 0, 0, '*', '*', -1, '*', 4, '+', 4, -3, 0, '-', '*', '*', -2, -1,
2, -3, 2, 1], -14,
[2, -2, 0, 0, -1, 4, 4, -3, 0, -2, -1, 2, -3, 2, 1],
['-', '*', 1, '+', 2, '*', -3, '+', '-', '*', 2, '*', -1, -2, '-', '+',
'*', 0, '*', '*', '+', -3, 4, 4, -1, '+', '+', '-', '+', 0, 0, '-',
'-', -2, 2],
['-', '*', 1, '+', 2, '*', -3, '+', '-', '*', 2, '-', -2, '-', '+',
'*', 0, '-', '*', '+', -3, 4, 4, '+', '+', '-', 0, '-', '-', -2, 2]),
Ib002BuiltTrees(
['*', '*', '*', '-', -2, '*', 3, '+', 4, '-', '-', 2, '-', '+', 4, '*',
'*', 0, '*', '+', 0, '*', '-', 2, '-', '+', '*', '+', 4, 1, -3, -3, 3,
3, -3], 432, [-2, 3, 4, 2, 4, 0, 0, 2, 4, 1, -3, -3, 3, 3, -3],
['*', -3, '*', '-', '+', '*', 3, '*', '*', 3, '+', '*', '-', '+', -3,
'*', -3, '+', 1, 4, '-', 2, 0, 0, 4, '*', '*', '+', '-', '-', 2, 4, 3,
'-', -2],
['*', -3, '*', '-', '+', '*', 3, '*', '*', 3, '*', '-', '+', -3, '*',
-3, '+', 1, 4, '-', 2, 0, 4, '*', '*', '+', '-', '-', 2, 4, 3, '-',
-2]),
Ib002BuiltTrees(
['+', '*', '+', '+', '*', 1, '+', '-', 4, '*', '*', '-', '*', -2, '*',
-2, '-', '+', '+', '-', '*', '+', '+', '-', -3, 4, 0, -3, 0, 0, 1, -1,
-2, 4, 3, 3], -255,
[1, 4, -2, -2, -3, 4, 0, -3, 0, 0, 1, -1, -2, 4, 3, 3],
['+', 3, '*', 3, '+', 4, '+', -2, '*', '+', '*', -1, '*', 1, '-', '*',
'*', '-', '+', 0, '+', 0, '-', '*', -3, '+', 0, '+', 4, '-', -3, -2,
-2, '-', 4, 1],
['+', 3, '*', 3, '+', 4, '+', -2, '*', '+', '-', '*', 1, '-', '*', '*',
'-', '-', '*', -3, '+', 4, '-', -3, -2, -2, '-', 4, 1]),
Ib002BuiltTrees(
['*', '*', '+', '*', '-', -1, '*', '-', '*', '+', -3, 0, '+', '+', '-',
'+', '-', '*', -2, 3, '-', '-', '*', '+', -1, '*', 1, 2, -2, 4, 1, 1,
3, 3, 3], 54, [-1, -3, 0, -2, 3, -1, 1, 2, -2, 4, 1, 1, 3, 3, 3],
['*', 3, '*', 3, '+', 3, '*', '*', 1, '-', '*', '+', 1, '+', 4, '-',
'+', '-', '-', '*', -2, '+', '*', 2, 1, -1, '-', '*', 3, -2, '+', 0,
-3, '-', -1],
['*', 3, '*', 3, '+', 3, '*', '*', 1, '-', '*', '+', 1, '+', 4, '-',
'+', '-', '-', '*', -2, '+', '*', 2, 1, -1, '-', '*', 3, -2, -3, '-',
-1]),
Ib002BuiltTrees(
['-', '-', '+', '*', '+', '+', '-', '*', -2, '+', '+', '+', '*', '*',
'-', 2, '+', '*', '*', '-', 0, '+', '*', -2, 1, 4, 2, 1, -3, 1, -2, 2,
1, 0, 2, 0], 30, [-2, 2, 0, -2, 1, 4, 2, 1, -3, 1, -2, 2, 1, 0, 2, 0],
['-', '-', '+', 0, '*', 2, '+', 0, '+', 1, '-', '*', '+', 2, '+', -2,
'+', 1, '*', -3, '*', '+', 1, '*', 2, '*', '+', 4, '*', 1, -2, '-', 0,
'-', 2, -2],
['-', '-', '*', 2, '+', 1, '-', '*', '+', 2, '+', -2, '+', 1, '*', -3,
'*', '+', 1, '*', 2, '*', '+', 4, '*', 1, -2, '-', 0, '-', 2, -2]),
Ib002BuiltTrees(
['*', '*', '-', '+', '-', 1, '*', '+', -1, '-', 2, '*', '*', -2, '+',
'*', '+', -2, 2, '-', 3, '-', -2, '-', '*', '+', '-', '+', -3, 1, 3,
0, -2, 2], -4, [1, -1, 2, -2, -2, 2, 3, -2, -3, 1, 3, 0, -2, 2],
['*', 2, '*', -2, '-', '+', '*', '*', '-', '*', 0, '+', 3, '-', '+', 1,
-3, '*', '+', '-', -2, '*', '-', 3, '+', 2, -2, -2, '+', '-', 2, -1,
'-', 1],
['*', 2, '*', -2, '-', '+', '*', '*', '-', '*', 0, '+', 3, '-', '+', 1,
-3, '*', '+', '-', -2, '*', '-', 3, '+', 2, -2, -2, '+', '-', 2, -1,
'-', 1]),
Ib002BuiltTrees(
['*', '*', '*', 3, '-', '+', '+', '+', '-', '-', '-', '+', '+', '+',
'+', '+', '*', '*', '-', '+', '*', -1, -2, -2, 0, -2, 0, 4, 0, -3, 3,
2, 1, 2, 1, 4], -12,
[3, -1, -2, -2, 0, -2, 0, 4, 0, -3, 3, 2, 1, 2, 1, 4],
['*', 4, '*', 1, '*', '-', '+', 2, '+', 1, '+', 2, '-', '-', '-', '+',
3, '+', -3, '+', 0, '+', 4, '+', 0, '*', -2, '*', 0, '-', '+', -2,
'*', -2, -1, 3],
['*', 4, '*', 1, '*', '-', '+', 2, '+', 1, '+', 2, '-', '-', '-', '+',
3, '+', -3, '+', 4, '*', -2, '*', 0, '-', '+', -2, '-', -2, 3]),
Ib002BuiltTrees(
['+', '-', '*', '+', '-', '-', 4, '+', '+', '+', '-', '+', -2, '+',
'+', -3, '*', '*', '+', '+', 3, '*', '-', '-', 3, 3, -1, 4, -1, 2, -1,
0, -1, 4, 2], -194,
[4, -2, -3, 3, 3, 3, -1, 4, -1, 2, -1, 0, -1, 4, 2],
['+', 2, '-', '*', 4, '+', '+', -1, '+', 0, '+', -1, '-', '+', '+', 2,
'+', '*', -1, '*', 4, '+', -1, '+', '*', 3, '-', '-', 3, 3, -3, -2,
'-', '-', 4],
['+', 2, '-', '*', 4, '+', '+', -1, '+', -1, '-', '+', '+', 2, '+',
'-', '*', 4, '+', -1, '+', '*', 3, '-', '-', 3, 3, -3, -2, '-', '-',
4]),
Ib002BuiltTrees(
['*', -1, '+', '*', -1, '-', '+', '+', '-', '*', '+', 2, '-', '+', '*',
'*', '*', '*', '*', 1, '*', '*', '-', '-', -3, 0, -3, 2, 0, 2, 4, 0,
1, 2, -1, 1], 0,
[-1, -1, 2, 1, -3, 0, -3, 2, 0, 2, 4, 0, 1, 2, -1, 1],
['*', '+', 1, '*', '-', '+', -1, '+', 2, '-', '*', 1, '+', '-', '+', 0,
'*', 4, '*', 2, '*', 0, '*', 2, '*', '*', -3, '*', 0, '-', '-', -3, 1,
2, -1, -1],
['-', '+', 1, '-', '-', '+', -1, '+', 2, '-', '*', 1, '+', '-', '*', 4,
'*', 2, '*', 0, '*', 2, '*', '*', -3, '*', 0, '-', '-', -3, 1, 2]),
Ib002BuiltTrees(
['-', '*', '-', -2, '-', '-', '*', '*', 0, '*', '*', '+', -1, '+', '*',
-2, '*', '-', '-', '*', '*', '+', 3, '-', 0, 4, 2, '*', 0, -3, -2, -3,
2, -3], 0, [-2, 0, -1, -2, 3, 0, 4, 2, 0, -3, -2, -3, 2, -3],
['-', '*', '-', '-', '*', -3, '*', '*', 2, '*', -3, '+', '+', -2, '*',
'*', '*', -3, 0, '-', '-', '*', 2, '*', 4, '+', '-', 0, 3, -2, -1, 0,
'-', -2],
['-', '*', '-', '-', '*', -3, '*', '*', 2, '*', -3, '+', '+', -2, '*',
'*', '*', -3, 0, '-', '-', '*', 2, '*', 4, '+', '-', 0, 3, -2, -1, 0,
'-', -2]),
Ib002BuiltTrees(
['*', '-', 2, '+', '*', '*', '*', 4, '-', '*', '*', 2, '*', '-', '+',
'-', 3, '*', -2, '*', 0, -3, 1, '+', 4, '-', '-', '*', '*', 4, 4, 1,
2, -3, 1], -5762, [2, 4, 2, 3, -2, 0, -3, 1, 4, 4, 4, 1, 2, -3, 1],
['*', '+', 1, '*', -3, '*', 2, '*', '-', '*', '+', '-', '-', '*', 1,
'*', 4, 4, 4, '*', '*', 1, '-', '+', '*', '*', -3, 0, -2, '-', 3, 2,
4, '-', 2],
['*', '+', 1, '*', -3, '*', 2, '*', '-', '*', '+', '-', '-', '*', 1,
'*', 4, 4, 4, '*', '*', 1, '-', '+', '*', '*', -3, 0, -2, '-', 3, 2,
4, '-', 2]),
Ib002BuiltTrees(
['+', '*', '*', '-', '*', '*', '-', 0, '-', '*', '-', '*', 4, '+', -3,
'*', '-', '+', '*', 2, '-', '*', '+', '+', -2, 2, -2, 1, 2, 3, -2, 2,
-1, 3, 1], 1, [0, 4, -3, 2, -2, 2, -2, 1, 2, 3, -2, 2, -1, 3, 1],
['+', 1, '*', 3, '*', -1, '-', '*', 2, '*', '-', '*', -2, '-', '*',
'+', '*', 3, '-', '+', 2, '*', '-', '*', 1, '+', -2, '+', 2, -2, 2,
-3, 4, '-', 0],
['+', 1, '*', 3, '-', '-', '*', 2, '*', '-', '*', -2, '-', '*', '+',
'*', 3, '-', '+', 2, '*', '-', '*', 1, '+', -2, '+', 2, -2, 2, -3, 4,
'-', 0]),
Ib002BuiltTrees(
['-', '+', '+', 3, '-', '*', '*', '*', '+', '+', '*', '+', '*', '+',
'*', '*', '*', '-', '+', 4, 2, 1, '+', '-', 1, 2, -1, 1, 3, 3, -2, -3,
0, 2, -1, 0, 0], -3,
[3, 4, 2, 1, 1, 2, -1, 1, 3, 3, -2, -3, 0, 2, -1, 0, 0],
['-', '+', 0, '+', '-', '*', 0, '*', -1, '*', 2, '+', 0, '+', -3, '*',
-2, '+', 3, '*', 3, '+', 1, '*', -1, '*', '+', 2, '-', 1, '*', 1, '-',
'+', 2, 4, 3],
['-', '+', '-', '*', 0, '-', '*', 2, '+', -3, '*', -2, '+', 3, '*', 3,
'+', 1, '-', '*', '+', 2, '-', 1, '*', 1, '-', '+', 2, 4, 3]),
Ib002BuiltTrees(
['+', '*', 3, '+', '+', '+', '-', '-', '+', '*', '*', '-', '*', '-',
'+', -2, '*', '+', '-', 3, -2, '*', '+', '+', 1, 0, 0, -2, 1, 2, 0, 2,
4, 1, -1, -1], 17,
[3, -2, 3, -2, 1, 0, 0, -2, 1, 2, 0, 2, 4, 1, -1, -1],
['+', -1, '*', '+', -1, '+', 1, '+', 4, '-', '-', '+', 2, '*', 0, '*',
2, '-', '*', 1, '-', '+', '*', '*', -2, '+', 0, '+', 0, 1, '+', -2,
'-', 3, -2, 3],
['+', -1, '*', '+', -1, '+', 1, '+', 4, '-', '-', '+', 2, '*', 0, '*',
2, '-', '*', 1, '-', '+', '*', '*', -2, 1, '+', -2, '-', 3, -2, 3]),
Ib002BuiltTrees(
['+', '-', '+', '*', -2, '*', '-', '+', 2, '+', '+', '+', '-', '-',
'+', '+', '*', '-', -3, '+', '-', '*', '-', 2, 2, -1, -2, 4, -2, -2,
-1, -1, 2, 3], 17, [-2, 2, -3, 2, 2, -1, -2, 4, -2, -2, -1, -1, 2, 3],
['+', 3, '-', '+', 2, '*', '*', -1, '-', '+', '+', -1, '+', -2, '+',
-2, '-', '-', '+', 4, '+', -2, '*', '+', -1, '-', '*', 2, '-', 2, '-',
-3, 2, -2],
['+', 3, '-', '+', 2, '*', '-', '-', '+', '+', -1, '+', -2, '+', -2,
'-', '-', '+', 4, '+', -2, '*', '+', -1, '-', '*', 2, '-', 2, '-', -3,
2, -2]),
Ib002BuiltTrees(
['+', '*', '*', '*', 3, '*', 2, 4, '+', 1, '*', '*', 3, -3, '-', '+',
'*', '*', 3, '-', '*', '-', '+', '-', '+', '-', '*', 2, -1, -2, 3, 0,
0, 0, 3, -1], 71,
[3, 2, 4, 1, 3, -3, 3, 2, -1, -2, 3, 0, 0, 0, 3, -1],
['+', -1, '*', 3, '*', '+', '*', '-', '+', 0, '*', 0, '*', '-', '*', 0,
'-', '+', 3, '-', '+', -2, '-', '*', -1, 2, 3, '*', -3, 3, 1, '*',
'*', 4, 2, 3],
['+', -1, '*', 3, '*', '+', '*', '-', '*', 0, '*', '-', '*', 0, '-',
'+', 3, '-', '+', -2, '-', '-', 2, 3, '*', -3, 3, 1, '*', '*', 4, 2,
3]),
Ib002BuiltTrees(
['+', '+', '-', '-', '+', '*', '*', '+', '*', 4, '*', 3, '-', -3, '*',
1, '*', '*', '-', '+', 2, '*', '+', '-', -3, '-', 2, 1, 1, 2, 4, 3, 3,
-1, -3], 359, [4, 3, -3, 1, 2, -3, 2, 1, 1, 2, 4, 3, 3, -1, -3],
['+', -3, '+', -1, '-', '-', '+', 3, '*', 3, '*', 4, '+', '*', '*', 2,
'*', 1, '-', '+', '*', 1, '+', '-', 2, '-', -3, 2, 1, '*', '*', '-',
-3, 3, 4],
['+', -3, '+', -1, '-', '-', '+', 3, '*', 3, '*', 4, '+', '*', '*', 2,
'*', 1, '-', '+', '*', 1, '+', '-', 2, '-', -3, 2, 1, '*', '*', '-',
-3, 3, 4]),
Ib002BuiltTrees(
['+', '+', '-', '*', '-', '-', '*', '+', '-', '-', -2, '-', '+', -3,
'+', '*', '+', '-', 3, '+', 4, '+', '+', '*', 1, -3, -3, -3, 1, -2, 2,
2, 1, -3], -46, [-2, -3, 3, 4, 1, -3, -3, -3, 1, -2, 2, 2, 1, -3],
['+', -3, '+', 1, '-', '*', 2, '-', '-', '*', 2, '+', '-', '+', '+',
-2, '*', 1, '+', '+', '+', -3, '+', -3, '*', -3, 1, 4, '-', 3, -3,
'-', '-', -2],
['+', -3, '+', 1, '-', '*', 2, '-', '-', '*', 2, '+', '-', '+', '+',
-2, '*', 1, '+', '+', '+', -3, '+', -3, '*', -3, 1, 4, '-', 3, -3,
'-', '-', -2]),
Ib002BuiltTrees(
['-', '-', '*', '+', '+', '+', '*', '*', '+', '-', '*', '*', '+', '-',
'*', '*', '*', 4, 4, '*', '*', '-', -2, 1, 0, 3, 4, 2, 4, -1, 4, 2,
-2, 1, 0, -1], 265,
[4, 4, -2, 1, 0, 3, 4, 2, 4, -1, 4, 2, -2, 1, 0, -1],
['-', '-', '*', -1, '+', 0, '+', 1, '+', -2, '*', 2, '*', 4, '+', -1,
'-', '*', 4, '*', 2, '+', 4, '-', '*', 3, '*', '*', 0, '*', 1, '-',
-2, '*', 4, 4],
['-', '-', '-', '+', 1, '+', -2, '*', 2, '*', 4, '+', -1, '-', '*', 4,
'*', 2, '+', 4, '-', '*', 3, '*', '*', 0, '*', 1, '-', -2, '*', 4,
4]),
Ib002BuiltTrees(
['+', '*', '*', '*', 4, '-', '-', '-', '-', -3, '+', '*', '+', '*',
'*', '-', '*', '+', -3, '+', '*', '-', '-', 2, 0, -2, -2, -2, -3, 2,
-2, 0, -3, -3], 4173,
[4, -3, -3, 2, 0, -2, -2, -2, -3, 2, -2, 0, -3, -3],
['+', -3, '*', -3, '*', '+', 0, '*', -2, '+', 2, '*', -3, '*', -2, '-',
'*', -2, '+', '+', -2, '*', 0, '-', '-', 2, -3, '*', '-', '-', '-',
'-', -3, 4],
['+', -3, '*', -3, '*', '*', -2, '+', 2, '*', -3, '*', -2, '-', '*',
-2, '+', '+', -2, '*', 0, '-', '-', 2, -3, '*', '-', '-', '-', '-',
-3, 4]),
Ib002BuiltTrees(
['*', -3, '*', '*', '+', '+', -2, '-', '-', '-', '*', '-', '*', 2, '+',
'*', '+', 4, '-', '*', 0, 3, '*', '+', '-', 4, '+', -2, 4, 1, 2, 1, 4,
-3, 2], -180, [-3, -2, 2, 4, 0, 3, 4, -2, 4, 1, 2, 1, 4, -3, 2],
['*', '*', 2, '*', -3, '+', 4, '+', '-', '-', '-', '*', 1, '-', '*',
'+', 2, '*', '*', 1, '+', '+', 4, -2, '-', 4, '+', '-', '*', 3, 0, 4,
2, -2, -3],
['*', '*', 2, '*', -3, '+', 4, '+', '-', '-', '-', '*', 1, '-', '*',
'+', 2, '*', '*', 1, '+', '+', 4, -2, '-', 4, '+', '-', '*', 3, 0, 4,
2, -2, -3]),
Ib002BuiltTrees(
['+', '*', '*', 2, 2, '-', 3, '*', '-', '+', '+', 4, '*', 0, -2, '+',
'+', '*', '*', '+', 0, '*', '-', '-', '+', 4, '-', '*', 4, 2, -2, -1,
-3, 4, 4, 4], -156,
[2, 2, 3, 4, 0, -2, 0, 4, 4, 2, -2, -1, -3, 4, 4, 4],
['+', '*', 4, '-', '+', '+', 4, '+', 4, '*', -3, '*', -1, '+', '*', -2,
'-', '-', '+', '-', '*', 2, 4, 4, 0, '+', '*', -2, 0, 4, '*', '-', 3,
'*', 2, 2],
['+', '*', 4, '-', '+', '+', 4, '+', 4, '*', -3, '-', '*', -2, '-',
'-', '+', '-', '*', 2, 4, 4, '+', '*', -2, 0, 4, '*', '-', 3, '*', 2,
2]),
Ib002BuiltTrees(
['*', 0, '+', '-', '*', 1, '+', '-', '*', '-', '+', 2, '+', '*', '-',
-2, '*', '+', '+', '+', -1, '-', '*', '*', 3, '+', 2, 2, -1, 3, 1, 0,
3, 2, 3, 4], 0, [0, 1, 2, -2, -1, 3, 2, 2, -1, 3, 1, 0, 3, 2, 3, 4],
['*', '+', 4, '-', '*', '+', 3, '-', '*', 2, '-', '+', '+', 3, '*',
'*', 0, '+', 1, '+', 3, '+', '-', '*', -1, '*', '+', 2, 2, 3, -1, '-',
-2, 2, 1, 0],
['*', '+', 4, '-', '*', '+', 3, '-', '*', 2, '-', '+', '+', 3, '*',
'*', 0, '+', 1, '+', 3, '+', '-', '-', '*', '+', 2, 2, 3, -1, '-', -2,
2, 1, 0]),
Ib002BuiltTrees(
['*', '*', '*', '*', '*', '*', '*', 2, '-', '+', '+', '*', '*', '-',
'-', '-', '-', '*', '+', '*', '-', -2, 1, -1, -1, -1, -2, 2, 0, -3,
-3, 4, 4, -2, -3], 0,
[2, -2, 1, -1, -1, -1, -2, 2, 0, -3, -3, 4, 4, -2, -3],
['*', -3, '*', -2, '*', 4, '*', 4, '*', -3, '*', -3, '*', '-', '+', 0,
'+', 2, '*', -2, '*', -1, '-', '-', '-', '-', '*', -1, '+', -1, '*',
1, '-', -2, 2],
['*', -3, '*', -2, '*', 4, '*', 4, '*', -3, '*', -3, '*', '-', '+', 2,
'*', -2, '-', '-', '-', '-', '-', '-', '+', -1, '*', 1, '-', -2, 2]),
Ib002BuiltTrees(
['*', '+', '-', '*', '+', '*', 3, '-', '-', 3, '-', '-', '*', '+', -1,
'-', '+', '*', '+', '*', '*', '*', '*', 4, -3, 0, 0, 1, -1, 3, -2, 4,
0, 4, -2], -8, [3, 3, -1, 4, -3, 0, 0, 1, -1, 3, -2, 4, 0, 4, -2],
['*', -2, '+', 4, '-', '*', 0, '+', '-', '-', '*', 4, '+', '-', '+',
-2, '*', 3, '+', -1, '*', 1, '*', 0, '*', 0, '*', -3, 4, -1, '*', '-',
'-', 3, 3],
['*', -2, '+', 4, '-', '*', 0, '+', '-', '-', '*', 4, '+', '-', '+',
-2, '*', 3, '+', -1, '*', 1, '*', 0, '*', 0, '*', -3, 4, -1, '*', '-',
'-', 3, 3]),
Ib002BuiltTrees(
['+', '-', '+', '+', -1, -2, '-', '+', '-', 0, '-', '+', '*', '-', 3,
'+', '-', '*', '*', '+', 4, 0, 1, 1, '+', '+', '+', '+', -1, 4, -2, 3,
-2, -1, -2], -4, [-1, -2, 0, 3, 4, 0, 1, 1, -1, 4, -2, 3, -2, -1, -2],
['+', -2, '-', '+', '-', '+', '-', '+', -1, '*', '+', '+', -2, '+', 3,
'+', -2, '+', 4, -1, '-', '*', 1, '*', 1, '+', 0, 4, '-', 3, '-', 0,
'+', -2, -1],
['+', -2, '-', '+', '-', '+', '-', '+', -1, '*', '+', '+', -2, '+', 3,
'+', -2, '+', 4, -1, '-', '*', 1, '*', 1, 4, '-', 3, '-', 0, '+', -2,
-1]),
Ib002BuiltTrees(
['*', '+', 1, '+', '-', '+', -1, '+', '-', '+', '+', '*', '*', '*',
'*', '-', -1, '*', '*', '-', '-', '*', '-', 2, 3, 3, 1, 0, -2, 1, -2,
2, 4, -1, 4], -12, [1, -1, -1, 2, 3, 3, 1, 0, -2, 1, -2, 2, 4, -1, 4],
['*', 4, '+', '+', -1, '-', '+', '+', 4, '-', '+', 2, '+', -2, '*', 1,
'*', -2, '*', 0, '*', '*', 1, '*', 3, '-', '-', '*', 3, '-', 2, '-',
-1, -1, 1],
['*', 4, '+', '+', -1, '-', '+', '+', 4, '-', '+', 2, '+', -2, '*', 1,
'*', -2, '*', 0, '*', '*', 1, '*', 3, '-', '-', '*', 3, '-', 2, '-',
-1, -1, 1]),
Ib002BuiltTrees(
['-', '+', '*', '+', '*', 3, '-', '*', '-', '*', '-', '+', '+', '-', 2,
'*', '*', '+', 4, '+', '*', '*', '+', 4, 3, 4, 4, 2, -3, 0, 0, -2, 4,
-1, -2, 3], -101,
[3, 2, 4, 4, 3, 4, 4, 2, -3, 0, 0, -2, 4, -1, -2, 3],
['-', '+', 3, '*', -2, '+', -1, '*', '-', '*', 4, '-', '*', -2, '-',
'+', 0, '+', '*', 0, '*', -3, '+', '+', 2, '*', 4, '*', 4, '+', 3, 4,
4, '-', 2, 3],
['-', '+', 3, '*', -2, '+', -1, '*', '-', '*', 4, '-', '*', -2, '-',
'+', '*', 0, '*', -3, '+', '+', 2, '*', 4, '*', 4, '+', 3, 4, 4, '-',
2, 3]),
Ib002BuiltTrees(
['*', -3, '-', '+', '+', '-', -2, 1, '-', '+', '*', '*', -3, '-', '+',
-3, -3, '-', '+', '-', -1, 2, '*', '*', '*', '*', '+', '-', -2, -2, 1,
3, 3, 1], -153, [-3, -2, 1, -3, -3, -3, -1, 2, -2, -2, 1, 3, 3, 1],
['*', '-', '+', '-', '+', '*', 1, '*', 3, '*', 3, '*', 1, '+', -2, '-',
-2, '*', '-', '+', 2, '-', -1, '*', '-', '+', -3, -3, -3, '+', 1, '-',
-2, -3],
['*', '-', '+', '-', '+', '*', 1, '*', 3, '*', 3, '*', 1, '+', -2, '-',
-2, '*', '-', '+', 2, '-', -1, '*', '-', '+', -3, -3, -3, '+', 1, '-',
-2, -3]),
Ib002BuiltTrees(
['*', '-', -3, '*', '-', -3, '+', '*', '-', '-', 0, '-', -2, '+', '*',
'-', -2, '+', '+', '*', '+', '*', '*', '*', '-', -2, 0, 4, 0, -2, 2,
-3, 1, -1], -117, [-3, -3, 0, -2, -2, -2, 0, 4, 0, -2, 2, -3, 1, -1],
['*', '*', '+', '+', -1, '*', '+', 1, '+', -3, '*', 2, '+', -2, '*', 0,
'*', 4, '*', 0, '-', -2, '-', -2, '*', '-', -2, '-', '-', 0, '-', -3,
'-', -3],
['*', '*', '+', '+', -1, '*', '+', 1, '+', -3, '*', 2, '+', -2, '*', 0,
'*', 4, '*', 0, '-', -2, '-', -2, '*', '-', -2, '-', '-', 0, '-', -3,
'-', -3]),
Ib002BuiltTrees(
['-', '+', '*', '*', 1, '-', '*', '-', '*', '+', '-', '*', '*', '-',
'*', '+', '+', '-', '+', '+', '+', -3, 1, -3, -3, 0, 2, 2, 2, -2, 1,
-3, -1, -3, -2], -709,
[1, -3, 1, -3, -3, 0, 2, 2, 2, -2, 1, -3, -1, -3, -2],
['-', '+', -2, '*', -3, '*', '-', '*', -1, '-', '*', -3, '+', 1, '-',
'*', -2, '*', 2, '-', '*', 2, '+', 2, '+', 0, '-', '+', -3, '+', -3,
'+', 1, -3, 1],
['-', '+', -2, '*', -3, '*', '-', '-', '-', '*', -3, '+', 1, '-', '*',
-2, '*', 2, '-', '*', 2, '+', 2, '-', '+', -3, '+', -3, '+', 1, -3,
1]),
Ib002BuiltTrees(
['+', '-', '+', '*', '+', '*', '+', '-', -3, '+', '-', '*', 4, '*',
'*', '*', '+', '-', '-', '-', -3, -3, '+', '*', -1, -2, 2, -3, 2, -1,
2, -2, -2, 2, 0], 2,
[-3, 4, -3, -3, -1, -2, 2, -3, 2, -1, 2, -2, -2, 2, 0],
['+', 0, '-', '+', 2, '*', -2, '+', -2, '*', 2, '+', '+', -1, '-', '*',
'*', 2, '*', -3, '*', '+', 2, '*', -2, -1, '+', -3, '-', '-', '-', -3,
4, '-', -3],
['-', '+', 2, '*', -2, '+', -2, '*', 2, '+', '+', -1, '-', '*', '*', 2,
'*', -3, '*', '+', 2, '-', -2, '+', -3, '-', '-', '-', -3, 4, '-',
-3]),
Ib002BuiltTrees(
['*', '-', '-', '+', 3, '-', -2, '+', '+', -1, 1, '*', '+', 0, '-',
'+', 4, '*', '+', '*', '+', '+', '*', -2, '+', '+', '-', 1, 2, 2, 4,
-3, 0, -1, 0, 0], 0,
[3, -2, -1, 1, 0, 4, -2, 1, 2, 2, 4, -3, 0, -1, 0, 0],
['*', '+', '*', 0, '+', '-', '+', '*', 0, '+', -1, '*', 0, '+', -3,
'+', 4, '*', '+', 2, '+', 2, '-', 1, -2, 4, 0, '+', 1, -1, '-', '-',
'+', '-', -2, 3],
['*', '+', '*', 0, '-', '+', '*', 0, '+', -1, '*', 0, '+', -3, '+', 4,
'*', '+', 2, '+', 2, '-', 1, -2, 4, '+', 1, -1, '-', '-', '+', '-',
-2, 3]),
Ib002BuiltTrees(
['*', '+', 3, '*', '*', '-', '*', '*', '-', '*', '+', '-', 4, '-', -1,
'*', '*', -1, '*', '-', 2, 2, '+', '+', '+', '*', 1, 0, -1, -2, 2, 0,
0, 3, -1, 3], 9,
[3, 4, -1, -1, 2, 2, 1, 0, -1, -2, 2, 0, 0, 3, -1, 3],
['*', 3, '+', '*', -1, '*', 3, '-', '*', 0, '*', 0, '-', '*', '*', '+',
2, '+', -2, '+', -1, '*', 0, 1, '*', '*', 2, '-', 2, -1, '+', '-', -1,
'-', 4, 3],
['*', 3, '+', '-', '*', 3, '-', '*', 0, '*', 0, '-', '*', '*', '+', 2,
'+', -2, '+', -1, '*', 0, 1, '-', '*', 2, '-', 2, '+', '-', -1, '-',
4, 3]),
Ib002BuiltTrees(
['+', '+', '*', '+', '*', '*', '*', 2, -1, '-', '-', '+', '-', '+',
'+', '+', '-', 2, '-', '+', '+', '+', '*', 3, 0, -3, 2, -1, -3, 4, 3,
-2, -3, 3, 1, 0], 16,
[2, -1, 2, 3, 0, -3, 2, -1, -3, 4, 3, -2, -3, 3, 1, 0],
['+', 0, '+', 1, '*', 3, '+', -3, '*', -2, '*', '-', '-', '+', 3, '-',
'+', 4, '+', -3, '+', '-', '+', -1, '+', 2, '+', -3, '*', 0, 3, '-',
2, '*', -1, 2],
['+', 1, '*', 3, '+', -3, '*', -2, '*', '-', '-', '+', 3, '-', '+', 4,
'+', -3, '+', '-', '+', -1, '+', 2, '+', -3, '*', 0, 3, '-', 2, '-',
2]),
Ib002BuiltTrees(
['*', '+', '*', '-', 0, '-', '+', '+', '+', 1, '-', '*', 3, '+', -3,
'*', '*', '+', '-', '*', 4, '-', '+', '*', '*', 1, 0, 1, 2, 1, 1, 3,
4, -2, -2, 3], -6,
[0, 1, 3, -3, 4, 1, 0, 1, 2, 1, 1, 3, 4, -2, -2, 3],
['*', 3, '+', -2, '*', '-', '+', -2, '+', 4, '+', '-', '*', '+', '*',
3, '*', 1, '+', 1, '-', '*', '-', '+', 2, '*', 1, '*', 0, 1, 4, -3, 3,
1, '-', 0],
['*', 3, '+', -2, '*', '-', '+', -2, '+', 4, '+', '-', '*', '+', '*',
3, '*', 1, '+', 1, '-', '*', '-', '+', 2, '*', 1, '*', 0, 1, 4, -3, 3,
1, '-', 0]),
Ib002BuiltTrees(
['+', '*', 1, '-', -1, '*', '+', '+', -3, 0, -3, '+', '+', '*', '+',
'*', '-', '+', 3, '+', '*', -1, '-', '*', '-', '*', '-', 0, 3, 3, 3,
0, 4, -2, -3, 4], 43,
[1, -1, -3, 0, -3, 3, -1, 0, 3, 3, 3, 0, 4, -2, -3, 4],
['+', '*', '+', 4, '+', -3, '*', -2, '+', 4, '*', 0, '-', '+', '+', 3,
'*', '-', '*', 3, '-', '*', 3, '-', 0, -1, 3, '+', -3, '+', 0, -3,
'*', '-', -1, 1],
['+', '*', '+', 4, '+', -3, '*', -2, '+', 4, '*', 0, '-', '+', '+', 3,
'-', '-', '*', 3, '-', '*', 3, '-', 0, 3, '+', -3, -3, '*', '-', -1,
1]),
Ib002BuiltTrees(
['-', '+', '*', '*', 2, '*', '*', '+', '*', '-', '+', '-', -3, '-',
'+', '-', 4, '*', '*', '-', '*', -2, 0, '+', '*', 2, -1, 3, 0, 0, -2,
-3, 2, -3, -1], 73,
[2, -3, 4, -2, 0, 2, -1, 3, 0, 0, -2, -3, 2, -3, -1],
['-', '+', -1, '*', -3, '*', '*', 2, '*', -3, '+', -2, '*', 0, '-',
'+', '-', '+', '*', 0, '*', '+', 3, '*', -1, 2, '-', '*', 0, -2, '-',
4, '-', -3, 2],
['-', '+', -1, '*', -3, '*', '*', 2, '*', -3, '+', -2, '*', 0, '-',
'+', '-', '+', '*', 0, '*', '+', 3, '-', 2, '-', '*', 0, -2, '-', 4,
'-', -3, 2]),
Ib002BuiltTrees(
['+', '+', 4, '+', 2, '-', '*', -2, '-', '-', '+', '*', '*', '+', '-',
'+', '*', -3, '*', 3, '-', '*', '+', -1, 3, 1, -2, '*', '-', -1, 4, 1,
2, -1, 3], -41, [4, 2, -2, -3, 3, -1, 3, 1, -2, -1, 4, 1, 2, -1, 3],
['+', 3, '+', '+', '-', '*', '-', '-', '+', -1, '*', 2, '*', 1, '+',
'*', 4, '-', -1, '-', '+', -2, '*', '*', '-', '*', 1, '+', 3, -1, 3,
-3, -2, 2, 4],
['+', 3, '+', '+', '-', '*', '-', '-', '+', -1, '*', 2, '*', 1, '+',
'*', 4, '-', -1, '-', '+', -2, '*', '*', '-', '*', 1, '+', 3, -1, 3,
-3, -2, 2, 4]),
Ib002BuiltTrees(
['-', '+', '-', '-', '+', 0, '*', 3, '+', '*', 0, 2, '*', '*', '*',
'-', '-', '+', 2, '*', '*', '+', '+', -2, '*', '*', -1, -3, -3, -1,
-1, -2, -1, -1, -1, 3], -69,
[0, 3, 0, 2, 2, -2, -1, -3, -3, -1, -1, -2, -1, -1, -1, 3],
['-', '+', 3, '-', '-', '+', '*', '+', '*', -1, '*', -1, '*', -1, '-',
'-', '+', '*', -2, '*', -1, '+', -1, '+', '*', -3, '*', -3, -1, -2, 2,
'*', 2, 0, 3, 0],
['-', '+', 3, '-', '-', '*', '+', '-', '-', '-', '-', '-', '+', '*',
-2, '-', '+', -1, '+', '*', -3, '-', -3, -2, 2, '*', 2, 0, 3]),
Ib002BuiltTrees(
['*', '*', '+', 0, -3, '*', -2, 0, '+', '-', '*', '+', '*', 2, '+',
'+', -3, '-', -1, 2, '-', '-', '+', '+', 3, '-', '*', '+', -3, '+', 0,
2, -3, -2, 1, -1], 0,
[0, -3, -2, 0, 2, -3, -1, 2, 3, -3, 0, 2, -3, -2, 1, -1],
['*', '+', -1, '-', '*', 1, '+', '-', '-', '+', -2, '+', '-', '*', -3,
'+', '+', 2, 0, -3, 3, '*', '+', 2, '+', '-', -1, -3, 2, '*', '*', 0,
-2, '+', -3, 0],
['*', '+', -1, '-', '*', 1, '+', '-', '-', '+', -2, '+', '-', '*', -3,
'+', 2, -3, 3, '*', '+', 2, '+', '-', -1, -3, 2, '*', '*', 0, -2,
-3]),
Ib002BuiltTrees(
['-', '-', '+', '+', '+', '+', '+', '-', '*', '*', -1, '+', '+', -2,
-3, '*', 2, 0, '*', 0, '-', 0, '*', '+', '*', -3, '-', '+', -3, -3,
-1, -2, 0, 4, 0, -3], 39,
[-1, -2, -3, 2, 0, 0, 0, -3, -3, -3, -1, -2, 0, 4, 0, -3],
['-', '-', '+', -3, '+', 0, '+', 4, '+', 0, '+', '*', -2, '+', -1, '*',
'-', '+', -3, -3, -3, '-', '*', '*', '-', 0, 0, '*', '+', '*', 0, 2,
'+', -3, -2, -1],
['-', '-', '+', -3, '+', 4, '+', '*', -2, '+', -1, '*', '-', '+', -3,
-3, -3, '-', '*', '*', '-', 0, 0, '-', '+', '*', 0, 2, '+', -3, -2]),
Ib002BuiltTrees(
['+', '+', '*', '*', '*', '+', 1, '-', '*', '-', '*', '+', '+', '-',
-2, '-', 4, 2, '*', '+', '*', '+', '*', '-', -2, -2, 3, 3, -2, 2, 1,
1, 3, 4, 4, 3], 19,
[1, -2, 4, 2, -2, -2, 3, 3, -2, 2, 1, 1, 3, 4, 4, 3],
['+', 3, '+', 4, '*', 4, '*', 3, '*', 1, '+', '-', '*', 1, '-', '*',
'*', 2, '+', -2, '*', 3, '+', 3, '*', -2, '-', -2, '+', 2, '+', '-',
4, '-', -2, 1],
['+', 3, '+', 4, '*', 4, '*', 3, '*', 1, '+', '-', '*', 1, '-', '*',
'*', 2, '+', -2, '*', 3, '+', 3, '*', -2, '-', -2, '+', 2, '+', '-',
4, '-', -2, 1]),
Ib002BuiltTrees(
['+', '+', '+', '*', -1, '+', 3, '*', 4, '-', -3, '+', '-', '+', 4,
'*', '+', 1, '+', '-', '+', '+', '*', '-', '*', '+', 3, 2, 4, 0, 2, 2,
3, 1, 2, 3, 1], -13,
[-1, 3, 4, -3, 4, 1, 3, 2, 4, 0, 2, 2, 3, 1, 2, 3, 1],
['+', 1, '+', 3, '+', '+', 2, '-', '+', '*', 1, '+', '+', 3, '-', '+',
2, '+', 2, '*', 0, '-', '*', 4, '+', 2, 3, 1, 4, '*', '+', '*', '-',
-3, 4, 3, -1],
['+', 1, '+', 3, '+', '+', 2, '-', '+', '*', 1, '+', '+', 3, '-', '+',
2, '+', 2, '*', 0, '-', '*', 4, '+', 2, 3, 1, 4, '-', '+', '*', '-',
-3, 4, 3]),
Ib002BuiltTrees(
['*', '*', '-', '+', '+', '*', '+', '*', '-', '+', '*', 1, -3, '+',
'+', '+', '+', '-', '*', '+', '*', 2, '-', 2, -3, -3, -2, 4, 4, 0, 4,
-3, 1, 0, -3, 0, 2], 0,
[1, -3, 2, 2, -3, -3, -2, 4, 4, 0, 4, -3, 1, 0, -3, 0, 2],
['*', 2, '*', 0, '-', '+', -3, '+', 0, '*', 1, '+', -3, '*', 4, '-',
'+', '+', 0, '+', 4, '+', 4, '+', -2, '-', '*', -3, '+', -3, '*', '-',
2, 2, '*', -3, 1],
['*', 2, '*', 0, '-', '+', -3, '*', 1, '+', -3, '*', 4, '-', '+', '+',
4, '+', 4, '+', -2, '-', '*', -3, '+', -3, '*', '-', 2, 2, '*', -3,
1]),
Ib002BuiltTrees(
['+', '+', '*', 3, '+', '+', 1, '-', '+', '-', '+', '+', '+', '+', '-',
'*', -1, '*', 4, '-', '-', 2, 3, '*', 4, '+', -1, -3, '+', 2, 2, -2,
-1, 2, 1, -2], 2,
[3, 1, -1, 4, 2, 3, 4, -1, -3, 2, 2, -2, -1, 2, 1, -2],
['+', -2, '+', 1, '*', '+', 2, '+', '-', '+', -1, '-', '+', -2, '+',
'+', 2, 2, '+', '*', '+', -3, -1, 4, '+', 3, '-', '*', '*', '-', '-',
2, 4, -1, 1, 3],
['+', -2, '+', 1, '*', '+', 2, '+', '-', '+', -1, '-', '+', -2, '+',
'+', 2, 2, '+', '*', '+', -3, -1, 4, '+', 3, '-', '-', '*', '-', '-',
2, 4, 1, 3]),
Ib002BuiltTrees(
['-', '-', '+', '+', '-', -2, '+', -2, '*', '*', '+', '*', '-', '*',
'+', '*', 4, '+', '+', '+', '-', '-', '+', -2, -1, 0, -3, -2, -3, -2,
0, -2, 3, -2, 2], 14,
[-2, -2, 4, -2, -1, 0, -3, -2, -3, -2, 0, -2, 3, -2, 2],
['-', '-', '+', 2, '+', '+', '*', -2, '*', 3, '+', -2, '*', 0, '-',
'*', -2, '+', -3, '*', '+', -2, '+', -3, '+', 0, '-', '-', '+', -1,
-2, 4, -2, '-', -2],
['-', '-', '+', 2, '+', '+', '*', -2, '*', 3, '+', -2, '*', 0, '-',
'*', -2, '+', -3, '*', '+', -2, '+', -3, '-', '-', '+', -1, -2, 4, -2,
'-', -2]),
Ib002BuiltTrees(
['*', -3, '+', '*', -2, '-', '-', '*', '-', '+', '*', '+', '*', 2, 0,
'*', '-', '*', '*', '-', -2, '+', -2, '*', '*', '-', 3, 2, -2, -1, 2,
-3, -1, -1, -3], -717,
[-3, -2, 2, 0, -2, -2, 3, 2, -2, -1, 2, -3, -1, -1, -3],
['*', '+', -3, '*', '-', '-', '*', -1, '-', '+', -1, '*', -3, '+', '*',
2, '-', '*', -1, '*', '+', '*', -2, '*', 2, '-', 3, -2, '-', -2, '*',
0, 2, -2, -3],
['*', '+', -3, '*', '-', '-', '-', '-', '+', -1, '*', -3, '+', '*', 2,
'-', '-', '*', '+', '*', -2, '*', 2, '-', 3, -2, '-', -2, '*', 0, 2,
-2, -3]),
Ib002BuiltTrees(
['*', '-', -3, '+', '*', '*', '*', '+', '-', '*', '*', '*', '-', '-',
'*', -3, 1, '*', '+', 3, 2, '+', '*', '*', -3, '-', 3, 4, 1, 1, -2,
-3, 1, -2, -2, 0], -13356,
[-3, -3, 1, 3, 2, -3, 3, 4, 1, 1, -2, -3, 1, -2, -2, 0],
['*', '+', 0, '*', -2, '*', -2, '*', 1, '+', -3, '-', '*', -2, '*', 1,
'*', '*', '+', 1, '*', 4, '*', '-', 3, -3, '+', 2, 3, '-', '-', '*',
1, -3, '-', -3],
['*', '*', -2, '*', -2, '*', 1, '+', -3, '-', '*', -2, '*', 1, '*',
'*', '+', 1, '*', 4, '*', '-', 3, -3, '+', 2, 3, '-', '-', '*', 1, -3,
'-', -3]),
Ib002BuiltTrees(
['*', '*', -2, '+', -3, '+', '-', '-', '-', '*', '*', 2, '-', -1, '*',
'*', '-', '*', '+', '+', '+', '*', '*', '*', 2, -2, 4, -3, 0, 3, -3,
3, 3, 0, -2, -2], -20,
[-2, -3, 2, -1, 2, -2, 4, -3, 0, 3, -3, 3, 3, 0, -2, -2],
['*', -2, '*', '+', '+', -2, '-', '-', '-', '*', '*', 0, '*', 3, '-',
'*', 3, '+', -3, '+', 3, '+', 0, '*', -3, '*', 4, '*', -2, 2, '*',
'-', -1, 2, -3, -2],
['*', -2, '*', '+', '+', -2, '-', '-', '-', '*', '*', 0, '*', 3, '-',
'*', 3, '+', -3, '+', 3, '*', -3, '*', 4, '*', -2, 2, '*', '-', -1, 2,
-3, -2]),
Ib002BuiltTrees(
['-', '*', '*', -3, 1, '-', '*', -2, '+', '-', '+', -3, '*', '+', '+',
'+', '+', '*', 0, '+', '*', '+', '+', '-', 3, '-', 4, -1, 3, -3, -3,
0, 2, -3, -1, -2], -18,
[-3, 1, -2, -3, 0, 3, 4, -1, 3, -3, -3, 0, 2, -3, -1, -2],
['-', '*', '-', '*', '+', -2, '-', '+', '*', -1, '+', -3, '+', 2, '+',
0, '+', -3, '*', '+', -3, '*', 3, '+', -1, '+', '-', 4, '-', 3, 0, -3,
-2, '*', 1, -3],
['-', '*', '-', '*', '+', -2, '-', '+', '-', '+', -3, '+', 2, '+', -3,
'*', '+', -3, '*', 3, '+', -1, '+', '-', 4, '-', 3, 0, -3, -2, '*', 1,
-3]),
Ib002BuiltTrees(
['+', '+', '*', '-', '-', '*', '+', '+', '+', '+', '-', '*', '*', 4,
'*', '*', '-', 3, '*', '+', 2, '+', 0, 4, 2, 2, '+', 1, 3, -1, -1, 3,
1, -3, -1, -1, -1], 3460,
[4, 3, 2, 0, 4, 2, 2, 1, 3, -1, -1, 3, 1, -3, -1, -1, -1],
['+', -1, '+', -1, '*', -1, '-', '-', '*', -3, '+', 1, '+', 3, '+', -1,
'+', -1, '-', '*', '+', 3, 1, '*', '*', 2, '*', '*', 2, '+', '+', 4,
0, 2, '-', 3, 4],
['+', -1, '+', -1, '-', '-', '-', '*', -3, '+', 1, '+', 3, '+', -1,
'+', -1, '-', '*', '+', 3, 1, '*', '*', 2, '*', '*', 2, '+', 4, 2,
'-', 3, 4]),
Ib002BuiltTrees(
['+', '-', '-', '-', '*', -1, '+', '+', '-', '+', '*', -1, '*', '*',
'+', '*', '*', '*', '*', 4, '+', '+', '*', 3, 0, 3, 4, -2, -1, -2, -3,
-2, -3, 0, 3, 4, 1], -682,
[-1, -1, 4, 3, 0, 3, 4, -2, -1, -2, -3, -2, -3, 0, 3, 4, 1],
['+', 1, '-', '-', '-', '*', '+', 4, '+', 3, '-', '+', 0, '*', '*', -3,
'*', -2, '+', -3, '*', -2, '*', -1, '*', -2, '*', '+', 4, '+', 3, '*',
0, 3, 4, -1, -1],
['+', 1, '-', '-', '-', '-', '+', 4, '+', 3, '-', '-', '*', -3, '*',
-2, '+', -3, '*', -2, '-', '*', -2, '*', '+', 4, '+', 3, '*', 0, 3,
4]),
Ib002BuiltTrees(
['+', '*', '+', '*', '-', '+', 3, 1, '*', '*', -2, '*', '+', '+', '*',
'+', '-', '*', '+', '+', '-', '-', -1, '*', 2, -1, -2, 4, 3, 4, 2, 1,
4, -1, 1, -3, 3], 9120,
[3, 1, -2, -1, 2, -1, -2, 4, 3, 4, 2, 1, 4, -1, 1, -3, 3],
['+', 3, '*', -3, '+', 1, '*', '*', -1, '*', '*', 4, '+', 1, '+', 2,
'*', 4, '+', 3, '-', '*', 4, '+', -2, '+', '*', -1, 2, '-', '-', -1,
-2, '-', '+', 1, 3],
['+', 3, '*', -3, '+', 1, '*', '-', '*', '*', 4, '+', 1, '+', 2, '*',
4, '+', 3, '-', '*', 4, '+', -2, '+', '-', 2, '-', '-', -1, -2, '-',
'+', 1, 3]),
Ib002BuiltTrees(
['+', 2, '*', '+', '-', '*', '+', '*', '-', '+', '+', '*', '+', '*',
-3, '+', '*', '-', '+', '+', 3, '-', '-', -2, -3, -3, -1, 0, -3, -1,
1, -1, -1, 4, -3, 4], 1014,
[2, -3, 3, -2, -3, -3, -1, 0, -3, -1, 1, -1, -1, 4, -3, 4],
['+', '*', 4, '+', -3, '-', '*', 4, '+', -1, '*', -1, '-', '+', 1, '+',
-1, '*', -3, '+', 0, '*', '+', -1, '*', -3, '-', '+', -3, '+', '-',
'-', -2, 3, -3, 2],
['+', '*', 4, '+', -3, '-', '*', 4, '+', -1, '-', '-', '+', 1, '+', -1,
'*', -3, '*', '+', -1, '*', -3, '-', '+', -3, '+', '-', '-', -2, 3,
-3, 2]),
Ib002BuiltTrees(
['+', '*', '*', '*', 1, '-', 0, 4, '*', '-', '+', '+', '+', '+', -2, 3,
'-', -2, '*', '*', 3, -1, -1, 4, '+', '*', '+', '-', '*', '*', -2, 0,
0, 3, 3, -1, -2], -2,
[1, 0, 4, -2, 3, -2, 3, -1, -1, 4, -2, 0, 0, 3, 3, -1, -2],
['+', -2, '*', '*', '+', -1, '*', 3, '+', 3, '-', '*', 0, '*', 0, -2,
'-', '+', 4, '+', '*', -1, '*', -1, 3, '+', '-', -2, '+', 3, -2, '*',
4, '*', '-', 0, 1],
['+', -2, '*', '*', '+', -1, '*', 3, '+', 3, '-', '*', 0, '*', 0, -2,
'-', '+', 4, '+', '-', '-', 3, '+', '-', -2, '+', 3, -2, '*', 4, '*',
'-', 0, 1]),
Ib002BuiltTrees(
['*', '-', 4, '*', '*', '+', '*', -1, -1, '*', '-', '-', -1, '+', '*',
3, 3, '+', '+', '*', 1, '*', '-', '+', '*', '+', '*', -1, 4, -2, -2,
4, 3, 3, -2, 1, 4], -624,
[4, -1, -1, -1, 3, 3, 1, -1, 4, -2, -2, 4, 3, 3, -2, 1, 4],
['*', '*', 4, '*', 1, '+', '*', '+', '+', -2, '+', 3, '*', '*', 3, '-',
'+', 4, '*', -2, '+', -2, '*', 4, -1, 1, '*', 3, 3, '-', '-', -1, '*',
-1, -1, '-', 4],
['*', '*', 4, '*', 1, '+', '*', '+', '+', -2, '+', 3, '*', '*', 3, '-',
'+', 4, '*', -2, '+', -2, '-', 4, 1, '*', 3, 3, '-', '-', -1, '-', -1,
'-', 4]),
Ib002BuiltTrees(
['*', '-', '+', '*', '+', '+', '-', '+', '+', -1, '+', '*', 2, '*',
'*', '+', '*', '+', '+', 3, '+', '*', 2, '*', -1, 4, 2, 4, 0, 1, 0,
-2, 2, 4, 1, 2, 1, 0, 1], 2,
[-1, 2, 3, 2, -1, 4, 2, 4, 0, 1, 0, -2, 2, 4, 1, 2, 1, 0, 1],
['*', 1, '-', '+', 0, '*', 1, '+', 2, '+', 1, '-', '+', 4, '+', '+', 2,
'*', '*', -2, '*', 0, '+', 1, '*', 0, '+', 4, '+', '+', 2, '*', '*',
4, -1, 2, 3, 2, -1],
['*', 1, '-', '*', 1, '+', 2, '+', 1, '-', '+', 4, '+', '+', 2, '*',
'*', -2, '*', 0, '+', 1, '*', 0, '+', 4, '+', '+', 2, '*', '-', 4, 2,
3, 2, -1]),
Ib002BuiltTrees(
['-', '+', '-', 2, '*', '+', '-', '-', '-', '+', '+', '+', '+', '+',
'*', '*', -2, '+', -1, '+', 3, '*', '*', '+', -2, -2, 0, -2, -2, -2,
-2, -3, -1, 1, 4, -1], 5,
[2, -2, -1, 3, -2, -2, 0, -2, -2, -2, -2, -3, -1, 1, 4, -1],
['-', '+', '*', -1, '+', 4, '-', '-', '-', '+', 1, '+', -1, '+', -3,
'+', -2, '+', -2, '*', -2, '*', '+', '+', '*', -2, '*', 0, '+', -2,
-2, 3, -1, -2, '-', 2],
['-', '+', '-', '+', 4, '-', '-', '-', '+', 1, '+', -1, '+', -3, '+',
-2, '+', -2, '*', -2, '*', '+', '+', '*', -2, '*', 0, '+', -2, -2, 3,
-1, -2, '-', 2]),
Ib002BuiltTrees(
['+', '+', '*', '+', 3, '+', '+', '+', '*', '+', '*', '+', '+', '-',
'+', '+', '-', '-', '-', -1, 3, '+', -3, '*', -3, -3, -3, -2, -1, -1,
3, -3, 3, 3, 2, 0, -1], 95,
[3, -1, 3, -3, -3, -3, -3, -2, -1, -1, 3, -3, 3, 3, 2, 0, -1],
['+', -1, '+', 0, '*', 2, '+', '+', 3, '+', 3, '+', -3, '*', 3, '+',
-1, '*', -1, '+', -2, '+', -3, '-', '+', '+', '*', -3, -3, -3, '+', 3,
'-', '-', '-', -1, 3],
['+', -1, '*', 2, '+', '+', 3, '+', 3, '+', -3, '*', 3, '+', -1, '-',
'+', -2, '+', -3, '-', '+', '+', '*', -3, -3, -3, '+', 3, '-', '-',
'-', -1, 3]),
Ib002BuiltTrees(
['+', '*', -3, '+', 0, '+', 2, '+', '-', '+', '-', '*', '*', '-', 3,
'*', '+', 3, '+', -1, '+', '+', '+', -1, '*', 3, 4, '*', 1, '*', 3,
-3, -3, 4, -2, 2, 1, -1], -76,
[-3, 0, 2, 3, 3, -1, -1, 3, 4, 1, 3, -3, -3, 4, -2, 2, 1, -1],
['+', -1, '*', '+', '+', '+', 1, '-', '+', 2, '-', '*', -2, '*', '*',
4, '+', '+', '+', -3, '+', '*', '*', -3, 3, 1, '+', '*', 4, 3, -1, -1,
3, '-', 3, 2, 0, -3],
['+', -1, '*', '+', '+', 1, '-', '+', 2, '-', '*', -2, '*', '*', 4,
'+', '+', '+', -3, '+', '*', '*', -3, 3, 1, '+', '*', 4, 3, -1, -1, 3,
'-', 3, 2, -3]),
Ib002BuiltTrees(
['+', '+', '*', '+', '+', '*', -3, -3, '*', '-', '*', 3, 3, '*', 2, -3,
'*', -3, -3, '*', '-', -2, '+', '*', '+', '*', '-', '+', '+', 0, -1,
3, 4, -2, 0, 0, -2, -1], -3,
[-3, -3, 3, 3, 2, -3, -3, -3, -2, 0, -1, 3, 4, -2, 0, 0, -2, -1],
['+', -1, '+', -2, '*', '*', '+', 0, '*', 0, '+', -2, '*', 4, '-', '+',
3, '+', -1, 0, '-', -2, '+', '*', -3, -3, '+', '*', '*', -3, 2, '-',
'*', 3, 3, '*', -3, -3],
['+', -1, '+', -2, '*', '*', '*', 0, '+', -2, '*', 4, '-', '+', 3, -1,
'-', -2, '+', '*', -3, -3, '+', '*', '*', -3, 2, '-', '*', 3, 3, '*',
-3, -3]),
Ib002BuiltTrees(
['*', '*', '-', '*', '*', '-', '+', '+', '*', '+', 1, '*', 2, -2, '+',
-3, 4, '*', '*', '+', '*', -1, '*', '+', '+', '*', -1, -1, -2, -2, 1,
-2, 1, 4, 1, 3, -3, -2, -1], -36,
[1, 2, -2, -3, 4, -1, -1, -1, -2, -2, 1, -2, 1, 4, 1, 3, -3, -2, -1],
['*', -1, '*', -2, '-', '*', -3, '*', 3, '-', '+', 1, '+', '*', 4, '*',
1, '+', -2, '*', '*', 1, '+', -2, '+', -2, '*', -1, -1, -1, '*', '+',
4, -3, '+', '*', -2, 2, 1],
['-', '*', -2, '-', '*', -3, '*', 3, '-', '+', 1, '+', '*', 4, '*', 1,
'+', -2, '-', '*', 1, '+', -2, '+', -2, '-', -1, '*', '+', 4, -3, '+',
'*', -2, 2, 1]),
]
def ib002_t_eval_atree():
    """Run 'eval_atree' on every scenario in testing_dataset.

    On a mismatch the tested tree is written to Er_eval_<n>.dot; at most
    MaxErrCount failures are reported before the test stops.
    """
    print("1. test 'eval_atree':")
    fail_counter = 0
    for scenario in testing_dataset:
        tree = ib002_deserialise_tree(scenario.tree)
        # Evaluate once and reuse the result; the original re-ran
        # eval_atree(tree) a second time just for the error message.
        result = eval_atree(tree)
        if result != scenario.result:
            print("\nNOK, vas vysledek: {}, ocekavany vysledek: {}"
                  .format(result, scenario.result))
            make_graph(tree, "Er_eval_{}.dot".format(fail_counter))
            print("Testovany strom ulozen v souboru Er_eval_{}.dot"
                  .format(fail_counter))
            fail_counter += 1
            if fail_counter >= MaxErrCount:
                print("\nVypsano pouze prvnich MaxErrCount={} chyb,\
dalsi testy nespusteny.".format(MaxErrCount))
                break
    if fail_counter == 0:
        print("OK")
def ib002_t_constants_to_array():
    """Check 'constants_to_array' against every scenario in testing_dataset.

    Failing trees are dumped to Er_constants_<n>.dot; after MaxErrCount
    failures the remaining scenarios are skipped.
    """
    print("\n2. test 'constants_to_array':")
    failures = 0
    for scenario in testing_dataset:
        tree = ib002_deserialise_tree(scenario.tree)
        produced = constants_to_array(tree)
        wanted = scenario.constants
        if produced == wanted:
            continue
        print("\nNOK, vas vysledek: {}, ocekavany vysledek: {}"
              .format(produced, wanted))
        make_graph(tree, "Er_constants_{}.dot".format(failures))
        print("Testovany strom ulozen v souboru Er_constants_{}.dot"
              .format(failures))
        failures += 1
        if failures >= MaxErrCount:
            print("\nVypsano pouze prvnich MaxErrCount={} chyb,\
dalsi testy nespusteny.".format(MaxErrCount))
            break
    if failures == 0:
        print("OK")
def ib002_t_build_atree():
    """Run 'build_atree' on every scenario in testing_dataset.

    On a mismatch both the produced and the expected tree are written to
    .dot files; at most MaxErrCount failures are reported.
    """
    print("\n3. test 'build_atree':")
    fail_counter = 0
    for scenario in testing_dataset:
        array = list(scenario.array)
        tree = build_atree(array)
        # Deserialise the expected tree once; the original rebuilt it a
        # second time just to render the error graph.
        expected_tree = ib002_deserialise_tree(scenario.tree)
        if not ib002_trees_equality(tree, expected_tree):
            print("\nNOK, vybudovany strom se lisi od ocekavaneho.")
            print("Vstupni pole: {}".format(str(scenario.array)))
            make_graph(tree, "Er_build_{}_yours.dot".format(fail_counter))
            make_graph(expected_tree,
                       "Er_build_{}_expected.dot".format(fail_counter))
            print(("Vami vytvoreny strom je ulozen v souboru "
                   "Er_build_{}_yours.dot,").format(fail_counter))
            print(("ocekavany strom je ulozen v souboru "
                   "Er_build_{}_expected.dot").format(fail_counter))
            fail_counter += 1
            if fail_counter >= MaxErrCount:
                print("\nVypsano pouze prvnich MaxErrCount={} chyb,\
dalsi testy nespusteny.".format(MaxErrCount))
                break
    if fail_counter == 0:
        print("OK")
def ib002_t_simplify_atree():
    """Check 'simplify_atree' against every scenario in testing_dataset.

    On a mismatch the input, produced and expected trees are each written
    to a .dot file; after MaxErrCount failures the test stops early.
    """
    print("\n4. test 'simplify_atree':")
    failures = 0
    for scenario in testing_dataset:
        tree = ib002_deserialise_tree(scenario.tree)
        simplify_atree(tree)
        if ib002_trees_equality(tree,
                                ib002_deserialise_tree(scenario.simplified)):
            continue
        print("\nNOK, vybudovany strom se lisi od ocekavaneho.")
        make_graph(ib002_deserialise_tree(scenario.tree),
                   "Er_simplify_{}_input.dot".format(failures))
        make_graph(tree, "Er_simplify_{}_yours.dot".format(failures))
        make_graph(ib002_deserialise_tree(scenario.simplified),
                   "Er_simplify_{}_expected.dot".format(failures))
        print(("Vami vytvoreny strom je ulozen v souboru "
               "Er_simplify_{}_yours.dot,").format(failures))
        print(("ocekavany strom je ulozen v souboru "
               "Er_simplify_{}_expected.dot").format(failures))
        print(("vstupni strom je ulozen v souboru "
               "Er_simplify_{}_input.dot").format(failures))
        failures += 1
        if failures >= MaxErrCount:
            print("\nVypsano pouze prvnich MaxErrCount={} chyb,\
dalsi testy nespusteny.".format(MaxErrCount))
            break
    if failures == 0:
        print("OK")
# Main entry point, run automatically when the program is started.
# If you want to run your own tests besides the supplied ones, add them here.
# Submit your solution with the original version of this function.
if __name__ == '__main__':
    ib002_t_eval_atree()
    ib002_t_constants_to_array()
    ib002_t_build_atree()
    ib002_t_simplify_atree()
|
"""Data schema for Gamebox."""
import collections
# Immutable record describing the complete physical contents of a game box.
Gamebox = collections.namedtuple(
    "Gamebox",
    [
        "gems",               # Gems available. A defaultdict(int) keyed by GemType.
        "development_cards",  # Development cards available.
        "noble_tiles",        # NobleTiles available.
        "game_rules",         # The GameRules.
    ],
)
|
from tkinter import *
import random
import tkinter.messagebox
class Number_Guessing:
    """A small Tkinter number-guessing game.

    "Change Numbers" picks a secret number and shows a hint range,
    "Guess" checks the typed answer against it and "Clear" resets the
    game (disabling guessing until a new number is chosen).
    """

    def __init__(self, root):
        """Build the game UI inside *root* (a Tk instance)."""
        self.root = root
        self.root.title("Number Guessing")
        self.root.geometry("500x350")
        self.root.iconbitmap("logo1064.ico")
        self.root.resizable(0, 0)

        answer = StringVar()
        # Secret number to guess; None until "Change Numbers" is pressed.
        # (The original kept this in a module-level global, leaking state
        # out of the class and relying on a bare except to detect "unset".)
        self.actual_number = None

        def add_hover(button):
            # Invert the button colours while the pointer hovers over it.
            # Replaces the six near-identical on_enter/on_leave handlers
            # of the original.
            def on_enter(_event):
                button['background'] = "black"
                button['foreground'] = "cyan"

            def on_leave(_event):
                button['background'] = "SystemButtonFace"
                button['foreground'] = "SystemButtonText"

            button.bind("<Enter>", on_enter)
            button.bind("<Leave>", on_leave)

        def change():
            # Pick a new secret number and display the hint range around it.
            but_guess.config(state="normal")
            number = random.randint(2, 101)
            start_number = number - 1
            self.actual_number = number + 5
            end_number = number + 10
            lab_hint.config(text=f"The number is between range {start_number} to {end_number}")

        def clear():
            # Reset the hint and the entry box, and disable guessing again.
            lab_hint.config(text="Please Select Change Number to start")
            answer.set("")
            # "disabled" is the documented state value; the original relied
            # on Tk accepting the abbreviation "disable".
            but_guess.config(state="disabled")

        def guess():
            ans = answer.get()
            if len(ans) == 0:
                tkinter.messagebox.showerror("Error", "Please write guess")
            elif self.actual_number is None:
                # No secret number yet: the original reached this case via a
                # NameError swallowed by a bare "except".
                tkinter.messagebox.showerror("Error", "Please Select Change Number to start")
            elif ans == str(self.actual_number):
                # showinfo replaces the private helper
                # tkinter.messagebox._show used by the original.
                tkinter.messagebox.showinfo("Correct", "Your Guess is correct")
                change()
                answer.set("")
            else:
                tkinter.messagebox.showinfo("Wrong", "Your Guess is wrong")

        #==================frame=============================#
        mainframe = Frame(self.root, width=500, height=350, relief="ridge", bd=3, bg="black")
        mainframe.place(x=0, y=0)

        lab_number = Label(mainframe, text="Please Enter Number", font=('times new roman', 16), bg="black", fg="white")
        lab_number.place(x=170, y=10)

        ent_number = Entry(mainframe, width=37, font=('times new roman', 14), relief="ridge", bd=4, justify="center", textvariable=answer)
        ent_number.place(x=70, y=50)

        lab_hint = Label(mainframe, text="Please Select Change Number to start", font=('times new roman', 14), bg="black", fg="white")
        lab_hint.place(x=100, y=100)

        but_guess = Button(mainframe, width=16, text="Guess", font=('times new roman', 14), cursor="hand2", command=guess)
        but_guess.place(x=20, y=190)
        add_hover(but_guess)

        but_change = Button(mainframe, width=16, text="Change Numbers", font=('times new roman', 14), cursor="hand2", command=change)
        but_change.place(x=300, y=190)
        add_hover(but_change)

        but_clear = Button(mainframe, width=16, text="Clear", font=('times new roman', 14), cursor="hand2", command=clear)
        but_clear.place(x=150, y=250)
        add_hover(but_clear)
# Launch the game in a fresh Tk root window when run as a script.
if __name__ == "__main__":
    root=Tk()
    Number_Guessing(root)
    root.mainloop()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 15:49:57 2019
@author: xingyu
"""
import sys
sys.path.append('./LPRNet')
sys.path.append('./MTCNN')
from LPRNet_Test import *
from MTCNN import *
import numpy as np
import argparse
import torch
import time
import cv2
if __name__ == '__main__':
    # Command-line options for the demo.
    parser = argparse.ArgumentParser(description='MTCNN & LPR Demo')
    parser.add_argument("-image", help='image path', default='test/8.jpg', type=str)
    parser.add_argument("--scale", dest='scale', help="scale the iamge", default=1, type=int)
    # NOTE(review): the default is a (w, h) tuple but type=int parses a single
    # integer, so passing --mini_lp on the command line would not yield a
    # tuple -- confirm the flag is only ever used with its default.
    parser.add_argument('--mini_lp', dest='mini_lp', help="Minimum face to be detected", default=(50, 15), type=int)
    args = parser.parse_args()
    # Run on GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # License-plate recognition network; map_location keeps checkpoint
    # loading working on machines without a GPU.
    lprnet = LPRNet(class_num=len(CHARS), dropout_rate=0)
    lprnet.to(device)
    lprnet.load_state_dict(torch.load('LPRNet/weights/Final_LPRNet_model.pth', map_location=lambda storage, loc: storage))
    lprnet.eval()
    # Spatial transformer network applied to each cropped plate before LPRNet.
    STN = STNet()
    STN.to(device)
    STN.load_state_dict(torch.load('LPRNet/weights/Final_STN_model.pth', map_location=lambda storage, loc: storage))
    STN.eval()
    print("Successful to build LPR network!")
    since = time.time()
    image = cv2.imread(args.image)
    image = cv2.resize(image, (0, 0), fx = args.scale, fy = args.scale, interpolation=cv2.INTER_CUBIC)
    # Detect plate bounding boxes with the MTCNN cascade (PNet + ONet weights).
    bboxes = create_mtcnn_net(image, args.mini_lp, device, p_model_path='MTCNN/weights/pnet_Weights', o_model_path='MTCNN/weights/onet_Weights')
    for i in range(bboxes.shape[0]):
        bbox = bboxes[i, :4]
        x1, y1, x2, y2 = [int(bbox[j]) for j in range(4)]
        w = int(x2 - x1 + 1.0)
        h = int(y2 - y1 + 1.0)
        # NOTE(review): this zeros array is immediately overwritten by the
        # crop below -- it appears to be dead code.
        img_box = np.zeros((h, w, 3))
        img_box = image[y1:y2+1, x1:x2+1, :]
        # Resize the crop to the 94x24 input size and scale pixels by
        # (x - 127.5) / 128 before feeding the network.
        im = cv2.resize(img_box, (94, 24), interpolation=cv2.INTER_CUBIC)
        im = (np.transpose(np.float32(im), (2, 0, 1)) - 127.5)*0.0078125
        data = torch.from_numpy(im).float().unsqueeze(0).to(device) # torch.Size([1, 3, 24, 94])
        transfer = STN(data)
        preds = lprnet(transfer)
        preds = preds.cpu().detach().numpy()  # (1, 68, 18)
        labels, pred_labels = decode(preds, CHARS)
        # Draw the detection box and the recognised plate text on the frame.
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255), 1)
        image = cv2ImgAddText(image, labels[0], (x1, y1-12), textColor=(255, 255, 0), textSize=15)
    print("model inference in {:2.3f} seconds".format(time.time() - since))
    # Undo the initial scaling for display.
    image = cv2.resize(image, (0, 0), fx = 1/args.scale, fy = 1/args.scale, interpolation=cv2.INTER_CUBIC)
    cv2.imshow('image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
class NotFoundException(Exception):
    """Raised when a requested resource cannot be found.

    The original defined an ``__init__`` that only forwarded ``message``
    to ``Exception.__init__`` -- exactly what ``Exception`` already does,
    so the boilerplate is removed.  ``NotFoundException("msg")`` behaves
    exactly as before; constructing it with no arguments is now also
    allowed (a backward-compatible relaxation).
    """
|
"""
Quantiphyse - Enumeration classes
Copyright (c) 2013-2020 University of Oxford
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Orientation:
    """Constants selecting the left/right display convention for images."""
    RADIOLOGICAL = 0
    NEUROLOGICAL = 1
class DisplayOrder:
    """Constants selecting the stacking order of data and ROI layers."""
    USER = 0          # order as arranged by the user
    DATA_ON_TOP = 1   # data layers above ROI layers
    ROI_ON_TOP = 2    # ROI layers above data layers
class Visibility:
    """Constants selecting whether an item is shown or hidden."""
    SHOW = 0
    HIDE = 1
class Boundary:
    """Constants selecting how out-of-range values are treated.

    NOTE(review): names suggest transparent / clamped handling, with the
    LOWERTRANS/UPPERTRANS variants applying transparency on one side only
    -- confirm against the viewer code that consumes these values.
    """
    TRANS = 0
    CLAMP = 1
    LOWERTRANS = 2
    UPPERTRANS = 3
|
from pymongo import MongoClient
DEFAULT_MONGO_CONNECTION_STRING = "mongodb://127.0.0.1:27017/admin"
class KnowledgeBase:
    """Access to a MongoDB collection of wiki-style pages.

    Parameters
    ----------
    mongo_connection_string : str
        MongoDB URI; falls back to DEFAULT_MONGO_CONNECTION_STRING when None.
    database, collection : str
        Names of the database and collection to use.
    page_id_attr, page_title_attr, page_description_attr : str
        Names of the document attributes that hold a page's id, title and
        description respectively.
    """

    def __init__(
        self,
        mongo_connection_string: str,
        database: str,
        collection: str,
        page_id_attr: str,
        page_title_attr: str,
        page_description_attr: str
    ):
        if mongo_connection_string is None:
            mongo_connection_string = DEFAULT_MONGO_CONNECTION_STRING
        self.client = MongoClient(mongo_connection_string)
        self.db = self.client[database]
        self.collection = self.db[collection]
        self.page_id_attr = page_id_attr
        self.page_title_attr = page_title_attr
        self.page_description_attr = page_description_attr

    def get_all_pages_cursor(self):
        """Return a cursor over every document in the collection."""
        return self.collection.find({})

    def get_num_pages(self):
        """Return the total number of pages in the collection.

        Uses count_documents({}): Collection.count() was deprecated in
        PyMongo 3.7 and removed in PyMongo 4.
        """
        return self.collection.count_documents({})

    def get_page_by_id(self, page_id):
        """Return the page whose _id equals str(page_id), or None."""
        return self.collection.find_one({"_id": str(page_id)})

    def get_page_by_title(self, page_title, attempt=0):
        """Return the first page whose title attribute matches, or None.

        ``attempt`` is unused; it is kept for interface compatibility.
        """
        return self.collection.find_one({self.page_title_attr: str(page_title)})

    def get_page_from_url(self, url):
        """Not implemented yet."""
        ...

    @staticmethod
    def build_connection_string(host, username=None, password=None):
        """Build a MongoDB URI, embedding credentials only when both the
        username and the password are supplied."""
        if username is not None and password is not None:
            return f'mongodb://{username}:{password}@{host}/admin'
        else:
            return f'mongodb://{host}/admin'
|
# From https://www.baldengineer.com/raspberry-pi-gui-tutorial.html
# by James Lewis (@baldengineer)
# Minimal python code to start PyQt5 GUI
#
# You don't need all of these "main.py" files
# they are here for illustration only.
# always seem to need this
import sys
# This gets the Qt stuff
import PyQt5
from PyQt5.QtWidgets import *
# This is our window from QtCreator
import mainwindow_auto
# create class for our Raspberry Pi GUI
# create class for our Raspberry Pi GUI
class MainWindow(QMainWindow, mainwindow_auto.Ui_MainWindow):
    """Main application window wiring up the QtCreator-generated UI."""

    # access variables inside of the UI's file
    def __init__(self):
        # Name the class explicitly: the original's super(self.__class__, ...)
        # breaks under subclassing (the MRO lookup restarts at the subclass
        # and recurses forever).
        super(MainWindow, self).__init__()
        self.setupUi(self)  # gets defined in the UI file
# I feel better having one of these
def main():
    """Create the QApplication, show the main window and run the event loop."""
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    # app.exec_() blocks until the window closes; exit with its status code
    # so the script does not fall through and terminate immediately.
    sys.exit(app.exec_())
# Run the GUI only when this file is executed as a script.
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""
readBLgrid
function: read in the Turbsim '.wnd' binary wind field data
------------------------------------------------------------------------------------
This code is translated from the matlab version.
copyright: TurbSim (c) NREL
source: https://github.com/old-NWTC/TurbSim/blob/master/CertTest/readBLgrid.m
--------------------------------------------------------------------------------------
Usage
velocity, y, z, nz, ny, dz, dy, dt, zHub, z1, SummVars,Scale,Offset = readBLgrid(FileName)
------------------------------------------------------------------------------------------
Inputs
FileName - string, the name of the file to open (.wnd extension is optional)
-----------------------------------------------------------------------------------------
Outputs
velocity - 4-D vector: time, velocity component, iy, iz
y - 1-D vector: horizontal locations y(iy)
z - 1-D vector: vertical locations z(iz)
nz, ny - scalars: number of points in the vertical/horizontal
direction of the grid
dz, dy, dt - scalars: distance between two points in the vertical [m]/
horizontal [m]/time [s] dimension
zHub - hub height [m]
z1 - vertical location of bottom of grid [m above ground level]
SumVars - variables from the summary file (zHub, Clockwise, UBAR, TI_u, TI_v, TI_w)
Scale - a scale factor to write out binary data
Offset - an offset to write out binary data
--------------------------------------------------------------------------------------------
Created on 18.06.2021
Feng Guo (c) Flensburg University of Applied Sciences
Yiyin Chen (c) University of Stuttgart
"""
# Import libraries
import numpy as np
import os
def readBLgrid(FileName):
    """Read a TurbSim/BLADED '.wnd' binary wind field and its '.sum' summary.

    See the module docstring above for the meaning of the returned values.
    Translated from the MATLAB readBLgrid.m; several NOTE(review) comments
    below flag spots where the translation looks suspect.
    """
    length = len(FileName); # avoid using len
    ending = FileName[length-4:length]
    # Strip an optional '.wnd' extension; it is re-appended when opening.
    if '.wnd' in ending:
        FileName = FileName[0:length-4]
    #-------------------------------------------------------------
    #initialize variables
    fileFmt = 'int16';
    ConvFact = 1.0; #results in meters and seconds
    str_i = ['HUB HEIGHT','CLOCKWISE','UBAR','TI(U','TI(V','TI(W'] #MUST be in UPPER case
    numVars = len(str_i )
    SummVars = np.zeros((numVars, 1))
    #-----------------------------------------
    #READ THE HEADER OF THE BINARY FILE
    #-----------------------------------------
    # NOTE(review): a missing file is only printed, not raised; the open()
    # below will then fail with FileNotFoundError anyway.
    if not os.path.isfile(FileName + '.wnd'):
        print( 'Wind file could not be opened, check whether it is in the directory' )
    fid_wnd =open(FileName + '.wnd' , 'rb')
    #with open(FileName + '.wnd' , 'rb') as fid_wnd:
    # First int16 selects the header format: -99 marks the newer AeroDyn style.
    nffc = np.fromfile(fid_wnd,dtype=np.int16,count = 1)
    if nffc != -99: # old or new bladed styles
        dz = np.fromfile(fid_wnd,dtype=np.int16,count = 1) # delta z in mm
        dy = np.fromfile(fid_wnd,dtype=np.int16,count = 1) # delta y in mm
        dx = np.fromfile(fid_wnd,dtype=np.int16,count = 1) # delta x (actually t in this case) in mm
        nt = np.fromfile(fid_wnd,dtype=np.int16,count = 1) # half number of time steps
        MFFWS = np.fromfile(fid_wnd,dtype=np.int16,count = 1) #10 times mean FF wind speed, should be equal to MWS
        notused = np.fromfile(fid_wnd,dtype=np.int16,count = 5) # unnecessary lines
        nz = np.fromfile(fid_wnd,dtype=np.int16,count = 1) # 1000 times number of points in vertical direction, max 32
        ny = np.fromfile(fid_wnd,dtype=np.int16,count = 1) # 1000 times the number of points in horizontal direction, max 32
        notused = np.fromfile(fid_wnd,dtype=np.int16,count = 3*(-int(nffc)-1))
        # convert the integers to real numbers
        nffc = -nffc;
        dz = 0.001*ConvFact*dz
        dy = 0.001*ConvFact*dy
        dx = 0.001*ConvFact*dx
        MFFWS = 0.1*ConvFact*MFFWS
        nz = np.fix( nz % pow(2,16) / 1000 ) # the mod 2^16 is a work around for somewhat larger grids
        ny = np.fix( ny % pow(2,16) / 1000 ) # the mod 2^16 is a work around for somewhat larger grids
    else: #THE NEWER-STYLE AERODYN WIND FILE
        fc = np.fromfile(fid_wnd,dtype=np.int16,count = 1) # should be 4 to allow turbulence intensity to be stored in the header
        nffc = np.fromfile(fid_wnd,dtype=np.int32,count = 1) # number of components (should be 3)
        lat = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # latitude (deg)
        z0 = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # Roughness length (m)
        zOffset = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # Reference height (m) = Z(1) + GridHeight / 2.0
        TI_U = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # Turbulence Intensity of u component (%)
        TI_V = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # Turbulence Intensity of v component (%)
        TI_W = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # Turbulence Intensity of w component (%)
        dz = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # delta z in m
        dy = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # delta y in m
        dx = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # delta x in m
        nt = np.fromfile(fid_wnd,dtype=np.int32,count = 1) # half the number of time steps
        MFFWS = np.fromfile(fid_wnd,dtype=np.float32,count = 1) # mean full-field wind speed
        notused = np.fromfile(fid_wnd,dtype=np.float32,count = 3) # unused variables (for BLADED)
        notused = np.fromfile(fid_wnd,dtype=np.int32,count = 2) # unused variables (for BLADED)
        nz = np.fromfile(fid_wnd,dtype=np.int32,count = 1) # number of points in vertical direction
        ny = np.fromfile(fid_wnd,dtype=np.int32,count = 1) # number of points in horizontal direction
        notused = np.fromfile(fid_wnd,dtype=np.int32,count = 3*(int(nffc)-1)) # unused variables (for BLADED)
        # In the new-style header these four are known without the summary
        # file; the old-style branch leaves them 0 to be parsed from '.sum'.
        #SummVars{numVars-3} = MFFWS;
        #SummVars{numVars-2} = TI_U;
        #SummVars{numVars-1} = TI_V;
        #SummVars{numVars} = TI_W;
        SummVars[2] = MFFWS
        SummVars[3] = TI_U
        SummVars[4] = TI_V
        SummVars[5] = TI_W
    nt = max([nt*2,1])
    dt = dx/MFFWS
    #-----------------------------------------
    #READ THE SUMMARY FILE FOR SCALING FACTORS
    #-----------------------------------------
    print('Reading the summary file....')
    # NOTE(review): this aliases SummVars rather than copying it -- writing
    # indx[i-1] below also writes SummVars[i-1].  Each slot is immediately
    # overwritten with the parsed value so it happens to work, but confirm
    # against the MATLAB original, which used a separate index array.
    indx = SummVars
    if not os.path.isfile(FileName + '.sum'):
        print( 'Summary file could not be opened, check whether it is in the directory' );
    # loop to read in summary file
    with open(FileName + '.sum' , 'r') as fid_sum:
        while any(indx == 0): #MFFWS and the TIs should not be zero
            line = fid_sum.readline()
            #file_lines = fid_sum.readlines()
            # NOTE(review): in Python, readline() returns '' (still a str) at
            # EOF, so this check -- a translation of MATLAB's fgetl == -1 --
            # never fires; if a variable is missing the loop would spin
            # forever.  Confirm and consider testing "if not line".
            if not isinstance(line, str):
                # We reached the end of the file
                print('Reached the end of summary file without all necessary data.');
                break
            line = line.upper();
            if '=' in line :
                findx = line.find("=")+1 #first index
            else:
                findx = 1
            if line.isspace():
                lindx = 0
            else:
                lindx = len(line)-1 #last index
            #matches = [line for line in file_lines ] #first index
            #findx = file_lines.index(matches[0])+1
            #if not findx:
            #    findx = 1;
            #    lindx = len(line);
            i = 1;
            # Scan this line for each still-missing summary variable.
            while (i <= numVars):
                if indx[i-1]==0:
                    k = line.find(str_i[i-1]);
                    if k>=0: # we found a string we're looking for
                        indx[i-1] = k;
                        k=line.find('%');
                        if k>=0:
                            lindx = max(findx,k-1)
                        tmp = line[findx:lindx].lstrip().split(' ')[0] # take the first string, ignore the white space in the begining
                        try:
                            SummVars[i-1] = float(tmp)
                            break;
                        except:
                            # Non-numeric value: treat 'T' (true) as 1 and
                            # anything else as false.
                            if tmp == 'T':
                                SummVars[i-1] = 1;
                            else:
                                SummVars[i-1] = -1; #use this for false instead of zero.
                i = i + 1 # in while loop
        ## read the rest of the file to get the grid height offset, if it's there
        ZGoffset = 0.0 # we are still in the fid open loop
        while True:
            line = fid_sum.readline()
            # NOTE(review): same EOF issue as above -- readline() returns ''
            # at EOF, so this loop only terminates when 'HEIGHT OFFSET' is
            # found; confirm it is always present in the summary file.
            if not isinstance(line, str):
                break;
            line = line.upper()
            findx = line.find('HEIGHT OFFSET')
            if findx>=0:
                lindx = len(line)
                findx = line.find('=')+1
                ZGoffset = float(line[findx:lindx].lstrip().split(' ')[0]) #z grid offset
                break;
    # now the fid_sum is closed
    #-----------------------------------------
    #READ THE GRID DATA FROM THE BINARY FILE
    #-----------------------------------------
    print('Reading and scaling the grid data...')
    # nffc = 3;
    nv = nffc*ny*nz; # the size of one time step
    Scale = 0.00001*SummVars[2]*SummVars[3:6]
    Offset = np.zeros((3, 1))
    Offset[0] = SummVars[2]
    Offset[1] = 0
    Offset[2] = 0
    velocity = np.zeros((int(nt),int(nffc),int(ny),int(nz)))
    if SummVars[1] > 0: #clockwise rotation
        #flip the y direction....
        #let's change the dimension of velocity so that it's 4-d instead of 3-d
        y_ix = list(range(int(ny),0,-1))
    else:
        # NOTE(review): this yields 0..ny inclusive, and the descending iy
        # loop below (range(y_ix[0], y_ix[-1]-1, -1)) would then be empty --
        # confirm against a file written with Clockwise = False.
        y_ix = list(range(0,int(ny)+1,1))
    # [v cnt] = fread( fid_wnd, nv, fileFmt );
    # if cnt < nv
    #     error(['Could not read entire file: at grid record ' num2str( (it-1)*nv+cnt2 ) ' of ' num2str(nrecs)]);
    # end
    # disp('Scaling the grid data...');
    for it in range(1,int(nt)+1):
        v = np.fromfile(fid_wnd,dtype=np.int16,count = int(nv))
        cnt = len(v)
        # NOTE(review): concatenating str + int here would raise TypeError
        # if this branch is ever taken -- the counts should be formatted.
        if cnt < nv:
            print('Could not read entire file: at grid record '+ int( (it-1)*nv+cnt )+' of '+int(nv*nt))
        cnt2 = 1;
        for iz in range(1,int(nz)+1):
            for iy in range(int(y_ix[0]),int(y_ix[-1])-1,-1):
                for k in range(1,int(nffc)+1):
                    velocity[it-1,k-1,iy-1,iz-1] = v[cnt2-1]*Scale[k-1] + Offset[k-1]
                    cnt2 = cnt2 + 1;
    #close the file io
    fid_wnd.close()
    # range * NumPy array relies on NumPy converting the range; dy is a
    # 1-element array, so the result is a float array of length ny.
    y = range(0,int(ny))*dy - dy*(ny-1)/2
    zHub = SummVars[0];
    z1 = zHub - ZGoffset - dz*(nz-1)/2 #this is the bottom of the grid
    # NOTE(review): uses ny for the number of vertical points; the z axis
    # presumably has nz points (cf. the y line above) -- confirm; this looks
    # like a copy/paste slip.
    z = range(0,int(ny))*dz + z1;
    print('Finished.');
    return velocity, y, z, nz, ny, dz, dy, dt, zHub, z1, SummVars,Scale,Offset
from ree.core import Scraper
class Fuerteventura(Scraper):
    """Scraper for the Fuerteventura (Canary Islands) REE data feed."""
    def __init__(self, session=None, verify=True):
        # BUG FIX: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; name the class explicitly instead.
        super(Fuerteventura, self).__init__(session, verify)
    def get(self, date=None, last=True):
        """Fetch data for the FUERTEVE system (Canary timezone/region)."""
        return super(Fuerteventura, self).get("FUERTEVE", "Atlantic/Canary", "Canarias", date, last)
    def get_all(self, date=None):
        """Fetch every record for *date* instead of only the latest one."""
        return self.get(date, False)
|
'''
Configuration file to support running this ETL.
FILE_SETTINGS holds the settings for the text files
containing the raw coordinates.
DATABASE_SETTINGS holds the database settings,
such as connection data and actions.
VISUALIZATION_SETTINGS holds the visualization settings.
The values can be changed here in this file or via parameters
at execution time.
'''
FILE_SETTINGS = {
    # Directory containing the files with the raw coordinates
    'path' : 'data_points',
}
DATABASE_SETTINGS = {
    # Database host name
    'host' : 'localhost',
    # Database user
    'user' : 'root',
    # Password of the database user
    'password' : 'toor',
    # Database created to store the data
    'database' : 'etl',
    # Flag to drop (or not) the existing tables
    'drop_tables' : False,
    # Flag to create the tables
    'create_tables' : False,
    # Flag to run the ETL and store data in the database
    'load_data' : False,
    # Number of inserts before definitively committing the data to the database
    'commit' : 50,
}
VISUALIZATION_SETTINGS = {
    # Flag to visualize (or not) the data
    'visualize' : True,
    # NOTE(review): max_rows/max_columns look like pandas display limits
    # (None = unlimited) — confirm against the visualization code.
    'max_rows' : None,
    'max_columns' : None,
}
from collections import OrderedDict
from clutchless.service.torrent import AnnounceUrl, OrganizeService
def test_formatted_hostname():
    """The hostname is reduced to a CamelCase 'TestCom' style label."""
    announce = AnnounceUrl("http://domain.test.com/announce")
    assert announce.formatted_hostname == "TestCom"
def test_split_hostname():
    """Only the last two labels of a three-label hostname are kept."""
    parts = AnnounceUrl.split_hostname("domain.test.com")
    assert parts == ["test", "com"]
def test_split_hostname_smaller():
    """A two-label hostname is returned unchanged."""
    parts = AnnounceUrl.split_hostname("test.com")
    assert parts == ["test", "com"]
def test_sort_url_sets():
    """Each group's URL set is turned into a sorted list."""
    grouped = {
        "AnotherNet": {
            "http://domain.another.net/announce",
            "http://domain.another.net/announce2",
        },
        "TestCom": {"http://domain.test.com/announce"},
    }
    expected = {
        "AnotherNet": [
            "http://domain.another.net/announce",
            "http://domain.another.net/announce2",
        ],
        "TestCom": ["http://domain.test.com/announce"],
    }
    assert OrganizeService._sort_url_sets(grouped) == expected
def test_sort_groups_by_name():
    """Groups come back keyed in name order, sets untouched."""
    unsorted_groups = {
        "TestCom": {"http://domain.test.com/announce"},
        "AnotherNet": {
            "http://domain.another.net/announce",
            "http://domain.another.net/announce2",
        },
    }
    expected = {
        "AnotherNet": {
            "http://domain.another.net/announce",
            "http://domain.another.net/announce2",
        },
        "TestCom": {"http://domain.test.com/announce"},
    }
    assert OrganizeService._sort_groups_by_name(unsorted_groups) == expected
def test_get_groups_by_name():
    """Flat announce URLs are grouped under their formatted hostname."""
    flat_urls = {
        "http://domain.test.com/announce",
        "http://domain.another.net/announce",
        "http://domain.another.net/announce2",
    }
    expected = {
        "AnotherNet": {
            "http://domain.another.net/announce",
            "http://domain.another.net/announce2",
        },
        "TestCom": {"http://domain.test.com/announce"},
    }
    assert OrganizeService._get_groups_by_name(flat_urls) == expected
|
from summarus.modules.bahdanau_attention import BahdanauAttention
from summarus.modules.torch_transformer_decoder_net import TorchTransformerDecoderNet
from summarus.modules.torch_transformer_encoder import TorchTransformerEncoder |
#part of Python's built-in os module
from os import getenv
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dotenv import load_dotenv
from flask import g
#calling load_dotenv from python-dotenv since we used .env file
# load environment variables from the local .env file
load_dotenv()
# connect to the database using the DB_URL env variable; the engine manages
# the overall connection pool (NOTE(review): echo=True logs every SQL
# statement — presumably intentional for development; confirm for prod)
engine = create_engine(getenv('DB_URL'), echo=True, pool_size=20, max_overflow=0)
# session factory: generates temporary connections for performing CRUD operations
Session = sessionmaker(bind=engine)
# declarative base class that maps the models to real MySQL tables
Base = declarative_base()
# app gets sent in through the app/__init__.py
# app gets sent in through the app/__init__.py
def init_db(app):
    """Create all mapped tables and register the session-teardown hook."""
    Base.metadata.create_all(engine)
    # runs close_db() automatically at the end of each request context
    app.teardown_appcontext(close_db)
# saves the current connection on the g object if not already there, then returns connection from the g object instead of creating a new Session instance each time
def get_db():
    """Return the request-scoped session, creating and caching it on ``g``.

    Reusing the session stored on Flask's ``g`` avoids opening a new
    Session instance for every call within the same app context.
    """
    if 'db' in g:
        return g.db
    g.db = Session()
    return g.db
# needs to be called above
def close_db(e=None):
    """Teardown hook: remove the session from ``g`` and close it if present."""
    connection = g.pop('db', None)
    if connection is None:
        return
    connection.close()
# Elastic search mapping definition for the Molecule entity
from glados.es.ws2es.es_util import DefaultMappings
# Shards size - can be overridden from the default calculated value here
# shards = 3,
# Number of index replicas (shards stay at the computed default, see header)
replicas = 1
# Shared analyzers/normalizers reused across entity mappings
analysis = DefaultMappings.COMMON_ANALYSIS
# Field mappings for the organism-class entity.
# NOTE(review): 'TEXT' / 'NUMERIC' appear to be shorthand type names resolved
# by glados' es_util layer — confirm against DefaultMappings.
mappings = \
    {
        'properties':
        {
            'l1': 'TEXT',
            # EXAMPLES:
            # 'Eukaryotes' , 'Eukaryotes' , 'Bacteria' , 'Fungi' , 'Eukaryotes' , 'Eukaryotes' , 'Eukaryotes' , 'Eukaryo
            # tes' , 'Eukaryotes' , 'Eukaryotes'
            'l2': 'TEXT',
            # EXAMPLES:
            # 'Mammalia' , 'Mammalia' , 'Gram-Negative' , 'Ascomycota' , 'Kinetoplastida' , 'Kinetoplastida' , 'Mammalia
            # ' , 'Apicomplexa' , 'Apicomplexa' , 'Apicomplexa'
            'l3': 'TEXT',
            # EXAMPLES:
            # 'Rodentia' , 'Primates' , 'Acinetobacter' , 'Saccharomycetales' , 'Leishmania' , 'Trypanosoma' , 'Primates
            # ' , 'Cryptosporidium' , 'Cryptosporidium' , 'Eimeria'
            'oc_id': 'NUMERIC',
            # EXAMPLES:
            # '1' , '2' , '3' , '4' , '5' , '6' , '7' , '8' , '9' , '10'
            'tax_id': 'NUMERIC',
            # EXAMPLES:
            # '10030' , '9593' , '470' , '5475' , '5660' , '5691' , '9606' , '237895' , '5807' , '5800'
        }
    }
|
"""Configuration and fixtures for tests."""
import logging
import pytest
@pytest.fixture(scope="function")
def show_it_works():
    """Dummy fixture that outputs to console and shows that it works."""
    log = logging.getLogger(__name__)
    log.info("Setup of the fixture")
    yield
    log.info("Tear down of the fixture")
|
from moodlesg.math.base import Expression, _bool
def trunc(expr):
    """Return the truncated value of *expr* by delegating to
    ``Expression.__trunc__`` (unbound call on the Expression class)."""
    return Expression.__trunc__(expr)
# NOTE: deliberately shadows the builtin ``bool`` to provide the module-level
# truth-testing API for expressions; callers use it as moodlesg.math.bool(...)
def bool(expr):
    """
    Implement truth value testing.
    Expression is considered true if its result is nonzero.
    """
    return _bool(expr)
|
# @Title: 数组中数字出现的次数 II (数组中数字出现的次数 II LCOF)
# @Author: 18015528893
# @Date: 2021-01-28 20:22:52
# @Runtime: 240 ms
# @Memory: 15.7 MB
from typing import List
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Find the value occurring once when every other value occurs
        three times: a bit survives iff its count across nums is not a
        multiple of 3 (assumes 32-bit non-negative inputs)."""
        answer = 0
        for bit in range(32):
            mask = 1 << bit
            if sum(1 for value in nums if value & mask) % 3 == 1:
                answer |= mask
        return answer
if __name__ == '__main__':
    # quick manual check: 4 is the value that appears only once
    print(Solution().singleNumber([3, 4, 3, 3]))
|
# Chart configuration for enrollment tracking.
# NOTE(review): the single-letter marker keys appear to mean d=date(s),
# t=type, l=line style, c=color, n=name/label, s=show flag — confirm against
# the code that consumes CONFIG.
CONFIG = {
    "markers": [
        {
            # Deadline is 04/22/2022 @ 11:59pm, but use 04/23/2022 @ 12am
            # since it'll look nicer
            'd': ['2022-04-23'],
            't': 0,
            'l': 'solid',
            'c': '#ad0be3',
            'n': 'Drop (No W) Deadline',
            's': False
        },
        {
            # Deadline is 05/06/2022 @ 11:59pm, but use 05/07/2022 @ 12am
            # since it'll look nicer
            'd': ['2022-05-07'],
            't': 0,
            'l': 'solid',
            'c': '#20c2d4',
            'n': 'Drop (W) Deadline',
            's': False
        }
    ],
    "settings": {
        "termName": "Spring 2022 Post-Enrollment",
        "isNormal": False,
        "showTotal": False,
        "useEnrolledTtl": True,
        "useMarkers": True,
    }
}
from settings.defaults import *
# Browser and host for eukalypse screenshot testing
# (NOTE(review): the host looks like a Selenium remote endpoint — confirm)
EUKALYPSE_BROWSER='chrome'
EUKALYPSE_HOST='http://192.188.23.18:4444'
EMAIL_HOST = 'mail.s-v.de'
#SENTRY_DSN = 'http://10eb8cf305f741e9acf227a82f12d21a:017e859ff4434ae496b001f5d1f638de@sentry.inhouse.s-v.de:9000/12'
# SENTRY_DSN and LOGGING come from settings.defaults (star import above).
# When a DSN is configured, send INFO-and-above django.request records to Sentry.
if SENTRY_DSN:
    LOGGING['handlers']['sentry'] ={
        'level': 'INFO',
        'class': 'raven.handlers.logging.SentryHandler',
        'dsn': SENTRY_DSN,
    }
    LOGGING['loggers']['django.request']['handlers'].append('sentry')
|
# -*- coding: utf-8 -*-
class Lamp:
    """A lamp that renders ASCII art for its on/off state."""
    # index 0 = lit lamp, index 1 = unlit lamp
    _LAMPS = ['''
            .
        . | .
        \ | /
    . .---. .
    --- ( ) ---
        \ _ /
        _|_|_
        |_____|
    ''',
    '''
    .---.
    ( )
    \ _ /
    _|_|_
    |_____|
    ''']
    def __init__(self, is_turn_on=False):
        # track the current state; rendering happens on state changes
        self._is_turned_on = is_turn_on
    def turn_on(self):
        """Switch the lamp on and draw it."""
        self._is_turned_on = True
        self._display_image()
    def turn_off(self):
        """Switch the lamp off and draw it."""
        self._is_turned_on = False
        self._display_image()
    def _display_image(self):
        """Print the art matching the current state."""
        index = 0 if self._is_turned_on else 1
        print(self._LAMPS[index])
def run():
    """Interactive loop: prompt the user to turn the lamp on/off or quit."""
    # BUG FIX: raw_input() does not exist on Python 3 (NameError); fall back
    # to input() so the script runs on both Python 2 and 3.
    try:
        read_input = raw_input  # Python 2
    except NameError:
        read_input = input  # Python 3
    lamp = Lamp(is_turn_on = False)
    while True:
        command = str(read_input('''
    ¿Qué deseas hacer?
    [p]render
    [a]pagar
    [s]alir
    '''))
        if command == 'p':
            lamp.turn_on()
        elif command == 'a':
            lamp.turn_off()
        else:
            break
if __name__ == '__main__':
    # entry point: start the interactive lamp menu
    run()
#
# PySNMP MIB module HPN-ICF-VXLAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HPN-ICF-VXLAN-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:29:58 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Auto-generated by pysmi (see header). ``mibBuilder`` is injected into this
# module's globals by the pysnmp MIB loader at exec time — it is not imported.
# Pull in the base ASN.1 / SNMP symbol definitions this MIB depends on.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
hpnicfCommon, = mibBuilder.importSymbols("HPN-ICF-OID-MIB", "hpnicfCommon")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, Unsigned32, Gauge32, MibIdentifier, Bits, ModuleIdentity, ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType, IpAddress, iso, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Unsigned32", "Gauge32", "MibIdentifier", "Bits", "ModuleIdentity", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "NotificationType", "IpAddress", "iso", "Integer32")
TruthValue, RowStatus, TextualConvention, DisplayString, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "RowStatus", "TextualConvention", "DisplayString", "MacAddress")
# Module identity and its revision history
hpnicfVxlan = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150))
hpnicfVxlan.setRevisions(('2013-11-21 09:00',))
if mibBuilder.loadTexts: hpnicfVxlan.setLastUpdated('201311210900Z')
if mibBuilder.loadTexts: hpnicfVxlan.setOrganization('')
# Scalar group: global VXLAN switches and counters
hpnicfVxlanObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1))
hpnicfVxlanScalarGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 1))
hpnicfVxlanLocalMacNotify = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 1, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVxlanLocalMacNotify.setStatus('current')
hpnicfVxlanRemoteMacLearn = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 1, 2), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpnicfVxlanRemoteMacLearn.setStatus('current')
hpnicfVxlanNextVxlanID = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfVxlanNextVxlanID.setStatus('current')
hpnicfVxlanConfigured = MibScalar((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfVxlanConfigured.setStatus('current')
# VXLAN table, indexed by VXLAN ID
hpnicfVxlanTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2), )
if mibBuilder.loadTexts: hpnicfVxlanTable.setStatus('current')
hpnicfVxlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2, 1), ).setIndexNames((0, "HPN-ICF-VXLAN-MIB", "hpnicfVxlanID"))
if mibBuilder.loadTexts: hpnicfVxlanEntry.setStatus('current')
hpnicfVxlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: hpnicfVxlanID.setStatus('current')
hpnicfVxlanAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2, 1, 2), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfVxlanAddrType.setStatus('current')
hpnicfVxlanGroupAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2, 1, 3), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfVxlanGroupAddr.setStatus('current')
hpnicfVxlanSourceAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2, 1, 4), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfVxlanSourceAddr.setStatus('current')
hpnicfVxlanVsiIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2, 1, 5), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfVxlanVsiIndex.setStatus('current')
hpnicfVxlanRemoteMacCount = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfVxlanRemoteMacCount.setStatus('current')
hpnicfVxlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 2, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfVxlanRowStatus.setStatus('current')
# Tunnel table, indexed by (VXLAN ID, tunnel ID)
hpnicfVxlanTunnelTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 3), )
if mibBuilder.loadTexts: hpnicfVxlanTunnelTable.setStatus('current')
hpnicfVxlanTunnelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 3, 1), ).setIndexNames((0, "HPN-ICF-VXLAN-MIB", "hpnicfVxlanID"), (0, "HPN-ICF-VXLAN-MIB", "hpnicfVxlanTunnelID"))
if mibBuilder.loadTexts: hpnicfVxlanTunnelEntry.setStatus('current')
hpnicfVxlanTunnelID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 3, 1, 1), Unsigned32())
if mibBuilder.loadTexts: hpnicfVxlanTunnelID.setStatus('current')
hpnicfVxlanTunnelRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfVxlanTunnelRowStatus.setStatus('current')
hpnicfVxlanTunnelOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfVxlanTunnelOctets.setStatus('current')
hpnicfVxlanTunnelPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfVxlanTunnelPackets.setStatus('current')
# Per-tunnel bound-VXLAN count table
hpnicfVxlanTunnelBoundTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 4), )
if mibBuilder.loadTexts: hpnicfVxlanTunnelBoundTable.setStatus('current')
hpnicfVxlanTunnelBoundEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 4, 1), ).setIndexNames((0, "HPN-ICF-VXLAN-MIB", "hpnicfVxlanTunnelID"))
if mibBuilder.loadTexts: hpnicfVxlanTunnelBoundEntry.setStatus('current')
hpnicfVxlanTunnelBoundVxlanNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 4, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfVxlanTunnelBoundVxlanNum.setStatus('current')
# Learned-MAC table, indexed by (VSI index, MAC address)
hpnicfVxlanMacTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 5), )
if mibBuilder.loadTexts: hpnicfVxlanMacTable.setStatus('current')
hpnicfVxlanMacEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 5, 1), ).setIndexNames((0, "HPN-ICF-VXLAN-MIB", "hpnicfVxlanVsiIndex"), (0, "HPN-ICF-VXLAN-MIB", "hpnicfVxlanMacAddr"))
if mibBuilder.loadTexts: hpnicfVxlanMacEntry.setStatus('current')
hpnicfVxlanMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 5, 1, 1), MacAddress())
if mibBuilder.loadTexts: hpnicfVxlanMacAddr.setStatus('current')
hpnicfVxlanMacTunnelID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 5, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfVxlanMacTunnelID.setStatus('current')
hpnicfVxlanMacType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("selfLearned", 1), ("staticConfigured", 2), ("protocolLearned", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpnicfVxlanMacType.setStatus('current')
# Static-MAC configuration table, indexed by (VSI index, MAC address)
hpnicfVxlanStaticMacTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 6), )
if mibBuilder.loadTexts: hpnicfVxlanStaticMacTable.setStatus('current')
hpnicfVxlanStaticMacEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 6, 1), ).setIndexNames((0, "HPN-ICF-VXLAN-MIB", "hpnicfVxlanVsiIndex"), (0, "HPN-ICF-VXLAN-MIB", "hpnicfVxlanStaticMacAddr"))
if mibBuilder.loadTexts: hpnicfVxlanStaticMacEntry.setStatus('current')
hpnicfVxlanStaticMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 6, 1, 1), MacAddress())
if mibBuilder.loadTexts: hpnicfVxlanStaticMacAddr.setStatus('current')
hpnicfVxlanStaticMacTunnelID = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 6, 1, 2), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfVxlanStaticMacTunnelID.setStatus('current')
hpnicfVxlanStaticMacRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 15, 2, 150, 1, 6, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hpnicfVxlanStaticMacRowStatus.setStatus('current')
# Export all symbols so other MIB modules can import them by name
mibBuilder.exportSymbols("HPN-ICF-VXLAN-MIB", PYSNMP_MODULE_ID=hpnicfVxlan, hpnicfVxlanMacTable=hpnicfVxlanMacTable, hpnicfVxlanObjects=hpnicfVxlanObjects, hpnicfVxlanScalarGroup=hpnicfVxlanScalarGroup, hpnicfVxlanGroupAddr=hpnicfVxlanGroupAddr, hpnicfVxlanTunnelTable=hpnicfVxlanTunnelTable, hpnicfVxlanMacTunnelID=hpnicfVxlanMacTunnelID, hpnicfVxlanConfigured=hpnicfVxlanConfigured, hpnicfVxlanRemoteMacLearn=hpnicfVxlanRemoteMacLearn, hpnicfVxlanRowStatus=hpnicfVxlanRowStatus, hpnicfVxlanLocalMacNotify=hpnicfVxlanLocalMacNotify, hpnicfVxlanMacType=hpnicfVxlanMacType, hpnicfVxlanVsiIndex=hpnicfVxlanVsiIndex, hpnicfVxlanTable=hpnicfVxlanTable, hpnicfVxlanRemoteMacCount=hpnicfVxlanRemoteMacCount, hpnicfVxlanID=hpnicfVxlanID, hpnicfVxlanStaticMacAddr=hpnicfVxlanStaticMacAddr, hpnicfVxlanTunnelBoundVxlanNum=hpnicfVxlanTunnelBoundVxlanNum, hpnicfVxlanMacEntry=hpnicfVxlanMacEntry, hpnicfVxlanStaticMacRowStatus=hpnicfVxlanStaticMacRowStatus, hpnicfVxlanEntry=hpnicfVxlanEntry, hpnicfVxlanSourceAddr=hpnicfVxlanSourceAddr, hpnicfVxlanNextVxlanID=hpnicfVxlanNextVxlanID, hpnicfVxlanTunnelOctets=hpnicfVxlanTunnelOctets, hpnicfVxlanTunnelBoundTable=hpnicfVxlanTunnelBoundTable, hpnicfVxlanStaticMacEntry=hpnicfVxlanStaticMacEntry, hpnicfVxlanStaticMacTable=hpnicfVxlanStaticMacTable, hpnicfVxlanAddrType=hpnicfVxlanAddrType, hpnicfVxlanTunnelBoundEntry=hpnicfVxlanTunnelBoundEntry, hpnicfVxlanTunnelRowStatus=hpnicfVxlanTunnelRowStatus, hpnicfVxlanMacAddr=hpnicfVxlanMacAddr, hpnicfVxlanTunnelEntry=hpnicfVxlanTunnelEntry, hpnicfVxlanTunnelID=hpnicfVxlanTunnelID, hpnicfVxlanTunnelPackets=hpnicfVxlanTunnelPackets, hpnicfVxlanStaticMacTunnelID=hpnicfVxlanStaticMacTunnelID, hpnicfVxlan=hpnicfVxlan)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import sqrt, fabs
from test_statistics import isclose
from dcstats.basic_stats import ttest_independent, ttest_paired
from dcstats.basic_stats import TTestBinomial, TTestContinuous
def test_ttest_P_paired():
    """Paired t-test regression values for a fixed 3-sample pair."""
    sample_x = [2, 4, 6]
    sample_y = [1, 2, 3]
    tval, pval, _df = ttest_paired(sample_x, sample_y)
    assert isclose(tval, 3.464101615137754, rel_tol=0.0000001)
    assert isclose(pval, 0.07417990022744862, rel_tol=0.0000001)
def test_ttest_P_unpaired():
    """Independent-samples t-test regression values."""
    sample_x = [1, 2, 3]
    sample_y = [1, 4, 6]
    tval, pval, _df = ttest_independent(sample_x, sample_y)
    assert isclose(tval, -1.06600358178, rel_tol=0.0000001)
    assert isclose(pval, 0.346490370194, rel_tol=0.0000001)
def test_regression_ttest_binomial():
    """Binomial t-test regression values for 3/4 vs 4/5 success/failure."""
    ttb = TTestBinomial(3, 4, 4, 5)
    expected = {
        'p1': 0.428571,
        'p2': 0.444444,
        'sd1': 0.187044,
        'sd2': 0.165635,
        'tval': 0.063492,
        'P': 0.949375,
    }
    for attr, value in expected.items():
        assert isclose(getattr(ttb, attr), value, rel_tol=0.0001)
def test_regression_ttest_continuos():
    """Paired continuous t-test regression values."""
    # Samples from treatment T1 and T2
    treatment_one = [100, 108, 119, 127, 132, 135, 136]  # , 164]
    treatment_two = [122, 130, 138, 142, 152, 154, 176]
    ttc = TTestContinuous(treatment_one, treatment_two, True)
    assert isclose(ttc.tval, -7.325473, rel_tol=0.000001)
    assert isclose(ttc.P, 0.000331, rel_tol=0.01)
|
class Solution:
    def diStringMatch(self, S: str) -> "List[int]":
        """Reconstruct a permutation of 0..len(S) matching the I/D pattern.

        Greedy: on 'I' emit the smallest unused value, on 'D' the largest;
        the single leftover value (low == high) is appended at the end.

        BUG FIX: the return annotation is now a string — ``List`` is never
        imported in this file, so the previous eager annotation raised
        NameError at import time.
        """
        low, high = 0, len(S)
        ans = []
        for ch in S:
            if ch == 'I':
                ans.append(low)
                low += 1
            else:
                ans.append(high)
                high -= 1
        ans.append(low)
        return ans
|
"""File name operation.
Change file name for encryption.
"""
import random
WORDS = ('abcdefghijkmnopqrstuvwxyz'
'ABCDEFGHIJKLMNPQRSTUVWXYZ123456789'
'_-')
def change(filename, min_word=5, max_word=10, min_class=3, max_class=6):
"""Change directry name."""
extention = ""
if '.' in filename:
extention = filename.split('.')[-1]
dir_class_len = random.randint(min_class, max_class)
dir_names = []
for n in range(dir_class_len):
dir_names.append(_generate_random(min_word, max_word))
return '/'.join(dir_names) + '.' + extention
def _generate_random(min_word=5, max_word=10):
"""Generate random string.
>> _generate_random(5, 10) == _generate_random(5, 10)
False
"""
word_len = random.randrange(min_word, max_word)
word = ""
for n in range(word_len):
word_num = random.randint(0, int(len(WORDS)) - 1)
word += WORDS[word_num]
return word
if __name__ == '__main__':
    # Smoke test: generate 10k obfuscated paths and fail on any duplicate.
    ds = []
    for n in range(10000):
        d = change('hoge.png',
                   min_word=7,
                   max_word=13,
                   min_class=4,
                   max_class=7)
        if d in ds:
            raise ValueError('duplication dir')
        ds.append(d)
|
from django.contrib.auth.models import User
from django.db import models
from django.db.models import Sum
class Provincia(models.Model):
    """Province, keyed by its DNE (electoral registry) id."""
    dne_id = models.PositiveIntegerField(primary_key=True)
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
class Municipio(models.Model):
    """Municipality belonging to a Provincia, keyed by its DNE id."""
    dne_id = models.PositiveIntegerField(primary_key=True)
    provincia = models.ForeignKey(Provincia)
    nombre = models.CharField(max_length=100)
    def __unicode__(self):
        return self.nombre
class Circuito(models.Model):
    """Electoral circuit within a municipality."""
    municipio = models.ForeignKey(Municipio)
    # NOTE(review): circuit number stored as text — presumably to allow
    # leading zeros/letters; confirm before changing to an integer field.
    numero = models.CharField(max_length=100)
    def __unicode__(self):
        return u"Circuito %s (%s)" % (self.numero, self.municipio)
class LugarVotacion(models.Model):
    """Polling place within a circuit, keyed by its DNE id."""
    dne_id = models.PositiveIntegerField(primary_key=True)
    circuito = models.ForeignKey(Circuito)
    nombre = models.CharField(max_length=100)
    direccion = models.CharField(max_length=100)
class Mesa(models.Model):
    """A polling station (mesa) within a circuit."""
    circuito = models.ForeignKey(Circuito)
    lugarvotacion = models.ForeignKey(LugarVotacion, null=True)
    # BUG FIX: removed max_length=100 — it is not a valid option for
    # IntegerField (it is ignored by Django), so dropping it changes no
    # database schema or behavior.
    numero = models.IntegerField()
    url = models.URLField(null=True)
    @property
    def computados(self):
        """Total counted votes for this mesa (None when no rows exist)."""
        return self.votomesa_set.aggregate(Sum('votos'))['votos__sum']
    def __unicode__(self):
        return u"Mesa %s (%s)" % (self.numero, self.circuito)
class Opcion(models.Model):
    """Ballot option (party/list), keyed by its DNE id."""
    nombre = models.CharField(max_length=100, unique=True)
    dne_id = models.PositiveIntegerField(primary_key=True)
    def __unicode__(self):
        return self.nombre
class Eleccion(models.Model):
    """An election with its date and the set of ballot options offered."""
    nombre = models.CharField(max_length=50)
    fecha = models.DateTimeField()
    opciones = models.ManyToManyField(Opcion)
    def __unicode__(self):
        return "%s - %s" % (self.nombre, self.fecha.strftime('%d/%m/%Y'))
class AbstractVotoMesa(models.Model):
    """Base vote-count row: votes for one option at one mesa.
    Each (mesa, opcion) pair may appear at most once per concrete table."""
    mesa = models.ForeignKey(Mesa)
    opcion = models.ForeignKey(Opcion)
    votos = models.IntegerField()
    class Meta:
        abstract = True
        unique_together = ('mesa', 'opcion')
    def __unicode__(self):
        return u"%s: %d" % (self.opcion, self.votos)
class VotoMesaOficial(AbstractVotoMesa):
    """Vote counts from the official source (per the class name)."""
    pass
class VotoMesaSocial(AbstractVotoMesa):
    """Crowd-reported vote counts, attributed to the reporting user."""
    usuario = models.ForeignKey(User)
class VotoMesaOCR(AbstractVotoMesa):
    """Vote counts extracted via OCR (per the class name)."""
    pass
|
from .object import KittiObjectLoader, KittiObjectClass
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from azureml.core import Run
from tfod_utils.evaluation_tf2 import Scoring
from tfod_utils.evaluation_tf2 import ImageMetrics
from tfod_utils.evaluation_tf2 import DetectionMetrics
from tfod_utils.training_tf2 import TF2ODRun
from tfod_utils.tfrecords import TFRecord
# AzureML run context used for metric logging and model registration
# (NOTE(review): presumably an offline run when executed outside AzureML)
run = Run.get_context()
def get_arguments(args=None):
    """Parse command-line arguments for the training run.

    Parameters:
    - args: optional list of argument strings; defaults to sys.argv[1:]
      (backward compatible — existing callers pass nothing).

    Returns an argparse.Namespace.

    Fixes: the --build_id help text was a copy-paste of 'Batch Size';
    stray trailing commas (which built and discarded 1-tuples) removed;
    type= added so CLI values match the int/float defaults.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--desc',
                        help='Description of experiment',
                        required=True)
    parser.add_argument('--data_dir',
                        help='mnt with images and label sets',
                        required=True)
    parser.add_argument('--image_type',
                        help='Image type either thermal or plate',
                        required=True)
    parser.add_argument('--train_csv',
                        help='CSV file containing the training data',
                        default="latest")
    parser.add_argument('--test_csv',
                        help='CSV file containing the test images and labels',
                        default="latest")
    parser.add_argument('--base_model',
                        help='Dir name of base model in mnt',
                        required=False)
    parser.add_argument('--steps',
                        help='Number of Steps',
                        type=int,
                        default=20000)
    parser.add_argument('--batch_size',
                        help='Batch Size',
                        type=int,
                        default=1)
    parser.add_argument('--build_id',
                        help='DevOps build id used for model registration',
                        default=None)
    parser.add_argument('--eval_conf',
                        help='Evaluation Conf Threshold',
                        type=float,
                        default=0.5)
    FLAGS = parser.parse_args(args)
    return FLAGS
def main():
    """End-to-end TF2 object-detection pipeline: build TFRecords, train,
    export, evaluate, and (when triggered from DevOps) register the model."""
    # parse arguments
    FLAGS = get_arguments()
    # Set base model dir
    base_model_dir = os.path.join(FLAGS.data_dir, 'models')
    image_dir = 'images'
    label_dir = 'datasets'
    # create tensorflow run object
    train_run = TF2ODRun(run, FLAGS, base_model_dir, image_dir, label_dir)
    # create tf records needed for training (masks only for mask_rcnn bases)
    tfrecords = TFRecord(train_run.base_path,
                         train_run.image_dir,
                         train_run.train_df,
                         train_run.test_df,
                         include_masks=train_run.base_model.startswith('mask_rcnn'))
    # log the details of the configured run object to AML
    train_run.log_details()
    # in addition to tfrecords parse any hyparams required as kwargs
    train_run.update_pipeline_config(tfrecords)
    # Train
    train_run.train_model()
    # Model Saving Step
    checkpoint_prefix = train_run.base_model_dir
    model_dir = train_run.export_model(checkpoint_prefix)
    saved_model_path = os.path.join(model_dir, 'saved_model')
    # image and test csv path
    img_dir = os.path.join(train_run.base_path, train_run.image_dir)
    test_csv = os.path.join(train_run.base_path,
                            train_run.label_dir,
                            train_run.test_csv)
    # create eval_tf2 object
    dets = Scoring(saved_model_path,
                   train_run.mapping_file,
                   test_csv,
                   img_dir,
                   conf=FLAGS.eval_conf)
    # Calculate image-level metrics
    img_metrics = ImageMetrics(dets)
    # Calculate detection metrics
    det_metrics = DetectionMetrics(dets)
    # AML log img and detection metrics
    img_metrics.log_AML(train_run)
    det_metrics.log_AML(train_run)
    # if triggered from devops we automatically register with the build_id
    if FLAGS.build_id is not None:
        run_id = run.get_details()['runId']
        tags = {'run_id': run_id,
                'build_id': FLAGS.build_id}
        run.register_model(model_name=FLAGS.image_type,
                           model_path=model_dir,
                           tags=tags)
if __name__ == '__main__':
    main()
|
from RIAssigner.data import MatchMSData
class MatchMSDataBuilder:
    """Fluent builder for MatchMSData objects."""
    def __init__(self):
        # source file is unset until with_filename(); RT unit defaults to seconds
        self.filename = None
        self._rt_unit = 'seconds'
    def with_filename(self, filename: str):
        """Set the source filename; returns the builder for chaining."""
        self.filename = filename
        return self
    def with_rt_unit(self, rt_unit: str):
        """Set the retention-time unit; returns the builder for chaining."""
        self._rt_unit = rt_unit
        return self
    def build(self) -> MatchMSData:
        """Create the MatchMSData instance from the configured state."""
        return MatchMSData(self.filename, self._rt_unit)
|
import random
class RankingSelection:
    """Fitness-proportionate (roulette-wheel) selection."""
    def select_candidates(self, candidates, number):
        """ Select a number of candidates from given candidates list.
        Fitness level is used to associate a probability of selection with each candidate.
        Parameters:
        - candidates (list): given candidates list to select
        - number (int): number of candidates to select
        """
        weights = [individual.fitness for individual in candidates]
        return random.choices(candidates, weights=weights, k=number)
class Tournament:
    """Tournament selection: repeatedly pit ``size`` randomly chosen
    candidates against each other and keep each round's winner."""
    def __init__(self, size=2, selection_rate=0.8):
        self.size = size
        self.selection_rate = selection_rate
    def select_candidates(self, candidates, number):
        """ Select a number of candidates from given candidates list.
        Involves running several "tournaments" among a few individuals (or chromosomes) chosen at random from the population.
        Parameters:
        - candidates (list): given candidates list to select
        - number (int): number of candidates to select
        """
        return [
            self.compete(random.choices(candidates, k=self.size))
            for _ in range(number)
        ]
    def compete(self, competitors):
        """Pick one winner: the fittest wins with probability
        ``selection_rate``; each next-ranked competitor wins with a
        geometrically decaying probability; the last one wins by default."""
        competitors.sort(key=lambda c: c.fitness, reverse=True)
        q = 1 - self.selection_rate
        cumulative = q
        roll = random.random()
        for rank in range(len(competitors) - 1):
            if roll < 1 - cumulative:
                return competitors[rank]
            cumulative *= q
        return competitors[-1]
class TopSelection:
    """Truncation selection: draw only from the top fraction of the list.

    NOTE(review): assumes *candidates* is already ordered best-first —
    confirm callers sort before calling.
    """
    def __init__(self, selection_rate=0.2):
        self.selection_rate = selection_rate
    def select_candidates(self, candidates, number):
        """ Randomly select a number of candidates from top portion of given candidates list.
        Parameters:
        - candidates (list): given candidates list to select
        - number (int): number of candidates to select
        """
        cutoff = int(self.selection_rate * len(candidates))
        return random.choices(candidates[:cutoff], k=number)
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.api.process import Process
from lib.exceptions.exceptions import CuckooPackageError
class Package(object):
    """Base abstract analysis package."""
    PATHS = []
    def __init__(self, options=None):
        """@param options: options dict.

        BUG FIX: the default used to be a shared mutable ``{}`` — every
        Package created without options aliased the same dict, so one
        instance's mutations leaked into all others.
        """
        self.options = {} if options is None else options
        self.pids = []
    def set_pids(self, pids):
        """Update list of monitored PIDs in the package context.
        @param pids: list of pids.
        """
        self.pids = pids
    def start(self):
        """Run analysis package.
        @raise NotImplementedError: this method is abstract.
        """
        raise NotImplementedError
    def check(self):
        """Check whether the analysis should continue (True by default)."""
        return True
    def execute(self, cmd):
        """Start an executable for analysis.
        @param cmd: command (path and arguments) to execute
        @return: process pid
        @raise CuckooPackageError: when the process cannot be started
        """
        p = Process()
        if not p.execute(cmd):
            raise CuckooPackageError("Unable to execute the initial process, "
                                     "analysis aborted.")
        return p.pid
    def package_files(self):
        """A list of files to upload to host.
        The list should be a list of tuples (<path on guest>, <name of file in package_files folder>).
        (package_files is a folder that will be created in analysis folder).
        """
        return None
    def finish(self):
        """Finish run.
        If specified to do so, this method dumps the memory of
        all running processes.
        """
        if self.options.get("procmemdump"):
            for pid in self.pids:
                p = Process(pid=pid)
                p.dump_memory()
        return True
    def get_pids(self):
        """Return extra PIDs tracked by the package (none by default)."""
        return []
class Auxiliary(object):
    """Base class for auxiliary modules that run alongside an analysis."""

    # Scheduling priority; presumably higher runs earlier -- TODO confirm
    # against the scheduler.
    priority = 0

    def get_pids(self):
        """Return the list of PIDs owned by this module (none by default)."""
        return []
|
import logging
import pytest
import requests
from pytest_bdd import parsers
from pytest_bdd import scenarios
from pytest_bdd import then
from pytest_bdd import when
@pytest.fixture
def pytestbdd_strict_gherkin():
    """Override pytest-bdd's strict-Gherkin flag; returning False relaxes
    strict scenario/step validation for this module."""
    return False
scenarios('features/prometheus.feature')
@when(parsers.parse(
    "I list the prometheus '{prometheus_endpoints}' job endpoints"))
def get_prometheus_endpoint(request, kubectl_proxy, prometheus_endpoints):
    """Query the Prometheus targets API through the kubectl proxy and keep
    only active targets whose `job` label equals `prometheus_endpoints`.

    The filtered list is stashed on `request` for later steps.
    The `kubectl_proxy` fixture is required only for its side effect
    (the proxy must be running).
    """
    targets_url = (
        'http://127.0.0.1:8001/api/v1/namespaces/kube-ops/services/'
        'kube-prometheus:http/proxy/api/v1/targets')
    targets_response = requests.get(targets_url)
    targets_response.raise_for_status()

    def matching_targets(payload, job_label):
        # Targets lacking a job label are logged and skipped.
        for endpoint in payload['data']['activeTargets']:
            logging.debug('Prometheus Endpoint found {}'.format(endpoint))
            try:
                if endpoint['labels']['job'] == job_label:
                    yield endpoint
            except KeyError:
                logging.warning(
                    'Endpoints {} has no job label'.format(endpoint))

    matched = list(matching_targets(
        targets_response.json(), prometheus_endpoints))
    request.prometheus_endpoints = matched
    return matched
@then(parsers.parse('I should count as many endpoints as {groups_name} hosts'))
def count_prometheus_endpoint(request, groups_name, inventory_obj):
    """Assert that the endpoints collected by the previous step match the
    number of distinct hosts across the colon-separated inventory groups."""
    hosts = set()
    for group in groups_name.split(":"):
        hosts.update(inventory_obj.get_groups_dict()[group])
    assert len(request.prometheus_endpoints) == len(hosts)
|
from command import *
class TransferCommand(Command):
    """CLI command that transfers coins between two libra accounts."""

    def get_aliases(self):
        """Command aliases; the 'b'-suffixed variants block until commit."""
        return ["transfer", "transferb", "t", "tb"]

    def get_params_help(self):
        """Usage string describing positional and optional parameters."""
        return ("\n\t<sender_account_address>|<sender_account_ref_id>"
                " <receiver_account_address>|<receiver_account_ref_id> <number_of_coins>"
                " [gas_unit_price_in_micro_libras (default=0)] [max_gas_amount_in_micro_libras (default 140000)]"
                " Suffix 'b' is for blocking. ")

    def get_description(self):
        """One-line description shown in command help."""
        return "Transfer coins (in libra) from account to another."

    def execute(self, client, params):
        """Validate arguments and submit a coin transfer via `client`.

        params layout: [alias, sender, receiver, amount,
        gas_unit_price?, max_gas_amount?].
        """
        if len(params) < 4 or len(params) > 6:
            print("Invalid number of arguments for transfer")
            print(
                "{} {}".format(
                    " | ".join(self.get_aliases()),
                    self.get_params_help()
                )
            )
            return
        try:
            # BUG FIX: the original only read params[4] when exactly 5
            # params were supplied, so passing BOTH optional arguments
            # silently reset the gas unit price to 0.
            if len(params) >= 5:
                gas_unit_price_in_micro_libras = int(params[4])
            else:
                gas_unit_price_in_micro_libras = 0
            if len(params) == 6:
                max_gas_amount_in_micro_libras = int(params[5])
            else:
                max_gas_amount_in_micro_libras = 140_000
            print(">> Transferring")
            # Alias suffix 'b' selects blocking submission.
            is_blocking = blocking_cmd(params[0])
            index, sequence_number = client.transfer_coins(params[1], params[2], params[3],
                max_gas_amount_in_micro_libras, gas_unit_price_in_micro_libras, is_blocking)
            if is_blocking:
                print("Finished transaction!")
            else:
                print("Transaction submitted to validator")
                print(
                    "To query for transaction status, run: query txn_acc_seq {} {} \
<fetch_events=true|false>".format(
                        index, sequence_number
                    )
                )
        except Exception as err:
            report_error("Failed to perform transaction", err)
|
"""Support for testing internet speed via Speedtest.net."""
from datetime import timedelta
import logging
import speedtest
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_SCAN_INTERVAL,
EVENT_HOMEASSISTANT_STARTED,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
CONF_MANUAL,
CONF_SERVER_ID,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SERVER,
DOMAIN,
SENSOR_TYPES,
SPEED_TEST_SERVICE,
)
_LOGGER = logging.getLogger(__name__)

# YAML configuration schema (legacy setup path): validated config is
# imported into a config entry by async_setup below.
# extra=vol.ALLOW_EXTRA lets unrelated top-level keys pass validation.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_SERVER_ID): cv.positive_int,
                vol.Optional(
                    CONF_SCAN_INTERVAL, default=timedelta(minutes=DEFAULT_SCAN_INTERVAL)
                ): cv.positive_time_period,
                vol.Optional(CONF_MANUAL, default=False): cv.boolean,
                vol.Optional(
                    CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)
                ): vol.All(cv.ensure_list, [vol.In(list(SENSOR_TYPES))]),
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Platforms this integration forwards config entries to.
PLATFORMS = ["sensor"]
def server_id_valid(server_id):
    """Check if server_id is valid."""
    try:
        client = speedtest.Speedtest()
        client.get_servers([int(server_id)])
    except (speedtest.ConfigRetrievalError, speedtest.NoMatchedServers):
        # Either the speedtest config could not be fetched or no server
        # matches the given id.
        return False
    return True
async def async_setup(hass, config):
    """Import integration from config."""
    # Guard clause: nothing to import when the domain is absent from YAML.
    if DOMAIN not in config:
        return True
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=config[DOMAIN]
        )
    )
    return True
async def async_setup_entry(hass, config_entry):
    """Set up the Speedtest.net component from a config entry.

    Creates the data coordinator and either runs the first speed test
    immediately (HA already running) or defers it until HA has started.
    """
    coordinator = SpeedTestDataCoordinator(hass, config_entry)
    await coordinator.async_setup()

    async def _enable_scheduled_speedtests(*_):
        """Activate the data update coordinator."""
        coordinator.update_interval = timedelta(
            minutes=config_entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
        )
        await coordinator.async_refresh()

    if not config_entry.options[CONF_MANUAL]:
        if hass.state == CoreState.running:
            # HA is already up: run the first test now; if it fails,
            # signal not-ready so setup is retried later.
            await _enable_scheduled_speedtests()
            if not coordinator.last_update_success:
                raise ConfigEntryNotReady
        else:
            # Running a speed test during startup can prevent
            # integrations from being able to setup because it
            # can saturate the network interface.
            hass.bus.async_listen_once(
                EVENT_HOMEASSISTANT_STARTED, _enable_scheduled_speedtests
            )
    hass.data[DOMAIN] = coordinator
    hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)
    return True
async def async_unload_entry(hass, config_entry):
    """Unload SpeedTest Entry from config_entry."""
    hass.services.async_remove(DOMAIN, SPEED_TEST_SERVICE)
    hass.data[DOMAIN].async_unload()
    # Only drop the shared coordinator once all platforms unloaded cleanly.
    if not await hass.config_entries.async_unload_platforms(
        config_entry, PLATFORMS
    ):
        return False
    hass.data.pop(DOMAIN)
    return True
class SpeedTestDataCoordinator(DataUpdateCoordinator):
    """Get the latest data from speedtest.net."""

    def __init__(self, hass, config_entry):
        """Initialize the data object."""
        self.hass = hass
        self.config_entry = config_entry
        # speedtest.Speedtest client; created lazily in async_setup().
        self.api = None
        # Friendly server label -> server description dict.
        self.servers = {}
        # Callback that detaches the options-update listener on unload.
        self._unsub_update_listener = None
        super().__init__(
            self.hass,
            _LOGGER,
            name=DOMAIN,
            update_method=self.async_update,
        )

    def update_servers(self):
        """Update list of test servers (blocking; run in an executor)."""
        try:
            server_list = self.api.get_servers()
        except speedtest.ConfigRetrievalError:
            # Best effort: keep the previous server list on failure.
            _LOGGER.debug("Error retrieving server list")
            return
        # Placeholder entry for the automatic "closest server" choice.
        self.servers[DEFAULT_SERVER] = {}
        # Index servers under a human-readable label, ordered by
        # country then sponsor.
        for server in sorted(
            server_list.values(),
            key=lambda server: server[0]["country"] + server[0]["sponsor"],
        ):
            self.servers[
                f"{server[0]['country']} - {server[0]['sponsor']} - {server[0]['name']}"
            ] = server[0]

    def update_data(self):
        """Get the latest data from speedtest.net.

        Blocking; executed in an executor thread by async_update().
        Returns the speedtest results as a dict.
        """
        self.update_servers()
        # Clear the cached "closest" list so server selection is fresh.
        self.api.closest.clear()
        if self.config_entry.options.get(CONF_SERVER_ID):
            # A specific server was configured; restrict to it.
            server_id = self.config_entry.options.get(CONF_SERVER_ID)
            self.api.get_servers(servers=[server_id])
        self.api.get_best_server()
        _LOGGER.debug(
            "Executing speedtest.net speed test with server_id: %s", self.api.best["id"]
        )
        self.api.download()
        self.api.upload()
        return self.api.results.dict()

    async def async_update(self, *_):
        """Update Speedtest data; wraps the blocking update_data()."""
        try:
            return await self.hass.async_add_executor_job(self.update_data)
        except (speedtest.ConfigRetrievalError, speedtest.NoMatchedServers) as err:
            raise UpdateFailed from err

    async def async_set_options(self):
        """Set options for entry.

        One-time migration: moves legacy settings stored in entry data
        into entry options (only when no options exist yet).
        """
        if not self.config_entry.options:
            data = {**self.config_entry.data}
            options = {
                CONF_SCAN_INTERVAL: data.pop(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL),
                CONF_MANUAL: data.pop(CONF_MANUAL, False),
                CONF_SERVER_ID: str(data.pop(CONF_SERVER_ID, "")),
            }
            self.hass.config_entries.async_update_entry(
                self.config_entry, data=data, options=options
            )

    async def async_setup(self):
        """Set up SpeedTest: create the client, migrate options, prime the
        server list, and register the manual-run service."""
        try:
            self.api = await self.hass.async_add_executor_job(speedtest.Speedtest)
        except speedtest.ConfigRetrievalError as err:
            raise ConfigEntryNotReady from err

        async def request_update(call):
            """Request update."""
            await self.async_request_refresh()

        await self.async_set_options()
        await self.hass.async_add_executor_job(self.update_servers)
        self.hass.services.async_register(DOMAIN, SPEED_TEST_SERVICE, request_update)
        self._unsub_update_listener = self.config_entry.add_update_listener(
            options_updated_listener
        )

    @callback
    def async_unload(self):
        """Unload the coordinator by detaching the options listener."""
        if not self._unsub_update_listener:
            return
        self._unsub_update_listener()
        self._unsub_update_listener = None
async def options_updated_listener(hass, entry):
    """Handle options update."""
    coordinator = hass.data[DOMAIN]
    if entry.options[CONF_MANUAL]:
        # Manual mode: disable scheduled refreshes entirely.
        coordinator.update_interval = None
        return
    coordinator.update_interval = timedelta(
        minutes=entry.options[CONF_SCAN_INTERVAL]
    )
    await coordinator.async_request_refresh()
|
import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
import os
import argparse
import logging
import json
from forecaster.utils import _load_model_dict
app = Flask(__name__)
def forecast(ticker, data):
    """Produce a projection for `ticker` using its persisted model.

    Parameters:
    - ticker (str): key into the persisted model dictionary.
    - data: history passed to the model's future-dataframe builder.

    Returns a DataFrame with columns ['ds', 'y_pred'].
    """
    # Fix: removed leftover debug `print(models)` and renamed the local
    # result (it shadowed this function's own name).
    models = _load_model_dict()
    model = models[ticker]['model']
    future = model.make_future_dataframe(
        data, periods=8, n_historic_predictions=False)
    prediction = model.predict(future)
    # For each row, take the first non-null prediction column as y_pred.
    # NOTE(review): DataFrame.lookup is deprecated in pandas >= 1.2;
    # consider a reindex/melt-based lookup when upgrading.
    lookup_df = prediction.drop(['ds'], axis=1)
    projection = prediction.assign(
        y_pred=lookup_df.lookup(lookup_df.index, lookup_df.isnull().idxmin(1))
    )[['ds', 'y_pred']]
    return projection
@app.route('/')
def home():
    """Health-check endpoint.

    Fix: the original returned the bare bool True, which is not a valid
    Flask response type (Flask raises TypeError converting it); return a
    simple string body instead.
    """
    return "OK"
@app.route('/predict/', methods=['POST'])
def predict():
    """POST endpoint expecting JSON {'ticker': ..., 'data': ...};
    responds with the forecast projection serialized as JSON."""
    payload = request.get_json(force=True)
    projection = forecast(payload['ticker'], payload['data'])
    return projection.to_json()
if __name__ == "__main__":
    # Configure root logging before serving requests.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logging.info("Initiating prediction API")
    # NOTE(review): debug=True must not be enabled in production.
    app.run(debug=True)
|
"""
-----------------------------------------------------------------------------
This source file is part of VPET - Virtual Production Editing Tools
http://vpet.research.animationsinstitut.de/
http://github.com/FilmakademieRnd/VPET
Copyright (c) 2021 Filmakademie Baden-Wuerttemberg, Animationsinstitut R&D Lab
This project has been initiated in the scope of the EU funded project
Dreamspace under grant agreement no 610005 in the years 2014, 2015 and 2016.
http://dreamspaceproject.eu/
Post Dreamspace the project has been further developed on behalf of the
research and development activities of Animationsinstitut.
The VPET component Blender Scene Distribution is intended for research and development
purposes only. Commercial use of any kind is not permitted.
There is no support by Filmakademie. Since the Blender Scene Distribution is available
for free, Filmakademie shall only be liable for intent and gross negligence;
warranty is limited to malice. Scene DistributiorUSD may under no circumstances
be used for racist, sexual or any illegal purposes. In all non-commercial
productions, scientific publications, prototypical non-commercial software tools,
etc. using the Blender Scene Distribution Filmakademie has to be named as follows:
“VPET-Virtual Production Editing Tool by Filmakademie Baden-Württemberg,
Animationsinstitut (http://research.animationsinstitut.de)“.
In case a company or individual would like to use the Blender Scene Distribution in
a commercial surrounding or for commercial purposes, software based on these
components or any part thereof, the company/individual will have to contact
Filmakademie (research<at>filmakademie.de).
-----------------------------------------------------------------------------
"""
import bpy
import sys
import subprocess # use Python executable (for pip usage)
from pathlib import Path # Object-oriented filesystem paths since Python 3.4
def initialize():
    """Cache the add-on's shared VPET data object from Blender's window
    manager into a module-level global for the other helpers."""
    global vpet, v_prop
    vpet = bpy.context.window_manager.vpet_data
    #v_prop = bpy.context.scene.vpet_properties
def checkZMQ():
    """Return True when the pyzmq module can be imported, else False."""
    try:
        import zmq  # noqa: F401 -- import success is the whole check
    except Exception as import_error:
        print(import_error)
        return False
    return True
## Create Collections for VPET objects
def setupCollections():
    """Ensure the VPET and edit collections exist in the current scene,
    creating and linking any that are missing."""
    v_prop = bpy.context.scene.vpet_properties
    scene_children = bpy.context.scene.collection.children
    if bpy.data.collections.find(v_prop.vpet_collection) < 0:
        vpetColl = bpy.data.collections.new(v_prop.vpet_collection)
        scene_children.link(vpetColl)
    if bpy.data.collections.find(v_prop.edit_collection) < 0:
        editColl = bpy.data.collections.new(v_prop.edit_collection)
        scene_children.link(editColl)
def cleanUp(level):
    """Reset cached transfer state on the shared `vpet` object.

    level > 0 clears the object/node/geometry/material/texture lists;
    level > 1 additionally clears the editable list, the serialized
    byte buffers and the root child counter.
    """
    if level > 0:
        vpet.objectsToTransfer = []   # all objects queued for transfer
        vpet.nodeList = []            # scene nodes
        vpet.geoList = []             # geometry data
        vpet.materialList = []        # materials
        vpet.textureList = []         # textures
    if level > 1:
        vpet.editableList = []
        vpet.headerByteData = bytearray()    # header data as bytes
        vpet.nodesByteData = bytearray()     # nodes data as bytes
        vpet.geoByteData = bytearray()       # geo data as bytes
        vpet.texturesByteData = bytearray()  # texture data as bytes
        vpet.rootChildCount = 0
def installZmq():
    """Ensure pyzmq is available, installing it with pip when necessary.

    Returns a human-readable status string: 'ZMQ is already installed',
    'success', 'admin error' (install target not writeable), a Blender
    version warning, or raw subprocess output when pip fails.
    """
    if checkZMQ():
        return 'ZMQ is already installed'
    else:
        if bpy.app.version[0] == 2 and bpy.app.version[1] < 81:
            return 'This only works with Blender versions > 2.81'
        else:
            try:
                # will likely fail the first time, but works after `ensurepip.bootstrap()` has been called once
                import pip
            except ModuleNotFoundError as e:
                # only first attempt will reach here
                print("Pip import failed with: ", e)
                print("ERROR: Pip not activated, trying bootstrap()")
                try:
                    import ensurepip
                    ensurepip.bootstrap()
                except: # catch *all* exceptions
                    # NOTE(review): bare except also swallows SystemExit /
                    # KeyboardInterrupt; consider `except Exception`.
                    e = sys.exc_info()[0]
                    print("ERROR: Pip not activated, trying bootstrap()")
                    print("bootstrap failed with: ", e)

            # Use Blender's bundled interpreter so the package lands in
            # its site-packages, not the system Python's.
            py_exec = sys.executable
            # pip update
            try:
                print("Trying pip upgrade")
                output = subprocess.check_output([py_exec, '-m', 'pip', 'install', '--upgrade', 'pip'])
                print(output)
            except subprocess.CalledProcessError as e:
                print("ERROR: Couldn't update pip. Please restart Blender and try again.")
                return (e.output)
            print("INFO: Pip working! Installing pyzmq...")
            # pyzmq pip install
            try:
                print("Trying pyzmq install")
                output = subprocess.check_output([py_exec, '-m', 'pip', 'install', '--ignore-installed', 'pyzmq'])
                print(output)
                # pip reports 'not writeable' when it lacks permission on
                # the target directory; surface that as a distinct status.
                if (str(output).find('not writeable') > -1):
                    return 'admin error'
                else:
                    return 'success'
            except subprocess.CalledProcessError as e:
                print("ERROR: Couldn't install pyzmq.")
                return (e.output)
from django.conf.urls import patterns, url
# Route table for the network API. Views are referenced by dotted-string
# names relative to the 'api.network.views' prefix (old-style routing).
# NOTE(review): django.conf.urls.patterns() and string view references
# were deprecated in Django 1.8 and removed in 1.10; migrating requires
# importing the view callables, so they are left untouched here.
urlpatterns = patterns(
    'api.network.views',
    # base
    # /network - get
    url(r'^$', 'net_list', name='api_net_list'),
    # ip
    # /network/ip/<subnet> - get
    url(r'^ip/$', 'subnet_ip_list', name='api_ip_list'),
    url(r'^ip/(?P<subnet>[0-9\./]+)/$', 'subnet_ip_list', name='api_subnet_ip_list'),
    # base
    # /network/<name> - get, create, set, delete
    url(r'^(?P<name>[A-Za-z0-9\._-]+)/$', 'net_manage', name='api_net_manage'),
    # ip
    # /network/<name>/ip - get
    url(r'^(?P<name>[A-Za-z0-9\._-]+)/ip/$', 'net_ip_list', name='api_net_ip_list'),
    # /network/<name>/ip/<ip> - get, create, delete
    url(r'^(?P<name>[A-Za-z0-9\._-]+)/ip/(?P<ip>[0-9\.]+)/$', 'net_ip', name='api_net_ip'),
    # vm
    # /network/<name>/vm - get
    url(r'^(?P<name>[A-Za-z0-9\._-]+)/vm/$', 'net_vm_list', name='api_net_vm_list'),
)
|
"""
终端命令 :export FLASK_ENV=development; flask run
"""
from flask import Flask
def create_app():
    """Application factory: build the Flask app and register blueprints."""
    flask_app = Flask(__name__)
    # Register blueprints.
    from .main import main as bp_main
    flask_app.register_blueprint(bp_main)
    # NOTE(review): debug mode is hard-coded on; disable for production.
    flask_app.debug = True
    return flask_app
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
from six.moves.urllib import parse as urlparse
from webtest.app import AppError
from wsme import types as wtypes
from magnum.api import attr_validator
from magnum.api.controllers.v1 import cluster_template as api_cluster_template
from magnum.common import exception
from magnum.common import policy as magnum_policy
from magnum.tests import base
from magnum.tests.unit.api import base as api_base
from magnum.tests.unit.api import utils as apiutils
from magnum.tests.unit.objects import utils as obj_utils
class TestClusterTemplateObject(base.TestCase):
    """Unit tests for ClusterTemplate API-object field defaults."""

    def test_cluster_template_init(self):
        """Fields omitted from the POST data fall back to their defaults."""
        cluster_template_dict = apiutils.cluster_template_post_data()
        # Delete fields so the ClusterTemplate constructor must supply
        # its own defaults for them.
        del cluster_template_dict['image_id']
        del cluster_template_dict['registry_enabled']
        del cluster_template_dict['tls_disabled']
        del cluster_template_dict['public']
        del cluster_template_dict['server_type']
        del cluster_template_dict['master_lb_enabled']
        del cluster_template_dict['floating_ip_enabled']
        del cluster_template_dict['hidden']
        cluster_template = api_cluster_template.ClusterTemplate(
            **cluster_template_dict)
        # Unsupplied image_id stays wtypes.Unset; booleans default to
        # False except floating_ip_enabled (True); server_type to 'vm'.
        self.assertEqual(wtypes.Unset, cluster_template.image_id)
        self.assertFalse(cluster_template.registry_enabled)
        self.assertFalse(cluster_template.tls_disabled)
        self.assertFalse(cluster_template.public)
        self.assertEqual('vm', cluster_template.server_type)
        self.assertFalse(cluster_template.master_lb_enabled)
        self.assertTrue(cluster_template.floating_ip_enabled)
        self.assertFalse(cluster_template.hidden)
class TestListClusterTemplate(api_base.FunctionalTest):
    """Functional tests for listing and fetching cluster templates via
    the /clustertemplates REST endpoints."""

    # Attributes every cluster template payload is expected to expose.
    _cluster_template_attrs = ('name', 'apiserver_port', 'network_driver',
                               'coe', 'flavor_id', 'fixed_network',
                               'dns_nameserver', 'http_proxy',
                               'docker_volume_size', 'server_type',
                               'cluster_distro', 'external_network_id',
                               'image_id', 'registry_enabled', 'no_proxy',
                               'keypair_id', 'https_proxy', 'tls_disabled',
                               'public', 'labels', 'master_flavor_id',
                               'volume_driver', 'insecure_registry', 'hidden')

    def test_empty(self):
        """With no templates created, the collection is empty."""
        response = self.get_json('/clustertemplates')
        self.assertEqual([], response['clustertemplates'])

    def test_one(self):
        """A single template appears in the collection with all attrs."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates')
        self.assertEqual(cluster_template.uuid,
                         response['clustertemplates'][0]["uuid"])
        self._verify_attrs(self._cluster_template_attrs,
                           response['clustertemplates'][0])

    def test_get_one(self):
        """A template can be fetched individually by UUID."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates/%s' %
                                 cluster_template['uuid'])
        self.assertEqual(cluster_template.uuid, response['uuid'])
        self._verify_attrs(self._cluster_template_attrs, response)

    def test_get_one_by_name(self):
        """A template can be fetched individually by name."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates/%s' %
                                 cluster_template['name'])
        self.assertEqual(cluster_template.uuid, response['uuid'])
        self._verify_attrs(self._cluster_template_attrs, response)

    def test_get_one_by_name_not_found(self):
        """Fetching an unknown name yields a 404 JSON error."""
        response = self.get_json(
            '/clustertemplates/not_found',
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_one_by_uuid(self):
        """Fetching by an existing UUID returns that template."""
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid)
        response = self.get_json(
            '/clustertemplates/%s' % temp_uuid)
        self.assertEqual(temp_uuid, response['uuid'])

    def test_get_one_by_uuid_not_found(self):
        """Fetching an unknown UUID yields a 404 JSON error."""
        temp_uuid = uuidutils.generate_uuid()
        response = self.get_json(
            '/clustertemplates/%s' % temp_uuid,
            expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_one_by_uuid_admin(self, mock_context, mock_policy):
        """An admin can fetch a template owned by another project."""
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid,
                                               project_id=temp_uuid)
        self.context.is_admin = True
        response = self.get_json(
            '/clustertemplates/%s' % temp_uuid)
        self.assertEqual(temp_uuid, response['uuid'])

    def test_get_one_by_name_multiple_cluster_template(self):
        """An ambiguous (duplicate) name yields a 409 conflict."""
        obj_utils.create_test_cluster_template(
            self.context, name='test_clustertemplate',
            uuid=uuidutils.generate_uuid())
        obj_utils.create_test_cluster_template(
            self.context, name='test_clustertemplate',
            uuid=uuidutils.generate_uuid())
        response = self.get_json(
            '/clustertemplates/test_clustertemplate',
            expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_get_all_with_pagination_marker(self):
        """Listing with limit+marker returns only the templates after
        the marker."""
        bm_list = []
        for id_ in range(4):
            cluster_template = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(cluster_template)
        response = self.get_json('/clustertemplates?limit=3&marker=%s'
                                 % bm_list[2].uuid)
        self.assertEqual(1, len(response['clustertemplates']))
        self.assertEqual(bm_list[-1].uuid,
                         response['clustertemplates'][0]['uuid'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_get_all_with_all_projects(self, mock_context, mock_policy):
        """As admin, listing spans templates from every project."""
        for id_ in range(4):
            obj_utils.create_test_cluster_template(
                self.context, id=id_, project_id=id_,
                uuid=uuidutils.generate_uuid())
        self.context.is_admin = True
        response = self.get_json('/clustertemplates')
        self.assertEqual(4, len(response['clustertemplates']))

    def test_detail(self):
        """The /detail view includes the full attribute set."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates/detail')
        self.assertEqual(cluster_template.uuid,
                         response['clustertemplates'][0]["uuid"])
        self._verify_attrs(self._cluster_template_attrs,
                           response['clustertemplates'][0])

    def test_detail_with_pagination_marker(self):
        """The /detail view honors limit+marker pagination."""
        bm_list = []
        for id_ in range(4):
            cluster_template = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(cluster_template)
        response = self.get_json('/clustertemplates/detail?limit=3&marker=%s'
                                 % bm_list[2].uuid)
        self.assertEqual(1, len(response['clustertemplates']))
        self.assertEqual(bm_list[-1].uuid,
                         response['clustertemplates'][0]['uuid'])
        self._verify_attrs(self._cluster_template_attrs,
                           response['clustertemplates'][0])

    def test_detail_against_single(self):
        """/detail is a collection-only view; per-resource use is 404."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.get_json('/clustertemplates/%s/detail' %
                                 cluster_template['uuid'],
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_many(self):
        """All created templates are listed, regardless of order."""
        bm_list = []
        for id_ in range(5):
            cluster_template = obj_utils.create_test_cluster_template(
                self.context, id=id_,
                uuid=uuidutils.generate_uuid())
            bm_list.append(cluster_template.uuid)
        response = self.get_json('/clustertemplates')
        self.assertEqual(len(bm_list), len(response['clustertemplates']))
        uuids = [bm['uuid'] for bm in response['clustertemplates']]
        self.assertEqual(sorted(bm_list), sorted(uuids))

    def test_links(self):
        """Each resource carries self + bookmark links that validate."""
        uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, id=1, uuid=uuid)
        response = self.get_json('/clustertemplates/%s' % uuid)
        self.assertIn('links', response.keys())
        self.assertEqual(2, len(response['links']))
        self.assertIn(uuid, response['links'][0]['href'])
        for link in response['links']:
            bookmark = link['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(link['href'],
                                               bookmark=bookmark))

    def test_collection_links(self):
        """Truncated collections expose a 'next' link at the marker."""
        for id_ in range(5):
            obj_utils.create_test_cluster_template(
                self.context, id=id_, uuid=uuidutils.generate_uuid())
        response = self.get_json('/clustertemplates/?limit=3')
        self.assertEqual(3, len(response['clustertemplates']))
        next_marker = response['clustertemplates'][-1]['uuid']
        self.assertIn(next_marker, response['next'])

    def test_collection_links_default_limit(self):
        """The api.max_limit config caps page size when no limit given."""
        cfg.CONF.set_override('max_limit', 3, 'api')
        for id_ in range(5):
            obj_utils.create_test_cluster_template(
                self.context, id=id_, uuid=uuidutils.generate_uuid())
        response = self.get_json('/clustertemplates')
        self.assertEqual(3, len(response['clustertemplates']))
        next_marker = response['clustertemplates'][-1]['uuid']
        self.assertIn(next_marker, response['next'])
class TestPatch(api_base.FunctionalTest):
def setUp(self):
super(TestPatch, self).setUp()
p = mock.patch.object(attr_validator, 'validate_os_resources')
self.mock_valid_os_res = p.start()
self.addCleanup(p.stop)
self.cluster_template = obj_utils.create_test_cluster_template(
self.context,
name='cluster_model_example_A',
image_id='nerdherd',
apiserver_port=8080,
fixed_network='private',
flavor_id='m1.magnum',
master_flavor_id='m1.magnum',
external_network_id='public',
keypair_id='test',
volume_driver='rexray',
public=False,
docker_volume_size=20,
coe='swarm',
labels={'key1': 'val1', 'key2': 'val2'},
hidden=False
)
def test_update_not_found(self):
uuid = uuidutils.generate_uuid()
response = self.patch_json('/clustertemplates/%s' % uuid,
[{'path': '/name',
'value': 'cluster_model_example_B',
'op': 'add'}],
expect_errors=True)
self.assertEqual(404, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
def test_update_cluster_template_with_cluster(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/network_driver',
'value': 'flannel',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(400, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(response.json['errors'])
self.assertIn(cluster_template.uuid,
response.json['errors'][0]['detail'])
def test_update_cluster_template_name_with_cluster(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/name',
'value': 'cluster_model_example_B',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
@mock.patch.object(magnum_policy, 'enforce')
def test_update_public_cluster_template_success(self, mock_policy):
mock_policy.return_value = True
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/public', 'value': True,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertTrue(response['public'])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_public_cluster_template_fail(self, mock_policy):
mock_policy.return_value = False
self.assertRaises(AppError, self.patch_json,
'/clustertemplates/%s' % self.cluster_template.uuid,
[{'path': '/public', 'value': True,
'op': 'replace'}])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_cluster_template_with_cluster_allow_update(self,
mock_policy):
mock_policy.return_value = True
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/public',
'value': True,
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertEqual(response['public'], True)
@mock.patch.object(magnum_policy, 'enforce')
def test_update_hidden_cluster_template_success(self, mock_policy):
mock_policy.return_value = True
response = self.patch_json('/clustertemplates/%s' %
self.cluster_template.uuid,
[{'path': '/hidden', 'value': True,
'op': 'replace'}])
self.assertEqual('application/json', response.content_type)
self.assertEqual(200, response.status_code)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertTrue(response['hidden'])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_hidden_cluster_template_fail(self, mock_policy):
mock_policy.return_value = False
self.assertRaises(AppError, self.patch_json,
'/clustertemplates/%s' % self.cluster_template.uuid,
[{'path': '/hidden', 'value': True,
'op': 'replace'}])
@mock.patch.object(magnum_policy, 'enforce')
def test_update_cluster_template_hidden_with_cluster_allow_update(
self, mock_policy):
mock_policy.return_value = True
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/hidden',
'value': True,
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertEqual(response['hidden'], True)
def test_update_cluster_template_with_devicemapper(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
note = 'deprecated in favor of overlay2'
with self.assertWarnsRegex(DeprecationWarning, note):
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/docker_storage_driver',
'value': 'devicemapper',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
def test_update_cluster_template_replace_labels_success(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/labels',
'value': '{\'etcd_volume_size\': \'1\'}',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(200, response.status_int)
response = self.get_json('/clustertemplates/%s' %
self.cluster_template.uuid)
self.assertEqual(response['labels'], {'etcd_volume_size': '1'})
def test_update_cluster_template_with_cluster_not_allow_update(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
obj_utils.create_test_cluster(
self.context, cluster_template_id=cluster_template.uuid)
response = self.patch_json('/clustertemplates/%s' %
cluster_template.uuid,
[{'path': '/network_driver',
'value': 'calico',
'op': 'replace'}],
expect_errors=True)
self.assertEqual(400, response.status_code)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_replace_singular(self, mock_utcnow):
    """Replacing /name updates name and updated_at and nothing else."""
    name = 'cluster_model_example_B'
    test_time = datetime.datetime(2000, 1, 1, 0, 0)
    mock_utcnow.return_value = test_time
    response = self.patch_json('/clustertemplates/%s' %
                               self.cluster_template.uuid,
                               [{'path': '/name', 'value': name,
                                 'op': 'replace'}])
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(200, response.status_code)
    response = self.get_json('/clustertemplates/%s' %
                             self.cluster_template.uuid)
    self.assertEqual(name, response['name'])
    # updated_at is serialized with a timezone; strip it to compare with
    # the naive mocked utcnow value.
    return_updated_at = timeutils.parse_isotime(
        response['updated_at']).replace(tzinfo=None)
    self.assertEqual(test_time, return_updated_at)
    # Assert nothing else was changed
    self.assertEqual(self.cluster_template.uuid, response['uuid'])
    self.assertEqual(self.cluster_template.image_id, response['image_id'])
    self.assertEqual(self.cluster_template.apiserver_port,
                     response['apiserver_port'])
    self.assertEqual(self.cluster_template.fixed_network,
                     response['fixed_network'])
    self.assertEqual(self.cluster_template.network_driver,
                     response['network_driver'])
    self.assertEqual(self.cluster_template.volume_driver,
                     response['volume_driver'])
    self.assertEqual(self.cluster_template.docker_volume_size,
                     response['docker_volume_size'])
    self.assertEqual(self.cluster_template.coe,
                     response['coe'])
    self.assertEqual(self.cluster_template.http_proxy,
                     response['http_proxy'])
    self.assertEqual(self.cluster_template.https_proxy,
                     response['https_proxy'])
    self.assertEqual(self.cluster_template.no_proxy,
                     response['no_proxy'])
    self.assertEqual(self.cluster_template.labels,
                     response['labels'])
def test_replace_cluster_template_with_no_exist_flavor_id(self):
    """Replacing flavor_id with an unknown flavor yields a 400 error."""
    self.mock_valid_os_res.side_effect = exception.FlavorNotFound("aaa")
    patch = [{'path': '/flavor_id', 'value': 'aaa', 'op': 'replace'}]
    response = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
def test_replace_cluster_template_with_no_exist_keypair_id(self):
    """Replacing keypair_id with an unknown keypair yields a 404 error."""
    self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("aaa")
    response = self.patch_json('/clustertemplates/%s' %
                               self.cluster_template.uuid,
                               [{'path': '/keypair_id', 'value': 'aaa',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    # Unlike the flavor/network cases, a missing keypair maps to 404.
    self.assertEqual(404, response.status_code)
    self.assertTrue(response.json['errors'])
def test_replace_cluster_template_with_no_exist_external_network_id(self):
    """Replacing external_network_id with an unknown network yields 400."""
    self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound(
        "aaa")
    response = self.patch_json('/clustertemplates/%s' %
                               self.cluster_template.uuid,
                               [{'path': '/external_network_id',
                                 'value': 'aaa',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
def test_replace_cluster_template_with_no_exist_image_id(self):
    """Replacing image_id with an unknown image yields a 400 error."""
    self.mock_valid_os_res.side_effect = exception.ImageNotFound("aaa")
    response = self.patch_json('/clustertemplates/%s' %
                               self.cluster_template.uuid,
                               [{'path': '/image_id', 'value': 'aaa',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
# NOTE(review): misnamed — this test PATCHes an existing template, it does
# not create one; there is also a create-test with the same name in TestPost.
# Kept as-is because renaming would change the discovered test name.
def test_create_cluster_template_with_no_os_distro_image(self):
    """Replacing image_id with an image lacking os_distro yields 400."""
    image_exce = exception.OSDistroFieldNotFound('img')
    self.mock_valid_os_res.side_effect = image_exce
    response = self.patch_json('/clustertemplates/%s' %
                               self.cluster_template.uuid,
                               [{'path': '/image_id', 'value': 'img',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
def test_remove_singular(self):
    """Removing /dns_nameserver nulls that field and changes nothing else."""
    response = self.get_json('/clustertemplates/%s' %
                             self.cluster_template.uuid)
    self.assertIsNotNone(response['dns_nameserver'])
    response = self.patch_json('/clustertemplates/%s' %
                               self.cluster_template.uuid,
                               [{'path': '/dns_nameserver',
                                 'op': 'remove'}])
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(200, response.status_code)
    response = self.get_json('/clustertemplates/%s' %
                             self.cluster_template.uuid)
    self.assertIsNone(response['dns_nameserver'])
    # Assert nothing else was changed
    self.assertEqual(self.cluster_template.uuid, response['uuid'])
    self.assertEqual(self.cluster_template.name, response['name'])
    self.assertEqual(self.cluster_template.apiserver_port,
                     response['apiserver_port'])
    self.assertEqual(self.cluster_template.image_id,
                     response['image_id'])
    self.assertEqual(self.cluster_template.fixed_network,
                     response['fixed_network'])
    self.assertEqual(self.cluster_template.network_driver,
                     response['network_driver'])
    self.assertEqual(self.cluster_template.volume_driver,
                     response['volume_driver'])
    self.assertEqual(self.cluster_template.docker_volume_size,
                     response['docker_volume_size'])
    self.assertEqual(self.cluster_template.coe, response['coe'])
    self.assertEqual(self.cluster_template.http_proxy,
                     response['http_proxy'])
    self.assertEqual(self.cluster_template.https_proxy,
                     response['https_proxy'])
    self.assertEqual(self.cluster_template.no_proxy, response['no_proxy'])
    self.assertEqual(self.cluster_template.labels, response['labels'])
def test_remove_non_existent_property_fail(self):
    """Removing an unknown property must fail with 400."""
    patch = [{'path': '/non-existent', 'op': 'remove'}]
    response = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_code)
    self.assertTrue(response.json['errors'])
def test_remove_mandatory_property_fail(self):
    """Each mandatory property rejects removal with a 400 error."""
    mandatory_properties = ('/image_id', '/coe',
                            '/external_network_id', '/server_type',
                            '/tls_disabled', '/public',
                            '/registry_enabled',
                            '/cluster_distro', '/network_driver')
    url = '/clustertemplates/%s' % self.cluster_template.uuid
    for prop in mandatory_properties:
        response = self.patch_json(url,
                                   [{'path': prop, 'op': 'remove'}],
                                   expect_errors=True)
        self.assertEqual('application/json', response.content_type)
        self.assertEqual(400, response.status_code)
        self.assertTrue(response.json['errors'])
def test_add_root_non_existent(self):
    """Adding an unknown root attribute must fail with 400."""
    patch = [{'path': '/foo', 'value': 'bar', 'op': 'add'}]
    response = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual('application/json', response.content_type)
    self.assertEqual(400, response.status_int)
    self.assertTrue(response.json['errors'])
def test_remove_uuid(self):
    """The uuid field cannot be removed."""
    patch = [{'path': '/uuid', 'op': 'remove'}]
    response = self.patch_json(
        '/clustertemplates/%s' % self.cluster_template.uuid,
        patch, expect_errors=True)
    self.assertEqual(400, response.status_int)
    self.assertEqual('application/json', response.content_type)
    self.assertTrue(response.json['errors'])
@mock.patch("magnum.common.policy.enforce")
@mock.patch("magnum.common.context.make_context")
def test_update_cluster_template_as_admin(self, mock_context, mock_policy):
    """An admin may patch a template owned by another project."""
    temp_uuid = uuidutils.generate_uuid()
    # Template owned by a different project (project_id == its own uuid).
    obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid,
                                           project_id=temp_uuid)
    self.context.is_admin = True
    response = self.patch_json('/clustertemplates/%s' % temp_uuid,
                               [{'path': '/name',
                                 'value': 'cluster_model_example_B',
                                 'op': 'replace'}],
                               expect_errors=True)
    self.assertEqual(200, response.status_int)
class TestPost(api_base.FunctionalTest):
def setUp(self):
    """Stub out OS resource validation so POSTs don't hit real services."""
    super(TestPost, self).setUp()
    p = mock.patch.object(attr_validator, 'validate_os_resources')
    self.mock_valid_os_res = p.start()
    self.addCleanup(p.stop)
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_cluster_template(self, mock_utcnow,
                                 mock_image_data):
    """A valid POST returns 201 with location, uuid and created_at set."""
    bdict = apiutils.cluster_template_post_data()
    test_time = datetime.datetime(2000, 1, 1, 0, 0)
    mock_utcnow.return_value = test_time
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    response = self.post_json('/clustertemplates', bdict)
    self.assertEqual(201, response.status_int)
    # Check location header
    self.assertIsNotNone(response.location)
    expected_location = '/v1/clustertemplates/%s' % bdict['uuid']
    self.assertEqual(expected_location,
                     urlparse.urlparse(response.location).path)
    self.assertEqual(bdict['uuid'], response.json['uuid'])
    # Bug fix: the old code passed the bound method `response.json.keys`
    # (no call) as the container, so the membership check never tested
    # the actual keys.
    self.assertNotIn('updated_at', response.json.keys())
    return_created_at = timeutils.parse_isotime(
        response.json['created_at']).replace(tzinfo=None)
    self.assertEqual(test_time, return_created_at)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_set_project_id_and_user_id(
        self, mock_image_data):
    """The API injects the caller's project_id/user_id into the DB create."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        self.post_json('/clustertemplates', bdict)
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertEqual(self.context.project_id,
                         cc_mock.call_args[0][0]['project_id'])
        self.assertEqual(self.context.user_id,
                         cc_mock.call_args[0][0]['user_id'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_doesnt_contain_id(self,
                                                   mock_image_data):
    """POSTed values are stored without leaking a DB-internal 'id'."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(image_id='my-image')
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(bdict['image_id'], response.json['image_id'])
        cc_mock.assert_called_once_with(mock.ANY)
        # Check that 'id' is not in first arg of positional args
        self.assertNotIn('id', cc_mock.call_args[0][0])
def _create_model_raises_app_error(self, **kwargs):
    """Assert that POSTing a template built with **kwargs raises AppError.

    Also verifies the DB create was never reached, i.e. validation
    rejected the request before persistence.
    """
    # Create mock for db and image data
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock,\
        mock.patch('magnum.api.attr_validator.validate_image')\
            as mock_image_data:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(**kwargs)
        self.assertRaises(AppError, self.post_json, '/clustertemplates',
                          bdict)
        self.assertFalse(cc_mock.called)
def test_create_cluster_template_with_invalid_long_string(self):
    """Every string field rejects values longer than 255 characters."""
    overlong = 'i' * 256
    fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id",
              "dns_nameserver", "keypair_id", "external_network_id",
              "cluster_distro", "fixed_network", "apiserver_port",
              "docker_volume_size", "http_proxy", "https_proxy",
              "no_proxy", "network_driver", "labels", "volume_driver"]
    for field in fields:
        self._create_model_raises_app_error(**{field: overlong})
def test_create_cluster_template_with_invalid_empty_string(self):
    """Every listed field rejects an empty-string value."""
    fields = ["uuid", "name", "image_id", "flavor_id", "master_flavor_id",
              "dns_nameserver", "keypair_id", "external_network_id",
              "cluster_distro", "fixed_network", "apiserver_port",
              "docker_volume_size", "labels", "http_proxy", "https_proxy",
              "no_proxy", "network_driver", "volume_driver", "coe"]
    for field in fields:
        self._create_model_raises_app_error(**{field: ''})
def test_create_cluster_template_with_invalid_coe(self):
    """Unknown COE names are rejected."""
    for bad_coe in ('k8s', 'storm', 'meson', 'osomatsu'):
        self._create_model_raises_app_error(coe=bad_coe)
def test_create_cluster_template_with_invalid_docker_volume_size(self):
    """Negative, too-small-for-devicemapper, and non-int sizes fail."""
    self._create_model_raises_app_error(docker_volume_size=-1)
    # devicemapper requires a docker_volume_size of at least 3 GB.
    self._create_model_raises_app_error(
        docker_volume_size=1,
        docker_storage_driver="devicemapper")
    self._create_model_raises_app_error(
        docker_volume_size=2,
        docker_storage_driver="devicemapper")
    self._create_model_raises_app_error(docker_volume_size='notanint')
def test_create_cluster_template_with_invalid_dns_nameserver(self):
    """Malformed IP addresses and hostnames are rejected as nameservers."""
    for bad_dns in ('1.1.2', '1.1..1', 'openstack.org'):
        self._create_model_raises_app_error(dns_nameserver=bad_dns)
def test_create_cluster_template_with_invalid_apiserver_port(self):
    """Ports outside 1024-65535 and non-integers are rejected."""
    for bad_port in (-12, 65536, 0, 1023, 'not an int'):
        self._create_model_raises_app_error(apiserver_port=bad_port)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_labels(self, mock_image_data):
    """POSTed labels round-trip unchanged through the API."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(
            labels={'key1': 'val1', 'key2': 'val2'})
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(bdict['labels'],
                         response.json['labels'])
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_docker_volume_size(self,
                                                         mock_image_data):
    """A valid docker_volume_size round-trips unchanged."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(docker_volume_size=99)
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(bdict['docker_volume_size'],
                         response.json['docker_volume_size'])
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_overlay(self, mock_image_data):
    """Using the 'overlay' storage driver warns (deprecated) but succeeds."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(
            docker_volume_size=1, docker_storage_driver="overlay")
        note = 'deprecated in favor of overlay2'
        with self.assertWarnsRegex(DeprecationWarning, note):
            response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(bdict['docker_volume_size'],
                         response.json['docker_volume_size'])
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
def _test_create_cluster_template_network_driver_attr(
        self,
        cluster_template_dict,
        cluster_template_config_dict,
        expect_errors,
        mock_image_data):
    """Shared driver for the network_driver validation tests.

    :param cluster_template_dict: extra POST fields (e.g. coe, driver)
    :param cluster_template_config_dict: [cluster_template] config
        overrides to apply before the request
    :param expect_errors: when True, expect a 400; otherwise expect the
        template to be created with the requested (or default) driver
    """
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    for k, v in cluster_template_config_dict.items():
        cfg.CONF.set_override(k, v, 'cluster_template')
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        bdict = apiutils.cluster_template_post_data(
            **cluster_template_dict)
        response = self.post_json('/clustertemplates', bdict,
                                  expect_errors=expect_errors)
        if expect_errors:
            self.assertEqual(400, response.status_int)
        else:
            # NOTE(review): falls back to the swarm default driver when
            # none was posted — presumably matching the API's own default;
            # confirm against the controller if this test changes.
            expected_driver = bdict.get('network_driver')
            if not expected_driver:
                expected_driver = (
                    cfg.CONF.cluster_template.swarm_default_network_driver)
            self.assertEqual(expected_driver,
                             response.json['network_driver'])
            self.assertEqual(bdict['image_id'],
                             response.json['image_id'])
            cc_mock.assert_called_once_with(mock.ANY)
            self.assertNotIn('id', cc_mock.call_args[0][0])
            self.assertTrue(uuidutils.is_uuid_like(response.json['uuid']))
def test_create_cluster_template_with_network_driver(self):
    """flannel with kubernetes is accepted under the default config."""
    template_fields = {'coe': 'kubernetes',
                       'network_driver': 'flannel'}
    default_config = {}
    self._test_create_cluster_template_network_driver_attr(
        template_fields,
        default_config,
        False)
def test_create_cluster_template_with_no_network_driver(self):
    """Omitting network_driver falls back to the configured default."""
    self._test_create_cluster_template_network_driver_attr(
        {},
        {},
        False)
def test_create_cluster_template_with_network_driver_non_def_config(self):
    """A driver on the configured allow-list is accepted."""
    cluster_template_dict = {'coe': 'kubernetes',
                             'network_driver': 'flannel'}
    config_dict = {
        'kubernetes_allowed_network_drivers': ['flannel', 'foo']}
    expect_errors_flag = False
    self._test_create_cluster_template_network_driver_attr(
        cluster_template_dict,
        config_dict,
        expect_errors_flag)
def test_create_cluster_template_with_invalid_network_driver(self):
    """A driver missing from the configured allow-list is rejected."""
    cluster_template_dict = {'coe': 'kubernetes',
                             'network_driver': 'bad_driver'}
    config_dict = {
        'kubernetes_allowed_network_drivers': ['flannel', 'good_driver']}
    expect_errors_flag = True
    self._test_create_cluster_template_network_driver_attr(
        cluster_template_dict,
        config_dict,
        expect_errors_flag)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_volume_driver(self,
                                                    mock_image_data):
    """An explicit volume_driver round-trips unchanged."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(volume_driver='rexray')
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(bdict['volume_driver'],
                         response.json['volume_driver'])
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_volume_driver(self,
                                                       mock_image_data):
    """The default (absent) volume_driver round-trips unchanged."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        response = self.post_json('/clustertemplates', bdict)
        self.assertEqual(bdict['volume_driver'],
                         response.json['volume_driver'])
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertNotIn('id', cc_mock.call_args[0][0])
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch.object(magnum_policy, 'enforce')
def test_create_cluster_template_public_success(self, mock_policy,
                                                mock_image_data):
    """public=True succeeds when the publish policy allows it."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_policy.return_value = True
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(public=True)
        response = self.post_json('/clustertemplates', bdict)
        self.assertTrue(response.json['public'])
        # The publish policy must have been consulted (non-raising check).
        mock_policy.assert_called_with(mock.ANY,
                                       "clustertemplate:publish",
                                       None, do_raise=False)
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertNotIn('id', cc_mock.call_args[0][0])
        self.assertTrue(cc_mock.call_args[0][0]['public'])
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch.object(magnum_policy, 'enforce')
def test_create_cluster_template_public_fail(self, mock_policy,
                                             mock_image_data):
    """public=True is rejected when the publish policy denies it."""
    with mock.patch.object(self.dbapi, 'create_cluster_template',
                           wraps=self.dbapi.create_cluster_template):
        # make policy enforcement fail
        mock_policy.return_value = False
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(public=True)
        self.assertRaises(AppError, self.post_json, '/clustertemplates',
                          bdict)
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch.object(magnum_policy, 'enforce')
def test_create_cluster_template_public_not_set(self, mock_policy,
                                                mock_image_data):
    """public=False never triggers the publish policy check."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(public=False)
        response = self.post_json('/clustertemplates', bdict)
        self.assertFalse(response.json['public'])
        # policy enforcement is called only once for enforce_wsgi
        self.assertEqual(1, mock_policy.call_count)
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertNotIn('id', cc_mock.call_args[0][0])
        self.assertFalse(cc_mock.call_args[0][0]['public'])
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch.object(magnum_policy, 'enforce')
def test_create_cluster_template_hidden_success(self, mock_policy,
                                                mock_image_data):
    """hidden=True succeeds when the publish policy allows it."""
    with mock.patch.object(
            self.dbapi, 'create_cluster_template',
            wraps=self.dbapi.create_cluster_template) as cc_mock:
        mock_policy.return_value = True
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(hidden=True)
        response = self.post_json('/clustertemplates', bdict)
        self.assertTrue(response.json['hidden'])
        # hidden is gated by the same publish policy as public.
        mock_policy.assert_called_with(mock.ANY,
                                       "clustertemplate:publish",
                                       None, do_raise=False)
        cc_mock.assert_called_once_with(mock.ANY)
        self.assertNotIn('id', cc_mock.call_args[0][0])
        self.assertTrue(cc_mock.call_args[0][0]['hidden'])
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch.object(magnum_policy, 'enforce')
def test_create_cluster_template_hidden_fail(self, mock_policy,
                                             mock_image_data):
    """hidden=True is rejected when the publish policy denies it."""
    with mock.patch.object(self.dbapi, 'create_cluster_template',
                           wraps=self.dbapi.create_cluster_template):
        # make policy enforcement fail
        mock_policy.return_value = False
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data(hidden=True)
        self.assertRaises(AppError, self.post_json, '/clustertemplates',
                          bdict)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_os_distro_image(self,
                                                         mock_image_data):
    """An image without an os_distro property yields a 400 on create."""
    mock_image_data.side_effect = exception.OSDistroFieldNotFound('img')
    bdict = apiutils.cluster_template_post_data()
    del bdict['uuid']
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(400, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_os_distro_image(self,
                                                      mock_image_data):
    """An image with a valid os_distro creates the template (201)."""
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    del bdict['uuid']
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(201, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_image_name(self,
                                                 mock_image_data):
    """A template referencing an image by name is created (201)."""
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    bdict.pop('uuid')
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(201, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_exist_image_name(self,
                                                          mock_image_data):
    """An unresolvable image name yields a 404 on create."""
    mock_image_data.side_effect = exception.ResourceNotFound('test-img')
    bdict = apiutils.cluster_template_post_data()
    del bdict['uuid']
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(404, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_multi_image_name(self,
                                                       mock_image_data):
    """An ambiguous image name (multiple matches) yields a 409 conflict."""
    mock_image_data.side_effect = exception.Conflict('Multiple images')
    bdict = apiutils.cluster_template_post_data()
    del bdict['uuid']
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(409, response.status_int)
def test_create_cluster_template_without_image_id(self):
    """image_id is mandatory: POSTing without it is a 400."""
    bdict = apiutils.cluster_template_post_data()
    bdict.pop('image_id')
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(400, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_without_keypair_id(self,
                                                    mock_image_data):
    """keypair_id is optional: POSTing without it still creates (201)."""
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    del bdict['keypair_id']
    response = self.post_json('/clustertemplates', bdict)
    self.assertEqual(201, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_dns(self,
                                          mock_image_data):
    """dns_nameserver round-trips unchanged on create."""
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    response = self.post_json('/clustertemplates', bdict)
    self.assertEqual(201, response.status_int)
    self.assertEqual(bdict['dns_nameserver'],
                     response.json['dns_nameserver'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_exist_keypair(self,
                                                       mock_image_data):
    """An unknown keypair yields a 404 on create."""
    self.mock_valid_os_res.side_effect = exception.KeyPairNotFound("Test")
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(404, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_flavor(self,
                                             mock_image_data):
    """flavor_id and master_flavor_id round-trip unchanged on create."""
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    response = self.post_json('/clustertemplates', bdict)
    self.assertEqual(201, response.status_int)
    self.assertEqual(bdict['flavor_id'],
                     response.json['flavor_id'])
    self.assertEqual(bdict['master_flavor_id'],
                     response.json['master_flavor_id'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_no_exist_flavor(self,
                                                      mock_image_data):
    """An unknown flavor yields a 400 on create."""
    self.mock_valid_os_res.side_effect = exception.FlavorNotFound("flavor")
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(400, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_with_external_network(self,
                                                       mock_image_data):
    """external_network_id round-trips unchanged on create."""
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    response = self.post_json('/clustertemplates', bdict)
    self.assertEqual(201, response.status_int)
    self.assertEqual(bdict['external_network_id'],
                     response.json['external_network_id'])
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_no_exist_external_network(
        self, mock_image_data):
    """An unknown external network yields a 400 on create."""
    self.mock_valid_os_res.side_effect = exception.ExternalNetworkNotFound(
        "test")
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    bdict = apiutils.cluster_template_post_data()
    response = self.post_json('/clustertemplates', bdict,
                              expect_errors=True)
    self.assertEqual(400, response.status_int)
@mock.patch('magnum.api.attr_validator.validate_image')
def test_create_cluster_template_without_name(self, mock_image_data):
    """Omitting the name still creates (201); the API generates one."""
    with mock.patch.object(self.dbapi, 'create_cluster_template',
                           wraps=self.dbapi.create_cluster_template):
        mock_image_data.return_value = {'name': 'mock_name',
                                        'os_distro': 'fedora-atomic'}
        bdict = apiutils.cluster_template_post_data()
        bdict.pop('name')
        resp = self.post_json('/clustertemplates', bdict)
        self.assertEqual(201, resp.status_int)
        self.assertIsNotNone(resp.json['name'])
def test_create_cluster_with_disabled_driver(self):
    """Creating a template for a disabled COE driver raises an AppError."""
    cfg.CONF.set_override('disabled_drivers', ['mesos_ubuntu_v1'],
                          group='drivers')
    bdict = apiutils.cluster_template_post_data(coe="mesos")
    self.assertRaises(AppError, self.post_json, '/clustertemplates', bdict)
@mock.patch('magnum.api.attr_validator.validate_image')
@mock.patch('oslo_utils.timeutils.utcnow')
def test_create_cluster_template_with_multi_dns(self, mock_utcnow,
                                                mock_image_data):
    """A comma-separated dns_nameserver list is accepted (201)."""
    bdict = apiutils.cluster_template_post_data(
        dns_nameserver="8.8.8.8,114.114.114.114")
    test_time = datetime.datetime(2000, 1, 1, 0, 0)
    mock_utcnow.return_value = test_time
    mock_image_data.return_value = {'name': 'mock_name',
                                    'os_distro': 'fedora-atomic'}
    response = self.post_json('/clustertemplates', bdict)
    self.assertEqual(201, response.status_int)
    # Check location header
    self.assertIsNotNone(response.location)
    expected_location = '/v1/clustertemplates/%s' % bdict['uuid']
    self.assertEqual(expected_location,
                     urlparse.urlparse(response.location).path)
    self.assertEqual(bdict['uuid'], response.json['uuid'])
    # Bug fix: the old code passed the bound method `response.json.keys`
    # (no call) as the container, so the membership check never tested
    # the actual keys.
    self.assertNotIn('updated_at', response.json.keys())
    return_created_at = timeutils.parse_isotime(
        response.json['created_at']).replace(tzinfo=None)
    self.assertEqual(test_time, return_created_at)
class TestDelete(api_base.FunctionalTest):
    """DELETE /clustertemplates/<ident> behavior."""

    def test_delete_cluster_template(self):
        """A deleted template is no longer retrievable (404)."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        self.delete('/clustertemplates/%s' % cluster_template.uuid)
        response = self.get_json('/clustertemplates/%s' %
                                 cluster_template.uuid,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_cluster_template_with_cluster(self):
        """A template referenced by a cluster cannot be deleted (400)."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        obj_utils.create_test_cluster(
            self.context, cluster_template_id=cluster_template.uuid)
        response = self.delete('/clustertemplates/%s' % cluster_template.uuid,
                               expect_errors=True)
        self.assertEqual(400, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])
        # The error detail names the offending template.
        self.assertIn(cluster_template.uuid,
                      response.json['errors'][0]['detail'])

    def test_delete_cluster_template_not_found(self):
        """Deleting an unknown uuid is a 404."""
        uuid = uuidutils.generate_uuid()
        response = self.delete('/clustertemplates/%s' % uuid,
                               expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_cluster_template_with_name(self):
        """Templates may be deleted by name (204)."""
        cluster_template = obj_utils.create_test_cluster_template(self.context)
        response = self.delete('/clustertemplates/%s' %
                               cluster_template['name'],
                               expect_errors=True)
        self.assertEqual(204, response.status_int)

    def test_delete_cluster_template_with_name_not_found(self):
        """Deleting an unknown name is a 404."""
        response = self.delete('/clustertemplates/not_found',
                               expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    def test_delete_multiple_cluster_template_by_name(self):
        """Deleting by an ambiguous name (two matches) is a 409 conflict."""
        obj_utils.create_test_cluster_template(self.context,
                                               name='test_cluster_template',
                                               uuid=uuidutils.generate_uuid())
        obj_utils.create_test_cluster_template(self.context,
                                               name='test_cluster_template',
                                               uuid=uuidutils.generate_uuid())
        response = self.delete('/clustertemplates/test_cluster_template',
                               expect_errors=True)
        self.assertEqual(409, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(response.json['errors'])

    @mock.patch("magnum.common.policy.enforce")
    @mock.patch("magnum.common.context.make_context")
    def test_delete_cluster_template_as_admin(self, mock_context, mock_policy):
        """An admin may delete a template owned by another project (204)."""
        temp_uuid = uuidutils.generate_uuid()
        obj_utils.create_test_cluster_template(self.context, uuid=temp_uuid,
                                               project_id=temp_uuid)
        self.context.is_admin = True
        response = self.delete('/clustertemplates/%s' % temp_uuid,
                               expect_errors=True)
        self.assertEqual(204, response.status_int)
class TestClusterTemplatePolicyEnforcement(api_base.FunctionalTest):
def _common_policy_check(self, rule, func, *arg, **kwarg):
    """Deny *rule* for the caller's project and assert a proper 403."""
    self.policy.set_rules({rule: "project:non_fake"})
    response = func(*arg, **kwarg)
    self.assertEqual(403, response.status_int)
    self.assertEqual('application/json', response.content_type)
    # Bug fix: assertTrue(x, y) treats y as the failure *message*, so the
    # old call always passed regardless of the response body.  Assert the
    # error detail actually contains the policy text.
    self.assertIn(
        "Policy doesn't allow %s to be performed." % rule,
        response.json['errors'][0]['detail'])
def test_policy_disallow_get_all(self):
    """Listing templates is denied when the get_all rule forbids it."""
    self._common_policy_check(
        "cluster_template:get_all", self.get_json, '/clustertemplates',
        expect_errors=True)
def test_policy_disallow_get_one(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
self._common_policy_check(
"cluster_template:get", self.get_json,
'/clustertemplates/%s' % cluster_template.uuid,
expect_errors=True)
def test_policy_disallow_detail(self):
self._common_policy_check(
"cluster_template:detail", self.get_json,
'/clustertemplates/%s/detail' % uuidutils.generate_uuid(),
expect_errors=True)
def test_policy_disallow_update(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
name='example_A',
uuid=uuidutils.generate_uuid())
self._common_policy_check(
"cluster_template:update", self.patch_json,
'/clustertemplates/%s' % cluster_template.name,
[{'path': '/name', 'value': "new_name", 'op': 'replace'}],
expect_errors=True)
def test_policy_disallow_create(self):
bdict = apiutils.cluster_template_post_data(
name='cluster_model_example_A')
self._common_policy_check(
"cluster_template:create", self.post_json, '/clustertemplates',
bdict, expect_errors=True)
def test_policy_disallow_delete(self):
cluster_template = obj_utils.create_test_cluster_template(self.context)
self._common_policy_check(
"cluster_template:delete", self.delete,
'/clustertemplates/%s' % cluster_template.uuid, expect_errors=True)
def _owner_check(self, rule, func, *args, **kwargs):
self.policy.set_rules({rule: "user_id:%(user_id)s"})
response = func(*args, **kwargs)
self.assertEqual(403, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(
"Policy doesn't allow %s to be performed." % rule,
response.json['errors'][0]['detail'])
def test_policy_only_owner_get_one(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
user_id='another')
self._owner_check("cluster_template:get", self.get_json,
'/clustertemplates/%s' % cluster_template.uuid,
expect_errors=True)
def test_policy_only_owner_update(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
user_id='another')
self._owner_check(
"cluster_template:update", self.patch_json,
'/clustertemplates/%s' % cluster_template.uuid,
[{'path': '/name', 'value': "new_name", 'op': 'replace'}],
expect_errors=True)
def test_policy_only_owner_delete(self):
cluster_template = obj_utils.create_test_cluster_template(
self.context,
user_id='another')
self._owner_check(
"cluster_template:delete", self.delete,
'/clustertemplates/%s' % cluster_template.uuid,
expect_errors=True)
|
# coding:utf-8
"""Doctest example"""
import doctest
import unittest
def sum_fnc(a, b):
    """
    Return the sum of ``a`` and ``b``.

    >>> sum_fnc(10, 20)
    30
    >>> sum_fnc(-10, -20)
    -30
    >>> sum_fnc(10, -20)
    -10
    """
    return a + b
class TestSumFnc(unittest.TestCase):
    """Mirror sum_fnc's doctests across the three sign combinations."""

    def test_sum_with_positive_numbers(self):
        self.assertEqual(sum_fnc(10, 20), 30)

    def test_sum_with_negative_numbers(self):
        self.assertEqual(sum_fnc(-10, -20), -30)

    def test_sum_with_mixed_signal_numbers(self):
        self.assertEqual(sum_fnc(10, -20), -10)
# Run the embedded doctests first (verbose), then the unittest suite;
# unittest.main() exits the process with the suite's status.
if __name__ == '__main__':
    doctest.testmod(verbose=1)
    unittest.main()
|
from __future__ import print_function

import copy
import os
import time
import warnings

import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image
# content loss
class ContentLoss(nn.Module):
    """Transparent probe layer: records the MSE between its input and a fixed
    content target, then passes the input through unchanged."""

    def __init__(self, target,):
        super().__init__()
        # Detach so the target acts as a constant, outside the gradient graph.
        self.target = target.detach()

    def forward(self, input):
        self.loss = F.mse_loss(input, self.target)
        return input
# style loss
class StyleLoss(nn.Module):
    """Transparent probe layer: records the MSE between the Gram matrix of its
    input and that of a fixed style target; the input passes through unchanged."""

    def __init__(self, target_feature):
        super().__init__()
        self.target = self.gram_matrix(target_feature).detach()

    def gram_matrix(self, input):
        """Normalized Gram matrix of a (batch, ch, h, w) feature map."""
        n, ch, h, w = input.size()
        flat = input.view(n * ch, h * w)
        gram = torch.mm(flat, flat.t())
        # Normalize by the element count so large layers don't dominate.
        return gram.div(n * ch * h * w)

    def forward(self, input):
        self.loss = F.mse_loss(self.gram_matrix(input), self.target)
        return input
# for normalizing the input image
class Normalization(nn.Module):
    """Channel-wise (x - mean) / std normalization for image batches."""

    def __init__(self, mean, std):
        super().__init__()
        # Reshape to (C, 1, 1) so the statistics broadcast over H and W.
        self.mean = mean.clone().detach().view(-1, 1, 1)
        self.std = std.clone().detach().view(-1, 1, 1)

    def forward(self, img):
        return img.sub(self.mean).div(self.std)
class NeuralStyle(object):
    """Gatys-style neural style transfer driven by a pre-trained VGG-19.

    Configure once via the constructor, then call ``run_style_transfer`` with
    a style and a content image (file path, BGR numpy array, or PIL image).
    """

    def __init__(self, content_layers=None, style_layers=None,
                 num_steps=300, style_weight=1000000, content_weight=1, use_gpu=False,
                 retain_dims=True, downsample=True, save=None):
        """
        The Neural Style Transfer module. The constructor defines the major
        parameters; the actual inference is run with `run_style_transfer()`.
        Parameters:
            - content_layers (default: ['conv_4']): Layers to extract content features from
            - style_layers (default: ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']):
                Layers to extract style features from
            - num_steps (default: 300): Number of optimization steps
            - style_weight (default: 1000000): Weight of the style term
            - content_weight (default: 1): Weight of the content term
            - use_gpu (default: False): Run inference on gpu if available
            - retain_dims (default: True): Upsample the output image back to the
                original dims (the transfer works at 512/128). Warning: the
                upsampled image quality may not be great
            - save (default: None): Path to save the output image (path/to/*.jpg).
                Doesn't save if None
            - downsample (default: True): If False, does not downsample to 512/128;
                the style image is resized to the content image and the transfer
                runs at the original content dimensions. VERY computationally
                intensive — please ensure the gpu is enabled.
        """
        # None defaults avoid the shared-mutable-default-argument pitfall.
        if content_layers is None:
            content_layers = ['conv_4']
        if style_layers is None:
            style_layers = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
        if not isinstance(content_layers, list) or len(content_layers) < 1:
            raise ValueError("content_layers should be a list of len >= 1")
        if not isinstance(style_layers, list) or len(style_layers) < 1:
            raise ValueError("style_layers should be a list of len >= 1")
        if use_gpu and not torch.cuda.is_available():
            raise ValueError("use_gpu is True but cuda not available")
        if save is True or save is False:
            raise ValueError("save cannot be bool. Needs to be a path or None")
        if downsample is False:
            print("downsample = False does not set image size at 512 or 128. Performs style transfer on original content image size. Very computationally expensive. Please ensure gpu is enabled")
        self.content_layers = content_layers
        self.style_layers = style_layers
        self.num_steps = num_steps
        self.style_weight = style_weight
        self.content_weight = content_weight
        self.device = torch.device("cuda") if use_gpu else torch.device("cpu")
        # NOTE(review): the working size is keyed on CUDA *availability*, not
        # on use_gpu, so a CPU run on a CUDA machine still uses 512 — confirm
        # this is intended before changing it.
        self.imsize = 512 if torch.cuda.is_available() else 128
        self.retain_dims = retain_dims
        self.save = save
        self.downsample = downsample
        # Frozen, pre-trained VGG-19 feature extractor.
        self.cnn = models.vgg19(pretrained=True).features.to(self.device).eval()
        # ImageNet channel statistics expected by VGG.
        self.cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(self.device)
        self.cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(self.device)

    def imshow(self, tensor, title=None):
        """Display ``tensor`` as an image via pyplot and save it to
        'output.png' in the working directory."""
        unloader = transforms.ToPILImage()
        plt.ion()
        image = tensor.cpu().clone()
        image = image.squeeze(0)  # drop the batch dimension
        image = unloader(image)
        plt.imshow(image)
        if title is not None:
            plt.title(title)
        plt.pause(0.001)
        # Hard-coded debug save; run_style_transfer honours self.save instead.
        image.save("output.png")

    def _to_pil(self, src, label):
        """Coerce ``src`` (path, BGR numpy array, or PIL image) to a PIL image.

        Fixes the original NameErrors: the error/isinstance branches referenced
        an undefined ``img``, and the PIL branch never assigned a result.
        """
        if isinstance(src, str):
            if os.path.exists(src):
                return Image.open(src)
            raise FileNotFoundError("2", src)
        elif isinstance(src, np.ndarray):
            # OpenCV arrays are BGR; PIL expects RGB.
            return Image.fromarray(cv2.cvtColor(src, cv2.COLOR_BGR2RGB))
        elif isinstance(src, Image.Image):
            return src
        raise TypeError("%s must be a path, numpy array or PIL image" % label)

    def image_loader(self, cnt, sty):
        """Load/resize the content and style images and create a noise input.

        Keeps the content image's aspect ratio, optionally downsampling the
        longer side to ``self.imsize``.  Returns
        (content_tensor, style_tensor, input_tensor, original_(w, h)).
        """
        cntimg = self._to_pil(cnt, "cnt")
        styimg = self._to_pil(sty, "sty")
        w, h = cntimg.size
        ratio = w // h
        if self.downsample:
            l = max(w, h)
            s = min(w, h)
            if l > self.imsize:
                s = int(s / l * self.imsize)
                l = self.imsize
            # Orient the (long, short) pair to match the content image.
            if ratio == (s // l):
                styimg = styimg.resize((s, l))
                cntimg = cntimg.resize((s, l))
            elif ratio == (l // s):
                styimg = styimg.resize((l, s))
                cntimg = cntimg.resize((l, s))
        else:
            # Full resolution: only match the style image to the content size.
            styimg = styimg.resize((w, h))
        loader = transforms.Compose([transforms.ToTensor()])
        content_image = loader(cntimg).unsqueeze(0)
        style_image = loader(styimg).unsqueeze(0)
        # White-noise starting point for the optimization.
        input_img = torch.randn(content_image.data.size(), device=self.device)
        content_image = content_image.to(self.device, torch.float)
        style_image = style_image.to(self.device, torch.float)
        input_img = input_img.to(self.device, torch.float)
        input_dims = (w, h)
        return content_image, style_image, input_img, input_dims

    def get_style_model_and_losses(self, cnn, normalization_mean, normalization_std,
                                   style_img, content_img):
        """Build a truncated VGG pipeline with loss probes spliced in.

        Returns (model, style_losses, content_losses).  The ``cnn`` and
        normalization parameters are kept for signature compatibility, but —
        as in the original — the instance attributes are what is used (the
        original's unused ``copy.deepcopy`` has been dropped).
        """
        content_layers = self.content_layers
        style_layers = self.style_layers
        normalization = Normalization(self.cnn_normalization_mean, self.cnn_normalization_std).to(self.device)
        content_losses = []
        style_losses = []
        model = nn.Sequential(normalization)
        i = 0  # increment every time we see a conv
        for layer in self.cnn.children():
            if isinstance(layer, nn.Conv2d):
                i += 1
                name = 'conv_{}'.format(i)
            elif isinstance(layer, nn.ReLU):
                name = 'relu_{}'.format(i)
                # In-place ReLU would clobber the activations the loss
                # probes need to keep.
                layer = nn.ReLU(inplace=False)
            elif isinstance(layer, nn.MaxPool2d):
                name = 'pool_{}'.format(i)
            elif isinstance(layer, nn.BatchNorm2d):
                name = 'bn_{}'.format(i)
            else:
                raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
            model.add_module(name, layer)
            if name in content_layers:
                target = model(content_img).detach()
                content_loss = ContentLoss(target)
                model.add_module("content_loss_{}".format(i), content_loss)
                content_losses.append(content_loss)
            if name in style_layers:
                target_feature = model(style_img).detach()
                style_loss = StyleLoss(target_feature)
                model.add_module("style_loss_{}".format(i), style_loss)
                style_losses.append(style_loss)
        # Trim everything after the last loss probe; it contributes nothing.
        for i in range(len(model) - 1, -1, -1):
            if isinstance(model[i], (ContentLoss, StyleLoss)):
                break
        model = model[:(i + 1)]
        return model, style_losses, content_losses

    def run_style_transfer(self, style_img, content_img):
        """Optimize a noise image toward ``content_img``'s content rendered in
        ``style_img``'s style.

        Returns (PIL image, seconds taken).  Saves to ``self.save`` if set.
        """
        num_steps = self.num_steps
        style_weight = self.style_weight
        content_weight = self.content_weight
        content_img, style_img, input_img, orig_dims = self.image_loader(content_img, style_img)
        print('Building the style transfer model..')
        model, style_losses, content_losses = self.get_style_model_and_losses(
            self.cnn, self.cnn_normalization_mean, self.cnn_normalization_std,
            style_img, content_img)
        # L-BFGS directly over the pixels, per the original paper.
        optimizer = optim.LBFGS([input_img.requires_grad_()])
        run = [0]  # mutable counter shared with the closure
        start_time = time.time()
        while run[0] <= num_steps:
            print("Step #{}".format(run[0]))

            def closure():
                # Keep pixel values in the valid [0, 1] range each step.
                input_img.data.clamp_(0, 1)
                optimizer.zero_grad()
                model(input_img)
                style_score = 0
                content_score = 0
                for sl in style_losses:
                    style_score += (1 / 5) * sl.loss
                for cl in content_losses:
                    content_score += cl.loss
                style_score *= style_weight
                content_score *= content_weight
                loss = style_score + content_score
                loss.backward()
                run[0] += 1
                if run[0] % 50 == 0:
                    print("Step #{}:".format(run[0]))
                    print('Style Loss: {:4f} Content Loss: {:4f}'.format(
                        style_score.item(), content_score.item()))
                return style_score + content_score

            optimizer.step(closure)
        time_taken = time.time() - start_time
        input_img.data.clamp_(0, 1)
        unloader = transforms.ToPILImage()
        image = input_img.cpu().clone()
        image = image.squeeze(0)
        image = unloader(image)
        if self.retain_dims:
            print("WARNING: retaining original dims can distort picture quality")
            image = image.resize(orig_dims, Image.BILINEAR)
        if self.save is not None:
            image.save(self.save)
        return image, time_taken
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# folddup.py
import filecmp
import group
import itertools
import os
def folddup(f1, f2):
    """Return True when directories f1 and f2 have identical trees and file
    contents (names must match as well); False on any difference or when
    either tree cannot be read."""
    try:
        cmp_result = filecmp.dircmp(f1, f2)
        # Anything present on only one side (or uncomparable) is a difference.
        if cmp_result.left_only or cmp_result.right_only or cmp_result.funny_files:
            return False
        _, mismatch, errors = filecmp.cmpfiles(
            f1, f2, cmp_result.common_files, shallow=False)
        if mismatch or errors:
            return False
        # Recurse into the shared subdirectories (short-circuits on mismatch).
        return all(
            folddup(os.path.join(f1, sub), os.path.join(f2, sub))
            for sub in cmp_result.common_dirs)
    except (FileNotFoundError, PermissionError):
        return False
# ---- interactive entry point: choose which subfolders to compare ----
print('''
Subfolders to find and compare:
(1) All of them
(2) Only those at the first level
(3) Only those at the lowest level
(Files with identical content but different filenames will be treated as different.)
''')
choice = int(input("Select: "))
if choice == 1:
    # Every directory in the tree, including '.' itself.
    subf = [dirpath for dirpath, _, _ in os.walk('.')]
elif choice == 2:
    # Immediate children of the current directory only.
    subf = [entry for entry in os.listdir('.') if os.path.isdir(os.path.join('.',entry))]
elif choice == 3:
    # Leaf directories: those with no subdirectories of their own.
    subf = [dirpath for dirpath, dirnames, _ in os.walk('.') if not dirnames]
else:
    exit(1)
# generate unordered folder pairs and put duplicate pairs in a list
results = [(f1,f2) for f1,f2 in itertools.combinations(subf,2) if folddup(f1,f2)]
group.print_complete(results)
def LoadFromText(sList, path="/Users/chenchaoyang/Desktop/python/Python/File/students.txt"):
    """Load saved student records from ``path`` into ``sList``.

    Each line holds space-separated fields: ID, name, three subject scores
    and the precomputed total.  If the file does not exist yet it is created
    empty.  ``path`` defaults to the original hard-coded location so existing
    callers are unaffected.

    Fix: the original bare ``except`` re-opened the file in "w" mode on ANY
    error — a single malformed/blank line silently destroyed all saved data.
    Only a missing file should trigger creation.
    """
    print("加载已有数据……")
    try:
        with open(path, "r") as txtFile:
            for line in txtFile:
                # Stop at the first blank line (trailing newline padding).
                if not line.strip():
                    break
                s = line.split(" ")
                stu = Student()
                stu.ID = s[0]
                stu.name = s[1]
                stu.score1 = float(s[2])
                stu.score2 = float(s[3])
                stu.score3 = float(s[4])
                stu.total = float(s[5])
                sList.append(stu)
    except FileNotFoundError:
        # First run: create an empty data file instead of failing.
        open(path, "w").close()
    print("加载成功!")
def AddToText(stu, path="/Users/chenchaoyang/Desktop/python/Python/File/students.txt"):
    """Append one student record as a space-separated line to ``path``.

    Field order matches LoadFromText: ID, name, score1, score2, score3,
    total.  ``path`` defaults to the original hard-coded location so existing
    callers are unaffected; the ``with`` block guarantees the file is closed.
    """
    fields = [stu.ID, stu.name, str(stu.score1), str(stu.score2),
              str(stu.score3), str(stu.total)]
    with open(path, "a") as txtFile:
        txtFile.write(" ".join(fields))
        txtFile.write("\n")
    print(stu)
def WriteToText(sList, path="/Users/chenchaoyang/Desktop/python/Python/File/students.txt"):
    """Rewrite ``path`` with every student in ``sList``, one record per line.

    Fix: the original wrote empty-string separators between fields, producing
    lines like '1Tom1.02.03.06.0' that LoadFromText (which splits on spaces)
    could never parse back.  Fields are now space-separated, matching
    AddToText.  ``path`` defaults to the original hard-coded location.
    """
    with open(path, "w") as txtFile:
        for stu in sList:
            fields = [stu.ID, stu.name, str(stu.score1), str(stu.score2),
                      str(stu.score3), str(stu.total)]
            txtFile.write(" ".join(fields))
            txtFile.write("\n")
class Student:
    """A single student record: ID, name, three subject scores and a cached
    total (recomputed via getSum)."""

    def __init__(self):
        self.name = ""
        self.ID = ""
        # Scores for Chinese / math / English, plus the cached sum.
        self.score1 = self.score2 = self.score3 = self.total = 0

    def getSum(self):
        """Recompute the cached total from the three subject scores."""
        self.total = self.score1 + self.score2 + self.score3

    def printStudent(self):
        """Print one tab-separated row for table-style display."""
        print(self.ID, "\t", self.name, "\t", self.score1, "\t",
              self.score2, "\t", self.score3, "\t", self.total)
def hasRecord(sList, sID):
    """Return the index of the first student in ``sList`` whose ID equals
    ``sID``, or -1 when no such student exists."""
    for idx, stu in enumerate(sList):
        if stu.ID == sID:
            return idx
    return -1
def getScore(subject, action):
    """Prompt for a 0-100 score for ``subject``; return it, or -1 on invalid
    input.  ``action`` names the operation (添加/修改) for the error message.

    Fix: the bare ``except`` also swallowed KeyboardInterrupt/EOFError; only
    a non-numeric entry (ValueError) should be treated as bad input.
    """
    try:
        score = float(input("请输入" + subject + "成绩:"))
    except ValueError:
        print("输入的不是数字," + subject + "失败!")
        return -1
    if 0 <= score <= 100:
        return score
    print("输入的" + subject + "成绩有错误," + action + "失败!")
    return -1
def showInfo():
    """Print the main menu of the student-records system."""
    print("-" * 30)
    menu = ("学生成绩系统",
            "1.添加学生的信息",
            "2.删除学生的信息",
            "3.修改学生的信息",
            "4.查询学生的信息",
            "5.列出所有学生的信息",
            "6.退出系统")
    for entry in menu:
        print(entry)
def updateStudent(sList, student):
    """Interactively edit ``student`` in place, then persist ``sList``.

    Loops on a small menu until option 6 (exit); score edits recompute the
    cached total.

    Fix: the duplicate-ID check called ``hasRecord(student, ...)`` — passing
    the Student object where the list is required, which raised a TypeError
    (Student is not iterable).  It must scan ``sList``.  The bare ``except``
    around the menu prompt is narrowed to ValueError.
    """
    while True:
        try:
            alterNum = int(input("1.修改姓名\n2.修改学号\n3.修改语文成绩\n4.修改数学成绩\n5.修改英语成绩\n6.退出修改\n"))
        except ValueError:
            print("输入有误,请输入编号1到6")
            continue
        if alterNum == 1:
            newName = input("请输入更改好的姓名:")
            student.name = newName
            print("姓名修改成功")
            continue
        elif alterNum == 2:
            newld = input("请输入更改后的学号:")
            # Scan the whole list for a duplicate ID (bug fix, see docstring).
            newlndex = hasRecord(sList, newld)
            if newlndex > -1:
                print("输入学号不可重复,修改失败!")
            else:
                student.ID = newld
                print("学号修改成功")
            continue
        elif alterNum == 3:
            score1 = getScore("语文", "修改")
            if score1 > -1:
                student.score1 = score1
                student.getSum()
                print("语文成绩修改成功!")
            continue
        elif alterNum == 4:
            score2 = getScore("数学", "修改")
            if score2 > -1:
                student.score2 = score2
                student.getSum()
                print("数学成绩修改成功!")
            continue
        elif alterNum == 5:
            score3 = getScore("英语", "修改")
            if score3 > -1:
                student.score3 = score3
                student.getSum()
                print("英语成绩修改成功!")
            continue
        elif alterNum == 6:
            break
        else:
            print("输入错误请重新输入!")
    print(sList)
    WriteToText(sList)
# ---- entry point: load persisted records, then loop on the menu ----
studentList=[]
LoadFromText(studentList)
while True:
    showInfo()
    # The menu choice must be an integer 1-6; anything else re-prompts.
    try:
        key=int(input("请选择功能(输入序号1到6):"))
    except:
        print("您的输入有误,请输入序号1到6")
        continue
    if key==1:
        # 1: add a student.  The ID must be unique; each score must be 0-100
        # or the whole entry is abandoned.
        print("您选择了添加学生信息功能")
        name=input("请输入姓名:")
        stuId=input("请输入学号(不可重复):")
        index=hasRecord(studentList,stuId)
        if index >- 1:
            print("输入学号重复,添加失败!")
            continue
        else:
            newStudent = Student()
            newStudent.name = name
            newStudent.ID = stuId
            score1=getScore("语文","添加")
            if score1 >- 1:
                newStudent.score1 = score1
            else:
                continue
            score2=getScore("数学","添加")
            if score2 >- 1:
                newStudent.score2 = score2
            else:
                continue
            score3=getScore("英语","添加")
            if score3 >- 1:
                newStudent.score3 = score3
            else:
                continue
            # Cache the total, keep the in-memory list and the file in sync.
            newStudent.getSum()
            studentList.append(newStudent)
            AddToText(newStudent)
            print(newStudent.name + "的成绩录入成功!")
    elif key == 2:
        # 2: delete by ID, then rewrite the whole file.
        print("您选择了删除学生信息功能")
        stuId=input("请输入要删除的学号:")
        index=hasRecord(studentList,stuId)
        if index >- 1:
            del studentList[index]
            WriteToText(studentList)
            print("删除成功!")
        else:
            print("没有此学生学号,删除失败!")
    elif key == 3:
        # 3: edit an existing record via the interactive sub-menu.
        print("您选择了修改学生信息功能")
        stuId=input("请输入你要修改学生的学号:")
        index=hasRecord(studentList,stuId)
        if index == -1:
            print("没有此学号,修改失败!")
        else:
            temp=studentList[index]
            updateStudent(studentList,temp)
    elif key == 4:
        # 4: look up and print a single record.
        print("您选择了查询学生信息功能")
        stuId=input("请输入你要查询学生的学号:")
        index=hasRecord(studentList,stuId)
        if index == -1:
            print("没有此学生学号,查询失败!")
        else:
            temp=studentList[index]
            print("学号\t姓名\t语文\t数学\t英语\t总分")
            temp.printStudent()
    elif key == 5:
        # 5: list every record as a table.
        print("接下来进行遍历所有的学生信息…")
        print("学号\t姓名\t语文\t数学\t英语\t总分")
        for temp in studentList:
            temp.printStudent()
    elif key==6:
        # 6: exit after confirmation.
        quitConfirm=input("确认要退出本系统吗(Y或者N)?")
        if quitConfirm.upper()=="Y":
            print("欢迎使用本系统,谢谢!")
            break
    else:
        print("您输入有误,请重新输入!")
from .cas_number import CasNumber, InvalidCasNumber, NotSupported
import unittest
# Every accepted spelling of the CAS number for CO2 (124-38-9): zero-padded
# and unpadded, with and without dashes.
co2_set = {'000124-38-9', '000124389', '124-38-9', '124389'}
class CasNumberTest(unittest.TestCase):
    """Exercise construction, equality and immutability of CasNumber."""

    def test_valid_co2(self):
        cas = CasNumber(124389)
        self.assertSetEqual({term for term in cas.terms}, co2_set)
        self.assertEqual(str(cas), '124-38-9')

    def test_invalid(self):
        # Too short, too many segments, and a malformed string all fail.
        with self.assertRaises(InvalidCasNumber):
            CasNumber(1234)
        with self.assertRaises(InvalidCasNumber):
            CasNumber(55, 89, 47, 2)
        with self.assertRaises(InvalidCasNumber):
            CasNumber('37205.543.1')

    def test_add_remove(self):
        cas = CasNumber(12345)
        with self.assertRaises(NotSupported):
            cas.add_term('Floobie')
        self.assertIn('12-34-5', cas)
        with self.assertRaises(NotSupported):
            cas.remove_term('12-34-5')

    def test_equal(self):
        from_int = CasNumber(124389)
        from_str = CasNumber('124-38-9')
        self.assertEqual(from_int, from_str)

    def test_pad_input(self):
        # Padding zeros and nonstandard separators are normalized away.
        cas = CasNumber('00124+38+9')
        self.assertSetEqual({term for term in cas.terms}, co2_set)

    def test_tuple_input(self):
        from_strs = CasNumber(('32768', '4', 1))
        self.assertEqual(str(from_strs), '32768-04-1')
        from_ints = CasNumber((32768, 4, '1'))
        self.assertEqual(from_ints.object, from_strs.object)
        from_args = CasNumber('32768', 4.0, '1')
        self.assertEqual(from_ints.object, from_args.object)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""
June 17th, 2019. This python file combines past data with updated 2019 data points for further analysis.
"""
from fileLoading import loadExcel
from dateConv import noaaDateConv, decToDatetime
from scipy import stats
import pandas as pd
import datetime as dt
import calendar
import numpy as np
def methane():
    """Merge historical Summit CH4 data with the 2018/2019 in-situ results.

    Reads three Excel workbooks from hard-coded ARL desktop paths, keeps only
    the decimal-year and run-median columns, drops the pre-2018 overlap and
    >5-sigma outliers, writes 'methane2019updated.txt' in NOAA format and
    returns the combined DataFrame.
    """
    # import original dataset and new datasets
    methanePrev = loadExcel(r"C:\Users\ARL\Desktop\Jashan\Summit\analyses\Data\Methane.xlsx")
    methane2018 = loadExcel(r'C:\Users\ARL\Desktop\SUM_CH4_insitu_2018.xlsx')
    methane2019 = loadExcel(r'C:\Users\ARL\Desktop\Summit_GC_2019\CH4_results\SUM_CH4_insitu_2019.xlsx')
    # identify column names we want to keep
    goodcol = ['Decimal Year', 'Run median']  # good columns
    badcol = [x for x in methane2018.columns if x not in goodcol]  # bad columns
    newnames = ['DecYear', 'MR']
    for sheet in [methane2018, methane2019]:
        sheet.drop(badcol, axis=1, inplace=True)  # drop bad columns
        sheet.dropna(how='any', axis=0, inplace=True)  # drop NaN rows
        sheet.columns = newnames  # assign same col names
    # keep only the pre-2018 portion of the old record so the new sheets
    # supply 2018 onward without duplication
    methanePrev = methanePrev[methanePrev['DecYear'] < 2018]
    comb = [methanePrev, methane2018, methane2019]  # create combination frame
    methaneFinal = pd.concat(comb)  # concat
    # trim extreme outliers: drop anything beyond 5 standard deviations
    values = methaneFinal['MR'].values
    z = np.abs(stats.zscore(values))
    thresh = 5
    methaneFinal = methaneFinal[~(z > thresh)]
    dates = decToDatetime(methaneFinal['DecYear'].values)  # conv to datetime
    methaneFinal['datetime'] = dates  # add to dataframe
    # NOAA-format copy: datetime + mixing ratio, space-separated, no header
    noaaMethane = pd.DataFrame(columns=['datetime', 'MR'])
    noaaMethane['datetime'], noaaMethane['MR'] = dates, methaneFinal['MR'].values  # noaa version
    noaaMethane = noaaDateConv(noaaMethane)
    noaaMethane.to_csv('methane2019updated.txt', header=None, index=None, sep=' ', mode='w+')
    return methaneFinal
# Allow running the merge as a standalone script.
if __name__ == '__main__':
    methane()
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Alcatel.AOS.get_metrics
# ----------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
from noc.sa.profiles.Generic.get_metrics import OIDRule
from noc.core.mib import mib
class SlotRule(OIDRule):
    """OID rule that expands per-slot metrics for Alcatel AOS devices.

    Builds one OID per stack member — or a single pseudo-slot 0 when the
    "Stack | Members" capability is absent — and yields
    (oid(s), type, scale, path) tuples for the generic metrics collector.
    """

    name = "slot"

    def iter_oids(self, script, metric):
        # Default: one pseudo-slot 0 for non-stacked devices.
        health_module_slot = [0]
        i = 1
        r = {}
        if script.has_capability("Stack | Members"):
            # Stacked device: slots 1..N, one per member.
            health_module_slot = list(range(1, script.capabilities["Stack | Members"] + 1))
        # Map metric path index (as string) -> slot number (as string).
        for ms in health_module_slot:
            r[str(i)] = "%d" % ms
            # r[str(i)] = {"healthModuleSlot": ms}
            i += 1
        for i in r:
            if self.is_complex:
                # Multiple OIDs per metric: expand each template with the slot.
                gen = [mib[self.expand(o, {"hwSlotIndex": r[i]})] for o in self.oid]
                # NOTE(review): CPU metrics get a 4-element path, others a
                # 3-element one — confirm against the metric path convention.
                path = ["0", "0", i, ""] if "CPU" in metric.metric else ["0", i, "0"]
                if gen:
                    yield tuple(gen), self.type, self.scale, path
            else:
                oid = mib[self.expand(self.oid, {"hwSlotIndex": r[i]})]
                path = ["0", "0", i, ""] if "CPU" in metric.metric else ["0", i, "0"]
                if oid:
                    yield oid, self.type, self.scale, path
|
from test_helper import run_common_tests, failed, passed, get_answer_placeholders
def test_answer_placeholders():
    """Check that the learner declared the expected module-level logger."""
    placeholders = get_answer_placeholders()
    # Normalize quote style so "..." and '...' answers compare equal.
    answer = placeholders[0].replace('"', "'")
    if answer == "logger = logging.getLogger('mortgage.rate')":
        passed()
    else:
        failed('Sorry, that is not correct. Check the Hint for more help.')
# Run the course framework's common checks, then the placeholder check.
if __name__ == '__main__':
    run_common_tests()
    test_answer_placeholders()
|
#
# TinyDB Model: Connections
#
from redmonty.models.tinydb.tinymodel import TinyModel
class Connections(TinyModel):
    """TinyDB-backed model describing a database connection.

    The schema follows the cerberus validation-rule style, which gives
    immediate validation:
    http://docs.python-cerberus.org/en/stable/validation-rules.html
    (types: .../validation-rules.html#type)
    """

    schema = {
        "type": {"type": "string", "allowed": ["redis", "tinydb"], "default": "redis"},
        "dbname": {"type": "string"},
        "host": {"type": "string", "default": "localhost"},
        "port": {"type": "integer", "default": 6379},
        "user": {"type": "string"},
        "passwd": {"type": "string", "default": ""},
        "strict": {"type": "boolean", "default": True},
    }

    # Class attributes outside the schema that should still be included in
    # to_dict() conversion and handed to the encoders.
    include_attributes = []

    def __init__(self, **kwargs):
        # Delegate population/validation to the TinyModel machinery.
        self.init_on_load(**kwargs)
|
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
#Faça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês.
# Calcule e mostre o total do seu salário no referido mês, sabendo-se que são
# descontados 11% para o Imposto de Renda, 8% para o INSS e 5% para o sindicato, faça um programa que nos dê:
# • salário bruto.
# • quanto pagou ao INSS.
# • quanto pagou ao sindicato.
# • o salário líquido.
# • calcule os descontos e o salário líquido, conforme a tabela abaixo:
# • +SalárioBruto:R$ • -IR(11%):R$
# • - INSS (8%) :R$
# • -Sindicato(5%):R$
por_hora = float(raw_input("Informe quanto você ganha por hora: "))
horas = float(raw_input("Informe quantas horas você trabalha por mês: "))
bruto = por_hora*horas
inss = bruto*0.08
sind = bruto*0.05
liquido = bruto-inss-sind-bruto*0.11
print "Salário bruto: {}".format(bruto)
print "INSS: {}".format(inss)
print "Sidicato: {}".format(sind)
print "Salário Líquido: {}".format(liquido) |
from hms_workflow_platform.core.queries.base.base_query import *
class PhysicalExamQuery(BaseQuery):
    """Read-only HIS queries for physical-exam (vital-sign) records."""

    def __init__(self, site):
        super().__init__()
        # The adapter supplies the site-specific DB connection / query runner.
        self.adapter = self.get_adapter(site)
        self.query = self.adapter.query

    def physicalexam_create(self, date_obj):
        """Return (en, modify-timestamp) rows for vital-sign records examined
        on or after ``date_obj``, ordered by modification time; None if empty.

        Out-patient visits (fix_visit_type_id <> '1') are keyed by formatted
        VN, in-patient admissions (= '1') by formatted AN; the two halves are
        combined with UNION.  ``date`` comes from strftime, so the f-string
        interpolation cannot inject arbitrary SQL here.
        """
        date = date_obj.strftime('%Y-%m-%d')
        query = (
            "SELECT format_vn(vn) en, (vital_sign_extend.modify_date || 'T' || vital_sign_extend.modify_time) mdate from visit v "
            "left join vital_sign_extend on vital_sign_extend.visit_id = v.visit_id AND fix_visit_type_id <> '1' "
            f"WHERE vital_sign_extend.examine_date >= '{date}' "
            "union "
            "SELECT format_an(an) en, (vital_sign_extend.modify_date || 'T' || vital_sign_extend.modify_time) mdate from visit v "
            "left join vital_sign_extend on vital_sign_extend.visit_id = v.visit_id AND fix_visit_type_id = '1' "
            f"WHERE vital_sign_extend.examine_date >= '{date}' "
            "order by mdate")
        result = self.query(query)
        return result if result else None
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.command_modules.lab.validators import (validate_lab_vm_create,
validate_lab_vm_list,
validate_user_name,
validate_template_id,
validate_claim_vm,
_validate_artifacts)
from azure.cli.core.commands.parameters import resource_group_name_type
from azure.cli.core.sdk.util import ParametersContext
from azure.cli.core.util import get_json_object
# Shared option: every 'lab' command accepts the lab's resource group.
with ParametersContext(command='lab') as c:
    c.argument('resource_group', arg_type=resource_group_name_type,
               help='Name of lab\'s resource group. You can configure the default group '
                    'using \'az configure --defaults group=<name>\'')

with ParametersContext(command='lab custom-image create') as c:
    c.register_alias('resource_group', ('--resource-group', '-g'))
    c.register_alias('name', ('--name', '-n'))

with ParametersContext(command='lab vm create') as c:
    c.register_alias('resource_group', ('--resource-group', '-g'), validator=validate_lab_vm_create)
    c.register_alias('name', ('--name', '-n'))
    # Authentication related arguments
    authentication_group_name = 'Authentication'
    c.argument('admin_username', arg_group=authentication_group_name)
    c.argument('admin_password', arg_group=authentication_group_name)
    c.argument('authentication_type', arg_group=authentication_group_name)
    c.argument('ssh_key', arg_group=authentication_group_name)
    c.argument('generate_ssh_keys', action='store_true', arg_group=authentication_group_name)
    c.argument('saved_secret', arg_group=authentication_group_name)
    # Add Artifacts from json object
    c.argument('artifacts', type=get_json_object)
    # Image related arguments: the validator resolves 'image' into these
    # ignored SDK parameters.
    c.ignore('os_type')
    c.ignore('gallery_image_reference')
    c.ignore('custom_image_id')
    c.argument('image')
    # Network related arguments
    network_group_name = 'Network'
    c.argument('ip_configuration', arg_group=network_group_name)
    c.argument('subnet', arg_group=network_group_name)
    c.argument('vnet_name', arg_group=network_group_name)
    c.ignore('lab_subnet_name')
    c.ignore('lab_virtual_network_id')
    c.ignore('disallow_public_ip_address')
    c.ignore('network_interface')
    # Creating VM in a different location than the lab is an officially
    # unsupported scenario
    c.ignore('location')
    c.argument('expiration_date')
    c.argument('formula')
    c.argument('allow_claim', action='store_true')

with ParametersContext(command='lab vm list') as c:
    # Mutually-related list filters grouped together in --help output.
    filter_arg_group_name = 'Filter'
    c.argument('filters', arg_group=filter_arg_group_name)
    c.argument('all', action='store_true', arg_group=filter_arg_group_name)
    c.argument('claimable', action='store_true', arg_group=filter_arg_group_name)
    c.argument('environment', arg_group=filter_arg_group_name)
    c.register_alias('resource_group', ('--resource-group', '-g'), validator=validate_lab_vm_list)

with ParametersContext(command='lab vm claim') as c:
    c.register_alias('resource_group', ('--resource-group', '-g'), validator=validate_claim_vm)
    c.register_alias('name', ('--name', '-n'), id_part='child_name_1')
    c.argument('lab_name', id_part='name')

with ParametersContext(command='lab vm apply-artifacts') as c:
    c.register('artifacts', ('--artifacts',), type=get_json_object, validator=_validate_artifacts)
    c.register_alias('name', ('--name', '-n'))

with ParametersContext(command='lab formula') as c:
    c.register_alias('name', ('--name', '-n'))

with ParametersContext(command='lab secret') as c:
    # Local import keeps the SDK model off the module import path.
    from azure.mgmt.devtestlabs.models.secret import Secret
    c.register_alias('name', ('--name', '-n'))
    c.register_alias('secret', ('--value', ), type=lambda x: Secret(value=x))
    c.ignore('user_name')
    c.argument('lab_name', validator=validate_user_name)

with ParametersContext(command='lab formula export-artifacts') as c:
    # Exporting artifacts does not need expand filter
    c.ignore('expand')

with ParametersContext(command='lab environment') as c:
    c.register_alias('name', ('--name', '-n'))
    c.ignore('user_name')
    c.argument('lab_name', validator=validate_user_name)

with ParametersContext(command='lab environment create') as c:
    c.argument('arm_template', validator=validate_template_id)
    c.argument('parameters', type=get_json_object)

with ParametersContext(command='lab arm-template') as c:
    c.register_alias('name', ('--name', '-n'))

with ParametersContext(command='lab arm-template show') as c:
    c.argument('export_parameters', action='store_true')
|
"""Module to test Serializer functionality"""
import marshmallow as ma
import pytest
from protean.core.exceptions import ConfigurationError
from protean_flask.core.serializers import EntitySerializer
from ..support.sample_app.entities import Dog
from ..support.sample_app.entities import Human
from ..support.sample_app.entities import RelatedDog
from ..support.sample_app.serializers import HumanDetailSerializer
from ..support.sample_app.serializers import RelatedDogSerializer
class DogSerializer(EntitySerializer):
    """ Serializer for the Dog Entity """
    class Meta:
        # Serialized fields are derived automatically from the bound entity.
        entity = Dog
class TestEntitySerializer:
    """Tests for EntitySerializer class"""

    def test_init(self):
        """A derived serializer can be built and dumps an entity faithfully."""
        serializer = DogSerializer()
        assert serializer is not None
        result = serializer.dump(Dog(id=1, name='Johnny', owner='John'))
        assert result.data == {'age': 5, 'id': 1, 'name': 'Johnny', 'owner': 'John'}

    def test_abstraction(self):
        """The base EntitySerializer itself cannot be instantiated."""
        with pytest.raises(ConfigurationError):
            EntitySerializer()

    def test_include_fields(self):
        """Meta.fields whitelists the serialized attributes."""
        class IdAgeDogSerializer(EntitySerializer):
            """ Serializer exposing only id and age of the Dog Entity """
            class Meta:
                entity = Dog
                fields = ('id', 'age')

        serializer = IdAgeDogSerializer()
        assert serializer is not None
        result = serializer.dump(Dog(id=1, name='Johnny', owner='John'))
        assert result.data == {'age': 5, 'id': 1}

    def test_exclude_fields(self):
        """Meta.exclude blacklists the serialized attributes."""
        class NoIdAgeDogSerializer(EntitySerializer):
            """ Serializer hiding id and age of the Dog Entity """
            class Meta:
                entity = Dog
                exclude = ('id', 'age')

        serializer = NoIdAgeDogSerializer()
        assert serializer is not None
        result = serializer.dump(Dog(id=1, name='Johnny', owner='John'))
        assert result.data == {'name': 'Johnny', 'owner': 'John'}

    def test_method_fields(self):
        """A marshmallow Method field is computed from the dumped entity."""
        class MethodDogSerializer(EntitySerializer):
            """ Serializer with a computed 'old' flag for the Dog Entity """
            old = ma.fields.Method('get_old')

            def get_old(self, obj):
                """ Check if the dog is old or young """
                # Strictly above 5 counts as old.
                return obj.age > 5

            class Meta:
                entity = Dog

        serializer = MethodDogSerializer()
        assert serializer is not None
        result = serializer.dump(Dog(id=1, name='Johnny', owner='John'))
        assert result.data == {
            'name': 'Johnny',
            'owner': 'John',
            'age': 5,
            'id': 1,
            'old': False,
        }
class TestEntitySerializer2:
    """Tests for EntitySerializer class with related fields """

    @classmethod
    def setup_class(cls):
        """Create the Human record shared by the tests in this class."""
        cls.human = Human.create(id=1, name='John')

    def test_reference_field(self):
        """Verify that a Reference field is serialized as a nested dict."""
        johnny = RelatedDog.create(id=5, name='Johnny', owner=self.human)

        serializer = RelatedDogSerializer()
        dumped = serializer.dump(johnny)
        assert dumped.data == {
            'age': 5,
            'id': 5,
            'name': 'Johnny',
            'owner': {'contact': None, 'id': 1, 'name': 'John'},
        }

    def test_hasmany_association(self):
        """Verify that a HasMany association is serialized as a list."""
        RelatedDog.create(id=5, name='Johnny', owner=self.human)

        dumped = HumanDetailSerializer().dump(self.human)
        assert dumped.data == {
            'contact': None,
            'dogs': [{'age': 5, 'id': 5, 'name': 'Johnny'}],
            'id': 1,
            'name': 'John',
        }
|
# IMPORTS
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Chrome Driver Setup Functions
def get_chrome_driver(options):
    """Return a Chrome WebDriver configured with the given ChromeOptions.

    NOTE(review): `chrome_options` is the legacy keyword; newer Selenium
    releases deprecate it in favor of `options=` (Selenium 4 removes it).
    Confirm the pinned selenium version before changing.
    """
    return webdriver.Chrome(chrome_options=options)
def get_chromdriver_options():
    """Return a fresh, empty ChromeOptions instance.

    NOTE(review): the name is misspelled ("chromdriver") but is kept as-is
    for backward compatibility with existing callers.
    """
    return webdriver.ChromeOptions()
def set_ignore_certificate_error(options):
    """Add the Chrome switch that suppresses TLS certificate errors.

    Bug fix: the switch was misspelled as '--ignore-certifictae-errors',
    which Chrome silently ignores (unknown switches are not errors), so
    certificate errors were never actually suppressed. The correct switch
    is '--ignore-certificate-errors'.
    """
    return options.add_argument('--ignore-certificate-errors')
def set_incognito_mode(options):
    """Enable Chrome's incognito (private browsing) mode on the options."""
    incognito_flag = '--incognito'
    return options.add_argument(incognito_flag)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Definition of classes GenerationProbabilityV(D)J to compute Pgen of a CDR3 seq.
Copyright (C) 2018 Zachary Sethna
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
This module defines three classes. The first, and parent of the other two,
GenerationProbability has wrapper methods for formatting and calling a pgen
computation for a nucleotide, amino acid, or regular expression CDR3 sequence.
GenerationProbabilityVDJ and GenerationProbabilityVJ each have a method,
compute_CDR3_pgen, which implements the respective dynamic programming
algorithm for either a VDJ or VJ generative recombination model.
In order to instantiate GenerationProbabilityV(D)J, instances of
GenerativeModelV(D)J and GenomicDataV(D)J are needed.
GenerationProbabilityV(D)J will inherit the processed parameters of the
provided generative model and genomic data through the
PreprocessedParametersV(D)J classes.
Example
-------
>>> import olga.load_model as load_model
>>> import olga.generation_probability as pgen
>>>
>>> params_file_name = './models/human_T_beta/model_params.txt'
>>> marginals_file_name = './models/human_T_beta/model_marginals.txt'
>>> V_anchor_pos_file ='./models/human_T_beta/V_gene_CDR3_anchors.csv'
>>> J_anchor_pos_file = './models/human_T_beta/J_gene_CDR3_anchors.csv'
>>>
>>> genomic_data = load_model.GenomicDataVDJ()
>>> genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
>>>
>>> generative_model = load_model.GenerativeModelVDJ()
>>> generative_model.load_and_process_igor_model(marginals_file_name)
>>>
>>> pgen_model = pgen.GenerationProbabilityVDJ(generative_model, genomic_data)
>>>
>>> pgen_model.compute_regex_CDR3_template_pgen('CASSAX{0,5}SARPEQFF')
6.846877804096558e-10
>>>
>>> pgen_model.compute_aa_CDR3_pgen('CAWSVAPDRGGYTF', 'TRBV30*01', 'TRBJ1-2*01')
1.203646865765782e-10
>>>
>>> pgen_model.compute_nt_CDR3_pgen('TGTGCCAGTAGTATAACAACCCAGGGCTTGTACGAGCAGTACTTC')
3.9945642868171824e-14
@author: zacharysethna
"""
import numpy as np
import re
from .utils import nt2codon_rep
from .preprocess_generative_model_and_data import PreprocessedParametersVDJ, PreprocessedParametersVJ
class GenerationProbability(object):
    """Class used to define Pgen functions and sequence formatting.

    This class is used to define three types of functions that are used by
    both the VDJ pgen algorithm and the VJ pgen algorithm.

    The first type is functions that wrap around the core 'amino acid'
    algorithms to allow for computing Pgen of regular expression, amino acid,
    and nucleotide CDR3 sequences (etc).

    The second type are functions that format some of the inputs (V/J mask,
    lists seqs for regular expressions) of the first type.

    The last group of functions are alignment/matching scripts that are used to
    check how much of an 'amino acid' CDR3 is consistent with a given
    nucleotide sequence. These methods are used in both core algorithms when
    including the V and J contributions.

    Attributes
    ----------
    codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the values
        being lists of codons corresponding to the symbol.
    d_V_usage_mask : list of int
        Default V usage mask of indices of all productive V genes/alleles.
    V_mask_mapping : dict
        Dictionary mapping allowed keywords (strings corresponding to V gene
        and V allele names) to the indices of the V alleles they refer to.
    d_J_usage_mask : list of int
        Default J usage mask of indices of all productive J genes/alleles.
    J_mask_mapping : dict
        Dictionary mapping allowed keywords (strings corresponding to J gene
        and J allele names) to the indices of the J alleles they refer to.

    Methods
    ----------
    compute_regex_CDR3_template_pgen(regex_seq, V_usage_mask_in = None, J_usage_mask_in = None, print_warnings = True, raise_overload_warning = True)
        Compute Pgen for all seqs consistent with regular expression regex_seq.
    compute_aa_CDR3_pgen(CDR3_seq, V_usage_mask_in = None, J_usage_mask_in = None, print_warnings = True)
        Compute Pgen for the amino acid sequence CDR3_seq
    compute_hamming_dist_1_pgen(CDR3_seq, V_usage_mask_in = None, J_usage_mask_in = None, print_warnings = True)
        Compute Pgen of all seqs hamming dist 1 (in amino acids) from CDR3_seq
    compute_nt_CDR3_pgen(CDR3_ntseq, V_usage_mask_in = None, J_usage_mask_in = None, print_warnings = True)
        Compute Pgen for the inframe nucleotide sequence CDR3_ntseq.
    compute_CDR3_pgen(CDR3_seq, V_usage_mask, J_usage_mask)
        Dummy function that is replaced in classes GenerationProbabilityV(D)J.
        The methods that replace it implement the different algorithms for
        computing Pgen on a VDJ CDR3 sequence or a VJ CDR3 sequence.
    format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings = True)
        Format raw usage masks into lists of indices.
    list_seqs_from_regex(regex_seq, print_warnings = True, raise_overload_warning = True)
        List sequences that match regular expression template.
    max_nt_to_aa_alignment_left(CDR3_seq, ntseq)
        Find maximum match between CDR3_seq and ntseq from the left.
    max_nt_to_aa_alignment_right(CDR3_seq, ntseq)
        Find maximum match between CDR3_seq and ntseq from the right.
    """

    def __init__(self):
        """Initialize class GenerationProbability.

        Only define dummy attributes for this class. The children classes
        GenerationProbabilityVDJ and GenerationProbabilityVJ will initialize
        the actual attributes.
        """
        # All of these are populated by PreprocessedParametersV(D)J in the
        # subclasses; the base class alone cannot compute a Pgen.
        self.codons_dict = None
        self.d_V_usage_mask = None
        self.V_mask_mapping = None
        self.d_J_usage_mask = None
        self.J_mask_mapping = None
def compute_regex_CDR3_template_pgen(self, regex_seq, V_usage_mask_in = None, J_usage_mask_in = None, print_warnings = True, raise_overload_warning = True):
"""Compute Pgen for all seqs consistent with regular expression regex_seq.
Computes Pgen for a (limited vocabulary) regular expression of CDR3
amino acid sequences, conditioned on the V genes/alleles indicated in
V_usage_mask_in and the J genes/alleles in J_usage_mask_in. Please note
that this function will list out all the sequences that correspond to the
regular expression and then calculate the Pgen of each sequence in
succession. THIS CAN BE SLOW. Consider defining a custom alphabet to
represent any undetermined amino acids as this will greatly speed up the
computations. For example, if the symbol ^ is defined as [AGR] in a custom
alphabet, then instead of running
compute_regex_CDR3_template_pgen('CASS[AGR]SARPEQFF', ppp),
which will compute Pgen for 3 sequences, the single sequence
'CASS^SARPEQFF' can be considered. (Examples are TCRB sequences/model)
Parameters
----------
regex_seq : str
The regular expression string that represents the CDR3 sequences to be
listed then their Pgens computed and summed.
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
raise_overload_warning : bool
A flag to warn of more than 10000 seqs corresponding to the regex_seq
Returns
-------
pgen : float
The generation probability (Pgen) of the sequence
Examples
--------
>>> generation_probability.compute_regex_CDR3_template_pgen('CASS[AGR]SARPEQFF')
8.1090898050318022e-10
>>> generation_probability.compute_regex_CDR3_template_pgen('CASSAX{0,5}SARPEQFF')
6.8468778040965569e-10
"""
V_usage_mask, J_usage_mask = self.format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings)
CDR3_seqs = self.list_seqs_from_regex(regex_seq, print_warnings, raise_overload_warning)
pgen = 0
for CDR3_seq in CDR3_seqs:
if len(CDR3_seq) == 0:
continue
pgen += self.compute_CDR3_pgen(CDR3_seq, V_usage_mask, J_usage_mask)
return pgen
def compute_aa_CDR3_pgen(self, CDR3_seq, V_usage_mask_in = None, J_usage_mask_in = None, print_warnings = True):
"""Compute Pgen for the amino acid sequence CDR3_seq.
Conditioned on the V genes/alleles indicated in V_usage_mask_in and the
J genes/alleles in J_usage_mask_in. (Examples are TCRB sequences/model)
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' -- the standard amino acids,
plus any custom symbols for an expanded codon alphabet (note the
standard ambiguous amino acids -- B, J, X, and Z -- are included by
default).
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
pgen : float
The generation probability (Pgen) of the sequence
Examples
--------
>>> generation_probability.compute_aa_CDR3_pgen('CAWSVAPDRGGYTF')
1.5756106696284584e-10
>>> generation_probability.compute_aa_CDR3_pgen('CAWSVAPDRGGYTF', 'TRBV30*01', 'TRBJ1-2*01')
1.203646865765782e-10
>>> generation_probability.compute_aa_CDR3_pgen('CAWXXXXXXXGYTF')
7.8102586432014974e-05
"""
if len(CDR3_seq) == 0:
return 0
for aa in CDR3_seq:
if aa not in list(self.codons_dict.keys()):
#Check to make sure all symbols are accounted for
if print_warnings:
print('Invalid amino acid CDR3 sequence --- unfamiliar symbol: ' + aa)
return 0
V_usage_mask, J_usage_mask = self.format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings)
return self.compute_CDR3_pgen(CDR3_seq, V_usage_mask, J_usage_mask)
def compute_hamming_dist_1_pgen(self, CDR3_seq, V_usage_mask_in = None, J_usage_mask_in = None, print_warnings = True):
"""Compute Pgen of all seqs hamming dist 1 (in amino acids) from CDR3_seq.
Please note that this function will list out all the
sequences that are hamming distance 1 from the base sequence and then
calculate the Pgen of each sequence in succession. THIS CAN BE SLOW
as it computes Pgen for L+1 sequences where L = len(CDR3_seq). (Examples
are TCRB sequences/model)
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of amino acids (ONLY the standard amino acids).
Pgens for all sequences of hamming distance 1 (in amino acid sequence)
are summed.
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
pgen : float
The sum of generation probabilities (Pgens) of the sequences at most
hamming distance 1 (in amino acids) from CDR3_seq.
"""
#make sure that the symbol X is defined as the fully undetermined amino acid:
#X ~ ACDEFGHIKLMNPQRSTVWY
V_usage_mask, J_usage_mask = self.format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings)
if len(CDR3_seq) == 0:
return 0
for aa in CDR3_seq:
if aa not in 'ACDEFGHIKLMNPQRSTVWY':
#Check to make sure all symbols are accounted for
if print_warnings:
print('Invalid amino acid CDR3 sequence --- unfamiliar symbol: ' + aa)
return 0
tot_pgen = 0
for i in range(len(CDR3_seq)):
tot_pgen += self.compute_CDR3_pgen(CDR3_seq[:i] + 'X' + CDR3_seq[i+1:], V_usage_mask, J_usage_mask)
tot_pgen += -(len(CDR3_seq) - 1)*self.compute_CDR3_pgen(CDR3_seq, V_usage_mask, J_usage_mask)
return tot_pgen
def compute_nt_CDR3_pgen(self, CDR3_ntseq, V_usage_mask_in = None, J_usage_mask_in = None, print_warnings = True):
"""Compute Pgen for the inframe nucleotide sequence CDR3_ntseq.
Conditioned on the V genes/alleles indicated in V_usage_mask_in and the
J genes/alleles in J_usage_mask_in. (Examples are TCRB sequences/model)
Parameters
----------
CDR3_ntseq : str
Inframe nucleotide sequence composed of ONLY A, C, G, or T (either
uppercase or lowercase).
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
pgen : float64
The generation probability (Pgen) of the sequence
Examples
--------
>>> generation_probability.compute_nt_CDR3_pgen('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC')
3.2674893012379071e-12
>>> generation_probability.compute_nt_CDR3_pgen('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC', 'TRBV30*01', 'TRBJ1-2*01')
2.3986503758867323e-12
"""
if not len(CDR3_ntseq)%3 == 0:
#Make sure sequence is inframe
if print_warnings:
print('Invalid nucleotide CDR3 sequence --- out of frame sequence')
return 0
elif len(CDR3_ntseq) == 0:
return 0
else:
for nt in CDR3_ntseq:
if nt not in 'ACGTacgt':
if print_warnings:
print('Invalid nucleotide CDR3 sequence --- unfamiliar nucleotide: ' + nt)
return 0
V_usage_mask, J_usage_mask = self.format_usage_masks(V_usage_mask_in, J_usage_mask_in, print_warnings)
return self.compute_CDR3_pgen(nt2codon_rep(CDR3_ntseq), V_usage_mask, J_usage_mask)
    def compute_CDR3_pgen(self, CDR3_seq, V_usage_mask, J_usage_mask):
        """Dummy function that is replaced in classes GenerationProbabilityV(D)J."""
        #Proxy for the actual function that will call either the VDJ algorithm
        #or the VJ algorithm
        # NOTE: returns None if invoked on the base class directly.
        pass
#Formatting methods for the top level Pgen computation calls
def format_usage_masks(self, V_usage_mask_in, J_usage_mask_in, print_warnings = True):
"""Format raw usage masks into lists of indices.
Usage masks allows the Pgen computation to be conditioned on the V and J
gene/allele identities. The inputted masks are lists of strings, or a
single string, of the names of the genes or alleles to be conditioned on.
The default mask includes all productive V or J genes.
Parameters
----------
V_usage_mask_in : str or list
An object to indicate which V alleles should be considered. The default
input is None which returns the list of all productive V alleles.
J_usage_mask_in : str or list
An object to indicate which J alleles should be considered. The default
input is None which returns the list of all productive J alleles.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
Returns
-------
V_usage_mask : list of integers
Indices of the V alleles to be considered in the Pgen computation
J_usage_mask : list of integers
Indices of the J alleles to be considered in the Pgen computation
Examples
--------
>>> generation_probability.format_usage_masks('TRBV27*01','TRBJ1-1*01')
([34], [0])
>>> generation_probability.format_usage_masks('TRBV27*01', '')
([34], [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13])
>>> generation_probability.format_usage_masks(['TRBV27*01', 'TRBV13*01'], 'TRBJ1-1*01')
([34, 18], [0])
"""
#Format the V usage mask
if V_usage_mask_in is None: #Default case, use all productive V genes with non-zero probability
#V_usage_mask = [v for v, V in enumerate(ppp['cutV_genomic_CDR3_segs']) if len(V) > 0]
V_usage_mask = self.d_V_usage_mask
elif isinstance(V_usage_mask_in, list):
e_V_usage_mask = set()
for v in V_usage_mask_in:
try:
e_V_usage_mask = e_V_usage_mask.union(self.V_mask_mapping[v])
except KeyError:
if print_warnings:
print('Unfamiliar V gene/allele: ' + v)
pass
if len(e_V_usage_mask) == 0:
if print_warnings:
print('No recognized V genes/alleles. Using default V_usage_mask')
V_usage_mask = self.d_V_usage_mask
else:
V_usage_mask = list(e_V_usage_mask)
else:
try:
V_usage_mask = self.V_mask_mapping[V_usage_mask_in]
except KeyError:
#Do raise error here as the mask will be empty
if print_warnings:
print('Unfamiliar V usage mask: ' + str(V_usage_mask_in) + ', please check the allowed V alleles. Using default V_usage_mask')
V_usage_mask = self.d_V_usage_mask
#Format the J usage mask
if J_usage_mask_in is None: #Default case, use all productive J genes with non-zero probability
#J_usage_mask = [j for j, J in enumerate(ppp['cutJ_genomic_CDR3_segs']) if len(J) > 0]
J_usage_mask = self.d_J_usage_mask
elif isinstance(J_usage_mask_in, list):
e_J_usage_mask = set()
for j in J_usage_mask_in:
try:
e_J_usage_mask = e_J_usage_mask.union(self.J_mask_mapping[j])
except KeyError:
if print_warnings:
print('Unfamiliar J gene/allele: ' + j)
pass
if len(e_J_usage_mask) == 0:
if print_warnings:
print('No recognized J genes/alleles. Using default J_usage_mask')
J_usage_mask = self.d_J_usage_mask
else:
J_usage_mask = list(e_J_usage_mask)
else:
try:
J_usage_mask = self.J_mask_mapping[J_usage_mask_in]
except KeyError:
#Do raise error here as the mask will be empty
if print_warnings:
print('Unfamiliar J usage mask: ' + str(J_usage_mask_in) + ', please check the allowed J alleles. Using default J_usage_mask')
J_usage_mask = self.d_J_usage_mask
return V_usage_mask, J_usage_mask
def list_seqs_from_regex(self, regex_seq, print_warnings = True, raise_overload_warning = True):
"""List sequences that match regular expression template.
This function parses a limited regular expression vocabulary, and
lists all the sequences consistent with the regular expression. Supported
regex syntax: [] and {}. Cannot have two {} in a row. Note we can't use
Kline star (*) as this is the symbol for a stop codon --- use {}.
Parameters
----------
regex_seq : str
The regular expression string that represents the sequences to be
listed.
print_warnings : bool
Determines whether warnings are printed or not. Default ON.
raise_overload_warning : bool
A flag to warn of more than 10000 seqs corresponding to the regex_seq
Returns
-------
CDR3_seqs : list
A list of CDR3 sequences that correspond to the regex_seq
Examples
--------
>>> generation_probability.list_seqs_from_regex('CASS[AGR]SARPEQFF')
['CASSGSARPEQFF', 'CASSRSARPEQFF', 'CASSASARPEQFF']
>>> generation_probability.list_seqs_from_regex('CASSAX{0,5}SARPEQFF')
['CASSASARPEQFF',
'CASSAXXXXSARPEQFF',
'CASSAXXSARPEQFF',
'CASSAXXXXXSARPEQFF',
'CASSAXXXSARPEQFF',
'CASSAXSARPEQFF']
"""
aa_symbols = ''.join(self.codons_dict)
default_max_reps = 40
#Check to make sure that expression is of the right form/symbols
#Identify bracket expressions
bracket_ex = [x for x in re.findall('\[[' + aa_symbols + ']*?\]|\{\d+,{0,1}\d*\}', regex_seq)]
split_seq = re.split('\[[' + aa_symbols + ']*?\]|\{\d+,{0,1}\d*\}', regex_seq)
#Check that all remaining characters are in the codon dict
for aa in ''.join(split_seq):
if aa not in aa_symbols:
if print_warnings:
print('Unfamiliar symbol representing a codon:' + aa + ' --- check codon dictionary or the regex syntax')
return []
regex_list = [split_seq[i//2] if i%2 == 0 else bracket_ex[i//2] for i in range(len(bracket_ex) + len(split_seq)) if not (i%2 == 0 and len(split_seq[i//2]) == 0)]
max_num_seqs = 1
for l, ex in enumerate(regex_list[::-1]):
i = len(regex_list) - l - 1
if ex[0] == '[': #bracket expression
#check characters
for aa in ex.strip('[]'):
if aa not in aa_symbols:
if print_warnings:
print('Unfamiliar symbol representing a codon:' + aa + ' --- check codon dictionary')
return []
max_num_seqs *= len(ex) - 2
elif ex[0] == '{': #curly bracket
if i == 0:
if print_warnings:
print("Can't have {} expression at start of sequence")
return []
elif isinstance(regex_list[i-1], list):
if print_warnings:
print("Two {} expressions in a row is not supported")
return []
elif regex_list[i-1][0] == '[':
syms = regex_list[i-1].strip('[]')
regex_list[i-1] = ''
else:
syms = regex_list[i-1][-1]
regex_list[i-1] = regex_list[i-1][:-1]
if ',' not in ex:
new_expression = [int(ex.strip('{}')), int(ex.strip('{}')), syms]
max_num_seqs *= len(syms)**new_expression[0]
else:
try:
new_expression = [int(ex.strip('{}').split(',')[0]), int(ex.strip('{}').split(',')[1]), syms]
except ValueError: #No max limit --- use default
new_expression = [int(ex.strip('{}').split(',')[0]), default_max_reps, syms]
if new_expression[0] > new_expression[1]:
if print_warnings:
print('Check regex syntax --- should be {min,max}')
return []
max_num_seqs *= sum([len(syms)**n for n in range(new_expression[0], new_expression[1]+1)])/len(syms)
#print new_expression
regex_list[i] = new_expression
if max_num_seqs > 10000 and raise_overload_warning:
if print_warnings:
answer = input('Warning large number of sequences (estimated ' + str(max_num_seqs) + ' seqs) match the regular expression. Possible memory and time issues. Continue? (y/n)')
if not answer == 'y':
print('Canceling...')
return []
else:
return []
#print regex_list
CDR3_seqs = ['']
for l, ex in enumerate(regex_list[::-1]):
i = len(regex_list) - l - 1
if isinstance(ex, list): #curly bracket case
c_seqs = ['']
f_seqs = []
for j in range(ex[1] + 1):
if j in range(ex[0], ex[1]+1):
f_seqs += c_seqs
c_seqs = [aa + c_seq for aa in ex[2] for c_seq in c_seqs]
CDR3_seqs = [f_seq + CDR3_seq for f_seq in f_seqs for CDR3_seq in CDR3_seqs]
elif len(ex) == 0:
pass
elif ex[0] == '[': #square bracket case
CDR3_seqs = [aa + CDR3_seq for aa in ex.strip('[]') for CDR3_seq in CDR3_seqs]
else:
CDR3_seqs = [ex + CDR3_seq for CDR3_seq in CDR3_seqs]
return list(set(CDR3_seqs))
# Alignment/Matching methods
def max_nt_to_aa_alignment_left(self, CDR3_seq, ntseq):
"""Find maximum match between CDR3_seq and ntseq from the left.
This function returns the length of the maximum length nucleotide
subsequence of ntseq contiguous from the left (or 5' end) that is
consistent with the 'amino acid' sequence CDR3_seq.
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
ntseq : str
Genomic (V locus) nucleotide sequence to match.
Returns
-------
max_alignment : int
Maximum length (in nucleotides) nucleotide sequence that matches the
CDR3 'amino acid' sequence.
Example
--------
>>> generation_probability.max_nt_to_aa_alignment_left('CASSSEGAGGPSLRGHEQFF', 'TGTGCCAGCAGTTTATCGATA')
13
"""
max_alignment = 0
if len(ntseq) == 0:
return 0
aa_aligned = True
while aa_aligned:
if ntseq[max_alignment:max_alignment+3] in self.codons_dict[CDR3_seq[max_alignment//3]]:
max_alignment += 3
if max_alignment//3 == len(CDR3_seq):
return max_alignment
else:
break
aa_aligned = False
last_codon = ntseq[max_alignment:max_alignment+3]
codon_frag = ''
for nt in last_codon:
codon_frag += nt
if codon_frag in self.sub_codons_left[CDR3_seq[max_alignment//3]]:
max_alignment += 1
else:
break
return max_alignment
def max_nt_to_aa_alignment_right(self, CDR3_seq, ntseq):
"""Find maximum match between CDR3_seq and ntseq from the right.
This function returns the length of the maximum length nucleotide
subsequence of ntseq contiguous from the right (or 3' end) that is
consistent with the 'amino acid' sequence CDR3_seq
Parameters
----------
CDR3_seq : str
CDR3 sequence composed of 'amino acids' (single character symbols
each corresponding to a collection of codons as given by codons_dict).
ntseq : str
Genomic (J locus) nucleotide sequence to match.
Returns
-------
max_alignment : int
Maximum length (in nucleotides) nucleotide sequence that matches the
CDR3 'amino acid' sequence.
Example
--------
>>> generation_probability.max_nt_to_aa_alignment_right('CASSSEGAGGPSLRGHEQFF', 'TTCATGAACACTGAAGCTTTCTTT')
6
"""
r_CDR3_seq = CDR3_seq[::-1] #reverse CDR3_seq
r_ntseq = ntseq[::-1] #reverse ntseq
max_alignment = 0
if len(ntseq) == 0:
return 0
aa_aligned = True
while aa_aligned:
if r_ntseq[max_alignment:max_alignment+3][::-1] in self.codons_dict[r_CDR3_seq[max_alignment//3]]:
max_alignment += 3
if max_alignment//3 == len(CDR3_seq):
return max_alignment
else:
break
aa_aligned = False
r_last_codon = r_ntseq[max_alignment:max_alignment+3]
codon_frag = ''
for nt in r_last_codon:
codon_frag = nt + codon_frag
if codon_frag in self.sub_codons_right[r_CDR3_seq[max_alignment//3]]:
max_alignment += 1
else:
break
return max_alignment
#%%
class GenerationProbabilityVDJ(GenerationProbability, PreprocessedParametersVDJ):
"""Class used to compute the Pgen of CDR3 sequences from a VDJ model.
All of the attributes of GenerationProbabilityVDJ are inherited from the
class PreprocessedParametersVDJ. The methods of the class are used to
compute the Pgen of an 'amino acid' sequence from a VDJ generative model.
Attributes
----------
codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
sub_codons_left : dict
Dictionary of the 1 and 2 nucleotide prefixes (read from 5') for
each codon in an 'amino acid' grouping
sub_codons_right : dict
Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for
each codon in an 'amino acid' grouping
V_allele_names : list of strings
List of V allele names in genomic_data
d_V_usage_mask : list of int
Default V usage mask of indices of all productive V genes/alleles.
V_mask_mapping : dict
Dictionary mapping allowed keywords (strings corresponding to V gene
and V allele names) to the indices of the V alleles they refer to.
cutV_genomic_CDR3_segs : list of strings
List of the V germline nucleotide sequences, trimmed to begin at the
CDR3 region (includes the conserved C residue) with the maximum number
of reverse complementary palindromic insertions appended.
PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(V)*P(delV|V) into the correct form for a Pi
array or V_{x_1}. This is only done for the first and last position in
each codon.
PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi array
or V_{x_1} given the 'amino acid'.
D_allele_names : list of strings
List of D allele names in genomic_data
cutD_genomic_CDR3_segs : list of strings
List of the D germline nucleotide sequences, with the maximum number
of reverse complementary palindromic insertions appended to both ends.
PD_nt_pos_vec : list of ndarrays
For each D allele, format P(delDl, delDr|D) into the correct form for a
Pi array as if each position were the first in a codon.
PD_2nd_nt_pos_per_aa_vec : list of dicts
For each D allele, and each 'amino acid', format P(delDl, delDr|D) for
positions in the middle of a codon into the correct form for a Pi array
as if each position were the middle of a codon corresponding to the
'amino acid'.
min_delDl_given_DdelDr : list of lists
minimum delDl for each delDr, D combination.
max_delDl_given_DdelDr : list of lists
maximum delDl for each delDr, D combination.
zeroD_given_D : list of floats
The probability that a given D allele is fully deleted away.
PdelDldelDr_given_D : ndarray
Joint probability distribution of the D deletions given the D allele,
i.e. P(delDl, delDr |D)
J_allele_names : list of strings
List of J allele names in genomic_data
d_J_usage_mask : list of int
Default J usage mask of indices of all productive J genes/alleles.
J_mask_mapping : dict
Dictionary mapping allowed keywords (strings corresponding to V gene
and V allele names) to the indices of the V alleles they refer to.
cutJ_genomic_CDR3_segs : list of strings
List of the J germline nucleotide sequences, trimmed to end at the
CDR3 region (includes the conserved F or W residue) with the maximum
number of reverse complementary palindromic insertions appended.
PJdelJ_nt_pos_vec : list
For each J allele, format P(J)*P(delJ|J) into the correct form for a Pi
array or J(D)^{x_4}. This is only done for the first and last position
in each codon.
PJdelJ_2nd_nt_pos_per_aa_vec : list
For each J allele, and each 'amino acid', format P(J)*P(delJ|J) for
positions in the middle of a codon into the correct form for a Pi array
or J(D)^{x_4} given the 'amino acid'.
PD_given_J : ndarray
Probability distribution of D conditioned on J, i.e. P(D|J).
PinsVD : ndarray
Probability distribution of the VD (N1) insertion sequence length
PinsDJ : ndarray
Probability distribution of the DJ (N2) insertion sequence length
Rvd : ndarray
Markov transition matrix for the VD insertion junction.
Rdj : ndarray
Markov transition matrix for the DJ insertion junction.
first_nt_bias_insVD : ndarray
(4,) array of the probability distribution of the indentity of the
first nucleotide insertion for the VD junction.
zero_nt_bias_insVD : ndarray
(4,) array of the probability distribution of the indentity of the
the nucleotide BEFORE the VD insertion.
zero_nt_bias_insVD = Rvd^{-1}first_nt_bias_insVD
first_nt_bias_insDJ : ndarray
(4,) array of the probability distribution of the indentity of the
first nucleotide insertion for the DJ junction.
zero_nt_bias_insDJ : ndarray
(4,) array of the probability distribution of the indentity of the
the nucleotide BEFORE the DJ insertion. Note, as the Markov model
at the DJ junction goes 3' to 5' this is the position AFTER the
insertions reading left to right.
zero_nt_bias_insVD = Rvd^{-1}first_nt_bias_insVD
Tvd, Svd, Dvd, lTvd, lDvd : dicts
Dictionaries (by 'amino acid') of insertion transfer matrices
((4, 4) ndarrays) for the VD junction.
Tdj, Sdj, Ddj, rTdj, rDdj : dicts
Dictionaries (by 'amino acid') of insertion transfer matrices
((4, 4) ndarrays) for the DJ junction.
"""
def __init__(self, generative_model, genomic_data, alphabet_file = None):
    """Initialize GenerationProbabilityVDJ.

    This initialization inherits all of the attributes of
    PreprocessedParametersVDJ (which include all of the processed
    parameters needed for Pgen computation) and the methods of
    GenerationProbability which include some wrappers/formatting of
    sequences to make Pgen computation of nucleotide and regular expression
    sequences easier (etc).

    Parameters
    ----------
    generative_model : GenerativeModelVDJ
        VDJ generative model class containing the model parameters.
    genomic_data : GenomicDataVDJ
        VDJ genomic data class containing the V, D, and J germline
        sequences and info.
    alphabet_file : str, optional
        File name (full pathing from current directory) for a custom alphabet
        definition. If no file is provided, the default alphabet is used, i.e.
        standard amino acids, undetermined amino acids (B, J, X, and Z), and
        single codon symbols.
    """
    # Deliberate explicit (non-super) base initializers: GenerationProbability
    # provides the sequence-formatting/wrapper methods (no arguments), while
    # PreprocessedParametersVDJ computes and attaches all processed model
    # parameters that the compute_Pi_* methods below read as attributes.
    GenerationProbability.__init__(self)
    PreprocessedParametersVDJ.__init__(self, generative_model, genomic_data, alphabet_file)
def compute_CDR3_pgen(self, CDR3_seq, V_usage_mask, J_usage_mask):
    """Compute Pgen of the CDR3 'amino acid' sequence from the VDJ model.

    The computation is conditioned on the (already formatted) V alleles in
    V_usage_mask and the J alleles in J_usage_mask. (Examples are TCRB
    sequences/model.)

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation

    Returns
    -------
    pgen : float
        The generation probability (Pgen) of the sequence

    Examples
    --------
    >>> compute_CDR3_pgen('CAWSVAPDRGGYTF', ppp, [42], [1])
    1.203646865765782e-10
    >>> compute_CDR3_pgen(nt2codon_rep('TGTGCCTGGAGTGTAGCTCCGGACAGGGGTGGCTACACCTTC'), ppp, [42], [1])
    2.3986503758867323e-12
    >>> compute_CDR3_pgen('\xbb\x96\xab\xb8\x8e\xb6\xa5\x92\xa8\xba\x9a\x93\x94\x9f', ppp, [42], [1])
    2.3986503758867323e-12
    """
    # Left (V) side: germline V contribution P(V, delV) ...
    Pi_V, max_V_align = self.compute_Pi_V(CDR3_seq, V_usage_mask)
    # ... then fold in the VD (N1) insertions to get the complete left factor.
    Pi_L = self.compute_Pi_L(CDR3_seq, Pi_V, max_V_align)
    # Right (J) side: germline J contribution P(D, J, delJ), kept per-D ...
    Pi_J_given_D, max_J_align = self.compute_Pi_J_given_D(CDR3_seq, J_usage_mask)
    # ... fold in the DJ (N2) insertions ...
    Pi_JinsDJ_given_D = self.compute_Pi_JinsDJ_given_D(CDR3_seq, Pi_J_given_D, max_J_align)
    # ... and the D germline contribution P(delDl, delDr | D) completes it.
    Pi_R = self.compute_Pi_R(CDR3_seq, Pi_JinsDJ_given_D)
    # Contract the left and right Pi arrays over every adjacent split point
    # to sum out the internal nucleotide index and obtain the total Pgen.
    return sum(np.dot(Pi_L[:, split], Pi_R[:, split + 1])
               for split in range(3*len(CDR3_seq) - 1))
#Genomic V alignment/matching (contribution from P(V, delV)), return Pi_V
def compute_Pi_V(self, CDR3_seq, V_usage_mask):
    """Compute Pi_V.

    Returns the Pi array built from the V genomic factors, P(V)*P(delV|V),
    corresponding to V_{x_1}.

    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation
    self.cutV_genomic_CDR3_segs : list of strings
        List of all the V genomic nucleotide sequences trimmed to begin at the
        conserved C residue and with the maximum number of palindromic
        insertions appended.
    self.PVdelV_nt_pos_vec : list of ndarrays
        For each V allele, format P(V)*P(delV|V) into the correct form for
        a Pi array or V_{x_1}. This is only done for the first and last
        position in each codon.
    self.PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
        For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
        positions in the middle of a codon into the correct form for a Pi
        array or V_{x_1} given the 'amino acid'.

    Returns
    -------
    Pi_V : ndarray
        (4, 3L) array corresponding to V_{x_1}.
    max_V_align: int
        Maximum alignment of the CDR3_seq to any genomic V allele allowed by
        V_usage_mask.
    """
    # The cutV_genomic_CDR3_segs INCLUDE the palindromic insertions (they are
    # max_palindrome nts longer than the template) and are already pruned to
    # start at the conserved C.
    seq_len_nt = len(CDR3_seq)*3
    Pi_V = np.zeros((4, seq_len_nt))  # aggregate weight per nt identity and position
    align_lens = []
    for v_idx in V_usage_mask:
        try:
            v_seg = self.cutV_genomic_CDR3_segs[v_idx]
        except IndexError:
            print('Check provided V usage mask. Contains indicies out of allowed range.')
            continue
        align_len = self.max_nt_to_aa_alignment_left(CDR3_seq, v_seg)
        align_lens.append(align_len)
        if align_len <= 0:
            continue
        # First/last codon positions come straight from PVdelV_nt_pos_vec ...
        contrib = self.PVdelV_nt_pos_vec[v_idx][:, :align_len].copy()
        # ... middle-of-codon positions are overwritten with the per-'amino
        # acid' formatted weights.
        for mid in range(1, align_len, 3):
            contrib[:, mid] = self.PVdelV_2nd_nt_pos_per_aa_vec[v_idx][CDR3_seq[mid//3]][:, mid]
        Pi_V[:, :align_len] += contrib
    # NOTE(review): max() raises ValueError if every index in V_usage_mask was
    # out of range — matches the original behavior.
    return Pi_V, max(align_lens)
#Include VD insertions (Rvd and PinsVD) to get the total contribution from the left (5') side. Return Pi_L
def compute_Pi_L(self, CDR3_seq, Pi_V, max_V_align):
    """Compute Pi_L.

    This function returns the Pi array from the model factors of the V genomic
    contributions, P(V)*P(delV|V), and the VD (N1) insertions,
    first_nt_bias_insVD(m_1)PinsVD(\ell_{VD})\prod_{i=2}^{\ell_{VD}}Rvd(m_i|m_{i-1}).
    This corresponds to V_{x_1}{M^{x_1}}_{x_2}.

    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    Pi_V : ndarray
        (4, 3L) array corresponding to V_{x_1}.
    max_V_align : int
        Maximum alignment of the CDR3_seq to any genomic V allele allowed by
        V_usage_mask.
    self.PinsVD : ndarray
        Probability distribution of the VD (N1) insertion sequence length
    self.first_nt_bias_insVD : ndarray
        (4,) array of the probability distribution of the identity of the
        first nucleotide insertion for the VD junction.
    self.zero_nt_bias_insVD : ndarray
        (4,) array of the probability distribution of the identity of
        the nucleotide BEFORE the VD insertion.
        zero_nt_bias_insVD = Rvd^{-1}first_nt_bias_insVD
    self.Tvd : dict
        Dictionary of full codon transfer matrices ((4, 4) ndarrays) by
        'amino acid'.
    self.Svd : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the VD insertion ending in the first position.
    self.Dvd : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the VD insertion ending in the second position.
    self.lTvd : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the VD insertion starting in the first position.
    self.lDvd : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        VD insertion starting in the first position and ending in the second
        position of the same codon.

    Returns
    -------
    Pi_L : ndarray
        (4, 3L) array corresponding to V_{x_1}{M^{x_1}}_{x_2}.
    """
    #max_insertions = 30 #len(PinsVD) - 1 should zeropad the last few spots
    max_insertions = len(self.PinsVD) - 1
    Pi_L = np.zeros((4, len(CDR3_seq)*3))
    # The V segment can end at any of the three positions within a codon, and
    # each case needs bespoke handling of the first 0-2 insertions before a
    # common whole-codon recursion (Svd/Dvd/Tvd transfer matrices) extends the
    # insertion weights three nucleotides (one 'amino acid') at a time.
    # base_ins tracks how many insertions have been accounted for so far.
    #start position is first nt in a codon
    for init_pos in range(0, max_V_align, 3):
        #Zero insertions
        Pi_L[:, init_pos] += self.PinsVD[0]*Pi_V[:, init_pos]
        #One insertion
        Pi_L[:, init_pos+1] += self.PinsVD[1]*np.dot(self.lDvd[CDR3_seq[init_pos//3]], Pi_V[:, init_pos])
        #Two insertions and compute the base nt vec for the standard loop
        current_base_nt_vec = np.dot(self.lTvd[CDR3_seq[init_pos//3]], Pi_V[:, init_pos])
        Pi_L[0, init_pos+2] += self.PinsVD[2]*np.sum(current_base_nt_vec)
        base_ins = 2
        #Loop over all other insertions using base_nt_vec
        for aa in CDR3_seq[init_pos//3 + 1: init_pos//3 + max_insertions//3]:
            Pi_L[:, init_pos+base_ins+1] += self.PinsVD[base_ins + 1]*np.dot(self.Svd[aa], current_base_nt_vec)
            Pi_L[:, init_pos+base_ins+2] += self.PinsVD[base_ins + 2]*np.dot(self.Dvd[aa], current_base_nt_vec)
            current_base_nt_vec = np.dot(self.Tvd[aa], current_base_nt_vec)
            Pi_L[0, init_pos+base_ins+3] += self.PinsVD[base_ins + 3]*np.sum(current_base_nt_vec)
            base_ins +=3
    #start position is second nt in a codon
    for init_pos in range(1, max_V_align, 3):
        #Zero insertions
        Pi_L[:, init_pos] += self.PinsVD[0]*Pi_V[:, init_pos]
        #One insertion --- we first compute our p vec by pairwise mult with the ss distr
        current_base_nt_vec = np.multiply(Pi_V[:, init_pos], self.first_nt_bias_insVD)
        Pi_L[0, init_pos+1] += self.PinsVD[1]*np.sum(current_base_nt_vec)
        base_ins = 1
        #Loop over all other insertions using base_nt_vec
        for aa in CDR3_seq[init_pos//3 + 1: init_pos//3 + max_insertions//3]:
            Pi_L[:, init_pos+base_ins+1] += self.PinsVD[base_ins + 1]*np.dot(self.Svd[aa], current_base_nt_vec)
            Pi_L[:, init_pos+base_ins+2] += self.PinsVD[base_ins + 2]*np.dot(self.Dvd[aa], current_base_nt_vec)
            current_base_nt_vec = np.dot(self.Tvd[aa], current_base_nt_vec)
            Pi_L[0, init_pos+base_ins+3] += self.PinsVD[base_ins + 3]*np.sum(current_base_nt_vec)
            base_ins +=3
    #start position is last nt in a codon
    for init_pos in range(2, max_V_align, 3):
        #Zero insertions
        Pi_L[0, init_pos] += self.PinsVD[0]*Pi_V[0, init_pos]
        # Use the exact pre-insertion distribution rather than the steady
        # state (the steady-state version is kept below for reference).
        #current_base_nt_vec = first_nt_bias_insVD*Pi_V[0, init_pos] #Okay for steady state
        current_base_nt_vec = self.zero_nt_bias_insVD*Pi_V[0, init_pos]
        base_ins = 0
        #Loop over all other insertions using base_nt_vec
        for aa in CDR3_seq[init_pos//3 + 1: init_pos//3 + max_insertions//3]:
            Pi_L[:, init_pos+base_ins+1] += self.PinsVD[base_ins + 1]*np.dot(self.Svd[aa], current_base_nt_vec)
            Pi_L[:, init_pos+base_ins+2] += self.PinsVD[base_ins + 2]*np.dot(self.Dvd[aa], current_base_nt_vec)
            current_base_nt_vec = np.dot(self.Tvd[aa], current_base_nt_vec)
            Pi_L[0, init_pos+base_ins+3] += self.PinsVD[base_ins + 3]*np.sum(current_base_nt_vec)
            base_ins +=3
    return Pi_L
#Genomic J alignment/matching (contribution from P(D, J, delJ)), return Pi_J_given_D
def compute_Pi_J_given_D(self, CDR3_seq, J_usage_mask):
    """Compute Pi_J conditioned on D.

    This function returns the Pi array from the model factors of the D and J
    genomic contributions, P(D, J)*P(delJ|J) = P(D|J)P(J)P(delJ|J). This
    corresponds to J(D)^{x_4}.

    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.
    self.cutJ_genomic_CDR3_segs : list
        List of all the J genomic nucleotide sequences trimmed to begin at the
        conserved 3' residue (F/W) and with the maximum number of palindromic
        insertions appended.
    self.PD_given_J : ndarray
        Probability distribution of D conditioned on J, i.e. P(D|J).
    self.PJdelJ_nt_pos_vec : list of ndarrays
        For each J allele, format P(J)*P(delJ|J) into the correct form for
        a Pi array or J(D)^{x_4}. This is only done for the first and last
        position in each codon.
    self.PJdelJ_2nd_nt_pos_per_aa_vec : list of dicts
        For each J allele, and each 'amino acid', format P(J)*P(delJ|J) for
        positions in the middle of a codon into the correct form for a Pi
        array or J(D)^{x_4} given the 'amino acid'.

    Returns
    -------
    Pi_J_given_D : list
        List of (4, 3L) ndarrays corresponding to J(D)^{x_4}.
    max_J_align: int
        Maximum alignment of the CDR3_seq to any genomic J allele allowed by
        J_usage_mask.
    """
    #Note, the cutJ_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
    #furthermore, the genomic sequence should be pruned to start at a conserved region on the J side
    num_D_genes = self.PD_given_J.shape[0]
    Pi_J_given_D = [np.zeros((4, len(CDR3_seq)*3)) for i in range(num_D_genes)] #Holds the aggregate weight for each nt possiblity and position
    alignment_lengths = []
    for J_in in J_usage_mask:
        try:
            cutJ_gen_seg = self.cutJ_genomic_CDR3_segs[J_in]
        except IndexError:
            # BUGFIX: this message previously said 'V usage mask' even though
            # the out-of-range index comes from the J usage mask.
            print('Check provided J usage mask. Contains indicies out of allowed range.')
            continue
        # J alignment grows from the right (3') end of the CDR3 sequence.
        current_alignment_length = self.max_nt_to_aa_alignment_right(CDR3_seq, cutJ_gen_seg)
        alignment_lengths += [current_alignment_length]
        current_Pi_J = np.zeros((4, len(CDR3_seq)*3))
        if current_alignment_length > 0:
            #For first and last nt in a codon use PJdelJ_nt_pos_vec
            current_Pi_J[:, -current_alignment_length:] = self.PJdelJ_nt_pos_vec[J_in][:, -current_alignment_length:]
            for pos in range(-2, -current_alignment_length-1, -3): #for middle nt use PJdelJ_2nd_nt_pos_per_aa_vec
                current_Pi_J[:, pos] = self.PJdelJ_2nd_nt_pos_per_aa_vec[J_in][CDR3_seq[pos//3]][:, pos]
            # Distribute this J allele's weight across every D gene,
            # weighted by P(D|J), to build the per-D Pi arrays.
            for D_in, pd_given_j in enumerate(self.PD_given_J[:, J_in]):
                Pi_J_given_D[D_in][:, -current_alignment_length:] += pd_given_j*current_Pi_J[:, -current_alignment_length:]
    # NOTE(review): max() raises ValueError if every index in J_usage_mask was
    # out of range — same behavior as compute_Pi_V.
    return Pi_J_given_D, max(alignment_lengths)
#Include DJ insertions (Rdj and PinsDJ), return Pi_JinsDJ_given_D
def compute_Pi_JinsDJ_given_D(self, CDR3_seq, Pi_J_given_D, max_J_align):
    """Compute Pi_JinsDJ conditioned on D.

    This function returns the Pi array from the model factors of the J genomic
    contributions, P(D,J)*P(delJ|J), and the DJ (N2) insertions,
    first_nt_bias_insDJ(n_1)PinsDJ(\ell_{DJ})\prod_{i=2}^{\ell_{DJ}}Rdj(n_i|n_{i-1})
    conditioned on D identity. This corresponds to {N^{x_3}}_{x_4}J(D)^{x_4}.

    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    Pi_J_given_D : list
        List of (4, 3L) ndarrays corresponding to J(D)^{x_4}.
    max_J_align : int
        Maximum alignment of the CDR3_seq to any genomic J allele allowed by
        J_usage_mask.
    self.PinsDJ : ndarray
        Probability distribution of the DJ (N2) insertion sequence length
    self.first_nt_bias_insDJ : ndarray
        (4,) array of the probability distribution of the identity of the
        first nucleotide insertion for the DJ junction.
    self.zero_nt_bias_insDJ : ndarray
        (4,) array of the probability distribution of the identity of
        the nucleotide BEFORE the DJ insertion. Note, as the Markov model
        at the DJ junction goes 3' to 5' this is the position AFTER the
        insertions reading left to right.
    self.Tdj : dict
        Dictionary of full codon transfer matrices ((4, 4) ndarrays) by
        'amino acid'.
    self.Sdj : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the DJ insertion ending in the first position.
    self.Ddj : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the DJ insertion ending in the second position.
    self.rTdj : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        the DJ insertion starting in the first position.
    self.rDdj : dict
        Dictionary of transfer matrices ((4, 4) ndarrays) by 'amino acid' for
        DJ insertion starting in the first position and ending in the second
        position of the same codon.

    Returns
    -------
    Pi_JinsDJ_given_D : list
        List of (4, 3L) ndarrays corresponding to {N^{x_3}}_{x_4}J(D)^{x_4}.
    """
    #max_insertions = 30 #len(PinsVD) - 1 should zeropad the last few spots
    max_insertions = len(self.PinsDJ) - 1
    Pi_JinsDJ_given_D = [np.zeros((4, len(CDR3_seq)*3)) for i in range(len(Pi_J_given_D))]
    # Mirror image of compute_Pi_L: the recursion runs right-to-left (the DJ
    # Markov model is defined 3' to 5'), using negative indices from the end
    # of the sequence. The three loops handle the three possible codon
    # positions at which the J-side contribution starts; base_ins tracks how
    # many insertions have been accounted for so far.
    for D_in in range(len(Pi_J_given_D)):
        #start position is first nt in a codon
        for init_pos in range(-1, -(max_J_align+1), -3):
            #Zero insertions
            Pi_JinsDJ_given_D[D_in][:, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][:, init_pos]
            #One insertion
            Pi_JinsDJ_given_D[D_in][:, init_pos-1] += self.PinsDJ[1]*np.dot(self.rDdj[CDR3_seq[init_pos//3]], Pi_J_given_D[D_in][:, init_pos])
            #Two insertions and compute the base nt vec for the standard loop
            current_base_nt_vec = np.dot(self.rTdj[CDR3_seq[init_pos//3]], Pi_J_given_D[D_in][:, init_pos])
            Pi_JinsDJ_given_D[D_in][0, init_pos-2] += self.PinsDJ[2]*np.sum(current_base_nt_vec)
            base_ins = 2
            #Loop over all other insertions using base_nt_vec
            for aa in CDR3_seq[init_pos//3 - 1: init_pos//3 - max_insertions//3:-1]:
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins +=3
        #start position is second nt in a codon
        for init_pos in range(-2, -(max_J_align+1), -3):
            #Zero insertions
            Pi_JinsDJ_given_D[D_in][:, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][:, init_pos]
            #One insertion --- we first compute our p vec by pairwise mult with the ss distr
            current_base_nt_vec = np.multiply(Pi_J_given_D[D_in][:, init_pos], self.first_nt_bias_insDJ)
            Pi_JinsDJ_given_D[D_in][0, init_pos-1] += self.PinsDJ[1]*np.sum(current_base_nt_vec)
            base_ins = 1
            #Loop over all other insertions using base_nt_vec
            for aa in CDR3_seq[init_pos//3 - 1: init_pos//3 - max_insertions//3:-1]:
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins +=3
        #start position is last nt in a codon
        for init_pos in range(-3, -(max_J_align+1), -3):
            #Zero insertions
            Pi_JinsDJ_given_D[D_in][0, init_pos] += self.PinsDJ[0]*Pi_J_given_D[D_in][0, init_pos]
            # Use the exact pre-insertion distribution rather than the steady
            # state (the steady-state version is kept below for reference).
            #current_base_nt_vec = first_nt_bias_insDJ*Pi_J_given_D[D_in][0, init_pos] #Okay for steady state
            current_base_nt_vec = self.zero_nt_bias_insDJ*Pi_J_given_D[D_in][0, init_pos]
            base_ins = 0
            #Loop over all other insertions using base_nt_vec
            for aa in CDR3_seq[init_pos//3 - 1: init_pos//3 - max_insertions//3:-1]:
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-1] += self.PinsDJ[base_ins + 1]*np.dot(self.Sdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][:, init_pos-base_ins-2] += self.PinsDJ[base_ins + 2]*np.dot(self.Ddj[aa], current_base_nt_vec)
                current_base_nt_vec = np.dot(self.Tdj[aa], current_base_nt_vec)
                Pi_JinsDJ_given_D[D_in][0, init_pos-base_ins-3] += self.PinsDJ[base_ins + 3]*np.sum(current_base_nt_vec)
                base_ins +=3
    return Pi_JinsDJ_given_D
#Include D genomic contribution (P(delDl, delDr | D)) to complete the contribution from the right (3') side. Return Pi_R
def compute_Pi_R(self, CDR3_seq, Pi_JinsDJ_given_D):
    """Compute Pi_R.

    This function returns the Pi array from the model factors of the D and J
    genomic contributions, P(D, J)*P(delJ|J)P(delDl, delDr |D) and
    the DJ (N2) insertions,
    first_nt_bias_insDJ(n_1)PinsDJ(\ell_{DJ})\prod_{i=2}^{\ell_{DJ}}Rdj(n_i|n_{i-1}).
    This corresponds to \sum_D {D^{x_2}}_{x_3}{N^{x_3}}_{x_4}J(D)^{x_4}.

    For clarity in parsing the algorithm implementation, we include which
    instance attributes are used in the method as 'parameters.'

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by codons_dict).
    Pi_JinsDJ_given_D : list
        List of (4, 3L) ndarrays corresponding to {N^{x_3}}_{x_4}J(D)^{x_4}.
    self.cutD_genomic_CDR3_segs : list of strings
        List of all the D genomic nucleotide sequences with the maximum number
        of palindromic insertions appended on both ends.
    self.PD_given_J : ndarray
        Probability distribution of D conditioned on J, i.e. P(D|J).
    self.PD_nt_pos_vec : list of ndarrays
        For each D allele, format P(delDl, delDr|D) into the correct form
        for a Pi array as if each position were the first in a codon.
    self.PD_2nd_nt_pos_per_aa_vec : list of dicts
        For each D allele, and each 'amino acid', format P(delDl, delDr|D)
        for positions in the middle of a codon into the correct form for a
        Pi array as if each position were the middle of a codon
        corresponding to the 'amino acid'.
    self.min_delDl_given_DdelDr : list of lists
        minimum delDl for each delDr, D combination.
    self.max_delDl_given_DdelDr : list of lists
        maximum delDl for each delDr, D combination.
    self.PdelDldelDr_given_D : ndarray
        Joint probability distribution of the D deletions given the D allele,
        i.e. P(delDl, delDr |D)
    self.zeroD_given_D : list of floats
        The probability that a given D allele is fully deleted away.
    self.codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the values
        being lists of codons corresponding to the symbol.
    self.sub_codons_right : dict
        Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for
        each codon in an 'amino acid' grouping

    Returns
    -------
    Pi_R : ndarray
        (4, 3L) array corresponding to
        \sum_D {D^{x_2}}_{x_3}{N^{x_3}}_{x_4}J(D)^{x_4}.
    """
    #Need to consider all D alignments from all possible positions and right deletions.
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    #n_aaseq = [aa_dict[aa] for aa in CDR3_seq]
    Pi_R = np.zeros((4, len(CDR3_seq)*3))
    min_pos = -len(CDR3_seq)*3
    num_dell_pos, num_delr_pos, num_D_genes = self.PdelDldelDr_given_D.shape
    # For every D allele, every possible start position of the D segment
    # (indexed from the right end of the sequence with negative indices), and
    # every right-deletion count delDr, extend the D germline alignment
    # leftwards one nucleotide at a time, accumulating the deletion
    # probabilities until the alignment breaks.
    for D_in, cutD_gen_seg in enumerate(self.cutD_genomic_CDR3_segs):
        l_D_seg = len(cutD_gen_seg)
        #start position is first nt in a codon
        for init_pos in range(-1, -len(CDR3_seq)*3-1, -3):
            # Fully deleted D contributes directly at the start position.
            Pi_R[:, init_pos] += Pi_JinsDJ_given_D[D_in][:, init_pos]*self.zeroD_given_D[D_in]
            # Precompute, per middle/last codon nucleotide, the weight of all
            # allowed codons consistent with the J-side weights at init_pos.
            second_pos_dict = {'A': np.zeros(4), 'C': np.zeros(4), 'G': np.zeros(4), 'T': np.zeros(4)}
            codon_prefix_dict = {}
            for last_nt in 'ACGT':
                for second_nt in 'ACGT':
                    codon_prefix_dict[last_nt + second_nt] = np.zeros(4)
                    #for first_nt in ['ACGT'[nt] for nt in range(4) if Pi_JinsDJ_given_D[D_in][nt, init_pos] > 0]:
                    for first_nt in 'ACGT':
                        if last_nt + second_nt + first_nt in self.codons_dict[CDR3_seq[init_pos//3]]: #possible allowed codon
                            second_pos_dict[second_nt][nt2num[last_nt]] += Pi_JinsDJ_given_D[D_in][nt2num[first_nt], init_pos] #base weight for middle pos nt
                            codon_prefix_dict[last_nt + second_nt][0] += Pi_JinsDJ_given_D[D_in][nt2num[first_nt], init_pos] #base weight for last pos nt
            # Drop zero-weight entries so membership tests below double as
            # "has any weight" checks.
            for nt1 in 'ACGT':
                if np.sum(second_pos_dict[nt1]) == 0:
                    second_pos_dict.pop(nt1, None)
                for nt2 in 'ACGT':
                    if np.sum(codon_prefix_dict[nt1+nt2])== 0:
                        codon_prefix_dict.pop(nt1+nt2, None)
            # if len(second_pos_dict)> 0:
            #     print second_pos_dict
            #     return -1
            for delDr in range(num_delr_pos):
                if self.min_delDl_given_DdelDr[D_in][delDr] == -1: # P(delDr | D) = 0 for this delDr --> move to next
                    continue
                #Check if first nt from the D segment is okay
                if cutD_gen_seg[l_D_seg - delDr - 1] in list(second_pos_dict.keys()):
                    #The dell pos may be out of range of the PdelDldelDr_given_D -- check!
                    if l_D_seg - delDr - 1 <= self.max_delDl_given_DdelDr[D_in][delDr]:
                        Pi_R[:, init_pos - 1] += self.PdelDldelDr_given_D[l_D_seg - delDr - 1, delDr, D_in]*second_pos_dict[cutD_gen_seg[l_D_seg - delDr - 1]]
                else:
                    continue #not okay, reject the alignment
                #Check if the second nt from the D segment is okay
                if cutD_gen_seg[l_D_seg - delDr - 2:l_D_seg - delDr] in list(codon_prefix_dict.keys()):
                    #The dell pos may be out of range of the PdelDldelDr_given_D -- check!
                    if l_D_seg - delDr - 2 <= self.max_delDl_given_DdelDr[D_in][delDr]:
                        Pi_R[0, init_pos - 2] += self.PdelDldelDr_given_D[l_D_seg - delDr - 2, delDr, D_in]*codon_prefix_dict[cutD_gen_seg[l_D_seg - delDr - 2:l_D_seg - delDr]][0]
                    # base_prob carries the J-side weight of this alignment
                    # into the main extension loop below.
                    base_prob = codon_prefix_dict[cutD_gen_seg[l_D_seg - delDr - 2:l_D_seg - delDr]][0]
                else:
                    continue #no longer aligned, move to next delDr
                #Enter main loop
                for pos in range(init_pos - 3, max(init_pos - l_D_seg + delDr, min_pos)-1, -1):
                    #note delDl = D_pos
                    D_pos = l_D_seg - delDr - 1 - ((init_pos - 1) - pos)
                    #The dell pos may be out of range of the PdelDldelDr_given_D -- check!
                    if D_pos > self.max_delDl_given_DdelDr[D_in][delDr]:
                        current_PdelDldelDr = 0
                    else:
                        current_PdelDldelDr = self.PdelDldelDr_given_D[D_pos, delDr, D_in]
                    #Position is the first nt in codon
                    if pos%3 == 2:
                        #check alignment
                        if cutD_gen_seg[D_pos] in self.sub_codons_right[CDR3_seq[pos//3]]:
                            Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_nt_pos_vec[D_in][:, D_pos]
                        else:
                            break #no longer aligned -- exit loop
                    #Position is the second nt in codon
                    elif pos%3 == 1:
                        #check alignment
                        if cutD_gen_seg[D_pos:D_pos + 2] in self.sub_codons_right[CDR3_seq[pos//3]]:
                            Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_2nd_nt_pos_per_aa_vec[D_in][CDR3_seq[pos//3]][:, D_pos]
                        else:
                            break #no longer aligned --- exit loop
                    #Position is the last nt in codon
                    else:
                        #check alignment
                        if cutD_gen_seg[D_pos:D_pos + 3] in self.codons_dict[CDR3_seq[pos//3]]:
                            Pi_R[0, pos] += current_PdelDldelDr*base_prob
                        else:
                            break #no longer aligned --- exit loop
        #start position is second nt in a codon
        for init_pos in range(-2, -len(CDR3_seq)*3-1, -3):
            Pi_R[:, init_pos] += Pi_JinsDJ_given_D[D_in][:, init_pos]*self.zeroD_given_D[D_in]
            allowed_final_nts = ['ACGT'[nt] for nt in range(4) if Pi_JinsDJ_given_D[D_in][nt, init_pos] > 0]
            for delDr in range(num_delr_pos):
                if self.min_delDl_given_DdelDr[D_in][delDr] == -1: # P(delDr | D) = 0 for this delDr --> move to next
                    continue
                #check first nt of the D region (last in the codon)
                if cutD_gen_seg[l_D_seg - delDr - 1] in allowed_final_nts: #first nt match
                    base_prob = Pi_JinsDJ_given_D[D_in][nt2num[cutD_gen_seg[l_D_seg - delDr - 1]], init_pos]
                    #The dell pos may be out of range of the PdelDldelDr_given_D -- check!
                    if l_D_seg - delDr - 1 <= self.max_delDl_given_DdelDr[D_in][delDr]:
                        Pi_R[0, init_pos-1] += self.PdelDldelDr_given_D[l_D_seg - delDr - 1, delDr, D_in]*base_prob
                else:
                    continue #no alignment
                #Enter main loop
                for pos in range(init_pos - 2, max(init_pos - l_D_seg + delDr, min_pos)-1, -1):
                    #note delDl = D_pos
                    D_pos = l_D_seg - delDr - 1 - ((init_pos - 1) - pos)
                    #The dell pos may be out of range of the PdelDldelDr_given_D -- check!
                    if D_pos > self.max_delDl_given_DdelDr[D_in][delDr]:
                        current_PdelDldelDr = 0
                    else:
                        current_PdelDldelDr = self.PdelDldelDr_given_D[D_pos, delDr, D_in]
                    #Position is the first nt in codon
                    if pos%3 == 2:
                        #check alignment
                        if cutD_gen_seg[D_pos] in self.sub_codons_right[CDR3_seq[pos//3]]:
                            Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_nt_pos_vec[D_in][:, D_pos]
                        else:
                            break #no longer aligned -- exit loop
                    #Position is the second nt in codon
                    elif pos%3 == 1:
                        #check alignment
                        if cutD_gen_seg[D_pos:D_pos + 2] in self.sub_codons_right[CDR3_seq[pos//3]]:
                            Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_2nd_nt_pos_per_aa_vec[D_in][CDR3_seq[pos//3]][:, D_pos]
                        else:
                            break #no longer aligned --- exit loop
                    #Position is the last nt in codon
                    else:
                        #check alignment
                        if cutD_gen_seg[D_pos:D_pos + 3] in self.codons_dict[CDR3_seq[pos//3]]:
                            Pi_R[0, pos] += current_PdelDldelDr*base_prob
                        else:
                            break #no longer aligned --- exit loop
        #start position is last nt in a codon
        for init_pos in range(-3, -len(CDR3_seq)*3-1, -3):
            Pi_R[0, init_pos] += Pi_JinsDJ_given_D[D_in][0, init_pos]*self.zeroD_given_D[D_in]
            for delDr in range(num_delr_pos):
                if self.min_delDl_given_DdelDr[D_in][delDr] == -1: # P(delDr | D) = 0 for this delDr --> move to next
                    continue
                base_prob = Pi_JinsDJ_given_D[D_in][0, init_pos]
                for pos in range(init_pos - 1, max(init_pos - l_D_seg + delDr, min_pos)-1, -1):
                    #note delDl = D_pos
                    D_pos = l_D_seg - delDr - 1 - ((init_pos - 1) - pos)
                    #The dell pos may be out of range of the PdelDldelDr_given_D -- check!
                    if D_pos > self.max_delDl_given_DdelDr[D_in][delDr]:
                        current_PdelDldelDr = 0
                    else:
                        current_PdelDldelDr = self.PdelDldelDr_given_D[D_pos, delDr, D_in]
                    #Position is the first nt in codon
                    if pos%3 == 2:
                        #check alignment
                        if cutD_gen_seg[D_pos] in self.sub_codons_right[CDR3_seq[pos//3]]:
                            Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_nt_pos_vec[D_in][:, D_pos]
                        else:
                            break #no longer aligned -- exit loop
                    #Position is the second nt in codon
                    elif pos%3 == 1:
                        #check alignment
                        if cutD_gen_seg[D_pos:D_pos + 2] in self.sub_codons_right[CDR3_seq[pos//3]]:
                            Pi_R[:, pos] += current_PdelDldelDr*base_prob*self.PD_2nd_nt_pos_per_aa_vec[D_in][CDR3_seq[pos//3]][:, D_pos]
                        else:
                            break #no longer aligned --- exit loop
                    #Position is the last nt in codon
                    else:
                        #check alignment
                        if cutD_gen_seg[D_pos:D_pos + 3] in self.codons_dict[CDR3_seq[pos//3]]:
                            Pi_R[0, pos] += current_PdelDldelDr*base_prob
                        else:
                            break #no longer aligned --- exit loop
    return Pi_R
#%%
class GenerationProbabilityVJ(GenerationProbability, PreprocessedParametersVJ):
"""Class used to compute the Pgen of CDR3 sequences from a VJ model.
All of the attributes of GenerationProbabilityVJ are inherited from the
class PreprocessedParametersVJ. The methods of the class are used to
compute the Pgen of an 'amino acid' sequence from a VJ generative model.
Attributes
----------
codons_dict : dict
Dictionary, keyed by the allowed 'amino acid' symbols with the values
being lists of codons corresponding to the symbol.
sub_codons_left : dict
Dictionary of the 1 and 2 nucleotide prefixes (read from 5') for
each codon in an 'amino acid' grouping
sub_codons_right : dict
Dictionary of the 1 and 2 nucleotide suffixes (read from 5') for
each codon in an 'amino acid' grouping
V_allele_names : list of strings
List of V allele names in genomic_data
d_V_usage_mask : list of int
Default V usage mask of indices of all productive V genes/alleles.
V_mask_mapping : dict
Dictionary mapping allowed keywords (strings corresponding to V gene
and V allele names) to the indices of the V alleles they refer to.
cutV_genomic_CDR3_segs : list of strings
List of the V germline nucleotide sequences, trimmed to begin at the
CDR3 region (includes the conserved C residue) with the maximum number
of reverse complementary palindromic insertions appended.
PVJ : ndarray
Joint probability distribution of V and J, P(V, J).
PVdelV_nt_pos_vec : list of ndarrays
For each V allele, format P(delV|V) into the correct form for a Pi
array or V(J)_{x_1}. This is only done for the first and last position
in each codon.
PVdelV_2nd_nt_pos_per_aa_vec : list of dicts
For each V allele, and each 'amino acid', format P(V)*P(delV|V) for
positions in the middle of a codon into the correct form for a Pi array
or V(J)_{x_1} given the 'amino acid'.
J_allele_names : list of strings
List of J allele names in genomic_data
d_J_usage_mask : list of int
Default J usage mask of indices of all productive J genes/alleles.
J_mask_mapping : dict
Dictionary mapping allowed keywords (strings corresponding to V gene
and V allele names) to the indices of the V alleles they refer to.
cutJ_genomic_CDR3_segs : list of strings
List of the J germline nucleotide sequences, trimmed to end at the
CDR3 region (includes the conserved F or W residue) with the maximum
number of reverse complementary palindromic insertions appended.
PJdelJ_nt_pos_vec : list of ndarrays
For each J allele, format P(delJ|J) into the correct form for a Pi
array or J^{x_2}. This is only done for the first and last position in
each codon.
PJdelJ_2nd_nt_pos_per_aa_vec : list of dicts
For each J allele, and each 'amino acid', format P(delJ|J) for
positions in the middle of a codon into the correct form for a Pi array
or J^{x_2} given the 'amino acid'.
PinsVJ : ndarray
Probability distribution of the VJ (N) insertion sequence length
Rvj : ndarray
Markov transition matrix for the VJ insertion junction.
first_nt_bias_insVJ : ndarray
(4,) array of the probability distribution of the identity of the
first nucleotide insertion for the VJ junction.
zero_nt_bias_insVJ : ndarray
(4,) array of the probability distribution of the identity of
the nucleotide BEFORE the VJ insertion.
zero_nt_bias_insVJ = Rvj^{-1}first_nt_bias_insVJ
Tvj, Svj, Dvj, lTvj, lDvj : dicts
Dictionaries (by 'amino acid') of insertion transfer matrices
((4, 4) ndarrays) for the VJ junction.
"""
def __init__(self, generative_model, genomic_data, alphabet_file = None):
    """Initialize GenerationProbabilityVJ.

    This initialization inherits all of the attributes of
    PreprocessedParametersVJ (which include all of the processed
    parameters needed for Pgen computation) and the methods of
    GenerationProbability, which include wrappers/formatting of
    sequences to make Pgen computation of nucleotide and regular
    expression sequences easier.

    Parameters
    ----------
    generative_model : GenerativeModelVJ
        VJ generative model class containing the model parameters.
    genomic_data : GenomicDataVJ
        VJ genomic data class containing the V and J germline sequences
        and info.
    alphabet_file : str, optional
        File name (full pathing from current directory) for a custom
        alphabet definition. If no file is provided, the default alphabet
        is used, i.e. standard amino acids, undetermined amino acids
        (B, J, X, and Z), and single codon symbols.
    """
    # GenerationProbability supplies the sequence-formatting wrappers;
    # PreprocessedParametersVJ computes and attaches every processed model
    # parameter (Pi vectors, transfer matrices, usage masks, ...).
    GenerationProbability.__init__(self)
    PreprocessedParametersVJ.__init__(self, generative_model, genomic_data, alphabet_file)
def compute_CDR3_pgen(self, CDR3_seq, V_usage_mask, J_usage_mask):
    """Compute Pgen of the 'amino acid' sequence CDR3_seq under a VJ model.

    The computation is conditioned on the (already formatted) V alleles in
    V_usage_mask and the J alleles in J_usage_mask.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by
        codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation.
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.

    Returns
    -------
    pgen : float
        The generation probability (Pgen) of the sequence.
    """
    # J-side genomic contribution P(delJ|J); J alleles with no alignment are
    # dropped from the usage mask to save work on the V side.
    Pi_J, r_J_usage_mask = self.compute_Pi_J(CDR3_seq, J_usage_mask)
    # V-side genomic contribution P(V, J, delV), conditioned on each
    # surviving J allele.
    Pi_V_given_J, max_V_align = self.compute_Pi_V_given_J(CDR3_seq, V_usage_mask, r_J_usage_mask)
    # Fold the VJ (N) insertion factors (PinsVJ and the Markov factors) into
    # the left-hand contribution.
    Pi_V_insVJ_given_J = self.compute_Pi_V_insVJ_given_J(CDR3_seq, Pi_V_given_J, max_V_align)
    # Stitch the left (V + insertions) and right (J) halves together at each
    # possible junction position, accumulating over the surviving J alleles.
    num_nt_positions = len(CDR3_seq) * 3
    pgen = 0
    for left_half, right_half in zip(Pi_V_insVJ_given_J, Pi_J):
        for pos in range(num_nt_positions - 1):
            pgen += np.dot(left_half[:, pos], right_half[:, pos + 1])
    return pgen
#Genomic V alignment/matching (contribution from P(V, delV)), return Pi_V
def compute_Pi_V_given_J(self, CDR3_seq, V_usage_mask, J_usage_mask):
    """Compute Pi_V conditioned on J.

    Returns the Pi array built from the model factors of the V genomic
    contributions, P(V, J)*P(delV|V). This corresponds to V(J)_{x_1}.

    Instance attributes used: self.cutV_genomic_CDR3_segs (V germline
    segments trimmed to the conserved C, palindromic insertions included),
    self.PVdelV_nt_pos_vec, self.PVdelV_2nd_nt_pos_per_aa_vec, self.PVJ.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by
        codons_dict).
    V_usage_mask : list
        Indices of the V alleles to be considered in the Pgen computation.
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.

    Returns
    -------
    Pi_V_given_J : list
        List of (4, 3L) ndarrays corresponding to V(J)_{x_1}, one per J
        allele in J_usage_mask.
    max_V_align : int
        Maximum alignment length of CDR3_seq to any allowed genomic V
        allele (0 when no valid V allele produced an alignment).
    """
    #Note, the cutV_genomic_CDR3_segs INCLUDE the palindromic insertions and thus are max_palindrome nts longer than the template.
    #furthermore, the genomic sequence should be pruned to start at the conserved C
    num_nt_positions = len(CDR3_seq) * 3
    # Aggregate weight for each nt possibility and position, per J allele.
    Pi_V_given_J = [np.zeros((4, num_nt_positions)) for _ in J_usage_mask]
    alignment_lengths = []
    for V_in in V_usage_mask:
        try:
            cutV_gen_seg = self.cutV_genomic_CDR3_segs[V_in]
        except IndexError:
            print('Check provided V usage mask. Contains indicies out of allowed range.')
            continue
        current_alignment_length = self.max_nt_to_aa_alignment_left(CDR3_seq, cutV_gen_seg)
        alignment_lengths.append(current_alignment_length)
        if current_alignment_length == 0:
            # No aligned positions -- this V allele contributes nothing.
            continue
        current_Pi_V = np.zeros((4, num_nt_positions))
        # First and last nt of each codon come straight from PVdelV_nt_pos_vec.
        current_Pi_V[:, :current_alignment_length] = self.PVdelV_nt_pos_vec[V_in][:, :current_alignment_length]
        # Middle nt of each codon depends on the 'amino acid' at that codon.
        for pos in range(1, current_alignment_length, 3):
            current_Pi_V[:, pos] = self.PVdelV_2nd_nt_pos_per_aa_vec[V_in][CDR3_seq[pos//3]][:, pos]
        # Weight by the joint V/J usage probability for each conditioning J.
        for j, J_in in enumerate(J_usage_mask):
            Pi_V_given_J[j][:, :current_alignment_length] += self.PVJ[V_in, J_in]*current_Pi_V[:, :current_alignment_length]
    # Bug fix: max() over an empty sequence raised ValueError when
    # V_usage_mask was empty or contained only out-of-range indices.
    return Pi_V_given_J, max(alignment_lengths, default=0)
#Include insertions (R and PinsVJ) to get the total contribution from the the V and insertions conditioned on J identity. Return Pi_V_insVJ_given_J
def compute_Pi_V_insVJ_given_J(self, CDR3_seq, Pi_V_given_J, max_V_align):
    """Compute Pi_V_insVJ conditioned on J.

    Returns the Pi array from the model factors of the V genomic
    contributions, P(V, J)*P(delV|V), and the VJ (N) insertions,
    first_nt_bias_insVJ(m_1)PinsVJ(l_VJ)prod_{i=2}^{l_VJ}Rvj(m_i|m_{i-1}).
    This corresponds to V(J)_{x_1}{M^{x_1}}_{x_2}.

    Instance attributes used: self.PinsVJ (insertion-length distribution),
    self.first_nt_bias_insVJ, self.zero_nt_bias_insVJ, and the per-'amino
    acid' transfer-matrix dicts self.Tvj, self.Svj, self.Dvj, self.lTvj,
    self.lDvj.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by
        codons_dict).
    Pi_V_given_J : list
        List of (4, 3L) ndarrays corresponding to V(J)_{x_1}.
    max_V_align : int
        Maximum alignment of CDR3_seq to any genomic V allele allowed by
        V_usage_mask.

    Returns
    -------
    Pi_V_insVJ_given_J : list
        List of (4, 3L) ndarrays corresponding to V(J)_{x_1}{M^{x_1}}_{x_2}.
    """
    max_insertions = len(self.PinsVJ) - 1
    num_nt_positions = len(CDR3_seq) * 3
    Pi_V_insVJ_given_J = [np.zeros((4, num_nt_positions)) for _ in Pi_V_given_J]

    def extend_insertions(pi_arr, init_pos, base_ins, base_nt_vec):
        # Shared tail of the three start-phase cases (previously copy-pasted
        # three times): walk codon-by-codon through subsequent 'amino acids',
        # adding one-, two- and three-more-insertion contributions via the
        # S/D/T transfer matrices.
        for aa in CDR3_seq[init_pos//3 + 1: init_pos//3 + max_insertions//3]:
            pi_arr[:, init_pos+base_ins+1] += self.PinsVJ[base_ins + 1]*np.dot(self.Svj[aa], base_nt_vec)
            pi_arr[:, init_pos+base_ins+2] += self.PinsVJ[base_ins + 2]*np.dot(self.Dvj[aa], base_nt_vec)
            base_nt_vec = np.dot(self.Tvj[aa], base_nt_vec)
            pi_arr[0, init_pos+base_ins+3] += self.PinsVJ[base_ins + 3]*np.sum(base_nt_vec)
            base_ins += 3

    for j in range(len(Pi_V_given_J)):
        pi = Pi_V_insVJ_given_J[j]
        # Case 1: start position is the first nt in a codon.
        for init_pos in range(0, max_V_align, 3):
            # Zero insertions
            pi[:, init_pos] += self.PinsVJ[0]*Pi_V_given_J[j][:, init_pos]
            # One insertion
            pi[:, init_pos+1] += self.PinsVJ[1]*np.dot(self.lDvj[CDR3_seq[init_pos//3]], Pi_V_given_J[j][:, init_pos])
            # Two insertions; this also seeds the base nt vec for the tail loop.
            base_nt_vec = np.dot(self.lTvj[CDR3_seq[init_pos//3]], Pi_V_given_J[j][:, init_pos])
            pi[0, init_pos+2] += self.PinsVJ[2]*np.sum(base_nt_vec)
            extend_insertions(pi, init_pos, 2, base_nt_vec)
        # Case 2: start position is the second nt in a codon.
        for init_pos in range(1, max_V_align, 3):
            # Zero insertions
            pi[:, init_pos] += self.PinsVJ[0]*Pi_V_given_J[j][:, init_pos]
            # One insertion: pairwise multiply with the first-nt bias distribution.
            base_nt_vec = np.multiply(Pi_V_given_J[j][:, init_pos], self.first_nt_bias_insVJ)
            pi[0, init_pos+1] += self.PinsVJ[1]*np.sum(base_nt_vec)
            extend_insertions(pi, init_pos, 1, base_nt_vec)
        # Case 3: start position is the last nt in a codon.
        for init_pos in range(2, max_V_align, 3):
            # Zero insertions
            pi[0, init_pos] += self.PinsVJ[0]*Pi_V_given_J[j][0, init_pos]
            # zero_nt_bias_insVJ = Rvj^{-1} first_nt_bias_insVJ seeds the chain.
            base_nt_vec = self.zero_nt_bias_insVJ*Pi_V_given_J[j][0, init_pos]
            extend_insertions(pi, init_pos, 0, base_nt_vec)
    return Pi_V_insVJ_given_J
#Genomic J alignment/matching (contribution from P(delJ | J)), return Pi_J and the reduced J_usage_mask (reduced based on non-zero alignment)
def compute_Pi_J(self, CDR3_seq, J_usage_mask):
    """Compute Pi_J.

    Returns the Pi arrays built from the J genomic contribution,
    P(delJ|J), corresponding to J^{x_2}.

    Instance attributes used: self.cutJ_genomic_CDR3_segs (J germline
    segments trimmed to the conserved 3' residue, palindromic insertions
    included), self.PJdelJ_nt_pos_vec, self.PJdelJ_2nd_nt_pos_per_aa_vec.

    Parameters
    ----------
    CDR3_seq : str
        CDR3 sequence composed of 'amino acids' (single character symbols
        each corresponding to a collection of codons as given by
        codons_dict).
    J_usage_mask : list
        Indices of the J alleles to be considered in the Pgen computation.

    Returns
    -------
    Pi_J : list
        List of (4, 3L) ndarrays corresponding to J^{x_2}, one per
        surviving J allele.
    r_J_usage_mask : list
        Reduced usage mask: J alleles with no contribution (bad alignment)
        are removed so the V-side computation (conditioned on J) does less
        work.
    """
    # The cutJ segments include the palindromic insertions, so they are
    # max_palindrome nts longer than the germline template.
    Pi_J = []
    r_J_usage_mask = []
    num_nt_positions = len(CDR3_seq) * 3
    for J_in in J_usage_mask:
        try:
            cutJ_gen_seg = self.cutJ_genomic_CDR3_segs[J_in]
        except IndexError:
            print('Check provided J usage mask. Contains indicies out of allowed range.')
            continue
        align_len = self.max_nt_to_aa_alignment_right(CDR3_seq, cutJ_gen_seg)
        current_Pi_J = np.zeros((4, num_nt_positions))
        if align_len > 0:
            # First/last nt of each codon straight from PJdelJ_nt_pos_vec;
            # alignment is anchored at the right (3') end.
            current_Pi_J[:, -align_len:] = self.PJdelJ_nt_pos_vec[J_in][:, -align_len:]
            # Middle nt of each codon depends on the 'amino acid' there.
            for pos in range(-2, -align_len - 1, -3):
                current_Pi_J[:, pos] = self.PJdelJ_2nd_nt_pos_per_aa_vec[J_in][CDR3_seq[pos//3]][:, pos]
        # Keep only alleles with a non-zero contribution.
        if current_Pi_J.sum() > 0:
            Pi_J.append(current_Pi_J)
            r_J_usage_mask.append(J_in)
    return Pi_J, r_J_usage_mask
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.network_services import tcp_route_pb2
from google3.cloud.graphite.mmv2.services.google.network_services import (
tcp_route_pb2_grpc,
)
from typing import List
class TcpRoute(object):
    """Declarative client wrapper for a Network Services alpha TcpRoute.

    Generated DCL-style resource class: holds the resource's declarative
    fields and converts them to/from ``tcp_route_pb2`` messages when
    calling the gRPC service stub.
    """

    def __init__(
        self,
        name: str = None,
        create_time: str = None,
        update_time: str = None,
        description: str = None,
        rules: list = None,
        routers: list = None,
        meshes: list = None,
        labels: dict = None,
        project: str = None,
        location: str = None,
        self_link: str = None,
        service_account_file: str = "",
    ):
        # Ensure the gRPC channel layer is set up before any stub calls.
        channel.initialize()
        # NOTE(review): create_time, update_time and self_link are accepted
        # but not stored here; apply() sets them from the server response —
        # presumably output-only fields. Confirm against the API surface.
        self.name = name
        self.description = description
        self.rules = rules
        self.routers = routers
        self.meshes = meshes
        self.labels = labels
        self.project = project
        self.location = location
        self.service_account_file = service_account_file

    def apply(self):
        """Create/update the remote resource to match this object.

        Builds an Apply request from the local fields (only truthy values
        are copied), sends it, then refreshes every local field from the
        server's authoritative response.
        """
        stub = tcp_route_pb2_grpc.NetworkservicesAlphaTcpRouteServiceStub(
            channel.Channel()
        )
        request = tcp_route_pb2.ApplyNetworkservicesAlphaTcpRouteRequest()
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if TcpRouteRulesArray.to_proto(self.rules):
            request.resource.rules.extend(TcpRouteRulesArray.to_proto(self.rules))
        if Primitive.to_proto(self.routers):
            request.resource.routers.extend(Primitive.to_proto(self.routers))
        if Primitive.to_proto(self.meshes):
            request.resource.meshes.extend(Primitive.to_proto(self.meshes))
        if Primitive.to_proto(self.labels):
            request.resource.labels = Primitive.to_proto(self.labels)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        request.service_account_file = self.service_account_file

        response = stub.ApplyNetworkservicesAlphaTcpRoute(request)
        # Overwrite local state with the server's view of the resource.
        self.name = Primitive.from_proto(response.name)
        self.create_time = Primitive.from_proto(response.create_time)
        self.update_time = Primitive.from_proto(response.update_time)
        self.description = Primitive.from_proto(response.description)
        self.rules = TcpRouteRulesArray.from_proto(response.rules)
        self.routers = Primitive.from_proto(response.routers)
        self.meshes = Primitive.from_proto(response.meshes)
        self.labels = Primitive.from_proto(response.labels)
        self.project = Primitive.from_proto(response.project)
        self.location = Primitive.from_proto(response.location)
        self.self_link = Primitive.from_proto(response.self_link)

    def delete(self):
        """Delete the remote resource identified by this object's fields."""
        stub = tcp_route_pb2_grpc.NetworkservicesAlphaTcpRouteServiceStub(
            channel.Channel()
        )
        request = tcp_route_pb2.DeleteNetworkservicesAlphaTcpRouteRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if TcpRouteRulesArray.to_proto(self.rules):
            request.resource.rules.extend(TcpRouteRulesArray.to_proto(self.rules))
        if Primitive.to_proto(self.routers):
            request.resource.routers.extend(Primitive.to_proto(self.routers))
        if Primitive.to_proto(self.meshes):
            request.resource.meshes.extend(Primitive.to_proto(self.meshes))
        if Primitive.to_proto(self.labels):
            request.resource.labels = Primitive.to_proto(self.labels)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        response = stub.DeleteNetworkservicesAlphaTcpRoute(request)

    @classmethod
    def list(self, project, location, service_account_file=""):
        """List TcpRoutes in the given project/location; returns the items."""
        stub = tcp_route_pb2_grpc.NetworkservicesAlphaTcpRouteServiceStub(
            channel.Channel()
        )
        request = tcp_route_pb2.ListNetworkservicesAlphaTcpRouteRequest()
        request.service_account_file = service_account_file
        request.Project = project

        request.Location = location

        return stub.ListNetworkservicesAlphaTcpRoute(request).items

    def to_proto(self):
        """Serialize this object to a NetworkservicesAlphaTcpRoute proto."""
        resource = tcp_route_pb2.NetworkservicesAlphaTcpRoute()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if TcpRouteRulesArray.to_proto(self.rules):
            resource.rules.extend(TcpRouteRulesArray.to_proto(self.rules))
        if Primitive.to_proto(self.routers):
            resource.routers.extend(Primitive.to_proto(self.routers))
        if Primitive.to_proto(self.meshes):
            resource.meshes.extend(Primitive.to_proto(self.meshes))
        if Primitive.to_proto(self.labels):
            resource.labels = Primitive.to_proto(self.labels)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            resource.location = Primitive.to_proto(self.location)
        return resource
class TcpRouteRules(object):
    """A single TcpRoute rule: match criteria plus a routing action."""

    def __init__(self, matches: list = None, action: dict = None):
        self.matches = matches
        self.action = action

    @classmethod
    def to_proto(cls, resource):
        """Convert a TcpRouteRules object to its proto (None passes through)."""
        if not resource:
            return None
        proto = tcp_route_pb2.NetworkservicesAlphaTcpRouteRules()
        matches = TcpRouteRulesMatchesArray.to_proto(resource.matches)
        if matches:
            proto.matches.extend(matches)
        action = TcpRouteRulesAction.to_proto(resource.action)
        if action:
            proto.action.CopyFrom(action)
        else:
            proto.ClearField("action")
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Build a TcpRouteRules from its proto (None passes through)."""
        if not resource:
            return None
        return TcpRouteRules(
            matches=TcpRouteRulesMatchesArray.from_proto(resource.matches),
            action=TcpRouteRulesAction.from_proto(resource.action),
        )
class TcpRouteRulesArray(object):
    """List-level to/from proto conversion for TcpRouteRules."""

    @classmethod
    def to_proto(cls, resources):
        # Falsy inputs (None, []) are returned unchanged.
        if not resources:
            return resources
        return [TcpRouteRules.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        return [TcpRouteRules.from_proto(item) for item in resources]
class TcpRouteRulesMatches(object):
    """Address/port match criterion of a TcpRoute rule."""

    def __init__(self, address: str = None, port: str = None):
        self.address = address
        self.port = port

    @classmethod
    def to_proto(cls, resource):
        """Convert to the matches proto (None passes through)."""
        if not resource:
            return None
        proto = tcp_route_pb2.NetworkservicesAlphaTcpRouteRulesMatches()
        address = Primitive.to_proto(resource.address)
        if address:
            proto.address = address
        port = Primitive.to_proto(resource.port)
        if port:
            proto.port = port
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Build from the matches proto (None passes through)."""
        if not resource:
            return None
        return TcpRouteRulesMatches(
            address=Primitive.from_proto(resource.address),
            port=Primitive.from_proto(resource.port),
        )
class TcpRouteRulesMatchesArray(object):
    """List-level to/from proto conversion for TcpRouteRulesMatches."""

    @classmethod
    def to_proto(cls, resources):
        # Falsy inputs (None, []) are returned unchanged.
        if not resources:
            return resources
        return [TcpRouteRulesMatches.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        return [TcpRouteRulesMatches.from_proto(item) for item in resources]
class TcpRouteRulesAction(object):
    """Routing action of a rule: weighted destinations or original destination."""

    def __init__(self, destinations: list = None, original_destination: bool = None):
        self.destinations = destinations
        self.original_destination = original_destination

    @classmethod
    def to_proto(cls, resource):
        """Convert to the action proto (None passes through)."""
        if not resource:
            return None
        proto = tcp_route_pb2.NetworkservicesAlphaTcpRouteRulesAction()
        destinations = TcpRouteRulesActionDestinationsArray.to_proto(
            resource.destinations
        )
        if destinations:
            proto.destinations.extend(destinations)
        original_destination = Primitive.to_proto(resource.original_destination)
        if original_destination:
            proto.original_destination = original_destination
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Build from the action proto (None passes through)."""
        if not resource:
            return None
        return TcpRouteRulesAction(
            destinations=TcpRouteRulesActionDestinationsArray.from_proto(
                resource.destinations
            ),
            original_destination=Primitive.from_proto(resource.original_destination),
        )
class TcpRouteRulesActionArray(object):
    """List-level to/from proto conversion for TcpRouteRulesAction."""

    @classmethod
    def to_proto(cls, resources):
        # Falsy inputs (None, []) are returned unchanged.
        if not resources:
            return resources
        return [TcpRouteRulesAction.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        return [TcpRouteRulesAction.from_proto(item) for item in resources]
class TcpRouteRulesActionDestinations(object):
    """Weighted backend service destination of a TcpRoute action."""

    def __init__(self, weight: int = None, service_name: str = None):
        self.weight = weight
        self.service_name = service_name

    @classmethod
    def to_proto(cls, resource):
        """Convert to the destinations proto (None passes through)."""
        if not resource:
            return None
        proto = tcp_route_pb2.NetworkservicesAlphaTcpRouteRulesActionDestinations()
        weight = Primitive.to_proto(resource.weight)
        if weight:
            proto.weight = weight
        service_name = Primitive.to_proto(resource.service_name)
        if service_name:
            proto.service_name = service_name
        return proto

    @classmethod
    def from_proto(cls, resource):
        """Build from the destinations proto (None passes through)."""
        if not resource:
            return None
        return TcpRouteRulesActionDestinations(
            weight=Primitive.from_proto(resource.weight),
            service_name=Primitive.from_proto(resource.service_name),
        )
class TcpRouteRulesActionDestinationsArray(object):
    """List-level to/from proto conversion for TcpRouteRulesActionDestinations."""

    @classmethod
    def to_proto(cls, resources):
        # Falsy inputs (None, []) are returned unchanged.
        if not resources:
            return resources
        return [TcpRouteRulesActionDestinations.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        return [TcpRouteRulesActionDestinations.from_proto(item) for item in resources]
class Primitive(object):
    """Scalar field conversion helpers used by the generated resource classes."""

    @classmethod
    def to_proto(cls, s):
        # NOTE(review): any falsy value (None, 0, False, "") collapses to "";
        # presumably only string-like scalars reach this path — confirm.
        return s if s else ""

    @classmethod
    def from_proto(cls, s):
        # Wire values are passed through unchanged.
        return s
|
"""BDBF Docstring"""
from bdbf.main import *
__title__ = 'bdbf'
__author__ = 'Bertik23'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020-2021 Bertik23'
__version__ = '1.1.2' |
# Base directory under which output files are written.
OUTPUT_BASE_DIRECTORY = "/g11/wangdp/project/work/data/playground/operation/gfs/wxzx/output"
|
import torch
import cv2
from PIL import Image
from openvino.inference_engine import IECore
import onnxruntime
#https://pytorch.org/hub/hustvl_yolop/
# Load the pretrained YOLOP (panoptic driving perception) model from torch hub.
model = torch.hub.load('hustvl/yolop', 'yolop', pretrained=True)

# Images
# img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list
imgpath = '../MVI_1587_VIS_00423.jpg'
img = cv2.imread(imgpath)
img1 = Image.open(imgpath)

# Resize to the 640x640 network input, then convert HWC uint8 -> 1x3xHxW tensor.
im = cv2.resize(img, (640, 640))
# Bug fix: the tensor was previously built from the ORIGINAL image (`img`),
# silently discarding the resize on the line above; build it from `im`.
im = torch.Tensor(im)
im = torch.unsqueeze(im.permute(2, 0, 1), 0)

# Inference
# img = torch.randn(1,3,640,640)
det_out, da_seg_out, ll_seg_out = model(im)
results = model(im)

# Results
results.print()  # or .show(), .save(), .crop(), .pandas(), etc.
results.show()

#modelm.model.model.model(im.cuda())
#model = torch.hub.load('pytorch/vision', 'resnet18', pretrained=True)
# onnx_model_name='yolov5m.onnx'
# input_names = ["input"]
# output_names = ["output"]
#torch.onnx.export(modelm, (img), onnx_model_name, export_params=True,verbose=True, input_names=input_names, output_names=output_names)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-10-28 15:14
from __future__ import unicode_literals
from django.db import migrations
import osf.utils.fields
class Migration(migrations.Migration):
    """Switch ExternalAccount credential fields to EncryptedTextField.

    Auto-generated by Django 1.9: alters display_name, oauth_key,
    oauth_secret, profile_url and refresh_token on the externalaccount
    model to osf.utils.fields.EncryptedTextField (blank/null allowed).
    """

    dependencies = [
        ('osf', '0012_auto_20161028_0908'),
    ]

    operations = [
        migrations.AlterField(
            model_name='externalaccount',
            name='display_name',
            field=osf.utils.fields.EncryptedTextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='externalaccount',
            name='oauth_key',
            field=osf.utils.fields.EncryptedTextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='externalaccount',
            name='oauth_secret',
            field=osf.utils.fields.EncryptedTextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='externalaccount',
            name='profile_url',
            field=osf.utils.fields.EncryptedTextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='externalaccount',
            name='refresh_token',
            field=osf.utils.fields.EncryptedTextField(blank=True, null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 25 19:30:05 2014
@author: nestor
"""
import glob
import pandas as pd
import plot_storage_helper as ph
#Get the data from different files for the different algorithms and libraries
df_openfec = pd.concat(
    [pd.read_csv(f) for f in glob.glob('openfec_debian6*.csv')])
df_jerasure = pd.concat(
    [pd.read_csv(f) for f in glob.glob('jerasure_debian6*.csv')])
df_isa = pd.concat(
    [pd.read_csv(f) for f in glob.glob('isa_debian6*.csv')])
df_fullrlnc = pd.concat(
    [pd.read_csv(f) for f in glob.glob('fullrlnc_debian6*.csv')])
df_sparse = pd.concat(
    [pd.read_csv(f) for f in glob.glob('sparse_debian6*.csv')])
df_perpetual = pd.concat(
    [pd.read_csv(f) for f in glob.glob('perpetual_debian6*.csv')])
df_thread = pd.concat(
    [pd.read_csv(f) for f in glob.glob('thread_rlnc_debian6*.csv')])
df_sparse_thread = pd.concat(
    [pd.read_csv(f) for f in glob.glob('sparse_thread_debian6*.csv')])

#Patch dataframes
#Density for non-sparse codes: set to 1 so all frames share the column
df_openfec['density'] = 1
df_isa['density'] = 1
df_jerasure['density'] = 1
df_fullrlnc['density'] = 1
df_thread['density'] = 1

#Rename perpetual dataframe 'width_ratio' column as 'density' because the
#width ratio can be seen as a density
df_perpetual = df_perpetual.rename(columns = {'width_ratio':'density'})

#Concatenate all dataframes into a single one.
#Note: Sparse RLNC (threaded or not) with 50% density can be regarded as
#dense, so those rows go into df_all_dense instead of df_all_sparse.
df_all_sparse = pd.concat([df_openfec, df_perpetual,
                           df_sparse_thread[(
                               df_sparse_thread['density'] != 0.5)],
                           df_sparse[df_sparse['density'] != 0.5 ]])
df_all_dense = pd.concat([df_isa, df_jerasure, df_fullrlnc,df_thread,
                          df_sparse[df_sparse['density'] == 0.5 ],
                          df_sparse_thread[df_sparse_thread['density'] == 0.5 ]
                          ])

#Goodput vs. symbols (Fixed: symbol size, loss rate, type)
ph.plot_metric(df_all_sparse,'goodput','symbols',
               ['symbol_size','loss_rate','type'],
               ['testcase','density'],'sparse')
ph.plot_metric(df_all_dense,'goodput','symbols',
               ['symbol_size','loss_rate','type'],
               ['testcase','density'],'dense')

#Goodput vs. symbol size (Fixed: symbols, loss rate, type)
ph.plot_metric(df_all_sparse,'goodput','symbol_size',
               ['symbols','loss_rate','type'],
               ['testcase','density'],'sparse')
ph.plot_metric(df_all_dense,'goodput','symbol_size',
               ['symbols','loss_rate','type'],
               ['testcase','density'],'dense')

#Goodput vs. loss rate (Fixed: symbol size, symbols, type)
ph.plot_metric(df_all_sparse,'goodput','loss_rate',
               ['symbol_size','symbols','type'],
               ['testcase','density'],'sparse')
ph.plot_metric(df_all_dense,'goodput','loss_rate',
               ['symbol_size','symbols','type'],
               ['testcase','density'],'dense')

#Goodput vs. erased symbols (Fixed: symbol size, symbols, type)
ph.plot_metric(df_all_sparse,'goodput','erased_symbols',
               ['symbol_size','symbols','type'],
               ['testcase','density'],'sparse')
ph.plot_metric(df_all_dense,'goodput','erased_symbols',
               ['symbol_size','symbols','type'],
               ['testcase','density'],'dense')

#Dataframe for checking linear dependency (overhead); only decoders incur
#extra symbols.
df_linear_dependency = df_all_sparse[df_all_sparse['type'] == "decoder"]

#Extra (overhead) symbols vs. symbols (Fixed: symbol size, loss rate)
ph.plot_metric(df_linear_dependency,'extra_symbols','symbols',
               ['symbol_size','loss_rate'],
               ['testcase','density'],'sparse')
import os
import numpy as np
import xml.etree.ElementTree as ET
def cub():
    """Generate per-split image-list and bounding-box files for CUB-200-2011.

    Reads the dataset's image list, labels, boxes and train/test split, then
    writes one 'split_<i>.txt' (image name + zero-based label) and one
    'split_<i>_box.txt' (raw box line) per split index.
    """
    IMAGE_LIST_FILE = '../data/CUB_200_2011/images.txt'
    IMAGE_LABEL_FILE = '../data/CUB_200_2011/image_class_labels.txt'
    BOX_FILE = '../data/CUB_200_2011/bounding_boxes.txt'
    SPLIT_FILE = '../data/CUB_200_2011/train_test_split.txt'
    SPLIT_SET_TEMPLATE = '../data/CUB_200_2011/split_{}.txt'
    SPLIT_SET_BOX_TEMPLATE = '../data/CUB_200_2011/split_{}_box.txt'

    def last_field(line):
        # Each metadata line is '<id> <value>'; the value is the last field.
        return line.strip().split(' ')[-1]

    with open(IMAGE_LIST_FILE, 'r') as f:
        image_names = np.asarray([last_field(x) for x in f.readlines()])
    with open(IMAGE_LABEL_FILE, 'r') as f:
        img_labels = np.asarray([int(last_field(x)) for x in f.readlines()])
    with open(BOX_FILE, 'r') as f:
        all_box = list(f.readlines())
    with open(SPLIT_FILE, 'r') as f:
        split_idx = np.array([int(last_field(x)) for x in f.readlines()])

    for i in np.unique(split_idx):
        members = np.where(split_idx == i)[0]
        with open(SPLIT_SET_TEMPLATE.format(i), 'w') as f:
            for img_idx in members:
                # Labels in the dataset are 1-based; emit 0-based labels.
                f.write('{} {}\n'.format(image_names[img_idx], img_labels[img_idx]-1))
        with open(SPLIT_SET_BOX_TEMPLATE.format(i), 'w') as f:
            for img_idx in members:
                f.write(all_box[img_idx])
def generate_voc_listfile(set_file, list_file):
    """Write a VOC multi-label list file.

    For each image id in ``set_file``, emits one line of the form
    'JPEGImages/<id>.jpg <lbl> <lbl> ...' where the labels are the unique
    zero-based class indices of all non-difficult objects in the image's
    annotation XML.

    Parameters
    ----------
    set_file : str
        Path to a VOC ImageSets/Main split file (one image id per line).
    list_file : str
        Output path for the generated list file.
    """
    classes = ['__background__', # always index 0
               'aeroplane', 'bicycle', 'bird', 'boat',
               'bottle', 'bus', 'car', 'cat', 'chair',
               'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor']
    IMAGE_FILE_NAME = 'JPEGImages/{}.jpg'
    ANNOTATION_FILE_NAME = '../data/voc2012/Annotations/{}.xml'
    # Hoisted out of the per-image loop: the mapping is loop-invariant and
    # was previously rebuilt for every annotation file.
    class_to_index = dict(zip(classes, range(len(classes))))
    with open(set_file, 'r') as f:
        lines = f.readlines()
    train_list_filenames = [IMAGE_FILE_NAME.format(x.strip()) for x in lines]
    # Collect the set of ground-truth class labels present in each image.
    gt_labels = []
    for x in lines:
        tree = ET.parse(ANNOTATION_FILE_NAME.format(x.strip()))
        objs = tree.findall('object')
        # Filter out objects flagged as 'difficult'.
        objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
        gt_classes = np.zeros(len(objs), dtype=np.int32)
        for ix, obj in enumerate(objs):
            cls = class_to_index[obj.find('name').text.lower().strip()]
            gt_classes[ix] = cls - 1  # shift past '__background__' (index 0)
        gt_labels.append(np.unique(gt_classes))
    with open(list_file, 'w') as f:
        for img_name, labels in zip(train_list_filenames, gt_labels):
            line_str = img_name
            for lbl in labels:
                line_str += ' {}'.format(lbl)
            line_str += '\n'
            f.write(line_str)
def voc():
    """Generate the train/val list files for the VOC2012 dataset."""
    if not os.path.exists('../data/voc2012/list'):
        os.makedirs('../data/voc2012/list')
    # (split-set file, output list file) pairs
    splits = [
        ('../data/voc2012/ImageSets/Main/train.txt', '../data/voc2012/list/train_list.txt'),
        ('../data/voc2012/ImageSets/Main/val.txt', '../data/voc2012/list/val_list.txt'),
    ]
    for set_file, list_file in splits:
        generate_voc_listfile(set_file, list_file)
# Script entry point: regenerate the CUB split/list files (cub() defined above).
if __name__ == '__main__':
    cub()
|
from colorpickerwidget import ColorPickerButton, ColorPickerWidget
from pathwidget import PathWidget
from loggingwidget import LoggingWidget
from loginwidget import LoginWidget
from progresswidget import ProgressWidget
from propertieseditor import PropertiesEditor
from choicewidget import ChoiceWidget |
# Copyright (c) 2006-2010 Tampere University of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# sys.path should be such that rules, rules_parser, lsts and lstslist
# can be imported
import tema.rules.rules as rules
import tema.rules.rules_parser as rules_parser
import tema.lsts.lsts as lsts
import tema.lsts.lstslist as lstslist
import tema.model.model as model
import tema.model.lstsmodel as lstsmodel
import os # os.sep needed in path name strings
from tema.model.parallelmodel import ParallelModel,Action,Transition,State
class ParallelLstsModel(ParallelModel):
    """Parallel composition of LSTS components driven by synchronization rules.

    NOTE: Python 2 code (uses the ``file()`` builtin and py2 ``except`` syntax).
    Components are LSTS files named in a rules file; each rule maps a tuple of
    component actions to a synthesized result action in a global numbering.
    """
    def __init__(self):
        ParallelModel.__init__(self)
        self._lstslist=lstslist.LstsList()
        self._rulelist=rules.RuleList()
        # Optional directory prefix for LSTS file names (see setLSTSDirectory).
        self._dirprefix=""
    def log(self,*args):
        pass # this should be replaced by a true logger
    def getActions(self):
        # Result actions occupy the contiguous index range established in
        # loadFromObject (first/last_result_action_index, inclusive).
        return [self._newAction(i)
                for i in range(self._first_result_action_index,
                               self._last_result_action_index+1)]
    def loadFromObject(self,rules_file_contents=None):
        """Parse rules-file contents: load all referenced LSTSs, build the
        global action index, and convert rules to global action numbers."""
        parser=rules_parser.ExtRulesParser()
        # Load LSTSs mentioned in the rules to the lstslist
        lstss=parser.parseLstsFiles(rules_file_contents)
        for lstsnum,lstsfile in lstss:
            lstsobj=lsts.reader()
            if self._dirprefix:
                filename=self._dirprefix+"/"+lstsfile
            else:
                filename=lstsfile
            try:
                lstsobj.read(file(filename))
                self.log("Model component %s loaded from '%s'" % (len(self._lstslist),filename))
            except Exception,(errno,errstr):
                raise ValueError("Could not read lsts '%s':\n(%s) %s" % (filename,errno,errstr))
            self._lstslist.append((lstsnum,lstsobj))
        # Create global action numbering
        self._lstslist.createActionIndex()
        self._first_result_action_index=self._lstslist.getActionCount()
        # Convert rules into global action numbers
        # and append to rulelist
        for rule in parser.parseRules(rules_file_contents):
            syncact=[]
            result=""
            # go through syncronized actions (everything except the last field)
            try:
                for lstsnum,actname in rule[:-1]:
                    syncact.append( self._lstslist.act2int("%s.%s" % (lstsnum,actname)) )
                result=rule[-1]
                self._lstslist.addActionToIndex(result)
                self._rulelist.append(rules.Rule(syncact,self._lstslist.act2int(result)))
            except: # ??? catch the right exception only!!! (thrown by act2int)
                # the rule may have referred to non-existing actions
                pass
        self._last_result_action_index=self._lstslist.getActionCount()-1
        # LSTSs are handled through model interface, so store them to
        # modellist.
        self._modellist=[ lstsmodel.LstsModel(litem[1]) for litem in self._lstslist ]
        for m in self._modellist: m.useActionMapper(self._lstslist)
        self._actionmapper=self._lstslist
    def loadFromFile(self,rules_file_object):
        """Load rules from an open file object; infer the LSTS directory from
        the file's path unless one was set explicitly."""
        # Try to find out a directory for lsts files
        if not self._dirprefix and os.sep in rules_file_object.name:
            try: self._dirprefix=rules_file_object.name.rsplit(os.sep,1)[0]
            except: pass
        return self.loadFromObject(rules_file_object.read())
    def setLSTSDirectory(self,dirname):
        """Every LSTS mentioned in the rules file will be prefixed with dirname/"""
        self._dirprefix=dirname
# Alias expected by the model-loading framework.
Model=ParallelLstsModel
|
#!/usr/bin/python
# UDP broadcast beacon (Python 2): sends a fixed payload to the subnet
# broadcast address every 5 seconds, forever.
import socket, time
bind_addr='172.16.11.50'  # NOTE(review): defined but never used — presumably the intended local bind address
bcast_addr='172.16.11.255'
port=31585
msg='DEADBEEF'
my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# SO_BROADCAST must be enabled before sending to a broadcast address.
my_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST,1)
while 1:
    my_socket.sendto(msg, (bcast_addr, port))
    time.sleep(5)
|
from src.objs import *
from src.keyboard import *
from src.floodControl import floodControl
from urllib.parse import urlparse
from urllib.parse import parse_qs
def sendVideo(url, chatId, messageId=None, userLanguage=None):
    """Resolve *url* to a video and send it to the Telegram chat *chatId*.

    Results are cached in the database twice: keyed by the original URL and
    by the 'rc' query parameter of the resolved video link, so repeated
    requests can reuse Telegram's stored file_id instead of re-downloading.

    Args:
        url: raw link text sent by the user.
        chatId: destination Telegram chat id.
        messageId: optional message to delete after a successful send.
        userLanguage: UI language; looked up from settings when omitted.
    """
    userLanguage = userLanguage or dbSql.getSetting(chatId, 'language')
    # Drop any query string and make sure the URL carries a scheme.
    url = url.split('/?')[0]
    # BUGFIX: was "'https' + url", which produced e.g. "httpsexample.com/..."
    # for scheme-less input — the "://" separator was missing.
    url = 'https://' + url if not url.startswith('http') else url
    #! Check if the URL is already in the database
    videoId = dbSql.getVideo(url=url)
    setUrlVideoId = False
    setRcVideoId = False
    if not videoId:
        setUrlVideoId = True
        video = getVideo(url)
        if video['success']:
            videoLink = video['link']
            #! Getting the rc parameter from the link
            rc = parse_qs(urlparse(videoLink).query)['rc'][0]
            #! Check if the rc is already in the database
            videoId = dbSql.getVideo(rc=rc)
            if not videoId:
                videoId = videoLink
                setRcVideoId = True
    #! If video download is successful
    if videoId:
        bot.send_chat_action(chatId, 'upload_video')
        if setRcVideoId:
            # Fresh video: send by direct link.
            sent = bot.send_video(chatId, videoId, reply_markup=resultKeyboard(userLanguage, url))
            dbSql.increaseCounter('messageRequest')
        else:
            # Cached video: send by stored Telegram file_id.
            sent = bot.send_video(chatId, videoId['videoId'], reply_markup=resultKeyboard(userLanguage, url))
            dbSql.increaseCounter('messageRequestCached')
        if messageId:
            bot.delete_message(chatId, messageId)
        # Persist cache entries for future lookups.
        if setRcVideoId:
            dbSql.setVideo(rc=rc, url=url, videoId=sent.video.file_id, duration=sent.video.duration, description=video['description'])
        elif setUrlVideoId:
            # Video resolved but its rc already existed: only map the URL.
            dbSql.setVideo(url=url, rc=rc, setRc=False)
    #! Error
    else:
        bot.send_message(chatId, language[video['error']][userLanguage], reply_markup=socialKeyboard(userLanguage) if video['error'] in ['exception', 'unknownError'] else None)
#: Text handler
@bot.message_handler(content_types=['text'])
def message(message):
    """Route incoming text: /start, /token, inline-query start, or a video link."""
    userLanguage = dbSql.getSetting(message.chat.id, 'language')
    # Drop the message entirely if the user is flooding.
    if not floodControl(message, userLanguage):
        return
    text = message.text
    #! Start message handler
    if text == '/start':
        bot.send_message(message.chat.id, language['greet'][userLanguage].format(message.from_user.first_name), reply_markup=startKeyboard(userLanguage))
    #! Get user token
    elif text in ('/token', '/start getToken'):
        token = dbSql.getSetting(message.chat.id, 'token', 'users')
        bot.send_message(message.chat.id, language['token'][userLanguage].format(token))
    #! Inline query start handler
    elif text == '/start inlineQuery':
        bot.send_sticker(message.chat.id, 'CAACAgIAAxkBAANEYWV8vnrx1aDQVFFjqajvaCqpwc4AAksNAAIUOzlLPz1-YEAZN1QhBA')
    #! Link message handler
    else:
        sendVideo(url=text, chatId=message.chat.id, messageId=message.id, userLanguage=userLanguage)
import uuid
from django.test import Client, TestCase
from django.urls import reverse
from corehq.apps.data_dictionary.models import CaseProperty, CaseType
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import WebUser
from corehq.util.test_utils import flag_enabled
@flag_enabled('DATA_DICTIONARY')
class UpdateCasePropertyViewTest(TestCase):
    """Exercises the ``update_case_property`` view: creating, updating and
    validating case properties submitted as a JSON ``properties`` payload."""
    domain_name = uuid.uuid4().hex

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.domain = create_domain(cls.domain_name)
        cls.couch_user = WebUser.create(None, "test", "foobar", None, None)
        cls.couch_user.add_domain_membership(cls.domain_name, is_admin=True)
        cls.couch_user.save()
        cls.case_type_obj = CaseType(name='caseType', domain=cls.domain_name)
        cls.case_type_obj.save()
        CaseProperty(case_type=cls.case_type_obj, name='property').save()

    @classmethod
    def tearDownClass(cls):
        cls.case_type_obj.delete()
        cls.couch_user.delete(cls.domain_name, deleted_by=None)
        cls.domain.delete()
        super().tearDownClass()

    def setUp(self):
        self.url = reverse('update_case_property', args=[self.domain_name])
        self.client = Client()
        self.client.login(username='test', password='foobar')

    def _post_properties(self, payload):
        # Helper: POST the given JSON string as the "properties" form field.
        return self.client.post(self.url, {"properties": payload})

    def _get_property(self):
        return CaseProperty.objects.filter(
            case_type=self.case_type_obj,
            name='property',
        ).first()

    def _assert_type(self, value=''):
        self.assertEqual(self._get_property().data_type, value)

    def _get_case_property(self, name, case_type):
        return CaseProperty.objects.filter(
            case_type__name=case_type,
            name=name,
        ).first()

    def test_new_case_type(self):
        self._assert_type()
        response = self._post_properties('[{"caseType": "somethingelse", "name": "property", "data_type": "date"}]')
        self.assertEqual(response.status_code, 200)
        prop = self._get_case_property(name="property", case_type="somethingelse")
        self.assertEqual(prop.data_type, 'date')

    def test_new_case_property(self):
        self._assert_type()
        response = self._post_properties('[{"caseType": "caseType", "name": "otherproperty", "data_type": "date"}]')
        self.assertEqual(response.status_code, 200)
        prop = self._get_case_property(name="otherproperty", case_type="caseType")
        self.assertEqual(prop.data_type, 'date')

    def test_update_with_incorrect_data_type(self):
        self._assert_type()
        response = self._post_properties('[{"caseType": "caseType", "name": "property", "data_type": "blah"}]')
        self.assertEqual(response.status_code, 400)
        self._assert_type()

    def test_update_no_name(self):
        self._assert_type()
        response = self._post_properties('[{"caseType": "caseType", "name": "", "data_type": "date"}]')
        self.assertEqual(response.status_code, 400)
        self._assert_type()

    def test_update_of_correct_data_type(self):
        self._assert_type()
        response = self._post_properties('[{"caseType": "caseType", "name": "property", "data_type": "date"}]')
        self.assertEqual(response.status_code, 200)
        self._assert_type('date')

    def test_update_description(self):
        self.assertEqual(self._get_property().description, '')
        response = self._post_properties('[{"caseType": "caseType", "name": "property", "description": "description"}]')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self._get_property().description, 'description')
|
# XParty - A Framework for Building Tools to Support Social Learning in Synchronous Environments
# Authors: Ben Bederson - www.cs.umd.edu/~bederson
# Alex Quinn - www.cs.umd.edu/~aq
# Anne Rose - www.cs.umd.edu/hcil/members/~arose
# University of Maryland, Human-Computer Interaction Lab - www.cs.umd.edu/hcil
# Date: Originally created July 2011
# License: Apache License 2.0 - http://www.apache.org/licenses/LICENSE-2.0
from XPartyHandler import XPartyHandler
from server import channel
from server import exceptions
from server import model_access
from server.lib import gaesessions
import random, string
class StudentLoginHandler(XPartyHandler):
    """Handles student login POSTs: validates the activity code, resolves or
    creates the student record (anonymous if no nickname given), and writes
    the result as JSON. NOTE: Python 2 code (uses ``string.letters``)."""
    def post(self):
        try:
            self.init_user_context("student")
            student_nickname = self.request.get("student_nickname")
            # Collapse internal runs of whitespace to single spaces.
            student_nickname = " ".join(student_nickname.split())
            anonymous = True if not student_nickname else False
            activity_code = self.request.get("activity_code")
            # ext=1 indicates an external application (see response_data below).
            ext = int(self.request.get("ext", 0))
            # Retrieve activity from datastore
            # - If activity does not exist, this will return None.
            # - If activity existed but is disabled, it will return the activity, but activity.is_active will be False.
            # - If activity existed but was deleted (hidden), it will return the activity, but activity.is_deleted will be True.
            # (Deleting activities is done lazily. Actually, they are merely hidden from the teacher's view.)
            activity = model_access.get_activity(activity_code) if activity_code != "" else None
            if not activity_code:
                raise exceptions.XPartyException("Please enter an activity code.")
            # Activity not found
            if exceptions.ActivityNotFoundError.check(activity):
                raise exceptions.ActivityNotFoundError("Please check the activity code.")
            # Activity is no longer active or was deleted (hidden)
            elif exceptions.NotAnActiveActivityError.check(activity):
                raise exceptions.NotAnActiveActivityError("This activity is finished.")
            else:
                # Fetch student from datastore
                # Might return None if nobody has ever logged in with this nickname+activity combination
                if student_nickname:
                    student = model_access.get_student(student_nickname, activity_code)
                # If no student nickname, generate an anonymous one
                else:
                    student = None
                    alphabet = string.letters + string.digits
                    # Try up to 10 random 10-char nicknames before giving up.
                    for i in range(10):
                        anonymous_nickname = "".join(random.choice(alphabet) for j in range(10))
                        anonymous_student = model_access.get_student(anonymous_nickname, activity_code)
                        if anonymous_student is None:
                            student_nickname = anonymous_nickname
                            break
                if student_nickname is None:
                    raise exceptions.XPartyException("An anonymous login could not be created.")
                else:
                    session = gaesessions.get_current_session()
                    # Student found
                    if student is not None:
                        # Check if student already logged in to another session
                        if student.is_logged_in and session.sid != student.session_sid:
                            raise exceptions.XPartyException("Please choose another name. Someone is already logged in as %s."%\
                                (student_nickname.encode("ascii","xmlcharrefreplace")), "Session ID doesn't match.",
                                student.session_sid, session.sid, student.latest_login_timestamp, student.latest_logout_timestamp)
                        # Otherwise, update login info
                        else:
                            model_access.update_student_login_time(student, session_sid=session.sid)
                    # Create new student
                    else:
                        student = model_access.create_student({
                            "student_nickname": student_nickname,
                            "anonymous": anonymous,
                            "activity": activity,
                            "session_sid": session.sid
                        })
                    self.user = student
                    response_data = { "status": 1 }
                    # for external applications
                    # notify teacher about login since /student not loaded
                    if ext == 1:
                        response_data["student"] = model_access.student_data_to_dict(self.user)
                        response_data["activity"] = self.user.activity.to_dict()
                        channel.send_student_log_in(student=student)
                    self.write_response_as_json(response_data)
        except exceptions.XPartyException as e:
            # Login failures are reported to the client as JSON, not as 500s.
            self.user = None
            e.write_response_as_json(self)
|
import RFFT
import time

# Launch the API server as a background task, then idle until Ctrl-C.
apiServerProcess = RFFT.APITask(debug=True, host="192.168.178.24", port=5000)
apiServerProcess.start()
try:
    # BUGFIX: the original spun on `while True: pass` (pegging a CPU core)
    # with the try/except *inside* the loop, so a KeyboardInterrupt raised
    # between iterations escaped unhandled. Sleeping and wrapping the whole
    # loop fixes both problems.
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    apiServerProcess.shutdown()
    print("Shutting down")
import sys
import numpy as np
import pandas as pd
import time
from sklearn.decomposition import TruncatedSVD
from iterative_svd import IterativeSVD
import matplotlib.pyplot as plt
import plotly_express as px
import plotly
from distutils.util import strtobool
from file_processing import get_masked_matrix, process_labels_weights, center_masked_matrix
def run_iterative_svd(X_incomplete, start_rank, end_rank, rank, choose_best, num_cores, save_completed_matrix, completed_matrix_filename):
    """Fill in the NaN entries of X_incomplete via iterative SVD imputation.

    A matrix with no missing entries is returned unchanged; otherwise
    IterativeSVD imputes the NaNs (elapsed time is printed). Optionally saves
    the completed matrix to ``completed_matrix_filename`` via ``np.save``.
    """
    if not np.isnan(X_incomplete).any():
        X_complete = X_incomplete
    else:
        t0 = time.time()
        imputer = IterativeSVD(start_rank=start_rank, end_rank=end_rank, rank=rank, choose_best=choose_best, num_cores=num_cores)
        X_complete = imputer.fit_transform(X_incomplete)
        print("Iterative SVD --- %s seconds ---" % (time.time() - t0))
    if save_completed_matrix:
        np.save(completed_matrix_filename, X_complete)
    return X_complete
def weight_completed_matrix(X_complete, X_incomplete):
    """Center X_complete by its weighted column means (in place) and return
    the sqrt-weight-scaled matrix together with the centered matrix.

    Each sample's weight is the fraction of observed (non-NaN) entries in the
    corresponding row of X_incomplete; weights are normalized to sum to one.
    """
    n_positions = X_complete.shape[1]
    observed_fraction = np.sum(~np.isnan(X_incomplete), axis=1) / n_positions
    # Normalized diagonal weight matrix.
    W = np.diag(observed_fraction) / observed_fraction.sum()
    # Subtract weighted column means; mutates X_complete in place, matching
    # the original behavior.
    X_complete -= np.sum(np.matmul(W, X_complete), axis=0)
    Wsqrt_X = np.matmul(np.sqrt(W), X_complete)
    return Wsqrt_X, X_complete
def project_weighted_matrix(Wsqrt_X, X_complete):
    """Fit a 2-component truncated SVD on the weighted matrix and project the
    (unweighted) completed matrix onto it.

    Returns the 2-D projection and the percent of total variance captured by
    each of the two principal components.
    """
    svd = TruncatedSVD(2, algorithm="arpack")
    svd.fit(Wsqrt_X)
    X_projected = svd.transform(X_complete)
    n_samples = X_complete.shape[0]
    # Total variance = mean squared row norm of the completed matrix.
    total_var = np.trace(np.matmul(X_complete, X_complete.T)) / n_samples
    pc_percentvars = [100 * np.var(X_projected[:, k]) / total_var for k in (0, 1)]
    return X_projected, pc_percentvars[0], pc_percentvars[1]
def scatter_plot(X_projected, scatterplot_filename, output_filename, ind_IDs, labels):
    """Write an interactive HTML scatter plot of the 2-D projection plus a
    tab-separated file of the projected coordinates (ID, x, y)."""
    # Column order (x, y, Label, ID) matches the original sequential inserts.
    plot_df = pd.DataFrame({
        'x': X_projected[:, 0],
        'y': X_projected[:, 1],
        'Label': labels,
        'ID': ind_IDs,
    })
    fig = px.scatter(plot_df, x='x', y='y', color='Label', hover_name='ID', color_discrete_sequence=px.colors.qualitative.Alphabet)
    plotly.offline.plot(fig, filename=scatterplot_filename, auto_open=False)
    plot_df.to_csv(output_filename, columns=['ID', 'x', 'y'], sep='\t', index=False)
def run_method(beagle_or_vcf, beagle_filename, vcf_filename, is_masked, vit_or_fbk_or_tsv, vit_filename, fbk_filename, fb_or_msp, tsv_filename, num_ancestries, ancestry, prob_thresh, average_parents, start_rank, end_rank, rank, choose_best, num_cores, is_weighted, labels_filename, output_filename, scatterplot_filename, save_masked_matrix, masked_matrix_filename, save_completed_matrix, completed_matrix_filename):
    """End-to-end pipeline: mask -> label/weight -> center -> impute ->
    weighted 2-PC projection -> scatter plot.

    Parameter meanings mirror the keys of the parameters file parsed by
    ``run``. Prints the percent variance explained by the first two PCs.
    """
    # Build the (possibly NaN-masked) genotype matrix for the chosen ancestry.
    X_incomplete, ind_IDs, rs_IDs = get_masked_matrix(beagle_filename, vcf_filename, beagle_or_vcf, is_masked, vit_filename, fbk_filename, tsv_filename, vit_or_fbk_or_tsv, fb_or_msp, num_ancestries, ancestry, average_parents, prob_thresh)
    # Attach labels/weights; optionally persist the masked matrix.
    X_incomplete, ind_IDs, labels, _ = process_labels_weights(labels_filename, X_incomplete, ind_IDs, average_parents, is_weighted, save_masked_matrix, masked_matrix_filename)
    X_incomplete = center_masked_matrix(X_incomplete)
    # Impute missing entries, then project onto two weighted PCs.
    X_complete = run_iterative_svd(X_incomplete, start_rank, end_rank, rank, choose_best, num_cores, save_completed_matrix, completed_matrix_filename)
    Wsqrt_X, X_complete = weight_completed_matrix(X_complete, X_incomplete)
    X_projected, pc1_percentvar, pc2_percentvar = project_weighted_matrix(Wsqrt_X, X_complete)
    scatter_plot(X_projected, scatterplot_filename, output_filename, ind_IDs, labels)
    print("Percent variance explained by the 1st principal component: ", pc1_percentvar)
    print("Percent variance explained by the 2nd principal component: ", pc2_percentvar)
def run(params_filename):
    """Parse a KEY=VALUE parameter file and launch the projection pipeline.

    Lines beginning with '#' are comments. Only the first '=' splits a line,
    so values may themselves contain '=' characters.

    Args:
        params_filename: path to the parameter file.

    Raises:
        KeyError: a required parameter key is missing.
        ValueError: a parameter value fails its type conversion.
    """
    params = {}
    # 'with' guarantees the handle is closed; the original also avoided
    # shadowing by not naming the handle "file".
    with open(params_filename) as params_file:
        for line in params_file:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key_value = line.split('=', 1)  # split on the FIRST '=' only
            if len(key_value) == 2:
                params[key_value[0].strip()] = key_value[1].strip()
    # Required settings, cast to their expected types (values are already
    # strings, so the original's str() wrappers were redundant).
    beagle_or_vcf = int(params['BEAGLE_OR_VCF'])
    beagle_filename = params['BEAGLE_FILE']
    vcf_filename = params['VCF_FILE']
    is_masked = bool(strtobool(params['IS_MASKED']))
    vit_or_fbk_or_tsv = int(params['VIT_OR_FBK_OR_TSV'])
    vit_filename = params['VIT_FILE']
    fbk_filename = params['FBK_FILE']
    fb_or_msp = int(params['FB_OR_MSP'])
    tsv_filename = params['TSV_FILE']
    num_ancestries = int(params['NUM_ANCESTRIES'])
    ancestry = int(params['ANCESTRY'])
    prob_thresh = float(params['PROB_THRESH'])
    average_parents = bool(strtobool(params['AVERAGE_PARENTS']))
    start_rank = int(params['START_RANK'])
    end_rank = int(params['END_RANK'])
    rank = int(params['RANK'])
    choose_best = bool(strtobool(params['CHOOSE_BEST']))
    num_cores = int(params['NUM_CORES'])
    is_weighted = bool(strtobool(params['IS_WEIGHTED']))
    labels_filename = params['LABELS_FILE']
    output_filename = params['OUTPUT_FILE']
    scatterplot_filename = params['SCATTERPLOT_FILE']
    save_masked_matrix = bool(strtobool(params['SAVE_MASKED_MATRIX']))
    masked_matrix_filename = params['MASKED_MATRIX_FILE']
    save_completed_matrix = bool(strtobool(params['SAVE_COMPLETED_MATRIX']))
    completed_matrix_filename = params['COMPLETED_MATRIX_FILE']
    run_method(beagle_or_vcf, beagle_filename, vcf_filename, is_masked, vit_or_fbk_or_tsv, vit_filename, fbk_filename, fb_or_msp, tsv_filename, num_ancestries, ancestry, prob_thresh, average_parents, start_rank, end_rank, rank, choose_best, num_cores, is_weighted, labels_filename, output_filename, scatterplot_filename, save_masked_matrix, masked_matrix_filename, save_completed_matrix, completed_matrix_filename)
def main():
    """CLI entry point: expects the parameter-file path as the sole argument."""
    params_path = sys.argv[1]
    t0 = time.time()
    run(params_path)
    print("Total time --- %s seconds ---" % (time.time() - t0))


if __name__ == "__main__":
    main()
'''
https://leetcode.com/contest/weekly-contest-160/problems/maximum-length-of-a-concatenated-string-with-unique-characters/
'''
class Solution:
    def maxLength(self, arr: List[str]) -> int:
        """Return the longest length of a concatenation of a subsequence of
        *arr* whose combined characters are all unique (lowercase a-z)."""
        def to_mask(word):
            # Encode a word as a 26-bit character set; None if it repeats a letter.
            mask = 0
            for ch in word:
                bit = 1 << (ord(ch) - 97)
                if mask & bit:
                    return None
                mask |= bit
            return mask

        # Keep only words that have no internal duplicates.
        masks = [m for m in (to_mask(w) for w in arr) if m is not None]
        n = len(masks)
        best = 0
        # Enumerate every subset; a subset is valid iff its masks are disjoint.
        for subset in range(1 << n):
            combined = 0
            valid = True
            for i in range(n):
                if subset & (1 << i):
                    if combined & masks[i]:
                        valid = False
                        break
                    combined |= masks[i]
            if valid:
                best = max(best, bin(combined).count('1'))
        return best
|
#!/bin/python2
# Copyright (c) 2019 ZettaDB inc. All rights reserved.
# This source code is licensed under Apache 2.0 License,
# combined with Common Clause Condition 1.0, as detailed in the NOTICE file.
import sys
import json
import getpass
import argparse
from cluster_common import *
def generate_storage_service(args, machines, commandslist, node, idx, filesmap):
    """Emit a systemd unit for one kunlun-storage node and queue the commands
    that install and enable it on the node's machine."""
    mach = machines.get(node['ip'])
    storagedir = "kunlun-storage-%s" % args.product_version
    port = node['port']
    fname = "%d-kunlun-storage-%d.service" % (idx, port)
    servname = "kunlun-storage-%d" % port
    fname_to = "kunlun-storage-%d.service" % port
    # Render the whole unit file in one shot instead of line-by-line writes.
    unit_text = (
        "# kunlun-storage-%d systemd service file\n\n"
        "[Unit]\n"
        "Description=kunlun-storage-%d\n"
        "After=network.target\n\n"
        "[Install]\n"
        "WantedBy=multi-user.target\n\n"
        "[Service]\n"
        "Type=forking\n"
        "User=%s\n"
        "Restart=on-failure\n"
        "WorkingDirectory=%s/%s/dba_tools\n"
        "ExecStart=/bin/bash startmysql.sh %d\n"
        "ExecStop=/bin/bash stopmysql.sh %d\n"
    ) % (port, port, mach['user'], mach['basedir'], storagedir, port, port)
    servicef = open('install/%s' % fname, 'w')
    servicef.write(unit_text)
    servicef.close()
    # Ship the unit file to the node and register it with systemd.
    addNodeToFilesMap(filesmap, node, fname, './%s' % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo cp -f %s /usr/lib/systemd/system/" % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo systemctl enable %s" % servname)
def generate_server_service(args, machines, commandslist, node, idx, filesmap):
    """Emit a systemd unit for one kunlun-server node and queue the commands
    that install and enable it on the node's machine."""
    mach = machines.get(node['ip'])
    serverdir = "kunlun-server-%s" % args.product_version
    port = node['port']
    fname = "%d-kunlun-server-%d.service" % (idx, port)
    servname = "kunlun-server-%d" % port
    fname_to = "kunlun-server-%d.service" % port
    # Render the whole unit file in one shot instead of line-by-line writes.
    unit_text = (
        "# kunlun-server-%d systemd service file\n\n"
        "[Unit]\n"
        "Description=kunlun-server-%d\n"
        "After=network.target\n\n"
        "[Install]\n"
        "WantedBy=multi-user.target\n\n"
        "[Service]\n"
        "Type=forking\n"
        "User=%s\n"
        "Restart=on-failure\n"
        "WorkingDirectory=%s/%s/scripts\n"
        "ExecStart=/usr/bin/python2 start_pg.py --port=%d\n"
    ) % (port, port, mach['user'], mach['basedir'], serverdir, port)
    servicef = open('install/%s' % fname, 'w')
    servicef.write(unit_text)
    servicef.close()
    # Ship the unit file to the node and register it with systemd.
    addNodeToFilesMap(filesmap, node, fname, './%s' % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo cp -f %s /usr/lib/systemd/system/" % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo systemctl enable %s" % servname)
def generate_clustermgr_service(args, machines, commandslist, node, idx, filesmap):
    """Emit a systemd unit for one kunlun-cluster-manager node and queue the
    commands that install and enable it on the node's machine."""
    mach = machines.get(node['ip'])
    clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
    raft_port = node['brpc_raft_port']
    fname = "%d-kunlun-cluster-manager-%d.service" % (idx, raft_port)
    servname = "kunlun-cluster-manager-%d" % raft_port
    fname_to = "kunlun-cluster-manager-%d.service" % raft_port
    # Render the whole unit file in one shot instead of line-by-line writes.
    unit_text = (
        "# kunlun-cluster-manager-%d systemd service file\n\n"
        "[Unit]\n"
        "Description=kunlun-cluster-manager-%d\n"
        "After=network.target\n\n"
        "[Install]\n"
        "WantedBy=multi-user.target\n\n"
        "[Service]\n"
        "Type=forking\n"
        "User=%s\n"
        "Restart=on-failure\n"
        "WorkingDirectory=%s/%s/bin\n"
        "ExecStart=/bin/bash start_cluster_mgr.sh\n"
        "ExecStop=/bin/bash stop_cluster_mgr.sh\n"
    ) % (raft_port, raft_port, mach['user'], mach['basedir'], clustermgrdir)
    servicef = open('install/%s' % fname, 'w')
    servicef.write(unit_text)
    servicef.close()
    # Ship the unit file to the node and register it with systemd.
    addNodeToFilesMap(filesmap, node, fname, './%s' % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo cp -f %s /usr/lib/systemd/system/" % fname_to)
    addToCommandsList(commandslist, node['ip'], '.', "sudo systemctl enable %s" % servname)
def generate_haproxy_service(args, machines, commandslist, node, filesmap):
    """Emit a systemd unit for the kunlun haproxy node and queue the commands
    that install and enable it on the node's machine."""
    mach = machines.get(node['ip'])
    port = node['port']
    fname = "kunlun-haproxy-%d.service" % port
    servname = "kunlun-haproxy-%d" % port
    # Render the whole unit file in one shot instead of line-by-line writes.
    # Note: no ExecStop — haproxy is stopped by systemd's default kill logic.
    unit_text = (
        "# Kunlun-HAProxy-%d systemd service file\n\n"
        "[Unit]\n"
        "Description=Kunlun-HAProxy-%d\n"
        "After=network.target\n\n"
        "[Install]\n"
        "WantedBy=multi-user.target\n\n"
        "[Service]\n"
        "Type=forking\n"
        "User=%s\n"
        "Restart=on-failure\n"
        "WorkingDirectory=%s\n"
        "ExecStart=%s/haproxy-2.5.0-bin/sbin/haproxy -f haproxy.cfg\n"
    ) % (port, port, mach['user'], mach['basedir'], mach['basedir'])
    servicef = open('install/%s' % fname, 'w')
    servicef.write(unit_text)
    servicef.close()
    # Ship the unit file to the node and register it with systemd.
    addNodeToFilesMap(filesmap, node, fname, '.')
    addToCommandsList(commandslist, node['ip'], '.', "sudo cp -f %s /usr/lib/systemd/system/" % fname)
    addToCommandsList(commandslist, node['ip'], '.', "sudo systemctl enable %s" % servname)
def generate_haproxy_config(jscfg, machines, confname):
    """Write the haproxy.cfg for the cluster's computing nodes.

    The listener binds the haproxy port and round-robins TCP connections
    across every comp node. ``maxconn`` defaults to 10000 when not given.
    """
    cluster = jscfg['cluster']
    haproxy = cluster['haproxy']
    comp_nodes = cluster['comp']['nodes']
    basedir = machines[haproxy['ip']]['basedir']
    maxconn = haproxy.get('maxconn', 10000)
    header = '''# generated automatically
global
pidfile %s/haproxy.pid
maxconn %d
daemon
defaults
log global
retries 5
timeout connect 5s
timeout client 30000s
timeout server 30000s
listen kunlun-cluster
bind :%d
mode tcp
balance roundrobin
''' % (basedir, maxconn, haproxy['port'])
    conf = open(confname, 'w')
    conf.write(header)
    # One backend server entry per computing node.
    for seq, node in enumerate(comp_nodes, start=1):
        conf.write(" server comp%d %s:%d weight 1 check inter 10s\n" % (seq, node['ip'], node['port']))
    conf.close()
def generate_install_scripts(jscfg, args):
    """Generate install/commands.sh plus all per-node config files needed to
    install a complete Kunlun cluster.

    Order of generated commands: meta shard -> data shards -> computing
    nodes -> cluster bootstrap -> cluster_mgr -> haproxy -> optional init SQL.
    Primaries are installed before secondaries in each shard group.

    Deprecated ``dict.has_key()`` calls were replaced with the ``in``
    operator (equivalent under Python 2, and forward-compatible).

    :param jscfg: full cluster JSON config.
    :param args: parsed command-line arguments.
    """
    validate_and_set_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines, args)
    storagedir = "kunlun-storage-%s" % args.product_version
    serverdir = "kunlun-server-%s" % args.product_version
    clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
    installtype = args.installtype
    sudopfx = ""
    if args.sudo:
        sudopfx = "sudo "
    # NOTE: the old code also computed a 'valgrindopt' here, but it was never
    # used in this function (only generate_start_scripts uses it) — removed.
    filesmap = {}
    commandslist = []
    dirmap = {}
    cluster = jscfg['cluster']
    cluster_name = cluster['name']
    meta = cluster['meta']
    datas = cluster['data']
    if 'group_uuid' not in meta:
        meta['group_uuid'] = getuuid()
    meta_extraopt = " --ha_mode=%s" % meta['ha_mode']
    my_metaname = 'mysql_meta.json'
    metaf = open(r'install/%s' % my_metaname, 'w')
    json.dump(meta, metaf, indent=4)
    metaf.close()
    cmdpat = '%spython2 install-mysql.py --config=./%s --target_node_index=%d --cluster_id=%s --shard_id=%s'
    if args.small:
        cmdpat += ' --dbcfg=./template-small.cnf'
    # commands like:
    # python2 install-mysql.py --config=./mysql_meta.json --target_node_index=0
    targetdir = '%s/dba_tools' % storagedir
    i = 0
    storageidx = 0
    mpries = []
    msecs = []
    shard_id = "meta"
    meta_addrs = []
    # Meta shard: collect addresses and stage primary/secondary install
    # commands separately so primaries can be installed first.
    for node in meta['nodes']:
        meta_addrs.append("%s:%s" % (node['ip'], str(node['port'])))
        addNodeToFilesMap(filesmap, node, my_metaname, targetdir)
        cmd = cmdpat % (sudopfx, my_metaname, i, cluster_name, shard_id)
        if node.get('is_primary', False):
            mpries.append([node['ip'], targetdir, cmd])
        else:
            msecs.append([node['ip'], targetdir, cmd])
        addToDirMap(dirmap, node['ip'], node['data_dir_path'])
        addToDirMap(dirmap, node['ip'], node['log_dir_path'])
        if 'innodb_log_dir_path' in node:
            addToDirMap(dirmap, node['ip'], node['innodb_log_dir_path'])
        if args.autostart:
            generate_storage_service(args, machines, commandslist, node, storageidx, filesmap)
        i += 1
        storageidx += 1
    pries = []
    secs = []
    shard_extraopt = " --ha_mode=%s" % cluster['ha_mode']
    i = 1
    # Data shards: same staging as the meta shard, one JSON file per shard.
    for shard in datas:
        if 'group_uuid' not in shard:
            shard['group_uuid'] = getuuid()
        shard_id = "shard%d" % i
        my_shardname = "mysql_shard%d.json" % i
        shardf = open(r'install/%s' % my_shardname, 'w')
        json.dump(shard, shardf, indent=4)
        shardf.close()
        j = 0
        for node in shard['nodes']:
            addNodeToFilesMap(filesmap, node, my_shardname, targetdir)
            cmd = cmdpat % (sudopfx, my_shardname, j, cluster_name, shard_id)
            if node.get('is_primary', False):
                pries.append([node['ip'], targetdir, cmd])
            else:
                secs.append([node['ip'], targetdir, cmd])
            addToDirMap(dirmap, node['ip'], node['data_dir_path'])
            addToDirMap(dirmap, node['ip'], node['log_dir_path'])
            if 'innodb_log_dir_path' in node:
                addToDirMap(dirmap, node['ip'], node['innodb_log_dir_path'])
            if args.autostart:
                generate_storage_service(args, machines, commandslist, node, storageidx, filesmap)
            j += 1
            storageidx += 1
        i += 1
    # Install order: meta primaries, shard primaries, then all secondaries.
    for item in mpries:
        addToCommandsList(commandslist, item[0], item[1], item[2] + meta_extraopt)
    for item in pries:
        addToCommandsList(commandslist, item[0], item[1], item[2] + shard_extraopt)
    for item in msecs:
        addToCommandsList(commandslist, item[0], item[1], item[2] + meta_extraopt)
    for item in secs:
        addToCommandsList(commandslist, item[0], item[1], item[2] + shard_extraopt)
    comps = cluster['comp']['nodes']
    pg_compname = 'postgres_comp.json'
    compf = open(r'install/%s' % pg_compname, 'w')
    json.dump(comps, compf, indent=4)
    compf.close()
    # python2 install_pg.py --config=docker-comp.json --install_ids=1,2,3
    targetdir = "%s/scripts" % serverdir
    for node in comps:
        addNodeToFilesMap(filesmap, node, pg_compname, targetdir)
        cmdpat = r'python2 install_pg.py --config=./%s --install_ids=%d'
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (pg_compname, node['id']))
        addToDirMap(dirmap, node['ip'], node['datadir'])
    # This only needs to be transferred to the machine creating the cluster.
    reg_metaname = 'reg_meta.json'
    metaf = open(r'install/%s' % reg_metaname, 'w')
    objs = []
    for node in meta['nodes']:
        obj = {}
        obj['is_primary'] = node.get('is_primary', False)
        obj['data_dir_path'] = node['data_dir_path']
        obj['ip'] = node['ip']
        obj['port'] = node['port']
        obj['user'] = "pgx"
        obj['password'] = "pgx_pwd"
        objs.append(obj)
    json.dump(objs, metaf, indent=4)
    metaf.close()
    # This only needs to be transferred to the machine creating the cluster.
    reg_shardname = 'reg_shards.json'
    shardf = open(r'install/%s' % reg_shardname, 'w')
    shards = []
    i = 1
    for shard in datas:
        obj = {'shard_name': "shard%d" % i}
        i += 1
        nodes = []
        for node in shard['nodes']:
            n = {'user': 'pgx', 'password': 'pgx_pwd'}
            n['ip'] = node['ip']
            n['port'] = node['port']
            nodes.append(n)
        obj['shard_nodes'] = nodes
        shards.append(obj)
    json.dump(shards, shardf, indent=4)
    shardf.close()
    # Cluster registration/bootstrap runs on the first computing node only.
    comp1 = comps[0]
    addNodeToFilesMap(filesmap, comp1, reg_metaname, targetdir)
    addNodeToFilesMap(filesmap, comp1, reg_shardname, targetdir)
    resourcedir = "%s/resources" % serverdir
    cmdpat = r'/bin/bash build_driver_forpg.sh'
    addToCommandsList(commandslist, comp1['ip'], resourcedir, cmdpat, "all")
    cmdpat = r'python2 bootstrap.py --config=./%s --bootstrap_sql=./meta_inuse.sql' + meta_extraopt
    addToCommandsList(commandslist, comp1['ip'], targetdir, cmdpat % reg_metaname, "storage")
    cmdpat = 'python2 create_cluster.py --shards_config=./%s \
--comps_config=./%s --meta_config=./%s --cluster_name=%s --meta_ha_mode=%s --ha_mode=%s --cluster_owner=abc --cluster_biz=test'
    addToCommandsList(commandslist, comp1['ip'], targetdir,
                      cmdpat % (reg_shardname, pg_compname, reg_metaname, cluster_name, meta['ha_mode'], cluster['ha_mode']), "all")
    # bash -x bin/cluster_mgr_safe --debug --pidfile=run.pid clustermgr.cnf >& run.log </dev/null &
    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    metaseeds = ",".join(meta_addrs)
    clmgrcnf = "%s/conf/cluster_mgr.cnf" % clustermgrdir
    cmdpat = "bash change_config.sh %s '%s' '%s'"
    startpat = 'bash start_cluster_mgr.sh </dev/null >& start.log &'
    initmember = "%s:%d:0," % (clmgrnodes[0]['ip'], clmgrnodes[0]['brpc_raft_port'])
    clustermgridx = 0
    # Rewrite each cluster_mgr node's config in place, then start it.
    for node in clmgrnodes:
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'meta_group_seeds', metaseeds))
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'brpc_raft_port', node['brpc_raft_port']))
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'brpc_http_port', node['brpc_http_port']))
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'local_ip', node['ip']))
        addToCommandsList(commandslist, node['ip'], '.', cmdpat % (clmgrcnf, 'raft_group_member_init_config', initmember))
        addToCommandsList(commandslist, node['ip'], "%s/bin" % clustermgrdir, startpat)
        if args.autostart:
            generate_clustermgr_service(args, machines, commandslist, node, clustermgridx, filesmap)
        clustermgridx += 1
    haproxy = cluster.get("haproxy", None)
    if haproxy is not None:
        generate_haproxy_config(jscfg, machines, 'install/haproxy.cfg')
        cmdpat = r'haproxy-2.5.0-bin/sbin/haproxy -f haproxy.cfg >& haproxy.log'
        addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
        if args.autostart:
            generate_haproxy_service(args, machines, commandslist, haproxy, filesmap)
    # Optional user-supplied SQL, run on the first computing node after a
    # configurable settle delay.
    initobj = cluster.get("initialization", None)
    initfile = "auto_init.sql"
    if initobj is not None:
        initsqlf = open("install/%s" % initfile, 'w')
        for sqlc in initobj.get("sqlcommands", []):
            initsqlf.write(sqlc)
            initsqlf.write(";\n")
        initsqlf.close()
        node = comps[0]
        waitTime = initobj.get("waitseconds", 10)
        addNodeToFilesMap(filesmap, node, initfile, ".")
        cmdpat = r'sleep %s; psql -f %s postgres://%s:%s@%s:%s/postgres'
        addToCommandsList(commandslist, node['ip'], ".",
                          cmdpat % (str(waitTime), initfile, node['user'], node['password'], 'localhost', str(node['port'])), "computing")
    com_name = 'commands.sh'
    comf = open(r'install/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    # Per-machine setup: basedir creation, tarball transfer/unpack, helper
    # scripts, env.sh generation and dependency/driver builds.
    for ip in machines:
        mach = machines.get(ip)
        if args.sudo:
            process_command_noenv(comf, args, machines, ip, '/',
                                  'sudo mkdir -p %s && sudo chown -R %s:\`id -gn %s\` %s' % (mach['basedir'],
                                  mach['user'], mach['user'], mach['basedir']))
        else:
            process_command_noenv(comf, args, machines, ip, '/', 'mkdir -p %s' % mach['basedir'])
        # Set up the files
        if installtype == 'full':
            process_file(comf, args, machines, ip, '%s.tgz' % storagedir, mach['basedir'])
            process_file(comf, args, machines, ip, '%s.tgz' % serverdir, mach['basedir'])
            process_file(comf, args, machines, ip, '%s.tgz' % clustermgrdir, mach['basedir'])
            process_command_noenv(comf, args, machines, ip, mach['basedir'], 'tar -xzf %s.tgz' % storagedir)
            process_command_noenv(comf, args, machines, ip, mach['basedir'], 'tar -xzf %s.tgz' % serverdir)
            process_command_noenv(comf, args, machines, ip, mach['basedir'], 'tar -xzf %s.tgz' % clustermgrdir)
            if 'haproxy' in cluster:
                process_file(comf, args, machines, ip, 'haproxy-2.5.0-bin.tar.gz', mach['basedir'])
                process_command_noenv(comf, args, machines, ip, mach['basedir'], 'tar -xzf haproxy-2.5.0-bin.tar.gz')
        # files
        flist = [
            ['build_driver_forpg.sh', '%s/resources' % serverdir],
            ['build_driver_formysql.sh', '%s/resources' % storagedir],
            [reg_metaname, '%s/scripts' % serverdir],
            ['process_deps.sh', '.'],
            ['change_config.sh', '.']
        ]
        for fpair in flist:
            process_file(comf, args, machines, ip, 'install/%s' % fpair[0], "%s/%s" % (mach['basedir'], fpair[1]))
        if 'haproxy' in cluster:
            process_file(comf, args, machines, ip, 'install/haproxy.cfg', mach['basedir'])
        # Set up the env.sh, this must be before 'process_command_setenv'
        process_file(comf, args, machines, ip, 'env.sh.template', mach['basedir'])
        extstr = "sed -s 's#KUNLUN_BASEDIR#%s#g' env.sh.template > env.sh" % mach['basedir']
        process_command_noenv(comf, args, machines, ip, mach['basedir'], extstr)
        extstr = "sed -i 's#KUNLUN_VERSION#%s#g' env.sh" % args.product_version
        process_command_noenv(comf, args, machines, ip, mach['basedir'], extstr)
        comstr = "bash ../../process_deps.sh"
        process_command_setenv(comf, args, machines, ip, "%s/lib" % storagedir, comstr, "storage")
        process_command_setenv(comf, args, machines, ip, "%s/lib" % serverdir, comstr, "computing")
        comstr = "bash build_driver_formysql.sh %s"
        process_command_setenv(comf, args, machines, ip, "%s/resources" % storagedir, comstr % mach['basedir'], "storage")
        comstr = "bash build_driver_forpg.sh %s"
        process_command_setenv(comf, args, machines, ip, "%s/resources" % serverdir, comstr % mach['basedir'], "all")
        # Truncate any stale instance registries left from a previous install.
        comstr = "cd %s || exit 1; test -d etc && echo > etc/instances_list.txt 2>/dev/null; exit 0" % serverdir
        process_command_noenv(comf, args, machines, ip, mach['basedir'], comstr)
        comstr = "cd %s || exit 1; test -d etc && echo > etc/instances_list.txt 2>/dev/null; exit 0" % storagedir
        process_command_noenv(comf, args, machines, ip, mach['basedir'], comstr)
    # dir making
    for ip in dirmap:
        mach = machines.get(ip)
        dirs = dirmap[ip]
        for d in dirs:
            if args.sudo:
                process_command_noenv(comf, args, machines, ip, '/',
                                      'sudo mkdir -p %s && sudo chown -R %s:\`id -gn %s\` %s' % (d, mach['user'], mach['user'], d))
            else:
                process_command_noenv(comf, args, machines, ip, '/', 'mkdir -p %s' % d)
    # files copy.
    for ip in filesmap:
        mach = machines.get(ip)
        fmap = filesmap[ip]
        for fname in fmap:
            process_file(comf, args, machines, ip, 'install/%s' % fname, '%s/%s' % (mach['basedir'], fmap[fname]))
    # The reason for not using commands map is that, we need to keep the order for the commands.
    process_commandslist_setenv(comf, args, machines, commandslist)
    comf.close()
# The order is meta shard -> data shards -> cluster_mgr -> comp nodes
def generate_start_scripts(jscfg, args):
    """Generate start/commands.sh which starts every cluster component.

    Start order: meta shard -> data shards -> cluster_mgr -> computing
    nodes -> haproxy.

    Removed an unused local ('filesmap') and hoisted loop-invariant
    command patterns out of the loops.

    :param jscfg: full cluster JSON config.
    :param args: parsed command-line arguments.
    """
    validate_and_set_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines, args)
    storagedir = "kunlun-storage-%s" % args.product_version
    serverdir = "kunlun-server-%s" % args.product_version
    clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
    sudopfx = ""
    if args.sudo:
        sudopfx = "sudo "
    valgrindopt = ""
    if args.valgrind:
        valgrindopt = "--valgrind"
    commandslist = []
    cluster = jscfg['cluster']
    meta = cluster['meta']
    # Storage nodes are started with: bash startmysql.sh [port]
    targetdir = '%s/dba_tools' % storagedir
    cmdpat = r'%sbash startmysql.sh %s'
    for node in meta['nodes']:
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['port']))
    datas = cluster['data']
    for shard in datas:
        for node in shard['nodes']:
            addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (sudopfx, node['port']))
    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    cmdpat = r'bash start_cluster_mgr.sh </dev/null >& run.log &'
    for node in clmgrnodes:
        addToCommandsList(commandslist, node['ip'], "%s/bin" % clustermgrdir, cmdpat)
    # Computing nodes: python2 start_pg.py --port=<port> [--valgrind]
    comps = cluster['comp']['nodes']
    targetdir = "%s/scripts" % serverdir
    cmdpat = r'python2 start_pg.py --port=%d %s'
    for node in comps:
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (node['port'], valgrindopt), "computing")
    haproxy = cluster.get("haproxy", None)
    if haproxy is not None:
        cmdpat = r'haproxy-2.5.0-bin/sbin/haproxy -f haproxy.cfg >& haproxy.log'
        addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
    com_name = 'commands.sh'
    comf = open(r'start/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    process_commandslist_setenv(comf, args, machines, commandslist)
    comf.close()
# The order is: comp-nodes -> cluster_mgr -> data shards -> meta shard
def generate_stop_scripts(jscfg, args):
    """Generate stop/commands.sh which stops every cluster component.

    Stop order (reverse of start): haproxy -> computing nodes ->
    cluster_mgr -> data shards -> meta shard.

    Loop-invariant command patterns were hoisted out of the loops.

    :param jscfg: full cluster JSON config.
    :param args: parsed command-line arguments.
    """
    validate_and_set_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines, args)
    storagedir = "kunlun-storage-%s" % args.product_version
    serverdir = "kunlun-server-%s" % args.product_version
    clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
    commandslist = []
    cluster = jscfg['cluster']
    haproxy = cluster.get("haproxy", None)
    if haproxy is not None:
        cmdpat = "cat haproxy.pid | xargs kill -9"
        addToCommandsList(commandslist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
    # Computing nodes: pg_ctl -D <datadir> stop
    comps = cluster['comp']['nodes']
    targetdir = "%s/scripts" % serverdir
    cmdpat = r'pg_ctl -D %s stop -m immediate'
    for node in comps:
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['datadir'], "computing")
    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    cmdpat = r'bash stop_cluster_mgr.sh'
    for node in clmgrnodes:
        addToCommandsList(commandslist, node['ip'], "%s/bin" % clustermgrdir, cmdpat)
    # Storage nodes: bash stopmysql.sh [port]
    targetdir = '%s/dba_tools' % storagedir
    cmdpat = r'bash stopmysql.sh %d'
    datas = cluster['data']
    for shard in datas:
        for node in shard['nodes']:
            addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")
    # Meta shard is stopped last.
    meta = cluster['meta']
    for node in meta['nodes']:
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % node['port'], "storage")
    com_name = 'commands.sh'
    comf = open(r'stop/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    process_commandslist_setenv(comf, args, machines, commandslist)
    comf.close()
def generate_systemctl_clean(servname, ip, commandslist):
    """Queue the commands that stop, disable and delete the systemd unit
    *servname* on host *ip*, in that order."""
    patterns = (
        "sudo systemctl stop %s",
        "sudo systemctl disable %s",
        "sudo rm -f /usr/lib/systemd/system/%s",
    )
    for pattern in patterns:
        addToCommandsList(commandslist, ip, '/', pattern % servname)
# The order is: comp-nodes -> cluster_mgr -> data shards -> meta shard
def generate_clean_scripts(jscfg, args):
    """Generate clean/commands.sh which stops every component and removes
    its data, log and (with --cleantype=full) installation directories.

    Clean order: haproxy -> computing nodes -> cluster_mgr -> data shards ->
    meta shard -> basedirs. Stop commands go to env_cmdlist (run with
    env.sh sourced); removal commands go to noenv_cmdlist.

    Deprecated ``dict.has_key()`` calls were replaced with the ``in``
    operator (equivalent under Python 2, and forward-compatible).

    :param jscfg: full cluster JSON config.
    :param args: parsed command-line arguments.
    """
    validate_and_set_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines, args)
    storagedir = "kunlun-storage-%s" % args.product_version
    serverdir = "kunlun-server-%s" % args.product_version
    clustermgrdir = "kunlun-cluster-manager-%s" % args.product_version
    sudopfx = ""
    if args.sudo:
        sudopfx = "sudo "
    cleantype = args.cleantype
    env_cmdlist = []
    noenv_cmdlist = []
    cluster = jscfg['cluster']
    haproxy = cluster.get("haproxy", None)
    if haproxy is not None:
        cmdpat = "cat haproxy.pid | xargs kill -9"
        addToCommandsList(noenv_cmdlist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
        cmdpat = "rm -f haproxy.pid"
        addToCommandsList(noenv_cmdlist, haproxy['ip'], machines[haproxy['ip']]['basedir'], cmdpat)
        if args.autostart:
            servname = 'kunlun-haproxy-%d.service' % haproxy['port']
            generate_systemctl_clean(servname, haproxy['ip'], noenv_cmdlist)
    # Computing nodes: stop postgres, then remove its data directory.
    comps = cluster['comp']['nodes']
    targetdir = "%s/scripts" % serverdir
    for node in comps:
        cmdpat = r'pg_ctl -D %s stop -m immediate'
        addToCommandsList(env_cmdlist, node['ip'], targetdir, cmdpat % node['datadir'], "computing")
        cmdpat = r'%srm -fr %s'
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['datadir']))
        if args.autostart:
            servname = 'kunlun-server-%d.service' % node['port']
            generate_systemctl_clean(servname, node['ip'], noenv_cmdlist)
    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    cmdpat = r'bash stop_cluster_mgr.sh'
    for node in clmgrnodes:
        addToCommandsList(env_cmdlist, node['ip'], "%s/bin" % clustermgrdir, cmdpat)
        if args.autostart:
            servname = 'kunlun-cluster-manager-%d.service' % node['brpc_raft_port']
            generate_systemctl_clean(servname, node['ip'], noenv_cmdlist)
    # Data shard nodes: stop mysqld, then remove log/data/innodb-log dirs.
    targetdir = '%s/dba_tools' % storagedir
    datas = cluster['data']
    for shard in datas:
        for node in shard['nodes']:
            cmdpat = r'bash stopmysql.sh %d'
            addToCommandsList(env_cmdlist, node['ip'], targetdir, cmdpat % node['port'], "storage")
            cmdpat = r'%srm -fr %s'
            addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['log_dir_path']))
            addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['data_dir_path']))
            if 'innodb_log_dir_path' in node:
                addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['innodb_log_dir_path']))
            if args.autostart:
                servname = 'kunlun-storage-%d.service' % node['port']
                generate_systemctl_clean(servname, node['ip'], noenv_cmdlist)
    # Meta shard nodes are cleaned last, same way as data shard nodes.
    meta = cluster['meta']
    targetdir = '%s/dba_tools' % storagedir
    for node in meta['nodes']:
        cmdpat = r'bash stopmysql.sh %d'
        addToCommandsList(env_cmdlist, node['ip'], targetdir, cmdpat % node['port'], "storage")
        cmdpat = r'%srm -fr %s'
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['log_dir_path']))
        addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['data_dir_path']))
        if 'innodb_log_dir_path' in node:
            addToCommandsList(noenv_cmdlist, node['ip'], ".", cmdpat % (sudopfx, node['innodb_log_dir_path']))
        if args.autostart:
            servname = 'kunlun-storage-%d.service' % node['port']
            generate_systemctl_clean(servname, node['ip'], noenv_cmdlist)
    if cleantype == 'full':
        for ip in machines:
            mach = machines[ip]
            cmdpat = '%srm -fr %s/*'
            addToCommandsList(noenv_cmdlist, ip, ".", cmdpat % (sudopfx, mach['basedir']))
    com_name = 'commands.sh'
    comf = open(r'clean/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    process_commandslist_setenv(comf, args, machines, env_cmdlist)
    process_commandslist_noenv(comf, args, machines, noenv_cmdlist)
    comf.close()
# The order is meta shard -> data shards -> cluster_mgr -> comp nodes
def generate_check_scripts(jscfg, args):
    """Generate check/commands.sh which health-checks every component.

    Check order: meta shard -> data shards -> cluster_mgr -> computing nodes.
    Removed three unused locals (storagedir/serverdir/clustermgrdir) that
    the old code computed but never read.

    :param jscfg: full cluster JSON config.
    :param args: parsed command-line arguments.
    """
    validate_and_set_config1(jscfg, args)
    machines = {}
    setup_machines1(jscfg, machines, args)
    filesmap = {}
    commandslist = []
    cluster = jscfg['cluster']
    meta = cluster['meta']
    metacnt = len(meta['nodes'])
    # A single meta node means no replication; otherwise use the configured
    # ha_mode, defaulting to 'mgr' when unset/no_rep.
    ha_mode = "no_rep"
    if metacnt > 1:
        ha_mode = get_ha_mode(jscfg, args)
        if ha_mode == '' or ha_mode == 'no_rep':
            ha_mode = 'mgr'
    # commands like:
    # bash check_storage.sh [host] [port] [ha_mode]
    targetdir = '.'
    cmdpat = r'bash check_storage.sh %s %s %s'
    for node in meta['nodes']:
        addNodeToFilesMap(filesmap, node, 'check/check_storage.sh', targetdir)
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (node['ip'], str(node['port']), ha_mode), "storage")
    datas = cluster['data']
    for shard in datas:
        for node in shard['nodes']:
            addNodeToFilesMap(filesmap, node, 'check/check_storage.sh', targetdir)
            addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (node['ip'], str(node['port']), ha_mode), "storage")
    # commands like:
    # bash check_cluster_manager.sh [basedir]
    clmgrnodes = jscfg['cluster']['clustermgr']['nodes']
    cmdpat = r'bash check_cluster_manager.sh %s'
    for node in clmgrnodes:
        addNodeToFilesMap(filesmap, node, 'check/check_cluster_manager.sh', targetdir)
        mach = machines.get(node['ip'])
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (mach['basedir']), "clustermgr")
    # commands like
    # bash check_server.sh [port] [user] [password]
    comps = cluster['comp']['nodes']
    cmdpat = r'bash check_server.sh %s %s %s'
    for node in comps:
        addNodeToFilesMap(filesmap, node, 'check/check_server.sh', targetdir)
        addToCommandsList(commandslist, node['ip'], targetdir, cmdpat % (node['port'], node['user'], node['password']), "computing")
    com_name = 'commands.sh'
    comf = open(r'check/%s' % com_name, 'w')
    comf.write('#! /bin/bash\n')
    # Transfer the check scripts before running them.
    for ip in filesmap:
        mach = machines.get(ip)
        fmap = filesmap[ip]
        for fname in fmap:
            process_file(comf, args, machines, ip, fname, '%s/%s' % (mach['basedir'], fmap[fname]))
    process_commandslist_setenv(comf, args, machines, commandslist)
    comf.close()
if __name__ == '__main__':
actions=["install", "start", "stop", "clean", "check"]
parser = argparse.ArgumentParser(description='Specify the arguments.')
parser.add_argument('--action', type=str, help="The action", required=True, choices=actions)
parser.add_argument('--config', type=str, help="The cluster config path", required=True)
parser.add_argument('--defuser', type=str, help="the default user", default=getpass.getuser())
parser.add_argument('--defbase', type=str, help="the default basedir", default='/kunlun')
parser.add_argument('--installtype', type=str, help="the install type", default='full', choices=['full', 'cluster'])
parser.add_argument('--cleantype', type=str, help="the clean type", default='full', choices=['full', 'cluster'])
parser.add_argument('--sudo', help="whether to use sudo", default=False, action='store_true')
parser.add_argument('--autostart', help="whether to start the cluster automaticlly", default=False, action='store_true')
parser.add_argument('--localip', type=str, help="The local ip address", default=gethostip())
parser.add_argument('--product_version', type=str, help="kunlun version", default='0.9.2')
parser.add_argument('--small', help="whether to use small template", default=False, action='store_true')
parser.add_argument('--valgrind', help="whether to use valgrind", default=False, action='store_true')
parser.add_argument('--defbrpc_raft_port', type=int, help="default brpc_raft_port for cluster_manager", default=58000)
parser.add_argument('--defbrpc_http_port', type=int, help="default brpc_raft_port for cluster_manager", default=58001)
args = parser.parse_args()
if not args.defbase.startswith('/'):
raise ValueError('Error: the default basedir must be absolute path!')
checkdirs(actions)
print str(sys.argv)
jscfg = get_json_from_file(args.config)
if args.autostart:
args.sudo = True
# print str(jscfg)
if args.action == 'install':
generate_install_scripts(jscfg, args)
elif args.action == 'start':
generate_start_scripts(jscfg, args)
elif args.action == 'stop':
generate_stop_scripts(jscfg, args)
elif args.action == 'clean':
generate_clean_scripts(jscfg, args)
elif args.action == 'check':
generate_check_scripts(jscfg, args)
else:
usage()
sys.exit(1)
|
import re
from types import MappingProxyType
from typing import (
Sequence,
Mapping,
Any,
List,
Type,
Set,
Optional,
Tuple,
cast,
Iterable,
)
# Shared read-only empty mapping; MappingProxyType prevents accidental mutation.
IMMUTABLE_EMPTY_DICT = MappingProxyType({})
class Line:
    """
    One line of text together with the indentation depth it renders at.

    Storing the depth as an integer (instead of baking spaces into the
    string) avoids re-copying every string each time a statement is nested
    one scope deeper in the AST.
    """

    INDENT_UNIT: str = " " * 4

    def __init__(self, text: str, indent_level: int = 0) -> None:
        self.text = text
        self.indent_level = indent_level

    def __str__(self) -> str:
        prefix = self.INDENT_UNIT * self.indent_level
        return prefix + self.text

    def __repr__(self) -> str:
        name = self.__class__.__qualname__
        return f"{name}(text={self.text!r}, indent_level={self.indent_level!r})"

    def clone(self) -> "Line":
        """Create an exact but disconnected copy of self (useful in tests)."""
        return self.__class__(text=self.text, indent_level=self.indent_level)

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, self.__class__):
            return False
        other = cast(__class__, o)
        return self.text == other.text and self.indent_level == other.indent_level
def _resplit(parts: Iterable[str]) -> List[str]:
"""
Given a list of strings, returns a list of lines, by splitting each string
into multiple lines where it contains newlines.
>>> _resplit([])
[]
>>> _resplit(['a', 'b'])
['a', 'b']
>>> _resplit(['a', 'b\\nc\\nd'])
['a', 'b', 'c', 'd']
"""
return [line for part in parts for line in part.splitlines()]
class Statement:
    """
    Abstract base for everything serialized through lines().

    Python distinguishes statements from expressions: statements cannot be
    assigned to a variable, expressions can.  The distinction that matters
    here is that a statement may genuinely span several lines, whereas any
    expression fits on a single line.  This base class also owns comment
    handling for all of its implementors.
    """

    def __init__(self, comments: Sequence[str] = ()) -> None:
        self._comments = _resplit(comments)

    @property
    def comments(self) -> List[str]:
        # Normalize lazily: the setter accepts strings containing embedded
        # newlines, so re-split them every time the comments are read.
        self._comments = _resplit(self._comments)
        return self._comments

    @comments.setter
    def comments(self, value: List[str]):
        self._comments = value

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        """
        All Line objects necessary to represent this Statement, along with
        the appropriate indentation level.

        :param indent_level: How much indentation to apply to the least
            indented line of this statement.
        :param comments: Whether existing comments attached to self should
            be included in the result.
        """
        raise NotImplementedError

    def comment_lines(self, indent_level: int) -> List[Line]:
        """Render self.comments as "#"-prefixed Line objects."""
        return [Line(f"# {text}", indent_level) for text in self.comments]

    def attach_comment(self, line: Line) -> List[Line]:
        """
        Attach this statement's comments to *line*: inline when there is a
        single comment, on dedicated lines above otherwise.
        """
        current = self.comments
        if not current:
            return [line]
        if len(current) == 1:
            line.text += f" # {current[0]}"
            return [line]
        result = self.comment_lines(line.indent_level)
        result.append(line)
        return result

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, self.__class__):
            return False
        return self.comments == cast(__class__, o).comments
# Handy alias for type signatures: a whole program is just a sequence of statements.
Program = Sequence[Statement]
class OpaqueBlock(Statement):
    """
    A block of code already represented as a string.
    This helps moving existing code (e.g. in plugins) from our ad-hoc
    "blocks of code" framework to the AST framework defined in this module.
    It also allows to express Python constructs that would otherwise not yet be
    representable with this AST framework.
    """

    # Matches a line's leading whitespace; used to measure indentation widths.
    PREFIX_RX = re.compile(r"\s+")
    # Tab width used when normalizing tabs before measuring indentation.
    TAB_SIZE = 8

    def __init__(self, block: str, comments: Sequence[str] = ()) -> None:
        super().__init__(comments)
        # A whitespace-only block would produce no lines and break lines().
        if not block.strip():
            raise ValueError(f"OpaqueBlock can't be empty but got {block!r}")
        self.block = block

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        # Expand tabs so indentation widths are comparable across lines.
        raw_lines = [l.expandtabs(self.TAB_SIZE) for l in self.block.splitlines()]
        # Trim leading and trailing blank lines (next() cannot raise here:
        # __init__ guarantees at least one non-blank line exists).
        first_nonempty_line = next(i for i, l in enumerate(raw_lines) if l.strip())
        after_last_nonempty_line = next(
            len(raw_lines) - i for i, l in enumerate(reversed(raw_lines)) if l.strip()
        )
        raw_lines = raw_lines[first_nonempty_line:after_last_nonempty_line]
        # Dedent by the shortest common leading whitespace; a line with no
        # leading whitespace (no regex match) counts as width 0.
        indents = [self.PREFIX_RX.match(l) for l in raw_lines]
        shortest_indent = min(len(p.group()) if p else 0 for p in indents)
        block_lines = [Line(l[shortest_indent:], indent_level) for l in raw_lines]
        if comments:
            return [*self.comment_lines(indent_level), *block_lines]
        return block_lines

    def __repr__(self) -> str:
        return "{}({!r}, comments={!r})".format(
            self.__class__.__qualname__, self.block, self.comments
        )

    def __eq__(self, o: object) -> bool:
        return super().__eq__(o) and self.block == cast(__class__, o).block
class Function(Statement):
    """
    A function definition (``def ...``).
    """

    def __init__(
        self,
        name: str,
        params: Sequence[str],
        statements: Sequence[Statement],
        comments: Sequence[str] = (),
    ) -> None:
        super().__init__(comments)
        self.name = name
        self.params = list(params)
        self.statements = list(statements)

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        header = Line(
            "def {}({}):".format(self.name, ", ".join(self.params)), indent_level
        )
        body: List[Line] = []
        for stmt in self.statements:
            body.extend(stmt.lines(indent_level + 1, comments))
        if not body:
            # An empty body is invalid Python; emit a placeholder.
            body = [Line("pass", indent_level + 1)]
        if comments:
            return [*self.attach_comment(header), *body]
        return [header, *body]

    def __repr__(self) -> str:
        return "{}(name={!r}, params={!r}, statements={!r}, comments={!r})".format(
            self.__class__.__qualname__,
            self.name,
            self.params,
            self.statements,
            self.comments,
        )

    def __eq__(self, o: object) -> bool:
        if not super().__eq__(o):
            return False
        other = cast(__class__, o)
        return (
            self.name == other.name
            and self.params == other.params
            and self.statements == other.statements
        )
class Decoration(Statement):
    """
    A function or class definition with a decorator applied (e.g. ``@task``).
    """

    def __init__(
        self, decorator: str, target: Statement, comments: Sequence[str] = ()
    ) -> None:
        super().__init__(comments)
        self.decorator = decorator
        self.target = target

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        decorator_line = Line(f"@{self.decorator}", indent_level)
        rest = self.target.lines(indent_level, comments)
        if not comments:
            return [decorator_line, *rest]
        return [*self.attach_comment(decorator_line), *rest]

    def __repr__(self) -> str:
        return "{}({!r}, {!r}, comments={!r})".format(
            self.__class__.__qualname__, self.decorator, self.target, self.comments
        )

    def __eq__(self, o: object) -> bool:
        if not super().__eq__(o):
            return False
        other = cast(__class__, o)
        return self.decorator == other.decorator and self.target == other.target
class Class(Statement):
    """
    A class definition.
    """

    def __init__(
        self,
        name: str,
        statements: Sequence[Statement],
        superclasses: Sequence[str] = (),
        comments: Sequence[str] = (),
    ) -> None:
        super().__init__(comments)
        self.name = name
        self.statements = list(statements)
        self.superclasses = list(superclasses)

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        bases = ""
        if self.superclasses:
            bases = "({})".format(", ".join(self.superclasses))
        body: List[Line] = []
        for stmt in self.statements:
            body.extend(stmt.lines(indent_level + 1, comments))
        if not body:
            # An empty body is invalid Python; emit a placeholder.
            body = [Line("pass", indent_level + 1)]
        header = Line(f"class {self.name}{bases}:", indent_level)
        if comments:
            return [*self.attach_comment(header), *body]
        return [header, *body]

    def __repr__(self) -> str:
        return (
            "{}(name={!r}, statements={!r}, " "superclasses={!r}, comments={!r})"
        ).format(
            self.__class__.__qualname__,
            self.name,
            self.statements,
            self.superclasses,
            self.comments,
        )

    def __eq__(self, o: object) -> bool:
        if not super().__eq__(o):
            return False
        other = cast(__class__, o)
        return (
            self.name == other.name
            and self.statements == other.statements
            and self.superclasses == other.superclasses
        )
class Expression:
    """
    See the documentation of Statement for why Expression is a separate class.

    An expression is still a statement in Python (e.g. functions can be called
    anywhere), but this Expression class is NOT a Statement because we can't
    attach comments to arbitrary expressions (e.g. between braces).

    If you need to use an Expression as a Statement, see the Standalone wrapper
    class.

    This class serves as abstract base for all our implementors of __str__().
    """

    def __str__(self) -> str:
        # Every concrete expression must define its own serialization.
        raise NotImplementedError

    def __eq__(self, o: object) -> bool:
        # Base equality is only an exact-type check; subclasses AND in their
        # own field comparisons.
        return isinstance(o, self.__class__)
class Standalone(Statement):
    """
    Wraps an Expression so that it can be used as a Statement.
    """

    def __init__(self, expr: Expression, comments: Sequence[str] = ()) -> None:
        super().__init__(comments)
        self.expr = expr

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        """
        An Expression E used as a Statement is serialized as the result of
        str(E) on its own Line.
        """
        rendered = Line(str(self.expr), indent_level)
        if not comments:
            return [rendered]
        return self.attach_comment(rendered)

    def __repr__(self) -> str:
        cls_name = self.__class__.__qualname__
        return f"{cls_name}({self.expr!r}, comments={self.comments!r})"

    def __eq__(self, o: object) -> bool:
        if not super().__eq__(o):
            return False
        return self.expr == cast(__class__, o).expr
def _all_subclasses_of(cls: Type) -> Set[Type]:
"""
All subclasses of cls, including non-direct ones (child of child of ...).
"""
direct_subclasses = set(cls.__subclasses__())
return direct_subclasses.union(
s for d in direct_subclasses for s in _all_subclasses_of(d)
)
class Literal(Expression):
    """
    All literal Python expressions (integers, strings, lists, etc.).

    Everything will be serialized using repr(), except Expression objects that
    could be contained in a composite value like list: they will be serialized
    with str(), as is probably expected.

    Thus:

    >>> str(Literal([1, {"a": FString("-{x}")}]))
    "[1, {'a': f'-{x}'}]"

    instead of something like "[1, {'a': FString('-{x}')}]".
    """

    def __init__(self, value: Any) -> None:
        super().__init__()
        self.value = value

    # Lazily-built cache: every Expression subclass mapped to its original
    # __repr__, captured the first time __str__ runs.
    # NOTE(review): subclasses defined *after* the first __str__ call will not
    # be in this cache and keep their normal repr — confirm that is acceptable.
    _REPR_BY_EXPR_CLS = None

    def __str__(self) -> str:
        # This is not pretty, but repr() doesn't accept a visitor we could use
        # to say "just this time, use that code to serialize Expression objects".
        if Literal._REPR_BY_EXPR_CLS is None:
            Literal._REPR_BY_EXPR_CLS = {
                c: c.__repr__ for c in _all_subclasses_of(Expression)
            }
        # Temporarily make every Expression subclass repr() via its str(), so
        # nested expressions inside self.value serialize as code; the finally
        # block restores the original __repr__s even if repr() raises.
        # NOTE(review): this mutates class attributes process-wide and is not
        # thread-safe — concurrent calls could observe swapped __repr__s.
        try:
            for k in Literal._REPR_BY_EXPR_CLS.keys():
                k.__repr__ = k.__str__
            return repr(self.value)
        finally:
            for k, _repr in Literal._REPR_BY_EXPR_CLS.items():
                k.__repr__ = _repr

    def __repr__(self) -> str:
        return f"{self.__class__.__qualname__}({self.value!r})"

    def __eq__(self, o: object) -> bool:
        return super().__eq__(o) and self.value == cast(__class__, o).value
class FString(Literal):
    """
    f-strings cannot be handled like most literals because they are evaluated
    first, so they lose their "f" prefix and their template is executed too
    early.
    """

    def __init__(self, s: str) -> None:
        if isinstance(s, str):
            super().__init__(s)
        else:
            raise TypeError(
                f"expecting a format string, got {s.__class__.__qualname__}: {s!r}"
            )

    def __str__(self) -> str:
        # Re-attach the "f" prefix to the quoted template text.
        return f"f{str(self.value)!r}"
class Symbol(Expression):
    """
    The name of something (variable, function, etc.).

    Avoids any kind of text transformation that would happen with Literal.

    >>> str(Literal("x"))
    "'x'"
    >>> str(Symbol("x"))
    'x'

    The provided argument's type is explicitly checked and a TypeError may be
    raised to avoid confusion when a user expects e.g. Symbol(True) to work like
    Symbol("True").
    """

    def __init__(self, name: str) -> None:
        super().__init__()
        if not isinstance(name, str):
            raise TypeError(
                f"expected symbol name, got {name.__class__.__qualname__}: {name!r}"
            )
        self.name = name

    def __str__(self) -> str:
        # A symbol serializes as its bare name, verbatim.
        return self.name

    def __repr__(self) -> str:
        cls_name = self.__class__.__qualname__
        return f"{cls_name}({self.name!r})"

    def __eq__(self, o: object) -> bool:
        if not super().__eq__(o):
            return False
        return self.name == cast(__class__, o).name
class FunctionCall(Expression):
    """
    The invocation of a function or method.
    """

    def __init__(
        self,
        name: str,
        positional_args: Sequence[Expression] = (),
        named_args: Mapping[str, Expression] = IMMUTABLE_EMPTY_DICT,
    ) -> None:
        super().__init__()
        self.name = name
        self.positional_args = list(positional_args)
        self.named_args = dict(named_args)

    def __str__(self) -> str:
        # Positional arguments first, then keyword arguments, comma-separated.
        rendered = [str(arg) for arg in self.positional_args]
        rendered.extend(f"{key}={value}" for key, value in self.named_args.items())
        return "{}({})".format(self.name, ", ".join(rendered))

    def __repr__(self) -> str:
        cls_name = self.__class__.__qualname__
        return f"{cls_name}({self.name!r}, {self.positional_args!r}, {self.named_args!r})"

    def __eq__(self, o: object) -> bool:
        if not super().__eq__(o):
            return False
        other = cast(__class__, o)
        return (
            self.name == other.name
            and self.positional_args == other.positional_args
            and self.named_args == other.named_args
        )
class BinaryOp(Expression):
    """
    The invocation of a binary operator.

    To avoid any precedence error in the generated code, operands that are also
    BinaryOps are always surrounded by braces (even when not necessary, as in
    "1 + (2 + 3)"), as a more subtle behavior has increased complexity of
    implementation without much benefit.
    """

    def __init__(self, lhs: Expression, op: str, rhs: Expression) -> None:
        super().__init__()
        self.lhs = lhs
        self.op = op
        self.rhs = rhs

    def __str__(self) -> str:
        def render(operand: Expression) -> str:
            # Parenthesize nested binary operations so precedence never leaks.
            if isinstance(operand, BinaryOp):
                return f"({operand})"
            return str(operand)

        return f"{render(self.lhs)} {self.op} {render(self.rhs)}"

    def __eq__(self, o: object) -> bool:
        if not super().__eq__(o):
            return False
        other = cast(__class__, o)
        return self.lhs == other.lhs and self.op == other.op and self.rhs == other.rhs
class Assignment(Statement):
    """
    The assignment of a value to a variable.

    For our purposes, we don't treat multiple assignment via tuples differently.
    We also don't support chained assignments such as "a = b = 1".
    """

    def __init__(self, lhs: str, rhs: Expression, comments: Sequence[str] = ()) -> None:
        super().__init__(comments)
        self.lhs = lhs
        self.rhs = rhs

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        """Serialize as a single "lhs = rhs" line."""
        rendered = Line(f"{self.lhs} = {self.rhs}", indent_level)
        if not comments:
            return [rendered]
        return self.attach_comment(rendered)

    def __eq__(self, o: object) -> bool:
        if not super().__eq__(o):
            return False
        other = cast(__class__, o)
        return self.lhs == other.lhs and self.rhs == other.rhs

    def __repr__(self) -> str:
        cls_name = self.__class__.__qualname__
        return f"{cls_name}(lhs={self.lhs!r}, rhs={self.rhs!r}, comments={self.comments!r})"
class IfElse(Statement):
    """
    The if/elif/else construct, where elif and else are optional and elif can
    be repeated.
    """

    def __init__(
        self,
        condition_blocks: Sequence[Tuple[Expression, Sequence[Statement]]],
        else_block: Optional[Sequence[Statement]] = None,
        comments: Sequence[str] = (),
    ) -> None:
        """Each condition block is (condition, statements); the first renders
        as "if", subsequent ones as "elif"."""
        super().__init__(comments)
        self.condition_blocks = [
            (cond, list(stmts)) for cond, stmts in condition_blocks
        ]
        self._assert_consistency()
        # NOTE(review): stored as-is, unlike condition_blocks which are copied
        # to lists — __eq__ therefore compares whatever sequence type the
        # caller passed; confirm that asymmetry is intended.
        self.else_block = else_block

    def _assert_consistency(self):
        # An "else" with no "if" at all cannot be serialized.
        if not self.condition_blocks:
            raise ValueError("can't have an if without at least one block")

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        """Serialize the whole construct; empty condition bodies become "pass"."""
        self._assert_consistency()
        lines = []
        for i, block in enumerate(self.condition_blocks):
            keyword = "if" if i == 0 else "elif"
            lines.append(Line(f"{keyword} {block[0]}:", indent_level))
            lines.extend(
                [
                    line
                    for stmt in block[1]
                    for line in stmt.lines(indent_level + 1, comments)
                ]
                or [Line("pass", indent_level + 1)]
            )
        if self.else_block:
            lines.append(Line("else:", indent_level))
            lines.extend(
                [
                    line
                    for stmt in self.else_block
                    for line in stmt.lines(indent_level + 1, comments)
                ]
            )
        if comments:
            # There is always a first line, or _assert_consistency would fail.
            return [*self.attach_comment(lines[0]), *lines[1:]]
        return lines

    def __eq__(self, o: object) -> bool:
        return (
            super().__eq__(o)
            and self.condition_blocks == cast(__class__, o).condition_blocks
            and self.else_block == cast(__class__, o).else_block
        )

    def __repr__(self) -> str:
        return "{}(condition_blocks={!r}, else_block={!r}, comments={!r})".format(
            self.__class__.__qualname__,
            self.condition_blocks,
            self.else_block,
            self.comments,
        )
class Import(Statement):
    """
    The import statement in all its forms: "import", "import X as A",
    "from M import X", "from M import X as A", and "from M import X, Y".
    """

    def __init__(
        self,
        targets: Sequence[str],
        source: Optional[str] = None,
        alias: Optional[str] = None,
        comments: Sequence[str] = (),
    ) -> None:
        """targets: imported names; source: the "from" module, if any;
        alias: the "as" name (single-target imports only)."""
        super().__init__(comments)
        self.targets = list(targets)
        self.source = source
        self.alias = alias
        self._assert_consistency()

    def _assert_consistency(self):
        if not self.targets:
            raise ValueError("expected at least one import target")
        # One alias cannot apply to several targets unambiguously.
        if len(self.targets) > 1 and self.alias:
            raise ValueError("alias forbidden for multiple import targets")

    def lines(self, indent_level: int = 0, comments: bool = True) -> List[Line]:
        """Serialize the import; multiple targets produce one line per target."""
        self._assert_consistency()
        import_kw = f"from {self.source} import" if self.source else "import"
        alias_clause = f" as {self.alias}" if self.alias else ""
        lines = [
            Line(f"{import_kw} {target}{alias_clause}", indent_level)
            for target in self.targets
        ]
        if comments:
            return [*self.comment_lines(indent_level), *lines]
        return lines

    def __eq__(self, o: object) -> bool:
        return (
            super().__eq__(o)
            and self.targets == cast(__class__, o).targets
            and self.source == cast(__class__, o).source
            and self.alias == cast(__class__, o).alias
        )

    def __repr__(self) -> str:
        return "{}(targets={!r}, source={!r}, alias={!r}, comments={!r})".format(
            self.__class__.__qualname__,
            self.targets,
            self.source,
            self.alias,
            self.comments,
        )
|
"""
AVA package for training a VAE
Contains
--------
`ava.models.vae`
Defines the variational autoencoder (VAE).
`ava.models.vae_dataset`
Feeds syllable data to the VAE.
`ava.models.window_vae_dataset`
Feeds random data to the (shotgun) VAE.
`ava.models.utils`
Useful functions related to the `ava.models` subpackage.
"""
|
#!/usr/bin/python3
# Developed by Alexander Bersenev from Hackerdom team, bay@hackerdom.ru
"""Restores vm from snapshot"""
import sys
import time
import os
import traceback
import re
import do_api
from cloud_common import (log_progress, call_unitl_zero_exit, # get_cloud_ip,
SSH_OPTS, # SSH_YA_OPTS
)
# Command-line arguments: team number and snapshot name.
TEAM = int(sys.argv[1])
NAME = sys.argv[2]
# The VM is named after the team, e.g. "team17".
IMAGE_VM_NAME = "team%d" % TEAM
def log_stderr(*params):
    """Write a diagnostic message to stderr, prefixed with the team number."""
    print(f"Team {TEAM}:", *params, file=sys.stderr)
def main():
    """Restore the team's VM from the snapshot named on the command line.

    Returns 0 on success and 1 on any error. Lines prefixed with "msg:" go to
    stdout for the calling service; diagnostics go to stderr via log_stderr().
    """
    if not re.fullmatch(r"[0-9a-zA-Z_]{,64}", NAME):
        print("msg: ERR, name validation error")
        return 1

    # Read the deployment state; the "with" block closes the handle instead of
    # leaking it like the previous bare open() did.
    with open("db/team%d/image_deploy_state" % TEAM) as state_file:
        image_state = state_file.read().strip()

    if image_state == "NOT_STARTED":
        print("msg: ERR, vm is not started")
        return 1
    if image_state != "RUNNING":
        # BUG FIX: previously any state other than NOT_STARTED/RUNNING fell
        # through with vm_id undefined and crashed with NameError; fail
        # explicitly instead.
        log_stderr("unexpected image state:", image_state)
        print("msg: ERR, vm is not running")
        return 1

    vm_ids = do_api.get_ids_by_vmname(IMAGE_VM_NAME)
    if not vm_ids:
        log_stderr("failed to find vm")
        return 1
    if len(vm_ids) > 1:
        log_stderr("more than one vm with this name exists")
        return 1
    vm_id = list(vm_ids)[0]

    # Snapshots are namespaced by VM name, e.g. "team17-before_game".
    snapshot_name = IMAGE_VM_NAME + "-" + NAME
    snapshot_ids = [
        int(snapshot["id"])
        for snapshot in do_api.list_snapshots()
        if snapshot.get("name", "") == snapshot_name
    ]
    if not snapshot_ids:
        print("msg:", "no such snapshot")
        return 1
    if len(snapshot_ids) > 1:
        print("msg:", "internal error: too much snapshots")
        return 1

    result = do_api.restore_vm_from_snapshot_by_id(vm_id, snapshot_ids[0])
    if not result:
        # Typo fixed: was "restore shapshot failed".
        log_stderr("restore snapshot failed")
        return 1
    print("msg:", "restore started, it takes couple of minutes")
    return 0
if __name__ == "__main__":
    # Line-buffer stdout so progress lines reach the calling service promptly.
    sys.stdout = os.fdopen(1, 'w', 1)
    print("started: %d" % time.time())
    exitcode = 1
    try:
        # Run relative to the script's directory: the db/ paths are relative.
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        exitcode = main()
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; those now propagate as they should.
        traceback.print_exc()
    print("exit_code: %d" % exitcode)
    print("finished: %d" % time.time())
|
import sys
import matplotlib.pyplot as plt
from .plutoTools import readVTKSpherical
import numpy as np
import copy
from scipy import interpolate
from astropy import units as u
from astropy import constants as c
import astropy.io.fits as fits
import subprocess
from .parameters import Params
from .disc_structure import Disc
def pluto2mcfost(
    data_dir,
    nr=100,
    nz=100,
    umass=1,
    udist=10,
    mcfost_ref_file="/Users/cpinte/mcfost/src/ref3.0_3D.para",
    mcfost_filename="mcfost_PLUTO.para",
    fitsname=None,
    vtk_file="data.0700.vtk",
):
    """Convert a PLUTO spherical VTK dump into an mcfost density/velocity model.

    Parameters
    ----------
    data_dir : str
        Directory holding the PLUTO output; the VTK file name is appended
        directly, so include a trailing slash.
    nr, nz : int
        Radial and vertical resolution requested for the mcfost grid.
    umass : float
        PLUTO mass unit, in solar masses.
    udist : float
        PLUTO length unit, in au.
    mcfost_ref_file : str
        Reference mcfost parameter file used as a template.
    mcfost_filename : str
        Name of the parameter file written for this model.
    fitsname : str or None
        If given, the interpolated model is written to this FITS file.
    vtk_file : str
        VTK dump to read (generalized from the previously hard-coded
        "data.0700.vtk"; the old default is preserved).

    Returns
    -------
    tuple
        (mcfost_r, mcfost_z, rho_mcfost, vr_mcfost, vphi_mcfost, vz_mcfost)
    """
    ##########################
    ## Units options
    #########################
    # Mass of central object (modifiable)
    Mstar = umass * c.M_sun
    # Unit of length (= inner radius of the run, e.g. 10 au) (modifiable)
    R0 = (udist * u.au).to(u.m)
    # Assumed surface density at R0 (eq 5 in Riols 2020)
    Sigma0 = 200 * (R0.to(u.au) / u.au) ** (-0.5) * u.g / (u.m) ** 2
    # Disc aspect-ratio
    epsilon = 0.05

    ###########################
    # Computed quantities
    ###########################
    # Time unit
    T0 = np.sqrt(R0 ** 3 / (Mstar * c.G)).to(u.s)
    # Velocity unit
    V0 = R0 / T0
    # Density unit rho = sigma / (sqrt(2 pi) H).
    # BUG FIX: this line used to be duplicated verbatim further down.
    rho0 = Sigma0 / (np.sqrt(2 * np.pi) * epsilon * R0)
    # Pressure unit
    P0 = rho0 * V0 * V0

    # Read the data file and convert code units to physical units.
    V = readVTKSpherical(data_dir + vtk_file)
    V.data['rho'] = np.squeeze((rho0 * V.data['rho']).value)
    V.data['prs'] = np.squeeze((P0 * V.data['prs']).value)
    V.data['vx1'] = np.squeeze((V0 * V.data['vx1']).value)
    V.data['vx2'] = np.squeeze((V0 * V.data['vx2']).value)
    V.data['vx3'] = np.squeeze((V0 * V.data['vx3']).value)
    V.r = V.r * udist  # V.r in au

    # Compute the sound speed
    V.data['cs'] = np.sqrt(V.data['prs'] / V.data['rho'])

    # Cylindrical coordinates of the PLUTO model
    [r, theta] = np.meshgrid(V.r, V.theta, indexing='ij')
    rcyl = r * np.sin(theta)
    z = r * np.cos(theta)

    # Density and velocity data
    rho = V.data['rho']
    # Cylindrical velocity components from the spherical ones
    vr = V.data['vx1'] * np.sin(theta) + V.data['vx2'] * np.cos(theta)
    vz = V.data['vx1'] * np.cos(theta) - V.data['vx2'] * np.sin(theta)
    vphi = V.data['vx3']

    # --- mcfost: set up a parameter file whose zone matches the PLUTO extent
    P = Params(mcfost_ref_file)
    P.grid.n_rad = nr
    P.grid.n_rad_in = 1
    P.grid.nz = nz
    P.grid.n_az = 1
    P.zones[0].Rin = np.min(V.r)
    P.zones[0].edge = 0.0
    P.zones[0].Rout = np.max(V.r)
    P.zones[0].Rc = 0.0
    # Don't compute SED
    P.simu.compute_SED = False
    # -- Turn off symmetries
    P.simu.image_symmetry = False
    P.simu.central_symmetry = False
    P.simu.axial_symmetry = False
    # -- Write new parameter file
    P.writeto(mcfost_filename)

    # --- Running mcfost to create the grid
    subprocess.call(["rm", "-rf", "data_disk", "data_disk_old"])
    result = subprocess.call(["mcfost", mcfost_filename, "-disk_struct"])
    # NOTE(review): "result" is not checked; a failed mcfost run only surfaces
    # later when Disc() cannot read the grid — consider raising here.

    # --- Reading mcfost grid
    mcfost_disc = Disc("./")
    # --- print the mcfost radial grid to check that it is the same as PLUTO's
    print("MCFOST radii=")
    print(mcfost_disc.r()[0, 0, :])
    mcfost_r = mcfost_disc.r().transpose()  # dims are now r, z, theta
    mcfost_z = mcfost_disc.z().transpose()

    # -- Interpolate the PLUTO fields onto the mcfost grid (0 outside the data)
    rho_mcfost = interpolate.griddata((rcyl.flatten(), z.flatten()), rho.flatten(), (mcfost_r, mcfost_z), fill_value=0)
    vr_mcfost = interpolate.griddata((rcyl.flatten(), z.flatten()), vr.flatten(), (mcfost_r, mcfost_z), fill_value=0)
    vz_mcfost = interpolate.griddata((rcyl.flatten(), z.flatten()), vz.flatten(), (mcfost_r, mcfost_z), fill_value=0)
    vphi_mcfost = interpolate.griddata((rcyl.flatten(), z.flatten()), vphi.flatten(), (mcfost_r, mcfost_z), fill_value=0)
    velocities = np.array([vr_mcfost.transpose(), vphi_mcfost.transpose(), vz_mcfost.transpose()])

    # --- Assemble the FITS model mcfost expects (density twice + velocities)
    primary_hdu = fits.PrimaryHDU(np.abs(rho_mcfost.transpose()))
    second_hdu = fits.ImageHDU(np.abs(rho_mcfost.transpose()))
    tertiary_hdu = fits.ImageHDU(velocities)
    primary_hdu.header['hierarch read_gas_velocity'] = 2
    primary_hdu.header['hierarch gas_dust_ratio'] = 100
    primary_hdu.header['hierarch read_gas_density'] = 1
    primary_hdu.header['read_n_a'] = 0
    hdul = fits.HDUList([primary_hdu, second_hdu, tertiary_hdu])

    # --- Write a fits file for mcfost
    if fitsname is not None:
        print("Writing ", fitsname, " to disk")
        hdul.writeto(fitsname, overwrite=True)
    else:
        print("Fits file for mcfost was not created")

    return mcfost_r, mcfost_z, rho_mcfost, vr_mcfost, vphi_mcfost, vz_mcfost
|
# Generated by Django 2.2.7 on 2019-11-21 11:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace unique_together on the digest models with named UniqueConstraints."""

    dependencies = [
        ('collect', '0001_initial'),
    ]

    operations = [
        # Drop the old unique_together metadata first...
        migrations.AlterUniqueTogether(
            name='digestcounter',
            unique_together=set(),
        ),
        migrations.AlterUniqueTogether(
            name='digestpartial',
            unique_together=set(),
        ),
        # ...then recreate the same uniqueness guarantees as named constraints.
        migrations.AddConstraint(
            model_name='digestcounter',
            constraint=models.UniqueConstraint(fields=('server_nonce', 'client_nonce'), name='unique_digest_counter'),
        ),
        migrations.AddConstraint(
            model_name='digestpartial',
            constraint=models.UniqueConstraint(fields=('user', 'username'), name='unique_digest_partial'),
        ),
    ]
|
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional
from melime.generators.gen_base import GenBase
class CNNVAEGen(GenBase):
    """Density generator backed by a convolutional VAE (wraps ModelVAE)."""

    def __init__(self, image_channels, latent_dim, cuda=True, verbose=False, **kwargs):
        # NOTE(review): "cuda" is forwarded to ModelVAE — confirm its __init__
        # accepts a "cuda" keyword (its published signature takes "device").
        self.model = ModelVAE(image_channels, latent_dim=latent_dim, cuda=cuda, verbose=verbose, **kwargs)
        self.manifold = None

    def fit(self, x, epochs=10):
        """Train the underlying VAE; x is a DataLoader-style iterable."""
        self.model.train_epochs(train_loader=x, epochs=epochs)
        return self

    def load_manifold(self, path):
        """Load a previously saved VAE network from `path` and set eval mode."""
        self.model.model = torch.load(path)
        self.model.model.eval()
        return self

    def save_manifold(self, path):
        """Persist the whole VAE network to `path`."""
        torch.save(self.model.model, path)

    def sample_radius(self, x_exp, r=None, n_samples=1000, random_state=None):
        """Decode n_samples latent points drawn around the encoding of x_exp.

        `r` scales a uniform perturbation applied in latent space;
        `random_state` is currently unused.
        """
        with torch.no_grad():
            x_exp_tensor = torch.from_numpy(x_exp).to(self.model.device)
            mu_p, log_var_p = self.model.model.encode(x_exp_tensor)
            # Repeat the latent mean n_samples times (outer product with ones).
            ones = torch.ones(n_samples).to(self.model.device)
            mu_m = torch.ger(ones, mu_p.view(-1))
            # Uniform noise in [-r/2, r/2) per latent dimension.
            noise = (torch.rand(n_samples, self.model.latent_dim).to(self.model.device) - 0.5) * r
            z = self.model.model.reparameterize(mu_m, log_var_p)
            # BUG FIX: the original called reparameterize() a second time here,
            # overwriting z and silently discarding the radius noise.
            z = z + noise
            x_sample = self.model.model.decode(z)
            # Release intermediates before leaving the no_grad context.
            del noise
            del mu_m
            torch.cuda.empty_cache()
        return x_sample

    def sample(self, n_samples=1, random_state=None):
        # TODO: Need to be implemented.
        pass
class ModelVAE(object):
    """Training/evaluation harness around the VAE network."""

    def __init__(self, image_channels, latent_dim=256, device="cpu", batch_size=128,
                 verbose=False, cuda=False, **kwargs):
        """Build the network, optimizer, and LR scheduler.

        cuda: move the model to the GPU when available. BUG FIX: this
        parameter did not exist before — callers passing cuda=... had it
        swallowed by **kwargs, and "if self.cuda:" below raised
        AttributeError because self.cuda was never assigned.
        """
        self.verbose = verbose
        self.latent_dim = latent_dim
        self.image_channels = image_channels
        self.batch_size = batch_size
        self.cuda = bool(cuda) and torch.cuda.is_available()
        # Keep data and model on the same device.
        self.device = torch.device("cuda") if self.cuda else torch.device(device)
        self.device_cpu = torch.device("cpu")
        self.model = VAE(image_channels=self.image_channels, latent_dim=latent_dim)
        if self.cuda:
            self.model.cuda()
        self.optimizer = optim.Adam(self.model.parameters(), lr=1e-3)
        self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.95)
        # Batches between progress prints when verbose.
        self.log_interval = 1000

    def train(self, train_loader, epoch):
        """Run one optimization epoch over train_loader."""
        self.model.train()
        train_loss = 0
        for batch_idx, (data, _) in enumerate(train_loader):
            data = data.to(self.device)
            self.optimizer.zero_grad()
            recon_batch, mu, log_var = self.model(data)
            # loss = self.model.loss_function(recon_batch, data, mu, log_var)
            loss = self.model.loss_function_2(recon_batch, data, mu, log_var)
            loss.backward()
            train_loss += loss.item()
            self.optimizer.step()
            if self.verbose and batch_idx % self.log_interval == 0:
                print(
                    "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                        epoch,
                        batch_idx * len(data),
                        len(train_loader.dataset),
                        100.0 * batch_idx / len(train_loader),
                        loss.item() / len(data),
                    )
                )
        if self.verbose:
            print("Epoch: {} - Mean loss: {:.4f}".format(epoch, train_loss / len(train_loader.dataset)))

    def test(self, test_loader):
        """Report the mean loss over test_loader without updating weights."""
        self.model.eval()
        test_loss = 0
        with torch.no_grad():
            for i, (data, _) in enumerate(test_loader):
                data = data.to(self.device)
                recon_batch, mu, log_var = self.model(data)
                # BUG FIX: was "self.loss_function", which does not exist on
                # this class — the loss lives on the VAE network.
                test_loss += self.model.loss_function(recon_batch, data, mu, log_var).item()
        test_loss /= len(test_loader.dataset)
        print("Loss test set: {:.4f}".format(test_loss))

    def train_epochs(self, train_loader, epochs):
        """Train for `epochs` epochs, decaying the learning rate after each."""
        for epoch in range(1, epochs + 1):
            self.train(train_loader, epoch)
            self.scheduler.step()
class VAE(nn.Module):
    """Convolutional variational autoencoder for square images.

    The encoder halves the spatial size five times (stride-2 convs), so inputs
    whose sides are a multiple of 32 reconstruct to their original shape.
    """

    def __init__(self, image_channels=3, kernel_size=3, stride=1, latent_dim=32):
        super().__init__()
        self.verbose = True
        self.image_channels = image_channels
        self.latent_dim = latent_dim
        # Feature-map widths of the encoder stages (mirrored in the decoder).
        self.channels = [32, 64, 128, 256, 512]
        # Encoder
        self.layers_encoder = nn.ModuleList(self.create_layers_encoder(image_channels=image_channels))
        # Posterior heads (mu, log_var) and the decoder input projection.
        in_linear_layer = self.channels[-1]  # *4
        self.fc_mu = nn.Linear(in_linear_layer, self.latent_dim)
        self.fc_log_var = nn.Linear(in_linear_layer, self.latent_dim)
        self.fc_out = nn.Linear(self.latent_dim, in_linear_layer)
        # Decoder
        self.layers_decoder = nn.ModuleList(self.create_layers_decoder(image_channels=image_channels))

    def encode(self, x):
        """Return (mu, log_var) of the approximate posterior q(z|x)."""
        x_in = x
        for layer in self.layers_encoder:
            x_in = layer(x_in)
        x_in = torch.flatten(x_in, start_dim=1)
        return self.fc_mu(x_in), self.fc_log_var(x_in)

    def decode(self, z):
        """Map latent codes z back to image space (sigmoid output in [0, 1])."""
        z = self.fc_out(z)
        z = z.view(-1, self.channels[-1], 1, 1)
        x_in = z
        for layer in self.layers_decoder:
            x_in = layer(x_in)
        return x_in

    def forward(self, x):
        """Return (reconstruction, mu, log_var)."""
        mu, log_var = self.encode(x)
        z = self.reparameterize(mu, log_var)
        return self.decode(z), mu, log_var

    @staticmethod
    def reparameterize(mu, log_var):
        """Sample z = mu + eps * sigma, eps ~ N(0, I) (reparameterization trick)."""
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return mu + eps * std

    def create_layers_encoder(self, image_channels):
        """Build the stride-2 Conv/BatchNorm/LeakyReLU encoder stages."""
        layers = list()
        in_channels = image_channels
        for out_channels in self.channels:
            layers.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(out_channels),
                    nn.LeakyReLU(),
                )
            )
            in_channels = out_channels
        return layers

    def create_layers_decoder(self, image_channels):
        """Build the mirrored ConvTranspose stages, ending in a sigmoid image head."""
        widths = self.channels[:]
        widths.reverse()
        layers = list()
        for in_channels, out_channels in zip(widths[:-1], widths[1:]):
            layers.append(
                nn.Sequential(
                    nn.ConvTranspose2d(
                        in_channels,
                        out_channels,
                        kernel_size=3,
                        stride=2,
                        padding=1,
                        output_padding=1,
                    ),
                    nn.BatchNorm2d(out_channels),
                    nn.LeakyReLU(),
                )
            )
        # Final upsample plus a same-size conv that maps back to image channels.
        layers.append(
            nn.Sequential(
                nn.ConvTranspose2d(
                    widths[-1], widths[-1], kernel_size=3, stride=2, padding=1, output_padding=1
                ),
                nn.LeakyReLU(),
                nn.Conv2d(widths[-1], out_channels=self.image_channels, kernel_size=3, padding=1),
                nn.Sigmoid(),
            )
        )
        return layers

    @staticmethod
    def loss_function(recons, input_, mu, log_var, kld_weight=1):
        r"""MSE reconstruction loss plus weighted KL divergence.

        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        """
        recons_loss = functional.mse_loss(recons, input_)
        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
        loss = recons_loss + kld_weight * kld_loss
        return loss

    def loss_function_2(self, recon_x, x, mu, log_var):
        """Summed BCE reconstruction loss plus KL divergence.

        See Appendix B of Kingma & Welling, "Auto-Encoding Variational Bayes"
        (ICLR 2014, https://arxiv.org/abs/1312.6114).
        """
        # BUG FIX: the original flattened x via self.input_dim, an attribute
        # this class never defines (AttributeError at runtime). The
        # reconstruction and the input share a shape, so compare directly.
        bce = functional.binary_cross_entropy(recon_x, x, reduction="sum")
        # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        kld = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
        return bce + kld
|
"""flask student stories table
Revision ID: 7c1cd37b02e6
Revises: aa2aa87a965d
Create Date: 2021-12-27 13:50:18.012256
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# "revision" must match this file's id; "down_revision" links to the parent
# migration in the chain.
revision = '7c1cd37b02e6'
down_revision = 'aa2aa87a965d'
branch_labels = None
depends_on = None
def upgrade():
    """Create the student-stories table with its indexes and constraints."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): the table name literally contains spaces
    # ("flask student stories"); SQLAlchemy quotes it, but confirm it is
    # intentional before renaming anything.
    op.create_table('flask student stories',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('username', sa.String(length=64), nullable=True),
        sa.Column('student_image', sa.String(length=300), nullable=True),
        sa.Column('body', sa.String(length=300), nullable=True),
        sa.Column('timestamp', sa.DateTime(), nullable=True),
        sa.Column('allowed_status', sa.Boolean(), nullable=True),
        sa.Column('admin_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['admin_id'], ['admin.id'], name=op.f('fk_flask student stories_admin_id_admin')),
        sa.PrimaryKeyConstraint('id', name=op.f('pk_flask student stories'))
    )
    # batch_alter_table keeps this working on SQLite, which cannot ALTER in place.
    with op.batch_alter_table('flask student stories', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_flask student stories_timestamp'), ['timestamp'], unique=False)
        batch_op.create_index(batch_op.f('ix_flask student stories_username'), ['username'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the student-stories indexes and table (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('flask student stories', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_flask student stories_username'))
        batch_op.drop_index(batch_op.f('ix_flask student stories_timestamp'))
    op.drop_table('flask student stories')
    # ### end Alembic commands ###
|
from __future__ import print_function
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
import numpy as np
from util import get_poke_xy
x_train, x_test, y_train, y_test = get_poke_xy()

# Indexes for a 90/10 train/validation split, stratified by the labels.
splits = StratifiedShuffleSplit(y_train, n_iter=1, test_size=0.1, random_state=42)
# form split (n_iter=1, so the loop runs exactly once)
for train_index, val_index in splits:
    x_train, x_val = x_train[train_index], x_train[val_index]
    y_train, y_val = y_train[train_index], y_train[val_index]

# Candidate neighborhood sizes to evaluate (odd k avoids ties).
k_vals = range(1, 30, 2)
accuracies = []

# BUG FIX: this loop used "xrange(1, 30, 2)", which only exists in Python 2;
# iterate the k_vals sequence defined above instead.
for k in k_vals:
    # train the knn with current k
    model = KNeighborsClassifier(n_neighbors=k)
    model.fit(x_train, y_train)
    # eval the model and update the accuracies list
    score = model.score(x_val, y_val)
    # print("k=%d, accuracy=%.2f%%" % (k, score * 100))
    accuracies.append(score)

# find the value of k that has the largest accuracy
i = np.argmax(accuracies)
print("k=%d achieved highest accuracy of %.2f%% on validation data" % (k_vals[i],
    accuracies[i] * 100))

# build model with best k from train, then evaluate on the held-out test set
model = KNeighborsClassifier(n_neighbors=k_vals[i])
model.fit(x_train, y_train)
predictions = model.predict(x_test)

# BUG FIX: the report used to print the index i instead of the chosen k.
print("\n[RESULTS] KNN w/ k={}".format(k_vals[i]))
print(classification_report(y_test, predictions))
|
class Solution:
    def subsetsWithDup(self, nums: List[int], sorted: bool = False) -> List[List[int]]:
        """Return all distinct subsets of nums (nums may contain duplicates).

        The `sorted` flag lets recursive calls skip re-sorting; external
        callers leave it at False.
        """
        if not nums:
            return [[]]
        if len(nums) == 1:
            return [[], nums]
        if not sorted:
            nums.sort()
        # Subsets of everything but the last element, then extend each with it.
        smaller = self.subsetsWithDup(nums[:-1], sorted=True)
        last = nums[-1]
        combined = [subset + [last] for subset in smaller] + smaller
        # Deduplicate via hashable tuples (duplicates arise from repeated values).
        unique = {tuple(subset) for subset in combined}
        return [list(subset) for subset in unique]
|
from __future__ import absolute_import
import logging
import requests
import json
from minemeld.ft.basepoller import BasePollerFT
# Module-level logger shared by the miners below.
LOG = logging.getLogger(__name__)
class IPv4(BasePollerFT):
    """Miner that polls the published Incapsula IPv4 ranges."""

    def configure(self):
        super(IPv4, self).configure()
        self.polling_timeout = self.config.get('polling_timeout', 20)
        self.verify_cert = self.config.get('verify_cert', False)
        self.url = 'https://my.incapsula.com/api/integration/v1/ips'

    def _process_item(self, item):
        # called on each item returned by _build_iterator
        # it should return a list of (indicator, value) pairs
        if item is None:
            LOG.error('%s - no IP information found', self.name)
            return []
        value = {
            'type': 'IPv4',
            'confidence': 100
        }
        return [[item, value]]

    def _build_iterator(self, now):
        # called at every polling interval
        # here you should retrieve and return the list of items
        rkwargs = dict(
            stream=False,
            verify=self.verify_cert,
            timeout=self.polling_timeout,
            data=[('resp_format', 'json')],
        )
        r = requests.post(
            self.url,
            **rkwargs
        )
        try:
            r.raise_for_status()
        except Exception:
            # BUG FIX: was a bare "except:", which also intercepted
            # SystemExit/KeyboardInterrupt before re-raising.
            LOG.debug('%s - exception in request: %s %s',
                      self.name, r.status_code, r.content)
            raise
        # r.json() parses the body directly (same result as json.loads(r.text))
        return iter(r.json()['ipRanges'])
class IPv6(BasePollerFT):
    """Miner that polls the published Incapsula IPv6 ranges."""

    def configure(self):
        super(IPv6, self).configure()
        self.polling_timeout = self.config.get('polling_timeout', 20)
        self.verify_cert = self.config.get('verify_cert', False)
        self.url = 'https://my.incapsula.com/api/integration/v1/ips'

    def _process_item(self, item):
        # called on each item returned by _build_iterator
        # it should return a list of (indicator, value) pairs
        if item is None:
            LOG.error('%s - no IP information found', self.name)
            return []
        value = {
            'type': 'IPv6',
            'confidence': 100
        }
        return [[item, value]]

    def _build_iterator(self, now):
        # called at every polling interval
        # here you should retrieve and return the list of items
        rkwargs = dict(
            stream=False,
            verify=self.verify_cert,
            timeout=self.polling_timeout,
            data=[('resp_format', 'json')],
        )
        r = requests.post(
            self.url,
            **rkwargs
        )
        try:
            r.raise_for_status()
        except Exception:
            # BUG FIX: was a bare "except:", which also intercepted
            # SystemExit/KeyboardInterrupt before re-raising.
            LOG.debug('%s - exception in request: %s %s',
                      self.name, r.status_code, r.content)
            raise
        # r.json() parses the body directly (same result as json.loads(r.text))
        return iter(r.json()['ipv6Ranges'])
|
from league_api.api import ApiType
from typing import List, Mapping
class BannedChampion(ApiType):
    """A champion-ban entry; mirrors the upstream API's camelCase fields and
    exposes snake_case accessors over them."""

    pickTurn: int = None  # The turn during which the champion was banned
    championId: int = None  # The ID of the banned champion
    teamId: int = None  # The ID of the team that banned the champion

    # snake_case aliases; reads and writes go straight to the camelCase fields.

    @property
    def pick_turn(self) -> int:
        return self.pickTurn

    @pick_turn.setter
    def pick_turn(self, value):
        self.pickTurn = value

    @property
    def champion_id(self) -> int:
        return self.championId

    @champion_id.setter
    def champion_id(self, value):
        self.championId = value

    @property
    def team_id(self) -> int:
        return self.teamId

    @team_id.setter
    def team_id(self, value):
        self.teamId = value
|
#!/usr/bin/env python3
import urllib
url = 'http://www.someserver.com/cgi-bin/register.cgi'
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
values = {'name': 'Michael Foord',
'location': 'Northampton',
'language': 'Python' }
headers = {'User-Agent': user_agent}
data = urllib.parse.urlencode(values)
data = data.encode('utf-8')
req = urllib.request.Request(url, data, headers)
try:
response = urllib.request.urlopen(req)
except urllib.error.HTTPError as e:
print('The server couldn\'t fulfill the request.')
print('Error Code: ', e.code)
except urllib.error.URLError as e:
print('We failed to reach a server.')
print('Reason: ', e.reason)
else:
the_page = response.read()
|
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.types import TypeDecorator, Unicode, TEXT
import json
class PathTupleWithSlashes(TypeDecorator):
    """Persist a tuple of path segments as one '/'-joined unicode string."""

    impl = Unicode

    def process_bind_param(self, value, dialect):
        # NULL stays NULL; an empty tuple is also stored as NULL.
        if value is None:
            return None
        if not len(value):
            return None
        return '/'.join(value)

    def process_result_value(self, value, dialect):
        # Split the stored string back into an immutable tuple of segments.
        if value is None:
            return None
        return tuple(value.split('/'))
# The following two classes — and only these two classes — are in very
# large part based on example code from SQLAlchemy.
#
# The original copyright notice and license follows:
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
class JSONEncoded(TypeDecorator):
    """Persist an immutable structure as a JSON-encoded text column."""

    impl = TEXT

    def process_bind_param(self, value, dialect):
        # Serialize on the way into the database; NULL passes through.
        return None if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        # Deserialize on the way out; NULL passes through.
        return None if value is None else json.loads(value)
class MutationDict(Mutable, dict):
    """A dict that notifies SQLAlchemy's mutation tracking on item changes."""

    @classmethod
    def coerce(cls, key, value):
        "Convert plain dictionaries to MutationDict."
        if isinstance(value, MutationDict):
            return value
        if isinstance(value, dict):
            return MutationDict(value)
        # this call will raise ValueError
        return Mutable.coerce(key, value)

    def __setitem__(self, key, value):
        "Detect dictionary set events and emit change events."
        dict.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, key):
        "Detect dictionary del events and emit change events."
        dict.__delitem__(self, key)
        self.changed()
|
#!/usr/bin/env python
"""Euler rotation matrix given sequence, frame, and angles."""
from __future__ import division, print_function
__author__ = 'Marcos Duarte, https://github.com/demotu/BMC'
__version__ = 'euler_rotmat.py v.1 2014/03/10'
def euler_rotmat(order='xyz', frame='local', angles=None, unit='deg',
                 str_symbols=None, showA=True, showN=True):
    """Euler rotation matrix given sequence, frame, and angles.

    This function calculates the algebraic rotation matrix (3x3) for a given
    sequence ('order' argument) of up to three elemental rotations of a given
    coordinate system ('frame' argument) around another coordinate system, the
    Euler (or Eulerian) angles [1]_.

    This function also calculates the numerical values of the rotation matrix
    when numerical values for the angles are input for each rotation axis.
    Use None as value if the rotation angle for the particular axis is unknown.

    The symbols for the angles are: alpha, beta, and gamma for the first,
    second, and third rotations, respectively.
    The matrix product is calculated from right to left and in the specified
    sequence for the Euler angles. The first letter will be the first rotation.

    The function will print and return the algebraic rotation matrix and the
    numerical rotation matrix if angles were input.

    Parameters
    ----------
    order : string, optional (default = 'xyz')
        Sequence for the Euler angles, any combination of the letters
        x, y, and z with 1 to 3 letters is accepted to denote the
        elemental rotations. The first letter will be the first rotation.
    frame : string, optional (default = 'local')
        Coordinate system for which the rotations are calculated.
        Valid values are 'local' or 'global'.
    angles : list, array, or bool, optional (default = None)
        Numeric values of the rotation angles ordered as the 'order'
        parameter. Enter None for a rotation with unknown value.
    unit : str, optional (default = 'deg')
        Unit of the input angles.
    str_symbols : list of strings, optional (default = None)
        New symbols for the angles, for instance, ['theta', 'phi', 'psi']
    showA : bool, optional (default = True)
        True (1) displays the Algebraic rotation matrix in rich format.
        False (0) to not display.
    showN : bool, optional (default = True)
        True (1) displays the Numeric rotation matrix in rich format.
        False (0) to not display.

    Returns
    -------
    R : Matrix Sympy object
        Rotation matrix (3x3) in algebraic format.
    Rn : Numpy array or Matrix Sympy object (only if angles are input)
        Numeric rotation matrix (if values for all angles were input) or
        an algebraic matrix with some of the algebraic angles substituted
        by the corresponding input numeric values.

    Notes
    -----
    This code uses Sympy, the Python library for symbolic mathematics, to
    calculate the algebraic rotation matrix and shows this matrix in latex form
    possibly for using with the IPython Notebook, see [1]_.

    References
    ----------
    .. [1] http://nbviewer.ipython.org/github/duartexyz/BMC/blob/master/Transformation3D.ipynb

    Examples
    --------
    >>> # import function
    >>> from euler_rotmat import euler_rotmat
    >>> # Default options: xyz sequence, local frame and show matrix
    >>> R = euler_rotmat()
    >>> # XYZ sequence (around global (fixed) coordinate system)
    >>> R = euler_rotmat(frame='global')
    >>> # Enter numeric values for all angles and show both matrices
    >>> R, Rn = euler_rotmat(angles=[90, 90, 90])
    >>> # show what is returned
    >>> euler_rotmat(angles=[90, 90, 90])
    >>> # show only the rotation matrix for the elemental rotation at x axis
    >>> R = euler_rotmat(order='x')
    >>> # zxz sequence and numeric value for only one angle
    >>> R, Rn = euler_rotmat(order='zxz', angles=[None, 0, None])
    >>> # input values in radians:
    >>> import numpy as np
    >>> R, Rn = euler_rotmat(order='zxz', angles=[None, np.pi, None], unit='rad')
    >>> # shows only the numeric matrix (showA must be the boolean False,
    >>> # not the truthy string 'False')
    >>> R, Rn = euler_rotmat(order='zxz', angles=[90, 0, None], showA=False)
    >>> # Change the angles' symbols
    >>> R = euler_rotmat(order='zxz', str_symbols=['theta', 'phi', 'psi'])
    >>> # Negativate the angles' symbols
    >>> R = euler_rotmat(order='zxz', str_symbols=['-theta', '-phi', '-psi'])
    >>> # all algebraic matrices for all possible sequences for the local frame
    >>> s=['xyz','xzy','yzx','yxz','zxy','zyx','xyx','xzx','yzy','yxy','zxz','zyz']
    >>> for seq in s: R = euler_rotmat(order=seq)
    >>> # all algebraic matrices for all possible sequences for the global frame
    >>> for seq in s: R = euler_rotmat(order=seq, frame='global')
    """
    import numpy as np
    import sympy as sym
    try:
        from IPython.core.display import Math, display
        ipython = True
    except ImportError:
        # Rich display is optional; without IPython we only return matrices.
        ipython = False

    # None entries (or a scalar None) become NaN, marking unknown angles.
    angles = np.asarray(np.atleast_1d(angles), dtype=np.float64)
    if not np.isnan(angles).all():
        if len(order) != angles.size:
            raise ValueError("Parameters 'order' and 'angles' (when " +
                             "different from None) must have the same size.")

    x, y, z = sym.symbols('x, y, z')
    sig = [1, 1, 1]
    if str_symbols is None:
        a, b, g = sym.symbols('alpha, beta, gamma')
    else:
        s = str_symbols
        # A leading '-' on a symbol name negates that angle.
        if s[0][0] == '-': s[0] = s[0][1:]; sig[0] = -1
        if s[1][0] == '-': s[1] = s[1][1:]; sig[1] = -1
        if s[2][0] == '-': s[2] = s[2][1:]; sig[2] = -1
        a, b, g = sym.symbols(s)

    var = {'x': x, 'y': y, 'z': z, 0: a, 1: b, 2: g}
    # Elemental rotation matrices for xyz (local)
    cos, sin = sym.cos, sym.sin
    Rx = sym.Matrix([[1, 0, 0], [0, cos(x), sin(x)], [0, -sin(x), cos(x)]])
    Ry = sym.Matrix([[cos(y), 0, -sin(y)], [0, 1, 0], [sin(y), 0, cos(y)]])
    Rz = sym.Matrix([[cos(z), sin(z), 0], [-sin(z), cos(z), 0], [0, 0, 1]])
    if frame.lower() == 'global':
        # Global (fixed) frame uses the transposes of the local matrices.
        Rs = {'x': Rx.T, 'y': Ry.T, 'z': Rz.T}
        order = order.upper()
    else:
        Rs = {'x': Rx, 'y': Ry, 'z': Rz}
        order = order.lower()

    R = Rn = sym.Matrix(sym.Identity(3))
    str1 = r'\mathbf{R}_{%s}( ' % frame  # last space needed for order=''
    str2 = [1, 1, 1]
    for i in range(len(order)):
        Ri = Rs[order[i].lower()].subs(var[order[i].lower()], sig[i] * var[i])
        # Product accumulates right-to-left: first rotation is applied first.
        R = Ri * R
        if sig[i] > 0:
            str2[i] = '%s:%s' % (order[i], sym.latex(var[i]))
        else:
            str2[i] = '%s:-%s' % (order[i], sym.latex(var[i]))
        str1 = str1 + str2[i] + ','
        if not np.isnan(angles).all() and not np.isnan(angles[i]):
            if unit[:3].lower() == 'deg':
                angles[i] = np.deg2rad(angles[i])
            Rn = Ri.subs(var[i], angles[i]) * Rn
            str2[i] = str2[i] + '=%.0f^o' % np.around(np.rad2deg(angles[i]), 0)
        else:
            Rn = Ri * Rn

    Rn = sym.simplify(Rn)  # for trigonometric relations
    try:
        # nsimplify only works if there are symbols
        Rn2 = sym.latex(sym.nsimplify(Rn, tolerance=1e-8).n(chop=True, prec=4))
    except Exception:
        Rn2 = sym.latex(Rn.n(chop=True, prec=4))
        # there are no symbols, pass it as Numpy array
        Rn = np.asarray(Rn)

    if showA and ipython:
        display(Math(str1[:-1] + ') =' + sym.latex(R, mat_str='matrix')))
    if showN and not np.isnan(angles).all() and ipython:
        str2 = r',\;'.join(str2[:angles.size])
        display(Math(r'\mathbf{R}_{%s}(%s)=%s' % (frame, str2, Rn2)))

    if np.isnan(angles).all():
        return R
    else:
        return R, Rn
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.