content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import os, os.path
import concurrent.futures
import csv
import urllib.request
import shutil
import gzip
# Work out of the project root so the relative paths below resolve.
os.chdir('/home/will/AutoMicroAnal/')
# <codecell>
# Load the GEO sample metadata table: one dict per TSV row, keyed by the
# header row (uses at least 'URL', 'Sample Accession' and 'Platform').
with open('MicroarraySamples.tsv') as handle:
    microdata = list(csv.DictReader(handle, delimiter = '\t'))
# <codecell>
def get_fileurl(supurl):
    """Return the URL of the first ``.CEL.gz`` file in a GEO "suppl/" listing.

    supurl -- URL of the supplementary-file directory; the response is a
    plain-text listing whose last whitespace-separated token on each line
    is a file name.

    Returns the full file URL, or None when no CEL archive is listed.
    """
    resp = urllib.request.urlopen(supurl)
    for line in resp:
        # The response yields bytes; decode before inspecting the name.
        # (The old code matched against the repr "b'...'" and sliced off
        # the b' prefix and trailing quote with fname[2:-1].)
        fname = line.split()[-1].decode('utf-8', errors='replace')
        if fname.lower().endswith('.cel.gz'):
            return supurl + fname
    return None
def process_row(row):
    """Download and decompress one sample's CEL file.

    row -- a dict with 'URL' (GEO series URL) and 'Sample Accession'.

    Returns None on success or when the output already exists; returns the
    file URL when the download failed so the caller can report it.
    """
    supurl = row['URL'] + 'suppl/'
    tmpfile = '/tmp/' + row['Sample Accession'] + '.CEL.gz'
    finalfile = '/home/will/AutoMicroAnal/microadata/' + row['Sample Accession'] + '.CEL'
    if os.path.exists(finalfile):
        # Already downloaded on a previous run: nothing to do.
        return None
    fileurl = get_fileurl(supurl)
    if fileurl is None:
        return fileurl
    try:
        resp = urllib.request.urlopen(fileurl)
        with open(tmpfile, 'wb') as handle:
            # Stream to disk instead of holding the whole archive in memory.
            shutil.copyfileobj(resp, handle)
    except urllib.request.URLError:
        return fileurl
    with gzip.open(tmpfile) as zhandle:
        with open(finalfile, 'wb') as handle:
            shutil.copyfileobj(zhandle, handle)
    os.remove(tmpfile)
    return None
# <codecell>
# Keep only samples from platform GPL96 (Affymetrix HG-U133A).
gp133As = [row for row in microdata if row['Platform'] == 'GPL96']
# <codecell>
for num, row in enumerate(gp133As):
    try:
        res = process_row(row)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; best-effort skip of any failing sample is kept.
        print('skipping')
        continue
    # Progress heartbeat: a few early rows, then every 500th.
    if (num == 0) or (num == 5) or (num == 20) or (num % 500 == 0):
        print(num)
    if res:
        # process_row returns the URL when the download failed.
        print(res)
# <codecell>
# <codecell>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
    """Build the module's workspace sidebar: four labelled sections of
    DocType shortcuts and query-report links."""
    def doctype_entry(name, description):
        # A link to a DocType list view.
        return {
            "type": "doctype",
            "name": name,
            "description": _(description),
        }

    def report_entry(name, doctype):
        # A link to a query report built on the given DocType.
        return {
            "type": "report",
            "name": name,
            "doctype": doctype,
            "is_query_report": True,
        }

    documents = [
        doctype_entry("Files", "Files."),
        doctype_entry("Fuel Request", "Fuel Request."),
        doctype_entry("Transport Request", "Transport Request."),
        doctype_entry("Transport Assignment", "Transport Assignment."),
        doctype_entry("Vehicle Trip", "Vehicle trips."),
        doctype_entry("Vehicle Inspection", "Vehicle Inspection."),
    ]
    setup = [
        doctype_entry("Vehicle", "Registered Vehicles"),
        doctype_entry("Trailer", "Registered Trailers"),
        doctype_entry("Trip Route", "Trip Route"),
        doctype_entry("Trip Location Type", "Trip Location Type"),
        doctype_entry("Trip Location", "Trip Location"),
        doctype_entry("Fixed Expense", "Fixed Expense"),
        doctype_entry("Unit of Measure", "Unit of Measure"),
        doctype_entry("Vehicle Documents Type", "Vehicle Documents Type"),
        doctype_entry("Vehicle Type", "Vehicle Type"),
        doctype_entry("Vehicle Inspection Template", "Vehicle Inspection Template"),
    ]
    internal_reports = [
        report_entry("Trip Report", "Vehicle Trip"),
        report_entry("Fuel Report", "Vehicle Trip"),
        report_entry("Vehicle Status Report", "Vehicle Trip"),
        report_entry("Truck Document Expiration Report", "Vehicle"),
        report_entry("Transport Assignment Report", "Transport Assignment"),
        report_entry("Vehicles En Route to Border", "Vehicle Trip"),
    ]
    daily_customer_reports = [
        report_entry("Daily Customer Report - Transport", "Vehicle Trip"),
    ]
    return [
        {"label": _("Documents"), "items": documents},
        {"label": _("Setup"), "items": setup},
        {"label": _("Internal Reports"), "items": internal_reports},
        {"label": _("Daily Customer Reports"), "items": daily_customer_reports},
    ]
|
# Sample Graph-API-style message payload used as fixture data:
# eight recipients named "Person One" .. "Person Eight" with ids "1".."8".
_PERSON_NAMES = (
    "One", "Two", "Three", "Four", "Five", "Six", "Seven", "Eight",
)

JSON = {
    "id": "999",
    "to": {
        "data": [
            {"id": str(i), "name": "Person %s" % word}
            for i, word in enumerate(_PERSON_NAMES, start=1)
        ]
    },
    "updated_time": "2010-01-23T14:00:00+0000",
    "unread": 0,
    "unseen": 0,
    "comments": {"data": []},
}
#!/usr/bin/env python2.7
# example
# An example to retrieve incident mttr statistics per analyst
#
# Author: Slavik Markovich
# Version: 1.0
#
import argparse
from datetime import date, datetime, timedelta
import csv
import demisto
def format_dt(dt):
    """Render a datetime/date as an ISO-8601 timestamp (no timezone)."""
    return '{:%Y-%m-%dT%H:%M:%S}'.format(dt)
def parse_dt(dt):
    """Parse an ISO-8601 timestamp string, ignoring any fractional-second
    or timezone suffix beyond the first 19 characters."""
    head = dt[:19]
    return datetime.strptime(head, '%Y-%m-%dT%H:%M:%S')
def p(what):
    """Print ``what`` only when the module-global ``verbose`` flag is set
    (published by options_handler)."""
    if not verbose:
        return
    print(what)
def options_handler():
    """Parse command-line options; also publishes the module-global
    ``verbose`` flag consumed by p()."""
    parser = argparse.ArgumentParser(
        description='Incident MTTR statistics for a time period')
    parser.add_argument(
        'key', help='The API key to access the server')
    parser.add_argument(
        'server', help='The server URL to connect to')
    month_ago = date.today() - timedelta(days=30)
    default_filter = 'status:=2 and closed:>%s' % format_dt(month_ago)
    parser.add_argument(
        '-f', '--filter',
        default=default_filter,
        help='The filter query to chose the alerts, default is closed incidents in last month')
    parser.add_argument(
        '-g', '--group',
        default='owner',
        help='The field to group by to calculate mttr, default is owner')
    parser.add_argument(
        '-q', '--quiet', dest='verbose', action='store_false',
        help="no extra prints")
    parser.add_argument(
        '-o', '--output', default='mttr.csv',
        help='The output CSV file')
    options = parser.parse_args()
    global verbose
    verbose = options.verbose
    return options
def main():
    """Fetch matching incidents from the Demisto server, accumulate total
    resolution time per group, and write a per-group MTTR (minutes) CSV."""
    options = options_handler()
    client = demisto.DemistoClient(options.key, options.server)
    p('using filter %s' % options.filter)
    incidents = client.SearchIncidents(0, 10000, options.filter)
    p('Total #incidents: %d' % (incidents['total']))
    mttr = {}
    for incident in incidents['data']:
        if not incident.get('closed'):
            # Open (or never-closed) incidents have no resolution time.
            continue
        duration = parse_dt(incident['closed']) - parse_dt(incident['created'])
        if options.group == 'owner':
            # Unowned incidents are attributed to the bot.
            group = incident['owner'] or 'dbot'
        else:
            group = incident[options.group]
        stats = mttr.setdefault(group, {'mttr': 0.0, 'incidents': 0})
        stats['mttr'] += duration.total_seconds()
        stats['incidents'] += 1
    with open(options.output, 'w') as csvfile:
        writer = csv.DictWriter(csvfile,
                                fieldnames=['Field', 'Incidents', 'MTTR'])
        writer.writeheader()
        for group, stats in mttr.items():
            writer.writerow({
                'Field': group,
                'Incidents': stats['incidents'],
                # Mean time to resolution, in whole minutes.
                'MTTR': int(stats['mttr'] / stats['incidents'] / 60),
            })
# Script entry point.
if __name__ == '__main__':
    main()
|
# Demo: render an emoji from its :alias: shortcode.
# NOTE(review): `use_aliases` was removed in emoji>=2.0 in favour of
# language='alias' — confirm the installed emoji version still accepts it.
import emoji
print(emoji.emojize('Ola! :smiling_imp:', use_aliases=True))
|
import unittest
from apal_cxx import PyPolynomialTerm, PyPolynomial
class TestPhaseFieldPolynomial(unittest.TestCase):
    """Sanity checks for the C++-backed polynomial wrappers."""
    def test_phase_field_poly(self):
        # Two-variable polynomial p(x, y) = 1.0*x^2*y^3 - 2.0*x*y^2
        # (term arguments are exponent lists, as the assertions below
        # confirm numerically).
        term1 = PyPolynomialTerm([2, 3])
        term2 = PyPolynomialTerm([1, 2])
        poly = PyPolynomial(2)
        poly.add_term(1.0, term1)
        poly.add_term(-2.0, term2)
        # p(-1, 3) = 27 + 18 = 45
        self.assertAlmostEqual(poly.evaluate([-1.0, 3.0]), 45.0)
        # dp/dx(-1, 3) = 2xy^3 - 2y^2 = -54 - 18 = -72
        self.assertAlmostEqual(poly.deriv([-1.0, 3.0], 0), -72.0)
        # dp/dy(-1, 3) = 3x^2y^2 - 4xy = 27 + 12 = 39
        self.assertAlmostEqual(poly.deriv([-1.0, 3.0], 1), 39.0)
if __name__ == "__main__":
unittest.main() |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``Seguir`` join table linking a Usuario to an Equipo
    (a user following a team). Both foreign keys use PROTECT, so a team
    or user that is being followed cannot be deleted."""
    dependencies = [
        ('Web', '0031_auto_20181030_1839'),
    ]
    operations = [
        migrations.CreateModel(
            name='Seguir',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # PROTECT: deleting a referenced Equipo/Usuario raises ProtectedError.
                ('Equipo', models.ForeignKey(related_name='EquipoSeguir', on_delete=django.db.models.deletion.PROTECT, to='Web.Equipo')),
                ('Usuario', models.ForeignKey(related_name='UsuarioSeguir', on_delete=django.db.models.deletion.PROTECT, to='Web.Usuario')),
            ],
            options={
                'db_table': 'Seguir',
            },
        ),
    ]
|
import csv
import datetime
import gzip
import json
import os
import subprocess
import sys
from xml.etree import ElementTree
import hail as hl
from tqdm import tqdm
from data_pipeline.datasets.gnomad_v3.gnomad_v3_mitochondrial_variants import (
FILTER_NAMES as MITOCHONDRIAL_VARIANT_FILTER_NAMES,
)
from data_pipeline.data_types.locus import normalized_contig
from data_pipeline.data_types.variant import variant_id
CLINVAR_XML_URL = "https://ftp.ncbi.nlm.nih.gov/pub/clinvar/xml/clinvar_variation/weekly_release/ClinVarVariationRelease_00-latest_weekly.xml.gz"
CLINVAR_GOLD_STARS = {
"criteria provided, conflicting interpretations": 1,
"criteria provided, multiple submitters, no conflicts": 2,
"criteria provided, single submitter": 1,
"no assertion criteria provided": 0,
"no assertion provided": 0,
"no interpretation for the single variant": 0,
"practice guideline": 4,
"reviewed by expert panel": 3,
}
class SkipVariant(Exception):
    """Raised while parsing a VariationArchive element to signal the
    variant should be skipped (no InterpretedRecord, or no usable VCF
    location on any assembly)."""
    pass
def _parse_submission(submission_element, trait_mapping_list_element):
    """Convert one ClinicalAssertion XML element into a plain dict.

    Returned keys: id, submitter_name, clinical_significance,
    last_evaluated, review_status, and conditions — a list of
    {"name": ..., "medgen_id": ...} dicts (medgen_id may be None).
    """
    submission = {}
    submission["id"] = submission_element.find("./ClinVarAccession").attrib["Accession"]
    submission["submitter_name"] = submission_element.find("./ClinVarAccession").attrib["SubmitterName"]
    submission["clinical_significance"] = None
    interpretation_element = submission_element.find("./Interpretation")
    interpretation_description_element = interpretation_element.find("./Description")
    if interpretation_description_element is not None:
        submission["clinical_significance"] = interpretation_description_element.text
    submission["last_evaluated"] = interpretation_element.attrib.get("DateLastEvaluated", None)
    submission["review_status"] = submission_element.find("./ReviewStatus").text
    submission["conditions"] = []
    trait_elements = submission_element.findall("./TraitSet/Trait")
    for trait_element in trait_elements:
        preferred_name_element = None
        mapping_element = None
        # Resolve each trait to a MedGen concept via the TraitMappingList,
        # trying in order: (1) an XRef-based mapping, (2) a Name-based
        # mapping for the trait's Preferred name, (3) a Name-based mapping
        # for any other name.
        if trait_mapping_list_element is not None:
            xref_elements = trait_element.findall("XRef")
            for xref_element in xref_elements:
                selector = f"./TraitMapping[@ClinicalAssertionID='{submission_element.attrib['ID']}'][@TraitType='{trait_element.attrib['Type']}'][@MappingType='XRef'][@MappingValue='{xref_element.attrib['ID']}']"
                mapping_element = trait_mapping_list_element.find(selector)
                if mapping_element is not None:
                    break
        if mapping_element is None:
            preferred_name_element = trait_element.find("./Name/ElementValue[@Type='Preferred']")
            if preferred_name_element is not None and trait_mapping_list_element is not None:
                selector = f"./TraitMapping[@ClinicalAssertionID='{submission_element.attrib['ID']}'][@TraitType='{trait_element.attrib['Type']}'][@MappingType='Name'][@MappingValue=\"{preferred_name_element.text}\"]"
                mapping_element = trait_mapping_list_element.find(selector)
        if mapping_element is None:
            name_elements = trait_element.findall("./Name/ElementValue")
            for name_element in name_elements:
                # Remember the first name seen as a fallback display name.
                if preferred_name_element is None:
                    preferred_name_element = name_element
                if trait_mapping_list_element is not None:
                    selector = f"./TraitMapping[@ClinicalAssertionID='{submission_element.attrib['ID']}'][@TraitType='{trait_element.attrib['Type']}'][@MappingType='Name'][@MappingValue=\"{name_element.text}\"]"
                    mapping_element = trait_mapping_list_element.find(selector)
                    if mapping_element:
                        break
        if mapping_element is not None:
            # Mapped: take both the MedGen concept name and CUI.
            medgen_element = mapping_element.find("./MedGen")
            submission["conditions"].append(
                {"name": medgen_element.attrib["Name"], "medgen_id": medgen_element.attrib["CUI"]}
            )
        elif preferred_name_element is not None:
            # Unmapped: keep the trait name without a MedGen id.
            submission["conditions"].append({"name": preferred_name_element.text, "medgen_id": None})
    return submission
def _parse_variant(variant_element):
    """Convert one VariationArchive XML element into a plain dict.

    Raises SkipVariant when the element has no InterpretedRecord, or no
    location with VCF-style coordinates on any assembly.
    """
    variant = {}
    if variant_element.find("./InterpretedRecord") is None:
        raise SkipVariant
    variant["locations"] = {}
    location_elements = variant_element.findall("./InterpretedRecord/SimpleAllele/Location/SequenceLocation")
    for element in location_elements:
        try:
            variant["locations"][element.attrib["Assembly"]] = {
                "locus": element.attrib["Chr"] + ":" + element.attrib["positionVCF"],
                "alleles": [element.attrib["referenceAlleleVCF"], element.attrib["alternateAlleleVCF"]],
            }
        except KeyError:
            # Locations without VCF coordinates are silently ignored.
            pass
    if not variant["locations"]:
        raise SkipVariant
    variant["clinvar_variation_id"] = variant_element.attrib["VariationID"]
    variant["rsid"] = None
    rsid_element = variant_element.find("./InterpretedRecord/SimpleAllele/XRefList/XRef[@DB='dbSNP']")
    if rsid_element is not None:
        variant["rsid"] = rsid_element.attrib["ID"]
    review_status_element = variant_element.find("./InterpretedRecord/ReviewStatus")
    variant["review_status"] = review_status_element.text
    # Map the textual review status to the 0-4 star rating.
    variant["gold_stars"] = CLINVAR_GOLD_STARS[variant["review_status"]]
    variant["clinical_significance"] = None
    clinical_significance_elements = variant_element.findall(
        "./InterpretedRecord/Interpretations/Interpretation[@Type='Clinical significance']"
    )
    if clinical_significance_elements:
        # NOTE(review): assumes every such Interpretation has a Description
        # child; a missing one would raise AttributeError here — confirm.
        variant["clinical_significance"] = ", ".join(
            el.find("Description").text for el in clinical_significance_elements
        )
    variant["last_evaluated"] = None
    evaluated_dates = [el.attrib.get("DateLastEvaluated", None) for el in clinical_significance_elements]
    evaluated_dates = [date for date in evaluated_dates if date]
    if evaluated_dates:
        # Keep the most recent evaluation date.
        variant["last_evaluated"] = sorted(
            evaluated_dates, key=lambda date: datetime.datetime.strptime(date, "%Y-%m-%d"), reverse=True,
        )[0]
    submission_elements = variant_element.findall("./InterpretedRecord/ClinicalAssertionList/ClinicalAssertion")
    trait_mapping_list_element = variant_element.find("./InterpretedRecord/TraitMappingList")
    variant["submissions"] = [_parse_submission(el, trait_mapping_list_element) for el in submission_elements]
    return variant
def import_clinvar_xml(clinvar_xml_path):
    """Stream-parse a ClinVar variation XML release into a Hail table.

    The XML is copied locally (via gsutil), parsed incrementally into a
    TSV with one row per variant, pushed to HDFS, and imported with
    hl.import_table. The release date is attached as a table global.
    """
    release_date = None
    clinvar_xml_local_path = os.path.join("/tmp", os.path.basename(clinvar_xml_path))
    print("Copying ClinVar XML")
    if not os.path.exists(clinvar_xml_local_path):
        subprocess.check_call(["gsutil", "cp", clinvar_xml_path, clinvar_xml_local_path])
    print("Parsing XML file")
    with open("/tmp/clinvar_variants.tsv", "w", newline="") as output_file:
        # QUOTE_NONE + empty quotechar: fields are written verbatim
        # (the JSON columns must not be re-quoted by the csv module).
        writer = csv.writer(output_file, delimiter="\t", quotechar="", quoting=csv.QUOTE_NONE)
        writer.writerow(["locus_GRCh37", "alleles_GRCh37", "locus_GRCh38", "alleles_GRCh38", "variant"])
        open_file = gzip.open if clinvar_xml_local_path.endswith(".gz") else open
        with open_file(clinvar_xml_local_path, "r") as xml_file:
            # The exact number of variants in the XML file is unknown.
            # Approximate it to show a progress bar.
            progress = tqdm(total=1_100_000, mininterval=5)
            # iterparse keeps memory bounded on the multi-GB release file.
            xml = ElementTree.iterparse(xml_file, events=["end"])
            for _, element in xml:
                if element.tag == "ClinVarVariationRelease":
                    release_date = element.attrib["ReleaseDate"]
                if element.tag == "VariationArchive":
                    try:
                        variant = _parse_variant(element)
                        locations = variant.pop("locations")
                        writer.writerow(
                            [
                                locations["GRCh37"]["locus"] if "GRCh37" in locations else "NA",
                                json.dumps(locations["GRCh37"]["alleles"]) if "GRCh37" in locations else "NA",
                                # GRCh38 loci are written in "chrN" style, with
                                # the mitochondrial contig renamed MT -> M.
                                "chr" + locations["GRCh38"]["locus"].replace("MT", "M")
                                if "GRCh38" in locations
                                else "NA",
                                json.dumps(locations["GRCh38"]["alleles"]) if "GRCh38" in locations else "NA",
                                json.dumps(variant),
                            ]
                        )
                        progress.update(1)
                    except SkipVariant:
                        pass
                    except Exception:
                        print(
                            f"Failed to parse variant {element.attrib['VariationID']}", file=sys.stderr,
                        )
                        raise
                    # https://stackoverflow.com/questions/7697710/python-running-out-of-memory-parsing-xml-using-celementtree-iterparse
                    element.clear()
            progress.close()
    subprocess.check_call(["hdfs", "dfs", "-cp", "-f", "file:///tmp/clinvar_variants.tsv", "/tmp/clinvar_variants.tsv"])
    ds = hl.import_table(
        "/tmp/clinvar_variants.tsv",
        types={
            "locus_GRCh37": hl.tlocus("GRCh37"),
            "alleles_GRCh37": hl.tarray(hl.tstr),
            "locus_GRCh38": hl.tlocus("GRCh38"),
            "alleles_GRCh38": hl.tarray(hl.tstr),
            "variant": hl.tstruct(
                clinvar_variation_id=hl.tstr,
                rsid=hl.tstr,
                review_status=hl.tstr,
                gold_stars=hl.tint,
                clinical_significance=hl.tstr,
                last_evaluated=hl.tstr,
                submissions=hl.tarray(
                    hl.tstruct(
                        id=hl.tstr,
                        submitter_name=hl.tstr,
                        clinical_significance=hl.tstr,
                        last_evaluated=hl.tstr,
                        review_status=hl.tstr,
                        conditions=hl.tarray(hl.tstruct(name=hl.tstr, medgen_id=hl.tstr)),
                    )
                ),
            ),
        },
        min_partitions=2000,
    )
    ds = ds.annotate_globals(clinvar_release_date=release_date)
    return ds
def prepare_clinvar_variants(clinvar_path, reference_genome):
    """Project the imported ClinVar table onto one reference genome.

    Keeps only rows with a locus/alleles pair for ``reference_genome``
    ("GRCh37" or "GRCh38"), flattens the nested ``variant`` struct into
    top-level fields, drops variants with non-ACGT alleles, and keys the
    result by (locus, alleles).
    """
    ds = hl.read_table(clinvar_path)
    ds = ds.filter(hl.is_defined(ds[f"locus_{reference_genome}"]) & hl.is_defined(ds[f"alleles_{reference_genome}"]))
    ds = ds.select(locus=ds[f"locus_{reference_genome}"], alleles=ds[f"alleles_{reference_genome}"], **ds.variant)
    # Remove any variants with alleles other than ACGT
    # (split("") yields "" fragments too, hence "" in the allowed set).
    ds = ds.filter(
        hl.len(hl.set(hl.delimit(ds.alleles, "").split("")).difference(hl.set(["A", "C", "G", "T", ""]))) == 0
    )
    ds = ds.annotate(
        variant_id=variant_id(ds.locus, ds.alleles),
        reference_genome=reference_genome,
        chrom=normalized_contig(ds.locus.contig),
        pos=ds.locus.position,
        ref=ds.alleles[0],
        alt=ds.alleles[1],
    )
    ds = ds.key_by("locus", "alleles")
    return ds
def _get_gnomad_variants(
    gnomad_exome_variants_path=None, gnomad_genome_variants_path=None, gnomad_mitochondrial_variants_path=None
):
    """Combine gnomAD exome/genome/mitochondrial tables into one table with
    ``exome`` and ``genome`` frequency structs (filters, ac, an).

    Each input is optional; AC=0 rows are dropped everywhere so presence in
    the result means "observed in gnomAD". Returns None when no path is
    given.
    """
    gnomad_exome_variants = None
    gnomad_genome_variants = None
    if gnomad_exome_variants_path:
        gnomad_exome_variants = hl.read_table(gnomad_exome_variants_path)
        gnomad_exome_variants = gnomad_exome_variants.select(
            exome=hl.struct(
                filters=gnomad_exome_variants.filters,
                ac=gnomad_exome_variants.freq[0].AC,
                an=gnomad_exome_variants.freq[0].AN,
            )
        )
        # For purposes of marking ClinVar variants as "in gnomAD", exclude AC=0 gnomAD variants
        gnomad_exome_variants = gnomad_exome_variants.filter(gnomad_exome_variants.exome.ac > 0)
    if gnomad_genome_variants_path:
        gnomad_genome_variants = hl.read_table(gnomad_genome_variants_path)
        gnomad_genome_variants = gnomad_genome_variants.select(
            genome=hl.struct(
                filters=gnomad_genome_variants.filters,
                ac=gnomad_genome_variants.freq[0].AC,
                an=gnomad_genome_variants.freq[0].AN,
            )
        )
        # For purposes of marking ClinVar variants as "in gnomAD", exclude AC=0 gnomAD variants
        gnomad_genome_variants = gnomad_genome_variants.filter(gnomad_genome_variants.genome.ac > 0)
    gnomad_variants = None
    if gnomad_exome_variants and gnomad_genome_variants:
        gnomad_variants = gnomad_exome_variants.join(gnomad_genome_variants, how="outer")
    elif gnomad_exome_variants:
        gnomad_variants = gnomad_exome_variants.annotate(genome=hl.missing(gnomad_exome_variants.exome.dtype))
    elif gnomad_genome_variants:
        gnomad_variants = gnomad_genome_variants.annotate(exome=hl.missing(gnomad_genome_variants.genome.dtype))
    if gnomad_mitochondrial_variants_path:
        gnomad_mitochondrial_variants = hl.read_table(gnomad_mitochondrial_variants_path)
        gnomad_mitochondrial_variants = gnomad_mitochondrial_variants.select(
            genome=hl.struct(
                filters=gnomad_mitochondrial_variants.filters.map(
                    lambda f: MITOCHONDRIAL_VARIANT_FILTER_NAMES.get(f, f)
                ),
                # AC/AN fields in the MT variants table are int64 instead of int32.
                # They need to be converted to the same type as the nuclear variants' fields to union the tables.
                ac=hl.int(gnomad_mitochondrial_variants.AC_hom + gnomad_mitochondrial_variants.AC_het),
                an=hl.int(gnomad_mitochondrial_variants.AN),
            )
        )
        gnomad_mitochondrial_variants = gnomad_mitochondrial_variants.filter(
            gnomad_mitochondrial_variants.genome.ac > 0
        )
        gnomad_mitochondrial_variants = gnomad_mitochondrial_variants.annotate(
            exome=hl.missing(gnomad_mitochondrial_variants.genome.dtype)
        )
        if gnomad_variants is None:
            # Bug fix: previously this unconditionally called
            # gnomad_variants.union(...), which raised AttributeError when
            # only the mitochondrial path was provided.
            gnomad_variants = gnomad_mitochondrial_variants
        else:
            gnomad_variants = gnomad_variants.union(gnomad_mitochondrial_variants)
    return gnomad_variants
def annotate_clinvar_variants_in_gnomad(
    clinvar_path,
    gnomad_exome_variants_path=None,
    gnomad_genome_variants_path=None,
    gnomad_mitochondrial_variants_path=None,
):
    """Annotate each ClinVar variant with its matching gnomAD frequency
    row (``gnomad``) and a boolean ``in_gnomad`` presence flag."""
    gnomad_variants = _get_gnomad_variants(
        gnomad_exome_variants_path, gnomad_genome_variants_path, gnomad_mitochondrial_variants_path
    )
    clinvar = hl.read_table(clinvar_path)
    clinvar = clinvar.annotate(gnomad=gnomad_variants[clinvar.key])
    clinvar = clinvar.annotate(in_gnomad=hl.is_defined(clinvar.gnomad))
    return clinvar
|
import datetime
class Solution:
    # @return a string
    def convert(self, s, nRows):
        """ZigZag conversion: write ``s`` down-and-diagonally over
        ``nRows`` rows, then read the rows left to right."""
        length = len(s)
        if nRows == 1 or nRows >= length:
            # Degenerate zigzag: the string is unchanged.
            return s
        rows = [''] * nRows
        row = 0
        step = 1
        for ch in s:
            rows[row] += ch
            # Bounce the cursor between the top and bottom rows.
            if row == 0:
                step = 1
            elif row == nRows - 1:
                step = -1
            row += step
        return ''.join(rows)
# --- Ad-hoc smoke tests and timing (Python 2 print-statement syntax) ---
s = Solution()
r = s.convert('PAYPALISHIRING', 3)
print r
# Expected zigzag of the line above, for eyeball comparison:
print 'PAHNAPLSIIGYIR'
# Edge cases: single char, short strings, nRows larger than the input.
print s.convert('A', 2)
print s.convert('ABC', 2)
print s.convert('ABCDE', 2)
print s.convert('PAYPALISHIRING', 8)
print s.convert('chrynqxqqmlfotpqhvokiiammqmvxjvbsoaifzyxnjcberrnmixxsyjhovengbpyqrixqgwdrygxrxkfhicainhwi', 24)
# Rough wall-clock timing of a longer input.
starttime = datetime.datetime.now()
print s.convert("twckwuyvbihajbmhmodminftgpdcbquupwflqfiunpuwtigfwjtgzzcfofjpydjnzqysvgmiyifrrlwpwpyvqadefmvfshsrxsltbxbziiqbvosufqpwsucyjyfbhauesgzvfdwnloojejdkzugsrksakzbrzxwudxpjaoyocpxhycrxwzrpllpwlsnkqlevjwejkfxmuwvsyopxpjmbuexfwksoywkhsqqevqtpoohpd", 4)
endtime = datetime.datetime.now()
print (endtime - starttime)
# Timing with a very large nRows (wide, mostly-empty grid in the original
# implementation).
starttime = datetime.datetime.now()
print s.convert("rbgneoikagdaszofomldsoiyvqcubmbsnjhuaqeayjdrktyzhjczxkasaeodqrxgdfadvgftpkmzgkyntdptnpnqtctmvrsywudtalmpxbqdlprfsgahxnxsrtenkynistduerjdsaherallilshyqzqjgfvgufznittzfzshgyyijjqwbzwtyoeetbnqgodqjzsoymxgkvovggrkfndfckmxlznuntkwuensqkaciqxowqydgekkqwhbxxvkqijkowehiejqujhsbhxhnhrdebfbycoomxabveiditwecdwgmtnaahdgikkinyyzxycsekxoevsiaoonjenjnqsxbcfqadrzdutheqvkonvodlbmrqxejpbmticvltuonnxugsguhdkkpoygeynnpozjzdekxbbmoygwtpsasdnhrnqvldldtkmsosfntizkiigbzberetbvitswydztqlogfttbdulletpdwvxaoaxytqurvtmlgatmtopylckpxzvuuuukusinkdghystftotodinokzfhycbuwygqsofctljsgezbvsryceomdvvdyzzuxfnrwstpgejmlkpgegggnuusrswprxmqdzhzrcqzgcltmczrjfxwbuotmmkfkwvnvcopgeomgcivehmnpllsfcntoyyautpsxdhdkxsszangjbtanhtujgoxeoabkpthtcnfzhxbhlhsmctbjmwuumuzcucmjxwcbjhcqhdzsmrgpmkayivtwpuyjyl", 647)
endtime = datetime.datetime.now()
print (endtime - starttime)
|
#!/usr/bin/env python3
import os
import sys
import pdb
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
class Parser(object):
    """Parser for '###'/'##'/'#'-structured benchmark logs.

    Line grammar (dispatch on the first whitespace-separated token):
      ### KEY = VALUE   -> config entry
      ## a:b:c:...      -> data key applied to subsequent rows
      # col1 col2 ...   -> schema (column names) for subsequent rows
      v1 v2 ...         -> data row, matched positionally to the schema
    """
    def __init__(self):
        self.config = {}  # e.g. self.config['SYSTEM'] = 'Linux kernel ...'
        self.data = {}    # self.data[self.key] = {column: value}
        self.key = ()     # current key tuple, e.g. (mem, ext2, DWOM, 0002)
        self.schema = []  # current column names, e.g. ['ncpu', 'secs', ...]

    def parse(self, log_file):
        """Consume every non-empty line of ``log_file``."""
        for line in self._get_line(log_file):
            parse_fn = self._get_parse_fn(line)
            parse_fn(line)

    def search_data(self, key_list=()):
        """Return sorted (key, row_dict) pairs whose key matches
        ``key_list``; '*' is a wildcard in either side, and shorter keys
        match by prefix."""
        # Note: default changed from a shared mutable [] to an immutable ().
        results = []
        nkey = self._norm_key(key_list)
        for key in self.data:
            if not self._match_key(nkey, key):
                continue
            results.append((key, self.data[key]))
        results.sort()
        return results

    def get_config(self, key):
        """Return a '###' config value, or None when absent."""
        return self.config.get(key, None)

    def _match_key(self, key1, key2):
        # Element-wise comparison; zip() truncates to the shorter key, so a
        # partial key acts as a prefix match.
        for (k1, k2) in zip(key1, key2):
            if k1 == "*" or k2 == "*":
                continue
            if k1 != k2:
                return False
        return True

    def _get_line(self, pn):
        # Yield stripped, non-empty lines of the file at path pn.
        with open(pn) as fd:
            for raw in fd:
                line = raw.strip()
                # Was `line is not ""`: an identity check that only worked
                # because of small-string interning (SyntaxWarning on 3.8+).
                if line != "":
                    yield line

    def _get_parse_fn(self, l):
        type_parser = {
            "###": self._parse_config,
            "##": self._parse_key,
            "#": self._parse_schema,
        }
        return type_parser.get(l.split()[0], self._parse_data)

    def _parse_config(self, l):
        # "### KEY = VALUE" -> config[KEY] = VALUE
        kv = l.split(" ", 1)[1].split("=", 1)
        (key, value) = (kv[0].strip(), kv[1].strip())
        self.config[key] = value

    def _norm_key(self, ks):
        return tuple(self._norm_str(k) for k in ks)

    def _parse_key(self, l):
        # "## a:b:c" -> current key (normalized)
        ks = l.split(" ", 1)[1].split(":")
        self.key = self._norm_key(ks)

    def _parse_schema(self, l):
        # "# col1 col2 ..." -> column names for subsequent data rows
        self.schema = l.split()[1:]

    def _parse_data(self, l):
        # Positional row values under the current key/schema.
        row = self.data.setdefault(self.key, {})
        for (d_key, d_value) in zip(self.schema, l.split()):
            row[d_key] = d_value

    def _norm_str(self, s):
        # Zero-pad integers so lexicographic sorting equals numeric sorting.
        try:
            n = int(s)
            return "%09d" % n
        except ValueError:
            return s
def __get_cpu_num(log, fs, bc, core, sp):
    """Print CPU-time breakdowns for the runs matching (mem, fs, bc, core).

    ``sp`` is the percentage of system time attributed to sync; when it is
    0 the raw sys time is printed instead of the fs/sync split.
    """
    parser = Parser()
    parser.parse(os.path.normpath(os.path.join(CUR_DIR, log)))
    for (_key, row) in parser.search_data(("mem", fs, bc, core)):
        systime = row['sys.sec']
        usertime = row['user.sec']
        iotime = row['iowait.sec']
        idletime = row['idle.sec']
        sync_pct = float(sp)
        if sync_pct == 0:
            print("user: %s\nsys: %s\nidle: %s\nio: %s\n" %
                  (usertime, systime, idletime, iotime))
        else:
            # Split system time into a sync part and the remaining fs part.
            synctime = sync_pct * float(systime) / 100.0
            fstime = float(systime) - synctime
            print("user: %s\nfs-sys: %s\nsync-sys:%s\nidle: %s\nio: %s\n" %
                  (usertime, fstime, synctime, idletime, iotime))
def __get_performance(log, fs, bc):
    """Print works/sec of each run matching (mem, fs, bc, *), normalized
    to the first (lowest-key) matching run."""
    parser = Parser()
    parser.parse(os.path.normpath(os.path.join(CUR_DIR, log)))
    rows = parser.search_data(("mem", fs, bc, "*"))
    for entry in rows:
        relative = float(entry[1]["works/sec"]) / float(rows[0][1]["works/sec"])
        print("%s: %s" % (entry[1]["ncpu"], relative))
if __name__ == "__main__":
if len(sys.argv) < 4:
print("Usage: parser.py {logfile} {fs} {bc} {core} {syn %} -> for time info")
print("Usage: parser.py p {logfile} {fs} {bc}")
print("Example: ./parser.py p ../logs/optimus-mem-lk-4.2.log f2fs DWAL")
print("Example: ./parser.py ../logs/optimus-mem-lk-4.2.log f2fs DWAL 80 85.7")
exit(1)
if sys.argv[1] == 'p':
__get_performance(sys.argv[2], sys.argv[3], sys.argv[4])
exit(0)
__get_cpu_num(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
|
# coding: utf-8
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import errno
import logging
import os
import random
import select
import signal
import socket
import sys
import threading
import time
import zmq
from zmq.eventloop import ioloop
#ioloop.install()
from zmq.eventloop.zmqstream import ZMQStream
from .kernel.kernelmanager import MappingKernelManager
from .notebook.nbmanager import NotebookManager
from .notebook.filenbmanager import FileNotebookManager
from IPython.config.application import catch_config_error, boolean_flag
from IPython.core.application import BaseIPythonApplication
from IPython.consoleapp import IPythonConsoleApp
from IPython.kernel import swallow_argv
from IPython.kernel.zmq.session import default_secure
from IPython.kernel.zmq.kernelapp import (
kernel_flags,
kernel_aliases,
)
from IPython.utils.importstring import import_item
from IPython.utils.localinterfaces import LOCALHOST
from IPython.utils import submodule
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes,
DottedObjectName
)
from IPython.utils import py3compat
from IPython.utils.path import filefind
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
# Start from the kernel app's flags/aliases and layer the notebook-specific
# ones on top; notebook_flags / notebook_aliases list the names that must be
# scrubbed from argv before it is forwarded to kernels.
flags = dict(kernel_flags)
notebook_flags = [ ]
aliases = dict(kernel_aliases)
aliases.update({
    'notebook-dir': 'NotebookManager.notebook_dir',
})
# remove ipkernel flags that are singletons, and don't make sense in
# multi-kernel evironment:
aliases.pop('f', None)
notebook_aliases = [u'notebook-dir', u'profile', u'profile-dir']
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(BaseIPythonApplication):
    """Embedded IPython notebook application: wires a MappingKernelManager
    and a notebook manager together and runs the zmq IOLoop.

    NOTE(review): uses Python 2 print statements in start()/stop(); this
    module is Python 2 only.
    """
    name = 'ipython-notebook'
    classes = IPythonConsoleApp.classes + [MappingKernelManager, NotebookManager,
        FileNotebookManager]
    flags = Dict(flags)
    aliases = Dict(aliases)
    # argv fragments forwarded to each spawned kernel.
    kernel_argv = List(Unicode)
    def _log_level_default(self):
        return logging.DEBUG
    # create requested profiles by default, if they don't exist:
    auto_create = Bool(True)
    notebook_manager_class = DottedObjectName('IPython.embedded.notebook.filenbmanager.FileNotebookManager',
                                              config=True,
                                              help='The notebook manager class to use.')
    def parse_command_line(self, argv=None):
        # A single extra positional argument may be either a notebook
        # directory or a notebook file to open.
        super(NotebookApp, self).parse_command_line(argv)
        if self.extra_args:
            f = os.path.abspath(self.extra_args[0])
            if os.path.isdir(f):
                nbdir = f
            else:
                self.file_to_run = f
                nbdir = os.path.dirname(f)
            self.config.NotebookManager.notebook_dir = nbdir
    def init_kernel_argv(self):
        """construct the kernel arguments"""
        # Scrub frontend-specific flags
        self.kernel_argv = swallow_argv(self.argv, notebook_aliases, notebook_flags)
        # Kernel should inherit default config file from frontend
        self.kernel_argv.append("--IPKernelApp.parent_appname='%s'" % self.name)
        # Kernel should get *absolute* path to profile directory
        self.kernel_argv.extend(["--profile-dir", self.profile_dir.location])
    def init_configurables(self):
        # force Session default to be secure
        default_secure(self.config)
        self.kernel_manager = MappingKernelManager(
            parent=self, log=self.log, kernel_argv=self.kernel_argv,
            connection_dir = self.profile_dir.security_dir,
        )
        # The notebook manager class is configurable (see
        # notebook_manager_class above); resolve and instantiate it.
        kls = import_item(self.notebook_manager_class)
        self.notebook_manager = kls(parent=self, log=self.log)
        self.notebook_manager.load_notebook_names()
        #self.cluster_manager = ClusterManager(parent=self, log=self.log)
        #self.cluster_manager.update_profiles()
    @catch_config_error
    def initialize(self, argv=None):
        super(NotebookApp, self).initialize(argv)
        self.init_kernel_argv()
        self.init_configurables()
    def cleanup_kernels(self):
        """Shutdown all kernels.
        The kernels will shutdown themselves when this process no longer exists,
        but explicit shutdown allows the KernelManagers to cleanup the connection files.
        """
        self.log.info('Shutting down kernels')
        self.kernel_manager.shutdown_all()
    def start(self):
        """ Start the IPython Notebook server app, after initialization
        This method takes no arguments so all configuration and initialization
        must be done prior to calling this method."""
        try:
            ioloop.IOLoop.instance().start()
        except:
            print "Error on stop:", sys.exc_info()[0]
            raise
        finally:
            # Always reap kernels, even on error.
            self.cleanup_kernels()
    def stop(self):
        try:
            ioloop.IOLoop.instance().stop()
        except:
            print "Error on stop:", sys.exc_info()[0]
            raise
        finally:
            self.cleanup_kernels()
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
#launch_new_instance = NotebookApp.launch_instance
|
import json
from indy.ledger import build_ledgers_freeze_request, build_get_frozen_ledgers_request
from plenum.test.helper import sdk_get_and_check_replies, \
sdk_send_signed_requests, sdk_sign_and_submit_req_obj, sdk_multi_sign_request_objects, sdk_json_to_request_object
def sdk_send_freeze_ledgers(looper, sdk_pool_handle, sdk_wallets, ledgers_ids: list):
    """Build a LEDGERS_FREEZE request for ``ledgers_ids``, multi-sign it
    with every wallet in ``sdk_wallets`` (the first wallet's DID is the
    request author), submit it, and return the first checked reply.
    """
    # Annotation fix: was `ledgers_ids: [int]` — a list literal is not a
    # valid type expression for typing tools.
    req = looper.loop.run_until_complete(build_ledgers_freeze_request(sdk_wallets[0][1], ledgers_ids))
    signed_reqs = sdk_multi_sign_request_objects(looper, sdk_wallets,
                                                 [sdk_json_to_request_object(json.loads(req))])
    reps = sdk_send_signed_requests(sdk_pool_handle, signed_reqs)
    return sdk_get_and_check_replies(looper, reps)[0]
def sdk_get_frozen_ledgers(looper, sdk_pool_handle, sdk_wallet):
    """Submit a GET_FROZEN_LEDGERS request signed by ``sdk_wallet`` and
    return the first checked reply."""
    raw_req = looper.loop.run_until_complete(build_get_frozen_ledgers_request(sdk_wallet[1]))
    req_obj = sdk_json_to_request_object(json.loads(raw_req))
    rep = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle, sdk_wallet, req_obj)
    return sdk_get_and_check_replies(looper, [rep])[0]
|
# -*- coding: utf-8 -*-
"""Module for defining classes that handle the conversion between bytes and
data structures.
"""
import struct
__author__ = 'andreas@blixt.org (Andreas Blixt)'
__all__ = ['Array', 'Marshaler', 'MarshalerInitializer']
class MarshalerInitializer(type):
    """Meta-class for setting up new classes with the base type Marshaler.

    When a class body declares a struct ``format`` string, its packed
    ``size`` is computed once at class-creation time.
    """
    def __new__(cls, name, bases, namespace):
        # Pre-calculate the size of the value, if possible.
        if 'format' in namespace:
            namespace['size'] = struct.calcsize(namespace['format'])
        return type.__new__(cls, name, bases, namespace)
class Marshaler(object):
    """Descriptor that defines one field of a packet, handling the
    conversion between a Python value and its byte representation.

    NOTE(review): `__metaclass__` is the Python 2 metaclass hook; Python 3
    ignores it, so `size` would not be pre-computed there.
    """
    __metaclass__ = MarshalerInitializer
    # Class-level counter used to record the creation order of instances.
    _counter = 0
    def __init__(self, **kwargs):
        if 'default' in kwargs:
            self.default = kwargs['default']
        # Keep track of the order that Marshaler instances are created so that
        # the field order can be known by packets.
        self._creation_index = Marshaler._counter
        Marshaler._counter += 1
    def __get__(self, instance, owner):
        # Descriptor read: per-instance values are stored under '_v_<name>'.
        if hasattr(self, 'default'):
            return getattr(instance, '_v_' + self.name, self.default)
        else:
            try:
                return getattr(instance, '_v_' + self.name)
            except AttributeError:
                # No value and no default: surface a descriptive error.
                raise AttributeError('\'%s\' missing value for \'%s\' (%s)' % (
                    instance.__class__.__name__,
                    self.name,
                    self.__class__.__name__))
    def __set__(self, instance, value):
        setattr(instance, '_v_' + self.name, value)
    def _set_up(self, name):
        # Called by the owning packet class to tell the field its name.
        self.name = name
    @classmethod
    def bytes_from(cls, value):
        """Returns a byte string for the specified value.
        This is the method that should be overridden for defining the binary
        format of the data.
        """
        return struct.pack(cls.format, value)
    @classmethod
    def read_bytes(cls, reader):
        """Reads a value from the specified reader.
        This is the method that should be overridden for defining data load
        behavior.
        """
        data = reader.get(cls.size)
        # XXX: Ignores formats with more than one value.
        return struct.unpack(cls.format, data)[0]
    def bytes_for(self, packet):
        """Returns the byte string for the value of this field for the
        specified packet.
        """
        return self.bytes_from(self.value_for(packet))
    def has_value(self, packet):
        """Returns True if the specified Packet has a value set for this
        Marshaler. Default values do not count.
        """
        return hasattr(packet, '_v_' + self.name)
    def read(self, packet, reader):
        """Reads the value from the specified PacketReader and sets the
        appropriate field of the specified packet.
        """
        self.__set__(packet, self.read_value(packet, reader))
    def read_value(self, packet, reader):
        """Responsible for returning a value given a packet and a reader.
        If the read_bytes function depends on packet-specific values, override
        this method to provide those values to it.
        """
        return self.read_bytes(reader)
    def value_for(self, packet):
        """Returns the value of this field for the specified packet, or the
        default value if the packet does not have a value set.
        """
        value = self.__get__(packet, packet.__class__)
        return value
class Array(Marshaler):
    """Marshaler for a homogeneous sequence of another Marshaler type,
    optionally prefixed by a marshaled length field.

    NOTE: this module is Python 2 code (``long``, ``xrange``,
    byte-string joins).
    """
    def __init__(self, length_type, item_type, **kwargs):
        """Initializes an array field. The length of the array is determined
        using length_type. If length_type is a number, that number is used as
        a fixed length instead. Every item in the array is read/written as
        item_type.
        """
        super(Array, self).__init__(**kwargs)
        self.length_type = length_type
        self.item_type = item_type

    @classmethod
    def bytes_from(cls, value, length_type, item_type, **item_kwargs):
        """Takes a list and gets the bytes for every item through the item_type
        class. The length of the list is written using length_type. If
        length_type is a number, no length is written, but the list is checked
        to make sure that it's the length specified by length_type.

        The item_kwargs argument is a dictionary of keyword arguments to pass
        on to the bytes_from method of the item_type class.
        """
        pieces = []
        # A numeric length_type means a fixed-size array: validate only.
        if isinstance(length_type, (int, long)):
            assert len(value) == length_type, 'Invalid value length'
        else:
            pieces.append(length_type.bytes_from(len(value)))
        for item in value:
            pieces.append(item_type.bytes_from(item, **item_kwargs))
        return ''.join(pieces)

    @classmethod
    def read_bytes(cls, reader, length_type, item_type, **item_kwargs):
        """Reads a sequence of item_type. The length of the sequence is
        determined by reading a number using length_type. (If length_type is a
        number, that number will be used.)

        The item_kwargs argument is a dictionary of keyword arguments to pass
        on to the read_bytes method of the item_type class.
        """
        if isinstance(length_type, (int, long)):
            length = length_type
        else:
            length = length_type.read_bytes(reader)
        value = []
        for i in xrange(length):
            value.append(item_type.read_bytes(reader, **item_kwargs))
        return value

    def bytes_for(self, packet):
        # Delegate to the classmethod with this instance's configuration.
        return self.bytes_from(self.value_for(packet), self.length_type,
                               self.item_type)

    def read_value(self, packet, reader):
        # Delegate to the classmethod with this instance's configuration.
        return self.read_bytes(reader, self.length_type, self.item_type)
|
# -*- coding: utf8 -*-
from __future__ import print_function, unicode_literals
import json
import logging
from . import BaseParser
logger = logging.getLogger("config")
class ThreathunterJsonParser(BaseParser):
    """
    Parse config from json with threathunter format.

    Threathunter api uses a special json envelope:
    ``{"status": 0, "msg": "OK", "values": [{"key": ..., "value": ...}]}``.
    """

    def __init__(self, name):
        """
        :param name: parser name (used to identify this config in logs)
        """
        self.name = name or ""

    def parse(self, data):
        """Parse a threathunter-format JSON document into a flat dict.

        :param data: raw JSON text; falsy input yields an empty dict
        :return: dict mapping each entry's "key" to its "value";
            empty dict on bad status or any parse failure
        """
        if not data:
            return {}
        try:
            result = json.loads(data)
            if result["status"] != 0:
                logger.error("config %s: the status is not good, status is %s", self.name, result["status"])
                return {}
            # Flatten the [{"key": ..., "value": ...}, ...] list.
            return {entry["key"]: entry["value"] for entry in result["values"]}
        except Exception as err:
            logger.error("config %s: fail to parse threathunter json config, the error is %s", self.name, err)
            return {}
        # NOTE: the original had an unreachable `return {}` here; removed.

    def dump(self, config, **kwargs):
        """Dump ``config`` into the threathunter JSON envelope.

        :param config: mapping of keys to values
        :return: dict with "status", "msg" and a "values" list; "values"
            is empty if ``config`` cannot be iterated as a mapping
        """
        result = {
            "status": 0,
            "msg": "OK",
        }
        try:
            result["values"] = [{"key": k, "value": v} for k, v in config.items()]
        except Exception as err:
            # Typo fix: message previously read "he error".
            logger.error("config %s: fail to dump config to threathunter json, the error is %s", self.name, err)
            result["values"] = []
        return result
|
"""Class implementation for the rotation_around_center_interface
interface.
"""
from typing import Dict
from apysc._animation.animation_rotation_around_center_interface import \
AnimationRotationAroundCenterInterface
from apysc._type.attr_linking_interface import AttrLinkingInterface
from apysc._type.int import Int
from apysc._type.revert_interface import RevertInterface
class RotationAroundCenterInterface(
        AnimationRotationAroundCenterInterface, RevertInterface,
        AttrLinkingInterface):
    """Interface adding a revertible, attr-linked ``rotation_around_center``
    property whose updates are mirrored as JS expressions.
    """

    # Current rotation value around this instance's center.
    _rotation_around_center: Int

    def _initialize_rotation_around_center_if_not_initialized(self) -> None:
        """
        Initialize the `_rotation_around_center` attribute if it hasn't
        been initialized yet.
        """
        if hasattr(self, '_rotation_around_center'):
            return
        self._rotation_around_center = Int(0)

        self._append_rotation_around_center_attr_linking_setting()

    def _append_rotation_around_center_attr_linking_setting(self) -> None:
        """
        Append a rotation around center attribute linking setting.
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_=self.
                _append_rotation_around_center_attr_linking_setting,
                locals_=locals(),
                module_name=__name__, class_=RotationAroundCenterInterface):
            self._append_applying_new_attr_val_exp(
                new_attr=self._rotation_around_center,
                attr_name='rotation_around_center')
            self._append_attr_to_linking_stack(
                attr=self._rotation_around_center,
                attr_name='rotation_around_center')

    @property
    def rotation_around_center(self) -> Int:
        """
        Get a rotation value around the center of this instance.

        Returns
        -------
        rotation_around_center : Int
            Rotation value around the center of this instance
            (a copy, so mutating it does not affect this instance).

        References
        ----------
        - GraphicsBase rotation_around_center interface document
        - https://bit.ly/3AjeSPr
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_='rotation_around_center', locals_=locals(),
                module_name=__name__, class_=RotationAroundCenterInterface):
            from apysc._type import value_util
            self._initialize_rotation_around_center_if_not_initialized()
            return value_util.get_copy(value=self._rotation_around_center)

    @rotation_around_center.setter
    def rotation_around_center(self, value: Int) -> None:
        """
        Update a rotation value around the center of this instance.

        Parameters
        ----------
        value : int or Int
            Rotation value around the center of this instance.
            Plain ints are wrapped into an ``ap.Int``.

        References
        ----------
        - GraphicsBase rotation_around_center interface
        - https://bit.ly/3AjeSPr
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_='rotation_around_center', locals_=locals(),
                module_name=__name__, class_=RotationAroundCenterInterface):
            from apysc._validation import number_validation
            self._initialize_rotation_around_center_if_not_initialized()
            number_validation.validate_integer(integer=value)
            if not isinstance(value, ap.Int):
                value = ap.Int(value)
            before_value: ap.Int = self._rotation_around_center
            self._rotation_around_center = value
            self._append_rotation_around_center_update_expression(
                before_value=before_value)

            self._append_rotation_around_center_attr_linking_setting()

    def _append_rotation_around_center_update_expression(
            self, *, before_value: Int) -> None:
        """
        Append the rotation around the center of this instance
        updating expression.

        Parameters
        ----------
        before_value : ap.Int
            Before updating value.
        """
        import apysc as ap
        with ap.DebugInfo(
                callable_=self._append_rotation_around_center_update_expression,  # noqa
                locals_=locals(),
                module_name=__name__, class_=RotationAroundCenterInterface):
            from apysc._type import value_util
            before_value_str: str = value_util.get_value_str_for_expression(
                value=before_value)
            after_value_str: str = value_util.get_value_str_for_expression(
                value=self._rotation_around_center)
            # Undo the previous rotation, apply the new one, then keep the
            # JS-side "before" variable in sync for the next update.
            expression: str = (
                f'{self.variable_name}.rotate(-{before_value_str});'
                f'\n{self.variable_name}.rotate({after_value_str});'
                f'\n{before_value_str} = {after_value_str};'
            )
            ap.append_js_expression(expression=expression)

    # Saved rotation values per snapshot name (see RevertInterface).
    _rotation_around_center_snapshots: Dict[str, int]

    def _make_snapshot(self, *, snapshot_name: str) -> None:
        """
        Make a value's snapshot.

        Parameters
        ----------
        snapshot_name : str
            Target snapshot name.
        """
        self._initialize_rotation_around_center_if_not_initialized()
        self._set_single_snapshot_val_to_dict(
            dict_name='_rotation_around_center_snapshots',
            value=int(self._rotation_around_center._value),
            snapshot_name=snapshot_name)

    def _revert(self, *, snapshot_name: str) -> None:
        """
        Revert a value if snapshot exists.

        Parameters
        ----------
        snapshot_name : str
            Target snapshot name.
        """
        if not self._snapshot_exists(snapshot_name=snapshot_name):
            return
        self._rotation_around_center._value = \
            self._rotation_around_center_snapshots[snapshot_name]
|
# flake8: noqa
# type: ignore
from .bot import PotiaBot
from .paginator import *
from .redis import RedisBridge
from .utils import *
|
import os
import shutil
import sys
import time
import tempfile
# Empty doctest registry for this test module.
__test__ = {}

# Repository root: four directory levels up from this file.
ROOT = os.path.dirname(
    os.path.dirname(
        os.path.dirname(
            os.path.dirname(__file__))))
# The dynomite launcher shipped in the repository's bin/ directory.
DYNOMITE = os.path.join(ROOT, 'bin', 'dynomite')
# Lazily created scratch directory (see tmp_dir()); None until first use.
TMP_DIR = None
def setup_module():
    """Launch a detached dynomite test node before this module's tests run."""
    cmd = ("%s start -o dpct1 -p 11222 -t 9200 --data '%s' "
           "--storage dict_storage "
           "-n 1 -r 1 -w 1 --detached" % (DYNOMITE, tmp_dir()))
    os.system(cmd)
    # Give the detached process a moment to come up before tests hit it.
    time.sleep(2)
def teardown_module():
    """Stop the dynomite node and remove its scratch directory."""
    os.system("%s stop -o dpct1" % DYNOMITE)
    scratch = tmp_dir()
    if os.path.isdir(scratch):
        shutil.rmtree(scratch)
def tmp_dir():
    """Return the shared scratch directory, creating it on first call."""
    # Don't want to create dir at import time, only on demand
    global TMP_DIR
    if TMP_DIR is None:
        TMP_DIR = tempfile.mkdtemp()
    return TMP_DIR
|
from EAB_tools.tools import (
display_and_save_df,
hash_df,
)
|
#!/usr/bin/env python
import server
import supervisor
class MinionServer(server.Server):
    """Server that starts Docker-based workers in response to messages."""

    def __init__(self, ip, port):
        super(MinionServer, self).__init__(ip, port)

    def handle(self, data):
        """Start a worker.

        Message format:

            {
                'image': 'image name'
                'numprocs': number of workers,
                'args': 'extra arguments for "docker run -d image ..."'
            }
        """
        image = data['image']
        supervisor.start(
            'worker.conf',
            target='worker_{}'.format(image),
            image=image,
            numprocs=data.get('numprocs', 1),
            args=data.get('args', ''))
        return {'status': 'ok'}
def main():
    """Run a MinionServer on all interfaces, port 1234, until it exits."""
    # Renamed the local from `server` to `minion`: it shadowed the
    # imported `server` module.
    minion = MinionServer('*', 1234)
    minion.start()
    minion.join()


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
# Hack so you don't have to put the library containing this script in the PYTHONPATH.
sys.path = [os.path.abspath(os.path.join(__file__, '..', '..'))] + sys.path
import argparse
import numpy as np
from os.path import join as pjoin
from smartlearner import views
from smartlearner.status import Status
from smartlearner import utils as smartutils
from convnade import datasets
from convnade.utils import Timer
from convnade.batch_schedulers import BatchSchedulerWithAutoregressiveMasks
from convnade.losses import NllUsingBinaryCrossEntropyWithAutoRegressiveMask
# Dataset names this evaluation script knows how to load.
DATASETS = ['binarized_mnist']
def build_argparser():
    """Create the command-line parser for exact-NLL evaluation."""
    DESCRIPTION = "Evaluate the exact NLL of a ConvNADE model."
    parser = argparse.ArgumentParser(description=DESCRIPTION)

    # Positional arguments.
    parser.add_argument('experiment', type=str, help='folder where to find a trained ConvDeepNADE model')
    parser.add_argument('subset', type=str, choices=['testset', 'validset'],
                        help='evaluate a specific subset (either "testset" or "validset").')
    parser.add_argument('batch_id', type=int, help='evaluate only a specific batch.')
    parser.add_argument('ordering_id', type=int, help='evaluate only a specific input ordering.')

    # Evaluation options.
    parser.add_argument('--batch-size', type=int, help='size of the batch to use when evaluating the model.', default=100)
    parser.add_argument('--nb-orderings', type=int, help='evaluate that many input orderings. Default: 128', default=128)
    parser.add_argument('--seed', type=int,
                        help="Seed used to choose the orderings. Default: 1234", default=1234)

    # Optional parameters
    parser.add_argument('-f', '--force', action='store_true', help='overwrite evaluation results')

    return parser
def compute_NLL(model, dataset, batch_size, batch_id, ordering_id, seed):
    """Compute per-example NLLs for one batch under one input ordering."""
    status = Status()
    scheduler = BatchSchedulerWithAutoregressiveMasks(
        dataset,
        batch_size=batch_size,
        batch_id=batch_id,
        ordering_id=ordering_id,
        use_mask_as_input=model.nb_channels == 2,
        seed=seed)
    loss_fn = NllUsingBinaryCrossEntropyWithAutoRegressiveMask(
        model, dataset, mod=scheduler.mod)
    nll_view = views.LossView(loss=loss_fn, batch_scheduler=scheduler)
    per_step_nlls = nll_view.losses.view(Status())
    # Fold the per-step conditional losses into one value per example
    # (columns of the reshaped array correspond to batch examples).
    return np.sum(per_step_nlls.reshape(-1, batch_size), axis=0)
def load_model(experiment_path):
    """Load a trained ConvNADE model from *experiment_path*.

    Tries each known model class in turn and returns the first that loads
    successfully.

    Raises
    ------
    NameError
        If no known model class can load the experiment.
    """
    with Timer("Loading model"):
        from convnade import DeepConvNadeUsingLasagne, DeepConvNadeWithResidualUsingLasagne
        from convnade import DeepConvNADE, DeepConvNADEWithResidual

        for model_class in [DeepConvNadeUsingLasagne, DeepConvNadeWithResidualUsingLasagne, DeepConvNADE, DeepConvNADEWithResidual]:
            try:
                return model_class.create(experiment_path)
            except Exception as e:
                # Best-effort probing: report why this class failed and
                # move on to the next candidate (intentionally broad).
                print(e)

        # Removed the unreachable `return None` that followed this raise.
        raise NameError("No model found!")
def main():
    """Evaluate the exact NLL of a trained ConvNADE model on one
    batch/ordering pair and print the mean NLL with its stderr."""
    parser = build_argparser()
    args = parser.parse_args()

    # Load experiments hyperparameters; fall back to the parent folder.
    # Fixed: the original used a bare `except:`, which also swallowed
    # KeyboardInterrupt and JSON decoding errors. Only a missing/unreadable
    # file should trigger the fallback.
    try:
        hyperparams = smartutils.load_dict_from_json_file(pjoin(args.experiment, "hyperparams.json"))
    except IOError:
        hyperparams = smartutils.load_dict_from_json_file(pjoin(args.experiment, '..', "hyperparams.json"))

    model = load_model(args.experiment)
    print(str(model))

    with Timer("Loading dataset"):
        trainset, validset, testset = datasets.load(hyperparams['dataset'], keep_on_cpu=True)

    # Result files.
    evaluation_dir = smartutils.create_folder(pjoin(args.experiment, "evaluation"))
    evaluation_file = pjoin(evaluation_dir, "{}_batch{}_ordering{}.npz".format(args.subset, args.batch_id, args.ordering_id))

    if not os.path.isfile(evaluation_file) or args.force:
        with Timer("Computing exact NLL on {} for batch #{} and ordering #{}".format(args.subset, args.batch_id, args.ordering_id)):
            dataset = validset
            if args.subset == "testset":
                dataset = testset

            nlls = compute_NLL(model, dataset, args.batch_size, args.batch_id, args.ordering_id, args.seed)
            results = {"nlls": nlls,
                       "subset": args.subset,
                       "batch_id": args.batch_id,
                       "nb_batches": int(np.ceil(len(dataset)/float(args.batch_size))),
                       "ordering_id": args.ordering_id,
                       "nb_orderings": args.nb_orderings,
                       "batch_size": args.batch_size,
                       "seed": args.seed}
            np.savez(evaluation_file, **results)
    else:
        print("Loading saved losses... (use --force to re-run evaluation)")
        nlls = np.load(evaluation_file)['nlls']

    avg_nlls = nlls.mean()
    stderror_nlls = nlls.std(ddof=1) / np.sqrt(len(nlls))
    print("NLL on {} for batch #{} and ordering #{}: {:.2f} ± {:.2f}".format(args.subset, args.batch_id, args.ordering_id, avg_nlls, stderror_nlls))


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from sqlalchemy import Column, text
from sqlalchemy.dialects.mysql import TIMESTAMP
class MysqlTimestampsMixin:
    """Mixin adding MySQL-managed created_at/updated_at TIMESTAMP columns."""

    # Set once by the database at INSERT time.
    created_at = Column(
        "created_at", TIMESTAMP, nullable=False, server_default=text("CURRENT_TIMESTAMP"),
    )
    # Refreshed by MySQL on every UPDATE (ON UPDATE CURRENT_TIMESTAMP);
    # indexed, presumably to support "recently changed" queries.
    updated_at = Column(
        "updated_at",
        TIMESTAMP,
        nullable=False,
        index=True,
        unique=False,
        server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"),
        server_onupdate=text("CURRENT_TIMESTAMP"),
    )
|
from django.core.management.base import BaseCommand
from users.models import Major, Minor, Course
from django.db import IntegrityError
from os import path
import json
class Command(BaseCommand):
    """Seed the Major, Minor and Course tables from bundled JSON fixtures.

    Each fixture is a JSON list of names, located two directories above
    this file. Rows that already exist (IntegrityError) are skipped, so
    the command is idempotent.
    """

    def _load_and_create(self, filename, model):
        # Shared loader: read the JSON name list and create one row per
        # name, ignoring duplicates. Replaces three copy-pasted loops.
        base_path = path.dirname(__file__)
        data_path = path.abspath(path.join(base_path, "..", "..", filename))
        with open(data_path) as data_file:
            names = json.load(data_file)
        for name in names:
            try:
                model(name=name).save()
            except IntegrityError:
                # Already seeded; keep going.
                pass

    def _create_majors(self):
        # Seed Major rows from majors.json.
        self._load_and_create("majors.json", Major)

    def _create_minors(self):
        # Seed Minor rows from minors.json.
        self._load_and_create("minors.json", Minor)

    def _create_courses(self):
        # Seed Course rows from courses.json.
        self._load_and_create("courses.json", Course)

    def handle(self, *args, **kwargs):
        self._create_majors()
        self._create_minors()
        self._create_courses()
import os
def is_windows():
    """Return True when running on Windows (``os.name == 'nt'``)."""
    return os.name in ('nt',)
def is_linux():
    """Return True on POSIX platforms (``os.name == 'posix'``)."""
    return os.name in ('posix',)
def is_raspberrypi():
    """Return True on a POSIX host whose hostname is 'raspberrypi'."""
    if not is_linux():
        return False
    return os.uname()[1] == 'raspberrypi'
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import pytest
from sentry import eventstore
from sentry.event_manager import EventManager
# Fixed span boundaries (epoch seconds) shared by every test payload.
START_TIME = 1562873192.624
END_TIME = 1562873194.624
@pytest.fixture
def make_spans_snapshot(insta_snapshot):
    """Fixture returning a helper that normalizes a spans payload and
    snapshots the resulting errors and serialized interface."""
    def inner(spans):
        manager = EventManager(data={"spans": spans})
        manager.normalize()
        event = eventstore.create_event(data=manager.get_data())
        spans_interface = event.interfaces.get("spans")
        insta_snapshot(
            {"errors": event.data.get("errors"), "to_json": spans_interface.to_json()}
        )

    return inner
def test_empty(make_spans_snapshot):
    # No spans at all: snapshot the normalized empty payload.
    make_spans_snapshot([])
def test_single_invalid(make_spans_snapshot):
    # Malformed trace/span ids: normalization should record errors.
    span = {
        "trace_id": "bad",
        "span_id": "bad",
        "start_timestamp": START_TIME,
        "timestamp": END_TIME,
    }
    make_spans_snapshot([span])
def test_single_incomplete(make_spans_snapshot):
    # Valid ids but no op/description/data/tags.
    span = {
        "trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
        "span_id": "8c931f4740435fb8",
        "start_timestamp": START_TIME,
        "timestamp": END_TIME,
    }
    make_spans_snapshot([span])
def test_single_full(make_spans_snapshot):
    # Fully populated span: op, description, data and tags all present.
    span = {
        "trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
        "span_id": "8c931f4740435fb8",
        "start_timestamp": START_TIME,
        "timestamp": END_TIME,
        "op": "http",
        "description": "GET http://example.com",
        "data": {"status_code": 200, "reason": "OK"},
        "tags": {"service": "example", "sentry:user": "id:1"},
    }
    make_spans_snapshot([span])
def test_multiple_full(make_spans_snapshot):
    # Two spans sharing one trace id.
    http_span = {
        "trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
        "span_id": "8c931f4740435fb8",
        "start_timestamp": START_TIME,
        "timestamp": END_TIME,
        "op": "http",
        "description": "GET http://example.com",
        "data": {"status_code": 200, "reason": "OK"},
        "tags": {"service": "example", "sentry:user": "id:1"},
    }
    db_span = {
        "trace_id": "a0fa8803753e40fd8124b21eeb2986b5",
        "span_id": "6c931f4740666fb6",
        "start_timestamp": START_TIME,
        "timestamp": END_TIME,
        "op": "db",
        "description": "SELECT * FROM users",
        "tags": {"service": "example"},
    }
    make_spans_snapshot([http_span, db_span])
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: docker_image_load
short_description: Load docker image(s) from archives
version_added: 1.3.0
description:
- Load one or multiple Docker images from a C(.tar) archive, and return information on
the loaded image(s).
options:
path:
description:
- The path to the C(.tar) archive to load Docker image(s) from.
type: path
required: true
extends_documentation_fragment:
- community.docker.docker
- community.docker.docker.docker_py_2_documentation
notes:
- Does not support C(check_mode).
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.5.0"
- "Docker API >= 1.23"
author:
- Felix Fontein (@felixfontein)
'''
EXAMPLES = '''
- name: Load all image(s) from the given tar file
community.docker.docker_image_load:
path: /path/to/images.tar
register: result
- name: Print the loaded image names
ansible.builtin.debug:
msg: "Loaded the following images: {{ result.image_names | join(', ') }}"
'''
RETURN = '''
image_names:
description: List of image names and IDs loaded from the archive.
returned: success
type: list
elements: str
sample:
- 'hello-world:latest'
- 'sha256:e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9'
images:
description: Image inspection results for the loaded images.
returned: success
type: list
elements: dict
sample: []
'''
import errno
import traceback
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common import (
AnsibleDockerClient,
DockerBaseClass,
is_image_name_id,
RequestException,
)
try:
from docker.errors import DockerException
except ImportError:
# missing Docker SDK for Python handled in module_utils.docker.common
pass
class ImageManager(DockerBaseClass):
    """Loads Docker images from a tar archive and records results.

    All work happens at construction time: the ``path`` module parameter
    is read and :meth:`load_images` is invoked immediately, filling the
    shared ``results`` dict.
    """

    def __init__(self, client, results):
        super(ImageManager, self).__init__()

        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        # Path to the .tar archive to load images from.
        self.path = parameters['path']

        self.load_images()

    @staticmethod
    def _extract_output_line(line, output):
        '''
        Extract text line from stream output and, if found, adds it to output.
        '''
        if 'stream' in line or 'status' in line:
            # Make sure we have a string (assuming that line['stream'] and
            # line['status'] are either not defined, falsish, or a string)
            text_line = line.get('stream') or line.get('status') or ''
            output.extend(text_line.splitlines())

    def load_images(self):
        '''
        Load images from a .tar archive
        '''
        # Load image(s) from file
        load_output = []
        try:
            self.log("Opening image {0}".format(self.path))
            with open(self.path, 'rb') as image_tar:
                self.log("Loading images from {0}".format(self.path))
                for line in self.client.load_image(image_tar):
                    self.log(line, pretty_print=True)
                    self._extract_output_line(line, load_output)
        except EnvironmentError as exc:
            if exc.errno == errno.ENOENT:
                # Archive missing entirely: no partial output to report.
                self.client.fail("Error opening archive {0} - {1}".format(self.path, to_native(exc)))
            self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))
        except Exception as exc:
            self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))

        # Collect loaded images: the daemon reports names and IDs on
        # distinct "Loaded image:" / "Loaded image ID:" lines.
        loaded_images = []
        for line in load_output:
            if line.startswith('Loaded image:'):
                loaded_images.append(line[len('Loaded image:'):].strip())
            if line.startswith('Loaded image ID:'):
                loaded_images.append(line[len('Loaded image ID:'):].strip())

        if not loaded_images:
            self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))

        # Inspect each loaded image, by ID or by name:tag.
        images = []
        for image_name in loaded_images:
            if is_image_name_id(image_name):
                images.append(self.client.find_image_by_id(image_name))
            elif ':' in image_name:
                image_name, tag = image_name.rsplit(':', 1)
                images.append(self.client.find_image(image_name, tag))
            else:
                self.client.module.warn('Image name "{0}" is neither ID nor has a tag'.format(image_name))

        self.results['image_names'] = loaded_images
        self.results['images'] = images
        self.results['changed'] = True
        self.results['stdout'] = '\n'.join(load_output)
def main():
    """Ansible entry point: load images from a tar archive and exit with
    the collected results."""
    client = AnsibleDockerClient(
        argument_spec=dict(
            path=dict(type='path', required=True),
        ),
        supports_check_mode=False,
        min_docker_version='2.5.0',
        min_docker_api_version='1.23',
    )

    try:
        # ImageManager mutates this dict in place.
        results = dict(image_names=[], images=[])
        ImageManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This module defines slider widgets interacting with the transparency
of an image on a :class:`PlotWidget`
Classes:
--------
- :class:`BaseAlphaSlider` (abstract class)
- :class:`NamedImageAlphaSlider`
- :class:`ActiveImageAlphaSlider`
Example:
--------
This widget can, for instance, be added to a plot toolbar.
.. code-block:: python
import numpy
from silx.gui import qt
from silx.gui.plot import PlotWidget
from silx.gui.plot.ImageAlphaSlider import NamedImageAlphaSlider
app = qt.QApplication([])
pw = PlotWidget()
img0 = numpy.arange(200*150).reshape((200, 150))
pw.addImage(img0, legend="my background", z=0, origin=(50, 50))
x, y = numpy.meshgrid(numpy.linspace(-10, 10, 200),
numpy.linspace(-10, 5, 150),
indexing="ij")
img1 = numpy.asarray(numpy.sin(x * y) / (x * y),
dtype='float32')
pw.addImage(img1, legend="my data", z=1,
replace=False)
alpha_slider = NamedImageAlphaSlider(parent=pw,
plot=pw,
legend="my data")
alpha_slider.setOrientation(qt.Qt.Horizontal)
toolbar = qt.QToolBar("plot", pw)
toolbar.addWidget(alpha_slider)
pw.addToolBar(toolbar)
pw.show()
app.exec_()
"""
__authors__ = ["P. Knobel"]
__license__ = "MIT"
__date__ = "24/03/2017"
import logging
from silx.gui import qt
_logger = logging.getLogger(__name__)
class BaseAlphaSlider(qt.QSlider):
    r"""Slider widget to be used in a plot toolbar to control the
    transparency of a plot primitive (image, scatter or curve).

    Internally, the slider stores its state as an integer between
    0 and 255. This is the value emitted by the :attr:`valueChanged`
    signal.

    The method :meth:`getAlpha` returns the corresponding opacity/alpha
    as a float between 0. and 1. (with a step of :math:`\frac{1}{255}`).

    You must subclass this class and implement :meth:`getItem`.
    """
    # NOTE: the docstring above is a raw string so that ``\frac`` is not
    # parsed as an escape sequence (``\f`` is a form feed otherwise).

    sigAlphaChanged = qt.Signal(float)
    """Emits the alpha value when the slider's value changes,
    as a float between 0. and 1."""

    def __init__(self, parent=None, plot=None):
        """
        :param parent: Parent QWidget
        :param plot: Parent plot widget
        """
        assert plot is not None
        super(BaseAlphaSlider, self).__init__(parent)

        self.plot = plot

        # Slider state is the alpha scaled to the integer range [0, 255].
        self.setRange(0, 255)

        # if already connected to an item, use its alpha as initial value
        if self.getItem() is None:
            self.setValue(255)
            self.setEnabled(False)
        else:
            alpha = self.getItem().getAlpha()
            self.setValue(round(255*alpha))

        self.valueChanged.connect(self._valueChanged)

    def getItem(self):
        """You must implement this class to define which item
        to work on. It must return an item that inherits
        :class:`silx.gui.plot.items.core.AlphaMixIn`.

        :return: Item on which to operate, or None
        :rtype: :class:`silx.plot.items.Item`
        """
        raise NotImplementedError(
                "BaseAlphaSlider must be subclassed to " +
                "implement getItem()")

    def getAlpha(self):
        """Get the opacity, as a float between 0. and 1.

        :return: Alpha value in [0., 1.]
        :rtype: float
        """
        return self.value() / 255.

    def _valueChanged(self, value):
        # Apply the new value to the item, then re-emit it rescaled
        # to [0., 1.].
        self._updateItem()
        self.sigAlphaChanged.emit(value / 255.)

    def _updateItem(self):
        """Update the item's alpha channel.
        """
        item = self.getItem()
        if item is not None:
            item.setAlpha(self.getAlpha())
class ActiveImageAlphaSlider(BaseAlphaSlider):
    """Slider widget to be used in a plot toolbar to control the
    transparency of the **active image**.

    :param parent: Parent QWidget
    :param plot: Plot on which to operate

    See documentation of :class:`BaseAlphaSlider`
    """
    def __init__(self, parent=None, plot=None):
        """
        :param parent: Parent QWidget
        :param plot: Plot widget on which to operate
        """
        super(ActiveImageAlphaSlider, self).__init__(parent, plot)
        plot.sigActiveImageChanged.connect(self._activeImageChanged)

    def getItem(self):
        # This slider always drives the plot's currently active image.
        return self.plot.getActiveImage()

    def _activeImageChanged(self, previous, new):
        """Activate or deactivate slider depending on presence of a new
        active image, then apply the current transparency value to it.

        :param previous: Legend of previous active image, or None
        :param new: Legend of new active image, or None
        """
        currently_enabled = self.isEnabled()
        if new is not None and not currently_enabled:
            self.setEnabled(True)
        elif new is None and currently_enabled:
            self.setEnabled(False)
        self._updateItem()
class NamedItemAlphaSlider(BaseAlphaSlider):
    """Slider widget to be used in a plot toolbar to control the
    transparency of an item (defined by its kind and legend).

    :param parent: Parent QWidget
    :param plot: Plot on which to operate
    :param str kind: Kind of item whose transparency is to be
        controlled: "scatter", "image" or "curve".
    :param str legend: Legend of item whose transparency is to be
        controlled.
    """
    def __init__(self, parent=None, plot=None,
                 kind=None, legend=None):
        self._item_legend = legend
        self._item_kind = kind

        super(NamedItemAlphaSlider, self).__init__(parent, plot)

        self._updateState()
        plot.sigContentChanged.connect(self._onContentChanged)

    def _onContentChanged(self, action, kind, legend):
        # Keep the slider enabled only while the watched item exists.
        if legend == self._item_legend and kind == self._item_kind:
            if action == "add":
                self.setEnabled(True)
            elif action == "remove":
                self.setEnabled(False)

    def _updateState(self):
        """Enable or disable widget based on item's availability."""
        if self.getItem() is not None:
            self.setEnabled(True)
        else:
            self.setEnabled(False)

    def getItem(self):
        """Return plot item currently associated to this widget (can be
        a curve, an image, a scatter...)

        :rtype: subclass of :class:`silx.gui.plot.items.Item`"""
        if self._item_legend is None or self._item_kind is None:
            return None
        return self.plot._getItem(kind=self._item_kind,
                                  legend=self._item_legend)

    def setLegend(self, legend):
        """Associate a different item (of the same kind) to the slider.

        :param legend: New legend of item whose transparency is to be
            controlled.
        """
        self._item_legend = legend
        self._updateState()

    def getLegend(self):
        """Return legend of the item currently controlled by this slider.

        :return: Item legend associated to the slider
        """
        # BUG FIX: previously returned self._item_kind.
        return self._item_legend

    def setItemKind(self, kind):
        """Associate a different kind of item to the slider.

        :param kind: New kind of item whose transparency is to be
            controlled ("image", "scatter"...).
        """
        # BUG FIX: previously assigned the argument to self._item_legend.
        self._item_kind = kind
        self._updateState()

    def getItemKind(self):
        """Return kind of the item currently controlled by this slider.

        :return: Item kind ("image", "scatter"...)
        :rtype: str or None
        """
        return self._item_kind
class NamedImageAlphaSlider(NamedItemAlphaSlider):
    """Toolbar slider controlling the transparency of a single image
    item, identified by its legend.

    :param parent: Parent QWidget
    :param plot: Plot on which to operate
    :param str legend: Legend of image whose transparency is to be
        controlled.
    """
    def __init__(self, parent=None, plot=None, legend=None):
        super(NamedImageAlphaSlider, self).__init__(
            parent, plot, kind="image", legend=legend)
class NamedScatterAlphaSlider(NamedItemAlphaSlider):
    """Toolbar slider controlling the transparency of a single scatter
    item, identified by its legend.

    :param parent: Parent QWidget
    :param plot: Plot on which to operate
    :param str legend: Legend of scatter whose transparency is to be
        controlled.
    """
    def __init__(self, parent=None, plot=None, legend=None):
        super(NamedScatterAlphaSlider, self).__init__(
            parent, plot, kind="scatter", legend=legend)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Users`` table."""
    # No prior migrations required.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Users',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Session token and basic identity.
                ('token', models.CharField(max_length=40)),
                ('username', models.CharField(max_length=20)),
                ('email', models.CharField(max_length=50)),
                # Third-party login provider and its OAuth tokens.
                ('login_service', models.CharField(max_length=20)),
                ('access_token', models.CharField(max_length=50)),
                ('refresh_token', models.CharField(max_length=50)),
                # Shipping / contact details.
                ('default_shipping_address', models.CharField(max_length=100)),
                ('phone_number', models.CharField(max_length=20)),
                # NOTE(review): presumably a national (ROC) ID number -- confirm.
                ('roc_id', models.CharField(max_length=10)),
                ('real_name', models.CharField(max_length=10)),
            ],
        ),
    ]
|
import math
import random
import sys
from threading import *
class Ant(Thread):
    """One ant in an Ant Colony System style TSP solver.

    Each ant runs in its own thread: it builds a complete tour over the
    colony's graph (applying the local pheromone update per edge) and
    reports the finished tour back to the colony.
    """
    def __init__(self, ID, start_node, colony, beta, q0, rho):
        Thread.__init__(self)
        self.ID = ID
        self.start_node = start_node
        self.colony = colony
        self.curr_node = self.start_node
        self.graph = self.colony.graph
        # Tour under construction; begins at the start node.
        self.path_vec = []
        self.path_vec.append(self.start_node)
        # Accumulated cost of the edges traversed so far.
        self.path_cost = 0
        # same meaning as in standard equations
        self.Beta = beta
        #self.Q0 = 1 # Q0 = 1 works just fine for 10 city case (no explore)
        self.Q0 = q0
        self.Rho = rho
        # store the nodes remaining to be explored here
        self.nodes_to_visit = {}
        for i in range(0, self.graph.num_nodes):
            if i != self.start_node:
                self.nodes_to_visit[i] = i
        # create n X n matrix 0'd out to start
        self.path_mat = []
        for i in range(0, self.graph.num_nodes):
            self.path_mat.append([0]*self.graph.num_nodes)
    # overide Thread's run()
    def run(self):
        """Build one complete tour, then report it to the colony."""
        graph = self.colony.graph
        while not self.end():
            # we need exclusive access to the graph
            graph.lock.acquire()
            new_node = self.state_transition_rule(self.curr_node)
            self.path_cost += graph.delta(self.curr_node, new_node)
            self.path_vec.append(new_node)
            self.path_mat[self.curr_node][new_node] = 1 #adjacency matrix representing path
            #print ("Ant %s : %s, %s" % (self.ID, self.path_vec, self.path_cost,))
            # Local pheromone update happens while the lock is still held.
            self.local_updating_rule(self.curr_node, new_node)
            graph.lock.release()
            self.curr_node = new_node
        # don't forget to close the tour
        self.path_cost += graph.delta(self.path_vec[-1], self.path_vec[0])
        # send our results to the colony
        self.colony.update(self)
        print ("Ant thread %s terminating." % (self.ID,))
        # allows thread to be restarted (calls Thread.__init__)
        # NOTE(review): the threading module does not support re-starting a
        # finished Thread; this re-__init__ trick is fragile -- confirm it
        # works under the targeted Python version.
        self.__init__(ID=self.ID, start_node=self.start_node, colony=self.colony, beta=self.Beta, q0=self.Q0, rho=self.Rho)
    def end(self):
        # Tour is complete once no unvisited nodes remain.
        return not self.nodes_to_visit
    # described in report -- determines next node to visit after curr_node
    def state_transition_rule(self, curr_node):
        """Pick the next node: with probability Q0 exploit the most
        attractive edge, otherwise explore among above-average edges."""
        graph = self.colony.graph
        q = random.random()
        max_node = -1
        if q < self.Q0:
            print ("Exploitation")
            max_val = -1
            val = None
            for node in self.nodes_to_visit.values():
                if graph.tau(curr_node, node) == 0:
                    raise Exception("tau = 0")
                # Edge attractiveness: pheromone * heuristic^Beta.
                val = graph.tau(curr_node, node) * math.pow(graph.etha(curr_node, node), self.Beta)
                if val > max_val:
                    max_val = val
                    max_node = node
        else:
            print ("Exploration")
            # NOTE(review): `sum` shadows the builtin within this branch.
            sum = 0
            node = -1
            for node in self.nodes_to_visit.values():
                if graph.tau(curr_node, node) == 0:
                    raise Exception("tau = 0")
                sum += graph.tau(curr_node, node) * math.pow(graph.etha(curr_node, node), self.Beta)
            if sum == 0:
                raise Exception("sum = 0")
            avg = sum / len(self.nodes_to_visit)
            #print ("avg = %s" % (avg,))
            # Picks the LAST node whose attractiveness exceeds the average
            # (an approximation of a roulette-wheel selection).
            for node in self.nodes_to_visit.values():
                p = graph.tau(curr_node, node) * math.pow(graph.etha(curr_node, node), self.Beta)
                if p > avg:
                    #print ("p = %s" % (p,))
                    max_node = node
            if max_node == -1:
                # Fallback: no node beat the average, take the last examined.
                max_node = node
        if max_node < 0:
            raise Exception("max_node < 0")
        # Mark the chosen node as visited.
        del self.nodes_to_visit[max_node]
        return max_node
    # phermone update rule for indiv ants
    def local_updating_rule(self, curr_node, next_node):
        """ACS local update: decay the traversed edge's pheromone toward tau0."""
        graph = self.colony.graph
        val = (1 - self.Rho) * graph.tau(curr_node, next_node) + (self.Rho * graph.tau0)
        graph.update_tau(curr_node, next_node, val)
|
# Collect a user-defined number of strings into a list.
a = []
list_size = int(input("Please enter the size of List: "))
for _ in range(list_size):
    entry = str(input("Please enter your desired string: "))
    a.append(entry)
print("You entered", a, "as list.")
# Flatten every entered string into per-character ASCII codes.
# ord() accepts a length-1 string and returns its code point.
b = [ord(ch) for text in a for ch in text]
print("The converted string into the ASCII Character is: ", b)
# Map each ASCII code to its binary representation via bin().
c = [bin(code) for code in b]
print("The coneverted ASCII Characters into Binary digits are: ", c)
import argparse
from Bio import pairwise2
import numpy as np
from itertools import combinations
import os

parser = argparse.ArgumentParser()
parser.add_argument("-t", "--folderToWrite", type=str,
                    help="Input a folder in which you want to write files into")
parser.add_argument("-m", "--motifType", type=str,
                    help="Input one of the of the motif types: ESE, ESS, ISE, ISS")
args = parser.parse_args()

TMPFOLDER = args.folderToWrite
motiftype = args.motifType

if not os.path.exists(TMPFOLDER):
    os.makedirs(TMPFOLDER)

# Open up motif file: each line holds one motif.
with open("../../data/" + motiftype + "_Overrepresented.tsv") as f:
    motifs = [line.strip() for line in f]

# Get every unordered pair of motifs.
motifcombinations = [comb for comb in combinations(motifs, 2)]

# For each motif pair, perform a local alignment and compute the
# dissimilarity: number of shifts plus mismatches in the best local
# alignment of the two hexamers.
motif_dissimilarity = []
for c in motifcombinations:
    # Local alignment scores: match=5, mismatch=-4, gap open=-2, extend=-0.5
    alignments = pairwise2.align.localms(c[0], c[1], 5, -4, -2, -0.5)
    # 20 exceeds any possible score for hexamer alignments (length <= 12),
    # so it serves as "infinity" for the running minimum.
    min_score = 20
    for a in alignments:
        seq1 = a[0]
        seq2 = a[1]
        # Count every gap or mismatched column of this alignment.
        s = 0
        for i in range(len(seq1)):
            if seq1[i] == '-' or seq2[i] == '-' or seq1[i] != seq2[i]:
                s += 1
        # Keep the lowest score seen so far.
        if s < min_score:
            min_score = s
    # Record the two motifs and their minimal dissimilarity score.
    motif_dissimilarity.append([c[0], c[1], str(min_score)])

# Write one "motifA<TAB>motifB<TAB>score" line per pair.
# Bug fix: use os.path.join so the output lands inside TMPFOLDER even when
# the argument lacks a trailing path separator (previously the strings were
# concatenated directly, writing e.g. "outESE_..." into the CWD).
with open(os.path.join(TMPFOLDER, motiftype + "_DissimilarityScore.tsv"), 'w') as fw:
    for m in motif_dissimilarity:
        fw.write("\t".join(m))
        fw.write("\n")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercise 9.14 from Kane 1985."""
from __future__ import division
from sympy import expand, symbols
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import dot, dynamicsymbols
from util import msprint, partial_velocities
from util import function_from_partials, generalized_active_forces
q1, q2, q3, q4, q5, q6 = q = dynamicsymbols('q1:7')
u1, u2, u3, u4, u5, u6 = u = dynamicsymbols('u1:7')
alpha, beta = symbols('α β')

# reference frames
A = ReferenceFrame('A')
B = A.orientnew('B', 'body', [q1, q2, q3], 'xyz')

# define points
pO = Point('O')
pP = pO.locatenew('P', q1*A.x + q2*A.y + q3*A.z)
pP.set_vel(A, pP.pos_from(pO).dt(A))

# kinematic differential equations: dq_i/dt = u_i
kde_map = dict(zip([qi.diff() for qi in q], u))

# forces: viscous damping on point P, viscous torque on frame B
forces = [(pP, -beta * pP.vel(A))]
torques = [(B, -alpha * B.ang_vel_in(A))]
# Bug fix: zip() returns a non-subscriptable iterator in Python 3;
# materialize it as a list before indexing (also valid on Python 2).
partials_c = partial_velocities(list(zip(*(forces + torques)))[0], u, A, kde_map)
Fr_c, _ = generalized_active_forces(partials_c, forces + torques)

# Dissipation function reconstructed from the contributing forces.
# Bug fix: use a list comprehension instead of a bare map() so the
# argument is a concrete sequence on Python 3 as well.
dissipation_function = function_from_partials(
    [0 if x == 0 else -x.subs(kde_map) for x in Fr_c],
    u,
    zero_constants=True)

from sympy import simplify, trigsimp
dissipation_function = trigsimp(dissipation_function)
#print('ℱ = {0}'.format(msprint(dissipation_function)))

omega2 = trigsimp(dot(B.ang_vel_in(A), B.ang_vel_in(A)).subs(kde_map))
v2 = trigsimp(dot(pP.vel(A), pP.vel(A)).subs(kde_map))
# Substitute ω², v² symbols for pretty printing.
sym_map = dict(zip([omega2, v2], [s**2 for s in symbols('ω v')]))
#print('ω**2 = {0}'.format(msprint(omega2)))
#print('v**2 = {0}'.format(msprint(v2)))
print('ℱ = {0}'.format(msprint(dissipation_function.subs(sym_map))))

# Expected result from Kane 1985: ℱ = (α·ω² + β·v²)/2
dissipation_function_expected = (alpha * omega2 + beta * v2) / 2
assert expand(dissipation_function - dissipation_function_expected) == 0
|
#
# Copyright (C) 2020 Satoru SATOH <satoru.satoh@gmail.com>.
# SPDX-License-Identifier: MIT
#
# ref. https://serverspec.org/resource_types.html#group
# ref. https://testinfra.readthedocs.io/en/latest/modules.html#group
#
"""User related tests.
"""
import typing
from ..common import get_group_by_name
def get(name: str) -> typing.Optional[typing.Mapping]:
    """
    Look up a group by name.

    :param name: Name of the group
    :return: Mapping describing the group, or None when not found
    """
    info = get_group_by_name(name)
    return info
def get_gid(name: str) -> typing.Optional[int]:
    """
    Return the gid of the group `name`, or None when the group
    does not exist.
    """
    info = get(name)
    return None if info is None else info["gid"]
def exist(name: str) -> bool:
    """
    Tell whether the group `name` exists.
    """
    found = get(name)
    return found is not None
def have_gid(name: str, gid: int) -> bool:
    """
    Check whether the gid of the group `name` equals `gid`.

    Returns False when the group does not exist.
    """
    info = get(name)
    return info is not None and info["gid"] == gid
# Aliases and other callables.
# `exists` mirrors serverspec's resource naming; both spellings are public.
# pylint: disable=invalid-name
exists = exist
# vim:sw=4:ts=4:et:
|
import uvicore
from uvicore.typing import Dict
from uvicore.package import ServiceProvider
from uvicore.support.dumper import dump, dd
from uvicore.console.provider import Cli
from uvicore.support.module import load
from uvicore.console import group as cli_group
from uvicore.console import bootstrap
from uvicore.foundation.events import app as AppEvents
@uvicore.provider()
class Console(ServiceProvider, Cli):
    """Console (CLI) service provider for uvicore.

    Registers the console bootstrap event listener and this package's
    schematic-generator commands.
    """
    def register(self) -> None:
        """Register IoC bindings and event listeners."""
        # Register IoC bindings
        #from uvicore.console.console import cli
        #dump('service------------')
        # self.bind('uvicore.console.console.cli', 'uvicore.console.console.cli',
        #     aliases=['Console', 'console', 'cli', 'cli2']
        # )
        # self.bind('Console', 'uvicore.console.console.cli',
        #     aliases=['uvicore.console.console.cli', 'console', 'cli', 'cli2']
        # )
        # Register event listeners
        # Priority is 90 because we want the console to be bootstrapped after most
        # other uvicore services like Database. If database is not initialized, and you import
        # a DB model/table from a command, it will error because the connection strings have not yet
        # been initialized.
        # NOTE(review): the comment above says priority 90 but the listener is
        # registered with priority=60 -- confirm which value is intended.
        AppEvents.Booted.listen(bootstrap.Console, priority=60)
        #uvicore.events.listen('uvicore.foundation.events.app.Booted', bootstrap.Console, priority=60)
        #uvicore.events.listen('uvicore.foundation.events.app.*', bootstrap.Console, priority=60)
    def boot(self) -> None:
        """Define the commands this package contributes."""
        # Define service provider registration control
        # No - Never allow this packages registrations to be disabled from other configs
        # Define commands
        self.commands(
            group='gen',
            help='Generate New Schematics (commands, models, views...)',
            commands={
                'command': 'uvicore.console.commands.generators.command'
            }
        )
|
from decouple import config
# Sanic web server settings; values come from environment variables
# (or a .env file) via python-decouple, with `default` applied when unset.
SANIC_HOST = config("SANIC_HOST", default="0.0.0.0")
SANIC_PORT = config("SANIC_PORT", default="8000", cast=int)
SANIC_DEBUG = config("SANIC_DEBUG", default="False", cast=bool)
# PostgreSQL connection settings; no defaults, so these variables are
# required and decouple raises if any is missing.
POSTGRES_DB = config("POSTGRES_DB")
POSTGRES_HOST = config("POSTGRES_HOST")
POSTGRES_PASSWORD = config("POSTGRES_PASSWORD")
POSTGRES_PORT = config("POSTGRES_PORT", cast=int)
POSTGRES_USER = config("POSTGRES_USER")
|
import os
import numpy as np
import pandas as pd
from keras import models, optimizers, backend
from keras.layers import core, convolutional, pooling
from sklearn import model_selection,utils
from dataPreprocessing import generate_samples, preprocess
if __name__ == '__main__':
    # Read splitted data
    df_train = pd.read_csv('train.csv')
    df_valid = pd.read_csv('test.csv')
    # CNN Model Architecture: three conv+maxpool stages, then dense layers
    # down to a single continuous output (regression, MSE loss).
    # NOTE(review): this uses the Keras 1.x API (Convolution2D(n, 3, 3),
    # fit_generator with samples_per_epoch/nb_epoch, Adam(lr=...)) --
    # keep the pinned Keras version or port before upgrading.
    model = models.Sequential()
    model.add(convolutional.Convolution2D(16, 3, 3, input_shape=(32, 128, 3), activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.compile(optimizer=optimizers.Adam(lr=1e-04), loss='mean_squared_error')
    # load the exist model (resume training from previously saved weights)
    model.load_weights("model.h5")
    history = model.fit_generator(# continue training model for 17 epochs
                                  generate_samples(df_train, ''),
                                  samples_per_epoch=df_train.shape[0],
                                  nb_epoch=17,#0.016
                                  validation_data=generate_samples(df_valid, '', augment=False),
                                  nb_val_samples=df_valid.shape[0],
                                  )
    with open(os.path.join('', 'model.json'), 'w') as file: # save trained model
        file.write(model.to_json())
    backend.clear_session()
from django import template
from django.db.models import Count
from django.db.models.functions import TruncMonth
# Template-tag registry for this module; tags/filters defined later in the
# file attach to this Library instance via @register decorators.
# NOTE(review): Count and TruncMonth are unused in this chunk -- presumably
# used by tags defined further down the file; confirm before removing.
register = template.Library()
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.destructibles.DistributedBarrel
from pirates.piratesbase.PiratesGlobals import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from pirates.piratesbase import PiratesGlobals
from direct.distributed import DistributedObject
from pirates.piratesbase import PLocalizer
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.effects import ShipSplintersA
from pirates.destructibles import DistributedDestructibleObject
class DistributedBarrel(DistributedDestructibleObject.DistributedDestructibleObject):
    """Client-side destructible barrel prop (decompiled source).

    Combines a distributed object with a NodePath so the barrel can be
    parented directly into the scene graph, take ammo hits, play damage
    effects, and respawn.
    """
    __module__ = __name__
    notify = directNotify.newCategory('DistributedBarrel')
    def __init__(self, cr):
        DistributedDestructibleObject.DistributedDestructibleObject.__init__(self, cr)
        # Also initialize the NodePath half of this object and attach an
        # empty 'barrel' node under `hidden` until the model is loaded.
        NodePath.__init__(self)
        self.assign(hidden.attachNewNode('barrel'))
        self.Hp = 0          # current hit points
        self.maxHp = 0       # maximum hit points
        self.parentId = None
        self.HpDisplay = None
        self.prop = None     # loaded barrel model (set in loadModel)
        self.modelType = 0
        return
    def load(self):
        """Load the barrel model and show its HP display."""
        self.loadModel()
        self.displayHp()
    def loadModel(self):
        """Load the barrel prop and make its collision solids hittable by ammo."""
        self.prop = loader.loadModel('models/props/barrel')
        self.coll = self.prop.findAllMatches('**/collision*')
        for c in self.coll:
            # Extend the existing into-collide mask with the ammo bitmask and
            # tag the solids so hits can be routed back to this object.
            self.curMask = c.node().getIntoCollideMask()
            c.setCollideMask(PiratesGlobals.AmmoBitmask | self.curMask)
            c.setTag('objType', str(PiratesGlobals.COLL_DESTRUCTIBLE))
            c.setTag('propId', str(self.doId))
        self.prop.reparentTo(self)
    def playDamage(self, pos):
        """Play a splinter effect at `pos` when the barrel takes damage."""
        if base.cr.wantSpecialEffects:
            shipSplintersAEffect = ShipSplintersA.getEffect()
            if shipSplintersAEffect:
                shipSplintersAEffect.reparentTo(render)
                shipSplintersAEffect.setPos(pos)
                shipSplintersAEffect.play()
    def playDeath(self):
        """Hide the prop and disable ammo collisions once destroyed."""
        if self.prop != None:
            self.prop.hide()
            for c in self.coll:
                c.setCollideMask(PiratesGlobals.AmmoBitmask.allOff())
        return
    def respawn(self):
        """Re-show the prop and restore its collision mask."""
        if self.prop != None:
            self.prop.show()
            for c in self.coll:
                c.setCollideMask(PiratesGlobals.AmmoBitmask | self.curMask)
        return
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Jan, 2019
All the information shall come from the wssubjson and the
wssubsoillulatlon file.
@author: qyfen
"""
#######################################################
# Environment setting
#######################################################
import sys,os
import time
import json
import copy
#######################################################
# Input and output file defining
#######################################################
fddata = sys.argv[1]          # base data folder (e.g. 'devfld')
fvtiledep = sys.argv[2]       # tile drainage depth to install (kept as string)
scenariofdname = sys.argv[3]  # scenario sub-folder name under apexruns/
#fvtiledep = '1.00'
#fddata = 'devfld'
# Folder holding this scenario's APEX run files.
fdapexrun = os.path.join(
    fddata,
    'apexruns',
    scenariofdname
)
# Subarea configuration json (rewritten in place at the end of the run).
fin_runsubjson = os.path.join(
    fdapexrun,
    'runsub.json'
)
# Soil configuration json.
fin_runsoljson = os.path.join(
    fdapexrun,
    'runsol.json'
)
# Soil / landuse / lat-lon lookup json.
fin_soillujson = os.path.join(
    fdapexrun,
    'wssubsollulatlon.json'
)
#######################################################
# Defining classes
#######################################################
#class apexfuncs():
class apexsubs():
    """Install tile drainage in an APEX subarea (SUB) configuration.

    Reads the run's SUB and SOL json files plus the soil/landuse lookup,
    then sets the tile-drainage depth for every subarea whose soil
    hydrologic group is C or D. The result is held in `self.modsubjson`.
    """
    def __init__(self):
        """Constructor: load inputs and build the modified SUB json."""
        # Read in the json files into dicts.
        self.subjson = self.read_json(fin_runsubjson)
        self.soljson = self.read_json(fin_runsoljson)
        # Map each soil mukey to its hydrologic soil group (HSG).
        self.solmkhsg = self.getMkHSG(self.soljson, {})
        # Soil / landuse lookup keyed by soil id.
        self.solluson = self.read_json(fin_soillujson)
        # SUB json with tile drainage installed where appropriate.
        self.modsubjson = self.modifySUBjson(self.solluson,
                                             self.solmkhsg,
                                             self.subjson)
    def modifySUBjson(self, solluson, solmkhsg, subjson):
        """Return a deep copy of `subjson` with the tile-drainage depth
        set for every subarea whose soil is in hydrologic group C or D.

        :param solluson: soil/landuse lookup, soil id -> record with 'mukey'
        :param solmkhsg: mukey -> hydrologic soil group letter
        :param subjson: original SUB configuration (left unmodified)
        """
        modsubjs = copy.deepcopy(subjson)
        for wsk, wsv in subjson.items():
            for subidx in range(len(wsv["model_setup"]["subid_snum"])):
                solid = wsv["soil"]["soilid"][subidx]
                soilhsg = solmkhsg[solluson[str(solid)]['mukey']]
                # Poorly drained soils (HSG C or D) get tile drainage.
                if soilhsg in ('C', 'D'):
                    modsubjs[wsk]["drainage"][
                        "drainage_depth_idr"][subidx] = fvtiledep
        return modsubjs
    def getMkHSG(self, soljson, solmkhsg):
        """Fill and return `solmkhsg`, mapping mukey -> hydrologic group.

        Dual-class groups such as "B/D" are reduced to their first letter
        (the mutation is applied to `soljson` in place, as before).
        """
        for k, v in soljson.items():
            if "/" in v["line2"]["hydrologicgroup_hsg"]:
                v["line2"]["hydrologicgroup_hsg"] = \
                    v["line2"]["hydrologicgroup_hsg"].split("/")[0]
            solmkhsg[k] = v["line2"]["hydrologicgroup_hsg"]
        return solmkhsg
    def read_json(self, jsonname):
        """Load a json file and return the parsed object.

        Cleanup: `with` already closes the file, so the redundant explicit
        close() was removed, and json.load reads the handle directly.
        """
        with open(jsonname) as json_file:
            return json.load(json_file)
#######################################################
# Call Functions
#######################################################
start_time = time.time()
#apexfuncs = apexfuncs()
apexsubs = apexsubs()
# Rewrite runsub.json in place with the tile-drainage modifications.
# Fix: mode 'w' already truncates the file, so the previous os.remove()
# call was redundant -- and raised FileNotFoundError if the file was
# missing -- and has been dropped.
with open(fin_runsubjson, 'w') as outfile:
    json.dump(apexsubs.modsubjson, outfile)
print("--- %s seconds ---" % (time.time() - start_time))
#######################################################
|
# pieces
from Classes import *
# Board coordinates are (row, col): row 0 is black's back rank,
# row 7 is white's back rank, matching the constructors below.
# black pieces: back rank (row 0), then pawns (row 1)
black_a_rook = Rook("black", (0, 0))
black_b_knight = Knight("black", (0, 1))
black_c_bishop = Bishop("black", (0, 2))
black_d_queen = Queen("black", (0, 3))
black_e_king = King("black", (0, 4))
black_f_bishop = Bishop("black", (0, 5))
black_g_knight = Knight("black", (0, 6))
black_h_rook = Rook("black", (0, 7))
black_a_pawn = Pawn("black", (1, 0))
black_b_pawn = Pawn("black", (1, 1))
black_c_pawn = Pawn("black", (1, 2))
black_d_pawn = Pawn("black", (1, 3))
black_e_pawn = Pawn("black", (1, 4))
black_f_pawn = Pawn("black", (1, 5))
black_g_pawn = Pawn("black", (1, 6))
black_h_pawn = Pawn("black", (1, 7))
# white pieces: back rank (row 7), pawns on row 6
white_a_rook = Rook("white", (7, 0))
white_b_knight = Knight("white", (7, 1))
white_c_bishop = Bishop("white", (7, 2))
white_d_queen = Queen("white", (7, 3))
white_e_king = King("white", (7, 4))
white_f_bishop = Bishop("white", (7, 5))
white_g_knight = Knight("white", (7, 6))
white_h_rook = Rook("white", (7, 7))
white_a_pawn = Pawn("white", (6, 0))
white_b_pawn = Pawn("white", (6, 1))
white_c_pawn = Pawn("white", (6, 2))
white_d_pawn = Pawn("white", (6, 3))
white_e_pawn = Pawn("white", (6, 4))
white_f_pawn = Pawn("white", (6, 5))
white_g_pawn = Pawn("white", (6, 6))
white_h_pawn = Pawn("white", (6, 7))
# All 32 pieces in a single immutable collection for iteration.
pieces = (black_a_rook,
          black_b_knight,
          black_c_bishop,
          black_d_queen,
          black_e_king,
          black_f_bishop,
          black_g_knight,
          black_h_rook,
          black_a_pawn,
          black_b_pawn,
          black_c_pawn,
          black_d_pawn,
          black_e_pawn,
          black_f_pawn,
          black_g_pawn,
          black_h_pawn,
          white_a_rook,
          white_b_knight,
          white_c_bishop,
          white_d_queen,
          white_e_king,
          white_f_bishop,
          white_g_knight,
          white_h_rook,
          white_a_pawn,
          white_b_pawn,
          white_c_pawn,
          white_d_pawn,
          white_e_pawn,
          white_f_pawn,
          white_g_pawn,
          white_h_pawn,
          )
|
class ExceptionResourceTracker:
    """Namespace grouping the exceptions raised by the resource tracker."""

    class AttributeAlreadyExist(Exception):
        """Raised when an attribute is added to a resource group that
        already has one with the same name."""

        def __init__(self, resource_group_name, attribute_name):
            message = f"Attribute {attribute_name} already exist in {resource_group_name}"
            super().__init__(message)
|
# Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from docstrings_docref_multi import DocstringsDocrefMulti
class TestDocstringsDocrefMulti(unittest.TestCase):
    """Autogenerated (KST) smoke test for the DocstringsDocrefMulti parser."""
    # Parsing the fixture must succeed; no field values are asserted,
    # hence the bare `pass` inside the context manager.
    def test_docstrings_docref_multi(self):
        with DocstringsDocrefMulti.from_file('src/fixed_struct.bin') as r:
            pass
|
"""
Class String Helper
@author Moch Nurhalimi Zaini D <moch.nurhalimi@gmail.com>
@author Irfan Andriansyah <irfan@99.co>
"""
import base64
class StringHelper:
    """
    Helper for transform string
    """

    @staticmethod
    def decode_base64(text):
        """Decode base64, padding being optional.

        Usage
            self.decode_base64('string base64')

        :param text: (String) Base64 data as an ASCII byte string
        :return: (bytes) Decoded data
        """
        # Bug fix: the previous expression `4 - len(text) % 4` yielded 4
        # (never 0) for already-padded input, appending a spurious '===='
        # block and relying on binascii quietly ignoring it.
        # `-len(text) % 4` is 0..3: exactly the padding actually missing.
        missing_padding = -len(text) % 4
        text += '=' * missing_padding
        return base64.b64decode(text)

    @staticmethod
    def url_to_dict(url):
        """Parsing url into dictionary

        Usage
            self.url_to_dict('???a=1&b=2')

        :param url: (String) Url parameter
        :return: (dict) key -> [value] mapping of the query pairs
        """
        # The first 3 characters are skipped (leading marker / scheme stub).
        # NOTE(review): a pair without '=' makes `test` return False, which
        # dict() rejects with a TypeError -- confirm inputs are well-formed.
        text = url[3:]
        return dict([StringHelper.test(item, '=') for item in text.split('&')])

    @staticmethod
    def test(item, separate):
        """Text split based on separate parameter

        Usage
            self.test('a=1', '=')

        :param item: (String) String to split
        :param separate: (String) Separator
        :return: (key, [value]) tuple, or False when no separator is present
        """
        param = item.split(separate)
        if len(param) >= 2:
            return (param[0], [param[1]])
        return False

    @staticmethod
    def get_keys_refr():
        """
        Get key and value referal site
        """
        return {
            'Google': 'google',
            'Facebok': ['fb', 'facebook'],
            'Urbanindo': ['99.co', 'urbanindo']
        }

    def parse_refr_site(self):
        """
        Print every referal-site key/value pair.
        """
        for key, value in self.get_keys_refr().items():
            print(key, value)

    @staticmethod
    def get_refr_site(key, value):
        """
        Get key and value referal site

        NOTE(review): body is empty (implicitly returns None) -- looks
        unfinished; confirm intended behavior before relying on it.
        """
if __name__ == "__main__":
    # Fix: use a distinct local name instead of shadowing the StringHelper
    # class with one of its own instances.
    helper = StringHelper()
    helper.parse_refr_site()
|
import time
import torch
from cubework.module.loss.loss_3d import CrossEntropyLoss3D, VocabParallelCrossEntropyLoss3D
from cubework.module.module_std import ClassifierSTD, PatchEmbeddingSTD
from cubework.module.parallel_3d import (
Classifier3D,
Embedding3D,
LayerNorm3D,
Linear3D,
PatchEmbedding3D,
VocabParallelClassifier3D,
VocabParallelEmbedding3D,
)
from cubework.module.parallel_3d._utils import (
get_input_parallel_mode,
get_output_parallel_mode,
get_weight_parallel_mode,
)
from cubework.utils import get_current_device, get_logger
# Per-axis size of the 3D tensor-parallel process cube (DEPTH^3 ranks).
DEPTH = 2
# Small tensor dimensions so each test runs quickly.
BATCH_SIZE = 8
SEQ_LENGTH = 8
HIDDEN_SIZE = 8
NUM_CLASSES = 8
NUM_BLOCKS = 2
IMG_SIZE = 16
VOCAB_SIZE = 16
def check_equal(A, B):
    """Assert two tensors match within loose tolerances; return the result."""
    matches = torch.allclose(A, B, rtol=1e-3, atol=1e-2)
    assert matches
    return matches
def check_linear():
    """Check Linear3D forward/backward against torch.nn.Linear.

    Shards the master weight, bias and input across the 3D process cube,
    runs forward+backward on both layers, compares output and gradients,
    and returns (forward_time, backward_time).
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    OUTPUT_SIZE = 2 * HIDDEN_SIZE
    # This rank's coordinates (i, j, k) in the 3D cube.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    layer = Linear3D(INPUT_SIZE, OUTPUT_SIZE, dtype=dtype, bias=True)
    layer = layer.to(device)
    layer_master = torch.nn.Linear(INPUT_SIZE, OUTPUT_SIZE)
    layer_master = layer_master.to(device)
    # Broadcast the master weight, then copy this rank's shard into the
    # parallel layer (chunk order k, j, i matches Linear3D's layout).
    weight_master = layer_master.weight.data.transpose(0, 1)
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[k]
    weight = torch.chunk(weight, DEPTH, dim=-1)[j]
    weight = torch.chunk(weight, DEPTH, dim=-1)[i]
    layer.weight.data.copy_(weight)
    bias_master = layer_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    bias = torch.chunk(bias_master, DEPTH)[j]
    layer.bias.data.copy_(bias)
    # Build a shared random input and take this rank's shard.
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[k]
    A = torch.chunk(A, DEPTH, dim=0)[j]
    A = A.clone()
    A.requires_grad = True
    # Timed forward pass on the parallel layer.
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info("linear forward: {0} --> {1} | {2:.3f} s".format(tuple(A.shape), tuple(out.shape), fwd_end - fwd_start))
    # Reference forward on the master layer; compare this rank's shard.
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    C = torch.chunk(C, DEPTH, dim=0)[k]
    logger.info("Rank {} linear forward: {}".format(rank, check_equal(out, C)))
    # Shared upstream gradient, sharded the same way as the output.
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = torch.chunk(grad, DEPTH, dim=0)[k]
    # Timed backward pass on the parallel layer.
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("linear backward: {:.3f} s".format(bwd_end - bwd_start))
    # Reference backward; compare input, weight and bias gradients.
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
    logger.info("Rank {} linear backward (input_grad): {}".format(rank, check_equal(A_grad, A.grad)))
    B_grad = layer_master.weight.grad.transpose(0, 1)
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[i]
    logger.info("Rank {} linear backward (weight_grad): {}".format(rank, check_equal(B_grad, layer.weight.grad)))
    bias_grad = layer_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[j]
    logger.info("Rank {} linear backward (bias_grad): {}".format(rank, check_equal(bias_grad, layer.bias.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_layernorm():
    """Check LayerNorm3D forward/backward against torch.nn.LayerNorm.

    Shards the master affine parameters and input across the 3D process
    cube, compares output and gradients with the reference layer, and
    returns (forward_time, backward_time).
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    # This rank's coordinates (i, j, k) in the 3D cube.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    norm = LayerNorm3D(INPUT_SIZE, eps=1e-6, dtype=dtype)
    norm = norm.to(device)
    norm_master = torch.nn.LayerNorm(INPUT_SIZE, eps=1e-6)
    norm_master = norm_master.to(device)
    # Broadcast and shard the affine weight/bias along the hidden dim.
    weight_master = norm_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH)[k]
    norm.weight.data.copy_(weight)
    bias_master = norm_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    bias = torch.chunk(bias_master, DEPTH)[k]
    norm.bias.data.copy_(bias)
    # Shared random input, sharded for this rank.
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[k]
    A = torch.chunk(A, DEPTH, dim=0)[j]
    A = A.clone()
    A.requires_grad = True
    # Timed forward pass on the parallel norm.
    fwd_start = time.time()
    out = norm(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "layer norm forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
        )
    )
    # Reference forward; compare this rank's shard of the output.
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = norm_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info("Rank {} layernorm forward: {}".format(rank, check_equal(out, C)))
    # Shared upstream gradient, sharded like the output.
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    # Timed backward pass.
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("layer norm backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    # Reference backward; compare input, weight and bias gradients.
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
    logger.info("Rank {} layernorm backward (input_grad): {}".format(rank, check_equal(A_grad, A.grad)))
    bias_grad = norm_master.weight.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[k]
    logger.info("Rank {} layernorm backward (weight_grad): {}".format(rank, check_equal(bias_grad, norm.weight.grad)))
    bias_grad = norm_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[k]
    logger.info("Rank {} layernorm backward (bias_grad): {}".format(rank, check_equal(bias_grad, norm.bias.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_classifier_no_given_weight():
    """Check Classifier3D (owning its own weight) against ClassifierSTD.

    Shards the master weight and input across the 3D process cube, runs
    forward+backward on both classifiers, compares output and gradients,
    and returns (forward_time, backward_time).
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    # This rank's coordinates (i, j, k) in the 3D cube.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    layer = Classifier3D(INPUT_SIZE, NUM_CLASSES, dtype=dtype, bias=True)
    layer = layer.to(device)
    layer_master = ClassifierSTD(INPUT_SIZE, NUM_CLASSES, bias=True, dtype=dtype)
    layer_master = layer_master.to(device)
    # Broadcast the master weight; the classifier's weight is sharded only
    # along the hidden dimension, and the bias is replicated.
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    bias_master = layer_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    layer.bias.data.copy_(bias_master)
    # Shared random input, sharded for this rank.
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[k]
    A = torch.chunk(A, DEPTH, dim=0)[j]
    A = A.clone()
    A.requires_grad = True
    # Timed forward pass on the parallel classifier.
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "classifier (no given weight) forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
        ),
    )
    # Reference forward; class-logit output is only sharded over batch dims.
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info("Rank {} classifier (no given weight) forward: {}".format(rank, check_equal(out, C)))
    # Shared upstream gradient, sharded like the output.
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    # Timed backward pass.
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("classifier (no given weight) backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    # Reference backward; compare input, weight and bias gradients.
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
    logger.info(
        "Rank {} classifier (no given weight) backward (input_grad): {}".format(rank, check_equal(A_grad, A.grad))
    )
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    # Only ranks on the j == k diagonal hold a weight gradient shard;
    # elsewhere the parallel layer's weight.grad stays None.
    if j == k:
        logger.info(
            "Rank {} classifier (no given weight) backward (weight_grad): {}".format(
                rank, check_equal(B_grad, layer.weight.grad)
            )
        )
    else:
        logger.info(
            "Rank {} classifier (no given weight) backward (weight_grad): {}".format(rank, layer.weight.grad is None)
        )
    bias_grad = layer_master.bias.grad
    logger.info(
        "Rank {} classifier (no given weight) backward (bias_grad): {}".format(
            rank, check_equal(bias_grad, layer.bias.grad)
        )
    )
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_vocab_parallel_classifier_no_given_weight():
    """Check VocabParallelClassifier3D (owning its weight/bias) against ClassifierSTD.

    Shards the master weight over the (j, i, k) parallel axes and the bias over
    j, runs forward/backward on the parallel layer, and compares each rank's
    output and gradient shards with the matching shards of the single-device
    reference. Results are logged, not asserted.

    Returns:
        Tuple of (forward seconds, backward seconds) for the parallel layer.
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    INPUT_SIZE = HIDDEN_SIZE
    # Local coordinates of this rank along the three 3D-parallel axes.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    layer = VocabParallelClassifier3D(INPUT_SIZE, VOCAB_SIZE, bias=True)
    layer = layer.to(dtype).to(device)
    layer_master = ClassifierSTD(INPUT_SIZE, VOCAB_SIZE, bias=True)
    layer_master = layer_master.to(dtype).to(device)
    # Broadcast master parameters so every rank copies shards of identical data.
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    weight = torch.chunk(weight, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    bias_master = layer_master.bias.data
    torch.distributed.broadcast(bias_master, src=0)
    bias = torch.chunk(bias_master, DEPTH)[j]
    layer.bias.data.copy_(bias)
    A_shape = (BATCH_SIZE, SEQ_LENGTH, INPUT_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    # This rank's input shard: batch split over i then j, hidden split over k.
    A = torch.chunk(A_master, DEPTH, dim=0)[i]
    A = torch.chunk(A, DEPTH, dim=-1)[k]
    A = torch.chunk(A, DEPTH, dim=0)[j]
    A = A.clone()
    A.requires_grad = True
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "vocab parallel classifier (no given weight) forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
        ),
    )
    # Reference forward pass on the full (unsharded) input.
    A_master = A_master.clone()
    A_master.requires_grad = True
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    C = torch.chunk(C, DEPTH, dim=0)[k]
    logger.info("Rank {} vocab parallel classifier (no given weight) forward: {}".format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = torch.chunk(grad, DEPTH, dim=0)[k]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("vocab parallel classifier (no given weight) backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    # Reference backward pass, then compare gradient shards.
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    A_grad = A_master.grad
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[i]
    A_grad = torch.chunk(A_grad, DEPTH, dim=-1)[k]
    A_grad = torch.chunk(A_grad, DEPTH, dim=0)[j]
    logger.info(
        "Rank {} vocab parallel classifier (no given weight) backward (input_grad): {}".format(
            rank, check_equal(A_grad, A.grad)
        )
    )
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    logger.info(
        "Rank {} vocab parallel classifier (no given weight) backward (weight_grad): {}".format(
            rank, check_equal(B_grad, layer.weight.grad)
        )
    )
    bias_grad = layer_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[j]
    logger.info(
        "Rank {} vocab parallel classifier (no given weight) backward (bias_grad): {}".format(
            rank, check_equal(bias_grad, layer.bias.grad)
        )
    )
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_classifier_given_embed_weight():
    """Check Classifier3D sharing its weight with an Embedding3D (weight tying).

    Builds an embedding + classifier pair where the classifier reuses the
    embedding's weight, runs forward/backward against the single-device
    reference pair, and logs whether each rank's shards match. Because the
    weight is tied, only the embedding weight gradient is checked.

    Returns:
        Tuple of (forward seconds, backward seconds) for the parallel pair.
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    # Local coordinates of this rank along the three 3D-parallel axes.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    embed = Embedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    # Broadcast master weight; each rank copies only its hidden-dim shard.
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
    embed.weight.data.copy_(weight)
    # Classifier reuses (ties) the embedding weight; no separate bias.
    layer = Classifier3D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
    layer = layer.to(dtype).to(device)
    layer_master = ClassifierSTD(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
    layer_master = layer_master.to(dtype).to(device)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(embed(A))
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "classifier (given embed weight) forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
        ),
    )
    A_master = A_master.clone()
    C_master = layer_master(embed_master(A_master))
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info("Rank {} classifier (given embed weight) forward: {}".format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=get_current_device())
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("classifier (given embed weight) backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    B_grad = embed_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    # The tied weight gradient only materializes on ranks where j == k;
    # elsewhere it should remain None.
    if j == k:
        logger.info(
            "Rank {} classifier (given embed weight) backward (weight_grad): {}".format(
                rank, check_equal(B_grad, embed.weight.grad)
            )
        )
    else:
        logger.info(
            "Rank {} classifier (given embed weight) backward (weight_grad): {}".format(rank, embed.weight.grad is None)
        )
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_vocab_parallel_classifier_given_embed_weight():
    """Check VocabParallelClassifier3D tied to a VocabParallelEmbedding3D weight.

    The embedding weight is sharded over (j, i, k) and shared with the
    classifier; forward/backward results of the pair are compared shard-wise
    against a single-device Embedding + ClassifierSTD reference and logged.

    Returns:
        Tuple of (forward seconds, backward seconds) for the parallel pair.
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    # Local coordinates of this rank along the three 3D-parallel axes.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    embed = VocabParallelEmbedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    embed = embed.to(dtype).to(device)
    embed_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    embed_master = embed_master.to(dtype).to(device)
    # Broadcast master weight, then copy this rank's (j, i, k) shard.
    weight_master = embed_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    weight = torch.chunk(weight, DEPTH, dim=-1)[k]
    embed.weight.data.copy_(weight)
    # Classifier reuses (ties) the embedding weight; no separate bias.
    layer = VocabParallelClassifier3D(HIDDEN_SIZE, VOCAB_SIZE, weight=embed.weight, bias=False)
    layer = layer.to(dtype).to(device)
    layer_master = ClassifierSTD(HIDDEN_SIZE, VOCAB_SIZE, weight=embed_master.weight, bias=False)
    layer_master = layer_master.to(dtype).to(device)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(embed(A))
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "vocab parallel classifier (given embed weight) forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
        ),
    )
    A_master = A_master.clone()
    C_master = layer_master(embed_master(A_master))
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[j]
    C = torch.chunk(C, DEPTH, dim=0)[k]
    logger.info("Rank {} vocab parallel classifier (given embed weight) forward: {}".format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[j]
    grad = torch.chunk(grad, DEPTH, dim=0)[k]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("vocab parallel classifier (given embed weight) backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Compare the tied weight gradient shard-for-shard.
    B_grad = embed_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    logger.info(
        "Rank {} vocab parallel embed backward (weight_grad): {}".format(rank, check_equal(B_grad, embed.weight.grad))
    )
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_patch_embed():
    """Check PatchEmbedding3D against the single-device PatchEmbeddingSTD.

    cls_token and pos_embed are initialized to ones on both layers so they
    match without a broadcast; the projection weight/bias are broadcast and
    sharded over k. Forward/backward shards are compared and logged.

    Returns:
        Tuple of (forward seconds, backward seconds) for the parallel layer.
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    # Local coordinates of this rank along the three 3D-parallel axes.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    # patch size 4, 3 input channels (RGB).
    layer = PatchEmbedding3D(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)
    # Deterministic token/positional parameters so both sides agree.
    torch.nn.init.ones_(layer.cls_token)
    torch.nn.init.ones_(layer.pos_embed)
    layer = layer.to(device)
    layer_master = PatchEmbeddingSTD(IMG_SIZE, 4, 3, HIDDEN_SIZE, dtype=dtype)
    torch.nn.init.ones_(layer_master.cls_token)
    torch.nn.init.ones_(layer_master.pos_embed)
    layer_master = layer_master.to(device)
    # Broadcast projection parameters; each rank copies its k-th shard.
    proj_weight_master = layer_master.weight.data
    torch.distributed.broadcast(proj_weight_master, src=0)
    proj_weight = torch.chunk(proj_weight_master, DEPTH, dim=0)[k]
    layer.weight.data.copy_(proj_weight)
    proj_bias_master = layer_master.bias.data
    torch.distributed.broadcast(proj_bias_master, src=0)
    proj_bias = torch.chunk(proj_bias_master, DEPTH)[k]
    layer.bias.data.copy_(proj_bias)
    A_shape = (BATCH_SIZE, 3, IMG_SIZE, IMG_SIZE)
    A_master = torch.randn(A_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "patch embed forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
        ),
    )
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info("Rank {} patch embed forward: {}".format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("patch embed backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Compare every parameter gradient shard against the reference.
    cls_grad_master = layer_master.cls_token.grad
    cls_grad = torch.chunk(cls_grad_master, DEPTH, dim=-1)[k]
    logger.info("Rank {} patch embed backward (cls_grad): {}".format(rank, check_equal(cls_grad, layer.cls_token.grad)))
    pos_grad_master = layer_master.pos_embed.grad
    pos_grad = torch.chunk(pos_grad_master, DEPTH, dim=-1)[k]
    logger.info(
        "Rank {} patch embed backward (pos_embed_grad): {}".format(rank, check_equal(pos_grad, layer.pos_embed.grad))
    )
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[k]
    logger.info(
        "Rank {} patch embed backward (proj_weight_grad): {}".format(rank, check_equal(B_grad, layer.weight.grad))
    )
    bias_grad = layer_master.bias.grad
    bias_grad = torch.chunk(bias_grad, DEPTH)[k]
    logger.info(
        "Rank {} patch embed backward (proj_bias_grad): {}".format(rank, check_equal(bias_grad, layer.bias.grad))
    )
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_embed():
    """Check Embedding3D against torch.nn.Embedding.

    The master weight is broadcast and sharded over the hidden dim (k);
    forward output shards and the weight gradient are compared and logged.
    The weight gradient only exists on ranks where j == k.

    Returns:
        Tuple of (forward seconds, backward seconds) for the parallel layer.
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    # Local coordinates of this rank along the three 3D-parallel axes.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    layer = Embedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    layer = layer.to(dtype).to(device)
    layer_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    layer_master = layer_master.to(dtype).to(device)
    # Broadcast the master weight; each rank copies its hidden-dim shard.
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "embed forward: pass | {0} --> {1} | {2:.3f} s".format(tuple(A.shape), tuple(out.shape), fwd_end - fwd_start)
    )
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info("Rank {} embed forward: {}".format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("embed backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    # Gradient only materializes where j == k; elsewhere it should be None.
    if j == k:
        logger.info("Rank {} embed backward (weight_grad): {}".format(rank, check_equal(B_grad, layer.weight.grad)))
    else:
        logger.info("Rank {} embed backward (weight_grad): {}".format(rank, layer.weight.grad is None))
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_vocab_parallel_embed():
    """Check VocabParallelEmbedding3D against torch.nn.Embedding.

    The master weight is sharded over (j, i, k); forward output shards and
    the full weight-gradient shard are compared to the reference and logged.

    Returns:
        Tuple of (forward seconds, backward seconds) for the parallel layer.
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    # Local coordinates of this rank along the three 3D-parallel axes.
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    layer = VocabParallelEmbedding3D(VOCAB_SIZE, HIDDEN_SIZE)
    layer = layer.to(dtype).to(device)
    layer_master = torch.nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
    layer_master = layer_master.to(dtype).to(device)
    # Broadcast the master weight; each rank copies its (j, i, k) shard.
    weight_master = layer_master.weight.data
    torch.distributed.broadcast(weight_master, src=0)
    weight = torch.chunk(weight_master, DEPTH, dim=0)[j]
    weight = torch.chunk(weight, DEPTH, dim=0)[i]
    weight = torch.chunk(weight, DEPTH, dim=-1)[k]
    layer.weight.data.copy_(weight)
    A_shape = (BATCH_SIZE, SEQ_LENGTH)
    A_master = torch.randint(VOCAB_SIZE, A_shape, device=device)
    torch.distributed.broadcast(A_master, src=0)
    A = A_master.clone()
    fwd_start = time.time()
    out = layer(A)
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "vocab parallel embed forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(A.shape), tuple(out.shape), fwd_end - fwd_start
        )
    )
    A_master = A_master.clone()
    C_master = layer_master(A_master)
    C = torch.chunk(C_master, DEPTH, dim=0)[i]
    C = torch.chunk(C, DEPTH, dim=-1)[k]
    C = torch.chunk(C, DEPTH, dim=0)[j]
    logger.info("Rank {} vocab parallel embed forward: {}".format(rank, check_equal(out, C)))
    grad_shape = C_master.shape
    grad_master = torch.randn(grad_shape, dtype=dtype, device=device)
    torch.distributed.broadcast(grad_master, src=0)
    grad = torch.chunk(grad_master, DEPTH, dim=0)[i]
    grad = torch.chunk(grad, DEPTH, dim=-1)[k]
    grad = torch.chunk(grad, DEPTH, dim=0)[j]
    grad = grad.clone()
    bwd_start = time.time()
    out.backward(grad)
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("vocab parallel embed backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    grad_master = grad_master.clone()
    C_master.backward(grad_master)
    # Weight gradient is fully sharded, so every rank can compare its piece.
    B_grad = layer_master.weight.grad
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[j]
    B_grad = torch.chunk(B_grad, DEPTH, dim=0)[i]
    B_grad = torch.chunk(B_grad, DEPTH, dim=-1)[k]
    logger.info(
        "Rank {} vocab parallel embed backward (weight_grad): {}".format(rank, check_equal(B_grad, layer.weight.grad))
    )
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_loss():
    """Compare CrossEntropyLoss3D with torch.nn.CrossEntropyLoss.

    Each rank scores its (i, j) shard of the broadcast logits against the
    full target tensor; the loss value and the input-gradient shard are
    compared against the single-device reference and logged.

    Returns:
        Tuple of (forward seconds, backward seconds) for the parallel loss.
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    criterion = CrossEntropyLoss3D()
    criterion_master = torch.nn.CrossEntropyLoss()
    out_shape = (BATCH_SIZE, NUM_CLASSES)
    out_master = torch.randn(out_shape, dtype=dtype, device=device)
    target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE,), dtype=torch.long, device=device)
    # Broadcast so every rank scores identical logits and targets.
    torch.distributed.broadcast(out_master, src=0)
    torch.distributed.broadcast(target_master, src=0)
    out = torch.chunk(out_master, DEPTH, dim=0)[i]
    out = torch.chunk(out, DEPTH, dim=0)[j]
    out = out.clone()
    out.requires_grad = True
    fwd_start = time.time()
    loss = criterion(out, target_master)
    # Synchronize before reading the clock so queued CUDA kernels are included
    # in the measurement (consistent with the other check_* benchmarks).
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "cross entropy loss forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(out.shape), tuple(loss.shape), fwd_end - fwd_start
        )
    )
    out_master = out_master.clone()
    out_master.requires_grad = True
    loss_master = criterion_master(out_master, target_master)
    logger.info("Rank {} cross entropy loss forward: {}".format(rank, check_equal(loss, loss_master)))
    bwd_start = time.time()
    loss.backward()
    # Same synchronization for the backward timing.
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("cross entropy loss backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    loss_master.backward()
    out_grad = out_master.grad
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[j]
    logger.info("Rank {} cross entropy loss backward: {}".format(rank, check_equal(out_grad, out.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start
def check_vocab_parallel_loss():
    """Compare VocabParallelCrossEntropyLoss3D with torch.nn.CrossEntropyLoss.

    Each rank scores its (i, k, j) shard of the broadcast logits against the
    full target tensor; the loss value and the input-gradient shard are
    compared against the single-device reference and logged.

    Returns:
        Tuple of (forward seconds, backward seconds) for the parallel loss.
    """
    logger = get_logger()
    rank = torch.distributed.get_rank()
    device = get_current_device()
    dtype = torch.float32
    input_parallel_mode = get_input_parallel_mode()
    weight_parallel_mode = get_weight_parallel_mode()
    output_parallel_mode = get_output_parallel_mode()
    j = input_parallel_mode.local_rank
    i = weight_parallel_mode.local_rank
    k = output_parallel_mode.local_rank
    criterion = VocabParallelCrossEntropyLoss3D()
    criterion_master = torch.nn.CrossEntropyLoss()
    out_shape = (BATCH_SIZE, NUM_CLASSES)
    out_master = torch.randn(out_shape, dtype=dtype, device=device)
    target_master = torch.randint(NUM_CLASSES, (BATCH_SIZE,), dtype=torch.long, device=device)
    # Broadcast so every rank scores identical logits and targets.
    torch.distributed.broadcast(out_master, src=0)
    torch.distributed.broadcast(target_master, src=0)
    out = torch.chunk(out_master, DEPTH, dim=0)[i]
    out = torch.chunk(out, DEPTH, dim=-1)[k]
    out = torch.chunk(out, DEPTH, dim=0)[j]
    out = out.clone()
    out.requires_grad = True
    fwd_start = time.time()
    loss = criterion(out, target_master)
    # Synchronize before reading the clock so queued CUDA kernels are included
    # in the measurement (consistent with the other check_* benchmarks).
    torch.cuda.synchronize()
    fwd_end = time.time()
    logger.info(
        "vocab parallel cross entropy loss forward: pass | {0} --> {1} | {2:.3f} s".format(
            tuple(out.shape), tuple(loss.shape), fwd_end - fwd_start
        )
    )
    out_master = out_master.clone()
    out_master.requires_grad = True
    loss_master = criterion_master(out_master, target_master)
    logger.info("Rank {} vocab parallel cross entropy loss forward: {}".format(rank, check_equal(loss, loss_master)))
    bwd_start = time.time()
    loss.backward()
    # Same synchronization for the backward timing.
    torch.cuda.synchronize()
    bwd_end = time.time()
    logger.info("vocab parallel cross entropy loss backward: pass | {:.3f} s".format(bwd_end - bwd_start))
    loss_master.backward()
    out_grad = out_master.grad
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[i]
    out_grad = torch.chunk(out_grad, DEPTH, dim=-1)[k]
    out_grad = torch.chunk(out_grad, DEPTH, dim=0)[j]
    logger.info("Rank {} vocab parallel cross entropy loss backward: {}".format(rank, check_equal(out_grad, out.grad)))
    return fwd_end - fwd_start, bwd_end - bwd_start
|
from flask import Flask, make_response, jsonify

app = Flask('docker-flask:dev')


@app.route('/', methods=['GET'])
def root():
    """Serve a static JSON greeting for the dev environment."""
    payload = {'message': 'Hello from Flask', 'env': 'dev'}
    return make_response(jsonify(payload))
|
from Logics.BluetoothManager import BluetoothManager
from data import ServerEnvelope, PebbleCommand
from services.IncomingPebbleMessageService import IncomingPebbleMessageService
from injector import inject, singleton
import threading
class IncomingMessageService(object):
    """Parses raw webservice messages and dispatches them to the Pebble layer."""

    @singleton
    @inject(bluetoothManager = BluetoothManager, incomingPebbleMessageService = IncomingPebbleMessageService)
    def __init__(self, bluetoothManager, incomingPebbleMessageService):
        self._bluetoothManager = bluetoothManager
        self._incomingPebbleMessageService = incomingPebbleMessageService

    def HandleIncomingMessage(self, message):
        """Entry point: parse one raw server message; returns False on failure."""
        if self._readMessageFromServer(message) == False: return False

    def _readMessageFromServer(self, message):
        """Split the comma-separated server message and hand it to the dispatcher."""
        try:
            messageList = message.split(',')
            # Drop empty fields produced by consecutive or trailing commas.
            messageList = list(filter(None, messageList))
        except Exception:
            # Non-string / malformed payload: signal failure to the caller.
            return False
        if(self._checkServerMessage(messageList) == False):
            return False

    def _checkServerMessage(self, message):
        """Check type of received webservice message and handle accordingly"""
        envelopeType = int(message[0])
        envelopeTypes = ServerEnvelope.ServerEnvelopeType
        if(envelopeType == envelopeTypes.scan.value):
            # Scan for Pebbles (fire-and-forget: no envelope is forwarded).
            # BUGFIX: this line previously read """Scan for Pebbles"""" — the
            # stray fourth quote made the whole module a SyntaxError.
            self._HandleScanCommand()
            return
        if(envelopeType == envelopeTypes.install.value):
            # Install a Pebble application from a URL.
            targetPebble = message[1]
            url = message[2]
            envelope = self._HandleInstallCommand(targetPebble, url)
        elif(envelopeType == envelopeTypes.connect.value) or (envelopeType == ServerEnvelope.ServerEnvelopeType.disconnect.value):
            # Connect/Disconnect Pebble.
            targetPebble = message[1]
            envelope = self._HandleConnectionCommand(envelopeType, targetPebble)
        elif(envelopeType == envelopeTypes.message.value):
            # Send message to a specific Pebble application.
            envelope = self._HandleMessagingCommand(message)
        elif(envelopeType == envelopeTypes.notification.value):
            # Send a notification; it pops up regardless of the open application.
            targetPebble = message[1]
            notification = message[2]
            envelope = self._HandleNotificationCommand(targetPebble, notification)
        else: return False
        self._incomingPebbleMessageService.sendMessageToPebble(envelope)

    def _HandleScanCommand(self):
        """Scan for available pebbles in a background thread (non-blocking)."""
        scanThread = threading.Thread(target=self._bluetoothManager.sendAvailablePebblesToServer)
        scanThread.start()

    def _HandleInstallCommand(self, targetPebble, url):
        """Download pebble app from given url and install it on given Pebble"""
        return ServerEnvelope.ServerEnvelope(ServerEnvelope.ServerEnvelopeType.install.value, target = targetPebble, data = url)

    def _HandleConnectionCommand(self, envelopeType, targetPebble):
        """Notify webservice of pebble connection/disconnect"""
        return ServerEnvelope.ServerEnvelope(envelopeType, targetPebble)

    def _HandleMessagingCommand(self, message):
        """Check type of message to be delivered to Pebble"""
        targetPebble = message[1]
        messageType = int(message[2])
        messageString = message[3]
        if(messageType == 1):
            # List-style message carries the list items plus a transaction id.
            listItems = message[4]
            transactionId = message[5]
            return ServerEnvelope.ServerEnvelope( ServerEnvelope.ServerEnvelopeType.message.value, targetPebble, messageType, data = messageString, note = listItems, uniqueID = transactionId)
        else:
            transactionID = message[4]
            return ServerEnvelope.ServerEnvelope(envelopeType = ServerEnvelope.ServerEnvelopeType.message.value, target = targetPebble, messageType = messageType, data = messageString, uniqueID = transactionID)

    def _HandleNotificationCommand(self, targetPebble, notification):
        """Send notification to given Pebble"""
        return ServerEnvelope.ServerEnvelope(ServerEnvelope.ServerEnvelopeType.notification.value, targetPebble, data = notification)
import unittest
import numpy as np
from gnn import GraphNeuralNetwork
from optimizer import SGD, MomentumSGD
from preprocessing import SplitError
from trainer import Trainer, binary_mean_accuracy
class TestTrainer(unittest.TestCase):
    """Integration tests for Trainer on GraphNeuralNetwork models."""

    def test_fit(self):
        """Training on constant labels should drive predictions to those labels."""
        gnn1 = GraphNeuralNetwork(2)
        gnn2 = GraphNeuralNetwork(2)
        # NOTE(review): both trainers share one MomentumSGD instance — if the
        # optimizer keeps per-model state this couples the two runs; confirm.
        sgd = MomentumSGD()
        trainer1 = Trainer(gnn1, sgd)
        trainer2 = Trainer(gnn2, sgd)
        graphs = [np.random.randint(0, 2, (10, 10))] * 10
        vertex_sizes = [10] * 10
        labels1 = [0] * 10
        expected1 = [0] * 10
        trainer1.fit(graphs, vertex_sizes, labels1)
        actual1 = trainer1.predict(graphs, vertex_sizes)
        self.assertEqual(expected1, actual1)
        labels2 = [1] * 10
        expected2 = [1] * 10
        trainer2.fit(graphs, vertex_sizes, labels2)
        actual2 = trainer2.predict(graphs, vertex_sizes)
        self.assertEqual(expected2, actual2)

    def test_predict(self):
        """Hand-set parameters must yield deterministic class predictions."""
        gnn1 = GraphNeuralNetwork(2)
        gnn1.params["W"] = np.arange(1, 5).reshape(2, 2)
        gnn1.params["A"] = np.arange(1, 3)
        gnn1.params["b"] = np.array([1])
        sgd = SGD()
        trainer1 = Trainer(gnn1, sgd)
        graphs = [[[0, 0, 1, 0],
                   [0, 0, 1, 1],
                   [1, 1, 0, 1],
                   [0, 1, 1, 0]]] * 10
        vertex_sizes = [4] * 10
        expected1 = [1] * 10
        actual1 = trainer1.predict(graphs, vertex_sizes)
        self.assertEqual(expected1, actual1)
        # Negative weights/bias push every score below threshold -> class 0.
        gnn2 = GraphNeuralNetwork(3)
        gnn2.params["W"] = -np.arange(1, 10).reshape(3, 3)
        gnn2.params["b"] = -np.array([1])
        trainer2 = Trainer(gnn2, sgd)
        expected2 = [0] * 10
        actual2 = trainer2.predict(graphs, vertex_sizes)
        self.assertEqual(expected2, actual2)

    def test_accuracy(self):
        """accuracy() must return the fraction of labels matching predictions."""
        gnn = GraphNeuralNetwork(2)
        gnn.params["W"] = np.arange(1, 5).reshape(2, 2)
        gnn.params["A"] = np.arange(1, 3)
        gnn.params["b"] = np.array([1])
        sgd = SGD()
        trainer = Trainer(gnn, sgd)
        graphs = [[[0, 0, 1, 0],
                   [0, 0, 1, 1],
                   [1, 1, 0, 1],
                   [0, 1, 1, 0]]] * 10
        vertex_sizes = [4] * 10
        labels1 = [1] * 10
        expected1 = 1.
        actual1 = trainer.accuracy(graphs, vertex_sizes, labels1)
        self.assertEqual(expected1, actual1)
        labels2 = [1] * 7 + [0] * 3
        expected2 = 0.7
        actual2 = trainer.accuracy(graphs, vertex_sizes, labels2)
        self.assertEqual(expected2, actual2)

    def test_kfold_cross_val(self):
        """kfold_cross_validation should leave the model parameters untouched."""
        gnn = GraphNeuralNetwork(2)
        sgd = SGD()
        trainer = Trainer(gnn, sgd)
        graphs = [[[0, 0, 1, 0],
                   [0, 0, 1, 1],
                   [1, 1, 0, 1],
                   [0, 1, 1, 0]]] * 100
        vertex_sizes = [4] * 100
        labels = [0] * 100
        # BUGFIX: the original compared `gnn.params` to itself (same dict
        # object), which always passed. Snapshot copies of the values first.
        expected = {key: np.array(value) for key, value in gnn.params.items()}
        _ = trainer.kfold_cross_validation(graphs, vertex_sizes, labels)
        actual = gnn.params
        self.assertEqual(sorted(expected), sorted(actual))
        for key in expected:
            np.testing.assert_array_equal(expected[key], actual[key])
        # BUGFIX: both calls previously shared one assertRaises block, so the
        # second call never executed once the first raised.
        with self.assertRaises(SplitError):
            trainer.kfold_cross_validation(graphs, vertex_sizes, labels,
                                           minibatch_size=20)
        with self.assertRaises(SplitError):
            trainer.kfold_cross_validation(graphs, vertex_sizes, labels,
                                           k=20)
class TestBinaryMeanAccuracy(unittest.TestCase):
    """Unit tests for the binary_mean_accuracy metric."""

    def test_binary_mean_accuracy(self):
        """Check the epsilon-smoothed per-class mean accuracy on small inputs."""
        epsilon = 1.0e-7
        x1 = np.array([0, 0, 1, 0, 1])
        y1 = np.array([0, 0, 1, 1, 0])
        expected1 = (2 / (3+epsilon) + 1 / (2+epsilon)) / 2
        actual1 = binary_mean_accuracy(x1, y1)
        self.assertEqual(expected1, actual1)
        x2 = np.array([0, 0, 1, 0, 1, 1])
        y2 = np.array([0, 0, 1, 1, 1, 1])
        expected2 = (2 / (2+epsilon) + 3 / (4+epsilon)) / 2
        actual2 = binary_mean_accuracy(x2, y2)
        self.assertEqual(expected2, actual2)
        # BUGFIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `bool` is the supported dtype.
        x3 = np.ones_like(x1, dtype=bool)
        y3 = np.zeros_like(x1, dtype=bool)
        expected3 = 0
        # Call twice to confirm the metric is stateless/idempotent.
        actual3_1 = binary_mean_accuracy(x3, y3)
        self.assertEqual(expected3, actual3_1)
        actual3_2 = binary_mean_accuracy(x3, y3)
        self.assertEqual(expected3, actual3_2)
        # Plain Python lists must also be accepted.
        x4 = [0, 0, 0, 0, 1, 1]
        y4 = [1, 1, 1, 1, 1, 1]
        expected4 = (2 / (6+epsilon)) / 2
        actual4 = binary_mean_accuracy(x4, y4)
        self.assertEqual(expected4, actual4)
# Run the full unittest suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/bin/env python
from pprint import pprint
def print_output(device, table, descr):
    """Pretty-print a device banner (hostname/port/user), a section header, and a table."""
    banner = [
        'hostname: {}'.format(device.hostname),
        'netconf port: {}'.format(device.port),
        'user: {}'.format(device.user),
        '***** {} *****'.format(descr),
    ]
    for line in banner:
        pprint(line)
    pprint(table)
|
import os, re
import numpy as np
import pandas as pd
# Flat preprocessing script: reads (content, question#answer#position) triples
# from dataset/<set>_set/, locates the answer-bearing clause for each triple,
# and appends CSV rows to short (<=200 chars) and long (>200 chars) files.
rel_path = "dataset/"
sel_data = "train"
input_path = rel_path+sel_data+"_set/"
output_path = rel_path
# Output files are opened in append mode, so re-running accumulates rows.
short_file = open(output_path+"sS200_"+sel_data+".csv", 'a', encoding = 'utf-8', errors='ignore')
long_file = open(output_path+"lS200_"+sel_data+".csv", 'a', encoding = 'utf-8', errors='ignore')
# Running totals for the summary statistics printed at the end.
sentence_len = 0
question_len = 0
count = 0
# input triple data (content) (question answer ans_pos)
for root, dirs, files in os.walk(input_path):
    for f in files:
        # record content & triple_list & answer position
        content = ""
        triple = []
        pos_ans = []
        fullpath = os.path.join(root, f)
        openfile = open(fullpath, 'r' , encoding = 'utf-8', errors='ignore')
        for line in openfile.readlines():
            # first line for content
            if content == "":
                content = line.strip()
                continue
            # down below for triple
            # Commas are stripped because they are the CSV delimiter below.
            new_line = line.replace(",","")
            part = new_line.split("#")
            if (part[0][-1:]!="?") & (part[0][-1:]!="?"):
                # NOTE(review): this overwrites the question with the answer
                # text + "?" (part[1]); part[0]+"?" looks intended — confirm.
                part[0] = part[1]+"?"
            triple.append(part)
            pos_ans.append(int(part[2]))
        # find answer content
        # record comma position
        pattern = [u'。' , u';']
        # NOTE(review): \b does not match next to CJK punctuation, so this
        # regex may never find 。/; — verify against real data.
        regex = re.compile(r'\b(' + '|'.join(pattern) + r')\b')
        # Sentinel -1 at the front and len(content) at the back bracket the
        # clause boundaries.
        pos_comma = [-1]
        pos_find = [m.start() for m in regex.finditer(content)]
        pos_comma.extend(pos_find)
        pos_comma.extend([len(content)])
        # for answer content
        ans_cont = []
        for a_idx, pos_a in enumerate(pos_ans):
            for c_idx, pos_c in enumerate(pos_comma):
                # NOTE(review): enumerate never yields len(pos_comma), so this
                # guard is dead; pos_comma[c_idx+1] relies on pos_a never
                # exceeding the final sentinel — confirm.
                if c_idx == len(pos_comma):
                    break
                if pos_a > pos_c:
                    # Keep extending until the clause containing pos_a is found;
                    # the last assignment wins.
                    ans_cont.append("")
                    ans_cont[a_idx] = content[pos_comma[c_idx]+1:pos_comma[c_idx+1]+1].replace(",","")
                else:
                    continue
        # rewrite triple data (answer_content question answer)
        for idx in range(len(triple)):
            if len(ans_cont[idx])>200:
                wrtline = ans_cont[idx].replace(",", "")+","+triple[idx][0].replace(",", "")+","+triple[idx][1].replace(",", "")+","+triple[idx][2].replace(",", "")
                long_file.write(wrtline)
            else:
                count += 1
                wrtline = ans_cont[idx].replace(",", "")+","+triple[idx][0].replace(",", "")+","+triple[idx][1].replace(",", "")+","+triple[idx][2].replace(",", "")
                short_file.write(wrtline)
                sentence_len += len(ans_cont[idx])
                question_len += len(triple[idx][0])
# Summary: short-row count, total/average answer length, total/average
# question length.
print(count)
print(sentence_len)
print(sentence_len/count)
print(question_len)
print(question_len/count)
long_file.close()
short_file.close()
|
from django.contrib import admin
from .models import Customer, Photographer, Appointment, Blog, City
# Register your models here.
# Expose the booking app's models in the default Django admin site.
for model in (Customer, Photographer, Appointment, Blog, City):
    admin.site.register(model)
#!/usr/bin/env python3
import pyperf
def bench_dict(loops, mydict):
    """Time `loops` rounds of ten string-key dict lookups; return elapsed seconds."""
    rounds = range(loops)
    t0 = pyperf.perf_counter()
    for _ in rounds:
        # Ten lookups per round, matching inner_loops=10 in the runner setup.
        mydict['0']
        mydict['100']
        mydict['200']
        mydict['300']
        mydict['400']
        mydict['500']
        mydict['600']
        mydict['700']
        mydict['800']
        mydict['900']
    return pyperf.perf_counter() - t0
runner = pyperf.Runner()
# 1000 distinct string keys; the benchmark probes every 100th one.
mydict = {str(k): k for k in range(1000)}
# inner-loops: dict[str] is duplicated 10 times
runner.bench_time_func('dict[str]', bench_dict, mydict, inner_loops=10)
|
"""
create database proxydb charset utf8;
use proxydb;
create table proxytab(
ip varchar(50),
port varchar(10)
)charset=utf8;
"""
import requests
from lxml import etree
import time
import random
from fake_useragent import UserAgent
import pymysql
class KuaiProxy:
    """Scrape free proxies from kuaidaili.com, verify each one, and store
    working ip/port pairs in the local MySQL `proxydb.proxytab` table."""

    def __init__(self):
        self.url = 'https://www.kuaidaili.com/free/inha/{}/'
        # httpbin echoes the request, which is enough to prove the proxy works.
        self.test_url = 'http://httpbin.org/get'
        self.headers = {'User-Agent':UserAgent().random}
        # Keyword arguments are required since PyMySQL 1.0 (positional
        # connect() arguments were removed).
        self.db = pymysql.connect(host='localhost', user='root',
                                  password='123456', database='proxydb',
                                  charset='utf8')
        self.cur = self.db.cursor()

    def get_proxy(self, url):
        """Fetch one listing page and test every ip:port found in its table."""
        html = requests.get(url=url, headers=self.headers).text
        # Parse the proxy table with lxml + XPath.
        eobj = etree.HTML(html)
        tr_list = eobj.xpath('//table/tbody/tr')
        for tr in tr_list:
            ip = tr.xpath('./td[1]/text()')[0]
            port = tr.xpath('./td[2]/text()')[0]
            # Only working proxies are persisted.
            self.test_proxy(ip, port)

    def test_proxy(self, ip, port):
        """Probe one ip/port via the test URL; insert it into MySQL if usable."""
        proxies = {
            'http' : 'http://{}:{}'.format(ip, port),
            'https' : 'https://{}:{}'.format(ip, port),
        }
        try:
            # Any response within the timeout counts as "usable".
            requests.get(url=self.test_url,
                         proxies=proxies,
                         headers=self.headers,
                         timeout=3)
            print(ip, port, '\033[31m可用\033[0m')
            # Parameterized query guards against malformed scraped values.
            ins = 'insert into proxytab values(%s,%s)'
            self.cur.execute(ins, [ip, port])
            self.db.commit()
        except Exception:
            print(ip, port, '不可用')

    def crawl(self):
        """Walk listing pages 1..1000 with a random polite delay between pages."""
        for page in range(1, 1001):
            page_url = self.url.format(page)
            self.get_proxy(url=page_url)
            time.sleep(random.uniform(0, 2))
if __name__ == '__main__':
    # Entry point: crawl all listing pages and store working proxies.
    spider = KuaiProxy()
    spider.crawl()
|
from utils import* ;imp.reload(dsp)
from EDutils import pets as pets_imp ;imp.reload(pets_imp)
from utils import pytest_util ;imp.reload(pytest_util)
plt.close('all')
pts_path = 'pets/glycine.pts'
#make sure pets folder exists in dat
def test_import_pets():
    # gen=True forces regeneration of the processed data from the .pts file.
    pets = pets_imp.Pets(pts_path,gen=True)#,lam=0.02508,aper=0.005340,omega=230,gen=1)
@pytest_util.add_link(__file__)
def test_show_exp():
    # Display experimental frame 1; fig/ax are returned so the
    # pytest_util.add_link decorator can attach the produced image.
    pets = pets_imp.Pets(pts_path)
    vw=pets.show_exp(frame=1,h=True,pargs={'opt':''})
    return vw.fig,vw.ax
test_import_pets()
# test_show_exp()
# pets.show_uvw()
# pets.show_xyz()
# pets.show_hkl()
# pets.show_sim()
# pets.compare_xyz_pxpy(frame=frame,opts='oab')
# pets.show_xyz(view=[0,0],opt='sc',name='pets_glycine_xyz.png')
# pets.show_hkl(view=[0,0],opt='sc',name='pets_glycine_hkl.png')
# pets.show_frame(frame=frame,name='pattern_%d.png' %frame, opt='psc')
# pets.show_ewald_sphere(frame=19,Nmax=5,Smax=0.025,h3d=1)
# K = pets.get_beam(frame)
# df = mut.get_kinematic_pattern(cif_file,K,Nmax=5,Smax=0.02)
# pets.show_frames(frame=frame,thick=1000,Nmax=7,Smax=0.015,rot=206,Imag=10,opts='PKh',
# v=0,pargs={'xylims':1.5})
# df = pets.compare_hkl(frame,Nmax=8,Smax=0.025,eps=None,v=0)
# frames = range(1,65) #[5,10,15,25,30]
# for f in frames:
# pets.show_frame(frame=f,Smax=0.01,name='glycine_exp%d.png' %f,opt='sc')
# xylims = [-5,10,-5,10,-12,1]
# mut.show_cell(cif_file,title='%d' %frame,xylims=xylims,n=uvw,
# axPos=[0,0,1,0.95],view=[0,0],h3D=1,opt='p')
# pets.mp4_crystal_rotation('glycine',xylims=xylims)
|
"""Coding module."""
import numpy as np
from scipy.sparse import csr_matrix
from . import utils
def parity_check_matrix(n_code, d_v, d_c, seed=None):
    """
    Build a regular Parity-Check Matrix H following Gallager's algorithm.

    Parameters
    ----------
    n_code: int, Length of the codewords.
    d_v: int, Number of parity-check equations including a certain bit.
        Must be greater or equal to 2.
    d_c: int, Number of bits in the same parity-check equation. d_c Must be
        greater or equal to d_v and must divide n.
    seed: int, seed of the random generator.

    Returns
    -------
    H: array (n_equations, n_code). LDPC regular matrix H.
        Where n_equations = d_v * n / d_c, the total number of parity-check
        equations.
    """
    rng = utils.check_random_state(seed)
    if d_v <= 1:
        raise ValueError("""d_v must be at least 2.""")
    if d_c <= d_v:
        raise ValueError("""d_c must be greater than d_v.""")
    if n_code % d_c:
        raise ValueError("""d_c must divide n for a regular LDPC matrix H.""")
    n_equations = (n_code * d_v) // d_c
    block_size = n_equations // d_v
    # Allocate integer arrays up front instead of a float np.empty followed
    # by a trailing astype(int) cast.
    block = np.zeros((block_size, n_code), dtype=int)
    H = np.empty((n_equations, n_code), dtype=int)
    # Fill the first block with consecutive ones in each row of the block.
    for i in range(block_size):
        for j in range(i * d_c, (i + 1) * d_c):
            block[i, j] = 1
    H[:block_size] = block
    # Create remaining blocks by permutations of the first block's columns.
    for i in range(1, d_v):
        H[i * block_size: (i + 1) * block_size] = rng.permutation(block.T).T
    return H
def coding_matrix(H, sparse=True):
    """Return the generating coding matrix G given the LDPC matrix H.

    Parameters
    ----------
    H: array (n_equations, n_code). Parity check matrix of an LDPC code with
        code length `n_code` and `n_equations` number of equations.
    sparse: (boolean, default True): if `True`, scipy.sparse format is used
        to speed up computation.

    Returns
    -------
    G.T: array (n_bits, n_code). Transposed coding matrix.
    """
    # isinstance rather than `type(H) == csr_matrix` (PEP 8: type
    # comparisons should use isinstance, which also accepts subclasses).
    if isinstance(H, csr_matrix):
        H = H.toarray()
    n_equations, n_code = H.shape
    # DOUBLE GAUSS-JORDAN:
    Href_colonnes, tQ = utils.gaussjordan(H.T, 1)
    Href_diag = utils.gaussjordan(np.transpose(Href_colonnes))
    Q = tQ.T
    n_bits = n_code - Href_diag.sum()
    Y = np.zeros(shape=(n_code, n_bits)).astype(int)
    # Bottom n_bits rows of Y form an identity: information bits pass through.
    Y[n_code - n_bits:, :] = np.identity(n_bits)
    if sparse:
        Q = csr_matrix(Q)
        Y = csr_matrix(Y)
    tG = utils.binaryproduct(Q, Y)
    return tG
def coding_matrix_systematic(H, sparse=True):
    """Compute a coding matrix G in systematic format with an identity block.

    Parameters
    ----------
    H: array (n_equations, n_code). Parity-check matrix.
    sparse: (boolean, default True): if `True`, scipy.sparse is used
        to speed up computation if n_code > 1000.

    Returns
    -------
    H_new: (n_equations, n_code) array. Modified parity-check matrix given by a
        permutation of the columns of the provided H.
    G_systematic.T: Transposed Systematic Coding matrix associated to H_new.
    """
    n_equations, n_code = H.shape
    # Force sparse arithmetic for long codes; otherwise honour the flag.
    if n_code > 1000 or sparse:
        sparse = True
    else:
        sparse = False
    # P1 records the column permutation applied to H during the pivot fix-up.
    P1 = np.identity(n_code, dtype=int)
    Hrowreduced = utils.gaussjordan(H)
    n_bits = n_code - sum([a.any() for a in Hrowreduced])
    # After this loop, Hrowreduced will have the form H_ss : | I_(n-k) A |
    while(True):
        # Find the first diagonal position with a zero pivot...
        zeros = [i for i in range(min(n_equations, n_code))
                 if not Hrowreduced[i, i]]
        if len(zeros):
            indice_colonne_a = min(zeros)
        else:
            break
        # ...and the first later column with a 1 in that row to swap in.
        list_ones = [j for j in range(indice_colonne_a + 1, n_code)
                     if Hrowreduced[indice_colonne_a, j]]
        if len(list_ones):
            indice_colonne_b = min(list_ones)
        else:
            break
        # Swap the two columns in both Hrowreduced and the permutation P1.
        aux = Hrowreduced[:, indice_colonne_a].copy()
        Hrowreduced[:, indice_colonne_a] = Hrowreduced[:, indice_colonne_b]
        Hrowreduced[:, indice_colonne_b] = aux
        aux = P1[:, indice_colonne_a].copy()
        P1[:, indice_colonne_a] = P1[:, indice_colonne_b]
        P1[:, indice_colonne_b] = aux
    # Now, Hrowreduced has the form: | I_(n-k) A | ,
    # the permutation above makes it look like :
    # |A I_(n-k)|
    P1 = P1.T
    identity = list(range(n_code))
    sigma = identity[n_code - n_bits:] + identity[:n_code - n_bits]
    # P2 cyclically shifts the columns so the identity block sits in front.
    P2 = np.zeros(shape=(n_code, n_code), dtype=int)
    P2[identity, sigma] = np.ones(n_code)
    if sparse:
        P1 = csr_matrix(P1)
        P2 = csr_matrix(P2)
        H = csr_matrix(H)
    P = utils.binaryproduct(P2, P1)
    if sparse:
        P = csr_matrix(P)
    H_new = utils.binaryproduct(H, np.transpose(P))
    # G = [ I_k | A^T ] : systematic generator read off the reduced H.
    G_systematic = np.zeros((n_bits, n_code), dtype=int)
    G_systematic[:, :n_bits] = np.identity(n_bits)
    G_systematic[:, n_bits:] = \
        (Hrowreduced[:n_code - n_bits, n_code - n_bits:]).T
    return H_new, G_systematic.T
def make_ldpc(n_code, d_v, d_c, systematic=False, sparse=True, seed=None):
    """Create an LDPC coding and decoding matrices H and G.

    Parameters
    ----------
    n_code: int, Length of the codewords.
    d_v: int, Number of parity-check equations including a certain bit.
    d_c: int, Number of bits in the same parity-check equation. d_c Must be
        greater or equal to d_v and must divide n.
    seed: int, seed of the random generator.
    systematic: boolean, default False. if True, constructs a systematic
        coding matrix G.

    Returns
    -------
    H: array (n_equations, n_code). Parity check matrix of an LDPC code with
        code length `n_code` and `n_equations` number of equations.
    G: (n_code, n_bits) array coding matrix.
    """
    rng = utils.check_random_state(seed)
    H = parity_check_matrix(n_code, d_v, d_c, seed=rng)
    if not systematic:
        return H, coding_matrix(H, sparse=sparse)
    H, G = coding_matrix_systematic(H, sparse=sparse)
    return H, G
def get_matrix(N, K, matrix_dir='/home/jianping/PycharmProjects/pyldpc-master/LDPC_matrix/'):
    """Load pre-generated LDPC check/generator matrices from index files.

    Parameters
    ----------
    N: int, codeword length (second field of the file name).
    K: int, used both for the file-name suffix (N-K) and the H row count.
    matrix_dir: str, directory holding the LDPC_chk_mat_* / LDPC_gen_mat_*
        files. Parameterized (backward-compatibly) so the hard-coded user
        path can be overridden.

    Returns
    -------
    H_mat: (K, N) int32 array; G_mat.T: (N, N-K) int32 array.
    """
    file_H = matrix_dir + 'LDPC_chk_mat_' + str(N) + '_' + str(N-K) + '.txt'
    file_G = matrix_dir + 'LDPC_gen_mat_' + str(N) + '_' + str(N-K) + '.txt'
    # Each file lists (row, col) index pairs of the nonzero entries.
    H_matrix_row_col = np.loadtxt(file_H, dtype=np.int32)
    G_matrix_row_col = np.loadtxt(file_G, dtype=np.int32)
    # NOTE(review): H_mat is allocated (K, N) although the file-name suffix
    # suggests n_equations = N-K rows — confirm the files' convention.
    H_mat = np.zeros([K, N], dtype=np.int32)
    G_mat = np.zeros([N-K, N], dtype=np.int32)
    H_mat[H_matrix_row_col[:,0], H_matrix_row_col[:,1]] = 1
    G_mat[G_matrix_row_col[:,0], G_matrix_row_col[:,1]] = 1
    return H_mat, G_mat.T
|
# Copyright 2017 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import pytest
from planet.api.models import Features, Paged, Request, Response, WFS3Features
from mock import MagicMock
# try:
# from StringIO import StringIO as Buffy
# except ImportError:
# from io import BytesIO as Buffy
def mock_http_response(json, iter_content=None):
    """Build a MagicMock that stands in for an HTTP response object.

    `json` becomes the return value of the response's .json() method and
    `iter_content` (a callable or None) is attached verbatim.
    """
    response = MagicMock(name='http_response')
    response.headers = {}
    response.json.return_value = json
    response.iter_content = iter_content
    return response
def make_page(cnt, start, key, next, af):
    """Build one fake page envelope holding `cnt` items beginning at `start`.

    Returns (next_start, envelope). `af` selects the WFS3-style 'links'
    envelope over the classic '_links' one.
    """
    items = [{'thingee': start + offset} for offset in range(cnt)]
    if af:
        envelope = {
            'links': [{'href': next, 'rel': 'next', 'title': 'NEXT'}],
            key: items,
        }
    else:
        envelope = {'_links': {'_next': next}, key: items}
    return start + cnt, envelope
def make_pages(cnt, num, key, af):
    """Yield `num` fake pages of `cnt` items each; the last page's next
    link is None so pagination terminates."""
    start = 0
    for idx in range(num):
        nxt = 'page %d' % (idx + 1,) if idx + 1 < num else None
        start, page = make_page(cnt, start, key, nxt, af=af)
        yield page
class Thingees(Paged):
    """Paged body whose per-page items live under the 'thingees' key."""
    ITEM_KEY = 'thingees'
def thingees(cnt, num, key='thingees', body=Thingees, af=False):
    """Return a `body` Paged instance backed by `num` fake pages of `cnt`
    items each, served through a mocked dispatcher."""
    req = Request('url', 'auth')
    dispatcher = MagicMock(name='dispatcher', )
    # BUG FIX: this previously hard-coded make_pages(5, 5) and ignored the
    # cnt/num parameters; test_wf3_features passes (6, 5) and expects 30
    # items, so the parameters must be honoured.
    pages = make_pages(cnt, num, key=key, af=af)
    # initialise the paged object with the first page
    paged = body(req, mock_http_response(json=next(pages)), dispatcher)
    # the remaining pages are served lazily through the dispatcher
    dispatcher._dispatch.side_effect = (
        mock_http_response(json=p) for p in pages
    )
    # mimic dispatcher.response
    dispatcher.response = lambda req: Response(req, dispatcher)
    return paged
def test_body_write():
    """write() streams every chunk: 10 chunks x 16000 chars -> 160000 bytes."""
    req = Request('url', 'auth')
    dispatcher = MagicMock(name='dispatcher', )
    chunks = ((str(i) * 16000).encode('utf-8') for i in range(10))
    paged = Paged(req, mock_http_response(
        json=None,
        iter_content=lambda chunk_size: chunks
    ), dispatcher)
    buf = io.BytesIO()
    paged.write(buf)
    assert len(buf.getvalue()) == 160000
def test_paged_items_iter():
    """items_iter(None) walks all 5 pages and yields 25 items in order."""
    paged = thingees(5, 5)
    expected = 25
    cnt = 0
    for i in paged.items_iter(None):
        # Guard against a paging bug yielding an endless stream.
        if cnt > expected:
            assert False
        assert i['thingee'] == cnt
        cnt += 1
    assert cnt == expected
def test_paged_iter():
    """iter(2) stops after two pages, each with its full 5 items."""
    paged = thingees(5, 5)
    pages = list(paged.iter(2))
    assert 2 == len(pages)
    assert 5 == len(pages[0].get()['thingees'])
    assert 5 == len(pages[1].get()['thingees'])
def test_json_encode():
    """json_encode with limit=1 emits exactly one item in the envelope."""
    paged = thingees(5, 5)
    buf = io.StringIO()
    paged.json_encode(buf, 1)
    assert '{"thingees": [{"thingee": 0}]}' == buf.getvalue()
def test_features():
    """Features bodies encode as a GeoJSON FeatureCollection, honouring limit."""
    features = thingees(5, 5, body=Features, key='features')
    buf = io.StringIO()
    features.json_encode(buf, 13)
    features_json = json.loads(buf.getvalue())
    assert features_json['type'] == 'FeatureCollection'
    assert len(features_json['features']) == 13
@pytest.mark.parametrize('limit', [None, 13])
def test_wf3_features(limit):
    """json_encode honours the item limit (all items when limit is None)."""
    pages = 5
    page_size = 6
    num_items = pages * page_size
    features = thingees(page_size, pages,
                        body=WFS3Features,
                        key='features',
                        af=True)
    buf = io.StringIO()
    features.json_encode(buf, limit)
    features_json = json.loads(buf.getvalue())
    # BUG FIX: `assert a == limit if limit else num_items` parsed as
    # `(a == limit) if limit else num_items`, so the limit=None case
    # asserted a bare truthy int and could never fail.
    expected = limit if limit else num_items
    assert len(features_json['features']) == expected
|
# Author: Francesco Nuzzo
from os import listdir
import numpy as np
import operator
# This is the script we used for aggregating the lp solutions all together.
# The result is a file with x rows, where x is the number of tests.
# Each row is in the format:
# n;seed;total_reward
path_with_solutions = '../lp_solutions/'
files = listdir(path=path_with_solutions)
all_solutions_filename = '../all_lp_solutions.sol'
# One row per solution file: [n, seed, total_reward].
data_solutions = np.empty((len(files), 3), dtype=int)
# enumerate() replaces the manual `i = 0 ... i += 1` counter.
for i, filename in enumerate(files):
    # File names encode n as field 2 and seed as field 10 ("..._n_..._seed.ext").
    attributes = filename.split("_")
    n = int(attributes[2])
    seed = int(attributes[10].split('.')[0])
    reward = 0
    with open(path_with_solutions + filename) as file_reader:
        count_line = 0
        for line in file_reader:
            count_line += 1
            # "None" marks an unsolved instance: keep reward = 0.
            if line == "None":
                break
            # The total reward is the second token of line 2.
            if count_line == 2:
                info = line.split(' ')
                reward = int(info[1])
                break
    data_solutions[i, 0] = n
    data_solutions[i, 1] = seed
    data_solutions[i, 2] = reward
# Sort by (n, seed) for a deterministic output order.
data_solutions = np.array(sorted(data_solutions, key=operator.itemgetter(0, 1)))
# `with` closes the output file even on error (the original used bare
# open/close); writelines avoids the quadratic string `+=` accumulation.
with open(all_solutions_filename, 'w') as sol_write:
    sol_write.writelines(
        '{0};{1};{2}\n'.format(entry[0], entry[1], entry[2])
        for entry in data_solutions
    )
|
import FWCore.ParameterSet.Config as cms
# Fixed-grid rho (pileup energy density) producer for muon isolation at HLT.
hltFixedGridRhoFastjetAllCaloForMuons = cms.EDProducer("FixedGridRhoProducerFastjet",
    gridSpacing = cms.double(0.55),
    maxRapidity = cms.double(2.5),
    # NOTE(review): despite the parameter name, the input here is the calo
    # tower collection, not PF candidates — confirm the producer accepts
    # the tag generically.
    pfCandidatesTag = cms.InputTag("hltTowerMakerForAll")
)
|
from typing import Dict, ClassVar, Any
from qcodes.instrument_drivers.Lakeshore.lakeshore_base import (
LakeshoreBase, BaseOutput, BaseSensorChannel)
from qcodes.instrument.group_parameter import GroupParameter, Group
import qcodes.utils.validators as vals
# There are 16 sensors channels (a.k.a. measurement inputs) in Model 372
_n_channels = 16
class Output_372(BaseOutput):
    """Class for control outputs (heaters) of model 372"""
    # Heater mode codes used with the OUTMODE command (see output_group).
    MODES: ClassVar[Dict[str, int]] = {
        'off': 0,
        'monitor_out': 1,
        'open_loop': 2,
        'zone': 3,
        'still': 4,
        'closed_loop': 5,
        'warm_up': 6}
    # Output polarity codes (unipolar/bipolar) for OUTMODE.
    POLARITIES: ClassVar[Dict[str, int]] = {
        'unipolar': 0,
        'bipolar': 1}
    # Heater excitation-current range codes — presumably used with the
    # instrument's RANGE command; confirm against the 372 manual.
    RANGES: ClassVar[Dict[str, int]] = {
        'off': 0,
        '31.6μA': 1,
        '100μA': 2,
        '316μA': 3,
        '1mA': 4,
        '3.16mA': 5,
        '10mA': 6,
        '31.6mA': 7,
        '100mA': 8}
    # The controlled input is one of the 16 measurement channels.
    _input_channel_parameter_kwargs: ClassVar[Dict[str, Any]] = {
        'get_parser': int,
        'vals': vals.Numbers(1, _n_channels)}
    def __init__(self, parent, output_name, output_index) -> None:
        super().__init__(parent, output_name, output_index, has_pid=True)
        # Add more parameters for OUTMODE command
        # and redefine the corresponding group
        self.add_parameter('polarity',
                           label='Output polarity',
                           docstring='Specifies output polarity (not '
                                     'applicable to warm-up heater)',
                           val_mapping=self.POLARITIES,
                           parameter_class=GroupParameter)
        self.add_parameter('use_filter',
                           label='Use filter for readings',
                           docstring='Specifies controlling on unfiltered or '
                                     'filtered readings',
                           val_mapping={True: 1, False: 0},
                           parameter_class=GroupParameter)
        self.add_parameter('delay',
                           label='Delay',
                           unit='s',
                           docstring='Delay in seconds for setpoint change '
                                     'during Autoscanning',
                           vals=vals.Ints(0, 255),
                           get_parser=int,
                           parameter_class=GroupParameter)
        # All OUTMODE fields are set/read atomically as one group.
        self.output_group = Group([self.mode, self.input_channel,
                                   self.powerup_enable, self.polarity,
                                   self.use_filter, self.delay],
                                  set_cmd=f'OUTMODE {output_index}, {{mode}}, '
                                          f'{{input_channel}}, '
                                          f'{{powerup_enable}}, {{polarity}}, '
                                          f'{{use_filter}}, {{delay}}',
                                  get_cmd=f'OUTMODE? {output_index}')
        # Tighten the PID validators to these numeric ranges — presumably
        # the instrument's accepted limits; confirm against the manual.
        self.P.vals = vals.Numbers(0.0, 1000)
        self.I.vals = vals.Numbers(0.0, 10000)
        self.D.vals = vals.Numbers(0, 2500)
class Model_372_Channel(BaseSensorChannel):
    """Measurement input channel of the Model 372, configured through the
    INSET (channel parameters) and INTYPE (input setup) command groups."""
    # Bit-encoded sensor status flags — presumably decoded from the
    # instrument's reading-status query; confirm against BaseSensorChannel.
    SENSOR_STATUSES = {0: 'OK',
                       1: 'CS OVL',
                       2: 'VCM OVL',
                       4: 'VMIX OVL',
                       8: 'VDIF OVL',
                       16: 'R. OVER',
                       32: 'R. UNDER',
                       64: 'T. OVER',
                       128: 'T. UNDER'}
    def __init__(self, parent, name, channel):
        super().__init__(parent, name, channel)
        # Parameters related to Input Channel Parameter Command (INSET)
        self.add_parameter('enabled',
                           label='Enabled',
                           docstring='Specifies whether the input/channel is '
                                     'enabled or disabled. At least one '
                                     'measurement input channel must be '
                                     'enabled. If all are configured to '
                                     'disabled, channel 1 will change to '
                                     'enabled.',
                           val_mapping={True: 1, False: 0},
                           parameter_class=GroupParameter)
        self.add_parameter('dwell',
                           label='Dwell',
                           docstring='Specifies a value for the autoscanning '
                                     'dwell time.',
                           unit='s',
                           get_parser=int,
                           vals=vals.Numbers(1, 200),
                           parameter_class=GroupParameter)
        self.add_parameter('pause',
                           label='Change pause time',
                           docstring='Specifies a value for '
                                     'the change pause time',
                           unit='s',
                           get_parser=int,
                           vals=vals.Numbers(3, 200),
                           parameter_class=GroupParameter)
        self.add_parameter('curve_number',
                           label='Curve',
                           docstring='Specifies which curve the channel uses: '
                                     '0 = no curve, 1 to 59 = standard/user '
                                     'curves. Do not change this parameter '
                                     'unless you know what you are doing.',
                           get_parser=int,
                           vals=vals.Numbers(0, 59),
                           parameter_class=GroupParameter)
        self.add_parameter('temperature_coefficient',
                           label='Change pause time',
                           docstring='Sets the temperature coefficient that '
                                     'will be used for temperature control if '
                                     'no curve is selected (negative or '
                                     'positive). Do not change this parameter '
                                     'unless you know what you are doing.',
                           val_mapping={'negative': 1, 'positive': 2},
                           parameter_class=GroupParameter)
        # All INSET fields are set/read atomically as one group.
        self.output_group = Group([self.enabled, self.dwell, self.pause,
                                   self.curve_number,
                                   self.temperature_coefficient],
                                  set_cmd=f'INSET {self._channel}, '
                                          f'{{enabled}}, {{dwell}}, {{pause}}, '
                                          f'{{curve_number}}, '
                                          f'{{temperature_coefficient}}',
                                  get_cmd=f'INSET? {self._channel}')
        # Parameters related to Input Setup Command (INTYPE)
        self.add_parameter('excitation_mode',
                           label='Excitation mode',
                           docstring='Specifies excitation mode',
                           val_mapping={'voltage': 0, 'current': 1},
                           parameter_class=GroupParameter)
        # The allowed values for this parameter change based on the value of
        # the 'excitation_mode' parameter. Moreover, there is a table in the
        # manual that assigns the numbers to particular voltage/current ranges.
        # Once this parameter is heavily used, it can be implemented properly
        # (i.e. using val_mapping, and that val_mapping is updated based on the
        # value of 'excitation_mode'). At the moment, this parameter is added
        # only because it is a part of a group.
        self.add_parameter('excitation_range_number',
                           label='Excitation range number',
                           docstring='Specifies excitation range number '
                                     '(1-12 for voltage excitation, 1-22 for '
                                     'current excitation); refer to the manual '
                                     'for the table of ranges',
                           get_parser=int,
                           vals=vals.Numbers(1, 22),
                           parameter_class=GroupParameter)
        self.add_parameter('auto_range',
                           label='Auto range',
                           docstring='Specifies auto range setting',
                           val_mapping={'off': 0, 'current': 1},
                           parameter_class=GroupParameter)
        # Resistance measurement range codes (value 1..22).
        self.add_parameter('range',
                           label='Range',
                           val_mapping={'2.0 mOhm': 1,
                                        '6.32 mOhm': 2,
                                        '20.0 mOhm': 3,
                                        '63.2 mOhm': 4,
                                        '200 mOhm': 5,
                                        '632 mOhm': 6,
                                        '2.00 Ohm': 7,
                                        '6.32 Ohm': 8,
                                        '20.0 Ohm': 9,
                                        '63.2 Ohm': 10,
                                        '200 Ohm': 11,
                                        '632 Ohm': 12,
                                        '2.00 kOhm': 13,
                                        '6.32 kOhm': 14,
                                        '20.0 kOhm': 15,
                                        '63.2 kOhm': 16,
                                        '200 kOhm': 17,
                                        '632 kOhm': 18,
                                        '2.0 MOhm': 19,
                                        '6.32 MOhm': 20,
                                        '20.0 MOhm': 21,
                                        '63.2 MOhm': 22},
                           parameter_class=GroupParameter)
        self.add_parameter('current_source_shunted',
                           label='Current source shunt',
                           docstring='Current source either not shunted '
                                     '(excitation on), or shunted '
                                     '(excitation off)',
                           val_mapping={False: 0, True: 1},
                           parameter_class=GroupParameter)
        self.add_parameter('units',
                           label='Preferred units',
                           docstring='Specifies the preferred units parameter '
                                     'for sensor readings and for the control '
                                     'setpoint (kelvin or ohms)',
                           val_mapping={'kelvin': 1, 'ohms': 2},
                           parameter_class=GroupParameter)
        # All INTYPE fields are set/read atomically as one group.
        self.output_group = Group([self.excitation_mode,
                                   self.excitation_range_number,
                                   self.auto_range, self.range,
                                   self.current_source_shunted, self.units],
                                  set_cmd=f'INTYPE {self._channel}, '
                                          f'{{excitation_mode}}, '
                                          f'{{excitation_range_number}}, '
                                          f'{{auto_range}}, {{range}}, '
                                          f'{{current_source_shunted}}, '
                                          f'{{units}}',
                                  get_cmd=f'INTYPE? {self._channel}')
class Model_372(LakeshoreBase):
    """
    Lakeshore Model 372 Temperature Controller Driver
    Note that interaction with the control input (referred to as 'A' in the
    Computer Interface Operation section of the manual) is not implemented.
    """
    # Map channel names 'ch01'..'ch16' to the instrument channel numbers.
    channel_name_command: Dict[str, str] = {'ch{:02}'.format(i): str(i)
                                            for i in range(1, 1 + _n_channels)}
    input_channel_parameter_values_to_channel_name_on_instrument = {
        i: f'ch{i:02}' for i in range(1, 1 + _n_channels)
    }
    CHANNEL_CLASS = Model_372_Channel
    def __init__(self, name: str, address: str, **kwargs) -> None:
        super().__init__(name, address, **kwargs)
        # The three control outputs, by hardware output index 0/1/2.
        self.sample_heater = Output_372(self, 'sample_heater', 0)
        self.warmup_heater = Output_372(self, 'warmup_heater', 1)
        self.analog_heater = Output_372(self, 'analog_heater', 2)
|
from django.db import models
# Create your models here.
class Common(models.Model):
    """Abstract base model adding a creation timestamp to every subclass."""
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        abstract = True
class Game(Common):
    """One game round, identified by an external game_id and paid in bitcoin."""
    game_id = models.CharField(primary_key=True, max_length=100, blank=False)
    # Wallet address associated with the game — presumably the player's
    # payment wallet; confirm against the wallet app.
    wallet_number = models.CharField(max_length=100, blank=False)
    bitcoin_amount = models.DecimalField(max_digits=10, decimal_places=5, default=0)
    # deposit_id = models.ForeignKey('wallet.Deposit', on_delete=models.DO_NOTHING)
    finished = models.BooleanField(default=False)
    winner = models.BooleanField(default=False)
    # Marks rounds created for testing rather than real play.
    test = models.BooleanField(default=False)
    def __str__(self):
        return '%s' % (self.game_id)
class GameCard(Common):
    """A single card (door) belonging to a Game round."""
    card_id = models.CharField(primary_key=True, max_length=100, blank=False)
    game = models.ForeignKey('montyhall.Game', on_delete=models.DO_NOTHING)
    # True once the player has picked this card.
    selected = models.BooleanField(default=False)
    # True if this card hides the prize.
    prize = models.BooleanField(default=False)
    # True once the card has been turned face-up.
    reveal = models.BooleanField(default=False)
    def __str__(self):
        return '%s' % (self.card_id)
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class QueryPriority(GenericTypeCode):
    """
    v3.QueryPriority
    From: http://terminology.hl7.org/ValueSet/v3-QueryPriority in v3-codesystems.xml
        **** MISSING DEFINITIONS ****
    """
    def __init__(self, value: AutoMapperTextInputType):
        """Wrap a raw code value in the QueryPriority coded type."""
        super().__init__(value=value)
    """
    http://terminology.hl7.org/CodeSystem/v3-QueryPriority
    """
    # Canonical FHIR URI of the code system these codes belong to.
    codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-QueryPriority"
class QueryPriorityValues:
"""
Query response is deferred.
From: http://terminology.hl7.org/CodeSystem/v3-QueryPriority in v3-codesystems.xml
"""
Deferred = QueryPriority("D")
"""
Query response is immediate.
From: http://terminology.hl7.org/CodeSystem/v3-QueryPriority in v3-codesystems.xml
"""
Immediate = QueryPriority("I")
|
#!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from enum import Enum
class SurveyQuestionType(Enum):
    """Kinds of answers a survey question can collect (auto-generated)."""
    BOOL = "BOOL"
    EMAIL = "EMAIL"
    COORDS = "COORDS"
    PHONE = "PHONE"
    TEXT = "TEXT"
    TEXTAREA = "TEXTAREA"
    PHOTO = "PHOTO"
    WIFI = "WIFI"
    CELLULAR = "CELLULAR"
    FLOAT = "FLOAT"
    INTEGER = "INTEGER"
    DATE = "DATE"
    MISSING_ENUM = ""
    @classmethod
    def _missing_(cls, value: object) -> "SurveyQuestionType":
        # Map any unrecognised value to MISSING_ENUM instead of raising
        # ValueError, so deserialising unknown server values never fails.
        return cls.MISSING_ENUM
|
import argparse
import pandas as pd
import upload_plan
def arguments():
    """Parse and return the command-line arguments for this script."""
    parser = argparse.ArgumentParser(
        description='Upload multiple communication plans to Slate.')
    parser.add_argument("-l", "--List", action="store",
                        help="CSV of Plans", required=True)
    return parser.parse_args()
def main(list_file):
    """ Batch upload plans to Slate.

    :param list_file: path to a CSV with a 'Plans' column of plan paths.
    """
    # Attempt to open the provided list file.
    try:
        list_df = pd.read_csv(list_file, encoding="UTF-8")
    except FileNotFoundError:
        upload_plan.exit_with_error("Unable to locate provided file. Check path and try again.")
    # Loop through values.
    for plan in list_df.Plans:
        print("Uploading", plan)
        upload_plan.main(plan)
if __name__ == "__main__":
# Parse CLI arguments
ARGS = arguments()
# Call main function with argument data.
main(ARGS.List) |
from django import forms
from django.contrib.auth.models import User
from .models import Post
class UserForm(forms.ModelForm):
    """Registration form for Django's auth User.

    NOTE(review): ModelForm saves the `password` field verbatim — the view
    must hash it via `user.set_password(...)` before saving; confirm, and
    consider `widget=forms.PasswordInput` for the rendered field.
    """
    email = forms.EmailField(required=True)
    class Meta:
        model = User
        fields = ['username', 'email', 'password']
    def clean_email(self):
        """Reject an email already used by a *different* username."""
        email = self.cleaned_data.get('email')
        username = self.cleaned_data.get('username')
        if email and User.objects.filter(email=email).exclude(username=username).count():
            raise forms.ValidationError(u'Email addresses must be unique.')
        return email
class UserUpdateForm(forms.ModelForm):
    """Profile-update form: username/password optional, email uniqueness
    enforced the same way as in UserForm."""
    username = forms.CharField(required=False)
    password = forms.CharField(required=False)
    class Meta:
        model = User
        fields = ['username', 'email', 'password']
    def clean_email(self):
        """Reject an email already used by a *different* username."""
        email = self.cleaned_data.get('email')
        username = self.cleaned_data.get('username')
        if email and User.objects.filter(email=email).exclude(username=username).count():
            raise forms.ValidationError(u'Email addresses must be unique.')
        return email
class PostForm(forms.ModelForm):
    """Form for creating a Post; only the message body is user-supplied."""
    message = forms.CharField(required=True)
    class Meta:
        model = Post
        fields = ['message']
|
# Copyright (C) 2019 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import sys
import copy
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'library'))
from scenario_item import HwInfo, VmInfo
import board_cfg_lib
import scenario_cfg_lib
import vm_configurations_c
import vm_configurations_h
import pci_dev_c
import common
ACRN_PATH = common.SOURCE_ROOT_DIR
ACRN_CONFIG_DEF = ACRN_PATH + 'hypervisor/scenarios/'
GEN_FILE = ["vm_configurations.h", "vm_configurations.c", "pci_dev.c"]
def get_scenario_item_values(board_info, scenario_info):
    """
    Get items which capable multi select for user
    :param board_info: it is a file what contains board information for script to read from
    :param scenario_info: it is a file what contains scenario information for script to read from
    :return: dictionary of selectable values keyed by scenario item path
    """
    scenario_item_values = {}
    hw_info = HwInfo(board_info)
    # get vm count
    # Point the shared config-file globals at the files under processing.
    common.BOARD_INFO_FILE = board_info
    common.SCENARIO_INFO_FILE = scenario_info
    common.get_vm_num(scenario_info)
    # pre scenario
    # Copy before mutating so the shared GUEST_FLAG list is not modified.
    guest_flags = copy.deepcopy(scenario_cfg_lib.GUEST_FLAG)
    guest_flags.remove('0UL')
    scenario_item_values["vm,vcpu_affinity"] = hw_info.get_processor_val()
    scenario_item_values["vm,guest_flags"] = guest_flags
    scenario_item_values["vm,clos"] = hw_info.get_clos_val()
    scenario_item_values["vm,severity"] = scenario_cfg_lib.VM_SEVERITY
    scenario_item_values["vm,os_config,kern_type"] = scenario_cfg_lib.KERN_TYPE_LIST
    scenario_item_values.update(scenario_cfg_lib.avl_vuart_ui_select(scenario_info))
    # pre board_private
    scenario_item_values["vm,board_private,rootfs"] = board_cfg_lib.get_rootfs(board_info)
    scenario_item_values["vm,board_private,console"] = board_cfg_lib.get_ttys_info(board_info)
    # os config
    scenario_item_values["vm,os_config,rootfs"] = board_cfg_lib.get_rootfs(board_info)
    return scenario_item_values
def validate_scenario_setting(board_info, scenario_info):
    """
    This is validate the data setting from scenario xml
    :param board_info: it is a file what contains board information for script to read from
    :param scenario_info: it is a file what user have already setting to
    :return: return a dictionary contain errors
    """
    # Reset the shared error list so previous runs don't leak into this one.
    scenario_cfg_lib.ERR_LIST = {}
    common.BOARD_INFO_FILE = board_info
    common.SCENARIO_INFO_FILE = scenario_info
    vm_info = VmInfo(board_info, scenario_info)
    # check_item() appends any validation failures to scenario_cfg_lib.ERR_LIST.
    vm_info.get_info()
    vm_info.check_item()
    return (scenario_cfg_lib.ERR_LIST, vm_info)
def main(args):
    """
    This is main function to start generate source code related with board
    :param args: it is a command line args for the script
    :return: error dictionary; empty when generation succeeded
    """
    err_dic = {}
    (err_dic, board_info_file, scenario_info_file, output_folder) = common.get_param(args)
    if err_dic:
        return err_dic
    if output_folder:
        common.ACRN_CONFIG_TARGET = os.path.abspath(output_folder) + '/'
    # check env
    err_dic = common.prepare()
    if err_dic:
        return err_dic
    common.BOARD_INFO_FILE = board_info_file
    common.SCENARIO_INFO_FILE = scenario_info_file
    # get scenario name
    (err_dic, scenario) = common.get_scenario_name()
    if err_dic:
        return err_dic
    # check if this is the scenario config which matched board info
    (err_dic, status) = common.is_config_file_match()
    if not status:
        err_dic['scenario config: Not match'] = "The board xml and scenario xml should be matched!"
        return err_dic
    if common.ACRN_CONFIG_TARGET:
        scenario_dir = common.ACRN_CONFIG_TARGET + scenario + '/'
    else:
        scenario_dir = ACRN_CONFIG_DEF + scenario + '/'
    common.mkdir(scenario_dir)
    vm_config_h = scenario_dir + GEN_FILE[0]
    vm_config_c = scenario_dir + GEN_FILE[1]
    pci_config_c = scenario_dir + GEN_FILE[2]
    # parse the scenario.xml
    get_scenario_item_values(board_info_file, scenario_info_file)
    (err_dic, vm_info) = validate_scenario_setting(board_info_file, scenario_info_file)
    if err_dic:
        # Typo fix: message previously read "failue".
        common.print_red("Validate the scenario item failed", err=True)
        return err_dic
    # get kata vm count
    if scenario != "logical_partition":
        scenario_cfg_lib.KATA_VM_COUNT = common.VM_COUNT - scenario_cfg_lib.DEFAULT_VM_COUNT[scenario]
        if scenario_cfg_lib.KATA_VM_COUNT > 1:
            err_dic['scenario config: kata vm count err'] = "Only one kata vm is supported!"
            return err_dic
    # generate vm_configuration.h
    with open(vm_config_h, 'w') as config:
        vm_configurations_h.generate_file(scenario, vm_info, config)
    # generate vm_configuration.c
    with open(vm_config_c, 'w') as config:
        err_dic = vm_configurations_c.generate_file(scenario, vm_info, config)
        if err_dic:
            return err_dic
    # generate pci_dev.c if scenario is logical_partition
    if scenario == 'logical_partition':
        with open(pci_config_c, 'w') as config:
            pci_dev_c.generate_file(config)
    if not err_dic:
        print("Scenario configurations for {} is generated successfully.".format(scenario))
    else:
        print("Scenario configurations for {} is generated failed.".format(scenario))
    return err_dic
def ui_entry_api(board_info, scenario_info):
    """UI entry point: build a CLI-style argument list and run main()."""
    cli_args = ['board_cfg_gen.py', '--board', board_info,
                '--scenario', scenario_info]
    err_dic = common.prepare()
    if err_dic:
        return err_dic
    return main(cli_args)
if __name__ == '__main__':
ARGS = sys.argv
err_dic = main(ARGS)
if err_dic:
for err_k, err_v in err_dic.items():
common.print_red("{}: {}".format(err_k, err_v), err=True)
|
"""aniffinity class."""
from . import calcs
from . import endpoints
from . import models
from .exceptions import NoAffinityError
class Aniffinity:
"""
The Aniffinity class.
The purpose of this class is to store a "base user"'s scores, so
affinity with other users can be calculated easily.
For the username ``Josh`` on the service ``AniList``, the class can
be initialised as follows:
.. code-block:: python
from aniffinity import Aniffinity
af = Aniffinity("Josh", base_service="AniList")
There are multiple ways of specifying this information, and multiple
ways to initialise this class. For more info, read the documentation.
The instance, stored in ``af``, will now hold ``Josh``'s scores.
:meth:`.comparison` and :meth:`.calculate_affinity` can now be called,
to perform operations on this data.
"""
    def __init__(self, base_user=None, base_service=None, round=False):
        """
        Initialise an instance of ``Aniffinity``.
        The information required to retrieve a users' score from a service
        are their "username" and "service". For a list of "service"s,
        read the documentation.
        .. note::
            As this applies to the "base" user, the params used are
            ``base_user`` and ``base_service`` respectively.
        There are multiple ways of specifying the above information,
        and multiple aliases for services that can be used as shorthand.
        As docstrings are annoying to write, please refer to the
        documentation for a list of these. For an example of the simplest
        method to use, refer to the docstring for the :class:`Aniffinity`
        class.
        .. note::
            To avoid dealing with dodgy globals, this class MAY be
            initialised without the ``base_user`` argument, in the global
            scope (if you wish), but :meth:`.init` MUST be called sometime
            afterwards, with a ``base_user`` and ``base_service`` passed,
            before affinity calculations take place.
            Example (for the username ``Josh`` on the service ``AniList``):
            .. code-block:: python
                from aniffinity import Aniffinity
                af = Aniffinity()
                af.init("Josh", base_service="AniList")
            The class should then be good to go.
        :param base_user: Base user
        :type base_user: str or tuple
        :param base_service: The service to use. If no value is specified
            for this param, specify the service in the ``base_user`` param,
            either as part of a url regex, or in a tuple
        :type base_service: str or None
        :param round: Decimal places to round affinity values to.
            Specify ``False`` for no rounding
        :type round: int or False
        """
        # Populated by .init(); empty until a base user has been loaded.
        self._base_username = None
        self._base_service = None
        self._base_scores = {}
        self._round = round
        if base_user:
            self.init(base_user, base_service)
def __repr__(self): # noqa: D105 # pragma: no cover
return "{}(base_username={!r}, base_service={!r}, round={!r})" \
.format(self.__class__.__name__, self._base_username,
self._base_service, self._round)
def init(self, base_user, base_service=None):
"""
Retrieve a "base user"'s list, and store it in :attr:`._base_scores`.
:param base_user: Base user
:type base_user: str or tuple
:param base_service: The service to use. If no value is specified
for this param, specify the service in the ``base_user`` param,
either as part of a url regex, or in a tuple
:type base_service: str or None
"""
# Figure out the service ourselves, instead of just passing this to
# `endpoints.main` (and letting it handle everything), as we want
# to set `self._base_service`.
base_username, base_service = \
endpoints._resolve_service(base_user, base_service)
self._base_username = base_username
self._base_service = base_service
self._base_scores = endpoints._main(base_username, base_service)
return self
def comparison(self, user, service=None):
"""
Get a comparison of scores between the "base user" and ``user``.
A Key-Value returned will consist of the following:
.. code-block:: none
{
"ANIME_ID": [BASE_USER_SCORE, OTHER_USER_SCORE],
...
}
Example:
.. code-block:: none
{
"30831": [3, 8],
"31240": [4, 7],
"32901": [1, 5],
...
}
.. note::
The ``ANIME_ID`` s will be the MyAnimeList anime ids. As annoying
as it is, cross-compatibility is needed between services to get
this module to work, and MAL ids are the best ones to use as other
APIs are able to specify it. If you wish to use the anime ids for
the service you specified, set the param
``<TO BE IMPLEMENTED>`` to ``<TO BE IMPLEMENTED>``.
:param user: The user to compare the base users' scores to.
:type user: str or tuple
:param service: The service to use. If no value is specified
for this param, specify the service in the ``user`` param,
either as part of a url regex, or in a tuple
:type service: str or None
:return: Mapping of ``id`` to ``score`` as described above
:rtype: dict
"""
# Check if there's actually a base user to compare scores with.
if not self._base_username or not self._base_scores:
raise Exception("No base user has been specified. Call the `init` "
"function to retrieve a base users' scores")
user_list = endpoints._main(user, service)
comparison_dict = {}
for key in (self._base_scores.keys() & user_list.keys()):
comparison_dict[key] = [self._base_scores[key], user_list[key]]
return comparison_dict
def calculate_affinity(self, user, service=None):
"""
Get the affinity between the "base user" and ``user``.
.. note::
The data returned will be a namedtuple, with the affinity
and shared rated anime. This can easily be separated
as follows:
.. code-block:: python
affinity, shared = af.calculate_affinity(...)
Alternatively, the following also works:
.. code-block:: python
affinity = af.calculate_affinity(...)
with the affinity and shared available as ``affinity.value`` and
``affinity.shared`` respectively.
.. note::
The final affinity value may or may not be rounded, depending on
the value of :attr:`._round`, set at class initialisation.
:param user: The user to calculate affinity with.
:type user: str or tuple
:param service: The service to use. If no value is specified
for this param, specify the service in the ``user`` param,
either as part of a url regex, or in a tuple
:type service: str or None
:return: (float affinity, int shared)
:rtype: tuple
"""
scores = self.comparison(user, service)
# Handle cases where the shared scores are <= 10 so
# affinity can not be accurately calculated.
if len(scores) <= 10:
raise NoAffinityError("Shared rated anime count between "
"`{}` and `{}` is less than eleven"
.format(self._base_username, user))
# Sort multiple rows of scores into two arrays for calculations.
# E.G. [1,2], [3,4], [5,6] to [1,3,5], [2,4,6]
values = scores.values()
scores1, scores2 = list(zip(*values))
pearson = calcs.pearson(scores1, scores2)
pearson *= 100
if self._round is not False:
pearson = round(pearson, self._round)
return models.Affinity(value=pearson, shared=len(scores))
|
from Calculator.Sqrt import sqrt
from Stats.VarP import variance
def samplestddev(num):
    """Return the sample standard deviation of *num*, rounded to 5 places.

    On a zero-division or invalid-data error from the underlying variance
    or square-root computation, a message is printed and ``None`` is
    returned implicitly.
    """
    try:
        return round(sqrt(variance(num)), 5)
    except ZeroDivisionError:
        print("Can't Divide by 0 Error")
    except ValueError:
        print("Please Check your data inputs")
import subprocess
import os
from ..qtim_utilities.nifti_util import save_numpy_2_nifti
from ..qtim_utilities.format_util import convert_input_2_numpy
def run_dtifit(input_data, input_bvec, input_bval, input_mask='', output_fileprefix='', method="fsl", command="fsl5.0-dtifit", temp_dir='./'):
    """ Fit diffusion tensors by shelling out to FSL's ``dtifit``.

        This will fail if used in numpy mode, currently: a numpy
        ``input_data`` is written to a temporary NIfTI file, but the bvec,
        bval and mask arguments must still be filenames.

        input_data: filename of the DWI volume, or a numpy array (saved to
            ``temp_dir`` first).
        input_bvec, input_bval: gradient/b-value files, passed via -r / -b.
        input_mask: brain-mask filename, passed via -m.
        output_fileprefix: dtifit output prefix (-o). When empty, a
            temporary prefix is used and the result is loaded and returned
            as a numpy array.
        method: backend selector; only 'fsl' is supported.
        command: the dtifit executable to invoke.
        temp_dir: directory for temporary input/output files.

        Returns a numpy array only in the temporary-output case; otherwise
        returns None (dtifit writes its outputs to ``output_fileprefix``).
    """
    motion_correction_methods = ['fsl']
    if method not in motion_correction_methods:
        print 'Input \"method\" parameter is not available. Available methods: ', motion_correction_methods
        return
    if method == 'fsl':
        # A good reason to have a Class for qtim methods is to cut through all of this extra code.
        temp_input, temp_output = False, False
        # Numpy input: serialise to a temporary NIfTI so dtifit can read it.
        if not isinstance(input_data, basestring):
            input_filename = os.path.join(temp_dir, 'temp.nii.gz')
            save_numpy_2_nifti(input_data, input_filename)
            temp_input = True
        else:
            input_filename = input_data
        # No output prefix given: write to a temp location and return data.
        if output_fileprefix == '':
            temp_output = True
            output_fileprefix = os.path.join(temp_dir, 'temp_out.nii.gz')
        # TODO: Figure out what last parameter, reference number, means.
        # Echo the exact command line for debugging before running it.
        print ' '.join([command, '-V', '--sse', '-k', input_filename, '-o', output_fileprefix, '-m', input_mask, '-r', input_bvec, '-b', input_bval])
        subprocess.call([command, '-V', '--sse', '-k', input_filename, '-o', output_fileprefix, '-m', input_mask, '-r', input_bvec, '-b', input_bval])
        # Clean up the temporary input file, if we created one.
        if temp_input:
            os.remove(input_filename)
            pass
        # Temporary output: load it back into memory and delete the file.
        if temp_output:
            output = convert_input_2_numpy(output_fileprefix)
            os.remove(output_fileprefix)
            return output
def run_test():
    """Placeholder smoke-test hook; intentionally does nothing."""
    return None
if __name__ == '__main__':
    # Script entry point: runs the (currently empty) smoke test.
    run_test()
''' modélisation de la propagation d'un virus '''
""" importation """
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
import random as rd
import time
from scipy.spatial import distance
def distance_e(x, y):
    """Euclidean distance between two points of the Cartesian plane.

    Only the first two coordinates of each point are used.
    """
    point_a = [x[0], x[1]]
    point_b = [y[0], y[1]]
    return distance.euclidean(point_a, point_b)
def remove_(a, l):
    """Remove each element of ``l`` from ``a`` (in place; first occurrence
    only, like ``list.remove``) and return a shallow copy of ``a``.

    Used to drop the newly infected points from the healthy list.
    """
    for item in l:
        a.remove(item)
    return list(a)
def chance_infecte(p):
    """Return True with probability ``p`` (``p`` in [0, 1]).

    BUG FIX: the original compared ``rd.randint(0, 100)`` (101 equally
    likely values, bounds inclusive) against ``int(p * 100)``, which
    yields probability ``(int(p * 100) + 1) / 101`` instead of ``p`` —
    in particular there was a 1/101 chance of infection even for p == 0.
    Drawing a uniform float from [0, 1) gives the exact probability.
    """
    return rd.random() < p
def immuniser(l, l2, p):
    """With probability ``p``, move each infected point to the immune list.

    :param l: current infected points (mutated in place: immunised points
        are removed from it)
    :param l2: previously immunised points (carried over into the result)
    :param p: immunisation probability, in [0, 1]
    :return: tuple ``(remaining infected, all immunised)``

    BUG FIX: ``rd.randint(0, 100) <= int(p * 100)`` fired with probability
    ``(int(p * 100) + 1) / 101`` rather than ``p`` (e.g. p == 0 could still
    immunise); a uniform float draw is exact. The per-iteration
    ``int(p * 100)`` computation is gone with it.
    """
    coord_immu = []
    # Iterate over a copy: we mutate ``l`` while looping.
    for point in l[:]:
        if rd.random() < p:
            coord_immu.append(point)
            l.remove(point)
    coord_immu += l2  # carry over previously immunised points
    return list(l), coord_immu
def deces(l, l2, l3, p):
    """With probability ``p``, move each non-immune infected point to the
    deceased list.

    :param l: current infected points (mutated in place: deceased points
        are removed from it)
    :param l2: previous deaths (carried over into the result)
    :param l3: immunised points — an immunised point never dies here
    :param p: death probability, in [0, 1]
    :return: tuple ``(remaining infected, all deceased)``

    BUG FIX: same probability skew as ``chance_infecte``/``immuniser`` —
    ``rd.randint(0, 100) <= int(p * 100)`` is not probability ``p``; a
    uniform float draw is exact.
    """
    coord_deces = []
    # Iterate over a copy: we mutate ``l`` while looping.
    for point in l[:]:
        if rd.random() < p and point not in l3:
            coord_deces.append(point)
            l.remove(point)
    coord_deces += l2  # carry over previous deaths
    return list(l), coord_deces
""" Afficher la vague où le virus ne circule plus, avec les graphiques """
## version non optimisée, voir version plotly
def vague_seuil(nb_individu, var_population, rayon_contamination, infectiosite, p, d):
    """Simulate epidemic waves until the virus stops circulating, then plot.

    Runs successive contamination "waves" over a random 2-D population
    (see the while-loop condition) and displays: a scatter plot of the
    final state, a pie chart of the class proportions, and the evolution
    curves of each class.

    Recommended values: nb_individu=500, var_population=2,
    rayon_contamination=2, infectiosite=0.7, p=0.4, d=0.3.

    NOTE: if the curves stay flat, increase the contamination radius;
    a very lethal virus will not propagate much.

    :param nb_individu: population size (must be >= 10)
    :param var_population: cluster standard deviation for make_blobs (> 0)
    :param rayon_contamination: contamination radius (> 0)
    :param infectiosite: infection probability on contact, in [0, 1]
    :param p: probability for an infected point to become immune, in [0, 1]
    :param d: probability for an infected point to die, in [0, 1]
    :return: an error string when a parameter is invalid, otherwise None
        (results are shown via matplotlib)
    """
    # --- parameter validation ---------------------------------------
    if nb_individu < 10 or var_population <= 0 or rayon_contamination <= 0:
        return 'error, nb_individu and var_population and rayon_contamination must be >=10 and > 0'
    if infectiosite < 0 or infectiosite > 1:
        return 'error, infectiosité must be in [0,1]'
    if p < 0 or p > 1:
        return 'error, p must be in [0,1]'
    # BUG FIX: the original tested `p > 1` here instead of `d > 1`.
    if d < 0 or d > 1:
        return 'error, d must be in [0,1]'
    # --- figures ------------------------------------------------------
    ax = plt.figure()
    ax1 = ax.add_subplot(1, 2, 1)
    ax2 = ax.add_subplot(1, 2, 2)
    # final evolution curves (one value appended per wave)
    courbe_sains = []
    courbe_infectes = []
    courbe_immunises = []
    courbe_deces = []
    # --- dataset --------------------------------------------------------
    x, y = make_blobs(n_samples=nb_individu, centers=3, cluster_std=var_population)  # création du dataset
    taille_pop = len(x)
    # BUG FIX: randint bounds are inclusive; the original upper bound of
    # `taille_pop` could raise IndexError one time in taille_pop+1.
    numero_infecte_1 = rd.randint(0, taille_pop - 1)  # first infected point, chosen at random
    coord_1er_infecte = [x[:, 0][numero_infecte_1], x[:, 1][numero_infecte_1]]  # its coordinates
    courbe_sains.append(taille_pop - 1)
    courbe_infectes.append(1)
    courbe_immunises.append(0)
    courbe_deces.append(0)
    # --- first wave -------------------------------------------------------
    coord_infectes = []  # receives the newly infected points
    coord_sains = []  # healthy points
    for k in range(taille_pop):
        # Index comparison instead of comparing coordinate lists.
        if k == numero_infecte_1:
            coord_infectes.append(coord_1er_infecte)
        # BUG FIX: the original called the scipy `distance` MODULE as a
        # function (`distance(...)`), a TypeError at runtime; use the
        # `distance_e` helper defined above.
        elif distance_e(coord_1er_infecte, [x[:, 0][k], x[:, 1][k]]) < rayon_contamination and chance_infecte(
                infectiosite):
            coord_infectes.append([x[:, 0][k], x[:, 1][k]])
        else:
            coord_sains.append([x[:, 0][k], x[:, 1][k]])
    courbe_sains.append(len(coord_sains))
    courbe_infectes.append(len(coord_infectes))
    courbe_immunises.append(0)
    courbe_deces.append(0)
    # --- waves 2..n -------------------------------------------------------
    coord_immunises = []  # initialise
    coord_deces = []
    i = 1
    # Loop until the infected pool is at most 20% of the population
    # (the `< 5` clause keeps the first few waves running).
    while len(coord_infectes) > 0.2 * taille_pop or len(courbe_sains) < 5:
        non_sains = []
        coord_infectes1, coord_immunises = immuniser(coord_infectes, coord_immunises, p)
        coord_infectes, coord_deces = deces(coord_infectes1, coord_deces, coord_immunises, d)
        for k in range(len(coord_infectes)):
            for j in range(len(coord_sains)):
                # Same BUG FIX as above: `distance_e`, not `distance(...)`.
                if distance_e(np.array(coord_infectes)[k, :],
                              np.array(coord_sains)[j, :]) < rayon_contamination and np.array(coord_sains)[j, :] not in np.array(coord_infectes) and chance_infecte(infectiosite):
                    coord_infectes.append(list(np.array(coord_sains)[j, :]))
                    non_sains.append(list(np.array(coord_sains)[j, :]))
        coord_sains = remove_(coord_sains, non_sains)
        # record this wave for the final curves
        courbe_sains.append(len(coord_sains))
        courbe_infectes.append(len(coord_infectes))
        courbe_immunises.append(len(coord_immunises))
        courbe_deces.append(len(coord_deces))
        i += 1  # next wave
    # --- plots -------------------------------------------------------------
    if coord_sains != []:
        ax1.scatter(np.array(coord_sains)[:, 0], np.array(coord_sains)[:, 1], c='dodgerblue')
    if coord_infectes != []:
        ax1.scatter(np.array(coord_infectes)[:, 0], np.array(coord_infectes)[:, 1], c='firebrick')
    if coord_immunises != []:
        ax1.scatter(np.array(coord_immunises)[:, 0], np.array(coord_immunises)[:, 1], c='g')
    if coord_deces != []:
        ax1.scatter(np.array(coord_deces)[:, 0], np.array(coord_deces)[:, 1], c='k')
    titre = str(i) + '-ième vague'
    ax1.set_title(titre, fontsize=8)
    ax1.axis('off')
    ax2.pie([len(coord_infectes), len(coord_sains), len(coord_immunises), len(coord_deces)],
            colors=['firebrick', 'dodgerblue', 'g', 'dimgrey'], labels=('infectés', 'sains', 'immunisés', 'décès'),
            shadow=True, autopct='%1.1f%%')
    plt.figure()
    x_courbe = list(np.arange(0, len(courbe_sains)))
    plt.plot(x_courbe, courbe_sains, label='sains', c='dodgerblue')
    plt.plot(x_courbe, courbe_infectes, label='infectés', c='firebrick')
    plt.plot(x_courbe, courbe_immunises, label='immunisés', c='g')
    plt.plot(x_courbe, courbe_deces, label='décès', c='k')
    plt.legend()
    print(i, 'ième vague')
    plt.show()
|
#!/usr/bin/env python
# Copyright (c) 2013 Ondrej Kipila <ok100 at lavabit dot com>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
import argparse
import os
import runpy
from collections import OrderedDict
# Default configuration. Any top-level name defined in the user's config
# file (see main()) overrides the matching key; unknown names are ignored.
config = {
    'output_dir': os.environ['HOME'],
    'title': 'Start Page',
    'font': ('Monospace', '12pt'),  # (family, size) used by the CSS template
    'separator': '>',  # rendered between the group name and its links
    # (page background, link colour, group-name colour, separator colour)
    'colors': ('#020202', '#999999', '#B3B3B3', '#4C4C4C'),
    # Link groups, rendered in insertion order: group name -> [label, url] pairs.
    'links': OrderedDict([
        ('search', [
            ['google', 'https://www.google.com/'],
            ['duckduckgo', 'http://duckduckgo.com/'],
            ['startpage', 'https://startpage.com/'],
        ]),
        ('media', [
            ['youtube', 'http://www.youtube.com/'],
        ]),
        ('foo', [
            ['wikipedia', 'http://en.wikipedia.org/wiki/Main_Page'],
            ['wallbase', 'http://wallbase.cc/home'],
        ])
    ])
}
def parse_args(argv=None):
    """Parse command-line options.

    :param argv: optional argument list; ``None`` (the default) keeps the
        original behaviour of parsing ``sys.argv[1:]``. Added as a
        backward-compatible generalisation so the parser is testable.
    :return: the parsed ``argparse.Namespace`` with ``config_file`` and
        ``output_dir`` attributes.
    """
    parser = argparse.ArgumentParser(prog='homepage.py')
    parser.add_argument('-c', '--config-file', help='path to an alternate config file')
    parser.add_argument('-o', '--output-dir', help='output directory')
    return parser.parse_args(argv)
def main():
    """Generate ``homepage.html`` and ``style.css`` in the output directory.

    Reads an optional config file (``-c``, defaulting to
    ``~/.config/homepage/homepage.conf``) whose top-level names override
    matching keys of the module-level ``config`` dict, renders the CSS and
    HTML from the configured links, writes both files, and finally runs
    ``tidy`` over the generated HTML.
    """
    args = parse_args()
    config_file = args.config_file or '%s/.config/homepage/homepage.conf' % os.environ['HOME']
    if os.path.exists(config_file):
        # Only names already present in `config` may be overridden.
        config.update((k, v) for k, v in runpy.run_path(config_file).items() if k in config)
    # BUG FIX: the -o/--output-dir option was parsed but never applied.
    # Applied after the config file so the command line wins.
    if args.output_dir:
        config['output_dir'] = args.output_dir
    css = '''html, body {
background-color: %s;
font-family: "%s";
font-weight: normal;
margin-left: 7%%;
height: 100%%;
}
a:link,a:visited,a:active {
text-decoration: none;
color: %s;
font-weight: normal;
}
a:hover {
text-decoration: underline;
color: %s;
font-weight: normal;
}
table {
font-size: %s;
border-spacing: 8px;
}
td {
vertical-align: middle;
}
td:first-child {
font-weight: bold;
color: %s
}
td:nth-child(2) {
font-weight: normal;
color: %s;
}''' % (config['colors'][0], config['font'][0], config['colors'][1], config['colors'][1],
        config['font'][1], config['colors'][2], config['colors'][3])
    # One table row per link group: name | separator | links.
    links_html = ''
    for group in config['links']:
        links_html += '<tr><td align="right">%s</td><td>%s</td><td>' % (group, config['separator'])
        for site in config['links'][group]:
            links_html += '<a href="%s">%s</a> ' % (site[1], site[0])
        links_html += '</td></tr>'
    html = '''<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>%s</title>
<link rel="stylesheet" type="text/css" href="style.css" />
</head>
<body>
<table summary="container" border="0" width="100%%" style="height: 100%%"><tr><td><table summary="container">
%s
</table></td></tr></table>
</body>
</html>''' % (config['title'], links_html)
    if not os.path.exists(config['output_dir']):
        os.makedirs(config['output_dir'])
    with open(config['output_dir'] + '/homepage.html', 'w') as file:
        file.write(html)
    with open(config['output_dir'] + '/style.css', 'w') as file:
        file.write(css)
    # `%` binds tighter than `+`, so this formats the directory into the
    # command and then appends '/homepage.html' — the full file path.
    os.system('tidy -utf8 -i -m -q -asxhtml %s' % config['output_dir'] + '/homepage.html')
if __name__ == "__main__":
    # Script entry point.
    main()
|
#!/usr/bin/env python3
"""
Converts Mp3 files to Wav - also repeats them a given number of times
"""
import os
import random
import sys
def run_ffmpeg(fname: str, directory="$PWD"):
    """Convert an mp3 file to wav with a dockerised ffmpeg.

    :param fname: input file name, expected to end in ``.mp3``
    :param directory: host directory mounted into the container as
        ``/workspace``

    NOTE(review): the "repeat a random number of times" behaviour
    mentioned in the module docstring was commented out; each file is
    converted exactly once.
    """
    # BUG FIX: strip only a trailing ".mp3" — the old
    # `filename.replace(".mp3", "")` removed EVERY occurrence, mangling
    # names such as "a.mp3.backup.mp3".
    filename = fname
    if filename.endswith(".mp3"):
        filename = filename[:-len(".mp3")]
    # Build ffmpeg's quoted "concat:" input specification.
    repl = '"concat:/workspace/{}.mp3"'.format(filename)
    print("Starting up program for ", fname)
    cmd = """docker run --rm --volume {}:/workspace jrottenberg/ffmpeg:4.1-nvidia -nostats -loglevel 0 -hwaccel cuvid -i {} /workspace/{}.wav""".format(directory, repl, filename)
    # BUG FIX: the command used to be printed twice; once is enough.
    print(cmd)
    os.system(cmd)
if __name__ == "__main__":
    # Convert every mp3 file found in the current directory.
    for filename in [x for x in os.listdir(".") if ".mp3" in x]:
        print("Running for", filename)
        run_ffmpeg(filename)
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Kacper Żuk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer. 2. Redistributions in
# binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import spotify
import threading
import alsaaudio
import time
from copy import copy
class Sink(object):
    """Buffered ALSA audio sink for pyspotify.

    Audio frames delivered through the session's MUSIC_DELIVERY callback
    are queued in ``self.data`` and drained to an ALSA PCM device by a
    background worker thread.
    """
    def __init__(self, session, pillow_size):
        # "Pillow" size: while draining a batch, the worker releases the
        # lock once fewer than this many chunks remain to be written, so
        # deliveries can refill the queue while playback continues.
        self.pillow_size = pillow_size
        # ALSA PCM output; created lazily on the first music delivery.
        self.output = None
        session.on(spotify.SessionEvent.MUSIC_DELIVERY, self.music_delivery)
        self.lock = threading.Lock()  # guards access to self.data
        self.data = []  # queued audio chunks, in arrival order
    def worker(self):
        """Drain queued audio chunks to the ALSA device (runs forever)."""
        while True:
            if self.data:
                # Snapshot and empty the queue under the lock.
                self.lock.acquire()
                tmp_data = copy(self.data)
                self.data = []
                l = len(tmp_data)
                released = False
                for i in range(0, l):
                    # Hold the lock for the head of the batch; release it
                    # once fewer than `pillow_size` chunks remain, letting
                    # music_delivery queue new data before this batch ends.
                    # NOTE(review): if pillow_size <= 1 this condition never
                    # fires and the lock is never released — presumably
                    # pillow_size is always >= 2; verify at the call site.
                    if not released and l - i < self.pillow_size:
                        self.lock.release()
                        released = True
                    self.output.write(tmp_data[i])
            else:
                # Queue empty: poll again shortly (simple busy-wait).
                time.sleep(0.05)
    def music_delivery(self, session, audio_format, frames, num_frames):
        """pyspotify MUSIC_DELIVERY callback: queue ``frames`` for playback.

        Returns the number of frames consumed; returning 0 asks pyspotify
        to redeliver the frames later (used when the worker holds the lock).
        """
        if not self.output:
            # First delivery: open/configure the ALSA device to match the
            # stream format, then start the drain thread.
            self.output = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, card='default')
            self.output.setchannels(audio_format.channels)
            self.output.setrate(audio_format.sample_rate)
            self.output.setformat(alsaaudio.PCM_FORMAT_S16_LE)
            self.output.setperiodsize(160)
            threading.Thread(target=self.worker).start()
        # Non-blocking acquire: if the worker is mid-drain, decline the
        # frames rather than stall the audio callback.
        if not self.lock.acquire(False):
            return 0
        self.data.append(frames)
        self.lock.release()
        return num_frames
|
# -*- coding: utf-8 -*-
#
# screenViewInterface.py
#
# Interface for all screen's views
#
from abc import ABC, abstractmethod
class ScreenViewInterface(ABC):
    """
    Interface contains methods that every screen view has to implement
    """
    @abstractmethod
    def get_screen_name(self):
        """Return the name identifying this screen."""
        pass
    @abstractmethod
    def on_keyboard_down(self, key_name = ''):
        """Handle a key-down event; ``key_name`` names the pressed key."""
        pass
    @abstractmethod
    def on_show_screen(self, event_string = ''):
        """Handle the screen being shown; ``event_string`` carries event data."""
        pass
|
#!/bin/env python
"""
Unit tests for high level Python interface.
Author: Daniel Lee, DWD, 2016
"""
import os
from tempfile import NamedTemporaryFile
import unittest
from eccodes import GribFile
from eccodes import GribIndex
from eccodes import GribMessage
from eccodes.high_level.gribmessage import IndexNotSelectedError
from eccodes import BufrFile, BufrMessage
# Paths to the sample data files, relative to the test working directory.
TESTGRIB = "../../data/high_level_api.grib2"
TESTBUFR = "../../data/bufr/syno_multi.bufr"
TEST_OUTPUT = "test-output.codes"
TEST_INDEX = "test.index"
# Index keys and the values the index tests select on.
TEST_KEYS = ("dataDate", "stepRange")
TEST_VALUES = 20110225, 0
# Idiom fix: build the key->value mapping with dict(zip(...)) instead of
# an index-based loop (which also leaked the loop variable `i1` into the
# module namespace).
SELECTION_DICTIONARY = dict(zip(TEST_KEYS, TEST_VALUES))
TEST_INDEX_OUTPUT = TESTGRIB
TEST_STEPRANGE = ('0', '12', '18', '24', '6')
# These keys should be available even if new keys are defined
# (baseline set of GRIB keys checked by TestGribMessage.test_metadata).
KNOWN_GRIB_KEYS = ['7777', 'GRIBEditionNumber', 'N', 'NV',
                   'Ni', 'Nj', 'PLPresent', 'PVPresent',
                   'addEmptySection2',
                   'addExtraLocalSection', 'alternativeRowScanning',
                   'angleDivisor', 'angleMultiplier', 'angularPrecision',
                   'average', 'backgroundProcess',
                   'basicAngleOfTheInitialProductionDomain',
                   'binaryScaleFactor', 'bitMapIndicator', 'bitmapPresent',
                   'bitsPerValue', 'bottomLevel', 'centre',
                   'centreDescription', 'cfName', 'cfNameECMF', 'cfVarName',
                   'cfVarNameECMF', 'changeDecimalPrecision', 'class',
                   'climateDateFrom', 'climateDateTo', 'codedValues',
                   'dataDate', 'dataRepresentationTemplateNumber', 'dataTime',
                   'day', 'decimalPrecision', 'decimalScaleFactor',
                   'deleteCalendarId', 'deleteExtraLocalSection', 'deletePV',
                   'discipline', 'distinctLatitudes', 'distinctLongitudes',
                   'editionNumber', 'endStep', 'eps',
                   'experimentVersionNumber', 'extraLocalSectionPresent',
                   'forecastTime', 'g2grid', 'gaussianGridName',
                   'genVertHeightCoords', 'generatingProcessIdentifier',
                   'getNumberOfValues', 'global', 'globalDomain',
                   'grib2LocalSectionNumber', 'grib2LocalSectionPresent',
                   'grib2divider', 'gridDefinitionDescription',
                   'gridDefinitionTemplateNumber',
                   'gridDescriptionSectionPresent', 'gridType', 'hour',
                   'hoursAfterDataCutoff', 'iDirectionIncrement',
                   'iDirectionIncrementGiven', 'iDirectionIncrementInDegrees',
                   'iScansNegatively', 'iScansPositively', 'identifier',
                   'ieeeFloats', 'ifsParam', 'ijDirectionIncrementGiven',
                   'indicatorOfUnitOfTimeRange',
                   'interpretationOfNumberOfPoints', 'isConstant',
                   'isHindcast', 'isOctahedral', 'is_uerra',
                   'jDirectionIncrementGiven', 'jPointsAreConsecutive',
                   'jScansPositively', 'julianDay', 'kurtosis', 'latLonValues',
                   'latitudeOfFirstGridPoint',
                   'latitudeOfFirstGridPointInDegrees',
                   'latitudeOfLastGridPoint',
                   'latitudeOfLastGridPointInDegrees', 'latitudes',
                   'legBaseDate', 'legBaseTime', 'legNumber',
                   'lengthOfHeaders', 'level', 'localDefinitionNumber',
                   'localDir', 'localTablesVersion',
                   'longitudeOfFirstGridPoint',
                   'longitudeOfFirstGridPointInDegrees',
                   'longitudeOfLastGridPoint',
                   'longitudeOfLastGridPointInDegrees', 'longitudes',
                   'mAngleMultiplier', 'mBasicAngle', 'marsClass',
                   'marsStream', 'marsType', 'masterDir', 'maximum',
                   'md5Headers', 'md5Section1', 'md5Section3', 'md5Section4',
                   'md5Section5', 'md5Section6', 'md5Section7', 'minimum',
                   'minute', 'minutesAfterDataCutoff', 'missingValue',
                   'modelName', 'month', 'name', 'nameECMF',
                   'nameOfFirstFixedSurface', 'nameOfSecondFixedSurface',
                   'neitherPresent', 'numberOfDataPoints',
                   'numberOfForecastsInEnsemble', 'numberOfMissing',
                   'numberOfOctectsForNumberOfPoints', 'numberOfSection',
                   'numberOfValues', 'oceanAtmosphereCoupling',
                   'offsetValuesBy', 'optimizeScaleFactor', 'packingError',
                   'packingType', 'paramId', 'paramIdECMF',
                   'parameterCategory', 'parameterName', 'parameterNumber',
                   'parameterUnits', 'perturbationNumber', 'pressureUnits',
                   'productDefinitionTemplateNumber',
                   'productDefinitionTemplateNumberInternal', 'productType',
                   'productionStatusOfProcessedData', 'radius',
                   'referenceDate', 'referenceValue', 'referenceValueError',
                   'resolutionAndComponentFlags',
                   'resolutionAndComponentFlags1',
                   'resolutionAndComponentFlags2',
                   'resolutionAndComponentFlags6',
                   'resolutionAndComponentFlags7',
                   'resolutionAndComponentFlags8',
                   'scaleFactorOfEarthMajorAxis',
                   'scaleFactorOfEarthMinorAxis',
                   'scaleFactorOfFirstFixedSurface',
                   'scaleFactorOfRadiusOfSphericalEarth',
                   'scaleFactorOfSecondFixedSurface', 'scaleValuesBy',
                   'scaledValueOfEarthMajorAxis',
                   'scaledValueOfEarthMinorAxis',
                   'scaledValueOfFirstFixedSurface',
                   'scaledValueOfRadiusOfSphericalEarth',
                   'scaledValueOfSecondFixedSurface', 'scanningMode',
                   'scanningMode5', 'scanningMode6', 'scanningMode7',
                   'scanningMode8', 'second', 'section0Length',
                   'section1Length', 'section2Length', 'section2Padding',
                   'section3Length', 'section3Padding', 'section4Length',
                   'section5Length', 'section6Length', 'section7Length',
                   'section8Length', 'sectionNumber',
                   'selectStepTemplateInstant', 'selectStepTemplateInterval',
                   'setBitsPerValue', 'setCalendarId', 'shapeOfTheEarth',
                   'shortName', 'shortNameECMF', 'significanceOfReferenceTime',
                   'skewness', 'sourceOfGridDefinition', 'standardDeviation',
                   'startStep', 'stepRange', 'stepType', 'stepTypeInternal',
                   'stepUnits', 'stream', 'subCentre',
                   'subdivisionsOfBasicAngle', 'tablesVersion',
                   'tablesVersionLatest', 'tempPressureUnits', 'topLevel',
                   'totalLength', 'type', 'typeOfEnsembleForecast',
                   'typeOfFirstFixedSurface', 'typeOfGeneratingProcess',
                   'typeOfLevel', 'typeOfOriginalFieldValues',
                   'typeOfProcessedData', 'typeOfSecondFixedSurface', 'units',
                   'unitsECMF', 'unitsOfFirstFixedSurface',
                   'unitsOfSecondFixedSurface', 'unpackedError',
                   'uvRelativeToGrid', 'validityDate', 'validityTime',
                   'values', 'year']
# Baseline set of BUFR keys expected in the syno_multi.bufr test messages
# (the `#n#` prefix is the eccodes rank for repeated elements).
KNOWN_BUFR_KEYS = ['edition', 'masterTableNumber', 'bufrHeaderSubCentre', 'bufrHeaderCentre',
                   'updateSequenceNumber', 'dataCategory', 'dataSubCategory', 'masterTablesVersionNumber',
                   'localTablesVersionNumber', 'typicalYearOfCentury', 'typicalMonth', 'typicalDay',
                   'typicalHour', 'typicalMinute', 'rdbType', 'newSubtype', 'rdbtimeDay', 'rdbtimeHour',
                   'rdbtimeMinute', 'rdbtimeSecond', 'rectimeDay', 'rectimeHour', 'rectimeMinute', 'rectimeSecond',
                   'correction1', 'correction1Part', 'correction2', 'correction2Part', 'correction3', 'correction3Part',
                   'correction4', 'correction4Part', 'qualityControl', 'numberOfSubsets', 'localLatitude', 'localLongitude',
                   'observedData', 'compressedData', 'unexpandedDescriptors', '#1#blockNumber',
                   '#1#blockNumber->percentConfidence', '#1#stationNumber', '#1#stationNumber->percentConfidence',
                   '#1#stationType', '#1#stationType->percentConfidence', '#1#year', '#1#year->percentConfidence',
                   '#1#month', '#1#month->percentConfidence', '#1#day', '#1#day->percentConfidence', '#1#hour',
                   '#1#hour->percentConfidence', '#1#minute', '#1#minute->percentConfidence', '#1#latitude',
                   '#1#latitude->percentConfidence', '#1#longitude', '#1#longitude->percentConfidence',
                   '#1#heightOfStation', '#1#heightOfStation->percentConfidence', '#1#nonCoordinatePressure',
                   '#1#nonCoordinatePressure->percentConfidence', '#1#pressureReducedToMeanSeaLevel',
                   '#1#pressureReducedToMeanSeaLevel->percentConfidence', '#1#3HourPressureChange',
                   '#1#3HourPressureChange->percentConfidence', '#1#characteristicOfPressureTendency',
                   '#1#characteristicOfPressureTendency->percentConfidence', '#1#windDirectionAt10M',
                   '#1#windDirectionAt10M->percentConfidence', '#1#windSpeedAt10M', '#1#windSpeedAt10M->percentConfidence',
                   '#1#airTemperatureAt2M', '#1#airTemperatureAt2M->percentConfidence', '#1#dewpointTemperatureAt2M',
                   '#1#dewpointTemperatureAt2M->percentConfidence', '#1#relativeHumidity',
                   '#1#relativeHumidity->percentConfidence', '#1#horizontalVisibility',
                   '#1#horizontalVisibility->percentConfidence', '#1#presentWeather',
                   '#1#presentWeather->percentConfidence', '#1#pastWeather1', '#1#pastWeather1->percentConfidence',
                   '#1#pastWeather2', '#1#pastWeather2->percentConfidence', '#1#cloudCoverTotal',
                   '#1#cloudCoverTotal->percentConfidence', '#1#verticalSignificanceSurfaceObservations',
                   '#1#verticalSignificanceSurfaceObservations->percentConfidence', '#1#cloudAmount',
                   '#1#cloudAmount->percentConfidence', '#1#heightOfBaseOfCloud',
                   '#1#heightOfBaseOfCloud->percentConfidence', '#1#cloudType', '#1#cloudType->percentConfidence',
                   '#2#cloudType', '#2#cloudType->percentConfidence', '#3#cloudType', '#3#cloudType->percentConfidence',
                   '#2#verticalSignificanceSurfaceObservations', '#2#verticalSignificanceSurfaceObservations->percentConfidence',
                   '#2#cloudAmount', '#2#cloudAmount->percentConfidence', '#4#cloudType',
                   '#4#cloudType->percentConfidence', '#2#heightOfBaseOfCloud', '#2#heightOfBaseOfCloud->percentConfidence',
                   '#3#verticalSignificanceSurfaceObservations', '#3#verticalSignificanceSurfaceObservations->percentConfidence',
                   '#3#cloudAmount', '#3#cloudAmount->percentConfidence', '#5#cloudType', '#5#cloudType->percentConfidence',
                   '#3#heightOfBaseOfCloud', '#3#heightOfBaseOfCloud->percentConfidence', '#4#verticalSignificanceSurfaceObservations',
                   '#4#verticalSignificanceSurfaceObservations->percentConfidence', '#4#cloudAmount', '#4#cloudAmount->percentConfidence',
                   '#6#cloudType', '#6#cloudType->percentConfidence', '#4#heightOfBaseOfCloud', '#4#heightOfBaseOfCloud->percentConfidence',
                   '#5#verticalSignificanceSurfaceObservations', '#5#verticalSignificanceSurfaceObservations->percentConfidence', '#5#cloudAmount',
                   '#5#cloudAmount->percentConfidence', '#7#cloudType', '#7#cloudType->percentConfidence', '#5#heightOfBaseOfCloud',
                   '#5#heightOfBaseOfCloud->percentConfidence', '#1#totalPrecipitationPast6Hours',
                   '#1#totalPrecipitationPast6Hours->percentConfidence', '#1#totalSnowDepth', '#1#totalSnowDepth->percentConfidence',
                   '#1#centre', '#1#generatingApplication']
class TestGribFile(unittest.TestCase):
    """Test GribFile functionality."""
    def test_memory_management(self):
        """Messages in GribFile can be opened and closed properly."""
        with GribFile(TESTGRIB) as grib_file:
            self.assertEqual(len(grib_file), 5)
            for i in range(len(grib_file)):
                msg = GribMessage(grib_file)
                self.assertEqual(msg["shortName"], "msl")
                self.assertEqual(msg['count'], i+1)
            # All five messages stay open while the file context is alive...
            self.assertEqual(len(grib_file.open_messages), 5)
        # ...and are all released once the GribFile context exits.
        self.assertEqual(len(grib_file.open_messages), 0)
    def test_message_counting_works(self):
        """The GribFile is aware of its messages."""
        with GribFile(TESTGRIB) as grib_file:
            msg_count = len(grib_file)
            self.assertEqual(msg_count, 5)
    def test_iterator_protocol(self):
        """The GribFile allows pythonic iteration over all messages."""
        step_ranges = []
        with GribFile(TESTGRIB) as grib_file:
            for msg in grib_file:
                step_ranges.append(msg["stepRange"])
        self.assertSequenceEqual(step_ranges, ["0", "6", "12", "18", "24"])
    def test_read_past_last_message(self):
        """Trying to open message on exhausted GRIB file raises IOError."""
        with GribFile(TESTGRIB) as grib_file:
            # Consume every message first, then attempt one more read.
            for _ in range(len(grib_file)):
                GribMessage(grib_file)
            self.assertRaises(IOError, lambda: GribMessage(grib_file))
    def test_read_invalid_file(self):
        """Trying to open message on nonexistent GRIB file raises IOError."""
        # An empty temporary file is a valid path but contains no messages.
        with NamedTemporaryFile(mode='r') as f:
            with GribFile(f.name) as grib_file:
                self.assertRaises(IOError, lambda: GribMessage(grib_file))
class TestGribMessage(unittest.TestCase):
    """Test GribMessage functionality."""
    def test_metadata(self):
        """Metadata is read correctly from GribMessage."""
        with GribFile(TESTGRIB) as grib_file:
            msg = GribMessage(grib_file)
            msg_keys = msg.keys()
            # The baseline key set must be present even if eccodes adds keys.
            for key in KNOWN_GRIB_KEYS:
                assert key in msg_keys, "key '%s' not found" % key
            # Size of message in bytes
            self.assertEqual(msg.size(), 160219)
            self.assertEqual(len(msg.keys()), len(msg))
    def test_missing_message_behaviour(self):
        """Key with MISSING value."""
        with GribFile(TESTGRIB) as grib_file:
            msg = GribMessage(grib_file)
            self.assertTrue(msg.missing("scaleFactorOfSecondFixedSurface"))
            # Set a value, then flag the key as MISSING again.
            msg["scaleFactorOfSecondFixedSurface"] = 5
            msg.set_missing("scaleFactorOfSecondFixedSurface")
            #with self.assertRaises(KeyError):
            #    msg["scaleFactorOfSecondFixedSurface"]
    def test_value_setting(self):
        """Keys can be set properly."""
        with GribFile(TESTGRIB) as grib_file:
            msg = GribMessage(grib_file)
            msg["scaleFactorOfSecondFixedSurface"] = 5
            msg["values"] = [1, 2, 3]
            self.assertEqual( msg['scaleFactorOfSecondFixedSurface'], 5 )
    def test_multi_value_setting(self):
        """Multiple keys/values can be set properly."""
        # Setting several keys in one statement applies them in order.
        msg = GribMessage(sample='GRIB1')
        msg[ 'paramId', 'stepType', 'edition' ] = 49, 'avg', 2
        self.assertEqual( msg['shortName'], '10fg' )
        # Another test
        with GribFile(TESTGRIB) as grib_file:
            msg = GribMessage(grib_file)
            msg[ 'setLocalDefinition', 'localDefinitionNumber' ] = 1,25
            msg[ 'typeOfFirstFixedSurface', 'typeOfSecondFixedSurface' ] = 1, 8
            msg[ ('typeOfFirstFixedSurface','typeOfSecondFixedSurface') ] = (1, 8) #Also works
            self.assertEqual( msg['localDefinitionNumber'], 25 )
            self.assertEqual( msg['typeOfLevel'], 'entireAtmosphere' )
    def test_serialize(self):
        """Message can be serialized to file."""
        with GribFile(TESTGRIB) as grib_file:
            msg = GribMessage(grib_file)
            with open(TEST_OUTPUT, "w") as test:
                msg.write(test)
        # Remove the scratch file written above.
        os.unlink(TEST_OUTPUT)
    def test_clone(self):
        """Messages can be used to produce clone Messages."""
        with GribFile(TESTGRIB) as grib_file:
            msg = GribMessage(grib_file)
            msg2 = GribMessage(clone=msg)
            self.assertSequenceEqual(msg.keys(), msg2.keys())
class TestGribIndex(unittest.TestCase):
    """Unit tests covering GribIndex behaviour."""

    def test_memory_management(self):
        """GribIndex closes GribMessages properly."""
        with GribIndex(TESTGRIB, TEST_KEYS) as idx:
            idx.select(SELECTION_DICTIONARY)
            self.assertEqual(1, len(idx.open_messages))
        self.assertEqual(0, len(idx.open_messages))

    def test_create_and_serialize_index(self):
        """GribIndex can be saved to file, file can be added to index."""
        with GribIndex(TESTGRIB, TEST_KEYS) as idx:
            idx.write(TEST_INDEX)
        with GribIndex(file_index=TEST_INDEX) as idx:
            idx.add(TESTGRIB)
        os.unlink(TEST_INDEX)

    def test_index_comprehension(self):
        """GribIndex understands underlying GRIB index properly."""
        key = TEST_KEYS[1]
        with GribIndex(TESTGRIB, TEST_KEYS) as idx:
            self.assertEqual(idx.size(key), 5)
            self.assertSequenceEqual(idx.values(key), TEST_STEPRANGE)
            with self.assertRaises(IndexNotSelectedError):
                # ecCodes prints an expected diagnostic to stderr here:
                #   ECCODES ERROR : please select a value for index key "dataDate"
                idx.select({key: TEST_VALUES[0]})
            # Selecting all necessary keys succeeds.
            idx.select(SELECTION_DICTIONARY)
            self.assertEqual(1, len(idx.open_messages))
class TestBufrFile(unittest.TestCase):
    """Test BufrFile functionality."""

    def test_memory_management(self):
        """Messages in BufrFile can be opened and closed properly."""
        with BufrFile(TESTBUFR) as bufr_file:
            self.assertEqual(len(bufr_file), 3)
            for i in range(len(bufr_file)):
                msg = BufrMessage(bufr_file)
                self.assertEqual(msg["bufrHeaderCentre"], 98)
                self.assertEqual(msg['count'], i+1)
            self.assertEqual(len(bufr_file.open_messages), 3)
        # Leaving the context must close every open message.
        # Fix: assertEquals was a deprecated alias, removed in Python 3.12;
        # assertEqual is the supported spelling.
        self.assertEqual(len(bufr_file.open_messages), 0)

    def test_message_counting_works(self):
        """The BufrFile is aware of its messages."""
        with BufrFile(TESTBUFR) as bufr_file:
            msg_count = len(bufr_file)
            self.assertEqual(msg_count, 3)

    def test_iterator_protocol(self):
        """The BufrFile allows pythonic iteration over all messages."""
        latitudes = []
        with BufrFile(TESTBUFR) as bufr_file:
            for msg in bufr_file:
                latitudes.append(msg["localLatitude"])
        self.assertSequenceEqual(latitudes, [70.93, 77, 78.92])

    def test_read_past_last_message(self):
        """Trying to open message on exhausted BUFR file raises IOError."""
        with BufrFile(TESTBUFR) as bufr_file:
            for _ in range(len(bufr_file)):
                BufrMessage(bufr_file)
            self.assertRaises(IOError, lambda: BufrMessage(bufr_file))

    def test_read_invalid_file(self):
        """Trying to open message on nonexistent file raises IOError."""
        with NamedTemporaryFile(mode='r') as f:
            with BufrFile(f.name) as bufr_file:
                self.assertRaises(IOError, lambda: BufrMessage(bufr_file))
class TestBufrMessage(unittest.TestCase):
    """Test BufrMessage functionality"""

    def test_metadata(self):
        """Metadata is read correctly from BufrMessage."""
        with BufrFile(TESTBUFR) as bufr_file:
            msg = BufrMessage(bufr_file)
            # The data section must be unpacked before data keys are visible
            msg.unpack()
            msg_keys = msg.keys()
            self.assertEqual(len(msg_keys), 140)
            for key in KNOWN_BUFR_KEYS:
                assert key in msg_keys
            # Size of message in bytes
            self.assertEqual(msg.size(), 220)
            self.assertEqual(len(msg.keys()), len(msg))

    def test_content(self):
        """Data values are read correctly from BufrMessage."""
        with BufrFile(TESTBUFR) as bufr_file:
            msg = BufrMessage(bufr_file)
            msg.unpack()
            self.assertEqual(msg["airTemperatureAt2M"], 274.5)
            # TODO: Test behaviour with missing messages (SUP-1874)

    def test_value_setting(self):
        """Keys can be set properly."""
        with BufrFile(TESTBUFR) as bufr_file:
            msg = BufrMessage(bufr_file)
            key, val = "localLongitude", 5
            msg[key] = val
            self.assertEqual(msg[key], val)

    def test_serialize(self):
        """Message can be serialized to file."""
        with BufrFile(TESTBUFR) as bufr_file:
            msg = BufrMessage(bufr_file)
            # NOTE(review): opened in text mode ("w") although BUFR is binary
            # data -- confirm whether "wb" is required on Python 3.
            with open(TEST_OUTPUT, "w") as test:
                msg.write(test)
            os.unlink(TEST_OUTPUT)

    def test_clone(self):
        """Messages can be used to produce clone Messages."""
        with BufrFile(TESTBUFR) as bufr_file:
            msg = BufrMessage(bufr_file)
            msg2 = BufrMessage(clone=msg)
            self.assertSequenceEqual(msg.keys(), msg2.keys())

    def test_copy_data(self):
        """Can copy data section from one message to another"""
        bufr = BufrMessage(sample='BUFR3')
        with BufrFile('../../data/bufr/metar_with_2_bias.bufr') as bufr_file:
            bufrin = BufrMessage(bufr_file)
            # Bitmap marking which data elements are present in the input
            ivalues=(
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 0, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1, 1, 1, 1, 0)
            bufr['inputDataPresentIndicator'] = ivalues
            # NOTE: header keys appear to be order-sensitive in ecCodes
            # (edition is set first) -- do not reorder these assignments.
            bufr['edition'] = 3
            bufr['masterTableNumber'] = 0
            bufr['bufrHeaderSubCentre'] = 0
            bufr['bufrHeaderCentre'] = 98
            bufr['updateSequenceNumber'] = 1
            bufr['dataCategory'] = 0
            bufr['dataSubCategory'] = 140
            bufr['masterTablesVersionNumber'] = 13
            bufr['localTablesVersionNumber'] = 1
            bufr['typicalYearOfCentury'] = 15
            bufr['typicalMonth'] = 5
            bufr['typicalDay'] = 4
            bufr['typicalHour'] = 9
            bufr['typicalMinute'] = 30
            bufr['numberOfSubsets'] = 1
            bufr['observedData'] = 1
            bufr['compressedData'] = 0
            # BUFR descriptor sequence for the target template
            ivalues=(
                307011,7006,10004,222000,101023,31031,1031,1032,101023,33007,
                225000,236000,101023,31031,1031,1032,8024,101001,225255,225000,
                236000,101023,31031,1031,1032,8024,101001,225255,
                1063,2001,4001,4002,4003,4004,4005,5002,
                6002,7001,7006,11001,11016,11017,11002)
            bufr['unexpandedDescriptors'] = ivalues
            bufrin.unpack()
            # Copy the unpacked data section from bufrin into bufr
            bufrin.copy_data(bufr)
            with open(TEST_OUTPUT, 'w') as test:
                bufr.write(test)
            os.unlink(TEST_OUTPUT)
if __name__ == "__main__":
    # Run the whole test suite when this file is executed as a script.
    unittest.main()
|
#!/usr/bin/env python3
"""Introduction script: clears the screen and prints a short personal bio."""
import utils

utils.check_version((3, 7))
utils.clear()

# Each paragraph is separated by exactly one blank line, matching the
# original sequence of print statements.
_PARAGRAPHS = (
    'Hello, my name is Robbie Frank',
    'My favorite game is probably Arma3.',
    "I don't really have any concerns for this class.",
    "I'm mostly excited to meet some fellow game designers and finding people to be able to collaborate with.",
    "My stackoverflow.com user number is 598075",
    "The URL to my github profile is https://github.com/Annuvian",
)
print('\n\n'.join(_PARAGRAPHS))
from . import create_app
# run the create_app function defined in init
# which returns an app
# Build the application instance through the package-level factory.
app = create_app()

if __name__ == '__main__':
    # Development server only -- debug mode must not be enabled in production.
    app.run(debug=True)
"""
Attempt to replicate features of the Functional Model within a sub-classed model.
# TODO:
-Create a model that has dictionary inputs and outputs.
-Ability to run fit method with said dictionary outputs.
-Ability to control which variables are updated with the fit method.
"""
import tensorflow as tf
import tensorflow.keras.layers as KL
import json
from .layers.non_local import Non_local_nn
from .layers.approx_round import *
from .layers.inception import Inception
from .layers.reverse_inception import ReverseInception
import collections.abc
import os
from .common import *
def buildNetwork(configFile, actionSize, netConfigOverride=None, debug=True, training=True, scope=None):
    """
    Reads a network config file and processes that into a network with appropriate naming structure.
    This function only works on feed forward neural networks. Can only handle one input.

    Parameters
    ----------
    configFile : str
        Config file which points to the network description to be loaded.
        Either a path to a JSON file or a bare name searched under configs/network.
    actionSize : int
        Output sizes of different network components. Assumes the environment
        is discrete action space (substituted for the "actionSize" placeholder).
    netConfigOverride : dict [opt]
        Dictionary of values which will override the default contents loaded
        from the config file. Defaults to no overrides.
    debug : bool [opt]
        If True, prints layer-creation diagnostics.
    training : bool [opt]
        Forwarded to layers whose behaviour differs at training time
        (e.g. GaussianNoise).
    scope : str [opt]
        Defines a tensorflow scope for the network (currently unused).

    Returns
    -------
    tf.keras.Model
        The assembled Keras model.
    """
    # Fix: avoid the shared-mutable-default-argument pitfall ({} as default).
    if netConfigOverride is None:
        netConfigOverride = {}

    # Checking if JSON file is fully defined path or just a file name without path.
    # If just a name, then it will search in default directory.
    if "/" in configFile:
        if not configFile.endswith(".json"):
            configFile = configFile + ".json"
    else:
        for dirpath, dirnames, filenames in os.walk("configs/network"):
            for filename in filenames:
                if configFile in filename:
                    configFile = os.path.join(dirpath, filename)
                    break

    with open(configFile) as json_file:
        data = json.load(json_file)
    data = UpdateNestedDictionary(data, netConfigOverride)

    layers = {}
    layerOutputs = {}

    # Recursion sweep through dictionaries and lists in the networkConfig to
    # insert user defined values.
    if "DefaultParams" in data.keys():
        data["NetworkStructure"] = UpdateStringValues(data["NetworkStructure"], data["DefaultParams"])

    # Creating Inputs for the network
    Inputs = {}
    for inputName, inputShape in data["Inputs"].items():
        Inputs[inputName] = KL.Input(shape=inputShape, name=inputName)

    # Creating layers of the network
    for sectionName, layerList in data["NetworkStructure"].items():
        for layerDict in layerList:
            # Substitute the environment's action size for the placeholder.
            if "units" in layerDict["Parameters"]:
                if layerDict["Parameters"]["units"] == "actionSize":
                    layerDict["Parameters"]["units"] = actionSize

            if "ReuseLayer" in layerDict:
                # Fix: look up the referenced layer by its configured name;
                # the original indexed with the literal string "ReuseLayer".
                layer = layers[layerDict["ReuseLayer"]]
            else:
                layer = GetLayer(layerDict)
            layers[layerDict["layerName"]] = layer
            if debug: print("Creating Layer:", layerDict["layerName"])

            if isinstance(layerDict["layerInput"], list):  # Multi-input Layers
                layerIn = []
                for layerInput in layerDict["layerInput"]:
                    if "input" in layerInput:
                        _, input_name = layerInput.rsplit('.', 1)
                        layerIn.append(Inputs[input_name])
                    else:
                        layerIn.append(layerOutputs[layerInput])
                if debug: print("\tLayer Inputs", layerIn)
                # Fix: the original referenced undefined names
                # (layerType[layerName]) and its dangling else clobbered the
                # merge-layer output. Dispatch on the layer's configured type
                # instead (assumes the config stores it under "layerType" --
                # TODO confirm against GetLayer's schema).
                layer_type = layerDict.get("layerType")
                if layer_type in ("Concatenate", "Multiply", "Add"):
                    # Merge layers take the list of inputs as a single argument.
                    output = layer(layerIn)
                elif layer_type == "GaussianNoise":
                    output = layer(layerIn, training=training)
                else:
                    output = layer(*layerIn)
            else:  # Single input layers
                if "input" in layerDict["layerInput"]:
                    _, input_name = layerDict["layerInput"].rsplit('.', 1)
                    if debug: print("\tLayer Input", Inputs[input_name])
                    output = layer(Inputs[input_name])
                else:
                    if debug: print("\tLayer Input", layerOutputs[layerDict["layerInput"]])
                    output = layer(layerOutputs[layerDict["layerInput"]])

            # If a layer has multiple outputs assign the outputs unique names.
            # Otherwise just have output be the layername.
            if "multiOutput" in layerDict:
                # Fix: the original referenced an undefined global
                # `multiOutput`; the output names come from the layer's own
                # config entry.
                for i, output_i in enumerate(output):
                    layerOutputs[layerDict["multiOutput"][i]] = output_i
            else:
                layerOutputs[layerDict["layerName"]] = output
            if debug: print("\tLayer Output", layerOutputs[layerDict["layerName"]])

    # Creating the outputs for the model.
    outputs = []
    for output, layerName in data["NetworkOutputs"].items():
        outputs.append(layerOutputs[layerName])
    if debug: print("Network Outputs:", outputs)

    network = tf.keras.models.Model(Inputs, outputs)
    return network
if __name__ == "__main__":
    # Module is intended to be imported; nothing to run directly.
    pass
|
"""
Classes for extending GraphMCF functionality for membranes
# Author: Antonio Martinez-Sanchez (Max Planck Institute for Biochemistry)
# Date: 03.11.15
"""
__author__ = 'martinez'
from pyseg.globals import *
from pyseg.graph import GraphGT
from pyseg import disperse_io
from pyseg import pexceptions
import math
import graph_tool.all as gt
from .variables import *
from pyseg.graph import GraphMCF
from pyseg.factory import SubGraphVisitor
from pyseg.filament import FilamentLDG, FilamentUDG, NetFilamentsSyn
# Membrane-side tags used in XML slice definitions (the string values must
# match the XML files; 'per_in'/'per_out' presumably denote peripheral
# regions -- TODO confirm against the slice XML schema).
XML_MBT_MIN = 'in'
XML_MBT_MOUT = 'out'
XML_MBT_MPERI = 'per_in'
XML_MBT_MPERO = 'per_out'
##################################################################################################
# Generic extension for dealing with GraphMCF of membranes
#
#
class MbGraphMCF(GraphMCF):
#### Constructor Area
# During building
# skel: DisPerSe skeleton
# manifolds: DisPerSe manifolds
# density: image density map
# mb_seg: tomogram with membrane segmentation (1-mb, 2-inside and 3-outside)
    def __init__(self, skel, manifolds, density, mb_seg):
        """Build a membrane-aware GraphMCF.

        :param skel: DisPerSe skeleton
        :param manifolds: DisPerSe manifolds
        :param density: image density map
        :param mb_seg: tomogram with membrane segmentation (1-mb, 2-inside and
            3-outside, per the class-level comment)
        """
        super(MbGraphMCF, self).__init__(skel, manifolds, density)
        self.__mb_seg = mb_seg
        # Per-vertex storage for membrane filaments, indexed by vertex id
        self.__mb_fils = np.zeros(shape=self.get_nid(), dtype=object)
        # Attach the segmentation to vertices as scalar field property MB_SEG
        self.add_scalar_field_nn(self.__mb_seg, MB_SEG)
        # Distance transform to the membrane, scaled to physical units
        self.__mb_dst = disperse_io.seg_dist_trans(self.__mb_seg == MB_LBL) * self.get_resolution()
#### Set/Get functionality
#### External functionality
def get_mb_vertices_list(self):
verts_mb = list()
prop_seg_id = self.get_prop_id(MB_SEG)
for v in self.get_vertices_list():
if self.get_prop_entry_fast(prop_seg_id, v.get_id(), 1, np.int)[0] == MB_LBL:
verts_mb.append(v)
return verts_mb
# Euclidean (through euclidean space) shortest distance to membrane
# Returns: result stored as property MB_EU_DST
    def compute_mb_eu_dst(self):
        """Store the Euclidean distance-to-membrane map as vertex property MB_EU_DST."""
        self.add_scalar_field(self.__mb_dst, MB_EU_DST)
# Geodesic (through graph and euclidean) distance to shortest contact point,
# it also computes differential geometry property for filaments
# update: if True (default) then membrane segmentation is added as scalar field and if edges length
# and edge filamentness are computed
# Returns: result stored as property MB_GEO_DST, MB_GEO_LEN and MB_GEO_SIM
    def compute_mb_geo(self, update=True):
        """Compute geodesic filament metrics from off-membrane vertices to the membrane.

        For every vertex not labelled MB_LBL, finds the edge-length-weighted
        shortest path to the closest vertex on the other side of the membrane,
        walks the final edge's arc until the membrane contact point and stores
        the resulting filament metrics as vertex properties (MB_GEO_DST,
        MB_GEO_LEN, MB_GEO_SIN, MB_GEO_KT, MB_GEO_TT, MB_GEO_NS, MB_GEO_BS,
        MB_GEO_APL and MB_CONT_COORD).

        :param update: if True (default) membrane segmentation, edge lengths
            and edge filamentness are recomputed even when already present
        """
        # Initialization
        if update or (self.get_prop_id(MB_SEG) is None):
            self.add_scalar_field_nn(self.__mb_seg, MB_SEG)
        if update or (self.get_prop_id(SGT_EDGE_LENGTH) is None):
            self.compute_edges_length(SGT_EDGE_LENGTH, 1, 1, 1, False)
        if update or (self.get_prop_id(STR_EDGE_FNESS) is None):
            self.compute_edge_filamentness()
        # Create (or fetch) every output vertex property
        key_dst_id = self.get_prop_id(MB_GEO_DST)
        if key_dst_id is None:
            key_dst_id = self.add_prop(MB_GEO_DST, 'float', 1)
        key_len_id = self.get_prop_id(MB_GEO_LEN)
        if key_len_id is None:
            key_len_id = self.add_prop(MB_GEO_LEN, 'float', 1)
        key_sin_id = self.get_prop_id(MB_GEO_SIN)
        if key_sin_id is None:
            key_sin_id = self.add_prop(MB_GEO_SIN, 'float', 1)
        key_kt_id = self.get_prop_id(MB_GEO_KT)
        if key_kt_id is None:
            key_kt_id = self.add_prop(MB_GEO_KT, 'float', 1)
        key_tt_id = self.get_prop_id(MB_GEO_TT)
        if key_tt_id is None:
            key_tt_id = self.add_prop(MB_GEO_TT, 'float', 1)
        key_ns_id = self.get_prop_id(MB_GEO_NS)
        if key_ns_id is None:
            key_ns_id = self.add_prop(MB_GEO_NS, 'float', 1)
        key_bs_id = self.get_prop_id(MB_GEO_BS)
        if key_bs_id is None:
            key_bs_id = self.add_prop(MB_GEO_BS, 'float', 1)
        key_al_id = self.get_prop_id(MB_GEO_APL)
        if key_al_id is None:
            key_al_id = self.add_prop(MB_GEO_APL, 'float', 1)
        key_cont_id = self.get_prop_id(MB_CONT_COORD)
        if key_cont_id is None:
            key_cont_id = self.add_prop(MB_CONT_COORD, 'float', 3)
        # Getting the graph GraphGT
        graph_gt = GraphGT(self).get_gt()
        # prop_efn = graph_gt.edge_properties[STR_FIELD_VALUE]
        prop_elen = graph_gt.edge_properties[SGT_EDGE_LENGTH]
        prop_seg = graph_gt.vertex_properties[MB_SEG]
        prop_eid = graph_gt.edge_properties[DPSTR_CELL]
        prop_vid = graph_gt.vertex_properties[DPSTR_CELL]
        # Creating LUTs
        lut_in = list()
        lut_out = list()
        for v in graph_gt.vertices():
            seg_lbl = prop_seg[v]
            if seg_lbl == MB_IN_LBL:
                lut_in.append(int(v))
            elif seg_lbl == MB_OUT_LBL:
                lut_out.append(int(v))
            elif seg_lbl == MB_LBL:
                # Membrane vertices are reachable targets from both sides
                lut_in.append(int(v))
                lut_out.append(int(v))
        lut_in = np.asarray(lut_in, dtype=np.int)
        lut_out = np.asarray(lut_out, dtype=np.int)
        # Loop for all vertices
        for v in graph_gt.vertices():
            # Look for vertices out the membrane
            seg_lbl = prop_seg[v]
            if seg_lbl != MB_LBL:
                # Getting lut for vertices on the other side of the membrane
                if seg_lbl == MB_IN_LBL:
                    lut_oth = lut_out
                elif seg_lbl == MB_OUT_LBL:
                    lut_oth = lut_in
                else:
                    continue
                # Finding the shortest geodesic path to other vertices which crosses the membrane
                # dists = gt.shortest_distance(graph_gt, source=v, weights=prop_efn).get_array()
                dists = gt.shortest_distance(graph_gt, source=v, weights=prop_elen).get_array()
                dists_pot = dists[lut_oth]
                if len(dists_pot) > 0:
                    t = graph_gt.vertex(lut_oth[np.argmin(dists_pot)])
                    # Finding geodesic shortest path to shortest vertex
                    # v_path, e_path = gt.shortest_path(graph_gt, v, t, weights=prop_efn)
                    v_path, e_path = gt.shortest_path(graph_gt, v, t, weights=prop_elen)
                    if len(e_path) > 0:
                        contact = None
                        # Measuring the path (on euclidean space) until membrane contact point
                        fil_v_ids = list()
                        for h_v in v_path[0:-1]:
                            fil_v_ids.append(prop_vid[h_v])
                        fil_p_ids = list()
                        # Initialize the filament starting point
                        fil_p_ids.append(prop_vid[v])
                        path_len = .0
                        for e in e_path[0:-1]:
                            path_len += prop_elen[e]
                            # Add in a ordered ways path points to the filament
                            p_ids = self.get_edge_ids(self.get_edge(prop_eid[e]))
                            if p_ids[-1] == fil_p_ids[-1]:
                                p_ids = p_ids[::-1]
                            fil_p_ids += p_ids[1::]
                        # Walk the final edge's arc points; the membrane contact
                        # is the last arc point still on the source side
                        edge = self.get_edge(prop_eid[e_path[-1]])
                        e_ids = self.get_edge_ids(edge)
                        e_coords = np.asarray(self.get_edge_arcs_coords(edge), dtype=np.float)
                        e_coords_int = np.asarray(e_coords.round(), dtype=np.int)
                        try:
                            xi, yi, zi = e_coords_int[0, :]
                            mb_i = self.__mb_seg[xi, yi, zi]
                            # Reverse for ending with vertex through membrane
                            if mb_i != seg_lbl:
                                e_coords = e_coords[::-1]
                                e_coords_int = e_coords_int[::-1]
                                e_ids = e_ids[::-1]
                            contact = e_coords[0, :]
                            fil_p_ids.append(e_ids[0])
                            # Loop for length increasing
                            xh, yh, zh = e_coords[0, :]
                            for i in range(1, e_coords.shape[0]):
                                x, y, z = e_coords_int[i, :]
                                if self.__mb_seg[x, y, z] == seg_lbl:
                                    x, y, z = e_coords[i, :]
                                    hold_len = np.asarray((x-xh, y-yh, z-zh), dtype=np.float)
                                    path_len += (math.sqrt((hold_len*hold_len).sum()) * self.get_resolution())
                                    xh, yh, zh = x, y, z
                                    contact = e_coords[i, :]
                                    fil_p_ids.append(e_ids[i])
                                else:
                                    break
                        except IndexError:
                            # Arc coordinates may fall outside the segmentation
                            # volume; such paths yield no contact point
                            pass
                        # Compute metrics only if a contact is found
                        if contact is not None:
                            # Add filament
                            v_id = prop_vid[v]
                            if len(fil_p_ids) > 2:
                                self.add_mb_fil(v_id, fil_v_ids, fil_p_ids)
                                fil = self.__mb_fils[v_id]
                                self.set_prop_entry_fast(key_dst_id, (fil.get_dst(),), v_id, 1)
                                self.set_prop_entry_fast(key_len_id, (fil.get_length(),), v_id, 1)
                                self.set_prop_entry_fast(key_sin_id, (fil.get_sinuosity(),), v_id, 1)
                                self.set_prop_entry_fast(key_kt_id, (fil.get_total_k(),), v_id, 1)
                                self.set_prop_entry_fast(key_tt_id, (fil.get_total_t(),), v_id, 1)
                                self.set_prop_entry_fast(key_ns_id, (fil.get_total_ns(),), v_id, 1)
                                self.set_prop_entry_fast(key_bs_id, (fil.get_total_bs(),), v_id, 1)
                                self.set_prop_entry_fast(key_al_id, (fil.get_apex_length(),), v_id, 1)
                                self.set_prop_entry_fast(key_cont_id, (contact[0], contact[1], contact[2]),
                                                         v_id, 3)
                            else:
                                print('WARNING: filament with less than 3 points!')
    def add_mb_fil(self, v_id, v_ids, p_ids):
        """Register a FilamentLDG built from vertex/point id paths under vertex v_id."""
        self.__mb_fils[v_id] = FilamentLDG(v_ids, p_ids, self)
    def get_mb_fil(self, v_id):
        """Return the filament stored for vertex v_id (0 if none was added)."""
        return self.__mb_fils[v_id]
# Cloud of coordinates for all vertices in filaments of a membrane slice
# slices: Slice object with the membrane slice information
# Return: an array with points coordinates
    def get_cloud_mb_slice_fils(self, slice):
        """Cloud of coordinates for all vertices in filaments of a membrane slice.

        :param slice: Slice object with the membrane slice information
            (note: parameter shadows the builtin ``slice``)
        :return: (coords, vertex_ids, mask) arrays
        """
        # Initialization
        seg_id = self.get_prop_id(MB_SEG)
        seg_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=seg_id))
        eu_id = self.get_prop_id(MB_EU_DST)
        eu_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=eu_id))
        geo_id = self.get_prop_id(MB_GEO_DST)
        geo_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=geo_id))
        gl_id = self.get_prop_id(MB_GEO_LEN)
        gl_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=gl_id))
        sin_id = self.get_prop_id(MB_GEO_SIN)
        sin_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=sin_id))
        # Compute GraphGT
        if self._GraphMCF__graph_gt is None:
            self.compute_graph_gt()
        graph_gt = self._GraphMCF__graph_gt
        lut_gt = (-1) * np.ones(shape=self.get_nid(), dtype=object)
        prop_vid = graph_gt.vertex_properties[DPSTR_CELL]
        prop_vs = graph_gt.vertex_properties[MB_SEG]
        prop_elen = graph_gt.edge_properties[SGT_EDGE_LENGTH]
        # Find tail vertices within slice
        lut_ver = (-1) * np.ones(shape=self.get_nid(), dtype=np.int)
        cont = 0
        v_ids = list()
        for v in graph_gt.vertices():
            v_id = prop_vid[v]
            seg = self.get_prop_entry_fast(seg_id, v_id, 1, seg_dt)[0]
            eu = self.get_prop_entry_fast(eu_id, v_id, 1, eu_dt)[0]
            geo = self.get_prop_entry_fast(geo_id, v_id, 1, geo_dt)[0]
            gl = self.get_prop_entry_fast(gl_id, v_id, 1, gl_dt)[0]
            sin = self.get_prop_entry_fast(sin_id, v_id, 1, sin_dt)[0]
            if slice.test(side=seg, eu_dst=eu, geo_dst=geo, geo_len=gl, sin=sin):
                if lut_ver[v_id] == -1:
                    lut_ver[v_id] = cont
                    v_ids.append(v_id)
                    lut_gt[v_id] = int(v)
                    cont += 1
        # Find shortest paths (filaments)
        mat_geo = gt.shortest_distance(graph_gt, weights=prop_elen)
        h_v_ids = list()
        for v_id in v_ids:
            # print 'v_id=' + str(v_id)
            s = graph_gt.vertex(lut_gt[v_id])
            s_lbl = prop_vs[s]
            # Visit potential targets in increasing geodesic distance order
            ts_gt = np.argsort(mat_geo[s])
            for t_gt in ts_gt:
                t = graph_gt.vertex(t_gt)
                if prop_vs[t] != s_lbl:
                    # Collect the path vertices that stay on the source side
                    v_path, _ = gt.shortest_path(graph_gt, s, t, weights=prop_elen)
                    for v_p in v_path[:-1]:
                        if prop_vs[v_p] != s_lbl:
                            break
                        v_p_id = prop_vid[v_p]
                        if lut_ver[v_p_id] == -1:
                            lut_ver[v_p_id] = cont
                            h_v_ids.append(v_p_id)
                            cont += 1
                    break
        v_ids += h_v_ids
        # Find edges within slice
        e_ids = list()
        for edge in self.get_edges_list():
            s_id = lut_ver[edge.get_source_id()]
            t_id = lut_ver[edge.get_target_id()]
            if (s_id > -1) and (t_id > -1):
                e_ids.append([s_id, t_id])
        # graph_tool building
        graph = gt.Graph(directed=False)
        vertices_gt = np.empty(shape=len(v_ids), dtype=object)
        for i in range(len(v_ids)):
            vertices_gt[i] = graph.add_vertex()
        for e_id in e_ids:
            graph.add_edge(vertices_gt[e_id[0]], vertices_gt[e_id[1]])
        # Subgraphs visitor initialization
        sgraph_id = graph.new_vertex_property("int")
        visitor = SubGraphVisitor(sgraph_id)
        # Find subgraphs
        coords = np.zeros(shape=(vertices_gt.shape[0], 3), dtype=np.float)
        rids = np.zeros(shape=vertices_gt.shape[0], dtype=np.int)
        for i, v in enumerate(vertices_gt):
            if sgraph_id[v] == 0:
                gt.dfs_search(graph, v, visitor)
                visitor.update_sgraphs_id()
            v_id = v_ids[i]
            rids[i] = v_id
            coords[i, :] = self._GraphMCF__skel.GetPoint(v_id)
        # Clusters filtering
        ids = sgraph_id.get_array()
        max_id = ids.max() + 1
        lut_counts = np.zeros(shape=max_id, dtype=np.int)
        for idy in ids:
            lut_counts[idy] += 1
        hold_coords = list()
        hold_ids = list()
        for idy in range(1, max_id):
            counts = lut_counts[idy]
            if counts > 0:
                # Keep only clusters passing the slice's cardinality test
                if slice.test(cnv=counts):
                    c_ids = np.where(ids == idy)[0]
                    if len(c_ids) > 0:
                        for c_id in c_ids:
                            hold_coords.append(coords[c_id, :])
                            hold_ids.append(rids[c_id])
        # Computing mask
        mask_out = (self.__mb_seg == slice.get_side())
        mask_out *= ((self.__mb_dst >= slice.get_eu_dst_low()) * (self.__mb_dst <= slice.get_eu_dst_high()))
        return np.asarray(hold_coords, dtype=np.float), np.asarray(hold_ids, dtype=np.int), mask_out
# Cloud of points in a slice
# slices: Slice object with the membrane slice information
# cont_mode: if True (default false) contact points coordinates, instead of vertex ones, are returned
# graph: if not None (default) it contains the already compute GraphGT, it can save a lot of time
# if this function is going to be called several times
# cont_prop: if cont_mode active, then it may get an array from GraphMCF properties associated to the contact
# points. (default STR_FIELD_VALUE_INV)
# conf_fus: if cont_mode active, mode for contact point fusion criterium, valid: 'max'
# Return: an array with points coordinates, vertices ids and mask. Raises ValueError if points found
    def get_cloud_mb_slice(self, slice, cont_mode=False, graph_gt=None, cont_prop=STR_FIELD_VALUE_INV,
                           cont_fus='max'):
        """Cloud of points in a slice (see the comment block above the method).

        Returns (coords, vertex_ids, mask) and, when cont_mode is True, also
        the per-point fused contact property values.
        """
        # Initialisation
        seg_id = self.get_prop_id(MB_SEG)
        seg_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=seg_id))
        eu_id = self.get_prop_id(MB_EU_DST)
        eu_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=eu_id))
        geo_id = self.get_prop_id(MB_GEO_DST)
        geo_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=geo_id))
        gl_id = self.get_prop_id(MB_GEO_LEN)
        gl_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=gl_id))
        sin_id = self.get_prop_id(MB_GEO_SIN)
        sin_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=sin_id))
        if cont_mode:
            cont_id = self.get_prop_id(MB_CONT_COORD)
            cont_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=cont_id))
            cont_pid = self.get_prop_id(cont_prop)
            cont_pdt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=cont_pid))
            if cont_fus != 'max':
                error_msg = 'Input contact point property fusion criterium ' + cont_fus + ' is not valid.'
                raise pexceptions.PySegInputError(expr='get_cloud_mb_slice (MbGraphMCF)', msg=error_msg)
        # Find tail vertices within slice
        h_v_ids = list()
        for v in self.get_vertices_list():
            v_id = v.get_id()
            seg = self.get_prop_entry_fast(seg_id, v_id, 1, seg_dt)[0]
            eu = self.get_prop_entry_fast(eu_id, v_id, 1, eu_dt)[0]
            geo = self.get_prop_entry_fast(geo_id, v_id, 1, geo_dt)[0]
            gl = self.get_prop_entry_fast(gl_id, v_id, 1, gl_dt)[0]
            sin = self.get_prop_entry_fast(sin_id, v_id, 1, sin_dt)[0]
            if slice.test(side=seg, eu_dst=eu, geo_dst=geo, geo_len=gl, sin=sin):
                h_v_ids.append(v_id)
        # Vertices thresholding
        th_l = slice.get_list_th()
        if len(th_l) > 0:
            if graph_gt is None:
                graph_gt = GraphGT(self)
            for th in slice.get_list_th():
                h_v_ids = self.threshold_slice(h_v_ids, th, graph_gt)
        # Creating vertices LUT
        v_ids = list()
        cont = 0
        lut_ver = (-1) * np.ones(shape=self.get_nid(), dtype=np.int)
        for v_id in h_v_ids:
            if lut_ver[v_id] == -1:
                lut_ver[v_id] = cont
                v_ids.append(v_id)
                cont += 1
        # Find edges within slice
        e_ids = list()
        for edge in self.get_edges_list():
            s_id = lut_ver[edge.get_source_id()]
            t_id = lut_ver[edge.get_target_id()]
            if (s_id > -1) and (t_id > -1):
                e_ids.append([s_id, t_id])
        # graph_tool building
        vertices_gt = np.empty(shape=len(v_ids), dtype=object)
        graph = gt.Graph(directed=False)
        for i in range(len(v_ids)):
            vertices_gt[i] = graph.add_vertex()
        for e_id in e_ids:
            graph.add_edge(vertices_gt[e_id[0]], vertices_gt[e_id[1]])
        # Subgraphs visitor initialization
        sgraph_id = graph.new_vertex_property("int")
        visitor = SubGraphVisitor(sgraph_id)
        # Find subgraphs
        coords = np.zeros(shape=(vertices_gt.shape[0], 3), dtype=np.float)
        rids = np.zeros(shape=vertices_gt.shape[0], dtype=np.int)
        if cont_fus == 'max':
            cont_p = (-np.inf) * np.ones(shape=vertices_gt.shape[0], dtype=np.int)
        for i, v in enumerate(vertices_gt):
            if sgraph_id[v] == 0:
                gt.dfs_search(graph, v, visitor)
                visitor.update_sgraphs_id()
            v_id = v_ids[i]
            rids[i] = v_id
            if cont_mode:
                coords[i, :] = self.get_prop_entry_fast(cont_id, v_id, 3, cont_dt)
                # NOTE(review): reads 3 components of the contact property and
                # keeps the first -- confirm the property arity is intended.
                val = self.get_prop_entry_fast(cont_pid, v_id, 3, cont_pdt)[0]
                if cont_fus == 'max':
                    if val > cont_p[i]:
                        cont_p[i] = val
            else:
                coords[i, :] = self._GraphMCF__skel.GetPoint(v_id)
        # Clusters filtering
        ids = sgraph_id.get_array()
        max_id = ids.max()
        lut_counts = np.zeros(shape=max_id + 1, dtype=np.int)
        for idy in ids:
            lut_counts[idy] += 1
        hold_coords = list()
        hold_ids = list()
        for idy in range(1, max_id + 1):
            counts = lut_counts[idy]
            if slice.test(cnv=counts):
                c_ids = np.where(ids == idy)[0]
                for c_id in c_ids:
                    hold_coords.append(coords[c_id, :])
                    hold_ids.append(rids[c_id])
        # Computing mask
        mask_out = (self.__mb_seg == slice.get_side())
        if cont_mode:
            hold_mb = self.__mb_dst / self.get_resolution()
            mask_out *= ((hold_mb >= 0) * (hold_mb <= 2))
        else:
            mask_out *= ((self.__mb_dst >= slice.get_eu_dst_low()) * (self.__mb_dst <= slice.get_eu_dst_high()))
        if cont_mode:
            return np.asarray(hold_coords, dtype=np.float), np.asarray(hold_ids, dtype=np.int), mask_out, cont_p
        else:
            return np.asarray(hold_coords, dtype=np.float), np.asarray(hold_ids, dtype=np.int), mask_out
    def get_cloud_mb_slice_pick(self, slice, cont_mode=False, graph_gt=None, cont_prop=STR_FIELD_VALUE_INV, cont_fus='max'):
        """
        Cloud of points in a slice for picking
        :param slice: slice object with the membrane slice information
        :param cont_mode: if 0 then vertices localization are directly provided, if 1 then the contact points associated
        to the each edge between membrane and exterior, and if 2 then the last vertex points before the membrane is provided
        :param graph_gt: if not None (default) it contains the already compute GraphGT, it can save a lot of time
        if this function is going to be called several times
        :param cont_prop: if cont_mode active, then it may get an array from GraphMCF properties associated to the contact
        # points. (default STR_FIELD_VALUE_INV)
        :param cont_fus: if cont_mode active, mode for contact point fusion criterium, valid: 'max'
        :return: an array with points coordinates, vertices ids and mask. Raises ValueError if points found
        """
        # Initialisation
        seg_id = self.get_prop_id(MB_SEG)
        seg_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=seg_id))
        eu_id = self.get_prop_id(MB_EU_DST)
        eu_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=eu_id))
        geo_id = self.get_prop_id(MB_GEO_DST)
        geo_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=geo_id))
        gl_id = self.get_prop_id(MB_GEO_LEN)
        gl_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=gl_id))
        sin_id = self.get_prop_id(MB_GEO_SIN)
        sin_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=sin_id))
        if cont_mode > 0:
            cont_id = self.get_prop_id(MB_CONT_COORD)
            cont_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=cont_id))
            cont_pid = self.get_prop_id(cont_prop)
            cont_pdt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=cont_pid))
            if cont_fus != 'max':
                error_msg = 'Input contact point property fusion criterium ' + cont_fus + ' is not valid.'
                raise pexceptions.PySegInputError(expr='get_cloud_mb_slice (MbGraphMCF)', msg=error_msg)
        # Find tail vertices within slice
        h_v_ids = list()
        for v in self.get_vertices_list():
            v_id = v.get_id()
            seg = self.get_prop_entry_fast(seg_id, v_id, 1, seg_dt)[0]
            eu = self.get_prop_entry_fast(eu_id, v_id, 1, eu_dt)[0]
            geo = self.get_prop_entry_fast(geo_id, v_id, 1, geo_dt)[0]
            gl = self.get_prop_entry_fast(gl_id, v_id, 1, gl_dt)[0]
            sin = self.get_prop_entry_fast(sin_id, v_id, 1, sin_dt)[0]
            if slice.test(side=seg, eu_dst=eu, geo_dst=geo, geo_len=gl, sin=sin):
                h_v_ids.append(v_id)
        # Vertices thresholding
        th_l = slice.get_list_th()
        if len(th_l) > 0:
            if graph_gt is None:
                graph_gt = GraphGT(self)
            for th in slice.get_list_th():
                h_v_ids = self.threshold_slice(h_v_ids, th, graph_gt)
        # Creating vertices LUT
        # NOTE(review): this LUT is built but not consulted by either branch
        # below -- possibly dead code; verify before relying on it.
        v_ids = list()
        cont = 0
        lut_ver = (-1) * np.ones(shape=self.get_nid(), dtype=np.int)
        for v_id in h_v_ids:
            if lut_ver[v_id] == -1:
                lut_ver[v_id] = cont
                v_ids.append(v_id)
                cont += 1
        if cont_mode > 0:
            # Find edges within slice
            coords, ids, cont_p = list(), list(), list()
            for edge in self.get_edges_list():
                s_id = edge.get_source_id()
                t_id = edge.get_target_id()
                # Keep only edges whose endpoints have different segmentation labels
                if self.get_prop_entry_fast(seg_id, s_id, 1, seg_dt)[0] != self.get_prop_entry_fast(seg_id, t_id, 1, seg_dt)[0]:
                    if cont_mode == 1:
                        coords.append(self.get_prop_entry_fast(cont_id, s_id, 3, cont_dt))
                    else:
                        coords.append(self._GraphMCF__skel.GetPoint(t_id))
                    val = self.get_prop_entry_fast(cont_pid, t_id, 3, cont_pdt)[0]
                    cont_p.append(val)
                    ids.append(t_id)
        else:
            # Find vertices within slice
            # NOTE(review): iterates ALL vertices, ignoring the slice filter
            # (h_v_ids) computed above -- confirm this is intentional.
            coords, ids, cont_p = list(), list(), list()
            for vertex in self.get_vertices_list():
                v_id = vertex.get_id()
                coords.append(self._GraphMCF__skel.GetPoint(v_id))
                ids.append(v_id)
        # Computing mask
        mask_out = (self.__mb_seg == slice.get_side())
        if cont_mode:
            hold_mb = self.__mb_dst / self.get_resolution()
            mask_out *= ((hold_mb >= 0) * (hold_mb <= 2))
        else:
            mask_out *= ((self.__mb_dst >= slice.get_eu_dst_low()) * (self.__mb_dst <= slice.get_eu_dst_high()))
        if cont_mode:
            return np.asarray(coords, dtype=np.float), np.asarray(ids, dtype=np.int), mask_out, \
                   np.asarray(cont_p, dtype=np.float)
        else:
            return np.asarray(coords, dtype=np.float), np.asarray(ids, dtype=np.int), mask_out
# Filters a cloud of points from a slice by applying linker restrictions
def filter_linker_cloud(self, c_s, c_id_s, c_t, c_id_t, linker):
# Initialization
graph_gt = GraphGT(self).get_gt()
prop_vid = graph_gt.vertex_properties[DPSTR_CELL]
# prop_ew = graph_gt.edge_properties[STR_FIELD_VALUE]
prop_el = graph_gt.edge_properties[SGT_EDGE_LENGTH]
# Getting sources and targets lists
sources = list()
for v_id in c_id_s:
v_list = gt.find_vertex(graph_gt, prop_vid, v_id)
if len(v_list) > 0:
sources.append(v_list[0])
targets = list()
for v_id in c_id_t:
v_list = gt.find_vertex(graph_gt, prop_vid, v_id)
if len(v_list) > 0:
targets.append(v_list[0])
# Building neighbours array for sources
mat_geo = gt.shortest_distance(graph_gt, weights=prop_el)
neighs = np.zeros(shape=len(sources), dtype=np.int)
for i, s in enumerate(sources):
for j, t in enumerate(targets):
geo_dst = mat_geo[s][t]
x_s, y_s, z_s = self._GraphMCF__skel.GetPoint(prop_vid[s])
x_t, y_t, z_t = self._GraphMCF__skel.GetPoint(prop_vid[t])
hold = np.asarray((x_s-x_t, y_s-y_t, z_s-z_t), dtype=np.float)
eu_dst = math.sqrt((hold*hold).sum()) * self.get_resolution()
if eu_dst <= 0:
sin = 1.
else:
sin = geo_dst / eu_dst
if linker.test(geo_dst=geo_dst, eu_dst=eu_dst, sin=sin):
neighs[i] += 1
# Test number of neighbours in linked slice and cloud filtering
hold_cloud = list()
hold_ids = list()
for i, s in enumerate(sources):
if linker.test(neighs=neighs[i]):
hold_cloud.append(c_s[i])
hold_ids.append(c_id_s[i])
return np.asarray(hold_cloud, dtype=np.float), np.asarray(hold_ids, dtype=np.int)
# Generates a VTK poly data from a cloud of vertices a membrane slice
# v_ids: vertices id which represent a membrane slice
# Return: a VTK poly data object (.vtp)
def slice_to_vtp(self, v_ids):
# Initialization
poly = vtk.vtkPolyData()
poly.SetPoints(self._GraphMCF__skel.GetPoints())
verts = vtk.vtkCellArray()
lines = vtk.vtkCellArray()
struct = vtk.vtkIntArray()
struct.SetNumberOfComponents(1)
struct.SetName(MB_VTP_STR)
eu_dst = vtk.vtkFloatArray()
eu_dst.SetNumberOfComponents(1)
eu_dst.SetName(MB_EU_DST)
geo_dst = vtk.vtkFloatArray()
geo_dst.SetNumberOfComponents(1)
geo_dst.SetName(MB_GEO_DST)
geo_len = vtk.vtkFloatArray()
geo_len.SetNumberOfComponents(1)
geo_len.SetName(MB_GEO_LEN)
sin = vtk.vtkFloatArray()
sin.SetNumberOfComponents(1)
sin.SetName(MB_GEO_SIN)
kt = vtk.vtkFloatArray()
kt.SetNumberOfComponents(1)
kt.SetName(MB_GEO_KT)
tt = vtk.vtkFloatArray()
tt.SetNumberOfComponents(1)
tt.SetName(MB_GEO_TT)
ns = vtk.vtkFloatArray()
ns.SetNumberOfComponents(1)
ns.SetName(MB_GEO_NS)
bs = vtk.vtkFloatArray()
bs.SetNumberOfComponents(1)
bs.SetName(MB_GEO_BS)
apl = vtk.vtkFloatArray()
apl.SetNumberOfComponents(1)
apl.SetName(MB_GEO_APL)
prop_eu = self.get_prop_id(MB_EU_DST)
prop_geo = self.get_prop_id(MB_GEO_DST)
prop_gl = self.get_prop_id(MB_GEO_LEN)
prop_sin = self.get_prop_id(MB_GEO_SIN)
prop_kt = self.get_prop_id(MB_GEO_KT)
prop_tt = self.get_prop_id(MB_GEO_TT)
prop_ns = self.get_prop_id(MB_GEO_NS)
prop_bs = self.get_prop_id(MB_GEO_BS)
prop_apl = self.get_prop_id(MB_GEO_APL)
# VTK Topology
# Vertices
lut_ver = np.zeros(shape=self.get_nid(), dtype=np.bool)
for v_id in v_ids:
verts.InsertNextCell(1)
struct.InsertNextTuple((MB_VTP_STR_E,))
eu_dst.InsertNextTuple(self.get_prop_entry_fast(prop_eu, v_id, 1, np.float))
geo_dst.InsertNextTuple(self.get_prop_entry_fast(prop_geo, v_id, 1, np.float))
geo_len.InsertNextTuple(self.get_prop_entry_fast(prop_gl, v_id, 1, np.float))
sin.InsertNextTuple(self.get_prop_entry_fast(prop_sin, v_id, 1, np.float))
kt.InsertNextTuple(self.get_prop_entry_fast(prop_kt, v_id, 1, np.float))
tt.InsertNextTuple(self.get_prop_entry_fast(prop_tt, v_id, 1, np.float))
ns.InsertNextTuple(self.get_prop_entry_fast(prop_ns, v_id, 1, np.float))
bs.InsertNextTuple(self.get_prop_entry_fast(prop_bs, v_id, 1, np.float))
apl.InsertNextTuple(self.get_prop_entry_fast(prop_apl, v_id, 1, np.float))
verts.InsertCellPoint(v_id)
lut_ver[v_id] = True
# Contact points
for v_id in v_ids:
fil = self.get_mb_fil(v_id)
if isinstance(fil, FilamentLDG):
verts.InsertNextCell(1)
struct.InsertNextTuple((MB_VTP_STR_C,))
eu_dst.InsertNextTuple(self.get_prop_entry_fast(prop_eu, v_id, 1, np.float))
geo_dst.InsertNextTuple(self.get_prop_entry_fast(prop_geo, v_id, 1, np.float))
geo_len.InsertNextTuple(self.get_prop_entry_fast(prop_gl, v_id, 1, np.float))
sin.InsertNextTuple(self.get_prop_entry_fast(prop_sin, v_id, 1, np.float))
kt.InsertNextTuple(self.get_prop_entry_fast(prop_kt, v_id, 1, np.float))
tt.InsertNextTuple(self.get_prop_entry_fast(prop_tt, v_id, 1, np.float))
ns.InsertNextTuple(self.get_prop_entry_fast(prop_ns, v_id, 1, np.float))
bs.InsertNextTuple(self.get_prop_entry_fast(prop_bs, v_id, 1, np.float))
apl.InsertNextTuple(self.get_prop_entry_fast(prop_apl, v_id, 1, np.float))
verts.InsertCellPoint(self.get_mb_fil(v_id).get_point_ids()[-1])
# Filament paths
for v_id in v_ids:
fil = self.get_mb_fil(v_id)
if isinstance(fil, FilamentLDG):
p_ids = fil.get_point_ids()
n_points = len(p_ids)
lines.InsertNextCell(n_points)
struct.InsertNextTuple((MB_VTP_STR_F,))
eu_dst.InsertNextTuple(self.get_prop_entry_fast(prop_eu, v_id, 1, np.float))
geo_dst.InsertNextTuple(self.get_prop_entry_fast(prop_geo, v_id, 1, np.float))
geo_len.InsertNextTuple(self.get_prop_entry_fast(prop_gl, v_id, 1, np.float))
sin.InsertNextTuple(self.get_prop_entry_fast(prop_sin, v_id, 1, np.float))
kt.InsertNextTuple(self.get_prop_entry_fast(prop_kt, v_id, 1, np.float))
tt.InsertNextTuple(self.get_prop_entry_fast(prop_tt, v_id, 1, np.float))
ns.InsertNextTuple(self.get_prop_entry_fast(prop_ns, v_id, 1, np.float))
bs.InsertNextTuple(self.get_prop_entry_fast(prop_bs, v_id, 1, np.float))
apl.InsertNextTuple(self.get_prop_entry_fast(prop_apl, v_id, 1, np.float))
for i in range(n_points):
lines.InsertCellPoint(p_ids[i])
# Edges
for e in self.get_edges_list():
s_id = e.get_source_id()
t_id = e.get_target_id()
if lut_ver[s_id] and lut_ver[t_id]:
lines.InsertNextCell(2)
struct.InsertNextTuple((MB_VTP_STR_E,))
eu_dst.InsertNextTuple((-1,))
geo_dst.InsertNextTuple((-1,))
geo_len.InsertNextTuple((-1,))
sin.InsertNextTuple((-1,))
kt.InsertNextTuple((-1,))
tt.InsertNextTuple((-1,))
ns.InsertNextTuple((-1,))
bs.InsertNextTuple((-1,))
apl.InsertNextTuple((-1,))
lines.InsertCellPoint(s_id)
lines.InsertCellPoint(t_id)
# Building the vtp object
poly.SetVerts(verts)
poly.SetLines(lines)
poly.GetCellData().AddArray(struct)
poly.GetCellData().AddArray(eu_dst)
poly.GetCellData().AddArray(geo_dst)
poly.GetCellData().AddArray(geo_len)
poly.GetCellData().AddArray(sin)
poly.GetCellData().AddArray(kt)
poly.GetCellData().AddArray(tt)
poly.GetCellData().AddArray(ns)
poly.GetCellData().AddArray(bs)
poly.GetCellData().AddArray(apl)
return poly
# Generates a VTK poly data from the internal structure of the membrane
# v_ids: vertices id which represent a membrane slice, if none the membrane mask is used
# av_mode: if True (default False) the properties of arcs will be the properties values of
# respective vertices
# edges: if True (default False) only edge arcs are printed
# Return: a VTK poly data object (.vtp)
def mb_to_vtp(self, v_ids, av_mode=True, edges=False):
if v_ids is None:
return self.get_vtp_in_msk(self.__mb_seg == MB_SEG, av_mode, edges)
else:
return self.get_vtp_ids(v_ids, av_mode, edges)
# Generates a binary segmented tomogram (True-fg) from a list of vertices (or a membrane slice)
# v_ids: membrane slice cloud of vertices id
# th_den: number of sigmas above (+) or below vertex geometry density mean for thresholding,
# if None no threshold is applied
# slc: if True (default False) the list of vertices is considered a membrane slice
def print_slice(self, v_ids, th_den=None, slc=False):
# Initialization
img = np.zeros(shape=self._GraphMCF__density.shape, dtype=np.bool)
if slc:
for v_id in v_ids:
fil = self.get_mb_fil(v_id)
if isinstance(fil, FilamentLDG):
for f_id in fil.get_vertex_ids():
v = self.get_vertex(f_id)
v.get_geometry().print_in_numpy(img, True, th_den)
else:
for v_id in v_ids:
v = self.get_vertex(v_id)
v.get_geometry().print_in_numpy(img, True, th_den)
return img
# Set an edge property values into a fixed value so as to protect them from edge simplification
# (see graph_density_simp)
# prop_key: property to set
# mb_dst: distance to membrane for protecting (in nm)
# val: setting value
# Return: a new property key for using in graph_density_simp for edge simplification
def protect_mb_edges_prop(self, prop_key, mb_dst, val):
# Getting region to protect
tomod = disperse_io.seg_dist_trans(self.__mb_seg == MB_LBL) * self.get_resolution()
bin_mask = tomod < mb_dst
# Get old property and creation of the new
key_id = self.get_prop_id(prop_key)
data_type = self.get_prop_type(key_id=key_id)
dt = disperse_io.TypesConverter().gt_to_numpy(data_type)
ncomp = self.get_prop_ncomp(key_id=key_id)
new_prop_key = prop_key + '_ptc'
new_key_id = self.add_prop(new_prop_key, data_type, ncomp)
# Set values of the new property
for v in self.get_vertices_list():
v_id = v.get_id()
t = list(self.get_prop_entry_fast(key_id, v_id, ncomp, dt))
try:
x, y, z = self.get_vertex_coords(v)
if bin_mask[int(round(x)), int(round(y)), int(round(z))]:
for i in range(len(t)):
t[i] = val
except IndexError:
pass
self.set_prop_entry_fast(new_key_id, tuple(t), v_id, ncomp)
for e in self.get_edges_list():
e_id = e.get_id()
t = list(self.get_prop_entry_fast(key_id, e_id, ncomp, dt))
try:
x, y, z = self.get_edge_coords(e)
if bin_mask[int(round(x)), int(round(y)), int(round(z))]:
for i in range(len(t)):
t[i] = val
except IndexError:
pass
self.set_prop_entry_fast(new_key_id, tuple(t), e_id, ncomp)
return new_prop_key
# Threshold trans-membrane vertices simplification
    # prop_key: vertex property key
# den: desired vertex density (vertex/nm^3)
# mode: if 'high' (default) then vertices with highest property values are preserved,
# otherwise those with the lowest
def trans_mb_simp(self, prop_key, den, mode='high'):
# Building membrane mask
mb_mask = self.__mb_seg == MB_LBL
# Compute valid region volume (nm^3)
res3 = float(self.get_resolution() * self.get_resolution() * self.get_resolution())
vol = float(mb_mask.sum()) * res3
if vol == 0:
error_msg = 'Membrane region has null volume.'
raise pexceptions.PySegInputWarning(expr='thres_trans_mb (GraphMCF)', msg=error_msg)
prop_id = self.get_prop_id(prop_key)
p_type = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=prop_id))
n_comp = self.get_prop_ncomp(key_id=prop_id)
if n_comp > 1:
error_msg = 'Property ' + prop_key + ' has more than one components.'
raise pexceptions.PySegInputError(expr='thres_trans_mb (GraphMCF)', msg=error_msg)
# Compute target number of vertices with membrane
mb_verts = np.asarray(self.get_mb_vertices_list())
n_verts = float(len(mb_verts))
n_tverts = int(round(den * vol))
if n_verts < n_tverts:
error_msg = 'Desired density cannot be achieved because current is ' + str(n_verts/vol)
raise pexceptions.PySegInputWarning(expr='thres_trans_mb (GraphMCF)', msg=error_msg)
# List of ordered vertices
arr_prop = np.zeros(shape=mb_verts.shape[0], dtype=p_type)
for i, v in enumerate(mb_verts):
arr_prop[i] = self.__props_info.get_prop_entry_fast(prop_id, v.get_id(), n_comp, p_type)[0]
ids = np.argsort(arr_prop)
mb_verts = mb_verts[ids]
if mode == 'high':
mb_verts = mb_verts[::-1]
# Removing vertices
for i in range(n_tverts, mb_verts.shape[0]):
self.remove_vertex(mb_verts[i])
# Threshold the vertices of a slice
    # coords: list with the vertex coordinates
# v_ids: list with vertices ids for the slice
# thres: ThresSlice object with the configuration for thresholding
# Result: a sub-set of the input coords and v_ids
def slice_vertex_threshold(self, coords, v_ids, thres):
# Initialization
prop_key = thres.get_prop_key()
prop_id = self.get_prop_id(prop_key)
p_type = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=prop_id))
n_comp = self.get_prop_ncomp(key_id=prop_id)
if n_comp > 1:
error_msg = 'Property ' + prop_key + ' has more than one components.'
raise pexceptions.PySegInputError(expr='slice_vertex_threshold (GraphMCF)', msg=error_msg)
hold_ids = list()
hold_coords = list()
for (coord, v_id) in zip(coords, v_ids):
val = self.get_prop_entry_fast(prop_id, v_id, n_comp, p_type)[0]
if thres.test(val):
hold_coords.append(coord)
hold_ids.append(v_id)
return np.asarray(hold_coords, dtype=np.float), np.asarray(hold_ids, dtype=np.int)
# Compute angle between a vector (3 components property) and the normal to a membrane (only vertices)
# prop_3d: property key for the vector
# v_ids: list with the vertices ids, if None (default) all vertices are considered
# Result: a property with angles in degrees stored as prop_3d+'_ang', the normals as prop_3d+'_norm'
# Returns: the property keys for normals, and angles
def angle_vector_norms(self, prop_3d, v_ids=None):
# Input parsing and initialization
prop_id = self.get_prop_id(prop_3d)
if prop_id is None:
error_msg = 'Input property ' + prop_3d + ' does not exist.'
raise pexceptions.PySegInputError(expr='angle_vector_norms (MbGraphMCF)', msg=error_msg)
n_comp = self.get_prop_ncomp(key_id=prop_id)
if n_comp != 3:
error_msg = 'Only input properties with 3 component are valid, current ' + str(n_comp) + '.'
raise pexceptions.PySegInputError(expr='angle_vector_norms (MbGraphMCF)', msg=error_msg)
dtype = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=prop_id))
prop_id_n = self.get_prop_id(MB_CONT_COORD)
dtype_n = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=prop_id_n))
# Get vertices list
if v_ids is None:
v_ids = list()
for v in self._GraphMCF__vertices:
if v is not None:
v_ids.append(v.get_id())
norms = np.zeros(shape=(len(v_ids), 3), dtype=np.float32)
angles = np.zeros(shape=(len(v_ids), 3), dtype=np.float32)
# Loop for computing the normals and angles
for i, v_id in enumerate(v_ids):
t = self.get_prop_entry_fast(prop_id_n, v_id, 3, dtype_n)
norm = np.asarray(self._GraphMCF__skel.GetPoint(v_id), dtype=np.float32) - np.asarray(t, dtype=np.float32)
vect = np.asarray(self.get_prop_entry_fast(prop_id, v_id, 3, dtype), dtype=dtype)
angles[i] = math.degrees(angle_2vec_3D(norm, vect))
norms[i, :] = norm
# Inserting properties and values
norm_key, ang_key = prop_3d+'_norm', prop_3d+'_ang'
gtype_f = disperse_io.TypesConverter().numpy_to_gt(np.float32)
norm_id, ang_id = self.add_prop(norm_key, gtype_f, 3), self.add_prop(ang_key, gtype_f, 1)
for (v_id, ang, norm) in zip(v_ids, angles, norms):
self.set_prop_entry_fast(ang_id, ang, v_id, 1)
self.set_prop_entry_fast(norm_id, norm, v_id, 3)
return norm_key, ang_key
# Applies a property threshold to a slice
# v_ids: slice vertices ids for being thresholded
# th: Threshold object for a GraphMCF property
# graph_gt: if not None (default) it contains the already compute GraphGT, it can save a lot of time
# if this function is going to be called several times
# Returns: a list with the thresholded vertices ids
def threshold_slice(self, v_ids, th, graph_gt=None):
# Getting the GraphGT
if graph_gt is None:
graph_gt = GraphGT(self).get_gt()
prop_id = graph_gt.vertex_properties[DPSTR_CELL]
prop_th = graph_gt.vertex_properties[th.get_prop_key()].get_array()
# Get LUT for slice vertices
vertices_gt = np.empty(shape=self.get_nid(), dtype=object)
prop_arr = np.zeros(shape=len(v_ids), dtype=prop_th.dtype)
for v in graph_gt.vertices():
vertices_gt[prop_id[v]] = v
cont = 0
for v_id in v_ids:
if vertices_gt[v_id] is not None:
prop_arr[cont] = prop_th[vertices_gt[v_id]]
cont += 1
# Apply the threshold
if th.get_mode() == XML_MBT_MPERI:
per_th_l = np.percentile(prop_arr, th.get_value_low())
per_th_h = np.percentile(prop_arr, th.get_value_high())
mask = (prop_arr >= per_th_l) & (prop_arr <= per_th_h)
elif th.get_mode() == XML_MBT_MPERO:
per_th_l = np.percentile(prop_arr, th.get_value_low())
per_th_h = np.percentile(prop_arr, th.get_value_high())
mask = (prop_arr < per_th_l) | (prop_arr > per_th_h)
elif th.get_mode() == XML_MBT_MIN:
mask = (prop_arr >= th.get_value_low()) & (prop_arr <= th.get_value_high())
else:
mask = (prop_arr < th.get_value_low()) | (prop_arr > th.get_value_high())
return list(np.asarray(v_ids)[mask])
# Generates a filament network where filaments are specified by a set of sources and targets
# s_ids: list with sources ids
# t_ids: list with targets ids
# rg_dst: range for valid geodesic distances in nm
# rg_sin: range for sinuosities
# graph_gt: by default None, otherwise pre-computed GraphGT
# key_l: property for measuring the length (default STR_VERT_DST)
def gen_fil_network(self, s_ids, t_ids, rg_dst, rg_sin, graph_gt=None, key_l=STR_VERT_DST):
# Initialization
if (not hasattr(rg_dst, '__len__')) or (len(rg_dst) != 2):
error_msg = 'The input range of distances must be a 2-tuple.'
raise pexceptions.PySegInputError(expr='gen_fil_network (MbGraphMCF)', msg=error_msg)
if rg_dst[0] < 0:
error_msg = 'Lowest distance must greater or equal to zero, current ' + str(rg_dst[0]) + '.'
raise pexceptions.PySegInputError(expr='gen_fil_network (MbGraphMCF)', msg=error_msg)
if (not hasattr(rg_sin, '__len__')) or (len(rg_sin) != 2):
error_msg = 'The input range of sinuosity must be a 2-tuple.'
raise pexceptions.PySegInputError(expr='gen_fil_network (MbGraphMCF)', msg=error_msg)
if rg_dst[0] < 1:
error_msg = 'Lowest sinuosity must greater or equal to zero, current ' + str(rg_sin[0]) + '.'
raise pexceptions.PySegInputError(expr='gen_fil_network (MbGraphMCF)', msg=error_msg)
if (not hasattr(s_ids, '__len__')) or (not hasattr(t_ids, '__len__')):
error_msg = 'Input source and target Ids must be iterables.'
raise pexceptions.PySegInputError(expr='gen_fil_network (MbGraphMCF)', msg=error_msg)
fils = list()
# Compute GraphGT
if graph_gt is None:
graph = GraphGT(self)
graph_gt = graph.get_gt()
else:
if not isinstance(graph_gt, gt.Graph):
error_msg = 'Input graph_gt must be an instance of gt.Graph.'
raise pexceptions.PySegInputError(expr='gen_fil_network (MbGraphMCF)', msg=error_msg)
prop_id, prop_id_e = graph_gt.vp[DPSTR_CELL], graph_gt.ep[DPSTR_CELL]
try:
prop_l = graph_gt.ep[key_l]
except KeyError:
error_msg = 'Requires the previous computations of ' + STR_VERT_DST + ' property'
raise pexceptions.PySegInputError(expr='gen_fil_network (MbGraphMCF)', msg=error_msg)
# Vertices lut
nv = graph_gt.num_vertices()
nids = self.get_nid()
s_lut, t_lut = np.zeros(shape=nids, dtype=np.bool), np.zeros(shape=nids, dtype=np.bool)
for i, s_id in enumerate(s_ids):
s_lut[s_id] = True
for t_id in t_ids:
t_lut[t_id] = True
len_s, len_t = len(s_ids), len(t_ids)
sg_ids = np.zeros(shape=len_s, dtype=object)
sg_lut, tg_lut = np.zeros(shape=nv, dtype=np.bool), np.zeros(shape=nv, dtype=np.bool)
count_s = 0
for v in graph_gt.vertices():
v_id = prop_id[v]
if s_lut[v_id]:
sg_ids[count_s] = v
sg_lut[int(v)] = True
count_s += 1
if t_lut[v_id]:
tg_lut[int(v)] = True
# Loop for sources
# count = 0
for s in sg_ids:
# print str(count) + ' of ' + str(len_s)
# count += 1
# Compute geodesic distances to source
s_mcf = self.get_vertex(prop_id[s])
pt_s = np.asarray(self.get_vertex_coords(s_mcf))
dst_map, pred_map = gt.shortest_distance(graph_gt, source=s, weights=prop_l, max_dist=rg_dst[1],
pred_map=True)
# Find potential targets
h_tg_lut = np.copy(tg_lut)
dst_map_arr = dst_map.get_array()
n_ids = np.where(tg_lut & (dst_map_arr>=rg_dst[0]) & (dst_map_arr<=rg_dst[1]))[0]
sort_ids = np.argsort(dst_map_arr[n_ids])
# Loop for found targets
for n_id in n_ids[sort_ids]:
# Check already visited
t = graph_gt.vertex(n_id)
if not h_tg_lut[int(t)]:
continue
# Find the paths
v_path, e_path = gt.shortest_path(graph_gt, source=s, target=t, weights=prop_l, pred_map=pred_map)
# Build the filament
lv = len(v_path)
if lv > 1:
length = 0
vl_ids, el_ids = list(), list()
vh_id = prop_id[v_path[0]]
vl_ids.append(vh_id)
# el_ids.append(vh_id)
for i in range(1,lv):
vh, eh = v_path[i], e_path[i-1]
vh_id = prop_id[vh]
# Rewind if source
if sg_lut[int(vh)]:
length = 0
vl_ids, el_ids = list(), list()
vl_ids.append(vh_id)
# el_ids.append(vh_id)
# Check potential target
elif h_tg_lut[int(vh)]:
vl_ids.append(vh_id)
el_ids.append(prop_id_e[eh])
# el_ids.append(vh_id)
length += prop_l[eh]
# Check length
if (length>=rg_dst[0]) and (length<=rg_dst[1]):
# Check sinuosity
t_mcf = self.get_vertex(vh_id)
pt_t = np.asarray(self.get_vertex_coords(t_mcf))
eu_dst = pt_s - pt_t
eu_dst = math.sqrt((eu_dst*eu_dst).sum()) * self.get_resolution()
sin = dst_map_arr[n_id] / eu_dst
if (sin>=rg_sin[0]) and (sin<=rg_sin[1]):
# Filament found
fils.append(FilamentUDG(vl_ids[:], el_ids[:], self))
# No more filaments in this path
for j in range(i, lv):
h_tg_lut[int(v_path[j])] = False
break
else:
vl_ids.append(vh_id)
el_ids.append(prop_id_e[eh])
# el_ids.append(vh_id)
length += prop_l[eh]
# Build the network
return NetFilamentsSyn(fils)
##################################################################################################
# Generic extension for dealing with GraphMCF of synapses, analogously it can be used for any junction of two membranes
#
#
class SynGraphMCF(GraphMCF):
#### Constructor Area
# During building
# skel: DisPerSe skeleton
# manifolds: DisPerSe manifolds
# density: image density map
    # syn_seg: tomogram with synapse segmentation (1-pst_mb, 2-pre_mb, 3-psd, 4-az, 5-cleft and otherwise-bg)
    def __init__(self, skel, manifolds, density, syn_seg):
        """Build the synapse graph from a DisPerSe skeleton.

        skel: DisPerSe skeleton
        manifolds: DisPerSe manifolds
        density: image density map
        syn_seg: tomogram with synapse segmentation (1-pst_mb, 2-pre_mb, 3-psd, 4-az,
                 5-cleft and otherwise-bg)
        """
        super(SynGraphMCF, self).__init__(skel, manifolds, density)
        self.build_from_skel(basic_props=False)
        self.__syn_seg = syn_seg
        # Per-vertex LUTs with the filament to each membrane, filled by compute_mb_geo()
        self.__mb_fils_pst = np.zeros(shape=self.get_nid(), dtype=object)
        self.__mb_fils_pre = np.zeros(shape=self.get_nid(), dtype=object)
        # Attach the segmentation labels as a vertex property (nearest-neighbour field)
        self.add_scalar_field_nn(self.__syn_seg, SYN_SEG)
        # Euclidean distance transforms (in nm) to the post- and pre-synaptic membranes
        self.__mb_dst_pst = disperse_io.seg_dist_trans(self.__syn_seg == SYN_PST_LBL) * self.get_resolution()
        self.__mb_dst_pre = disperse_io.seg_dist_trans(self.__syn_seg == SYN_PRE_LBL) * self.get_resolution()
#### Set/Get functionality
#### External functionality
# Returns vertices within the membrane segmentation
# mb_lbl: if 1 then pst else if 2 pre
def get_mb_vertices_list(self, mb_lbl):
verts_mb = list()
prop_seg_id = self.get_prop_id(SYN_SEG)
for v in self.get_vertices_list():
if self.get_prop_entry_fast(prop_seg_id, v.get_id(), 1, np.int)[0] == mb_lbl:
verts_mb.append(v)
return verts_mb
# Euclidean (through euclidean space) shortest distance to two membranes of the synapse
# Returns: result stored as property MB_PST_EU_DST and MB_PRE_EU_DST
def compute_mb_eu_dst(self):
self.add_scalar_field(self.__mb_dst_pst, MB_PST_EU_DST)
self.add_scalar_field(self.__mb_dst_pre, MB_PRE_EU_DST)
# Geodesic (through graph and euclidean) distance to shortest contact point,
# it also computes differential geometry property for filaments
# Returns: result stored as property MB_PST|PRE_GEO_DST, MB_PST|PRE_GEO_LEN and MB_PST|PRE_GEO_SIM
def compute_mb_geo(self, update=True):
# Initialization
if update or (self.get_prop_id(SYN_SEG) is None):
self.add_scalar_field_nn(self.__syn_seg, SYN_SEG)
if update or (self.get_prop_id(SGT_EDGE_LENGTH) is None):
self.compute_edges_length(SGT_EDGE_LENGTH, 1, 1, 1, False)
if update or (self.get_prop_id(STR_EDGE_FNESS) is None):
self.compute_edge_filamentness()
key_pst_dst_id = self.get_prop_id(MB_PST_GEO_DST)
if key_pst_dst_id is None:
key_pst_dst_id = self.add_prop(MB_PST_GEO_DST, 'float', 1)
key_pre_dst_id = self.get_prop_id(MB_PRE_GEO_DST)
if key_pre_dst_id is None:
key_pre_dst_id = self.add_prop(MB_PRE_GEO_DST, 'float', 1)
key_pst_len_id = self.get_prop_id(MB_PST_GEO_LEN)
if key_pst_len_id is None:
key_pst_len_id = self.add_prop(MB_PST_GEO_LEN, 'float', 1)
key_pre_len_id = self.get_prop_id(MB_PRE_GEO_LEN)
if key_pre_len_id is None:
key_pre_len_id = self.add_prop(MB_PRE_GEO_LEN, 'float', 1)
key_pst_sin_id = self.get_prop_id(MB_PST_GEO_SIN)
if key_pst_sin_id is None:
key_pst_sin_id = self.add_prop(MB_PST_GEO_SIN, 'float', 1)
key_pre_sin_id = self.get_prop_id(MB_PRE_GEO_SIN)
if key_pre_sin_id is None:
key_pre_sin_id = self.add_prop(MB_PRE_GEO_SIN, 'float', 1)
key_pst_cont_id = self.get_prop_id(MB_PST_CONT_COORD)
if key_pst_cont_id is None:
key_pst_cont_id = self.add_prop(MB_PST_CONT_COORD, 'float', 3)
key_pre_cont_id = self.get_prop_id(MB_PRE_CONT_COORD)
if key_pre_cont_id is None:
key_pre_cont_id = self.add_prop(MB_PRE_CONT_COORD, 'float', 3)
# Getting the graph GraphGT
graph_gt = GraphGT(self).get_gt()
prop_elen = graph_gt.edge_properties[SGT_EDGE_LENGTH]
prop_seg = graph_gt.vertex_properties[SYN_SEG]
prop_eid = graph_gt.edge_properties[DPSTR_CELL]
prop_vid = graph_gt.vertex_properties[DPSTR_CELL]
# Creating LUTs
lut_psd, lut_clf_pst = list(), list()
lut_az, lut_clf_pre = list(), list()
for v in graph_gt.vertices():
i_v = int(v)
seg_lbl = prop_seg[v]
if seg_lbl == SYN_PSD_LBL:
lut_psd.append(i_v)
if seg_lbl == SYN_AZ_LBL:
lut_az.append(i_v)
elif seg_lbl == SYN_CLF_LBL:
lut_clf_pst.append(i_v)
lut_clf_pre.append(i_v)
elif seg_lbl == SYN_PST_LBL:
lut_psd.append(i_v)
lut_clf_pst.append(i_v)
elif seg_lbl == SYN_PRE_LBL:
lut_az.append(i_v)
lut_clf_pre.append(i_v)
lut_psd, lut_clf_pst = np.asarray(lut_psd, dtype=np.int), np.asarray(lut_clf_pst, dtype=np.int)
lut_az, lut_clf_pre = np.asarray(lut_az, dtype=np.int), np.asarray(lut_clf_pre, dtype=np.int)
# Loop for all vertices
for v in graph_gt.vertices():
# Look for vertices out the membranes
seg_lbl = prop_seg[v]
if (seg_lbl != SYN_PST_LBL) and (seg_lbl != SYN_PRE_LBL):
# Getting lut for vertices on the other side of the membrane
if seg_lbl == SYN_PSD_LBL:
lut_oth_pst, lut_oth_pre = lut_clf_pst, None
elif seg_lbl == SYN_AZ_LBL:
lut_oth_pst, lut_oth_pre = None, lut_clf_pre
elif seg_lbl == SYN_CLF_LBL:
lut_oth_pst, lut_oth_pre = lut_psd, lut_az
else:
continue
# Checking the sides
t_l, side_l = list(), list()
dists = gt.shortest_distance(graph_gt, source=v, weights=prop_elen).get_array()
if lut_oth_pst is not None:
dists_pot_pst = dists[lut_oth_pst]
if len(dists_pot_pst) > 0:
t_l.append(graph_gt.vertex(lut_oth_pst[np.argmin(dists_pot_pst)]))
side_l.append(SYN_PST_LBL)
if lut_oth_pre is not None:
dists_pot_pre = dists[lut_oth_pre]
if len(dists_pot_pre) > 0:
t_l.append(graph_gt.vertex(lut_oth_pre[np.argmin(dists_pot_pre)]))
side_l.append(SYN_PRE_LBL)
# Finding the shortest geodesic path to other vertices which crosses the membrane
for (t, sd) in zip(t_l, side_l):
v_path, e_path = gt.shortest_path(graph_gt, v, t, weights=prop_elen)
if len(e_path) > 0:
contact = None
# Measuring the path (on euclidean space) until membrane contact point
fil_v_ids = list()
for h_v in v_path[0:-1]:
fil_v_ids.append(prop_vid[h_v])
fil_p_ids = list()
# Initialize the filament starting point
fil_p_ids.append(prop_vid[v])
path_len = .0
for e in e_path[0:-1]:
path_len += prop_elen[e]
# Add in a ordered ways path points to the filament
p_ids = self.get_edge_ids(self.get_edge(prop_eid[e]))
if p_ids[-1] == fil_p_ids[-1]:
p_ids = p_ids[::-1]
fil_p_ids += p_ids[1::]
edge = self.get_edge(prop_eid[e_path[-1]])
e_ids = self.get_edge_ids(edge)
e_coords = np.asarray(self.get_edge_arcs_coords(edge), dtype=np.float)
e_coords_int = np.asarray(e_coords.round(), dtype=np.int)
try:
xi, yi, zi = e_coords_int[0, :]
# Reverse for ending with vertex through membrane
if self.__syn_seg[xi, yi, zi] != seg_lbl:
e_coords = e_coords[::-1]
e_coords_int = e_coords_int[::-1]
e_ids = e_ids[::-1]
contact = e_coords[0, :]
fil_p_ids.append(e_ids[0])
# Loop for length increasing
xh, yh, zh = e_coords[0, :]
for i in range(1, e_coords.shape[0]):
x, y, z = e_coords_int[i, :]
if self.__syn_seg[x, y, z] == seg_lbl:
x, y, z = e_coords[i, :]
hold_len = np.asarray((x-xh, y-yh, z-zh), dtype=np.float)
path_len += (math.sqrt((hold_len*hold_len).sum()) * self.get_resolution())
xh, yh, zh = x, y, z
contact = e_coords[i, :]
fil_p_ids.append(e_ids[i])
else:
break
except IndexError:
pass
# Compute metrics only if a contact is found
if contact is not None:
# Add filament
v_id = prop_vid[v]
if len(fil_p_ids) > 2:
try:
fil = FilamentLDG(fil_v_ids, fil_p_ids, self)
except IndexError:
print('WARNING: filament with less than 3 points!')
if sd == SYN_PST_LBL:
self.__mb_fils_pst[v_id] = fil
self.set_prop_entry_fast(key_pst_dst_id, (fil.get_dst(),), v_id, 1)
self.set_prop_entry_fast(key_pst_len_id, (fil.get_length(),), v_id, 1)
self.set_prop_entry_fast(key_pst_sin_id, (fil.get_sinuosity(),), v_id, 1)
self.set_prop_entry_fast(key_pst_cont_id, (contact[0], contact[1], contact[2]),
v_id, 3)
elif sd == SYN_PRE_LBL:
self.__mb_fils_pre[v_id] = fil
self.set_prop_entry_fast(key_pre_dst_id, (fil.get_dst(),), v_id, 1)
self.set_prop_entry_fast(key_pre_len_id, (fil.get_length(),), v_id, 1)
self.set_prop_entry_fast(key_pre_sin_id, (fil.get_sinuosity(),), v_id, 1)
self.set_prop_entry_fast(key_pre_cont_id, (contact[0], contact[1], contact[2]),
v_id, 3)
else:
print('WARNING: filament with less than 3 points!')
# Returns: returns a 2-tuple (pst, pre) with the the two possible filaments (if a filament does not exist is None)
# if mb_id == SYN_PST|PRE_LBL only one filament is returned
def get_mb_fil(self, v_id, mb_id=None):
if mb_id is None:
return self.__mb_fils_pst[v_id], self.__mb_dst_pre[v_id]
elif mb_id == SYN_PST_LBL:
return self.__mb_fils_pst[v_id]
elif mb_id == SYN_PRE_LBL:
return self.__mb_fils_pre[v_id]
else:
error_msg = 'Non valid membrane label ' + str(mb_id)
raise pexceptions.PySegInputError(expr='get_mb_fil (SynGraphMCF)', msg=error_msg)
# Cloud of points in a slice
# slices: Slice object with the membrane slice information
# cont_mode: if True (default False) contact points coordinates, instead of vertex ones, are returned
# graph: if not None (default) it contains the already compute GraphGT, it can save a lot of time
# if this function is going to be called several times
# cont_prop: if cont_mode active, then it may get an array from GraphMCF properties associated to the contact
# points. (default STR_FIELD_VALUE_INV)
# conf_fus: if cont_mode active, mode for contact point fusion criterium, valid: 'max'
# Return: an array with points coordinates, vertices ids and mask, and contact point properties array in contact
# mode
def get_cloud_mb_slice(self, slice, cont_mode=False, graph_gt=None, cont_prop=STR_FIELD_VALUE_INV, cont_fus='max'):
# Initialisation
seg_id = self.get_prop_id(SYN_SEG)
seg_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=seg_id))
mb_id = slice.get_mb()
if mb_id == SYN_PST_LBL:
eu_id = self.get_prop_id(MB_PST_EU_DST)
eu_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=eu_id))
geo_id = self.get_prop_id(MB_PST_GEO_DST)
geo_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=geo_id))
gl_id = self.get_prop_id(MB_PST_GEO_LEN)
gl_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=gl_id))
sin_id = self.get_prop_id(MB_PST_GEO_SIN)
sin_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=sin_id))
gol_id = self.get_prop_id(MB_PRE_GEO_LEN)
gol_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=gol_id))
if cont_mode:
cont_id = self.get_prop_id(MB_PST_CONT_COORD)
cont_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=cont_id))
cont_pid = self.get_prop_id(cont_prop)
cont_pdt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=cont_pid))
if cont_fus != 'max':
error_msg = 'Input contact point property fusion criterium ' + cont_fus + ' is not valid.'
raise pexceptions.PySegInputError(expr='get_cloud_mb_slice (SynGraphMCF)', msg=error_msg)
elif mb_id == SYN_PRE_LBL:
eu_id = self.get_prop_id(MB_PRE_EU_DST)
eu_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=eu_id))
geo_id = self.get_prop_id(MB_PRE_GEO_DST)
geo_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=geo_id))
gl_id = self.get_prop_id(MB_PRE_GEO_LEN)
gl_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=gl_id))
sin_id = self.get_prop_id(MB_PRE_GEO_SIN)
sin_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=sin_id))
gol_id = self.get_prop_id(MB_PST_GEO_LEN)
gol_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=gol_id))
if cont_mode:
cont_id = self.get_prop_id(MB_PRE_CONT_COORD)
cont_dt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=cont_id))
cont_pid = self.get_prop_id(cont_prop)
cont_pdt = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=cont_pid))
if cont_fus != 'max':
error_msg = 'Input contact point property fusion criterium ' + cont_fus + ' is not valid.'
raise pexceptions.PySegInputError(expr='get_cloud_mb_slice (SynGraphMCF)', msg=error_msg)
else:
error_msg = 'Non valid membrane label ' + str(mb_id)
raise pexceptions.PySegInputError(expr='get_cloud_mb_slice (SynGraphMCF)', msg=error_msg)
# Find tail vertices within slice
h_v_ids = list()
for v in self.get_vertices_list():
v_id = v.get_id()
seg = self.get_prop_entry_fast(seg_id, v_id, 1, seg_dt)[0]
eu = self.get_prop_entry_fast(eu_id, v_id, 1, eu_dt)[0]
geo = self.get_prop_entry_fast(geo_id, v_id, 1, geo_dt)[0]
gl = self.get_prop_entry_fast(gl_id, v_id, 1, gl_dt)[0]
sin = self.get_prop_entry_fast(sin_id, v_id, 1, sin_dt)[0]
gol = self.get_prop_entry_fast(gol_id, v_id, 1, gol_dt)[0]
if (gol > 0) and (gl > 0) and (gol < gl):
continue
if slice.test(seg=seg, eu_dst=eu, geo_dst=geo, geo_len=gl, sin=sin):
h_v_ids.append(v_id)
# Vertices thresholding
th_l = slice.get_list_th()
if len(th_l) > 0:
if graph_gt is None:
graph_gt = GraphGT(self)
for th in slice.get_list_th():
h_v_ids = self.threshold_slice(h_v_ids, th, graph_gt)
# Creating vertices LUT
v_ids = list()
cont = 0
lut_ver = (-1) * np.ones(shape=self.get_nid(), dtype=np.int)
for v_id in h_v_ids:
if lut_ver[v_id] == -1:
lut_ver[v_id] = cont
v_ids.append(v_id)
cont += 1
# Find edges within slice
e_ids = list()
for edge in self.get_edges_list():
s_id = lut_ver[edge.get_source_id()]
t_id = lut_ver[edge.get_target_id()]
if (s_id > -1) and (t_id > -1):
e_ids.append([s_id, t_id])
# graph_tool building
vertices_gt = np.empty(shape=len(v_ids), dtype=object)
graph = gt.Graph(directed=False)
for i in range(len(v_ids)):
vertices_gt[i] = graph.add_vertex()
for e_id in e_ids:
graph.add_edge(vertices_gt[e_id[0]], vertices_gt[e_id[1]])
# Subgraphs visitor initialization
sgraph_id = graph.new_vertex_property("int")
visitor = SubGraphVisitor(sgraph_id)
# Find subgraphs
coords = np.zeros(shape=(vertices_gt.shape[0], 3), dtype=np.float)
rids = np.zeros(shape=vertices_gt.shape[0], dtype=np.int)
if cont_fus == 'max':
cont_p = (-np.inf) * np.ones(shape=vertices_gt.shape[0], dtype=np.int)
for i, v in enumerate(vertices_gt):
if sgraph_id[v] == 0:
gt.dfs_search(graph, v, visitor)
visitor.update_sgraphs_id()
v_id = v_ids[i]
rids[i] = v_id
if cont_mode:
coords[i, :] = self.get_prop_entry_fast(cont_id, v_id, 3, cont_dt)
val = self.get_prop_entry_fast(cont_pid, v_id, 3, cont_pdt)[0]
if cont_fus == 'max':
if val > cont_p[i]:
cont_p[i] = val
else:
coords[i, :] = self._GraphMCF__skel.GetPoint(v_id)
# Clusters filtering
ids = sgraph_id.get_array()
max_id = ids.max()
lut_counts = np.zeros(shape=max_id+1, dtype=np.int)
for idy in ids:
lut_counts[idy] += 1
hold_coords = list()
hold_ids = list()
for idy in range(1, max_id+1):
counts = lut_counts[idy]
if slice.test(cnv=counts):
c_ids = np.where(ids == idy)[0]
for c_id in c_ids:
hold_coords.append(coords[c_id, :])
hold_ids.append(rids[c_id])
# Computing mask
mask_out = (self.__syn_seg == slice.get_seg())
if mb_id == SYN_PST_LBL:
if cont_mode:
hold_mb = self.__mb_dst_pst / self.get_resolution()
mask_out *= ((hold_mb >= 0) * (hold_mb <= 2))
else:
mask_out *= ((self.__mb_dst_pst >= slice.get_eu_dst_low()) *
(self.__mb_dst_pst <= slice.get_eu_dst_high()))
else:
if cont_mode:
hold_mb = self.__mb_dst_pre / self.get_resolution()
mask_out *= ((hold_mb >= 0) * (hold_mb <= 2))
else:
mask_out *= ((self.__mb_dst_pre >= slice.get_eu_dst_low()) *
(self.__mb_dst_pre <= slice.get_eu_dst_high()))
# disperse_io.save_numpy(mask_out, '/fs/pool/pool-ruben/antonio/nuc_mito/hold_1.mrc')
if cont_mode:
return np.asarray(hold_coords, dtype=np.float), np.asarray(hold_ids, dtype=np.int), mask_out, cont_p
else:
return np.asarray(hold_coords, dtype=np.float), np.asarray(hold_ids, dtype=np.int), mask_out
# Generates a VTK poly data from a cloud of vertices of a synapse slice
# v_ids: vertices id which represent a membrane slice
# mb_id: identifies the membrane which corresponds with the slice
# Return: a VTK poly data object (.vtp)
def slice_to_vtp(self, v_ids, mb_id):
# Initialization
poly = vtk.vtkPolyData()
poly.SetPoints(self._GraphMCF__skel.GetPoints())
verts = vtk.vtkCellArray()
lines = vtk.vtkCellArray()
struct = vtk.vtkIntArray()
struct.SetNumberOfComponents(1)
struct.SetName(MB_VTP_STR)
eu_dst = vtk.vtkFloatArray()
eu_dst.SetNumberOfComponents(1)
eu_dst.SetName(MB_EU_DST)
geo_dst = vtk.vtkFloatArray()
geo_dst.SetNumberOfComponents(1)
geo_dst.SetName(MB_GEO_DST)
geo_len = vtk.vtkFloatArray()
geo_len.SetNumberOfComponents(1)
geo_len.SetName(MB_GEO_LEN)
sin = vtk.vtkFloatArray()
sin.SetNumberOfComponents(1)
sin.SetName(MB_GEO_SIN)
kt = vtk.vtkFloatArray()
kt.SetNumberOfComponents(1)
kt.SetName(MB_GEO_KT)
tt = vtk.vtkFloatArray()
tt.SetNumberOfComponents(1)
tt.SetName(MB_GEO_TT)
ns = vtk.vtkFloatArray()
ns.SetNumberOfComponents(1)
ns.SetName(MB_GEO_NS)
bs = vtk.vtkFloatArray()
bs.SetNumberOfComponents(1)
bs.SetName(MB_GEO_BS)
apl = vtk.vtkFloatArray()
apl.SetNumberOfComponents(1)
apl.SetName(MB_GEO_APL)
if mb_id == SYN_PST_LBL:
prop_eu = self.get_prop_id(MB_PST_EU_DST)
prop_geo = self.get_prop_id(MB_PST_GEO_DST)
prop_gl = self.get_prop_id(MB_PST_GEO_LEN)
prop_sin = self.get_prop_id(MB_PST_GEO_SIN)
elif mb_id == SYN_PRE_LBL:
prop_eu = self.get_prop_id(MB_PRE_EU_DST)
prop_geo = self.get_prop_id(MB_PRE_GEO_DST)
prop_gl = self.get_prop_id(MB_PRE_GEO_LEN)
prop_sin = self.get_prop_id(MB_PRE_GEO_SIN)
# VTK Topology
# Vertices
lut_ver = np.zeros(shape=self.get_nid(), dtype=np.bool)
for v_id in v_ids:
verts.InsertNextCell(1)
struct.InsertNextTuple((MB_VTP_STR_E,))
eu_dst.InsertNextTuple(self.get_prop_entry_fast(prop_eu, v_id, 1, np.float))
geo_dst.InsertNextTuple(self.get_prop_entry_fast(prop_geo, v_id, 1, np.float))
geo_len.InsertNextTuple(self.get_prop_entry_fast(prop_gl, v_id, 1, np.float))
sin.InsertNextTuple(self.get_prop_entry_fast(prop_sin, v_id, 1, np.float))
kt.InsertNextTuple((-1,))
tt.InsertNextTuple((-1,))
ns.InsertNextTuple((-1,))
bs.InsertNextTuple((-1,))
apl.InsertNextTuple((-1,))
verts.InsertCellPoint(v_id)
lut_ver[v_id] = True
# Contact points
for v_id in v_ids:
fil = self.get_mb_fil(v_id, mb_id)
if isinstance(fil, FilamentLDG):
verts.InsertNextCell(1)
struct.InsertNextTuple((MB_VTP_STR_C,))
eu_dst.InsertNextTuple(self.get_prop_entry_fast(prop_eu, v_id, 1, np.float))
geo_dst.InsertNextTuple(self.get_prop_entry_fast(prop_geo, v_id, 1, np.float))
geo_len.InsertNextTuple(self.get_prop_entry_fast(prop_gl, v_id, 1, np.float))
sin.InsertNextTuple(self.get_prop_entry_fast(prop_sin, v_id, 1, np.float))
kt.InsertNextTuple((fil.get_total_k(),))
tt.InsertNextTuple((fil.get_total_t(),))
ns.InsertNextTuple((fil.get_total_ns(),))
bs.InsertNextTuple((fil.get_total_bs(),))
apl.InsertNextTuple((fil.get_apex_length(),))
verts.InsertCellPoint(fil.get_point_ids()[-1])
# Filament paths
for v_id in v_ids:
fil = self.get_mb_fil(v_id, mb_id)
if isinstance(fil, FilamentLDG):
p_ids = fil.get_point_ids()
n_points = len(p_ids)
lines.InsertNextCell(n_points)
struct.InsertNextTuple((MB_VTP_STR_F,))
eu_dst.InsertNextTuple(self.get_prop_entry_fast(prop_eu, v_id, 1, np.float))
geo_dst.InsertNextTuple(self.get_prop_entry_fast(prop_geo, v_id, 1, np.float))
geo_len.InsertNextTuple(self.get_prop_entry_fast(prop_gl, v_id, 1, np.float))
sin.InsertNextTuple(self.get_prop_entry_fast(prop_sin, v_id, 1, np.float))
kt.InsertNextTuple((fil.get_total_k(),))
tt.InsertNextTuple((fil.get_total_t(),))
ns.InsertNextTuple((fil.get_total_ns(),))
bs.InsertNextTuple((fil.get_total_bs(),))
apl.InsertNextTuple((fil.get_apex_length(),))
for i in range(n_points):
lines.InsertCellPoint(p_ids[i])
# Edges
for e in self.get_edges_list():
s_id = e.get_source_id()
t_id = e.get_target_id()
if lut_ver[s_id] and lut_ver[t_id]:
lines.InsertNextCell(2)
struct.InsertNextTuple((MB_VTP_STR_E,))
eu_dst.InsertNextTuple((-1,))
geo_dst.InsertNextTuple((-1,))
geo_len.InsertNextTuple((-1,))
sin.InsertNextTuple((-1,))
kt.InsertNextTuple((-1,))
tt.InsertNextTuple((-1,))
ns.InsertNextTuple((-1,))
bs.InsertNextTuple((-1,))
apl.InsertNextTuple((-1,))
lines.InsertCellPoint(s_id)
lines.InsertCellPoint(t_id)
# Building the vtp object
poly.SetVerts(verts)
poly.SetLines(lines)
poly.GetCellData().AddArray(struct)
poly.GetCellData().AddArray(eu_dst)
poly.GetCellData().AddArray(geo_dst)
poly.GetCellData().AddArray(geo_len)
poly.GetCellData().AddArray(sin)
poly.GetCellData().AddArray(kt)
poly.GetCellData().AddArray(tt)
poly.GetCellData().AddArray(ns)
poly.GetCellData().AddArray(bs)
poly.GetCellData().AddArray(apl)
return poly
# Applies RWR on a list of sources
# s_ids: list with the sources id
# key: prop key were the result will be stored, if it does not exist it is created
# c: restart probability [0, 1] (default 0.1) (the lower the more global analysis)
# key_w_v: prop key for vertices weighting (default None)
# key_w_e: prop key for edges weighting (default None)
# inv: if True (default False) edge weighting property is inverted
# graph: if not None (default) it contains the already compute GraphGT, it can save a lot of time
# if this function is going to be called several times
def rwr_sources(self, s_ids, key, c=.0, key_w_v=None, key_w_e=None, inv=False, graph=None):
# Getting the GraphGT
if graph is None:
graph = GraphGT(self)
graph_gt = graph.get_gt()
# Get sources
vertices_gt = np.empty(shape=self.get_nid(), dtype=object)
prop_id = graph_gt.vertex_properties[DPSTR_CELL]
for v in graph_gt.vertices():
vertices_gt[prop_id[v]] = v
sources = list()
for v_i in s_ids:
v = vertices_gt[v_i]
if v is not None:
sources.append(v)
# RWR
graph.multi_rwr(sources, key, c, key_w_e, key_w_v, mode=2, inv=inv)
graph.add_prop_to_GraphMCF(self, key, up_index=True)
# Applies geodesic neighbours filter from list of ids with the sources
# s_ids: list with the sources id
# key: prop key were the result will be stored, if it does not exist it is created
# scale: neighborhood radius in nm
# sig: sigma (controls neighborhood sharpness) in nm
# n_sig: limits neighbourhood shape (default 3)
# key_w_v: prop key for vertices weighting (default None)
# graph: if not None (default) it contains the already compute GraphGT, it can save a lot of time
# if this function is going to be called several times
def gnf_sources(self, s_ids, key, scale, sig, n_sig, key_w_v=None, graph=None):
# Getting the GraphGT
if graph is None:
graph = GraphGT(self)
graph_gt = graph.get_gt()
# Get sources
prop_s = graph_gt.new_vertex_property('int', vals=0)
vertices_gt = np.empty(shape=self.get_nid(), dtype=object)
prop_id = graph_gt.vertex_properties[DPSTR_CELL]
for v in graph_gt.vertices():
vertices_gt[prop_id[v]] = v
for v_i in s_ids:
v = vertices_gt[v_i]
if v is not None:
prop_s[v] = 1
# Filtering
graph.geodesic_neighbors_filter(key, scale, sig, prop_key_v=key_w_v, prop_key_s=prop_s, n_sig=n_sig)
graph.add_prop_to_GraphMCF(self, key, up_index=True)
    # Applies a property threshold to a slice
    # v_ids: slice vertices ids for being thresholded
    # th: Threshold object for a GraphMCF property
    # graph_gt: if not None (default) it contains the already computed GraphGT, it can save a lot of time
    #           if this function is going to be called several times
    # Returns: a list with the thresholded vertices ids
    def threshold_slice(self, v_ids, th, graph_gt=None):
        """Return the subset of v_ids whose property value passes Threshold th.

        Modes: XML_MBT_MPERI keeps values inside the [low, high] percentile band,
        XML_MBT_MPERO keeps values outside it, XML_MBT_MIN keeps raw values inside
        [low, high], and any other mode keeps raw values outside [low, high].
        """
        # Getting the GraphGT
        if graph_gt is None:
            graph_gt = GraphGT(self).get_gt()
        prop_id = graph_gt.vertex_properties[DPSTR_CELL]
        prop_th = graph_gt.vertex_properties[th.get_prop_key()].get_array()
        # Get LUT for slice vertices
        vertices_gt = np.empty(shape=self.get_nid(), dtype=object)
        prop_arr = np.zeros(shape=len(v_ids), dtype=prop_th.dtype)
        for v in graph_gt.vertices():
            vertices_gt[prop_id[v]] = v
        cont = 0
        # NOTE(review): if some v_id has no GT vertex, cont lags behind and the
        # trailing prop_arr entries keep their zero initialisation while the mask
        # below is still indexed against the full v_ids list -- confirm every
        # slice vertex is guaranteed to be present in graph_gt.
        for v_id in v_ids:
            if vertices_gt[v_id] is not None:
                prop_arr[cont] = prop_th[vertices_gt[v_id]]
                cont += 1
        # Apply the threshold
        if th.get_mode() == XML_MBT_MPERI:
            # Percentile band: keep values within the [low, high] percentiles
            per_th_l = np.percentile(prop_arr, th.get_value_low())
            per_th_h = np.percentile(prop_arr, th.get_value_high())
            mask = (prop_arr >= per_th_l) & (prop_arr <= per_th_h)
        elif th.get_mode() == XML_MBT_MPERO:
            # Percentile band complement: keep values outside [low, high]
            per_th_l = np.percentile(prop_arr, th.get_value_low())
            per_th_h = np.percentile(prop_arr, th.get_value_high())
            mask = (prop_arr < per_th_l) | (prop_arr > per_th_h)
        elif th.get_mode() == XML_MBT_MIN:
            # Raw value band: keep values within [low, high]
            mask = (prop_arr >= th.get_value_low()) & (prop_arr <= th.get_value_high())
        else:
            # Raw value band complement
            mask = (prop_arr < th.get_value_low()) | (prop_arr > th.get_value_high())
        return list(np.asarray(v_ids)[mask])
# Compute angle between a vector (3 components property) and the normal to a membrane (only vertices)
# prop_3d: property key for the vector
# mb: membrane for computing the normals, valid: SYN_PST_LBL or SYN_PRE_LBL
# v_ids: list with the vertices ids, if None (default) all vertices are considered
# Result: a property with angles in degrees stored as prop_3d+'_'+SYN_XXX_LBL+'_ang', the normals as SYN_XXX_LBL+'_norm'
# Returns: the property keys for normals, and angles
def angle_vector_norms(self, prop_3d, mb, v_ids=None):
# Input parsing and initialization
prop_id = self.get_prop_id(prop_3d)
if prop_id is None:
error_msg = 'Input property ' + prop_3d + ' does not exist.'
raise pexceptions.PySegInputError(expr='angle_vector_norms (SynGraphMCF)', msg=error_msg)
n_comp = self.get_prop_ncomp(key_id=prop_id)
if n_comp != 3:
error_msg = 'Only input properties with 3 component are valid, current ' + str(n_comp) + '.'
raise pexceptions.PySegInputError(expr='angle_vector_norms (SynGraphMCF)', msg=error_msg)
dtype = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=prop_id))
if mb == SYN_PST_LBL:
mb_key = MB_PST_CONT_COORD
elif mb == SYN_PRE_LBL:
mb_key = MB_PRE_CONT_COORD
else:
error_msg = 'Non valid membrane label for SynGraphMCF ' + mb
raise pexceptions.PySegInputError(expr='angle_vector_norms (SynGraphMCF)', msg=error_msg)
prop_id_n = self.get_prop_id(mb_key)
dtype_n = disperse_io.TypesConverter().gt_to_numpy(self.get_prop_type(key_id=prop_id_n))
# Get vertices list
if v_ids is None:
v_ids = list()
for v in self._GraphMCF__vertices:
if v is not None:
v_ids.append(v.get_id())
norms = np.zeros(shape=(len(v_ids), 3), dtype=np.float32)
angles = np.zeros(shape=(len(v_ids), 3), dtype=np.float32)
# Loop for computing the normals and angles
for i, v_id in enumerate(v_ids):
t = self.get_prop_entry_fast(prop_id_n, v_id, 3, dtype_n)
norm = np.asarray(self._GraphMCF__skel.GetPoint(v_id), dtype=np.float32) - np.asarray(t, dtype=np.float32)
vect = np.asarray(self.get_prop_entry_fast(prop_id, v_id, 3, dtype), dtype=dtype)
angles[i] = math.degrees(angle_2vec_3D(norm, vect))
norms[i, :] = norm
# Inserting properties and values
norm_key, ang_key = prop_3d+mb_key+'_norm', prop_3d+'_'+mb_key+'_ang'
gtype_f = disperse_io.TypesConverter().numpy_to_gt(np.float32)
norm_id, ang_id = self.add_prop(norm_key, gtype_f, 3), self.add_prop(ang_key, gtype_f, 1)
for (v_id, ang, norm) in zip(v_ids, angles, norms):
self.set_prop_entry_fast(ang_id, ang, v_id, 1)
self.set_prop_entry_fast(norm_id, norm, v_id, 3)
return norm_key, ang_key
# Generates a filament network where filaments are specified by a set of sources and targets
# s_ids: list with sources ids
# t_ids: list with targets ids
# rg_dst: range for valid geodesic distances in nm
# rg_sin: range for sinuosities
# graph_gt: by default None, otherwise pre-computed GraphGT
# key_l: property for measuring the length (default STR_VERT_DST)
def gen_fil_network(self, s_ids, t_ids, rg_dst, rg_sin, graph_gt=None, key_l=STR_VERT_DST):
# Initialization
if (not hasattr(rg_dst, '__len__')) or (len(rg_dst) != 2):
error_msg = 'The input range of distances must be a 2-tuple.'
raise pexceptions.PySegInputError(expr='gen_fil_network (SynGraphMCF)', msg=error_msg)
if rg_dst[0] < 0:
error_msg = 'Lowest distance must greater or equal to zero, current ' + str(rg_dst[0]) + '.'
raise pexceptions.PySegInputError(expr='gen_fil_network (SynGraphMCF)', msg=error_msg)
if (not hasattr(rg_sin, '__len__')) or (len(rg_sin) != 2):
error_msg = 'The input range of sinuosity must be a 2-tuple.'
raise pexceptions.PySegInputError(expr='gen_fil_network (SynGraphMCF)', msg=error_msg)
if rg_dst[0] < 1:
error_msg = 'Lowest sinuosity must greater or equal to zero, current ' + str(rg_sin[0]) + '.'
raise pexceptions.PySegInputError(expr='gen_fil_network (SynGraphMCF)', msg=error_msg)
if (not hasattr(s_ids, '__len__')) or (not hasattr(t_ids, '__len__')):
error_msg = 'Input source and target Ids must be iterables.'
raise pexceptions.PySegInputError(expr='gen_fil_network (SynGraphMCF)', msg=error_msg)
fils = list()
# Compute GraphGT
if graph_gt is None:
graph = GraphGT(self)
graph_gt = graph.get_gt()
else:
if not isinstance(graph_gt, gt.Graph):
error_msg = 'Input graph_gt must be an instance of gt.Graph.'
raise pexceptions.PySegInputError(expr='gen_fil_network (SynGraphMCF)', msg=error_msg)
prop_id, prop_id_e = graph_gt.vp[DPSTR_CELL], graph_gt.ep[DPSTR_CELL]
try:
prop_l = graph_gt.ep[key_l]
except KeyError:
error_msg = 'Requires the previous computations of ' + STR_VERT_DST + ' property'
raise pexceptions.PySegInputError(expr='gen_fil_network (SynGraphMCF)', msg=error_msg)
# Vertices lut
nv = graph_gt.num_vertices()
nids = self.get_nid()
s_lut, t_lut = np.zeros(shape=nids, dtype=np.bool), np.zeros(shape=nids, dtype=np.bool)
for i, s_id in enumerate(s_ids):
s_lut[s_id] = True
for t_id in t_ids:
t_lut[t_id] = True
len_s, len_t = len(s_ids), len(t_ids)
sg_ids = np.zeros(shape=len_s, dtype=object)
sg_lut, tg_lut = np.zeros(shape=nv, dtype=np.bool), np.zeros(shape=nv, dtype=np.bool)
count_s = 0
for v in graph_gt.vertices():
v_id = prop_id[v]
if s_lut[v_id]:
sg_ids[count_s] = v
sg_lut[int(v)] = True
count_s += 1
if t_lut[v_id]:
tg_lut[int(v)] = True
# Loop for sources
# count = 0
for s in sg_ids:
# print str(count) + ' of ' + str(len_s)
# count += 1
# Compute geodesic distances to source
s_mcf = self.get_vertex(prop_id[s])
pt_s = np.asarray(self.get_vertex_coords(s_mcf))
dst_map, pred_map = gt.shortest_distance(graph_gt, source=s, weights=prop_l, max_dist=rg_dst[1],
pred_map=True)
# Find potential targets
h_tg_lut = np.copy(tg_lut)
dst_map_arr = dst_map.get_array()
n_ids = np.where(tg_lut & (dst_map_arr>=rg_dst[0]) & (dst_map_arr<=rg_dst[1]))[0]
sort_ids = np.argsort(dst_map_arr[n_ids])
# Loop for found targets
for n_id in n_ids[sort_ids]:
# Check already visited
t = graph_gt.vertex(n_id)
if not h_tg_lut[int(t)]:
continue
# Find the paths
v_path, e_path = gt.shortest_path(graph_gt, source=s, target=t, weights=prop_l, pred_map=pred_map)
# Build the filament
lv = len(v_path)
if lv > 1:
length = 0
vl_ids, el_ids = list(), list()
vh_id = prop_id[v_path[0]]
vl_ids.append(vh_id)
# el_ids.append(vh_id)
for i in range(1,lv):
vh, eh = v_path[i], e_path[i-1]
vh_id = prop_id[vh]
# Rewind if source
if sg_lut[int(vh)]:
length = 0
vl_ids, el_ids = list(), list()
vl_ids.append(vh_id)
# el_ids.append(vh_id)
# Check potential target
elif h_tg_lut[int(vh)]:
vl_ids.append(vh_id)
el_ids.append(prop_id_e[eh])
# el_ids.append(vh_id)
length += prop_l[eh]
# Check length
if (length>=rg_dst[0]) and (length<=rg_dst[1]):
# Check sinuosity
t_mcf = self.get_vertex(vh_id)
pt_t = np.asarray(self.get_vertex_coords(t_mcf))
eu_dst = pt_s - pt_t
eu_dst = math.sqrt((eu_dst*eu_dst).sum()) * self.get_resolution()
sin = dst_map_arr[n_id] / eu_dst
if (sin>=rg_sin[0]) and (sin<=rg_sin[1]):
# Filament found
fils.append(FilamentUDG(vl_ids[:], el_ids[:], self))
# No more filaments in this path
for j in range(i, lv):
h_tg_lut[int(v_path[j])] = False
break
else:
vl_ids.append(vh_id)
el_ids.append(prop_id_e[eh])
# el_ids.append(vh_id)
length += prop_l[eh]
# Build the network
return NetFilamentsSyn(fils)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_webhooks
----------------------------------
Tests for `webhooks` module.
"""
from webhooks import webhook, unhashed_hook
from webhooks.senders.simple import sender
import json
from standardjson import StandardJSONEncoder
from Crypto.Hash import SHA256
def test_simple_hashed():
    """A hashed webhook must echo the payload, drop webhook kwargs, and add a hash."""
    wife_name = "Audrey Roy Greenfeld"
    husband_name = "Daniel Roy Greenfeld"

    @webhook(event="example200", sender_callable=sender)
    def basic(wife, husband, creator, encoding, url):
        return {"husband": husband, "wife": wife}

    status = basic(
        wife_name,
        husband_name,
        creator="pydanny",
        encoding="application/json",
        url="http://httpbin.org",
    )
    assert status["wife"] == wife_name
    assert status["husband"] == husband_name
    # The decorator-only kwargs must not leak into the payload
    assert "creator" not in status
    # A hashed hook attaches a non-trivial hash
    assert len(status["hash"]) > 10
def test_simple_unhash():
    """An unhashed hook must echo the payload and attach no hash."""
    wife_name = "Audrey Roy Greenfeld"
    husband_name = "Daniel Roy Greenfeld"

    @unhashed_hook(event="example200", sender_callable=sender)
    def basic(wife, husband, creator, encoding, url):
        return {"husband": husband, "wife": wife}

    status = basic(
        wife_name,
        husband_name,
        creator="pydanny",
        encoding="application/json",
        url="http://httpbin.org",
    )
    assert status["wife"] == wife_name
    assert status["husband"] == husband_name
    assert "hash" not in status
def test_simple_custom_header():
    """Custom headers passed to the hook must reach the outgoing POST attributes."""
    wife_name = "Audrey Roy Greenfeld"
    husband_name = "Daniel Roy Greenfeld"
    auth_header = {"Basic": "dXNlcjpzdXBlcnNlY3JldA=="}

    @unhashed_hook(event="example200", sender_callable=sender)
    def basic(wife, husband, creator, encoding, url, custom_headers):
        return {"husband": husband, "wife": wife}

    status = basic(
        wife_name,
        husband_name,
        creator="pydanny",
        encoding="application/json",
        custom_headers=auth_header,
        url="http://httpbin.org",
    )
    assert status["wife"] == wife_name
    assert status["husband"] == husband_name
    assert status["post_attributes"]["headers"]["Basic"] == "dXNlcjpzdXBlcnNlY3JldA=="
def test_simple_signature():
    """The sender must attach an x-hub-signature header equal to
    'sha256=' + hex SHA-256 of (signing secret || JSON-encoded body)."""
    @unhashed_hook(event="example200", sender_callable=sender)
    def basic(wife, husband, creator, encoding, url, signing_secret):
        return {"husband": husband, "wife": wife}

    secret = "secret_key"
    status = basic("Audrey Roy Greenfeld", "Daniel Roy Greenfeld", creator="pydanny", encoding="application/json", url="http://httpbin.org", signing_secret=secret)
    assert status['wife'] == "Audrey Roy Greenfeld"
    assert status['husband'] == "Daniel Roy Greenfeld"
    signature = status['post_attributes']['headers']['x-hub-signature']
    # Recompute the expected signature independently of the sender
    body = {"wife": "Audrey Roy Greenfeld", "husband": "Daniel Roy Greenfeld"}
    if not isinstance(secret, bytes):
        secret = secret.encode('utf-8')
    # 'digest' instead of 'hash': the original shadowed the builtin hash()
    digest = SHA256.new(secret)
    digest.update(json.dumps(body, cls=StandardJSONEncoder).encode("utf-8"))
    expected_signature = "sha256=" + digest.hexdigest()
    assert signature == expected_signature
import torch
import numpy as np
from scipy.optimize import linear_sum_assignment
from mmdet.core import bbox2roi, build_assigner, build_sampler, bbox2result_with_id
from .cluster_rcnn import ClusterRCNN
from .. import builder
from ..registry import DETECTORS
def chunker_list(seq, length, shift):
    """Return overlapping windows of *seq*: one window of up to *length* items
    starting at every *shift*-th index (tail windows may be shorter than *length*).
    """
    # range(0, len(seq), shift) replaces the needlessly indirect
    # range(len(seq))[0::shift], which built a range only to slice it
    return [seq[i:i + length] for i in range(0, len(seq), shift)]
def segm_iou(trk, det):
    """Intersection-over-union of two boolean segmentation masks.

    Returns 0 when the union is empty (both masks all-False).
    """
    # Hoist the union: the original evaluated np.any([...]).sum() twice
    union = np.any([trk, det], axis=0).sum()
    if union > 0:
        return np.all([trk, det], axis=0).sum() / union
    return 0
def matching(det_mask, trk_mask, ids):
    """Match detection clusters to tracked clusters by mask IoU.

    Builds the pairwise IoU matrix between every (detection id, track id)
    combination of label masks and solves the assignment with the Hungarian
    algorithm, maximising total IoU. Position d of the returned array holds
    the index into *ids* of the track assigned to detection ids[d].
    """
    n = len(ids)
    iou_mat = np.zeros((n, n), dtype=np.float32)
    for row, d_id in enumerate(ids):
        d_region = det_mask == d_id
        for col, t_id in enumerate(ids):
            iou_mat[row, col] = segm_iou(d_region, trk_mask == t_id)
    # linear_sum_assignment minimises cost, hence the negation
    _, col_ind = linear_sum_assignment(-iou_mat)
    return col_ind
@DETECTORS.register_module
class ClusterRCRNN(ClusterRCNN):
    def __init__(self,
                 backbone,
                 rpn_head,
                 bbox_roi_extractor,
                 bbox_head,
                 mask_roi_extractor,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 backbone_of=None,
                 shared_head=None,
                 mask_head= None,
                 mask_cluster_head=None,
                 roi_cluster_head=None,
                 gru_model=None,
                 pretrained=None,
                 swapped=False,
                 bidirectional=False,
                 num_clusters=2,
                 rnn_level=0):
        """Recurrent variant of ClusterRCNN: a GRU head mixes backbone features
        across the frame sequence (optionally in both temporal directions) at
        FPN level ``rnn_level`` before the detection/clustering heads run.

        Extra args over ClusterRCNN:
            gru_model: head config for the recurrent module (built via builder.build_head;
                       NOTE(review): a None default would make build_head fail -- confirm
                       callers always pass a config).
            bidirectional: if True, run the GRU forward and backward over the
                           sequence and concatenate both hidden states.
            rnn_level: index of the backbone feature level fed to the GRU.
        """
        super(ClusterRCRNN, self).__init__(
            backbone=backbone,
            neck=neck,
            backbone_of=backbone_of,
            shared_head=shared_head,
            rpn_head=rpn_head,
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            mask_roi_extractor=mask_roi_extractor,
            mask_head=mask_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            mask_cluster_head=mask_cluster_head,
            roi_cluster_head=roi_cluster_head,
            pretrained=pretrained,
            swapped=swapped,
            num_clusters=num_clusters)
        self.gru_model=builder.build_head(gru_model)
        self.bidirectional = bidirectional
        self.rnn_level = rnn_level
        self.num_clusters = num_clusters
        # Tracking state carried between test-time calls (see simple_test):
        # maps between mask cluster ids and ROI cluster ids for the current/previous chunk
        self.mask_roi_map = {}
        self.prev_mask_roi_map = {}
        # Cluster label image of the last processed frame, used for temporal id matching
        self.prev_mask_clusters = None
        # Valid cluster ids (1..num_clusters); 0 is background
        self.ids = np.arange(1, num_clusters+1)
        # Identity permutation over cluster ids, updated by matching at test time
        self.inv_mask = np.arange(num_clusters+1)
def forward_train(self, img, img_meta, inp_seq, ref_frame_index, return_targets=False):
M_seq = []
if self.bidirectional:
for inp in inp_seq:
fms = list(self.backbone((inp['img'])))
M_seq.append(fms)
fms_f = []
hidden = None
for fms in M_seq:
hidden = self.gru_model(fms[self.rnn_level], hidden)
fms_f.append(hidden[-1])
fms_b = []
hidden = None
for fms in reversed(M_seq):
hidden = self.gru_model(fms[self.rnn_level], hidden)
fms_b.append(hidden[-1])
fms_b.reverse()
for i, (fm_f, fm_b) in enumerate(zip(fms_f, fms_b)):
M_seq[i][self.rnn_level] = torch.cat((fm_f, fm_b), 1)
M_seq[i] = self.neck(M_seq[i])
else:
hidden = None
for im in img_seq:
fms = list(self.forward_to_neck(im))
hidden = self.gru_model(fms[self.rnn_level], hidden)
fms[self.rnn_level] = hidden[-1]
M_seq.append(fms)
seq_targets = []
for i, (M, inp) in enumerate(zip(M_seq, inp_seq)):
calculate_losses = (i == ref_frame_index[0])
l, targets = self.forward_heads( M,
inp['img_meta'],
inp['gt_bboxes'],
inp['gt_labels'],
inp.get('gt_inst_ids', None),
inp['gt_masks'],
return_targets=True,
calculate_losses=calculate_losses,
img=img)
if calculate_losses:
losses = l
seq_targets.append(targets)
if self.with_cluster:
def batchseq2seqbatch(targetkey):
seqbatchtarget=[target[targetkey] for target in seq_targets]
seqbatchtarget=list(map(list, zip(*seqbatchtarget)))
seqbatchtarget=[torch.cat(seq) for seq in seqbatchtarget]
return seqbatchtarget
def updated_assigned_gt_inds(target_key):
assigned_gt_inds = []
gt_bboxes_seq=[target['gt_bboxes'] for target in seq_targets]
gt_bboxes_seq=list(map(list, zip(*gt_bboxes_seq))) # batch x seq
assigned_gt_inds_seq=[target[target_key] for target in seq_targets] # seq x batch
assigned_gt_inds_seq=list(map(list, zip(*assigned_gt_inds_seq))) # batch x seq
for ind_seq_batch, gt_bboxes_batch in zip(assigned_gt_inds_seq, gt_bboxes_seq):
ind_joint = []
shift = 0
for ind_seq, bboxes_seq in zip(ind_seq_batch, gt_bboxes_batch):
ind_joint.append(ind_seq + shift)
shift += len(bboxes_seq)
assigned_gt_inds.append(torch.cat(ind_joint))
return assigned_gt_inds
pos_assigned_gt_inds = updated_assigned_gt_inds('pos_assigned_gt_inds')
ext_assigned_gt_inds = updated_assigned_gt_inds('ext_assigned_gt_inds')
gt_bboxes = batchseq2seqbatch('gt_bboxes')
mask_cluster_preds = batchseq2seqbatch('mask_cluster_pred')
mask_inst_targets = batchseq2seqbatch('mask_inst_targets')
mask_inst_score = batchseq2seqbatch('mask_inst_score')
roi_cluster_preds = batchseq2seqbatch('roi_cluster_pred')
roi_inst_targets = batchseq2seqbatch('roi_inst_targets')
roi_inst_score = batchseq2seqbatch('roi_inst_score')
roi_bboxes = batchseq2seqbatch('roi_bboxes')
im_size_list= seq_targets[0]['im_size_list']
roi_cluster_list = [self.train_cfg.rcnn.roi_cluster for _ in im_size_list]
mask_pair_idx = self.mask_cluster_head.get_pairs(pos_assigned_gt_inds,
mask_inst_targets,
gt_bboxes,
im_size_list, self.train_cfg.rcnn)
roi_pair_idx = self.mask_cluster_head.get_roi_pairs(ext_assigned_gt_inds,
gt_bboxes,
im_size_list, self.train_cfg.rcnn)
mask_cluster_preds = torch.cat(mask_cluster_preds, 0)
mask_inst_targets = torch.cat(mask_inst_targets, 0)
mask_inst_score = torch.cat(mask_inst_score, 0)
roi_cluster_preds = torch.cat(roi_cluster_preds, 0)
roi_inst_targets = torch.cat(roi_inst_targets, 0)
roi_inst_score = torch.cat(roi_inst_score, 0)
roi_bboxes = torch.cat(roi_bboxes, 0)
# DEBUG
DEBUG = False
if DEBUG:
#bboxes = torch.cat([res.pos_bboxes for res in sampling_results])
prop_nums = [[len(inds) for inds in target['pos_assigned_gt_inds']] for target in seq_targets]
prop_nums=list(map(list, zip(*prop_nums)))
prop_nums=sum(prop_nums, [])
img_seq = [i['img'] for i in inp_seq]
debug_img = list(map(list, zip(*img_seq)))
debug_img = sum(debug_img, [])
self.mask_cluster_head.display_instance_pairs(debug_img, prop_nums, mask_inst_targets, roi_inst_targets, roi_bboxes, mask_pair_idx, 0)
# DEBUG END
mask_cluster_preds = mask_cluster_preds[:, 1:]
mask_cluster_preds = mask_cluster_preds.view(mask_cluster_preds.shape[0],
mask_cluster_preds.shape[1], -1)
loss_mask_cluster = self.mask_cluster_head.clustering_loss(mask_cluster_preds,
mask_inst_targets,
mask_inst_score,
mask_pair_idx)
losses.update(loss_mask_cluster)
loss_roi_cluster = self.roi_cluster_head.clustering_loss(roi_cluster_preds,
roi_inst_targets,
roi_inst_score,
roi_pair_idx)
losses.update(loss_roi_cluster)
if return_targets:
return losses, seq_targets
else:
return losses
    def simple_test(self,
                    img,
                    img_meta,
                    inp_seq=None,
                    rescale=False,
                    show=False):
        """Test without augmentation.

        Runs detection and instance/mask clustering over one clip
        (``inp_seq``), fusing per-frame features temporally with a GRU,
        and keeps instance identities consistent with the previous clip
        via ``self.prev_mask_clusters`` / ``self.prev_mask_roi_map``.

        Returns per-frame bbox and segmentation results; with ``show``
        the cluster visualisation maps are returned as well.
        """
        assert self.with_bbox, "Bbox head must be implemented."
        bbox_result_seq = []
        segm_result_seq = []
        mask_cluster_seq = []
        mask_cluster_orig_seq = []
        # M_seq collects the (temporally fused) feature pyramids, one per frame.
        M_seq = []
        if self.bidirectional:
            # 1) Per-frame backbone features.
            for inp in inp_seq:
                fms = list(self.backbone(inp['img'][0]))
                M_seq.append(fms)
            # 2) Forward GRU pass over the clip at the chosen pyramid level.
            fms_f = []
            hidden = None
            for fms in M_seq:
                hidden = self.gru_model(fms[self.rnn_level], hidden)
                fms_f.append(hidden[-1])
            # 3) Backward GRU pass over the reversed clip.
            fms_b = []
            hidden = None
            for fms in reversed(M_seq):
                hidden = self.gru_model(fms[self.rnn_level], hidden)
                fms_b.append(hidden[-1])
            fms_b.reverse()
            # 4) Concatenate both temporal directions and run the neck.
            for i, (fm_f, fm_b) in enumerate(zip(fms_f, fms_b)):
                M_seq[i][self.rnn_level] = torch.cat((fm_f, fm_b), 1)
                M_seq[i] = self.neck(M_seq[i])
        else:
            # Unidirectional: single forward GRU pass.
            # NOTE(review): ``img_seq`` is not defined in this scope —
            # presumably this branch should iterate over ``inp_seq`` (or
            # ``img``); confirm before exercising the unidirectional path.
            hidden = None
            for im in img_seq:
                fms = list(self.forward_to_neck(im))
                hidden = self.gru_model(fms[self.rnn_level], hidden)
                fms[self.rnn_level] = hidden[-1]
                M_seq.append(fms)
        #extract end
        for e, (x, inp) in enumerate(zip(M_seq, inp_seq)):
            img_meta = inp['img_meta'][0]
            proposal_list = self.simple_test_rpn(
                x, img_meta, self.test_cfg.rpn)
            det_bboxes, det_labels = self.simple_test_bboxes(
                x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale)
            segm_results, mask_clusters_orig, roi_cluster_ids, roi_indeces, self.mask_roi_map = self.simple_video_test_mask(
                x, img_meta, det_bboxes, det_labels, self.mask_roi_map, rescale=rescale)
            # Append an all-zero "background" channel, then take the per-pixel
            # argmax so 0 means background and 1..K are cluster ids.
            mask_clusters_o = np.concatenate((mask_clusters_orig, np.zeros_like(mask_clusters_orig[:, :, :1])), axis=2)
            mask_clusters_o = mask_clusters_o.argmax(2)
            if e==0:
                # First frame of the clip: match this clip's cluster ids against
                # the previous clip's final frame to keep identities stable.
                if self.prev_mask_clusters is not None:
                    matched_idx = matching(mask_clusters_o, self.prev_mask_clusters, self.ids)
                    self.inv_mask = np.concatenate([np.zeros(1), self.ids[matched_idx]])
            # Relabel the per-pixel cluster map into the persistent id space.
            mask_clusters = self.inv_mask[mask_clusters_o]
            def inv_roi(orig_roi_id):
                # Map a roi cluster id from the current clip back to the id
                # used in the previous clip (via the mask<->roi maps).
                key_list = list(self.mask_roi_map.keys())
                val_list = list(self.mask_roi_map.values())
                orig_mask_id = key_list[val_list.index(orig_roi_id)]
                mask_id = self.inv_mask[orig_mask_id]
                return self.prev_mask_roi_map[mask_id]
            if self.prev_mask_roi_map:
                # Remap segmentation keys and roi ids into the previous clip's
                # id space so instances keep the same identity across clips.
                if isinstance(segm_results,dict):
                    segm_results = {self.inv_mask[roi_cluster_id]:obj_segm for roi_cluster_id, obj_segm in segm_results.items()}
                roi_cluster_ids = [inv_roi(roi_cluster_id) for roi_cluster_id in roi_cluster_ids]
            else:
                # First clip ever seen: adopt the current map as-is.
                self.prev_mask_roi_map = self.mask_roi_map
            det_bboxes = det_bboxes[roi_indeces]
            det_labels = det_labels[roi_indeces]
            bbox_results = bbox2result_with_id(det_bboxes, det_labels, roi_cluster_ids, self.bbox_head.num_classes)
            bbox_result_seq.append(bbox_results)
            segm_result_seq.append(segm_results)
            # Three-channel boolean visualisation (clusters 1, 2, 3).
            mask_clusters_vis = np.concatenate(((mask_clusters==1)[:, :, None], (mask_clusters==2)[:, :, None], (mask_clusters==3)[:, :, None]), axis=2)
            mask_cluster_seq.append(mask_clusters_vis)
            mask_cluster_orig_seq.append(mask_clusters_orig)
            if e==len(M_seq)-1:
                # Remember the last frame's state for the next clip.
                self.prev_mask_clusters = mask_clusters
                self.prev_mask_roi_map = self.mask_roi_map
        if show:
            return bbox_result_seq, segm_result_seq, mask_cluster_seq, mask_cluster_orig_seq
        else:
            return bbox_result_seq, segm_result_seq
|
import sys
import argparse
import torch
from model.ENet import ENet
from model.ERFNet import ERFNet
from model.CGNet import CGNet
from model.EDANet import EDANet
from model.ESNet import ESNet
from model.ESPNet import ESPNet
from model.LEDNet import LEDNet
from model.ESPNet_v2.SegmentationModel import EESPNet_Seg
from model.FastSCNN import FastSCNN
from model.DABNet import DABNet
from model.FPENet import FPENet
from tools.flops_counter.ptflops import get_model_complexity_info
# Registry mapping CLI model names to their constructor classes; the keys
# double as the valid choices for the ``--model`` argument below.
pt_models = {
    'ENet': ENet,
    'ERFNet': ERFNet,
    'CGNet': CGNet,
    'EDANet': EDANet,
    'ESNet': ESNet,
    'ESPNet': ESPNet,
    'LEDNet': LEDNet,
    'EESPNet_Seg': EESPNet_Seg,
    'FastSCNN': FastSCNN,
    'DABNet': DABNet,
    'FPENet': FPENet
}
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ptflops sample script')
    parser.add_argument('--device', type=int, default=0,
                        help='Device to store the model.')
    parser.add_argument('--model', choices=list(pt_models.keys()),
                        type=str, default='ENet')
    parser.add_argument('--result', type=str, default=None)
    args = parser.parse_args()

    # Per-layer report goes to stdout unless a result file path was given.
    if args.result is None:
        ost = sys.stdout
    else:
        ost = open(args.result, 'w')

    try:
        # To profile on GPU instead, wrap this in
        # ``with torch.cuda.device(args.device):`` and call ``.cuda()``.
        net = pt_models[args.model](classes=19)

        # (3, 512, 1024) matches half-resolution Cityscapes input.
        flops, params = get_model_complexity_info(net, (3, 512, 1024),
                                                 as_strings=True,
                                                 print_per_layer_stat=True,
                                                 ost=ost)
        print('Flops: ' + flops)
        print('Params: ' + params)
    finally:
        # BUG FIX: the result file handle was previously never closed, so a
        # crash (or interpreter exit without GC flush) could lose the report.
        if ost is not sys.stdout:
            ost.close()
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import re
from datetime import datetime
from common.django_utils import DataResponse
from common.exceptions import ApiRequestError
from rest_framework.response import Response
from dataflow.flow.handlers.modeling_pipeline import ModelingFlowPipeLine
from dataflow.modeling.exceptions.comp_exceptions import (
DEFAULT_MODELING_ERR,
DEFAULT_SUCCESS_CODE,
CreateModelProcessingException,
EntityAlreadyExistsError,
ModelCreateError,
ModelingCode,
RTCreateError,
SQLForbiddenError,
SqlParseError,
TableNotQuerySetError,
)
from dataflow.modeling.handler.mlsql_execute_log import MLSqlExecuteLogHandler
from dataflow.modeling.job.modeling_job_pipeline import ModelingJobPipeLine
from dataflow.modeling.models import MLSqlExecuteLog
from dataflow.modeling.utils.modeling_utils import ModelingUtils
from dataflow.shared.log import modeling_logger as logger
from dataflow.shared.meta.tag.tag_helper import TagHelper
class ModelingDatalabTaskHandler(object):
    """Drives the lifecycle of a Datalab MLSQL execution task.

    A task walks through the four STAGES below (parse -> submit ->
    execute -> evaluate).  Each stage transition is appended to the
    backing ``MLSqlExecuteLog`` row so the frontend can render progress;
    when a task fails, ``match_code_and_message`` recovers the error code
    and message from the last persisted log line.
    """

    # Ordered stage descriptors; ``format_stage`` renders progress as
    # "<stage_index>/<total_stages>" against this list.
    STAGES = [
        {
            "stage": "parse",
            "stage_zh": "SQL解析",
            "description": "Parse sql and create processing.",
        },
        {"stage": "submit", "stage_zh": "提交任务", "description": "Submit mlsql job."},
        {"stage": "execute", "stage_zh": "任务执行", "description": "Execute mlsql job."},
        {"stage": "evaluate", "stage_zh": "获取结果", "description": "Evaluate mlsql job."},
    ]

    def __init__(self, task):
        """Wrap an existing ``MLSqlExecuteLog`` row."""
        self.flow_task = task

    @classmethod
    def create(
        cls,
        session_id,
        cell_id,
        notebook_id,
        operator,
        action,
        context=None,
        version=None,
    ):
        """Create a new execute-log row and return a handler around it.

        ``version`` is accepted for interface compatibility but is not
        persisted here.
        """
        kwargs = {
            "session_id": session_id,
            "cell_id": cell_id,
            "notebook_id": notebook_id,
            "created_by": operator,
            "action": action,
        }
        if context is not None:
            kwargs["context"] = json.dumps(context)
        o_mlsql_task = MLSqlExecuteLog.objects.create(**kwargs)
        return cls(o_mlsql_task)

    @classmethod
    def start(cls, args):
        """Placeholder kept for interface compatibility; does nothing."""
        pass

    def format_stage(self, current_stage, action="start", level="INFO"):
        """Return a "<current>/<total>" progress string.

        ``action`` and ``level`` are currently unused but kept so existing
        callers do not break.
        """
        stage_count = len(ModelingDatalabTaskHandler.STAGES)
        stage_info = "{current_stage}/{total_stage}".format(current_stage=current_stage, total_stage=stage_count)
        return stage_info

    def execute(self):
        """Run the task end-to-end: parse SQL, create the job, start it.

        Returns a DRF ``Response`` carrying the merged job info on success,
        otherwise a ``DataResponse`` describing the failure.  On unexpected
        errors any processings already created are rolled back.
        """
        self.flow_task.set_status(MLSqlExecuteLog.STATUS.RUNNING)
        flow_context = json.loads(self.flow_task.context)
        args = flow_context["args"]
        bk_biz_id = args["bk_biz_id"]
        project_id = args["project_id"]
        bk_username = args["bk_username"]
        tags = [TagHelper.get_geog_area_code_by_project(project_id)]
        notebook_id = args["notebook_id"]
        cell_id = args["cell_id"]
        component_type = args["component_type"] if "component_type" in args else "spark_mllib"
        use_type = args["type"]
        sql_list = args["sql_list"]
        processing_ids = []
        stage_index = 1
        try:
            processing_params = {
                "sql_list": sql_list,
                "component_type": component_type,
                "project_id": project_id,
                "bk_biz_id": bk_biz_id,
                "notebook_id": notebook_id,
                "cell_id": cell_id,
                "bk_username": bk_username,
                "tags": tags,
                "type": use_type,
            }
            # Stage 1: parse the SQL and create the model processing(s).
            logger.info("create processing...")
            self.add_log(stage_index, status=MLSqlExecuteLog.STATUS.RUNNING)
            processing_info = ModelingJobPipeLine.create_model_processing(processing_params)
            if "processing_ids" not in processing_info:
                # Parsing succeeded but produced no processing, i.e. the
                # statement was a show/drop/truncate.  Normally that cannot
                # reach this point, but it does when such statements are
                # preceded by a /* */ comment.
                message = "sql语法有误,请注意目前尚不支持多行注释"
                raise CreateModelProcessingException(message=message)
            logger.info("processing result:" + json.dumps(processing_info))
            if "contains_create_run_sql" in processing_info:
                # Record whether a create-run statement exists so the frontend
                # can decide whether to display the publish button.
                args["contains_create_run_sql"] = processing_info["contains_create_run_sql"]
            if "disable" in processing_info:
                args["disable"] = processing_info["disable"]
                if processing_info["disable"]:
                    args["disable_message"] = "不支持含有Join子查询的模型进行发布:{processing_id}".format(
                        processing_id=processing_info["sub_query_processing_id"]
                    )
            if "evaluate_map" in processing_info and processing_info["evaluate_map"]:
                evaluate_map = processing_info["evaluate_map"]
                last_table = processing_info["result_table_ids"][-1]
                args["need_evaluate"] = True
                args["evaluate_map"] = evaluate_map
                args["last_table"] = last_table
                # Persist the evaluation info into the log table.
                MLSqlExecuteLogHandler.update_execute_log(self.flow_task.id, flow_context)
            self.add_log(stage_index, status=MLSqlExecuteLog.STATUS.SUCCESS)
            # Stage 2: create the MLSQL job.
            stage_index = stage_index + 1
            logger.info("create job...")
            self.add_log(stage_index, status=MLSqlExecuteLog.STATUS.RUNNING)
            processing_ids = processing_info["processing_ids"]
            job_params = {
                "project_id": project_id,
                "use_type": use_type,
                "component_type": component_type,
                "notebook_id": notebook_id,
                "cell_id": cell_id,
            }
            job_id = ModelingJobPipeLine.create_mlsql_job(processing_info, job_params)
            self.add_log(stage_index, status=MLSqlExecuteLog.STATUS.SUCCESS)
            # Stage 3: start the job.
            stage_index = stage_index + 1
            self.add_log(stage_index, status=MLSqlExecuteLog.STATUS.RUNNING)
            pipeline = ModelingJobPipeLine(job_id)
            logger.info("start job...")
            exec_id = pipeline.start_mlsql_job()
            # Stage 4: merge the return info; ``args`` is extended here so a
            # later check() call can find the execution handles.
            stage_index = stage_index + 1
            return_object = pipeline.merge_return_object(exec_id)
            args["exec_id"] = exec_id
            args["job_id"] = pipeline.job.job_id
            args["geog_area_code"] = pipeline.geog_area_code
            args["cluster_id"] = pipeline.cluster_id
            # Persist the execution info for later use by check().
            MLSqlExecuteLogHandler.update_execute_log(self.flow_task.id, flow_context)
            return Response(return_object)
        except (
            CreateModelProcessingException,
            EntityAlreadyExistsError,
            SqlParseError,
            ModelCreateError,
            RTCreateError,
            SQLForbiddenError,
            TableNotQuerySetError,
        ) as e:
            logger.error("创建Process失败:%s" % e)
            self.flow_task.set_status(MLSqlExecuteLog.STATUS.FAILURE)
            self.add_log(
                stage_index,
                level="ERROR",
                detail="创建Process失败:%s" % e,
                status=MLSqlExecuteLog.STATUS.FAILURE,
            )
            return DataResponse(result=False, code=e.code, message=e.message, errors=e.errors)
        except Exception as e:
            # BUG FIX: the original ``logger.error("运行异常", e)`` passed
            # ``e`` as a %-format argument to a template with no placeholder,
            # which breaks log record formatting; interpolate it explicitly,
            # matching the style of the handler above.
            logger.error("运行异常:%s" % e)
            # Roll back: drop any processings that were already created.
            notebook_info = {
                "notebook_id": notebook_id,
                "cell_id": cell_id,
                "bk_username": bk_username,
            }
            ModelingUtils.clear(processing_ids, notebook_info)
            self.flow_task.set_status(MLSqlExecuteLog.STATUS.FAILURE)
            self.add_log(
                stage_index,
                level="ERROR",
                detail="运行异常:%s" % e,
                status=MLSqlExecuteLog.STATUS.FAILURE,
            )
            return DataResponse(result=False, code=DEFAULT_MODELING_ERR, message="{}".format(e))

    def add_log(
        self,
        stage_index,
        level="INFO",
        detail="",
        status=MLSqlExecuteLog.STATUS.RUNNING,
    ):
        """Append a log entry for the given 1-based stage index."""
        self.log(
            ModelingDatalabTaskHandler.STAGES[stage_index - 1]["description"],
            level=level,
            stage=self.format_stage(stage_index),
            detail=detail,
            status=status,
        )

    def check(self):
        """Poll the job status and sync it onto the task record.

        Returns a ``DataResponse`` whose ``data`` mirrors the job state
        ("failed" / "finished" / "killed" / still running).
        """
        try:
            params = json.loads(self.flow_task.context)["args"]
            if self.flow_task.status == "failure" or "exec_id" not in params or "job_id" not in params:
                # The task failed before a job was launched: recover the error
                # code/message from the persisted log text instead.
                code, msg, errors = ModelingDatalabTaskHandler.match_code_and_message(self.flow_task.logs_zh)
                if not msg:
                    msg = "无法获取失败信息,请联系管理员"
                resp = DataResponse(result=False, code=code)
                resp.message = msg
                resp.data = "failed"
                resp.errors = errors
                return resp
            job_id = params["job_id"]
            res = ModelingFlowPipeLine.check_mlsql_job(job_id)
            final_resp = DataResponse(message=res.message)
            if res.data == "failed":
                self.flow_task.set_status(MLSqlExecuteLog.STATUS.FAILURE)
                final_resp.result = False
                final_resp.data = res.data
                final_resp.message = res.message
                final_resp.code = res.code
            elif res.data == "finished":
                self.flow_task.set_status(MLSqlExecuteLog.STATUS.SUCCESS)
                final_resp.result = True
                final_resp.data = res.data
                final_resp.message = res.message
            elif res.data == "killed":
                self.flow_task.set_status(MLSqlExecuteLog.STATUS.CANCELLED)
                final_resp.result = True
                final_resp.data = res.data
                final_resp.message = res.message
            else:
                # still running, do nothing
                final_resp.result = True
                final_resp.data = res.data
                final_resp.message = res.message
            return final_resp
        except ApiRequestError as e:
            return DataResponse(result=False, code=e.code, message=e.message)
        except Exception as e:
            return DataResponse(result=False, code=DEFAULT_MODELING_ERR, message="{}".format(e))

    @classmethod
    def match_code_and_message(cls, log_line):
        """Extract ``(code, msg, errors)`` from the last line of task logs.

        Log lines end with "...|<message>" where the message embeds an error
        code as "[<code>]".  Falls back to ``DEFAULT_MODELING_ERR`` and the
        raw message when parsing fails.
        """
        log_message = "Unknown error"
        try:
            log_lines = log_line.split("\n")
            last_line = log_lines[-1]
            log_array = last_line.split("|")
            log_message = log_array[-1]
            regex_pattern = r"(.*)\[(.+)\](.+)"
            match_object = re.match(regex_pattern, log_message)
            match_groups = match_object.groups()
            if len(match_groups) >= 3:
                code = match_groups[1]
                msg = match_groups[2]
                mlsql_code = code[-3:]
                if mlsql_code in [
                    ModelingCode.MODEL_EXISTS_ERR[0],
                    ModelingCode.TABLE_EXISTS_ERR[0],
                    ModelingCode.DP_EXISTS_ERR[0],
                ]:
                    # For "entity already exists" errors, recover exactly
                    # which entity is involved from the message tail.
                    error_regex_pattern = r"(.*):(.*):(.*)"
                    error_match_object = re.match(error_regex_pattern, msg)
                    error_match_group = error_match_object.groups()
                    error_entity = error_match_group[-1]
                    errors = {"name": error_entity}
                else:
                    errors = None
            else:
                code = DEFAULT_MODELING_ERR
                msg = log_message
                errors = None
        except Exception as e:
            logger.exception(e)
            code = DEFAULT_MODELING_ERR
            msg = log_message
            errors = None
        return code, msg, errors

    def clear(self):
        """Kill/clean the underlying job, if one was ever created."""
        try:
            params = json.loads(self.flow_task.context)["args"]
            if "job_id" not in params:
                # No job was created; nothing to clean up.
                return DataResponse(result=True, code=DEFAULT_SUCCESS_CODE)
            status = self.flow_task.status
            job_id = params["job_id"]
            return ModelingFlowPipeLine.clear_mlsql_job(job_id, status)
        except ApiRequestError as e:
            return DataResponse(result=False, code=e.code, message=e.message)
        except Exception as e:
            return DataResponse(result=False, code=DEFAULT_MODELING_ERR, message="{}".format(e))

    def log(self, msg, level="INFO", time=None, stage=None, detail="", status=None):
        """Append one entry to the task's log, defaulting to "now"."""
        _time = time if time is not None else datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.flow_task.add_log(msg, level=level, time=_time, stage=stage, detail=detail, status=status)

    @staticmethod
    def get_task_id(notebook_id):
        """Return ``{"task_id", "status"}`` for the notebook's latest task.

        Statuses are translated to the frontend vocabulary
        (cancelled -> killed, success -> finished, failure -> failed);
        returns an empty dict when the notebook has no tasks.
        """
        return_obj = {}
        result = MLSqlExecuteLog.objects.filter(notebook_id=notebook_id).order_by("-id").first()
        if result:
            return_obj["task_id"] = result.id
            status = result.status
            last_status = status
            if status == "cancelled":
                last_status = "killed"
            elif status == "success":
                last_status = "finished"
            elif status == "failure":
                last_status = "failed"
            return_obj["status"] = last_status
        return return_obj
|
# coding=utf-8
from support.views import (
assign_campaigns,
list_campaigns_with_no_seller,
assign_seller,
seller_console_list_campaigns,
seller_console,
scheduled_activities,
edit_address,
import_contacts,
send_promo,
new_subscription,
product_change,
partial_unsubscription,
book_unsubscription,
edit_products,
contact_list,
contact_detail,
api_new_address,
api_dynamic_prices,
list_issues,
invoicing_issues,
debtor_contacts,
new_issue,
new_scheduled_task,
view_issue,
dynamic_contact_filter_new,
dynamic_contact_filter_edit,
dynamic_contact_filter_list,
export_dcf_emails,
sync_with_mailtrain,
register_activity,
edit_contact,
edit_newsletters,
edit_envelopes,
campaign_statistics_list,
campaign_statistics_detail,
campaign_statistics_per_seller,
seller_performance_by_time,
unsubscription_statistics,
)
from django.conf.urls import url
# URL routes for the support app: seller consoles, contact management,
# issue tracking, dynamic contact filters and campaign statistics.
urlpatterns = [
    # Campaign / seller assignment
    url(r"^assign_campaigns/$", assign_campaigns, name="assign_campaigns"),
    url(r"^assign_sellers/$", list_campaigns_with_no_seller, name="assign_to_seller"),
    url(r"^assign_sellers/(\d+)/$", assign_seller, name="assign_sellers"),
    url(
        r"^seller_console/$",
        seller_console_list_campaigns,
        name="seller_console_list_campaigns",
    ),
    url(r"^seller_console/(\w+)/(\d+)/$", seller_console, name="seller_console"),
    url(r"^scheduled_activities/$", scheduled_activities, name="scheduled_activities"),
    # Contacts and subscriptions
    # NOTE(review): the two edit_address routes have no ``name=`` and so
    # cannot be reverse()d — confirm templates link to them by raw path.
    url(r"^edit_address/(\d+)/$", edit_address),
    url(r"^edit_address/(\d+)/(\d+)/$", edit_address),
    url(r"^import/$", import_contacts, name="import_contacts"),
    url(r"^send_promo/(\d+)/$", send_promo, name="send_promo"),
    url(r"^new_subscription/(\d+)/$", new_subscription, name="new_subscription"),
    url(r"^product_change/(\d+)/$", product_change, name="product_change"),
    url(r"^partial_unsubscription/(\d+)/$", partial_unsubscription, name="partial_unsubscription"),
    url(r"^book_unsubscription/(\d+)/$", book_unsubscription, name="book_unsubscription"),
    url(r"^edit_products/(\d+)/$", edit_products, name="edit_products"),
    url(r"^contacts/$", contact_list, name="contact_list"),
    url(r"^contacts/(\d+)/$", contact_detail, name="contact_detail"),
    # JSON endpoints consumed by frontend scripts (unnamed on purpose?)
    url(r"^api_new_address/(\d+)/$", api_new_address),
    url(r"^api_dynamic_prices/$", api_dynamic_prices),
    # Issues
    url(r"^list_issues/$", list_issues, name="list_issues"),
    url(r"^invoicing_issues/$", invoicing_issues, name="invoicing_issues"),
    url(r"^debtor_contacts/$", debtor_contacts, name="debtor_contacts"),
    url(r"^new_issue/(\d+)/$", new_issue, name="new_issue"),
    url(
        r"^new_scheduled_task/(\d+)/(\w+)/$",
        new_scheduled_task,
        name="new_scheduled_task",
    ),
    url(r"^view_issue/(\d+)/$", view_issue, name="view_issue"),
    # Dynamic contact filters / mailtrain sync
    url(
        r"^add_dynamic_contact_filter/$",
        dynamic_contact_filter_new,
        name="dynamic_contact_filter_add",
    ),
    url(
        r"^dynamic_contact_filter_list/$",
        dynamic_contact_filter_list,
        name="dynamic_contact_filter_list",
    ),
    url(
        r"^dynamic_contact_filter/(\d+)/$",
        dynamic_contact_filter_edit,
        name="dynamic_contact_filter_edit",
    ),
    url(r"^export_dcf_emails/(\d+)/$", export_dcf_emails, name="export_dcf_emails"),
    url(
        r"^sync_with_mailtrain/(\d+)/$", sync_with_mailtrain, name="sync_with_mailtrain"
    ),
    url(r"^register_activity/$", register_activity, name="register_activity"),
    url(r"^contacts/(\d+)/edit/$", edit_contact, name="edit_contact"),
    url(r"^edit_newsletters/(\d+)/$", edit_newsletters, name="edit_newsletters"),
    url(r"^edit_envelopes/(\d+)/$", edit_envelopes, name="edit_envelopes"),
    # Campaign statistics
    url(r"^campaign_statistics/$", campaign_statistics_list, name="campaign_statistics_list"),
    url(r"^campaign_statistics/(\d+)/$", campaign_statistics_detail, name="campaign_statistics_detail"),
    url(r"^campaign_statistics/by_seller/(\d+)/$", campaign_statistics_per_seller, name="campaign_statistics_per_seller"),
    url(r"^campaign_statistics/performance_by_time/$", seller_performance_by_time, name="seller_performance_by_time"),
    url(r"^unsubscription_statistics/$", unsubscription_statistics, name="unsubscription_statistics"),
]
|
#
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from merlin_standard_lib import Tag
tf = pytest.importorskip("tensorflow")
tr = pytest.importorskip("transformers4rec.tf")
def test_concat_aggregation_yoochoose(tabular_schema, tf_tabular_data):
    """Concatenating all tabular features yields a 262-wide last axis."""
    features = tr.TabularFeatures.from_schema(tabular_schema)
    output = (features >> tr.ConcatFeatures())(tf_tabular_data)
    assert output.shape[-1] == 262
def test_stack_aggregation_yoochoose(tabular_schema, tf_tabular_data):
    """Stacking the embeddings gives shape (batch, dim=64, features=4)."""
    embeddings = tr.EmbeddingFeatures.from_schema(tabular_schema)
    output = (embeddings >> tr.StackFeatures())(tf_tabular_data)
    assert output.shape[1] == 64
    assert output.shape[2] == 4
def test_element_wise_sum_features_different_shapes():
    """ElementwiseSum must reject features with mismatched shapes."""
    inputs = {
        "item_id/list": tf.random.uniform((10, 20)),
        "category/list": tf.random.uniform((10, 25)),
    }
    with pytest.raises(ValueError) as excinfo:
        tr.ElementwiseSum()(inputs)
    assert "shapes of all input features are not equal" in str(excinfo.value)
def test_element_wise_sum_aggregation_yoochoose(tabular_schema, tf_tabular_data):
    """Element-wise summing the embeddings keeps the embedding dim (64)."""
    embeddings = tr.EmbeddingFeatures.from_schema(tabular_schema)
    output = (embeddings >> tr.ElementwiseSum())(tf_tabular_data)
    assert output.shape[-1] == 64
def test_element_wise_sum_item_multi_no_col_group():
    """ElementwiseSumItemMulti without a schema must raise."""
    with pytest.raises(ValueError) as excinfo:
        tr.ElementwiseSumItemMulti()(None)
    assert "requires a schema." in str(excinfo.value)
def test_element_wise_sum_item_multi_col_group_no_item_id(tabular_schema):
    """A schema whose item id column was removed must be rejected."""
    categ_schema = tabular_schema.select_by_tag(Tag.CATEGORICAL)
    # Remove the item id from col_group
    categ_schema = categ_schema.remove_by_name("item_id")
    with pytest.raises(ValueError) as excinfo:
        tr.ElementwiseSumItemMulti(categ_schema)(None)
    assert "no column tagged as item id" in str(excinfo.value)
def test_element_wise_sum_item_multi_features_different_shapes(yoochoose_schema):
    """ElementwiseSumItemMulti must reject mismatched feature shapes."""
    categ_schema = yoochoose_schema.select_by_tag(Tag.CATEGORICAL)
    inputs = {
        "item_id/list": tf.random.uniform((10, 20)),
        "category/list": tf.random.uniform((10, 25)),
    }
    with pytest.raises(ValueError) as excinfo:
        tr.ElementwiseSumItemMulti(categ_schema)(inputs)
    assert "shapes of all input features are not equal" in str(excinfo.value)
def test_element_wise_sum_item_multi_aggregation_yoochoose(yoochoose_schema, tf_yoochoose_like):
    """Item-multi element-wise sum keeps the embedding dim (64)."""
    embeddings = tr.EmbeddingFeatures.from_schema(yoochoose_schema)
    output = (embeddings >> tr.ElementwiseSumItemMulti(yoochoose_schema))(tf_yoochoose_like)
    assert output.shape[-1] == 64
def test_element_wise_sum_item_multi_aggregation_registry_yoochoose(
    yoochoose_schema, tf_yoochoose_like
):
    """The aggregation is also reachable via its registry name."""
    categ_schema = yoochoose_schema.select_by_tag(Tag.CATEGORICAL)
    features = tr.TabularSequenceFeatures.from_schema(
        categ_schema, aggregation="element-wise-sum-item-multi"
    )
    assert features(tf_yoochoose_like).shape[-1] == 64
|
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
class ContrastMatrix:
    """A representation of a contrast matrix

    Parameters
    ----------
    matrix: 2-dimensional np.array
        The contrast matrix as a numpy array.
    labels: list or tuple
        The labels for the columns of the contrast matrix. Its length must match the number
        of columns in the contrast matrix.
    """

    def __init__(self, matrix, labels):
        # Both setters validate their value; the shape check runs afterwards.
        self.matrix = matrix
        self.labels = labels
        if matrix.shape[1] != len(labels):  # pragma: no cover
            raise ValueError(
                "The number of columns in the contrast matrix is not equal to the number of labels"
            )

    @property
    def matrix(self):
        return self._matrix

    @matrix.setter
    def matrix(self, value):
        # A valid matrix is a 2-d numpy array with an int or float dtype.
        is_2d_numeric = (
            isinstance(value, np.ndarray) and value.dtype.kind in "if" and value.ndim == 2
        )
        if not is_2d_numeric:  # pragma: no cover
            raise ValueError("The matrix argument must be a 2d numerical numpy array")
        self._matrix = value

    @property
    def labels(self):
        return self._labels

    @labels.setter
    def labels(self, value):
        if not isinstance(value, (list, tuple)):  # pragma: no cover
            raise ValueError("The labels argument must be a list or a tuple")
        for label in value:
            if not isinstance(label, str):  # pragma: no cover
                raise ValueError("The items in the labels argument must be of type 'str'")
        self._labels = value

    def __str__(self):  # pragma: no cover
        return (
            f"{self.__class__.__name__}\n"
            f"Matrix:\n{self.matrix}\n\n"
            f"Labels:\n{', '.join(self.labels)}"
        )

    def __repr__(self):  # pragma: no cover
        return self.__str__()
class CategoricalBox:
    """A container with information to encode categorical data

    Parameters
    ----------
    data: 1d array-like
        The data converted to categorical.
    contrast: Encoding
        An instance that represents the contrast matrix used to encode the categorical variable.
    levels: list or tuple
        The levels in ``data`` in the desired order.
    """

    def __init__(self, data, contrast, levels):
        self.data = data
        self.contrast = contrast
        self.levels = levels

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        # Accept pandas Series transparently by unwrapping to the ndarray.
        if isinstance(value, pd.Series):
            value = value.values
        if not (isinstance(value, np.ndarray) and value.ndim == 1):  # pragma: no cover
            raise ValueError("The data argument must be one dimensional array-like")
        self._data = value

    @property
    def contrast(self):
        return self._contrast

    @contrast.setter
    def contrast(self, value):
        # Allows to do C(x, Treatment) instead of C(x, Treatment())
        if callable(value):
            value = value()
        if not (isinstance(value, Encoding) or value is None):  # pragma: no cover
            # BUG FIX: message previously read "The contrast argument in must be".
            raise ValueError("The contrast argument must be an instance of Encoding")
        self._contrast = value

    @property
    def levels(self):
        return self._levels

    @levels.setter
    def levels(self, value):
        # ``None`` means "derive the level order from the data later".
        if value is not None and set(value) != set(self.data):  # pragma: no cover
            # BUG FIX: message previously misspelled "being" as "beign".
            raise ValueError("The levels being assigned and the levels in the data differ")
        self._levels = value
class Encoding(ABC):
    """Abstract class for custom Encodings.

    Concrete subclasses (e.g. ``Treatment``, ``Sum``) must implement both
    coding variants; which one is used depends on whether the model term
    already spans the intercept.
    """

    @abstractmethod
    def code_with_intercept(self, levels):  # pragma: no cover
        """This contrast matrix _does_ span the intercept"""
        return

    @abstractmethod
    def code_without_intercept(self, levels):  # pragma: no cover
        """This contrast matrix _does not_ span the intercept"""
        return
class Treatment(Encoding):
    def __init__(self, reference=None):
        """Treatment encoding

        This is also known as dummy encoding.

        When the encoding is not full-rank, one level is taken as reference and the regression
        coefficients measure the difference between the each level and the reference. In that case,
        the intercept represents the mean of the reference level.

        When the encoding is of full-rank, there's a dummy variable for each level representing
        its mean.

        Parameters
        ----------
        reference: str
            The level to take as reference
        """
        self.reference = reference

    def code_with_intercept(self, levels):
        """Full-rank coding: an identity matrix, one dummy column per level."""
        contrast = np.eye(len(levels), dtype=int)
        labels = [str(level) for level in levels]
        return ContrastMatrix(contrast, labels)

    def code_without_intercept(self, levels):
        """Reduced-rank coding: the reference level's row is all zeros."""
        # First category is the default reference
        if self.reference is None:
            reference = 0
        else:
            if self.reference in levels:
                reference = levels.index(self.reference)
            else:  # pragma: no cover
                raise ValueError("reference not in levels")
        eye = np.eye(len(levels) - 1, dtype=int)
        # BUG FIX (consistency): build the zero row as int so the stacked
        # contrast stays an integer matrix like code_with_intercept's;
        # previously np.zeros defaulted to float and silently promoted
        # the whole contrast to float64.
        contrast = np.vstack(
            (eye[:reference, :], np.zeros((1, len(levels) - 1), dtype=int), eye[reference:, :])
        )
        levels = levels[:reference] + levels[reference + 1 :]
        labels = [str(level) for level in levels]
        return ContrastMatrix(contrast, labels)
class Sum(Encoding):
    def __init__(self, omit=None):
        """Sum-to-zero encoding

        Also known as deviation encoding: it compares the mean of each level
        against the grand mean (the mean of the level means).

        For full-rank coding an intercept column is added, representing the
        mean of the response variable.

        One level must be omitted to avoid redundancy.  By default the last
        level is dropped; this can be changed via ``omit``.

        Parameters
        ----------
        omit: str
            The level to omit.
        """
        self.omit = omit

    def _omit_index(self, levels):
        """Return the omitted level's index, between 0 and len(levels) - 1."""
        if self.omit is None:
            # By default, omit the last level.
            return len(levels) - 1
        return levels.index(self.omit)

    def _sum_contrast(self, levels):
        """Build the (n, n - 1) sum-to-zero contrast matrix."""
        n = len(levels)
        dropped = self._omit_index(levels)
        identity = np.eye(n - 1, dtype=int)
        contrast = np.empty((n, n - 1), dtype=int)
        contrast[:dropped, :] = identity[:dropped, :]
        contrast[dropped, :] = -1  # the omitted level gets the -1 row
        contrast[dropped + 1 :, :] = identity[dropped:, :]
        return contrast

    def code_with_intercept(self, levels):
        """Full-rank coding: prepend an all-ones "mean" column."""
        reduced = self.code_without_intercept(levels)
        matrix = np.column_stack((np.ones(len(levels), dtype=int), reduced.matrix))
        return ContrastMatrix(matrix, ["mean"] + reduced.labels)

    def code_without_intercept(self, levels):
        """Reduced-rank coding: one column per non-omitted level."""
        matrix = self._sum_contrast(levels)
        dropped = self._omit_index(levels)
        kept = levels[:dropped] + levels[dropped + 1 :]
        return ContrastMatrix(matrix, [str(level) for level in kept])
# Registry of the built-in encodings, keyed by the name used in formulas.
ENCODINGS = {"Treatment": Treatment, "Sum": Sum}
|
from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import include
# Routes for the photo-sharing app: feed, profiles, images, comments,
# likes, follows, and email-activation signup.
# NOTE(review): both ^accounts/profile/ and ^new/profile$ are registered
# under the same name 'add_profile'; reverse('add_profile') resolves to
# the later entry — confirm this duplication is intentional.
urlpatterns = [
    url(r'^$', views.home,name='home'),
    url(r'^accounts/profile/', views.add_profile, name='add_profile'),
    url(r'^profile/(\d+)', views.profile, name='profile'),
    url(r'^search/', views.search_results, name='search_results'),
    url(r'^image/(\d+)',views.get_image_by_id,name ='image'),
    url(r'^new/profile$', views.add_profile, name='add_profile'),
    url(r'^upload/', views.update_image, name='upload'),
    url(r'^comment/(?P<pk>\d+)',views.add_comment,name='comment'),
    url(r'^like/(?P<operation>.+)/(?P<pk>\d+)',views.like, name='like'),
    url(r'^all/(?P<pk>\d+)', views.all, name='all'),
    url(r'^signup/$', views.signup, name='signup'),
    url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.activate, name='activate'),
    url(r'^follow/(?P<operation>.+)/(?P<id>\d+)',views.follow,name='follow'),
]
# Serve user-uploaded media through Django only during development.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
#
|
"""
Support for the PRT Heatmiser themostats using the V3 protocol.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.heatmiser/
"""
import logging
import voluptuous as vol
from homeassistant.components.climate import (
ClimateDevice, PLATFORM_SCHEMA, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import (
TEMP_CELSIUS, ATTR_TEMPERATURE, CONF_PORT, CONF_NAME, CONF_ID)
import homeassistant.helpers.config_validation as cv
# PyPI dependency installed on demand by Home Assistant.
REQUIREMENTS = ['heatmiserV3==0.9.1']
_LOGGER = logging.getLogger(__name__)
CONF_IPADDRESS = 'ipaddress'
CONF_TSTATS = 'tstats'
# Schema for one thermostat entry: its bus address ("id") and display name.
TSTATS_SCHEMA = vol.Schema({
    vol.Required(CONF_ID): cv.string,
    vol.Required(CONF_NAME): cv.string,
})
# Platform config: the serial-over-IP gateway (address + port) plus the
# map of thermostats attached to it.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_IPADDRESS): cv.string,
    vol.Required(CONF_PORT): cv.port,
    vol.Required(CONF_TSTATS, default={}):
        vol.Schema({cv.string: TSTATS_SCHEMA}),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the heatmiser thermostat platform.

    Opens one shared serial-over-IP connection and registers a
    ``HeatmiserV3Thermostat`` entity per configured thermostat.
    """
    from heatmiserV3 import heatmiser, connection

    ipaddress = config.get(CONF_IPADDRESS)
    port = str(config.get(CONF_PORT))
    tstats = config.get(CONF_TSTATS)

    # All thermostats share a single connection to the RS485 bus.
    serport = connection.connection(ipaddress, port)
    serport.open()

    # Register every configured thermostat in one batch instead of issuing
    # one add_devices() call per device inside the loop.
    add_devices([
        HeatmiserV3Thermostat(
            heatmiser, tstat.get(CONF_ID), tstat.get(CONF_NAME), serport)
        for tstat in tstats.values()
    ])
class HeatmiserV3Thermostat(ClimateDevice):
    """Representation of a HeatmiserV3 thermostat."""

    def __init__(self, heatmiser, device, name, serport):
        """Initialize the thermostat."""
        # ``heatmiser`` is the heatmiserV3 module; ``serport`` is the shared
        # serial connection opened in setup_platform.
        self.heatmiser = heatmiser
        self.device = device
        self.serport = serport
        self._current_temperature = None
        self._name = name
        self._id = device
        # DCB (device control block) dict, refreshed by update().
        self.dcb = None
        self.update()
        self._target_temperature = int(self.dcb.get('roomset'))

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_TARGET_TEMPERATURE

    @property
    def name(self):
        """Return the name of the thermostat, if any."""
        return self._name

    @property
    def temperature_unit(self):
        """Return the unit of measurement which this thermostat uses."""
        return TEMP_CELSIUS

    @property
    def current_temperature(self):
        """Return the current temperature."""
        if self.dcb is not None:
            # NOTE: the trailing space in 'floortemplow ' mirrors the key as
            # produced by the heatmiserV3 DCB map — do not "fix" it without
            # checking the library.
            low = self.dcb.get('floortemplow ')
            high = self.dcb.get('floortemphigh')
            # Temperature is a 16-bit value in tenths of a degree,
            # split across two bytes.
            temp = (high * 256 + low) / 10.0
            self._current_temperature = temp
        else:
            self._current_temperature = None
        return self._current_temperature

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    def set_temperature(self, **kwargs):
        """Set new target temperature."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        # assumes register 18 is the room setpoint and 1 the write length —
        # TODO confirm against the heatmiserV3 protocol docs.
        self.heatmiser.hmSendAddress(
            self._id,
            18,
            temperature,
            1,
            self.serport)
        self._target_temperature = temperature

    def update(self):
        """Get the latest data."""
        # Reads the full DCB for this 'prt' (programmable room thermostat).
        self.dcb = self.heatmiser.hmReadAddress(self._id, 'prt', self.serport)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.models.core import BaseSequentialModel
from lib.distributions import Normal, Multinomial
class CTVAE_info(BaseSequentialModel):
    """Conditional trajectory VAE policy with information factorization.

    The encoder/decoder form a conditional VAE over trajectories, while
    per-label auxiliary networks are trained adversarially against the
    encoder: the auxiliary heads try to predict labels from the latent z,
    and the encoder is trained to defeat them (see ``optimize``).
    """

    name = 'ctvae_info' # conditional trajectory VAE policy w/ information factorization
    model_args = ['state_dim', 'action_dim', 'z_dim', 'h_dim', 'rnn_dim', 'num_layers']
    requires_labels = True

    def __init__(self, model_config):
        super().__init__(model_config)

    def _construct_model(self):
        """Build the encoder, decoder and per-label auxiliary predictors."""
        state_dim = self.config['state_dim']
        action_dim = self.config['action_dim']
        z_dim = self.config['z_dim']
        h_dim = self.config['h_dim']
        enc_rnn_dim = self.config['rnn_dim']
        # The decoder RNN dimension is only nonzero for recurrent models.
        dec_rnn_dim = self.config['rnn_dim'] if self.is_recurrent else 0
        num_layers = self.config['num_layers']
        label_dim = self.config['label_dim']
        label_functions = self.config['label_functions']

        # Bidirectional GRU over (state, action) pairs feeds the posterior.
        self.enc_birnn = nn.GRU(state_dim+action_dim, enc_rnn_dim, num_layers=num_layers, bidirectional=True)

        # TODO hacky, change this
        if 'mode' in self.config and self.config['mode'] == 'mujoco':
            # Mujoco variant: no encoder MLP, Tanh decoder, and a single
            # learned state-independent action log-variance parameter.
            assert not self.is_recurrent
            self.enc_mean = nn.Linear(2*enc_rnn_dim, z_dim)
            self.enc_logvar = nn.Linear(2*enc_rnn_dim, z_dim)

            self.dec_action_fc = nn.Sequential(
                nn.Linear(state_dim+z_dim+label_dim+dec_rnn_dim, h_dim),
                nn.Tanh(),
                nn.Linear(h_dim, h_dim),
                nn.Tanh())
            self.dec_action_mean = nn.Sequential(
                nn.Linear(h_dim, action_dim),
                nn.Tanh())
            self.dec_action_logvar = nn.Parameter(torch.zeros(action_dim))
        else:
            # Default variant: ReLU MLPs and a state-dependent log-variance.
            self.enc_fc = nn.Sequential(
                nn.Linear(2*enc_rnn_dim, h_dim),
                nn.ReLU(),
                nn.Linear(h_dim, h_dim),
                nn.ReLU())
            self.enc_mean = nn.Linear(h_dim, z_dim)
            self.enc_logvar = nn.Linear(h_dim, z_dim)

            self.dec_action_fc = nn.Sequential(
                nn.Linear(state_dim+z_dim+label_dim+dec_rnn_dim, h_dim),
                nn.ReLU(),
                nn.Linear(h_dim, h_dim),
                nn.ReLU())
            self.dec_action_mean = nn.Linear(h_dim, action_dim)
            self.dec_action_logvar = nn.Linear(h_dim, action_dim)

        # One auxiliary predictor (MLP trunk + mean/logvar heads) per label
        # function; these are the adversaries of the encoder.
        self.aux_fc = nn.ModuleList([nn.Sequential(
            nn.Linear(z_dim, h_dim),
            nn.ReLU(),
            nn.Linear(h_dim, h_dim),
            nn.ReLU()) for lf in label_functions])
        self.aux_mean = nn.ModuleList([nn.Linear(h_dim, lf.output_dim) for lf in label_functions])
        self.aux_logvar = nn.ModuleList([nn.Linear(h_dim, lf.output_dim) for lf in label_functions])

        if self.is_recurrent:
            self.dec_rnn = nn.GRU(state_dim+action_dim, dec_rnn_dim, num_layers=num_layers)

    def _define_losses(self):
        """Register the losses and metrics tracked by the log."""
        self.log.add_loss('kl_div')
        self.log.add_loss('nll')
        self.log.add_metric('kl_div_true')
        for lf in self.config['label_functions']:
            self.log.add_loss('{}_label_pred'.format(lf.name))

    def ctvaep_params(self):
        """Return encoder/decoder parameters (everything except the auxiliary heads)."""
        params = list(self.enc_birnn.parameters()) + list(self.enc_mean.parameters()) + list(self.enc_logvar.parameters()) + \
            list(self.dec_action_fc.parameters()) + list(self.dec_action_mean.parameters())

        # TODO hacky
        if 'mode' not in self.config or self.config['mode'] != 'mujoco':
            params += list(self.enc_fc.parameters())
            params += list(self.dec_action_logvar.parameters())
        else:
            # In mujoco mode dec_action_logvar is a bare Parameter, not a module.
            params += [self.dec_action_logvar]

        if self.is_recurrent:
            params += list(self.dec_rnn.parameters())

        return params

    def aux_params(self):
        """Return the parameters of the auxiliary label predictors."""
        return list(self.aux_fc.parameters()) + list(self.aux_mean.parameters()) + list(self.aux_logvar.parameters())

    def init_optimizer(self, lr):
        """Create separate Adam optimizers for the VAE and the auxiliary heads."""
        self.ctvaep_optimizer = torch.optim.Adam(self.ctvaep_params(), lr=lr)
        self.aux_optimizer = torch.optim.Adam(self.aux_params(), lr=lr)

    def optimize(self, losses):
        """Adversarial step: VAE minimizes all losses, auxiliaries maximize label log-probs."""
        assert isinstance(losses, dict)

        # VAE step over the summed losses. retain_graph=True because the
        # auxiliary step below backpropagates through the same forward pass.
        self.ctvaep_optimizer.zero_grad()
        ctvaep_loss = sum(losses.values())
        ctvaep_loss.backward(retain_graph=True)
        nn.utils.clip_grad_norm_(self.ctvaep_params(), 10)
        self.ctvaep_optimizer.step()

        # Auxiliary step: minimizing -sum(label_pred) maximizes the label
        # log-probabilities, opposing the encoder's objective.
        self.aux_optimizer.zero_grad()
        label_preds = [ value for key,value in losses.items() if 'label_pred' in key ]
        aux_loss = -sum(label_preds)
        aux_loss.backward()
        nn.utils.clip_grad_norm_(self.aux_params(), 10)
        self.aux_optimizer.step()

    def predict_labels(self, z, lf_idx, categorical):
        """Predict labels from latent z using auxiliary head ``lf_idx``.

        Returns a Multinomial for categorical labels, a Normal otherwise.
        """
        aux_h = self.aux_fc[lf_idx](z)
        aux_mean = self.aux_mean[lf_idx](aux_h)
        aux_logvar = self.aux_logvar[lf_idx](aux_h)

        if categorical:
            label_log_prob = F.log_softmax(aux_mean, dim=-1)
            return Multinomial(label_log_prob)
        else:
            return Normal(aux_mean, aux_logvar)

    def forward(self, states, actions, labels_dict):
        """Run one training forward pass and populate the loss log."""
        self.log.reset()

        assert actions.size(1)+1 == states.size(1) # final state has no corresponding action

        # Switch to (time, batch, dim) layout expected by the RNNs.
        states = states.transpose(0,1)
        actions = actions.transpose(0,1)
        labels = torch.cat(list(labels_dict.values()), dim=-1)

        # Encode
        posterior = self.encode(states[:-1], actions=actions)
        z = posterior.sample()

        # The true KL is logged as a metric only; the optimized KL applies
        # free bits so the encoder is not over-penalized early on.
        kld = Normal.kl_divergence(posterior, free_bits=0.0).detach()
        self.log.metrics['kl_div_true'] = torch.sum(kld)
        kld = Normal.kl_divergence(posterior, free_bits=1/self.config['z_dim'])
        self.log.losses['kl_div'] = torch.sum(kld)

        # Train auxiliary networks to maximize label prediction losses
        # Train encoder to minimize label prediction losses
        for lf_idx, lf_name in enumerate(labels_dict):
            lf = self.config['label_functions'][lf_idx]
            lf_labels = labels_dict[lf_name]
            auxiliary = self.predict_labels(z, lf_idx, lf.categorical)
            self.log.losses['{}_label_pred'.format(lf_name)] = auxiliary.log_prob(lf_labels)

        # Decode
        self.reset_policy(labels=labels, z=z)

        for t in range(actions.size(0)):
            action_likelihood = self.decode_action(states[t])
            # Negative log-likelihood of the observed action at step t.
            self.log.losses['nll'] -= action_likelihood.log_prob(actions[t])
            if self.is_recurrent:
                self.update_hidden(states[t], actions[t])

        return self.log
|
# Demo of basic ndarray attributes on a 10x2 array of Gaussian noise.
a = np.random.randn(10, 2)
print("forma: ", a.shape)
print("# dimensiones: ", a.ndim)
print("# elementos: ", a.size)
# np.prod works for any number of dimensions, unlike the binary
# np.multiply(*shape), which only accepts exactly two factors.
print("El número de elementos es igual a multiplicar "
      "todos los elementos de la forma (Shape): ",
      np.prod(a.shape) == a.size)
print("Tamaño de cada elemento en bytes: ", a.itemsize)
print("# bytes en el array: ", a.nbytes)
print("El número de bytes del array es igual al número de "
      "elementos por el tamaño de cada elemento: ",
      a.itemsize * a.size == a.nbytes)
print("FLAGS: ", a.flags, sep="\n")
# coding: utf-8
"""
Nodeum API
# About This document describes the Nodeum API version 2: If you are looking for any information about the product itself, reach the product website https://www.nodeum.io. You can also contact us at this email address : info@nodeum.io # Filter parameters When browsing a list of items, multiple filter parameters may be applied. Some operators can be added to the value as a prefix: - `=` value is equal. Default operator, may be omitted - `!=` value is different - `>` greater than - `>=` greater than or equal - `<` lower than - `>=` lower than or equal - `><` included in list, items should be separated by `|` - `!><` not included in list, items should be separated by `|` - `~` pattern matching, may include `%` (any characters) and `_` (one character) - `!~` pattern not matching, may include `%` (any characters) and `_` (one character) # noqa: E501
The version of the OpenAPI document: 2.1.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import nodeum_sdk
from nodeum_sdk.api.cloud_connectors_api import CloudConnectorsApi # noqa: E501
from nodeum_sdk.rest import ApiException
class TestCloudConnectorsApi(unittest.TestCase):
    """Unit test stubs for the CloudConnectorsApi endpoints."""

    def setUp(self):
        # A fresh API client is created before every test case.
        self.api = nodeum_sdk.api.cloud_connectors_api.CloudConnectorsApi()  # noqa: E501

    def tearDown(self):
        # No per-test cleanup is required.
        pass

    def test_create_cloud_connector(self):
        """Stub for create_cloud_connector: creates a new cloud connector."""  # noqa: E501
        pass

    def test_destroy_cloud_connector(self):
        """Stub for destroy_cloud_connector: destroys a specific cloud connector."""  # noqa: E501
        pass

    def test_index_cloud_connectors(self):
        """Stub for index_cloud_connectors: lists all cloud connectors."""  # noqa: E501
        pass

    def test_show_cloud_connector(self):
        """Stub for show_cloud_connector: displays a specific cloud connector."""  # noqa: E501
        pass

    def test_test_cloud_connector(self):
        """Stub for test_cloud_connector: tests an unsaved cloud connector."""  # noqa: E501
        pass

    def test_test_result_cloud_connector(self):
        """Stub for test_result_cloud_connector: checks the result of a connector test job."""  # noqa: E501
        pass

    def test_update_cloud_connector(self):
        """Stub for update_cloud_connector: updates a specific cloud connector."""  # noqa: E501
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
"""
-----------------------------------------------------------------------------
Created: 11.02.2021, 18:04
-----------------------------------------------------------------------------
Author: Matthieu Scherpf
Email: Matthieu.Scherpf@tu-dresden.de
Website: https://becuriouss.github.io/matthieu-scherpf/
Project page: tba
-----------------------------------------------------------------------------
Purpose: Tests for the implemented processing. The tests are based on a simple plausibility check by plotting the computation results.
-----------------------------------------------------------------------------
"""
# %%
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from ippgtoolbox.processing import processing
# Sampling frequencies (Hz) and file locations of the reference recordings
# used by the plausibility tests below.
TEST_sample_freq_ref_bp = 1000
TEST_abs_file_path_ref_bp = os.path.join(
    './assets/bp4d_example/BP_mmHg.txt')
TEST_sample_freq_ref_hr_seq = 1000
TEST_abs_file_path_ref_hr_seq = os.path.join(
    './assets/bp4d_example/Pulse Rate_BPM.txt')
TEST_sample_freq_bvp = 25
TEST_abs_file_path_bvp = os.path.join(
    './assets/bp4d_example/bvp_CHROM_cdf.mat')
# Use thin lines: the test signals are long.
plt.rc('lines', linewidth=0.2)
class TestProcessing:
    """Plausibility tests for the ``processing`` module.

    Loads the reference recordings, runs the window-based feature
    computations and plots each result; correctness is judged visually
    from the generated figures.
    """

    def __init__(self):
        # Load the references and plot them once for visual comparison.
        self.bp = np.loadtxt(TEST_abs_file_path_ref_bp)
        self.hr_seq = np.loadtxt(TEST_abs_file_path_ref_hr_seq)
        self.bvp = loadmat(TEST_abs_file_path_bvp)['CHROM']
        self._test_plot_seq(self.bp, 'Continuous blood pressure')
        self._test_plot_seq(self.hr_seq, 'Reference HR sequence')
        self._test_plot_seq(self.bvp, 'Extracted blood volume pulse')

    def test_compute_hr_from_hr_sequence(self):
        """Window-based HR taken directly from the reference HR sequence."""
        self.vals, self.data = processing.compute_window_based_feature(
            self.hr_seq,
            TEST_sample_freq_ref_hr_seq,
            processing.compute_hr_from_hr_sequence,
            window_length=10,
            window_stride=1,
            verbose=True
        )
        self._test_plot('HR from HR sequence')

    def test_compute_hr_from_spectrum_peak_det(self):
        """HR via spectral peak detection on the blood pressure signal."""
        self.vals, self.data = processing.compute_window_based_feature(
            self.bp,
            TEST_sample_freq_ref_bp,
            processing.compute_hr_from_spectrum_peak_det,
            window_length=10,
            window_stride=1,
            verbose=True
        )
        self._test_plot('HR from cbp spectrum peak det')

    def test_compute_hr_from_spectrum_max(self):
        """HR via the spectral maximum of the blood pressure signal."""
        self.vals, self.data = processing.compute_window_based_feature(
            self.bp,
            TEST_sample_freq_ref_bp,
            processing.compute_hr_from_spectrum_max,
            window_length=10,
            window_stride=1,
            verbose=True
        )
        self._test_plot('HR from cbp spectrum max')

    def test_compute_snr(self):
        """SNR of the extracted BVP, referenced to its own spectral-max HR."""
        # compute_window_based_feature returns (values, data) — see the other
        # tests above. Only the HR values are needed as reference here; the
        # per-window debug data is discarded. (Previously the whole tuple
        # was plotted and forwarded as ref_hr_bpm.)
        ref_hr, _ = processing.compute_window_based_feature(
            self.bvp,
            TEST_sample_freq_bvp,
            processing.compute_hr_from_spectrum_max,
            window_length=10,
            window_stride=1,
            verbose=False
        )
        self._test_plot_seq(ref_hr, 'HR for extracted blood volume pulse')
        self.vals, self.data = processing.compute_window_based_feature(
            self.bvp,
            TEST_sample_freq_bvp,
            processing.compute_snr,
            window_length=10,
            window_stride=1,
            verbose=True,
            ref_hr_bpm=ref_hr,
            # freq_res_bpm=1,
        )
        self._test_plot('SNR for extracted blood volume pulse')
        # # plot spectrogram
        # f, t, p = ([], [], [])
        # for i, w in enumerate(self.data['w_data']):
        #     f.append(w['freq'][w['roi_f_all']])
        #     p.append(w['power'][w['roi_f_all']])
        #     t.append(i+1)
        # f = f[0] * 60  # convert to bpm
        # t = np.asarray(t)
        # p = np.transpose(np.asarray(p))
        # plt.figure()
        # plt.pcolormesh(t, f, p, shading='gouraud')
        # plt.ylabel('Frequency [Hz]')
        # plt.xlabel('Time [sec]')

    def test_apply_filter(self):
        """Band-pass filter the BVP between 30 and 120 bpm."""
        self.vals = processing.apply_filter(
            self.bvp,
            TEST_sample_freq_bvp,
            order=3,
            cutoff_bpm=(30, 120)
        )
        self._test_plot('Filtered blood volume pulse signal')

    def test_resample_sequence(self):
        """Resample blood pressure to 30 Hz, once by rate and once by timestamps."""
        self.vals = processing.resample_sequence(
            self.bp,
            new_sample_freq=30,
            sample_freq=TEST_sample_freq_ref_bp,
        )
        self._test_plot('Resampled cbp by sample freq')
        # NOTE(review): the synthetic timestamps step by 1e3 per 1 kHz sample,
        # which suggests microseconds — confirm the unit resample_sequence expects.
        self.vals = processing.resample_sequence(
            self.bp,
            new_sample_freq=30,
            seq_ts=np.arange(0, len(self.bp))*1e3,
        )
        self._test_plot('Resampled cbp by timestamps')

    def _test_plot(self, title):
        """Plot self.vals in a new titled figure."""
        plt.figure()
        plt.title(title)
        plt.plot(self.vals)

    def _test_plot_seq(self, seq, title):
        """Plot an arbitrary sequence in a new titled figure."""
        plt.figure()
        plt.title(title)
        plt.plot(seq)
# Run every plausibility check when this module is executed directly; the
# resulting figures are inspected manually.
if __name__ == '__main__':
    testProcessing = TestProcessing()
    testProcessing.test_compute_hr_from_hr_sequence()
    testProcessing.test_compute_hr_from_spectrum_peak_det()
    testProcessing.test_compute_hr_from_spectrum_max()
    testProcessing.test_compute_snr()
    testProcessing.test_apply_filter()
    testProcessing.test_resample_sequence()
# %%
|
"""This module defines the views corresponding to the tasks."""
import logging
from datetime import date
from drf_yasg.utils import swagger_auto_schema
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from maintenancemanagement.models import (
Field,
FieldGroup,
FieldObject,
File,
Task,
)
from maintenancemanagement.serializers import (
FieldObjectCreateSerializer,
FieldObjectValidationSerializer,
TaskCreateSerializer,
TaskDetailsSerializer,
TaskListingSerializer,
TaskSerializer,
TaskTemplateRequirementsSerializer,
TaskUpdateSerializer,
TriggerConditionsCreateSerializer,
TriggerConditionsValidationSerializer,
)
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from usersmanagement.models import Team, UserProfile
from usersmanagement.views.views_team import belongs_to_team
from utils.methods import parse_time
logger = logging.getLogger(__name__)

# Permission codenames checked throughout the task views.
VIEW_TASK = "maintenancemanagement.view_task"
CHANGE_TASK = "maintenancemanagement.change_task"
ADD_TASK = "maintenancemanagement.add_task"
DELETE_TASK = "maintenancemanagement.delete_task"
# Shared log message template for update operations.
UPDATED_LOGGER = "{user} UPDATED {object} with {params}"
class TaskList(APIView):
    r"""
    \n# List all tasks or create a new one.
    Parameter :
    request (HttpRequest) : the request coming from the front-end
    Return :
    response (Response) : the response.
    GET request : list all tasks and return the data
    POST request :
    - create a new task, send HTTP 201. If the request is not valid,\
        send HTTP 400.
    - If the user doesn't have the permissions, it will send HTTP 401.
    - The request must contain name (the name of the task (String)) and \
        description (a description of the task (String))
    - The request can also contain :
        - end_date (String): Date (format DD-MM-YYYY) of the deadline
        - time (String): estimated duration of the task
        - is_template (Boolean): boolean to specify if this task is \
            just a template or not
        - equipment (int): an id which refers to the concerned equipment
        - teams (List<int>): an id list of the teams in charge of this task
        - task_type (int): an id which refers to the task_type of this task
        - files (List<int>): an id list of the files explaining this task
    """

    @swagger_auto_schema(
        operation_description='Send the list of Task in the database.',
        query_serializer=None,
        responses={
            200: TaskListingSerializer(many=True),
            401: "Unhauthorized",
        },
    )
    def get(self, request):
        """Send the list of Task in the database."""
        if request.user.has_perm(VIEW_TASK):
            # ?template=true restricts the listing to task templates.
            only_template = request.GET.get("template", None)
            if only_template == "true":
                tasks = Task.objects.filter(is_template=True)
            else:
                # Real tasks: open ones first, then ordered by deadline.
                tasks = Task.objects.filter(is_template=False).order_by('over', 'end_date')
            serializer = TaskListingSerializer(tasks, many=True)
            return Response(serializer.data)
        return Response(status=status.HTTP_401_UNAUTHORIZED)

    @swagger_auto_schema(
        operation_description='Add a Task into the database.',
        query_serializer=TaskCreateSerializer(many=False),
        responses={
            201: TaskCreateSerializer(many=False),
            400: "Bad request",
            401: "Unhauthorized",
        },
    )
    def post(self, request):
        """Add a Task into the database."""
        if request.user.has_perm(ADD_TASK):
            # Conditions are popped out of request.data before the task
            # serializer sees it; task_triggered is True when the request
            # carried no trigger conditions.
            conditions, task_triggered = self._extract_conditions_from_data(request)
            task_serializer = TaskCreateSerializer(data=request.data)
            if task_serializer.is_valid():
                # Validate all conditions before creating anything so an
                # invalid condition never leaves a half-created task.
                error = self._validate_conditions(conditions)
                if error:
                    return error
                task = task_serializer.save()
                task.is_triggered = task_triggered
                task.created_by = request.user
                task.save()
                logger.info("{user} CREATED Task with {params}".format(user=request.user, params=request.data))
                self._save_conditions(request, conditions, task)
                return Response(task_serializer.data, status=status.HTTP_201_CREATED)
            return Response(task_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        return Response(status=status.HTTP_401_UNAUTHORIZED)

    def _extract_conditions_from_data(self, request):
        """Pop trigger/end conditions from request.data.

        Returns ((trigger_conditions, end_conditions), task_triggered) where
        task_triggered is True when no trigger conditions were supplied.
        """
        trigger_conditions = request.data.pop('trigger_conditions', None)
        end_conditions = request.data.pop('end_conditions', None)
        if not end_conditions:
            # Every task needs at least one end condition; fall back to a
            # simple checkbox the user ticks to finish the task.
            end_condition_fields = Field.objects.filter(field_group=FieldGroup.objects.get(name="End Conditions"))
            check_box = end_condition_fields.get(name="Checkbox")
            end_conditions = [
                {
                    'field': check_box.id,
                    'value': None,
                    'description': 'Finish task'
                }
            ]  # Add default end_condition
        return (trigger_conditions, end_conditions), trigger_conditions is None

    def _validate_conditions(self, conditions):
        """Validate every condition; return a 400 Response on the first invalid one, else None."""
        (trigger_conditions, end_conditions) = conditions
        if trigger_conditions:
            for trigger_condition in trigger_conditions:
                validation_serializer = TriggerConditionsValidationSerializer(data=trigger_condition)
                if not validation_serializer.is_valid():
                    return Response(validation_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        if end_conditions:
            for end_condition in end_conditions:
                validation_serializer = FieldObjectValidationSerializer(data=end_condition)
                if not validation_serializer.is_valid():
                    return Response(validation_serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def _save_conditions(self, request, conditions, task):
        """Persist the (already validated) conditions as FieldObjects on the task."""
        (trigger_conditions, end_conditions) = conditions
        if trigger_conditions:
            for trigger_condition in trigger_conditions:
                # Attach the condition to the freshly created task.
                trigger_condition.update({'described_object': task})
                condition_serializer = TriggerConditionsCreateSerializer(data=trigger_condition)
                if condition_serializer.is_valid():
                    condition_serializer.save()
                    logger.info(
                        "{user} CREATED FieldObject with {params}".format(user=request.user, params=trigger_condition)
                    )
        if end_conditions:
            for end_condition in end_conditions:
                end_condition.update({'described_object': task})
                condition_serializer = FieldObjectCreateSerializer(data=end_condition)
                if condition_serializer.is_valid():
                    condition_serializer.save()
                    logger.info(
                        "{user} CREATED FieldObject with {params}".format(user=request.user, params=end_condition)
                    )
class TaskDetail(APIView):
    r"""
    \n# Retrieve, update or delete a task.
    Parameters :
    request (HttpRequest) : the request coming from the front-end
    id (int) : the id of the task
    Return :
    response (Response) : the response.
    GET request : return the task's data.
    PUT request : change the task with the data on the request \
        or if the data isn't well formed, send HTTP 400.
    DELETE request: delete the task and send HTTP 204.
    If the user doesn't have the permissions, it will send HTTP 401.
    If the id doesn't exist, it will send HTTP 404.
    The PUT request can contain one or more of the following fields :
    - name (String): The name of the task
    - description (String): The description of the task
    - end_date (String): Date (format DD-MM-YYYY) of the deadline
    - time (String): estimated duration of the task
    - is_template (Boolean): boolean to specify if this task is just \
        a template or not
    - equipment (int): an id which refers to the concerned equipment
    - teams (List<int>): an id list of the teams in charge of this task
    - task_type (int): an id which refers to the task_type of this task
    - files (List<int>): an id list of the files explaining this task
    """

    @swagger_auto_schema(
        operation_description='Send the Task corresponding to the given key.',
        query_serializer=None,
        responses={
            200: TaskDetailsSerializer(many=False),
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def get(self, request, pk):
        """Send the Task corresponding to the given key."""
        try:
            task = Task.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # Users assigned to the task may view it even without the global
        # view permission.
        if request.user.has_perm(VIEW_TASK) or participate_to_task(request.user, task):
            serializer = TaskDetailsSerializer(task)
            return Response(serializer.data)
        return Response(status=status.HTTP_401_UNAUTHORIZED)

    @swagger_auto_schema(
        operation_description='Update the Task corresponding to the given key.',
        query_serializer=TaskUpdateSerializer(many=False),
        responses={
            200: TaskDetailsSerializer(many=False),
            400: "Bad request",
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def put(self, request, pk):
        """Update the Task corresponding to the given key."""
        if request.user.has_perm(CHANGE_TASK):
            try:
                task = Task.objects.get(pk=pk)
                data = request.data.copy()  # Because request.data is immutable and we will modify its content
                end_conditions = data.pop('end_conditions', None)
                error = self._update_end_conditions(request, end_conditions, task, data)
                if error:
                    return error
                if 'duration' in request.data.keys():
                    # Duration arrives as a string and is stored parsed.
                    data.update({'duration': parse_time(request.data['duration'])})
                serializer = TaskUpdateSerializer(task, data=data, partial=True)
                if serializer.is_valid():
                    logger.info(UPDATED_LOGGER.format(user=request.user, object=repr(task), params=data))
                    task = serializer.save()
                    # Filling the last end condition may mark the task as
                    # over and trigger a recurrent follow-up task.
                    self._check_if_over(request, task)
                    serializer_details = TaskDetailsSerializer(task)
                    return Response(serializer_details.data)
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            except ObjectDoesNotExist:
                return Response(status=status.HTTP_404_NOT_FOUND)
        return Response(status=status.HTTP_401_UNAUTHORIZED)

    def _update_end_conditions(self, request, end_conditions, task, data):
        """Update the task's end-condition FieldObjects.

        Returns a 400 Response when a referenced FieldObject or File does
        not exist, None on success.
        """
        content_type_object = ContentType.objects.get_for_model(task)
        if end_conditions:
            for end_condition in end_conditions:
                try:
                    field_object = FieldObject.objects.get(
                        pk=end_condition.get('id'), object_id=task.id, content_type=content_type_object
                    )
                    if end_condition.get('file', None) is not None:
                        # File end conditions: attach the file to the task
                        # and store its path as the condition value.
                        end_file = File.objects.get(pk=end_condition.get('file'))
                        task.files.add(end_file)
                        logger.info(UPDATED_LOGGER.format(user=request.user, object=repr(task), params=data))
                        task.save()
                        end_condition.update({'value': end_file.file.path})
                    field_object_serializer = FieldObjectCreateSerializer(
                        field_object, data=end_condition, partial=True
                    )
                    if field_object_serializer.is_valid():
                        logger.info(
                            UPDATED_LOGGER.format(user=request.user, object=repr(field_object), params=end_condition)
                        )
                        field_object_serializer.save()
                except ObjectDoesNotExist:
                    return Response(status=status.HTTP_400_BAD_REQUEST)

    def _check_if_over(self, request, task):
        """Mark the task as over when every end condition has a value."""
        content_type_object = ContentType.objects.get_for_model(task)
        end_fields_objects = FieldObject.objects.filter(
            object_id=task.id, content_type=content_type_object, field__field_group__name='End Conditions'
        )
        # Over only if no end condition is left without a value.
        over = True
        for end_field_object in end_fields_objects:
            if end_field_object.value is None:
                over = False
        if over is True:
            task.achieved_by = request.user
            content_type_object = ContentType.objects.get_for_model(task)
            if FieldObject.objects.filter(
                object_id=task.id, content_type=content_type_object, field__field_group__name='Trigger Conditions'
            ).count() > 0:
                # Recurrent task: completing it spawns the next occurrence.
                self._generate_new_task(request, task)
        task.over = over
        logger.info(
            "{user} UPDATED {object} with {params}".format(user=request.user, object=repr(task), params=request.data)
        )
        task.save()

    def _generate_new_task(self, request, task):
        """Clone the finished task into a fresh occurrence with reset conditions."""
        content_type_object = ContentType.objects.get_for_model(task)
        old_trigger_conditions = FieldObject.objects.filter(
            object_id=task.id, content_type=content_type_object, field__field_group__name='Trigger Conditions'
        )
        end_fields_objects = FieldObject.objects.filter(
            object_id=task.id, content_type=content_type_object, field__field_group__name='End Conditions'
        )
        # Clearing pk and saving makes Django insert a copy of the row.
        new_task = Task.objects.get(pk=task.pk)
        new_task.pk = None
        new_task.achieved_by = None
        new_task.save()
        for old_trigger_condition in old_trigger_conditions:
            self._create_new_trigger_condition(old_trigger_condition, new_task)
        for end in end_fields_objects:
            # End conditions are recreated without a value (not yet fulfilled).
            FieldObject.objects.create(described_object=new_task, field=end.field, description=end.description)
        logger.info("{user} TRIGGER RECURRENT TASK ON {task}".format(user=request.user, task=new_task))

    def _create_new_trigger_condition(self, trigger_condition, task):
        """Duplicate one trigger condition onto the newly generated task."""
        # NOTE(review): this rebinds the same object; clearing pk and saving
        # makes Django insert a copy, leaving the original row untouched.
        new_trigger_condition = trigger_condition
        new_trigger_condition.pk = None
        new_trigger_condition.described_object = task
        if trigger_condition.field.name == 'Recurrence':
            new_trigger_condition.save()
            # The first '|'-separated element is parsed as the recurrence
            # period added to today's date.
            task.end_date = date.today() + parse_time(trigger_condition.value.split('|')[0])
            task.save()
        elif trigger_condition.field.name == 'Frequency':
            splited = trigger_condition.value.split('|')
            trigger_condition.value = '|'.join(splited[:3])
            # NOTE(review): presumably splited[0] is the frequency step and
            # splited[1] the id of the tracked FieldObject; the new threshold
            # is that field's current value plus the step — confirm format.
            new_frequency = float(FieldObject.objects.get(id=int(splited[1])).value) + float(splited[0])
            trigger_condition.value += f'|{new_frequency}'
            trigger_condition.save()
        else:
            trigger_condition.save()

    @swagger_auto_schema(
        operation_description='Delete the Task corresponding to the given key.',
        query_serializer=None,
        responses={
            204: "No content",
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def delete(self, request, pk):
        """Delete the Task corresponding to the given key."""
        try:
            task = Task.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if request.user.has_perm(DELETE_TASK):
            logger.info("{user} DELETED {object}".format(user=request.user, object=repr(task)))
            task.delete()
            return Response(status=status.HTTP_204_NO_CONTENT)
        return Response(status=status.HTTP_401_UNAUTHORIZED)
class AddTeamToTask(APIView):
    r"""
    \n# Assign a team to a task.
    Parameters :
    request (HttpRequest) : the request coming from the front-end
    id (int) : the id of the task
    Return :
    response (Response) : the response.
    POST request : add team to task
    PUT request : remove team from task
    If the user doesn't have the permissions, it will send HTTP 401.
    If the task or team id doesn't exist, it will send HTTP 404.
    Both request must contain :
    - id_task : the id of the task we want to edit
    - id_team : the id ot the team we want to add/remove \
        to/from the task
    """

    @swagger_auto_schema(
        operation_description='Assign a team to a task.',
        query_serializer=None,
        responses={
            201: "Created",
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def post(self, request):
        """Assign a team to a task."""
        if request.user.has_perm(CHANGE_TASK):
            try:
                task = Task.objects.get(pk=request.data["id_task"])
                team = Team.objects.get(pk=request.data["id_team"])
            except ObjectDoesNotExist:
                # Unknown ids previously escaped as an unhandled exception
                # (HTTP 500); report 404 like the other task views do.
                return Response(status=status.HTTP_404_NOT_FOUND)
            # Log the assignment symmetrically to the removal in put().
            logger.info(
                "{user} ADDED {team} TO {task}".format(user=request.user, team=repr(team), task=repr(task))
            )
            task.teams.add(team)
            return Response(status=status.HTTP_201_CREATED)
        return Response(status=status.HTTP_401_UNAUTHORIZED)

    @swagger_auto_schema(
        operation_description='Remove a team from a task.',
        query_serializer=None,
        responses={
            201: "Created",
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def put(self, request):
        """Remove a team from a task."""
        if request.user.has_perm(CHANGE_TASK):
            try:
                task = Task.objects.get(pk=request.data["id_task"])
                team = Team.objects.get(pk=request.data["id_team"])
            except ObjectDoesNotExist:
                return Response(status=status.HTTP_404_NOT_FOUND)
            logger.info(
                "{user} REMOVED {task} FROM {team}".format(user=request.user, task=repr(task), team=repr(team))
            )
            task.teams.remove(team)
            return Response(status=status.HTTP_201_CREATED)
        return Response(status=status.HTTP_401_UNAUTHORIZED)
class TeamTaskList(APIView):
    r"""
    \n# List every task a given team is assigned to.
    Parameter :
    request (HttpRequest) : the incoming request
    pk (int) : the team's database id.
    Return :
    response (Response) : the response.
    GET request : list all tasks of a team.
    """

    @swagger_auto_schema(
        operation_description='Send the list of Task corresponding to the given Team key.',
        query_serializer=None,
        responses={
            200: TaskSerializer(many=True),
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def get(self, request, pk):
        """Send the list of Task corresponding to the given Team key."""
        # Unknown team ids are reported before the permission check, as in
        # the original control flow.
        try:
            team = Team.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        if not request.user.has_perm(VIEW_TASK):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        team_tasks = team.task_set.all()
        return Response(TaskSerializer(team_tasks, many=True).data)
class UserTaskList(APIView):
    r"""
    \n# List every task the given user is assigned to.
    Parameter :
    request (HttpRequest) : the incoming request
    pk (int) : the user's database id.
    Return :
    response (Response) : the response.
    GET request : list all tasks of the user.
    """

    @swagger_auto_schema(
        operation_description='Send the list of Task corresponding to the given User key.',
        query_serializer=None,
        responses={
            200: TaskListingSerializer(many=True),
            401: "Unhauthorized",
            404: "Not found",
        },
    )
    def get(self, request, pk):
        """Send the list of Task corresponding to the given User key."""
        try:
            user = UserProfile.objects.get(pk=pk)
        except ObjectDoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # A user may always consult their own task list.
        allowed = request.user.has_perm(VIEW_TASK) or request.user == user
        if not allowed:
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        team_ids = user.groups.all().values_list("id", flat=True).iterator()
        user_tasks = Task.objects.filter(
            teams__pk__in=team_ids, is_template=False
        ).distinct().order_by('over', 'end_date')
        return Response(TaskListingSerializer(user_tasks, many=True).data)
@swagger_auto_schema(
    operation_description='Check if a user is assigned to the task.',
    query_serializer=None,
    responses={
        200: "Ok",
    },
)
def participate_to_task(user, task):
    r"""\n# Check if a user is assigned to the task (member of an assigned team)."""
    team_ids = task.teams.values_list("id", flat=True).iterator()
    return any(
        belongs_to_team(user, Team.objects.get(id=team_id))
        for team_id in team_ids
    )
class TaskRequirements(APIView):
    """Expose the conditions (and task templates) needed to create a task."""

    @swagger_auto_schema(
        operation_description='Send the End Conditions and Trigger Conditions. \
        If specified, send the task templates as well.',
    )
    def get(self, request):
        """Send the End Conditions and Trigger Conditions. \
        If specified, send the task templates as well."""
        if not request.user.has_perm(ADD_TASK):
            return Response(status=status.HTTP_401_UNAUTHORIZED)
        # NOTE(review): the serializer is instantiated with a dummy value (1),
        # mirroring the original behavior.
        requirements = TaskTemplateRequirementsSerializer(1)
        return Response(requirements.data, status=status.HTTP_200_OK)
|
'''
Given a binary tree, return the zigzag level order traversal of its nodes' values. (ie, from left to right,
then right to left for the next level and alternate between).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its zigzag level order traversal as:
[
[3],
[20,9],
[15,7]
]
ideas of BFS
'''
from tree_utils import Tree
import unittest
from collections import deque
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding a value and two child links."""

    def __init__(self, x):
        # Store the payload; both children start out detached.
        self.val = x
        self.left = self.right = None
class Solution:
    def zigzagLevelOrder(self, root):
        """Return the zigzag level-order traversal of the tree.

        Levels alternate direction: the first level is left-to-right, the
        second right-to-left, and so on. Nodes whose ``val`` is None are
        skipped, matching the original implementation.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        result = []
        frontier = deque([root])
        left_to_right = True
        while frontier:
            # Consume exactly one level per outer iteration: the frontier
            # currently holds all nodes of the same depth.
            values = []
            for _ in range(len(frontier)):
                node = frontier.popleft()
                if node.val is not None:
                    values.append(node.val)
                if node.left:
                    frontier.append(node.left)
                if node.right:
                    frontier.append(node.right)
            result.append(values if left_to_right else values[::-1])
            left_to_right = not left_to_right
        return result
class TestBinaryTree(unittest.TestCase):
    """Plausibility test for Solution.zigzagLevelOrder."""

    def setUp(self):
        self.solution = Solution()
        # Tree builds the binary tree from a level-order list; None marks
        # a missing child.
        self.testCase1 = Tree([3,9,20,None,None,15,7])

    def testZigzagLevelOrder(self):
        self.assertEqual([[3],[20,9],[15,7]], self.solution.zigzagLevelOrder(self.testCase1.root))
if __name__=='__main__':
unittest.main() |
# -*- coding: utf-8 -*-
"""Utilities for the PyBEL database manager."""
from ..utils import parse_datetime
def extract_shared_required(config, definition_header='Namespace'):
    """Get the required annotations shared by BEL namespace and annotation resource documents.

    :param dict config: The configuration dictionary representing a BEL resource
    :param str definition_header: ``Namespace`` or ``AnnotationDefinition``
    :rtype: dict
    """
    section = config[definition_header]
    return {
        'keyword': section['Keyword'],
        'created': parse_datetime(section['CreatedDateTime']),
    }
def extract_shared_optional(bel_resource, definition_header='Namespace'):
    """Get the optional annotations shared by BEL namespace and annotation resource documents.

    :param dict bel_resource: A configuration dictionary representing a BEL resource
    :param str definition_header: ``Namespace`` or ``AnnotationDefinition``
    :rtype: dict
    """
    # Result key -> (section, key) in the source resource document.
    shared_mapping = {
        'description': (definition_header, 'DescriptionString'),
        'version': (definition_header, 'VersionString'),
        'author': ('Author', 'NameString'),
        'license': ('Author', 'CopyrightString'),
        'contact': ('Author', 'ContactInfoString'),
        'citation': ('Citation', 'NameString'),
        'citation_description': ('Citation', 'DescriptionString'),
        'citation_version': ('Citation', 'PublishedVersionString'),
        'citation_url': ('Citation', 'ReferenceURL'),
    }

    result = {}
    update_insert_values(bel_resource, shared_mapping, result)

    # The published date needs parsing, so it can't go through the plain
    # string-copy mapping above.
    citation = bel_resource.get('Citation', {})
    if 'PublishedDate' in citation:
        result['citation_published'] = parse_datetime(citation['PublishedDate'])

    return result
def update_insert_values(bel_resource, mapping, values):
    """Copy mapped entries from a BEL resource dictionary into the value dictionary.

    Entries whose section or key is missing from the resource are skipped.

    :param dict bel_resource: A configuration dictionary representing a BEL resource
    :param dict[str,tuple[str,str]] mapping: result key -> (section, key) to read
    :param dict[str,str] values: destination dictionary, updated in place
    """
    for database_column, (section, key) in mapping.items():
        if section not in bel_resource:
            continue
        section_data = bel_resource[section]
        if key in section_data:
            values[database_column] = section_data[key]
def int_or_str(v):
    """Convert a string to an integer when possible; pass other values through.

    ``None`` stays ``None``; a string that does not parse as an integer is
    returned unchanged.

    :type v: Optional[str]
    :rtype: None or str or int
    """
    if v is None:
        return None
    try:
        return int(v)
    except ValueError:
        return v
|
from vb2py.test.testframework import *
import vb2py.utils
import vb2py.vbparser
import vb2py.parserclasses
import sys
# Behavioural tests for vb2py's VB.NET dialect.  Each entry pairs a VB source
# snippet with the variable values expected after converting the snippet to
# Python and executing it, plus (optionally) extra Python code to run after
# the converted snippet.
in_vb_module_tests = []

# NOTE(review): `tests` is not initialised in this file; it presumably comes
# from the star import of vb2py.test.testframework above — confirm.

# String .Length property
tests.append((
    'a = "hello".Length',
    {'a': 5},
))
tests.append((
    'a = ("hello").Length',
    {'a': 5},
))
tests.append((
    'a = ("hello" + "world").Length + 2',
    {'a': 12},
))

# Functions
tests.append((
    """
    Class _MyClass
        Function B(x)
            Return 12 + x
        End Function
    End Class
    """, {
        'x': 22,
    },
    '''
    _a = _MyClass()
    x = _a.B(10)
    '''
))

# Properties
tests.append((
    """
    Public Class _MyObject
        Dim _y = 0
        Public Property A As Integer
            Get
                Return _y
            End Get
            Set(Value as Integer)
                _y = Value
            End Set
        End Property
    End Class
    """, {
        'x': 0,
        'y': 1,
    },
    """
    Set _a = _MyObject()
    x = _a.A
    _a.A = 1
    y = _a.A
    """
))

# Module
tests.append((
    """
    Module _MyClassName
        a = 1
    End Module
    """, {
        'x': 1,
    },
    '''
    _a = _MyClassName()
    x = _a.a
    '''
))

# Operators
in_vb_module_tests.extend([
    # IsNot
    ('a = 1 IsNot 2', {'a': True}),
    ('_x = 1\n_y = _x\na = _x IsNot _y', {'a': False}),
    # Assignment
    ('a = 1\na += 1', {'a': 2}),
    ('a = 1\na -= 1', {'a': 0}),
    ('a = 1\na *= 4', {'a': 4}),
    ('a = 11\na /= 2', {'a': 5.5}),
    ('a = 11\na \\= 2', {'a': 5}),
    ('a = 2\na ^= 3', {'a': 8}),
    ('a = 8\na <<= 2', {'a': 32}),
    ('a = 8\na >>= 2', {'a': 2}),
    ('a = 7\na &= 11', {'a': 3}),
])

# Fake module injected into sys.modules so the Imports tests below can
# resolve "TestModule" without touching the filesystem.
class TestModule:
    path = 1

sys.modules['TestModule'] = TestModule

# Imports
tests.extend([
    ('Imports TestModule\nClass _MyClassName\na = path\nEnd Class\n',
     {'b': 1, 'path': 1},
     '_a = _MyClassName()\nb = _a.a\n',
     ),
    ('Imports b = TestModule\nClass _MyClassName\na = b.path\nEnd Class\n',
     {'b': 1},
     '_a = _MyClassName()\nb = _a.a\n',
     ),
])

# For loops (with and without an inline As-type declaration)
in_vb_module_tests.extend([
    (
        '''
        B = 0
        For A = 1 To 10
            B = B + A
        Next
        ''',
        {'A': 10, 'B': 55}
    ),
    (
        '''
        B = 0
        For A As Integer = 1 To 10
            B = B + A
        Next
        ''',
        {'A': 10, 'B': 55}
    )
])

# Try statements
# Bare catch
in_vb_module_tests.append(('''
Try
    a = 1
Catch
    a = 2
End Try
''', {
    'a': 1,
}))
in_vb_module_tests.append(('''
Try
    a = 1 / 0
Catch
    a = 2
End Try
''', {
    'a': 2,
}))

# Named exception type
in_vb_module_tests.append(('''
Try
    a = 1
Catch ZeroDivisionError
    a = 2
End Try
''', {
    'a': 1
}))
in_vb_module_tests.append(('''
Try
    a = 1 / 0
Catch ZeroDivisionError
    a = 2
End Try
''', {
    'a': 2
}))
in_vb_module_tests.append(('''
Try
    a = 1 / 0
Catch IndexError
    a = 2
Catch
    a = 3
End Try
''', {
    'a': 3
}))
in_vb_module_tests.append(('''
Try
    a = 1 / 0
Catch IndexError
    a = 2
Catch ZeroDivisionError
    a = 3
Catch
    a = 4
End Try
''', {
    'a': 3
}))

# Collect the exception in a variable
in_vb_module_tests.append(('''
Try
    a = 1
Catch ZeroDivisionError As Err
    a = 2
End Try
''', {
    'a': 1
}))
in_vb_module_tests.append(('''
Try
    a = 1 / 0
Catch ZeroDivisionError As _Err
    a = 2
    b = str(_Err)
End Try
''', {
    'a': 2, 'b': "division by zero"
}))
in_vb_module_tests.append(('''
Try
    a = 1 / 0
Catch ZeroDivisionError As _Err
    a = 2
    Exit Try
    b = str(_Err)
End Try
''', {
    'a': 2,
}))

# Conditional catching (Catch ... When <expr>)
in_vb_module_tests.append(('''
b = 1
Try
    a = 1
Catch ZeroDivisionError As Err When b = 1
    a = 2
Catch
    a = 3
End Try
''', {
    'a': 1, 'b': 1,
}))
in_vb_module_tests.append(('''
b = 1
Try
    a = 1 / 0
Catch ZeroDivisionError As Err When b = 1
    a = 2
Catch
    a = 3
End Try
''', {
    'a': 2, 'b': 1,
}))
in_vb_module_tests.append(('''
b = 1
Try
    Try
        a = 1 / 0
    Catch ZeroDivisionError As Err When b = 2
        a = 2
    Catch
        a = 3
    End Try
Catch ZeroDivisionError
    a = 4
End Try
''', {
    'a': 4, 'b': 1,
}))

# Finally
in_vb_module_tests.append(('''
b = 1
Try
    a = 1
Finally
    b = 2
End Try
''', {
    'a': 1, 'b': 2
}))
in_vb_module_tests.append(('''
b = 1
Try
    a = 1
Catch
    a = 2
Finally
    b = 2
End Try
''', {
    'a': 1, 'b': 2
}))
in_vb_module_tests.append(('''
b = 1
Try
    a = 1 / 0
Catch
    a = 2
Finally
    b = 2
End Try
''', {
    'a': 2, 'b': 2
}))
in_vb_module_tests.append(('''
b = 1
Try
    a = 1 / 0
Catch
    a = 2
    Exit Try
    a = 3
Finally
    b = 2
End Try
''', {
    'a': 2, 'b': 2
}))
in_vb_module_tests.append(('''
b = 1
Try
    a = 1
    Throw New ValueError("This is an error")
Catch ValueError
    a = 2
Finally
End Try
''', {
    'a': 2, 'b': 1
}))
in_vb_module_tests.append(('''
b = 1
Try
    Try
        a = 1
        Throw New IndexError("This is an error")
    Catch ValueError
        a = 2
    Finally
        b = 3
    End Try
Catch
End Try
''', {
    'a': 1, 'b': 3
}))

# Array initialization
in_vb_module_tests.append(('''
Dim _A As String() = New String() {"1", "2", "3"}
a = _A(0)
b = _A(1)
c = _A(2)
''', {
    'a': '1', 'b': '2', 'c': '3',
}))
in_vb_module_tests.append(('''
Function _ReturnIt(Array, Index)
    Return Array(Index)
End Function
a = _ReturnIt(New String() {"1", "2", "3"}, 0)
b = _ReturnIt(New String() {"1", "2", "3"}, 1)
c = _ReturnIt(New String() {"1", "2", "3"}, 2)
''', {
    'a': '1', 'b': '2', 'c': '3',
}))

# Closures (VB lambda expressions)
in_vb_module_tests.append(('''
Dim _A = Function(x) x*2
b = _A(10)
''', {
    'b': 20
}))
in_vb_module_tests.append(('''
Dim _A = Function(x) x*2
b = _B(_A, 10)
Function _B(x, y)
    Return x(y)
End Function
''', {
    'b': 20
}))
in_vb_module_tests.append(('''
b = _B(Function(x, y) x+y, 10, 20)
Function _B(x, y, z)
    Return x(y, z)
End Function
''', {
    'b': 30
}))
in_vb_module_tests.append(('''
b = _B(Function() 10)
Function _B(x)
    Return x()
End Function
''', {
    'b': 10
}))

# Inheritance (single and multiple base classes from the vbclasses runtime)
tests.append(('''
Class _Test
    Inherits vbclasses.VBArray
    C = 1
End Class
''', {
    'a': 0
}, '''
_a = _Test(0)
a = len(_a)
'''))
tests.append(('''
Class _Test
    Inherits vbclasses.VBArray, vbclasses._DebugClass
    C = 1
End Class
''', {
    'a': 0, 'b': True,
}, '''
_b = _Test(0)
a = len(_b)
b = _b._logger is None
'''))

# AddressOf should raise NotImplemented
in_vb_module_tests.append(('''
a = 0
b = 0
Try
    b = AddressOf a
Catch NotImplementedError
    a = 1
End Try
Try
    b = int(AddressOf a)
Catch NotImplementedError
    b = 1
End Try
''', {
    'a': 1, 'b': 1,
}))

# NOTE(review): vb2py.vbparser is already imported at the top of the file;
# this re-import is redundant but harmless.
import vb2py.vbparser
vb2py.vbparser.log.setLevel(0)

# Build unittest TestCase classes from the collected tuples; addTestsTo and
# BasicTest come from the testframework star import.  The second class runs
# its snippets wrapped in a VB module container.
TestClass1 = addTestsTo(BasicTest, tests, dialect='vb.net')
TestClass2 = addTestsTo(BasicTest, in_vb_module_tests, dialect='vb.net', container=vb2py.parserclasses.VBModule)

if __name__ == "__main__":
    main()
|
from django.contrib.auth.models import User
from django.db import models
from django_extensions.db.models import TimeStampedModel
from back.models.instance import Instance
from back.models.city import City
class SignalProperties(models.Model):
    """Symptoms and properties of an incident, e.g. the taste of gas.

    A person ticks these as checkboxes (headache, smell, etc.).
    """

    name = models.TextField()
    # (stored value, user-facing Russian label) pairs.
    GROUPS = (
        ('smells', 'Запахи'),
        ('symptoms', 'Симптомы'),
    )
    group = models.CharField(
        max_length=9,
        choices=GROUPS,
        default='smells',
    )
class Signal(TimeStampedModel):
    """A citizen's report (signal) about an incident."""

    text = models.TextField()
    # Ticked smells/symptoms attached to this report.
    properties = models.ManyToManyField(SignalProperties, related_name='reports')
    owner = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL, related_name='reports')
    # Free-form address plus optional precise coordinates.
    location = models.TextField(null=True, blank=True)
    latitude = models.DecimalField(max_digits=14, decimal_places=11, null=True, blank=True)
    longitude = models.DecimalField(max_digits=14, decimal_places=11, null=True, blank=True)
    city = models.ForeignKey(City, on_delete=models.SET_NULL, null=True, related_name='signal')
    # NOTE(review): auto_now overwrites this on *every* save, so the field
    # tracks the last save time, not the incident time — auto_now_add or a
    # plain editable DateTimeField is probably intended; confirm before changing.
    time_of_incident = models.DateTimeField(auto_now=True)
    SIGNAL_STATUS = (
        ('sent', 'Отправлено'),
        ('verified', 'Подтверждено'),
        ('canceled', 'Отмена'),
    )
    status = models.CharField(
        max_length=15,
        choices=SIGNAL_STATUS,
        default='sent',
    )
class SignalMedia(TimeStampedModel):
    """A media attachment (photo/video/document) uploaded with a signal."""

    signal = models.ForeignKey(Signal, null=True, on_delete=models.SET_NULL, related_name='media')
    file = models.FileField(upload_to='')  # TODO: make the upload path configurable via env
class SignalToInstance(TimeStampedModel):
    """A forwarding of a signal to an authority (instance) and its response."""

    text = models.TextField()
    signal = models.ForeignKey(Signal, null=True, on_delete=models.SET_NULL)
    instance = models.ForeignKey(Instance, null=True, on_delete=models.SET_NULL)
    # NOTE(review): both timestamps below use auto_now, so each is rewritten on
    # every save and they will always be equal — distinct report/response times
    # likely need auto_now_add or manual assignment; confirm intent.
    time_of_report = models.DateTimeField(auto_now=True)
    response = models.TextField()
    time_of_response = models.DateTimeField(auto_now=True)
    STATUSES = (
        ('in_processing', 'В обработке'),
        ('ignore', 'Не дали ответ'),
        ('answered', 'Ответили'),
        ('failed_to_call', 'Не удалось дозвониться'),
        ('other', 'Другое'),
    )
    status = models.CharField(
        max_length=15,
        choices=STATUSES,
        default='in_processing',
    )
    # Free-text explanation used when status is 'other'.
    other_comment = models.TextField()
class SignalToInstanceMedia(TimeStampedModel):
    """A media attachment for a forwarded signal / authority response."""

    signal = models.ForeignKey(SignalToInstance, null=True, on_delete=models.SET_NULL, related_name='media')
    file = models.FileField(upload_to='')  # TODO: make the upload path configurable via env
|
from .reader_unit_test_helpers import (
get_reader,
get_conn,
send_message
)
def test_initial_probe_then_full_throttle():
    """A fresh connection probes with RDY 1, then opens up after a message.

    The reader initially sends RDY 1 to test the connection; once the first
    message arrives it raises RDY to the connection's max-in-flight share.
    """
    r = get_reader()
    conn = get_conn(r)
    assert conn.rdy == 1
    send_message(conn)
    assert conn.rdy == r._connection_max_in_flight()
def test_new_conn_throttles_down_existing_conns():
    """A new connection starts at RDY 1 while existing connections keep theirs.

    NOTE(review): despite the name, the assertions check that conn1's RDY is
    *unchanged* after conn2 joins (max-in-flight share presumably recomputed
    by _connection_max_in_flight) — confirm the intended semantics.
    """
    r = get_reader()
    conn1 = get_conn(r)
    send_message(conn1)
    assert conn1.rdy == r._connection_max_in_flight()
    conn2 = get_conn(r)
    assert conn2.rdy == 1
    assert conn1.rdy == r._connection_max_in_flight()
|
import torch
import numpy as np
import torch.nn as nn
# Create batches with positive samples in first half and negative examples in second half
class BatchSamplerWithNegativeSamples(torch.utils.data.Sampler):
    """Yield batches whose first half are positive indices and second half are
    matched negative indices (indices whose item differs from the positive's)."""

    def __init__(self, pos_sampler, neg_sampler, batch_size, items):
        """
        :param pos_sampler: iterable of indices used as positive examples
        :param neg_sampler: iterable of candidate indices for negative examples
        :param batch_size: total batch size; half positives, half negatives
        :param items: per-index values compared to decide whether a candidate
            negative truly differs from its positive counterpart
        """
        self._pos_sampler = pos_sampler
        self._neg_sampler = neg_sampler
        self._items = items
        assert batch_size % 2 == 0, 'Batch size must be divisible by two for negative samples.'
        self._batch_size = batch_size

    def __iter__(self):
        batch, neg_batch = [], []
        neg_sampler = iter(self._neg_sampler)
        for pos_idx in self._pos_sampler:
            batch.append(pos_idx)
            neg_idx = pos_idx
            # keep sampling until we get a true negative sample, restarting
            # the negative sampler whenever it is exhausted
            while self._items[neg_idx] == self._items[pos_idx]:
                try:
                    neg_idx = next(neg_sampler)
                except StopIteration:
                    neg_sampler = iter(self._neg_sampler)
                    neg_idx = next(neg_sampler)
            neg_batch.append(neg_idx)
            if len(batch) == self._batch_size // 2:
                batch.extend(neg_batch)
                yield batch
                batch, neg_batch = [], []
        return

    def __len__(self):
        # Each yielded batch consumes batch_size // 2 *positive* indices, so
        # the number of full batches is len(pos) // (batch_size // 2).
        # (The previous version divided by the full batch size and therefore
        # under-reported the batch count by a factor of two.)
        return len(self._pos_sampler) // (self._batch_size // 2)
from .uri import Uri |
import numpy  # for numpy storage
import os  # to find files
import time  # for time to complete
import sklearn
import pickle

# (test-data file, number of samples) for each genre, in classifier order;
# a genre's true class index is its position in this list.
DATASETS = [
    ("Data/UkiORBTestingDataV.npy", 173),
    ("Data/LReORBTestingDataV.npy", 172),
    ("Data/MinORBTestingDataV.npy", 89),
    ("Data/HReORBTestingDataV.npy", 186),
    ("Data/EreORBTestingDataV.npy", 197),
    ("Data/PopORBTestingDataV.npy", 208),
    ("Data/CFPORBTestingDataV.npy", 92),
    ("Data/RocORBTestingDataV.npy", 282),
    ("Data/CubORBTestingDataV.npy", 317),
    ("Data/NAPORBTestingDataV.npy", 355),
    ("Data/NreORBTestingDataV.npy", 358),
    ("Data/AExORBTestingDataV.npy", 380),
    ("Data/BORBTestingDataV.npy", 589),
    ("Data/ANMORBTestingDataV.npy", 633),
    ("Data/SymORBTestingDataV.npy", 637),
    ("Data/PImORBTestingDataV.npy", 946),
    ("Data/ExpORBTestingDataV.npy", 981),
    ("Data/RomORBTestingDataV.npy", 964),
    ("Data/RelORBTestingDataV.npy", 1542),
    ("Data/ImpORBTestingDataV.npy", 1912),
]

# Pickled one-vs-rest SVMs, one per genre, same order as DATASETS.
SVM_FILES = [
    "1UKIORBProb.DF", "2LReORBProb.DF", "3MinORBProb.DF", "4HRenORBProb.DF",
    "5ERenORBProb.DF", "6PopORBProb.DF", "7CFPORBProb.DF", "8RocORBProb.DF",
    "9CubORBProb.DF", "10NAPORBProb.DF", "11NRORBProb.DF", "12AEORBProb.DF",
    "13BORBProb.DF", "14ANMORBProb.DF", "15SymORBProb.DF", "16PIORBProb.DF",
    "17EORBProb.DF", "18RomORBProb.DF", "19RelORBProb.DF", "20ImpORBProb.DF",
]

FEATURE_LEN = 10 * 256  # each sample flattens to 10 ORB descriptors x 256 values

# Load every classifier once, up front.  (The previous version re-loaded all
# twenty pickles on every pass through the loop and leaked the file handles.)
svms = []
for svm_file in SVM_FILES:
    with open(svm_file, "rb") as handle:
        svms.append(pickle.load(handle))

# For each genre's held-out test set: ask every SVM for P(class), assign each
# sample to the arg-max classifier, and print how many samples were assigned
# to the correct genre.  Output matches the original: 20 counts, then "done".
for correctnum, (datafile, nsamples) in enumerate(DATASETS):
    testdata = numpy.load(datafile).reshape(nsamples, FEATURE_LEN)
    results = numpy.zeros(shape=(len(svms), nsamples))
    for row, svm in enumerate(svms):
        results[row] = svm.predict_proba(testdata)[:, 1]
    resultsmax = results.argmax(axis=0)
    correct = list(resultsmax).count(correctnum)
    print(correct)
print("done")
# -*- coding: utf-8 -*-
from django.db import models
class CurrencyQuerySet(models.QuerySet):
    """QuerySet helpers for currency lookups."""

    def active(self):
        # Only currencies flagged as enabled.
        return self.filter(is_active=True)

    def default(self):
        # The single default currency; like any get(), raises DoesNotExist /
        # MultipleObjectsReturned if not exactly one row matches.
        return self.get(is_default=True)

    def base(self):
        # The single base currency rates are expressed against.
        return self.get(is_base=True)
class CurrencyManager(models.Manager):
    """Manager whose base queryset is pre-filtered to active currencies.

    NOTE(review): because get_queryset() applies .active(), every query through
    this manager — including default() and base() — silently excludes inactive
    rows; confirm that is intended for the default manager.
    """

    def get_queryset(self):
        return CurrencyQuerySet(self.model, using=self._db).active()

    def default(self):
        return self.get_queryset().default()

    def base(self):
        return self.get_queryset().base()
|
#!/usr/bin/env python
import os
import os.path as op
import sys
import argparse
def parse_interproscan(fi, fo):
    """Aggregate an InterProScan TSV file per query sequence and write results.

    Reads the standard InterProScan tab-separated output, collects the
    InterPro accessions (column 12, comma-separated) and GO terms (column 14,
    pipe-separated) per query ID, and writes one "<query-id><TAB><GO terms
    joined by ';'>" line per query to *fo*.

    :param fi: input InterProScan TSV path
    :param fo: output TSV path
    """
    dic = dict()
    with open(fi, "r") as fhi:
        for line in fhi:
            ps = line.strip().split("\t")
            aid, md5, plen, aly, sname, sdesc, beg, end, score, status, date = ps[0:11]
            iid = ps[11] if len(ps) >= 12 else ''  # InterPro accession(s)
            gid = ps[13] if len(ps) >= 14 else ''  # GO term(s)
            if aid not in dic:
                dic[aid] = [set(), set()]
            for one_iid in iid.split(","):
                if one_iid:
                    dic[aid][0].add(one_iid)
            for one_gid in gid.split("|"):
                if one_gid:
                    dic[aid][1].add(one_gid)
    with open(fo, "w") as fho:
        for aid, (iids, gids) in dic.items():
            # NOTE(review): only the GO terms are written; the InterPro IDs
            # are collected but unused, matching the original behaviour.
            fho.write("%s\t%s\n" % (aid, ";".join(gids)))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='parse interproscan output'
    )
    parser.add_argument(
        'fi', help='input file'
    )
    parser.add_argument(
        'fo', help='output file (tsv)'
    )
    args = parser.parse_args()
    parse_interproscan(args.fi, args.fo)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.