content stringlengths 5 1.05M |
|---|
"""
Skeletonizer: Python Cell Morphology Analysis and Construction Toolkit
KAUST, BESE, Neuro-Inspired Computing Project
(c) 2014-2015. All rights reserved.
"""
"""
Amiramesh module.
"""
import re
import sys
import logging
#
# Node class
#
class Node(object):
    """A graph node located in 3D space."""

    def __init__(self, x=0.0, y=0.0, z=0.0):
        # Coordinates default to the origin.
        self.x, self.y, self.z = x, y, z

    def list(self):
        """Return the coordinates as an [x, y, z] list."""
        return list(self.position())

    def position(self):
        """Return the coordinates as an (x, y, z) tuple."""
        return (self.x, self.y, self.z)
#
# 3D point class
#
class Point3D(Node):
    """A 3D point (extends Node) that additionally carries a diameter."""

    def __init__(self, x=0.0, y=0.0, z=0.0, d=0.0):
        self.x = x
        self.y = y
        self.z = z
        self.diameter = d

    def list(self):
        """Return [x, y, z, diameter]."""
        return list(self.position()) + [self.diameter]

    def position(self):
        """Return the spatial coordinates (x, y, z); diameter excluded."""
        return (self.x, self.y, self.z)

    def set_diameter(self, dia):
        """
        Set diameter value

        :param dia: Float diameter value
        """
        self.diameter = dia
#
# Segment class
#
class Segment(object):
    """Array of Point objects together with start and end node names.

    Length of a segment is the count of points. End points of a
    segment are at the same location as the end nodes.
    """

    def __init__(self, start, end):
        self.start = start        # name of the start node
        self.end = end            # name of the end node
        self.pointcount = None    # declared point count (set by the reader)
        self.points = []          # Point3D objects, attached later

    def __len__(self):
        """Count of points.

        Uses the declared ``pointcount`` when available; before it is set,
        falls back to the number of attached points.  (Previously this
        returned ``None`` in that state, which made ``len()`` raise
        TypeError.)
        """
        if self.pointcount is not None:
            return self.pointcount
        return len(self.points)
#
# Skeleton class
#
class Skeleton(object):
    """Top storage object for a skeleton that knows about
    nodes, segments, and their locations """

    def __init__(self):
        self.nodes = {}      # name -> Node
        self.segments = []   # Segment objects, in file order

    def add_node(self, name, node):
        """Add one Node object to a dictionary
        The name is the key to the dictionary of nodes.
        An existing entry with the same name is kept (setdefault semantics).
        """
        self.nodes.setdefault(name, node)

    def add_segment(self, segment):
        """ Add one Segment object to an array"""
        self.segments.append(segment)

    def add_points(self, points):
        """Add an array of Point objects
        The skeleton needs to be populated with its segments before
        calling this method. Segments need to have the point count
        (Segment.pointcount) for this method to pass the points to
        their correct segments.

        :param points: flat list of points, ordered segment by segment
        """
        offset = 0
        for segment in self.segments :
            # consume exactly `pointcount` points for this segment
            segment.points = points[offset:offset+segment.pointcount]
            offset += segment.pointcount

    def update_diameters(self, xsection_dict,
                         require_complete_xsection = True,
                         outlier_logging_threshold = sys.float_info.max):
        """
        Given a dictionary of cross-sectional data, updates the point diameters
        to match those provided by the cross-section data.
        :param xsection_dict: A dictionary of cross-section data,
            including 'diameter' and 'estimated_diameter', and indexed by
            a (segment_index, point_index) tuple.
        :param require_complete_xsection: If true, assert on missing xsection data
            otherwise, keep previous value.
        :param outlier_logging_threshold: Threshold value for pre-post diameter difference;
            logs special info about points whose new diameters differ by more
            than the specified threshold.
        """
        class UpdateDiameterStats:
            # Running totals for the summary log line emitted at the end.
            cnt_total = 0
            dia_total_pre = 0.0
            dia_total_post = 0.0
            inc_dia_total = 0.0
            dec_dia_total = 0.0
            cnt_inc_total = 0
            cnt_dec_total = 0
            def collect_stats(self, pre_dia, post_dia):
                # Accumulate pre/post sums and count increases vs. decreases.
                self.cnt_total += 1
                self.dia_total_pre += pre_dia
                self.dia_total_post += post_dia
                if post_dia > pre_dia:
                    self.inc_dia_total += (post_dia - pre_dia)
                    self.cnt_inc_total += 1
                elif post_dia < pre_dia:
                    self.dec_dia_total += (pre_dia - post_dia)
                    self.cnt_dec_total += 1
        stats = UpdateDiameterStats()
        logging.info('Updating diameters from cross_sections: total(%s)', len(xsection_dict))
        for sidx in range(0, len(self.segments)):
            s = self.segments[sidx]
            for pidx in range(0, len(s.points)):
                p = s.points[pidx]
                idx = (sidx, pidx)
                # When completeness is required, enter the branch even for a
                # missing key so the assert below reports it; otherwise skip
                # points without cross-section data (keeping their diameter).
                if require_complete_xsection or idx in xsection_dict:
                    assert(idx in xsection_dict), \
                        "Missing index (%s) in xsection dictionary. Expected complete cross-section data." % idx
                    xs = xsection_dict[idx]
                    d = xs['diameter'] # max(xs['diameter'], p.diameter)
                    # sanity check: the stored diameter must still match the
                    # estimate recorded in the cross-section data
                    assert(p.diameter == xs['estimated_diameter']), \
                        "Expected point diameter (%f) to equal xsection estimate (%f)" % \
                        (p.diameter, xs['estimated_diameter'])
                    if abs(p.diameter - d) > outlier_logging_threshold:
                        logging.info('\t Updated OUTLIER diameter of segment point (%i,%i) at pos(%s) [blender pos(%s) normal(%s)], from old(%f) to new(%f), diff(%f)',
                                     sidx, pidx, p.position(), xs['blender_position'], xs['blender_normal'], p.diameter, d, abs(p.diameter - d))
                    else:
                        logging.debug('\t Updated diameter of segment point (%i,%i) from old(%f) to new(%f)',
                                      sidx, pidx, p.diameter, d)
                    stats.collect_stats(p.diameter, d)
                    p.diameter = d
        logging.info("Diameters updated: %i (inc: %i) (dec: %i), diameters total (pre: %f) (post:%f), increased: (total: %f) (avg: %f), decreased: (total: %f) (avg: %f)",
                     stats.cnt_total, stats.cnt_inc_total, stats.cnt_dec_total,
                     stats.dia_total_pre, stats.dia_total_post,
                     stats.inc_dia_total, (stats.inc_dia_total / stats.cnt_inc_total) if stats.cnt_inc_total > 0 else 0,
                     stats.dec_dia_total, (stats.dec_dia_total / stats.cnt_dec_total) if stats.cnt_dec_total > 0 else 0)

    def info(self):
        """Print out the count of Node, Segment and Points objects"""
        c = 0
        for s in self.segments:
            c+= len(s.points)
        return "Nodes : %5i\nSegments : %5i\nPoints : %5i" % (len(self.nodes), len(self.segments), c)
#
# AmirameshReader class
#
class AmirameshReader(object):
    """ Read from a filehandle, parse, return a Skeleton object"""

    # Pre-compiled section patterns.  Raw strings fix the original
    # non-raw literals ('\d', '\+', '\-'), which are invalid escape
    # sequences (SyntaxWarning on modern Python, slated to become an
    # error).  Compiling once also avoids re-parsing per input line.
    _XYZ_RE = re.compile(r'([\d\.e\+\-]+) ([\d\.e\+\-]+) ([\d\.e\+\-]+)')
    _EDGE_RE = re.compile(r'(\d+) (\d+)')
    _INT_RE = re.compile(r'(\d+)')
    _FLOAT_RE = re.compile(r'([\d\.e\+\-]+)')

    def parse(self, f):
        """Parse an AmiraMesh skeleton file.

        Sections are introduced by lines starting with '@':
        @1 node coordinates, @2 segment end-node indices, @3 per-segment
        point counts, @4 point coordinates, @5 per-point diameters
        ('nan' diameters are mapped to 0.0).

        :param f: iterable of text lines (e.g. an open file handle)
        :return: populated Skeleton
        """
        skel = Skeleton()   # storage object
        points = []         # flat list of Point3D, split per segment at the end
        counter = 0         # current '@' section number
        linecounter = 0     # line index within the current section
        for line in f:
            # trim white space, including \r,\n
            line = line.strip()
            # ignore empty lines
            if not line:
                continue
            header = line.startswith("@")
            # skip the file intro before the first section header
            if counter == 0 and not header:
                continue
            if header:
                counter += 1
                linecounter = 0
                continue
            if counter == 1:    # nodes
                x, y, z = self._XYZ_RE.search(line).groups()
                skel.add_node(linecounter, Node(float(x), float(y), float(z)))
                linecounter += 1
            elif counter == 2:  # segments to nodes
                start, end = self._EDGE_RE.search(line).groups()
                skel.add_segment(Segment(int(start), int(end)))
            elif counter == 3:  # point count within segment
                count = self._INT_RE.search(line).group(1)
                skel.segments[linecounter].pointcount = int(count)
                linecounter += 1
            elif counter == 4:  # point coordinates within a segment
                x, y, z = self._XYZ_RE.search(line).groups()
                points.append(Point3D(float(x), float(y), float(z)))
            elif counter == 5:  # diameter
                # empty values replaced by 0
                if line == "nan":
                    line = "0.0"
                dia = float(self._FLOAT_RE.search(line).group(1))
                points[linecounter].set_diameter(dia)
                linecounter += 1
        # add points in the end for efficiency
        skel.add_points(points)
        return skel
|
import xml.etree.ElementTree as ET
from lxml import objectify
from xml.dom import minidom
import sys
import re
# Take h files and create xml files
# fixup the XML such that it accounts for padding
# fixup the XML such that enums and vals are included from other headers
# Set to 1 for verbose tracing (uses Python 2 print statements below).
debug=0
# Input XML file is the first command-line argument.
tree = ET.parse(sys.argv[1])
print("XML Tree:", tree)
root = tree.getroot()
# have to make each name unique
# Global counter used to generate unique "paddingN" identifiers.
i=0
# Length of the '<?xml ...?>' declaration that prettify() slices off.
skip_len = len('<?xml version="1.0" ?>')
def prettify(elem):
    """Return a pretty-printed XML string for the Element.

    The leading XML declaration (skip_len characters plus its newline)
    and the trailing newline are sliced off the minidom output.
    """
    rough_string = ET.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent=" ")[skip_len+1:-1:]
def get_index(findme, elem_list):
    """Return the position of *findme* in *elem_list*, or -1 if absent."""
    for position, candidate in enumerate(elem_list):
        if candidate == findme:
            return position
    return -1
def get_structs(root):
    """Collect every direct child of *root* whose 'type' attribute is 'struct'."""
    struct_nodes = []
    for child in root:
        if child.get('type') == 'struct':
            struct_nodes.append(child)
    if debug:
        # NOTE: Python 2 print statement (file predates Python 3).
        print len(struct_nodes)
    return struct_nodes
def insert_padding(struct, elem, pad_size, member_offset):
    """Insert a synthetic "jay_symbol" padding member of *pad_size* bytes
    into *struct* at index *member_offset* (just before *elem*).

    Uses the module-level counter ``i`` to keep padding identifiers unique.
    """
    global i
    attrs = {
        'ident': "padding" + str(i),
        'length': str(pad_size),
        'base-type-builtin': 'padding',
        'bit-size': str(pad_size * 8),
    }
    pad_node = ET.Element("jay_symbol", **attrs)
    i += 1
    pad_node.tail = "\n "
    struct.insert(member_offset, pad_node)
def fixup_struct(struct):
    """Walk *struct*'s members in declaration order and insert explicit
    padding members wherever a member's offset does not immediately
    follow the previous member's end, including trailing padding up to
    the struct's declared byte size.  Returns the (mutated) struct.
    """
    if debug:
        # NOTE: Python 2 print statements throughout the debug branches.
        print struct.get('ident')
        print "+"*20
    old_elem_end = 0            # byte offset one past the previous member
    cur_idx = 0
    total = len(struct)         # grows as padding members are inserted
    tot_size = struct.get('bit-size')
    tot_bytes = int(tot_size)/8
    cur_size = 0                # NOTE(review): never read afterwards
    while (cur_idx < total):
        elem = struct[cur_idx]
        pad_size = 0
        offset = int(elem.get('offset'))
        if debug:
            print "old_elem_end:", old_elem_end
            print "offset:", offset
        if ( old_elem_end != offset ):
            # gap before this member -> fill it with padding; the insert
            # shifts the current member, so advance past the new node
            pad_size = offset-old_elem_end
            if debug:
                print "pad with %d bytes" % pad_size
            insert_padding(struct, elem, pad_size, cur_idx)
            cur_idx += 1
            total += 1
        bit_size = int(elem.get('bit-size'))
        old_elem_end = ( (bit_size/8) + offset)
        cur_idx += 1
        if debug:
            print "=="
    global i
    if old_elem_end < tot_bytes:
        # trailing padding up to the declared struct size
        end_pad_amount = tot_bytes - old_elem_end
        kwargs = {'ident':"padding" + str(i), 'length':str(end_pad_amount), 'base-type-builtin':'padding', 'bit-size':str(end_pad_amount*8)}
        pad_node = ET.Element("jay_symbol" ,**kwargs)
        i+=1
        elem.tail += ' '
        pad_node.tail = "\n "
        #import ipdb;ipdb.set_trace()
        struct.append(pad_node)
    return struct
# Replace every struct node with its padding-fixed version, preserving
# its original position among root's children.
struct_nodes = get_structs(root)
for struct in struct_nodes:
    if debug:
        print '-'*20
    fixed = fixup_struct(struct)
    idx = get_index(struct, root)
    root.remove(struct)
    root.insert(idx, fixed)
    if debug:
        print '-'*20
# Write the result next to the input: foo.xml -> foo_fixup.xml
fname = sys.argv[1]
tree.write(fname[0:-4] + "_fixup" + ".xml")
|
import opengm
import numpy
np = numpy
#---------------------------------------------------------------
# MinSum with SelfFusion
#---------------------------------------------------------------
numpy.random.seed(42)
# Alternative benchmark models, kept for reference:
#gm=opengm.loadGm("/home/tbeier/datasets/image-seg/3096.bmp.h5","gm")
#gm=opengm.loadGm("/home/tbeier/datasets/image-seg/175032.bmp.h5","gm")
#gm=opengm.loadGm("/home/tbeier/datasets/image-seg/291000.bmp.h5","gm")
gm=opengm.loadGm("/home/tbeier/datasets/image-seg/148026.bmp.h5","gm")
#gm=opengm.loadGm("/home/tbeier/datasets/knott-3d-450/gm_knott_3d_102.h5","gm")#(ROTTEN)
# NOTE(review): this load overwrites the image-seg model loaded above.
gm=opengm.loadGm("/home/tbeier/datasets/knott-3d-300/gm_knott_3d_078.h5","gm")
#gm=opengm.loadGm("/home/tbeier/datasets/knott-3d-150/gm_knott_3d_038.h5","gm")
#---------------------------------------------------------------
# Minimize
#---------------------------------------------------------------
#get an instance of the optimizer / inference-algorithm
# (Python 2 print statements throughout this script.)
print gm
# Parameter grids.  NOTE(review): n and r are only printed below and are
# never fed into the inference runs -- presumably leftover sweep code.
N = np.arange(0.0, 10, 0.5)
R = np.arange(0.1, 0.99, 0.1)
print N
print R
for n in N:
    for r in R:
        print n,r
# Three-stage fusion pipeline, timed against the multicut baseline below.
with opengm.Timer("with new method", verbose=False) as timer:
    # Stage 1: QPBO-based proposal generation.
    infParam = opengm.InfParam(
        numStopIt=0,
        numIt=40,
        generator='qpboBased'
    )
    inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
    # inf.setStartingPoint(arg)
    # start inference (in this case verbose infernce)
    visitor=inf.verboseVisitor(printNth=1,multiline=False)
    inf.infer(visitor)
    arg = inf.arg()
    # Stage 2: randomized hierarchical-clustering proposals, warm-started
    # from the stage-1 labeling.
    proposalParam = opengm.InfParam(
        randomizer = opengm.weightRandomizer(noiseType='normalAdd',noiseParam=1.000000001, ignoreSeed=True),
        stopWeight=0.0,
        reduction=0.85,
        setCutToZero=False
    )
    infParam = opengm.InfParam(
        numStopIt=20,
        numIt=20,
        generator='randomizedHierarchicalClustering',
        proposalParam=proposalParam
    )
    inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
    inf.setStartingPoint(arg)
    # start inference (in this case verbose infernce)
    visitor=inf.verboseVisitor(printNth=1,multiline=False)
    inf.infer(visitor)
    arg = inf.arg()
    # Stage 3: QPBO refinement of the clustering result.
    infParam = opengm.InfParam(
        numStopIt=0,
        numIt=40,
        generator='qpboBased'
    )
    inf=opengm.inference.IntersectionBased(gm, parameter=infParam)
    inf.setStartingPoint(arg)
    # start inference (in this case verbose infernce)
    visitor=inf.verboseVisitor(printNth=1,multiline=False)
    inf.infer(visitor)
    arg = inf.arg()
    timer.interval
# Baseline: exact multicut solver for comparison.
with opengm.Timer("with multicut method"):
    infParam = opengm.InfParam(
        workflow="(IC)(TTC-I,CC-I)"
    )
    inf=opengm.inference.Multicut(gm, parameter=infParam)
    # inf.setStartingPoint(arg)
    # start inference (in this case verbose infernce)
    visitor=inf.verboseVisitor(printNth=1,multiline=False)
    inf.infer(visitor)
    arg = inf.arg()
|
import numpy as np
import urllib2 as ulib
import csv
import psycopg2
import sys
import time
import json
'''
to do:
SUPERTARGET
BRENDA
DsigDB
'''
# Make the shared utility/config modules importable.
sys.path.append('../../utility')
from map_id_util import mapUniprotToKEGG,mapKEGGToCAS,mapPCToCAS,baseURL
sys.path.append('../../config')
from database_config import databaseConfig as dcfg
# Module-level DB connection and cursor shared by all helpers below.
connDB = psycopg2.connect(database=dcfg['name'],user=dcfg['user'],password=dcfg['passwd'],
    host=dcfg['host'],port=dcfg['port'])
cur = connDB.cursor()
# Column lists used by loadTable() per table, plus extra WHERE conditions.
columnTable = {'protein':["pro_id","pro_uniprot_id"],'compound':['com_id','com_cas_id'
    ,'com_pubchem_id','com_drugbank_id'], 'int':['com_id','pro_id']}
condTable = {'int':["weight = 1"]}
def main():
    """Pull compound-protein interactions from KEGG or MATADOR, keep only
    pairs whose protein and compound already exist in the local database
    and which are not yet recorded, and insert the remainder.

    Usage: python pullCompProtInteraction.py [kegg | matador tsv_file]
    (Python 2 code: print statements and dict.iteritems.)
    """
    if (len(sys.argv)<2 or len(sys.argv)>3):
        print "Error Usage: python pullCompProtInteraction.py [kegg|matador tsv_file]"
        return
    source = sys.argv[1]
    if source == "matador":
        dataPath = sys.argv[2]
    print "load known data"
    # Known proteins/compounds with their local (ijah) ids.
    protein = loadTable('protein',columnTable['protein'])
    ijahUniprotDict = {p[1]:p[0] for p in protein}
    uniProtId = set([p[1] for p in protein])
    compound = loadTable('compound',columnTable['compound'])
    casId = set([c[1] for c in compound])
    ijahCasDict = {c[1]:c[0] for c in compound}
    # Interactions already stored (weight = 1 only, per condTable).
    knownInt = loadTable('compound_vs_protein',columnTable['int'],condTable['int'])
    knownInt = set(knownInt)
    newInt = dict()
    if source == "kegg":
        print "Get KEGG ID"
        upToKegg,keggSet = mapUniprotToKEGG(list(uniProtId))
        print "Get Interaction Data"
        keggInt,kcID = getKEGGinteractionData(list(keggSet))
        print "Get CAS ID"
        kCasDict = mapKEGGToCAS(list(kcID))
        # uniprot id -> list of KEGG drug ids interacting with it
        for prot in protein:
            newInt[prot[1]] = []
            if prot[1] in upToKegg:
                for i in upToKegg[prot[1]]:
                    if i in keggInt:
                        newInt[prot[1]] += keggInt[i]
        # translate KEGG drug ids to CAS ids (unmapped ids are dropped)
        for pK,cL in newInt.iteritems():
            casList = []
            for c in cL:
                if c in kCasDict:
                    casList += [kCasDict[c]]
            newInt[pK] = casList
    elif source == "matador":
        with open(dataPath,'r') as df:
            csvContent = csv.reader(df,delimiter='\t',quotechar='\"')
            pcID = set()
            newInt = dict()
            for i,row in enumerate(csvContent):
                if i==0:
                    continue   # skip header row
                pcID.add(row[0])
                # column 6 holds whitespace-separated protein ids; only the
                # first 6 characters are used as the UniProt accession
                for prot in row[6].split():
                    if prot[:6] in newInt:
                        newInt[prot[:6]] += [row[0]]
                    else:
                        newInt[prot[:6]] = [row[0]]
        # translate PubChem compound ids to CAS ids
        pcCasDict = mapPCToCAS(list(pcID))
        for pKey,cList in newInt.iteritems():
            casList = []
            for c in cList:
                if int(c) in pcCasDict:
                    casList += pcCasDict[int(c)]
            newInt[pKey] = casList
    else:
        print "Invalid dbSource"
        return
    # validate pair with known database
    additionInt = []
    for pKey,cList in newInt.iteritems():
        if pKey in uniProtId:
            pIjahKey = ijahUniprotDict[pKey]
            for c in cList:
                if (c in casId):
                    cIjahkey = ijahCasDict[c]
                    if (pIjahKey,cIjahkey) not in knownInt:
                        additionInt.append((pIjahKey,cIjahkey))
    print len(additionInt)
    insertInteraction(additionInt,source)
    connDB.close()
def insertInteraction(interaction,s):
    """Insert interaction pairs into compound_vs_protein with weight 1.

    Fixes: the original called ``cur.execute()`` with no arguments, which
    raises TypeError, so nothing was ever inserted.  Values are now bound
    as query parameters instead of being interpolated into the SQL string
    (avoids SQL injection and quoting bugs).

    :param interaction: iterable of (pro_id, com_id) tuples
    :param s: source label stored with each row
    """
    query = "INSERT INTO compound_vs_protein VALUES (%s,%s,%s,1)"
    for i in interaction:
        # Column order matches the original: compound id, protein id, source.
        cur.execute(query, (i[1], i[0], s))
    connDB.commit()
def getKEGGinteractionData(kID):
    """Query the KEGG REST 'get' endpoint in batches and scrape the
    DRUG_TARGET sections of the returned flat-file records.

    :param kID: list of KEGG gene ids ('org:id' form)
    :return: ({'org:keggid': [drug ids]}, set of all drug ids seen)
    (Python 2 code: print statement, urllib2.)
    """
    retDict = dict()
    retList = []
    startBatch = 0
    step = 10          # KEGG accepts up to 10 entries per 'get' request
    nQuery = len(kID)
    while (startBatch < nQuery):
        print startBatch
        # size of this batch (the last one may be short)
        if startBatch+step < nQuery:
            lenBatch = step
        else:
            lenBatch = nQuery - startBatch
        # build the 'get/id1+id2+...' request URL
        urlTarget = baseURL['kegg']+"get/"
        for i in range(startBatch,lenBatch+startBatch):
            if i > startBatch:
                urlTarget += "+"
            urlTarget += kID[i]
        connection = ulib.urlopen(urlTarget)
        content = connection.read()
        lines = content.split("\n")
        pullData = False
        dataList = []  # NOTE(review): never used afterwards
        # KEGG flat-file format: a 12-char keyword column + content column;
        # continuation lines have a blank keyword column.
        for line in lines:
            headWord = line[:12].rstrip()
            content = line[12:].strip()
            if len(headWord)!=0:
                if headWord == "DRUG_TARGET":
                    pullData = True    # following lines list target drugs
                elif headWord == "ENTRY":
                    keggid = content.split()[0]
                elif headWord == "ORGANISM":
                    org = content.split()[0]
                else:
                    pullData = False   # any other section ends the scrape
            if pullData:
                # content looks like 'DrugName: id1 id2 ...'
                dbId,record = content.split(":")
                retList+=record.split()
                if org+":"+keggid in retDict:
                    retDict[org+":"+keggid] += record.split()
                else:
                    retDict[org+":"+keggid] = record.split()
        startBatch += lenBatch
    return retDict,set(retList)
def loadTable(table,column,condStr = "",optionStr = ""):
queryStr = "SELECT "
for i,col in enumerate(column):
if i > 0:
queryStr += ","
queryStr = queryStr + col
queryStr = queryStr + " FROM " + table
if condStr is not "":
queryStr += " WHERE "
for i,cond in enumerate(condStr):
if i > 0:
queryStr += " OR "
queryStr = queryStr + cond
queryStr += " "+optionStr
dataList = []
cur.execute(queryStr)
dataRows = cur.fetchall()
for i,row in enumerate(dataRows):
dataList.append(row)
return dataList
if __name__ == '__main__':
    # Time the full import run (Python 2 print statement).
    startTime = time.time()
    main()
    print time.time()-startTime
|
from data_resource.generator.api_manager.v1_0_0.resource_handler import ResourceHandler
import pytest
from data_resource.shared_utils.api_exceptions import ApiError
from data_resource.db.base import db_session
@pytest.mark.requiresdb
def test_query_works_with_correct_data(valid_people_orm):
    """query_one returns the matching row with a 200 status."""
    handler = ResourceHandler()

    class FakeFlaskRequest:
        json = {"name": "tester"}

    # Seed exactly one matching row.
    person = valid_people_orm(name="tester")
    db_session.add(person)
    db_session.commit()

    response = handler.query_one(
        resource_orm=valid_people_orm, request=FakeFlaskRequest()
    )
    assert response == ({"results": [{"id": 1, "name": "tester"}]}, 200)
@pytest.mark.requiresdb
def test_query_returns_none_when_given_incorrect_field_data(valid_people_orm):
    """A filter value matching no row yields a 404 'No matches found'."""
    handler = ResourceHandler()

    class FakeFlaskRequest:
        json = {"name": "wrongname"}

    # Seed a row whose name does NOT match the query above.
    db_session.add(valid_people_orm(name="tester"))
    db_session.commit()

    outcome = handler.query_one(
        resource_orm=valid_people_orm, request=FakeFlaskRequest()
    )
    assert outcome == ({"message": "No matches found"}, 404)
@pytest.mark.requiresdb
def test_query_errors_when_given_incorrect_field_data(valid_people_orm):
    """Filtering on a nonexistent field raises ApiError.

    Ideally we would be able to also assert on the error message that
    returns.
    # assert result == ({"error": "Resource with id '1' not found."},
    404)
    """
    handler = ResourceHandler()

    class FakeFlaskRequest:
        json = {"doesnotexist": "error"}

    # Seed one row so the failure is due to the bad field, not an empty table.
    db_session.add(valid_people_orm(name="tester"))
    db_session.commit()

    with pytest.raises(ApiError):
        handler.query_one(
            resource_orm=valid_people_orm, request=FakeFlaskRequest()
        )
@pytest.mark.requiresdb
def test_query_empty_body_errors(valid_people_orm):
    """An empty JSON request body raises ApiError.

    Ideally we would be able to also assert on the error message that
    returns.
    # assert result == ({"error": "Resource with id '1' not found."},
    404)
    """
    handler = ResourceHandler()

    class FakeFlaskRequest:
        json = {}

    with pytest.raises(ApiError):
        handler.query_one(
            resource_orm=valid_people_orm, request=FakeFlaskRequest()
        )
|
import tkinter as tk
from PIL import Image, ImageTk
from tkinter import *

# Fix: the original referenced `master` and `variable` without defining
# them, which raises NameError at runtime.  Create the root window and
# the tracked selection variable before building the OptionMenu.
master = Tk()
variable = StringVar(master)
variable.set("one")  # default selection shown in the menu button

w = OptionMenu(master, variable, "one", "two", "three")
w.config(bg = "GREEN") # Set background color to green
# Set this to what you want, I'm assuming "green"...
w["menu"].config(bg="GREEN")
w.pack()
class Solution:
    def maxSumMinProduct(self, nums: List[int]) -> int:
        """Return the maximum "min-product" -- min(subarray) * sum(subarray)
        over all non-empty subarrays of nums -- modulo 10**9 + 7.

        Monotonic-stack sweep: when nums[i] breaks the non-decreasing run,
        each popped index is the minimum of a maximal window whose sum is
        read from the prefix-sum table.  O(n) time, O(n) space.
        """
        MOD = 10 ** 9 + 7  # exact int (was int(1e9 + 7), derived from a float)
        best = 0
        stack = []  # indices whose values are non-decreasing
        prefix = [0] + list(accumulate(nums))
        for i in range(len(nums) + 1):  # i == len(nums) flushes the stack
            while stack and (i == len(nums) or nums[stack[-1]] > nums[i]):
                min_val = nums[stack.pop()]
                # Sum of nums[stack[-1]+1 : i], the widest window in which
                # min_val is the minimum.  (Renamed from `sum`, which
                # shadowed the builtin.)
                window_sum = prefix[i] - prefix[stack[-1] + 1] if stack else prefix[i]
                best = max(best, min_val * window_sum)
            stack.append(i)
        return best % MOD
|
from azure.storage.blob import BlobService
import datetime
import string
from verify_oauth import verify_oauth
# Azure storage account credentials (key intentionally left blank here).
accountName = 'jesse15'
accountKey = ''
blob_service = BlobService(accountName, accountKey)
# Set to True by progress_callback() once an upload completes.
uploaded = False
def uploadBlob(username, file, filename, token, secret):
    """Upload a file to the user's public Azure blob container.

    Returns ``[timestamp, URL]`` on success, or a one-element list with
    an error message on failure.

    Fixes: both return paths indexed the empty ``returnList`` with a
    tuple/string (``returnList[time, URLstring]``), which raised
    TypeError; they now return fresh lists.

    :param username: container name (also part of the blob URL)
    :param file: unused here -- the upload path is hard-coded below
    :param filename: original file name, appended after the timestamp
    :param token: OAuth token checked before any upload
    :param secret: OAuth secret checked before any upload
    """
    global uploaded
    verify_oauth_code = verify_oauth(token, secret)
    if verify_oauth_code.status_code != 200:
        return ["Could not verify oAuth credentials"]
    blob_service.create_container(username, x_ms_blob_public_access='container')
    #datetime gives the system's current datetime, I convert to string in order to .replace
    #characters that wouldn't work in a URL like ':','.', and ' '
    time = str(datetime.datetime.now())
    timeReplaced = time.replace(':','').replace('.','').replace(' ','')
    URLstring = "https://" + accountName + ".blob.core.windows.net/" + username + "/" + timeReplaced + "_" + filename
    uploaded = False
    # NOTE(review): the source path is hard-coded -- presumably a debug
    # leftover; it likely should use the `file` argument.  TODO confirm
    # before changing.
    blob_service.put_block_blob_from_path(
        username,
        timeReplaced,
        '\/Users\/rjhunter\/Desktop\/bridge.jpg',
        x_ms_blob_content_type='image/png',
        progress_callback=progress_callback
    )
    # progress_callback flips `uploaded` once all bytes are transferred.
    if uploaded:
        return [time, URLstring]
    else:
        return ["Failure to upload to Azure Blob Storage"]
def deleteBlob(username, blobURL):
    """Delete the blob named by the last path segment of *blobURL* from
    the *username* container."""
    blob_name = blobURL.split("/")[-1]
    blob_service.delete_blob(username, blob_name)
def listBlobs(username):
    """Print the name of every blob in the *username* container,
    following continuation markers until the listing is exhausted."""
    blobs = []
    marker = None
    while True:
        batch = blob_service.list_blobs(username, marker=marker)
        blobs.extend(batch)
        marker = batch.next_marker
        if not marker:
            break
    for blob in blobs:
        print(blob.name)
def progress_callback(current, total):
    """Azure upload progress hook: log byte counts and flip the
    module-level `uploaded` flag once every byte has been transferred."""
    global uploaded
    print("Current bytes uploaded: ", current)
    print("===============")
    print("Total bytes of file: ", total)
    print()
    if current == total:
        uploaded = True
|
import os
from flask_sqlalchemy import SQLAlchemy
# Module-level holders populated elsewhere at application start-up:
# `app` receives the Flask instance, `db` the SQLAlchemy handle.
app = None
db:SQLAlchemy = None
|
# Generated by Django 3.0.5 on 2020-07-11 05:18
from django.db import migrations, models
import django.utils.timezone
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
('listelement', '0004_auto_20200630_0029'),
]
operations = [
migrations.AddField(
model_name='element',
name='created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='element',
name='tags',
field=taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='element',
name='updated',
field=models.DateTimeField(auto_now=True),
),
]
|
__author__ = 'stev1090'
|
# One-shot bspwm call: disable focus-history recording, focus the
# previously focused node, then re-enable history.  NOTE(review): the
# `system` helper is provided by the embedding environment -- it is not
# defined in this file.
system.exec_command("bspc wm -h off; bspc node older -f; bspc wm -h on", getOutput=False)
from picar_4wd.utils import mapping
class Servo():
    """PWM servo driver: converts an angle in [-90, 90] degrees into a
    pulse width on the supplied PWM pin."""
    PERIOD = 4095            # PWM timer period (ticks)
    PRESCALER = 10           # NOTE(review): unused; prescaler is recomputed in __init__
    MAX_PW = 2500            # pulse width at +90 degrees (microseconds)
    MIN_PW = 500             # pulse width at -90 degrees (microseconds)
    FREQ = 50                # servo PWM frequency (Hz)
    ARR = 4095               # auto-reload value used in the prescaler math
    CPU_CLOCK = 72000000     # timer input clock (Hz)

    def __init__(self, pin, offset=0):
        """
        :param pin: PWM pin object exposing period()/prescaler()/pulse_width()
        :param offset: calibration offset added to every angle (degrees)
        """
        self.pin = pin
        self.offset = offset
        self.pin.period(self.PERIOD)
        prescaler = int(float(self.CPU_CLOCK) / self.FREQ / self.ARR)
        self.pin.prescaler(prescaler)

    def set_angle(self, angle):
        """Move the servo to *angle* degrees, clamped to [-90, 90].

        :raises ValueError: if *angle* cannot be converted to int
        """
        try:
            angle = int(angle)
        except (TypeError, ValueError):
            # Narrowed from a bare `except:` so unrelated errors
            # (e.g. KeyboardInterrupt) are no longer swallowed.
            raise ValueError("Angle value should be int value, not %s"%angle)
        # Clamp to the mechanical range before applying the offset.
        angle = max(-90, min(90, angle))
        angle = angle + self.offset
        High_level_time = mapping(angle, -90, 90, self.MIN_PW, self.MAX_PW)
        # Duty cycle over a 20 ms (20000 us) servo frame.
        pwr = High_level_time / 20000
        value = int(pwr*self.PERIOD)
        self.pin.pulse_width(value)
import xbmc,xbmcaddon,xbmcgui,xbmcplugin,urllib,urllib2,os,re,sys,datetime,shutil
# Kodi (XBMC) video add-on scraping spacetelescope.org (Python 2 era).
SiteName='Space Telescope 0.0.1'
addon_id = 'plugin.video.spacetelescope'
baseurl = 'http://www.spacetelescope.org/videos/'
videobase = 'http://www.spacetelescope.org'
# Artwork shipped inside the add-on directory.
fanart = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id , 'fanart.jpg'))
icon = xbmc.translatePath(os.path.join('special://home/addons/' + addon_id, 'icon.PNG'))
def INDEX():
    """Build the top-level directory: one folder entry per video category
    scraped from the site's video index page."""
    link = open_url(baseurl)
    match=re.compile('<a class=".+?" href="(.+?)" >(.+?)</a>').findall(link)
    for url, cat in match:
        if 'category'in url:
            # Clean up the visible category label.
            # NOTE(review): the first two replace() targets look
            # HTML-entity-garbled (probably '&#39;' / '&amp;' originally)
            # -- verify against the original source before touching.
            cat2 = cat.replace(''','').replace('&','').replace('HD','Best')
            url = videobase + url
            addDir(cat2,url,1,icon,'',fanart)
def get_video_list(url):
    """List the videos of one category page (mode 3 = playable links) and
    add a 'Next Page' folder when the site reports another page."""
    link = open_url(url)
    match=re.compile('<td class=".+?" ><a href="(.+?)"><img src="(.+?)" width="122" alt="(.+?)" /></a></td>').findall(link)
    for url, thumb, name in match:
        url = videobase + url
        thumb2 = videobase+thumb
        # NOTE(review): replace() targets appear HTML-entity-garbled
        # (probably '&#39;' / '&amp;' originally) -- verify before changing.
        name2 = name.replace(''','').replace('&','')
        addLink(name2,url,3,thumb2,'',fanart)
    # Pagination: follow the 'paginator_next' anchor, if present.
    nextpage=re.compile('<span class="paginator_next"> <a href="(.+?)">').findall(link)
    for Next_Page in nextpage:
        url = videobase + Next_Page
        addDir('Next Page>>>',url,1,icon,'',fanart)
############################ STANDARD #####################################################################################
def PLAYLINK(name,url):
    """Resolve the 'Medium Flash' stream URL on the video page and play it
    through a fresh single-item video playlist."""
    link = open_url(url)
    vid_link=re.compile('<a href="(.+?)" rel="shadowbox;width=640;height=360" title=".+?">Medium Flash</a></span>').findall(link)[0]
    playlist = xbmc.PlayList(1)   # 1 = video playlist
    playlist.clear()
    listitem = xbmcgui.ListItem(name, iconImage="DefaultVideo.png")
    listitem.setInfo("Video", {"Title":name})
    listitem.setProperty('mimetype', 'video/x-msvideo')
    listitem.setProperty('IsPlayable', 'true')
    playlist.add(vid_link,listitem)
    xbmcPlayer = xbmc.Player(xbmc.PLAYER_CORE_AUTO)
    xbmcPlayer.play(playlist)
    exit()
def open_url(url):
    """Fetch *url* with a desktop Firefox User-Agent (some sites block the
    default urllib2 agent) and return the response body as a string."""
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    response = urllib2.urlopen(req)
    link=response.read()
    response.close()
    return link
def get_params():
    """Parse the plugin invocation query string (sys.argv[2]) into a dict.

    Returns [] (preserved quirk of the original) when the query string is
    empty or too short; otherwise a dict of 'key=value' pairs split on
    '&' -- malformed pairs are skipped.

    Fixes: the original tried to strip a trailing '/' with
    ``params[0:len(params)-2]`` -- an off-by-one slice applied to a
    variable that was never used afterwards, so trailing slashes leaked
    into the last parameter value.  The trailing '/' is now removed.
    """
    paramstring = sys.argv[2]
    if len(paramstring) < 2:
        # No parameters passed (first plugin invocation).
        return []
    cleaned = paramstring.replace('?', '')
    if cleaned.endswith('/'):
        cleaned = cleaned[:-1]
    param = {}
    for pair in cleaned.split('&'):
        parts = pair.split('=')
        if len(parts) == 2:
            param[parts[0]] = parts[1]
    return param
def addDir(name,url,mode,iconimage,description,fanart):
    """Add a folder item to the Kodi directory listing.

    The target URL re-invokes this plugin with url/mode/name/description
    packed into the query string (parsed back by get_params()).
    """
    u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&description="+str(description)
    ok=True
    liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    liz.setInfo( type="Video", infoLabels={ "Title": name, 'plot': description } )
    liz.setProperty('fanart_image', fanart)
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
    return ok
def addLink(name,url,mode,iconimage,description,fanart):
    """Add a playable (non-folder) item to the Kodi directory listing.

    Fixes: the thumbnail used the module-level ``icon`` instead of the
    ``iconimage`` argument (compare addDir), so every link showed the
    add-on icon rather than the video thumbnail the caller passed.
    """
    u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)+"&description="+str(description)
    liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
    liz.setInfo( type="Video", infoLabels={ "Title": name, 'plot': description } )
    liz.setProperty('fanart_image', fanart)
    ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=False)
    return ok
# ---- plugin entry point: route on the 'mode' query parameter ----
params=get_params(); url=None; name=None; mode=None; site=None
try: site=urllib.unquote_plus(params["site"])
except: pass
try: url=urllib.unquote_plus(params["url"])
except: pass
try: name=urllib.unquote_plus(params["name"])
except: pass
try: mode=int(params["mode"])
except: pass
# (Python 2 print statements.)
print "Site: "+str(site); print "Mode: "+str(mode); print "URL: "+str(url); print "Name: "+str(name)
# No mode/url -> top-level category menu; 1 -> video list; 3 -> play.
# NOTE(review): mode 2 calls get_videos, which is not defined in this file.
if mode==None or url==None or len(url)<1: INDEX()
elif mode==1: get_video_list(url)
elif mode==2: get_videos(url)
elif mode==3: PLAYLINK(name,url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
|
# This code is part of the project "Krill"
# Copyright (c) 2020 Hongzheng Chen
def get_prop_class(job_prop, pb_name):
    """Generate the C++ property-array class for one job property.

    :param job_prop: (name, type, initial value) tuple for the property
    :param pb_name: namespace whose PropertyManager gets friend access
    :return: C++ class definition as a string
    """
    prop_name, type_name, initial_val = job_prop
    class_name = "{}".format(prop_name)
    # Map the abstract widths onto the framework's edge-data typedefs;
    # other C/C++ inherent types pass through unchanged.
    if type_name == "uint":
        type_name = "uintE"
    elif type_name == "int":
        type_name = "intE"
    parts = ["class {} {{\npublic:\n".format(class_name)]
    # constructor
    parts.append(" {}(size_t _n): n(_n) {{\n".format(class_name))
    parts.append(" }\n")
    # destructor (frees the slab slice assigned by PropertyManager)
    parts.append(" ~{}() {{\n".format(class_name))
    parts.append(" free(data);\n }\n")
    # accessors
    parts.append(" inline {} operator[] (int i) const {{ return data[i]; }}\n".format(type_name))
    parts.append(" inline {}& operator[] (int i) {{ return data[i]; }}\n".format(type_name))
    parts.append(" inline {} get (int i) const {{ return data[i]; }}\n".format(type_name))
    parts.append(" inline {}& get (int i) {{ return data[i]; }}\n".format(type_name))
    parts.append(" inline {}* get_addr (int i) {{ return &(data[i]); }}\n".format(type_name))
    parts.append(" inline {}* get_data () {{ return data; }}\n".format(type_name))
    parts.append(" inline void set (int i, {} val) {{ data[i] = val; }}\n".format(type_name))
    parts.append(" inline void set_all ({} val) {{ parallel_for (int i = 0; i < n; ++i) data[i] = val; }}\n".format(type_name))
    parts.append(" inline void add (int i, {} val) {{ data[i] += val; }}\n".format(type_name))
    # friend class so the manager can wire up `data`
    parts.append(" friend class {}::PropertyManager;\n".format(pb_name))
    # data
    parts.append("private:\n")
    parts.append(" size_t n;\n")
    parts.append(" {}* data;\n".format(type_name))
    parts.append("};\n\n")
    return "".join(parts)
def get_props_class(props, pb_name):
    """Emit one C++ namespace per job, each containing the generated
    property classes for that job's properties.

    :param props: {job name: [property tuples]}
    :param pb_name: forwarded to get_prop_class for friend access
    """
    chunks = []
    for job in props:
        job_namespace = job + "_Prop"
        chunks.append("namespace {} {{\n\n".format(job_namespace))
        chunks.extend(get_prop_class(prop, pb_name) for prop in props[job])
        chunks.append("}} // namespace {}\n\n".format(job_namespace))
    return "".join(chunks)
def get_main_class(props):
    """Generate the C++ PropertyManager class: per-property add_<name>()
    factories, an initialize() that backs all arrays of one property kind
    with a single malloc'd slab, and the vectors of array pointers."""
    res = "class PropertyManager {\npublic:\n size_t n;\n" \
        " PropertyManager(size_t _n): n(_n) {}\n"
    # Factory methods: allocate one property object and register it.
    for job in props:
        for prop in props[job]:
            class_name = "{}_Prop::{}".format(job,prop[0])
            prop_name = prop[0]
            array_name = "arr_{}_{}".format(job,prop[0])
            res += " inline {0}* add_{1}() {{\n".format(class_name,prop_name)
            res += " {0}* {1} = new {0}(n);\n".format(class_name,prop_name)
            res += " {}.push_back({});\n".format(array_name,prop_name)
            res += " return {};\n".format(prop_name)
            res += " }\n"
    # initialize(): one slab per property kind, sliced across instances.
    res += " inline void initialize() {\n"
    for job in props:
        for prop in props[job]:
            class_name = "{}_Prop::{}".format(job,prop[0])
            prop_name, type_name, initial_val = prop
            array_name = "arr_{}_{}".format(job,prop[0])
            res += " // {}\n".format(class_name) # comment
            res += " {0}* {1}_all = ({0}*) malloc(sizeof({0}) * n * {1}.size());\n".format(type_name,array_name)
            res += " int {}_idx = 0;\n".format(array_name)
            res += " for (auto ptr : {}) {{\n".format(array_name)
            res += " ptr->data = &({0}_all[{0}_idx]);\n".format(array_name)
            if initial_val != None:
                if type(initial_val) == type("str"):
                    # literal initial value; "-1" is mapped to UINT_MAX.
                    # NOTE(review): eval() on the spec string -- assumes
                    # trusted job descriptions.
                    res += " parallel_for (int i = 0; i < n; ++i) {\n"
                    res += " ptr->data[i] = {};\n".format(initial_val if eval(initial_val) != -1 else "UINT_MAX")
                else: # lambda expression
                    res += " auto lambda = [](int i) -> {} {{ return ".format(type_name)
                    res += initial_val[1].replace(initial_val[0],"i")
                    res += "; };\n"
                    res += " parallel_for (int i = 0; i < n; ++i) {\n"
                    res += " ptr->data[i] = lambda(i);\n"
                # close the parallel_for emitted by either branch
                res += " }\n"
            res += " {}_idx += n;\n".format(array_name)
            res += " }\n"
    res += " }\n"
    # Member declarations: one vector of array pointers per property kind.
    for job in props:
        for prop in props[job]:
            class_name = "{}_Prop::{}".format(job,prop[0])
            prop_name = prop[0]
            array_name = "arr_{}_{}".format(job,prop[0])
            res += " std::vector<{}*> {};\n".format(class_name,array_name)
    res += "};\n\n"
    return res
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from .models import *
import random
from django.db.models import Q
# Load Table Page
def ldsp(request):
    """Render the server-side DataTables demo page."""
    return render(request, 'ldsp/index.html')
# Load Table Data
def ldspData(request):
    """Serve one page of DataSet rows using the DataTables server-side
    protocol (draw / recordsTotal / recordsFiltered / data)."""
    # Get the data from request
    search_value = request.GET['search[value]'].strip()
    startLimit = int(request.GET['start'])
    endLimit = startLimit + int(request.GET['length'])
    data_array = []
    # Count the total length
    totalLength = DataSet.objects.count()
    # if search parameter is passed
    if search_value != '':
        # Querying dataset: substring match against either column
        dataList = DataSet.objects.filter(Q(text__contains=search_value) | Q(random__contains=search_value)).order_by(
            'id')
        # Filtering dataset (page slice)
        dataFilter = dataList[startLimit:endLimit]
        # Get the filter length
        filterLength = dataList.count()
    else:
        # Querying dataset
        dataList = DataSet.objects.all().order_by('id')
        # Filtering dataset (page slice)
        dataFilter = dataList[startLimit:endLimit]
        # Get the filter length
        filterLength = totalLength
    # Processing the data for table
    # NOTE(review): the serial number restarts at 1 on every page --
    # `key` is the index within the page, not the absolute row number.
    for key, item in enumerate(dataFilter):
        row_array = [str(key + 1), item.text, item.random, item.created_at]
        data_array.append(row_array)
    # Preparing the response
    response = {
        "draw": request.GET['draw'],
        "recordsTotal": totalLength,
        "recordsFiltered": filterLength,
        "data": data_array
    }
    # Returning json response
    return JsonResponse(response)
def ldspSeed(request):
    """Seed the DataSet table with 100,000 ("1 lakh") demo rows."""
    # range(1, 100000) only produced 99,999 rows; include the upper bound.
    # bulk_create issues batched INSERTs instead of 100k individual queries.
    DataSet.objects.bulk_create(
        DataSet(text='This is text %s' % i, random=random.randint(100000, 999999))
        for i in range(1, 100001)
    )
    return HttpResponse("Done")
|
from statistics import mean
from signal_processing_algorithms.energy_statistics import energy_statistics
def jump_detection(time_series, relative_threshold=0.05):
    """Return the indices where the series changes by more than
    *relative_threshold* relative to the immediately preceding value.

    An empty series yields no jump points (the original raised IndexError).
    """
    if len(time_series) == 0:
        return []
    jump_points = []
    last_point = time_series[0]
    for idx, current_point in enumerate(time_series[1:], start=1):
        # |cur/last - 1| > t  <=>  |cur - last| > t * |last|; the product form
        # avoids the ZeroDivisionError the original hit when last_point == 0.
        if abs(current_point - last_point) > relative_threshold * abs(last_point):
            jump_points.append(idx)
        last_point = current_point
    return jump_points
def trend_detection(time_series, window_length, threshhold):
    """Flag indices whose value deviates from the trailing moving average.

    For each idx >= window_length, compares time_series[idx] against the mean
    of the window_length preceding points; returns the indices whose relative
    deviation exceeds *threshhold*.

    Bug fixed: the original averaged the slice [idx-window_length:idx-1],
    silently dropping the last point of every window.
    """
    jump_points = []
    for idx in range(window_length, len(time_series)):
        moving_average = mean(time_series[idx - window_length:idx])
        # |x/ma - 1| > t  <=>  |x - ma| > t * |ma|; the product form also
        # survives a zero moving average instead of raising ZeroDivisionError.
        if abs(time_series[idx] - moving_average) > threshhold * abs(moving_average):
            jump_points.append(idx)
    return jump_points
def e_divisive_analysis(time_series):
    """For every change point e-divisive finds on the full series, record the
    shortest prefix length at which that change point is first reported.

    Returns {change_point: prefix_length}; empty dict when no change points.
    """
    change_points = energy_statistics.e_divisive(time_series, pvalue=0.1, permutations=100)
    result = {}
    if len(change_points) == 0:
        return result
    # Track which change points are still unlocated; removed the debug print
    # the original left in the loop.
    pending = set(change_points)
    for idx in range(1, len(time_series)):
        partial_changepoints = energy_statistics.e_divisive(
            time_series[:idx], pvalue=0.1, permutations=100)
        for cp in pending.intersection(partial_changepoints):
            result[cp] = idx
        pending -= set(result)
        if not pending:
            # Every change point located; skip the remaining expensive runs.
            break
    return result
# Leet spek generator. Leet é uma forma de se escrever o alfabeto latino usando outros símbolos em lugar das letras' como
# números por exemplo. A própria palavra leet admite muitas variações' como l33t ou 1337. O uso do leet reflete uma
# subcultura relacionada ao mundo dos jogos de computador e internet' sendo muito usada para confundir os iniciantes e
# afirmar-se como parte de um grupo. Pesquise sobre as principais formas de traduzir as letras. Depois' faça um programa que
# peça uma texto e transforme-o para a grafia leet speak.
#
from random import randint, choice
# Leet substitution table: each uppercase letter maps to a list of alternative
# glyph spellings; cripto() picks one variant at random per letter.
alfha_leet = {'A': ['4', '/' , '@', '/', '-' , ' \ ' , '^', 'ä', 'ª', 'aye'],
              'B': ['8', '6', '|3', 'ß', 'P>', '|:'],
              'C': ['¢' ,'<', '('],
              'D': ['|))', 'o|', '[)', 'I>', '|>', '?'],
              'E': ['3','&', '£','ë','[-','€' ,'ê' ,'|=-'],
              'F': ['|=' ,'ph','|', '#'],
              'G': ['6', '&', '(_+', '9', 'C-', 'gee', '('],
              'H': ['#', '/-/', '[-]', '{=}', '<~>', '|-|', ']~[', '}{', ']-[', '?', '8', '}-{'],
              'I': ['1', '!', '|', '&', 'eye', '3y3', 'ï', '][', '[]'],
              'J': [';', '_/', '</', '(/'],
              'K': ['|<' ,'|{' ,']{' ,'}<' ,'|('],
              'L': ['1_', '|', '|_', '#', '¬', '£'],
              'M': ['//.', '^^', '|v|', '[V]', '{V}', '|\/|', '/\/', '(u)', '[]V[]', ' (V) ', '/|\ ', 'IVI'],
              # NOTE(review): '/V' '₪' below is implicit string concatenation
              # (yields the single entry '/V₪') — probably a missing comma; confirm.
              'N': ['//', '^/', '|\|', '/\/', '[\]', '<\>', '{\}', '[]\[]', 'n', '/V' '₪'],
              'O': ['0', '()', '?p', '*', 'ö'],
              'P': ['|^', '|*', '|o', '|^(o)', '|>', '|', '9', '[]D', '|̊', '|7'],
              'Q': ['q', '9', '(_', ')', 'o'],
              'R': ['2', 'P\ ' , '|?', '|^', 'lz', '[z', '12', 'Я'],
              'S': ['5', '$', 'z', '§', 'ehs'],
              'T': ['7', '+', '-|-', '1', '][', '|'],
              'U': ['(_)', '|_|', 'v', 'ü'],
              'V': ['\/'],
              'W': ['\/\/', 'vv', '//', '\^/', '(n)', '\V/', '\//', '\X/', '\|/'],
              'X': ['><', 'Ж', 'ecks', ')('],
              'Y': ['Y', 'j', '`/', '¥'],
              'Z': ['2', 'z', '~\_', '~/_', '%']}
def cripto(frase):
    """Print and return *frase* rewritten in leet speak.

    Each letter with an entry in alfha_leet is replaced by one of its glyph
    variants chosen at random (followed by a space). Characters without a
    mapping (spaces, digits, punctuation) are now kept instead of being
    silently dropped, and the cipher string is returned (the original
    returned None because it returned the result of print()).
    """
    cifra = ''
    for letra in frase:
        chave = letra.upper()
        if chave in alfha_leet:
            # random.choice replaces the manual randint(0, len-1) indexing.
            cifra += choice(alfha_leet[chave]) + ' '
        else:
            cifra += letra
    print(f'A palavra criptografada é: {cifra}')
    return cifra
# Script entry point: read a word from stdin and print its leet translation.
palavra = input('Insira uma palavra: ')
cripto(palavra)
|
class Calendar(dict):
    """Dict-backed calendar record that accepts only a fixed set of keys."""

    # Whitelisted property names; any other keyword raises TypeError.
    fields = [
        'id',
        'name',
        'type',
        'uid',
        'url',
        'updated',
        'enabled',
        'defaultCategory',
        'external',
    ]

    def __init__(self, **kwargs):
        unknown = [key for key in kwargs if key not in Calendar.fields]
        if unknown:
            raise TypeError("Unknown property {}".format(unknown[0]))
        super(Calendar, self).__init__(**kwargs)
|
import hashlib
import os
import socket
import time
from django.core.cache import get_cache
from django.core.files.base import ContentFile
from django.utils import simplejson
from django.utils.encoding import smart_str
from django.utils.functional import SimpleLazyObject
from django.utils.importlib import import_module
from compressor.conf import settings
from compressor.storage import default_storage
from compressor.utils import get_mod_func
# Memoized COMPRESS_CACHE_KEY_FUNCTION callable, resolved lazily by get_cachekey().
_cachekey_func = None
def get_hexdigest(plaintext, length=None):
    """MD5 hex digest of *plaintext*, truncated to *length* chars when given."""
    digest = hashlib.md5(smart_str(plaintext)).hexdigest()
    return digest[:length] if length else digest
def simple_cachekey(key):
    """Prefix *key* with the django_compressor namespace."""
    return 'django_compressor.%s' % smart_str(key)
def socket_cachekey(key):
    """Cache key additionally namespaced by hostname, for caches shared
    between several servers."""
    return "django_compressor.%s.%s" % (socket.gethostname(), smart_str(key))
def get_cachekey(*args, **kwargs):
    """Resolve settings.COMPRESS_CACHE_KEY_FUNCTION once, memoize it in
    _cachekey_func, then delegate every call to it."""
    global _cachekey_func
    if _cachekey_func is None:
        try:
            mod_name, func_name = get_mod_func(
                settings.COMPRESS_CACHE_KEY_FUNCTION)
            _cachekey_func = getattr(import_module(mod_name), func_name)
        # NOTE: Python 2 except syntax — this module predates Python 3.
        except (AttributeError, ImportError), e:
            raise ImportError("Couldn't import cache key function %s: %s" %
                    (settings.COMPRESS_CACHE_KEY_FUNCTION, e))
    return _cachekey_func(*args, **kwargs)
def get_mtime_cachekey(filename):
    """Cache key for the cached mtime of *filename* (hashed, so any path is safe)."""
    return get_cachekey("mtime.%s" % get_hexdigest(filename))
def get_offline_hexdigest(render_template_string):
    """Digest identifying a template string for offline-compression lookups."""
    return get_hexdigest(render_template_string)
def get_offline_cachekey(source):
    """Cache key for the offline-compressed result of template *source*."""
    return get_cachekey("offline.%s" % get_offline_hexdigest(source))
def get_offline_manifest_filename():
    """Path of the offline manifest inside the compressor output directory."""
    output_dir = settings.COMPRESS_OUTPUT_DIR.strip('/')
    return os.path.join(output_dir, settings.COMPRESS_OFFLINE_MANIFEST)
# Memoized offline manifest dict; None until first loaded (see get_offline_manifest).
_offline_manifest = None
def get_offline_manifest():
    """Return the offline manifest, loading and memoizing it on first access.

    Missing manifest file yields (and caches) an empty dict.
    """
    global _offline_manifest
    if _offline_manifest is not None:
        return _offline_manifest
    filename = get_offline_manifest_filename()
    if default_storage.exists(filename):
        _offline_manifest = simplejson.load(default_storage.open(filename))
    else:
        _offline_manifest = {}
    return _offline_manifest
def flush_offline_manifest():
    """Drop the memoized manifest so the next get_offline_manifest() re-reads it."""
    global _offline_manifest
    _offline_manifest = None
def write_offline_manifest(manifest):
    """Persist *manifest* as indented JSON and invalidate the memoized copy."""
    serialized = ContentFile(simplejson.dumps(manifest, indent=2))
    default_storage.save(get_offline_manifest_filename(), serialized)
    flush_offline_manifest()
def get_templatetag_cachekey(compressor, mode, kind):
    """Cache key for rendered {% compress %} template-tag output."""
    return get_cachekey(
        "templatetag.%s.%s.%s" % (compressor.cachekey, mode, kind))
def get_mtime(filename):
    """File mtime, cached for COMPRESS_MTIME_DELAY seconds when that setting
    is truthy; otherwise a direct os.path.getmtime call."""
    if not settings.COMPRESS_MTIME_DELAY:
        return os.path.getmtime(filename)
    key = get_mtime_cachekey(filename)
    mtime = cache.get(key)
    if mtime is None:
        mtime = os.path.getmtime(filename)
        cache.set(key, mtime, settings.COMPRESS_MTIME_DELAY)
    return mtime
def get_hashed_mtime(filename, length=12):
    """Hex digest of the file's integer mtime, or None if it can't be stat'd."""
    try:
        real_path = os.path.realpath(filename)
        mtime = str(int(get_mtime(real_path)))
    except OSError:
        return None
    return get_hexdigest(mtime, length)
def get_hashed_content(filename, length=12):
    """Hex digest of the file's contents, or None when it cannot be
    resolved or read.

    Improvements: the handle is managed by a ``with`` block instead of the
    manual try/finally, and read failures (IOError) now return None
    consistently with get_hashed_mtime instead of propagating.
    """
    try:
        filename = os.path.realpath(filename)
        with open(filename) as hash_file:
            content = hash_file.read()
    except (IOError, OSError):
        return None
    return get_hexdigest(content, length)
def cache_get(key):
    """Mint-cache read: the stored value, or None while it is being refreshed.

    Values are stored as (value, refresh_time, refreshed) tuples by cache_set.
    """
    packed = cache.get(key)
    if packed is None:
        return None
    value, refresh_time, refreshed = packed
    if time.time() <= refresh_time or refreshed:
        return value
    # Stale: re-store the value flagged as refreshed so exactly one caller
    # recomputes while the others keep serving the stale copy for another
    # MINT_DELAY seconds.
    cache_set(key, value, refreshed=True,
              timeout=settings.COMPRESS_MINT_DELAY)
    return None
def cache_set(key, val, refreshed=False, timeout=None):
    """Store *val* with mint-cache metadata.

    The backend TTL is timeout + COMPRESS_MINT_DELAY so stale values survive
    long enough to be served while a refresh is in flight.
    """
    if timeout is None:
        timeout = settings.COMPRESS_REBUILD_TIMEOUT
    packed = (val, timeout + time.time(), refreshed)
    return cache.set(key, packed, timeout + settings.COMPRESS_MINT_DELAY)
# Cache backend resolved lazily so settings can still change at import time.
cache = SimpleLazyObject(lambda: get_cache(settings.COMPRESS_CACHE_BACKEND))
|
"""
Created on Thu Sep 17 01:23:14 2020
@author: anish.gupta
"""
from flask import Flask, request, jsonify
import numpy as np
import pickle
import pandas as pd
import flasgger
from flasgger import Swagger
# from clean import preprocess
from fastai.text import load_learner, DatasetType
from flasgger import APISpec, Schema, fields
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
import operator
import string
import re
import nltk
import random
# NOTE(review): downloading 'all' NLTK corpora at import time is very heavy;
# only stopwords/punkt appear to be used below — confirm before trimming.
nltk.download('all')
# nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# Stopword sets hoisted to module level so the removers don't rebuild them per call.
stop_words_english = set(stopwords.words('english'))
stop_words_french = set(stopwords.words('french'))
def english_stop_word_remover(x):
    """Return *x* with English stopwords removed, re-joined on single spaces.

    The original computed a filtering comprehension, threw its result away,
    and then rebuilt the same list with a loop; one pass is enough.
    """
    word_tokens = word_tokenize(x)
    return ' '.join(w for w in word_tokens if w not in stop_words_english)
def french_stop_word_remover(x):
    """Return *x* with French stopwords removed, re-joined on single spaces.

    Same dead-comprehension cleanup as english_stop_word_remover.
    """
    word_tokens = word_tokenize(x)
    return ' '.join(w for w in word_tokens if w not in stop_words_french)
def replace_escape_characters(x):
    """Replace every whitespace character in *x* with a plain space and strip
    non-ASCII characters; non-string input is returned unchanged.

    The bare ``except:`` was narrowed: only the errors a non-string input
    actually raises are swallowed, so real bugs are no longer hidden.
    """
    try:
        x = re.sub(r'\s', ' ', x)
        x = x.encode('ascii', 'ignore').decode('ascii')
        return x
    except (TypeError, AttributeError):
        # e.g. NaN/None coming out of a dataframe column.
        return x
def preprocess(df):
    """Clean the email dataframe for classification and return it.

    Merges Subject into OriginalEmailBody, lowercases, strips escape
    characters and English/French stopwords, then removes noise tokens.

    Bug fixed: the original took ``cleanData = rawData.dropna()`` (a copy)
    *before* applying the subject merge, lowercasing and stopword removal to
    ``rawData`` — so none of those transformations ever reached the returned
    frame. All steps now run on a single frame, in the original order.
    """
    data = df.dropna().copy()
    data["OriginalEmailBody"] = data["Subject"] + " " + data["OriginalEmailBody"]
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.lower()
    data['OriginalEmailBody'] = data.OriginalEmailBody.str.strip()
    data = data.dropna(subset=['OriginalEmailBody'])
    data['OriginalEmailBody'] = data['OriginalEmailBody'].apply(replace_escape_characters)
    data['OriginalEmailBody'] = data['OriginalEmailBody'].apply(english_stop_word_remover)
    data['OriginalEmailBody'] = data['OriginalEmailBody'].apply(french_stop_word_remover)
    # Strip handles, reply markers, domains, digits and noise tokens.
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('@[A-Za-z0-9_]+', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('re:', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('graybar', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('\n', " ")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('#[A-Za-z0-9]+', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('#[ A-Za-z0-9]+', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('[0-9]+', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('[A-Za-z0-9_]+.com', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].replace('[^a-zA-Z0-9 ]+', ' ', regex=True)  # removes sp char
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('please', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('com', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('saps', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('sent', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('subject', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('thank', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('www', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace(' e ', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('email', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('cc', "")
    data['OriginalEmailBody'] = data['OriginalEmailBody'].str.replace('n t', "not")
    data['OriginalEmailBody'] = data.OriginalEmailBody.str.strip()
    return data
# Create an APISpec for documentation of this API using flassger
# spec = APISpec(
# title='SAPS Email Classifier',
# version='1.0.0',
# openapi_version='2.0',
# plugins=[
# FlaskPlugin(),
# MarshmallowPlugin(),
# ],
# )
# # Optional marshmallow support
# class CategorySchema(Schema):
# id = fields.Int()
# name = fields.Str(required=True)
# class PetSchema(Schema):
# category = fields.Nested(CategorySchema, many=True)
# name = fields.Str()
# app=Flask(__name__)
from azureml.core.model import Model
#from azureml.monitoring import ModelDataCollector
def init():
    """Azure ML entry point: load the fastai classifier into the global ``learn``."""
    global learn
    # NOTE(review): ``time`` is imported further down this file; it is bound by
    # the time init() runs, but hoisting the import would be safer — confirm.
    print ("model initialized" + time.strftime("%H:%M:%S"))
    model_path = Model.get_model_path(model_name = 'saps_classification')
    # model = load(model_path)
    learn = load_learner(model_path,'')
    # model_path = Model.get_model_path(model_name = 'saps_classification')
    # # Load fastai model trained using model_train.py
    # learn = load_learner(model_path,'')
# @app.route('/')
# def welcome():
# return "Welcome All"
# GET method
# @app.route('/predict',methods=["Get"])
# def predict_email():
# # GET specification for flassger
# """GET Method
# This is using docstrings for specifications.
# ---
# tags:
# - Email Classification Model API
# parameters:
# - name: subject
# in: query
# type: string
# required: true
# - name: body
# in: query
# type: string
# required: true
# responses:
# 200:
# description: The output values
# """
# # GET email subject and body
# subject=request.args.get("subject")
# body=request.args.get("body")
# # Create a dataframe
# data = {'Subject' : [subject], 'OriginalEmailBody' : [body]}
# df_test=pd.DataFrame(data)
# # Clean the data
# df_clean = preprocess(df_test)
# # Make prediction and return top 3 probabilities and RequestType
# learn.data.add_test(df_clean['OriginalEmailBody'])
# prob_preds = learn.get_preds(ds_type=DatasetType.Test, ordered=True)
# keys = ['Chargeback Request', 'Chargeback Workflow Follow-up', 'Freight Deductions Issue',
# 'Invoice Submission','Miscellaneous Deduction Information','Other',
# 'Payment status on Invoice','Secondary','Statement']
# values = prob_preds[0].numpy().tolist()[0]
# # Create a dict with keys as RequestType names and values as their corresponding probabilities
# pred = dict(zip(keys, values))
# # Select maximum probability
# max_prob = max(pred.items(), key=operator.itemgetter(1))[1]
# # Sort in descending and select top 3 values
# res = dict(sorted(pred.items(), key = itemgetter(1), reverse = True)[:3])
# res['max_prob'] = max_prob
# # Create final response with top 3 probabilities and their RequestTypes
# predictedRequestType1 = list(res.keys())[0]
# predictedRequestType2 = list(res.keys())[1]
# predictedRequestType3 = list(res.keys())[2]
# predictedProbability1 = list(res.values())[0]
# predictedProbability2 = list(res.values())[1]
# predictedProbability3 = list(res.values())[2]
# predictedMaxProb = max_prob
# response = {
# 'PredictedRequestType1':predictedRequestType1,
# 'PredictedRequestType2':predictedRequestType2,
# 'PredictedRequestType3':predictedRequestType3,
# 'PredictedProbability1':predictedProbability1,
# 'PredictedProbability2':predictedProbability2,
# 'PredictedProbability3':predictedProbability3,
# 'MaxProb':predictedMaxProb
# }
# # Return response dict
# return response
# @app.route('/random')
# def random_pet():
# """
# A cute furry animal endpoint.
# Get a random pet
# ---
# description: Get a random pet
# responses:
# 200:
# description: A pet to be returned
# schema:
# $ref: '#/definitions/Pet'
# """
# pet = {'category': [{'id': 1, 'name': 'rodent'}], 'name': 'Mickey'}
# return jsonify(PetSchema().dump(pet).data)
# template = spec.to_flasgger(
# app,
# definitions=[CategorySchema, PetSchema],
# paths=[random_pet]
# )
# # POST method
# @app.route('/predict_file',methods=["POST"])
# def predict_email_file():
# # POST spec for flassger
# """POST Method
# This is using docstrings for specifications.
# ---
# tags:
# - Email Classification Model API
# parameters:
# - name: file
# in: formData
# type: file
# required: true
# responses:
# 200:
# description: The output values
# """
# # Read csv sent as POST request
# df_test=pd.read_csv(request.files.get("file"))
# # Preprocess the csv file
# df_clean = preprocess(df_test)
# learn.data.add_test(df_clean['OriginalEmailBody'])
# # Get predictions
# prob_preds = learn.get_preds(ds_type=DatasetType.Test, ordered=True)
# # Return predictions
# return str(list(prob_preds))
# template = spec.to_flasgger(
# app
# )
# # start Flasgger using a template from apispec
# swag = Swagger(app, template=template)
# if __name__=='__main__':
# # app.run(host='0.0.0.0',port=8000)
# app.run(debug=False)
import pickle
import json
import numpy
import time
# from sklearn.linear_model import Ridge
from joblib import load
# from azureml.core.model import Model
# #from azureml.monitoring import ModelDataCollector
# def init():
# global model
# print ("model initialized" + time.strftime("%H:%M:%S"))
# model_path = Model.get_model_path(model_name = 'saps_classification')
# model = load(model_path)
def run(raw_data):
    """Azure ML scoring entry point.

    Expects *raw_data* to be a JSON string with a "data" key; returns a JSON
    string holding either {"result": [...]} or {"error": "..."}.
    """
    try:
        data = json.loads(raw_data)["data"]
        data = numpy.array(data)
        # ``learn`` is the global fastai learner loaded by init();
        # DatasetType comes from the fastai import near the top of the file.
        learn.data.add_test(data)
        result = learn.get_preds(ds_type=DatasetType.Test, ordered=True)
        # result = model.predict(data)
        result = result[0].numpy().tolist()[0]
        # return result
        return json.dumps({"result": result})
    except Exception as e:
        # Broad except is the scoring-service convention: errors are reported
        # to the caller as JSON rather than crashing the endpoint.
        result = str(e)
        return json.dumps({"error": result})
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: spatial_biased_networks.py
# --- Creation Date: 20-01-2020
# --- Last Modified: Tue 04 Feb 2020 21:33:34 AEDT
# --- Author: Xinqi Zhu
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Spatial-Biased Networks implementation for disentangled representation learning.
Based on API of stylegan2.
"""
import numpy as np
import pdb
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d
from dnnlib.tflib.ops.upfirdn_2d import upsample_conv_2d, conv_downsample_2d
from dnnlib.tflib.ops.fused_bias_act import fused_bias_act
from training.networks_stylegan2 import get_weight, dense_layer, conv2d_layer
from training.networks_stylegan2 import apply_bias_act, naive_upsample_2d
from training.networks_stylegan2 import naive_downsample_2d, modulated_conv2d_layer
from training.networks_stylegan2 import minibatch_stddev_layer
from training.spatial_biased_extended_networks import G_synthesis_sb_general_dsp
from training.spatial_biased_modular_networks import G_synthesis_sb_modular
from training.variation_consistency_networks import G_synthesis_vc_modular
from stn.stn import spatial_transformer_network as transformer
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Spatial-Biased Generator
def G_main_spatial_biased_dsp(
        latents_in,  # First input: Latent vectors (Z) [minibatch, latent_size].
        labels_in,  # Second input: Conditioning labels [minibatch, label_size].
        is_training=False,  # Network is under training? Enables and disables specific features.
        is_validation=False,  # Network is under validation? Chooses which value to use for truncation_psi.
        return_dlatents=False,  # Return dlatents in addition to the images?
        is_template_graph=False,  # True = template graph constructed by the Network class, False = actual evaluation.
        components=dnnlib.EasyDict(
        ),  # Container for sub-networks. Retained between calls.
        mapping_func='G_mapping_spatial_biased_dsp',  # Build func name for the mapping network.
        synthesis_func='G_synthesis_spatial_biased_dsp',  # Build func name for the synthesis network.
        **kwargs):  # Arguments for sub-networks (mapping and synthesis).
    """Assemble the full generator: mapping network feeding the synthesis network.

    Builds (and caches inside *components*) the mapping and synthesis
    sub-networks, runs them, and returns images_out — plus dlatents when
    return_dlatents is set. The mutable default for *components* is
    deliberate, as the inline comment states: sub-networks persist across calls.
    """
    # Validate arguments.
    assert not is_training or not is_validation
    # Setup components.
    if 'synthesis' not in components:
        components.synthesis = tflib.Network(
            'G_spatial_biased_synthesis_dsp',
            func_name=globals()[synthesis_func],
            **kwargs)
    if 'mapping' not in components:
        components.mapping = tflib.Network('G_spatial_biased_mapping_dsp',
                                           func_name=globals()[mapping_func],
                                           dlatent_broadcast=None,
                                           **kwargs)
    # Setup variables.
    lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
    # Evaluate mapping network.
    dlatents = components.mapping.get_output_for(latents_in,
                                                 labels_in,
                                                 is_training=is_training,
                                                 **kwargs)
    dlatents = tf.cast(dlatents, tf.float32)
    # Evaluate synthesis network. The control dependency pushes this graph's
    # lod value into the synthesis sub-network before it runs.
    deps = []
    if 'lod' in components.synthesis.vars:
        deps.append(tf.assign(components.synthesis.vars['lod'], lod_in))
    with tf.control_dependencies(deps):
        images_out = components.synthesis.get_output_for(
            dlatents,
            is_training=is_training,
            force_clean_graph=is_template_graph,
            **kwargs)
    # Return requested outputs.
    images_out = tf.identity(images_out, name='images_out')
    if return_dlatents:
        return images_out, dlatents
    return images_out
def G_mapping_spatial_biased_dsp(
        latents_in,  # First input: Latent vectors (Z) [minibatch, latent_size].
        labels_in,  # Second input: Conditioning labels [minibatch, label_size].
        latent_size=7,  # Latent vector (Z) dimensionality.
        label_size=0,  # Label dimensionality, 0 if no labels.
        mapping_nonlinearity='lrelu',  # Activation function: 'relu', 'lrelu', etc.  NOTE(review): unused here; kept for API parity — confirm.
        dtype='float32',  # Data type to use for activations and outputs.
        **_kwargs):  # Ignore unrecognized keyword args.
    """Identity mapping network: cast inputs and concatenate labels onto Z.

    Unlike StyleGAN's learned MLP mapping, no layers are applied; the output
    "dlatent" is simply [labels_in, latents_in].
    """
    # Inputs.
    latents_in.set_shape([None, latent_size])
    labels_in.set_shape([None, label_size])
    latents_in = tf.cast(latents_in, dtype)
    labels_in = tf.cast(labels_in, dtype)
    x = latents_in
    with tf.variable_scope('LabelConcat'):
        x = tf.concat([labels_in, x], axis=1)
    # Output.
    assert x.dtype == tf.as_dtype(dtype)
    return tf.identity(x, name='dlatents_out')
#----------------------------------------------------------------------------
# StyleGAN2-like spatial-biased synthesis network for dsprites.
def G_synthesis_spatial_biased_dsp(
        dlatents_in,  # Input: Disentangled latents (W) [minibatch, dlatent_size].
        dlatent_size=7,  # Disentangled latent (W) dimensionality. Including discrete info, rotation, scaling, and xy translation.
        D_global_size=3,  # Discrete latents.
        sb_C_global_size=4,  # Continuous latents.
        label_size=0,  # Label dimensionality, 0 if no labels.
        num_channels=1,  # Number of output color channels.
        resolution=64,  # Output resolution.
        fmap_base=16 <<
        10,  # Overall multiplier for the number of feature maps.
        fmap_decay=1.0,  # log2 feature map reduction when doubling the resolution.
        fmap_min=1,  # Minimum number of feature maps in any layer.
        fmap_max=512,  # Maximum number of feature maps in any layer.
        architecture='skip',  # Architecture: 'orig', 'skip', 'resnet'.
        nonlinearity='lrelu',  # Activation function: 'relu', 'lrelu', etc.
        dtype='float32',  # Data type to use for activations and outputs.
        resample_kernel=[
            1, 3, 3, 1
        ],  # Low-pass filter to apply when resampling activations. None = no filtering.
        fused_modconv=True,  # Implement modulated_conv2d_layer() as a single fused op?
        **_kwargs):  # Ignore unrecognized keyword args.
    """Spatial-biased synthesis network for dSprites-style images.

    The continuous latents drive explicit 2x3 affine transforms (rotation,
    scale, shear, x/y translation) applied via a spatial transformer at
    successive resolutions; the discrete/label latents modulate features and
    condition each transform's strength. Returns images_out [N, C, H, W].
    """
    resolution_log2 = int(np.log2(resolution))
    assert resolution == 2**resolution_log2 and resolution >= 4

    # Feature-map count for a stage, clipped to [fmap_min, fmap_max].
    def nf(stage):
        return np.clip(int(fmap_base / (2.0**(stage * fmap_decay))), fmap_min,
                       fmap_max)

    assert architecture in ['orig', 'skip', 'resnet']
    act = nonlinearity
    images_out = None
    # Primary inputs.
    assert dlatent_size == D_global_size + sb_C_global_size
    # First n_cat entries of the dlatent are categorical (labels + discrete);
    # the remainder are the continuous spatial-transform latents.
    n_cat = label_size + D_global_size
    dlatents_in.set_shape([None, label_size + dlatent_size])
    dlatents_in = tf.cast(dlatents_in, dtype)

    # Return rotation matrix
    def get_r_matrix(r_latents, cond_latent):
        # r_latents: [-2., 2.] -> [0, 2*pi]
        with tf.variable_scope('Condition0'):
            cond = apply_bias_act(dense_layer(cond_latent, fmaps=128), act=act)
        with tf.variable_scope('Condition1'):
            cond = apply_bias_act(dense_layer(cond, fmaps=1), act='sigmoid')
        rad = (r_latents + 2) / 4. * 2. * np.pi
        # The sigmoid "cond" gate learned from the categorical latents scales
        # the transform's strength.
        rad = rad * cond
        tt_00 = tf.math.cos(rad)
        tt_01 = -tf.math.sin(rad)
        tt_02 = tf.zeros_like(rad)
        tt_10 = tf.math.sin(rad)
        tt_11 = tf.math.cos(rad)
        tt_12 = tf.zeros_like(rad)
        theta = tf.concat([tt_00, tt_01, tt_02, tt_10, tt_11, tt_12], axis=1)
        return theta

    # Return scaling matrix
    def get_s_matrix(s_latents, cond_latent):
        # s_latents: [-2., 2.] -> [1, 3]
        with tf.variable_scope('Condition0'):
            cond = apply_bias_act(dense_layer(cond_latent, fmaps=128), act=act)
        with tf.variable_scope('Condition1'):
            cond = apply_bias_act(dense_layer(cond, fmaps=1), act='sigmoid')
        scale = (s_latents / 2. + 2.) * cond
        tt_00 = scale
        tt_01 = tf.zeros_like(scale)
        tt_02 = tf.zeros_like(scale)
        tt_10 = tf.zeros_like(scale)
        tt_11 = scale
        tt_12 = tf.zeros_like(scale)
        theta = tf.concat([tt_00, tt_01, tt_02, tt_10, tt_11, tt_12], axis=1)
        return theta

    # Return shear matrix
    def get_sh_matrix(sh_latents, cond_latent):
        # sh_latents[:, 0]: [-2., 2.] -> [-1., 1.]
        # sh_latents[:, 1]: [-2., 2.] -> [-1., 1.]
        with tf.variable_scope('Condition0x'):
            cond_x = apply_bias_act(dense_layer(cond_latent, fmaps=128),
                                    act=act)
        with tf.variable_scope('Condition1x'):
            cond_x = apply_bias_act(dense_layer(cond_x, fmaps=1),
                                    act='sigmoid')
        with tf.variable_scope('Condition0y'):
            cond_y = apply_bias_act(dense_layer(cond_latent, fmaps=128),
                                    act=act)
        with tf.variable_scope('Condition1y'):
            cond_y = apply_bias_act(dense_layer(cond_y, fmaps=1),
                                    act='sigmoid')
        cond = tf.concat([cond_x, cond_y], axis=1)
        xy_shear = sh_latents / 2. * cond
        tt_00 = tf.ones_like(xy_shear[:, 0:1])
        tt_01 = xy_shear[:, 0:1]
        tt_02 = tf.zeros_like(xy_shear[:, 0:1])
        tt_10 = xy_shear[:, 1:]
        tt_11 = tf.ones_like(xy_shear[:, 1:])
        tt_12 = tf.zeros_like(xy_shear[:, 1:])
        theta = tf.concat([tt_00, tt_01, tt_02, tt_10, tt_11, tt_12], axis=1)
        return theta

    # Return translation matrix
    def get_t_matrix(t_latents, cond_latent):
        # t_latents[:, 0]: [-2., 2.] -> [-0.5, 0.5]
        # t_latents[:, 1]: [-2., 2.] -> [-0.5, 0.5]
        with tf.variable_scope('Condition0x'):
            cond_x = apply_bias_act(dense_layer(cond_latent, fmaps=128),
                                    act=act)
        with tf.variable_scope('Condition1x'):
            cond_x = apply_bias_act(dense_layer(cond_x, fmaps=1),
                                    act='sigmoid')
        with tf.variable_scope('Condition0y'):
            cond_y = apply_bias_act(dense_layer(cond_latent, fmaps=128),
                                    act=act)
        with tf.variable_scope('Condition1y'):
            cond_y = apply_bias_act(dense_layer(cond_y, fmaps=1),
                                    act='sigmoid')
        cond = tf.concat([cond_x, cond_y], axis=1)
        xy_shift = t_latents / 4. * cond
        tt_00 = tf.ones_like(xy_shift[:, 0:1])
        tt_01 = tf.zeros_like(xy_shift[:, 0:1])
        tt_02 = xy_shift[:, 0:1]
        tt_10 = tf.zeros_like(xy_shift[:, 1:])
        tt_11 = tf.ones_like(xy_shift[:, 1:])
        tt_12 = xy_shift[:, 1:]
        theta = tf.concat([tt_00, tt_01, tt_02, tt_10, tt_11, tt_12], axis=1)
        return theta

    # Apply spatial transform
    def apply_st(x, st_matrix, idx, up=True):  # idx: 2, 3, 4
        with tf.variable_scope('Transform'):
            x = tf.transpose(x, [0, 2, 3, 1])  # NCHW -> NHWC
            x = transformer(x, st_matrix, out_dims=x.shape.as_list()[1:3])
            x = tf.transpose(x, [0, 3, 1, 2])  # NHWC -> NCHW
        with tf.variable_scope('Upconv'):
            x = apply_bias_act(conv2d_layer(x,
                                            fmaps=nf(idx),
                                            kernel=3,
                                            up=up,
                                            resample_kernel=resample_kernel),
                               act=act)
        with tf.variable_scope('Conv'):
            x = apply_bias_act(conv2d_layer(x, fmaps=nf(idx), kernel=3),
                               act=act)
        return x

    def upsample(y):
        with tf.variable_scope('Upsample'):
            return upsample_2d(y, k=resample_kernel)

    def torgb(x, y):
        with tf.variable_scope('ToRGB'):
            t = apply_bias_act(conv2d_layer(x, fmaps=num_channels, kernel=1))
            return t if y is None else y + t

    # Early layers.
    y = None
    with tf.variable_scope('4x4'):
        with tf.variable_scope('Const'):
            x = tf.get_variable('const',
                                shape=[1, nf(1), 4, 4],
                                initializer=tf.initializers.random_normal())
            x = tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1])
        with tf.variable_scope('Upconv8x8'):
            x = apply_bias_act(conv2d_layer(x,
                                            fmaps=nf(1),
                                            kernel=3,
                                            up=True,
                                            resample_kernel=resample_kernel),
                               act=act)
        with tf.variable_scope('Conv0'):
            x = apply_bias_act(conv2d_layer(x, fmaps=nf(1), kernel=3), act=act)
        with tf.variable_scope('ModulatedConv'):
            # Only the categorical part of the dlatent modulates the conv.
            x = apply_bias_act(modulated_conv2d_layer(
                x,
                dlatents_in[:, :n_cat],
                fmaps=nf(2),
                kernel=3,
                up=False,
                resample_kernel=resample_kernel,
                fused_modconv=fused_modconv),
                               act=act)
        with tf.variable_scope('Conv1'):
            x = apply_bias_act(conv2d_layer(x, fmaps=nf(2), kernel=3), act=act)
    # Rotation layers.
    with tf.variable_scope('16x16'):
        r_matrix = get_r_matrix(dlatents_in[:, n_cat:n_cat + 1],
                                dlatents_in[:, :n_cat])
        x = apply_st(x, r_matrix, 2)
    # Scaling layers.
    with tf.variable_scope('32x32'):
        s_matrix = get_s_matrix(dlatents_in[:, n_cat + 1:n_cat + 2],
                                dlatents_in[:, :n_cat])
        x = apply_st(x, s_matrix, 3)
    # Shearing layers.
    with tf.variable_scope('32x32_Shear'):
        sh_matrix = get_sh_matrix(dlatents_in[:, n_cat + 2:n_cat + 4],
                                  dlatents_in[:, :n_cat])
        x = apply_st(x, sh_matrix, 3, up=False)
    # Translation layers.
    with tf.variable_scope('64x64'):
        t_matrix = get_t_matrix(dlatents_in[:, n_cat + 4:],
                                dlatents_in[:, :n_cat])
        x = apply_st(x, t_matrix, 4)
    y = torgb(x, y)
    # # Tail layers.
    # for res in range(6, resolution_log2 + 1):
    #     with tf.variable_scope('%dx%d' % (res * 2, res * 2)):
    #         x = apply_bias_act(conv2d_layer(x,
    #                                         fmaps=nf(res),
    #                                         kernel=1,
    #                                         up=True,
    #                                         resample_kernel=resample_kernel),
    #                            act=act)
    #     if architecture == 'skip':
    #         y = upsample(y)
    #     if architecture == 'skip' or res == resolution_log2:
    #         y = torgb(x, y)
    images_out = y
    assert images_out.dtype == tf.as_dtype(dtype)
    return tf.identity(images_out, name='images_out')
|
from flask import Flask, render_template, request, redirect, session, jsonify
from datetime import datetime
import csv
import json
app = Flask(__name__)
# NOTE(review): hard-coded secret key; load from the environment for production.
app.config['SECRET_KEY'] = "some_random"
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SESSION_PERMANENT'] = False
# Dictionary containing all foods
# Lower-cased product names from the life-cycle CSV; used as a membership
# check when validating user input in index().
co2_dict = {}
with open('data/cycle_data_en.csv') as csv_file:
    data = csv.reader(csv_file, delimiter=',')
    next(data, None)
    for row in data:
        if row[0] != '':
            co2_dict[row[0].lower()] = True
nl_data = []
# updated diet data
# Sunburst nodes for the national-average diet. Rows whose value column
# (row[3]) is empty become pure hierarchy nodes; the rest carry a percentage
# value plus a kg CO2 figure (presumably % of a 5.4 kg total — confirm).
with open('data/nl_diet_data.csv') as csv_file:
    data = csv.reader(csv_file, delimiter=',')
    for row in data:
        if row[3] == '':
            nl_data.append(
                {
                    'id': row[0],
                    'name': row[1],
                    'parent': row[4],
                    'color': row[5]
                }
            )
        else:
            nl_data.append(
                {
                    'id': row[0],
                    'name': row[1],
                    'parent': row[4],
                    'value': round(float(row[3]), 2),
                    'kg_co2': round(float(row[3]) / 100 * 5.4, 2),
                    'color': row[5]
                }
            )
# Per-product emission factors: product name (original case) -> columns 1..7
# of the life-cycle CSV; summed per grocery in the views below.
cycle_dict = {}
with open('data/cycle_data_en.csv') as csv_file:
    data = csv.reader(csv_file, delimiter=',')
    next(data, None)  # skip the headers
    for row in data:
        if row[0] != '':
            cycle_dict[row[0]] = row[1:8]
# Sunburst node templates for user-entered products; entries with an empty
# value column (row[3]) are hierarchy-only nodes.
cycle_data = []
with open('data/user_sunburst_data.csv') as csv_file:
    data = csv.reader(csv_file, delimiter=',')
    next(data, None)
    for row in data:
        if row[0] != '':
            if row[3] == '':
                cycle_data.append(
                    {
                        'id': row[0],
                        'name': row[1],
                        'parent': row[4],
                        'sliced': False,
                        'color': row[5]
                    }
                )
            else:
                cycle_data.append(
                    {
                        'id': row[0],
                        'name': row[1],
                        'parent': row[4],
                        'value': float(row[3]),
                        'sliced': False
                    }
                )
@app.route('/', methods=['GET', 'POST'])
def index():
    """Main page: POST adds a grocery to the session; GET renders the page
    with the sunburst datasets serialized as JSON.

    NOTE(review): the user_data aggregation below duplicates
    process_userdata(); consider extracting a shared helper.
    """
    if 'groceries' not in session:
        session['groceries'] = []
    if request.method == 'POST':
        name = request.form['name']
        quantity = request.form['quantity']
        # Validate against the known-product set (keys are lower-cased).
        if name.lower() not in co2_dict:
            return "Dit ingredient zit niet in het recept"
        groceries = session.get('groceries')
        groceries.append({'id': name.strip(), 'name': name, 'quantity': int(quantity)})
        session['groceries'] = groceries
        return redirect('/')
    if not session['groceries']:
        users_ingredients = []
    else:
        user_data = []
        groceries = session.get('groceries')
        total_kg_co2 = 0.0
        # Total footprint: quantity (appears to be grams; /1000 -> kg — confirm)
        # times the sum of the product's per-stage factors from cycle_dict.
        for grocery in groceries:
            total_kg_co2 += (grocery['quantity'] / 1000) * sum([float(x) for x in cycle_dict[grocery['name']]])
        for grocery in groceries:
            for i in range(len(cycle_data)):
                if cycle_data[i]['name'] == grocery['name']:
                    temp_dict = cycle_data[i].copy()
                    # value = percentage co2 emission of total emission in kg
                    temp_dict['value'] = round((temp_dict['value'] * (grocery['quantity'] / 10) / total_kg_co2), 2)
                    temp_dict['kg_co2'] = round(temp_dict['value'] / 100 * total_kg_co2, 2)
                    user_data.append(temp_dict)
        # Pull in each entry's parent node so the sunburst hierarchy is complete.
        for i in range(len(user_data)):
            cur_parent_id = user_data[i]['parent']
            if cur_parent_id != '':
                parent_dict = next(item for item in cycle_data if item["id"] == cur_parent_id)
                if parent_dict not in user_data:
                    user_data.append(parent_dict)
        user_data.append({'id': '0.1', 'name': 'Food', 'parent': '', 'kg_co2': round(total_kg_co2, 2), 'color': '#fcfcdc'})
        user_data.append({'id': '1.1', 'name': 'Vegetal Products', 'parent': '0.1', 'color': '#b3e2cd'})
        user_data.append({'id': '1.2', 'name': 'Animal Products', 'parent': '0.1', 'color': '#fdcdac'})
        users_ingredients = user_data
    return render_template('index.html', groceries=session.get('groceries'), data=json.dumps(nl_data),
                           cycle_dict=json.dumps(cycle_dict), cycle_data=json.dumps(cycle_data),
                           users_ingredients=json.dumps(users_ingredients))
@app.route('/process_userdata', methods=['GET'])
def process_userdata():
    """
    Takes the users product list and preprocesses it to the sunburst visualization for the user.
    * Removed total CO2 calculation

    Returns:
        JSON ``{'user_data': [...]}`` where every entry holds the percentage
        ('value') and absolute ('kg_co2') CO2 emission of one grocery, plus
        the parent/root nodes the sunburst chart needs.
    """
    user_data = []
    groceries = session.get('groceries')
    # Total footprint in kg: quantities are grams, cycle_dict holds per-kg
    # emissions per life-cycle stage.
    total_kg_co2 = 0.0
    for grocery in groceries:
        total_kg_co2 += (grocery['quantity'] / 1000) * sum(float(x) for x in cycle_dict[grocery['name']])
    for grocery in groceries:
        for entry in cycle_data:
            if entry['name'] == grocery['name']:
                temp_dict = entry.copy()
                # value = percentage co2 emission of total emission in kg
                temp_dict['value'] = round((temp_dict['value'] * (grocery['quantity'] / 10) / total_kg_co2), 2)
                temp_dict['kg_co2'] = round(temp_dict['value'] / 100 * total_kg_co2, 2)
                user_data.append(temp_dict)
    # Add missing parent nodes; range(len(...)) snapshots the list length so
    # parents appended here are not scanned for their own parents.
    for i in range(len(user_data)):
        cur_parent_id = user_data[i]['parent']
        if cur_parent_id != '':
            parent_dict = next(item for item in cycle_data if item["id"] == cur_parent_id)
            if parent_dict not in user_data:
                user_data.append(parent_dict)
    user_data.append({'id': '0.1', 'name': 'Food', 'parent': '', 'kg_co2': round(total_kg_co2, 2), 'color': '#fcfcdc'})
    user_data.append({'id': '1.1', 'name': 'Vegetal Products', 'parent': '0.1', 'color': '#b3e2cd'})
    user_data.append({'id': '1.2', 'name': 'Animal Products', 'parent': '0.1', 'color': '#fdcdac'})
    return jsonify({'user_data': user_data})
@app.route('/delete/<id>')
def delete(id):
    """Remove the grocery whose id matches *id* from the session list."""
    remaining = [item for item in session.get('groceries') if item['id'] != id]
    session['groceries'] = remaining
    return redirect('/')
@app.route('/update/<id>', methods=['GET', 'POST'])
def update(id):
    """Edit one grocery: GET shows the edit form, POST replaces the entry.

    The POST branch removes the old entry and re-adds it with the new
    quantity (the name itself is not editable).
    """
    name = None
    temp_grocery = None
    for grocery in session.get('groceries'):
        if grocery['id'] == id:
            temp_grocery = grocery
            name = grocery['name']
    # Fix: an unknown id used to raise NameError (temp_grocery unbound)
    # when rendering the form; redirect home instead.
    if temp_grocery is None:
        return redirect('/')
    if request.method == 'POST':
        # Drop the old entry before re-adding the updated one (matches the
        # original order: the entry stays removed even if the name check
        # below fails).
        temp_groceries = [g for g in session.get('groceries') if g['id'] != id]
        session['groceries'] = temp_groceries
        quantity = request.form['quantity']
        if name not in cycle_dict:
            return "Dit ingredient zit niet in het recept"
        groceries = session.get('groceries')
        groceries.append({'id': name.strip(), 'name': name, 'quantity': int(quantity)})
        session['groceries'] = groceries
        return redirect('/')
    else:
        return render_template('update.html', grocery=temp_grocery)
@app.route('/calculate', methods=['GET'])
def calculate():
    """Return the total CO2 score (kg) for the session's grocery list.

    Returns:
        dict with ``co2_score`` (float, kg CO2) and ``success`` (bool);
        ``success`` flips to False when an ingredient is missing from
        cycle_dict or holds non-numeric stage data, but the remaining
        groceries are still summed.
    """
    groceries = session.get('groceries')
    co2_score = 0.0
    success = True
    for grocery in groceries:
        grocery_name = grocery['name']
        try:
            # quantity is in grams; cycle_dict holds per-kg emissions per
            # life-cycle stage.
            co2_score += (grocery['quantity'] / 1000) * sum(float(x) for x in cycle_dict[grocery_name])
        except (KeyError, ValueError, TypeError):
            # Unknown ingredient or malformed emission data: flag but keep
            # going (the bare except here used to swallow everything).
            success = False
    result = {"co2_score": co2_score, "success": success}
    return result
@app.route('/deleteall')
def deleteall():
    """Clear the whole grocery list and go back to the main page."""
    session['groceries'] = []
    return redirect("/")
if __name__ == '__main__':
    # Development entry point only; debug=True must not be used in production.
    app.run(debug=True)
'''
Log generation simulation with different durations and rates.
'''
import os
import time
import random
from time import sleep
from datetime import datetime
import logging
# Timestamped console logging for the simulator.
log_format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)
class LogGenerator:
    '''
    Simulation of log generator.

    Writes pseudo-random Apache-style access log lines to a file at a
    configurable average rate.

    Args:
        file (str): The file with the logs to monitor.
        rate (int): The average of number of requests per sec.

    Attributes:
        ips (list): Random ips to choose from.
        methods (list): Random methods to choose from.
        sections (list): Random sections to choose from.
        codes (list): Random codes to choose from (duplicates weight the draw).
    '''

    def __init__(self,
                 file="/tmp/access.log",
                 rate=20):
        self.file = file
        self.rate = rate
        self.ips = ["::1", "192.168.0.110", "127.0.0.1", "60.242.26.14"]
        self.methods = ["GET", "POST", "POST", "PUT", "DELETE"]
        self.sections = ["/img", "/captcha", "/css", "/foo", "/foo", "/bar"]
        self.codes = ["200", "200", "200", "200",
                      "200", "304", "403", "404", "501"]

    def write_log(self, timestamp):
        '''
        Write a log entry, given a timestamp.

        Args:
            timestamp (datetime): A timestamp for the random log.
        '''
        # Fix: the old call passed os.O_NONBLOCK as open()'s *buffering*
        # argument, silently setting an arbitrary buffer size.  The context
        # manager flushes and closes the file on exit.
        with open(self.file, 'a') as f:
            f.write(self.generate_log(timestamp))

    def random_ip(self):
        '''
        Generate a random ip.

        Returns:
            (str): Generated random ip (four dotted octets, 0-255 each).
        '''
        return ".".join(str(random.randint(0, 255)) for _ in range(4))

    def generate_log(self, timestamp):
        '''
        Generate a log string given a timestamp.

        Args:
            timestamp (datetime): A timestamp for the random log.

        Returns:
            (str): a random generated log entry.
        '''
        ip = random.choice([random.choice(self.ips), self.random_ip()])
        method = random.choice(self.methods)
        # Either a .html page or a nested section path ending in '/'.
        section = random.choice(self.sections) \
            + random.choice([".html",
                             random.choice(self.sections)+'/',
                             random.choice(self.sections)+'/'])
        code = random.choice(self.codes)
        size = random.randint(10, 100000)
        return ('%s - - [%s +1000] "%s %s HTTP/1.1" %s %d\n'
                % (ip,
                   timestamp.strftime("%d/%b/%Y:%H:%M:%S"),
                   method,
                   section,
                   code,
                   size))

    def run(self, duration):
        '''
        Run the log generation.

        Args:
            duration (float): duration of the simulation in seconds.
        '''
        start = time.time()
        while time.time() - start < duration:
            self.write_log(datetime.now())
            # Random inter-arrival delay averaging 1/rate seconds.
            sleep(random.random() * 2 / self.rate)
|
"""Remote client command for creating image from container."""
import sys
import podman
from pypodman.lib import AbstractActionBase, BooleanAction, ChangeAction
class Commit(AbstractActionBase):
    """Class for creating image from container.

    Implements the remote-client 'commit' subcommand: it resolves the
    source container through the podman client and commits it to a new
    image name.
    """

    @classmethod
    def subparser(cls, parent):
        """Add Commit command to parent parser."""
        parser = parent.add_parser(
            'commit',
            help='create image from container',
        )
        parser.add_argument(
            '--author',
            help='Set the author for the committed image',
        )
        # ChangeAction accumulates Dockerfile-style CHANGE instructions.
        parser.add_argument(
            '--change',
            '-c',
            action=ChangeAction,
        )
        parser.add_argument(
            '--format',
            '-f',
            choices=('oci', 'docker'),
            default='oci',
            type=str.lower,
            help='Set the format of the image manifest and metadata',
        )
        parser.add_argument(
            '--iidfile',
            metavar='PATH',
            help='Write the image ID to the file',
        )
        parser.add_argument(
            '--message',
            '-m',
            help='Set commit message for committed image',
        )
        parser.add_argument(
            '--pause',
            '-p',
            action=BooleanAction,
            default=True,
            help='Pause the container when creating an image',
        )
        parser.add_argument(
            '--quiet',
            '-q',
            action='store_true',
            help='Suppress output',
        )
        parser.add_argument(
            'container',
            nargs=1,
            help='container to use as source',
        )
        parser.add_argument(
            'image',
            nargs=1,
            help='image name to create',
        )
        # Dispatch: base class will instantiate cls and call .commit().
        parser.set_defaults(class_=cls, method='commit')

    def commit(self):
        """Create image from container.

        Returns:
            int: 0 on success, 1 when the container is missing or the
            service reports an error.
        """
        try:
            try:
                ctnr = self.client.containers.get(self._args.container[0])
            except podman.ContainerNotFound as e:
                sys.stdout.flush()
                print(
                    'Container {} not found.'.format(e.name),
                    file=sys.stderr,
                    flush=True)
                return 1
            else:
                # NOTE(review): self.opts is forwarded wholesale, so the
                # positional 'image'/'container' values are also in the
                # kwargs -- confirm the podman client tolerates that.
                ident = ctnr.commit(self.opts['image'][0], **self.opts)
                print(ident)
        except podman.ErrorOccurred as e:
            sys.stdout.flush()
            print(
                '{}'.format(e.reason).capitalize(),
                file=sys.stderr,
                flush=True)
            return 1
        return 0
|
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Houses py_grpc_gevent_test.
"""
load("@grpc_python_dependencies//:requirements.bzl", "requirement")
# Label of the gRPC Python library every gevent test must depend on.
_GRPC_LIB = "//src/python/grpcio/grpc:grpcio"
# Suffix appended to the test name for the copied gevent main file.
_COPIED_MAIN_SUFFIX = ".gevent.main"
def py_grpc_gevent_test(
        name,
        srcs,
        main = None,
        deps = None,
        data = None,
        **kwargs):
    """Runs a Python test with gevent monkeypatched in.

    Args:
      name: The name of the test.
      srcs: The source files.
      main: The main file of the test.
      deps: The dependencies of the test.
      data: The data dependencies of the test.
      **kwargs: Any other test arguments.
    """
    if main == None:
        if len(srcs) != 1:
            fail("When main is not provided, srcs must be of size 1.")
        main = srcs[0]
        # NOTE(review): 'main' is only validated here; the generated gevent
        # wrapper below is what actually runs as the test main.
    deps = [] if deps == None else deps
    data = [] if data == None else data
    lib_name = name + ".gevent.lib"
    supplied_python_version = kwargs.pop("python_version", "")
    if supplied_python_version and supplied_python_version != "PY3":
        fail("py_grpc_gevent_test only supports python_version=PY3")
    native.py_library(
        name = lib_name,
        srcs = srcs,
    )
    augmented_deps = deps + [
        ":{}".format(lib_name),
        requirement("gevent"),
    ]
    if _GRPC_LIB not in augmented_deps:
        augmented_deps.append(_GRPC_LIB)

    # The main file needs to be in the same package as the test file.
    copied_main_name = name + _COPIED_MAIN_SUFFIX
    copied_main_filename = copied_main_name + ".py"
    native.genrule(
        name = copied_main_name,
        srcs = ["//bazel:_gevent_test_main.py"],
        outs = [copied_main_filename],
        cmd = "cp $< $@",
    )

    # TODO(https://github.com/grpc/grpc/issues/27542): Remove once gevent is deemed non-flaky.
    if "flaky" in kwargs:
        kwargs.pop("flaky")

    # The wrapper main receives the original test's name as argv[1].
    native.py_test(
        name = name + ".gevent",
        args = [name],
        deps = augmented_deps,
        srcs = [copied_main_filename],
        main = copied_main_filename,
        python_version = "PY3",
        flaky = True,
        **kwargs
    )
|
import random
import string
from fake_gen.base import Factory
class RandomLengthStringFactory(Factory):
    """
    Generates random strings between 2 lengths

    :param min_chars: minimum amount of characters (default 3)
    :param max_chars: maximum amount of characters (default 100)
    :param prefix: string that must be present before the random characters
    :param suffix: string that must be present after the random characters

    Examples,
    >>> all(len(chars) == 5 for chars in RandomLengthStringFactory(5, 5).generate(200))
    True
    """
    MIN_CHAR_DEFAULT = 3
    MAX_CHAR_DEFAULT = 100

    def __init__(self, min_chars=None, max_chars=None, prefix=None, suffix=None):
        super(RandomLengthStringFactory, self).__init__()
        # Fix: the type checks used to reject the documented None defaults,
        # so RandomLengthStringFactory() always raised TypeError.
        if min_chars is not None and not isinstance(min_chars, int):
            raise TypeError("min_chars needs to be an integer")
        if max_chars is not None and not isinstance(max_chars, int):
            raise TypeError("max_chars needs to be an integer")
        # 'is not None' keeps an explicit 0 from falling back to the default.
        self._min_chars = min_chars if min_chars is not None else self.MIN_CHAR_DEFAULT
        self._max_chars = max_chars if max_chars is not None else self.MAX_CHAR_DEFAULT
        self._prefix = prefix if prefix else ''
        self._suffix = suffix if suffix else ''

    def __call__(self):
        """Return one random string: prefix + random letters + suffix."""
        length = random.randint(self._min_chars, self._max_chars)
        random_parts = [self._prefix]
        random_parts += [random.choice(string.ascii_letters) for _ in range(length)]
        random_parts += [self._suffix]
        return ''.join(random_parts)
class HashHexDigestFactory(Factory):
    """
    Factory yielding hex digests of pseudo-random strings.

    Each call draws a random-length string of ASCII letters and returns
    ``hash_class(data).hexdigest()``.

    :param hash_class: Any hash class from the hashlib package, like hashlib.md5
    """
    _MAX_VALUE_LENGTH = 100
    _MIN_VALUE_LENGTH = 3

    def __init__(self, hash_class):
        super(HashHexDigestFactory, self).__init__()
        self._hash_class = hash_class

    def __call__(self):
        """Return the hex digest of one freshly generated random string."""
        size = random.randint(self._MIN_VALUE_LENGTH, self._MAX_VALUE_LENGTH)
        letters = (random.choice(string.ascii_letters) for _ in range(size))
        payload = u''.join(letters)
        return self._hash_class(payload.encode()).hexdigest()
|
from unittest import mock
from tests.util.test_util import perform_test_ca_sign, find_test_ca_sign_url
from xrdsst.controllers.auto import AutoController
from xrdsst.controllers.base import BaseController
from xrdsst.controllers.cert import CertController
from xrdsst.controllers.status import StatusController
from xrdsst.main import XRDSSTTest
class IntegrationOpBase:
    """Shared operations for xrdsst security-server integration tests."""

    def step_cert_download_csrs(self):
        """Download CSRs via CertController and return (key_type, path) pairs.

        Asserts that exactly six CSRs come back and that every file
        location is unique.
        """
        with XRDSSTTest() as app:
            cert_controller = CertController()
            cert_controller.app = app
            cert_controller.load_config = (lambda: self.config)
            result = cert_controller.download_csrs()
            # NOTE(review): 6 is the CSR count expected for the fixture
            # configuration -- confirm if the fixture changes.
            assert len(result) == 6
            fs_loc_list = []
            csrs = []
            for csr in result:
                fs_loc_list.append(csr.fs_loc)
                csrs.append((str(csr.key_type).lower(), csr.fs_loc))
            # All downloaded CSR paths must be distinct.
            flag = len(set(fs_loc_list)) == len(fs_loc_list)
            assert flag is True
            return csrs

    @staticmethod
    def step_acquire_certs(downloaded_csrs, security_server):
        """Sign each downloaded CSR at the test CA and write .signed.pem files.

        Returns the list of written certificate file paths.
        """
        tca_sign_url = find_test_ca_sign_url(security_server['configuration_anchor'])
        cert_files = []
        for down_csr in downloaded_csrs:
            # down_csr is a (key_type, fs_loc) pair from step_cert_download_csrs.
            cert = perform_test_ca_sign(tca_sign_url, down_csr[1], down_csr[0])
            cert_file = down_csr[1] + ".signed.pem"
            cert_files.append(cert_file)
            with open(cert_file, "w") as out_cert:
                out_cert.write(cert)
        return cert_files

    def apply_cert_config(self, signed_certs, ssn):
        """Point security server *ssn*'s config at the signed certificates."""
        self.config['security_server'][ssn]['certificates'] = signed_certs

    def query_status(self):
        """Run the status controller and sanity-check its rendered output."""
        with XRDSSTTest() as app:
            status_controller = StatusController()
            status_controller.app = app
            status_controller.load_config = (lambda: self.config)
            servers = status_controller._default()

            # Must not throw exception, must produce output, test with global status only -- should be ALWAYS present
            # in the configuration that integration test will be run, even when it is still failing as security server
            # has only recently been started up.
            assert status_controller.app._last_rendered[0][1][0].count('LAST') == 1
            assert status_controller.app._last_rendered[0][1][0].count('NEXT') == 1

            return servers

    def step_autoconf(self):
        """Run the full automatic configuration flow with a patched config."""
        with XRDSSTTest() as app:
            with mock.patch.object(BaseController, 'load_config', (lambda x, y=None: self.config)):
                auto_controller = AutoController()
                auto_controller.app = app
                auto_controller._default()
|
# encoding: utf-8
from opendatatools.common import RestAgent
from bs4 import BeautifulSoup
import json
import pandas as pd
import datetime
# Mapping from investing.com index symbol to its Chinese display name.
# Fix: the original literal listed 'VIX' twice; the duplicate is removed
# (dict literals silently keep only the last occurrence).
index_map = {
    'SSEC': '上证综合指数',
    'SZSC1': '深证成份指数(价格)',
    'FTXIN9': '富时中国A50指数',
    'DJSH': '道琼斯上海指数',
    'HSI': '香港恒生指数 (CFD)',
    'DJI': '道琼斯工业平均指数',
    'SPX': '美国标准普尔500指数 (CFD)',
    'IXIC': '纳斯达克综合指数',
    'RUT': '美国小型股2000 (CFD)',
    'VIX': 'VIX恐慌指数 (CFD)',
    'GSPTSE': '加拿大多伦多S&P/TSX 综合指数 (CFD)',
    'BVSP': '巴西IBOVESPA股指',
    'MXX': 'S&P/BMV IPC',
    'GDAXI': '德国DAX30指数 (CFD)',
    'FTSE': '英国富时100指数 (CFD)',
    'FCHI': '法国CAC40指数',
    'STOXX50E': '欧洲斯托克(Eurostoxx)50指数 (CFD)',
    'AEX': '荷兰AEX指数',
    'IBEX': '西班牙IBEX35指数 (CFD)',
    'FTMIB': '意大利富时MIB指数 (CFD)',
    'SSMI': '瑞士SWI20指数 (CFD)',
    'PSI20': '葡萄牙PSI20指数',
    'BFX': '比利时BEL20指数 (CFD)',
    'ATX': 'ATX',
    'OMXS30': '瑞典OMX斯德哥尔摩30指数',
    'IMOEX': '俄罗斯MOEX Russia指数',
    'IRTS': '俄罗斯交易系统市值加权指数',
    'WIG20': '波兰华沙WIG20指数',
    'BUX': '匈牙利股票交易指数',
    'XU100': '土耳其伊斯坦堡100指数',
    'TA35': 'TA 35',
    'TASI': '沙特阿拉伯TASI指数',
    'N225': '日经225指数 (CFD)',
    'AXJO': '澳大利亚S&P/ASX200指数',
    'TWII': '台湾加权指数',
    'SETI': 'SET Index',
    'KS11': '韩国KOSPI指数',
    'JKSE': '印尼雅加达综合指数',
    'NSEI': '印度S&P CNX NIFTY指数',
    'BSESN': '印度孟买30指数',
    'HNX30': 'HNX 30',
    'CSE': '斯里兰卡科伦坡指数',
}

# Reverse lookup (display name -> symbol); requires values to be unique.
index_map_inv = {v: k for k, v in index_map.items()}
class YingWeiAgent(RestAgent):
    """Scraper for world stock-index quotes from cn.investing.com."""

    def __init__(self):
        RestAgent.__init__(self)
        # Referer + XHR headers are sent with every request; presumably the
        # site rejects requests without them -- verify if scraping breaks.
        self.add_headers({'Referer': 'https://cn.investing.com/indices/shanghai-composite', 'X-Requested-With': 'XMLHttpRequest'})

    def get_index_list(self):
        """Fetch the major-indices quote table.

        Returns:
            (DataFrame, str): one row per index and an error string
            ('' on success).
        """
        url = "https://cn.investing.com/indices/major-indices"
        response = self.do_request(url)
        soup = BeautifulSoup(response, "html5lib")
        tables = soup.find_all('table')
        data_list = []
        for table in tables:
            # 'cr_12' is the id of the world-indices quote table on the page.
            if table.has_attr('id') and table['id'] == 'cr_12':
                trs = table.findAll("tr")
                for tr in trs:
                    if tr.has_attr('id'):
                        tds = tr.findAll('td')
                        # Column 7 carries the quote time as a unix timestamp.
                        time = datetime.datetime.fromtimestamp(int(tds[7]['data-value'])).strftime("%Y-%m-%d %H:%M:%S")
                        data_list.append({'index_name_cn': tr.a['title'],
                                          'index_name': index_map_inv[tr.a['title']] if tr.a['title'] in index_map_inv else '',
                                          'country' : tds[0].span['title'],
                                          'last': tds[2].text,
                                          'high': tds[3].text,
                                          'low': tds[4].text,
                                          'price_change': tds[5].text,
                                          'percent_change': tds[6].text,
                                          'time' : time,
                                          })
        df = pd.DataFrame(data_list)
        return df, ''

    def _get_id(self, symbol):
        """Return the numeric pair id for *symbol* (Chinese display name).

        Scrapes the same quote table; returns None when not found.
        """
        url = "https://cn.investing.com/indices/major-indices"
        response = self.do_request(url)
        soup = BeautifulSoup(response, "html5lib")
        tables = soup.find_all('table')
        for table in tables:
            if table.has_attr('id') and table['id'] == 'cr_12':
                rows = table.findAll("tr")
                for row in rows:
                    if row.has_attr('id'):
                        if row.a['title'] == symbol:
                            # Row ids look like 'pair_<id>'; strip the prefix.
                            return row['id'][5:]
        return None

    def get_index_data(self, symbol, interval, period):
        """Fetch candle data for one index symbol.

        Args:
            symbol: key of index_map (e.g. 'SSEC').
            interval: chart interval passed through to the site API.
            period: chart period passed through to the site API.

        Returns:
            (DataFrame, str): time/close columns, or (None, message) on error.
        """
        # NOTE(review): an unknown symbol raises KeyError here rather than
        # returning the (None, message) error tuple -- confirm intended.
        symbol = index_map[symbol]
        id = self._get_id(symbol)
        if id is None:
            return None, '暂不支持该指数'
        url = "https://cn.investing.com/common/modules/js_instrument_chart/api/data.php"
        param = {
            'pair_id': id,
            'pair_id_for_news': id,
            'chart_type': 'area',
            'pair_interval': interval,
            'candle_count': 120,
            'events': 'yes',
            'volume_series': 'yes',
            'period': period,
        }
        response = self.do_request(url, param=param, encoding='gzip')
        if response is not None:
            jsonobj = json.loads(response)
            df = pd.DataFrame(jsonobj['candles'])
            # Only the first two candle columns are meaningful here.
            df.columns = ['time', 'close', '2', '3']
            df = df[['time', 'close']]
            # Candle times are epoch milliseconds.
            df['time'] = df['time'].apply(lambda x: datetime.datetime.fromtimestamp(int(x) / 1000))
            return df, ''
        else:
            return None, 'error, no data'
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The MagnaChain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from collections import deque
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
import traceback
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
initialize_datadir,
log_filename,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
system_info,
wait_until,
)
class TestStatus(Enum):
    """Outcome of a test run; mapped to the process exit codes below."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3
# Process exit codes matching TestStatus (77 is the conventional
# "skipped" exit code used by automake-style test harnesses).
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class MagnaChainTestFramework(object):
"""Base class for a magnachain test script.
Individual magnachain test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
    def __init__(self):
        """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
        self.setup_clean_chain = False
        self.nodes = []
        self.sidenodes = []
        self.mocktime = 0
        # mapped: side->main attachment list, e.g. [[0], [], [], [1]] means
        # side node 0 attaches to main node 0 and side node 1 to main node 3.
        self.mapped = []
        self.with_gdb = False
        self.set_test_params()
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
    def main(self):
        """Main function. This should not be overridden by the subclass test scripts."""

        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave magnachainds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop magnachainds after the test execution")
        parser.add_option("--srcdir", dest="srcdir",
                          default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"),
                          help="Source directory containing magnachaind/magnachain-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir",
                          default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                          help="Attach a python debugger if test fails")
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()

        PortSeed.n = self.options.port_seed

        # Prefer the built binaries in srcdir over anything on the PATH.
        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']

        check_json_precision()

        self.options.cachedir = os.path.abspath(self.options.cachedir)

        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()

        # Assume failure until run_test() completes without raising.
        success = TestStatus.FAILED

        try:
            self.setup_chain()
            self.setup_network()
            if getattr(self, "num_sidenodes", 0) > 0:
                self.setup_sidechain()
            self.__for_convenient()
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        # NOTE(review): SkipTest is not imported in this file's visible
        # import block -- presumably defined later in the module; verify.
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")

        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()

        if not self.options.noshutdown:
            system_info()
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            self.log.info("Note: magnachainds were not stopped and may still be running")

        # Keep the datadir around on failure (or when asked to) for debugging.
        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
            if os.getenv("PYTHON_DEBUG", ""):
                # Dump the end of the debug logs, to aid in debugging rare
                # travis failures.
                import glob
                filenames = [self.options.tmpdir + "/test_framework.log"]
                filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
                MAX_LINES_TO_PRINT = 1000
                for fn in filenames:
                    try:
                        with open(fn, 'r') as f:
                            print("From", fn, ":")
                            print("".join(deque(f, MAX_LINES_TO_PRINT)))
                    except OSError:
                        print("Opening file %s failed." % fn)
                        traceback.print_exc()

        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            sys.exit(TEST_EXIT_PASSED)
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            sys.exit(TEST_EXIT_SKIPPED)
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            logging.shutdown()
            sys.exit(TEST_EXIT_FAILED)
# Methods to override in subclass test scripts.
    def set_test_params(self):
        """Tests must override this method to change default values for number of nodes, topology, etc"""
        raise NotImplementedError
    def add_options(self, parser):
        """Override this method to add command-line options to the test.

        Args:
            parser: the optparse.OptionParser being assembled in main().
        """
        pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
    def setup_network(self, sidechain=False):
        """Override this method to customize test network topology.

        Args:
            sidechain (bool): operate on self.sidenodes instead of self.nodes.
        """
        self.setup_nodes(sidechain=sidechain)

        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        node_num = (self.num_nodes if not sidechain else self.num_sidenodes)
        nodes = self.nodes if not sidechain else self.sidenodes
        for i in range(node_num - 1):
            connect_nodes_bi(nodes, i, i + 1, sidechain=sidechain)
        self.sync_all([nodes])
    def setup_nodes(self, sidechain=False):
        """Override this method to customize test node setup.

        Instantiates main or side TestNode objects (honouring the optional
        per-test extra_args / side_extra_args and rpc_timewait attributes)
        and starts them.

        Args:
            sidechain (bool): set up self.sidenodes instead of self.nodes.
        """
        extra_args = None
        if not sidechain:
            if hasattr(self, "extra_args"):
                extra_args = self.extra_args
            if not getattr(self, 'rpc_timewait', 0):
                self.add_nodes(self.num_nodes, extra_args)
            else:
                self.add_nodes(self.num_nodes, extra_args, timewait=self.rpc_timewait)
        else:
            if hasattr(self, "side_extra_args"):
                extra_args = self.side_extra_args
            if not getattr(self, 'rpc_timewait', 0):
                self.add_nodes(self.num_sidenodes, extra_args, sidechain=True)
            else:
                self.add_nodes(self.num_sidenodes, extra_args, sidechain=True, timewait=self.rpc_timewait)
        self.start_nodes(sidechain=sidechain)
# 支链相关
def setup_sidechain(self):
"""Override this method to customize test sidenode setup"""
# todo 多侧链支持
# 目前主节点与侧节点只能是1对1关系,不支持1对多
assert self.num_nodes >= self.num_sidenodes
self.log.info("setup sidechain")
# 创建抵押币
# for convince
node = self.nodes[0]
logger = self.log.info
node.generate(2)
sidechain_id = node.createbranchchain("clvseeds.com", "00:00:00:00:00:00:00:00:00:00:ff:ff:c0:a8:3b:80:8333",
node.getnewaddress())['branchid']
self.sidechain_id = sidechain_id
node.generate(1)
self.sync_all()
logger("sidechain id is {}".format(sidechain_id))
# 创建magnachaind的软链接,为了区分主链和侧链
magnachain_side_path = os.path.join(self.options.srcdir, 'magnachaind-side')
if not os.path.exists(magnachain_side_path):
try:
# os.symlink(os.path.join(self.options.srcdir, 'magnachaind'),
# os.path.join(self.options.srcdir, 'magnachaind-side'))
# use copy to instead
shutil.copy(os.path.join(self.options.srcdir, 'magnachaind'),magnachain_side_path)
except Exception as e:
pass
else:
# if exist
if not os.path.islink(magnachain_side_path):
try:
os.unlink(magnachain_side_path)
shutil.copy(os.path.join(self.options.srcdir, 'magnachaind'), magnachain_side_path)
except Exception as e:
pass
# Set env vars
if "MAGNACHAIND_SIDE" not in os.environ:
os.environ["MAGNACHAIND_SIDE"] = os.path.join(self.options.srcdir, 'magnachaind-side')
# 初始化侧链目录
logger("create sidechain datadir")
side_datadirs = []
if not self.mapped:
self.mapped = [[i] for i in range(self.num_sidenodes)]
for i in range(self.num_sidenodes):
attach_index = None
# 处理特定的节点映射
# 最多只能是1对1
all([len(m) == 1 for m in self.mapped])
for index, m in enumerate(self.mapped):
if i in m:
attach_index = index
break
if not attach_index:
attach_index = i
logger("sidenode{} attach to mainnode{}".format(i, attach_index))
side_datadirs.append(
initialize_datadir(self.options.tmpdir, i, sidechain_id=sidechain_id,
mainport=self.nodes[attach_index].rpcport,
main_datadir=os.path.join(self.options.tmpdir, 'node{}'.format(attach_index))))
logger("setup sidechain network and start side nodes")
self.setup_network(sidechain=True)
logger("sidechain attach to mainchains")
for index, m in enumerate(self.mapped):
if m:
# 只有主节点有被挂载时才处理
self.nodes[index].generate(2) # make some coins
self.sync_all()
# addbranchnode接口会覆盖旧的配置。目前主节点与侧节点只能是1对1关系,不支持1对多
ret = self.nodes[index].addbranchnode(sidechain_id, '127.0.0.1', self.sidenodes[m[0]].rpcport, '', '',
'', side_datadirs[m[0]])
if ret != 'ok':
raise Exception(ret)
for index, m in enumerate(self.mapped):
if m:
logger("mortgage coins to sidenode{}".format(m[0]))
for j in range(20):
addr = self.sidenodes[m[0]].getnewaddress()
txid = self.nodes[index].mortgageminebranch(sidechain_id, 5000, addr)['txid'] # 抵押挖矿币
for i in range(5):
# avoid generate timeout on travis-ci
self.nodes[index].generate(2)
self.sync_all()
assert self.sidenodes[m[0]].getmempoolinfo()['size'] > 0
self.sync_all()
logger("sidechains setup done")
    def run_test(self):
        """Tests must override this method to define test logic.

        Raises:
            NotImplementedError: always, in this base implementation.
        """
        raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
    def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None, sidechain=False):
        """Instantiate TestNode objects.

        Args:
            num_nodes (int): how many nodes to create.
            extra_args: per-node argument lists (defaults to empty lists).
            rpchost: RPC host passed to every TestNode.
            timewait: RPC connection timeout forwarded to TestNode.
            binary: per-node daemon binaries (defaults to None for each).
            sidechain (bool): append to self.sidenodes instead of self.nodes.
        """
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if binary is None:
            binary = [None] * num_nodes
        # One entry per node is required for both lists.
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(binary), num_nodes)
        for i in range(num_nodes):
            n = TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i],
                         stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir,
                         sidechain=sidechain,with_gdb=self.with_gdb)
            if not sidechain:
                self.nodes.append(n)
            else:
                self.sidenodes.append(n)
def start_node(self, i, extra_args=None, stderr=None):
"""Start a magnachaind"""
node = self.nodes[i]
node.start(extra_args, stderr)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None, sidechain=False):
        """Start multiple magnachainds.

        Starts every node in self.nodes (or self.sidenodes when
        *sidechain*), waits for all RPC connections and stops everything
        again if any single node fails to come up.
        """
        nodes = []
        if extra_args is None:
            # Default: no per-node extra arguments; also select which node
            # list (main or side) this call operates on.
            if not sidechain:
                extra_args = [None] * self.num_nodes
                assert_equal(len(extra_args), self.num_nodes)
                nodes = self.nodes
            else:
                extra_args = [None] * self.num_sidenodes
                assert_equal(len(extra_args), self.num_sidenodes)
                nodes = self.sidenodes
        try:
            # When extra_args was supplied, nodes is still []; fall back to
            # the appropriate node list here.
            if not nodes:
                if sidechain:
                    nodes = self.sidenodes
                else:
                    nodes = self.nodes
            for i, node in enumerate(nodes):
                node.start(extra_args[i])
            for node in nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            self.stop_nodes()
            raise

        if self.options.coveragedir is not None:
            for node in nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a magnachaind test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple magnachaind test nodes"""
all_nodes = self.nodes + self.sidenodes
for node in all_nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in all_nodes:
# Wait for nodes to stop
node.wait_until_stopped()
    def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None):
        """Assert that starting node *i* fails during init.

        Captures the daemon's stderr; when *expected_msg* is given it must
        appear in that output. Raises AssertionError if the node starts
        successfully.
        """
        with tempfile.SpooledTemporaryFile(max_size=2 ** 16) as log_stderr:
            try:
                self.start_node(i, extra_args, stderr=log_stderr)
                self.stop_node(i)
            except Exception as e:
                assert 'magnachaind exited' in str(e)  # node must have shutdown
                # Mark the node as down so later cleanup does not touch it.
                self.nodes[i].running = False
                self.nodes[i].process = None
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8')
                    if expected_msg not in stderr:
                        raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
            else:
                # The daemon started without error: the test expectation failed.
                if expected_msg is None:
                    assert_msg = "magnachaind should have exited with an error"
                else:
                    assert_msg = "magnachaind should have exited with expected error " + expected_msg
                raise AssertionError(assert_msg)
    def wait_for_node_exit(self, i, timeout):
        """Block until node *i*'s process exits, at most *timeout* seconds."""
        self.nodes[i].process.wait(timeout)
def split_network(self,sidechain = False):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
if not sidechain:
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
else:
disconnect_nodes(self.sidenodes[1], 2)
disconnect_nodes(self.sidenodes[2], 1)
self.sync_all([self.sidenodes[:2], self.sidenodes[2:]])
def join_network(self, sidechain=False, timeout=60):
    """
    Join the (previously split) network halves together.
    """
    if sidechain:
        connect_nodes_bi(self.sidenodes, 1, 2)
        self.sync_all([self.sidenodes], timeout=timeout)
    else:
        connect_nodes_bi(self.nodes, 1, 2)
        # Default node group ([self.nodes]) is used by sync_all here.
        self.sync_all(timeout=timeout)
def make_more_work_than(self, a, b, sidechain=False):
    """Make chain a have more work than b.

    Mines blocks on node ``a`` until its chain tip has strictly more work
    than node ``b``'s tip.

    :param a: index of the node that should end up with more work
    :param b: index of the reference node
    :param sidechain: operate on self.sidenodes instead of self.nodes
    :return: list of generated block hashes

    (fix: this docstring used to be a stray no-op string literal placed
    *before* the def, so the function had no docstring at all.)
    """
    if sidechain:
        node_a = self.sidenodes[a]
        bwork = int(self.sidenodes[b].getchaintipwork(), 16)
    else:
        node_a = self.nodes[a]
        bwork = int(self.nodes[b].getchaintipwork(), 16)
    genblocks = []
    # getchaintipwork returns a hex string; compare as integers.
    while int(node_a.getchaintipwork(), 16) <= bwork:
        genblocks.append(node_a.generate(1)[0])
    # Re-check for an exact tie and mine one more block for strict dominance.
    if bwork == int(node_a.getchaintipwork(), 16):
        genblocks.append(node_a.generate(1)[0])
    if len(genblocks) > 0:
        self.log.info("make more work by gen %d" % (len(genblocks)))
    return genblocks
def sync_all(self, node_groups=None, show_max_height=False, timeout=60):
    """Sync blocks and mempools within each group of nodes."""
    groups = node_groups if node_groups else [self.nodes]
    if show_max_height:
        self.log.info("syncall group : %s" % (str([len(g) for g in groups])))
    # Only forward our logger to sync_blocks when height reporting is on.
    maybe_logger = self.log if show_max_height else None
    for group in groups:
        sync_blocks(group, logger=maybe_logger, timeout=timeout)
        sync_mempools(group, timeout=timeout)
def enable_mocktime(self):
    """Turn on mocktime for the script.

    Scripts that use the cached blockchain may need mocktime; without it
    the mempools will not sync because the nodes stay in IBD.  For
    backward compatibility with previous versions of the cache, this sets
    mocktime to Jan 1, 2014 + (201 * 10 * 60).
    """
    self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
    """Reset mocktime to 0 (i.e. nodes use real wall-clock time again)."""
    self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
    """Set up the 'TestFramework' logger with file and console handlers."""
    self.log = logging.getLogger('TestFramework')
    self.log.setLevel(logging.DEBUG)
    # File handler records everything.
    fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
    fh.setLevel(logging.DEBUG)
    # Console handler writes to stdout; its level comes from --loglevel,
    # which may be numeric or a name such as DEBUG.
    ch = logging.StreamHandler(sys.stdout)
    loglevel = self.options.loglevel
    ch.setLevel(int(loglevel) if loglevel.isdigit() else loglevel.upper())
    # Mirror magnachaind's debug.log format with microsecond precision and
    # UTC timestamps, so log files can be concatenated and sorted.
    formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    formatter.converter = time.gmtime
    for handler in (fh, ch):
        handler.setFormatter(formatter)
        self.log.addHandler(handler)
    if self.options.trace_rpc:
        # Echo every RPC call to stdout when --tracerpc was given.
        rpc_logger = logging.getLogger("MagnaChainRPC")
        rpc_logger.setLevel(logging.DEBUG)
        rpc_handler = logging.StreamHandler(sys.stdout)
        rpc_handler.setLevel(logging.DEBUG)
        rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
    """Initialize a pre-mined blockchain for use by the test.

    Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
    Afterward, create num_nodes copies from the cache."""
    assert self.num_nodes <= MAX_NODES
    # Rebuild the whole cache if any cached node datadir is missing.
    create_cache = False
    for i in range(MAX_NODES):
        if not os.path.isdir(os.path.join(self.options.cachedir, 'node' + str(i))):
            create_cache = True
            break
    if create_cache:
        self.log.debug("Creating data directories from cached datadir")
        # find and delete old cache directories if any exist
        for i in range(MAX_NODES):
            if os.path.isdir(os.path.join(self.options.cachedir, "node" + str(i))):
                shutil.rmtree(os.path.join(self.options.cachedir, "node" + str(i)))
        # Create cache directories, run magnachainds:
        for i in range(MAX_NODES):
            datadir = initialize_datadir(self.options.cachedir, i)
            args = [os.getenv("MAGNACHAIND", "magnachaind"), "-server", "-keypool=1", "-datadir=" + datadir,
                    "-discover=0"]
            if i > 0:
                # All later nodes connect to node 0 so they form one network.
                args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
            self.nodes.append(
                TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None,
                         stderr=None, mocktime=self.mocktime, coverage_dir=None))
            self.nodes[i].args = args
            self.start_node(i)
        # Wait for RPC connections to be ready
        for node in self.nodes:
            node.wait_for_rpc_connection()
        # Create a 200-block-long chain; each of the 4 first nodes
        # gets 25 mature blocks and 25 immature.
        # Note: To preserve compatibility with older versions of
        # initialize_chain, only 4 nodes will generate coins.
        #
        # blocks are created with timestamps 10 minutes apart
        # starting from 2010 minutes in the past
        self.enable_mocktime()
        block_time = self.mocktime - (201 * 10 * 60)
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(self.nodes, block_time)
                    self.nodes[peer].generate(1)
                    block_time += 10 * 60
                # Must sync before next peer starts generating blocks
                sync_blocks(self.nodes)
        # Shut them down, and clean up cache directories:
        self.stop_nodes()
        self.nodes = []
        self.disable_mocktime()
        # Strip per-run artifacts so the cache contains only chain state.
        for i in range(MAX_NODES):
            os.remove(log_filename(self.options.cachedir, i, "debug.log"))
            os.remove(log_filename(self.options.cachedir, i, "db.log"))
            os.remove(log_filename(self.options.cachedir, i, "peers.dat"))
            os.remove(log_filename(self.options.cachedir, i, "fee_estimates.dat"))
    # Copy the cached datadirs into this test's tmpdir for num_nodes nodes.
    for i in range(self.num_nodes):
        from_dir = os.path.join(self.options.cachedir, "node" + str(i))
        to_dir = os.path.join(self.options.tmpdir, "node" + str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in magnachain.conf
def _initialize_chain_clean(self):
    """Initialize empty blockchain for use by the test.

    Creates an empty blockchain and num_nodes wallets; useful when a test
    case wants complete control over initialization.
    """
    for node_index in range(self.num_nodes):
        initialize_datadir(self.options.tmpdir, node_index)
def __for_convenient(self):
    '''
    Expose self.nodeN / self.snodeN attribute aliases so tests can write
    self.node0 instead of self.nodes[0].
    :return:
    '''
    for idx, node in enumerate(self.nodes):
        setattr(self, 'node' + str(idx), node)
    for idx, node in enumerate(self.sidenodes):
        setattr(self, 'snode' + str(idx), node)
class ComparisonTestFramework(MagnaChainTestFramework):
    """Test framework for doing p2p comparison testing

    Sets up some magnachaind binaries:
    - 1 binary: test binary
    - 2 binaries: 1 test binary, 1 ref binary
    - n>2 binaries: 1 test binary, n-1 ref binaries"""

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        default_binary = os.getenv("MAGNACHAIND", "magnachaind")
        parser.add_option("--testbinary", dest="testbinary",
                          default=default_binary,
                          help="magnachaind binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=default_binary,
                          help="magnachaind binary to use for reference nodes (if any)")

    def setup_network(self):
        # Whitelist localhost unless the subclass supplied its own extra_args.
        extra_args = getattr(self, "extra_args", [['-whitelist=127.0.0.1']] * self.num_nodes)
        binaries = [self.options.testbinary] + [self.options.refbinary] * (self.num_nodes - 1)
        self.add_nodes(self.num_nodes, extra_args, binary=binaries)
        self.start_nodes()
class SkipTest(Exception):
    """This exception is raised to skip a test.

    The message is forwarded to Exception.__init__ so that str(exc) and
    tracebacks show the skip reason (previously the base class was never
    initialized and str(exc) was empty).
    """

    def __init__(self, message):
        super().__init__(message)
        self.message = message
class MutiSideChainTestFramework(MagnaChainTestFramework):
    """Test framework for doing muti sidechain testing"""

    def set_test_params(self):
        self.setup_clean_chain = True
        # With multiple sidechains, one main node can host several sidechains.
        self.num_nodes = 1
        self.num_sidenodes = 2

    def setup_network(self, sidechain=False):
        """Override this method to customize test network topology"""
        self.setup_nodes(sidechain=sidechain)
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        node_num = (self.num_nodes if not sidechain else self.num_sidenodes)
        nodes = self.nodes if not sidechain else self.sidenodes
        # NOTE: the block below is deliberately disabled (kept as a no-op
        # string): nodes of different sidechains must not be interconnected.
        '''
        不同的侧链节点不需要链接起来,也不会链接起来
        for i in range(node_num - 1):
            connect_nodes_bi(nodes, i, i + 1, sidechain=sidechain)
        self.sync_all([nodes])
        '''

    def setup_sidechain(self):
        '''
        Override this method to customize test sidenode setup
        :return:
        '''
        self.log.info("setup sidechain")
        # Create the mortgage (collateral) coins for the branch chains.
        # for convince
        node = self.nodes[0]
        logger = self.log.info
        node.generate(2)
        self.sidechain_id_one = node.createbranchchain("clvseeds.com", "00:00:00:00:00:00:00:00:00:00:ff:ff:c0:a8:3b:80:8333",
                                                       node.getnewaddress())['branchid']
        node.generate(1)
        self.sidechain_id_two = node.createbranchchain("clvseeds.com", "00:00:00:00:00:00:00:00:00:00:ff:ff:c0:a8:3b:80:8333",
                                                       node.getnewaddress())['branchid']
        node.generate(1)
        logger("sidechain ids:\n\t{}\n\t{}".format(self.sidechain_id_one,self.sidechain_id_two))
        # Create a symlink to magnachaind so the main-chain and side-chain
        # daemons can be told apart.
        if not os.path.exists(os.path.join(self.options.srcdir, 'magnachaind-side')):
            try:
                os.symlink(os.path.join(self.options.srcdir, 'magnachaind'),
                           os.path.join(self.options.srcdir, 'magnachaind-side'))
            except Exception as e:
                # The link may already exist, or the FS may lack symlink support.
                pass
        # Set env vars
        if "MAGNACHAIND_SIDE" not in os.environ:
            os.environ["MAGNACHAIND_SIDE"] = os.path.join(self.options.srcdir, 'magnachaind-side')
        # Initialize the sidechain data directories.
        logger("create sidechains datadir")
        # sidechain one
        sidechain_one_dir = initialize_datadir(self.options.tmpdir, 0, sidechain_id=self.sidechain_id_one,
                                               mainport=self.nodes[0].rpcport,
                                               main_datadir=os.path.join(self.options.tmpdir, 'node{}'.format(0)))
        sidechain_two_dir = initialize_datadir(self.options.tmpdir, 1, sidechain_id=self.sidechain_id_two,
                                               mainport=self.nodes[0].rpcport,
                                               main_datadir=os.path.join(self.options.tmpdir, 'node{}'.format(0)))
        print('datadirs:',sidechain_one_dir,sidechain_two_dir)
        logger("setup sidechains network and start side nodes")
        self.setup_network(sidechain=True)
        logger("sidechain attach to mainchains")
        self.nodes[0].generate(2)  # make some coins
        # addbranchnode overwrites any previous configuration; currently the
        # main node maps 1:1 to a side node (1:N is not supported).
        ret = self.nodes[0].addbranchnode(self.sidechain_id_one, '127.0.0.1', self.sidenodes[0].rpcport, '', '',
                                          '', sidechain_one_dir)
        if ret != 'ok':
            raise Exception(ret)
        ret = self.nodes[0].addbranchnode(self.sidechain_id_two, '127.0.0.1', self.sidenodes[1].rpcport, '', '',
                                          '', sidechain_two_dir)
        if ret != 'ok':
            raise Exception(ret)
        for m in [0,1]:
            logger("mortgage coins to sidenode{}".format(m))
            sidechain_id = self.sidechain_id_one if m == 0 else self.sidechain_id_two
            for j in range(10):
                addr = self.sidenodes[m].getnewaddress()
                txid = self.nodes[0].mortgageminebranch(sidechain_id, 5000, addr)['txid']  # stake coins for branch mining
            for i in range(5):
                # avoid generate timeout on travis-ci
                self.nodes[0].generate(2)
            self.sync_all()
            wait_until(lambda: self.sidenodes[m].getmempoolinfo()['size'] > 0, timeout=30)
            self.sync_all()
        logger("sidechains setup done")
|
import torch
from torch_geometric.datasets import Planetoid
import torch.nn.functional as F
from torch_geometric.nn import GATConv
# Pin all computation to GPU #2; change to 'cpu' if CUDA is unavailable.
device = torch.device('cuda:2')
class Net(torch.nn.Module):
    """Two-layer graph attention network for Cora (1433 features -> 7 classes)."""

    def __init__(self):
        super(Net, self).__init__()
        # First layer: 8 attention heads of width 8 (concatenated to 64 dims);
        # second layer: a single averaged head over the 7 classes.
        self.conv1 = GATConv(1433, 8, heads=8, dropout=0.6)
        self.conv2 = GATConv(8 * 8, 7, heads=1, concat=False, dropout=0.6)

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        # NOTE(review): F.dropout here uses the default p=0.5 while the
        # GATConv layers use 0.6 -- presumably intentional; confirm.
        x = F.dropout(x, training=self.training)
        x = F.elu(self.conv1(x, edge_index))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)
# Load the Cora citation dataset and move model + graph onto the device.
dataset = Planetoid(root='/datasets/Cora', name='Cora')
GCN = Net().to(device)  # NOTE: named GCN, but the model is actually a GAT
data = dataset[0].to(device)
optimizer = torch.optim.Adam(GCN.parameters(), lr=0.005, weight_decay=5e-4)
def train_one_epoch():
    """Run one optimization step over the training mask and return the loss."""
    GCN.train()
    optimizer.zero_grad()
    log_probs = GCN(data)
    nll = F.nll_loss(log_probs[data.train_mask], data.y[data.train_mask])
    nll.backward()
    optimizer.step()
    return nll.item()
def test_one_epoch():
    """Evaluate classification accuracy on the held-out test mask."""
    GCN.eval()
    with torch.no_grad():  # no gradients needed for evaluation
        _, pred = GCN(data).max(dim=1)
        correct = pred[data.test_mask].eq(data.y[data.test_mask]).sum()
    # fix: cast to Python ints before dividing. '/' between two integer
    # tensors performed floor division on older PyTorch (< 1.5), silently
    # truncating the accuracy to 0.
    return int(correct) / int(data.test_mask.sum())
# Train for a fixed 200 epochs, reporting loss/accuracy every epoch.
GCN.train()
for epoch in range(200):
    loss = train_one_epoch()
    acc = test_one_epoch()
    print('epoch', epoch, 'loss', loss, 'accuracy', acc)
# Fixed epoch budget of 200.
# Increasing dropout can improve accuracy.
# GAT acc 81.19%
import glob
import logging
import math
import os
import pickle
import re
from itertools import product
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.model_selection import ShuffleSplit, learning_curve
from .classifiers import custom_dict
from .utils import progress_bar
# Apply the shared seaborn/matplotlib look at import time.
sns.set(color_codes=True)
plt.style.use('default')
# Names exported via `from <module> import *`.
__all__ = ['custom_dict', 'fig_param', 'colors', 'bar_grid_for_dataset', 'classwise_barplot_for_dataset',
           'bar_plot_for_problem', 'plot_learning_curves_importances', 'pgf_with_latex']
# Bar colors, positionally matched to the model ordering from custom_dict.
colors = ['black', 'black', 'black', 'indigo', 'blueviolet', 'mediumorchid', 'plum', 'mediumblue', 'firebrick',
          'darkorange', 'sandybrown', 'darkgoldenrod', 'gold', 'khaki']
pgf_with_latex = {  # setup matplotlib to use latex for output
    "pgf.texsystem": "pdflatex",  # change this if using xetex or lautex
    "text.usetex": True,  # use LaTeX to write all text
    "font.family": "serif",
    "font.serif": [],  # blank entries should cause plots
    "font.sans-serif": ['Times New Roman'] + plt.rcParams['font.serif'],  # to inherit fonts from the document
    "font.monospace": [],
    "font.size": 11,
    "legend.fontsize": 11,  # Make the legend/label fonts
    "xtick.labelsize": 11,  # a little smaller
    "ytick.labelsize": 11,
    'pgf.rcfonts': False,
    "pgf.preamble": [
        r"\usepackage[utf8x]{inputenc}",  # use utf8 fonts
        r"\usepackage[T1]{fontenc}",  # plots will be generated
        r"\usepackage[detect-all,locale=DE]{siunitx}",
    ]  # using this preamble
}
# Keyword arguments shared by every savefig call; 'format' and 'fname'
# are filled in by the individual plotting functions.
fig_param = {'facecolor': 'w', 'edgecolor': 'w', 'transparent': False, 'dpi': 800, 'bbox_inches': 'tight',
             'pad_inches': 0.05}
def init_plots(df, extension, metric, figsize):
    """Prepare shared plotting state for the bar-plot helpers.

    :param df: results frame with 'Dataset' and 'Model' columns
    :param extension: output format; 'pgf' switches matplotlib to LaTeX output
    :param metric: metric column name; names containing 'pval' get a
        tighter y-limit (0.011) than the default (1.1)
    :param figsize: figure size tuple, used only by the pgf backend setup
    :return: (bar_width, df, fig_param, index, opacity, u_datasets, u_models, end)
    """
    sns.set(color_codes=True)
    plt.style.use('default')
    fig_param['format'] = extension
    if extension == 'pgf':
        plt.rc('text', usetex=True)
        mpl.use('pgf')
        pgf_with_latex["figure.figsize"] = figsize
        mpl.rcParams.update(pgf_with_latex)
    bar_width = 0.5
    opacity = 0.7
    offset = 0.1
    # fix: work on a copy -- assigning the 'rank' column on the sliced view
    # triggered pandas' SettingWithCopyWarning.
    df = df[~df['Dataset'].str.contains('Multi-Class')].copy()
    # Order models deterministically via the custom ranking.
    df['rank'] = df['Model'].map(custom_dict)
    df.sort_values(by='rank', inplace=True)
    del df['rank']
    u_models = list(df.Model.unique())
    u_models = [model.split('Classifier')[0] for model in u_models]
    u_models[u_models.index('SGD')] = "StochasticGradientDescent"
    u_models[u_models.index('LinearSVC')] = "SupportVectorMachine"
    u_models[u_models.index('Ridge')] = "RidgeClassificationModel"
    # Split CamelCase names into words for the tick labels.
    u_models = [' '.join(re.findall('[A-Z][^A-Z]*', model)) for model in u_models]
    u_models[0] = u_models[0] + ' Guesser (Baseline)'
    u_datasets = list(df.Dataset.unique())
    bar_width_offset = bar_width + offset
    space = 0.3
    # Bar x-positions, grouped 3/4/1/1/2/2 with extra space between groups.
    index = []
    for i in [3, 4, 1, 1, 2, 2]:
        if len(index) == 0:
            index.extend(list(np.arange(1, i + 1) * bar_width_offset))
        else:
            ll = (index[-1] + space) + (np.arange(1, i + 1) * bar_width_offset)
            index.extend(ll)
    # p-value metrics live on [0, 0.01]; everything else on [0, 1].
    if 'pval' in metric:
        end = 0.011
    else:
        end = 1.1
    return bar_width, df, fig_param, index, opacity, u_datasets, u_models, end
def bar_grid_for_dataset(df, metric, std, folder, figsize=(7, 4), extension='png', logger=None):
    """Draw one bar chart per dataset, laid out on a shared grid figure.

    :param df: results frame with 'Dataset', 'Model', metric and metric-std columns
    :param metric: metric column to plot
    :param std: divisor applied to the stored std values for the error bars
    :param folder: output directory for the saved figure
    :param figsize: per-subplot size; the full figure is scaled by the grid shape
    :param extension: output format
    :param logger: optional logger; a default is created when omitted
    """
    # fix: the default logger=None used to crash on the logger.info call below.
    if logger is None:
        logger = logging.getLogger('None')
    # fix: metric and figsize were previously passed to init_plots swapped
    # (positionally), breaking the pgf figure size and the pval y-limit.
    bar_width, df, fig_param, index, opacity, u_datasets, u_models, end = init_plots(
        df, extension, metric=metric, figsize=figsize)
    # Choose a grid shape dividing the dataset count evenly when possible.
    if len(u_datasets) % 4 == 0:
        c = 4
        r = int(len(u_datasets) / c)
    elif len(u_datasets) % 3 == 0:
        c = 3
        r = int(len(u_datasets) / c)
    else:
        c = 5
        r = int(len(u_datasets) / c) + 1
    figsize = (figsize[0] * r, figsize[1] * c)
    logger.info('Datasets {}, figsize {}, rows {}, cols {}'.format(len(u_datasets), figsize, r, c))
    fig, axs = plt.subplots(nrows=r, ncols=c, sharex=True, sharey=True, figsize=figsize, frameon=True, edgecolor='k',
                            facecolor='white')
    # (fix: removed a stray plt.figure() that opened an extra blank figure)
    axs = np.array(axs).flatten()
    ini = index[0]
    for ax, dataset in zip(axs, u_datasets):
        # fix: progress message was logged at ERROR level
        logger.info("Plotting grid plot for dataset {}".format(dataset))
        accs = list(df[df['Dataset'] == dataset][metric].values)
        errors = list(df[df['Dataset'] == dataset][metric + '-std'].values / std)
        ax.bar(x=index, height=accs, yerr=errors, width=bar_width, alpha=opacity, color=colors, tick_label=u_models)
        # Reference lines at chance (0.5) and perfect (1.0) score.
        ax.plot([ini - bar_width / 2, index[-1] + bar_width / 2], [0.5, 0.5], "k--")
        ax.plot([ini - bar_width / 2, index[-1] + bar_width / 2], [1.0, 1.0], "k--")
        ax.set_yticks(np.arange(0, end, step=0.1).round(1))
        # Break long dataset names onto two lines for the subplot title.
        l = int(len(dataset.split(" ")) / 2) + 1
        dataset = '_'.join(dataset.split(" ")[0:l]) + '\n' + '_'.join(dataset.split(" ")[l:])
        ax.set_title(dataset, fontsize=10)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.tick_params(labelsize=10)
        ax.tick_params(axis='x', which='major', labelsize=10)
        ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=45, ha='right')
        ax.set_ylim(0, end)
        ax.set_ylabel(metric.title(), fontsize=10)
    fname = os.path.join(folder, "plot_{}.{}".format('grid', extension))
    fig_param['fname'] = fname
    fig.savefig(**fig_param)
    plt.show()
def classwise_barplot_for_dataset(df, metric, std, folder, figsize=(4, 4), extension='png',
                                  logger=logging.getLogger('None')):
    """Draw and save one standalone bar chart per dataset.

    :param df: results frame with 'Dataset', 'Model', metric and metric-std columns
    :param metric: metric column to plot
    :param std: divisor applied to the stored std values for the error bars
    :param folder: output directory; one file per dataset is written
    """
    # fix: metric and figsize were previously passed to init_plots swapped
    # (positionally), breaking the pgf figure size and the pval y-limit.
    bar_width, df, fig_param, index, opacity, u_datasets, u_models, end = init_plots(
        df, extension, metric=metric, figsize=figsize)
    ini = index[0]
    for dataset in u_datasets:
        logger.error("Plotting single for dataset {}".format(dataset))
        fig, ax = plt.subplots(figsize=figsize, frameon=True, edgecolor='k', facecolor='white')
        accs = list(df[df['Dataset'] == dataset][metric].values)
        errors = list(df[df['Dataset'] == dataset][metric + '-std'].values / std)
        ax.bar(x=index, height=accs, yerr=errors, width=bar_width, alpha=opacity, color=colors, tick_label=u_models)
        # Reference lines at chance (0.5) and perfect (1.0) score.
        ax.plot([ini - bar_width / 2, index[-1] + bar_width / 2], [0.5, 0.5], "k--")
        ax.plot([ini - bar_width / 2, index[-1] + bar_width / 2], [1.0, 1.0], "k--")
        ax.set_yticks(np.arange(0, end, step=0.1).round(1))
        plt.title(dataset, fontsize=10)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.tick_params(labelsize=10)
        ax.tick_params(axis='x', which='major', labelsize=8)
        ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=45, ha='right')
        ax.set_ylim(0, end)
        ax.set_ylabel(metric.title(), fontsize=10)
        plt.tight_layout()
        # Strip digits/whitespace from the dataset name to build the file name.
        dataset = re.sub(r'\s*\d+\s*', '', dataset)
        dataset = dataset.replace(" ", "_")
        fname = os.path.join(folder, "plot_{}.{}".format(dataset.lower(), extension))
        fig_param['fname'] = fname
        plt.savefig(**fig_param)
def bar_plot_for_problem(df, metric, params, std, folder, figsize=(14, 6), extension='png',
                         logger=logging.getLogger('None')):
    """Draw one grouped bar chart over all datasets, one bar group per model.

    :param df: results frame with 'Dataset', 'Model', metric and metric-std columns
    :param metric: metric column to plot
    :param params: keyword arguments forwarded to plt.legend
    :param std: divisor applied to the stored std values for the error bars
    :param folder: output directory for the saved figure
    """
    # fix: metric and figsize were previously passed to init_plots swapped.
    bar_width, df, fig_param, index, opacity, u_datasets, u_models, end = init_plots(
        df, extension, metric=metric, figsize=figsize)
    # fix: init_plots returns a plain Python list; 'index + bar_width' below
    # needs elementwise (ndarray) arithmetic and raised TypeError before.
    index = np.asarray(index)
    init_index = index
    ini = init_index[0]
    fig, ax = plt.subplots(figsize=figsize, frameon=True, edgecolor='k', facecolor='white')
    for model in u_models:
        accs = list(df[df['Model'] == model][metric].values)
        errors = list(df[df['Model'] == model][metric + '-std'].values / std)
        ax.bar(x=index, height=accs, yerr=errors, width=bar_width, alpha=opacity, label=model)
        index = index + bar_width  # shift the next model's bars to the right
    end = 1.1
    # Reference lines at chance (0.5) and perfect (1.0) score.
    ax.plot([ini - bar_width, index[-1] - bar_width / 2], [0.5, 0.5], "k--")
    ax.plot([ini - bar_width, index[-1] - bar_width / 2], [1.0, 1.0], "k--")
    ax.set_yticks(np.arange(0, end, step=0.1).round(1))
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.tick_params(labelsize=10)
    ax.set_xticks(init_index)
    ax.set_xticklabels(np.arange(len(u_datasets)) + 1, rotation=0)
    ax.set_ylim(0, end)
    ax.set_ylabel(metric.title(), fontsize=15)
    plt.legend(**params)
    plt.tight_layout()
    fname = os.path.join(folder, "plot_{}.{}".format(metric.lower(), extension))
    fig_param['fname'] = fname
    plt.savefig(**fig_param)
def plot_importance(model1, model2, class_label, feature_names, folder, extension, figsize=(7, 4), number=15):
    """Plot the top *number* feature importances of two forest models, stacked.

    :param model1: fitted ensemble for the full data
    :param model2: fitted ensemble for the Missing-CCS-FIN variant
    :param class_label: class name used for the figure title and file name
    :param feature_names: array of feature names indexable by importance order
    :param folder: output directory
    :param extension: output format
    :param number: how many top features to show per model
    """
    fig_param['format'] = extension

    def norm(x):
        # Min-max normalize to [0, 1].
        return (x - x.min()) / (x.max() - x.min())

    def get_importances(model):
        # Per-tree importances provide an error estimate for the ensemble mean.
        feature_importances = model.feature_importances_
        trees = np.array(model.estimators_)
        trees = trees.flatten()
        deviation = []
        for tree in trees:
            imp = tree.feature_importances_
            imp = norm(imp)
            deviation.append(imp)
        std = np.std(deviation, axis=0) / len(trees)
        indices = np.argsort(feature_importances)[::-1]
        importances = norm(feature_importances)[indices][0:number]
        names = feature_names[indices[0:number]]
        std = std[indices][0:number]
        return importances, std, names

    importances, std, names = get_importances(model1)
    importances2, std2, names2 = get_importances(model2)
    fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=figsize, frameon=True, edgecolor='k', facecolor='white')
    # Wrap the class label onto two lines for the suptitle.
    if '(' in class_label:
        l = class_label.index('(')
        class_label = ' '.join(class_label[0:l].split(" ")) + '\n' + ' '.join(class_label[l:].split(" ")) + '\n'
    else:
        l = int(len(class_label.split(" ")) / 2) + 1
        class_label = ' '.join(class_label.split(" ")[0:l]) + '\n' + ' '.join(class_label.split(" ")[l:]) + '\n'
    ax1.barh(range(number), importances, color="r", xerr=std, align="center")
    # fix: ticks must be set before tick labels; ax1 previously set the
    # labels first, pairing them with stale tick positions (ax2 was correct).
    ax1.set_yticks(range(number))
    ax1.set_yticklabels(names)
    ax1.spines['right'].set_visible(False)
    ax1.spines['top'].set_visible(False)
    ax2.barh(range(number), importances2, color="r", xerr=std2, align="center")
    ax2.set_yticks(range(number))
    ax2.set_yticklabels(names2)
    ax2.set_title('Missing-CCS-FIN', y=1.01)
    ax2.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    fig.suptitle(class_label, y=0.90, fontsize=11)
    class_label = '_'.join(class_label.lower().split(' '))
    class_label = class_label.replace('\n', '')
    fname = os.path.join(folder, "importance_{}.{}".format(class_label, extension))
    fig_param['fname'] = fname
    plt.savefig(**fig_param)
def learning_curve_for_label(estimators, X, y, vulnerable, fname, extension):
    """Plot train/test learning curves for each estimator on a shared grid.

    :param estimators: fitted sklearn estimators to evaluate
    :param X: feature matrix
    :param y: labels
    :param vulnerable: True for small classes -- uses absolute train sizes
        and a 50% test split instead of fractional sizes
    :param fname: output file path
    :param extension: output format
    """
    ncols = 2
    nrows = int(len(estimators) / ncols)
    figsize = (7, nrows * 4)
    fig_param['format'] = extension
    fig, axs = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True, figsize=figsize,
                            frameon=True, edgecolor='k', facecolor='white')
    axs = np.array(axs).flatten()
    # (fix: removed the unused 'test_size' local; the split is set below)
    if not vulnerable:
        cv = ShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
    else:
        cv = ShuffleSplit(n_splits=10, test_size=0.5, random_state=0)
    i = 1
    for ax, estimator in zip(axs, estimators):
        label = type(estimator).__name__
        progress_bar(i, len(estimators), label)
        i += 1
        if not vulnerable:
            train_sizes = np.linspace(0.4, 1.0, num=15)
        else:
            train_sizes = np.arange(10, 300, 10) / X.shape[0]
        train_sizes, train_scores, test_scores, fit_times, _ = learning_curve(estimator, X, y, cv=cv,
                                                                              n_jobs=os.cpu_count() - 2,
                                                                              train_sizes=train_sizes,
                                                                              return_times=True)
        # Round the realized train sizes up to multiples of 5 for tick labels.
        train_sizes = [int(math.ceil(i / 5.0)) * 5 for i in train_sizes]
        train_scores_mean = np.mean(train_scores, axis=1)
        train_scores_std = np.std(train_scores, axis=1)
        test_scores_mean = np.mean(test_scores, axis=1)
        test_scores_std = np.std(test_scores, axis=1)
        # (fix: removed unused fit_times_mean / fit_times_std locals)
        # Plot learning curve
        label = label.split('Classifier')[0]
        if 'SGD' in label:
            label = "StochasticGradient\nDescent"
        if 'LinearSVC' in label:
            label = "SupportVector\nMachine"
        if 'Ridge' in label:
            label = "RidgeClassification\nModel"
        if 'Hist' in label:
            label = "HistogramGradient\nBoosting"
        label = ' '.join(re.findall('[A-Z][^A-Z]*', label))
        ax.set_title(label, y=0.90, fontsize=13)
        ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
                        train_scores_mean + train_scores_std, alpha=0.1, color='r')
        ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
                        test_scores_mean + test_scores_std, alpha=0.1, color='k')
        ax.plot(train_sizes, train_scores_mean, 'o-', label="In-Sample Accuracy", markersize=3, color='r')
        ax.plot(train_sizes, test_scores_mean, 's-', label="Out-of-Sample Accuracy", markersize=3, color='k')
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_ylim(0.20, 1.20)
        ax.set_yticks(np.linspace(0.3, 1.0, num=8).round(1))
        ax.set_yticklabels(np.linspace(0.3, 1.0, num=8).round(1), fontsize=13)
        diff = train_sizes[1] - train_sizes[0]
        if vulnerable:
            diff = diff * 2
        ax.set_xlim(train_sizes[0] - diff, train_sizes[-1] + diff)
        if vulnerable:
            # Pad the tick list so edge ticks are not clipped.
            train_sizes.append(train_sizes[-1] + 5)
            train_sizes.insert(0, train_sizes[0] - 5)
        ax.set_xticks(train_sizes[::2])
        ax.set_xticklabels(train_sizes[::2], rotation=90, ha='right', fontsize=11)
    # NOTE(review): assumes a fixed 5x2 grid (axs[9], axs[4]) -- confirm the
    # estimator count is always 10.
    axs[9].set_xlabel('# Training Examples', x=-0.2, fontsize=14)
    axs[4].set_ylabel('Accuracy', fontsize=14)
    params = dict(loc='lower right', bbox_to_anchor=(1.0, -0.45), ncol=2, fancybox=False, shadow=True,
                  facecolor='white', edgecolor='k', fontsize=13)
    if not vulnerable:
        params['bbox_to_anchor'] = (1.00, -0.45)
    plt.legend(**params)
    fig_param['fname'] = fname
    plt.savefig(**fig_param)
def plot_learning_curves_importances(models_folder, csv_reader, vulnerable_classes, lrcurve_folder, imp_folder,
                                     extension='png', plotlr=False, logger=logging.getLogger('None')):
    """For every (missing_ccs_fin, class label) combination, plot feature
    importances for vulnerable classes and (optionally) learning curves.

    :param models_folder: directory containing pickled estimators
    :param csv_reader: data source exposing label mappings and data loaders
    :param vulnerable_classes: class labels treated as vulnerable (small)
    :param lrcurve_folder: output directory for learning-curve figures
    :param imp_folder: output directory for importance figures
    :param plotlr: also plot learning curves when True
    """

    def get_estimators_data(models_folder, csv_reader, label_number, missing_ccs_fin):
        # Load the persisted estimators matching this label from disk.
        X, y = csv_reader.get_data_class_label(class_label=label_number, missing_ccs_fin=missing_ccs_fin)
        label = csv_reader.inverse_label_mapping[label_number]
        if missing_ccs_fin:
            label = label + ' Missing-CCS-FIN'
        name = '-' + '_'.join(label.lower().split(' ')) + '.pickle'
        estimators = []
        files = glob.glob(os.path.join(models_folder, '*.pickle'))
        files.sort()
        for pic in files:
            if name in pic and not ('randomclassifier' in pic or 'perceptron' in pic):
                # NOTE: pickle.load is only safe because these are our own
                # model files; never point this at untrusted input.
                with open(pic, 'rb') as f:
                    model = pickle.load(f)
                estimators.append(model)
        return label, estimators, X, y

    for missing_ccs_fin, (label, label_number) in product(csv_reader.ccs_fin_array,
                                                          list(csv_reader.label_mapping.items())):
        label, estimators, X, y = get_estimators_data(models_folder, csv_reader, label_number=label_number,
                                                      missing_ccs_fin=missing_ccs_fin)
        vulnerable = label in vulnerable_classes
        if vulnerable and not missing_ccs_fin:
            name = 'RandomForestClassifier'.lower() + '-' + '_'.join(label.lower().split(' ')) + '.pickle'
            # fix: both pickle files were opened without ever being closed.
            with open(os.path.join(models_folder, name), 'rb') as f:
                model1 = pickle.load(f)
            model1.n_estimators = 500
            model1.fit(X, y)
            label1 = label + ' Missing-CCS-FIN'
            name = 'RandomForestClassifier'.lower() + '-' + '_'.join(label1.lower().split(' ')) + '.pickle'
            with open(os.path.join(models_folder, name), 'rb') as f:
                model2 = pickle.load(f)
            model2.n_estimators = 500
            X1, y1 = csv_reader.get_data_class_label(class_label=label_number, missing_ccs_fin=True)
            model2.fit(X1, y1)
            plot_importance(model1, model2, label, csv_reader.feature_names, imp_folder, extension=extension,
                            figsize=(7, 9), number=15)
        if len(estimators) != 0 and plotlr:
            label = label.replace(" ", "_")
            fname = os.path.join(lrcurve_folder, "learning_curves_{}.{}".format(label, extension))
            learning_curve_for_label(estimators, X, y, vulnerable, fname, extension)
        logger.error("###########################{}-{}###########################".format(vulnerable, label))
|
from . import features
from . import models
from . import preprocess
from . import utils
from . import visualizations
|
from databases import Database, DatabaseURL
import os
from .config import load_config, Config
from .generators.empty import EmptyGenerator
from .generators.initial import InitialGenerator
from .tables import (
db_create_migrations_table_if_not_exists,
db_load_migrations_table,
db_apply_migration,
db_unapply_migration,
)
from .loader import load_migrations
import sqlalchemy
async def init(dir="migrations"):
    """Create the migrations package: config __init__ plus '0001_initial'.

    :param dir: directory to create; must not already exist (os.mkdir raises).
    """
    migration_init_path = os.path.join(dir, "__init__.py")
    migration_0001_path = os.path.join(dir, "0001_initial.py")
    config = Config(metadata="example:metadata")
    from_state = config.get_initial_state()
    to_state = config.get_current_state()
    generator = InitialGenerator(from_state=from_state, to_state=to_state)
    os.mkdir(dir)
    config.write_config_to_disk(path=migration_init_path)
    print(f"Created config in {migration_init_path!r}")
    generator.write_migration_to_disk(path=migration_0001_path)
    # fix: dropped an extraneous f-prefix on a placeholder-free string (F541)
    print("Created migration '0001_initial'")
async def make_migration(url: str, dir: str = "migrations"):
    """Create a new empty migration depending on the current leaf migrations.

    :param url: database URL used to read the applied-migrations table
    :param dir: migrations directory to scan and to write the new file into
    """
    async with Database(url) as database:
        applied = await db_load_migrations_table(database)
        # fix: honour the 'dir' argument (dir_name was hard-coded to
        # "migrations"); also removed a leftover debug print and a local
        # 'import os' that shadowed the module-level import.
        migrations = load_migrations(applied, dir_name=dir)
        dependencies = [migration.name for migration in migrations if migration.is_leaf]
        # Assumes at least one migration exists (the '0001_initial' from init).
        final_name = dependencies[-1]
        index = int(final_name.split("_")[0]) + 1
        migration_000x_path = os.path.join(dir, f"{index:04}_auto.py")
        generator = EmptyGenerator()
        generator.write_migration_to_disk(
            path=migration_000x_path, dependencies=dependencies
        )
        print(f"Created migration '{index:04}_auto'")
async def list_migrations(url: str, dir: str = "migrations"):
    """Return all migrations on disk, annotated with applied state from the DB.

    :param url: database URL used to read the applied-migrations table
    :param dir: migrations directory to scan (new parameter, defaults to the
        previous hard-coded "migrations" for backward compatibility,
        matching make_migration)
    """
    async with Database(url) as database:
        applied = await db_load_migrations_table(database)
        return load_migrations(applied, dir_name=dir)
async def migrate(url: str, target: str = None):
    """Apply or unapply migrations so the database matches *target*.

    target=None migrates to the latest migration; target="zero" unapplies
    everything; otherwise target is matched as a prefix of exactly one
    migration name.
    """
    async with Database(url) as database:
        await db_create_migrations_table_if_not_exists(database)
        applied_migrations = await db_load_migrations_table(database)
        # Load the migrations from disk.
        migrations = load_migrations(applied_migrations, dir_name="migrations")
        # Determine which migration we are targeting.
        if target is None:
            index = len(migrations) + 1
        elif target.lower() == "zero":
            index = 0
        else:
            # 1-based enumeration so migrations[:index] includes the target.
            candidates = [
                (index, migration)
                for index, migration in enumerate(migrations, 1)
                if migration.name.startswith(target)
            ]
            if len(candidates) > 1:
                raise Exception(
                    f"Target {target!r} matched more than one migration name."
                )
            elif len(candidates) == 0:
                raise Exception(f"Target {target!r} does not match any migrations.")
            index, migration = candidates[0]
        # Anything applied after the target must come down; anything not yet
        # applied up to (and including) the target must go up.
        has_downgrades = any(migration.is_applied for migration in migrations[index:])
        has_upgrades = any(not migration.is_applied for migration in migrations[:index])
        if not has_downgrades and not has_upgrades:
            print("No migrations required.")
            return
        # Apply or unapply migrations.
        async with database.transaction():
            # Unapply migrations.
            if has_downgrades:
                # Walk backwards so dependents are unapplied before their
                # dependencies.
                for migration in reversed(migrations[index:]):
                    if not (migration.is_applied):
                        continue
                    await migration.downgrade()
                    await db_unapply_migration(database, migration.name)
            # Apply migrations.
            if has_upgrades:
                for migration in migrations[:index]:
                    if migration.is_applied:
                        continue
                    await migration.upgrade()
                    await db_apply_migration(database, migration.name)
async def create_database(url: str, encoding: str = "utf8") -> None:
    """Create the database named in *url* (postgres, mysql or sqlite).

    :param url: full database URL; the database component names the DB to create
    :param encoding: character encoding for the new database
    :raises ValueError: for dialects other than postgres/mysql/sqlite
    """
    url = DatabaseURL(url)
    database_name = url.database
    # Connect to a maintenance database: a database cannot be created while
    # it is the one being connected to.
    if url.dialect in ("postgres", "postgresql"):
        url = url.replace(database="postgres")
    elif url.dialect == "mysql":
        url = url.replace(database="")
    if url.dialect in ("postgres", "postgresql"):
        statement = "CREATE DATABASE {0} ENCODING '{1}' TEMPLATE template1".format(
            database_name, encoding,
        )
        statements = [statement]
    elif url.dialect == "mysql":
        statement = "CREATE DATABASE {0} CHARACTER SET = '{1}'".format(
            database_name, encoding
        )
        statements = [statement]
    elif url.dialect == "sqlite":
        if database_name and database_name != ":memory:":
            # Touch the file by creating and dropping a throwaway table.
            statements = ["CREATE TABLE DB(id int);", "DROP TABLE DB;"]
        else:
            statements = []
    else:
        # fix: unknown dialects previously crashed with NameError on the
        # unbound 'statements' variable.
        raise ValueError("Unsupported dialect: {0}".format(url.dialect))
    async with Database(url) as database:
        for statement in statements:
            await database.execute(statement)
async def drop_database(url: str) -> None:
    """Drop the database named in *url*.

    For sqlite the backing file is removed; an in-memory sqlite database is
    a no-op (previously it fell through to the DROP DATABASE branch with
    'statement' unbound, raising NameError).
    """
    url = DatabaseURL(url)
    database_name = url.database
    # Connect to a maintenance database for server dialects.
    if url.dialect in ("postgres", "postgresql"):
        url = url.replace(database="postgres")
    elif url.dialect == "mysql":
        url = url.replace(database="")
    if url.dialect == "sqlite":
        if database_name and database_name != ":memory:":
            os.remove(database_name)
        return
    statement = "DROP DATABASE {0}".format(database_name)
    async with Database(url) as database:
        await database.execute(statement)
async def database_exists(url: str) -> bool:
    """Return True if the database named in *url* exists.

    :raises ValueError: for dialects other than postgres/mysql/sqlite
    """
    url = DatabaseURL(url)
    database_name = url.database
    # Connect to a maintenance database for server dialects.
    if url.dialect in ("postgres", "postgresql"):
        url = url.replace(database="postgres")
    elif url.dialect == "mysql":
        url = url.replace(database="")
    if url.dialect in ("postgres", "postgresql"):
        statement = "SELECT 1 FROM pg_database WHERE datname='%s'" % database_name
    elif url.dialect == "mysql":
        statement = (
            "SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA "
            "WHERE SCHEMA_NAME = '%s'" % database_name
        )
    elif url.dialect == "sqlite":
        if database_name == ":memory:" or not database_name:
            return True
        if not os.path.isfile(database_name) or os.path.getsize(database_name) < 100:
            return False
        with open(database_name, "rb") as file:
            header = file.read(100)
        # A valid sqlite file begins with this 16-byte magic string.
        return header[:16] == b"SQLite format 3\x00"
    else:
        # fix: unknown dialects previously crashed with NameError on the
        # unbound 'statement' variable.
        raise ValueError("Unsupported dialect: {0}".format(url.dialect))
    async with Database(url) as database:
        return bool(await database.fetch_one(statement))
|
import numpy as np
from taurex.model.simplemodel import SimpleForwardModel
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.driver as drv
from pycuda.gpuarray import GPUArray, to_gpu, zeros
from functools import lru_cache
import math
from ..utils.emission import cuda_blackbody
import pycuda.tools as pytools
from taurex_cuda.contributions.cudacontribution import CudaContribution
@lru_cache(maxsize=400)
def gen_partial_kernal(ngauss, nlayers, grid_size):
    """Build and compile a CUDA kernel computing per-quadrature-point
    emission intensities.

    The generated kernel unrolls the ``ngauss`` Gauss-Legendre points
    (remapped from [-1, 1] to [0, 1]) into separate accumulators and, for
    each wavenumber index ``i``, integrates the layer black-body emission
    through the optical depths.  ``layer_tau`` is overwritten in place with
    ``exp(-layer_tau) - exp(-dtau)`` per layer.  Results are written to
    ``dest`` as one contiguous ``grid_size`` slab per quadrature point.

    Cached on (ngauss, nlayers, grid_size) since compilation is expensive.

    :returns: a pycuda kernel function ``quadrature_kernal(dest, layer_tau, dtau, BB)``
    """
    from taurex.constants import PI
    mu, weight = np.polynomial.legendre.leggauss(ngauss)
    mu_quads = (mu+1)/2
    code = f"""
    __global__ void quadrature_kernal(double* __restrict__ dest,
                                      double* __restrict__ layer_tau,
                                      const double* __restrict__ dtau,
                                      const double* __restrict__ BB)
    {{
        unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
        if ( i >= {grid_size} )
            return;
    """
    # One accumulator per quadrature point, unrolled into the source text.
    for idx,mu in enumerate(mu_quads):
        code+=f"""
        double I_{idx} = 0.0;
        """
    code+=f"""
        for (int layer = 0; layer < {nlayers}; layer++)
        {{
            double _dtau = dtau[layer*{grid_size} + i];
            double _layer_tau = layer_tau[layer*{grid_size} + i];
            double _BB = BB[layer*{grid_size} + i]*{1.0/PI};
            layer_tau[layer*{grid_size} + i] = exp(-_layer_tau) - exp(-_dtau);
            _dtau += _layer_tau;
            if (layer == 0){{
    """
    # Surface (bottom) layer contribution.
    for idx,mu in enumerate(mu_quads):
        code += f"""
                I_{idx} += exp(-_dtau*{1.0/mu})*_BB;
        """
    code+=f"""
            }}
    """
    # Contribution from every layer above the surface.
    for idx,mu in enumerate(mu_quads):
        code += f"""
            I_{idx} += (exp(-_layer_tau*{1.0/mu}) - exp(-_dtau*{1.0/mu}))*_BB;
        """
    code += f"""
        }}
    """
    # Write each quadrature point's spectrum to its own slab of dest.
    for idx,mu in enumerate(mu_quads):
        code +=f"""
        dest[{idx*grid_size}+i] = I_{idx};
        """
    code+=f"""
    }}
    """
    mod = SourceModule(code)
    return mod.get_function('quadrature_kernal')
@lru_cache(maxsize=400)
def gen_coeff(ngauss, nlayers, grid_size):
    """Build and compile a CUDA kernel folding per-quadrature-point
    intensities into a single spectrum.

    For each wavenumber index ``i`` the kernel computes
    ``dest[i] = 2*pi * sum_g I[g]*mu[g]*wi[g]`` over the ``ngauss``
    quadrature points, where ``I`` holds one contiguous ``grid_size`` slab
    per point (the layout produced by ``gen_partial_kernal``).

    Cached on (ngauss, nlayers, grid_size) since compilation is expensive.
    ``nlayers`` is unused but kept in the signature/cache key for symmetry
    with ``gen_partial_kernal``.

    :returns: a pycuda kernel function ``quadrature_kernal(dest, mu, wi, I)``
    """
    from taurex.constants import PI
    # BUG FIX: the last kernel parameter was declared as ``tau`` while the
    # loop body reads ``I[...]`` — an undefined identifier that made nvcc
    # compilation fail.  The parameter is the intensity array, so name it I.
    code = f"""
    __global__ void quadrature_kernal(double* __restrict__ dest,
                                      const double * __restrict__ mu,
                                      const double * __restrict__ wi,
                                      const double* __restrict__ I)
    {{
        unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
        if ( i >= {grid_size} )
            return;
        double _I = 0.0;
        double _mu = 0.0;
        double _wi = 0.0;
        for (int g = 0; g < {ngauss}; g++){{
            _mu = mu[g];
            _wi = wi[g];
            _I += I[g*{grid_size}+i]*_mu*_wi;
        }}
        dest[i] = {2.0*PI}*_I;
    }}
    """
    mod = SourceModule(code)
    return mod.get_function('quadrature_kernal')
class EmissionCudaModel(SimpleForwardModel):
    """
    A forward model for eclipse models using CUDA

    Parameters
    ----------
    planet: :class:`~taurex.data.planet.Planet`, optional
        Planet model, default planet is Jupiter
    star: :class:`~taurex.data.stellar.star.Star`, optional
        Star model, default star is Sun-like
    pressure_profile: :class:`~taurex.data.profiles.pressure.pressureprofile.PressureProfile`, optional
        Pressure model, alternative is to set ``nlayers``, ``atm_min_pressure``
        and ``atm_max_pressure``
    temperature_profile: :class:`~taurex.data.profiles.temperature.tprofile.TemperatureProfile`, optional
        Temperature model, default is an :class:`~taurex.data.profiles.temperature.isothermal.Isothermal`
        profile at 1500 K
    chemistry: :class:`~taurex.data.profiles.chemistry.chemistry.Chemistry`, optional
        Chemistry model, default is
        :class:`~taurex.data.profiles.chemistry.taurexchemistry.TaurexChemistry` with
        ``H2O`` and ``CH4``
    nlayers: int, optional
        Number of layers. Used if ``pressure_profile`` is not defined.
    atm_min_pressure: float, optional
        Pressure at TOA. Used if ``pressure_profile`` is not defined.
    atm_max_pressure: float, optional
        Pressure at BOA. Used if ``pressure_profile`` is not defined.
    ngauss: int, optional
        Number of Gaussian quadrature points. Default is 4
    """

    def __init__(self,
                 planet=None,
                 star=None,
                 pressure_profile=None,
                 temperature_profile=None,
                 chemistry=None,
                 nlayers=100,
                 atm_min_pressure=1e-4,
                 atm_max_pressure=1e6,
                 ngauss=4):
        super().__init__(self.__class__.__name__, planet,
                         star,
                         pressure_profile,
                         temperature_profile,
                         chemistry,
                         nlayers,
                         atm_min_pressure,
                         atm_max_pressure)
        self.set_num_gauss(ngauss)
        self.set_num_streams(1)
        # Pools so per-spectrum device / page-locked buffers are reused
        # between calls rather than reallocated.
        self._memory_pool = pytools.DeviceMemoryPool()
        self._tau_memory_pool = pytools.PageLockedMemoryPool()

    def set_num_gauss(self, value):
        """Set the Gauss-Legendre quadrature order; points remapped to [0, 1]."""
        self._ngauss = int(value)
        mu, weight = np.polynomial.legendre.leggauss(self._ngauss)
        self._mu_quads = (mu+1)/2
        self._wi_quads = (weight)/2

    def set_num_streams(self, num_streams):
        """Create the CUDA streams available for asynchronous work."""
        self._streams = [drv.Stream() for x in range(num_streams)]

    def build(self):
        """Build the model and precompute per-layer GPU index arrays."""
        super().build()
        # Start/end layer indices consumed by the contribution kernels.
        self._start_surface_K = to_gpu(np.array([0]).astype(np.int32))
        self._end_surface_K = to_gpu(np.array([self.nLayers]).astype(np.int32))
        self._start_layer = to_gpu(np.array([x+1 for x in range(self.nLayers)],dtype=np.int32))
        self._end_layer = to_gpu(np.array([self.nLayers for x in range(self.nLayers)],dtype=np.int32))
        self._start_dtau = to_gpu(np.array([x for x in range(self.nLayers)]).astype(np.int32))
        self._end_dtau = to_gpu(np.array([x+1 for x in range(self.nLayers)]).astype(np.int32))
        self._dz = zeros(shape=(self.nLayers,self.nLayers,),dtype=np.float64)
        self._density_offset = zeros(shape=(self.nLayers,),dtype=np.int32)
        #self._tau_buffer= drv.pagelocked_zeros(shape=(self.nativeWavenumberGrid.shape[-1], self.nLayers,),dtype=np.float64)

    # NOTE(review): lru_cache on an instance method keys on ``self`` and keeps
    # the instance alive for the cache's lifetime; this method appears unused
    # (evaluate_emission uses the module-level gen_partial_kernal) — confirm
    # before removing.
    @lru_cache(maxsize=4)
    def _gen_ngauss_kernal(self, ngauss, nlayers, grid_size):
        """Compile a single-pass quadrature kernel with weights baked in."""
        from taurex.constants import PI
        mu, weight = np.polynomial.legendre.leggauss(ngauss)
        mu_quads = (mu+1)/2
        wi_quads = weight/2
        code = f"""
        __global__ void quadrature_kernal(double* __restrict__ dest,
                                          double* __restrict__ layer_tau,
                                          const double* __restrict__ dtau,
                                          const double* __restrict__ BB)
        {{
            unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
            if ( i >= {grid_size} )
                return;
            double I = 0.0;
            for (int layer = 0; layer < {nlayers}; layer++)
            {{
                double _dtau = dtau[layer*{grid_size} + i];
                double _layer_tau = layer_tau[layer*{grid_size} + i];
                double _BB = BB[layer*{grid_size} + i]*{1.0/PI};
                layer_tau[layer*{grid_size} + i] = exp(-_layer_tau) - exp(-_dtau);
                _dtau += _layer_tau;
                if (layer == 0){{
        """
        for mu,weight in zip(mu_quads, wi_quads):
            code += f"""
                    I += exp(-_dtau*{1.0/mu})*{mu*weight}*_BB;
            """
        code+=f"""
                }}
        """
        for mu,weight in zip(mu_quads, wi_quads):
            code += f"""
                I += (exp(-_layer_tau*{1.0/mu}) - exp(-_dtau*{1.0/mu}))*{mu*weight}*_BB;
            """
        code += f"""
            }}
            dest[i] = {2.0*PI}*I;
        }}
        """
        mod = SourceModule(code)
        return mod.get_function('quadrature_kernal')

    def partial_model(self,wngrid=None,cutoff_grid=True):
        """Initialize profiles/contributions and return raw emission terms."""
        from taurex.util.util import clip_native_to_wngrid
        self.initialize_profiles()
        native_grid = self.nativeWavenumberGrid
        if wngrid is not None and cutoff_grid:
            native_grid = clip_native_to_wngrid(native_grid,wngrid)
        self._star.initialize(native_grid)
        for contrib in self.contribution_list:
            contrib.prepare(self,native_grid)
        return self.evaluate_emission(native_grid,False)

    def evaluate_emission(self, wngrid, return_contrib):
        """Run the GPU emission integration over *wngrid*.

        :returns: tuple of (per-quadrature-point intensity array,
                  1/mu quadrature points, quadrature weights, tau array)
        """
        from taurex.util.util import compute_dz
        total_layers = self.nLayers
        dz = compute_dz(self.altitudeProfile)
        dz = np.array([dz for x in range(self.nLayers)])
        self._dz.set(dz)
        wngrid_size = wngrid.shape[0]
        temperature = self.temperatureProfile
        density_profile = to_gpu(self.densityProfile, allocator=self._memory_pool.allocate)
        # Contributions with CUDA support run on-device; the rest fall back
        # to the CPU path below.
        self._cuda_contribs = [c for c in self.contribution_list if isinstance(c, CudaContribution)]
        self._noncuda_contribs = [c for c in self.contribution_list if not isinstance(c, CudaContribution)]
        self._fully_cuda = len(self._noncuda_contribs) == 0
        layer_tau = zeros(shape=(total_layers, wngrid_size), dtype=np.float64, allocator=self._memory_pool.allocate)
        dtau = zeros(shape=(total_layers, wngrid_size), dtype=np.float64, allocator=self._memory_pool.allocate)
        BB = zeros(shape=(total_layers, wngrid_size), dtype=np.float64, allocator=self._memory_pool.allocate)
        I = zeros(shape=(self._ngauss,wngrid_size), dtype=np.float64, allocator=self._memory_pool.allocate)
        cuda_blackbody(wngrid, temperature.ravel(), out=BB)
        tau_host = self._tau_memory_pool.allocate(shape=(total_layers, wngrid_size), dtype=np.float64)
        if not self._fully_cuda:
            self.fallback_noncuda(layer_tau, dtau,wngrid,total_layers)
        for contrib in self._cuda_contribs:
            contrib.contribute(self, self._start_layer, self._end_layer, self._density_offset, 0,
                               density_profile, layer_tau, path_length=self._dz, with_sigma_offset=True)
            contrib.contribute(self, self._start_dtau, self._end_dtau, self._density_offset, 0,
                               density_profile, dtau, path_length=self._dz, with_sigma_offset=True)
        drv.Context.synchronize()
        integral_kernal = gen_partial_kernal(self._ngauss, self.nLayers, wngrid_size)
        THREAD_PER_BLOCK_X = 64
        NUM_BLOCK_X = int(math.ceil(wngrid_size/THREAD_PER_BLOCK_X))
        integral_kernal(I, layer_tau, dtau, BB,
                        block=(THREAD_PER_BLOCK_X, 1, 1), grid=(NUM_BLOCK_X, 1, 1))
        layer_tau.get(ary=tau_host, pagelocked=True)
        #drv.memcpy_dtoh(self._tau_buffer[:wngrid_size,:], layer_tau.gpudata)
        final_tau = tau_host
        #final_I= I.get()
        return I.get(),1/self._mu_quads[:,None],self._wi_quads[:,None],final_tau
        #return self.compute_final_flux(final_I), final_tau

    def path_integral(self,wngrid,return_contrib):
        """Combine quadrature intensities into the eclipse flux ratio."""
        I,_mu,_w,tau = self.evaluate_emission(wngrid,return_contrib)
        self.debug('I: %s',I)
        flux_total = 2.0*np.pi*sum(I*_w/_mu)
        self.debug('flux_total %s',flux_total)
        return self.compute_final_flux(flux_total).ravel(),tau

    def fallback_noncuda(self, gpu_layer_tau, gpu_dtau, wngrid, total_layers):
        """CPU path: accumulate non-CUDA contributions, then upload to GPU."""
        from taurex.util.emission import black_body
        from taurex.constants import PI
        wngrid_size = wngrid.shape[0]
        dz = np.zeros(total_layers)
        dz[:-1] = np.diff(self.altitudeProfile)
        dz[-1] = self.altitudeProfile[-1] - self.altitudeProfile[-2]
        density = self.densityProfile
        layer_tau = np.zeros(shape=(total_layers, wngrid_size))
        dtau = np.zeros(shape=(total_layers, wngrid_size))
        _dtau = np.zeros(shape=(1, wngrid_size))
        _layer_tau = np.zeros(shape=(1, wngrid_size))
        # Loop upwards
        for layer in range(total_layers):
            _layer_tau[...] = 0.0
            _dtau[...] = 0.0
            for contrib in self._noncuda_contribs:
                contrib.contribute(self, layer+1, total_layers,
                                   0, 0, density, _layer_tau, path_length=dz)
                contrib.contribute(self, layer, layer+1, 0,
                                   0, density, _dtau, path_length=dz)
            layer_tau[layer,:] += _layer_tau[0]
            dtau[layer,:] += _dtau[0]
        gpu_layer_tau.set(layer_tau)
        gpu_dtau.set(dtau)

    def compute_final_flux(self, f_total):
        """Scale planet flux by the stellar SED and the radius ratio squared."""
        star_sed = self._star.spectralEmissionDensity
        self.debug('Star SED: %s', star_sed)
        # quit()
        star_radius = self._star.radius
        planet_radius = self._planet.fullRadius
        self.debug('star_radius %s', self._star.radius)
        self.debug('planet_radius %s', self._star.radius)
        last_flux = (f_total/star_sed) * (planet_radius/star_radius)**2
        self.debug('last_flux %s', last_flux)
        return last_flux
        # tau = np.exp(-tau.get())
        # ap = self.altitudeProfile[:, None]
        # pradius = self._planet.fullRadius
        # sradius = self._star.radius
        # _dz = dz[:, None]
        # integral = np.sum((pradius+ap)*(1.0-tau)*_dz*2.0, axis=0)
        # return ((pradius**2.0) + integral)/(sradius**2), tau

    @classmethod
    def input_keywords(cls):
        """Return the input-file keywords selecting this model."""
        # BUG FIX: the keyword was listed twice.
        return ['emission_cuda', ]
|
"""OWASP Dependency Check dependencies collector."""
from xml.etree.ElementTree import Element # nosec, Element is not available from defusedxml, but only used as type
from collector_utilities.functions import parse_source_response_xml_with_namespace, sha1_hash
from collector_utilities.type import Namespaces
from model import Entities, Entity, SourceResponses
from .base import OWASPDependencyCheckBase
class OWASPDependencyCheckDependencies(OWASPDependencyCheckBase):
    """Collector to get the dependencies from the OWASP Dependency Check XML report."""

    async def _parse_entities(self, responses: SourceResponses) -> Entities:
        """Override to parse the dependencies from the XML."""
        landing_url = await self._landing_url(responses)
        entities = Entities()
        for response in responses:
            tree, namespaces = await parse_source_response_xml_with_namespace(response, self.allowed_root_tags)
            for index, dependency in enumerate(self._dependencies(tree, namespaces)):
                entities.append(self._parse_entity(dependency, index, namespaces, landing_url))
        return entities

    def _dependencies(self, tree: Element, namespaces: Namespaces) -> list[Element]:  # pylint: disable=no-self-use
        """Return the dependencies."""
        return tree.findall(".//ns:dependency", namespaces)

    def _parse_entity(  # pylint: disable=no-self-use
        self, dependency: Element, dependency_index: int, namespaces: Namespaces, landing_url: str
    ) -> Entity:
        """Parse the entity from the dependency."""
        file_path = dependency.findtext("ns:filePath", default="", namespaces=namespaces)
        file_name = dependency.findtext("ns:fileName", default="", namespaces=namespaces)
        sha1 = dependency.findtext("ns:sha1", namespaces=namespaces)
        # We can only generate an entity landing url if a sha1 is present in the XML, but unfortunately not all
        # dependencies have one, so check for it:
        entity_landing_url = f"{landing_url}#l{dependency_index + 1}_{sha1}" if sha1 else ""
        return Entity(
            key=sha1 or sha1_hash(file_path + file_name),
            file_path=file_path,
            file_name=file_name,
            url=entity_landing_url,
        )
|
import pytest
from libpythonpro.spam.enviador_de_email import Enviador, EmailInvalido
def test_criar_enviador_de_email():
    """An Enviador can be constructed without arguments."""
    assert Enviador() is not None
@pytest.mark.parametrize(
    'remetente',
    ['contato@smkbarbosa.eti.br', 'foo@bar.com.br']
)
def test_remetente(remetente):
    """enviar() echoes the sender address back in its result."""
    resultado = Enviador().enviar(
        remetente,
        'samuka1@gmail.com',
        'Curso Python Pro',
        'Turma Python Pro'
    )
    assert remetente in resultado
@pytest.mark.parametrize(
    'remetente',
    ['', 'foo']
)
def test_remetente_invalido(remetente):
    """enviar() rejects malformed sender addresses with EmailInvalido."""
    with pytest.raises(EmailInvalido):
        Enviador().enviar(
            remetente,
            'samuka1@gmail.com',
            'Curso Python Pro',
            'Turma Python Pro'
        )
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
==================================
Documentation extractor and writer
==================================
A command line tool for generating Axon and Kamaelia documentation
Features:
* outputs HTML (with some simple additional directives for the wiki engine
behind the Kamaelia website)
* python DocStrings are parsed as
`ReStructuredText<http://docutils.sourceforge.net/rst.html>`_ - permitting
rich formatting.
* can document modules, classes, functions, components and prefabs
* some customisation control over the output format
* generates hierarchical indices of modules
* fully qualified module, component, prefab, class and function names are
automagically converted to hyperlinks
* can incorporate test suite output into documentation
* can dump symbols (with mappings to URLs) to a file and/or read them in. This
makes it possible to cross-link, for example, from the Kamaelia documentation
back to the Axon documentation.
*This is not an Axon/Kamaelia system* - it is not built from components. However
it is probably sufficiently useful to be classified as a 'tool'!
Usage
-----
For help on command line options, use the ``--help`` option::
$> ./DocExtractor.py --help
The command lines currently being used to generate Kamaelia and Axon
documentation are as follows:
For Axon docs for the website::
$> ./DocExtractor.py --urlprefix /Docs/Axon/ \
--promotetitles \
--notjustcomponents \
--indexdepth 0 \
--root Axon \
--footerinclude Docs/Axon-footer.html \
--outdir <outputDirName> \
--dumpSymbolsTo <symbolFile> \
<repositoryDir>
For Kamaelia component docs for the website::
$> ./DocExtractor.py --urlprefix /Components/pydoc/ \
--root Kamaelia \
--footerinclude Components/pydoc-footer.html \
--outdir <outputDirName> \
--linkToSymbols <symbolFile> \
<repositoryDir>
Why differences?
* The ``--notjustcomponents`` flag which ensures that the classes and functions
making up Axon are documented.
* the ``--dumpSymbolsTo`` option creates a dump of all symbols documented.
``--linkToSymbols`` reads them in for generating crosslinks.
* The remaining differences change the formatting and style:
* ``promotetitles`` pushes module level doc string titles to the top of the
HTML pages generated - making them more prominent.
* ``indexDepth`` of 0 suppresses the generation of indexes of modules in a
given subdir. Axon's ``__init__.py`` contains a comprehensive table of
contents of its own, so the index is not needed.
Not quite plain HTML
--------------------
Although the output is primarily HTML, it does include Kamaelia website specific
directives (of the form ``[[foo][attribute=value] blah blah ]``)
Since these directives use square brackets, any square brackets in the body text
are replaced with escaped codes.
Implementation Details
----------------------
Kamaelia.Support.Data.Repository is used to scan the specified code base and
extract info and documentation about modules, classes, functions, components and
prefabs.
All python doc strings are fed through the
`docutils <http://docutils.sourceforge.net/>`_ ReStructuredText parser to
generate formatted HTML output.
Internally individual documentation pages are built up entirely using docutils
node structures (a convenient intermediate tree representation of the document)
A separate renderer object is used to perform the final conversion to HTML, as
well as resolve the automatic linking of fully qualified names. It also
determines the appropriate filenames and URLs to use for individual pages and
hyperlinks between them.
A few bespoke extensions are added to the repertoire of docutils nodes to
represent specific directives that need to appear in the final output. These
are converted by the renderer object to final ``[[foo][bar=bibble] ...]``
format.
This is done this way to keep an unambiguous separation between directives and
documentation text. If directives were dropped in as plain text earlier in the
process then they might be confused with occurrences of square brackets in the
actual documentation text.
Code overview
* The *DocGenConfig* class encapsulates configuration choices and also carries
the extracted repository info.
* *__main__* invokes *generateDocumentationFiles()* and *generateIndices()* to
kick off the construction of all documentation files and index page files.
* The actual formatting and generation of pages is performed by the *docFormatter*
class.
* *formatXXXPage()* methods return document node trees representing the final
pages to be converted to HTML and written to disk.
* Other *formatXXX()* methods construct fragments of the document tree.
"""
import textwrap
import inspect
import pprint
import time
import os
import StringIO
import ConfigParser
from docutils import core
from docutils import nodes
#from Kamaelia.Support.Data import Repository
import Repository
ClassScope = Repository.ClassScope
FunctionScope = Repository.FunctionScope
ModuleScope = Repository.ModuleScope
ImportScope = Repository.ImportScope
from renderHTML import RenderHTML
from Nodes import boxright
class DocGenConfig(object):
    """Configuration object for documentation generation."""
    def __init__(self):
        super(DocGenConfig,self).__init__()
        # NOTE: These settings are overridden in __main__ - modify them there,
        # not here
        self.repository = None              # scanned repository data to be documented
        self.filterPattern=""               # pattern limiting which modules are documented
        self.docdir="pydoc"                 # output directory for generated pages
        self.docroot="Kamaelia"             # root module name of the documented tree
        self.treeDepth=99                   # maximum depth of module index trees
        self.tocDepth=99                    # maximum depth of per-page tables of contents
        self.includeMethods=False           # document component methods too
        self.includeModuleDocString=False   # include module level doc strings
        self.includeNonKamaeliaStuff=False  # also document plain classes and functions
        self.showComponentsOnIndices=False  # list a module's components on index pages
        self.promoteModuleTitles=False      # move module doc title to the top of the page
        self.deemphasiseTrails=False        # render breadcrumb trails less prominently
        self.pageFooter=""                  # HTML appended to every generated page
        self.testOutputDir=None             # directory holding test-suite output, or None
        self.testExtensions=[]              # (file extension, heading) pairs for test output
        self.dumpSymbolsTo=None             # filename for writing the symbol dump, or None
        self.loadSymbolsFrom=[]             # symbol dump files to read for cross-linking
class docFormatter(object):
    """\
    docFormatter(renderer,config) -> new docFormatter object

    Object that formats documentation - methods of this class return document
    trees (docutils node format) documenting whatever was requested.

    Requires the renderer object so it can determine URIs for hyperlinks.
    """
    def __init__(self, renderer, config):
        super(docFormatter,self).__init__()
        self.renderer = renderer   # RenderHTML instance used to build URIs
        self.config = config       # DocGenConfig carrying the generation settings
        self.errorCount=0          # bumped for every docstring that parses with warnings
uid = 0  # class-level seed for unique reference ids
def genUniqueRef(self):
    """Return a new unique string id for use as a docutils reference target."""
    uid = str(self.uid)
    # First increment creates an instance attribute shadowing the class attr.
    self.uid+=1
    return uid
def boxes(self,componentName, label, boxes):
    """\
    Format documentation for inboxes/outboxes. Returns a docutils document
    tree fragment.

    Keyword arguments:

    - componentName  -- name of the component the boxes belong to
    - label          -- typically "Inboxes" or "Outboxes"
    - boxes          -- dict containing (boxname, boxDescription) pairs
    """
    items = []
    for box in boxes:
        try:
            description = boxes[box]
        except KeyError:
            description = ""
        except TypeError:
            # Old-style box declarations are a plain list, so indexing by
            # name raises TypeError - there is no per-box description.
            description = "Code uses old style inbox/outbox description - no metadata available"
        items.append((str(box), str(description)))
    # Section id/name uses the "symbol-<component>.<label>" convention so
    # other pages can link straight to the box list.
    docTree= nodes.section('',
        ids   = ["symbol-"+componentName+"."+label],
        names = ["symbol-"+componentName+"."+label],
        *[ nodes.title('', label),
           nodes.bullet_list('',
              *[ nodes.list_item('', nodes.paragraph('', '',
                                        nodes.strong('', boxname),
                                        nodes.Text(" : "+boxdesc))
                 )
                 for (boxname,boxdesc) in items
               ]
            ),
         ]
    )
    return docTree
def docString(self,docstring, main=False):
    """
    Parses a doc string in ReStructuredText format and returns a docutils
    document tree fragment.

    Removes any innate indentation from the raw doc strings before parsing.

    Also captures any warnings generated by parsing and writes them to
    stdout, incrementing the self.errorCount flag.
    """
    # NOTE(review): the 'main' flag is currently unused in this method.
    if docstring is None:
        docstring = " "
    # Dedent first line and remainder separately: the first line of an
    # inline docstring usually has no leading indent.
    lines = docstring.split("\n")
    if len(lines)>1:
        line1 = textwrap.dedent(lines[0])
        rest = textwrap.dedent("\n".join(lines[1:]))
        docstring = line1+"\n"+rest
    else:
        docstring=textwrap.dedent(docstring)
    # Trim leading/trailing blank lines.
    while len(docstring)>0 and docstring[0] == "\n":
        docstring = docstring[1:]
    while len(docstring)>0 and docstring[-1] == "\n":
        docstring = docstring[:-1]
    # halt_level=99 stops docutils aborting on errors; warnings are captured.
    warningStream=StringIO.StringIO()
    overrides={"warning_stream":warningStream,"halt_level":99}
    docTree=core.publish_doctree(docstring,settings_overrides=overrides)
    warnings=warningStream.getvalue()
    if warnings:
        print "!!! Warnings detected:"
        print warnings
        self.errorCount+=1
    warningStream.close()
    return nodes.section('', *docTree.children)
def formatArgSpec(self, argspec):
    """Render the argument-name list of *argspec* as a "(a, b, c)" string."""
    rendered = pprint.pformat(argspec[0])
    for old, new in (("[", "("), ("]", ")"), ("'", "")):
        rendered = rendered.replace(old, new)
    return rendered
def formatMethodDocStrings(self,className,X):
    """Return a docutils fragment documenting every method of class scope *X*,
    sorted by method name, each under a "name(args)" title."""
    docTree = nodes.section('')
    methods = X.listAllFunctions()
    methods.sort()
    for (methodname,method) in methods:
        methodHead = methodname + "(" + method.argString + ")"
        # "symbol-<class>.<method>" ids make each method a link target.
        docTree.append( nodes.section('',
            ids   = ["symbol-"+className+"."+methodname],
            names = ["symbol-"+className+"."+methodname],
            * [ nodes.title('', methodHead) ]
            + self.docString(method.doc)
            )
        )
    return docTree
def formatInheritedMethods(self,className,CLASS):
    """Return a docutils fragment listing, per base class in MRO order, the
    methods *CLASS* inherits but does not override, linked to the base's docs."""
    docTree = nodes.section('')
    overrides = [name for (name,method) in CLASS.listAllFunctions()] # copy of list of existing method names
    for base in CLASS.allBasesInMethodResolutionOrder:
        if isinstance(base,ClassScope):
            moduleName=base.module
            findName=moduleName[len(self.config.docroot+"."):]
            module=self.config.repository.find(findName)
            try:
                # NOTE(review): this rebinds the 'className' parameter to the
                # located base class's name - apparently intentional, since it
                # is used for the base's URIs and heading below.
                className=module.locate(base)
            except ValueError:
                continue
            # work out which methods haven't been already overriden
            methodList = []
            for (name,method) in base.listAllFunctions():
                if name not in overrides:
                    # Record it so a method is attributed only to the first
                    # base in the MRO that provides it.
                    overrides.append(name)
                    uri = self.renderer.makeURI(moduleName,"symbol-"+className+"."+name)
                    methodList.append(nodes.list_item('',
                        nodes.paragraph('','',
                            nodes.reference('', nodes.Text(name), refuri=uri),
                            nodes.Text("(" + method.argString + ")"),
                        ),
                    )
                    )
            if len(methodList)>0:
                docTree.append( nodes.section('',
                    nodes.title('', "Methods inherited from "+moduleName+"."+className+" :"),
                    nodes.bullet_list('', *methodList),
                    )
                )
    return docTree
def formatClassStatement(self, name, bases):
    """Render a python-style "class Name(Base1, Base2)" heading line."""
    baseNames = [baseName for (baseName, base) in bases]
    return "class " + name + "(" + ", ".join(baseNames) + ")"
def formatPrefabStatement(self, name):
    """Render a "prefab: Name" heading line."""
    return "prefab: " + str(name)
def formatComponent(self, moduleName, name, X):
    """Return a docutils fragment documenting component *X*: class statement
    title, doc string, inbox/outbox lists, optional methods, inherited methods."""
    # no class bases available from repository scanner
    CLASSNAME = self.formatClassStatement(name, X.bases)
    CLASSDOC = self.docString(X.doc)
    INBOXES = self.boxes(name,"Inboxes", X.inboxes)
    OUTBOXES = self.boxes(name,"Outboxes", X.outboxes)
    # Method docs are optional (config.includeMethods) and are prefixed by a
    # floated warning box steering readers to the inbox/outbox interface.
    if self.config.includeMethods and len(X.listAllFunctions()):
        METHODS = [ nodes.section('',
                        nodes.title('', 'Methods defined here'),
                        boxright('',
                            nodes.paragraph('', '',
                                nodes.strong('', nodes.Text("Warning!"))
                            ),
                            nodes.paragraph('', '',
                                nodes.Text("You should be using the inbox/outbox interface, not these methods (except construction). This documentation is designed as a roadmap as to their functionalilty for maintainers and new component developers.")
                            ),
                        ),
                        * self.formatMethodDocStrings(name,X)
                    )
                  ]
    else:
        METHODS = []
    return \
        nodes.section('',
            * [ nodes.title('', CLASSNAME, ids=["symbol-"+name]) ]
            + CLASSDOC
            + [ INBOXES, OUTBOXES ]
            + METHODS
            + [ self.formatInheritedMethods(name,X) ]
        )
def formatPrefab(self, moduleName, name, X):
    """Return a docutils fragment documenting prefab *X*: a "prefab: name"
    title followed by its parsed doc string."""
    CLASSNAME = self.formatPrefabStatement(name)
    CLASSDOC = self.docString(X.doc)
    return nodes.section('',
        * [ nodes.title('', CLASSNAME, ids=["symbol-"+name]) ]
        + CLASSDOC
        )
def formatFunction(self, moduleName, name, X):
    """Return a docutils fragment documenting function *X* under a
    "name(args)" title, addressable as "symbol-<name>"."""
    functionHead = name + "(" + X.argString + ")"
    return nodes.section('',
        ids   = ["symbol-"+name],
        names = ["symbol-"+name],
        * [ nodes.title('', functionHead) ]
        + self.docString(X.doc)
        )
def formatClass(self, moduleName, name, X):
    """Return a docutils fragment documenting class *X*: class statement
    title, doc string, its methods and inherited methods."""
    CLASSNAME = self.formatClassStatement(name, X.bases)
    if len(X.listAllFunctions()):
        METHODS = [ nodes.section('',
                        nodes.title('', 'Methods defined here'),
                        * self.formatMethodDocStrings(name,X)
                    )
                  ]
    else:
        METHODS = []
    return \
        nodes.section('',
            nodes.title('', CLASSNAME, ids=["symbol-"+name]),
            self.docString(X.doc),
            * METHODS + [self.formatInheritedMethods(name,X)]
        )
def formatTests(self, moduleName):
    """Return a docutils container holding test-suite output for *moduleName*,
    read from config.testOutputDir; empty container if none is configured
    or no matching files exist."""
    if not self.config.testOutputDir:
        return nodes.container('')
    else:
        docTree = nodes.container('')
        for (ext,heading) in self.config.testExtensions:
            filename = os.path.join(self.config.testOutputDir, moduleName+ext)
            try:
                file=open(filename,"r")
                itemlist = nodes.bullet_list()
                foundSomething=False
                for line in file.readlines():
                    line=line[:-1] # strip of trailing newline
                    itemlist.append(nodes.list_item('',nodes.paragraph('',line)))
                    foundSomething=True
                if foundSomething:
                    docTree.append(nodes.paragraph('', heading))
                    docTree.append(itemlist)
                file.close()
            except IOError:
                # Missing output file for this extension - simply skip it.
                pass
        if len(docTree.children)>0:
            docTree.insert(0,nodes.title('', "Test documentation"))
        return docTree
def formatTrail(self, fullPathName):
    """Return a docutils paragraph rendering *fullPathName* ("A.B.C") as a
    breadcrumb trail: each dotted component is a link to its own page."""
    path = fullPathName.split(".")
    trail = nodes.paragraph('')
    line = trail
    accum = ""        # the fully qualified name accumulated so far
    firstPass=True
    for element in path:
        if not firstPass:
            accum += "."
        accum += element
        if not firstPass:
            line.append(nodes.Text("."))
        # Each element links to the page for the name accumulated so far.
        URI = self.renderer.makeURI(accum)
        line.append( nodes.reference('', element, refuri=URI) )
        firstPass=False
    return trail
def formatTrailAsTitle(self, fullPathName):
    """Return the breadcrumb trail for *fullPathName* wrapped as a title;
    wrapped in a section (demoting it visually) if deemphasiseTrails is set."""
    trailTree = self.formatTrail(fullPathName)
    title = nodes.title('', '', *trailTree.children)
    if self.config.deemphasiseTrails:
        title = nodes.section('', title)
    return title
def declarationsList(self, moduleName, components, prefabs, classes, functions):
    """Return a docutils bullet list summarising every declaration in the
    module, each prefixed by its kind and linked to its own page."""
    uris = {}       # declaration name -> page URI
    prefixes = {}   # declaration name -> kind prefix ("component ", ...)
    postfixes = {}  # declaration name -> suffix (arg list for functions)
    for (name,component) in components:
        uris[name] = self.renderer.makeURI(moduleName+"."+name)
        prefixes[name] = "component "
        postfixes[name] = ""
    for (name,prefab) in prefabs:
        uris[name] = self.renderer.makeURI(moduleName+"."+name)
        prefixes[name] = "prefab "
        postfixes[name] = ""
    for (name,cls) in classes:
        uris[name] = self.renderer.makeURI(moduleName+"."+name)
        prefixes[name] = "class "
        postfixes[name] = ""
    for (name,function) in functions:
        uris[name] = self.renderer.makeURI(moduleName+"."+name)
        prefixes[name] = ""
        postfixes[name] = "("+function.argString+")"
    # Python 2 idiom: keys() returns a list, sorted in place.
    declNames = uris.keys()
    declNames.sort()
    return nodes.container('',
        nodes.bullet_list('',
            *[ nodes.list_item('',
                   nodes.paragraph('', '',
                       nodes.strong('', '',
                           nodes.Text(prefixes[NAME]),
                           nodes.reference('', NAME, refuri=uris[NAME])),
                       nodes.Text(postfixes[NAME]),
                   )
               )
               for NAME in declNames
             ]
        )
    )
def formatComponentPage(self, moduleName, name, component):
    """Return a full standalone page documenting one component."""
    return self.formatDeclarationPage(moduleName, name, self.formatComponent, component)

def formatPrefabPage(self, moduleName, name, prefab):
    """Return a full standalone page documenting one prefab."""
    return self.formatDeclarationPage(moduleName, name, self.formatPrefab, prefab)

def formatClassPage(self, moduleName, name, cls):
    """Return a full standalone page documenting one class."""
    return self.formatDeclarationPage(moduleName, name, self.formatClass, cls)

def formatFunctionPage(self, moduleName, name, function):
    """Return a full standalone page documenting one function."""
    return self.formatDeclarationPage(moduleName, name, self.formatFunction, function)
def formatDeclarationPage(self, moduleName, name, method, item):
    """Build a standalone page for one declaration: breadcrumb title, a link
    back to the owning module's docs, then the body produced by *method*.

    - method -- one of the formatXXX() bound methods, called as
      method(moduleName, name, item) to produce the declaration body
    """
    parentURI = self.renderer.makeURI(item.module)
    trailTitle = self.formatTrailAsTitle(moduleName+"."+name)
    itemDocTree = method(moduleName, name, item)
    return nodes.section('',
        trailTitle,
        nodes.paragraph('', '',
            nodes.Text("For examples and more explanations, see the "),
            nodes.reference('', 'module level docs.', refuri=parentURI)
        ),
        nodes.transition(),
        nodes.section('', *itemDocTree),
    )
def formatModulePage(self, moduleName, module, components, prefabs, classes, functions):
    """Build the full documentation page for a module: breadcrumb title,
    declarations summary, submodule index, table of contents, the module's
    own doc string (plus any test documentation), then the concatenated
    documentation of every declaration.

    - components/prefabs/classes/functions -- lists of (name, scopeObject)
      pairs extracted from the repository
    """
    trailTitle = self.formatTrailAsTitle(moduleName)
    moduleDocTree = self.docString(module.doc, main=True)
    # Append any test-suite documentation to the module doc tree.
    testsTree = self.formatTests(moduleName)
    while len(testsTree.children)>0:
        node=testsTree.children[0]
        testsTree.remove(node)
        # BUG FIX: was "moduleTree.append(node)" - an undefined name that
        # raised NameError whenever test documentation was found.
        moduleDocTree.append(node)
    # Optionally pull the doc string's own title out so it can be shown
    # prominently at the top of the page.
    if self.config.promoteModuleTitles and \
       len(moduleDocTree.children)>=1 and \
       isinstance(moduleDocTree.children[0], nodes.title):
        theTitle = moduleDocTree.children[0]
        moduleDocTree.remove(theTitle)
        promotedTitle = [ theTitle ]
    else:
        promotedTitle = []
    toc = self.buildTOC(moduleDocTree, depth=self.config.tocDepth)
    # Build (name, docTree) pairs for every declaration so they can be
    # sorted by name before being concatenated.
    declarationTrees = []
    for (name,component) in components:
        cTrail = self.formatTrail(moduleName+"."+name)
        declarationTrees.append((
            name,
            nodes.container('',
                nodes.title('','', *cTrail.children),
                self.formatComponent(moduleName,name,component)
            )
        ))
    for (name,prefab) in prefabs:
        pTrail = self.formatTrail(moduleName+"."+name)
        declarationTrees.append((
            name,
            nodes.container('',
                nodes.title('','', *pTrail.children),
                self.formatPrefab(moduleName,name,prefab)
            )
        ))
    for (name,cls) in classes:
        cTrail = self.formatTrail(moduleName+"."+name)
        declarationTrees.append((
            name,
            nodes.container('',
                nodes.title('','', *cTrail.children),
                self.formatClass(moduleName,name,cls)
            )
        ))
    for (name,function) in functions:
        fTrail = self.formatTrail(moduleName+"."+name)
        declarationTrees.append((
            name,
            nodes.container('',
                nodes.title('','', *fTrail.children),
                self.formatFunction(moduleName,name,function)
            )
        ))
    declarationTrees.sort() # sort by name
    concatenatedDeclarations=[]
    for (name,tree) in declarationTrees:
        concatenatedDeclarations.extend(tree)
    componentListTree = self.declarationsList( moduleName, components, prefabs, classes, functions )
    if len(module.listAllModules()) > 0:
        subModuleIndex = self.generateIndex(moduleName,module,self.config.treeDepth)
    else:
        subModuleIndex = []
    return nodes.container('',
        nodes.section('',
            trailTitle,
        ),
        nodes.section('',
            * promotedTitle + \
              [ componentListTree] + \
              subModuleIndex + \
              [ toc ]
        ),
        moduleDocTree,
        nodes.transition(),
        nodes.section('', *concatenatedDeclarations),
    )
def buildTOC(self, srcTree, parent=None, depth=None):
"""Recurse through a source document tree, building a table of contents"""
if parent is None:
parent = nodes.bullet_list()
if depth==None:
depth=self.config.tocDepth
if depth<=0:
return parent
items=nodes.section()
for n in srcTree.children:
if isinstance(n, nodes.title):
refid = self.genUniqueRef()
n.attributes['ids'].append(refid)
newItem = nodes.list_item()
newItem.append(nodes.paragraph('','', nodes.reference('', refid=refid, *n.children)))
newItem.append(nodes.bullet_list())
parent.append(newItem)
elif isinstance(n, nodes.section):
if len(parent)==0:
newItem = nodes.list_item()
newItem.append(nodes.bullet_list())
parent.append(newItem)
self.buildTOC( n, parent[-1][-1], depth-1)
# go through parent promoting any doubly nested bullet_lists
for item in parent.children:
if isinstance(item.children[0], nodes.bullet_list):
sublist = item.children[0]
for subitem in sublist.children[:]: # copy it so it isn't corrupted by what we're about to do
sublist.remove(subitem)
item.parent.insert(item.parent.index(item), subitem)
parent.remove(item)
return parent
def generateIndex(self, pathToHere, module, depth=99):
if depth<=0:
return []
tree=[]
children = module.listAllModules()
children.sort()
if pathToHere!="":
pathToHere=pathToHere+"."
for subModuleName,submodule in children:
moduleContents=[]
if self.config.showComponentsOnIndices:
moduleContains=[name for (name,item) in submodule.listAllComponentsAndPrefabs()]
if len(moduleContains)>0:
moduleContains.sort()
moduleContents.append(nodes.Text(" ( "))
first=True
for name in moduleContains:
if not first:
moduleContents.append(nodes.Text(", "))
first=False
uri = self.renderer.makeURI(pathToHere+subModuleName+"."+name)
linkToDecl = nodes.reference('', nodes.Text(name), refuri=uri)
moduleContents.append(linkToDecl)
moduleContents.append(nodes.Text(" )"))
uri=self.renderer.makeURI(pathToHere+subModuleName)
tree.append( nodes.list_item('',
nodes.paragraph('','',
nodes.strong('', '',nodes.reference('', subModuleName, refuri=uri)),
*moduleContents
),
*self.generateIndex(pathToHere+subModuleName, submodule,depth-1)
) )
if len(tree):
return [ nodes.bullet_list('', *tree) ]
else:
return []
def generateDocumentationFiles(formatter, CONFIG):
for (moduleName,module) in CONFIG.repository.listAllModulesIncSubModules():
print "Processing: "+moduleName
components=module.listAllComponents()
prefabs=module.listAllPrefabs()
if CONFIG.includeNonKamaeliaStuff:
classes = [X for X in module.listAllClasses() if X not in components]
functions = [X for X in module.listAllFunctions() if X not in prefabs]
else:
classes = []
functions = []
if CONFIG.filterPattern in moduleName:
doctree = formatter.formatModulePage(moduleName, module, components, prefabs, classes, functions)
filename = formatter.renderer.makeFilename(moduleName)
output = formatter.renderer.render(moduleName, doctree)
F = open(CONFIG.docdir+"/"+filename, "w")
F.write(output)
F.close()
for (name,component) in components:
NAME=moduleName+"."+name
if CONFIG.filterPattern in NAME:
print " Component: "+NAME
filename = formatter.renderer.makeFilename(NAME)
doctree = formatter.formatComponentPage(moduleName, name, component)
output = formatter.renderer.render(NAME, doctree)
F = open(CONFIG.docdir+"/"+filename, "w")
F.write(output)
F.close()
for (name,prefab) in prefabs:
NAME=moduleName+"."+name
if CONFIG.filterPattern in NAME:
print " Prefab: "+NAME
filename = formatter.renderer.makeFilename(NAME)
doctree = formatter.formatPrefabPage(moduleName, name, prefab)
output = formatter.renderer.render(NAME, doctree)
F = open(CONFIG.docdir+"/"+filename, "w")
F.write(output)
F.close()
for (name,cls) in classes:
NAME=moduleName+"."+name
if CONFIG.filterPattern in NAME:
print " Class: "+NAME
filename = formatter.renderer.makeFilename(NAME)
doctree = formatter.formatClassPage(moduleName, name, cls)
output = formatter.renderer.render(NAME, doctree)
F = open(CONFIG.docdir+"/"+filename, "w")
F.write(output)
F.close()
for (name,function) in functions:
NAME=moduleName+"."+name
if CONFIG.filterPattern in NAME:
print " Function: "+NAME
filename = formatter.renderer.makeFilename(NAME)
doctree = formatter.formatFunctionPage(moduleName, name, function)
output = formatter.renderer.render(NAME, doctree)
F = open(CONFIG.docdir+"/"+filename, "w")
F.write(output)
F.close()
def dumpSymbols(makeURI, CONFIG, filename, theTime="", cmdLineArgs=[]):
"""\
Dumps symbols from the repository to a text file - classes, functions, prefabs,
components and modules. Includes, for each, the URL for the corresponding
piece of generated documentation.
This data can therefore be read in by another documentation build to allow
cross links to be generated.
Arguments:
- makeURI -- function for transforming symbols to the corresponding URI they should map to
- CONFIG -- configuration object
- filename -- filename to dump to
- theTime -- Optional. String describing the time of this documentation build.
- cmdLineArgs -- Optional. The command line args used to invoke this build.
"""
print "Dumping symbols to file '"+filename+"' ..."
F=open(filename,"wb")
F.write(";\n")
F.write("; Kamaelia documentation extractor symbol dump\n")
if theTime:
F.write("; (generated on "+theTime+" )\n")
if cmdLineArgs:
F.write(";\n")
F.write("; Command line args for build were:\n")
F.write("; "+" ".join(cmdLineArgs)+"\n")
F.write(";\n")
F.write("\n")
cfg=ConfigParser.ConfigParser()
cfg.optionxform = str # make case sensitive
cfg.add_section("COMPONENTS")
cfg.add_section("PREFABS")
cfg.add_section("CLASSES")
cfg.add_section("FUNCTIONS")
cfg.add_section("MODULES")
for (moduleName,module) in CONFIG.repository.listAllModulesIncSubModules():
uri=makeURI(moduleName)
cfg.set("MODULES", option=moduleName, value=uri)
components=module.listAllComponents()
prefabs=module.listAllPrefabs()
if CONFIG.includeNonKamaeliaStuff:
classes = [X for X in module.listAllClasses() if X not in components]
functions = [X for X in module.listAllFunctions() if X not in prefabs]
else:
classes = []
functions = []
for (name,item) in classes:
NAME=moduleName+"."+name
URI=makeURI(NAME)
cfg.set("CLASSES", option=NAME, value=URI)
for (name,item) in prefabs:
NAME=moduleName+"."+name
URI=makeURI(NAME)
cfg.set("PREFABS", option=NAME, value=URI)
for (name,item) in components:
NAME=moduleName+"."+name
URI=makeURI(NAME)
cfg.set("COMPONENTS", option=NAME, value=URI)
for (name,item) in functions:
NAME=moduleName+"."+name
URI=makeURI(NAME)
cfg.set("FUNCTIONS", option=NAME, value=URI)
cfg.write(F)
F.close()
if __name__ == "__main__":
    import sys
    # Baseline configuration; individual settings are overridden by the
    # command line switches parsed below.
    config = DocGenConfig()
    config.docdir = "pydoc"
    config.treeDepth=99
    config.tocDepth=3
    config.includeMethods=True
    config.includeModuleDocString=True
    config.showComponentsOnIndices=True
    urlPrefix=""
    # Normalise the command line: switch-style args ("--...") are lowercased
    # so matching below is case insensitive; values/paths are kept verbatim.
    cmdLineArgs = []
    for arg in sys.argv[1:]:
        if arg[:2] == "--" and len(arg)>2:
            cmdLineArgs.append(arg.lower())
        else:
            cmdLineArgs.append(arg)
    if not cmdLineArgs or "--help" in cmdLineArgs or "-h" in cmdLineArgs:
        sys.stderr.write("\n".join([
            "Usage:",
            "",
            " "+sys.argv[0]+" <arguments - see below>",
            "",
            "Only <repository dir> is mandatory, all other arguments are optional.",
            "",
            " --help Display this help message",
            "",
            " --filter <substr> Only build docs for components/prefabs for components",
            " or modules who's full path contains <substr>",
            "",
            " --urlprefix <prefix> Prefix for URLs - eg. a base dir: '/Components/pydoc/",
            " (remember the trailing slash if you want one)",
            "",
            " --outdir <dir> Directory to put output into (default is 'pydoc')",
            " directory must already exist (and be emptied)",
            "",
            " --root <moduleRoot> The module path leading up to the repositoryDir specified",
            " eg. Kamaelia.File, if repositoryDir='.../Kamaelia/File/'",
            " default='Kamaelia'",
            "",
            " --notjustcomponents Will generate documentation for classes and functions too",
            "",
            " --footerinclude <file> A directive will be included to specify '<file>'",
            " as an include at the bottom of all pages.",
            "",
            " --promotetitles Promote module level doc string titles to top of pages",
            " generated. Also causes breadcrumb trails at the top of",
            " pages to be reduced in emphasis slightly, so the title",
            " properly stands out",
            "",
            " --indexdepth Depth (nesting levels) of indexes on non-module pages.",
            " Use 0 to suppress index all together",
            "",
            " --includeTestOutput <dir> Incorporate test suite output",
            " as found in the specified directory.",
            "",
            " --dumpSymbolsTo <file> Dumps catalogue of major symbols (classes, components, ",
            " prefabs, functions) to the specified file, along with",
            " the URLs they map to.",
            "",
            " --linkToSymbols <file> Read symbols from the specified file and automatically",
            " link any references to those symbols to the respective",
            " URLs defined in the symbol file.",
            " Repeat this option for every symbol file to be read in.",
            "",
            " <repositoryDir> Use Kamaelia modules here instead of the installed ones",
            "",
            "",
        ]))
        sys.exit(0)
    # Parse switches, deleting each switch (and its value, where it takes
    # one) from cmdLineArgs as it is handled.  Whatever remains afterwards
    # should be just the optional repository directory.
    try:
        if "--filter" in cmdLineArgs:
            index = cmdLineArgs.index("--filter")
            config.filterPattern = cmdLineArgs[index+1]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--urlprefix" in cmdLineArgs:
            index = cmdLineArgs.index("--urlprefix")
            urlPrefix = cmdLineArgs[index+1]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--outdir" in cmdLineArgs:
            index = cmdLineArgs.index("--outdir")
            config.docdir = cmdLineArgs[index+1]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--root" in cmdLineArgs:
            index = cmdLineArgs.index("--root")
            config.docroot = cmdLineArgs[index+1]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--notjustcomponents" in cmdLineArgs:
            index = cmdLineArgs.index("--notjustcomponents")
            config.includeNonKamaeliaStuff=True
            del cmdLineArgs[index]
        if "--promotetitles" in cmdLineArgs:
            index = cmdLineArgs.index("--promotetitles")
            config.promoteModuleTitles=True
            config.deemphasiseTrails=True
            del cmdLineArgs[index]
        if "--footerinclude" in cmdLineArgs:
            index = cmdLineArgs.index("--footerinclude")
            location=cmdLineArgs[index+1]
            config.pageFooter = "\n[[include][file="+location+"]]\n"
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--indexdepth" in cmdLineArgs:
            index = cmdLineArgs.index("--indexdepth")
            config.treeDepth = int(cmdLineArgs[index+1])
            # NOTE(review): assert is stripped under -O; a raise would be safer
            assert(config.treeDepth >= 0)
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--includetestoutput" in cmdLineArgs:
            index = cmdLineArgs.index("--includetestoutput")
            config.testOutputDir = cmdLineArgs[index+1]
            config.testExtensions = [("...ok","Tests passed:"),("...fail","Tests failed:")]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if "--dumpsymbolsto" in cmdLineArgs:
            index = cmdLineArgs.index("--dumpsymbolsto")
            config.dumpSymbolsTo = cmdLineArgs[index+1]
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        # --linktosymbols may be repeated, hence a while loop
        while "--linktosymbols" in cmdLineArgs:
            index = cmdLineArgs.index("--linktosymbols")
            config.loadSymbolsFrom.append(cmdLineArgs[index+1])
            del cmdLineArgs[index+1]
            del cmdLineArgs[index]
        if len(cmdLineArgs)==1:
            REPOSITORYDIR = cmdLineArgs[0]
        elif len(cmdLineArgs)==0:
            REPOSITORYDIR = None
        else:
            # bare raise is used purely as control flow to reach the except
            # clause below (it raises TypeError when no exception is active)
            raise
    except:
        # deliberately broad: any parse failure (IndexError on a missing
        # value, bad int, leftover args) produces the same usage error
        sys.stderr.write("\n".join([
            "Error in command line arguments.",
            "Run with '--help' for info on command line arguments.",
            "",
            "",
        ]))
        sys.exit(1)
    args=sys.argv
    # empty sys.argv — presumably so code imported from the repository being
    # documented cannot see/reparse our arguments; TODO confirm
    sys.argv=sys.argv[0:0]
    debug = False
    REPOSITORY=Repository.ModuleDoc( moduleName=config.docroot,
                                     filePath=REPOSITORYDIR,
                                     localModules={},
                                   )
    REPOSITORY.resolve(roots={config.docroot:REPOSITORY})
    config.repository=REPOSITORY
    import time
    theTime=time.strftime("%d %b %Y at %H:%M:%S UTC/GMT", time.gmtime())
    config.pageFooter += "\n<p><i>-- Automatic documentation generator, "+theTime+"</i>\n"
    renderer = RenderHTML(titlePrefix="Kamaelia docs : ",
                          urlPrefix=urlPrefix,
                          debug=False,
                          rawFooter=config.pageFooter)
    # automatically generate crosslinks when component names are seen
    crossLinks = {}
    wantedTypes=(ClassScope,FunctionScope,ModuleScope,)
    for (fullPathName,item) in REPOSITORY.listAllMatching(recurseDepth=99,noRecurseTypes=ImportScope,types=wantedTypes):
        if config.includeNonKamaeliaStuff \
           or isinstance(item,ModuleScope) \
           or getattr(item,"isComponent",False) \
           or getattr(item,"isPrefab",False):
            fullPathName = REPOSITORY.module+"."+fullPathName
            crossLinks[fullPathName] = fullPathName
    renderer.setAutoCrossLinks( crossLinks )
    # also add crosslinks for any referenced external files of symbols
    for filename in config.loadSymbolsFrom:
        print "Reading symbol links from '%s' ..." % filename
        cfg=ConfigParser.ConfigParser()
        cfg.optionxform = str # make case sensitive
        if not cfg.read(filename):
            # NOTE(review): raising a string is a TypeError on Python 2.6+;
            # this should be raise Exception(...) — the error text is lost
            raise "Could not find symbol file: "+filename
        renderer.addAutoLinksToURI(dict(cfg.items("CLASSES")))
        renderer.addAutoLinksToURI(dict(cfg.items("FUNCTIONS")))
        renderer.addAutoLinksToURI(dict(cfg.items("COMPONENTS")))
        renderer.addAutoLinksToURI(dict(cfg.items("PREFABS")))
        renderer.addAutoLinksToURI(dict(cfg.items("MODULES")))
    formatter = docFormatter(renderer, config=config)
    generateDocumentationFiles(formatter,config)
    if config.dumpSymbolsTo is not None:
        dumpSymbols(formatter.renderer.makeURI, config, config.dumpSymbolsTo, theTime, args)
    # exit status: 0 = clean build, 2 = docstring parse/render errors occurred
    if formatter.errorCount>0:
        print "Errors occurred during docstring parsing/page generation."
        sys.exit(2)
    else:
        sys.exit(0)
|
from . import isotropic_cov_funs
import numpy as np
__all__ = ['nsmatern','nsmatern_diag','default_h']
def default_h(x):
    """Default relative-amplitude function: returns 1 at every input location.

    *x* is an array of points whose last axis holds the coordinates, so the
    result has the shape of *x* with that final axis dropped.
    """
    locations_shape = np.shape(x)[:-1]
    return np.ones(locations_shape)
def nsmatern(C,x,y,diff_degree,amp=1.,scale=1.,h=default_h,cmin=0,cmax=-1,symm=False):
    """
    Nonstationary Matern covariance function.

    Remember, broadcasting for covariance functions works differently than
    for numpy universal functions: C(x,y) returns a matrix, and C(x)
    returns a vector.

    :Parameters:

        - `C`: the covariance matrix, filled in place and returned.

        - `x and y` are arrays of points in Euclidean coordinates
          formatted as follows:

          [[x_{0,0} ... x_{0,ndim}],
           [x_{1,0} ... x_{1,ndim}],
           ...
           [x_{N,0} ... x_{N,ndim}]]

        - `diff_degree`: a function that takes arrays and returns the degree
          of differentiability at each location.

        - `amp`: the pointwise standard deviation of f.

        - `scale`: the factor by which to scale the distance between points.
          Large value implies long-range correlation.
          (NOTE(review): neither amp nor scale is forwarded to the Fortran
          routine below — presumably applied elsewhere; confirm.)

        - `h`: a function that takes arrays and returns the relative
          amplitude at each location.

        - `cmin' and `cmax' indicate which columns to compute.
          These are used for multithreaded evaluation.

        - `symm` indicates whether x and y are references to
          the same array.

    :Reference: Pintore and Holmes, 2010, "Spatially adaptive non-stationary
        covariance functions via spatially adaptive spectra". Journal of the
        American Statistical Association. Forthcoming.
    """
    dd_x = diff_degree(x)
    dd_y = diff_degree(y)
    h_x = h(x)
    h_y = h(y)
    # rkbesl needs the largest integer Bessel order that will be requested
    highest_order = np.floor(max(np.max(dd_x), np.max(dd_y)))
    # Fill in the requested columns of C in place
    isotropic_cov_funs.nsmatrn(C, dd_x, dd_y, h_x, h_y, highest_order, cmin, cmax, symm=symm)
    return C
def nsmatern_diag(x,diff_degree, amp=1., scale=1.,h=default_h):
    """Return the diagonal of the nonstationary Matern covariance at *x*:
    the pointwise variance (h(x)*amp)**2 at each location.

    diff_degree and scale are accepted for interface symmetry with
    nsmatern but do not enter the computation here.
    """
    pointwise_sd = amp * h(x)
    return pointwise_sd ** 2
|
import os.path
import pandas as pd
from datetime import date
from distutils import dir_util
import requests
from pandas.io.json import json_normalize
from resources import constants
from utils import data_utils, api_utils
import time
import json
"""
This script currently updates the committed and paid funding for appeals, and replaces the old `funding_progress.csv` file.
Scheduling this script to run on a nightly or weekly basis is sufficient to automatically update the /world/funding_progress endpoint.
Functions to query raw funding data from the UNOCHA FTS API, perform transformations, and save the data.
See API docs here: https://fts.unocha.org/sites/default/files/publicftsapidocumentation.pdf
"""
def getPlans(year, country_mapping):
    """Fetch all funding plans for *year* from the FTS API and tidy them.

    Arguments:
    - year            -- year to fetch plans for.
    - country_mapping -- country mapping dataframe (currently unused; the
                         merge that used it is commented out below).

    Returns a dataframe of plans sorted by name, with categoryName and
    countryCode columns extracted from the API's nested objects, and NaNs
    replaced by None.
    """
    # Get all plans from the FTS API
    data = api_utils.get_fts_endpoint('/public/plan/year/{}'.format(year))

    def extract_adminLevel0(locations):
        # Return the iso3 code of the last country-level (adminLevel == 0)
        # entry in the locations list, or None when there is none.
        # (renamed parameter: the original shadowed the builtin `dict`)
        iso3 = None
        for location in locations:
            if location['adminLevel'] == 0:
                iso3 = location['iso3']
        return iso3

    # Extract names from nested objects
    data['categoryName'] = data.categories.apply(lambda x: x[0]['name'])
    # note: 'emergencies' is computed here but dropped again just below,
    # matching the original behaviour
    data['emergencies'] = data.emergencies.apply(lambda x: x[0]['name'] if x else None)
    data['countryCode'] = data.locations.apply(extract_adminLevel0)
    # Merge in country codes based on country Name
    #data = data.merge(country_mapping, how='left', on=['name'])
    # Tidy the dataset
    data.drop(['origRequirements', 'startDate', 'endDate', 'years', 'categories', 'emergencies', 'locations'], axis=1, inplace=True)
    data = data.where((pd.notnull(data)), None).sort_values('name')
    return data
def getCountries():
    """Fetch the list of locations (countries) from the FTS API.

    Returns a dataframe of locations; callers in this script use its
    'id', 'name' and 'iso3' columns.
    """
    # Get all locations (not plans) from the FTS API
    data = api_utils.get_fts_endpoint('/public/location')
    return data
def updateCommittedAndPaidFunding(year=constants.FTS_APPEAL_YEAR,
                                  csv_path='resources/data/derived/example/funding_progress.csv'):
    """Refresh the appealFunded and percentFunded columns of the funding
    progress CSV using current committed+paid flows from the FTS API.

    Arguments:
    - year     -- appeal year (NOTE(review): currently unused in the body;
                  kept for interface compatibility — confirm intent).
    - csv_path -- path of the funding progress CSV to read (the previous
                  hard-coded path is now the default, so callers are
                  unaffected).

    Returns the updated dataframe.
    """
    data = pd.read_csv(csv_path, encoding='utf-8')

    def pull_committed_funding_for_plan(plan_id):
        # Sum all incoming flows for the plan, excluding unpaid pledges.
        plan_funds = api_utils.get_fts_endpoint('/public/fts/flow?planId={}'.format(plan_id), 'flows')
        funded = plan_funds[(plan_funds.boundary == 'incoming') & (plan_funds.status != 'pledge')]
        return funded['amountUSD'].sum()

    data['appealFunded'] = data['id'].apply(pull_committed_funding_for_plan)
    data['percentFunded'] = data.appealFunded / data.revisedRequirements
    return data
def getInitialRequiredAndCommittedFunding(data):
    """Add appealFunded, percentFunded and neededFunding columns to the
    given plans dataframe, using each plan's incoming funding total from
    the FTS API.  Returns the (mutated) dataframe."""
    def pull_committed_funding_for_plan(plan_id):
        # Incoming funding total committed/paid against this plan.
        endpoint = '/public/fts/flow?planId={}'.format(plan_id)
        return api_utils.get_fts_endpoint(endpoint, 'incoming')['fundingTotal']

    data['appealFunded'] = data['id'].apply(pull_committed_funding_for_plan)
    # Derived progress columns
    data['percentFunded'] = data.appealFunded / data.revisedRequirements
    data['neededFunding'] = data.revisedRequirements - data.appealFunded
    return data
def getDonorPlanFundingAmounts(plans):
"""
For each plan, pull the amount funded by each donor.
Since the path to the right data in the json is very long, I couldn't sort out how to keep the path in a variable.
"""
# TODO: make a helper function like api_utils.get_fts_endpoint() that can take a very long key chain
# TODO: make column indexes of final output a constant
# TODO: add metadata! With update date.
def getFundingByDonorOrg(plan_id):
url = None
endpoint_str = '/public/fts/flow?planId={}&groupby=Organization'.format(plan_id)
url = constants.FTS_API_BASE_URL + endpoint_str
result = requests.get(url, auth=(constants.FTS_CLIENT_ID, constants.FTS_CLIENT_PASSWORD))
result.raise_for_status()
if len(result.json()['data']['report1']['fundingTotals']['objects']) > 0:
result = json_normalize(result.json()['data']['report1']['fundingTotals']['objects'][0]['singleFundingObjects'])
result['plan_id'] = plan_id
else:
print 'Empty data from this endpoint: {}'.format(url)
result = None
return result
plan_ids = plans['id']
data = pd.DataFrame([])
#loop through each plan and append the donor data for it to a combined data set
for plan in plan_ids:
funding = getFundingByDonorOrg(plan)
data = data.append(funding)
data = data.merge(plans, how='left', left_on='plan_id', right_on='id')
data.drop(['behavior','id_y', 'direction', 'type'], axis=1,inplace=True)
data.columns = (['organization_id', 'organization_name', 'totalFunding', 'plan_id', 'plan_code', 'plan_name','countryCode'])
return data
def getTopDonorCountryFundingAmounts(countries, year, top=False, top_n=5):
"""
For each plan, pull the amount funded by each donor.
Since the path to the right data in the json is very long, I couldn't sort out how to keep the path in a variable.
"""
# TODO: add metadata! With update date.
def getDonorByCountry(country, year, top, top_n):
endpoint_str = '/public/fts/flow?locationid={}&year={}&groupBy=organization'.format(country, year)
url = 'https://api.hpc.tools/v1' + endpoint_str
result = requests.get(url, auth=(constants.FTS_CLIENT_ID, constants.FTS_CLIENT_PASSWORD))
result.raise_for_status()
if result.json()['data']['report1']['fundingTotals']['total'] == 0:
single = None
print ('No funding data for country {} in {}'.format(country, year))
else:
single = result.json()['data']['report1']['fundingTotals']['objects'][0]['singleFundingObjects']
if single:
single = json_normalize(single)
single['year'] = year
single['dest_country_id'] = country
single = single.sort_values('totalFunding', ascending=False)
if top == True:
single = single.head(top_n)
return single
country_ids = countries['id']
data = pd.DataFrame([])
#loop through each plan and append the donor data for it to a combined data set
for country_id in country_ids:
print 'Getting funding for country: {}'.format(country_id)
funding = getDonorByCountry(country_id, year, top, top_n)
print 'Done. Appending...'
data = data.append(funding)
data = data.merge(countries, how='left', left_on='dest_country_id', right_on='id')
data.drop(['behavior','id_x', 'direction', 'type', 'dest_country_id', 'id_y'], axis=1,inplace=True)
data.columns = (['organization_name', 'totalFunding', 'year', 'countryCode', 'Country'])
return data
def getClusterFundingAmounts(plans):
    """
    For each plan, pull the amount required and funded at the cluster level.

    Returns a dataframe with one row per (plan, cluster), holding the
    revised requirement, total funding, and percent funded.
    """
    # TODO: make a helper function like api_utils.get_fts_endpoint() that can take a very long key chain
    # TODO: make column indexes of final output a constant
    # TODO: add metadata! With update date.
    def getFundingByCluster(plan_id):
        endpoint_str = '/public/fts/flow?planId={}&groupby=Cluster'.format(plan_id)
        url = 'https://api.hpc.tools/v1' + endpoint_str
        response = requests.get(url, auth=(constants.FTS_CLIENT_ID, constants.FTS_CLIENT_PASSWORD))
        response.raise_for_status()
        # parse the JSON body once (the original parsed it three times)
        payload = response.json()['data']
        #Get the required funding amounts for each cluster
        requirements = payload['requirements']
        if requirements and 'objects' in requirements:
            requirements = json_normalize(requirements['objects'])
            requirements['plan_id'] = plan_id
        else:
            print ('No requirements data from this endpoint: {}'.format(url))
            requirements = None
        #Get the actual funded amounts for each cluster
        fundingObjects = payload['report3']['fundingTotals']['objects']
        if len(fundingObjects) > 0:
            funding = json_normalize(fundingObjects[0]['singleFundingObjects'])
            funding['plan_id'] = plan_id
        else:
            print ('Empty data from this endpoint: {}'.format(url))
            funding = None
        #Join required and actual funding amounts together.  Guard both
        #sides: the original crashed (None.merge) when requirements were
        #missing but funding was present.
        if funding is not None and requirements is not None:
            combined = requirements.merge(funding, how='outer', on=['name', 'plan_id'])
        elif funding is not None:
            combined = funding
        else:
            combined = requirements
        return combined

    data = pd.DataFrame([])
    #loop through each plan and append the donor data for it to a combined data set
    for plan in plans['id']:
        print ('Getting for plan {}'.format(plan))
        funding = getFundingByCluster(plan)
        print ('Success! Appending plan {}'.format(plan))
        data = data.append(funding)
    #TODO: if a plan result in an error, skip that and move on
    #Merge on plan information for reference
    data = data.merge(plans, how='left', left_on='plan_id', right_on='id')
    #Select certain columns and rename them
    data = data[['name_x', 'revisedRequirements', 'totalFunding', 'plan_id','code','name_y','countryCode']]
    data.columns = (['cluster', 'revisedRequirements', 'totalFunding', 'plan_id', 'plan_code', 'plan_name','countryCode'])
    #Replace NaN funded amounts with 0s
    data.totalFunding = data.totalFunding.fillna(0)
    #Calculate percent funded
    data['percentFunded'] = data['totalFunding']/data['revisedRequirements']
    return data
def getCountryFundingAmounts(year_list, country_mapping):
    """
    For each country, pull the total amount of funding received in each
    year of *year_list*.

    Arguments:
    - year_list       -- iterable of years to query.
    - country_mapping -- dataframe mapping country 'id'/'name' to 'iso3'.

    Returns a dataframe with one row per (country, year), sorted by
    country then year.
    """
    # TODO: make a helper function like api_utils.get_fts_endpoint() that can take a very long key chain
    # TODO: make column indexes of final output a constant
    # TODO: add metadata! With update date.
    def getActualFundingByCountryGroup(year):
        endpoint_str = '/public/fts/flow?year={}&groupby=Country'.format(year)
        url = 'https://api.hpc.tools/v1' + endpoint_str
        response = requests.get(url, auth=(constants.FTS_CLIENT_ID, constants.FTS_CLIENT_PASSWORD))
        response.raise_for_status()
        # Guard the empty case: the original indexed [0] unconditionally,
        # which raised IndexError when no objects were returned.
        fundingTotals = response.json()['data']['report3']['fundingTotals']['objects']
        single = fundingTotals[0]['singleFundingObjects'] if fundingTotals else None
        if single:
            single = json_normalize(single)
            single['year'] = year
        else:
            print ('Empty data from this endpoint: {}'.format(url))
            single = None
        return single

    data = pd.DataFrame([])
    #loop through each year and append the data for it to a combined data set
    for year in year_list:
        data = data.append(getActualFundingByCountryGroup(year))
    data = data.merge(country_mapping, how='left', on=['name','id'])
    data.drop(['id', 'direction', 'type'], axis=1,inplace=True)
    #Rename column headings
    data.rename(columns={'name': 'Country',
                         'iso3': 'countryCode'
                         }, inplace=True)
    data = data.sort_values(['Country','year'])
    return data
def loadDataByDimension(dimension):
    """
    Given a dimension of funding data (e.g. clusters/donors/recipients), load the data for each country.
    Return a dict of country code to pandas dataframe for the funding data along the given dimension.

    Raises an Exception when *dimension* is not one of the configured FTS
    schemas, and IOError when the download-date file or a csv is missing.
    """
    if dimension not in constants.FTS_SCHEMAS.keys():
        raise Exception('Not a valid funding dimension for downloaded data from FTS: {}!'.format(dimension))
    data_dir = os.path.join(constants.LATEST_RAW_DATA_PATH, constants.FTS_DIR)
    # The download-date file records the date stamp embedded in the csv
    # file names written by the downloader.
    with open(os.path.join(data_dir, constants.FTS_DOWNLOAD_DATE_FILE), 'r') as f:
        date_str = f.read().strip()
    data = {}
    for code, country in constants.COUNTRY_CODES.iteritems():
        file_name = '-'.join([constants.FTS_FILE_PREFIX, code, dimension, date_str])
        file_path = os.path.join(data_dir, '{}.csv'.format(file_name))
        data[country] = pd.read_csv(file_path, encoding='utf-8')
    return data
# def loadDataByCountryCode(country_code):
# """
# Given a country, load the data for each funding dimension.
# Return a dict of funding dimension to pandas dataframe for the funding data for the given country.
# """
# if country_code not in constants.COUNTRY_CODES.keys():
# if country_code not in constants.COUNTRY_CODES.values():
# raise Exception('Not a valid country code for downloaded data from FTS: {}!'.format(country_code)
# else:
# # Convert country name to country code
# country_code = constants.COUNTRY_CODES.values().index(country_code)
#
# data_dir = os.path.join(constants.LATEST_RAW_DATA_PATH, constants.FTS_DIR)
# date_str = date.today().isoformat()
# with open(os.path.join(data_dir, constants.FTS_DOWNLOAD_DATE_FILE), 'r') as f:
# date_str = f.read().strip()
# data = {}
# for dimension, schema in constants.FTS_SCHEMAS.iteritems():
# file_name = '-'.join([constants.FTS_FILE_PREFIX, country_code, dimension, date_str])
# file_path = os.path.join(data_dir, '{}.csv'.format(file_name))
# df = pd.read_csv(file_path, encoding='utf-8')
# data[dimension] = df
# return data
# def combineData(data, column):
# """
# Combine given data across a particular column, where data is a dictionary from keys to dataframes,
# and the given column corresponds to a column name for the keys of the data dict, e.g. 'Country' or 'Dimension'.
# Returns a single dataframe that combines all the dataframes in the given data.
# """
# combined_df = pd.DataFrame()
# for key, df in data.iteritems():
# df[column] = key
# combined_df = combined_df.append(df)
# return combined_df
#
#
# def updateLatestDataDir(download_path, current_date_str):
# """
# Copies all files from the given download_path into the latest data directory configured in
# `resources/constants.py`. Appends to the run_dates.txt file with the current run date.
# """
# if not download_path or not current_date_str:
# print 'Could not copy latest data for this run to the latest data directory!'
# return
# dir_util.copy_tree(download_path, constants.LATEST_DERIVED_DATA_PATH)
# with open(constants.LATEST_DERIVED_RUN_DATE_FILE, 'a') as run_file:
# run_file.write('{}-fts\n'.format(current_date_str))
# return
# def createCurrentDateDir(parent_dir):
# """
# Create a new directory with the current date (ISO format) under the given parent_dir.
# Return whether it was successful, the full path for the new directory, and the current date string.
# If the date directory already exists or is not successful, default to returning the parent_dir as the full path.
# """
# # Create a new directory of the current date under the given parent_dir if it doesn't already exist
# current_date_str = date.today().isoformat()
# dir_path = os.path.join(parent_dir, current_date_str)
# success = data_utils.safely_mkdir(dir_path)
# if not success:
# # Safely default to returning the parent_dir if we cannot create the dir_path
# print 'Could not create a new directory for the current date [{}], defaulting to existing parent dir: {}'.format(current_date_str, parent_dir)
# dir_path = parent_dir
# else:
# print 'Created new derived data dir: {}'.format(dir_path)
# return success, dir_path, current_date_str
#
#
# def saveDerivedData(data, dir_path):
# """
# Save the derived data into a new dated directory under the given parent_dir (defaults to DERIVED_DATA_PATH configured in `resources/constants.py`).
# Return whether any data saving was successful.
# """
# # Save data to dated directory under the given parent_dir
# success = False
# for dimension, df in data.iteritems():
# df_path = os.path.join(dir_path, 'fts-{}.csv'.format(dimension))
# print 'Saving derived data for dimension [{}] to: {}'.format(dimension, df_path)
# df.to_csv(df_path, index=False, encoding='utf-8')
# success = True
# return success
#
#
# def run_transformations_by_dimension():
# """
# This is an example of some data transformations we can do to go from raw data to derived data.
# """
# print 'Load and process downloaded data from FTS'
# print 'Create current date directory as the download path...'
# _, download_path, current_date_str = createCurrentDateDir(constants.DERIVED_DATA_PATH)
# print 'Load data by dimension...'
# data_by_dimension = {}
# for dimension, schema in constants.FTS_SCHEMAS.iteritems():
# data_for_dimension = loadDataByDimension(dimension)
# print 'Combine data for dimension [{}] across all countries...'.format(dimension)
# data_by_dimension[dimension] = combineData(data_for_dimension, constants.COUNTRY_COL)
# print data_by_dimension[dimension]
#
# success = saveDerivedData(data_by_dimension, download_path)
# if success:
# print 'Copy data from {} to {}...'.format(download_path, constants.LATEST_DERIVED_DATA_PATH)
# updateLatestDataDir(download_path, current_date_str)
#
# #dir_util.copy_tree(download_path, constants.EXAMPLE_DERIVED_DATA_PATH)
#
# print 'Done!'
def prepend_metadata(metadata, filepath):
    """Rewrite the file at *filepath* so that its first line is '#' followed
    by the JSON-encoded *metadata*, with the original contents unchanged
    after it.  The whole file is read into memory and rewritten in place."""
    with open(filepath, 'r') as original:
        body = original.read()
    header = '#' + json.dumps(metadata)
    with open(filepath, 'w') as modified:
        modified.write(header)
        modified.write('\n' + body)
def run():
    """Fetch FTS funding data via the FTS API helpers and write derived CSV
    files, each prefixed with a commented JSON metadata line."""
    t0 = time.time()
    # Hardcode FTS metadata
    metadata = {}
    metadata['extract_date'] = date.today().isoformat()
    metadata['source_data'] = date.today().isoformat()
    metadata['source_key'] = 'FTS'
    metadata['source_url'] = 'https://fts.unocha.org'
    metadata['update_frequency'] = 'Hourly'
    print 'Get list of countries and ISO-3 codes'
    countries = getCountries()
    print countries.head()
    print 'Get list of plans'
    plans = getPlans(year=constants.FTS_APPEAL_YEAR, country_mapping = countries)
    print plans.head()
    # Filter plans to only include those that are not RRPs and where the funding requirement is > 0
    plans = plans[(plans.categoryName != 'Regional response plan') & (plans.revisedRequirements > 0)]
    plan_index = plans[['id', 'code', 'name', 'countryCode']]
    print 'Get required and committed funding from the FTS API'
    initial_result = getInitialRequiredAndCommittedFunding(plans)
    print initial_result.head()
    official_data_path = os.path.join(constants.EXAMPLE_DERIVED_DATA_PATH, 'funding_progress.csv')
    initial_result.to_csv(official_data_path, encoding='utf-8', index=False)
    prepend_metadata(metadata, official_data_path)
    print 'Get donor funding amounts to each plan from the FTS API'
    donor_funding_plan = getDonorPlanFundingAmounts(plan_index)
    print donor_funding_plan.head()
    official_data_path = os.path.join(constants.EXAMPLE_DERIVED_DATA_PATH, 'funding_donors_appeal.csv')
    donor_funding_plan.to_csv(official_data_path, encoding='utf-8', index=False)
    prepend_metadata(metadata, official_data_path)
    print 'Get required and committed funding at the cluster level from the FTS API'
    cluster_funding = getClusterFundingAmounts(plan_index)
    print cluster_funding.head()
    official_data_path = os.path.join(constants.EXAMPLE_DERIVED_DATA_PATH, 'funding_clusters.csv')
    cluster_funding.to_csv(official_data_path, encoding='utf-8', index=False)
    prepend_metadata(metadata, official_data_path)
    print 'Get funding by destination country for given years'
    # NOTE(review): the year range 2015-2017 is hard-coded here — confirm it
    # should not track constants.FTS_APPEAL_YEAR
    country_funding = getCountryFundingAmounts(range(2015, 2018), countries)
    print country_funding.head()
    official_data_path = os.path.join(constants.EXAMPLE_DERIVED_DATA_PATH, 'funding_dest_countries.csv')
    country_funding.to_csv(official_data_path, encoding='utf-8', index=False)
    prepend_metadata(metadata, official_data_path)
    print 'Get top donors by destination country for given years'
    donor_funding_country = getTopDonorCountryFundingAmounts(countries, 2016, top=True, top_n=5)
    print donor_funding_country.head()
    official_data_path = os.path.join(constants.EXAMPLE_DERIVED_DATA_PATH, 'funding_donors_country.csv')
    donor_funding_country.to_csv(official_data_path, encoding='utf-8', index=False)
    prepend_metadata(metadata, official_data_path)
    print 'Done!'
    print 'Total time taken in minutes: {}'.format((time.time() - t0)/60)
# Script entry point: run the full FTS download/derive pipeline.
if __name__ == "__main__":
    run()
|
"""
Quantiphyse - Processes for basic loading/saving of data
Copyright (c) 2013-2020 University of Oxford
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from quantiphyse.utils import QpException
from quantiphyse.data import load, save
from .process import Process
__all__ = ["LoadProcess", "LoadDataProcess", "LoadRoisProcess", "SaveProcess", "SaveAllExceptProcess", "SaveDeleteProcess", "SaveArtifactsProcess"]
class LoadProcess(Process):
    """
    Load data into the IVM
    """
    def __init__(self, ivm, **kwargs):
        Process.__init__(self, ivm, **kwargs)

    def run(self, options):
        """Load every file named under the 'data' and 'rois' options."""
        rois = options.pop('rois', {})
        data = options.pop('data', {})
        # Force 3D data to be multiple 2D volumes
        force_mv = options.pop('force-multivol', False)
        all_items = list(data.items()) + list(rois.items())
        for fname, name in all_items:
            qpdata = self._load_file(fname, name)
            if qpdata is None:
                continue
            if force_mv and qpdata.nvols == 1 and qpdata.grid.shape[2] > 1:
                qpdata.set_2dt()
            qpdata.roi = fname in rois
            self.ivm.add(qpdata, make_current=True)

    def _load_file(self, fname, name):
        """Load one file; return the data object or None on failure."""
        filepath = self._get_filepath(fname)
        if name is None:
            # Default the name to the file's base name up to the first dot
            basename = os.path.split(fname)[1].split(".", 1)[0]
            name = self.ivm.suggest_name(basename)
        self.debug(" - Loading data '%s' from %s" % (name, filepath))
        try:
            qpdata = load(filepath)
        except QpException as exc:
            self.warn("Failed to load data: %s (%s)" % (filepath, str(exc)))
            return None
        qpdata.name = name
        return qpdata

    def _get_filepath(self, fname, folder=None):
        """Resolve *fname* to an absolute path (relative to *folder* or the input dir)."""
        if os.path.isabs(fname):
            return fname
        base = self.indir if folder is None else folder
        return os.path.abspath(os.path.join(base, fname))
class LoadDataProcess(LoadProcess):
    """
    Process to load data

    Deprecated: use LoadProcess
    """
    def run(self, options):
        # Delegate to LoadProcess, treating every option as a data file
        LoadProcess.run(self, {'data' : options})
        # Consume all options so none are reported as unused
        options.clear()
class LoadRoisProcess(LoadProcess):
    """
    Process to load ROIs

    Deprecated: use LoadProcess
    """
    def run(self, options):
        # Delegate to LoadProcess, treating every option as an ROI file
        LoadProcess.run(self, {'rois' : options})
        # Consume all options so none are reported as unused
        options.clear()
class SaveProcess(Process):
    """
    Save data to file
    """
    def __init__(self, ivm, **kwargs):
        Process.__init__(self, ivm, **kwargs)

    def run(self, options):
        """Save each named item; values give file names, keys are data names."""
        # Note that output-grid is not a valid data name so will not clash
        output_grid = None
        output_grid_name = options.pop("output-grid", None)
        if output_grid_name is not None:
            grid_source = self.ivm.data.get(output_grid_name, self.ivm.rois.get(output_grid_name, None))
            if grid_source is None:
                raise QpException("No such data found as source of grid: %s" % output_grid_name)
            output_grid = grid_source.grid
        for name in list(options.keys()):
            try:
                fname = options.pop(name, name)
                qpdata = self.ivm.data.get(name, None)
                if qpdata is None:
                    self.warn("Failed to save %s - no such data or ROI found" % name)
                else:
                    save(qpdata, fname, grid=output_grid, outdir=self.outdir)
            except QpException as exc:
                self.warn("Failed to save %s: %s" % (name, str(exc)))
class SaveAllExceptProcess(Process):
    """
    Save all data to file apart from specified items
    """
    def __init__(self, ivm, **kwargs):
        Process.__init__(self, ivm, **kwargs)

    def run(self, options):
        """Save every data item in the IVM except those named in *options*.

        Option keys name the items to skip; all options are consumed.
        Failures are logged and do not stop the remaining saves.
        """
        exceptions = list(options.keys())
        for k in exceptions: options.pop(k)
        for name, qpdata in self.ivm.data.items():
            if name in exceptions:
                continue
            try:
                save(qpdata, name, outdir=self.outdir)
            except QpException as exc:
                self.warn("Failed to save %s: %s" % (name, str(exc)))
            except Exception:
                # Fix: was a bare 'except:' which also swallowed SystemExit
                # and KeyboardInterrupt; keep best-effort behaviour but let
                # those propagate.
                import traceback
                traceback.print_exc()
class SaveDeleteProcess(SaveProcess):
    """
    Save data to file and then delete it
    """
    def __init__(self, ivm, **kwargs):
        SaveProcess.__init__(self, ivm, **kwargs)

    def run(self, options):
        """Save the named items, then remove each saved item from the IVM."""
        # Remember the names before SaveProcess.run consumes the options
        names = list(options.keys())
        SaveProcess.run(self, options)
        for name in names:
            if name in self.ivm.data:
                self.ivm.delete(name)
class SaveArtifactsProcess(Process):
    """
    Save 'extras' (previously known as 'artifacts')
    """
    def __init__(self, ivm, **kwargs):
        Process.__init__(self, ivm, **kwargs)

    def run(self, options):
        """Save each named extra as text.

        Option keys name the extras; option values give the output file
        names (falling back to the extra's own name when empty).
        """
        for name in list(options.keys()):
            fname = options.pop(name)
            if not fname: fname = name
            if name in self.ivm.extras:
                self.debug("Saving '%s' to %s" % (name, fname))
                self._save_text(str(self.ivm.extras[name]), fname)
            else:
                self.warn("Extra '%s' not found - not saving" % name)

    def _save_text(self, text, fname, ext="txt"):
        """Write *text* to *fname*, adding a default extension when missing
        and resolving relative names against the output directory."""
        if text:
            if "." not in fname: fname = "%s.%s" % (fname, ext)
            if not os.path.isabs(fname):
                fname = os.path.join(self.outdir, fname)
            # Fix: exist_ok avoids the exists()/makedirs() race of the original
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            with open(fname, "w") as text_file:
                text_file.write(text)
|
from __future__ import print_function, absolute_import, division
import os
import numpy as np
from tqdm import tqdm
import cv2
import argparse
import torch
import torch.nn as nn
import torch.optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from model.mesh_graph_hg import MeshGraph_hg, init_pretrained
from util import config
from util.helpers.visualize import Visualizer
from util.loss_utils import kp_l2_loss, Shape_prior, Laplacian
from util.loss_sdf import tversky_loss
from util.metrics import Metrics
from datasets.stanford import BaseDataset
from util.logger import Logger
from util.meter import AverageMeterSet
from util.misc import save_checkpoint
from util.pose_prior import Prior
from util.joint_limits_prior import LimitPrior
from util.utils import print_options
# Global training state shared between main() and the checkpointing logic.
global_step = 0  # optimizer steps taken so far (TensorBoard x-axis)
best_pck = 0  # best refined-mesh PCK observed on the eval set
best_pck_epoch = 0  # epoch that produced best_pck
def main(args):
    """Train the MeshGraph_hg model and evaluate once per epoch.

    Builds the model, datasets and optimizer from *args*, optionally
    restores a checkpoint or pretrained weights, then optimizes a weighted
    sum of keypoint, silhouette (tversky), shape/pose prior and Laplacian
    losses. The checkpoint with the best refined-mesh PCK is kept.
    """
    global best_pck
    global best_pck_epoch
    global global_step
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_ids
    print("RESULTS: {0}".format(args.output_dir))
    # makedirs(..., exist_ok=True) creates parents and avoids the
    # exists()/mkdir() race of the original code
    os.makedirs(args.output_dir, exist_ok=True)
    # set up device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # set up model
    model = MeshGraph_hg(device, args.shape_family_id, args.num_channels, args.num_layers, args.betas_scale,
                         args.shape_init, args.local_feat, num_downsampling=args.num_downsampling,
                         render_rgb=args.save_results)
    model = nn.DataParallel(model).to(device)
    # set up dataset
    dataset_train = BaseDataset(args.dataset, param_dir=args.param_dir, is_train=True, use_augmentation=True)
    data_loader_train = DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, num_workers=args.num_works)
    dataset_eval = BaseDataset(args.dataset, param_dir=args.param_dir, is_train=False, use_augmentation=False)
    data_loader_eval = DataLoader(dataset_eval, batch_size=args.batch_size, shuffle=False, num_workers=args.num_works)
    # set up optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    writer = SummaryWriter(os.path.join(args.output_dir, 'train'))
    # set up priors
    joint_limit_prior = LimitPrior(device)
    shape_prior = Shape_prior(args.prior_betas, args.shape_family_id, device)
    tversky = tversky_loss(args.alpha, args.beta)
    # Fix + hoist: the pose prior is loop-invariant, so build it once here
    # instead of re-constructing it on every training step; the original
    # also created an Exception for an unknown prior name without raising it.
    if args.w_pose_prior > 0:
        if args.prior_pose == 'smal':
            pose_prior_path = config.WALKING_PRIOR_FILE
        elif args.prior_pose == 'unity':
            pose_prior_path = config.UNITY_POSE_PRIOR
        else:
            raise Exception('The prior should come from either smal or unity')
        pose_prior = Prior(pose_prior_path, device)
    # read the adjacency matrix, which will be used in the Laplacian regularizer
    data = np.load('./data/mesh_down_sampling_4.npz', encoding='latin1', allow_pickle=True)
    adjmat = data['A'][0]
    laplacianloss = Laplacian(adjmat, device)
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint {}".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            if args.load_optimizer:
                optimizer.load_state_dict(checkpoint['optimizer'])
            args.start_epoch = checkpoint['epoch'] + 1
            print("=> loaded checkpoint {} (epoch {})".format(args.resume, checkpoint['epoch']))
            # logger = Logger(os.path.join(args.output_dir, 'log.txt'), resume=True)
            logger = Logger(os.path.join(args.output_dir, 'log.txt'))
            logger.set_names(['Epoch', 'LR', 'PCK', 'IOU', 'PCK_re', 'IOU_re'])
        else:
            print("=> no checkpoint found at {}".format(args.resume))
            # Fix: the original left 'logger' undefined on this path, which
            # caused a NameError at the first logger.append() call
            logger = Logger(os.path.join(args.output_dir, 'log.txt'))
            logger.set_names(['Epoch', 'LR', 'PCK', 'IOU', 'PCK_re', 'IOU_re'])
    else:
        logger = Logger(os.path.join(args.output_dir, 'log.txt'))
        logger.set_names(['Epoch', 'LR', 'PCK', 'IOU', 'PCK_re', 'IOU_re'])
    if args.freezecoarse:
        # freeze the coarse-mesh branch; only the remaining layers train
        for p in model.module.meshnet.parameters():
            p.requires_grad = False
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint {}".format(args.pretrained))
            checkpoint_pre = torch.load(args.pretrained)
            init_pretrained(model, checkpoint_pre)
            print("=> loaded checkpoint {} (epoch {})".format(args.pretrained, checkpoint_pre['epoch']))
            # logger = Logger(os.path.join(args.output_dir, 'log.txt'), resume=True)
            logger = Logger(os.path.join(args.output_dir, 'log.txt'))
            logger.set_names(['Epoch', 'LR', 'PCK', 'IOU', 'PCK_re', 'IOU_re'])
    print_options(args)
    if args.evaluate:
        pck, iou_silh, pck_by_part, pck_re, iou_re = run_evaluation(model, dataset_eval, data_loader_eval, device, args)
        print("Evaluate only, PCK: {:6.4f}, IOU: {:6.4f}, PCK_re: {:6.4f}, IOU_re: {:6.4f}"
              .format(pck, iou_silh, pck_re, iou_re))
        return
    lr = args.lr
    for epoch in range(args.start_epoch, args.nEpochs):
        model.train()
        tqdm_iterator = tqdm(data_loader_train, desc='Train', total=len(data_loader_train))
        meters = AverageMeterSet()
        for step, batch in enumerate(tqdm_iterator):
            keypoints = batch['keypoints'].to(device)
            keypoints_norm = batch['keypoints_norm'].to(device)
            seg = batch['seg'].to(device)
            img = batch['img'].to(device)
            verts, joints, shape, pred_codes = model(img)
            scale_pred, trans_pred, pose_pred, betas_pred, betas_scale_pred = pred_codes
            pred_camera = torch.cat([scale_pred[:, [0]], torch.ones(keypoints.shape[0], 2).cuda() * config.IMG_RES / 2],
                                    dim=1)
            faces = model.module.smal.faces.unsqueeze(0).expand(verts.shape[0], 7774, 3)
            labelled_joints_3d = joints[:, config.MODEL_JOINTS]
            # project 3D joints onto 2D space and apply 2D keypoints supervision
            synth_landmarks = model.module.model_renderer.project_points(labelled_joints_3d, pred_camera)
            loss_kpts = args.w_kpts * kp_l2_loss(synth_landmarks, keypoints[:, :, [1, 0, 2]], config.NUM_JOINTS)
            meters.update('loss_kpt', loss_kpts.item())
            loss = loss_kpts
            # use tversky for silhouette loss
            if args.w_dice > 0:
                synth_rgb, synth_silhouettes = model.module.model_renderer(verts, faces, pred_camera)
                synth_silhouettes = synth_silhouettes.unsqueeze(1)
                loss_dice = args.w_dice * tversky(synth_silhouettes, seg)
                meters.update('loss_dice', loss_dice.item())
                loss += loss_dice
            # apply shape prior constraint, either come from SMAL or unity from WLDO
            if args.w_betas_prior > 0:
                if args.prior_betas == 'smal':
                    s_prior = args.w_betas_prior * shape_prior(betas_pred)
                elif args.prior_betas == 'unity':
                    betas_pred = torch.cat([betas_pred, betas_scale_pred], dim=1)
                    s_prior = args.w_betas_prior * shape_prior(betas_pred)
                else:
                    # Fix: the original constructed this Exception without
                    # raising it, then crashed later on s_prior.item()
                    raise Exception("Shape prior should come from either smal or unity")
                meters.update('loss_prior', s_prior.item())
                loss += s_prior
            # apply pose prior constraint (the prior was built once before the loop)
            if args.w_pose_prior > 0:
                p_prior = args.w_pose_prior * pose_prior(pose_pred)
                meters.update('pose_prior', p_prior.item())
                loss += p_prior
            # apply pose limit constraint
            if args.w_pose_limit_prior > 0:
                pose_limit_loss = args.w_pose_limit_prior * joint_limit_prior(pose_pred)
                meters.update('pose_limit', pose_limit_loss.item())
                loss += pose_limit_loss
            # get refined meshes by adding del_v to the coarse mesh from SMAL
            verts_refine, joints_refine, _, _ = model.module.smal(betas_pred, pose_pred, trans=trans_pred,
                                                                  del_v=shape,
                                                                  betas_logscale=betas_scale_pred)
            # apply 2D keypoint and silhouette supervision
            labelled_joints_3d_refine = joints_refine[:, config.MODEL_JOINTS]
            synth_landmarks_refine = model.module.model_renderer.project_points(labelled_joints_3d_refine,
                                                                                pred_camera)
            loss_kpts_refine = args.w_kpts_refine * kp_l2_loss(synth_landmarks_refine, keypoints[:, :, [1, 0, 2]],
                                                               config.NUM_JOINTS)
            meters.update('loss_kpt_refine', loss_kpts_refine.item())
            loss += loss_kpts_refine
            if args.w_dice_refine > 0:
                _, synth_silhouettes_refine = model.module.model_renderer(verts_refine, faces, pred_camera)
                synth_silhouettes_refine = synth_silhouettes_refine.unsqueeze(1)
                loss_dice_refine = args.w_dice_refine * tversky(synth_silhouettes_refine, seg)
                meters.update('loss_dice_refine', loss_dice_refine.item())
                loss += loss_dice_refine
            # apply Laplacian constraint to prevent large deformation predictions
            if args.w_arap > 0:
                verts_clone = verts.detach().clone()
                loss_arap, loss_smooth = laplacianloss(verts_refine, verts_clone)
                loss_arap = args.w_arap * loss_arap
                meters.update('loss_arap', loss_arap.item())
                loss += loss_arap
            meters.update('loss_all', loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            global_step += 1
            if step % 20 == 0:
                loss_values = meters.averages()
                for name, meter in loss_values.items():
                    writer.add_scalar(name, meter, global_step)
                writer.flush()
        pck, iou_silh, pck_by_part, pck_re, iou_re = run_evaluation(model, dataset_eval, data_loader_eval, device, args)
        print("Epoch: {:3d}, LR: {:6.5f}, PCK: {:6.4f}, IOU: {:6.4f}, PCK_re: {:6.4f}, IOU_re: {:6.4f}"
              .format(epoch, lr, pck, iou_silh, pck_re, iou_re))
        logger.append([epoch, lr, pck, iou_silh, pck_re, iou_re])
        # checkpoint, keeping a copy of the best refined-mesh PCK so far
        is_best = pck_re > best_pck
        if is_best:
            best_pck_epoch = epoch
        best_pck = max(pck_re, best_pck)
        save_checkpoint({'epoch': epoch,
                         'state_dict': model.state_dict(),
                         'best_pck': best_pck,
                         'optimizer': optimizer.state_dict()},
                        is_best, checkpoint=args.output_dir, filename='checkpoint.pth.tar')
    writer.close()
    logger.close()
def run_evaluation(model, dataset, data_loader, device, args):
    """Evaluate *model* over *data_loader* and return summary metrics.

    Returns a 5-tuple: mean PCK and silhouette IOU for the coarse mesh,
    the per-keypoint-group PCK arrays, and mean PCK and IOU for the
    refined mesh. When args.save_results is set, rendered overlay figures
    are also written to args.output_dir.
    """
    model.eval()
    result_dir = args.output_dir
    batch_size = args.batch_size
    # per-sample metric accumulators, filled batch-by-batch below
    pck = np.zeros((len(dataset)))
    pck_by_part = {group: np.zeros((len(dataset))) for group in config.KEYPOINT_GROUPS}
    acc_sil_2d = np.zeros(len(dataset))
    pck_re = np.zeros((len(dataset)))
    acc_sil_2d_re = np.zeros(len(dataset))
    # predicted SMAL parameters per sample (collected but only betas width
    # varies; NOTE(review): these arrays are never read back — confirm they
    # are intentionally kept for debugging)
    smal_pose = np.zeros((len(dataset), 105))
    smal_betas = np.zeros((len(dataset), 20))
    smal_camera = np.zeros((len(dataset), 3))
    smal_imgname = []
    tqdm_iterator = tqdm(data_loader, desc='Eval', total=len(data_loader))
    for step, batch in enumerate(tqdm_iterator):
        with torch.no_grad():
            preds = {}
            keypoints = batch['keypoints'].to(device)
            keypoints_norm = batch['keypoints_norm'].to(device)
            seg = batch['seg'].to(device)
            has_seg = batch['has_seg']
            img = batch['img'].to(device)
            img_border_mask = batch['img_border_mask'].to(device)
            # get coarse meshes and project onto 2D
            verts, joints, shape, pred_codes = model(img)
            scale_pred, trans_pred, pose_pred, betas_pred, betas_scale_pred = pred_codes
            pred_camera = torch.cat([scale_pred[:, [0]], torch.ones(keypoints.shape[0], 2).cuda() * config.IMG_RES / 2],
                                    dim=1)
            faces = model.module.smal.faces.unsqueeze(0).expand(verts.shape[0], 7774, 3)
            labelled_joints_3d = joints[:, config.MODEL_JOINTS]
            synth_rgb, synth_silhouettes = model.module.model_renderer(verts, faces, pred_camera)
            synth_silhouettes = synth_silhouettes.unsqueeze(1)
            synth_landmarks = model.module.model_renderer.project_points(labelled_joints_3d, pred_camera)
            # get refined meshes by adding del_v to coarse estimations
            verts_refine, joints_refine, _, _ = model.module.smal(betas_pred, pose_pred, trans=trans_pred,
                                                                  del_v=shape,
                                                                  betas_logscale=betas_scale_pred)
            labelled_joints_3d_refine = joints_refine[:, config.MODEL_JOINTS]
            # project refined 3D meshes onto 2D
            synth_rgb_refine, synth_silhouettes_refine = model.module.model_renderer(verts_refine, faces, pred_camera)
            synth_silhouettes_refine = synth_silhouettes_refine.unsqueeze(1)
            synth_landmarks_refine = model.module.model_renderer.project_points(labelled_joints_3d_refine,
                                                                                pred_camera)
            if args.save_results:
                synth_rgb = torch.clamp(synth_rgb[0], 0.0, 1.0)
                synth_rgb_refine = torch.clamp(synth_rgb_refine[0], 0.0, 1.0)
            preds['pose'] = pose_pred
            preds['betas'] = betas_pred
            preds['camera'] = pred_camera
            preds['trans'] = trans_pred
            preds['verts'] = verts
            preds['joints_3d'] = labelled_joints_3d
            preds['faces'] = faces
            # per-sample accuracy metrics for the coarse and refined meshes
            preds['acc_PCK'] = Metrics.PCK(synth_landmarks, keypoints_norm, seg, has_seg)
            preds['acc_IOU'] = Metrics.IOU(synth_silhouettes, seg, img_border_mask, mask=has_seg)
            preds['acc_PCK_re'] = Metrics.PCK(synth_landmarks_refine, keypoints_norm, seg, has_seg)
            preds['acc_IOU_re'] = Metrics.IOU(synth_silhouettes_refine, seg, img_border_mask, mask=has_seg)
            for group, group_kps in config.KEYPOINT_GROUPS.items():
                preds[f'{group}_PCK'] = Metrics.PCK(synth_landmarks, keypoints_norm, seg, has_seg,
                                                    thresh_range=[0.15],
                                                    idxs=group_kps)
            preds['synth_xyz'] = synth_rgb
            preds['synth_silhouettes'] = synth_silhouettes
            preds['synth_landmarks'] = synth_landmarks
            preds['synth_xyz_re'] = synth_rgb_refine
            preds['synth_landmarks_re'] = synth_landmarks_refine
            preds['synth_silhouettes_re'] = synth_silhouettes_refine
            # merge the raw batch in, guarding against key collisions
            assert not any(k in preds for k in batch.keys())
            preds.update(batch)
            # scatter this batch's metrics into the per-sample accumulators
            curr_batch_size = preds['synth_landmarks'].shape[0]
            pck[step * batch_size:step * batch_size + curr_batch_size] = preds['acc_PCK'].data.cpu().numpy()
            acc_sil_2d[step * batch_size:step * batch_size + curr_batch_size] = preds['acc_IOU'].data.cpu().numpy()
            smal_pose[step * batch_size:step * batch_size + curr_batch_size] = preds['pose'].data.cpu().numpy()
            smal_betas[step * batch_size:step * batch_size + curr_batch_size, :preds['betas'].shape[1]] = preds['betas'].data.cpu().numpy()
            smal_camera[step * batch_size:step * batch_size + curr_batch_size] = preds['camera'].data.cpu().numpy()
            pck_re[step * batch_size:step * batch_size + curr_batch_size] = preds['acc_PCK_re'].data.cpu().numpy()
            acc_sil_2d_re[step * batch_size:step * batch_size + curr_batch_size] = preds['acc_IOU_re'].data.cpu().numpy()
            for part in pck_by_part:
                pck_by_part[part][step * batch_size:step * batch_size + curr_batch_size] = preds[f'{part}_PCK'].data.cpu().numpy()
            if args.save_results:
                # write one horizontal strip of visualization figures per image
                output_figs = np.transpose(
                    Visualizer.generate_output_figures(preds, vis_refine=True).data.cpu().numpy(),
                    (0, 1, 3, 4, 2))
                for img_id in range(len(preds['imgname'])):
                    imgname = preds['imgname'][img_id]
                    output_fig_list = output_figs[img_id]
                    path_parts = imgname.split('/')
                    path_suffix = "{0}_{1}".format(path_parts[-2], path_parts[-1])
                    img_file = os.path.join(result_dir, path_suffix)
                    output_fig = np.hstack(output_fig_list)
                    smal_imgname.append(path_suffix)
                    npz_file = "{0}.npz".format(os.path.splitext(img_file)[0])
                    cv2.imwrite(img_file, output_fig[:, :, ::-1] * 255.0)
                    # np.savez_compressed(npz_file,
                    #                     imgname=preds['imgname'][img_id],
                    #                     pose=preds['pose'][img_id].data.cpu().numpy(),
                    #                     betas=preds['betas'][img_id].data.cpu().numpy(),
                    #                     camera=preds['camera'][img_id].data.cpu().numpy(),
                    #                     trans=preds['trans'][img_id].data.cpu().numpy(),
                    #                     acc_PCK=preds['acc_PCK'][img_id].data.cpu().numpy(),
                    #                     # acc_SIL_2D=preds['acc_IOU'][img_id].data.cpu().numpy(),
                    #                     **{f'{part}_PCK': preds[f'{part}_PCK'].data.cpu().numpy() for part in pck_by_part}
                    #                     )
    return np.nanmean(pck), np.nanmean(acc_sil_2d), pck_by_part, np.nanmean(pck_re), np.nanmean(acc_sil_2d_re)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # optimization / schedule
    parser.add_argument('--lr', default=0.0001, type=float)
    parser.add_argument('--output_dir', default='./logs/', type=str)
    parser.add_argument('--nEpochs', default=250, type=int)
    # loss weights
    parser.add_argument('--w_kpts', default=10, type=float)
    parser.add_argument('--w_betas_prior', default=1, type=float)
    parser.add_argument('--w_pose_prior', default=1, type=float)
    parser.add_argument('--w_pose_limit_prior', default=0, type=float)
    parser.add_argument('--w_kpts_refine', default=1, type=float)
    # data loading
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument('--num_works', default=4, type=int)
    parser.add_argument('--start_epoch', default=0, type=int)
    parser.add_argument('--gpu_ids', default='0', type=str)
    # checkpointing / evaluation mode
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--resume', default=None, type=str)
    parser.add_argument('--load_optimizer', action='store_true')
    parser.add_argument('--dataset', default='stanford', type=str)
    parser.add_argument('--shape_family_id', default=1, type=int)
    parser.add_argument('--param_dir', default=None, type=str, help='Exported parameter folder to load')
    parser.add_argument('--shape_init', default='smal', help='enable to initiate shape with mean shape')
    parser.add_argument('--save_results', action='store_true')
    # priors ('smal' or 'unity')
    parser.add_argument('--prior_betas', default='smal', type=str)
    parser.add_argument('--prior_pose', default='smal', type=str)
    parser.add_argument('--betas_scale', action='store_true')
    # model architecture
    parser.add_argument('--num_channels', type=int, default=256, help='Number of channels in Graph Residual layers')
    parser.add_argument('--num_layers', type=int, default=5, help='Number of residuals blocks in the Graph CNN')
    parser.add_argument('--pretrained', default=None, type=str)
    parser.add_argument('--local_feat', action='store_true')
    parser.add_argument('--num_downsampling', default=1, type=int)
    parser.add_argument('--freezecoarse', action='store_true')
    # silhouette / regularizer weights and tversky parameters
    parser.add_argument('--w_arap', default=0, type=float)
    parser.add_argument('--w_dice', default=0, type=float)
    parser.add_argument('--w_dice_refine', default=0, type=float)
    parser.add_argument('--alpha', default=0.6, type=float)
    parser.add_argument('--beta', default=0.4, type=float)
    args = parser.parse_args()
    main(args)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
from functools import reduce
def arguments():
    """Parse the command line; -f/--file (the puzzle input) is required."""
    parser = argparse.ArgumentParser(description='Adventofcode.')
    parser.add_argument('-f', '--file', required=True)
    return parser.parse_args()
class CustomCustoms():
    """Holds the answer counts for one customs declaration group."""

    def __init__(self):
        self.answers = None      # part 1: number of distinct answer letters
        self.answers_pt2 = None  # part 2 placeholder (computed elsewhere)

    def find_anwers_pt1(self, group):
        """Store how many distinct letters appear in *group* (newlines ignored)."""
        distinct = set(group.replace("\n", ""))
        self.answers = len(distinct)
def part2(input_path):
    """Sum, over all groups, the number of answers given by *everyone*.

    Groups are separated by blank lines in *input_path*; within a group
    each line is one person's answers. Counts letters common to every
    person in the group.

    Fixes: the original leaked the file handle (open() without close)
    and crashed with TypeError on an empty group (e.g. a trailing
    blank-line separator), because reduce() got an empty sequence.
    """
    with open(input_path) as f:
        groups = f.read().split('\n\n')
    total = 0
    for group in groups:
        member_sets = [set(line) for line in group.split('\n') if line]
        if member_sets:
            total += len(reduce(lambda a, b: a.intersection(b), member_sets))
    return total
def main():
    """Entry point: print the part 2 answer, then the part 1 answer."""
    args = arguments()
    with open(args.file) as file:
        print("Part2:", part2(args.file))
        groups = file.read().strip().split("\n\n")
        counters = []
        for group in groups:
            counter = CustomCustoms()
            counter.find_anwers_pt1(group)
            counters.append(counter)
        print("Part1:", sum(x.answers for x in counters))
# Script entry point.
if __name__ == '__main__':
    main()
|
from graph.graph import Graph
def gain(K, Z, vModify, g, list):
    """Compute the gain of vModify between the set K and the set Z.

    Args:
        K (set): the set v was removed from
        Z (set): the set v was added to
        vModify (vertex): node added/removed to evaluate its gain
        g (graph): graph with all the connections
        list (dict): mapping from node elements to graph vertices

    Returns:
        number: the computed gain between K and Z
    """
    total = 0
    vertex = list[vModify]
    for edge in g.incident_edges(vertex):
        origin, dest = edge._origin, edge._destination
        # add the weights of edges whose other endpoint lies in K,
        # subtract those whose other endpoint lies in Z
        if origin != vertex and origin._element in K:
            total += edge.element()
        elif dest != vertex and dest._element in K:
            total += edge.element()
        elif origin != vertex and origin._element in Z:
            total -= edge.element()
        elif dest != vertex and dest._element in Z:
            total -= edge.element()
    return total
def calcA(V, Z, vModify, g, list):
    """Interface wrapper that forwards to gain() with (V, Z) ordering."""
    return gain(V, Z, vModify, g, list)
def calcB(V, Z, vModify, g, list):
    """Interface wrapper that forwards to gain() with the sets swapped (Z, V)."""
    return gain(Z, V, vModify, g, list)
def facebook_enmy(V, E):
    """Partition voters into Democrats and Republicans by enmity.

    Facebook's tool groups the voters so that the level of enmity within
    each group is low and the enmity between the two groups is as large
    as possible (a greedy, gain-based pass over the vertices).

    Args:
        V: set of voters
        E (dict): maps voter pairs with a Facebook friendship to their enmity value

    Returns:
        Two sets, D and R, the Democrat and Republican voters respectively.
    """
    g = Graph()
    list = {}
    D = set()
    VminusD = V.copy()
    T = V.copy()
    VminusT = set()
    # insert the vertices into the graph and keep them in a lookup table
    for v in V:
        list[v] = g.insert_vertex(v)
    # add all the edges to the graph
    for e in E.keys():
        g.insert_edge(list[e[0]], list[e[1]], E.get(e))
    # scan every vertex
    for v in V:
        # initialise the two candidate gains
        a = 0
        b = 0
        # temporarily add the current vertex to D, evaluate the gain, then undo
        D.add(v)
        VminusD.remove(v)
        a = calcA(VminusD, D, v, g, list)
        D.remove(v)
        VminusD.add(v)
        # temporarily remove v from T to compute the gain, then re-add it
        T.remove(v)
        VminusT.add(v)
        b = calcB(VminusT, T, v, g, list)
        T.add(v)
        VminusT.remove(v)
        # if adding the vertex yields the larger gain, assign it to the
        # Democrats and remove it from the remainder; otherwise the opposite
        if (a >= b):
            D.add(v)
            VminusD.remove(v)
        else:
            T.remove(v)
            VminusT.add(v)
    # the Republicans are everyone not assigned to the Democrats
    R = V - D
    return D, R
def myBFS(g, s, discovered, d):
    """Customised BFS that stops as soon as a first path to *d* is found.

    Args:
        g (graph): the graph
        s (vertex): source vertex
        discovered (dict): records each vertex's discovery edge (the path)
        d (vertex): destination vertex
    """
    level = [s]  # first level includes only s
    while len(level) > 0:
        next_level = []  # prepare to gather newly found vertices
        for u in level:
            finalEdge = g.get_edge(u, d)
            if(finalEdge is not None):
                # stop the search once a nonzero-capacity edge completes a
                # path from s to d
                if finalEdge._element != 0:
                    discovered[d] = finalEdge  # e is the tree edge that discovered v
                    return
            for e in g.incident_edges(u):  # for every outgoing edge from u
                if e._element != 0:  # only follow edges with remaining capacity
                    v = e.opposite(u)
                    if v not in discovered:  # v is an unvisited vertex
                        discovered[v] = e  # e is the tree edge that discovered v
                        next_level.append(v)  # v will be further considered in next pass
        level = next_level  # relabel 'next' level to become current
def facebook_friend(V, E):
    """Partition voters into Democrats and Republicans by friendship.

    The tool now groups voters so the friendship level within each group is
    large and the friendship between the two groups is as small as possible.
    Implemented as a max-flow/min-cut computation (augmenting paths found
    with myBFS) on a directed network with a Dem source and a Rep sink.
    """
    G = Graph(True)
    Nodes = {}
    # create the Dem and Rep terminal vertices, insert every voter and
    # connect it to both terminals with its respective affinities as capacity
    Dem = G.insert_vertex("Dem")
    Rep = G.insert_vertex("Rep")
    for v in V:
        toIns = G.insert_vertex(v)
        Nodes[v] = toIns
        # every edge gets a zero-capacity reverse edge for the residual graph
        G.insert_edge(Dem, toIns, V[v][0])
        G.insert_edge(toIns, Dem, 0)
        G.insert_edge(toIns, Rep, V[v][1])
        G.insert_edge(Rep, toIns, 0)
    for e in E:
        G.insert_edge(Nodes.get(e[0]), Nodes.get(e[1]), E[e])
        G.insert_edge(Nodes.get(e[1]), Nodes.get(e[0]), E[e])
    # diction holds what myBFS returns: the discovery edges of a single
    # augmenting path from Dem to Rep (only one of the possible paths)
    diction = {}
    diction[Dem] = None
    myBFS(G, Dem, diction, Rep)
    # keep augmenting while Dem can still reach Rep; once it cannot, the
    # partition of the parties is complete
    while diction.get(Rep) is not None:
        edge = diction.get(Rep)
        min = edge.element()
        # find the bottleneck (minimum capacity) along the whole path
        while edge is not None:
            if (edge.element() < min):
                min = edge.element()
            edge = diction.get(edge._origin)
        # update the residual capacities along the path
        edge = diction.get(Rep)
        while edge is not None:
            edge._element -= min
            G.get_edge(edge._destination, edge._origin)._element += min
            edge = diction.get(edge._origin)
        diction = {}
        diction[Dem] = None
        myBFS(G, Dem, diction, Rep)
    D = set()
    # everything still reachable from the Dem side belongs to partition D
    for key in diction.keys():
        if key.element() != 'Dem':  # the Dem source itself is not part of the solution
            D.add(key.element())
    R = Nodes.keys() - D
    return D, R
if __name__ == "__main__":
    # Sample input: V maps each voter to a (Dem affinity, Rep affinity)
    # pair; E maps friend pairs to their relationship weight.
    V = {'A': (4, 1), 'B': (5, 0), 'C': (0, 5), 'D': (1, 4)}
    E = {('A', 'B'): 5, ('A', 'C'): 1, ('A', 'D'): 2, ('B', 'C'): 2, ('D', 'B'): 1, ('C', 'D'): 5}
    """V = {'A': (1, 0), 'B': (3, 2), 'C': (1, 3), 'D': (2, 1), 'E': (2,4)}
    E = {('A', 'B'): 2, ('A', 'C'): 4, ('B', 'D'): 3, ('C', 'D'): 5, ('D', 'E'): 3}"""
    """V = {'A': (4, 0), 'B': (5, 0), 'C': (0, 0), 'D': (1, 0)}
    E = {('A', 'B'): 5, ('A', 'C'): 1, ('A', 'D'): 2, ('B', 'C'): 2, ('D', 'B'): 1, ('C', 'D'): 5}"""
    """V = {'A', 'B', 'C', 'D', 'E'}
    E = {('A', 'B'): 2, ('A', 'C'): 4, ('B', 'D'): 3, ('C', 'D'): 5, ('D', 'E'): 3}"""
    D, R = facebook_friend(V, E)
    print(str(D) + " " + str(R))
|
def queda(lista):
    """Return the 1-based position of the first element that is smaller
    than its predecessor, or 0 if the sequence never decreases."""
    previous = lista[0] if lista else None
    for position, value in enumerate(lista[1:], start=2):
        if value < previous:
            return position
        previous = value
    return 0
# Read the element count (not used further: queda scans the whole list)
n = int(input())
# Read the space-separated integers and report the first drop position
r = list(map(int, input().split()))
print(queda(r))
# mosromgr: Python library for managing MOS running orders
# Copyright 2021 BBC
# SPDX-License-Identifier: Apache-2.0
from mosromgr.cli import main
import pytest
def test_args_incorrect():
    """An unknown argument must make the CLI exit (argparse error)."""
    with pytest.raises(SystemExit):
        main(['--nonexistentarg'])
def test_help(capsys):
    """Each help invocation (--help, -h, help) prints usage text and exits."""
    # The three spellings share identical expectations, so loop instead of
    # repeating the block three times (the original also bound an unused `ex`).
    for help_arg in ('--help', '-h', 'help'):
        with pytest.raises(SystemExit):
            main([help_arg])
        out, err = capsys.readouterr()
        assert "managing MOS running orders" in out
def test_detect():
    """The detect subcommand collects one or many -f file arguments."""
    single = main.parser.parse_args(['detect', '-f', 'roCreate.mos.xml'])
    assert single.files == ['roCreate.mos.xml']
    multiple = main.parser.parse_args(
        ['detect', '-f', 'roCreate.mos.xml', 'roDelete.mos.xml'])
    assert multiple.files == ['roCreate.mos.xml', 'roDelete.mos.xml']
def test_inspect():
    """inspect accepts files via -f/--file, or an S3 bucket/key pair."""
    for file_flag in ('-f', '--file'):
        parsed = main.parser.parse_args(['inspect', file_flag, 'roCreate.mos.xml'])
        assert parsed.files == ['roCreate.mos.xml']
        assert not parsed.bucket_name
        assert not parsed.key
    parsed = main.parser.parse_args(['inspect', '-b', 'bucket', '-k', 'key'])
    assert not parsed.files
    assert parsed.bucket_name == 'bucket'
    assert parsed.key == 'key'
def test_merge():
    """merge accepts files via -f/--files, an S3 bucket/prefix pair, and
    output options (-o/--outfile, -i/--incomplete)."""
    for files_flag in ('-f', '--files'):
        parsed = main.parser.parse_args(['merge', files_flag, 'roCreate.mos.xml'])
        assert parsed.files == ['roCreate.mos.xml']
        parsed = main.parser.parse_args(
            ['merge', files_flag, 'roCreate.mos.xml', 'roDelete.mos.xml'])
        assert parsed.files == ['roCreate.mos.xml', 'roDelete.mos.xml']
    parsed = main.parser.parse_args(['merge', '-b', 'bucket', '-p', 'prefix'])
    assert not parsed.files
    assert parsed.bucket_name == 'bucket'
    assert parsed.prefix == 'prefix'
    assert not parsed.outfile
    assert not parsed.incomplete
    for out_flags in (['-o', 'outfile', '-i'],
                      ['--outfile', 'outfile', '--incomplete']):
        parsed = main.parser.parse_args(
            ['merge', '-f', 'roCreate.mos.xml', 'roDelete.mos.xml'] + out_flags)
        assert parsed.files == ['roCreate.mos.xml', 'roDelete.mos.xml']
        assert parsed.bucket_name is None
        assert parsed.prefix is None
        assert parsed.outfile == 'outfile'
        assert parsed.incomplete
|
import argparse
import textwrap
from wordle import Wordle
class ArgumentDefaultsHelpFormatter(argparse.RawTextHelpFormatter):
    """Help formatter that dedents help strings while keeping raw line breaks.

    NOTE(review): despite the name, this does NOT append default values like
    argparse.ArgumentDefaultsHelpFormatter does — it only dedents help text.
    """
    def _get_help_string(self, action):
        # Strip common leading whitespace so help strings can be written
        # indented in the source without the indent leaking into --help output.
        return textwrap.dedent(action.help)
def start():
    """Parse the command-line options and launch a Wordle pattern run."""
    parser = argparse.ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--pool', '-pl', required=True, help='Name of the directory containing the pool of words to use. (Required)')
    parser.add_argument('--pattern', '-pt', required=True, help='Name of the directory containing the pattern to run. (Required)')
    parser.add_argument('--attempts', '-at', type=int, default=1, help='Number of times the program will run on each pattern. This matters because the possible solutions use random generations, so running a pattern a second time will probably create different solutions. If --distinct, new solutions to the same password will be discarded.(Default: 1)')
    parser.add_argument('--pools_dir', '-pld', default='./word_pools', help='Path to the directory containing all the word pools. (Default: ./word_pools)')
    parser.add_argument('--patterns_dir', '-ptd', default='./patterns', help='Path to the directory containing all the patterns. (Default: ./patterns)')
    parser.add_argument('--distinct', '-d', action='store_true', default=False, help='Keep only one solution for each password. Only matters if using --attempts greater than 1 or if running the pattern a second time.')
    parser.add_argument('--trace', '-t', action='store_true', default=False, help='Show execution text.')
    options = parser.parse_args()
    # --distinct means "do not repeat passwords", hence the negation.
    game = Wordle(options.pool, options.pools_dir, options.patterns_dir)
    game.play(
        options.pattern,
        attempts=options.attempts,
        repeat_pwd=not options.distinct,
        trace=options.trace,
    )
if __name__ == '__main__':
    # Allow running this module directly as a script.
    start()
from typing import List
class Solution:
    def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
        """Insert ``newInterval`` into a sorted list of disjoint intervals,
        merging any overlaps, and return the resulting interval list.

        :param intervals: disjoint [start, end] intervals sorted by start
        :param newInterval: the [start, end] interval to insert
        :return: a new sorted list of disjoint intervals

        Unlike the previous implementation, this never mutates the caller's
        ``newInterval`` list.
        """
        res = []
        i = 0
        n = len(intervals)
        start, end = newInterval
        # 1. Copy intervals that end strictly before the new one starts.
        while i < n and intervals[i][1] < start:
            res.append(intervals[i])
            i += 1
        # 2. Absorb every interval that overlaps (or touches) [start, end].
        while i < n and intervals[i][0] <= end:
            start = min(start, intervals[i][0])
            end = max(end, intervals[i][1])
            i += 1
        res.append([start, end])
        # 3. The remaining intervals start strictly after the merged one ends.
        res.extend(intervals[i:])
        return res
if __name__ == '__main__':
    # Quick manual check: [4, 8] overlaps [3,5], [6,7] and [8,10].
    solution = Solution()
    intervals = [[1, 2], [3, 5], [6, 7], [8, 10], [12, 16]]
    newInterval = [4, 8]
    print(solution.insert(intervals, newInterval))
|
import os
# Configuration is read from the process environment, with local defaults.
env = os.environ
#: The Celery broker URL used by webhook.
celery_broker = env.get("CELERY_BROKER", "redis://localhost:6379")
#: The Celery backend used by webhook.
celery_backend = env.get("CELERY_BACKEND", "redis://localhost:6379")
#: The secret used for encoding GitHub commit message.
# An unset/empty WEBHOOK_SECRET deliberately becomes None rather than b"".
webhook_secret = env.get("WEBHOOK_SECRET", "").encode() or None
|
"""Custom errors, error handler functions and function to register error
handlers with a Connexion app instance."""
import logging
from connexion import App, ProblemException
from connexion.exceptions import (
ExtraParameterProblem,
Forbidden,
Unauthorized
)
from flask import Response
from json import dumps
from typing import Union
from werkzeug.exceptions import (BadRequest, InternalServerError, NotFound)
# Get logger instance
logger = logging.getLogger(__name__)
def register_error_handlers(app: App) -> App:
    """Adds custom handlers for exceptions to Connexion app instance."""
    # Map each exception type to the handler responsible for it.
    handler_map = (
        (BadRequest, handle_bad_request),
        (ExtraParameterProblem, handle_bad_request),
        (Forbidden, __handle_forbidden),
        (InternalServerError, __handle_internal_server_error),
        (Unauthorized, __handle_unauthorized),
        (TaskNotFound, __handle_task_not_found),
    )
    for exception_type, handler in handler_map:
        app.add_error_handler(exception_type, handler)
    logger.info('Registered custom error handlers with Connexion app.')
    # Return Connexion app instance so callers can chain on it.
    return app
# CUSTOM ERRORS
class TaskNotFound(ProblemException, NotFound, BaseException):
    """TaskNotFound(404) error compatible with Connexion.

    Combines Connexion's ProblemException with werkzeug's NotFound so it is
    both renderable as a problem document and treated as a 404.
    """
    def __init__(self, title=None, **kwargs):
        # Forward the title (and any extra problem fields) to ProblemException.
        super(TaskNotFound, self).__init__(title=title, **kwargs)
# CUSTOM ERROR HANDLERS
def handle_bad_request(exception: Union[Exception, int]) -> Response:
    """Return a 400 problem+json response for malformed requests."""
    payload = {
        'msg': 'The request is malformed.',
        'status_code': '400'
    }
    return Response(
        response=dumps(payload),
        status=400,
        mimetype="application/problem+json"
    )
def __handle_unauthorized(exception: Exception) -> Response:
    """Return a 401 problem+json response for unauthorized requests."""
    payload = {
        'msg': 'The request is unauthorized.',
        'status_code': '401'
    }
    return Response(
        response=dumps(payload),
        status=401,
        mimetype="application/problem+json"
    )
def __handle_forbidden(exception: Exception) -> Response:
    """Return a 403 problem+json response for forbidden requests."""
    payload = {
        'msg': 'The requester is not authorized to perform this action.',
        'status_code': '403'
    }
    return Response(
        response=dumps(payload),
        status=403,
        mimetype="application/problem+json"
    )
def __handle_task_not_found(exception: Exception) -> Response:
    """Return a 404 problem+json response when a task cannot be found."""
    payload = {
        'msg': 'The requested task was not found.',
        'status_code': '404'
    }
    return Response(
        response=dumps(payload),
        status=404,
        mimetype="application/problem+json"
    )
def __handle_internal_server_error(exception: Exception) -> Response:
    """Return a 500 problem+json response for unexpected server errors."""
    payload = {
        'msg': 'An unexpected error occurred.',
        'status_code': '500'
    }
    return Response(
        response=dumps(payload),
        status=500,
        mimetype="application/problem+json"
    )
import argparse
import os
import sys
from dotenv import load_dotenv
from sendgrid.helpers.mail import Mail
from requests.exceptions import RequestException
from bude_hezky.content import content_builder
from bude_hezky.sender import email_sender
from bude_hezky.weather import weather_forecast
# CLI option names ('mesto' is Czech for city).
CITY_OPTION_KEY = 'mesto'
EMAIL_OPTION_KEY = 'email'
# Weather-condition codes treated as "sunny-like" weather.
SUNNY_LIKE_CODES = [1, 2, 3, 4, 5, 6, 20, 21, 30]
# CLI: positional city argument plus an optional --email recipient.
parser = argparse.ArgumentParser()
parser.add_argument(CITY_OPTION_KEY, help='Město, kde bydlíš a kód státu (např. Prague,CZ). Můžeš zkusit i vesnici.')
parser.add_argument(f'--{EMAIL_OPTION_KEY}', help='E-mail, na který pošleme informaci, zda bude hezky.')
cli_arguments = vars(parser.parse_args())
# Load WEATHER_API_KEY (and presumably SendGrid credentials) from a .env file.
load_dotenv()
api_key = os.getenv('WEATHER_API_KEY')
city = cli_arguments[CITY_OPTION_KEY]
to_email = cli_arguments[EMAIL_OPTION_KEY]
# Fetch tomorrow's forecasts; abort with exit code 1 on any HTTP error.
try:
    tomorrow_forecasts = weather_forecast.get_forecasts_for_city(api_key, city)
except RequestException as e:
    print(f'Počasí nezjištěno kvůli chybě: {e}')
    sys.exit(1)
sunny_hours = weather_forecast.get_sunny_like_hours(tomorrow_forecasts)
if not sunny_hours:
    # No sunny hours tomorrow: report and exit successfully.
    print(':( Zítra raději zůstaň doma.')
    sys.exit()
# Render the sunny hour ranges; rreplace swaps the last ', ' for ' a ' (Czech "and").
hours_string = ', '.join(str(s) for s in content_builder.build_sunny_ranges(sunny_hours))
final_message = content_builder.rreplace(f'Hurá! Zítra bude ve městě {city} hezky mezi {hours_string}. Běž třeba na kolo!', ', ', ' a ', 1)
print(final_message)
# Optionally e-mail the message via SendGrid.
if to_email:
    print('Posílám e-mail...')
    message = Mail(
        from_email='ivan@ikvasnica.com',
        to_emails=to_email,
        subject='Zítra bude hezky',
        html_content=final_message
    )
    try:
        email_sender.send_mail(message)
    except email_sender.EmailNotSentException as e:
        print(f'E-mail nemohl být poslán kvůli chybě: {e}')
        sys.exit(1)
|
import json
import os
import tensorflow as tf
import numpy as np
import datetime as dt
from collections import defaultdict
import gaussian_variables as gv
def start_run():
    """Seed the numpy/tensorflow RNGs and return a unique run identifier.

    The id combines a timestamp with the process id so runs started in the
    same second by different processes still get distinct ids.
    """
    pid = os.getpid()
    run_id = "%s_%s" % (dt.datetime.now().strftime('%Y%m%d_%H%M%S'), pid)
    np.random.seed(0)
    tf.set_random_seed(0)
    # String repetition replaces the original list-comprehension join.
    banner = '*' * 80
    print(banner)
    print('* RUN ID: %s ' % run_id)
    print(banner)
    return run_id
def get_hypers(args, default_hypers_path):
    """Load default hyperparameters, then overlay --config-file and --config.

    :param args: dict-like CLI arguments ('--config-file', '--config')
    :param default_hypers_path: path to the default hyperparameters JSON
    :return: merged hyperparameter dict (later sources win)
    """
    with open(default_hypers_path, 'r') as fh:
        hypers = json.load(fh)
    # Overlay values from an optional JSON config file.
    override_path = args.get('--config-file')
    if override_path is not None:
        with open(override_path, 'r') as fh:
            hypers.update(json.load(fh))
    # Overlay values from an optional inline JSON string.
    inline_config = args.get('--config')
    if inline_config is not None:
        hypers.update(json.loads(inline_config))
    return hypers
def get_device_string(device_id):
    """Map a device id to a TF device string; negative ids mean CPU."""
    if device_id < 0:
        return '/cpu:0'
    return '/gpu:%s' % device_id
def get_session():
    """Create a TF1 session and initialize all global variables."""
    config = tf.ConfigProto()
    # Grab GPU memory on demand instead of reserving it all up front.
    config.gpu_options.allow_growth = True
    # Fall back to CPU placement when an op has no GPU kernel.
    config.allow_soft_placement = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    return sess
def restrict_dataset_size(dataset, size_fraction):
    """Return a copy of *dataset* truncated to its first size_fraction rows.

    The kept count is computed from the first component, so all components
    are truncated to the same length.
    """
    keep = int(np.floor(size_fraction * len(dataset[0])))
    return tuple(component[:keep] for component in dataset)
def batched(dataset, hypers):
    """Yield (inputs, targets) batches of size hypers['batch_size'].

    The final batch may be smaller when the dataset length is not a
    multiple of the batch size.

    The original carried a dead permutation branch behind a hard-coded
    ``permute = False`` flag (which would also have mutated the caller's
    dataset in place); it has been removed.
    """
    batch_size = hypers['batch_size']
    for offset in range(0, len(dataset[0]), batch_size):
        yield (dataset[0][offset:offset + batch_size],
               dataset[1][offset:offset + batch_size])
def make_optimizer(model_and_metrics, hypers):
    """Build the training op described by *hypers*.

    :param model_and_metrics: dict with 'metrics' (containing 'loss') and
        'global_step'
    :param hypers: dict with 'optimizer', 'learning_rate', 'gradient_clip'
    :return: a TF op performing one optimization step
    :raises NotImplementedError: for an unrecognized optimizer name
    """
    # Normalize the optimizer name once instead of per comparison.
    opt_name = hypers['optimizer'].strip().lower()
    if opt_name == 'adam':
        optimizer = tf.train.AdamOptimizer(hypers['learning_rate'])
    elif opt_name == 'momentum':
        optimizer = tf.train.MomentumOptimizer(hypers['learning_rate'], 0.9)
    elif opt_name == 'sgd':
        optimizer = tf.train.GradientDescentOptimizer(hypers['learning_rate'])
    else:
        raise NotImplementedError('optimizer "%s" not recognized' % hypers['optimizer'])
    if hypers['gradient_clip'] > 0:
        # Clip each gradient elementwise; skip variables with no gradient.
        gvs = optimizer.compute_gradients(model_and_metrics['metrics']['loss'])
        clip = hypers['gradient_clip']
        capped_gvs = [(tf.clip_by_value(grad, -clip, clip), var)
                      for grad, var in gvs
                      if grad is not None]
        train_op = optimizer.apply_gradients(
            capped_gvs, global_step=model_and_metrics['global_step'])
    else:
        train_op = optimizer.minimize(model_and_metrics['metrics']['loss'],
                                      global_step=model_and_metrics['global_step'])
    return train_op
def update_prior_from_posterior(sess, model):
    """Copy each parameter's posterior (value) moments onto its prior.

    The tf.assign ops are built once and cached on the model, so repeated
    calls only re-run the cached assignments.

    Bug fix: ``hasattr`` takes the attribute *name* as a string; the original
    passed the undefined bare identifier ``prior_update_assigners``, which
    raised NameError on every call.
    """
    if not hasattr(model, 'prior_update_assigners'):
        assigners = []
        for p in model.parameters:
            assigners.append(tf.assign(p.prior.mean, p.value.mean))
            assigners.append(tf.assign(p.prior.var, p.value.var))
        model.prior_update_assigners = assigners
    sess.run(model.prior_update_assigners)
def run_one_epoch(sess, data, model, metrics, train_op, hypers, dynamic_hypers):
    """Run one pass over *data*, optionally training, and collect per-batch metrics.

    :param sess: TF session
    :param data: (inputs, targets) pair consumed via batched()
    :param model: model exposing the feed placeholders
    :param metrics: fetch dict; must include 'accuracy' and 'logprob'
    :param train_op: training op, or None for evaluation-only passes
    :param hypers: static hyperparameters ('batch_size', optional
        'loss_n_samples' / 'prior_update')
    :param dynamic_hypers: per-section values (e.g. 'loss_n_samples')
    :return: list of per-batch metric dicts, each augmented with 'count',
        'running_accuracy' and 'running_logprob'
    """
    learning_curve = []
    fetch_list = [metrics]
    if train_op is not None:
        fetch_list.append(train_op)
    feed_dict = {}
    if 'loss_n_samples' in hypers:
        feed_dict[model.placeholders['loss_n_samples']] = dynamic_hypers['loss_n_samples']
    count = 0
    running_accuracy = running_logprob = 0
    for batch in batched(data, hypers):
        feed_dict.update(
            {model.placeholders['ipt_mean']: batch[0],
             model.placeholders['target']: batch[1]})
        result = sess.run(fetch_list, feed_dict)
        # Optionally fold the freshly-updated posterior back into the prior.
        if 'prior_update' in hypers and hypers['prior_update']:
            update_prior_from_posterior(sess, model)
        # Maintain size-weighted running averages over all batches so far.
        new_count = count + len(batch[0])
        running_accuracy = \
            (count * running_accuracy + len(batch[0]) * result[0]['accuracy']) \
            / new_count
        running_logprob = \
            (count * running_logprob + len(batch[0]) * result[0]['logprob']) \
            / new_count
        count = new_count
        result[0].update({'count': len(batch[0]),
                          'running_accuracy': running_accuracy,
                          'running_logprob': running_logprob})
        learning_curve.append(result[0])
    return learning_curve
def train_valid_test(data, sess, model_and_metrics, train_op, hypers, verbose=True):
    """Run one epoch over each section in hypers['sections_to_run'].

    Only the 'train' section uses the training op; 'valid' and 'test' are
    evaluation-only. Returns (summary, accuracies): summary maps each section
    to a list of learning curves, accuracies to its final running accuracy.
    """
    train_op_dict = {'train': train_op, 'valid': None, 'test': None}
    summary = defaultdict(list)
    for section in hypers['sections_to_run']:
        # Collect section-specific values from dict-valued hyperparameters,
        # e.g. hypers['loss_n_samples'] = {'train': ..., 'valid': ...}.
        dynamic_hypers = {h: hypers[h][section]
                          for h in hypers
                          if isinstance(hypers[h], dict) and section in hypers[h]}
        summary[section].append(run_one_epoch(
            sess, data[section],
            model_and_metrics['model'], model_and_metrics['metrics'],
            train_op_dict[section], hypers, dynamic_hypers
        ))
    accuracies = {}
    for section in hypers['sections_to_run']:
        # [-1][-1] = last batch of the most recent epoch for this section.
        accuracies[section] = summary[section][-1][-1]['running_accuracy']
        if verbose:
            print(' %s accuracy = %.4f | logprob = %.4f | KL term = %s' % (section, accuracies[section],
                  summary[section][-1][-1]['running_logprob'],
                  summary[section][-1][-1]['all_surprise']/hypers['dataset_size']),
                  end='')
    if verbose:
        print()
    return summary, accuracies
def piecewise_anneal(hypers, var_name, global_step):
    # Linearly anneal hypers[var_name] from 0 to its full value: stays 0 for
    # warmup_updates[var_name] steps, then ramps up over anneal_updates[var_name]
    # steps, then holds at the full value (the ratio is clipped to [0, 1]).
    return hypers[var_name] * tf.clip_by_value((tf.to_float(global_step) - hypers['warmup_updates'][var_name])/hypers['anneal_updates'][var_name], 0.0, 1.0)
def get_predictions(data, sess, model, hypers):
    """Run the model over *data* in batches and gather mean/covariance outputs.

    :return: (x, y) where x is the flattened concatenated inputs and y is a
        dict with concatenated 'mean' and 'cov' arrays.
    """
    predictions = []
    output = model(model.placeholders['ipt_mean'])
    if isinstance(output, gv.GaussianVar):
        # Gaussian output: broadcast the variance into a (batch, d, d) tensor.
        out_cov = tf.broadcast_to(output.var, [tf.shape(output.mean)[0],
                                               tf.shape(output.mean)[1],
                                               tf.shape(output.mean)[1]])
        out_mean = output.mean
    else:
        # Deterministic output: report an all-zero 2x2 covariance per example.
        out_cov = tf.tile(tf.constant([[[0,0],[0,0]]]), [tf.shape(output)[0], 1,1])
        out_mean = output
    for batch in batched(data, hypers):
        result = sess.run({'mean':out_mean, 'cov':out_cov},
                          {model.placeholders['ipt_mean']: batch[0]})
        predictions.append((batch[0], result))
    x = np.concatenate([p[0] for p in predictions]).reshape(-1)
    y = {}
    for v in ['mean', 'cov']:
        y[v] = np.concatenate([p[1][v] for p in predictions])
    return (x,y)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder handling numpy arrays/scalars, complex numbers, sets
    and bytes in addition to the standard JSON types."""
    def default(self, obj):
        if isinstance(obj, (np.ndarray, np.number)):
            return obj.tolist()
        # Bug fix: np.complex was a deprecated alias of the builtin `complex`
        # and was removed in NumPy 1.24 (AttributeError at runtime here);
        # np.complexfloating covers numpy's complex scalar types instead.
        elif isinstance(obj, (complex, np.complexfloating)):
            return [obj.real, obj.imag]
        elif isinstance(obj, set):
            return list(obj)
        elif isinstance(obj, bytes):  # pragma: py3
            return obj.decode()
        return json.JSONEncoder.default(self, obj)
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File name: screenshot.py
Author: Fredrik Forsberg
Date created: 2020-11-11
Date last modified: 2020-11-11
Python Version: 3.8
"""
import numpy as np
import cv2
from PIL import ImageGrab
###
def pil_screenshot(all_screens=True):
    """
    Capture the current screen contents as a PIL image
    :param all_screens: bool: Include every attached display when True (default=True)
    :return: PIL.Image: Screenshot image
    """
    screenshot = ImageGrab.grab(all_screens=all_screens)
    return screenshot
def opencv_screenshot(all_screens=True):
    """
    Capture the current screen contents as an OpenCV (BGR) image
    :param all_screens: bool: Include every attached display when True (default=True)
    :return: np.array: OpenCV screenshot image
    """
    rgb_array = np.asarray(pil_screenshot(all_screens=all_screens))
    # PIL delivers RGB; OpenCV convention is BGR.
    return cv2.cvtColor(rgb_array, cv2.COLOR_RGB2BGR)
###
if __name__ == "__main__":
    # Grab the current screen contents and display them in a resizable window.
    img = opencv_screenshot()
    # Show image
    window_name = "imshow"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
    cv2.imshow(window_name, img)
    key_code = -1
    try:
        # KeyboardInterrupt works when waitKey is done repeatedly;
        # poll every 500 ms until a key is pressed or the window is closed.
        while cv2.getWindowProperty(window_name, cv2.WND_PROP_VISIBLE) > 0 and key_code == -1:
            key_code = cv2.waitKey(500)
    except KeyboardInterrupt:
        pass
    cv2.destroyWindow(window_name)
|
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from django.contrib.auth.models import Group,Permission
from meiduo_admin.serializers.groups import GroupSerializer
from meiduo_admin.serializers.permission import PermissionSerializer, ContentTypeSerializer
from meiduo_admin.utils import PageNum
class GroupView(ModelViewSet):
    """CRUD endpoints for user groups, plus a helper listing all permissions."""
    serializer_class = GroupSerializer      # serializer used for group records
    queryset = Group.objects.all()          # full group queryset
    pagination_class = PageNum              # project-wide pagination
    permission_classes = [IsAdminUser]      # admin users only
    def simple(self, request):
        """Return every permission, serialized, without pagination."""
        permissions = Permission.objects.all()
        serializer = PermissionSerializer(permissions, many=True)
        return Response(serializer.data)
|
from __future__ import print_function, absolute_import, division
import sys
import click
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
import ec2hosts
def main():
    """Entry point: run the CLI, printing a traceback and exiting 1 on error."""
    try:
        cli(obj={})
    except Exception:
        import traceback
        click.echo(traceback.format_exc(), err=True)
        sys.exit(1)
@click.group(invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name='Anaconda Cluster', version=ec2hosts.__version__)
@click.pass_context
def cli(ctx):
    """Top-level command group; with no subcommand it defaults to `run`."""
    ctx.obj = {}
    if ctx.invoked_subcommand is None:
        ctx.invoke(run)
@cli.command(short_help='Run')
@click.pass_context
def run(ctx):
    """Generate a new /etc/hosts file, preview it, and install it on confirmation."""
    click.echo("New /etc/hosts file:")
    content = ec2hosts.gen_file()
    click.echo(content)
    if click.confirm('Do you want to continue?'):
        # Write to a temp location, then move into place.
        ec2hosts.write(content)
        ec2hosts.move()
@cli.command(short_help='Clean')
@click.pass_context
def clean(ctx):
    """Strip managed entries from /etc/hosts, preview, and install on confirmation."""
    click.echo("New /etc/hosts file:")
    # The original read the file twice and discarded the first result;
    # a single read is sufficient.
    content = ec2hosts.clean(ec2hosts.read_file())
    click.echo(content)
    if click.confirm('Do you want to continue?'):
        ec2hosts.write(content)
        ec2hosts.move()
|
import random
# Helper functions ----------------------------------------------------------------------------------------------------------------------------------
def make_node_barrier(node):
    """Mark *node* as a permanent (hard) barrier and return it."""
    node.make_barrier()
    node.is_hard_barrier = True
    return node
def node_reset(node):
    """Restore *node* to its default state: default colour, not a hard barrier."""
    node.reset()
    node.is_hard_barrier = False
def generate_blank_path(grid):
    """Build a path dict mapping every node in *grid* to None (unvisited)."""
    return dict.fromkeys(node for row in grid for node in row)
def make_wall(grid, start, direction, path):
    """Generates a wall in a given direction from a starting node.

    :param grid: 2D list of nodes
    :param start: node the wall grows from (removed from the returned list)
    :param direction: a movement helper (l_node/r_node/u_node/d_node)
    :param path: dict node -> predecessor; doubles as a visited marker
    :return: list of nodes turned into barriers, so make_opening can reopen one
    """
    # Will contain all the nodes changed (so one can be made open again using make_opening)
    wall = []
    node = start
    # Loop ends as soon as the next node in line is a barrier
    while node:
        # Must keep updating neighbours because barriers are getting added
        node.update_neighbours(grid)
        # Saving the current to a different variable
        prev_node = node
        # Checks next node in given direction
        node = direction(grid, prev_node)
        # Make previous node barrier and add to walls list
        prev_node.update_neighbours(grid)
        make_node_barrier(prev_node)
        wall.append(prev_node)
        # Update path if node is valid, or break the loop if it has been visited before
        if node:
            if path[node]:
                break
            path[node] = prev_node
    # The start node itself should never be part of the returned wall.
    if start in wall:
        wall.remove(start)
    return wall
def make_opening(wall):
    """Open a random node in *wall* (a list of barrier nodes) by resetting it."""
    chosen = random.choice(wall)
    node_reset(chosen)
# MOVEMENT
# Functions to select next node in a certain direction, if that node is valid
def l_node(grid, node):
    """Select the neighbour at row - 1 (the node to the "left" under this
    project's grid orientation) if it exists and is not a hard barrier.

    The -2 margin presumably keeps the outermost border row intact —
    TODO confirm against the grid construction code.
    """
    if node.row - 2 < 0:
        return False
    candidate = grid[node.row - 1][node.col]
    return False if candidate.is_hard_barrier else candidate
def r_node(grid, node):
    """Select the neighbour at row + 1 (the node to the "right" under this
    project's grid orientation) if it exists and is not a hard barrier.

    Bug fix: the bound must be checked against the number of rows
    (len(grid)); the original compared against len(grid[node.row]) — the
    column count — which raises IndexError on non-square grids.
    """
    if node.row + 2 <= len(grid) and not grid[node.row + 1][node.col].is_hard_barrier:
        return grid[node.row + 1][node.col]
    return False
def u_node(grid, node):
    """Select the neighbour at col - 1 (the node "above" under this project's
    grid orientation) if it exists and is not a hard barrier.

    The -2 margin presumably keeps the outermost border column intact —
    TODO confirm against the grid construction code.
    """
    if node.col - 2 < 0:
        return False
    candidate = grid[node.row][node.col - 1]
    return False if candidate.is_hard_barrier else candidate
def d_node(grid, node):
    """Select the neighbour at col + 1 (the node "below" under this project's
    grid orientation) if it exists and is not a hard barrier.

    Bug fix: the bound must be checked against the row length
    (len(grid[node.row]), i.e. the column count); the original compared
    against len(grid) — the row count — which raises IndexError on
    non-square grids.
    """
    if node.col + 2 <= len(grid[node.row]) and not grid[node.row][node.col + 1].is_hard_barrier:
        return grid[node.row][node.col + 1]
    return False
# List containing the movement helper functions to make them easy to iterate over
directions = [r_node, d_node, l_node, u_node]
# Maze type functions -----------------------------------------------------------------------------------------------------------------------------
def completely_random(grid):
    """Give every node a 1-in-4 chance of becoming a barrier; return the grid."""
    for node in (n for row in grid for n in row):
        if random.random() <= 0.25:
            make_node_barrier(node)
    return grid
def basic_swirl(grid):
    """
    Generates a simple swirl type maze.
    'Wall Adder' algorithm as it adds walls to an empty grid.

    :param grid: 2D list of nodes
    :return: the same grid with swirl walls added
    """
    # Defines which nodes have been visited, used in one_direction function
    # Needs to be defined here so it can be checked on through different calls of the one_direction function
    path = generate_blank_path(grid)
    def one_direction(grid, direction, start):
        """Uses a given movement function to keep moving in a specific direction until it hits a barrier."""
        node = start
        # Arbitrary path value just so the start node does not become a barrier
        path[start] = 1
        # Loop ends as soon as the next node in line is a barrier
        while node:
            # Must keep updating neighbours because barriers are getting added
            node.update_neighbours(grid)
            # Saving the current to a different variable
            prev_node = node
            # Checks next node in given direction
            node = direction(grid, node)
            # NOTE(review): when `node` is falsy this records path[False];
            # harmless for truthy node objects, but worth confirming.
            path[node] = prev_node
            # If next node is not valid:
            if not node:
                # If previous node was the start node, break as no other direction can therefore be taken
                if prev_node == start:
                    return False
                # Otherwise, return the previous node
                return prev_node
            # Make the neighbours of the previous node barriers, if they are not yet visited and are not the current node
            for neighbour in prev_node.neighbours:
                if neighbour is not node and not path[neighbour]:
                    make_node_barrier(neighbour)
    # Always starts at top left corner
    start = grid[1][1]
    current = start
    # Loop breaks as soon as the one direction function returns False
    while current:
        # Iterates over movement functions and breaks loop if any of them return false
        for direction in directions:
            previous = current
            current = one_direction(grid, direction, previous)
            if not current:
                break
    return grid
def imperfect(grid):
    """
    My first attempt at a proper maze generating algorithm.
    Imperfect because some chunks of the maze are unfortunately left inacessible.
    Works by adding horizontal and vertical walls of barrier nodes to the grid around a random start node then removing a single barrier from each wall.
    'Wall Adder' algorithm as it adds walls to an empty grid.

    :param grid: 2D list of nodes
    :return: the same grid with walls added
    """
    # Defines which nodes have been visited/changed
    path = generate_blank_path(grid)
    # Keeps a List of what nodes are left to search
    available_nodes = [node for row in grid for node in row]
    # Gives the nodes a random order
    random.shuffle(available_nodes)
    def choose_node(grid):
        """Chooses a random, unvisited node for a wall to be drawn from."""
        while available_nodes:
            node = available_nodes.pop()
            node.update_neighbours(grid)
            # NOTE(review): `not node.is_hard_barrier` is tested twice in this
            # condition; one occurrence is redundant.
            if not path[node] and not node.row % 2 and not node.col % 2 and not node.is_hard_barrier and len(node.neighbours) > 3 and not node.is_hard_barrier:
                return node
        return None
    # Loop ends as soon as there is no node left to go over (from choose_node)
    while True:
        start = choose_node(grid)
        if not start:
            break
        # Makes wall in each direction from the start node and opens a single node on each wall
        for direction in directions:
            wall = make_wall(grid, start, direction, path)
            if not wall:
                continue
            make_opening(wall)
    return grid
def simple_maze(grid):
    """
    Very simple form of maze generation.
    Makes every odd row and column node into a barrier, then loops through the unaffected nodes and resets
    2 adjacent barrier nodes at random.
    This algorithm was inspired by binary tree maze generation, but since the walls/barrierrs on this program
    are full cells rather than space between adjacent cells I had to play around with it and came up with this
    to take its place.
    'Path Carver' algorithm as it starts with walls and then knocks down walls to connect nodes.

    NOTE(review): the i+1 / j+1 neighbour lookups assume open cells never sit
    on the last row/column (i.e. an odd-sized grid with a barrier border);
    otherwise grid[i][j + 1] or grid[i + 1][j] raises IndexError — confirm.
    """
    # Goes through odd rows and columns and makes all of those nodes barriers
    for i, row in enumerate(grid):
        if not i % 2:
            for node in row:
                make_node_barrier(node)
        else:
            for j, node in enumerate(row):
                if not j % 2:
                    make_node_barrier(node)
    # Goes through all the even rows and columns (currently open nodes surrounded by barrier nodes)
    # and chooses, at random, 2 adjacent barrier nodes and resets them
    for i, row in enumerate(grid):
        if i % 2:
            for j, node in enumerate(row):
                if j % 2:
                    # Loops twice to select 2 nodes
                    for _ in range(2):
                        # Nodes chosen from directions: up, down
                        node = random.choice([grid[i][j - 1],
                                              grid[i][j + 1],
                                              grid[i + 1][j],
                                              grid[i - 1][j]
                                              ])
                        node_reset(node)
    return grid
|
from loguru import logger
from mass_spec_utils.data_import.mzmine import load_picked_boxes, map_boxes_to_scans
from mass_spec_utils.data_import.mzml import MZMLFile
from vimms.Roi import make_roi
def picked_peaks_evaluation(mzml_file, picked_peaks_file):
    """Count how many picked-peak boxes were covered by scans in the run."""
    peak_boxes = load_picked_boxes(picked_peaks_file)
    run = MZMLFile(mzml_file)
    _scans2boxes, boxes2scans = map_boxes_to_scans(run, peak_boxes, half_isolation_window=0)
    return float(len(boxes2scans))
def roi_scoring(mzml_file, mz_tol=10, mz_units='ppm', min_length=3, min_intensity=500):
    """Score an mzML run by how many good ROIs received fragmentation scans."""
    mz_file = MZMLFile(mzml_file)
    good_roi, junk_roi = make_roi(mzml_file, mz_tol=mz_tol, mz_units=mz_units,
                                  min_length=min_length, min_intensity=min_intensity)
    roi2scan, scan2roi = match_scans_to_rois(mz_file, good_roi)
    with_scan, without_scan, num_scan = prop_roi_with_scans(roi2scan)
    return {'with_scan': with_scan, 'without_scan': without_scan, 'num_scan': num_scan}
def summarise(mz_file_object):
    """Log scan counts (total / MS1 / MS2) for an MZMLFile."""
    levels = [scan.ms_level for scan in mz_file_object.scans]
    n_scans = len(levels)
    n_ms1_scans = levels.count(1)
    n_ms2_scans = levels.count(2)
    logger.debug("Total scans = {}, MS1 = {}, MS2 = {}".format(n_scans, n_ms1_scans, n_ms2_scans))
def match_scans_to_rois(mz_file_object, roi_list):
    """Pair each MS2 scan with the ROIs covering its precursor m/z and RT.

    :return: (roi2scan, scan2roi) — roi -> list of scans, MS2 scan -> list of rois
    """
    ms2_scans = [s for s in mz_file_object.scans if s.ms_level == 2]
    roi2scan = {roi: [] for roi in roi_list}
    scan2roi = {scan: [] for scan in ms2_scans}
    for scan in ms2_scans:
        precursor = scan.precursor_mz
        # ROI retention times are in seconds; scans record minutes.
        rt_seconds = 60 * scan.previous_ms1.rt_in_minutes
        for roi in roi_list:
            if not min(roi.mz_list) <= precursor <= max(roi.mz_list):
                continue
            if not roi.rt_list[0] <= rt_seconds <= roi.rt_list[-1]:
                continue
            roi2scan[roi].append(scan)
            scan2roi[scan].append(roi)
    return roi2scan, scan2roi
def prop_roi_with_scans(roi2scan):
    """Count ROIs with and without matched scans.

    :return: (with_scan, without_scan, total_rois)
    """
    with_scan = sum(1 for scans in roi2scan.values() if scans)
    without_scan = len(roi2scan) - with_scan
    return with_scan, without_scan, len(roi2scan)
|
from django.contrib import admin
from .models import UserProfile,Teacher,Timetable,Klass,Pupil,Cabinet,Subject, Grade
# Register every school-management model with the default admin site.
for _model in (UserProfile, Teacher, Timetable, Klass, Pupil, Cabinet, Subject, Grade):
    admin.site.register(_model)
|
import json
import pandas as pd
import sqlalchemy as sa
import pemi
from pemi.fields import *
__all__ = [
'PdDataSubject',
'SaDataSubject',
'SparkDataSubject'
]
class MissingFieldsError(Exception): pass
class DataSubject:
    '''
    Pairs a schema with a generic data container.

    A data subject mostly just tracks its schema, an optional name, and the
    pipe that owns it (if any); subclasses supply the conversions to and from
    pandas dataframes (needed primarily so tests can work with the data).
    '''
    def __init__(self, schema=None, name=None, pipe=None):
        self.schema = schema or pemi.Schema()
        self.name = name
        self.pipe = pipe
    def __str__(self):
        rendered = '<{}({}) {}>'.format(type(self).__name__, self.name, id(self))
        if not self.pipe:
            return rendered
        return '{}.{}'.format(self.pipe, rendered)
    def to_pd(self):
        raise NotImplementedError
    def from_pd(self, df, **kwargs):
        raise NotImplementedError
    def connect_from(self, _other):
        self.validate_schema()
        raise NotImplementedError
    def validate_schema(self): #pylint: disable=no-self-use
        return True
class PdDataSubject(DataSubject):
    '''Data subject backed by a pandas dataframe.'''
    def __init__(self, df=None, strict_match_schema=False, **kwargs):
        super().__init__(**kwargs)
        # Treat a missing or fully-empty dataframe as "no data yet".
        if df is None or df.shape == (0, 0):
            df = self._empty_df()
        self.strict_match_schema = strict_match_schema
        self.df = df
    def to_pd(self):
        return self.df
    def from_pd(self, df, **kwargs):
        self.df = df
    def connect_from(self, other):
        incoming = other.df
        if incoming is None or incoming.shape == (0, 0):
            incoming = self._empty_df()
        self.df = incoming
        self.validate_schema()
    def validate_schema(self):
        'Verify that the dataframe contains all of the columns specified in the schema'
        if self.strict_match_schema:
            return self.validate_data_frame_columns()
        absent = set(self.schema.keys()) - set(self.df.columns)
        if absent:
            raise MissingFieldsError('DataFrame missing expected fields: {}'.format(absent))
        return True
    def validate_data_frame_columns(self):
        'Verify that the schema contains all the columns specefied in the dataframe'
        extra = set(self.df.columns) - set(self.schema.keys())
        if extra:
            raise MissingFieldsError("Schema is missing current columns: {}".format(extra))
        return True
    def _empty_df(self):
        # An empty frame that still carries the schema's column names.
        return pd.DataFrame(columns=self.schema.keys())
class SaDataSubject(DataSubject):
    """SQLAlchemy-backed data subject that reads and writes a database table."""
    def __init__(self, engine, table, sql_schema=None, **kwargs):
        super().__init__(**kwargs)
        self.engine = engine
        self.table = table
        self.sql_schema = sql_schema
        # Memoized pandas copy of the table, reused by to_pd/from_pd.
        self.cached_test_df = None
    def to_pd(self):
        """Read the table into a dataframe, coercing schema columns; cached."""
        if self.cached_test_df is not None:
            return self.cached_test_df
        with self.engine.connect() as conn:
            df = pd.read_sql_table(
                self.table,
                conn,
                schema=self.sql_schema,
            )
            # Coerce only columns that both the table and the schema know about.
            for column in set(df.columns) & set(self.schema.keys()):
                df[column] = df[column].apply(self.schema[column].coerce)
            self.cached_test_df = df
            return df
    def from_pd(self, df, **to_sql_opts):
        """Write *df* to the table (default: append without the index)."""
        self.cached_test_df = df
        pemi.log.debug('loading SaDataSubject with:\n%s', self.cached_test_df)
        to_sql_opts['if_exists'] = to_sql_opts.get('if_exists', 'append')
        to_sql_opts['index'] = to_sql_opts.get('index', False)
        if self.sql_schema:
            to_sql_opts['schema'] = self.sql_schema
        # Serialize JSON fields before handing the frame to to_sql.
        df_to_sql = df.copy()
        for field in self.schema.values():
            if isinstance(field, JsonField):
                df_to_sql[field.name] = df_to_sql[field.name].apply(json.dumps)
        with self.engine.connect() as conn:
            df_to_sql.to_sql(self.table, conn, **to_sql_opts)
    def connect_from(self, _other):
        # Drop pooled connections before validating against the schema.
        self.engine.dispose()
        self.validate_schema()
    def __getstate__(self):
        # Engines are not picklable; persist the URL and rebuild on unpickle.
        return (
            [],
            {
                'url': self.engine.url,
                'table': self.table,
                'sql_schema': self.sql_schema
            }
        )
    def __setstate__(self, state):
        _args, kwargs = state
        self.engine = sa.create_engine(kwargs['url'])
        self.table = kwargs['table']
        self.sql_schema = kwargs['sql_schema']
class SparkDataSubject(DataSubject):
    """DataSubject backed by a Spark DataFrame."""

    def __init__(self, spark, df=None, **kwargs):
        super().__init__(**kwargs)
        self.spark = spark          # SparkSession used to build DataFrames
        self.df = df                # underlying Spark DataFrame
        self.cached_test_df = None  # memoized pandas conversion

    def to_pd(self):
        """Convert the Spark DataFrame to pandas, coercing every column via the schema.

        The result is cached; subsequent calls return the same DataFrame object.
        NOTE(review): unlike SaDataSubject.to_pd, this indexes the schema with
        every DataFrame column — a column missing from the schema raises KeyError.
        """
        if self.cached_test_df is not None:
            return self.cached_test_df
        converted_df = self.df.toPandas()
        self.cached_test_df = pd.DataFrame()
        for column in list(converted_df):
            self.cached_test_df[column] = converted_df[column].apply(self.schema[column].coerce)
        return self.cached_test_df

    def from_pd(self, df, **kwargs):
        """Replace the subject data with a Spark DataFrame built from *df* (kwargs ignored)."""
        self.df = self.spark.createDataFrame(df)

    def connect_from(self, other):
        # Reuse (or lazily create) the session, then adopt the other subject's frame.
        self.spark = other.spark.builder.getOrCreate()
        self.df = other.df
        self.validate_schema()
|
// NOTE(review): this block mixes Python-style syntax (the `class SnakeModel:`
// header and `def __init__(self, ...)`) with Kotlin syntax (`init {}`, `fun`,
// `val`/`var`, `when`). As committed it compiles as neither language — confirm
// which source this was meant to be and restore a single syntax.
// The comments below assume the Kotlin reading.
class SnakeModel:
    score = 0        // food items eaten
    lifeLeft = 200   // moves remaining before starvation
    lifetime = 0     // total moves survived
    dead = False
    xVel = 0         // horizontal velocity: -1, 0 or 1
    yVel = 0         // vertical velocity: -1, 0 or 1
    body: ArrayList<Position> = ArrayList()
    eventRegistry = EventRegistry.createChildInstance()
    def __init__(self, brain: DecisionEngine,board: GameBoardModel, vision: SnakeVision, headPosition: Position):
        self.brain = brain
        self.board = board
        self.vision = vision
        // NOTE(review): the constructor stores `headPosition`, but the methods
        // below read `head.position` — confirm the intended property name.
        self.headPosition = headPosition
    init {
        if (vision != null) {
            vision.snake = this
        }
    }
    private fun bodyCollide(x: Long, y: Long): Boolean { //check if a position collides with the snakes body
        for (i in body.indices) {
            if (x == body[i].x && y == body[i].y) {
                println("Eaten My own body at $i with body length ${body.size}")
                return true
            }
        }
        return false
    }
    private fun foodCollide(position: Position): Boolean {
        return board!!.isFoodDroppedAt(position)
    }
    private fun wallCollide(x: Long, y: Long): Boolean { //check if a position collides with the wall
        return board!!.isOutSideBoard(x,y)
    }
    fun thinkAndMove() { //move the snake
        // Observe -> decide -> move, then notify listeners that a step finished.
        val observations = vision!!.observations()
        val decision = think(observations)
        move(decision)
        raiseEvent(SnakeMoveCompleted(this, observations))
    }
    private fun move() {
        if (!dead) {
            lifetime++
            lifeLeft--
            // Advance the head first, then test the new head cell for collisions.
            shiftBody()
            val wallCollide = wallCollide(head.position.x, head.position.y)
            val bodyCollide = bodyCollide(head.position.x, head.position.y)
            val starved = isStarved()
            dead = wallCollide
                    || bodyCollide
                    || starved
            if (dead) {
                // Report the first matching cause of death.
                when {
                    wallCollide -> raiseEvent(SnakeDeadEvent(this, SnakeDeathType.WALL_COLLIDE))
                    bodyCollide -> raiseEvent(SnakeDeadEvent(this, SnakeDeathType.BODY_COLLIDE))
                    starved -> raiseEvent(SnakeDeadEvent(this, SnakeDeathType.STARVED))
                }
            } else if (foodCollide(head.position)) {
                eat()
            }
        }
    }
    private fun isStarved() = lifeLeft <= 0
    fun eat() { //eat food
        raiseEvent(FoodEvent(EventType.FOOD_EATEN, head.position))
        score++
        val len = body.size - 1
        adjustLifeLeft()
        // Grow by duplicating the tail segment (or the head when there is no body yet).
        if (len >= 0) {
            body.add(Position(body[len].x, body[len].y))
        } else {
            body.add(Position(head.position.x, head.position.y))
        }
    }
    private fun raiseEvent(event: Event) {
        eventRegistry.raiseEvent(event)
    }
    private fun adjustLifeLeft() {
        // Eating grants +100 moves, capped at 500.
        if (lifeLeft < 500) {
            if (lifeLeft > 400) {
                lifeLeft = 500
            } else {
                lifeLeft += 100
            }
        }
    }
    private fun shiftBody() { //shift the body to follow the head
        var tempPosition = head.position
        head.position =
            Position(head.position.x + xVel, head.position.y + yVel)
        var temp2Position: Position
        // Each segment takes the previous position of the segment ahead of it.
        for (i in body.indices) {
            temp2Position = body[i]
            body[i] = tempPosition
            tempPosition = temp2Position
        }
    }
    fun crossover(parent: SnakeModel): SnakeModel {
        // NOTE(review): `parent.brain.crossover(parent.brain)` crosses the
        // parent's brain with itself — this snake's own brain is never used;
        // likely should be `parent.brain.crossover(brain)`.
        val newBrain = if(brain!=null) {
            if(parent.brain is RegeneratableDecisionEngine) {
                parent.brain.crossover(parent.brain)
            } else {
                throw IllegalStateException("Brain of this Snake does not support Cross")
            }
        } else null
        // NOTE(review): the constructor above takes four arguments
        // (brain, board, vision, headPosition); only one is supplied here.
        return SnakeModel(newBrain)
    }
    fun mutate(mutationRate: Float) { //mutate the snakes brain
        if(brain is RegeneratableDecisionEngine) {
            return brain.mutate(mutationRate)
        } else {
            throw IllegalStateException("Brain of this Snake does not support mutate")
        }
    }
    fun calculateFitness(): Double { //calculateReward the fitness of the snake
        // Below score 10: lifetime^2 * 2^score; afterwards growth is linear in score.
        var fitness: Double
        if (score < 10) {
            fitness = floor(lifetime.toDouble() * lifetime) * 2.0.pow(score.toDouble())
        } else {
            fitness = floor(lifetime.toDouble() * lifetime) * 2.0.pow(10) * (score - 9).toFloat()
        }
        return fitness
    }
    private fun think(observations: SnakeObservationModel): SnakeAction {
        return brain!!.output(observations)
    }
    // The four direction setters refuse a direct reversal (e.g. up while moving down).
    private fun moveUp() {
        if (yVel != 1) {
            xVel = 0
            yVel = -1
        }
    }
    private fun moveDown() {
        if (yVel != -1) {
            xVel = 0
            yVel = 1
        }
    }
    private fun moveLeft() {
        if (xVel != 1) {
            xVel = -1
            yVel = 0
        }
    }
    private fun moveRight() {
        if (xVel != -1) {
            xVel = 1
            yVel = 0
        }
    }
    fun hasBodyAtPosition(pos: Position): Boolean {
        for (i in body.indices) {
            if (body[i] == pos) {
                return true
            }
        }
        return false
    }
    fun move(action: SnakeAction) {
        when (action) {
            SnakeAction.UP -> moveUp()
            SnakeAction.DOWN -> moveDown()
            SnakeAction.LEFT -> moveLeft()
            SnakeAction.RIGHT -> moveRight()
        }
        move()
    }
    fun addEventListener(type:EventType, listener: GameEventListener) {
        eventRegistry.addEventListener(type, listener)
    }
}
|
from __future__ import print_function
import time
import boto3
from amazon.api import AmazonAPI
class AWSClient(object):
    """Facade over the AWS clients used by the project: Comprehend (NLP),
    Transcribe (speech-to-text) and the Product Advertising ("search") API.
    """

    def __init__(self, region=None, root_access_key='AKIAJB4BJYPJKV5YACXQ',
                 root_secret_access_key='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', tag=None):
        """Create all service clients.

        :param region: AWS region name; defaults to 'us-east-1' when omitted.
        :param root_access_key: AWS access key id for boto3 clients.
        :param root_secret_access_key: AWS secret key for boto3 clients.
        :param tag: unused; kept for backward compatibility.
        """
        # SECURITY(review): real-looking AWS credentials are hard-coded here
        # (the constructor defaults and the affiliate keys below). They should
        # be revoked/rotated and loaded from the environment or an IAM role
        # instead of living in source control.
        # Bug fix: the `region` argument was previously ignored — the region
        # was unconditionally forced to 'us-east-1'.
        self.region = region if region is not None else 'us-east-1'
        self.root_access_key = root_access_key
        self.root_secret_access_key = root_secret_access_key
        self.search_access_key = 'AKIAIMJ3KXAGVLAEFBNA'  # affiliate key
        self.search_secret_access_key = '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'  # affiliate key
        self.associate_tag = "msucapstone02-20"
        self.create_comprehend_client()
        self.create_search_client()
        self.create_transcribe_client()

    def create_client(self, service):
        """Build a boto3 client for *service* using the root credentials."""
        return boto3.client(service,
                            region_name=self.region,
                            aws_access_key_id=self.root_access_key,
                            aws_secret_access_key=self.root_secret_access_key
                            )

    def create_transcribe_client(self):
        """Instantiate and store the Transcribe client."""
        self.transcribe_client = self.create_client('transcribe')

    def create_comprehend_client(self):
        """Instantiate and store the Comprehend client."""
        self.comprehend_client = self.create_client('comprehend')

    def create_search_client(self):
        """Instantiate and store the Product Advertising API client."""
        self.search_client = AmazonAPI(self.search_access_key,
                                       self.search_secret_access_key,
                                       self.associate_tag)

    def run_transcribe_job(self):
        """Not implemented yet."""
        pass

    def comprehend_entities(self, text_input):
        """Detect named entities in *text_input* (English); returns the raw API response."""
        response = self.comprehend_client.detect_entities(
            Text=text_input,
            LanguageCode='en'
        )
        return response

    def comprehend_key_phrases(self, text_input):
        """Detect key phrases in *text_input* (English); returns the raw API response."""
        response = self.comprehend_client.detect_key_phrases(
            Text=text_input,
            LanguageCode='en'
        )
        return response

    def comprehend_sentiment(self, text_input):
        """Detect sentiment of *text_input* (English); returns the raw API response."""
        response = self.comprehend_client.detect_sentiment(
            Text=text_input,
            LanguageCode='en'
        )
        return response

    def search_n(self, keywords, index, n):
        """Return up to *n* products matching *keywords* in search index *index*."""
        return self.search_client.search_n(n, Keywords=keywords, SearchIndex=index)
|
import re
class Transpiler(object):
    """Line-by-line Rockstar-to-Python transpiler.

    Indentation state (`self.ident`) and the most recently assigned variable
    (for pronoun substitution) persist across calls to `transpile_line`.
    """

    def __init__(self):
        self.ident = 0
        # Matches either a "common" variable (article + lowercase word) or a
        # "proper" variable (capitalized word sequence).
        self.regex_variables = r'\b(?:(?:[Aa]n?|[Tt]he|[Mm]y|[Yy]our) [a-z]+|[A-Z][A-Za-z]+(?: [A-Z][A-Za-z]+)*)\b'
        self.most_recently_named = ''
        # Bug fix: the truthy aliases previously mapped to lowercase ' true ',
        # which is not a valid Python literal; they now map to ' True '
        # (mirroring the ' False ' mappings below).
        self.simple_subs = {
            '(':'#',
            ')':'',
            'Give back':'return',
            'Take it to the top':'continue',
            'Break it down':'break',
            ' false ':' False ',
            ' wrong ':' False ',
            ' no ':' False ',
            ' lies ':' False ',
            ' null ':' False ',
            ' nothing ':' False ',
            ' nowhere ':' False ',
            ' nobody ':' False ',
            ' empty ':' False ',
            ' gone ':' False ',
            ' mysterious ':' False ',
            ' true ':' True ',
            ' right ':' True ',
            ' yes ':' True ',
            ' ok ':' True ',
            ' plus ':' + ',
            ' with ':' + ',
            ' minus ':' - ',
            ' without ':' - ',
            ' times ':' * ',
            ' of ':' * ',
            ' over ':' / ',
            ' is higher than ':' > ',
            ' is greater than ':' > ',
            ' is bigger than ':' > ',
            ' is stronger than ':' > ',
            ' is lower than ':' < ',
            ' is less than ':' < ',
            ' is smaller than ':' < ',
            ' is weaker than ':' < ',
            ' is as high as ':' >= ',
            ' is as great as ':' >= ',
            ' is as big as ':' >= ',
            ' is as strong as ':' >= ',
            ' is as low as ':' <= ',
            ' is as little as ':' <= ',
            ' is as small as ':' <= ',
            ' is as weak as ':' <= ',
            ' is not ':' != ',
            ' aint ':' != ',
            'Until ':'while not ',
            'While ':'while '
        }

    def get_comments(self, line):
        """Split a Rockstar '(comment)' off the line; return (line, python_comment)."""
        if '(' in line:
            line, comment = line.split('(')
            comment = ' #' + comment.strip(')\n ')
        else:
            comment = ''
        return line, comment

    def create_function(self, line):
        """Turn 'X takes A and B' into 'def X(A, B):' and open an indent level."""
        match = re.match(r'\b({0}) takes ({0}(?: and {0})*)\b'.format(self.regex_variables), line)
        if match:
            self.ident += 1
            line = 'def {}({}):'.format(match.group(1), match.group(2).replace(' and', ','))
        return line

    def create_while(self, line):
        """Finish a 'while'/'while not' header and open an indent level."""
        if line.startswith('while '):
            line = line.replace(' is ', ' == ')
            line += ':'
            self.ident += 1
        return line

    def create_if(self, line):
        """Turn 'If ...' into 'if ...:' and open an indent level."""
        match = re.match(r'If .*', line)
        if match:
            self.ident += 1
            line = line.replace(' is ', ' == ')
            line = line.replace('If', 'if')
            line += ':'
        return line

    def find_poetic_number_literal(self, line):
        """Convert 'Var is some words' into 'Var = <digits>' (word length mod 10 per word).

        Lines whose value is a boolean/null alias are left for simple_subs.
        """
        poetic_type_literals_keywords = ['true', 'false', 'nothing', 'nobody', 'nowhere', 'empty', 'wrong', 'gone', 'no', 'lies', 'right', 'yes', 'ok', 'mysterious']
        match = re.match(r'\b({})(?: is|\'s| was| were) (.+)'.format(self.regex_variables), line)
        # Bug fix: compare case-insensitively — by this point simple_subs may
        # already have rewritten the alias to 'True'/'False', which the old
        # lowercase-only check missed, mangling the line into a number literal.
        if match and match.group(2).split()[0].lower() not in poetic_type_literals_keywords:
            line = '{} = '.format(match.group(1))
            for word_number in match.group(2).split():
                period = '.' if word_number.endswith('.') else ''
                alpha_word = re.sub('[^A-Za-z]', '', word_number)
                line += str(len(alpha_word) % 10) + period
        return line

    def find_proper_variables(self, line):
        """Join multi-word proper variables with underscores."""
        match_list = re.findall(r'\b[A-Z][A-Za-z]+(?: [A-Z][A-Za-z]+)*\b', line)
        if match_list:
            for match in match_list:
                line = line.replace(match, match.replace(' ', '_'))
        return line

    def find_common_variables(self, line):
        """Join 'article word' common variables into lowercase identifiers."""
        match_list = re.findall(r'\b([Aa]n?|[Tt]he|[Mm]y|[Yy]our) ([a-z]+)\b', line)
        if match_list:
            for match in match_list:
                line = line.replace(' '.join(match), '{}_{}'.format(*match).lower())
        return line

    def find_named(self, line):
        """Return the variable assigned on this line, if any (for pronoun tracking)."""
        match = re.match(r'([A-Za-z]+(?:_[A-Za-z]+)*) [+-]?= .+', line)
        if match:
            return match.group(1)

    def get_strings(self, line):
        """Pull string literals out of the line; return (template, literal_or_None)."""
        says_match = re.match(r'({}) says (.*)'.format(self.regex_variables), line)
        if says_match:
            line = says_match.group(1) + ' = "{}"'
            return line, says_match.group(2)
        quotes_match = re.match(r'([^\"]* )(\".*\"(?:, ?\".*\")*)([^\"]*)', line)
        if quotes_match:
            line = quotes_match.group(1) + '{}' + quotes_match.group(3)
            return line, quotes_match.group(2)
        return line, None

    def transpile_line(self, line):
        """Transpile one Rockstar source line into an indented Python line."""
        if line == '\n':
            # A blank line closes the current block.
            self.ident = self.ident - 1 if self.ident > 0 else 0
            return ''
        else:
            line_ident = '    ' * self.ident
            line, comments = self.get_comments(line)
            py_line, line_strings = self.get_strings(line)
            # First substitution pass (string literals are already parked aside).
            for key in self.simple_subs:
                py_line = py_line.strip()
                py_line += ' '
                py_line = py_line.replace(key, self.simple_subs[key])
            py_line = py_line.strip('\n ,.;')
            py_line = self.find_poetic_number_literal(py_line)
            py_line = py_line.replace('\'', '')
            # Second pass: poetic-literal handling may have exposed new matches.
            for key in self.simple_subs:
                py_line = py_line.strip()
                py_line += ' '
                py_line = py_line.replace(key, self.simple_subs[key])
            py_line = py_line.strip('\n ,.;')
            # Pronouns refer back to the most recently assigned variable.
            most_recently_named_keywords = [' it ', ' he ', ' she ', ' him ', ' her ', ' them ', ' they ',
                                            ' ze ', ' hir ', ' zie ', ' zir ', ' xe ', ' xem ', ' ve ', ' ver ']
            for keyword in most_recently_named_keywords:
                py_line = py_line.replace(keyword, ' {} '.format(self.most_recently_named))
            py_line = self.create_function(py_line)
            py_line = self.create_while(py_line)
            py_line = self.create_if(py_line)
            # 'Else' dedents one level relative to its block body.
            line_ident = '    ' * (self.ident - 1) if py_line == 'Else' else line_ident
            py_line = 'else:' if py_line == 'Else' else py_line
            py_line = re.sub(r'Put (.*) into ({})'.format(self.regex_variables), r'\g<2> = \g<1>', py_line)
            py_line = re.sub(r'Build ({}) up'.format(self.regex_variables), r'\g<1> += 1', py_line)
            py_line = re.sub(r'Knock ({}) down(\, down)*'.format(self.regex_variables), r'\g<1> -= ' + str(1 + py_line.count(", down")), py_line)
            py_line = re.sub(r'Listen to ({})'.format(self.regex_variables), r'\g<1> = input()', py_line)
            py_line = re.sub(r'(?:Say|Shout|Whisper|Scream) (.*)', r'print(\g<1>)', py_line)
            py_line = py_line.replace(' is ', ' = ', 1)
            py_line = re.sub(r'({0}) taking ((?:{0}|\"[^\"]*\"|[0-9]+)(?:, ?(?:{0}|\"[^\"]*\"|[0-9]+))*)'.format(self.regex_variables), r'\g<1>(\g<2>)', py_line)
            py_line = self.find_proper_variables(py_line)
            py_line = self.find_common_variables(py_line)
            line_named = self.find_named(py_line)
            self.most_recently_named = line_named if line_named else self.most_recently_named
            # Restore the parked string literal(s) into the template.
            py_line = py_line.format(line_strings) if line_strings else py_line
            return line_ident + py_line + comments + '\n'
|
from test_helper import run_common_tests, passed, failed, get_answer_placeholders
def test_window():
    """Check that the first placeholder answer uses the len() builtin."""
    window = get_answer_placeholders()[0]
    if "len(" not in window:
        failed("Use len() function")
    else:
        passed()
if __name__ == '__main__':
    # Run the course's common checks first, then the placeholder-specific test.
    run_common_tests("Use len() function")
    test_window()
from typing import Generator
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import Command
from aiogram.types import Message, CallbackQuery
from config import bot_config
from loader import dp, logger_guru, scheduler
from .states_in_handlers import TodoStates
from middlewares.throttling import rate_limit
from utils.database_manage.sql.sql_commands import DB_USERS
from utils.keyboards.calendar import calendar_bot_en, calendar_bot_ru, CalendarBot
from utils.misc.other_funcs import get_time_now
from utils.todo_service import load_todo_obj, dump_todo_obj, pin_todo_message
@rate_limit(2, key='todo')
@dp.message_handler(Command('todo'))
async def bot_todo(message: Message, state: FSMContext):
    """Entry point for /todo: greet the user and show the date-picker calendar.

    Stores the user's language in FSM state and deletes the triggering message.
    """
    lang, skin = await DB_USERS.select_lang_and_skin(telegram_id=message.from_user.id)
    await message.answer_sticker(skin.love_you.value, disable_notification=True)
    if lang == 'ru':
        await message.answer(
            '<code>Привет! :)\nдавай запишем что сделать и когда</code>',
            reply_markup=calendar_bot_ru.enable())
    else:
        # NOTE(review): the non-'ru' branch sends the same Russian greeting as
        # the 'ru' branch (only the keyboard differs) — likely a copy-paste
        # omission; confirm the intended English text.
        await message.answer(
            '<code>Привет! :)\nдавай запишем что сделать и когда</code>',
            reply_markup=calendar_bot_en.enable())
    await TodoStates.first()
    async with state.proxy() as data:
        data['lang'] = lang
    await message.delete()
@dp.callback_query_handler(CalendarBot.callback.filter(), state=TodoStates.todo)
async def process_simple_calendar(call: CallbackQuery, callback_data, state: FSMContext) -> None:
    """Handle a tap on the inline calendar: reject past dates, otherwise store
    the chosen date in FSM state and advance to the next state.
    """
    async with state.proxy() as data:
        lang: str = data.get('lang')
    selected, date = (await calendar_bot_ru.process_selection(call, callback_data) if lang == 'ru' else
                      await calendar_bot_en.process_selection(call, callback_data))
    if date and selected:
        if date < get_time_now(bot_config.time_zone).date():
            # Past dates are rejected and the calendar is shown again.
            if lang == 'ru':
                await call.answer('Выбрать можно только на сегодня и позже !', show_alert=True)
                await call.message.answer(
                    'Ты не можешь выбрать эту дату!', reply_markup=calendar_bot_ru.enable()
                )
            else:
                await call.answer('You can only choose today and later!', show_alert=True)
                # Bug fix: this branch previously re-displayed the Russian
                # calendar (calendar_bot_ru) to non-Russian users.
                await call.message.answer(
                    "You can't choose this date!", reply_markup=calendar_bot_en.enable()
                )
        else:
            async with state.proxy() as data:
                data['date'] = str(date)
            await call.message.edit_text(
                f'Что планируешь на <code>{date}</code> число?' if lang == 'ru' else
                f'What are you planning for the <code>{date}</code>?'
            )
            await TodoStates.next()
@dp.message_handler(state=TodoStates.reception_todo)
async def set_calendar_date(message: Message, state: FSMContext) -> None:
    """Receive the todo text for the previously chosen date.

    Splits the message into one task per non-empty line, appends the tasks to
    the user's stored todo object, echoes the full list back, and either pins
    it immediately (when the date is today) or schedules a pin job for the
    chosen date. Messages over 1000 characters are rejected.
    """
    async with state.proxy() as data:
        # NOTE(review): relies on insertion order of the proxy dict being
        # exactly ('lang', 'date') as written by the two previous handlers —
        # fragile; confirm or unpack by key.
        lang, date = data.values()
    user_id: int = message.from_user.id
    name, skin = f'todo_{user_id}', await DB_USERS.select_skin(telegram_id=user_id)
    if len(message.text) <= 1000:
        # One task per non-empty line of the message.
        message_task: Generator = (item for item in message.text.split('\n') if item)
        todo_obj: dict = await load_todo_obj()
        todo_obj.setdefault(name, {}).setdefault(date, []).extend(message_task)
        result: str = '\n'.join(f"<code>{i})</code> <b>{val}</b>" for i, val in enumerate(todo_obj[name][date], 1))
        await message.delete()
        await message.answer_sticker(skin.great.value, disable_notification=True)
        send_msg: Message = await message.answer(
            f'Вот список на этот день:\n\n{result}' if lang == 'ru' else
            f'Here is the list for this day:\n\n{result}'
        )
        if date == get_time_now(bot_config.time_zone).strftime('%Y-%m-%d'):
            # Today's list replaces any previously pinned message right away.
            await dp.bot.unpin_all_chat_messages(chat_id=user_id)
            await send_msg.pin(disable_notification=True)
        else:
            # Future date: pin shortly after midnight of that day.
            # NOTE(review): timezone is hard-coded to "Europe/Moscow" while the
            # date comparison above uses bot_config.time_zone — confirm.
            scheduler.add_job(
                func=pin_todo_message,
                args=(message.chat.id, user_id),
                trigger='date',
                id=f'{user_id}_pin_msg_job',
                run_date=f'{date} 00:05:05',
                misfire_grace_time=5,
                replace_existing=True,
                timezone="Europe/Moscow"
            )
        await dump_todo_obj(todo_obj)
        await state.finish()
    else:
        logger_guru.warning(f'{user_id=} Trying to write a message that is too large.')
        await message.answer_sticker(skin.you_were_bad.value, disable_notification=True)
        await message.answer(
            'Слишком большое сообщение ! Попробуй написать короче...' if lang == 'ru' else
            'Too big message! Try to write shorter'
        )
@dp.message_handler(state=TodoStates.todo)
async def cancel_todo(message: Message, state: FSMContext) -> None:
    """Fallback for text sent while the calendar is open: prompt for a date
    and abort the todo flow.
    """
    async with state.proxy() as data:
        lang: str = data['lang']
    await message.answer(
        'Тебе нужно выбрать дату :) попробуй ещё раз!' if lang == 'ru' else
        'You need to choose a date :) try again!'
    )
    await state.finish()
|
###
# Core Libs
###
import tingbot
from tingbot import *
######
# Setup Screens
######
def setup_screen(current_screen):
    """Draw the shared background and frame for the named screen.

    Known names: 'error', 'loading', 'waiting', 'transcript'.
    An unknown name leaves the display untouched.
    """
    if current_screen == 'error':
        # Error handler screen
        screen.brightness = 100
        screen.fill(color='orange')
        screen.rectangle(align='bottomleft', size=(320, 30), color='navy')
    elif current_screen == 'loading':
        # Loading screen
        screen.brightness = 85
        screen.fill(color='navy')
        screen.image('img/loading.gif', scale='fill')
        screen.rectangle(align='topleft', size=(320, 30), color='navy')
        screen.image('img/slack_icon.png', scale=(0.1), xy=(300, 16))
    elif current_screen == 'waiting':
        # Waiting-on-webhook-data screen
        screen.brightness = 95
        screen.fill(color='navy')
        screen.image('img/loading.gif', scale='fill')
        screen.rectangle(align='bottomleft', size=(320, 30), color='navy')
        screen.rectangle(align='topleft', size=(320, 30), color='navy')
    elif current_screen == 'transcript':
        # Chat transcript screen
        screen.brightness = 100
        screen.fill(color='teal')
        screen.image('img/bg.png', scale='fill', align='top')
        screen.rectangle(align='left', xy=(16, 225), size=(240, 18), color='navy')
######
# Setup Loading / Boot Screen
######
def showLoading():
    """Render the boot/loading screen with a 'Booting Up...' caption."""
    setup_screen( 'loading' )
    screen.text(
        'Booting Up...',
        xy=(12, 8),
        align='topleft',
        font_size=12,
        color='white',
    )
    return
######
# Setup Waiting for Messages Screen
######
def showWaiting():
    """Render the idle screen shown while waiting for incoming messages."""
    setup_screen( 'waiting' )
    screen.text(
        'SlackerBot',
        xy=(160, 15),
        font_size=12,
        color='white',
    )
    screen.text(
        'waiting for messages...',
        xy=(160, 225),
        font_size=12,
        color='white',
    )
    return
######
# Setup Error Screen
######
def showError( message ):
    """Render the error screen with *message* in the bottom banner."""
    setup_screen( 'error' )
    screen.text(
        'Error',
        xy=(160, 15),
        font_size=12,
        color='white',
    )
    screen.text(
        message,
        xy=(160, 225),
        font_size=12,
        color='white',
    )
    return
|
from dataclasses import dataclass
from typing import Optional
from wired import ServiceContainer
from wired_injector.operators import Get, Attr, Context
from examples.factories import (
View,
FrenchView,
)
try:
from typing import Annotated
except ImportError:
from typing_extensions import Annotated # type: ignore
def test_no_parameters(regular_injector):
    """Injector constructs a dataclass with no fields; calling it just works."""
    @dataclass
    class Target:
        def __call__(self) -> int:
            return 99
    target: Target = regular_injector(Target)
    result: int = target()
    assert result == 99
def test_one_parameter_container(regular_injector):
    """A ServiceContainer field is injected and usable for manual lookups."""
    @dataclass
    class Target:
        container: ServiceContainer
        def __call__(self):
            view = self.container.get(View)
            return view
    target: Target = regular_injector(Target)
    result: View = target()
    assert result.name == 'View'
def test_one_parameter_field_type(regular_injector):
    """A field annotated with a registered type is injected directly."""
    @dataclass
    class Target:
        view: View
        def __call__(self):
            return self.view
    target: Target = regular_injector(Target)
    result: View = target()
    assert result.name == 'View'
def test_one_parameter_annotated(french_injector):
    """Annotated[..., Get(View)] resolves to the French variant registered for View."""
    @dataclass
    class Target:
        french_view: Annotated[
            FrenchView,
            Get(View),
        ]
        def __call__(self):
            return self.french_view
    target: Target = french_injector(Target)
    result: FrenchView = target()
    assert result.name == 'French View'
def test_two_parameters_unannotated(regular_injector):
    """Two plain fields (container and a registered type) are both injected."""
    @dataclass
    class Target:
        container: ServiceContainer
        view: View
        def __call__(self):
            return self.view
    target: Target = regular_injector(Target)
    result: View = target()
    assert result.name == 'View'
def test_two_parameters_annotated(french_injector):
    """A plain container field and an Annotated Get(View) field coexist."""
    @dataclass
    class Target:
        container: ServiceContainer
        french_customer: Annotated[
            FrenchView,
            Get(View),
        ]
        def __call__(self):
            return self.french_customer
    target: Target = french_injector(Target)
    result: FrenchView = target()
    assert result.name == 'French View'
def test_optional_unannotated(regular_injector):
    """An Optional[ServiceContainer] field with a None default is still injected."""
    @dataclass
    class Target:
        container: Optional[ServiceContainer] = None
        def __call__(self) -> Optional[View]:
            if self.container is None:
                return None
            else:
                view = self.container.get(View)
                return view
    target: Target = regular_injector(Target)
    result = target()
    # When the container was injected, the lookup yields the registered View.
    if result is not None:
        assert result.name == 'View'
def test_optional_annotated(french_injector):
    """Optional wrapped around Annotated[..., Get(View)] still injects the value."""
    @dataclass
    class Target:
        french_customer: Optional[
            Annotated[
                FrenchView,
                Get(View),
            ]
        ]
        def __call__(self):
            return self.french_customer
    target: Target = french_injector(Target)
    result: FrenchView = target()
    assert result.name == 'French View'
def test_props_extra(regular_injector):
    """Extra keyword props are passed through to fields the injector does not fill."""
    # Send an extra prop, not one that overrides an injected prop
    @dataclass
    class Target:
        container: ServiceContainer
        flag: int
        def __call__(self):
            return self.flag
    target: Target = regular_injector(Target, flag=88)
    result: int = target()
    assert 88 == result
def test_props_override(regular_injector):
    """An explicit prop takes precedence over the value the injector would supply."""
    # Send a prop that overrides an injected prop
    @dataclass
    class Target:
        container: ServiceContainer
        def __call__(self):
            return self.container
    target: Target = regular_injector(Target, container=88)
    result = target()
    assert 88 == result
def test_get_then_attr(regular_injector):
    """ Pipeline: Get, Attr """
    # Get(View) fetches the service, Attr('name') plucks its attribute.
    @dataclass
    class Target:
        customer_name: Annotated[
            str,
            Get(View),
            Attr('name'),
        ]
        def __call__(self):
            return self.customer_name
    target: Target = regular_injector(Target)
    result: str = target()
    assert result == 'View'
def test_get_then_attr_double_injected(regular_injector):
    """ An injected attribute is itself injected """
    # 'caps_name' on View is expected to be an injected/computed attribute.
    @dataclass
    class Target:
        customer_name: Annotated[
            str,
            Get(View),
            Attr('caps_name'),
        ]
    target: Target = regular_injector(Target)
    assert 'VIEW' == target.customer_name
def test_default_value_unannotated(regular_injector):
    """An unregistered field type falls back to the dataclass default value."""
    class Foo:
        pass
    f = Foo()
    @dataclass
    class Target:
        view: Foo = f
        def __call__(self) -> Foo:
            return self.view
    target: Target = regular_injector(Target)
    result: Foo = target()
    assert result == f
def test_default_value_annotated(regular_injector):
    """An Annotated pipeline targeting an unregistered service falls back to the default."""
    class Foo:
        pass
    @dataclass
    class Target:
        view_name: Annotated[
            str,
            Get(Foo),
            Attr('name'),
        ] = 'View Name'
        def __call__(self):
            return self.view_name
    target: Target = regular_injector(Target)
    result = target()
    assert result == 'View Name'
def test_context_then_attr(regular_injector):
    """ Pipeline: Context, Attr """
    # Context() resolves the container's context object; Attr plucks its name.
    @dataclass
    class Target:
        customer_name: Annotated[
            str,
            Context(),
            Attr('name'),
        ]
        def __call__(self):
            return self.customer_name
    target: Target = regular_injector(Target)
    result = target()
    assert result == 'Customer'
|
# -*- coding: utf-8 -*-
# @Time : 19-5-20 上午10:34
# @Author : Redtree
# @File : insert_shell.py
# @Desc : 希尔排序
#----希尔排序----
def dosort(L):
    """Shell sort: sort list *L* in place in ascending order and return it.

    Starts with a gap of half the list length; each pass insertion-sorts every
    gap-strided subsequence, then the gap is halved until it reaches zero.

    :param L: list of mutually comparable items (modified in place)
    :return: the same list, sorted ascending
    """
    # Initial gap: half the sequence length (integer division, not a C-style cast).
    gap = len(L) // 2
    # Outer loop: shrink the gap until the list is fully (insertion-)sorted.
    while gap >= 1:
        # Insertion-sort each gap-strided group.
        for x in range(gap, len(L)):
            # Walk backwards through the group, swapping out-of-order pairs.
            for i in range(x - gap, -1, -gap):
                if L[i] > L[i + gap]:
                    L[i], L[i + gap] = L[i + gap], L[i]
        gap //= 2
    return L
###############################################################################
# Copyright Maciej Patro (maciej.patro@gmail.com)
# MIT License
###############################################################################
from pathlib import Path
from cmake_tidy.utils.app_configuration.configuration import Configuration, ConfigurationError
class FormatConfiguration(Configuration):
    """Configuration for the `format` command.

    Reads the file named by the 'input' argument eagerly so that an invalid
    path fails fast with a ConfigurationError.
    """

    def __init__(self, arguments: dict):
        super().__init__(arguments)
        self.__input_data = self.__initialize_input(arguments)

    @property
    def input(self) -> str:
        """Raw text content of the file to be formatted."""
        return self.__input_data

    @property
    def inplace(self) -> bool:
        """True when in-place formatting was requested.

        Fix: the return annotation previously said ``str`` although the
        expression always yields a bool.
        """
        return self._config.get(self._property_name()) is True

    @property
    def file(self) -> Path:
        """Path of the input file."""
        return Path(self._config['input'])

    @property
    def verbose(self) -> bool:
        """True when verbose output was requested."""
        return self._config.get(self._property_name()) is True

    @property
    def command(self) -> str:
        """Name of the CLI sub-command this configuration belongs to."""
        return 'format'

    def __initialize_input(self, arguments) -> str:
        return self.__load_input_data(arguments)

    @staticmethod
    def __load_input_data(arguments) -> str:
        try:
            return Path(arguments['input']).read_text()
        except Exception as error:
            # Chain the original cause so tracebacks show why the read failed.
            raise ConfigurationError(
                'Error - incorrect \"input\" - please specify existing file to be formatted') from error
|
from subprocess import run
import os
import pytest
def pytest_addoption(parser):
    """Customize testinfra with config options via cli args"""
    # By default run tests in clustered mode, but allow dev mode with --single-node
    parser.addoption('--single-node', action='store_true',
                     help='non-clustered version')
    # Let us specify which docker-compose-(image_flavor).yml file to use
    parser.addoption('--image-flavor', action='store',
                     help='Docker image flavor; the suffix used in docker-compose-<flavor>.yml')
    # Bind-mount a user specified dir for the data dir of the first node
    parser.addoption('--mount-datavolume1', action='store',
                     help='The host dir to be bind-mounted on /usr/share/elasticsearch/data for the first node')
    # Bind-mount a user specified dir for the data dir of the second node
    parser.addoption('--mount-datavolume2', action='store',
                     help='The host dir to be bind-mounted on /usr/share/elasticsearch/data for the second node')
    # Let us override the Dockerfile's USER; akin to specifying `--user` in the docker run.
    parser.addoption('--process-uid', action='store',
                     help='Used to override the Dockerfile\'s USER')
def pytest_configure(config):
    """Bring up the docker-compose stack before the test session starts.

    Volume names, the optional UID override, and single-node mode are taken
    from the CLI options registered in ``pytest_addoption``.
    """
    # Named volumes used by default for persistence of each container.
    (datavolume1, datavolume2) = ("esdata1", "esdata2")
    # Our default is not to override uid; empty strings for --user are ignored by Docker.
    process_uid = ''
    image_flavor = config.getoption('--image-flavor')
    compose_flags = ('-f docker-compose-{0}.yml -f tests/docker-compose-{0}.yml up -d'.format(image_flavor)).split(' ')
    if config.getoption('--single-node'):
        compose_flags.append('elasticsearch1')
    # Use a host dir for the data volume of Elasticsearch, if specified.
    if config.getoption('--mount-datavolume1'):
        datavolume1 = config.getoption('--mount-datavolume1')
    if config.getoption('--mount-datavolume2'):
        datavolume2 = config.getoption('--mount-datavolume2')
    if config.getoption('--process-uid'):
        process_uid = config.getoption('--process-uid')
    # Fix: pass a copy of the environment instead of mutating os.environ in
    # place — the previous code leaked these variables into the test process.
    env_vars = dict(os.environ)
    env_vars['DATA_VOLUME1'] = datavolume1
    env_vars['DATA_VOLUME2'] = datavolume2
    env_vars['PROCESS_UID'] = process_uid
    run(['docker-compose'] + compose_flags, env=env_vars)
def pytest_unconfigure(config):
    """Tear down the docker-compose stack and remove its volumes after the session."""
    compose_file = 'docker-compose-{}.yml'.format(config.getoption('--image-flavor'))
    run(['docker-compose', '-f', compose_file, 'down', '-v'])
    run(['docker-compose', '-f', compose_file, 'rm', '-f', '-v'])
|
# This allows for running the example when the repo has been cloned
import sys
from os.path import abspath
sys.path.extend([abspath(".")])
import muDIC as dic
import logging

# Set the amount of info printed to terminal during analysis
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO)

# Path to folder containing images
path = r'./example_data/'  # Use this formatting on Linux and Mac OS
#path = r'c:\path\to\example_data\\'  # Use this formatting on Windows

# Generate image instance containing all images found in the folder
images = dic.IO.image_stack_from_folder(path, file_type='.tif')
#images.set_filter(dic.filtering.lowpass_gaussian, sigma=1.)

# Generate mesh of Q4 elements (deg 3 splines in both directions)
mesher = dic.Mesher(deg_e=3, deg_n=3,type="q4")
# If you want to see use a GUI, set GUI=True below
# Xc/Yc are the pixel coordinates of the region of interest for this data set.
mesh = mesher.mesh(images,Xc1=316,Xc2=523,Yc1=209,Yc2=1055,n_ely=36,n_elx=9, GUI=False)

# Instantiate settings object and set some settings manually
settings = dic.DICInput(mesh, images)
settings.max_nr_im = 500
settings.ref_update = [15]  # frame indices at which the reference is updated
settings.maxit = 20         # max solver iterations per frame
settings.tol = 1.e-6        # convergence tolerance
settings.interpolation_order = 4
# If you want to access the residual fields after the analysis, this should be set to True
settings.store_internals = True
# This setting defines the behaviour when convergence is not obtained
settings.noconvergence = "ignore"

# Instantiate job object
job = dic.DICAnalysis(settings)
# Running DIC analysis
dic_results = job.run()

# Calculate field values
fields = dic.post.viz.Fields(dic_results,upscale=10)

# Show a field
viz = dic.Visualizer(fields,images=images)
# Uncomment the line below to see the results
# viz.show(field="displacement", component = (1,1), frame=-1)
# Uncomment the line below to export the results to CSV files
#dic.IO.readWriteUtils.exportCSV(fields,'test',-1)
|
# Generated by Django 2.2.4 on 2019-09-10 13:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Create TournamentModel (8 fixed player slots) and MatchModel."""

    # NOTE(review): 'score1'/'score2' below are declared as ImageField with
    # default=0 — almost certainly meant to be IntegerField. This migration may
    # already be applied, so the fix needs a follow-up migration rather than an
    # edit here.
    # NOTE(review): 'stared_at' looks like a typo for 'started_at'; renaming
    # the column likewise requires a new migration.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='TournamentModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40, unique=True)),
                ('player1', models.CharField(max_length=30)),
                ('player2', models.CharField(max_length=30)),
                ('player3', models.CharField(max_length=30)),
                ('player4', models.CharField(max_length=30)),
                ('player5', models.CharField(max_length=30)),
                ('player6', models.CharField(max_length=30)),
                ('player7', models.CharField(max_length=30)),
                ('player8', models.CharField(max_length=30)),
                ('stared_at', models.DateField(default=django.utils.timezone.now)),
                ('slug', models.SlugField(unique=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'TournamentModel',
            },
        ),
        migrations.CreateModel(
            name='MatchModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('player1', models.CharField(max_length=30)),
                ('player2', models.CharField(max_length=30)),
                ('score1', models.ImageField(default=0, upload_to='')),
                ('score2', models.ImageField(default=0, upload_to='')),
                ('tournament_matches', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.TournamentModel')),
            ],
        ),
    ]
|
import cv2

# Concatenate two recordings from the network share into a single AVI file.
path = "Thy1-GCaMP6s-M4-K-airpuff-0706"
videoCapture1 = cv2.VideoCapture(
    "\\\\192.168.3.146\\public\\临时文件\\xpy\\" + path + '\\Thy1-GCaMP6s-M4-K-airpuff-0706.avi')
videoCapture2 = cv2.VideoCapture(
    "\\\\192.168.3.146\\public\\临时文件\\xpy\\" + path + '\\Thy1-GCaMP6s-M4-K-airpuff-0706-3.mp4')

# Read frame rate and frame size from the first clip; the output inherits them.
# NOTE(review): assumes the second clip has the same size/fps — frames of a
# different size are silently dropped by VideoWriter; confirm the inputs match.
fps = videoCapture1.get(cv2.CAP_PROP_FPS)
width = int(videoCapture1.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(videoCapture1.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (width, height)
print(fps, size)

# Output codec (MPEG-4.2). Alternatives: I420 for .avi, MJPG for .mp4.
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
f = cv2.VideoWriter_fourcc('M', 'P', '4', '2')
videoWriter = cv2.VideoWriter("\\\\192.168.3.146\\public\\临时文件\\xpy\\" + path + '\\' + path + '_all.avi', f, fps, size)


def _append_frames(capture, writer):
    """Write every frame of *capture* to *writer*, then release the capture."""
    ok, frame = capture.read()
    while ok:
        writer.write(frame)
        ok, frame = capture.read()
    capture.release()


# Copy clip 1 followed by clip 2 into the output file (previously two
# duplicated read/write loops).
_append_frames(videoCapture1, videoWriter)
_append_frames(videoCapture2, videoWriter)
videoWriter.release()
|
# Minimal snippet: a module-level tuple, a function returning a tuple, and a
# discarded call — presumably exercises tuple handling in some tooling; confirm.
x = (5, 6)
def f():
    """Return the constant tuple (7, 8)."""
    return (7, 8)
f()
|
# my_project/my_app/serializers.py
from rest_framework import serializers
from my_app.models import MyModel
class MySerializer(serializers.ModelSerializer):
    """DRF ModelSerializer auto-generating its fields from MyModel."""
    class Meta:
        model = MyModel
        fields = '__all__'  # expose every model field; list explicitly to restrict the API
|
import unittest
from deep_coffee.image_proc import OpenCVStream
from deep_coffee.image_proc import CropBeans_CV
import numpy as np
BEAN_IMAGE_PATH = "/app/test/image_proc/images/beans.jpg"
class TestCropBeans_CV(unittest.TestCase):
    """Smoke test for the OpenCV coffee-bean cropping pipeline."""

    def test_count_beans(self):
        # One frame from the still-image stream, cropped into bean patches.
        source = OpenCVStream(BEAN_IMAGE_PATH)
        image = source.next_frame()
        detected = CropBeans_CV().crop(image)
        self.assertGreaterEqual(len(detected), 10, "There should be at least 10 beans on the image")


if __name__ == '__main__':
    unittest.main()
|
# coding: utf-8
"""
Investigate a failure from a benchmark
======================================
The method ``validate`` may raise an exception; in that case, the class
:class:`BenchPerfTest
<pymlbenchmark.benchmark.benchmark_perf.BenchPerfTest>` dumps everything
needed for the investigation to disk.
The following script shows how to investigate.
.. contents::
:local:
"""
from onnxruntime import InferenceSession
from pickle import load
from time import time
import numpy
from numpy.testing import assert_almost_equal
import matplotlib.pyplot as plt
import pandas
from scipy.special import expit
import sklearn
from sklearn.utils._testing import ignore_warnings
from sklearn.linear_model import LogisticRegression
from pymlbenchmark.benchmark import BenchPerf
from pymlbenchmark.external import OnnxRuntimeBenchPerfTestBinaryClassification
##############################
# Defines the benchmark and runs it
# +++++++++++++++++++++++++++++++++
class OnnxRuntimeBenchPerfTestBinaryClassification3(
        OnnxRuntimeBenchPerfTestBinaryClassification):
    """
    Overwrites the class to add a pure python implementation
    of the logistic regression.
    """
    def fcts(self, dim=None, **kwargs):
        """Return the parent's benchmarked functions plus two pure
        numpy implementations of LogisticRegression inference."""
        # NOTE: `model=self.skl` binds the fitted model at the time fcts()
        # runs (default-argument early binding) — intentional here.
        def predict_py_predict(X, model=self.skl):
            # Decision function X @ W.T + b thresholded at 0.
            coef = model.coef_
            intercept = model.intercept_
            pred = numpy.dot(X, coef.T) + intercept
            return (pred >= 0).astype(numpy.int32)
        def predict_py_predict_proba(X, model=self.skl):
            # Probabilities: sigmoid of [-decision, +decision].
            coef = model.coef_
            intercept = model.intercept_
            pred = numpy.dot(X, coef.T) + intercept
            decision_2d = numpy.c_[-pred, pred]
            return expit(decision_2d)
        res = OnnxRuntimeBenchPerfTestBinaryClassification.fcts(
            self, dim=dim, **kwargs)
        res.extend([
            {'method': 'predict', 'lib': 'py', 'fct': predict_py_predict},
            {'method': 'predict_proba', 'lib': 'py',
             'fct': predict_py_predict_proba},
        ])
        return res
    def validate(self, results, **kwargs):
        """
        Raises an exception and locally dump everything we need
        to investigate.
        """
        # Checks that methods *predict* and *predict_proba* returns
        # the same results for both scikit-learn and onnxruntime.
        OnnxRuntimeBenchPerfTestBinaryClassification.validate(
            self, results, **kwargs)
        # Let's dump anything we need for later.
        # kwargs contains the input data.
        self.dump_error("Just for fun", skl=self.skl,
                        ort_onnx=self.ort_onnx,
                        results=results, **kwargs)
        # Deliberate failure so the example exercises the dump/inspect path.
        raise AssertionError("Just for fun")
@ignore_warnings(category=FutureWarning)
def run_bench(repeat=10, verbose=False):
    """Run the binary-classification benchmark grid and return its results.

    :param repeat: repetitions per parameter combination
    :param verbose: forwarded to ``BenchPerf.enumerate_run_benchs``
    :return: one row per measurement as a ``pandas.DataFrame``
    """
    pbefore = dict(dim=[1, 5], fit_intercept=[True])
    pafter = dict(N=[1, 10, 100])

    # Factory producing one test instance per parameter combination.
    # (PEP 8 E731 fix: a named ``def`` instead of a lambda bound to a name.)
    def test(dim=None, **opts):
        return OnnxRuntimeBenchPerfTestBinaryClassification3(
            LogisticRegression, dim=dim, **opts)

    bp = BenchPerf(pbefore, pafter, test)
    # assume_finite skips sklearn's input validation during timing.
    with sklearn.config_context(assume_finite=True):
        start = time()
        results = list(bp.enumerate_run_benchs(repeat=repeat, verbose=verbose))
        end = time()
    results_df = pandas.DataFrame(results)
    print("Total time = %0.3f sec\n" % (end - start))
    return results_df
########################
# Runs the benchmark.
try:
    # validate() above always raises, so this is expected to fail after
    # dumping the error payload to disk.
    run_bench(verbose=True)
except AssertionError as e:
    print(e)
#############################
# Investigation
# +++++++++++++
#
# Let's retrieve what was dumped.
filename = "BENCH-ERROR-OnnxRuntimeBenchPerfTestBinaryClassification3-0.pkl"
try:
    # The pickle was written by dump_error(); it holds the message plus the
    # objects passed in (fitted model, ONNX graph, input data).
    with open(filename, "rb") as f:
        data = load(f)
    good = True
except Exception as e:
    # Missing/corrupt dump: report and skip all investigation sections below.
    print(e)
    good = False
if good:
    print(list(sorted(data)))
    print("msg:", data["msg"])
    print(list(sorted(data["data"])))
    print(data["data"]['skl'])
##################################
# The input data is the following:
if good:
    print(data['data']['data'])
########################################
# Let's compare predictions.
if good:
    model_skl = data["data"]['skl']
    # Rebuild an onnxruntime session from the serialized ONNX model.
    model_onnx = InferenceSession(data["data"]['ort_onnx'].SerializeToString())
    input_name = model_onnx.get_inputs()[0].name
def ort_predict_proba(sess, input, input_name):
    """Return an onnxruntime session's probability output as a 2D array.

    Fix: the body previously ignored the ``sess`` parameter and used the
    global ``model_onnx``; it now runs on the session it is given.
    """
    # Output index 1 of the exported classifier is the probability tensor.
    res = sess.run(None, {input_name: input.astype(numpy.float32)})[1]
    return pandas.DataFrame(res).values
if good:
    # Probabilities from scikit-learn and from onnxruntime for each dumped batch.
    pred_skl = [model_skl.predict_proba(input[0])
                for input in data['data']['data']]
    pred_onnx = [ort_predict_proba(model_onnx, input[0], input_name)
                 for input in data['data']['data']]
    print(pred_skl)
    print(pred_onnx)
##############################
# They look the same. Let's check...
if good:
    for a, b in zip(pred_skl, pred_onnx):
        assert_almost_equal(a, b)
###################################
# Computing differences.
if good:
    def diff(a, b):
        # Largest absolute element-wise difference between two arrays.
        return numpy.max(numpy.abs(a.ravel() - b.ravel()))
    diffs = list(sorted(diff(a, b) for a, b in zip(pred_skl, pred_onnx)))
    plt.plot(diffs)
    plt.title(
        "Differences between prediction with\nscikit-learn and onnxruntime"
        "\nfor Logistic Regression")
    plt.show()
|
import ast
from datetime import datetime
from itertools import chain
import numpy as np
import pandas as pd
from pyzoopla.base import BaseProperty
from pyzoopla.utils import currency_to_num, myround, text_inbetween
class PropertyDetails(BaseProperty):
    """
    Details of a property scraped from https://ww2.zoopla.co.uk/property/{property_id}
    """
    def __init__(self, property_id):
        super(PropertyDetails, self).__init__(property_id, slug='property')
    def __str__(self):
        # Address of the property
        return self.soup.find(name='title').text.split(' - ')[0]
    def details(self):
        """Parse the ZPG taxonomy object embedded in the page's JS into a dict."""
        # Slice the JS object literal out of the flattened page text, then
        # massage it (quote keys, stringify null) so literal_eval accepts it.
        string = text_inbetween(text=self.html.text.replace('\n', '').replace(' ', ''),
                                left='ZPG.trackData.taxonomy=', right=';</script><script>d')
        string = string.replace('{', '{"').replace(':', '":').replace(',', ',"').replace('null', '"null"')
        return ast.literal_eval(string)
    def location(self):
        """Return the property's map coordinates parsed from the embedded JS."""
        string = text_inbetween(text=self.html.text.replace('\n', '').replace(' ', ''),
                                left='"coordinates":', right=',"pin":"https://r.zoocdn.com/assets/map-pin.png')
        # JS booleans -> Python booleans before literal_eval.
        string = string.replace('false', 'False').replace('true', 'True')
        return ast.literal_eval(string)
    def for_sale(self):
        """Return the live listing id if currently for sale, else False."""
        # current listings available at https://www.zoopla.co.uk/for-sale/details/{listing_id}
        sale = self.soup.find(name='span', attrs={'class': 'pdp-history__details'})
        if sale and sale.text.strip() == 'This property is currently for sale':
            return sale.a['href'].split('/')[-1]
        else:
            return False
    def property_value(self):
        """Zoopla buy/rent estimates with bounds and confidence (NaN where absent)."""
        values = [currency_to_num(i.text) for i in
                  self.soup.find_all(name='p', attrs={'class': 'pdp-estimate__price'})]
        # Range text renders like "Range: 100k - 200k"; [7:] drops the label.
        ranges = [currency_to_num(i) for i in
                  list(chain(*[i.text[7:].split(' - ')
                               for i in self.soup.find_all(name='p', attrs={'class': 'pdp-estimate__range'})]))]
        try:
            conf = float(self.soup.find(name='span',
                                        attrs={'class': 'pdp-confidence-rating__copy'}).text.strip().split('%')[0])
        except AttributeError:
            # Confidence element missing from the page.
            conf = np.nan
        # Pad so indexing below is safe when rent (or both) estimates are missing.
        if len(values) == 1:
            values.append(np.nan)
            ranges.extend([np.nan] * 2)
        elif not values:
            values.extend([np.nan] * 2)
            ranges.extend([np.nan] * 4)
        return {'buy': {'value': values[0], 'lower_bound': ranges[0], 'upper_bound': ranges[1]},
                'rent': {'value': values[1], 'lower_bound': ranges[2], 'upper_bound': ranges[3]},
                'confidence': conf}
    def value_change(self):
        """DataFrame of historical value changes: period, value, value_change, perc_change."""
        period = [i.text for i in self.soup.find_all(name='span', attrs={'class': 'pdp-value-change__label'})]
        changes = [currency_to_num(i.text) for i in
                   self.soup.find_all(name='span', attrs={'class': 'pdp-value-change__value'})]
        diffs = [float(i.text.replace('%', '')) for i in
                 self.soup.find_all(name='span', attrs={'class': 'pdp-value-change__difference'})]
        df = pd.DataFrame(list(zip(period, changes, diffs)), columns=['period', 'value_change', 'perc_change'])
        # Back out each historical value from today's estimate and the % change.
        df['value'] = self.property_value()['buy']['value'] / (1 + df.perc_change / 100)
        df['value'] = df.value.apply(myround)
        df = df[['period', 'value', 'value_change', 'perc_change']]
        return df
    def sales_history(self, dataframe=False):
        """Past listings (date, status, price, listing_id) as dict or DataFrame."""
        # historical listings available at https://www.zoopla.co.uk/property-history/{listing_id}
        history = {
            'date': [i.text for i in self.soup.find_all(name='span', attrs={'class': 'pdp-history__date'})],
            'status': [i.text for i in self.soup.find_all(name='span', attrs={'class': 'pdp-history__status'})],
            'price': [currency_to_num(i.text.replace('View listing', '')) for i in
                      self.soup.find_all(name='span', attrs={'class': 'pdp-history__price'})],
            'listing_id': []
        }
        for listing in self.soup.find_all(name='span', attrs={'class': 'pdp-history__price'}):
            try:
                history['listing_id'].append(listing.a['href'].split('/')[-1])
            except TypeError:
                # Price entry with no link to a listing.
                history['listing_id'].append(np.nan)
        return pd.DataFrame(history) if dataframe else history
    def all_data(self, dataframe=True):
        """Aggregate every scraped section into one record."""
        data = self.details()
        data['address'] = str(self).split('Property details for ')[-1].split(' - Zoopla')[0]
        data['id'] = self.listing_id
        data['geolocation'] = self.location()
        data['for_sale_id'] = self.for_sale()
        data['property_value'] = self.property_value()
        data['value_change'] = self.value_change().to_dict()
        data['sales_history'] = self.sales_history()
        # NOTE(review): naive UTC timestamp; datetime.utcnow() is deprecated
        # since Python 3.12 — consider datetime.now(timezone.utc).
        data['date_generated'] = datetime.utcnow()
        return pd.DataFrame.from_dict(data, orient='index').T if dataframe else data
|
import unittest
from foo import bar
class MyTestCase(unittest.TestCase):
    """Minimal test for foo.bar."""

    def setUp(self):
        pass

    def testMethod(self):
        # Fix: assertTrue(bar(), True) treated `True` as the failure
        # *message* and passed for any truthy bar(); assertEqual pins
        # the intended exact value.
        self.assertEqual(bar(), True)


if __name__ == '__main__':
    unittest.main()
|
#!/bin/python3
# encoding: utf-8
import tensorflow as tf
import numpy as np
# TF1-style eager mode; this script targets the TensorFlow 1.x API.
tf.enable_eager_execution()
# Toy dataset: two samples with three features each, scalar targets.
X = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.float32)
y = tf.constant([[10], [20]], dtype=tf.float32)
class Linear(tf.keras.Model):
    """Single dense layer (no activation): y = Wx + b, zero-initialized."""
    def __init__(self):
        super().__init__()
        # units=1 -> scalar output; zero init makes the first predictions 0.
        self.dense = tf.keras.layers.Dense(units=1, kernel_initializer=tf.zeros_initializer(),
                                           bias_initializer=tf.zeros_initializer())
    def call(self, inputs):
        return self.dense(inputs)
model = Linear()
# Plain SGD via the TF1 optimizer API.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
for _ in range(10000):
    with tf.GradientTape() as tape:
        y_pred = model(X)
        # Mean squared error loss.
        loss = tf.reduce_mean(tf.square(y_pred - y))
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
# Learned kernel and bias after training.
print([e.numpy() for e in model.variables])
|
# HANDS-ON PIL (Python Image Library, aka pillow)
from PIL import Image, ImageDraw, ImageFont
# Create a new blank RGB canvas.
size = 500,300 #w,h
bg_color = 242,247,151
canvas = Image.new('RGB', size, bg_color)
# Get the drawing pen for the canvas.
pen = ImageDraw.Draw(canvas)
# Shared coordinates for the drawing examples below: (x1, y1), (x2, y2).
dim = (50,50), (450,250) #x1y1, x2y2
dim2 = (450,50), (50, 250)
# Foreground / fill colors (RGB tuples).
fg_color = 126,0,31
fill_color= 255,200,200
# --- drawing primitives, kept as commented examples ---
#draw 2 lines
#pen.line(xy=dim, fill=fg_color, width=10)
#pen.line(xy=dim2, fill=fg_color, width=10)
#draw an arc
#pen.arc(xy=dim, start= 30, end=300, fill=fg_color, width=5)
#draw a rectangle
#pen.rectangle(xy=dim, fill=fill_color, outline=fg_color, width=5)
#draw an ellipse
#pen.ellipse(xy=dim, fill=None, outline= fg_color, width=5)
#draw a chord
#pen.chord(xy=dim, start= 30, end= 300, fill=fill_color, outline=fg_color, width=5)
#draw a pieslice
#pen.pieslice(xy=dim,start=30, end=300, fill=fill_color, outline=fg_color, width=5)
# Write multi-line text centered on the canvas.
# NOTE(review): hard-coded Windows font path — breaks on other OSes.
fnt = ImageFont.truetype('c:/windows/fonts/arial.ttf', size=40)
#data = 'Computer'
#reqd_size = pen.textsize(text=data, font=fnt)
#pen.text(xy=((size[0]-reqd_size[0])/2, (size[1]- reqd_size[1])/2), text=data, fill=fg_color, font=fnt)
data = '''Python
Image
Library'''
# NOTE(review): multiline_textsize() was removed in Pillow 10; newer
# Pillow requires multiline_textbbox() instead.
reqd_size = pen.multiline_textsize(text=data, font=fnt)
pen.text(xy=((size[0]-reqd_size[0])/2, (size[1]- reqd_size[1])/2), text=data, fill=fg_color, font=fnt)
# Save the canvas image to disk.
canvas.save('pil.jpg')
# Result key and reporting mode are selected by hand before each run.
key = 'RWSANSOL5'
mode = 'rpns-k'
# RPNS sampling rates: the columns of every results grid below.
rpns_rates = [5, 10, 15, 20, 25, 32, 40, 50, 60, 70, 80, 90, 100]
methods = ['SANSOL', 'SANSOLF', 'SANSOLcorr', 'SANSOLFcorr', 'RWSANSOL', 'RWSANSOLF', 'RWSANSOLcorr', 'RWSANSOLFcorr']
with open('../../results/rpns.csv', 'r') as f:
    lines = f.readlines()
def get_result_array(method_str):
    """Collect results for one method into a 5 x len(rpns_rates) grid.

    Reads the module-level `lines` (tab-separated columns: method name,
    rate, run index 0-4, value). Only the first value seen for each
    (run, rate) cell is kept; cells with no data stay None.
    """
    results_array = [[None] * len(rpns_rates) for _ in range(5)]
    for line in lines:
        # Fix: the old `if line:` never filtered blank lines, because
        # readlines() keeps the trailing '\n' (blank line == '\n', truthy).
        if not line.strip():
            continue
        result = line.split(sep='\t')
        if result[0] != method_str:
            continue
        row = int(result[2])
        col = rpns_rates.index(int(result[1]))
        # First value wins (replaces the inverted `continue`/`else` branch).
        if results_array[row][col] is None:
            results_array[row][col] = float(result[3])
    return results_array
# Reporting: each branch prints one tab-separated view of the results.
if mode == 'rpns':
    # Raw 5 x len(rpns_rates) grid for the single method named in `key`.
    array = get_result_array(key)
    for row in array:
        print('\t'.join([str(x) for x in row]))
    # print(results_array)
elif mode == 'hops-ns':
    # One overall average per (hop count k, method).
    for k in range(2, 9):
        for ns in methods:
            array = get_result_array(ns + str(k))
            count = 0
            total = 0
            for row in array:
                for cell in row:
                    if cell is not None:
                        total += cell
                        count += 1
            average = total / count if count != 0 else 0
            print(average, end='\t')
        print('')
elif mode == 'rpns-ave':
    # Per-(run, rate) cell averages across all methods and hop counts.
    full_array = [[0 for i in rpns_rates] for j in range(5)]
    count_array = [[0 for i in rpns_rates] for j in range(5)]
    for k in range(2, 9):
        for ns in methods:
            array = get_result_array(ns + str(k))
            for i, row in enumerate(array):
                for j, cell in enumerate(row):
                    if cell is not None:
                        full_array[i][j] += cell
                        count_array[i][j] += 1
    for i, row in enumerate(full_array):
        for j, cell in enumerate(row):
            if count_array[i][j] > 0:
                print(cell / count_array[i][j], end='\t')
        print('')
elif mode == 'rpns-k':  # consistency fix: was a detached `if` after the chain
    # Per-rate averages for each hop count k, across all methods.
    for k in range(2, 9):
        rpns_total = [0 for x in rpns_rates]
        rpns_counts = [0 for x in rpns_rates]
        for ns in methods:
            array = get_result_array(ns + str(k))
            for i, row in enumerate(array):
                for j, cell in enumerate(row):
                    if cell is not None:
                        rpns_total[j] += cell
                        rpns_counts[j] += 1
        k_average = [str(rpns_total[i] / rpns_counts[i]) if rpns_counts[i] != 0 else '0' for i in range(len(rpns_total))]
        print('\t'.join(k_average))
|
#!/usr/bin/python3.6
import os, pickle, random, subprocess, sys
from typing import Any
import numpy as np, pandas as pd
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
from skimage.io import imread, imshow
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model, save_model
from keras.layers import Input, Dropout, BatchNormalization, Activation, Add
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras import backend as K
from keras import optimizers
import tensorflow as tf
from keras.preprocessing.image import array_to_img, img_to_array, load_img
# Loose alias used for numpy arrays in annotations.
NpArray = Any
# Output naming stem; checkpoints and the submission CSV share it.
basic_name = "../output/unet_17_depth_coord"
submission_file = basic_name + '.csv'
NUM_FOLDS = 5
# When True, skip fitting and only load saved checkpoints to predict.
PREDICT_ONLY = True
img_size_ori = 101
img_size_target = 101  # equal to img_size_ori, so up/downsample are no-ops
def enable_logging() -> None:
    """ Sets up logging to a file. """
    # Mirror stdout into ../output/<module>.log via `tee -a`, so console
    # output is preserved while also being appended to the log file.
    module_name = os.path.splitext(os.path.basename(__file__))[0]
    log_file = '../output/' + module_name + ".log"
    tee = subprocess.Popen(["tee", "-a", log_file], stdin=subprocess.PIPE)
    os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
    # os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
def make_output_path(filename: str) -> str:
    """Build an ../output/ path tagged with the producing module's name,
    so artifacts from different scripts don't clobber each other."""
    module_stem = os.path.splitext(os.path.basename(__file__))[0]
    base, ext = os.path.splitext(filename)
    return '../output/' + base + '_' + module_stem + ext
def upsample(img):  # not used
    """Resize to (img_size_target, img_size_target); identity when sizes match."""
    if img_size_ori != img_size_target:
        return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)
    return img
def downsample(img):  # not used
    """Resize back to (img_size_ori, img_size_ori); identity when sizes match."""
    if img_size_ori != img_size_target:
        return resize(img, (img_size_ori, img_size_ori), mode='constant', preserve_range=True)
    return img
def cov_to_class(val):
    """Map a coverage fraction in [0, 1] to an integer class 0..10
    (the smallest i with val * 10 <= i, i.e. ceil of val * 10)."""
    for klass in range(11):
        if val * 10 <= klass:
            return klass
def BatchActivate(x):
    """Apply BatchNorm followed by ReLU to tensor `x`."""
    return Activation('relu')(BatchNormalization()(x))
def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):
    """Conv2D optionally followed by BatchNorm + ReLU.

    :param activation: when truthy, append BatchActivate. (Idiom fix:
        was ``activation == True``, which silently skipped truthy
        non-``True`` values; all in-file callers pass real booleans.)
    """
    x = Conv2D(filters, size, strides=strides, padding=padding)(x)
    if activation:
        x = BatchActivate(x)
    return x
def residual_block(blockInput, num_filters=16, batch_activate=False):
    """Pre-activation residual block: BN+ReLU, two 3x3 convs, identity
    skip connection, and an optional trailing BN+ReLU."""
    y = BatchActivate(blockInput)
    y = convolution_block(y, num_filters, (3, 3))
    y = convolution_block(y, num_filters, (3, 3), activation=False)
    y = Add()([y, blockInput])
    return BatchActivate(y) if batch_activate else y
# Build Model
def build_model(input_layer, start_neurons, DropoutRatio=0.5):
    """Residual U-Net for 101x101 inputs.

    The encoder halves the resolution four times (101->50->25->12->6),
    a residual middle block follows, then a mirrored decoder with skip
    connections. Two transposed convs use 'valid' padding to recover the
    odd sizes (12->25, 50->101). Output is a 1-channel sigmoid map.
    """
    # 101 -> 50
    conv1 = Conv2D(start_neurons*1, (3,3), activation=None, padding='same')(input_layer)
    conv1 = residual_block(conv1, start_neurons*1)
    conv1 = residual_block(conv1, start_neurons*1, True)
    pool1 = MaxPooling2D((2,2))(conv1)
    pool1 = Dropout(DropoutRatio/2)(pool1)  # lighter dropout on the first level
    # 50 -> 25
    conv2 = Conv2D(start_neurons*2, (3,3), activation=None, padding='same')(pool1)
    conv2 = residual_block(conv2, start_neurons*2)
    conv2 = residual_block(conv2, start_neurons*2, True)
    pool2 = MaxPooling2D((2,2))(conv2)
    pool2 = Dropout(DropoutRatio)(pool2)
    # 25 -> 12
    conv3 = Conv2D(start_neurons*4, (3,3), activation=None, padding='same')(pool2)
    conv3 = residual_block(conv3, start_neurons*4)
    conv3 = residual_block(conv3, start_neurons*4, True)
    pool3 = MaxPooling2D((2,2))(conv3)
    pool3 = Dropout(DropoutRatio)(pool3)
    # 12 -> 6
    conv4 = Conv2D(start_neurons*8, (3,3), activation=None, padding='same')(pool3)
    conv4 = residual_block(conv4, start_neurons*8)
    conv4 = residual_block(conv4, start_neurons*8, True)
    pool4 = MaxPooling2D((2,2))(conv4)
    pool4 = Dropout(DropoutRatio)(pool4)
    # Middle
    convm = Conv2D(start_neurons*16, (3,3), activation=None, padding='same')(pool4)
    convm = residual_block(convm, start_neurons*16)
    convm = residual_block(convm, start_neurons*16, True)
    # 6 -> 12
    deconv4 = Conv2DTranspose(start_neurons*8, (3,3), strides=(2,2), padding='same')(convm)
    uconv4 = concatenate([deconv4, conv4])  # skip connection from the encoder
    uconv4 = Dropout(DropoutRatio)(uconv4)
    uconv4 = Conv2D(start_neurons*8, (3,3), activation=None, padding='same')(uconv4)
    uconv4 = residual_block(uconv4, start_neurons*8)
    uconv4 = residual_block(uconv4, start_neurons*8, True)
    # 12 -> 25 ('valid' padding recovers the odd size)
    deconv3 = Conv2DTranspose(start_neurons*4, (3,3), strides=(2,2), padding='valid')(uconv4)
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(DropoutRatio)(uconv3)
    uconv3 = Conv2D(start_neurons*4, (3,3), activation=None, padding='same')(uconv3)
    uconv3 = residual_block(uconv3, start_neurons*4)
    uconv3 = residual_block(uconv3, start_neurons*4, True)
    # 25 -> 50
    deconv2 = Conv2DTranspose(start_neurons*2, (3,3), strides=(2,2), padding='same')(uconv3)
    uconv2 = concatenate([deconv2, conv2])
    uconv2 = Dropout(DropoutRatio)(uconv2)
    uconv2 = Conv2D(start_neurons*2, (3,3), activation=None, padding='same')(uconv2)
    uconv2 = residual_block(uconv2, start_neurons*2)
    uconv2 = residual_block(uconv2, start_neurons*2, True)
    # 50 -> 101 ('valid' padding recovers the odd size)
    deconv1 = Conv2DTranspose(start_neurons*1, (3,3), strides=(2,2), padding='valid')(uconv2)
    uconv1 = concatenate([deconv1, conv1])
    uconv1 = Dropout(DropoutRatio)(uconv1)
    uconv1 = Conv2D(start_neurons*1, (3,3), activation=None, padding='same')(uconv1)
    uconv1 = residual_block(uconv1, start_neurons*1)
    uconv1 = residual_block(uconv1, start_neurons*1, True)
    # Keep the pre-sigmoid layer separate: stage 2 strips the activation
    # to train with the Lovasz hinge loss on raw logits.
    output_layer_noActi = Conv2D(1, (1,1), padding='same', activation=None)(uconv1)
    output_layer = Activation('sigmoid')(output_layer_noActi)
    return output_layer
def get_iou_vector(A, B):
    """Mean Kaggle-style IoU score over a batch of binary masks.

    For each sample the IoU of (A > 0) vs (B > 0) is swept against the
    thresholds 0.5, 0.55, ..., 0.95; the per-sample score is the fraction
    of thresholds the IoU exceeds. Returns the batch mean.
    """
    scores = []
    for true_mask, pred_mask in zip(A, B):
        gt = true_mask > 0
        pr = pred_mask > 0
        inter = np.logical_and(gt, pr)
        union = np.logical_or(gt, pr)
        # Epsilon keeps the empty/empty case at IoU == 1.
        iou = (np.sum(inter > 0) + 1e-10) / (np.sum(union > 0) + 1e-10)
        passed = [iou > th for th in np.arange(0.5, 1, 0.05)]
        scores.append(np.mean(passed))
    return np.mean(scores)
def my_iou_metric(label, pred):
    # Stage-1 Keras metric: the model outputs sigmoid probabilities, so
    # binarize at 0.5 before the numpy IoU sweep (bridged via tf.py_func).
    return tf.py_func(get_iou_vector, [label, pred>0.5], tf.float64)
def my_iou_metric_2(label, pred):
    # Stage-2 metric: the model outputs raw logits (no sigmoid), threshold at 0.
    return tf.py_func(get_iou_vector, [label, pred >0], tf.float64)
# code download from: https://github.com/bermanmaxim/LovaszSoftmax
def lovasz_grad(gt_sorted):
    """
    Computes gradient of the Lovasz extension w.r.t sorted errors
    See Alg. 1 in paper
    """
    gts = tf.reduce_sum(gt_sorted)
    # Jaccard index of the growing prefix sets of the sorted ground truth.
    intersection = gts - tf.cumsum(gt_sorted)
    union = gts + tf.cumsum(1. - gt_sorted)
    jaccard = 1. - intersection / union
    # Discrete derivative: keep the first element, difference the rest.
    jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)
    return jaccard
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
    """
    Binary Lovasz hinge loss
    logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
    labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
    per_image: compute the loss per image instead of per batch
    ignore: void class id
    """
    if per_image:
        # Apply the flat loss to each image separately, then average.
        def treat_image(log_lab):
            log, lab = log_lab
            log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)
            log, lab = flatten_binary_scores(log, lab, ignore)
            return lovasz_hinge_flat(log, lab)
        losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)
        loss = tf.reduce_mean(losses)
    else:
        loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
    return loss
def lovasz_hinge_flat(logits, labels):
    """
    Binary Lovasz hinge loss
    logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
    labels: [P] Tensor, binary ground truth labels (0 or 1)
    ignore: label to ignore
    """
    def compute_loss():
        labelsf = tf.cast(labels, logits.dtype)
        # Map {0, 1} labels to {-1, +1} signs for the hinge formulation.
        signs = 2. * labelsf - 1.
        errors = 1. - logits * tf.stop_gradient(signs)
        # Sort errors descending; the Lovasz gradient is taken w.r.t. this order.
        errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name="descending_sort")
        gt_sorted = tf.gather(labelsf, perm)
        grad = lovasz_grad(gt_sorted)
        # NOTE(review): uses ELU on sorted errors rather than the paper's
        # ReLU — confirm this variant is intended.
        loss = tf.tensordot(tf.nn.elu(errors_sorted), tf.stop_gradient(grad), 1, name="loss_non_void")
        return loss
    # deal with the void prediction case (only void pixels)
    loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),
                   lambda: tf.reduce_sum(logits) * 0.,
                   compute_loss,
                   strict=True,
                   name="loss"
                   )
    return loss
def flatten_binary_scores(scores, labels, ignore=None):
    """
    Flattens predictions in the batch (binary case)
    Remove labels equal to 'ignore'
    """
    scores = tf.reshape(scores, (-1,))
    labels = tf.reshape(labels, (-1,))
    if ignore is None:
        return scores, labels
    # Keep only positions whose label differs from the void/ignore id.
    valid = tf.not_equal(labels, ignore)
    vscores = tf.boolean_mask(scores, valid, name='valid_scores')
    vlabels = tf.boolean_mask(labels, valid, name='valid_labels')
    return vscores, vlabels
def lovasz_loss(y_true, y_pred):
    # Drop the trailing channel dimension and treat y_pred as raw logits
    # (the stage-2 model has its sigmoid stripped).
    y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')
    logits = y_pred #Jiaxin
    loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)
    return loss
def train_and_predict(x_train, y_train, x_valid, y_valid, fold):
    """Two-stage training for one CV fold.

    Stage 1 trains with binary cross-entropy on the sigmoid output;
    stage 2 strips the sigmoid and fine-tunes with the Lovasz hinge loss
    on logits. Returns (preds_valid, preds_test) with horizontal-flip TTA.

    NOTE(review): reads the module-level `x_test` for the test predictions
    instead of taking it as a parameter — confirm that is intended.
    """
    # data augmentation: append horizontally flipped copies
    x_train = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0)
    y_train = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0)
    print("x_train after hflip", x_train.shape)
    # fix: this line previously printed y_valid.shape under the y_train label
    print("y_train after hflip", y_train.shape)
    # stage-1 model: sigmoid output + BCE
    input_layer = Input((img_size_target, img_size_target, 3))
    output_layer = build_model(input_layer, 16,0.5)
    model1 = Model(input_layer, output_layer)
    c = optimizers.adam(lr = 0.005)
    model1.compile(loss="binary_crossentropy", optimizer=c, metrics=[my_iou_metric])
    save_model_name = f"{basic_name}_stage1_fold{fold}.hdf5"
    # NOTE(review): stage-1 callbacks monitor the *training* metric
    # ('my_iou_metric'), not the validation one — confirm intended.
    early_stopping = EarlyStopping(monitor='my_iou_metric', mode = 'max',patience=15, verbose=1)
    model_checkpoint = ModelCheckpoint(save_model_name, monitor='my_iou_metric', mode='max',
                                       save_best_only=True, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='my_iou_metric', mode='max', factor=0.5, patience=5,
                                  min_lr=0.0001, verbose=1)
    epochs = 80
    batch_size = 128
    if not PREDICT_ONLY:
        history = model1.fit(x_train, y_train,
                             validation_data = [x_valid, y_valid],
                             epochs = epochs,
                             batch_size = batch_size,
                             callbacks = [early_stopping, model_checkpoint, reduce_lr],
                             verbose = 2)
    model1 = load_model(save_model_name, custom_objects={'my_iou_metric':my_iou_metric})
    # stage 2: remove the sigmoid activation layer and fine-tune with lovasz loss
    input_x = model1.layers[0].input
    output_layer = model1.layers[-1].input
    model = Model(input_x, output_layer)
    c = optimizers.adam(lr=0.01)
    model.compile(loss=lovasz_loss, optimizer=c, metrics=[my_iou_metric_2])
    save_model_name = f"{basic_name}_stage2_fold{fold}.hdf5"
    early_stopping = EarlyStopping(monitor='val_my_iou_metric_2', mode = 'max',patience=30, verbose=1)
    model_checkpoint = ModelCheckpoint(save_model_name,monitor='val_my_iou_metric_2',
                                       mode = 'max', save_best_only=True, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric_2', mode = 'max',factor=0.5, patience=5,
                                  min_lr=0.00005, verbose=1)
    epochs = 120
    batch_size = 128
    if not PREDICT_ONLY:
        history = model.fit(x_train, y_train,
                            validation_data=[x_valid, y_valid],
                            epochs=epochs,
                            batch_size=batch_size,
                            callbacks=[ model_checkpoint,reduce_lr,early_stopping],
                            verbose=2)
    model = load_model(save_model_name,custom_objects={'my_iou_metric_2': my_iou_metric_2,
                                                       'lovasz_loss': lovasz_loss})
    def predict_result(model,x_test,img_size_target): # predict both orginal and reflect x
        # Flip-TTA: average predictions on each image and its mirror.
        x_test_reflect = np.array([np.fliplr(x) for x in x_test])
        preds_test = model.predict(x_test).reshape(-1, img_size_target, img_size_target)
        preds_test2_refect = model.predict(x_test_reflect).reshape(-1, img_size_target, img_size_target)
        preds_test += np.array([ np.fliplr(x) for x in preds_test2_refect] )
        return preds_test/2
    preds_valid = predict_result(model,x_valid,img_size_target)
    preds_test = predict_result(model,x_test,img_size_target)
    return preds_valid, preds_test
#Score the model and do a threshold optimization by the best IoU.
# src: https://www.kaggle.com/aglotero/another-iou-metric
def iou_metric(y_true_in, y_pred_in, print_table=False):
    """Kaggle-style average precision for one binary mask pair.

    Both masks are bucketed into background/foreground with fixed
    [0, 0.5, 1] histogram bins; the foreground IoU is then scored against
    thresholds 0.50..0.95 (step 0.05) and the precisions are averaged.
    """
    labels = y_true_in
    y_pred = y_pred_in
    true_objects = 2
    pred_objects = 2
    # if all zeros, original code generate wrong bins [-0.5 0 0.5],
    temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=([0,0.5,1], [0,0.5, 1]))
    # temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))
    #print(temp1)
    intersection = temp1[0]
    #print("temp2 = ",temp1[1])
    #print(intersection.shape)
    # print(intersection)
    # Compute areas (needed for finding the union between all objects)
    #print(np.histogram(labels, bins = true_objects))
    area_true = np.histogram(labels,bins=[0,0.5,1])[0]
    #print("area_true = ",area_true)
    area_pred = np.histogram(y_pred, bins=[0,0.5,1])[0]
    area_true = np.expand_dims(area_true, -1)
    area_pred = np.expand_dims(area_pred, 0)
    # Compute union
    union = area_true + area_pred - intersection
    # Exclude background from the analysis
    intersection = intersection[1:,1:]
    intersection[intersection == 0] = 1e-9
    union = union[1:,1:]
    union[union == 0] = 1e-9
    # Compute the intersection over union
    iou = intersection / union
    # Precision helper function
    def precision_at(threshold, iou):
        matches = iou > threshold
        true_positives = np.sum(matches, axis=1) == 1 # Correct objects
        false_positives = np.sum(matches, axis=0) == 0 # Missed objects
        false_negatives = np.sum(matches, axis=1) == 0 # Extra objects
        tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
        return tp, fp, fn
    # Loop over IoU thresholds
    prec = []
    if print_table:
        print("Thresh\tTP\tFP\tFN\tPrec.")
    for t in np.arange(0.5, 1.0, 0.05):
        tp, fp, fn = precision_at(t, iou)
        if (tp + fp + fn) > 0:
            p = tp / (tp + fp + fn)
        else:
            p = 0
        if print_table:
            print("{:1.3f}\t{}\t{}\t{}\t{:1.3f}".format(t, tp, fp, fn, p))
        prec.append(p)
    if print_table:
        print("AP\t-\t-\t-\t{:1.3f}".format(np.mean(prec)))
    return np.mean(prec)
def iou_metric_batch(y_true_in, y_pred_in):
    """Mean `iou_metric` over the leading (batch) axis."""
    scores = [iou_metric(y_true_in[i], y_pred_in[i])
              for i in range(y_true_in.shape[0])]
    return np.mean(scores)
def rle_encode(im):
    '''
    im: numpy array, 1-mask, 0-background
    Returns run length as string
    '''
    # Column-major scan (order='F'), zero-padded on both ends so every run
    # of ones has a detectable start and end boundary.
    flat = np.concatenate([[0], im.flatten(order='F'), [0]])
    boundaries = np.where(flat[1:] != flat[:-1])[0] + 1
    boundaries[1::2] -= boundaries[::2]  # (start, end) pairs -> (start, length)
    return ' '.join(str(v) for v in boundaries)
def add_depth_coord(images: NpArray) -> NpArray:
    """ Takes dataset (N, W, H, 1) returns (N, W, H, 3). """
    assert(len(images.shape) == 4)
    # Channel 1 is a 0..1 ramp along the row axis; channel 2 scales the
    # image by that ramp (CoordConv-style depth hint).
    height = images.shape[1]
    ramp = np.zeros_like(images)
    for row_idx, level in enumerate(np.linspace(0, 1, height)):
        ramp[:, row_idx, ...] = level
    return np.concatenate([images, ramp, images * ramp], axis=-1)
if __name__ == "__main__":
enable_logging()
print(f"training with {NUM_FOLDS} folds")
train_df = pd.read_csv("../data/train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("../data/depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = depths_df[~depths_df.index.isin(train_df.index)]
train_df["images"] = [np.array(load_img("../data/train/images/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm(train_df.index)]
train_df["masks"] = [np.array(load_img("../data/train/masks/{}.png".format(idx), grayscale=True)) / 255 for idx in tqdm(train_df.index)]
train_df["coverage"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)
train_df["coverage_class"] = train_df.coverage.map(cov_to_class)
images = np.array(train_df.images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)
masks = np.array(train_df.masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)
preds_train = np.zeros((train_df.shape[0], img_size_target, img_size_target))
preds_test = np.zeros((NUM_FOLDS, test_df.shape[0], img_size_target, img_size_target))
folds = StratifiedKFold(NUM_FOLDS, shuffle=True, random_state=666)
x_test = np.array([(np.array(load_img("../data/test/images/{}.png".format(idx), grayscale = True))) / 255 for idx in tqdm(test_df.index)]).reshape(-1, img_size_target, img_size_target, 1)
images = add_depth_coord(images)
x_test = add_depth_coord(x_test)
print("train", images.shape)
print("coverage_class", train_df.coverage_class.shape)
print("preds_train", preds_train.shape)
print("preds_test", preds_test.shape)
for fold, indices in enumerate(folds.split(images, train_df.coverage_class)):
print("==================== fold %d" % fold)
train_idx, valid_idx = indices
x_train, y_train = images[train_idx], masks[train_idx]
x_valid, y_valid = images[valid_idx], masks[valid_idx]
p_valid, p_test = train_and_predict(x_train, y_train, x_valid, y_valid, fold)
preds_train[valid_idx], preds_test[fold] = p_valid, p_test
with open(make_output_path("predicts/fold%d_test.pkl" % fold), "wb") as f:
pickle.dump(p_test, f)
with open(make_output_path("predicts/fold%d_train.pkl" % fold), "wb") as f:
pickle.dump(p_valid, f)
# preds_test = np.mean(preds_test, axis=0)
#
# ## Scoring for last model, choose threshold by validation data
# thresholds_ori = np.linspace(0.3, 0.7, 31)
# # Reverse sigmoid function: Use code below because the sigmoid activation was removed
# thresholds = np.log(thresholds_ori/(1-thresholds_ori))
#
# ious = np.array([iou_metric_batch(masks, preds_train > threshold) for threshold in tqdm(thresholds)])
# print(ious)
#
# threshold_best_index = np.argmax(ious)
# iou_best = ious[threshold_best_index]
# threshold_best = thresholds[threshold_best_index]
# print("validation:", iou_best)
#
# pred_dict = {idx: rle_encode(np.round(downsample(preds_test[i]) > threshold_best)) for i, idx in enumerate(tqdm(test_df.index.values))}
#
# sub = pd.DataFrame.from_dict(pred_dict,orient='index')
# sub.index.names = ['id']
# sub.columns = ['rle_mask']
# sub.to_csv(submission_file)
|
from flask import Blueprint, g, url_for
from flask_login import login_required
from flask_restx import Api, Resource
# Merchant-facing REST API, mounted under /api/merchant.
bp_merchant = Blueprint('merchant', __name__, url_prefix='/api/merchant')
api = Api(bp_merchant, title="Merchant API", description="YYY Merchant API")
@api.route('/brand/')
class BrandList(Resource):
    """Brand collection endpoint; responses are hard-coded stubs for now."""
    method_decorators = [login_required]  # every verb requires a logged-in user
    def get(self):
        # brands = [mdl for mdl in Brand.select().where(Brand.user == g.user)]
        return [{
            'id': 1,
            'name': 'test',
            'logo_url': 'https://...' #url_for('upload.merchant_image', slug='brand-test')
        }]
    def post(self):
        return {'id': 1}
@api.route('/brand/<int:brand_id>/')
class Brand(Resource):
    """Single-brand endpoint (stub data); brand_id is currently ignored."""

    # Every handler requires an authenticated session.
    method_decorators = [login_required]

    def get(self, brand_id):
        record = {
            'id': 1,
            'name': 'test',
            'logo_url': 'https://...',
        }
        return record

    def put(self, brand_id):
        # Stub: echo the fixed id back.
        return {'id': 1}

    def delete(self, brand_id):
        # Stub: echo the fixed id back.
        return {'id': 1}
@api.route('/shop/')
class ShopList(Resource):
    """Shop collection endpoint (placeholder implementation)."""
    # Every handler requires an authenticated session.
    method_decorators = [login_required]

    def get(self):
        # TODO: return real shop records; placeholder string for now.
        return 'get shop list'

    def post(self):
        # TODO: create a shop from the request payload.
        return 'shop create'
@api.route('/shop/<int:shop_id>/')
class Shop(Resource):
    """Single-shop endpoint (placeholder implementation)."""
    # Every handler requires an authenticated session.
    method_decorators = [login_required]

    def get(self, shop_id):
        # TODO: look up the shop by shop_id.
        return 'shop info'

    def put(self, shop_id):
        return 'shop edit'

    def delete(self, shop_id):
        return 'shop delete'
@api.route('/shop/<int:shop_id>/product/')
class ProductList(Resource):
    """Product collection endpoint, scoped to one shop (placeholder)."""
    # Every handler requires an authenticated session.
    method_decorators = [login_required]

    def get(self, shop_id):
        # TODO: list products of shop_id.
        return 'get product list'

    def post(self, shop_id):
        return 'product create'
# NOTE(review): route lacks the trailing slash the sibling routes use — confirm
# whether that inconsistency is intentional before normalizing.
@api.route('/shop/<int:shop_id>/product/<int:product_id>')
class Product(Resource):
    """Single-product endpoint, scoped to one shop (placeholder)."""
    # Every handler requires an authenticated session.
    method_decorators = [login_required]

    def get(self, shop_id, product_id):
        return 'get product'

    def put(self, shop_id, product_id):
        return 'product edit'

    def delete(self, shop_id, product_id):
        return 'product delete'
|
"""
The purpose of this test set is to show how easy or difficult the
generated features are. Results are included in the paper.
@author: Stippinger
"""
import time
from contextlib import contextmanager
from typing import Iterable, Tuple, Dict, List, Any
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
from scipy import stats
from sklearn.utils import check_random_state
from tqdm import tqdm
from biometric_blender.generator_api import EffectiveFeature
# # # Gridsearch scores for the table of accuracy # # #
def make_data(
        n_labels=100, n_samples_per_label=16, n_true_features=40,
        n_fake_features=160, n_features_out=10000, seed=137
) -> Iterable[Tuple[str, Dict[str, Any], Tuple[np.ndarray, ...]]]:
    """
    Generate some test data: true only, hidden only, all output features.

    Yields (variant name, generator kwargs, feature-space tuple) triples
    for the variants 'true', 'hidden' and 'full'.
    """
    from biometric_blender import generate_feature_space
    params = {
        'n_labels': n_labels,
        'count_distribution': stats.randint(5, 11),
        'min_usefulness': 0.50,
        'max_usefulness': 0.95,
        'n_samples_per_label': n_samples_per_label,
        'n_true_features': n_true_features,
        'n_fake_features': n_fake_features,
        'location_ordering_extent': 2,
        'location_sharing_extent': 3,
        'n_features_out': n_features_out,
        'blending_mode': 'logarithmic',
        'usefulness_scheme': 'linear',
        'random_state': seed,
    }
    fs = generate_feature_space(**params)
    labels, names = fs[1], fs[3]
    hidden, hidden_usefulness = fs[4], fs[5]
    # 'true' keeps only the leading true-feature columns of the hidden space;
    # 'hidden' exposes the whole hidden space in the feature slot.
    true_view = (hidden[:, :n_true_features], labels, hidden_usefulness,
                 names, hidden, hidden_usefulness)
    hidden_view = (hidden, labels, hidden_usefulness,
                   names, hidden, hidden_usefulness)
    yield 'true', params, true_view
    yield 'hidden', params, hidden_view
    yield 'full', params, fs
def get_reduction(n_components=None, seed=4242) -> Iterable[
        Tuple[str, "sklearn.base.TransformerMixin", int]]:
    """
    Get benchmark reduction algorithms.

    :param n_components: scalar or sequence of target dimensionalities;
        None entries mean "no reduction"
    :param seed: random_state forwarded to the stochastic reducers
    :yield: (short name, unfitted transformer, n_components) triples
    """
    # Note: FA rotation requires sklearn version > 0.24
    import re
    import sklearn
    # Parse only the leading numeric release components: the old
    # tuple(map(int, version.split('.'))) crashed on development builds
    # such as "1.3.0rc1" or "0.24.dev0".
    release = tuple(int(p) for p in re.findall(r'\d+', sklearn.__version__)[:3])
    assert release >= (0, 24)
    from sklearn.decomposition import PCA, FactorAnalysis
    from sklearn.feature_selection import SelectKBest, f_classif
    from sklearn.preprocessing import FunctionTransformer
    for n in np.ravel(n_components):
        if n is None:
            yield 'none', FunctionTransformer(), n
        else:
            yield 'kbest', SelectKBest(f_classif, k=n), n
            yield 'pca', PCA(n_components=n, random_state=seed), n
            yield 'fa', FactorAnalysis(n_components=n, rotation='varimax',
                                       random_state=seed), n
def get_classifiers(seed=4242) -> Iterable[
        Tuple[str, "sklearn.base.ClassifierMixin"]]:
    """
    Get benchmark classifiers.

    :yield: (short name, unfitted estimator) pairs
    """
    # see https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.svm import SVC
    benchmark = (
        ('knn', KNeighborsClassifier()),
        ('svm', SVC(random_state=seed)),
        ('rf', RandomForestClassifier(random_state=seed)),
    )
    yield from benchmark
def score_classifiers(n_jobs=2):
    """
    Score benchmark classifiers on the data.

    Cross-validates every (reduction, dataset, classifier) combination at a
    fixed 60 components and writes the scores to fig/scores.csv.
    """
    from itertools import product as iterprod
    from sklearn.model_selection import cross_val_score
    scores = {}
    combos = iterprod(get_reduction(n_components=60), make_data())
    for (red_name, red_obj, red_n), (data_name, data_kw, data_fs) in tqdm(
            combos, desc='data&reduction'):
        out_features, out_labels = data_fs[0], data_fs[1]
        reduced = red_obj.fit_transform(out_features, out_labels)
        for clf_name, clf_obj in tqdm(get_classifiers(), desc='cfl',
                                      leave=False):
            key = f"{red_name}_{red_n}_{clf_name}_{data_name}"
            cv_scores = cross_val_score(clf_obj, reduced, out_labels,
                                        n_jobs=n_jobs)
            scores[key] = cv_scores
            print(key, cv_scores, flush=True)
    pd.DataFrame(scores).to_csv('fig/scores.csv')
def get_gridsearch_classifiers(seed=4242) -> Iterable[
        Tuple[str, object, Dict[str, list]]]:
    """
    Get benchmark classifiers to test with various parametrization.

    :yield: (short name, unfitted estimator, parameter grid) triples
    """
    # see https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.svm import SVC
    knn_grid = {"weights": ['uniform', 'distance']}
    svm_grid = {"C": [0.5, 1.0, 2.0],
                "tol": [1e-4, 1e-3, 1e-2]}
    rf_grid = {"n_estimators": [1000],
               "min_samples_leaf": [1, 2, 4],
               "min_impurity_decrease": [0.0, 0.01, 0.05],
               "max_depth": [None, 8, 10]}
    yield 'knn', KNeighborsClassifier(), knn_grid
    yield 'svm', SVC(random_state=seed), svm_grid
    yield 'rf', RandomForestClassifier(random_state=seed), rf_grid
def score_gridsearch_classifiers(n_jobs=4):
    """
    Score benchmark classifiers with various parametrization on the data.

    Runs a GridSearchCV for every (reduction, n_components, dataset,
    classifier) combination and appends all cv_results_ rows, annotated
    with the combination, into fig/gridsearch.csv.

    :param n_jobs: parallel workers handed to GridSearchCV
    """
    from itertools import product as iterprod
    from sklearn.model_selection import GridSearchCV
    result = []
    # Candidate output dimensionalities; None means "no reduction".
    n_components = [None, 10, 25, 50, 100, 200, 400, 800]
    for (red_name, red_obj, red_n), (data_name, data_kw, data_fs) in tqdm(
            iterprod(get_reduction(n_components=n_components), make_data()),
            desc='data&reduction'):
        (out_features, out_labels, out_usefulness, out_names,
         hidden_features, hidden_usefulness) = data_fs
        # Skip reductions asking for more components than features exist.
        if (red_n is not None) and (out_features.shape[1] < red_n):
            continue
        t0 = time.time()
        simplified_features = red_obj.fit_transform(
            out_features, out_labels)
        red_time = time.time() - t0  # wall-clock cost of the reduction alone
        for (clf_name, clf_obj, clf_param_grid) in tqdm(
                get_gridsearch_classifiers(), desc='clf', leave=False):
            gridsearch = GridSearchCV(clf_obj, clf_param_grid, cv=4,
                                      verbose=2, n_jobs=n_jobs)
            gridsearch.fit(simplified_features, out_labels)
            df = pd.DataFrame(gridsearch.cv_results_)
            # Annotate rows so all runs can be concatenated into one table.
            df['reduction'] = red_name
            df['reduction_time'] = red_time
            df['n_components'] = red_n
            df['classifier'] = clf_name
            df['data'] = data_name
            result.append(df)
    pd.concat(result).to_csv('fig/gridsearch.csv')
def make_table_accuracy(data):
    """
    Find best parametrization from stored scores
    (write out TeX tables presented in the paper).

    Reads fig/gridsearch.csv and writes fig/score-<data>.tex,
    fig/time-fit-<data>.tex and fig/time-red-<data>.tex.

    :param data: dataset variant to export ('true', 'hidden' or 'full')
    """
    df = pd.read_csv('fig/gridsearch.csv')
    # Keep only the best-scoring parametrization per
    # (data, classifier, reduction) combination.
    outcome = df.sort_values(
        'mean_test_score', ascending=False
    ).drop_duplicates(
        ['data', 'classifier', 'reduction', ]
    )
    q = "data=='{}'".format(data)
    tmp = outcome.query(q).set_index(['classifier', 'reduction'])
    columns = ['none', 'pca', 'fa', 'kbest']
    rows = ['knn', 'svm', 'rf']
    new_columns = {'pca': 'PCA', 'fa': 'FA', 'kbest': '$k$-best'}
    new_rows = {'knn': '$k$NN', 'svm': 'SVC', 'rf': 'RF'}

    def _export(column, filename, round_digits=None):
        # Shared pivot/rename/export pipeline (was triplicated inline).
        table = tmp.loc[:, column].unstack('reduction')
        if round_digits is not None:
            table = table.round(round_digits)
        table.reindex(index=rows, columns=columns).rename(
            index=new_rows, columns=new_columns).to_latex(filename)

    _export('mean_test_score', f'fig/score-{data}.tex', round_digits=3)
    _export('mean_fit_time', f'fig/time-fit-{data}.tex')
    _export('reduction_time', f'fig/time-red-{data}.tex')
def make_figure_accuracy(data):
    """
    Make figure from stored scores as a function of n_components
    (from the various parametrizations only the best score is kept).

    Produces a 3xN panel grid (score / fit time / reduction time, one
    column per reduction method) and saves fig/gridsearch-<data>.pdf.

    :param data: dataset variant to plot ('true', 'hidden' or 'full')
    """
    from matplotlib import pyplot as plt
    df = pd.read_csv('fig/gridsearch.csv')
    # Best parametrization per (data, classifier, reduction, n_components).
    outcome = df.sort_values(
        'mean_test_score', ascending=False
    ).drop_duplicates(
        ['data', 'classifier', 'reduction', 'n_components', ]
    )
    outcome.to_excel('fig/outcome.xlsx')
    # One column per real reduction; the 'none' rows serve as reference lines.
    reduction = list(o for o in outcome.reduction.unique() if o != 'none')
    if not len(reduction):
        reduction = ['none']
    fig, ax = plt.subplots(3, len(reduction),
                           sharex=True, sharey='row', squeeze=False)
    for i, red in enumerate(reduction):
        ax[0, i].set_title(red)
        ax[0, i].semilogx()
        for clf in outcome.classifier.unique():
            q = "reduction=='{}' & classifier=='{}' & data=='{}'".format(
                red, clf, data)
            meas = outcome.query(q).sort_values('n_components')
            # Reference: the same classifier without any reduction.
            q = "reduction=='{}' & classifier=='{}' & data=='{}'".format(
                'none', clf, data)
            ref = outcome.query(q).iloc[0, :]
            # top row: score
            l0, = ax[0, i].plot(meas['n_components'],
                                meas['mean_test_score'],
                                marker='o',
                                markersize='3',
                                markerfacecolor='w',
                                markeredgewidth=0.5,
                                label=clf)
            lr = ax[0, i].axhline(ref['mean_test_score'],
                                  color=l0.get_color(),
                                  linestyle='--')
            # middle row: fit time
            l1, = ax[1, i].plot(meas['n_components'],
                                meas['mean_fit_time'],
                                marker='o',
                                markersize='3',
                                markerfacecolor='w',
                                markeredgewidth=0.5,
                                label=clf)
            lt = ax[1, i].axhline(ref['mean_fit_time'],
                                  color=l1.get_color(),
                                  linestyle='--')
            # bottom row: reduction time
            l2, = ax[2, i].plot(meas['n_components'],
                                meas['reduction_time'],
                                marker='o',
                                markersize='3',
                                markerfacecolor='w',
                                markeredgewidth=0.5,
                                label=clf)
            lr = ax[2, i].axhline(ref['reduction_time'],
                                  color=l2.get_color(),
                                  linestyle='--')
        # add legend entry (invisible point carrying the dashed-line style)
        ll, = ax[0, i].plot([np.nan], [np.nan],
                            color='k',
                            linestyle='--',
                            label='no red.')
    h, l = ax[0, 0].get_legend_handles_labels()
    fig.legend(h, l, title='gridsearch\nclassifier')
    ax[0, 0].set_ylabel('max(accuracy)')
    ax[1, 0].set_ylabel('fit time')
    ax[2, 0].set_ylabel('reduction time')
    ax[-1, 0].set_xlabel('reduction n_components')
    fig.savefig(f'fig/gridsearch-{data}.pdf')
    plt.show()
# # # Additional figure about the reconstruction capabilities of FA # # #
def compute_scores_for_n_components(X, red):
    """
    Cross validated reduction scores for varying n_components;
    this could be a GridSearchCV.

    :param X: feature matrix to reduce
    :param red: unfitted reducer; a clone is parametrized per run
    :return: (array of tried n_components, list of mean CV scores)
    """
    from sklearn.base import clone
    from sklearn.model_selection import cross_val_score
    estimator = clone(red)
    # Log-spaced candidate dimensionalities, capped at 200 or the
    # number of available features, de-duplicated after int cast.
    upper = np.minimum(X.shape[1], 200)
    grid = np.unique(np.logspace(0, np.log10(upper), num=10).astype(int))
    mean_scores = []
    for n in tqdm(grid):
        estimator.n_components = n
        mean_scores.append(np.mean(cross_val_score(estimator, X, cv=3)))
    return grid, mean_scores
def plot_factor_analysis_reconstruction():
    """
    Estimate number of factors based on cross-validated model likelihood.
    Plot a matrix of original vs varimax rotated inferred factors.

    Saves one figure per data variant as fig/fa_<name>.pdf.
    """
    from sklearn.decomposition import FactorAnalysis
    from scipy.stats import spearmanr
    for name, kw, fs in make_data(n_fake_features=40):
        (out_features, out_labels, out_usefulness, out_names,
         hidden_features, hidden_usefulness) = fs
        # Rank hidden features by decreasing usefulness.
        sorter = np.argsort(hidden_usefulness)[::-1]  # decreasing
        ranked_usefulness = hidden_usefulness[sorter]
        ranked_hidden_features = hidden_features[:, sorter]
        fa = FactorAnalysis(rotation='varimax')
        n_hidden = hidden_features.shape[1]
        # Pick n_components by maximum cross-validated likelihood.
        n_components, scores = compute_scores_for_n_components(out_features,
                                                               fa)
        n_ml = n_components[np.argmax(scores)]
        fa.n_components = n_ml
        reconstructred = fa.fit_transform(out_features, out_labels)
        print(out_features.shape, reconstructred.shape)
        # Cross-correlations: hidden vs reconstructed, hidden vs raw output.
        corr_result = spearmanr(ranked_hidden_features, reconstructred)
        reconstruction_corr = corr_result.correlation[:n_hidden, n_hidden:]
        corr_result = spearmanr(ranked_hidden_features, out_features)
        out_corr = corr_result.correlation[:n_hidden, n_hidden:]
        fig, ax = plt.subplots(2, 2,
                               figsize=(8, 6))  # type: plt.Figure, plt.Axes
        ax = ax.ravel()  # type: list[plt.Axes]
        ax[3].invert_yaxis()
        ax[2].get_shared_y_axes().join(ax[3])
        # Panel 1: how strongly any output feature tracks a hidden one.
        h0 = ax[1].hist(out_corr.max(axis=0))
        ax[1].semilogy()
        ax[1].set_xlabel('max correlation to any hidden feature')
        ax[1].set_ylabel('# output features')
        # Panel 0: likelihood curve used to select n_components.
        l1, = ax[0].plot(n_components, scores, marker='o')
        ax[0].semilogx()
        ax[0].set_xlabel('n_components')
        ax[0].set_ylabel('likelihood')
        # Panel 2: |correlation| matrix original vs reconstructed factors.
        mx = ax[2].matshow(np.abs(reconstruction_corr), vmin=0, vmax=1)
        ax[2].set_xlabel('reconstructed')
        ax[2].set_ylabel('original')
        plt.colorbar(mx, ax=ax[3])
        # Panel 3: usefulness vs detectability per usefulness rank.
        l2u, = ax[3].plot(ranked_usefulness, np.arange(n_hidden),
                          label='usefulness')
        f2u = ax[3].fill_betweenx(np.arange(n_hidden), 0,
                                  ranked_usefulness, alpha=0.4,
                                  color=l2u.get_color())
        sac = np.max(np.abs(reconstruction_corr), axis=1)
        l2c, = ax[3].plot(sac, np.arange(n_hidden), label='max abs corr')
        f2c = ax[3].fill_betweenx(np.arange(n_hidden), 0, sac,
                                  alpha=0.4, color=l2c.get_color())
        ax[3].set_xlabel('usefulness or detectability')
        ax[3].set_ylabel('rank')
        ax[3].legend()
        fig.savefig('fig/fa_{}.pdf'.format(name))
    plt.show()
# # # Figures for the targeted usefulness of hidden features # # #
@contextmanager
def intercept_ef():
    """
    Hack to get parametrization of EffectiveFeatures within a context.

    Temporarily replaces generator_api.EffectiveFeature with a recording
    subclass; yields the list that accumulates every instance whose
    get_samples() is called. The original class is restored on exit.
    """
    from biometric_blender import generator_api
    original = generator_api.EffectiveFeature
    instances = []

    class Replacement(generator_api.EffectiveFeature):
        # Record the instance, then defer to the real implementation.
        def get_samples(self, *args, **kwargs):
            instances.append(self)
            return super(Replacement, self).get_samples(*args, **kwargs)

    generator_api.EffectiveFeature = Replacement
    generator_api.patched = True  # marker so other code can detect the patch
    try:
        yield instances
    finally:
        # to check: do we restore original state under all circumstances
        generator_api.EffectiveFeature = original
        del generator_api.patched
def plot_1d_locations(
        ax: plt.Axes,
        ef: EffectiveFeature,
        reverse: bool,
        normalize: bool
):
    """
    Plot the pdf of every per-label sampling distribution of *ef*, plus
    the location distribution itself as a dashed black curve.

    :param ax: target axes
    :param ef: fitted EffectiveFeature providing locations_/scales_
    :param reverse: swap x/y so the pdf is drawn vertically
    :param normalize: rescale each pdf to a unit peak
    """
    def draw(dist, shift=0., **plot_kwargs):
        # Sample the pdf on mean +/- 4 standard deviations.
        lo, hi = dist.mean() + np.array([-4, 4]) * dist.std()
        xs = np.linspace(lo, hi, 40)
        ys = dist.pdf(xs)
        if normalize:
            ys = ys / np.max(ys) + shift
        coords = (ys, xs) if reverse else (xs, ys)
        ax.plot(*coords, **plot_kwargs)

    shift = 0.
    for idx, (loc, scale) in enumerate(zip(ef.locations_, ef.scales_)):
        draw(ef.sampling_distribution(loc, scale),
             linestyle='-', shift=shift * idx)
    draw(ef.location_distribution, shift=shift * len(ef.locations_),
         color='k', linestyle='--')
def plot_2d_realizations(ax: plt.Axes, fs: np.ndarray, labels: np.ndarray):
    """Scatter the two feature columns of *fs*, one series per label."""
    frame = pd.DataFrame(fs,
                         index=pd.Index(labels, name='labels'),
                         columns=['x', 'y'])
    for _, group in frame.groupby('labels'):
        ax.plot(group.x, group.y,
                marker='o', markersize=2, linestyle='none')
def make_features_by_usefulness(
        seed: int = 137,
        usefulness: float = 0.1,
) -> Tuple[np.ndarray, np.ndarray, List[EffectiveFeature]]:
    """
    Generate a small 2-feature hidden space at one fixed usefulness.

    :return: (features, labels, captured EffectiveFeature instances)
    """
    from scipy import stats
    from biometric_blender import generator_api
    rng = check_random_state(seed)
    with intercept_ef() as captured:
        # Positional signature of generate_hidden_features; both the min
        # and max usefulness are pinned to the same value.
        features, labels, _, _ = generator_api.generate_hidden_features(
            10, 16, 2, 0, usefulness, usefulness, 'linear', None, stats.norm,
            stats.uniform(0.5, 1.5), stats.norm, 2, 2, rng)
    return features, labels, captured
def make_slides_usefulness(seed=137):
    """
    Show the effect of usefulness on two features and their locations
    (each usefulness is saved to a separate figure like a slideshow).

    Writes fig/usefulness-zoom-<i>.png (auto-scaled axes) and
    fig/usefulness-fixed-<i>.png (fixed [-30, 30] axes) per usefulness.
    """
    for i, usefulness in enumerate([0.01, 0.1, 0.3, 0.5, 0.99]):
        fs, labels, instances = make_features_by_usefulness(
            seed=seed, usefulness=usefulness
        )
        # 2x2 grid: marginal pdfs on the edges, joint scatter bottom-left.
        fig, ax = plt.subplots(2, 2, sharex='col', sharey='row')
        plot_1d_locations(ax[0, 0], instances[0],
                          reverse=False, normalize=False)
        plot_1d_locations(ax[1, 1], instances[1],
                          reverse=True, normalize=False)
        plot_2d_realizations(ax[1, 0], fs, labels)
        ax[0, 1].remove()
        ax[1, 0].set_xlabel('feature A')
        ax[1, 0].set_ylabel('feature B')
        fig.suptitle(f'usefulness={usefulness}')
        fig.savefig(f'fig/usefulness-zoom-{i}.png')
        # Save again with fixed limits so frames are comparable.
        ax[1, 0].set_xlim([-30, 30])
        ax[1, 0].set_ylim([-30, 30])
        fig.suptitle(f'usefulness={usefulness}')
        fig.savefig(f'fig/usefulness-fixed-{i}.png')
def make_figure_usefulness(seed=137):
    """
    Show the effect of usefulness on two features
    (save the figure presented in the paper).

    Writes fig/usefulness-autozoom.png and .pdf.
    """
    def get_mnl_top():
        # Sparse integer ticks for the pdf row.
        return MaxNLocator(nbins=1, integer=True,
                           symmetric=False, min_n_ticks=2)

    def get_mnl_bottom():
        # Symmetric integer ticks for the scatter row.
        return MaxNLocator(nbins=2, integer=True,
                           symmetric=True, min_n_ticks=3)

    fig, ax = plt.subplots(2, 3, figsize=(5, 3),
                           gridspec_kw={'wspace': 0.3},
                           sharex='col', sharey=False)
    for i, usefulness in enumerate([0.2, 0.4, 0.6]):
        fs, labels, instances = make_features_by_usefulness(
            seed=seed, usefulness=usefulness
        )
        plot_1d_locations(ax[0, i], instances[0],
                          reverse=False, normalize=False)
        plot_2d_realizations(ax[1, i], fs, labels)
        # Force the pdf panels to include the [0, 1] band.
        ax[0, i].update_datalim([[0, 0], [0, 1]])
        ax[0, i].yaxis.set_major_locator(get_mnl_top())
        ax[1, i].xaxis.set_major_locator(get_mnl_bottom())
        ax[1, i].yaxis.set_major_locator(get_mnl_bottom())
        ax[0, i].set_title(f'usefulness={usefulness}')
    ax[0, 0].set_ylabel('pdf of A')
    ax[1, 0].set_xlabel('feature A')
    ax[1, 0].set_ylabel('feature B')
    fig.align_ylabels(ax[:, 0])
    fig.savefig(f'fig/usefulness-autozoom.png', bbox_inches='tight')
    fig.savefig(f'fig/usefulness-autozoom.pdf', bbox_inches='tight')
# # # Entry point # # #
def main():
    """Entry point: recreate every table and figure under fig/."""
    import os
    os.makedirs('fig', exist_ok=True)
    print('scoring takes a while...')
    # score_gridsearch_classifiers()  # uncomment to regenerate fig/gridsearch.csv
    for data_name in ['true', 'hidden', 'full']:
        make_table_accuracy(data_name)
        make_figure_accuracy(data_name)
    make_figure_usefulness()


if __name__ == '__main__':
    main()
|
import argparse
import glob
import os
# CLI: where to read per-parameter run logs and where to collect the images.
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', default='logs/ddpg_pendulum/norm_one',
                    help='Log dir [default: logs/ddpg_pendulum/norm_one]')
parser.add_argument('--save_dir', default='docs/ddpg_pendulum/norm_one',
                    help='Path of directory to saved [default: docs/ddpg_pendulum/norm_one]')
FLAGS = parser.parse_args()

LOG_DIR = FLAGS.log_dir
SAVE_DIR = FLAGS.save_dir

# The source must exist; the destination is created on demand.
assert (os.path.exists(LOG_DIR))
if not os.path.exists(SAVE_DIR):
    os.makedirs(SAVE_DIR)
def collect():
    """Copy every PNG from the per-parameter LOG_DIR subdirectories into SAVE_DIR.

    Subdirectories are named after the swept parameter values 0.1 .. 0.8.
    """
    import shutil
    for j in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]:
        input_dir = os.path.join(LOG_DIR, str(j))
        files = glob.glob(os.path.join(input_dir, "*.png"))
        for fin in files:
            # os.path.basename is portable; the old fin[fin.rindex("/")+1:]
            # slicing broke on Windows separators and bare filenames.
            filename = os.path.basename(fin)
            fout = os.path.join(SAVE_DIR, filename)
            # print() replaces the Python-2-only `print "..."` statement, and
            # shutil.copy replaces os.system("cp ...") which depended on a
            # shell and mis-handled quotes/spaces in filenames.
            print("cp '%s' '%s'" % (fin, fout))
            shutil.copy(fin, fout)


if __name__ == "__main__":
    collect()
|
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
import cProfile
import pstats
from datetime import datetime
from mo_future import iteritems
from mo_logs import Log
FILENAME = "profile.tab"  # default output path; overridden by enable_profilers(filename)
cprofiler_stats = None  # ACCUMULATION OF STATS FROM ALL THREADS (None = profiling disabled)
class CProfiler(object):
    """
    cProfiler CONTEXT MANAGER WRAPPER

    Profiling only activates when the module-level cprofiler_stats queue
    exists (see enable_profilers()); otherwise every method is a no-op.
    """

    __slots__ = ["cprofiler"]

    def __init__(self):
        # None means "not currently profiling".
        self.cprofiler = None

    def __enter__(self):
        if cprofiler_stats is not None:
            Log.note("starting cprofile")
            self.cprofiler = cProfile.Profile()
            self.cprofiler.enable()
        return self  # enable `with CProfiler() as p:` (previously returned None)

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.cprofiler is not None:
            self.cprofiler.disable()
            cprofiler_stats.add(pstats.Stats(self.cprofiler))
            # Reset to None instead of `del`: with __slots__, a deleted
            # attribute made any later enable()/disable() raise
            # AttributeError instead of being a harmless no-op.
            self.cprofiler = None
            Log.note("done cprofile")

    def enable(self):
        if self.cprofiler is not None:
            return self.cprofiler.enable()

    def disable(self):
        if self.cprofiler is not None:
            return self.cprofiler.disable()
def enable_profilers(filename):
    """
    Turn on cProfile accumulation for all known threads.

    Idempotent: returns immediately if profiling was already enabled.
    Attaches a CProfiler to every registered thread, but only starts the
    current thread's profiler here.

    :param filename: optional output path overriding the FILENAME default
    """
    global FILENAME
    global cprofiler_stats
    if cprofiler_stats is not None:
        return
    if filename:
        FILENAME = filename

    from mo_threads.threads import ALL_LOCK, ALL, Thread
    from mo_threads.queues import Queue

    cprofiler_stats = Queue("cprofiler stats")

    current_thread = Thread.current()
    # Snapshot the registry under the lock, then work on the copy.
    with ALL_LOCK:
        threads = list(ALL.values())
    for t in threads:
        t.cprofiler = CProfiler()
        if t is current_thread:
            Log.note("starting cprofile for thread {{name}}", name=t.name)
            t.cprofiler.__enter__()
        else:
            # NOTE(review): the message says "already running", but this branch
            # simply means "not the current thread" — those profilers are never
            # entered here; confirm the intended wording/behavior.
            Log.note("cprofiler not started for thread {{name}} (already running)", name=t.name)
def write_profiles(main_thread_profile):
    """
    Aggregate the stats collected from all threads and write them to a
    timestamped tab-delimited file (FILENAME plus a _%Y%m%d_%H%M%S suffix).

    :param main_thread_profile: the main thread's CProfiler, added last
    """
    if cprofiler_stats is None:
        return

    from pyLibrary import convert
    from mo_files import File

    cprofiler_stats.add(pstats.Stats(main_thread_profile.cprofiler))
    stats = cprofiler_stats.pop_all()

    Log.note("aggregating {{num}} profile stats", num=len(stats))
    acc = stats[0]
    for s in stats[1:]:
        acc.add(s)

    stats = [
        {
            "num_calls": d[1],
            "self_time": d[2],
            "total_time": d[3],
            # Guard the ratios: a call count of zero would divide by zero.
            "self_time_per_call": d[2] / d[1] if d[1] else 0,
            "total_time_per_call": d[3] / d[1] if d[1] else 0,
            "file": (f[0] if f[0] != "~" else "").replace("\\", "/"),
            "line": f[1],
            "method": f[2].lstrip("<").rstrip(">")
        }
        for f, d, in iteritems(acc.stats)
    ]
    stats_file = File(FILENAME, suffix=convert.datetime2string(datetime.now(), "_%Y%m%d_%H%M%S"))
    stats_file.write(convert.list2tab(stats))
    # Fixed template: was "{(unknown)}", which never expanded the filename.
    Log.note("profile written to {{filename}}", filename=stats_file.abspath)
|
#!/usr/bin/python
def trace(traced_function):
    """Decorator that brackets each call with '>>' / '<<' markers.

    Fixes over the original: the wrapped function's return value is now
    propagated (it was silently dropped), and functools.wraps preserves
    the wrapped function's name and docstring. The single-argument
    print() form is valid in both Python 2 and 3.
    """
    import functools

    @functools.wraps(traced_function)
    def inner(*args, **kwargs):
        print('>>')
        result = traced_function(*args, **kwargs)
        print('<<')
        return result
    return inner
@trace
def fun1(x, y):
    """Print both arguments with labels (traced demo function)."""
    # %-formatting produces the same text as the Python-2-only
    # `print 'x:', x, 'y:', y` statement, but is valid Python 3 too.
    print('x: %s y: %s' % (x, y))
@trace
def fun2(x, y, z):
    """Print the three arguments comma-joined (traced demo function)."""
    # Single-argument print() form is valid in both Python 2 and 3,
    # replacing the Python-2-only print statement.
    print(x + ',' + y + ',' + z)
def test():
    """Exercise both traced demo functions."""
    fun1('aa', 'bb')
    fun2('er', 'st', 'lib')


# Guard on its own line (the original `if ...:test()` one-liner violates E701).
if __name__ == '__main__':
    test()
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0OA
#
# Authors:
# - Wen Guan, <wen.guan@cern.ch>, 2019
"""
Main start entry point for iDDS service
"""
import logging
import signal
import time
import traceback
from idds.common.constants import Sections
from idds.common.config import config_has_section, config_has_option, config_list_options, config_get
from idds.common.utils import setup_logging
setup_logging('idds.log')
# Registry of supported agents: name -> [dotted path of the implementation
# class, config section holding its attributes].
AGENTS = {
    'baseagent': ['idds.agents.common.baseagent.BaseAgent', Sections.Common],
    'clerk': ['idds.agents.clerk.clerk.Clerk', Sections.Clerk],
    'marshaller': ['idds.agents.marshaller.marshaller.Marshaller', Sections.Marshaller],
    'transformer': ['idds.agents.transformer.transformer.Transformer', Sections.Transformer],
    'transporter': ['idds.agents.transporter.transporter.Transporter', Sections.Transporter],
    'carrier': ['idds.agents.carrier.carrier.Carrier', Sections.Carrier],
    'conductor': ['idds.agents.conductor.conductor.Conductor', Sections.Conductor]
}

# Agent instances started by run_agents(); pruned by the main loop and stop().
RUNNING_AGENTS = []
def load_config_agents():
    """Return the agent names configured in the [main] section, or []."""
    if not (config_has_section(Sections.Main)
            and config_has_option(Sections.Main, 'agents')):
        return []
    configured = config_get(Sections.Main, 'agents')
    return [name.strip() for name in configured.split(',')]
def load_agent_attrs(section):
    """
    Load agent attributes from one config section.

    Options named 'plugin.*' are skipped; string values 'true'/'false'
    (case-insensitive) are converted to booleans.

    :param section: config section name
    :return: dict of option -> value
    """
    attrs = {}
    logging.info("Loading config for section: %s" % section)
    if config_has_section(section):
        for option, value in config_list_options(section):
            if option.startswith('plugin.'):
                continue
            if isinstance(value, str):
                lowered = value.lower()
                if lowered == 'true':
                    value = True
                elif lowered == 'false':
                    value = False
            attrs[option] = value
    return attrs
def load_agent(agent):
    """
    Instantiate the agent implementation registered under *agent*.

    :param agent: key into the AGENTS registry
    :raises Exception: if the agent name is not registered
    :return: agent instance constructed with its configured attributes
    """
    import importlib
    if agent not in AGENTS:
        logging.critical("Configured agent %s is not supported." % agent)
        raise Exception("Configured agent %s is not supported." % agent)
    agent_cls, agent_section = AGENTS[agent]
    attrs = load_agent_attrs(agent_section)
    logging.info("Loading agent %s with class %s and attributes %s" % (agent, agent_cls, str(attrs)))
    module_name, _, class_name = agent_cls.rpartition('.')
    # importlib.import_module replaces the __import__(..., fromlist=[None])
    # hack, which relied on CPython tolerating None in fromlist.
    module = importlib.import_module(module_name)
    cls = getattr(module, class_name)
    return cls(**attrs)
def run_agents():
    """
    Load, start and supervise all configured agents.

    Blocks while agents are alive; if any agent dies the loop logs
    critically and returns so the caller can shut everything down.
    """
    global RUNNING_AGENTS
    agents = load_config_agents()
    logging.info("Configured to run agents: %s" % str(agents))
    for agent in agents:
        agent_thr = load_agent(agent)
        RUNNING_AGENTS.append(agent_thr)
    for agent in RUNNING_AGENTS:
        agent.start()
    while len(RUNNING_AGENTS):
        # Short join timeouts keep the loop responsive to signal handlers
        # while still reaping finished threads.
        [thr.join(timeout=3.14) for thr in RUNNING_AGENTS if thr and thr.is_alive()]
        RUNNING_AGENTS = [thr for thr in RUNNING_AGENTS if thr and thr.is_alive()]
        if len(agents) != len(RUNNING_AGENTS):
            # At least one agent died: abort the supervision loop.
            logging.critical("Number of active agents(%s) is not equal number of agents should run(%s)" % (len(RUNNING_AGENTS), len(agents)))
            logging.critical("Exit main run loop.")
            break
def stop(signum=None, frame=None):
    """
    Signal handler / shutdown routine: stop agents, then escalate.

    Politely stop()s every live agent and waits up to ~180 s; anything
    still alive after that is terminate()d and joined.

    :param signum: signal number when invoked as a handler (unused)
    :param frame: current stack frame when invoked as a handler (unused)
    """
    global RUNNING_AGENTS
    logging.info("Stopping ......")
    logging.info("Stopping running agents: %s" % RUNNING_AGENTS)
    [thr.stop() for thr in RUNNING_AGENTS if thr and thr.is_alive()]
    stop_time = time.time()
    while len(RUNNING_AGENTS):
        [thr.join(timeout=3.14) for thr in RUNNING_AGENTS if thr and thr.is_alive()]
        RUNNING_AGENTS = [thr for thr in RUNNING_AGENTS if thr and thr.is_alive()]
        if time.time() > stop_time + 180:
            # Grace period exhausted; fall through to hard termination.
            break
    logging.info("Still running agents: %s" % str(RUNNING_AGENTS))
    [thr.terminate() for thr in RUNNING_AGENTS if thr and thr.is_alive()]
    while len(RUNNING_AGENTS):
        [thr.join(timeout=3.14) for thr in RUNNING_AGENTS if thr and thr.is_alive()]
        RUNNING_AGENTS = [thr for thr in RUNNING_AGENTS if thr and thr.is_alive()]
if __name__ == '__main__':
    # Service managers (SIGTERM) and terminals (SIGINT/SIGQUIT) all
    # trigger the same clean shutdown path.
    signal.signal(signal.SIGTERM, stop)
    signal.signal(signal.SIGQUIT, stop)
    signal.signal(signal.SIGINT, stop)
    try:
        run_agents()
        stop()
    except KeyboardInterrupt:
        stop()
    except Exception as error:
        logging.error("An exception is caught in main process: %s, %s" % (error, traceback.format_exc()))
        stop()
|
# Parenthesis-balance checker: reads expressions from stdin until EOF and
# prints 'correct' when the parentheses balance, 'incorrect' otherwise.
try:
    while True:
        expressao = input()
        numero_de_aberturas = 0
        correto = True
        for c in expressao:
            if c == '(':
                numero_de_aberturas += 1
            elif c == ')':
                numero_de_aberturas -= 1
            # A negative counter means a ')' appeared before its '(' —
            # no further characters can fix that, so stop scanning.
            if numero_de_aberturas < 0:
                break
        if numero_de_aberturas != 0:
            correto = False
        print('correct' if correto else 'incorrect')
# Catch only end-of-input: the old bare `except:` silently swallowed
# every error, including real bugs and KeyboardInterrupt.
except EOFError:
    pass
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from daliulian import settings
class DaliulianPipeline(object):
    """Scrapy pipeline persisting scraped TV shows and their download links
    into MySQL (tables tv_name and tv_urls)."""

    def __init__(self):
        # Connect to the database using credentials from project settings.
        self.connect = pymysql.connect(
            host = settings.MYSQL_HOST,
            db = settings.MYSQL_DBNAME,
            user = settings.MYSQL_USER,
            passwd = settings.MYSQL_PASSWD,
            charset = 'utf8',
            use_unicode = True
        )
        self.cursor = self.connect.cursor()

    def process_item(self, item, spider):
        """Upsert the show into tv_name, then insert any new link URLs
        into tv_urls keyed by the show's id."""
        # NOTE(review): the broad `except Exception: print(error)` swallows DB
        # errors and still returns the item — confirm that best-effort
        # persistence is intentional.
        try:
            # Insert into the tv_name table.
            # First check whether the show already exists.
            self.cursor.execute("""select * from tv_name where name=%s""", item['name'])
            repetition = self.cursor.fetchone()
            if repetition:
                print('Already exists in the database')
                self.cursor.execute("""select id from tv_name where name=%s""", item['name'])
                new_id = self.cursor.fetchone()[0]
            else:
                print('ok')
                self.cursor.execute("""insert into tv_name(name, f_url) VALUE(%s, %s) """, (item['name'], item['link']))
                # Use the auto-increment id as the foreign key for tv_urls.
                new_id = self.cursor.lastrowid
                self.connect.commit()
            # De-duplicate link URLs before inserting into tv_urls.
            for i in [item['baidu_link'],item['ed2k_link'],item['magnet_link'],item['thunder_link']]:
                if i is not None:
                    # Each link field is presumably a {name: url} mapping —
                    # TODO confirm against the spider's item definition.
                    for k, v in i.items():
                        self.cursor.execute("""select * from tv_urls where url=%s""", v)
                        repetition = self.cursor.fetchone()
                        if repetition:
                            print('Already exists in the database')
                            continue
                        else:
                            print('ok')
                            self.cursor.execute("""insert into tv_urls(url_name, url, nid) VALUE(%s, %s, %s) """, (k, v, new_id))
                            self.connect.commit()
                else:
                    print("There is nothing inside")
        except Exception as error:
            print(error)
        return item
|
#Algoritmos Computacionais e Estruturas de Dados
#Lista Simplesmente Encadeada em Python
#Prof.: Laercio Brito
#Dia: 28/01/2022
#Turma 2BINFO
#Alunos:
#Dora Tezulino Santos
#Guilherme de Almeida Torrão
#Mauro Campos Pahoor
#Victor Kauã Martins Nunes
#Victor Pinheiro Palmeira
#Lucas Lima
#Questão 7
class No:
    """Singly-linked list node ('nó'): payload plus next pointer."""

    def __init__(self, valor):
        # prox is None until the node is linked into a list.
        self.valor, self.prox = valor, None
class ListaEncadeada:
    """Singly linked list.

    Fixes over the original: methods now recurse through ``self`` instead
    of the module-level ``lista`` instance (so any instance works), and
    ``ultimo_valor_rec`` returns the last node instead of smuggling it
    through a global variable ``yes``.
    """

    def __init__(self):
        # Head of the list; None when empty.
        self.inicio = None

    def return_start(self):
        """Return the head node (or None for an empty list)."""
        return self.inicio

    def ultimo_valor_rec(self, aux):
        """Return the last node reachable from *aux* (recursive walk)."""
        if aux.prox is not None:
            return self.ultimo_valor_rec(aux.prox)
        return aux

    def inserir(self, valor):
        """Prepend *valor* at the head (O(1))."""
        novo = No(valor)
        novo.prox = self.inicio
        self.inicio = novo

    def inserir_fim(self, valor):
        """Append *valor* at the tail (O(n))."""
        if self.inicio is None:
            # Empty list: prepending and appending are the same.
            self.inserir(valor)
        else:
            self.ultimo_valor_rec(self.inicio).prox = No(valor)

    def criar_lista(self):
        """Interactively append values until a non-positive value is read."""
        valor = int(input("Digite o valor: "))
        if valor <= 0:
            return 0
        self.inserir_fim(valor)
        self.criar_lista()

    def print_list(self, aux):
        """Print every valor from *aux* onward, then a footer message."""
        if aux is not None:
            print(aux.valor)
            self.print_list(aux.prox)
        else:
            return print('Programa Finalizado.')
# Interactive demo: build a list until a non-positive value is entered,
# print it, append one more value at the tail, and print again.
lista = ListaEncadeada()
lista.criar_lista()
lista.print_list(lista.return_start())
lista.inserir_fim(int(input('Entre com um valor a ser inserido no fim da lista: ')))
lista.print_list(lista.return_start())
#!/usr/bin/python
import rospy
import std_msgs
print("starting jrk_test_pub")
def talker():
    """Publish a fixed sweep of jrk target values, one every 10 seconds."""
    jrk_pub = rospy.Publisher('jrk_target', std_msgs.msg.UInt16, queue_size=1)
    rospy.init_node('test_jrk_pub')
    targets = (0, 800, 1200, 1500, 2000, 2500, 3000, 4000)
    for target in targets:
        print(target)
        jrk_pub.publish(target)
        rospy.sleep(10)
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # Expected when the node is shut down mid-sleep; nothing to clean up.
        pass
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobuf/auth.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobuf/auth.proto',
package='auth',
syntax='proto3',
serialized_options=b'Z\n./protobuf',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x13protobuf/auth.proto\x12\x04\x61uth\"-\n\x07Request\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\"\x1a\n\x08Response\x12\x0e\n\x06result\x18\x01 \x01(\t28\n\x04\x41UTH\x12\x30\n\tAuthLogin\x12\r.auth.Request\x1a\x0e.auth.Response\"\x00(\x01\x30\x01\x42\x0cZ\n./protobufb\x06proto3'
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='auth.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='username', full_name='auth.Request.username', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='password', full_name='auth.Request.password', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=29,
serialized_end=74,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='auth.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='auth.Response.result', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=102,
)
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), {
'DESCRIPTOR' : _REQUEST,
'__module__' : 'protobuf.auth_pb2'
# @@protoc_insertion_point(class_scope:auth.Request)
})
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _RESPONSE,
'__module__' : 'protobuf.auth_pb2'
# @@protoc_insertion_point(class_scope:auth.Response)
})
_sym_db.RegisterMessage(Response)
DESCRIPTOR._options = None
_AUTH = _descriptor.ServiceDescriptor(
name='AUTH',
full_name='auth.AUTH',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=104,
serialized_end=160,
methods=[
_descriptor.MethodDescriptor(
name='AuthLogin',
full_name='auth.AUTH.AuthLogin',
index=0,
containing_service=None,
input_type=_REQUEST,
output_type=_RESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_AUTH)
DESCRIPTOR.services_by_name['AUTH'] = _AUTH
# @@protoc_insertion_point(module_scope)
|
"""
Literales de cadena de idioma ESPAÑOL para la aplicación PyMandel tkinter
Creado el 22 abr 2020
@autor: semuadmin
"""
# pylint: disable=line-too-long
WIKIURL = "https://en.wikipedia.org/wiki/Mandelbrot_set"
GITHUBURL = "https://github.com/semuconsulting/PyMandel"
CETURL = "https://github.com/holoviz/colorcet/blob/master/LICENSE.txt"
MODULENAME = "pymandel"
COPYRIGHTTXT = "\u00A9 SEMU Consulting 2020 \nBSD 2 Cláusula Licencia. Todos los derechos reservados"
COLORCETTXT = "Mapas de color HoloViz Colorcet disponibles bajo licencia de\nCreative Commons Attribution (CC_BY)"
INTROTXT = "Bienvenido a PyMandel! Use la rueda del mouse o haga clic izquierdo para hacer zoom, haga clic derecho para centrar."
HELPTXT = (
"Ingrese la configuración manualmente (o impórtela de un archivo de metadatos) y haga clic en TRAZER para crear una imagen fractal con los parámetros especificados.\n\n"
+ "Use la rueda del mouse para acercar y alejar la ubicación actual del cursor.\n"
+ "Clic izquierdo, arrastre y suelte: amplíe un área rectangular dibujada.\n"
+ "Clic izquierdo - acerca la ubicación del cursor por la cantidad de incremento de zoom.\n"
+ "Shift y Clic izquierdo - alejar.\n"
+ "Presione Izquierda \u25C0 o Derecha \u25B6 en modo Julia para rotar el Conjunto Julia sobre su origen.\n\n"
+ "Botón TRAZER - trazar la imagen con la configuración actual.\n"
+ "Botón Guardar: guarda la imagen que se muestra actualmente como un archivo .png junto con sus metadatos asociados como un archivo .json.\n"
+ "Botón Cancelar: cancela la operación de trazado actual.\n"
+ "Botón Restablecer: restablece los parámetros a los valores predeterminados.\n\n"
+ "Botón de zoom: crea automáticamente una secuencia de imágenes con zoom.\n"
+ "Botón Girar: crea automáticamente una secuencia de imágenes giratorias de Julia.\n\n"
+ "Archivo..Configuración de exportación: exporta la configuración actual (metadatos).\n"
+ "Archivo..Configuración de importación: importa metadatos guardados previamente.\n"
+ "Opciones..Ocultar / Mostrar configuración: activa o desactiva el Panel de configuración.\n"
+ "Opciones..Ocultar / Mostrar estado: activa o desactiva la barra de estado.\n"
+ "Ayuda..Cómo: mostrar este cuadro de diálogo Cómo.\n"
+ "Ayuda..Acerca de: muestra el cuadro de diálogo Acerca de."
)
ABOUTTXT = (
"PyMandel es una aplicación GUI gratuita de código abierto escrita completamente en Python y tkinter con mejoras de rendimiento de Numba.\n\n"
+ "Las instrucciones y el código fuente están disponibles en Github en el siguiente enlace."
)
# Texto de Mensaje
JITTXT = "USO POR PRIMERA VEZ SOLO: espere la compilación y el almacenamiento en caché de JIT"
SETINITTXT = "Configuración inicializada"
VALERROR = "ERROR! Corrija las entradas resaltadas"
SAVEERROR = "ERROR! El archivo no se pudo guardar en el directorio especificado"
METASAVEERROR = (
"ERROR! El archivo de metadatos no se pudo guardar en el directorio especificado"
)
NOIMGERROR = "ERROR! Se debe crear una imagen antes de guardar"
OPENFILEERROR = "ERROR! El archivo no se pudo abrir"
BADJSONERROR = "ERROR! Archivo de metadatos no válido"
SAVETITLE = "Seleccionar Guardar Directorio"
SELTITLE = "Seleccionar archivo para importar"
METAPROMPTTXT = "importado, haga clic en TRAZER para continuar"
IMGSAVETXT = "Imagen guardada como"
COMPLETETXT = "Operación completada en"
INPROGTXT = "Operación en progreso ..."
OPCANTXT = "Operación cancelada"
COORDTXT = "Coordenadas:"
COORDPOLTXT = "Cordones polares:"
FRMTXT = "Marco"
FRMSTXT = "Marcos"
# Texto de Menú
MENUFILE = "Archivo"
MENUOPTIONS = "Opciones"
MENUSAVE = "Guardar Imagen"
MENUEXPORT = "Exportar Parámetros"
MENUIMPORT = "Importar Parámetros"
MENUEXIT = "Salir"
MENUPLOT = "Trazar Imagen"
MENUZOOM = "Trazar Animación de zoom"
MENUSPIN = "Trazar Animación de Rotación Julia"
MENUCAN = "Cancelar"
MENURST = "Restablecer"
MENUHIDESE = "Ocultar Parámetros"
MENUSHOWSE = "Mostrar Parámetros"
MENUHIDESB = "Ocultar Barra de Estado"
MENUSHOWSB = "Mostrar Barra de Estado"
MENUHIDEAX = "Ocultar Ejes"
MENUSHOWAX = "Mostrar Ejes"
MENUHOWTO = "Cómo"
MENUABOUT = "Acerca de"
MENUHELP = "Ayuda"
# Texto de Botón
BTNPLOT = "TRAZER"
BTNSAVE = "Guardar"
BTNCAN = "Cancelar"
BTNRST = "Restablecer"
BTNZOOM = "Zoom"
BTNSPIN = "Girar"
# Texto de etiqueta
LBLCTL = "Controles"
LBLSET = "Parámetros"
LBLMODE = "Conjunto tipo"
LBLVAR = "Variant"
LBLAUTO = "Animar:"
LBLTHEME = "Tema"
LBLEXP = "Exponente"
LBLRAD = "Escape\nRadio"
LBLSHIFT = "Tema\nCambiar"
LBLITER = "Max\nIteraciones"
LBLZOOM = "Zoom"
LBLZOOMINC = "Zoom\nIncremento"
LBLZXOFF = "ZX Offset"
LBLZYOFF = "ZY Offset"
LBLCX = "Julia CX"
LBLCY = "Julia CY"
# Texto de diálogo
DLGABOUT = "Acerca de PyMandel"
DLGHOWTO = "Cómo usar PyMandel"
|
def main():
    """Print the integers 1 through 10, one per line."""
    for value in range(1, 11):
        print(value)


if __name__ == '__main__':
    main()
|
import pytest
from django.contrib.auth import get_user_model
from djangito_client.io import save_user_string_fields, save_user_foreign_key_fields
@pytest.fixture
def string_data():
    """Fixture: a user payload containing only string/scalar fields."""
    return dict(
        date_joined="2020-06-07T20:06:32Z",
        is_superuser=True,
        last_name="Jackson",
        last_login="2020-06-26T20:20:44.522017Z",
        username="pandichef",
        first_name="Michael",
        is_staff=True,
        is_active=True,
        email="mike@gmail.com",
        server_id=1,
    )
@pytest.fixture
def foreign_key_data():
    """Fixture: a nested payload describing the user's related company."""
    company = {"name": "Jackson5", "primary_activity": 2, "server_id": 1}
    return {"company": company}
@pytest.mark.django_db
def test_save_user_string_fields(string_data) -> None:
    """Saving string fields creates a user retrievable by server_id."""
    saved = save_user_string_fields(string_data)
    assert saved == saved.__class__.objects.get(server_id=1)
@pytest.mark.django_db
def test_UPDATE_user_string_fields(string_data) -> None:
    """An existing user (matched on server_id) is updated in place, keeping its pk."""
    UserModel = get_user_model()
    UserModel(server_id=1, username='bradpitt').save()
    existing = UserModel.objects.get(server_id=1)
    pk_before = existing.pk
    # sanity: the pre-existing row is the one we just created
    assert existing.server_id == 1
    assert existing.username == 'bradpitt'
    updated = save_user_string_fields(string_data)
    # same server_id and same pk, but the username was overwritten
    assert updated.server_id == 1
    assert updated.username == 'pandichef'
    assert updated.pk == pk_before
@pytest.mark.django_db
def test_UPDATE_user_foreign_key_fields(string_data, foreign_key_data) -> None:
    """Foreign-key payloads rewrite the related Company on an existing user."""
    # Create the ForeignKey target and an owning user manually.
    UserModel = get_user_model()
    CompanyModel = UserModel._meta.get_field('company').remote_field.model
    google = CompanyModel(server_id=1, name='Google')
    google.save()
    owner = UserModel(server_id=1, username='bradpitt')
    owner.company = google
    owner.save()
    # Apply the string payload, then the FK payload.
    owner = save_user_string_fields(string_data)
    assert save_user_foreign_key_fields(foreign_key_data, owner)
    assert owner.server_id == 1
    assert owner.username == 'pandichef'
    assert owner.company.name == "Jackson5"
    # A payload with a new company server_id swaps in another company.
    replacement_data = {
        "company": {"name": "U2", "primary_activity": 2, "server_id": 2}}
    assert save_user_foreign_key_fields(replacement_data, owner)
    assert owner.company.name == "U2"
@pytest.mark.django_db
def test_save_user_foreign_key_fields_NOT_FOUND(string_data, foreign_key_data) -> None:
    """A payload key that is not an FK field on the user model is rejected."""
    saved = save_user_string_fields(string_data)
    bogus_data = {
        "group": {"name": "U2", "primary_activity": 2, "server_id": 2}}
    assert not save_user_foreign_key_fields(bogus_data, saved)
|
from lxml.html import fromstring
import lxml.html as html
def is_empty(text):
    """Return True when *text* has zero length, else False."""
    return not len(text)
def delete_empty_table_rows(html_doc: str) -> str:
    """Remove every <tr> (direct child of a <table>) whose cells are all empty.

    :param html_doc: HTML document as a string
    :return: the serialized document with empty rows dropped.
        NOTE(review): lxml.html.tostring returns *bytes* by default, so the
        declared ``-> str`` annotation is inaccurate; kept as-is because
        callers (e.g. the demo below) already consume the bytes value.
    """
    doc = fromstring(html_doc)
    # XPath equivalent of the CSS selector 'table > tr'. Using xpath()
    # avoids the extra 'cssselect' package that Element.cssselect() needs.
    rows = doc.xpath('//table/tr')
    # A row is empty when every child cell has no text content
    # (a childless row also counts as empty, matching the original logic).
    empty_rows = [
        row for row in rows
        if all(is_empty(cell.text_content()) for cell in row)
    ]
    # Collect first, then mutate: dropping while iterating the live tree
    # would skip elements.
    for row in empty_rows:
        row.drop_tree()
    return html.tostring(doc)
html_doc = "<html><body><table>\
<tr><td>1</td><td></td></tr>\
<tr><td></td><td></td></tr>\
<tr><td>1</td><td>2</td></tr>\
</table></body></html>"
new_html = delete_empty_table_rows(html_doc)
print(new_html)
|
# Read how many pokemon names follow, then collect the distinct ones.
n = int(input())
captured = set()
for _ in range(n):
    captured.add(input())
# 151 pokemon in total; report how many are still missing.
print('Falta(m) {} pomekon(s).'.format(151 - len(captured)))
|
# Generated by Django 2.0.5 on 2018-06-09 04:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration (see the "Generated by Django 2.0.5"
    # header). Operations run in order against the model state left by the
    # dependency below, so their sequence must not be changed.

    dependencies = [
        ('services', '0017_tocountrydata_tonation'),
    ]

    operations = [
        # Rename collecteddata.country to the more explicit 'fromCountry'.
        migrations.RenameField(
            model_name='collecteddata',
            old_name='country',
            new_name='fromCountry',
        ),
        migrations.AddField(
            model_name='collecteddata',
            name='toCheck',
            field=models.BooleanField(default=False),
        ),
        # preserve_default=False: the default of 1 is only used once to
        # backfill existing rows; the model itself keeps no default.
        migrations.AddField(
            model_name='collecteddata',
            name='toID',
            field=models.CharField(default=1, max_length=200),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='collecteddata',
            name='toCountry',
            field=models.CharField(max_length=3),
        ),
        migrations.DeleteModel(
            name='toCountryData',
        ),
    ]
|
"""
partition_suggestion.py
purpose: Given Nx, Ny, Nz and a number of processes, suggest a partitioning strategy that would result in
more-or-less cube-shaped partitions
"""
import random
from collections import deque
def random_partition(factors, n_factors):
    """
    Randomly group prime factors into three directional products.

    factors   -- list of prime factors of a number
    n_factors -- three-tuple (#px, #py, #pz): how many factors to draw,
                 at random and without replacement, for each direction
    returns [px, py, pz]
    """
    pool = factors[:]  # local copy so the caller's list is untouched
    partition = [1, 1, 1]
    for axis in range(3):
        for _ in range(n_factors[axis]):
            pick = random.choice(pool)
            pool.remove(pick)
            partition[axis] *= pick
    return partition
class Partition:
    """A candidate 3-D domain decomposition and its quality score.

    The score is the surface-to-volume ratio of a typical sub-domain:
    lower means more cube-like partitions (less communication per unit
    of work).
    """

    def __init__(self, Nx, Ny, Nz, part):
        self.Nx = Nx
        self.Ny = Ny
        self.Nz = Nz
        self.px = part[0]
        self.py = part[1]
        self.pz = part[2]
        self.set_score()

    def set_score(self):
        """Compute and cache surface/volume of one sub-domain."""
        # float() keeps true division on Python 2 as well
        dx = self.Nx / float(self.px)
        dy = self.Ny / float(self.py)
        dz = self.Nz / float(self.pz)
        # estimate surface-to-volume ratio of the typical partition
        # (same term order as before to keep float results bit-identical)
        volume = dx * dy * dz
        surface = 2. * dx * dy + 2. * dx * dz + 2. * dz * dy
        self.score = surface / volume

    def get_score(self):
        """Return the cached score (lower is better)."""
        return self.score

    def get_partition(self):
        """Return the process counts as [px, py, pz]."""
        return [self.px, self.py, self.pz]
def partitionfunc(n, k, l=1):
    """Yield every k-length partition of integer n with elements >= l,
    as non-decreasing tuples.

    n -- the integer to partition
    k -- the length of each partition
    l -- the minimum partition element size
    """
    # BUG FIX: the original used `raise StopIteration` to end the generator.
    # Under PEP 479 (Python 3.7+) that escapes as a RuntimeError instead of
    # ending iteration; a plain `return` is the correct way to finish.
    if k < 1:
        return
    if k == 1:
        if n >= l:
            yield (n,)
        return
    for i in range(l, n + 1):
        for result in partitionfunc(n - i, k - 1, i):
            yield (i,) + result
def primes(n):
    """
    Return the prime factorization of positive integer n as a list.

    Repeated factors appear once per multiplicity, in ascending order;
    primes(1) returns [].
    """
    factorization = []
    candidate = 2
    while candidate * candidate <= n:
        # strip out every power of the current candidate
        while n % candidate == 0:
            factorization.append(candidate)
            n //= candidate
        candidate += 1
    if n > 1:
        # whatever remains is itself prime
        factorization.append(n)
    return factorization
def factors(n):
    """
    Return a list of the divisors of positive integer n, ascending.

    n = positive integer
    """
    # Materialize explicitly: on Python 3 the previous bare filter() call
    # returned a lazy filter object, contradicting the documented
    # "returns a list" contract (on Python 2 it happened to be a list).
    return [i for i in range(1, n + 1) if n % i == 0]
def part_advisor(Nx, Ny, Nz, num_procs, numTrials=2000):
    """
    Suggest a partitioning [px, py, pz] of num_procs processes.

    Nx -- number of points in the x-direction
    Ny -- number of points in the y-direction
    Nz -- number of points in the z-direction
    num_procs -- the number of partitions to create
    numTrials -- random samples drawn per factor grouping

    Strategy: split the prime factors of num_procs into three groups
    (sampled at random for every way of dividing the factor count into
    three group sizes) and keep the grouping whose typical sub-domain
    has the lowest surface-to-volume score.
    """
    p_facts = primes(num_procs)
    # Two extra unit factors allow effectively 1-D or 2-D partitionings
    # if those score best, and make prime num_procs values workable.
    p_facts.append(1)
    p_facts.append(1)

    bestScore = float("inf")
    bestPartition = None
    # Enumerate every way to split the factor count into three group sizes.
    for grouping in partitionfunc(len(p_facts), 3):
        counts = deque(grouping)
        for _ in range(3):
            counts.rotate(1)  # shift the group sizes between directions
            # take numTrials random samples for this grouping/rotation
            for _ in range(numTrials):
                candidate = random_partition(p_facts, counts)
                sample = Partition(Nx, Ny, Nz, candidate)
                if sample.get_score() < bestScore:
                    # BUG FIX: bestScore was never updated before, so the
                    # comparison was always against +inf and the *last*
                    # random sample won regardless of quality.
                    bestScore = sample.get_score()
                    bestPartition = sample
    # (Unreachable commentary and Python-2 print statements that followed
    # the return in the original have been removed.)
    return bestPartition.get_partition()
if __name__=="__main__":
"""
write test code here...
"""
Nx = 150
Ny = 150
Nz = 1000
num_procs = 8
partition = part_advisor(Nx,Ny,Nz,num_procs)
bestPartition = Partition(Nx,Ny,Nz,partition)
print 'Best partition found has score = %g \n'%bestPartition.get_score()
print bestPartition.get_partition()
print 'Block sizes approximately %i x %i x %i'%(Nx/partition[0],Ny/partition[1],Nz/partition[2])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.