content stringlengths 5 1.05M |
|---|
# Generated by Django 3.1a1 on 2020-05-31 04:37
from django.db import migrations
import djstripe.enums
import djstripe.fields
class Migration(migrations.Migration):
    # Auto-generated (Django 3.1a1) schema migration: re-declares
    # FileUpload.purpose as a StripeEnumField with max_length=35 so longer
    # Stripe purpose values fit.

    dependencies = [
        ("djstripe", "0008_djstripepaymentmethod_type_width"),
    ]

    operations = [
        migrations.AlterField(
            model_name="fileupload",
            name="purpose",
            field=djstripe.fields.StripeEnumField(
                enum=djstripe.enums.FileUploadPurpose,
                help_text="The purpose of the uploaded file.",
                max_length=35,
            ),
        ),
    ]
|
from bisect import bisect_right
from utils import chunks
from six import iterkeys
class Node(object):
    """Base class for tree nodes; every node gets a unique integer ID."""

    __last_ID = 0  # class-wide counter of IDs handed out so far

    def __init__(self):
        # Bump the shared counter and adopt the new value as this node's ID.
        Node.__last_ID += 1
        self.ID = Node.__last_ID

    @property
    def is_leaf(self):
        """True when this node is a Leaf rather than an InnerNode."""
        return isinstance(self, Leaf)
class InnerNode(Node):
    """Internal tree node that routes key lookups to one of its children."""

    def __init__(self, children):
        super(InnerNode, self).__init__()
        self._children = children
        # Separator keys: the leftmost key of every child except the first.
        self._values = [c.leftmost for c in children[1:]]

    @property
    def leftmost(self):
        """Smallest key reachable below this node."""
        return self._children[0].leftmost

    def child_to_follow(self, value):
        """Return the child subtree whose key range contains *value*."""
        return self._children[bisect_right(self._values, value)]

    def __getitem__(self, key):
        # Delegate the lookup to the appropriate child.
        return self.child_to_follow(key)[key]

    def __str__(self):
        ids = ','.join(str(child.ID) for child in self._children)
        return 'InnerNode(%s)' % ids

    def __repr__(self):
        return str(self)
class Leaf(Node):
    """Terminal tree node holding the actual key/value pairs in a dict."""

    def __init__(self, data):
        super(Leaf, self).__init__()
        self._data = data

    @property
    def leftmost(self):
        """Smallest key stored in this leaf."""
        return min(iterkeys(self._data))

    def __getitem__(self, key):
        return self._data[key]

    def __str__(self):
        return 'Leaf(%s)' % (self._data,)

    def __repr__(self):
        return str(self)
class Tree(object):
    """B+-tree-like index over a node registry, built by bulk loading."""

    def __init__(self, fanout, leafsize):
        """Create an empty tree.

        fanout   -- maximum number of children per inner node
        leafsize -- maximum number of key/value pairs per leaf
        """
        self._root = None
        self._nodes = dict()  # node ID -> node, for direct access by ID
        self._fanout = fanout
        self._leafsize = leafsize

    def __getitem__(self, ID):
        return self._nodes.__getitem__(ID)

    def __setitem__(self, ID, node):
        return self._nodes.__setitem__(ID, node)

    def add_nodes(self, nodes):
        """Register *nodes* in the ID -> node map."""
        self._nodes.update((node.ID, node) for node in nodes)

    def search(self, key):
        """Look up *key* starting from the root."""
        return self._root[key]

    def bulk_load(self, data):
        """Build the tree bottom-up from a {key: value} mapping.

        BUG FIX: the original used ``key=lambda (k, v): k`` — implicit tuple
        parameter unpacking, which is Python-2-only syntax (removed by
        PEP 3113) and a SyntaxError on Python 3 even though this module
        imports ``six`` for 2/3 compatibility. Index the pair instead.
        """
        data = sorted(data.items(), key=lambda item: item[0])
        # Bottom layer: leaves of at most ``leafsize`` entries each.
        nodes = list(map(Leaf, map(dict, chunks(data, self._leafsize))))
        self.add_nodes(nodes)
        # Repeatedly group nodes under inner nodes until one root remains.
        while len(nodes) > 1:
            nodes = list(map(InnerNode, chunks(nodes, self._fanout)))
            self.add_nodes(nodes)
        self._root = nodes[0]

    def __str__(self):
        return self._nodes.__str__()
|
# -*- coding: utf-8 -*-
import os
try: # Try importing Python3 urllib
import urllib.request
except ImportError: # Now importing Python2 urllib
import urllib
def get_content(url):
    """Fetch *url* and return the raw response body as bytes."""
    try:  # Python 3: urllib.request with a context-managed response.
        with urllib.request.urlopen(url) as response:
            return response.read()  # Bytes from http.client.HTTPResponse.
    except AttributeError:  # Python 2: flat urllib module has no .request.
        return urllib.urlopen(url).read()  # Returns an instance.
def norvig_bigtxt():
    """Return the text of Norvig's big.txt, downloading and caching it."""
    url = "https://norvig.com/big.txt"
    if not os.path.isfile('big.txt'):
        # No local copy yet: download once and cache it in the working dir.
        big_txt = get_content(url).decode('utf8')
        with open('big.txt', 'w') as fout:
            fout.write(big_txt)
        return big_txt
    # A cached copy exists; read and return it.
    with open('big.txt') as fin:
        return fin.read()
|
import pandas as pd
import numpy as np
import os
def read_data():
    """Read raw train/test CSVs and return them concatenated into one frame.

    Test rows are tagged with the sentinel Survived == -786 so write_data()
    can split them out again later.
    """
    # Set the path of the raw data (relative to the current working directory).
    raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
    train_file_path = os.path.join(raw_data_path, 'train.csv')
    test_file_path = os.path.join(raw_data_path, 'test.csv')
    # Read data with all default values.
    train_df = pd.read_csv(train_file_path, index_col='PassengerId')
    test_df = pd.read_csv(test_file_path, index_col='PassengerId')
    test_df['Survived'] = -786  # adding Survived column to test data
    df = pd.concat((test_df, train_df), axis=0, sort=False)
    return df
def fill_missing_values(df):
    """Impute missing Embarked, Fare and Age values in place and return df."""
    # Embarked: default missing ports of embarkation to 'C'.
    df.Embarked.fillna('C', inplace=True)
    # Fare: median fare among 3rd-class passengers who embarked at 'S'.
    third_class_s = (df.Pclass == 3) & (df.Embarked == 'S')
    df.Fare.fillna(df.loc[third_class_s].Fare.median(), inplace=True)
    # Age: median age of passengers sharing the same Title.
    df.Age.fillna(df.groupby('Title').Age.transform('median'), inplace=True)
    return df
def get_deck(cabin):
    """Map a cabin label to its upper-cased deck letter; NaN maps to 'Z'."""
    # np.where keeps the NaN branch: str(nan)[0] is computed but discarded.
    first_letter = str(cabin)[0].upper()
    return np.where(pd.notnull(cabin), first_letter, 'Z')
def reorder_columns(df):
    """Return df with the Survived column moved to the front."""
    others = [name for name in df.columns if name != 'Survived']
    return df[['Survived'] + others]
def GetTitle(name):
    """Extract the lower-cased title from a 'Last, Title. First' style name."""
    # The title is the text between the first comma and the next period.
    after_comma = name.split(',')[1]
    return after_comma.split('.')[0].strip().lower()
def processed_data(df):
    """Run the full feature-engineering pipeline via method chaining."""
    return (df
            # Create Title attribute from the Name column.
            .assign(Title=lambda x: x.Name.map(GetTitle))
            # Impute missing Embarked/Fare/Age (needs Title, so done after it).
            .pipe(fill_missing_values)
            # Bucket Fare into quartiles.
            # NOTE(review): 'very_hig' looks like a typo for 'very_high';
            # fixing it would rename the dummy column downstream, left as-is.
            .assign(Fare_Bin=lambda x: pd.qcut(x.Fare, 4, labels=['very_low', 'low', 'high', 'very_hig']))
            # Create AgeState, FamilySize and IsMother features.
            .assign(AgeState=lambda x: np.where(x.Age >= 18, 'Adult', 'Child'))
            .assign(FamilySize=lambda x: x.Parch + x.SibSp + 1)
            .assign(IsMother=lambda x: np.where(((x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title != 'miss')), 1, 0))
            # Deck feature: null out the lone 'T' cabin, then map to deck letter.
            .assign(Cabin=lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
            .assign(Deck=lambda x: x.Cabin.map(get_deck))
            # Feature encoding: binary IsMale plus one-hot categoricals.
            .assign(IsMale=lambda x: np.where(x.Sex == 'male', 1, 0))
            .pipe(pd.get_dummies, columns=['Deck', 'Pclass', 'Title', 'Fare_Bin', 'Embarked', 'AgeState'])
            # Drop columns no longer needed after encoding.
            .drop(['Cabin', 'Name', 'Ticket', 'Parch', 'SibSp', 'Sex'], axis=1)
            # Put Survived first.
            .pipe(reorder_columns)
            )
def write_data(df):
    """Split the combined frame back into train/test CSVs under data/processed."""
    processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
    write_train_path = os.path.join(processed_data_path, 'train.csv')
    write_test_path = os.path.join(processed_data_path, 'test.csv')
    # Writing train data to file: rows without the -786 test sentinel.
    # df[df.Survived != -786].to_csv(write_train_path) # this one also gives same result
    df.loc[df.Survived != -786].to_csv(write_train_path)
    # Writing test data to file.
    # As we dont have Survived column in test lets remove it (it only holds
    # the sentinel added by read_data).
    columns = [column for column in df.columns if column != 'Survived']
    df.loc[df.Survived == -786, columns].to_csv(write_test_path)
if __name__ == '__main__':
    # End-to-end run: read raw CSVs, engineer features, write processed CSVs.
    df = read_data()
    df = processed_data(df)
    write_data(df)
    # df.info()
    # FIX: parenthesized print is a valid statement on Python 2 and a call on
    # Python 3, unlike the bare ``print 'Done.......'`` it replaces.
    print('Done.......')
    # print df.Survived.value_counts()
|
import os
from typing import Dict, List, Optional
import attr
from openlineage.constants import DEFAULT_PRODUCER
# Producer URI reported in emitted facets; overridable via environment.
PRODUCER = os.getenv("MARQUEZ_PRODUCER", DEFAULT_PRODUCER)
# Base URI of the OpenLineage JSON schema; facets append their definition path.
SCHEMA_URI = "https://raw.githubusercontent.com/OpenLineage/OpenLineage/main/spec/OpenLineage.json"
@attr.s
class BaseFacet:
    """Base for all OpenLineage facets: stamps producer and schema URL."""

    # init=False: filled in post-init, not accepted as constructor arguments.
    _producer: str = attr.ib(init=False)
    _schemaURL: str = attr.ib(init=False)

    def __attrs_post_init__(self):
        self._producer = PRODUCER
        self._schemaURL = self._get_schema()

    @staticmethod
    def _get_schema() -> str:
        # Each subclass overrides this to point at its own schema definition.
        return SCHEMA_URI + "#/definitions/BaseFacet"
@attr.s
class NominalTimeRunFacet(BaseFacet):
    """Run facet carrying the nominal (scheduled) start/end times."""

    nominalStartTime: str = attr.ib()
    # End time may be unknown, hence Optional with a None default.
    nominalEndTime: Optional[str] = attr.ib(default=None)

    @staticmethod
    def _get_schema() -> str:
        return SCHEMA_URI + "#/definitions/NominalTimeRunFacet"
@attr.s
class ParentRunFacet(BaseFacet):
    """Run facet linking a run to the parent run/job that spawned it."""

    run: Dict = attr.ib()
    job: Dict = attr.ib()

    @classmethod
    def create(cls, runId: str, namespace: str, job_name: str):
        """Build the facet from the parent's run id and job coordinates."""
        return cls(
            run={
                "runId": runId
            },
            job={
                "namespace": namespace,
                "name": job_name
            }
        )

    @staticmethod
    def _get_schema() -> str:
        return SCHEMA_URI + "#/definitions/ParentRunFacet"
@attr.s
class DocumentationJobFacet(BaseFacet):
    """Job facet holding a human-readable job description."""

    description: str = attr.ib()

    @staticmethod
    def _get_schema() -> str:
        return SCHEMA_URI + "#/definitions/DocumentationJobFacet"
@attr.s
class SourceCodeLocationJobFacet(BaseFacet):
    """Job facet pointing at the job's source code (e.g. repo type + URL)."""

    type: str = attr.ib()
    url: str = attr.ib()

    @staticmethod
    def _get_schema() -> str:
        return SCHEMA_URI + "#/definitions/SourceCodeLocationJobFacet"
@attr.s
class SqlJobFacet(BaseFacet):
    """Job facet recording the SQL query the job executed."""

    query: str = attr.ib()

    @staticmethod
    def _get_schema() -> str:
        return SCHEMA_URI + "#/definitions/SqlJobFacet"
@attr.s
class DocumentationDatasetFacet(BaseFacet):
    """Dataset facet holding a human-readable dataset description."""

    description: str = attr.ib()

    @staticmethod
    def _get_schema() -> str:
        return SCHEMA_URI + "#/definitions/DocumentationDatasetFacet"
@attr.s
class SchemaField:
    """One column of a dataset schema: name, type, optional description."""

    name: str = attr.ib()
    type: str = attr.ib()
    description: Optional[str] = attr.ib(default=None)
@attr.s
class SchemaDatasetFacet(BaseFacet):
    """Dataset facet describing the dataset's column schema."""

    fields: List[SchemaField] = attr.ib()

    @staticmethod
    def _get_schema() -> str:
        return SCHEMA_URI + "#/definitions/SchemaDatasetFacet"
@attr.s
class DataSourceDatasetFacet(BaseFacet):
    """Dataset facet identifying the backing data source (name + URI)."""

    name: str = attr.ib()
    uri: str = attr.ib()

    @staticmethod
    def _get_schema() -> str:
        return SCHEMA_URI + "#/definitions/DataSourceDatasetFacet"
|
#
# Copyright (c) 2006-2016, Prometheus Research, LLC
#
from ....core.util import to_name
from ....core.adapter import adapt
from ....core.error import Error, act_guard
from ....core.connect import transaction
from ....core.domain import UntypedDomain, RecordDomain, Value, Product
from ....core.cmd.embed import Embed
from ....core.cmd.act import Act, ProduceAction, act
from ....core.tr.binding import VoidBinding
from ....core.tr.decorate import decorate
from .command import WithCmd
class ProduceWith(Act):
    """Produce the body of a WITH command with its record bound as names.

    Evaluates the command's record expression, copies the action environment,
    adds one binding per record field (or dict entry), and produces the body
    under the extended environment. A null record yields an untyped null.
    """

    # HTSQL adapter registration: this Act handles (WithCmd, ProduceAction).
    adapt(WithCmd, ProduceAction)

    def __call__(self):
        with transaction():
            # Evaluate the expression that supplies the bindings.
            input = act(self.command.record, self.action)
            if not (isinstance(input.domain, RecordDomain) or
                    isinstance(input.data, dict)):
                with act_guard(self.command.record):
                    raise Error("Expected a record value")
            if input.data is not None:
                environment = self.action.environment.copy()
                if isinstance(input.data, dict):
                    # Untyped mapping: keys must be text names (Python 2
                    # ``unicode``); values are embedded as HTSQL values.
                    for key, value in sorted(input.data.items()):
                        try:
                            if not isinstance(key, unicode):
                                raise TypeError
                            name = to_name(key)
                            value = Embed.__invoke__(value)
                        except TypeError:
                            with act_guard(self.command.record):
                                raise Error("Expected a record value")
                        # NOTE(review): stores under the raw ``key``, not the
                        # normalized ``name`` computed above — verify intended.
                        environment[key] = value
                else:
                    # Typed record: use each field's tag as the binding name.
                    for idx, field in enumerate(input.domain.fields):
                        tag = getattr(field, 'tag')
                        if tag is not None:
                            name = to_name(tag)
                            value = Value(field.domain, input.data[idx])
                            environment[name] = value
                action = self.action.clone(environment=environment)
                return act(self.command.body, action)
            else:
                # Null record: produce an untyped null product.
                meta = decorate(VoidBinding())
                meta = meta.clone(domain=UntypedDomain())
                return Product(meta, None)
|
#!/usr/bin/python
from Tkinter import *
from PIL import Image, ImageTk
import subprocess
import locale
import os
import time
import thread
from collections import deque
import paramiko
import memcache
# NUMS times WPT should be 600 for the scale to work out
NUMS = 200  # number of values to show in strip
WPT = 3  # width of each data point in plot
PMRG = 60  # margin beside chart
PWIDTH = 722  # left margin is 30, right margin is 30
SHGT = 380  # strip chart pane height
CWIDTH = 250  # width of stats column
DHGT = 380  # stats dump pane height
SLHGT = 15  # line height for stats
INTERVAL = 1000  # ms between chart refreshes (passed to Tk after())
CTWIDTH = 276  # width of the load-parameter control frame
CLWIDTH = 344  # width of the client status frame
CMWIDTH = 108  # width of the master control frame
CHGT = 230  # height of load control frames
CLICOL = 3  # number of columns for client icons
CLIROW = 8  # number of rows for client icons
LSAMP = 10  # number of samples in moving average
LINTVL = 100  # interval between latency samples; passed to Tk after(), so
              # this is milliseconds (the original comment said seconds)
NSAMP = 20  # number of samples to take before writing an average record
WARM = 5  # number of samples to skip before starting AUTO test
COOL = 5  # number of samples to skip before restarting AUTO test
# Keys "000".."063" and matching payloads "r000".."r063" used by the
# multi-get latency probe. Generated instead of hand-enumerated: the
# original spelled out all 128 literals, which is error-prone to edit.
MULTIKEY = ["%03d" % i for i in range(64)]
MULTIOBJ = ["r%03d" % i for i in range(64)]
# defaults for loadgen: MIN*/MAX* bound the entry-widget validators below,
# CL* are the initial per-client values.
MINthr = 1
MAXthr = 16
CLThr = 16  # threads per client
MINcon = 1
MAXcon = 64
CLCon = 8  # connections per thread
MINgpr = 1
MAXgpr = 48
CLgpr = 32  # gets per network packet
MINdel = 1
MAXdel = 100000
CLdelay = 50  # delay between requests
MINkey = 8
MAXkey = 64
CLkey = 8  # default key size
MINval = 8
MAXval = 128
CLval = 32  # default value size
FONT = "Liberation Mono"
CHFNTSIZ = "12"  # chart font size (Tk accepts the string form)
BGCLR = "dark slate gray"  # frame background color
CHCLR = "white"  # chart line/label color
RELF = "ridge"  # relief style for frames
class mcLat:
    # object to collect a moving average of latency
    """Maintains a moving average of memcached GET latency in microseconds.

    Seeds the server with MULTIKEY/MULTIOBJ data, primes an LSAMP-deep
    window with one measurement, then re-samples itself every LINTVL ms
    through the Tk event loop. Reads module globals LATMULTI, LOADGEN and
    gprEntry, which are defined elsewhere in the script.
    """

    def __init__(self, master, mc):
        global LATMULTI
        self.mymaster = master
        # load data for latency measurements
        self.mc = mc
        if LATMULTI:
            for i in range(len(MULTIKEY)):
                mc.set(MULTIKEY[i], MULTIOBJ[i])
        else:
            mc.set(MULTIKEY[0], MULTIOBJ[0])
        # prime latency moving average with one initial timed fetch
        stime = time.time()
        if LATMULTI:
            obj = self.mc.get_multi(MULTIKEY)
        else:
            obj = self.mc.get(MULTIKEY[0])
        elmic = int(1000000*(time.time() - stime))
        self.latsamp = deque()
        for i in range(0, LSAMP):
            self.latsamp.append(elmic)
        # schedule periodic resampling through the Tk event loop
        self.mymaster.after(LINTVL, self.sample)

    def sample(self):
        """Time one GET (or multi-GET) and slide it into the window."""
        global LATMULTI
        global LOADGEN
        global gprEntry
        # time single query
        elmic = 0
        if LOADGEN:
            # match the load generator's current gets-per-request setting
            gprm1 = int(gprEntry.get())
            key = MULTIKEY[0:gprm1]
        else:
            key = MULTIKEY
        stime = time.time()
        if LATMULTI:
            obj = self.mc.get_multi(key)
        else:
            obj = self.mc.get(MULTIKEY[0])
        elmic = int(1000000*(time.time() - stime))
        # print "latency sample = ",elmic
        # self.il = self.il + 1
        # if self.il >= LSAMP:
        #     self.il = 0
        # self.latsamp[self.il] = elmic
        # slide the window: drop the oldest sample, append the new one
        toss = self.latsamp.popleft()
        self.latsamp.append(elmic)
        self.mymaster.after(LINTVL, self.sample)

    def get(self):
        """Return the current moving-average latency in microseconds."""
        lsum = 0
        for i in range(len(self.latsamp)):
            lsum = lsum + self.latsamp[i]
        return lsum/LSAMP
class mcStats:
    """Strip-chart display of memcached server statistics (Python 2 / Tk).

    Polls the server every INTERVAL ms, charts get/set/miss rates and
    latency, optionally dumps raw stats, writes record/trace CSV files,
    and drives the AUTO test state machine (warmup -> sample -> cooldown).
    Relies on module globals set elsewhere in the script: RECORD, TRACE,
    HOSTNAME, controller, nclients — TODO confirm at the call site. The
    LATENCY/MISSES/STATS/SHOWAVG constructor parameters shadow same-named
    module globals that the other methods read directly.
    """

    def __init__(self, master, SYSTEM, LATENCY, MISSES, STATS, SHOWAVG, RECFILE, TRACEFILE, AUTOFILE):
        global SVRVER
        global AUTO
        global getque
        global getpsque
        global setque
        global setpsque
        global gmissque
        global gmisspsque
        global latque
        global il
        global lasttime
        global samples
        global settot
        global gettot
        global gmisstot
        global lattot
        global warmcnt
        global WARMING
        global coolcnt
        global COOLING
        global autoparms
        # History deques: absolute counters and derived per-second rates.
        getque = deque()
        getpsque = deque()
        gmissque = deque()
        gmisspsque = deque()
        setque = deque()
        setpsque = deque()
        latque = deque()
        self.resetAvg()
        # Write CSV headers for the record and trace files if enabled.
        if RECORD:
            rfile = open(RECFILE, "a")
            rec = "clientsys,total_conn,protocol,threads,conn,gets_per_frame,delay,keysize,valsize,numsamp,avgsetps,avggetps"
            if MISSES:
                rec = rec + ",avgmissps"
            if LATENCY:
                rec = rec + ",avglatency"
            rfile.write(rec + "\n")
            rfile.close()
        if TRACE:
            tfile = open(TRACEFILE, "a")
            rec = "setps,getps"
            if MISSES:
                rec = rec + ",missps"
            if LATENCY:
                rec = rec + ",latency"
            tfile.write(rec + "\n")
            tfile.close()
        self.master = master
        self.chart = Canvas(master, width=PWIDTH, height=SHGT, bg=BGCLR, bd=3, relief=RELF)
        self.chart.pack()
        if STATS:
            self.grid = Canvas(master, width=PWIDTH, height=DHGT, bd=3, bg='white', relief=RELF)
            self.grid.pack()
        # connect to memcached server
        print "connecting to ", SYSTEM
        self.mc = memcache.Client([SYSTEM], debug=1)
        print self.mc
        # initialize strip chart data queues
        lasttime = -1.0
        # prime with current counts
        data = self.mc.get_stats()
        if(len(data) <= 0):
            print "couldn't get stats"
            exit(1)
        stats = data[0][1]
        numget = int(stats['cmd_get'])
        numset = int(stats['cmd_set'])
        nummiss = int(stats['get_misses'])
        # Fill the history with the current counters so rates start at zero.
        for x in range(NUMS):
            getque.append(numget)
            getpsque.append(0)
            gmissque.append(nummiss)
            gmisspsque.append(0)
            setque.append(numset)
            setpsque.append(0)
            if LATENCY:
                latque.append(0)
        SVRVER = stats['version']
        # if LATENCY create latency object
        if LATENCY:
            self.lat = mcLat(master, self.mc)
        WARMING = True  # signifies warmup period of test
        COOLING = False  # signifies cooling period of test
        warmcnt = 0
        coolcnt = 0
        self.refresh()

    def refresh(self):
        """Poll the server once, redraw, and drive AUTO/RECORD bookkeeping."""
        global RECORD
        global AUTO
        global COOL
        global COOLING
        global coolcnt
        global WARM
        global WARMING
        global warmcnt
        global autoparms
        global active
        global totconn
        global g_thr
        global g_conn
        global g_gpr
        global g_delay
        global g_keysize
        global g_valsize
        global g_protocol
        # get stats from server
        data = self.mc.get_stats()
        if len(data) > 0:
            stats = data[0][1]
            numget = int(stats['cmd_get'])
            numset = int(stats['cmd_set'])
            nummiss = int(stats['get_misses'])
            if LATENCY:
                latency = self.lat.get()
            else:
                latency = 0
            self.drawchart(numget, numset, nummiss, latency)
            if STATS:
                self.dumpstats(data)
            if AUTO:
                # AUTO state machine: warm up, collect NSAMP samples,
                # cool down, then start the next parameter set.
                if WARMING:
                    print "warmcnt cycle ", warmcnt
                    self.resetAvg()
                    warmcnt = warmcnt + 1
                    if warmcnt > WARM:
                        WARMING = False
                elif COOLING:
                    print "cooldown cycle ", coolcnt
                    self.resetAvg()
                    coolcnt = coolcnt + 1
                    if coolcnt > COOL:
                        COOLING = False
                        if len(autoparms) < 1:
                            print "auto test complete"
                            AUTO = False
                            controller.stopAndExit()
                        else:
                            # Pop the next parameter line and launch the run.
                            ptext = autoparms.popleft().rstrip()
                            params = ptext.split(",")
                            print "starting test with params ", params
                            iclients = int(params[0])
                            if iclients > nclients:
                                print "requested ", iclients, " clients, ", nclients, " available"
                                iclients = nclients
                            ithreads = int(params[1])
                            iconn = int(params[2])
                            igetperreq = int(params[3])
                            idelay = int(params[4])
                            ikeysize = int(params[5])
                            imode = params[6]
                            controller.setParams(ithreads, iconn, igetperreq, idelay, ikeysize, imode)
                            controller.startN(iclients)
                            warmcnt = 0
                            coolcnt = 0
                            WARMING = True
                elif samples == NSAMP:
                    # Sample set complete: append averages, stop, cool down.
                    print "finished a sample set, writing data"
                    rfile = open(RECFILE, "a")
                    rec = "%d" % active
                    rec = rec + ",%d" % totconn
                    rec = rec + ",%s" % g_protocol
                    rec = rec + ",%d" % g_thr
                    rec = rec + ",%d" % g_conn
                    rec = rec + ",%d" % g_gpr
                    rec = rec + ",%d" % g_delay
                    rec = rec + ",%d" % g_keysize
                    rec = rec + ",%d" % g_valsize
                    rec = rec + ",%d" % samples
                    rec = rec + ",%d" % setavg
                    rec = rec + ",%d" % getavg
                    if MISSES:
                        rec = rec + ",%d" % gmissavg
                    if LATENCY:
                        rec = rec + ",%d" % latavg
                    rfile.write(rec + "\n")
                    rfile.close()
                    controller.stopAll()
                    self.resetAvg()
                    COOLING = True
            elif RECORD:
                # Manual mode with recording: write one record per NSAMP.
                if samples == NSAMP:
                    print "finished a sample set, writing data"
                    rfile = open(RECFILE, "a")
                    rec = "%d" % active
                    rec = rec + ",%d" % totconn
                    rec = rec + ",%s" % g_protocol
                    rec = rec + ",%d" % g_thr
                    rec = rec + ",%d" % g_conn
                    rec = rec + ",%d" % g_gpr
                    rec = rec + ",%d" % g_delay
                    rec = rec + ",%d" % g_keysize
                    rec = rec + ",%d" % g_valsize
                    rec = rec + ",%d" % samples
                    rec = rec + ",%d" % setavg
                    rec = rec + ",%d" % getavg
                    if MISSES:
                        rec = rec + ",%d" % gmissavg
                    if LATENCY:
                        rec = rec + ",%d" % latavg
                    rfile.write(rec + "\n")
                    rfile.close()
        # reschedule sampler
        self.master.after(INTERVAL, self.refresh)

    def drawchart(self, numget, numset, nummiss, latency):
        """Redraw the strip chart: axes, tick labels, rates and curves."""
        global gmissque
        global gmisspsque
        global getque
        global setque
        global lasttime
        global samples
        global settot
        global setavg
        global gettot
        global getavg
        global gmisstot
        global lattot
        global active
        global totconn
        curtime = time.time()
        self.chart.delete("MCCHART")
        # Chart geometry; YSC/LSC are full-scale ops/sec and latency values.
        CLFT = PMRG
        CRGT = PWIDTH-PMRG
        CTOP = 5+SHGT/10
        CBOT = SHGT-5
        YRNG = CBOT-CTOP
        NMAJ = 7
        YMAJ = YRNG/NMAJ
        YMNR = YMAJ/2
        YSC = 35000000
        LSC = 7000
        # draw chart
        self.chart.create_line(CLFT, CBOT, CRGT, CBOT, fill=CHCLR, tags="MCCHART")
        # self.chart.create_line(CLFT,CTOP,CRGT,CTOP,fill=CHCLR,tags="MCCHART")
        self.chart.create_line(CLFT, CBOT, CLFT, CTOP, fill=CHCLR, tags="MCCHART")
        self.chart.create_line(CRGT, CBOT, CRGT, CTOP, fill=CHCLR, tags="MCCHART")
        ly = CBOT - YMAJ
        val = 0
        lval = 0
        while ly > CTOP:
            # Gridline plus left (ops/sec) and right (latency) tick labels.
            self.chart.create_line(CLFT, ly, CRGT, ly, fill=CHCLR, dash=(3, 3), tags="MCCHART")
            val = val + (YSC/NMAJ)/1000000
            self.chart.create_text(CLFT, ly, anchor=E, text=str(val) + 'M', fill=CHCLR, tags="MCCHART")
            if LATENCY:
                lval = lval + (LSC/NMAJ)/1000
                self.chart.create_text(CRGT, ly, anchor=W, text=str(lval) + 'ms', fill=CHCLR, tags="MCCHART")
            ly = ly - YMAJ
        self.chart.create_text(PWIDTH/2, CTOP, anchor=S, text="memcached: " + SVRVER + " server: " + HOSTNAME, fill='white', font=(FONT, "20", "bold italic"), tags="MCCHART")
        if lasttime > 0.0:
            # Convert absolute counters to per-second rates since last pass.
            deltime = curtime - lasttime
            getpscur = int((numget-getque[-1])/deltime)
            getque.append(numget)
            getpsque.append(getpscur)
            setpscur = int((numset-setque[-1])/deltime)
            setque.append(numset)
            setpsque.append(setpscur)
            gmisspscur = int((nummiss-gmissque[-1])/deltime)
            gmissque.append(nummiss)
            gmisspsque.append(gmisspscur)
            if LATENCY:
                latque.append(latency)
            if TRACE:
                # Append one trace record per refresh.
                tfile = open(TRACEFILE, "a")
                rec = "%d" % setpscur
                rec = rec + ",%d" % getpscur
                if MISSES:
                    rec = rec + ",%d" % gmisspscur
                if LATENCY:
                    rec = rec + ",%d" % latency
                tfile.write(rec + "\n")
                tfile.close()
            # Current-rate labels in the top-left corner of the chart.
            csetps = locale.format("%d", setpscur, grouping=True)
            cgetps = locale.format("%d", getpscur, grouping=True)
            cgmissps = locale.format("%d", gmisspscur, grouping=True)
            if LATENCY:
                clat = locale.format("%d", latque[-1], grouping=True)
            ypos = CTOP+2
            self.chart.create_text(CLFT+2, ypos, anchor=NW, text='sets per second', fill='green', font=(FONT, CHFNTSIZ), tags="MCCHART")
            self.chart.create_text(CLFT+300, ypos, anchor=NE, text=csetps, fill='green', font=(FONT, CHFNTSIZ, "bold"), tags="MCCHART")
            ypos = ypos + 16
            self.chart.create_text(CLFT+2, ypos, anchor=NW, text='gets per second', fill='cyan', font=(FONT, CHFNTSIZ), tags="MCCHART")
            self.chart.create_text(CLFT+300, ypos, anchor=NE, text=cgetps, fill='cyan', font=(FONT, CHFNTSIZ, "bold"), tags="MCCHART")
            if MISSES:
                ypos = ypos + 16
                self.chart.create_text(CLFT+2, ypos, anchor=NW, text='misses per second', fill='red', font=(FONT, CHFNTSIZ), tags="MCCHART")
                self.chart.create_text(CLFT+300, ypos, anchor=NE, text=cgmissps, fill='red', font=(FONT, CHFNTSIZ, "bold"), tags="MCCHART")
            if LATENCY:
                ypos = ypos + 16
                self.chart.create_text(CLFT+2, ypos, anchor=NW, text='latency (mic)', fill='yellow', font=(FONT, CHFNTSIZ), tags="MCCHART")
                self.chart.create_text(CLFT+300, ypos, anchor=NE, text=clat, fill='yellow', font=(FONT, CHFNTSIZ, "bold"), tags="MCCHART")
            if SHOWAVG:
                # Second label column: running averages since the last reset.
                self.updateAvg(setpscur, getpscur, gmisspscur, latency)
                cavgsamp = locale.format("%d", samples, grouping=True)
                csetps = locale.format("%d", setavg, grouping=True)
                cgetps = locale.format("%d", getavg, grouping=True)
                if MISSES:
                    cgmissps = locale.format("%d", gmissavg, grouping=True)
                if LATENCY:
                    clat = locale.format("%d", latavg, grouping=True)
                ypos = CTOP+2
                self.chart.create_text(CLFT+352, ypos, anchor=NW, text='avg', fill='green', font=(FONT, CHFNTSIZ), tags="MCCHART")
                self.chart.create_text(CLFT+500, ypos, anchor=NE, text=csetps, fill='green', font=(FONT, CHFNTSIZ, "bold"), tags="MCCHART")
                ypos = ypos + 16
                self.chart.create_text(CLFT+352, ypos, anchor=NW, text='avg', fill='cyan', font=(FONT, CHFNTSIZ), tags="MCCHART")
                self.chart.create_text(CLFT+500, ypos, anchor=NE, text=cgetps, fill='cyan', font=(FONT, CHFNTSIZ, "bold"), tags="MCCHART")
                if MISSES:
                    ypos = ypos + 16
                    self.chart.create_text(CLFT+352, ypos, anchor=NW, text='avg', fill='red', font=(FONT, CHFNTSIZ), tags="MCCHART")
                    self.chart.create_text(CLFT+500, ypos, anchor=NE, text=cgmissps, fill='red', font=(FONT, CHFNTSIZ, "bold"), tags="MCCHART")
                if LATENCY:
                    ypos = ypos + 16
                    self.chart.create_text(CLFT+352, ypos, anchor=NW, text='avg', fill='yellow', font=(FONT, CHFNTSIZ), tags="MCCHART")
                    self.chart.create_text(CLFT+500, ypos, anchor=NE, text=clat, fill='yellow', font=(FONT, CHFNTSIZ, "bold"), tags="MCCHART")
                ypos = ypos + 16
                self.chart.create_text(CLFT+352, ypos, anchor=NW, text='samples', fill='white', font=(FONT, CHFNTSIZ), tags="MCCHART")
                self.chart.create_text(CLFT+500, ypos, anchor=NE, text=cavgsamp, fill='white', font=(FONT, CHFNTSIZ), tags="MCCHART")
            # Plot the history curves, one WPT-wide segment per sample.
            delx = WPT
            x1 = CLFT
            for x in range(NUMS):
                x2 = x1 + WPT
                y1 = CBOT - (YRNG*getpsque[x])/YSC
                y2 = CBOT - (YRNG*getpsque[x+1])/YSC
                self.chart.create_line(x1, y1, x2, y2, fill="cyan", width=2.0, tags="MCCHART")
                y1 = CBOT - (YRNG*setpsque[x])/YSC
                y2 = CBOT - (YRNG*setpsque[x+1])/YSC
                self.chart.create_line(x1, y1, x2, y2, fill="green", width=2.0, tags="MCCHART")
                if MISSES:
                    y1 = CBOT - (YRNG*gmisspsque[x])/YSC
                    y2 = CBOT - (YRNG*gmisspsque[x+1])/YSC
                    self.chart.create_line(x1, y1, x2, y2, fill="red", width=2.0, tags="MCCHART")
                if LATENCY:
                    y1 = CBOT - (YRNG*latque[x])/LSC
                    y2 = CBOT - (YRNG*latque[x+1])/LSC
                    self.chart.create_line(x1, y1, x2, y2, fill="yellow", width=2.0, tags="MCCHART")
                x1 = x2
            # Drop the oldest sample so the deques stay NUMS long.
            getque.popleft()
            getpsque.popleft()
            setque.popleft()
            setpsque.popleft()
            if MISSES:
                gmissque.popleft()
                gmisspsque.popleft()
            if LATENCY:
                latque.popleft()
        lasttime = curtime

    def resetAvg(self):
        """Zero the running-average accumulators."""
        global samples
        global settot
        global gettot
        global gmisstot
        global lattot
        samples = 0
        settot = 0
        gettot = 0
        gmisstot = 0
        lattot = 0

    def updateAvg(self, setpscur, getpscur, gmisspscur, latency):
        """Fold the latest rates into the running totals and averages."""
        global samples
        global settot
        global gettot
        global gmisstot
        global lattot
        global setavg
        global getavg
        global gmissavg
        global latavg
        samples = samples + 1
        settot = settot + setpscur
        setavg = settot/samples
        gettot = gettot + getpscur
        getavg = gettot/samples
        gmisstot = gmisstot + gmisspscur
        gmissavg = gmisstot/samples
        lattot = lattot + latency
        latavg = lattot/samples

    def dumpstats(self, data):
        """Render the raw server stats dict into the grid canvas, wrapping
        across panes as each column fills."""
        # pane size is a minimum of 200
        NP = PWIDTH/CWIDTH  # how many panes can I fit in the panel width?
        PANE = PWIDTH/(NP)  # expand pane size so the columns are all the same size
        xpos = 0
        ypos = 15
        self.grid.delete("MCINFO")
        sysid = data[0][0]
        self.grid.create_text(xpos+5, ypos, anchor=W, text='address:port', fill='blue', font=(FONT, CHFNTSIZ), tags="MCINFO")
        self.grid.create_text(xpos+PANE-5, ypos, anchor=E, text=sysid, fill='blue', font=(FONT, CHFNTSIZ), tags="MCINFO")
        ypos = ypos + SLHGT
        stats = data[0][1]
        for name in sorted(stats.iterkeys()):
            self.grid.create_text(xpos+5, ypos, anchor=W, text=name, fill='black', font=(FONT, CHFNTSIZ), tags="MCINFO")
            self.grid.create_text(xpos+PANE-5, ypos, anchor=E, text=stats[name], fill='red', font=(FONT, CHFNTSIZ), tags="MCINFO")
            xpos = xpos+PANE
            if(xpos >= PWIDTH):
                xpos = 0
                ypos = ypos + SLHGT
class mcCtl:
def __init__(self,master,svrip,CLIENTS):
global AUTO
global autoparms
global clthr
global clconn
global cldelay
global clgpr
global clientctl
global nclients
global iclient
global active
global totconn
global nthEntry
global conEntry
global gprEntry
global delEntry
global keyEntry
global binButton
global ascButton
global protoVar
global nclData
global tcoData
global numSlide
mysvr = svrip
clthr = CLThr
clconn = CLCon
clgpr = CLgpr
cldelay = CLdelay
clkey = CLkey
active = 0
totconn = 0
cfile=open(CLIENTS,"r")
text = cfile.readlines()
cfile.close()
# how many clients do we have?
nrec = len(text)
if nrec <= 1:
print "must have init client and at least one loadgen client"
return
else:
print "found ",nrec," client records"
nclients = nrec - 1
# create frame for control
ctlFrame=LabelFrame(master,text="load generator parameters",background=BGCLR,fg="white",width=CTWIDTH,height=CHGT,bd=3,relief=RELF)
ctlFrame.grid_propagate(0)
ctlFrame.pack(side=LEFT)
# create frame for client status
cliFrame=LabelFrame(master,text="load generation clients",background=BGCLR,fg="white",width=CLWIDTH,height=CHGT,bd=3,relief=RELF)
cliFrame.grid_propagate(0)
cliFrame.pack(side=LEFT)
# create master control frame
clmFrame=LabelFrame(master,text="master controls",background=BGCLR,fg="white",width=CMWIDTH,height=CHGT,bd=3,relief=RELF)
clmFrame.pack_propagate(0)
clmFrame.pack(side=LEFT)
# get params for init client
rec = text[0].rstrip()
sys = rec.split(",")
iclient=mcInit(sys[0],sys[1],sys[2],sys[3],sys[4])
# create grid of client objects
clientctl = []
ncl = 0
for i in range(1,nrec):
rec = text[i].rstrip()
sys = rec.split(",")
irow = ncl/CLICOL
icol = ncl % CLICOL
clientctl.append(mcClient(cliFrame,sys[0],sys[1],sys[2],sys[3],sys[4],ncl,irow,icol))
ncl = ncl + 1
cliFrame.pack()
# add start/stop buttons to clmFrame
startButton = Button(clmFrame,text="Start All",bg='light green',fg='white',command=self.startAll,font=(FONT,10,"bold"),width=8,bd=2,relief=RAISED)
startButton.pack(side=TOP,pady=2)
stopButton = Button(clmFrame,text="Stop All",bg='indian red',fg='white',command=self.stopAll,font=(FONT,10,"bold"),width=8,bd=2,relief=RAISED)
stopButton.pack(side=TOP,pady=2)
initButton = Button(clmFrame,text="Load DB",bg='medium purple',fg='white',command=self.initDB,font=(FONT,10,"bold"),width=8,bd=2,relief=RAISED)
initButton.pack(side=TOP,pady=2)
if SHOWAVG:
resetavgButton = Button(clmFrame,text="Reset Avg",bg='gray',fg='white',command=self.resetavg,font=(FONT,10,"bold"),width=8,bd=2,relief=RAISED)
resetavgButton.pack(side=TOP,pady=2)
resetButton = Button(clmFrame,text="Reset",bg='steel blue',fg='white',command=self.reset,font=(FONT,10,"bold"),width=8,bd=2,relief=RAISED)
resetButton.pack(side=TOP,pady=2)
exitButton = Button(clmFrame,text="Exit",bg='sienna3',fg='white',command=self.stopAndExit,font=(FONT,10,"bold"),width=8,bd=2,relief=RAISED)
exitButton.pack(side=TOP,pady=2)
# add entry widgets for load parameters
ir = 0
ir = ir + 1
keyLabel = Label(ctlFrame,text="Key Size",font=(FONT,10,"bold"),bg=BGCLR,fg="white")
keyLabel.grid(row=ir,column=0,sticky=W,padx=2,pady=0)
keyCk = (ctlFrame.register(self.keyCheck),'%d','%i','%P','%s','%S','%v','%V','%W')
keyEntry = Entry(ctlFrame,font=(FONT,10),width=6,justify=RIGHT,validate='focusout',validatecommand=keyCk)
keyEntry.grid(row=ir,column=1,sticky=E,padx=2,pady=0)
keyEntry.insert(0,"%d" % CLkey)
ir = ir + 1
nthLabel = Label(ctlFrame,text="Threads per LG",font=(FONT,10,"bold"),bg=BGCLR,fg="white")
nthLabel.grid(row=ir,column=0,sticky=W,padx=2,pady=0)
nthCk = (ctlFrame.register(self.nthCheck),'%d','%i','%P','%s','%S','%v','%V','%W')
nthEntry = Entry(ctlFrame,font=(FONT,10),width=6,justify=RIGHT,validate='focusout',validatecommand=nthCk)
nthEntry.grid(row=ir,column=1,sticky=E,padx=2,pady=0)
nthEntry.insert(0,"%d" % clthr)
ir = ir + 1
conLabel = Label(ctlFrame,text="Connections per thread",font=(FONT,10,"bold"),bg=BGCLR,fg="white")
conLabel.grid(row=ir,column=0,sticky=W,padx=2,pady=0)
conCk = (ctlFrame.register(self.conCheck),'%d','%i','%P','%s','%S','%v','%V','%W')
conEntry = Entry(ctlFrame,font=(FONT,10),width=6,justify=RIGHT,validate='focusout',validatecommand=conCk)
conEntry.grid(row=ir,column=1,sticky=E,padx=2,pady=0)
conEntry.insert(0,"%d" % clconn)
ir = ir + 1
gprLabel = Label(ctlFrame,text="Gets per request",font=(FONT,10,"bold"),bg=BGCLR,fg="white")
gprLabel.grid(row=ir,column=0,sticky=W,padx=2,pady=0)
gprCk = (ctlFrame.register(self.gprCheck),'%d','%i','%P','%s','%S','%v','%V','%W')
gprEntry = Entry(ctlFrame,font=(FONT,10),width=6,justify=RIGHT,validate='focusout',validatecommand=gprCk)
gprEntry.grid(row=ir,column=1,sticky=E,padx=2,pady=0)
gprEntry.insert(0,"%d" % clgpr)
ir = ir + 1
delLabel = Label(ctlFrame,text="Microsec between requests",font=(FONT,10,"bold"),bg=BGCLR,fg="white")
delLabel.grid(row=ir,column=0,sticky=W,padx=2,pady=0)
delCk = (ctlFrame.register(self.delCheck),'%d','%i','%P','%s','%S','%v','%V','%W')
delEntry = Entry(ctlFrame,font=(FONT,10),width=6,justify=RIGHT,validate='focusout',validatecommand=delCk)
delEntry.grid(row=ir,column=1,sticky=E,padx=2,pady=0)
delEntry.insert(0,"%d" % cldelay)
ir = ir + 1
protoVar = StringVar()
protoLabel = Label(ctlFrame,text="Protocol",font=(FONT,10,"bold"),bg=BGCLR,fg="white")
protoLabel.grid(row=ir,column=0,sticky=W,padx=2,pady=0)
binButton = Radiobutton(ctlFrame,text="binary",variable=protoVar,value="binary",command=self.protoCheck)
binButton.grid(row=ir,column=0,sticky=E,padx=2,pady=0)
ascButton = Radiobutton(ctlFrame,text="ascii",variable=protoVar,value="ascii",command=self.protoCheck)
ascButton.grid(row=ir,column=1,sticky=E,padx=2,pady=0)
binButton.invoke()
ir = ir + 1
nclLabel = Label(ctlFrame,text="Active load generators",font=(FONT,10,"bold"),bg=BGCLR,fg="white")
nclLabel.grid(row=ir,column=0,sticky=W,padx=2,pady=0)
nclData = Label(ctlFrame,font=(FONT,10),anchor=E,width=6,fg="steel blue",bg='light gray',justify=RIGHT,bd=2,relief=RIDGE)
nclData.grid(row=ir,column=1,sticky=E,padx=2,pady=0)
nclData.config(text="%d" % active,justify=RIGHT)
ir = ir + 1
tcoLabel = Label(ctlFrame,text="Total connections",font=(FONT,10,"bold"),bg=BGCLR,fg="white")
tcoLabel.grid(row=ir,column=0,sticky=W,padx=2,pady=0)
tcoData = Label(ctlFrame,font=(FONT,10),anchor=E,width=6,fg="steel blue",bg='light gray',justify=RIGHT,bd=2,relief=RIDGE)
tcoData.grid(row=ir,column=1,sticky=E,padx=2,pady=0)
tcoData.config(text="%d" % totconn,justify=RIGHT)
master.update_idletasks()
# note: voodoo below is to pass Entry data to validate command
# %d = Type of action (1=insert, 0=delete, -1 for others)
# %i = index of char string to be inserted/deleted, or -1
# %P = value of the entry if the edit is allowed
# %s = value of the entry prior to editing
# %S = the text string being inserted or deleted, if any
# %v = the type of validation currently set
# %V = the type of validation that triggered the callback
# (key, focusin, focusout, forced)
# %W = the tk name of the widget
def keyCheck(self,d,i,P,s,S,v,V,W):
key = int(P)
if key < MINthr:
print "key too small"
keyEntry.delete(0,12)
keyEntry.insert(0,MINkey)
return False
if key > MAXkey:
print "key too large"
keyEntry.delete(0,12)
keyEntry.insert(0,MAXkey)
return False
print "key size now %d" % key
return True
def nthCheck(self,d,i,P,s,S,v,V,W):
nth = int(P)
if nth < MINthr:
print "too few threads"
nthEntry.delete(0,12)
nthEntry.insert(0,MINthr)
return False
if nth > MAXthr:
print "too many threads"
nthEntry.delete(0,12)
nthEntry.insert(0,MAXthr)
return False
print "num threads now %d" % nth
return True
def conCheck(self,d,i,P,s,S,v,V,W):
con = int(P)
if con < MINcon:
print "too few connections"
conEntry.delete(0,12)
conEntry.insert(0,MINcon)
return False
if con > MAXcon:
print "too many connections"
conEntry.delete(0,12)
conEntry.insert(0,MAXcon)
return False
print "num connections per thread now %d" % con
return True
def gprCheck(self,d,i,P,s,S,v,V,W):
gpr = int(P)
if gpr < MINgpr:
print "too few gets per request"
gprEntry.delete(0,12)
gprEntry.insert(0,MINgpr)
return False
if gpr > MAXgpr:
print "too many gets per request"
gprEntry.delete(0,12)
gprEntry.insert(0,MAXgpr)
return False
print "num gets per request now %d" % gpr
return True
def delCheck(self,d,i,P,s,S,v,V,W):
delay = int(P)
if delay < MINdel:
print "delay too small"
delEntry.delete(0,12)
delEntry.insert(0,MINdel)
return False
if delay > MAXdel:
print "delay too large"
delEntry.delete(0,12)
delEntry.insert(0,MAXdel)
return False
print "delay between requests now %d microsecond" % delay
return True
def protoCheck(self):
    # Radiobutton callback: report the newly selected memcached wire
    # protocol ("binary" or "ascii"); always accepts the change.
    print "protocol is now %s" % protoVar.get()
    return True
def startAll(self):
print "starting all clients"
for i in range(0,len(clientctl)):
clientctl[i].pressed()
def initDB(self):
    # create thread to asynchronously run mc-hammr init
    # Key size is read from the key-size entry; value size is the CLval
    # default.  Runs in a worker thread so the Tk event loop stays live.
    print "initializing DB"
    thread.start_new_thread(iclient.initCmd,(int(keyEntry.get()),CLval,))
def stopAll(self):
print "stopping all clients"
for i in range(0,len(clientctl)):
clientctl[i].stopCmd()
def stopAndExit(self):
print "stopping all clients"
for i in range(0,len(clientctl)):
clientctl[i].stopCmd()
print "exiting"
exit(0)
def resetavg(self):
    # Zero the module-level running-average accumulators.
    global samples
    global settot
    global gettot
    global gmisstot
    global lattot
    samples = settot = gettot = gmisstot = lattot = 0
def setParams(self,ith,icon,igpr,idelay,ikey,imode):
    # Load one automated-test parameter set into the control entries;
    # switch to ascii protocol when requested (binary is the default).
    for entry, value in ((nthEntry, ith), (conEntry, icon), (gprEntry, igpr),
                         (delEntry, idelay), (keyEntry, ikey)):
        entry.delete(0,12)
        entry.insert(0,"%d" % value)
    if imode == "ascii":
        ascButton.invoke()
def startN(self,icl):
print "starting ",icl," clients"
for i in range(0,icl):
print "starting client ",icl
clientctl[i].pressed()
def reset(self):
global active
global totconn
global nthEntry
global conEntry
global gprEntry
global delEntry
global keyEntry
global nclData
# reset values for parameters to defaults
nthEntry.delete(0,12)
nthEntry.insert(0,"%d" % CLThr)
conEntry.delete(0,12)
conEntry.insert(0,"%d" % CLCon)
gprEntry.delete(0,12)
gprEntry.insert(0,"%d" % CLgpr)
delEntry.delete(0,12)
delEntry.insert(0,"%d" % CLdelay)
keyEntry.delete(0,12)
keyEntry.insert(0,"%d" % CLkey)
# stop all running clients
for i in range(0,len(clientctl)):
print "stopping mc-hammr on client %d" % i
clientctl[i].stopCmd()
# pkill all surviving mc-hammr instances
for i in range(0,len(clientctl)):
print "pkill mc-hammr on client %d" % i
clientctl[i].pkill()
class mcInit:
    # One-shot driver that runs the mc-hammr database initialization on a
    # remote load-generator host over SSH (invoked from mcCtl.initDB in a
    # background thread).
    def __init__(self,remote,username,password,svrip,svrport):
        # remote/username/password: SSH credentials for the load generator.
        # svrip/svrport: memcached server the init traffic targets.
        self.state = "ready"
        self.mysys = remote
        self.myuser = username
        self.mypass = password
        self.mysvrip = svrip
        self.mysvrport = svrport
        print "setting up ssh connection for init thread"
        self.ssh = paramiko.SSHClient()
    def initCmd(self,keysize,valsize):
        # Run ./client_scripts/run_init on the remote host and block until
        # it exits.  keysize/valsize are the key and value sizes for the
        # initial data set.
        global active
        global totconn
        global nthEntry
        global conEntry
        global gprEntry
        global delEntry
        global keyEntry
        global nclData
        print "in initCmd"
        # get current values for parameters
        self.thr = 1
        self.conn = 1
        self.delay = 10
        self.keysize = keysize
        self.keypre = "%d:" % self.keysize
        self.valsize = valsize
        if self.state == "running":
            print "init already running on ",self.mysys
            return
        # set up ssh connection to client
        # print "logging in to ",self.mysys," as ",self.myuser," password ",self.mypass
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(self.mysys,username=self.myuser,password=self.mypass)
        # run_init hostip port nthreads connections delay keysize keyprefix valsize
        cmd = "./client_scripts/run_init %s %s %d %d %d %d %s %d" % (self.mysvrip,self.mysvrport,self.thr, self.conn, self.delay, self.keysize,self.keypre,self.valsize)
        print "running %s on %s " % (cmd,self.mysys)
        self.ssh_stdin,self.ssh_stdout, self.ssh_stderr = self.ssh.exec_command(cmd)
        # recv_exit_status() blocks until the remote script completes.
        status = self.ssh_stdout.channel.recv_exit_status()
        print "run_init completed with status ","%d" % status
        # print self.ssh_stdout
        # print self.ssh_stderr
        self.ssh.close()
class mcClient:
    # GUI proxy for one remote mc-hammr load generator: a toggle button that
    # starts/stops the generator over SSH and maintains the global
    # active-client and total-connection counters.
    def __init__(self,master,remote,username,password,svrip,svrport,id,irow,icol):
        # master: parent Tk widget; remote/username/password: SSH target;
        # svrip/svrport: memcached server; id: client index; irow/icol:
        # grid position for this client's button.
        self.state = "ready"
        self.mysys = remote
        self.myuser = username
        self.mypass = password
        self.mysvrip = svrip
        self.mysvrport = svrport
        self.myid = id
        self.state = "initial"
        self.ssh = paramiko.SSHClient()
        self.icon = Button(master,text=self.mysys,command=self.pressed,bg='khaki3',fg="black",font=(FONT,10),height=1,width=8,bd=2)
        self.icon.grid(row=irow,column=icol,padx=4,pady=4)
        self.state = "ready"
    def pressed(self):
        # Button callback: toggle between running and stopped.
        if self.state == "running":
            self.stopCmd()
        else:
            self.runCmd()
    def runCmd(self):
        # Launch run_loadgen on the remote host with the parameters currently
        # shown in the control panel, then update button color and counters.
        global active
        global totconn
        global g_thr
        global g_conn
        global g_gpr
        global g_delay
        global g_keysize
        global g_valsize
        global g_protocol
        global nthEntry
        global conEntry
        global gprEntry
        global delEntry
        global keyEntry
        global nclData
        # get current values for parameters
        self.thr = int(nthEntry.get())
        self.conn = int(conEntry.get())
        self.gpr = int(gprEntry.get())
        self.delay = int(delEntry.get())
        self.keysize = int(keyEntry.get())
        self.keypre = "%d:" % self.keysize
        self.valsize = CLval
        self.protocol = protoVar.get()
        if self.state == "running":
            print "client ",self.myid,"already running!"
            return
        # Publish the parameters actually in use (read elsewhere, e.g. when
        # recording averages).
        g_thr = self.thr
        g_conn = self.conn
        g_gpr = self.gpr
        g_delay = self.delay
        g_keysize = self.keysize
        g_valsize = self.valsize
        g_protocol = self.protocol
        # set up ssh connection to client
        # print "logging in to ",self.mysys," as ",self.myuser," password ",self.mypass
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(self.mysys,username=self.myuser,password=self.mypass)
        # load_gen hostip port nthreads conn mget delay keysize keyprefix valsize
        cmd = "./client_scripts/run_loadgen %s %s %d %d %d %d %d %s %d %s" % (self.mysvrip,self.mysvrport,self.thr, self.conn, self.gpr, self.delay, self.keysize,self.keypre,self.valsize,self.protocol)
        print "running %s on %s " % (cmd,self.mysys)
        self.ssh_stdin,self.ssh_stdout, self.ssh_stderr = self.ssh.exec_command(cmd)
        self.icon.configure(background='DarkSeaGreen2',relief=SUNKEN)
        self.state = "running"
        active = active + 1
        nclData.config(text="%d" % active)
        totconn = totconn + (self.conn * self.thr)
        tcoData.config(text="%d" % totconn)
    def stopCmd(self):
        # Ask the remote generator to stop and update counters/appearance.
        global active
        global totconn
        global nclData
        if self.state != "running":
            return
        print "closing ssh connection to ",self.mysys
        # NOTE(review): "stop" is written twice — presumably deliberate
        # (lost-write insurance for the remote reader); confirm before changing.
        self.ssh_stdin.write("stop\n")
        self.ssh_stdin.write("stop\n")
        # print self.ssh_stdout
        # print self.ssh_stderr
        self.ssh.close()
        self.icon.configure(background='khaki3',relief=RAISED)
        self.state = "ready"
        active = active - 1
        nclData.config(text="%d" % active)
        totconn = totconn - (self.conn * self.thr)
        tcoData.config(text="%d" % totconn)
    def pkill(self):
        # Belt-and-braces cleanup: stop first, then pkill any surviving
        # mc-hammr process on the remote host over a fresh SSH connection.
        self.stopCmd()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.connect(self.mysys,username=self.myuser,password=self.mypass)
        cmd = "pkill mc-hammr"
        print "running %s on %s " % (cmd,self.mysys)
        self.ssh_stdin,self.ssh_stdout, self.ssh_stderr = self.ssh.exec_command(cmd)
        # print self.ssh_stdout
        # print self.ssh_stderr
        self.ssh.close()
def printusage():
    # Print the command-line help text for mcmon.py to stdout.
    print "usage: python mcmon.py [--stats] [--latency] [--misses] [--clients=<file>] [--showavg] [--record=<file>] [--auto=<file>] [--help] system:port"
    print "  --stats: display dump of memcached stats"
    print "  --misses: plot get misses"
    print "  --latency: plot latency"
    print "  --clients=<filename>: client description file (note 1st entry is init)"
    print "  --showavg: show running averages"
    print "  --record=<filename>: append averages to <filename> when sample count == 50"
    print "  --auto=<filename>: automatically run tests described in <filename>"
    print "    file should contain records with comma separated integer parameters:"
    print "    clients,threads,connections per thread,gets per req,delay,keysize"
    print "  --help: print this message"
#
# main driver
#
# Parses command-line options, builds the Tk monitor (and optional load
# generator control panel), optionally kicks off an automated test run,
# then enters the Tk main loop.
global SYSTEM
global HOSTNAME
global PORT
global LATMULTI
global LOADGEN
global RECORD
global TRACE
global AUTO
global autoparms
global warmcnt
global nclients
# Feature flags and their defaults; overridden by command-line options.
STATS = False
LATENCY = False
LATMULTI = False
MISSES = False
LOADGEN = False
SHOWAVG = False
RECORD = False
RECFILE=""
TRACE = False
TRACEFILE=""
AUTO = False
AUTOFILE = "AUTOPARAMS"
CLIENTS = 'CLIENTS'
SYSTEM = "localhost:11211"
starttime = os.times()
root = Tk()
# some housekeeping
locale.setlocale(locale.LC_ALL, 'en_US')
# parse args
for a in sys.argv[1:]:
    if (a == "--stats"):
        STATS = True
    elif (a == "--loadgen"):
        LOADGEN = True
    elif (a == "--latency"):
        LATENCY = True
    elif (a == "--latmulti"):
        # latmulti implies latency plotting as well
        LATMULTI = True
        LATENCY = True
    elif (a == "--misses"):
        MISSES = True
    elif (a == "--showavg"):
        SHOWAVG = True
    elif (a[0:9] == "--record="):
        # recording implies showing averages
        SHOWAVG = True
        RECORD = True
        RECFILE=a.split('=')[1]
        print "recording averages in %s" % RECFILE
    elif (a[0:8] == "--trace="):
        TRACE = True
        TRACEFILE=a.split('=')[1]
        print "recording data in %s" % TRACEFILE
    elif (a[0:7] == "--auto="):
        # automated runs record averages too
        SHOWAVG = True
        RECORD = True
        AUTO = True
        AUTOFILE=a.split('=')[1]
        print "executing test parameters from %s" % AUTOFILE
    elif (a[0:10] == "--clients="):
        CLIENTS=a.split('=')[1]
        print "client file is %s" % CLIENTS
    elif (a == "--help"):
        printusage()
        exit(0)
    elif (a[0] == "-"):
        print "invalid option %s" % a
        printusage()
        exit(1)
    else:
        # bare argument is the target system:port
        SYSTEM = a
HOSTNAME = SYSTEM.split(':')[0]
PORT = SYSTEM.split(':')[1]
# create header
root.title("memcached monitor")
# create frame
mcFrame=Frame(root,bg='black',width=PWIDTH)
mcFrame.pack()
stats = mcStats(mcFrame,SYSTEM,LATENCY,MISSES,STATS,SHOWAVG,RECFILE,TRACEFILE,AUTOFILE)
root.update_idletasks()
if LOADGEN:
    # second frame hosts the load-generator control panel
    ctFrame=Frame(root,bg='black',width=PWIDTH)
    ctFrame.pack()
    controller = mcCtl(ctFrame,SYSTEM,CLIENTS)
    root.update_idletasks()
# initialize auto parameters
if AUTO:
    # First record of the auto file seeds the initial test; the rest are
    # consumed later from the autoparms deque.
    afile=open(AUTOFILE,"r")
    autoparms = deque(afile.readlines())
    afile.close()
    ptext = autoparms.popleft().rstrip()
    params = ptext.split(",")
    iclients = int(params[0])
    if iclients > nclients:
        print "requested ",iclients," clients, ",nclients," available"
        iclients = nclients
    ithreads = int(params[1])
    iconn = int(params[2])
    igetperreq = int(params[3])
    idelay= int(params[4])
    ikeysize= int(params[5])
    imode = params[6]
    controller.setParams(ithreads,iconn,igetperreq,idelay,ikeysize,imode)
    controller.startN(iclients)
    root.update_idletasks()
root.mainloop()
|
from aspi import AsPi, AsPiLogger
import time
import random
from pathlib import Path
SENSORS = [
AsPi.SENSOR_MOTION,
AsPi.SENSOR_LAT,
AsPi.SENSOR_LON,
AsPi.SENSOR_ECLIPSED,
AsPi.SENSOR_USERDATA
]
class DataProcessor:
    """Callback target for AsPiLogger: reports new photo/sensor updates.

    process_data is passed to AsPiLogger as updateCallback and receives a
    (photo, sensor_readings) pair on every update.
    """

    def __init__(self):
        # Image counter (declared but not incremented in the visible code).
        self.imgcounter = 0

    def process_data(self, data):
        """Handle one update; return a unique name for the photo, or None.

        BUG FIX: when photo is None the original fell through to the
        return statement with photo_filename unbound, raising NameError.
        We now return None in that case instead of crashing.
        """
        photo, sensor_readings = data
        photo_filename = None
        if photo is None:
            print("NO photo for YOU!")
        else:
            photo_filename, photo_data = photo
            print("New photo available")
        if sensor_readings is None:
            print("NO sensor readings for YOU!")
        else:
            print("New sensor readings available")
        # Simulates slow processing (kept from the original example).
        time.sleep(3)
        if photo_filename is None:
            return None
        return Path(photo_filename).name + "-" + str(random.random())
# BUG FIX: the class defined above is DataProcessor; instantiating
# ImageProcessor raised NameError before the logger ever started.
imgproc = DataProcessor()
# Log every second, photograph every 2 seconds, for 10 seconds total,
# mirroring sensor output to stderr and feeding updates to process_data.
aspilogger = AsPiLogger(
    cameraEnabled=True,
    logPeriodInSecs=1,
    imgPeriodInSecs=2,
    filePrefix="process-data-example",
    durationInSecs=10,
    logToStdErr=True,
    sensorList=SENSORS,
    updateCallback=imgproc.process_data)
aspilogger.start()
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""This module is used to collect lineage information of model training."""
import json
import os
import numpy as np
from mindinsight.lineagemgr.summary.summary_record import LineageSummary
from mindinsight.utils.exceptions import \
MindInsightException
from mindinsight.lineagemgr.common.validator.validate import validate_train_run_context, \
validate_eval_run_context, validate_file_path, validate_network, \
validate_int_params, validate_summary_record, validate_raise_exception
from mindinsight.lineagemgr.common.exceptions.error_code import LineageErrors, LineageErrorMsg
from mindinsight.lineagemgr.common.exceptions.exceptions import LineageParamRunContextError, \
LineageGetModelFileError, LineageLogError
from mindinsight.lineagemgr.common.log import logger as log
from mindinsight.lineagemgr.common.utils import try_except
from mindinsight.lineagemgr.common.validator.model_parameter import RunContextArgs, \
EvalParameter
from mindinsight.lineagemgr.collection.model.base import Metadata
try:
from mindspore.common.tensor import Tensor
from mindspore.train.callback import Callback, RunContext, ModelCheckpoint, SummaryStep
from mindspore.nn import Cell, Optimizer, WithLossCell, TrainOneStepWithLossScaleCell
from mindspore.nn.loss.loss import _Loss
from mindspore.dataset.engine import Dataset, MindDataset
import mindspore.dataset as ds
except (ImportError, ModuleNotFoundError):
log.warning('MindSpore Not Found!')
class TrainLineage(Callback):
    """
    Collect lineage of a training job.

    Args:
        summary_record (SummaryRecord): SummaryRecord is used to record
            the summary value, and summary_record is an instance of SummaryRecord,
            see mindspore.train.summary.SummaryRecord.
        raise_exception (bool): Whether to raise exception when error occurs in
            TrainLineage. If True, raise exception. If False, catch exception
            and continue. Default: False.

    Raises:
        MindInsightException: If validating parameter fails.
        LineageLogError: If recording lineage information fails.

    Examples:
        >>> from mindinsight.lineagemgr import TrainLineage
        >>> from mindspore.train.callback import ModelCheckpoint, SummaryStep
        >>> from mindspore.train.summary import SummaryRecord
        >>> model = Model(train_network)
        >>> model_ckpt = ModelCheckpoint(directory='/dir/to/save/model/')
        >>> summary_writer = SummaryRecord(log_dir='./')
        >>> summary_callback = SummaryStep(summary_writer, flush_step=2)
        >>> lineagemgr = TrainLineage(summary_record=summary_writer)
        >>> model.train(epoch_num, dataset, callbacks=[model_ckpt, summary_callback, lineagemgr])
    """
    def __init__(self, summary_record, raise_exception=False):
        super(TrainLineage, self).__init__()
        try:
            validate_raise_exception(raise_exception)
            self.raise_exception = raise_exception
            validate_summary_record(summary_record)
            self.summary_record = summary_record
            summary_log_path = summary_record.full_file_name
            validate_file_path(summary_log_path)
            # Lineage is written next to the summary file, suffixed '_lineage'.
            self.lineage_log_path = summary_log_path + '_lineage'
            self.initial_learning_rate = None
        except MindInsightException as err:
            log.error(err)
            if raise_exception:
                raise

    @try_except(log)
    def begin(self, run_context):
        """
        Initialize the training progress when the training job begins.

        Args:
            run_context (RunContext): It contains all lineage information,
                see mindspore.train.callback.RunContext.

        Raises:
            MindInsightException: If validating parameter fails.
        """
        log.info('Initialize training lineage collection...')
        if not isinstance(run_context, RunContext):
            error_msg = f'Invalid TrainLineage run_context.'
            log.error(error_msg)
            raise LineageParamRunContextError(error_msg)
        run_context_args = run_context.original_args()
        if not self.initial_learning_rate:
            # Prefer the optimizer supplied directly; otherwise dig it out
            # of the training network's cells.
            optimizer = run_context_args.get('optimizer')
            if optimizer and not isinstance(optimizer, Optimizer):
                log.error("The parameter optimizer is invalid. It should be an instance of "
                          "mindspore.nn.optim.optimizer.Optimizer.")
                raise MindInsightException(error=LineageErrors.PARAM_OPTIMIZER_ERROR,
                                           message=LineageErrorMsg.PARAM_OPTIMIZER_ERROR.value)
            if optimizer:
                log.info('Obtaining initial learning rate...')
                self.initial_learning_rate = AnalyzeObject.analyze_optimizer(optimizer)
                log.debug('initial_learning_rate: %s', self.initial_learning_rate)
            else:
                network = run_context_args.get('train_network')
                validate_network(network)
                optimizer = AnalyzeObject.get_optimizer_by_network(network)
                self.initial_learning_rate = AnalyzeObject.analyze_optimizer(optimizer)
                log.debug('initial_learning_rate: %s', self.initial_learning_rate)
        # get train dataset graph
        train_dataset = run_context_args.get('train_dataset')
        # Round-trip through JSON to normalize the serialized graph into
        # plain dict/list types before logging.
        dataset_graph_dict = ds.serialize(train_dataset)
        dataset_graph_json_str = json.dumps(dataset_graph_dict, indent=2)
        dataset_graph_dict = json.loads(dataset_graph_json_str)
        log.info('Logging dataset graph...')
        try:
            lineage_summary = LineageSummary(self.lineage_log_path)
            lineage_summary.record_dataset_graph(dataset_graph=dataset_graph_dict)
        except Exception as error:
            error_msg = f'Dataset graph log error in TrainLineage begin: {error}'
            log.error(error_msg)
            raise LineageLogError(error_msg)
        log.info('Dataset graph logged successfully.')

    @try_except(log)
    def end(self, run_context):
        """
        Collect lineage information when the training job ends.

        Args:
            run_context (RunContext): It contains all lineage information,
                see mindspore.train.callback.RunContext.

        Raises:
            LineageLogError: If recording lineage information fails.
        """
        log.info('Start to collect training lineage...')
        if not isinstance(run_context, RunContext):
            error_msg = f'Invalid TrainLineage run_context.'
            log.error(error_msg)
            raise LineageParamRunContextError(error_msg)
        run_context_args = run_context.original_args()
        validate_train_run_context(RunContextArgs, run_context_args)
        train_lineage = dict()
        # Network/optimizer/loss metadata first, then run statistics.
        train_lineage = AnalyzeObject.get_network_args(
            run_context_args, train_lineage
        )
        train_dataset = run_context_args.get('train_dataset')
        callbacks = run_context_args.get('list_callback')
        list_callback = getattr(callbacks, '_callbacks', [])
        log.info('Obtaining model files...')
        ckpt_file_path, _ = AnalyzeObject.get_file_path(list_callback)
        train_lineage[Metadata.learning_rate] = self.initial_learning_rate
        train_lineage[Metadata.epoch] = run_context_args.get('epoch_num')
        train_lineage[Metadata.step_num] = run_context_args.get('cur_step_num')
        train_lineage[Metadata.parallel_mode] = run_context_args.get('parallel_mode')
        train_lineage[Metadata.device_num] = run_context_args.get('device_number')
        train_lineage[Metadata.batch_size] = run_context_args.get('batch_num')
        model_path_dict = {
            'ckpt': ckpt_file_path
        }
        train_lineage[Metadata.model_path] = json.dumps(model_path_dict)
        log.info('Calculating model size...')
        train_lineage[Metadata.model_size] = AnalyzeObject.get_model_size(
            ckpt_file_path
        )
        log.debug('model_size: %s', train_lineage[Metadata.model_size])
        log.info('Analyzing dataset object...')
        train_lineage = AnalyzeObject.analyze_dataset(train_dataset, train_lineage, 'train')
        log.info('Logging lineage information...')
        try:
            lineage_summary = LineageSummary(self.lineage_log_path)
            lineage_summary.record_train_lineage(train_lineage)
        except IOError as error:
            error_msg = f'End error in TrainLineage: {error}'
            log.error(error_msg)
            raise LineageLogError(error_msg)
        except Exception as error:
            error_msg = f'End error in TrainLineage: {error}'
            log.error(error_msg)
            log.error('Fail to log the lineage of the training job.')
            raise LineageLogError(error_msg)
        log.info('The lineage of the training job has logged successfully.')
class EvalLineage(Callback):
    """
    Collect lineage of an evaluation job.

    Args:
        summary_record (SummaryRecord): SummaryRecord is used to record
            the summary value, and summary_record is an instance of SummaryRecord,
            see mindspore.train.summary.SummaryRecord.
        raise_exception (bool): Whether to raise exception when error occurs in
            EvalLineage. If True, raise exception. If False, catch exception
            and continue. Default: False.

    Raises:
        MindInsightException: If validating parameter fails.
        LineageLogError: If recording lineage information fails.

    Examples:
        >>> from mindinsight.lineagemgr import EvalLineage
        >>> from mindspore.train.callback import ModelCheckpoint, SummaryStep
        >>> from mindspore.train.summary import SummaryRecord
        >>> model = Model(train_network)
        >>> model_ckpt = ModelCheckpoint(directory='/dir/to/save/model/')
        >>> summary_writer = SummaryRecord(log_dir='./')
        >>> summary_callback = SummaryStep(summary_writer, flush_step=2)
        >>> lineagemgr = EvalLineage(summary_record=summary_writer)
        >>> model.eval(epoch_num, dataset, callbacks=[model_ckpt, summary_callback, lineagemgr])
    """
    def __init__(self, summary_record, raise_exception=False):
        super(EvalLineage, self).__init__()
        try:
            validate_raise_exception(raise_exception)
            self.raise_exception = raise_exception
            validate_summary_record(summary_record)
            self.summary_record = summary_record
            summary_log_path = summary_record.full_file_name
            validate_file_path(summary_log_path)
            # Lineage is written next to the summary file, suffixed '_lineage'.
            self.lineage_log_path = summary_log_path + '_lineage'
        except MindInsightException as err:
            log.error(err)
            if raise_exception:
                raise

    @try_except(log)
    def end(self, run_context):
        """
        Collect lineage information when the evaluation job ends.

        Args:
            run_context (RunContext): It contains all lineage information,
                see mindspore.train.callback.RunContext.

        Raises:
            MindInsightException: If validating parameter fails.
            LineageLogError: If recording lineage information fails.
        """
        if not isinstance(run_context, RunContext):
            error_msg = f'Invalid EvalLineage run_context.'
            log.error(error_msg)
            raise LineageParamRunContextError(error_msg)
        run_context_args = run_context.original_args()
        validate_eval_run_context(EvalParameter, run_context_args)
        valid_dataset = run_context_args.get('valid_dataset')
        eval_lineage = dict()
        metrics = run_context_args.get('metrics')
        eval_lineage[Metadata.metrics] = json.dumps(metrics)
        eval_lineage[Metadata.step_num] = run_context_args.get('cur_step_num')
        log.info('Analyzing dataset object...')
        eval_lineage = AnalyzeObject.analyze_dataset(valid_dataset, eval_lineage, 'valid')
        log.info('Logging evaluation job lineage...')
        try:
            lineage_summary = LineageSummary(self.lineage_log_path)
            lineage_summary.record_evaluation_lineage(eval_lineage)
        except IOError as error:
            error_msg = f'End error in EvalLineage: {error}'
            log.error(error_msg)
            log.error('Fail to log the lineage of the evaluation job.')
            raise LineageLogError(error_msg)
        except Exception as error:
            error_msg = f'End error in EvalLineage: {error}'
            log.error(error_msg)
            log.error('Fail to log the lineage of the evaluation job.')
            raise LineageLogError(error_msg)
        log.info('The lineage of the evaluation job has logged successfully.')
class AnalyzeObject:
    """Analyze class object in MindSpore."""

    @staticmethod
    def get_optimizer_by_network(network):
        """
        Get optimizer by analyzing network.

        Args:
            network (Cell): See mindspore.nn.Cell.

        Returns:
            Optimizer, an Optimizer object.
        """
        optimizer = None
        # Scan the network's direct sub-cells for the first Optimizer.
        net_args = vars(network) if network else {}
        net_cell = net_args.get('_cells') if net_args else {}
        for _, value in net_cell.items():
            if isinstance(value, Optimizer):
                optimizer = value
                break
        return optimizer

    @staticmethod
    def get_loss_fn_by_network(network):
        """
        Get loss function by analyzing network.

        Args:
            network (Cell): See mindspore.nn.Cell.

        Returns:
            Loss_fn, a Cell object.
        """
        loss_fn = None
        inner_cell_list = []
        net_args = vars(network) if network else {}
        net_cell = net_args.get('_cells') if net_args else {}
        # Seed a breadth-first search with the non-optimizer sub-cells.
        for _, value in net_cell.items():
            if isinstance(value, Cell) and \
                    not isinstance(value, Optimizer):
                inner_cell_list.append(value)
        while inner_cell_list:
            inner_net_args = vars(inner_cell_list[0])
            inner_net_cell = inner_net_args.get('_cells')
            for value in inner_net_cell.values():
                if isinstance(value, _Loss):
                    loss_fn = value
                    break
                if isinstance(value, Cell):
                    inner_cell_list.append(value)
            if loss_fn:
                break
            inner_cell_list.pop(0)
        return loss_fn

    @staticmethod
    def get_backbone_network(network):
        """
        Get the name of backbone network.

        Args:
            network (Cell): The train network.

        Returns:
            str, the name of the backbone network.
        """
        with_loss_cell = False
        backbone = None
        net_args = vars(network) if network else {}
        net_cell = net_args.get('_cells') if net_args else {}
        # A WithLossCell wrapper stores the real network in '_backbone'.
        for _, value in net_cell.items():
            if isinstance(value, WithLossCell):
                backbone = getattr(value, '_backbone')
                with_loss_cell = True
                break
        if with_loss_cell:
            backbone_name = type(backbone).__name__ \
                if backbone else None
        elif isinstance(network, TrainOneStepWithLossScaleCell):
            backbone = getattr(network, 'network')
            backbone_name = type(backbone).__name__ \
                if backbone else None
        else:
            # Fall back to the network's own class name.
            backbone_name = type(network).__name__ \
                if network else None
        return backbone_name

    @staticmethod
    def analyze_optimizer(optimizer):
        """
        Analyze Optimizer, a Cell object of MindSpore.

        In this way, we can obtain the following attributes:
        learning_rate (float),
        weight_decay (float),
        momentum (float),
        weights (float).

        Args:
            optimizer (Optimizer): See mindspore.nn.optim.Optimizer.

        Returns:
            float, the learning rate that the optimizer adopted.
        """
        learning_rate = None
        if isinstance(optimizer, Optimizer):
            learning_rate = getattr(optimizer, 'learning_rate', None)
            if learning_rate:
                learning_rate = learning_rate.default_input
                # Get the real learning rate value
                if isinstance(learning_rate, Tensor):
                    learning_rate = learning_rate.asnumpy()
                    # Promote a scalar ndarray so it can be listed below.
                    if learning_rate.ndim == 0:
                        learning_rate = np.atleast_1d(learning_rate)
                    learning_rate = list(learning_rate)
                elif isinstance(learning_rate, float):
                    learning_rate = [learning_rate]
        # Only the first element (the initial rate) is reported.
        return learning_rate[0] if learning_rate else None

    @staticmethod
    def analyze_dataset(dataset, lineage_dict, dataset_type):
        """
        Analyze Dataset, a Dataset object of MindSpore.

        In this way, we can obtain the following attributes:
        dataset_path (str),
        train_dataset_size (int),
        valid_dataset_size (int),
        batch_size (int)

        Args:
            dataset (Dataset): See mindspore.dataengine.datasets.Dataset.
            lineage_dict (dict): A dict contains lineage metadata.
            dataset_type (str): Dataset type, train or valid.

        Returns:
            dict, the lineage metadata.
        """
        dataset_batch_size = dataset.get_dataset_size()
        if dataset_batch_size is not None:
            validate_int_params(dataset_batch_size, 'dataset_batch_size')
            log.debug('dataset_batch_size: %d', dataset_batch_size)
        dataset_path = AnalyzeObject.get_dataset_path_wrapped(dataset)
        if dataset_path:
            # Keep only the containing directory of the dataset file.
            dataset_path = '/'.join(dataset_path.split('/')[:-1])
        step_num = lineage_dict.get('step_num')
        validate_int_params(step_num, 'step_num')
        log.debug('step_num: %d', step_num)
        if dataset_type == 'train':
            lineage_dict[Metadata.train_dataset_path] = dataset_path
            epoch = lineage_dict.get('epoch')
            # steps per epoch * batches gives the per-epoch dataset size.
            train_dataset_size = dataset_batch_size * (step_num / epoch)
            lineage_dict[Metadata.train_dataset_size] = int(train_dataset_size)
        elif dataset_type == 'valid':
            lineage_dict[Metadata.valid_dataset_path] = dataset_path
            lineage_dict[Metadata.valid_dataset_size] = dataset_batch_size * step_num
        return lineage_dict

    def get_dataset_path(self, output_dataset):
        """
        Get dataset path of MindDataset object.

        Args:
            output_dataset (Union[MindDataset, Dataset]): See
                mindspore.dataengine.datasets.Dataset.

        Returns:
            str, dataset path.
        """
        if isinstance(output_dataset, MindDataset):
            return output_dataset.dataset_file
        # Recurse into the first upstream dataset until a MindDataset is found.
        return self.get_dataset_path(output_dataset.input[0])

    @staticmethod
    def get_dataset_path_wrapped(dataset):
        """
        A wrapper for obtaining dataset path.

        Args:
            dataset (Union[MindDataset, Dataset]): See
                mindspore.dataengine.datasets.Dataset.

        Returns:
            str, dataset path.
        """
        dataset_path = None
        if isinstance(dataset, Dataset):
            try:
                dataset_path = AnalyzeObject().get_dataset_path(dataset)
            except IndexError:
                # Raised when the recursion hits a dataset with no inputs.
                dataset_path = None
        validate_file_path(dataset_path, allow_empty=True)
        return dataset_path

    @staticmethod
    def get_file_path(list_callback):
        """
        Get ckpt_file_name and summary_log_path from MindSpore callback list.

        Args:
            list_callback (list[Callback]): The MindSpore training Callback list.

        Returns:
            tuple, contains ckpt_file_name and summary_log_path.
        """
        ckpt_file_path = None
        summary_log_path = None
        for callback in list_callback:
            if isinstance(callback, ModelCheckpoint):
                ckpt_file_path = callback.latest_ckpt_file_name
            if isinstance(callback, SummaryStep):
                summary_log_path = callback.summary_file_name
        if ckpt_file_path:
            validate_file_path(ckpt_file_path)
            ckpt_file_path = os.path.realpath(ckpt_file_path)
        if summary_log_path:
            validate_file_path(summary_log_path)
            summary_log_path = os.path.realpath(summary_log_path)
        return ckpt_file_path, summary_log_path

    @staticmethod
    def get_file_size(file_path):
        """
        Get the file size.

        Args:
            file_path (str): The file path.

        Returns:
            int, the file size.
        """
        try:
            return os.path.getsize(file_path)
        except (OSError, IOError) as error:
            error_msg = f"Error when get model file size: {error}"
            log.error(error_msg)
            raise LineageGetModelFileError(error_msg)

    @staticmethod
    def get_model_size(ckpt_file_path):
        """
        Get model the total size of the model file and the checkpoint file.

        Args:
            ckpt_file_path (str): The checkpoint file path.

        Returns:
            int, the total file size.
        """
        if ckpt_file_path:
            ckpt_file_path = os.path.realpath(ckpt_file_path)
            ckpt_file_size = AnalyzeObject.get_file_size(ckpt_file_path)
        else:
            ckpt_file_size = 0
        return ckpt_file_size

    @staticmethod
    def get_network_args(run_context_args, train_lineage):
        """
        Get the parameters related to the network,
        such as optimizer, loss function.

        Args:
            run_context_args (dict): It contains all information of the training job.
            train_lineage (dict): A dict contains lineage metadata.

        Returns:
            dict, the lineage metadata.
        """
        network = run_context_args.get('train_network')
        validate_network(network)
        optimizer = run_context_args.get('optimizer')
        if not optimizer:
            optimizer = AnalyzeObject.get_optimizer_by_network(network)
        loss_fn = run_context_args.get('loss_fn')
        if not loss_fn:
            # When no explicit loss_fn was given, search the network; in
            # that case there is no separate loss value to record.
            loss_fn = AnalyzeObject.get_loss_fn_by_network(network)
            loss = None
        else:
            loss = run_context_args.get('net_outputs')
        if loss:
            log.info('Calculating loss...')
            loss_numpy = loss.asnumpy()
            loss = float(np.atleast_1d(loss_numpy)[0])
            log.debug('loss: %s', loss)
            train_lineage[Metadata.loss] = loss
        else:
            train_lineage[Metadata.loss] = None
        # Analyze classname of optimizer, loss function and training network.
        train_lineage[Metadata.optimizer] = type(optimizer).__name__ \
            if optimizer else None
        train_lineage[Metadata.train_network] = AnalyzeObject.get_backbone_network(network)
        train_lineage[Metadata.loss_function] = type(loss_fn).__name__ \
            if loss_fn else None
        return train_lineage
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.flatpages.models import FlatPage
from django.test import TestCase
from django.test.utils import override_script_prefix
class FlatpageModelTests(TestCase):

    def test_get_absolute_url_urlencodes(self):
        """Non-ASCII characters in the page URL are percent-encoded."""
        page = FlatPage(title="Café!", url='/café/')
        self.assertEqual(page.get_absolute_url(), '/caf%C3%A9/')

    @override_script_prefix('/beverages/')
    def test_get_absolute_url_honors_script_prefix(self):
        """The active script prefix is prepended to the page URL."""
        page = FlatPage(title="Tea!", url='/tea/')
        self.assertEqual(page.get_absolute_url(), '/beverages/tea/')
|
import numpy as np
from tuner.maximizers.base_maximizer import BaseMaximizer
class GridSearch(BaseMaximizer):
    def __init__(self, objective_function, lower, upper, resolution=1000):
        """
        Maximizes a one dimensional acquisition function by evaluating it
        on an equally spaced grid.

        Parameters
        ----------
        objective_function: acquisition function
            The acquisition function which will be maximized
        lower: np.ndarray (D)
            Lower bounds of the input space
        upper: np.ndarray (D)
            Upper bounds of the input space
        resolution: int
            Defines of how many data points the grid consists.
        """
        self.resolution = resolution
        # Only one dimensional search spaces are supported.
        if lower.shape[0] > 1:
            raise RuntimeError("Grid search works just for \
                one dimensional functions")
        super(GridSearch, self).__init__(objective_function, lower, upper)

    def maximize(self):
        """
        Maximizes the given acquisition function.

        Returns
        -------
        np.ndarray(N,D)
            Point with highest acquisition value.
        """
        grid = np.linspace(self.lower[0], self.upper[0],
                           self.resolution).reshape((self.resolution, 1, 1))
        values = np.array([self.objective_func(point) for point in grid])
        best = grid[values.argmax()]
        return best[0]
|
"""
Setup for compiling cache

Jonas Toft Arnfred, 2013-04-22
"""
from distutils.core import setup
from Cython.Build import cythonize

# Cython sources compiled into extension modules.
PYX_SOURCES = ["cache.pyx", "fastmatch.pyx", "turntable_ground_truth.pyx"]

setup(ext_modules=cythonize(PYX_SOURCES))
|
import time
import signal
import timeout_decorator
from contextlib import contextmanager
@contextmanager
def time_limit(seconds):
    """Raise TimeoutError if the managed body runs longer than `seconds` (SIGALRM-based, Unix only)."""
    def on_alarm(signum, frame):
        raise TimeoutError("Timed out!")

    signal.signal(signal.SIGALRM, on_alarm)
    signal.alarm(seconds)
    try:
        yield
    finally:
        # Always cancel the pending alarm, even if the body raised.
        signal.alarm(0)
def long_function_call():
    """Busy-loop long enough to trip a timeout, printing progress markers."""
    counter = 1
    for _ in range(10000000):
        counter += 10
        if counter > 10000:
            counter = 1
            print(counter)
    print(counter)
def test_simple_func():
    """Run long_function_call under a 10 second limit and report a timeout."""
    try:
        with time_limit(10):
            long_function_call()
    except TimeoutError:
        print("Timed out!")
# NOTE: uses the third-party timeout_decorator package with use_signals=False,
# so it also works off the main thread (unlike the SIGALRM-based time_limit).
@timeout_decorator.timeout(5, timeout_exception=TimeoutError, use_signals=False)
def test():
    """Sleep in 1 s steps; the 5 s decorator timeout aborts it midway."""
    print("Start")
    for i in range(1, 10):
        time.sleep(1)
        print("{} seconds have passed".format(i))
test()
|
# pylint: disable=C0303, C0103
# Simplified batch manager
import multiprocessing
from collections import namedtuple
from time import sleep
from numpy import random
from pythia.X_to_tautau import generate_X_to_tautau
JobParams = namedtuple('JobParams',
['cmndfile', # path to Pythia command file
'target_val', # target (1,2,3)
'outname', # output name
'nevents', # number of events to generate
'massrange', # list with lower, upper inv mass range
'smearing' # boolean, apply smearing or not
])
def worker((cmnd_file, target_val, outname, nevents, mass_range, smearing)):
    # Python 2 tuple-parameter unpacking: receives one JobParams-shaped sequence.
    # Wait for random time to avoid pythia start at the same time, hence using same random seed
    random.seed(hash(outname) % 10000)
    sleep(random.uniform(0.0, 3.0))
    print 'Generating %d events from %s' % (nevents, cmnd_file)
    generate_X_to_tautau(cmnd_file, target_val, nevents, outname, mass_range, smearing)
class Dispatcher(object):
    """ Manage a pool of workers """

    def __init__(self, n_simultaneous_jobs):
        """
        n_simultaneous_jobs: number of workers in pool
        """
        super(Dispatcher, self).__init__()
        # Jobs accumulate here and are only handed to the pool in start().
        self.joblist = []
        self.pool = multiprocessing.Pool(n_simultaneous_jobs)

    def submit(self, jobparams, split=False):
        """ Submit a job """
        # `split` is either False or an int giving the number of sub-jobs.
        if split:
            self.submit_split(jobparams, split)
        else:
            self.submit_single(jobparams)

    def submit_single(self, jobparams):
        """ Convert jobparams to list, append to job list """
        # Relies on namedtuple._asdict() preserving field order so the values
        # line up with worker()'s tuple parameter.
        self.joblist.append(jobparams._asdict().values())

    def submit_split(self, jobparams, nsplit):
        """ Split a job into multiple and submit """
        assert isinstance(nsplit, int)
        # Python 2 integer division: every sub-job gets the floor share;
        # the first sub-job also absorbs the remainder.
        neach = jobparams.nevents / nsplit
        remainder = jobparams.nevents % nsplit
        for i in range(nsplit):
            # foo.h5 -> foo_000, foo_001, ... (suffix replaces '.h5')
            outputname = jobparams.outname.replace('.h5', '_{:03d}'.format(i))
            if i == 0:
                jp = jobparams._replace(nevents=(neach+remainder),
                                        outname=outputname)
            else:
                jp = jobparams._replace(nevents=neach, outname=outputname)
            self.submit_single(jp)

    def start(self):
        """ Start all submitted jobs """
        print 'Starting jobs'
        self.pool.imap(worker, self.joblist)

    def close(self):
        """ Close pool, then wait and join """
        print 'Closing pool'
        self.pool.close()
        self.pool.join()
|
#Good morning! Here's your coding interview problem for today.
#
#This problem was asked by Stripe.
#
#Given an array of integers, find the first missing positive integer in linear time and constant space.
#In other words, find the lowest positive integer that does not exist in the array.
#The array can contain duplicates and negative numbers as well.
#
#For example, the input [3, 4, -1, 1] should give 2. The input [1, 2, 0] should give 3.
#
#You can modify the input array in-place.
#soln
import numpy as np
import timeit
#input is a list of integers
def DPC_4(I):
    """Return the smallest positive integer missing from the iterable I.

    I may contain duplicates, zeros and negative numbers; they are ignored.

    Fix: the original indexed S[0] before checking for emptiness, so an
    input with no positive values (e.g. [-1] or []) raised IndexError.
    """
    # Distinct positive values in ascending order.
    positives = sorted({i for i in I if i > 0})
    # No positive values at all -> the answer is 1.
    if not positives:
        return 1
    expected = 1
    for value in positives:
        if value != expected:
            # First gap in the sequence 1, 2, 3, ...
            return expected
        expected += 1
    # No gap: 1..max are all present, so the answer is max + 1.
    return expected
#test prompt examples
# NOTE(review): results of the bare DPC_4(I) calls are discarded; these only
# show values when pasted into a REPL.
I=[3, 4, -1, 1]
DPC_4(I)
#2
I=[1, 2, 0]
DPC_4(I)
#3
#test list of only negatives
I= [-1]
DPC_4(I)
#1
#test list with duplicates
I=[1,1,3,3,4,5]
DPC_4(I)
#2
#test list with only zero
I=[0]
DPC_4(I)
#1
#test random string of ints
I=np.random.randint(low=-10,high=10, size=10)
print(I)
DPC_4(I)
#test linear time performance
def wrapper(func, *args, **kwargs):
    """Freeze func with the given arguments into a zero-argument callable (for timeit)."""
    return lambda: func(*args, **kwargs)
#10 iters
I=np.random.randint(low=-10,high=10, size=10)
wrapped = wrapper(DPC_4, I)
timeit.timeit(wrapped, number=10000)
#0.1026542482261732
#100 iters
I=np.random.randint(low=-100,high=100, size=100)
wrapped = wrapper(DPC_4, I)
timeit.timeit(wrapped, number=10000)
#1.0872683088967392
#1000 iters
I=np.random.randint(low=-1000,high=1000, size=1000)
wrapped = wrapper(DPC_4, I)
timeit.timeit(wrapped, number=10000)
#7.861962749415568
#10000 iters
I=np.random.randint(low=-10000,high=10000, size=10000)
wrapped = wrapper(DPC_4, I)
timeit.timeit(wrapped, number=10000)
#60.52168661096425
# NOTE(review): timings above grow close to linearly, but the sort inside
# DPC_4 makes the true worst case O(n log n), not O(n) as the prompt asks.
#soln time incl notes,comments: 35min
|
import sys
import time
import datetime
class WError(Exception):
    """Exception that formats an errno-style code into its message."""

    def __init__(self, errno, msg):
        message = f"[Errno {errno}]: {msg}"
        super().__init__(message)
def hello_world():
    """Print a friendly greeting to stdout."""
    print("Hello world!")
def exit():
    """Terminate the interpreter via sys.exit (shadows the builtin exit)."""
    sys.exit()
def time(time):
    """Return the current datetime when asked for "now" (or 0).

    NOTE: this function shadows both the stdlib `time` module and its own
    parameter; the name is kept for interface compatibility.

    Raises:
        WError: for any other argument value.
    """
    if time == "now" or time == 0:
        return datetime.datetime.now()
    else:
        # Fix: removed the stray '[' that leaked into the original message.
        raise WError(1, "That is actually a bad time.")
def pause(time):
    """Sleep for `time` seconds.

    Fix: the parameter `time` shadowed the stdlib time module, so the original
    `time.sleep(time)` raised AttributeError on every call. Import the module
    under a local alias to keep the public signature unchanged.
    """
    import time as _time
    _time.sleep(time)
def say(it, time=0):
    """Print `it`, then pause for `time` seconds via pause()."""
    print(it)
    pause(time)
|
# -*- coding: utf-8 -*-
import os
from lithoxyl import (Logger,
StreamEmitter,
SensibleSink,
SensibleFilter,
SensibleFormatter)
from lithoxyl.sinks import DevDebugSink
# import lithoxyl; lithoxyl.get_context().enable_async()
# Root logger for chert; sinks are attached below.
chert_log = Logger('chert')
# Completion-record format: status char, time offset, duration, indented message.
fmt = ('{status_char}+{import_delta_s}'
       ' - {duration_ms:>8.3f}ms'
       ' - {parent_depth_indent}{end_message}')
# Begin-record format: no duration exists yet, so pad with dashes instead.
begin_fmt = ('{status_char}+{import_delta_s}'
             ' --------------'
             ' {parent_depth_indent}{begin_message}')
stderr_fmtr = SensibleFormatter(fmt,
                                begin=begin_fmt)
stderr_emtr = StreamEmitter('stderr')
# Successes logged at info; failures/exceptions kept down at debug.
stderr_filter = SensibleFilter(success='info',
                               failure='debug',
                               exception='debug')
stderr_sink = SensibleSink(formatter=stderr_fmtr,
                           emitter=stderr_emtr,
                           filters=[stderr_filter])
chert_log.add_sink(stderr_sink)
# Syslog support is optional: skip silently when the emitter is unavailable.
try:
    from lithoxyl.emitters import SyslogEmitter
except Exception:
    pass
else:
    # Only the most severe records go to syslog.
    syslog_filter = SensibleFilter(success='critical',
                                   failure='critical',
                                   exception='critical')
    syslog_emt = SyslogEmitter('chert')
    syslog_sink = SensibleSink(formatter=stderr_fmtr,
                               emitter=syslog_emt,
                               filters=[syslog_filter])
    # Opt-in via the CHERT_SYSLOG environment variable.
    if os.getenv('CHERT_SYSLOG'):
        chert_log.add_sink(syslog_sink)
# CHERT_PDB drops into pdb post-mortem on failures.
chert_log.add_sink(DevDebugSink(post_mortem=bool(os.getenv('CHERT_PDB'))))
def _ppath(path): # lithoxyl todo
    """Best-effort: relativize an absolute path for log output.

    NOTE(review): `input_path` is not defined anywhere in this module, so the
    relative branch raises NameError for absolute paths — confirm where
    input_path is supposed to come from.
    """
    # find module path (or package path) and relativize to that?
    if not path.startswith('/'):
        return path
    rel_path = os.path.relpath(path, input_path)
    # Paths outside the input tree are returned unchanged.
    if rel_path.startswith('..'):
        return path
    return rel_path
|
from posixpath import dirname
from flask import Flask, render_template, request, session, redirect, url_for, send_file
import os
import sys
import requests
from flask_paginate import Pagination, get_page_args #pip install flask-paginate
from datetime import timedelta
import json
import uuid
import base64
import redis
import argparse
import grpc
from werkzeug.utils import secure_filename
import ocr_request_pb2
import ocr_request_pb2_grpc
# session['userid'] => userid
def get_page_data(offset=0, per_page=5, data=None):
    """Return the slice of `data` shown on one page.

    Args:
        offset: index of the first row on the page.
        per_page: number of rows per page.
        data: full list of rows (defaults to an empty list).

    Fix: replaced the mutable default argument (data=[]) with None.
    """
    if data is None:
        data = []
    return data[offset: offset + per_page]
app = Flask(__name__)
# NOTE(review): hard-coded session secret — move to configuration for production.
app.secret_key = "102011334455"
# Absolute upload directory inside the container.
app.config["IMAGE_UPLOADS"] = "/usr/src/app/backend/uploads"
# app.config["IMAGE_UPLOADS"] = "/home/taekyun/8/assignment-4/backend/uploads"
def _get_book_total(currentUser):
    """Number of book entries stored for this user (0 when none yet)."""
    if redis.exists(currentUser + "_Total"):
        return int(redis.get(currentUser + "_Total"))
    return 0


def _collect_books(currentUser, total):
    """Load [title, page, author, ocr] rows for books 1..total from redis."""
    data = []
    for i in range(total):
        idx = str(i + 1)
        bookTitle = redis.get(currentUser + "_bookTitle" + idx).decode('utf-8')
        page_2 = redis.get(currentUser + "_page" + idx).decode('utf-8')
        author = redis.get(currentUser + "_author" + idx).decode('utf-8')
        # OCR text is optional, so guard with exists() before reading it.
        ocrFinal = ""
        if redis.exists(currentUser + "_ocrFinal" + idx):
            ocrFinal = redis.get(currentUser + "_ocrFinal" + idx).decode('utf-8')
        data.append([bookTitle, page_2, author, ocrFinal])
    return data


def _render_home(currentUser):
    """Render the paginated book list for a logged-in user."""
    page, per_page, offset = get_page_args(page_parameter='page',
                                           per_page_parameter='per_page')  # default 5
    total = _get_book_total(currentUser)
    data = _collect_books(currentUser, total)
    pagination_data = get_page_data(offset=offset, per_page=per_page, data=data)
    # page -> current page, per_page -> num of data for 1 page
    pagination = Pagination(page=page, per_page=per_page, total=total)
    return render_template('home.html', currentUser=currentUser, users=pagination_data,
                           page=page,
                           per_page=per_page,
                           pagination=pagination,
                           total=total)


@app.route('/', methods=['GET', 'POST'])
def home():
    """Login handler (POST) and paginated book list (GET).

    Fixes vs original: the two duplicated list-building branches now share
    the helpers above, and the stray debug prints were removed — the old
    `print(i)` raised UnboundLocalError for users with zero books.
    """
    total = 0
    if request.method == 'POST':
        userid = request.form.get('userid')
        password = request.form.get('password')
        stored = redis.get(userid + "_pw")
        if stored and stored.decode('utf-8') == password:
            session['userid'] = userid
            currentUser = redis.get(userid + "_name").decode('utf-8')
            return _render_home(currentUser)
        # Wrong credentials: back to the login page.
        return redirect('/login')
    if "userid" in session:
        currentUser = redis.get(session['userid'] + "_name").decode('utf-8')
        return _render_home(currentUser)
    return render_template('home.html', currentUser="", total=total)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form (GET) or authenticate and open a session (POST)."""
    if request.method == 'GET':
        return render_template('login.html')
    userid = request.form.get('userid')
    password = request.form.get('password')
    stored = redis.get(userid + "_pw")
    if stored and stored.decode('utf-8') == password:
        session['userid'] = userid
        # return "login success" # next page
        currentUser = redis.get(userid + "_name").decode('utf-8')
        return redirect('/')
    return redirect('/login')
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Registration form (GET) or create a new account (POST).

    Fix: removed the unreachable trailing redirect — every branch already
    returns.
    """
    if request.method == 'GET':
        return render_template("register.html")
    userid = request.form.get('userid')
    username = request.form.get('username')
    password = request.form.get('password')
    re_password = request.form.get('re_password')
    if not (userid and username and password and re_password):
        # "Please fill in all fields"
        return "모두 입력해주세요"
    elif password != re_password:
        # "Please check your password"
        return "비밀번호를 확인해주세요"
    else:
        # SECURITY(review): the password is stored in plaintext; hash it
        # (e.g. werkzeug.security.generate_password_hash) before storing.
        redis.set(userid+"_name", username)
        redis.set(userid+"_pw", password)
        #session['userid'] = userid
        return redirect('/')
@app.route('/logout', methods=['GET'])
def logout():
    """Clear the whole session and return to the home page."""
    session.clear()
    return redirect('/')
@app.route('/upload-image', methods=['GET', 'POST'])
def upload_image():
    """Accept an image upload, run it through the OCR pipeline, then redirect.

    Pipeline: save file locally -> gRPC OCR service (port 8002) for raw text
    -> HTTP post-processing service (port 8001) -> cache result in redis.
    """
    if "userid" in session:
        currentUser = redis.get(session['userid']+"_name").decode('utf-8')
        if request.method == "POST":
            if request.files:
                image = request.files["image"]
                print("hh", image.filename)
                if image.filename != "":
                    # NOTE(review): filename used unsanitized; consider
                    # secure_filename() (already imported) to avoid path tricks.
                    image.save(os.path.join(app.config["IMAGE_UPLOADS"], image.filename))
                    print("Upload the image")
                    # Ask the OCR gRPC service to process the uploaded file.
                    channel2 = grpc.insecure_channel("0.0.0.0:8002")
                    stub2 = ocr_request_pb2_grpc.ocrApiServiceStub(channel2)
                    response2 = stub2.goURL(ocr_request_pb2.urlMsg(url=image.filename))
                    raw_text = response2.response
                    # Post-process the raw OCR text via the HTTP service.
                    response3 = requests.post("http://0.0.0.0:8001/", data = raw_text.encode('utf-8'))
                    ocr_result = response3.text
                    print("OCR done")
                    print(ocr_result)
                    # Stash the result so /write-after-ocr can pre-fill its form.
                    redis.set(session['userid']+"_ocr", ocr_result)
                    return redirect(url_for('write_after_ocr'))
        return render_template("upload_image.html", currentUser=currentUser)
    else:
        return render_template('login.html')
@app.route('/uploads/<filename>')
def send_uploaded_file(filename=''):
    """Serve a previously uploaded file from the uploads directory."""
    from flask import send_from_directory
    return send_from_directory(app.config["IMAGE_UPLOADS"], filename)
@app.route("/read", methods=['GET', 'POST'])
def read():
    """Show the stored details of one book entry (index given by ?my_var=N)."""
    if "userid" in session:
        currentUser = redis.get(session['userid']+"_name").decode('utf-8')
        my_var = request.args.get('my_var', None) #numbering_of_book
        bookTitle=redis.get(currentUser+"_bookTitle"+str(my_var)).decode('utf-8')
        author=redis.get(currentUser+"_author"+str(my_var)).decode('utf-8')
        date=redis.get(currentUser+"_date"+str(my_var)).decode('utf-8')
        page=redis.get(currentUser+"_page"+str(my_var)).decode('utf-8')
        thought=redis.get(currentUser+"_thought"+str(my_var)).decode('utf-8')
        # OCR text is optional, so guard with exists() before reading it.
        ocrFinal = ""
        if redis.exists(currentUser+"_ocrFinal"+str(my_var)):
            ocrFinal = redis.get(currentUser+"_ocrFinal"+str(my_var)).decode('utf-8')
        print(my_var)
        return render_template('read.html', currentUser=currentUser,ocrFinal=ocrFinal, bookTitle=bookTitle,author=author,date=date,page=page,thought=thought)
    else:
        return render_template('login.html')
@app.route("/write-after-ocr", methods=['GET', 'POST'])
def write_after_ocr():
    """Form for saving a book entry, pre-filled with the cached OCR text.

    POST persists title/author/date/page/OCR/thought under numbered redis
    keys and bumps the user's "<name>_Total" counter.
    """
    if "userid" in session:
        currentUser = redis.get(session['userid']+"_name").decode('utf-8')
        ocr_result=""
        if (redis.exists(session['userid']+"_ocr")):
            ocr_result = redis.get(session['userid']+"_ocr").decode('utf-8')
        bookTitle=""
        author=""
        date=""
        page=""
        ocrFinal=""
        thought=""
        if request.method == 'POST':
            bookTitle = request.form.get("bookTitle")
            author = request.form.get("author", "")
            date = request.form.get("date")
            page = request.form.get("page")
            ocrFinal = request.form.get("ocrFinal")
            if ocrFinal is None:
                print(ocrFinal)
            else:
                print('yeyyyyyy')
            thought = request.form.get("thought")
            # First saved book starts the per-user counter at 1.
            if(redis.exists(currentUser+"_Total") != 1):
                redis.set(currentUser+"_Total",1)
            else:
                redis.incr(currentUser+"_Total",1)
            book_num = int(redis.get(currentUser+"_Total"))
            print("what is book num" + str(book_num))
            redis.set(currentUser+"_bookTitle"+str(book_num),bookTitle)
            redis.set(currentUser+str(book_num),bookTitle)
            redis.set(currentUser+"_author"+str(book_num),author)
            redis.set(currentUser+"_date"+str(book_num),date)
            redis.set(currentUser+"_page"+str(book_num),page)
            if ocrFinal is not None:
                redis.set(currentUser+"_ocrFinal"+str(book_num),ocrFinal)
            redis.set(currentUser+"_thought"+str(book_num),thought)
            print(bookTitle, author, date, page, ocrFinal, thought)
            # Clear the cached OCR text once it has been saved.
            redis.set(session['userid']+"_ocr", "")
            return redirect('/')
        return render_template('write_after_ocr.html', currentUser=currentUser, ocr_result=ocr_result)
    else:
        return render_template('login.html')
if __name__ == "__main__":
    # NOTE(review): this rebinds the imported module name `redis` to a client
    # instance; every view above depends on this running first.
    redis = redis.Redis(host="0.0.0.0", port=6379)
    # for local test
    # redis = redis.Redis(host="0.0.0.0", port=6379)
    # app.run()
    app.run(debug=True, host='0.0.0.0', port=5000)
# ngrok http 5000
# redis-cli -p 6379
# get all keys : KEYS *
# key : userid_booklist
# value : ['어린왕자_생텍쥐페리', '푸른꽃_작가', '경제_작가']
# key : 어린왕자_생텍쥐페리
# value : [1, 4, 5]
# key : userid_bookname_author
# value : [ocr_data@%date@%thought]
# key : userid_bookname_author_1
# value : [ocr_data@%date@%thought]
# total : 책 개수
# key : userid_1_bookname
# key : userid_1_author
# key : userid_1_date
# key : userid_1_bookname
# key : userid_1_bookname
|
# Write a program to create a queue called Doctor to perform the
# basic operations on queue using list. The list contains two
# data fields: Docid and Docname. Write the following functions:
# InsertDoc() – To push the data values into the list Docinfo
# DeleteDoc() – To remove the data value from the list Docinfo
# ShowDoc(): - To display data value for all Docinfo.
# Queue
# FIFO queue of (Docid, Docname) tuples, shared by the functions below.
Doctor = []
def InsertDoc():
    """Prompt for n doctors and enqueue each as a (Docid, Docname) tuple.

    Fix: corrected the prompt typo ("who many" -> "how many").
    """
    n = int(input('Enter how many Doctor you want to insert: '))
    for _ in range(n):
        Docid = int(input('Enter Doctor ID: '))
        Docname = input('Enter Doctor: ')
        Doctor.append((Docid, Docname))
def DeleteDoc():
    """Dequeue the doctor at the front of the queue (FIFO).

    Fix: guard against popping from an empty queue — the original raised
    IndexError when the queue was empty.
    """
    if Doctor:
        Doctor.pop(0)
        print('Done')
    else:
        print('Queue is empty!')
def ShowDoc():
    """Display every (Docid, Docname) entry currently in the queue."""
    print(Doctor)
# Simple text menu; loops until the user picks option 4.
k = True
while k == True:
    print('''
    1. Push Values
    2. Delete Values
    3. See Values
    4. Exit
    ''')
    option = int(input('Enter your option(1/4): '))
    if option == 1:
        InsertDoc()
    elif option == 2:
        DeleteDoc()
    elif option == 3:
        ShowDoc()
    elif option == 4:
        # Exit requested: drop out of the loop.
        k = False
    else:
        print("Invalid Option!")
        continue
|
from setuptools import setup, find_packages
setup(
    name='acs_student_attendance',
    version='1.0.4',
    url='https://github.com/petarmaric/acs_student_attendance',
    license='BSD',
    author='Petar Maric',
    author_email='petarmaric@uns.ac.rs',
    description='Console app and Python API for analyzing and reporting the '\
        'lab attendance of our ACS students',
    long_description=open('README.rst').read(),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Topic :: Education',
        'Topic :: Utilities',
    ],
    platforms='any',
    packages=find_packages(),
    # Installs the `acs_student_attendance` console command.
    entry_points={
        'console_scripts': ['acs_student_attendance=acs_student_attendance.shell:main']
    },
    # Runtime dependencies are maintained in requirements.txt.
    install_requires=open('requirements.txt').read().splitlines(),
)
|
# Introduce the first five classmates; "Angy" introduces herself instead.
# NOTE: only the first five of the six names are printed, as in the original.
name = ["Bill", "Anne", "Angy", "Cony", "Daniel", "Occhan"]
for position, person in enumerate(name[:5], start=1):
    if person != "Angy":
        print("{}.{} is my classmate".format(position, person))
    else:
        print("{}.My name is {}".format(position, person))
|
import sys
from aiohttp import web
from aio_recaptcha import Recaptcha
# Single page served at "/": the reCAPTCHA widget plus the last check result.
TEMPLATE = '''
<!DOCTYPE html>
<html lang="en">
<body>
<form action="/check" method="POST">
<p class="g-recaptcha" data-sitekey="{site_key}"></p>
<p>is verify: {is_verify}</p>
<button type="submit">check captcha</button>
</form>
<script src='//www.google.com/recaptcha/api.js'></script>
</body>
</html>
'''
# Keys come from the command line: python app.py <site_key> <secret_key>
SITE_KEY, SECRET_KEY = sys.argv[1], sys.argv[2]
async def home(request):
    """Serve the captcha form; ?is_verify=1 marks the previous check as passed."""
    is_verify = request.query.get('is_verify') == '1'
    page = TEMPLATE.format(site_key=SITE_KEY, is_verify=is_verify)
    return web.Response(body=page, content_type='html')
async def check(request):
    """Verify the submitted captcha, then bounce back to the form with the result."""
    is_verify = await request.app.recaptcha.verify(request)
    location = (request.app.router['home']
                .url_for()
                .with_query(is_verify=1 if is_verify else 0))
    return web.HTTPFound(location)
def main():
    """Wire up the aiohttp app: attach the verifier and the two routes."""
    app = web.Application()
    app.recaptcha = Recaptcha(SECRET_KEY)
    # The GET route is named so check() can resolve it via the router.
    app.router.add_get('/', home, name='home')
    app.router.add_post('/check', check)
    web.run_app(app)

if __name__ == '__main__':
    main()
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# __author__ = 'Benjamin'
import sys, os
import json
import requests
import random
import time
# Deployed service under test.
domain = 'redfish.blawesom.com'
# Pick a random dictionary word as the "name" path segment for this run.
# NOTE(review): assumes a Unix wordlist exists at /usr/share/dict/words.
lines = open('/usr/share/dict/words').read().splitlines()
new_name =random.choice(lines)
def test_root():
    """GET / returns 200 with a liveness message."""
    response = requests.get('https://{}/'.format(domain))
    payload = json.loads(response.content)
    assert response.status_code == 200
    assert payload['response'] == "service alive"
def test_hello():
    """First visit for a brand-new name is greeted with "nice to meet you"."""
    response = requests.get('https://{}/{}'.format(domain, new_name))
    payload = json.loads(response.content)
    assert response.status_code == 200
    assert payload['response'] == "nice to meet you {}".format(new_name)
def test_hello_2():
    """Second visit for the same name is greeted with "nice to see you again"."""
    response = requests.get('https://{}/{}'.format(domain, new_name))
    payload = json.loads(response.content)
    assert response.status_code == 200
    assert payload['response'] == "nice to see you again {}".format(new_name)
|
from flask import Flask, request, Response, redirect
# expected import error. API is run as a standalone from server.py
# noinspection PyUnresolvedReferences, PyPackageRequirements
from resources.keys import c, private_keys, idol_folder, top_gg_webhook_key, db_conn
# noinspection PyUnresolvedReferences, PyPackageRequirements
from resources.drive import get_file_type, download_media
import json
import random
import os.path
app = Flask(__name__)
bot_invite_url = "https://discord.com/oauth2/authorize?client_id=520369375325454371&scope=bot&permissions=1609956823"
patreon_url = "https://www.patreon.com/mujykun"
@app.before_request
def log_info():
    """Log minimal request information to know amount of calls along with key usage.

    Fixes: the failure path now rolls the transaction back explicitly (the
    original called commit() while its comment claimed it would abort), and
    the bare except around index() is narrowed to ValueError.
    """
    try:
        key = request.headers.get('Authorization') or "None"
        # keys are always appended to the end in order, so we can use the index to differentiate between keys.
        try:
            index = private_keys.index(key)
        except ValueError:
            index = -1  # unknown or missing key
        c.execute("SELECT called FROM stats.apiusage WHERE endpoint = %s AND keyused = %s", (request.base_url, index))
        called_amount = c.fetchone()
        if called_amount:
            c.execute("UPDATE stats.apiusage SET called = %s WHERE endpoint = %s AND keyused = %s", (called_amount[0] + 1, request.base_url, index))
        else:
            c.execute("INSERT INTO stats.apiusage(endpoint, keyused, called) VALUES(%s, %s, %s)", (request.base_url,
                                                                                                  index, 1))
        db_conn.commit()
    except Exception as e:
        db_conn.rollback()  # abort the failed transaction so later queries work
        print(f"{e} - log_info")
@app.after_request
def add_header(response):
    """Allow cross-origin requests from any domain on every response."""
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
@app.route('/members/', methods=['GET'])
def get_all_members():
    """Gets all full names and stage names of idols."""
    c.execute("SELECT id, fullname, stagename FROM groupmembers.member")
    return {
        idol_id: {'full_name': full_name, 'stage_name': stage_name}
        for idol_id, full_name, stage_name in c.fetchall()
    }
@app.route('/members_with_photos/', methods=['GET'])
def get_all_members_with_photos():
    """Gets all full names and stage names of idols with photos"""
    c.execute("""SELECT DISTINCT(m.id), fullname, stagename
    FROM groupmembers.member as m, groupmembers.imagelinks as i
    WHERE m.id = i.memberid""")
    return {
        idol_id: {'full_name': full_name, 'stage_name': stage_name}
        for idol_id, full_name, stage_name in c.fetchall()
    }
@app.route('/members/<idol_id>/', methods=['GET'])
def get_member(idol_id):
    """Get full name and stage name of an idol by it's id."""
    c.execute("SELECT fullname, stagename FROM groupmembers.member WHERE id=%s", (idol_id,))
    # Same shape as the original loop: zero rows -> {}, one row -> one entry.
    return {
        idol_id: {'full_name': full_name, 'stage_name': stage_name}
        for full_name, stage_name in c.fetchall()
    }
@app.route('/groups/', methods=['GET'])
def get_groups():
    """Get all group ids to group names."""
    c.execute("SELECT groupid, groupname FROM groupmembers.groups")
    groups = c.fetchall()
    c.execute("SELECT idolid, groupid FROM groupmembers.idoltogroup")
    # Group memberships keyed by group id.
    members_in_groups = {}
    for idol_id, group_id in c.fetchall():
        members_in_groups.setdefault(group_id, []).append(idol_id)
    return {
        group_id: {
            "name": group_name,
            "members": members_in_groups.get(group_id) or []
        }
        for group_id, group_name in groups
    }
@app.route('/commands/', methods=['GET'])
def get_commands():
    """Serve the bot's command metadata from commands.json."""
    with open("commands.json", "r") as file:
        # json.load streams the file directly (was read() + loads).
        return json.load(file), 200
# noinspection PyBroadException,PyPep8
@app.route('/groups/<group_id>/', methods=['GET'])
def get_group(group_id):
    """Get group name by group id; empty dict when the group does not exist.

    Fix: the bare except only ever masked fetchone() returning None for a
    missing group — test for that explicitly instead.
    """
    c.execute("SELECT groupname FROM groupmembers.groups WHERE groupid=%s", (group_id,))
    group = c.fetchone()
    if group is None:
        return {}
    return {group_id: group[0]}
@app.route('/photos/<idol_id>/list/', methods=['GET'])
def get_image_ids(idol_id):
    """Returns all image ids an idol has."""
    c.execute("SELECT id FROM groupmembers.imagelinks WHERE memberid=%s", (idol_id,))
    rows = c.fetchall()
    return {'ids': [row[0] for row in rows]}
def get_param(key: str):
    """Get the form data or param of sent data"""
    # Query-string parameters take precedence over form fields.
    return request.args.get(key) or request.form.get(key)
# noinspection PyBroadException
@app.route('/photos/<idol_id>/', methods=['POST'])
def get_idol_photo(idol_id, redirect_user=True, auth=True, guessing_game=False, looped=0):
    """Download an idol's photo and redirect the user to the image link.

    Fixes vs original: the retry args are now a tuple (was a set, which only
    unpacked correctly by accident for one element), the min_faces sanity
    check replaces an always-false condition (999 < x < -1), and the bare
    except on the int conversions is narrowed to ValueError.
    """
    # check authorization
    if not check_auth_key(request.headers.get('Authorization')) and auth:
        # Invalid API Key
        return Response(status=403)
    # defining the args and kwargs for this method to use recursive strategies.
    args = (idol_id,)
    kwargs = {
        "redirect_user": redirect_user,
        "auth": auth,
        "guessing_game": guessing_game
    }
    try:
        check_redirect = get_param("redirect") or 1  # should redirect by default
        allow_video = get_param('video_allowed') or 1  # video allowed by default
        min_faces = get_param('min_faces') or 1
        max_faces = get_param('max_faces') or 999
        # confirm the input is not a string
        check_redirect = int(check_redirect)
        allow_video = int(allow_video)
        min_faces = int(min_faces)
        max_faces = int(max_faces)
    except ValueError:
        return Response(status=422)
    if not check_redirect:
        redirect_user = False
    # Clamp face-count bounds to sane values.
    if min_faces < 1 or min_faces > 999:
        min_faces = 1
    if max_faces > 10000:
        max_faces = 999
    if max_faces < min_faces:
        max_faces = min_faces
    try:
        add_sql_query = "" if not allow_video else "OR facecount = -1"
        sql_query = f"""SELECT id, link FROM groupmembers.imagelinks
        WHERE memberid=%s AND ( (facecount >= %s AND facecount <= %s) {add_sql_query})"""
        c.execute(sql_query, (idol_id, min_faces, max_faces))
        all_links = c.fetchall()
        if not all_links:
            # idol has no photos
            return Response(status=404)
        random_link = random.choice(all_links)
        if guessing_game:
            # For the guessing game we only need the hosted URL back.
            return process_image(random_link, redirect_user=redirect_user, guessing_game=True)
        return process_image(random_link, redirect_user=redirect_user)
    except Exception as e:
        if "current transaction is aborted" in f"{e}".lower() and looped < 5:
            # we will attempt this 5 times.
            kwargs['looped'] = looped + 1
            return get_idol_photo(*args, **kwargs)
        print(f"{e} (Looped {looped} times) - get_idol_photo 2 ")
        return Response(status=500)
@app.route('/file/<image_id>/', methods=['POST'])
def get_image(image_id):
    """Process one specific image, by database id (auth required)."""
    # check authorization
    if not check_auth_key(request.headers.get('Authorization')):
        # Invalid API Key
        return Response(status=403)
    try:
        c.execute("SELECT link FROM groupmembers.imagelinks where id = %s", (image_id,))
        row = c.fetchone()
        if not row:
            return Response(status=404)
        return process_image([image_id, row[0]])
    except Exception as e:
        print(e)
        return Response(status=500)
@app.route('/random/', methods=['GET'])
def random_image():
    """Public endpoint: photo info for a random idol (no auth, no redirect)."""
    random_idol_id = get_random_idol_id_with_photo()
    return get_idol_photo(random_idol_id, redirect_user=False, auth=False)
@app.route('/photos/guessing_game/', methods=['POST'])
def random_gg_image():
    """Pick a random idol; return id, names, aliases and a non-video photo URL."""
    random_idol_id = get_random_idol_id_with_photo()
    c.execute("SELECT fullname, stagename FROM groupmembers.member WHERE id = %s", (random_idol_id,))
    info = c.fetchone()
    full_name = info[0]
    stage_name = info[1]
    # Global idol aliases only (isgroup = 0, serverid IS NULL).
    c.execute("SELECT alias FROM groupmembers.aliases WHERE objectid = %s AND isgroup = 0 AND serverid IS NULL",
              (random_idol_id,))
    aliases = c.fetchall()
    aliases = [alias[0] for alias in aliases]
    # Seed with a video extension so the loop below runs at least once.
    photo_link = ".mp4"
    # confirm the client does not receive a video.
    while ".mp4" in photo_link or ".webm" in photo_link:
        photo_link = get_idol_photo(random_idol_id, redirect_user=False, auth=True, guessing_game=True)
    idol_info_json = {
        'id': random_idol_id,
        'full_name': full_name,
        'stage_name': stage_name,
        'image_url': photo_link,
        'aliases': aliases
    }
    return idol_info_json
@app.route('/downloaded/', methods=['GET'])
def get_downloaded_images():
    """Return up to 1000 randomly ordered, already-downloaded image file names.

    Videos (.mp4/.webm) are filtered out. Fix: the truncation slice now keeps
    1000 entries; the original [0:999] kept only 999.
    """
    currently_existing_photos = os.listdir(idol_folder)
    random.shuffle(currently_existing_photos)
    if len(currently_existing_photos) > 1000:
        currently_existing_photos = currently_existing_photos[:1000]
    images = [file_name for file_name in currently_existing_photos
              if '.mp4' not in file_name and '.webm' not in file_name]
    return {'images': images}, 200
@app.route('/webhook/', methods=['POST'])
def get_top_gg_vote():
    """Top.gg vote webhook: record the voting user's id with a fresh row."""
    if not check_webhook_key(request.headers.get('Authorization')):
        # Invalid Webhook Key
        return Response(status=403)
    user_id = (request.get_json()).get('user')
    if not user_id:
        return Response(status=400)
    try:
        # Replace any previous vote row for this user.
        c.execute("DELETE FROM general.lastvoted WHERE userid = %s", (user_id,))
        c.execute("INSERT INTO general.lastvoted(userid) values(%s)", (user_id,))
        db_conn.commit()
        print(user_id, " has voted.")
        return Response(status=200)
    except Exception as e:
        print(e)
        return Response(status=500)
@app.route('/botinfo/', methods=['GET'])
def get_bot_info():
    """Sends a list of bot information such as
    Server Count, User Count, Total commands used, Amount of Idol Photos """
    stats = {}
    c.execute("SELECT totalused FROM stats.sessions ORDER BY totalused DESC")
    stats['total_commands_used'] = c.fetchone()[0]
    c.execute("SELECT COUNT(*) FROM stats.guilds")
    stats['server_count'] = c.fetchone()[0]
    c.execute("SELECT SUM(membercount) FROM stats.guilds")
    stats['member_count'] = c.fetchone()[0]
    c.execute("SELECT COUNT(*) FROM groupmembers.imagelinks")
    stats['idol_photo_count'] = c.fetchone()[0]
    return stats, 200
@app.route('/idolcommandsused/', methods=['GET'])
def get_idol_commands_used():
    """Get the Amount of Idol Photo Commands Used."""
    # Sums counts for every 'Idol *' command plus the 'randomidol' command.
    c.execute("SELECT SUM(count) FROM stats.commands WHERE commandname LIKE 'Idol %' OR commandname LIKE 'randomidol'")
    return {'idol_commands_used': c.fetchone()[0]}, 200
@app.route('/invite/', methods=['GET'])
def redirect_to_invite_bot():
    """Redirect to invite the bot to a server."""
    # 308 = permanent redirect that preserves the request method.
    return redirect(bot_invite_url, code=308)
@app.route('/patreon/', methods=['GET'])
def redirect_to_patreon():
    """Redirect to patreon page."""
    return redirect(patreon_url, code=308)
@app.route('/', methods=['GET'])
def get_default_route():
    """Root of the API: permanently redirect to the documentation site."""
    return redirect("https://irenebot.com/api", code=308)
def check_webhook_key(key):
    """Check the Top.GG webhook key with an auth key"""
    # Returns True/False for a provided key; implicitly None (falsy) otherwise.
    if key:
        return key == top_gg_webhook_key
def check_auth_key(key):
    """Check if an authorization key is correct."""
    # Returns True/False for a provided key; implicitly None (falsy) otherwise.
    if key:
        return key in private_keys
def check_file_exists(file_name):
    """Check if a file exists."""
    # isfile: True only for regular files, not directories.
    return os.path.isfile(file_name)
def get_google_drive_id(link):
    """Extract a Google Drive file id from a direct-view file URL.

    Strips the fixed export-view prefix; any other input is returned
    unchanged.
    """
    drive_prefix = "https://drive.google.com/uc?export=view&id="
    return link.replace(drive_prefix, "")
def get_random_idol_id_with_photo():
    """Pick a random idol id that is guaranteed to have at least one photo."""
    c.execute("SELECT DISTINCT(memberid) FROM groupmembers.imagelinks")
    idol_rows = c.fetchall()
    chosen_row = random.choice(idol_rows)
    return chosen_row[0]
def process_image(link_info, redirect_user=True, guessing_game=False):
    """Ensure an idol image/video is downloaded and serve its host URL.

    Args:
        link_info: Sequence whose first element is the db id of the file
            and whose second element is its Google Drive URL.
        redirect_user: When True, respond with a 308 redirect to the
            image host; when False, return the file metadata dict + 200.
        guessing_game: When True, only make sure the media is downloaded
            and return the image host URL directly.

    Returns:
        Either the image host URL (guessing_game), a (dict, status) pair
        (videos -> 415, non-redirect -> 200), or a Flask 308 redirect.

    Raises:
        Re-raises whatever error occurred after logging it.
    """
    try:
        # get information about the file from google drive
        file_db_id = link_info[0]
        file_url = link_info[1]
        google_drive_id = get_google_drive_id(file_url)
        file_type = get_file_type(google_drive_id)
        file_location = f"{idol_folder}{file_db_id}{file_type}"
        image_host_url = f"https://images.irenebot.com/idol/{file_db_id}{file_type}"
        print(f"Processing {image_host_url}.")
        file_data = {
            'final_image_link': image_host_url,
            'location': file_location,
            'file_name': f"{file_db_id}{file_type}"
        }
        # check if the file is already downloaded
        if not check_file_exists(file_location):
            # download the file
            download_media(google_drive_id, file_location)
        # we only need the media downloaded for guessing game, so this is our return point.
        if guessing_game:
            return image_host_url
        if '.mp4' in file_type or '.webm' in file_type:
            # return a json of the video info
            return file_data, 415
        if not redirect_user:
            return file_data, 200
        return redirect(image_host_url, code=308)
    except Exception as e:
        print(f"{e} - process_image")
        # Re-raise the original exception so callers keep the real error
        # type and traceback (was `raise Exception`, which discarded both).
        raise
# should be run through gunicorn
# app.run(port=5454)
# Fallback for local development only: start Flask's built-in server.
if __name__ == "__main__":
    app.run()
|
"""
MagPy
Intermagnet ImagCDF input filter
(based on cdflib)
Written by Roman Leonhardt October 2019
- contains test, read and write functions for
ImagCDF
- supports python >= 3.5
- currently requires cdflib<=0.3.18
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from io import open
from magpy.stream import *
import cdflib
#import ciso8601 ## fast datetime parser ciso8601.parse_datetime should be 10 times faster than datetime.strftime
import sys
import logging
logger = logging.getLogger(__name__)
HEADTRANSLATE = {'FormatDescription':'DataFormat', 'IagaCode':'StationID', 'ElementsRecorded':'DataComponents', 'ObservatoryName':'StationName', 'Latitude':'DataAcquisitionLatitude', 'Longitude':'DataAcquisitionLongitude', 'Institution':'StationInstitution', 'VectorSensOrient':'DataSensorOrientation', 'TermsOfUse':'DataTerms','UniqueIdentifier':'DataID','ParentIdentifiers':'SensorID','ReferenceLinks':'StationWebInfo', 'FlagRulesetType':'FlagRulesetType','FlagRulesetVersion':'FlagRulesetVersion'}
def isIMAGCDF(filename):
    """Check whether a file is ImagCDF format.

    Returns True only when the file opens as a CDF and its global
    'FormatDescription' attribute starts with 'INTERMAGNET'; False on any
    read/parse failure.
    """
    try:
        temp = cdflib.CDF(filename)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any read error still means "not CDF".
        return False
    try:
        form = temp.globalattsget().get('FormatDescription')
        if not form.startswith('INTERMAGNET'):
            return False
    except Exception:
        # Missing/None FormatDescription or unreadable global attributes.
        return False
    print ("Running new ImagCDF import filter")
    logger.debug("isIMAGCDF: Found INTERMAGNET CDF data - using cdflib")
    return True
def readIMAGCDF(filename, headonly=False, **kwargs):
    """
    Reading Intermagnet CDF format (1.0,1.1,1.2)

    Opens ``filename`` with cdflib, translates ImagCDF global attributes
    to MagPy header keys (via HEADTRANSLATE), loads the time and data
    columns (zVariables), optionally extracts flagging information, and
    returns a DataStream.

    NOTE(review): the ``headonly`` parameter is accepted but never used
    in this implementation.
    """
    debug = kwargs.get('debug')
    headers={}
    arraylist = []
    array = [[] for elem in KEYLIST]
    multipletimedict = {}
    newdatalist = []
    tllist = []
    indexarray = np.asarray([])
    cdfdat = cdflib.CDF(filename)
    if debug:
        print ("Reading ImagCDF with cdflib")
    # Translate global CDF attributes into MagPy header entries.
    for att in cdfdat.globalattsget():
        value = cdfdat.globalattsget().get(att)
        try:
            # Single-element lists are unwrapped to their scalar value.
            if isinstance(list(value), list):
                if len(value) == 1:
                    value = value[0]
        except:
            pass
        if not att in HEADTRANSLATE:
            attname = 'Data'+att
        else:
            attname = HEADTRANSLATE[att]
        headers[attname] = value
    #Some specials:
    headers['StationIAGAcode'] = headers.get('StationID')
    headers['DataFormat'] = headers.get('DataFormat') + '; ' + cdfdat.globalattsget().get('FormatVersion')
    try:
        try:
            # cdflib <= 0.3.18 expects the class as first argument.
            pubdate = cdflib.cdfepoch.to_datetime(cdflib.cdfepoch,headers.get('DataPublicationDate'))
        except TypeError:
            # cdflib >= 0.3.19 signature.
            pubdate = cdflib.cdfepoch.to_datetime(headers.get('DataPublicationDate'))
        headers['DataPublicationDate'] = pubdate[0]
        #pubdate = cdflib.cdfepoch.unixtime(headers.get('DataPublicationDate'))
        #headers['DataPublicationDate'] = datetime.utcfromtimestamp(pubdate[0])
    except:
        print ("imagcdf warning: Publication date is not provided as tt_2000")
        try:
            pubdate = DataStream()._testtime(headers.get('DataPublicationDate'))
            headers['DataPublicationDate'] = pubdate
        except:
            pass
    if debug:
        logger.info("readIMAGCDF: FOUND IMAGCDF file created with version {}".format(headers.get('DataFormat')))
    headers['DataLeapSecondUpdated'] = cdfdat.cdf_info().get('LeapSecondUpdated')
    if debug:
        print ("LEAP seconds updated:", cdfdat.cdf_info().get('LeapSecondUpdated'))
    # Get all available Variables - ImagCDF usually uses only zVariables
    datalist = cdfdat.cdf_info().get('zVariables')
    # New in 0.3.99 - provide a SensorID as well consisting of IAGA code, min/sec
    # and numerical publevel
    # IAGA code
    if headers.get('SensorID','') == '':
        try:
            #TODO determine resolution
            headers['SensorID'] = "{}_{}_{}".format(headers.get('StationIAGAcode','xxx').upper()+'sec',headers.get('DataPublicationLevel','0'),'0001')
        except:
            pass
    # #########################################################
    # 1. Now getting individual data and check time columns
    # #########################################################
    zpos = KEYLIST.index('z') # used for idf records
    for elem in datalist:
        if elem.endswith('Times') and not elem.startswith('Flag'):
            try:
                #if elem in ['GeomagneticVectorTimes','GeomagneticTimes','GeomagneticScalarTimes']:
                tl = int(cdfdat.varinq(elem).get('Last_Rec'))
                tllist.append([tl,elem])
            except:
                pass
    if len(tllist) < 1:
        """
        No time column identified
        -> Check for starttime and sampling rate in header
        """
        if cdfdat.globalattsget().get('StartTime','') and cdfdat.globalattsget().get('SamplingPeriod',''):
            # TODO Write that function
            st = cdfdat.globalattsget().get('StartTime','')
            sr = cdfdat.globalattsget().get('SamplingPeriod','')
            # get length of f or x
        else:
            logger.error("readIMAGCDF: No Time information available - aborting")
            return
    elif len(tllist) > 1:
        # Multiple time axes: pick the longest one as the base axis and
        # remember where the shorter axis maps into it.
        tl = [el[0] for el in tllist]
        if not max(tl) == min(tl):
            if debug:
                print ("Time columns of different length")
            logger.warning("readIMAGCDF: Time columns of different length. Choosing longest as basis")
            newdatalist.append(['time',max(tllist)[1]])
            datnumar1 = date2num(np.asarray([datetime.utcfromtimestamp(el) for el in cdflib.cdfepoch.unixtime(cdfdat.varget(max(tllist)[1]))]))
            datnumar2 = date2num(np.asarray([datetime.utcfromtimestamp(el) for el in cdflib.cdfepoch.unixtime(cdfdat.varget(min(tllist)[1]))]))
            try:
                indexarray = np.nonzero(np.in1d(datnumar1,datnumar2))[0]
            except:
                indexarray = np.asarray([])
            # create a dictionary with time column name and indexarray
            multipletimedict = {min(tllist)[1]:indexarray}
        else:
            logger.info("readIMAGCDF: Equal length time axes found - assuming identical time")
            if 'GeomagneticVectorTimes' in datalist:
                newdatalist.append(['time','GeomagneticVectorTimes'])
            else:
                newdatalist.append(['time',tllist[0][1]]) # Take the first one
    else:
        #"Single time axis found in file"
        newdatalist.append(['time',tllist[0][1]])

    def Ruleset2Flaglist(flagginglist,rulesettype,rulesetversion):
        # Convert the CDF flag variables into a MagPy flaglist according
        # to the (Conrad/MagPy) ruleset; returns [] for unknown rulesets.
        if rulesettype in ['Conrad', 'conrad', 'MagPy','magpy'] and len(flagginglist) > 0:
            if rulesetversion in ['1.0','1',1]:
                # Reorder CDF flag columns into MagPy flaglist order:
                # start, end, components, code, comment, reference, modification time.
                flagcolsconrad = [flagginglist[0],flagginglist[1],flagginglist[3],flagginglist[4],flagginglist[5],flagginglist[6],flagginglist[2]]
                flaglisttmp = []
                for elem in flagcolsconrad:
                    flaglisttmp.append(cdfdat[elem][...])
                # Old/new cdflib to_datetime signatures (see above).
                try:
                    flaglisttmp[0] = cdflib.cdfepoch.to_datetime(cdflib.cdfepoch,flaglisttmp[0])
                except:
                    flaglisttmp[0] = cdflib.cdfepoch.to_datetime(flaglisttmp[0])
                try:
                    flaglisttmp[1] = cdflib.cdfepoch.to_datetime(cdflib.cdfepoch,flaglisttmp[1])
                except:
                    flaglisttmp[1] = cdflib.cdfepoch.to_datetime(flaglisttmp[1])
                try:
                    flaglisttmp[-1] = cdflib.cdfepoch.to_datetime(cdflib.cdfepoch,flaglisttmp[-1])
                except:
                    flaglisttmp[-1] = cdflib.cdfepoch.to_datetime(flaglisttmp[-1])
                flaglist = np.transpose(flaglisttmp)
                flaglist = [list(elem) for elem in flaglist]
                return list(flaglist)
            else:
                return []
        else:
            print ("readIMAGCDF: Could not interprete flags ruleset or flagginglist is empty")
            logger.warning("readIMAGCDF: Could not interprete Ruleset")
            return []

    if not headers.get('FlagRulesetType','') == '':
        if debug:
            print ("readIMAGCDF: Found flagging ruleset {} vers.{} - extracting flagging information".format(headers.get('FlagRulesetType',''),headers.get('FlagRulesetVersion','')))
        logger.info("readIMAGCDF: Found flagging ruleset {} vers.{} - extracting flagging information".format(headers.get('FlagRulesetType',''),headers.get('FlagRulesetVersion','')))
        flagginglist = [elem for elem in datalist if elem.startswith('Flag')]
        flaglist = Ruleset2Flaglist(flagginglist,headers.get('FlagRulesetType',''),headers.get('FlagRulesetVersion',''))
        if debug:
            print ("readIMAGCDF: Flagging information extracted")
    datalist = [elem for elem in datalist if not elem.endswith('Times') and not elem.startswith('Flag')]
    # #########################################################
    # 2. Sort the datalist according to KEYLIST
    # #########################################################
    for key in KEYLIST:
        possvals = [key]
        if key == 'x':
            possvals.extend(['h','i'])
        if key == 'y':
            possvals.extend(['d','e'])
        if key == 'df':
            possvals.append('g')
        if key == 'f':
            possvals.append('s')
        for elem in datalist:
            try:
                label = cdfdat.varattsget(elem).get('LABLAXIS').lower()
                if label in possvals:
                    newdatalist.append([key,elem])
            except:
                pass # for lines which have no Label
    if debug:
        print ("Components in file: {}".format(newdatalist))
    if not len(datalist) == len(newdatalist)-1:
        logger.warning("readIMAGCDF: error encountered in key assignment - please check")
    # #########################################################
    # (4. eventually completely drop time cols and just store start date and sampling period in header)
    # Deal with scalar data (independent or whatever
    delrow = False
    index = 0
    for elem in newdatalist:
        if elem[0] == 'time':
            ttdesc = cdfdat.varinq(elem[1]).get('Data_Type_Description')
            col = cdfdat.varget(elem[1])
            try:
                # cdflib version (<0.3.19... Problem: cdflib.cdfepoch.getVersion() does not change, although to_datetime is different and unixtime as well)
                ar = date2num(cdflib.cdfepoch.to_datetime(cdflib.cdfepoch,col))
                cdfvers = 18
            except TypeError:
                # cdflib version (>=0.3.19)
                ar = date2num(cdflib.cdfepoch.to_datetime(col))
                cdfvers = 19
            except:
                # if second value is 60 (tt_2000 leapsecond timestamp) cdfepoch.unixtime fails
                print ("File contains a leap second - will be ignored")
                seccol = np.asarray([row[5] for row in cdflib.cdfepoch.breakdown(col)])
                # assume that seccol contains a 60 seconds step - identify and remove
                index = seccol.argmax()
                col = np.delete(col,index)
                try:
                    ar = date2num(cdflib.cdfepoch.to_datetime(cdflib.cdfepoch,col))
                except TypeError:
                    ar = date2num(cdflib.cdfepoch.to_datetime(col))
                delrow = True
            arlen= len(ar)
            arraylist.append(ar)
            ind = KEYLIST.index('time')
            array[ind] = ar
        else:
            ar = cdfdat.varget(elem[1])
            if delrow:
                # keep data columns aligned with the shortened time column
                ar = np.delete(ar,index)
            if elem[0] in NUMKEYLIST:
                with np.errstate(invalid='ignore'):
                    # values above 88880 are ImagCDF fill values -> NaN
                    ar[ar > 88880] = float(nan)
                ind = KEYLIST.index(elem[0])
                headers['col-'+elem[0]] = cdfdat.varattsget(elem[1]).get('LABLAXIS').lower()
                headers['unit-col-'+elem[0]] = cdfdat.varattsget(elem[1]).get('UNITS')
                if not multipletimedict == {} and list(multipletimedict.keys())[0] == cdfdat.varattsget(elem[1]).get('DEPEND_0'):
                    # column uses the shorter time axis: pad to full length
                    # at the matching indices determined above
                    newar = np.asarray([np.nan]*arlen)
                    newar[indexarray] = ar
                    array[ind] = newar
                    arraylist.append(newar)
                else:
                    array[ind] = ar
                    arraylist.append(ar)
                if elem[0] in ['f','F'] and headers.get('DataComponents','') in ['DIF','dif','idf','IDF'] and not len(array[zpos]) > 0:
                    # for IDF components the F column doubles as the z column
                    array[zpos] = ar
                    arraylist.append(ar)
                    headers['col-z'] = cdfdat.varattsget(elem[1]).get('LABLAXIS').lower()
                    headers['unit-col-z'] = cdfdat.varattsget(elem[1]).get('UNITS')
    ndarray = np.array(array, dtype=object) # decreapated .. add dtype=object
    stream = DataStream()
    # NOTE(review): the DataStream() created above is immediately replaced
    # by a single-element LineStruct list; the first assignment is dead.
    stream = [LineStruct()]
    result = DataStream(stream,headers,ndarray)
    if not headers.get('FlagRulesetType','') == '' and len(flaglist) > 0:
        result = result.flag(flaglist)
    return result
def writeIMAGCDF(datastream, filename, **kwargs):
    """
    Writing Intermagnet CDF format (currently: vers1.2) + optional flagging info

    kwargs:
        mode: 'skip', 'replace'/'append', or anything else (overwrite) -
              controls how an existing file is merged with datastream.
        addflags: if True, write flagging variables and bump the format
              version to 1.3.
        skipcompression: if True, do not enable CDF compression.

    Returns the written filename (``success``) once data has been written.
    """
    print ("Writing CDF data based on cdflib")

    def tt(my_dt_ob):
        # Convert a datetime into the [y, m, d, H, M, S, ms] list expected
        # by cdflib.cdfepoch.compute_tt2000.
        ms = my_dt_ob.microsecond/1000. # fraction
        date_list = [my_dt_ob.year, my_dt_ob.month, my_dt_ob.day, my_dt_ob.hour, my_dt_ob.minute, my_dt_ob.second, ms]
        return date_list

    logger.info("Writing IMAGCDF Format {}".format(filename))
    mode = kwargs.get('mode')
    addflags = kwargs.get('addflags')
    skipcompression = kwargs.get('skipcompression')
    main_cdf_spec = {}
    main_cdf_spec['Compressed'] = False
    # NOTE(review): leapsecondlastupdate is obtained but never used below.
    leapsecondlastupdate = cdflib.cdfepoch.getLeapSecondLastUpdated()
    if not skipcompression:
        try:
            main_cdf_spec['Compressed'] = True
        except:
            logger.warning("writeIMAGCDF: Compression failed for unknown reason - storing uncompresed data")
            pass
    testname = str(filename+'.cdf')
    if os.path.isfile(testname):
        filename = testname
    if os.path.isfile(filename):
        if mode == 'skip': # skip existing inputs
            exst = read(path_or_url=filename)
            datastream = joinStreams(exst,datastream)
            os.remove(filename)
            mycdf = cdflib.CDF(filename,cdf_spec=main_cdf_spec)
        elif mode == 'replace' or mode == 'append': # replace existing inputs
            exst = read(path_or_url=filename)
            datastream = joinStreams(datastream,exst)
            os.remove(filename)
            mycdf = cdflib.CDF(filename,cdf_spec=main_cdf_spec)
        else: # overwrite mode
            #print filename
            os.remove(filename)
            mycdf = cdflib.CDF(filename,cdf_spec=main_cdf_spec)
    else:
        mycdf = cdflib.CDF(filename)
    keylst = datastream._get_key_headers()
    tmpkeylst = ['time']
    tmpkeylst.extend(keylst)
    keylst = tmpkeylst
    headers = datastream.header
    head, line = [],[]
    success = False
    # For test purposes: flagging
    flaglist = []
    # check DataComponents for correctness
    dcomps = headers.get('DataComponents','')
    dkeys = datastream._get_key_headers()
    if 'f' in dkeys and len(dcomps) == 3:
        dcomps = dcomps+'S'
    if 'df' in dkeys and len(dcomps) == 3:
        dcomps = dcomps+'G'
    headers['DataComponents'] = dcomps
    ### #########################################
    ### Check Header
    ### #########################################
    INVHEADTRANSLATE = {v: k for k, v in HEADTRANSLATE.items()}
    INVHEADTRANSLATE['StationIAGAcode'] = 'IagaCode'
    globalAttrs = {}
    for key in headers:
        if key in INVHEADTRANSLATE:
            globalAttrs[INVHEADTRANSLATE.get(key)] = { 0 : headers.get(key) }
        elif key.startswith('col-') or key.startswith('unit-'):
            # column/unit metadata is written per variable, not globally
            pass
        else:
            globalAttrs[key.replace('Data','',1)] = { 0 : str(headers.get(key)) }
    ## 1. Fixed Part -- current version is 1.2
    ## Transfer MagPy Header to INTERMAGNET CDF attributes
    globalAttrs['FormatDescription'] = { 0 : 'INTERMAGNET CDF format'}
    globalAttrs['FormatVersion'] = { 0 : '1.2'}
    globalAttrs['Title'] = { 0 : 'Geomagnetic time series data'}
    if addflags:
        globalAttrs['FormatVersion'] = { 0 : '1.3'}
    ## 3. Optional flagging information
    ## identify flags within the data set and if they are present then add an attribute to the header
    if addflags:
        flaglist = datastream.extractflags()
        if len(flaglist) > 0:
            globalAttrs['FlagRulesetVersion'] = { 0 : '1.0'}
            globalAttrs['FlagRulesetType'] = { 0 : 'Conrad'}
    if not headers.get('DataPublicationDate','') == '':
        dat = tt(datastream._testtime(headers.get('DataPublicationDate','')))
        pubdate = cdflib.cdfepoch.compute_tt2000([dat])
    else:
        pubdate = cdflib.cdfepoch.compute_tt2000([tt(datetime.utcnow())])
    globalAttrs['PublicationDate'] = { 0 : pubdate }
    if not headers.get('DataSource','') == '':
        # NOTE(review): both branches below assign the same value.
        if headers.get('DataSource','') in ['INTERMAGNET', 'WDC']:
            globalAttrs['Source'] = { 0 : headers.get('DataSource','')}
        else:
            globalAttrs['Source'] = { 0 : headers.get('DataSource','')}
    else:
        globalAttrs['Source'] = { 0 : headers.get('StationInstitution','')}
    if not headers.get('DataStandardLevel','') == '':
        if headers.get('DataStandardLevel','') in ['None','none','Partial','partial','Full','full']:
            globalAttrs['StandardLevel'] = { 0 : headers.get('DataStandardLevel','')}
        else:
            print("writeIMAGCDF: StandardLevel not defined - please specify by yourdata.header['DataStandardLevel'] = ['None','Partial','Full']")
            globalAttrs['StandardLevel'] = { 0 : 'None'}
        if headers.get('DataStandardLevel','') in ['partial','Partial']:
            # one could add a validity check whether provided list is aggreement with standards
            if headers.get('DataPartialStandDesc','') == '':
                print("writeIMAGCDF: PartialStandDesc is missing. Add items like IMOM-11,IMOM-12,IMOM-13 ...")
    else:
        print("writeIMAGCDF: StandardLevel not defined - please specify by yourdata.header['DataStandardLevel'] = ['None','Partial','Full']")
        globalAttrs['StandardLevel'] = { 0 : 'None'}
    if not headers.get('DataStandardName','') == '':
        globalAttrs['StandardName'] = { 0 : headers.get('DataStandardName','')}
    else:
        try:
            #print ("writeIMAGCDF: Asigning StandardName")
            samprate = float(str(headers.get('DataSamplingRate',0)).replace('sec','').strip())
            # NOTE(review): stdadd is only bound for 1 s or 60 s sampling
            # rates; any other rate leaves it unbound and the NameError
            # below is silently absorbed by the bare except.
            if int(samprate) == 1:
                stdadd = 'INTERMAGNET_1-Second'
            elif int(samprate) == 60:
                stdadd = 'INTERMAGNET_1-Minute'
            if headers.get('DataPublicationLevel',0) in [3,'3','Q','quasi-definitive','Quasi-definitive']:
                stdadd += '_QD'
                globalAttrs['StandardName'] = { 0 : stdadd }
            elif headers.get('DataPublicationLevel',0) in [4,'4','D','definitive','Definitive']:
                globalAttrs['StandardName'] = { 0 : stdadd }
            else:
                print ("writeIMAGCDF: current Publication level {} does not allow to set StandardName".format(headers.get('DataPublicationLevel',0)))
                globalAttrs['StandardLevel'] = { 0 : 'None'}
        except:
            print ("writeIMAGCDF: Asigning StandardName Failed")
    proj = headers.get('DataLocationReference','')
    longi = headers.get('DataAcquisitionLongitude','')
    lati = headers.get('DataAcquisitionLatitude','')
    try:
        longi = "{:.3f}".format(float(longi))
        lati = "{:.3f}".format(float(lati))
    except:
        print("writeIMAGCDF: could not convert lat long to floats")
    # NOTE(review): due to operator precedence this condition reads
    # (not longi == '') or (lati == ''); the intent was presumably
    # not (longi == '' or lati == '') - confirm before changing.
    if not longi=='' or lati=='':
        if proj == '':
            # NOTE(review): `patt` is assigned but never used.
            patt = mycdf.attrs
            try:
                globalAttrs['Latitude'] = { 0 : float(lati) }
                globalAttrs['Longitude'] = { 0 : float(longi) }
            except:
                globalAttrs['Latitude'] = { 0 : lati }
                globalAttrs['Longitude'] = { 0 : longi }
        else:
            if proj.find('EPSG:') > 0:
                epsg = int(proj.split('EPSG:')[1].strip())
                if not epsg==4326:
                    print ("writeIMAGCDF: converting coordinates to epsg 4326")
                    longi,lati = convertGeoCoordinate(float(longi),float(lati),'epsg:'+str(epsg),'epsg:4326')
                    longi = "{:.3f}".format(float(longi))
                    lati = "{:.3f}".format(float(lati))
            globalAttrs['Latitude'] = { 0 : float(lati) }
            globalAttrs['Longitude'] = { 0 : float(longi) }
    if not 'StationIagaCode' in headers and 'StationID' in headers:
        globalAttrs['IagaCode'] = { 0 : headers.get('StationID','')}
    mycdf.write_globalattrs(globalAttrs)
    ### #########################################
    ### Data
    ### #########################################

    def checkEqualIvo(lst):
        # http://stackoverflow.com/questions/3844801/check-if-all-elements-in-a-list-are-identical
        # NOTE(review): appears to be unused in this function.
        return not lst or lst.count(lst[0]) == len(lst)

    def checkEqual3(lst):
        # True-ish when all elements of lst are identical (shifted compare).
        return lst[1:] == lst[:-1]

    ndarray = False
    # NOTE(review): the parenthesis placement applies len() to the boolean
    # array (ndarray[0] > 0); for numpy arrays the length (and hence the
    # truth value) is unchanged, but len(...) > 0 was probably intended.
    if len(datastream.ndarray[0]>0):
        ndarray = True
    # Check F/S/G select either S or G, send out warning if presumably F (mean zero, stddeviation < resolution)
    naninds = np.asarray([])
    ## Analyze F and dF columns:
    fcolname = 'S'
    scal = ''
    ftest = DataStream()
    if 'f' in keylst or 'df' in keylst:
        if 'f' in keylst:
            if not 'df' in keylst:
                scal = 'f'
                #print ("writeIMAGCDF: Found F column") # check whether F or S
                comps = datastream.header.get('DataComponents')
                if not comps.endswith('S'):
                    print ("writeIMAGCDF: given components are {}. Checking F column...".format(datastream.header.get('DataComponents')))
                    #calculate delta F and determine average diff
                    datastream = datastream.delta_f()
                    dfmean, dfstd = datastream.mean('df',std=True, percentage=50)
                    if dfmean < 0.0000000001 and dfstd < 0.0000000001:
                        fcolname = 'F'
                        print ("writeIMAGCDF: analyzed F column - values are apparently calculated from vector components - using column name 'F'")
                    else:
                        print ("writeIMAGCDF: analyzed F column - values are apparently independend from vector components - using column name 'S'")
            pos = KEYLIST.index('f')
            col = datastream.ndarray[pos]
        if 'df' in keylst:
            scal = 'df'
            #print ("writeIMAGCDF: Found dF column")
            pos = KEYLIST.index('df')
            col = datastream.ndarray[pos]
        col = col.astype(float)
        # Check sampling rates of main stream and f/df stream
        mainsamprate = datastream.samplingrate()
        ftest = datastream.copy()
        ftest = ftest._drop_nans(scal)
        fsamprate = ftest.samplingrate()
        if fsamprate-0.1 < mainsamprate and mainsamprate < fsamprate+0.1:
            #Samplingrate of F column and Vector are similar
            useScalarTimes=False
        else:
            useScalarTimes=True
        #print ("IMAG", len(nonancol),datastream.length()[0])
        """
        if len(nonancol) < datastream.length()[0]/2.:
            #shorten col
            print ("writeIMF - reducing f column resolution:", len(nonancol), len(col))
            naninds = np.where(np.isnan(col))[0]
            #print (naninds, len(naninds))
            useScalarTimes=True
            #[inds]=np.take(col_mean,inds[1])
        else:
            #keep column and (later) leave time
            useScalarTimes=True # change to False in order to use a single col
        """
    ## get sampling rate of vec, get sampling rate of scalar, if different extract scalar and time use separate, else ..
    for key in keylst:
        # New : assign data to the following variables: var_attrs (meta), var_data (dataarray), var_spec (key??)
        var_attrs = {}
        var_spec = {}
        if key in ['time','sectime','x','y','z','f','dx','dy','dz','df','t1','t2','scalartime']:
            try:
                if not key == 'scalartime':
                    ind = KEYLIST.index(key)
                    if ndarray and len(datastream.ndarray[ind])>0:
                        col = datastream.ndarray[ind]
                    else:
                        col = datastream._get_column(key)
                    col = col.astype(float)
                    if not False in checkEqual3(col):
                        logger.warning("Found identical values only for {}".format(key))
                        col = col[:1]
                #{'FIELDNAM': 'Geomagnetic Field Element X', 'VALIDMIN': array([-79999.]), 'VALIDMAX': array([ 79999.]), 'UNITS': 'nT', 'FILLVAL': array([ 99999.]), 'DEPEND_0': 'GeomagneticVectorTimes', 'DISPLAY_TYPE': 'time_series', 'LABLAXIS': 'X'}
                if key == 'time':
                    cdfkey = 'GeomagneticVectorTimes'
                    cdfdata = cdflib.cdfepoch.compute_tt2000( [tt(num2date(elem).replace(tzinfo=None)) for elem in col] )
                    var_spec['Data_Type'] = 33
                elif key == 'scalartime':
                    cdfkey = 'GeomagneticScalarTimes'
                    ftimecol = ftest.ndarray[0]
                    # use ftest Datastream
                    cdfdata = cdflib.cdfepoch.compute_tt2000( [tt(num2date(elem).replace(tzinfo=None)) for elem in ftimecol] )
                    var_spec['Data_Type'] = 33
                elif len(col) > 0:
                    #if len(col) > 1000000:
                    #    print ("Starting with {}".format(key))
                    var_spec['Data_Type'] = 45
                    comps = datastream.header.get('DataComponents','')
                    keyup = key.upper()
                    if key in ['t1','t2']:
                        cdfkey = key.upper().replace('T','Temperature')
                    elif not comps == '':
                        try:
                            if key == 'x':
                                compsupper = comps[0].upper()
                            elif key == 'y':
                                compsupper = comps[1].upper()
                            elif key == 'z':
                                compsupper = comps[2].upper()
                            elif key == 'f':
                                compsupper = fcolname ## MagPy requires independend F value
                            elif key == 'df':
                                compsupper = 'G'
                            else:
                                compsupper = key.upper()
                            cdfkey = 'GeomagneticField'+compsupper
                            keyup = compsupper
                        except:
                            cdfkey = 'GeomagneticField'+key.upper()
                            keyup = key.upper()
                    else:
                        cdfkey = 'GeomagneticField'+key.upper()
                    nonetest = [elem for elem in col if not elem == None]
                    if len(nonetest) > 0:
                        var_attrs['DEPEND_0'] = "GeomagneticVectorTimes"
                        var_attrs['DISPLAY_TYPE'] = "time_series"
                        var_attrs['LABLAXIS'] = keyup
                        var_attrs['FILLVAL'] = np.nan
                        if key in ['x','y','z','h','e','g','t1','t2']:
                            cdfdata = col
                            var_attrs['VALIDMIN'] = -88880.0
                            var_attrs['VALIDMAX'] = 88880.0
                        elif key == 'i':
                            cdfdata = col
                            var_attrs['VALIDMIN'] = -90.0
                            var_attrs['VALIDMAX'] = 90.0
                        elif key == 'd':
                            cdfdata = col
                            var_attrs['VALIDMIN'] = -360.0
                            var_attrs['VALIDMAX'] = 360.0
                        elif key in ['f','s','df']:
                            if useScalarTimes:
                                # write time column
                                # appending while iterating keylst is
                                # intentional: 'scalartime' is handled
                                # in a later pass of this loop
                                keylst.append('scalartime')
                                fcol = ftest._get_column(key)
                                #if len(naninds) > 0:
                                #    cdfdata = col[~np.isnan(col)]
                                var_attrs['DEPEND_0'] = "GeomagneticScalarTimes"
                                #mycdf[cdfkey] = fcol
                                cdfdata = fcol
                            else:
                                cdfdata = col
                            var_attrs['VALIDMIN'] = 0.0
                            var_attrs['VALIDMAX'] = 88880.0
                    for keydic in headers:
                        if keydic == ('col-'+key):
                            if key in ['x','y','z','f','dx','dy','dz','df']:
                                try:
                                    var_attrs['FIELDNAM'] = "Geomagnetic Field Element "+key.upper()
                                except:
                                    pass
                            if key in ['t1','t2']:
                                try:
                                    var_attrs['FIELDNAM'] = "Temperature"+key.replace('t','')
                                except:
                                    pass
                        if keydic == ('unit-col-'+key):
                            if key in ['x','y','z','f','dx','dy','dz','df','t1','t2']:
                                try:
                                    unit = 'unspecified'
                                    if 'unit-col-'+key == 'deg C':
                                        #mycdf[cdfkey].attrs['FIELDNAM'] = "Temperature "+key.upper()
                                        unit = 'Celsius'
                                    elif 'unit-col-'+key == 'deg':
                                        unit = 'Degrees of arc'
                                    else:
                                        unit = headers.get('unit-col-'+key,'')
                                    var_attrs['UNITS'] = unit
                                except:
                                    pass
                var_spec['Variable'] = cdfkey
                var_spec['Num_Elements'] = 1
                var_spec['Rec_Vary'] = True # The dimensional sizes, applicable only to rVariables.
                var_spec['Dim_Sizes'] = []
                mycdf.write_var(var_spec, var_attrs=var_attrs, var_data=cdfdata)
            except:
                pass
    success = filename
    if len(flaglist) > 0 and addflags == True:
        flagstart = 'FlagBeginTimes'
        flagend = 'FlagEndTimes'
        flagcomponents = 'FlagComponents'
        flagcode = 'FlagCode'
        flagcomment = 'FlagDescription'
        flagmodification = 'FlagModificationTimes'
        flagsystemreference = 'FlagSystemReference'
        flagobserver = 'FlagObserver'
        trfl = np.transpose(flaglist)
        #print ("Transposed flaglist", trfl)
        try:
            print ("Writing flagging information ...")
            var_attrs = {}
            var_spec = {}
            var_spec['Data_Type'] = 33
            var_spec['Num_Elements'] = 1
            var_spec['Rec_Vary'] = True # The dimensional sizes, applicable only to rVariables.
            var_spec['Dim_Sizes'] = []
            var_spec['Variable'] = flagstart
            cdfdata = cdflib.cdfepoch.compute_tt2000( [tt(el) for el in trfl[0]] )
            mycdf.write_var(var_spec, var_attrs=var_attrs, var_data=cdfdata)
            var_spec['Variable'] = flagend
            cdfdata = cdflib.cdfepoch.compute_tt2000( [tt(el) for el in trfl[1]] )
            mycdf.write_var(var_spec, var_attrs=var_attrs, var_data=cdfdata)
            var_spec['Variable'] = flagmodification
            cdfdata = cdflib.cdfepoch.compute_tt2000( [tt(el) for el in trfl[-1]] )
            mycdf.write_var(var_spec, var_attrs=var_attrs, var_data=cdfdata)
            # Here we can select between different content
            if len(flaglist[0]) == 7:
                #[st,et,key,flagnumber,commentarray[idx],sensorid,now]
                # eventually change flagcomponent in the future
                fllist = [flagcomponents,flagcode,flagcomment, flagsystemreference] # , flagobserver]
            elif len(flaglist[0]) == 8:
                # Future version ??
                fllist = [flagcomponents,flagcode,flagcomment, flagsystemreference, flagobserver]
            #print (fllist)
            for idx, cdfkey in enumerate(fllist):
                var_attrs = {}
                var_spec = {}
                if not cdfkey == flagcode:
                    ll = [str(el) for el in trfl[idx+2]]
                else:
                    ll = trfl[idx+2]
                #mycdf[cdfkey] = ll
                cdfdata = ll
                var_attrs['DEPEND_0'] = "FlagBeginTimes"
                var_attrs['DISPLAY_TYPE'] = "time_series"
                var_attrs['LABLAXIS'] = cdfkey.strip('Flag')
                #var_attrs['FILLVAL'] = np.nan
                var_attrs['FIELDNAM'] = cdfkey
                # NOTE(review): 'flagcode' (lowercase) never matches the
                # 'FlagCode' entries produced above, so VALIDMIN/VALIDMAX
                # are never set here.
                if cdfkey in ['flagcode']:
                    var_attrs['VALIDMIN'] = 0
                    var_attrs['VALIDMAX'] = 9
                if cdfkey in [flagcomponents,flagcomment, flagsystemreference, flagobserver]:
                    var_spec['Data_Type'] = 51
                    var_spec['Num_Elements'] = max([len(i) for i in ll])
                elif cdfkey in [flagcode]:
                    var_spec['Data_Type'] = 45
                    var_spec['Num_Elements'] = 1
                var_spec['Variable'] = cdfkey
                var_spec['Rec_Vary'] = True # The dimensional sizes, applicable only to rVariables.
                var_spec['Dim_Sizes'] = []
                mycdf.write_var(var_spec, var_attrs=var_attrs, var_data=cdfdata)
            logger.info("writeIMAGCDF: Flagging information added to file")
            print ("... success")
        except:
            print ("writeIMAGCDF: error when adding flags. skipping this part")
    mycdf.close()
    return success
|
#!/usr/bin/env python
import os.path
import curses
import time
import jobtracker
class File:
    """Track the on-disk progress of a single download.

    NOTE: this module is Python 2 (print statement in main(), __cmp__/cmp
    below) - keep that in mind before porting.
    """

    def __init__(self, name, size, status, update_time):
        # name: path of the file being downloaded
        # size: expected final size in bytes
        # status: 'downloading', 'unverified' or 'new'
        # update_time: tracker timestamp string '%Y-%m-%d %H:%M:%S'
        self.name = name
        self.size = size
        self.status = status
        self.currsize = 0
        self.currtime = time.time()
        self.update()
        # Seed old* with the freshly sampled values so the first rate
        # computation has a baseline.
        self.oldsize = self.currsize
        self.oldtime = self.currtime
        if self.status == 'downloading':
            t = time.strptime(update_time, '%Y-%m-%d %H:%M:%S')
            self.starttime = time.mktime(t)
        else:
            self.starttime = None

    def update(self):
        """Re-sample the file's current size, keeping the previous sample."""
        if os.path.exists(self.name):
            newsize = os.path.getsize(self.name)
        else:
            newsize = 0
        # Only roll the old sample forward when the size actually changed,
        # so the rate reflects the last observed growth.
        if newsize != self.currsize:
            self.oldsize = self.currsize
            self.oldtime = self.currtime
            self.currsize = newsize
            self.currtime = time.time()

    def get_progressbar(self, numchars):
        """Return an ASCII progress bar of total width ~numchars.

        NOTE(review): divides by self.size - a zero expected size would
        raise ZeroDivisionError; confirm the tracker never reports 0.
        """
        pcnt_complete = float(self.currsize)/self.size
        progbar = "[" + "="*int(round(pcnt_complete*(numchars-12)))+">" + \
                " "*int(round((1-pcnt_complete)*(numchars-12))) + "]" + \
                " (%5.1f%%)" % (pcnt_complete*100)
        return progbar

    def get_download_info(self, numchars):
        """Return a one-line summary: size, downloaded amount and rate."""
        info = "File Size: %d MB" % (self.size/1024.0**2)
        if self.status == 'downloading':
            info += " - Amt. Downloaded: %d MB" % (self.currsize/1024.0**2) # in MB
            delta_size = (self.currsize - self.oldsize)/1024.0**2 # in MB
            delta_time = self.currtime - self.oldtime # in seconds
            if delta_time == 0:
                info += " - Rate: ?? MB/s"
            else:
                rate = delta_size/delta_time
                info += " - Rate: %.2f MB/s" % rate
        elif self.status == 'unverified':
            if self.starttime is None:
                info += " - Avg. Rate: ?? MB/s - Total Time: ?? s"
            else:
                rate = self.size/(self.oldtime - self.starttime)/1024.0**2 # in MB/s
                info += " - Avg. Rate: %.2f MB/s - Total Time: %d s" % \
                        (rate, round(self.oldtime-self.starttime))
        return info

    def __cmp__(self, other):
        # Python 2 rich ordering: sort by status so 'unverified' ranks
        # highest, then 'downloading', then 'new' (cmp is py2-only).
        status_mapping = {'downloading': 1, 'unverified': 2, 'new':0}
        return cmp(status_mapping[self.status], status_mapping[other.status])
class FileList(list):
    """List of File objects mirroring the tracker's active downloads."""

    def __init__(self):
        super(FileList, self).__init__()

    def update(self):
        """Synchronise this list with the jobtracker 'files' table.

        Adds entries for new rows, refreshes status/starttime for known
        ones, and drops entries whose rows disappeared.
        """
        active_downloads = jobtracker.query("SELECT * FROM files " \
                                            "WHERE status IN ('downloading', " \
                                            "'unverified', " \
                                            "'new') " \
                                            "ORDER BY created_at ASC")
        # Pass 1: merge tracker rows into the current list.
        for dl in active_downloads:
            found = False
            for f in self:
                if dl['filename'] == f.name:
                    f.status=dl['status']
                    found = True
            if not found:
                self.append(File(dl['filename'], dl['size'], dl['status'], dl['updated_at']))
        # Pass 2: remove stale entries and refresh the live ones.
        for ii, f in enumerate(self):
            found = False
            for dl in active_downloads:
                if dl['filename'] == f.name:
                    found = True
                    if dl['status'] == 'downloading':
                        t = time.strptime(dl['updated_at'], '%Y-%m-%d %H:%M:%S')
                        f.starttime = time.mktime(t)
            if not found:
                # NOTE(review): popping while enumerating shifts later
                # indices, so the element following a removed one is
                # skipped until the next update() call.
                self.pop(ii)
            else:
                f.update()
def show_status(scr):
    """Render the download table onto the curses screen.

    Reads the module-global `files` list set up in main(). Each file
    occupies three rows: name/status, info line, progress bar.
    """
    scr.clear()
    maxy, maxx = scr.getmaxyx()
    scr.addstr(0,0, "Number of active downloads: %d" % len(files), \
                curses.A_BOLD | curses.A_UNDERLINE)
    # NOTE(review): (maxy-2)/3-1 relies on Python 2 integer division (this
    # script is py2 - see the print statement in main()); under Python 3
    # this slice index would be a float and raise TypeError.
    for ii, file in enumerate(files[:(maxy-2)/3-1]):
        fn = os.path.split(file.name)[-1]
        scr.addstr(2+ii*3, 0, fn, curses.A_BOLD)
        scr.addstr(2+ii*3, len(fn), " - %s" % file.status)
        scr.addstr(3+ii*3, 0, file.get_download_info(maxx))
        scr.addstr(4+ii*3, 0, file.get_progressbar(maxx))
    scr.refresh()
def loop(scr):
    """Curses main loop: refresh the file list and redraw once per second."""
    curses.curs_set(0)
    curses.use_default_colors()
    scr.scrollok(True)
    while True:
        files.update()
        # reverse sort puts 'unverified' before 'downloading' before 'new'
        # (ordering defined by File.__cmp__)
        files.sort(reverse=True)
        show_status(scr)
        time.sleep(1)
def main():
    """Set up the shared file list and run the curses UI until Ctrl-C."""
    # `files` is shared with loop()/show_status() as a module global.
    global files
    files = FileList()
    try:
        curses.wrapper(loop)
    except KeyboardInterrupt:
        # Python 2 print statement - this module targets Python 2.
        print "Exiting..."
# Run the curses UI only when executed as a script.
if __name__=='__main__':
    main()
|
import unittest
import ramda as R
from ramda.private._curry3 import _curry3
from ramda.private._inspect import funcArgsLength
"""
https://github.com/ramda/ramda/blob/master/test/internal/_curry3.js
"""
def f(a, b, c):
    """Collect the three positional arguments into a list (test fixture)."""
    return [a, b, c]
# Curried variant of f; every test case below exercises this wrapper.
g = _curry3(f)
class Test_Curry3(unittest.TestCase):
    """Port of ramda's internal _curry3 tests.

    Verifies placeholder support, reported arity, and tolerance of extra
    arguments for the curried 3-argument function ``g``.
    """

    def test_supports_placeholder(self):
        # R.__ is ramda's placeholder: it defers an argument slot until a
        # later call supplies it.
        _ = R.__
        self.assertEqual([1, 2, 3], g(1)(2)(3))
        self.assertEqual([1, 2, 3], g(1)(2, 3))
        self.assertEqual([1, 2, 3], g(1, 2)(3))
        self.assertEqual([1, 2, 3], g(1, 2, 3))
        self.assertEqual([1, 2, 3], g(_, 2, 3)(1))
        self.assertEqual([1, 2, 3], g(1, _, 3)(2))
        self.assertEqual([1, 2, 3], g(1, 2, _)(3))
        self.assertEqual([1, 2, 3], g(1, _, _)(2)(3))
        self.assertEqual([1, 2, 3], g(_, 2, _)(1)(3))
        self.assertEqual([1, 2, 3], g(_, _, 3)(1)(2))
        self.assertEqual([1, 2, 3], g(1, _, _)(2, 3))
        self.assertEqual([1, 2, 3], g(_, 2, _)(1, 3))
        self.assertEqual([1, 2, 3], g(_, _, 3)(1, 2))
        self.assertEqual([1, 2, 3], g(1, _, _)(_, 3)(2))
        self.assertEqual([1, 2, 3], g(_, 2, _)(_, 3)(1))
        self.assertEqual([1, 2, 3], g(_, _, 3)(_, 2)(1))
        self.assertEqual([1, 2, 3], g(_, _, _)(_, _)(1, 2, 3))
        self.assertEqual([1, 2, 3], g(_, _, _)(1, _, _)(_, _)(2, _)(_)(3))

    def test_has_3_arity(self):
        # The curried wrapper must still report three formal arguments.
        self.assertEqual(3, funcArgsLength(g))

    def test_works_even_more_args_provided(self):
        # Arguments beyond the third are ignored.
        self.assertEqual([1, 2, 3], g(1, 2, 3, 4))
if __name__ == '__main__':
unittest.main()
|
import numpy as np
import torch
import torch.nn
import matplotlib.pyplot as plt
dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU
# Flags selecting which of the three experiments below run.
schedule = [False, False, True]
########## Basic (FB prediction)
# Experiment 1: feedback prediction -- y predicts x through `weight`, and
# BOTH x and y are adjusted by gradient descent on the squared error.
if schedule[0]:
    N, D_in, D_out = 1, 5, 5
    # Initialize input and output
    x = torch.tensor(np.ones((1,D_in)), dtype=dtype, requires_grad=True)
    y = torch.tensor(np.zeros((1,D_out)), dtype=dtype, requires_grad=True)
    # Initialize weights: unit basis vectors except for d2, d3
    weight = torch.zeros(D_in, D_out, dtype=dtype)
    weight[0,0] = 2
    weight[1,1] = 1
    weight[2,2] = 0.5
    weight[4,4] = -0.5
    e_history = []
    x_history = []
    y_history = []
    num_iters = 250
    x_rate = 0.01
    y_rate = 0.01
    for iter in range(num_iters):
        print(iter)
        # Detached numpy snapshots, so later in-place updates don't rewrite history.
        x_history.append(x.detach().numpy().copy())
        y_history.append(y.detach().numpy().copy())
        # Forward prediction
        pred = y.mm(weight)
        err = x - pred
        e_history.append(err.detach().numpy().copy())
        loss = err.pow(2).sum()
        # Update input/output (GD)
        loss.backward()
        with torch.no_grad():
            x -= x_rate * x.grad
            y -= y_rate * y.grad
            # Manually zero the gradients after updating weights
            x.grad.zero_()
            y.grad.zero_()
    # Plot grid: row 1 = y components, row 2 = error, row 3 = x components.
    plt.subplot(3,5,1);plt.plot(range(num_iters), [yt[0,0] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,2);plt.plot(range(num_iters), [yt[0,1] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,3);plt.plot(range(num_iters), [yt[0,2] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,4);plt.plot(range(num_iters), [yt[0,3] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,5);plt.plot(range(num_iters), [yt[0,4] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,6);plt.plot(range(num_iters), [et[0,0] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,7);plt.plot(range(num_iters), [et[0,1] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,8);plt.plot(range(num_iters), [et[0,2] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,9);plt.plot(range(num_iters), [et[0,3] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,10);plt.plot(range(num_iters), [et[0,4] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,11);plt.plot(range(num_iters), [xt[0,0] for xt in x_history]);plt.ylim(-1,1)
    plt.subplot(3,5,12);plt.plot(range(num_iters), [xt[0,1] for xt in x_history]);plt.ylim(-1,1)
    plt.subplot(3,5,13);plt.plot(range(num_iters), [xt[0,2] for xt in x_history]);plt.ylim(-1,1)
    plt.subplot(3,5,14);plt.plot(range(num_iters), [xt[0,3] for xt in x_history]);plt.ylim(-1,1)
    plt.subplot(3,5,15);plt.plot(range(num_iters), [xt[0,4] for xt in x_history]);plt.ylim(-1,1)
    plt.show()
########## FF prediction
# Experiment 2: same setup as experiment 1, but the prediction now flows
# forward (x predicts y) and the error is taken on y.
if schedule[1]:
    N, D_in, D_out = 1, 5, 5
    # Initialize input and output
    x = torch.tensor(np.ones((1,D_in)), dtype=dtype, requires_grad=True)
    y = torch.tensor(np.zeros((1,D_out)), dtype=dtype, requires_grad=True)
    # Initialize weights: unit basis vectors except for d2, d3
    weight = torch.zeros(D_in, D_out, dtype=dtype)
    weight[0,0] = 2
    weight[1,1] = 1
    weight[2,2] = 0.5
    weight[4,4] = -0.5
    e_history = []
    x_history = []
    y_history = []
    num_iters = 250
    x_rate = 0.01
    y_rate = 0.01
    for iter in range(num_iters):
        print(iter)
        # Detached numpy snapshots, so later in-place updates don't rewrite history.
        x_history.append(x.detach().numpy().copy())
        y_history.append(y.detach().numpy().copy())
        # Forward prediction
        pred = x.mm(weight)
        err = y - pred
        e_history.append(err.detach().numpy().copy())
        loss = err.pow(2).sum()
        # Update input/output (GD)
        loss.backward()
        with torch.no_grad():
            x -= x_rate * x.grad
            y -= y_rate * y.grad
            # Manually zero the gradients after updating weights
            x.grad.zero_()
            y.grad.zero_()
    # Plot grid: row 1 = y components, row 2 = error, row 3 = x components.
    plt.subplot(3,5,1);plt.plot(range(num_iters), [yt[0,0] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,2);plt.plot(range(num_iters), [yt[0,1] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,3);plt.plot(range(num_iters), [yt[0,2] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,4);plt.plot(range(num_iters), [yt[0,3] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,5);plt.plot(range(num_iters), [yt[0,4] for yt in y_history]);plt.ylim(-1,1)
    plt.subplot(3,5,6);plt.plot(range(num_iters), [et[0,0] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,7);plt.plot(range(num_iters), [et[0,1] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,8);plt.plot(range(num_iters), [et[0,2] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,9);plt.plot(range(num_iters), [et[0,3] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,10);plt.plot(range(num_iters), [et[0,4] for et in e_history]);plt.ylim(-1,1)
    plt.subplot(3,5,11);plt.plot(range(num_iters), [xt[0,0] for xt in x_history]);plt.ylim(-1,1)
    plt.subplot(3,5,12);plt.plot(range(num_iters), [xt[0,1] for xt in x_history]);plt.ylim(-1,1)
    plt.subplot(3,5,13);plt.plot(range(num_iters), [xt[0,2] for xt in x_history]);plt.ylim(-1,1)
    plt.subplot(3,5,14);plt.plot(range(num_iters), [xt[0,3] for xt in x_history]);plt.ylim(-1,1)
    plt.subplot(3,5,15);plt.plot(range(num_iters), [xt[0,4] for xt in x_history]);plt.ylim(-1,1)
    plt.show()
########## FF prediction with pooling (conclusion: pooling does NOT work. behaves like an average layer)
# Experiment 3: forward prediction followed by a max-pool; the scalar y is
# compared against max(pred) and both x and y are updated by gradient descent.
if schedule[2]:
    N, D_in, D_out = 1, 3, 3
    # Initialize input and output
    x_pre = np.ones((1, D_in))/2
    x_pre[0, 1] = 1
    x_pre[0, 2] = -0.5
    x = torch.tensor(x_pre, dtype=dtype, requires_grad=True)
    y = torch.tensor(np.zeros((1, 1)), dtype=dtype, requires_grad=True)
    # Initialize weights: unit basis vectors except for d2, d3
    weight = torch.zeros(D_in, D_out, dtype=dtype)
    weight[0, 0] = 2
    weight[1, 1] = 1
    weight[2, 2] = -2
    e_history = []
    pred_history = []
    x_history = []
    y_history = []
    num_iters = 250
    x_rate = 0.01
    y_rate = 0.01
    for iter in range(num_iters):
        print(iter)
        # Detached numpy snapshots, so later in-place updates don't rewrite history.
        x_history.append(x.detach().numpy().copy())
        y_history.append(y.detach().numpy().copy())
        # Forward prediction
        pred = x.mm(weight)
        # Store a detached numpy copy like the other histories; appending the
        # live tensor would keep every iteration's autograd graph alive.
        pred_history.append(pred.detach().numpy().copy())
        err = y - torch.max(pred)
        e_history.append(err.detach().numpy().copy())
        loss = err.pow(2).sum()
        # Update input/output (GD)
        loss.backward()
        with torch.no_grad():
            x -= x_rate * x.grad
            y -= y_rate * y.grad
            # Manually zero the gradients after updating weights
            x.grad.zero_()
            y.grad.zero_()
    # Plot grid: y (top), per-unit predictions (middle), x components (bottom).
    plt.subplot(3, 3, 2);plt.plot(range(num_iters), [yt[0, 0] for yt in y_history]);plt.ylim(-1, 1)
    plt.subplot(3, 3, 4);plt.plot(range(num_iters), [pt[0, 0] for pt in pred_history]);plt.ylim(-1, 1)
    plt.subplot(3, 3, 5);plt.plot(range(num_iters), [pt[0, 1] for pt in pred_history]);plt.ylim(-1, 1)
    plt.subplot(3, 3, 6);plt.plot(range(num_iters), [pt[0, 2] for pt in pred_history]);plt.ylim(-1, 1)
    plt.subplot(3, 3, 7);plt.plot(range(num_iters), [xt[0, 0] for xt in x_history]);plt.ylim(-1, 1)
    plt.subplot(3, 3, 8);plt.plot(range(num_iters), [xt[0, 1] for xt in x_history]);plt.ylim(-1, 1)
    plt.subplot(3, 3, 9);plt.plot(range(num_iters), [xt[0, 2] for xt in x_history]);plt.ylim(-1, 1)
    plt.show()
|
"""
Created by Amor on 2018-09-22
"""
import xadmin
from .models import Fav, Comment, Reply, Like, Message, Dynamics
__author__ = '骆杨'
class FavAdmin(object):
    """xadmin display options for Fav (favourites)."""
    model_icon = 'fa fa-star'
class CommentAdmin(object):
    """xadmin display options for Comment."""
    model_icon = 'fa fa-commenting'
class ReplyAdmin(object):
    """xadmin display options for Reply."""
    model_icon = 'fa fa-comments-o'
class LikeAdmin(object):
    """xadmin display options for Like."""
    model_icon = 'fa fa-thumbs-up'
class MessageAdmin(object):
    """xadmin display options for Message."""
    model_icon = 'fa fa-archive'
class DynamicsAdmin(object):
    """xadmin display options for Dynamics."""
    model_icon = 'fa fa-at'
# Register every model with its admin-options class above.
xadmin.site.register(Fav, FavAdmin)
xadmin.site.register(Comment, CommentAdmin)
xadmin.site.register(Reply, ReplyAdmin)
xadmin.site.register(Like, LikeAdmin)
xadmin.site.register(Message, MessageAdmin)
xadmin.site.register(Dynamics, DynamicsAdmin)
|
import json
from django.core.paginator import PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse,HttpResponse
# Create your views here.
from django.views.generic.base import View
from django_filters.rest_framework import DjangoFilterBackend
from pure_pagination import Paginator
from rest_framework import mixins, viewsets
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from apps.article.views import StandardResultsSetPagination
from apps.course.filter import CoursesFilter
from apps.course.models import Courses, CourseList
from apps.course.serializers import CourseSerializers, CreatedCourseSerializers, AddtutorialSerializers
from apps.support.models import Seo
from apps.uitls.jsonserializable import DateEncoder
from apps.uitls.permissions import IsOwnerOrReadOnly, IsOwnerOrRead
def List(request):
    """Tutorial (course) list page.

    Renders all courses plus the SEO record named '教程'; each anchor on the
    page links into the Detail view by the course's uuid.
    """
    course = Courses.objects.all()
    seo_list =get_object_or_404(Seo,name='教程')
    return render(request,'pc/course/index.html',{'course':course,'seo_list':seo_list})
def Detail(request,course_id,list_id):
    """Article view: all articles of one course, with one selected.

    The page's left side renders every article title of the course
    (course_id, a uuid); the right side renders the body of the article
    selected by list_id.  Taking both URL parameters resolves the original
    author's question of how to receive the uuid and the id in one view.
    """
    course_list = CourseList.objects.filter(course=course_id)  # all articles belonging to this course
    content = get_object_or_404(course_list, pk=list_id)  # the currently selected article
    # NOTE(review): "previous" is taken from id__gt (higher ids) and "netx"
    # (sic, next) from id__lt -- the labels look swapped relative to id
    # order.  The template context keys depend on these names, so confirm
    # against the template before changing anything.
    previous_blog = course_list.filter(id__gt=list_id).first()
    netx_blog = course_list.filter(id__lt=list_id).last()
    return render(request,'pc/course/detail.html',{'course':course_list,'uuid':course_id,'content':content,'previous_blog':previous_blog,'netx_blog':netx_blog})
def courseViewApi(request,courses_id):
    """Debug endpoint: page through a course's tutorial list.

    ``page`` comes from the query string; a missing, empty or non-integer
    value falls back to the first page.  Returns an empty HttpResponse
    (the prints are debugging output).
    """
    course = Courses.objects.get(pk=courses_id)
    course_list = course.courselist_set.all()
    # Empty string (``?page=``) and a missing parameter both mean page 1.
    page = request.GET.get('page', 1) or 1
    p = Paginator(course_list, 10, request=request)
    try:
        # PageNotAnInteger is raised here by Paginator.page -- the original
        # handler wrapped request.GET.get, which never raises it, so bad
        # page values crashed the view.
        people = p.page(page)
    except PageNotAnInteger:
        people = p.page(1)
    print(people.object_list)
    print(people.next_page_number)
    return HttpResponse()
class CoursesList(viewsets.ReadOnlyModelViewSet):
    """Read-only, paginated API over non-deleted courses (JWT auth)."""
    queryset = Courses.objects.filter(is_delete=False)
    serializer_class = CourseSerializers
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)  # anonymous access denied
    authentication_classes = [JSONWebTokenAuthentication]
    pagination_class = StandardResultsSetPagination
class MeCoursesList(viewsets.ReadOnlyModelViewSet):
    """Read-only API over the requesting user's own courses (JWT auth)."""
    queryset = Courses.objects.filter(is_delete=False)
    serializer_class = CourseSerializers
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)  # anonymous access denied
    authentication_classes = [JSONWebTokenAuthentication]
    pagination_class = StandardResultsSetPagination
    def get_queryset(self):
        # Restrict the listing to courses owned by the current user.
        # NOTE(review): unlike the class-level queryset this does not filter
        # on is_delete -- confirm whether deleted courses should appear here.
        return Courses.objects.filter(user=self.request.user)
class CourseCreatedList(mixins.CreateModelMixin,mixins.UpdateModelMixin,viewsets.ReadOnlyModelViewSet):
    """Create/update/list API for courses, filterable via CoursesFilter (JWT auth)."""
    queryset = Courses.objects.filter(is_delete=False)
    serializer_class = CreatedCourseSerializers
    pagination_class = StandardResultsSetPagination
    permission_classes = (IsAuthenticated, IsOwnerOrReadOnly)  # anonymous access denied
    authentication_classes = [JSONWebTokenAuthentication]
    filter_backends = (DjangoFilterBackend,)
    filter_class = CoursesFilter
class CourseListCreated(mixins.CreateModelMixin,mixins.UpdateModelMixin,viewsets.ReadOnlyModelViewSet):
    """Create/update/list API for tutorial entries (CourseList rows, JWT auth)."""
    queryset = CourseList.objects.all()
    serializer_class = AddtutorialSerializers
    permission_classes = (IsAuthenticated, IsOwnerOrRead)  # anonymous access denied
    authentication_classes = [JSONWebTokenAuthentication]
|
""" Abstracts the capturing and interfacing of applications """
import os
import re
import time
import pyperclip
import platform
import subprocess
from .RegionMatching import Region
from .SettingsDebug import Debug
# Select the per-OS input/window backend once, at import time.
if platform.system() == "Windows":
    from .PlatformManagerWindows import PlatformManagerWindows
    PlatformManager = PlatformManagerWindows() # No other input managers built yet
elif platform.system() == "Darwin":
    from .PlatformManagerDarwin import PlatformManagerDarwin
    PlatformManager = PlatformManagerDarwin()
else:
    # Avoid throwing an error if it's just being imported for documentation purposes
    if not os.environ.get('READTHEDOCS') == 'True':
        raise NotImplementedError("Lackey is currently only compatible with Windows and OSX.")
# Python 3 compatibility
# Python 3 removed `basestring`; alias it to `str` so isinstance checks below
# work on both interpreters.
try:
    basestring
except NameError:
    basestring = str
class App(object):
    """ Allows apps to be selected by title, PID, or by starting an
    application directly. Can address individual windows tied to an
    app.
    For more information, see `Sikuli's App documentation <http://sikulix-2014.readthedocs.io/en/latest/appclass.html#App>`_.
    """
    def __init__(self, identifier=None):
        """Build an App from an int PID, a window-title fragment, or a
        command line string (a leading "+" launches it immediately).
        """
        self._pid = None
        self._search = identifier
        self._title = ""
        self._exec = ""
        self._params = ""
        self._process = None
        self._devnull = None
        self._defaultScanRate = 0.1
        self.proc = None
        # Replace class methods with instance methods
        self.focus = self._focus_instance
        self.close = self._close_instance
        self.open = self._open_instance
        # Process `identifier`
        if isinstance(identifier, int):
            # `identifier` is a PID
            Debug.log(3, "Creating App by PID ({})".format(identifier))
            self._pid = identifier
        elif isinstance(identifier, basestring):
            # `identifier` is either part of a window title
            # or a command line to execute. If it starts with a "+",
            # launch it immediately. Otherwise, store it until open() is called.
            Debug.log(3, "Creating App by string ({})".format(identifier))
            launchNow = False
            if identifier.startswith("+"):
                # Should launch immediately - strip the `+` sign and continue
                launchNow = True
                identifier = identifier[1:]
            # Check if `identifier` is an executable commmand
            # Possible formats:
            # Case 1: notepad.exe C:\sample.txt
            # Case 2: "C:\Program Files\someprogram.exe" -flag
            # Extract hypothetical executable name
            if identifier.startswith('"'):
                executable = identifier[1:].split('"')[0]
                params = identifier[len(executable)+2:].split(" ") if len(identifier) > len(executable) + 2 else []
            else:
                executable = identifier.split(" ")[0]
                params = identifier[len(executable)+1:].split(" ") if len(identifier) > len(executable) + 1 else []
            # Check if hypothetical executable exists
            if self._which(executable) is not None:
                # Found the referenced executable
                self._exec = executable
                self._params = params
                # If the command was keyed to execute immediately, do so.
                if launchNow:
                    self.open()
            else:
                # No executable found - treat as a title instead. Try to capture window.
                self._title = identifier
                self.open()
        else:
            self._pid = -1 # Unrecognized identifier, setting to empty app
        self._pid = self.getPID() # Confirm PID is an active process (sets to -1 otherwise)
    def _which(self, program):
        """ Private method to check if an executable exists
        Shamelessly stolen from http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
        Returns the resolved path, or None if `program` is not found.
        """
        def is_exe(fpath):
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
        fpath, fname = os.path.split(program)
        if fpath:
            # A path was given: only accept it if it is executable as-is.
            if is_exe(program):
                return program
        else:
            # Bare name: search each PATH entry.
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file
        return None
    @classmethod
    def pause(cls, waitTime):
        """ Sleeps for `waitTime` seconds. """
        time.sleep(waitTime)
    @classmethod
    def focus(cls, appName):
        """ Searches for exact text, case insensitive, anywhere in the window title.
        Brings the matching window to the foreground.
        As a class method, accessible as `App.focus(appName)`. As an instance method,
        accessible as `App(appName).focus()`.
        """
        app = cls(appName)
        return app.focus()
    def _focus_instance(self):
        """ In instances, the ``focus()`` classmethod is replaced with this instance method. """
        if self._title:
            Debug.log(3, "Focusing app with title like ({})".format(self._title))
            PlatformManager.focusWindow(PlatformManager.getWindowByTitle(re.escape(self._title)))
            if self.getPID() == -1:
                self.open()
        elif self._pid and self._pid != -1:
            Debug.log(3, "Focusing app with pid ({})".format(self._pid))
            PlatformManager.focusWindow(PlatformManager.getWindowByPID(self._pid))
        return self
    @classmethod
    def close(cls, appName):
        """ Closes the process associated with the specified app.
        As a class method, accessible as `App.close(appName)`.
        As an instance method, accessible as `App(appName).close()`.
        """
        return cls(appName).close()
    def _close_instance(self):
        """ In instances, the ``close()`` classmethod is replaced with this instance method. """
        if self._process:
            # We launched it ourselves: terminate our child and its devnull handle.
            self._process.terminate()
            self._devnull.close()
        elif self.getPID() != -1:
            PlatformManager.killProcess(self.getPID())
    @classmethod
    def open(self, executable):
        """ Runs the specified command and returns an App linked to the generated PID.
        As a class method, accessible as `App.open(executable_path)`.
        As an instance method, accessible as `App(executable_path).open()`.
        """
        # NOTE(review): first parameter of this @classmethod is the class
        # itself; it is conventionally named `cls`, not `self`.
        return App(executable).open()
    def _open_instance(self, waitTime=0):
        """ In instances, the ``open()`` classmethod is replaced with this instance method. """
        if self._exec != "":
            # Open from an executable + parameters
            self._devnull = open(os.devnull, 'w')
            self._process = subprocess.Popen([self._exec] + self._params, shell=False, stderr=self._devnull, stdout=self._devnull)
            self._pid = self._process.pid
        elif self._title != "":
            # Capture an existing window that matches self._title
            self._pid = PlatformManager.getWindowPID(
                PlatformManager.getWindowByTitle(
                    re.escape(self._title)))
        time.sleep(waitTime)
        return self
    @classmethod
    def focusedWindow(cls):
        """ Returns a Region corresponding to whatever window is in the foreground """
        x, y, w, h = PlatformManager.getWindowRect(PlatformManager.getForegroundWindow())
        return Region(x, y, w, h)
    def getWindow(self):
        """ Returns the title of the main window of the currently open app.
        Returns an empty string if no match could be found.
        """
        if self.getPID() != -1:
            if not self.hasWindow():
                return ""
            return PlatformManager.getWindowTitle(PlatformManager.getWindowByPID(self.getPID()))
        else:
            return ""
    def getName(self):
        """ Returns the short name of the app as shown in the process list """
        return PlatformManager.getProcessName(self.getPID())
    def getPID(self):
        """ Returns the PID for the associated app
        (or -1, if no app is associated or the app is not running)
        """
        if self._pid is not None:
            if not PlatformManager.isPIDValid(self._pid):
                self._pid = -1
            return self._pid
        return -1
    def hasWindow(self):
        """ Returns True if the process has a window associated, False otherwise """
        return PlatformManager.getWindowByPID(self.getPID()) is not None
    def waitForWindow(self, seconds=5):
        """ Polls (every 0.5s, up to `seconds`) for this app's window region.
        Returns the Region, or None if the timeout elapsed first.
        """
        timeout = time.time() + seconds
        while True:
            window_region = self.window()
            if window_region is not None or time.time() > timeout:
                break
            time.sleep(0.5)
        return window_region
    def window(self, windowNum=0):
        """ Returns the region corresponding to the specified window of the app.
        Defaults to the first window found for the corresponding PID.
        """
        if self._pid == -1:
            return None
        if not self.hasWindow():
            return None
        x,y,w,h = PlatformManager.getWindowRect(PlatformManager.getWindowByPID(self._pid, windowNum))
        return Region(x,y,w,h).clipRegionToScreen()
    def setUsing(self, params):
        """ Sets the launch parameters (space-separated string) used by open(). """
        self._params = params.split(" ")
    def __repr__(self):
        """ Returns a string representation of the app """
        return "[{pid}:{executable} ({windowtitle})] {searchtext}".format(pid=self._pid, executable=self.getName(), windowtitle=self.getWindow(), searchtext=self._search)
    def isRunning(self, waitTime=0):
        """ If PID isn't set yet, checks if there is a window with the specified title. """
        waitUntil = time.time() + waitTime
        while True:
            if self.getPID() > 0:
                return True
            else:
                self._pid = PlatformManager.getWindowPID(PlatformManager.getWindowByTitle(re.escape(self._title)))
            # Check if we've waited long enough
            if time.time() > waitUntil:
                break
            else:
                time.sleep(self._defaultScanRate)
        return self.getPID() > 0
    def isValid(self):
        """ True if the app has a launchable executable or a live PID. """
        return (os.path.isfile(self._exec) or self.getPID() > 0)
    @classmethod
    def getClipboard(cls):
        """ Gets the contents of the clipboard (as classmethod) """
        return pyperclip.paste()
    @classmethod
    def setClipboard(cls, contents):
        """ Sets the contents of the clipboard (as classmethod) """
        return pyperclip.copy(contents)
|
import pytest
from tests import strategies
from tests.utils import example
@pytest.fixture(scope='function')
def filtration_method() -> str:
    """Provide one example filtration-method name, fresh for each test."""
    return example(strategies.filtration_methods)
|
from django.db import models
from django_extensions.db.models import TimeStampedModel
# Create your models here.
class Link(TimeStampedModel):
    """A submitted URL with an optional description (timestamped)."""
    url = models.URLField()
    description = models.TextField(blank=True)
    # PROTECT keeps links from being deleted along with their poster;
    # null=True allows anonymous/orphaned links.
    posted_by = models.ForeignKey(
        to="hn_users.HNUser", null=True, on_delete=models.PROTECT
    )
    def __str__(self):
        return f"{self.url} | {self.description}"
class Vote(TimeStampedModel):
    """One user's vote on one link; (user, link) is unique, so at most one
    vote per user per link."""
    # CASCADE: deleting the user removes their votes.
    user = models.ForeignKey(
        to="hn_users.HNUser", related_name="hn_user_votes", on_delete=models.CASCADE
    )
    # PROTECT: a link with votes cannot be deleted.
    link = models.ForeignKey(
        to="links.Link", related_name="link_votes", on_delete=models.PROTECT
    )
    class Meta:
        unique_together = ("user", "link")
    def __str__(self):
        return f"{self.user} - {self.link}"
|
#Use to create local host
import http.server
import socketserver
PORT = 8000
Handler = http.server.SimpleHTTPRequestHandler
# Serve .js with a JavaScript MIME type (some platforms map it to text/plain,
# which browsers reject for module scripts).
Handler.extensions_map.update({
    ".js": "application/javascript",
})
# Allow immediate restarts: without this, rebinding port 8000 fails while the
# old socket sits in TIME_WAIT.
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", PORT), Handler)
print(f"Starting server on http://localhost:{PORT}")
try:
    httpd.serve_forever()
except KeyboardInterrupt:
    print("\rServer Stopped")
finally:
    # Release the listening socket on any exit path.
    httpd.server_close()
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax
from nnunet.training.network_training.nnUNetTrainerV2 import nnUNetTrainerV2
class nnUNetTrainerV2_resample33(nnUNetTrainerV2):
    """nnUNetTrainerV2 variant that always exports with order-3 resampling
    both in-plane and along z, and never forces separate z handling."""
    def validate(self, do_mirroring: bool = True, use_train_mode: bool = False, tiled: bool = True, step: int = 2,
                 save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,
                 validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,
                 force_separate_z: bool = None, interpolation_order: int = 3, interpolation_order_z=0):
        # Deliberately overrides the caller-supplied resampling arguments:
        # force_separate_z, interpolation_order and interpolation_order_z are
        # pinned to this trainer's (False, 3, 3) policy.
        return super().validate(do_mirroring, use_train_mode, tiled, step, save_softmax, use_gaussian,
                                overwrite, validation_folder_name, debug, all_in_gpu,
                                force_separate_z=False, interpolation_order=3,
                                interpolation_order_z=3)
    def preprocess_predict_nifti(self, input_files, output_file=None, softmax_ouput_file=None):
        """
        Use this to predict new data
        :param input_files: raw image file(s) for one case
        :param output_file: path for the exported segmentation nifti
        :param softmax_ouput_file: optional path for the softmax output
        :return: None; results are written to the given paths
        """
        print("preprocessing...")
        d, s, properties = self.preprocess_patient(input_files)
        print("predicting...")
        pred = self.predict_preprocessed_data_return_softmax(d, self.data_aug_params["do_mirror"], 1, False, 1,
                                                             self.data_aug_params['mirror_axes'], True, True, 2,
                                                             self.patch_size, True)
        # Undo the preprocessing transpose before export.
        pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward])
        print("resampling to original spacing and nifti export...")
        # Same (order 3, no separate z) resampling policy as validate() above.
        save_segmentation_nifti_from_softmax(pred, output_file, properties, 3, None, None, None, softmax_ouput_file,
                                             None, force_separate_z=False, interpolation_order_z=3)
        print("done")
|
# python3 (3.6)
# Coded on iPad Pro 2020 4th Generation
# Pythonista an Apple iPad App
# MIT License Copyright (c) 2020 Rogelio Fiorenzano
#
# ITS320: Basic Programming
# Colorado State University Global
# Dr. Joseph Turano
# Option #1: String Values in Reverse Order
# Assignment Instructions
# Write a Python function that will accept as input three string values from a user. The method will return to the user a concatenation of the string values in reverse order. The function is to be called from the main method.
# In the main method, prompt the user for the three strings.
# Capture of user input and placing into a list for later reversal
# Accumulates the user's input strings for later reversal.
usr_strings = []

# Area to define String Reversal
def str_reversal():
    """Reverse the collected strings in place and return the same list."""
    usr_strings[:] = usr_strings[::-1]
    return usr_strings

str_reversal()
# Area to define Main
def main():
    """Prompt for strings until the user types QUIT, then print the
    collected inputs followed by their reversal."""
    print("Hello fellow user! Please provide your input when prompted.\n\n\tSPECIAL NOTE!\n\nWhen done providing input, please type, \"QUIT\" to end capture of input.")
    while True:
        entry = input("\nPlease provide your input here: ")
        if entry != "QUIT":
            usr_strings.append(entry)
            continue
        print("\n")
        print("Your original user inputs were: ", usr_strings)
        print("\n")
        print("Your original user inputs reversed are: ", str_reversal())
        break
main() |
#Printing stars in 'S' shape!
#
# 7 rows x 5 columns.  Rows 0, 3 and 6 are horizontal bars (columns 1-3);
# rows 1-2 carry the upper-left stroke (column 0) and rows 4-5 the
# lower-right stroke (column 4) -- together they trace an 'S'.
for row in range(7):
    line = ''
    for col in range(5):
        bar = row in (0, 3, 6) and 0 < col < 4
        upper_left = col == 0 and 0 < row < 3
        lower_right = col == 4 and 3 < row < 6
        line += '*' if bar or upper_left or lower_right else ' '
    print(line)
from . import core, plugins, utils
|
# encoding: utf-8
from __future__ import print_function
import os
from argparse import ArgumentParser
from websocket_server import start_server
from db import database
from models import __all__, User
import migrations
# Command-line interface with three subcommands: start, syncdb, createuser.
# (Help strings are user-facing Portuguese text and are left untouched.)
parser = ArgumentParser()
subparsers = parser.add_subparsers()
parser_start = subparsers.add_parser('start', help='executar servidor de sincronização')
parser_start.set_defaults(cmd='start')
parser_start.add_argument('directory', metavar='DIRETÓRIO', type=str,
                          help='diretório para hospedagem e sincronização')
parser_start.add_argument('--host', metavar='SERVIDOR', type=str,
                          help='endereço para execução (padrão: 0.0.0.0)',
                          default='0.0.0.0')
parser_start.add_argument('--port', metavar='PORTA', type=int,
                          help='porta para execução (padrão: 9000)',
                          default='9000')
parser_syncdb = subparsers.add_parser('syncdb', help='cria tabelas do banco de dados')
parser_syncdb.set_defaults(cmd='syncdb')
parser_createuser = subparsers.add_parser('createuser', help='cria usuário')
parser_createuser.add_argument('username', metavar='USUÁRIO', type=str,
                               help='nome de usuário')
parser_createuser.add_argument('password', metavar='SENHA', type=str,
                               help='senha do usuário')
parser_createuser.set_defaults(cmd='createuser')
args = parser.parse_args()
# NOTE(review): if no subcommand is given, `args` has no `cmd` attribute and
# the dispatch below raises AttributeError -- confirm whether that is intended.
if args.cmd == 'start':
    if not os.path.exists(args.directory) or not os.path.isdir(args.directory):
        parser.error("diretório para sincronização inexistente")
    start_server(args.directory, args.host, args.port)
elif args.cmd == 'syncdb':
    database.connect()
    print('Criando tabelas ...')
    database.create_tables(__all__)
    print('Executando migrações ...')
    migrations.createuser_system()
    print('Pronto')
elif args.cmd == 'createuser':
    # NOTE(review): the password is passed through as given; any hashing must
    # happen inside User.create -- verify before relying on this command.
    User.create(
        username=args.username,
        password=args.password,
    )
    print('Usuário criado com sucesso.')
|
import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
import json
from download.box import LifespanBox
# One-off ETL script: pull corrected NIH Toolbox data from Box, attach the
# subject fields NDA requires, and (below) reshape per instrument.
verbose = True
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
#catfromdate=max of last run--'2019-06-17'
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
box_temp='/home/petra/UbWinSharedSpace1/boxtemp'
pathout="/home/petra/UbWinSharedSpace1/redcap2nda_Lifespan2019/NIH_toolbox_crosswalk_docs/HCPA/prepped_structures"
box = LifespanBox(cache=box_temp)
#prep the fields that NDA requires in all of their structures
subjectlist='/home/petra/UbWinSharedSpace1/redcap2nda_Lifespan2019/Dev_pedigrees/UnrelatedHCAHCD_w_STG_Image_and_pseudo_GUID09_27_2019.csv'
subjects=pd.read_csv(subjectlist)[['subjectped','nda_gender', 'nda_guid', 'nda_interview_age', 'nda_interview_date']]
# Keep only HCP-Aging subjects and rename pedigree columns to NDA names.
ndar=subjects.loc[subjects.subjectped.str.contains('HCA')].rename(
    columns={'nda_guid':'subjectkey','subjectped':'src_subject_id','nda_interview_age':'interview_age',
             'nda_interview_date':'interview_date','nda_gender':'gender'}).copy()
ndar['interview_date'] = pd.to_datetime(ndar['interview_date']).dt.strftime('%m/%d/%Y')
ndarlist=['subjectkey','src_subject_id','interview_age','interview_date','gender']
#import the data
# Presumably Box folder/file ids for the combined site uploads -- confirm.
scores=587003317792 #new corrected data from sites (all combined)
raw=587005373416 #new corrected data from sites (all combined)
# NOTE(review): Box2dataframe is neither defined nor imported in this file;
# presumably it lives in a sibling module (download.box?) -- confirm before running.
scordata=Box2dataframe(scores,box_temp)
rawdata=Box2dataframe(raw,box_temp)
#subset to correct visit data...
scordata=scordata.loc[scordata.visit=='V1'].drop(columns=['gender']).copy()
rawdata=rawdata.loc[rawdata.visit=='V1'].drop(columns=['gender']).copy()
#merge with the required fields
scordata=pd.merge(scordata,ndar,how='inner',left_on='subject', right_on='src_subject_id')
rawdata=pd.merge(rawdata,ndar,how='inner',left_on='subject', right_on='src_subject_id')
#this is the list of variables in the scored data files that you might need...
#creating list incase your scored data is merged with other files for other reasons (ours was)
scorlist=['Age-Corrected Standard Score', 'Age-Corrected Standard Scores Dominant',
          'Age-Corrected Standard Scores Non-Dominant', 'AgeCorrCrystal', 'AgeCorrDCCS', 'AgeCorrEarly',
          'AgeCorrEngRead', 'AgeCorrEngVocab', 'AgeCorrFlanker', 'AgeCorrFluid', 'AgeCorrListSort',
          'AgeCorrPSM', 'AgeCorrPatternComp', 'AgeCorrTotal', 'Assessment Name', 'Computed Score',
          'ComputedDCCS', 'ComputedEngRead', 'ComputedEngVocab', 'ComputedFlanker', 'ComputedPSM',
          'ComputedPatternComp', 'DCCSaccuracy', 'DCCSreactiontime', 'Dominant Score', 'FlankerAccuracy',
          'FlankerReactionTime', 'FullTCrystal', 'FullTDCCS', 'FullTEarly', 'FullTEngRead', 'FullTEngVocab',
          'FullTFlanker', 'FullTFluid', 'FullTListSort', 'FullTPSM', 'FullTPatternComp', 'FullTTotal',
          'Fully-Corrected T-score', 'Fully-Corrected T-scores Dominant', 'Fully-Corrected T-scores Non-Dominant',
          'FullyCorrectedTscore', 'Group', 'Inst', 'InstrumentBreakoff', 'InstrumentRCReason', 'InstrumentRCReasonOther',
          'InstrumentStatus2', 'ItmCnt', 'Language', 'Male', 'National Percentile (age adjusted)',
          'National Percentile (age adjusted) Dominant', 'National Percentile (age adjusted) Non-Dominant',
          'Non-Dominant Score', 'PIN', 'Raw Score Left Ear', 'Raw Score Right Ear', 'RawDCCS',
          'RawFlanker', 'RawListSort', 'RawPSM', 'RawPatternComp', 'RawScore', 'SE', 'Static Visual Acuity Snellen',
          'Static Visual Acuity logMAR', 'TScore', 'Theta', 'ThetaEngRead', 'ThetaEngVocab', 'ThetaPSM', 'Threshold Left Ear',
          'Threshold Right Ear', 'UncorrCrystal', 'UncorrDCCS', 'UncorrEarly', 'UncorrEngRead', 'UncorrEngVocab',
          'UncorrFlanker', 'UncorrFluid', 'UncorrListSort', 'UncorrPSM', 'UncorrPatternComp', 'UncorrTotal',
          'Uncorrected Standard Score', 'Uncorrected Standard Scores Dominant', 'Uncorrected Standard Scores Non-Dominant',
          'UncorrectedStandardScore']
#check that lengths are the same...indicating one to one PIN match between scores and raw
# NOTE: the bare expressions below only display output in an interactive
# session (REPL/notebook); running this as a script shows nothing.
len(rawdata.PIN.unique())
len(scordata.PIN.unique())
#check that shape is same before and after removing duplicates (should not be any)
rawdata.shape
scordata.shape
testraw=rawdata.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
testscore=scordata.drop_duplicates(subset={'PIN','Inst'})
testraw.shape
testscore.shape
#establish the crosswalk
crosswalkfile="/home/petra/UbWinSharedSpace1/redcap2nda_Lifespan2019/NIH_toolbox_crosswalk_docs/HCPA/NIH_Toolbox_crosswalk_HCPA_2020Jan6.csv"
crosswalk=pd.read_csv(crosswalkfile,header=0,low_memory=False, encoding = "ISO-8859-1")
#check that your instruments are in both raw data and scores files.
for i in rawdata.Inst.unique():
if i not in scordata.Inst.unique():
print(i)
#Within the rawdata structure (for HCP), all but the NIH Toolbox Pain Intensity FF Age 18+ v2.0 Instrument are practices
#So only the Pain Intensity instrument needed special coding attention
#check your data and adjust if needed
#create the NDA structure for this special case
inst_i='NIH Toolbox Pain Intensity FF Age 18+ v2.0'
paindata=rawdata.loc[rawdata.Inst==inst_i][['PIN','subject','Inst','visit','ItemID','Position',
'subjectkey','src_subject_id','interview_age','interview_date','gender',
'Response','ResponseTime', 'SE', 'Score', 'TScore','Theta']]
paindata.ItemID = paindata.ItemID.str.lower().str.replace('-','_').str.replace('(','_').str.replace(')','_')
inst = paindata.pivot(index='PIN', columns='ItemID', values='Response').reset_index()
meta = paindata.drop_duplicates(subset=['PIN', 'visit'])
painreshaped = pd.merge(meta, inst, on='PIN', how='inner').drop(columns={'subject','visit','PIN'})
crosswalk_subset=crosswalk.loc[crosswalk['Inst']==inst_i]
crosswalk_subset.reset_index(inplace=True)
cwlist=list(crosswalk_subset['hcp_variable'])
reshapedslim=painreshaped[ndarlist+cwlist]
data2struct(dout=reshapedslim,crosssub=crosswalk_subset,study='HCPA')
inst_i='NIH Toolbox Odor Identification Test Age 10+ v2.0'
#for instruments in both raw and score files: pivot raw item responses wide,
#attach score columns, and push through the crosswalk to an NDA structure
for i in scordata.Inst.unique():
    if i in rawdata.Inst.unique():
        inst_i=i
        if "Visual Acuity" in inst_i:
            pass #special case--see below
        elif "Practice" in inst_i:
            print("Note: Omitting practice instrument, "+inst_i)
        else:
            # NOTE(review): the bare except below swallows *every* error
            # (including NameError/TypeError), not only the expected
            # duplicate-pivot failures — consider catching ValueError/KeyError.
            try: #this will fail if there are duplicates or if no-one has the data of interest (e.g. idlist too small), or if only V2 instrument
                #print('Processing '+inst_i+'...')
                items=rawdata.loc[rawdata.Inst==inst_i][['PIN','subject','Inst','visit','ItemID','Position',
                'subjectkey','src_subject_id','interview_age','interview_date','gender',
                'Response','ResponseTime']]# not these..., 'SE', 'Score', 'TScore','Theta']]
                items.ItemID = items.ItemID.str.lower().str.replace('-','_').str.replace('(','_').str.replace(')','_').str.replace(' ','_')
                # pivot raises if any PIN/ItemID pair is duplicated
                inst=items.pivot(index='PIN',columns='ItemID',values='Response').reset_index()
                meta=items.drop_duplicates(subset=['PIN','visit'])
                instreshaped = pd.merge(meta, inst, on='PIN', how='inner').drop(columns={'subject', 'visit','Inst'})
                items2=scordata.loc[scordata.Inst==inst_i][scorlist]
                instreshapedfull=pd.merge(instreshaped,items2,on='PIN',how='inner')
                # NOTE(review): sendthroughcrosswalk as defined below (L4763)
                # takes pathout first; this call omits it — verify.
                sendthroughcrosswalk(instreshapedfull, inst_i, crosswalk,studystr='HCPA')
                #instreshapedfull = instreshapedfull.dropna(how='all', axis=1)
                #filecsv = box_temp + '/ndaformat/' + inst_i + '.csv'
                #dictcsv = box_temp + '/ndaformat/' + 'Dictionary' + inst_i + '.csv'
                #instreshapedfull.to_csv(filecsv, index=False)
                #makedatadict(filecsv, dictcsv,inst_i)
            except:
                print('Couldnt process '+inst_i+'...')
inst_i="Negative Psychosocial Functioning Parent Report Summary (3-7)"
#instruments present in the score file but not raw: no item pivot needed,
#score columns go straight through the crosswalk
for i in scordata.Inst.unique():
    if i not in rawdata.Inst.unique():
        inst_i=i
        if "Practice" in inst_i:
            print("Note: Omitting practice instrument, "+inst_i)
        elif "Cognition" in inst_i:
            pass #another special case--see below
        else:
            # NOTE(review): bare except hides the real failure cause; the
            # sendthroughcrosswalk call also omits the pathout argument
            # required by the definition at L4763 — verify.
            try:
                print('Processing '+inst_i+'...')
                items2 = scordata.loc[scordata.Inst == inst_i][scorlist+ndarlist]
                instreshapedfull=items2
                sendthroughcrosswalk(instreshapedfull,inst_i,crosswalk)
                # instreshapedfull = instreshapedfull.dropna(how='all', axis=1)
                # filecsv = box_temp + '/ndaformat/' + inst_i + '.csv'
                # dictcsv = box_temp + '/ndaformat/' + 'Dictionary' + inst_i + '.csv'
                # instreshapedfull.to_csv(filecsv, index=False)
                # makedatadict(filecsv, dictcsv,inst_i)
            except:
                print('Couldnt process '+inst_i+'...')
#special case for Cognition Composite scores - going to cogcomp01 structure - mapped before Leo agreed to accept by Instrument name
#keeping in for posterity and to shed light on one type of merge he must do on his end, when it comes to NIH toolbox data
cogcompdata=scordata.loc[scordata.Inst.str.contains('Cognition')==True][['PIN','Language',
'Assessment Name','Inst', 'Uncorrected Standard Score', 'Age-Corrected Standard Score',
'National Percentile (age adjusted)', 'Fully-Corrected T-score']+ndarlist]
#initialize prefix, then overwrite per composite instrument
cogcompdata['varprefix']='test'
cogcompdata.loc[cogcompdata.Inst=='Cognition Crystallized Composite v1.1','varprefix']='nih_crystalcogcomp_'
cogcompdata.loc[cogcompdata.Inst=='Cognition Early Childhood Composite v1.1','varprefix']='nih_eccogcomp_'
cogcompdata.loc[cogcompdata.Inst=='Cognition Fluid Composite v1.1','varprefix']='nih_fluidcogcomp_'
cogcompdata.loc[cogcompdata.Inst=='Cognition Total Composite Score v1.1','varprefix']='nih_totalcogcomp_'
#pivot the vars of interest by varprefix and rename (suffix encodes which score)
uncorr=cogcompdata.pivot(index='PIN',columns='varprefix',values='Uncorrected Standard Score')
for col in uncorr.columns.values:
    uncorr=uncorr.rename(columns={col:col+"unadjusted"})
ageadj=cogcompdata.pivot(index='PIN',columns='varprefix',values='Age-Corrected Standard Score')
for col in ageadj.columns.values:
    ageadj=ageadj.rename(columns={col:col+"ageadj"})
npage=cogcompdata.pivot(index='PIN',columns='varprefix',values='National Percentile (age adjusted)')
for col in npage.columns.values:
    npage=npage.rename(columns={col:col+"np_ageadj"})
#put them together (all three pivots share the PIN index)
cogcompreshape=pd.concat([uncorr,ageadj,npage],axis=1)
meta=cogcompdata[['PIN','Language','Assessment Name']+ndarlist].drop_duplicates(subset={'PIN'})
meta['nih_crystalcogcomp']='Cognition Crystallized Composite v1.1'
meta['nih_eccogcomp']='Cognition Early Childhood Composite v1.1'
meta['nih_fluidcogcomp']='Cognition Fluid Composite v1.1'
meta['nih_totalcogcomp']='Cognition Total Composite Score v1.1'
# NOTE(review): after pivot/concat, PIN lives in cogcompreshape's *index*, not
# a column — merge(on='PIN') may need cogcompreshape.reset_index(); verify
# against the pandas version this was run with.
cogcompreshape=pd.merge(meta,cogcompreshape,on='PIN',how='inner')
inst_i='Cognition Composite Scores'
sendthroughcrosswalk(pathout,cogcompreshape,inst_i,crosswalk)
#special case for instruments with "Visual Acuity" in their titles, which have dup inst/itemid at diff positions
for i in scordata.Inst.unique():
    if i in rawdata.Inst.unique():
        inst_i=i
        if "Visual Acuity" in inst_i:
            print('Processing ' + inst_i + '...')
            #items=testraw.loc[testraw.Inst==inst_i][['PIN','subject','Inst','flagged','parent','site', 'study',
            #         'gender','v1_interview_date', 'visit','ItemID','Position','Response']]
            items=testraw.loc[testraw.Inst.str.contains('Visual Acuity')][['PIN','subject','Inst','flagged','parent','site', 'study',
            'gender','v1_interview_date', 'visit','ItemID','Position','Response']]
            #initialize firstdup, isdup, tallydup
            items.ItemID = items.ItemID.str.lower()
            # number repeated PIN/ItemID rows 1..n so each occurrence gets a unique column
            items['dup_number']=items.groupby(['PIN','ItemID']).cumcount()+1
            # NOTE(review): with regex-default str.replace, '|' alone matches the
            # empty string and would insert '_' between every character — confirm
            # pandas version or pass regex=False.
            items['ItemID_Dup']=items.ItemID.str.replace('|', '_') + '_P'+items.dup_number.astype(str)
            # NOTE(review): pivot leaves PIN in the index and it is not reset
            # before merge(on='PIN') below — verify (the analogous loop above
            # calls .reset_index()).
            inst=items.pivot(index='PIN',columns='ItemID_Dup',values='Response')
            meta = items.drop_duplicates(subset=['PIN', 'visit'])[['Inst', 'PIN', 'flagged', 'parent', 'site', 'study',
                'subject', 'v1_interview_date', 'visit']]
            instreshaped = pd.merge(meta, inst, on='PIN', how='inner')
            items2 = testscore.loc[testscore.Inst == inst_i].drop(
                ['FirstDate4PIN', 'flagged', 'gender', 'v1_interview_date', 'site',
                 'subject', 'raw_cat_date', 'study', 'Column1', 'Column2',
                 'Column3', 'Column4', 'Column5', 'Inst', 'DeviceID', 'source'], axis=1)
            instreshapedfull = pd.merge(instreshaped, items2, on='PIN', how='inner')
            # drop flagged subjects (flagged column non-null means do-not-upload)
            instreshapedfull = instreshapedfull.loc[instreshapedfull.flagged.isnull() == True]
            # drop parents
            instreshapedfull = instreshapedfull.loc[instreshapedfull.parent.isnull() == True]
            # drop if not v1 - this will result in dropping NIH Toolbox Picture Sequence Memory Test Age 8+ Form B v2.1
            instreshapedfull = instreshapedfull.loc[instreshapedfull.visit == 'V1']
            instreshapedfull = instreshapedfull.drop(columns={'App Version', 'iPad Version', 'Firmware Version','DateFinished',
                'InstrumentBreakoff','InstrumentStatus2','site','study','v1_interview_date','visit'})
            instreshapedfull = instreshapedfull.dropna(how='all', axis=1)
            filecsv = box_temp + '/ndaformat/' + inst_i + '.csv'
            dictcsv = box_temp + '/ndaformat/' + 'Dictionary' + inst_i + '.csv'
            instreshapedfull.to_csv(filecsv, index=False)
            makedatadict(filecsv, dictcsv,inst_i)
        else:
            print('Skipping ' + inst_i + '...')
########################################################################
def sendthroughcrosswalk(pathout,instreshapedfull,inst_i,crosswalk,studystr='HCPD'):
    """Map one instrument's wide dataframe onto NDA element names and write it out.

    pathout           directory handed to data2struct for the output csv
    instreshapedfull  wide dataframe: ndarlist identifier columns + instrument columns
    inst_i            instrument name used to select crosswalk rows
    crosswalk         dataframe with 'Inst', 'hcp_variable', 'hcp_variable_upload',
                      'requested_python' (optional transform code) columns
    studystr          study tag forwarded into the output filename

    Relies on module globals `ndarlist` and `data2struct`.  Rows of the
    crosswalk may carry python snippets in 'requested_python' that are exec'd
    against the local `studydata` frame, so statement order here matters.
    """
    # replace special characters in column names so they can match crosswalk names
    instreshapedfull.columns = instreshapedfull.columns.str.replace(' ', '_').str.replace('-', '_').str.replace('(','_').str.replace(')', '_')
    crosswalk_subset = crosswalk.loc[crosswalk['Inst'] == inst_i]
    crosswalk_subset.reset_index(inplace=True)
    # crosswalk_subset.loc[crosswalk_subset['hcp_variable_upload'].isnull()==False,'hcp_variable']
    cwlist = list(crosswalk_subset['hcp_variable'])
    before = len(cwlist)
    cwlist = list(set(cwlist) & set(
        instreshapedfull.columns))  # drop the handful of vars in larger instruments that got mapped but that we dont have
    after = len(cwlist)
    if before != after:
        print("WARNING!!! " + inst_i + ": Crosswalk expects " + str(before) + " elements, but only found " + str(after))
    studydata = instreshapedfull[ndarlist + cwlist].copy()
    # execute any specialty codes
    # NOTE(review): exec of crosswalk-supplied python is a code-injection risk;
    # acceptable only while the crosswalk csv remains a trusted, curated input.
    for index, row in crosswalk_subset.iterrows():
        if pd.isna(row['requested_python']):
            pass
        else:
            exec(row['requested_python'])
    uploadlist = list(crosswalk_subset['hcp_variable_upload'])
    # keep only upload names the (possibly exec-modified) frame actually has
    uploadlist = list(set(uploadlist) & set(studydata.columns))
    data2struct(pathout,dout=studydata[ndarlist + uploadlist], crosssub=crosswalk_subset, study=studystr)
def data2struct(pathout, dout, crosssub, study='HCPD', snapshot=None):
    """Write a dataframe as an NDA submission csv with the structure header row.

    pathout   directory the file is written into
    dout      dataframe of NDA elements to submit
    crosssub  crosswalk subset (index reset) whose first row names the target
              structure, e.g. nda_structure='cogcomp01', inst_short='tbx'
    study     study tag used in the output filename
    snapshot  date tag for the filename; defaults to the module-level
              `snapshotdate` used by the original call sites (backward
              compatible — omit it to keep the old behavior)

    The output file's first line is 'structroot,structnumber' (NDA convention),
    followed by dout rendered as csv.
    """
    structure = crosssub['nda_structure'].str.strip().iloc[0]
    strucroot = structure[:-2]   # e.g. 'cogcomp01' -> 'cogcomp'
    strucnum = structure[-2:]    # e.g. 'cogcomp01' -> '01'
    instshort = crosssub['inst_short'].str.strip().iloc[0]
    if snapshot is None:
        snapshot = snapshotdate  # module-level date tag set by the calling script
    filePath = os.path.join(pathout, study + '_' + instshort + '_' + strucroot + strucnum + '_' + snapshot + '.csv')
    # 'w' truncates/creates in one step, replacing the old remove-then-append dance
    with open(filePath, 'w') as f:
        f.write(strucroot + "," + str(int(strucnum)) + "\n")
        dout.to_csv(f, index=False)
def getredcapfieldsjson(fieldlist, study='hcpdparent '): # , token=token[0],field=field[0],event=event[0]):
    """
    Download requested fields from the Redcap database named by `study`.

    Connection details (token, id field, event, date/sex/site/dob variable
    names) come from the module-level `redcapconfigfile` csv.  Returns a
    pandas dataframe with 'study', 'subject', 'flagged', 'interview_date'
    (renamed from the study-specific date variable), the requested
    `fieldlist` columns, and — when date-of-birth parsing succeeds — an
    'interview_age' column in months (capped at 1200 for ages > 90 years).
    'subject' is the patient id stripped of suffixes like 'excluded';
    'flagged' holds those suffix characters so flagged participants can be
    excluded from NDA uploads or other sharing.

    NOTE(review): the default study value 'hcpdparent ' has a trailing space —
    presumably it matches the value stored in the config csv; verify.
    """
    auth = pd.read_csv(redcapconfigfile)
    studydata = pd.DataFrame()
    # fields[0]..fields[4] are reserved below for id/date/sex/site/dob, so
    # the caller's fields are numbered starting at 5
    fieldlistlabel = ['fields[' + str(i) + ']' for i in range(5, len(fieldlist) + 5)]
    fieldrow = dict(zip(fieldlistlabel, fieldlist))
    d1 = {'token': auth.loc[auth.study == study, 'token'].values[0], 'content': 'record', 'format': 'json', 'type': 'flat',
          'fields[0]': auth.loc[auth.study == study, 'field'].values[0],
          'fields[1]': auth.loc[auth.study == study, 'interview_date'].values[0],
          'fields[2]': auth.loc[auth.study == study, 'sexatbirth'].values[0],
          'fields[3]': auth.loc[auth.study == study, 'sitenum'].values[0],
          'fields[4]': auth.loc[auth.study == study, 'dobvar'].values[0]}
    d2 = fieldrow
    d3 = {'events[0]': auth.loc[auth.study == study, 'event'].values[0], 'rawOrLabel': 'raw', 'rawOrLabelHeaders': 'raw',
          'exportCheckboxLabel': 'false',
          'exportSurveyFields': 'false', 'exportDataAccessGroups': 'false', 'returnFormat': 'json'}
    data = {**d1, **d2, **d3}
    # POST the export request to the Redcap API and collect the JSON response
    buf = BytesIO()
    ch = pycurl.Curl()
    ch.setopt(ch.URL, 'https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/')
    ch.setopt(ch.HTTPPOST, list(data.items()))
    ch.setopt(ch.WRITEDATA, buf)
    ch.perform()
    ch.close()
    htmlString = buf.getvalue().decode('UTF-8')
    buf.close()
    d = json.loads(htmlString)
    #parent_ids = pd.DataFrame(htmlString.splitlines(), columns=['row'])
    #header = parent_ids.iloc[0]
    #headerv2 = header.str.replace(auth.loc[auth.study == study, 'interview_date'].values[0], 'interview_date')
    #headerv3 = headerv2.str.split(',')
    #parent_ids.drop([0], inplace=True)
    #pexpanded = pd.DataFrame(parent_ids.row.str.split(pat='\t').values.tolist(), columns=headerv3.values.tolist()[0])
    pexpanded=pd.DataFrame(d)
    # drop records with an empty id field
    pexpanded = pexpanded.loc[~(pexpanded[auth.loc[auth.study == study, 'field'].values[0]] == '')] ##
    # split 'HCD1234_excluded' into subject='HCD1234', flagged='excluded'
    # NOTE(review): positional args to str.split are deprecated in newer pandas
    # (use n=1); fine on the pandas version this was written for.
    new = pexpanded[auth.loc[auth.study == study, 'field'].values[0]].str.split("_", 1, expand=True)
    pexpanded['subject'] = new[0].str.strip()
    pexpanded['flagged'] = new[1].str.strip()
    pexpanded['study'] = study # auth.study[i]
    studydata = pd.concat([studydata, pexpanded], axis=0, sort=True)
    studydata=studydata.rename(columns={auth.loc[auth.study == study, 'interview_date'].values[0]:'interview_date'})
    # Convert age in years to age in months
    # note that dob is hardcoded var name here because all redcap databases use same variable name...sue me
    # interview date, which was originally v1_date for hcpd, has been renamed in line above, headerv2
    # NOTE(review): the broad excepts below are deliberate best-effort — a
    # database without dob/gender simply skips the derivation — but they also
    # hide genuine parse errors.
    try:
        studydata['nb_months'] = (
                12 * (pd.to_datetime(studydata['interview_date']).dt.year - pd.to_datetime(studydata.dob).dt.year) +
                (pd.to_datetime(studydata['interview_date']).dt.month - pd.to_datetime(studydata.dob).dt.month) +
                (pd.to_datetime(studydata['interview_date']).dt.day - pd.to_datetime(studydata.dob).dt.day) / 31)
        studydatasub=studydata.loc[studydata.nb_months.isnull()].copy()
        studydatasuper = studydata.loc[~(studydata.nb_months.isnull())].copy()
        studydatasuper['nb_months'] = studydatasuper['nb_months'].apply(np.floor).astype(int)
        studydatasuper['nb_monthsPHI'] = studydatasuper['nb_months']
        # cap ages over 90 years (1080 months) at 1200 to avoid PHI disclosure
        studydatasuper.loc[studydatasuper.nb_months > 1080, 'nb_monthsPHI'] = 1200
        studydata=pd.concat([studydatasub,studydatasuper],sort=True)
        studydata = studydata.drop(columns={'nb_months'}).rename(columns={'nb_monthsPHI': 'interview_age'})
    except:
        pass
    #convert gender to M/F string
    try:
        studydata.gender = studydata.gender.str.replace('1', 'M')
        studydata.gender = studydata.gender.str.replace('2', 'F')
    except:
        print(study+' has no variable named gender')
    return studydata
#def getndarequiredfields(subject,dframe,guidfile=hcalist,studystr='hcpa',structure):
# """
# get the pseudo_guid, age, sex, interview_date...merge with dataframe, and prepend structure title
# :return:
# """
# redcapvars = box.getredcapfields([], study=studystr)
# redcapvars.loc[redcapvars.gender.astype(int)==1,'nda_gender']='M'
# redcapvars.loc[redcapvars.gender.astype(int)==2,'nda_gender']='F'
# redcapvars.loc[:,'nda_interview_date']=pd.to_datetime(redcapvars['interview_date']) -pd.offsets.QuarterBegin(startingMonth=1)
# redcapvars=redcapvars.rename(columns={'interview_age':'nda_interview_age'})
# redcapvars=redcapvars.loc[redcapvars.flagged.isnull()==True] #these already subset to v1 by redcap event pull
# hcaids=pd.read_csv(guidfile,header=0)
# hcaidsupdate=pd.merge(hcaids[['Subject','nda_guid']],redcapvars,left_on='Subject',right_on='subject',how='left')
# ndadf=pd.merge(hcaidsupdate,dframe,lefton='Subject',righton='subject',how='inner')
def Box2dataframe(curated_fileid_start, cache_space):
    """Fetch the current best curated csv from Box and load it for QC.

    curated_fileid_start  Box file id of the curated snapshot csv
    cache_space           local directory the download lands in

    Returns the file parsed as a pandas dataframe (ISO-8859-1 encoded,
    low_memory disabled so dtypes are inferred from the whole file).
    Relies on the module-level `box` client.
    """
    box_file = box.download_file(curated_fileid_start)
    local_path = os.path.join(cache_space, box_file.get().name)
    frame = pd.read_csv(local_path, header=0, low_memory=False, encoding='ISO-8859-1')
    return frame
def makedatadict(filecsv, dictcsv, inst):
    """
    Create a data-dictionary csv describing the columns of an NDA-format csv.

    filecsv  path of the csv to summarize (read with pandas)
    dictcsv  path the dictionary csv is written to
    inst     instrument title recorded in the 'Instrument Title' column

    For every column the dictionary reports: non-missing count, describe()
    summary stats (numeric columns only), a 'min :: max' range string, the
    count of unique non-null values, and either the full unique-value list
    (<=10 uniques with >=10 non-missing rows) or a single example value.
    """
    ksadsraw = pd.read_csv(filecsv, header=0, low_memory=False)
    varvalues = pd.DataFrame(columns=['variable', 'values_or_example', 'numunique'])
    varvalues['variable'] = ksadsraw.columns
    # non-missing counts per column
    kcounts = ksadsraw.count().reset_index().rename(columns={'index': 'variable', 0: 'num_nonmissing'})
    varvalues = pd.merge(varvalues, kcounts, on='variable', how='inner')
    # describe() yields stats for numeric columns only; left merge keeps the rest
    summarystats = ksadsraw.describe().transpose().reset_index().rename(columns={'index': 'variable'})
    varvalues = pd.merge(varvalues, summarystats, on='variable', how='left')
    # -99 is a temporary sentinel for non-numeric columns so astype(int) cannot
    # fail on NaN; their ValueRange is blanked right after
    varvalues['min'] = varvalues['min'].fillna(-99)
    varvalues['max'] = varvalues['max'].fillna(-99)
    varvalues['ValueRange'] = varvalues['min'].astype(int).astype(str) + ' :: ' + varvalues['max'].astype(int).astype(str)
    varvalues['min'] = varvalues['min'].replace(-99.0, np.nan)
    varvalues['max'] = varvalues['max'].replace(-99.0, np.nan)
    varvalues.loc[varvalues.ValueRange.str.contains('-99'), 'ValueRange'] = ' '
    # unique-value counts and example values per column
    for var in ksadsraw.columns:
        uniques = ksadsraw[var].unique().tolist()
        # nunique() (NaN excluded) replaces the original groupby().count() round trip
        varvalues.loc[varvalues.variable == var, 'numunique'] = ksadsraw[var].nunique()
        varvalues.loc[(varvalues.variable == var) & (varvalues.numunique <= 10) &
                      (varvalues.num_nonmissing >= 10), 'values_or_example'] = ''.join(str(uniques))
        varvalues.loc[(varvalues.variable == var) & (varvalues.numunique <= 10) &
                      (varvalues.num_nonmissing < 10), 'values_or_example'] = uniques[0]
        varvalues.loc[(varvalues.variable == var) & (varvalues.numunique > 10), 'values_or_example'] = uniques[0]
    varvalues['Instrument Title'] = inst
    varvalues.to_csv(dictcsv, index=False)
def redcap2structure(vars,crosswalk,pathstructuresout=box_temp,studystr='hcpa',dframe=None):
    """
    Takes a list of hcp_variable names from the crosswalk, pulls their data
    from Redcap (or uses `dframe` if supplied), and writes an NDA-format
    structure csv to `pathstructuresout`.

    vars               list of hcp_variable names to map
    crosswalk          crosswalk dataframe (nda_structure / nda_element /
                       hcp_variable / hcp_variable_upload / requested_python)
    pathstructuresout  output directory; NOTE(review): default binds the
                       module global `box_temp` at def time
    studystr           Redcap database key passed to getredcapfieldsjson
    dframe             optional pre-fetched dataframe replacing the Redcap pull

    Relies on module globals `ndar` (required NDA identifier fields) and
    `snapshotdate`.  Crosswalk rows may carry python in 'requested_python'
    that is exec'd against the local `studydata`, so statement order matters.
    """
    #varslim=[x for x in fieldlist if str(x) != 'nan']
    #varnan=[x for x in fieldlist if str(x) == 'nan']
    if dframe is not None:
        studydata=dframe
    else:
        studydata=getredcapfieldsjson(fieldlist=vars,study=studystr)
    #get the relevant rows of the crosswalk
    #inner merge works for redcap source..need right merge for box, though, to get extra vars for missing people
    crosswalk_subset=pd.merge(crosswalk,pd.DataFrame(vars,columns=['hcp_variable']),on='hcp_variable',how='inner')[['nda_structure', 'nda_element', 'hcp_variable', 'HCP-D Source',
                               'action_request',
                               'hcp_variable_upload',
                               'requested_python']]
    #execute transformation codes stored in the crosswalk
    # NOTE(review): exec of crosswalk-supplied python — trusted input only
    for index,row in crosswalk_subset.iterrows():
        if pd.isna(row['requested_python']):
            pass
        else:
            exec(row['requested_python'])
    #remove fields with empty values for hcp_variable_upload -- these are empty because NDA doesnt want them
    crosswalk_subset=crosswalk_subset.loc[crosswalk_subset['hcp_variable_upload'].isnull()==False]
    listout=['subject','flagged','interview_date','interview_age','gender']+list(crosswalk_subset['hcp_variable_upload'])
    #output new variables and subset to those not flagged for withdrawal.
    transformed=studydata[listout].loc[studydata.flagged.isnull()==True].drop(columns={'flagged','interview_date','gender','interview_age'})
    #merge with required fields from vars in intradb staging (guid, etc)
    #not sure whether it makes sense to pull these in here or recalculate on fly from redcap.
    #future issues: compare this approach (e.g. pull from the file above named 'ndar') vs. what happens in the applycrosswalk.py
    #program for HCD, which regenerates on fly...will require some recodeing below to pull from redcap...
    #might just be easier to pull once...but how will this affect visit numbers?
    ndarsub=ndar[['nda_guid','subjectped','nda_gender','nda_interview_age','nda_interview_date']].rename(
        columns={'nda_guid':'subjectkey','subjectped':'src_subject_id','nda_gender':'gender',
                 'nda_interview_date':'interview_date','nda_interview_age':'interview_age'}).copy()
    dout=pd.merge(ndarsub,transformed,how='left',left_on='src_subject_id',right_on='subject').drop(columns='subject')
    # NDA requires MM/DD/YYYY interview dates
    dout['interview_date'] = pd.to_datetime(dout['interview_date']).dt.strftime('%m/%d/%Y')
    #now export
    crosswalk_subset.reset_index(inplace=True)
    strucroot=crosswalk_subset['nda_structure'].str.strip().str[:-2][0]
    strucnum=crosswalk_subset['nda_structure'].str.strip().str[-2:][0]
    #finalsubset - i.e. no withdraws
    #subjectkey src_subject_id interview_age interview_date gender
    filePath=os.path.join(pathstructuresout,'HCPD_'+strucroot+strucnum+'_'+snapshotdate+'.csv')
    if os.path.exists(filePath):
        os.remove(filePath)
    else:
        pass
        #print("Can not delete the file as it doesn't exists")
    # first line is the NDA 'structroot,structnumber' header, then the data
    with open(filePath,'a') as f:
        f.write(strucroot+","+str(int(strucnum))+"\n")
        dout.to_csv(f,index=False)
|
#########################################################
#
# DO NOT EDIT THIS FILE. IT IS GENERATED AUTOMATICALLY. #
# PLEASE LOOK INTO THE README FOR MORE INFORMATION. #
#
#########################################################
# coding: utf-8
# # Control Ops Tutorial
#
# In this tutorial we show how to use control flow operators in Caffe2 and give some details about their underlying implementations.
# ### Conditional Execution Using NetBuilder
#
# Let's start with conditional operator. We will demonstrate how to use it in two Caffe2 APIs used for building nets: `NetBuilder` and `brew`.
# In[1]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace
from caffe2.python.core import Plan, to_execution_step, Net
from caffe2.python.net_builder import ops, NetBuilder
# In the first example, we define several blobs and then use the 'If' operator to set the value of one of them conditionally depending on values of other blobs.
#
# The pseudocode for the conditional examples we will implement is as follows:
#
# if (x > 0):
# y = 1
# else:
# y = 0
# In[2]:
# NOTE: the ops.* calls below only *record* operators into the NetBuilder
# graph in program order; nothing executes until the Plan is run further down.
with NetBuilder() as nb:
    # Define our constants
    ops.Const(0.0, blob_out="zero")
    ops.Const(1.0, blob_out="one")
    ops.Const(0.5, blob_out="x")
    ops.Const(0.0, blob_out="y")
    # Define our conditional sequence
    with ops.IfNet(ops.GT(["x", "zero"])):
        ops.Copy("one", "y")
    with ops.Else():
        ops.Copy("zero", "y")
# Note the usage of `NetBuilder`'s `ops.IfNet` and `ops.Else` calls: `ops.IfNet` accepts a blob reference or blob name as an input, it expects an input blob to have a scalar value convertible to bool. Note that the optional `ops.Else` is at the same level as `ops.IfNet` and immediately follows the corresponding `ops.IfNet`. Let's execute the resulting net (execution step) and check the values of the blobs.
#
# Note that since x = 0.5, which is indeed greater than 0, we should expect y = 1 after execution.
# In[3]:
# Initialize a Plan
plan = Plan('if_net_test')
# Add the NetBuilder definition above to the Plan
plan.AddStep(to_execution_step(nb))
# Initialize workspace for blobs
ws = workspace.C.Workspace()
# Run the Plan
ws.run(plan)
# Fetch some blobs and print
print('x = ', ws.blobs["x"].fetch())
print('y = ', ws.blobs["y"].fetch())
# Before going further, it's important to understand the semantics of execution blocks ('then' and 'else' branches in the example above), i.e. handling of reads and writes into global (defined outside of the block) and local (defined inside the block) blobs.
#
# `NetBuilder` uses the following set of rules:
#
# - In `NetBuilder`'s syntax, a blob's declaration and definition occur at the same time - we define an operator which writes its output into a blob with a given name.
#
# - `NetBuilder` keeps track of all operators seen before the current execution point in the same block and up the stack in parent blocks.
#
# - If an operator writes into a previously unseen blob, it creates a **local** blob that is visible only within the current block and the subsequent children blocks. Local blobs created in a given block are effectively deleted when we exit the block. Any write into previously defined (in the same block or in the parent blocks) blob updates an originally created blob and does not result in the redefinition of a blob.
#
# - An operator's input blobs have to be defined earlier in the same block or in the stack of parent blocks.
#
#
# As a result, in order to see the values computed by a block after its execution, the blobs of interest have to be defined outside of the block. This rule effectively forces visible blobs to always be correctly initialized.
#
# To illustrate concepts of block semantics and provide a more sophisticated example, let's consider the following net:
# In[4]:
# NOTE: demonstrates block scoping — "local_blob" is created inside the outer
# IfNet block and is deleted when that block exits.
with NetBuilder() as nb:
    # Define our constants
    ops.Const(0.0, blob_out="zero")
    ops.Const(1.0, blob_out="one")
    ops.Const(2.0, blob_out="two")
    ops.Const(1.5, blob_out="x")
    ops.Const(0.0, blob_out="y")
    # Define our conditional sequence
    with ops.IfNet(ops.GT(["x", "zero"])):
        ops.Copy("x", "local_blob") # create local_blob using Copy -- this is not visible outside of this block
        with ops.IfNet(ops.LE(["local_blob", "one"])):
            ops.Copy("one", "y")
        with ops.Else():
            ops.Copy("two", "y")
    with ops.Else():
        ops.Copy("zero", "y")
        # Note that using local_blob would fail here because it is outside of the block in
        # which it was created
# When we execute this, we expect that y == 2.0, and that `local_blob` will not exist in the workspace.
# In[5]:
# Initialize a Plan
plan = Plan('if_net_test_2')
# Add the NetBuilder definition above to the Plan
plan.AddStep(to_execution_step(nb))
# Initialize workspace for blobs
ws = workspace.C.Workspace()
# Run the Plan
ws.run(plan)
# Fetch some blobs and print
print('x = ', ws.blobs["x"].fetch())
print('y = ', ws.blobs["y"].fetch())
# Assert that the local_blob does not exist in the workspace
# It should have been destroyed because of its locality
assert "local_blob" not in ws.blobs
# ### Conditional Execution Using Brew Module
#
# Brew is another Caffe2 interface used to construct nets. Unlike `NetBuilder`, `brew` does not track the hierarchy of blocks and, as a result, we need to specify which blobs are considered local and which blobs are considered global when passing 'then' and 'else' models to an API call.
#
# Let's start by importing the necessary items for the `brew` API.
# In[6]:
from caffe2.python import brew
from caffe2.python.workspace import FeedBlob, RunNetOnce, FetchBlob
from caffe2.python.model_helper import ModelHelper
# We will use the Caffe2's `ModelHelper` class to define and represent our models, as well as contain the parameter information about the models. Note that a `ModelHelper` object has two underlying nets:
#
# (1) param_init_net: Responsible for parameter initialization
# (2) net: Contains the main network definition, i.e. the graph of operators that the data flows through
#
# Note that `ModelHelper` is similar to `NetBuilder` in that we define the operator graph first, and actually run later. With that said, let's define some models to act as conditional elements, and use the `brew` module to form the conditional statement that we want to run. We will construct the same statement used in the first example above.
# In[7]:
# Initialize model, which will represent our main conditional model for this test
model = ModelHelper(name="test_if_model")
# Add variables and constants to our conditional model; notice how we add them to the param_init_net
model.param_init_net.ConstantFill([], ["zero"], shape=[1], value=0.0)
model.param_init_net.ConstantFill([], ["one"], shape=[1], value=1.0)
model.param_init_net.ConstantFill([], ["x"], shape=[1], value=0.5)
model.param_init_net.ConstantFill([], ["y"], shape=[1], value=0.0)
# Add Greater Than (GT) conditional operator to our model
# which checks if "x" > "zero", and outputs the result in the "cond" blob
model.param_init_net.GT(["x", "zero"], "cond")
# Initialize a then_model, and add an operator which we will set to be
# executed if the conditional model returns True
then_model = ModelHelper(name="then_test_model")
then_model.net.Copy("one", "y")
# Initialize an else_model, and add an operator which we will set to be
# executed if the conditional model returns False
else_model = ModelHelper(name="else_test_model")
else_model.net.Copy("zero", "y")
# Use the brew module's handy cond operator to facilitate the construction of the operator graph
# NOTE: unlike NetBuilder, brew does not track block scope — external_blobs
# must list every blob the branches share with the main model.
brew.cond(
    model=model, # main conditional model
    cond_blob="cond", # blob with condition value
    external_blobs=["x", "y", "zero", "one"], # data blobs used in execution of conditional
    then_model=then_model, # pass then_model
    else_model=else_model) # pass else_model
# Before we run the model, let's use Caffe2's graph visualization tool `net_drawer` to check if the operator graph makes sense.
# In[8]:
from caffe2.python import net_drawer
from IPython import display
graph = net_drawer.GetPydotGraph(model.net, rankdir="LR")
display.Image(graph.create_png(), width=800)
# Now let's run the net! When using `ModelHelper`, we must first run the `param_init_net` to initialize paramaters, then we execute the main `net`.
# In[9]:
# Run param_init_net once
RunNetOnce(model.param_init_net)
# Run main net (once in this case)
RunNetOnce(model.net)
# Fetch and examine some blobs
print("x = ", FetchBlob("x"))
print("y = ", FetchBlob("y"))
# ### Loops Using NetBuilder
#
# Another important control flow operator is 'While', which allows repeated execution of a fragment of net. Let's consider `NetBuilder`'s version first.
#
# The pseudocode for this example is:
#
# i = 0
# y = 0
# while (i <= 7):
# y = i + y
# i += 1
# In[10]:
# NOTE: ops.Condition runs before each iteration; its *last* operator must
# produce the scalar boolean that decides whether the loop body runs again.
with NetBuilder() as nb:
    # Define our variables
    ops.Const(0, blob_out="i")
    ops.Const(0, blob_out="y")
    # Define loop code and conditions
    with ops.WhileNet():
        with ops.Condition():
            ops.Add(["i", ops.Const(1)], ["i"])
            ops.LE(["i", ops.Const(7)])
        ops.Add(["i", "y"], ["y"])
# As with the 'If' operator, standard block semantic rules apply. Note the usage of `ops.Condition` clause that should immediately follow `ops.WhileNet` and contains code that is executed before each iteration. The last operator in the condition clause is expected to have a single boolean output that determines whether the other iteration is executed.
#
# In the example above we increment the counter ("i") before each iteration and accumulate its values in "y" blob, the loop's body is executed 7 times, the resulting blob values:
# In[11]:
# Initialize a Plan
plan = Plan('while_net_test')
# Add the NetBuilder definition above to the Plan
plan.AddStep(to_execution_step(nb))
# Initialize workspace for blobs
ws = workspace.C.Workspace()
# Run the Plan
ws.run(plan)
# Fetch blobs and print
print("i = ", ws.blobs["i"].fetch())
print("y = ", ws.blobs["y"].fetch())
# ### Loops Using Brew Module
#
# Now let's take a look at how to replicate the loop above using the `ModelHelper`+`brew` interface.
# In[12]:
# Initialize model, which will represent our main conditional model for this test
model = ModelHelper(name="test_while_model")
# Add variables and constants to our model
model.param_init_net.ConstantFill([], ["i"], shape=[1], value=0)
model.param_init_net.ConstantFill([], ["one"], shape=[1], value=1)
model.param_init_net.ConstantFill([], ["seven"], shape=[1], value=7)
model.param_init_net.ConstantFill([], ["y"], shape=[1], value=0)
# Initialize a loop_model that represents the code to run inside of loop
loop_model = ModelHelper(name="loop_test_model")
loop_model.net.Add(["i", "y"], ["y"])
# Initialize cond_model that represents the conditional test that the loop
# abides by, as well as the incrementation step
cond_model = ModelHelper(name="cond_test_model")
cond_model.net.Add(["i", "one"], "i")
cond_model.net.LE(["i", "seven"], "cond")
# Use brew's loop operator to facilitate the creation of the loop's operator graph
# NOTE: cond_model plays the role NetBuilder's ops.Condition did above —
# it increments "i" and writes the loop-continuation boolean into "cond".
brew.loop(
    model=model, # main model that contains data
    cond_blob="cond", # explicitly specifying condition blob
    external_blobs=["cond", "i", "one", "seven", "y"], # data blobs used in execution of the loop
    loop_model=loop_model, # pass loop_model
    cond_model=cond_model # pass condition model (optional)
)
# Once again, let's visualize the net using the `net_drawer`.
# In[13]:
graph = net_drawer.GetPydotGraph(model.net, rankdir="LR")
display.Image(graph.create_png(), width=800)
# Finally, we'll run the `param_init_net` and `net` and print our final blob values.
# In[14]:
RunNetOnce(model.param_init_net)
RunNetOnce(model.net)
print("i = ", FetchBlob("i"))
print("y = ", FetchBlob("y"))
# ### Backpropagation
#
# Both 'If' and 'While' operators support backpropagation. To illustrate how backpropagation with control ops work, let's consider the following examples in which we construct the operator graph using `NetBuilder` and obtain calculate gradients using the `AddGradientOperators` function. The first example shows the following conditional statement:
#
# x = 1-D numpy float array
# y = 4
# z = 0
# if (x > 0):
# z = y^2
# else:
# z = y^3
# In[15]:
import numpy as np
# Feed blob called x, which is simply a 1-D numpy array [0.5]
# NOTE(review): np.array(0.5) is actually 0-dimensional -- confirm intent.
FeedBlob("x", np.array(0.5, dtype='float32'))
# _use_control_ops=True forces NetBuilder to output single net as a result
# x is external for NetBuilder, so we let nb know about it through initial_scope param
with NetBuilder(_use_control_ops=True, initial_scope=["x"]) as nb:
    ops.Const(0.0, blob_out="zero")
    ops.Const(1.0, blob_out="one")
    ops.Const(4.0, blob_out="y")
    ops.Const(0.0, blob_out="z")
    # z = y^2 when x > 0, otherwise z = y^3
    with ops.IfNet(ops.GT(["x", "zero"])):
        ops.Pow("y", "z", exponent=2.0)
    with ops.Else():
        ops.Pow("y", "z", exponent=3.0)
# we should get a single net as output
assert len(nb.get()) == 1, "Expected a single net produced"
net = nb.get()[0]
# add gradient operators for 'z' blob
grad_map = net.AddGradientOperators(["z"])
# In this case
#
# $$x = 0.5$$
#
# $$z = y^2 = 4^2 = 16$$
#
# We will fetch the blob `y_grad`, which was generated by the `AddGradientOperators` call above. This blob contains the gradient of blob z with respect to y. According to basic calculus:
#
# $$y\_grad = \frac{\partial{z}}{\partial{y}}y^2 = 2y = 2(4) = 8$$
# In[16]:
# Run the net
# Run the net
RunNetOnce(net)
# Fetch blobs and print
print("x = ", FetchBlob("x"))
print("y = ", FetchBlob("y"))
print("z = ", FetchBlob("z"))
print("y_grad = ", FetchBlob("y_grad"))
# Now, let's change value of blob "x" to -0.5 and rerun net:
# In[17]:
# To re-run net with different input, simply feed new blob
# With x < 0 the Else branch executes (z = y^3), so y_grad becomes 3*y^2.
FeedBlob("x", np.array(-0.5, dtype='float32'))
RunNetOnce(net)
print("x = ", FetchBlob("x"))
print("y = ", FetchBlob("y"))
print("z = ", FetchBlob("z"))
print("y_grad = ", FetchBlob("y_grad"))
# The next and final example illustrates backpropagation on the following loop:
#
# x = 2
# y = 3
# z = 2
# i = 0
# while (i <= 2):
# x = x^2
# if (i < 2):
# y = y^2
# else:
# z = z^3
# i += 1
# s = x + y + z
#
# Note that this code essentially computes the sum of x^4 (by squaring x twice), y^2, and z^3.
# In[18]:
# Builds the loop shown in the markdown above: s = x^4 + y^2 + z^3.
with NetBuilder(_use_control_ops=True) as nb:
    # Define variables and constants
    ops.Copy(ops.Const(0), "i")
    ops.Copy(ops.Const(1), "one")
    ops.Copy(ops.Const(2), "two")
    ops.Copy(ops.Const(2.0), "x")
    ops.Copy(ops.Const(3.0), "y")
    ops.Copy(ops.Const(2.0), "z")
    # Define loop statement
    # Computes x^4, y^2, z^3
    with ops.WhileNet():
        with ops.Condition():
            # Increment i, then keep looping while i <= 2.
            ops.Add(["i", "one"], "i")
            ops.LE(["i", "two"])
        ops.Pow("x", "x", exponent=2.0)
        with ops.IfNet(ops.LT(["i", "two"])):
            ops.Pow("y", "y", exponent=2.0)
        with ops.Else():
            ops.Pow("z", "z", exponent=3.0)
    # Sum s = x + y + z
    ops.Add(["x", "y"], "x_plus_y")
    ops.Add(["x_plus_y", "z"], "s")
assert len(nb.get()) == 1, "Expected a single net produced"
net = nb.get()[0]
# Add gradient operators to output blob 's'
grad_map = net.AddGradientOperators(["s"])
# In[19]:
workspace.RunNetOnce(net)
print("x = ", FetchBlob("x"))
print("x_grad = ", FetchBlob("x_grad"))  # derivative: 4x^3
print("y = ", FetchBlob("y"))
print("y_grad = ", FetchBlob("y_grad"))  # derivative: 2y
print("z = ", FetchBlob("z"))
print("z_grad = ", FetchBlob("z_grad"))  # derivative: 3z^2
# ### Implementation Notes
#
# On the low level, Caffe2 uses the following set of operators to implement forward and backward branching and loops:
# - If - accepts *then_net* and *else_net* nets as arguments and executes one of them, depending on input condition blob value, nets are executed **in the same** workspace;
# - While - repeats execution of *loop_net* net passed as argument, net is executed in the same workspace;
# - Do - special operator that creates a separate inner workspace, sets up blob mappings between outer and inner workspaces, and runs a net in an inner workspace;
# - CreateScope/HasScope - special operators that create and keep track of workspaces used by Do operator.
#
# Higher level libraries that implement branching and looping (e.g. in `NetBuilder`, `brew`), use these operators to build control flow, e.g. for 'If':
# - do necessary sanity checks (e.g. determine which blobs are initialized and check that subnet does not read undefined blobs)
# - wrap 'then' and 'else' branches into Do
# - setup correct blob mappings by specifying which local names are mapped to outer blobs
# - prepare scope structure, used by Do operator
#
# While 'If' and 'While' Caffe2 ops can be used directly without creating local block workspaces, we encourage users to use higher level Caffe2 interfaces that provide necessary correctness guarantees.
#
# Backpropagation for 'While' in general is expensive memory-wise - we have to save local workspace for every iteration of a block, including global blobs visible to the block. It is recommended that users use `RecurrentNetwork` operator instead in production environments.
|
# -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import os
# "javascript" section for javascript. see @app.route('/config.js') in app/views.py
# NOTE: all following key/secrets for test purpose.
# Service endpoints; API host/port come from the environment (fails fast at
# import time if HACKATHON_SERVER / HACKATHON_SERVER_PORT are unset).
ENDPOINT_WEB = "http://localhost"  # host name of the UI site
ENDPOINT_HACKATHON_API = "http://" + os.environ["HACKATHON_SERVER"] + ":" + os.environ["HACKATHON_SERVER_PORT"]
# OAuth client identifiers for the supported login providers (test values; see note above).
GITHUB_CLIENT_ID = "b44f3d47bdeb26b9c4e6"
QQ_CLIENT_ID = "101200890"
QQ_OAUTH_STATE = "openhackathon"  # todo state should be constant. Actually it should be unguessable to prevent CSFA
WECHAT_APP_ID = "wxe75b8aef71c2059f"
WECHAT_OAUTH_STATE = "openhackathon"  # NOTE: may be should be same as QQ_OAUTH_STATE?
WEIBO_CLIENT_ID = "479757037"
LIVE_CLIENT_ID = "000000004414E0A6"
# Top-level application configuration consumed by the UI server; the
# "javascript" section is served to the browser (see the note at file top).
Config = {
    "environment": "local",
    "app": {
        "secret_key": "secret_key"
    },
    "login": {
        "provider_enabled": ["github", "wechat"],
        "session_valid_time_minutes": 60
    },
    "endpoint": {
        "hackathon_web": ENDPOINT_WEB,
        "hackathon_api": ENDPOINT_HACKATHON_API
    },
    # Per-provider OAuth authorize URLs built from the constants above.
    "javascript": {
        "github": {
            "authorize_url": "https://github.com/login/oauth/authorize?client_id=%s&redirect_uri=%s/github&scope=user" % (
                GITHUB_CLIENT_ID, ENDPOINT_WEB)
        },
        "weibo": {
            "authorize_url": "https://api.weibo.com/oauth2/authorize?client_id=%s&redirect_uri=%s/weibo&scope=all" % (
                WEIBO_CLIENT_ID, ENDPOINT_WEB)
        },
        "qq": {
            "authorize_url": "https://graph.qq.com/oauth2.0/authorize?client_id=%s&redirect_uri=%s/qq&scope=get_user_info&state=%s&response_type=code" % (
                QQ_CLIENT_ID, ENDPOINT_WEB, QQ_OAUTH_STATE)
        },
        "wechat": {
            "authorize_url": "https://open.weixin.qq.com/connect/qrconnect?appid=%s&redirect_uri=%s/wechat&response_type=code&scope=snsapi_login&state=%s#wechat_redirect" % (
                WECHAT_APP_ID, ENDPOINT_WEB, WECHAT_OAUTH_STATE)
        },
        "wechat_mobile": {
            "authorize_url": "https://open.weixin.qq.com/connect/oauth2/authorize?appid=%s&redirect_uri=%s/wechat&response_type=code&scope=snsapi_base&state=%s#wechat_redirect" % (
                WECHAT_APP_ID, ENDPOINT_WEB, WECHAT_OAUTH_STATE)
        },
        "live": {
            "authorize_url": "https://login.live.com/oauth20_authorize.srf?client_id=%s&scope=wl.basic+,wl.emails&response_type=code&redirect_uri=%s/live" % (
                LIVE_CLIENT_ID, ENDPOINT_WEB)
        },
        "hackathon": {
            "endpoint": ENDPOINT_HACKATHON_API
        },
        # Routing table: nested URL segments mapped to the HTTP verbs the
        # API proxy allows for each endpoint ("" = the segment itself).
        "apiconfig": {
            "proxy": ENDPOINT_HACKATHON_API,
            "api": {
                "admin": {
                    "hackathon": {
                        "": ["get", "post", "put", "delete"],
                        "checkname": ["get"],
                        "list": ["get"],
                        "online": ["post"],
                        "applyonline": ["post"],
                        "offline": ["post"],
                        "tags": ["get", "post", "put", "delete"],
                        "config": ["get", "post", "put", "delete"],
                        "administrator": {
                            "": ["put", "post", "delete"],
                            "list": ["get"]
                        },
                        "template": {
                            "": ["post", "delete"],
                            "list": ["get"],
                            "check": ["get"]
                        },
                        "organizer": {
                            "": ["get", "post", "put", "delete"]
                        },
                        "award": {
                            "": ["get", "post", "put", "delete"],
                            "list": ["get"]
                        },
                        "notice": {
                            "": ["get", "post", "put", "delete"]
                        }
                    },
                    "registration": {
                        "": ["get", "post", "delete", "put"],
                        "list": ["get"]
                    },
                    "experiment": {
                        "list": ["get"],
                        "": ["post", "put", "delete"]
                    },
                    "team": {
                        "list": ["get"],
                        "score": {
                            "list": ["get"]
                        },
                        "award": ["get", "post", "delete"]
                    },
                    "user": {
                        "list": ["get"]
                    },
                    "hostserver": {
                        "": ["get", "post", "delete", "put"],
                        "list": ["get"]
                    }
                },
                "template": {
                    "": ["get", "post", "delete", "put"],
                    "file": ["post"],
                    "list": ["get"],
                    "check": ["get"]
                },
                "user": {
                    "": ["get"],
                    "login": ["post", "delete"],
                    "experiment": {
                        "": ["get", "post", "delete", "put"]
                    },
                    "registration": {
                        "": ["put", "post", "get"],
                        "checkemail": ["get"],
                        "list": ["get"]
                    },
                    "profile": {
                        "": ["post", "put"]
                    },
                    "picture": {
                        "": ["put"]
                    },
                    "team": {
                        "member": ["get"]
                    },
                    "hackathon": {
                        "like": ["get", "post", "delete"]
                    },
                    "notice": {
                        "read": ["put"]
                    },
                    "show": {
                        "list": ["get"]
                    },
                    "file": {
                        "": ["post"]
                    }
                },
                "hackathon": {
                    "": ["get"],
                    "list": ["get"],
                    "stat": ["get"],
                    "template": ["get"],
                    "team": {
                        "list": ["get"]
                    },
                    "registration": {
                        "list": ["get"]
                    },
                    "show": {
                        "list": ["get"]
                    },
                    "grantedawards": ["get"],
                    "notice": {
                        "list": ["get"]
                    }
                },
                "team": {
                    "": ["get", "post", "put", "delete"],
                    "score": ["get", "post", "put"],
                    "member": {
                        "": ["post", "put", "delete"],
                        "list": ["get"]
                    },
                    "show": ["get", "post", "delete"],
                    "template": ["post", "delete"],
                    "email": ["put"]
                },
                "talent": {
                    "list": ["get"]
                },
                "grantedawards": ["get"]
            }
        }
    }
}
|
import argparse
import logging
import os
import random
import socket
import sys
import traceback
import numpy as np
import psutil
import setproctitle
import torch
import wandb
from mpi4py import MPI
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
# add the FedML root directory to the python path
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "./../../../")))
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "")))
from fedml_api.distributed.utils.gpu_mapping import mapping_processes_to_gpu_device_from_yaml_file
from fedml_api.data_preprocessing.MNIST.data_loader import load_partition_data_mnist
from fedml_api.model.cv.mnistgan import MNISTGan
from fedml_api.distributed.fedgan.FedGanAPI import FedML_init, FedML_FedGan_distributed
from fedml_api.distributed.fedgan.MyModelTrainer import MyModelTrainer
def add_args(parser):
    """
    parser : argparse.ArgumentParser

    Register all FedGAN training options on *parser*, parse sys.argv,
    and return the resulting namespace (i.e. the parser is consumed here).
    """
    # Training settings
    parser.add_argument('--model', type=str, default='mnist_gan', metavar='N',
                        help='neural network used in training')
    parser.add_argument('--dataset', type=str, default='mnist', metavar='N',
                        help='dataset used for training')
    parser.add_argument('--data_dir', type=str, default='./../../../data/mnist',
                        help='data directory')
    parser.add_argument('--partition_method', type=str, default='hetero', metavar='N',
                        help='how to partition the dataset on local workers')
    parser.add_argument('--partition_alpha', type=float, default=0.5, metavar='PA',
                        help='partition alpha (default: 0.5)')
    parser.add_argument('--client_num_in_total', type=int, default=3, metavar='NN',
                        help='number of workers in a distributed cluster')
    parser.add_argument('--client_num_per_round', type=int, default=3, metavar='NN',
                        help='number of workers')
    parser.add_argument('--batch_size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--client_optimizer', type=str, default='adam',
                        help='SGD with momentum; adam')
    parser.add_argument('--backend', type=str, default="MPI",
                        help='Backend for Server and Client')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--wd', help='weight decay parameter;', type=float, default=0.001)
    parser.add_argument('--epochs', type=int, default=5, metavar='EP',
                        help='how many epochs will be trained locally')
    parser.add_argument('--comm_round', type=int, default=10,
                        help='how many round of communications we shoud use')
    parser.add_argument('--is_mobile', type=int, default=0,
                        help='whether the program is running on the FedML-Mobile server side')
    parser.add_argument('--frequency_of_the_test', type=int, default=1,
                        help='the frequency of the algorithms')
    parser.add_argument('--gpu_server_num', type=int, default=1,
                        help='gpu_server_num')
    parser.add_argument('--gpu_num_per_server', type=int, default=4,
                        help='gpu_num_per_server')
    parser.add_argument('--gpu_mapping_file', type=str, default="gpu_mapping.yaml",
                        help='the gpu utilization file for servers and clients. If there is no \
gpu_util_file, gpu will not be used.')
    parser.add_argument('--gpu_mapping_key', type=str, default="mapping_default",
                        help='the key in gpu utilization file')
    parser.add_argument('--grpc_ipconfig_path', type=str, default="grpc_ipconfig.csv",
                        help='config table containing ipv4 address of grpc server')
    parser.add_argument('--ci', type=int, default=0,
                        help='CI')
    # Parse sys.argv here so callers receive a ready namespace.
    args = parser.parse_args()
    return args
def load_data(args, dataset_name):
    """Build the federated dataset description for the given dataset name.

    Every client is handed the same full MNIST DataLoader (GAN training does
    not need disjoint partitions here).

    :param args: parsed namespace; uses args.batch_size and args.client_num_in_total
    :param dataset_name: name of the dataset; only "mnist" is supported
    :return: list of [train_data_num, test_data_num, train_data_global,
             test_data_global, train_data_local_num_dict, train_data_local_dict,
             test_data_local_dict, class_num]
    :raises ValueError: for unsupported dataset names (the original fell
             through and crashed with a NameError on ``return dataset``)
    """
    if dataset_name != "mnist":
        raise ValueError("Unsupported dataset: %s" % dataset_name)
    logging.info("load_data. dataset_name = %s" % dataset_name)
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.5],
                                                         std=[0.5])]
                                   )
    mnist = datasets.MNIST(root='./data/', train=True, transform=transform, download=True)
    data_loader = DataLoader(dataset=mnist, batch_size=args.batch_size, shuffle=True, drop_last=True)
    """
    For shallow NN or linear models,
    we uniformly sample a fraction of clients each round (as the original FedAvg paper)
    """
    test_data_num = 0
    train_data_local_dict = dict()
    test_data_local_dict = dict()
    train_data_local_num_dict = dict()
    train_data_global = list()
    test_data_global = list()
    class_num = 10
    # Hoisted out of the loop: previously train_data_num was only bound inside
    # the loop and would be undefined when client_num_in_total == 0.
    train_data_num = len(data_loader)
    for i in range(args.client_num_in_total):
        # Approximate sample count (drop_last=True makes batches full-sized).
        train_data_local_num_dict[i] = len(data_loader) * args.batch_size
        train_data_local_dict[i] = data_loader
    dataset = [train_data_num, test_data_num, train_data_global, test_data_global,
               train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num]
    return dataset
def create_model():
    """Instantiate and return the MNIST GAN model wrapper."""
    return MNISTGan()
if __name__ == "__main__":
    # quick fix for issue in MacOS environment: https://github.com/openai/spinningup/issues/16
    # if sys.platform == 'darwin':
    #     os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

    # initialize distributed computing (MPI)
    comm, process_id, worker_number = FedML_init()

    # parse python script input parameters
    parser = argparse.ArgumentParser()
    args = add_args(parser)
    logging.info(args)

    # customize the process name so each MPI rank is identifiable in `ps`
    str_process_name = "FedGan (distributed):" + str(process_id)
    setproctitle.setproctitle(str_process_name)

    # customize the log format; prefix every record with the MPI rank
    logging.getLogger().setLevel(logging.INFO)
    logging.basicConfig(level=logging.INFO,
                        format=str(
                            process_id) + ' - %(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S')
    hostname = socket.gethostname()
    logging.info("#############process ID = " + str(process_id) +
                 ", host name = " + hostname + "########" +
                 ", process ID = " + str(os.getpid()) +
                 ", process Name = " + str(psutil.Process(os.getpid())))

    # Set the random seed. The np.random seed determines the dataset partition.
    # The torch_manual_seed determines the initial weight.
    # We fix these two, so that we can reproduce the result.
    random.seed(0)
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed_all(0)

    # Please check "GPU_MAPPING.md" to see how to define the topology
    # NOTE(review): the gpu mapping file argument is passed as None instead of
    # args.gpu_mapping_file -- confirm whether GPU mapping is intentionally disabled.
    logging.info("process_id = %d, size = %d" % (process_id, worker_number))
    device = mapping_processes_to_gpu_device_from_yaml_file(process_id, worker_number, None,
                                                            args.gpu_mapping_key)

    # load data
    # BUG FIX: argparse registers --dataset, so the attribute is args.dataset;
    # args.dataset_name raised AttributeError at runtime.
    dataset = load_data(args, args.dataset)
    [train_data_num, test_data_num, train_data_global, test_data_global,
     train_data_local_num_dict, train_data_local_dict, test_data_local_dict, class_num] = dataset

    # create model.
    # Note if the model is DNN (e.g., ResNet), the training will be very slow.
    # In this case, please use our FedML distributed version (./fedml_experiments/distributed_fedavg)
    model = create_model()
    netd = model.get_netd()
    netg = model.get_netg()
    trainer = MyModelTrainer(netd, netg)

    # try:
    # start "federated averaging (FedAvg)"
    FedML_FedGan_distributed(process_id, worker_number, device, comm,
                             model, train_data_num, train_data_global, test_data_global,
                             train_data_local_num_dict, train_data_local_dict, test_data_local_dict, args, trainer)
    # except Exception as e:
    #     print(e)
    #     logging.info('traceback.format_exc():\n%s' % traceback.format_exc())
    #     MPI.COMM_WORLD.Abort()
import random

# Banner explaining the game (user-facing strings are deliberately kept in Portuguese).
print('''
Porta da Fortuna
Existe um super prêmio atrás de um destas 3 portas!
Advinhe qual é a porta certa para ganhar o prêmio!
_______ _______ _______
| | | | | |
| [1] | | [2] | | [3] |
| | | | | |
| | | | | |
------- ------- -------
'''
      )
allowed_doors = [1, 2, 3]
# List of prizes
list_of_prizes = ['Apple MacBook Air', 'Notebook Gamer Acer Nitro 5',
                  'Monitor Dell Alienware', 'Xbox One X', 'iPhone 12 Pro Apple',
                  'Headphone Sony', 'Echo Dot (4ª Geração) - ALEXA', 'TECLADO GAMER MECÂNICO ALIENWARE LOW PROFILE RGB AW510K',
                  'Chevrolet Onix Plus 0 Km', 'PC Gamer Fácil Intel Core i5 9400F (Nona Geração)']
# Sentinel that starts a new round while it holds 'SIM' ("yes")
try_again = 'SIM'
while (try_again == 'SIM'):
    # 'life': total number of chances the player gets per round
    life = 2
    # Same round: the winning door and the drawn prize stay fixed until the round ends.
    # Random number in [1, 3] chosen as the winning door
    award_winning_door = random.randint(1, 3)
    # A random prize from list_of_prizes
    prize_drawn = random.choice(list_of_prizes)
    while (life > 0):
        # NOTE(review): int(input(...)) raises ValueError on non-numeric input -- confirm acceptable.
        chosen_door = int(input('Escolha uma porta entre 1, 2 e 3: '))
        # If the chosen door is NOT in allowed_doors (no life lost in this case)
        if (chosen_door not in allowed_doors):
            print('Essa porta não existe!')
        # If the chosen door equals the winning door
        elif (chosen_door == award_winning_door):
            print('''
______ __ __ __ _
/ ____/___ ____ ____ __________ _/ /___ __/ /___ _/ /_(_)___ ____ _____
/ / / __ \/ __ \/ __ `/ ___/ __ `/ __/ / / / / __ `/ __/ / __ \/ __ \/ ___/
/ /___/ /_/ / / / / /_/ / / / /_/ / /_/ /_/ / / /_/ / /_/ / /_/ / / / (__ )
\____/\____/_/ /_/\__, /_/ \__,_/\__/\__,_/_/\__,_/\__/_/\____/_/ /_/____/
''')
            print('Você acertou a porta premiada!!!')
            print('E ganhou um', prize_drawn)
            break
        # The chosen door is allowed but is not the winning one
        else:
            life -= 1
            if (life == 1):
                print('Porta errada! Você tem mais uma chance')
            else:
                print('GAME OVER')
    try_again = str(input('Iniciar nova partida? Sim ou Não?: ')).upper()
|
"""``cubi-tk archive prepare``: Prepare a project for archival"""
import errno
import os
import re
import shutil
import sys
import tempfile
from cookiecutter.main import cookiecutter
from logzero import logger
from ..common import execute_shell_commands
from ..isa_tpl import IsaTabTemplate
from ..isa_tpl import load_variables
_BASE_DIR = os.path.dirname(__file__)

# Cookiecutter template used to generate the archive README.
TEMPLATE = IsaTabTemplate(
    name="archive",
    path=os.path.join(os.path.dirname(_BASE_DIR), "isa_tpl", "archive"),
    description="Prepare project for archival",
    configuration=load_variables("archive"),
)

# Extracts the leading number from `du` output ("<number>\t<path>").
DU = re.compile("^ *([0-9]+)[ \t]+[^ \t]+.*$")
# Matches project names starting with a YYYY-MM-DD date (captured in group 1).
DATE = re.compile("^(20[0-9][0-9]-[01][0-9]-[0-3][0-9])[_-].+$")
# RFC-5322-style e-mail address pattern, used as a building block below.
MAIL = (
    "(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
    '|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]'
    '|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")'
    "@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
    "|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}"
    "(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:"
    "(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]"
    "|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)"
    "\\])"
)
# Mandatory README metadata lines; a valid README must match every pattern
# (see is_readme_valid below).
PATTERNS = {
    "project_name": re.compile("^ *- *Project name: *.+$"),
    "date": re.compile("^ *- *Start date: *20[0-9]{2}-[01][0-9]-[0-3][0-9].*$"),
    "status": re.compile("^ *- *Current status: *(Active|Inactive|Finished|Archived) *$"),
    "PI": re.compile("^ *- P.I.: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
    "client": re.compile("^ *- *Client contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
    "archiver": re.compile("^ *- *CUBI contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
    "CUBI": re.compile("^ *- *CUBI project leader: ([A-z '-]+) *$"),
}
# Shell pipelines computing size/inode statistics for the project directory.
COMMANDS = {
    "size": ["du", "--bytes", "--max-depth=0"],
    "inodes": ["du", "--inodes", "--max-depth=0"],
    "size_follow": ["du", "--dereference", "--bytes", "--max-depth=0"],
    "inodes_follow": ["du", "--dereference", "--inodes", "--max-depth=0"],
}
# Separator inserted above the preserved contents of a pre-existing README.
MSG = "**Contents of original `README.md` file**"
def _extra_context_from_config(config=None):
extra_context = {}
if config:
for name in TEMPLATE.configuration:
if getattr(config, "var_%s" % name, None) is not None:
extra_context[name] = getattr(config, "var_%s" % name)
return extra_context
def _get_snakemake_nb(project_dir):
    """Return the total inode count of all .snakemake directories below project_dir."""
    find_cmd = [
        "find",
        project_dir,
        "-type",
        "d",
        "-name",
        ".snakemake",
        "-exec",
        "du",
        "--inodes",
        "--max-depth=0",
        "{}",
        ";",
    ]
    # Sum the per-directory inode counts: cut the number column, join with '+', evaluate with bc.
    pipeline = [find_cmd, ["cut", "-f", "1"], ["paste", "-sd+"], ["bc"]]
    return execute_shell_commands(pipeline, check=False, verbose=False)
def _get_archiver_name():
    """Return the current user's real name as reported by ``pinky``."""
    pipeline = [
        ["pinky", "-l", os.getenv("USER")],
        ["grep", "In real life:"],
        ["sed", "-e", "s/.*In real life: *//"],
    ]
    name = execute_shell_commands(pipeline, check=False, verbose=False)
    return name.rstrip()
def _create_extra_context(project_dir, config=None):
    """Assemble the cookiecutter context for the archive README.

    Starts from explicit --var-* values in ``config`` and fills in everything
    that is missing: disk usage statistics, archiver/PI/client e-mails derived
    from names, SODAR URL, project name and start date.

    :param project_dir: path of the project to archive
    :param config: parsed command-line namespace (may be None)
    :return: dict of context variables for cookiecutter
    """
    extra_context = _extra_context_from_config(config)
    logger.info("Collecting size & inodes numbers")
    for (context_name, cmd) in COMMANDS.items():
        if context_name not in extra_context.keys():
            # BUG FIX: the original did ``cmd.append(project_dir)``, mutating the
            # module-level COMMANDS lists so every later call appended another
            # path. Build a fresh argument list instead.
            extra_context[context_name] = DU.match(
                execute_shell_commands([cmd + [project_dir]], check=False, verbose=False)
            ).group(1)
    if "snakemake_nb" not in extra_context.keys():
        extra_context["snakemake_nb"] = _get_snakemake_nb(project_dir)
    if "archiver_name" not in extra_context.keys():
        extra_context["archiver_name"] = _get_archiver_name()
    # Derive e-mail addresses from names when not given explicitly.
    if "archiver_email" not in extra_context.keys():
        extra_context["archiver_email"] = (
            "{}@bih-charite.de".format(extra_context["archiver_name"]).lower().replace(" ", ".")
        )
    if "CUBI_name" not in extra_context.keys():
        extra_context["CUBI_name"] = extra_context["archiver_name"]
    if "PI_name" in extra_context.keys() and "PI_email" not in extra_context.keys():
        extra_context["PI_email"] = (
            "{}@charite.de".format(extra_context["PI_name"]).lower().replace(" ", ".")
        )
    if "client_name" in extra_context.keys() and "client_email" not in extra_context.keys():
        extra_context["client_email"] = (
            "{}@charite.de".format(extra_context["client_name"]).lower().replace(" ", ".")
        )
    if "SODAR_UUID" in extra_context.keys() and "SODAR_URL" not in extra_context.keys():
        extra_context["SODAR_URL"] = "{}/projects/{}".format(
            config.sodar_server_url, extra_context["SODAR_UUID"]
        )
    if "directory" not in extra_context.keys():
        extra_context["directory"] = project_dir
    if "project_name" not in extra_context.keys():
        extra_context["project_name"] = os.path.basename(project_dir)
    # Project names like "2020-01-31_something" yield the start date.
    if "start_date" not in extra_context.keys() and DATE.match(extra_context["project_name"]):
        extra_context["start_date"] = DATE.match(extra_context["project_name"]).group(1)
    if "current_status" not in extra_context.keys():
        extra_context["current_status"] = "Finished"
    return extra_context
def _copy_readme(src, target):
os.makedirs(os.path.realpath(os.path.dirname(target)), mode=488, exist_ok=True)
with open(src, "rt") as f:
lines = [x.rstrip() for x in f.readlines()]
if os.path.exists(target):
lines.extend(["", "", "-" * 80, "", "", MSG, "", "", "-" * 80, "", ""])
with open(target, "rt") as f:
lines.extend([x.rstrip() for x in f.readlines()])
os.remove(target)
with open(os.path.realpath(target), "wt") as f:
f.write("\n".join(lines))
def is_readme_valid(filename=None):
    """Return True if the README contains every mandatory metadata line.

    :param filename: README path, or None to read from stdin
    :return: True when all PATTERNS entries matched at least one line;
             False for a missing file or missing metadata
    """
    if filename is None:
        f = sys.stdin
        owns_handle = False  # BUG FIX: the original closed sys.stdin here
    else:
        if not os.path.exists(filename):
            return False
        f = open(filename, "rt")
        owns_handle = True
    matching = set()
    try:
        for line in f:
            line = line.rstrip()
            for (name, pattern) in PATTERNS.items():
                if pattern.match(line):
                    matching.add(name)
    finally:
        # Close only handles we opened, and even when iteration raises.
        if owns_handle:
            f.close()
    return set(PATTERNS.keys()).issubset(matching)
def create_readme(filename, project_dir, config=None, no_input=False):
    """Create the archive README at ``filename`` via the cookiecutter template.

    An already-valid README is left untouched. Otherwise the README is rendered
    into a temporary directory and copied to ``filename``, preserving the
    contents of any pre-existing (incomplete) README.

    :param filename: destination README path
    :param project_dir: project directory being archived
    :param config: command-line namespace with --var-* overrides (optional)
    :param no_input: when True, cookiecutter runs non-interactively
    """
    # If a valid README.md file already exists in the project, do nothing
    if os.path.exists(filename) and is_readme_valid(filename):
        logger.info("Using existing file, variables ignored : '{}'".format(filename))
        return
    # Fill defaults (emails, size, inodes, ...)
    extra_context = _create_extra_context(project_dir, config)
    # BUG FIX: mkdtemp() must happen before the try block -- if it raised inside,
    # the finally clause hit a NameError on ``tmp``.
    tmp = tempfile.mkdtemp()
    try:
        # Create the readme file in temp directory
        cookiecutter(
            template=TEMPLATE.path, extra_context=extra_context, output_dir=tmp, no_input=no_input
        )
        # Copy it back to destination, including contents of former incomplete README.md
        _copy_readme(os.path.join(tmp, extra_context["project_name"], "README.md"), filename)
    finally:
        try:
            shutil.rmtree(tmp)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
def add_readme_parameters(parser):
    """Register one ``--var-<name>`` option per README template variable."""
    for name in TEMPLATE.configuration:
        option = "--var-%s" % name.replace("_", "-")
        parser.add_argument(option, help="template variables %s" % repr(name), default=None)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Secondary Storage
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
#Import System modules
import time
_multiprocess_shared_ = True
class TestSecStorageServices(cloudstackTestCase):
    """BVT checks for secondary storage: system VM health and builtin templates."""

    @classmethod
    def setUpClass(cls):
        cls.apiclient = super(TestSecStorageServices, cls).getClsTestClient().getApiClient()
        cls._cleanup = []
        return

    @classmethod
    def tearDownClass(cls):
        try:
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    def setUp(self):
        """Collect the zone, pod and domain ids referenced by the tests."""
        self.apiclient = self.testClient.getApiClient()
        self.cleanup = []
        # Get Zone and pod
        self.zones = []
        self.pods = []
        for zone in self.config.zones:
            cmd = listZones.listZonesCmd()
            cmd.name = zone.name
            z = self.apiclient.listZones(cmd)
            if isinstance(z, list) and len(z) > 0:
                self.zones.append(z[0].id)
                for pod in zone.pods:
                    podcmd = listPods.listPodsCmd()
                    podcmd.zoneid = z[0].id
                    p = self.apiclient.listPods(podcmd)
                    if isinstance(p, list) and len(p) > 0:
                        self.pods.append(p[0].id)
        self.domains = []
        dcmd = listDomains.listDomainsCmd()
        domains = self.apiclient.listDomains(dcmd)
        assert isinstance(domains, list) and len(domains) > 0
        for domain in domains:
            self.domains.append(domain.id)
        return

    def tearDown(self):
        try:
            # Clean up, terminate the created templates
            cleanup_resources(self.apiclient, self.cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return

    @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
    def test_01_sys_vm_start(self):
        """Test system VM start
        """
        # 1. verify listHosts has all 'routing' hosts in UP state
        # 2. verify listStoragePools shows all primary storage pools
        #    in UP state
        # 3. verify that secondary storage was added successfully
        list_hosts_response = list_hosts(
            self.apiclient,
            type='Routing',
        )
        self.assertEqual(
            isinstance(list_hosts_response, list),
            True,
            "Check list response returns a valid list"
        )
        # ListHosts has all 'routing' hosts in UP state
        self.assertNotEqual(
            len(list_hosts_response),
            0,
            "Check list host response"
        )
        for host in list_hosts_response:
            self.assertEqual(
                host.state,
                'Up',
                "Check state of routing hosts is Up or not"
            )
        # ListStoragePools shows all primary storage pools in UP state
        list_storage_response = list_storage_pools(
            self.apiclient,
        )
        self.assertEqual(
            isinstance(list_storage_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(list_storage_response),
            0,
            "Check list storage pools response"
        )
        # BUG FIX: the original iterated list_hosts_response here, so the
        # primary-storage state check was re-checking hosts instead.
        for primary_storage in list_storage_response:
            self.assertEqual(
                primary_storage.state,
                'Up',
                "Check state of primary storage pools is Up or not"
            )
        # NOTE(review): this retry loop re-lists SSVMs at most twice with a
        # 30s pause per non-running VM -- crude, but kept as-is; confirm intent.
        for _ in range(2):
            list_ssvm_response = list_ssvms(
                self.apiclient,
                systemvmtype='secondarystoragevm',
            )
            self.assertEqual(
                isinstance(list_ssvm_response, list),
                True,
                "Check list response returns a valid list"
            )
            # Verify SSVM response
            self.assertNotEqual(
                len(list_ssvm_response),
                0,
                "Check list System VMs response"
            )
            for ssvm in list_ssvm_response:
                if ssvm.state != 'Running':
                    time.sleep(30)
                    continue
        for ssvm in list_ssvm_response:
            self.assertEqual(
                ssvm.state,
                'Running',
                "Check whether state of SSVM is running"
            )
        return

    @attr(tags=["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
    def test_02_sys_template_ready(self):
        """Test system templates are ready
        """
        # Validate the following
        # If SSVM is in UP state and running
        # 1. wait for listTemplates to show all builtin templates downloaded and
        #    in Ready state
        hypervisors = {}
        for zone in self.config.zones:
            for pod in zone.pods:
                for cluster in pod.clusters:
                    hypervisors[cluster.hypervisor] = "self"
        for zid in self.zones:
            for k, v in hypervisors.items():
                self.debug("Checking BUILTIN templates in zone: %s" % zid)
                list_template_response = list_templates(
                    self.apiclient,
                    hypervisor=k,
                    zoneid=zid,
                    templatefilter=v,
                    listall=True,
                    account='system'
                )
                self.assertEqual(validateList(list_template_response)[0], PASS,
                                 "templates list validation failed")
                # Ensure all BUILTIN templates are downloaded
                templateid = None
                for template in list_template_response:
                    if template.templatetype == "BUILTIN":
                        templateid = template.id
                template_response = list_templates(
                    self.apiclient,
                    id=templateid,
                    zoneid=zid,
                    templatefilter=v,
                    listall=True,
                    account='system'
                )
                if isinstance(template_response, list):
                    template = template_response[0]
                else:
                    raise Exception("ListTemplate API returned invalid list")
                if template.status == 'Download Complete':
                    self.debug("Template %s is ready in zone %s" % (template.templatetype, zid))
                elif 'Downloaded' not in template.status.split():
                    self.debug("templates status is %s" % template.status)
                self.assertEqual(
                    template.isready,
                    True,
                    "Builtin template is not ready %s in zone %s" % (template.status, zid)
                )
|
import time
from flask import request
from flask.ext.restful import abort
from funcy import project
from peewee import IntegrityError
from redash import models
from redash.wsgi import api
from redash.tasks import record_event
from redash.permissions import require_permission, require_admin_or_owner, is_admin_or_owner, \
require_permission_or_owner
from redash.handlers.base import BaseResource, require_fields
class UserListResource(BaseResource):
    """REST collection resource for users: list (GET) and create (POST)."""

    @require_permission('list_users')
    def get(self):
        # Serialize every user row; no pagination is applied here.
        return [u.to_dict() for u in models.User.select()]

    @require_permission('admin')
    def post(self):
        # TODO: send invite.
        req = request.get_json(force=True)
        require_fields(req, ('name', 'email', 'password'))
        user = models.User(name=req['name'], email=req['email'])
        user.hash_password(req['password'])
        try:
            user.save()
        except IntegrityError as e:
            # NOTE(review): e.message is Python-2-only -- consistent with the
            # flask.ext.* imports in this file; revisit when porting to Python 3.
            if "email" in e.message:
                abort(400, message='Email already taken.')
            # Any other integrity failure is an unexpected server error.
            abort(500)
        # Audit trail: record the user creation asynchronously.
        record_event.delay({
            'user_id': self.current_user.id,
            'action': 'create',
            'timestamp': int(time.time()),
            'object_id': user.id,
            'object_type': 'user'
        })
        return user.to_dict()
class UserResource(BaseResource):
    """Single-user endpoint: fetch or update one user record."""
    def get(self, user_id):
        """Return a user; the API key is included only for admins/the owner."""
        require_permission_or_owner('list_users', user_id)
        user = models.User.get_by_id(user_id)
        return user.to_dict(with_api_key=is_admin_or_owner(user_id))
    def post(self, user_id):
        """Update a user's profile (admin or the user themselves).

        Changing the password requires the current password; changing group
        membership requires admin rights. Records an audit event on success.
        """
        require_admin_or_owner(user_id)
        user = models.User.get_by_id(user_id)
        req = request.get_json(True)
        params = project(req, ('email', 'name', 'password', 'old_password', 'groups'))
        if 'password' in params and 'old_password' not in params:
            abort(403, message="Must provide current password to update password.")
        if 'old_password' in params and not user.verify_password(params['old_password']):
            abort(403, message="Incorrect current password.")
        if 'password' in params:
            user.hash_password(params.pop('password'))
        # Always drop 'old_password': it is a verification credential, not a
        # model field, and must never reach update_instance(). Previously it
        # leaked through when it was sent without a new 'password'.
        params.pop('old_password', None)
        if 'groups' in params and not self.current_user.has_permission('admin'):
            abort(403, message="Must be admin to change groups membership.")
        try:
            user.update_instance(**params)
        except IntegrityError as e:
            # e.message is Python 2 only -- this module targets py2.
            if "email" in e.message:
                message = "Email already taken."
            else:
                message = "Error updating record"
            abort(400, message=message)
        record_event.delay({
            'user_id': self.current_user.id,
            'action': 'edit',
            'timestamp': int(time.time()),
            'object_id': user.id,
            'object_type': 'user',
            'updated_fields': params.keys()
        })
        return user.to_dict(with_api_key=is_admin_or_owner(user_id))
# Register the user endpoints with the API router.
api.add_resource(UserListResource, '/api/users', endpoint='users')
api.add_resource(UserResource, '/api/users/<user_id>', endpoint='user')
|
"""Definition of stuff that can be used directly by a user in a dodo.py file."""
import sys
from doit.cmd_base import ModuleTaskLoader
from doit.doit_cmd import DoitMain
def run(task_creators):
    """Execute doit on the given task creators and exit with its status.

    @param task_creators: module or dict containing task creators
    """
    loader = ModuleTaskLoader(task_creators)
    exit_code = DoitMain(loader).run(sys.argv[1:])
    sys.exit(exit_code)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 Yeolar
#
import multiprocessing
import os
import subprocess
import sys
def _wrap_with(code):
def inner(text, bold=False):
c = code
if bold:
c = '1;%s' % c
return '\033[%sm%s\033[0m' % (c, text)
return inner
# Convenience colorizers, one per standard ANSI foreground color code.
red = _wrap_with('31')
green = _wrap_with('32')
yellow = _wrap_with('33')
blue = _wrap_with('34')
magenta = _wrap_with('35')
cyan = _wrap_with('36')
white = _wrap_with('37')
def run(*cmd):
    """Echo a shell command (in cyan) and run it, returning its exit code.

    NOTE(review): the joined string is executed with shell=True, so
    arguments are not escaped -- only call this with trusted input.
    """
    cmdstr = ' '.join(cmd)
    # Parenthesized print is valid on both Python 2 and 3 (single argument).
    print(cyan(cmdstr))
    return subprocess.call(cmdstr, shell=True)
class cd(object):
    """Context manager: chdir into *path* (creating it if missing) and
    return to the previous working directory on exit."""
    def __init__(self, path):
        if not os.path.exists(path):
            # 0o755 octal literal is valid on py2.6+ and py3 (0755 is py2-only).
            os.makedirs(path, 0o755)
        self.path = path
        self.oldpath = os.getcwd()
    def __enter__(self):
        os.chdir(self.path)
    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the original directory even if the body raised.
        os.chdir(self.oldpath)
# Dependency source kinds, detected from the URL suffix (see get_type).
TYPE_GIT = 0
TYPE_ZIP = 1
TYPE_GZ = 2
def get_type(url):
    """Classify a dependency URL as TYPE_GZ, TYPE_ZIP or TYPE_GIT."""
    lowered = url.lower()
    if lowered.endswith('.tar.gz'):
        return TYPE_GZ
    elif lowered.endswith('.zip'):
        return TYPE_ZIP
    else:
        return TYPE_GIT
def read_deps(f, *args):
    """Parse dependency specs of the form ``url[@commit[:build]]``.

    @param f: path of a spec file, or '-' to take specs from *args*
    @param args: spec lines when f == '-'
    @return: list of dicts with keys raw/type/url/root/commit/build
    """
    deps = []
    if f != '-':
        with open(f) as fp:
            args = fp.readlines()
    for arg in args:
        s = arg.strip()
        # Skip blank lines as well as '#' comments: a blank line previously
        # produced a bogus entry with an empty URL.
        if not s or s.startswith('#'):
            continue
        url, _, commit = s.partition('@')
        type = get_type(url)
        commit, _, build = commit.partition(':')
        deps.append({
            'raw': s,
            'type': type,
            'url': url,
            'root': url.split('/')[-1],  # last path component names the checkout dir
            'commit': commit,
            'build': build or 'cmake ..'  # default build command
        })
    return deps
def build_dep(root, commit, build, jobs=multiprocessing.cpu_count() * 2):
    """Check out (git deps only) and build one dependency, installing under ./usr/local.

    @param root: directory of the cloned/unpacked dependency
    @param commit: git ref to check out; None for archive deps; '' skips the checkout
    @param build: configure command ('cmake ...' triggers out-of-tree build)
    @param jobs: parallel make jobs (NOTE: default is evaluated once, at import time)
    """
    path = os.path.join(os.getcwd(), 'usr', 'local')
    with cd(root):
        if commit is not None:
            # Git dependency: optionally pin to a ref, then update.
            if commit:
                run('git checkout', commit)
            run('git pull')
        if build.startswith('cmake'):
            # cmake builds happen out-of-tree in _build/, with the shared
            # install prefix injected into the configure line.
            with cd('_build'):
                build = 'cmake -DCMAKE_PREFIX_PATH=' + path + build[5:]
                run(build)
                run('make -j%d' % jobs)
                run('make DESTDIR=../.. install')
        else:
            run(build)
            run('make -j%d' % jobs)
            run('make DESTDIR=.. install')
if __name__ == '__main__':
    # Usage: script (<deps-file> | - spec [spec ...])
    deps = read_deps(*sys.argv[1:])
    # Pass 1: fetch anything not yet present (git clone or wget download).
    for dep in deps:
        if not os.path.exists(dep['root']):
            print 'fetch', white(dep['raw'], True)
            if dep['type'] == TYPE_GIT:
                run('git', 'clone', dep['url'])
            else:
                run('wget', dep['url'])
    # Pass 2: unpack downloaded archives.
    # NOTE(review): for archives, dep['commit'] appears to name the extracted
    # directory (the part after '@' in the spec) -- confirm against specs used.
    for dep in deps:
        if dep['type'] != TYPE_GIT and not os.path.exists(dep['commit']):
            print 'unzip', white(dep['raw'], True)
            if dep['type'] == TYPE_GZ:
                run('tar', 'xzf', dep['root'])
            if dep['type'] == TYPE_ZIP:
                run('unzip', dep['root'])
    # Pass 3: build every dependency (archives build inside their extracted dir).
    for dep in deps:
        print 'build', white(dep['raw'], True)
        if dep['type'] == TYPE_GIT:
            build_dep(dep['root'], dep['commit'], dep['build'])
        else:
            build_dep(dep['commit'], None, dep['build'])
|
from .bitmovin_json_encoder import BitmovinJSONEncoder
from .serializable import Serializable
|
import scipy
import pdb
import numpy as np
import pandas as pd
#from inquire.interactions.modalities import *
from inquire.interactions.feedback import Query, Feedback, Choice, Modality
from inquire.utils.learning import Learning
from inquire.utils.sampling import TrajectorySampling
from inquire.agents.agent import Agent
class FixedInteractions(Agent):
    """Agent that asks queries using a fixed, predetermined sequence of
    interaction types (one type per query, in int_types order)."""
    def __init__(self, sampling_method, optional_sampling_params, M, N, steps, int_types=[]):
        # NOTE(review): mutable default int_types=[] is shared across calls;
        # it is only read here, but prefer passing it explicitly.
        self.M = M # number of weight samples
        self.N = N # number of trajectory samples
        self.steps = steps # trajectory length
        self.int_types = int_types #[Sort, Demo] #, Pref, Rating]
        self.sampling_method = sampling_method
        self.optional_sampling_params = optional_sampling_params
        self.query_num = 0  # index into int_types for the next query
    def initialize_weights(self, rand, domain):
        # Draw M weight vectors and normalize each to unit length (columns),
        # returning them as rows (M x w_dim).
        init_w = rand.normal(0,1,(domain.w_dim, self.M)) #.reshape(-1,1)
        init_w = init_w/np.linalg.norm(init_w, axis=0)
        return init_w.T
    def reset(self):
        # Fixed seed for reproducibility across runs.
        self.rand = np.random.RandomState(0)
        self.query_num = 0
    def generate_query(self, domain, query_state, curr_w, verbose=False):
        """Sample trajectories and build the max-information-gain query for
        the interaction type scheduled at this step."""
        all_queries, all_gains = [], []
        if verbose:
            print("Sampling trajectories...")
        sampling_params = tuple([query_state, curr_w, domain, self.rand, self.steps, self.N, self.optional_sampling_params])
        traj_samples = self.sampling_method(*sampling_params)
        exp_mat = Inquire.generate_exp_mat(curr_w, traj_samples)
        i = self.int_types[self.query_num]
        if verbose:
            print("Assessing " + str(i.name) + " queries...")
        prob_mat, choice_idxs = Inquire.generate_prob_mat(exp_mat, i)
        gains = Inquire.generate_gains_mat(prob_mat, self.M)
        query_gains = np.sum(gains, axis=(1,2))
        opt_query_idx = np.argmax(query_gains)
        query_trajs = [traj_samples[a] for a in choice_idxs[opt_query_idx]]
        opt_query = Query(i, None, query_state, query_trajs)
        self.query_num += 1
        return opt_query
    def step_weights(self, curr_w, domain, feedback):
        # NOTE(review): convert_binary_feedback_to_prefs is not defined on this
        # class, and Inquire's version takes an extra traj_samples argument --
        # this call looks broken; confirm intended behavior.
        converted_feedback = self.convert_binary_feedback_to_prefs(curr_w, feedback, domain)
        return Learning.gradient_descent(self.rand, converted_feedback, Inquire.gradient, domain.w_dim, self.M, conv_threshold=np.inf)
    def update_weights(self, curr_w, domain, feedback):
        # NOTE(review): same missing-method concern as step_weights above.
        converted_feedback = self.convert_binary_feedback_to_prefs(curr_w, feedback, domain)
        return Learning.gradient_descent(self.rand, converted_feedback, Inquire.gradient, domain.w_dim, self.M)
class Inquire(Agent):
    """Agent that, at each step, scores every allowed interaction type by
    expected information gain and asks the best query."""
    def __init__(self, sampling_method, optional_sampling_params, M, N, steps, int_types=[]):
        # NOTE(review): mutable default int_types=[] is shared across calls;
        # it is only read here, but prefer passing it explicitly.
        self.M = M # number of weight samples
        self.N = N # number of trajectory samples
        self.steps = steps # trajectory length
        self.int_types = int_types #[Sort, Demo] #, Pref, Rating]
        self.sampling_method = sampling_method
        self.optional_sampling_params = optional_sampling_params
        self.chosen_interactions = []  # names of interaction types picked so far
    def reset(self):
        # Fixed seed for reproducibility across runs.
        self.rand = np.random.RandomState(0)
    def initialize_weights(self, rand, domain):
        # Draw M weight vectors, normalize each to unit length, return as rows.
        init_w = rand.normal(0,1,(domain.w_dim, self.M)) #.reshape(-1,1)
        init_w = init_w/np.linalg.norm(init_w, axis=0)
        return init_w.T
    @staticmethod
    def gradient(feedback, w):
        """Negative gradient of the preference log-likelihood at weights w."""
        grads = np.zeros_like(w)
        for fb in feedback:
            phi_pos = fb.choice.selection.phi  # features of the chosen option
            for f in fb.choice.options:
                # Skip the chosen option itself (identical feature vector).
                if any(f.phi != phi_pos):
                    phis = np.array([f.phi, phi_pos])
                    exps = np.exp(np.dot(phis,w)).reshape(-1,1)
                    # Softmax-weighted feature expectation vs. chosen features.
                    grads = grads + (phi_pos - ((exps*phis).sum(axis=0)/exps.sum()))
        return grads * -1
    @staticmethod
    def generate_exp_mat(w_samples, trajectories):
        """Exponentiated rewards for each trajectory under each weight sample,
        broadcast to a square trajectory-pair layout."""
        phi = np.stack([t.phi for t in trajectories])
        exp = np.exp(np.dot(phi, w_samples.T)) # produces a M X N matrix
        exp_mat = np.broadcast_to(exp,(exp.shape[0],exp.shape[0],exp.shape[1]))
        return exp_mat
    @staticmethod
    def generate_prob_mat(exp, int_type): #|Q| x |C| x |W|
        """Per-interaction-type choice probabilities plus the index matrix of
        the options each query presents."""
        #if int_type is Demonstration:
        #    choice_matrix = np.expand_dims(np.array(list(range(exp.shape[0]))),axis=0)
        #    return np.expand_dims(exp[0] / np.sum(exp, axis=1), axis=0), choice_matrix
        # Pairwise Bradley-Terry style probabilities between trajectories.
        mat = exp / (exp + np.transpose(exp,(1,0,2)))
        diag = np.repeat(np.expand_dims(np.eye(mat.shape[0], mat.shape[1], dtype=bool), axis=-1), mat.shape[-1], axis=-1)
        if int_type is Modality.DEMONSTRATION:
            choice_matrix = np.expand_dims(np.array(list(range(exp.shape[0]))),axis=0)
            # mat[0,0] is 0.5 everywhere; dividing removes the self-pair factor.
            prod_mat = np.prod(mat, axis=1) / mat[0,0]
            return np.expand_dims(prod_mat/np.sum(prod_mat,axis=0), axis=0), choice_matrix
        elif int_type is Modality.PREFERENCE:
            # One query per unordered trajectory pair; two choices each.
            idxs = np.triu_indices(exp.shape[0], 1)
            prob_mat = np.stack([mat[idxs],mat[idxs[::-1]]],axis=1)
            choices = np.transpose(np.stack(idxs))
            return prob_mat, choices
        elif int_type is Modality.CORRECTION:
            tf_mat = np.transpose(mat, (1,0,2))
            result = np.transpose(tf_mat/np.sum(tf_mat,axis=0),(1,0,2)), [[i] for i in range(exp.shape[0])]
            return result
        elif int_type is Modality.BINARY:
            # Approve/reject: mean win-probability against all other trajectories.
            choice_matrix = np.expand_dims(np.array(list(range(exp.shape[0]))),axis=1)
            pref_mat = np.mean(mat[~diag].reshape((exp.shape[0],exp.shape[1]-1,-1)), axis=1)
            return np.stack([pref_mat, 1.0-pref_mat],axis=1), choice_matrix
        else:
            return None
    @staticmethod
    def generate_gains_mat(prob_mat, M):
        """Expected information gain of each query/choice/weight-sample cell."""
        return prob_mat * np.log(M * prob_mat / np.expand_dims(np.sum(prob_mat,axis=-1),axis=-1)) / M
    def generate_query(self, domain, query_state, curr_w, verbose=False):
        """Sample trajectories, score all interaction types, and return the
        single query with the highest summed information gain."""
        all_queries, all_gains = [], []
        all_probs = []
        if verbose:
            print("Sampling trajectories...")
        sampling_params = tuple([query_state, curr_w, domain, self.rand, self.steps, self.N, self.optional_sampling_params])
        traj_samples = self.sampling_method(*sampling_params)
        exp_mat = Inquire.generate_exp_mat(curr_w, traj_samples)
        for i in self.int_types:
            if verbose:
                print("Assessing " + str(i.name) + " queries...")
            prob_mat, choice_idxs = Inquire.generate_prob_mat(exp_mat, i)
            gains = Inquire.generate_gains_mat(prob_mat, self.M)
            query_gains = np.sum(gains, axis=(1,2))
            #query_gains = np.mean(np.sum(gains, axis=-1), axis=-1)
            all_gains.append(query_gains)
            all_queries.append(choice_idxs)
            all_probs.append(prob_mat)
        if verbose:
            print("Selecting best query...")
        gains = [np.max(i) for i in all_gains]
        opt_type = np.argmax([np.max(i) for i in all_gains])
        opt_query_idx = np.argmax(all_gains[opt_type])
        query_trajs = [traj_samples[i] for i in all_queries[opt_type][opt_query_idx]]
        opt_query = Query(self.int_types[opt_type], None, query_state, query_trajs)
        if verbose:
            print(f"Chosen interaction type: {self.int_types[opt_type].name}")
        self.chosen_interactions.append(self.int_types[opt_type].name)
        return opt_query
    def convert_binary_feedback_to_prefs(self, traj_samples, curr_w, feedback, domain):
        """Rewrite BINARY (approve/reject) feedback as synthetic PREFERENCE
        feedback against low/high-reward sampled trajectories; pass other
        modalities through unchanged."""
        converted_feedback = []
        for i in range(len(feedback)):
            fb = feedback[i]
            traj = fb.choice.options[0]
            if fb.modality is Modality.BINARY:
                sign = fb.choice.selection  # truthy == approved
                rewards = np.array([np.dot(curr_w, t.phi) for t in traj_samples[i]])
                lower_threshold_r = np.percentile(rewards, 25) #replace with whatever percentile threshold
                upper_threshold_r = np.percentile(rewards, 75) #replace with whatever percentile threshold
                for j in range(len(traj_samples[i])):
                    # Approved trajectory beats bottom-quartile samples.
                    if sign and rewards[j] <= lower_threshold_r:
                        converted_feedback.append(Feedback(Modality.PREFERENCE, Choice(traj, [traj, traj_samples[i][j]])))
                    # Rejected trajectory loses to top-quartile samples.
                    if (not sign) and rewards[j] >= upper_threshold_r:
                        converted_feedback.append(Feedback(Modality.PREFERENCE, Choice(traj_samples[i][j], [traj, traj_samples[i][j]])))
            else:
                converted_feedback.append(fb)
        return converted_feedback
    def step_weights(self, curr_w, domain, feedback):
        # conv_threshold=inf makes gradient descent take exactly one step.
        return self.update_weights(None, domain, feedback, conv_threshold=np.inf)
    def update_weights(self, init_w, domain, feedback, learning_rate=0.05, conv_threshold=1.0e-5):
        """Re-estimate M weight samples by gradient descent over all feedback,
        converting BINARY feedback each iteration using the current weights."""
        traj_samples = []
        for fb in feedback:
            if fb.modality is Modality.BINARY:
                traj = fb.choice.options[0]
                query_state = traj.trajectory[0][1]
                sampling_params = tuple([query_state, init_w, domain, self.rand, self.steps, self.N, self.optional_sampling_params])
                traj_samples.append(self.sampling_method(*sampling_params))
            else:
                traj_samples.append(None)
        samples = []
        for i in range(self.M):
            # Random unit-norm restart for each weight sample.
            curr_w = self.rand.normal(0,1,domain.w_dim) #.reshape(-1,1)
            curr_w = curr_w/np.linalg.norm(curr_w)
            converged = (len(feedback) == 0)
            while not converged:
                converted_feedback = self.convert_binary_feedback_to_prefs(traj_samples, curr_w, feedback, domain)
                grads = Inquire.gradient(converted_feedback, curr_w)
                new_w = curr_w - (learning_rate * np.array(grads))
                new_w = new_w/np.linalg.norm(new_w)
                if np.linalg.norm(new_w - curr_w) < conv_threshold:
                    converged = True
                curr_w = new_w
            samples.append(curr_w)
        return np.stack(samples)
    def save_data(self, directory: str, file_name: str, data: np.ndarray = None) -> None:
        """Save the agent's stored attributes."""
        if data is not None:
            data = np.stack(data, axis=1).squeeze()
            df = pd.DataFrame(data)
            df.to_csv(directory + file_name)
        else:
            # Default payload: the history of chosen interaction types.
            df = pd.DataFrame(self.chosen_interactions)
            df.to_csv(directory + file_name)
|
from django.apps import AppConfig
class SearchAppConfig(AppConfig):
    """Django application configuration for the search_app package."""
    name = 'search_app'
|
from __future__ import print_function
from builtins import str
from django.core.exceptions import FieldDoesNotExist
from django.core.management.base import BaseCommand
from django.utils.module_loading import import_string
from bluebottle.assignments.models import Assignment, Applicant
from bluebottle.events.models import Event, Participant
from bluebottle.funding.models import Donation, Funding, PayoutAccount
from bluebottle.initiatives.models import Initiative
from bluebottle.members.models import Member
def get_doc(element):
    """Return an element's docstring, or a placeholder derived from str(element)
    with angle brackets stripped (so it embeds safely in HTML)."""
    doc = element.__doc__
    if doc:
        return doc
    fallback = "{} (documentation missing)".format(str(element))
    return fallback.replace('<', '').replace('>', '')
class Command(BaseCommand):
    help = "Prints transitions for a model"

    def add_arguments(self, parser):
        """Register the model path plus optional attribute/owner/user arguments."""
        parser.add_argument(
            "model",
            type=str,
            help="Dotted path to the model"
        )
        parser.add_argument(
            "--attributes",
            type=str,
            default="",
            help="List of comma separated attributes, e.g. 'title=bla,description=test'"
        )
        parser.add_argument(
            "--owner",
            type=str,
            help="Email of the models owner"
        )
        parser.add_argument(
            "--user",
            type=str,
            help="Email of the models user"
        )

    def _has_field(self, model, field):
        """Return True when *model* declares a field named *field*."""
        try:
            model._meta.get_field(field)
            return True
        except FieldDoesNotExist:
            return False

    def handle(self, *args, **options):
        """Build an unsaved instance of the requested model and print its state
        machine (states, transitions, triggers, periodic tasks) as HTML tables
        suitable for pasting into a wiki page."""
        model = import_string(options["model"])
        if options["attributes"]:
            model_args = dict(
                arg.split("=") for arg in options["attributes"].split(",")
            )
        else:
            model_args = {}
        if options.get("owner") and self._has_field(model, "owner"):
            model_args["owner"] = Member(email=options["owner"])
        if options.get("user") and self._has_field(model, "user"):
            model_args["user"] = Member(email=options["user"])
        instance = model(
            **model_args
        )
        # Give known model types readable placeholder relations so effect
        # descriptions render with friendly names.
        if isinstance(instance, Initiative):
            instance.title = "the initiative"
        if isinstance(instance, Funding):
            instance.title = "the campaign"
        if isinstance(instance, Donation):
            instance.activity = Funding(title="the campaign")
            instance.user = Member(first_name='the', last_name='donor')
        if isinstance(instance, Event):
            instance.title = "the event"
        if isinstance(instance, Participant):
            instance.activity = Event(title="the event")
            instance.user = Member(first_name='the', last_name='participant')
        if isinstance(instance, Assignment):
            instance.title = "the assignment"
        if isinstance(instance, Applicant):
            instance.activity = Assignment(title="the assignment")
            instance.user = Member(first_name='the', last_name='applicant')
        if isinstance(instance, PayoutAccount):
            instance.owner = Member(first_name='the', last_name='owner')
        machine = instance.states
        text = ""
        text += u"<h2>States</h2>"
        text += u"<em>All states this instance can be in.</em>"
        text += u"<table data-layout=\"default\"><tr><th>State Name</th><th>Description</th></tr>"
        for state in list(machine.states.values()):
            text += u"<tr><td>{}</td><td>{}</td></tr>".format(state.name.capitalize(), state.description)
        text += u"</table>"
        text += u"<h2>Transitions</h2>"
        text += u"<em>An instance will always move from one state to the other through a transition. " \
                u"A manual transition is initiated by a user. An automatic transition is initiated by the system, " \
                u"either through a trigger or through a side effect of a related object.</em>"
        text += u"<table data-layout=\"full-width\"><tr><th>Name</th><th>Description</th><th>From</th><th>To</th>" \
                u"<th>Manual</th><th>Conditions</th><th>Side Effects</th></tr>"
        # Renamed from 'str', which shadowed the builtin imported at the top
        # of this module.
        row_template = u"<tr><td>{}</td><td>{}</td><td><ul>{}</ul></td>" \
                       u"<td>{}</td><td>{}</td><td><ul>{}</ul></td><td><ul>{}</ul></td></tr>"
        for transition in list(machine.transitions.values()):
            text += row_template.format(
                transition.name,
                transition.description,
                u"".join(u"<li>{}</li>".format(state.name.capitalize()) for state in transition.sources),
                transition.target.name.capitalize(),
                "Automatic" if transition.automatic else "Manual",
                u"".join(
                    u"<li>{}</li>".format(get_doc(condition))
                    for condition
                    in transition.conditions
                ),
                u"".join(
                    u"<li>{}</li>".format(effect(instance).to_html())
                    for effect
                    in transition.effects
                )
            )
        text += u"</table>"
        if model.triggers:
            text += u"<h2>Triggers</h2>"
            text += u"<em>These are events that get triggered when the instance changes, " \
                    u"other then through a transition. " \
                    u"Mostly it would be triggered because a property changed (e.g. a deadline).</em>"
            text += u"<table data-layout=\"full-width\">" \
                    u"<tr><th>When</th>" \
                    u"<th>Effects</th></tr>"
            for trigger in model.triggers:
                text += u"<tr><td>{}</td><td><ul>{}</ul></td></tr>".format(
                    trigger(instance),
                    "".join(["<li>{}</li>".format(effect(instance).to_html()) for effect in trigger(instance).effects])
                )
            text += u"</table>"
        # Gate this section on the list it actually renders; it previously
        # re-checked model.triggers (copy-paste from the section above).
        if model.periodic_tasks:
            text += u"<h2>Periodic tasks</h2>"
            text += u"<em>These are events that get triggered when certain dates are passed. " \
                    u"Every 15 minutes the system checks for passing deadlines, registration dates and such.</em>"
            text += u"<table data-layout=\"full-width\">" \
                    u"<tr><th>When</th>" \
                    u"<th>Effects</th></tr>"
            for task in model.periodic_tasks:
                text += u"<tr><td>{}</td><td><ul>{}</ul></td></tr>".format(
                    task(instance),
                    "".join(["<li>{}</li>".format(effect(instance).to_html()) for effect in task(instance).effects])
                )
            text += u"</table>"
        print(text)
|
"""
Datum Object Model
"""
from decimal import Decimal
from typing import Any, Dict, Optional
import numpy as np
from pydantic import BaseModel, validator
class Datum(BaseModel):
    r"""Facilitates the storage of quantum chemical results by labeling them with basic metadata.

    Attributes
    ----------
    label : str
        Official label for `data`, often qcvar. May contain spaces.
    units : str
        ASCII, LaTeX-like representation of units, without square brackets.
    data : float or decimal.Decimal or numpy.ndarray
        Value for `label`.
    comment : str
        Additional notes.
    doi : str
        Literature citation or definition DOI link.
    glossary : str
        Extended description or definition.
    numeric : bool
        Whether `data` is numeric. Pass `True` to disable validating `data` as float/Decimal/np.ndarray.
    """
    numeric: bool
    label: str
    units: str
    data: Any
    comment: str = ""
    doi: Optional[str] = None
    glossary: str = ""
    class Config:
        # Immutable, strict model; ndarray/complex get JSON-friendly encodings.
        extra = "forbid"
        allow_mutation = False
        json_encoders = {np.ndarray: lambda v: v.flatten().tolist(), complex: lambda v: (v.real, v.imag)}
    def __init__(self, label, units, data, *, comment=None, doi=None, glossary=None, numeric=True):
        # Positional-friendly constructor; only forwards optional fields that
        # were actually supplied so pydantic defaults apply otherwise.
        kwargs = {"label": label, "units": units, "data": data, "numeric": numeric}
        if comment is not None:
            kwargs["comment"] = comment
        if doi is not None:
            kwargs["doi"] = doi
        if glossary is not None:
            kwargs["glossary"] = glossary
        super().__init__(**kwargs)
    @validator("data")
    def must_be_numerical(cls, v, values, **kwargs):
        # Probe float-like then Decimal-like multiplication; if both fail and
        # numeric validation was requested, reject. On success the stored
        # `numeric` flag is forced True.
        try:
            1.0 * v
        except TypeError:
            try:
                Decimal("1.0") * v
            except TypeError:
                if values["numeric"]:
                    raise ValueError(f"Datum data should be float, Decimal, or np.ndarray, not {type(v)}.")
            else:
                values["numeric"] = True
        else:
            values["numeric"] = True
        return v
    def __str__(self, label=""):
        """Render a boxed, human-readable summary (optionally with an extra label line)."""
        width = 40
        text = ["-" * width, "{:^{width}}".format("Datum " + self.label, width=width)]
        if label:
            text.append("{:^{width}}".format(label, width=width))
        text.append("-" * width)
        text.append("Data: {}".format(self.data))
        text.append("Units: [{}]".format(self.units))
        text.append("doi: {}".format(self.doi))
        text.append("Comment: {}".format(self.comment))
        text.append("Glossary: {}".format(self.glossary))
        text.append("-" * width)
        return "\n".join(text)
    def dict(self, *args, **kwargs):
        # Always exclude unset fields from the serialized form.
        return super().dict(*args, **{**kwargs, **{"exclude_unset": True}})
    def to_units(self, units=None):
        """Return data converted to *units* (defaults to own units, i.e. factor 1).

        Decimal data is coerced to float before scaling.
        """
        from .physical_constants import constants
        to_unit = self.units if units is None else units
        factor = constants.conversion_factor(self.units, to_unit)
        if isinstance(self.data, Decimal):
            return factor * float(self.data)
        else:
            return factor * self.data
def print_variables(qcvars: Dict[str, "Datum"]) -> str:
    r"""Form a printable representation of qcvariables.

    Parameters
    ----------
    qcvars
        Group of Datum objects to print.

    Returns
    -------
    str
        Printable string representation of label, data, and unit in Datum-s.
    """
    lines = ["\n Variable Map:", " ----------------------------------------------------------------------------"]
    if not qcvars:
        lines.append(" (none)")
        return "\n".join(lines)
    # Widest (quoted) key and widest decimal exponent drive the column layout.
    keywidth = max(len(key) for key in qcvars) + 2  # for quotation marks
    characteristic = 8
    for datum in qcvars.values():
        pieces = str(datum.data).split("E")
        if len(pieces) > 1:
            characteristic = max(int(pieces[1]), characteristic)
    width = characteristic + 14
    for key in sorted(qcvars):
        qca = qcvars[key]
        quoted = '"' + key + '"'
        if isinstance(qca.data, np.ndarray):
            # Arrays print on their own indented lines below the header row.
            arr = np.array_str(qca.data, max_line_width=120, precision=8, suppress_small=True)
            arr = "\n".join(" " + ln for ln in arr.splitlines())
            lines.append(
                """ {:{keywidth}} => {:{width}} [{}]""".format(
                    quoted, "", qca.units, keywidth=keywidth, width=width
                )
            )
            lines.append(arr)
        elif isinstance(qca.data, Decimal):
            lines.append(
                """ {:{keywidth}} => {:{width}} [{}]""".format(
                    quoted, qca.data, qca.units, keywidth=keywidth, width=width
                )
            )
        elif not qca.numeric:
            lines.append(
                """ {:{keywidth}} => {:>{width}} [{}]""".format(
                    quoted, str(qca.data), qca.units, keywidth=keywidth, width=width
                )
            )
        else:
            lines.append(
                """ {:{keywidth}} => {:{width}.{prec}f} [{}]""".format(
                    quoted, qca.data, qca.units, keywidth=keywidth, width=width, prec=12
                )
            )
    lines.append("")
    return "\n".join(lines)
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import json
import os
from nlp_architect.models.bist import utils
from nlp_architect.models.bist.utils import get_options_dict
from nlp_architect.utils.io import validate, validate_existing_filepath
class BISTModel(object):
    """
    BIST parser model class.
    This class handles training, prediction, loading and saving of a BIST parser model.
    After the model is initialized, it accepts a CoNLL formatted dataset as input, and learns to
    output dependencies for new input.
    Args:
        activation (str, optional): Activation function to use.
        lstm_layers (int, optional): Number of LSTM layers to use.
        lstm_dims (int, optional): Number of LSTM dimensions to use.
        pos_dims (int, optional): Number of part-of-speech embedding dimensions to use.
    Attributes:
        model (MSTParserLSTM): The underlying LSTM model.
        params (tuple): Additional parameters and resources for the model.
        options (dict): User model options.
    """
    def __init__(self, activation='tanh', lstm_layers=2, lstm_dims=125, pos_dims=25):
        # Validate hyperparameter types/ranges before building the options dict.
        validate((activation, str), (lstm_layers, int, 0, None), (lstm_dims, int, 0, 1000),
                 (pos_dims, int, 0, 1000))
        self.options = get_options_dict(activation, lstm_dims, lstm_layers, pos_dims)
        self.params = None
        self.model = None
    def fit(self, dataset, epochs=10, dev=None):
        """
        Trains a BIST model on an annotated dataset in CoNLL file format.
        Args:
            dataset (str): Path to input dataset for training, formatted in CoNLL/U format.
            epochs (int, optional): Number of learning iterations.
            dev (str, optional): Path to development dataset for conducting evaluations.
        """
        if dev:
            dev = validate_existing_filepath(dev)
        dataset = validate_existing_filepath(dataset)
        validate((epochs, int, 0, None))
        print('\nRunning fit on ' + dataset + '...\n')
        words, w2i, pos, rels = utils.vocab(dataset)
        self.params = words, w2i, pos, rels, self.options
        # Imported lazily -- presumably to defer loading the heavy parser
        # backend until training is actually requested; confirm.
        from nlp_architect.models.bist.mstlstm import MSTParserLSTM
        self.model = MSTParserLSTM(*self.params)
        for epoch in range(epochs):
            print('Starting epoch', epoch + 1)
            self.model.train(dataset)
            if dev:
                # Write per-epoch dev predictions next to the dev file and score them.
                ext = dev.rindex('.')
                res_path = dev[:ext] + '_epoch_' + str(epoch + 1) + '_pred' + dev[ext:]
                utils.write_conll(res_path, self.model.predict(dev))
                utils.run_eval(dev, res_path)
    def predict(self, dataset, evaluate=False):
        """
        Runs inference with the BIST model on a dataset in CoNLL file format.
        Args:
            dataset (str): Path to input CoNLL file.
            evaluate (bool, optional): Write prediction and evaluation files to dataset's folder.
        Returns:
            res (list of list of ConllEntry): The list of input sentences with predicted
                dependencies attached.
        """
        dataset = validate_existing_filepath(dataset)
        validate((evaluate, bool))
        print('\nRunning predict on ' + dataset + '...\n')
        res = list(self.model.predict(conll_path=dataset))
        if evaluate:
            ext = dataset.rindex('.')
            pred_path = dataset[:ext] + '_pred' + dataset[ext:]
            utils.write_conll(pred_path, res)
            utils.run_eval(dataset, pred_path)
        return res
    def predict_conll(self, dataset):
        """
        Runs inference with the BIST model on a dataset in CoNLL object format.
        Args:
            dataset (list of list of ConllEntry): Input in the form of ConllEntry objects.
        Returns:
            res (list of list of ConllEntry): The list of input sentences with predicted
                dependencies attached. None if *dataset* is not iterable.
        """
        res = None
        if hasattr(dataset, '__iter__'):
            res = list(self.model.predict(conll=dataset))
        return res
    def load(self, path):
        """Loads and initializes a BIST model from file.

        NOTE(review): expects *path* to be a pathlib.Path (uses path.parent),
        while save() takes a plain string -- confirm callers pass Path here.
        """
        with open(path.parent / 'params.json') as file:
            self.params = json.load(file)
        from nlp_architect.models.bist.mstlstm import MSTParserLSTM
        self.model = MSTParserLSTM(*self.params)
        self.model.model.populate(str(path))
    def save(self, path):
        """Saves the BIST model to file (weights at *path*, params.json beside it)."""
        print("Saving")
        with open(os.path.join(os.path.dirname(path), 'params.json'), 'w') as file:
            json.dump(self.params, file)
        self.model.model.save(path)
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""This test module contains old tests coming from the instrumentation package.
Some are still relevant, others are not, we should sort this out eventually.
Overall, they may be overly complicated because they were converted from the old framework...
"""
import typing as tp
import numpy as np
import pytest
from nevergrad.common import testing
from . import parameter as p
def test_instrumentation_set_standardized_data() -> None:
    """Standardized data maps back to args; a wrong-length vector raises ValueError."""
    tokens = [p.Choice(list(range(5))), p.Scalar(init=3).set_mutation(sigma=4)]
    instru = p.Instrumentation(*tokens)
    values = instru.spawn_child().set_standardized_data([0, 200, 0, 0, 0, 2]).args
    assert values == (1, 11)
    np.testing.assert_raises(
        ValueError, instru.spawn_child().set_standardized_data, [0, 0, 200, 0, 0, 0, 2, 3]
    )
def test_instrumentation() -> None:
    """End-to-end checks of Instrumentation: dimension, data round-trip,
    deterministic vs stochastic sampling, copying, repr and naming."""
    instru = p.Instrumentation(p.Scalar(), 3, b=p.Choice([0, 1, 2, 3]), a=p.TransitionChoice([0, 1, 2, 3]))
    np.testing.assert_equal(instru.dimension, 6)
    instru2 = p.Instrumentation(p.Scalar(), 3, b=p.Choice([0, 1, 2, 3]), a=p.TransitionChoice([0, 1, 2, 3]))
    np.testing.assert_equal(instru2.dimension, 6)
    data = instru2.spawn_child(new_value=((4, 3), dict(a=0, b=3))).get_standardized_data(reference=instru2)
    np.testing.assert_array_almost_equal(data, [4, -1.1503, 0, 0, 0, 0.5878], decimal=4)
    args, kwargs = instru.spawn_child().set_standardized_data(data, deterministic=True).value
    testing.printed_assert_equal((args, kwargs), ((4.0, 3), {"a": 0, "b": 3}))
    assert "3),Dict(a=TransitionChoice(choices=Tuple(0,1,2,3)," in repr(
        instru
    ), f"Erroneous representation {instru}"
    # check deterministic
    data = np.array([0.0, 0, 0, 0, 0, 0])
    total = 0
    for _ in range(24):
        total += instru.spawn_child().set_standardized_data(data, deterministic=True).kwargs["b"]
    np.testing.assert_equal(total, 0)
    # check stochastic
    for _ in range(24):
        total += instru.spawn_child().set_standardized_data(data, deterministic=False).kwargs["b"]
    assert total != 0
    # check duplicate
    # instru2 = mvar.Instrumentation(*instru.args, **instru.kwargs)  # TODO: OUCH SILENT FAIL
    instru2.copy()
    data = np.random.normal(0, 1, size=6)
    testing.printed_assert_equal(
        instru2.spawn_child().set_standardized_data(data, deterministic=True).value,
        instru.spawn_child().set_standardized_data(data, deterministic=True).value,
    )
    # check naming
    instru_str = (
        "Instrumentation(Tuple(Scalar[sigma=Log{exp=2.0}],3),"
        "Dict(a=TransitionChoice(choices=Tuple(0,1,2,3),"
        "positions=Array{Cd(0,4)},transitions=[1. 1.]),"
        "b=Choice(choices=Tuple(0,1,2,3),weights=Array{(1,4)})))"
    )
    testing.printed_assert_equal(instru.name, instru_str)
    testing.printed_assert_equal("blublu", instru.set_name("blublu").name)
def _false(value: tp.Any) -> bool:  # pylint: disable=unused-argument
    """Cheap-constraint helper that rejects every candidate value."""
    return False
def test_instrumentation_copy() -> None:
    """Copies keep name and constraints but get an independent random state."""
    instru = p.Instrumentation(p.Scalar(), 3, b=p.Choice(list(range(1000)))).set_name("bidule")
    instru.register_cheap_constraint(_false)
    copied = instru.copy()
    assert copied.name == "bidule"
    assert copied.random_state is not instru.random_state
    # test that variables do not hold a random state / interfere
    instru.random_state = np.random.RandomState(12)
    copied.random_state = np.random.RandomState(12)
    kwargs1 = instru.spawn_child().set_standardized_data([0] * 1001).kwargs
    kwargs2 = copied.spawn_child().set_standardized_data([0] * 1001).kwargs
    assert kwargs1 == kwargs2
    assert not copied.satisfies_constraints()
def test_instrumentation_init_error() -> None:
    """Reusing the same parameter instance twice must raise ValueError."""
    variable = p.Scalar()
    np.testing.assert_raises(ValueError, p.Instrumentation, variable, variable)
def test_softmax_categorical_deterministic() -> None:
    """A Choice built with deterministic=True picks the argmax even when the
    call itself passes deterministic=False."""
    token = p.Choice(["blu", "blublu", "blublublu"], deterministic=True)
    assert token.set_standardized_data([1, 1, 1.01], deterministic=False).value == "blublublu"
def test_softmax_categorical() -> None:
    """Softmax Choice sampling and deterministic data round-trip."""
    np.random.seed(12)  # sampling below is stochastic
    token = p.Choice(["blu", "blublu", "blublublu"])
    assert token.spawn_child().set_standardized_data([0.5, 1.0, 1.5]).value == "blublu"
    new_token = token.spawn_child(new_value="blu")
    assert (
        token.spawn_child()
        .set_standardized_data(new_token.get_standardized_data(reference=token), deterministic=True)
        .value
        == "blu"
    )
def test_ordered_discrete() -> None:
    """TransitionChoice maps standardized data to ordered options and round-trips."""
    token = p.TransitionChoice(["blu", "blublu", "blublublu"])
    assert token.spawn_child().set_standardized_data([5]).value == "blublublu"
    assert token.spawn_child().set_standardized_data([0]).value == "blublu"
    new_token = token.spawn_child(new_value="blu")
    assert (
        token.spawn_child()
        .set_standardized_data(new_token.get_standardized_data(reference=token), deterministic=True)
        .value
        == "blu"
    )
def test_scalar() -> None:
    """Scalar with integer casting rounds sampled floats and round-trips exactly."""
    token = p.Scalar().set_integer_casting()
    assert token.spawn_child().set_standardized_data([0.7]).value == 1
    new_token = token.spawn_child(new_value=1)
    assert new_token.get_standardized_data(reference=token).tolist() == [1.0]
# bouncing with large values clips to the other side
@pytest.mark.parametrize("value,expected", [(0, 0.01), (10, 0.001), (-30, 0.1), (20, 0.001)])  # type: ignore
def test_log(value: float, expected: float) -> None:
    """Log parameter maps standardized values into [lower, upper], bouncing at the bounds."""
    var = p.Log(lower=0.001, upper=0.1)
    out = var.spawn_child().set_standardized_data(np.array([value]))
    np.testing.assert_approx_equal(out.value, expected, significant=4)
    repr(var)  # smoke-check that __repr__ does not raise
def test_log_int() -> None:
    """Log parameter combined with integer casting yields an int value."""
    var = p.Log(lower=300, upper=10000).set_integer_casting()
    out = var.spawn_child().set_standardized_data(np.array([0]))
    assert out.value == 1732  # int(sqrt(300 * 10000)): geometric midpoint of the range
# note: 0.9/0.9482=0.9482/0.999
# with very large values, bouncing clips to the other side
@pytest.mark.parametrize("value,expected", [(0, 0.9482), (-11, 0.999), (10, 0.9)])  # type: ignore
def test_log_9(value: float, expected: float) -> None:
    """Same as test_log but on a narrow range just below 1."""
    var = p.Log(lower=0.9, upper=0.999)
    out = var.spawn_child().set_standardized_data(np.array([value]))
    np.testing.assert_approx_equal(out.value, expected, significant=4)
|
"""
Factory and interface for file parser
"""
import os
import warnings
from .sources import *
from .generic import GenericParser
class ParserFactory(type):
    """
    Metaclass for creating source classes for different CHIANTI filetypes.

    On instantiation it dispatches to the GenericParser subclass whose
    ``filetype`` attribute matches the file's extension (or base name),
    or to a user-supplied ``custom_parser``.
    """
    def __call__(cls, *args, **kwargs):
        # Allow for standalone files (paths that exist on disk)
        if os.path.exists(args[0]):
            kwargs['standalone'] = True
        # Use custom parser if desired; pop() replaces the original
        # membership-test + del with a single idiomatic call.
        custom_parser = kwargs.pop('custom_parser', None)
        if custom_parser is not None:
            return custom_parser(*args, **kwargs)
        # Create parser based on file extension or name
        filetype_name, filetype_ext = os.path.splitext(os.path.basename(args[0]))
        filetype_ext = filetype_ext[1:]  # drop the leading '.'
        subclass_dict = {c.filetype: c for c in all_subclasses(GenericParser)
                         if hasattr(c, 'filetype')}
        if filetype_ext in subclass_dict:
            return subclass_dict[filetype_ext](*args, **kwargs)
        elif filetype_name in subclass_dict:
            return subclass_dict[filetype_name](*args, **kwargs)
        else:
            # Fall back to the class being instantiated (GenericParser behavior)
            warnings.warn('Unrecognized filename and extension {}'.format(args[0]), stacklevel=2)
            return type.__call__(cls, *args, **kwargs)
def all_subclasses(cls):
    """
    Return all subclasses of a given class.

    Direct subclasses come first, followed by recursively discovered
    indirect subclasses (same ordering as a breadth-then-depth expansion).
    """
    result = list(cls.__subclasses__())
    for sub in cls.__subclasses__():
        result.extend(all_subclasses(sub))
    return result
class Parser(GenericParser, metaclass=ParserFactory):
    """
    General parser interface for all CHIANTI datatypes.

    The Parser ingests the name of a raw ASCII data file and builds an
    `astropy.table.QTable` from it. A predefined parser is created based
    on the file extension, but a custom parser can also be used.

    Examples
    --------
    """
    def __init__(self, filename, custom_parser=None, **kwargs):
        # NOTE(review): ParserFactory.__call__ removes 'custom_parser' from
        # kwargs before dispatching, so this parameter appears to exist only
        # to document the supported keyword — confirm.
        super().__init__(filename, **kwargs)
|
import unittest
from parsers import groups_parser, student_list_parser, student_parser
class ParserTestCase(unittest.TestCase):
    """Tests for the HTML scrapers in `parsers`: group list, student list and
    per-student marks pages are parsed into plain dict/list structures."""

    def test_groups_parser(self):
        """groups_parser yields {'id', 'name'} dicts, skipping the empty and '-' options."""
        text = '''
        <select id="group" name="group">
        <option value=""/>
        <option value="15798">АЭМББТ</option>
        <option value="35568">Б8102</option>
        <option value="2107">Б8103а</option>
        <option value="3240" selected="selected">мастер-класс</option>
        <option value="-">без группы</option>
        </select>'''
        data = [
            {'id': 15798, 'name': 'АЭМББТ'},
            {'id': 35568, 'name': 'Б8102'},
            {'id': 2107, 'name': 'Б8103а'},
            {'id': 3240, 'name': 'мастер-класс'},
        ]
        self.assertEqual(groups_parser(text), data)

    def test_students_parser(self):
        """student_list_parser extracts student ids from marks_student links.

        NOTE(review): the first data row deliberately lacks the numeric index
        cell — the parser is expected to cope with that."""
        text = '''
        <table class="works">
        <tr>
        <th>N</th><th>Ф. И. О.</th><th>Группы</th>
        </tr><tr>
        <td><a href="marks_student?id=34211">Игнатий Лихачев</a></td><td>Б8303а</td>
        </tr><tr>
        <td align="right">5</td>
        <td><a href="marks_student?id=34217">Кира Шилова</a></td><td>Б8303а</td>
        </tr><tr>
        <td align="right">6</td>
        <td><a href="marks_student?id=34269">Лука Назаров</a></td><td>Б8303а</td>
        </tr>
        </table>'''
        data = [
            {'id': 34211, 'name': 'Игнатий Лихачев'},
            {'id': 34217, 'name': 'Кира Шилова'},
            {'id': 34269, 'name': 'Лука Назаров'},
        ]
        self.assertEqual(student_list_parser(text), data)

    def test_student_parser(self):
        """student_parser groups tasks by 'worktype' rows and parses each task's
        nested marks table (date, integer rate1, float rate2)."""
        text = '''
        <table class="bordered marks">
        <tr>
        <th>Дата</th>
        <th>Задание</th>
        <th>Баллы/вес</th>
        <th>Тесты/оценки</th>
        </tr>
        <tr class="worktype">
        <td colspan="4">Технология программирования</td>
        </tr>
        <tr>
        <td>02.11.2015</td>
        <td>
        <a href="marks_view?tid=40113;sid=8884">Задание 2</a>
        </td>
        <td>1..10, 25%</td>
        <td class="embedded">
        <table class="bordered">
        <col />
        <col width="30px"/>
        <col width="30px"/>
        <tr>
        <td>24.02.2016</td>
        <td>7</td>
        <td>1.9</td>
        </tr>
        <tr>
        <td>10.02.2016</td>
        <td>3</td>
        <td>0.9</td>
        </tr>
        <tr>
        <td>28.12.2015</td>
        <td>2</td>
        <td>0.7</td>
        </tr>
        </table>
        </td>
        </tr>
        <tr class="worktype">
        <td colspan="4">Web-программирование 1</td>
        </tr>
        <tr>
        <td>30.06.2015</td>
        <td>
        <a href="marks_view?tid=40270;sid=8884">Дополнительное задание 1</a>
        </td>
        <td>1..10, 14%</td>
        <td class="embedded">
        </td>
        </tr>
        <tr>
        <td>16.06.2015</td>
        <td>
        <a href="marks_view?tid=38743;sid=8884">Отчёты</a>
        </td>
        <td>1..10, 4%</td>
        <td class="embedded late">
        </td>
        </tr>
        <tr>
        <td>10.03.2015</td>
        <td>
        <a href="marks_view?tid=37563;sid=8884">CSS, меню</a>
        </td>
        <td>1..10, 4%</td>
        <td class="embedded">
        <table class="bordered">
        <col />
        <col width="30px"/>
        <col width="30px"/>
        <tr>
        <td>17.03.2015</td>
        <td>8</td>
        <td>5.3</td>
        </tr>
        </table>
        </td>
        </tr>
        </table>'''
        data = [
            {'name': 'Технология программирования', 'tasks': [
                {'date': '02.11.2015', 'id': 40113, 'name': 'Задание 2', 'maxRate': 10, 'rateWeight': 25, 'marks': [
                    {'date': '24.02.2016', 'rate1': 7, 'rate2': 1.9},
                    {'date': '10.02.2016', 'rate1': 3, 'rate2': 0.9},
                    {'date': '28.12.2015', 'rate1': 2, 'rate2': 0.7},
                ]},
            ]},
            {'name': 'Web-программирование 1', 'tasks': [
                {
                    'id': 40270, 'date': '30.06.2015', 'name': 'Дополнительное задание 1', 'maxRate': 10,
                    'rateWeight': 14, 'marks': []
                },
                {'date': '16.06.2015', 'id': 38743, 'name': 'Отчёты', 'maxRate': 10, 'rateWeight': 4, 'marks': []},
                {'date': '10.03.2015', 'id': 37563, 'name': 'CSS, меню', 'maxRate': 10, 'rateWeight': 4, 'marks': [
                    {'date': '17.03.2015', 'rate1': 8.0, 'rate2': 5.3},
                ]},
            ]},
        ]
        self.assertEqual(student_parser(text), data)

    def test_student_parser_bad_cases(self):
        """Degenerate inputs: empty task name, mark row with a missing rate2
        (parsed as None), and a quiz table that must not be mistaken for marks."""
        text = '''
        <table class="bordered marks">
        <tr class="worktype">
        <td colspan="4">Технология программирования</td>
        </tr>
        <tr>
        <td>02.11.2015</td>
        <td>
        <a href="marks_view?tid=40113;sid=8884"></a>
        </td>
        <td>1..10, 25%</td>
        <td class="embedded">
        </td>
        </tr>
        <tr>
        <td>30.06.2015</td>
        <td>
        <a href="marks_view?tid=40270;sid=8884">Дополнительное задание 1</a>
        </td>
        <td>1..10, 14%</td>
        <td class="embedded">
        <table class="bordered">
        <col />
        <col width="30px"/>
        <col width="30px"/>
        <tr>
        <td>17.03.2015</td>
        <td>8</td>
        <td></td>
        </tr>
        </table>
        </td>
        </tr>
        <tr>
        <td>16.06.2015</td>
        <td>
        <a href="marks_view?tid=38743;sid=8884">Отчёты</a>
        </td>
        <td>1..10, 4%</td>
        <td class="embedded late">
        Тесты:
        <table class="bordered">
        <col width1="50%"/>
        <col width="30px"/>
        <tr>
        <td>24.05.2011 13:42</td>
        <td>
        <a href="quiz/quiz.xhtml?t=HdF3OLflcT9LYHd82VZ1Bik4dbxgm01CFcYwUWOh">начать</a>
        </td>
        </tr>
        </table>
        </td>
        </tr>
        </table>'''
        data = [
            {'name': 'Технология программирования', 'tasks': [
                {'date': '02.11.2015', 'id': 40113, 'name': '', 'maxRate': 10, 'rateWeight': 25, 'marks': []},
                {
                    'id': 40270, 'date': '30.06.2015', 'name': 'Дополнительное задание 1', 'maxRate': 10,
                    'rateWeight': 14, 'marks': [
                        {'date': '17.03.2015', 'rate1': 8.0, 'rate2': None},
                    ]
                },
                {'date': '16.06.2015', 'id': 38743, 'name': 'Отчёты', 'maxRate': 10, 'rateWeight': 4, 'marks': []},
            ]},
        ]
        self.assertEqual(student_parser(text), data)
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
|
#! coding: utf-8
from django.db import models
class Producers(models.Model):
    """Manufacturer of a ware (Python 2 era model — uses __unicode__)."""
    Name = models.CharField("Название", max_length=150)

    def __unicode__(self):
        return self.Name

    class Meta:
        verbose_name = "Производитель"
        verbose_name_plural = 'Производители'
class Categories(models.Model):
    """Product category; self-referencing Parent builds the category tree."""
    Name = models.CharField("Название", max_length=150)
    # Null parent means a top-level category.
    Parent = models.ForeignKey('self', verbose_name='Родитель', blank=True, null=True)

    def __unicode__(self):
        return self.Name

    class Meta:
        verbose_name = "Категория"
        verbose_name_plural = 'Категории'
class Wares(models.Model):
    """A sellable product with its producer, category and primary image."""
    Code = models.CharField("Артикул", max_length=50)        # SKU / article number
    Name = models.CharField("Название", max_length=150)
    Producer = models.ForeignKey(Producers, verbose_name='Производитель')
    Image = models.ImageField("Фото", upload_to="images", blank=True, null=True)
    Category = models.ForeignKey(Categories, verbose_name='Категория')
    Description = models.TextField("Описание", blank=True, null=True)

    def __unicode__(self):
        return self.Name

    class Meta:
        verbose_name = "Товар"
        verbose_name_plural = 'Товары'
class Properties(models.Model):
    """A named product property (e.g. attribute type)."""
    Name = models.CharField("Название", max_length=150)

    def __unicode__(self):
        return self.Name

    class Meta:
        verbose_name = "Свойство"
        verbose_name_plural = 'Свойства'
class PropertiesValues(models.Model):
    """A concrete value of a property."""
    Property = models.ForeignKey(Properties, verbose_name='Свойство')
    Value = models.TextField()

    def __unicode__(self):
        return self.Value

    class Meta:
        verbose_name = "Значение свойства"
        # NOTE(review): plural reads as singular ("Значение свойств") —
        # confirm whether "Значения свойств" was intended.
        verbose_name_plural = 'Значение свойств'
class PropertiesByCategories(models.Model):
    """Associates a property with a category (which properties apply where)."""
    Name = models.CharField("Название", max_length=150)
    Category = models.ForeignKey(Categories, verbose_name='Категория')
    Property = models.ForeignKey(Properties, verbose_name='Свойство')

    def __unicode__(self):
        return self.Name

    class Meta:
        verbose_name = "Свойства по категориям товара"
        verbose_name_plural = 'Свойства по категориям товара'
class WaresProperties(models.Model):
    """Assigns a property value to a specific ware (ware-property-value triple)."""
    Ware = models.ForeignKey(Wares, verbose_name='Товар')
    Property = models.ForeignKey(Properties, verbose_name='Свойство')
    Value = models.ForeignKey(PropertiesValues, verbose_name='Значение свойства')

    def __unicode__(self):
        return self.Ware.Name

    class Meta:
        verbose_name = "Значение свойств товара"
        verbose_name_plural = 'Значения свойств товара'
class WaresImages(models.Model):
    """Additional photo of a ware (beyond the primary Wares.Image)."""
    Title = models.CharField("Заголовок", max_length=150)
    Ware = models.ForeignKey(Wares, verbose_name='Товар')
    Image = models.ImageField("Фото", upload_to="images", blank=True, null=True)

    def __unicode__(self):
        return self.Title

    class Meta:
        verbose_name = "Фотография товара"
        verbose_name_plural = 'Фотографии товара'
class Actions(models.Model):
    """A promotional campaign / sale action."""
    Title = models.CharField("Заголовок", max_length=150)
    Image = models.ImageField("Фото", upload_to="images")
    Description = models.TextField("Описание")

    def __unicode__(self):
        return self.Title

    class Meta:
        verbose_name = "Описание акции"
        verbose_name_plural = 'Акции'
class ActionWares(models.Model):
    """Join table linking a promotional Action to a Ware taking part in it."""
    Action = models.ForeignKey(Actions, verbose_name='Акция')
    Ware = models.ForeignKey(Wares, verbose_name='Товар')

    def __unicode__(self):
        # Bug fix: __unicode__ must return text; the original returned the
        # related Actions model instance itself.
        return self.Action.Title

    class Meta:
        verbose_name = "Акционный товар"
        verbose_name_plural = 'Акционные товары'
def compute_iou_with_regular_coord(box1, box2):
    """
    Intersection-over-union of two boxes given as center/extent 4-tuples
    (indices 0,1 are the center, 2,3 the full extents — axis naming follows
    the original tb/lr convention).

    :return: intersection area / union area (0 when the boxes are disjoint).
    """
    # Prevent NaN in benchmark results
    validate_box(box1)
    validate_box(box2)
    # change float to int, in order to prevent overflow.
    # Bug fix: in Python 3 map() returns a lazy iterator which is not
    # subscriptable; materialize to lists before indexing below.
    box1 = list(map(int, box1))
    box2 = list(map(int, box2))
    # Overlap extents along each axis; non-positive means no overlap.
    tb = min(box1[0]+0.5*box1[2], box2[0]+0.5*box2[2]) - max(box1[0]-0.5*box1[2], box2[0]-0.5*box2[2])
    lr = min(box1[1]+0.5*box1[3], box2[1]+0.5*box2[3]) - max(box1[1]-0.5*box1[3], box2[1]-0.5*box2[3])
    intersection = tb * lr if tb > 0 and lr > 0 else 0
    return intersection / (box1[2]*box1[3] + box2[2]*box2[3] - intersection)
def compute_iou_with_decimal_coord(box1, box2, w, h):
    """IoU for boxes given in normalized (decimal) coordinates of a w x h image."""
    regular1 = coord_decimal_to_regular(w, h, box1)
    regular2 = coord_decimal_to_regular(w, h, box2)
    return compute_iou_with_regular_coord(regular1, regular2)
def cal_score(location, gt_location, thresh):
    """
    Binary hit score: 1 when IoU(location, gt_location) >= thresh, else 0.

    Bug fix: the original referenced undefined names regular_box1/regular_box2
    (NameError at runtime); the function's own arguments are used instead.
    """
    iou_score = compute_iou_with_regular_coord(location, gt_location)
    return 1 if iou_score >= thresh else 0
|
from django.forms import ModelForm, Form, TextInput, HiddenInput, DateField, DateInput, \
ModelChoiceField, ModelMultipleChoiceField, ChoiceField, \
CharField, BooleanField, formset_factory
from opencivicdata.legislative.models import Event, EventParticipant
from opencivicdata.core.models import Jurisdiction, Organization
from .models import HearingCategoryType, WitnessDetails, CommitteeOrganization
from .customfields import GroupedModelMultiChoiceField
# add hearing events
class EventForm(ModelForm):
    """Create/edit form for a hearing Event; jurisdiction and classification
    are fixed hidden fields pre-filled in __init__."""
    class Meta:
        model = Event
        fields = ['jurisdiction', 'name', 'start_date', 'classification']
        labels = {
            'name': ('Hearing Title'),
        }
        widgets = {
            'jurisdiction': HiddenInput(),
            'classification': HiddenInput()
        }

    start_date = DateField(
        widget=DateInput(attrs={'type': 'date'}),
        label='Date'
    )

    def __init__(self, *args, **kwargs):
        # Bug fix: super(self.__class__, self) recurses infinitely if this
        # form is ever subclassed; use the zero-argument super() form.
        super().__init__(*args, **kwargs)
        try:
            jurisdiction = Jurisdiction.objects.get(name='United States of America')
            self.fields['jurisdiction'].initial = jurisdiction
        except Jurisdiction.DoesNotExist:
            # Jurisdiction not loaded yet (e.g. empty database): leave blank.
            pass
        self.fields['classification'].initial = "Hearing"
# add committees as event participants
class CommitteeForm(ModelForm):
    """Form attaching one or more committees to an event as participants."""
    class Meta:
        model = EventParticipant
        fields = ['name']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Mark the widget for the front-end multi-select behavior
        # (class consumed by the page's JS — presumably select2; confirm).
        self.fields['name'].widget.attrs['class'] = 'basic-multiple'
        self.fields['name'].widget.attrs['multiple'] = 'multiple'

    # Choices grouped by parent committee, so subcommittees appear nested.
    name = GroupedModelMultiChoiceField(
        label='Committees/subcommittees',
        queryset=CommitteeOrganization.objects.filter(classification='committee').order_by('parent__name'),
        group_by_field='parent',
    )
class CommitteeEditForm(CommitteeForm):
    """Edit variant of CommitteeForm: committees become optional."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['name'].required = False
# add a field for transcript url
class TranscriptForm(Form):
    """Free-form URLs for the transcript and the two opening statements."""
    transcript_url = CharField(
        label='Transcript URL',
        required=False
    )
    opening_statement_chair = CharField(
        label='Chair Opening Statement URL',
        required=False
    )
    opening_statement_rm = CharField(
        label='Ranking Member Opening Statement URL',
        required=False
    )
# add category as foreign key in table HearingCategory
class CategoryForm(Form):
    """Select an optional hearing category."""
    category = ModelChoiceField(
        queryset=HearingCategoryType.objects.all(),
        required=False
    )
# when editing a hearing, category is not required
class CategoryEditForm(CategoryForm):
    """Edit variant of CategoryForm.

    NOTE(review): CategoryForm already declares required=False, so this
    override is redundant today; kept as a safeguard should the parent change.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['category'].required = False
# add witnesses as event participants
class WitnessForm(Form):
    """Details for one witness appearing at a hearing."""
    name = CharField(
        label='Witness name'
    )
    organization = CharField(
        label='Witness organization',
        required=False
    )
    url = CharField(
        label='Witness statement URL',
        required=False
    )
    retired = BooleanField(
        label='Retired',
        required=False
    )

# Formset renders one blank extra witness row by default.
WitnessFormset = formset_factory(WitnessForm, extra=1)
|
"""A textured gyroid shape cut by a sphere"""
from vedo import *
import numpy as np
# Equation of a "gyroid" (https://en.wikipedia.org/wiki/Gyroid)
x, y, z = np.mgrid[:30,:30,:30] * 0.4
U = sin(x)*cos(y) + sin(y)*cos(z) + sin(z)*cos(x)
# Create a Volume, take the isosurface at 0, smooth and subdivide it
gyr = Volume(U).isosurface(0).smooth().subdivide()
# Intersect it with a sphere made of quads
sph = Sphere(pos=(15,15,15), r=14, quads=True, res=30).triangulate()
printc("Please wait a few secs while I'm cutting your gyroid", c='y')
gxs = gyr.boolean('intersect', sph).clean().flat()
gxs.texture('https://vedo.embl.es/examples/data/images/marblings.jpg')
plt = show(gxs, __doc__, bg='wheat', bg2='lightblue', axes=5, zoom=1.4)
# Video('gyroid.mp4').action().close().interactive() # shoot video
plt.close()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: winston
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Dense, Input
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from scipy.io import loadmat
from utils import getPaths, cc_coef
from utils import class2onehot_5class, class2onehot_8class
import argparse
def fusion_network_MTL(num_nodes):
    """
    Dense fusion network for multi-task regression of the three emotional
    attributes (activation, dominance, valence).

    :param num_nodes: width of the two hidden ReLU layers.
    :return: compiled Keras model with three 1-unit linear heads, each
        trained with the cc_coef loss from utils.
    """
    inputs = Input((768,))  # flattened fused audio+video feature vector
    encode = Dense(num_nodes, activation='relu')(inputs)
    encode = Dense(num_nodes, activation='relu')(encode)
    output_act = Dense(units=1, activation='linear')(encode)
    output_dom = Dense(units=1, activation='linear')(encode)
    output_val = Dense(units=1, activation='linear')(encode)
    adam = Adam(lr=0.0001)
    model = Model(inputs=inputs, outputs=[output_act, output_dom, output_val])
    model.compile(optimizer=adam, loss=[cc_coef, cc_coef, cc_coef])
    return model
def fusion_network_class(num_nodes, num_class):
    """
    Dense fusion network for single-task emotion classification.

    :param num_nodes: width of the two hidden ReLU layers.
    :param num_class: number of softmax output classes (5 or 8 here).
    :return: compiled Keras model with categorical cross-entropy loss.
    """
    inputs = Input((768,))  # flattened fused audio+video feature vector
    encode = Dense(num_nodes, activation='relu')(inputs)
    encode = Dense(num_nodes, activation='relu')(encode)
    outputs = Dense(units=num_class, activation='softmax')(encode)
    adam = Adam(lr=0.0001)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=adam, loss='categorical_crossentropy')
    return model
###############################################################################
# Command-line arguments.
# Bug fix: the parser was previously bound to the name `argparse`, shadowing
# the argparse module itself; a distinct name is used instead.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-ep", "--epoch", required=True)
arg_parser.add_argument("-batch", "--batch_size", required=True)
arg_parser.add_argument("-emo", "--emo_type", required=True)
arg_parser.add_argument("-nodes", "--num_nodes", required=True)
arg_parser.add_argument("-nc", "--num_class")  # optional; only used when emo_type == 'class'
args = vars(arg_parser.parse_args())

# Parameters
shuffle = True
random_seed = 99
batch_size = int(args['batch_size'])
epochs = int(args['epoch'])
num_nodes = int(args['num_nodes'])
label_type = args['emo_type']
# argparse always populates the key ('num_class' is None when -nc is omitted),
# so the original try/except KeyError-swallow was dead code.
num_class = args['num_class']
# Hidden Features Paths Setting: pick the feature directory per task type.
if label_type == 'attr':
    root_dir = './Fusion_Features/3-attribute'
elif label_type == 'class':
    if num_class == '5-class':
        root_dir = './Fusion_Features/5-class'
    elif num_class == '8-class':
        root_dir = './Fusion_Features/8-class'

# Loading Paths & Labels
if label_type == 'class':
    paths_valid, labels_class_valid = getPaths(label_type, split_set='Validation', num_class=num_class)
    paths_train, labels_class_train = getPaths(label_type, split_set='Train', num_class=num_class)
elif label_type == 'attr':
    # Loading Norm-Label: per-attribute mean/std used to z-normalize the
    # regression targets further below.
    Label_mean_act = loadmat('./NormTerm/act_norm_means.mat')['normal_para'][0][0]
    Label_std_act = loadmat('./NormTerm/act_norm_stds.mat')['normal_para'][0][0]
    Label_mean_dom = loadmat('./NormTerm/dom_norm_means.mat')['normal_para'][0][0]
    Label_std_dom = loadmat('./NormTerm/dom_norm_stds.mat')['normal_para'][0][0]
    Label_mean_val = loadmat('./NormTerm/val_norm_means.mat')['normal_para'][0][0]
    Label_std_val = loadmat('./NormTerm/val_norm_stds.mat')['normal_para'][0][0]
    paths_valid, labels_act_valid, labels_dom_valid, labels_val_valid = getPaths(label_type, split_set='Validation', num_class=num_class)
    paths_train, labels_act_train, labels_dom_train, labels_val_train = getPaths(label_type, split_set='Train', num_class=num_class)

# shuffle the training set (fixed seed for reproducibility)
indexes = np.arange(len(paths_train))
if shuffle:
    np.random.seed(random_seed)
    np.random.shuffle(indexes)
if label_type == 'class':
    shuffle_paths_train = [paths_train[k] for k in indexes]
    shuffle_class_train = [labels_class_train[k] for k in indexes]
elif label_type == 'attr':
    # paths and all three label streams are permuted with the same indexes
    shuffle_paths_train = [paths_train[k] for k in indexes]
    shuffle_act_train = [labels_act_train[k] for k in indexes]
    shuffle_dom_train = [labels_dom_train[k] for k in indexes]
    shuffle_val_train = [labels_val_train[k] for k in indexes]
# Loading Hidden Features (Training set)
X_Train = []
Y_Train_Class = []
Y_Train_Act = []
Y_Train_Dom = []
Y_Train_Val = []
for i in range(len(shuffle_paths_train)):
    try: # deal with missing files
        x_audio = loadmat(root_dir + '/Audios/' + shuffle_paths_train[i].replace('.wav','.mat'))['Feat']
        x_video = loadmat(root_dir + '/Videos/' + shuffle_paths_train[i].replace('.wav','.mat'))['Feat']
        # fusing audio-visual hidden features into one flat vector
        x = np.concatenate((x_audio, x_video), axis=1)
        x = x.reshape(-1)
        X_Train.append(x)
        if label_type == 'class': # STL (single-task classification)
            # class to one-hot label
            if num_class == '5-class':
                y = class2onehot_5class(shuffle_class_train[i])
            elif num_class == '8-class':
                y = class2onehot_8class(shuffle_class_train[i])
            Y_Train_Class.append(y)
        elif label_type == 'attr': # MTL (multi-task regression)
            # normalize regression label with the precomputed mean/std
            y_act = (shuffle_act_train[i]-Label_mean_act)/Label_std_act
            y_dom = (shuffle_dom_train[i]-Label_mean_dom)/Label_std_dom
            y_val = (shuffle_val_train[i]-Label_mean_val)/Label_std_val
            Y_Train_Act.append(y_act)
            Y_Train_Dom.append(y_dom)
            Y_Train_Val.append(y_val)
    except:
        # NOTE(review): this broad except silently drops a sample on ANY
        # error, not just a missing file — consider narrowing to
        # FileNotFoundError so real bugs are not masked.
        pass
if label_type == 'class':
    X_Train = np.array(X_Train)
    Y_Train_Class = np.array(Y_Train_Class)
elif label_type == 'attr':
    X_Train = np.array(X_Train)
    Y_Train_Act = np.array(Y_Train_Act)
    Y_Train_Dom = np.array(Y_Train_Dom)
    Y_Train_Val = np.array(Y_Train_Val)
# Loading Hidden Features (Validation set) — mirrors the training-set loop.
X_Valid = []
Y_Valid_Class = []
Y_Valid_Act = []
Y_Valid_Dom = []
Y_Valid_Val = []
for i in range(len(paths_valid)):
    try: # deal with missing files
        x_audio = loadmat(root_dir + '/Audios/' + paths_valid[i].replace('.wav','.mat'))['Feat']
        x_video = loadmat(root_dir + '/Videos/' + paths_valid[i].replace('.wav','.mat'))['Feat']
        # fusing audio-visual hidden features
        x = np.concatenate((x_audio, x_video), axis=1)
        x = x.reshape(-1)
        X_Valid.append(x)
        if label_type == 'class':
            # class to one-hot label
            if num_class == '5-class':
                y = class2onehot_5class(labels_class_valid[i])
            elif num_class == '8-class':
                y = class2onehot_8class(labels_class_valid[i])
            Y_Valid_Class.append(y)
        elif label_type == 'attr':
            # z-normalize regression labels with the same stats as training
            y_act = (labels_act_valid[i]-Label_mean_act)/Label_std_act
            y_dom = (labels_dom_valid[i]-Label_mean_dom)/Label_std_dom
            y_val = (labels_val_valid[i]-Label_mean_val)/Label_std_val
            Y_Valid_Act.append(y_act)
            Y_Valid_Dom.append(y_dom)
            Y_Valid_Val.append(y_val)
    except:
        # NOTE(review): broad except drops samples on any error — confirm
        # this is only meant to skip missing feature files.
        pass
if label_type == 'class':
    X_Valid = np.array(X_Valid)
    Y_Valid_Class = np.array(Y_Valid_Class)
elif label_type == 'attr':
    X_Valid = np.array(X_Valid)
    Y_Valid_Act = np.array(Y_Valid_Act)
    Y_Valid_Dom = np.array(Y_Valid_Dom)
    Y_Valid_Val = np.array(Y_Valid_Val)
# loading model structure
if label_type == 'class':
    # '5-class' -> 5 output units, '8-class' -> 8
    model = fusion_network_class(num_nodes=num_nodes, num_class=int(num_class.split('-')[0]))
elif label_type == 'attr':
    model = fusion_network_MTL(num_nodes=num_nodes)
#print(model.summary())

# Output fusion models saving folder
if not os.path.isdir('./Fusion_Models/'):
    os.makedirs('./Fusion_Models/')

# setting model checkpoints: keep only the best weights by validation loss
if label_type == 'attr':
    filepath='./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+label_type+'.hdf5'
elif label_type == 'class':
    filepath='./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+num_class+'.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
# model fitting
if label_type == 'class':
    model.fit(x=X_Train,
              y=Y_Train_Class,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_Valid, Y_Valid_Class),
              verbose=1,
              callbacks=callbacks_list)
elif label_type == 'attr':
    # three regression targets trained jointly (multi-task)
    model.fit(x=X_Train,
              y=([Y_Train_Act, Y_Train_Dom, Y_Train_Val]),
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(X_Valid, [Y_Valid_Act, Y_Valid_Dom, Y_Valid_Val]),
              verbose=1,
              callbacks=callbacks_list)

# Show training & validation loss
v_loss = model.history.history['val_loss']
t_loss = model.history.history['loss']
plt.plot(t_loss,'b')  # training loss in blue
plt.plot(v_loss,'r')  # validation loss in red
if label_type == 'attr':
    plt.savefig('./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+label_type+'.png')
elif label_type == 'class':
    plt.savefig('./Fusion_Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+num_class+'.png')
|
import os
from os.path import join
import pytest
import OpenSSL.crypto
from cryptography import x509
from cryptography.x509.oid import ExtensionOID
import testinfra.utils.ansible_runner
# Discover target hosts from the Molecule-generated Ansible inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')

# Root directory where the role writes the generated certificates.
cert_dir = '/etc/self-signed-certs/live/'
@pytest.mark.parametrize('domain', ['example1.com', 'example3.com'])
def test_domain_cert_directories_exist(host, domain):
    """Each domain gets a live directory with a nested CA directory."""
    domain_dir = host.file(join(cert_dir, domain))
    assert domain_dir.exists
    assert domain_dir.is_directory
    ca_dir = host.file(join(cert_dir, domain, 'ca'))
    assert ca_dir.exists
    assert ca_dir.is_directory
@pytest.mark.parametrize('domain', ['example1.com', 'example3.com'])
@pytest.mark.parametrize('finfo', [
    ('ca/privkey.pem', '0o600'),
    ('ca/crt.pem', '0o644'),
    ('privkey.pem', '0o600'),
    ('cert.pem', '0o644'),
    ('chain.pem', '0o644'),
    ('fullchain.pem', '0o644'),
])
def test_domain_certificate_files_exist(host, domain, finfo):
    """Every generated file exists with the expected mode (private keys 0600)."""
    relpath, expected_mode = finfo
    target = host.file(join(cert_dir, domain, relpath))
    assert target.exists
    assert oct(target.mode) == expected_mode
@pytest.mark.parametrize('domain', ['example1.com', 'example3.com'])
def test_domains_represented_in_ca_cert_pem(host, domain):
    """The CA certificate carries the fixed self-signed subject fields."""
    pem = host.file(join(cert_dir, domain, 'ca/crt.pem')).content
    subject = OpenSSL.crypto.load_certificate(
        OpenSSL.crypto.FILETYPE_PEM, pem).get_subject()
    assert subject.countryName == "US"
    assert subject.localityName == "San Francisco"
    assert subject.organizationName == "Self Signed"
    assert subject.organizationalUnitName == \
        "Self Signed Certificates Department"
    assert subject.commonName == "Self Signed"
@pytest.mark.parametrize('domain', [
    ('example1.com', ['example1.com', 'example2.com']),
    ('example3.com', ['example3.com'])
])
def test_domains_represented_in_domain_cert_pem(host, domain):
    """Leaf cert subject names the primary domain; SANs list all domains."""
    primary, expected_sans = domain
    pem = host.file(join(cert_dir, primary, 'cert.pem')).content
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
    subject = cert.get_subject()
    assert subject.countryName == "US"
    assert subject.localityName == "San Francisco"
    assert subject.organizationName == "Self Signed"
    assert subject.organizationalUnitName == \
        "Self Signed Certificates Department"
    assert subject.commonName == primary
    # Dealing with ASN.1 fields is a bit tricker, let's fall back
    # on the cryptography package
    ext = cert.to_cryptography().extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    assert ext.value.get_values_for_type(x509.DNSName) == expected_sans
@pytest.mark.parametrize('domain', ['example1.com', 'example3.com'])
def test_content_of_chainfiles(host, domain):
    """chain.pem equals the CA cert; fullchain.pem is leaf cert + CA cert."""
    def read(name):
        return host.file(join(cert_dir, domain, name)).content
    cacert = read('ca/crt.pem')
    domaincert = read('cert.pem')
    assert read('chain.pem') == cacert
    assert read('fullchain.pem') == domaincert + cacert
@pytest.mark.parametrize('domain', ['example1.com', 'example3.com'])
def test_dhparams_file_exists(host, domain):
    """Diffie-Hellman parameters are generated and world-readable."""
    dhparams = host.file(join(cert_dir, domain, 'dhparams.pem'))
    assert dhparams.exists
    assert oct(dhparams.mode) == '0o644'
|
import random
import re
# Board constants for the Game of the Goose.
WIN_POSITION = 63                 # landing exactly here wins; overshoot bounces back
BRIDGE_HEAD = 6                   # stepping on the bridge jumps the player ahead...
BRIDGE_TAIL = 12                  # ...to this square
DICE_FACE = 6                     # faces per die
GOOSES = [5, 9, 14, 18, 23, 27]   # goose squares: the player moves again


def get_old_label(old_position):
    """Human-readable label for the square a player is leaving (0 prints as "Start")."""
    if old_position == 0:
        old_label = "Start"
    else:
        old_label = old_position
    return old_label


def get_new_label(new_position):
    """
    Label describing the square a player has just reached.

    Consistency fix: uses the named constants BRIDGE_HEAD / WIN_POSITION
    instead of repeating the magic numbers 6 and 63 (behavior unchanged).
    """
    if new_position in GOOSES:
        new_label = f"{new_position}, The Goose. "
    elif new_position == BRIDGE_HEAD:
        new_label = "The Bridge."
    elif new_position > WIN_POSITION:
        # Overshoot is reported as the winning square; the bounce itself is
        # computed by the caller.
        new_label = WIN_POSITION
    else:
        new_label = new_position
    return new_label
class Player():
    """A participant: just a name and a current board position (0 = Start)."""
    def __init__(self, name):
        self.name = name
        self.position = 0
class BillBoard():
    """Registry of players; implements the movement rules of the game."""

    def __init__(self):
        self.players = []

    def __contains__(self, player):
        # Membership is by player *name*, not object identity.
        return player.name in self.get_players_name()

    def add_player(self, player):
        """Register a player, rejecting duplicate names."""
        if player in self:
            print(player.name, ": already existing player")
        else:
            self.players.append(player)
            print("players:", ", ".join(self.get_players_name()))

    def get_player(self, name):
        """Return the first registered player with this name (StopIteration if absent)."""
        return next(filter(lambda player: player.name == name, self.players))

    def move_player_goose(self, name, n1, n2):
        """
        Apply the goose rule: the player repeats the same roll, possibly
        chaining over several goose squares; returns the final position.

        Bug fix: the recursive call's result was previously discarded, so
        after a chain of geese the caller stored an intermediate position
        instead of the final one.
        """
        player = self.get_player(name)
        player.position += (n1 + n2)
        old_position = player.position
        new_position = old_position + n1 + n2
        if new_position in GOOSES:
            print(f"{name} moves again and goes to {new_position}, The Goose. ", end="")
            new_position = self.move_player_goose(name, n1, n2)
        else:
            print(f"{name} moves again and goes to {new_position}", end="")
        return new_position

    def move_player(self, name, n1, n2):
        """Move a player by a roll of n1+n2, applying goose/bridge/win/bounce rules."""
        player = self.get_player(name)
        old_position = player.position
        new_position = old_position + n1 + n2
        old_label = get_old_label(old_position)
        new_label = get_new_label(new_position)
        print(f"{name} rolls {n1}, {n2}. {name} moves from {old_label} to {new_label}", end="")
        if new_position in GOOSES:
            new_position = self.move_player_goose(name, n1, n2)
        elif new_position == BRIDGE_HEAD:
            new_position = BRIDGE_TAIL
            print(f" {name} jumps to {BRIDGE_TAIL}", end="")
        elif new_position == WIN_POSITION:
            print(f". {name} Wins!!")
        elif new_position > WIN_POSITION:
            # Overshooting the final square bounces the player backwards.
            new_position = WIN_POSITION - (new_position - WIN_POSITION)
            print(f". {name} bounces! {name} returns to {new_position}")
        player.position = new_position
        # "Prank" rule: any other player already on the destination square is
        # sent back to this player's starting square.
        players_at_position = self.get_players_at_position(new_position, name)
        for p in players_at_position:
            new_label = get_new_label(player.position)
            p.position = old_position
            print(f". On {str(new_label).split(',')[0]} there is {p.name}, who returns to {old_label}", end="")
        print("")

    def get_players_name(self):
        """Names of all registered players, in registration order."""
        return [player.name for player in self.players]

    def get_players_at_position(self, position, name):
        """Players other than *name* currently standing on *position*."""
        return list(filter(lambda player: player.position == position and player.name != name, self.players))
def main():
    """Interactive command loop: add players and move them until 'exit'."""
    billboard = BillBoard()
    # Fixes: raw string avoids the invalid-escape-sequence warning for \w/\d
    # (a SyntaxWarning on modern Pythons); the pattern is compiled once
    # outside the loop; the redundant second p.search() call is removed —
    # the match object already carries the groups.
    move_pattern = re.compile(r'^move (\w*) (\d+), (\d+)$')
    while True:
        cmd = input('Insert your command: ')
        if cmd == 'exit':
            break
        elif cmd.startswith("add player"):
            player = Player(cmd.split()[2])
            billboard.add_player(player)
        elif cmd.startswith("move"):
            match = move_pattern.match(cmd)
            if match:
                # Explicit dice values supplied by the user.
                billboard.move_player(match.group(1), int(match.group(2)), int(match.group(3)))
            else:
                # No dice given: roll two dice for the player.
                billboard.move_player(cmd.split()[1], random.randint(1, DICE_FACE), random.randint(1, DICE_FACE))
if __name__ == '__main__':
    # Run the interactive game loop only when executed as a script.
    main()
|
"""
DogStatsd is a Python client for DogStatsd, a Statsd fork for Datadog.
"""
import logging
from random import random
from time import time
import socket
from functools import wraps
try:
from itertools import imap
except ImportError:
imap = map
log = logging.getLogger('dogstatsd')
class DogStatsd(object):
OK, WARNING, CRITICAL, UNKNOWN = (0, 1, 2, 3)
    def __init__(self, host='localhost', port=8125, max_buffer_size = 50):
        """
        Initialize a DogStatsd object.

        >>> statsd = DogStatsd()

        :param host: the host of the DogStatsd server.
        :param port: the port of the DogStatsd server.
        :param max_buffer_size: Maximum number of metric to buffer before sending to the server if sending metrics in batch
        """
        self._host = None    # populated by connect() below
        self._port = None    # populated by connect() below
        self.socket = None   # UDP socket, created by connect()
        self.max_buffer_size = max_buffer_size
        # Default send strategy: one packet per metric; open_buffer()
        # swaps this for buffered (batched) sending.
        self._send = self._send_to_server
        self.connect(host, port)
        self.encoding = 'utf-8'
def get_socket(self):
'''
Return a connected socket
'''
if not self.socket:
self.connect(self._host, self._port)
return self.socket
    def __enter__(self):
        # Entering the context switches to buffered (batched) sending.
        self.open_buffer(self.max_buffer_size)
        return self
    def __exit__(self, type, value, traceback):
        # Flush buffered metrics regardless of whether an exception occurred.
        self.close_buffer()
    def open_buffer(self, max_buffer_size=50):
        '''
        Open a buffer to send a batch of metrics in one packet.

        You can also use this as a context manager.

        >>> with DogStatsd() as batch:
        >>>     batch.gauge('users.online', 123)
        >>>     batch.gauge('active.connections', 1001)
        '''
        self.max_buffer_size = max_buffer_size
        self.buffer= []
        # Route subsequent metrics into the buffer instead of the socket.
        self._send = self._send_to_buffer
    def close_buffer(self):
        '''
        Flush the buffer and switch back to single metric packets.
        '''
        # Restore per-metric sending first so the flush itself is final.
        self._send = self._send_to_server
        self._flush_buffer()
def connect(self, host, port):
"""
Connect to the statsd server on the given host and port.
"""
self._host = host
self._port = int(port)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.connect((self._host, self._port))
def gauge(self, metric, value, tags=None, sample_rate=1):
"""
Record the value of a gauge, optionally setting a list of tags and a
sample rate.
>>> statsd.gauge('users.online', 123)
>>> statsd.gauge('active.connections', 1001, tags=["protocol:http"])
"""
return self._report(metric, 'g', value, tags, sample_rate)
def increment(self, metric, value=1, tags=None, sample_rate=1):
"""
Increment a counter, optionally setting a value, tags and a sample
rate.
>>> statsd.increment('page.views')
>>> statsd.increment('files.transferred', 124)
"""
self._report(metric, 'c', value, tags, sample_rate)
def decrement(self, metric, value=1, tags=None, sample_rate=1):
"""
Decrement a counter, optionally setting a value, tags and a sample
rate.
>>> statsd.decrement('files.remaining')
>>> statsd.decrement('active.connections', 2)
"""
self._report(metric, 'c', -value, tags, sample_rate)
def histogram(self, metric, value, tags=None, sample_rate=1):
"""
Sample a histogram value, optionally setting tags and a sample rate.
>>> statsd.histogram('uploaded.file.size', 1445)
>>> statsd.histogram('album.photo.count', 26, tags=["gender:female"])
"""
self._report(metric, 'h', value, tags, sample_rate)
def timing(self, metric, value, tags=None, sample_rate=1):
"""
Record a timing, optionally setting tags and a sample rate.
>>> statsd.timing("query.response.time", 1234)
"""
self._report(metric, 'ms', value, tags, sample_rate)
def timed(self, metric, tags=None, sample_rate=1):
"""
A decorator that will measure the distribution of a function's run
time. Optionally specify a list of tag or a sample rate.
::
@statsd.timed('user.query.time', sample_rate=0.5)
def get_user(user_id):
# Do what you need to ...
pass
# Is equivalent to ...
start = time.time()
try:
get_user(user_id)
finally:
statsd.timing('user.query.time', time.time() - start)
"""
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
start = time()
result = func(*args, **kwargs)
self.timing(metric, time() - start, tags=tags,
sample_rate=sample_rate)
return result
return wrapped
return wrapper
def set(self, metric, value, tags=None, sample_rate=1):
"""
Sample a set value.
>>> statsd.set('visitors.uniques', 999)
"""
self._report(metric, 's', value, tags, sample_rate)
def _report(self, metric, metric_type, value, tags, sample_rate):
if sample_rate != 1 and random() > sample_rate:
return
payload = [metric, ":", value, "|", metric_type]
if sample_rate != 1:
payload.extend(["|@", sample_rate])
if tags:
payload.extend(["|#", ",".join(tags)])
encoded = "".join(imap(str, payload))
self._send(encoded)
def _send_to_server(self, packet):
try:
self.socket.send(packet.encode(self.encoding))
except socket.error:
log.info("Error submitting metric, will try refreshing the socket")
self.connect(self._host, self._port)
try:
self.socket.send(packet.encode(self.encoding))
except socket.error:
log.exception("Failed to send packet with a newly binded socket")
def _send_to_buffer(self, packet):
self.buffer.append(packet)
if len(self.buffer) >= self.max_buffer_size:
self._flush_buffer()
def _flush_buffer(self):
self._send_to_server("\n".join(self.buffer))
self.buffer=[]
def _escape_event_content(self, string):
return string.replace('\n', '\\n')
def _escape_service_check_message(self, string):
return string.replace('\n', '\\n').replace('m:', 'm\:')
def event(self, title, text, alert_type=None, aggregation_key=None,
source_type_name=None, date_happened=None, priority=None,
tags=None, hostname=None):
"""
Send an event. Attributes are the same as the Event API.
http://docs.datadoghq.com/api/
>>> statsd.event('Man down!', 'This server needs assistance.')
>>> statsd.event('The web server restarted', 'The web server is up again', alert_type='success') # NOQA
"""
title = self._escape_event_content(title)
text = self._escape_event_content(text)
string = u'_e{%d,%d}:%s|%s' % (len(title), len(text), title, text)
if date_happened:
string = '%s|d:%d' % (string, date_happened)
if hostname:
string = '%s|h:%s' % (string, hostname)
if aggregation_key:
string = '%s|k:%s' % (string, aggregation_key)
if priority:
string = '%s|p:%s' % (string, priority)
if source_type_name:
string = '%s|s:%s' % (string, source_type_name)
if alert_type:
string = '%s|t:%s' % (string, alert_type)
if tags:
string = '%s|#%s' % (string, ','.join(tags))
if len(string) > 8 * 1024:
raise Exception(u'Event "%s" payload is too big (more that 8KB), '
'event discarded' % title)
try:
self.socket.send(string.encode(self.encoding))
except Exception:
log.exception(u'Error submitting event "%s"' % title)
def service_check(self, check_name, status, tags=None, timestamp=None,
hostname=None, message=None):
"""
Send a service check run.
>>> statsd.service_check('my_service.check_name', DogStatsd.WARNING)
"""
message = self._escape_service_check_message(message) if message is not None else ''
string = u'_sc|{0}|{1}'.format(check_name, status)
if timestamp:
string = u'{0}|d:{1}'.format(string, timestamp)
if hostname:
string = u'{0}|h:{1}'.format(string, hostname)
if tags:
string = u'{0}|#{1}'.format(string, ','.join(tags))
if message:
string = u'{0}|m:{1}'.format(string, message)
try:
self.socket.send(string.encode(self.encoding))
except Exception:
log.exception(u'Error submitting service check "{0}"'.format(check_name))
# Shared module-level client pointed at the default localhost:8125 server.
statsd = DogStatsd()
|
#!/usr/bin/env python
# coding: utf-8
# # 开发 AI 应用
#
# 未来,AI 算法在日常生活中的应用将越来越广泛。例如,你可能想要在智能手机应用中包含图像分类器。为此,在整个应用架构中,你将使用一个用成百上千个图像训练过的深度学习模型。未来的软件开发很大一部分将是使用这些模型作为应用的常用部分。
#
# 在此项目中,你将训练一个图像分类器来识别不同的花卉品种。可以想象有这么一款手机应用,当你对着花卉拍摄时,它能够告诉你这朵花的名称。在实际操作中,你会训练此分类器,然后导出它以用在你的应用中。我们将使用[此数据集](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html),其中包含 102 个花卉类别。你可以在下面查看几个示例。
#
# <img src='assets/Flowers.png' width=500px>
#
# 该项目分为多个步骤:
#
# * 加载和预处理图像数据集
# * 用数据集训练图像分类器
# * 使用训练的分类器预测图像内容
#
# 我们将指导你完成每一步,你将用 Python 实现这些步骤。
#
# 完成此项目后,你将拥有一个可以用任何带标签图像的数据集进行训练的应用。你的网络将学习花卉,并成为一个命令行应用。但是,你对新技能的应用取决于你的想象力和构建数据集的精力。例如,想象有一款应用能够拍摄汽车,告诉你汽车的制造商和型号,然后查询关于该汽车的信息。构建你自己的数据集并开发一款新型应用吧。
#
# 首先,导入你所需的软件包。建议在代码开头导入所有软件包。当你创建此 notebook 时,如果发现你需要导入某个软件包,确保在开头导入该软件包。
# In[13]:
# Imports here
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
from torchvision import transforms, datasets, models
import torchvision.models as models
import torch.nn.functional as F
from PIL import Image
import json
from matplotlib.ticker import FormatStrFormatter
# ## 加载数据
#
# 在此项目中,你将使用 `torchvision` 加载数据([文档](http://pytorch.org/docs/master/torchvision/transforms.html#))。数据应该和此 notebook 一起包含在内,否则你可以[在此处下载数据](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz)。数据集分成了三部分:训练集、验证集和测试集。对于训练集,你需要变换数据,例如随机缩放、剪裁和翻转。这样有助于网络泛化,并带来更好的效果。你还需要确保将输入数据的大小调整为 224x224 像素,因为预训练的网络需要这么做。
#
# 验证集和测试集用于衡量模型对尚未见过的数据的预测效果。对此步骤,你不需要进行任何缩放或旋转变换,但是需要将图像剪裁到合适的大小。
#
# 对于所有三个数据集,你都需要将均值和标准差标准化到网络期望的结果。均值为 `[0.485, 0.456, 0.406]`,标准差为 `[0.229, 0.224, 0.225]`。这样使得每个颜色通道的值位于 -1 到 1 之间,而不是 0 到 1 之间。
# In[14]:
# Sub-directories of the 'flowers' dataset root.
train_dir = 'train'
valid_dir = 'valid'
test_dir = 'test'
# In[15]:
# Training set: random augmentation + ImageNet normalization.
train_transforms = transforms.Compose([transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(0.25),
                                       transforms.RandomRotation(30),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.485, 0.456, 0.406),
                                                            (0.229, 0.224, 0.225))])
# Validation/test: no training augmentation beyond the crop to 224x224.
valid_transforms = transforms.Compose([transforms.RandomResizedCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.485, 0.456, 0.406),
                                                            (0.229, 0.224, 0.225))])
# Load the datasets with ImageFolder. The test set now uses the evaluation
# transforms (the original mistakenly applied the training augmentation to
# it) and a single path separator ('flowers//test' before).
train_datasets = datasets.ImageFolder('flowers/' + train_dir, transform=train_transforms)
valid_datasets = datasets.ImageFolder('flowers/' + valid_dir, transform=valid_transforms)
test_datasets = datasets.ImageFolder('flowers/' + test_dir, transform=valid_transforms)
# Dataloaders: larger shuffled batches for training, smaller for evaluation.
trainloader = torch.utils.data.DataLoader(train_datasets, batch_size=64, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_datasets, batch_size=32, shuffle=True)
testloader = torch.utils.data.DataLoader(test_datasets, batch_size=32, shuffle=True)
# ### 标签映射
#
# 你还需要加载从类别标签到类别名称的映射。你可以在文件 `cat_to_name.json` 中找到此映射。它是一个 JSON 对象,可以使用 [`json` 模块](https://docs.python.org/2/library/json.html)读取它。这样可以获得一个从整数编码的类别到实际花卉名称的映射字典。
# In[16]:
import json
# Map category indices ('1'..'102') to human-readable flower names.
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
# # 构建和训练分类器
#
# 数据准备好后,就开始构建和训练分类器了。和往常一样,你应该使用 `torchvision.models` 中的某个预训练模型获取图像特征。使用这些特征构建和训练新的前馈分类器。
#
# 这部分将由你来完成。如果你想与他人讨论这部分,欢迎与你的同学讨论!你还可以在论坛上提问或在工作时间内咨询我们的课程经理和助教导师。
#
# 请参阅[审阅标准](https://review.udacity.com/#!/rubrics/1663/view),了解如何成功地完成此部分。你需要执行以下操作:
#
# * 加载[预训练的网络](http://pytorch.org/docs/master/torchvision/models.html)(如果你需要一个起点,推荐使用 VGG 网络,它简单易用)
# * 使用 ReLU 激活函数和丢弃定义新的未训练前馈网络作为分类器
# * 使用反向传播训练分类器层,并使用预训练的网络获取特征
# * 跟踪验证集的损失和准确率,以确定最佳超参数
#
# 我们在下面为你留了一个空的单元格,但是你可以使用多个单元格。建议将问题拆分为更小的部分,并单独运行。检查确保每部分都达到预期效果,然后再完成下个部分。你可能会发现,当你实现每部分时,可能需要回去修改之前的代码,这很正常!
#
# 训练时,确保仅更新前馈网络的权重。如果一切构建正确的话,验证准确率应该能够超过 70%。确保尝试不同的超参数(学习速率、分类器中的单元、周期等),寻找最佳模型。保存这些超参数并用作项目下个部分的默认值。
# In[17]:
# TODO: Build and train your network
# Use the GPU when available, otherwise fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
# Pretrained DenseNet-121 backbone; the name is recorded for checkpointing.
model = models.densenet121(pretrained = True)
model.name = 'densenet121'
# In[18]:
# Freeze the backbone weights; only the new classifier head will be trained.
for param in model.parameters():
    param.requires_grad= False
# hyperparameters for classifier
# Infer the backbone's feature width from its stock Linear classifier layer.
input_size = [each.in_features for each in model.classifier.modules() if type(each) == torch.nn.modules.linear.Linear][0]
hidden_layers = [512]
output_size = 102  # number of flower categories in the dataset
drop_p = 0.5
epochs = 30
# Adding own classifier
class classifier(nn.Module):
    """Feed-forward classification head.

    Stacks Linear+ReLU+Dropout hidden layers of the given sizes and ends in
    a Linear output layer with log-softmax activation (pairs with NLLLoss).
    """

    def __init__(self, input_size, output_size, hidden_layers, drop_p):
        super().__init__()
        sizes = list(hidden_layers)
        # First hidden layer maps the backbone features; further entries in
        # `sizes` are chained pairwise.
        self.hidden_layers = nn.ModuleList([nn.Linear(input_size, sizes[0])])
        self.hidden_layers.extend(
            nn.Linear(n_in, n_out) for n_in, n_out in zip(sizes[:-1], sizes[1:]))
        self.dropout = nn.Dropout(p=drop_p)
        self.output = nn.Linear(sizes[-1], output_size)

    def forward(self, x):
        for layer in self.hidden_layers:
            x = self.dropout(F.relu(layer(x)))
        return F.log_softmax(self.output(x), dim=1)
# Replace the stock head with our classifier and move the model to `device`.
model.classifier = classifier(input_size, output_size, hidden_layers, drop_p)
model.to(device)
# In[19]:
# validation test
def validation(model, criterion, validloader):
    """Evaluate `model` over a loader.

    Returns (summed per-batch loss, summed per-batch mean accuracy); the
    caller divides both by len(loader). Uses the module-level `device`.
    """
    total_loss = 0
    total_accuracy = 0
    for images, targets in validloader:
        images, targets = images.to(device), targets.to(device)
        log_ps = model.forward(images)
        total_loss += criterion(log_ps, targets).item()
        probs = torch.exp(log_ps)
        matches = (targets.data == probs.max(dim=1)[1])
        total_accuracy += matches.type(torch.FloatTensor).mean()
    return total_loss, total_accuracy
# In[20]:
learning_rate = 0.001
# NLLLoss pairs with the classifier's log_softmax output.
criterion = nn.NLLLoss()
# Optimize only the (unfrozen) classifier parameters.
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
# In[21]:
def train(model, trainloader, validloader, criterion, optimizer, print_step=32, epochs=5):
    """Train the classifier head, logging metrics every `print_step` batches.

    :param model: network whose classifier head is being trained.
    :param trainloader: training batches.
    :param validloader: validation batches for the periodic evaluation.
    :param criterion: loss function (NLLLoss over log_softmax outputs).
    :param optimizer: optimizer over the classifier parameters.
    :param print_step: batches between validation/logging passes. The
        original re-assigned `print_step = 32` inside the body, silently
        ignoring the argument; the parameter is honored now.
    :param epochs: number of passes over the training data.
    """
    steps = 0
    running_loss = 0
    # validation result variables
    vloss = 0
    vaccuracy = 0
    for e in range(epochs):
        for images, labels in iter(trainloader):
            steps += 1
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            output = model.forward(images)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps%print_step == 0:
                # Periodic evaluation: no dropout, no autograd bookkeeping.
                model.eval()
                with torch.no_grad():
                    vloss, vaccuracy = validation(model, criterion, validloader)
                print('Epoch: {}/{}\t'.format(e+1, epochs),
                      'Train Loss: {:.3f}\t'.format(running_loss/print_step),
                      'Valid Loss: {:.3f}\t'.format(vloss/len(validloader)),
                      'Valid Accuracy: {:.3f}'.format(vaccuracy/len(validloader)*100))
                running_loss = 0
                model.train()
# In[22]:
# Track metrics on the held-out validation split during training (the
# original mistakenly passed `testloader` here, leaking the test set).
train(model.to(device), trainloader, validloader, criterion, optimizer, print_step=32, epochs=5)
# ## 测试网络
#
# 建议使用网络在训练或验证过程中从未见过的测试数据测试训练的网络。这样,可以很好地判断模型预测全新图像的效果。用网络预测测试图像,并测量准确率,就像验证过程一样。如果模型训练良好的话,你应该能够达到大约 70% 的准确率。
# In[ ]:
# TODO: Do validation on the test set
def test(model, criterion, testloader):
    """Evaluate on the test loader and print the overall accuracy."""
    model.eval()
    with torch.no_grad():
        test_loss, test_accuracy = validation(model, criterion, testloader)
        print('Test Accuracy: {:.3f}'.format(test_accuracy/len(testloader)*100))
    model.train()
# In[24]:
# Evaluate the trained model on the held-out test set.
test(model.to(device), criterion, testloader)
# ## 保存检查点
#
# 训练好网络后,保存模型,以便稍后加载它并进行预测。你可能还需要保存其他内容,例如从类别到索引的映射,索引是从某个图像数据集中获取的:`image_datasets['train'].class_to_idx`。你可以将其作为属性附加到模型上,这样稍后推理会更轻松。
# In[28]:
#注意,稍后你需要完全重新构建模型,以便用模型进行推理。确保在检查点中包含你所需的任何信息。如果你想加载模型并继续训练,则需要保存周期数量和优化器状态 `optimizer.state_dict`。你可能需要在下面的下个部分使用训练的模型,因此建议立即保存它。
# TODO: Save the checkpoint
# Persist everything needed to rebuild the model and resume training:
# architecture hyperparameters, weights, optimizer state and label mapping.
model.class_to_idx = train_datasets.class_to_idx
checkpoint = {
    'input_size': input_size,
    'output_size': output_size,
    'hidden_layer_size': [each.out_features for each in model.classifier.hidden_layers],
    'model_state_dict': model.state_dict(),
    'optimizer_state': optimizer.state_dict(),
    'class_to_idx': model.class_to_idx,
    'drop_p': drop_p,
    'learning_rate': learning_rate,
    'epochs': epochs,
    'model': model.name
}
# File name encodes the hidden-layer sizes and the backbone name.
torch.save(checkpoint,
           'checkpoint_{}_{}.pth'.format(
               "_".join([str(each.out_features) for each in model.classifier.hidden_layers]), checkpoint['model']))
# ## 加载检查点
#
# 此刻,建议写一个可以加载检查点并重新构建模型的函数。这样的话,你可以回到此项目并继续完善它,而不用重新训练网络。
# In[29]:
# TODO: Write a function that loads a checkpoint and rebuilds the model
def load_checkpoint(filepath):
    """Rebuild model and optimizer from a checkpoint saved by this notebook.

    :param filepath: path to a checkpoint produced by torch.save above.
    :return: (model, optimizer) ready to resume training or run inference.
    """
    checkpoint = torch.load(filepath)
    # Recreate the frozen pretrained backbone named in the checkpoint.
    model = getattr(models, checkpoint['model'])(pretrained=True)
    for param in model.parameters():
        param.requires_grad = False
    model.classifier = classifier(checkpoint['input_size'],
                                  checkpoint['output_size'],
                                  checkpoint['hidden_layer_size'],
                                  checkpoint['drop_p'])
    model.load_state_dict(checkpoint['model_state_dict'])
    model.class_to_idx = checkpoint['class_to_idx']
    optimizer = optim.Adam(model.classifier.parameters(), lr=checkpoint['learning_rate'])
    optimizer.load_state_dict(checkpoint['optimizer_state'])
    # Move optimizer state to the GPU only when one is present; the original
    # called v.cuda() unconditionally and crashed on CPU-only machines.
    if torch.cuda.is_available():
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.cuda()
    return model, optimizer
# # 类别推理
#
# 现在,你需要写一个使用训练的网络进行推理的函数。即你将向网络中传入一个图像,并预测图像中的花卉类别。写一个叫做 `predict` 的函数,该函数会接受图像和模型,然后返回概率在前 $K$ 的类别及其概率。应该如下所示:
# In[27]:
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163  0.01541934  0.01452626  0.01443549  0.01407339]
# > ['70', '3', '45', '62', '55']
# 首先,你需要处理输入图像,使其可以用于你的网络。
#
# ## 图像处理
#
# 你需要使用 `PIL` 加载图像([文档](https://pillow.readthedocs.io/en/latest/reference/Image.html))。建议写一个函数来处理图像,使图像可以作为模型的输入。该函数应该按照训练的相同方式处理图像。
#
# 首先,调整图像大小,使最小的边为 256 像素,并保持宽高比。为此,可以使用 [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) 或 [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) 方法。然后,你需要从图像的中心裁剪出 224x224 的部分。
#
# 图像的颜色通道通常编码为整数 0-255,但是该模型要求值为浮点数 0-1。你需要变换值。使用 Numpy 数组最简单,你可以从 PIL 图像中获取,例如 `np_image = np.array(pil_image)`。
#
# 和之前一样,网络要求图像按照特定的方式标准化。均值应标准化为 `[0.485, 0.456, 0.406]`,标准差应标准化为 `[0.229, 0.224, 0.225]`。你需要用每个颜色通道减去均值,然后除以标准差。
#
# 最后,PyTorch 要求颜色通道为第一个维度,但是在 PIL 图像和 Numpy 数组中是第三个维度。你可以使用 [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html)对维度重新排序。颜色通道必须是第一个维度,并保持另外两个维度的顺序。
# In[30]:
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a channel-first torch tensor (float64).
    '''
    # TODO: Process a PIL image for use in a PyTorch model
    img = Image.open(image)
    # NOTE(review): thumbnail() caps the *largest* side at 256, whereas the
    # spec asks for the shortest side to be 256 -- confirm intended behavior.
    tsize = (256, 256)
    img.thumbnail(tsize)
    # Center-crop a 224x224 region; coordinates can be .5 floats, which
    # PIL's crop() accepts.
    lwsize = (img.size[0] - 224)/2
    thsize = (img.size[1] - 224)/2
    rwsize = (img.size[0] + 224)/2
    bhsize = (img.size[1] + 224)/2
    img = img.crop((lwsize, thsize, rwsize, bhsize))
    # ImageNet normalization: scale to [0, 1], subtract mean, divide by std.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    npimg = np.array(img)
    npimg = npimg/255
    npimg = (npimg - mean)/std
    # HWC -> CHW for PyTorch.
    npimg = npimg.transpose((2,0,1))
    return torch.from_numpy(npimg)
# 要检查你的项目,可以使用以下函数来转换 PyTorch 张量并将其显示在 notebook 中。如果 `process_image` 函数可行,用该函数运行输出应该会返回原始图像(但是剪裁掉的部分除外)。
# In[31]:
def imshow(image, ax=None, title=None):
    """Display a normalized CHW tensor as an image on `ax` (created if None)."""
    if ax is None:
        fig, ax = plt.subplots()
    # Tensors are channel-first; matplotlib expects channel-last.
    img = image.numpy().transpose((1, 2, 0))
    # Undo the ImageNet normalization applied during preprocessing, then
    # clip to [0, 1] so the result doesn't render as noise.
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    img = np.clip(channel_std * img + channel_mean, 0, 1)
    ax.imshow(img)
    return ax
# ## 类别预测
#
# 可以获得格式正确的图像后
#
# 要获得前 $K$ 个值,在张量中使用 [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk)。该函数会返回前 `k` 个概率和对应的类别索引。你需要使用 `class_to_idx`(希望你将其添加到了模型中)将这些索引转换为实际类别标签,或者从用来加载数据的[ `ImageFolder`](https://pytorch.org/docs/master/torchvision/datasets.html?highlight=imagefolder#torchvision.datasets.ImageFolder)进行转换。确保颠倒字典
#
# 同样,此方法应该接受图像路径和模型检查点,并返回概率和类别。
# In[32]:
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163  0.01541934  0.01452626  0.01443549  0.01407339]
# > ['70', '3', '45', '62', '55']
# In[33]:
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Returns the torch.topk pair (top-k probabilities, top-k class indices).
    '''
    model.to(device)
    model.eval()
    image = process_image(image_path)
    # Tensor.to() is NOT in-place: the original discarded its result and then
    # hard-coded torch.cuda.FloatTensor, which broke CPU inference. Cast to
    # float32 and move to whatever `device` is in one expression.
    image = image.unsqueeze(0).float().to(device)
    output = model.forward(image)
    ps = torch.exp(output)
    model.train()
    return ps.topk(topk)
# ## 检查运行状况
#
# 你已经可以使用训练的模型做出预测,现在检查模型的性能如何。即使测试准确率很高,始终有必要检查是否存在明显的错误。使用 `matplotlib` 将前 5 个类别的概率以及输入图像绘制为条形图,应该如下所示:
#
# <img src='assets/inference_example.png' width=300px>
#
# 你可以使用 `cat_to_name.json` 文件(应该之前已经在 notebook 中加载该文件)将类别整数编码转换为实际花卉名称。要将 PyTorch 张量显示为图像,请使用定义如下的 `imshow` 函数。
# In[35]:
# TODO: Display an image along with the top 5 classes
import seaborn as sb
image_path = 'flowers/test/28/image_05230.jpg'
# Top-5 probabilities and dataset class indices for the sample image.
probs, labels = predict(image_path, model)
ps = [x for x in probs.cpu().detach().numpy()[0]]
npar = [x for x in labels.cpu().numpy()[0]]
names = list()
# Invert class_to_idx so model output indices map back to category labels.
inv_mapping = {v: k for k, v in model.class_to_idx.items()}
for i in npar:
    names.append(cat_to_name[str(inv_mapping[i])])
# Input image on top, horizontal probability bar chart below.
imshow(process_image(image_path), ax=plt.subplot(2,1,1));
plt.title(cat_to_name['28'])
plt.subplot(2,1,2)
sb.barplot(y=names, x=ps, color=sb.color_palette()[0]);
# In[ ]:
# In[ ]:
# In[ ]:
|
# -*- coding: utf-8 -*-
import time
import numpy as np
import scipy.integrate
import matplotlib.pyplot as plt
from pyequion2 import InterfaceSystem
from pyequion2 import water_properties
def reynolds_number(flow_velocity, pipe_diameter, TK=298.15): #Dimensionless
    """Pipe Reynolds number from velocity [m/s], diameter [m], temperature [K]."""
    nu = water_properties.water_kinematic_viscosity(TK)
    return flow_velocity * pipe_diameter / nu
def darcy_friction_factor(flow_velocity, pipe_diameter, TK=298.15):
    """Darcy friction factor: laminar 64/Re below Re=2300, Blasius above."""
    re_num = reynolds_number(flow_velocity, pipe_diameter, TK)
    if re_num >= 2300:  # Blasius correlation for turbulent flow
        return 0.316*re_num**(-1./4)
    return 64/re_num
def shear_velocity(flow_velocity, pipe_diameter, TK=298.15):
    """Wall friction velocity u* = sqrt(f/8) * U."""
    friction = darcy_friction_factor(flow_velocity, pipe_diameter, TK)
    return np.sqrt(friction/8.0)*flow_velocity
# Chemical system: Ca/C/Mg with calcite and dolomite as interface phases.
elements = ['Ca', 'C', 'Mg']
intsys = InterfaceSystem(elements, from_elements=True)
intsys.set_interface_phases(['Calcite', 'Dolomite'])
# Operating conditions.
TK = 298.15
pipe_diameter = 0.01 #m
flow_velocity = 1.0
pipe_length = 80.0 #m
pipe_time = pipe_length/flow_velocity  # residence time [s]
# Boundary condition and inlet molalities.
co2_flash_value = 0.001
initial_ca_value = 0.02
initial_mg_value = 0.01
transport_params = {'type': 'pipe',
                    'shear_velocity': shear_velocity(flow_velocity, pipe_diameter, TK)}
# Warm-start caches reused between successive ODE right-hand-side calls.
solution_stats = {'res': None, 'x': 'default'}
solution_stats_int = {'res': None, 'x': 'default'}
def f(t, y):
    """ODE right-hand side for solve_ivp: y = [Ca, Mg] bulk molalities.

    Solves bulk equilibrium then interface equilibrium at the current
    composition and converts the wall reaction fluxes to bulk concentration
    rates. Both solvers are warm-started from the previous call through the
    module-level solution_stats dicts.
    """
    global solution_stats
    global solution_stats_int
    molal_balance = {'Ca': y[0], 'Mg': y[1], 'CO2': co2_flash_value}
    solution, solution_stats = intsys.solve_equilibrium_mixed_balance(TK,
                                molal_balance=molal_balance,
                                tol=1e-6,
                                initial_guess=solution_stats['x'])
    molals_bulk = solution.solute_molals
    solution_int, solution_stats_int = intsys.solve_interface_equilibrium(TK,
                                molals_bulk,
                                transport_params,
                                tol=1e-6,
                                initial_guess=solution_stats_int['x'])
    elements_reaction_fluxes = solution_int.elements_reaction_fluxes
    # Convert wall flux [per area] to bulk rate: multiply by surface-to-volume
    # ratio 4/D and divide by water density.
    wall_scale = 4/(pipe_diameter*water_properties.water_density(TK))
    dy = -wall_scale*np.array(
        [elements_reaction_fluxes['Ca'], elements_reaction_fluxes['Mg']])
    return dy
# Integrate the composition along the pipe (time == axial position / velocity).
initial_vector = np.array([initial_ca_value, initial_mg_value])
start_time = time.time()
sol = scipy.integrate.solve_ivp(f, (0.0, pipe_time), initial_vector,
                                t_eval = np.linspace(0.0, pipe_time, 101))
elapsed_time = time.time() - start_time
# Plot Ca and Mg concentration profiles.
plt.plot(sol.t, sol.y[0], label='Ca')
plt.plot(sol.t, sol.y[1], label='Mg')
plt.xlabel('t')
plt.ylabel('c')
plt.legend()
plt.show()
|
# -*- coding: utf-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Joe Budden TV
# Addon id: plugin.audio.jbuddentv
# Addon Provider: MuadDib
#######################################################################
#Import Modules Section
import urllib2
from glo_var import *
#######################################################################
#######################################################################
# Handles getting contents of the menu file and returning them
# Handles getting contents of the menu file and returning them
def openMenuFile(menuFile):
    """Fetch the remote menu file (with the addon's User-Agent) and return
    its raw contents."""
    request = urllib2.Request(menuFile)
    request.add_header('User-Agent', USER_AGENT)
    handle = urllib2.urlopen(request)
    contents = handle.read()
    handle.close()
    return contents
####################################################################### |
import os
from img_classify import *
try:
    from PIL import Image
    import discord
    from discord.ext import commands
    from dotenv import load_dotenv
except ImportError:
    # Best-effort install of missing packages, then retry ALL the imports.
    # The original retried only `commands` and `load_dotenv`, so `discord`
    # and `Image` stayed undefined on this path; it also never installed
    # Pillow and used a bare `except:`.
    os.system("pip3 install discord.py")
    os.system("pip3 install python-dotenv")
    os.system("pip3 install Pillow")
    from PIL import Image
    import discord
    from discord.ext import commands
    from dotenv import load_dotenv
load_dotenv()
# Bot token comes from the environment (.env file).
TOKEN = os.getenv('DISCORD_TOKEN')
bot = commands.Bot(command_prefix='!')
@bot.listen('on_ready')
async def ready_function():
    """Log once the bot has connected to Discord."""
    print("Connected")
@bot.command(name='test')
async def test_function(ctx):
    """`!test` command: reply with a fixed message to verify the bot is alive."""
    await ctx.send("Testing")
# File extensions treated as images when scanning messages.
pic_ext = ['.jpg', '.png', '.jpeg']
async def classify_and_detect(message, addr, top_image_addr):
    """Run nudity detection on the saved image; when anything is detected,
    delete the offending message and post the censored version instead."""
    det_boxes, non_nude = detect(addr, top_image_addr)
    non_nude.save("./images/non_nude.png")
    channel = message.channel
    if len(det_boxes) > 0:
        await message.delete()
        await channel.send(
            "According to that image, you are horny. I have deleted that image for Zeeshan's protection.")
        await channel.send(file=discord.File('./images/non_nude.png'))
@bot.listen('on_message')
async def message_function(message):
    """Scan incoming messages for image attachments or image URLs and run
    them through classify_and_detect."""
    if message.author.bot:
        # Ignore other bots (and ourselves).
        return
    print(f"{message.author.name} sent a message")
    if message.attachments != []:
        for attachment in message.attachments:
            for ext in pic_ext:
                if attachment.url.endswith(ext):
                    save_img(attachment.url, ext)
                    await classify_and_detect(message, f"./images/test_image{ext}", "./images/pumpkin.png")
    else:
        # No attachments: check whether the message body itself is an image URL.
        for ext in pic_ext:
            if message.content.startswith('https://') and message.content.endswith(ext):
                print(message.content)
                save_img(message.content, ext)
                await classify_and_detect(message, f"./images/test_image{ext}", "./images/pumpkin.png")
# Blocks until the bot is stopped; requires DISCORD_TOKEN in the environment.
bot.run(TOKEN)
|
#!/usr/bin/env python
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torchsummary import summary
import sys
sys.path.append('../')
from wideresnet import Wide_ResNet
'''
Function that loads the dataset and returns the data-loaders
'''
def getData(batch_size,test_batch_size,val_percentage):
    """Build the CIFAR-10 train/validation/test DataLoaders.

    Training data gets augmentation (crop, flip, rotation, color jitter);
    validation and test get normalization only.
    """
    # Augmented training pipeline.
    train_tf = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        torchvision.transforms.RandomRotation(20),
        torchvision.transforms.ColorJitter(brightness=0.03, contrast=0.03, saturation=0.03, hue=0.03),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    # Evaluation pipeline: same normalization, no augmentation.
    eval_tf = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])
    full_train = torchvision.datasets.CIFAR10('./data', train=True, transform=train_tf, download=True)
    test_set = torchvision.datasets.CIFAR10('./data', train=False, transform=eval_tf, download=True)
    # Split off a validation subset (the +1 keeps the historical split sizes).
    total = len(full_train)
    n_train = int((1-val_percentage)*total+1)
    train_set, val_set = torch.utils.data.random_split(full_train, [n_train, total - n_train])
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=2)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=2)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=test_batch_size, shuffle=False, num_workers=2)
    return train_loader, val_loader, test_loader
'''
Function to test that returns the loss per sample and the total accuracy
'''
def test(data_loader,net,cost_fun,device):
net.eval()
samples = 0.
cumulative_loss = 0.
cumulative_accuracy = 0.
for batch_idx, (inputs,targets) in enumerate(data_loader):
inputs = inputs.to(device)
targets = targets.to(device)
outputs = net(inputs)[0]
loss = cost_fun(outputs,targets)
# Metrics computation
samples+=inputs.shape[0]
cumulative_loss += loss.item()
_, predicted = outputs.max(1)
cumulative_accuracy += predicted.eq(targets).sum().item()
return cumulative_loss/samples, cumulative_accuracy/samples*100
'''
Function to train the nework on the data for one epoch that returns the loss per sample and the total accuracy
'''
def train(data_loader,net,cost_fun,device,optimizer):
    """Run one training epoch; return (loss per sample, accuracy percent)."""
    net.train()
    seen = 0.
    loss_total = 0.
    correct = 0.
    for inputs, targets in data_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        outputs = net(inputs)[0]  # model returns a tuple; logits first
        batch_loss = cost_fun(outputs, targets)
        # Backprop, update, then clear gradients for the next batch.
        batch_loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        # Metrics computation
        seen += inputs.shape[0]
        loss_total += batch_loss.item()
        correct += outputs.max(1)[1].eq(targets).sum().item()
    return loss_total/seen, correct/seen*100
def main(epochs, batch_size, test_batch_size,val_percentage,lr,test_freq,device,save_filename, res_depth, res_width):
    """Training driver: build a WRN-depth-width, train it with SGD and a
    multi-step LR schedule, evaluate periodically, save the weights, and
    finally reload them into a fresh network as a sanity check."""
    print('Architecture: WRN-' + str(res_depth) + '-' + str(res_width))
    print('Epochs: ' + str(epochs) + ' batch_size: ' + str(batch_size) + ' test_batch_size: ' + str(test_batch_size))
    print('Save and test frequency ' + str(test_freq) + ' model filename: ' + str(save_filename))
    print('LR: ' + str(lr) + ' momentum: ' + str(0.9) + ' weight decay: ' + str(5e-4))
    # Original log line was malformed (missing ',' and closing ']').
    print('LR Scheduler: gamma= ' + str(0.2) + ' steps: [' + str(int(epochs*0.3)) + ',' + str(int(epochs*0.6)) + ',' + str(int(epochs*0.8)) + ']')
    print('data augmentation: random crop 32, padding 4, random horizontal flip, random rotation 20,ColorJitter(brightness=0.03, contrast=0.03, saturation=0.03, hue=0.03')
    # Define cost function
    cost_function = torch.nn.CrossEntropyLoss()
    # Create the network: Wide_ResNet(depth, width, dropout, num_classes)
    net = Wide_ResNet(res_depth,res_width,0,10)
    net = net.to(device)
    #summary(net,input_size=(3,32,32))
    # Create the optimizer and the learning rate scheduler
    optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                milestones=[int(epochs*0.3),int(epochs*0.6),int(epochs*0.8)], gamma=0.20)
    # Get the data
    train_loader, val_loader, test_loader = getData(batch_size,test_batch_size,val_percentage)
    for e in range(epochs):
        net.train()
        train_loss, train_accuracy = train(train_loader,net,cost_function,device,optimizer)
        val_loss, val_accuracy = test(val_loader,net,cost_function,device)
        scheduler.step()
        print('Epoch: {:d}:'.format(e+1))
        print('\t Training loss: \t {:.6f}, \t Training accuracy \t {:.2f}'.format(train_loss, train_accuracy))
        print('\t Validation loss: \t {:.6f},\t Validation accuracy \t {:.2f}'.format(val_loss, val_accuracy))
        if((e) % test_freq) == 0:
            test_loss, test_accuracy = test(test_loader,net,cost_function,device)
            print('Test loss: \t {:.6f}, \t \t Test accuracy \t {:.2f}'.format(test_loss, test_accuracy))
            torch.save(net.state_dict(), save_filename)
    print('After training:')
    train_loss, train_accuracy = test(train_loader,net,cost_function,device)
    val_loss, val_accuracy = test(val_loader,net,cost_function,device)
    test_loss, test_accuracy = test(test_loader,net,cost_function,device)
    print('\t Training loss: \t {:.6f}, \t Training accuracy \t {:.2f}'.format(train_loss, train_accuracy))
    print('\t Validation loss: \t {:.6f},\t Validation accuracy \t {:.2f}'.format(val_loss, val_accuracy))
    print('Test loss: \t {:.6f}, \t \t Test accuracy \t {:.2f}'.format(test_loss, test_accuracy))
    torch.save(net.state_dict(), save_filename)
    # Reload the saved weights into a fresh net of the SAME architecture.
    # The original built Wide_ResNet(16, 2, ...) (wrong shape) and then
    # overwrote net2 with `net.to(device)`, so the reload check never
    # actually exercised the saved file.
    net2 = Wide_ResNet(res_depth, res_width, 0, 10)
    net2 = net2.to(device)
    net2.load_state_dict(torch.load(save_filename))
    print('loaded net test:')
    test_loss, test_accuracy = test(test_loader,net2,cost_function,device)
    print('\t Test loss: \t {:.6f}, \t Test accuracy \t {:.2f}'.format(test_loss, test_accuracy))
# Parameters
epochs = 352
batch_size = 128
test_batch_size = 128
val_percentage = 0.01  # fraction of the training set held out for validation
lr = 0.1
test_freq = 20  # epochs between test-set evaluations / checkpoint saves
device = 'cuda:0'
save_filename = './teacher-40-1.pth'
# Teacher network architecture: WRN-40-1.
res_depth = 40
res_width = 1
# Call the main
main(epochs, batch_size, test_batch_size,val_percentage,lr,test_freq,device,save_filename, res_depth, res_width)
|
# -*- coding: utf-8 -*-
"""
Created on 2019/7/17
File run.py
@author:ZhengYuwei
"""
import os
import logging
import numpy as np
import tensorflow as tf
from tensorflow import keras
from logging.handlers import RotatingFileHandler
from multi_label.trainer import MultiLabelClassifier
from configs import FLAGS
# Eager execution is enabled only for the standalone test path.
if FLAGS.mode == 'test':
    tf.enable_eager_execution()
# Keras learning phase: train/debug enable training behavior (e.g. dropout).
if FLAGS.mode in ('train', 'debug'):
    keras.backend.set_learning_phase(True)
else:
    keras.backend.set_learning_phase(False)
# Fix random seeds for reproducibility.
np.random.seed(6)
tf.set_random_seed(800)
def generate_logger(filename, **log_params):
    """Configure the root logger with a rotating file handler plus console output.

    :param filename: path of the log file.
    :param log_params: optional settings; 'level' defaults to logging.INFO.
    :return: None (handlers are attached to the root logger in place).
    """
    level = log_params.setdefault('level', logging.INFO)
    root_logger = logging.getLogger()
    root_logger.setLevel(level=level)
    fmt = logging.Formatter('%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s')
    # Rotate at 1 MB, keeping at most 3 backup files.
    file_handler = RotatingFileHandler(filename, maxBytes=1 * 1024 * 1024, backupCount=3)
    file_handler.setFormatter(fmt)
    # Mirror the log to the console.
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    root_logger.addHandler(file_handler)
    root_logger.addHandler(console)
def run():
    """Entry point: configure the TF session and logging, then dispatch on FLAGS.mode."""
    # GPU mode
    if FLAGS.gpu_mode != MultiLabelClassifier.CPU_MODE:
        os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.visible_gpu
        # tf.device('/gpu:{}'.format(FLAGS.gpu_device))
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True  # allocate GPU memory on demand
        sess = tf.Session(config=config)
        """
        # 添加debug:nan或inf过滤器
        from tensorflow.python import debug as tf_debug
        from tensorflow.python.debug.lib.debug_data import InconvertibleTensorProto
        sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        # nan过滤器
        def has_nan(datum, tensor):
            _ = datum  # Datum metadata is unused in this predicate.
            if isinstance(tensor, InconvertibleTensorProto):
                # Uninitialized tensor doesn't have bad numerical values.
                # Also return False for data types that cannot be represented as numpy
                # arrays.
                return False
            elif (np.issubdtype(tensor.dtype, np.floating) or
                  np.issubdtype(tensor.dtype, np.complex) or
                  np.issubdtype(tensor.dtype, np.integer)):
                return np.any(np.isnan(tensor))
            else:
                return False
        # inf过滤器
        def has_inf(datum, tensor):
            _ = datum  # Datum metadata is unused in this predicate.
            if isinstance(tensor, InconvertibleTensorProto):
                # Uninitialized tensor doesn't have bad numerical values.
                # Also return False for data types that cannot be represented as numpy
                # arrays.
                return False
            elif (np.issubdtype(tensor.dtype, np.floating) or
                  np.issubdtype(tensor.dtype, np.complex) or
                  np.issubdtype(tensor.dtype, np.integer)):
                return np.any(np.isinf(tensor))
            else:
                return False
        # 添加过滤器
        sess.add_tensor_filter("has_nan", has_nan)
        sess.add_tensor_filter("has_inf", has_inf)
        sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
        """
        keras.backend.set_session(sess)
    generate_logger(filename=FLAGS.log_path)
    logging.info('TensorFlow version: %s', tf.__version__)  # 1.13.1
    logging.info('Keras version: %s', keras.__version__)  # 2.2.4-tf
    classifier = MultiLabelClassifier()
    # Model training.
    if FLAGS.mode == 'train':
        train_dataset = classifier.prepare_data(FLAGS.train_label_path, FLAGS.train_set_dir, FLAGS.is_augment)
        classifier.train(train_dataset, None)
        logging.info('训练完毕!')
    # Testing.
    elif FLAGS.mode == 'test':
        # Testing runs on a single GPU; a multi-GPU model must first be
        # converted to a single-GPU model, then tested.
        if FLAGS.gpu_num > 1:
            classifier.convert_multi2single()
            logging.info('多GPU训练模型转换单GPU运行模型成功,请使用单GPU测试!')
            return
        total_test, wrong_count, great_total_count, great_wrong_count, great_wrong_records = test_model(classifier)
        logging.info('预测总数:%d\t 错误数:%d', total_test, wrong_count)
        logging.info('大于置信度总数:%d\t 错误数:%d\t 准确率:%f', great_total_count, great_wrong_count,
                     1 - great_wrong_count/(great_total_count + 1e-7))
        # logging.info('错误路径是:\n%s', great_wrong_records)
        logging.info('测试完毕!')
    # Debugging: inspect per-layer outputs/gradients of the trained model.
    elif FLAGS.mode == 'debug':
        import cv2
        train_dataset = classifier.prepare_data(FLAGS.train_label_path, FLAGS.train_set_dir, FLAGS.is_augment)
        get_trainable_layers = classifier.get_trainable_layers_func()
        for images, labels in train_dataset:
            cv2.imshow('a', np.array(images[0]))
            cv2.waitKey(1)
            outputs = get_trainable_layers(images)  # output of each trainable layer
            gradients = classifier.get_gradients(images, labels)  # parameter gradients of each trainable layer
            assert outputs is not None
            assert gradients is not None
            logging.info("=============== debug ================")
    # Export a mobile (frozen graph) pb model.
    elif FLAGS.mode == 'save_pb':
        # Remember to disable eager execution when saving.
        classifier.save_mobile()
    # Export a TF-Serving pb model.
    elif FLAGS.mode == 'save_serving':
        # Remember to disable eager execution when saving.
        classifier.save_serving()
    else:
        raise ValueError('Mode Error!')
def test_model(classifier):
    """Evaluate the trained multi-label classifier on the test set.

    :param classifier: trained multi-label classification model
    :return: (total tested, total wrong, count above the confidence baseline,
        wrong count above the baseline, paths of wrong above-baseline samples)
    """
    # import cv2
    # Test set yields (image, labels, image_path).
    test_set = classifier.prepare_data(FLAGS.test_label_path, FLAGS.test_set_dir, is_augment=False, is_test=True)
    base_conf = FLAGS.base_confidence  # confidence baseline
    # Ground truth, predicted label and probability: (num label heads, num samples).
    total_test = int(np.ceil(FLAGS.val_set_size / FLAGS.batch_size) * FLAGS.batch_size)
    truth = np.zeros(shape=(len(FLAGS.output_shapes), total_test))
    pred = np.zeros(shape=(len(FLAGS.output_shapes), total_test))
    prob = np.zeros(shape=(len(FLAGS.output_shapes), total_test))
    start_index, end_index = 0, FLAGS.batch_size
    great_wrong_records = list()  # paths of wrong predictions above the baseline
    for images, labels, paths in test_set:
        great_wrong_records = np.concatenate((great_wrong_records, np.array(paths)), axis=0)
        truth[:, start_index:end_index] = np.array(labels)
        results = classifier.predict(np.array(images))
        pred[:, start_index:end_index] = np.array([np.argmax(result, axis=-1) for result in results])
        prob[:, start_index:end_index] = np.array([np.max(result, axis=-1) for result in results])
        start_index, end_index = end_index, end_index + FLAGS.batch_size
        logging.info('finish: %d/%d', start_index, total_test)
        if start_index >= total_test:
            break
    # Compare truth vs pred, and prob vs base_conf, to compute the metrics.
    valid_mask = (truth != -1)  # valid positions (unknown/invalid labels are -1)
    wrong_mask = abs(pred - truth) > 0.5  # wrongly predicted positions
    great_conf_mask = (prob >= base_conf)  # positions above the confidence baseline
    wrong_result = np.any(valid_mask & wrong_mask, axis=0)
    great_conf_result = np.all(~valid_mask | great_conf_mask, axis=0)
    # Totals: wrong, above-baseline, and wrong-above-baseline.
    wrong_count = np.sum(wrong_result)
    great_total_count = np.sum(great_conf_result)
    great_wrong_count = np.sum(wrong_result & great_conf_result)
    # Record paths of above-baseline wrong predictions.
    if np.any(wrong_result & great_conf_result):
        great_wrong_records = [u.decode() for u in great_wrong_records[wrong_result & great_conf_result]]
    # plot_confusion_matrix(truth, pred)
    return total_test, wrong_count, great_total_count, great_wrong_count, great_wrong_records
def plot_confusion_matrix(y_trues, y_preds):
    """Draw and save one confusion matrix per output head, skipping -1 labels."""
    from utils import draw_tools
    for head in range(y_trues.shape[0]):
        keep = (y_trues[head] != -1)  # -1 marks unknown/invalid labels
        class_names = ['cls_{}'.format(c) for c in range(FLAGS.output_shapes[head])]
        draw_tools.plot_confusion_matrix(y_trues[head][keep], y_preds[head][keep],
                                         class_names,
                                         FLAGS.output_names[head], is_save=True)
    return
if __name__ == '__main__':
    # Entry point: behavior is selected by FLAGS.mode.
    run()
|
import sys
from os.path import dirname, abspath
# Make the project root importable so `extensions` resolves when this test
# file is executed directly.
sys.path.append(dirname(dirname(abspath(__file__))))
from unittest import TestCase
from unittest import main
from extensions.Util import Util
class TestUtilConfig(TestCase):
    """Unit tests for Util's configuration lookups.

    Fix: ``assertEquals`` is a deprecated alias (removed in Python 3.12);
    use ``assertEqual`` throughout.
    """

    def test_getConfigbrowser(self):
        result = Util.getConfig("browser")
        expected = "phantomjs"
        self.assertEqual(expected, result)

    def test_getConfighttpport(self):
        result = Util.getConfig("http-port")
        expected = 9000
        self.assertEqual(expected, result)

    def test_getConfigtimeout(self):
        result = Util.getConfig("timeout")
        expected = 7
        self.assertEqual(expected, result)

    def test_getBouncer(self):
        result = Util.getBouncer()
        expected = "http://localhost:9000/bouncer"
        self.assertEqual(expected, result)

    def test_getLogger(self):
        result = Util.getLogger()
        expected = "http://localhost:9000/logger"
        self.assertEqual(expected, result)
if __name__ == '__main__':
    # Allow running this test module directly with unittest's CLI runner.
    main()
|
from flask import Flask, render_template, request
import pickle
import numpy as np

app = Flask(__name__)
# NOTE(review): hard-coded absolute Windows paths make this machine-specific;
# consider paths relative to this file. pickle.load is only acceptable here
# because the files are produced locally by a trusted training script.
model = pickle.load(open("C:/Users/Omneya Essam/Desktop/tutorial/model.pkl", "rb"))
vectorizer = pickle.load(open("C:/Users/Omneya Essam/Desktop/tutorial/Vectorizer.pkl", "rb"))
@app.route('/<string:job>', methods=['GET'])
def returnJob(job):
    """Vectorize the job title from the URL and return the predicted industry."""
    features = vectorizer.transform([job])
    prediction = model.predict(features)
    return {'Industry: ': prediction[0]}
if __name__ == '__main__':
    # Development server only; use a production WSGI server when deploying.
    app.run(debug=True)
|
# Generated by Django 3.1 on 2020-09-02 20:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: repoint OrderItem.product at shopping.Product with
    CASCADE deletion and a reverse accessor named ``orders``."""

    dependencies = [
        ('shopping', '0029_auto_20200830_1157'),
        ('order', '0012_auto_20200824_2040'),
    ]

    operations = [
        migrations.AlterField(
            model_name='orderitem',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='shopping.product'),
        ),
    ]
|
import warnings
import astropy.units as u
import numpy as np
from einsteinpy import constant
from einsteinpy.coordinates import BoyerLindquistDifferential, CartesianDifferential
from einsteinpy.integrators import RK45
from einsteinpy.utils import kerr_utils, schwarzschild_radius
# SI numeric values of the gravitational constant and the speed of light,
# taken from einsteinpy's constant module.
_G = constant.G.value
_c = constant.c.value
class Kerr:
    """
    Class for defining Kerr geometry methods
    """

    @u.quantity_input(time=u.s, M=u.kg)
    def __init__(self, bl_coords, M, time):
        """
        Parameters
        ----------
        bl_coords : ~einsteinpy.coordinates.velocity.BoyerLindquistDifferential
            Initial positions and velocities of the particle.
        M : ~astropy.units.quantity.Quantity
            Mass of the massive body.
        time : ~astropy.units.quantity.Quantity
            Time of start.
        """
        self.input_coords = bl_coords
        self.M = M
        self.a = self.input_coords.a.to(u.m)
        self.time = time
        pos_vec, vel_vec = (
            self.input_coords.si_values()[:3],
            self.input_coords.si_values()[3:],
        )
        # Time component of the initial 4-velocity.
        time_vel = kerr_utils.kerr_time_velocity(pos_vec, vel_vec, self.M, self.a.value)
        # State vector: [t, r, theta, phi, v_t, v_r, v_theta, v_phi].
        self.initial_vec = np.hstack(
            (self.time.value, pos_vec, time_vel.value, vel_vec)
        )
        self.scr = schwarzschild_radius(M)

    @classmethod
    @u.quantity_input(time=u.s, M=u.kg, a=u.m)
    def from_coords(cls, coords, M, q=None, Q=None, time=0 * u.s, a=0 * u.m):
        """
        Constructor

        Parameters
        ----------
        coords : ~einsteinpy.coordinates.velocity.CartesianDifferential
            Object having both initial positions and velocities of particle in Cartesian Coordinates
        M : ~astropy.units.quantity.Quantity
            Mass of the body
        a : ~astropy.units.quantity.Quantity
            Spin factor of the massive body. Angular momentum divided by mass divided by speed of light.
        time : ~astropy.units.quantity.Quantity
            Time of start, defaults to 0 seconds.

        Notes
        -----
        ``q`` and ``Q`` are unused; they are kept for signature compatibility
        with the charged-geometry constructors.
        """
        # Cartesian and Spherical differentials expose the same bl_differential
        # conversion, so the two originally identical branches are merged.
        if coords.system in ("Cartesian", "Spherical"):
            bl_coords = coords.bl_differential(a)
            return cls(bl_coords, M, time)
        return cls(coords, M, time)

    def f_vec(self, ld, vec):
        """Right-hand side of the geodesic ODE system.

        ``ld`` (the affine parameter) is unused because the system is
        autonomous; ``vec`` is the 8-component state vector.
        """
        chl = kerr_utils.christoffels(vec[1], vec[2], self.M.value, self.a.value)
        vals = np.zeros(shape=(8,), dtype=float)
        # d(coordinates)/d(lambda) = velocities.
        for i in range(4):
            vals[i] = vec[i + 4]
        # Geodesic equation: d(velocity)/d(lambda) = -Gamma^mu_ab v^a v^b.
        vals[4] = -2.0 * (
            chl[0, 0, 1] * vec[4] * vec[5]
            + chl[0, 0, 2] * vec[4] * vec[6]
            + chl[0, 1, 3] * vec[5] * vec[7]
            + chl[0, 2, 3] * vec[6] * vec[7]
        )
        vals[5] = -1.0 * (
            chl[1, 0, 0] * vec[4] * vec[4]
            + 2 * chl[1, 0, 3] * vec[4] * vec[7]
            + chl[1, 1, 1] * vec[5] * vec[5]
            + 2 * chl[1, 1, 2] * vec[5] * vec[6]
            + chl[1, 2, 2] * vec[6] * vec[6]
            + chl[1, 3, 3] * vec[7] * vec[7]
        )
        vals[6] = -1.0 * (
            chl[2, 0, 0] * vec[4] * vec[4]
            + 2 * chl[2, 0, 3] * vec[4] * vec[7]
            + chl[2, 1, 1] * vec[5] * vec[5]
            + 2 * chl[2, 1, 2] * vec[5] * vec[6]
            + chl[2, 2, 2] * vec[6] * vec[6]
            + chl[2, 3, 3] * vec[7] * vec[7]
        )
        vals[7] = -2.0 * (
            chl[3, 0, 1] * vec[4] * vec[5]
            + chl[3, 0, 2] * vec[4] * vec[6]
            + chl[3, 1, 3] * vec[5] * vec[7]
            + chl[3, 2, 3] * vec[6] * vec[7]
        )
        return vals

    def _bl_state_to_cartesian(self, v):
        """Convert one [t, r, theta, phi, v_t, v_r, v_theta, v_phi] state
        vector to Cartesian SI values, carrying t and v_t through unchanged.

        Extracted because ``calculate_trajectory`` and
        ``calculate_trajectory_iterator`` duplicated this conversion.
        """
        si_vals = (
            BoyerLindquistDifferential(
                v[1] * u.m,
                v[2] * u.rad,
                v[3] * u.rad,
                v[5] * u.m / u.s,
                v[6] * u.rad / u.s,
                v[7] * u.rad / u.s,
                self.a,
            )
            .cartesian_differential()
            .si_values()
        )
        return np.hstack((v[0], si_vals[:3], v[4], si_vals[3:]))

    def calculate_trajectory(
        self,
        start_lambda=0.0,
        end_lambda=10.0,
        stop_on_singularity=True,
        OdeMethodKwargs=None,
        return_cartesian=False,
    ):
        """
        Calculate trajectory in manifold according to geodesic equation

        Parameters
        ----------
        start_lambda : float
            Starting lambda(proper time), defaults to 0, (lambda ~= t)
        end_lambda : float
            Lambda(proper time) where iterations will stop, defaults to 10.0
        stop_on_singularity : bool
            Whether to stop further computation on reaching singularity, defaults to True
        OdeMethodKwargs : dict
            Kwargs to be supplied to the ODESolver, defaults to {'stepsize': 1e-3}
            Dictionary with key 'stepsize' along with an float value is expected.
        return_cartesian : bool
            True if coordinates and velocities are required in cartesian coordinates(SI units), defaults to False

        Returns
        -------
        ~numpy.ndarray
            N-element array containing proper time.
        ~numpy.ndarray
            (n,8) shape array of [t, x1, x2, x3, velocity_of_time, v1, v2, v3] for each proper time(lambda).
        """
        # Mutable-default fix: the original used a shared dict literal.
        if OdeMethodKwargs is None:
            OdeMethodKwargs = {"stepsize": 1e-3}
        vecs = list()
        lambdas = list()
        crossed_event_horizon = False
        ODE = RK45(
            fun=self.f_vec,
            t0=start_lambda,
            y0=self.initial_vec,
            t_bound=end_lambda,
            **OdeMethodKwargs
        )
        # Stop slightly outside the outer event horizon radius.
        _event_hor = kerr_utils.event_horizon(self.M.value, self.a.value)[0] * 1.001
        while ODE.t < end_lambda:
            vecs.append(ODE.y)
            lambdas.append(ODE.t)
            ODE.step()
            if (not crossed_event_horizon) and (ODE.y[1] <= _event_hor):
                warnings.warn("particle reached event horizon. ", RuntimeWarning)
                if stop_on_singularity:
                    break
                else:
                    crossed_event_horizon = True
        vecs, lambdas = np.array(vecs), np.array(lambdas)
        if not return_cartesian:
            return lambdas, vecs
        return lambdas, np.array([self._bl_state_to_cartesian(v) for v in vecs])

    def calculate_trajectory_iterator(
        self,
        start_lambda=0.0,
        stop_on_singularity=True,
        OdeMethodKwargs=None,
        return_cartesian=False,
    ):
        """
        Calculate trajectory in manifold according to geodesic equation.
        Yields an iterator.

        Parameters
        ----------
        start_lambda : float
            Starting lambda, defaults to 0.0, (lambda ~= t)
        stop_on_singularity : bool
            Whether to stop further computation on reaching singularity, defaults to True
        OdeMethodKwargs : dict
            Kwargs to be supplied to the ODESolver, defaults to {'stepsize': 1e-3}
            Dictionary with key 'stepsize' along with an float value is expected.
        return_cartesian : bool
            True if coordinates and velocities are required in cartesian coordinates(SI units), defaults to False

        Yields
        ------
        float
            proper time
        ~numpy.ndarray
            array of [t, x1, x2, x3, velocity_of_time, v1, v2, v3] for each proper time(lambda).
        """
        # Mutable-default fix: the original used a shared dict literal.
        if OdeMethodKwargs is None:
            OdeMethodKwargs = {"stepsize": 1e-3}
        ODE = RK45(
            fun=self.f_vec,
            t0=start_lambda,
            y0=self.initial_vec,
            t_bound=1e300,
            **OdeMethodKwargs
        )
        crossed_event_horizon = False
        _event_hor = kerr_utils.event_horizon(self.M.value, self.a.value)[0] * 1.001
        while True:
            if not return_cartesian:
                yield ODE.t, ODE.y
            else:
                yield ODE.t, self._bl_state_to_cartesian(ODE.y)
            ODE.step()
            if (not crossed_event_horizon) and (ODE.y[1] <= _event_hor):
                warnings.warn("particle reached event horizon. ", RuntimeWarning)
                if stop_on_singularity:
                    break
                else:
                    crossed_event_horizon = True
|
import os
import pytest
import tempfile
from tests.utils import (
get_open_port,
)
from cpc_fusion import Web3
from .common import (
GoEthereumEthModuleTest,
GoEthereumNetModuleTest,
GoEthereumPersonalModuleTest,
GoEthereumTest,
GoEthereumVersionModuleTest,
)
from .utils import (
wait_for_socket,
)
@pytest.fixture(scope='module')
def geth_command_arguments(geth_binary, datadir, geth_ipc_path):
    """Command line used to launch the geth process under test (IPC only)."""
    port = get_open_port()
    return (
        geth_binary,
        '--datadir', str(datadir),
        '--ipcpath', geth_ipc_path,
        '--nodiscover',
        '--fakepow',
        '--port', port,
    )
@pytest.fixture(scope='module')
def geth_ipc_path(datadir):
    """Yield a geth.ipc path inside a fresh temp dir; delete the socket file
    afterwards.

    NOTE(review): only the ipc file is removed -- the temp directory itself
    is left behind.
    """
    geth_ipc_dir_path = tempfile.mkdtemp()
    _geth_ipc_path = os.path.join(geth_ipc_dir_path, 'geth.ipc')
    yield _geth_ipc_path
    if os.path.exists(_geth_ipc_path):
        os.remove(_geth_ipc_path)
@pytest.fixture(scope="module")
def web3(geth_process, geth_ipc_path):
wait_for_socket(geth_ipc_path)
_web3 = Web3(Web3.IPCProvider(geth_ipc_path))
return _web3
class TestGoEthereumTest(GoEthereumTest):
    """Run the shared base go-ethereum suite against the IPC fixtures above."""
    pass
class TestGoEthereumEthModuleTest(GoEthereumEthModuleTest):
    """Run the shared eth-module suite against the IPC fixtures above."""
    pass
class TestGoEthereumVersionModuleTest(GoEthereumVersionModuleTest):
    """Run the shared version-module suite against the IPC fixtures above."""
    pass
class TestGoEthereumNetModuleTest(GoEthereumNetModuleTest):
    """Run the shared net-module suite against the IPC fixtures above."""
    pass
class TestGoEthereumPersonalModuleTest(GoEthereumPersonalModuleTest):
    """Run the shared personal-module suite against the IPC fixtures above."""
    pass
|
# Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib
import urlparse
import routes
from cmframework.server.cmhttperrors import CMHTTPErrors
from cmframework.apis import cmerror
from cmframework.server import cmhttprpc
class CMWSGIHandler(object):
    """WSGI entry point for the CM server REST interface (Python 2 code:
    uses ``urlparse``/``iteritems``).

    Maps /cm/... URLs to handler methods and delegates each request to the
    API object matching the requested version.
    """

    def __init__(self, rest_api_factory):
        logging.debug('CMWSGIHandler constructor called')
        self.mapper = routes.Mapper()
        # Route table: each action name maps 1:1 to a method on this class.
        self.mapper.connect(None, '/cm/apis', action='get_apis')
        self.mapper.connect(None, '/cm/{api}/properties', action='handle_properties')
        self.mapper.connect(None, '/cm/{api}/properties/{property}', action='handle_property')
        self.mapper.connect(None, '/cm/{api}/snapshots', action='handle_snapshots')
        self.mapper.connect(None, '/cm/{api}/snapshots/{snapshot}', action='handle_snapshot')
        self.mapper.connect(None, '/cm/{api}/activator/enable', action='handle_activator_enable')
        self.mapper.connect(None, '/cm/{api}/activator/disable', action='handle_activator_disable')
        self.mapper.connect(None, '/cm/{api}/activator/agent/{node}',
                            action='handle_agent_activate')
        self.mapper.connect(None, '/cm/{api}/activator/{node}', action='handle_activate')
        self.mapper.connect(None, '/cm/{api}/activator', action='handle_activate')
        self.mapper.connect(None, '/cm/{api}/reboot', action='handle_reboot')
        self.mapper.connect(None, '/cm/{api}/changes', action='handle_changes')
        self.rest_api_factory = rest_api_factory

    def __call__(self, environ, start_response):
        """Handle one WSGI request; yields the JSON reply body."""
        logging.debug('Handling request started, environ=%s', str(environ))
        # For request and response data
        rpc = cmhttprpc.HTTPRPC()
        rpc.rep_status = CMHTTPErrors.get_ok_status()
        # get the interesting fields
        rpc.req_method = environ['REQUEST_METHOD']
        path = environ['PATH_INFO']
        try:
            rpc.req_filter = urlparse.parse_qs(urllib.unquote(environ['QUERY_STRING']))
        except KeyError:
            rpc.req_filter = {}
        content_type = environ['CONTENT_TYPE']
        try:
            content_size = environ['CONTENT_LENGTH']
        except KeyError:
            content_size = None
        try:
            # resolve the action for this path
            action = ''
            actions, _ = self.mapper.routematch(path)
            if actions and isinstance(actions, dict):
                action = actions.get('action', '')
                # route placeholders ({api}, {node}, ...) become request params
                for key, value in actions.iteritems():
                    if key != 'action':
                        rpc.req_params[key] = value
            else:
                rpc.rep_status = CMHTTPErrors.get_resource_not_found_status()
                raise cmerror.CMError('The requested url is not found')
            # read the body if one is present
            if content_size and int(content_size):
                size = int(content_size)
                if content_type == 'application/json':
                    totalread = 0
                    # NOTE(review): assumes read() keeps returning data until
                    # `size` bytes arrive; a short/empty read would loop.
                    while totalread < size:
                        data = environ['wsgi.input'].read()
                        totalread += len(data)
                        rpc.req_body += data
                else:
                    rpc.rep_status = CMHTTPErrors.get_unsupported_content_type_status()
                    raise cmerror.CMError('Only json content is supported')
            # dispatch to the handler method named by the route
            try:
                logging.info('Calling %s with rpc=%s', action, str(rpc))
                actionfunc = getattr(self, action)
                actionfunc(rpc)
            except AttributeError as attrerror:
                # BUGFIX: was `rpc.reply_status`, which set a nonexistent
                # attribute instead of the reply status field.
                rpc.rep_status = CMHTTPErrors.get_resource_not_found_status()
                raise cmerror.CMError('Action %s not found, error: %s' % (action, str(attrerror)))
        except cmerror.CMError as exp:
            rpc.rep_status = CMHTTPErrors.get_internal_error_status()
            rpc.rep_status += ','
            rpc.rep_status += str(exp)
        except Exception as exp:  # pylint: disable=broad-except
            rpc.rep_status = CMHTTPErrors.get_internal_error_status()
            rpc.rep_status += ','
            rpc.rep_status += str(exp)
        finally:
            logging.info('Replying with rpc=%s', str(rpc))
            response_headers = [('Content-type', 'application/json')]
            start_response(rpc.rep_status, response_headers)
            yield rpc.rep_body

    def get_apis(self, rpc):
        """GET /cm/apis -- list the available API versions."""
        logging.debug('get_apis called')
        if rpc.req_method != 'GET':
            rpc.rep_status = CMHTTPErrors.get_request_not_ok_status()
            rpc.rep_status += ', only GET operation is possible'
        else:
            self.rest_api_factory.get_apis(rpc)

    def handle_properties(self, rpc):
        logging.debug('handle_properties called')
        api = self._get_api(rpc)
        if api:
            api.handle_properties(rpc)

    def handle_property(self, rpc):
        logging.debug('handle_property called')
        api = self._get_api(rpc)
        if api:
            api.handle_property(rpc)

    def handle_snapshots(self, rpc):
        logging.debug('handle_snapshots called')
        api = self._get_api(rpc)
        if api:
            api.handle_snapshots(rpc)

    def handle_snapshot(self, rpc):
        logging.debug('handle_snapshot called')
        api = self._get_api(rpc)
        if api:
            api.handle_snapshot(rpc)

    def handle_agent_activate(self, rpc):
        logging.debug('handle_agent_activate called')
        api = self._get_api(rpc)
        if api:
            api.handle_agent_activate(rpc)

    def handle_activate(self, rpc):
        logging.debug('handle_activate called')
        api = self._get_api(rpc)
        if api:
            api.handle_activate(rpc)

    def handle_activator_disable(self, rpc):
        logging.debug('handle_activator_disable called')
        api = self._get_api(rpc)
        if api:
            api.handle_activator_disable(rpc)

    def handle_activator_enable(self, rpc):
        logging.debug('handle_activator_enable called')
        api = self._get_api(rpc)
        if api:
            api.handle_activator_enable(rpc)

    def handle_reboot(self, rpc):
        logging.debug('handle_reboot called')
        api = self._get_api(rpc)
        if api:
            api.handle_reboot(rpc)

    def handle_changes(self, rpc):
        logging.debug('handle_changes called')
        api = self._get_api(rpc)
        if api:
            api.handle_changes(rpc)

    def _get_api(self, rpc):
        """Resolve the versioned API object for this request; set an error
        status and return None when it cannot be resolved."""
        logging.debug('_get_api called')
        api = None
        try:
            version = rpc.req_params['api']
            api = self.rest_api_factory.get_api(version)
            if not api:
                rpc.rep_status = CMHTTPErrors.get_resource_not_found_status()
        except KeyError:
            rpc.rep_status = CMHTTPErrors.get_request_not_ok_status()
        return api
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: add a ``type`` CharField to Line with bus/tram/train
    choices (bytestring literals: this migration predates Python 3)."""

    dependencies = [
        ('info_transportation', '0014_auto_20150227_2248'),
    ]

    operations = [
        migrations.AddField(
            model_name='line',
            name='type',
            field=models.CharField(default=b'bus', help_text=b'Liikennev\xc3\xa4linetyyppi', max_length=10,
                                   verbose_name=b'Tyyppi', choices=[(b'bus', b'bus'), (b'tram', b'tram'), (b'train', b'train')]),
            preserve_default=True,
        ),
    ]
|
import sys
import traceback
from discord.ext import commands
from ansura import AnsuraBot, AnsuraContext
class ErrorHandler(commands.Cog):
    """Global command error handler cog.

    NOTE(review): the listener decorator is commented out, so discord.py
    never invokes ``on_command_error`` on this cog -- confirm intent.
    """

    def __init__(self, bot: AnsuraBot):
        self.bot = bot

    # @commands.Cog.listener()
    async def on_command_error(self, ctx: AnsuraContext, error: Exception):
        """Print the traceback, then answer known permission errors politely."""
        # BUGFIX: traceback.print_exception() returns None, so wrapping it in
        # print() emitted a stray "None" line; call it directly instead.
        traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)
        # Commands with their own local error handler deal with it themselves.
        if hasattr(ctx.command, 'error'):
            return
        ignored = (commands.CommandNotFound, commands.UserInputError)
        # Unwrap CommandInvokeError to the underlying cause where present.
        error = getattr(error, 'original', error)
        if isinstance(error, ignored):
            print(error)
        elif isinstance(error, commands.MissingPermissions):
            await ctx.send_error("You can't do that! >.>\n" +
                                 str(error))
        elif isinstance(error, commands.BotMissingPermissions):
            await ctx.send_error("Oops. Doesn't look like I was given the proper permissions for that!\n" +
                                 str(error))
        else:
            print(error)
def setup(bot):
    """Extension entry point used by discord.py's loader."""
    cog = ErrorHandler(bot)
    bot.add_cog(cog)
|
# pfryby.py
import pandas as pd
import requests
from bs4 import BeautifulSoup
from os import path
from fantasyfootball.config import DATA_DIR
def pfr_year_by_year_table_grab(year):
    """Download the PFR fantasy leaderboard for *year* and return it as a
    DataFrame with a ``year`` column appended."""
    url = f'https://www.pro-football-reference.com/years/{year}/fantasy.htm'
    response = requests.get(url)
    page = BeautifulSoup(response.content, 'html.parser')
    tables = page.find_all('table')
    frame = pd.read_html(str(tables))[0]
    frame['year'] = year
    return frame
def year_by_year_player_table_transform(df):
    """Normalize a raw PFR fantasy table into flat, snake_case columns."""
    out = df.copy()
    # Drop the scoring-summary columns that are not used downstream.
    out.drop(columns=['FantPt', 'PPR', 'DKPt', 'FDPt', 'VBD'], level=1, inplace=True)
    # Flatten the two-level header, lower-case it, then strip pandas'
    # auto-generated 'Unnamed: N_level_0' prefixes down to the real name.
    flattened = ['_'.join(col).lower() for col in out.columns]
    out.columns = [name.split('_')[-1] if 'level' in str(name) else name
                   for name in flattened]
    # PFR repeats the header row inside the body; remove those rows.
    out = out.loc[out['rk'] != 'Rk']
    renames = {
        'player': 'player_name',
        'fumbles_fmb': 'fumbles',
        'fumbles_fl': 'fumbles_lost',
        'games_g': 'games',
        'games_gs': 'games_started',
        'fantpos': 'pos',
        'year_': 'year',
    }
    out.rename(columns=renames, inplace=True)
    # Drop the '*' (Pro Bowl) and '+' (All-Pro) markers from player names.
    out['player_name'] = out['player_name'].str.split('*').str[0].str.split('+').str[0]
    return out
def year_by_year_player_table_reindex(df):
    """Order columns into the standard layout; missing columns and NaNs
    become 0."""
    canonical_columns = [
        'rk', 'player_name', 'pos', 'year',
        'age', 'tm', 'games', 'games_started',
        'passing_cmp', 'passing_att', 'passing_yds', 'passing_td', 'passing_int',
        'rushing_att', 'rushing_yds', 'rushing_y/a', 'rushing_td',
        'receiving_tgt', 'receiving_rec', 'receiving_yds', 'receiving_y/r', 'receiving_td',
        'scoring_2pm',
        'fumbles', 'fumbles_lost',
        'fantasy_posrank', 'fantasy_ovrank',
    ]
    out = df.copy().reindex(columns=canonical_columns, fill_value=0)
    return out.fillna(0)
if __name__ == "__main__":
min = int(input('Minimum year? >>> '))
max = int(input('Maximum year? >>> '))
for year in range(min, max+1):
df = pfr_year_by_year_table_grab(year)
df = year_by_year_player_table_transform(df)
df = year_by_year_player_table_reindex(df)
df.to_csv(path.join(DATA_DIR,fr'year-by-year\{year}.csv'), index=False) |
import requests
from requests.auth import HTTPBasicAuth
import json
api_key = "" # Your API Key
youtube_link = "" # Your YouTube link
url = "https://api-us.musiio.com/api/v1/upload/youtube-link"
payload = {
"link": youtube_link
}
headers = {
'Content-Type': 'application/json'
}
response = requests.post(url, headers=headers, data=json.dumps(payload), auth=HTTPBasicAuth(api_key, ''))
print(response.text.encode('utf8'))
|
import time
import sys
import os
import tempfile
import tornado
from datetime import datetime
from circus.py3compat import StringIO
from circus.client import make_message
from circus.tests.support import TestCircus, async_poll_for, truncate_file
from circus.tests.support import TestCase, EasyTestSuite
from circus.stream import FileStream
from circus.stream import FancyStdoutStream
def run_process(testfile, *args, **kw):
    """Child-process target: write known markers to stdout/stderr and to
    *testfile*, then sleep so the arbiter can observe the output.

    Returns 1 on failure. The original used a bare ``except:``, which also
    trapped SystemExit/KeyboardInterrupt; narrowed to ``Exception``.
    """
    try:
        # print once, then wait
        sys.stdout.write('stdout')
        sys.stdout.flush()
        sys.stderr.write('stderr')
        sys.stderr.flush()
        with open(testfile, 'a+') as f:
            f.write('START')
        time.sleep(1.)
    except Exception:
        return 1
class TestWatcher(TestCircus):
    # Dotted path of the child-process target (run_process above).
    dummy_process = 'circus.tests.test_stream.run_process'

    @tornado.gen.coroutine
    def start_arbiter(self):
        """Create stdout/stderr FileStreams and start an arbiter using them.

        NOTE(review): stream objects and temp paths are stored on the class,
        not the instance.
        """
        cls = TestWatcher
        fd, cls.stdout = tempfile.mkstemp()
        os.close(fd)
        fd, cls.stderr = tempfile.mkstemp()
        os.close(fd)
        cls.stdout_stream = FileStream(cls.stdout)
        cls.stderr_stream = FileStream(cls.stderr)
        stdout = {'stream': cls.stdout_stream}
        stderr = {'stream': cls.stderr_stream}
        # NOTE(review): `async` became a reserved keyword in Python 3.7, so
        # this keyword argument is a syntax error there (circus later renamed
        # it); this file targets older interpreters.
        self.file, self.arbiter = cls._create_circus(cls.dummy_process,
                                                     stdout_stream=stdout,
                                                     stderr_stream=stderr,
                                                     debug=True, async=True)
        yield self.arbiter.start()

    @tornado.gen.coroutine
    def stop_arbiter(self):
        """Stop the arbiter and remove the temp files created at start."""
        cls = TestWatcher
        yield self.arbiter.stop()
        cls.stdout_stream.close()
        cls.stderr_stream.close()
        if os.path.exists(self.file):
            os.remove(self.file)
        if os.path.exists(self.stdout):
            os.remove(cls.stdout)
        if os.path.exists(self.stderr):
            os.remove(cls.stderr)

    @tornado.gen.coroutine
    def restart_arbiter(self):
        yield self.arbiter.restart()

    @tornado.gen.coroutine
    def call(self, _cmd, **props):
        # Send one command message to the arbiter and return its reply.
        msg = make_message(_cmd, **props)
        resp = yield self.cli.call(msg)
        raise tornado.gen.Return(resp)

    @tornado.testing.gen_test
    def test_file_stream(self):
        # max_bytes/backup_count passed as strings must be coerced to int.
        yield self.start_arbiter()
        stream = FileStream(self.stdout, max_bytes='12', backup_count='3')
        self.assertTrue(isinstance(stream._max_bytes, int))
        self.assertTrue(isinstance(stream._backup_count, int))
        yield self.stop_arbiter()
        stream.close()

    @tornado.testing.gen_test
    def test_stream(self):
        # Streams must keep working across an arbiter restart.
        yield self.start_arbiter()
        # wait for the process to be started
        res1 = yield async_poll_for(self.stdout, 'stdout')
        res2 = yield async_poll_for(self.stderr, 'stderr')
        self.assertTrue(res1)
        self.assertTrue(res2)
        # clean slate
        truncate_file(self.stdout)
        truncate_file(self.stderr)
        # restart and make sure streams are still working
        yield self.restart_arbiter()
        # wait for the process to be restarted
        res1 = yield async_poll_for(self.stdout, 'stdout')
        res2 = yield async_poll_for(self.stderr, 'stderr')
        self.assertTrue(res1)
        self.assertTrue(res2)
        yield self.stop_arbiter()
class TestFancyStdoutStream(TestCase):
    """Unit tests for FancyStdoutStream's colored, timestamped output."""

    def color_start(self, code):
        # ANSI escape: set foreground color `code` on a black background.
        return '\033[0;3%s;40m' % code

    def color_end(self):
        # ANSI reset followed by the line terminator.
        return '\033[0m\n'

    def get_stream(self, *args, **kw):
        """Build a FancyStdoutStream writing to a StringIO with a frozen
        timestamp."""
        # need a constant timestamp
        now = datetime.now()
        stream = FancyStdoutStream(*args, **kw)
        # patch some details that will be used
        stream.out = StringIO()
        stream.now = lambda: now
        return stream

    def get_output(self, stream):
        """Feed one chunk through *stream*; return (actual, expected) text."""
        # stub data
        data = {'data': 'hello world',
                'pid': 333}
        # get the output
        stream(data)
        output = stream.out.getvalue()
        stream.out.close()
        expected = self.color_start(stream.color_code)
        expected += stream.now().strftime(stream.time_format) + " "
        expected += "[333] | " + data['data'] + self.color_end()
        return output, expected

    def test_random_colored_output(self):
        stream = self.get_stream()
        output, expected = self.get_output(stream)
        self.assertEqual(output, expected)

    def test_red_colored_output(self):
        stream = self.get_stream(color='red')
        output, expected = self.get_output(stream)
        self.assertEqual(output, expected)

    def test_time_formatting(self):
        stream = self.get_stream(time_format='%Y/%m/%d %H.%M.%S')
        output, expected = self.get_output(stream)
        self.assertEqual(output, expected)

    def test_data_split_into_lines(self):
        stream = self.get_stream(color='red')
        data = {'data': '\n'.join(['foo', 'bar', 'baz']),
                'pid': 333}
        stream(data)
        output = stream.out.getvalue()
        stream.out.close()
        # NOTE: We expect 4 b/c the last line needs to add a newline
        # in order to prepare for the next chunk
        self.assertEqual(len(output.split('\n')), 4)

    def test_data_with_extra_lines(self):
        stream = self.get_stream(color='red')
        # There is an extra newline
        data = {'data': '\n'.join(['foo', 'bar', 'baz', '']),
                'pid': 333}
        stream(data)
        output = stream.out.getvalue()
        stream.out.close()
        self.assertEqual(len(output.split('\n')), 4)

    def test_color_selections(self):
        # The colors are chosen from an ordered list where each index
        # is used to calculate the ascii escape sequence.
        for i, color in enumerate(FancyStdoutStream.colors):
            stream = self.get_stream(color)
            self.assertEqual(i + 1, stream.color_code)
            stream.out.close()
class TestFileStream(TestCase):
    """Unit tests for FileStream's timestamped file output."""

    def get_stream(self, *args, **kw):
        """Build a FileStream writing to a StringIO with a frozen timestamp."""
        # need a constant timestamp
        now = datetime.now()
        stream = FileStream(*args, **kw)
        # patch some details that will be used
        stream._file.close()
        stream._file = StringIO()
        stream._open = lambda: stream._file
        stream.now = lambda: now
        return stream

    def get_output(self, stream):
        """Feed one chunk through *stream*; return (actual, expected) text."""
        # stub data
        data = {'data': 'hello world',
                'pid': 333}
        # get the output
        stream(data)
        output = stream._file.getvalue()
        stream._file.close()
        expected = stream.now().strftime(stream.time_format) + " "
        expected += "[333] | " + data['data'] + '\n'
        return output, expected

    def test_time_formatting(self):
        stream = self.get_stream(time_format='%Y/%m/%d %H.%M.%S')
        output, expected = self.get_output(stream)
        self.assertEqual(output, expected)

    def test_data_split_into_lines(self):
        stream = self.get_stream(time_format='%Y/%m/%d %H.%M.%S')
        data = {'data': '\n'.join(['foo', 'bar', 'baz']),
                'pid': 333}
        stream(data)
        output = stream._file.getvalue()
        stream._file.close()
        # NOTE: We expect 4 b/c the last line needs to add a newline
        # in order to prepare for the next chunk
        self.assertEqual(len(output.split('\n')), 4)

    def test_data_with_extra_lines(self):
        stream = self.get_stream(time_format='%Y/%m/%d %H.%M.%S')
        # There is an extra newline
        data = {'data': '\n'.join(['foo', 'bar', 'baz', '']),
                'pid': 333}
        stream(data)
        output = stream._file.getvalue()
        stream._file.close()
        self.assertEqual(len(output.split('\n')), 4)
test_suite = EasyTestSuite(__name__)
|
import numpy as np
import ast
def vandermonde(x, y):
    """Fit an exact interpolating polynomial through the given points by
    solving a Vandermonde system.

    :param x: string representation of a Python list of x values, e.g. "[1, 2]"
    :param y: string representation of the matching list of y values
    :return: dict with the Vandermonde matrix ('vMatrix'), the solved
        coefficients highest degree first ('values'), and a human-readable
        polynomial string ('polynom')
    """
    x = np.array(ast.literal_eval(x))
    y = np.array(ast.literal_eval(y))
    vanx = np.vander(x)
    invX = np.linalg.inv(vanx)  # inverse of the Vandermonde matrix
    dotPoint = np.dot(invX, y.T)  # coefficients, highest degree first
    sizePol = len(dotPoint)
    if sizePol == 1:
        # BUGFIX: with a single data point the original loop emitted the
        # constant term twice ("c*x^0+c"); it is just the constant.
        polynom = str(round(dotPoint[0], 4))
    else:
        # Terms for powers n-1 .. 1, then the constant term.
        terms = []
        for power, coef in zip(range(sizePol - 1, 0, -1), dotPoint):
            terms.append(str(np.round(coef, 4)) + "*x^" + str(power))
        polynom = "+".join(terms) + "+" + str(round(dotPoint[sizePol - 1], 4))
    # Normalize signs and strip spaces ("+-" -> "-", etc.).
    polynom = polynom.replace(" ", "").replace("--", "+").replace("++", "+").replace("+-", "-").replace("-+", "-")
    results = {
        'vMatrix': vanx.tolist(),
        'values': np.round(dotPoint, 14).tolist(),
        'polynom': polynom
    }
    return results
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Do-nothing script for making a release
This idea comes from here:
https://blog.danslimmon.com/2019/07/15/do-nothing-scripting-the-key-to-gradual-automation/
Author: Gertjan van den Burg
Date: 2019-07-23
"""
import colorama
import os
import sys
import tempfile
def colored(msg, color=None, style=None):
    """Wrap *msg* in colorama color/style escape codes and reset afterwards.

    Unknown color/style names raise KeyError, matching the original.
    """
    prefix = {
        "red": colorama.Fore.RED,
        "green": colorama.Fore.GREEN,
        "cyan": colorama.Fore.CYAN,
        "yellow": colorama.Fore.YELLOW,
        "magenta": colorama.Fore.MAGENTA,
        None: "",
    }[color] + {
        "bright": colorama.Style.BRIGHT,
        "dim": colorama.Style.DIM,
        None: "",
    }[style]
    return f"{prefix}{msg}{colorama.Style.RESET_ALL}"
def cprint(msg, color=None, style=None):
    """Print *msg* wrapped in the requested colorama color/style."""
    text = colored(msg, color=color, style=style)
    print(text)
def wait_for_enter():
    """Block until the user presses Enter, then emit a blank line."""
    prompt = colored("\nPress Enter to continue", style="dim")
    input(prompt)
    print()
def get_package_name():
    """Read the package NAME from ./setup.py.

    Raises:
        RuntimeError: when no ``NAME = `` line exists.  (The original fell
        through to ``None.split`` and crashed with AttributeError.)
    """
    with open("./setup.py", "r") as fp:
        nameline = next(
            (line.strip() for line in fp if line.startswith("NAME = ")), None
        )
    if nameline is None:
        raise RuntimeError("no 'NAME = ' line found in ./setup.py")
    return nameline.split("=")[-1].strip().strip('"')
def get_package_version(pkgname):
    """Execute <pkgname>/__version__.py and return its __version__ value."""
    namespace = {}
    version_file = f"{pkgname.lower()}/__version__.py"
    with open(version_file, "r") as fh:
        exec(fh.read(), namespace)
    return namespace["__version__"]
class Step:
    """Base class for one interactive release step.

    Subclasses implement ``action``; ``run`` drives pre -> action -> post
    and turns Ctrl-C into a clean exit with status 1.
    """

    def pre(self, context):
        """Hook executed before the action; no-op by default."""
        pass

    def post(self, context):
        """Hook executed after the action; waits for operator confirmation."""
        wait_for_enter()

    def run(self, context):
        """Execute the full pre/action/post sequence for this step."""
        try:
            self.pre(context)
            self.action(context)
            self.post(context)
        except KeyboardInterrupt:
            cprint("\nInterrupted.", color="red")
            raise SystemExit(1)

    def instruct(self, msg):
        """Show a green instruction for the operator to act on."""
        cprint(msg, color="green")

    def print_run(self, msg):
        """Show a "Run:" banner followed by the suggested command."""
        cprint("Run:", color="cyan", style="bright")
        self.print_cmd(msg)

    def print_cmd(self, msg):
        """Show a command, indented, in bright cyan."""
        cprint("\t" + msg, color="cyan", style="bright")

    def do_cmd(self, cmd):
        """Announce *cmd*, wait for confirmation, then run it via os.system."""
        cprint(f"Going to run: {cmd}", color="magenta", style="bright")
        wait_for_enter()
        os.system(cmd)
class GitToMaster(Step):
    """Checklist step: ensure work happens on an up-to-date master branch."""

    def action(self, context):
        self.instruct("Make sure you're on master and changes are merged in")
        self.print_run("git checkout master")
class UpdateChangelog(Step):
    """Checklist step: record the new version in CHANGELOG.md."""

    def action(self, context):
        self.instruct(f"Update change log for version {context['version']}")
        self.print_run("vi CHANGELOG.md")
class UpdateReadme(Step):
    """Checklist step: review README.md before releasing."""

    def action(self, context):
        # plain string: the original used an f-string with no placeholders
        self.instruct("Update readme if necessary")
        self.print_run("vi README.md")
class RunTests(Step):
    """Checklist step: run the unit-test suite."""

    def action(self, context):
        self.instruct("Run the unit tests")
        self.print_run("make test")
class BumpVersionPackage(Step):
    """Checklist step: bump __version__.py, then re-read the new version."""

    def action(self, context):
        # plain string: the original used an f-string with no placeholders
        self.instruct("Update __version__.py with new version")
        self.do_cmd(f"vi {context['pkgname']}/__version__.py")

    def post(self, context):
        wait_for_enter()
        # Refresh the context so subsequent steps use the just-bumped version.
        context["version"] = self._get_version(context)

    def _get_version(self, context):
        # Re-read the version from the package's version file.
        return get_package_version(context["pkgname"])
class MakeClean(Step):
    """Checklist step: clean build artifacts."""

    def action(self, context):
        self.do_cmd("make clean")
class MakeDocs(Step):
    """Checklist step: rebuild the documentation."""

    def action(self, context):
        self.do_cmd("make docs")
class MakeDist(Step):
    """Checklist step: build the source/wheel distributions."""

    def action(self, context):
        self.do_cmd("make dist")
class PushToTestPyPI(Step):
    """Checklist step: upload the distributions to TestPyPI first."""

    def action(self, context):
        self.do_cmd(
            "twine upload --repository-url https://test.pypi.org/legacy/ dist/*"
        )
class InstallFromTestPyPI(Step):
    """Checklist step: install the TestPyPI upload into a throwaway venv."""

    def action(self, context):
        tmpvenv = tempfile.mkdtemp(prefix="s2h_venv_")
        # --extra-index-url lets dependencies resolve from the real PyPI
        # while the package itself comes from TestPyPI.
        self.do_cmd(
            f"virtualenv {tmpvenv} && source {tmpvenv}/bin/activate && "
            "pip install --no-cache-dir --index-url "
            "https://test.pypi.org/simple/ "
            "--extra-index-url https://pypi.org/simple "
            f"{context['pkgname']}=={context['version']}"
        )
        # remembered so TestPackage / RemoveVenv can reuse and delete it
        context["tmpvenv"] = tmpvenv
class TestPackage(Step):
    """Checklist step: smoke-test the installed package's version flag."""

    def action(self, context):
        self.instruct(
            f"Ensure that the following command gives version {context['version']}"
        )
        self.do_cmd(
            f"source {context['tmpvenv']}/bin/activate && signal2html -V"
        )
class RemoveVenv(Step):
    """Checklist step: delete the throwaway virtualenv."""

    def action(self, context):
        self.do_cmd(f"rm -rf {context['tmpvenv']}")
class GitTagVersion(Step):
    """Checklist step: tag the release commit."""

    def action(self, context):
        self.do_cmd(f"git tag v{context['version']}")
class GitAdd(Step):
    """Checklist step: stage and commit outstanding changes interactively."""

    def action(self, context):
        self.instruct("Add everything to git and commit")
        self.print_run("git gui")
class PushToPyPI(Step):
    """Checklist step: upload the distributions to the real PyPI."""

    def action(self, context):
        self.do_cmd("twine upload dist/*")
class PushToGitHub(Step):
    """Checklist step: push commits and tags to GitHub."""

    def action(self, context):
        self.do_cmd("git push -u --tags origin master")
class WaitForTravis(Step):
    """Checklist step: wait for the Travis CI build."""

    def action(self, context):
        self.instruct(
            "Wait for Travis to complete and verify that its successful"
        )
class WaitForAppVeyor(Step):
    """Checklist step: wait for the AppVeyor build."""

    def action(self, context):
        self.instruct(
            "Wait for AppVeyor to complete and verify that its successful"
        )
class WaitForRTD(Step):
    """Checklist step: wait for the ReadTheDocs build."""

    def action(self, context):
        self.instruct(
            "Wait for ReadTheDocs to complete and verify that its successful"
        )
def main(target=None):
    """Walk the release checklist, optionally starting at step *target*.

    Args:
        target: name of the procedure step to start from; ``None`` runs
            every step from the beginning.
    """
    colorama.init()
    procedure = [
        ("gittomaster", GitToMaster()),
        ("gitadd1", GitAdd()),
        ("push1", PushToGitHub()),
        ("bumpversion", BumpVersionPackage()),
        ("changelog", UpdateChangelog()),
        ("readme", UpdateReadme()),
        ("clean", MakeClean()),
        # ("tests", RunTests()),
        ("dist", MakeDist()),
        ("testpypi", PushToTestPyPI()),
        ("install", InstallFromTestPyPI()),
        ("testpkg", TestPackage()),
        ("remove_venv", RemoveVenv()),
        ("gitadd2", GitAdd()),
        ("pypi", PushToPyPI()),
        ("tag", GitTagVersion()),
        ("push2", PushToGitHub()),
    ]
    context = {}
    context["pkgname"] = get_package_name()
    context["version"] = get_package_version(context["pkgname"])
    # Skip everything before the requested starting step.
    # NOTE(review): a target that matches no step name silently skips the
    # whole procedure — confirm whether that should be an error.
    skip = target is not None
    for name, step in procedure:
        if skip and name != target:
            continue
        skip = False
        step.run(context)
    cprint("\nDone!", color="yellow", style="bright")
if __name__ == "__main__":
    # Optional CLI argument: name of the checklist step to start from.
    target = sys.argv[1] if len(sys.argv) > 1 else None
    main(target=target)
|
from unittest import TestCase, mock
from django.contrib.auth.models import User
from rest_framework.test import APIClient
class TestIndexView(TestCase):
    """Authentication behavior of the maidbot API index endpoint."""

    @classmethod
    def setUpClass(cls):
        # Django's TestCase.setUpClass installs the class-wide transaction
        # wrapper; overriding without super() breaks DB setup for the class.
        super().setUpClass()
        cls.user = User(username='test')

    @classmethod
    def tearDownClass(cls):
        User.objects.all().delete()
        super().tearDownClass()

    def setUp(self):
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_unauthenticated_get_index(self):
        self.client.force_authenticate(user=None)
        response = self.client.get('/maidbot/api/')
        self.assertEqual(401, response.status_code)

    def test_authenticated_get_index(self):
        response = self.client.get('/maidbot/api/')
        self.assertEqual(200, response.status_code)
        self.assertTrue(b'html' in response.content)
class TestQuoteView(TestCase):
    """Authentication behavior of the maidbot quotes endpoint."""

    @classmethod
    def setUpClass(cls):
        # Must chain to Django's TestCase.setUpClass (class-level atomics).
        super().setUpClass()
        cls.user = User(username='test')

    @classmethod
    def tearDownClass(cls):
        User.objects.all().delete()
        super().tearDownClass()

    def setUp(self):
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_unauthenticated_get_quote(self):
        self.client.force_authenticate(user=None)
        response = self.client.get('/maidbot/api/quotes/')
        self.assertEqual(401, response.status_code)

    def test_authenticated_get_quote(self):
        response = self.client.get('/maidbot/api/quotes/')
        self.assertEqual(200, response.status_code)
|
"""Import this module to run the service"""
import os
import sys
import django
BASE_DIR = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
os.path.pardir,
os.path.pardir,
))
sys.path.append(BASE_DIR)
os.environ["DJANGO_SETTINGS_MODULE"] = "webca.ca_service.settings"
django.setup()
#pylint: disable=c0413
from webca.ca_service.service import CAService
CAService().run()
|
from .tokenized_validators import run_if, run_if_any, run_if_all, run_if_tokens_satisfy_condition |
from backend.django_algolia.client import get_index
# def perform_search(query):
# index = get_index()
# params = {
# 'hitsPerPage': 10
# }
# results = index.search(query, params)
# return results
def search_index(query):
    """Run *query* against the configured Algolia index and return the hits."""
    algolia_index = get_index()
    return algolia_index.search(query)
print("Some content")
|
import csv, re
def cleanNumber(n):
    """Parse a digit-grouped string ('.'/',' separators) into an int and
    scale abbreviated values up into the 10000+ range.

    The thresholds are checked in sequence against the running value, so a
    small input may be rescaled more than once (e.g. "5" -> 50000).
    """
    value = int(n.replace('.', '').replace(',', ''))
    # Cascade mirrors the original chained ifs: each test sees the value
    # produced by the previous rescale.
    for threshold, factor in ((100, 1000), (1000, 10), (10000, 10)):
        if value < threshold:
            value *= factor
    return value
def cleanupMedians():
    """Normalize the income columns of the county median-income CSV.

    Reads county-median-incomes-normalized.csv and writes
    county-median-incomes.csv with columns 3-5 run through cleanNumber;
    the header row is copied through unchanged.
    """
    with open('county-median-incomes-normalized.csv', 'r') as csvfile:
        reader = csv.reader(csvfile)
        with open('county-median-incomes.csv', 'w') as csvoutfile:
            writer = csv.writer(csvoutfile)
            # Fix: csv readers have no .next() method in Python 3; use the
            # builtin next(), as cleanupH1Bs below already does.
            writer.writerow(next(reader))
            for row in reader:
                row[3] = cleanNumber(row[3])
                row[4] = cleanNumber(row[4])
                row[5] = cleanNumber(row[5])
                writer.writerow(row)
def cleanupH1Bs():
    """Rewrite the H-1B CSV with column 2 normalized via cleanNumber."""
    src = 'h1bs-2012-2018-final-with-countyid.csv'
    dst = 'h1bs-2012-2018.csv'
    with open(src, 'r') as fin, open(dst, 'w') as fout:
        reader = csv.reader(fin)
        writer = csv.writer(fout)
        # copy the header through untouched
        writer.writerow(next(reader))
        for record in reader:
            record[2] = cleanNumber(record[2])
            writer.writerow(record)
if __name__ == "__main__":
#cleanupMedians()
cleanupH1Bs()
|
from .version_header import VersionHeader
from .header import Header
from .items import InfoItem, VersionItem
class TeeworldsMap:
    """Aggregate of the parsed sections of a Teeworlds map file."""

    def __init__(self, version_header: VersionHeader, header: Header,
                 version_item: VersionItem, info_item: InfoItem):
        # Keep the raw header sections, expose the version number directly,
        # and hold the info item under a friendlier name.
        self.version_header = version_header
        self.header = header
        self.map_info = info_item
        self.version = version_item.version
|
from pathlib import Path
from pypj.environment import Version
from pypj.file_path import PypjFilePath
from pypj.setting import PackageName, PypjSetting
from pypj.task.pyproject import Pyproject
from tests.conftest import does_contain_specific_words, prepare_dir, validate_toml
def test_pyproject() -> None:
    """Pyproject task produces a valid pyproject.toml without placeholder words."""
    # prepare a package dir with an empty pyproject.toml
    package = PackageName("test_pyproject")
    project_dir = prepare_dir(package)
    toml_path = project_dir.joinpath("pyproject.toml")
    toml_path.touch()
    # execute the task
    setting = PypjSetting(Version("0.0.0"), package)
    file_path = PypjFilePath(Path().cwd().joinpath("tmp"), setting)
    Pyproject(setting, file_path).execute()
    # assert: file exists, parses as TOML, and carries no template words
    assert toml_path.exists()
    assert validate_toml(toml_path)
    with toml_path.open(mode="r") as f:
        body = f.read()
    assert not does_contain_specific_words(body)
|
import os
import json
import difflib
import logging
import threading
from pathlib import Path
from itertools import islice
from collections import OrderedDict
from typing import List, Optional, Tuple, NamedTuple
from nornir.core.task import AggregatedResult, MultiResult, Result
from nornir_utils.plugins.tasks.files.write_file import _read_file
LOCK = threading.Lock()
CONTENT = []
class ResultRecord(NamedTuple):
    """One host's rendered output: filename, body lines, and unified diff."""

    name: str
    # Fix: annotated ``str`` originally, but _write_individual_result stores
    # the list of rendered lines here (joined with blank lines on write).
    res: List[str]
    diff: str
def _generate_diff(
old_lines: List[str],
new_lines: str,
append: bool,
filename: str,
) -> str:
if append:
c = list(old_lines)
c.extend(new_lines.splitlines())
new_content = c
else:
new_content = new_lines.splitlines()
diff = difflib.unified_diff(old_lines, new_content, fromfile=filename, tofile="new")
return "\n".join(diff)
def _write_individual_result(
    result: Result,
    dirname: str,
    attrs: List[str],
    failed: bool,
    severity_level: int,
    task_group: bool = False,
    write_host: bool = False,
    no_errors: bool = False,
    append: bool = False,
) -> None:
    """Render one Result into the module-level CONTENT buffer.

    Skips results below *severity_level*, results carrying an exception when
    *no_errors* is set, and results without a host name (the host name is
    used as the output filename).
    """
    individual_result = []
    # ignore results below the requested severity level
    if result.severity_level < severity_level:
        return
    # optionally ignore results that raised
    if no_errors:
        if result.exception:
            return
    # the host name doubles as the output filename
    if result.host and result.host.name:
        filename = result.host.name
    else:
        return
    filepath = os.path.join(dirname, filename)
    old_lines = _read_file(filepath)
    subtitle = (
        "" if result.changed is None else " ** changed : {} ".format(result.changed)
    )
    level_name = logging.getLevelName(result.severity_level)
    symbol = "v" if task_group else "-"
    # Fix: emit the actual host name; the original wrote a literal
    # "(unknown)" placeholder regardless of the host, although write_host
    # is documented as "Write hostname to file".
    host = f"{filename}: " if write_host else ""
    msg = "{} {}{}{}".format(symbol * 4, host, result.name, subtitle)
    individual_result.append(
        "{}{} {}".format(msg, symbol * (80 - len(msg)), level_name)
    )
    for attribute in attrs:
        x = getattr(result, attribute, "")
        if isinstance(x, BaseException):
            # for consistency between py3.6 and py3.7
            individual_result.append(f"{x.__class__.__name__}{x.args}")
        elif x and not isinstance(x, str):
            try:
                individual_result.append(
                    json.dumps(x, indent=2, ensure_ascii=False).encode("utf-8").decode()
                )
            except TypeError:
                individual_result.append(str(x))
        elif x:
            individual_result.append(x)
    lines = [entry.strip() for entry in individual_result]
    line = "\n\n".join(lines)
    diff = _generate_diff(old_lines, line, append, filepath)
    # namedtuple of (filename, rendered lines, unified diff)
    result_record = ResultRecord._make([filename, lines, diff])
    CONTENT.append(result_record)
def _write_results(
    result: Result,
    dirname: str,
    attrs: Optional[List[str]] = None,
    failed: bool = False,
    severity_level: int = logging.INFO,
    write_host: bool = False,
    count: Optional[int] = None,
    no_errors: bool = False,
    append: bool = False,
) -> None:
    """Recursively render Aggregated/Multi/plain Results into CONTENT.

    *count* keeps only the first ``count`` hosts of the sorted
    AggregatedResult, or the last ``-count`` hosts when negative.
    """
    attrs = attrs or ["diff", "result", "stdout"]
    if isinstance(attrs, str):
        attrs = [attrs]
    if isinstance(result, AggregatedResult):
        result = dict(sorted(result.items()))
        if isinstance(count, int):
            total = len(result)
            if count >= 0:
                bounds = (0, total and count)
            elif (total + count) < 0:
                bounds = (0, total)
            else:
                bounds = (total + count, total)
            # Fix: islice takes the iterable *and* the slice bounds.  The
            # original called dict(islice(result.items()), *bounds), which
            # raises TypeError before slicing anything.
            result = dict(islice(result.items(), *bounds))
        for host_data in result.values():
            _write_results(
                host_data,
                dirname,
                attrs,
                failed,
                severity_level,
                write_host,
                no_errors=no_errors,
                append=append,
            )
    elif isinstance(result, MultiResult):
        # first entry is the task-group header, the rest are sub-results
        _write_individual_result(
            result[0],
            dirname,
            attrs,
            failed,
            severity_level,
            task_group=True,
            write_host=write_host,
            no_errors=no_errors,
            append=append,
        )
        for r in result[1:]:
            _write_results(
                r,
                dirname,
                attrs,
                failed,
                severity_level,
                write_host,
                no_errors=no_errors,
                append=append,
            )
    elif isinstance(result, Result):
        _write_individual_result(
            result,
            dirname,
            attrs,
            failed,
            severity_level,
            write_host=write_host,
            no_errors=no_errors,
            append=append,
        )
def write_results(
    result: Result,
    dirname: str,
    vars: List[str] = None,
    failed: bool = False,
    severity_level: int = logging.INFO,
    write_host: bool = True,
    count: Optional[int] = None,
    no_errors: bool = False,
    append: bool = False,
) -> List[Tuple[str, str]]:
    """
    Write an object of type `nornir.core.task.Result` to files with hostname names
    Arguments:
        result: from a previous task (Result or AggregatedResult or MultiResult)
        vars: Which attributes you want to write (see ``class Result`` attributes)
        failed: if ``True`` assume the task failed
        severity_level: Print only errors with this severity level or higher
        write_host: Write hostname to file
        count: Number of sorted results. It's acceptable to use numbers with minus sign
            (-5 as example), then results will be from the end of results list
        no_errors: Don't write results with errors
        append: "a+" if ``True`` or "w+" if ``False``
    Returns:
        list of (filename, diff) tuples, one per rendered record.
    """
    Path(dirname).mkdir(parents=True, exist_ok=True)
    mode = "a+" if append else "w+"
    # The lock serializes writers: CONTENT is a module-level buffer shared
    # across threads.  (``with`` replaces the manual acquire/finally pair.)
    # NOTE(review): CONTENT is never cleared, so repeated calls re-write
    # records accumulated by earlier calls — confirm this is intended.
    with LOCK:
        _write_results(
            result,
            dirname,
            attrs=vars,
            failed=failed,
            severity_level=severity_level,
            write_host=write_host,
            count=count,
            no_errors=no_errors,
            append=append,
        )
        diffs = []
        for value in CONTENT:
            with open(os.path.join(dirname, value.name), mode=mode) as f:
                f.write("\n\n".join(value.res))
            diffs.append((value.name, value.diff))
        return diffs
|
from django.conf import settings
from django.db import models
import datetime
# Create your models here.
class Event(models.Model):
    """Calendar event with a Google Meet link."""

    title = models.CharField(verbose_name='Título',
                             max_length=50, blank=True, null=True)
    # NOTE(review): start/end store ISO-8601-looking strings in CharFields;
    # a DateTimeField seems more appropriate but would require a migration.
    start = models.CharField(verbose_name='Data Inicio',
                             blank=True, null=True, default='2021-12-11T22:30:00', max_length=20)
    end = models.CharField(verbose_name='Data Final',
                           blank=True, null=True, default='2021-12-11T22:30:00', max_length=20)
    url = models.URLField(verbose_name='URL Google Meet',
                          blank=True, null=True)

    class Meta:
        verbose_name = 'event'
        verbose_name_plural = 'events'

    def __str__(self):
        # Fix: title is nullable — returning None from __str__ raises
        # TypeError (e.g. in the admin); fall back to an empty string.
        return self.title or ''
|
import json
from flask import request, jsonify, g
from random import randint
import requests
from sqlalchemy import func
from . import main
from app.models import User, Box, BoxWithCard, Card, db, UserOpenid, Photo, Message
from app.utils.tool import Token, user_login_required, user_often_get_api, msg_unsafe, have_access_token, check_img
from config import redis_store, wx_appid, wx_secret, qq_appid, qq_secret, minute, check_img_trace_id_expire
from ..utils.TXcos.upload import upload_file
from ..utils.sms import send
# Obtain a WeChat (wx) openid
@main.route('/wx/login')
def wx_login():
    """Exchange a WeChat mini-program login code for an openid token.

    Query args:
        lg_code: login code obtained on the client via wx.login().

    Returns JSON with ``openid`` and a signed ``openid_token`` on success,
    or ``{"error": <errmsg>}`` when the WeChat API reports an errcode.
    """
    lg_code = request.args.get('lg_code')
    url = f"https://api.weixin.qq.com/sns/jscode2session?appid={wx_appid}&secret={wx_secret}&js_code={lg_code}&grant_type=authorization_code"
    rq = requests.get(url)
    rq_json = rq.json()
    print(rq_json)  # NOTE(review): debug print; consider proper logging
    if rq_json.get('errcode') is not None:
        # Any present errcode is treated as failure.
        # NOTE(review): an errcode of 0 would also land here — confirm the
        # wx API never returns errcode=0 on success (qq_login differs).
        data = {"error": rq_json.get('errmsg')}
        data = jsonify(data)
        return data
    else:
        openid = rq_json.get('openid')
        payload = {
            "openid": openid
        }
        openid_token = Token().create_token(payload)
        return jsonify(code=200, openid=openid, openid_token=openid_token)
# Obtain a QQ openid
@main.route('/qq/login')
def qq_login():
    """Exchange a QQ mini-program login code for an openid token.

    Query args:
        lg_code: login code obtained on the QQ client.

    Returns JSON with ``openid`` and a signed ``openid_token`` on success,
    or ``{"error": <errmsg>}`` when the QQ API reports a non-zero errcode.
    """
    lg_code = request.args.get('lg_code')
    url = f"https://api.q.qq.com/sns/jscode2session?appid={qq_appid}&secret={qq_secret}&js_code={lg_code}&grant_type=authorization_code"
    rq = requests.get(url)
    rq_json = rq.json()
    print(rq_json)  # NOTE(review): debug print
    if rq_json.get('errcode') != 0:
        # NOTE(review): unlike wx_login, a missing errcode is also treated
        # as an error here — confirm the QQ API always sends errcode=0 on
        # success.
        data = {"error": rq_json.get('errmsg')}
        data = jsonify(data)
        return data
    else:
        openid = rq_json.get('openid')
        payload = {
            "openid": openid
        }
        openid_token = Token().create_token(payload)
        return jsonify(code=200, openid=openid, openid_token=openid_token)
# Exchange an openid token for an API token (auto-registers new users)
@main.route('/openid/token', methods=['GET'])
def get_token():
    """Verify an openid_token, create the user on first login, return an API token."""
    openid_token = request.args.get('openid_token')
    # the openid token parameter is mandatory
    if openid_token is None:
        return jsonify(code=4000, msg="参数不完整")
    # decode the token to recover the openid
    try:
        token_data = Token().var_token(openid_token)
        openid = token_data.get('openid')
        print(openid, openid)
    except Exception as e:
        print(e)
        return jsonify(code=4003, msg="请重新登录")
    # is this openid already registered?
    user_openid = UserOpenid.query.filter_by(openid=openid).first()
    # first login: register a user account for this openid
    if user_openid is None:
        user = User(username=openid, avatar="https://hicaiji.com/avatar", password=openid[:6])
        try:
            db.session.add(user)
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            print(e)
            return jsonify(code=4001, msg="注册用户失败")
        # persist the openid -> user mapping
        user_openid = UserOpenid(openid=openid, avatar="https://hicaiji.com/avatar", user_id=user.id)
        try:
            db.session.add(user_openid)
            db.session.commit()
        except Exception as e:
            db.session.rollback()
            print(e)
            return jsonify(code=4002, msg="注册用户失败")
    # issue the API token for the mapped user id
    payload = {
        "id": user_openid.user.id
    }
    token = Token().create_token(payload)
    return jsonify(code=200, msg="获取token成功", openid=openid, token=token)
# Login for other clients // password verification not implemented yet
@main.route('/token/login', methods=['POST'])
def token_login():
    """Issue an API token by username.

    SECURITY NOTE(review): the password is read but never verified — any
    existing username can log in.  The original comment says password
    verification is deliberately left for downstream development; confirm
    before exposing this endpoint publicly.
    """
    print(request.path)
    req_json = request.get_json()
    username = req_json.get("username")
    password = req_json.get("password")
    if not all([username, password]):
        return jsonify(code=4000, msg="参数不完整"), 400
    user = User.query.filter_by(username=username).first()
    if user:
        # issue the token bound to this user id
        payload = {
            "id": user.id
        }
        token = Token().create_token(payload)
        return jsonify(code=200, msg="登录成功", token=token), 200
    else:
        return jsonify(code=4001, msg="登录失败"), 400
# Send an SMS verification code
@main.route("/sms", methods=["POST"])
@user_login_required
@user_often_get_api
def send_sms():
    """Generate a 6-digit code, cache it in redis, and SMS it to *phone*."""
    req_json = request.get_json()
    phone = req_json.get("phone")
    if not all([phone]):
        # missing parameter
        return jsonify(code=4000, msg="参数不完整")
    sms_code = randint(100000, 999999)  # generate the verification code
    print(sms_code, "验证码")
    print(phone, "手机号")
    # cache the real code for `minute` minutes
    try:
        redis_store.setex("sms_code_%s" % phone, minute * 60, sms_code)
        # record the send so the same phone cannot re-trigger within 60s
        redis_store.setex("send_sms_code_%s" % phone, 60, 1)
    except Exception as e:
        print(e)
        return jsonify(code=4003, msg="保存短信验证码异常,请稍后在试"), 400
    # actually deliver the SMS
    try:
        code = send.send_sms(phone, sms_code, minute)
        if code == "Ok":
            return jsonify({"code": 200, "msg": "发送成功"}), 200
        else:
            return jsonify({"code": 4004, "msg": "发送失败"}), 400
    except Exception as e:
        print(e)
        return jsonify(code=4004, msg="发送异常"), 400
# Verify an SMS code and bind the phone number
@main.route("/check/sms_code", methods=["POST"])
@user_login_required
def check_code():
    """Check the submitted SMS code and bind or merge the phone number.

    If the phone already belongs to another account, the current account's
    openids are re-pointed to that account; otherwise the phone is bound
    to the current account.
    """
    req_json = request.get_json()
    phone = req_json.get("phone")
    sms_code = req_json.get("code")
    user_id = g.user_id
    if not all([phone, sms_code, user_id]):
        return jsonify(code=4000, msg="参数不完整"), 400
    # fetch the real code from redis
    try:
        real_sms_code = redis_store.get("sms_code_%s" % phone)
    except Exception as e:
        print(e)
        return jsonify(code=4001, msg="读取真实短信验证码异常"), 400
    # code expired (or never sent)?
    if real_sms_code is None:
        return jsonify(code=4002, msg="短信验证码失效"), 400
    # delete the cached code so it cannot be replayed
    try:
        redis_store.delete("sms_code_%s" % phone)
    except Exception as e:
        print(e)
    # compare against what the user typed
    if str(real_sms_code) != str(sms_code):
        return jsonify(code=4003, msg="短信验证码错误"), 400
    # the current account
    cur_user = User.query.get(user_id)
    # is this phone already bound to some account?
    user = User.query.filter_by(phone=phone).first()
    # phone belongs to a different account: merge this account's openids into it
    if user and user.id != user_id:
        for uo in cur_user.user_openid:
            uo.user_id = user.id
            db.session.add(uo)
    # otherwise the phone is free (or already ours): bind it to this account
    else:
        cur_user.phone = phone
        db.session.add(cur_user)
    try:
        db.session.commit()
    except Exception as e:
        print(e)
        # roll back on failure
        db.session.rollback()
        # a unique-constraint violation means the phone is already registered
        return jsonify(code=4005, msg="手机号已存在"), 400
    return jsonify({"code": 200, "msg": "绑定手机成功"})
# Create a card box
@main.route("/box", methods=["POST"])
@user_login_required
@user_often_get_api
@have_access_token
def create_box():
    """Create an empty box (name, password, color) for the current user."""
    req_json = request.get_json()
    name = req_json.get("name")
    password = req_json.get("password")
    color = req_json.get("color")
    user_id = g.user_id
    if not all([name, password, color]):
        return jsonify(code=4000, msg="参数不完整")
    # run the user-supplied text through content moderation
    msg_data = name + "内容的拼接" + password
    access_token = g.access_token
    if msg_unsafe(access_token, msg_data):
        return jsonify(code=4010, msg="提交含有敏感内容")
    try:
        # the account must exist and be active
        user = User.query.filter_by(id=user_id, is_active=True).first()
        if user is None:
            return jsonify(code=4001, msg="用户不存在或者用户状态异常")
        # create the box and attach it to the user
        box = Box(name=name, password=password, color=color)
        user.boxes.append(box)
        db.session.add(box)
        db.session.add(user)
        # commit the changes
        try:
            db.session.commit()
        except Exception as e:
            print(e)
            db.session.rollback()
            return jsonify(code=4002, msg="创建失败")
        return jsonify(code=200, msg="创建成功", box_id=box.id, box_name=box.name)
    except Exception as e:
        print(e)
        return jsonify(code=4003, msg="创建失败")
# Clone a card box
@main.route("/box/clone", methods=["POST"])
@user_login_required
@user_often_get_api
def share_box():
    """Clone another box (identified by id + password) into the current
    user's account; the card objects are shared, not duplicated."""
    req_json = request.get_json()
    user_id = g.user_id
    box_id = req_json.get("box_id")
    password = req_json.get("password")
    if not all([user_id, box_id, password]):
        return jsonify(code=4000, msg="参数不完整")
    try:
        user = User.query.filter_by(id=user_id, is_active=True).first()
        if user is None:
            return jsonify(code=4001, msg="用户不存在或者用户状态异常")
        # box = Box.query.filter_by(id=box_id, is_active=True, password=password).first()
        box = Box.query.filter_by(id=box_id, password=password).first()
        if box is None:
            return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
        # new box pointing at the same card rows
        new_box = Box(name=box.name, password=password, color=box.color, cards=box.cards)
        db.session.add(new_box)
        user.boxes.append(new_box)
        db.session.add(user)
        try:
            db.session.commit()
            return jsonify(code=200, msg="克隆成功", new_box=new_box.id, box_name=new_box.name)
        except Exception as e:
            print(e)
            db.session.rollback()
            return jsonify(code=4003, msg="提交数据库变更失败")
    except Exception as e:
        print(e)
        return jsonify(code=4004, msg="克隆失败")
# Box homepage/introduction view
@main.route("/box/index", methods=["GET"])
@user_login_required
def get_boxes_index():
    """Return the public intro data of a shared box.

    Query args: box_id, box_password.  The box must have is_share=True.
    """
    box_id = request.args.get('box_id')
    box_password = request.args.get('box_password')
    if not all([box_id, box_password]):
        return jsonify(code=4000, msg="参数不完整")
    try:
        box = Box.query.filter_by(id=box_id, password=box_password, is_share=True).first()
        if not box:
            return jsonify(code=4001, msg="卡集不存在")
        else:
            # collect the box's public fields
            data = {
                "name": box.name,
                "box_id": box.id,
                "color": box.color,
                "password": box.password,
                # "clone_times": box.clone_times,
                "summary": box.summary,
                # "author_avatar": box.user.avatar,
                "cards_number": len(box.cards),
                "create_time": box.create_time.strftime("%Y-%m-%d %H:%M:%S")
            }
            # Fix: the original wrapped `data` in jsonify() and passed the
            # resulting Response object into another jsonify(), which raises
            # TypeError (Response is not JSON-serializable) and always fell
            # through to the 4002 handler.  Pass the plain dict instead.
            return jsonify(code=200, msg="获取成功", data=data)
    except Exception as e:
        print(e)
        return jsonify(code=4002, msg="查询出错")
# List my boxes
@main.route("/boxes", methods=["GET"])
@user_login_required
def get_boxes():
    """List the current user's active boxes, newest first."""
    user_id = g.user_id
    try:
        # the account must exist and be active
        user = User.query.filter_by(id=user_id, is_active=True).first()
        if user is None:
            return jsonify(code=4001, msg="用户不存在或者用户状态异常")
        payload = []
        boxes = Box.query.filter_by(is_active=True, user_id=user.id).order_by(Box.create_time.desc())
        for box in boxes:
            payload.append({
                "box_id": box.id,
                "box_name": box.name,
                "password": box.password,
                "box_color": box.color,
                "cards_count": len(box.cards),
                "create_time": box.create_time.strftime("%Y-%m-%d %H:%M:%S")
            })
        return jsonify(code=200, msg="查询成功", data=payload)
    except Exception as e:
        print(e)
        return jsonify(code=4002, msg="查询出错")
# Box store: list all publicly shared boxes
@main.route("/boxes/share/store", methods=["GET"])
@user_login_required
def get_boxes_share():
    """List every shared box, newest id first."""
    try:
        payload = []
        boxes = Box.query.filter_by(is_share=True).order_by(Box.id.desc())
        for box in boxes:
            payload.append({
                "box_id": box.id,
                "box_name": box.name,
                "password": box.password,
                "box_color": box.color,
                "create_time": box.create_time.strftime("%Y-%m-%d %H:%M:%S")
            })
        return jsonify(code=200, msg="查询成功", data=payload)
    except Exception as e:
        print(e)
        return jsonify(code=4002, msg="查询出错")
# Apply to share one of my boxes to the plaza
@main.route("/box/share/apply", methods=["POST"])
@user_login_required
def box_share_apply():
    """Submit one of the current user's boxes for share review, saving its summary."""
    user_id = g.user_id
    box_id = request.get_json().get("box_id")
    summary = request.get_json().get("summary")
    try:
        box = Box.query.filter_by(id=box_id, user_id=user_id, is_share=False, is_active=True).first()
        if not box:
            # NOTE(review): siblings use a 4xxx code for "not found"; kept
            # as 200 here to preserve the existing client contract.
            return jsonify(code=200, msg="要分享的盒子不存在")
        else:
            box.summary = summary
            db.session.add(box)
            try:
                db.session.commit()
                return jsonify(code=200, msg="申请分享成功,等待审核")
            except Exception as e:
                print(e)
                # Fix: roll back the failed commit so the session stays
                # usable, matching every other endpoint in this module.
                db.session.rollback()
                return jsonify(code=4002, msg="提交错误,请稍后重试")
    except Exception as e:
        print(e)
        return jsonify(code=4002, msg="查询数据库出错")
# List the boxes I have shared
@main.route("/boxes/share", methods=["GET"])
@user_login_required
def get_my_share_boxes():
    """List the current user's shared boxes, newest id first."""
    user_id = g.user_id
    try:
        payload = []
        boxes = Box.query.filter_by(user_id=user_id, is_share=True).order_by(Box.id.desc())
        for box in boxes:
            payload.append({
                "box_id": box.id,
                "box_name": box.name,
                "password": box.password,
                "box_color": box.color,
                "create_time": box.create_time.strftime("%Y-%m-%d %H:%M:%S")
            })
        return jsonify(code=200, msg="查询成功", data=payload)
    except Exception as e:
        print(e)
        return jsonify(code=4002, msg="查询出错")
# Withdraw one of my shared boxes
@main.route("/box/share", methods=["DELETE"])
@user_login_required
def delete_my_share_boxes():
    """Unshare a box (sets is_share=False; the box itself is kept)."""
    user_id = g.user_id
    box_id = request.args.get("box_id")
    try:
        box = Box.query.filter_by(id=box_id, user_id=user_id, is_share=True).first()
        if not box:
            # NOTE(review): siblings use a 4xxx code for "not found"; kept
            # as 200 here to preserve the existing client contract.
            return jsonify(code=200, msg="分享的盒子不存在")
        else:
            box.is_share = False
            db.session.add(box)
            try:
                db.session.commit()
                return jsonify(code=200, msg="删除成功")
            except Exception as e:
                print(e)
                # Fix: roll back the failed commit so the session stays
                # usable, matching the other endpoints in this module.
                db.session.rollback()
                return jsonify(code=4002, msg="删除出错")
    except Exception as e:
        print(e)
        return jsonify(code=4002, msg="查询数据库出错")
# Soft-delete a card box
@main.route("/box", methods=["DELETE"])
@user_login_required
@user_often_get_api
def delete_box():
    """Soft-delete a box owned by the current user (sets is_active=False)."""
    req_json = request.get_json()
    box_id = req_json.get("box_id")
    user_id = g.user_id
    # parameters must be complete
    if not all([box_id, user_id]):
        return jsonify(code=4000, msg="参数不完整")
    # the account must exist and be active
    user = User.query.filter_by(id=user_id, is_active=True).first()
    if user is None:
        return jsonify(code=4001, msg="用户不存在或者用户状态异常")
    # the box must exist, be active, and belong to this user
    box = Box.query.filter_by(id=box_id, is_active=True, user_id=user.id).first()
    if box is None:
        return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
    else:
        box.is_active = False
        try:
            db.session.commit()
        except Exception as e:
            print(e)
            db.session.rollback()
            return jsonify(code=4003, msg="删除卡集失败")
        return jsonify(code=200, msg="删除卡集成功")
# Update a card box
@main.route("/box", methods=["PUT"])
@user_login_required
@user_often_get_api
@have_access_token
def mod_box():
    """Update a box's name, password and color (owner only)."""
    req_json = request.get_json()
    box_id = req_json.get("box_id")
    password = req_json.get("password")
    color = req_json.get("color")
    box_name = req_json.get("box_name")
    user_id = g.user_id
    # parameters must be complete
    if not all([box_id, password, box_name, color]):
        return jsonify(code=4000, msg="参数不完整")
    # content moderation on the user-supplied text
    msg_data = password + "内容的过度" + box_name
    access_token = g.access_token
    if msg_unsafe(access_token, msg_data):
        return jsonify(code=4010, msg="提交含有敏感内容")
    # # 用户存在且正常
    # user = User.query.filter_by(id=user_id, is_active=True).first()
    # if user is None:
    #     return jsonify(code=4001, msg="用户不存在或者用户状态异常")
    # the box must exist, be active, and belong to this user
    box = Box.query.filter_by(id=box_id, is_active=True, user_id=user_id).first()
    if box is None:
        return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
    else:
        box.name = box_name
        box.password = password
        box.color = color
        try:
            db.session.add(box)
            db.session.commit()
        except Exception as e:
            print(e)
            db.session.rollback()
            return jsonify(code=4003, msg="修改卡集失败")
        return jsonify(code=200, msg="修改卡集成功")
# Create a card
@main.route("/card", methods=["POST"])
@user_login_required
@user_often_get_api
@have_access_token
def create_card():
    """Create a card (question/answer/rich-text delta) inside one of the
    current user's boxes."""
    req_json = request.get_json()
    question = req_json.get("question")
    answer = req_json.get("answer")
    answer_html = req_json.get("answer_html")
    delta = req_json.get("delta")
    box_id = req_json.get("box_id")
    color = req_json.get("color")
    user_id = g.user_id  # identity from auth decorator; could be cached session/token
    if not all([question, answer, box_id, color, delta,answer_html]):
        return jsonify(code=4000, msg="参数不完整")
    # content moderation on the user-supplied text
    msg_data = question + "内容的过度" + answer
    access_token = g.access_token
    if msg_unsafe(access_token, msg_data):
        return jsonify(code=4010, msg="提交含有敏感内容")
    try:
        # the box must exist, be active, and belong to this user
        box = Box.query.filter_by(id=box_id, user_id=user_id, is_active=True).first()
        if box is None:
            return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
        delta = json.dumps(delta)  # stored as string; the client re-parses it via JSON.parse(delta)
        # create the card and attach it to the box
        card = Card(question=question, answer=answer, color=color, delta=delta,answer_html=answer_html)
        # card = Card(question=question, answer=answer, color=color)
        box.cards.append(card)
        db.session.add(card)
        db.session.add(box)
        # commit the changes
        try:
            db.session.commit()
        except Exception as e:
            print(e)
            db.session.rollback()
            return jsonify(code=4003, msg="提交数据失败。数据库出现异常")
        return jsonify(code=200, msg="创建成功", card_id=card.id, card_question=card.question)
    except Exception as e:
        print(e)
        db.session.rollback()
        return jsonify(code=4004, msg="创建失误")
# Modify a card.  v
@main.route("/card", methods=["PUT"])
@user_login_required
@user_often_get_api
@have_access_token
def mod_card():
    """
    Update a card's content.
    Parameters (JSON body):
        question, color, answer, answer_html, delta, card_id, box_id
    The caller's identity comes from g.user_id.
    If the card is referenced by more than one box, a fresh card is created
    so the other references keep the old content; otherwise the existing
    card is edited in place.
    :return: JSON response with code/msg (and card_id on success)
    """
    req_json = request.get_json()
    question = req_json.get("question")
    color = req_json.get("color")
    answer = req_json.get("answer")
    answer_html = req_json.get("answer_html")
    print(answer_html,"html")
    delta = req_json.get("delta")
    delta = json.dumps(delta)  # serialize the editor delta for storage
    card_id = req_json.get("card_id")
    box_id = req_json.get("box_id")
    user_id = g.user_id  # used for authentication; could be cached via session or token
    # Sensitive-content screening.
    # NOTE(review): question/answer are concatenated before any presence
    # check -- if either is missing this raises TypeError outside the try
    # below (HTTP 500).  Confirm the client always sends both.
    msg_data = question + "内容的过度" + answer
    access_token = g.access_token
    if msg_unsafe(access_token, msg_data):
        return jsonify(code=4010, msg="提交含有敏感内容")
    try:
        # The account must exist and be active.
        user = User.query.filter_by(id=user_id, is_active=True).first()
        if user is None:
            return jsonify(code=4001, msg="用户不存在或者用户状态异常")
        # The box must belong to the caller.
        box = Box.query.filter_by(id=box_id, is_active=True, user_id=user.id).first()
        if box is None:
            return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
        # The card must be linked to this box.
        box_with_card = BoxWithCard.query.filter_by(card_id=card_id, box_id=box_id).first()
        if box_with_card is None:
            return jsonify(code=4003, msg="找不到此卡片")
        # Is the card owned solely by this box, or shared with others?
        print(box_with_card.card.get_status, "查找卡被多少引用")
        flag = len(box_with_card.card.get_status)
        # More than one reference: copy-on-write.
        if flag > 1:
            # Create a new card so the other references keep the old content.
            card = Card(question=question, answer=answer, color=color, delta=delta,answer_html=answer_html)
            db.session.add(card)
            # Commit now so card.id gets populated.
            try:
                db.session.commit()
            except Exception as e:
                print(e)
                db.session.rollback()
                return jsonify(code=4004, msg="修改卡失败")
            # Re-point the association row at the new card.
            box_with_card.card_id = card.id
            db.session.add(box_with_card)
            msg = "新增卡片"
        # Only this box references the card.
        else:
            # Edit the existing card in place.
            box_with_card.card.answer = answer
            box_with_card.card.question = question
            box_with_card.card.color = color
            box_with_card.card.delta = delta
            box_with_card.card.answer_html = answer_html
            db.session.add(box_with_card.card)
            msg = "直接修改"
        try:
            db.session.commit()
        except Exception as e:
            print(e)
            db.session.rollback()
            return jsonify(code=4005, msg="提交数据库变更失败")
        return jsonify(code=200, msg=msg, card_id=box_with_card.card_id)
    except Exception as e:
        print(e)
        return jsonify(code=4006, msg="提交数据库变更失败")
# Delete a card.  v
@main.route("/card", methods=["DELETE"])
@user_login_required
@user_often_get_api
def delete_card():
    """
    Remove the association between a box and a card (the card row itself
    is left untouched).
    :return: JSON with code/msg and how many association rows were removed
    """
    body = request.get_json()
    box_id = body.get("box_id")
    card_id = body.get("card_id")
    # The caller must be an active user...
    owner = User.query.filter_by(id=g.user_id, is_active=True).first()
    if owner is None:
        return jsonify(code=4001, msg="用户不存在或者用户状态异常")
    # ...and must own an active box with this id.
    box = Box.query.filter_by(id=box_id, is_active=True, user_id=owner.id).first()
    if box is None:
        return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
    # Bulk-delete the association rows; returns the number removed.
    removed = BoxWithCard.query.filter_by(box_id=box_id, card_id=card_id, is_active=True).delete()
    try:
        db.session.commit()
        return jsonify(code=200, msg="操作删除卡片成功", delete_amount=removed)
    except Exception as e:
        print(e)
        return jsonify(code=4003, msg="操作删除卡片失败")
# Mark card status.  v
@main.route('/card/status', methods=["PUT"])
@user_login_required
@user_often_get_api
def mark_status():
    """
    Update per-box card state: bump the error counter, toggle the
    understood flag, or toggle the collected flag.
    JSON body: box_id, card_id, status in
    {error, understand, un_understand, collect, un_collect}.
    :return: JSON with code/msg
    """
    body = request.get_json()
    box_id = body.get("box_id")
    status = body.get("status")
    card_id = body.get("card_id")
    user_id = g.user_id
    try:
        if not all([box_id, status, card_id, user_id]):
            return jsonify(code=4000, msg="参数不完整")
        # Caller must be an active account.
        if User.query.filter_by(id=user_id, is_active=True).first() is None:
            return jsonify(code=4001, msg="用户不存在或者用户状态异常")
        # The box must exist and be active.
        if Box.query.filter_by(id=box_id, is_active=True).first() is None:
            return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
        link = BoxWithCard.query.filter_by(box_id=box_id, card_id=card_id).first()
        if not link:
            return jsonify(code=4003, msg="找不到对应卡片")
        # Boolean-flag statuses map to (attribute, value); "error" is special.
        flag_updates = {
            "understand": ("understand", True),
            "un_understand": ("understand", False),
            "collect": ("collection", True),
            "un_collect": ("collection", False),
        }
        if status == "error":
            link.error_times += 1
        elif status in flag_updates:
            attr, value = flag_updates[status]
            setattr(link, attr, value)
        else:
            return jsonify(code=4004, msg="参数错误")
        # Persist the change.
        try:
            db.session.add(link)
            db.session.commit()
            return jsonify(code=200, msg="操作成功")
        except Exception as e:
            print(e)
            db.session.rollback()
            return jsonify(code=4004, msg="提交数据操作失败")
    except Exception as e:
        print(e)
        return jsonify(code=4005, msg="操作失败")
# List cards.  (The twelve near-identical query branches were collapsed
# into one parameterized query -- the optimization the old comment asked for.)
@main.route("/cards", methods=["GET"])
@user_login_required
def get_cards():
    """Return the caller's cards, filtered, ordered and paginated.

    Query-string parameters:
        all_box_id:  "all" or a stringified id list like "[1,2,3]"
        card_status: collect | understand | un_understand | all
                     (invalid values fall back to un_understand)
        order:       random | up | down | error_times (fallback: up)
        page, limit: pagination; fall back to 1 / 10 on bad input
    :return: JSON with code/msg/data (list of card dicts)
    """
    args = request.args
    all_box_id = args.get("all_box_id")
    card_status = args.get("card_status")
    order = args.get("order")
    page = args.get("page")
    limit = args.get("limit")
    user_id = g.user_id
    if not all([all_box_id, card_status, order, page, limit]):
        return jsonify(code=4000, msg="参数不完整")
    # Resolve which boxes the query covers: all of the user's boxes, or an
    # explicit id list ('[1,2,3]' arrives as a string, not a list).
    if all_box_id == "all":
        boxes = Box.query.filter_by(is_active=True, user_id=user_id).all()
    else:
        box_id_list = [int(item) for item in all_box_id.strip('[]').split(',')]
        boxes = Box.query.filter(Box.id.in_(box_id_list)).filter_by(
            is_active=True, user_id=user_id).all()
    if not boxes:
        return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
    box_ids = [box.id for box in boxes]
    # Clamp filter/order parameters to safe defaults.
    if card_status not in ["collect", "understand", "un_understand", "all"]:
        card_status = "un_understand"
    if order not in ["random", "up", "down", "error_times"]:
        order = "up"
    try:
        page = int(page)
        limit = int(limit)
    except Exception as e:
        print(e)
        page = 1
        limit = 10
    # Status -> extra filter_by kwargs ("all" applies no extra filter).
    status_filters = {
        "all": {},
        "collect": {"collection": True},
        "understand": {"understand": True},
        "un_understand": {"understand": False},
    }
    query = db.session.query(BoxWithCard).filter(BoxWithCard.box_id.in_(box_ids))
    extra = status_filters[card_status]
    if extra:
        query = query.filter_by(**extra)
    query = query.join(Card, Card.id == BoxWithCard.card_id)
    # "random" returns a plain LIMITed sample; the others are paginated.
    if order == "random":
        bwc = query.order_by(func.rand()).limit(limit)
    elif order == "up":
        bwc = query.order_by(Card.id).paginate(page, per_page=limit, error_out=False).items
    elif order == "down":
        bwc = query.order_by(Card.id.desc()).paginate(page, per_page=limit, error_out=False).items
    else:  # error_times: most-missed cards first
        bwc = query.order_by(BoxWithCard.error_times.desc()).paginate(
            page, per_page=limit, error_out=False).items
    payload = [{
        "box_id": link.box_id,
        "box_name": link.box.name,
        "card_id": link.card_id,
        "card_color": link.card.color,
        "question": link.card.question,
        "answer": link.card.answer,
        "delta": link.card.delta,
        "error_times": link.error_times,
        "understand": link.understand,
        "collection": link.collection,
    } for link in bwc]
    return jsonify(code=200, msg="查询成功", data=payload)
# List cards of shared boxes.
@main.route("/share/cards", methods=["GET"])
@user_login_required
def get_share_cards():
    """Return cards from shared boxes, ordered and paginated.

    Query-string parameters mirror ``get_cards``.  ``card_status`` is
    validated for interface compatibility but (as before) never applied:
    the shared listing always returns every card.  ``order`` is clamped to
    random/up/down, which made the old error_times branch unreachable --
    it has been removed.
    :return: JSON with code/msg/summary/data
    """
    args = request.args
    all_box_id = args.get("all_box_id")
    card_status = args.get("card_status")
    order = args.get("order")
    page = args.get("page")
    limit = args.get("limit")
    user_id = g.user_id
    if not all([all_box_id, card_status, order, page, limit]):
        return jsonify(code=4000, msg="参数不完整")
    # Resolve the target boxes.  Note an explicit id list only requires the
    # boxes to be shared, not owned by the caller.
    if all_box_id == "all":
        boxes = Box.query.filter_by(is_share=True, user_id=user_id).all()
    else:
        # '[1,2,3]' arrives as a string; parse it into an int list.
        box_id_list = [int(item) for item in all_box_id.strip('[]').split(',')]
        boxes = Box.query.filter(Box.id.in_(box_id_list)).filter_by(is_share=True).all()
    if not boxes:
        return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
    box_ids = [box.id for box in boxes]
    # Clamp parameters to safe defaults.
    if card_status not in ["collect", "understand", "un_understand", "all"]:
        card_status = "un_understand"
    if order not in ["random", "up", "down"]:
        order = "up"
    try:
        page = int(page)
        limit = int(limit)
    except Exception as e:
        print(e)
        page = 1
        limit = 10
    query = db.session.query(BoxWithCard).filter(BoxWithCard.box_id.in_(box_ids)). \
        join(Card, Card.id == BoxWithCard.card_id)
    if order == "random":
        bwc = query.order_by(func.rand()).limit(limit)
    elif order == "up":
        bwc = query.order_by(Card.id).paginate(page, per_page=limit, error_out=False).items
    else:  # "down"
        bwc = query.order_by(Card.id.desc()).paginate(page, per_page=limit, error_out=False).items
    payload = []
    summary = ''
    for link in bwc:
        payload.append({
            "box_id": link.box_id,
            "box_name": link.box.name,
            "card_id": link.card_id,
            "card_color": link.card.color,
            "question": link.card.question,
            "answer": link.card.answer,
            "delta": link.card.delta,
            "error_times": link.error_times,
            "understand": link.understand,
            "collection": link.collection
        })
        summary = link.box.summary  # summary of the last box seen (as before)
    return jsonify(code=200, msg="查询分享成功", summary=summary, data=payload)
# Card counts for the home page.
@main.route("/count", methods=["GET"])
@user_login_required
def count_card():
    """Return collected / understood / not-understood card counts for the
    caller's active boxes.

    The three copy-pasted queries were folded into one helper, and the
    redundant ``order_by`` before ``count()`` was dropped (ordering has no
    effect on an aggregate count).
    :return: JSON with code and a count dict
    """
    user_id = g.user_id
    boxes = Box.query.filter_by(is_active=True, user_id=user_id).all()
    if not boxes:
        return jsonify(code=4002, msg="卡集不存在或者你不是卡集拥有者")
    box_ids = [box.id for box in boxes]

    def _count(**flags):
        # COUNT of association rows in the caller's boxes matching *flags*.
        return db.session.query(BoxWithCard). \
            filter(BoxWithCard.box_id.in_(box_ids)). \
            filter_by(**flags). \
            join(Card, Card.id == BoxWithCard.card_id).count()

    return jsonify(code=200, count={
        "collect_count": _count(collection=True),
        "understand_count": _count(understand=True),
        "un_understand_count": _count(understand=False),
    })
# Fetch the caller's profile.  v
@main.route("/user/profile", methods=["GET"])
@user_login_required
def get_profile():
    """
    Return the authenticated user's profile:
    id, username, phone, registration time, linked openids, and the
    counts of feedback messages and uploaded photos.
    :return: JSON with code/msg/data
    """
    user = User.query.filter_by(id=g.user_id, is_active=True).first()
    if not user:
        return jsonify(code=400, msg="查询不到用户")
    try:
        profile = {
            "id": user.id,
            "username": user.username,
            "phone": user.phone,
            "create_time": user.create_time.strftime("%Y-%m-%d %H:%M:%S"),
            "openids": [uo.openid for uo in user.user_openid],
            "messages": len(user.messages),
            "photos": len(user.photos),
        }
        return jsonify(dict(code=200, msg="查询用户信息成功!", data=profile))
    except Exception as e:
        print(e)
        return jsonify(code=4000, msg="出错了", data=[])
# User feedback.  v
@main.route("/feedback", methods=["POST"])
@user_login_required
@user_often_get_api
@have_access_token
def message():
    """Store a user feedback message after a content-safety check.

    JSON body: content (feedback text), connect (contact info).
    :return: JSON with code/msg
    """
    body = request.get_json()
    user_id = g.user_id
    content = body.get("content")
    connect = body.get("connect")
    if not all([user_id, connect, content]):
        return jsonify(code=4000, msg="参数不完整")
    # Screen the combined text for sensitive content.
    if msg_unsafe(g.access_token, content + "内容的过度" + connect):
        return jsonify(code=4010, msg="提交含有敏感内容")
    feedback = Message(user_id=user_id, content=content, connect=connect)
    try:
        db.session.add(feedback)
        db.session.commit()
        return jsonify(code=200, msg="你的反馈发送成功,感谢你的反馈")
    except Exception as e:
        print(e)
        db.session.rollback()
        return jsonify(code=400, msg="操作数据库失败,请稍后再试")
# Upload user images/files.  v
@main.route("/user/file", methods=["POST"])
@user_login_required
@have_access_token
def upload_user_photo():
    """
    1. Upload each image to Tencent Cloud COS.
    2. Submit the image URL to the image-safety check.
    3. Keep the trace id the safety check returns.
    4. Stash user_id + url in redis under that trace id.
    5. A later callback persists the record to MySQL.

    Fixes over the previous version:
    - ``request.files.getlist`` returns an empty list, never ``None``, so
      the no-file check now uses ``not image_files``.
    - each uploaded URL is appended to ``file_urls`` so the response no
      longer always returns an empty list.
    :return: JSON with code/msg and the uploaded urls
    """
    # The auth decorator already stored user_id on g.
    user_id = g.user_id
    image_files = request.files.getlist("file")
    if not image_files:  # getlist never yields None; [] means nothing sent
        return jsonify(code=4000, msg="未上传图片")
    file_urls = []
    for image_file in image_files:
        try:
            # 1. Push the file to COS under a per-user prefix.
            cos_path = '/card/user/' + str(user_id) + '/'
            file_url = upload_file(image_file, path=cos_path)
        except Exception as e:
            print(e)
            return jsonify(code=400, msg="上传图片失败")
        # 2./3. Run the safety check and keep its trace id.
        access_token = g.access_token
        trace_id = check_img(access_token, file_url)
        print(trace_id)
        # 4. Remember who uploaded what until the async callback fires.
        data = {
            "user_id": user_id,
            "file_url": file_url
        }
        try:
            redis_store.setex("check_img_%s" % trace_id, check_img_trace_id_expire * 60, json.dumps(data))
        except Exception as e:
            # Best-effort: a redis hiccup should not fail the upload.
            print(e)
        file_urls.append(file_url)
    return jsonify(code=200, msg="等待验证图片", data={"url": file_urls})
# List the user's uploaded images/files.  v
@main.route("/user/file", methods=["GET"])
@user_login_required
def user_photos():
    """Return the caller's uploaded photos, newest first."""
    # The login decorator already stored user_id on g, so read it directly.
    user_id = g.user_id
    photos = Photo.query.filter_by(user_id=user_id).order_by(Photo.create_time.desc())
    payload = []
    for p in photos:
        # NOTE(review): this f-string has no placeholders, so md_url is always
        # just "\n\n".  It was presumably meant to be a markdown image link
        # such as f"![]({p.url})\n\n" -- confirm against the frontend before
        # changing it.
        md_url = f"\n\n"
        data = {"url": p.url, "create_time": p.create_time.strftime("%Y-%m-%d"), "md_url": md_url,
                "small_url": p.url + "?imageMogr2/thumbnail/500x"}
        payload.append(data)
    return jsonify(code=200, msg="获取成功", data=payload)
# Delete one of the user's uploaded images/files.  v
@main.route("/user/file", methods=["DELETE"])
@user_login_required
@user_often_get_api
def delete_user_photos():
    """Delete the caller's photo rows matching the given url.

    JSON body: url (the photo url to delete).
    :return: JSON with code/msg
    """
    # user_id was stashed on g by the auth decorator.
    user_id = g.user_id
    url = request.get_json().get("url")
    deleted = Photo.query.filter(Photo.url == url, Photo.user_id == user_id).delete()
    try:
        db.session.commit()
    except Exception as e:
        print(e)
        db.session.rollback()
        return jsonify(code=4000, msg="数据库错误")
    print(deleted)
    if deleted >= 1:
        return jsonify(code=200, msg="删除成功")
    else:
        return jsonify(code=4001, msg="不存在此图片")
|
import os
import random
import argparse
import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
from transformers import AutoTokenizer, EncoderDecoderModel
from rouge_score import rouge_scorer
from models import EncoderDecoderModelWithGates, EncoderModelWithGates
from scorers import WRR, bleu_score
pd.options.display.max_columns = 1000
if __name__ == '__main__':
    # ---- command line ------------------------------------------------------
    parser = argparse.ArgumentParser(prog="Denoise trainer", conflict_handler='resolve')
    parser.add_argument('--data_file', type=str, help='Path to the data file', required=True)
    parser.add_argument('--model_path', type=str, help='Path to save trained model', required=True)
    parser.add_argument('--min_len_src', type=int, help='Minimum length of source texts', required=False, default=20)
    parser.add_argument('--max_len_src', type=int, help='Maximum length of source texts', required=False, default=300)
    parser.add_argument('--min_len_tgt', type=int, help='Minimum length of target texts', required=False, default=20)
    parser.add_argument('--max_len_tgt', type=int, help='Maximum length of target texts', required=False, default=300)
    parser.add_argument('--model_type', type=str, help='Type of models - seq2seq, BART, T5', required=True)
    parser.add_argument('--pretrained_encoder_path', type=str, help='Pretrained encoder model name', required=True)
    parser.add_argument('--pretrained_decoder_path', type=str, help='Pretrained decoder model name', required=False, default=None)
    parser.add_argument('--mask_gate', help='Indicator for masking gate', default=False, action="store_true")
    parser.add_argument('--copy_gate', help='Indicator for copy gate', default=False, action="store_true")
    parser.add_argument('--generate_gate', help='Indicator for generate gate', default=False, action="store_true")
    parser.add_argument('--skip_gate', help='Indicator for skip gate', default=False, action="store_true")
    parser.add_argument('--seed', type=int, help='Random seed', required=False, default=66)
    parser.add_argument('--teacher_forcing', type=int, help='Teacher Forcing', required=False, default=1)
    args, _ = parser.parse_known_args()
    try:
        assert args.model_type in ['seq2seq','bart','t5']
    except:
        raise ValueError("Model type not in ['seq2seq','bart', 't5']")
    #try:
    #    assert (args.model_type == 'seq2seq' and args.pretrained_encoder_path and args.pretrained_decoder_path) or (args.model_type != 'seq2seq' and args.pretrained_encoder_path)
    #except:
    #    raise ValueError("Check the pretrained paths")
    # ---- data loading ------------------------------------------------------
    # Expects a CSV with a 'source' column; 'target' is optional (when present
    # the script also computes WRR/BLEU/ROUGE scores below).
    val = pd.read_csv(args.data_file)
    val = val.dropna().reset_index(drop=True)
    try:
        assert ('source' in val.columns)
    except:
        raise ValueError("Source column not found in data")
    # Tokenizers: decoder falls back to the encoder's tokenizer when no
    # separate decoder checkpoint was given.
    encoder_tokenizer = AutoTokenizer.from_pretrained(args.pretrained_encoder_path)
    if args.pretrained_decoder_path:
        decoder_tokenizer = AutoTokenizer.from_pretrained(args.pretrained_decoder_path)
    else:
        decoder_tokenizer = encoder_tokenizer
    # Encode sources (and targets if available) padded/truncated to fixed length.
    valX = torch.Tensor(np.asarray([encoder_tokenizer.encode(i, max_length=args.max_len_src, truncation=True, padding='max_length', add_special_tokens=True) \
        for i in tqdm(val.source.values)]))
    if 'target' in val.columns:
        valy = torch.Tensor(np.asarray([decoder_tokenizer.encode(i, max_length=args.max_len_tgt, truncation=True, padding='max_length', add_special_tokens=True) \
            for i in tqdm(val.target.values)]))
    # Cast token ids to int64 as required by embedding layers.
    valX = torch.tensor(valX, dtype=torch.long)
    if 'target' in val.columns:
        valy = torch.tensor(valy, dtype=torch.long)
    # ---- gate configuration ------------------------------------------------
    gates = []
    if args.mask_gate == True:
        gates.append('mask')
    if args.copy_gate == True:
        gates.append('copy')
    if args.generate_gate == True:
        gates.append('generate')
    if args.skip_gate == True:
        gates.append('skip')
    print ("Running model with {} gates".format(gates))
    # ---- model construction ------------------------------------------------
    if args.model_type == 'seq2seq':
        if args.pretrained_decoder_path:
            model = EncoderDecoderModelWithGates.from_encoder_decoder_pretrained(args.pretrained_encoder_path, args.pretrained_decoder_path, gates=gates)
        else:
            # Single checkpoint: load a plain encoder-decoder and wrap it.
            model = EncoderDecoderModel.from_pretrained(args.pretrained_encoder_path)
            model = EncoderDecoderModelWithGates(config=model.config,encoder=model.encoder, decoder=model.decoder, gates=gates)
        model.config.encoder.max_length = args.max_len_src
        model.config.decoder.max_length = args.max_len_tgt
        model.config.encoder.min_length = args.min_len_src
        model.config.decoder.min_length = args.min_len_tgt
        model.encoder_tokenizer = encoder_tokenizer
        model.decoder_tokenizer = decoder_tokenizer
    else:
        model = EncoderModelWithGates(args.model_type, args.pretrained_encoder_path, gates=gates)
        model.encoder.config.max_length = args.max_len_src
        model.decoder.config.max_length = args.max_len_tgt
        model.encoder.config.min_length = args.min_len_src
        model.decoder.config.min_length = args.min_len_tgt
        model.encoder_tokenizer = encoder_tokenizer
        model.decoder_tokenizer = decoder_tokenizer
    # T5 has no [MASK] token; use its first sentinel token instead.
    if args.model_type == 't5':
        encoder_mask_id = encoder_tokenizer.additional_special_tokens_ids[0]
        decoder_mask_id = decoder_tokenizer.additional_special_tokens_ids[0]
    else:
        encoder_mask_id = encoder_tokenizer.mask_token_id
        decoder_mask_id = decoder_tokenizer.mask_token_id
    #print ("Total number of parameters {}".format(sum(p.numel() for p in model.parameters() if p.requires_grad == True)))
    # ---- reproducibility & device ------------------------------------------
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    torch.cuda.empty_cache()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    if 'target' in val.columns:
        val_data_loader = torch.utils.data.DataLoader(
            torch.utils.data.TensorDataset(valX,valy), batch_size=4)
    else:
        val_data_loader = torch.utils.data.DataLoader(
            torch.utils.data.TensorDataset(valX), batch_size=4)
    # Restore trained weights saved by the training script.
    model.load_state_dict(torch.load(os.path.join(args.model_path,'model.pth')))
    model.eval()
    all_val_logits = []
    all_generate_probs = []
    all_copy_probs = []
    all_masking_probs = []
    all_skip_probs = []
    # Evaluate data for one epoch
    for batch in tqdm(val_data_loader):
        input_ids = batch[0].to(device)
        if 'target' in val.columns:
            output_ids = batch[1].to(device)
        with torch.no_grad():
            # Forward pass, calculate logit predictions.
            # With teacher forcing, the gold target ids are fed as labels;
            # otherwise the model decodes freely.
            if 'target' in val.columns and args.teacher_forcing == 1:
                outputs, generate_prob, copy_prob,masking_prob, skip_prob = model(input_ids=input_ids, encoder_mask_token_id = torch.tensor([[encoder_mask_id]]).to(device),\
                    decoder_mask_token_id = decoder_mask_id, labels=output_ids, return_dict=True)
            else:
                outputs, generate_prob, copy_prob,masking_prob, skip_prob = model(input_ids=input_ids, encoder_mask_token_id = torch.tensor([[encoder_mask_id]]).to(device),\
                    decoder_mask_token_id = decoder_mask_id, return_dict=True)
        logits = outputs.logits
        logits = logits.detach().cpu().numpy()
        # Greedy decoding: take the argmax token per position.
        all_val_logits.extend(logits.argmax(-1))
        all_generate_probs.extend(generate_prob.detach().cpu().numpy())
        all_copy_probs.extend(copy_prob.detach().cpu().numpy())
        all_masking_probs.extend(masking_prob.detach().cpu().numpy())
        all_skip_probs.extend(skip_prob.detach().cpu().numpy())
    predicted_texts = []
    # presumably a safety net for ragged batch outputs -- TODO confirm
    if len(all_val_logits) != val.shape[0]:
        all_val_logits = np.concatenate(all_val_logits, axis=0)
    #all_generate_probs = np.concatenate(all_generate_probs, axis=0)
    #all_copy_probs = np.concatenate(all_copy_probs, axis=0)
    #all_masking_probs = np.concatenate(all_masking_probs, axis=0)
    #all_skip_probs = np.concatenate(all_skip_probs, axis=0)
    #print (all_generate_probs.shape, all_copy_probs.shape, np.asarray(all_masking_probs).shape, all_skip_probs.shape)
    #print (all_val_logits.shape)
    # ---- detokenize predictions --------------------------------------------
    for i in all_val_logits:
        text = decoder_tokenizer.decode(i)
        # Strip special tokens left in the decoded string.
        text = text.replace('<s>','')
        text = text.replace('</s>','')
        text = text.replace('<pad>','')
        #text = [k for k in text if k not in ['<s>','</s>','<pad>']]
        predicted_texts.append(text.strip())
        #predicted_texts.append(" ".join(text).strip())
    val['predicted_target'] = predicted_texts
    # Drop rows whose source exceeds the model's max source length.
    val['text_len'] = val.source.apply(lambda x: len(encoder_tokenizer.encode(x, max_length=512, add_special_tokens=True)))
    val = val[val.text_len < args.max_len_src].reset_index(drop=True)
    # ---- scoring & output --------------------------------------------------
    scorer = rouge_scorer.RougeScorer(['rouge1','rougeL'], use_stemmer=True)
    if 'target' in val.columns:
        val['WRR'] = val.apply(lambda x: WRR(x.target, x.predicted_target), axis=1)
        val['BLEU'] = val.apply(lambda x: bleu_score(x.target, x.predicted_target), axis=1)
        val['Rogue'] = val.apply(lambda x: scorer.score(x.target.lower(),x.predicted_target.lower())['rougeL'].fmeasure,axis=1)
        print (val[['WRR','BLEU','Rogue']].describe())
    val.to_csv(os.path.join(args.model_path,'validation_output.csv'),index=False)
    # Gate probabilities are saved best-effort; shapes may not stack for
    # ragged outputs, in which case saving is silently skipped.
    try:
        np.save(os.path.join(args.model_path,'generate_probs.npy'), np.asarray(all_generate_probs)[:,:,0])
        np.save(os.path.join(args.model_path,'copy_probs.npy'), np.asarray(all_copy_probs)[:,:,0])
        np.save(os.path.join(args.model_path,'mask_probs.npy'), np.asarray(all_masking_probs)[:,:,0])
        np.save(os.path.join(args.model_path,'skip_probs.npy'), np.asarray(all_skip_probs)[:,:,0])
    except:
        pass
|
# Copyright Notice:
# Copyright 2017 Distributed Management Task Force, Inc. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Usecase-Checkers/blob/master/LICENSE.md
import datetime
import json
import os
import sys
class Results(object):
    """Accumulates per-test pass/fail/skip counts and serializes them to JSON."""

    def __init__(self, tool_name, service_root):
        """Create a results container for *tool_name*.

        service_root may be None, in which case an empty dict is recorded.
        """
        self.output_dir = os.getcwd()
        self.results_filename = "results.json"
        self.tool_name = tool_name
        self.return_code = 0
        timestamp = "{:%Y-%m-%dT%H:%M:%SZ}".format(
            datetime.datetime.now(datetime.timezone.utc))
        self.results = {
            "ToolName": tool_name,
            "Timestamp": {"DateTime": timestamp},
            "ServiceRoot": service_root if service_root is not None else {},
        }

    def update_test_results(self, test_name, rc, msg, skipped=False):
        """Record one test outcome: rc == 0 counts as a pass, anything else a fail."""
        test_results = self.results.setdefault("TestResults", {})
        counters = test_results.setdefault(test_name, {"pass": 0, "fail": 0, "skip": 0})
        if skipped:
            counters["skip"] += 1
        elif rc == 0:
            counters["pass"] += 1
        else:
            print("ERROR: {}".format(msg))
            counters["fail"] += 1
            messages = test_results.setdefault("ErrorMessages", [])
            if msg is not None:
                messages.append(test_name + ": " + msg)
            # Remember the most recent failing return code.
            self.return_code = rc

    def add_cmd_line_args(self, args):
        """Attach the command-line arguments to the results payload."""
        self.results.update({"CommandLineArgs": args})

    def set_output_dir(self, output_dir):
        """Choose where results.json gets written; fall back to cwd on error."""
        self.output_dir = os.path.abspath(output_dir)
        try:
            if not os.path.isdir(self.output_dir):
                os.mkdir(self.output_dir)
        except OSError as e:
            print("Error creating output directory {}, error: {}".format(self.output_dir, e), file=sys.stderr)
            print("Will write results file to current working directory instead.", file=sys.stderr)
            self.output_dir = os.getcwd()

    def write_results(self):
        """Write <output_dir>/results.json, printing to STDOUT on failure."""
        path = os.path.join(self.output_dir, self.results_filename)
        try:
            with open(path, 'w') as outfile:
                json.dump(self.results, outfile, indent=4)
        except OSError as e:
            print("Error writing results file to {}, error: {}".format(path, e), file=sys.stderr)
            print("Printing results to STDOUT instead.", file=sys.stderr)
            print(json.dumps(self.results, indent=4))

    def json_string(self):
        """Return the results as a compact JSON string."""
        return json.dumps(self.results)

    def get_return_code(self):
        """Return the last non-zero test return code (0 if everything passed)."""
        return self.return_code
|
import json
import os
import shutil
import pytest
from graphql_relay import to_global_id
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import Client
from db.models import Attachment, Image, AttachmentKey
# pylint: disable=C0209
def attachment_node_query():
    """Relay node query fetching an attachment's file name by global id."""
    query = '''
    query ($id: ID!) {
      node(id: $id) {
        id
        ... on Attachment {
          fileName
        }
      }
    }
    '''
    return query
def attachments_query(key):
    """GraphQL query listing attachment edges for the given key enum value."""
    template = '''
    query {
      attachments(key: %s) {
        pageInfo {
          startCursor
          endCursor
          hasNextPage
          hasPreviousPage
        }
        edges {
          cursor
          node {
            id
            fileName
          }
        }
      }
    }
    '''
    return template % key
def attachments_by_slug_query(slug):
    """GraphQL query fetching attachments for *slug* across all six
    attachment keys (company/student avatar, fallback and documents)."""
    return '''
    query {
      companyAvatar: attachments (key: COMPANY_AVATAR, slug: "%s") {
        pageInfo {
          startCursor
          endCursor
          hasNextPage
          hasPreviousPage
        }
        edges {
          cursor
          node {
            id
            url
            mimeType
            fileSize
            fileName
          }
        }
      }
      companyAvatarFallback: attachments (key: COMPANY_AVATAR_FALLBACK, slug: "%s") {
        pageInfo {
          startCursor
          endCursor
          hasNextPage
          hasPreviousPage
        }
        edges {
          cursor
          node {
            id
            url
            mimeType
            fileSize
            fileName
          }
        }
      }
      companyDocuments: attachments (key: COMPANY_DOCUMENTS, slug: "%s") {
        pageInfo {
          startCursor
          endCursor
          hasNextPage
          hasPreviousPage
        }
        edges {
          cursor
          node {
            id
            url
            mimeType
            fileSize
            fileName
          }
        }
      }
      studentAvatar: attachments (key: STUDENT_AVATAR, slug: "%s") {
        pageInfo {
          startCursor
          endCursor
          hasNextPage
          hasPreviousPage
        }
        edges {
          cursor
          node {
            id
            url
            mimeType
            fileSize
            fileName
          }
        }
      }
      studentAvatarFallback: attachments (key: STUDENT_AVATAR_FALLBACK, slug: "%s") {
        pageInfo {
          startCursor
          endCursor
          hasNextPage
          hasPreviousPage
        }
        edges {
          cursor
          node {
            id
            url
            mimeType
            fileSize
            fileName
          }
        }
      }
      studentDocuments: attachments (key: STUDENT_DOCUMENTS, slug: "%s") {
        pageInfo {
          startCursor
          endCursor
          hasNextPage
          hasPreviousPage
        }
        edges {
          cursor
          node {
            id
            url
            mimeType
            fileSize
            fileName
          }
        }
      }
    }
    ''' % (slug, slug, slug, slug, slug, slug)
@pytest.fixture
def query_attachment_node_by_node_id(execute):
    """Callable fixture resolving an attachment node by its global id."""
    def run(user, node_id):
        return execute(attachment_node_query(), variables={'id': node_id}, user=user)
    return run
@pytest.fixture
def query_attachments(execute):
    """Callable fixture listing attachments for a given key."""
    def run(user, key):
        return execute(attachments_query(key), user=user)
    return run
@pytest.fixture
def query_attachments_for_slug(execute):
    """Callable fixture listing attachments across all keys for a slug."""
    def run(user, slug):
        return execute(attachments_by_slug_query(slug), user=user)
    return run
def upload_mutation():
    """GraphQL mutation performing a generic user upload."""
    mutation = '''
    mutation UploadMutation($input: UserUploadInput!) {
      upload(input: $input) {
        attachment {
          id
        }
        success
        errors
      }
    }
    '''
    return mutation
def upload_for_project_posting_mutation():
    """GraphQL mutation uploading a file scoped to a project posting."""
    mutation = '''
    mutation UploadProjectPosting($input: UserUploadInput!) {
      upload(input: $input) {
        success
        errors
      }
    }
    '''
    return mutation
@pytest.fixture
def upload(default_password):
    """Callable fixture posting a multipart GraphQL upload for a user."""
    def run(user, key, file, project_posting=None):
        # Build the multipart payload per the GraphQL multipart request spec:
        # 'operations' holds the query, 'map' wires file slot '0' to the
        # 'file' variable.
        operations = json.dumps({
            'query': upload_mutation(),
            'variables': {
                'input': {
                    'file': None,
                    'key': key.upper(),
                    'projectPosting': project_posting
                }
            },
        })
        file_map = json.dumps({
            '0': ['variables.input.file'],
        })
        payload = {'operations': operations, '0': file, 'map': file_map}
        client = Client()
        client.login(username=user.username, password=default_password)
        response = client.post('/graphql/', data=payload)
        content = json.loads(response.content)
        return content.get('data'), content.get('errors')
    return run
@pytest.fixture
def upload_for_project_posting(default_password):
    """Fixture: callable uploading a file for a specific project posting.

    Same multipart protocol as the ``upload`` fixture, but the input carries
    the project posting's global id.  Returns ``(data, errors)``.
    """
    def closure(user, project_posting, key, file):
        operations = {
            'query': upload_for_project_posting_mutation(),
            'variables': {
                'input': {
                    'file': None,  # placeholder; the 'map' entry points the file here
                    'key': key.upper(),
                    'projectPosting': {
                        'id': to_global_id('ProjectPosting', project_posting.id),
                    },
                },
            },
        }
        payload = {
            'operations': json.dumps(operations),
            '0': file,
            'map': json.dumps({'0': ['variables.input.file']}),
        }
        client = Client()
        client.login(username=user.username, password=default_password)
        response = client.post('/graphql/', data=payload)
        content = json.loads(response.content)
        return content.get('data'), content.get('errors')
    return closure
def delete_attachment_mutation():
    """Return the GraphQL mutation used to delete an attachment."""
    mutation = '''
    mutation DeleteAttachmentMutation($input: DeleteAttachmentInput!) {
        deleteAttachment(input: $input) {
            success
            errors
        }
    }
    '''
    return mutation
@pytest.fixture
def file_image_jpg():
    """Fixture: an in-memory JPEG upload built from the bundled test image."""
    path = os.path.join(settings.BASE_DIR, 'api', 'tests', 'fixtures', 'media', 'image.jpg')
    with open(path, 'rb') as source:
        payload = source.read()
    return SimpleUploadedFile(name='image.jpg', content=payload, content_type='image/jpeg')
@pytest.fixture
def file_video_mp4():
    """Fixture: an in-memory MP4 upload built from the bundled test video."""
    path = os.path.join(settings.BASE_DIR, 'api', 'tests', 'fixtures', 'media', 'video.mp4')
    with open(path, 'rb') as source:
        payload = source.read()
    return SimpleUploadedFile(name='video.mp4', content=payload, content_type='video/mp4')
@pytest.fixture
def file_document_pdf():
    """Fixture: an in-memory PDF upload built from the bundled test document."""
    path = os.path.join(settings.BASE_DIR, 'api', 'tests', 'fixtures', 'media', 'document.pdf')
    with open(path, 'rb') as source:
        payload = source.read()
    return SimpleUploadedFile(name='document.pdf', content=payload,
                              content_type='application/pdf')
@pytest.fixture
def attachments_for_user():
    """Fixture: callable returning the Attachment queryset for a user's profile and key."""
    def closure(user, key):
        # Filter by the generic-relation pair (content_type, object_id) of the profile.
        return Attachment.objects.filter(
            key=key,
            content_type=user.get_profile_content_type(),
            object_id=user.get_profile_id())
    return closure
@pytest.fixture
def attachments_for_project_posting():
    """Fixture: callable returning the Attachment queryset for a project posting and key."""
    def closure(project_posting, key):
        posting_content_type = ContentType.objects.get(app_label='db', model='projectposting')
        return Attachment.objects.filter(
            key=key,
            content_type=posting_content_type,
            object_id=project_posting.id)
    return closure
@pytest.fixture
def delete_attachment(execute):
    """Fixture: callable running the delete-attachment mutation as the given user."""
    def closure(user, attachment_id):
        variables = {'input': {'id': to_global_id('Attachment', attachment_id)}}
        return execute(delete_attachment_mutation(), variables=variables, user=user)
    return closure
@pytest.mark.django_db
@pytest.fixture(autouse=True)
def company_fallback_images(user_employee):
    """Autouse fixture: attach two company-avatar fallback images to the employee profile.

    Copies the bundled test image into MEDIA_ROOT under two names and creates
    one COMPANY_AVATAR_FALLBACK attachment per copy.
    """
    image_content_type = ContentType.objects.get(app_label='db', model='image')
    source_image_path = os.path.join(settings.BASE_DIR, 'api', 'tests', 'fixtures', 'media',
                                     'image.jpg')
    for file_name in ('company_image.jpg', 'company_image_2.jpg'):
        shutil.copy(source_image_path, os.path.join(settings.MEDIA_ROOT, file_name))
        image = Image.objects.create(file=file_name)
        Attachment.objects.create(key=AttachmentKey.COMPANY_AVATAR_FALLBACK,
                                  object_id=user_employee.get_profile_id(),
                                  content_type=user_employee.get_profile_content_type(),
                                  attachment_id=image.id,
                                  attachment_type=image_content_type)
@pytest.mark.django_db
@pytest.fixture(autouse=True)
def student_fallback_images(user_student):
    """Autouse fixture: attach two student-avatar fallback images to the student profile.

    Copies the bundled test image into MEDIA_ROOT under two names and creates
    one STUDENT_AVATAR_FALLBACK attachment per copy.

    Bug fix: the first attachment was created with
    AttachmentKey.COMPANY_AVATAR_FALLBACK — a copy-paste left-over from the
    company fixture — although it is attached to a *student* profile.  Both
    attachments now use STUDENT_AVATAR_FALLBACK.
    """
    image_content_type = ContentType.objects.get(app_label='db', model='image')
    source_image_path = os.path.join(settings.BASE_DIR, 'api', 'tests', 'fixtures', 'media',
                                     'image.jpg')
    for file_name in ('student_image.jpg', 'student_image_2.jpg'):
        shutil.copy(source_image_path, os.path.join(settings.MEDIA_ROOT, file_name))
        image = Image.objects.create(file=file_name)
        Attachment.objects.create(key=AttachmentKey.STUDENT_AVATAR_FALLBACK,
                                  object_id=user_student.get_profile_id(),
                                  content_type=user_student.get_profile_content_type(),
                                  attachment_id=image.id,
                                  attachment_type=image_content_type)
|
"""
The MIT License (MIT)
Copyright © 2018 Jean-Christophe Bos & HC² (www.hc2.fr)
"""
from network import WLAN
from socket import getaddrinfo
from time import sleep, ticks_ms, ticks_diff
from binascii import hexlify
from os import mkdir
from json import load, dumps
class MicroWifi:
    """Small helper around the Pycom ``network.WLAN`` driver.

    Manages a simultaneous access point and station connection, and persists
    AP / station credentials as JSON in flash so they can be re-used across
    reboots.

    Fixes applied in this revision:
      * ``ConnectToAP`` / ``WaitForInternetAccess`` used
        ``ticks_diff(t, ticks_ms())`` which is always negative, so the
        timeout never expired; the correct elapsed-time expression is
        ``ticks_diff(ticks_ms(), t)``.
      * ``_writeConf`` opened the file in binary mode but wrote the ``str``
        returned by ``json.dumps`` — the resulting TypeError was swallowed
        and the configuration was never saved.
    """

    # ============================================================================
    # ===( Constants )============================================================
    # ============================================================================

    _ETH_AP = 1                         # interface index of the access point
    _ETH_STA = 0                        # interface index of the station
    _IP_NONE = '0.0.0.0'                # "no address" sentinel used by the driver
    _DEFAULT_AUTH_TYPE = WLAN.WPA2
    _AP_MASK = '255.255.255.0'
    _DEFAULT_TIMEOUT_SEC = 10

    # ============================================================================
    # ===( Utils )================================================================
    # ============================================================================

    @staticmethod
    def _mac2Str(binMac):
        """Render a binary MAC address as an upper-case 'AA:BB:...' string."""
        return hexlify(binMac, ':').decode().upper()

    # ----------------------------------------------------------------------------

    def _setAPInfos(self, ssid=None, key=None, ip=None, mask=None, gateway=None, dns=None):
        """Cache the parameters of the currently opened access point."""
        self._apInfos = {
            'ssid': ssid,
            'key': key,
            'ip': ip,
            'mask': mask,
            'gateway': gateway,
            'dns': dns
        }

    # ----------------------------------------------------------------------------

    def _setConnectionInfos(self, bssid=None, ssid=None, key=None, ip=None, mask=None,
                            gateway=None, dns=None):
        """Cache the parameters of the current station connection."""
        self._connInfos = {
            'macBssid': bssid,
            'ssid': ssid,
            'key': key,
            'ip': ip,
            'mask': mask,
            'gateway': gateway,
            'dns': dns
        }

    # ----------------------------------------------------------------------------

    def _openConf(self):
        """Load the JSON configuration from flash, falling back to an empty one."""
        try:
            with open(self._filePath, 'r') as jsonFile:
                self._confObj = load(jsonFile)
        except Exception:
            # Missing or corrupt file: start from scratch.
            self._confObj = {}
        if self._confObj.get('STA', None) is None:
            self._confObj['STA'] = {}

    # ----------------------------------------------------------------------------

    def _writeConf(self):
        """Persist the configuration object to flash; return True on success."""
        try:
            jsonStr = dumps(self._confObj)
            try:
                mkdir(self._confPath)
            except Exception:
                # The configuration directory most likely already exists.
                pass
            # Bug fix: json.dumps() returns a str, which cannot be written to
            # a file opened with 'wb'; open in text mode instead.
            with open(self._filePath, 'w') as jsonFile:
                jsonFile.write(jsonStr)
            return True
        except Exception:
            return False

    # ============================================================================
    # ===( Constructor )==========================================================
    # ============================================================================

    def __init__(self, confName="wifi", confPath="/flash/conf", useExtAntenna=False):
        """Initialize the WLAN driver, load the stored config and disable radio."""
        self._confPath = confPath
        self._filePath = '%s/%s.json' % (confPath, confName)
        self._wlan = WLAN()
        self._antenna = WLAN.EXT_ANT if useExtAntenna else WLAN.INT_ANT
        self._openConf()
        self._setAPInfos()
        self._setConnectionInfos()
        self._wlan.init(antenna=self._antenna)
        self.DisableRadio()

    # ============================================================================
    # ===( Functions )============================================================
    # ============================================================================

    def DisableRadio(self):
        """Close the AP and the station connection, then power the radio down."""
        self.CloseAccessPoint()
        self.CloseConnectionToAP()
        self._wlan.deinit()

    # ----------------------------------------------------------------------------

    def GetMACAddr(self):
        """Return this device's WLAN MAC address as a string."""
        return self._mac2Str(self._wlan.mac())

    # ----------------------------------------------------------------------------

    def GetAPInfos(self):
        """Return the cached AP parameters (cleared first if the AP is closed)."""
        if not self.IsAccessPointOpened():
            self._setAPInfos()
        return self._apInfos

    # ----------------------------------------------------------------------------

    def GetConnectionInfos(self):
        """Return the cached station parameters (cleared first if disconnected)."""
        if not self.IsConnectedToAP():
            self._setConnectionInfos()
        return self._connInfos

    # ----------------------------------------------------------------------------

    def ScanAP(self):
        """Scan for access points; return a tuple of results ('()' on failure)."""
        try:
            if self._wlan.mode() == WLAN.STA:
                self._wlan.init(antenna=self._antenna)
            return self._wlan.scan()
        except Exception:
            return ()

    # ----------------------------------------------------------------------------

    def OpenAccessPoint(self, ssid, key=None, ip='192.168.0.254', autoSave=True):
        """Open a WPA2 (or open, if no key) access point.

        The AP's own IP doubles as gateway and DNS.  When *autoSave* is True
        the parameters are stored in the configuration file.  Returns True on
        success.
        """
        if ssid and ip:
            try:
                self._wlan.ifconfig(id=self._ETH_AP,
                                    config=(ip, self._AP_MASK, ip, ip))
                auth = (self._DEFAULT_AUTH_TYPE, key) if key else None
                self._wlan.init(mode=WLAN.STA_AP,
                                ssid=ssid,
                                auth=auth,
                                antenna=self._antenna)
                print("WIFI ACCESS POINT OPENED :")
                print("  - MAC address  : %s" % self.GetMACAddr())
                print("  - Network SSID : %s" % ssid)
                print("  - IP address   : %s" % ip)
                print("  - Mask         : %s" % self._AP_MASK)
                print("  - Gateway IP   : %s" % ip)
                print("  - DNS server   : %s" % ip)
                if autoSave:
                    self._confObj['AP'] = {
                        'ssid': ssid,
                        'key': key,
                        'ip': ip
                    }
                    self._writeConf()
                self._setAPInfos(ssid, key, ip, self._AP_MASK, ip, ip)
                return True
            except Exception:
                self.CloseAccessPoint()
        return False

    # ----------------------------------------------------------------------------

    def OpenAccessPointFromConf(self):
        """Re-open the access point using the parameters stored in flash."""
        try:
            ssid = self._confObj['AP']['ssid']
            key = self._confObj['AP']['key']
            ip = self._confObj['AP']['ip']
            return self.OpenAccessPoint(ssid, key, ip, False)
        except Exception:
            return False

    # ----------------------------------------------------------------------------

    def RemoveAccessPointFromConf(self):
        """Delete the stored AP parameters; return True if the file was rewritten."""
        try:
            del self._confObj['AP']
            return self._writeConf()
        except Exception:
            return False

    # ----------------------------------------------------------------------------

    def CloseAccessPoint(self):
        """Close the access point by dropping to STA mode and clearing its IPs."""
        try:
            ip = self._IP_NONE
            self._wlan.mode(WLAN.STA)
            self._wlan.ifconfig(id=self._ETH_AP,
                                config=(ip, ip, ip, ip))
            return True
        except Exception:
            return False

    # ----------------------------------------------------------------------------

    def IsAccessPointOpened(self):
        """True when the AP interface holds a real IP address."""
        return self._wlan.ifconfig(self._ETH_AP)[0] != self._IP_NONE

    # ----------------------------------------------------------------------------

    def ConnectToAP(self, ssid, key=None, macBssid=None, timeoutSec=None, autoSave=True):
        """Connect the station interface to an access point.

        Scans for *ssid* (optionally pinned to *macBssid*), connects with the
        default auth type and waits up to *timeoutSec* seconds for an IP.  On
        success the credentials are stored (unless *autoSave* is False) and
        True is returned.
        """
        if ssid:
            if not key:
                key = ''
            if not timeoutSec:
                timeoutSec = self._DEFAULT_TIMEOUT_SEC
            timeout = timeoutSec * 1000
            if self._wlan.mode() == WLAN.STA:
                self._wlan.init(antenna=self._antenna)
            print("TRYING TO CONNECT WIFI TO AP %s..." % ssid)
            for ap in self.ScanAP():
                if ap.ssid == ssid and \
                   (not macBssid or self._mac2Str(ap.bssid) == macBssid):
                    self._wlan.connect(ssid=ap.ssid,
                                       bssid=ap.bssid,
                                       auth=(self._DEFAULT_AUTH_TYPE, key),
                                       timeout=timeout)
                    t = ticks_ms()
                    # Bug fix: elapsed time is ticks_diff(ticks_ms(), t); the
                    # previous argument order (t, ticks_ms()) is always
                    # negative and never triggered the timeout.
                    while ticks_diff(ticks_ms(), t) < timeout:
                        sleep(0.100)
                        if self.IsConnectedToAP():
                            bssid = self._mac2Str(ap.bssid)
                            staCfg = self._wlan.ifconfig(id=self._ETH_STA)
                            ip = staCfg[0]
                            mask = staCfg[1]
                            gateway = staCfg[2]
                            dns = staCfg[3]
                            print("WIFI CONNECTED TO AP :")
                            print("  - MAC address   : %s" % self.GetMACAddr())
                            print("  - Network BSSID : %s" % bssid)
                            print("  - Network SSID  : %s" % ssid)
                            print("  - IP address    : %s" % ip)
                            print("  - Mask          : %s" % mask)
                            print("  - Gateway IP    : %s" % gateway)
                            print("  - DNS server    : %s" % dns)
                            if autoSave:
                                # Credentials are stored per-BSSID.
                                sta = {
                                    'ssid': ssid,
                                    'key': key,
                                }
                                self._confObj['STA'][bssid] = sta
                                self._writeConf()
                            self._setConnectionInfos(bssid, ssid, key, ip, mask,
                                                     gateway, dns)
                            return True
                    self.CloseConnectionToAP()
                    break
        print("FAILED TO CONNECT WIFI TO AP %s" % ssid)
        return False

    # ----------------------------------------------------------------------------

    def ConnectToAPFromConf(self, bssidMustBeSame=False, timeoutSec=None):
        """Try to connect to any visible AP whose credentials are stored in flash."""
        if self._wlan.mode() == WLAN.STA:
            self._wlan.init(antenna=self._antenna)
        for ap in self.ScanAP():
            for bssid in self._confObj['STA']:
                macBssid = self._mac2Str(ap.bssid) if bssidMustBeSame else None
                if self._confObj['STA'][bssid]['ssid'] == ap.ssid and \
                   (not macBssid or bssid == macBssid):
                    if self.ConnectToAP(ap.ssid,
                                        self._confObj['STA'][bssid]['key'],
                                        macBssid,
                                        timeoutSec,
                                        False):
                        return True
                    break
        return False

    # ----------------------------------------------------------------------------

    def RemoveConnectionToAPFromConf(self, ssid, macBssid=None):
        """Remove stored credentials matching *ssid* (and *macBssid* if given)."""
        try:
            changed = False
            # Iterate over a copy so entries can be deleted while looping.
            for bssid in list(self._confObj['STA']):
                if self._confObj['STA'][bssid]['ssid'] == ssid and \
                   (not macBssid or bssid == macBssid):
                    del self._confObj['STA'][bssid]
                    changed = True
            if changed:
                return self._writeConf()
        except Exception:
            pass
        return False

    # ----------------------------------------------------------------------------

    def CloseConnectionToAP(self):
        """Disconnect the station and reset its interface to DHCP."""
        try:
            self._wlan.disconnect()
            self._wlan.ifconfig(id=self._ETH_STA,
                                config='dhcp')
            return True
        except Exception:
            return False

    # ----------------------------------------------------------------------------

    def IsConnectedToAP(self):
        """True when the station interface holds a real IP address."""
        return self._wlan.ifconfig(self._ETH_STA)[0] != self._IP_NONE

    # ----------------------------------------------------------------------------

    def ResolveIPFromHostname(self, hostname):
        """Resolve *hostname* via DNS; return the IP string or None.

        The driver resolves through the station interface, so STA_AP mode is
        temporarily dropped to STA for the lookup and restored afterwards.
        """
        originalMode = self._wlan.mode()
        if originalMode == WLAN.STA_AP:
            self._wlan.mode(WLAN.STA)
        try:
            ipResolved = getaddrinfo(hostname, 0)[0][-1][0]
        except Exception:
            ipResolved = None
        if originalMode == WLAN.STA_AP:
            self._wlan.mode(WLAN.STA_AP)
        return ipResolved if ipResolved != self._IP_NONE else None

    # ----------------------------------------------------------------------------

    def InternetAccessIsPresent(self):
        """True when a public hostname can be resolved through the connection."""
        return (self.ResolveIPFromHostname('iana.org') is not None)

    # ----------------------------------------------------------------------------

    def WaitForInternetAccess(self, timeoutSec=None):
        """Poll for internet access for up to *timeoutSec* seconds."""
        if not timeoutSec:
            timeoutSec = self._DEFAULT_TIMEOUT_SEC
        timeout = timeoutSec * 1000
        t = ticks_ms()
        # Bug fix: same reversed ticks_diff() arguments as in ConnectToAP.
        while ticks_diff(ticks_ms(), t) < timeout:
            sleep(0.100)
            if self.InternetAccessIsPresent():
                return True
        return False

    # ============================================================================
    # ============================================================================
    # ============================================================================
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.