text stringlengths 8 6.05M |
|---|
from Training.Trainer import Trainer
class Generative_Replay(Trainer):
    """Trainer that augments each new task's data with samples replayed
    from a generative model trained on previous tasks."""

    def __init__(self, model, args):
        super(Generative_Replay, self).__init__(model, args)

    def generate_dataset(self, ind_task, sample_per_classes, classe2generate, Train=True):
        """Generate a replay dataset covering all classes seen so far.

        NOTE(review): the original branched on self.conditional but both
        branches were byte-identical, so the redundant conditional was
        removed; behaviour is unchanged.

        @param ind_task: index of the current task
        @param sample_per_classes: number of samples to generate per class
        @param classe2generate: number of classes to generate
        @param Train: generate training (True) or test (False) data
        """
        return self.model.generate_dataset(ind_task, sample_per_classes, one_task=False,
                                           Train=Train, classe2generate=classe2generate)

    def create_next_data(self, ind_task):
        """Return (train_loader, test_loader) for ind_task; for tasks > 0 the
        train loader is concatenated with generated replay data and shuffled."""
        task_te_gen = None
        if ind_task > 0:
            # select the current task index — presumably __getitem__ has a
            # side effect on the loader, TODO confirm
            self.train_loader[ind_task]
            print("numbe of train sample per task is fixed as : " + str(self.sample_transfer))
            nb_sample_train = self.sample_transfer  # approximate size of one task
            task_tr_gen = self.generate_dataset(ind_task, nb_sample_train,
                                                classe2generate=ind_task, Train=True)
            self.train_loader.concatenate(task_tr_gen)
            train_loader = self.train_loader[ind_task]
            train_loader.shuffle_task()
            if task_te_gen is not None:
                self.test_loader.concatenate(task_te_gen)
                test_loader = self.test_loader[ind_task]
                test_loader.shuffle_task()
            else:
                # test replay disabled for now; code kept for later use
                test_loader = None
        else:
            train_loader = self.train_loader[ind_task]
            test_loader = self.test_loader[ind_task]
        return train_loader, test_loader
|
import random

# Vocabulary for the toy grammar
articles = ('A', 'The')
nouns = ('Boy', 'Girl', 'Bat', 'Ball')
verbs = ('hit', 'saw', 'liked')
prepositions = ('with', 'by')


def sentence():
    """Build one random sentence: a noun phrase followed by a verb phrase."""
    return ' '.join([nounPhrase(), verbPhrase()])


def nounPhrase():
    """Return a random article plus a random noun."""
    return ' '.join([random.choice(articles), random.choice(nouns)])


def verbPhrase():
    """Return a random verb, a noun phrase and a prepositional phrase."""
    return ' '.join([random.choice(verbs), nounPhrase(), prepostionalPhrase()])


def prepostionalPhrase():
    """Return a random preposition plus a noun phrase.

    (Name keeps the original spelling for compatibility with any callers.)
    """
    return ' '.join([random.choice(prepositions), nounPhrase()])


def main():
    """Prompt for a count and print that many random sentences."""
    number = int(input('Enter the number of sentences: '))
    for _ in range(number):
        print(sentence())


if __name__ == '__main__':
    main()
# functionality to make a climatology from the daily file made by SNAP through interpolation
if __name__ == '__main__':
    import os
    import argparse
    import numpy as np
    import xarray as xr

    # command-line interface
    parser = argparse.ArgumentParser(description='make a climatology NetCDF file of Sea Ice Concentration from the daily nsidc-0051')
    parser.add_argument("-f", "--fn", action='store', dest='fn', type=str, help="daily NSIDC-0051 NetCDF file")
    parser.add_argument("-o", "--out_fn", action='store', dest='out_fn', type=str, help="name/path of output daily climatology file to be generated")
    parser.add_argument("-b", "--begin", action='store', dest='begin', type=str, help="beginning year of the climatology")
    parser.add_argument("-e", "--end", action='store', dest='end', type=str, help="ending year of the climatology")
    args = parser.parse_args()

    # day-of-year climatology over [begin, end] --> 0-366 includes leaps
    ds = xr.open_dataset(args.fn).load()
    clim = ds.sel(time=slice(args.begin, args.end)).groupby('time.dayofyear').mean(dim='time')

    # clamp sea-ice concentration into the physically valid range [0, 1]
    # (in-place on the underlying array; NaNs are left untouched)
    sic = clim['sic'].values
    sic[sic < 0] = 0
    sic[sic > 1] = 1

    clim.to_netcdf(args.out_fn, format='NETCDF4')
def fibonacci(n):
    """Return the nth Fibonacci number (1-indexed), or None for n <= 0.

    fibonacci(1) == 0, fibonacci(2) == 1, then each term is the sum of the
    previous two.  Iterative to avoid the exponential blow-up of the naive
    double recursion in the original.

    :param n: 1-based index into the series
    :rtype: int or None
    """
    if n <= 0:
        return None
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
def lucas(n):
    """Return the nth Lucas number (1-indexed), or None for n <= 0.

    lucas(1) == 2, lucas(2) == 1, then each term is the sum of the previous
    two.  Iterative to avoid the exponential blow-up of the naive double
    recursion in the original.

    :param n: 1-based index into the series
    :rtype: int or None
    """
    if n <= 0:
        return None
    a, b = 2, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return a
def sum_series(n, x=0, y=1):
    """Dispatch on the optional seed arguments.

    Returns lucas(n) when (x, y) == (2, 1), fibonacci(n) when
    (x, y) == (0, 1) (the defaults), otherwise the string
    'different series'.

    :param n: 1-based index into the series; x, y optional seeds
    :rtype: int, None or str
    """
    if (x, y) == (2, 1):
        return lucas(n)
    if (x, y) == (0, 1):
        return fibonacci(n)
    return 'different series'
# attribution: short circuit eval - https://www.djm.org.uk/posts/python-multiple-line-conditions-and-all-builtin/
import os
import simplejson as json
import re
"""
Helper functions used in various steps of websubmit/modify. Collect
functions that are used in various stages of websubmit here, e.g. to
handle the shuffeling of files necessary for websubmit, processing of
hgf-structures to/from JSON.
Note that curdir generally refers to the curdir used in websubmit.
This naming convention is kept throughout the module.
"""
# ----------------------------------------------------------------------
# from Prefill_hgf_fields.py
def add_non_json_fields(json_dict):
    """
    Add single input fields if a field has no repetition.
    For autosuggest fields keep the structured keys and skip the creation
    of the individual subfields; otherwise we'd need to delete them later
    anyway, as those fields deliver their values via the technical
    structured fields.
    @param json_dict: dict keyed by marc tags (values are lists of dicts)
    """
    autosuggest_keys = get_autosuggest_keys()
    # Iterate over a snapshot of the keys: the loop deletes entries from
    # json_dict, which raises RuntimeError when iterating the live view
    # on Python 3.
    for key in list(json_dict.keys()):
        if len(json_dict[key]) != 1:
            # json_dict[key] always has to be a list, and we can only create
            # individual fields if its length is exactly 1; otherwise the
            # second entry would just overwrite the first values.
            continue
        if key in autosuggest_keys:
            # do not create files for autosuggest/tokeninput entries
            continue
        # Structured non-autosuggest field with exactly one value, e.g.
        # 773__: spread it into individual entries per subfield, e.g.
        # 773__a, 773__0 ...
        for subfield in json_dict[key][0].keys():
            json_dict[key + subfield] = json_dict[key][0][subfield]
        del json_dict[key]  # remove the structured key: we now have individual fields
    return json_dict
def clean_fields(curdir, fieldname):
    """In case we have a structured field (ie. XXX__) and string inputs
    (XXX__a), clean up curdir so that only the structured field survives:
    every simple field derived from it is deleted.
    @param curdir : curdir from websubmit containing all files
    @param fieldname : the (structured) field that should survive
    """
    # TODO either we should call delete_fields here or use this function
    # for delete_fields. At least both sound very similar.
    doomed = [entry for entry in os.listdir(curdir)
              if fieldname in entry and entry != fieldname]
    for entry in doomed:
        os.remove(os.path.join(curdir, entry))
def get_autosuggest_keys():
    """
    Define which fields use autosuggest or tokeninput in the webfrontend.
    Those fields always return the real values in the structured technical
    subfields; the simple fields only serve display purposes, so their data
    should not be considered later on.
    """
    # Real key list, kept for reference:
    #   ["0247_", "1001_", "7001_", "245__", "3367_", "536__", "9131_",
    #    "9201_", "980__", "982__"]
    ####TODO CERN DEMO does not need autosuggest!!!!!!!!!!!!!!!!!!!!
    return []
def read_file(curdir, filename):
    """
    Return the contents of a file from curdir with all newlines removed.
    @param curdir : curdir from websubmit containing all files
    @param filename: file to read from curdir
    """
    # with-statement closes the handle even if read() raises
    with open(os.path.join(curdir, filename), "r") as fd:
        return fd.read().replace("\n", "").replace("\r", "")
def read_json(curdir, filename):
    """
    Read JSON from a file in curdir and return the parsed structure
    (usually a list).
    Check if the file associated with the field name exists; if not,
    return an empty list. Note: usually all files should exist, however in
    Postpone processes we might get an almost empty record where not even
    the mandatory fields got filled in. This is meant to catch that.
    @param curdir : curdir from websubmit containing all files
    @param filename: filename to read
    """
    if not check_field_exists(curdir, filename):
        return []
    text = read_file(curdir, filename)
    if not text.startswith("["):
        # Wrap the input in a list. This is necessary as Invenio does not
        # have a possibility to define an overall "startof/endof" in an
        # output format; JSON structures from simple output formats arrive
        # without the surrounding [].
        text = '[' + text + ']'
    # NOTE(review): the original passed 'utf8' as a second positional
    # argument (the py2 simplejson "encoding" parameter); json.loads in
    # Python 3 rejects it, so it was dropped.
    return json.loads(washJSONinput(text))
def remove_file(curdir, filename):
    """
    Delete an arbitrary file from curdir, silently ignoring a missing file.
    Replaces the old shell-out to "rm -f" (see TODO in the original) with
    os.remove: no subshell, no quoting issues, same best-effort semantics.
    @param curdir : curdir from websubmit containing all files
    @param filename: file to delete
    """
    try:
        os.remove(os.path.join(curdir, filename))
    except OSError:
        # "rm -f" also succeeded silently when the file was absent
        pass
def wash_db_record_dict(record_dict):
    """
    Create a nice JSON dictionary from the Invenio Marc tuple structure:
    keyed by full marc code (tag + both indicators), each value a list of
    subfield-code -> value dicts. Fields without subfields are skipped.
    @param record_dict: output of search_engine.get_record
    @type record_dict : Invenio Marc tuple
    """
    json_dict = {}
    for marccode in record_dict.keys():
        # loop 1: all datafields (list with repeatable datafields)
        for marcfield in record_dict[marccode]:
            # loop 2: every repetition of this tag; blank indicators
            # become '_' in the full marc code
            ind1 = "_" if marcfield[1] in ("", " ") else marcfield[1]
            ind2 = "_" if marcfield[2] in ("", " ") else marcfield[2]
            fullmarccode = str(marccode) + ind1 + ind2
            # loop 3: collect all subfields of this field
            subfields = {sf[0]: sf[1] for sf in marcfield[0]}
            if not subfields:
                continue
            json_dict.setdefault(fullmarccode, []).append(subfields)
    return json_dict
def write_done_file(curdir):
    """
    Fake the step-0 marker of the original Invenio modify workflow.
    In original Invenio, modify contains a step 0 to select the fields to
    be modified; that page generates a done-file so step 1 knows step 0 was
    passed. Since we always use the full forms with all fields, we just
    write this file to stay compatible.
    @param curdir: curdir from websubmit containing all files
    """
    write_file(curdir, "Create_Modify_Interface_DONE", "DONE")
def write_file(curdir, filename, text):
    """
    Write a text file to curdir for further processing.
    @param curdir : curdir from websubmit containing all files
    @param filename: name of the file to generate. The real file name is
                     used; hgf_ is NOT prepended automagically as this
                     function writes arbitrary files
    @param text : contents of the file
    """
    # with-statement closes the handle even if write() raises
    with open(os.path.join(curdir, filename), "w") as wd:
        wd.write(text)
def write_json(curdir, filename, _dict):
    """
    Write a python structure (usually a dictionary) as JSON to a file.
    @param curdir : curdir from websubmit containing all files
    @param filename: file to write, no automatic name conversion to hgf_
    @param _dict : structure to write. A dict should be properly keyed by
                   Marc tags; note that due to this structure we do /not/
                   use repeatable subfields here.
    """
    # with-statement closes the handle even if dump() raises
    with open(os.path.join(curdir, filename), "w") as fw:
        json.dump(_dict, fw)
def write_all_files(curdir, json_dict):
    """
    Write one hgf_-prefixed curdir file per key in json_dict.
    Structured tags (five characters, XXXii) are dumped as JSON, longer
    names (i.e. subfield level) as plain text.
    @param curdir: curdir from websubmit containing all files
    @param json_dict: dict keyed by marc tags
    """
    for field, value in json_dict.items():
        fieldname = "hgf_" + field
        if len(field) == len('XXXii'):
            write_json(curdir, fieldname, value)
        else:
            write_file(curdir, fieldname, value)
# ----------------------------------------------------------------------
# from Convert_hgf_Fields
def get_usergroups(uid):
    """
    Return all group names of a user as a list. In case a user does not
    belong to any group (this should not happen) return an empty list.
    @param uid: User ID
    """
    from invenio.webgroup_dblayer import get_groups
    # TODO check Create_hgf_collection: why does it not use this function
    # NOTE(review): the original built the result via a side-effecting
    # list comprehension; a plain comprehension over the tuples is
    # equivalent (and handles the empty case for free).
    return [tup[1] for tup in get_groups(uid)]
def check_field_exists(curdir, fieldname):
    """
    Return True if a file (fieldname) exists in curdir, else False.
    @param curdir : curdir from websubmit containing all files
    @param fieldname: file to check
    """
    # TODO replace all those os.path.exists() calls by
    # check_field_exists() to get easier code
    return os.path.exists(os.path.join(curdir, fieldname))
# ----------------------------------------------------------------------
# from Create_hgf_collection.py
def get_user(uid):
    """
    Return the role of a user: STAFF, EDITORS or USER.
    STAFF wins over EDITORS when the user is in both groups.
    @param uid: User ID
    """
    groups = get_usergroups(uid)
    for role in ("STAFF", "EDITORS"):
        if role in groups:
            return role
    return "USER"
def get_technical_collections():
    """
    Return the collections that carry a special meaning in our workflow.
    """
    return [
        "EDITORS", "MASSMEDIA", "TEMPENTRY",
        "USER", "VDB", "VDBINPRINT", "VDBRELEVANT",
    ]
# ----------------------------------------------------------------------
# from Create_hgf_record_json.py
def get_hgf_files(curdir):
    """
    Return all hgf_* files from curdir.
    @param curdir : curdir from websubmit containing all files
    """
    # comprehension replaces the manual append/continue loop
    return [f for f in os.listdir(curdir) if f.startswith("hgf_")]
def washJSONinput(jsontext):
    """
    Wash string jsontext intended for processing with json.loads().
    Replaces newlines by spaces and removes commas before closing brackets
    and at the end of the string. We get these e.g. due to Invenio's
    inability to handle lists properly in output formats.
    @param jsontext : the text that should be cleaned
    Returns: string suitable for processing with json.loads()
    """
    # raw strings throughout: '\s' in a plain literal is a
    # DeprecationWarning (and a future SyntaxError) on Python 3
    jsontext = re.sub(r"(?<!\\)(\n\r|\n|\r)", " ", jsontext)
    jsontext = re.sub(r',\s*]', ']', jsontext)
    jsontext = re.sub(r',\s*}', '}', jsontext)
    jsontext = re.sub(r',\s*$', '', jsontext)
    return jsontext
def get_recordid(curdir):
    """
    Extract the record ID from the SN file in curdir.
    @param curdir : curdir from websubmit containing all files
    """
    return read_file(curdir, "SN")
def check_hgf_field(fieldname):
    """
    Plausibility-check whether a filename looks like a regular marc field
    prepended by hgf_. This does not verify that the field exists or has
    any meaning.
    Returns a (bool, str) pair: "master" for record linking, "json" for
    structured fields (hgf_245__), "asci" for plain subfield values
    (hgf_245__a), and (False, "") for anything else.
    @param fieldname : filename in curdir like hgf_245__a
    """
    if fieldname == "hgf_release":
        return False, ""  # do not check for hgf_release
    if len(fieldname) < len("hgf_xxx"):
        return False, ""  # controlfields or shorter
    if fieldname == "hgf_master":
        return True, "master"  # record linking
    # NOTE(review): [A-z] also matches a few punctuation chars between the
    # ASCII letter ranges — kept as-is to preserve behaviour
    if re.search('_[A-Za-z0-9]{3}[_A-z0-9]', fieldname):
        if len(fieldname) == 9:
            return True, "json"
        if len(fieldname) > 9:
            return True, "asci"
    return False, ""
def backup_file(curdir, fieldname):
    """
    Create a .bak copy of a curdir file. Useful for testing submission
    stages but usually not used in our productive workflows.
    @param curdir : curdir from websubmit containing all files
    @param fieldname : filename in curdir like hgf_245__a
    """
    import shutil  # local import: the module header predates this dependency
    bak_file = os.path.join(curdir, fieldname + ".bak")
    orig_file = os.path.join(curdir, fieldname)
    # shutil.copy replaces the old "cp" shell-out (no subshell, no quoting
    # issues) — resolves the TODO in the original
    shutil.copy(orig_file, bak_file)
|
import pylab
from numpy import *
from numpy.fft import *

# Gaussian pulse broadened by group-velocity dispersion (GVD):
# apply a quadratic spectral phase in the frequency domain and compare
# the intensity FWHM before and after.
max_t = 128e-15  # s, half-width of the time window
num_p = 128      # number of sample points
t_fwhm = 10e-15  # s, intensity FWHM of the input pulse
GVD = 362e-30    # dispersion parameter — presumably s^2, TODO confirm units

t = arange(-max_t, max_t, 2*max_t/num_p)
freqs = 2*pi*fftfreq(num_p, 2*max_t/num_p)

t_sigma = t_fwhm/2.3548   # FWHM -> Gaussian sigma
t_sigma *= sqrt(2)        # t_fwhm is for the intensity envelope

E = exp(-t**2/(2*t_sigma**2))
fft_E = fft(E)
fft_E *= exp(1j*freqs**2*GVD/2.)  # quadratic spectral phase (dispersion)
E2 = ifft(fft_E)

pylab.plot(t, abs(E)**2)
pylab.plot(t, abs(E2)**2)
pylab.show()

# calculate intensity envelopes
env1 = abs(E)**2
env2 = abs(E2)**2

# estimate FWHM from the second moment (Gaussian assumption)
X = arange(len(env1))
x = sum(X*env1)/sum(env1)
width = sqrt(abs(sum((X-x)**2*env1)/sum(env1)))
fwhm1 = 2.3548*width*2*max_t/num_p

X = arange(len(env2))
x = sum(X*env2)/sum(env2)
width = sqrt(abs(sum((X-x)**2*env2)/sum(env2)))
fwhm2 = 2.3548*width*2*max_t/num_p

# Python 3 print call (the original used the Python 2 print statement)
print('initial = ', fwhm1*1e15, ' fs\n final = ', fwhm2*1e15, ' fs')
|
'''Cluster boxes to K centroids with K-means.
Note:
The actual anchor boxes used is from the Darknet config file: yolo-voc.cfg
anchors = [(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053), (11.2364, 10.0071)]
What I get:
anchors = [(0.6240, 1.2133), (1.4300, 2.2075), (2.2360, 4.3081), (4.3940, 6.5976), (9.5680, 9.9493)]
'''
import torch
import random
from utils import iou
def init_centroids(X, K):
    '''Pick K centroids with the K-means++ algorithm.

    Args:
      X: (tensor) data, sized [N,D].
      K: (int) number of clusters.

    Returns:
      (tensor) picked centroids, sized [K,D].
    '''
    N, D = X.size()
    # First centroid: a uniformly random sample
    picked_ids = [random.randint(0, N-1)]
    while len(picked_ids) < K:
        # Next centroid: the remaining point farthest (smallest IoU) from
        # the already-picked set
        centroids = X[torch.LongTensor(picked_ids)]
        other_ids = [i for i in range(N) if i not in picked_ids]
        ious = iou(centroids, X[torch.LongTensor(other_ids)])
        min_ious, _ = ious.min(0)
        _, idx = min_ious.min(1)
        # NOTE(review): idx indexes into other_ids, not into X directly —
        # looks suspicious, confirm against the iou() contract
        picked_ids.append(idx[0][0])
    return X[torch.LongTensor(picked_ids)]
def pick_centroid_from_cluster(X):
    '''Instead of choosing the mean of the cluster as its centroid,
    pick the sample with the maximum average IoU against the cluster.

    Args:
      X: (tensor) samples of a cluster.

    Returns:
      (tensor) picked centroid from the cluster.
    '''
    best_iou = -1
    for candidate in X:
        mean_iou = iou(candidate.view(1, 4), X).mean()
        if mean_iou > best_iou:
            best_iou = mean_iou
            centroid = candidate
    return centroid
def kmeans(X, K, max_iter=100, tol=1e-7):
    '''Run K-means on data X with 1-IoU as the distance measure.

    Args:
      X: (tensor) data, sized [N,D].
      K: (int) number of clusters.
      max_iter: (int) max number of iterations.
      tol: (float) loss tolerance between two iterations.

    Returns:
      (tensor) centroids, sized [K,D].
    '''
    N, D = X.size()
    assert N >= K, 'Too few samples for K-means'

    # Initialise with K random samples (K-means++ variant kept for reference)
    centroids = X[torch.randperm(N)[:K]].clone()
    # centroids = init_centroids(X, K)

    last_loss = 0
    for it in range(max_iter):
        # Assignment step: each sample goes to its nearest centroid
        groups = [[] for _ in range(K)]
        dist_sum = 0
        for i in range(N):
            dists = 1 - iou(X[i].view(1, 4), centroids)
            # dists = (x.expand_as(centroids) - centroids).pow(2).sum(1).sqrt()
            min_dist, centroid_idx = dists.squeeze().min(0)
            groups[centroid_idx[0]].append(i)
            dist_sum += min_dist[0]

        loss = dist_sum / N
        print('iter: %d/%d loss: %f avg_iou: %f' % (it, max_iter, loss, 1-loss))

        # Update step: re-pick each centroid from its cluster members
        centroids = torch.stack([
            pick_centroid_from_cluster(X[torch.LongTensor(groups[k])])
            for k in range(K)
        ])

        if abs(last_loss - loss) < tol:
            break
        last_loss = loss
    return centroids
random.seed(0)
torch.manual_seed(0)

K = 5  # number of anchor clusters
list_file = './voc_data/voc07_train.txt'
fmsize = 13  # feature-map size the anchors are scaled to

# Collect every ground-truth box as [0, 0, w, h]: position-free,
# dimensions normalised by the image size.
# NOTE(review): the original leaked the file handle and materialised the
# whole file with readlines(); the with-statement closes it and lines are
# streamed instead.
boxes = []
with open(list_file, 'r') as f:
    for line in f:
        splited = line.strip().split()
        img_width = float(splited[1])
        img_height = float(splited[2])
        num_boxes = (len(splited) - 3) // 5
        for i in range(num_boxes):
            xmin = float(splited[3+5*i]) / img_width
            ymin = float(splited[4+5*i]) / img_height
            xmax = float(splited[5+5*i]) / img_width
            ymax = float(splited[6+5*i]) / img_height
            boxes.append([0, 0, xmax - xmin, ymax - ymin])

boxes = torch.Tensor(boxes)
centroids = kmeans(boxes, K)
print(centroids * fmsize)
|
import time
import sys
import os

# make sibling modules importable when run as a script
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from utils import load_config
from probe import SIPProbe

# NOTE(review): the original wrapped everything below in
# `except Exception as error: raise Exception(error)`, which replaced the
# exception type and discarded the traceback without adding information;
# errors now propagate unchanged.
configuration = load_config()
inventory = configuration['inventory']

# global ping defaults
default_ping = configuration['ping']
default_ping_interval = default_ping['interval']
default_ping_timeout = default_ping['timeout']
default_ping_latency = default_ping['latency']

# global notification defaults
default_notification = configuration['notification']
default_notification_timeout_schedulers = default_notification['timeout_schedulers']
default_notification_latency_schedulers = default_notification['latency_schedulers']
default_notification_methods = default_notification['methods']

threads = []
for server in inventory:
    # per-server settings fall back to the global defaults
    ping = server.get('ping', default_ping)
    notification = server.get('notification', default_notification)
    instance = SIPProbe(
        server['name'], server['host'], server['port'], server['transport'],
        ping.get('interval', default_ping_interval),
        ping.get('timeout', default_ping_timeout),
        notification.get('timeout_schedulers', default_notification_timeout_schedulers),
        ping.get('latency', default_ping_latency),
        notification.get('latency_schedulers', default_notification_latency_schedulers),
        notification.get('methods', default_notification_methods),
    )
    threads.append(instance)

for thread in threads:
    thread.start()

# keep the main thread alive so the probe threads keep running
while True:
    time.sleep(1)
|
# Auth blueprint: groups the authentication routes under the 'auth' prefix.
from flask import Blueprint
auth = Blueprint('auth',__name__)
# Imported only after the blueprint exists so views/forms can register
# routes on it (deliberate circular-import pattern).
from . import views,forms
from flask_login import LoginManager
login_manager = LoginManager()
# 'strong' session protection invalidates sessions whose identifiers change.
login_manager.session_protection = 'strong'
# Endpoint users are redirected to when login is required.
login_manager.login_view = 'auth.login'
|
import collections
from importplotfunctionsCR import *
from inputplotdataCR import inputplotdict

# Figures to render; each entry of inputplotdict carries its own
# plotting callable under '_plotfunction'.
plotlist = ['figBrms_m12fhr','figBrms_m12mhr']
#plotlist = ['figBrms_m12bhr','figBrms_m12chr']

for plotneed in plotlist:
    spec = inputplotdict[plotneed]
    spec['_plotfunction'](spec)
|
from django.contrib import messages
from django.http import HttpResponseRedirect,HttpResponse
from django.shortcuts import render
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from apps.teams.decorators import login_and_team_required, team_admin_required
from apps.teams.util import get_default_team
def home(request):
    """
    Set the home page on the django side: always redirect to the React
    object-lifecycle page.
    """
    return HttpResponseRedirect(reverse('pegasus:react_object_lifecycle'))

# Kept for later use (may be restored), hence not removed:
# if request.user.is_authenticated:
#     team = get_default_team(request)
#     if team:
#         return HttpResponseRedirect(reverse('web:team_home', args=[team.slug]))
#     else:
#         messages.info(request, _(
#             'Teams are enabled but you have no teams. '
#             'Create a team below to access the rest of the dashboard.'
#         ))
#         return HttpResponseRedirect(reverse('teams:manage_teams'))
# else:
#     return render(request, 'web/landing_page.html')
@login_and_team_required
def team_home(request, team_slug):
    """Render the team dashboard for the team addressed by the URL."""
    # NOTE(review): assert is stripped under `python -O` — consider an
    # explicit check raising a proper HTTP error
    assert request.team.slug == team_slug
    context = {
        'team': request.team,
        'active_tab': 'dashboard',
    }
    return render(request, 'web/app_home.html', context=context)
@team_admin_required
def team_admin_home(request, team_slug):
    """Render the admin page for the team addressed by the URL."""
    # NOTE(review): assert is stripped under `python -O` — consider an
    # explicit check raising a proper HTTP error
    assert request.team.slug == team_slug
    context = {
        'active_tab': 'team-admin',
        'team': request.team,
    }
    return render(request, 'web/team_admin.html', context=context)
def msfile(request):
    """Serve the Microsoft identity-association verification file as JSON."""
    # with-statement closes the handle even if read() raises
    with open('.well-known/microsoft-identity-association.json', 'r') as f:
        file_content = f.read()
    return HttpResponse(file_content, content_type="application/json")
|
from onegov.core.orm import Base
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.mixins import UTCPublicationMixin
from onegov.core.orm.types import UUID
from onegov.file import AssociatedFiles
from onegov.gis import CoordinatesMixin
from onegov.search import SearchableContent
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Text
from sqlalchemy.dialects.postgresql import HSTORE
from sqlalchemy.ext.mutable import MutableDict
from uuid import uuid4
class DirectoryEntry(Base, ContentMixin, CoordinatesMixin, TimestampMixin,
                     SearchableContent, AssociatedFiles, UTCPublicationMixin):
    """ A single entry of a directory. """

    __tablename__ = 'directory_entries'

    #: Elasticsearch mapping for this model
    es_properties = {
        'keywords': {'type': 'keyword'},
        'title': {'type': 'localized'},
        'lead': {'type': 'localized'},
        'directory_id': {'type': 'keyword'},
        # since the searchable text might include html, we remove it
        # even if there's no html -> possibly decreasing the search
        # quality a bit
        'text': {'type': 'localized_html'}
    }

    @property
    def es_public(self):
        return False  # to be overridden downstream

    #: An internal id for references (not public)
    id = Column(UUID, primary_key=True, default=uuid4)

    #: The public id of the directory entry
    name = Column(Text, nullable=False)

    #: The directory this entry belongs to
    directory_id: 'Column[UUID]' = Column(
        ForeignKey('directories.id'), nullable=False)

    #: the polymorphic type of the entry
    type = Column(Text, nullable=False, default=lambda: 'generic')

    #: The order of the entry in the directory
    order = Column(Text, nullable=False, index=True)

    #: The title of the entry
    title = Column(Text, nullable=False)

    #: Describes the entry briefly
    lead = Column(Text, nullable=True)

    #: All keywords defined for this entry (indexed)
    _keywords = Column(  # type:ignore
        MutableDict.as_mutable(HSTORE), nullable=True, name='keywords'
    )

    __mapper_args__ = {
        'polymorphic_on': type,
        'polymorphic_identity': 'generic',
    }

    __table_args__ = (
        Index('inverted_keywords', 'keywords', postgresql_using='gin'),
        Index('unique_entry_name', 'directory_id', 'name', unique=True),
    )

    @property
    def external_link(self):
        """Link extracted from the entry's values via the directory config."""
        return self.directory.configuration.extract_link(self.values)

    @property
    def external_link_title(self):
        return self.directory.configuration.link_title

    @property
    def external_link_visible(self):
        return self.directory.configuration.link_visible

    @property
    def directory_name(self):
        return self.directory.name

    @property
    def keywords(self):
        """Keywords as a set (stored as HSTORE keys with empty values)."""
        return set(self._keywords.keys()) if self._keywords else set()

    @keywords.setter
    def keywords(self, value):
        self._keywords = {k: '' for k in value} if value else None

    @property
    def text(self):
        """Searchable text extracted from the values via the directory config."""
        return self.directory.configuration.extract_searchable(self.values)

    @property
    def values(self):
        """The user-entered values stored on the content dict (may be falsy)."""
        return self.content and self.content.get('values', {})

    @values.setter
    def values(self, values):
        self.content = self.content or {}
        self.content['values'] = values
        self.content.changed()
|
import numpy as np
import cv2
def print_image(name_image, image):
    """Show `image` in a window titled `name_image`, wait for a key press,
    then close all windows."""
    cv2.imshow(name_image, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def convert_image_to_RGB(image):
    """Replicate a single-channel image into a 3-channel (H, W, 3) array.

    Fixes the original implementation, which read the module-level ``img``
    with swapped indices (``img[ch, wi]``) instead of using the ``image``
    parameter at ``[he, wi]``.

    :param image: 2-D array (grayscale image)
    :return: float64 array of shape (height, width, 3); every channel is a
             copy of the input plane
    """
    height, width = image.shape
    channels = 3
    img_rgb = np.zeros((height, width, channels))
    # broadcast the grayscale plane into every channel (replaces the
    # triple nested loop)
    img_rgb[:, :, :] = np.asarray(image)[:, :, None]
    return img_rgb
def image_to_gray_scale(image):
    """Convert a BGR image to a single-channel grayscale image."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def invert(image):
    """Return the photographic negative: every pixel becomes |255 - value|."""
    return abs(255 - image)
# Load the grayscale matrix from disk and expand it to RGB
img = np.loadtxt('ImageMatriz.txt')
img = convert_image_to_RGB(img).astype('uint8')

# Show the original image
print_image('original', img)

# Convert back to gray scale and show it
img_gray = image_to_gray_scale(img)
print_image('gray scale', img_gray)
|
import pygame
from pygame.sprite import Sprite
from sprites import X, Y, InvalidMoveException, BLUE
from sprites.characters.tank import TankCharacter, TANK_WIDTH
# Tank gameplay tuning constants
MOVE_DISTANCE = 10  # pixels the tank shifts per move command
MOVE_COUNT_MAX = 100  # total move commands allowed per tank
MAX_ANGLE = 180  # cannon angle bounds (degrees)
MIN_ANGLE = 0
MAX_POWER = 150  # cannon power bounds
MIN_POWER = 0
LEFT = 0  # direction flags passed to Tank._move
RIGHT = 1
class Tank(Sprite):
def __init__(self, screen_dimensions, location, weapons_list, terrain, color=BLUE, name='Name', switch_player_callback=None, damage_callback=None):
    """Create a tank.

    :param screen_dimensions: (x, y) size of the play field
    :param location: [x, y] starting position (the live list is kept and mutated)
    :param weapons_list: available weapon objects
    :param terrain: terrain object used for height/grade queries
    :param color: tank colour; also decides the initial cannon direction
    :param name: display name
    :param switch_player_callback: callback stored for turn handling — usage not visible here, TODO confirm
    :param damage_callback: called as (name, damage) when damage is applied
    """
    Sprite.__init__(self)
    # Game context
    self._dimension = screen_dimensions  # x, y
    self._terrain = terrain
    self._enemy = None
    self._switch_player_callback = switch_player_callback
    self._damage_callback = damage_callback
    # Tank state
    self._location = location
    self._move_count = MOVE_COUNT_MAX
    self._move_amt = 0
    self._color = color
    self._tank_character = TankCharacter(location, color)
    self._health = 100
    self._name = name
    # Cannon: blue starts at 45 degrees, the other side mirrored at 135
    self._gun_angle = MAX_ANGLE/4 if color == BLUE else MAX_ANGLE * 3/4
    self._gun_power = MAX_POWER/2
    # Weapons
    self._weapons = weapons_list
    self._weapon_selected = 0
    # Animation tracking
    self._is_animating = False
def stop_animating(self):
    """Mark the current weapon animation as finished."""
    self._is_animating = False
def is_animating(self):
    """Return True while a fired weapon's animation is playing."""
    return self._is_animating
def get_location(self):
    """Return the tank's [x, y] location (the live list, not a copy)."""
    return self._location
def set_target(self, tank):
    """Remember the opposing tank so weapons can be aimed at it."""
    self._enemy = tank
def is_alive(self):
    """Return True while the tank still has health left."""
    return self._health > 0
def get_name(self):
    """Return the tank's display name."""
    return self._name
def get_health(self):
    """Return the tank's current health (0..100)."""
    return self._health
def get_color(self):
    """Return the tank's colour."""
    return self._color
def get_power(self):
    """Return the current cannon power."""
    return self._gun_power
def get_angle(self):
    """Return the current cannon angle in degrees."""
    return self._gun_angle
def get_move_count(self):
    """Return how many move commands the tank has left."""
    return self._move_count
def get_current_weapon_name(self):
    """Return the display name of the currently selected weapon."""
    return self._weapons[self._weapon_selected].get_name()
def damage(self, damage_value):
    """Apply `damage_value` HP of damage, clamping health at 0.

    Fixes two defects in the original: the damage callback was skipped
    entirely when the hit was lethal (damage > remaining health), and it
    crashed when no callback was configured (the constructor default is
    None).

    :param damage_value: damage to apply; coerced to int
    """
    damage_value = int(damage_value)
    if self._health >= damage_value:
        self._health -= damage_value
    else:
        self._health = 0
    if self._damage_callback is not None:
        self._damage_callback(self._name, damage_value)
    if damage_value > 0:
        print(self._name + ": Ouch!, I've been damaged by " + str(damage_value) + ", my new health is: " + str(self._health))
# Render functions for animations
def update(self, elapsed_time):
    """Per-frame update: advance any running weapon animation and keep the
    tank aligned with the terrain under it."""
    if self._is_animating:
        self._step_animation(elapsed_time)
        self._weapons[self._weapon_selected].update(elapsed_time)
    self._tank_character.set_cannon_angle(self._gun_angle)
    grade = self._terrain.grade_at_point(self._location[X], TANK_WIDTH)
    self._tank_character.move(self._location[X], self._location[Y], grade)
def draw(self, surface):
    """Draw the tank, plus the active weapon while it is animating."""
    self._tank_character.draw(surface)
    if self._is_animating:
        self._weapons[self._weapon_selected].draw(surface)
# --------------------- #
# Change to next weapon #
# --------------------- #
def load_next_weapon(self, force=False):
    """Select the next weapon in the list; `force` bypasses the animation
    lock. Selecting past the last weapon is a silent no-op."""
    if self._is_animating and not force:
        raise InvalidMoveException("Cannot play while game is animating.")
    if self._weapon_selected < len(self._weapons) - 1:
        self._weapon_selected += 1
# ------------------------- #
# Change to previous weapon #
# ------------------------- #
def load_previous_weapon(self, force=False):
    """Select the previous weapon in the list. Selecting before the first
    weapon is a silent no-op.

    :param force: bypass the animation lock; added (with a compatible
        default) for symmetry with load_next_weapon, which already had it.
    """
    if not self._is_animating or force:
        if self._weapon_selected > 0:
            self._weapon_selected -= 1
    else:
        raise InvalidMoveException("Cannot play while game is animating.")
# ---------- #
# Move Right #
# ---------- #
def move_right(self):
    """Drive one step to the right (consumes one move)."""
    if self._is_animating:
        raise InvalidMoveException("Cannot play while game is animating.")
    self._move(RIGHT)
# --------- #
# Move Left #
# --------- #
def move_left(self):
    """Drive one step to the left (consumes one move)."""
    if self._is_animating:
        raise InvalidMoveException("Cannot play while game is animating.")
    self._move(LEFT)
# -------------- #
# Increase angle #
# -------------- #
def increase_angle(self):
    """Raise the cannon angle by one degree, capped at MAX_ANGLE."""
    if self._is_animating:
        raise InvalidMoveException("Cannot play while game is animating.")
    if self._gun_angle >= MAX_ANGLE:
        # invalid move
        raise InvalidMoveException("Cannot Increase angle past " + str(MAX_ANGLE))
    self._gun_angle += 1
# -------------- #
# Decrease angle #
# -------------- #
def decrease_angle(self):
if not self._is_animating:
if self._gun_angle > MIN_ANGLE:
self._gun_angle -= 1
else:
# invalid move
raise InvalidMoveException("Cannot decrease angle past " + str(MIN_ANGLE))
else:
raise InvalidMoveException("Cannot play while game is animating.")
# -------------- #
# Increase power #
# -------------- #
def increase_power(self):
if not self._is_animating:
if self._gun_power < MAX_POWER:
# print("more- power: " + str(self._gun_power))
self._gun_power += 1
else:
# invalid move
raise InvalidMoveException("Cannot Increase power past " + str(MAX_POWER))
else:
raise InvalidMoveException("Cannot play while game is animating.")
# -------------- #
# Decrease power #
# -------------- #
def decrease_power(self):
if not self._is_animating:
if self._gun_power > MIN_POWER:
# print("less power: " + str(self._gun_power))
self._gun_power -= 1
else:
# invalid move
raise InvalidMoveException("Cannot decrease power past " + str(MIN_POWER))
else:
raise InvalidMoveException("Cannot play while game is animating.")
    def _get_cannon_tip(self): # returns location [x, y]
        """Return the cannon-tip position [x, y], delegated to the sprite."""
        return self._tank_character.get_cannon_tip()
def fire(self):
# Setup the weapon
weapon = self._weapons[self._weapon_selected]
weapon.prepare(self, self._enemy)
# FIRE!!!!
weapon.fire(self._gun_angle, self._gun_power, self._get_cannon_tip(), self._impact_callback)
# Enable animation
self._is_animating = True
# ------------------ #
# ---- PRIVATES ---- #
# ------------------ #
# Move Function
def _move(self, direction):
if not self._is_animating:
if self._move_count != 0:
self._move_count -= 1
self._location[X] += MOVE_DISTANCE if direction == RIGHT else - MOVE_DISTANCE
self._location[Y] = self._terrain.height_at_point(self._location[X])
else:
# Invalid move
raise InvalidMoveException("Cannot move the tank anymore, maximum moves used: " +
str(MOVE_COUNT_MAX) + " .")
else:
raise InvalidMoveException("Cannot play while game is animating.")
    def _step_animation(self, elapsed_time):
        # Per-frame animation hook: called from update() while
        # self._is_animating is True; the base tank animates nothing itself.
        pass
    def _impact_callback(self, impact_location, damage, distance):
        """Projectile impact handler: advance to the next weapon (forced,
        since the animating flag is still set) and hand the turn over."""
        self.load_next_weapon(force=True)
        self._switch_player_callback(impact_location, damage, distance)
|
import os
import glob
import cv2
import pickle
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import gzip
import numpy as np
import cv2
from sklearn.ensemble import RandomForestClassifier
from skimage.io import imread
from sklearn.metrics import accuracy_score
target = []
images = []
# Build the dataset: grayscale every readable image in "Stop Signs".
# Files whose name starts with '5' are labelled 0 ("yield" per the log
# message), everything else is labelled 1 ("stop").
for file in os.listdir("Stop Signs"):
    img = cv2.imread(os.path.join("Stop Signs",file))
    if img is not None:
        images.append(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
        if file[0] == '5':
            # Fix: print("yield %s", file[0]) printed the literal "%s";
            # apply %-formatting instead of passing two arguments.
            print("yield %s" % file[0])
            target.append(0)
        else:
            target.append(1)
            print("stop %s" % file[0])
        # Debug preview of each image; blocks until a key is pressed.
        cv2.imshow("tset",img)
        cv2.waitKey(0)
def trainNetwork():
    """Fit a random forest on the flattened grayscale images.

    Uses the module-level `images`/`target` lists; each 2-D image is
    unrolled into a 1-D feature vector.
    """
    features = [images[i].ravel() for i in range(len(target))]
    clf = RandomForestClassifier(n_estimators=10, n_jobs=2)
    return clf.fit(features, target)
# Train once at import time so `clf` is ready for use.
clf = trainNetwork()
|
import json
def loadData():
    """Read the Albadi 2018 test+train JSON-lines files into one list.

    Returns the parsed records, test file first, then train.
    """
    paths = ('./data/Albadi_2018/Hate_Speech/test.json',
             './data/Albadi_2018/Hate_Speech/train.json')
    lines = []
    for path in paths:
        with open(path) as f:
            lines.extend(f.readlines())
    return [json.loads(line.strip()) for line in lines]
def get_data_binary():
    """Return [{'text', 'label'}] records; label is 'abusive' when the
    source record's 'hate' field is 'yes', 'neutral' otherwise."""
    return [
        {'text': elem['full_text'],
         'label': 'abusive' if elem['hate'] == 'yes' else 'neutral'}
        for elem in loadData()
    ]
def get_data():
    """Return records with text, hate/neutral label, tweet id and user id."""
    full_data = []
    for elem in loadData():
        label = 'hate' if elem['hate'] == 'yes' else 'neutral'
        full_data.append({'text': elem['full_text'],
                          'label': label,
                          'id': str(elem['id_str']),
                          'user': {'id': str(elem['user']['id'])}})
    return full_data
def get_complete_data():
    """Backward-compatible alias of get_data().

    The original body was a byte-for-byte duplicate of get_data();
    delegating removes the duplication while keeping the public name.
    """
    return get_data()
def get_available_data():
    """Join the API-dump tweets with labels from the original corpus.

    Only tweets present in the dump (i.e. still retrievable) are returned.
    """
    labels = {}
    for rec in loadData():
        labels[rec['id_str']] = 'hate' if rec['hate'] == 'yes' else 'neutral'
    # load available data
    with open('./data/Albadi_2018/Hate_Speech/210218_API_dump.json') as json_file:
        available_data = json.load(json_file)
    return [
        {'text': rec['full_text'],
         'label': labels[rec['id_str']],  # KeyError for ids missing from the corpus
         'id': str(rec['id_str']),
         'user': {'id': str(rec['user']['id'])}}
        for rec in available_data
    ]
from django.conf.urls import url
from .views import redirect_url,api_root
urlpatterns = [
    # POST endpoint that creates a new short URL.
    # NOTE(review): the pattern is unanchored at the end (no `$`), so any
    # path starting with api/short/new also matches — confirm intended.
    url(r'^api/short/new', api_root, name='POST URL'),
    # Catch-all slug: resolves a tiny id and redirects to the target URL.
    url(r'^(?P<tiny_id>[-\w]+)/$', redirect_url, name='Redirect URL'),
]
# flake8: noqa
from .base import *
# Development settings: DEBUG must stay disabled in production deployments.
DEBUG = True
|
#!/usr/bin/env python
import roslib
import rospy
import smach
import smach_ros
import random
# Example state machine that attempts to complete all tasks
# Init state
# Returns whether initialization completed or failed
class Init(smach.State):
    # One-shot setup state; outcome 'completed' on success, 'failed' otherwise.
    def __init__(self):
        smach.State.__init__(self, outcomes=['completed','failed'])
    def execute(self, userdata):
        # Placeholder setup; this example machine always succeeds.
        print 'Initializing...'
        return 'completed'
# Next state
# Determines the where the 'robot' should move next
# Returns found if determined where the robot should move
# Returns completed if there is nowhere else to move (all tasks completed)
class Next(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['completed','found'],
input_keys=['task','success','completed'],
output_keys=['next'])
def execute(self, userdata):
print 'Determining next state...'
task = userdata.task
success = userdata.success
completed = userdata.completed
# A task has just exited
if (task != None):
completed[task] = success
index = nextIndex(completed)
if index != -1:
userdata.next = index
return 'found' # A task was found
else:
return 'completed' # All tasks are completed
# Init has just taken place
else:
userdata.next = 0
return 'found'
# Move state
# Returns task to transition to
class Move(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['to_task0','to_task1','to_task2','failed'],
input_keys=['next'])
def execute(self, userdata):
next = userdata.next
print 'Moving to task {}'.format(next)
if next == 0:
return 'to_task0'
elif next == 1:
return 'to_task1'
elif next == 2:
return 'to_task2'
return 'failed'
# --------------------------------------------------------------------------------- #
# Generic Task
class Task(smach.State):
    # Generic task state: attempt the task, record the result in userdata.
    def __init__(self, index):
        smach.State.__init__(self, outcomes=['exit'],
                                output_keys=['task','success'])
        self._index = index
    def execute(self, userdata):
        print 'Attempting task {}'.format(self._index)
        # Simulated work: succeeds or fails at random.
        success = randomBool()
        if success:
            print "Task {} succeeded".format(self._index)
        else:
            print "Task {} failed".format(self._index)
        # Report back so Next can mark this task done (or retry it later).
        userdata.task = self._index
        userdata.success = success
        return 'exit'
# --------------------------------------------------------------------------------- #
# Initializes user data
def initializeData(userdata):
    """Seed the state-machine userdata before execution.

    Fix: `task` starts as None (not -1) so Next's first pass takes its
    "just initialized" branch; the old -1 sentinel failed Next's
    None-check and recorded a bogus completed[-1] entry.
    """
    userdata.next = None
    userdata.task = None
    userdata.success = None
    # Completion status per task index; all pending at start.
    userdata.completed = {
        0: False,
        1: False,
        2: False
    }
# Return first index of incomplete task or -1 if all tasks are completed
def nextIndex(completed):
    """Return the first task index whose completed flag is falsy, -1 if none."""
    for index, done in completed.items():
        if not done:
            return index
    return -1
# Returns a random bool
# Returns a random bool (True with probability ~0.5).
def randomBool():
    return random.random() > 0.5
# Main
# Creates state machine
def main():
    """Build the example state machine, wire its transitions, and run it."""
    rospy.init_node('state_machine')
    sm = smach.StateMachine(outcomes=['completed','failed'])
    initializeData(sm.userdata)
    with sm:
        # Primary states: INIT -> NEXT -> MOVE -> TASKn -> back to NEXT.
        smach.StateMachine.add('INIT', Init(),
                                transitions={'completed':'NEXT',
                                            'failed':'failed'})
        smach.StateMachine.add('NEXT', Next(),
                                transitions={'completed':'completed',
                                            'found':'MOVE'})
        smach.StateMachine.add('MOVE', Move(),
                                transitions={'to_task0':'TASK0',
                                            'to_task1':'TASK1',
                                            'to_task2':'TASK2',
                                            'failed':'NEXT'})
        # Task States: every task returns to NEXT when it exits.
        smach.StateMachine.add('TASK0', Task(0), transitions={'exit':'NEXT'})
        smach.StateMachine.add('TASK1', Task(1), transitions={'exit':'NEXT'})
        smach.StateMachine.add('TASK2', Task(2), transitions={'exit':'NEXT'})
    outcome = sm.execute()
# Script entry point.
if __name__ == '__main__':
    main()
|
from django.shortcuts import render,HttpResponse,redirect
from apps.user.models import *
from apps.goods.models import *
from apps.order.models import *
import re
from django.urls import reverse #反向解析
from django.views import View
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import SignatureExpired
from django.conf import settings
from celery_tasks.tasks import send_register_active_email
from django.contrib.auth import authenticate, login,logout
from django.core.paginator import Paginator #获取分页
#未登录进行跳转
from utils.mixin import LoginRequiredMixin
# Create your views here.
# def register(request):
# '''
# 注册处理
# :param request:
# :return:
# '''
# if request.method == 'GET':
# #显示注册页面
# return render(request,'register.html')
# elif request.method == 'POST':
# #注册处理;
# #1.接受数据;
# username = request.POST.get('user_name')
# pwd = request.POST.get('pwd')
# cpwd = request.POST.get('cpwd')
# email = request.POST.get('email')
# allow = request.POST.get('allow')
# #2.校验数据;判断数据是否为空;
# if not all([username,pwd,cpwd,email,allow]):
# #数据不完整
# return render(request,'register.html',{'errmsg':'数据不完整'})
# #检验密码是否相同
# if pwd != cpwd:
# #密码不同
# return render(request,'register.html',{'errmsg':'两次密码不同'})
# #检验邮箱
# #只允许英文字母、数字、下划线、英文句号、以及中划线组成
# if not re.match(r'^[a-z0-9][\w.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$',email):
# return render(request, 'register.html', {'errmsg': '邮箱格式不正确'})
# #允许
# if allow != 'on':
# return render(request,'register.html',{'errmsg':'请同意协议'})
# #3.业务处理,先判断用户名是否重复,再将数据存入表中;
# #检验用户名是否重复
# try:
# user = User.objects.get(username=username)
# except User.DoesNotExist:
# #用户名不存在
# user = None
# if user:
# #用户名已存在
# return render(request,'register.html',{'errmsg':'用户名已存在'})
# else:
# user = User.objects.create_user(username, email, pwd)
# user.is_active = 0 #
# user.save()
# #4.返回应答,跳转至首页。
# url = reverse('goods:index') #反向解析,去goods/index/
# return redirect(url)
# # return HttpResponse('register ok')
#CBV
class RegisterView(View):
    """User registration: GET renders the form, POST validates the input,
    creates an inactive account and e-mails an activation link."""
    def get(self,request):
        # Render the registration page.
        return render(request, 'register.html')
    def post(self,request):
        # Handle the submitted registration form.
        # 1. Collect the fields.
        username = request.POST.get('user_name')
        pwd = request.POST.get('pwd')
        cpwd = request.POST.get('cpwd')
        email = request.POST.get('email')
        allow = request.POST.get('allow')
        # 2. Validate: every field is required.
        if not all([username, pwd, cpwd, email, allow]):
            # Incomplete data.
            return render(request, 'register.html', {'errmsg': '数据不完整'})
        # The two passwords must match.
        if pwd != cpwd:
            # Passwords differ.
            return render(request, 'register.html', {'errmsg': '两次密码不同'})
        # Validate the e-mail address
        # (letters, digits, underscore, dot and hyphen only).
        if not re.match(r'^[a-z0-9][\w.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return render(request, 'register.html', {'errmsg': '邮箱格式不正确'})
        # The terms-of-service checkbox must be ticked.
        if allow != 'on':
            return render(request, 'register.html', {'errmsg': '请同意协议'})
        # 3. Business logic: reject duplicate usernames, then store the user.
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # Username is free.
            user = None
        if user:
            # Username already taken.
            return render(request, 'register.html', {'errmsg': '用户名已存在'})
        else:
            user = User.objects.create_user(username, email, pwd)
            user.is_active = 0  # inactive until the activation link is used
            user.save()
            # Send an activation mail with a link like
            # http://127.0.0.1:8000/user/active/<token>; the user's identity
            # is embedded in a signed, time-limited token.
            serializer = Serializer(settings.SECRET_KEY,3600)
            info = {'confirm':user.id}
            token = serializer.dumps(info)  # bytes
            token = token.decode()  # to str
            # print(token)
            # Send the mail asynchronously via the celery task; extra
            # arguments go through delay(), e.g. my_task.delay(10, 20).
            send_register_active_email.delay(email,username,token)
            # 4. Redirect to the index page.
            url = reverse('goods:index')  # reverse-resolve goods:index
            return redirect(url)
            # return HttpResponse('register ok')
class ActiveView(View):
    '''User account activation.'''
    def get(self,request,token):
        """Decrypt the signed token, activate the matching user and
        redirect to the login page; an expired link gets an error page.

        token: the time-limited serializer token from the activation mail.
        """
        # Must mirror the serializer used when the mail was generated.
        serializer = Serializer(settings.SECRET_KEY,3600)
        # loads() raises SignatureExpired once the hour has passed.
        try:
            info = serializer.loads(token)
            user_id = info['confirm']
            print(user_id)  # id of the user being activated
            user = User.objects.get(id=user_id)
            user.is_active = 1  # activation succeeded
            user.save()
            # On success, continue to the login page.
            return redirect(reverse('user:login'))
        except SignatureExpired:
            # Expired activation link.
            # Fix: the exception object was previously passed as
            # HttpResponse's second positional argument (content_type),
            # producing a bogus Content-Type header.
            return HttpResponse('激活链接过期')
class LoginView(View):
    '''Login page.'''
    def get(self,request):
        # Pre-fill the username when a "remember me" cookie exists.
        if 'username' in request.COOKIES:
            username = request.COOKIES.get('username')
            checkbox = 'on'
        else:
            username = ''
            checkbox = ''
        return render(request,'login.html',{'username':username,'checkbox':checkbox})
    def post(self,request):
        # Collect the credentials.
        username = request.POST.get('username')
        password = request.POST.get('pwd')
        # Validate: both fields are required.
        if not all([username,password]):
            return render(request,'login.html',{'status':'数据不完整'})
        # Authenticate with Django's built-in auth system.
        user = authenticate(username=username,password=password)
        print(user)
        if user is not None:
            # Username and password are correct.
            print('ok')
            if user.is_active:
                # Account is activated: record the login in the session.
                login(request,user)
                # Redirect to ?next=... when present, index page otherwise.
                next_url = request.GET.get('next',reverse('goods:index'))
                response = redirect( next_url)
                # Persist the username for a week when "remember me" is on.
                remember = request.POST.get('remember')
                if remember == 'on':
                    response.set_cookie('username',username,max_age=7*24*3600)
                else:
                    response.delete_cookie('username')
                return response
            else:
                # Account exists but was never activated.
                return render(request,'login.html',{'status':'请激活你的账户'})
        else:
            # Bad username or password.
            return render(request,'login.html',{'status':'用户名或密码错误'})
#/user/logout
class LogoutView(View):
    '''Log the user out and return to the index page.'''
    def get(self,request):
        logout(request)
        return redirect(reverse('goods:index'))
#/user/
class UserInfoView(LoginRequiredMixin,View):
    '''User center - profile page.'''
    def get(self,request):
        """Render the profile page with the user's default address and the
        five most recently browsed products."""
        # request.user is a User instance here (LoginRequiredMixin
        # guarantees the request is authenticated).
        user = request.user
        address = Address.objects.get_default_address(user)
        # Browsing history lives in redis, newest first.
        from django_redis import get_redis_connection
        con = get_redis_connection('default')  # alias from settings
        history_key = 'history_%d'%user.id
        # Ids of the five most recently viewed SKUs.
        sku_ids = con.lrange(history_key,0,4)
        # Resolve each id, preserving the redis ordering.
        goods_li = [GoodsSKU.objects.get(id=sku_id) for sku_id in sku_ids]
        context = {
            'page': 'user',
            'address': address,
            'goods_li': goods_li,
        }
        return render(request,'user_center_info.html',context)
#/user/order/
class UserOrderView(LoginRequiredMixin, View):
    '''User center - orders page.'''
    def get(self, request, page):
        """Render the user's orders (newest first, one per page).

        page: page number from the URL; non-numeric or out-of-range
        values fall back to page 1.
        """
        user = request.user
        orders = OrderInfo.objects.filter(user=user).order_by('-create_time')
        # Attach per-order display data used by the template.
        for order in orders:
            order_skus = OrderGoods.objects.filter(order_id=order.order_id)
            # Subtotal for every line item.
            for order_sku in order_skus:
                order_sku.amount = order_sku.count * order_sku.price
            # Human-readable status plus the line items themselves.
            order.status_name = OrderInfo.ORDER_STATUS[order.order_status]
            order.order_skus = order_skus
        # Paginate: one order per page.
        paginator = Paginator(orders, 1)
        try:
            page = int(page)
        except Exception:
            page = 1
        if page > paginator.num_pages:
            page = 1
        order_page = paginator.page(page)
        # Page-number window: show at most five page links.
        # 1. fewer than 5 pages: show them all
        # 2. current page within the first 3: show 1-5
        # 3. current page within the last 3: show the last 5
        # 4. otherwise: show current page +/- 2
        num_pages = paginator.num_pages
        if num_pages < 5:
            pages = range(1, num_pages + 1)
        elif page <= 3:
            # Fix: this branch previously tested `num_pages <= 3`, which is
            # unreachable after `num_pages < 5` above, so the "first pages"
            # window never applied.
            pages = range(1, 6)
        elif num_pages - page <= 2:
            pages = range(num_pages - 4, num_pages + 1)
        else:
            # Fix: dropped the stray `num_pages - page >= 4` branch that
            # forced the 1-5 window for middle pages instead of centering.
            pages = range(page - 2, page + 3)
        context = {
            'order_page': order_page,
            'pages': pages,
            'page': 'order',
        }
        return render(request, 'user_center_order.html', context)
#/user/address
class AddressView(LoginRequiredMixin,View):
    '''User center - address page.'''
    def get(self,request):
        """Show the address form with the user's current default address."""
        user = request.user
        address = Address.objects.get_default_address(user)
        return render(request,'user_center_site.html',{'page':'address','address':address})
    def post(self,request):
        """Validate the submitted address and store it; the first address
        a user adds becomes the default one."""
        # 1. Collect the fields.
        receiver = request.POST.get('receiver')
        addr = request.POST.get('addr')
        zip_code = request.POST.get('zip_code')
        phone = request.POST.get('phone')
        # 2. Validate: every field is required.
        if not all([receiver,addr,zip_code,phone]):
            return render(request,'user_center_site.html',{'errmsg':'数据不完整'})
        # Validate the mobile number.
        # Fix: re.match takes (pattern, string) — the arguments were
        # swapped — and the check must reject NON-matching numbers; the
        # original condition was also inverted, so validation never fired.
        if not re.match(r'^(13[0-9]|14[579]|15[0-3,5-9]|16[6]|17[0135678]|18[0-9]|19[89])\d{8}$', phone):
            return render(request,'user_center_site.html',{'errmsg':'手机格式不正确'})
        # 3. Business logic: only the user's first address becomes default.
        user = request.user
        address = Address.objects.get_default_address(user)
        is_default = not address
        Address.objects.create(user=user,
                               receiver=receiver,
                               addr=addr,
                               zip_code=zip_code,
                               phone=phone,
                               is_default=is_default)
        # 4. Redirect back (as a GET) to the address page.
        return redirect(reverse('user:address'))
# #测试login_required
# from django.contrib.auth.decorators import login_required
# @login_required
# def user(request):
# return render(request,'user_center_info.html') |
from flask_cors import CORS
from controllers import quiz_controller, answers_controller, questions_controller, student_courses_controller, student_questions_controller
from controllers import course_controller
from controllers import user_controller
def route(app):
    """Register every controller's routes on the app and enable CORS."""
    controllers = (
        quiz_controller,
        course_controller,
        user_controller,
        answers_controller,
        questions_controller,
        student_courses_controller,
        student_questions_controller,
    )
    # Registration order mirrors the original call order.
    for controller in controllers:
        controller.route(app)
    CORS(app)
|
# -*- coding: utf-8 -*-
# @Author: lc
# @Date: 2017-09-08 09:20:58
# @Last Modified by: lc
# @Last Modified time: 2017-09-19 21:30:35
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
os.environ["CUDA_VISIBLE_DEVICES"] = '0' # decide to use CPU or GPU
import time
import json
from datetime import datetime
import cv2
import numpy as np
from kafka import KafkaConsumer, KafkaProducer
from FaceProcessUtilMultiFaces import preprocessImage
from AlexNet import AlexNet
from VGG import VGGModel
SERVER = '127.0.0.1:9092'
VIDEO_TOPIC = 'video'
IMAGE_TOPIC = 'image'
PROBABILITY_TOPIC = 'emotion_probability'
EMOTION = ('neural', 'angry', 'surprise', 'disgust', 'fear', 'happy', 'sad')
COLOR_RGB = ((0, 255, 0), (255, 0, 0), (0, 0, 255), (0, 255, 255), (255, 0, 255), (255, 255, 0))
COLOR_HEX = ('#00FF00', '#0000FF', '#FF0000', '#FFFF00', '#FF00FF', '#00FFFF')
FONT = cv2.FONT_HERSHEY_SIMPLEX
class VideoConsumer():
    """Consumes raw frame payloads from the Kafka video topic."""
    def __init__(self):
        # 'latest' skips any backlog; switch to 'earliest' to replay.
        self.consumer = KafkaConsumer(bootstrap_servers = [SERVER], auto_offset_reset='latest')
    def get_img(self):
        """Yield every non-empty message payload (encoded image bytes).

        Decoding into an ndarray is left to the caller, e.g.:
        nparr = np.frombuffer(value, np.uint8);
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        """
        self.consumer.subscribe([VIDEO_TOPIC])
        for message in self.consumer:
            # Fix: compare against None with `is not`, not `!=`.
            if message.value is not None:
                yield message.value
class ImageProducer():
    """Publishes annotated frames to the Kafka image topic."""
    def __init__(self):
        self.producer = KafkaProducer(bootstrap_servers = [SERVER])
    def send_img(self, img):
        # img: encoded image bytes to publish.
        self.producer.send(IMAGE_TOPIC, value = img)
class ProbabilityProducer():
    """Publishes emotion probability payloads to the Kafka probability topic."""
    def __init__(self):
        self.producer = KafkaProducer(bootstrap_servers = [SERVER])
    def send_probability_distribution(self, msg):
        # msg: JSON-encoded bytes mapping face color to emotion distribution.
        self.producer.send(PROBABILITY_TOPIC, value = msg)
def predict_and_label_frame(video_consumer, img_producer, probability_producer, model, maximum_detect_face = 6):
    """Fetch frames from Kafka, detect faces, predict each face's emotion,
    annotate the frame, and publish the image and probability streams.

    maximum_detect_face: cap on how many detected faces are processed.
    Runs forever; frames without a face are forwarded unmodified together
    with an all-zero probability distribution.
    """
    consume_count = 0
    produce_count = 0
    while True:
        for img in video_consumer.get_img():
            start_time = time.time()
            consume_count += 1
            print('========Consume {0} from video stream'.format(consume_count))
            # Transform the payload from bytes to an ndarray.
            # Fix: np.fromstring is deprecated for binary input;
            # np.frombuffer is the supported equivalent.
            np_arr = np.frombuffer(img, np.uint8)  # one-dimensional array
            np_img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
            result = preprocessImage(np_img)
            print('**********time consumed by face detection: {0}s'.format(time.time() - start_time))
            start_time = time.time()
            if result['detected']:  # at least one human face found
                produce_count += 1
                # Handle multiple faces in one image, up to the cap.
                num_faces = min(maximum_detect_face, len(result['rescaleimg']))
                face_imgs, face_points = result['rescaleimg'], result['originalPoints']
                emotion_distributions = {}
                for i in range(num_faces):
                    emotion, probability_distribution = model.predict(face_imgs[i])
                    distribution = dict(zip(EMOTION, probability_distribution.tolist()[0]))
                    emotion_distributions[COLOR_HEX[i]] = distribution
                    print('*****************probability_distribution: {0}'.format(probability_distribution))
                    # Draw the bounding box and emotion label on the face.
                    left_top, right_bottom = face_points[i]
                    cv2.rectangle(np_img, left_top, right_bottom, COLOR_RGB[i], 2)
                    text_left_bottom = (left_top[0], left_top[1] - 20)
                    cv2.putText(np_img, emotion, text_left_bottom, FONT, 1, COLOR_RGB[i], 2)
                # Publish the annotated image.
                # Fix: ndarray.tostring is a deprecated alias of tobytes.
                img_producer.send_img(cv2.imencode('.jpeg', np_img)[1].tobytes())
                print('#########produce {0} to image stream'.format(produce_count))
                # Publish the per-face emotion probability distributions.
                probability_producer.send_probability_distribution(json.dumps(emotion_distributions).encode('utf8'))
                print('#########produce {0} to probability stream'.format(emotion_distributions))
            else:
                # No face: forward the raw frame and an all-zero distribution.
                img_producer.send_img(img)
                print('#########produce raw image to image stream')
                empty_distribution = {COLOR_HEX[0] : dict(zip(EMOTION, [0] * 7))}
                probability_producer.send_probability_distribution(json.dumps(empty_distribution).encode('utf8'))
            print('**********time consumed by prediction: {0}s'.format(time.time() - start_time))
if __name__ == '__main__':
    # Wire the Kafka endpoints to the emotion model and run forever.
    video_consumer = VideoConsumer()
    img_producer = ImageProducer()
    probability_producer = ProbabilityProducer()
    # model = AlexNet()
    model = VGGModel()
    predict_and_label_frame(video_consumer, img_producer, probability_producer, model)
# noqa
from libvirt import Libvirt # noqa
from docker import Docker # noqa
|
import sys
# 홀수
# 7개의 자연수 중 홀수인 것들의 합
# 이 7개의 홀수들 중에서 최솟값
def read_values(stream, count=7):
    """Read `count` integers, one per line, from the given text stream."""
    return [int(stream.readline()) for _ in range(count)]


def solve(values):
    """Return the output lines: the sum and minimum of the odd values,
    or a single '-1' when there is no odd value.

    Fixes: the original shadowed the builtin `sum` with a running total
    and double-converted already-int values.
    """
    odds = [v for v in values if v % 2 != 0]
    if odds:
        # Inputs are natural numbers, so a non-empty odds list always
        # has a positive total (the original's `sum != 0` was redundant).
        return [str(sum(odds)), str(min(odds))]
    return ['-1']


if __name__ == '__main__':
    # Guarded so the module can be imported (e.g. by tests) without
    # consuming stdin; running it as a script behaves exactly as before.
    for line in solve(read_values(sys.stdin)):
        print(line)
#!/usr/bin/python
#\file plan_2d_grasp3.py
#\brief Planning grasping on 2D.
# Objects are given as polygons.
# Gripper has two fingers that are rectangles.
# Using plan_2d_grasp2.py, grasping parameter is planned with CMA-ES.
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date May.19, 2017
from plan_2d_grasp2 import TGraspPlanningScene
import sys
sys.path.append('../')
import cma_es.cma as cma
import math
import numpy as np
def Main():
  """Plan a two-finger grasp of object 1 among polygonal obstacles.

  The grasp parameter vector p_grasp=[x, y, theta, width] is optimized
  with CMA-ES; results are printed and dumped under /tmp for plotting.
  """
  # Object outlines as closed 2-D polygons; index 1 is the grasp target.
  contours= [
    [[0.729,0.049],[0.723,0.082],[0.702,0.125],[0.682,0.124],[0.654,0.106],[0.656,0.101],[0.647,0.081],[0.652,0.078],[0.651,0.071],[0.655,0.071],[0.673,0.031]],
    [[0.722,0.219],[0.717,0.220],[0.712,0.229],[0.693,0.235],[0.681,0.227],[0.672,0.230],[0.649,0.211],[0.637,0.213],[0.629,0.208],[0.626,0.216],[0.620,0.202],[0.616,0.203],[0.617,0.207],[0.609,0.200],[0.603,0.201],[0.601,0.191],[0.587,0.181],[0.589,0.175],[0.580,0.166],[0.585,0.133],[0.593,0.121],[0.605,0.113],[0.626,0.113],[0.645,0.121],[0.644,0.127],[0.651,0.123],[0.661,0.135],[0.669,0.134],[0.675,0.140],[0.702,0.148],[0.715,0.159],[0.717,0.150],[0.720,0.149],[0.721,0.167],[0.727,0.167],[0.730,0.195],[0.724,0.204]],
    [[0.820,0.156],[0.793,0.154],[0.812,0.154],[0.812,0.150],[0.803,0.149],[0.806,0.134],[0.802,0.139],[0.796,0.133],[0.786,0.140],[0.779,0.139],[0.772,0.131],[0.774,0.126],[0.782,0.127],[0.779,0.134],[0.789,0.130],[0.788,0.115],[0.794,0.109],[0.773,0.111],[0.769,0.124],[0.755,0.143],[0.749,0.144],[0.753,0.150],[0.750,0.153],[0.737,0.147],[0.731,0.149],[0.738,0.141],[0.722,0.144],[0.722,0.124],[0.726,0.126],[0.729,0.123],[0.725,0.118],[0.733,0.107],[0.733,0.090],[0.738,0.086],[0.738,0.077],[0.740,0.082],[0.744,0.080],[0.749,0.041],[0.757,0.039],[0.758,0.032],[0.763,0.034],[0.762,0.040],[0.769,0.037],[0.769,0.008],[0.781,0.024],[0.778,0.034],[0.788,0.043],[0.828,0.144],[0.819,0.150]],
    ]
  #for contour in contours:
    #print '['+','.join(['['+','.join(map(lambda f:'%0.3f'%f,p))+']' for p in contour])+'],'
  gps= TGraspPlanningScene()
  gps.Construct(contours, scale=0.9, obj_target=1)
  #ev= gps.Evaluate(p_grasp=[-0.01, 0.005, 0.1, 0.08])
  # Objective for CMA-ES: None marks an infeasible grasp; smaller is better.
  def f_obj(p_grasp, gps=gps):
    ev= gps.Evaluate(p_grasp)
    #Analyzing intersection polygon of inner-grasp polygon and target object in grasp local frame
    if len(ev['lps_ing_obj'])==0: return None #No grasp
    lps_ing_obj_max= np.max(ev['lps_ing_obj'],axis=0)
    lps_ing_obj_min= np.min(ev['lps_ing_obj'],axis=0)
    if lps_ing_obj_max[0]-lps_ing_obj_min[0]<gps.WFinger[0]*0.7:
      return None #Grasping part is less than 70% of finger width.
    #if ev['area_ing_obj']==0.0: return None #Grasping area is zero
    score= 0.0
    # Heavily penalize finger/object collision area.
    collision= 10000.0*(sum(ev['area_f1_obj_s'])+sum(ev['area_f2_obj_s']))
    if collision>0.0: score= 1.0+collision
    #score-= 10000.0*ev['area_ing_obj']
    return score
  # CMA-ES options: bounds/scaling over [x, y, theta, width].
  cma_opt={
    'bounds': [[-0.1,-0.1,-math.pi,0.0], [0.1,0.1,math.pi,0.088]],
    'scaling_of_variables': [5.0,5.0,0.2,12.0],
    'verb_time': 0,
    'verb_log': False,
    'CMA_diagonal': 1,
    'maxfevals': 1000,
    'tolfun': 1.0e-4,
    }
  res= cma.fmin(f_obj, [0.0,0.0,0.0,0.08], 0.006, cma_opt)
  #print res
  p_grasp= res[0]
  print 'Best p_grasp=',p_grasp
  ev= gps.Evaluate(p_grasp)
  print 'lps_ing_obj:',map(lambda a:a.tolist(), ev['lps_ing_obj'])
  #Compute areas of intersections:
  #NOTE: Use these values for evaluating collision.
  #Intersection with target object:
  print 'Intersection area/target object: f1, f2=',
  print ev['area_f1_obj_s'][gps.ObjTarget],
  print ev['area_f2_obj_s'][gps.ObjTarget]
  #Intersection with other (non-target) objects:
  for obj in range(len(gps.Contours)):
    if obj==gps.ObjTarget: continue
    print 'Intersection area/object',obj,': f1, f2=',
    print ev['area_f1_obj_s'][obj],
    print ev['area_f2_obj_s'][obj]
  print 'Total collision area=',sum(ev['area_f1_obj_s'])+sum(ev['area_f2_obj_s'])
  #Save data into files for plotting (see PlotGraphs):
  def write_polygon(fp,polygon):
    if len(polygon)>0:
      for pt in polygon+[polygon[0]]:
        fp.write('%s\n'%' '.join(map(str,pt)))
      fp.write('\n')
  fp= open('/tmp/contours.dat','w')
  #for contour in contours:
  for contour in gps.Contours:
    write_polygon(fp,contour)
  fp.close()
  fp= open('/tmp/viz.dat','w')
  fp.write('%s\n'%' '.join(map(str,gps.CenterO)))
  fp.write('%s\n'%' '.join(map(str,gps.CenterO+0.05*gps.ExO)))
  fp.write('\n')
  fp.write('%s\n'%' '.join(map(str,gps.CenterO)))
  fp.write('%s\n'%' '.join(map(str,gps.CenterO+0.025*gps.EyO)))
  fp.write('\n')
  write_polygon(fp,ev['ps_f1'])
  write_polygon(fp,ev['ps_f2'])
  write_polygon(fp,ev['ps_ing'])
  write_polygon(fp,ev['ps_ing_obj'])
  fp.close()
  fp= open('/tmp/intersection.dat','w')
  for ps_f1_obj in ev['ps_f1_obj_s']:
    write_polygon(fp,ps_f1_obj)
  for ps_f2_obj in ev['ps_f2_obj_s']:
    write_polygon(fp,ps_f2_obj)
  fp.close()
def PlotGraphs():
  """Plot the /tmp data dumped by Main() with qplot; blocks until Enter."""
  print 'Plotting graphs..'
  import os
  commands=[
    '''qplot -x2 aaa
        /tmp/contours.dat w l
        /tmp/viz.dat u 1:2:'(column(-1)+1)' lc var w l
        /tmp/intersection.dat w l
        &''',
    #/tmp/viz.dat u 1:2:-1 lc var w l
    '''''',
    '''''',
    ]
  for cmd in commands:
    if cmd!='':
      # Collapse the multi-line command into a single shell line.
      cmd= ' '.join(cmd.splitlines())
      print '###',cmd
      os.system(cmd)
  print '##########################'
  print '###Press enter to close###'
  print '##########################'
  raw_input()
  os.system('qplot -x2kill aaa')
if __name__=='__main__':
  import sys
  # Pass `p`/`plot`/`Plot`/`PLOT` to only plot previously dumped results;
  # any other invocation runs the planner.
  if len(sys.argv)>1 and sys.argv[1] in ('p','plot','Plot','PLOT'):
    PlotGraphs()
    sys.exit(0)
  Main()
|
"""
_InsertComponent_
MySQL implementation of UpdateWorker
"""
__all__ = []
import time
from WMCore.Database.DBFormatter import DBFormatter
class UpdateWorker(DBFormatter):
    """Update a worker's heartbeat row in wm_workers.

    Fix: the original appended ", state = :state" / ", pid = :pid" to
    `self.sqlpart1` in place. Because sqlpart1 is shared class-level
    state, repeated execute() calls accumulated duplicate clauses; the
    statement is now assembled locally per call.
    """
    sqlpart1 = """UPDATE wm_workers
                     SET last_updated = :last_updated
               """
    sqlpart2 = """WHERE component_id = :component_id
                    AND name = :worker_name"""

    def execute(self, componentID, workerName, state = None,
                pid = None, conn = None, transaction = False):
        """Run the UPDATE, optionally also setting state and/or pid.

        componentID/workerName identify the row; conn/transaction are
        passed through to the DB interface.
        """
        binds = {"component_id": componentID,
                 "worker_name": workerName,
                 "last_updated": int(time.time())}
        extra = ""
        if state:
            binds["state"] = state
            extra += ", state = :state"
        if pid:
            binds["pid"] = pid
            extra += ", pid = :pid"
        sql = self.sqlpart1 + extra + " " + self.sqlpart2
        self.dbi.processData(sql, binds, conn = conn,
                             transaction = transaction)
        return
|
# -*- coding: utf-8 -*-
import crochet
import fido.exceptions
import pytest
from mock import Mock
from bravado.fido_client import FidoFutureAdapter
def test_eventual_result_not_cancelled():
    """result() on a clean future must not cancel the eventual result."""
    eventual = Mock()
    adapter = FidoFutureAdapter(eventual)
    adapter.result()
    assert eventual.cancel.called is False
def test_cancel_calls_eventual_result():
    """cancel() must propagate to the wrapped eventual result."""
    eventual = Mock()
    adapter = FidoFutureAdapter(eventual)
    adapter.cancel()
    assert eventual.cancel.called is True
def test_eventual_result_cancelled_on_exception():
    """A crochet timeout must cancel the result and surface as HTTPTimeoutError."""
    eventual = Mock(wait=Mock(side_effect=crochet.TimeoutError()))
    adapter = FidoFutureAdapter(eventual)
    with pytest.raises(fido.exceptions.HTTPTimeoutError) as exc_info:
        adapter.result(timeout=1)
    assert eventual.cancel.called is True
    assert str(exc_info.value).startswith('Connection was closed by fido after blocking for timeout=1 seconds')
|
#TODO
"""
Likes and comment
Create Index Page
Email Verification
Discussion
""" |
# Librerias Future
from __future__ import unicode_literals
# Librerias Django
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from django.views.generic import DetailView, ListView
# Librerias de terceros
from apps.website.submodels.post import PyPost
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def shop(request):
    """Render the shop page."""
    return render(request, 'shop.html')
def product(request):
    """Render the product page."""
    return render(request, 'product.html')
def post(request):
    """Render the blog-post page."""
    return render(request, 'post.html')
# NOTE(review): shadows the `license` builtin; kept because URLConfs may
# reference this view by name.
def license(request):
    """Render the license page."""
    return render(request, 'license.html')
def UnderConstruction(request):
    """Render the under-construction placeholder page."""
    return render(request, 'under_construction.html')
"""
BLOG
"""
# Display metadata for post fields: Spanish label ('string') per model field.
POST_FIELDS = [
    {'string': 'Título', 'field': 'title'},
    {'string': 'Creado en', 'field': 'created_on'},
    {'string': 'Contenido', 'field': 'content'},
]
# Bare field names, used where no label is needed.
POST_FIELDS_SHORT = ['title','content','created_on']
class BlogView(LoginRequiredMixin, ListView):
    """Paginated list of blog posts; requires an authenticated user."""
    login_url = "login"
    model = PyPost
    template_name = 'blog.html'
    fields = POST_FIELDS
    paginate_by = 8

    def get_context_data(self, **kwargs):
        # No extra context yet; kept as an extension point.
        return super().get_context_data(**kwargs)
class PostDetailView(LoginRequiredMixin, DetailView):
    """Detail page for a single blog post; requires authentication."""
    login_url = "login"
    model = PyPost
    template_name = 'post.html'
    # The previous get_context_data override only returned super()'s result
    # unchanged, so it was dead code; the inherited implementation is used.
|
from paste.deploy import loadapp
import os
from wsgiref.simple_server import make_server

# Load the "common" application from the local PasteDeploy ini file and
# serve it with the stdlib reference WSGI server (port 80 needs privileges).
config = "python_paste.ini"
appname = "common"
config_uri = "config:%s" % os.path.abspath(config)
wsgi_app = loadapp(config_uri, appname)
server = make_server('localhost', 80, wsgi_app)
server.serve_forever()
# Pygame window dimensions in pixels ("Depth" is used here as the height).
pygameWindowWidth = 600
pygameWindowDepth = 600
|
# -*- coding: utf-8 -*-
import os
from functools import partial
from collections import defaultdict
from irc3.utils import slugify
from irc3.utils import maybedotted
from irc3.dcc.client import DCCChat
from irc3.dcc.client import DCCGet
from irc3.dcc.client import DCCSend
# The three DCC connection flavours the manager knows about.
DCC_TYPES = ('chat', 'get', 'send')
class DCCManager:
    """Manage DCC connections"""
    def __init__(self, bot):
        self.bot = bot
        self.loop = bot.loop
        # Per-bot DCC settings; `config` and `cfg` alias the same dict.
        self.config = cfg = bot.config.get('dcc', {})
        self.config.update(
            send_limit_rate=int(cfg.get('send_limit_rate', 0)),
            send_block_size=int(cfg.get('send_block_size', DCCSend.block_size))
        )
        self.connections = {}
        self.protocols = {}
        for klass in (DCCChat, DCCGet, DCCSend):
            n = klass.type
            # Per-type limits/timeouts, overridable from the bot config.
            self.config.update({
                n + '_limit': int(cfg.get(n + '_limit', 100)),
                n + '_user_limit': int(cfg.get(n + '_user_limit', 1)),
                n + '_accept_timeout': int(cfg.get(n + '_accept_timeout', 60)),
                n + '_idle_timeout': int(cfg.get(n + '_idle_timeout', 60 * 5)),
            })
            # The protocol class itself can be swapped via '<type>_protocol'.
            klass = maybedotted(self.config.get(n + '_protocol', klass))
            self.connections[n] = {'total': 0, 'masks': defaultdict(dict)}
            self.protocols[n] = klass
        self.seeks = {}
    def created(self, protocol, future):
        """Callback fired when the listen-server / outgoing-connection task completes."""
        if protocol.port is None:
            # Listening side: record the OS-assigned port, close the server
            # if the peer never connects, and advertise the port via CTCP.
            server = future.result()
            protocol.port = server.sockets[0].getsockname()[1]
            protocol.idle_handle = self.loop.call_later(
                self.config[protocol.type + '_accept_timeout'],
                server.close)
            ctcp_msg = protocol.ctcp.format(protocol)
            self.bot.ctcp(protocol.mask.nick, ctcp_msg)
        else:
            # Outgoing side: close the protocol if it stays idle too long.
            transport, protocol = future.result()
            protocol.idle_handle = self.loop.call_later(
                self.config[protocol.type + '_accept_timeout'],
                protocol.close)
        # Book-keeping consumed by is_allowed() (global and per-mask limits).
        info = self.connections[protocol.type]
        info['total'] += 1
        info['masks'][protocol.mask][protocol.port] = protocol
        protocol.ready.set_result(protocol)
    def create(self, name_or_class, mask, filepath=None, **kwargs):
        """Create a new DCC connection. Return an ``asyncio.Protocol``"""
        if isinstance(name_or_class, type):
            name = name_or_class.type
            protocol = name_or_class
        else:
            name = name_or_class
            protocol = self.protocols[name]
        assert name in DCC_TYPES
        if filepath:
            kwargs.setdefault('limit_rate',
                              self.config['send_limit_rate'])
            kwargs['filepath'] = filepath
            if protocol.type == DCCSend.type:
                kwargs.setdefault('offset', 0)
                kwargs.update(
                    filename_safe=slugify(os.path.basename(filepath)),
                    filesize=os.path.getsize(filepath),
                )
            elif protocol.type == DCCGet.type:
                # Default the offset to the bytes already on disk (resume).
                try:
                    offset = os.path.getsize(filepath)
                except OSError:
                    offset = 0
                kwargs.setdefault('offset', offset)
                kwargs.setdefault('resume', False)
        kwargs.setdefault('port', None)
        f = protocol(
            mask=mask, ip=int(self.bot.ip),
            bot=self.bot, loop=self.loop, **kwargs)
        if kwargs['port']:
            # A port was supplied: connect out to the peer.
            if self.bot.config.get('dcc_sock_factory'):
                sock_factory = maybedotted(self.bot.config.dcc_sock_factory)
                args = dict(sock=sock_factory(self.bot, f.host, f.port))
            else:
                args = dict(host=f.host, port=f.port)
            task = self.bot.create_task(
                self.loop.create_connection(f.factory, **args))
            task.add_done_callback(partial(self.created, f))
        else:
            # No port: listen on an ephemeral port and wait for the peer.
            task = self.bot.create_task(
                self.loop.create_server(
                    f.factory, '0.0.0.0', 0, backlog=1))
            task.add_done_callback(partial(self.created, f))
        return f
    def resume(self, mask, filename, port, pos):
        """Resume a DCC send"""
        self.connections['send']['masks'][mask][port].offset = pos
        message = 'DCC ACCEPT %s %d %d' % (filename, port, pos)
        self.bot.ctcp(mask, message)
    def is_allowed(self, name_or_class, mask):  # pragma: no cover
        """Return True is a new connection is allowed"""
        if isinstance(name_or_class, type):
            name = name_or_class.type
        else:
            name = name_or_class
        info = self.connections[name]
        limit = self.config[name + '_limit']
        if limit and info['total'] >= limit:
            msg = (
                "Sorry, there is too much DCC %s active. Please try again "
                "later.") % name.upper()
            self.bot.notice(mask, msg)
            return False
        if mask not in info['masks']:
            return True
        limit = self.config[name + '_user_limit']
        # NOTE(review): info['masks'][mask] is a dict (port -> protocol);
        # comparing a dict to an int with >= raises TypeError on Python 3 --
        # len(info['masks'][mask]) was probably intended. Confirm and fix.
        if limit and info['masks'][mask] >= limit:
            msg = (
                "Sorry, you have too many DCC %s active. Close the other "
                "connection(s) or wait a few seconds and try again."
            ) % name.upper()
            self.bot.notice(mask, msg)
            return False
        return True
|
"""
Provides a trivial likelihood factory function for testing purposes.
"""
from . import lkmodule
def build_likelihood(_):
    """Factory hook: ignore the argument and return an EmptyLikelihood."""
    return lkmodule.empty_likelihood()
|
def handle_page_timeout(driver):
    """If the page shows a reload/timeout link, wait a random 1-60s, then click it.

    Relies on module-level ``random`` and ``time`` imports elsewhere in the file.
    """
    timeout_button = driver.find_elements_by_xpath("//a[contains(@onclick, 'location.reload(true);')]")
    if len(timeout_button) > 0:
        # Fix: the previous busy-wait loop spun on time.time() for the whole
        # randomized delay, burning a CPU core; sleep instead.
        time.sleep(random.randint(1, 60))
        timeout_button[0].click()
class Solution:
    def divide(self, dividend, divisor):
        """
        Integer division without *, / or %.
        :type dividend: int
        :type divisor: int
        :rtype: int

        Out-of-range 32-bit operands and the single overflow case
        (INT_MIN / -1) are clamped to 2**31 - 1.
        """
        INT_MAX = 2 ** 31 - 1
        INT_MIN = -2 ** 31
        # The only true overflow of 32-bit signed division.
        if dividend == INT_MIN and divisor == -1:
            return INT_MAX
        # Clamp when either operand is outside the 32-bit range.
        if not (INT_MIN <= dividend <= INT_MAX) or not (INT_MIN <= divisor <= INT_MAX):
            return INT_MAX
        negative = (dividend < 0) != (divisor < 0)
        remainder = abs(dividend)
        chunk = abs(divisor)
        quotient = 0
        # From the most significant bit down, subtract the largest shifted
        # multiple of the divisor that still fits; set that quotient bit.
        for shift in range(31, -1, -1):
            if (chunk << shift) <= remainder:
                remainder -= chunk << shift
                quotient |= 1 << shift
        return -quotient if negative else quotient
solution = Solution()
# Each line prints True when divide() matches the expected quotient.
expected_results = [
    ((10, 2), 5),
    ((9, 2), 4),
    ((9, -2), -4),
    ((9, -22), 0),
    ((1, 1), 1),
    ((-1, 1), -1),
    ((1, -1), -1),
    ((-1, -1), 1),
    ((-2**31 + 1, -1), 2147483647),
    ((-2**31 + 1, -2), 1073741823),
    ((-2147483648, -1), 2147483647),
    ((-2147483648, 1), -2147483648),
    ((-2147483648, 2), -1073741824),
]
for (num, den), expected in expected_results:
    print(solution.divide(num, den) == expected)
|
import sys
import pydot
from grammars_utils import Grammar, is_regular_grammar, Epsilon, build_grammar
from grammars_utils import Item
class NFA:
    """Non-deterministic finite automaton over integer states 0..states-1."""
    def __init__(self, states: int, finals: iter, transitions: dict, start=0):
        '''
        :param states: number of automaton states; states are modelled as
            integers from 0 to states-1.
        :type states: int
        :param finals: collection of final states (numbers, per the state
            representation above).
        :param transitions: the transition function: a dict keyed by
            (origin state, symbol string) whose value is a collection of
            destination states. The empty string denotes an ε-transition.
        :param start: initial state
        :type start: int
        '''
        self.states = states
        self.start = start
        self.finals = set(finals)
        self.map = transitions
        self.vocabulary = set()
        self.transitions = {state: {} for state in range(states)}
        destinations: list
        origin: int
        symbol: str
        for (origin, symbol), destinations in transitions.items():
            assert hasattr(destinations,
                           '__iter__'), 'Invalid collection of states'
            self.transitions[origin][symbol] = destinations
            self.vocabulary.add(symbol)
        # ε (the empty string) is not part of the input vocabulary.
        self.vocabulary.discard('')
    def epsilon_transitions(self, state):
        """Return the ε-destinations of `state`, or () when it has none."""
        assert state in self.transitions, 'Invalid state'
        try:
            return self.transitions[state]['']
        except KeyError:
            return ()
    def graph(self):
        """Build a pydot digraph of the automaton (ε shown as 'ε', finals bold)."""
        G = pydot.Dot(rankdir='LR', margin=0.1)
        G.add_node(
            pydot.Node('start', shape='plaintext', label='', width=0, height=0))
        for (start, tran), destinations in self.map.items():
            tran = 'ε' if tran == '' else tran
            G.add_node(pydot.Node(start, shape='circle',
                                  style='bold' if start in self.finals else ''))
            for end in destinations:
                G.add_node(pydot.Node(end, shape='circle',
                                      style='bold' if end in self.finals else ''))
                G.add_edge(pydot.Edge(start, end, label=tran, labeldistance=2))
        G.add_edge(pydot.Edge('start', self.start, label='', style='dashed'))
        return G
    def _repr_svg_(self):
        # Jupyter display hook; rendering errors (e.g. missing Graphviz)
        # are deliberately swallowed so the repr degrades silently.
        try:
            return self.graph().create().decode('utf8')
        except:
            pass
class DFA(NFA):
    """Deterministic automaton: one destination per (state, symbol), no ε-moves."""

    def __init__(self, states: int, finals: list, transitions: dict, start=0):
        for destination in transitions.values():
            assert isinstance(destination, int)
        for _, symbol in transitions:
            assert len(symbol) > 0  # ε-transitions are forbidden in a DFA
        # Re-wrap destinations as singleton lists so NFA machinery applies.
        wrapped = {pair: [destination] for pair, destination in transitions.items()}
        NFA.__init__(self, states, finals, wrapped, start)
        self.current = start

    def epsilon_transitions(self):
        # A DFA has no ε-transitions by definition.
        raise TypeError()

    def _move(self, symbol):
        """Advance the current state on `symbol`; False when no transition exists."""
        try:
            self.current = self.transitions[self.current][symbol][0]
        except KeyError:
            return False
        return True

    def _reset(self):
        self.current = self.start

    def recognize(self, string):
        """Run the automaton over `string` and report acceptance."""
        self._reset()
        return all(self._move(char) for char in string) and self.current in self.finals
def reg_grammar2DFA(grammar: Grammar) -> DFA:
    '''
    :param grammar: a regular grammar
    :type grammar: Grammar
    :return: DFA's grammar
    :rtype: DFA
    '''
    count = 0
    states = {}
    # map every nonterminal to a numbered state
    for symbol in grammar.nonTerminals:
        states[symbol.Name] = count
        count += 1
    # make sure state 0 belongs to the start symbol (swap if needed)
    for char in states:
        if states[char] == 0 and char != grammar.startSymbol.Name:
            states[char] = states[grammar.startSymbol.Name]
            states[grammar.startSymbol.Name] = 0
    # an extra final state is needed when some production ends in a single
    # terminal: those productions transition into that new final state
    exist_new_final = any([len(prod.Right) == 1 for prod in grammar.Productions])
    finals = []
    final = 0
    if exist_new_final:
        finals.append(count)
        final = 1
    # if the start symbol derives ε, its state is final too
    for prod in grammar.startSymbol.productions:
        if isinstance(prod.Right, Epsilon):
            finals.append(0)
            break
    transitions = {}
    # add the transitions: X -> aY becomes (state(X), a) -> state(Y);
    # X -> a goes to the extra final state; ε-productions add nothing
    for prod in grammar.Productions:
        left, right = prod
        if len(right) == 2:
            transitions[(states[left.Name], right[0].Name,)] = states[right[1].Name]
        else:
            if isinstance(right, Epsilon): pass
            else:
                transitions[(states[left.Name], right[0].Name,)] = count
    return DFA(len(states) + final, finals, transitions)
def automaton2reg(automaton: DFA) -> str:
    """State-elimination: convert `automaton` to an equivalent regular expression."""
    # shift every state by +1 to make room for a fresh initial state (0)
    # and a fresh final state (automaton.states + 1)
    transitions = {(state + 1, symbol,): dest[0] + 1 for (state, symbol,), dest in
                   automaton.map.items()}
    transitions[(0, '',)] = 1  # ε-transition into the old start state
    for final in automaton.finals:
        transitions[(final + 1, '', )] = automaton.states + 1
    # (ε-transitions from every old final into the new final state)
    # eliminate states 2..automaton.states (every original state except the
    # old start), never the freshly added final state
    for node in range(2, automaton.states + 1):
        # self-loops on the node collapse into (symbol1|symbol2|...|symbolk)
        inter_reg = '|'.join([symbol for (state, symbol,), dest in
                              transitions.items()
                              if state == node and state == dest])
        new_transitions = {}
        # for each transition entering the node and each one leaving it,
        # build a bypass combining both regexes (with the starred self-loop
        # in between); a state pair may end up with several regexes, which
        # is fine because all transitions are always scanned
        for (start_state, start_reg,), start_dest in transitions.items():
            if start_state != node and start_dest == node:
                for (end_state, end_reg,), end_dest in transitions.items():
                    if end_dest != node and end_state == node:
                        if inter_reg:
                            new_transitions[(start_state,
                                             start_reg + f'({inter_reg})*' + end_reg,)] = end_dest
                        else:
                            new_transitions[(start_state,
                                             start_reg + end_reg,)] = end_dest
        # keep every transition that does not touch the eliminated node
        for (state, reg,), dest in transitions.items():
            if state == node or dest == node: continue
            new_transitions[(state, reg,)] = dest
        transitions = new_transitions
    # finally eliminate the old start state (1) the same way: first collect
    # its self-loop regex
    inter_reg = '|'.join([reg for (state, reg,), dest in transitions.items()
                          if state == 1 and dest == 1])
    # only the new initial state reaches state 1 and state 1 only reaches
    # the new final state, so the bypasses below run from new-initial
    # straight to new-final, possibly with several parallel regexes
    new_transitions = {}
    for (start_state, start_reg,), start_dest in transitions.items():
        if start_state != 1 and start_dest == 1:
            for (end_state, end_reg,), end_dest in transitions.items():
                if end_dest != 1 and end_state == 1:
                    if inter_reg:
                        new_transitions[(start_state,
                                         start_reg + f'({inter_reg})*' + end_reg,)] = end_dest
                    else:
                        new_transitions[(start_state,
                                         start_reg + end_reg,)] = end_dest
    # OR together the parallel regexes between new-initial and new-final:
    # that union is the resulting regular expression
    reg = '|'.join([f'({reg})' for (state, reg,) in new_transitions])
    return reg
class State:
    """Graph-based automaton state: `transitions` maps symbols to lists of
    destination State objects, `epsilon_transitions` is a set of States,
    and `final` marks acceptance."""
    def __init__(self, state, final=False, formatter=lambda x: str(x)):
        self.state = state
        self.final = final
        self.transitions = {}
        self.epsilon_transitions = set()
        self.tag = None
        self.formatter = formatter
    def set_formatter(self, formatter, visited=None):
        """Recursively install `formatter` on this state and all reachable ones."""
        if visited is None:
            visited = set()
        elif self in visited:
            return
        visited.add(self)
        self.formatter = formatter
        for destinations in self.transitions.values():
            for node in destinations:
                node.set_formatter(formatter, visited)
        for node in self.epsilon_transitions:
            node.set_formatter(formatter, visited)
        return self
    def has_transition(self, symbol):
        return symbol in self.transitions
    def add_transition(self, symbol, state):
        """Append `state` as a destination for `symbol`; returns self (fluent)."""
        try:
            self.transitions[symbol].append(state)
        except:
            self.transitions[symbol] = [state]
        return self
    def add_epsilon_transition(self, state):
        self.epsilon_transitions.add(state)
        return self
    def recognize(self, string):
        """NFA simulation starting at this state; True when `string` is accepted."""
        states = self.epsilon_closure
        for symbol in string:
            states = self.move_by_state(symbol, *states)
            states = self.epsilon_closure_by_state(*states)
        return any(s.final for s in states)
    def to_deterministic(self, formatter=lambda x: str(x)):
        """Subset construction: return the start State of an equivalent DFA."""
        closure = self.epsilon_closure
        start = State(tuple(closure), any(s.final for s in closure), formatter)
        closures = [closure]
        states = [start]
        pending = [start]
        while pending:
            state = pending.pop()
            # symbols leaving any NFA state inside this DFA macro-state
            symbols = {symbol for s in state.state for symbol in s.transitions}
            for symbol in symbols:
                move = self.move_by_state(symbol, *state.state)
                closure = self.epsilon_closure_by_state(*move)
                if closure not in closures:
                    new_state = State(tuple(closure),
                                      any(s.final for s in closure), formatter)
                    closures.append(closure)
                    states.append(new_state)
                    pending.append(new_state)
                else:
                    index = closures.index(closure)
                    new_state = states[index]
                state.add_transition(symbol, new_state)
        return start
    @staticmethod
    def from_nfa(nfa, get_states=False):
        """Build a State graph from an NFA instance (optionally return all states)."""
        states = []
        for n in range(nfa.states):
            state = State(n, n in nfa.finals)
            states.append(state)
        for (origin, symbol), destinations in nfa.map.items():
            origin = states[origin]
            # __setitem__ routes '' to epsilon_transitions, else transitions
            origin[symbol] = [states[d] for d in destinations]
        if get_states:
            return states[nfa.start], states
        return states[nfa.start]
    @staticmethod
    def move_by_state(symbol, *states):
        """Union of destinations reached from `states` on `symbol`."""
        return {s for state in states if state.has_transition(symbol) for s in
                state[symbol]}
    @staticmethod
    def epsilon_closure_by_state(*states):
        """Fixed-point ε-closure of the given states."""
        closure = {state for state in states}
        l = 0
        while l != len(closure):
            l = len(closure)
            tmp = [s for s in closure]
            for s in tmp:
                for epsilon_state in s.epsilon_transitions:
                    closure.add(epsilon_state)
        return closure
    @property
    def epsilon_closure(self):
        return self.epsilon_closure_by_state(self)
    @property
    def name(self):
        return self.formatter(self.state)
    def get(self, symbol):
        """Deterministic lookup: the unique destination for `symbol`."""
        target = self.transitions[symbol]
        assert len(target) == 1
        return target[0]
    def __getitem__(self, symbol):
        # '' selects the ε-transitions; unknown symbols yield None
        if symbol == '':
            return self.epsilon_transitions
        try:
            return self.transitions[symbol]
        except KeyError:
            return None
    def __setitem__(self, symbol, value):
        if symbol == '':
            self.epsilon_transitions = value
        else:
            self.transitions[symbol] = value
    def __repr__(self):
        return str(self)
    def __str__(self):
        return str(self.state)
    def __hash__(self):
        return hash(self.state)
    def __iter__(self):
        yield from self._visit()
    def _visit(self, visited=None) -> 'State':
        """Depth-first generator over every state reachable from this one."""
        if visited is None:
            visited = set()
        elif self in visited:
            return
        visited.add(self)
        yield self
        for destinations in self.transitions.values():
            for node in destinations:
                yield from node._visit(visited)
        for node in self.epsilon_transitions:
            yield from node._visit(visited)
    def graph(self):
        """Render the reachable subgraph as a pydot digraph (nodes keyed by id())."""
        G = pydot.Dot(rankdir='LR', margin=0.1)
        G.add_node(
            pydot.Node('start', shape='plaintext', label='', width=0, height=0))
        visited = set()
        def visit(start):
            ids = id(start)
            if ids not in visited:
                visited.add(ids)
                G.add_node(pydot.Node(ids, label=start.name, shape='circle',
                                      style='bold' if start.final else ''))
                for tran, destinations in start.transitions.items():
                    for end in destinations:
                        visit(end)
                        G.add_edge(pydot.Edge(ids, id(end), label=tran,
                                              labeldistance=2))
                for end in start.epsilon_transitions:
                    visit(end)
                    G.add_edge(
                        pydot.Edge(ids, id(end), label='ε', labeldistance=2))
        visit(self)
        G.add_edge(pydot.Edge('start', id(self), label='', style='dashed'))
        return G
    def _repr_svg_(self):
        # Jupyter display hook; rendering errors are swallowed deliberately.
        try:
            return self.graph().create_svg().decode('utf8')
        except:
            pass
    def write_to(self, fname):
        return self.graph().write_svg(fname)
def multiline_formatter(state):
    """Render an iterable state with one item per line."""
    lines = [str(item) for item in state]
    return '\n'.join(lines)
def lr0_formatter(state):
    """Format an LR(0) state, stripping each item's trailing 4 characters;
    non-iterable states fall back to a single stripped line."""
    try:
        pieces = [str(item)[:-4] for item in state]
    except TypeError:
        return str(state)[:-4]
    return '\n'.join(pieces)
if __name__ == '__main__':
    # Demo: build a small regular grammar, convert it to a DFA and dump the
    # automaton as an SVG with the local Graphviz installation.
    g = build_grammar(['S->aB', 'S->bS', 'B->bS', 'B->b'])
    dfa = reg_grammar2DFA(g)
    algo = dfa.graph().write_svg(path=r'.\some', prog=r'C:\Program Files (x86)\Graphviz2.38\bin\dot.exe')
|
# Split the sample values into primes and composites (1 is left out of both).
s1 = {1, 2, 3, 4, 5, 66, 11, 83, 76}
prime = set()
nonprime = set()
for candidate in s1:
    if candidate > 1:
        if any(candidate % divisor == 0 for divisor in range(2, candidate)):
            nonprime.add(candidate)
        else:
            prime.add(candidate)
print(prime)
print(nonprime)
|
#!/usr/bin/python
def func1():
    """Announce on stdout that func1 was called."""
    banner = "myModule1::mySubModule1::func1 called"
    print( banner )
def func2():
    """Announce on stdout that func2 was called."""
    banner = "myModule1::mySubModule1::func2 called"
    print( banner )
class myClass( object ):
    """Demo class whose methods announce themselves on stdout."""

    def run( self ):
        banner = "myModule1::mySubModule1::myClass::Run method called"
        print( banner )

    def printName( self ):
        banner = "myModule1::mySubModule1::myClass::printName method called"
        print( banner )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 11:05:07 2019
@author: 3602786
"""
import numpy as np
import time
import os
########## Exercice 2 ###########
def nb_VarProp(ne, nj):
    """Number of propositional variables: one per (day, home team, away team)."""
    pairs_per_day = ne * ne
    return nj * pairs_per_day
def K(ne, nj, j, x, y):
    """1-based DIMACS variable index for 'team x hosts team y on day j'."""
    day_offset = j * (ne ** 2)
    return day_offset + x * ne + y + 1
def resolution(k, ne):
    """Inverse of K: recover the (day, home, away) triple from variable index k."""
    remainder = k - 1
    j, remainder = divmod(remainder, ne ** 2)
    x, y = divmod(remainder, ne)
    return j, x, y
########### Exercice 3 ###########
#question1
def au_moins(liste):
    """Build the 'at least one' CNF clause over the given literals.

    Returns one DIMACS line: the literals separated by spaces, then the
    clause-terminating 0 and a newline.
    """
    # str.join is linear; the previous += loop was quadratic in len(liste).
    return ' '.join([str(lit) for lit in liste] + ['0']) + '\n'
def au_plus(liste):
    """Build the 'at most one' CNF clauses (pairwise encoding).

    For every unordered pair of literals, emit a binary clause with both
    negated, one DIMACS line per pair. Plain integer negation replaces the
    previous unnecessary numpy round-trip, and join replaces quadratic +=.
    """
    lines = []
    n = len(liste)
    for i in range(n):
        for j in range(i + 1, n):
            lines.append('%d %d 0\n' % (-liste[i], -liste[j]))
    return ''.join(lines)
# Quick sanity check of the clause builders on a 4-literal example.
liste = [1,2,3,4]
print(au_moins(liste))
print(au_plus(liste))
#question2
def encoderC1(ne,nj):
    """Constraint C1: 'at most one' clauses over each day's home/away
    variables, built with au_plus on the accumulated literal lists.

    NOTE(review): the exact grouping handed to au_plus depends on where the
    lists are flushed in the nesting below; the driver script reports 72
    clauses for ne=3, nj=6 -- confirm this matches the intended scheduling
    constraint.
    """
    liste_clause = ""
    liste_domicile = []
    liste_exterieur = []
    for j in range(nj):
        for x in range(ne):
            for y in range(ne):
                if x!=y:
                    liste_domicile.append(K(ne,nj,j,x,y))
                    liste_exterieur.append(K(ne,nj,j,y,x))
            # flush per (day, team): home + away literals together
            liste = liste_domicile + liste_exterieur
            liste_domicile = []
            liste_exterieur = []
            liste_clause = liste_clause + au_plus(liste)
    return liste_clause
# Sanity check: print constraint C1 for 3 teams over 6 days.
ne,nj = 3,6
a = encoderC1(ne,nj)
print(a + '\n')
print(len(a.split('\n'))-1)
# 72 clauses reported for this configuration
def encoderC2(ne,nj):
    """Constraint C2: each pairing meets exactly once at home and once away
    across the season (at-least-one + at-most-one over the days).

    NOTE(review): when x == y the inner loop uses `break`, not `continue`,
    so only pairs with y < x are encoded -- confirm this is intended (it
    does cover each unordered pair once).
    """
    liste_clause = ""
    liste_domicile = []
    liste_exterieur = []
    for x in range(ne):
        for y in range(ne):
            if(x == y):
                break
            for j in range(nj):
                liste_domicile.append(K(ne,nj,j,x,y))
                liste_exterieur.append(K(ne,nj,j,y,x))
            liste_clause += au_moins(liste_domicile) + au_plus(liste_domicile)
            liste_clause += au_moins(liste_exterieur) + au_plus(liste_exterieur)
            liste_domicile = []
            liste_exterieur = []
    return liste_clause
# Sanity check: print constraint C2 for 3 teams over 6 days.
ne,nj = 3,6
a = encoderC2(ne,nj)
print(a + '\n')
print(len(a.split('\n'))-1)
# 42 clauses reported for this configuration
def encoder(ne, nj):
    """Write the full CNF encoding (C1 + C2) for ne teams / nj days to encodage.cnf."""
    res = encoderC1(ne, nj) + encoderC2(ne, nj)
    nb_var = nb_VarProp(ne, nj)
    nb_clause = len(res.split('\n')) - 1
    # Fix: use a context manager so the file is closed even on error.
    with open("encodage.cnf", "w") as f:
        # DIMACS problem line: variable and clause counts.
        f.write("p cnf " + str(nb_var) + " " + str(nb_clause) + '\n')
        f.write(res)
# Write the full encoding for 4 teams over 6 days to encodage.cnf.
ne,nj = 4,6
encoder(ne,nj)
#print(a + '\n')
#print(len(a.split('\n'))-1)
# 114 clauses for this configuration
#question 3
def decoder(fichier, ne, f_equipe):
    """Decode a glucose model line into a human-readable schedule (match.txt).

    :param fichier: path to the glucose result file ("v <literals> 0" line)
    :param ne: number of teams (needed to invert the variable numbering)
    :param f_equipe: path to the file listing one team name per line
    """
    # Fix: the three files were never closed; use context managers.
    with open(fichier, "r+") as f, \
            open("match.txt", "w") as f2, \
            open(f_equipe, "r") as f3:
        # Strip the trailing newline of every team name.
        eq = [line[:-1] for line in f3.readlines()]
        res = f.readline().split(' ')
        # Drop the leading "v" of the model line.
        res.remove(res[0])
        # NOTE(review): iterating res[1:] skips the first literal after "v",
        # matching the original behaviour -- confirm it is intended.
        for i in res[1:]:
            i = int(i)
            if (i > 0):
                j, x, y = resolution(i, ne)
                f2.write("Jour " + str(j) + ": l'equipe " + eq[x] + " joue contre l'equipe " + eq[y] + '\n')
#decoder("resultat.txt", 4, "equipe.txt")
def assemblage(ne, nj):
    """Encode the problem, run glucose, and decode the model into match.txt."""
    encoder(ne, nj)
    # Fix: remove any stale result portably instead of shelling out to `rm`
    # (which printed an error when the file did not exist).
    if os.path.exists("resultat.txt"):
        os.remove("resultat.txt")
    cmd = "./glucose_static -model encodage.cnf | tail -n 1 >> resultat.txt"
    os.system(cmd)
    decoder("resultat.txt", ne, "equipe.txt")  # writes the schedule to match.txt
# Run the whole pipeline for 4 teams over 6 days.
assemblage(4,6)
########## Exercice 4 ###############
def main():
    """For each team count ne in 3..9, search the smallest day count nj that
    makes the encoding satisfiable (10-second budget per ne; 0 on timeout)."""
    list_nj = []
    for ne in range(3, 10):
        debut = time.time()
        s = False
        nj = ne
        while not s:
            encoder(ne, nj)
            # Fix: portable stale-file removal instead of os.system("rm ...").
            if os.path.exists("satisfiable.txt"):
                os.remove("satisfiable.txt")
            cmd = "./glucose_static encodage.cnf | tail -n 1 >> satisfiable.txt"
            os.system(cmd)
            # Fix: the result file was never closed; use a context manager.
            with open("satisfiable.txt", "r") as f:
                res = f.readline()
            # Strip the "s " prefix and the trailing newline.
            res = res[2:-1]
            if res == "SATISFIABLE":
                s = True
            nj += 1
            if (time.time() - debut > 10):
                print("reponse inconnue pour ne= " + str(ne))
                nj = 0
                break
        list_nj.append(nj - 1)
    print(list_nj)
main()  # script entry point: runs the full search when executed
|
import webapp2
from gramp import Gramp
class MainPage(webapp2.RequestHandler):
    """Serves a Gramp-generated idiom as plain text on GET."""

    def get(self):
        idiom = Gramp().get_idiom()
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.write(idiom)
# WSGI application mapping the site root to MainPage.
# debug=True shows stack traces in the browser -- disable in production.
app = webapp2.WSGIApplication([
    ('/', MainPage),
], debug=True)
x = [[12, 7, 3],
     [4, 5, 6],
     [7, 8, 9]]
y = [[1, 2, 3],
     [2, 1, 3],
     [6, 7, 8]]
# Element-wise sum of the two 3x3 matrices, then print one row per line.
z = [[a + b for a, b in zip(row_x, row_y)] for row_x, row_y in zip(x, y)]
for r in z:
    print(r)
|
##!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: xxx
#python library:什么是库???利用程序员之前的工作,就是使用库(其他人写过的代码)去做其他的程序。
#应用外部的库画了一个乌龟
import turtle#引入外部的库
def main():
    """Draw a turtle on a pink background using Python's turtle library."""
    # create the drawing window
    windows=turtle.Screen()
    # set the background colour
    windows.bgcolor('pink')
    # create a turtle instance (Turtle is a class; `bran` is one instance)
    bran=turtle.Turtle()
    # a class (blueprint) bundles everything that describes the object
    bran.shape('turtle')
    bran.color('black')
    bran.speed(1.00)
    bran.setpos(-100,130)  # place the turtle at this coordinate
    bran.home()  # send the turtle back to the starting position
    #joe = bran.clone()
    #joe.setpos(100.130)
    #bran.setx(100)  # experiment: what happens with turtle instead of bran?
    #turtle.pos()(60.00,30.00)
    # speed setting experiment
    #bran.speed(1)
    #turtle.stamp ()
    #bran.circle(100,360,100)  # radius, extent, steps (polygon approximation; more steps = rounder but slower)
    # commented-out experiment: walk a few steps then turn, repeatedly
    # for i in range(1,15):
    # bran.forward(10)
    # bran.right(20)
    # bran.forward(10)
    # bran.right(20)
    # bran.forward(10)
    # bran.right(20)
    # bran.forward(10)
    # bran.right(20)
    # # then stop
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
from odoo import fields, models
class DiscountContractCloseReason(models.Model):
    """Configurable reason a discount contract was closed."""
    _name = 'discount.contract.close_reason'
    _description = 'Discount Contract Close Reason'
    name = fields.Char(required=True, translate=True)  # reason label (translatable)
    active = fields.Boolean(default=True)  # archive flag used by Odoo's default filter
    sequence = fields.Integer(default=10)  # manual ordering key in list views
|
from typing import List
import math
def checkio(a: int, b: int, c: int) -> List[int]:
    """Return the triangle's three angles in degrees (rounded), ordered by
    the side they are opposite to (a, b, c). Returns [0, 0, 0] when the
    sides violate the triangle inequality."""
    triangle_exists = a + b > c and a + c > b and b + c > a
    if not triangle_exists:
        return [0, 0, 0]

    def opposite_angle(p, q, r):
        # Law of cosines: angle between sides p and q, opposite side r.
        return round(math.degrees(math.acos((p ** 2 + q ** 2 - r ** 2) / (2.0 * p * q))))

    return [opposite_angle(b, c, a), opposite_angle(a, c, b), opposite_angle(a, b, c)]
# These "asserts" are only for self-checking, not needed for auto-testing.
# if __name__ == '__main__':
#     print("Example:")
#     print(checkio(4, 4, 4))
#
#     assert checkio(4, 4, 4) == [60, 60, 60], "All sides are equal"
#     assert checkio(3, 4, 5) == [37, 53, 90], "Egyptian triangle"
#     assert checkio(2, 2, 5) == [0, 0, 0], "It's can not be a triangle"
#     print("Coding complete? Click 'Check' to earn cool rewards!")
# Print the actual result next to the expected value and its label.
print(checkio(4, 4, 4), [60, 60, 60], "All sides are equal")
print(checkio(3, 4, 5), [37, 53, 90], "Egyptian triangle")
print(checkio(2, 2, 5), [0, 0, 0], "It's can not be a triangle")
|
"""A Flask-based api implemented with GraphQL and basic authentication.
Usage: flask run
Attributes:
app: A flask Flask object creating the flask app
"""
import os
from flask import Flask
from auth import AuthGraphQLView
from models import setup_db
from schema import schema
app = Flask(__name__)
setup_db(app)
# GraphQL endpoint guarded by basic authentication (see AuthGraphQLView).
app.add_url_rule(
    "/graphql", view_func=AuthGraphQLView.as_view("graphql", schema=schema),
)


@app.route("/")
def index():
    """The route handler for the home page.

    Returns:
        A simple health check that returns the str 'Working!'
    """
    return "Working!"


if __name__ == "__main__":
    # Fix: environment variables are strings; app.run expects an int port.
    port = int(os.environ.get("PORT", 5000))
    app.run(port=port)
|
import re
# Validate identifiers read from stdin: exactly 10 alphanumeric characters,
# at least 2 uppercase letters, at least 3 digits, and no repeated character.
for _ in range(int(input())):
    s = input()
    if re.search(r'([A-Z].*){2}', s) and re.search(r'(\d.*){3}', s) \
            and re.match(r'^[a-zA-Z\d]{10}$', s) and not re.search(r'(\w).*\1', s):
        print('Valid')
    else:
        print('Invalid')
|
# String demo: print accepts multiple comma-separated arguments of mixed types.
a = "Mehedi"
b = "Amin"
#c = a + b
print(a,b,100)
from django.shortcuts import render
from django.shortcuts import render, redirect, get_object_or_404, render_to_response, HttpResponse
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
#Import Models
from models import Datacenters, Cages, CageRows, Racks
#Import Forms from forms.py
from forms import FormNewDatacenter, FormNewCage, FormNewCageRow, FormNewRack
#Import System Modules
import re
import sys
import json
# Locations.Views #
def home(request, template_name='locations_home.html'):
    """Render the Locations app landing page."""
    return render(request, template_name)
def datacenter_create(request, template_name='locations_datacenter_create.html'):
    """ View and Create Datacenters """
    if request.method == 'POST' and 'CreateDatacenter' in request.POST:
        form = FormNewDatacenter(request.POST)  # bind the submitted data
        if form.is_valid():
            # Persist the new datacenter from the validated form data.
            name = form.cleaned_data['name']
            description = form.cleaned_data['description']
            f = Datacenters(name=name, description=description)
            f.save()
            return redirect('locations_datacenter_create')  # must match a urls.py "name"
    if request.method == 'POST' and 'input_delete_id' in request.POST:
        # Soft-delete: mark the datacenter inactive instead of deleting the row.
        deleteId = 0  # fix: was unbound (NameError below) when parsing failed
        try:
            deleteId = int(request.POST['input_delete_id'])
        except (ValueError, TypeError):  # fix: bare except hid real errors
            pass
        if deleteId >= 1:
            Datacenters.objects.filter(id=deleteId).update(active=0)
            return redirect('locations_datacenter_create')
    # Default action for GET, or POSTs that fail validation.
    data = {}
    data['datacenters'] = Datacenters.objects.filter(active=1)
    data['form'] = FormNewDatacenter(request.POST or None)
    return render(request, template_name, data)
def cage_create(request, template_name='locations_cage_create.html'):
    """ View and Create Cages"""
    if request.method == 'POST' and 'CreateCage' in request.POST:
        form = FormNewCage(request.POST)  # bind the submitted data
        if form.is_valid():
            name = form.cleaned_data['name']
            description = form.cleaned_data['description']
            # `datacenter` is a ForeignKey field: cleaned_data holds the
            # related object, so reference its id explicitly.
            datacenter = form.cleaned_data['datacenter'].id
            f = Cages(name=name, description=description, datacenter_id=datacenter)
            f.save()
            return redirect('locations_cage_create')  # must match a urls.py "name"
    if request.method == 'POST' and 'input_delete_id' in request.POST:
        # Soft-delete: mark the cage inactive instead of deleting the row.
        deleteId = 0  # fix: was unbound (NameError below) when parsing failed
        try:
            deleteId = int(request.POST['input_delete_id'])
        except (ValueError, TypeError):  # fix: bare except hid real errors
            pass
        if deleteId >= 1:
            Cages.objects.filter(id=deleteId).update(active=0)
            return redirect('locations_cage_create')
    # Default action for GET, or POSTs that fail validation.
    data = {}
    data['cages'] = Cages.objects.filter(active=1)
    data['form'] = FormNewCage(request.POST or None)
    return render(request, template_name, data)
def cagerow_create(request, template_name='locations_cagerow_create.html'):
    """ View and Create Cage Rows"""
    if request.method == 'POST' and 'CreateCageRow' in request.POST:
        form = FormNewCageRow(request.POST)  # bind the submitted data
        if form.is_valid():
            name = form.cleaned_data['name']
            description = form.cleaned_data['description']
            cage = form.cleaned_data['cage'].id  # ForeignKey: use the id
            f = CageRows(name=name, description=description, cage_id=cage)
            f.save()
            return redirect('locations_cagerow_create')  # must match a urls.py "name"
    if request.method == 'POST' and 'input_delete_id' in request.POST:
        # Soft-delete: mark the cage row inactive instead of deleting the row.
        deleteId = 0  # fix: was unbound (NameError below) when parsing failed
        try:
            deleteId = int(request.POST['input_delete_id'])
        except (ValueError, TypeError):  # fix: bare except hid real errors
            pass
        if deleteId >= 1:
            CageRows.objects.filter(id=deleteId).update(active=0)
            # NOTE(review): 'locations_cageRow_create' differs in case from the
            # create-branch redirect above -- confirm against urls.py.
            return redirect('locations_cageRow_create')
    # Default action for GET, or POSTs that fail validation.
    data = {}
    data['cageRows'] = CageRows.objects.filter(active=1)
    data['form'] = FormNewCageRow(request.POST or None)
    return render(request, template_name, data)
def rack_create(request, template_name='locations_rack_create.html'):
    """ View and Create Racks"""
    if request.method == 'POST' and 'CreateRack' in request.POST:
        form = FormNewRack(request.POST)  # bind the submitted data
        if form.is_valid():
            name = form.cleaned_data['name']
            description = form.cleaned_data['description']
            cagerow = form.cleaned_data['cagerow'].id  # ForeignKey: use the id
            f = Racks(name=name, description=description, cagerow_id=cagerow)
            f.save()
            return redirect('locations_rack_create')  # must match a urls.py "name"
    if request.method == 'POST' and 'input_delete_id' in request.POST:
        # Soft-delete: mark the rack inactive instead of deleting the row.
        deleteId = 0  # fix: was unbound (NameError below) when parsing failed
        try:
            deleteId = int(request.POST['input_delete_id'])
        except (ValueError, TypeError):  # fix: bare except hid real errors
            pass
        if deleteId >= 1:
            Racks.objects.filter(id=deleteId).update(active=0)
            return redirect('locations_rack_create')
    # Default action for GET, or POSTs that fail validation.
    data = {}
    data['racks'] = Racks.objects.filter(active=1)
    data['form'] = FormNewRack(request.POST or None)
    return render(request, template_name, data)
#### View Locations (Datacenter/Cage/Rack/Row) by ID
def datacenter_by_id(request, pk, template_name='locations_datacenter_by_id.html'):
    """ View a single datacenter by ID, with the cages it contains. """
    #Default action for GET, or POST's that fail validation.
    data = {}
    data['this_datacenter'] = Datacenters.objects.get(id=pk)
    # Child cages are looked up by foreign key, not via the ORM relation.
    data['cage_list'] = Cages.objects.filter(datacenter_id=pk)
    return render(request, template_name, data)
def cage_by_id(request, pk, template_name='locations_cage_by_id.html'):
    """ View a single cage by ID, with the cage rows it contains.

    Cleanup: removed two leftover Python-2 debug ``print`` statements that
    dumped the datacenter/cage ids to stdout on every request, and fixed
    the copy-pasted "datacenter" docstring.
    """
    #Default action for GET, or POST's that fail validation.
    data = {}
    data['this_cage'] = Cages.objects.get(id=pk)
    data['cagerow_list'] = CageRows.objects.filter(cage_id=pk)
    return render(request, template_name, data)
def cagerow_by_id(request, pk, template_name='locations_cagerow_by_id.html'):
    """ View a single cage row by ID, with the racks it contains. """
    #Default action for GET, or POST's that fail validation.
    data = {}
    data['this_cagerow'] = CageRows.objects.get(id=pk)
    data['rack_list'] = Racks.objects.filter(cagerow_id=pk)
    return render(request, template_name, data)
def rack_by_id(request, pk, template_name='locations_rack_by_id.html'):
    """ View a single rack by ID. """
    #Default action for GET, or POST's that fail validation.
    data = {}
    data['this_rack'] = Racks.objects.get(id=pk)
    return render(request, template_name, data)
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=relative-beyond-top-level
# pylint:disable=arguments-differ
# pylint:disable=no-member
# pylint:disable=signature-differs
"""Implementation of the service-side open-telemetry interceptor.
This library borrows heavily from the OpenTracing gRPC integration:
https://github.com/opentracing-contrib/python-grpc
"""
from contextlib import contextmanager
from typing import List
import grpc
from opentelemetry import propagators, trace
from opentelemetry.context import attach, detach
from . import grpcext
from ._utilities import RpcInfo
# pylint:disable=abstract-method
class _OpenTelemetryServicerContext(grpc.ServicerContext):
    """Wrapper around the real ``grpc.ServicerContext``.

    Records the status code / details the handler sets (so the interceptor
    can mark the span as failed afterwards) while delegating every other
    call straight through to the wrapped context.
    """
    def __init__(self, servicer_context, active_span):
        self._servicer_context = servicer_context
        self._active_span = active_span
        # Assume success until the handler calls set_code()/set_details().
        self.code = grpc.StatusCode.OK
        self.details = None
        super(_OpenTelemetryServicerContext, self).__init__()
    # --- pure pass-through delegation to the wrapped context ---
    def is_active(self, *args, **kwargs):
        return self._servicer_context.is_active(*args, **kwargs)
    def time_remaining(self, *args, **kwargs):
        return self._servicer_context.time_remaining(*args, **kwargs)
    def cancel(self, *args, **kwargs):
        return self._servicer_context.cancel(*args, **kwargs)
    def add_callback(self, *args, **kwargs):
        return self._servicer_context.add_callback(*args, **kwargs)
    def invocation_metadata(self, *args, **kwargs):
        return self._servicer_context.invocation_metadata(*args, **kwargs)
    def peer(self, *args, **kwargs):
        return self._servicer_context.peer(*args, **kwargs)
    def peer_identities(self, *args, **kwargs):
        return self._servicer_context.peer_identities(*args, **kwargs)
    def peer_identity_key(self, *args, **kwargs):
        return self._servicer_context.peer_identity_key(*args, **kwargs)
    def auth_context(self, *args, **kwargs):
        return self._servicer_context.auth_context(*args, **kwargs)
    def send_initial_metadata(self, *args, **kwargs):
        return self._servicer_context.send_initial_metadata(*args, **kwargs)
    def set_trailing_metadata(self, *args, **kwargs):
        return self._servicer_context.set_trailing_metadata(*args, **kwargs)
    def abort(self, *args, **kwargs):
        # abort() only exists on newer grpcio releases; fail loudly rather
        # than with an AttributeError from the delegate.
        if not hasattr(self._servicer_context, "abort"):
            raise RuntimeError(
                "abort() is not supported with the installed version of grpcio"
            )
        return self._servicer_context.abort(*args, **kwargs)
    def abort_with_status(self, *args, **kwargs):
        if not hasattr(self._servicer_context, "abort_with_status"):
            raise RuntimeError(
                "abort_with_status() is not supported with the installed "
                "version of grpcio"
            )
        return self._servicer_context.abort_with_status(*args, **kwargs)
    def set_code(self, code):
        # Remember the code locally so _check_error_code can inspect it.
        self.code = code
        return self._servicer_context.set_code(code)
    def set_details(self, details):
        self.details = details
        return self._servicer_context.set_details(details)
# On the service side an error can be signalled either by raising or by
# calling `set_code` on the servicer context.  This inspects the wrapped
# context for the latter and records it on the RpcInfo.
# pylint:disable=unused-argument
def _check_error_code(span, servicer_context, rpc_info):
    status = servicer_context.code
    if status != grpc.StatusCode.OK:
        rpc_info.error = status
class OpenTelemetryServerInterceptor(
    grpcext.UnaryServerInterceptor, grpcext.StreamServerInterceptor
):
    """Server-side interceptor that wraps each RPC in a SERVER span,
    attaching any trace context found in the incoming gRPC metadata."""
    def __init__(self, tracer):
        self._tracer = tracer
    @contextmanager
    # pylint:disable=no-self-use
    def _set_remote_context(self, servicer_context):
        """Attach the trace context extracted from the RPC metadata for the
        duration of the ``with`` block; the context is detached on exit."""
        metadata = servicer_context.invocation_metadata()
        if metadata:
            md_dict = {md.key: md.value for md in metadata}
            def get_from_grpc_metadata(metadata, key) -> List[str]:
                # propagators.extract expects getter(carrier, key) -> list
                return [md_dict[key]] if key in md_dict else []
            # Update the context with the traceparent from the RPC metadata.
            ctx = propagators.extract(get_from_grpc_metadata, metadata)
            token = attach(ctx)
            try:
                yield
            finally:
                detach(token)
        else:
            # No metadata: run with whatever context is already current.
            yield
    def _start_span(self, method):
        # The span is named after the full gRPC method (/pkg.Service/Method).
        span = self._tracer.start_as_current_span(
            name=method, kind=trace.SpanKind.SERVER
        )
        return span
    def intercept_unary(self, request, servicer_context, server_info, handler):
        """Trace an RPC whose response is a single message."""
        with self._set_remote_context(servicer_context):
            with self._start_span(server_info.full_method) as span:
                rpc_info = RpcInfo(
                    full_method=server_info.full_method,
                    metadata=servicer_context.invocation_metadata(),
                    timeout=servicer_context.time_remaining(),
                    request=request,
                )
                # Wrap the context so set_code()/set_details() calls made by
                # the handler are visible to _check_error_code below.
                servicer_context = _OpenTelemetryServicerContext(
                    servicer_context, span
                )
                response = handler(request, servicer_context)
                _check_error_code(span, servicer_context, rpc_info)
                rpc_info.response = response
                return response
    # For RPCs that stream responses, the result can be a generator. To record
    # the span across the generated responses and detect any errors, we wrap
    # the result in a new generator that yields the response values.
    def _intercept_server_stream(
        self, request_or_iterator, servicer_context, server_info, handler
    ):
        with self._set_remote_context(servicer_context):
            with self._start_span(server_info.full_method) as span:
                rpc_info = RpcInfo(
                    full_method=server_info.full_method,
                    metadata=servicer_context.invocation_metadata(),
                    timeout=servicer_context.time_remaining(),
                )
                if not server_info.is_client_stream:
                    rpc_info.request = request_or_iterator
                servicer_context = _OpenTelemetryServicerContext(
                    servicer_context, span
                )
                result = handler(request_or_iterator, servicer_context)
                # Yielding from inside the two `with` blocks keeps the span
                # (and attached context) open until the client has consumed
                # the whole response stream.
                for response in result:
                    yield response
                _check_error_code(span, servicer_context, rpc_info)
    def intercept_stream(
        self, request_or_iterator, servicer_context, server_info, handler
    ):
        """Trace a streaming RPC; server-streaming responses are delegated
        to the generator wrapper above."""
        if server_info.is_server_stream:
            return self._intercept_server_stream(
                request_or_iterator, servicer_context, server_info, handler
            )
        with self._set_remote_context(servicer_context):
            with self._start_span(server_info.full_method) as span:
                rpc_info = RpcInfo(
                    full_method=server_info.full_method,
                    metadata=servicer_context.invocation_metadata(),
                    timeout=servicer_context.time_remaining(),
                )
                servicer_context = _OpenTelemetryServicerContext(
                    servicer_context, span
                )
                response = handler(request_or_iterator, servicer_context)
                _check_error_code(span, servicer_context, rpc_info)
                rpc_info.response = response
                return response
|
def do_delete(rq_id, rq_name, rq_activities):
    """Build the mock response for deleting a person record.

    Returns a dict with the HTTP status and the echoed person payload.
    """
    payload = {
        'id': rq_id,
        'name': rq_name,
        'activities': rq_activities,
        'type': 'person',
    }
    return {'status': 200, 'json': payload}
# Competitive-programming solution (reads from stdin):
# N balls in a row; C[i] is the colour of ball i.  dp accumulates a count
# modulo 1e9+7 — presumably the number of valid sequences/rearrangements
# for the prefix; TODO confirm against the original problem statement.
N = int( input())
C = [ int( input()) for _ in range(N)]
# dp[i]: answer for the first i balls (dp[0] = 1 base case).
dp = [1]*(N+1)
# NOTE(review): `colors` is allocated but never read — dead code.
colors = [1]*(2*10**5+1)
# S[c]: contribution remembered for colour c; -1 means "not seen yet".
S = [-1]*(2*10**5+1)
Q = 10**9+7
for i in range(N):
    if i >= 1:
        if C[i] == C[i-1]:
            # Same colour as the previous ball: count is unchanged.
            dp[i+1] = dp[i]
            continue
    if S[C[i]] == -1:
        # First occurrence of this colour.
        dp[i+1] = dp[i]
        S[C[i]] = 1
        if i >= 1:
            S[C[i]] = dp[i]
        continue
    # Colour seen before (and not adjacent): add the stored contribution.
    dp[i+1] = (dp[i] + S[C[i]])%Q
    S[C[i]] += dp[i]
    S[C[i]] %= Q
print(dp[N])
|
from setuptools import setup
#with open('requirements.txt') as f:
#requirements = f.read().splitlines()
# Package metadata for the C3D gait-extraction module (single py_module
# distributed out of the c3dgait/ directory).
setup(
    name='C3D_gait',
    author='Michael Jeffryes',
    author_email='mike.jeffryes@hotmail.com',
    url='',
    version='1.1.0',
    description='Extracts gait data from C3D files',
    #packages=['gpscalc'],
    py_modules=["c3dtrial"],
    package_dir={'':'c3dgait'},
    setup_requires=['wheel'],
    classifiers=[
        #"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
    ],
    #install_requires=requirements,
    # NOTE(review): file handle intentionally left open; harmless in a
    # short-lived setup script.
    long_description=open('README.md').read(),
)
|
# Author : Xiang Xu
# -*- coding: utf-8 -*-
def filter_users_with_checkintimes(mintCheckinTimes):
    """Return ids of users whose check-in count is at least the threshold.

    checkintimes.txt is tab separated: <user id>\t<check-in count>.
    """
    qualified = []
    with open('checkintimes.txt', 'r') as source:
        for record in source:
            fields = record.strip().split('\t')
            if int(fields[1]) >= mintCheckinTimes:
                qualified.append(int(fields[0]))
    return qualified
def filter_users_with_friends(minFriends):
    """Return ids of users with at least *minFriends* friends.

    friends.txt is tab separated: <user id>\t<space separated friend ids>.
    """
    qualified = []
    with open('friends.txt') as source:
        for record in source:
            fields = record.strip().split('\t')
            friend_ids = fields[1].split(' ')
            if len(friend_ids) >= minFriends:
                qualified.append(int(fields[0]))
    return qualified
if __name__ == '__main__':
    # Parenthesized single-argument print works identically under
    # Python 2 and Python 3 (the original print statements were py2-only).
    print(filter_users_with_checkintimes(20))
    print(filter_users_with_friends(200))
|
def answer(start, length):
    """XOR checksum over a triangular block of worker IDs.

    Row i (0-based) starts at start + i*length and contains length - i
    consecutive IDs; the result is the XOR of all of them.  Uses the
    closed form for XOR(0..n) so each row costs O(1).
    """
    def prefix_xor(n):
        # XOR of all integers 0..n — classic period-4 pattern:
        # n, 1, n+1, 0 for n % 4 == 0, 1, 2, 3 respectively.
        r = n % 4
        if r == 0:
            return n
        if r == 1:
            return 1
        if r == 2:
            return n + 1
        return 0

    def range_xor(lo, hi):
        # XOR of all integers lo..hi inclusive.
        return prefix_xor(hi) ^ prefix_xor(lo - 1)

    total = 0
    row_start = start
    for remaining in range(length, 0, -1):
        total ^= range_xor(row_start, row_start + remaining - 1)
        row_start += length
    return total
|
from django.contrib import admin
from .models import Person
# NOTE(security): a default admin username/password used to be recorded in
# this comment — credentials must never be committed; rotate them if this
# repository has been shared.
# Expose the Person model in the Django admin site.
admin.site.register(Person)
|
from scipy import stats as s
from scipy import special as ss
import numpy as np
from statsmodels.stats import multicomp, anova
import pandas as pd
# Worked cheat-sheet of scipy/statsmodels statistical tests.
df = pd.read_csv('/datasets/genetherapy.csv')
df2 = pd.read_csv('/datasets/atherosclerosis.csv')
"""quartiles"""
np.percentile([1,2,3], 50)
"""t-distribution"""
stat, pval = s.ttest_1samp([1,2,3], 5)
s.norm.rvs(size=1000)
"""compare distributions"""
stat, p = s.levene([1,2,3], [2,3,4]) #test equal variance hypothesis.
stat, p = s.ttest_ind([1,2,3], [2,2,2]) #test if average equal. 1 - equal
stat, pval = s.shapiro([1,2,300]) #test norm. >.05 - norm
"""dispersion analysis"""
f, p = s.f_oneway([1,2,3,4], (2,3,4,5)) #test equal M. if P<.05 => at least 2 groups differ
#df usage
gg = df.groupby('Therapy')
s.f_oneway(*[gg.get_group(g)['expr'].values for g in gg.groups.keys()])
df.groupby('Therapy').boxplot()
r = multicomp.pairwise_tukeyhsd(df['expr'], df['Therapy']) #tukey correction for multivariate dispersion analysis
r.plot_simultaneous()
print(r.summary())
from statsmodels.formula.api import ols  # BUGFIX: `ols` was used below but never imported
mod = ols('expr ~ age*dose', data=df2).fit() #Multi-factor dispersion analysis
anova.anova_lm(mod, typ=2)
"""Correlation"""
c, p = s.pearsonr([1,2,3], [5,6,7])
c, p = s.spearmanr([4,5,2,3,1], [2,1,4,3,100]) #ranging values, works fine for ejections
"""Regression"""
s.stats.linregress([1,2,3],[4,5,6]) #slope, intercept, rvalue
"""Nominative variables (coin)"""
s.chisquare([795,705], [750,750]) #got, expected, dfx (k - 1 - dfx)=> chi**2, p. H0: no relation between ars
chi2, p, dof, exp = s.chi2_contingency([[15,9],[11,6]], correction=True) #contingency matrix (coin throw case), f > 10. Yates correction 5 < f < 10
s.fisher_exact([[18,7],[6,13]]) #Small tables, f < 5
"""Combinatorics"""
ss.comb(1000,5) #N things taken k at a time
ss.perm(1000,5) #BUGFIX: was `sm.perm`, but `sm` is undefined; perm lives in scipy.special
import math
import collections
def solve(data):
    """Brute-force elf present-stealing game (AoC 2016 day 19 part 1).

    Each elf with presents takes everything from the next elf (clockwise)
    that still holds presents.  Returns the 1-based winner's position.
    """
    ring = [1] * data
    while ring.count(len(ring)) != 1:
        for idx, presents in enumerate(ring):
            if presents == 0:
                continue
            # Find the next elf after idx (wrapping) that still has presents.
            rotated = ring[idx + 1:] + ring[:idx]
            victim = (next(i for i, v in enumerate(rotated) if v > 0) + 1 + idx) % len(ring)
            ring[idx] += ring[victim]
            ring[victim] = 0
    return next(i for i, v in enumerate(ring) if v > 0) + 1
def solve(data):
    """Closed form for the same game: with L = data - 2**floor(log2(data)),
    the winner is 2*L + 1 (classic Josephus k=2 result)."""
    highest_power = 2 ** math.floor(math.log(data, 2))
    return 2 * (data - highest_power) + 1
def solve_v2(data):
    """Part 2: the elf directly across the circle is eliminated.

    Two deques model the half of the circle on either side of the current
    elf; the head of the larger side is the elf across.  Returns the
    1-based winner.
    """
    half = data // 2
    left = collections.deque(range(1, half + 1))
    right = collections.deque(range(data, half, -1))
    while left and right:
        # Eliminate the elf directly across (head of the larger side).
        if len(left) > len(right):
            left.pop()
        else:
            right.pop()
        # Rotate one step: current elf moves behind, next becomes current.
        right.appendleft(left.popleft())
        left.append(right.pop())
    return left[0] or right[0]
def f(x):
    # Strips the largest power of 3 from x, then the largest power of 3
    # from the remainder.  Per the original note, only meaningful when x
    # is not itself a power of 3.
    first = x - 3 ** math.floor(math.log(x, 3))
    return first - 3 ** math.floor(math.log(first, 3))
if __name__ == '__main__':
    data = 3005290
    # NOTE(review): the real puzzle input above is immediately overridden
    # by a debug value — one of these two assignments should be removed.
    data = 15
    print(solve_v2(data))
|
"""
IN: s3://spark-job-cluster-bucket/input/customers.csv
OP: s3://spark-job-cluster-bucket/output/
spark-submit --deploy-mode cluster --master yarn script.py s3://spark-job-cluster-bucket/input/customers.csv s3://spark-job-cluster-bucket/output/
"""
from __future__ import print_function
from pyspark.sql import SparkSession
import sys
if __name__ == "__main__":
    # Expect exactly two arguments: <input csv path> <output path>.
    if len(sys.argv) != 3:
        print("Usage: testjob ", file=sys.stderr)
        # BUGFIX: use sys.exit — the bare exit() is a site.py convenience
        # helper that is not guaranteed to exist in every interpreter.
        sys.exit(-1)
    # SparkSession as a context manager stops the session on exit.
    with SparkSession.builder.appName("My EMR Job").enableHiveSupport().getOrCreate() as spark:
        df = spark.read.csv(sys.argv[1], header=True)
        table_name = "trade_sample_table_orc_ext_emr"
        # Write as an external ORC table backed by the given output path.
        df.write.mode("OVERWRITE").option("path", sys.argv[2]).format("orc").saveAsTable(table_name)
|
import sys
import time
import pyfirmata2
port = '/dev/ttyACM0'
# Pin capability map for an Arduino Leonardo — kept for reference; the
# pyfirmata2.Arduino autodetection below is what is actually used.
leonardo = {
    'digital': tuple(x for x in range(14)),
    'analog': tuple(x for x in range(6)),
    'pwm': (3, 5, 6, 9, 10, 11, 13),
    'use_ports': True,
    'disabled': (0, 1)  # Rx, Tx, Crystal
}
try:
    print("Initializing... ", end = '', flush=True)
    #board = pyfirmata2.Board(port, leonardo)
    board = pyfirmata2.Arduino(port)
    print("Ready")
except Exception:
    # BUGFIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit,
    # making the script impossible to Ctrl-C during startup.
    print ('No Arduino found')
    sys.exit()
dig_pin = {}
try:
    print("Initializing pins... ")
    # One pass over all pins (original had two identical loops); order of
    # initialization is preserved.
    for pin in [3, 5, 6, 9, 10, 11, 2, 4, 7, 8, 12, 13]:
        print("init pin {}".format(pin))
        dig_pin[pin] = board.get_pin("d:{}:o".format(pin))
    print("ready")
except Exception as e:
    print ("Error: {}".format(e))
    sys.exit()
# Interactive loop: "<pin> <value>" writes the value to the pin.
while True:
    in_txt = input ("command ")
    num, on_off = in_txt.split(" ")
    print("led {} {}".format(num, on_off))
    dig_pin[int(num)].write(float(on_off))
|
# https://www.reddit.com/r/dailyprogrammer/comments/wjzly/7132012_challenge_76_easy_title_case/
def titlecase(input, exception):
    """Title-case *input*, keeping words listed in *exception* lowercase.

    *exception* may be a whitespace-separated string or any iterable of
    words; the first word is always capitalized, per title-case rules.

    BUGFIX: the original tested ``word in exception`` with a string
    argument, i.e. substring membership — so e.g. "a" wrongly matched
    inside "hard".  Membership is now tested against a set of whole,
    lowercased words.  Empty input no longer raises IndexError.
    """
    if isinstance(exception, str):
        exception = exception.split()
    lowercase_words = {w.lower() for w in exception}
    words = input.split()
    if not words:
        return ''
    output = [words[0].title()]
    for word in words[1:]:
        if word.lower() in lowercase_words:
            output.append(word.lower())
        else:
            output.append(word.title())
    return ' '.join(output)
print(titlecase("This is a hard one", "hard"))
#coding:utf-8
# NOTE(review): scratch notes on Django permission checking — this file is
# not runnable as-is (the first line calls `permission_required` before it
# is imported, and `my_view` has an empty body).  Kept verbatim.
permission_required(perm, login_url=None, raise_exception=False)
from django.contrib.auth.decorators import permission_required
@permission_required('polls.can_vote')
def my_view(request):
#PermissionRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
class MyView(PermissionRequiredMixin, View):
    permission_required = 'polls.can_vote'
    # Or multiple of permissions:
    permission_required = ('polls.can_open', 'polls.can_edit')
#AccessMixin
'''
Problem Statement #
We are given an unsorted array containing ‘n’ numbers taken from the range 1 to ‘n’.
The array has some duplicates, find all the duplicate numbers without using any extra space.
Example 1:
Input: [3, 4, 4, 5, 5]
Output: [4, 5]
Example 2:
Input: [5, 4, 7, 2, 3, 5, 3]
Output: [3, 5]
'''
def find_all_duplicates(nums):
    """Return the duplicate values in *nums* (values 1..n, some repeated).

    Cyclic sort: place each value v at index v-1, then any index whose
    value does not match holds a duplicate.  O(n) time, O(1) extra space;
    mutates *nums* in place.
    """
    pos = 0
    while pos < len(nums):
        home = nums[pos] - 1
        if nums[pos] == nums[home]:
            pos += 1
        else:
            nums[pos], nums[home] = nums[home], nums[pos]
    return [value for idx, value in enumerate(nums) if value != idx + 1]
def main():
    """Demo: report the duplicates found in a sample list."""
    sample = [3, 4, 4, 5, 5]
    duplicates = find_all_duplicates(sample)
    # Note: sample has been reordered in place by the call above.
    print("All duplicates in {0} are {1}".format(sample, duplicates))
if __name__ == "__main__":
    main()
|
Import('env')
Import('path')
import os
currentPath = path
#VariantDir("..\\build", '.\\', duplicate=0)
# Recursively gathers .cpp sources and nested SConscripts.
# FIX: paths are built with os.path.join instead of hard-coded "\\",
# and the same (full) path is used for both the isfile() check and the
# SConscript() call — the original checked a cwd-relative path but then
# invoked an absolute one.
srcs = os.walk(currentPath)
# Array of objects
objs = []
# Loop through source files and subdirectories
for root, dirs, files in srcs:
    print("Files: " + str(files))
    print("Directories: " + str(dirs))
    for d in dirs:
        sconscript = os.path.join(currentPath, d, "SConscript")
        if os.path.isfile(sconscript):
            print("Scanning through: " + sconscript)
            o = env.SConscript(sconscript)
            objs.extend(o)
    for f in files:
        if f.endswith(".cpp"):
            print(f)
            # Store as object, then add it to the array
            o = env.Object(f)
            objs.extend(o)
# Return the accumulated object list to the calling SConstruct
Return('objs')
import datetime
import hmac
import json
import msgpack
import os.path
import sqlite3

from flask import Flask, request, g, Response

from auth import requires_auth
# Flask application; settings (DATABASE, PORT, CREDENTIALS) come from config.py.
app = Flask(__name__)
app.config.from_object('config')
insert_query = '''INSERT INTO utmp (host, user, uid, rhost, line, time, updated)
VALUES (:host, :user, :uid, :rhost, :line, :time, :updated)'''
delete_query = '''DELETE FROM utmp WHERE host = ?'''
def init_db():
    """Create the database schema by executing schema.sql.

    BUGFIX: the connection is now closed in a ``finally`` block, so it is
    no longer leaked if reading or executing the schema raises.
    """
    with app.app_context():
        db = connect_db()
        try:
            with app.open_resource('schema.sql', mode='r') as f:
                db.cursor().executescript(f.read())
            db.commit()
        finally:
            db.close()
def connect_db():
    """Open a new SQLite connection to the configured database file."""
    return sqlite3.connect(app.config['DATABASE'])
@app.before_request
def before_request():
    """Attach a fresh DB connection (with dict-like rows) to flask.g."""
    conn = connect_db()
    conn.row_factory = sqlite3.Row
    g.db = conn
@app.teardown_request
def teardown_request(exception):
    """Close the per-request DB connection, if one was opened."""
    conn = getattr(g, 'db', None)
    if conn is not None:
        conn.close()
def check_auth(hostname, password):
    """Validate *hostname*/*password* against the configured credentials.

    Uses hmac.compare_digest so the comparison runs in constant time and
    does not leak how much of the password matched via timing.
    Returns False for unknown hostnames.
    """
    try:
        expected = app.config['CREDENTIALS'][hostname]
    except KeyError:
        return False
    # NOTE(review): assumes credentials are ASCII str — compare_digest
    # raises TypeError on non-ASCII str; encode to bytes if that changes.
    return hmac.compare_digest(str(expected), str(password))
def update_utmp(db, hostname, logins):
    """Replace every utmp row for *hostname* with the given login dicts."""
    cur = db.cursor()
    cur.execute(delete_query, (hostname,))
    for entry in logins:
        # Stamp each row with the current epoch second.
        entry['updated'] = int(datetime.datetime.now().timestamp())
        cur.execute(insert_query, entry)
    db.commit()
def dict_from_rows(rows):
    '''Convert a list of sqlite3.Row to a list of dicts'''
    return [dict(zip(row.keys(), row)) for row in rows]
@app.route('/update', methods=['PUT'])
@requires_auth(check_auth)
def update():
    '''API endpoint for submitting utmp data to
    :return: status code 400 - unsupported Content-Type
    :return: status code 200 - successful submission
    '''
    # Only msgpack payloads are accepted.
    if request.headers['content-type'] != 'application/x-msgpack':
        return Response(status=400)
    hostname = request.authorization.username
    logins = msgpack.unpackb(request.data, encoding='utf-8')
    update_utmp(g.db, hostname, logins)
    return Response('Update successful for host {}'.format(hostname), status=200)
@app.route('/list')
def list():
    '''Render all stored logins as msgpack, JSON, or plain text,
    depending on the client's Accept header.'''
    rows = g.db.cursor().execute("SELECT * FROM utmp").fetchall()
    logins = dict_from_rows(rows)
    preferred = request.accept_mimetypes.best
    if preferred == 'application/x-msgpack':
        body, ctype = msgpack.packb(logins), 'application/x-msgpack'
    elif preferred == 'application/json':
        body, ctype = json.dumps(logins), 'application/json'
    else:
        lines = [
            'user {} connected to host {} on line {} from {}\n'.format(
                entry['user'], entry['host'], entry['line'], entry['rhost'])
            for entry in logins
        ]
        body, ctype = ''.join(lines), 'text/plain'
    return Response(body, 200, {'Content-Type': ctype})
if __name__ == '__main__':
    # Bootstrap the schema on first run, then serve on the configured port.
    db_path = app.config['DATABASE']
    if not os.path.isfile(db_path):
        init_db()
    app.run(port=app.config['PORT'])
|
from ampel.pipeline.t0.DevAlertProcessor import DevAlertProcessor
#from ampel.contrib.hu.t0.DecentFilter import DecentFilter
from ampel.contrib.weizmann.t0.InfantFilter import InfantFilter
from ampel.view.AmpelAlertPlotter import AmpelAlertPlotter
import glob
import json
import logging
import numpy as np
import os
import sys
import time
# Verbose module logger, also handed to the filter below.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# initialize your filter
base_config = {'catsHTM.default': "/raid/eran/catsHTM/"}
# Pull the filter run configuration for the infant-SN channel.
with open('ampel/contrib/weizmann/channels.json', 'r') as f:
    channel_param = json.load(f)
run_config = channel_param['WEIZMANN_INFANTSN']['sources']['ZTFIPAC']['t0Filter']['runConfig']
print(run_config)
on_match_t2_units = ["SNCOSMO"]
my_filter = InfantFilter(
    base_config = base_config,
    run_config = run_config,
    on_match_t2_units = on_match_t2_units,
    logger = logger
    )
# process the stuff
tar = sys.argv[1]
# NOTE(review): `dir` shadows the builtin of the same name.
dir = tar.split('/')[-1].replace('.tar.gz', '')
# NOTE(review): os.system never raises on a failing command (it returns the
# exit status), so these try/except wrappers have no effect.
try:
    os.system("rm -r ./accepted_{name} ./rejected_{name}".format(name=dir))
except:
    pass
try:
    os.system("mkdir accepted_{name} rejected_{name}".format(name=dir))
except:
    pass
dap = DevAlertProcessor(my_filter, use_dev_alerts=True)
dap._logger = logger
print ("processing alerts from %s"%tar)
start = time.time()
nproc = dap.process_tar(tar, iter_max=1e5)#1e666)
print ("processed %d alerts in %.2e sec"%(nproc, time.time() - start))
# Summarize the accept/reject split of the filter.
n_good, n_bad = len(dap.get_accepted_alerts()), len(dap.get_rejected_alerts())
print ("%d alerts accepted by the filter (%.2f perc)"%(n_good, 100*n_good/nproc))
print ("%d alerts rejected by the filter (%.2f perc)"%(n_bad, 100*n_bad/nproc))
# plot the stuff
accepted_plot = AmpelAlertPlotter(interactive = False, plot_dir = "./accepted_{name}".format(name=dir), plot_name_tmpl="{objectId}.pdf")
rejected_plot = AmpelAlertPlotter(interactive = False, plot_dir = "./rejected_{name}".format(name=dir), plot_name_tmpl="{objectId}.pdf")
accepted = dap.get_accepted_alerts()
rejected = dap.get_rejected_alerts()
# Fixed seed so the sampled rejected alert is reproducible across runs.
np.random.seed(42)
# Plot every accepted alert, but only one randomly chosen rejected alert.
for accepted_alert in accepted:
    accepted_plot.summary_plot(accepted_alert)
for rejected_alert in np.random.choice(rejected, 1, replace=False):
    rejected_plot.summary_plot(rejected_alert)
# Append a tab-separated summary line for this archive.
# BUGFIX: the original ended with `output.close` (no parentheses), so the
# file was never actually closed; a context manager guarantees it.
with open('results.log', 'a') as output:
    output.write('{archive}\t{alerts}\t{accepted}\t{rejected}\n'.format(
        archive=dir, alerts=nproc, accepted=n_good, rejected=n_bad))
import os
from yacs.config import CfgNode as CN
# Global default configuration tree; new_allowed lets experiment YAML files
# introduce keys that are not declared here.
cfg = CN(new_allowed=True)
# ---------------------------------------------------------------------------- #
# TASK
# 0->cls, 1->seg
# ---------------------------------------------------------------------------- #
cfg.TASK = CN(new_allowed=True)
cfg.TASK.STATUS = 'train'
cfg.TASK.TYPE = 0 # 0 for classification, 1 for segmentation
cfg.TASK.NAME = 'aneurysm_cls'
cfg.SEED = 1234
cfg.METRICS = ['Acc', 'PR', 'NR']
# Model / backbone selection
cfg.MODEL = CN(new_allowed=True)
cfg.MODEL.NAME = 'resnet'
cfg.MODEL.DIM = '3d'
cfg.MODEL.BN = 'bn'
cfg.MODEL.INPUT_CHANNEL = 1
cfg.MODEL.NCLASS = 2
cfg.MODEL.PRETRAIN = ''
cfg.MODEL.DEEP_SUPERVISION = False
cfg.MODEL.BACKBONE = CN(new_allowed=True)
cfg.MODEL.BACKBONE.ARCH = 'resnet34'
cfg.MODEL.BACKBONE.HEAD = 'A'
# cfg.MODEL.BACKBONE.PRETRAIN = ''
# ---------------------------------------------------------------------------- #
# Solver
# ---------------------------------------------------------------------------- #
cfg.SOLVER = CN(new_allowed=True)
cfg.SOLVER.BASE_LR = 0.001
cfg.SOLVER.LR_MODE = 'poly'
cfg.SOLVER.EPOCHS = 120
cfg.SOLVER.OPTIMIZER = CN(new_allowed=True)
# ---------------------------------------------------------------------------- #
# Loss
# ---------------------------------------------------------------------------- #
cfg.LOSS = CN(new_allowed=True)
cfg.LOSS.TYPE = 'ce_loss'
cfg.LOSS.CLASS_WEIGHT = []
cfg.LOSS.WEIGHT = []
cfg.LOSS.IGNORE_INDEX = -100
cfg.LOSS.DICE_WEIGHT = []
# ---------------------------------------------------------------------------- #
# Train
# ---------------------------------------------------------------------------- #
cfg.TRAIN = CN(new_allowed=True)
cfg.TRAIN.RESUME = False
cfg.TRAIN.PRINT = 50
cfg.TRAIN.DATA = CN(new_allowed=True)
cfg.TRAIN.DATA.WORKERS = 16
cfg.TRAIN.DATA.TRAIN_LIST = '/data3/pancw/data/patch/dataset/train/train.lst'
cfg.TRAIN.DATA.VAL_LIST = '/data3/pancw/data/patch/dataset/train/test.lst'
cfg.TRAIN.DATA.BATCH_SIZE = 32
# ---------------------------------------------------------------------------- #
# Test
# ---------------------------------------------------------------------------- #
cfg.TEST = CN(new_allowed=True)
cfg.TEST.MODEL_PTH = ' '
cfg.TEST.SAVE = True
cfg.TEST.SAVE_DIR = ' '
cfg.TEST.DATA = CN(new_allowed=True)
cfg.TEST.DATA.TEST_FILE = ' '
cfg.TEST.DATA.TEST_LIST = []
cfg.TEST.DATA.WORKERS = 16
cfg.TEST.DATA.BATCH_SIZE = 32
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
cfg.OUTPUT_DIR = "resnet34_ce_loss"
cfg.SAVE_ALL = False
# cfg.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
# cfg.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py")
|
from django.urls import path
from products import views
urlpatterns = [
    # NOTE(review): almost every route below reuses name='create'.  URL
    # names must be unique for reverse()/{% url %} to work — the last
    # registered pattern wins.  Audit the templates before renaming.
    path('list', views.product_list, name='create'),
    path('details/<int:id>', views.product_rud, name='details'),
    path('type', views.product_type_list, name='create'),
    path('prod_details/<int:id>', views.product_type_rud, name='create'),
    path('package', views.package_type_list, name='create'),
    path('batch', views.product_batch, name='create'),
]
|
from typing import List
from navmesh.polygon import Polygon
class FloorPlan:
    """A walkable area: an outer boundary polygon minus obstacle polygons."""
    def __init__(self, boundary: Polygon, obstacles: List[Polygon]):
        self.boundary = boundary    # outer walkable boundary polygon
        self.obstacles = obstacles  # polygons carved out of the boundary
    def __repr__(self):
        # Debug-friendly representation (added; no behavioral change).
        return 'FloorPlan(boundary={!r}, obstacles={!r})'.format(
            self.boundary, self.obstacles)
|
## Author: shebang-rc
import sys
sys.setrecursionlimit(20000)
# F(3223) is the limit before this thing crashes, so
# that's why it takes the minimum of number specified
# and 3223.
def fibonacci(n, a=1, b=0):
    """Accumulator-style recursive Fibonacci; fibonacci(n) is the n-th
    number with F(0)=0, F(1)=1.  `a`/`b` carry the running pair."""
    if n:
        return fibonacci(n - 1, a + b, a)
    return b
def min(a, b):
    # Smaller of two values (deliberately shadows the builtin in this file).
    return a if a < b else b
def main():
    # Python 2 script (raw_input/xrange and print statements).
    num = int(raw_input("List up to nth Fibonacci number: "))
    # Cap at 3223 to stay under the recursion limit configured above.
    for i in xrange(0, min(num, 3223)):
        print str(i+1) + " : " + str(fibonacci(i))
if __name__ == "__main__":
    main()
|
"""
如果你了解JavaScript或者PHP等,那么你一定对eval()所有了解。如果你并没有接触过也没关系,eval()函数的使用非常简单。
"""
# eval() parses and evaluates a string as a Python expression:
print(eval("1+1==2"))   # comparison -> True
print(eval("'A'+'B'"))  # string concatenation -> 'AB'
print(eval("1+2"))      # arithmetic -> 3
"""
Python中eval()函数将字符串str当成有效的表达式来求值并返回计算结果。其函数声明如下:
eval(expression[, globals[, locals]])
其中,参数globals为字典形式,locals为任何映射对象,它们分别表示全局和局部命名空间。
如果传入globals参数的字典中缺少__builtins__的时候,当前的全局命名空间将作为globals参数输入并且在表达式计算之前被解析。
locals参数默认与globals相同,如果两者都省略的话,表达式将在eval()调用的环境中执行。
“eval is evil”(eval是邪恶的),这是一句广为人知的对eval的评价,它主要针对的是eval()的安全性。那么eval存在什么样的安全漏洞呢?来看一个简单的例子:
"""
import sys
from math import *
def ExpCalcBot(string):
    """Evaluate a user-supplied expression string and print the result.

    SECURITY: eval() on untrusted input executes arbitrary code — this is
    exactly the vulnerability the surrounding text demonstrates.  Never
    use this pattern outside a teaching example.
    """
    try:
        print("Your answer is", eval(string))
    except:
        # Deliberately broad for the demo: any failure is reported the same.
        print("The expression you enter is not valid")
print("Hi,I am ExpCalcBot. please input your experssion or enter e to end")
# REPL loop: evaluate each non-empty line until the user enters 'e'.
while 1:
    print("Please enter a number or operation. Enter c to complete")
    inputstr = input()
    if inputstr == str('e'):
        break
    elif repr(inputstr) != repr(''):
        ExpCalcBot(inputstr)
        inputstr = ""
"""
上面这段代码的主要功能是:根据用户的输入,计算Python表达式的值。它有什么问题呢?
如果用户都是素质良好,没有不良目的的话,那么这段程序也许可以满足基本需求。
比如,输入1+sin(20)会输出结果1.91294525073。但如果它是一个Web页面的后台调用(当然,你需要做一定的修改),
由于网络环境下运行它的用户并非都是可信任的,问题就出现了。因为eval()可以将任何字符串当做表达式求值,
这也就意味着有空子可钻。上面的例子中假设用户输入__import__("os").system("ls"),
会有什么样的输出呢?你会惊讶地发现它会显示当前目录下的所有文件列表.
"""
"""
import("os"). system(del */Q")
!不要轻易在你的计算机上尝试
Your answer is 0
"""
import ast
# Safe alternative: ast.literal_eval only accepts Python literals (numbers,
# strings, tuples, lists, dicts, sets, booleans, None) and raises on
# anything else, so it cannot execute code.
print(ast.literal_eval('[1 ,2, 3]'))
# def eval_code(code):
#     parsed = ast.parse(code, mode='eval')
#     fixed = ast.fix_missing_locations(parsed)
#     compiled = compile(fixed, '<string>', 'eval')
#     print(eval(compiled))
#
# eval_code("1+sin(20)")
# eval_code('__import__("os").system("ls")')
|
from flask import request
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, SelectField, FloatField, BooleanField, IntegerField
from wtforms.validators import ValidationError, DataRequired, InputRequired, Length
from flask_babel import _, lazy_gettext as _l

from app.models import User
class EditProfileForm(FlaskForm):
    """Profile editor; rejects a username change that collides with an
    existing user."""
    username = StringField(_l('Username'), validators=[DataRequired()])
    about_me = TextAreaField(_l('About me'),
                             validators=[Length(min=0, max=140)])
    submit = SubmitField(_l('Submit'))
    def __init__(self, original_username, *args, **kwargs):
        super(EditProfileForm, self).__init__(*args, **kwargs)
        # Remembered so an unchanged username never triggers the check.
        self.original_username = original_username
    def validate_username(self, username):
        # WTForms hook, called automatically for the `username` field.
        # Consistency fix: use the passed-in field (`username.data`)
        # throughout — the original mixed it with `self.username.data`
        # (the same field, but the mix read as two different values).
        if username.data != self.original_username:
            user = User.query.filter_by(username=username.data).first()
            if user is not None:
                raise ValidationError(_('Please use a different username.'))
class PostForm(FlaskForm):
    """Compose a new post."""
    post = TextAreaField(_l('Say something'), validators=[DataRequired()])
    submit = SubmitField(_l('Submit'))
class SearchForm(FlaskForm):
    """Site-wide search box, populated from the query string (GET)."""
    q = StringField(_l('Search'), validators=[DataRequired()])
    def __init__(self, *args, **kwargs):
        # Search is submitted via GET, so read request.args instead of the
        # POST body, and skip CSRF so search URLs are bookmarkable.
        # NOTE(review): `csrf_enabled` is deprecated in newer Flask-WTF
        # releases — confirm the installed version still honours it.
        if 'formdata' not in kwargs:
            kwargs['formdata'] = request.args
        if 'csrf_enabled' not in kwargs:
            kwargs['csrf_enabled'] = False
        super(SearchForm, self).__init__(*args, **kwargs)
class PostRateForm(FlaskForm):
    """Submit a freight rate for a lane.

    Cleanup: `hazardous_freight` and `brokered_load` were each defined
    twice (a SelectField immediately overridden by a BooleanField on the
    class body) — only the surviving BooleanField definitions are kept, so
    rendered/submitted behavior is unchanged.  `dead_head` now uses
    InputRequired, because DataRequired treats a legitimate value of 0 as
    missing.  The no-op __init__ override was removed.
    """
    origin = StringField(_l('Origin City'), validators=[DataRequired()], id='locationTextField1')
    destination = StringField(_l('Destination City'), validators=[DataRequired()], id='locationTextField2')
    equipment_type = SelectField(
        'Equipment Type',
        choices=[('dryvan', 'Dry Van'), ('reefer', 'Reefer'), ('flatbed', 'Flatbed')]
    )
    hazardous_freight = BooleanField(_l('Did this load contain hazardous materials?'))
    brokered_load = BooleanField(_l('Was this load through a broker?'))
    # NOTE(review): choices are (value, label) pairs, so the labels render
    # as bare numbers here — confirm that is intended.
    weather = SelectField(
        'Weather from 1-5: 5 being the worst weather',
        choices=[('1', 1), ('2', 2), ('3', 3), ('4', 4), ('5', 5)]
    )
    rate_per_mile = FloatField(_l('Rate Per Mile'), validators=[DataRequired()])
    # InputRequired (not DataRequired): a deadhead of 0 miles is valid.
    dead_head = IntegerField(_l('How far did you have to deadhead?'), validators=[InputRequired()])
    submit = SubmitField(_l('Submit'))
class SearchRatesForm(FlaskForm):
    """Search submitted rates by lane and equipment type."""
    origin = StringField(_l('Origin City'), validators=[DataRequired(), Length(min=0, max=30)], id='locationTextField1')
    destination = StringField(_l('Destination City'), validators=[DataRequired(), Length(min=0, max=30)], id='locationTextField2')
    equipment_type = SelectField(
        'Equipment Type',
        choices=[('dryvan', 'Dry Van'), ('reefer', 'Reefer'), ('flatbed', 'Flatbed')]
    )
    # NOTE(review): choices are (value, label) pairs — ('Yes', True) makes
    # 'Yes' the submitted value and True the rendered label; confirm the
    # intent was not the other way around.
    hazardous_freight = SelectField('Hazardous?', choices=[('Yes',True), ('No', False)])
    submit = SubmitField(_l('Submit'))
    def __init__(self, *args, **kwargs):
        # Allow GET-driven searches by disabling CSRF when not provided.
        # NOTE(review): `csrf_enabled` is deprecated in newer Flask-WTF.
        if 'csrf_enabled' not in kwargs:
            kwargs['csrf_enabled'] = False
        super(SearchRatesForm, self).__init__(*args, **kwargs)
class LocationReviewForm(FlaskForm):
    """Review a shipper/consignee location (scores: 5 is worst)."""
    shipper = StringField('Shipper', validators=[DataRequired()])
    consignee = StringField('Consignee', validators=[DataRequired()])
    unloading_score = SelectField('Unloading score 1-5: 5 being the worst', choices=[('1',1),('2', 2), ('3', 3), ('4', 4), ('5', 5)])
    lateness_score = SelectField('Lateness score 1-5: 5 being the worst (Run late all the time)', choices=[('1',1),('2', 2), ('3', 3), ('4', 4), ('5', 5)])
    comments = TextAreaField(_l('Add a comment about unloading'), validators=[DataRequired()])
    submit = SubmitField(_l('Submit'))
class MessageForm(FlaskForm):
    """Single-field form for sending a short (max 140 character) message."""
    message = TextAreaField(_l('Message'), validators=[
        DataRequired(), Length(min=0, max=140)])
    submit = SubmitField(_l('Submit'))
|
# #q1 not done
# Merge three dictionaries into a fourth one and print the result.
dic1 = {1: 10, 2: 20}
dic2 = {3: 30, 4: 40}
dic3 = {5: 50, 6: 60}
# Single-expression merge; equivalent to three successive update() calls.
dic4 = {**dic1, **dic2, **dic3}
print(dic4)
from pkg_resources import resource_string
from yandextank.common.interfaces import AbstractPlugin, AggregateResultListener, AbstractInfoWidget, GeneratorPlugin, AbstractCriterion
from yandextank.plugins.Console import Plugin as ConsolePlugin
from yandextank.plugins.Autostop import Plugin as AutostopPlugin
from yandextank.plugins.Console import screen as ConsoleScreen
from yandextank.common.util import splitstring
from .reader import LocustReader #, LocustStatsReader
from yandextank.stepper import StepperWrapper
from locust import runners as lr
from locust import main as lm
from locust import log as ll
import locust
import gevent
import sys
import subprocess
import socket
import logging
import time
from locust.stats import stats_printer, stats_writer, print_percentile_stats, print_error_report, print_stats #,write_stat_csvs
from locust.runners import MasterLocustRunner, SlaveLocustRunner, LocalLocustRunner
import locust.events as events
from locust.util.time import parse_timespan
import tempfile
version = locust.__version__
logger = logging.getLogger(__name__)
class Plugin(AbstractPlugin, GeneratorPlugin):
    """ Locust tank plugin """
    SECTION = 'locust'

    def __init__(self, core, cfg, cfg_updater):
        """Set every option to its default; configure() later overwrites
        them from the [locust] section of the tank config."""
        AbstractPlugin.__init__(self, core, cfg, cfg_updater)
        self.core = core
        # --- runtime state ---
        self._locustrunner = None    # Local/MasterLocustRunner once start_test() runs
        self._locustclasses = None   # Locust classes loaded from the locustfile
        self._options = None         # Opts object handed to the runner
        self._user_count = 0         # last known number of simulated users
        self._state = ''             # last known runner state string
        self._locuststats = ''       # snapshot of runner.stats.total
        self._locustslaves = None    # Popen handles of locally spawned slaves
        self.stats_reader = None
        self.reader = None
        # --- connection / load defaults (see configure()) ---
        self.host = None
        self.web_host = ''
        self.port = 8089
        self.locustfile = 'locustfile'
        self.master = False
        self.slave = False
        self.master_host = "127.0.0.1"
        self.master_port = 5557
        self.master_bind_host = "*"
        self.master_bind_port = 5557
        self.expect_slaves = 0
        self.no_web = True
        self.num_clients = int(1)
        self.hatch_rate = float(1)
        self.num_requests = None
        self.run_time = None
        # --- logging / reporting defaults ---
        self.loglevel = 'INFO'
        self.logfile = None
        self.csvfilebase = None
        self.csvappend = True
        self.print_stats = True
        self.only_summary = True
        self.list_commands = False
        self.show_task_ratio = False
        self.show_task_ratio_json = False
        self.show_version = True
        self.locustlog_level = 'INFO'
        self.cfg = cfg
        # setup logging
        ll.setup_logging(self.loglevel, self.logfile)
@property
def locustlog_file(self):
    """Path of the locust response log inside the tank artifacts directory."""
    logger.debug("######## DEBUG: self.core.artifacts_dir = {}".format(self.core.artifacts_dir))
    return "{}/locust.log".format(self.core.artifacts_dir)
def get_available_options(self):
    """Names of the options this plugin reads from its config section."""
    target_opts = ["host", "port", "locustfile"]
    load_opts = ["num_clients", "hatch_rate", "run_time"]  # "num_requests" intentionally excluded
    logging_opts = ["logfile", "loglevel", "csvfilebase"]
    distributed_opts = [
        "master", "master_bind_host", "master_bind_port", "expect_slaves",
        "master_host", "master_port",
    ]
    return target_opts + load_opts + logging_opts + distributed_opts
def _get_variables(self):
    """Collect options from the section that are NOT declared in
    get_available_options(); these are treated as free-form user variables."""
    res = {}
    for option in self.core.config.get_options(self.SECTION):
        if option[0] not in self.get_available_options():
            res[option[0]] = option[1]
    logger.debug("Variables: %s", res)
    return res
def get_reader(self):
    """Lazily create and return the LocustReader that parses the response log."""
    if self.reader is None:
        self.reader = LocustReader(self, self.locustlog_file)
    return self.reader
def get_stats_reader(self):
    """Return the stats reader owned by the log reader.

    NOTE(review): assumes get_reader() has been called first — if
    self.reader is still None this raises AttributeError. Confirm the
    tank core guarantees the call order.
    """
    if self.stats_reader is None:
        self.stats_reader = self.reader.stats_reader
    logger.debug("######## DEBUG: plugin.reader.stats_reader.source = %s" % self.stats_reader.source)
    return self.stats_reader
def configure(self):
    """Read every declared option from the [locust] section, overriding
    the defaults set in __init__, and route locust's response log into
    the artifacts directory."""
    self.host = self.get_option("host")
    self.port = self.get_option("port")
    self.locustfile = self.get_option("locustfile")
    self.num_clients = int(self.get_option ("num_clients"))
    self.hatch_rate = float(self.get_option("hatch_rate"))
    self.run_time = self.get_option("run_time")
    self.logfile = self.get_option("logfile")
    self.loglevel = self.get_option("loglevel")
    self.csvfilebase = self.get_option("csvfilebase")
    self.locustlog_level = self.get_option("locustlog_level")
    self.show_version = True
    self.master = self.get_option("master")
    self.master_bind_host = self.get_option("master_bind_host")
    self.master_bind_port = self.get_option("master_bind_port")
    self.expect_slaves = self.get_option("expect_slaves")
    self.master_host = self.get_option("master_host")
    self.master_port = self.get_option("master_port")
    if self.locustlog_file:
        logger.debug("######## DEBUG: configuring Locust resplog")
        ll.setup_resplogging(self.locustlog_level, self.locustlog_file)
def get_options(self):
    """Snapshot the declared options as a {name: current value} dict."""
    options = {}
    for optname in self.get_available_options():
        options[optname] = getattr(self, optname)
    logger.debug("##### Locust plugin: get_options() : options = {}".format(options))
    return options
def prepare_test(self):
    """Locate the locustfile, load its Locust classes and build the
    options object; also wires a console widget when the console plugin
    is available. Exits the process on any fatal problem."""
    # NOTE(review): this local rebinding shadows the module-level logger;
    # behaviour is identical since both use __name__.
    logger = logging.getLogger(__name__)
    try:
        logger.debug("######## DEBUG: looking for a console object")
        ### DEBUG: enable/disable Console
        console = self.core.get_plugin_of_type(ConsolePlugin)
    except Exception as ex:
        # Console plugin is optional — run headless when absent.
        logger.debug("######## DEBUG: Console not found: %s", ex)
        console = None
    if console:
        logger.debug("######## DEBUG: console found")
        widget = LocustInfoWidget(self)
        console.add_info_widget(widget)
        logger.debug("######## DEBUG: locust widget added to console")
    try:
        locustfile = lm.find_locustfile(self.locustfile)
        if not locustfile:
            logger.error("##### Locust plugin: Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.")
            sys.exit(1)
        if locustfile == "locust.py":
            logger.error("##### Locust plugin: The locustfile must not be named `locust.py`. Please rename the file and try again.")
            sys.exit(1)
        docstring, locusts = lm.load_locustfile(locustfile)
        logger.info("##### Locust plugin: locustfile = {}".format(locustfile))
        if not locusts:
            logger.error("##### Locust plugin: No Locust class found!")
            sys.exit(1)
        else:
            logger.info("##### Locust plugin: Locust classes found in {} : {}".format(locustfile, locusts))
        self._locustclasses = list(locusts.values())
        options = Opts(**self.get_options())
        self._options = options
        logger.debug("##### Locust plugin: main() : options = {}".format(options))
    except Exception as e:
        logger.error("##### Locust plugin: prepare_test() CRITICAL ERROR : %s", e)
        sys.exit(1)
def is_any_slave_up(self):
    """Return True when running as master and at least one locally
    spawned slave process is still alive.

    Popen.poll() returns None while the child is running, so a slave is
    "up" exactly when its poll() result is None. Returns False when not
    in master mode or when no slaves were ever spawned.
    """
    if self.master and self._locustslaves is not None:
        # Idiom fix: the original built a throwaway list with
        # `any([False if x is not None else True for x in poll_slaves])`;
        # `poll() is None` expresses the same condition directly.
        res = any(slave.poll() is None for slave in self._locustslaves)
        logger.debug("######## DEBUG: is_any_slave_up/any(res) = {}".format(res))
        return res
    elif self.master:
        logger.error("##### Locust plugin: no slave alive to poll")
        return False
    else:
        return False
def start_test(self):
    """Start the load test: install a SIGTERM handler, then launch a
    Local or Master locust runner according to configuration and begin
    hatching clients. Returns the runner."""
    # install SIGTERM handler
    def sig_term_handler():
        logger.info("##### Locust plugin: Got SIGTERM signal")
        self.shutdown(0)
    # NOTE(review): `signal` is referenced here but never imported at the
    # top of this module — this would raise NameError at runtime. Confirm.
    gevent.signal(signal.SIGTERM, sig_term_handler)

    def spawn_local_slaves(count):
        """
        Spawn *local* locust slaves : data aggregation will NOT work with *remote* slaves
        """
        try:
            args = ['locust']
            args.append('--locustfile={}'.format(str(self.locustfile)))
            args.append('--slave')
            args.append('--master-host={}'.format(self.master_host))
            args.append('--master-port={}'.format(self.master_port))
            args.append('--resplogfile={}'.format(self.locustlog_file))
            logger.info("##### Locust plugin: slave args = {}".format(args))
            # Spawning the slaves in shell processes (security warning with the use of 'shell=True')
            self._locustslaves = [subprocess.Popen(' '.join(args), shell=True, stdin=None,
                                                   stdout=open('{}/locust-slave-{}.log'.format(self.core.artifacts_dir, i), 'w'),
                                                   stderr=subprocess.STDOUT) for i in range(count)]
            #slaves = [SlaveLocustRunner(self._locustclasses, self._options) for _ in range(count)] # <-- WRONG: This will spawn slave running on the same CPU core as master
            time.sleep(1)
            logger.info("##### Locust plugin: Started {} new locust slave(s)".format(len(self._locustslaves)))
            logger.info("##### Locust plugin: locust slave(s) PID = {}".format(self._locustslaves))
        except socket.error as e:
            logger.error("##### Locust plugin: Failed to connect to the Locust master: %s", e)
            sys.exit(-1)
        except Exception as e:
            logger.error("##### Locust plugin: Failed to spawn locust slaves: %s", e)
            sys.exit(-1)

    try:
        logger.info("##### Locust plugin: Starting Locust %s" % version)
        # run the locust
        ### FIXME
        #if self.csvfilebase:
        #    gevent.spawn(stats_writer, self.csvfilebase)
        ### /FIXME
        if self.run_time:
            if not self.no_web:
                logger.error("##### Locust plugin: The --run-time argument can only be used together with --no-web")
                sys.exit(1)
            try:
                # convert "1h30m"-style strings to seconds
                self.run_time = parse_timespan(self.run_time)
            except ValueError:
                logger.error("##### Locust plugin: Valid --time-limit formats are: 20, 20s, 3m, 2h, 1h20m, 3h30m10s, etc.")
                sys.exit(1)

            def spawn_run_time_limit_greenlet():
                """Schedule a greenlet that stops the runner after run_time seconds."""
                logger.info("##### Locust plugin: Run time limit set to %s seconds" % self.run_time)
                def timelimit_stop():
                    logger.info("##### Locust plugin: Time limit reached. Stopping Locust.")
                    self._locustrunner.quit()
                    logger.debug("######## DEBUG: timelimit_stop()/self._locustrunner.quit() passed")
                def on_greenlet_completion():
                    logger.debug("######## DEBUG: Locust plugin: on_greenlet_completion()")
                #gevent.spawn_later(self.run_time, timelimit_stop)
                gl = gevent.spawn_later(self.run_time, timelimit_stop)
                # linking timelimit greenlet to main greenlet and get a feedback of its execution
                #gl.link(on_greenlet_completion)

        # locust runner : web monitor
        if not self.no_web and not self.slave and self._locustrunner is None:
            # spawn web greenlet
            # NOTE(review): `web` is not imported in this module; this branch
            # would raise NameError if no_web were ever disabled. Confirm.
            logger.info("##### Locust plugin: Starting web monitor at %s:%s" % (self.web_host or "*", self.port))
            main_greenlet = gevent.spawn(web.start, self._locustclasses, self._options)
        # locust runner : standalone
        if not self.master and not self.slave and self._locustrunner is None:
            logger.info("##### Locust plugin: LocalLocustRunner about to be launched")
            self._locustrunner = LocalLocustRunner(self._locustclasses, self._options)
            # spawn client spawning/hatching greenlet
            if self.no_web:
                logger.info("##### Locust plugin: LocalLocustRunner.start_hatching()")
                self._locustrunner.start_hatching(wait=True)
                main_greenlet = self._locustrunner.greenlet
                if self.run_time:
                    logger.info("##### Locust plugin: spawn_run_time_limit_greenlet()")
                    spawn_run_time_limit_greenlet()
                    logger.info("##### Locust plugin: spawn_run_time_limit_greenlet() passed")
        # locust runner : master/slave mode (master here)
        elif self.master and self._locustrunner is None:
            self._locustrunner = MasterLocustRunner(self._locustclasses, self._options)
            logger.info("##### Locust plugin: MasterLocustRunner started")
            time.sleep(1)
            if self.no_web:
                # NOTE(review): this calls spawn_local_slaves(...) immediately
                # and hands its None result to gevent.spawn(); it probably
                # should be gevent.spawn(spawn_local_slaves, self.expect_slaves).
                # Confirm before changing — the immediate call does spawn the
                # slaves, just not in a separate greenlet.
                gevent.spawn(spawn_local_slaves(self.expect_slaves))
                while len(self._locustrunner.clients.ready) < self.expect_slaves:
                    logger.info("##### Locust plugin: Waiting for slaves to be ready, %s of %s connected",
                                len(self._locustrunner.clients.ready), self.expect_slaves)
                    time.sleep(1)
                self._locustrunner.start_hatching(self.num_clients, self.hatch_rate)
                logger.debug("######## DEBUG: MasterLocustRunner/start_hatching()")
                main_greenlet = self._locustrunner.greenlet
                if self.run_time:
                    try:
                        spawn_run_time_limit_greenlet()
                    except Exception as e:
                        logger.error("##### Locust plugin: exception raised in spawn_run_time_limit_greenlet() = {}".format(e))
        # locust runner : master/slave mode (slave here)
        #elif self.slave and self._locustrunner is None:
        #    if self.run_time:
        #        logger.error("##### Locust plugin: --run-time should be specified on the master node, and not on slave nodes")
        #        sys.exit(1)
        #    try:
        #        self._locustrunner = SlaveLocustRunner(self._locustclasses, self._options)
        #        main_greenlet = self._locustrunner.greenlet
        #    except socket.error as e:
        #        logger.error("##### Locust plugin: Failed to connect to the Locust master: %s", e)
        #        sys.exit(-1)
        return self._locustrunner
        # NOTE(review): everything below is unreachable after the return
        # above — dead code kept as-is; confirm and remove.
        self._locustrunner.greenlet.join()
        code = 0
        if len(self._locustrunner.errors):
            code = 1
        self.shutdown(code=code)
    except KeyboardInterrupt as e:
        self.shutdown(0)
def shutdown(self, code=0):
    """
    Shut down locust by firing quitting event, printing stats and exiting
    """
    logger.debug("######## DEBUG: shutdown()/_locustrunner = {}".format(self._locustrunner))
    logger.info("##### Locust plugin: Cleaning up runner...")
    if self._locustrunner is not None and self.is_any_slave_up():
        #if self.csvfilebase:
        #    write_stat_csvs(self.csvfilebase)
        retcode = self._locustrunner.quit()
        logger.debug("######## DEBUG: shutdown()/_locustrunner.quit() passed # retcode = {}".format(retcode))
        logger.info("##### Locust plugin: Running teardowns...")
        # Drain the reader's stats queue so no samples are lost on exit.
        while not self.reader.is_stat_queue_empty():
            logger.info("##### Locust plugin: {} items remaining is stats queue".format(self.reader.stat_queue.qsize()))
            time.sleep(1)
        ### FIXME : possibly causing a greenlet looping infinitely
        #events.quitting.fire(reverse=True)
        print_stats(self._locustrunner.request_stats)
        print_percentile_stats(self._locustrunner.request_stats)
        print_error_report()
        self.reader.close()
    logger.info("##### Locust plugin: Shutting down (exit code %s), bye." % code)
def is_test_finished(self):
    """
    Fetch locustrunner stats: min/max/median/avg response time, current RPS, fail ratio
    """
    if self._locustrunner:
        self._locuststats = self._locustrunner.stats.total
        # NOTE: the triple-quoted string below is a no-op expression
        # (not a docstring); kept verbatim.
        """
        Fetch locustrunner status: 'ready', 'hatching', 'running', 'stopped' and returns status code
        """
        logger.debug("######## DEBUG: is_test_finished()? -> Fetching locust status")
        logger.debug("######## DEBUG: is_test_finished() -> self._locustrunner.state = {}".format(self._locustrunner.state))
        logger.debug("######## DEBUG: is_test_finished() -> is_any_slave_up() = {}".format(self.is_any_slave_up()))
        self._state = self._locustrunner.state
        # Precedence: finished when stopped, OR when (master AND no live slave).
        if self._locustrunner.state == 'stopped' or self.master and not self.is_any_slave_up():
            self._user_count = 0
            return 0
        else:
            self._user_count = self._locustrunner.user_count
            return -1
def end_test(self, retcode):
    """Final hook: ensure locust is shut down, then pass retcode through.

    NOTE(review): both branches call shutdown(retcode); only the log
    message differs — confirm whether the second call is intentional.
    """
    if self.is_test_finished() < 0:
        logger.info("##### Locust plugin: Terminating Locust")
        self.shutdown(retcode)
    else:
        logger.info("##### Locust plugin: Locust has been terminated already")
        self.shutdown(retcode)
    return retcode
class Opts:
    """Attribute bag that mimics locust's parsed command-line options.

    Arbitrary keyword arguments become attributes; the two stats-reset
    flags locust expects are then forced to fixed values.
    """

    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
        # Fixed flags: never reset stats after hatching completes.
        self.no_reset_stats = True
        self.reset_stats = False
class LocustInfoWidget(AbstractInfoWidget, AggregateResultListener):
    """ Right panel widget with Locust test info """

    def __init__(self, sender):
        AbstractInfoWidget.__init__(self)
        self.krutilka = ConsoleScreen.krutilka()  # console spinner animation
        self.owner = sender  # the locust Plugin instance feeding this widget
        self.active_threads = 0
        self.RPS = 0

    def get_index(self):
        """Widget ordering index in the console's right panel (topmost)."""
        logger.debug('######## DEBUG: LocustInfoWidget get_index()')
        return 0

    def on_aggregated_data(self, data, stats):
        """AggregateResultListener hook: refresh cached user count and RPS."""
        ### DEBUG
        self.active_threads = self.owner._user_count
        self.RPS = self.owner._locuststats.current_rps # data['overall']['interval_real']['len']
        logger.debug('######## DEBUG: LocustInfoWidget on_aggregated_data(): %s' % str(stats))
        logger.debug('######## DEBUG: LocustInfoWidget on_aggregated_data() / self.RPS = %s' % str(self.RPS))

    def render(self, ConsoleScreen):
        """Build the right-panel text block: an info section and a live
        stats section, framed with '#' banners."""
        # color_bg = ConsoleScreen.markup.BG_CYAN
        res = ""
        info_banner = "########## LOCUST INFO ###########"
        stats_banner = "########## LOCUST STATS ##########"
        space = ConsoleScreen.right_panel_width - len(info_banner) - 1
        # NOTE(review): '/' is true division on Python 3, making these
        # floats and '#' * left_spaces a TypeError — this code assumes
        # Python 2 integer division; use '//' if ported. Confirm.
        left_spaces = space / 2
        right_spaces = space / 2
        #dur_seconds = int(time.time()) - int(self.locust.start_time)
        #duration = str(datetime.timedelta(seconds=dur_seconds))
        info_template = ConsoleScreen.markup.GREEN + '#' * left_spaces + info_banner
        info_template += "#" * right_spaces + ConsoleScreen.markup.RESET + "\n"
        info_template += "\t## Target host: {}\n".format(self.owner.host)
        info_template += "\t## Max users: {}\n".format(self.owner.num_clients)
        info_template += "\t## Hatch rate: {}\n".format(self.owner.hatch_rate)
        # info_ template += "\t## Locust file: {}\n".format(self.owner.locustfile)
        info_template += "\t## Locust state: {}\n".format(self.owner._state)
        info_template += "\t## Active users: {}\n".format(self.owner._user_count)
        if self.owner.master:
            # Slave count = ready + hatching + running clients on the master.
            info_template += "\t## Active slaves: {}\n".format(len(self.owner._locustrunner.clients.ready)
                                                               + len(self.owner._locustrunner.clients.hatching)
                                                               + len (self.owner._locustrunner.clients.running)
                                                               )
        # info_template += "\n\n"
        # info_template += ConsoleScreen.markup.BG_CYAN + '#' * (ConsoleScreen.right_panel_width - 1) + '\n\n\n\n\n' #+ ConsoleScreen.markup.RESET
        res += "{}".format(info_template)
        stats_template = ConsoleScreen.markup.GREEN + "#" * left_spaces + stats_banner
        stats_template += "#" * right_spaces + ConsoleScreen.markup.RESET + "\n"
        stats_template += "\t## Current RPS: {0:.2f}\n".format(self.owner._locuststats.current_rps)
        stats_template += "\t## Fail ratio (%): {0:.2f}\n".format(100 * self.owner._locuststats.fail_ratio)
        stats_template += "\t## Min resp time (ms): {}\n".format(self.owner._locuststats.min_response_time)
        stats_template += "\t## Max resp time (ms): {}\n".format(self.owner._locuststats.max_response_time)
        stats_template += "\t## Average resp time (ms): {0:.2f}\n".format(self.owner._locuststats.avg_response_time)
        stats_template += "\t## Median resp time (ms): {}\n".format(self.owner._locuststats.median_response_time)
        stats_template += ConsoleScreen.markup.GREEN + '#' * (ConsoleScreen.right_panel_width - 1) + ConsoleScreen.markup.RESET
        res += "{}".format(stats_template)
        logger.debug('######## DEBUG: LocustInfoWidget render()')
        return res
|
'''Create test scene with plenty of example features to test.
Created on 05/10/2019
@author: Bren
'''
import os
import fbx
import FbxCommon
from inspect import isclass
from brenfbx.core import bfIO
DUMP_DIR = r"D:\Repos\dataDump\brenfbx"
TEST_EXPORT_FILE = os.path.join(
DUMP_DIR,
"brenfbx_test_scene_01.fbx"
)
def create_scene(fbx_manager):
    """Create a scene with plenty of example features to test brenfbx.

    Builds nodes, user properties (including hierarchical and reference
    properties), a three-joint skeleton, redundant node attributes, an
    anim stack, and one of each constraint type. Returns the FbxScene.
    NOTE: this module uses Python 2 print statements.
    """
    fbx_scene = fbx.FbxScene.Create(fbx_manager, "TestScene")
    # create some nodes
    node_1 = fbx.FbxNode.Create(fbx_manager, "node1")
    node_2 = fbx.FbxNode.Create(fbx_manager, "node2")
    node_3 = fbx.FbxNode.Create(fbx_manager, "node3")
    object_1 = fbx.FbxObject.Create(fbx_manager, "object1")
    # add to scene (node_3 and object_1 are connected, not parented)
    fbx_scene.GetRootNode().AddChild(node_1)
    fbx_scene.GetRootNode().AddChild(node_2)
    fbx_scene.ConnectSrcObject(object_1)
    fbx_scene.ConnectSrcObject(node_3)
    # create a reference property and connect nodes
    ref_prop = fbx.FbxProperty.Create(
        object_1, fbx.FbxReferenceDT, "testReferenceProperty"
    )
    ref_prop.ModifyFlag(fbx.FbxPropertyFlags.eUserDefined, True)
    ref_prop.ModifyFlag(fbx.FbxPropertyFlags.eAnimatable, True)
    ref_prop.ConnectSrcObject(node_1)
    ref_prop.ConnectSrcObject(node_2)
    # create hierarchal properties
    ref_dbl2_prop = fbx.FbxProperty.Create(
        object_1, fbx.FbxDouble2DT, "testReferenceDbl2Property"
    )
    ref_dbl2_prop.Set(19.0)
    ref_dbl2_prop.ConnectSrcObject(node_1)
    ref_dbl2_prop.ModifyFlag(fbx.FbxPropertyFlags.eUserDefined, True)
    ref_dbl2_prop.ModifyFlag(fbx.FbxPropertyFlags.eAnimatable, True)
    child_prop_1 = fbx.FbxProperty.Create(
        ref_dbl2_prop, fbx.FbxDoubleDT, "child_property1"
    )
    child_prop_1.Set(16.0)
    child_prop_1.ModifyFlag(fbx.FbxPropertyFlags.eUserDefined, True)
    child_prop_1.ModifyFlag(fbx.FbxPropertyFlags.eAnimatable, True)
    # create another sibling
    ref_dbl3_prop = fbx.FbxProperty.Create(
        object_1, fbx.FbxDouble3DT, "testReferenceDbl3Property"
    )
    ref_dbl3_prop.Set(19.0)
    # create an object connection
    object_2 = fbx.FbxObject.Create(fbx_manager, "object2")
    object_1.ConnectSrcObject(object_2)
    # create property-property connection(s)
    src_prop_1 = fbx.FbxProperty.Create(
        node_1, fbx.FbxDouble3DT, "testSrcProp1"
    )
    src_prop_1.Set(13)
    ref_prop.ConnectSrcProperty(src_prop_1)
    # ** connections models test **
    # src_prop_child = fbx.FbxProperty.Create(
    #     src_prop_1, fbx.FbxDouble3DT, "testSrcPropChild"
    # )
    # create some skeleton joints
    root_name = "rootJoint"
    root_attr = fbx.FbxSkeleton.Create(fbx_manager, root_name+"Attr")
    root_attr.SetSkeletonType(fbx.FbxSkeleton.eRoot)
    root_joint = fbx.FbxNode.Create(fbx_manager, root_name)
    root_joint.SetNodeAttribute(root_attr)
    # create joint1
    joint1_name = "joint1"
    joint1_attr = fbx.FbxSkeleton.Create(fbx_manager, joint1_name+"Attr")
    joint1_attr.SetSkeletonType(fbx.FbxSkeleton.eLimbNode)
    joint1_node = fbx.FbxNode.Create(fbx_manager, joint1_name)
    joint1_node.SetNodeAttribute(joint1_attr)
    joint1_node.LclTranslation.Set(
        fbx.FbxDouble3(50.0, 0.0, 0.0)
    )
    # create joint2
    joint2_name = "joint2"
    joint2_attr = fbx.FbxSkeleton.Create(fbx_manager, joint2_name+"Attr")
    joint2_attr.SetSkeletonType(fbx.FbxSkeleton.eLimbNode)
    joint2_node = fbx.FbxNode.Create(fbx_manager, joint2_name)
    joint2_node.SetNodeAttribute(joint2_attr)
    joint2_node.LclTranslation.Set(
        fbx.FbxDouble3(25.0, 0.0, 0.0)
    )
    joint2_node.LclRotation.Set(
        fbx.FbxDouble3(0.0, 0.0, 45.0)
    )
    # create skeleton hierarchy: root -> joint1 -> joint2
    fbx_scene.GetRootNode().AddChild(root_joint)
    root_joint.AddChild(joint1_node)
    joint1_node.AddChild(joint2_node)
    # create some redundant node attributes (connected to the scene but
    # not assigned to any node — exercises orphan-attribute handling)
    root_attr = fbx.FbxSkeleton.Create(fbx_manager, "SkelRootAttr")
    root_attr.SetSkeletonType(fbx.FbxSkeleton.eRoot)
    fbx_scene.ConnectSrcObject(root_attr)
    limb_attr = fbx.FbxSkeleton.Create(fbx_manager, "SkelLimbAttr")
    limb_attr.SetSkeletonType(fbx.FbxSkeleton.eLimb)
    fbx_scene.ConnectSrcObject(limb_attr)
    limb_node_attr = fbx.FbxSkeleton.Create(fbx_manager, "SkelLimbNodeAttr")
    limb_node_attr.SetSkeletonType(fbx.FbxSkeleton.eLimbNode)
    fbx_scene.ConnectSrcObject(limb_node_attr)
    # create anim stack to support constraints
    lAnimStack = fbx.FbxAnimStack.Create(fbx_scene, "testAnimStack")
    lAnimLayer = fbx.FbxAnimLayer.Create(fbx_scene, "Base Layer")
    lAnimStack.AddMember(lAnimLayer)
    # create some constraints
    parent_cons = fbx.FbxConstraintParent.Create(fbx_manager, "parentConstraint1")
    parent_cons.SetConstrainedObject(node_1)
    # note that this adds weight, translation offset and rotation offset properties for each source
    parent_cons.AddConstraintSource(node_2, 60.0)
    # weights don't need to be specified, but properties will still be created
    parent_cons.AddConstraintSource(node_3)
    fbx_scene.ConnectSrcObject(parent_cons)
    # create offset
    parent_cons.SetTranslationOffset(node_2, fbx.FbxVector4(1,2,3,1))
    aim_cons = fbx.FbxConstraintAim.Create(fbx_manager, "aimConstraint1")
    # note that this also adds properties for weight per target
    aim_cons.SetConstrainedObject(joint1_node)
    aim_cons.AddConstraintSource(node_2, 60.0)
    aim_cons.AddConstraintSource(node_1, 40.0)
    aim_cons.SetWorldUpObject(joint2_node)
    # aim_cons.RemoveConstraintSource(node_2) # method doesn't exist!
    fbx_scene.ConnectSrcObject(aim_cons)
    scale_cons = fbx.FbxConstraintScale.Create(fbx_manager, "scaleConstraint1")
    scale_cons.SetConstrainedObject(joint1_node)
    scale_cons.AddConstraintSource(node_1, 44.0)
    scale_cons.AddConstraintSource(node_2, 22.3)
    fbx_scene.ConnectSrcObject(scale_cons)
    position_cons = fbx.FbxConstraintPosition.Create(fbx_manager, "positionConstraint1")
    position_cons.SetConstrainedObject(joint1_node)
    position_cons.AddConstraintSource(node_1, 44.0)
    position_cons.AddConstraintSource(node_2, 22.3)
    position_cons.AddConstraintSource(node_3, 33.3)
    # note that position constraint does have remove method,
    # but it doesn't remove the weight property!
    position_cons.RemoveConstraintSource(node_2)
    # With initial testing it also seems to break the connection to node 3 as well in maya,
    # but it actually seems like position constraints with multiple targets don't import into
    # maya with the extra targets for some reason?
    # everything else seems ok though
    fbx_scene.ConnectSrcObject(position_cons)
    position_cons_2 = fbx.FbxConstraintPosition.Create(fbx_manager, "positionConstraint2")
    position_cons_2.SetConstrainedObject(joint2_node)
    position_cons_2.AddConstraintSource(node_1, 44.0)
    # position_cons.AddConstraintSource(node_2, 22.3)
    position_cons_2.AddConstraintSource(node_3, 33.3)
    fbx_scene.ConnectSrcObject(position_cons_2)
    rotation_cons = fbx.FbxConstraintRotation.Create(fbx_manager, "rotationConstraint1")
    rotation_cons.SetConstrainedObject(joint2_node)
    rotation_cons.AddConstraintSource(node_1, 44.0)
    rotation_cons.AddConstraintSource(node_2, 22.3)
    fbx_scene.ConnectSrcObject(rotation_cons)
    print "created stuff"
    # return scene
    return fbx_scene
def export_scene():
    """Create a manager, scene, and export file.

    Writes the test scene as ASCII FBX to TEST_EXPORT_FILE, overwriting
    any existing file. Assumes DUMP_DIR already exists (makedirs=False).
    """
    fbx_manager = fbx.FbxManager.Create()
    fbx_scene = create_scene(fbx_manager)
    bfIO.save_fbx_file(
        fbx_manager,
        fbx_scene,
        TEST_EXPORT_FILE,
        settings=None,
        ascii=True,
        overwrite=True,
        makedirs=False,
        verbose=True,
        err=True
    )
    print "file exported: {}".format(TEST_EXPORT_FILE)
def debug():
    """Reload the exported file and print the reference property's
    connections as a round-trip sanity check."""
    fbx_scene, fbx_manager = bfIO.load_fbx_file(TEST_EXPORT_FILE)
    object_1 = fbx_scene.FindSrcObject("object1")
    print object_1.GetName()
    prop = object_1.FindProperty("testReferenceProperty")
    print prop.GetName()
    print "Src object count ", prop.GetSrcObjectCount()
    print "Src property count ", prop.GetSrcPropertyCount()
    print "Src property 0", prop.GetSrcProperty(0).GetName()
if __name__ == "__main__":
export_scene()
debug()
print "done"
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author: B17455
class day():
    """A commute record: weekday label plus trip duration in hours.
    (Python 2 module — uses print statements.)"""
    def __init__(self, day, time):
        # day: weekday label (e.g. '周一'); time: trip duration in hours
        self.day = day
        self.time = time
    def speed(self, distance,vehicle):
        # Average speed in km/h; float() avoids Python 2 integer division.
        speed = float(distance)/self.time
        print '------------------'
        print '%s骑%s去买菜,骑了 %s 小时,平均速度 %s km/h'%(self.day, vehicle, self.time, speed)
def main():
    """Print the average speed of three example grocery trips."""
    distance = 20
    e_bicycle = '电动车'
    bicycle = '自行车'
    # (weekday, hours, vehicle) for each trip, in reporting order.
    trips = [
        ('周一', 0.5, e_bicycle),
        ('周二', 2, bicycle),
        ('周三', 0.6, e_bicycle),
    ]
    for weekday, hours, vehicle in trips:
        record = day(weekday, hours)
        record.speed(distance, vehicle)

if __name__ == '__main__':
    main()
|
# coding:utf-8
from pymongo import MongoClient
import pymysql
import numpy as np
from publicMethods import *
# Load the Autohome (汽车之家) review data; count how often each car is mentioned and build the co-mention matrix.
def loadCarCommentSet(carSet):
    """Scan every review document in MongoDB and return:
    - carCommentSet: list of {'buyCar': ..., 'mentionCar': [...]} for
      reviews that mention other cars,
    - carCount: {car: mention count} for cars in carSet,
    - carOccurrenceMat: square co-mention matrix indexed like carSet,
    - totalCount: total number of review documents.

    NOTE(review): the MongoDB host/port are hard-coded; consider config.
    """
    client=MongoClient('47.92.211.251',30000)
    collection=client.new_carDataset.test
    # NOTE(review): cursor.count() is deprecated in newer pymongo —
    # count_documents({}) is the replacement; confirm driver version.
    totalCount=collection.find().count()
    print('一共包含数据%s条'%totalCount)
    # Initialise counters for mentions and co-mentions.
    carCommentSet=[]
    carCount={}
    for car in carSet:
        carCount[car]=0
    carOccurrenceMat=np.zeros((len(carSet),len(carSet)))
    # Process every review; only 'car' (purchased) and 'formatMentionCar' are fetched.
    n=0
    for row in collection.find({},{"car":1,"formatMentionCar":1}):
        n+=1
        if n%1000==0:print(n)  # progress marker
        #if n==5000:break
        buyCar=row['car']
        if 'formatMentionCar' in row.keys():
            # The purchased car counts as mentioned too; de-duplicate.
            mentionCarSet=list(row['formatMentionCar'].keys())
            mentionCarSet.append(buyCar)
            mentionCarSet=list(set(mentionCarSet))
            # Reviews with no extra mentions are NOT added to carCommentSet.
            carComment={'buyCar':buyCar,'mentionCar':mentionCarSet}
            carCommentSet.append(carComment)
        else:
            mentionCarSet=[buyCar]
        # Tally per-car mention counts.
        for mentionCar in mentionCarSet:
            if mentionCar in carCount.keys():
                carCount[mentionCar]+=1
        # Tally pairwise co-mention counts (symmetric, diagonal excluded).
        for mentionCar1 in mentionCarSet:
            x=readCarID(mentionCar1,carSet)
            if x==-1:continue
            for mentionCar2 in mentionCarSet:
                if mentionCar2==mentionCar1:continue
                y=readCarID(mentionCar2,carSet)
                if y==-1:continue
                carOccurrenceMat[x,y]+=1
    return carCommentSet,carCount,carOccurrenceMat,totalCount
# Given two car models, compute their shared potential-market distribution.
def calPotentialMarket(car1, car2, carCommentSet):
    """Count which cars were actually bought by reviewers who mentioned
    both ``car1`` and ``car2`` (their shared potential market).

    :param car1: first car model name
    :param car2: second car model name
    :param carCommentSet: list of {'buyCar': str, 'mentionCar': [str, ...]}
    :return: dict mapping bought-car name -> number of such reviews
    """
    potenitialMarket = {}
    for carComment in carCommentSet:
        mentioned = carComment['mentionCar']
        # Only reviews mentioning BOTH cars contribute.
        # (Idiom fix: 'x not in y' instead of 'not x in y'.)
        if car1 not in mentioned or car2 not in mentioned:
            continue
        buyCar = carComment['buyCar']
        # dict.get replaces the original membership-test-then-init pattern.
        potenitialMarket[buyCar] = potenitialMarket.get(buyCar, 0) + 1
    return potenitialMarket
# Store the per-model attention counts.
def insert_into_car_market(carSet,carDataSet,carCount):
    """Insert per-car attention counts into the car_market table.
    Rows with a zero count are skipped; ids are assigned sequentially.
    """
    print('正在导入单个车型的数据')
    # NOTE(review): database credentials are hard-coded — move them to
    # configuration / environment variables.
    conn = pymysql.connect(host='47.99.116.136',user='root',passwd='3H1passwd',port=3306,db='car_test',charset='utf8')
    cursor=conn.cursor()
    sql_one='insert into car_market(id,car_id,num) values (%s,%s,%s)'
    one_id=1
    for i in range(len(carSet)):
        car1=carSet[i]
        car_id1=carDataSet[car1]['car_id']
        num1=carCount[car1]
        # Skip cars that were never mentioned (count 0).
        if num1==0:continue
        try:
            cursor.execute(sql_one,(one_id,car_id1,num1))
            one_id+=1
        except Exception as e:
            print(e)
    cursor.close()  # close the cursor
    conn.commit()  # commit is required, otherwise no row is actually inserted
    conn.close()
# Store the pairwise co-attention counts.
def insert_into_car_market_two_id(carSet,carDataSet,carCount,carOccurrenceMat):
    """Insert pairwise co-mention counts into car_market_two_id.
    Skips the diagonal (car paired with itself) and zero counts.
    """
    print('正在导入共同关注的数据')
    # NOTE(review): hard-coded credentials — move to configuration.
    conn = pymysql.connect(host='47.99.116.136',user='root',passwd='3H1passwd',port=3306,db='car_test',charset='utf8')
    cursor=conn.cursor()
    sql_two='insert into car_market_two_id(id,car_id1,car_id2,num) values (%s,%s,%s,%s)'
    two_id=1
    for i in range(len(carSet)):
        if two_id%10000==0:print(two_id)  # progress marker
        car1=carSet[i]
        #print('正在导入%s的数据'%car1)
        car_id1=carDataSet[car1]['car_id']
        num1=carCount[car1]
        if num1==0:continue
        for j in range(len(carSet)):
            car2=carSet[j]
            car_id2=carDataSet[car2]['car_id']
            num2=int(carOccurrenceMat[i,j])
            # Skip self-pairs and pairs never co-mentioned.
            if car1==car2:continue
            if num2==0:continue
            try:
                cursor.execute(sql_two,(two_id,car_id1,car_id2,num2))
                two_id+=1
            except Exception as e:
                print(e)
    cursor.close()  # close the cursor
    conn.commit()  # commit is required, otherwise no row is actually inserted
    conn.close()
# Store the potential-market share data.
def insert_into_car_market_three_id(carSet,carDataSet,carCount,carOccurrenceMat,carCommentSet):
    """For every co-mentioned car pair, insert its potential-market
    distribution (see calPotentialMarket) into car_market_three_id.
    Commits periodically so long runs persist incrementally.
    """
    print('正在导入潜在市场的数据')
    # NOTE(review): hard-coded credentials — move to configuration.
    conn = pymysql.connect(host='47.99.116.136',user='root',passwd='3H1passwd',port=3306,db='car_test',charset='utf8')
    cursor=conn.cursor()
    sql_three='insert into car_market_three_id(id,car_id1,car_id2,car_id3,num) values (%s,%s,%s,%s,%s)'
    three_id=1
    for i in range(len(carSet)):
        # Periodic progress marker + intermediate commit.
        if three_id%10000==0:
            print(three_id)
            conn.commit()
        car1=carSet[i]
        car_id1=carDataSet[car1]['car_id']
        num1=carCount[car1]
        if num1==0:continue
        for j in range(len(carSet)):
            car2=carSet[j]
            car_id2=carDataSet[car2]['car_id']
            num2=int(carOccurrenceMat[i,j])
            if car1==car2:continue
            if num2==0:continue
            potenitialMarket=calPotentialMarket(car1,car2,carCommentSet)
            for car3 in potenitialMarket.keys():
                # Only store cars we have an id mapping for.
                if not car3 in carDataSet.keys():continue
                car_id3=carDataSet[car3]['car_id']
                num3=potenitialMarket[car3]
                try:
                    cursor.execute(sql_three,(three_id,car_id1,car_id2,car_id3,num3))
                    three_id+=1
                except Exception as e:
                    print(e)
    cursor.close()  # close the cursor
    conn.commit()  # commit is required, otherwise no row is actually inserted
    conn.close()
# Store the competition-relationship data, partitioned by car category.
def insert_into_car_contend_value(carSet,carDataSet,carTypeSet,carCount,carOccurrenceMat,totalCount):
    """Within each car category, compute a PMI-based competition score
    for every ordered pair of co-mentioned cars and insert it into
    car_contend_value.
    """
    print('正在导入竞争关系')
    # NOTE(review): hard-coded credentials — move to configuration.
    conn = pymysql.connect(host='47.99.116.136',user='root',passwd='3H1passwd',port=3306,db='car_test',charset='utf8')
    cursor=conn.cursor()
    sql_relation='insert into car_contend_value(id,car_type_id,car_id,associated_id,value) values (%s,%s,%s,%s,%s)'
    _id=1
    for value in carTypeSet.values():
        # Restrict the comparison to cars of the same category.
        carSubDataSet=splitCarDataSet(carDataSet,value)
        for car1 in carSubDataSet.keys():
            if carCount[car1]==0:continue
            for car2 in carSubDataSet.keys():
                if car1==car2:continue
                car_id1=carDataSet[car1]['car_id']
                car_id2=carDataSet[car2]['car_id']
                x=carSet.index(car1)
                y=carSet.index(car2)
                freq_x=carCount[car1]
                freq_y=carCount[car2]
                freq_xy=int(carOccurrenceMat[x,y])
                N=totalCount
                if freq_xy==0:continue
                # Pointwise-mutual-information style relatedness score.
                carRelation=float(PMI_measure2(freq_x,freq_y,freq_xy,N))
                #print(_id,value,car_id1,car_id2,carRelation)
                try:
                    cursor.execute(sql_relation,(_id,value,car_id1,car_id2,carRelation))
                    if _id%1000==0:print(_id)
                    _id+=1
                except Exception as e:
                    print(e)
    cursor.close()  # close the cursor
    conn.commit()  # commit is required, otherwise no row is actually inserted
    conn.close()
# --- module-level script body (runs on import; consider wrapping in an
# ``if __name__ == '__main__':`` guard) ---
carDataSet=loadCarDataSet()
carTypeSet=loadCarTypeSet()
carSet=list(carDataSet.keys())
carCommentSet,carCount,carOccurrenceMat,totalCount=loadCarCommentSet(carSet)
# Only the three-id import step is enabled; the others are kept for manual runs.
#insert_into_car_market(carSet,carDataSet,carCount)
#insert_into_car_market_two_id(carSet,carDataSet,carCount,carOccurrenceMat)
insert_into_car_market_three_id(carSet,carDataSet,carCount,carOccurrenceMat,carCommentSet)
#insert_into_car_contend_value(carSet,carDataSet,carTypeSet,carCount,carOccurrenceMat,totalCount)
|
def gen(n):
    """Yield the integers 0 .. n-1 in order."""
    yield from range(n)
# g= gen(5)
#
# print(g.__next__())
# print(g.__next__())
# print(g.__next__())
# print(g.__next__())
# print(g.__next__())
#
# s="Manan"
# itr=iter(s)
# print(itr.__next__())
# print(itr.__next__())
# print(itr.__next__())
# print(itr.__next__())
# print(itr.__next__())
def fibo(n):
    """Generator that repeatedly yields the n-th Fibonacci number
    (fib(0) = 0, fib(1) = 1, fib(k) = fib(k-1) + fib(k-2)).

    Bug fix: the original recursive branch did ``fibo(n-2) + fibo(n-1)``,
    which tries to ADD two generator objects and raises TypeError on the
    first next() for any n >= 2. The value is now computed iteratively
    once; the original behaviour of yielding the same value forever
    (as n=0 and n=1 already did) is preserved.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    while True:
        yield a
# Demo: consume two values from the Fibonacci generator.
g=gen(5)
f=fibo(10)
# NOTE(review): with the recursive fibo above, the first __next__() hits the
# `fibo(n-2) + fibo(n-1)` branch and raises TypeError (adding generators).
print(f.__next__())
print(f.__next__())
|
class Square:
    """A square described by the length of one side."""

    def __init__(self, side):
        self.side = side

    def __add__(self, other):  # special operator overloading method for +
        """``a + b`` returns the sum of both squares' perimeters (4*side each).

        Idiom fix: the parameters were named ``square_one``/``square_two``;
        dunder methods conventionally take ``self`` and ``other``. Both are
        positional, so callers are unaffected.
        """
        return (4 * self.side) + (4 * other.side)
square_one = Square(5) # perimeter: 5*4=20
square_two = Square(10) # perimeter: 10*4=40
# Square.__add__ returns the combined perimeter (20 + 40 = 60).
print("Sum of sides of both squares: ", square_one + square_two)
|
# Python 3.7
# File name:
# Authors: Aaron Watt
# Date: 2021-07-05
"""Module to be imported for project settings."""
# Standard library imports
from pathlib import Path
# Third-party imports
# Local application imports
from . import tools
# CLASSES --------------------------
class Paths:
    """Registry of commonly-used project paths.

    The repo root is derived from the current working directory by
    truncating it at the 'beecensus' component, so this only works when
    invoked from inside the repository (e.g. via main.py) and the repo
    folder keeps its name.  Typical use through the outer settings object:

        Project = ProjectSettings()
        Project.paths.root   # pathlib path to the repo root
    """

    def __init__(self):
        # Repo root: everything up to and including the 'beecensus' folder.
        cwd_parts = Path.cwd().parts
        self.root = Path(*cwd_parts[:cwd_parts.index('beecensus') + 1])
        # Top-level repo directories.
        for folder in ('code', 'docs', 'models', 'output', 'paper'):
            setattr(self, folder, self.root / folder)
        # Data tree.
        self.data = self.root / 'data'
        for folder in ('checkpoints', 'configs', 'images', 'tables', 'temp'):
            setattr(self, folder, self.data / folder)
class GISSettings:
    """Settings for the GIS portion of the project.

    Possible geographical names:
    - Philadelphia County, PA
    """

    def __init__(self):
        # Geographic area to search for bees.
        self.geographical_name = 'Philadelphia County, PA'
class MLSettings:
    """Settings for the machine-learning portion of the project.

    Model names are tensorflow model names used when fetching checkpoints.
    Classifier models: resnet50.
    Object-detector models:
    - faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8
    - ssd_mobilenet_v2_320x320_coco17_tpu-8
    """

    # Both detectors currently share the same Faster R-CNN backbone.
    _FRCNN = 'faster_rcnn_resnet50_v1_1024x1024_coco17_tpu-8'

    def __init__(self):
        # Base model for classifying images as having/not having bee boxes.
        self.ml_classifier_name = 'resnet50'
        # Base model for detecting apiaries in images.
        self.ml_apiary_detector_name = self._FRCNN
        # Base model for detecting individual bee boxes in images.
        self.ml_box_detector_name = self._FRCNN
class GoogleEarthDriverSettings:
    """Settings for driving Google Earth during image downloads.

    No settings are defined yet; instances are empty placeholders.
    """

    def __init__(self):
        pass
# FUNCTIONS --------------------------
# MAIN -------------------------------
# Create instances of each class to be called from other
# modules as ready-made, import-time singletons.
PATHS = Paths()
GIS = GISSettings()
ML = MLSettings()
GE = GoogleEarthDriverSettings()
# REFERENCES -------------------------
"""
"""
|
import pytest
import torch
from pytest_dl import model
@pytest.fixture(scope="module", params=["cnn", "mlp"])
def net(request):
    """Parametrized fixture: (VAE under test, matching dummy input batch)."""
    if request.param == "cnn":
        vae = model.CNNVAE(input_shape=(1, 32, 32), bottleneck_dim=16)
    elif request.param == "mlp":
        vae = model.MLPVAE(input_shape=(1, 32, 32), bottleneck_dim=16)
    else:
        raise ValueError("invalid internal test config")
    # Batch of 4 random single-channel 32x32 images.
    return vae, torch.randn(4, 1, 32, 32)
@torch.no_grad()
def test_shape(net):
net, inputs = net
outputs = net(inputs)
assert inputs.shape == outputs.shape
@torch.no_grad()
@pytest.mark.skipif(not torch.cuda.is_available(), reason="No GPU was detected")
def test_device_moving(net):
    """Moving the model CPU -> GPU -> CPU must not change its outputs."""
    vae, batch = net
    gpu_vae = vae.to("cuda:0")
    roundtrip_vae = gpu_vae.cpu()
    # Re-seed before each forward pass so stochastic layers line up.
    results = []
    for candidate in (vae, gpu_vae, roundtrip_vae):
        torch.manual_seed(42)
        results.append(candidate(batch))
    assert torch.sum(results[0] - results[1].cpu()) == pytest.approx(0)
    assert torch.sum(results[0] - results[2]) == pytest.approx(0)
def test_batch_indepependence(net):
net, inputs = net
inputs = inputs.clone()
inputs.requires_grad = True
# Compute forward pass in eval mode to deactivate batch norm
net.eval()
outputs = net(inputs)
net.train()
# Mask loss for certain samples in batch
batch_size = inputs[0].shape[0]
mask_idx = torch.randint(0, batch_size, ())
mask = torch.ones_like(outputs)
mask[mask_idx] = 0
outputs = outputs * mask
# Compute backwad pass
loss = outputs.mean()
loss.backward()
# Check if gradient exists and is zero for masked samples
for i, grad in enumerate(inputs.grad):
if i == mask_idx:
assert torch.all(grad == 0).item()
else:
assert not torch.all(grad == 0)
def test_all_parameters_updates(net):
net, inputs = net
optim = torch.optim.SGD(net.parameters(), lr=0.1)
outputs = net(inputs)
loss = outputs.mean()
loss.backward()
optim.step()
for param_name, param in net.named_parameters():
if param.requires_grad:
assert param.grad is not None, param_name
assert torch.sum(param.grad ** 2) != 0.0, param_name
|
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from support.Scapy_Control import *
import sys, getopt
if __name__== "__main__":
    # Build a 4-packet RADIUS exchange (Access-Request -> Access-Challenge ->
    # Access-Request -> Access-Accept) between two random endpoints and save
    # it as a pcap for replay/testing.
    dmac = GenerateRandomMac()
    smac = GenerateRandomMac()
    SIP = GenerateRandomIp()
    DIP = GenerateRandomIp()
    dport = 1812   # standard RADIUS authentication port
    sport = 16450
    id = 1         # RADIUS packet identifier (NOTE(review): shadows builtin id)
    # Radius codes: 1 = Access-Request, 11 = Access-Challenge, 2 = Access-Accept.
    Request = Ether(dst=dmac, src=smac, type=0x0800) / IP(src=SIP, dst=DIP) / UDP(dport=dport, sport=sport) / Radius(code=1, id=id)
    Challenge = Ether(dst=smac, src=dmac, type=0x0800) / IP(src=DIP, dst=SIP) / UDP(dport=sport, sport=dport) / Radius(code=11, id=id, authenticator='0123456789abcdef')
    # Second round-trip uses the next identifier value.
    id += 1
    Request2 = Ether(dst=dmac, src=smac, type=0x0800) / IP(src=SIP, dst=DIP) / UDP(dport=dport, sport=sport) / Radius(code=1, id=id, authenticator='fedcba9876543210')
    Accept = Ether(dst=smac, src=dmac, type=0x0800) / IP(src=DIP, dst=SIP) / UDP(dport=sport, sport=dport) / Radius(code=2, id=id)
    p = [Request, Challenge, Request2, Accept]
    wrpcap('/home/nathan/radius.pcap', p)
|
from django.db import models
# Create your models here.
class Message(models.Model):
    """A contact-form message stored with the sender's details."""
    sender = models.CharField(max_length=50)      # sender's display name
    text_message = models.TextField()             # message body
    sender_email = models.EmailField()
    sending_date = models.DateTimeField(blank=True, null=True)  # optional

    def __str__(self):
        # Admin/display label; 'Сообщение от' is Russian for "Message from".
        return 'Сообщение от ' + self.sender
|
#import urllib.request
#import json
import requests
import pandas as pd
pd.options.display.max_columns = 100
def get_park_list(key):
    """Fetch the full national-park list from the NPS API.

    :param key: NPS developer API key.
    :return: DataFrame built from the raw JSON response; the park records
        live in its 'data' column.
    """
    # BUG FIX: `headers` was built but never passed to the request, so the
    # Authorization header was silently dropped.  It is now sent alongside
    # the api_key query parameter (kept for backward compatibility).
    headers = {"Authorization": key}
    endpoint = "https://developer.nps.gov/api/v1/parks"
    # Sample non-working Python code from
    # https://github.com/nationalparkservice/nps-api-samples/blob/master/park-name-list.py
    #req = urllib.request.Request(endpoint, headers=headers)
    #response = urllib.request.urlopen(req).read()
    #data = json.loads(response.decode('utf-8'))
    #return data
    r = requests.get(endpoint + "?limit=1000" + "&api_key=" + key, headers=headers)
    print("Request status code: " + str(r.status_code))
    data = pd.DataFrame.from_dict(r.json())
    return data
# ---- flatten the API response into a tidy per-park table ----------------
my_key = input("Enter your NPS API key: ")
df = get_park_list(my_key)
column_names = ['id', 'park_code', 'name', 'full_name', 'designation', 'description',
                'street', 'city', 'state', 'zipcode', 'lat_long', 'url']
parks_df = pd.DataFrame(columns=column_names)
for park in df['data']:
    print("Adding " + park['fullName'])
    # Get physical address info
    park_addresses = park['addresses']
    try:
        # First address entry tagged 'Physical' (parks may list several kinds);
        # `physical_index` is None when absent, making the indexing below raise.
        physical_index = next((index for (index, d) in enumerate(park_addresses) if d['type'] == 'Physical'), None)
        physical_address = park_addresses[physical_index]
        street = physical_address['line1']
        city = physical_address['city']
        state = physical_address['stateCode']
        zipcode = physical_address['postalCode']
    except:  # NOTE(review): bare except hides real bugs; (TypeError, KeyError) would be safer
        street = None
        city = None
        state = None
        zipcode = None
    # Get park info -- every field below is optional in the API payload.
    try:
        description = park['description']
    except:
        description = None
    try:
        latlong = park['latLong']
    except:
        latlong = None
    try:
        url = park['url']
    except:
        url = None
    try:
        park_code = park['parkCode']
    except:
        park_code = None
    parks_data = [park['id'], park_code, park['name'], park['fullName'], park['designation'],
                  description, street, city, state, zipcode, latlong, url]
    parks_series = pd.Series(parks_data, index=column_names)
    # NOTE(review): DataFrame.append was removed in pandas 2.0 -- this
    # script requires pandas < 2 (pd.concat is the modern replacement).
    parks_df = parks_df.append(parks_series, ignore_index=True)
# Found a few unicode decimal codes that were broken in raw data (https://www.codetable.net/decimal/):
# ā = ā
# ñ = ñ
# ō = ō
parks_df = parks_df.replace({"ā": "ā", "ñ": "ñ", "ō": "ō"}, regex=True)
# Strip semicolons so they cannot corrupt delimited output downstream.
parks_df['name'] = parks_df['name'].replace(";","", regex=True)
parks_df['full_name'] = parks_df['full_name'].replace(";","", regex=True)
parks_df.to_csv('data/parks_data.csv', index=False, encoding='utf-8-sig')
from functools import cached_property
from onegov.org.request import OrgRequest
class FsiRequest(OrgRequest):
    """Org request enriched with course-attendee convenience accessors."""

    @cached_property
    def attendee(self):
        """Attendee record of the logged-in user, or None."""
        user = self.current_user
        if user and user.attendee:
            return user.attendee
        return None

    @cached_property
    def attendee_id(self):
        """Primary key of the attendee, or None when there is none."""
        if self.attendee:
            return self.attendee.id or None
        return None

    @cached_property
    def is_editor(self):
        """True when a user is logged in with the 'editor' role."""
        user = self.current_user
        return bool(user and user.role == 'editor')

    @cached_property
    def is_member(self):
        """True when a user is logged in with the 'member' role."""
        user = self.current_user
        return bool(user and user.role == 'member')
|
import tensorflow as tf
from tensorflow.python.ops.rnn_cell import GRUCell
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
class LSTMAutoencoder(object):
    """Sequence autoencoder built from GRU cells (TensorFlow 1, graph mode).

    Encodes a sequence with one RNN and reconstructs it (optionally in
    reverse order) with a second RNN; the time-averaged encoder outputs
    serve as the sequence embedding.
    """

    def __init__(self, hidden_num, input_num, cell=None, reverse=True, decode_without_input=False, name=None):
        # `name` namespaces all variables so several autoencoders can coexist.
        self.name=name
        if cell is None:
            self._enc_cell = GRUCell(hidden_num, name='encoder_cell_{}'.format(self.name))
            self._dec_cell = GRUCell(hidden_num, name='decoder_cell_{}'.format(self.name))
        else:
            # NOTE(review): a caller-supplied cell is shared between encoder
            # and decoder (same weights for both directions).
            self._enc_cell = cell
            self._dec_cell = cell
        self.reverse = reverse
        self.decode_without_input = decode_without_input
        self.hidden_num = hidden_num
        # Per-step feature width depends on the dataset flavour.
        if FLAGS.datasource in ['2D']:
            self.elem_num_init = 2
            self.elem_num=FLAGS.sync_filters
        elif FLAGS.datasource in ['plainmulti', 'artmulti']:
            self.elem_num = input_num
        # Output projection from decoder hidden state back to element space.
        self.dec_weight = tf.Variable(tf.truncated_normal([self.hidden_num,
            self.elem_num], dtype=tf.float32), name='dec_weight_{}'.format(self.name))
        self.dec_bias = tf.Variable(tf.constant(0.1, shape=[self.elem_num],
            dtype=tf.float32), name='dec_bias_{}'.format(self.name))

    def model(self, inputs):
        """Build the encode/decode graph.

        :param inputs: sequence tensor; a leading batch axis of 1 is added.
        :return: (embedding, reconstruction loss) tensors.
        """
        # Treat the whole input as one batch of unstacked time steps.
        inputs = tf.expand_dims(inputs, 0)
        inputs = tf.unstack(inputs, axis=1)
        self.batch_num = FLAGS.meta_batch_size
        with tf.variable_scope('encoder_{}'.format(self.name)):
            (self.z_codes, self.enc_state) = tf.contrib.rnn.static_rnn(self._enc_cell, inputs, dtype=tf.float32)
        with tf.variable_scope('decoder_{}'.format(self.name)) as vs:
            if self.decode_without_input:
                # Feed zeros at every step; decoder relies on enc_state only.
                dec_inputs = [tf.zeros(tf.shape(inputs[0]), dtype=tf.float32) for _ in range(len(inputs))]
                (dec_outputs, dec_state) = tf.contrib.rnn.static_rnn(self._dec_cell, dec_inputs,
                                                                     initial_state=self.enc_state,
                                                                     dtype=tf.float32)
                if self.reverse:
                    dec_outputs = dec_outputs[::-1]
                dec_output_ = tf.transpose(tf.stack(dec_outputs), [1, 0, 2])
                dec_weight_ = tf.tile(tf.expand_dims(self.dec_weight, 0), [self.batch_num, 1, 1])
                self.output_ = tf.matmul(dec_weight_, dec_output_) + self.dec_bias
            else:
                # Autoregressive decoding: feed back the previous projected output.
                dec_state = self.enc_state
                dec_input_ = tf.zeros(tf.shape(inputs[0]),
                                      dtype=tf.float32)
                dec_outputs = []
                for step in range(len(inputs)):
                    if step > 0:
                        vs.reuse_variables()  # share decoder weights across steps
                    (dec_input_, dec_state) = \
                        self._dec_cell(dec_input_, dec_state)
                    dec_input_ = tf.matmul(dec_input_, self.dec_weight) + self.dec_bias
                    dec_outputs.append(dec_input_)
                if self.reverse:
                    dec_outputs = dec_outputs[::-1]
                self.output_ = tf.transpose(tf.stack(dec_outputs), [1, 0, 2])
        self.input_ = tf.transpose(tf.stack(inputs), [1, 0, 2])
        # Mean-squared reconstruction error over all steps and features.
        self.loss = tf.reduce_mean(tf.square(self.input_ - self.output_))
        # Embedding = encoder outputs averaged over time.
        self.emb_all = tf.reduce_mean(self.z_codes, axis=0)
        return self.emb_all, self.loss
'''
http://snowdeer.github.io/machine-learning/2018/01/09/recognize-mnist-data/
'''
from keras.datasets import mnist
# Download (on first run) and load the MNIST digit arrays.
(X_train, Y_train), (X_validation, Y_validation) = mnist.load_data()
print ("x_train shape : " , X_train.shape)
def printTrainData(idx):
    """Dump MNIST training image `idx` as aligned ASCII art, row by row."""
    for row in X_train[idx]:
        for pixel in row:
            # print('{:3} '.format(i), end='')
            printNum(pixel)
        print()
    # print(Y_train.shape)
def printNum(n):
    """Print a pixel value right-aligned in a 4-char cell; blank for zeros."""
    cell = '{:3} '.format(n) if n > 0 else '    '
    print(cell, end='')
def printYTrain(idx):
    """Print the ground-truth label of training sample `idx`."""
    print("Y_train", idx, " => ", Y_train[idx])
# Demo: dump training sample #2 as ASCII art followed by its label.
printTrainData(2)
print("========================")
printYTrain(2)
|
# A Floater is Prey; it updates by moving mostly in
# a straight line, but with random changes to its
# angle and speed, and displays as ufo.gif (whose
# dimensions (width and height) are computed by
# calling .width()/.height() on the PhotoImage
# from PIL.ImageTk import PhotoImage
from prey import Prey
from random import random, uniform
class Floater(Prey):
    # Half of the drawn size; the bounding box is radius*2 on each side.
    radius = 5

    def __init__(self, x, y):
        """Spawn at (x, y) with a random heading and a fixed speed of 5."""
        self.randomize_angle()
        Prey.__init__(self,x,y,width = Floater.radius *2,height = Floater.radius*2,angle = self._angle,speed = 5)

    def update(self, model):
        """Advance one tick: with ~30% probability jitter speed and heading
        (keeping speed in [3, 7]), then move and bounce off walls."""
        random_chance = random()
        random_speed = uniform(-.5, +.51)
        random_angle = uniform(-.5, +.51)
        if random_chance <= 0.3:
            new_speed = self._speed + random_speed
            new_angle = self._angle + random_angle
            # Re-roll the speed delta until the candidate stays in [3, 7].
            while True:
                if 3 <= new_speed <= 7:
                    break
                else:
                    random_speed = uniform(-.5, .51)
                    new_speed = self._speed + random_speed
            # NOTE(review): set_velocity must stay inside this branch --
            # new_speed/new_angle are only defined here.
            self.set_velocity(new_speed, new_angle)
        self.move()
        self.wall_bounce()

    def display(self, canvas):
        """Draw as a filled red circle (placeholder for ufo.gif)."""
        canvas.create_oval(self._x-Floater.radius , self._y-Floater.radius,
                           self._x+Floater.radius, self._y+Floater.radius,
                           fill='red')
|
import random
import datetime
# Demo: print a die roll and today's date.
r = random.randint(1, 6)
print(r)
print(datetime.date.today())
import qrcode  # third-party; mid-file import kept as in the original
# Render a QR code for the URL and save it next to the script.
img = qrcode.make("http://kujiranand.com")
img.save('qrcode-test.png')
|
import unittest
from mongoengine.base.datastructures import StrictDict
class TestStrictDict(unittest.TestCase):
    """Behavioural tests for mongoengine's StrictDict: a dict-like type
    restricted to a fixed set of attribute-style keys."""

    def strict_dict_class(self, *args, **kwargs):
        # StrictDict.create builds a new class limited to the given keys.
        return StrictDict.create(*args, **kwargs)
    def setUp(self):
        # Fresh class allowing exactly the keys "a", "b", "c" per test.
        self.dtype = self.strict_dict_class(("a", "b", "c"))
    def test_init(self):
        d = self.dtype(a=1, b=1, c=1)
        self.assertEqual((d.a, d.b, d.c), (1, 1, 1))
    def test_repr(self):
        d = self.dtype(a=1, b=2, c=3)
        self.assertEqual(repr(d), '{"a": 1, "b": 2, "c": 3}')
        # make sure quotes are escaped properly
        d = self.dtype(a='"', b="'", c="")
        self.assertEqual(repr(d), '{"a": \'"\', "b": "\'", "c": \'\'}')
    def test_init_fails_on_nonexisting_attrs(self):
        with self.assertRaises(AttributeError):
            self.dtype(a=1, b=2, d=3)
    def test_eq(self):
        # Equality requires the same allowed-key set and equal values;
        # the declaration order of the keys must not matter (d == h).
        d = self.dtype(a=1, b=1, c=1)
        dd = self.dtype(a=1, b=1, c=1)
        e = self.dtype(a=1, b=1, c=3)
        f = self.dtype(a=1, b=1)
        g = self.strict_dict_class(("a", "b", "c", "d"))(a=1, b=1, c=1, d=1)
        h = self.strict_dict_class(("a", "c", "b"))(a=1, b=1, c=1)
        i = self.strict_dict_class(("a", "c", "b"))(a=1, b=1, c=2)
        self.assertEqual(d, dd)
        self.assertNotEqual(d, e)
        self.assertNotEqual(d, f)
        self.assertNotEqual(d, g)
        self.assertNotEqual(f, d)
        self.assertEqual(d, h)
        self.assertNotEqual(d, i)
    def test_setattr_getattr(self):
        d = self.dtype()
        d.a = 1
        self.assertEqual(d.a, 1)
        self.assertRaises(AttributeError, getattr, d, 'b')
    def test_setattr_raises_on_nonexisting_attr(self):
        d = self.dtype()
        with self.assertRaises(AttributeError):
            d.x = 1
    def test_setattr_getattr_special(self):
        # Allowed keys may shadow dict method names such as "items".
        d = self.strict_dict_class(["items"])
        d.items = 1
        self.assertEqual(d.items, 1)
    def test_get(self):
        d = self.dtype(a=1)
        self.assertEqual(d.get('a'), 1)
        self.assertEqual(d.get('b', 'bla'), 'bla')
    def test_items(self):
        # items() lists only the keys that were actually assigned.
        d = self.dtype(a=1)
        self.assertEqual(d.items(), [('a', 1)])
        d = self.dtype(a=1, b=2)
        self.assertEqual(d.items(), [('a', 1), ('b', 2)])
    def test_mappings_protocol(self):
        d = self.dtype(a=1, b=2)
        assert dict(d) == {'a': 1, 'b': 2}
        assert dict(**d) == {'a': 1, 'b': 2}
|
import pytest
from raincoat.match import pypi
def test_match_str(match):
    """str() of a match shows package, version, element and origin."""
    expected = "umbrella == 3.2 @ path/to/file.py:MyClass (from filename:12)"
    assert str(match) == expected
def test_match_str_other_version(match):
    """When another version is recorded, str() shows 'X vs Y'."""
    match.other_version = "3.4"
    expected = ("umbrella == 3.2 vs 3.4 @ path/to/file.py:MyClass "
                "(from filename:12)")
    assert str(match) == expected
def test_wrong_package_format():
    # A malformed package spec ("pytest" carries no version) must raise.
    with pytest.raises(pypi.NotMatching):
        pypi.PyPIMatch("a", 12, "pytest", "path", "element")
def test_current_source_key(mocker, match):
    """Checker resolves the installed/latest version and stores it on the match."""
    mocker.patch(
        "raincoat.source.get_current_or_latest_version",
        return_value=(True, "3.8"))
    key = pypi.PyPIChecker().current_source_key(match)
    assert key == ("umbrella", "3.8", True)
    assert match.other_version == "3.8"
def test_current_source_key_cache(mocker, match):
    """A second lookup for the same match must hit the checker's cache."""
    get_version = mocker.patch(
        "raincoat.match.pypi.source.get_current_or_latest_version",
        return_value=(True, "3.7"))
    checker = pypi.PyPIChecker()
    first_key = checker.current_source_key(match)
    assert len(get_version.mock_calls) == 1
    get_version.reset_mock()
    second_key = checker.current_source_key(match)
    assert get_version.mock_calls == []
    assert first_key == second_key
def test_match_source_key(match):
    """match_source_key reflects the pinned (non-current) version."""
    key = pypi.PyPIChecker().match_source_key(match)
    assert key == ("umbrella", "3.2", False)
def test_get_source_installed(mocker):
    """An installed package is read in place instead of being downloaded."""
    source = mocker.patch("raincoat.match.pypi.source")
    source.get_current_path.return_value = "yay/"
    source.open_installed.return_value = {"file_1.py": ["yay"]}
    files = pypi.PyPIChecker().get_source(
        key=pypi.PyPIKey("umbrella", "3.4", True), files=["file_1.py"])
    assert files == {"file_1.py": ["yay"]}
    assert source.get_current_path.mock_calls == [mocker.call("umbrella")]
    assert source.open_installed.mock_calls == [
        mocker.call('yay/', ['file_1.py'])]
def test_get_source_downloaded(mocker):
    """A non-installed package is downloaded into a temp dir and read there."""
    source = mocker.patch("raincoat.match.pypi.source")
    source.open_downloaded.return_value = {"file_1.py": ["yay"]}
    mocker.patch("raincoat.match.pypi.Cleaner.mkdir",
                 return_value="/tmp/clean")
    files = pypi.PyPIChecker().get_source(
        key=pypi.PyPIKey("umbrella", "3.4", False), files=["file_1.py"])
    assert files == {"file_1.py": ["yay"]}
    assert source.download_package.mock_calls == [
        mocker.call('umbrella', '3.4', '/tmp/clean')]
    assert source.open_downloaded.mock_calls == [
        mocker.call('/tmp/clean', ['file_1.py'])]
|
import cv2
import pcl
import pcl.pcl_visualization
import numpy as np
import quaternion
# camera intrinsics (pinhole model parameters)
cx = 325.5
cy = 253.5
fx = 518.0
fy = 519.0
depthScale = 1000.0  # depth images store millimetres; divide to get metres
colorImgs, depthImgs = [], []
pose = []
# read pose.txt -- one camera pose per line, 7 floats: 3 translation values
# followed by 4 quaternion components (order inferred from the indexing
# below: x y z qx qy qz qw -- TODO confirm against pose.txt).
pose = []
with open('pose.txt', 'r') as f:
    for line in f.readlines():
        line = line.replace('\n', '') # remove returns
        line = line.split(' ') # split into 7 items
        vector = []
        for num in line:
            vector.append(float(num))
        vector = np.array(vector)
        # compute Rotation matrix based on quaternion
        quater = quaternion.Quaternion(vector[6], vector[3], vector[4], vector[5])
        R = quater.transformToRotationMatrix()
        # translation matrix
        position = np.array([vector[0], vector[1], vector[2]]) #x, y, z
        trans = quaternion.translation(position)
        # pose matrix
        # T = trans * R # wrong multiplication, should use dot
        T = np.dot(trans, R)
        pose.append(T)
# read color and depth images; back-project every valid pixel to world space
view = []
for i in range(1,6):
    colorImg = cv2.imread(f"color/{i}.png")
    # cv2.imshow(f"Image{i}", colorImg)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    depthImg = cv2.imread(f"depth/{i}.pgm", cv2.IMREAD_UNCHANGED)
    height, width, channel = colorImg.shape
    for v in range(height):
        for u in range(width):
            # get RBG value
            b = colorImg.item(v, u, 0)
            g = colorImg.item(v, u, 1)
            r = colorImg.item(v, u, 2)
            # pack RGB into PointXYZRGB structure, refer to PCL
            # http://pointclouds.org/documentation/structpcl_1_1_point_x_y_z_r_g_b.html#details
            # rgb = int(str(r)+str(g)+str(b))
            rgb = r << 16 | g << 8 | b
            depth = depthImg[v,u]
            if depth == 0:
                continue  # missing depth measurement for this pixel
            # pinhole back-projection: pixel (u, v) + depth -> camera frame
            z = depth / depthScale
            x = (u - cx) * z/fx
            y = (v - cy) * z/fy
            # using homogeneous coordinate
            point = np.array([x, y, z, 1])
            point_world = np.dot(pose[i-1], point)
            # x,y,z,rgb
            # scene = np.insert(point_world, 3, rgb)
            scene = np.array([point_world[0], point_world[1], point_world[2], rgb], dtype=np.float32)
            view.append(scene)
# hand the accumulated points to the PCL viewer and block until it is closed
colorCloud = pcl.PointCloud_PointXYZRGB()
colorCloud.from_list(view)
visual = pcl.pcl_visualization.CloudViewing()
visual.ShowColorCloud(colorCloud, b"cloud")
v = True
while v:
    v = not(visual.WasStopped())
# Demonstrate list.clear(): empties the list in place.
# FIX: renamed the variable -- the original called it `list`, shadowing the
# builtin and breaking any later list(...) call in this module.
fruits = ["apple","banana","cherry","arange","kiwi","melon","mango",]
fruits.clear()
print(fruits)  # -> []
import math
class activationLayer:
    """Shape-preserving layer applying an element-wise activation function.

    The nested classes act as stateless function holders; pass one of them
    as `activationFunc` (defaults to the identity).
    """

    def __init__(self, activationFunc = None):
        print("New Activation Layer")
        # `is None` instead of `== None` (idiomatic identity check).
        if activationFunc is None:
            self.activation = activationLayer.empty
        else:
            self.activation = activationFunc

    def forwardPass(self, inputMat):
        """Apply the configured activation to `inputMat` (a flat list)."""
        return self.activation.forwardPass(inputMat)

    def compile(self, inputShape):
        """Activations never change the tensor shape."""
        return inputShape

    class empty:
        """Identity activation."""
        def forwardPass(inputMat):
            return inputMat

    class relu:
        """max(0, x) element-wise, applied in place."""
        def forwardPass(inputMat):
            for i in range(0,len(inputMat)):
                if inputMat[i] < 0:
                    inputMat[i]= 0
            return inputMat

    class softmax:
        """Standard softmax: exp(x_i) / sum(exp(x)), applied in place.

        BUG FIX: the previous version used math.log1p instead of math.exp,
        so the result was not a softmax distribution (values could be
        negative and did not sum to 1).  Inputs are shifted by max(x) for
        numerical stability; mathematically the result is unchanged.
        """
        def forwardPass(inputMat):
            peak = max(inputMat)
            exps = [math.exp(v - peak) for v in inputMat]
            esum = sum(exps)
            for i in range(0,len(inputMat)):
                inputMat[i] = exps[i] / esum
            return inputMat

    class sigmoid:
        """Logistic function 1/(1+e^-x) element-wise, applied in place."""
        def forwardPass(inputMat):
            for i in range(0,len(inputMat)):
                inputMat[i] = 1 / (1 + math.exp(-inputMat[i]))
            return inputMat
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 5 13:34:57 2021
@author: Gustavo Godoy
email: gustavogodoy85@gmail.com
"""
# Read the text to translate; default to 'Geringoso' on empty input.
sentence = input('Por favor ingrese el texto a convertir en Geringoso: ')
if sentence == '':
    sentence = 'Geringoso'

new_sentence = sentence.split(' ')
a_new_word = ''
for word in new_sentence:
    for letter in word:
        # Geringoso rule: each vowel V becomes "VpV"; consonants copy as-is.
        # BUG FIX: the old code copied consonants via word[i] with an index
        # that was not advanced on vowels, so any word with a vowel before a
        # consonant copied the wrong characters (e.g. 'casa' -> 'capaapa...').
        if letter in 'aeiou':
            a_new_word = a_new_word + letter + 'p' + letter
        else:
            a_new_word = a_new_word + letter
    a_new_word = a_new_word + ' '
print(a_new_word)
|
def _check_mode(mode):
assert mode in ['training', 'inference']
def _check_shape(shape):
'''
Image size must be dividable by 2 multiple times
:param shape: (h, w)
:return: None
'''
h, w = shape
if h / 2 ** 6 != int(h / 2 ** 6) or w / 2 ** 6 != int(w / 2 ** 6):
raise Exception("Image size must be dividable by 2 at least 6 times "
"to avoid fractions when downscaling and upscaling."
"For example, use 256, 320, 384, 448, 512, ... etc. ")
def _check_resnet(resnet):
assert resnet in ['resnet50', 'resnet101'] |
from django.db import models
# here my manager for registration come in action
'''
class Any_Model_Manager(models.Manager):
def search_box(self,keyword):
for x in self.filter(name__icontains=str(keyword)):
print(x)
class Any_Model_Manager2(models.Manager):
def search_acc_gender(self,gender_please):
for x in self.filter(gender=gender_please.lower()):
print(x)
'''
# and it's ends here like manager deaths
class Divide_into_gender(models.Manager):
    """Custom manager whose base queryset contains only male registrations."""
    def get_queryset(self):
        return super().get_queryset().filter(gender='male')
# for page2
class Registration(models.Model):
    """A student's registration record (identity, contact and score)."""
    # Choice pairs: stored value first, human-readable label second.
    choice_field = [
        ('male' , 'Male'),
        ('female' , 'Female'),
    ]
    age_choice = [
        (18,'18'), (19,'19'),
        (20,'20'), (21,'21'),
        (22,'22'), (23,'23'),(24,'24')
    ]
    name = models.CharField(max_length=30)
    age = models.PositiveIntegerField(choices=age_choice)
    gender = models.CharField(max_length=6,choices=choice_field)
    roll = models.CharField(max_length=6)
    email = models.EmailField()
    percent = models.PositiveIntegerField()
    # Default manager plus a filtered one returning only male rows.
    objects = models.Manager()
    objects_two = Divide_into_gender()
    # game changing line
    #objects = Any_Model_Manager()
    # our second model manager
    #objects2 = Any_Model_Manager2()
    def __str__(self):
        # Label rows as "<firstname>-<roll>".
        name_split = self.name.split(' ')
        return str(name_split[0] + '-' +str(self.roll))
# for page3
class Examination(models.Model):
    """Physical-examination results keyed by a roll number.

    NOTE(review): `roll` appears to reference Registration.roll informally
    (no ForeignKey) -- confirm before relying on referential integrity.
    """
    roll = models.CharField(max_length=6)
    weight = models.PositiveIntegerField()
    chest = models.PositiveIntegerField()
    height = models.PositiveIntegerField()
    pull_ups = models.PositiveIntegerField()
    time = models.PositiveIntegerField()
    marks = models.PositiveIntegerField()
    def __str__(self):
        return str(self.roll)+' '+str('Examination')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import smtplib
def enviar(nombre, destinatario):
    """Send a bare greeting e-mail to `destinatario` via Gmail SMTP.

    NOTE(review): `msg` carries no RFC-822 headers (Subject/From/To), so
    most clients will display it with an empty subject.  The credentials
    below are hard-coded in source -- a security risk; move them to
    environment variables or a secrets store.
    """
    msg = '''
Hola %s''' % nombre
    # Datos
    username = 'jose.vergara2104@gmail.com'
    password = 'slqiqlkgyeifcora'
    # Enviando el correo -- STARTTLS upgrade before authenticating.
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.starttls()
    server.login(username, password)
    server.sendmail(username, destinatario, msg)
    server.quit()
#from lib_FAM import load_xl, data_map
## En esta parte cargamos los datos de EXCEL
#datos = load_xl('Acreedores a Curso TESE ISC-2017.xlsx', 'Sheet1', 'C5:D19')
def nombre(dic):
    """Extract (name, e-mail) from a row dict with 'Nombre'/'Correo' keys."""
    return dic['Nombre'], dic['Correo']
#datos = data_map(datos, nombre)
#enviar('enrique', 'ureyesz@outlook.com')
#enviar(nombre.encode('utf-8'), correo.encode('utf-8')) |
import numpy as np
from .base_policy import BasePolicy
class MPCPolicy(BasePolicy):
    """Random-shooting MPC: sample N action sequences, score each one with
    an ensemble of learned dynamics models, then execute the first action
    of the best-scoring sequence.

    FIX: the three TODO stubs left the class unusable -- get_action
    returned None and sample_action_sequences referenced an undefined
    name; both are now implemented per the in-code hints.
    """

    def __init__(self,
                 env,
                 ac_dim,
                 dyn_models,
                 horizon,
                 N,
                 **kwargs
                 ):
        super().__init__(**kwargs)

        # init vars
        self.env = env
        self.dyn_models = dyn_models          # ensemble of dynamics models
        self.horizon = horizon                # planning horizon H
        self.N = N                            # number of candidate sequences
        self.data_statistics = None  # NOTE must be updated from elsewhere

        self.ob_dim = self.env.observation_space.shape[0]

        # action space
        self.ac_space = self.env.action_space
        self.ac_dim = ac_dim
        self.low = self.ac_space.low
        self.high = self.ac_space.high

    def sample_action_sequences(self, num_sequences, horizon):
        """Uniformly sample action sequences inside the action-space box.

        :return: array of shape (num_sequences, horizon, ac_dim) with every
            entry drawn from [self.low, self.high].
        """
        random_action_sequences = np.random.uniform(
            self.low, self.high, size=(num_sequences, horizon, self.ac_dim))
        return random_action_sequences

    def get_action(self, obs):
        """Plan from `obs` and return a single action shaped (1, ac_dim)."""
        if self.data_statistics is None:
            # No data yet to normalize model inputs: act randomly.
            # print("WARNING: performing random actions.")
            return self.sample_action_sequences(num_sequences=1, horizon=1)[0]

        # sample random actions (N x horizon x ac_dim)
        candidate_action_sequences = self.sample_action_sequences(
            num_sequences=self.N, horizon=self.horizon)

        # for each model in ensemble:
        predicted_sum_of_rewards_per_model = []
        for model in self.dyn_models:
            sum_of_rewards = self.calculate_sum_of_rewards(
                obs, candidate_action_sequences, model)
            predicted_sum_of_rewards_per_model.append(sum_of_rewards)

        # calculate mean_across_ensembles(predicted rewards)
        predicted_rewards = np.mean(
            predicted_sum_of_rewards_per_model, axis=0)  # [ens, N] --> N

        # pick the best sequence and return the 1st element of that sequence
        best_action_sequence = candidate_action_sequences[
            np.argmax(predicted_rewards)]
        action_to_take = best_action_sequence[0]
        return action_to_take[None]  # Unsqueeze the first index

    def calculate_sum_of_rewards(self, obs, candidate_action_sequences, model):
        """
        :param obs: numpy array with the current observation. Shape [D_obs]
        :param candidate_action_sequences: numpy array with the candidate action
        sequences. Shape [N, H, D_action] where
            - N is the number of action sequences considered
            - H is the horizon
            - D_action is the action of the dimension
        :param model: The current dynamics model.
        :return: numpy array with the sum of rewards for each action sequence.
        The array should have shape [N].
        """
        num_sequences = candidate_action_sequences.shape[0]
        sum_of_rewards = np.zeros(num_sequences)
        # All N rollouts start from the same current observation; states and
        # actions are processed in batch for speed (per the original hints).
        predicted_obs = np.tile(obs, (num_sequences, 1))
        for t in range(self.horizon):
            actions = candidate_action_sequences[:, t, :]
            # NOTE(review): assumes env.get_reward(obs_batch, ac_batch)
            # returns (rewards, dones) -- confirm against the env API.
            rewards, _ = self.env.get_reward(predicted_obs, actions)
            sum_of_rewards += rewards
            # Batched next-state prediction from the learned dynamics model.
            predicted_obs = model.get_prediction(
                predicted_obs, actions, self.data_statistics)
        return sum_of_rewards
|
import sys, aplpy
import astropy.io.fits as fits
import astropy.wcs
import astropy.visualization as vis
import matplotlib.pyplot as plt, numpy as np
class FIGURE:
    """Wraps a FITS mosaic and overlays the footprints of other FITS frames."""

    def __init__(self, filename):
        # Keep data/header in memory and release the file handle immediately.
        self.filename = filename
        self.raw = fits.open(filename)
        self.data = self.raw[0].data
        self.header = self.raw[0].header
        self.raw.close()
        self.region_corners=[]  # per-region corner polygons in WCS coordinates
        self.regs = []          # the opened region HDU lists themselves

    def add_region(self, filename):
        """Open a FITS frame and record its sky footprint on this figure."""
        reg = fits.open(filename)
        hdr = reg[0].header
        data = reg[0].data
        # CRPIX is the WCS reference pixel -- used as a seed point assumed
        # to lie inside the frame's valid-data patch.
        center = [int(hdr['CRPIX2']),int(hdr['CRPIX1'])]
        corners = self.get_corners(data, center)
        wcs = astropy.wcs.WCS(reg[0].header)
        cornersWCS = wcs.wcs_pix2world(corners,0)
        self.region_corners.append(cornersWCS)
        self.regs.append(reg)

    def get_corners(self, data, center):
        """Walk outward from `center` to the edges of the finite-data patch.

        Returns a closed 5-point polygon (first corner repeated).
        NOTE(review): the names mix axes -- 'top'/'bottom' are found by
        moving the second index and 'left'/'right' the first; verify the
        orientation before relying on the labels.
        """
        mask = np.isfinite(data)
        x,y = center
        while mask[x,y] == 1 and y >0: y -= 1
        top = y
        x,y = center
        while mask[x,y] == 1 and y<(len(mask[0])-1): y +=1
        bottom = y
        x,y = center
        while mask[x,y] == 1 and x>0: x -= 1
        left = x
        x,y = center
        while mask[x,y] == 1 and x<(len(mask)-1): x += 1
        right = x
        return [[top,left],[top,right],[bottom,right],[bottom,left],[top,left]]

    def plotting(self, limits=None):
        """Render the mosaic via aplpy and draw each region's outline.

        :param limits: optional ((ymin, ymax), (xmin, xmax)) pixel window.
        """
        fig=plt.figure()
        stretch = vis.AsinhStretch()
        wcs = astropy.wcs.WCS(self.header)
        #fig.add_subplot(111, projection=wcs)
        #plt.imshow(stretch(self.data), cmap='Greys')
        img = aplpy.FITSFigure(self.filename, figure=fig)
        # NOTE(review): vmin=--0 is just 0 (double unary minus); invert
        # receives the truthy string 'True' rather than a bool.
        img.show_grayscale(stretch='arcsinh', invert='True',vmin=--0, vmax=5)
        #img.show_grayscale(stretch='linear', invert=True)
        #img.show_grayscale(stretch='power', invert='True',vmin=--5, vmax=70)
        img.add_grid()
        img.grid.set_color('black')
        img.set_tick_labels_format(xformat='hh:mm:ss',yformat='dd:mm')
        img.set_tick_labels_font(size='small',family='sans-serif')
        img.set_theme('publication')
        #img.set_system_latex(True)
        # Convert each stored WCS polygon back to this image's pixels and draw it.
        for reg, corners in zip(self.regs, self.region_corners):#for i in range(len(self.regions)):
            pixels = wcs.wcs_world2pix(corners,0)
            plt.plot(pixels[:,0],pixels[:,1], label = 'Channel: ' + str(reg[0].header['CHNLNUM']))
        if limits!=None:
            plt.ylim(limits[0][0], limits[0][1])
            plt.xlim(limits[1][0], limits[1][1])
        plt.legend()
        plt.show()
if __name__=='__main__':
    '''fig1 = FIGURE('../data/forConor/M32_bulge_MIPS_mosaic.fits')
    fig1.add_region('../data/epoch1/ch1/pbcd/SPITZER_I1_53090048_0000_2_E10558994_maic.fits')
    fig1.add_region('../data/epoch1/ch2/pbcd/SPITZER_I2_53090048_0000_2_E10560927_maic.fits')
    '''
    # Overlay every frame listed in fits_filenames.txt on the test mosaic.
    fig1 = FIGURE('../tests/test1.fits')
    prefix = '/media/conor/data/M32var/epoch1/ch1/bcd/'
    print(np.loadtxt('fits_filenames.txt', dtype='str')[0])
    for x in np.loadtxt('fits_filenames.txt', dtype='str'):
        fig1.add_region(prefix+x)
        print(x)
    #../tests/test1.fits')
    #fig1.add_region('../tests/test2.fits')
    #limits = [[3100, 4200],[3400,4800]]
    fig1.plotting()
|
#!/bin/python
import sys
import copy
import re
import math
# Puzzle input comes from the file named on the command line.
infile = open(sys.argv[1], "r")
# Grid side length; presumably reassigned later from the tile count --
# the assignment is not visible in this chunk (TODO confirm).
dim = 0
# Edge indices used throughout: [north, east, south, west].
NORTH = 0
EAST = 1
SOUTH = 2
WEST = 3
class Tile:
    """One square puzzle tile; stores its four border strings.

    Edge order throughout is [north, east, south, west]; north/south read
    left-to-right, east/west read top-to-bottom.
    """

    def __init__(self, name, data):
        """:param name: tile id; :param data: list of equal-length row strings."""
        self.name = name
        self.north = data[0]
        self.south = data[-1]
        self.east = ""
        self.west = ""
        for r in data:
            self.west += r[0]
            self.east += r[-1]

    def rotateEdges(self, edges, rotation):
        """Return `edges` after `rotation` quarter-turns counter-clockwise."""
        north, east, south, west = edges
        if rotation == 0:
            return edges
        elif rotation == 1:
            return [east, south[::-1], west, north[::-1]]
        elif rotation == 2:
            return [south[::-1], west[::-1], north[::-1], east[::-1]]
        elif rotation == 3:
            return [west[::-1], north, east[::-1], south]

    def flipEdges(self, edges, flip):
        """Return `edges` flipped: 0 = unchanged, 1 = horizontal, 2 = vertical."""
        north, east, south, west = edges
        if flip == 0:
            return edges
        elif flip == 1:
            return [north[::-1], west, south[::-1], east]
        elif flip == 2:
            return [south, east[::-1], north, west[::-1]]

    def getAllEdges(self, rotation=0, flip=0):
        # Rotations:
        # 0: as found
        # 1: 90d counter clockwise
        # 2: 180d counter clockwise
        # 3: 270d counter clockwise
        # Flip (after rotation):
        # 0: as found
        # 1: horrizontal
        # 2: vertical
        if rotation == 0 and flip == 0:
            return [self.north, self.east, self.south, self.west]
        return self.flipEdges(self.rotateEdges([self.north, self.east, self.south, self.west], rotation), flip)

    def findEdge(self, cand, whichEdge):
        """Search all orientations for one placing edge `cand` at position
        `whichEdge` (0=N, 1=E, 2=S, 3=W).

        :return: (found, rotation, flip) -- always a 3-tuple.

        ROBUSTNESS FIX: the original fell off the end (implicitly returning
        None) when `cand` or its reverse appeared among the edges but never
        matched at `whichEdge`; callers then crashed unpacking None.
        """
        if cand not in self.getAllEdges() and cand[::-1] not in self.getAllEdges():
            return (False, None, None)
        for rotation in [0, 1, 2, 3]:
            for flip in [0, 1, 2]:
                if cand == self.getAllEdges(rotation, flip)[whichEdge]:
                    return (True, rotation, flip)
        return (False, None, None)
def parseInput(infile):
    """Parse the puzzle input into a list of Tile objects.

    The input is a sequence of records of the form:

        Tile <id>:
        <rows of pixel characters>
        <blank line>

    Raises Exception when a record header does not match the expected
    'Tile <id>:' format.
    """
    line = infile.readline().rstrip()
    allTiles = []
    while line != "":
        lineM = re.match(r"Tile (\d+):", line)
        if not lineM:
            # BUG FIX: the original raised Exception("") with no message,
            # making malformed input impossible to diagnose.
            raise Exception("Expected a 'Tile <id>:' header, got: %r" % line)
        data = []
        line = infile.readline().rstrip()
        # Collect pixel rows until the blank separator line (or EOF).
        while line != "":
            data.append(line)
            line = infile.readline().rstrip()
        t = Tile(int(lineM.group(1)), data)
        allTiles.append(t)
        line = infile.readline().rstrip()
    return allTiles
# Read every tile record from the input file.
allTiles = parseInput(infile)
def printEdges(edges):
    """Pretty-print a tile outline: the full north/south rows, and only
    the first/last character of each interior row (filled with '.')."""
    north, east, south, west = edges
    interior = '.' * (len(north) - 2)
    print(north)
    for row in range(1, len(east) - 1):
        print(west[row] + interior + east[row])
    print(south)
def printGrid(grid):
    """Dump the placement grid: 'name rotation flip' per occupied cell,
    'NA' for an empty cell, one grid row per output line."""
    for row in grid:
        for cell in row:
            if cell is None:
                print("NA\t", end='')
            else:
                tile, rot, flp = cell
                print(tile.name, rot, flp, "\t", end='')
        print("")
def pullEdgeFromGrid(side, grid, x, y):
    """Return the edge string on `side` (0=north, 1=east, 2=south,
    3=west) of the already-placed tile at grid position (x, y)."""
    tile, rotation, flip = grid[y][x]
    return tile.getAllEdges(rotation, flip)[side]
def getNextCoords(x, y):
    """Advance one cell in row-major order: wrap to the start of the
    next row after the last column (dim-1), else move one column right."""
    global dim
    return (0, y + 1) if x == dim - 1 else (x + 1, y)
def gridSolve(grid, remainingTiles, x, y):
    """Recursive backtracking placement of tiles into the grid.

    Tries every remaining tile, in every orientation consistent with
    the already-placed west and/or north neighbours, at cell (x, y);
    recurses to the next cell in row-major order.  Returns the
    completed grid, or None when no tile fits at (x, y).
    """
    grid = copy.copy(grid)
    remainingTiles = copy.copy(remainingTiles)
    if len(remainingTiles) == 0:
        return grid
    nx, ny = getNextCoords(x, y)

    def tryPlace(rt, rotation, flip):
        # Place rt, recurse, and undo the placement on failure.
        # BUG FIX: the original called remainingTiles.remove(rt) while
        # iterating over that same list (which skips candidates) and
        # never restored rt after a failed branch, wrongly excluding it
        # from later candidates in this frame.  Building a fresh list
        # for the recursion leaves the iteration untouched.
        grid[y][x] = (rt, rotation, flip)
        rest = [t for t in remainingTiles if t is not rt]
        solved = gridSolve(grid, rest, nx, ny)
        if solved is None:
            grid[y][x] = None
        return solved

    if x > 0 and y > 0:
        existingVEdge = pullEdgeFromGrid(EAST, grid, x - 1, y)
        existingHEdge = pullEdgeFromGrid(SOUTH, grid, x, y - 1)
        for rt in remainingTiles:
            (retv, rv, fv) = rt.findEdge(existingVEdge, WEST)
            if retv:
                (reth, rh, fh) = rt.findEdge(existingHEdge, NORTH)
                # Both constraints must be met by the same orientation.
                if reth and rv == rh and fv == fh:
                    solved = tryPlace(rt, rv, fv)
                    if solved is not None:
                        return solved
        return None
    elif x > 0:
        # Top row (after the corner): only the west neighbour constrains.
        existingVEdge = pullEdgeFromGrid(EAST, grid, x - 1, y)
        for rt in remainingTiles:
            (retv, rv, fv) = rt.findEdge(existingVEdge, WEST)
            if retv:
                solved = tryPlace(rt, rv, fv)
                if solved is not None:
                    return solved
        return None
    elif y > 0:
        # Left column: only the north neighbour constrains.
        existingHEdge = pullEdgeFromGrid(SOUTH, grid, x, y - 1)
        for rt in remainingTiles:
            (reth, rh, fh) = rt.findEdge(existingHEdge, NORTH)
            if reth:
                solved = tryPlace(rt, rh, fh)
                if solved is not None:
                    return solved
        return None
def initSolver(allTiles):
    """Seed the backtracking search.

    Tries every tile, in every orientation, as the top-left corner and
    asks gridSolve to complete the rest of the grid.  Returns the
    solved grid, or None implicitly when no arrangement works.
    """
    global dim
    # The tiles form a square arrangement.
    dim = int(math.sqrt(len(allTiles)))
    grid = [[None] * dim for _ in range(dim)]
    for corner in allTiles:
        others = [t for t in allTiles if t is not corner]
        for rotation in range(4):
            for flip in range(3):
                grid[0][0] = (corner, rotation, flip)
                result = gridSolve(grid, others, 1, 0)
                if result is not None:
                    return result
# Solve the arrangement and show the resulting placement grid.
solution = initSolver(allTiles)
printGrid(solution)
# Part 1 answer: the product of the four corner tile ids.
print(solution[0][0][0].name * solution[0][dim-1][0].name * solution[dim-1][0][0].name * solution[dim-1][dim-1][0].name)
#n, e, s, w = allTiles[0].getAllEdges(0,0)
#
#for t in allTiles[1:]:
# print(t.name)
# (found, r, f) = t.findEdge(e)
# if found:
# print(t.name, r, f)
#print("You spin me right round")
#printEdges(allTiles[0].getAllEdges(0,0))
#print("")
#printEdges(allTiles[0].getAllEdges(1,0))
#print("")
#printEdges(allTiles[0].getAllEdges(2,0))
#print("")
#printEdges(allTiles[0].getAllEdges(3,0))
#print("")
#print("Flip that bitch")
#printEdges(allTiles[0].getAllEdges(0,1))
#print("")
#printEdges(allTiles[0].getAllEdges(0,2))
#print("") |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.