text
stringlengths 8
6.05M
|
|---|
import numpy as np
class shape:
    """Base geometric primitive: an ordered list of [x, y] coordinates plus
    drawing state (colour and whether the outline closes on itself)."""

    def __init__(self, coordinates, rgba=(0, 0, 0, 1), closed_shape=True):
        # Store everything verbatim; subclasses only specialise construction.
        self.coordinates, self.rgba, self.closed_shape = (
            coordinates, rgba, closed_shape)
class point(shape):
    """A single-point primitive; behaviour comes entirely from shape."""
    pass


class line(shape):
    """A line primitive; behaviour comes entirely from shape."""
    pass


class polygon(shape):
    """A multi-vertex primitive; behaviour comes entirely from shape."""
    pass
class Bezier(shape):
    """Piecewise cubic Bezier curve.

    Control points are consumed in overlapping windows of four (each new
    segment reuses the previous segment's last point) and every segment is
    sampled at parameter increments of `step`; the samples accumulate in
    self.coordinates.
    """

    def __init__(self, coordinates, step, rgba=(0, 0, 0, 1)):
        # The original built a 4x4 Bernstein basis matrix (`mb`) here on every
        # construction but never used it — the basis polynomials are expanded
        # by hand in calculate_point — so it has been removed.
        self.coordinates = []
        points = None
        for i in range(1, len(coordinates), 3):
            points = [coordinates[x] for x in range(i - 1, i + 3)]
            self.calculate_curve(points, step)
        self.rgba = rgba
        self.closed_shape = False
        # Kept for backward compatibility: the last control-point window.
        # It is now None (instead of a NameError) when fewer than four
        # coordinates were supplied.
        self.points = points

    def calculate_curve(self, points, step):
        """Sample one cubic segment for t in [0, 1] at increments of `step`."""
        t = 0
        while t <= 1:
            self.coordinates.append([
                self.calculate_point(t, points, 0),
                self.calculate_point(t, points, 1),
            ])
            t += step

    def calculate_point(self, t, points, point_index):
        """Evaluate one axis (0 = x, 1 = y) of the cubic Bernstein polynomial
        at parameter t for the four control points in `points`."""
        return (points[0][point_index] * (-(t * t * t) + 3 * t * t - 3 * t + 1) +
                points[1][point_index] * (3 * t * t * t - 6 * t * t + 3 * t) +
                points[2][point_index] * (-3 * t * t * t + 3 * t * t) +
                points[3][point_index] * (t * t * t))
class Bspline(shape):
    """Uniform cubic B-spline sampled with forward differences.

    Every consecutive window of four control points contributes one segment;
    each segment is stepped n_points + 1 times with parameter step
    1 / n_points, and the samples accumulate in self.coordinates.
    """

    def __init__(self, control_points, rgba=(0, 0, 0, 1)):
        self.rgba = rgba
        self.closed_shape = False
        xs = np.array([p[0] for p in control_points], dtype=float)
        ys = np.array([p[1] for p in control_points], dtype=float)
        self.coordinates = []
        for start in range(len(control_points) - 3):
            # Cubic polynomial coefficients for this segment.
            coeff_x = self.bspline_matrix() @ xs[start:start + 4]
            coeff_y = self.bspline_matrix() @ ys[start:start + 4]
            n_points = len(control_points)
            # Forward-difference vectors for step size 1 / n_points.
            fwd_x = self.fd_matrix(1.0 / n_points) @ coeff_x
            fwd_y = self.fd_matrix(1.0 / n_points) @ coeff_y
            for _ in range(n_points + 1):
                # Current sample is the zeroth forward-difference entry.
                self.coordinates.append([fwd_x[0], fwd_y[0]])
                # Advance each difference by the next-higher one (shifted,
                # padded with 0) — classic forward-difference stepping.
                fwd_x = fwd_x + np.append(fwd_x[1:], 0)
                fwd_y = fwd_y + np.append(fwd_y[1:], 0)

    def bspline_matrix(self):
        """The standard uniform cubic B-spline basis matrix (divided by 6)."""
        return np.array(
            [
                -1, 3, -3, 1,
                3, -6, 3, 0,
                -3, 0, 3, 0,
                1, 4, 1, 0
            ],
            dtype=float
        ).reshape(4, 4) / 6

    def fd_matrix(self, delta):
        """Matrix that turns cubic coefficients into initial forward
        differences for parameter step `delta`."""
        return np.array(
            [
                0, 0, 0, 1,
                delta**3, delta**2, delta, 0,
                6 * delta**3, 2 * delta**2, 0, 0,
                6 * delta**3, 0, 0, 0,
            ],
            dtype=float
        ).reshape(4, 4)
if __name__ == '__main__':
    # BUG FIX: `rectangle` was never defined anywhere in this module, so this
    # demo crashed with a NameError. A rectangle is simply a four-vertex
    # polygon, so build it with the polygon class instead.
    rect = polygon([[0, 0], [1, 0], [1, 1], [0, 1]])
    for coordinate in rect.coordinates:
        print(str(coordinate[0]) + str(coordinate[1]))
    pol = polygon([[0, 0], [1, 0], [1, 1], [0, 1], [-1, -1]])
    for coordinate in pol.coordinates:
        print(str(coordinate[0]) + str(coordinate[1]))
    pt = point([[0, 0]])
    for coordinate in pt.coordinates:
        print(str(coordinate[0]) + str(coordinate[1]))
|
# Entry-point script: construct a FragTracker and run it.
import frag_tracker

tracker = frag_tracker.FragTracker()
tracker.execute()
|
import os
from datetime import time
from html_dloader import HtmlDLoader
class HtmlOutputer:
    """Persists downloaded HTML content and images to disk."""

    def __init__(self):
        self.dLoader = HtmlDLoader()

    def output_cont(self, title, cont):
        """Write `cont` (UTF-8 encoded) to '<title>.txt'.

        Falls back to the current UNIX timestamp as the file name when no
        title is given.
        """
        if title is None:
            # BUG FIX: the module-level `time` is datetime.time, which has no
            # time() function; import the stdlib time module locally instead.
            import time as _time
            title = _time.time()
        # Context manager ensures the handle is closed (the original leaked it).
        with open('%s.txt' % title, 'w') as fout:
            fout.write("%s" % cont.encode('utf-8'))

    def output_img(self, title, url, index=None):
        """Download `url` and save it as '<title>/<index>.jpg'; prints the
        failing URL (best-effort) instead of propagating download errors."""
        filename = os.path.join(title, str(index) + '.jpg')
        try:
            with open(filename, 'wb') as f:
                f.write(self.dLoader.download(url))
        except Exception:
            # Narrowed from a bare `except:`; version-portable print call.
            print("srcurl is %s, error" % url)
|
import argparse
import os
import shutil
import sys
import zipfile
#getAll()-It is a generator that's looking for all the files and directories from 'root' and will exclude
#the files that have the prefix == 'exclude'.
#getAll()-It is a generator that's looking for all the files and directories from 'root' and will exclude
#the files that have the prefix == 'exclude'.
def getAll(root, exclude):
    """Yield the path of every file under `root` (recursively) whose name
    does not start with the single character `exclude`."""
    for entry in os.listdir(root):
        full = os.path.join(root, entry)
        if os.path.isfile(full):
            # Only the first character of the file name is compared.
            if entry[0] != exclude:
                yield full
        elif os.path.isdir(full):
            yield from getAll(full, exclude)
#Copies everything(havin the same structure) from 'rootSource' to 'rootDestination'
#without the files that have the prefix=='exclude'
#Copies everything(havin the same structure) from 'rootSource' to 'rootDestination'
#without the files that have the prefix=='exclude'
def copyFromSourceToDestination(rootSource, rootDestination, exclude):
    """Copy every file under rootSource into rootDestination, preserving the
    directory structure and skipping files whose name starts with `exclude`.

    BUG FIX: the original split paths on a literal '\\', which only works on
    Windows — on POSIX every file collapsed into one component and the tree
    structure was lost. os.walk + os.path.relpath is portable.
    """
    for dirpath, _dirnames, filenames in os.walk(rootSource):
        for fname in filenames:
            if fname[0] == exclude:
                continue  # same semantics as before: first character match
            rel_dir = os.path.relpath(dirpath, rootSource)
            dst_dir = (rootDestination if rel_dir == '.'
                       else os.path.join(rootDestination, rel_dir))
            if not os.path.isdir(dst_dir):
                os.makedirs(dst_dir)
            shutil.copyfile(os.path.join(dirpath, fname),
                            os.path.join(dst_dir, fname))
    print("Everything was copied from " + rootSource + " to " + rootDestination)
#make a zip from a folder('source') , with the name of the zip given in the first argument('name')
#make a zip from a folder('source') , with the name of the zip given in the first argument('name')
def makeZip(name, source, exclude):
    """Create a deflated zip archive `name` containing every file under
    `source`, skipping files whose name starts with `exclude`.

    Uses a context manager so the archive is closed (and flushed) even when
    writing a member fails; walks the tree portably instead of depending on
    the Windows-only path splitting of the old getAll pipeline.
    """
    with zipfile.ZipFile(name, 'w', zipfile.ZIP_DEFLATED) as zf:
        for dirpath, _dirnames, filenames in os.walk(source):
            for fname in filenames:
                if fname[0] != exclude:
                    zf.write(os.path.join(dirpath, fname))
    print("Zip was createad.")
if __name__ == "__main__":
parser=argparse.ArgumentParser();
parser.add_argument("copy_zip", help="Decide if you what to copy from Source to Destination or to Zip one file(write 'copy' or 'zip)'")
parser.add_argument("Source", help="The source folder")
parser.add_argument("Destination",help="The destination folder")
parser.add_argument("Exclude",help="What kind of file to exclude")
parser.add_argument("Zip_name")
args=parser.parse_args()
if(args.copy_zip == "copy"):
copyFromSourceToDestination(args.Source,args.Destination,args.Exclude)
else:
makeZip(args.Zip_name,args.Source,args.Exclude)
|
import exputils
import autodisc as ad
import numpy as np
import os
def calc_statistic_space_representation(repetition_data):
    """Compute the learned behaviour-space representation of every run in
    every repetition and return it as {'data': ndarray}."""
    # Load the pretrained representation network.
    config = ad.representations.static.PytorchNNRepresentation.default_config()
    config.initialization.type = 'load_pretrained_model'
    config.initialization.load_from_model_path = '../../../post_train_analytic_behavior_space/training/training/models/best_weight_model.pth'
    model = ad.representations.static.PytorchNNRepresentation(config)
    # One inner list of representations per repetition.
    data = [
        [model.calc(run_data.observations, run_data.statistics)
         for run_data in rep_data]
        for rep_data in repetition_data.values()
    ]
    # Ensure a leading repetition axis even for one-dimensional results.
    data = np.array([data]) if len(np.shape(data)) == 1 else np.array(data)
    return {'data': data}
def calc_parameter_initstate_space_representation(repetition_data):
    """Compute the learned parameter/initial-state representation of every
    run in every repetition and return it as {'data': ndarray}."""
    # Load the pretrained representation network.
    config = ad.representations.static.PytorchNNRepresentation.default_config()
    config.initialization.type = 'load_pretrained_model'
    config.initialization.load_from_model_path = '../../../post_train_analytic_parameter_space/training/training/models/best_weight_model.pth'
    model = ad.representations.static.PytorchNNRepresentation(config)
    data = []
    for rep_data in repetition_data.values():
        reps = []
        for run_data in rep_data:
            # The representation only uses the final observation, so hand the
            # model just the first state (the CPPN-generated image) packed as
            # a single-timepoint observation sequence.
            first_obs = dict(states=[run_data.observations['states'][0]],
                             timepoints=[0])
            reps.append(model.calc(first_obs))
        data.append(reps)
    # Ensure a leading repetition axis even for one-dimensional results.
    data = np.array([data]) if len(np.shape(data)) == 1 else np.array(data)
    return {'data': data}
def load_data(repetition_directories):
    """Load each repetition's exploration data (without observations) into a
    dict keyed by the repetition id parsed from its folder name."""
    data = dict()
    for rep_dir in sorted(repetition_directories):
        # Repetition id = first number embedded in the folder name.
        rep_id = [int(tok) for tok in os.path.basename(rep_dir).split('_')
                  if tok.isdigit()][0]
        # Load the full explorer but keep the observation memory minimal.
        dh_config = ad.ExplorationDataHandler.default_config()
        dh_config.memory_size_observations = 1
        rep_data = ad.ExplorationDataHandler.create(
            config=dh_config,
            directory=os.path.join(rep_dir, 'results'))
        rep_data.load(load_observations=False, verbose=True)
        data[rep_id] = rep_data
    return data
if __name__ == '__main__':
    experiments = '.'
    # (statistic name, statistic function) pairs evaluated over repetitions.
    statistics = [
        ('statistic_space_representation', calc_statistic_space_representation),
        ('parameter_initstate_space_representation', calc_parameter_initstate_space_representation),
    ]
    exputils.calc_statistics_over_repetitions(
        statistics, load_data, experiments,
        recalculate_statistics=False, verbose=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 07/02/2018 6:21 PM
# @Author : Lee
# @File : binary_search.py
# @Software: PyCharm
def binary_search(lists, target):
    """Iterative binary search over the sorted sequence `lists`.

    Returns the index of `target`, or -1 when it is not present.
    """
    lo, hi = 0, len(lists) - 1
    while lo <= hi:
        mid = (lo + hi) // 2  # equivalent to lo + (hi - lo) // 2 here
        if lists[mid] == target:
            return mid
        if lists[mid] < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
def binary_search2(lists, target, *args):
    """Recursive binary search over the sorted sequence `lists`.

    Call it with just (lists, target); the two optional positional args carry
    the (low, high) bounds used internally by the recursion. Returns the
    index of `target`, or -1 when absent.

    BUG FIX: the original had no base case for an exhausted window, so a
    missing target caused unbounded recursion (RecursionError) and an empty
    list raised IndexError; both now return -1.
    """
    if not args:
        return binary_search2(lists, target, 0, len(lists) - 1)
    l, r = args[0], args[1]
    if l > r:
        # Search window exhausted: the target is not in the list.
        return -1
    mid = l + (r - l) // 2
    if lists[mid] == target:
        return mid
    if lists[mid] < target:
        return binary_search2(lists, target, mid + 1, r)
    return binary_search2(lists, target, l, mid - 1)
if __name__ == '__main__':
    # Quick smoke test of both search implementations.
    test_lists = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    print(binary_search(test_lists, 6))
    print(binary_search2(test_lists, 8))
|
import time
from goto import with_goto
from temp import *
def open_dairy(dairy):
    """Interactive read/append loop over the user's encrypted diary file.

    Menu: 1 = decrypt and print all entries, 2 = encrypt and append a new
    entry, 3 = exit. The file is opened in 'a+' so it is created on first use.
    """
    # Context manager guarantees the file closes even if input() raises;
    # the original's f.close() was skipped on any exception.
    with open(f"{dairy}.txt", "a+") as f:
        ch = 0
        while ch != 3:
            ch = int(input("\nWelcome to your dairy:\n\t1.Read \n\t2.Write \n\t3.Exit\n\t\t"))
            if ch == 1:
                f.seek(0)
                for line in f.readlines():
                    print(decrypt(line))
            elif ch == 2:
                text = encrypt(str(input("Enter your text: \n")))
                f.write(f" \n{text}")
            # BUG FIX: the original's `elif ch==3: exit` only evaluated the
            # name `exit` without calling it — the loop condition already
            # terminates on 3, so the dead branch is removed.
def login_check():
    """Prompt for credentials and validate them against Login.txt.

    Returns (encrypted_username, status) with status 11 = login ok,
    10 = user found but wrong password, 0 = unknown user.

    BUG FIX: the original returned "unknown user" as soon as the FIRST stored
    account did not match, so only one account was ever checked, and its
    l.close() after the returning loop was unreachable. All accounts are now
    scanned and the file is closed via a context manager.
    """
    u = encrypt(str(input("\n\nEnter username: ")))
    p = encrypt(str(input("Enter password: ")))
    with open("Login.txt", "r") as l:
        for line in l.readlines():
            fields = line.split("\n")[0].split(",")
            if u == fields[0]:
                return (u, 11) if p == fields[1] else (u, 10)
    return u, 0
def add_user():
    """Prompt for a new username/password and append it to Login.txt,
    re-prompting while the (encrypted) username is already taken.

    BUG FIX: the original wrote the new account as soon as the FIRST stored
    line had a different username — so a duplicate further down the file was
    written anyway and then reported as "already taken". All existing
    usernames are now collected before deciding. The goto/label retry is
    replaced by a plain loop.
    """
    while True:
        u = encrypt(str(input("\nEnter username: ")))
        p = encrypt(str(input("Enter password: ")))
        with open("Login.txt", "a+") as l1:
            l1.seek(0)
            taken = {line.split("\n")[0].split(",")[0]
                     for line in l1.readlines()}
            if u in taken:
                print("\tUsername is already taken.")
                continue
            l1.write(f"\n{u},{p}")
        print("\tUser Added Sucessfully")
        return
def start():
    """Entry menu: authenticate the user (with retry / registration options)
    and open their diary on success."""
    print("\n\n\t\t🔥🔥🔥Welcome to The Secured Dairy🔥🔥🔥\n\n")
    time.sleep(2)
    # Rewritten from goto/label into a plain retry loop — same control flow.
    while True:
        u, check = login_check()
        if check == 10:  # user found, wrong password
            print("\tYour entered password is wrong")
            c = int(input("\nPress 0 to retry OR 1 to exit: "))
            if c == 0:
                continue
            elif c == 1:
                exit  # NOTE: bare name as in the original — not a call
            else:
                print("Enter correct option")
            return
        elif check == 0:  # no matching user found
            print("\tNo user Found")
            c = int(input("Press 0 to retry OR 1 To add user OR 2 To exit: "))
            if c == 0:
                continue
            elif c == 1:
                add_user()
                continue
            elif c == 2:
                exit  # NOTE: bare name as in the original — not a call
            return
        elif check == 11:  # credentials matched
            print("\n\tLogin Sucessfully")
            open_dairy(u)
        return
if __name__ == "__main__":
start()
|
# Loan approval: the monthly installment may not exceed 30% of the buyer's
# salary.
x = float(input('Qual o valor da casa? '))
y = float(input('Qual o salário do comprador? '))
z = int(input('Em quantos anos vai ser pago? '))
tp = (x / z) / 12      # monthly installment
v = (30 * y) / 100     # 30% of the salary
if tp > v:
    print('Pra pagar uma casa de R${} em {} anos, a prestação será de R${:.2f}'.format(x, z, tp))
    print('Empréstimo NEGADO')
else:
    # BUG FIX: the original used `elif tp < v`, so an installment EXACTLY
    # equal to 30% of the salary printed nothing at all; `else` covers it.
    print('Pra pagar uma casa de R$ {} em {} anos, a prestação será de R${:.2f}'.format(x, z, tp))
    print('Empréstimo CONCEDIDO')
|
#-*-coding:utf-8-*-
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
from bisect import bisect_right
import torch
import torch.optim.lr_scheduler as lr_scheduler
import torch
import torchvision
import matplotlib.pyplot as plt
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
# FIXME ideally this would be achieved with a CombinedLRScheduler,
# separating MultiStepLR with WarmupLR
# but the current LRScheduler design doesn't allow it
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """MultiStepLR with an initial warmup phase.

    During the first `warmup_iters` epochs the base learning rates are scaled
    by a warmup factor ('constant': fixed `warmup_factor`; 'linear':
    interpolated from `warmup_factor` up to 1). Afterwards the usual
    multi-step decay `gamma ** #milestones_passed` applies.
    """

    def __init__(
        self,
        optimizer,
        milestones,
        gamma=0.1,
        warmup_factor=1 / 3,
        warmup_iters=100,
        warmup_method="linear",
        last_epoch=-1,
    ):
        if not list(milestones) == sorted(milestones):
            # BUG FIX: the original passed `milestones` as a second positional
            # argument to ValueError and never formatted the '{}'.
            raise ValueError(
                "Milestones should be a list of increasing integers."
                " Got {}".format(milestones)
            )
        if warmup_method not in ("constant", "linear"):
            # BUG FIX: the original message ran 'accepted' and 'got' together.
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method)
            )
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Learning rate per param group for the current `last_epoch`."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            elif self.warmup_method == "linear":
                # Interpolate from warmup_factor (epoch 0) to 1 (end of warmup).
                alpha = self.last_epoch / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        return [
            base_lr
            * warmup_factor
            * self.gamma ** bisect_right(self.milestones, self.last_epoch)
            for base_lr in self.base_lrs
        ]
def get_scheduler(config, optimizer):
    """Build the learning-rate scheduler selected by config.TRAIN.SCHEDULER:
    either a plain MultiStepLR or the warmup variant defined above."""
    sched_cfg = config.TRAIN.SCHEDULER
    if sched_cfg.IS_MultiStepLR:
        return lr_scheduler.MultiStepLR(
            optimizer,
            milestones=sched_cfg.MultiStepLR.LR_STEP,
            gamma=sched_cfg.MultiStepLR.LR_FACTOR,
            last_epoch=-1)
    warm_cfg = sched_cfg.WarnUpLR
    return WarmupMultiStepLR(
        optimizer=optimizer,
        milestones=warm_cfg.milestones,
        gamma=warm_cfg.gamma,
        warmup_factor=warm_cfg.warmup_factor,
        warmup_iters=warm_cfg.warmup_iters,
        warmup_method=warm_cfg.warmup_method,
        last_epoch=-1)
if __name__ == "__main__":
optimizer = torch.optim.Adam(torchvision.models.resnet18(pretrained=False).parameters(), lr=0.01)
scheduler = WarmupMultiStepLR(optimizer=optimizer,
milestones=[20,80],
gamma=0.1,
warmup_factor=0.1,
warmup_iters=2,
warmup_method="linear",
last_epoch=-1)
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
x=[]
y=[]
for epoch in range(120):
scheduler.step()
y.append(scheduler.get_lr())
x.append(epoch)
print(y)
plt.plot(x,y)
plt.show()
|
#!/usr/bin/env python
"""Parse the YAML file named on the command line and print the result."""
import yaml
import sys

# Context manager closes the file deterministically; the original leaked the
# handle returned by open(). safe_load (already used) avoids executing
# arbitrary tags from untrusted input.
with open(sys.argv[1]) as fh:
    print(yaml.safe_load(fh))
|
#!/usr/bin/python
import sys
def filestats(f):
lines = 0
words = 0
chars = 0
for line in f:
lines += 1
w = line.split()
words += len(w)
for word in w:
chars += len(list(word))
print 'Lines:', lines
print 'Words:', words
print 'Characters:', chars
def main():
    """Repeatedly prompt for a filename and print its stats; 'q' quits.
    (Python 2 script.)"""
    while True:
        filename = raw_input('Enter name of file (q to quit): ')
        if filename == 'q':
            break
        filestats(open(filename, 'r'))

if (__name__ == '__main__'):
    main()
|
from datetime import datetime,timedelta
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from garmin.models import UserGarminDataSleep
from quicklook.tasks import generate_quicklook
class Command(BaseCommand):
    '''
    Management command to generate raw data report for users by
    providing list of email address or usernames for provided
    time duration.
    Example Usages:
    1) Generate report for user(s) for current day using username.
    A list of username is also supported for generating report for
    multiple user at once. Usernames should be separated by white space
    $ python manage.py generaterawdatareport --username naruto copy-ninja
    2) Generate report for user for current day using email address.
    A list of email addresses is also supported for generating report
    for multiple user at once. Addressed should be separated
    by white space
    $ python manage.py generaterawdatareport --email naruto@example.com \
    copy_ninja@example.com
    3) Generate report for every user in the database by providing
    '--all' flag.
    $ python manage.py generaterawdatareport --all
    4) Generate report for user(s) for certain time duration.
    $ python manage.py generaterawdatareport --username naruto \
    --duration 2018-06-01 2018-06-10
    The duration argument takes two date string in YYYY-MM-DD format.
    First date should be start date form which reports has to be
    created and second should be end date up to which report has
    to be created.
    5) Generate report from the date user have Garmin health data by
    providing --origin or -o flag. So for example, if the user have
    health data from April 1, 2018 then report following command
    generate report from April 1, 2018 to current date.
    $ python manage.py generaterawdatareport --username naruto \
    --origin
    6) Similarly using --yesterday, --week, --month, --year flags reports
    for yesterday, last 7 days, last 30 days and last 365 days can
    be generated
    7) Ignoring email address using --ignore or -i command. This takes a list
    of email addresses and reports for those users will not be generated
    $ python manage.py generaterawdatareport --all --origin \
    --ignore copy_ninja@example.com
    Note:
    1) If any combination of these arguments --email, --username and
    --all is provided in that case report will be generated on
    the basis of following argument priority -
    --all > --username > --email
    2) Providing at least one of the following argument is necessary -
    --all, --email, --username
    3) If any combination of following arguments is provided -
    --origin, --year, --month, --week, --yesterday, --duration
    then report will be generated on the basis of following
    argument priority -
    --origin > --year > --month > --week > --yesterday > --duration
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Defaults: select accounts by email, report for the current day.
        self.accnt_slection_flag = 'email'
        self.date_range_flag = 'today'

    # Short description shown by `manage.py help`.
    help = 'Generate Raw data report'

    def _get_flags(self):
        '''
        Flags and their priority among group.
        Each tuple have following meta data- (dest, short flag name,
        verbose flag name, priority)
        Group 1 = [email, username, all]
        Group 2 = [duration, yesterday, week, month, year]
        Lower priority value is given more preference
        '''
        flags = {
            'email': ('email', '-e', '--email', 3),
            'username': ('username', '-u', '--username', 2),
            'all': ('all', '-a', '--all', 1),
            'duration': ('duration', '-d', '--duration', 6),
            'yesterday': ('yesterday', '-y', '--yesterday', 5),
            'week': ('week', '-w', '--week', 4),
            'month': ('month', '-m', '--month', 3),
            'year': ('year', '-Y', '--year', 2),
            'origin': ('origin', '-o', '--origin', 1)
        }
        return flags

    def _get_origin_date(self, user):
        '''
        Get the date of the oldest sleep summaries a user have. This gives the
        rough idea from when user have health API data.
        '''
        last_record = UserGarminDataSleep.objects.filter(user=user).order_by('id')
        if last_record.exists():
            start_time = datetime.utcfromtimestamp(last_record[0].start_time_in_seconds)
            return start_time
        else:
            return None

    def _validate_options(self, options):
        '''
        Validate the provided options
        '''
        # At least one flag from either group must be present.
        no_flags = True
        flags = self._get_flags()
        for f in flags.values():
            if options[f[0]]:
                no_flags = False
                break
        if no_flags:
            raise CommandError("No arguments are provided. Type -h or --help for more information")
        if not options['email'] and not options['all'] and not options['username']:
            raise CommandError("Provide either --email or --username or --all")
        if options['all']:
            # give "all" flag more preference over anything
            self.accnt_slection_flag = 'all'
        elif options['username']:
            # give "username" flag more preference over "email"
            self.accnt_slection_flag = 'username'
        # if more than one flag from Group 2 is provided then
        # use flags with most priority (lowest value)
        flags.pop('email')
        flags.pop('all')
        flags.pop('username')
        common_flags = set([f for f in flags.keys()]).intersection(
            set(list(filter(lambda x: options[x], [o for o in options.keys()])))
        )
        for f in common_flags:
            if self.date_range_flag and self.date_range_flag != 'today':
                # Keep whichever provided flag has the lower priority value.
                if flags.get(f)[3] < flags.get(self.date_range_flag)[3]:
                    self.date_range_flag = f
            else:
                self.date_range_flag = f
        return True

    def add_arguments(self, parser):
        # Register every flag defined in _get_flags() plus --ignore.
        flags = self._get_flags()
        parser.add_argument(
            flags.get('email')[2],
            flags.get('email')[1],
            nargs='+',
            type=str,
            dest=flags.get('email')[0],
            help="Email(s)"
        )
        parser.add_argument(
            flags.get('username')[2],
            flags.get('username')[1],
            nargs='+',
            type=str,
            dest=flags.get('username')[0],
            help="Username(s)"
        )
        parser.add_argument(
            flags.get('all')[2],
            flags.get('all')[1],
            action='store_true',
            dest=flags.get('all')[0],
            help='Generate Raw Data Report for all user'
        )
        parser.add_argument(
            flags.get('duration')[2],
            flags.get('duration')[1],
            type=str,
            nargs=2,
            dest=flags.get('duration')[0],
            help='Range of date [from, to] eg "-d 2017-11-01 2017-11-10"'
        )
        parser.add_argument(
            flags.get('yesterday')[2],
            flags.get('yesterday')[1],
            action='store_true',
            dest=flags.get('yesterday')[0],
            help='Create report for yesterday'
        )
        parser.add_argument(
            flags.get('week')[2],
            flags.get('week')[1],
            action='store_true',
            dest=flags.get('week')[0],
            help='Create report for last 7 days (not including today)'
        )
        parser.add_argument(
            flags.get('month')[2],
            flags.get('month')[1],
            action='store_true',
            dest=flags.get('month')[0],
            help='Create report for last 30 days (not including today)'
        )
        parser.add_argument(
            flags.get('year')[2],
            flags.get('year')[1],
            action='store_true',
            dest=flags.get('year')[0],
            help='Create report for last 365 days (not including today)'
        )
        parser.add_argument(
            flags.get('origin')[2],
            flags.get('origin')[1],
            action='store_true',
            dest=flags.get('origin')[0],
            help='Create report from date of first health data received (including today)'
        )
        parser.add_argument(
            '--ignore',
            '-i',
            nargs='+',
            type=str,
            dest='ignore',
            help='Email(s) to ignore'
        )

    def _generate_raw_data_reports(self, user_qs, options, from_date, to_date):
        '''
        Generated reports for users from 'from_date' to 'to_date'
        Args:
        user_qs (`obj`: Queryset): A queryset of users
        options: Dictionary of Command line arguments
        from_date (string): Start date from which report has to be created
        to_date (string): End date up to which report has to be created
        '''
        for user in user_qs:
            if (not options['ignore']
                    or (options['ignore'] and user.email not in options['ignore'])):
                self.stdout.write(self.style.WARNING(
                    '\nCreating Raw data report for user "%s"' % user.username)
                )
                if self.date_range_flag == 'origin':
                    # Report from the user's oldest health record to today.
                    date_of_oldest_record = self._get_origin_date(user)
                    if date_of_oldest_record:
                        from_date = date_of_oldest_record.strftime("%Y-%m-%d")
                        to_date = datetime.now().strftime("%Y-%m-%d")
                        generate_quicklook(user.id, from_date, to_date)
                    else:
                        self.stdout.write(self.style.ERROR(
                            '\nNo health record found for user "%s"' % user.username)
                        )
                        continue
                # NOTE(review): for the 'origin' flag with a found record the
                # call above AND this call both run, generating the report
                # twice for that user — verify whether the second call was
                # meant to be skipped for the origin path.
                generate_quicklook(user.id, from_date, to_date)

    def _get_update_date_range(self, options):
        '''
        Return 'from_date' and 'to_date' based on provided flag
        '''
        today = datetime.now().date()
        yesterday = today - timedelta(days=1)
        # Default ('today'): both endpoints are the current date.
        from_date = datetime.now().strftime("%Y-%m-%d")
        to_date = datetime.now().strftime("%Y-%m-%d")
        if self.date_range_flag == 'duration':
            to_date = options['duration'][1]
            from_date = options['duration'][0]
        elif self.date_range_flag == 'yesterday':
            to_date = yesterday.strftime("%Y-%m-%d")
            from_date = yesterday.strftime("%Y-%m-%d")
        elif self.date_range_flag == 'week':
            td = timedelta(days=7)
            to_date = yesterday.strftime("%Y-%m-%d")
            from_date = (today - td).strftime("%Y-%m-%d")
        elif self.date_range_flag == 'month':
            td = timedelta(days=30)
            to_date = yesterday.strftime("%Y-%m-%d")
            from_date = (today - td).strftime("%Y-%m-%d")
        elif self.date_range_flag == 'year':
            td = timedelta(days=365)
            to_date = yesterday.strftime("%Y-%m-%d")
            from_date = (today - td).strftime("%Y-%m-%d")
        return (from_date, to_date)

    def handle(self, *args, **options):
        # Entry point: validate flags, resolve the date range, then generate
        # reports for the selected account set.
        if self._validate_options(options):
            from_date, to_date = self._get_update_date_range(options)
            if self.accnt_slection_flag == 'email':
                emails = [e for e in options['email']]
                user_qs = User.objects.filter(email__in=emails)
                self._generate_raw_data_reports(
                    user_qs, options, from_date, to_date
                )
            elif self.accnt_slection_flag == 'username':
                usernames = [username for username in options['username']]
                user_qs = User.objects.filter(username__in=usernames)
                self._generate_raw_data_reports(
                    user_qs, options, from_date, to_date
                )
            else:
                user_qs = User.objects.all()
                self._generate_raw_data_reports(user_qs,
                                                options, from_date, to_date
                                                )
|
import win32com.client
import pythoncom
class XASessionEvents:
    """COM event sink for XA_Session: records login completion in a class-level
    flag that the main loop polls while pumping messages."""

    # 0 = waiting for the server, 1 = login confirmed.
    logInState = 0

    def OnLogin(self, code, msg):
        print("OnLogin method is called")
        print(str(code))
        print(str(msg))
        # '0000' is the server's success code.
        if str(code) == '0000':
            XASessionEvents.logInState = 1

    def OnLogout(self):
        print("OnLogout method is called")

    def OnDisconnect(self):
        print("OnDisconnect method is called")
class XAQueryEvents:
    """COM event sink for XA_DataSet queries: flags reply arrival in a
    class-level state the main loop polls."""

    # 0 = no reply yet, 1 = data received.
    queryState = 0

    def OnReceiveData(self, szTrCode):
        print("ReceiveData")
        XAQueryEvents.queryState = 1

    def OnReceiveMessage(self, systemError, messageCode, message):
        print("ReceiveMessage")
if __name__ == "__main__":
server_addr = "hts.ebestsec.co.kr"
server_port = 20001
server_type = 0
user_id = "songdh10"
user_pass ="gusdl57"
user_certificate_pass="gusdlsla57"
inXASession = win32com.client.DispatchWithEvents("XA_Session.XASession", XASessionEvents)
inXASession.ConnectServer(server_addr, server_port)
inXASession.Login(user_id, user_pass, user_certificate_pass, server_type, 0)
while XASessionEvents.logInState == 0:
pythoncom.PumpWaitingMessages()
nCount = inXASession.GetAccountListCount()
print("The number of account: ", nCount)
#--------------------------------------------------------------------------
# Get single data
#--------------------------------------------------------------------------
inXAQuery = win32com.client.DispatchWithEvents("XA_DataSet.XAQuery", XAQueryEvents)
inXAQuery.LoadFromResFile("C:\\eBEST\\xingAPI\\Res\\t8413.res")
inXAQuery.SetFieldData('t8413InBlock', 'shcode', 0, '099220')
inXAQuery.SetFieldData('t8413InBlock', 'gubun', 0, '2')
inXAQuery.SetFieldData('t8413InBlock', 'sdate', 0, '20140101')
inXAQuery.SetFieldData('t8413InBlock', 'edate', 0, '20160118')
inXAQuery.SetFieldData('t8413InBlock', 'comp_yn', 0, 'N')
inXAQuery.Request(0)
while XAQueryEvents.queryState == 0:
pythoncom.PumpWaitingMessages()
# Get FieldData
nCount = inXAQuery.GetBlockCount('t8413OutBlock1')
for i in range(nCount):
print(inXAQuery.GetFieldData('t8413OutBlock1', 'date', i), ":", inXAQuery.GetFieldData('t8413OutBlock1', 'close', i))
XAQueryEvents.queryState = 0
|
import logging
from flask import Flask
app = Flask(__name__)


@app.route('/')
def hello():
    """Root endpoint: return a fixed greeting."""
    return "Hello Youtube"


if __name__ == "__main__":
    # Development server only; debug=True must not be used in production.
    app.run(host='127.0.0.1', port=8080, debug=True)
|
import math
from datetime import datetime
from pymongo import MongoClient
from threading import Thread
# Severity weight per crime category (roughly 0 = negligible, 10 = worst);
# used to score crimes found near a route segment.
CRIME_CATEGORIES = {
    "MISCELLANEOUS": 1,
    "LARCENY": 3,
    "FRAUD": 0,
    "DAMAGE TO PROPERTY": 4,
    "ASSAULT": 5,
    "MURDER/INFORMATION": 6,
    "AGGRAVATED ASSAULT": 6,
    "WEAPONS OFFENSES": 2,
    "BURGLARY": 4,
    "STOLEN VEHICLE": 4,
    "DANGEROUS DRUGS": 2,
    "ESCAPE": 5,
    "OBSTRUCTING THE POLICE": 2,
    "OBSTRUCTING JUDICIARY": 1,
    "ROBBERY": 4,
    "EXTORTION": 5,
    "HOMICIDE": 10,
    "OUIL": 3,
    "TRAFFIC": 1,
    "DISORDERLY CONDUCT": 1,
    "ARSON": 4,
    "STOLEN PROPERTY": 3,
    "OTHER BURGLARY": 3,
    "EMBEZZLEMENT": 0,
    "FAMILY OFFENSE": 1,
    # both historical spellings of kidnap(p)ing appear in the source data
    "KIDNAPING": 8,
    "FORGERY": 0,
    "SOLICITATION": 1,
    "OTHER": 1,
    "IMMIGRATION": 1,
    "VAGRANCY (OTHER)": 1,
    "CIVIL": 0,
    "LIQUOR": 0,
    "RUNAWAY": 2,
    "ENVIRONMENT": 0,
    "JUSTIFIABLE HOMICIDE": 5,
    "OBSCENITY": 0,
    "TRAFFIC OFFENSES": 0,
    "NEGLIGENT HOMICIDE": 4,
    "MISCELLANEOUS ARREST": 1,
    "GAMBLING": 0,
    "BRIBERY": 0,
    "DRUNKENNESS": 1,
    "MILITARY": 0,
    "ABORTION": 1,
    "KIDNAPPING": 8
}
# Exposure multiplier per Google-Directions travel mode: drivers are treated
# as unexposed, pedestrians as fully exposed.
TRANSIT_CATEGORIES = {
    "DRIVING": 0.0,
    "WALKING": 1.0,
    "BICYCLING": 0.7,
    "TRANSIT": 0.4
}
def generate_sketch_dicts(routes_dicts, key_prefixes):
    """Score several route alternatives concurrently.

    One worker thread per routes dict; each writes its per-route totals into
    its own slot of result_dicts, and the slots are merged into one dict.

    BUG FIX: the original initialised the slots with `[{}] * N`, which
    aliases ONE shared dict N times, so all threads wrote into the same
    object; a list comprehension creates N independent dicts. (`range` is
    used instead of `xrange` — it behaves identically on both Python 2/3.)
    """
    N = len(routes_dicts)
    result_dicts = [{} for _ in range(N)]
    threads = []
    for i in range(N):
        t = Thread(target=generate_sketch_dict,
                   args=(routes_dicts[i], key_prefixes[i], result_dicts, i))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    merged = {}
    for part in result_dicts:
        merged.update(part)
    return merged
def generate_sketch_dict(routes_dict, key_prefix, result_dicts, thread_index):
    # Thread worker: score every route in `routes_dict` and store the totals
    # in result_dicts[thread_index] under keys '<key_prefix>_<route index>'.
    # Each thread opens its own MongoClient so connections are not shared.
    client = MongoClient()
    db = client.mhacks
    result = result_dicts[thread_index]
    for i in xrange(0, len(routes_dict['routes'])):
        route = routes_dict["routes"][i]
        # Per-route accumulator keyed by crime id so each crime counts once.
        sketch_dict = {}
        for step in route["legs"][0]["steps"]:
            calc_sketchiness(
                step["start_location"]["lat"], step["start_location"]["lng"],
                step["end_location"]["lat"], step["end_location"]["lng"],
                TRANSIT_CATEGORIES[step["travel_mode"]], sketch_dict, db
            )
        total_score = 0.0
        for key, val in sketch_dict.items():
            total_score += val
        result[key_prefix + "_" + str(i)] = total_score
# 0.50 miles roughly equals 805 meters
# inputs: start lat/long and end lat/long
def calc_sketchiness(lat1, lon1, lat2, lon2, travel_mode_mult, sketch_dict, db):
    # Sample points along the straight line between the two coordinates and
    # accumulate nearby-crime scores for each sample into sketch_dict.
    diff_lat = lat2 - lat1
    diff_lon = lon2 - lon1
    dist_meters = int(distance(lat1, lon1, lat2, lon2, 'K') * 1000.0)
    # check every 0.75 miles (805 * 1.5) = ~1208
    # (if start == end, dist_meters is 0, the range below is empty, and the
    # division by dist_meters is never reached)
    for x in range(0, dist_meters, 1208):
        proportion_done = 1.0 * x / dist_meters
        lat = (proportion_done * diff_lat) + lat1
        lon = (proportion_done * diff_lon) + lon1
        nearby_crimes_score(sketch_dict, lat, lon, travel_mode_mult, db)
def nearby_crimes_score(result_dict, lat, lon, travel_mode_mult, db):
    # Find crimes within 805 m (~0.5 mi) of the sample point and fold their
    # time-decayed, travel-mode-weighted severity into result_dict, keyed by
    # crime document id so a crime near several samples counts only once.
    # NOTE(review): GeoJSON points are conventionally [longitude, latitude];
    # this query passes [lat, lon] — verify against how crimedata.loc was
    # stored when the collection was built.
    cursor = db.crimedata.find(
        {
            "loc":
                {"$near":
                    {
                        "$geometry": {"type": "Point", "coordinates": [lat, lon]},
                        "$minDistance": 0,
                        "$maxDistance": 805
                    }
                }
        }
    ).limit(500)
    for doc in cursor:
        obj_id = doc["_id"]
        if not obj_id in result_dict:
            crime_date = datetime.strptime(doc["INCIDENTDATE"], '%m/%d/%Y')
            today = datetime.utcnow()
            num_days_ago = (today - crime_date).days
            # time decay with half life of 6 months
            time_decay = 0.5 ** (num_days_ago / 182.5)
            # Severity comes from the leading category token, e.g.
            # "ASSAULT - ..." -> "ASSAULT".
            score = CRIME_CATEGORIES[doc["CATEGORY"].split(" - ")[0]]
            score_decay = time_decay * score
            result_dict[obj_id] = score_decay * travel_mode_mult
# borrowed from call it magic
def distance(lat1, lon1, lat2, lon2, unit):
radlat1 = math.pi * lat1 / 180
radlat2 = math.pi * lat2 / 180
radlon1 = math.pi * lon1 / 180
radlon2 = math.pi * lon2 / 180
theta = lon1 - lon2
radtheta = math.pi * theta / 180
dist = math.sin(radlat1) * math.sin(radlat2) + math.cos(radlat1) * math.cos(radlat2) * math.cos(radtheta)
dist = math.acos(dist)
dist = dist * 180 / math.pi
dist = dist * 60 * 1.1515
# Convert dist in mi to dist in km
if (unit == "K"):
dist = dist * 1.609344
# Convert dist in mi to dist in nautical mi
if (unit == "N"):
dist = dist * 0.8684
return dist
|
import collections
import glob
import os
import random
from tqdm import tqdm
random.seed(12345)
# import numpy as np
# np.set_printoptions(threshold=np.nan)
def read_voxceleb_structure(directory, test_only=False, sample=False):
    """Scan directory/<subset>/<speaker>/<uri>/*.wav and return two lists of
    file-metadata dicts: (dev-or-sample_dev entries, test entries).

    With test_only or sample set, the large 'dev' subset is skipped entirely.
    """
    entries = []
    speakers = set()
    for subset in os.listdir(directory):
        if not os.path.isdir(os.path.join(directory, subset)):
            continue
        if subset == 'dev' and (test_only or sample):
            continue  # skip the big dev set when it is not wanted
        if subset != 'dev' and subset != 'test' and subset != 'sample_dev':
            continue  # ignore unrelated folders
        # Layout: root_dir/subset/speaker_id/uri/wav
        pattern = os.path.join(directory, subset) + '/*/*/*.wav'
        print('==> Reading {} set'.format(subset))
        for file_path in tqdm(glob.glob(pattern)):
            subset, speaker, uri, fname = file_path.split('/')[-4:]
            if '.wav' in fname:
                entries.append({'filename': fname, 'speaker_id': speaker,
                                'uri': uri, 'subset': subset,
                                'file_path': file_path})
                speakers.add(speaker)
    print('Found {} files with {} different speakers.'.format(len(entries), len(speakers)))
    dev_subset = 'sample_dev' if sample else 'dev'
    voxceleb_dev = [d for d in entries if d['subset'] == dev_subset]
    voxceleb_test = [d for d in entries if d['subset'] == 'test']
    return voxceleb_dev, voxceleb_test
def generate_test_dir(voxceleb):
    """Write test_pairs.csv: one '<issame> <path1> <path2>' line per pair,
    mixing random negative pairs (across all test files) with random positive
    pairs (within each speaker).

    NOTE(review): the output location depends on the module-level `directory`
    global set in the __main__ block — this function raises NameError if
    imported and called on its own; consider passing the directory in.

    Cleanup: the original built the test-file list twice (an append loop
    immediately overwritten by an identical comprehension); only one pass
    remains, with the per-speaker grouping kept.
    """
    speakers_test = collections.defaultdict(list)
    for item in voxceleb:
        if item['subset'] == 'test':
            speakers_test[item['speaker_id']].append(item['file_path'])
    voxceleb_test = [item['file_path'] for item in voxceleb if item['subset'] == 'test']
    # negative pairs: every test file paired with a random other test file
    pairs = list(zip(voxceleb_test, sorted(voxceleb_test, key=lambda x: random.random())))
    # positive pairs: files shuffled within each speaker
    for values in speakers_test.values():
        pairs += list(zip(values, sorted(values, key=lambda x: random.random())))
    csv_path = os.path.join(directory, 'test_pairs.csv')
    with open(csv_path, 'w') as f:
        for item in tqdm(pairs):
            item = list(item)
            for i in range(len(item)):
                # Keep only the speaker/uri/file part after '.../test/'.
                item[i] = item[i].split('/test/')[-1].strip()
            # Same leading path component == same speaker.
            issame = '1' if item[0].split('/')[0] == item[1].split('/')[0] else '0'
            f.write(' '.join([issame, item[0], item[1]]) + '\n')
    print('==> Generated and saved test pairs to {}'.format(csv_path))
if __name__ == '__main__':
    directory = '/data5/xin/voxceleb/raw_data/'
    # BUG FIX: read_voxceleb_structure returns a (dev, test) tuple, but the
    # original passed the raw tuple to generate_test_dir, which then iterated
    # two lists instead of metadata dicts and crashed on item['subset'].
    # Unpack it and hand over the test entries.
    _voxceleb_dev, voxceleb_test = read_voxceleb_structure(directory, test_only=True)
    generate_test_dir(voxceleb_test)
|
from sqlalchemy import create_engine, Column, Integer, VARCHAR, Sequence
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# SQLite database file in the working directory; created on first use.
engine = create_engine('sqlite:///newfeatures.db')
# Declarative base class that all ORM models derive from.
Base = declarative_base()
# Builds the table
class Features(Base):
    """ORM model for the 'features' table: one client feature request."""
    __tablename__= 'features'
    # surrogate primary key (Sequence is honored on backends that support it)
    id = Column(Integer, Sequence('user_id_seq'), primary_key=True)
    title = Column(VARCHAR(25))
    description = Column(VARCHAR(100))
    client = Column(VARCHAR(50))
    priority = Column(Integer)
    # stored as text — presumably 'YYYY-MM-DD' given length 10; confirm with writers
    targetdate = Column(VARCHAR(10))
    productarea = Column(VARCHAR(20))
    def __repr__(self):
        # comma-separated dump of every column, in declaration order
        return "%s,%s,%s,%s,%s,%s,%s" % (
            self.id, self.title, self.description, self.client, self.priority, self.targetdate, self.productarea)
# Creates a connection to the database
def db_connection():
    """Return a new SQLAlchemy Session bound to the module-level engine.

    Returns None (after printing the error) if session creation fails.
    """
    try:
        session_factory = sessionmaker(bind=engine)
        return session_factory()
    except Exception as exc:
        # was `print exc` (Python-2-only statement, a SyntaxError on
        # Python 3); the call form is valid on both 2 and 3.
        print(exc)
# No-op attribute access (has no effect); looks like a debugging leftover.
Features.__table__
# Create all mapped tables (here: 'features') if they don't already exist.
Base.metadata.create_all(engine)
|
print('{==============calculadora============')
num = int(input('Digite um número:'))
print('{} x 1 = {}\n{} x 2 = {}\n{} x 3 = {}'.format(num, num*1, num, num*2, num, num*3))
print('{} x 4 = {}\n{} x 5 = {}\n{} x 6 = {}'.format(num, num*4, num, num*5, num, num*6))
print('{} x 7 = {}\n{} x 8 = {}\n{} x 9 = {}'.format(num, num*7, num, num*8, num, num*9))
print('{} x 10 = {}'.format(num, num*10))
|
# -*- coding: utf-8 -*-
# Python Keyphrase Extraction toolkit: supervised models
from __future__ import absolute_import
from supervised.api import SupervisedLoadFile
from supervised.feature_based.kea import Kea
from supervised.feature_based.topiccorank import TopicCoRank
from supervised.feature_based.wingnus import WINGNUS
from supervised.neural_based.seq2seq import Seq2Seq
|
import numpy as np
from scipy.misc import imread
def load_images():
    """ Load the HST images and make them presentable."""
    # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this
    # module needs an old SciPy (with PIL installed) to run — confirm env.
    im1 = imread('lens1.jpg')
    # Each entry is [single channel (index 2), full RGB image]
    im1 = [ im1[:,:,2], im1 ]
    # blank out a rectangular region with the image mean
    im1[0][300:450,400:550] = im1[0].mean()
    # trim a 100-pixel border from both views
    im1[0]=im1[0][100:-100,100:-100]
    im1[1] = im1[1][100:-100,100:-100,:]
    im2 = imread('lens2.jpg')
    im2 = [ im2[500:-500,500:-500,0], im2[500:-500,500:-500,:] ]
    im3 = imread('lens3.jpg')
    im3 = [ im3[200:500,200:600,2], im3[200:500,200:600,:] ]
    # hide a region using the mean of a corner patch
    im3[0][100:225,125:250] = im3[0][:50,:50].mean()
    im4 = imread('lens4.jpg')
    im4 = [ im4[:,:,2], im4 ]
    return {'Exercise 1':im1,'Exercise 2':im2,'Exercise 3':im3,'Exercise 4':im4}
def create_galaxy(x,y,**kwargs):
    """Evaluate a mock galaxy profile: an elliptical 2D Gaussian.

    Required kwargs: posangle (degrees), xcen, ycen, peak, sigma, axratio.
    Returns the surface brightness at (x, y).
    """
    angle = np.deg2rad(kwargs['posangle'])
    dx = x - kwargs['xcen']
    dy = y - kwargs['ycen']
    # rotate the offsets into the galaxy's principal-axis frame
    u = dx * np.cos(angle) + dy * np.sin(angle)
    v = dy * np.cos(angle) - dx * np.sin(angle)
    q = kwargs['axratio']
    arg = (u * u * q + v * v / q) / (kwargs['sigma'] * kwargs['sigma'])
    return kwargs['peak'] * np.exp(-.5 * arg)
def load_grid(Npoints):
    """Return x, y coordinate arrays for an Npoints x Npoints grid.

    Both axes span [-2.5, 2.5] inclusive; x varies along columns and y
    along rows (meshgrid-style layout built with outer products).
    """
    lo, hi = -2.5, 2.5
    span = hi - lo
    ramp = np.arange(Npoints)
    ones = np.ones(Npoints)
    denom = float(Npoints - 1)
    x = span * np.outer(ones, ramp) / denom + lo
    y = span * np.outer(ramp, ones) / denom + lo
    return x, y
def compute_deflection(x,y,**kwargs):
    """ Compute the deflection and shear from a singular isothermal ellipsoid potential """
    mass = kwargs['Mass']
    dx = x - kwargs['xcen']
    dy = y - kwargs['ycen']
    axratio = np.abs(kwargs['axratio'])
    posangle = kwargs['posangle']
    tol = .001
    # normalize the axis ratio to <= 1 by rotating the position angle instead
    if axratio > 1:
        axratio = 1.0 / axratio
        posangle += 90
    phi = np.deg2rad(posangle)
    # rotate into the ellipsoid's principal-axis frame
    u = dx * np.cos(phi) + dy * np.sin(phi)
    v = dy * np.cos(phi) - dx * np.sin(phi)
    r = np.sqrt(axratio * u * u + v * v / axratio)
    q = np.sqrt(1. / axratio - axratio)
    # (r == 0) guard avoids division by zero at the lens center
    fac_u = u / (r + (r == 0))
    fac_v = v / (r + (r == 0))
    if q < tol:
        # near-spherical limit: deflection is purely radial
        u_def = mass * fac_u
        v_def = mass * fac_v
    else:
        u_def = (mass / q) * np.arctan(q * fac_u)
        v_def = (mass / q) * np.arctanh(q * fac_v)
    # rotate the deflection back to the sky frame
    x_out = u_def * np.cos(phi) - v_def * np.sin(phi)
    y_out = v_def * np.cos(phi) + u_def * np.sin(phi)
    return x_out, y_out
def draw_ellipse(scale=1,xc=0,yc=0,r=1,phi=0):
    """Return (xe, ye): 100 points tracing an ellipse.

    The parametric circle (cos t, r*sin t) is rotated by phi degrees,
    shrunk by `scale`, and translated to (xc, yc).
    """
    rot = np.deg2rad(phi)
    t = np.linspace(0, 2 * np.pi, 100)
    cos_t = np.cos(t)
    sin_t = np.sin(t)
    xe = xc + (cos_t * np.cos(rot) - r * sin_t * np.sin(rot)) / scale
    ye = yc + (cos_t * np.sin(rot) + r * sin_t * np.cos(rot)) / scale
    return xe, ye
|
import socket
# Fixed-width header carrying the byte length of the following payload.
HEADER_LENGTH = 3
IP = socket.gethostname()
PORT = 2000
#Set up client details
my_username = input("Please enter your USERNAME: ")
username = my_username.encode("utf-8")
# Header: payload length, left-justified in HEADER_LENGTH characters.
username_header = f"{len(username):<{HEADER_LENGTH}}".encode("utf-8")
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def user_pass():
    """Prompt for email and password and send them (comma-joined,
    length-prefixed) to the server over the module socket."""
    email = input('Please enter your Email-adress: ')
    password = input('Please enter your PASSWORD: ')
    payload = (email + ',' + password).encode("utf-8")  # Combine into 1 string
    header = f"{len(payload):<{HEADER_LENGTH}}".encode("utf-8")
    client_socket.send(header + payload)
#Try to connect:
while True:
    try:
        client_socket.connect((socket.gethostname(), PORT))
        break
    except:
        # busy-wait retry until the server accepts the connection
        print("Problem connecting with Doctor. Please stand by....")
        continue
#Send username to Doctor
client_socket.send(username_header + username)
#Try to get authorized:
def get_Authorization():
    """Log in to the server, re-prompting until it confirms the credentials.

    Sends email/password via user_pass(), then reads a length-prefixed
    'TRUE'/'FALSE' reply; repeats the prompt while the reply is 'FALSE'.
    """
    def _read_confirm():
        # Spin until a header arrives, then read the length-prefixed reply.
        while True:
            try:
                header = client_socket.recv(HEADER_LENGTH).decode("utf-8")
            except:
                continue
            break
        return client_socket.recv(int(header)).decode("utf-8")
    print("Getting Authorization...")
    user_pass()
    confirm = _read_confirm()
    while confirm == 'FALSE':
        print("Email-adress or Password not valid!!")
        user_pass()
        confirm = _read_confirm()
    # BUG FIX: the success message used to be printed INSIDE the retry loop,
    # so it appeared even after a failed attempt; print it once, on success.
    if confirm == "TRUE":
        print("Correctly logged in.")
get_Authorization()
# Main client loop: receive length-prefixed messages, verify their length,
# then let the patient either wait or send messages back.
while True:
    # receive things
    try:
        message_header = client_socket.recv(HEADER_LENGTH).decode("utf-8")
    except:
        #If the connection was not successful, keep trying to reconnect here (indefinitely).
        print("Problem with connection - trying to reconnect....")
        while True:
            try:
                client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                client_socket.connect((socket.gethostname(), PORT))
                break
            except:
                continue
        # re-identify ourselves after reconnecting
        client_socket.send(username_header + username)
        continue
    #Here we check if the length of the message received == length specified in the header.
    expected_message_length = int(message_header)
    message = client_socket.recv(expected_message_length).decode("utf-8")
    actual_length = len(message)
    if actual_length == expected_message_length:
        print("Full message received.")
    else:
        print("Full message not received!!!!")
        # Code to tell server that not all message was received
        receipt = f"{my_username} did not receive the full message".encode("utf-8")
        receipt_header = f"{len(receipt):<{HEADER_LENGTH}}".encode("utf-8")
        client_socket.send(receipt_header + receipt)
    #Now print out the message received in the patient's screen:
    print(message)
    #Menu giving the patient options to wait for a message to come in or to send a message.
    options = input(f"Enter 'm' to send a message to the Doctor or 'w' to wait for advice : > ")
    while options != 'm' and options != 'w':
        options = input(f"Invalid entry. Enter 'm' to send a message to the Doctor or 'w' to wait for advice : > ")
    # keep sending until the user chooses 'w' (or an invalid entry resolves to 'w')
    while options == 'm':
        consultMessage = input(f"Enter message for the Doctor: > ")
        consultMessage = consultMessage.encode("utf-8")
        consultMessageHeader = f"{len(consultMessage):<{HEADER_LENGTH}}".encode("utf-8")
        try:
            client_socket.send(consultMessageHeader + consultMessage)
        except:
            print("Error sending message to the Doctor.")
        options = input(f"Enter 'm' to send a message to the Doctor or 'w' to wait for advice : > ")
        while options != 'm' and options != 'w':
            options = input(f"Invalid entry. Enter 'm' to send a message to the Doctor or 'w' to wait for advice : > ")
|
# SECURITY(review): hardcoded API keys/credentials committed to source.
# These should be rotated and loaded from environment variables or a
# secrets manager rather than stored in the repository.
charity_search_key = "98d8c2f044592075251da6b0b146ef2e"
just_giving_appid = "98927271"
charity_navigator_key = "8186b309"
|
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
import launch
import launch.actions
import launch.event_handlers.on_process_start
import launch_ros.actions
import launch_ros.events
import launch_ros.events.lifecycle
import launch_testing
import launch_testing.actions
import launch_testing.asserts
import lifecycle_msgs.msg
import pytest
@pytest.mark.rostest
def generate_test_description():
    """Launch a lifecycle talker and a listener, and drive the talker
    through configure -> activate -> (5 s wait) -> deactivate -> cleanup
    -> unconfigured_shutdown via lifecycle ChangeState events.

    NOTE(review): the handlers below reference launch_ros.event_handlers,
    which is not explicitly imported above — presumably re-exported by the
    launch_ros package; confirm. Also the talker comes from package
    'lifecycle_py' while the listener comes from 'lifecycle' — verify both
    package names are intended.
    """
    talker_node = launch_ros.actions.LifecycleNode(
        package='lifecycle_py', executable='lifecycle_talker',
        name='lc_talker', namespace='', output='screen'
    )
    listener_node = launch_ros.actions.Node(
        package='lifecycle', executable='lifecycle_listener',
        name='listener', output='screen'
    )
    return launch.LaunchDescription([
        talker_node, listener_node,
        # Right after the talker starts, make it take the 'configure' transition.
        launch.actions.RegisterEventHandler(
            launch.event_handlers.on_process_start.OnProcessStart(
                target_action=talker_node,
                on_start=[
                    launch.actions.EmitEvent(event=launch_ros.events.lifecycle.ChangeState(
                        lifecycle_node_matcher=launch.events.matches_action(talker_node),
                        transition_id=lifecycle_msgs.msg.Transition.TRANSITION_CONFIGURE,
                    )),
                ],
            )
        ),
        # When the talker reaches the 'inactive' state, make it take the 'activate' transition.
        launch.actions.RegisterEventHandler(
            launch_ros.event_handlers.OnStateTransition(
                target_lifecycle_node=talker_node,
                start_state='configuring', goal_state='inactive',
                entities=[
                    launch.actions.EmitEvent(event=launch_ros.events.lifecycle.ChangeState(
                        lifecycle_node_matcher=launch.events.matches_action(talker_node),
                        transition_id=lifecycle_msgs.msg.Transition.TRANSITION_ACTIVATE,
                    )),
                ],
            )
        ),
        # When the talker node reaches the 'active' state, wait a bit and then make it take the
        # 'deactivate' transition.
        launch.actions.RegisterEventHandler(
            launch_ros.event_handlers.OnStateTransition(
                target_lifecycle_node=talker_node, start_state='activating', goal_state='active',
                entities=[
                    launch.actions.TimerAction(period=5.0, actions=[
                        launch.actions.EmitEvent(event=launch_ros.events.lifecycle.ChangeState(
                            lifecycle_node_matcher=launch.events.matches_action(talker_node),
                            transition_id=lifecycle_msgs.msg.Transition.TRANSITION_DEACTIVATE,
                        )),
                    ]),
                ],
            )
        ),
        # When the talker node reaches the 'inactive' state coming from the 'active' state,
        # make it take the 'cleanup' transition.
        launch.actions.RegisterEventHandler(
            launch_ros.event_handlers.OnStateTransition(
                target_lifecycle_node=talker_node,
                start_state='deactivating', goal_state='inactive',
                entities=[
                    launch.actions.EmitEvent(event=launch_ros.events.lifecycle.ChangeState(
                        lifecycle_node_matcher=launch.events.matches_action(talker_node),
                        transition_id=lifecycle_msgs.msg.Transition.TRANSITION_CLEANUP,
                    )),
                ],
            )
        ),
        # When the talker node reaches the 'unconfigured' state after a 'cleanup' transition,
        # make it take the 'unconfigured_shutdown' transition.
        launch.actions.RegisterEventHandler(
            launch_ros.event_handlers.OnStateTransition(
                target_lifecycle_node=talker_node,
                start_state='cleaningup', goal_state='unconfigured',
                entities=[
                    launch.actions.EmitEvent(event=launch_ros.events.lifecycle.ChangeState(
                        lifecycle_node_matcher=launch.events.matches_action(talker_node),
                        transition_id=(
                            lifecycle_msgs.msg.Transition.TRANSITION_UNCONFIGURED_SHUTDOWN
                        ),
                    )),
                ],
            )
        ),
        launch_testing.actions.ReadyToTest()
    ]), locals()
class TestLifecyclePubSub(unittest.TestCase):
    """Asserts the lifecycle callbacks fire in order while processes run."""
    def test_talker_lifecycle(self, proc_info, proc_output, talker_node, listener_node):
        """Test lifecycle talker."""
        # Lifecycle callbacks on the talker, in the expected order.
        for expected, limit in (('on_configure() is called', 5),
                                ('on_activate() is called', 10)):
            proc_output.assertWaitFor(expected, process=talker_node, timeout=limit)
        # The listener should report data from the active talker.
        pattern = re.compile(r'data_callback: Lifecycle HelloWorld #\d+')
        proc_output.assertWaitFor(
            expected_output=pattern, process=listener_node, timeout=10
        )
        for expected, limit in (('on_deactivate() is called', 10),
                                ('on_cleanup() is called', 5),
                                ('on_shutdown() is called', 5)):
            proc_output.assertWaitFor(expected, process=talker_node, timeout=limit)
@launch_testing.post_shutdown_test()
class TestLifecyclePubSubAfterShutdown(unittest.TestCase):
    """Checks that run after the launched processes have exited."""
    def test_talker_graceful_shutdown(self, proc_info, talker_node):
        """Test lifecycle talker graceful shutdown."""
        # Asserts the talker exited with an allowed exit code (0 by default).
        launch_testing.asserts.assertExitCodes(proc_info, process=talker_node)
|
import urllib2
import re
# NOTE: Python 2 script (urllib2). The Yahoo quote endpoint it scrapes has
# been retired; kept for reference. Print/with forms below are valid on
# Python 2.6+ as well.
# Ticker symbols are the first '|'-separated field of each line.
with open('nasdaqlisted.txt') as symbolfile:
    symbolslist = [line.split('|')[0] for line in symbolfile.readlines()]
print(symbolslist)
# FIX: iterate the list directly (was an index-based while loop) and close
# the symbol file deterministically (was left open).
for symbol in symbolslist:
    htmltext = urllib2.urlopen("http://finance.yahoo.com/q?s=%s&ql=1" % symbol).read()
    # the quote sits in a span whose id embeds the lowercase ticker
    regex = '<span id="yfs_l84_%s">(.+?)</span>' % symbol.lower()
    price = re.findall(regex, htmltext)
    print("The price of %s stock is %s" % (symbol, price))
|
from typing import List
from leetcode import TreeNode, test, new_tree, sorted_list
def binary_tree_path(root: TreeNode) -> List[str]:
    """Return all root-to-leaf paths as 'a->b->c' strings (DFS preorder).

    Params:
        root: tree root (nodes expose .val, .left, .right), or None.
    Returns:
        One string per leaf path; [] for an empty tree.
    """
    if not root:
        return []
    stack, results = [], []
    def helper(node: TreeNode) -> None:
        # FIX: removed the needless `nonlocal stack, results` — both are
        # only mutated (append/pop), never rebound, so closures see them.
        stack.append(node.val)
        if not node.left and not node.right:
            # leaf: snapshot the current path
            results.append(list(stack))
        else:
            if node.left:
                helper(node.left)
            if node.right:
                helper(node.right)
        stack.pop()
    helper(root)
    return ["->".join(str(val) for val in result) for result in results]
# Harness check: compares the (sorted) path list against the expected output.
test(
    binary_tree_path,
    [
        (new_tree(1, 2, 3, None, 5), ["1->2->5", "1->3"]),
    ],
    map_func=sorted_list,
)
|
""" Data classes for HP-GL/2 (HPGL2) content.
"""
from typing import List, Tuple, Optional, Union
import math
from fig_package.format.ynf.element.cYnfElment import cYnfElement
from ..ynf import cYnf
from ..ynf import cYnfText, cYnfLine, cYnfPolyline, cYnfBox, cYnfCircle, cYnfArc
# --- Status-dictionary keys: page/global parameters ---
BP_PIC_NAME = 'BP Picture name'
PS_LENGTH = 'PS Length'
PS_WIDTH = 'PS Width'
RO_ANGLE = 'RO Rotate angle'
# --- Status-dictionary keys: pen state ---
NP_NUMBER_OF_PENS = 'NP Number of pens'
PENS_INFO = 'Pens information'
PEN_NUMBER = 'Pen number'
PEN_COLOR = 'Pen color'
# Default pen palette: pen number -> CSS color.
DICT_PEN_COLOR_RGB = {'0':'#FFF', '1':'#000', '2':'#F00', '3':'#0F0',
    '4':'#FF0', '5':'#00F', '6':'#F0F', '7':'#0FF'}
PEN_DEFAULT_PEN_WIDTH = 'Default Pen width'
PEN_WIDTH = 'Pen width'
PEN_DOWN = 'Pen down'
PEN_POS = 'Pen Position'
PEN_LINETYPE = 'Pen linetype'
PEN_LINETYPE_PREV = 'Pen linetype (prev)'
PEN_LINE_PATTERN_LENGTH = 'Pen line pattern length'
PEN_LINE_PATTERN_MODE = 'Pen line pattern mode'
# LT line-type number -> alternating draw/gap percentages of the pattern
# length (negative numbers are the "adaptive" variants).
DICT_LINE_PATTERN = {
    '8':[50,10,0,10,10,10,0,10],
    '7':[70,10,0,10,0,10],
    '6':[50,10,10,10,10,10],
    '5':[70,10,10,10],
    '4':[80,10,0,10],
    '3':[70,30],
    '2':[50,50],
    '1':[0,100],
    '0':[0],
    '-1':[0,100,0],
    '-2':[25,50,25],
    '-3':[35,30,35],
    '-4':[40,10,0,10,40],
    '-5':[35,10,10,10,35],
    '-6':[25,10,10,10,10,10,25],
    '-7':[35,10,0,10,0,10,35],
    '-8':[25,10,0,10,10,10,0,10,25]
    }
CHARACTER_SET = 'Character set'
# HP-GL/2 character-set codes -> names (values preserved verbatim,
# including original spellings).
DICT_CHARACTER_SET = {
    '0':'Roman8', '1':'Math-7', '2':'Line Draw-7', '3':'HP Large Characters',
    '4':'Norwegian v1', '37':'United Kingdom', '38':'French', '39':'German',
    '263':'Greek-8', '8':'Hebrew-7', '264':'Hebrew-8', '9':'Italian',
    '202':'Microsoft Publishing', '234':'DeskTop', '330':'PS Text',
    '426':'Ventura International', '458':'Ventura U.S.', '11':'JIS ASCII',
    '43':'Katakana', '267':'Kana-8', '299':'Korian-8', '1611':'JIS Kanji-1',
    '1643':'JIS Kanji-2', '12':'Line Draw-7', '44':'HP Block Charcters',
    '76':'Tax Line Draw', '268':'Line Draw-8', '300':'Ventra ITC Zapf Dingbats',
    '332':'PS ITC Zapf Dingbats', '364':'ITC Zapf Dingbats Series 100',
    '396':'ITC Zapf Dingbats Series 200',
    '428':'ITC Zapf Dingbats Series 300', '13':'Math-7', '45':'Tech-7 (DEC)',
    '173':'PS Math', '205':'Ventura Math', '269':'Math-8',
    '14':'ECMA-94 Latin 1 (8-bit version)', '78':'ECMA-94 Latin 2',
    '174':'ECMA-128 Latin 5', '334':'ECMA-113/88 Latin/Cyrillic',
    '15':'OCR-A', '47':'OCR-B', '79':'OCR-M', '16':'APL (typewriter-paired)',
    '48':'APL (bit-paired)', '145':'PC Line', '18':'Cyrillic ASCII',
    '50':'Cyrillic', '114':'PC Cyrillic', '19':'Swedish for names',
    '83':'Spanish', '243':'HP European Spanish', '275':'HP Latin Spanish',
    '531':'HP-GL Download', '563':'HP-GL Drafting',
    '595':'HP-GL Special Symbols', '20':'Thai-8', '276':'Turkish-8',
    '21':'ANSI US ASCI', '53':'Legal', '181':'HPL Language Set',
    '245':'OEM-1 (DEC Set)', '277':'Roman8', '309':'Windows', '341':'PC-8',
    '373':'PC-8 Denmark/Norway', '405':'PC-850', '501':'Pi Font',
    '565':'PC-852', '22':'Arabic (MacKay\'s Version)', '278':'Arabic-8',
    '25':'3 of 9 Barcode', '57':'Industrial 2 of 5 Barcode',
    '89':'Matrix 2 of 5 Barcode', '153':'Interleaved 2 of 5 Barcode',
    '185':'CODABAR Barcode', '217':'MSI/Plessey Barcode',
    '249':'Code 11 Barcode', '281':'UPC/EAN Barcode', '505':'USPS Zip',
    '26': 'Not used'
    }
# --- Status-dictionary keys: font parameters ---
FONT_SPACING = 'Font spacing'
DICT_FONT_SPACING = {
    '0': 'fixed spacing',
    '1': 'variable spacing'
}
FONT_PITCH = 'Font Pitch'
FONT_HEIGHT = 'Font Height'
FONT_POSTURE = 'Font Posture'
DICT_FONT_POSTURE = {'0':'upright', '1':'italic'}
FONT_WEIGHT = 'Font Weight'
FONT_TYPE_FACE = 'Font Type Face'
# HP-GL/2 typeface codes -> names (values preserved verbatim).
DICT_FONT_TYPE_FACE = {
    '0':'Line Printer or Line Draw', '1':'Pica', '2':'Elite',
    '3':'Courier', '4':'Helvetica', '5':'Times Roman',
    '6':'Letter Gothic', '7':'Script', '8':'Prestige',
    '9':'Caslon', '10':'Orator', '11':'Presentation', '13':'Serifa',
    '14':'Futura', '15':'Palatino', '16':'ITC Sourvenir', '17':'Optima',
    '18':'ITC Garamond', '20':'Coronet', '21':'Broadway',
    '23':'Century Schoolbook', '24':'Uninversity Roman',
    '27':'ITC Korinna', '28':'Naskh (generic Arbaic typeface)',
    '29':'Cloister Black', '30':'ITC Galliard',
    '31':'ITC Avant Garde Gothic', '32':'Brush', '33':'Blippo',
    # BUG FIX: a stray '' literal after '38' implicitly concatenated with
    # the '39' key on the next line (harmless but confusing); removed.
    '34':'Hobo', '35':'Windsor', '38':'Peignot',
    '39':'Baskerville', '41':'Trade Gothic', '42':'Gordy Old Style',
    '43':'ITC Zapf Chancery', '44':'Clarendon',
    '45':'ITC Zapf Dingbats', '46':'Cooper', '47':'ITC Bookman',
    '48':'Stick (default)', '49':'HP-GL Drafting',
    '50':'HP-GL fixed and variable arc', '51':'Gill Sans',
    '52':'Univers', '53':'Bodoni', '54':'Rockwell', '55':'Melior',
    '56':'ITC Tiffany', '57':'ITC Clearface', '58':'Amelia',
    '59':'Park Avenue', '60':'Handel Gothic', '61':'Dom Casual',
    '62':'ITC Benguiat', '63':'ITC Cheltenham', '64':'Century Expanded',
    '65':'Franklin Gothic', '68':'Plantin', '69':'Trump Mediaeval',
    '70':'Futura Black', '71':'ITC American Typewriter',
    '72':'Antique Olive', '73':'Uncial', '74':'ITC Bauhaus',
    '75':'Century Oldstyle', '76':'ITC Eras', '77':'ITC Friz Quadrata',
    '78':'ITC Lubalin Graph', '79':'Eurostile', '80':'Mincho',
    '81':'ITC Serif Gothic', '82':'Signet Roundhand',
    '83':'Souvenir Gothic', '84':'Stymie', '87':'Bernhard Modern',
    '89':'Excelsior', '90':'Grand Ronde Script', '91':'Ondine',
    '92':'P.T.Barnum', '93':'Kaufman', '94':'ITC Bolt Bold',
    '96':'Helv Monospaced', '97':'Revue', '101':'Garamond (Stemple)',
    '102': 'Garth Graphic', '103':'ITC Ronda', '104':'OCR-A',
    '105':'ITC Century', '106':'Englishe Schreibschrift',
    '107':'Flash', '108':'Gothic Outline (URW)', '109':'Stencil (ATF)',
    '110':'OCR-B', '111':'Akzdenz-Grotesk', '112':'TD Logos',
    '113':'Shannon', '114':'ITC Century', '152':'Maru Gosikku',
    '153':'Gosikku (Kaku)', '154':'Socho', '155':'Kyokasho',
    '156':'Kaisho'
    }
# 'Stick (default)' is the default typeface code.
DEFAULT_FONT_TYPE_FACE = '48'
# LT pattern-length mode: '0' relative, '1' absolute.
DICT_PEN_LINE_PATTERN_MODE = {
    '0': 'Relative mode', '1': 'Absolute mode'
}
class cHpgl2Status():
    """ HP-GL/2 drawing state (page, pen, and font parameters).
    """
    def __init__(self):
        self.set_default_values()
    def set_default_values(self) -> None:
        """Reset every status parameter to its default value."""
        self.params = {
            # page / global
            BP_PIC_NAME: '',
            PS_LENGTH: 0,
            PS_WIDTH: 0,
            RO_ANGLE: 0,
            # pen
            NP_NUMBER_OF_PENS: 8,
            PEN_NUMBER: '0',
            PEN_COLOR: DICT_PEN_COLOR_RGB['0'],
            PEN_DEFAULT_PEN_WIDTH: 1.0,
            PEN_WIDTH: 1.0,
            PEN_DOWN: False,
            PEN_POS: [0,0], # this position is in the HP-GL/2 coordinate system
            PEN_LINETYPE: None,
            PEN_LINETYPE_PREV: None,
            PEN_LINE_PATTERN_LENGTH: 4,
            PEN_LINE_PATTERN_MODE: '0',
            # text / font
            CHARACTER_SET: DICT_CHARACTER_SET['0'],
            FONT_SPACING: DICT_FONT_SPACING['0'],
            FONT_PITCH: 3.0,
            FONT_HEIGHT: 12.0,
            FONT_POSTURE: DICT_FONT_POSTURE['0'],
            FONT_WEIGHT: 0,
            FONT_TYPE_FACE: DICT_FONT_TYPE_FACE[DEFAULT_FONT_TYPE_FACE],
        }
        return None
    def set_param(self, key:str, val:str) -> None:
        """Store `val` under `key` (values are not strictly str in practice)."""
        self.params[key] = val
    def get_param(self, key:str) -> Union[str, int, float]:
        """Return the stored value for `key` (KeyError if absent)."""
        return self.params[key]
    def get_wnd_pos(self, x0:int, y0:int) -> Tuple[int,int]:
        """ Convert (x, y) from the HP-GL/2 coordinate system (origin at the
        bottom-left, x right / y up) into window coordinates (origin at the
        top-left, x right / y down), accounting for the RO rotation.
        """
        angle = self.get_param(RO_ANGLE)
        length = self.get_param(PS_LENGTH)
        width = self.get_param(PS_WIDTH)
        # flip the y axis
        x1 = x0
        y1 = (-1)*y0
        # rotate by the RO angle
        theta = angle*math.pi/180.0
        x2 = x1*math.cos(theta) - y1*math.sin(theta)
        y2 = x1*math.sin(theta) + y1*math.cos(theta)
        # the origin position depends on RO_ANGLE
        x3 = x2
        y3 = y2
        if angle==0:
            # bottom-left
            y3 = y2 + length
        elif angle==90:
            # bottom-right
            x3 = x2 + width
            y3 = y2 + length
        elif angle==180:
            # top-right
            x3 = x2 + width
        """
        もしこれをやるとしたら、長さも変換しなくちゃいけない
        # 座標が大きすぎて、ペンの幅が細くなるので
        # 座標を小さくする
        x4 = x3
        y4 = y3
        """
        return (x3,y3)
class cHpgl2ElmCommand(object):
    """Base class for HP-GL/2 command elements.

    Splits a raw command string into its two-letter mnemonic and its
    parameter text (trailing ';' removed).
    """
    def __init__(self, str_cmnd:str):
        self.mnemonic, rest = str_cmnd[:2], str_cmnd[2:]
        self.param = rest.rstrip(';')
    def set_status(self, st:cHpgl2Status) -> None:
        """Update the drawing status `st` in place (base class: no-op)."""
        return None
    def get_ynf(self, st:cHpgl2Status) -> Optional[cYnfElement]:
        """Build a cYnfElement using `st`, or return None when the command
        produces no drawable element (base class: always None)."""
        return None
    def quated_string(self, param_str:str) -> str:
        """Decode an HP-GL/2 quoted string: strip the surrounding quotes
        and collapse doubled quotes into literal ones."""
        return param_str.strip('"').replace('""', '"')
class cExcHpgl2IgnoreInstruction(Exception):
    """ Exception signalling that an instruction should be skipped. """
    pass
# ----------------------- #
#     Element classes      #
# ----------------------- #
class cHpgl2IN(cHpgl2ElmCommand):
    """ IN, Initialize (parsed but no state change or output implemented). """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
class cHpgl2PG(cHpgl2ElmCommand):
    """ PG, Advanced Full Page (parsed but no state change or output implemented). """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
class cHpgl2RO(cHpgl2ElmCommand):
    """ RO, Rotate Coordinate System.

    Accepts an angle of 0/90/180/270 degrees (defaults to 0 when absent)
    and stores it in the status under RO_ANGLE.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        rotate_angle = 0
        if len(self.param)>0:
            if self.param not in ['0','90','180','270']:
                # BUG FIX: the message used to interpolate rotate_angle,
                # which is still 0 here; report the offending parameter.
                raise cExcHpgl2IgnoreInstruction(
                    f'RO angle error.{self.param}')
            rotate_angle = int(self.param)
        st.set_param(RO_ANGLE, rotate_angle)
        return None
class cHpgl2AA(cHpgl2ElmCommand):
    """ AA, Arc Absolute.

    Draws an arc around an absolute center point, starting at the
    current pen position and sweeping by the given angle.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def get_ynf(self, st:cHpgl2Status) -> Optional[cYnfElement]:
        return self.get_ynf_AAarc(st)
        #return self.get_ynf_AApolyline(st)
    def get_ynf_AAarc(self, st:cHpgl2Status) -> Optional[cYnfElement]:
        """Build a cYnfArc element for this command."""
        params = self.param.split(',')
        try:
            cx = int(params[0])
            cy = int(params[1])
            sweep_angle = float(params[2])
        except:
            raise cExcHpgl2IgnoreInstruction(
                f'AA のパラメータが不正 ({self.param})')
        # current pen position = arc start point
        x1,y1 = st.get_param(PEN_POS)
        # end point: rotate the start point around (cx, cy) by sweep_angle
        clockwise = (sweep_angle<0)
        if not clockwise:
            sweep_angle = sweep_angle % 360.0
        else:
            sweep_angle = sweep_angle % (-360.0)
        sweep_angle_rad = sweep_angle * math.pi / 180.0
        x2 = (x1-cx)*math.cos(sweep_angle_rad) \
            - (y1-cy)*math.sin(sweep_angle_rad) \
            + cx
        y2 = (x1-cx)*math.sin(sweep_angle_rad) \
            + (y1-cy)*math.cos(sweep_angle_rad) \
            + cy
        # radius from the center to the start point
        r = math.sqrt( (x1-cx)**2 + (y1-cy)**2 )
        # build the arc parameters (coordinates converted to window space)
        arc_params = {
            'start': st.get_wnd_pos(x1,y1),
            'end': st.get_wnd_pos(x2,y2),
            'center': st.get_wnd_pos(cx,cy),
            'r': r,
            'clockwise': clockwise
        }
        # add the common line parameters (color, width, dash pattern)
        arc_params.update(get_line_common_param(st))
        return cYnfArc(arc_params)
    def get_ynf_AApolyline(self, st:cHpgl2Status) -> Optional[cYnfElement]:
        """ Polyline version of the arc (unused alternative to get_ynf_AAarc).
        """
        params = self.param.split(',')
        try:
            cx = int(params[0])
            cy = int(params[1])
            sweep_angle = float(params[2])
            chord_angle = 5.0  # resolution: one straight chord per this many degrees
            if len(params)>3:
                chord_angle = float(params[3])
        except:
            raise cExcHpgl2IgnoreInstruction(
                f'AA のパラメータが不正 ({self.param})')
        # Approximate the arc with a polyline of chords
        # (that is what HP-GL/2 itself specifies).
        # Normalize the sweep angle.
        if sweep_angle>0:
            sweep_angle = sweep_angle % 360.0
        else:
            sweep_angle = sweep_angle % (-360.0)
        sweep_angle_rad = sweep_angle * math.pi / 180.0
        # current pen position
        x1,y1 = st.get_param(PEN_POS)
        str_xy_array = f'{int(x1)},{int(y1)}'
        x1 = float(x1)
        y1 = float(y1)
        abs_rotated_angle = 0.0
        # NOTE(review): negative sweeps still step by +d_theta — confirm
        # the intended direction for clockwise arcs.
        d_theta = chord_angle * math.pi / 180.0
        while abs_rotated_angle < abs(sweep_angle):
            # rotate the current point around (cx, cy) by one chord step
            x2 = (x1-cx)*math.cos(d_theta) \
                - (y1-cy)*math.sin(d_theta) \
                + cx
            # BUG FIX: the x offset used to be (x1-cy); a rotation must
            # use (x1-cx), matching get_ynf_AAarc above.
            y2 = (x1-cx)*math.sin(d_theta) \
                + (y1-cy)*math.cos(d_theta) \
                + cy
            str_xy_array += f',{int(x2)},{int(y2)}'
            x1 = x2
            y1 = y2
            abs_rotated_angle += chord_angle
        # build and return the polyline element
        return get_polylinexy_array(str_xy_array, st)
class cHpgl2CI(cHpgl2ElmCommand):
    """ CI, Circle.

    Draws a circle of the given radius centered at the current pen position.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def get_ynf(self, st:cHpgl2Status) -> Optional[cYnfElement]:
        if len(self.param)==0:
            raise cExcHpgl2IgnoreInstruction(
                'CI のパラメータがない')
        # the current pen position is the circle center
        cx,cy = st.get_param(PEN_POS)
        cx1,cy1 = st.get_wnd_pos(cx, cy)
        # radius
        params = self.param.split(',')
        try:
            radius = float(params[0])
        except:
            raise cExcHpgl2IgnoreInstruction(
                f'CI のパラメータが不正 ({params[0]})')
        # build the circle parameters
        circle_params = {
            'center': [cx1, cy1],
            'r': radius
        }
        # add the common line parameters (color, width, dash pattern)
        circle_params.update(get_line_common_param(st))
        return cYnfCircle(circle_params)
class cHpgl2PA(cHpgl2ElmCommand):
    """ PA, Plot Absolute.

    Moves/draws to the absolute coordinates given as "x,y,x,y,...".
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def get_ynf(self, st:cHpgl2Status) -> Optional[cYnfElement]:
        # no coordinates: nothing to draw and no pen movement
        if len(self.param)==0:
            return None
        # build and return a polyline/line element (also advances the pen)
        return get_polylinexy_array(self.param, st)
def get_polylinexy_array(str_xy_array:str, st:cHpgl2Status) \
        -> Optional[Union[cYnfPolyline, cYnfLine]]:
    """ Create a polyline/line element from an "x,y,x,y,..." string.

    Shared by several command classes. Coordinates are converted with
    st.get_wnd_pos() here, and the pen position in `st` is advanced to the
    last point, so callers need to do neither.

    Params:
        str_xy_array:str
            Comma-separated integer coordinates: "x,y,x,y,...".
        st:cHpgl2Status
            Current drawing status (updated in place).
    Returns:
        cYnfPolyline for 3+ points, cYnfLine for exactly 2 points, or
        None when the pen is up (the pen is just moved).
    """
    xy_array = str_xy_array.split(',')
    # values must come in (x, y) pairs
    if len(xy_array)%2!=0:
        raise cExcHpgl2IgnoreInstruction(
            f'xとyのセットになっていない({str_xy_array})')
    # Pen up: only move to the last coordinate, draw nothing.
    if not st.get_param(PEN_DOWN):
        try:
            st.set_param(PEN_POS,
                [int(xy_array[-2]), int(xy_array[-1])])
        except:
            raise cExcHpgl2IgnoreInstruction(
                f'PA のパラメータが非整数 ({str_xy_array})')
        return None
    # Pen down: build the coordinate list for a line element,
    # starting from the current pen position.
    wnd_xy_pairs = []
    x, y = st.get_param(PEN_POS)
    wnd_xy_pairs.append(st.get_wnd_pos(x,y))
    # append each coordinate pair contained in the command
    for i in range(len(xy_array)//2):
        # BUG FIX: this validation had been disabled with an `if True:`
        # debug leftover (the except was commented out), so non-integer
        # parameters leaked a raw ValueError; restore the handling to
        # match the pen-up branch above.
        try:
            x = int(xy_array[i*2])
            y = int(xy_array[i*2+1])
        except:
            raise cExcHpgl2IgnoreInstruction(
                f'PA のパラメータが非整数 ({str_xy_array})')
        wnd_xy_pairs.append(st.get_wnd_pos(x,y))
        # advance the current pen position
        st.set_param(PEN_POS, [int(x), int(y)])
    # 3+ points -> polyline, exactly 2 -> single line
    elm = None
    if len(wnd_xy_pairs)>2:
        polyline_params = {
            'points':wnd_xy_pairs,
            'isClose':False
        }
        # add the common line parameters (color, width, dash pattern)
        polyline_params.update(get_line_common_param(st))
        elm = cYnfPolyline(polyline_params)
    elif len(wnd_xy_pairs)==2:
        # NOTE: zero-length segments are intentionally still emitted —
        # they may be wanted for drawing dots (2020/8/13 review note).
        line_params = {
            'p1':wnd_xy_pairs[0],
            'p2':wnd_xy_pairs[1]
        }
        # add the common line parameters (color, width, dash pattern)
        line_params.update(get_line_common_param(st))
        elm = cYnfLine(line_params)
    else:
        assert False, f'wnd_xy_pairsが不正 {wnd_xy_pairs}'
    return elm
def get_line_common_param(st:cHpgl2Status) -> dict:
    """ Collect the line-drawing parameters shared by all line-type
    elements (color, width, dash pattern) from the current status.
    """
    # currently selected pen
    cur_pen_number = int(st.get_param(PEN_NUMBER))
    pens_info = st.get_param(PENS_INFO)
    cur_pen = pens_info[cur_pen_number]
    # pen color
    pen_color = cur_pen[PEN_COLOR]
    # line width — the factor 50 scales pen width to output units;
    # NOTE(review): confirm against the target coordinate system
    pen_width = cur_pen[PEN_WIDTH]*50
    # line type (None means solid)
    line_type = st.get_param(PEN_LINETYPE)
    # dash pattern for the selected line type
    if line_type is None:
        line_pattern = None
    else:
        line_pattern = DICT_LINE_PATTERN[line_type]
    # pattern length
    pattern_length = st.get_param(PEN_LINE_PATTERN_LENGTH)
    # pattern length unit ('0': %, '1': mm)
    pattern_length_unit = st.get_param(PEN_LINE_PATTERN_MODE)
    return {
        'border-color':pen_color,
        'border-width':pen_width,
        'linetype':line_type,
        'line-pattern-length':pattern_length,
        'line-pattern':line_pattern,
        'line-pattern-length-mode':pattern_length_unit
    }
class cHpgl2PD(cHpgl2ElmCommand):
    """ PD, Pen Down.

    Marks the pen as down; subsequent moves draw.
    NOTE(review): PD coordinate parameters, if any, are ignored here.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        st.set_param(PEN_DOWN, True)
        return None
class cHpgl2PU(cHpgl2ElmCommand):
    """ PU, Pen Up.

    Marks the pen as up; subsequent moves do not draw.
    NOTE(review): PU coordinate parameters, if any, are ignored here.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        st.set_param(PEN_DOWN, False)
        return None
class cHpgl2LT(cHpgl2ElmCommand):
    """ LT, Line Type.

    Selects the dash pattern (and optionally pattern length and mode)
    used for subsequent line drawing.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        # remember the previously stored line type (for LT99)
        prev_linetype = st.get_param(PEN_LINETYPE_PREV)
        # the current line type becomes the new "previous"
        st.set_param(PEN_LINETYPE_PREV, st.get_param(PEN_LINETYPE))
        # decide the next line type
        if len(self.param)==0:
            # no parameter: solid line
            st.set_param(PEN_LINETYPE, None)
        elif self.param=='99':
            # LT99: restore the previous line type
            st.set_param(PEN_LINETYPE, prev_linetype)
        else:
            # explicit line type (optionally with pattern length and mode);
            # validate everything first, then apply
            str_params = self.param.split(',')
            try:
                n_linetype = int(str_params[0])
                if n_linetype not in \
                    [-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8]:
                    raise Exception
                if len(str_params)>1:
                    pattern_length = int(str_params[1])
                    if (pattern_length<=0) or (32767<pattern_length):
                        raise Exception
                if len(str_params)>2:
                    mode = str_params[2]
                    if (mode!='0') and (mode!='1'):
                        raise Exception
            except:
                raise cExcHpgl2IgnoreInstruction(
                    f'LT の linetype が不正 ({self.param})')
            if len(str_params)>=1:
                # store the line type as its string number
                st.set_param(PEN_LINETYPE, str_params[0])
            if len(str_params)>=2:
                # store the pattern length
                pattern_length = int(str_params[1])
                st.set_param(PEN_LINE_PATTERN_LENGTH, pattern_length)
            if len(str_params)>=3:
                # store the pattern mode ('0' relative / '1' absolute)
                mode = str_params[2]
                st.set_param(PEN_LINE_PATTERN_MODE, mode)
        return None
class cHpgl2PW(cHpgl2ElmCommand):
    """ PW, Pen Width.

    Sets the width of one pen (width, pen-number) or of all pens (width).
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        # no parameters: not implemented
        # (would presumably reset to the default width — TODO confirm)
        if len(self.param)==0:
            return None
        try:
            params = self.param.split(',')
            if len(params)>1:
                # params: 0: width, 1: pen number
                # change the width of the specified pen only
                pen_width = float(params[0])
                pen_number = int(params[1])
                pens_info = st.get_param(PENS_INFO)
                pens_info[pen_number][PEN_WIDTH] = pen_width
                st.set_param(PENS_INFO, pens_info)
            elif len(params)>0:
                # params: 0: width
                # change the width of every pen
                pen_width = float(params[0])
                pens_info = st.get_param(PENS_INFO)
                for p in pens_info:
                    p[PEN_WIDTH] = pen_width
                st.set_param(PENS_INFO, pens_info)
        except:
            raise cExcHpgl2IgnoreInstruction(
                f'PW のパラメータが不正 ({self.param})')
        return None
class cHpgl2SP(cHpgl2ElmCommand):
    """ SP, Select Pen.

    Selects the active pen; out-of-range pen numbers wrap around the
    defined pens (excluding pen 0).
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        pen_used = 0
        if (len(self.param)==0) or (self.param=='0'):
            pen_used = 0
        else:
            try:
                pen_no = int(self.param)
                if pen_no<0:
                    # FIX: was a bare `raise` with no active exception,
                    # which relied on the resulting RuntimeError being
                    # caught below; raise a concrete error instead.
                    raise ValueError(self.param)
            except:
                raise cExcHpgl2IgnoreInstruction(
                    f'SPのパラメータが不正 {self.param}')
            # map the requested pen onto the defined pens (1..N-1)
            number_of_pens = st.get_param(NP_NUMBER_OF_PENS)
            pen_used = ((pen_no-1)%(number_of_pens-1))+1
        # select the pen and mirror its color into the status
        st.set_param(PEN_NUMBER, str(pen_used))
        pen_info = st.get_param(PENS_INFO)[pen_used]
        st.set_param(PEN_COLOR, pen_info[PEN_COLOR])
        return None
class cHpgl2SD(cHpgl2ElmCommand):
    """ SD, Standard Font Definition
    Parses kind,value pairs and stores the selected font attributes.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        """Apply each kind/value pair to the font status.

        :raises cExcHpgl2IgnoreInstruction: on any malformed pair.
        """
        kvs = self.param.split(',')
        # Parameters come as kind,value,kind,value...
        if len(kvs)%2!=0:
            raise cExcHpgl2IgnoreInstruction(
                f'kindとvalueのセットになっていない({self.param})')
        for i in range(len(kvs)//2):
            kind = kvs[i*2]
            value = kvs[i*2+1]
            if kind not in ['1','2','3','4','5','6','7']:
                raise cExcHpgl2IgnoreInstruction(
                    f'SDのkeyが不正{self.param}')
            # BUG FIX: kind is a string ('1'..'7'); the original compared
            # it against ints (kind==1), so none of the branches below
            # ever executed.
            if kind=='1':
                # Character set
                # NOTE(review): membership is checked against
                # CHARACTER_SET but the lookup uses DICT_CHARACTER_SET —
                # confirm both accept the same keys.
                if value not in CHARACTER_SET:
                    raise cExcHpgl2IgnoreInstruction(
                        f'SD の Character set が不正 ({self.param})')
                st.set_param(CHARACTER_SET, DICT_CHARACTER_SET[value])
            elif kind=='2':
                # Font spacing
                if value not in DICT_FONT_SPACING:
                    raise cExcHpgl2IgnoreInstruction(
                        f'SD の Font Spacing が不正 ({self.param})')
                st.set_param(FONT_SPACING, DICT_FONT_SPACING[value])
            elif kind=='3':
                # Pitch; must be within [0, 32767]
                try:
                    f_pitch = float(value)
                    if f_pitch<0 or 32767<f_pitch:
                        raise ValueError(value)
                except ValueError:
                    raise cExcHpgl2IgnoreInstruction(
                        f'SD の Pitch が不正 ({self.param})')
                # NOTE(review): stored under FONT_SPACING, the same key
                # kind 2 writes — confirm a dedicated pitch key is not
                # intended.
                st.set_param(FONT_SPACING, f_pitch)
            elif kind=='4':
                # Height; must be within [0, 32767]
                try:
                    f_height = float(value)
                    if f_height<0 or 32767<f_height:
                        raise ValueError(value)
                except ValueError:
                    raise cExcHpgl2IgnoreInstruction(
                        f'SD の height が不正 ({self.param})')
                st.set_param(FONT_HEIGHT, f_height)
            elif kind=='5':
                # Posture (upright/italic style)
                if value not in DICT_FONT_POSTURE:
                    raise cExcHpgl2IgnoreInstruction(
                        f'SD の Posture が不正 ({self.param})')
                st.set_param(FONT_POSTURE, DICT_FONT_POSTURE[value])
            elif kind=='6':
                # Stroke weight: -7..7, or 9999 (use current)
                try:
                    f_weight = int(value)
                    if f_weight!=9999 and (f_weight<-7 or 7<f_weight):
                        raise ValueError(value)
                except ValueError:
                    raise cExcHpgl2IgnoreInstruction(
                        f'SD の weight が不正 ({self.param})')
                st.set_param(FONT_WEIGHT, f_weight)
            elif kind=='7':
                # Typeface
                if value not in DICT_FONT_TYPE_FACE:
                    raise cExcHpgl2IgnoreInstruction(
                        f'SD の Typeface が不正 ({self.param})')
                st.set_param(FONT_TYPE_FACE, DICT_FONT_TYPE_FACE[value])
        return None
class cHpgl2SS(cHpgl2ElmCommand):
    """ SS, Select Standard Font
    Resets the current typeface back to the default standard font.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        # Restore the default typeface; any SS parameters are ignored here.
        st.set_param(FONT_TYPE_FACE, DICT_FONT_TYPE_FACE[DEFAULT_FONT_TYPE_FACE])
        return None
class cHpgl2BP(cHpgl2ElmCommand):
    """ BP, Begin Plot
    Parses kind,value pairs; only kind 1 (picture name) is stored.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        """Store the picture name if supplied; ignore other BP kinds.

        :raises cExcHpgl2IgnoreInstruction: on malformed parameters.
        """
        if len(self.param)==0:
            return None
        kvs = self.param.split(',')
        # Parameters come as kind,value,kind,value...
        # BUG FIX: validation was done with asserts (stripped under -O);
        # raise the same exception type the sibling commands use.
        if len(kvs)%2!=0:
            raise cExcHpgl2IgnoreInstruction(
                f'kindとvalueのセットになっていない({self.param})')
        for i in range(len(kvs)//2):
            kind = kvs[i*2]
            value = kvs[i*2+1]
            # BUG FIX: kind is a string and must be one of '1'..'5'; the
            # original asserted `kind not in [1,2,3,4,5]` (int list, and
            # inverted polarity), which always passed, and then compared
            # kind==1 against an int so kind 1 was never handled.
            if kind not in ['1','2','3','4','5']:
                raise cExcHpgl2IgnoreInstruction(f'keyが不正{self.param}')
            if kind=='1':
                # Picture Name
                st.set_param(BP_PIC_NAME, self.quated_string(value))
            # Other kinds do not affect this program and are ignored.
        return None
class cHpgl2PS(cHpgl2ElmCommand):
    """ PS, Plot Size
    Per-axis physical paper limits.
    length: extent along the paper-feed direction
    width: paper width
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        """Store the plot length and (optionally) width; no-op without
        parameters.

        :raises cExcHpgl2IgnoreInstruction: if a value is not positive.
        """
        if len(self.param)==0:
            return None
        fields = self.param.split(',')
        # First field: paper length (split() always yields at least one).
        length = int(fields[0])
        if length<=0:
            raise cExcHpgl2IgnoreInstruction(
                f'PS length error. {fields[0]}')
        st.set_param(PS_LENGTH, length)
        # Second field (optional): paper width.
        if len(fields)>1:
            width = int(fields[1])
            if width<=0:
                raise cExcHpgl2IgnoreInstruction(
                    f'PS width error. {fields[1]}')
            st.set_param(PS_WIDTH, width)
        return None
class cHpgl2NP(cHpgl2ElmCommand):
    """ NP, Number of Pens
    Sets how many pens are defined and rebuilds their width/color table.
    """
    def __init__(self, str_cmnd:str):
        super().__init__(str_cmnd)
    def set_status(self, st:cHpgl2Status) -> None:
        """Validate the pen count (a power of two, at least 2; default 8)
        and rebuild the pen table with default width and cycling colors.

        :raises cExcHpgl2IgnoreInstruction: if the parameter is invalid.
        """
        # No parameter: fall back to the default of 8 pens.
        if len(self.param)==0:
            st.set_param(NP_NUMBER_OF_PENS, 8)
            return None
        pen_count = None
        try:
            pen_count = int(self.param)
            # Must be a power of two (n & (n-1) == 0) and no smaller than 2.
            if (pen_count & (pen_count - 1)) != 0 or pen_count < 2:
                raise Exception
        except:
            raise cExcHpgl2IgnoreInstruction(
                f'NP の パラメータが不正 ({pen_count})')
        st.set_param(NP_NUMBER_OF_PENS, pen_count)
        # Every pen gets the default width and a color cycled from the
        # palette.
        default_width = st.get_param(PEN_DEFAULT_PEN_WIDTH)
        palette_size = len(DICT_PEN_COLOR_RGB)
        pens = []
        for index in range(pen_count):
            pens.append({
                PEN_WIDTH: default_width,
                PEN_COLOR: DICT_PEN_COLOR_RGB[str(index % palette_size)]
            })
        st.set_param(PENS_INFO, pens)
        return None
|
from flask import jsonify
from psycopg2 import IntegrityError
from app.DAOs.PhoneDAO import PhoneDAO
from app.DAOs.ServiceDAO import ServiceDAO
import phonenumbers
# Accepted phone-type codes; their meanings are not defined in this
# module — confirm against the DB schema before relying on them.
PHONETYPEKEYS = ['E', 'L', 'F', 'M']
# Top-level keys required in service-phone JSON payloads.
SERVICEPHONEKEYS = ['numbers']
def _buildPhoneResponse(phone_tuple):
"""
Private Method to build phone number dictionary to be JSONified.
:param: phone_tuple: response tuple from SQL query
:returns Dict: Phone information with keys:
.. code-block:: python
{'phoneid', 'pnumber', 'ptype'}
"""
response = {}
response['phoneid'] = phone_tuple[0]
response['pnumber'] = phone_tuple[1]
response['ptype'] = phone_tuple[2]
return response
class PhoneHandler:
    """
    Handler Class to manage getting/creating/modifying phones
    """
    def unpackPhones(self, json):
        """
        Returns a list of phone numbers given a json body with phone numbers and types.

        :param json: JSON payload with phone numbers
        :type json: array
        :return list: list of numbers
        """
        # The payload is already an iterable of number entries; a list()
        # copy is equivalent to the original append loop.
        return list(json)
    def insertServicePhone(self, sid, uid, json):
        """
        Create a phone number and add it to a service given its ID.
        Uses :func:`~app.DAOs.PhoneDAO.PhoneDAO.insertPhones` as well
        as :func:`~app.handlers.PhoneHandler.PhoneHandler.unpackPhones`

        :param sid: The ID of the service to add phone numbers to
        :type sid: int
        :param uid: User ID.
        :type uid: int
        :param json: JSON containing the phone numbers to add
        :type json: array
        :return: result from :func:`~app.DAOs.PhoneDAO.PhoneDAO.insertPhones`,
            or None when no numbers were supplied
        """
        for key in SERVICEPHONEKEYS:
            if key not in json:
                return jsonify(Error='Missing credentials from submission: ' + key), 400
        # Consistency fix: use self instead of instantiating a second
        # PhoneHandler just to reach an instance method (as the sibling
        # removePhoneByServiceID already does).
        phones = self.unpackPhones(json['numbers'])
        if not phones:
            return None
        return PhoneDAO().insertPhones(phones, sid, uid=uid)
    def getPhonesByServiceID(self, sid, no_json=False):
        """
        Get the phone numbers of a service given its ID.
        Uses :func:`~app.DAOs.PhoneDAO.PhoneDAO.getPhonesByServiceID`
        as well as :func:`~app.handlers.PhoneHandler._buildPhoneResponse`

        :param sid: The ID of the service whose phone numbers to fetch
        :type sid: int
        :param no_json: Specify if response is Json or not
        :type no_json: bool
        :return JSON: list of phones (None when the service has none)
        """
        phones = PhoneDAO().getPhonesByServiceID(sid=sid)
        if not phones:
            response = None
        else:
            response = [_buildPhoneResponse(phone_tuple=row) for row in phones]
        if no_json:
            return response
        return jsonify(response)
    def removePhoneByServiceID(self, sid, json, uid):
        """
        Remove a phone number from a service given its ID.
        Uses:

        * :func:`~app.DAOs.PhoneDAO.PhoneDAO.removePhonesByServiceID`
        * :func:`~app.handlers.PhoneHandler._buildPhoneResponse`
        * :func:`~app.handlers.PhoneHandler.PhoneHandler.unpackPhones`

        :param sid: The ID of the service to remove phone numbers from
        :type sid: int
        :param uid: User ID.
        :type uid: int
        :param json: JSON containing the phone numbers to remove
        :type json: array
        :return JSON: phone number information.
        """
        for key in SERVICEPHONEKEYS:
            if key not in json:
                return jsonify(Error='Missing credentials from submission: ' + key), 400
        phones = self.unpackPhones(json['numbers'])
        dao = PhoneDAO()
        phoneInfo = []
        for pid in dao.removePhonesGivenServiceID(sid=sid, phones=phones, uid=uid):
            # Non-int entries signal a failure for that phone id.
            if not isinstance(pid, int):
                return jsonify(Error="Error with phone id "+str(pid)), 404
            phone = dao.getPhoneByID(pid)
            if phone is not None:
                phoneInfo.append(_buildPhoneResponse(phone))
        return jsonify({"numbers":phoneInfo}), 200
|
import os
import json
# Location of the dev-split JSON files to scan.
path = "/mnt/SC/ASC/task3/ELE/dev/"
files = os.listdir(path)
# Maps answer letters to option indices.
map_ans = {'A':0,'B':1,'C':2,'D':3}
# Collects filenames containing at least one multi-word option.
multi_file = []
def is_multi(candidate):
    """Return True if any option in *candidate* contains two or more words."""
    return any(len(option.split()) >= 2 for option in candidate)
# Collect files whose option lists contain multi-word candidates.
for file in files:
    with open(path+file,"r") as f:
        data = json.load(f)
        text = data['article']
        candidates = data['options']
        answer = data['answers']
        #print(candidates)
        for candidate in candidates:
            if is_multi(candidate):
                multi_file.append(file)
                break
# Replace each single-answer blank ('_') with its correct answer text;
# multi-answer blanks are preserved via a temporary '^' placeholder.
for file in multi_file:
    print(file)
    with open(path+file,"r") as f:
        data = json.load(f)
        text = data['article']
        candidates = data['options']
        answer = data['answers']
    ## main code
    for i in range(len(answer)):
        ## Multi-word options: turn the blank into '^' for now;
        ## single-word options: substitute the answer text directly.
        #print(candidates[i])
        #print(candidates[i][map_ans[answer[i]]])
        if is_multi(candidates[i]):
            data['article'] = data['article'].replace('_',"^",1)
        else:
            data['article'] = data['article'].replace('_',candidates[i][map_ans[answer[i]]],1)
            # Mark consumed entries for removal below.
            data['answers'][i] = 'x'
            data['options'][i] = 'x'
    # Restore the kept (multi-answer) blanks; cap of 100 replacements.
    data['article'] = data['article'].replace('^',"_",100)
    data['answers'] = list(filter(lambda a: a != 'x', data['answers']))
    data['options'] = list(filter(lambda a: a != 'x', data['options']))
    print(text)
    ret = json.dumps(data)
    # NOTE(review): 'multi'+file+'.json' appends a second '.json' when
    # the input name already carries the extension — confirm intended.
    with open('multi'+file+'.json', 'w') as fp:
        fp.write(ret)
|
# -*- coding: utf-8 -*-
import os
import os.path
import requests
import shutil
import time
from behave import given, then
from subprocess import check_output, CalledProcessError, STDOUT
def copytree(src, dst):
    """Variant of shutil.copytree() that works when ``dst`` already exists."""
    print('Copying tree "%s".' % src)
    assert os.path.isdir(dst)
    for entry in os.listdir(src):
        source = os.path.join(src, entry)
        target = os.path.join(dst, entry)
        if not os.path.isdir(source):
            print('Copying file "%s".' % target)
            shutil.copyfile(source, target)
        else:
            # Recurse into subdirectories, creating them in dst first.
            os.mkdir(target)
            copytree(source, target)
@given('Application "{name}" exists')
def provision_application(context, name):
    # Behave step stub: no provisioning is performed here.
    # NOTE(review): presumably the application exists via the test
    # environment setup — confirm this step is intentionally a no-op.
    pass
@given('I submit the "{name}" sample')
def submit_sample(context, name):
    # Copy the named sample into the working tree, then stage and commit
    # it with git. Output is printed even when the git command fails.
    copytree(src=os.path.join(context.test_root, 'samples', name), dst='.')
    try:
        output = check_output(['git', 'add', '.'], stderr=STDOUT)
    except CalledProcessError as e:
        output = e.output
        raise
    finally:
        print(output)
    try:
        # NOTE(review): unlike the `git add` call above, stderr is not
        # merged here — confirm the asymmetry is intended.
        output = check_output(['git', 'commit', '-m', 'Adds sample.'])
    except CalledProcessError as e:
        output = e.output
        raise
    finally:
        print(output)
@then('Application "{name}" should be deployed')
def check_deployment(context, name):
    # Query the agent's process list and wait for the single expected
    # process to finish its deployment pipeline.
    processes = requests.get(context.smartmob_agent['list']).json()
    processes = processes['processes']
    print(processes)
    assert len(processes) == 1
    p = processes[0]
    # NOTE(review): the expected app/slug are hard-coded to 'myapp'
    # instead of using the step's `name` parameter — confirm intended.
    assert p['app'] == 'myapp'
    assert p['slug'] == 'myapp.1'
    # Poll (100 ms busy-wait) until the process leaves its transient states.
    while p['state'] in ('pending', 'downloading', 'unpacking'):
        time.sleep(0.1)
        p = requests.get(p['details']).json()
    assert p['state'] == 'processing'
|
# Shared backing list for the menu operations below.
lis=[]
def menu():
    # Print the numbered menu of available operations.
    print(format("menu","=^35"))
    print("1.To Add new element")
    print("2.To Delete element")
    print("3.To Find node value")
    print("4.To Exit program")
    print(format("=====","=^35"))
def add(element):
    # Append element to the shared list.
    lis.append(element)
def delet(element):
    # Remove the first occurrence of element (ValueError if absent).
    lis.remove(element)
def node(element):
    # Return the index of element in the shared list (ValueError if absent).
    return lis.index(element)
def main():
    # Read the initial list, then loop on menu choices until 4 (exit).
    n=int(input("Enter number of elements:"))
    print("Enter the elements : ")
    for i in range(0,n):
        ele=int(input())
        lis.append(ele)
    print(lis)
    menu()
    usr=int(input("Enter your choice:"))
    # NOTE(review): assert is stripped under `python -O`; input
    # validation should raise instead — confirm before hardening.
    assert usr<5,"Invalid Input!"
    while(usr!=4):
        element=int(input("Enter a value: "))
        if usr==1:
            add(element)
            print(lis)
        elif usr==2:
            delet(element)
            print(lis)
        else:
            # Any remaining choice (including 3) looks up the index.
            print("Index of your element is:",node(element))
        menu()
        usr = int(input("Enter your choice:"))
        assert usr<5,"Invalid Input!"
    print(format("The end","*^40"))
main()
|
from rest_framework import routers
from .views import DeviceViewSet, PositionView
from django.urls import path, include
# DRF router exposing the device endpoints without trailing slashes.
router = routers.SimpleRouter(trailing_slash = False)
router.register(r'devices', DeviceViewSet, basename = 'device')
# Routed URLs: the device viewset plus a single position endpoint.
urlpatterns = [
    path(r'', include(router.urls)),
    path(r'position', PositionView.as_view(), name = 'position')
]
|
import datetime
# Simulated data, as could be fetched from a database:
# [first name, last name, age, is male]
data = [
    ["Josette", "Martin", 25, False],
    ["Robert", "Durand", 45, True],
    ["Lucien", "Pinard", 33, True],
]
# Model class for objects (instances) representing people.
class Person:
    # Default attribute values shared by every new person.
    firstName = ""
    age = 0
    lastName = ""
    isMale = False

    def fullName(self):
        """Return the person's full name."""
        return f"{self.firstName} {self.lastName}"

    def sayHello(self):
        """Return a greeting message from this person."""
        return f"Bonjour, je m'appelle {self.fullName()}"

    def sayHelloTo(self, otherPerson):
        """Return a greeting from this person addressed to another person."""
        return f"Bonjour {otherPerson.firstName}, je m'appelle {self.fullName()}"
# Model class for objects (instances) representing articles.
class Article:
    # NOTE(review): evaluated once at class-definition time, so every
    # Article shares the same createdAt unless reassigned per instance —
    # confirm this is intended.
    createdAt = datetime.datetime.now()
    title = ""
    content = ""
    # Holds a Person instance: the article's author.
    author = None
# Build Person objects from the data table fetched above.
people = []
for personData in data:
    # Create a new person object.
    person = Person()
    # Fill its properties from the row.
    person.firstName = personData[0]
    person.lastName = personData[1]
    person.age = personData[2]
    person.isMale = personData[3]
    # Collect the new object.
    people.append(person)
# Create a new article object.
article = Article()
article.title = "Le Python ça déchire"
article.content = "texte texte texte texte texte texte texte "
# Set Robert (second row) as the article's author.
article.author = people[1]
# Print the author's full name.
print(article.author.fullName())
# Have each person greet Josette (first row).
for person in people:
    print(person.sayHelloTo(people[0]))
|
#!/usr/bin/env python
import ROOT
import joblib
import pandas as pd
import mputils
def scale_by_bw(h):
    """Divide each bin's content and error of histogram *h* by its bin
    width, in place (turns counts into a density)."""
    print(h.GetName())
    h.Sumw2()
    for bin_index in range(1, h.GetNbinsX()+1):
        width = h.GetBinWidth(bin_index)
        h.SetBinContent(bin_index, h.GetBinContent(bin_index) / width)
        h.SetBinError(bin_index, h.GetBinError(bin_index) / width)
def plot_some(infile, outfile):
    """Load a joblib-pickled DataFrame and write bin-width-normalized
    histograms of pt and soft-drop observables to a ROOT file."""
    data = joblib.load(infile)
    fout = ROOT.TFile(outfile, 'recreate')
    # Inclusive subtracted-pt spectrum on log-spaced bins.
    hpt = ROOT.TH1F('hpt', 'hpt', 10, mputils.logbins(1, 100, 11))
    [ hpt.Fill(x) for x in data['ptsub'] ]
    scale_by_bw(hpt)
    # Selection window 20 < ptsub < 60 for the substructure observables.
    dsel = data.loc[(data['ptsub'] > 20) & (data['ptsub'] < 60)]
    hptsel = ROOT.TH1F('hptsel', 'hptsel', 10, mputils.logbins(1, 100, 11))
    [ hptsel.Fill(x) for x in dsel['ptsub'] ]
    scale_by_bw(hptsel)
    print(infile, len(dsel))
    #hsd0zg = ROOT.TH1F('hsd0zg', 'hsd0zg', 22, -1.5, 0.7)
    #hsd2zg = ROOT.TH1F('hsd2zg', 'hsd2zg', 22, -1.5, 0.7)
    # Soft-drop zg for the two grooming settings (sd0 / sd2).
    hsd0zg = ROOT.TH1F('hsd0zg', 'hsd0zg', 12, 0, 0.6)
    [ hsd0zg.Fill(x) for x in dsel['sd0zg'] ]
    scale_by_bw(hsd0zg)
    hsd2zg = ROOT.TH1F('hsd2zg', 'hsd2zg', 12, 0, 0.6)
    [ hsd2zg.Fill(x) for x in dsel['sd2zg'] ]
    scale_by_bw(hsd2zg)
    # Groomed radius Rg.
    hsd0Rg = ROOT.TH1F('hsd0Rg', 'hsd0Rg', 20, 0, 0.6)
    [ hsd0Rg.Fill(x) for x in dsel['sd0Rg'] ]
    scale_by_bw(hsd0Rg)
    hsd2Rg = ROOT.TH1F('hsd2Rg', 'hsd2Rg', 20, 0, 0.6)
    [ hsd2Rg.Fill(x) for x in dsel['sd2Rg'] ]
    scale_by_bw(hsd2Rg)
    # Groomed angle theta_g.
    hsd0thg = ROOT.TH1F('hsd0thg', 'hsd0thg', 15, 0, 1.5)
    [ hsd0thg.Fill(x) for x in dsel['sd0thetag'] ]
    scale_by_bw(hsd0thg)
    hsd2thg = ROOT.TH1F('hsd2thg', 'hsd2thg', 15, 0, 1.5)
    [ hsd2thg.Fill(x) for x in dsel['sd2thetag'] ]
    scale_by_bw(hsd2thg)
    # Persist every histogram registered in the file and close it.
    fout.Write()
    fout.Close()
def main():
    """Produce one histogram file per input dataset."""
    jobs = [
        ('merged_rg_PbPb_std.pd', 'hist_std.root'),
        ('merged_rg_PbPb_cs004.pd', 'hist_cs004.root'),
        ('merged_rg_PbPb_cs404.pd', 'hist_cs404.root'),
    ]
    for infile, outfile in jobs:
        plot_some(infile, outfile)


if __name__ == '__main__':
    main()
|
from googlevoice import Voice
import time
from googlevoice.util import input
# Python 2 script (xrange, backtick repr syntax): log in to Google Voice
# and send the same SMS to the 100 numbers 5555512xy for x,y in 0..9.
voice = Voice()
voice.login()
for x in xrange(0,10):
    for y in xrange(0,10):
        voice.send_sms("5555512%s%s" % (`x`,`y`), "Add your message here")
        time.sleep(10) #so Google won't rate limit
|
def get_a_down_arrow_of(n):
    """Return a down-arrow pattern of *n* rows built from single digits
    (numbers wrap modulo 10), each row centered by leading spaces."""
    rows = []
    for width in range(n, 0, -1):
        digits = [str(d % 10) for d in range(1, width)]
        digits += [str(d % 10) for d in range(width, 0, -1)]
        rows.append(' ' * (n - width) + ''.join(digits))
    return '\n'.join(rows)
'''
Given a number n, make a down arrow shaped pattern.
For example, when n = 5, the output would be:
123454321
1234321
12321
121
1
and for n = 11, it would be:
123456789010987654321
1234567890987654321
12345678987654321
123456787654321
1234567654321
12345654321
123454321
1234321
12321
121
1
An important thing to note in the above example is that the numbers greater
than 9 still stay single digit, like after 9 it would be 0 - 9 again
instead of 10 - 19
'''
|
from Conv_Net.two_dim_resnet import make_two_dim_resnet
from Transformer_Net import Prot_Transformer
import torch.nn as nn
import torch
from torch.utils.checkpoint import checkpoint_sequential as cp_seq, checkpoint as cp
class Final_Net(nn.Module):
    '''Putting the whole model together: a protein transformer feeding a
    2-D residual network for distance-bin prediction, plus (currently
    disabled) angle heads.'''
    def __init__(self, ndistbins, nangbins, embed_size, res_heads, seq_heads, resnet_layers=64, embed_expand=4, dropout=0.0):
        super(Final_Net, self).__init__()
        self.resnet_layers = resnet_layers
        # Sequence model producing per-residue embeddings and attention maps.
        self.transformer = Prot_Transformer(embed_size, res_heads, seq_heads, embed_expand, dropout)
        # 2-D resnet over pairwise features; input channels combine the
        # attention maps (both orientations) and broadcast embeddings.
        self.res_net = make_two_dim_resnet(res_heads*10 + 2*embed_size, ndistbins, 20*res_heads + 4*embed_size, resnet_layers, batch_norm=True, atrou_rates=[1,2], dropout=dropout)
        # Angle heads (shared trunk + separate phi/psi projections);
        # NOTE(review): these are unused in forward() below — confirm
        # whether angle prediction is intentionally disabled.
        self.angle_net = nn.Sequential(nn.Linear(embed_size,embed_size*embed_expand),
                                        nn.ReLU(),
                                        nn.Linear(embed_size*embed_expand,embed_size*embed_expand),
                                        nn.ReLU()
                                        )
        self.phi_net = nn.Sequential(nn.Linear(embed_size*embed_expand,embed_size*embed_expand),
                                      nn.ReLU(),
                                      nn.Linear(embed_size*embed_expand,nangbins)
                                      )
        self.psi_net = nn.Sequential(nn.Linear(embed_size*embed_expand,embed_size*embed_expand),
                                      nn.ReLU(),
                                      nn.Linear(embed_size*embed_expand,nangbins)
                                      )
    def forward(self, x, mask=None):
        '''Intermediates allows for recording intermediate guesses for the distance bin prediction'''
        # x: per-residue embeddings; res_attn: pairwise attention maps.
        # Assumes x is (batch, embed, seq) given the dim=1 concatenations
        # below — TODO confirm against Prot_Transformer's output layout.
        x, res_attn = self.transformer(x, mask)
        # Broadcast embeddings into an (i, j) pairwise grid and stack
        # along the channel dimension.
        conv_inp = torch.cat(torch.broadcast_tensors(x[:,:, None], x[:, :, :, None]), dim=1)
        # Append the attention maps and their transposes as channels.
        conv_inp = torch.cat((conv_inp, res_attn, res_attn.permute(0,1,3,2)),dim=1)
        #conv_inp = torch.cat((res_attn, res_attn.permute(0,1,3,2)), dim=1)
        dist = self.res_net(conv_inp)
        # Angle predictions are currently switched off.
        angle = None #self.angle_net(x.permute(0,2,1))
        phi = None #self.phi_net(angle)
        psi = None #self.psi_net(angle)
        return dist, phi, psi
|
import cdutil,cdms2
import cdtime
import numpy as np
from mpi4py import MPI
from regrid2 import Regridder
# Paths and run configuration (Python 2 script).
modelFolder = '/Users/joshsims/gcModels/maurer_daily'
outfolder = '/Users/joshsims/gcModels/extremes/'
realization = '01'
# Per-year caches of open files and (regridded) variables.
tempFileDict = {}
tmaxFileDict = {}
tminFileDict = {}
precipFileDict = {}
tmaxDict = {}
tminDict = {}
tavgDict = {}
prcpDict = {}
timesDict = {}
# The 1-degree model grid, subset to the study region, is the
# regridding target for the observational data below.
g = cdms2.open('/Users/joshsims/gcModels/regridded_1deg_tas_Amon_ACCESS1-0_rcp45_r1i1p1_200601-210012.nc')
Tas = g('tas')
Tas = Tas(longitude=((360-95.0),(360-73.0)),\
    latitude=(39.0, 50.0),squeeze=1)
grid2 = Tas.getGrid()
# Load, subset, and regrid the daily observations for each year.
for year in range(1950,1951):
    #tempDict[year] = cdms2.open(modelFolder + "/" + "temp_" + realization + "_" + str(year) + ".nc")
    tmaxFileDict[year] = cdms2.open(modelFolder + "/" + "gridded_obs.daily.Tmax." + str(year) + ".nc")
    tminFileDict[year] = cdms2.open(modelFolder + "/" + "gridded_obs.daily.Tmin." + str(year) + ".nc")
    precipFileDict[year] = cdms2.open(modelFolder + "/" + "gridded_obs.daily.Prcp." + str(year) + ".nc")
    j = tmaxFileDict[year]
    m = tminFileDict[year]
    k = precipFileDict[year]
    # Subset each variable to the study region.
    tmax = j('Tmax')
    tmax = tmax(longitude=(-95.0, -73.0),\
        latitude=(39.0, 50.0),squeeze=1)
    tmin = m('Tmin')
    tmin = tmin(longitude=(-95.0, -73.0),\
        latitude=(39.0, 50.0),squeeze=1)
    prcp = k('Prcp')
    prcp = prcp(longitude=(-95.0, -73.0),\
        latitude=(39.0, 50.0),squeeze=1)
    # Regrid the observations onto the model grid.
    grid1 = tmax.getGrid()
    regridFunc2 = Regridder(grid1,grid2)
    tmaxDict[year] = regridFunc2(tmax)
    tminDict[year] = regridFunc2(tmin)
    prcpDict[year] = regridFunc2(prcp)
    # NOTE(review): tavg is computed from the UNregridded tmax/tmin while
    # the other dicts store regridded fields — confirm this is intended.
    tavgDict[year] = (tmax + tmin)/2.0
    # Build component-time objects relative to Jan 1 of the year.
    tim = tmax.getTime()
    timesDict[year] = [u.tocomp()\
        for u in [cdtime.reltime(t,"days since " + str(year) + "-1-1") for t in tim]]
# Axes of the regridded 1950 field (Python 2 print statements).
lat = tmaxDict[1950].getLatitude()
print len(lat)
lon = tmaxDict[1950].getLongitude()
print len(lon)
#cdutil.times.setSlabTimeBoundsDaily(Tmax)
#cdutil.times.setSlabTimeBoundsDaily(Tmin)
#cdutil.times.setSlabTimeBoundsDaily(Tavg)
###################################################################
# Distribute latitude indices across MPI ranks with Scatterv.
comm = MPI.COMM_WORLD
# number of total items to process
nLat = len(lat)
# number of items to send to each thread (before remainder)
my_nLat = nLat//(comm.size)
# remainder
r_nLat = nLat%(comm.size)
# create array of items to scatter
if comm.rank == 0:
    latA = np.arange(nLat, dtype=int)
else:
    latA = None
#create arrays to catch the scatter: the first r_nLat ranks get one
#extra item each.
if comm.rank <= (r_nLat - 1):
    my_latA = np.zeros(my_nLat + 1,dtype=int)
else:
    my_latA = np.zeros(my_nLat,dtype=int)
# set up sendcounts
# NOTE(review): every count/displacement is multiplied by 2 — presumably
# to match the MPI element size used here; confirm against MPI.INT and
# the numpy int dtype on this platform.
sendcountsLat = ()
for x in range(r_nLat):
    sendcountsLat = sendcountsLat + ((my_nLat + 1) * 2,)
for y in range(comm.size - r_nLat):
    sendcountsLat = sendcountsLat + (my_nLat * 2,)
# set up displacement counts
displaceLat = ()
disLat = 0
for d in range(r_nLat + 1):
    displaceLat = displaceLat + (disLat,)
    # NOTE(review): the first two branches below are identical — the
    # elif can never differ from the if; confirm the intended logic.
    if r_nLat != 0 and len(displaceLat) <= (r_nLat):
        disLat += ((my_nLat + 1) * 2)
    elif len(displaceLat) <= (r_nLat):
        disLat += ((my_nLat + 1) * 2)
    else:
        disLat += (my_nLat * 2)
for e in range(comm.size - (r_nLat + 1)):
    displaceLat = displaceLat + (disLat,)
    disLat += (my_nLat * 2)
if comm.rank == 0:
    print sendcountsLat
    print displaceLat
displaceLon = ()
# Scatter data into my_A arrays
comm.Scatterv( [latA,sendcountsLat,displaceLat, MPI.INT],my_latA )
print(my_latA)
###########################################################
# Write one tab-separated text file per grid point with daily
# Tmax/Tmin/Prcp values.
# NOTE(review): x comes from the scattered LATITUDE indices but is used
# to index lon (and the data's last axis), while y iterates lat — this
# looks like a lat/lon index mix-up; confirm against the data layout.
i = 0
while i<len(my_latA):
    x = my_latA[i]
    y = 0
    while y<len(lat):
        print 'processing ',lon[x],lat[y]
        outname = outfolder + "obs_" + str(lat[y]) + "_" + str(lon[x]) + ".txt"
        outfile = open(outname,'w')
        # outfile.write("Header:CMIP3 Model data\n")
        outfile.write("Time\tTmax(C)\tTmin(C)\tPrcp(mm)\n")
        for year in range(1950,1951):
            times = timesDict[year]
            tavg = tavgDict[year]
            tmax = tmaxDict[year]
            tmin = tminDict[year]
            prcp = prcpDict[year]
            t = 0
            while t<len(tim):
                # Zero-pad month and day to two digits.
                if times[t].month <= 9:
                    month = "0" + str(times[t].month)
                else:
                    month = times[t].month
                if times[t].day <= 9:
                    day = "0" + str(times[t].day)
                else:
                    day = times[t].day
                outstring="%s%s%s\t%s\t%s\t%s\n" % \
                    (times[t].year,month,day,tmax[t][y][x],tmin[t][y][x],prcp[t][y][x])
                outfile.write(outstring)
                # print(outstring)
                t += 1
        outfile.close()
        y += 1
    i += 1
|
# Read a number, then test five user-supplied candidates for divisibility.
n=int(input("Enter number n : "))
for i in range(5):
    # NOTE(review): m == 0 raises ZeroDivisionError — confirm whether
    # input validation is needed.
    m=int(input("Enter factor to check : "))
    if(n%m==0):
        print(m," is factor")
    else:
        print(m," is not a factor")
|
import re
# Print every run of 2+ consecutive vowels from stdin, one per line,
# or -1 when there is none. (flags=re.I is redundant here since the
# character class already lists both cases.)
ans = re.findall(r'[aeiouAEIOU]{2,}',input(),flags = re.I)
if(ans == []):
    print("-1")
else:
    print(*ans,sep='\n')
# import re
# v = "aeiou"
# c = "qwrtypsdfghjklzxcvbnm"
# m = re.findall(r"(?<=[%s])([%s]{2,})[%s]" % (c, v, c), input(), flags = re.I)
# print('\n'.join(m or ['-1']))
|
# -*- coding: utf-8 -*-
class Solution:
    def countBattleships(self, board):
        """Count battleships on the board: an 'X' cell starts a new ship
        exactly when neither its top nor its left neighbor is 'X'."""
        if not board or not board[0]:
            return 0
        total = 0
        for r, row in enumerate(board):
            for c, cell in enumerate(row):
                if cell != "X":
                    continue
                if r > 0 and board[r - 1][c] == "X":
                    continue
                if c > 0 and row[c - 1] == "X":
                    continue
                total += 1
        return total
# Smoke test: two vertical ships on the sample board.
if __name__ == "__main__":
    solution = Solution()
    assert 2 == solution.countBattleships(
        [
            ["X", ".", ".", "X"],
            [".", ".", ".", "X"],
            [".", ".", ".", "X"],
        ]
    )
|
#!/usr/bin/env python3
import mpi4py
mpi4py.rc.recv_mprobe = False
from mpi4py import MPI
from socket import gethostname
from random import random as r
from math import pow as p
from sys import argv
# Monte-Carlo estimate of pi: each rank samples points in the unit
# square and counts hits inside the quarter circle; rank 0 averages.
# Initialize MPI stuff
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Make sure number of attempts (per rank) is given on command line
# NOTE(review): assert is stripped under `python -O`; raising would be
# more robust for CLI validation.
assert len(argv) == 2
attempts = int(argv[1])
inside=0.
tries=0
final=0.
# Each rank tries the same number of times
while (tries < attempts):
    tries += 1
    if (p(r(),2) + p(r(),2) < 1):
        inside += 1
# Each rank computes a final ratio (float)
ratio=4.*(inside/(tries))
print("[rank %02d of %d on host %s]: %.12f" % (rank, size, gethostname(), ratio))
# The 0 rank collects from all others and computes an average
if rank == 0:
    total = ratio
    for i in range(1, size):
        other = comm.recv(source=i)
        total += other
    final = total/size
# All other ranks just send the ratio to rank 0
else:
    comm.send(ratio, dest=0)
# Print the final average from rank 0
if rank == 0:
    print("")
    print("Final pi estimate from", size*attempts, "attempts =", final)
|
import sys
import re
#from numpy import matrix
#import numpy as np
from pyeda.inter import *
# Board size and (unused here) parameter k for an n-queens SAT encoding;
# each board cell is named by a single propositional variable letter.
# NOTE(review): k is reassigned as a loop variable in the constraint
# loops below — confirm the initial value is never needed.
n = 4
k = 0
#n, k = [int(x) for x in input("Enter two numbers here: ").split()]
print (n)
print (k)
'''
matrix = [['A', 'B', 'C'],
['D', 'E', 'F'],
['G', 'H', 'I']]
'''
matrix = [['A', 'B', 'C', '1'],
['D', 'E', 'F', '2'],
['G', 'H', 'I', '3'],
['J', 'K', 'L', '4']]
#print (matrix)
print ("['A', 'B', 'C', '1']")
print ("['D', 'E', 'F', '2']")
print ("['G', 'H', 'I', '3']")
print ("['J', 'K', 'L', '4']")
# Row constraint: at most one queen per row (pairwise ~a | ~b clauses).
restRow = ""
for i in range (0, n):
    for j in range (0, n):
        for k in range (j + 1, n):
            restRowAux = "(" + "~" + matrix[i][j] + " | " + "~" + matrix[i][k] + ")"
            restRow = restRow + " & " + restRowAux
restRow = restRow[3:]
print (restRow)
# Column constraint: at most one queen per column.
restColumn = ""
for i in range (0, n):
    for j in range (0, n):
        for k in range (j + 1, n):
            restColumnAux = "(" + "~" + matrix[j][i] + " | " + "~" + matrix[k][i] + ")"
            restColumn = restColumn + " & " + restColumnAux
restColumn = restColumn[3:]
print (restColumn)
# Main-diagonal constraint (top-left to bottom-right pairs).
restDiag1 = ""
for i in range (0, n - 1):
    #print ("manemapunomame1")
    for j in range (i, n - 1):
        for k in range(1, n - j):
            restDiag1Aux = "(" + "~" + matrix[i][j] + " | " + "~" + matrix[i + k][j + k] + ")"
            restDiag1 = restDiag1 + " & " + restDiag1Aux
    for j in range (0, i):
        for k in range(1, n - i):
            restDiag1Aux = "(" + "~" + matrix[i][j] + " | " + "~" + matrix[i + k][j + k] + ")"
            restDiag1 = restDiag1 + " & " + restDiag1Aux
restDiag1 = restDiag1[3:]
print (restDiag1)
# Anti-diagonal constraint (top-right to bottom-left pairs).
restDiag2 = ""
for i in range (0, n):
    #print ("manemapunomame1")
    for j in range (0, n - i):
        for k in range(1, j + 1):
            restDiag2Aux = "(" + "~" + matrix[i][j] + " | " + "~" + matrix[i + k][j - k] + ")"
            restDiag2 = restDiag2 + " & " + restDiag2Aux
    for j in range (n - i, n):
        for k in range(1, n - i):
            restDiag2Aux = "(" + "~" + matrix[i][j] + " | " + "~" + matrix[i + k][j - k] + ")"
            restDiag2 = restDiag2 + " & " + restDiag2Aux
restDiag2 = restDiag2[3:]
print (restDiag2)
# Presence constraint: at least one queen per row (one OR-clause per row),
# then conjoin everything into the final formula string.
# NOTE(review): the trailing slice arithmetic ([:-3] / [:-4]) looks
# fragile — confirm the generated clause string is well-formed for all n.
presencaLinha = "("
for i in range (0, n):
    for j in range (0, n):
        presencaLinhaAux = matrix[i][j] + " | "
        presencaLinha = presencaLinha + presencaLinhaAux
        #print (presencaLinhaAux + "ohh")
    presencaLinha = presencaLinha[:-3]
    presencaLinha = presencaLinha + ")" + " & " + "("
presencaLinha = presencaLinha[:-4]
print (presencaLinha)
print ()
formula = presencaLinha + " & " + restRow + " & " + restColumn + " & " + restDiag1 + " & " + restDiag2
print (formula)
#f = expr( formula )
#print (f)
|
from django.contrib import admin
from .models import Course, Teacher, Classroom, Student, LevelField, TeacherClassCourse, StudentCourse, Register, ClassTime
# Admin configuration for Teacher; the display/filter/search options are
# currently disabled.
class TeacherAdmin(admin.ModelAdmin):
    # list_display = ('user', 'hire_date', 'profession')
    # list_filter = ('user', 'hire_date', 'profession')
    # search_fields = ('user__username', 'profession__name', 'user__first_name')
    pass
# Register every model with the default admin; Teacher uses the custom
# (currently empty) admin class above.
admin.site.register(Teacher, TeacherAdmin)
admin.site.register(Course)
admin.site.register(Classroom)
admin.site.register(Student)
admin.site.register(LevelField)
admin.site.register(TeacherClassCourse)
admin.site.register(StudentCourse)
admin.site.register(Register)
admin.site.register(ClassTime)
|
OUTPUT = 'ko{}-{}ntti'.format
VOWELS = frozenset('aeiouyAEIOUY')


def kontti(s):
    """Apply the kontti word game to each whitespace-separated word:
    split after the first vowel and wrap the halves in ko...-...ntti;
    words without vowels pass through unchanged."""
    transformed = []
    for word in s.split():
        # Position just past the first vowel; 0 when the word has none.
        cut = next((idx + 1 for idx, ch in enumerate(word) if ch in VOWELS), 0)
        transformed.append(OUTPUT(word[cut:], word[:cut]) if cut else word)
    return ' '.join(transformed)
|
# identify the data type in python
# type() function
from math import *
# https://www.w3schools.com/python/python_syntax.asp
# SOME ARE REPETED
# Demonstrate type() on each built-in data type, printing the type and
# the value for each sample variable.
# BUG FIX: from bo_8 onward the value lines printed the PREVIOUS
# variable (bo_7 for bo_8, bo_8 for bo_9, ...) — an off-by-one
# copy-paste; each line now prints its own variable.
bo = (1,2,3,4,5,6,7,8,9,10)
print(type(bo))
print('_________',bo,'___________')
bo_1 = 'Jeewan'
print(type(bo_1))
print('_________',bo_1,'___________')
bo_2 = ['Jeewan', 'Deep', 'Tete']
print(type(bo_2))
print('_________',bo_2,'___________')
bo_3 = {1,2,3,4,5,6,7,8,9,10}
print(type(bo_3))
print('_________',bo_3,'___________')
bo_4 = True
print(type(bo_4))
print('_________',bo_4,'___________')
bo_5 = {
    'Jan': 'January',
    'Feb': 'Febuary',
    'Mar': 'March',
}
print(type(bo_5))
print('_________',bo_5,'___________')
bo_6 = {"name" : "John", "age" : 36}
print(type(bo_6))
print('_________',bo_6,'___________')
bo_7 = 1j
print(type(bo_7))
print('_________',bo_7,'___________')
bo_8 = 20.5
print(type(bo_8))
print('_________',bo_8,'___________')
bo_9 = 20
print(type(bo_9))
print('_________',bo_9,'___________')
bo_10 = frozenset({"apple", "banana", "cherry"})
print(type(bo_10))
print('_________',bo_10,'___________')
bo_11 = range(10)
print(type(bo_11))
print('_________',bo_11,'___________')
bo_12 = b"Hello"
print(type(bo_12))
print('_________',bo_12,'___________')
bo_13 = bytearray(5)
print(type(bo_13))
print('_________',bo_13,'___________')
bo_14 = memoryview(bytes(5))
print(type(bo_14))
print('_________',bo_14,'___________')
|
from flask import Flask ,render_template,flash,redirect,request,url_for
import mysql.connector
# from datetime import datetime
# Module-level MySQL connection and buffered cursor.
# NOTE(security): credentials are hard-coded — move them to
# configuration/environment before deployment.
# NOTE(review): on failure the bare except only prints, leaving
# `connection`/`cur` undefined so later routes will NameError — confirm
# whether startup should abort instead.
try:
    print("connected")
    connection = mysql.connector.connect(user='root', password='Password@123', host='localhost', database='data')
    print("connected")
    cur = connection.cursor(buffered=True)
    print("connected")
except:
    print("not connected")
app = Flask(__name__,template_folder='template')
#for home page
@app.route('/' ,methods=['GET','POST'])
def home():
    # POST: dispatch to the indent form or the login page depending on
    # which button was pressed; GET renders the home page.
    # NOTE(review): a POST with neither button returns None (a 500 in
    # Flask) — confirm whether a fallback render is wanted.
    if request.method=='POST':
        if 'INDENT' in request.form:
            return redirect(url_for('indentcategory'))
        if 'cred' in request.form:
            return redirect(url_for('login'))
    else:
        return render_template('home.html')
#for loginpage
#for loginpage
@app.route('/Login',methods=['GET','POST'])
def login():
    # Dispatch to sign-in or sign-up based on the 'sign' form value;
    # GET renders the login page.
    # base()
    if request.method == 'POST':
        if 'sign' in request.form:
            if request.form['sign'] == 'signin':
                return redirect(url_for('signin'))
            if request.form['sign'] == 'signup':
                return redirect(url_for('signup'))
        else:
            return base()
    else:
        return render_template('login.html', title = 'Login')
# NOTE(review): this rebinds the module name `login` (shadowing the view
# function above); routing still works because Flask stored the endpoint
# at decoration time, but the assignment looks unintentional — confirm.
login =0
# FOR SIGNIN
@app.route('/login/signin', methods = ['GET','POST'])
def signin():
    """Authenticate a user and redirect to the admin or employee page.

    Fixes over the original:
    - parameterized SQL instead of string concatenation (SQL injection);
    - ``cursor.fetchall()`` returns a list of row tuples, so comparing
      it to '' or 'Admin' was always False (unknown users crashed with
      IndexError and the admin branch was unreachable); the first
      column of the first row is inspected instead.
    """
    if request.method == 'POST':
        if 'signin' in request.form:
            User = request.form
            email = User['username']
            Password = User['password']
            # Fetch the stored credentials for this username.
            cur.execute(
                "SELECT username, password FROM employee WHERE username = %s;",
                (email,))
            rows = cur.fetchall()
            if not rows:
                print("Either sign up or enter the correct id")
                return redirect(url_for('signin'))
            stored_name, stored_pass = rows[0][0], rows[0][1]
            print(Password, stored_pass)
            if stored_pass != Password:
                print("Enter the correct password!")
                return redirect(url_for('signin'))
            # NOTE(security): passwords are stored and compared in plain
            # text — hash them before production use.
            if stored_name == 'Admin':
                print("Login Successful As Admin")
                return redirect(url_for('Admin'))
            print("Login Successful As employee")
            return redirect(url_for('employee'))
        else:
            return base()
    else:
        return render_template('signin.html', title = 'Sign-in')
# FOR SIGNUP
@app.route('/login/signup', methods = ['GET','POST'])
def signup():
    """Register a new employee account.

    Fixes over the original:
    - ``User(f_name)`` / ``User(password)`` called the form dict with
      undefined names (a guaranteed runtime error); the values are read
      by key instead (field names 'f_name' and 'password' — TODO confirm
      against signup.html);
    - parameterized INSERT instead of string concatenation (SQL
      injection);
    - the final redirect referenced an undefined ``username`` variable
      and a template filename instead of an endpoint.
    """
    if request.method == 'POST':
        if 'signup' in request.form:
            User = request.form
            f_name = User['f_name']
            password = User['password']
            print("hey")
            cur.execute(
                "INSERT INTO employee(username,password) VALUES (%s, %s);",
                (f_name, password))
            connection.commit()
            print("Thank you")
            return redirect(url_for('employee', username=f_name))
        else:
            return base()
    else:
        return render_template('signup.html', title ='Sign-Up')
@app.route('/indentcategory', methods=['GET','POST'])
def indentcategory():
    # Category chooser for new indents.
    # NOTE(review): a POST without any known button returns None -- confirm
    # the template always submits one of the three names.
    if request.method=='POST':
        if 'DIRECT PURCHASE' in request.form:
            return redirect(url_for("directpurchase"))
        elif 'LOCAL PURCHASE >25000' in request.form:
            return redirect(url_for("localpurchase"))
        elif 'GEM' in request.form:
            return redirect(url_for("GEM"))
    else:
        return render_template('indentcategory.html')
@app.route('/indentdirect', methods=['GET','POST'])
def directpurchase():
    """Direct-purchase indent form: store indentor, item and specification rows."""
    if request.method == 'POST':
        I = request.form
        # Parameterized INSERTs: the previous string-concatenated SQL was
        # vulnerable to SQL injection via every form field.
        cur.execute(
            "INSERT INTO indentor(indent_date,handlename,Designation,indent_approving,"
            "Budget_type, estimated_cost,expected_delivery_period) "
            "VALUES (%s,%s,%s,%s,%s,%s,%s);",
            (I['indentdate'], I['Name'], I['Desgn'], I['IAA'], I['budgetT'],
             I['cost'], I['expecteddate']))
        cur.execute(
            "INSERT INTO main_cat(item_name,brand,quantity,warrenty_period) "
            "VALUES (%s,%s,%s,%s);",
            (I['itemname'], I['brand'], I['quantity'], I['warrenty_period']))
        cur.execute("INSERT INTO sub_cat(item_specification) VALUES (%s);",
                    (I['subcat'],))
        connection.commit()
        # Bug fix: url_for('') raised at runtime; return to the home page.
        return redirect(url_for('home'))
    else:
        return render_template("indentdirect.html")
@app.route('/indentlocal', methods=['GET','POST'])
def localpurchase():
    """Render the local-purchase (>25000) indent page (shared template)."""
    template = "indent.html"
    return render_template(template)
@app.route('/indentgem', methods=['GET','POST'])
def GEM():
    """Render the GEM indent page (shared template)."""
    template = "indent.html"
    return render_template(template)
@app.route('/Admin', methods=['GET','POST'])
def Admin():
    """Admin landing page: dispatch to the per-category admin views."""
    if request.method=='POST':
        S=request.form
        if 'DIRECT PURCHASE' in S:
            return redirect(url_for('admin_directpurchase'))
        if 'LOCAL PURCHASE >25000' in S:
            return redirect(url_for('admin_localpurchase'))
        if 'GEM' in S:
            return redirect(url_for('admin_gem'))
    else:
        # Bug fix: the template was rendered with form=form, but no `form`
        # variable exists in this scope (NameError on every GET).
        return render_template("Admin.html")
@app.route('/admin_directpurchase', methods=['GET','POST'])
def admin_directpurchase() :
    # Admin view of direct-purchase indents.
    if request.method=='POST':
        cur.execute("SELECT * from indentor;")
        # NOTE(review): `forms` is fetched but never passed to a template or
        # through the redirect -- confirm whether it should be.
        forms=cur.fetchall()
        return redirect(url_for('placeorder_directpurchase'))
    else:
        return render_template("admin_directpurchase.html") # will show all tables with respective appove buttons and placeorder buttons
@app.route('/admin_localpurchase', methods=['GET','POST'])
def admin_localpurchase() :
    # Admin view of local-purchase indents.
    if request.method=='POST':
        # NOTE(review): cursor.execute() does not return rows; the second
        # assignment via fetchall() is the one that matters.  The result is
        # then discarded before the redirect -- confirm intent.
        forms=cur.execute("SELECT * from indentor;") #only in this particular category forms should get dispalyed
        forms=cur.fetchall()
        return redirect(url_for('placeorder_localpurchase'))
    else:
        return render_template("admin_localpurchase.html") # will show all tables with respective placeorder buttons
@app.route('/admin_gem', methods=['GET','POST'])
def admin_gem() :
    # Admin view of GEM indents.
    if request.method=='POST':
        # NOTE(review): fetched rows are discarded before the redirect.
        forms=cur.execute("SELECT * from indentor;")
        forms=cur.fetchall()
        return redirect(url_for('placeorder_gem'))
    else:
        return render_template("admin_gem.html") # will show all tables with respective appove buttons and placeorder buttons
@app.route('/placeorder_directpurchase', methods=['GET','POST'])
def placeorder_directpurchase():
    # NOTE(review): GET requests fall through and return None (a 500 in
    # Flask) -- confirm whether a GET branch is needed.
    if request.method=='POST':
        return render_template("placeorder_directpurchase.html")
@app.route('/placeorder_localpurchase', methods=['GET','POST'])
def placeorder_localpurchase():
    """Render the local-purchase order placement page."""
    page = "placeorder_localpurchase.html"
    return render_template(page)
@app.route('/placeorder_gem', methods=['GET','POST'])
def placeorder_gem():
    """Render the GEM order placement page."""
    page = "placeorder_gem.html"
    return render_template(page)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
# encoding=utf-8
import numpy as np
from numpy.linalg import *
def main():
    """Tour of numpy.linalg basics: inverse, transpose, det, eig, solve, FFT."""
    print(np.eye(3)) # identity matrix
    print('线性方程组')
    lst = np.array([[1,2],[3,4]])
    print('Inv矩阵的逆:')
    print(inv(lst))
    print("T矩阵转置:")
    print(lst.transpose())
    print("T矩阵行列式:")
    print(det(lst))
    print("T矩阵特征值:特征向量")
    print(eig(lst))
    y=np.array([[5.],[7.]])
    print('Solve解线性方程组')
    print(solve(lst,y))
    print('FFT:其他通讯')
    print(np.fft.fft(np.array([1,1,1,1,1,1,1,1,1,1,1,1])))
    print('Coef:')
    print(np.corrcoef([1,0,1],[0,2,1]))
    print('Poly一元函数:')
    print(np.poly1d([2,1,3]))
if __name__ == "__main__":
    main()
|
from PyQt5.QtGui import QPixmap
from ui_msgbox import *
from socket import *
class MsgBoxWindow(QtWidgets.QWidget,Ui_msgbox):
    """Paged message box: shows up to five messages per page with
    next/previous navigation driven by the page/msg_num labels."""
    message =[]  # class-level default; rebound to the instance list in __init__
    def __init__(self,msg,parent=None):
        super(MsgBoxWindow, self).__init__(parent)
        self.setupUi(self)
        self.message = msg
        self.loadmsg(msg)
        self.next_page.clicked.connect(self.to_next_page)
        self.last_page.clicked.connect(self.to_last_page)
    def loadmsg_range(self,r,data):
        # Fill the five list rows with page *r* (1-based) of *data*,
        # blanking any rows past the end of the data.
        if len(self.message) < r * 5:
            s_i = len(self.message) - 5 *(r - 1) + 1
        else:
            s_i = 6
        for i in range(1, s_i):
            method = "modify_msglist" + str(i)
            getattr(self, method)(data[(r-1) * 5 + i-1]['time'],
                                  data[(r-1) * 5 + i-1]['send'],
                                  "[" + data[(r-1) * 5 + i-1]["content"]['title'] + "]\n" + data[(r-1) * 5 + i-1]["content"]['text'])
        for i in range(s_i,6):
            method = "modify_msglist" + str(i)
            getattr(self, method)("","","")
    def to_next_page(self):
        # Advance one page; hide the "next" button when the new page is the
        # last one that contains messages.
        current = int(self.page.text())
        if (current+1)*5<=(int)(self.msg_num.text()):
            self.last_page.setHidden(False)
            self.next_page.setHidden(False)
            self.page.setText(str(current+1))
            self.loadmsg_range(current+1,self.message)
        elif (current+1)*5 > (int)(self.msg_num.text()):
            self.last_page.setHidden(False)
            self.next_page.setHidden(True)
            self.page.setText(str(current + 1))
            self.loadmsg_range(current+1,self.message)
    def to_last_page(self):
        # Go back one page; hide the "previous" button on page 1.
        # NOTE(review): the else branch hides next_page when moving backwards
        # from pages > 2 -- confirm that is the intended button state.
        current = int(self.page.text())
        if (current-1)==1:
            self.last_page.setHidden(True)
            self.next_page.setHidden(False)
            self.page.setText(str(current - 1))
            self.loadmsg_range(current-1,self.message)
        else:
            self.last_page.setHidden(False)
            self.next_page.setHidden(True)
            self.page.setText(str(current - 1))
            self.loadmsg_range(current - 1, self.message)
    def modify_msglist1(self,id,name,owner):
        # Set row 1's time/sender/content labels.
        self.time_1.setText(id)
        self.send_1.setText(name)
        self.content_1.setText(owner)
    def modify_msglist2(self,id,name,owner):
        self.time_2.setText(id)
        self.send_2.setText(name)
        self.content_2.setText(owner)
    def modify_msglist3(self, id, name, owner):
        self.time_3.setText(id)
        self.send_3.setText(name)
        self.content_3.setText(owner)
    def modify_msglist4(self, id, name, owner):
        self.time_4.setText(id)
        self.send_4.setText(name)
        self.content_4.setText(owner)
    def modify_msglist5(self, id, name, owner):
        self.time_5.setText(id)
        self.send_5.setText(name)
        self.content_5.setText(owner)
    def loadmsg(self,data):
        # Initial render: first page of messages plus nav-button visibility.
        msgnum = data.__len__()
        if msgnum <=5:
            for i in range(1, msgnum+1):
                method = "modify_msglist" + str(i)
                getattr(self, method)(data[i - 1]['time'],
                                      data[i - 1]['send'],
                                      "[" + data[i - 1]["content"]['title']+"]\n"+data[i - 1]["content"]['text'])
            self.last_page.setHidden(True)
            self.next_page.setHidden(True)
            self.msg_num.setText(str(msgnum))
        else:
            for i in range(1, 6):
                method = "modify_msglist" + str(i)
                getattr(self, method)(data[i - 1]['time'],
                                      data[i - 1]['send'],
                                      "[" + data[i - 1]["content"]['title'] + "]\n" + data[i-1]["content"]['text'])
            self.last_page.setHidden(True)
            self.next_page.setHidden(False)
            self.msg_num.setText(str(msgnum))
        self.page.setText("1")
|
__authors__ = ""
__copyright__ = "(c) 2014, pymal"
__license__ = "BSD License"
__contact__ = "Name Of Current Guardian of this file <email@address>"
# HTTP User-Agent presented to MyAnimeList's API endpoints.
USER_AGENT = 'api-indiv-0829BA2B33942A4A5E6338FE05EFB8A1'
HOST_NAME = "http://myanimelist.net"
DEBUG = False
# Network retry policy: number of attempts and delay (seconds) between them.
RETRY_NUMBER = 4
RETRY_SLEEP = 1
# strptime/strftime patterns used by the site pages and the XML APIs;
# the *_NONE_TIME values are the site's "no date" sentinels.
SHORT_SITE_FORMAT_TIME = '%b %Y'
LONG_SITE_FORMAT_TIME = '%b %d, %Y'
MALAPPINFO_FORMAT_TIME = "%Y-%m-%d"
MALAPPINFO_NONE_TIME = "0000-00-00"
MALAPI_FORMAT_TIME = "%Y%m%d"
MALAPI_NONE_TIME = "00000000"
|
#coding:utf-8
#!/usr/bin/env python
import copy
from gclib.utility import currentTime, drop
from game.utility.config import config
from game.routine.pet import pet
from game.routine.vip import vip
class educate:
    """Card-training subsystem (Python 2: uses dict.has_key): manages the
    training slots in usr.educate, exp accrual over time, and the gold/gem
    "summon" upgrade of the training grade (edt)."""
    @staticmethod
    def start(usr, edupos, cardid):
        """
        Start training card *cardid* in slot *edupos*; costs gold and
        consumes the currently summoned grade.
        """
        inv = usr.getInventory()
        card = inv.getCard(cardid)
        if not card:
            return {'msg':'card_not_exist'}
        # Restarting an occupied slot first banks its accrued exp via stop().
        if educate.is_edu_slot_start(usr, edupos):
            educate.stop(usr, edupos)
        if educate.card_already_educate(usr, cardid):
            return {'msg':'educate_card_already_educate'}
        gameConf = config.getConfig('game')
        educateConf = config.getConfig('educate')
        # Cost and exp-per-hour scale with the user's level (1-based config).
        educateInfo = educateConf[usr.level - 1]
        goldCost = educateInfo['gold']
        if goldCost > usr.gold:
            return {'msg':'gold_not_enough'}
        now = currentTime()
        slot = usr.educate['edu_slot'][edupos]
        slot['card_id'] = cardid
        slot['start_time'] = now
        slot['start_level'] = usr.level
        slot['last_update_time'] = now
        slot['fraction'] = 0.0
        slot['expptm'] = educateInfo['expptm']
        slot['edt'] = usr.educate['edt']
        usr.gold = usr.gold - goldCost
        # The summoned grade is consumed by starting a session.
        usr.educate['edt'] = 0
        usr.save()
        return educate.getClientData(usr, gameConf)
    @staticmethod
    def stop(usr, edupos):
        """
        Stop training in slot *edupos*, banking accrued exp first.
        """
        gameConf = config.getConfig('game')
        educate.update_exp(usr, gameConf)
        if not educate.is_edu_slot_start(usr, edupos):
            return {'msg':'educate_edu_slot_not_start'}
        usr.educate['edu_slot'][edupos] = educate.make_open_edu_slot(0)
        usr.save()
        return educate.getClientData(usr, gameConf)
    @staticmethod
    def call(usr):
        """
        "Summon": pay gold/gem for a chance to raise the training grade
        (edt); failure resets it to 0.  Higher grades require VIP levels.
        """
        educateGradeConf = config.getConfig('educate_grade')
        edt = usr.educate['edt']
        goldCost = educateGradeConf[edt]['price']['gold']
        gemCost = educateGradeConf[edt]['price']['gem']
        if goldCost > usr.gold:
            return {'msg': 'gold_not_enough'}
        if gemCost > usr.gem:
            return {'msg': 'gem_not_enough'}
        usr.gold = usr.gold - goldCost
        usr.gem = usr.gem - gemCost
        probability = educateGradeConf[edt]['probability']
        if drop(probability):
            edt = edt + 1
            # Clamp to the highest configured grade.
            if edt > (len(educateGradeConf) - 1):
                edt = len(educateGradeConf) - 1
        else:
            edt = 0
        # NOTE(review): the VIP checks run after the currency was deducted,
        # so a failed check still costs gold/gem -- confirm intended.
        if edt == 2 and (not vip.canEducateLevel2(usr)):
            return {'msg':'vip_required'}
        if edt == 3 and (not vip.canEducateLevel3(usr)):
            return {'msg':'vip_required'}
        if edt == 4 and (not vip.canEducateLevel4(usr)):
            return {'msg':'vip_required'}
        usr.educate['edt'] = edt
        usr.save()
        return {'gold':usr.gold, 'gem':usr.gem, 'edu_edt':edt}
    @staticmethod
    def open_edu_solt(usr):
        """
        Open the next closed training slot, paying gems.
        """
        gameConf = config.getConfig('game')
        gemCost = 0
        cnt = 0
        # NOTE(review): this iterates the educate dict's KEYS ('edu_slot',
        # 'edt'), not the slot list, so cnt counts truthy keys -- confirm
        # whether usr.educate['edu_slot'] was meant here.
        for solt in usr.educate:
            if solt:
                cnt = cnt + 1
        if vip.openEducateSlot(usr) < cnt:
            return {'msg':'vip_required'}
        for i, solt in enumerate(usr.educate['edu_slot']):
            if not solt:
                gemCost = gameConf['educate_open_gem'][i]
                if usr.gem < gemCost:
                    return {'msg':'gem_not_enough'}
                usr.educate['edu_slot'][i] = educate.make_open_edu_slot(0)
                return educate.getClientData(usr, gameConf)
        return {'msg':'educate_all_edu_slot_open'}
    @staticmethod
    def make():
        """
        Build a fresh educate record (six closed slots, grade 0).
        """
        data = {}
        data['edu_slot'] = [{}, {}, {}, {}, {}, {}]
        data['edt'] = 0
        return data
    @staticmethod
    def make_open_edu_slot(edt):
        """
        Build an open (idle) training slot with grade *edt*.
        """
        return {'edt':edt}
    @staticmethod
    def make_null_edu_slot():
        """
        Build a closed training slot.
        """
        return {}
    @staticmethod
    def levelup_update(usr, gameConf):
        """
        should be called when user level up
        """
        needSave = False
        # Auto-open any slot whose configured unlock level has been reached.
        for s, l in enumerate(usr.educate['edu_slot']):
            if (gameConf['educate_auto_open_level'][s] <= usr.level) and (not l):
                usr.educate['edu_slot'][s] = educate.make_open_edu_slot(0)
                needSave = True
        if needSave:
            usr.save()
    @staticmethod
    def is_edu_slot_start(usr, edupos):
        """
        Whether training is running in slot *edupos*.
        """
        return usr.educate['edu_slot'][edupos].has_key('start_time')
    @staticmethod
    def card_already_educate(usr, cardid):
        """
        Whether card *cardid* is already being trained in some slot.
        """
        for slot in usr.educate['edu_slot']:
            if slot and slot.has_key('card_id') and slot['card_id'] == cardid:
                return True
        return False
    @staticmethod
    def update_exp(usr, gameConf):
        """
        Accrue exp for all running slots since their last update; sessions
        past educate_duration are finished (start markers removed).
        """
        petConf = config.getConfig('pet')
        petLevelConf = config.getConfig('pet_level')
        inv = usr.getInventory()
        now = currentTime()
        eduCard = []
        for edu_slot in usr.educate['edu_slot']:
            if edu_slot and edu_slot.has_key('start_time'):
                educateGradeConf = config.getConfig('educate_grade')
                educateEndTime = edu_slot['start_time'] + gameConf['educate_duration']
                educateDuration = now - edu_slot['last_update_time']
                if now > educateEndTime:
                    # Session finished: only credit time up to the end and
                    # clear the running markers.
                    educateDuration = educateEndTime - edu_slot['last_update_time']
                    del edu_slot['start_time']
                    del edu_slot['last_update_time']
                else:
                    edu_slot['last_update_time'] = now
                rate = educateGradeConf[edu_slot['edt']]['rate']
                # expptm is exp per hour; 'fraction' carries the sub-integer
                # remainder between updates.
                exp = edu_slot['expptm'] * educateDuration / 3600 * rate + edu_slot['fraction']
                edu_slot['fraction'] = exp - int(exp)
                exp = int(exp)
                if exp:
                    card = inv.getCard(edu_slot['card_id'])
                    pet.gainExp(usr, card, int(exp), petConf, petLevelConf, gameConf)
                    eduCard.append(card)
        inv.save()
        usr.save()
    @staticmethod
    def getEduSlots(usr, gameConf):
        """
        Build the client-facing view of all training slots.
        """
        now = currentTime()
        edu_slot = usr.educate['edu_slot']
        data = {}
        # NOTE(review): the dict assignment above is dead -- data is rebound
        # to a list immediately.
        data = []
        for slot in edu_slot:
            if slot.has_key('start_time'):
                s = educate.make_open_edu_slot(slot['edt'])
                countdown = gameConf['educate_duration'] - (now - slot['start_time'])
                if countdown < 0:
                    countdown = 0
                s['expptm'] = slot['expptm']
                s['finish_countdown'] = countdown
                s['card_id'] = slot['card_id']
                data.append(s)
            elif slot:
                s = educate.make_open_edu_slot(slot['edt'])
                if slot.has_key('card_id'):
                    s['card_id'] = slot['card_id']
                data.append(s)
            else:
                data.append({})
        return data
    @staticmethod
    def getClientData(usr, gameConf):
        """
        Build the player-facing educate payload (slots plus current grade).
        """
        data = {}
        data['edu_slot'] = educate.getEduSlots(usr, gameConf)
        data['edu_edt'] = usr.educate['edt']
        return data
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Works(models.Model): # stores handicraft entries
    """A handicraft work posted by a user."""
    class Meta:
        permissions = (
            ("add_add_works", "add_add_works"), # don't forget the trailing comma when there is only one permission!
        )
    Title = models.CharField(max_length = 25,verbose_name="手工藝標題") # title
    Introduction = models.TextField(blank = False,verbose_name="手工藝簡介") # introduction
    Contact = models.TextField(blank = False,verbose_name="聯繫方式") # contact info
    Photo = models.ImageField(upload_to = './', blank = False,verbose_name="手工藝照片") # photo
    Add_time = models.DateTimeField(auto_now_add = True)# creation time
    Edit_time = models.DateTimeField(auto_now = True)# last-edit time
    User = models.ForeignKey(User, on_delete=models.CASCADE,default="",verbose_name="手工藝新增者")
class Issue(models.Model): # stores issue/topic entries
    """A discussion issue posted by a user."""
    # class Meta:
    #     permissions = (
    #         ("add_issue", "add_issue"), # don't forget the trailing comma when there is only one permission!
    #     )
    Title = models.CharField(max_length = 25,verbose_name="議題標題")
    Context = models.TextField(blank = False,verbose_name="議題內容")
    User = models.ForeignKey(User, on_delete=models.CASCADE,default="")
    Photo = models.ImageField(upload_to = './', blank = False,verbose_name="議題照片",default='') # photo
    Add_time = models.DateTimeField(auto_now_add = True)
    Edit_time = models.DateTimeField(auto_now = True)
# class Board(models.Model): #儲存留言板資料
# Context = models.TextField(blank = False)
# User_name = models.CharField(max_length = 25)
# Add_time = models.DateTimeField(auto_now_add = False)
# Edit_time = models.DateTimeField(auto_now = False)
# Issue = models.ForeignKey(Issue, on_delete=models.CASCADE,default="")
|
import sys
sys.path.insert(0, 'Serializers')
from yaml_serializer import YamlSerializer
from pickle_serializer import PickleSerializer
from json_serializer import JsonSerializer
from toml_serializer import TomlSerializer
class Factory():
    """Creates serializer instances by format name."""

    @staticmethod
    def create_serializer(s_name):
        """Return a new serializer for *s_name*: 'json', 'pickle', 'yaml' or 'toml'."""
        serializers = {
            "json": JsonSerializer,
            "pickle": PickleSerializer,
            "yaml": YamlSerializer,
            "toml": TomlSerializer,
        }
        cls = serializers.get(s_name)
        if cls is None:
            raise Exception("No such parsers")
        return cls()
|
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plot
import operator
import os
# Nearest-template digit recogniser: threshold the test image into a 28x28
# ink bitmap, do the same for each reference image, and pick the reference
# with the smallest Euclidean distance.
imgListt = np.full((28,28), 0)
testImg = cv.imread("E:/cut_ImgsTest/28pix/T"+str(7)+".jpg")
for index1 in range(len(testImg)):
    for index2 in range(len(testImg[index1])):
        if(testImg[index1][index2][2] < 90):  # dark pixel (red channel) counts as ink
            imgListt[index1][index2] = 16
imgListt = imgListt.reshape((-1,784))
plot.imshow(testImg)
plot.show()
dicList = []  # distance from the test bitmap to each reference bitmap
for i in range(34,60):
    imgA = cv.imread("svmImgs/"+str(i)+".jpg")
    imgList = np.full((28,28),0)
    for index1 in range(len(imgA)):
        for index2 in range(len(imgA[index1])):
            if(imgA[index1][index2][2] < 90):
                imgList[index1][index2] = 16
    targetImgA = imgList.reshape((-1,784))
    dicList.append(np.linalg.norm(imgListt - targetImgA))
print(dicList)
minV = dicList.index(min(dicList))
print("最小距离:",min(dicList))
print("最小位置:",minV)
plot.imshow(cv.imread("svmImgs/"+str(minV+34)+".jpg"))
plot.show()
# Y = np.array([0,1,2,3,4,5,6,7,8,9,0,2,3,4,5,7,8,9,0,2,3,4,5,7,8,9,0,0,3,4,5,7,8,9,0,4,3,9,5,8,6,6,7,2,0,4,3,9,5,8,7,2])
# Ground-truth labels for reference images 34..59.
Y = np.array([0,4,3,9,5,8,6,6,7,2,0,4,3,9,5,8,7,2,0,4,3,9,5,8,7,0])
print("预测值:",Y[minV])
|
#!/usr/bin/env python
"""
An axes used to jointly format Cartesian and polar axes.
"""
# NOTE: We could define these in base.py but idea is projection-specific formatters
# should never be defined on the base class. Might add to this class later anyway.
from ..config import rc
from ..internals import _pop_kwargs
from ..utils import _fontsize_to_pt, _not_none, units
class _SharedAxes(object):
    """
    Mix-in class with methods shared between `~proplot.axes.CartesianAxes`
    and `~proplot.axes.PolarAxes`.
    """
    def _update_background(self, x=None, **kwargs):
        """
        Update the background patch and spines.

        With ``x=None`` all spines are touched; ``'x'`` selects the
        bottom/top (or inner/polar) spines and anything else the
        left/right (or start/end) spines.
        """
        # Update the background patch
        kw_face, kw_edge = self._get_background_props(**kwargs)
        self.patch.update(kw_face)
        if x is None:
            opts = self.spines
        elif x == 'x':
            opts = ('bottom', 'top', 'inner', 'polar')
        else:
            opts = ('left', 'right', 'start', 'end')
        for opt in opts:
            self.spines.get(opt, {}).update(kw_edge)
        # Update the tick colors
        axis = 'both' if x is None else x
        edgecolor = kw_edge.pop('edgecolor', None)
        if edgecolor is not None:
            self.tick_params(axis=axis, which='both', color=edgecolor)
        # Update the tick widths
        # TODO: Either exclude case where 'linewidth' was retrieved from
        # 'axes.linewidth' or make tick width a child of that setting?
        # TODO: The same logic is used inside config.py to scale tick widths
        # by tick ratios and to zero-out tick length. Share with helper func?
        linewidth = kw_edge.pop('linewidth', None)
        if linewidth is not None:
            # Zero width also zeroes the tick length so stubs don't linger.
            kw = {'length': 0} if linewidth == 0 else {}
            self.tick_params(axis=axis, which='major', width=linewidth, **kw)
            ratio = rc['tick.widthratio']
            self.tick_params(axis=axis, which='minor', width=linewidth * ratio, **kw)
    def _update_ticks(
        self, x, *, grid=None, gridminor=None, gridcolor=None, gridpad=None,
        ticklen=None, tickdir=None, ticklabeldir=None, tickcolor=None, labelpad=None
    ):
        """
        Update the gridlines and labels. Set `gridpad` to ``True`` to use grid padding.
        """
        # Apply tick settings with tick_params when possible
        x = _not_none(x, 'x')
        kwtext = self._get_ticklabel_props(x)
        # 'weight'/'family' cannot go through tick_params; applied at the end.
        kwextra = _pop_kwargs(kwtext, 'weight', 'family')
        kwtext = {'label' + key: value for key, value in kwtext.items()}
        for b, which in zip((grid, gridminor), ('major', 'minor')):
            # Tick properties
            kwticks = self._get_tick_props(x, which=which)
            if labelpad is not None:
                kwticks['pad'] = labelpad
            if tickcolor is not None:
                kwticks['labelcolor'] = kwticks['color'] = tickcolor
            if ticklen is not None:
                kwticks['size'] = units(ticklen, 'pt')
                if which == 'minor':
                    kwticks['size'] *= rc['tick.lenratio']
            if gridpad:  # use grid.labelpad instead of tick.labelpad
                kwticks.pop('pad', None)
                pad = rc.find('grid.labelpad', context=True)
                if pad is not None:
                    kwticks['pad'] = units(pad, 'pt')
            # Gridline properties
            # NOTE: Internally ax.grid() passes gridOn to ax.tick_params() but this
            # is undocumented and might have weird side effects. Just use ax.grid()
            b = self._get_gridline_toggle(b, axis=x, which=which)
            if b is not None:
                self.grid(b, axis=x, which=which)
            kwlines = self._get_gridline_props(native=True, which=which)
            if gridcolor is not None:
                kwlines['grid_color'] = gridcolor
            # Apply tick and gridline properties
            self.tick_params(axis=x, which=which, **kwticks, **kwlines, **kwtext)
        # Tick and tick label direction with padding corrections
        # NOTE: The 'tick label direction' is right now just a cartesian thing
        kwdir = {}
        if tickdir == 'in':  # ticklabels should be much closer
            kwdir['pad'] = 1.0
        if ticklabeldir == 'in':  # put tick labels inside the plot
            tickdir = 'in'
            # Negative pad pulls labels inward past the tick and font extents.
            kwdir['pad'] = (
                - rc[f'{x}tick.major.size']
                - rc[f'{x}tick.major.pad']
                - _fontsize_to_pt(rc[f'{x}tick.labelsize'])
            )
        if tickdir is not None:
            kwdir['direction'] = tickdir
        self.tick_params(axis=x, which='both', **kwdir)
        # Apply settings that can't be controlled with tick_params
        if kwextra:
            axis = getattr(self, x + 'axis')
            for obj in axis.get_ticklabels():
                obj.update(kwextra)
|
#!/usr/bin/env python2
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Fam Zheng <fam@euphon.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import ConfigParser
_config = ConfigParser.ConfigParser()
def load_config(*try_files):
    # Try each candidate path in order; return True for the first file that
    # ConfigParser successfully reads, False if none could be read.
    # (Python 2 module: note the statement-form prints.)
    for f in try_files:
        print f
        try:
            if _config.read(f):
                return True
        except:
            pass
        continue
    return False
def _value(r):
if r.upper() in ["TRUE", "FALSE", "YES", "NO"]:
return r.upper() in ["TRUE", "YES"]
try:
if r == str(int(r)):
return int(r)
except:
return r
def get(section, key, default=None):
    """ Return int if value in digits;
        bool if in "yes", "no", "true" or "false";
        otherwise a string;
        list of value if "," is found"""
    # NOTE(review): the `not r` test also maps the empty string to *default*,
    # and the bare except hides NoSection/NoOption as well as real errors.
    try:
        r = _config.get(section, key)
    except:
        r = None
    if not r:
        return default
    elif "," in r:
        return [_value(x) for x in r.split(",")]
    else:
        return _value(r)
def items(section):
    # Thin pass-through: all (key, value) pairs of *section* as raw strings.
    return _config.items(section)
|
import urllib2
from bs4 import BeautifulSoup
import re
import sys
# Python 2 scraper: force UTF-8 as the default encoding before printing
# BeautifulSoup text.
reload(sys)
sys.setdefaultencoding('utf-8')
#txt_path = r"/Users/zhoufengting/Desktop/href_2013_2014.txt"
#fp = open(txt_path)
#print len(fp)
#for line in fp:
#print line
url = r"http://www.eurosport.com/football/ligue-1/2013-2014/montpellier-hsc-paris-saint-germain_mtc616155/live.shtml"
request = urllib2.urlopen(url)
response = request.read()
soup = BeautifulSoup(response,"html.parser")
Names_major = soup.find_all("ul",class_ = "players away-team")[0]
# NOTE(review): find_all() returns a ResultSet, which has no .text
# attribute -- this line likely raises; .find(...) or iterating the
# results is probably what was intended.
print Names_major.find_all("span",class_="replacement").text
#print response
#soup = BeautifulSoup(response,"html.parser")
#print line
#lines = fp.readline()
#print lines
|
from importlib import import_module
from django.conf import settings
from django.contrib.auth.base_user import BaseUserManager
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.models import UserManager as AuthUserManager
from django.contrib.auth.signals import user_logged_in
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.db import models
from django.http import Http404
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
# Session store class for the configured SESSION_ENGINE (used below to
# load and invalidate other sessions of the same user).
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
class UserManager(AuthUserManager):
    """Manager supplying defaults required by the custom User model."""
    def create_superuser(self, username, email, password, **extra_fields):
        # Superusers get a default sex and are activated immediately
        # (regular users default to is_active=False on the model).
        extra_fields.setdefault('sex', 'm')
        extra_fields.setdefault('is_active', True)
        return super().create_superuser(username, email, password, **extra_fields)
class User(AbstractUser):
    """Custom user: adds sex and penalty fields; accounts start inactive.

    NOTE(review): is_active defaults to False -- presumably activation
    happens elsewhere (e.g. email confirmation); confirm the flow.
    """
    username_validator = UnicodeUsernameValidator()
    username = models.CharField(
        _('username'),
        max_length=150,
        unique=True,
        help_text=_('Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'),
        validators=[username_validator],
        error_messages={
            'unique': _("A user with that username already exists."),
        },
    )
    # email = models.EmailField(_('email address'), unique=True)
    is_active = models.BooleanField(default=False)
    sex = models.CharField(
        max_length=1,
        choices=(
            ('f', 'female'),
            ('m', 'male')
        ),
        verbose_name='성별'
    )
    penalty = models.IntegerField(
        default=0,
        verbose_name='벌점'
    )
    objects = UserManager()
    REQUIRED_FIELDS = ['email', 'first_name', 'last_name']
    def __str__(self):
        return self.username
class UserSession(models.Model):
    """Maps a user to a live session key so earlier sessions can be revoked."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, editable=False)
    session_key = models.CharField(max_length=40, editable=False)
    created_at = models.DateTimeField(auto_now_add=True)
# Prevent concurrent logins:
# connect a user_logged_in receiver that removes the user's existing sessions.
def kicked_my_other_sessions(sender, request, user, **kwargs):
    print('kicked my other sessions')
    # Delete any previously recorded sessions for this user
    for user_session in UserSession.objects.filter(user=user):
        session_key = user_session.session_key
        session = SessionStore(session_key)
        # session.delete() commented out so the old session can instead be
        # flagged and shown a "kicked" message before it dies
        session['kicked'] = True
        session.save()
        user_session.delete()
    if not request.session.session_key:
        request.session.create()
    # Record the current session
    session_key = request.session.session_key
    UserSession.objects.create(user=user, session_key=session_key)
user_logged_in.connect(kicked_my_other_sessions)
class Profile(models.Model):
    """Free-form profile text attached one-to-one to a user."""
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    info = models.TextField(blank=True)
class RestrictStaffToAdminMiddleware(object):
    """
    A middleware that restricts staff members access to administration panels.
    """
    def process_request(self, request):
        # Hide the admin behind a 404 for anonymous and non-staff users.
        # NOTE(review): old-style (pre-1.10) middleware hook, and
        # is_authenticated is a property (not callable) on Django >= 1.10 --
        # confirm the target Django version.
        if request.path.startswith(reverse('admin:index')):
            if request.user.is_authenticated():
                if not request.user.is_staff:
                    raise Http404
            else:
                raise Http404
# User key (used for issuing tokens)
class UserKeyToken(models.Model):
    """Per-user key record from which API tokens are issued."""
    key = models.CharField(max_length=30)
    # token = models.CharField(max_length=30, unique=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)  # issue time
|
from sqlite3.test import factory
# Python 2 demo: generator expression yielding each character of `s`
# repeated `num` times, for every positive num in `l`.
l=[1,2,3,4]
s="hola mundo"
l3=(c * num for c in s
    for num in l
    if num >0)
print l3.next()
for letra in l3:
    print letra
def factorial(n):
    """Yield running products n, n*(n-1), ... down to a factor of 2.

    The final value yielded equals n!; nothing is yielded for n <= 1.
    """
    product = 1
    while n > 1:
        product = n * product
        yield product
        n -= 1
# Consume the generator: prints the running products 5, 20, 60, 120.
for e in factorial(5):
    print (e)
|
def solution(n):
    """Return the 32-bit bit-reversal of the non-negative integer *n*.

    Bit 0 of the input maps to bit 31 of the result, bit 1 to bit 30, etc.
    """
    reversed_bits = 0
    target_bit = 31  # destination position for the current source bit
    while n > 0:
        if n & 1:
            reversed_bits += 1 << target_bit
        n >>= 1
        target_bit -= 1
    return reversed_bits
# Read an integer from stdin and print its 32-bit bit-reversed value.
n = int(input())
ans = solution(n)
print(ans,end="\n")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 11 22:01:04 2018
@author: panzengyang
"""
import SVM_cv
import Perceptron_cv
import msg2matrix as mx
from sklearn.metrics import hinge_loss
from sklearn.linear_model import Perceptron
from sklearn.svm import SVC
from sklearn.feature_extraction.text import TfidfVectorizer
from msg2matrix import trainmsg, trainresult, testmsg, testresult
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
# Build a TF-IDF vocabulary from the training messages, featurize both
# splits with that fixed dictionary, then fit a class-balanced perceptron
# and evaluate it with hinge loss on the test split.
vectorizer = TfidfVectorizer(max_df = 0.20, min_df = 5, max_features = 1000, sublinear_tf = True)
vectorizer.fit(trainmsg)
dictionary = vectorizer.get_feature_names()
train_matrix = mx.extract_features(trainmsg,dictionary)
test_matrix = mx.extract_features(testmsg, dictionary)
per = Perceptron(max_iter = 5, eta0 = 1, class_weight = "balanced")
per.fit(train_matrix, trainresult)
decision = per.decision_function(test_matrix)
hloss = hinge_loss(testresult, decision)
#Eout_per = 1 - per.score(test_matrix, testresult)
#clf = SVC(kernel='linear', C=0.1)
#clf.fit(train_matrix, trainresult)
#Eout_svm = 1 - clf.score(test_matrix, testresult)
#NB = MultinomialNB()
#NB.fit(train_matrix, trainresult)
#Eout_nb = 1 - NB.score(test_matrix, testresult)
#NBB = BernoulliNB()
#NBB.fit(train_matrix, trainresult)
#Eout_nbb = 1 - NBB.score(test_matrix, testresult)
#list_eta = [0.01, 1000000]
#per = Perceptron(max_iter = 3, eta0 = Perceptron_cv.percep_cv(list_eta, trainmsg, trainresult), class_weight = "balanced")
#per.fit(train_matrix, trainresult)
#Eout_per = 1 - per.score(test_matrix, testresult)
#list_C = [0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000]
#clf = SVC(kernel = 'linear', C = SVM_cv.svm_cv(list_C, trainmsg, trainresult))
#clf.fit(train_matrix, trainresult)
#Eout_svm = 1 - clf.score(test_matrix, testresult)
|
from numpy import diag, sqrt
from numpy.linalg import inv
class ExtendedKalmanFilter:
    """Extended Kalman filter with user-injected model callables.

    The transition/observation mean functions, their Jacobians and the
    noise-covariance functions are supplied as lambdas, keeping the filter
    itself model-agnostic.  State is kept in `means` and `covariances`.
    """
    def __init__(self, state_means, state_covariances,
                 transition_means_lambda, transition_means_jacobi_lambda, transition_covariances_lambda,
                 observation_means_lambda, observation_means_jacobi_lambda, observation_covariances_lambda):
        self.means = state_means
        self.covariances = state_covariances
        self.transition_means_lambda = transition_means_lambda
        self.transition_means_jacobi_lambda = transition_means_jacobi_lambda
        self.transition_covariances_lambda = transition_covariances_lambda
        self.observation_means_lambda = observation_means_lambda
        self.observation_means_jacobi_lambda = observation_means_jacobi_lambda
        self.observation_covariances_lambda = observation_covariances_lambda
    def standard_deviations(self):
        # Per-state-dimension standard deviations from the covariance diagonal.
        return sqrt(diag(self.covariances))
    def predict(self, control_data):
        # Propagate the mean, then linearize: P <- F P F^T + Q.
        # NOTE(review): the Jacobian F is evaluated at the *predicted* mean
        # (means is updated first) -- confirm that linearization point is
        # intended rather than the prior mean.
        self.means = self.transition_means_lambda(self.means, control_data)
        F = self.transition_means_jacobi_lambda(self.means, control_data)
        self.covariances = F.dot(self.covariances).dot(F.T) + self.transition_covariances_lambda(control_data)
    def update(self, observation_data, **kwargs):
        # Standard EKF measurement update: gain K, innovation correction,
        # covariance shrink P <- (I - K H) P.
        H = self.observation_means_jacobi_lambda(self.means, **kwargs)
        K = self.covariances.dot(H.T).dot(inv(H.dot(self.covariances).dot(H.T) +
                                              self.observation_covariances_lambda(observation_data, **kwargs)))
        self.means += K.dot(observation_data - self.observation_means_lambda(self.means, **kwargs))
        self.covariances -= K.dot(H).dot(self.covariances)
|
import jieba
import jieba.posseg as pseg
jieba.load_userdict('./dict.txt')
# get the origin data
def getTheOriginData(path):
    """Load the tab-separated corpus at *path*.

    Each non-empty line is ``id<TAB>sentence1<TAB>sentence2<TAB>label``;
    spaces inside a line are stripped out before splitting.
    Returns (labels, pairslist) where pairslist is a list of
    [sentence1, sentence2] pairs.
    """
    with open(path, encoding='UTF-8') as file_object:
        contents = file_object.read()
    labels = []
    pairslist = []
    for raw_line in contents.split('\n'):
        if not raw_line:
            continue
        fields = raw_line.replace(' ', '').split('\t')
        labels.append(fields[3])
        pairslist.append([fields[1], fields[2]])
    return labels, pairslist
# seg and pos the word by jieba
def seg(pairslists):
    """Segment+POS-tag each [src, susp] sentence pair with jieba.posseg.

    Returns a list of [src_tokens, susp_tokens] where each element is the
    (lazy) generator produced by ``pseg.cut``.
    """
    return [[pseg.cut(pair[0]), pseg.cut(pair[1])] for pair in pairslists]
# get the Featureset by posed word
def getFeature(src, susp):
    """Build a POS-overlap feature vector for a sentence pair.

    *src* and *susp* are space-separated "word/POS" token strings.  For each
    POS tag in ``items`` two slots are kept: tokens of that tag found
    verbatim in the other sentence, and tokens that are not.  Counts are
    accumulated in both directions and normalized by the tag's total count
    across both sentences.  Returns a flat list of 2*len(items) numbers.
    (Fix: removed an unused function-local ``import numpy as np`` and the
    unused ``src_word``/``susp_word`` locals.)
    """
    ## delete the space which is in begin or end
    src = src.strip()
    susp = susp.strip()
    ## split by space
    src_items = src.split(' ')
    susp_items = susp.split(' ')
    ## feature name -> index into featuresMatrix
    features = {}
    ## the recognised POS tags
    # NOTE(review): 'j' appears twice in this list, so its slots are
    # normalized twice in the final loop -- confirm whether that is intended.
    items = ['r', 'd', 'v', 'nz', 'u', 'n', 'f', 'w', 't', 'y', 'ad', 'vn', 'm', 'p', 'a', 'j', 'b', 'an', 'q', 'c', 's', 'Ng', 'i', 'nr', 'Tg', 'Vg', 'j', 'l', 'nx', 'z', 'vd', 'n]', 'Ag', 'ns', 'k', 'Mg', 'o', 'Bg', 'nt', 'Dg', 'h', 'e']
    index = 0
    featuresMatrix = []
    countItem = {}
    for i in range(len(items)*2):
        featuresMatrix.append(0)
    for item in items:
        features[item] = index
        index += 1
        features['_'+item] = index
        index += 1
        countItem[item] = 0
    # count the number of tokens carrying each known tag (both sentences)
    for src_item in src_items:
        src_tag = src_item.split('/')[1]
        if not (features.__contains__(src_tag)):
            continue
        countItem[src_tag] += 1
    for susp_item in susp_items:
        susp_tag = susp_item.split('/')[1]
        if not (features.__contains__(susp_tag)):
            continue
        countItem[susp_tag] += 1
    # src tokens found verbatim in susp go to slot `tag`, the rest to `_tag`
    for src_item in src_items:
        src_tag = src_item.split('/')[1]
        if not (features.__contains__(src_tag)):
            continue
        index = features[src_tag]
        sign = 1
        for susp_item in susp_items:
            if src_item == susp_item:
                featuresMatrix[index] += 1
                sign = 0
                break
        featuresMatrix[index+1] += sign
    # and symmetrically for susp tokens matched against src
    for src_item in susp_items:
        src_tag = src_item.split('/')[1]
        if not (features.__contains__(src_tag)):
            continue
        index = features[src_tag]
        sign = 1
        for susp_item in src_items:
            if src_item == susp_item:
                featuresMatrix[index] += 1
                sign = 0
                break
        featuresMatrix[index+1] += sign
    # normalize each tag's two slots by that tag's total token count
    for item in items:
        index = features[item]
        if countItem[item] == 0:
            continue
        featuresMatrix[index] /= countItem[item]
        featuresMatrix[index+1] /= countItem[item]
    return featuresMatrix
##get the data
##get the train data
# ori_trainPath = 'E:/学习资料/自然语言处理/forToolLearn/data/ATEC/Origin/atec_nlp_sim_train_all.csv'
ori_trainPath = 'E:/学习资料/自然语言处理/forToolLearn/data/ATEC/Filter/sim.csv'
train_labels, train_pairslist = getTheOriginData(ori_trainPath)
segLists = seg(train_pairslist)
sign = 1
########
# posFeature.txt the Standard Featureset 1 x label(1), 1 x label(0)
# posFeature4.txt 4 x label(1),1 x label(0)
# posFeature0.25.txt 1 x label(1). 0.25 x label(0)
########
# Write one "label<TAB>feature-vector" line per pair; positive pairs
# (label != '0') are written three times to rebalance the classes.
with open('E:/学习资料/自然语言处理/forToolLearn/data/ATEC/data/jieba/atec_nlp_sim_train_all/Filter_sim/posFeature3.txt', 'w',encoding='UTF-8') as fs:
    for i in range(len(segLists)):
        segList = segLists[i]
        label = train_labels[i]
        src = segList[0]
        susp = segList[1]
        srcstr = ''
        suspstr = ''
        for word, flag in src:
            if word == '/':
                continue
            srcstr = srcstr + word+'/'+flag+' '
        for word, flag in susp:
            if word == '/':
                continue
            # BUG FIX: this previously read `suspstr = srcstr + ...`, so the
            # susp string was the full src string plus only the last susp
            # token instead of the accumulated susp tokens.
            suspstr = suspstr + word + '/' + flag + ' '
        # fs.write(label + '\t' + str(getFeature(srcstr, suspstr)) + '\n')
        # multi
        if label == '0':
            fs.write(label + '\t' + str(getFeature(srcstr, suspstr)) + '\n')
        else:
            for j in range(3):
                fs.write(label + '\t' + str(getFeature(srcstr, suspstr)) + '\n')
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
from sklearn import metrics as sklearn_metrics
from d3m.metrics import (
HitsAtKMetric,
MeanReciprocalRankMetric,
RocAucMicroMetric,
RocAucMacroMetric,
RocAucMetric,
)
# from external import objectDetectionAP
# Shared metric wrapper instances (constructed once at import time).
hits_at_k = HitsAtKMetric(5)  # todo how does this get set?
mean_recriprocal_rank = MeanReciprocalRankMetric()
roc_auc_micro = RocAucMicroMetric()
roc_auc_macro = RocAucMacroMetric()
roc_auc = RocAucMetric()
# Metric name -> callable(actual, predicted) -> float.
# Error-style metrics are negated so that "larger is better" holds uniformly.
metrics = {
    # classification
    "f1Macro": lambda act, pred: sklearn_metrics.f1_score(act, pred, average="macro"),
    "f1Micro": lambda act, pred: sklearn_metrics.f1_score(act, pred, average="micro"),
    "f1": lambda act, pred: sklearn_metrics.f1_score(act, pred),
    "accuracy": lambda act, pred: sklearn_metrics.accuracy_score(act, pred),
    # regression
    "meanSquaredError": lambda act, pred: -1.0
    * sklearn_metrics.mean_squared_error(act, pred),
    "meanAbsoluteError": lambda act, pred: -1.0
    * sklearn_metrics.mean_absolute_error(act, pred),
    "rootMeanSquaredError": lambda act, pred: -1.0
    * np.sqrt(sklearn_metrics.mean_squared_error(act, pred)),
    "rootMeanSquaredErrorAvg": lambda act, pred: -1.0
    * np.sqrt(sklearn_metrics.mean_squared_error(act, pred)),
    # NOTE(review): r2_score is already "larger is better"; negating it here
    # looks inconsistent with the error metrics above — confirm intent.
    "rSquared": lambda act, pred: -1.0 * sklearn_metrics.r2_score(act, pred),
    # clustering
    "normalizedMutualInformation": sklearn_metrics.normalized_mutual_info_score,
    "meanReciprocalRank": lambda act, pred: mean_recriprocal_rank.score(act, pred),
    "hitsAtK": lambda act, pred: hits_at_k.score(act, pred),
    "rocAucMacro": lambda act, pred: roc_auc_macro.score(act, pred),
    "rocAucMicro": lambda act, pred: roc_auc_micro.score(act, pred),
    "rocAuc": lambda act, pred: roc_auc.score(act, pred),
    # object detection
    #'objectDetectionAP' : lambda act, pred: objectDetectionAP(act, pred)[-1],
}
# Metric-name groupings by task type (keys of `metrics`).
classification_metrics = [
    "f1Macro",
    "f1Micro",
    "f1",
    "accuracy",
    "meanReciprocalRank",
    "hitsAtK",
    "rocAuc",
    "rocAucMacro",
    "rocAucMicro",
]
regression_metrics = [
    "meanSquaredError",
    "meanAbsoluteError",
    "rootMeanSquaredError",
    "rootMeanSquaredErrorAvg",
    "rSquared",
]
clustering_metrics = [
    "normalizedMutualInformation",
]
def translate_d3m_metric(metric):
    """Map an internal camelCase metric name to its d3m snake_case name.

    Raises AssertionError when *metric* is unknown.
    """
    lookup = {
        "f1Macro": "f1_macro",
        "f1Micro": "f1_micro",
        "f1": "f1",
        "accuracy": "accuracy",
        "rSquared": "r_squared",
        "meanSquaredError": "mean_squared_error",
        "rootMeanSquaredError": "root_mean_squared_error",
        "rootMeanSquaredErrorAvg": "root_mean_squared_error_avg",
        "meanAbsoluteError": "mean_absolute_error",
        "normalizedMutualInformation": "normalized_mutual_information",
        "objectDetectionAP": "object_detection_average_precision",
        "meanReciprocalRank": "mean_reciprocal_rank",
        "hitsAtK": "hits_at_k",
        "rocAucMacro": "roc_auc_macro",
        # BUG FIX: previously mapped to "roc_auc_macro" (copy-paste error).
        "rocAucMicro": "roc_auc_micro",
        "rocAuc": "roc_auc",
    }
    assert metric in lookup, "%s not in lookup" % metric
    return lookup[metric]
def translate_proto_metric(proto_metric):
    """Translate a protobuf metric enum name (SCREAMING_SNAKE_CASE) into the
    internal camelCase key used by ``metrics``.

    Raises AssertionError when *proto_metric* is unknown.
    """
    pairs = (
        ("F1_MACRO", "f1Macro"),
        ("F1_MICRO", "f1Micro"),
        ("F1", "f1"),
        ("ACCURACY", "accuracy"),
        ("MEAN_SQUARED_ERROR", "meanSquaredError"),
        ("ROOT_MEAN_SQUARED_ERROR", "rootMeanSquaredError"),
        ("ROOT_MEAN_SQUARED_ERROR_AVG", "rootMeanSquaredErrorAvg"),
        ("R_SQUARED", "rSquared"),  # mapped for now
        ("MEAN_ABSOLUTE_ERROR", "meanAbsoluteError"),
        ("NORMALIZED_MUTUAL_INFORMATION", "normalizedMutualInformation"),
        ("OBJECT_DETECTION_AVERAGE_PRECISION", "objectDetectionAP"),
        ("MEAN_RECIPROCAL_RANK", "meanReciprocalRank"),  # todo add this to primitives metrics
        ("HITS_AT_K", "hitsAtK"),
        ("ROC_AUC_MACRO", "rocAucMacro"),
        ("ROC_AUC_MICRO", "rocAucMicro"),
        ("ROC_AUC", "rocAuc"),
    )
    lookup = dict(pairs)
    assert proto_metric in lookup, "%s not in lookup" % proto_metric
    return lookup[proto_metric]
|
from __future__ import absolute_import, division, unicode_literals
from mimic.model import cloudfeeds
from twisted.trial.unittest import SynchronousTestCase
from testtools.matchers import (MatchesSetwise, MatchesDict, Equals)
class TestCloudFeeds(SynchronousTestCase):
    """Tests for product registration and lookup on the CloudFeeds model."""

    def setUp(self):
        self.cf = cloudfeeds.CloudFeeds(tenant_id='1234', clock=None)

    # FIX throughout this class: assertEquals is a deprecated alias of
    # assertEqual (removed in recent Pythons).
    def test_creation(self):
        """
        A new CloudFeeds plugin should have no products when created.
        """
        self.assertEqual(len(self.cf.get_product_endpoints()), 0)

    def test_product_registration(self):
        """
        Registering a new product should create a new ATOM feed.
        """
        self.cf.register_product(title='The hoohaw product.', href='hoohaw')
        self.assertEqual(len(self.cf.get_product_endpoints()), 1)

    def test_product_reregistration(self):
        """
        Re-registering a new product should do nothing.
        """
        self.cf.register_product(title='The hoohaw product', href='hoohaw')
        self.cf.register_product(title='The OTHER hoohaw product', href='hoohaw')
        self.assertEqual(len(self.cf.get_product_endpoints()), 1)
        p = self.cf.get_product_by_href('hoohaw')
        self.assertEqual(p.title, 'The hoohaw product')

    def test_get_products(self):
        """
        Requesting a list of product endpoints should return a title and an href
        for each endpoint.
        """
        self.cf.register_product(title='The hoohaw product', href='hoohaw')
        self.cf.register_product(title='The goober product', href='goober')
        products = self.cf.get_product_endpoints()
        self.assertEqual('hoohaw' in products, True)
        self.assertEqual(products['hoohaw'].title, 'The hoohaw product')
        self.assertEqual(products['goober'].title, 'The goober product')
        self.assertEqual(products['hoohaw'].href, 'hoohaw')
        self.assertEqual(products['goober'].href, 'goober')
class TestCloudFeedsProduct(SynchronousTestCase):
    """Tests for the per-product event queue."""

    # FIX: assertEquals -> assertEqual (deprecated alias).
    def test_creation(self):
        """
        A new product queue should be empty.
        """
        cfp = cloudfeeds.CloudFeedsProduct(title='title', href='href')
        self.assertEqual(len(cfp.events), 0)

    def test_post(self):
        """
        Posting a new event to a queue should tack said event onto the end
        of said queue.
        """
        cfp = cloudfeeds.CloudFeedsProduct(title='title', href='href')
        cfp.post("TROLOLOLOLOL!!!")
        cfp.post("This is a totally fake event-like thing.")
        self.assertEqual(
            cfp.events,
            ["TROLOLOLOLOL!!!", "This is a totally fake event-like thing."]
        )
class TestSerialization(SynchronousTestCase):
    """Tests for the JSON shapes produced by the cloudfeeds renderers."""

    # FIX: assertEquals -> assertEqual (deprecated alias).
    def test_json_description(self):
        """
        When listing product endpoints, we expect our JSON to look a certain
        way. The easiest way to do that is to acquire the corresponding
        dict, then pass it through json.dump with your preferred formatting
        settings.
        """
        cfp = cloudfeeds.CloudFeedsProduct(title='title', href='href')
        d = cloudfeeds.render_product_dict(cfp)
        productDescription = MatchesDict({
            "title": Equals("title"),
            "collection": MatchesDict({
                "href": Equals("href"),
                "title": Equals("title"),
            }),
        })
        # A matcher returns None on success, a Mismatch otherwise.
        self.assertEqual(productDescription.match(d), None)

    def test_json_product_list(self):
        """
        When listing product endpoints, the resulting JSON should contain a
        service object, and a workspace object, and within that, an array of
        product descriptors.
        """
        cf = cloudfeeds.CloudFeeds(tenant_id='1234', clock=None)
        cf.register_product(title="The hoohaw product", href="hoohaw")
        cf.register_product(title="The goober product", href="goober")
        listing = MatchesDict({
            "service": MatchesDict({
                "workspace": MatchesSetwise(
                    MatchesDict({
                        "collection": MatchesDict({
                            "href": Equals("hoohaw"),
                            "title": Equals("The hoohaw product"),
                        }),
                        "title": Equals("The hoohaw product"),
                    }),
                    MatchesDict({
                        "collection": MatchesDict({
                            "href": Equals("goober"),
                            "title": Equals("The goober product"),
                        }),
                        "title": Equals("The goober product"),
                    }),
                ),
            })
        })
        self.assertEqual(
            listing.match(cloudfeeds.render_product_endpoints_dict(
                cf.get_product_endpoints()
            )),
            None
        )
|
import numpy as np
def temptable_template(df, sql, DB):
    """
    Build the bulk-insert statement for *df* and hand it to the database
    manager for mogrification.

    Turns the dataframe's values, e.g.
        ((1,2,3,4),
         (5,6,7,8))
    into the placeholder template '(%s,%s,%s,%s),(%s,%s,%s,%s);', appends it
    to the stub *sql* (e.g. 'INSERT INTO mytable VALUES ') and substitutes the
    flattened values via DB.query_mogrify.

    :param df: The dataframe we are going to bulk insert
    :param sql: The stub sql for the bulk insert.
    :param DB: An instance of the PostgresManager class
    :return: the fully mogrified SQL string
    """
    values = df.values
    n_rows, n_cols = np.shape(values)
    # One '(%s,...,%s)' group per row, comma-joined.
    row_template = '(' + ','.join(['%s'] * n_cols) + ')'
    placeholder_block = ','.join([row_template] * n_rows)
    return DB.query_mogrify(sql + placeholder_block + ';', sql_var=values.flatten())
|
import pytest
from responder import routes
@pytest.mark.parametrize(
    "route, expected",
    [
        pytest.param("/", False, id="home path without params"),
        pytest.param("/test_path", False, id="sub path without params"),
        pytest.param("/{test_path}", True, id="path with params"),
    ],
)
def test_parameter(route, expected):
    # A route reports parameters only when its pattern holds {placeholders}.
    assert routes.Route(route, "test_endpoint").has_parameters is expected
def test_url():
    # Formatting a parametrized route substitutes the keyword argument.
    route = routes.Route("/{my_path}", "test_endpoint")
    assert route.url(my_path="path") == "/path"
def test_equal():
    # Routes compare by pattern: identical patterns are equal, others not.
    first = routes.Route("/{path_param}", "test_endpoint")
    second = routes.Route("/{path_param}", "test_endpoint")
    other = routes.Route("/test_path", "test_endpoint")
    assert first == second
    assert first != other
@pytest.mark.parametrize(
    "path_param, actual, match",
    [
        pytest.param(
            "/{greetings}", "/hello", {"greetings": "hello"}, id="with one strformat"
        ),
        pytest.param(
            "/{greetings}.{name}",
            "/hi.jane",
            {"greetings": "hi", "name": "jane"},
            id="with dot in url and two strformat",
        ),
        pytest.param(
            "/{greetings}/{name}",
            "/hi/john",
            {"greetings": "hi", "name": "john"},
            id="with sub url and two strformat",
        ),
        pytest.param(
            "/concrete_path", "/foo", {}, id="test concrete path with no match"
        ),
    ],
)
def test_incoming_matches(path_param, actual, match):
    # Matching an incoming path extracts the named parameters (or {}).
    assert routes.Route(path_param, "test_endpoint").incoming_matches(actual) == match
def test_incoming_matches_with_concrete_path_no_match():
    # A concrete (non-parametrized) route yields no parameters for any path.
    route = routes.Route("/concrete_path", "test_endpoint")
    assert route.incoming_matches("hello") == {}
@pytest.mark.parametrize(
    "route, match, expected",
    [
        pytest.param(
            "/{path_param}",
            "/{path_param}",
            True,
            id="with both parametrized path match",
        ),
        pytest.param(
            "/concrete", "/concrete", True, id="with both concrete path match"
        ),
        pytest.param("/concrete", "/no_match", False, id="with no match"),
    ],
)
def test_does_match_with_route(route, match, expected):
    # does_match compares a route against another pattern string.
    assert routes.Route(route, "test_endpoint").does_match(match) == expected
|
from django.conf.urls import include, url
from . import views
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^success/(?P<next>[\s\S]*)/$', views.SuccessView.as_view(), name='success'),
    # BUG FIX: the raw string previously contained '\\u4e00-\\u9fa5', which in
    # a raw string is a literal backslash + 'u4e00', not the CJK character
    # range U+4E00-U+9FA5; '\u4e00' lets the re module interpret it as the
    # intended range. ([\s\S] already matches any character, so the matched
    # URLs are unchanged.)
    url(r'^error/(?P<next>[\s\S]*)/(?P<msg>[\s\S\u4e00-\u9fa5]*)/$', views.ErrorView.as_view(), name="error"),
]
# /error/jump_field/errmsg/
|
from abc import ABC, abstractmethod
class ClientDAO(ABC):
    """
    Abstract base class describing the data-access contract for clients.

    Concrete implementations must provide all four operations:
    get_clients, add_client, update_client and delete_client.
    """

    @abstractmethod
    def get_clients(self):
        """Return every stored client."""

    @abstractmethod
    def add_client(self, new_client):
        """Persist *new_client*."""

    @abstractmethod
    def update_client(self, client, updated_client):
        """Replace *client* with *updated_client*."""

    @abstractmethod
    def delete_client(self, client):
        """Remove *client* from storage."""
|
# Generated by Django 2.0.3 on 2018-05-04 12:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: re-points TakenCourse.user at the configured
    # AUTH_USER_MODEL with CASCADE delete and the 'taken_courses' reverse
    # accessor. Kept byte-identical apart from comments.

    dependencies = [
        ('web', '0042_takencourse_taken_course_title'),
    ]

    operations = [
        migrations.AlterField(
            model_name='takencourse',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='taken_courses', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/users/<username>/<id>')
def show_user_profile(username, id):
    """Render users.html for *username*; *id* is printed but otherwise unused.

    (Parameter name `id` shadows the builtin but must match the URL rule.)
    """
    # print username
    # FIX: use the print() function form — valid on both Python 2 and 3,
    # whereas the old `print id` statement is a SyntaxError on Python 3.
    print(id)
    # return username
    return render_template('users.html', username=username)
@app.route('/route/with/<vararg>')
def handler_function(vararg):
    """Demo handler: ignores the URL value and returns a fixed string."""
    vararg = 'Noel is awesome yeees'
    # FIX: print() function form works on Python 2 and 3 alike.
    print(vararg)
    return vararg
if __name__ == '__main__':
    # FIX: guard the dev server so importing this module (e.g. under a WSGI
    # server) does not also start Flask's built-in server.
    app.run(debug=True)
|
# import the libraries
# --------------------
import pandas as pd
# import numpy as np
import math
from sklearn.ensemble import RandomForestClassifier
# FIX: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# from sklearn import preprocessing as pp
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix,roc_curve, roc_auc_score
from sklearn import tree
import pydotplus
import collections
# read the input file
# --------------------
path="C:/Users/Arman Chand/Desktop/Project works/Random_Forest/Churn_Modelling.csv"
bc = pd.read_csv(path)
# drop unwanted columns
# --------------------
bc = bc.drop(['RowNumber','CustomerId','Surname'],axis=1)
# split the dataset into X and Y variables. These are numpy array
# -----------------------------------------------------------------
X = bc.iloc[:, :10].values
# NOTE(review): y is a 2-D (n, 1) array here; sklearn prefers y.ravel().
y = bc.iloc[:, 10:].values
# Encoding categorical data
# NOTE(review): OneHotEncoder(categorical_features=...) is deprecated in
# newer sklearn; consider ColumnTransformer when upgrading further.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
# drop one dummy column to avoid the dummy-variable trap
X = X[:, 1:]
# split the dataset into train and test
# -------------------------------------
x_train, x_test, y_train, y_test = \
    train_test_split( X, y, test_size = 0.3, random_state = 100)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
# build the model
# --------------------------
clf1 = RandomForestClassifier(n_estimators=30,criterion='gini')
fit1 = clf1.fit(x_train,y_train)
print(fit1)
# predict
# --------
pred1 = fit1.predict(x_test)
# print some predictions
# -----------------------
for i in range(50):
    print("Actual value = {}, Predicted value = {}".format(y_test[i], pred1[i]))
# print the accuracy
# ------------------
print("Test Accuracy :: ", accuracy_score(y_test, pred1))
# confusion matrix
# ----------------
#print(confusion_matrix(y_test, pred1))
import pylab as plt
# NOTE(review): tick labels [2,4] look copied from a breast-cancer example;
# the churn target is 0/1 — confirm before trusting the plot axes.
labels=[2,4]
cm = confusion_matrix(pred1, y_test)
print(cm)
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.show()
from sklearn import metrics
##Computing false and true positive rates
fpr, tpr,_=roc_curve(pred1,y_test,drop_intermediate=False)
auc = metrics.roc_auc_score(y_test, pred1)
import matplotlib.pyplot as plt
plt.figure()
##Adding the ROC
plt.plot(fpr, tpr, color='red',
 lw=2, label="ROC curve auc="+str(auc))
##Random FPR and TPR
plt.plot([0, 1], [0, 1], color='blue', lw=2, linestyle='--')
##Title and label
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC curve')
plt.show()
'''
data_feature_names = [ 'CreditScore', 'Geography', 'Gender','Age','Tenure','Balance','NumOfProducts','HasCrCard','IsActiveMembe','EstimatedSalary']
# Visualize data
dot_data = tree.export_graphviz(classifier,
                                feature_names=data_feature_names,
                                out_file=None,
                                filled=True,
                                rounded=True)
graph = pydotplus.graph_from_dot_data(dot_data)
colors = ('turquoise', 'orange')
edges = collections.defaultdict(list)
for edge in graph.get_edge_list():
    edges[edge.get_source()].append(int(edge.get_destination()))
for edge in edges:
    edges[edge].sort()
    for i in range(2):
        dest = graph.get_node(str(edges[edge][i]))[0]
        dest.set_fillcolor(colors[i])
graph.write_png('tree1.png')
'''
|
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django.contrib.auth import get_user_model
from ghostwriter.home.models import UserProfile
User = get_user_model()
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """Create a matching `UserProfile` whenever a new `User` row is saved."""
    if not created:
        # Updates to existing users need no profile work.
        return
    UserProfile.objects.create(user=instance)
|
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Input(Component):
    """A Input component.
Keyword arguments:
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- className (string; optional): Often used with CSS to style elements with common properties.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- type (a value equal to: "text", 'number', 'password', 'email', 'range', 'search', 'tel', 'url', 'hidden'; optional): The type of control to render
- value (string; optional): The value of the Input
- size (string; optional): The initial size of the control. This value is in pixels unless the value
of the type attribute is text or password, in which case it is an integer
number of characters. This attribute applies only when the type attribute
is set to text, search, tel, url, email, or password, otherwise it is
ignored. In addition, the size must be greater than zero. If you do not
specify a size, a default value of 20 is used.
- bs_size (string; optional): Set the size of the Input. Options: 'sm' (small), 'md' (medium)
or 'lg' (large). Default is 'md'.
- valid (boolean; optional): Apply valid style to the Input for feedback purposes. This will cause
any FormFeedback in the enclosing FormGroup with valid=True to display.
- invalid (boolean; optional): Apply invalid style to the Input for feedback purposes. This will cause
any FormFeedback in the enclosing FormGroup with valid=False to display.
- plaintext (boolean; optional): Set to true for a readonly input styled as plain text with the default
form field styling removed and the correct margins and padding preserved.
- placeholder (string; optional): A hint to the user of what can be entered in the control . The placeholder
text must not contain carriage returns or line-feeds. Note: Do not use the
placeholder attribute instead of a <label> element, their purposes are
different. The <label> attribute describes the role of the form element
(i.e. it indicates what kind of information is expected), and the
placeholder attribute is a hint about the format that the content should
take. There are cases in which the placeholder attribute is never
displayed to the user, so the form must be understandable without it.
- name (string; optional): The name of the control, which is submitted with the form data.
- n_submit (number; optional): Number of times the `Enter` key was pressed while the input had focus.
- n_submit_timestamp (number; optional): Last time that `Enter` was pressed.
- n_blur (number; optional): Number of times the input lost focus.
- n_blur_timestamp (number; optional): Last time the input lost focus.
- debounce (boolean; optional): If true, changes to input will be sent back to the Dash server only on enter or when losing focus.
If it's false, it will sent the value back on every change."""
    # NOTE(review): auto-generated by the Dash component generator — do not
    # hand-edit the logic; regenerate from the React source instead.
    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, style=Component.UNDEFINED, className=Component.UNDEFINED, key=Component.UNDEFINED, type=Component.UNDEFINED, value=Component.UNDEFINED, size=Component.UNDEFINED, bs_size=Component.UNDEFINED, valid=Component.UNDEFINED, invalid=Component.UNDEFINED, plaintext=Component.UNDEFINED, placeholder=Component.UNDEFINED, name=Component.UNDEFINED, n_submit=Component.UNDEFINED, n_submit_timestamp=Component.UNDEFINED, n_blur=Component.UNDEFINED, n_blur_timestamp=Component.UNDEFINED, debounce=Component.UNDEFINED, **kwargs):
        # Prop bookkeeping consumed by the Component base class.
        self._prop_names = ['id', 'style', 'className', 'key', 'type', 'value', 'size', 'bs_size', 'valid', 'invalid', 'plaintext', 'placeholder', 'name', 'n_submit', 'n_submit_timestamp', 'n_blur', 'n_blur_timestamp', 'debounce']
        self._type = 'Input'
        self._namespace = 'dash_bootstrap_components/_components'
        self._valid_wildcard_attributes = []
        self.available_properties = ['id', 'style', 'className', 'key', 'type', 'value', 'size', 'bs_size', 'valid', 'invalid', 'plaintext', 'placeholder', 'name', 'n_submit', 'n_submit_timestamp', 'n_blur', 'n_blur_timestamp', 'debounce']
        self.available_wildcard_properties = []
        # Only arguments the caller passed explicitly are forwarded.
        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}
        # This component declares no required props, so the loop is empty.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(Input, self).__init__(**args)

    def __repr__(self):
        # Render 'Input(prop=value, ...)' listing only the props that are set.
        if(any(getattr(self, c, None) is not None
               for c in self._prop_names
               if c is not self._prop_names[0])
                or any(getattr(self, c, None) is not None
                       for c in self.__dict__.keys()
                       if any(c.startswith(wc_attr)
                       for wc_attr in self._valid_wildcard_attributes))):
            props_string = ', '.join([c+'='+repr(getattr(self, c, None))
                                      for c in self._prop_names
                                      if getattr(self, c, None) is not None])
            wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
                                      for c in self.__dict__.keys()
                                      if any([c.startswith(wc_attr)
                                      for wc_attr in
                                      self._valid_wildcard_attributes])])
            return ('Input(' + props_string +
                   (', ' + wilds_string if wilds_string != '' else '') + ')')
        else:
            return (
                'Input(' +
                repr(getattr(self, self._prop_names[0], None)) + ')')
|
from __future__ import print_function
import argparse
# Fall back to a placeholder version string when the package metadata is
# unavailable (e.g. running from an unbuilt source checkout).
try:
    from orlo import __version__
except ImportError:
    # _version.py doesn't exist
    __version__ = "TEST_BUILD"
__author__ = 'alforbes'
# NOTE(review): the string below is not a module docstring (code precedes
# it); it is a no-op expression kept purely as descriptive text.
"""
Command line interface
Generally setup/initialisation functions and the like, called by /usr/bin/orlo
"""
def parse_args():
    """Build the orlo CLI parser and parse sys.argv.

    Each sub-command stores its handler in the parsed namespace as ``func``.
    """
    parser = argparse.ArgumentParser(prog='orlo')
    parser.add_argument('--version', '-v', action='version',
                        version='%(prog)s {}'.format(__version__))

    # Shared parent parsers, reused across sub-commands.
    config_parent = argparse.ArgumentParser(add_help=False)
    config_parent.add_argument('--file', '-f', dest='file_path',
                               help="Config file to read/write",
                               default='/etc/orlo/orlo.ini')
    database_parent = argparse.ArgumentParser(add_help=False)
    server_parent = argparse.ArgumentParser(add_help=False)
    server_parent.add_argument('--host', '-H', dest='host', default='127.0.0.1',
                               help="Address to listen on")
    server_parent.add_argument('--port', '-P', dest='port', type=int, default=5000,
                               help="Port to listen on")

    subparsers = parser.add_subparsers(dest='action')
    subparsers.add_parser(
        'write_config', help="Write config file",
        parents=[config_parent]).set_defaults(func=write_config)
    subparsers.add_parser(
        'setup_database', help="Initialise the configured DB",
        parents=[database_parent, config_parent]).set_defaults(func=setup_database)
    subparsers.add_parser(
        'run_server', help="Run a test server",
        parents=[server_parent, config_parent]).set_defaults(func=run_server)
    return parser.parse_args()
def write_config(args):
    """Write the current orlo config to ``args.file_path``."""
    from orlo import config
    # FIX: use a context manager so the handle is flushed and closed even on
    # error — the original leaked the open file object.
    with open(args.file_path, 'w') as config_file:
        config.write(config_file)
def setup_database(args):
    """Create all tables in the database named by the orlo config."""
    from orlo.orm import db
    from orlo.config import config

    uri = config.get('db', 'uri')
    if uri == 'sqlite://':
        # An in-memory DB vanishes when this process exits — warn loudly.
        print("Warning: setting up in-memory database, this is "
              "probably not what you want!\n"
              "Please configure db:uri in /etc/orlo/orlo.ini")
    db.create_all()
def run_server(args):
    """Run Flask's built-in development server (never for production)."""
    print("Warning: this is a development server and not suitable "
          "for production, we recommend running under gunicorn.")

    from orlo import app
    for key, value in (('DEBUG', True),
                       ('TRAP_HTTP_EXCEPTIONS', True),
                       ('PRESERVE_CONTEXT_ON_EXCEPTION', False)):
        app.config[key] = value
    app.run(host=args.host, port=args.port, debug=True, use_reloader=True)
def main():
    """Console entry point: parse the CLI and dispatch to the sub-command."""
    # Referenced by entry_points in setup.py
    parsed = parse_args()
    parsed.func(parsed)
if __name__ == '__main__':
main()
|
# Send a test message through each relay and report which servers accept it.
from __future__ import print_function  # FIX: runs on Python 2 and 3

import smtplib

SERVERS = ["156.17.40.148", "156.17.40.162", "156.17.40.46", "156.17.40.85"]

for SERVER in SERVERS:
    FROM = "test@pwr.wroc.pl"
    TO = ["xx@plonk.ict.pwr.wroc.pl", "zxcvbnm@heaven.org"]
    # FIX: RFC 5322 requires a blank line between the headers and the body;
    # without it the body text was parsed as a (broken) header.
    message = """\
From: %s
To: %s
Subject: %s

%s
""" % (FROM, ", ".join(TO), "test", "test")
    server = smtplib.SMTP(SERVER)
    #server.set_debuglevel(True)
    try:
        server.sendmail(FROM, TO, message)
    # FIX: 'except X, e' is Python-2-only syntax; 'as e' works on 2.6+ and 3.
    except smtplib.SMTPRecipientsRefused as e:
        print(SERVER, e)
    else:
        print(SERVER, "ok")
    server.quit()
|
import json
def lambda_handler(event, context):
    """API Gateway entry point.

    Appends the posted stock symbol to the caller's comma-separated watch
    list in DynamoDB (creating the row on first use) and returns a
    CORS-friendly 200 response.
    """
    payload = json.loads(event['body'])
    userid = payload['userid']
    stock = payload['stock']

    existing = getDynamoData(userid)
    if existing == "":
        # First stock for this user: store it as-is.
        putDynamoData(userid, stock)
    else:
        putDynamoData(userid, addDynamoStock(stock, existing))

    # TODO implement
    return {
        "statusCode": 200,
        "body": json.dumps({
            "result": "good"
        }),
        'headers': {
            "Content-Type" : "application/json",
            "Access-Control-Allow-Origin" : "*",
            "Allow" : "GET, OPTIONS, POST",
            "Access-Control-Allow-Methods" : "GET, OPTIONS, POST",
            "Access-Control-Allow-Headers" : "*"
        }
    }
def getDynamoData(userid):
    """Return the stored comma-separated stock string for *userid*.

    Returns "" when the user has no row in the Stocks table.
    """
    import boto3
    dynamodb = boto3.client('dynamodb')
    db_ret = dynamodb.get_item(TableName='Stocks', Key={'UserId':{'S': userid}})
    # FIX: detect a missing item explicitly instead of counting top-level
    # response keys (len(db_ret) < 2), which breaks if boto3 ever adds or
    # removes response metadata fields.
    if 'Item' not in db_ret:
        return ""
    return db_ret['Item']['Stock']['S']
def putDynamoData(userid, stocks):
    """Upsert the comma-separated *stocks* string for *userid*."""
    import boto3
    client = boto3.client('dynamodb')
    item = {
        'UserId': {'S': userid},
        'Stock': {'S': stocks},
    }
    client.put_item(TableName='Stocks', Item=item)
def addDynamoStock(new_stock, existing_stocks):
    """Return *existing_stocks* with *new_stock* appended after a comma."""
    return "{},{}".format(existing_stocks, new_stock)
|
#YOU ARE CURRENTLY VIEWING THE GENIUS CODING BEHIND PYTHON CALCULATOR 1.0
#BRACE YOURSELVES....SWAG INCOMING!
print " +---+---+---+---+---+---+"
print " |~~~~~~~ THIS IS ~~~~~~~|"
print " +---+---+---+---+---+---+"
print " | P | Y | T | H | O | N |"
print " +---+---+---+---+---+---+"
while 1>0:
print""
print"******************************************************************************"
print""
print"____________Welcome to Python Calculator 1.0____________" #COMMENTS ARE AMAZING
a=input("Enter First Number ==> ")
b=input("Enter Second Number ==> ")
print""
c=raw_input("Choose your operator( + or - or * or /) : ")
if c==("+"):
print" The solution is : ",a+b #MUCH SPACING!
elif c==("-"):
print" The difference is: ",a-b
elif c==("*"):
print" The product is : ",a*b
elif c==("/"):
d=float(a)/float(b)
print" The quotient is : ",("%.2f"%d) #ACCURACY! :')
else:
print" :x____MIND====> < BOOM! >____x: ",exit()
print
print"Thank You For Using Python Calculator 1.0"
end=input("To exit press 1/To restart calculator press 2 : ") #TOO MANY OPTIONS HERE...HOPE YOU CAN HANDLE THE PRESSURE :D
if end!=2:
print" | +----+ +- -+ +---- | "
print" | | | \ / | | "
print" | |---< \ / |--- | "
print" | | | | | | "
print" 0 +----+ + +---- 0 ",exit()
#HAVE A WONDERFUL DAY AHEAD
|
# Simulation Logic
from utils import v_sub, v_add, v_mul, v_div, v_array_sum, agent_degree_rotation, convert_to_unit_vector, limit
from Agent import DEFAULT_SPEED, Agent
from Obstacle import Obstacle
import shared
from random import randrange
# Blue Agent:0
# Red Agent:1
# Flocking-rule weights per team: index 0 = blue, index 1 = red.
ALIGNMENT_WEIGHT = [10,4]
COHESION_WEIGHT = [5,3]
SEPERATION_WEIGHT = [5,8]
OBSTACLE_DOGDGE_WEIGHT = 180
# Interaction radii for each steering rule (same distance units as Agent.pos).
ALIGNMENT_RADIUS = 200
COHESION_RADIUS = 170
SEPERATION_RADIUS = 30
OBSTACLE_DOGDGE_RADIUS = 70
# Clamp range for the user-tunable speed adjustment (see adjust_speed).
MAX_SPEED = 25
MIN_SPEED = 1
def compute_alignment(myAgent,t):
    """Average velocity of same-team neighbours within ALIGNMENT_RADIUS,
    capped to magnitude 0.05; returns (0,0) when there are no neighbours.

    *t* selects the team: agents at even indices are team 0, odd team 1.
    """
    total = (0,0)
    count = 0
    for idx, other in enumerate(shared.agent_array):
        if other != myAgent and myAgent.distance_from(other) < ALIGNMENT_RADIUS and idx % 2 == t:
            total = v_add(total, other.vel)
            count += 1
    if not count:
        return total
    return limit(v_div(total, count), 0.05)
def compute_cohesion(myAgent,t):
    """Steer toward the centre of mass of same-team neighbours within
    COHESION_RADIUS, capped to magnitude 0.05; (0,0) with no neighbours."""
    compute_vel = (0,0)
    neighbors_cnt = 0
    for i in range(len(shared.agent_array)):
        agent = shared.agent_array[i]
        if agent != myAgent and myAgent.distance_from(agent) < COHESION_RADIUS and t == i%2:
            # BUG FIX: accumulate the offsets instead of overwriting, so the
            # average below reflects every neighbour rather than only the
            # last one found (mirrors compute_alignment's accumulation).
            compute_vel = v_add(compute_vel, v_sub(agent.pos,myAgent.pos))
            neighbors_cnt+=1
    if neighbors_cnt == 0:
        return compute_vel
    compute_vel = v_div(compute_vel,neighbors_cnt)
    return limit(compute_vel, 0.05)
def compute_seperation(myAgent,t):
    """Repulsion from same-team neighbours closer than SEPERATION_RADIUS,
    each weighted by 1/distance and averaged; (0,0) with no neighbours."""
    total = (0,0)
    count = 0
    for idx, other in enumerate(shared.agent_array):
        if other != myAgent and myAgent.distance_from(other) < SEPERATION_RADIUS and idx % 2 == t:
            away = convert_to_unit_vector(v_sub(myAgent.pos, other.pos))
            total = v_add(total, v_div(away, myAgent.distance_from(other)))
            count += 1
    return total if count == 0 else v_div(total, count)
def compute_obstacle_dodge(myAgent):
    """Repulsion away from obstacles within OBSTACLE_DOGDGE_RADIUS, each
    weighted by 1/distance and averaged; (0,0) when no obstacle is near."""
    total = (0,0)
    near = 0
    for obs in shared.obstacle_array:
        if obs.distance_from(myAgent) < OBSTACLE_DOGDGE_RADIUS:
            away = convert_to_unit_vector(v_sub(myAgent.pos, obs.pos))
            total = v_add(total, v_div(away, myAgent.distance_from(obs)))
            near += 1
    return total if near == 0 else v_div(total, near)
def check_agent_inbound():
    """Wrap agents torus-style: leaving one screen edge re-enters at the opposite edge."""
    for agent in shared.agent_array:
        x, y = agent.pos
        # Horizontal wrap (x > WIDTH and x < 0 are mutually exclusive).
        if x > shared.WIDTH:
            agent.pos = (0, y)
        elif x < 0:
            agent.pos = (shared.WIDTH, y)
        x, y = agent.pos
        # Vertical wrap.
        if y > shared.HEIGHT:
            agent.pos = (x, 0)
        elif y < 0:
            agent.pos = (x, shared.HEIGHT)
def agent_update():
    """Advance the simulation one tick: rebuild every agent with its new
    steering velocity and position (double-buffered via temp_agent_array so
    all rules read the previous frame's state)."""
    temp_agent_array = []
    for i in range(len(shared.agent_array)):
        agent = shared.agent_array[i]
        temp_vel = (0,0)
        # Team id is i % 2 (even indices: blue, odd: red); weights are per-team.
        cohesion_v = compute_cohesion(agent,i%2)
        alignment_v = compute_alignment(agent,i%2)
        seperation_v = compute_seperation(agent,i%2)
        obstacle_dodge_v = compute_obstacle_dodge(agent)
        # Weighted sum of all steering contributions plus the current velocity.
        v_array = [agent.vel,
                    v_mul(cohesion_v,COHESION_WEIGHT[i%2]),
                    v_mul(alignment_v,ALIGNMENT_WEIGHT[i%2]),
                    v_mul(seperation_v,SEPERATION_WEIGHT[i%2]),
                    v_mul(obstacle_dodge_v, OBSTACLE_DOGDGE_WEIGHT)
                    ]
        temp_vel = v_array_sum(v_array)
        temp_vel = v_mul(temp_vel,shared.FPS)
        a = Agent(agent.pos, temp_vel)
        # Red agents (odd index) get a +6 bonus on the speed cap.
        if i%2:
            a.vel = limit(temp_vel, DEFAULT_SPEED + 6 + shared.speed_adjustment)
        else:
            a.vel = limit(temp_vel, DEFAULT_SPEED + shared.speed_adjustment)
        # change_vel_if_zero(a)
        a.update_pos()
        temp_agent_array.append(a)
    shared.agent_array = temp_agent_array
def randomize_position():
    """Teleport every agent to a uniformly random on-screen position."""
    for agent in shared.agent_array:
        new_x = randrange(0, shared.WIDTH, 1)
        new_y = randrange(0, shared.HEIGHT, 1)
        agent.pos = (new_x, new_y)
def clear_all_item():
    """Wipe the simulation: drop all agents and all obstacles."""
    shared.agent_array, shared.obstacle_array = [], []
def adjust_speed(type):
    """Nudge the global speed adjustment up (truthy *type*) or down by one,
    clamped to [MIN_SPEED, MAX_SPEED].

    NOTE(review): the parameter shadows the builtin `type`; name kept so
    keyword callers elsewhere keep working.
    """
    delta = 1 if type else -1
    clamped = shared.speed_adjustment + delta
    shared.speed_adjustment = max(MIN_SPEED, min(MAX_SPEED, clamped))
|
#!/usr/bin/python3
import sys
import subprocess
#exchanges = ["bittrex", "binance_us", "kucoin", "ftx_us", "kraken", "gemini"
exchanges = ["ftx_us", "kucoin"]
import util
import json
from collections import defaultdict
markets = defaultdict(list)
def getdata():
    """Load each exchange's cached CoinGecko ticker dump ("<exchange>.data")
    and record the set of base-asset symbols it lists in `markets`.

    Fixes: the data file is now opened with a context manager (the original
    leaked the file handle), and errors are handled per exchange with a
    message naming the failing exchange instead of the misleading
    "wrong usage" text.
    """
    for exchange in exchanges:
        print("exchange : {}".format(exchange))
        try:
            # cmd = 'curl -X GET "https://api.coingecko.com/api/v3/exchanges/{}" -H accept: application/json > {}.data'.format(exchange, exchange)
            # cmd = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
            with open("{}.data".format(exchange)) as fh:
                data = json.load(fh)
            markets[exchange] = {coin['base'] for coin in data['tickers']}
            # Persist after every exchange so a later failure keeps earlier results.
            util.savep("markets", markets)
            print("markets: {}".format(markets))
        except Exception as e:
            print(e)
            print("failed to load data for {}".format(exchange))
#getdata()
# Compare every unordered pair of exchanges once and write the base-asset
# symbols they have in common to common.txt.
bar = util.loadp("markets")
print("bar : {}".format(bar))
lines = list()
import os
print(os.getcwd())
dones = set()
for market1 in exchanges:
    for market2 in exchanges:
        if market1 == market2:
            continue
        # Skip a pair already handled in the opposite order.
        if "{}|{}".format(market2, market1) in dones:
            continue
        pair = "{}|{}".format(market1, market2)
        dones.add(pair)
        # Bug fix: the original recorded self-pairs ("a|a") in `dones` and
        # wrote "market1|market1" as every section header; the header is now
        # the actual pair being compared.
        addme = " ".join(bar[market1] & bar[market2])
        lines.append("{}\n{}\n\n".format(pair, addme))
print("lines: {}".format(lines))
with open("common.txt", "w") as f:
    what = "\n".join(lines)
    print("what : {}".format(what))
    f.write(what)
|
print("Hammie was here!")
sum = 0
empty = []
for i in range(1, 1000):
if i%3 == 0 or i%5 == 0:
empty.append(i)
sum += i
print(empty)
print(sum)
print("hello")
print("test")
|
from setuptools import setup
# Imported so the package name/version come from the package itself (single source of truth).
import pylint_report
setup(
    name=pylint_report.__name__,
    version=pylint_report.__version__,
    description='Generates an html report summarizing the results of pylint.',
    url='https://github.com/drdv/pylint-report',
    author='Dimitar Dimitrov',
    author_email='mail.mitko@gmail.com',
    license='Apache 2.0',
    python_requires='>=3.6',
    py_modules=['pylint_report'],
    install_requires=['pandas', 'pylint'],
    # Installed as a console script directly from the module file.
    scripts=['pylint_report/pylint_report.py'],
)
|
from time import sleep
from json import dumps
from numpy.random import choice, randint
from kafka import KafkaProducer
def get_random_value():
    """
    Build one dummy transaction record.

    :return: dict with 'city', 'currency' and 'amount' keys
    """
    cities = ['Lviv', 'Kyiv', 'Odessa', 'Donetsk']
    currencies = ['HRN', 'USD', 'EUR', 'GBP']
    record = {'city': choice(cities), 'currency': choice(currencies)}
    # numpy randint: low inclusive, high exclusive.
    record['amount'] = randint(-100, 100)
    return record
if __name__ == '__main__':
    # Producer serialises each record to JSON; gzip compresses the batches.
    producer = KafkaProducer(bootstrap_servers=['localhost:9092'],
                             value_serializer=lambda x: dumps(x).encode('utf-8'),
                             compression_type='gzip')
    topic = 'transaction'
    # Send batches of 100 dummy transactions forever, sleeping 1 s between batches.
    while True:
        for _ in range(100):
            data = get_random_value()
            try:
                # future.get blocks up to 10 s for broker acknowledgement.
                future = producer.send(topic=topic, value=data)
                record_metadata = future.get(timeout=10)
                print('--> The message has been sent to a topic: {}, partition: {}, offset: {}'
                      .format(record_metadata.topic, record_metadata.partition, record_metadata.offset))
            except Exception as e:
                print('--> Error occured: {}'.format(e))
            finally:
                # Flush after every message so nothing sits in the batch buffer.
                producer.flush()
        sleep(1)
|
#!/usr/bin/python3 # This is python_server1.py file
import socket
import threading
import pickle
from time import sleep
from collections import OrderedDict
from section import generate_rooms
import sql
def main():
    """Accept TCP connections on port 9999 forever, spawning one
    work_thread per connected client."""
    print('start server')
    # Listening socket bound to this machine's hostname.
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind((socket.gethostname(), 9999))
    server_socket.listen(5)  # backlog of up to 5 pending connections
    print('waiting connection...')
    while True:
        client_socket, address = server_socket.accept()
        print("Got a connection from %s" % str(address))
        # Handle each client on its own thread so accept() keeps running.
        threading.Thread(target=work_thread, args=(client_socket,)).start()
def work_thread(client_socket):
    """Handle one client connection for its whole lifetime.

    The client drives a small text protocol over the socket: stage download,
    account setup, room listing/creation/joining/leaving, word-list download,
    game start and answer judging.  State accumulated across requests
    (client, room, play_rooms, words, req_*) lives in this frame, so the
    requests must arrive in the order the client UI issues them.

    Fixes applied:
    - string comparisons use `==` instead of `is` (identity comparison with a
      literal is implementation-dependent and fails for data received over
      the wire);
    - send_msg is initialised up front so the "-10" shutdown branch cannot
      raise NameError when the client disconnects before entering a room.
    """
    global all_room
    try:
        # Send the game's stage layout to the client.
        client_socket.send(pickle.dumps(generate_rooms()))
        print("send stage")
        room = None
        client_name = "GuestUser"
        score = 0
        send_msg = ""  # fix: was unbound until "request_words" had run
        while True:
            receive_msg = client_socket.recv(1024).decode("ascii")
            print("Receive request from ", client_name, ":", receive_msg)
            # The client window was closed.
            if receive_msg == "-10":
                print("-10")
                if room is not None:
                    room.remove_player(client_socket)
                    if len(room.players) == 0:
                        play_rooms.rooms.remove(room)
                client_socket.send(send_msg.encode("ascii"))
                break
            # The room creator left: delete the room and notify members.
            elif receive_msg == "c_leave_room":
                print("delete_room")
                room.remove_player(client_socket)
                play_rooms.rooms.remove(room)
                client_socket.send("leave_room".encode("ascii"))
                for join_player in room.players:
                    if client_socket is not join_player:
                        join_player.send("room_deleted".encode("ascii"))
            # A room member (not the creator) left the room.
            elif receive_msg == "j_leave_room":
                print("leave_room")
                room.remove_player(client_socket)
                client_socket.send("leave_room".encode("ascii"))
            # Receive the user object / name from the client.
            elif receive_msg == "system_start":
                client = pickle.loads(client_socket.recv(4096))
                client_name = client.get_name()
                if client_name == "":
                    client_name = "Guest"
                print("create account:", client.get_name())
            # Send the room list for the client's selected stage position.
            elif receive_msg == "request_RoomList":
                print("request_RoomList")
                client.position = (pickle.loads(client_socket.recv(8192))).position
                req_part = client.position[0]
                req_section = client.position[1]
                req_part_section = client.position[2]
                room = None
                # Locate the matching part/section/word-part in the shared stage tree.
                for part in all_room:
                    if req_part.number == part.number:
                        for section in part.sections:
                            if req_section.number == section.number:
                                for part_section in section.word_parts:
                                    if req_part_section.part_name == part_section.part_name:
                                        play_rooms = part_section
                                        print(play_rooms.address)
                                        break
                # Send only rooms that still have a free slot.
                room_list = list()
                for r in play_rooms.rooms:
                    if r.check_vacancy() is True:
                        room_list.append(
                            {"name": r.name, "player_num": str(len(r.players)) + "/" + str(r.max_num), "id": r.hashcode,
                             "reception": r.reception})
                client_socket.send(pickle.dumps(room_list))
                print("send RoomList")
            # Create ("c") or join ("j") a room.
            elif receive_msg == "request_room":
                print("request_room")
                req_room = pickle.loads(client_socket.recv(4096))
                # fix: compare with == (original used `is`, an identity check)
                if req_room[2] == "c":
                    room = play_rooms.create_room(req_room[0], req_room[1])
                    room.score_list[client_name] = score
                    print("create Room")
                    room.add_player(client_socket)
                elif req_room[3] == "j":
                    for r in play_rooms.rooms:
                        print(r.hashcode)
                        if req_room[2] == r.hashcode:
                            room = r
                            room.add_player(client_socket)
                            room.score_list[client_name] = score
                            print(room.name)
            # Send the word list appropriate for the selected stage.
            elif receive_msg == "request_words":
                print("request_words")
                # "すべて" means "all word parts": omit the part_section filter.
                if req_part_section.part_name == "すべて":
                    words = sql_session.query(sql.Word). \
                        filter(sql.Word.part == req_part.number). \
                        filter(sql.Word.section == req_section.number). \
                        all()
                else:
                    words = sql_session.query(sql.Word). \
                        filter(sql.Word.part == req_part.number). \
                        filter(sql.Word.section == req_section.number). \
                        filter(sql.Word.part_section == req_part_section.part_name). \
                        all()
                client_socket.send(pickle.dumps(words))
                print("send words")
                send_msg = "You entered in Room: " + room.name
                client_socket.send(send_msg.encode("ascii"))
            # Start the game for everyone in the room and close reception.
            elif receive_msg == "game_start":
                print("game_start")
                for player in room.players:
                    player.send("correct_answer".encode("ascii"))
                    sleep(1)
                    player.send(pickle.dumps(room.score_list))
                room.reception = False
            # Judge whether the received answer is correct and broadcast state.
            elif receive_msg == "answer":
                print("answer")
                receive_msg = client_socket.recv(1024).decode("ascii")
                send_msg = client.get_name() + ": " + receive_msg
                if receive_msg == words[room.index].english:
                    score += 1
                    room.score_list[client_name] = score
                for player in room.players:
                    player.send(send_msg.encode('ascii'))
                    sleep(0.25)
                    if receive_msg == words[room.index].english:
                        if room.index + 1 == len(words):
                            # Last word answered: finish the game and send final ranking.
                            player.send("game_finish".encode("ascii"))
                            sleep(0.25)
                            send_score_list = OrderedDict(
                                sorted(room.score_list.items(), key=lambda x: x[1], reverse=True))
                            player.send(pickle.dumps(send_score_list))
                            sleep(0.25)
                        else:
                            player.send("correct_answer".encode("ascii"))
                            sleep(0.25)
                            send_score_list = OrderedDict(
                                sorted(room.score_list.items(), key=lambda x: x[1], reverse=True))
                            player.send(pickle.dumps(send_score_list))
                            sleep(0.25)
                # Advance to the next word only after everyone was notified.
                if receive_msg == words[room.index].english:
                    room.index += 1
    finally:
        client_socket.close()
        print("disconnection")
if __name__ == '__main__':
    # Stage layout shared (read) by every client handler thread.
    all_room = generate_rooms()
    sql_session = sql.get_session()
    # sql.add_words()
    main()
|
# -*- coding: utf-8 -*-
import logging
import datetime
import uuid
from model.assistance.justifications.justifications import Justification, RangedJustification, RangedTimeJustification
from model.assistance.justifications.status import Status
from model.assistance.justifications.status import StatusDAO
from model.assistance.assistanceDao import AssistanceDAO
from model.users.users import UserDAO
class TaskJustificationDAO(AssistanceDAO):
    """DAO for task ("comisión") justifications stored in
    assistance.justification_task.

    Subclasses must define a class attribute ``type`` (the discriminator
    stored in the ``type`` column) and a ``_fromResult`` row factory.
    """
    dependencies = [UserDAO, StatusDAO]

    @classmethod
    def _createSchema(cls, con):
        """Create the assistance schema and justification_task table if missing."""
        super()._createSchema(con)
        cur = con.cursor()
        try:
            sql = """
              CREATE SCHEMA IF NOT EXISTS assistance;

              create table IF NOT EXISTS assistance.justification_task (
                    id varchar primary key,
                    user_id varchar not null references profile.users (id),
                    owner_id varchar not null references profile.users (id),
                    jstart timestamptz default now(),
                    jend timestamptz default now(),
                    notes varchar,
                    type varchar not null,
                    created timestamptz default now()
              );
              """
            cur.execute(sql)
        finally:
            cur.close()

    @classmethod
    def persist(cls, con, j):
        """Insert *j* when its id is unknown, otherwise update it; returns the id."""
        assert j is not None
        cur = con.cursor()
        try:
            if not hasattr(j, 'end'):
                j.end = None

            if ((not hasattr(j, 'id')) or (j.id is None)):
                j.id = str(uuid.uuid4())

            if len(j.findById(con, [j.id])) <= 0:
                # The concrete class name is persisted as the type discriminator.
                j.type = j.__class__.__name__
                r = j.__dict__
                cur.execute('insert into assistance.justification_task (id, user_id, owner_id, jstart, jend, type, notes) '
                            'values (%(id)s, %(userId)s, %(ownerId)s, %(start)s, %(end)s, %(type)s, %(notes)s)', r)
            else:
                r = j.__dict__
                cur.execute('update assistance.justification_task set user_id = %(userId)s, owner_id = %(ownerId)s, '
                            'jstart = %(start)s, jend = %(end)s, type = %(type)s, notes = %(notes)s where id = %(id)s', r)
            return j.id

        finally:
            cur.close()

    @classmethod
    def findById(cls, con, ids):
        """Return the justifications matching the given list of ids."""
        assert isinstance(ids, list)
        cur = con.cursor()
        try:
            cur.execute('select * from assistance.justification_task where id in %s', (tuple(ids),))
            return [ cls._fromResult(con, r) for r in cur ]
        finally:
            cur.close()

    @classmethod
    def findByUserId(cls, con, userIds, start, end):
        """Find this DAO's type of justifications for *userIds* overlapping [start, end].

        Bug fix: when ``end`` is None, today's date (eDate) is now actually
        used in the query — the original computed eDate but passed the raw
        ``end`` (None) to SQL; the isinstance assert is relaxed accordingly.
        """
        assert isinstance(userIds, list)
        assert isinstance(start, datetime.date)
        assert end is None or isinstance(end, datetime.date)

        if len(userIds) <= 0:
            return

        cur = con.cursor()
        try:
            t = cls.type
            eDate = datetime.date.today() if end is None else end
            cur.execute('''
                        SELECT * from assistance.justification_task
                        WHERE user_id in %s
                        AND (jstart <= %s AND jend >= %s)
                        AND type = %s
                        ''', (tuple(userIds), eDate, start, t))
            return [ cls._fromResult(con, r) for r in cur ]
        finally:
            cur.close()
class TaskWithReturnJustificationDAO(TaskJustificationDAO):
    """DAO for task justifications that include a return time."""
    type = "TaskWithReturnJustification"

    @classmethod
    def _fromResult(cls, con, r):
        """Build a TaskWithReturnJustification from a DB row mapping."""
        justification = TaskWithReturnJustification()
        justification.id = r['id']
        justification.userId = r['user_id']
        justification.ownerId = r['owner_id']
        justification.start = r['jstart']
        justification.end = r['jend']
        justification.notes = r['notes']
        # Attach the most recent workflow status for this justification.
        justification.setStatus(Status.getLastStatus(con, justification.id))
        return justification
class TaskWithoutReturnJustificationDAO(TaskJustificationDAO):
    """DAO for task justifications without a return trip."""
    type = "TaskWithoutReturnJustification"

    @classmethod
    def _fromResult(cls, con, r):
        """Build a TaskWithoutReturnJustification from a DB row mapping."""
        justification = TaskWithoutReturnJustification()
        justification.id = r['id']
        justification.userId = r['user_id']
        justification.ownerId = r['owner_id']
        justification.start = r['jstart']
        justification.end = r['jend']
        justification.notes = r['notes']
        # Attach the most recent workflow status for this justification.
        justification.setStatus(Status.getLastStatus(con, justification.id))
        return justification
class TaskJustification(RangedTimeJustification):
    """Common base for "Boleta en comisión" (task/commission) justifications."""
    def __init__(self, start = None, end=None, userId = None, ownerId = None):
        super().__init__(start, end, userId, ownerId)
        # typeName is the display name; classType records the persisted base-class discriminator.
        self.typeName = "Boleta en comisión"
        self.classType = RangedTimeJustification.__name__
class TaskWithReturnJustification(TaskJustification):
    """Task justification where the employee returns (has an end time)."""
    dao = TaskWithReturnJustificationDAO
    identifier = "con retorno"

    def __init__(self, start = None, end = None, userId = None, ownerId = None):
        super().__init__(start, end, userId, ownerId)
        self.identifier = TaskWithReturnJustification.identifier

    def getIdentifier(self):
        """Human-readable type name, e.g. "Boleta en comisión con retorno"."""
        return self.typeName + " " + self.identifier

    def changeEnd(self, con, end):
        """Set a new end time and persist the change.

        Bug fix: persist() takes (con, justification); the original called it
        with only the connection, which raised TypeError at runtime.
        """
        self.end = end
        TaskWithReturnJustificationDAO.persist(con, self)
class TaskWithoutReturnJustification(TaskJustification):
    """Task justification without a return trip (no comeback on the same day)."""
    dao = TaskWithoutReturnJustificationDAO
    identifier = "sin retorno"
    def __init__(self, start = None, end = None, userId = None, ownerId = None):
        super().__init__(start, end, userId, ownerId)
        self.identifier = TaskWithoutReturnJustification.identifier
    def getIdentifier(self):
        """Human-readable type name, e.g. "Boleta en comisión sin retorno"."""
        return self.typeName + " " + self.identifier
    def _loadWorkedPeriods(self, wps):
        """Attach every worked period on the start date whose end is at or
        after this justification's start; only runs when APPROVED."""
        assert self.getStatus() is not None
        if self.getStatus().status != Status.APPROVED:
            return
        for wp in wps:
            # Same calendar day, and the period ends after the task started.
            if wp.date == self.start.date() and wp.getEndDate() >= self.start:
                self.wps.append(wp)
                wp.addJustification(self)
|
from django.shortcuts import render
'''
# Create your sql here.
from django.shortcuts import render, HttpResponse, render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from django.views.generic import View
from django.utils.decorators import method_decorator
import os
from django.core.serializers import serialize
from ..models import User,Questions
import json as simplejson
def update(request):
# 研发
# 1 ABC
group = '研发组'
question = '程序员们的头发数量可能截然不同,但说的话却常相似。研发最喜欢说的一句话可能是?'
option1 = '我好菜啊'
option2 = '我这里没问题呀'
option3 = '还可以加需求'
Questions.objects.create(group=group,question=question,option1=option1,option2=option2,option3=option3)
# 2 BCA
group = '研发组'
question = '一天,某前端和后端正在讨论用户注册问题,你发现,前端把____和____数据传给后端就能够最好实现用户注册。'
option1 = 'username email'
option2 = 'email password'
option3 = 'username password'
Questions.objects.create(group=group,question=question,option1=option1,option2=option2,option3=option3)
# 3 ABC
group = '研发组'
question = '研发测试产品注册功能时,发现某用户登陆使用了未经注册的用户名,于是后端该向前端发送____反映此情况?'
option1 = 'unknown user'
option2 = 'u u'
option3 = '404'
Questions.objects.create(group=group,question=question,option1=option1,option2=option2,option3=option3)
# 4 CAB
group = '研发组'
question = '要知道程序员的双肩包里,可能会有一切。以下最不可能出现在程序员双肩包里的是?'
option1 = '折叠板凳'
option2 = '电脑'
option3 = 'ipad'
Questions.objects.create(group=group,question=question,option1=option1,option2=option2,option3=option3)
# 5 CBA
group = '研发组'
question = '某产品上线前,前后端开始了紧张的接口调试,此时后端给出的数据结构混乱,如果你是前端,你会_____'
option1 = '把后端扔出去'
option2 = '自己沉住气自己解析'
option3 = '让后端优化结构数据'
Questions.objects.create(group=group,question=question,option1=option1,option2=option2,option3=option3)
#产品
# 1 BAC
group = '产品组'
question = '产品们都有着共同的酸甜苦辣,产品最喜欢说的一句话可能是?'
option1 = '实现这个不难的'
option2 = '给个排期吧'
option3 = '这个功能要不砍了吧?'
Questions.objects.create(group=group,question=question,option1=option1,option2=option2,option3=option3)
# 2 CAB
group = '产品组'
question = '你突然想到了关于新产品的绝妙想法,你该用什么方式清楚地向研发表述你的idea?'
option1 = '给他你的手绘原型图'
option2 = '用你的高超口才叙述'
option3 = '用AXURE出好原型图'
Questions.objects.create(group=group,question=question,option1=option1,option2=option2,option3=option3)
# 3 BAC
group = '产品组'
question = '一个成熟的产品,和研发在新产品上有了矛盾时,很有可能采取什么手段?'
option1 = '扮演舔狗'
option2 = '激情辩论'
option3 = '冷处理'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 4 CBA
group = '产品组'
question = '以下哪项不是产品需要做的事?'
option1 = '需求分析'
option2 = '用户调研、'
option3 = 'UI设计'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 5 CAB
group = '产品组'
question = '你认为一个起步阶段的产品经理最需要看?'
option1 = '通识读本'
option2 = '摄影画册'
option3 = '专业入门书籍'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 设计
# 1 ABC
group = '设计组'
question = '一个设计最喜欢说的话可能是?'
option1 = '改哪里啊?'
option2 = '再过几天给你'
option3 = '我尽快把下一版给你'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 2 ABC
group = '设计组'
question = '下面哪项是设计最常使用的软件?'
option1 = 'PS'
option2 = 'AI'
option3 = 'MS Office'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 3 CAB
group = '设计组'
question = '作为迎新视频的筹划,现在有三个预选摄影师,各有其优势,综合考量一番后,你敲定_____作为本次大会御用摄影'
option1 = '手不抖,但只会相机自动模式'
option2 = '手易抖,有多年单反使用经验'
option3 = '设备佳,手抖,只会自动模式'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 4 ABC
group = '设计组'
question = '设计在听到什么需求时,容易流泪?'
option1 = '把它设计成五彩斑斓的黑'
option2 = '这个logo占的面积小一点,整体大一点'
option3 = '把标题排成竖排文字叭,再换个字体'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 5 ABC
group = '设计组'
question = '以下哪项很有可能属于UI设计的错误?'
option1 = '控件不统一'
option2 = '缺少层级差异'
option3 = '页面部分留白'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 运营组
# 1 ABC
group = '运营组'
question = '运营组最喜欢说的话可能是?'
option1 = '这个热点必须追!'
option2 = '怎么和用户说明情况啊?'
option3 = '我不关心热点,我只关心你'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 2 ACB
group = '运营组'
question = '今天你在QQ平台值班,需要发一条说说。这时,有三个内容集体撞车,你要选择:'
option1 = '某单车为南大出了高颜值定制款'
option2 = '世界5G技术又取得新进展'
option3 = '四六级考试报名链接正式发布'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 3 ACB
group = '运营组'
question = '写推送是运营的必修课,在确定以某热点为主题后,你为封面头图愁秃了头,这张图要怎么办呢?'
option1 = 'P图达人本人,自己做 '
option2 = '虽然平时都用美图秀秀,但还是试试看 '
option3 = '找设计组寻求帮助'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 4 ABC
group = '运营组'
question = '运营的选题脑洞,有时候大到不可思议,你认为一个运营的选题最有可能来自?'
option1 = '学校闲逛途中'
option2 = '刷微博追剧'
option3 = '选题会上'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 5 CAB
group = '运营组'
question = '运营不仅需要单纯的内容输出,还需要冷静地分析数据。以下各项最可能不是运营关心的数据是?'
option1 = '发出内容推送的一小时阅读量'
option2 = '推送或活动达到的转化率'
option3 = '一篇推送完成所需的总时长'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
#行政
# 1 CBA
group = '行政组'
question = '家园的行政几乎是十八般技能槽统统点满的存在,以下各项行政可能不会的是?'
option1 = 'Ms Office'
option2 = 'PR'
option3 = 'Vue'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 2 CBA
group = '行政组'
question = '温暖如行政,再细心记下工作室每个小伙伴的生日后,还会精心准备祝福语,最近运营的学姐要过生日了,下面哪句祝福语最合适呢?'
option1 = '前辈,祝您生日快乐!'
option2 = 'xxx终于20岁啦,你已经是个成熟的女大学生了,生日快乐!'
option3 = '祝为家园倾注能量的美丽运营生日快乐!你是最棒的话题制造者,推送都能10w + ~'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 3 BCA
group = '行政组'
question = '家园每年的全体大会都令人期待,本次由你筹备。为了征集节目,你要怎么推动身边的家园er们参与节目呢?'
option1 = '在线征集,筒子们自愿报名~'
option2 = '暗中观察,说服潜在表演嘉宾'
option3 = '靠自己,嗨不嗨就靠你了靠自己,嗨不嗨就靠你了'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 4 BCA
group = '行政组'
question = '收集了几十名程序员的无课情况之后,你需要做一张无课统计的电统计表。怎么才能让这张表格尽可能美观呢?'
option1 = '设置表格固定的行高列宽'
option2 = '表格完成后转为PDF格式'
option3 = '表格的行列空间尽可能拉大'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
# 5 ABC
group = '行政组'
question = '行政不是带来惊喜,就是在带来惊喜的路上。圣诞节时你要策划一个活动,以下哪项可能是次优先级的考虑因素?'
option1 = '活动参与人数'
option2 = '活动大致预算'
option3 = '活动流程内容'
Questions.objects.create(group=group, question=question, option1=option1, option2=option2, option3=option3)
return HttpResponse('MMM')
'''
|
import uuid
import django.utils.timezone as timezone
from django.db import models
class Test(models.Model):
    # Simple demo model keyed by a random UUID.
    id=models.UUIDField(primary_key=True, auto_created=True, default=uuid.uuid4, editable=False)
    name=models.CharField(max_length=20)
    code=models.IntegerField(default=0)
    grade=models.CharField(max_length=10,default='')
class LoadDataStatus(models.Model):
    # Tracks one data-load run and its outcome.
    type=models.TextField(default=None)     # NOTE(review): default=None on a TextField — confirm intent
    dirpath=models.TextField(default=None)  # directory the data was loaded from
    exceptionmsgs=models.TextField(default='')
    status=models.CharField(max_length=2)   # short status code; semantics defined by callers
    createtime=models.DateTimeField('createtime',auto_now_add=True)
    updatetime=models.DateTimeField('updatetime',default=timezone.now)
class AnalysisDataStatus(models.Model):
    # Tracks one analysis run: produced table names, messages and outcome.
    cleantablename = models.TextField(default='')
    industrytablename = models.TextField(default='')
    mvpmsg = models.TextField(default='')
    mgpmsg = models.TextField(default='')
    mvimsg = models.TextField(default='')
    mgimsg = models.TextField(default='')
    dirpath = models.TextField(default='')
    status = models.CharField(max_length=2)  # short status code; semantics defined by callers
    exceptionmsgs = models.TextField(default='')
    createtime = models.DateTimeField('createtime', auto_now_add=True)
    updatetime = models.DateTimeField('updatetime', default=timezone.now)
class User(models.Model):
    # Application user account stored in the custom "user" table.
    username=models.TextField(default='')
    usermobile=models.TextField(default='')
    password=models.TextField(default='')  # NOTE(review): stored as plain text field — confirm it is hashed upstream
    realname=models.TextField(default='')
    email=models.TextField(default='')
    is_active=models.TextField(default='')
    createtime = models.DateTimeField('createtime', auto_now_add=True)
    updatetime = models.DateTimeField('updatetime', default=timezone.now)
    class Meta:
        db_table = 'user'
class Role(models.Model):
    # Role assigned to a user, keyed informally by usermobile.
    roleid=models.TextField(default='')
    rolename=models.TextField(default='')
    usermobile=models.TextField(default='')
    class Meta:
        db_table = 'role'
class Resource(models.Model):
    # Resource (e.g. menu/URL) granted to a role; parent fields model a tree.
    roleid = models.TextField(default='')
    resourceid=models.TextField(default='')
    resourcename=models.TextField(default='')
    resourceurl=models.TextField(default='')
    resourceparentid=models.TextField(default='')
    resourceparentname=models.TextField(default='')
    class Meta:
        db_table = 'resource'
|
# スクレイピング
# !pip install lxml
import requests
import re
import uuid
from bs4 import BeautifulSoup
import os
word = "カメラ"
images_dir = 'image_data/camera/'
if not os.path.exists(images_dir):
os.makedirs(images_dir)
url = "https://search.nifty.com/imagesearch/search?select=1&chartype=&q=%s&xargs=2&img.fmt=all&img.imtype=color&img.filteradult=no&img.type=all&img.dimensions=large&start=%s&num=20"
pages = [1,20,40,60,80,100]
numb = 0
for p in pages:
r = requests.get(url%(word,p))
soup = BeautifulSoup(r.text,'lxml')
imgs = soup.find_all('img',src=re.compile('^https://msp.c.yimg.jp/yjimage'))
for img in imgs:
print('fetched ' + str(img['src']))
r = requests.get(img['src'])
with open(images_dir+str(numb)+str('.jpg'),'wb') as file:
file.write(r.content)
numb += 1
# 画像の確認
# !ls image_data/camera
# from IPython.display import Image,display_jpeg
# display_jpeg(Image('image_data/camera/0.jpg'))
# 他の画像
# word = "犬"
# images_dir = 'image_data/dog/'
#
# word = "猫"
# images_dir = 'image_data/cat/'
# max_image = 60
|
class Solution:
    """LeetCode 997 — Find the Town Judge."""

    def findJudge(self, N: int, trust: list) -> int:
        """Return the label (1..N) of the town judge, or -1 if there is none.

        The judge trusts nobody and is trusted by all other N-1 people.
        Improvements: runs in O(N + len(trust)) using in/out degree counts
        (the original re-scanned the trust list per candidate), and the
        annotation no longer relies on the unimported typing.List name.
        """
        out_degree = [0] * (N + 1)
        in_degree = [0] * (N + 1)
        for truster, trusted in trust:
            out_degree[truster] += 1
            in_degree[trusted] += 1
        for person in range(1, N + 1):
            if out_degree[person] == 0 and in_degree[person] == N - 1:
                return person
        return -1
|
#############################################################################################
##################### Simple Linear Regression - Python ####################################
#############################################################################################
#---------------------------------------------------------------------------------
# Step : 1 Importing the libraries
#---------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#---------------------------------------------------------------------------------
# Step : 2 Data Preprocessing
#--------------------------------------------------------------------------------
#2(a) Importing the dataset: feature columns = experience, column 1 = salary
dataset = pd.read_csv('Employee_Salary.csv')
Var_Independent = dataset.iloc[:, :-1].values
Var_dependent = dataset.iloc[:, 1].values
#2(b) Splitting the dataset into the Training set and Test set
# Fix: sklearn.cross_validation was removed; train_test_split has lived in
# sklearn.model_selection since scikit-learn 0.18.
from sklearn.model_selection import train_test_split
Var_I_train, Var_I_test, Var_D_train, Var_D_test = train_test_split(Var_Independent, Var_dependent, test_size = 1/3.0, random_state = 0)
#--------------------------------------------------------------------------------
# Step : 3 Data modelling
#--------------------------------------------------------------------------------
#3(a) Fitting Simple Linear Regression to the Training set (comment fixed: not Naive Bayes)
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(Var_I_train, Var_D_train)
#3(b) Predicting the Test set results
Var_D_pred = regressor.predict(Var_I_test)
#--------------------------------------------------------------------------------
# Step : 4 Data Visualising
#--------------------------------------------------------------------------------
#4(a) for the Training set results
# Fix: y_train was undefined; the training targets are Var_D_train.
plt.scatter(Var_I_train, Var_D_train, color = 'red')
plt.plot(Var_I_train, regressor.predict(Var_I_train), color = 'blue')
plt.title('Employee Salary vs Experience (Training set)')
plt.xlabel('Employee Years of Experience')
plt.ylabel('Employee Salary')
plt.show()
#4(b) for the Test set results
plt.scatter(Var_I_test, Var_D_test, color = 'red')
plt.plot(Var_I_train, regressor.predict(Var_I_train), color = 'blue')
plt.title('Employee Salary vs Experience (Test set)')
plt.xlabel('Employee Years of Experience')
plt.ylabel('Employee Salary')
plt.show()
|
from abc import ABC, abstractmethod
class Department:
    """A named department with a numeric code."""

    def __init__(self, name, code):
        self.name, self.code = name, code
class Employee(ABC, Department):
    """Abstract employee; concrete subclasses must implement calc_bonus().

    NOTE(review): Department.__init__ is not invoked here — subclasses attach
    their own Department instance instead; confirm that is intentional.
    """

    def __init__(self, code, name, salary):
        self.code = code
        self.name = name
        self.salary = salary

    @abstractmethod
    def calc_bonus(self):
        """Return this employee's bonus amount."""

    def get_hours(self):
        """Daily working hours (fixed at 8)."""
        return 8
class Manager(Employee):
    """Manager: 15% salary bonus, belongs to the 'managers' department."""

    def __init__(self, code, name, salary):
        super().__init__(code, name, salary)
        # (sic) "departament" spelling kept; name-mangled, used only in this class.
        self.__departament = Department('managers', 1)

    def calc_bonus(self):
        """Bonus is 15% of salary."""
        return 0.15 * self.salary

    def get_department(self):
        """Name of the manager's department."""
        return self.__departament.name

    def set_department(self, name):
        """Rename the manager's department."""
        self.__departament.name = name
class Seller(Manager):
    """Seller: bonus computed from accumulated sales, 'sellers' department."""

    def __init__(self, code, name, salary):
        super().__init__(code, name, salary)
        # Public department attribute shadows Manager's private one for this subclass.
        self.department = Department('sellers', 2)
        self.__sales = 0  # private running total of recorded sales

    def calc_bonus(self):
        """Bonus is 15% of total sales (not of salary)."""
        return 0.15 * self.__sales

    def get_sales(self):
        """Total sales recorded so far."""
        return self.__sales

    def put_sales(self, value):
        """Add *value* to the running sales total."""
        self.__sales += value

    def get_department(self):
        """Name of the seller's department."""
        return self.department.name

    def set_department(self, name):
        """Rename the seller's department."""
        self.department.name = name
|
# Capitalize the first letter of each name using a comprehension (returns a list).
# usage: name = ['jArrY','tOM']
#        normalize(name)
def normalize(name):
    """Return the names with the first letter upper-cased and the rest lower-cased."""
    return [s[:1].upper() + s[1:].lower() for s in name]
# Product of a list using [reduce] (returns a single value).
from functools import reduce
def prod(L):
    """Return the product of all elements of L (L must be non-empty)."""
    return reduce(lambda acc, item: acc * item, L)
# String to float using [map] and [reduce].
from functools import reduce
def str2float(s):
    """Convert a decimal string like '123.456' to a float without float().

    Generalised: strings with no fractional part ('123' or '123.') are also
    accepted; the original required exactly one '.' with a non-empty
    fraction and raised ValueError otherwise. Existing inputs give the
    same results as before.
    """
    digits = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4,
              '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}

    def to_int(part):
        # Fold digits left-to-right: "123" -> ((1*10)+2)*10+3; '' -> 0.
        return reduce(lambda acc, d: acc * 10 + d, map(digits.__getitem__, part), 0)

    int_part, _, frac_part = s.partition('.')
    # 0.1 ** 0 is 1.0, so integer-only strings still come back as floats.
    return to_int(int_part) + 0.1 ** len(frac_part) * to_int(frac_part)
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 17 14:38:53 2016
@author: a_danda
"""
import json
import re
import operator
import sys
from pprint import pprint
# NOTE(review): this script is Python 2 only — it relies on dict.keys()
# being an indexable list, dict.iteritems(), the print statement, and the
# re.LOCALE flag combined with re.UNICODE (invalid in Python 3).
data = []
# One JSON tweet object per line in the tweet file (argv[2]).
with open(sys.argv[2]) as f:
    for line in f:
        data.append(json.loads(line))
#pprint(data)
# AFINN sentiment lexicon (argv[1]): "term<TAB>score" per line.
afinnfile = open(sys.argv[1])
scores = {} # initialize an empty dictionary
for line in afinnfile:
    term, score = line.split("\t")  # The file is tab-delimited. "\t" means "tab character"
    scores[term] = int(score)  # Convert the score to an integer.
"""print scores.items() # Print every (term, score) pair in the dictionary"""
# State names, one or more per line, separated by commas and padding whitespace.
statesfile = open("states.txt")
states = []
for line in statesfile:
    pattern = re.compile("^\s+|\s*,\s*|\s+$")
    states.append(pattern.split(line))
#print states
# Flatten the per-line lists and lower-case for matching against tweet words.
states =[item for sublist in states for item in sublist]
states = [x.lower() for x in states]
value = 0
word_list = []
tweetscore_list = {}
for dict_n in data:
    key = dict_n.keys()
    if len(key) > 1:
        # NOTE(review): assumes the third key of the tweet dict is the text
        # field — CPython 2 dict order is arbitrary, so this is fragile.
        key1 = key[2]
        tweet = dict_n[key1]
        tweet = tweet.lower()
        word_list = re.findall(r'\w+', tweet ,flags = re.UNICODE | re.LOCALE)
        #print word_list
        # Sum the AFINN scores of every known word in the tweet.
        for word in word_list:
            #print word
            if word.encode('utf-8') in scores.keys():
                #print word
                value = value+scores[word]
        #print value
        # Credit the tweet's score to every state name mentioned in it.
        for state in states:
            if state in word_list:
                #print "1"
                tweetscore_list[state] = value
        #print tweetscore_list
        value = 0
        word_list = []
#print tweetscore_list
# Print the state with the highest (last-seen) tweet score.
print max(tweetscore_list.iteritems(), key=operator.itemgetter(1))[0]
|
# -*- coding: utf-8 -*-
"""
Autor:
Jorge Casillas y Miguel Morales Castillo
Fecha:
Noviembre/2018
Contenido:
Uso simple de XGB y LightGBM para competir en DrivenData:
https://www.drivendata.org/competitions/7/pump-it-up-data-mining-the-water-table/
Inteligencia de Negocio
Grado en Ingeniería Informática
Universidad de Granada
"""
import pandas as pd
import numpy as np
import time
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.metrics import accuracy_score, make_scorer
from sklearn import preprocessing
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, GradientBoostingClassifier
from sklearn.linear_model import Lasso
import xgboost as xgb
import lightgbm as lgb
import datetime
now = datetime.datetime.now()
le = preprocessing.LabelEncoder()
'''
lectura de datos
'''
# The .csv files were prepared beforehand, replacing ",," and "Not known" with NaN (missing values).
data_x = pd.read_csv('water_pump_tra.csv')
data_y = pd.read_csv('water_pump_tra_target.csv')
data_x_tst = pd.read_csv('water_pump_tst.csv')
# Drop columns that are not used as features / targets.
data_x.drop(labels=['id'], axis=1,inplace = True)
data_x_tst.drop(labels=['id'], axis=1,inplace = True)
data_y.drop(labels=['id'], axis=1,inplace = True)
'''
Se convierten las variables categóricas a variables numéricas (ordinales)
'''
from sklearn.preprocessing import LabelEncoder
mask = data_x.isnull()
data_x_tmp = data_x.fillna(0)
data_x_tmp = data_x_tmp.astype(str).apply(LabelEncoder().fit_transform)
#data_x_nan = data_x_tmp.where(~mask, data_x)
data_x_nan = data_x_tmp
mask = data_x_tst.isnull() # mask to recover the NaNs later
data_x_tmp = data_x_tst.fillna(0) # LabelEncoder does not handle NaN; assign an unused value
data_x_tmp = data_x_tmp.astype(str).apply(LabelEncoder().fit_transform) # convert categoricals to numeric
#data_x_tst_nan = data_x_tmp.where(~mask, data_x_tst) #se recuperan los NaN
# NOTE(review): the NaNs are NOT actually restored — the .where() lines are commented out.
data_x_tst_nan = data_x_tmp
# Advanced preprocessing: derive the pump's age at recording time.
data_x_nan['date_recorded'] = pd.to_datetime(data_x_nan['date_recorded'])
data_x_nan['operational_year'] = data_x_nan.date_recorded.dt.year - data_x_nan.construction_year
data_x_tst_nan['date_recorded'] = pd.to_datetime(data_x_tst_nan['date_recorded'])
data_x_tst_nan['operational_year'] = data_x_tst_nan.date_recorded.dt.year - data_x_tst_nan.construction_year
# Redundant / low-information columns dropped from both train and test.
useless_features=['date_recorded','wpt_name','num_private','subvillage','region_code','recorded_by','management_group','source_type','source_class','extraction_type_group','extraction_type_class','scheme_name','payment_type','quality_group','quantity_group','waterpoint_type_group','installer','public_meeting','permit']
X = data_x_nan.drop(useless_features,axis=1).values
X_tst = data_x_tst_nan.drop(useless_features,axis=1).values
y = np.ravel(data_y.values)
#------------------------------------------------------------------------
'''
Validación cruzada con particionado estratificado y control de la aleatoriedad fijando la semilla
'''
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=123456)
le = preprocessing.LabelEncoder()
def validacion_cruzada(modelo, X, y, cv):
    """Run cross-validation, printing accuracy and fit time for each fold.

    Returns the model as fitted on the last fold.
    """
    y_pred_all = []
    for idx_train, idx_test in cv.split(X, y):
        started = time.time()
        modelo = modelo.fit(X[idx_train], y[idx_train])
        elapsed = time.time() - started
        y_pred = modelo.predict(X[idx_test])
        fold_score = accuracy_score(y[idx_test], y_pred)
        print("Score: {:.4f}, tiempo: {:6.2f} segundos".format(fold_score, elapsed))
        y_pred_all = np.concatenate((y_pred_all, y_pred))
    print("")
    return modelo
def train(modelo, X, y):
    """Cross-validate `modelo` using the module-level StratifiedKFold splitter."""
    fitted = validacion_cruzada(modelo, X, y, skf)
    return fitted
def test(modelo, X, X_tst, y):
    """Fit on the full training set, report the train accuracy, and write a
    timestamped submission CSV for the test predictions."""
    clf = modelo.fit(X, y)
    predictions_train = clf.predict(X)
    predictions_test = clf.predict(X_tst)
    submission = pd.read_csv('water_pump_submissionformat.csv')
    submission['status_group'] = predictions_test
    print("Score Train: {:.4f}".format(accuracy_score(y, predictions_train)))
    out_name = "submission" + clf.__class__.__name__ + str(now.strftime("%y-%m-%d-%H-%M")) + ".csv"
    submission.to_csv(out_name, index=False)
    return clf
def test_less_train(modelo, X, X_tst, y):
    """Write a submission CSV with an already-fitted model (no refit, no train score)."""
    submission = pd.read_csv('water_pump_submissionformat.csv')
    submission['status_group'] = modelo.predict(X_tst)
    out_name = "submission" + modelo.__class__.__name__ + str(now.strftime("%y-%m-%d-%H-%M")) + ".csv"
    submission.to_csv(out_name, index=False)
    return modelo
#------------------------------------------------------------------------
#print(str(X.shape))
#print(str(X_tst.shape))
#clf1 = xgb.XGBClassifier(n_estimators = 1000)
#clf3 = GradientBoostingClassifier(n_estimators=1000)
# Three base models combined in a hard-voting ensemble.
clf4 = lgb.LGBMClassifier(n_estimators=200,learning_rate=0.15,num_leaves=80)
clf2 = RandomForestClassifier(n_estimators=300, min_samples_leaf=5, criterion='gini')
clf3 = RandomForestClassifier(n_estimators=500, min_samples_leaf=5,criterion='entropy')
eclf= VotingClassifier(estimators=[('RF1', clf3), ('rf2', clf2), ('Lgb', clf4)], voting='hard')
# Cross-validate, then refit on all data and write the submission file.
eclf=train(eclf,X,y)
eclf=test(eclf,X,X_tst,y)
#clf = GradientBoostingClassifier(n_estimators=1000,min_samples_leaf=50)
#clf = clf4
#clf=train(clf,X,y)
#clf=test(clf,X,X_tst,y)
# Disabled hyper-parameter grid searches, kept for reference:
'''print("Empieza Lgbm")
params_lgbm = {'learning_rate':[i/100 for i in range(5,70,5) ],'num_leaves':[30,50,80], 'n_estimators':[100,200,500]}
grid1 = GridSearchCV(clf4, params_lgbm, cv=3, n_jobs=1, verbose=1, scoring=make_scorer(accuracy_score))
grid1.fit(X,y)
grid1=test_less_train(grid1,X,X_tst,y)
print("Mejores parámetros grid 1:")
print(grid1.best_params_)
print("Empieza RF")
params_rf = {'n_estimators':[200,500,1000],'min_samples_leaf':[5,30,50], 'criterion':['gini','entropy']}
grid2 = GridSearchCV(clf2, params_rf, cv=3, n_jobs=1, verbose=1, scoring=make_scorer(accuracy_score))
grid2.fit(X,y)
grid2=test_less_train(grid2,X,X_tst,y)
print("Mejores parámetros grid 2:")
print(grid2.best_params_)
print("Empieza RF")
params_rf = {'n_estimators':[200,300,500,700],'min_samples_leaf':[5,15,30,50], 'criterion':['gini','entropy']}
grid2 = GridSearchCV(clf2, params_rf, cv=5, n_jobs=1, verbose=1, scoring=make_scorer(accuracy_score))
grid2.fit(X,y)
grid2=test_less_train(grid2,X,X_tst,y)
print("Mejores parámetros grid 2:")
print(grid2.best_params_)'''
|
from json import loads
from redis import Redis
from datetime import datetime
import ed25519
import hashlib
class Transaction(object):
    """Convenience wrapper around a raw Tendermint transaction.

    The raw bytes are JSON with exactly six keys: sender, receiver, amount,
    signature, data, timestamp.
    """

    _FIELDS = ("sender", "receiver", "amount", "signature", "data", "timestamp")

    def __init__(self, tx):
        self.raw_tx = tx
        payload = loads(tx.decode())
        self.txn = payload
        for field in self._FIELDS:
            setattr(self, field, payload[field])
        if len(payload.keys()) > 6:
            raise Exception("Unexpected key")

    def __repr__(self):
        return "From {}, To {}, Amount {}".format(self.sender, self.receiver, self.amount)

    @property
    def hash(self):
        """SHA-256 hex digest of the ';'-joined field values, keys sorted alphabetically."""
        parts = (str(self.txn[key]) for key in sorted(self.txn.keys()))
        return hashlib.sha256(";".join(parts).encode()).hexdigest()

    @property
    def signature_invalid(self):
        """True when the signature does not verify against the sender's public key.

        The signed message is every field except the signature itself,
        ';'-joined in sorted-key order.
        """
        message = ";".join(
            str(self.txn[key]) for key in sorted(self.txn.keys()) if key != "signature"
        )
        verifier = ed25519.VerifyingKey(self.sender, encoding="base64")
        try:
            verifier.verify(
                self.signature.encode(),
                message.encode(),
                encoding="base64"
            )
        except ed25519.BadSignatureError:
            return True
        return False

    @property
    def timestamp_invalid(self):
        """True when the transaction time differs from now by more than ~2 hours."""
        delta = abs(datetime.now() - datetime.fromtimestamp(self.timestamp))
        return int(delta.total_seconds() / 3600) > 2
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest.mock
import pytest
from packaging.version import Version
from pants.base.deprecated import (
BadDecoratorNestingError,
BadSemanticVersionError,
CodeRemovedError,
InvalidSemanticVersionOrderingError,
MissingSemanticVersionError,
NonDevSemanticVersionError,
deprecated,
deprecated_conditional,
deprecated_module,
is_deprecation_active,
resolve_conflicting_options,
warn_or_error,
)
from pants.option.option_value_container import OptionValueContainerBuilder
from pants.option.ranked_value import Rank, RankedValue
# Fake "current" version, patched over PANTS_SEMVER in several tests below.
_FAKE_CUR_VERSION = "2.0.0.dev0"
# Removal version far in the future: deprecations against it only warn.
FUTURE_VERSION = "9999.9.9.dev0"
def test_deprecated_function(caplog) -> None:
    """@deprecated logs exactly one warning per call: functions, methods, properties."""
    @deprecated(FUTURE_VERSION, "A hint!")
    def deprecated_function():
        return "some val"

    class Foo:
        @deprecated(FUTURE_VERSION)
        def deprecated_method(self):
            return "some val"

        @property
        @deprecated(FUTURE_VERSION)
        def deprecated_property(self):
            return "some val"

    # Decoration alone must not warn; calling does, once, mentioning the hint.
    assert not caplog.records
    assert deprecated_function() == "some val"
    assert len(caplog.records) == 1
    assert deprecated_function.__name__ in caplog.text
    assert "A hint!" in caplog.text

    caplog.clear()
    assert Foo().deprecated_method() == "some val"
    assert len(caplog.records) == 1
    assert Foo.deprecated_method.__name__ in caplog.text

    caplog.clear()
    assert Foo().deprecated_property == "some val"
    assert len(caplog.records) == 1
    assert "deprecated_property" in caplog.text
def test_deprecated_function_invalid() -> None:
    """Misusing @deprecated fails at decoration time."""
    # A removal version is mandatory.
    with pytest.raises(MissingSemanticVersionError):
        @deprecated(None)  # type: ignore[arg-type]
        def undecoratable():
            pass

    # @deprecated must wrap @property from the outside, not the inside.
    with pytest.raises(BadDecoratorNestingError):
        class Holder:
            @deprecated(FUTURE_VERSION)  # type: ignore[misc]
            @property
            def attr(this):
                pass
def test_deprecated_conditional(caplog) -> None:
    """deprecated_conditional warns only when its predicate returns True."""
    assert not caplog.records
    deprecated_conditional(lambda: True, FUTURE_VERSION, "deprecated entity", None)
    assert len(caplog.records) == 1
    assert "deprecated entity" in caplog.text

    caplog.clear()
    deprecated_conditional(lambda: False, FUTURE_VERSION, "deprecated entity", None)
    assert not caplog.records
def test_deprecated_module(caplog) -> None:
    """deprecated_module logs a single removal warning that includes the hint."""
    assert not caplog.records
    deprecated_module(FUTURE_VERSION, hint="Do not use me.")
    assert len(caplog.records) == 1
    assert "module is scheduled to be removed" in caplog.text
    assert "Do not use me" in caplog.text
def test_removal_version_bad() -> None:
    """Malformed removal versions are rejected; well-formed ones must be .devN."""
    # Each malformed version fails both through warn_or_error and @deprecated.
    for bad_version in ("a.a.a", 1.0, "1.a.0"):
        with pytest.raises(BadSemanticVersionError):
            warn_or_error(bad_version, "fake description", None)  # type: ignore[arg-type]

        with pytest.raises(BadSemanticVersionError):
            @deprecated(bad_version)  # type: ignore[arg-type]
            def doomed():
                pass

    # A syntactically valid release version is still refused: it must be a dev version.
    with pytest.raises(NonDevSemanticVersionError):
        @deprecated("1.0.0")
        def stable_looking():
            pass
@unittest.mock.patch("pants.base.deprecated.PANTS_SEMVER", Version(_FAKE_CUR_VERSION))
def test_removal_version_same() -> None:
    """When the current version equals the removal version, use raises CodeRemovedError."""
    with pytest.raises(CodeRemovedError):
        warn_or_error(_FAKE_CUR_VERSION, "fake description", None)

    @deprecated(_FAKE_CUR_VERSION)
    def removed():
        pass

    with pytest.raises(CodeRemovedError):
        removed()
def test_removal_version_lower() -> None:
    """A removal version already in the past raises CodeRemovedError on use."""
    with pytest.raises(CodeRemovedError):
        warn_or_error("0.0.27.dev0", "fake description", None)

    @deprecated("0.0.27.dev0")
    def removed():
        pass

    with pytest.raises(CodeRemovedError):
        removed()
def test_deprecation_start_version_validation() -> None:
    """start_version must parse, and must not come after removal_version."""
    with pytest.raises(BadSemanticVersionError):
        warn_or_error(
            removal_version="1.0.0.dev0", entity="fake", hint=None, start_version="1.a.0"
        )

    with pytest.raises(InvalidSemanticVersionOrderingError):
        warn_or_error(
            removal_version="0.0.0.dev0", entity="fake", hint=None, start_version="1.0.0.dev0"
        )
@unittest.mock.patch("pants.base.deprecated.PANTS_SEMVER", Version(_FAKE_CUR_VERSION))
def test_deprecation_start_period(caplog) -> None:
    """Once start_version is reached: warn — or raise if removal is already due."""
    # Removal version equals the (patched) current version -> hard error.
    with pytest.raises(CodeRemovedError):
        warn_or_error(
            removal_version=_FAKE_CUR_VERSION, entity="demo", hint=None, start_version="1.0.0.dev0"
        )

    caplog.clear()
    # Start version reached but removal far away -> exactly one warning.
    warn_or_error(
        removal_version="999.999.999.dev999",
        entity="demo",
        hint=None,
        start_version=_FAKE_CUR_VERSION,
    )
    assert len(caplog.records) == 1
    assert (
        "DEPRECATED: demo is scheduled to be removed in version 999.999.999.dev999." in caplog.text
    )
@unittest.mock.patch("pants.base.deprecated.PANTS_SEMVER", Version(_FAKE_CUR_VERSION))
def test_deprecation_memoization(caplog) -> None:
    """Warnings for the same entity are memoized: emitted only once."""
    caplog.clear()
    # Same entity three times -> still a single log record.
    for i in range(3):
        warn_or_error(
            removal_version="999.999.999.dev999",
            entity="memo",
            hint=None,
            start_version=_FAKE_CUR_VERSION,
        )
    assert len(caplog.records) == 1
    # A different entity is not memoized away.
    warn_or_error(
        removal_version="999.999.999.dev999",
        entity="another",
        hint=None,
        start_version=_FAKE_CUR_VERSION,
    )
    assert len(caplog.records) == 2
def test_resolve_conflicting_options() -> None:
    """The explicitly configured option wins; configuring both is an error naming both flags."""
    old_val = "ancient"
    new_val = "modern"

    def resolve(*, old_configured: bool = False, new_configured: bool = False):
        # Build a container whose my_opt is FLAG-ranked when "configured",
        # HARDCODED-ranked (a default) otherwise.
        def container(configured: bool, value: str):
            builder = OptionValueContainerBuilder()
            rank = Rank.FLAG if configured else Rank.HARDCODED
            builder.my_opt = RankedValue(rank, value)
            return builder.build()

        return resolve_conflicting_options(
            old_option="my_opt",
            new_option="my_opt",
            old_scope="old-scope",
            new_scope="new-scope",
            old_container=container(old_configured, old_val),
            new_container=container(new_configured, new_val),
        )

    assert resolve() == new_val
    assert resolve(old_configured=True) == old_val
    assert resolve(new_configured=True) == new_val

    # both configured -> raise an error
    with pytest.raises(ValueError) as exc_info:
        resolve(old_configured=True, new_configured=True)
    assert "--old-scope-my-opt" in str(exc_info.value)
    assert "--new-scope-my-opt" in str(exc_info.value)
def test_is_deprecation_active() -> None:
    """A deprecation is active with no start version, or one already reached."""
    for active_start in (None, "1.0.0"):
        assert is_deprecation_active(start_version=active_start)
    assert not is_deprecation_active(start_version=FUTURE_VERSION)
|
import time
import hashlib
import argparse
import multiprocessing
from joblib import Parallel, delayed
__author__ = 'David Flury'
__email__ = "david@flury.email"
def calculate_hash():
    """Hash a fixed message with SHA-1 — one small unit of CPU work for the benchmark.

    Returns:
        The sha1 hash object of the fixed message.
    """
    # Renamed from `input`, which shadowed the builtin of the same name.
    message = 'unmix.io is great!'
    return hashlib.sha1(message.encode())
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CPU speed test with multiple cores.')
    parser.add_argument('--calculations', default=1000000, type=int, help='Count of calculations')
    # Fixed help text: it previously read 'Size of FFT windows' (copy-paste error).
    parser.add_argument('--cores', default=multiprocessing.cpu_count(), type=int, help='Count of CPU cores to use')
    args = parser.parse_args()
    print('Arguments:', str(args))
    print('speed-test with %d cores and %d calculations...' % (args.cores, args.calculations))
    # Time how long the fixed number of hash computations takes across the workers.
    start = time.time()
    Parallel(n_jobs=args.cores)(delayed(calculate_hash)() for x in range(args.calculations))
    end = time.time()
    print(end - start)
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pants.backend.cue.subsystem import Cue
from pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest
from pants.engine.fs import Digest, MergeDigests, Snapshot
from pants.engine.platform import Platform
from pants.engine.process import FallibleProcessResult, Process
from pants.engine.rules import Get
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
def generate_argv(*args: str, files: tuple[str, ...], cue: Cue) -> tuple[str, ...]:
    """Assemble the cue command line: subcommand args, then user-configured args, then files."""
    return (*args, *cue.args, *files)
async def _run_cue(
    *args: str, cue: Cue, snapshot: Snapshot, platform: Platform, **kwargs
) -> FallibleProcessResult:
    """Download the cue binary, merge it with the input snapshot, and run it.

    Extra keyword arguments are forwarded verbatim to `Process`.
    """
    tool = await Get(
        DownloadedExternalTool, ExternalToolRequest, cue.get_request(platform)
    )
    # The process sandbox needs both the binary and the files to operate on.
    merged_input = await Get(Digest, MergeDigests((tool.digest, snapshot.digest)))
    argv = [tool.exe, *generate_argv(*args, files=snapshot.files, cue=cue)]
    result = await Get(
        FallibleProcessResult,
        Process(
            argv=argv,
            input_digest=merged_input,
            description=f"Run `cue {args[0]}` on {pluralize(len(snapshot.files), 'file')}.",
            level=LogLevel.DEBUG,
            **kwargs,
        ),
    )
    return result
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
import math
# Number of consecutive identical classifications before a state is trusted.
STATE_COUNT_THRESHOLD = 3
# Only react to traffic lights within this many waypoints ahead of the car.
LIGHT_AHEAD_WPS = 200
# The car should stop ahead red light about 27.m
STOP_AHEAD_DIST = 27.
class TLDetector(object):
    """ROS node that detects the closest upcoming traffic light and publishes the
    waypoint index of its stop line on /traffic_waypoint (-1 when no relevant
    red light is ahead)."""

    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None          # latest /current_pose message
        self.base_wps = None      # full track waypoint list from /base_waypoints
        self.camera_image = None  # latest /image_color message
        self.lights = []          # ground-truth lights from the simulator
        self.light_wps = None     # cached stop-line waypoint indices (filled lazily)
        # Fix: has_image was previously only ever set in image_cb, so
        # get_light_state() could raise AttributeError before the first image.
        self.has_image = False

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        self.light_classifier = TLClassifier()

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        # NOTE(review): yaml.load without an explicit Loader is unsafe on untrusted
        # input; here the string comes from the local ROS parameter server.
        self.config = yaml.load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.listener = tf.TransformListener()

        # State-machine bookkeeping for debouncing classifier output.
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        rospy.spin()

    def pose_cb(self, msg):
        # Cache the latest vehicle pose.
        self.pose = msg

    def waypoints_cb(self, waypoints):
        # Cache the static track waypoints.
        self.base_wps = waypoints

    def traffic_cb(self, msg):
        # Cache the simulator's ground-truth traffic light states.
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
        of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """
        if not self.pose or not self.base_wps:
            return
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()
        # Keep treating the light as red until the classifier reports otherwise.
        if self.last_state == TrafficLight.RED and state == TrafficLight.UNKNOWN:
            state = TrafficLight.RED
        if state == TrafficLight.RED:
            txt = 'RED'
        elif state == TrafficLight.GREEN:
            txt = 'GREEN'
        elif state == TrafficLight.YELLOW:
            txt = 'YELLOW'
        else:
            txt = 'UNKNOWN'
        rospy.loginfo( '[tl_detector] =======> class: %s', txt )
        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y, z):
        """Identifies the closest path waypoint to the given position
        https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x, y, z: coordinates to match a waypoint to
        Returns:
            int: index of the closest waypoint in self.base_wps, or None
            when the waypoints have not been received yet
        """
        if self.base_wps is None:  # fixed: was `== None`
            return None
        # Linear scan for the minimum Euclidean distance.
        dist = float( "inf" )
        wpi = None
        for i, wp in enumerate( self.base_wps.waypoints ):
            p = wp.pose.pose.position
            d = math.sqrt( ( p.x - x ) ** 2 + ( p.y - y ) ** 2 + ( p.z - z ) ** 2 )
            if d < dist:
                wpi = i
                dist = d
        return wpi

    def get_light_state( self ):
        """Determines the current color of the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight),
            or False when no camera image has been received yet
        """
        if(not self.has_image):
            self.prev_light_loc = None
            return False
        cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
        #Get classification
        return self.light_classifier.get_classification(cv_image)

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
        location and color
        Returns:
            int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # # List of positions that correspond to the line to stop in front of for a given intersection
        # stop_line_positions = self.config['stop_line_positions']
        pos = self.pose.pose.position
        car_wp = self.get_closest_waypoint( pos.x, pos.y, pos.z )
        # return self.find_upcoming_light_test( car_wp )
        light_wp = self.find_upcoming_light( car_wp )
        if light_wp != -1:
            state = self.get_light_state()
            return light_wp, state
        return -1, TrafficLight.UNKNOWN

    def find_upcoming_light_test( self, car_wp ):
        """Ground-truth variant (simulator only): returns (light_wp, state) using
        the /vehicle/traffic_lights positions and states instead of the classifier."""
        num_wps = len( self.base_wps.waypoints )
        light_wps = []
        for light in self.lights:
            p = light.pose.pose.position
            wp = self.get_closest_waypoint( p.x, p.y, p.z )
            # Find real stop location of light
            wp = self.get_stop_waypoint( wp )
            light_wps.append( wp )
        # find upcoming light, treating the track as a loop.
        light_wp = -1
        state = TrafficLight.UNKNOWN
        light_dist = num_wps * 2
        for i, wp in enumerate( light_wps ):
            dist = wp - car_wp if wp > car_wp else wp + num_wps - car_wp
            if dist < light_dist:
                light_wp = wp
                state = self.lights[ i ].state
                light_dist = dist
        # ss = ['RED', 'YELLOW', 'GREEN', '', 'UNKNOWN']
        # rospy.loginfo( '[tl_detector] upcoming: %d - %d = %d, dist = %.2f, state = %s', \
        #     light_wp, car_wp, light_dist, \
        #     self.distance( self.base_wps.waypoints, car_wp, light_wp ), ss[ state ] )
        if light_dist > LIGHT_AHEAD_WPS:
            light_wp = -1
            state = TrafficLight.UNKNOWN
        #else:
        #     ss = ['RED', 'YELLOW', 'GREEN', '', 'UNKNOWN']
        #     rospy.loginfo( '[tl_detector] upcoming: %d - %d = %d, state = %s', \
        #         light_wp, car_wp, light_dist, ss[ state ] )
        return light_wp, state

    # Find the real stop waypoint of 'light_wp', which should locate ahead about 27m
    def get_stop_waypoint( self, light_wp ):
        waypoints = self.base_wps.waypoints
        num_wps = len( waypoints )
        wp1_last, wp1 = light_wp, light_wp
        dist_last, dist = 0, 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        # Walk backwards along the track until the accumulated arc length
        # reaches STOP_AHEAD_DIST, then pick the closer of the two candidates.
        while dist < STOP_AHEAD_DIST:
            wp0 = wp1 - 1 if wp1 >= 1 else num_wps - 1
            dist_last = dist
            dist += dl( waypoints[ wp0 ].pose.pose.position, waypoints[ wp1 ].pose.pose.position )
            wp1_last = wp1
            wp1 = wp0
        return wp1 if dist - STOP_AHEAD_DIST < STOP_AHEAD_DIST - dist_last else wp1_last

    def _distance(self, waypoints, wp1, wp2):
        # Arc length between two waypoint indices (wp1 <= wp2, no wrap-around).
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist

    def distance(self, waypoints, wp1, wp2):
        # Arc length between two waypoint indices, wrapping around the loop if needed.
        num = len( waypoints )
        wp1, wp2 = wp1 % num, wp2 % num
        if wp2 > wp1:
            return self._distance( waypoints, wp1, wp2 )
        else:
            num_wps = len( self.base_wps.waypoints )
            return self._distance( waypoints, wp1, num_wps - 1 ) + \
                self._distance( waypoints, 0, wp2 )

    def find_upcoming_light( self, car_wp ):
        """Return the stop-line waypoint of the next light ahead of car_wp,
        or -1 if the nearest one is more than LIGHT_AHEAD_WPS waypoints away."""
        # Calc waypoint index for lights from config, calc only once
        if not self.light_wps:
            # List of positions that correspond to the line to stop in front of for a given intersection
            stop_line_positions = self.config['stop_line_positions']
            self.light_wps = []
            for p in stop_line_positions:
                wp = self.get_closest_waypoint( p[ 0 ], p[ 1 ], 0. )
                # Find real stop location of light
                # wp = self.get_stop_waypoint( wp )
                self.light_wps.append( wp )
            self.light_wps.sort()
            rospy.loginfo( '[tl_detector] num_of_lights = %d, light_wps = %s', \
                len( self.light_wps ), self.light_wps )
        # find upcoming light; the for/else wraps around to the first light
        # when the car is past the last one on the loop.
        light_wp = -1
        light_dist = -1
        for wp in self.light_wps:
            if wp > car_wp:
                light_wp = wp
                light_dist = wp - car_wp
                break
        else:
            light_wp = self.light_wps[ 0 ]
            light_dist = light_wp + len( self.base_wps.waypoints ) - car_wp
        # rospy.loginfo( '[tl_detector] find_upcoming_light: car_wp = %d, light_wp = %d, dist = %d', \
        #     car_wp, light_wp, light_dist )
        return light_wp if light_dist <= LIGHT_AHEAD_WPS else -1
if __name__ == '__main__':
    try:
        TLDetector()
    except rospy.ROSInterruptException:
        # Raised when ROS interrupts the node (e.g. shutdown during spin()).
        rospy.logerr('Could not start traffic node.')
|
import os
import random
import numpy as np
import pygame
from pygame.locals import *
from constants import *
from Oracle import Oracle, State
class Block(pygame.sprite.Sprite):
    """A colored square sprite positioned at a grid cell center."""

    def __init__(self, color_index, grid_center, block_size=50):
        super(Block, self).__init__()
        self.id = color_index  # the color index doubles as the block id
        self.block_size = block_size
        surface = pygame.Surface((block_size, block_size))
        surface.fill(COLORS[color_index])
        self.surf = surface
        self.rect = surface.get_rect(center=grid_center)
class BlockWorld:
def __init__(self, screen_width, screen_height, num_blocks=3, num_stacks=1, block_size=50, record=False):
    """Create the game window, the blocks, and the goal configuration.

    When `record` is True, a fresh numbered directory is created under
    FRAME_LOCATION for saving demonstration frames.
    """
    self.screen_width = screen_width
    self.screen_height = screen_height
    # The goal preview is a small square, one fifth of the screen width.
    self.goal_screen_dim = (self.screen_width // 5, self.screen_width // 5)
    self.num_stacks = num_stacks
    self.num_blocks = num_blocks
    self.block_size = block_size
    self.target_blocks = {}  # filled by create_goal: block id -> id that should sit on top of it
    self.blocks, self.block_dict = self.create_blocks()
    self.record = record
    if self.record:
        # Next free demo id = number of existing entries under FRAME_LOCATION.
        self.demo_id = len(list(os.walk(FRAME_LOCATION)))
        self.demo_dir = os.path.join(FRAME_LOCATION, str(self.demo_id))
        os.mkdir(self.demo_dir)
    self.screen = pygame.display.set_mode((screen_width, screen_height))
    self.master_surface = pygame.Surface(self.screen.get_size())
    self.master_surface.fill((0, 0, 0))
    self.goal_surface = pygame.Surface(self.goal_screen_dim)
    self.goal_surface.fill(GRAY)
    self.goal_config = self.create_goal()
    pygame.init()
    self.selected_block_id = None  # id of the currently grabbed block, if any
    self.actions_taken = []
@staticmethod
def distance(pts1, pts2):
    """Squared Euclidean distance between two (x, y) points."""
    dx = pts1[0] - pts2[0]
    dy = pts1[1] - pts2[1]
    return dx * dx + dy * dy
def create_goal(self, goal_config=None):
    """Draw the target stack layout on self.goal_surface and return goal_config.

    goal_config is a num_stacks x num_blocks nested list of block ids, with -1
    marking unused slots. Also fills self.target_blocks with below->above pairs.
    """
    if goal_config is None:
        goal_config = (-np.ones((self.num_stacks, self.num_blocks), dtype=np.int8)).tolist()
    # choosing the order for blocks to be placed in the goal screen.
    block_order = [i for i in range(self.num_blocks)]
    seed = np.random.randint(0, self.num_stacks)
    block_order = [2, 0, 1]  # NOTE(review): hard-coded order overrides the shuffle below
    # random.shuffle(block_order)
    last_used_block = 0
    blocks_per_stack = self.num_blocks // self.num_stacks
    block_size = self.goal_screen_dim[0] // 10
    if self.num_stacks > 1:
        left_padding = (self.goal_screen_dim[0] - block_size + (3 * (self.num_stacks - 1) * block_size // 2)) // 2
    else:
        left_padding = (self.goal_screen_dim[0] - block_size) // 2
    bottom = self.goal_screen_dim[1] - 2 * block_size
    for stack_num in range(self.num_stacks):
        for i in range(blocks_per_stack):
            pygame.draw.rect(self.goal_surface, COLORS[block_order[last_used_block]], (stack_num * (block_size + 5) + left_padding, bottom - block_size * i, block_size, block_size))
            goal_config[stack_num][i] = block_order[last_used_block]
            if last_used_block > 0:
                # Record that the previously placed block should sit directly below this one.
                self.target_blocks[block_order[last_used_block - 1]] = block_order[last_used_block]
            last_used_block += 1
    # Distribute leftover blocks when num_blocks does not divide evenly.
    if self.num_blocks % 2 == 1 and last_used_block != self.num_blocks:
        while last_used_block != self.num_blocks:
            # NOTE(review): this passes scalars where pygame.draw.rect expects a
            # rect (x, y, w, h) — it would raise if this branch executes; confirm
            # whether it is ever reached with the current hard-coded block_order.
            pygame.draw.rect(self.goal_surface, COLORS[block_order[last_used_block]], seed * 35 + 40, 150 - block_size * blocks_per_stack)
            goal_config[seed][np.where(goal_config[seed] == -1)[0][0]] = block_order[last_used_block]
            last_used_block += 1
            blocks_per_stack += 1
    return goal_config
def euclidean_dist(self, point1, point2):
    """Euclidean distance between the rect centers of two sprites."""
    dx = point1.rect.centerx - point2.rect.centerx
    dy = point1.rect.centery - point2.rect.centery
    return np.sqrt(dx * dx + dy * dy)
def get_reward_for_state_action_pramodith(self, state, next_state):
    """+1 if the tracked distance shrank, -1 if it grew, 0 otherwise.

    A reward is only given while the same block stays selected
    (state[-2] holds the selected block id); otherwise 0.
    """
    if state[-2] != next_state[-2]:
        return 0
    before, after = state[0], next_state[0]
    if after < before:
        return 1
    if after == before:
        return 0
    return -1
def get_reward_for_state_tanmay(self):
    """Dense reward from inverse pairwise block distances, plus a goal bonus."""
    reward = 0
    ids = range(len(self.block_dict))
    for first in ids:
        for second in ids:
            if first != second:
                reward += 10000 / self.euclidean_dist(self.block_dict[first], self.block_dict[second])
    if self.get_state_as_state().goal_reached():
        reward += 1000
    return reward
def get_reward_for_state(self, block_states):
    """Return the sparse goal reward when positive; otherwise 0.

    NOTE(review): the fallback loop accumulates `reward += 0` for close block
    pairs — effectively dead code; presumably a placeholder for a proximity
    term that was removed. Confirm before deleting.
    """
    reward = 0
    goal_reward = self.get_sparse_reward_for_state_pramodith(block_states)
    if goal_reward > 0:
        return goal_reward
    else:
        for block_id in range(len(self.block_dict)):
            for block_id2 in range(len(self.block_dict)):
                if block_id != block_id2:
                    if self.euclidean_dist(self.block_dict[block_id], self.block_dict[block_id2]) < 55:
                        reward += 0
    return reward
def get_sparse_reward_for_state_pramodith(self, block_states):
    """Sparse reward: 100 per target pair that is exactly stacked, else 0.

    A pair (below -> above) from self.target_blocks counts when both blocks
    share a center x and the below block is exactly one block_size lower.
    """
    goal_config = block_states[-1]  # NOTE(review): read but unused below
    score = 0
    this_score = 0
    block_size = self.block_size
    '''
    for stack in goal_config:
        for i in range(1, len(stack)):
            curr_block, prev_block = self.block_dict[stack[i]], self.block_dict[stack[i - 1]]
            if curr_block.rect.centerx==prev_block.rect.centerx:
                this_score = prev_block.rect.centery-curr_block.rect.centery
                if this_score==50:
                    score+=1
    '''
    for key in self.target_blocks:
        if self.block_dict[key].rect.centerx == self.block_dict[self.target_blocks[key]].rect.centerx and self.block_dict[key].rect.centery - self.block_dict[self.target_blocks[key]].rect.centery == self.block_size:
            score += 1
    if score > 0:
        return 100 * score
    else:
        return 0
def get_sparse_reward_for_state(self, block_states):
    """Return 1000 when every goal stack is exactly assembled, else 0.

    block_states maps block ids to (x, y) centers and holds the goal
    configuration under key -1; each stacked pair contributes a non-positive
    misalignment score, so a total of 0 means a perfect match.
    """
    goal_config = block_states[-1]
    block_size = self.block_size
    total = 0
    for stack in goal_config:
        for i in range(1, len(stack)):
            above, below = block_states[stack[i]], block_states[stack[i - 1]]
            total += -np.abs(above[0] - below[0]) - np.abs(below[1] - above[1] - block_size)
    return 1000 if total == 0 else 0
def get_dense_reward_for_state_pramodith(self, block_states):
    """Average normalized closeness score over stacked pairs, using live sprite rects.

    NOTE(review): the second abs term uses prev_block.centerx - curr_block.centerx,
    while the sibling get_dense_reward_for_state uses the y coordinates there;
    this looks like a copy-paste slip — confirm the intended axis before changing.
    """
    goal_config = block_states[-1]
    score = 0
    count = 0.0
    max_x, max_y, block_size = self.screen_width, self.screen_height, self.block_size
    for stack in goal_config:
        for i in range(1, len(stack)):
            curr_block, prev_block = self.block_dict[stack[i]].rect, self.block_dict[stack[i - 1]].rect
            this_score = max_x + max_y - np.abs(curr_block.centerx - prev_block.centerx) - np.abs(prev_block.centerx - curr_block.centerx - block_size)
            this_score /= (max_x + max_y)
            score += this_score
            count += 1
    return score / count
def get_dense_reward_for_state(self, block_states):
    """Average normalized closeness score over all stacked pairs in the goal config."""
    goal_config = block_states[-1]
    max_x, max_y, block_size = self.screen_width, self.screen_height, self.block_size
    span = max_x + max_y  # normalization constant, hoisted out of the loop
    total = 0
    pairs = 0.0
    for stack in goal_config:
        for i in range(1, len(stack)):
            above, below = block_states[stack[i]], block_states[stack[i - 1]]
            closeness = span - np.abs(above[0] - below[0]) - np.abs(below[1] - above[1] - block_size)
            total += closeness / span
            pairs += 1
    return total / pairs
def get_reward(self):
    """Combined sparse + dense reward for the current block layout.

    Fixes two defects:
    - `get_reward_for_state` takes only the state mapping; the extra
      goal_config argument raised TypeError on every call;
    - both reward helpers read the goal configuration from key -1 of the
      state mapping, which was never populated here.
    """
    block_states = {
        idx: (self.block_dict[idx].rect.center[0], self.block_dict[idx].rect.center[1])
        for idx in self.block_dict
    }
    # The reward helpers expect the goal configuration under key -1.
    block_states[-1] = self.goal_config
    return self.get_reward_for_state(block_states) + self.get_dense_reward_for_state(block_states)
def get_state_as_tuple(self):
    # curr_state is a n-tuple( (x1, y1), (x2, y2), (x3, y3), (x4, y4), selectedBlockId, (goal_config))
    positions = [0] * (self.num_blocks + 1)
    for block_id, block in self.block_dict.items():
        positions[block_id] = (block.rect.centerx, block.rect.centery)
    positions[-1] = self.selected_block_id
    positions.append(tuple(tuple(stack) for stack in self.goal_config))
    return tuple(positions)
def get_state_as_tuple_pramodith(self):
    """Compact state: (distance-to-target, (x-dir, y-dir), selected_id, goal_config).

    When a block is selected, slot 0 is the squared distance between it and its
    target partner and slot 1 the ('l'/'r', 'u'/'d') direction toward it; when
    nothing is selected, slot 0 is a tuple of all target-pair distances.
    NOTE(review): the two selected-block branches below are near-duplicates
    differing only in how the target id is found (forward vs reverse lookup
    in self.target_blocks).
    """
    # curr_state is a n-tuple( (x1, y1), (x2, y2), (x3, y3), (x4, y4), selectedBlockId, (goal_config))
    some_list = [-1 for _ in range(3)]
    ind = 0
    # distances.append(np.square(self.block_dict[block_id].rect.centerx-self.block_dict[block_id2].rect.centerx)+\
    # np.square(self.block_dict[block_id].rect.centery-self.block_dict[block_id2].rect.centery))
    # for block_id in sorted(self.block_dict.keys()):
    directions = ["-", "-"]
    if self.selected_block_id != None:
        if self.selected_block_id in self.target_blocks:
            # Selected block should sit *below* its target partner.
            target_id = self.target_blocks[self.selected_block_id]
            some_list[0] = np.square(self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx) + np.square(self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery)
            if self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx > 0:
                directions[0] = 'l'
            elif self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx < 0:
                directions[0] = 'r'
            if self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery > 0:
                directions[1] = 'u'
            elif self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery < 0:
                directions[1] = 'd'
        else:
            # Reverse lookup: the selected block is someone's target (sits on top).
            for key, value in self.target_blocks.items():
                if value == self.selected_block_id:
                    target_id = key
            some_list[0] = np.square(self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx) + np.square(self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery)
            if self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx > 0:
                directions[0] = 'l'
            elif self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx < 0:
                directions[0] = 'r'
            if self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery > 0:
                directions[1] = 'u'
            elif self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery < 0:
                directions[1] = 'd'
    else:
        # No selection: expose the distance of every target pair.
        distances = []
        for key in self.target_blocks:
            distances.append(self.euclidean_dist(self.block_dict[key], self.block_dict[self.target_blocks[key]]))
        some_list[0] = tuple(distances)
    some_list[1] = tuple(directions)
    some_list[-1] = self.selected_block_id
    some_list.append(tuple([tuple(x) for x in self.goal_config]))
    return tuple(some_list)
def get_state_as_dict(self):
# curr_state is a n-tuple( (x1, y1), (x2, y2), (x3, y3), (x4, y4), selectedBlockId)
block_dict = self.block_dict
state = {"positions": {block_id: (block_dict[block_id].rect.centerx, block_dict[block_id].rect.centery) for block_id in block_dict}, "selected": self.selected_block_id if self.selected_block_id is not None else -1}
return state
def get_state_as_state(self):
block_dict = self.block_dict
state = State(block_positions=[(block_dict[block_id].rect.centerx, block_dict[block_id].rect.centery) for block_id in block_dict], goal_positions=None, selected_index=None)
goal_conf = self.goal_config[0].copy()
goal_conf.reverse()
goal_positions = Oracle.get_goal_position(curr_state=state, goal_config=goal_conf, step_size=self.block_size)
return State(block_positions= state.block_positions, goal_positions=goal_positions, selected_index = self.selected_block_id)
@staticmethod
def are_intersecting(rect1, dx, dy, other_rect):
return (other_rect.top <= rect1.top + dy < other_rect.bottom and (other_rect.left <= rect1.left + dx < other_rect.right or other_rect.left < rect1.right + dx <= other_rect.right)) or (other_rect.top < rect1.bottom + dy <= other_rect.bottom and (other_rect.left <= rect1.left + dx < other_rect.right or other_rect.left < rect1.right + dx <= other_rect.right)) or (rect1.top + dy < 0 or rect1.bottom + dy > 350 or rect1.left + dx < 0 or rect1.right + dx > 350)
def create_blocks(self):
blocks = pygame.sprite.Group()
# grid_centers=[(325,325),(25,25)]
grid_centers = [(25 + 50 * np.random.randint(6), 25 + 50 * np.random.randint(6)) for _ in range(self.num_blocks)]
# grid_centers = [(i, self.screen_height // 2) for i in range(25, self.screen_width, 50)]
# for i,blockCenterIdx in enumerate(range(len(grid_centers))):
for i, blockCenterIdx in enumerate(np.random.choice(len(grid_centers), self.num_blocks, replace=False)):
blocks.add(Block(i, grid_centers[blockCenterIdx], self.block_size)) # blocks.add(Block(i, (325,325), self.block_size))
block_dict = {block.id: block for block in blocks.sprites()}
return blocks, block_dict
def get_next_state_based_on_state_tuple(self, state, action):
# action is (Action, blockId)
# print("get_next_state_based_on_state_tuple: ", state, action)
sel_block_id = state[-2]
if sel_block_id: assert sel_block_id < self.block_size
state_l = list(state)
if action[0] == Action.DROP:
state_l[-2] = None
state_l[1] = ('-', '-')
distances = []
for key in self.target_blocks:
distances.append(self.euclidean_dist(self.block_dict[key], self.block_dict[self.target_blocks[key]]))
state_l[0] = tuple(distances)
else:
state_l[-2] = action[1]
state_l = self.get_next_state_pramodith(action[0], state_l[-2], state_l)
return tuple(state_l)
def get_next_state(self, action, sel_block_id):
if action in move_action_to_deviation:
dx, dy = move_action_to_deviation[action]
else:
raise IOError("Invalid Action", action)
rectangle = self.block_dict[sel_block_id].rect
not_intersections = [not BlockWorld.are_intersecting(rectangle, dx, dy, other_block.rect) for other_block in self.blocks if other_block.rect != rectangle]
orig_pos = rectangle.center
if all(not_intersections):
next_pos = (orig_pos[0] + dx, orig_pos[1] + dy)
if self.is_in_bounding_box(next_pos):
return next_pos
return orig_pos
def get_next_state_pramodith(self, action, sel_block_id, state_l):
dx = None
dy = None
if action in move_action_to_deviation:
dx, dy = move_action_to_deviation[action]
else:
raise IOError("Invalid Action", action)
rectangle = self.block_dict[sel_block_id].rect
not_intersections = [not BlockWorld.are_intersecting(rectangle, dx, dy, other_block.rect) for other_block in self.blocks if other_block.rect != rectangle]
orig_pos = rectangle.center
if all(not_intersections):
distances = []
if dx != 0 or dy != 0:
self.block_dict[sel_block_id].rect.centerx += dx
self.block_dict[sel_block_id].rect.centery += dy
if self.selected_block_id == None:
self.selected_block_id = sel_block_id
directions = ["-", "-"]
if sel_block_id in self.target_blocks:
target_id = self.target_blocks[sel_block_id]
state_l[0] = np.square(self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx) + np.square(self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery)
if self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx > 0:
directions[0] = 'l'
elif self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx < 0:
directions[0] = 'r'
if self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery > 0:
directions[1] = 'u'
elif self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery < 0:
directions[1] = 'd'
state_l[1] = tuple(directions)
else:
for key, value in self.target_blocks.items():
if value == self.selected_block_id:
target_id = key
state_l[0] = np.square(self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx) + np.square(self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery)
if self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx > 0:
directions[0] = 'l'
elif self.block_dict[self.selected_block_id].rect.centerx - self.block_dict[target_id].rect.centerx < 0:
directions[0] = 'r'
if self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery > 0:
directions[1] = 'u'
elif self.block_dict[self.selected_block_id].rect.centery - self.block_dict[target_id].rect.centery < 0:
directions[1] = 'd'
return state_l
def update_state_from_tuple_pramodith(self, state_tuple):
sel_block_id = state_tuple[-2]
if sel_block_id is not None:
self.selected_block_id = sel_block_id
else:
self.selected_block_id = None
def update_state_from_tuple(self, state_tuple):
sel_block_id = state_tuple[-2]
if sel_block_id is not None:
self.block_dict[sel_block_id].rect.centerx = state_tuple[sel_block_id][0]
self.block_dict[sel_block_id].rect.centery = state_tuple[sel_block_id][1]
self.selected_block_id = sel_block_id
else:
self.selected_block_id = None
def update_state(self, sel_block_id, next_state):
self.block_dict[sel_block_id].rect.centerx = next_state[0]
self.block_dict[sel_block_id].rect.centery = next_state[1]
def move_block_by_key(self, key, sel_block_id):
return self.move_block_by_action(key_to_action[key], sel_block_id)
def move_block_by_action(self, action, sel_block_id):
next_state = self.get_next_state(action, sel_block_id)
self.update_state(sel_block_id, next_state)
return action, sel_block_id
def pre_render(self, drop_events=True):
self.screen.blit(self.master_surface, (0, 0))
# rendering the goal screen
self.screen.blit(self.goal_surface, (self.screen_width - self.goal_screen_dim[0], 0))
if drop_events:
pygame.event.get()
def render(self, filename=None):
for block in self.blocks:
self.screen.blit(block.surf, block.rect)
pygame.display.flip()
if filename:
pygame.image.save(self.screen, filename)
def record_action(self, state=None, action=None):
action_value = action.value
if action == Action.PICK:
action_value = "%s-%d" % (action_value, self.selected_block_id)
if state is None:
self.actions_taken.append({"state": self.get_state_as_dict(), "action": action_value})
else:
self.actions_taken.append({"state": state, "action": action_value})
    def run_environment(self):
        """Interactive demonstration loop.

        Lets a human pick blocks with the mouse, move them with the arrow
        keys, and drop them with SPACE, recording every (state, action)
        pair into self.actions_taken. RSHIFT marks the demonstration as
        finished; ESC aborts. Returns the recorded action list.
        """
        running = True
        # Required for DQN to map frames to actions.
        # Create the surface and pass in a tuple with its length and width
        # to indicate the end of the demonstration
        # Held arrow key; repeating the move each frame while it stays down.
        prev_action_key = None
        while running:
            self.pre_render(False)
            for event in pygame.event.get():
                # Snapshot BEFORE the event's effect, so records are (s, a) pairs.
                state = self.get_state_as_dict()
                if event.type == KEYUP:
                    prev_action_key = None
                if event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        prev_action_key = None
                        running = False
                    elif event.key == K_RSHIFT:
                        # RSHIFT ends the demonstration and records FINISHED.
                        prev_action_key = None
                        self.record_action(state=state, action=Action.FINISHED)
                        print("Finished Demonstration:", self.actions_taken)
                        running = False
                    elif event.key == K_SPACE:
                        # Drop: restore the block's color and clear selection.
                        self.block_dict[self.selected_block_id].surf.fill(COLORS[self.selected_block_id])
                        self.record_action(state=state, action=Action.DROP)
                        self.selected_block_id = None
                    elif event.key in {K_UP, K_DOWN, K_LEFT, K_RIGHT}:
                        prev_action_key = event.key
                # Check for QUIT event; if QUIT, set running to false
                # elif event.type == QUIT or (hasattr(event, "key") and event.key == K_ESCAPE):
                #     print("writing to file")
                #     prev_action_key = None
                #     self.record_action(action=Action.FINISHED)
                #     # self.serialize_actions()
                #     print("Finished Demonstration:", self.actions_taken)
                #     running = False
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    prev_action_key = None
                    pos = pygame.mouse.get_pos()
                    for block in self.blocks:
                        if block.rect.collidepoint(pos):
                            # Clicking a block while holding another implies a DROP first.
                            if self.selected_block_id is not None:
                                self.record_action(state=state, action=Action.DROP)
                                self.block_dict[self.selected_block_id].surf.fill(COLORS[self.selected_block_id])
                            self.selected_block_id = block.id
                            self.record_action(state=state, action=Action.PICK)
                            # Highlight the picked block.
                            self.block_dict[block.id].surf.fill(WHITE)
                            break
            # Apply the held arrow key once per frame and record the move.
            if prev_action_key:
                action_taken = self.move_block_by_key(prev_action_key, self.selected_block_id)
                if action_taken:
                    self.record_action(state=state, action=action_taken[0])
            self.render()
        return self.actions_taken
@staticmethod
def convert_state_dict_to_tuple(state_dict):
state = [tuple(state_dict["positions"][key]) for key in sorted([key for key in state_dict["positions"]], key=lambda x: int(x))]
selected_id = state_dict["selected"] if state_dict["selected"] != -1 else None
state.append(selected_id)
return tuple(state)
@staticmethod
def parse_action(action_value):
action_vals = action_value.split("-")
action = Action(action_vals[0])
if len(action_vals) > 1:
return action, int(action_vals[1])
else:
return action, None
|
import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import numpy as np
from pathlib import Path
from functools import lru_cache
def pad_collate(batch):
    """Collate variable-length (input_ids, output_ids, path) samples.

    Pads every input with 0 and every output with -1 up to the longest
    sequence in the batch (-1 presumably matches a loss ignore_index --
    confirm against the training code).

    Args:
        batch: list of (input_ids: list[int], output_ids: list[int], path).

    Returns:
        (inputs, outputs, input_len, output_len, paths) where the first four
        are long tensors and paths is a plain list.
    """
    # Find the longest input/output sequence in the batch.
    max_input_length = max(len(sample[0]) for sample in batch)
    max_output_length = max(len(sample[1]) for sample in batch)
    # Renamed from `input`/`output` in the original: those shadow builtins.
    padded = [(sample[0] + (max_input_length - len(sample[0])) * [0],    # input, padded with 0
               sample[1] + (max_output_length - len(sample[1])) * [-1],  # output, padded with -1
               len(sample[0]),  # original input length
               len(sample[1]),  # original output length
               sample[2]        # path
               ) for sample in batch]
    # Stack all samples into batch-first tensors.
    inputs = torch.stack([torch.tensor(s[0], dtype=torch.long) for s in padded], dim=0)
    outputs = torch.stack([torch.tensor(s[1], dtype=torch.long) for s in padded], dim=0)
    input_len = torch.tensor([s[2] for s in padded], dtype=torch.long)
    output_len = torch.tensor([s[3] for s in padded], dtype=torch.long)
    paths = [s[4] for s in padded]
    return inputs, outputs, input_len, output_len, paths
def make_dataset(root):
    """Recursively list every *file* under ``root`` as a path string.

    BUGFIX: glob('**/*') also yields directories; a directory path would
    crash the downstream open() in ParaLoader.process_file, so they are
    filtered out here.
    """
    return [str(p) for p in Path(root).glob('**/*') if p.is_file()]
class ParaLoader(Dataset):
    """Paraphrase-pair dataset.

    Each data file contains one "source ||| target" line; words are mapped
    to integer ids via a word dictionary saved with np.save.
    """

    def __init__(self, root, word_dict_path):
        """Index all files under ``root`` and load the word -> id mapping."""
        super(ParaLoader, self).__init__()
        self.data_files = make_dataset(root)
        # np.save wraps the dict in a 0-d object array; .item() unwraps it.
        self.word_dict = np.load(word_dict_path, allow_pickle=True).item()

    def process_file(self, idx):
        """Read one file and encode its source/target halves as id lists.

        The source is framed with <SOS>/<EOS>; the target only gets <EOS>.
        Raises KeyError on out-of-vocabulary words.
        """
        with open(self.data_files[idx], 'r') as f:
            src_text, tgt_text = f.read().split('|||')
        src_ids = [self.word_dict[w] for w in ['<SOS>'] + src_text.split() + ['<EOS>']]
        tgt_ids = [self.word_dict[w] for w in tgt_text.split() + ['<EOS>']]
        return src_ids, tgt_ids

    def __getitem__(self, idx):
        """Return (source_ids, target_ids, file_path) for sample ``idx``."""
        src_ids, tgt_ids = self.process_file(idx)
        return src_ids, tgt_ids, self.data_files[idx]

    def __len__(self):
        # BUGFIX: the original decorated this with @lru_cache, which keys the
        # cache on `self` and keeps every instance alive for the process
        # lifetime (flake8-bugbear B019); len() is O(1), so no cache is needed.
        return len(self.data_files)
if __name__ == '__main__':
    # Smoke test: build a loader over data/train and pull one padded batch.
    # Requires data/train/ files and words_dict.npy to exist on disk.
    a = next(
        enumerate(DataLoader(ParaLoader(root='data/train', word_dict_path='words_dict.npy'), collate_fn=pad_collate,
                             batch_size=1)))
    print(a)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.