text
stringlengths 8
6.05M
|
|---|
[
mean: 0.74598, std: 0.01465, params: {'base_estimator__max_depth': 3, 'base_estimator__max_features': 'sqrt'},
mean: 0.74689, std: 0.01357, params: {'base_estimator__max_depth': 3, 'base_estimator__max_features': 'log2'},
mean: 0.73473, std: 0.00446, params: {'base_estimator__max_depth': 3, 'base_estimator__max_features': 0.75},
mean: 0.74159, std: 0.02107, params: {'base_estimator__max_depth': 3, 'base_estimator__max_features': 0.5},
mean: 0.73107, std: 0.01779, params: {'base_estimator__max_depth': 3, 'base_estimator__max_features': 0.25},
mean: 0.69672, std: 0.01227, params: {'base_estimator__max_depth': 5, 'base_estimator__max_features': 'sqrt'},
mean: 0.69483, std: 0.01245, params: {'base_estimator__max_depth': 5, 'base_estimator__max_features': 'log2'},
mean: 0.70404, std: 0.01453, params: {'base_estimator__max_depth': 5, 'base_estimator__max_features': 0.75},
mean: 0.71057, std: 0.00938, params: {'base_estimator__max_depth': 5, 'base_estimator__max_features': 0.5},
mean: 0.70433, std: 0.01741, params: {'base_estimator__max_depth': 5, 'base_estimator__max_features': 0.25},
mean: 0.68317, std: 0.01573, params: {'base_estimator__max_depth': 7, 'base_estimator__max_features': 'sqrt'},
mean: 0.67883, std: 0.01533, params: {'base_estimator__max_depth': 7, 'base_estimator__max_features': 'log2'},
mean: 0.71653, std: 0.00935, params: {'base_estimator__max_depth': 7, 'base_estimator__max_features': 0.75},
mean: 0.71408, std: 0.00791, params: {'base_estimator__max_depth': 7, 'base_estimator__max_features': 0.5},
mean: 0.70697, std: 0.01221, params: {'base_estimator__max_depth': 7, 'base_estimator__max_features': 0.25},
mean: 0.68718, std: 0.01003, params: {'base_estimator__max_depth': 9, 'base_estimator__max_features': 'sqrt'},
mean: 0.67537, std: 0.01661, params: {'base_estimator__max_depth': 9, 'base_estimator__max_features': 'log2'},
mean: 0.70430, std: 0.00990, params: {'base_estimator__max_depth': 9, 'base_estimator__max_features': 0.75},
mean: 0.70682, std: 0.00701, params: {'base_estimator__max_depth': 9, 'base_estimator__max_features': 0.5},
mean: 0.70295, std: 0.01170, params: {'base_estimator__max_depth': 9, 'base_estimator__max_features': 0.25}]
best_params_ = {'base_estimator__max_depth': 3, 'base_estimator__max_features': 'log2'}
best_score_ = 0.74689232764
2016-03-29 11:48:04
2016-03-29 12:22:12
[
mean: 0.80816, std: 0.00435, params: {'base_estimator__max_depth': 1},
mean: 0.78206, std: 0.00357, params: {'base_estimator__max_depth': 2},
mean: 0.75502, std: 0.00785, params: {'base_estimator__max_depth': 3},
mean: 0.72800, std: 0.01070, params: {'base_estimator__max_depth': 4}]
best_params_ = {'base_estimator__max_depth': 1}
best_score_ = 0.808160005274
2016-03-29 12:33:52
[
mean: 0.81215, std: 0.00573, params: {'n_estimators': 140, 'base_estimator__min_samples_split': 5000},
mean: 0.80539, std: 0.00766, params: {'n_estimators': 350, 'base_estimator__min_samples_split': 5000},
mean: 0.79791, std: 0.00649, params: {'n_estimators': 550, 'base_estimator__min_samples_split': 5000},
mean: 0.78395, std: 0.00609, params: {'n_estimators': 1200, 'base_estimator__min_samples_split': 5000},
mean: 0.81053, std: 0.00261, params: {'n_estimators': 140, 'base_estimator__min_samples_split': 7000},
mean: 0.80485, std: 0.00580, params: {'n_estimators': 350, 'base_estimator__min_samples_split': 7000},
mean: 0.79943, std: 0.00639, params: {'n_estimators': 550, 'base_estimator__min_samples_split': 7000},
mean: 0.78428, std: 0.00682, params: {'n_estimators': 1200, 'base_estimator__min_samples_split': 7000},
mean: 0.81133, std: 0.00476, params: {'n_estimators': 140, 'base_estimator__min_samples_split': 9000},
mean: 0.80628, std: 0.00496, params: {'n_estimators': 350, 'base_estimator__min_samples_split': 9000},
mean: 0.79905, std: 0.00828, params: {'n_estimators': 550, 'base_estimator__min_samples_split': 9000},
mean: 0.78240, std: 0.00494, params: {'n_estimators': 1200, 'base_estimator__min_samples_split': 9000},
mean: 0.80985, std: 0.00144, params: {'n_estimators': 140, 'base_estimator__min_samples_split': 11000},
mean: 0.80367, std: 0.00724, params: {'n_estimators': 350, 'base_estimator__min_samples_split': 11000},
mean: 0.79861, std: 0.00661, params: {'n_estimators': 550, 'base_estimator__min_samples_split': 11000},
mean: 0.78314, std: 0.00582, params: {'n_estimators': 1200, 'base_estimator__min_samples_split': 11000}]
best_params_ = {'n_estimators': 140, 'base_estimator__min_samples_split': 5000}
best_score_ = 0.812154119164
2016-03-29 12:48:35
[
mean: 0.78992, std: 0.03749, params: {'n_estimators': 50, 'base_estimator__min_samples_split': 1000},
mean: 0.80949, std: 0.01134, params: {'n_estimators': 70, 'base_estimator__min_samples_split': 1000},
mean: 0.81179, std: 0.01267, params: {'n_estimators': 110, 'base_estimator__min_samples_split': 1000},
mean: 0.81332, std: 0.01099, params: {'n_estimators': 140, 'base_estimator__min_samples_split': 1000},
mean: 0.80947, std: 0.00738, params: {'n_estimators': 50, 'base_estimator__min_samples_split': 2000},
mean: 0.81054, std: 0.00936, params: {'n_estimators': 70, 'base_estimator__min_samples_split': 2000},
mean: 0.81210, std: 0.01519, params: {'n_estimators': 110, 'base_estimator__min_samples_split': 2000},
mean: 0.81347, std: 0.00956, params: {'n_estimators': 140, 'base_estimator__min_samples_split': 2000},
mean: 0.79785, std: 0.02069, params: {'n_estimators': 50, 'base_estimator__min_samples_split': 3000},
mean: 0.81782, std: 0.01115, params: {'n_estimators': 70, 'base_estimator__min_samples_split': 3000},
mean: 0.81396, std: 0.01551, params: {'n_estimators': 110, 'base_estimator__min_samples_split': 3000},
mean: 0.81518, std: 0.00846, params: {'n_estimators': 140, 'base_estimator__min_samples_split': 3000},
mean: 0.74942, std: 0.01609, params: {'n_estimators': 50, 'base_estimator__min_samples_split': 5000},
mean: 0.81035, std: 0.01198, params: {'n_estimators': 70, 'base_estimator__min_samples_split': 5000},
mean: 0.81586, std: 0.01099, params: {'n_estimators': 110, 'base_estimator__min_samples_split': 5000},
mean: 0.81539, std: 0.01077, params: {'n_estimators': 140, 'base_estimator__min_samples_split': 5000}]
best_params_ = {'n_estimators': 70, 'base_estimator__min_samples_split': 3000}
best_score_ = 0.817815135915
2016-03-29 12:51:48
|
import cv2
import os
import numpy as np
import tensorflow as tf
#Data directory
dat_dir = '../data'
test_dir = dat_dir + '/test'
# Load Images
def preprocess(im):
    """Resize one image and scale its pixels to [0, 1] as float32.

    :param im: image array as loaded by OpenCV (H x W x C, uint8).
    :return: numpy array of shape (1, image_size, image_size, C), float32 in [0, 1].

    NOTE(review): relies on a module-level `image_size` defined elsewhere
    in the file -- confirm it is set before this is called.
    """
    images = []
    # BUG FIX: the original resized the undefined name `image` (NameError);
    # the function's parameter is `im`.
    image = cv2.resize(im, (image_size, image_size), 0, 0, cv2.INTER_LINEAR)
    images.append(image)
    images = np.array(images, dtype=np.uint8)
    images = images.astype('float32')
    # Normalize 0..255 pixel values to 0..1.
    images = np.multiply(images, 1.0/255.0)
    return images
# Build the class list from the training directory.
# NOTE(review): `train_path`, `images`, `num_channels` and `labels` are not
# defined in this file as shown -- presumably set elsewhere; verify before running.
classes = os.listdir(train_path)
#Remove .DS_Store (macOS metadata file) from the class list
for i,j in enumerate(classes):
    if j == '.DS_Store':
        del classes[i]
        break
# Shape the preprocessed image into a batch of one for the network input.
x_batch = images.reshape(1, image_size,image_size,num_channels)
# Restore the trained TF1 graph and weights from the saved meta-graph/checkpoint.
model_path = '../tensorflow/vg-classifier-model/vg-classifier-model.meta'
sess = tf.Session()
saver = tf.train.import_meta_graph(model_path)
saver.restore(sess, tf.train.latest_checkpoint('../tensorflow/vg-classifier-model/'))
graph = tf.get_default_graph()
# Look up the tensors by the names they were given at training time.
y_pred = graph.get_tensor_by_name('y_pred:0')
x = graph.get_tensor_by_name('x:0')
y_true = graph.get_tensor_by_name('y_true:0')
# Dummy labels: y_true is only fed to satisfy the graph, not used for prediction.
y_test_images = np.zeros((1, len(labels)))
feed_dict_testing = {x: x_batch, y_true: y_test_images}
# `result` holds the predicted class probabilities for the single image.
result=sess.run(y_pred, feed_dict=feed_dict_testing)
|
#Created on 5/23/2017
#@author: rspies
# Python 2.7
# This script converts individual QME datacard files to a single/merged csv file that can be imported for dss build
import os
import datetime
from dateutil import parser
os.chdir("../..") # change dir to \\AMEC\\NWS
maindir = os.getcwd()
############ User input ################
RFC = 'NCRFC_FY2017'
fx_group = '' # set to '' if not used
data_format = 'nhds' # choices: 'usgs' or 'chps' or 'nhds'
dss_csv = 'on' # options: 'on' or 'off' # create csv for dss import
# Source directories for each supported input format (Windows-style paths).
usgs_files = maindir + '\\Calibration_NWS\\' + RFC[:5] + os.sep + RFC + '\\data\\daily_discharge' # directory with USGS QME data
chps_files = maindir + '\\Calibration_NWS\\' + RFC[:5] + os.sep + RFC + '\\datacards\\QME\\QME_CHPS_export\\' # CHPS csv output files
nhds_files = maindir + '\\Calibration_NWS\\' + RFC[:5] + os.sep + RFC + '\\datacards\\QME\\' # NHDS data download (cardfiles)
new_file = maindir + '\\Calibration_NWS\\' + RFC[:5] + os.sep + RFC + '\\datacards\\QME\\' # output summary tab delimited file location
########################################
# Optional forecast-group subdirectory for the NHDS input and dss output paths.
if fx_group != '':
    nhds_files = nhds_files + os.sep + fx_group + os.sep + 'QME_Lynker_download'
    dss_path = maindir + '\\Calibration_NWS\\' + RFC[:5] + os.sep + RFC + '\\data_dss\\' + fx_group
else:
    nhds_files = nhds_files + os.sep + 'QME_Lynker_download'
    dss_path = maindir + '\\Calibration_NWS\\' + RFC[:5] + os.sep + RFC + '\\data_dss'
basins_list = []; count = 0
# Select the list of input files to process based on the chosen data format.
if data_format == 'usgs':
    QMEs = [f for f in os.listdir(usgs_files) if os.path.isfile(os.path.join(usgs_files, f))]
if data_format == 'chps':
    QMEs = [f for f in os.listdir(chps_files) if os.path.isfile(os.path.join(chps_files, f))]
if data_format == 'nhds':
    QMEs = [f for f in os.listdir(nhds_files) if os.path.isfile(os.path.join(nhds_files, f))]
# dss_dic maps date string -> {QME filename: discharge string}.
dss_dic = {}
# Read every QME file, parse dates and discharge values, and stage them in
# dss_dic keyed by date string -> {filename: discharge}.
for QME in QMEs:
    print 'Reading data for: ' + QME
    count += 1
    if data_format == 'usgs':
        csv_read = open(usgs_files + '\\' + QME, 'r')
        discharge = []; date = []
        ### read card file formatted .txt files lists
        line_count = 0
        for line in csv_read:
            if line_count >= 9: # ignore header lines
                sep = line.split()
                ### parse date columns (MMYY packed in column 1, day in column 2)
                month = str(sep[1])[:-2]
                year = str(sep[1])[-2:]
                if int(year) <= 17:
                    year = int(year) + 2000 # assume 2-digit years <= 17 are in the 2000s
                else:
                    year = int(year) + 1900
                day = str(sep[2])
                full_date = datetime.datetime(year,int(month),int(day))
                date.append(full_date)
                if line_count == 12:
                    site_num = sep[0]
                discharge=sep[-1] # assuming a single column datacard
                if dss_csv == 'on':
                    if str(full_date) not in dss_dic:
                        dss_dic[str(full_date)] = {}
                    # Negative values flag missing data; only keep valid flows.
                    if float(discharge) >= 0.0:
                        dss_dic[str(full_date)][QME]=str(float(discharge))
            line_count += 1
        csv_read.close()
    if data_format == 'chps':
        csv_read = open(chps_files + '\\' + QME, 'r')
        discharge = []; date = []
        ### read CHPS csv export (date in first column, value in last)
        line_count = 0
        for line in csv_read:
            if line_count >= 2: # ignore header lines
                sep = line.split(',')
                full_date = parser.parse(sep[0])
                date.append(full_date.date())
                discharge=sep[-1] # assuming a single column datacard
                if dss_csv == 'on':
                    if str(full_date) not in dss_dic:
                        dss_dic[str(full_date)] = {}
                    if float(discharge) >= 0.0:
                        dss_dic[str(full_date)][QME]=str(float(discharge))
            line_count += 1
        csv_read.close()
    if data_format == 'nhds':
        csv_read = open(nhds_files + '\\' + QME, 'r')
        discharge = []; date = []
        ### read card file formatted .txt files lists
        line_count = 0
        for line in csv_read:
            if line_count >= 9: # ignore header lines
                sep = line.split()
                if len(sep) > 0: # ignore blank lines
                    if len(sep) < 4 and len(sep[-1]) < 10: # some QME files (from RFC) may not have gage/basin id as 1st index
                        sep.insert(0,'0000')
                    ### parse date columns (MMYY packed in column 1, day in column 2)
                    month = str(sep[1])[:-2]
                    year = str(sep[1])[-2:]
                    if int(year) <= 17:
                        year = int(year) + 2000 # assume 2-digit years <= 17 are in the 2000s
                    else:
                        year = int(year) + 1900
                    day = str(sep[2])
                    if len(sep[-1]) > 10: # check for large streamflow values that get combined with day column
                        day = str(sep[2])[:-10]
                    full_date = datetime.datetime(year,int(month),int(day))
                    date.append(full_date)
                    if line_count == 12:
                        site_num = sep[0]
                    discharge=sep[-1] # assuming a single column datacard
                    if dss_csv == 'on':
                        if str(full_date) not in dss_dic:
                            dss_dic[str(full_date)] = {}
                        if float(discharge) >= 0.0:
                            dss_dic[str(full_date)][QME]=str(float(discharge))
            line_count += 1
        csv_read.close()
# Write the merged table: a Date column plus one column per QME input file.
if dss_csv == 'on':
    print 'Writing to combined dss csv...'
    combine_csv = open(dss_path + os.sep + 'QME_daily' + '_merged_for_dss.csv','w')
    combine_csv.write('Date,')
    # Header row: the basin id is the filename part before the first '_'.
    for Basin in QMEs:
        if Basin != QMEs[-1]:
            combine_csv.write(Basin.split('_')[0] + ',')
        else:
            combine_csv.write(Basin.split('_')[0] + '\n')
    # Data rows in sorted date order; basins missing a value on a date are
    # left as an empty cell.
    for datestep in sorted(dss_dic):
        combine_csv.write(str(datestep) + ',')
        for Basin in QMEs:
            if Basin in dss_dic[datestep]:
                combine_csv.write(dss_dic[datestep][Basin])
            else:
                combine_csv.write('')
            if Basin != QMEs[-1]:
                combine_csv.write(',')
            else:
                combine_csv.write('\n')
    combine_csv.close()
print 'Completed!!'
|
#!/usr/bin/python
from bitstring import BitArray, BitStream
import Image
import sys
import hashlib
from util import getKey, getImageData
def decrypt(data, key):
    """Recover a message hidden in pixel least-significant bits.

    data: iterable of pixel tuples; the first channel's LSB of each pixel
    carries one encrypted bit. key: sequence of key bits, applied
    cyclically via XOR. The first 32 decoded bits (lbits) form a length
    header giving the message size in bits; the next lbits.int bits are
    the message body. Returns the message as bytes.
    """
    # TODO - assert RGB/RGBA
    bits = BitArray()
    lbits = BitArray(32)  # 32-bit message-length header, filled first
    counter = 0
    for i in data:
        c = counter - lbits.len          # bit index within the message body
        p = counter % len(key)           # key repeats cyclically
        q = key[p] ^ (i[0] & 1)          # decoded bit = key bit XOR pixel LSB
        if counter < lbits.len:
            # Still reading the 32-bit length header.
            lbits[counter] = q
        elif counter < lbits.int + 32:
            # Grow the message buffer by one bit, then set its value.
            bits.append(1)
            bits[c] = q
        counter += 1
    return bits.bytes
def main(argv):
    """CLI entry point: argv = [password, input_image_path].

    Derives the key from the password, reads the image's pixel data and
    prints the decrypted bytes to stdout.
    """
    pw = argv[0]
    inputfile = argv[1]
    key = getKey(pw)                 # derive bit key from the password
    data = getImageData(inputfile)   # raw pixel data from the image
    bytes = decrypt(data, key)
    print bytes
if __name__ == "__main__":
    main(sys.argv[1:])
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 2 21:13:53 2016
Class for a UserDevice in 3Space
A UserDevice represents both a camera & viewport
@author: alex
"""
import numpy as np
from Events import ObjEvent, EventDispatcher
class UserDevice(object):
    """A user's device in 3Space, acting as both camera and viewport."""

    def __init__(self):
        # Registration key handed out by the server; unset until registered.
        self.key = None
        # Human-readable device name.
        self._name = ""
        # Field of view of the device along x and y.
        self.field_of_view = {'x': 0.0, 'y': 0.0}
        # Position (x, y, z) of the device.
        self.location = np.zeros(3, dtype=np.float32)
        # Euler orientation (rx, ry, rz); (0, 0, 0) points up along z.
        self.orientation = np.zeros(3, dtype=np.float32)
        # The same orientation expressed as a quaternion (4 components).
        self.orientation_quaternion = np.zeros(4, dtype=np.float32)
        # IDs of the scenes this device is registered with.
        self.scenes = []
|
# Read a string of length n and find the split point that maximizes the
# number of distinct letters a-z appearing in both the prefix and suffix.
n = int(input())
s = input()
best = 0
for cut in range(1, n):
    left, right = s[0:cut], s[cut:n]
    # Count letters present on both sides of the cut.
    common = sum(1 for code in range(ord('a'), ord('z') + 1)
                 if chr(code) in left and chr(code) in right)
    best = max(best, common)
print(best)
|
import createDB
import sqlite3
import os.path
from datetime import date, datetime
DB = sqlite3.connect('Mailing.db')
conn = DB.cursor()
def firstActions():
    """Top-level interactive menu: register, log in, or quit.

    Loops until the user chooses '3' (quit). Successful login marks the
    user as logged in and hands control to afterLogin().
    """
    while (1):
        action = raw_input("What do you want to do? 1-Register 2-Login 3-Quit \n")
        if (action == '3'):
            break
        elif (action == '1'): #register
            try:
                name = raw_input("name: ")
                family = raw_input("familyName: ")
                userName = raw_input("UserName: ")
                password = raw_input("password: ")
                Log = 0  # new users start logged out
                conn.execute("INSERT INTO USER (name, familyname, password, UserName, Login) VALUES(?,?,?,?,?)", (name,family, password, userName, Log))
                conn.execute("INSERT INTO ROLE (RoleName,Permission, UserName) VALUES(?,?,?)", ('OrdinaryUser', '0001', userName))
                DB.commit()
            except Exception as e:
                # NOTE(review): any insert failure is reported as a duplicate
                # username -- presumably a UNIQUE constraint, but other errors
                # would be misreported too.
                DB.rollback()
                print "UserName has been taken. Please try again"
                continue
        elif(action =='2'): #login
            uName = raw_input("UserName: ")
            passw = raw_input("Password: ")
            # Credentials match exactly one row -> successful login.
            conn.execute("SELECT count(*) FROM USER where UserName = ? and password = ?",(uName, passw))
            unique = conn.fetchone()[0]
            if (unique == 1):
                print "Logged in\n"
                conn.execute("UPDATE USER SET Login = ? WHERE userName = ?", (1, uName))
                DB.commit()
                afterLogin(uName)
            else:
                print "OOPS! Try again!\n"
def afterLogin(userName):
    """Mailbox menu for a logged-in user.

    Offers send/inbox/sent/trash/delete/logout; loops until logout.
    NOTE(review): indentation was reconstructed from a flattened source --
    the attachment-saving code is assumed to run per inbox message; confirm
    against the original file.
    """
    while (1):
        action = raw_input("What do you want to do? 1-send Message 2-Inbox 3-Sent 4-Trash 5-Delete msg 6-Logout\n")
        if(action == '1'): #send message
            # Only allow sending when this user is marked as logged in.
            conn.execute("SELECT count(*) FROM USER where UserName = ? and Login = ?",(userName, 1))
            unique = conn.fetchone()[0]
            if (unique == 1):
                reciever = raw_input("Reciever: ")
                # Verify the recipient exists before inserting the message.
                conn.execute("SELECT count(*) FROM USER where UserName = ?",(reciever,))
                unique = conn.fetchone()[0]
                if (unique == 1):
                    subject = raw_input("Subject: ")
                    msg = raw_input("Message: ")
                    time = datetime.now()
                    conn.execute("INSERT INTO MESSAGE(SUBJ, TIMESENT, DELTAG, READTAG, BODY, UID1, UID2) VALUES(?,?,?,?,?,?,?)",\
                        (subject,time, 0, 0, msg, userName,reciever))
                    # Offer attachments for the message just inserted.
                    updateAttachTable(userName, conn.lastrowid)
                    DB.commit()
                else:
                    print "UserName is not valid. Try again"
                    continue
            else:
                print "You should log in to the system first!\n"
        elif (action == '2'): #Show Inbox
            conn.execute("SELECT * from INBOX where UID2 = ? and DELTAG = ?", (userName, 0))
            allmsg = conn.fetchall()
            for msg in allmsg:
                print 'num: {0}, From: {1}, Subject: {2}, Body: {3}, On: {4}, Read: {5} '.format(msg[0], msg[5], msg[1], msg[4], msg[2], msg[3])
                print "\n"
                # Save this message's attachments to the working directory.
                conn.execute("SELECT FileContent, fileName from ATTACHMENT where MsgKEY = ?", (msg[0],))
                allAttach = conn.fetchall()
                for attach in allAttach:
                    with open(attach[1], "wb") as output_file:
                        output_file.write(attach[0])
        elif (action == '3'): #Show Sent
            conn.execute("SELECT * from SENT where UID1 = ? and DELTAG = ?", (userName, 0))
            allmsg = conn.fetchall()
            for msg in allmsg:
                print 'num: {0}, To: {1}, Subject: {2}, Body: {3}, On: {4}, '.format(msg[0], msg[5] ,msg[1], msg[3], msg[2])
                print "\n"
        elif (action == '4'): #Show Trash
            conn.execute("SELECT * from TRASH where UID1 = ? and DELTAG = 1", (userName,))
            allmsg = conn.fetchall()
            for msg in allmsg:
                print 'num: {0}, To: {1}, Subject: {2}, Body: {3}, On: {4}, '.format(msg[0], msg[5] ,msg[1], msg[3], msg[2])
                print "\n"
        elif (action == '5'): #Delete msg from inbox
            conn.execute("SELECT * from MESSAGE where UID2 = ?", (userName,)) #delete from inbox
            delmsg = raw_input("please enter the msg number: ")
            # Soft delete: set the delete flag rather than removing the row.
            conn.execute("UPDATE MESSAGE SET DELTAG = ? WHERE MKEY = ?", (1, delmsg))
            DB.commit()
        elif (action == '6'): #logout
            try:
                conn.execute("UPDATE USER SET Login = ? WHERE userName = ?", (0, userName))
                DB.commit()
                print "Successfully Logged Out!\n"
                break
            except Exception as e:
                DB.rollback()
                print "Try again! --Logout error"
                continue
def updateAttachTable(userName, lastrowid):
    """Interactively attach zero or more files to the message `lastrowid`.

    Each file is read as binary and stored as a BLOB in ATTACHMENT,
    keyed to the message row just inserted by the caller.
    """
    attachment = raw_input("Do you want to attach a file? Y/N\n")
    while (attachment == 'y' or attachment == 'Y'):
        path = raw_input("Enter file path: ")
        name = os.path.split(path)  # (directory, filename); only filename is stored
        with open(path, "rb") as att:
            ablob = att.read()
            conn.execute("INSERT INTO ATTACHMENT (FileName, FileContent, MsgKEY) VALUES(?, ?, ?)",\
                (name[1], sqlite3.Binary(ablob),lastrowid))
            DB.commit()
        attachment = raw_input("Do you want to attach another file? Y/N\n")
# Entry point: run the interactive menu, then close the DB connection on exit.
firstActions()
DB.close()
|
__author__ = 'samue'
|
import pandas as pd

# Load the existing training set and drop its first (index) column.
df1 = pd.read_csv("train.csv")
df1 = df1.drop(df1.columns[0], axis=1)
# Load the new batch of rows.
df2 = pd.read_csv("245_1.csv")
# BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
# pd.concat([...]) is the supported equivalent with identical behavior.
df1 = pd.concat([df1, df2])
# Write the combined set back out without the row index.
df1.to_csv('train.csv', index=False)
|
# Read two numbers from the user and print their sum.
first = float(input('Digite o primeiro número: '))
second = float(input('Digite o segundo: '))
print(first + second)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2019-05-20 17:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax two Food fields to allow blank/null values.

    NOTE(review): 'list_of_pants_consumed_ql' looks like a typo for
    'plants', but the name must match the model field, so it is kept.
    """

    dependencies = [
        ('quicklook', '0013_auto_20190508_0757'),
    ]

    operations = [
        # Free-text list field; now optional.
        migrations.AlterField(
            model_name='food',
            name='list_of_pants_consumed_ql',
            field=models.TextField(blank=True, null=True),
        ),
        # Short count field; now optional.
        migrations.AlterField(
            model_name='food',
            name='no_plants_consumed_ql',
            field=models.CharField(blank=True, max_length=5, null=True),
        ),
    ]
|
__author__ = "Narwhale"
import unittest
from employee import Employee
class TestEmployee(unittest.TestCase):
    '''Tests for employee.py.'''
    def setUp(self):
        '''Create an employee with a name and salary for the test methods.'''
        self.eric = Employee('eric', 'matthes', 65000)
    def test_give_default(self):
        '''The default raise adds 5000 to the annual salary.'''
        self.eric.give_raise()
        self.assertEqual(self.eric.salary,70000)
    def test_give_custom_raise(self):
        '''A custom raise amount is added to the annual salary.'''
        self.eric.give_raise(10000)
        self.assertEqual(self.eric.salary,75000)
|
from __future__ import division
import autograd.numpy as np
from autograd import grad
from operator import itemgetter
from svae.util import monad_runner, interleave, uninterleave
from svae.lds.gaussian import sample, predict, condition_on
from svae.lds.gaussian import natural_sample, \
natural_condition_on, natural_rts_backward_step
from svae.lds.gaussian_nochol import natural_predict, natural_lognorm
from svae.lds.lds_inference import _repeat_param, natural_filter_forward_general, \
natural_sample_backward_general
def _unpack_repeated(natparam, T=None):
    'handle homogeneous models by repeating natural parameters if necessary'
    # natparam is (init_params, pair_params, node_params); pair params are
    # needed once per transition (T-1), node params once per time step (T).
    init_params, pair_params, node_params = natparam
    T = len(node_params) if T is None else T
    return init_params, _repeat_param(pair_params, T-1), _repeat_param(node_params, T)
### inference using standard parameters
def filter_forward(data, mu_init, sigma_init, A, sigma_states, C, sigma_obs):
    """Kalman filter in standard (mean/covariance) parameters.

    Returns ((filtered_mus, filtered_sigmas), log_likelihood) for the LDS
    with dynamics (A, sigma_states) and observations (C, sigma_obs).
    """
    def observe(y):
        # One filter step: predict forward, then condition on observation y.
        def update_belief(mu, sigma):
            mu_pred, sigma_pred = predict(mu, sigma, A, sigma_states)
            (mu_filt, sigma_filt), ll = condition_on(mu_pred, sigma_pred, C, y, sigma_obs)
            return (mu_filt, sigma_filt), ll
        return update_belief
    # unit/bind thread the list of filtered moments and the running lognorm.
    def unit(mu, sigma):
        return ([mu], [sigma]), 0.
    def bind(result, step):
        (mus, sigmas), lognorm = result
        (mu, sigma), term = step(mus[-1], sigmas[-1])
        return (mus + [mu], sigmas + [sigma]), lognorm + term
    # Condition the initial state on the first observation, then fold the rest.
    (mu_filt, sigma_filt), ll = condition_on(mu_init, sigma_init, C, data[0], sigma_obs)
    (filtered_mus, filtered_sigmas), loglike = \
        monad_runner(bind)(unit(mu_filt, sigma_filt), map(observe, data[1:]))
    return (filtered_mus, filtered_sigmas), loglike + ll
def sample_backward(filtered_mus, filtered_sigmas, A, sigma_states):
    """Backward sampling pass: draw a state trajectory given filtered moments.

    Samples the last state from its filtered distribution, then walks
    backward conditioning each filtered state on the sampled successor.
    Returns the sampled states as an array in forward time order.
    """
    def filtered_sampler(mu_filt, sigma_filt):
        def sample_cond(next_state):
            # Condition the filtered state on the already-sampled next state.
            (mu_cond, sigma_cond), _ = condition_on(
                mu_filt, sigma_filt, A, next_state, sigma_states)
            return sample(mu_cond, sigma_cond)
        return sample_cond
    # unit/bind prepend each new sample, keeping forward time order.
    def unit(sample):
        return [sample]
    def bind(result, step):
        samples = result
        sample = step(samples[0])
        return [sample] + samples
    last_sample = sample(filtered_mus[-1], filtered_sigmas[-1])
    steps = reversed(map(filtered_sampler, filtered_mus[:-1], filtered_sigmas[:-1]))
    samples = monad_runner(bind)(unit(last_sample), steps)
    return np.array(samples)
def sample_lds(data, mu_init, sigma_init, A, sigma_states, C, sigma_obs):
    """Forward filter, backward sample: draw states from the LDS posterior.

    Returns (sampled_states, log_likelihood) given observations `data`.
    """
    (filtered_mus, filtered_sigmas), loglike = filter_forward(
        data, mu_init, sigma_init, A, sigma_states, C, sigma_obs)
    sampled_states = sample_backward(
        filtered_mus, filtered_sigmas, A, sigma_states)
    return sampled_states, loglike
### inference using info parameters and linear node potentials
def natural_filter_forward(natparam, data):
    """Kalman filter in natural (information-form) parameters.

    Interleaves condition and predict steps, returning the full message
    list and the log normalizer (including the Gaussian base measure).
    Python 2 only: uses tuple-unpacking lambda parameters.
    """
    T, p = data.shape
    init_params, pair_params, node_params = _unpack_repeated(natparam, T)
    # unit/bind accumulate the message list and the running log normalizer.
    def unit(J, h):
        return [(J, h)], 0.
    def bind(result, step):
        messages, lognorm = result
        new_message, term = step(messages[-1])
        return messages + [new_message], lognorm + term
    condition = lambda node_param, y: lambda (J, h): natural_condition_on(J, h, y, *node_param)
    predict = lambda pair_param: lambda (J, h): natural_predict(J, h, *pair_param)
    # Alternate condition/predict so messages holds both prediction and
    # filtered messages in interleaved order.
    steps = interleave(map(condition, node_params, data), map(predict, pair_params))
    J_init, h_init, logZ_init = init_params
    messages, lognorm = monad_runner(bind)(unit(J_init, h_init), steps)
    lognorm += natural_lognorm(*messages[-1]) + logZ_init
    # Subtract the Gaussian base measure constant.
    return messages, lognorm - T*p/2*np.log(2*np.pi)
def natural_smooth_backward(forward_messages, natparam):
    """RTS smoother backward pass over interleaved forward messages.

    Returns the smoothed quantities (third element of each backward step
    result) in forward time order. Python 2 only: tuple-unpacking lambdas.
    """
    prediction_messages, filter_messages = uninterleave(forward_messages)
    init_params, pair_params, node_params = _unpack_repeated(natparam)
    pair_params = map(itemgetter(0, 1, 2), pair_params)
    # unit/bind prepend each step's result, preserving forward order.
    unit = lambda (J, h): [(J, h)]
    bind = lambda result, step: [step(result[0])] + result
    rts = lambda next_prediction, filtered, pair_param: lambda next_smoothed: \
        natural_rts_backward_step(next_smoothed, next_prediction, filtered, pair_param)
    steps = map(rts, prediction_messages[1:], filter_messages, pair_params)
    return map(itemgetter(2), monad_runner(bind)(unit(filter_messages[-1]), steps))
def natural_lds_Estep(natparam, data):
    """E step: expected sufficient statistics as the gradient of the
    log normalizer with respect to the natural parameters."""
    log_normalizer = lambda natparam: natural_filter_forward(natparam, data)[1]
    return grad(log_normalizer)(natparam)
def natural_lds_inference(natparam, data):
    """Run filtering once, getting expected stats (via autograd), a
    posterior sample, and the log normalizer.

    `saved` is a throwaway object used to smuggle the forward messages out
    of the traced log-normalizer function so filtering runs only once.
    """
    saved = lambda: None
    def lds_log_normalizer(natparam):
        saved.forward_messages, saved.lognorm = natural_filter_forward(natparam, data)
        return saved.lognorm
    expected_stats = grad(lds_log_normalizer)(natparam)
    sample = natural_sample_backward(saved.forward_messages, natparam)
    return sample, expected_stats, saved.lognorm
def natural_sample_backward(forward_messages, natparam):
    """Backward sampling in natural parameters given forward messages.

    Draws the last state from its filtered message, then walks backward,
    conditioning each filtered message on the sampled successor.
    Python 2 only: tuple-unpacking lambdas.
    """
    _, filter_messages = uninterleave(forward_messages)
    _, pair_params, _ = _unpack_repeated(natparam, len(filter_messages))
    pair_params = map(itemgetter(0, 1), pair_params)
    # unit/bind prepend samples so the result is in forward time order.
    unit = lambda sample: [sample]
    bind = lambda result, step: [step(result[0])] + result
    sample = lambda (J11, J12), (J_filt, h_filt): lambda next_sample: \
        natural_sample(*natural_condition_on(J_filt, h_filt, next_sample, J11, J12))
    steps = reversed(map(sample, pair_params, filter_messages[:-1]))
    last_sample = natural_sample(*filter_messages[-1])
    samples = monad_runner(bind)(unit(last_sample), steps)
    return np.array(samples)
def natural_lds_sample(natparam, data):
    """Draw one posterior state trajectory (natural-parameter filtering
    followed by backward sampling); the log normalizer is discarded."""
    forward_messages, lognorm = natural_filter_forward(natparam, data)
    sample = natural_sample_backward(forward_messages, natparam)
    return sample
### slightly less efficient method for testing against main method
def natural_lds_inference_general_nosaving(natparam, node_params):
    """Reference implementation for testing the main inference routine.

    Runs the forward filter twice (once inside grad, once outside) instead
    of saving intermediates -- slightly less efficient by design.
    """
    init_params, pair_params = natparam
    def lds_log_normalizer(all_natparams):
        init_params, pair_params, node_params = all_natparams
        forward_messages, lognorm = natural_filter_forward_general(init_params, pair_params, node_params)
        return lognorm
    all_natparams = init_params, pair_params, node_params
    expected_stats = grad(lds_log_normalizer)(all_natparams)
    # Second forward pass to obtain the messages for sampling.
    forward_messages, lognorm = natural_filter_forward_general(init_params, pair_params, node_params)
    sample = natural_sample_backward_general(forward_messages, pair_params)
    return sample, expected_stats, lognorm
|
from common.run_method import RunMethod
import allure
@allure.step("极客数学帮(家长APP)/用户管理/删除用户设备绑定关系")
def pushRelationship_delete(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Delete a user's device push-binding (parent APP / user management).

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response (default True)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) passed through
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户管理/删除用户设备绑定关系"
    url = f"/service-profile/pushRelationship"
    res = RunMethod.run_request("DELETE", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户管理/新增用户设备绑定关系")
def pushRelationship_post(params=None, body=None, header=None, return_json=True, **kwargs):
    '''
    Create a user's device push-binding (parent APP / user management).

    :param params: query-string parameters appended to the URL
    :param body: request body
    :param return_json: return the JSON-decoded response (default True)
    :param header: request headers
    :param kwargs: extra options (e.g. target host/environment) passed through
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户管理/新增用户设备绑定关系"
    url = f"/service-profile/pushRelationship"
    res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
    return res
|
'''
Copyright 2012 Will Rogers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author Will Rogers
'''
from apel.db.records import Record, InvalidRecordException
from datetime import datetime, timedelta
import logging
# get the relevant logger
log = logging.getLogger(__name__)
class StorageRecord(Record):
    '''
    Class to represent one storage record.

    It knows about the structure of the MySQL table and the message format.
    It stores its information in a dictionary self._record_content. The keys
    are in the same format as in the messages, and are case-sensitive.
    '''
    # Fields that must be present in an incoming message.
    MANDATORY_FIELDS = ["RecordId", "CreateTime", "StorageSystem",
                        "StartTime", "EndTime",
                        "ResourceCapacityUsed"]
    # This list specifies the information that goes in the database.
    DB_FIELDS = ["RecordId", "CreateTime", "StorageSystem", "Site", "StorageShare",
                 "StorageMedia", "StorageClass", "FileCount", "DirectoryPath",
                 "LocalUser", "LocalGroup", "UserIdentity",
                 "Group", "StartTime", "EndTime",
                 "ResourceCapacityUsed", "LogicalCapacityUsed",
                 "ResourceCapacityAllocated"]
    ALL_FIELDS = DB_FIELDS

    def __init__(self):
        '''Provide the necessary lists containing message information.'''
        Record.__init__(self)
        # Fields which are required by the message format.
        self._mandatory_fields = StorageRecord.MANDATORY_FIELDS
        # This list specifies the information that goes in the database.
        self._db_fields = StorageRecord.DB_FIELDS
        # Fields which are accepted but currently ignored.
        self._ignored_fields = []
        self._all_fields = self._db_fields
        # Fields parsed/stored as datetimes.
        self._datetime_fields = ["CreateTime", "StartTime", "EndTime"]
        # Fields which will have an integer stored in them.
        self._int_fields = ["FileCount", "ResourceCapacityUsed", "LogicalCapacityUsed", "ResourceCapacityAllocated"]

    def get_apel_db_insert(self, apel_db, source):
        '''
        Returns record content as a tuple, appending the source of the record
        (i.e. the sender's DN).

        NOTE(review): the docstring previously claimed the stored procedure is
        also returned, but only the values tuple is -- confirm against callers.
        '''
        # BUG FIX: get_db_tuple is a bound method, so the original call
        # self.get_db_tuple(self, source) passed `self` twice and raised a
        # TypeError; the source is the only argument needed.
        values = self.get_db_tuple(source)
        return values

    def get_db_tuple(self, source):
        '''
        Return the parent class's tuple minus its last item, which is not
        usable for storage records.
        '''
        return Record.get_db_tuple(self, source)[:-1]
|
from PyQt4.QtGui import QColor, QImage
from images.image_converter import ImageConverter
#from utils.logging import klog
import math
import time
class ImageComparator(object):
    """Block-matching comparison utilities between two images (Python 2:
    relies on integer '/' division and 'except Exception, ex' syntax)."""

    def __init__(self, image):
        # Reference image; may be a QImage or a PIL image.
        self.image = image

    def get_motion_vectors(self, image2, searcher, MAD_threshold = None):
        """
        1) Divide self.image into blocks of searcher.block_size pixels
        2) for each block:
           - get its X and Y position
           - ask `searcher` to find the best-matching block in image2
           - keep the resulting vector unless its MAD exceeds MAD_threshold
        Returns a list of dicts with keys x, y, to_x, to_y, MAD,
        MAD_checks_count.
        """
        # Normalize both inputs to PIL images so .load() gives pixel access.
        if isinstance(self.image, QImage):
            image1 = ImageConverter.qtimage_to_pil_image(self.image)
        else:
            image1 = self.image
        images1_pixels = image1.load()
        if isinstance(image2, QImage):
            image2 = ImageConverter.qtimage_to_pil_image(image2)
        images2_pixels = image2.load()
        width = image1.size[0]
        height = image1.size[1]
        vectors = []
        # Iterate whole blocks only; a partial edge block is ignored
        # (Python 2 integer division).
        for block_x_num in range(0, width/searcher.block_size):
            block_x_pos = searcher.block_size*block_x_num
            for block_y_num in range(0, height/searcher.block_size):
                block_y_pos = searcher.block_size*block_y_num
                (new_x, new_y, MAD, MAD_checks_count) = searcher.search(images1_pixels, block_x_pos, block_y_pos, images2_pixels)
                valid_vector = True
                if MAD_threshold and MAD > MAD_threshold:
                    #Discard the vector if the MAD is over ranged
                    valid_vector = False
                if valid_vector:
                    #if (block_x_pos != new_x) or (block_y_pos != new_y):
                    vector = { 'x': block_x_pos, 'y': block_y_pos, 'to_x' : new_x, 'to_y': new_y, 'MAD': MAD, 'MAD_checks_count': MAD_checks_count}
                    vectors.append(vector)
        return vectors

    @classmethod
    def calculate_MAD_v2(cls, image1_pixels, image2_pixels):
        """Mean absolute difference over flat, equal-length pixel sequences."""
        sum_MAD = 0.0
        pixels_count = len(image1_pixels)
        for p in range(pixels_count):
            pixel_1 = image1_pixels[p]
            pixel_2 = image2_pixels[p]
            if isinstance(pixel_1, tuple):
                luminance_1 = pixel_1[0] #is already in luminance mode (red=green=blue)
                luminance_2 = pixel_2[0] #already in luminance mode (red=green=blue)
            else:
                luminance_1 = pixel_1
                luminance_2 = pixel_2
            sum_MAD += abs( luminance_1-luminance_2 )
        return sum_MAD/pixels_count

    @classmethod
    def calculate_MAD(cls, image1_pixels, image2_pixels, width, height):
        """Mean absolute difference over 2D pixel-access objects.

        NOTE(review): loops start at 1, so row/column 0 is skipped, yet the
        divisor is width*height -- presumably intentional; confirm.
        """
        sum_MAD = 0.0
        for x in range(1, width):
            for y in range(1, height):
                luminance_1 = image1_pixels[x,y][0] #is already in luminance mode (red=green=blue)
                luminance_2 = image2_pixels[x,y][0] #already in luminance mode (red=green=blue)
                sum_MAD += abs( luminance_1-luminance_2 )
        return sum_MAD/(width*height)

    @classmethod
    def calculate_PSNR(cls, image1, image2, width, height):
        """Peak signal-to-noise ratio (dB) between two RGB images.

        Identical images return the sentinel 100000 to avoid dividing by a
        zero MSE.
        """
        image1_pixels = image1.load()
        image2_pixels = image2.load()
        MSE = 0
        MAX = 255  # peak value for 8-bit channels
        for x in range(1, width):
            for y in range(1, height):
                red_1 = image1_pixels[x,y][0]
                red_2 = image2_pixels[x,y][0]
                delta_red = math.pow(red_1-red_2, 2)
                green_1 = image1_pixels[x,y][1]
                green_2 = image2_pixels[x,y][1]
                delta_green = math.pow(green_1-green_2, 2)
                blue_1 = image1_pixels[x,y][2]
                blue_2 = image2_pixels[x,y][2]
                delta_blue = math.pow(blue_1-blue_2, 2)
                MSE += delta_red + delta_green +delta_blue
        MSE /= (width*height*3)
        if MSE != 0:
            PSNR = 10* math.log( math.pow(MAX, 2)/MSE, 10)
        else:
            #Perfect Image, avoid division by zero
            PSNR = 100000
        return PSNR

    @classmethod
    def is_valid_coordinate(cls, x, y, block_size, pixels):
        """True if the block at (x, y) lies entirely inside `pixels`
        (probed by indexing; out-of-range access raises)."""
        try:
            ok1 = pixels[x, y]
            ok2 = pixels[x+block_size-1, y+block_size-1]
            return True
        except Exception, ex:
            return False

    @classmethod
    def is_valid_x_coordinate(cls, x, block_size, image):
        # Block must start at or after 0 and end within the image width.
        return x >=0 and x+block_size <= image.size[0]

    @classmethod
    def is_valid_y_coordinate(cls, y, block_size, image):
        # Block must start at or after 0 and end within the image height.
        return y >=0 and y+block_size <= image.size[1]

    @classmethod
    def longest_motion_vector(cls, motion_vectors):
        """Return (vector, distance) for the longest Euclidean displacement;
        ({}, 0) when the list is empty or all vectors have zero length."""
        longest_vector = {}
        max_distance = 0
        for vector in motion_vectors:
            distance = math.sqrt( math.pow(vector['x']-vector['to_x'], 2) + math.pow(vector['y']-vector['to_y'], 2) )
            if distance > max_distance:
                max_distance = distance
                longest_vector = vector
        return (longest_vector, max_distance)
|
from rest_framework import serializers
from .models import Dictribution, Film, Activity, Message
class DictributionSerializer(serializers.HyperlinkedModelSerializer):
    # Exposes every field of the model as hyperlinked API output.
    # NOTE(review): "Dictribution" looks like a typo for "Distribution",
    # but the name must match the model declared in .models.
    class Meta:
        model = Dictribution
        fields = '__all__'
class FilmSerializer(serializers.HyperlinkedModelSerializer):
    # Unlike the other serializers here, only a subset of fields is exposed:
    # the film name, the hyperlink, and the primary key.
    class Meta:
        model = Film
        fields = ('film_name', 'url', 'id')
class ActivitySerializer(serializers.HyperlinkedModelSerializer):
    # All Activity fields exposed as hyperlinked API output.
    class Meta:
        model = Activity
        fields = '__all__'
class MessageSerializer(serializers.HyperlinkedModelSerializer):
    # All Message fields exposed as hyperlinked API output.
    class Meta:
        model = Message
        fields = '__all__'
|
import os

from twilio.rest import Client

# SECURITY FIX: the account SID and auth token were hard-coded here.
# Credentials committed to source control must be considered leaked —
# rotate them — and read from the environment instead.
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']

client = Client(account_sid, auth_token)

# Send a single SMS; the recipient/sender numbers are fixed for this script.
message = client.messages.create(
    to="+17743137029",
    from_="+16176525131",
    body="This is an automated message"
)
print(message.sid)
|
from core import web, view
from aiohttp.web import Response
class UserController:
    """HTTP handlers for user-facing routes."""

    @web.get('/login')
    @view.json
    def login(self, request):
        return Response(body=b'Fack the system')

    @web.get('/blog/{id}')
    @view.json
    def blog(self, id):
        # BUG FIX: ``b'{}'.format(id)`` raises AttributeError on Python 3
        # (bytes has no .format). Format as text, then encode for the body.
        return Response(body='{}'.format(id).encode('utf-8'))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# django
from django.contrib import admin
# 3rd-party
from eav.admin import BaseEntityAdmin, BaseSchemaAdmin
# this app
from models import Product,Schema,Choice,Category,Filter,FilterValue,Customer,Order,TopMenu,Cart,Item,MyOrder,OrderProduct,Sort
from forms import ProductForm
@admin.register(Product)
class ProductAdmin(BaseEntityAdmin):
    # EAV-aware admin; ProductForm supplies the dynamic attribute fields.
    form = ProductForm
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
    # Default admin behaviour; no customisation needed.
    pass
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
    # Show both name parts in the changelist.
    list_display = ['first_name', 'second_name']
@admin.register(TopMenu)
class TopMenuAdmin(admin.ModelAdmin):
    # Menu entries listed by label and target URL.
    list_display = ['name', 'url']
@admin.register(Filter)
class FilterAdmin(admin.ModelAdmin):
    # Default admin behaviour; no customisation needed.
    pass
@admin.register(FilterValue)
class FilterValueAdmin(admin.ModelAdmin):
    # Show the owning filter plus the value's label and raw value.
    list_display = ['filter', 'title', 'value']
@admin.register(Sort)
class SortAdmin(admin.ModelAdmin):
    # Sort options listed by label and raw value.
    list_display = ['title', 'value']
# Schema uses the EAV-aware admin; Choice falls back to the default ModelAdmin.
admin.site.register(Schema, BaseSchemaAdmin)
admin.site.register(Choice)
|
# Tutorial script demonstrating Python string indexing, operators, escapes,
# formatting, and common methods.
# FIX: the original bound the name ``str`` (shadowing the builtin); the
# examples below use ``text`` instead so the builtin type stays usable.
text = 'programming'
print("str = ", text)
# first character accessing
# FIX: label brackets were mismatched ("str[0 = ", "str[-1) = ", ...).
print("str[0] = ", text[0])
# print last character
print("str[-1] = ", text[-1])
# slicing 2nd to 5th character
print("str[1:5] = ", text[1:5])
# slicing 6th to 2nd last character
print("str[5:-2] = ", text[5:-2])

# String operators
str1 = "Hello"
str2 = "World"
# concatenation using +
print("str1 + str2 = ", str1 + str2)
# repetition using *
print("str1 * 3 = ", str1 * 3)
# concatenate in multiple lines with parentheses
string2 = ("Hello"
           " world")
print(string2)

# Count the occurrences of a letter in a string with a for loop
count = 0
for letter in "Hello World":
    if letter == 'l':
        count += 1
print(count, " letters 'l' found")

# triple quote
text = """He said, what's there?"""
print(text)
# escaping single quotes
text = 'He said, what\'s there?'
print(text)
# escaping double quotes
text = "He said \"what's there?\""
print(text)
# examples
print("C:\\Python\\Lib")
print("This is printed \nin two lines")
print("This is \x48 \x45 \x58 representation")
# ignoring escape sequences with a raw string
print("This is \x61 \ngood example")
print(r"This is a \x61 \ngood example")

# Formatting strings with the format() method
print("Binary representation of {0} is {0:b}".format(12))
# formatting floats
print("Exponential representation of {0} is {0:e}".format(1566.345))
# round off
print("One third is {0:.3f}".format(1/3))
# String alignment
print("|{:<10}|{:^10}|{:>10}|".format("butter", "bread", "ham"))
# old-style %-formatting
x = 12.3456789
print("The value of x is %3.4f" % x)

# Python string methods
print("PrOGrAMMinG".lower())
print("ProGRAmminG".upper())
text = "This is a phrase"
# Split a sentence into a list of words
words = text.split()
print(words)
# Join a list of words into a string
print(" ".join(words))
text = "Happy birthday"
# replace a word of a string
print(text.replace("birthday", "halloween"))
|
import unittest
import coc_package
class TestAddFunction(unittest.TestCase):
    """Unit tests for coc_package.add."""
    def test_add_for_ints(self):
        # Integer addition matches the builtin + operator.
        self.assertEqual(coc_package.add(3, 5), 3 + 5)
    def test_add_error(self):
        # NOTE(review): plain int + str raises TypeError; expecting
        # AttributeError implies add() does something custom — confirm
        # against the coc_package implementation.
        with self.assertRaises(AttributeError):
            coc_package.add(3, "5")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from django.db import models
from workprogramsapp.expertise.models import Expertise, ExpertiseComments
from workprogramsapp.models import Topic, WorkProgram
from django.conf import settings
class UserNotification(models.Model):
    """
    Base class for user notifications.
    """
    # Allowed read-state values: (stored value, human-readable label).
    status_choices = (
        ('read', 'read'),
        ('unread', 'unread'),
    )
    # New notifications start unread.
    status = models.CharField(max_length=30, choices=status_choices, verbose_name='Статус нотификации',
                              default='unread')
    # Recipient; nullable, so a notification may exist without a user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, blank=True, null=True)
    message = models.CharField(max_length=4096, verbose_name="Сообщение нотификации", blank=True, null=True)
    # Set automatically on creation.
    notification_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
    class Meta:
        # Newest notifications first.
        ordering = ('-notification_date',)
class ExpertiseNotification(UserNotification):
    """
    Notifications about an expertise review.
    """
    expertise = models.ForeignKey(Expertise, on_delete=models.CASCADE, blank=True, null=True)
class NotificationComments(UserNotification):
    # Notification that points at a newly created expertise comment.
    comment_new = models.ForeignKey(ExpertiseComments, on_delete=models.CASCADE, blank=True, null=True)
|
import NeuralNetwork
import Neuron
import Loader
def main():
    """Train the network on dataWithTeacher.txt, then measure the sign
    classifier on dataWithoutTeacher.txt and print the success rate.

    Both data files use a literal "EOF" line as the end-of-data sentinel.
    """
    Neuron.eta = 0.09     # learning rate
    Neuron.alpha = 0.015  # momentum
    # 1 input -> 2 hidden -> 1 output
    topology = [1, 2, 1]
    net = NeuralNetwork.Network(topology)
    err = 0
    for _ in range(1):  # single training epoch
        # FIX: file handle renamed from ``input`` so the builtin is not shadowed.
        with open("dataWithTeacher.txt", 'r') as infile:
            line = infile.readline()
            while line != "EOF":
                data = Loader.loadDataWithTeacher(line)
                net.setInput([data.input])
                net.dataProcess()
                net.backPropagate([data.output])
                err = net.calculateError([data.output])
                line = infile.readline()
    print("Error: ", err)
    print("Calculating...")
    fails = 0
    attempts = 0
    with open("dataWithoutTeacher.txt", 'r') as infile:
        line = infile.readline()
        while line != "EOF":
            attempts += 1
            data = float(line)
            net.setInput([data])
            net.dataProcess()
            line = infile.readline()
            result = net.getResults()[0]
            # Per the fail conditions: the net should output ~0 for negative
            # inputs and ~1 for positive ones; the wrong side counts as a fail.
            if data < 0 and abs(result - 1) < abs(result - 0):
                fails += 1
            elif data > 0 and abs(result - 1) > abs(result - 0):
                fails += 1
    # BUG FIX: guard against ZeroDivisionError when the file holds no samples.
    if attempts:
        print("% of Success: ", (attempts - fails) / attempts)
    else:
        print("% of Success: no samples")


if __name__ == '__main__':
    main()
|
# For each of three integers read from stdin, print how many repeated
# floor-divisions by 3 (counting the starting value) bring it to <= 3.
for _ in range(3):
    value = int(input())
    steps = 1
    while value > 3:
        value //= 3
        steps += 1
    print(steps)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-01 19:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an optional free-text ``note`` field
    # to the core.Client model.
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='client',
            name='note',
            field=models.TextField(blank=True, max_length=400, null=True),
        ),
    ]
|
from urllib.parse import urlparse

# Template placeholder; substituted with a real URL before the script runs.
url = '{{ .VARIABLE }}'
u = urlparse(url)
try:
    print('Scheme: ' + u.scheme)
    print('netloc: ' + u.netloc)
    print('path: ' + u.path)
    print('params: ' + u.params)
    print('query: ' + u.query)
    print('fragment: ' + u.fragment)
    print('username: ' + str(u.username))
    print('password: ' + str(u.password))
    print('hostname: ' + str(u.hostname))
    print('port: ' + str(u.port))  # .port raises ValueError for a bad port
except Exception:
    # BUG FIX: was a bare ``except:`` (also catches SystemExit/KeyboardInterrupt)
    # and printed the return value of print_exc(), which is always None.
    # print_exc() already writes the traceback to stderr.
    import traceback
    traceback.print_exc()
|
import os
import subprocess

# FIX: os.system built a shell command string from os.getcwd(); a working
# directory containing spaces or shell metacharacters would break or inject
# into the command. Pass an argument list with no shell instead.
subprocess.run(
    ["docker", "run", "--rm", "-it",
     "-v", "{}:/app".format(os.getcwd()),
     "jmengxy/util", "bash"],
    check=False,  # match os.system: ignore the exit status
)
|
from prereise.cli.data_sources import get_data_sources_list
from prereise.cli.data_sources.solar_data import (
SolarDataGriddedAtmospheric,
SolarDataNationalSolarRadiationDatabase,
)
from prereise.cli.data_sources.wind_data import WindDataRapidRefresh
def test_get_data_sources_list():
    """The registry lists wind, gridded-solar, and NSRDB sources in order."""
    expected_types = (
        WindDataRapidRefresh,
        SolarDataGriddedAtmospheric,
        SolarDataNationalSolarRadiationDatabase,
    )
    data_sources = get_data_sources_list()
    for position, expected_type in enumerate(expected_types):
        assert isinstance(data_sources[position], expected_type)
|
import argparse
import os
import opts.ref as ref
class Opts:
    """Command-line option handling; parsed options are combined with the
    shared ``ref`` module and dumped to <saveDir>/opt.txt for record."""
    def __init__(self):
        self.parser = argparse.ArgumentParser()
    def init(self):
        # Declare the supported command-line flags.
        self.parser.add_argument('-expID', default='default', help='Experiment ID')
        self.parser.add_argument('-DEBUG', type=int, default=0, help='Debug')
        self.parser.add_argument('-data', default='default', help='Input data file')
    def parse(self):
        """Parse argv, derive saveDir, write args and refs to opt.txt.

        Returns the populated argparse namespace.
        """
        self.init()
        self.opt = self.parser.parse_args()
        self.opt.nThreads = ref.nThreads
        self.opt.saveDir = os.path.join(ref.expDir, self.opt.expID)
        if self.opt.DEBUG > 0:
            # Single-threaded while debugging.
            ref.nThreads = 1
        # Snapshot of the public attributes of the options and of ref.
        args = dict((name, getattr(self.opt, name)) for name in dir(self.opt)
                    if not name.startswith('_'))
        refs = dict((name, getattr(ref, name)) for name in dir(ref)
                    if not name.startswith('_'))
        if not os.path.exists(self.opt.saveDir):
            os.makedirs(self.opt.saveDir)
        file_name = os.path.join(self.opt.saveDir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('==> Args:\n')
            for k, v in sorted(args.items()):
                opt_file.write(' %s: %s\n' % (str(k), str(v)))
            # NOTE(review): header says "Args" again although this section
            # lists the ref values.
            opt_file.write('==> Args:\n')
            for k, v in sorted(refs.items()):
                opt_file.write(' %s: %s\n' % (str(k), str(v)))
        return self.opt
|
"""
孙竹鸿
"""
from flask import Blueprint
szh = Blueprint('szh',__name__)
from .views import *
|
from django.shortcuts import render, render_to_response
from login_and_reg.forms import userCreationForm
from login_and_reg.forms import queryForm
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.core.context_processors import csrf, request
from django.contrib import auth
def login(request):
    """Render the login form page with a CSRF token in the context."""
    c = {}
    c.update(csrf(request)) #csrf token to improve security
    return render_to_response('login.html', c)
def auth_view(request):
    """Authenticate the POSTed username/password and redirect to the
    logged-in page on success, or the invalid-login page otherwise."""
    username = request.POST.get('username', '')
    password = request.POST.get('password', '')
    user = auth.authenticate(username=username, password=password)
    if user is not None:
        auth.login(request, user)
        return HttpResponseRedirect('/login_and_reg/loggedin/')
    else:
        return HttpResponseRedirect('/login_and_reg/invalid_login/')
def loggedin(request):
    """Landing page after successful login.

    NOTE(review): passes the username under the key 'full_name'.
    """
    return render_to_response('loggedin.html',
                              {'full_name' : request.user.username}
                              )
def invalid_login(request):
    """Shown when authentication fails."""
    return render_to_response('invalid_login.html')
def logout(request):
    """End the session and render the logout confirmation page."""
    auth.logout(request)
    return render_to_response('logout.html')
#####################REGISTER###############
def register(request):
    """Create a new account from the POSTed form; visitors that are already
    authenticated are sent to the logged-in page instead."""
    if request.user.is_authenticated():
        return render_to_response('loggedin.html', {'has_account': True}) #needs to be changed - already logged in
    if request.method=='POST':
        form = userCreationForm(data=request.POST)
        if form.is_valid():
            form.save()
            return render_to_response('registered.html', {'has_account': True}) #needs to be changed - if everything is ok - reg user
    else:
        form=userCreationForm()
    # Invalid POST falls through here with the bound form so errors render.
    return render(request, "forms.html", {"form": form, "headline":"register"})
def query(request):
    """Render the query form; on a valid POST, run form.generateQuery()."""
    # if not request.user.is_authenticated():
    #     return render_to_response('autherror.html')
    if request.method=='POST':
        form=queryForm(data=request.POST)
        if form.is_valid():
            form.generateQuery()
    else:
        form=queryForm()
    c={"form": form, "headline": 'Query' }
    return render(request, "forms.html",c )
# Create your views here.
def help_page(request):
    """Render the static help page."""
    return render_to_response('help_page.html')
# Create your views here.
|
from ts3.query import TS3Connection, TS3QueryError
import logging
from waitlist.utility import config
from waitlist.utility.settings import sget_active_ts_id
from waitlist.storage.database import TeamspeakDatum
from waitlist.base import db
from time import sleep
logger = logging.getLogger(__name__)
def make_connection():
    """Open a TS3 query connection to the currently active teamspeak server.

    Returns None when teamspeak is disabled, no server is configured, or
    connecting/logging in fails.
    """
    if config.disable_teamspeak:
        return None
    teamspeak_id = sget_active_ts_id()
    if teamspeak_id is None:
        return None
    teamspeak = db.session.query(TeamspeakDatum).get(teamspeak_id)
    try:
        con = TS3Connection(teamspeak.host, teamspeak.port)
        con.login(client_login_name=teamspeak.queryName, client_login_password=teamspeak.queryPassword)
        con.use(sid=teamspeak.serverID)
        try:
            con.clientupdate(CLIENT_NICKNAME=teamspeak.clientName)
        except TS3QueryError as ex:
            # this means we already have the right name
            # newer versions of ts server name without ip
            pass
        try:
            # Move the query client into the configured channel.
            con.clientmove(cid=teamspeak.channelID, clid=0)
        except TS3QueryError as ex:
            if ex.resp.error['msg'] == "already member of channel":
                pass
            else:
                logger.error("Failed to connect to T3Query %s", ex.resp.error['msg'])
                con = None
    except TS3QueryError as ex:
        logger.error("Failed to connect to T3Query %s", ex.resp.error['msg'])
        con = None
    except Exception as ex:
        logger.error("Failed to connect to T3Query %s", ex)
        con = None
    return con
# Module-level query connection shared by every helper below.
conn = make_connection()


def change_connection():
    """Drop the shared connection and establish a fresh one."""
    if config.disable_teamspeak:
        return
    global conn
    if conn is not None:
        conn.quit()
    conn = make_connection()
def handle_dc(func, **kwargs):
    """Decorator that recovers from a dead TeamSpeak query connection.

    If the wrapped call raises a non-query exception (typically a dropped
    connection), the module connection is re-established — retrying once
    after a short delay — and the call is issued again.

    NOTE(review): when teamspeak is disabled this returns None rather than
    a callable, so decorated names become None; callers are gated on the
    same config flag.
    """
    if config.disable_teamspeak:
        return

    def func_wrapper(*argsw, **kwargsw):
        global conn
        if conn is None:
            # No connection yet: try to establish one for the next call.
            conn = make_connection()
            return
        try:
            func(*argsw, **kwargsw)
        except TS3QueryError as error:
            # Query-level errors are not connection problems; just log.
            logger.error("TS3 Query Error: %s", str(error))
        except Exception:
            # Assume the connection died: reconnect (one retry after a
            # short sleep), then re-issue the call as before.
            ncon = make_connection()
            if ncon is None:
                sleep(2)
                ncon = make_connection()
            conn = ncon
            # BUG FIX: the retry previously passed the decorator's **kwargs
            # (always empty) instead of the wrapped call's **kwargsw.
            func(*argsw, **kwargsw)

    return func_wrapper
@handle_dc
def send_poke(name, msg):
    """Poke the TS3 client whose nickname is ``name`` with ``msg``.

    If no exact match is found, retries with a ``*`` prefix (convention
    used by deaf users).
    """
    if config.disable_teamspeak:
        return
    global conn
    try:
        response = conn.clientfind(pattern=name)
    except TS3QueryError as er:
        logger.info("TS3 ClientFind failed on %s with %s", name, str(er))
        response = []
    found = False
    for resp in response:
        if resp['client_nickname'] == name:
            conn.clientpoke(clid=resp['clid'], msg=msg)
            found = True
    # deaf people put a * in front
    if not found:
        try:
            response = conn.clientfind(pattern="*"+name)
        except TS3QueryError as er:
            logger.info("TS3 ClientFind failed on %s with %s", "*"+name, str(er))
            return
        for resp in response:
            if resp['client_nickname'] == "*"+name:
                conn.clientpoke(msg=msg, clid=resp['clid'])
@handle_dc
def move_to_safety_channel(name: str, channel_id: int) -> None:
    """Move the client named ``name`` (or ``*``+name) into ``channel_id``.

    Silently returns when the client cannot be found.
    """
    if config.disable_teamspeak:
        return
    try:
        response = conn.clientfind(pattern=name)
    except TS3QueryError as er:
        logger.info("TS3 ClientFind failed on %s with %s", name, str(er))
        response = []
    client = None
    for resp in response:
        if resp['client_nickname'] == name:
            client = resp
    if client is None:
        # Retry with the "deaf" naming convention (leading *).
        try:
            response = conn.clientfind(pattern="*"+name)
        except TS3QueryError as er:
            logger.info("TS3 ClientFind failed on %s with %s", "*"+name, str(er))
            return
        for resp in response:
            if resp['client_nickname'] == "*"+name:
                client = resp
    if client is None:  # we didn't find a user
        return
    conn.clientmove(clid=client['clid'], cid=channel_id)
    return
|
# Monotonic (non-increasing) stack. Each stack entry is (index, round),
# where ``round`` is the step in which nums[index] gets removed.
class Solution:
    def totalSteps(self, nums: List[int]) -> int:
        """Number of rounds needed until the array is non-decreasing,
        removing every element smaller than its left neighbour each round."""
        pending = []  # stack of (index, removal_round)
        result = 0
        for idx, value in enumerate(nums):
            rounds = 1
            # Pop everything this element would absorb; it can only be
            # removed after all of those are gone.
            while pending and nums[pending[-1][0]] <= value:
                _, prev_rounds = pending.pop()
                rounds = max(rounds, prev_rounds + 1)
            # Only elements with a larger element to their left get removed.
            if pending:
                result = max(result, rounds)
            pending.append((idx, rounds))
        return result
|
#!/usr/bin/env python3
#
# Copyright (C) 2020 Cambridge Astronomical Survey Unit
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <https://www.gnu.org/licenses/>.
#
import os
import subprocess
import pytest
import ifu.workflow
import mos.workflow.mos_stage1
@pytest.fixture(scope='module')
def mos_field_template(master_cat, tmpdir_factory):
    """Create the MOS field template FITS file once per test module."""
    file_path = str(
        tmpdir_factory.mktemp('aux').join('mos_field_template.fits'))
    mos.workflow.mos_stage1.create_mos_field_template(master_cat, file_path)
    assert os.path.exists(file_path)
    return file_path
def test_fitscheck_mos_field_template(mos_field_template):
    # fitscheck validates FITS-standard compliance and checksums;
    # exit code 0 means the file is clean.
    returncode = subprocess.call(['fitscheck', mos_field_template])
    assert returncode == 0
def test_fitsdiff_mos_field_template(mos_field_template,
                                     pkg_mos_field_template):
    """The generated template matches the one shipped with the package,
    ignoring checksum keywords that legitimately differ."""
    returncode = subprocess.call([
        'fitsdiff', '-k', 'CHECKSUM,DATASUM', mos_field_template,
        pkg_mos_field_template
    ])
    assert returncode == 0
@pytest.fixture(scope='module')
def mos_field_cat(mos_field_template, tmpdir_factory):
    """Build an example MOS field catalogue from the template fixture."""
    file_path = str(tmpdir_factory.mktemp('output').join('mos_field_cat.fits'))
    # Example data and keyword values provided by the workflow module.
    data_dict = mos.workflow.mos_stage1._get_data_dict_for_example()
    trimester, author, report_verbosity, cc_report = \
        mos.workflow.mos_stage1._set_keywords_info_for_example()
    mos.workflow.mos_stage1.create_mos_field_cat(
        mos_field_template,
        data_dict,
        file_path,
        trimester,
        author,
        report_verbosity=report_verbosity,
        cc_report=cc_report)
    assert os.path.exists(file_path)
    return file_path
def test_fitscheck_mos_field_cat(mos_field_cat):
    # Generated catalogue must pass the FITS standard/checksum validation.
    returncode = subprocess.call(['fitscheck', mos_field_cat])
    assert returncode == 0
def test_fitsdiff_mos_field_cat(mos_field_cat, pkg_mos_field_cat):
    """The generated catalogue matches the packaged reference, ignoring
    checksum and timestamp keywords that legitimately differ."""
    returncode = subprocess.call([
        'fitsdiff', '-k', 'CHECKSUM,DATASUM,DATETIME', mos_field_cat,
        pkg_mos_field_cat
    ])
    assert returncode == 0
|
import json
import unittest
import responses
import pyyoutube
class ApiVideoCategoryTest(unittest.TestCase):
    """Tests for Api.get_video_categories using mocked HTTP responses."""
    BASE_PATH = "testdata/apidata/categories/"
    BASE_URL = "https://www.googleapis.com/youtube/v3/videoCategories"
    # Canned API responses, loaded once at class-definition time.
    with open(BASE_PATH + "video_category_single.json", "rb") as f:
        VIDEO_CATEGORY_SINGLE = json.loads(f.read().decode("utf-8"))
    with open(BASE_PATH + "video_category_multi.json", "rb") as f:
        VIDEO_CATEGORY_MULTI = json.loads(f.read().decode("utf-8"))
    with open(BASE_PATH + "video_category_by_region.json", "rb") as f:
        VIDEO_CATEGORY_BY_REGION = json.loads(f.read().decode("utf-8"))
    def setUp(self) -> None:
        self.api = pyyoutube.Api(api_key="api key")
    def testGetVideoCategories(self) -> None:
        # test params
        with self.assertRaises(pyyoutube.PyYouTubeException):
            self.api.get_video_categories()
        # test parts
        with self.assertRaises(pyyoutube.PyYouTubeException):
            self.api.get_video_categories(category_id="id", parts="id,not_part")
        # Mocked responses are consumed in registration order by the
        # three successful calls below.
        with responses.RequestsMock() as m:
            m.add("GET", self.BASE_URL, json=self.VIDEO_CATEGORY_SINGLE)
            m.add("GET", self.BASE_URL, json=self.VIDEO_CATEGORY_MULTI)
            m.add("GET", self.BASE_URL, json=self.VIDEO_CATEGORY_BY_REGION)
            # Single id, raw JSON output.
            res_by_single = self.api.get_video_categories(
                category_id="17",
                parts=["snippet"],
                return_json=True,
            )
            self.assertEqual(res_by_single["kind"], "youtube#videoCategoryListResponse")
            self.assertEqual(len(res_by_single["items"]), 1)
            self.assertEqual(res_by_single["items"][0]["id"], "17")
            # Multiple ids, model output.
            res_by_multi = self.api.get_video_categories(
                category_id=["17", "18"],
                parts="snippet",
            )
            self.assertEqual(len(res_by_multi.items), 2)
            self.assertEqual(res_by_multi.items[1].id, "18")
            # Lookup by region code.
            res_by_region = self.api.get_video_categories(
                region_code="US",
                parts="snippet",
            )
            self.assertEqual(len(res_by_region.items), 32)
            self.assertEqual(res_by_region.items[0].id, "1")
|
#!/usr/bin/env python
import roslib
roslib.load_manifest('baxter_rr_bridge')
import rospy
import baxter_interface
from sensor_msgs.msg import PointCloud
from sensor_msgs.msg import Imu
from std_msgs.msg import UInt16
from std_msgs.msg import Empty
from baxter_core_msgs.msg import SEAJointState
import tf
import sys, argparse
import struct
import time
from collections import OrderedDict
import RobotRaconteur as RR
import thread
import threading
import numpy
baxter_servicedef="""
#Service to provide simple interface to Baxter
service BaxterPeripheral_interface
option version 0.4
struct NavigatorState
field uint8 ok_button
field uint8 cancel_button
field uint8 show_button
field uint8 scroll_wheel
field uint8 inner_led
field uint8 outer_led
end struct
struct SonarPointCloud
field single[] sensors
field single[] distances
field single[] points
end struct
struct framePose
field double[] position
field double[] quaternion
end struct
object BaxterPeripherals
function void openGripper(string gripper)
function void closeGripper(string gripper)
function void calibrateGripper(string gripper)
function void setGripperPosition(string gripper, double position)
function void setGripperVelocity(string gripper, double velocity)
function void setGripperHoldForce(string gripper, double force)
function void setGripperMoveForce(string gripper, double force)
function void setGripperDeadband(string gripper, double deadband)
function single getGripperPosition(string gripper)
function single getGripperForce(string gripper)
function single getRangerValue(string arm)
function single getAccelerometerValue(string arm)
function void panHead(double angle)
function single getHeadPanAngle()
function void nodHead()
function void enableSonar()
function void disableSonar()
property SonarPointCloud sonar_pointcloud
function void suppressBodyAvoidance(string limb, uint8 suppress)
function void suppressCollisionAvoidance(string limb, uint8 suppress)
function void suppressContactSafety(string limb, uint8 suppress)
function void suppressCuffInteraction(string limb, uint8 suppress)
function void suppressGravityCompensation(string limb, uint8 suppress)
property double[] gravity_compensation_torques
function NavigatorState getNavigatorState(string navigator)
function void setNavigatorLEDs(string navigator, uint8 inner_led, uint8 outer_led)
function framePose lookUptransforms(string target_frame, string source_frame)
end object
"""
class BaxterPeripherals_impl(object):
    def __init__(self):
        """Initialise the ROS node and wire up all Baxter peripherals:
        grippers, hand rangers, accelerometers, head, sonar, safety
        suppressions, gravity-compensation feedback, navigators, and tf.

        NOTE: Python 2 code (print statements). Callbacks referenced here
        (e.g. grav_comp_callback) are defined later in the original file.
        """
        print "Initializing Node"
        rospy.init_node('baxter_peripherals')
        self._running = True
        # Accept short or long limb names from clients.
        self._valid_limb_names = {'left': 'left',
                                  'l': 'left',
                                  'right': 'right',
                                  'r': 'right'}
        # gripper initialization
        self._grippers = {'left': baxter_interface.Gripper('left'),
                          'right': baxter_interface.Gripper('right')}
        # Set grippers to defaults
        self._grippers['left'].set_parameters(
            self._grippers['left'].valid_parameters())
        self._grippers['right'].set_parameters(
            self._grippers['right'].valid_parameters())
        # ranger initialization
        self._rangers = {'left': baxter_interface.AnalogIO('left_hand_range'),
                         'right': baxter_interface.AnalogIO('right_hand_range')}
        # accelerometer initialization: latest [x, y, z] per arm, updated by
        # the Imu callbacks below.
        self._accelerometers = {'left': [0.0]*3, 'right': [0.0]*3}
        rospy.Subscriber("/robot/accelerometer/left_accelerometer/state",
                         Imu,
                         self.left_accel_callback)
        rospy.Subscriber("/robot/accelerometer/right_accelerometer/state",
                         Imu,
                         self.right_accel_callback)
        # head control initialization
        self._head = baxter_interface.Head()
        # sonar initialization
        self._sonar_pointcloud = RR.RobotRaconteurNode.s.NewStructure(
            'BaxterPeripheral_interface.SonarPointCloud' )
        self._sonar_state_sub = rospy.Subscriber(
            "/robot/sonar/head_sonar/state",
            PointCloud,
            self.sonar_callback)
        self._sonar_enable_pub = rospy.Publisher(
            "/robot/sonar/head_sonar/set_sonars_enabled",
            UInt16,
            latch=True)
        # initially all sonar sensors on (4095 = 12-bit all-ones mask)
        self._sonar_enabled = True
        self._sonar_enable_pub.publish(4095)
        # suppressions: per-limb flags plus latched Empty publishers; the
        # background worker republishes while a flag is set.
        self._suppress_body_avoidance = {'left': False, 'right': False}
        self._supp_body_avoid_pubs = {'left':
            rospy.Publisher("/robot/limb/left/suppress_body_avoidance",
                            Empty,
                            latch=True),
            'right':
            rospy.Publisher("/robot/limb/right/suppress_body_avoidance",
                            Empty,
                            latch=True)}
        self._suppress_collision_avoidance = {'left': False, 'right': False}
        self._supp_coll_avoid_pubs = {'left':
            rospy.Publisher("/robot/limb/left/suppress_collision_avoidance",
                            Empty,
                            latch=True),
            'right':
            rospy.Publisher("/robot/limb/right/suppress_collision_avoidance",
                            Empty,
                            latch=True)}
        self._suppress_contact_safety = {'left': False, 'right': False}
        self._supp_con_safety_pubs = {'left':
            rospy.Publisher("/robot/limb/left/suppress_contact_safety",
                            Empty,
                            latch=True),
            'right':
            rospy.Publisher("/robot/limb/right/suppress_contact_safety",
                            Empty,
                            latch=True)}
        self._suppress_cuff_interaction = {'left': False, 'right': False}
        self._supp_cuff_int_pubs = {'left':
            rospy.Publisher("/robot/limb/left/suppress_cuff_interaction",
                            Empty,
                            latch=True),
            'right':
            rospy.Publisher("/robot/limb/right/suppress_cuff_interaction",
                            Empty,
                            latch=True)}
        self._suppress_gravity_compensation = {'left': False, 'right': False}
        self._supp_grav_comp_pubs = {'left':
            rospy.Publisher("/robot/limb/left/suppress_gravity_compensation",
                            Empty,
                            latch=True),
            'right':
            rospy.Publisher("/robot/limb/right/suppress_gravity_compensation",
                            Empty,
                            latch=True)}
        # start suppressions background thread
        self._t_suppressions = threading.Thread(
            target=self.suppressions_worker)
        self._t_suppressions.daemon = True
        self._t_suppressions.start()
        # gravity compensation subscription: 14 joints (7 per arm), keyed by
        # joint name, updated by grav_comp_callback.
        self._grav_comp_lock = threading.Lock()
        self._gravity_compensation_torques = OrderedDict(
            zip(baxter_interface.Limb('left').joint_names() + \
                baxter_interface.Limb('right').joint_names(),
                [0.0]*14))
        rospy.Subscriber("/robot/limb/left/gravity_compensation_torques",
                         SEAJointState, self.grav_comp_callback)
        rospy.Subscriber("/robot/limb/right/gravity_compensation_torques",
                         SEAJointState, self.grav_comp_callback)
        # navigators
        self._navigators = {'left': baxter_interface.Navigator('left'),
                            'right': baxter_interface.Navigator('right'),
                            'torso_left':
                            baxter_interface.Navigator('torso_left'),
                            'torso_right':
                            baxter_interface.Navigator('torso_right')}
        # initialize frame transform
        self._listener = tf.TransformListener()
    def close(self):
        # Stop the suppressions worker and wait for it to exit.
        self._running = False
        self._t_suppressions.join()
    # gripper functions
    def openGripper(self, gripper):
        """Fully open the named gripper ('left'/'l'/'right'/'r');
        unknown names are silently ignored."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            self._grippers[self._valid_limb_names[gripper]].open()
    def closeGripper(self, gripper):
        """Fully close the named gripper; unknown names are ignored."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            self._grippers[self._valid_limb_names[gripper]].close()
    def calibrateGripper(self,gripper):
        """Run the gripper calibration routine; unknown names are ignored."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            self._grippers[self._valid_limb_names[gripper]].calibrate()
    def setGripperPosition(self,gripper,position):
        """Command the named gripper to the given position; unknown names
        are ignored."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            self._grippers[self._valid_limb_names[gripper]].command_position(
                position)
    def setGripperVelocity(self,gripper,velocity):
        """Set the named gripper's travel velocity; unknown names are ignored."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            self._grippers[self._valid_limb_names[gripper]].set_velocity(
                velocity)
    def setGripperHoldForce(self,gripper,force):
        """Set the force applied while holding an object; unknown names are ignored."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            self._grippers[self._valid_limb_names[gripper]].set_holding_force(
                force)
    def setGripperMoveForce(self,gripper,force):
        """Set the force applied while moving; unknown names are ignored."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            self._grippers[self._valid_limb_names[gripper]].set_moving_force(
                force)
    def setGripperDeadband(self,gripper,deadband):
        """Set the position dead band of the named gripper; unknown names
        are ignored."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            self._grippers[self._valid_limb_names[gripper]].set_dead_band(
                deadband)
    def getGripperPosition(self, gripper):
        """Return the named gripper's current position, or None for an
        unknown gripper name."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            return self._grippers[self._valid_limb_names[gripper]].position()
    def getGripperForce(self, gripper):
        """Return the named gripper's current force reading, or None for an
        unknown gripper name."""
        gripper = gripper.lower()
        if gripper in self._valid_limb_names.keys():
            return self._grippers[self._valid_limb_names[gripper]].force()
    # Hand rangers
    def getRangerValue(self, arm):
        """Return the hand range sensor reading for the named arm, or None
        for an unknown arm name."""
        arm = arm.lower()
        if arm in self._valid_limb_names.keys():
            return self._rangers[self._valid_limb_names[arm]].state()
    # Accelerometers
    def getAccelerometerValue(self, arm):
        """Return the latest [x, y, z] acceleration for the named arm, or
        None for an unknown arm name."""
        arm = arm.lower()
        if arm in self._valid_limb_names.keys():
            return self._accelerometers[self._valid_limb_names[arm]]
    def left_accel_callback(self, data):
        # Imu subscriber callback: cache the latest left-arm acceleration.
        if (data.linear_acceleration):
            self._accelerometers['left'][0] = data.linear_acceleration.x
            self._accelerometers['left'][1] = data.linear_acceleration.y
            self._accelerometers['left'][2] = data.linear_acceleration.z
    def right_accel_callback(self, data):
        # Imu subscriber callback: cache the latest right-arm acceleration.
        if (data.linear_acceleration):
            self._accelerometers['right'][0] = data.linear_acceleration.x
            self._accelerometers['right'][1] = data.linear_acceleration.y
            self._accelerometers['right'][2] = data.linear_acceleration.z
    # head control functions
    def panHead(self, angle):
        """Pan the head to the given angle (radians per baxter_interface)."""
        self._head.set_pan(angle)
    def getHeadPanAngle(self):
        """Return the current head pan angle."""
        return self._head.pan()
    def nodHead(self):
        """Command a single head nod gesture."""
        self._head.command_nod()
    # sonar functions
    @property
    def sonar_pointcloud(self):
        """Latest sonar data as an RR SonarPointCloud structure (fields are
        None when the last scan had no points)."""
        return self._sonar_pointcloud
    def sonar_callback(self, data):
        """PointCloud subscriber callback: flatten the message into the RR
        structure. Channel 0 carries sensor ids, channel 1 distances; the
        points array is packed as x,y,z triples."""
        if data.points:
            # fill array
            pCloud = []
            for p in data.points:
                pCloud.append(p.x)
                pCloud.append(p.y)
                pCloud.append(p.z)
            self._sonar_pointcloud.sensors = tuple(data.channels[0].values)
            self._sonar_pointcloud.distances = tuple(data.channels[1].values)
            self._sonar_pointcloud.points = tuple(pCloud)
        else:
            # Empty scan: clear all fields.
            self._sonar_pointcloud.sensors = None
            self._sonar_pointcloud.distances = None
            self._sonar_pointcloud.points = None
    def enableSonar(self):
        """Enable all sonar sensors and resubscribe to the state topic.
        No-op when sonar is already enabled."""
        if not self._sonar_enabled:
            self._sonar_enabled = True
            # 4095 = 12-bit mask enabling every sensor.
            self._sonar_enable_pub.publish(4095)
            self._sonar_state_sub = \
                rospy.Subscriber("/robot/sonar/head_sonar/state",
                                 PointCloud,
                                 self.sonar_callback)
    def disableSonar(self):
        """Disable all sonar sensors and stop listening to the state topic.
        No-op when sonar is already disabled."""
        if self._sonar_enabled:
            self._sonar_enabled = False
            self._sonar_enable_pub.publish(0)
            self._sonar_state_sub.unregister()
    # Suppression functions
    def suppressBodyAvoidance(self, limb, suppress):
        """Toggle body-avoidance suppression for a limb; the actual topic
        publishing is done continuously by the suppressions worker."""
        limb = limb.lower()
        if limb in self._valid_limb_names.keys():
            # No change requested — nothing to do.
            if self._suppress_body_avoidance[self._valid_limb_names[limb]] == \
                    (suppress > 0):
                return
            self._suppress_body_avoidance[self._valid_limb_names[limb]] = \
                (suppress > 0)
            if self._suppress_body_avoidance[self._valid_limb_names[limb]]:
                print 'Suppressing Body Avoidance for limb ', limb
            else:
                print 'Enabling Body Avoidance for limb ', limb
    def suppressCollisionAvoidance(self, limb, suppress):
        """Toggle collision-avoidance suppression for a limb; publishing is
        handled by the suppressions worker."""
        limb = limb.lower()
        if limb in self._valid_limb_names.keys():
            # No change requested — nothing to do.
            if self._suppress_collision_avoidance[ \
                    self._valid_limb_names[limb]] == \
                    (suppress > 0):
                return
            self._suppress_collision_avoidance[ \
                self._valid_limb_names[limb]] = (suppress > 0)
            if self._suppress_collision_avoidance[ \
                    self._valid_limb_names[limb]]:
                print 'Suppressing Collision Avoidance for limb ', limb
            else:
                print 'Enabling Collision Avoidance for limb ', limb
def suppressContactSafety(self, limb, suppress):
    """Toggle contact-safety suppression for `limb` ('left'/'right', any case).

    suppress > 0 requests suppression; only acts (and prints) when the
    state actually changes. Messages are published by suppressions_worker.
    Unknown limb names are silently ignored.
    """
    limb = limb.lower()
    if limb in self._valid_limb_names.keys():
        # Already in the requested state: nothing to do.
        if self._suppress_contact_safety[ \
                self._valid_limb_names[limb]] == \
                (suppress > 0):
            return
        self._suppress_contact_safety[ \
            self._valid_limb_names[limb]] = (suppress > 0)
        if self._suppress_contact_safety[self._valid_limb_names[limb]]:
            print 'Suppressing Contact Safety for limb ', limb
        else:
            print 'Enabling Contact Safety for limb ', limb
def suppressCuffInteraction(self, limb, suppress):
    """Toggle cuff-interaction suppression for `limb` ('left'/'right', any case).

    suppress > 0 requests suppression; only acts (and prints) when the
    state actually changes. Messages are published by suppressions_worker.
    Unknown limb names are silently ignored.
    """
    limb = limb.lower()
    if limb in self._valid_limb_names.keys():
        # Already in the requested state: nothing to do.
        if self._suppress_cuff_interaction[\
                self._valid_limb_names[limb]] == \
                (suppress > 0):
            return
        self._suppress_cuff_interaction[self._valid_limb_names[limb]] = \
            (suppress > 0)
        if self._suppress_cuff_interaction[self._valid_limb_names[limb]]:
            print 'Suppressing Cuff Interaction for limb ', limb
        else:
            print 'Enabling Cuff Interaction for limb ', limb
def suppressGravityCompensation(self, limb, suppress):
    """Toggle gravity-compensation suppression for `limb` ('left'/'right', any case).

    suppress > 0 requests suppression; only acts (and prints) when the
    state actually changes. Messages are published by suppressions_worker.
    Unknown limb names are silently ignored.
    """
    limb = limb.lower()
    if limb in self._valid_limb_names.keys():
        # Already in the requested state: nothing to do.
        if self._suppress_gravity_compensation[\
                self._valid_limb_names[limb]] == \
                (suppress > 0):
            return
        self._suppress_gravity_compensation[ \
            self._valid_limb_names[limb]] = \
            (suppress > 0)
        if self._suppress_gravity_compensation[ \
                self._valid_limb_names[limb]]:
            print 'Suppressing Gravity Compensation for limb ', limb
        else:
            print 'Enabling Gravity Compensation for limb ', limb
def publishSuppressions(self, limb):
    """Publish one suppression message for every suppression currently
    active on `limb` (same fixed order as the flag dictionaries)."""
    suppression_pairs = (
        (self._suppress_body_avoidance, self._supp_body_avoid_pubs),
        (self._suppress_collision_avoidance, self._supp_coll_avoid_pubs),
        (self._suppress_contact_safety, self._supp_con_safety_pubs),
        (self._suppress_cuff_interaction, self._supp_cuff_int_pubs),
        (self._suppress_gravity_compensation, self._supp_grav_comp_pubs),
    )
    for active_flags, publishers in suppression_pairs:
        if active_flags[limb]:
            publishers[limb].publish()
# worker function to continuously publish suppression commands at >5Hz
def suppressions_worker(self):
    """Background loop: while self._running, republish the active
    suppressions for both limbs every 50 ms (the surrounding code notes
    the robot needs these at >5 Hz to keep them in effect)."""
    while self._running:
        time.sleep(0.05)
        self.publishSuppressions('left')
        self.publishSuppressions('right')
# gravity compensation info functions
@property
def gravity_compensation_torques(self):
    """The cached per-joint gravity-model efforts, as the dict's values
    (populated by grav_comp_callback, keyed by joint name)."""
    return self._gravity_compensation_torques.values()
def grav_comp_callback(self, data):
    """Cache each joint's gravity_model_effort under its joint name.

    Holds _grav_comp_lock while writing so readers of the cached dict
    see a consistent snapshot. Empty effort arrays are ignored.
    """
    with self._grav_comp_lock:
        if data.gravity_model_effort:
            for n in xrange(0,len(data.name)):
                self._gravity_compensation_torques[data.name[n]] = \
                    data.gravity_model_effort[n]
# navigator functions
def getNavigatorState(self, navigator):
    """Snapshot one navigator's buttons, wheel and LEDs.

    Returns a NavigatorState Robot Raconteur structure, or None when
    `navigator` is not a known navigator name.
    """
    if navigator not in self._navigators:
        return None
    nav = self._navigators[navigator]
    state = RR.RobotRaconteurNode.s.NewStructure(
        'BaxterPeripheral_interface.NavigatorState')
    state.ok_button = nav.button0
    state.cancel_button = nav.button1
    state.show_button = nav.button2
    state.scroll_wheel = nav.wheel
    state.inner_led = nav.inner_led
    state.outer_led = nav.outer_led
    return state
def setNavigatorLEDs(self, navigator, inner_led, outer_led):
    """Set a navigator's inner/outer LEDs; any value > 0 turns the LED on.

    Unknown navigator names are silently ignored.
    """
    if navigator not in self._navigators:
        return
    nav = self._navigators[navigator]
    nav.inner_led = inner_led > 0
    nav.outer_led = outer_led > 0
def lookUptransforms(self, target_frame, source_frame):
    """Return the latest TF transform from source_frame to target_frame
    as a framePose Robot Raconteur structure (position + quaternion)."""
    position, quaternion = self._listener.lookupTransform(
        target_frame, source_frame, rospy.Time(0))
    pose = RR.RobotRaconteurNode.s.NewStructure(
        'BaxterPeripheral_interface.framePose' )
    pose.position = list(position)
    pose.quaternion = list(quaternion)
    return pose
def main(argv):
    """Host the BaxterPeripherals Robot Raconteur service over TCP.

    Parses --port (0 = let the OS pick), registers the TCP transport and
    the service definition, exposes a BaxterPeripherals_impl instance,
    then blocks until the user presses enter before shutting down.
    """
    # parse command line arguments
    parser = argparse.ArgumentParser(
        description='Initialize Baxter Peripherals.')
    parser.add_argument('--port', type=int, default = 0,
        help='TCP port to host service on ' + \
             '(will auto-generate if not specified)')
    args = parser.parse_args(argv)
    #Enable numpy
    RR.RobotRaconteurNode.s.UseNumPy=True
    #Set the Node name
    RR.RobotRaconteurNode.s.NodeName="BaxterPeripheralServer"
    #Create transport, register it, and start the server
    print "Registering Transport"
    t = RR.TcpTransport()
    t.EnableNodeAnnounce(RR.IPNodeDiscoveryFlags_NODE_LOCAL |
                         RR.IPNodeDiscoveryFlags_LINK_LOCAL |
                         RR.IPNodeDiscoveryFlags_SITE_LOCAL)
    RR.RobotRaconteurNode.s.RegisterTransport(t)
    t.StartServer(args.port)
    port = args.port
    if (port == 0):
        # Port 0 asked the OS for any free port; report the one actually bound.
        port = t.GetListenPort()
    #Register the service type and the service
    print "Starting Service"
    RR.RobotRaconteurNode.s.RegisterServiceType(baxter_servicedef)
    #Initialize object
    baxter_obj = BaxterPeripherals_impl()
    RR.RobotRaconteurNode.s.RegisterService("BaxterPeripherals",
        "BaxterPeripheral_interface.BaxterPeripherals",
        baxter_obj)
    print "Service started, connect via"
    print "tcp://localhost:" + str(port) + \
        "/BaxterPeripheralServer/BaxterPeripherals"
    raw_input("press enter to quit...\r\n")
    baxter_obj.close()
    # This must be here to prevent segfault
    RR.RobotRaconteurNode.s.Shutdown()
# Script entry point: forward the CLI args (without the program name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
|
import sunspec2.spreadsheet as spreadsheet
import pytest
import csv
import copy
import json
def test_idx():
    """idx() returns a column's position; with mandatory=True a missing
    column raises ValueError."""
    row = ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count', 'Type', 'Size', 'Scale Factor',
           'Units', 'RW Access (RW)', 'Mandatory (M)', 'Static (S)', 'Label', 'Description', 'Detailed Description']
    assert spreadsheet.idx(row, 'Address Offset') == 0
    # Bug fix: the setup (`del row[0]`) used to live *inside* the
    # pytest.raises block together with a never-evaluated assert wrapper;
    # mutate first so only the call under test can raise.
    del row[0]
    with pytest.raises(ValueError):
        spreadsheet.idx(row, 'Address Offset', mandatory=True)
def test_row_is_empty():
    """row_is_empty() is true only when the row holds no data at all."""
    row = ['' for _ in range(10)]
    assert spreadsheet.row_is_empty(row, 0)
    row[0] = 'abc'
    assert not spreadsheet.row_is_empty(row, 0)
def test_find_name():
    """find_name() returns the point dict with the given name, or None."""
    points = [
        {
            "name": "Inclx",
            "type": "int32",
            "mandatory": "M",
            "units": "Degrees",
            "sf": -2,
            "label": "X",
            "desc": "X-Axis inclination"
        },
        {
            "name": "Incly",
            "type": "int32",
            "units": "Degrees",
            "sf": -2,
            "label": "Y",
            "desc": "Y-Axis inclination"
        },
        {
            "name": "Inclz",
            "type": "int32",
            "units": "Degrees",
            "sf": -2,
            "label": "Z",
            "desc": "Z-Axis inclination"
        }
    ]
    assert spreadsheet.find_name(points, 'abc') is None
    assert spreadsheet.find_name(points, 'Incly') == points[1]
def test_element_type():
    """Placeholder: spreadsheet element-type handling is not yet covered."""
    pass
def test_from_spreadsheet():
    """A spreadsheet (with notes column) round-trips into the expected
    nested model-definition dict for SunSpec model 304."""
    model_spreadsheet = [
        ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count', 'Type', 'Size', 'Scale Factor', 'Units',
         'RW Access (RW)', 'Mandatory (M)', 'Static (S)', 'Label', 'Description', 'Detailed Description'],
        ['', '', 'inclinometer', '', '', 'group', '', '', '', '', '', '', 'Inclinometer Model', 'Include to support orientation measurements', ''],
        [0, '', 'ID', 304, '', 'uint16', '', '', '', '', 'M', 'S', 'Model ID', 'Model identifier', ''],
        [1, '', 'L', '', '', 'uint16', '', '', '', '', 'M', 'S', 'Model Length', 'Model length', ''],
        ['', '', 'inclinometer.incl', '', 0, 'group', '', '', '', '', '', '', '', '', ''],
        ['', 0, 'Inclx', '', '', 'int32', '', -2, 'Degrees', '', 'M', '', 'X', 'X-Axis inclination', ''],
        ['', 2, 'Incly', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Y', 'Y-Axis inclination', ''],
        ['', 4, 'Inclz', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Z', 'Z-Axis inclination', '']
    ]
    # Expected model definition produced from the rows above.
    model_def = {
        "id": 304,
        "group": {
            "name": "inclinometer",
            "type": "group",
            "points": [
                {
                    "name": "ID",
                    "value": 304,
                    "desc": "Model identifier",
                    "label": "Model ID",
                    "mandatory": "M",
                    "static": "S",
                    "type": "uint16"
                },
                {
                    "name": "L",
                    "desc": "Model length",
                    "label": "Model Length",
                    "mandatory": "M",
                    "static": "S",
                    "type": "uint16"
                }
            ],
            "groups": [
                {
                    "name": "incl",
                    "type": "group",
                    "count": 0,
                    "points": [
                        {
                            "name": "Inclx",
                            "type": "int32",
                            "mandatory": "M",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "X",
                            "desc": "X-Axis inclination"
                        },
                        {
                            "name": "Incly",
                            "type": "int32",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "Y",
                            "desc": "Y-Axis inclination"
                        },
                        {
                            "name": "Inclz",
                            "type": "int32",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "Z",
                            "desc": "Z-Axis inclination"
                        }
                    ]
                }
            ],
            "label": "Inclinometer Model",
            "desc": "Include to support orientation measurements"
        }
    }
    assert spreadsheet.from_spreadsheet(model_spreadsheet) == model_def
def test_to_spreadsheet():
    """A model definition serializes to the expected spreadsheet rows
    (no Detailed Description column in this direction)."""
    model_spreadsheet = [
        ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count', 'Type', 'Size', 'Scale Factor', 'Units',
         'RW Access (RW)', 'Mandatory (M)', 'Static (S)', 'Label', 'Description'],
        ['', '', 'inclinometer', '', '', 'group', '', '', '', '', '', '', 'Inclinometer Model', 'Include to support orientation measurements'],
        [0, '', 'ID', 304, '', 'uint16', '', '', '', '', 'M', 'S', 'Model ID', 'Model identifier'],
        [1, '', 'L', '', '', 'uint16', '', '', '', '', 'M', 'S', 'Model Length', 'Model length'],
        ['', '', 'inclinometer.incl', '', 0, 'group', '', '', '', '', '', '', '', ''],
        ['', 0, 'Inclx', '', '', 'int32', '', -2, 'Degrees', '', 'M', '', 'X', 'X-Axis inclination'],
        ['', 2, 'Incly', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Y', 'Y-Axis inclination'],
        ['', 4, 'Inclz', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Z', 'Z-Axis inclination']
    ]
    # Input model definition (same shape as in test_from_spreadsheet).
    model_def = {
        "id": 304,
        "group": {
            "name": "inclinometer",
            "type": "group",
            "points": [
                {
                    "name": "ID",
                    "value": 304,
                    "desc": "Model identifier",
                    "label": "Model ID",
                    "mandatory": "M",
                    "static": "S",
                    "type": "uint16"
                },
                {
                    "name": "L",
                    "desc": "Model length",
                    "label": "Model Length",
                    "mandatory": "M",
                    "static": "S",
                    "type": "uint16"
                }
            ],
            "groups": [
                {
                    "name": "incl",
                    "type": "group",
                    "count": 0,
                    "points": [
                        {
                            "name": "Inclx",
                            "type": "int32",
                            "mandatory": "M",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "X",
                            "desc": "X-Axis inclination"
                        },
                        {
                            "name": "Incly",
                            "type": "int32",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "Y",
                            "desc": "Y-Axis inclination"
                        },
                        {
                            "name": "Inclz",
                            "type": "int32",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "Z",
                            "desc": "Z-Axis inclination"
                        }
                    ]
                }
            ],
            "label": "Inclinometer Model",
            "desc": "Include to support orientation measurements"
        }
    }
    assert spreadsheet.to_spreadsheet(model_def) == model_spreadsheet
def test_to_spreadsheet_group():
    """to_spreadsheet_group() appends the group row, point rows, a comment
    row, and symbol rows in document order."""
    model_def = {
        "group": {
            "desc": "DER capacity model.",
            "label": "DER Capacity",
            "name": "DERCapacity",
            "points": [
                {
                    "access": "R",
                    "desc": "DER capacity model id.",
                    "label": "DER Capacity Model ID",
                    "mandatory": "M",
                    "name": "ID",
                    "static": "S",
                    "type": "uint16",
                    "value": 702
                },
                {
                    "access": "R",
                    "desc": "DER capacity name model length.",
                    "label": "DER Capacity Model Length",
                    "mandatory": "M",
                    "name": "L",
                    "static": "S",
                    "type": "uint16"
                },
                {
                    "access": "R",
                    "comments": [
                        "Nameplate Ratings - Specifies capacity ratings"
                    ],
                    "desc": "Maximum active power rating at unity power factor in watts.",
                    "label": "Active Power Max Rating",
                    "mandatory": "O",
                    "name": "WMaxRtg",
                    "sf": "W_SF",
                    "type": "uint16",
                    "units": "W",
                    "symbols": [
                        {
                            "name": "CAT_A",
                            "value": 1
                        },
                        {
                            "name": "CAT_B",
                            "value": 2
                        }
                    ]
                }
            ],
            "type": "group"
        },
        "id": 702
    }
    ss = []
    spreadsheet.to_spreadsheet_group(ss, model_def['group'], has_notes=False)
    # Note the comment row carries its text in column 0; symbol rows carry
    # name/value in columns 2 and 3.
    assert ss == [
        ['', '', 'DERCapacity', '', '', 'group', '', '', '', '', '', '', 'DER Capacity', 'DER capacity model.'],
        ['', 0, 'ID', 702, '', 'uint16', '', '', '', '', 'M', 'S', 'DER Capacity Model ID',
         'DER capacity model id.'],
        ['', 1, 'L', '', '', 'uint16', '', '', '', '', 'M', 'S', 'DER Capacity Model Length',
         'DER capacity name model length.'],
        ['Nameplate Ratings - Specifies capacity ratings', '', '', '', '', '', '', '', '', '', '', '', '', ''],
        ['', 2, 'WMaxRtg', '', '', 'uint16', '', 'W_SF', 'W', '', '', '', 'Active Power Max Rating',
         'Maximum active power rating at unity power factor in watts.'],
        ['', '', 'CAT_A', 1, '', '', '', '', '', '', '', '', '', ''],
        ['', '', 'CAT_B', 2, '', '', '', '', '', '', '', '', '', '']]
def test_to_spreadsheet_point():
    """to_spreadsheet_point() emits a point row and raises descriptive
    exceptions for a missing name, a missing type, an unknown type, and a
    non-integer string size."""
    point = {
        "access": "R",
        "desc": "Abnormal operating performance category as specified in IEEE 1547-2018.",
        "label": "Abnormal Operating Category",
        "mandatory": "O",
        "name": "AbnOpCatRtg",
        "symbols": [
            {
                "name": "CAT_1",
                "value": 1
            },
            {
                "name": "CAT_2",
                "value": 2
            },
            {
                "name": "CAT_3",
                "value": 3
            }
        ],
        "type": "enum16"
    }
    ss = []
    assert spreadsheet.to_spreadsheet_point(ss, point, has_notes=False) == 1
    missing_name_p = copy.deepcopy(point)
    del missing_name_p['name']
    with pytest.raises(Exception) as exc1:
        spreadsheet.to_spreadsheet_point(ss, missing_name_p, has_notes=False)
    assert 'Point missing name attribute' in str(exc1.value)
    missing_type_p = copy.deepcopy(point)
    del missing_type_p['type']
    with pytest.raises(Exception) as exc2:
        spreadsheet.to_spreadsheet_point(ss, missing_type_p, has_notes=False)
    assert 'Point AbnOpCatRtg missing type' in str(exc2.value)
    unk_p_type = copy.deepcopy(point)
    unk_p_type['type'] = 'abc'
    with pytest.raises(Exception) as exc3:
        spreadsheet.to_spreadsheet_point(ss, unk_p_type, has_notes=False)
    assert 'Unknown point type' in str(exc3.value)
    p_size_not_int = copy.deepcopy(point)
    p_size_not_int['type'] = 'string'
    p_size_not_int['size'] = 'abc'
    with pytest.raises(Exception) as exc4:
        spreadsheet.to_spreadsheet_point(ss, p_size_not_int, has_notes=False)
    # 'iteger' reproduces the library's own misspelled error message.
    assert 'Point size is for point AbnOpCatRtg not an iteger value' in str(exc4.value)
def test_to_spreadsheet_symbol():
    """to_spreadsheet_symbol() writes name/value into columns 2/3 and
    raises when either attribute is missing."""
    symbol = {"name": "MAX_W", "value": 0}
    rows = []
    spreadsheet.to_spreadsheet_symbol(rows, symbol, has_notes=False)
    assert rows[0][2] == 'MAX_W'
    assert rows[0][3] == 0
    del symbol['value']
    with pytest.raises(Exception) as missing_value:
        spreadsheet.to_spreadsheet_symbol([], symbol, has_notes=False)
    assert 'Symbol MAX_W missing value' in str(missing_value.value)
    del symbol['name']
    with pytest.raises(Exception) as missing_name:
        spreadsheet.to_spreadsheet_symbol([], symbol, has_notes=False)
    assert 'Symbol missing name attribute' in str(missing_name.value)
def test_to_spreadsheet_comment():
    """A comment becomes a row whose first cell holds the comment text."""
    rows = []
    spreadsheet.to_spreadsheet_comment(rows, 'Scaling Factors', has_notes=False)
    assert rows[0][0] == 'Scaling Factors'
def test_spreadsheet_equal():
    """spreadsheet_equal() is true for identical sheets and raises naming
    the first differing line, or a length mismatch."""
    spreadsheet_smdx_304 = [
        ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count', 'Type', 'Size', 'Scale Factor', 'Units',
         'RW Access (RW)', 'Mandatory (M)', 'Static (S)', 'Label', 'Description', 'Detailed Description'],
        ['', '', 'inclinometer', '', '', 'group', '', '', '', '', '', '', 'Inclinometer Model',
         'Include to support orientation measurements', ''],
        ['', '', 'ID', 304, '', 'uint16', '', '', '', '', 'M', 'S', 'Model ID', 'Model identifier', ''],
        ['', '', 'L', '', '', 'uint16', '', '', '', '', 'M', 'S', 'Model Length', 'Model length', ''],
        ['', '', 'inclinometer.incl', '', 0, 'group', '', '', '', '', '', '', '', '', ''],
        ['', '', 'Inclx', '', '', 'int32', '', -2, 'Degrees', '', 'M', '', 'X', 'X-Axis inclination', ''],
        ['', '', 'Incly', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Y', 'Y-Axis inclination', ''],
        ['', '', 'Inclz', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Z', 'Z-Axis inclination', '']
    ]
    ss_copy = copy.deepcopy(spreadsheet_smdx_304)
    assert spreadsheet.spreadsheet_equal(spreadsheet_smdx_304, ss_copy)
    # Bug fix: the mutations used to happen *inside* the pytest.raises
    # blocks; mutate first so only the comparison itself can raise.
    ss_copy[0][0] = 'abc'
    with pytest.raises(Exception) as exc1:
        spreadsheet.spreadsheet_equal(spreadsheet_smdx_304, ss_copy)
    assert 'Line 1 different' in str(exc1.value)
    del ss_copy[0]
    with pytest.raises(Exception) as exc2:
        spreadsheet.spreadsheet_equal(spreadsheet_smdx_304, ss_copy)
    assert 'Different length' in str(exc2.value)
def test_from_csv():
    """Parsing the smdx_304 CSV fixture yields the expected model definition."""
    model_def = {
        "id": 304,
        "group": {
            "name": "inclinometer",
            "type": "group",
            "points": [
                {
                    "name": "ID",
                    "value": 304,
                    "desc": "Model identifier",
                    "label": "Model ID",
                    "mandatory": "M",
                    "static": "S",
                    "type": "uint16"
                },
                {
                    "name": "L",
                    "desc": "Model length",
                    "label": "Model Length",
                    "mandatory": "M",
                    "static": "S",
                    "type": "uint16"
                }
            ],
            "groups": [
                {
                    "name": "incl",
                    "type": "group",
                    "count": 0,
                    "points": [
                        {
                            "name": "Inclx",
                            "type": "int32",
                            "mandatory": "M",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "X",
                            "desc": "X-Axis inclination"
                        },
                        {
                            "name": "Incly",
                            "type": "int32",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "Y",
                            "desc": "Y-Axis inclination"
                        },
                        {
                            "name": "Inclz",
                            "type": "int32",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "Z",
                            "desc": "Z-Axis inclination"
                        }
                    ]
                }
            ],
            "label": "Inclinometer Model",
            "desc": "Include to support orientation measurements"
        }
    }
    assert model_def == spreadsheet.from_csv('sunspec2/tests/test_data/smdx_304.csv')
def test_to_csv(tmp_path):
    """to_csv() writes the same cells (compared as text) that
    to_spreadsheet() produces for the same model definition."""
    model_def = {
        "id": 304,
        "group": {
            "name": "inclinometer",
            "type": "group",
            "points": [
                {
                    "name": "ID",
                    "value": 304,
                    "desc": "Model identifier",
                    "label": "Model ID",
                    "mandatory": "M",
                    "static": "S",
                    "type": "uint16"
                },
                {
                    "name": "L",
                    "desc": "Model length",
                    "label": "Model Length",
                    "mandatory": "M",
                    "static": "S",
                    "type": "uint16"
                }
            ],
            "groups": [
                {
                    "name": "incl",
                    "type": "group",
                    "count": 0,
                    "points": [
                        {
                            "name": "Inclx",
                            "type": "int32",
                            "mandatory": "M",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "X",
                            "desc": "X-Axis inclination"
                        },
                        {
                            "name": "Incly",
                            "type": "int32",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "Y",
                            "desc": "Y-Axis inclination"
                        },
                        {
                            "name": "Inclz",
                            "type": "int32",
                            "units": "Degrees",
                            "sf": -2,
                            "label": "Z",
                            "desc": "Z-Axis inclination"
                        }
                    ]
                }
            ],
            "label": "Inclinometer Model",
            "desc": "Include to support orientation measurements"
        }
    }
    ss = spreadsheet.to_spreadsheet(model_def)
    spreadsheet.to_csv(model_def, filename=tmp_path / 'smdx_304.csv')
    # Compare every CSV cell (as text, since CSV loses numeric types)
    # against the in-memory spreadsheet; any mismatch flips the flag.
    same_data = True
    row_num = 0
    idx = 0
    with open(tmp_path / 'smdx_304.csv') as csvfile:
        csvreader = csv.reader(csvfile)
        for row in csvreader:
            idx = 0
            for i in row:
                if str(ss[row_num][idx]) != str(i):
                    same_data = False
                idx += 1
            row_num += 1
    assert same_data
def test_spreadsheet_from_csv():
    """Every row read from the smdx_304 CSV fixture matches the expected
    spreadsheet cell-for-cell."""
    spreadsheet_smdx_304 = [
        ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count', 'Type', 'Size', 'Scale Factor', 'Units',
         'RW Access (RW)', 'Mandatory (M)', 'Static (S)', 'Label', 'Description', 'Detailed Description'],
        ['', '', 'inclinometer', '', '', 'group', '', '', '', '', '', '', 'Inclinometer Model',
         'Include to support orientation measurements', ''],
        ['', '', 'ID', 304, '', 'uint16', '', '', '', '', 'M', 'S', 'Model ID', 'Model identifier', ''],
        ['', '', 'L', '', '', 'uint16', '', '', '', '', 'M', 'S', 'Model Length', 'Model length', ''],
        ['', '', 'inclinometer.incl', '', 0, 'group', '', '', '', '', '', '', '', '', ''],
        ['', '', 'Inclx', '', '', 'int32', '', -2, 'Degrees', '', 'M', '', 'X', 'X-Axis inclination', ''],
        ['', '', 'Incly', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Y', 'Y-Axis inclination', ''],
        ['', '', 'Inclz', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Z', 'Z-Axis inclination', '']
    ]
    # Bug fix: the original reset its `same` flag on every row and only
    # asserted once after the loop, so effectively only the final row was
    # checked. Assert each cell as it is read instead.
    for row_idx, row in enumerate(spreadsheet.spreadsheet_from_csv('sunspec2/tests/test_data/smdx_304.csv')):
        for col_idx, value in enumerate(row):
            assert value == spreadsheet_smdx_304[row_idx][col_idx]
def test_spreadsheet_to_csv(tmp_path):
    """Cells written by spreadsheet_to_csv() read back identically when
    compared as text (CSV does not preserve numeric types)."""
    spreadsheet_smdx_304 = [
        ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count', 'Type', 'Size', 'Scale Factor', 'Units',
         'RW Access (RW)', 'Mandatory (M)', 'Static (S)', 'Label', 'Description', 'Detailed Description'],
        ['', '', 'inclinometer', '', '', 'group', '', '', '', '', '', '', 'Inclinometer Model',
         'Include to support orientation measurements', ''],
        [0, '', 'ID', 304, '', 'uint16', '', '', '', '', 'M', 'S', 'Model ID', 'Model identifier', ''],
        [1, '', 'L', '', '', 'uint16', '', '', '', '', 'M', 'S', 'Model Length', 'Model length', ''],
        ['', '', 'inclinometer.incl', '', 0, 'group', '', '', '', '', '', '', '', '', ''],
        ['', 0, 'Inclx', '', '', 'int32', '', -2, 'Degrees', '', 'M', '', 'X', 'X-Axis inclination', ''],
        ['', 2, 'Incly', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Y', 'Y-Axis inclination', ''],
        ['', 4, 'Inclz', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Z', 'Z-Axis inclination', '']
    ]
    csv_path = tmp_path / 'smdx_304.csv'
    spreadsheet.spreadsheet_to_csv(spreadsheet_smdx_304, filename=csv_path)
    matches = True
    with open(csv_path) as csvfile:
        for row_index, row in enumerate(csv.reader(csvfile)):
            for col_index, cell in enumerate(row):
                if str(cell) != str(spreadsheet_smdx_304[row_index][col_index]):
                    matches = False
    assert matches
|
import sys
sys.path.append('.')
import time
import signal
from multiprocessing import Pipe, Process, Event
from movementControl import MovementControl
from src.hardware.serialhandler.serialhandler import SerialHandler
from src.hardware.camera.cameraprocess import CameraProcess
from laneKeeping import LaneKeeping
from src.utils.camerastreamer.camerastreamer import CameraStreamer
# =============================== CONFIG =================================================
# Feature switches: constant forward drive, lane-keeping steering, live video stream.
enableConstantForward = True
enableLateralControl = True
enableStream = False
#================================ PROCESSES ==============================================
allProcesses = list()
# lcR/lcS: lane keeping -> movement control; camStR/camStS: camera -> streamer.
lcR, lcS = Pipe(duplex = False)
camStR, camStS = Pipe(duplex = False)
if enableConstantForward:
    # MovementControl consumes lane-keeping commands and feeds SerialHandler,
    # which talks to the vehicle hardware.
    cfR, cfS = Pipe(duplex = False)
    cfProc = MovementControl([lcR], [cfS])
    allProcesses.append(cfProc)
    shProc = SerialHandler([cfR], [])
    allProcesses.append(shProc)
if enableLateralControl:
    lkR, lkS = Pipe(duplex = False)
    camOutPs = []
    if enableStream:
        # Camera frames fan out to both the lane keeper and the streamer.
        streamProc = CameraStreamer([camStR], [])
        allProcesses.append(streamProc)
        camOutPs = [lkS, camStS]
    else:
        camOutPs = [lkS]
    camProc = CameraProcess([],camOutPs)
    allProcesses.append(camProc)
    lkProc = LaneKeeping([lkR], [lcS])
    allProcesses.append(lkProc)
# Starting the processes
print("Starting the processes!",allProcesses)
for proc in allProcesses:
    proc.daemon = True
    proc.start()
# Waiting for keyboard interruption
blocker = Event()
try:
    blocker.wait()
except KeyboardInterrupt:
    # Prefer each process's own stop() hook; fall back to terminate().
    print("\nCatching a KeyboardInterruption exception! Shutdown all processes.\n")
    for proc in allProcesses:
        if hasattr(proc,'stop') and callable(getattr(proc,'stop')):
            print("Process with stop",proc)
            proc.stop()
            proc.join()
        else:
            print("Process witouth stop",proc)
            proc.terminate()
            proc.join()
|
"""
A model that was initially meant to learn motion probabilities
using input features, the model now learns the probabilities of
two states belonging to eachother.
"""
import os
# import numpy as np
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.layers import Input, Dense, Activation
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import Conv2D
from tensorflow.python.keras.layers import concatenate
from tensorflow.python.keras.layers import RepeatVector
from tensorflow.python.keras.layers import Reshape
from tensorflow.python.keras.layers import MaxPooling2D
from tensorflow.python.keras.layers import AveragePooling2D
from tensorflow.python.keras.layers import LeakyReLU
from tensorflow.python.keras.layers import Flatten
from tensorflow.python.keras.optimizers import Nadam
from tensorflow.python.keras.optimizers import SGD
from tensorflow.python.keras import backend as K
from lib.models.util.make_parallel import make_parallel
# architecture now takes two states (time t, t+1), encodes them, and
# computes their probability of matching
class DeepVelocity(object):
    """Two-leg matching network: takes two states (time t and t+1) and
    predicts the probability that they belong together.

    Each state arrives as three inputs -- structured (x, y), a latent
    vector, and a 64x64 screen. All six inputs are broadcast/reshaped to
    64x64 feature maps, concatenated channel-wise, and pushed through a
    conv stack ending in a 2-way softmax (`prob_output`).
    """
    def __init__(self, lr=0.00017654, lat_input_shape=(64,), screen_input_shape=(64,64,), structured_input_shape=(2,), verbose=False):
        """Build (but do not compile) the probability network.

        https://keras.io/getting-started/functional-api-guide/#multi-input-and-multi-output-models
        https://keras.io/getting-started/functional-api-guide/#shared-layers
        https://blog.keras.io/building-autoencoders-in-keras.html
        """
        # Gross hack, change later?
        self.lr = lr
        # Create the two state encoding legs
        structured_input_a = Input(shape=structured_input_shape)
        lat_input_a = Input(shape=lat_input_shape)
        screen_input_a = Input(shape=screen_input_shape,)
        structured_input_b = Input(shape=structured_input_shape)
        lat_input_b = Input(shape=lat_input_shape)
        screen_input_b = Input(shape=screen_input_shape)
        # Bug fix: this block previously ran *before* the inputs existed and
        # referenced names (structured_input, screen_input, lat_input) that
        # were never defined, raising NameError whenever verbose=True.
        if verbose:
            print("Network structured input shape is", structured_input_a.get_shape())
            print("Network screen input shape is", screen_input_a.get_shape())
            print("Network latent input shape is", lat_input_a.get_shape())
        eng_state_a = [structured_input_a, lat_input_a, screen_input_a]
        eng_state_b = [structured_input_b, lat_input_b, screen_input_b]
        # We want to broadcast the structured input (x, y) into their own
        # channels, each with the same dimension as the screen input
        # We can then concatenate, then convolve over the whole tensor
        x = RepeatVector(64*64)(structured_input_a)
        x = Reshape((64,64,2))(x)
        structured_output_a = x
        x = RepeatVector(64*64)(structured_input_b)
        x = Reshape((64,64,2))(x)
        structured_output_b = x
        # Similar with the latent vector, except it will simply be repeated
        # column wise
        x = RepeatVector(64)(lat_input_a)
        x = Reshape((64,64,1))(x)
        lat_output_a = x
        x = RepeatVector(64)(lat_input_b)
        x = Reshape((64,64,1))(x)
        lat_output_b = x
        # The screen is the correct shape, just add a channel dimension
        x = Reshape((64,64,1))(screen_input_a)
        screen_output_a = x
        x = Reshape((64,64,1))(screen_input_b)
        screen_output_b = x
        # Stack both states' maps channel-wise and convolve over the result.
        x = concatenate([screen_output_a, structured_output_a, lat_output_a, screen_output_b, structured_output_b, lat_output_b], axis=-1)
        print("Hello, World!", x.shape)
        x = Conv2D(16, (3,3))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        print("1", x.shape)
        x = Conv2D(32, (3,3))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(2)(x)
        print("2", x.shape)
        x = Conv2D(64, (3,3))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        print("3", x.shape)
        x = Conv2D(128, (3,3))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(2)(x)
        print("4", x.shape)
        x = Conv2D(256, (3,3))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        print("5", x.shape)
        x = Conv2D(512, (3,3))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = MaxPooling2D(2)(x)
        print("6", x.shape)
        x = Conv2D(1024, (3,3))(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        print("7", x.shape)
        # 1x1 conv down to 2 logits, pooled and softmaxed to a 2-class output.
        x = Conv2D(2, (1,1))(x)
        x = Activation('linear')(x)
        x = AveragePooling2D()(x)
        print("8", x.shape)
        x = Activation("softmax")(x)
        print("9", x.shape)
        prob_output = Reshape((2,))(x)
        print("10", prob_output.shape)
        self.probabilityNetwork = Model(inputs=eng_state_a+eng_state_b, outputs=[prob_output])
    def compile(self):
        """Compile the network with Nadam at self.lr and categorical crossentropy."""
        # print("LR: ",self.lr)
        # self.lr = 10**np.random.uniform(-2.2, -3.8)
        optimizer = Nadam(lr=self.lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004)
        # optimizer = SGD()
        # self.probabilityNetwork = make_parallel(self.probabilityNetwork, 2)
        self.probabilityNetwork.compile(
            optimizer=optimizer,
            loss='categorical_crossentropy',
            metrics=['acc', 'mse', 'categorical_crossentropy'])
    def save_weights(self, path):
        """Save only the network weights to `path`."""
        self.probabilityNetwork.save_weights(path)
    def load(self, path):
        """Load weights from `path` relative to this module's directory; returns self."""
        loc = os.path.join(self.path(), path)
        print("Loading weights", loc)
        self.probabilityNetwork.load_weights(loc)
        return self
    def save_model(self, path):
        """Save the full model (architecture + weights) to `path`."""
        self.probabilityNetwork.save(path)
    def load_model(self, path):
        """Replace the network with a full model loaded from `path`; returns self."""
        # loc = os.path.join(self.path(), path)
        # print("Loading model", path)
        self.probabilityNetwork = load_model(path)
        return self
    def path(self):
        """Directory containing this source file (used to resolve weight paths)."""
        return os.path.dirname(os.path.realpath(__file__))
def contrastive_loss(y_true, y_pred):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf

    Treats y_pred as a distance: similar pairs (y_true=1) are pulled toward
    zero, dissimilar pairs (y_true=0) are pushed beyond the margin.
    '''
    margin = 1
    return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
# Register the custom loss so load_model() can deserialize models that use it.
from tensorflow.python.keras.utils import get_custom_objects
get_custom_objects().update({"contrastive_loss": contrastive_loss})
|
import os
# Bug fix: build the path with os.path.join so it resolves on every OS --
# the original '..\settings_folder' literal only worked on Windows.
os.sys.path.insert(0, os.path.abspath(os.path.join('..', 'settings_folder')))
import settings
from utils import get_random_end_point
def test():
    """Smoke-test get_random_end_point() for every split of a fixed arena,
    printing five sampled end points per split."""
    arena_size = [60, 60, 20]
    total_num_of_splits = 3
    print("arena_size" + str(arena_size))
    print("total_num_of_splits" + str(total_num_of_splits))
    for split_index in range(total_num_of_splits):
        print("-------- split_index" + str(split_index))
        for _ in range(5):
            res = get_random_end_point(arena_size, split_index, total_num_of_splits)
            print("smapled point" + str(res))
test()
|
import random
from typing import Tuple, Callable
# A strategy is a pair of answer functions (your_move, eve_move), each mapping
# a referee-supplied input bit to an output bit.
Strategy = Tuple[Callable[[int], int], Callable[[int], int]]
def random_bit() -> int:
    """Return a uniformly random bit: 0 or 1."""
    return random.randint(0, 1)
def referee(strategy: Callable[[], Strategy]) -> bool:
    """Play one round of the parity game and report whether the players won.

    Each player receives an independent random input bit; they win when the
    parity of their answers (0 if equal, 1 if different) equals the AND of
    their inputs.
    """
    you, eve = strategy()
    your_input = random_bit()
    eve_input = random_bit()
    answers_differ = you(your_input) != eve(eve_input)
    parity = 1 if answers_differ else 0
    return parity == (your_input and eve_input)
def est_win_probability(strategy: Callable[[], Strategy],
                        n_games: int = 1000) -> float:
    """Estimate the win rate of `strategy` over `n_games` independent rounds."""
    wins = 0
    for _ in range(n_games):
        if referee(strategy):
            wins += 1
    return wins / n_games
def constant_strategy() -> Strategy:
    """Both players ignore their input and always answer 0
    (the classical strategy winning 75% of rounds)."""
    def your_move(_: int) -> int:
        return 0
    def eve_move(_: int) -> int:
        return 0
    return your_move, eve_move
|
"""print_property_table() function and RowTable class.
These are two styles of nicely formatted text tables meant for printing to
the IPython console window.
"""
from typing import Any, List, Tuple, Union
from napari.components.experimental.chunk._commands._utils import highlight
from napari.utils.translations import trans
def print_property_table(table: List[Tuple[str, Any]]) -> None:
    """Print names and values as a right-aligned two-column table.

    Example output:
        Layer ID: 0
            Name: numbered slices
          Levels: 1
           Shape: (20, 1024, 1024, 3)

    Parameters
    ----------
    table : List[Tuple[str, Any]]
        (heading, value) pairs to print, one per line.
    """
    # default=0 keeps an empty table from raising ValueError in max().
    heading_width = max((len(x) for x, _ in table), default=0)
    for heading, value in table:
        aligned = f"{heading:>{heading_width}}"
        print(f"{highlight(aligned)}: {value}")
class ColumnSpec:
    """Specification for one column in a RowTable.

    Parameters
    ----------
    spec : Union[str, dict]
        String column name, or a dict with optional 'name',
        'align' ("left"/"right") and 'width' keys.
    """

    def __init__(self, spec: Union[str, dict]) -> None:
        # A bare string is shorthand for a dict holding just the name.
        options = {'name': spec} if isinstance(spec, str) else spec
        self.name = options.get('name', "")
        self.align = options.get('align', "right")
        self.width = options.get('width')

    def format(self, value, width):
        """Return str(value) padded to `width`, honoring this column's alignment."""
        fill = "<" if self.align == "left" else ">"
        return format(str(value), f"{fill}{width}")
class RowTable:
"""A printable text table with a header and rows.
Example usage:
table = table(["NAME", "AGE"], [10, 5])
table.add_row["Mary", "25"]
table.add_row["Alice", "32"]
table.print()
Example output:
NAME AGE
Mary 25
Alice 32
Parameters
----------
headers : List[str]
The column headers such as ["NAME", "AGE"].
widths: Optional[List[int]]
Use these widths instead of automatic widths, 0 means auto for that column.
"""
# Leave room between columns.
PADDING = 2
def __init__(self, columns: List[Union[str, dict]]) -> None:
self.columns = [ColumnSpec(x) for x in columns]
self.rows: List[list] = []
self.padding = " " * self.PADDING
def add_row(self, row: List[str]) -> None:
"""Add one row of data to the table.
Parameters
----------
row : List[str]
The row values such as ["Fred", "25"].
"""
row_cols = len(row)
header_cols = len(self.columns)
if row_cols != header_cols:
raise ValueError(
trans._(
"Row with {row_cols} columns not compatible with headers ({header_cols} columns)",
deferred=True,
row_cols=row_cols,
header_cols=header_cols,
)
)
self.rows.append(row)
def _get_max_data_width(self, index: int) -> int:
"""Return maximum width of this column in the data.
Parameters
----------
index : int
Return width of this column.
Returns
-------
int
The maximum width of this column.
"""
if self.rows:
return max(len(str(row[index])) for row in self.rows)
return 0
def _get_widths(self) -> List[int]:
"""Return widths of all the columns."
Returns
-------
List[int]
The width of each column in order.
"""
widths = []
for i, spec in enumerate(self.columns):
if spec.width is not None:
width = spec.width # A fixed width column.
else:
# Auto sized column so whichever is wider: data or header.
data_width = self._get_max_data_width(i)
width = max(data_width, len(self.columns[i].name))
widths.append(width)
return widths
def get_header_str(self, widths: List[int]) -> str:
"""Return header string with all the column names.
Parameters
----------
widths : List[int]
The column widths.
Returns
-------
str
The header string.
"""
header_str = ""
for i, spec in enumerate(self.columns):
width = widths[i]
value = str(spec.name)
header_str += f"{value:<{width}}" + self.padding
return header_str
def get_row_str(self, row, widths: List[int]) -> str:
"""Get string depicting one row on the table."""
row_str = ""
for i, spec in enumerate(self.columns):
row_str += spec.format(row[i], widths[i]) + self.padding
return row_str
    def print(self):
        """Print the entire table: highlighted header line, then every data row."""
        widths = self._get_widths()
        # NOTE(review): `highlight` is defined elsewhere in the module; it is
        # applied only to the header line, not to data rows.
        print(highlight(self.get_header_str(widths)))
        for row in self.rows:
            print(self.get_row_str(row, widths))
|
"""API utilty functions."""
from rest_framework.views import get_view_name as drf_get_view_name
def get_view_name(view_cls, suffix=None):
    """Return the display name for a DRF view, spelling the root as 'API Root'.

    Bug fix: the original passed ``suffix=None`` to DRF, silently discarding
    any suffix the caller supplied; it is now forwarded.
    """
    name = drf_get_view_name(view_cls, suffix=suffix)
    if name == 'Api Root':
        return 'API Root'
    return name
|
from .index import *
from .alt import *
|
from django.db import models
class Email(models.Model):
    """Stored copy of an email message (addresses, subject, and both bodies)."""
    created = models.DateTimeField(auto_now_add=True)  # set once on insert
    fro = models.CharField(max_length=255)  # sender address ("from" is a Python keyword)
    to = models.TextField(blank=True)
    cc = models.TextField(blank=True)
    bcc = models.TextField(blank=True)
    subject = models.CharField(max_length=255)
    text = models.TextField(blank=True)  # plain-text body
    html = models.TextField(blank=True)  # HTML body
    def __unicode__(self):
        # NOTE(review): __unicode__ is Python 2 only; on Python 3 Django uses
        # __str__ — confirm the target Python version.
        return "%s > %s" % (self.fro, self.subject)
|
import numpy as np
# Task 1 (Zad1).
# Using arange, build a numpy array of the first 15 consecutive multiples of 3.
wielokrotności = np.arange(start=0, stop=3 * 15, step=3, dtype=int)
print(wielokrotności)
# Task 2 (Zad2).
# Build an array of floating-point values, then store a copy of it
# converted to int64 in another variable.
zmiennoprzecinkowe = np.arange(start=0, stop=10, step=1.1, dtype=float)
print(zmiennoprzecinkowe)
# astype returns a new array; the float original is left untouched.
przekonwertowana = zmiennoprzecinkowe.astype('int64')
print(przekonwertowana)
# Task 4 (Zad4).
# Write a function taking two parameters: the base of the exponentiation and
# the count of consecutive powers to generate. Return a one-dimensional array
# of consecutive powers of the base, e.g. funkcja_zad4(2, 4) -> [2, 4, 8, 16].
def funkcja_zad4(podstawa, ilość_kolejnych):
    """Return a 1-D int64 array of podstawa**1 .. podstawa**ilość_kolejnych.

    Fix: the original used np.logspace, which computes base**linspace(...) in
    floating point; casting that to int64 can truncate e.g. 2**3 computed as
    7.999... down to 7. Exact integer exponentiation avoids the rounding.
    Like the original, values beyond the int64 range (e.g. 20**20) overflow.
    """
    wykładniki = np.arange(1, ilość_kolejnych + 1, dtype="int64")
    return podstawa ** wykładniki
print(funkcja_zad4(2, 4))
print(funkcja_zad4(20, 20))
print(type(funkcja_zad4(2,4)))
# Task 5 (Zad5).
# Write a function that takes one parameter (the vector length), builds the
# vector in reversed order, and returns a diagonal matrix with that vector
# on its main diagonal.
def funkcja_zad5(dl_wektora):
    """Return a diagonal matrix whose diagonal counts down from dl_wektora-1 to 0."""
    odwrócony = np.arange(dl_wektora)[::-1]
    return np.diag(odwrócony)
print(funkcja_zad5(10))
# Task 7 (Zad7).
# Generate an n*n matrix with multiples of 2 on the successive diagonals
# spreading out from the main diagonal, e.g. for n=3:
# [[2 4 6]
#  [4 2 4]
#  [6 4 2]]
def funkcja_zad7(n):
    """Return the n*n matrix whose (i, j) entry is 2 * (|i - j| + 1)."""
    indeksy = np.arange(n)
    # Distance of each cell from the main diagonal, via broadcasting.
    odleglosc = np.abs(indeksy[:, None] - indeksy[None, :])
    return 2 * (odleglosc + 1)
print(funkcja_zad7(5))
# Task 9 (Zadanie 9).
# Using the Numpy functions covered in class, build an n*m matrix (5x5 by
# default) filled row by row with consecutive Fibonacci numbers.
def funkcja_zad9(n=5, m=5):
    """Return an n*m array holding the first n*m Fibonacci numbers (0, 1, 1, ...)."""
    fib = [0, 1]
    while len(fib) < n * m:
        fib.append(fib[-1] + fib[-2])
    return np.array(fib).reshape((n, m))
print(funkcja_zad9(5, 5))
|
class Employee:
    """Toy employee record demonstrating class vs. instance attributes."""
    comp_name = "sathya"  # class attribute, shared by every instance
    def __init__(self):
        # Hard-coded demo values.
        self.name = "ravi"
        self.salary = 125000.00
    def displayDetails(self):
        """Print name, salary, and company name, one per line."""
        for value in (self.name, self.salary, Employee.comp_name):
            print(value)
#---------------------
# Demo: two instances share comp_name but carry their own name/salary.
e1 = Employee()
# Prints the default object repr, so the address portion varies per run.
print("1st Object ---",e1)
e1.displayDetails()
e2 = Employee()
print("2nd Object ---",e2)
e2.displayDetails()
|
# Maximal Rectangle
# - Given a 2-d list, come up with the size of the largest rectangle
#     that consists only of 1's
# Explanation: The algorithm looks at the matrix in the similar way as the recursive
# algorithm does but it builds up the cumulative heights as it goes.
def sol_dp(matrix):
    """Return the area of the largest rectangle of '1's in a matrix of strings.

    Each row folds into a histogram of consecutive-'1' column heights; the
    largest rectangle under each histogram is found with a monotonic stack.
    This makes the scan O(rows * cols) instead of the original O(rows * cols^2)
    inner double loop.
    """
    if len(matrix) == 0:
        return 0
    heights = [0] * len(matrix[0])
    best = 0
    for row in matrix:
        for c, cell in enumerate(row):
            # A '0' breaks the column run; any other digit extends it.
            heights[c] = 0 if cell == '0' else heights[c] + int(cell)
        best = max(best, _largest_histogram_rect(heights))
    return best

def _largest_histogram_rect(heights):
    """Largest rectangle under a histogram, via a monotonic index stack."""
    stack = []  # indices of bars, heights strictly increasing
    best = 0
    n = len(heights)
    for i in range(n + 1):
        # Virtual sentinel bar of height 0 at the end flushes the stack.
        h = heights[i] if i < n else 0
        while stack and heights[stack[-1]] >= h:
            height = heights[stack.pop()]
            left = stack[-1] + 1 if stack else 0
            best = max(best, height * (i - left))
        stack.append(i)
    return best
# Helper function
def change_to_cumulative(rect):
new_rect = []
for r in range(len(rect)):
new_r = []
cumul = 0
for c in range(len(rect[r])-1, -1, -1):
curr = int(rect[r][c])
if curr == 1:
cumul += curr
else:
cumul = 0
new_r.insert(0, cumul)
new_rect.append(new_r)
return new_rect
# Explanation: The algorithm first changes the given matrix into what's called
# cumulative matrix. At every point, the value shows you how many
# reachable 1's are there cumulatively (change_to_cumulative()).
# If we think the size of matrix is equal to n, then this transformation
# takes O(n) running time.
# Next, with the new matrix, we now iterate through the whole matrix
# from the top-left corner to the bottom right corner. At every point,
#                 the algorithm calculates its biggest rectangle possible by looking at
# the cumulative numbers and update the possible maximum it iterates
# down. At each row, we compare the current minimum cumulative number
# with the current cumulative number and see if we face a smaller number.
# When the current cumulative number is smaller, than that means as we
# go deeper down, we will only be able to count as many 1's as that
# smallest cumulative number much. So we have to keep in track of
# current rectangle is biggest as we go down the rows.
# Run Time: O(mn^2). We have O(n) from the change_to_cumulative(), O(n) for the
# inner loop that goes deeper down, and the O(n) for the outer loop that
# loops through the whole rectangle, which together become O(n^2).
def sol_bruteforce(matrix):
    """Largest rectangle of 1's by scanning every possible top-left anchor.

    Cells are first replaced by the count of consecutive 1's running to the
    right; a rectangle anchored at (top, col) then grows downward while
    tracking the narrowest such count seen so far.
    """
    # Build the rightward-run-length matrix (inlined change_to_cumulative).
    cumulative = []
    for row in matrix:
        counts = [0] * len(row)
        run = 0
        for idx in range(len(row) - 1, -1, -1):
            run = run + 1 if int(row[idx]) == 1 else 0
            counts[idx] = run
        cumulative.append(counts)
    best = 0
    for top in range(len(cumulative)):
        for col in range(len(cumulative[top])):
            narrowest = cumulative[top][col]
            for bottom in range(top, len(cumulative)):
                cell = cumulative[bottom][col]
                if cell == 0:
                    # Column of 0's: no rectangle can extend further down.
                    break
                narrowest = min(narrowest, cell)
                best = max(best, (bottom - top + 1) * narrowest)
    return best
# Demo input: a 5x20 grid of "1"/"0" strings.
rect = [
    ["1","1","1","0","0","1","1","1","0","0","1","1","1","0","0","1","1","1","0","0"],
    ["1","1","1","0","0","1","1","1","0","0","1","1","1","0","0","1","1","1","1","1"],
    ["1","1","1","0","0","1","1","1","0","0","1","1","1","0","0","1","1","1","1","1"],
    ["1","0","0","1","0","1","0","0","1","0","1","0","0","1","0","1","0","0","1","0"],
    ["1","1","1","0","0","1","1","1","0","0","1","1","1","0","0","1","1","1","0","0"]
]
# Print the largest all-1's rectangle area found in the demo grid.
print(sol_dp(rect))
|
from time import strftime
import sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
import pytest
import sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
from clsCommon import Common
import clsTestService
import enums
from localSettings import *
import localSettings
from utilityTestFunc import *
class Test:
    #================================================================================================================================
    # @Author: Tzachi Guetta
    # Test description:
    #
    #================================================================================================================================
    """KMS end-to-end test: upload an entry, add its source flavor, download
    that flavor, re-upload the downloaded file, and verify it plays back."""
    testNum = "16"
    supported_platforms = clsTestService.updatePlatforms(testNum)
    status = "Pass"
    timeout_accured = "False"
    driver = None
    common = None
    # Test variables
    entryName = None
    entryDescription = "Entry description"
    entryTags = "entrytags1,entrytags2,"
    flavorsList = ["Source", "Mobile (3GP)", "Basic/Small - WEB/MBL (H264/400)","Basic/Small - WEB/MBL (H264/600)", "SD/Small - WEB/MBL (H264/900)", "SD/Large - WEB/MBL (H264/1500)","HD/720 - WEB (H264/2500)","HD/1080 - WEB (H264/4000)","WebM"]
    filePath = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\videos\QR30SecMidRight.mp4'
    #run test as different instances on all the supported platforms
    @pytest.fixture(scope='module',params=supported_platforms)
    def driverFix(self,request):
        # One fixture value (platform) per supported platform.
        return request.param
    def test_01(self,driverFix,env):
        """Main flow: login, upload, add flavor, download it, re-upload, verify."""
        #write to log we started the test
        logStartTest(self,driverFix)
        try:
            ############################# TEST SETUP ###############################
            #capture test start time
            self.startTime = time.time()
            #initialize all the basic vars and start playing
            # NOTE(review): initialize() appears to return (self, driver) — confirm.
            self,self.driver = clsTestService.initialize(self, driverFix)
            self.common = Common(self.driver)
            ########################################################################
            self.entryName = clsTestService.addGuidToString('entryName')
            # Expected local path of the flavor file once downloaded.
            self.filePathDownloaded = os.path.join(localSettings.LOCAL_SETTINGS_TEMP_DOWNLOADS, self.entryName + "_" + '(' + self.flavorsList[0] + ')' + ".mp4")
            writeToLog("INFO","SETUP: Going to create temp folder for download")
            if self.common.createFolder(localSettings.LOCAL_SETTINGS_JENKINS_NODE_SHARED_DOWNLOAD) == False:
                self.status = "Fail"
                writeToLog("INFO","SETUP: FAILED to create temp folder on")
                return
            # TO-DO: move the below line to "crate evn test"
            # self.common.admin.adminDownloadMedia(True)
            ########################## TEST STEPS - MAIN FLOW #######################
            writeToLog("INFO","Step 1: Going to perform login to KMS site as user")
            if self.common.loginAsUser() == False:
                self.status = "Fail"
                writeToLog("INFO","Step 1: FAILED to login as user")
                return
            writeToLog("INFO","Step 2: Going to upload entry")
            if self.common.upload.uploadEntry(self.filePath, self.entryName, self.entryDescription, self.entryTags) == None:
                self.status = "Fail"
                writeToLog("INFO","Step 2: FAILED failed to upload entry")
                return
            writeToLog("INFO","Step 3: Going to add flavors to the entry")
            if self.common.editEntryPage.addFlavorsToEntry(self.entryName, [self.flavorsList[0]]) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 3: FAILED add flavors to the entry")
                return
            writeToLog("INFO","Step 4: Going to Download the flavor")
            if self.common.entryPage.downloadAFlavor(self.entryName, self.flavorsList[0]) == False:
                self.status = "Fail"
                writeToLog("INFO","Step 4: FAILED to Download the flavor")
                return
            # Allow the download time to finish before re-uploading the file.
            sleep(15)
            writeToLog("INFO","Step 5: Going to upload the downloaded Flavor")
            if self.common.upload.uploadEntry(self.filePathDownloaded, self.entryName + '_Downloaded', "downloaded description", "downloadedtags1,downloadedtags2,") == None:
                self.status = "Fail"
                writeToLog("INFO","Step 5: FAILED to upload the downloaded Flavor")
                return
            writeToLog("INFO","Step 6: Going to verify uploaded entry")
            if self.common.player.navigateToEntryClickPlayPause(self.entryName + '_Downloaded', "0:07") == False:
                self.status = "Fail"
                writeToLog("INFO","Step 6: FAILED to verify uploaded entry")
                return
            #########################################################################
            writeToLog("INFO","TEST PASSED")
        # If an exception happened we need to handle it and fail the test
        except Exception as inst:
            self.status = clsTestService.handleException(self,inst,self.startTime)
    ########################### TEST TEARDOWN ###########################
    def teardown_method(self,method):
        """Best-effort cleanup: delete created entries and the temp folder."""
        try:
            self.common.handleTestFail(self.status)
            writeToLog("INFO","**************** Starting: teardown_method ****************")
            self.common.base.switch_to_default_content()
            self.common.myMedia.deleteEntriesFromMyMedia([self.entryName, self.entryName + '_Downloaded'])
            self.common.deleteFolder(localSettings.LOCAL_SETTINGS_JENKINS_NODE_SHARED_DOWNLOAD)
            writeToLog("INFO","**************** Ended: teardown_method *******************")
        except:
            # Cleanup is best-effort; the final status assert below still runs.
            pass
        clsTestService.basicTearDown(self)
        #write to log we finished the test
        logFinishedTest(self,self.startTime)
        assert (self.status == "Pass")
    # NOTE(review): this statement sits in the class body, so pytest.main runs
    # while the class is being defined (i.e. at module import) — confirm intended.
    pytest.main('test_' + testNum + '.py --tb=line')
|
'''
adpump
Nostale_FR
https://adpgtrack.com/click/5d15c5d8a035945cc309af93/157000/224520/subaccount
Uspd
'''
from selenium.webdriver import ActionChains
from selenium import webdriver
from time import sleep
# import xlrd
import random
import os
import time
import sys
sys.path.append("..")
# import email_imap as imap
# import json
import re
# from urllib import request, parse
from selenium.webdriver.support.ui import Select
# import base64
import Chrome_driver
import email_imap as imap
import name_get
import db
import selenium_funcs
import Submit_handle
import random
def web_submit(submit,chrome_driver,debug=0):
    """Drive a promo questionnaire in the browser and submit an email address.

    Parameters
    ----------
    submit : dict
        Submission record; reads submit['Site'] and submit['fr_soi']['email'].
    chrome_driver : selenium webdriver
        An already-configured Chrome driver.
    debug : int
        When 1, overrides submit['Site'] with a hard-coded test URL.

    Returns 1 when the flow ran to the submit click (or bailed at the email
    field); implicitly returns None if an earlier element lookup raises.
    """
    # test
    # email_list = ['aol.com','gmail.com','hotmail.com','outlook.com']
    # Mission_list = ['10052']
    # print(submit['Email'])
    # end = submit['Email']['Email_emu'].split('@')[1]
    # print(end)
    # if end not in email_list:
    # email = db.read_one_selected_email(Mission_list,email_list)
    # print(email)
    # if len(email) == 0:
    # return
    # if debug == 0:
    # db.write_one_info(Mission_list,email,Cookie = '')
    # submit['Email'] = email['Email']
    # print(submit['Email'])
    # return
    if debug == 1:
        site = 'https://w.myspicylinks.com/index.php?id_promo=5024105_1&promokeys=e180940561f0ce6b151baadf02d96fef'
        submit['Site'] = site
    chrome_driver.get(submit['Site'])
    chrome_driver.maximize_window()
    # chrome_driver.refresh()
    # Question 1: click "yes"/next.
    element = '//*[@id="questions"]/div[1]/div[2]/a[1]'
    element = selenium_funcs.scroll_and_find_up(chrome_driver,element)
    element.click()
    sleep(1)
    # Question 2.
    element = '//*[@id="questions"]/div[2]/div[2]/a[1]'
    element = selenium_funcs.scroll_and_find_up(chrome_driver,element)
    element.click()
    sleep(1)
    # Question 3: pick one of the three answers at random.
    element = ['//*[@id="questions"]/div[3]/div[2]/a[1]','//*[@id="questions"]/div[3]/div[2]/a[2]','//*[@id="questions"]/div[3]/div[2]/a[3]']
    num = random.randint(0,2)
    chrome_driver.find_element_by_xpath(element[num]).click()
    sleep(1)
    # Question 4: pick one of the six answers at random.
    element = ['//*[@id="questions"]/div[4]/div[2]/a[1]','//*[@id="questions"]/div[4]/div[2]/a[2]','//*[@id="questions"]/div[4]/div[2]/a[3]','//*[@id="questions"]/div[4]/div[2]/a[4]','//*[@id="questions"]/div[4]/div[2]/a[5]','//*[@id="questions"]/div[4]/div[2]/a[6]']
    num = random.randint(0,5)
    chrome_driver.find_element_by_xpath(element[num]).click()
    sleep(1)
    # Question 5 is optional on some variants of the page.
    try:
        chrome_driver.find_element_by_xpath('//*[@id="questions"]/div[5]/div[2]/a[1]').click()
    except:
        pass
    sleep(10)
    # Fill in the email field; bail out early if the field is not present.
    email = submit['fr_soi']['email']
    try:
        chrome_driver.find_element_by_xpath('//*[@id="emailPG"]').send_keys(email)
    except:
        return 1
    sleep(1)
    element = chrome_driver.find_element_by_xpath('//*[@id="pg_submit"]')
    actions = ActionChains(chrome_driver)
    # Click slightly inside the button (30, 15) rather than its center.
    actions.move_to_element_with_offset(element,30,15).click().perform()
    # Linger 3-5 minutes before returning, presumably to look human — confirm.
    num = random.randint(180,300)
    sleep(num)
    return 1
def test():
    """Manual driver: load one submission record from the DB and run web_submit
    against it in debug mode (hard-coded site)."""
    Mission_list = ['10000']
    excel = 'fr_soi'
    Excel_name = ['fr_soi','']
    Email_list = ['hotmail.com','outlook.com','','aol.com','gmail.com']
    submit = db.read_one_excel(Mission_list,Excel_name,Email_list)
    # print(submit)
    # Dump the non-empty fields of the record for inspection.
    [print(item,':',submit[excel][item]) for item in submit[excel] if submit[excel][item]!=None]
    # date_of_birth = Submit_handle.get_auto_birthday(submit['Uspd']['date_of_birth'])
    # print(date_of_birth)
    submit['Mission_Id'] = '10048'
    submit['Country'] = 'FR'
    chrome_driver = Chrome_driver.get_chrome(submit)
    web_submit(submit,chrome_driver,1)
    # print(submit['Email'])
    # print(submit['Email']['Email_emu'])
    # print(submit['Email']['Email_emu_pwd'])
    # # print(submit['Uspd']['zip'])
    # # print(submit['Uspd']['date_of_birth'])
    # # print(submit['Uspd']['ssn'])
def test1():
    """Print a random 0 or 1 (quick demo of the random gender pick)."""
    print(random.randint(0,1))
if __name__=='__main__':
    # Run the full submit flow when executed directly.
    test()
    print('......')
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
    def maxSubsequence(self, nums: List[int], k: int) -> List[int]:
        """Return a length-k subsequence of nums with maximum sum, keeping the
        elements in their original relative order (stable on ties)."""
        ranked = sorted(enumerate(nums), key=lambda pair: pair[1], reverse=True)
        chosen = sorted(ranked[:k])  # restore original index order
        return [value for _, value in chosen]
if __name__ == "__main__":
solution = Solution()
assert [3, 3] == solution.maxSubsequence([2, 1, 3, 3], 2)
assert [-1, 3, 4] == solution.maxSubsequence([-1, -2, 3, 4], 3)
assert [3, 4] == solution.maxSubsequence([3, 4, 3, 3], 2)
|
from pymagnitude import *
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from graph import *
from a_star import *
from client import DroidClient
from word2number import w2n
import random, time, csv, re
import numpy as np
# Change this path to where you put your project folder
path = "/Users/calchen/Desktop/sphero-project/"
# Change this path to / name of your .magnitude file.
# Do not merge this line with the previous line.
vectors = Magnitude(path + "vectors/word2vecRetrofitted.magnitude")
############################################################
# Helper Function
############################################################
# Load and process training sentences
# Load and process training sentences
def loadTrainingSentences(file_path):
    """Parse a file of "category :: sentence" lines into {category: [sentences]}.

    Blank lines and lines whose stripped text starts with "##" are skipped.
    """
    grouped = {}
    with open(file_path, "r") as fin:
        for raw_line in fin:
            line = raw_line.rstrip("\n")
            stripped = line.strip()
            if not stripped or stripped.startswith("##"):
                continue
            commandType, command = line.split(" :: ")
            grouped.setdefault(commandType, []).append(command)
    return grouped
############################################################
# Classification / Intent Detection
############################################################
# Calculate the cosine similarity between two vectors
def cosineSim(vector1, vector2):
    """Return the cosine of the angle between the two vectors (dot / norms)."""
    norm_product = np.linalg.norm(vector1) * np.linalg.norm(vector2)
    return np.dot(vector1, vector2) / norm_product
# Given a sentence, calculate its embedding
def calcPhraseEmbedding(sentence):
    """Average the word vectors of the sentence's tokens.

    Tokens are lowercased; empty strings and the stop words a/an/the/is are
    dropped. A "?" token is appended when the sentence contains one.
    """
    tokens = []
    for token in re.split("\W+", sentence):
        lowered = token.lower()
        if lowered not in ["", "a", "an", "the", "is"]:
            tokens.append(lowered)
    if "?" in sentence:
        tokens.append("?")
    # `vectors` is the module-level Magnitude embedding store.
    return vectors.query(tokens).sum(axis = 0) / len(tokens)
# Given a sentence embedding, rank training sentence embeddings
# according to their similarity to the given sentence embedding
def rankSentences(commandEmbedding, sentenceEmbeddings):
    """Return row indices of sentenceEmbeddings, most similar first.

    Ties keep their original row order (stable sort).
    """
    scored = []
    for row in range(sentenceEmbeddings.shape[0]):
        vector = sentenceEmbeddings[row, :]
        # Inlined cosine similarity: dot product over the product of norms.
        score = np.dot(commandEmbedding, vector) / (
            np.linalg.norm(commandEmbedding) * np.linalg.norm(vector)
        )
        scored.append((row, score))
    scored.sort(key = lambda pair: pair[1], reverse = True)
    return [row for row, _ in scored]
# Classify a given sentence
def getCommandType(categories, closestSentences, indexToTrainingSentence):
    """Weighted vote among the five nearest training sentences.

    The nearest neighbour counts 1.0, the next two 0.5 each, and the last
    two 0.2 each; the category with the highest total wins (first one in
    `categories` order on a tie).
    """
    votes = {}
    for category in categories:
        votes[category] = 0
    weights = [1, 0.5, 0.5, 0.2, 0.2]
    for rank in range(5):
        votes[indexToTrainingSentence[closestSentences[rank]][1]] += weights[rank]
    print(votes)
    return max(votes, key = votes.get)
############################################################
# Parsers / Slot Filling
############################################################
class Robot:
    def __init__(self, droidID, wordSimilarityCutoff, voice):
        """Build embeddings, load the color table, and connect to the droid.

        Parameters
        ----------
        droidID : identifier passed to DroidClient.connect_to_droid.
        wordSimilarityCutoff : float threshold used by the parsers.
        voice : bool, whether commands come from voice input.
        """
        # Initialize instance variables
        self.createSentenceEmbeddings()
        self.droid = DroidClient()
        self.name = "R2"
        self.wordSimilarityCutoff = wordSimilarityCutoff
        self.holoProjectorIntensity = 0
        self.logicDisplayIntensity = 0
        self.frontRGB = (0, 0, 0)
        self.backRGB = (0, 0, 0)
        self.voice = voice
        self.grid = [[]]
        self.speed = 0.5
        self.pos = (-1, -1)
        self.objCoord = dict()
        # Load and process color data
        # CSV rows look like: name, display name, R, G, B — per the writer in
        # askForColor below.
        self.colorToRGB = {}
        with open(path + "data/colors.csv") as csvfile:
            readCSV = csv.reader(csvfile, delimiter = ",")
            for row in readCSV:
                self.colorToRGB[row[0]] = (int(row[2]), int(row[3]), int(row[4]))
        # Connect to robot
        # Retries forever until the connection succeeds.
        connected = self.droid.connect_to_droid(droidID)
        while not connected:
            connected = self.droid.connect_to_droid(droidID)
    # Create training sentence embeddings
    def createSentenceEmbeddings(self):
        """Load the training file and precompute one embedding per sentence.

        Populates self.categories, self.indexToTrainingSentence (index ->
        (sentence, category)) and self.sentenceEmbeddings (one row per
        training sentence).
        """
        self.categories = ["state", "direction", "light", "animation", "head", "grid"]
        trainingSentences = loadTrainingSentences(path + "data/r2d2TrainingSentences.txt")
        self.indexToTrainingSentence = {}
        i = 0
        # File keys are "<category>Sentences", e.g. "lightSentences".
        for category in self.categories:
            sentences = trainingSentences[category + "Sentences"]
            for sentence in sentences:
                self.indexToTrainingSentence[i] = (sentence, category)
                i += 1
        self.sentenceEmbeddings = np.zeros((len(self.indexToTrainingSentence), vectors.dim))
        for i in range(len(self.indexToTrainingSentence)):
            sentence = self.indexToTrainingSentence[i][0]
            sentenceEmbedding = calcPhraseEmbedding(sentence)
            self.sentenceEmbeddings[i, :] = sentenceEmbedding
    # Process user's input command
    def inputCommand(self, command):
        """Classify a user command and dispatch it to the matching parser.

        When the best match is weak (< 0.84 cosine similarity) and input is
        typed (not voice), offer to add the command to the training file.
        """
        commandEmbedding = calcPhraseEmbedding(command)
        closestSentences = rankSentences(commandEmbedding, self.sentenceEmbeddings)
        # print(self.indexToTrainingSentence[closestSentences[0]][0])
        # print(self.indexToTrainingSentence[closestSentences[1]][0])
        # print(self.indexToTrainingSentence[closestSentences[2]][0])
        # print(self.indexToTrainingSentence[closestSentences[3]][0])
        # print(self.indexToTrainingSentence[closestSentences[4]][0])
        # NOTE(review): "Closet" (sic) is the shipped message text.
        print("Closet sentence was: " + self.indexToTrainingSentence[closestSentences[0]][0])
        print("Its cosine similarity to the command was: " + str(cosineSim(commandEmbedding, self.sentenceEmbeddings[closestSentences[0], :])))
        if cosineSim(commandEmbedding, self.sentenceEmbeddings[closestSentences[0], :]) < 0.84 and not self.voice:
            subcommand = input(self.name + ": I could not understand your command. Do you want to add this command to the training set? (yes/no): ")
            if "yes" in subcommand.lower():
                subcommand = input("What category do you want to add it to? Choices are state, direction, light, animation, head, or grid: ")
                subcommand = subcommand.lower()
                if subcommand in self.categories:
                    # Append to the training file; picked up on next restart.
                    with open(path + "data/r2d2TrainingSentences.txt", "a") as the_file:
                        the_file.write(subcommand + "Sentences :: " + command + "\n")
                    print("Command added. Changes will be present on restart.")
                else:
                    print(subcommand + " not a valid category.")
            return
        commandType = getCommandType(self.categories, closestSentences, self.indexToTrainingSentence)
        # Dispatch to e.g. self.lightParser / self.directionParser by name.
        result = getattr(self, commandType + "Parser")(command.lower())
        if result:
            print(self.name + ": Done executing " + commandType + " command.")
        else:
            print(self.name + ": I could not understand your " + commandType + " command.")
    # Reset the robot to its initial orientation
    def reset(self):
        """Stop the droid and point it back at heading 0."""
        self.droid.roll(0, 0, 0)
    # Disconnect from the robot
    def disconnect(self):
        """Close the connection to the droid."""
        self.droid.disconnect()
# Flash given light(s) in given color(s) for a given amount of time
def flash_colors(self, colors, seconds = 1, front = True):
if front:
for color in colors:
self.droid.set_front_LED_color(*color)
time.sleep(seconds)
else:
for color in colors:
self.droid.set_back_LED_color(*color)
time.sleep(seconds)
    # If we can't detect the color for the light(s), ask the user for more info
    def askForColor(self, lightPosition = "both"):
        """Interactively ask the user for a color name or RGB tuple.

        Returns an (r, g, b) tuple, or False when the user declines or the
        input cannot be parsed. A 4-token answer "name r g b" is also saved
        to the colors CSV for future runs.
        """
        if lightPosition != "both":
            print("We detect that you want to change your " + lightPosition + " light, but could not find a color.")
        else:
            print("We parsed this as a light command, but could not find a color.")
        command = input("Do you want to input a color? (yes/no): ")
        color = False
        if "yes" in command.lower():
            print("You may have inputted a color, but it is not in our database or is mispelled. Please input a color or rgb tuple.")
            command = input("If you want to add the color to the database, input color_name (one string) :: rgb tuple: ")
            words = re.split("\W+", command)
            words = [x for x in words if x != ""]
            # A known color name anywhere in the answer wins first.
            for word in words:
                if word in self.colorToRGB: color = self.colorToRGB[word]
            if len(words) == 4:
                # "name r g b": parse the tuple and persist it to the CSV.
                try:
                    color = (int(words[1]), int(words[2]), int(words[3]))
                    colorName = words[0]
                    with open(path + "data/colors.csv", "a") as csvStorer:
                        csvStorer.write("\n" + colorName + ",R2D2 " + colorName + "," + words[1] + "," + words[2] + "," + words[3])
                    print(colorName + " added to database. It will be available on the next restart.")
                except ValueError:
                    # Not numeric — fall through with whatever `color` holds.
                    superDumbVariable = 1
            elif len(words) == 3:
                # Bare "r g b" tuple.
                try:
                    color = (int(words[0]), int(words[1]), int(words[2]))
                except ValueError:
                    superDumbVariable = 1
        return color
    # Parser for a light command
    def lightParser(self, command):
        """Extract light-command slots from free text, then execute them.

        Fills a slot dict (which lights, which RGB channels, colors,
        intensities, add/subtract amounts) and hands it to
        lightSlotsToActions. Returns that call's True/False result.
        """
        # Slots for a light command
        slots = {"holoEmit": False, "logDisp": False, "lights": [], "add": False, "sub": False,
            "percent": False, "whichRGB": [], "colors": [], "intensities": [], "rgb": False, "increment/seconds": False}
        if "holoemitter" in command or "holo emitter" in command:
            slots["holoEmit"] = True
        if "logic display" in command:
            slots["logDisp"] = True
        if "dim" in command:
            slots["intensities"].append("dim")
        if "blink" in command:
            slots["intensities"].append("blink")
        if "%" in command:
            slots["percent"] = True
        # WANT TO MAKE INCREASE BETTER
        if "increase" in command or "add" in command:
            slots["add"] = True
        if "decrease" in command or "reduce" in command or "subtract" in command:
            slots["sub"] = True
        # Front / back too similar
        if "back" in command:
            slots["lights"].append("back")
        if "front" in command:
            slots["lights"].append("front")
        if slots["lights"] == []:
            # No light named: act on both.
            slots["lights"] = ["front", "back"]
        if "red" in command:
            slots["whichRGB"].append("red")
        if "green" in command:
            slots["whichRGB"].append("green")
        if "blue" in command:
            slots["whichRGB"].append("blue")
        words = re.split("\W+", command)
        words = [x for x in words if x != ""]
        i = 0
        for word in words:
            # Any 3 consecutive integer tokens are read as an RGB triple.
            if i < len(words) - 2:
                try:
                    slots["rgb"] = (int(words[i]), int(words[i+1]), int(words[i+2]))
                except ValueError:
                    superDumbVariable = True
            if vectors.similarity("off", word) > self.wordSimilarityCutoff or "minimum" in command:
                slots["intensities"].append("off")
            elif vectors.similarity("on", word) > self.wordSimilarityCutoff or vectors.similarity("maximum", word) > self.wordSimilarityCutoff:
                slots["intensities"].append("on")
            if vectors.similarity("percent", word) > self.wordSimilarityCutoff:
                slots["percent"] = True
            if word in self.colorToRGB:
                slots["colors"].append(self.colorToRGB[word])
            i += 1
            # A lone integer token doubles as an increment or a duration.
            try:
                increment = int(word)
                slots["increment/seconds"] = increment
            except ValueError:
                continue
        return self.lightSlotsToActions(slots)
    # Execute a light command given its slots
    def lightSlotsToActions(self, slots):
        """Execute a parsed light command; return True on success, False when
        the slots cannot be resolved into an action.

        Branch order matters: holo emitter, logic display, percent changes,
        absolute increments, back light, front light, then whole-droid
        color/intensity fallbacks. May prompt the user for missing values.
        """
        # --- Holo emitter only -------------------------------------------
        if slots["holoEmit"]:
            if "off" in slots["intensities"]:
                self.holoProjectorIntensity = 0
                self.droid.set_holo_projector_intensity(self.holoProjectorIntensity)
            elif "dim" in slots["intensities"]:
                self.holoProjectorIntensity = self.holoProjectorIntensity / 2
                self.droid.set_holo_projector_intensity(self.holoProjectorIntensity)
            elif "on" in slots["intensities"]:
                self.holoProjectorIntensity = 1
                self.droid.set_holo_projector_intensity(self.holoProjectorIntensity)
            elif "blink" in slots["intensities"]:
                # Toggle briefly, then restore the stored intensity.
                self.droid.set_holo_projector_intensity((self.holoProjectorIntensity + 1) % 2)
                time.sleep(0.3)
                self.droid.set_holo_projector_intensity(self.holoProjectorIntensity)
            else:
                return False
            return True
        # --- Logic display only ------------------------------------------
        if slots["logDisp"]:
            if "off" in slots["intensities"]:
                self.logicDisplayIntensity = 0
                self.droid.set_logic_display_intensity(self.logicDisplayIntensity)
            elif "dim" in slots["intensities"]:
                self.logicDisplayIntensity = self.logicDisplayIntensity / 2
                self.droid.set_logic_display_intensity(self.logicDisplayIntensity)
            elif "on" in slots["intensities"]:
                self.logicDisplayIntensity = 1
                self.droid.set_logic_display_intensity(self.logicDisplayIntensity)
            elif "blink" in slots["intensities"]:
                self.droid.set_logic_display_intensity((self.logicDisplayIntensity + 1) % 2)
                time.sleep(0.3)
                self.droid.set_logic_display_intensity(self.logicDisplayIntensity)
            else:
                return False
            return True
        # --- Percentage change of selected RGB channels ------------------
        if (slots["add"] or slots["sub"]) and (slots["percent"]):
            lights = slots["lights"]
            if not slots["increment/seconds"]:
                command = input("Percent not found in command, please input percent to change by here: ")
                try:
                    command = command.replace("%", "")
                    slots["increment/seconds"] = int(command)
                except ValueError:
                    return False
            if slots["sub"]: slots["increment/seconds"] = -slots["increment/seconds"]
            percent = slots["increment/seconds"]
            if len(slots["whichRGB"]) == 0:
                command = input("Did not find what values (red/blue/green) to change, input what values to change: ")
                if "red" in command:
                    slots["whichRGB"].append("red")
                if "green" in command:
                    slots["whichRGB"].append("green")
                if "blue" in command:
                    slots["whichRGB"].append("blue")
                if len(slots["whichRGB"]) == 0:
                    return False
            # Scale each requested channel by percent, clamped to [0, 255].
            if "red" in slots["whichRGB"]:
                for light in lights:
                    rgb = getattr(self, light + "RGB")
                    setattr(self, light + "RGB", (max(0, min(rgb[0] + rgb[0] * percent / 100, 255)), rgb[1], rgb[2]))
                    getattr(self.droid, "set_" + light + "_LED_color")(*getattr(self, light + "RGB"))
            if "green" in slots["whichRGB"]:
                for light in lights:
                    rgb = getattr(self, light + "RGB")
                    setattr(self, light + "RGB", (rgb[0], max(0, min(rgb[1] + rgb[1] * percent / 100, 255)), rgb[2]))
                    getattr(self.droid, "set_" + light + "_LED_color")(*getattr(self, light + "RGB"))
            if "blue" in slots["whichRGB"]:
                for light in lights:
                    rgb = getattr(self, light + "RGB")
                    setattr(self, light + "RGB", (rgb[0], rgb[1], max(0, min(rgb[2] + rgb[2] * percent / 100, 255))))
                    getattr(self.droid, "set_" + light + "_LED_color")(*getattr(self, light + "RGB"))
            return True
        # --- Absolute increment/decrement of selected RGB channels -------
        if slots["add"] or slots["sub"]:
            lights = slots["lights"]
            if not slots["increment/seconds"]:
                command = input("Increment not found in command, please input amount to change by here: ")
                try:
                    slots["increment/seconds"] = int(command)
                except ValueError:
                    return False
            if slots["sub"]:
                slots["increment/seconds"] = -slots["increment/seconds"]
            increaseValue = slots["increment/seconds"]
            if len(slots["whichRGB"]) == 0:
                command = input("Did not find what values (red/blue/green) to change, input what values to change: ")
                if "red" in command:
                    slots["whichRGB"].append("red")
                if "green" in command:
                    slots["whichRGB"].append("green")
                if "blue" in command:
                    slots["whichRGB"].append("blue")
                if len(slots["whichRGB"]) == 0:
                    return False
            # Shift each requested channel by the increment, clamped to [0, 255].
            if "red" in slots["whichRGB"]:
                for light in lights:
                    rgb = getattr(self, light + "RGB")
                    setattr(self, light + "RGB", (max(0, min(rgb[0] + increaseValue, 255)), rgb[1], rgb[2]))
                    getattr(self.droid, "set_" + light + "_LED_color")(*getattr(self, light + "RGB"))
            if "green" in slots["whichRGB"]:
                for light in lights:
                    rgb = getattr(self, light + "RGB")
                    setattr(self, light + "RGB", (rgb[0], max(0, min(rgb[1] + increaseValue, 255)), rgb[2]))
                    getattr(self.droid, "set_" + light + "_LED_color")(*getattr(self, light + "RGB"))
            if "blue" in slots["whichRGB"]:
                for light in lights:
                    rgb = getattr(self, light + "RGB")
                    setattr(self, light + "RGB", (rgb[0], rgb[1], max(0, min(rgb[2] + increaseValue, 255))))
                    getattr(self.droid, "set_" + light + "_LED_color")(*getattr(self, light + "RGB"))
            return True
        askedForColor = False
        # --- Back light only ---------------------------------------------
        if "back" in slots["lights"] and len(slots["lights"]) == 1:
            if len(slots["colors"]) > 1:
                # Several colors named: flash them, then restore stored color.
                seconds = slots["increment/seconds"]
                if not seconds: seconds = 1
                self.flash_colors(slots["colors"], seconds, False)
            elif len(slots["colors"]) == 1:
                self.backRGB = slots["colors"][0]
            else:
                if not slots["rgb"]:
                    color = self.askForColor("back")
                    askedForColor = True
                    if not color: return False
                    self.backRGB = color
                else:
                    self.backRGB = slots["rgb"]
            self.droid.set_back_LED_color(*self.backRGB)
            return True
        # --- Front light (or multi-color flash) --------------------------
        if ("front" in slots["lights"] and len(slots["lights"]) == 1) or len(slots["colors"]) > 1:
            if len(slots["colors"]) > 1:
                seconds = slots["increment/seconds"]
                if not seconds: seconds = 1
                self.flash_colors(slots["colors"], seconds)
            elif len(slots["colors"]) == 1:
                self.frontRGB = slots["colors"][0]
            else:
                if not slots["rgb"]:
                    color = self.askForColor("front")
                    askedForColor = True
                    if not color:
                        return False
                    self.frontRGB = color
                else:
                    self.frontRGB = slots["rgb"]
            self.droid.set_front_LED_color(*self.frontRGB)
            return True
        # --- Both lights, one named color --------------------------------
        if len(slots["colors"]) == 1:
            self.backRGB = slots["colors"][0]
            self.frontRGB = slots["colors"][0]
            self.droid.set_back_LED_color(*self.backRGB)
            self.droid.set_front_LED_color(*self.frontRGB)
            return True
        # --- No named color: try raw RGB, then intensity words -----------
        if len(slots["colors"]) == 0:
            if slots["rgb"]:
                self.backRGB = slots["rgb"]
                self.frontRGB = slots["rgb"]
                self.droid.set_back_LED_color(*self.backRGB)
                self.droid.set_front_LED_color(*self.frontRGB)
                return True
            if "off" in slots["intensities"]:
                self.holoProjectorIntensity = 0
                self.droid.set_holo_projector_intensity(self.holoProjectorIntensity)
                self.logicDisplayIntensity = 0
                self.droid.set_logic_display_intensity(self.logicDisplayIntensity)
                self.backRGB = (0, 0, 0)
                self.frontRGB = (0, 0, 0)
                self.droid.set_back_LED_color(*self.backRGB)
                self.droid.set_front_LED_color(*self.frontRGB)
                return True
            elif "dim" in slots["intensities"]:
                # NOTE(review): "dim" halves the LED colors but zeroes the
                # projector/display intensities — confirm that is intended.
                self.holoProjectorIntensity = 0
                self.droid.set_holo_projector_intensity(self.holoProjectorIntensity)
                self.logicDisplayIntensity = 0
                self.droid.set_logic_display_intensity(self.logicDisplayIntensity)
                self.backRGB = tuple(x / 2 for x in self.backRGB)
                self.frontRGB = tuple(x / 2 for x in self.frontRGB)
                self.droid.set_back_LED_color(*self.backRGB)
                self.droid.set_front_LED_color(*self.frontRGB)
                return True
            elif "on" in slots["intensities"]:
                self.holoProjectorIntensity = 1
                self.droid.set_holo_projector_intensity(self.holoProjectorIntensity)
                self.logicDisplayIntensity = 1
                self.droid.set_logic_display_intensity(self.logicDisplayIntensity)
                return True
            elif "blink" in slots["intensities"]:
                self.droid.set_holo_projector_intensity((self.holoProjectorIntensity + 1) % 2)
                self.droid.set_logic_display_intensity((self.holoProjectorIntensity + 1) % 2)
                time.sleep(0.3)
                self.droid.set_holo_projector_intensity(self.holoProjectorIntensity)
                self.droid.set_logic_display_intensity(self.logicDisplayIntensity)
                return True
        # --- Last resort: ask the user directly --------------------------
        if not slots["rgb"] and not askedForColor:
            color = self.askForColor()
            if color:
                self.backRGB = color
                self.frontRGB = color
                self.droid.set_back_LED_color(*self.backRGB)
                self.droid.set_front_LED_color(*self.frontRGB)
                return True
        return False
    # Parser for a directional command
    def directionParser(self, command):
        """Execute a movement command: circle, square, speed change, or a
        straight roll in a compass/relative direction.

        Returns True when a motion was executed, False otherwise.
        """
        if re.search("(circle|donut)", command):
            # Roll through headings in 30-degree steps to trace a circle.
            if re.search("counter", command):
                for heading in range(360, 0, -30):
                    self.droid.roll(self.speed, heading % 360, 0.6)
            else:
                for heading in range(0, 360, 30):
                    self.droid.roll(self.speed, heading, 0.6)
            self.droid.roll(0, 0, 0)
            return True
        elif re.search("square", command):
            # Four 90-degree turns; a zero-speed roll first turns in place.
            if re.search("counter", command):
                for heading in range(360, 0, -90):
                    self.droid.roll(0, heading % 360, 0)
                    time.sleep(0.35)
                    self.droid.roll(self.speed, heading % 360, 0.6)
            else:
                for heading in range(0, 360, 90):
                    self.droid.roll(0, heading, 0)
                    time.sleep(0.35)
                    self.droid.roll(self.speed, heading, 0.6)
            self.droid.roll(0, 0, 0)
            return True
        elif re.search("(speed|slow|faster|slower)", command):
            # Adjust stored speed in 0.25 steps, kept within [0.25, 1.0].
            if re.search("(increase|faster|up)", command):
                if self.speed <= 0.75:
                    self.speed += 0.25
            else:
                if self.speed >= 0.5:
                    self.speed -= 0.25
            self.droid.animate(0)
            return True
        else:
            # Straight rolls; every direction word in the command is executed.
            flag = False
            tokens = re.split("[^a-zA-Z]", command)
            for token in tokens:
                if token in {"up", "forward", "ahead", "straight", "north"}:
                    self.droid.roll(0, 0, 0)
                    time.sleep(0.35)
                    self.droid.roll(self.speed, 0, 0.6)
                    flag = True
                elif token in {"down", "back", "south"}:
                    self.droid.roll(0, 180, 0)
                    time.sleep(0.35)
                    self.droid.roll(self.speed, 180, 0.6)
                    flag = True
                elif token in {"left", "west"}:
                    self.droid.roll(0, 270, 0)
                    time.sleep(0.35)
                    self.droid.roll(self.speed, 270, 0.6)
                    flag = True
                elif token in {"right", "east"}:
                    self.droid.roll(0, 90, 0)
                    time.sleep(0.35)
                    self.droid.roll(self.speed, 90, 0.6)
                    flag = True
            self.droid.roll(0, 0, 0)
            return flag
# Parser for a animation command
def animationParser(self, command):
if re.search("fall", command):
self.droid.animate(14)
return True
elif re.search("run away", command):
self.droid.animate(19)
return True
elif re.search("(dance|move)", command):
self.droid.animate(20)
return True
elif re.search("(sing|sound|noise)", command):
self.droid.play_sound(3)
return True
elif re.search("scream", command):
self.droid.play_sound(7)
return True
return False
# Parser for a head command
def headParser(self, command):
if re.search("(forward|ahead|straight|front)", command):
self.droid.rotate_head(0)
return True
elif re.search("left", command):
self.droid.rotate_head(-90)
return True
elif re.search("right", command):
self.droid.rotate_head(90)
return True
elif re.search("(behind|back)", command):
self.droid.rotate_head(180)
return True
return False
# Parser for a state command
def stateParser(self, command):
if re.search("color", command):
if re.search("(front|forward)", command):
print("****************************************")
print(self.frontRGB)
print("****************************************")
elif re.search("(back|rear|rare)", command):
print("****************************************")
print(self.backRGB)
print("****************************************")
else:
print("****************************************")
print(self.frontRGB)
print(self.backRGB)
print("****************************************")
return True
elif re.search("(name|call)", command):
if re.search("(want|wanna).*you", command):
if self.extractName(command) == "":
print("****************************************")
print("You didn't give me a name!")
print("****************************************")
self.droid.play_sound(7)
return False
else:
self.name = self.extractName(command)
self.droid.animate(0)
else:
print("****************************************")
print(self.name)
print("****************************************")
return True
elif re.search("(power|battery)", command):
print("****************************************")
self.droid.battery()
print("****************************************")
return True
return False
def extractName(self, command):
words = re.split("[^a-zA-Z]", command)
ret = ""
activated = False
for word in words:
if activated:
ret += word + " "
if word == "you":
activated = True
if len(ret) > 0 and ret[-1] == " ":
return ret[:-1]
else:
return ret
# Parser for a grid command
    def gridParser(self, command):
        """Parse grid-world commands: create an NxM grid, place the droid or
        obstacles on it, and drive to a target cell via A* pathfinding.

        Sub-commands handled, in order:
          * "<n> by <m> grid"        -> allocate self.grid
          * "you're at <x> <y>"      -> set self.pos / mark "you" on the grid
          * "<obj> is at <x> <y>"    -> place an obstacle (supports relative
                                        phrases like "left of <known obj>")
          * "go to <x> <y>"          -> plan with A_star and roll the moves
        Returns True on success, False (with an error sound) on any failure.
        Relies on project helpers Graph/A_star and the word2number package
        (w2n) -- assumed imported at module level; TODO confirm.
        """
        # Convert the words to lowercase
        # Convert numbers as words to ints
        # Remove duplicate spaces
        # The split keeps "x" and separators as their own tokens so "3x4" works.
        words = re.split("(x|[^a-zA-Z0-9])", command.lower())
        command = ""
        for word in words:
            try:
                # spelled-out numbers ("three") become digits
                command += str(w2n.word_to_num(word)) + " "
            except:
                command += word + " "
        command = re.sub(" +", " ", command)
        print("****************************************")
        print(command)
        print("****************************************")
        # "to" is accepted wherever a digit may appear: speech recognizers
        # frequently transcribe "two" as "to".
        if re.search("(\d+|to).*(x|by).*(\d+|to) grid", command):
            words = re.split("(x|[^a-zA-Z0-9])", command)
            x, y = self.extractCoord(words)
            # allocate an x-by-y grid of empty cells
            self.grid = [["" for col in range(y)] for row in range(x)]
            print("****************************************")
            for row in self.grid:
                print(row)
            print("****************************************")
            self.droid.animate(0)
            return True
        elif re.search("you.*re.*at.*(\d+|to).*(\d+|to)", command):
            # clear the droid's previous cell, if it had one
            if self.pos != (-1, -1):
                self.grid[self.pos[0]][self.pos[1]] = ""
            arr = re.split("[^a-zA-Z0-9]", command)
            x, y = self.extractCoord(arr)
            if len(self.grid) == 0 or len(self.grid[0]) == 0:
                print("****************************************")
                print("Grid is not initialized yet!")
                print("****************************************")
                self.droid.play_sound(7)
                return False
            if x < 0 or x >= len(self.grid) or y < 0 or y >= len(self.grid[0]):
                print("****************************************")
                print("Coordinate is out of grid!")
                print("****************************************")
                self.droid.play_sound(7)
                return False
            self.pos = (x, y)
            self.grid[x][y] = "you"
            self.objCoord["you"] = (x, y)
            print("****************************************")
            for row in self.grid:
                print(row)
            print("****************************************")
            self.droid.animate(0)
            return True
        elif re.search("(s|re) .+ (at|to|on|above|below)", command):
            # Replace obj with its coordinates
            # e.g. "the cup is left of the ball" -> "... at <ball.x> , <ball.y-1>"
            for obj in self.objCoord:
                words = re.split(" ", obj)
                for word in words:
                    if re.search(word, command):
                        x, y = self.objCoord[obj]
                        # relative words shift the known object's coordinate
                        if re.search("(left|west)", command):
                            y -= 1
                        elif re.search("(right|east)", command):
                            y += 1
                        elif re.search("(below|south|bottom)", command):
                            x += 1
                        elif re.search("(above|north|top)", command):
                            x -= 1
                        command = self.replaceWithCoord(command, x, y)
                        print("****************************************")
                        print(command)
                        print("****************************************")
                        break
            if re.search("at.*(\d+|to).*(\d+|to)", command):
                words = re.split("[^a-zA-Z0-9]", command)
                x, y = self.extractCoord(words)
                obj = self.extractObj(words)
                if len(self.grid) == 0 or len(self.grid[0]) == 0:
                    print("****************************************")
                    print("Grid is not initialized yet!")
                    print("****************************************")
                    self.droid.play_sound(7)
                    return False
                if x < 0 or x >= len(self.grid) or y < 0 or y >= len(self.grid[0]):
                    print("****************************************")
                    print("Coordinate is out of grid!")
                    print("****************************************")
                    self.droid.play_sound(7)
                    return False
                if obj == "":
                    print("****************************************")
                    print("You didn't specify what the obstacle(s) is/are!")
                    print("****************************************")
                    self.droid.play_sound(7)
                    return False
                self.grid[x][y] = obj
                self.objCoord[obj] = (x, y)
                print("****************************************")
                for row in self.grid:
                    print(row)
                print("****************************************")
                self.droid.animate(0)
                return True
        elif re.search("go.*to", command):
            # Replace obj with its coordinates
            # same relative-phrase rewriting as above, but for the destination
            for obj in self.objCoord:
                words = re.split(" ", obj)
                for word in words:
                    if re.search(word, command):
                        x, y = self.objCoord[obj]
                        if re.search("(left|west)", command):
                            y -= 1
                        elif re.search("(right|east)", command):
                            y += 1
                        elif re.search("(below|south|bottom)", command):
                            x += 1
                        elif re.search("(above|north|top)", command):
                            x -= 1
                        command = "go to " + str(x) + " , " + str(y)
                        print("****************************************")
                        print(command)
                        print("****************************************")
                        break
            if re.search("(\d+|to).*(\d+|to)", command):
                arr = re.split("[^a-zA-Z0-9]", command)
                x, y = self.extractCoord(arr)
                if len(self.grid) == 0 or len(self.grid[0]) == 0:
                    print("****************************************")
                    print("Grid is not initialized yet!")
                    print("****************************************")
                    self.droid.play_sound(7)
                    return False
                if x < 0 or x >= len(self.grid) or y < 0 or y >= len(self.grid[0]):
                    print("****************************************")
                    print("Coordinate is out of grid!")
                    print("****************************************")
                    self.droid.play_sound(7)
                    return False
                if self.pos == (-1, -1):
                    print("****************************************")
                    print("Current position hasn't been specified!")
                    print("****************************************")
                    self.droid.play_sound(7)
                    return False
                # a cell holding a known object cannot be a destination
                if (x, y) in self.objCoord.values():
                    print("****************************************")
                    print("Impossible to get to the target!")
                    print("****************************************")
                    self.droid.play_sound(7)
                    return False
                target = (x, y)
                if target == self.pos:
                    print("****************************************")
                    print("You are already there!")
                    print("****************************************")
                    self.droid.animate(0)
                    return True
                self.grid[x][y] = "target"
                print("****************************************")
                for row in self.grid:
                    print(row)
                print("****************************************")
                # Create a graph and calculate the moves
                G = Graph(self.grid)
                moves = A_star(G, self.pos, target, manhattan_distance_heuristic)
                if moves is None:
                    print("****************************************")
                    print("Impossible to get to the target!")
                    print("****************************************")
                    self.grid[x][y] = ""
                    self.droid.play_sound(7)
                    return False
                else:
                    print("****************************************")
                    print(moves)
                    print("****************************************")
                    # Store the initial position
                    init_x, init_y = self.pos
                    # drive each step: compare consecutive cells to pick a heading
                    # (row index grows downward, so +row means "move down")
                    for i in range(1, len(moves)):
                        if moves[i][1] > moves[i - 1][1]:
                            # Move right
                            self.droid.roll(0, 90, 0)
                            time.sleep(0.35)
                            self.droid.roll(self.speed, 90, 0.6)
                        elif moves[i][1] < moves[i - 1][1]:
                            # Move left
                            self.droid.roll(0, 270, 0)
                            time.sleep(0.35)
                            self.droid.roll(self.speed, 270, 0.6)
                        elif moves[i][0] > moves[i - 1][0]:
                            # Move down
                            self.droid.roll(0, 180, 0)
                            time.sleep(0.35)
                            self.droid.roll(self.speed, 180, 0.6)
                        elif moves[i][0] < moves[i - 1][0]:
                            # Move up
                            self.droid.roll(0, 0, 0)
                            time.sleep(0.35)
                            self.droid.roll(self.speed, 0, 0.6)
                        self.pos = moves[i]
                    # the droid now occupies the former target cell
                    self.grid[x][y] = "you"
                    self.objCoord["you"] = (x, y)
                    self.grid[init_x][init_y] = ""
                    print("****************************************")
                    for row in self.grid:
                        print(row)
                    print("****************************************")
                    self.droid.animate(0)
                    return True
        # nothing matched (or a sub-branch fell through): signal failure
        self.droid.play_sound(7)
        return False
# Given a sentence as an array of words, extract the coordinate as an array of ints
def extractCoord(self, words):
ret = []
for i in range(len(words) - 1, -1, -1):
word = words[i]
if word.isdigit():
ret = [int(word)] + ret
if word in {"to", "too"}:
ret = [2] + ret
if len(ret) == 2:
break
return ret
# Given a sentence as an array of words, extract the object as a string
def extractObj(self, words):
ret = ""
activated = False
for word in words:
if word == "at":
activated = False
if activated and word not in {"a", "an", "the"}:
ret += word + " "
if word in {"s", "re", "is", "are"}:
activated = True
if len(ret) > 0 and ret[-1] == " ":
return ret[:-1]
else:
return ret
# Replace obj with its coordinates
def replaceWithCoord(self, command, x, y):
words = re.split("[^a-zA-Z0-9]", command)
ret = ""
for word in words:
if word not in {"above", "below", "to", "on"}:
ret += word + " "
else:
break
ret += "at " + str(x) + " , " + str(y)
return ret
|
# noinspection PyUnresolvedReferences
from actuators.HBridgeActuator import HBridgeActuator as Actuator
import os
from controllers.Controller import Controller
# noinspection PyUnresolvedReferences
import RPi.GPIO as GPIO
class HBridgeController(Controller):
    """Controller driving two H-bridge wheel actuators plus a steering actuator.

    GPIO pin numbers are read from PIN_* environment variables; a missing
    variable now raises a clear RuntimeError instead of the opaque
    ``int() argument must be ... NoneType`` TypeError the old code produced.
    """

    def __init__(self):
        GPIO.setmode(GPIO.BOARD)
        self.left = self._actuator_from_env(
            'PIN_LEFT_FORWARD', 'PIN_LEFT_BACKWARD', 'PIN_LEFT_PWM')
        self.right = self._actuator_from_env(
            'PIN_RIGHT_FORWARD', 'PIN_RIGHT_BACKWARD', 'PIN_RIGHT_PWM')
        # steering is wired so "forward" turns right and "backward" turns left
        self.steering = self._actuator_from_env(
            'PIN_STEER_LEFT', 'PIN_STEER_RIGHT', 'PIN_STEER_PWM')

    @staticmethod
    def _read_pin(var_name):
        """Return the named environment variable as an int pin number."""
        value = os.getenv(var_name)
        if value is None:
            raise RuntimeError("environment variable %s is not set" % var_name)
        return int(value)

    @staticmethod
    def _actuator_from_env(forward_var, backward_var, pwm_var):
        """Build an Actuator from three PIN_* environment variable names."""
        return Actuator(
            pin_forward=HBridgeController._read_pin(forward_var),
            pin_backward=HBridgeController._read_pin(backward_var),
            pin_pwm=HBridgeController._read_pin(pwm_var)
        )

    def steer_left(self):
        self.steering.reverse()

    def steer_right(self):
        self.steering.forward()

    def steer_neutral(self):
        self.steering.neutral()

    def forward(self, power: int = 100):
        """Drive both wheels forward at *power* percent."""
        self.left.forward(power)
        self.right.forward(power)

    def reverse(self, power: int = 100):
        """Drive both wheels backward at *power* percent."""
        self.left.reverse(power)
        self.right.reverse(power)

    def neutral(self):
        """Let both wheels coast."""
        self.left.neutral()
        self.right.neutral()

    def exit(self):
        """Release the wheel actuators and reset the GPIO pins."""
        self.left.exit()
        self.right.exit()
        GPIO.cleanup()
|
from flask import Blueprint, request, make_response, session, jsonify, render_template, redirect,current_app
from utils.captcha.captcha import captcha
from utils.ytx_sdk.ytx_send import sendTemplateSMS
import random
import re
import functools
from models import db, UserInfo,NewsCategory,NewsInfo
from utils.qiniuyun_xjzx import pic1
# Blueprint collecting every /user/* route in this module
user_blueprint = Blueprint('user', __name__, url_prefix='/user')
import datetime
# 图片验证码视图
@user_blueprint.route('/image_yzm')
def image_yzm():
    """Generate a captcha image, stash its text in the session, return the image."""
    # generate_captcha() yields (name, text, image-bytes)
    name, text, content = captcha.generate_captcha()
    # keep the expected text server-side for later validation
    session['image_yzm'] = text
    print(session['image_yzm'])
    resp = make_response(content)
    resp.mimetype = 'image/jpg'
    return resp
# 短信验证码
@user_blueprint.route('/msg_yzm', methods=['GET'])
def msg_yzm():
    """Validate mobile number and image captcha, then create an SMS code."""
    args = request.args
    mobile = args.get('mobile')
    image_yzm = args.get('image_yzm')
    # reject malformed or already-registered phone numbers
    if len(mobile) != 11:
        return jsonify(error_info='手机号不合法')
    if UserInfo.query.filter_by(mobile=mobile).count():
        return jsonify(error_info='该手机号已经被注册过')
    # the captcha text must match what image_yzm() stored in the session
    if image_yzm != session['image_yzm']:
        return jsonify(error_info='验证码不合法')
    # real SMS delivery (sendTemplateSMS) is disabled during development;
    # a random 4-digit code is printed and kept in the session instead
    code = random.randint(1000, 10000)
    print(code)
    session['msg_yzm'] = str(code)
    return jsonify(success_info="短信已发送,请注意查收")
# 注册
@user_blueprint.route('/register', methods=['POST'])
def register():
    """Create a UserInfo row after checking captcha, SMS code and password."""
    form = request.form
    mobile = form.get('mobile')
    image_yzm = form.get('image_yzm')
    msg_yzm = form.get('msg_yzm')
    password = form.get('password')
    # every failure path returns a JSON error the front-end can display
    if not all((mobile, image_yzm, msg_yzm, password)):
        return jsonify(error_info='数据填写不完整')
    if len(mobile) != 11:
        return jsonify(error_info='手机号不合法')
    if UserInfo.query.filter_by(mobile=mobile).count():
        return jsonify(error_info='该手机号已经被注册过')
    if image_yzm != session['image_yzm']:
        return jsonify(error_info='图片验证码不正确')
    if msg_yzm != session['msg_yzm']:
        return jsonify(error_info='短信验证码不正确')
    # password must start with 6-20 word characters
    if not re.match(r'\w{6,20}', password):
        return jsonify(error_info='密码格式不正确')
    user = UserInfo()
    user.mobile = mobile
    # the nickname defaults to the phone number until the user changes it
    user.nick_name = mobile
    user.password = password
    try:
        db.session.add(user)
        db.session.commit()
        return jsonify(success_info='注册成功,请登录')
    except:
        return jsonify(error_info='服务器出错')
# 登录
@user_blueprint.route('/login', methods=['POST'])
def login():
    """Authenticate by mobile + password, mark the session, and bump the
    per-quarter-hour active-user counters kept in redis."""
    dict1 = request.form
    mobile = dict1.get('mobile')
    password = dict1.get('password')
    if not all((mobile, password)):
        return jsonify(error_info='数据填写不完整')
    if len(mobile) != 11:
        return jsonify(error_info='手机号不合法')
    user = UserInfo.query.filter_by(mobile=mobile).first()
    if not user:
        return jsonify(error_info='该手机号没有注册过哦')
    if not user.check_pwd(password):
        return jsonify(error_info='密码输入错误')
    session['user_id'] = user.id
    # record the latest login time
    user.update_time = datetime.datetime.now()
    db.session.commit()
    # Count logins per time slot in a redis hash named "activeYYYYMMDD".
    now=datetime.datetime.now()
    name = 'active%d%d%d'%(now.year,now.month,now.day)
    # ensure every hourly "HH:15" bucket between 08:15 and 19:15 exists
    for i in range(8,20):
        if not current_app.redis_client.hget(name,'%02d:15'%i):
            current_app.redis_client.hset(name, '%02d:15'%i,0)
    # out-of-hours logins are folded into the first/last bucket; otherwise the
    # login counts toward the previous hour's bucket until minute 15
    if now.hour<=9 or now.hour>=20:
        current_app.redis_client.hset(name,'08:15' if now.hour<=9 else '19:15',int(current_app.redis_client.hget(name,'08:15' if now.hour<=9 else '19:15').decode())+1)
    else:
        current_app.redis_client.hset(name, '%02d:15'%(now.hour-1) if now.minute <= 15 else '%02d:15'%now.hour, int(current_app.redis_client.hget(name, '%02d:15'%(now.hour-1) if now.minute <= 15 else '%02d:15'%now.hour).decode()) + 1)
    return jsonify(success_info='登录成功', nick_name=user.nick_name,avatar_url=user.avatar_url)
# 退出
@user_blueprint.route('/logout')
def logout():
    """Drop the login marker from the session."""
    session.pop('user_id')
    return jsonify(success_info='退出成功')
# 设置登录过才展示以下页面
# Decorator: only run the wrapped view for logged-in users
def check_login(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # anonymous visitors are bounced to the home page
        if 'user_id' not in session:
            return redirect('/')
        return func(*args, **kwargs)
    return wrapper
# 显示用户中心视图
@user_blueprint.route('/')
@check_login
def user_index():
    """Render the user-centre landing page for the logged-in user."""
    current = UserInfo.query.get(session['user_id'])
    return render_template('news/user.html', title='用户中心', user=current)
# 用户基本信息
@user_blueprint.route('/base', methods=['GET', 'POST'])
@check_login
def base():
    """Show (GET) or update (POST) signature, nickname and gender."""
    user = UserInfo.query.get(session['user_id'])
    if request.method == 'GET':
        return render_template('news/user_base_info.html', user=user)
    elif request.method == 'POST':
        form = request.form
        signature = form.get('signature')
        nick_name = form.get('nick_name')
        # NOTE(review): bool('0') is True -- assumes the form posts ''/missing
        # for one gender and a non-empty value for the other; confirm.
        gender = bool(form.get('gender'))
        try:
            user.signature = signature
            user.nick_name = nick_name
            user.gender = gender
            db.session.commit()
            return jsonify(success_info='修改个人信息成功')
        except:
            return jsonify(error_info='服务器出错')
# 用户头像
@user_blueprint.route('/pic', methods=['GET', 'POST'])
@check_login
def pic():
    """Show (GET) or replace (POST) the user's avatar via the qiniu CDN."""
    user = UserInfo.query.get(session['user_id'])
    if request.method == 'GET':
        return render_template('news/user_pic_info.html', user=user)
    elif request.method == 'POST':
        # uploaded files live in request.files, not request.form
        user_pic = request.files.get('avatar')
        # push the image to qiniu; pic1 returns the stored file name
        avatar = pic1(user_pic)
        try:
            user.avatar = avatar
            db.session.commit()
            return jsonify(success_info='修改成功', avatar_url=user.avatar_url)
        except:
            return jsonify(error_info='服务器出错')
# 用户关注的人
@user_blueprint.route('/follow')
@check_login
def follow():
    """Paginated list (4 per page) of the users this user follows."""
    user = UserInfo.query.get(session['user_id'])
    page = int(request.args.get('page', 1))
    # pagination object gives both the page slice and the total page count
    pagination = user.follow_user.paginate(page, 4, False)
    return render_template('news/user_follow.html',
                           user_follow_list=pagination.items,
                           total_page=pagination.pages, page=page)
# 用户密码
@user_blueprint.route('/password', methods=['GET', 'POST'])
@check_login
def password():
    """Change the current user's password after a series of sanity checks."""
    user = UserInfo.query.get(session['user_id'])
    if request.method == 'GET':
        return render_template('news/user_pass_info.html')
    elif request.method == 'POST':
        form = request.form
        old_password = form.get('old_password')
        new_password = form.get('new_password')
        new_password_config = form.get('new_password_config')
        # every failure path re-renders the form with a specific message
        if not re.match(r'\w{6,20}', old_password):
            return render_template('news/user_pass_info.html', error_info='旧密码输入格式不正确')
        if not re.match(r'\w{6,20}', new_password):
            return render_template('news/user_pass_info.html', error_info='新密码输入格式不正确')
        if new_password == old_password:
            return render_template('news/user_pass_info.html', error_info='新密码不能与旧密码一致哦')
        if new_password != new_password_config:
            return render_template('news/user_pass_info.html', error_info='两次新密码输入不一致')
        if not user.check_pwd(old_password):
            return render_template('news/user_pass_info.html', error_info='旧密码输入有误')
        try:
            user.password = new_password
            db.session.commit()
            # force a fresh login with the new password
            session.pop('user_id')
            return render_template('news/user_pass_info.html', success_info='修改成功,请重新登录')
        except:
            return render_template('news/user_pass_info.html', error_info='服务器出错,请稍后重试')
# 用户收藏
@user_blueprint.route('/collection')
@check_login
def collection():
    """Paginated list (6 per page) of the news items this user has collected."""
    current = UserInfo.query.get(session['user_id'])
    page = int(request.args.get('page', 1))
    # newest first
    pagination = current.news_collect.order_by(NewsInfo.id.desc()).paginate(page, 6, False)
    # NOTE(review): 'tatol_page' is misspelled but the template expects this
    # exact key, so it is kept as-is.
    return render_template('news/user_collection.html', page=page,
                           tatol_page=pagination.pages,
                           news_collect_list=pagination.items)
# 用户发布
@user_blueprint.route('/release', methods=['GET', 'POST'])
@check_login
def release():
    """Create a news item, or edit an existing one when ?news_id= is supplied.

    GET renders the form (pre-filled when editing); POST validates and saves.
    """
    user = UserInfo.query.get(session['user_id'])
    category_list = NewsCategory.query.all()
    try:
        # a news_id query argument marks this request as an edit
        news_id = int(request.args.get('news_id'))
        news = NewsInfo.query.get(news_id)
    except:
        news = None
    if request.method == 'GET':
        # news is a NewsInfo or None; the template shows empty fields for None
        return render_template('news/user_news_release.html', category_list=category_list, news=news)
    elif request.method == 'POST':
        dict1 = request.form
        title = dict1.get('title')
        category_id = int(dict1.get('category'))
        summary = dict1.get('summary')
        context = dict1.get('content')
        try:
            __news_pic = request.files.get('news_pic')
            news_picname = pic1(__news_pic)
        except:
            # no picture uploaded -> store the empty string
            news_picname = ''
        if not all((title, category_id, summary, context)):
            # the template uses news, so it must be passed back with the error
            return render_template('news/user_news_release.html', category_list=category_list, error_info='您的内容没有填写完整哦', news=news)
        try:
            if news:
                # Editing: keep the original author, refresh the timestamp.
                # BUG FIX: this module does `import datetime`, so the call must
                # be datetime.datetime.now(); the original datetime.now()
                # raised AttributeError, masked as '服务器出错' by the except.
                news.update_time = datetime.datetime.now()
            else:
                news = NewsInfo()
                news.user_id = user.id
            # only overwrite the picture when a new one was uploaded
            if news_picname:
                news.pic = news_picname
            news.title = title
            news.category_id = category_id
            news.summary = summary
            news.status = 1
            news.context = context
            db.session.add(news)
            db.session.commit()
            # after a successful save, go to the user's news list
            return redirect('/user/list')
        except:
            return render_template('news/user_news_release.html', category_list=category_list, error_info='服务器出错', news=news)
# 用户新闻列表
@user_blueprint.route('/list')
@check_login
def list():
    """Paginated list (6 per page) of the news this user has published."""
    # NOTE(review): the name shadows the builtin list but also serves as the
    # endpoint name (url_for('user.list')), so it is kept.
    current = UserInfo.query.get(session['user_id'])
    page = int(request.args.get('page', 1))
    pagination = current.news.order_by(NewsInfo.id.desc()).paginate(page, 6, False)
    # the template's 'total_news' is actually the page count
    return render_template('news/user_news_list.html', page=page,
                           news_list=pagination.items,
                           total_news=pagination.pages)
|
import os
from PIL import Image
import numpy as np
from keras import layers
from keras.applications import DenseNet121
from keras.callbacks import Callback, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
import scipy
import tensorflow as tf
#from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras import layers
import keras
from keras.preprocessing.image import load_img
from keras.preprocessing.image import save_img
from keras.preprocessing.image import img_to_array
from sklearn.model_selection import StratifiedShuffleSplit
def get_pad_width(im, new_shape, is_rgb=True):
    """Return np.pad-style pad widths that centre *im* in a new_shape square.

    BUG FIX: the original used math.floor/math.ceil but this module never
    imports math, so the function raised NameError when called.  Integer
    floor/ceil division is equivalent for whole-pixel differences: the extra
    pixel of an odd difference goes to the bottom/right edge.
    """
    diff_rows = new_shape - im.shape[0]
    diff_cols = new_shape - im.shape[1]
    t, b = diff_rows // 2, diff_rows - diff_rows // 2
    l, r = diff_cols // 2, diff_cols - diff_cols // 2
    if is_rgb:
        # third axis is the colour channel: never padded
        return ((t, b), (l, r), (0, 0))
    return ((t, b), (l, r))
def preprocess_image(image_path, desired_size=224):
    """Load an image and resize it to a desired_size x desired_size square
    using LANCZOS resampling."""
    img = Image.open(image_path)
    return img.resize((desired_size, desired_size), resample=Image.LANCZOS)
def preprocess_test_data(test_df):
    """Load the (already resized) test images and build multilabel targets.

    test_df: DataFrame with 'id_code' (image file stem) and 'diagnosis'
    columns -- presumably the APTOS retinopathy CSV; TODO confirm.
    Returns (x_test, y_test_multi): a (N, 224, 224, 3) uint8 image array and
    a 2-column ordinal multilabel target where column 0 is the logical OR of
    the higher classes (ordinal-regression encoding).
    """
    N = test_df.shape[0]
    x_test = np.empty((N, 224, 224, 3), dtype=np.uint8)
    j = 0
    # --- dead experiment code kept for reference: one-off pass that resized
    # --- and re-saved the raw test images on disk
    #for i, image_id in enumerate((test_df['id_code'])):
    #f(j%500 == 0):
    #print(j)
    #img = preprocess_image('AnotherTest/TestingSet/'+image_id+'.jpg')
    ##img_array = img_to_array(img)
    ##save_img('processed_test_images/'+image_id, img_array)
    #img.save('AnotherTest/TestingSet/'+image_id+'.jpg')
    #j = j+1;
    ##print(j)
    ##if j == 100:
    ## break
    ##x_train[i, :, :, :] = preprocess_image('train_images/'+image_id+'.png')
    ##j = j+1;
    # N = test_df.shape[0]
    # x_test = np.empty((N, 224, 224, 3), dtyp e=np.uint8)
    # j = 0
    # for i, image_id in enumerate((test_df['id_code'])):
    # if(j%500 == 0):
    # print(j)
    # x_test[i, :, :, :] = preprocess_image('test_images/'+image_id+'.png')
    # j = j+1;
    # images were pre-resized to 224x224, so they load straight into the array
    for i, image_id in enumerate((test_df['id_code'])):
        x_test[i, :, :, :] = Image.open('AnotherTest/TestingSet/'+image_id+'.jpg')
    # collapse diagnosis to binary (healthy vs any retinopathy) as floats
    test_df['diagnosis'] = test_df['diagnosis'] > 0
    test_df['diagnosis'] = test_df['diagnosis'] * 1.0
    # print(x_train.shape)
    # print(y_train.shape)
    # #print(x_test.shape)
    # one-hot encode the two classes
    y_test = pd.get_dummies(test_df['diagnosis']).values
    print("y_test shape")
    print(y_test.shape)
    # ordinal encoding: each column becomes the OR of itself and all columns
    # to its right (here only column 0 is rewritten, since range yields [0])
    y_test_multi = np.empty(y_test.shape, dtype=y_test.dtype)
    y_test_multi[:, 1] = y_test[:, 1]
    for i in range(0, -1, -1):
        y_test_multi[:, i] = np.logical_or(y_test[:, i], y_test_multi[:, i+1])
    print("Original y_test:", y_test.sum(axis=0))
    print("Multilabel version:", y_test_multi.sum(axis=0))
    return x_test, y_test_multi
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'fullscreen_mode.ui',
# licensing of 'fullscreen_mode.ui' applies.
#
# Created: Thu Jan 2 17:55:43 2020
# by: pyside2-uic running on PySide2 5.9.0~a1
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_FullscreenMode(object):
    """pyside2-uic generated UI holder for fullscreen_mode.ui.

    Do not hand-edit the layout logic: it is regenerated from the .ui file.
    """
    def setupUi(self, FullscreenMode):
        """Build the widget tree: nested vertical layouts holding a wing-view
        slot (filled by the application) and a feature-size slider."""
        FullscreenMode.setObjectName("FullscreenMode")
        FullscreenMode.resize(400, 300)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(FullscreenMode)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        # placeholder layout: the wing view widget is inserted here at runtime
        self.wingview_layout = QtWidgets.QVBoxLayout()
        self.wingview_layout.setObjectName("wingview_layout")
        self.verticalLayout.addLayout(self.wingview_layout)
        self.slider_feature_size = QtWidgets.QSlider(FullscreenMode)
        self.slider_feature_size.setOrientation(QtCore.Qt.Horizontal)
        self.slider_feature_size.setObjectName("slider_feature_size")
        self.verticalLayout.addWidget(self.slider_feature_size)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        self.retranslateUi(FullscreenMode)
        QtCore.QMetaObject.connectSlotsByName(FullscreenMode)
    def retranslateUi(self, FullscreenMode):
        """Apply translated user-visible strings."""
        FullscreenMode.setWindowTitle(QtWidgets.QApplication.translate("FullscreenMode", "Fullscreen Mode", None, -1))
if __name__ == "__main__":
    import sys
    # Manual smoke test: show the generated form standalone.
    qt_app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    form = Ui_FullscreenMode()
    form.setupUi(window)
    window.show()
    sys.exit(qt_app.exec_())
|
# Student grade lookup table (name -> grade)
ogrenciNotlari = {
    'Deniz': 8,
    'Mahir': 10,
    'İbrahim': 9,
    'Ulaş': 9.5
}
# Print "<name> <grade> aldı" for every student
for ad, notu in ogrenciNotlari.items():
    print(ad + " " + str(notu) + " aldı")
|
"""
:copyright: Michael Yusko
:license: MIT, see LICENSE for more details.
"""
__author__ = 'Michael Yusko'
__version__ = '0.1.1'
|
print("Insira 3 números reais.")
a,b,c = int(input()), int(input()), int(input())
print((a+b)*(b+c))
print(3*(a+b+c))
|
import pandas as pd
import numpy as np
from normalise_user_item_matrix import linebreak
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.neighbors import NearestNeighbors
# Normalised user-item matrix from the normalisation step; `linebreak`
# presumably holds the column offsets separating its feature groups -- TODO
# confirm against normalise_user_item_matrix.py.
matrix_new = pd.read_csv('user_item_matrix_normalised.csv')
# ID -> name mapping: drop every column in the feature range.
drop_columns_all_except_name_new = [i for i in range(linebreak[0],linebreak[5]+1)]
drop_list_new = matrix_new.columns[drop_columns_all_except_name_new].tolist()
name_matrix_new = matrix_new.drop(drop_list_new,axis=1)
name_dict_new = dict(name_matrix_new.values)
# taxonomy (taste) feature block: drop the name columns instead.
drop_columns_new = [i for i in range(1,linebreak[1]+1)]
drop_list_new = matrix_new.columns[drop_columns_new].tolist()
taxonomy_matrix_new = matrix_new.drop(drop_list_new,axis=1)
# class-score block used to accumulate recommendation scores.
drop_taxonomys_new = [i for i in range(linebreak[2],linebreak[5] + 1)]
drop_taxonomys_names_new = matrix_new.columns[drop_taxonomys_new].tolist()
classes_matrix_new = matrix_new.drop(drop_taxonomys_names_new, axis=1)
# Features X / user IDs y, and a cosine-distance 3-NN model over all users.
X_new = taxonomy_matrix_new.drop('ID',axis=1)
y_new = pd.DataFrame(taxonomy_matrix_new['ID'])
knn_new = NearestNeighbors(n_neighbors=3,algorithm='brute',metric='cosine')
knn_new.fit(X_new)
def new_user_recommendation_new(user_taste_profile):
    """Recommend the top-3 classes for a brand-new user.

    user_taste_profile: 12 taste scores matching the columns of X_new.
    Finds the user's 3 nearest neighbours (cosine kNN), weights each
    neighbour's class scores by its cosine similarity to the new user, and
    returns the 3 highest-scoring class names.

    The per-neighbour code was triplicated in the original; it is collapsed
    into a single loop with identical behaviour.
    """
    X_test_new = pd.DataFrame(np.array(user_taste_profile).reshape(1, 12), columns=X_new.columns)
    neighbours_new = knn_new.kneighbors(X_test_new, return_distance=False)
    target_new = X_test_new.to_numpy().reshape(1, 12)
    # for each neighbour: look up its name and cosine similarity, and keep its
    # class-score row for the weighted accumulation below
    neighbours_similarity_dict_new = {}
    neighbour_rows = []
    for posn in neighbours_new[0]:
        user_id = y_new.iloc[posn].values[0]
        user_name = name_dict_new[user_id]
        sim = round(cosine_similarity(target_new, X_new.iloc[posn].to_numpy().reshape(1, 12))[0][0], 3)
        neighbours_similarity_dict_new[user_name] = sim
        neighbour_rows.append((classes_matrix_new.iloc[posn], sim))
    class_names_new = classes_matrix_new.columns.tolist()
    recommended_classes_matrix_new = pd.Series(index=class_names_new, dtype=object)
    recommended_classes_matrix_new.fillna(0.0, inplace=True)
    # the first two entries are the ID/name columns, so scoring starts at 2
    for i in range(2, len(recommended_classes_matrix_new.index)):
        for contribution, sim in neighbour_rows:
            recommended_classes_matrix_new.iloc[i] += contribution.iloc[i] * sim
    recommended_classes_matrix_new.sort_values(ascending=False, inplace=True, kind='heapsort')
    top_3_new = [recommended_classes_matrix_new.index[0],
                 recommended_classes_matrix_new.index[1],
                 recommended_classes_matrix_new.index[2]]
    print("The people with the most aligned goals to you were: ")
    print(neighbours_similarity_dict_new)
    return top_3_new
# sample_prefs_new = [2,4,3,1,5,5,1,2,2,4,4,1]
# new_user_recommendation_new(sample_prefs_new)
|
from django.shortcuts import render
from django.shortcuts import HttpResponseRedirect
from django.shortcuts import Http404
# Create your views here.
from celery.result import AsyncResult
from tools.tasks import add
from .models import Add
# from tools.db import Db
import datetime
def add_1(request):
    """Queue an async add task, persist its metadata, then redirect home."""
    first = int(request.POST.get('add1'))
    second = int(request.POST.get('add2'))
    # hand the work to Celery; only the task id is kept locally
    async_result = add.delay(first, second)
    record = Add(task_id=async_result.id, first=first, second=second,
                 log_date=datetime.datetime.now())
    record.save()
    return HttpResponseRedirect("/")
# 任务结果
def result(request):
    """List task results -- not implemented yet; kept as a 404 placeholder."""
    # The Db-backed listing is parked until the tools.db module is restored:
    # db = Db()
    # rows = db.get_tasksinfo()
    # return render_to_response('result.html', {'rows': rows})
    raise Http404("Not Support for now")
|
#Saumit Madireddy
#I pledge my honor that I have abided by the Stevens Honor System.
def main():
print("This program will determine your BMI and whether or not it is healthy.")
w = eval(input("How much do you weigh (lbs) ? "))
h = eval(input("How tall (inches) are you? "))
BMI = (w * 720) / (h * h)
print("Your BMI is", BMI)
if BMI <= 25 and BMI >= 19:
print("Your BMI is healthy")
else:
print("Your BMI is not healthy")
main()
|
import os, pathlib
from pdfquery import PDFQuery
class Applicable_Federal_Rates:
    """Scrape the current AFRs (short/mid/long term) from the bundled IRS
    revenue-ruling PDF."""

    @staticmethod
    def get_pdf():
        """Open the revenue-ruling PDF stored next to this module."""
        pdf_path = os.path.join(pathlib.Path(__file__).parent.absolute(), 'current_afr_revenue_ruling.pdf')
        return PDFQuery(pdf_path)

    @staticmethod
    def get_bbox_bounds(pdf):
        """Return the bounding boxes of every '120% AFR' line, top of page first."""
        lines = pdf.extract([('afr', 'LTTextLineHorizontal:contains("120% AFR")')])
        boxes = set()
        for line in lines['afr']:
            y0 = float(pdf.pq(line).attr('y0'))
            y1 = float(pdf.pq(line).attr('y1'))
            # x-range 160..310 covers the annual-rate column of the table
            boxes.add((160, y0, 310, y1))
        return sorted(boxes, key=lambda box: box[1], reverse=True)

    @staticmethod
    def get_rates(pdf, bbox_bounds):
        """Pull the annual rate out of each term's bounding box."""
        afrs = {}
        for i, term in enumerate(['short-term', 'mid-term', 'long-term']):
            # text in the box looks like "... X.XX% Y.YY% ..."; the second
            # fragment after splitting on '%' holds the wanted rate
            cells = pdf.pq(':in_bbox("%s, %s, %s, %s")' % bbox_bounds[i]).text().split('%')
            afrs[term] = round(float(cells[1]), 3)
        return afrs

    @staticmethod
    def get_applicable_federal_rates():
        """Convenience wrapper: load page 1 and return {term: rate}."""
        pdf = Applicable_Federal_Rates.get_pdf()
        pdf.load(1)
        bbox_bounds = Applicable_Federal_Rates.get_bbox_bounds(pdf)
        return Applicable_Federal_Rates.get_rates(pdf, bbox_bounds)
|
import os
import pygame
class Game:
    """Pygame front-end for a minesweeper-style board.

    Renders the board supplied at construction time and forwards mouse
    clicks to it; the board object owns all game rules.
    """

    def __init__(self, board, screenSize):
        # board must expose getBoardSize/getCell/getWin/getLose/handleClick.
        # screenSize is (width, height) in pixels.
        self.images = {}
        self.board = board
        self.screenSize = screenSize
        # Integer division so the whole grid fits the window exactly.
        self.squareSize = self.screenSize[0] // self.board.getBoardSize(), self.screenSize[
            1] // self.board.getBoardSize()
        self.loadImages()
        self.screen = pygame.display.set_mode(self.screenSize)

    def run(self):
        """Main event loop: process quit/click events and redraw every frame."""
        pygame.init()
        running = True
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                # NOTE(review): with `or`, clicks are blocked only when BOTH
                # getWin() and getLose() are true -- confirm `and` was not intended.
                if event.type == pygame.MOUSEBUTTONDOWN and (not self.board.getWin() or not self.board.getLose()):
                    # Index 2 of get_pressed() is the right mouse button (flagging).
                    rightClick = pygame.mouse.get_pressed(num_buttons=3)[2]
                    self.handleClick(pygame.mouse.get_pos(), rightClick)
            self.draw()
            pygame.display.flip()
            if self.board.getWin():
                running = False
        pygame.quit()

    def draw(self):
        """Blit one image per cell, walking the grid left-to-right, top-to-bottom."""
        topLeft = (0, 0)
        for row in range(self.board.getBoardSize()):
            for square in range(self.board.getBoardSize()):
                cell = self.board.getCell(row, square)
                image = self.images[self.getImage(cell)]
                self.screen.blit(image, topLeft)
                # Advance one square to the right.
                topLeft = topLeft[0] + self.squareSize[0], topLeft[1]
            # Back to column 0, down one square.
            topLeft = 0, topLeft[1] + self.squareSize[1]

    def loadImages(self):
        """
        Takes the images in the 'Images' folder and stores them in a dictionary.
        """
        for fileName in os.listdir("images"):
            if fileName.endswith(".png"):
                image = pygame.image.load(r"images/" + fileName)
                # Scale every sprite to exactly one board square.
                image = pygame.transform.scale(image, self.squareSize)
                # Key by the bare file name: "flag.png" -> "flag".
                self.images[fileName.split(".")[0]] = image

    def getImage(self, cell):
        """Map a cell's state to the name of the sprite to draw for it."""
        if cell.getClicked():
            return "bomb-at-clicked-block" if cell.getHasMine() else str(cell.getNumAround())
        if self.board.getLose():
            # After a loss, reveal every mine.
            if cell.getHasMine() == 1:
                return "bomb"
        return 'flag' if cell.getFlagged() else 'empty-block'

    def handleClick(self, pos, flag):
        """Translate a pixel position into a (row, col) index and forward to the board."""
        # pos is (x, y); rows come from y, columns from x.
        index = pos[1] // self.squareSize[1], pos[0] // self.squareSize[0]
        cell = self.board.getCell(index[0], index[1])
        self.board.handleClick(index, cell, flag)
|
# Convert an image to ASCII art using pywhatkit.
from pywhatkit import image_to_ascii_art
# Input image to convert and output text file for the ASCII rendering.
source_path = 'img.png'
target_path = 'ascii_art.text'
# Render source_path as ASCII characters and write the result to target_path.
image_to_ascii_art(source_path, target_path)
|
import tensorflow as tf
import numpy as np
# NOTE(review): this script uses pre-1.0 TensorFlow APIs (tf.split(dim, n, v),
# tf.nn.rnn, tf.nn.seq2seq, initialize_all_variables) and will not run on
# modern TensorFlow without porting.
unique = 'helo'
# A language model predicts the NEXT character, so the final character of the
# sequence only ever appears as a target, never as an input.
batch_size = 1
time_step_size = 4
rnn_size = 4
y_data = [1, 2, 2, 3] # 'ello'. index from 'helo'
x_data = np.array([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,1,0]], dtype='f') # 'hell'
cells = tf.nn.rnn_cell.BasicRNNCell(4) # output size 4 (one score per symbol) -> rnn_size
state = tf.zeros([batch_size, cells.state_size]) # shape(1, 4), [[0, 0, 0, 0]]
x_data = tf.split(0, time_step_size, x_data) # one slice per RNN cell: time_step_size pieces
# tf.split divides the data into the given number of pieces and returns them
# as a list of Tensor objects.
# outputs = [shape(1, 4), shape(1, 4), shape(1, 4), shape(1, 4)]
# state = shape(1, 4)
outputs, state = tf.nn.rnn(cells, x_data, state) # inputs x_data, initial state `state`
# tf.reshape(tensor, shape, name=None)
# tf.concat(1, outputs) --> shape(1, 16)
logits = tf.reshape(tf.concat(1, outputs), [-1, rnn_size]) # shape(4, 4)
targets = tf.reshape(y_data, [-1]) # shape(4), [1, 2, 2, 3]
weights = tf.ones([time_step_size * batch_size]) # shape(4), [1, 1, 1, 1]
loss = tf.nn.seq2seq.sequence_loss_by_example([logits], [targets], [weights]) # predictions, targets, weights
cost = tf.reduce_sum(loss) / batch_size
train_op = tf.train.RMSPropOptimizer(0.01, 0.9).minimize(cost)
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    for i in range(100):
        sess.run(train_op)
        r0, r1, r2, r3 = sess.run(tf.argmax(logits, 1)) # inspect the predictions
        print(r0, r1, r2, r3, ':', unique[r0], unique[r1], unique[r2], unique[r3])
|
import requests, re
from pprint import pprint
#<a href="" class="feed-post-link gui-color-primary gui-color-hover" elementtiming="text-csr">Cidade de SP fará 'xepa' para antecipar 2ª dose da vacina; veja regras</a>
def getTitulos(url):
    """Fetch *url* and return the list of g1 feed headline texts.

    Scrapes anchors of the form
    <a href="..." class="feed-post-link gui-color-primary gui-color-hover"
       elementtiming="text-ssr">title</a>.
    """
    req = requests.get(url)
    # BUGFIX: match against the decoded page text, not str(req.content) --
    # str() on bytes keeps the b'...' repr and escapes every non-ASCII
    # character, corrupting accented titles.
    pattern = (r'(<a href=".+?" class="feed-post-link gui-color-primary '
               r'gui-color-hover" elementtiming="text-ssr">)(.+?)(<\/a>)')
    return [title for _open_tag, title, _close_tag in re.findall(pattern, req.text)]
|
from __future__ import print_function
import os
from six.moves.configparser import RawConfigParser
__author__ = 'alforbes'
try:
CONFIG_FILE = os.environ['ORLO_CONFIG']
except KeyError:
CONFIG_FILE = '/etc/orlo/orlo.ini'
config = RawConfigParser()
config.add_section('main')
config.set('main', 'debug_mode', 'false')
config.set('main', 'propagate_exceptions', 'true')
config.set('main', 'time_format', '%Y-%m-%dT%H:%M:%SZ')
config.set('main', 'time_zone', 'UTC')
config.set('main', 'strict_slashes', 'false')
config.set('main', 'base_url', 'http://localhost:8080')
config.add_section('security')
config.set('security', 'enabled', 'false')
config.set('security', 'passwd_file', 'none')
config.set('security', 'secret_key', 'change_me')
# NOTE: orlo.__init__ checks that secret_key is not "change_me" when security
# is enabled. Do not change the default here without updating __init__ as well.
config.set('security', 'token_ttl', '3600')
config.set('security', 'ldap_server', 'localhost.localdomain')
config.set('security', 'ldap_port', '389')
config.set('security', 'user_base_dn', 'ou=people,ou=example,o=test')
config.add_section('db')
config.set('db', 'uri', 'sqlite://')
config.set('db', 'echo_queries', 'false')
config.set('db', 'pool_size', '50')
config.add_section('logging')
config.set('logging', 'level', 'info')
config.set('logging', 'file', 'disabled')
config.set('logging', 'format', '%(asctime)s [%(name)s] %(levelname)s %('
'module)s:%(funcName)s:%(lineno)d - %('
'message)s')
config.add_section('deploy')
config.set('deploy', 'timeout',
'3600') # How long to timeout external deployer calls
config.add_section('deploy_shell')
config.set('deploy_shell', 'command_path',
os.path.dirname(os.path.abspath(__file__)) +
'/../deployer.py')
config.read(CONFIG_FILE)
|
import json
import DiscoveryDetails as dt

# Fetch the Watson Discovery collection details and pretty-print them as JSON.
collection_details = dt.discovery.get_collection(dt.environment_id, dt.collection_id).get_result()
print(json.dumps(collection_details, indent=2))
|
import math

# A bare comma-separated literal builds a tuple -- no parentheses needed.
t = 0.0, 5.4, -2.5, 8, 0.4
print(t)
# Every module exposes its own name via __name__.
print(math.__name__)
|
def combinations(n):
    """Return the set of all balanced strings made of n pairs of parentheses.

    BUGFIX: the original recurrence built combos only as "()"+c, "("+c+")"
    and c+"()", which misses strings such as "(())(())" for n >= 4 (13
    results instead of Catalan(4) = 14). Every balanced string decomposes
    uniquely as "(" + a + ")" + b, so recursing on that split enumerates
    every combination exactly.
    """
    if n == 0:
        # Base case: the empty arrangement.
        return {""}
    combos = set()
    for inner_size in range(n):
        # `inner` goes inside the first pair, `tail` follows it.
        for inner in combinations(inner_size):
            for tail in combinations(n - 1 - inner_size):
                combos.add("(" + inner + ")" + tail)
    return combos
def main():
    """Demo: print every balanced-parentheses combination for n=2 and n=3."""
    combos = combinations(2)
    # print(x) with a single argument works identically on Python 2 and 3;
    # the original Python-2-only print statements fail under Python 3.
    print(combos)
    combos = combinations(3)
    print(combos)

if __name__ == '__main__':
    main()
|
from django import forms
from django.core import exceptions, validators
from django.utils.translation import ugettext_lazy as _
from topnotchdev.files_widget.conf import *
class UnicodeWithAttr(str):
    """A str subclass that can carry the deleted/moved file lists alongside
    the field's string value (set by FilesFormField.compress)."""
    # Both stay None until compress() populates them.
    deleted_files = None
    moved_files = None
class FilesFormField(forms.MultiValueField):
    """Multi-value field whose compressed value is a UnicodeWithAttr string
    carrying the current files plus the deleted/moved file lists."""

    def __init__(self, max_length=None, **kwargs):
        # max_length is accepted for CharField-style callers but intentionally unused.
        super(FilesFormField, self).__init__(**kwargs)

    def compress(self, data_list):
        # data_list order: [files string, deleted_files, moved_files].
        files = UnicodeWithAttr(data_list[0])
        files.deleted_files = data_list[1]
        files.moved_files = data_list[2]
        return files

    def clean(self, value):
        """
        This is a copy of MultiValueField.clean() with a BUGFIX:
        - if self.required and field_value in validators.EMPTY_VALUES:
        + if field.required and field_value in validators.EMPTY_VALUES:
        """
        from django.forms.utils import ErrorList
        from django.core import validators
        from django.core.exceptions import ValidationError
        clean_data = []
        errors = ErrorList()
        if not value or isinstance(value, (list, tuple)):
            if not value or not [v for v in value if v not in validators.EMPTY_VALUES]:
                if self.required:
                    raise ValidationError(self.error_messages['required'])
                else:
                    return self.compress(value)
        else:
            raise ValidationError(self.error_messages['invalid'])
        for i, field in enumerate(self.fields):
            try:
                field_value = value[i]
            except IndexError:
                field_value = None
            # BUGFIX line: honour the sub-field's own `required` flag, not self's.
            if field.required and field_value in validators.EMPTY_VALUES:
                raise ValidationError(self.error_messages['required'])
            try:
                clean_data.append(field.clean(field_value))
            except ValidationError as e:
                # Collect all validation errors in a single list, which we'll
                # raise at the end of clean(), rather than raising a single
                # exception for the first error we encounter.
                errors.extend(e.messages)
        if errors:
            raise ValidationError(errors)
        out = self.compress(clean_data)
        self.validate(out)
        self.run_validators(out)
        return out
|
import mysql.connector
from mysql.connector import errorcode
#QUERYS
# NOTE(review): every statement below is filled in with %-string formatting by
# its caller rather than driver-side parameters, so any user-supplied value
# (search terms, titles, course ids) is a potential SQL-injection vector.
# Prefer cursor.execute(sql, params) placeholders.
queryThisWeek = "SELECT events.CourseID, events.Title, events.DueDate, events.Description FROM events WHERE WEEK(events.DueDate)=WEEK(CURRENT_DATE);"
queryToday = "SELECT events.CourseID, events.Title, events.DueDate, events.Description FROM events WHERE events.DueDate=CURRENT_DATE;"
queryTomorrow = "SELECT events.CourseID, events.Title, events.DueDate, events.Description FROM events WHERE events.DueDate=(CURRENT_DATE + INTERVAL 1 DAY)"
queryPassedDue = "SELECT events.CourseID, events.Title, events.DueDate, events.Description FROM events WHERE events.DueDate<CURRENT_DATE;"
queryFutureHomework = "SELECT events.CourseID, events.Title, events.DueDate, events.Description FROM events WHERE events.DueDate>=CURRENT_DATE AND events.Exam=0;"
queryFutureExams = "SELECT events.CourseID, events.Title, events.DueDate, events.Description FROM events WHERE events.DueDate>=CURRENT_DATE AND events.Exam=1;"
queryEventSearch = "SELECT events.CourseID, events.Title, events.DueDate, events.Description FROM events WHERE INSTR(events.Title, '%s') OR INSTR(events.Description, '%s');"
queryAllCourses = "SELECT courses.CourseID, courses.Place, courses.Notes FROM courses;"
queryCourseByDay = "SELECT courses.CourseID, courses.Place, courses.Notes FROM courses WHERE INSTR(courses.Days, '%s');"
#INSERTS
insertCourse = "INSERT INTO courses (CourseID, Days, Place, Notes) VALUES ('%s', '%s', '%s', '%s');"
insertEvent = "INSERT INTO events (CourseID, Title, DueDate, Description, Exam) VALUES ('%s', '%s', '%s', '%s', '%s');"
#REMOVES
removeCourse1 = "DELETE FROM courses WHERE courses.CourseID='%s';"
removeCourse2 = "DELETE FROM events WHERE events.CourseID='%s';"
removeEvent = "DELETE FROM events WHERE events.CourseID='%s' AND events.Title='%s';"
removePassedDue = "DELETE FROM events WHERE events.DueDate<CURRENT_DATE;"
#CONFIG LOADED AT INIT
# Connection settings dict; populated once by init() and reused by every call.
config = ""
def init(loadedConfig):
    """Store the connection settings and verify the database is reachable.

    Returns 1 on success, 0 on failure (the reason is printed).
    """
    global config
    config = loadedConfig
    print("Testing DB connection... ")
    #REDUNDANT
    cnx = cur = None
    try:
        cnx = mysql.connector.connect(**config)
    except mysql.connector.Error as err:
        # Distinguish the two most common setup mistakes; dump anything else.
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print('Something is wrong with your user name or password')
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
        return 0
    else:
        cur = cnx.cursor(buffered=True)
        cur.execute('use schedule;')
        print("OK!")
        return 1
    #TECHNICALLY REDUNDANT, PYTHON CLOSES ON END OF DEF
    finally:
        if cur:
            cur.close()
        if cnx:
            cnx.close()
#LOOKUPS
def _fetch(query, formatter):
    """Open a connection, run *query*, and return formatter(cursor).

    Returns 0 when the query matches no rows. BUGFIX: the original
    per-function copies never closed their connections, leaking one
    connection per call; the finally block guarantees cleanup.
    """
    global config
    cnx = mysql.connector.connect(**config)
    try:
        cur = cnx.cursor(buffered=True)
        cur.execute('use schedule;')
        cur.execute(query)
        if not cur.rowcount:
            return 0
        # Buffered cursor: rows are already client-side, so formatting is
        # safe even though the connection closes right after the return.
        return formatter(cur)
    finally:
        cnx.close()

def upcoming_homeworks():
    """Future (not past-due) homework events, formatted with due dates."""
    return _fetch(queryFutureHomework, format_result_events_wDate)

def upcoming_exams():
    """Future exam events, formatted with due dates."""
    return _fetch(queryFutureExams, format_result_events_wDate)

def upcoming_this_week():
    """Events due in the current calendar week."""
    return _fetch(queryThisWeek, format_result_events_wDate)

def upcoming_today():
    """Events due today (no date in the output)."""
    return _fetch(queryToday, format_result_events)

def upcoming_tomorrow():
    """Events due tomorrow (no date in the output)."""
    return _fetch(queryTomorrow, format_result_events)

def passed_due():
    """Events whose due date has already passed."""
    return _fetch(queryPassedDue, format_result_events)

def search_assignments(searchTerm):
    """Events whose title or description contains searchTerm.

    NOTE(review): searchTerm is interpolated into the SQL string; switch to
    parameterized queries if this ever receives untrusted input.
    """
    return _fetch(queryEventSearch % (searchTerm, searchTerm), format_result_events_wDate)

def courses_all():
    """Every course on record."""
    return _fetch(queryAllCourses, format_result_courses)

def courses_day(givenDay):
    """Courses that meet on the given day string."""
    return _fetch(queryCourseByDay % (givenDay), format_result_courses)
#INSERTS
def insert_course(givenCourseID, givenDays, givenPlace, givenNotes):
    """Insert one course row. Returns the affected row count, or 0 on failure.

    BUGFIX: the connection is now always closed (the original leaked it).
    """
    global config
    cnx = mysql.connector.connect(**config)
    try:
        cur = cnx.cursor(buffered=True)
        cur.execute('use schedule;')
        cur.execute(insertCourse % (givenCourseID, givenDays, givenPlace, givenNotes))
        if not cur.rowcount:
            return 0
        cnx.commit()
        return cur.rowcount
    finally:
        cnx.close()

def insert_event(givenCourseID, givenTitle, givenDueDate, givenDescription, givenExam=None):
    """Insert one event row; Exam defaults to 0 (homework) when not given.

    Returns the affected row count, or 0 on failure. Always closes the connection.
    """
    if givenExam is None:
        givenExam = 0
    global config
    cnx = mysql.connector.connect(**config)
    try:
        cur = cnx.cursor(buffered=True)
        cur.execute('use schedule;')
        cur.execute(insertEvent % (givenCourseID, givenTitle, givenDueDate, givenDescription, givenExam))
        if not cur.rowcount:
            return 0
        cnx.commit()
        return cur.rowcount
    finally:
        cnx.close()
#REMOVES
def remove_course(givenCourseID):
    """Delete a course and all of its events.

    Returns 1 on success, 0 when the course does not exist.
    BUGFIX: the connection is now always closed (the original leaked it).
    """
    global config
    cnx = mysql.connector.connect(**config)
    try:
        cur = cnx.cursor(buffered=True)
        cur.execute('use schedule;')
        cur.execute(removeCourse1 % (givenCourseID))
        if not cur.rowcount:
            return 0
        # Cascade by hand: also drop the course's events, then commit both deletes.
        cur.execute(removeCourse2 % (givenCourseID))
        cnx.commit()
        return 1
    finally:
        cnx.close()

def remove_event(givenCourseID, givenTitle):
    """Delete one event identified by course + title. Returns rows removed, 0 if none."""
    global config
    cnx = mysql.connector.connect(**config)
    try:
        cur = cnx.cursor(buffered=True)
        cur.execute('use schedule;')
        cur.execute(removeEvent % (givenCourseID, givenTitle))
        if not cur.rowcount:
            return 0
        cnx.commit()
        return cur.rowcount
    finally:
        cnx.close()

def prune_events():
    """Delete every past-due event. Returns rows removed, 0 if none."""
    global config
    cnx = mysql.connector.connect(**config)
    try:
        cur = cnx.cursor(buffered=True)
        cur.execute('use schedule;')
        cur.execute(removePassedDue)
        if not cur.rowcount:
            return 0
        cnx.commit()
        return cur.rowcount
    finally:
        cnx.close()
#FORMATTING
def format_result_events(cursor):
    """One line per event: 'CourseID: Title, Description' (due date omitted)."""
    lines = ["{}: {}, {}\n".format(course_id, title, description)
             for (course_id, title, _due, description) in cursor]
    return "".join(lines)

def format_result_events_wDate(cursor):
    """One line per event, including the due date rendered as 'DD Mon YYYY'."""
    lines = ["{}, {}, Due on {:%d %b %Y}: {}.\n".format(course_id, title, due, description)
             for (course_id, title, due, description) in cursor]
    return "".join(lines)

def format_result_courses(cursor):
    """One line per course: 'CourseID at Place: Notes'."""
    lines = ["{} at {}: {}\n".format(course_id, place, notes)
             for (course_id, place, notes) in cursor]
    return "".join(lines)
|
spam = {
    'color': 'red', 'age': 42
}
# Iterating a dict yields its keys directly; calling .keys() is redundant.
for k in spam:
    print(k)
|
import os
import torch
import numpy as np
import argparse
import random
import yaml
from easydict import EasyDict
import gensim
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import torch.optim as optim
import data_helpers
from models.standard import *
# Command-line options: which YAML config to load and an optional checkpoint
# to resume training from.
parser = argparse.ArgumentParser(description='PyTorch for image-user CNN')
parser.add_argument('--config', default='config.yaml')
parser.add_argument('--resume', default='', type=str, help='path to checkpoint') # add the resume option
class ImageFolder(data.Dataset):
    """Dataset over the image features and labels produced by data_helpers."""

    def __init__(self, home_data, work_data, school_data, restaurant_data,
                 shopping_data, cinema_data, sports_data, travel_data):
        # load_data_and_labels returns a 4-tuple; only the image features
        # (2nd element) and the labels (4th element) are used here.
        loaded = data_helpers.load_data_and_labels(home_data, work_data,
                                                   school_data, restaurant_data,
                                                   shopping_data, cinema_data,
                                                   sports_data, travel_data)
        _, images, _, labels = loaded
        self.x_image = torch.Tensor(np.array(images, dtype="float64"))
        self.y = torch.Tensor(np.array(labels, dtype="float64"))

    def __getitem__(self, index):
        """Return one (image, label) pair."""
        return self.x_image[index], self.y[index]

    def __len__(self):
        return len(self.x_image)
def cal_acc(pred_img, y):
    """Percentage of samples whose arg-max prediction matches the arg-max label.

    pred_img: torch tensor of per-class scores, shape (batch, classes).
    y: array-like of one-hot labels with the same shape.
    """
    # argmax picks the first maximal entry per row -- equivalent to the
    # original one-hot (== row max) conversion followed by argmax.
    predicted = [int(np.argmax(row)) for row in pred_img.numpy()]
    expected = [int(np.argmax(row)) for row in y]
    hits = sum(1 for p, e in zip(predicted, expected) if p == e)
    return hits / len(predicted) * 100
def save_state(state, path, epoch):
    """Write a checkpoint dict to '<path>params_<epoch>.pth'."""
    print("=> saving checkpoint of epoch " + str(epoch))
    checkpoint_file = path + 'params_' + str(epoch) + '.pth'
    torch.save(state, checkpoint_file)
def load_state(path, netI, optimizerI):
    """Restore model and optimizer state from a checkpoint file.

    Returns the epoch to resume from (checkpoint epoch + 1), or 0 when the
    checkpoint file does not exist. BUGFIX: the original returned None in
    the missing-file case, which crashed the caller's
    range(last_epoch, ...) with a TypeError.
    """
    if not os.path.isfile(path):
        print("=> no checkpoint found at '{}'".format(path))
        return 0
    print("=> loading checkpoint '{}'".format(path))
    checkpoint = torch.load(path)
    netI.load_state_dict(checkpoint['state_dictI'])
    optimizerI.load_state_dict(checkpoint['optimizerI'])
    return checkpoint['epoch'] + 1
def main():
    """Train the image CNN: parse args and config, build the model, run the loop."""
    global args, config
    args = parser.parse_args()
    with open(args.config) as f:
        # BUGFIX: yaml.load without an explicit Loader is deprecated and
        # unsafe; FullLoader matches the old default for trusted config files.
        config = EasyDict(yaml.load(f, Loader=yaml.FullLoader))
    # Fall back to CPU when no GPU is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Seed everything for reproducibility; cudnn.benchmark trades exact
    # determinism for speed.
    print("Random Seed: ", config.seed)
    random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    cudnn.benchmark = True
    netI = BottlenetI()
    criterion = nn.CrossEntropyLoss()
    netI = netI.to(device)
    criterion = criterion.to(device)
    optimizerI = optim.Adam(netI.parameters(), lr=config.lr_img)
    last_epoch = 0
    if args.resume:
        last_epoch = load_state(args.resume, netI, optimizerI)
    train_dataset = ImageFolder(config.train.home_data, config.train.work_data, config.train.school_data,
                                config.train.restaurant_data, config.train.shopping_data, config.train.cinema_data,
                                config.train.sports_data, config.train.travel_data)
    train_dataloader = data.DataLoader(train_dataset, batch_size=config.batch_size,
                                       shuffle=True, pin_memory=True, num_workers=int(config.workers))
    for epoch in range(last_epoch, config.epoch - 1):
        for iter, [image, y] in enumerate(train_dataloader):
            netI.zero_grad()
            image, y = image.to(device), y.to(device)
            pred_img = netI(image)
            # CrossEntropyLoss expects class indices, hence argmax over one-hot y.
            err_img = criterion(pred_img, y.argmax(dim=1))
            err_img.backward()
            optimizerI.step()
        if (epoch + 1) % config.val_freq == 0:
            val(netI.eval(), device)
        if (epoch + 1) % config.save_freq == 0:
            save_state({'state_dictI': netI.state_dict(),
                        'optimizerI': optimizerI.state_dict(),
                        'epoch': epoch}, config.img_save_path, epoch)
def val(netI, device):
    """Evaluate netI on the full train and validation splits and print accuracy.

    NOTE(review): both splits are loaded and moved to `device` in one shot,
    which only works for datasets small enough to fit in memory.
    """
    _, tr_image, _, tr_y = data_helpers.load_data_and_labels(config.train.home_data, config.train.work_data,
                                                             config.train.school_data,
                                                             config.train.restaurant_data,
                                                             config.train.shopping_data,
                                                             config.train.cinema_data,
                                                             config.train.sports_data,
                                                             config.train.travel_data)
    _, val_image, _, val_y = data_helpers.load_data_and_labels(config.val.home_data, config.val.work_data,
                                                               config.val.school_data,
                                                               config.val.restaurant_data,
                                                               config.val.shopping_data,
                                                               config.val.cinema_data,
                                                               config.val.sports_data,
                                                               config.val.travel_data)
    tr_image = torch.Tensor(np.array(tr_image, dtype="float64"))
    tr_y = np.array(tr_y, dtype="float64")
    val_image = torch.Tensor(np.array(val_image, dtype="float64"))
    val_y = np.array(val_y, dtype="float64")
    tr_image, val_image = tr_image.to(device), val_image.to(device)
    # Inference only: skip autograd bookkeeping.
    with torch.no_grad():
        pred_tr_img = netI(tr_image)
        pred_val_img = netI(val_image)
    tr_image_acc = cal_acc(pred_tr_img.cpu(), tr_y)
    val_image_acc = cal_acc(pred_val_img.cpu(), val_y)
    print("image accuracy | train: %.3f %% | test: %.3f %% " % (tr_image_acc, val_image_acc))
# Entry point: only train when executed directly, not on import.
if __name__ == "__main__":
    main()
|
#-*- coding: utf-8 -*-
import random
from lib.base_entity import BaseEntity
from lib.base_animation import BaseAnimation
class LifeAnimation(BaseAnimation):
    """Custom class: sprite animation for the Life pickup (two-frame blink)."""
    WIDTH_SPRITE = 16
    HEIGHT_SPRITE = 17

    def get_sprite(self, move_direction):
        """Cut the current frame from the vertically-stacked sprite sheet.

        The pickup looks the same from every side, so move_direction is unused.
        """
        return self.subsurface(
            0,
            self.frame * self.HEIGHT_SPRITE,
            self.WIDTH_SPRITE,
            self.HEIGHT_SPRITE
        ).convert_alpha()

    def update(self):
        """Advance the blink: frame 0 -> 1 at random (~1% per tick),
        frame 1 -> 0 once the delay counter runs out."""
        if self.frame == 0:
            # Roughly 1-in-100 chance per update to start the blink.
            if random.random() * 100 > 99:
                self.frame = (self.frame + 1) % self.max_frame
                self.frame_delay = self.max_frame_delay
        elif self.frame_delay < 0:
            self.frame = (self.frame + 1) % self.max_frame
            self.frame_delay = self.max_frame_delay
        else:
            self.frame_delay = self.frame_delay - 1
class Life(BaseEntity):
    """Custom class: the Life pickup entity (stationary, blinking sprite)."""

    def __init__(self, rect_data):
        # Stationary entity: zero speed, two animation frames, no base delay.
        super(Life, self).__init__(
            name='Life',
            rect_data=rect_data,
            speed=[0, 0],
            max_frame=2,
            max_frame_delay=0,
            img='img/Life.png'
        )

    def init_animation(self, max_frame, max_frame_delay, img):
        """Use the dedicated LifeAnimation instead of the base animation."""
        return LifeAnimation(max_frame, max_frame_delay, img)

    def update(self, movement=None):
        """Advance the blink animation; `movement` is accepted but unused."""
        self.animation.update()
        self.setup_animation(self.direction)
|
# Still learning Python I love Python
# If you find some problem or can make this code much better than please make so that I know where is gap in knowledge
# THANK YOU!!!!!
from random import randint

# Pick the computer's move. BUGFIX: compare with ==, not "is" -- "is" tests
# object identity and only worked here because CPython caches small ints
# (it raises a SyntaxWarning on Python 3.8+).
choice = randint(1, 3)
if choice == 1:
    computerMove = "Rock"
elif choice == 2:
    computerMove = "Paper"
else:
    computerMove = "Scissor"
print(computerMove)
userMove = input("Enter Rock (r) Paper (p) Scissor (s)").lower()
if computerMove.lower() == "rock" and userMove.lower() == "r":
    userMove = "Rock" # Rewrite the single-letter input as the full word so the output reads nicely
    winner = f"Draw Computer plays \'{computerMove}\' and User plays \'{userMove}\'"
elif computerMove.lower() == "paper" and userMove.lower() == "p":
    userMove = "Paper"
    winner = f"Draw Computer plays \'{computerMove}\' and User plays \'{userMove}\'"
elif computerMove.lower() == "scissor" and userMove.lower() == "s":
    userMove = "Scissor"
    winner = f"Draw Computer plays \'{computerMove}\' and User plays \'{userMove}\'"
elif computerMove.lower() == "rock" and userMove.lower() == "p":
    userMove = "Paper"
    winner = f"User wins plays \'{userMove}\' over Computer\'s \'{computerMove}\'"
elif computerMove.lower() == "paper" and userMove.lower() == "r":
    userMove = "Rock"
    winner = f"Computer wins plays \'{computerMove}\' over User\'s \'{userMove}\'"
elif computerMove.lower() == "scissor" and userMove.lower() == "p":
    userMove = "Paper"
    winner = f"Computer wins plays \'{computerMove}\' over User plays \'{userMove}\'"
elif computerMove.lower() == "paper" and userMove.lower() == "s":
    userMove = "Scissor"
    winner = f"User wins plays \'{userMove} over Computer plays \'{computerMove}\'"
elif computerMove.lower() == "rock" and userMove.lower() == "s":
    userMove = "Scissor"
    winner = f"Computer wins plays \'{computerMove}\' over User plays \'{userMove}\'"
elif computerMove.lower() == "scissor" and userMove.lower() == "r":
    # BUGFIX: was `userMove.lower() == "rock"`, which never matched because
    # the user types a single letter; rock-vs-scissor fell through to `else`.
    userMove = "Rock"
    winner = f"User wins plays \'{userMove}\' over Computer plays \'{computerMove}\'"
else:
    print(f"No this not allowed {userMove.capitalize()}")
try: # `winner` is unbound when the input was invalid, hence the NameError guard
    if winner[0] == "D": # == not "is": compare string values, not identities
        print(winner)
except NameError:
    print("")
else:
    print("Doing Calculation")
    x = 10
    print('_____' * x)
    print('_____' * x)
    print("\n", winner)
    print('_____' * x)
    print('_____' * x)
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2017-2020, SCANOSS Ltd. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
Winnowing Algorithm implementation for SCANOSS.
This module implements an adaptation of the original winnowing algorithm by S. Schleimer, D. S. Wilkerson and A. Aiken
as described in their seminal article which can be found here: https://theory.stanford.edu/~aiken/publications/papers/sigmod03.pdf
The winnowing algorithm is configured using two parameters, the gram size and the window size. For SCANOSS the values need to be:
- GRAM: 30
- WINDOW: 64
The result of performing the Winnowing algorithm is a string called WFP (Winnowing FingerPrint). A WFP contains optionally
the name of the source component and the results of the Winnowing algorithm for each file.
EXAMPLE output: test-component.wfp
component=f9fc398cec3f9dd52aa76ce5b13e5f75,test-component.zip
file=cae3ae667a54d731ca934e2867b32aaa,948,test/test-file1.c
4=579be9fb
5=9d9eefda,58533be6,6bb11697
6=80188a22,f9bb9220
10=750988e0,b6785a0d
12=600c7ec9
13=595544cc
18=e3cb3b0f
19=e8f7133d
file=cae3ae667a54d731ca934e2867b32aaa,1843,test/test-file2.c
2=58fb3eed
3=f5f7f458
4=aba6add1
8=53762a72,0d274008,6be2454a
10=239c7dfa
12=0b2188c9
15=bd9c4b10,d5c8f9fb
16=eb7309dd,63aebec5
19=316e10eb
[...]
Where component is the MD5 hash and path of the component container (It could be a path to a compressed file or a URL).
file is the MD5 hash, file length and file path being fingerprinted, followed by
a list of WFP fingerprints with their corresponding line numbers.
"""
import hashlib
from crc32c import crc32
# Winnowing configuration. DO NOT CHANGE.
GRAM = 30
WINDOW = 64
# ASCII characters
ASCII_0 = 48
ASCII_9 = 57
ASCII_A = 65
ASCII_Z = 90
ASCII_a = 97
ASCII_z = 122
ASCII_LF = 10
ASCII_BACKSLASH = 92
MAX_CRC32 = 4294967296


def normalize(byte):
    """Normalize a given byte as an ASCII character for fingerprinting.

    Digits and lowercase letters pass through unchanged, uppercase letters
    are lowercased, and every other byte maps to 0 (i.e. is ignored).

    Parameters
    ----------
    byte : int
        The byte to normalize

    Returns
    -------
    int
        The normalized byte, or 0 when the byte is not alphanumeric.
    """
    if byte < ASCII_0:
        return 0
    if byte > ASCII_z:
        return 0
    if byte <= ASCII_9:
        return byte
    if byte >= ASCII_a:
        return byte
    # Consistency fix: use the named constants instead of bare 65/90.
    if (byte >= ASCII_A) and (byte <= ASCII_Z):
        return byte + 32
    return 0
def diff_to_wfp(diff, md5, src_path):
    """Convert a parsed diff structure into a WFP document.

    Parameters
    ----------
    diff : dict
        Maps each filename in the diff to its contents as a list of strings.
    md5 : str
    src_path : str
        Path to the component.
    """
    if not diff:
        return ''
    # Component header line, followed by one WFP section per file.
    parts = ['component={0},{1}\n'.format(md5, src_path)]
    for filename, changed_lines in diff.items():
        parts.append(wfp_for_file(filename, '\n'.join(changed_lines).encode()))
    return ''.join(parts)
def wfp_for_file(file: str, contents: bytes) -> str:
    """ Returns the WFP for a file by executing the winnowing algorithm over its contents.

    The output is a 'file=' header line followed by one line per source line
    that produced fingerprints: '<line>=<hash>[,<hash>...]'.

    Parameters
    ----------
    file: str
        The name of the file
    contents : bytes
        The full contents of the file as a byte array.
    """
    file_md5 = hashlib.md5(
        contents).hexdigest()
    # Print file line
    wfp = 'file={0},{1},{2}\n'.format(file_md5, len(contents), file)
    # Initialize variables
    gram = ""
    window = []
    normalized = 0
    line = 1
    min_hash = MAX_CRC32
    last_hash = MAX_CRC32
    last_line = 0
    output = ""
    # Otherwise recurse src_content and calculate Winnowing hashes
    for byte in contents:
        if byte == ASCII_LF:
            line += 1
            normalized = 0
        else:
            normalized = normalize(byte)
        # Is it a useful byte? (only alphanumerics contribute to grams)
        if normalized:
            # Add byte to gram
            gram += chr(normalized)
            # Do we have a full gram?
            if len(gram) >= GRAM:
                gram_crc32 = crc32(gram.encode('ascii'))
                window.append(gram_crc32)
                # Do we have a full window?
                if len(window) >= WINDOW:
                    # Select minimum hash for the current window
                    min_hash = min(window)
                    # Is the minimum hash a new one?
                    if min_hash != last_hash:
                        # Hashing the hash will result in a better balanced resulting data set
                        # as it will counter the winnowing effect which selects the "minimum"
                        # hash in each window
                        crc = crc32((min_hash).to_bytes(4, byteorder='little'))
                        crc_hex = '{:08x}'.format(crc)
                        if last_line != line:
                            # New source line: flush the pending output line first.
                            if output:
                                wfp += output + '\n'
                            output = "%d=%s" % (line, crc_hex)
                        else:
                            # Same source line: append to the comma-separated hash list.
                            output += ',' + crc_hex
                        last_line = line
                        last_hash = min_hash
                    # Shift window
                    window.pop(0)
                # Shift gram
                gram = gram[1:]
    # Flush the final pending line, if any.
    if output:
        wfp += output + '\n'
    return wfp
|
def countArrangement(N):
    """Count 'beautiful arrangements' (LeetCode 526): permutations p of 1..N
    where, for every 1-indexed position i, p[i] % i == 0 or i % p[i] == 0.

    BUGFIX: the original draft stored the availability table in
    `available_dict` but appended to the undefined `available_list`
    (a NameError), used Python-2-only xrange, and never actually counted
    anything. This completes it with a bitmask backtracking search.
    E.g. N == 4 has the 8 arrangements the original comments enumerated.
    """
    # available[pos] lists every value that may legally occupy `pos`.
    available = {}
    for pos in range(1, N + 1):
        available[pos] = [v for v in range(1, N + 1) if pos % v == 0 or v % pos == 0]

    def _count(pos, used):
        # `used` is a bitmask of the values already placed.
        if pos > N:
            return 1
        total = 0
        for v in available[pos]:
            bit = 1 << v
            if not used & bit:
                total += _count(pos + 1, used | bit)
        return total

    return _count(1, 0)
|
class Solution(object):
    """LeetCode 1539: find the k-th missing positive integer."""

    def findKthPositive(self, arr, k):
        """Walk the positive integers, skipping those present in the sorted
        array *arr*, and return the k-th one that is missing."""
        size = len(arr)
        cursor = 0   # next unmatched index into arr
        missing = 0  # how many absent integers seen so far
        # At most len(arr) + k candidates need checking.
        for candidate in range(1, size + k + 1):
            if cursor < size and candidate == arr[cursor]:
                cursor += 1
            else:
                missing += 1
                if missing == k:
                    return candidate
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import Any, Type, TypeVar, cast
from pants.util.frozendict import FrozenDict
_T = TypeVar("_T")
class RunId(int):
    """A unique id for a single run or `--loop` iteration of Pants within a single Scheduler.

    RunIds are not comparable across Scheduler instances, and only equality is meaningful, not
    ordering. Subclassing `int` keeps a RunId usable anywhere a plain integer is expected.
    """
class SessionValues(FrozenDict[Type, Any]):
    """Session-scoped values exposed to @rules, keyed by type.

    Each type provided here should generally get a small rule returning it,
    so consumers can request the type directly instead of querying
    `SessionValues` themselves.
    """

    def __getitem__(self, item: Type[_T]) -> _T:
        try:
            value = super().__getitem__(item)
        except KeyError:
            # Re-raise with a message naming the missing type.
            raise KeyError(f"Expected {item.__name__} to be provided via SessionValues.")
        return cast(_T, value)
|
# Python 2 script. Read n (the starting number) and k (the number of
# divisor-expansion rounds) from one space-separated stdin line.
n,k = [int(x) for x in raw_input().split(" ")]
# list3 holds the numbers of the current round; list1 collects the divisors
# produced during a round.
list3=[n]
list1=[]
def divOf(z):
    """Append every divisor of z (including 1 and z itself) to the global list1.

    NOTE(review): relies on Python 2 integer division (z/2); under Python 3
    this would need z//2. The length check caps runaway growth.
    """
    global list1
    if z==1:
        list1.append(1)
    elif z==2 or z==3:
        # 2 and 3 are prime: divisors are just 1 and z.
        list1.append(1)
        list1.append(z)
    else:
        list1.append(1)
        # Trial division up to z/2 finds every proper divisor.
        for j in range(2,(z/2)+1):
            if z%j==0:
                list1.append(j)
            if len(list1)>10000000:
                break
        list1.append(z)
def divList(list2):
    """Run divOf over every number in list2, accumulating into global list1.

    Numbers >= 963761198400 are divided by 10000 first (NOTE(review):
    presumably to keep trial division tractable -- confirm intent).
    Stops early once list1 exceeds 100000 entries.
    """
    global list1
    for i in list2:
        if i>=963761198400:
            i=i/10000
            divOf(i)
            if len(list1)>100000:
                break
        else:
            divOf(i)
            if len(list1)>100000:
                break
# Expand k rounds: each round replaces list3 with the divisors of all of its
# members. The number of rounds is capped at 100000.
if k>=100000:
    for i in range(100000):
        divList(list3)
        list3=list1[:]
        list1=[]
else:
    for i in range(k):
        divList(list3)
        if len(list1)>100000:
            break
        else:
            list3=list1[:]
            list1=[]
# Python 2 print statement with trailing comma: space-separated, no newline.
for i in list3:
    print i,
|
# predictor
from data.handpose_data2 import UCIHandPoseDataset
from model.lstm_pm import LSTM_PM
from src.utils import *
# from __future__ import print_function
import argparse
import pandas as pd
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
from collections import OrderedDict
from torch.utils.data import DataLoader
from printer import pred_images
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"  # restrict this process to GPUs 0 and 1
# add parameter
learning_rate = 8e-6  # NOTE(review): not used in this evaluation script
batch_size = 1
# save_dir = ckpt
cuda = 1
# hyper parameter
temporal = 5  # frames per input sequence
test_data_dir = './dataset/train_data'
test_label_dir = './dataset/train_label'
# model_epo = [50]
# load data (train=False: evaluation mode of the dataset)
test_data = UCIHandPoseDataset(data_dir=test_data_dir, label_dir=test_label_dir, temporal=temporal, train=False)
print('Test dataset total number of images sequence is ----' + str(len(test_data)))
test_dataset = DataLoader(test_data, batch_size=batch_size, shuffle=False)
def load_model():
    """Build the LSTM-PM network on the GPU and load the epoch-70 checkpoint."""
    model = LSTM_PM(T=temporal).cuda()
    model.load_state_dict(torch.load('./ckpt2/ucihand_lstm_pm70.pth'))
    return model
# **************************************** test all images ****************************************
print('********* test data *********')
net = load_model()
net.eval()  # inference mode: disables dropout / batch-norm updates

outp = []
for step, (images, label_map, center_map, imgs) in enumerate(test_dataset):
    images = Variable(images.cuda())  # 4D Tensor
    # Batch_size * (temporal * 3) * width(368) * height(368)
    label_map = Variable(label_map.cuda())  # 5D Tensor
    # Batch_size * Temporal * joint * 45 * 45
    center_map = Variable(center_map.cuda())  # 4D Tensor
    # Batch_size * 1 * width(368) * height(368)

    predict_heatmaps = net(images, center_map)  # get a list size: temporal * 4D Tensor
    # Drop the first element — presumably the initial-stage heatmap; TODO confirm
    predict_heatmaps = predict_heatmaps[1:]
    out = pred_images(predict_heatmaps, step, temporal=temporal)
    # One CSV of predicted values per sequence
    pd.DataFrame(out).to_csv('./values/'+str(step)+'.csv', header=None, index=None)
    # outp.append(out)
# print(outp[1].max)
|
# coding: utf-8
# Morphological closing demo: closing is the reverse of opening — a dilation
# followed by an erosion. It is useful for filling small holes inside
# foreground objects, or removing small black points on an object.
import cv2 as cv
import numpy as np

image = cv.imread('D:/python_file/Opencv3_study_file/images/closing.png')
kernel = np.ones((5, 5), np.uint8)
closed = cv.morphologyEx(image, cv.MORPH_CLOSE, kernel)

cv.imshow('img', image)
cv.imshow('erosion', closed)
cv.waitKey(0)
cv.destroyAllWindows()
|
from mtd import Document
from xl import StandardExporter

# Parse an .mtd document and export it to an .xlsx workbook.
doc = Document('test\\otto\\test.mtd')
doc.parse()
xl = StandardExporter(doc, 'test\\otto\\test.xlsx')
xl.export()
xl.save()
print('OK')
|
import sys
import win32gui, win32con
from pprint import pprint
e=sys.exit
def windowEnumerationHandler(hwnd, top_windows):
    """EnumWindows callback: record each window's handle together with its title."""
    title = win32gui.GetWindowText(hwnd)
    top_windows.append((hwnd, title))
if __name__ == "__main__":
    # Find the Jabber window, restore it, bring it to the foreground, and
    # (debug) print its geometry.
    results = []  # NOTE(review): unused
    top_windows = []
    win32gui.EnumWindows(windowEnumerationHandler, top_windows)
    for i in top_windows:
        if "jabber" in i[1].lower():
            print (i)
            #(left, top, right, bottom)
            l,t, r,b = win32gui.GetWindowRect(i[0])
            #win32gui.ShowWindow(i[0],5)
            win32gui.ShowWindow(i[0], win32con.SW_RESTORE)
            #win32gui.SetFocus(i[0])
            win32gui.SetForegroundWindow(i[0])
            #win32gui.SetWindowPos(i[0], win32con.HWND_TOPMOST, 0, 0, 300, 300, 0)
            #win32gui.ShowWindow(i[0],5)
            #win32gui.SetForegroundWindow(i[0])
            print(dir(win32gui))
            if l>0:
                print(l,t, r,b)
                w=r-l
                h=b-t
                # NOTE(review): e is sys.exit, so the two statements below are
                # unreachable — looks like deliberate debugging leftovers; confirm
                e(0)
                win32gui.SetWindowPos(i[0], win32con.HWND_TOPMOST, l,t, w,h, 0)
                #win32gui.SetWindowPos(i[0], win32con.HWND_TOPMOST, 0,0, 300,300, 0)
                print (win32gui.GetWindowRect(i[0]))
            break
|
import requests
from api import http
def test_adjust_paging_with_no_params():
    """A bare URL gains a `?per_page=N` query string."""
    client = http.Http(lambda: requests.Session())
    base = "https://gitlab.com/api/v4/projects/14171783/jobs"
    assert client.__adjust_paging__(base, 20) == base + "?per_page=20"
def test_adjust_paging_with_existing_params():
    """per_page is appended with `&` when a query string already exists."""
    client = http.Http(lambda: requests.Session())
    url = "https://gitlab.com/api/v4/projects/14171783/jobs?scope=success&scope=failed"
    assert client.__adjust_paging__(url, 20) == url + "&per_page=20"
def test_adjust_paging_with_existing_per_page_query_param():
    """An existing per_page value is replaced rather than duplicated."""
    client = http.Http(lambda: requests.Session())
    url = "https://gitlab.com/api/v4/projects/14171783/jobs?scope=success&scope=failed&per_page=20"
    expected = "https://gitlab.com/api/v4/projects/14171783/jobs?scope=success&scope=failed&per_page=10"
    assert client.__adjust_paging__(url, 10) == expected
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
# class ImdbPipeline(object):
# def process_item(self, item, spider):
# return item
import pymongo
import re
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy import log
import pprint
class ImdbPipeline(object):
    """Scrapy pipeline that upserts scraped items into MongoDB, keyed by 'name'.

    NOTE(review): `scrapy.conf.settings`, `scrapy.log`, and pymongo's
    `Collection.update` are all deprecated in modern versions; consider
    `crawler.settings`, `spider.logger`, and `update_one()` instead.
    """
    def __init__(self):
        # Connection parameters come from the project's Scrapy settings
        connection = pymongo.MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        self.db = connection[settings['MONGODB_DB']]
        # self.collection = self.db['imdb_top250']
        self.collection = self.db['imdb_top250tv']
        # self.collection = self.db['raj']

    def process_item(self, item, spider):
        # Upsert by name so re-crawls update documents instead of duplicating them
        self.collection.update(
            {'name':item['name']}
            ,dict(item)
            ,upsert = True)
        log.msg("Quote added to MongoDB !", level=log.DEBUG, spider=spider)
        return item
|
# Word lookup table: units, teens, and tens. Entries 11-90 are lower-case;
# callers must .capitalize() them before use.
s = {
    1: 'One',
    2: 'Two',
    3: 'Three',
    4: 'Four',
    5: 'Five',
    6: 'Six',
    7: 'Seven',
    8: 'Eight',
    9: 'Nine',
    10: 'Ten',
    11: 'eleven',
    12: 'twelve',
    13: 'thirteen',
    14: 'fourteen',
    15: 'fifteen',
    16: 'sixteen',
    17: 'seventeen',
    18: 'eighteen',
    19: 'nineteen',
    20: 'twenty',
    30: 'thirty',
    40: 'forty',
    50: 'fifty',
    60: 'sixty',
    70: 'seventy',
    80: 'eighty',
    90: 'ninety'
}

def _under_thousand(n):
    """English words for 1 <= n <= 999, every word capitalized."""
    if n in s:
        return s[n].capitalize()
    if n < 100:
        return s[n // 10 * 10].capitalize() + ' ' + s[n % 10].capitalize()
    head = s[n // 100].capitalize() + ' Hundred'
    rest = n % 100
    if rest == 0:
        return head
    return head + ' ' + _under_thousand(rest)

def numbertoletter(n):
    """Convert a non-negative integer to its English words.

    Fixes the original digit-length dispatch, which dropped the scale word for
    numbers such as 100456 (it produced "One Hundred Four Hundred Fifty Six"
    instead of "One Hundred Thousand Four Hundred Fifty Six").
    """
    n = int(n)
    if n == 0:
        return 'Zero'
    parts = []
    # Peel off each named scale from largest to smallest; the leading group
    # may itself exceed 999 (e.g. thousands of trillions), so recurse.
    for value, word in ((10 ** 12, 'Trillion'), (10 ** 9, 'Billion'),
                        (10 ** 6, 'Million'), (10 ** 3, 'Thousand')):
        group, n = divmod(n, value)
        if group:
            parts.append(numbertoletter(group) + ' ' + word)
    if n:
        parts.append(_under_thousand(n))
    return ' '.join(parts)
# Read t test cases; each subsequent line is one integer to spell out.
t = int(input())
for _ in range(t) :
    n = int(input())
    print(numbertoletter(n).strip())
|
import functools
def pjax(pjax_template=None):
    """Decorator factory: swap a TemplateResponse's template for a PJAX variant.

    When the request carries the X-PJAX header, the wrapped view's response
    is retargeted to `pjax_template` if given, otherwise to a derived name
    ("page.html" -> "page-pjax.html", "page" -> "page-pjax").
    """
    def pjax_decorator(view):
        @functools.wraps(view)
        def _view(request, *args, **kwargs):
            response = view(request, *args, **kwargs)
            # this is lame. what else though?
            # if not hasattr(resp, "is_rendered"):
            #     warnings.warn("@pjax used with non-template-response view")
            #     return resp
            if not request.META.get('HTTP_X_PJAX', False):
                return response
            if pjax_template:
                response.template_name = pjax_template
            else:
                name = response.template_name
                if "." in name:
                    base, ext = name.rsplit('.', 1)
                    response.template_name = "%s-pjax.%s" % (base, ext)
                else:
                    response.template_name = name + "-pjax"
            return response
        return _view
    return pjax_decorator
|
from .backend import *
from ..Computation.num_properties import sign
from ..testing.types import isReal
class _ArbitraryPrecision:
    """Binary mantissa/exponent number: value = man * 2**exp.

    The mantissa is stored as a non-negative magnitude with the sign kept
    separately in ``self.sign``; arithmetic is delegated to the ``dec_*``
    functions imported from the backend module.
    """
    def __init__(self, man, exp, value=None):
        self.man = man
        self.exp = exp
        self.sign = sign(man)
        if value is None:
            # Reconstruct the numeric value from the (man, exp) pair
            self.value = man * 2 ** exp
        else:
            # Caller already knows the exact value (avoids re-deriving it)
            self.value = value
        if self.sign < 0:
            # Normalize: keep |man|; the sign lives in self.sign
            self.man = -self.man

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        # Re-apply the sign to the stored magnitude for display
        if self.sign > 0:
            return "ArbitraryPrecision({0}, {1}) = {2}".format(self.man, self.exp, self.value)
        else:
            return "ArbitraryPrecision({0}, {1}) = {2}".format(-self.man, self.exp, self.value)

    def __add__(self, other):
        man, exp = dec_add(self, other)
        return _ArbitraryPrecision(man, exp)

    def __sub__(self, other):
        man, exp = dec_sub(self, other)
        return _ArbitraryPrecision(man, exp)

    def __mul__(self, other):
        man, exp = dec_mul(self, other)
        return _ArbitraryPrecision(man, exp)

    def __truediv__(self, other):
        man, exp = dec_truediv(self, other)
        return _ArbitraryPrecision(man, exp)

    def __floordiv__(self, other):
        # NOTE: backend returns the result directly, not a (man, exp) pair
        return dec_floordiv(self, other)

    def __mod__(self, other):
        return dec_mod(self, other)

    def floor(self):
        man, exp = dec_floor(self)
        return _ArbitraryPrecision(man, exp)
class _MachinePrecision:
    """Placeholder for a machine-precision backend (not yet implemented)."""
    pass

class _UnlimitedPrecision:
    """Placeholder for an unlimited-precision backend (not yet implemented)."""
    pass
class Decimal:
    """User-facing decimal number delegating to a precision backend.

    Currently only the _ArbitraryPrecision backend is wired up; arithmetic is
    defined only between two _ArbitraryPrecision-backed operands (any other
    pairing falls through and returns None, as in the original design).
    """
    def __init__(self, value):
        if intQ(value):
            man, exp = from_int(value)
        elif isReal(value):
            man, exp = from_float(value)
        else:
            # Fail fast: previously `man`/`exp` were left unbound here and the
            # constructor crashed with an opaque UnboundLocalError.
            raise TypeError("Decimal expects an int or a real number, got {0!r}".format(value))
        self.value = _ArbitraryPrecision(man, exp, value)

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return repr(self.value)

    def __add__(self, other):
        if isinstance(self.value, _ArbitraryPrecision):
            if isinstance(other.value, _ArbitraryPrecision):
                return Decimal((self.value + other.value).value)

    def __sub__(self, other):
        if isinstance(self.value, _ArbitraryPrecision):
            if isinstance(other.value, _ArbitraryPrecision):
                return Decimal((self.value - other.value).value)

    def __mul__(self, other):
        if isinstance(self.value, _ArbitraryPrecision):
            if isinstance(other.value, _ArbitraryPrecision):
                return Decimal((self.value * other.value).value)

    def __truediv__(self, other):
        if isinstance(self.value, _ArbitraryPrecision):
            if isinstance(other.value, _ArbitraryPrecision):
                return Decimal((self.value / other.value).value)

    def __floordiv__(self, other):
        if isinstance(self.value, _ArbitraryPrecision):
            if isinstance(other.value, _ArbitraryPrecision):
                return Decimal((self.value // other.value).value)

    def __mod__(self, other):
        if isinstance(self.value, _ArbitraryPrecision):
            if isinstance(other.value, _ArbitraryPrecision):
                return Decimal((self.value % other.value).value)

    def floor(self):
        if isinstance(self.value, _ArbitraryPrecision):
            return Decimal(self.value.floor().value)
|
'''9.4 Write a program to read through the mbox-short.txt
and figure out who has sent the greatest number of mail messages.
The program looks for 'From ' lines and takes the second word
of those lines as the person who sent the mail. The program creates
a Python dictionary that maps the sender's mail address to a count of
the number of times they appear in the file. After the dictionary
is produced, the program reads through the dictionary using a maximum
loop to find the most prolific committer.
'''
name = input("Enter file:")
if len(name) < 1:
    name = "mbox-short.txt"

count = dict()
# `with` guarantees the file handle is closed (original leaked it)
with open(name) as f:
    for line in f:
        if line.startswith('From '):
            sender = line.split()[1]
            count[sender] = count.get(sender, 0) + 1

highestNum = None
highestItem = None
for k, v in count.items():
    # BUG FIX: the None check must come FIRST — the original evaluated
    # `highestNum < int(v)` before testing for None, which raises
    # TypeError on the first iteration under Python 3.
    if highestNum is None or v > highestNum:
        highestNum = v
        highestItem = k
print(highestItem, highestNum)
|
#
# cogs/guild/core.py
#
# mawabot - Maware's selfbot
# Copyright (c) 2017 Ma-wa-re, Ammon Smith
#
# mawabot is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#
''' Has several commands that get guild information '''
import asyncio
import logging
import re
import discord
from discord.ext import commands
from mawabot.utils import normalize_caseless
ROLE_MENTION_REGEX = re.compile(r'<@&([0-9]+)>')
logger = logging.getLogger(__name__)
class Guild:
    ''' Cog holding commands that inspect or manipulate guild state. '''

    __slots__ = (
        'bot',
        'autonick_guilds',
        'autonick_task',
    )

    def __init__(self, bot):
        self.bot = bot
        # guild -> desired nickname (None means "use the account's username")
        self.autonick_guilds = {}
        self.autonick_task = bot.loop.create_task(self._autonick())

    def __unload(self):
        # Stop the background autonick loop when the cog is unloaded
        self.autonick_task.cancel()

    @staticmethod
    async def _get_role(guild, name):
        ''' Resolve a role from "everyone", an ID, a mention, or a case-insensitive name. '''
        role_id = None  # renamed from `id` to avoid shadowing the builtin
        if name == 'everyone':
            return guild.default_role

        if name.isdigit():
            role_id = int(name)
        else:
            match = ROLE_MENTION_REGEX.match(name)
            if match:
                role_id = int(match[1])

        if role_id is None:
            # Fall back to a case-insensitive name search
            name = name.lower()
            for role in guild.roles:
                if role.name.lower() == name:
                    return role
        else:
            for role in guild.roles:
                if role.id == role_id:
                    return role
        return None

    def _get_guild(self, ctx, name):
        ''' Resolve a guild from an ID or caseless name; None means the current guild. '''
        if name is None:
            return ctx.guild

        if name.isdigit():
            return self.bot.get_guild(int(name))
        else:
            name = normalize_caseless(name)
            for guild in self.bot.guilds:
                if normalize_caseless(guild.name) == name:
                    return guild
        return None

    async def _autonick(self):
        ''' Background task: keep per-guild nicknames applied, polling with an adaptive delay. '''
        delay = 5
        old_len = 0
        while True:
            logger.debug('Checking autonick status...')
            tasks = [asyncio.sleep(delay)]
            if old_len != len(self.autonick_guilds):
                logger.info('Autonick list changed, resetting delay...')
                delay = 1

            for guild, nickname in self.autonick_guilds.items():
                display_name = nickname or self.bot.user.name
                if guild.me.display_name != display_name:
                    logger.info(f'Changing nickname for {guild.name} to "{display_name}"')
                    tasks.append(guild.me.edit(nick=nickname))
                    delay = 1

            if len(tasks) == 1:
                # Nothing needed changing: back off exponentially, capped at
                # one hour. (Fixed: the original logged the *old* delay.)
                delay = min(delay * 2, 3600)
                logger.debug(f'No autonicks needed, increasing delay to {delay}')

            old_len = len(self.autonick_guilds)
            await asyncio.gather(*tasks)

    @commands.command()
    @commands.guild_only()
    async def autonick(self, ctx, enable: bool, nickname: str = None, hide=False):
        ''' Enable/disable task to automatically reset your username periodically '''
        if enable:
            self.autonick_guilds[ctx.guild] = nickname
        else:
            # Default of None: disabling when not enabled is a no-op instead
            # of raising KeyError
            self.autonick_guilds.pop(ctx.guild, None)

        if hide:
            # Fixed typo: was `ctx.mesage.delete()`, an AttributeError at runtime
            await ctx.message.delete()
        else:
            enabled = 'Enabled' if enable else 'Disabled'
            embed = discord.Embed(type='rich', description=f'**{enabled}** autonick for {ctx.guild.name}')
            await ctx.send(embed=embed)

    @commands.command()
    @commands.guild_only()
    async def ack(self, ctx, *names: str):
        ''' Marks all messages in the current guild as read. '''
        # NOTE(review): `names` is accepted but currently unused
        await ctx.guild.ack()

    @commands.command()
    async def ackall(self, ctx):
        ''' Marks all message in all guilds as read. '''
        for guild in self.bot.guilds:
            await guild.ack()

    @commands.command()
    @commands.guild_only()
    async def ginfo(self, ctx, use_current=True):
        ''' Prints information about the current guild '''
        text_count = len(ctx.guild.text_channels)
        voice_count = len(ctx.guild.voice_channels)
        role_count = len(ctx.guild.roles)
        emoji_count = len(ctx.guild.emojis)
        created = ctx.guild.created_at.strftime('%x @ %X')
        members_online = sum(1 for member in ctx.guild.members if member.status != discord.Status.offline)
        members_total = ctx.guild.member_count
        members_percent = members_online / members_total * 100

        text = '\n'.join((
            f'Created: `{created}`',
            f'Text Channels: `{text_count}`',
            f'Voice Channels: `{voice_count}`',
            f'Members: `{members_online} / {members_total} ({members_percent:.1f}% online)`',
            f'Roles: `{role_count}`',
            f'Emojis: `{emoji_count}`',
        ))

        embed = discord.Embed(type='rich', description=text)
        if ctx.guild.icon_url:
            embed.set_thumbnail(url=ctx.guild.icon_url)
        embed.set_author(name=ctx.guild.name)
        embed.add_field(name='Owner:', value=ctx.guild.owner.mention)

        if use_current:
            await ctx.send(embed=embed)
        else:
            # Send to the bot's output channel and remove the invoking message
            await asyncio.gather(
                ctx.message.delete(),
                self.bot.output_send(embed=embed),
            )

    @commands.command()
    @commands.guild_only()
    async def rinfo(self, ctx, *, name: str = None, use_current=True):
        ''' Lists information about roles on the guild '''
        if name is None:
            # No role given: summarize every role in hierarchy order
            fmt_role = lambda role: f'{role.mention} ({len(role.members)})'
            desc = ', '.join(map(fmt_role, ctx.guild.role_hierarchy))
            embed = discord.Embed(type='rich', description=desc)
            embed.set_author(name=f'{len(ctx.guild.roles)} roles in {ctx.guild.name}')
        else:
            role = await self._get_role(ctx.guild, name)
            if role is None:
                desc = f'**No such role:** {name}'
                embed = discord.Embed(type='rich', description=desc, color=discord.Color.red())
            else:
                rgb = role.color.to_rgb()
                desc = '\n'.join((
                    role.mention,
                    f'Members: `{len(role.members)}`',
                    f'Hoisted: `{role.hoist}`',
                    f'Position: `{role.position}`',
                    f'Mentionable: `{role.mentionable}`',
                    f'Permissions: `{role.permissions.value}`',
                    f'Hex Color: `{rgb[0]:02x}{rgb[1]:02x}{rgb[2]:02x}`',
                ))
                embed = discord.Embed(type='rich', description=desc, color=role.color)
                embed.set_author(name=role.name)
                embed.add_field(name='ID:', value=role.id)
                embed.timestamp = role.created_at

        if use_current:
            await ctx.send(embed=embed)
        else:
            await asyncio.gather(
                ctx.message.delete(),
                self.bot.output_send(embed=embed),
            )

    @commands.command()
    @commands.guild_only()
    async def roles(self, ctx, name: str = None, use_current=True):
        ''' Lists all roles in the current (or given) guild. '''
        guild = self._get_guild(ctx, name)
        if guild is None:
            desc = f'**No such guild:** {name}'
            embed = discord.Embed(type='rich', description=desc, color=discord.Color.red())
        else:
            def fmt_role(role):
                default = 'Default ' if role.is_default() else ''
                mentionable = 'Mentionable ' if role.mentionable else ''
                mentions_everyone = 'Mention @everyone ' if role.permissions.mention_everyone else ''
                admin = 'Admin ' if role.permissions.administrator else ''
                # Roles from other guilds cannot be mentioned directly
                mention = role.mention if ctx.guild == role.guild else f'@{role.name}'
                count = f'({len(role.members)})'
                return f'`{role.id}` {mention} {count} {default}{mentionable}{mentions_everyone}{admin}'

            desc = '\n'.join(map(fmt_role, guild.roles))
            embed = discord.Embed(type='rich', description=desc)
            embed.set_author(name=guild.name)

        if use_current:
            await ctx.send(embed=embed)
        else:
            await asyncio.gather(
                ctx.message.delete(),
                self.bot.output_send(embed=embed),
            )

    @commands.command()
    @commands.guild_only()
    async def channels(self, ctx, name: str = None, use_current=True):
        ''' Lists all channels in the current (or given) guild. '''
        guild = self._get_guild(ctx, name)
        if guild is None:
            desc = f'**No such guild:** {name}'
            embed = discord.Embed(type='rich', description=desc, color=discord.Color.red())
        else:
            def fmt_chan(chan):
                topic = f' - {chan.topic}' if chan.topic else ''
                return f'`{chan.id}` {chan.mention} {topic}'

            # Only text channels are listed; voice channels have no mention/topic
            is_txt_chan = lambda chan: isinstance(chan, discord.TextChannel)
            desc = '\n'.join(map(fmt_chan, filter(is_txt_chan, guild.channels)))
            embed = discord.Embed(type='rich', description=desc)
            embed.set_author(name=guild.name)

        if use_current:
            await ctx.send(embed=embed)
        else:
            await asyncio.gather(
                ctx.message.delete(),
                self.bot.output_send(embed=embed),
            )

    @commands.command()
    @commands.guild_only()
    async def perms(self, ctx, *names: str, use_current=True):
        ''' Prints the permission flags of each given role. '''
        for name in names:
            role = await self._get_role(ctx.guild, name)
            if role is None:
                desc = f'**No such role:** {name}'
                embed = discord.Embed(type='rich', description=desc, color=discord.Color.red())
            else:
                perms = role.permissions
                desc = '\n'.join((
                    role.mention,
                    '',
                    f'Administrator: `{perms.administrator}`',
                    f'Ban members: `{perms.ban_members}`',
                    f'Kick members: `{perms.kick_members}`',
                    f'Manage guild: `{perms.manage_guild}`',
                    f'Manage channels: `{perms.manage_channels}`',
                    f'Manage nicknames: `{perms.manage_nicknames}`',
                    f'Manage roles: `{perms.manage_roles}`',
                    f'Manage webhooks: `{perms.manage_webhooks}`',
                    f'Manage emojis: `{perms.manage_emojis}`',
                    f'View audit log: `{perms.view_audit_log}`',
                    f'Read messages: `{perms.read_messages}`',
                    f'Send messages: `{perms.send_messages}`',
                    f'Add reactions: `{perms.add_reactions}`',
                    f'Send TTS messages: `{perms.send_tts_messages}`',
                    f'Embed links: `{perms.embed_links}`',
                    f'Attach files: `{perms.attach_files}`',
                    f'Read message history: `{perms.read_message_history}`',
                    f'Mention \\@everyone: `{perms.mention_everyone}`',
                    f'External emojis: `{perms.external_emojis}`',
                    f'Create instant invite: `{perms.create_instant_invite}`',
                    f'Can connect to voice: `{perms.connect}`',
                    f'Can speak in voice: `{perms.speak}`',
                    f'Mute members: `{perms.mute_members}`',
                    f'Deafen members: `{perms.deafen_members}`',
                    f'Move members: `{perms.move_members}`',
                    f'Use voice activation: `{perms.use_voice_activation}`',
                    f'Change nickname: `{perms.change_nickname}`',
                ))
                embed = discord.Embed(type='rich', description=desc, color=role.color)

            if use_current:
                await ctx.send(embed=embed)
            else:
                await asyncio.gather(
                    ctx.message.delete(),
                    self.bot.output_send(embed=embed),
                )
|
from typing import Any, Dict, List, Optional, Tuple
from mmic.components.blueprints import GenericComponent
from mmic_autodock_vina.models.input import AutoDockComputeInput
from mmic_autodock_vina.models.output import AutoDockComputeOutput
from mmic_cmd.components import CmdComponent
from cmselemental.util.decorators import classproperty
import tempfile
import os
class AutoDockComputeComponent(GenericComponent):
    """Runs an AutoDock Vina docking calculation through the `vina` CLI.

    Input/output payloads use the mmic AutoDockCompute models; the actual
    subprocess invocation is delegated to mmic_cmd's CmdComponent.
    """

    @classproperty
    def input(cls):
        return AutoDockComputeInput

    @classproperty
    def output(cls):
        return AutoDockComputeOutput

    @classproperty
    def version(cls):
        return ""

    @staticmethod
    def _temp_path(suffix: str) -> str:
        """Create a persistent temp file and return its path.

        Replaces `tempfile.NamedTemporaryFile(...).name`: there the tempfile
        object is garbage-collected immediately, which deletes the file and
        leaves only a racy name (and fails outright on Windows).
        """
        fd, path = tempfile.mkstemp(suffix=suffix)
        os.close(fd)
        return path

    def execute(
        self,
        inputs: "AutoDockComputeInput",
        extra_outfiles: Optional[List[str]] = None,
        extra_commands: Optional[List[str]] = None,
        scratch_name: Optional[str] = None,
        timeout: Optional[int] = None,
        config: Optional["TaskConfig"] = None,
    ) -> Tuple[bool, "AutoDockComputeOutput"]:
        """Write receptor/ligand to temp files, run vina, and parse the result."""
        receptor, ligand = inputs.receptor, inputs.ligand

        # Materialize the receptor/ligand strings as .pdbqt files for vina
        receptor_fname = self._temp_path(".pdbqt")
        ligand_fname = self._temp_path(".pdbqt")
        with open(receptor_fname, "w") as fp:
            fp.write(receptor)
        with open(ligand_fname, "w") as fp:
            fp.write(ligand)

        input_model = inputs.dict()
        del input_model["proc_input"]  # not a vina CLI option
        input_model["receptor"] = receptor_fname
        input_model["ligand"] = ligand_fname
        # need to include flex too
        input_model["out"] = self._temp_path(".pdbqt")
        input_model["log"] = self._temp_path(".log")

        execute_input = self.build_input(input_model, config)
        execute_output = CmdComponent.compute(execute_input)

        input_model["proc_input"] = inputs.proc_input
        return True, self.parse_output(execute_output.dict(), input_model)

    def build_input(
        self,
        input_model: Dict[str, Any],
        config: Optional["TaskConfig"] = None,
        template: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Translate the flat input dict into a CmdComponent invocation dict."""
        cmd = ["vina"]
        # Every truthy entry becomes a `--key value` pair on the command line
        for key, val in input_model.items():
            if val and key != "provenance":
                cmd.append("--" + key)
                cmd.append(val if isinstance(val, str) else str(val))

        env = os.environ.copy()
        if config:
            env["MKL_NUM_THREADS"] = str(config.ncores)
            env["OMP_NUM_THREADS"] = str(config.ncores)
        scratch_directory = config.scratch_directory if config else None

        return {
            "command": cmd,
            "infiles": [input_model["ligand"], input_model["receptor"]],
            "outfiles": [
                input_model["out"],
                input_model["log"],
            ],
            "scratch_directory": scratch_directory,
            "environment": env,
            "raise_err": True,
        }

    def parse_output(
        self, output: Dict[str, Any], inputs: Dict[str, Any]
    ) -> AutoDockComputeOutput:
        """Assemble an AutoDockComputeOutput from the captured streams and files.

        `inputs` is the flat dict built in execute(), not the pydantic model
        (the original annotation said AutoDockComputeInput, but the body
        indexes it as a dict).
        """
        stdout = output["stdout"]
        stderr = output["stderr"]
        outfiles = output["outfiles"]
        system = outfiles[inputs["out"]]
        log = outfiles[inputs["log"]]

        return AutoDockComputeOutput(
            schema_name="mmschema",
            schema_version=1,
            success=True,
            stdout=stdout,
            stderr=stderr,
            log=log,
            system=system,
            proc_input=inputs["proc_input"],
        )
|
# Swap each adjacent pair of characters in the input string; with an odd
# length, the trailing character stays in place.
s1 = input()
chars = list(s1)
for idx in range(0, len(chars) - 1, 2):
    chars[idx], chars[idx + 1] = chars[idx + 1], chars[idx]
s1 = "".join(chars)
print(s1)
|
from django.conf import settings
from django.core import signals

from django_pipes.stats import PipesStats

# Module-level stats collector: accumulates one entry per remote call
# (mirrors the spirit of django.db.connection.queries).
debug_stats = PipesStats()

# Register an event that resets pipes debug_stats.queries
# when a Django request is started.
def reset_pipes_queries(**kwargs):
    # Signal receiver: drop accumulated query stats at the start of each request
    debug_stats.queries = []
signals.request_started.connect(reset_pipes_queries)

from django_pipes.main import Pipe, PipeManager, ObjectNotSavedException, ResourceNotAvailableException
__all__ = ('Pipe', 'PipeManager')
|
# Generated by Django 3.0.3 on 2020-03-22 11:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes UserProfile.imageProfil optional
    # (blank=True) while keeping its upload path and verbose name.

    dependencies = [
        ('main_app', '0017_auto_20200322_1022'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='imageProfil',
            field=models.ImageField(blank=True, upload_to='imagesDeProfil/', verbose_name='Image de Profil'),
        ),
    ]
|
c = {"a": 10, "b": 1, "c": 22}
# Pair each value with its key, then order the pairs from largest value down.
temp = sorted(((v, k) for k, v in c.items()), reverse=True)
print(temp)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 07 09:05:09 2017

@author: Randall
"""

# Append the dataset tag to duplicity_check.txt unless it is already recorded.
with open("duplicity_check.txt", "r") as duplicity_check:
    # BUG FIX: original read via `file.read()` — `file` is the builtin type
    # (a NameError on Python 3), not the opened handle.
    duplicates = duplicity_check.read().split(',')

text_string = "20170907_wbb_ozone"
with open("duplicity_check.txt", "a") as duplicity_check:
    if text_string not in duplicates:
        duplicity_check.write(text_string + ",")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.