#!/usr/bin/python2.7
"""
Driver File to start execution
"""
################################
####### Driver file to #########
####### start execution ########
###### Author : Pavel Sur ######
################################
import sys,os
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'HLS', 'lib'))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'HLS', 'commands'))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'sharedlib'))
import getpass, re, socket, datetime, time, commands, dircache, json, traceback, TSP, pymysql, signal
import pymysql.cursors
#import pdb
from optparse import OptionParser
from multiprocessing import Process
from threading import *
from robot.run import *
from sharedlib.sitepackages import pexpect
from sharedlib.sitepackages import sendEmail
from sharedlib.sitepackages import ftp
from sharedlib.db import AzukiDB
ASP_SGL_COMMAND_INDEX = 0
ASP_SGL_STATUS_INDEX = 1
ASP_STL_COMMAND_INDEX = 2
ASP_STL_STATUS_INDEX = 3
ASP_RCS_COMMAND_INDEX = 4
ASP_RCS_STATUS_INDEX = 5
ASP_VMS_COMMAND_INDEX = 6
ASP_VMS_STATUS_INDEX = 7
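# The ASP_* constants above are indices into the comma-separated list passed
# via -B/--PORTS. For illustration (port values are hypothetical), an
# invocation with -B 9001,9002,9003,9004,9005,9006,9007,9008 gives
# cmd_line_args['PORTS'][ASP_SGL_COMMAND_INDEX] == '9001', and so on for the
# command/status ports of the STL, RCS and VMS components.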
# Global variable declaration
cmd_line_args = {}
g_config_data = {}
g_config_data['SYSTEM_OS'] = 'Linux'
g_config_data['mail_to'] = 'xyz'
g_config_data['mail_from'] = 'xyz'
g_config_data['mail_server_address'] = 'xyz'
g_config_data['mail_server_uid'] = 'xyz'
g_config_data['mail_server_password'] = 'xyz'
g_config_data['mail_server_port'] = 'xyz'
g_config_data['ftp_ip'] = '115.119.242.21'
g_config_data['ftp_un'] = 'rebaca_mail'
g_config_data['ftp_pw'] = 'rebaca@5102'
g_config_data['test_cases'] = ''
AzukiDB = AzukiDB()
def readconfig(filetoread, search_param_list):
"""
reading configuration file
filetoread: <String> file name to be read
search_param_list: <list> list of params to search for
"""
try:
file_var = open(filetoread, 'r')
lines = file_var.readlines()
for line in lines:
for ele in search_param_list:
if ele in line:
if len(line.split()) > 1:
g_config_data.update({ele:line.split()[1]})
else:
g_config_data.update({ele:''})
file_var.close()
except IOError:
# OptionParsingError is local to get_cmd_line_args(), so catching it
# here would raise a NameError; catch the actual file error instead
console_log("DEBUG -> ", str(sys.exc_info()[1]))
console_log("INFO ->", "unable to read configuration file = ", filetoread)
exit_execution()
def init_config():
"""
obtain and set information.
"""
# connect a UDP socket to a public host (no packets are sent) to
# discover the local outbound IP address; port 80 is used because
# connecting to port 0 is not portable
socket_var = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
socket_var.connect(('google.com', 80))
g_config_data['env_ip'] = socket_var.getsockname()[0]
socket_var.close()
g_config_data['env_user'] = getpass.getuser()
def get_cmd_line_args():
"""
getting the Options
from command line.
"""
cmd_line_args['produce_code'] = \
cmd_line_args['user_name'] = \
cmd_line_args['user_passwd'] = \
cmd_line_args['enb_mail'] = \
cmd_line_args['test_suites'] = \
cmd_line_args['emerg_mails'] = \
cmd_line_args['multi_exec'] = \
cmd_line_args['test_cases'] = \
cmd_line_args['variable_file'] = None
try:
class OptionParsingError(RuntimeError):
"""
Error Parsing
"""
def __init__(self, msg):
self.msg = msg
class OptionParsingExit(Exception):
"""
Option Parsing
"""
def __init__(self, status, msg):
self.msg = msg
self.status = status
class ModifiedOptionParser(OptionParser):
"""
Modified option Parsing
"""
def error(self, msg):
raise OptionParsingError(msg)
def exit(self, status=0, msg=None):
raise OptionParsingExit(status, msg)
parser = ModifiedOptionParser()
parser.add_option("-P", "--product_code", \
help="Enter product code", type="string")
parser.add_option("-u", "--username", help="Enter automation user name", type="string")
parser.add_option("-p", "--userpassword", help=\
"Enter automation user password", type="string")
parser.add_option("-m", "--enbmail", help=\
"Enable mail option", action="store_true")
parser.add_option("-T", "--testcase", help=\
"Enter testcase name(s)", type="string")
parser.add_option("-S", "--test_suite", help=\
"Enter test suite name(s)", type="string")
parser.add_option("-E", "--emergencymail", help=\
"Enter emergency mail ids", type="string")
parser.add_option("-N", "--multiexec", help=\
"Enter multiple execution counts", type="string")
parser.add_option("-X", "--enbxml", help=\
"Enable xml and log generation", action="store_true")
parser.add_option("-V", "--variablefile", help=\
"Enter path and variable file name", type="string")
parser.add_option("-B", "--PORTS", help=\
"Enter al the ports by comma", type="string")
try:
(options, args) = parser.parse_args()
except OptionParsingError, error:
#print 'There was a parsing error: %s' % error.msg
console_log("DEBUG -> ", 'Parsing error = %s' % error.msg)
exit_execution()
except OptionParsingExit, error:
#print 'The option parser exited with message %s and
#result code %s' % (error.msg, error.status)
console_log("DEBUG -> ", \
'The option parser exited with message %s and result code %s' % \
(error.msg, error.status))
exit_execution()
if options.product_code and options.product_code.startswith("-"):
console_log("DEBUG -> ", \
"Parsing error = %s" % "-P option requires an argument")
exit_execution()
if options.username and options.username.startswith("-"):
console_log("DEBUG -> ", \
"Parsing error = %s" % "-u option requires an argument")
exit_execution()
if options.userpassword and options.userpassword.startswith("-"):
console_log("DEBUG -> ", \
"Parsing error = %s" % "-p option requires an argument")
exit_execution()
if options.test_suite and options.test_suite.startswith("-"):
console_log("DEBUG -> ", \
"Parsing error = %s" % "-S option requires an argument")
exit_execution()
if options.testcase and options.testcase.startswith("-"):
console_log("DEBUG -> ", \
"Parsing error = %s" % "-T option requires an argument")
exit_execution()
if options.emergencymail and options.emergencymail.startswith("-"):
console_log("DEBUG ->", \
"Parsing error = %s" % "-E option requires an argument")
exit_execution()
if options.multiexec and options.multiexec.startswith("-"):
console_log("DEBUG -> ", \
"Parsing error = %s" % "-N option requires an argument")
exit_execution()
if not options.product_code:
console_log("DEBUG -> ", \
"Parsing error = %s" % "-P option requires an argument")
exit_execution()
if not options.username:
console_log("DEBUG -> ", \
"Parsing error = %s" % "-u option requires an argument")
exit_execution()
if not options.userpassword:
console_log("DEBUG -> ", \
"Parsing error = %s" % "-p option requires an argument")
exit_execution()
if not options.test_suite:
console_log("DEBUG -> ", \
"Parsing error = %s" % "-S option requires an argument")
exit_execution()
if not options.variablefile:
console_log("DEBUG -> ", \
"Parsing error = %s" % "-V option requires an argument")
exit_execution()
if not options.PORTS:
console_log("DEBUG -> ", \
"Parsing error = %s" % "-B option requires an argument")
exit_execution()
cmd_line_args['produce_code'] = options.product_code
cmd_line_args['user_name'] = options.username
cmd_line_args['user_passwd'] = options.userpassword
cmd_line_args['enb_mail'] = options.enbmail
cmd_line_args['test_cases'] = options.testcase
cmd_line_args['test_suites'] = options.test_suite
cmd_line_args['emerg_mails'] = options.emergencymail
cmd_line_args['multi_exec'] = options.multiexec
cmd_line_args['enb_xml'] = options.enbxml
cmd_line_args['variable_file'] = options.variablefile
cmd_line_args['PORTS'] = str(options.PORTS).split(",")
except:
raise
#console_log("DEBUG -> ", str(sys.exc_info()[1]))
def set_report_name(product_code):
"""
Set automation report name.
product_code: <String> product code name
"""
current_time_stamp = time.strftime("%Y%m%d-%H%M")
g_config_data['log_name'] = product_code.upper() + "_" + \
cmd_line_args['user_name'] + "_" +"log_" + "_" + \
current_time_stamp + ".html"
g_config_data['report_name'] = product_code.upper() + "_" + \
cmd_line_args['user_name'] + "_" + "report_" + \
"_" + current_time_stamp + ".html"
g_config_data['xml_name'] = product_code.upper() + "_" + \
cmd_line_args['user_name'] + "_" + "xml_" + \
"_" + current_time_stamp + ".xml"
def set_report_directory_path(product_code):
"""
Set report directory path.
product_code: <String> product code
"""
report_dir_path = os.path.join(g_config_data['current_working_dir'], \
product_code.upper(), 'results')
return report_dir_path
def set_sys_path():
"""
setting system path.
"""
g_config_data['current_working_dir'] = \
os.path.abspath(os.path.dirname(__file__))
sys.path.append(g_config_data['current_working_dir'])
sys.path.append(os.path.join(g_config_data['current_working_dir'], "sharedlib"))
def exec_pre_conditions():
"""
Execute preconditions.
"""
def console_log(prefix, *args):
"""
Logging method
"""
print prefix,
print ' '.join([str(ele) for ele in args])
def exit_execution(exit_param=0):
"""
exit execution
exit_param: <int> system exit parameter.
"""
console_log("INFO -> ", "Execution ends")
sys.exit(exit_param)
def validate_options():
"""
validate all options.
"""
#print "cmd_line_args['user_passwd']", cmd_line_args['user_passwd']
if not cmd_line_args['produce_code'] or \
not cmd_line_args['user_name'] or \
not cmd_line_args['user_passwd'] or \
not cmd_line_args['test_suites']:
console_log("DEBUG -> ", "Insufficient options")
exit_execution()
def validate_user(username, password):
"""
validate user name and password
username: <String> user name
password: <String> user password
"""
isValidUser = AzukiDB.validateUser(username, password)
if isValidUser:
console_log("DEBUG -> ", "Authentication successful")
else:
console_log("DEBUG -> ", "User is not valid, authentication failed.")
exit_execution()
'''
user_json_file = os.path.join(g_config_data['current_working_dir'], \
"sharedlib", "user_info", "user_info.json")
data = {}
try:
data = json.loads(open(user_json_file).read())
except:
console_log("DEBUG -> ", str(sys.exc_info()[1]))
console_log("INFO ->", "unable to read user info file")
exit_execution()
if data:
specific_user_id = data.get(username)
if specific_user_id:
if specific_user_id['pw'] == password:
console_log("DEBUG -> ", "Authentication successful")
else:
console_log("DEBUG -> ", "Entered password not valid")
exit_execution()
else:
console_log("DEBUG -> ", "Entered user name not valid")
exit_execution()
'''
def match_test_suites(product_list, test_suites_list):
"""
matching requested test suites to their products.
product_list: <list> list of products
test_suites_list: <list> list of test suites
"""
test_suites_as_per_product = {}
try:
for product_name in product_list:
test_suites_as_per_product.update({product_name.upper():[]})
test_suite_names = dircache.listdir(os.path.join(g_config_data['current_working_dir'], \
product_name.upper(), "testsuites"))
for test_suite in test_suites_list:
if test_suite in test_suite_names:
test_suites_as_per_product[product_name.upper()].append(test_suite)
return test_suites_as_per_product
except:
console_log("DEBUG -> ", str(sys.exc_info()[1]))
console_log("INFO -> ", "Invalid product name")
exit_execution()
def send_emails(mail_subject, mail_body):
"""
Send email to the desired mail recipients
"""
mail_ids_list = []
if cmd_line_args['emerg_mails']:
mail_ids_list = [i.strip() for i in cmd_line_args['emerg_mails']]
else:
mail_ids_list = [i.strip() for i in g_config_data['mail_to'].split(',')]
if cmd_line_args['enb_mail'] or cmd_line_args['emerg_mails']:
for mail_receipient in mail_ids_list:
try:
# adding condition for negative validation
if not g_config_data['mail_from']:
console_log("DEBUG -> ", "".join(["No sender mail ID"]))
break
if not g_config_data['mail_server_address']:
console_log("DEBUG -> ", "".join(["No mail server address"]))
break
pattern_for_mail = "^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$"
pattern_for_ip = "^(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})$"
pattern_for_host_address = "^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z.]+$"
if not re.match(pattern_for_mail, mail_receipient):
console_log("DEBUG -> ", "".join(["Invalid mail ID = ", str(mail_receipient)]))
continue
if not re.match(pattern_for_mail, g_config_data['mail_from']):
console_log("DEBUG -> ", "".join(["Invalid mail ID = ", \
str(g_config_data['mail_from'])]))
break
if not re.match(pattern_for_ip, g_config_data['mail_server_address']) \
and not re.match(pattern_for_host_address, g_config_data['mail_server_address']):
console_log("DEBUG -> ", "".join(["Invalid mail Server address = ", \
str(g_config_data['mail_server_address'])]))
break
if mail_receipient and g_config_data['mail_from'] \
and g_config_data['mail_server_address']:
if not g_config_data['mail_server_port']:
g_config_data['mail_server_port'] = '25'
return_value = os.system("python " + \
os.path.join(g_config_data['current_working_dir'], \
"sharedlib", "sitepackages", "sendEmail.py") + " -t " + mail_receipient + \
' -f ' + g_config_data['mail_from'] + ' -s ' + mail_subject + \
' -b ' + mail_body + ' -S ' + g_config_data['mail_server_address'] + \
' -U ' + g_config_data['mail_server_uid'] + \
' -P ' + g_config_data['mail_server_password'] + \
' -v ' + g_config_data['mail_server_port'])
#argCmd = ["-t", mail_receipient, "-f", g_config_data['mail_from'],
# "-s", mail_subject, "-b", mail_body,
# "-S", g_config_data['mail_server_address'], "-b", mail_body,
# "-S", g_config_data['mail_server_address'],
# "-U", g_config_data['mail_server_uid'],
# "-P", g_config_data['mail_server_password'], "-v",
# g_config_data['mail_server_port']]
#
#optionMenu, args = sendEmail.getOptsAndArgs(argCmd)
#
#msgObj = sendEmail(msgTo=optionMenu.msgTo, msgFrom=optionMenu.msgFrom,
# msgSubject=optionMenu.msgSubject,
# msgBody=optionMenu.msgBody,
# attachmentDict=optionMenu.attachmentDict,
# attachmentPaths=optionMenu.attachmentPaths,
# domainName=optionMenu.domain)
#
#msgObj.send(smtpUid=optionMenu.smtpUid, smtpPassword=optionMenu.smtpPw,
# smtpAddress=optionMenu.smtpAddress,
# smtpPort=optionMenu.smtpPort)
if return_value == 0:
console_log("INFO -> ", "".join(["mail sent to ", \
str(mail_receipient)]))
else:
console_log("INFO -> ", "".join(["mail not sent to ", \
str(mail_receipient)]))
else:
console_log("DEBUG -> ", "mail details are not configured")
except:
console_log("INFO -> ", "".join(["mail not sent to ", str(mail_receipient)]))
console_log("DEBUG -> ", str(sys.exc_info()[1]))
continue
def test_suite_count_set(test_suite_list, test_suite_count_list):
"""
This function sets test suite counts
test_suite_list: <list> test suite list
test_suite_count_list: <list> test suite count list
"""
test_suite_count_dic = {}
count = 0
lentest_suite_count_list = len(test_suite_count_list)
for test_suite in test_suite_list:
if count < lentest_suite_count_list:
test_suite_count_dic.update({test_suite:test_suite_count_list[count]})
else:
test_suite_count_dic.update({test_suite:1})
count += 1
return test_suite_count_dic
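# Illustrative example (suite names are hypothetical): counts beyond the
# supplied list default to 1, so
#   test_suite_count_set(['suiteA', 'suiteB'], ['3'])
#   -> {'suiteA': '3', 'suiteB': 1}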
def obtain_test_cases_suite(test_suite_list, test_suite_path):
"""
Obtain test_cases from test suite
test_suite_list: <list> test suite list
test_suite_path: <String> test suite path
"""
test_case_listtest_wise = {}
for test_suite in test_suite_list:
test_case_flag = False
test_case_list = []
try:
file_var = open(os.path.join(test_suite_path, test_suite), 'r')
lines = file_var.readlines()
for line in lines:
if "*Testcases*" in line:
test_case_flag = True
continue
if test_case_flag:
if line.startswith("\t") or line.startswith(" ") or line.startswith("\n") \
or line.startswith("\r"):
continue
else:
test_case_list.append(line.replace(' ', '_'))
test_case_listtest_wise.update({test_suite:test_case_list})
file_var.close()
except:
console_log("DEBUG -> ", str(sys.exc_info()[1]))
console_log("INFO -> ", "unable to parse test_cases from test suite")
exit_execution()
return test_case_listtest_wise
def test_case_mapping(test_case_listtest_wise, test_suite_list):
"""
select test_cases from all test_cases in test suite as per command line argument
test_case_listtest_wise: <dic> all test_cases list suite wise
test_suite_list: <list> test suite list
"""
all_test_case_list = []
valid_test_cases_list = []
# a non-empty config-file value overrides the command-line test cases
if g_config_data['test_cases']:
cmd_line_args['test_cases'] = g_config_data['test_cases']
if '[' in cmd_line_args['test_cases']:
all_test_case_list = [i.strip().strip('[').strip(']') for i in \
cmd_line_args['test_cases'].strip().split('],[')]
count = 1
for test_suite in test_suite_list:
if count <= len(all_test_case_list):
test_cases = all_test_case_list[count -1]
if test_cases.find("*") >= 0:
valid_test_cases_list.extend(test_case_listtest_wise[test_suite])
elif test_cases.find("-") >= 0:
first_range = test_cases.split('-')[0]
last_range = test_cases.split('-')[1]
if first_range.isdigit() and last_range.isdigit():
for i in range(int(first_range), int(last_range) + 1):
if int(i) <= len(test_case_listtest_wise[test_suite]):
valid_test_cases_list.append(test_case_listtest_wise[test_suite][int(i) - 1])
else:
console_log("DEBUG -> ", "wrong option for testcase")
elif test_cases.find(",") >= 0:
for i in test_cases.split(','):
if i.isdigit():
if int(i) <= len(test_case_listtest_wise[test_suite]):
valid_test_cases_list.append(test_case_listtest_wise[test_suite][int(i) - 1])
else:
console_log("DEBUG -> ", "wrong option for testcase")
else:
if test_cases.isdigit():
valid_test_cases_list.append\
(test_case_listtest_wise[test_suite][int(test_cases) - 1])
else:
console_log("DEBUG -> ", "wrong option for testcase")
count += 1
cmd_line_args['test_cases'] = valid_test_cases_list
else:
cmd_line_args['test_cases'] = [i.strip() \
for i in cmd_line_args['test_cases'].strip().split(',')]
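# Illustrative examples of the -T grammar handled above (suite names are
# hypothetical): with suites S1,S2 and -T "[1-3],[*]", cases 1..3 of S1 and
# every case of S2 are selected; "[2,5]" picks cases 2 and 5 by index, and a
# plain value such as "tc_login,tc_logout" is treated as explicit test case
# names.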
def matching_valid_testcase(test_case_listtest_wise, test_suite_list, test_case_list):
"""
Matching valid test_cases
"""
validtest_case_list = []
invalidflag = False
for test_casedata in test_case_list:
for test_suitedata in test_suite_list:
all_test_case_list = test_case_listtest_wise[test_suitedata]
all_test_case_list = [tcc.strip("\r\n") for tcc in all_test_case_list]
#validtest_case_list.extend([test_casedata for test_casedata
#in test_case_list if test_casedata in all_test_case_list])
if test_casedata.strip() in all_test_case_list:
validtest_case_list.append(test_casedata.strip())
break
else:
console_log("DEBUG -> ", "Invalid testcase = ", test_casedata)
invalidflag = True
cmd_line_args['test_cases'] = validtest_case_list
if invalidflag and not cmd_line_args['test_cases']:
return False
else:
return True
def exec_proc(proc_name, proc_args):
"""
Execute the given callable in a separate process (not a thread).
proc_name: <callable> target function to execute
proc_args: <String> argument passed to the target
"""
process = Process(target=proc_name, args=(proc_args,))
process.start()
process.join()
def transfer_files(source_file, dest_file, remote_system_ip, remote_system_uid, \
remote_system_pw, local_to_remote_flag=True, retry=1, delay=10):
"""
Transfer a file or directory from server to client and vice versa
source_file: <String> source file with location
dest_file: <String> destination file with location
remote_system_ip: <String> remote system IP
remote_system_uid: <String> remote system user Id
remote_system_pw: <String> remote system password
local_to_remote_flag: <String> If true then transfer from local to remote, else vice versa
"""
# To reduce execution time, .svn subfolders (and the files under them) are
# not transferred: the entire segment_validation directory is first tarred
# without the .svn dirs, then extracted, and the resulting directory
# (containing no .svn) is what gets transferred.
tran_flag = False
cmd_to_exec = "mkdir /tmp/" + remote_system_ip
os.system(cmd_to_exec)
print "Execute: ", cmd_to_exec
# get the directory name and its parent path (str.strip() removes a set of
# characters rather than a suffix, so use os.path helpers instead)
source_file_dir = os.path.basename(source_file)
source_file_path = os.path.dirname(source_file)
# move to the dir
cmd_to_exec = "cd " + source_file_path
print "cmd_to_exec", cmd_to_exec
os.chdir(source_file_path)
# tar without .svn dir
cmd_to_exec = "tar --exclude=.svn -zcvf seg_"+ remote_system_ip+ ".tar.gz " + source_file_dir
print "cmd_to_exec", cmd_to_exec
os.system(cmd_to_exec)
# untar without .svn dir
cmd_to_exec = "tar -xvzf seg_"+ remote_system_ip+ ".tar.gz -C /tmp/" + remote_system_ip
print "cmd_to_exec", cmd_to_exec
os.system(cmd_to_exec)
source_file = "/tmp/" + remote_system_ip + "/segment_validation"
# deleting tar file
cmd_to_exec = "rm -rf seg_"+ remote_system_ip+ ".tar.gz"
print "cmd_to_exec", cmd_to_exec
os.system(cmd_to_exec)
if local_to_remote_flag:
command_to_scp = "".join(["scp -r ", source_file, " ", remote_system_uid, \
"@", remote_system_ip, ":", dest_file])
else:
command_to_scp = "".join(["scp -r ", " ", remote_system_uid, "@", \
remote_system_ip, ":", source_file, " ", dest_file])
print "command_to_scp", command_to_scp
expr1 = remote_system_uid+"@"
expr2 = "Are you sure you want to continue connecting"
expr3 = "100%"
console_log("INFO -> ", "log in to transfer")
try:
child = pexpect.spawn(command_to_scp)
try:
child.expect(expr2)
child.sendline("yes")
except:
console_log("INFO -> ", "confirmation is not required")
child.expect(expr1)
child.sendline(remote_system_pw)
for i in range(retry):
try:
child.expect(expr3)
console_log("DEBUG -> ", "Transfer is started")
break
except:
time.sleep(10)
continue
else:
console_log("DEBUG -> ", "Transfer is failed")
exit_execution()
# waiting to transfer all file
for i in range(retry):
print "child.isalive()==========", child.isalive()
if child.isalive():
time.sleep(10)
continue
else:
console_log("INFO -> ", "Transfer is completed")
tran_flag = True
break
else:
console_log("DEBUG -> ", "Transfer is not complete")
child.close()
except:
console_log("DEBUG -> ", str(sys.exc_info()[1]))
print "file transfer failed"
return tran_flag
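# Illustrative call (host, credentials and paths are hypothetical):
#   transfer_files('/opt/auto/sharedlib/segment_validation', 'C:\\tools',
#                  '10.0.0.7', 'admin', 'secret', True, retry=100)
# This tars the directory without .svn, copies it to the Windows client over
# scp, and polls the pexpect child up to 100 times while the copy completes.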
def perform_set_up_task(g_config_data, cmd_line_args, os_config_file_name, product_code):
"""
Performing set up task like copying MPEG2 and SceneScope set up to client
g_config_data: <Dic> global data
cmd_line_args: <Dic> command line arguments
os_config_file_name: <String> config file name for test plan
product_code: <String> product code
"""
tran_flag = False
os_cfg_file_name = os.path.join(g_config_data['current_working_dir'], product_code.upper(), \
"config", cmd_line_args['user_name']+"_cfg", os_config_file_name)
searched_param_list = ["VAL_CLIENT_IP", "VAL_CLIENT_UID", "VAL_CLIENT_PWD", \
"VAL_SETUP_LOCATION", "VAL_SYS_OS", "SCENESCOPE_CLIENT_IP", \
"SCENESCOPE_CLIENT_UID", "SCENESCOPE_CLIENT_PWD", \
"SCENESCOPE_SETUP_LOCATION", "SCENESCOPE_SYS_OS"]
readconfig(os_cfg_file_name, searched_param_list)
# Transferring set up file for MPEG2 packet analyzer
if g_config_data['VAL_SYS_OS'].lower() == "windows" and \
g_config_data['VAL_CLIENT_IP'].strip():
console_log("INFO -> ", "transferring MPEG2 packet analyzer set up")
source_file = os.path.join(g_config_data['current_working_dir'], \
"sharedlib", "segment_validation")
destination_file = g_config_data["VAL_SETUP_LOCATION"].replace('\\', '\\\\')
tran_flag = transfer_files(source_file, destination_file, \
g_config_data['VAL_CLIENT_IP'], \
g_config_data['VAL_CLIENT_UID'], \
g_config_data['VAL_CLIENT_PWD'], True, 100)
else:
console_log("DEBUG -> ", "transferring MPEG2 packet analyzer set up failed")
if g_config_data['VAL_CLIENT_IP'].strip() != \
g_config_data['SCENESCOPE_CLIENT_IP'].strip() and tran_flag:
if g_config_data['SCENESCOPE_SYS_OS'].lower() == "windows" and \
g_config_data['SCENESCOPE_CLIENT_IP'].strip():
console_log("INFO -> ", "transferring SceneScope set up")
source_file = os.path.join(g_config_data['current_working_dir'], \
"sharedlib", "segment_validation")
destination_file = g_config_data["SCENESCOPE_SETUP_LOCATION"].replace('\\', '\\\\')
tran_flag = transfer_files(source_file, destination_file, \
g_config_data['SCENESCOPE_CLIENT_IP'], \
g_config_data['SCENESCOPE_CLIENT_UID'], \
g_config_data['SCENESCOPE_CLIENT_PWD'], True, 100)
else:
console_log("DEBUG -> ", "transferring SceneScope set up failed")
return tran_flag
def Sig_handler(signum, frame):
print 'Signal handler called with signal', signum
#TSP.TSP.UnMount_Segments(TSP.g_socket_info, AZ_HLS_UNMOUNT_SEGMENTS_CMD)
#TSP.TSP.UnMount_Streams(TSP.g_socket_info, AZ_HLS_UNMOUNT_STREAMS_CMD)
SEGMENTS_MOUNT_PATH='/mnt/'+cmd_line_args['user_name']+''
STREAMS_MOUNT_PATH='/mnt/streams/'
AZ_HLS_UNMOUNT_SEGMENTS_CMD = '{"UNMOUNT": "'+ SEGMENTS_MOUNT_PATH +'"}'
AZ_HLS_UNMOUNT_STREAMS_CMD = '{"UNMOUNT": "'+ STREAMS_MOUNT_PATH +'"}'
TSP.TSP.UnMount_Segments(TSP.g_socket_info, AZ_HLS_UNMOUNT_SEGMENTS_CMD)
if PLATFORM != "windows":
TSP.TSP.UnMount_Streams(TSP.g_socket_info, AZ_HLS_UNMOUNT_STREAMS_CMD)
TSP.TSP.Terminate_Sessions()
TSP.TSP.Close_Sockets()
#AzukiDB.closeConnection()
Terminate_pexpect_connections()
sys.exit(0)
def Terminate_pexpect_connections():
sgl_process.close()
stl_process.close()
rcl_process.close()
val_process.close()
return
def main():
"""
main function.
"""
global g_config_data
global cmd_line_args
# PLATFORM is read by Sig_handler, so make it a module-level global here
global PLATFORM
console_log("INFO -> ", "Execution starts")
signal.signal(signal.SIGINT, Sig_handler)
signal.signal(signal.SIGTERM, Sig_handler)
signal.signal(signal.SIGSEGV, Sig_handler)
signal.signal(signal.SIGABRT, Sig_handler)
# initializing config
init_config()
# setting system path
set_sys_path()
# executing precondition
exec_pre_conditions()
# obtaining options
get_cmd_line_args()
#getUIdetails line
#cmd_line_args['UIDetails'] = UIDetails()
# validate options
validate_options()
# validate users
#validate_user(cmd_line_args['user_name'], cmd_line_args['user_passwd'])
# creating list of multiple products
cmd_line_args['produce_code'] = [i.strip() for i \
in cmd_line_args['produce_code'].strip().split(',')]
# creating list of multiple test_suites
cmd_line_args['test_suites'] = [i.strip() for i \
in cmd_line_args['test_suites'].strip().split(',')]
# creating list of emergency mail ids
if cmd_line_args['emerg_mails']:
cmd_line_args['emerg_mails'] = [i.strip() for \
i in cmd_line_args['emerg_mails'].split(',')]
# creating list of multiple execution
if cmd_line_args['multi_exec']:
cmd_line_args['multi_exec'] = [i.strip() for i in \
cmd_line_args['multi_exec'].strip().split(',')]
# mapping between test suite and test suite counts
map_bw_test_suite_test_count = \
test_suite_count_set(cmd_line_args['test_suites'], \
cmd_line_args['multi_exec'])
# mapping of product code
mappedtest_suites = match_test_suites(cmd_line_args['produce_code'], \
cmd_line_args['test_suites'])
TSP.driver_data = cmd_line_args
# Open database connection
db = pymysql.connect(host="IP_ADDR_MYSQL_DATABASE",
user="automation",
password="passw0rd",
db="ericsson_automation",
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
# prepare a cursor object using cursor() method
cursor = db.cursor()
USER = "'"+cmd_line_args['user_name']+"'"
TSP.g_user_name = cmd_line_args['user_name']
# Prepare SQL query to INSERT a record into the database.
DEVICE_UNDER_TEST_QRY = "SELECT t2.config_val from `config_keys` as t1 left join `user_config` as t2 ON t2.config_key_id = t1.id left join `users` as t3 ON t3.id = t2.user_id WHERE t3.username = "+USER+" AND t1.key_name = 'Device Under Test IP'"
SEGMENTER_DEST_PATH_QRY = "SELECT t2.config_val from `config_keys` as t1 left join `user_config` as t2 ON t2.config_key_id = t1.id left join `users` as t3 ON t3.id = t2.user_id WHERE t3.username = "+USER+" AND t1.key_name = 'Segmenter Destination Path'"
PLATFORM_QRY = "SELECT t2.config_val from `config_keys` as t1 left join `user_config` as t2 ON t2.config_key_id = t1.id left join `users` as t3 ON t3.id = t2.user_id WHERE t3.username = "+USER+" AND t1.key_name = 'Platform'"
CDN_1_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'CDN -1 URL'"
CDN_2_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'CDN -2 URL'"
CDN_3_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'CDN -3 URL'"
CDN_4_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'CDN -4 URL'"
CDN_5_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'CDN -5 URL'"
CDN_6_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'CDN -6 URL'"
STREAMS_SERVER_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'Stream Pump Server URL'"
STREAMS_PATH_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'Streams Path'"
VMS_SERVER_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'Validation Matrix Server URL'"
RCS_SERVER_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'Run-Time Command Sever URL'"
SGL_PATH_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'SGL Path'"
#SPS_PATH_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'SPS Path'"
#RCS_PATH_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'RCS Path'"
#VMS_PATH_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'VMS Path'"
OUTPUT_ASSET_PATH_QRY = "SELECT t2.config_val from `config_keys` as t1 left join `user_config` as t2 ON t2.config_key_id = t1.id left join `users` as t3 ON t3.id = t2.user_id WHERE t3.username = "+USER+" AND t1.key_name = 'Output Asset Path'"
#EXPORT_PATH_QRY = "SELECT t2.config_val from `config_keys` as t1 left join `user_config` as t2 ON t2.config_key_id = t1.id left join `users` as t3 ON t3.id = t2.user_id WHERE t3.username = "+USER+" AND t1.key_name = 'EXPORT PATH'"
#Streams_Path = "SELECT t2.config_val from `config_keys` as t1 left join `user_config` as t2 ON t2.config_key_id = t1.id left join `users` as t3 ON t3.id = t2.user_id WHERE t3.username = "+USER+" AND t1.key_name = 'Streams Path'"
ENCRYPTION_KEY_URL_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'ENCRYPTION KEY URL'"
SGL_STATUS_LOG_DIRECTORY_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'SGL Status Log Directory'"
SPS_STATUS_LOG_DIRECTORY_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'SPS Status Log Directory'"
RCS_STATUS_LOG_DIRECTORY_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'RCS Status Log Directory'"
VMS_STATUS_LOG_DIRECTORY_QRY = "select default_val from config_keys where key_for = 'admin' and key_name = 'VMS Status Log Directory'"
DEVICE_UNDER_TEST = {}
SEGMENTER_DEST_PATH = {}
CDN_1_BASE_URL='http://cdn1.example.com/'
CDN_2_BASE_URL='http://cdn2.example.com/'
CDN_3_BASE_URL='http://cdn3.example.com/'
CDN_4_BASE_URL='http://cdn4.example.com/'
CDN_5_BASE_URL='http://cdn5.example.com/'
CDN_6_BASE_URL='http://cdn6.example.com/'
OUTPUT_ASSET_PATH = {}
STREAMS_SERVER_URL = {}
VMS_SERVER_URL = {}
RCS_SERVER_URL = {}
SPS_PATH = {}
RCS_PATH = {}
VMS_PATH = {}
EXPORT_PATH = {}
PLATFORM = {}
try:
cursor.execute(DEVICE_UNDER_TEST_QRY)
DEVICE_UNDER_TEST_VAL = cursor.fetchall()
DEVICE_UNDER_TEST = DEVICE_UNDER_TEST_VAL[0]['config_val']
cursor.execute(SEGMENTER_DEST_PATH_QRY)
SEGMENTER_DEST_PATH_VAL = cursor.fetchall()
SEGMENTER_DEST_PATH = SEGMENTER_DEST_PATH_VAL[0]['config_val']
cursor.execute(PLATFORM_QRY)
PLATFORM_VAL = cursor.fetchall()
PLATFORM = PLATFORM_VAL[0]['config_val']
cursor.execute(STREAMS_SERVER_URL_QRY)
STREAMS_SERVER_URL_VAL = cursor.fetchall()
STREAMS_SERVER_URL = STREAMS_SERVER_URL_VAL[0]['default_val']
cursor.execute(STREAMS_PATH_QRY)
STREAMS_PATH_VAL = cursor.fetchall()
STREAMS_PATH = STREAMS_PATH_VAL[0]['default_val']
cursor.execute(VMS_SERVER_URL_QRY)
VMS_SERVER_URL_VAL = cursor.fetchall()
VMS_SERVER_URL = VMS_SERVER_URL_VAL[0]['default_val']
cursor.execute(RCS_SERVER_URL_QRY)
RCS_SERVER_URL_VAL = cursor.fetchall()
RCS_SERVER_URL = RCS_SERVER_URL_VAL[0]['default_val']
cursor.execute(SGL_PATH_QRY)
SGL_PATH_VAL = cursor.fetchall()
SGL_PATH = SGL_PATH_VAL[0]['default_val']
'''
cursor.execute(SPS_PATH_QRY)
SPS_PATH_VAL = cursor.fetchall()
SPS_PATH = SPS_PATH_VAL[0]['default_val']
cursor.execute(RCS_PATH_QRY)
RCS_PATH_VAL = cursor.fetchall()
RCS_PATH = RCS_PATH_VAL[0]['default_val']
cursor.execute(VMS_PATH_QRY)
VMS_PATH_VAL = cursor.fetchall()
VMS_PATH = VMS_PATH_VAL[0]['default_val']
'''
cursor.execute(CDN_1_URL_QRY)
CDN_1_URL_VAL = cursor.fetchall()
CDN_1_BASE_URL = CDN_1_URL_VAL[0]['default_val']
cursor.execute(CDN_2_URL_QRY)
CDN_2_URL_VAL = cursor.fetchall()
CDN_2_BASE_URL = CDN_2_URL_VAL[0]['default_val']
cursor.execute(CDN_3_URL_QRY)
CDN_3_URL_VAL = cursor.fetchall()
CDN_3_BASE_URL= CDN_3_URL_VAL[0]['default_val']
cursor.execute(CDN_4_URL_QRY)
CDN_4_URL_VAL = cursor.fetchall()
CDN_4_BASE_URL= CDN_4_URL_VAL[0]['default_val']
cursor.execute(CDN_5_URL_QRY)
CDN_5_URL_VAL = cursor.fetchall()
CDN_5_BASE_URL= CDN_5_URL_VAL[0]['default_val']
cursor.execute(CDN_6_URL_QRY)
CDN_6_URL_VAL = cursor.fetchall()
CDN_6_BASE_URL= CDN_6_URL_VAL[0]['default_val']
cursor.execute(OUTPUT_ASSET_PATH_QRY)
OUTPUT_ASSET_PATH_VAL = cursor.fetchall()
OUTPUT_ASSET_PATH = OUTPUT_ASSET_PATH_VAL[0]['config_val']
cursor.execute(ENCRYPTION_KEY_URL_QRY)
ENCRYPTION_KEY_VAL = cursor.fetchall()
ENCRYPTION_KEY_URL = ENCRYPTION_KEY_VAL[0]['default_val']
cursor.execute(SGL_STATUS_LOG_DIRECTORY_QRY)
SGL_STATUS_LOG_DIRECTORY_VAL = cursor.fetchall()
SGL_STATUS_LOG_DIRECTORY = SGL_STATUS_LOG_DIRECTORY_VAL[0]['default_val']
cursor.execute(SPS_STATUS_LOG_DIRECTORY_QRY)
SPS_STATUS_LOG_DIRECTORY_VAL = cursor.fetchall()
SPS_STATUS_LOG_DIRECTORY = SPS_STATUS_LOG_DIRECTORY_VAL[0]['default_val']
cursor.execute(RCS_STATUS_LOG_DIRECTORY_QRY)
RCS_STATUS_LOG_DIRECTORY_VAL = cursor.fetchall()
RCS_STATUS_LOG_DIRECTORY = RCS_STATUS_LOG_DIRECTORY_VAL[0]['default_val']
cursor.execute(VMS_STATUS_LOG_DIRECTORY_QRY)
VMS_STATUS_LOG_DIRECTORY_VAL = cursor.fetchall()
VMS_STATUS_LOG_DIRECTORY = VMS_STATUS_LOG_DIRECTORY_VAL[0]['default_val']
#cursor.execute(EXPORT_PATH_QRY)
#EXPORT_PATH_VAL = cursor.fetchall()
#print EXPORT_PATH_VAL[0]['config_val']
#cursor.execute(Streams_Path)
#StreamsPath = cursor.fetchall()
#print StreamsPath[0]['config_val']
except:
print "Error: unable to execute the query"
# disconnect from server
db.close()
SEGMENTS_MOUNT_PATH='/mnt/'+cmd_line_args['user_name']+''
STREAMS_MOUNT_PATH='/mnt/streams/'
# Launch Connection with the Infrastructure components namely
# Segment Launcher
# Stream Pump Server
# Run-Time Command Server
# Validation Matrix Server
global sgl_process
global stl_process
global rcl_process
global val_process
if PLATFORM != "windows":
sgl_process=pexpect.spawn("ssh root@" + DEVICE_UNDER_TEST, timeout=3)
stl_process=pexpect.spawn("ssh root@" + STREAMS_SERVER_URL, timeout=3)
rcl_process=pexpect.spawn("ssh root@" + RCS_SERVER_URL, timeout=3)
val_process=pexpect.spawn("ssh root@" + VMS_SERVER_URL, timeout=3)
ssh_key1='Are you sure you want to continue connecting'
ssh_key2='password:'
ssh_key3='Permission denied'
ret=stl_process.expect([ssh_key1,ssh_key2,pexpect.TIMEOUT,pexpect.EOF])
if ret==0:
stl_process.sendline('yes')
ret=stl_process.expect([ssh_key1,ssh_key2,pexpect.TIMEOUT,pexpect.EOF])
if ret==1:
stl_process.sendline("rebaca")
ret=stl_process.expect([ssh_key3,pexpect.TIMEOUT])
if ret==0:
print "Provided user credentials are incorrect for stream server: " + STREAMS_SERVER_URL
Terminate_pexpect_connections()
sys.exit(1)
elif ret==2:
print "TIMEOUT: none of the expected patterns matched the output from 'expect' for stream server " + STREAMS_SERVER_URL
print stl_process.before
stl_process.terminate()
sys.exit(1)
elif ret==3:
print "EOF reached: stream server at " + STREAMS_SERVER_URL + " is unreachable."
print stl_process.before
stl_process.terminate()
sys.exit(1)
'''
try:
stl_process.expect('password')
except:
stl_process.expect('(yes/no)')
stl_process.sendline("yes")
stl_process.expect('password')
stl_process.sendline("rebaca")
'''
ret=rcl_process.expect([ssh_key1,ssh_key2,pexpect.TIMEOUT,pexpect.EOF])
if ret==0:
rcl_process.sendline('yes')
ret=rcl_process.expect([ssh_key1,ssh_key2,pexpect.TIMEOUT,pexpect.EOF])
if ret==1:
rcl_process.sendline("rebaca")
ret=rcl_process.expect([ssh_key3,pexpect.TIMEOUT])
if ret==0:
print "Provided user credentials are incorrect for runtime command server: " + STREAMS_SERVER_URL
Terminate_pexpect_connections()
sys.exit(1)
elif ret==2:
print "TIMEOUT: none of the expected patterns matched the output from 'expect' for runtime command server " + RCS_SERVER_URL
print rcl_process.before
rcl_process.terminate()
sys.exit(1)
elif ret==3:
print "EOF reached: runtime command server at " + RCS_SERVER_URL + " is unreachable."
print rcl_process.before
rcl_process.terminate()
sys.exit(1)
'''
try:
rcl_process.expect('password')
except:
rcl_process.expect('(yes/no)?')
rcl_process.sendline("yes")
rcl_process.expect('password')
rcl_process.sendline("rebaca")
'''
ret=val_process.expect([ssh_key1,ssh_key2,pexpect.TIMEOUT,pexpect.EOF])
if ret==0:
val_process.sendline('yes')
ret=val_process.expect([ssh_key1,ssh_key2,pexpect.TIMEOUT,pexpect.EOF])
if ret==1:
val_process.sendline("rebaca")
ret=val_process.expect([ssh_key3,pexpect.TIMEOUT])
if ret==0:
print "Provided user credentials are incorrect for validation server: " + STREAMS_SERVER_URL
Terminate_pexpect_connections()
sys.exit(1)
elif ret==2:
print "TIMEOUT: none of the expected patterns matched the output from 'expect' for validation server " + VMS_SERVER_URL
print val_process.before
val_process.terminate()
sys.exit(1)
elif ret==3:
print "EOF reached: validation server at " + VMS_SERVER_URL + " is unreachable."
print val_process.before
val_process.terminate()
sys.exit(1)
'''
try:
val_process.expect('password')
except:
val_process.expect('(yes/no)?')
val_process.sendline("yes")
val_process.expect('password')
val_process.sendline("rebaca")
'''
if PLATFORM != "windows":
ret=sgl_process.expect([ssh_key1,ssh_key2,pexpect.TIMEOUT,pexpect.EOF])
if ret==0:
sgl_process.sendline('yes')
ret=sgl_process.expect([ssh_key1,ssh_key2,pexpect.TIMEOUT,pexpect.EOF])
if ret==1:
sgl_process.sendline("rebaca")
ret=sgl_process.expect([ssh_key3,pexpect.TIMEOUT])
if ret==0:
print "Provided user credentials are incorrect for server: " + STREAMS_SERVER_URL
Terminate_pexpect_connections()
sys.exit(1)
elif ret==2:
print "TIMEOUT: none of the expected patterns matched the output from 'expect' for segmenter machine " + DEVICE_UNDER_TEST
print sgl_process.before
sgl_process.terminate()
sys.exit(1)
elif ret==3:
print "EOF reached: segmenter machine at " + DEVICE_UNDER_TEST + " is unreachable."
print sgl_process.before
sgl_process.terminate()
sys.exit(1)
'''
try:
sgl_process.expect('password')
except:
sgl_process.expect('(yes/no)?')
sgl_process.sendline("yes")
sgl_process.expect('password')
sgl_process.sendline("rebaca")
'''
run_sgl_cmd='mkdir ' + SGL_STATUS_LOG_DIRECTORY + '; segment_launcher ' + g_config_data['env_ip'] + ' ' + cmd_line_args['PORTS'][ASP_SGL_COMMAND_INDEX] + ' ' \
+ cmd_line_args['PORTS'][ASP_SGL_STATUS_INDEX] + ' ' + SGL_STATUS_LOG_DIRECTORY + ' ' + cmd_line_args["user_name"] + ' > ' + SGL_PATH + \
'/console_sgl_' + cmd_line_args["user_name"] + '.txt 2>&1 &'
run_stl_cmd='mkdir ' + SPS_STATUS_LOG_DIRECTORY + '; stream_pump_server ' + g_config_data['env_ip'] + ' ' + cmd_line_args['PORTS'][ASP_STL_COMMAND_INDEX] + \
' ' + cmd_line_args['PORTS'][ASP_STL_STATUS_INDEX] + ' ' + SPS_STATUS_LOG_DIRECTORY + ' ' + cmd_line_args["user_name"] + ' > ' + SPS_STATUS_LOG_DIRECTORY + \
'/console_sps_' + cmd_line_args["user_name"] + '.txt 2>&1 &'
run_rcs_cmd='mkdir ' + RCS_STATUS_LOG_DIRECTORY + '; runtime_command_server ' + g_config_data['env_ip'] + ' ' + cmd_line_args['PORTS'][ASP_RCS_COMMAND_INDEX] +\
' ' + cmd_line_args['PORTS'][ASP_RCS_STATUS_INDEX] + ' ' + RCS_STATUS_LOG_DIRECTORY + ' ' + cmd_line_args["user_name"] + ' > ' + RCS_STATUS_LOG_DIRECTORY + \
'/console_rcs_' + cmd_line_args["user_name"] + '.txt 2>&1 &'
run_val_cmd='mkdir ' + VMS_STATUS_LOG_DIRECTORY + '; validation_matrix_server ' + g_config_data['env_ip'] + ' ' + cmd_line_args['PORTS'][ASP_VMS_COMMAND_INDEX] +\
' ' + cmd_line_args['PORTS'][ASP_VMS_STATUS_INDEX] + ' ' + VMS_STATUS_LOG_DIRECTORY + ' ' + cmd_line_args["user_name"] + ' > ' + VMS_STATUS_LOG_DIRECTORY + \
'/console_vms_' + cmd_line_args["user_name"] + '.txt 2>&1 &'
if PLATFORM != "windows":
sgl_process.sendline('export LD_LIBRARY_PATH=' + SGL_PATH + '/lib:' + SGL_PATH + '/lib/boost:' + SGL_PATH + '/lib/openssl-1.0.0l:' + SGL_PATH + '/lib/linux64:' + SGL_PATH + '/lib/proto')
#stl_process.sendline('export LD_LIBRARY_PATH=' + SPS_PATH + '/lib')
#rcl_process.sendline('export LD_LIBRARY_PATH=' + RCS_PATH + '/lib')
#val_process.sendline('export LD_LIBRARY_PATH=' + VMS_PATH + '/lib')
if PLATFORM != "windows":
sgl_process.sendline("cd " + SGL_PATH)
#stl_process.sendline("cd " + SPS_PATH + "/support")
#rcl_process.sendline("cd " + RCS_PATH + "/support")
#val_process.sendline("cd " + VMS_PATH + "/support")
if PLATFORM != "windows":
sgl_process.sendline(run_sgl_cmd)
stl_process.sendline(run_stl_cmd)
rcl_process.sendline(run_rcs_cmd)
val_process.sendline(run_val_cmd)
# Establish Connection with the Infrastructure components namely
# Segment Launcher
# Stream Pump Server
# Run-Time Command Server
# Validation Matrix Server
TSP.TSP.Establish_Connection(cmd_line_args['PORTS'][ASP_SGL_COMMAND_INDEX], cmd_line_args['PORTS'][ASP_SGL_STATUS_INDEX],\
cmd_line_args['PORTS'][ASP_STL_COMMAND_INDEX], cmd_line_args['PORTS'][ASP_STL_STATUS_INDEX],\
cmd_line_args['PORTS'][ASP_RCS_COMMAND_INDEX], cmd_line_args['PORTS'][ASP_RCS_STATUS_INDEX],\
cmd_line_args['PORTS'][ASP_VMS_COMMAND_INDEX], cmd_line_args['PORTS'][ASP_VMS_STATUS_INDEX])
if PLATFORM != "windows":
AZ_HLS_MOUNT_SEGMENTS_CMD = '{"MOUNT": "'+ DEVICE_UNDER_TEST +' '+ OUTPUT_ASSET_PATH +' '+ SEGMENTS_MOUNT_PATH +'"}'
else:
AZ_HLS_MOUNT_SEGMENTS_CMD = '{"MOUNT": "\\\"-t cifs -o username=rebaca,password=rebaca //'+ DEVICE_UNDER_TEST +'/'+ OUTPUT_ASSET_PATH +'\\\" /'+ SEGMENTS_MOUNT_PATH +'"}'
print "windows mount command: " + AZ_HLS_MOUNT_SEGMENTS_CMD
AZ_HLS_UNMOUNT_SEGMENTS_CMD = '{"UNMOUNT": "'+ SEGMENTS_MOUNT_PATH +'"}'
AZ_HLS_MOUNT_STREAMS_CMD = '{"MOUNT": "'+ STREAMS_SERVER_URL +' '+STREAMS_PATH+' '+ STREAMS_MOUNT_PATH +'"}'
AZ_HLS_UNMOUNT_STREAMS_CMD = '{"UNMOUNT": "'+ STREAMS_MOUNT_PATH +'"}'
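# For illustration (IPs and paths are hypothetical), the rendered commands
# look like:
#   AZ_HLS_MOUNT_SEGMENTS_CMD   -> {"MOUNT": "10.0.0.5 /data/segments /mnt/alice"}
#   AZ_HLS_UNMOUNT_SEGMENTS_CMD -> {"UNMOUNT": "/mnt/alice"}
#   AZ_HLS_MOUNT_STREAMS_CMD    -> {"MOUNT": "10.0.0.6 /data/streams /mnt/streams/"}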
# Mount the Segments directory of the Device Under Test to Validation Matrix Server
TSP.TSP.Mount_Segments(TSP.g_socket_info, AZ_HLS_MOUNT_SEGMENTS_CMD)
# Mount the Streams directory of Stream Server to the Device Under Test
if PLATFORM != "windows":
TSP.TSP.Mount_Streams(TSP.g_socket_info, AZ_HLS_MOUNT_STREAMS_CMD)
# Create directory for storing the JSON objects
TSP.g_json_directory = os.path.join(os.path.abspath(os.path.dirname(__file__))+ '/HLS/results/json_reports/',
"" + cmd_line_args['produce_code'][0] + "_" + cmd_line_args["user_name"] + "_" + str(
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M')))
try:
os.makedirs(TSP.g_json_directory)
except OSError, e:
if e.errno != 17:
print "Directory not created" # This was not a "directory exist" error..
# flag to transfer validation setup to the client
flag_validation_setup_transfer = False
# executing multiple products
for product_code in cmd_line_args['produce_code']:
console_log("=" * 78)
console_log("=" * 78)
console_log("=" * 78)
console_log("INFO -> ", "".join(["execution starts for product = ", \
product_code]))
# reading common resource configuration
# cfg_file_name = os.path.join(g_config_data['current_working_dir'], \
# product_code.upper(), \
# "config", cmd_line_args['user_name']+"_cfg", "common_resource.txt")
# searched_param_list = ["SYSTEM_OS", "mail_to", "mail_from", "mail_server_address", \
# "mail_server_uid", \
# "mail_server_password", "mail_server_port", "ftp_ip", "ftp_un", "ftp_pw", \
# "test_cases"]
# readconfig(cfg_file_name, searched_param_list)
# reading os configuration
os_config_file_name = None
if g_config_data['SYSTEM_OS']:
if g_config_data['SYSTEM_OS'].lower() == "windows":
os_config_file_name = "Windows_config.txt"
elif g_config_data['SYSTEM_OS'].lower() == "linux":
os_config_file_name = "Linux_config.txt"
# Uncomment the following 5 lines of code for enabling file-transfer of tools
# res_dic = perform_set_up_task(g_config_data, cmd_line_args, \
# os_config_file_name, product_code)
# if not res_dic:
# print "Validation set up transfer failed"
# return False
#--------------------------------------------------------------#
# os_cfg_file_name = os.path.join(g_config_data['current_working_dir'],
# product_code.upper(),
# "config", cmd_line_args['user_name']+"_cfg", os_config_file_name)
# searched_param_list = ["VAL_CLIENT_IP", "VAL_CLIENT_UID", "VAL_CLIENT_PWD",
# "VAL_SETUP_LOCATION", "VAL_SYS_OS", "SCENESCOPE_CLIENT_IP",
# "SCENESCOPE_CLIENT_UID", "SCENESCOPE_CLIENT_PWD",
# "SCENESCOPE_SETUP_LOCATION", "SCENESCOPE_SYS_OS"]
# readconfig(os_cfg_file_name, searched_param_list)
#
# # Transferring set up file for MEPG2 packet analyzer
# if not flag_validation_setup_transfer and \
# g_config_data['VAL_SYS_OS'].lower() == "windows" and
# g_config_data['VAL_SYS_OS'].lower() == "windows":
# source_file = os.path.join(g_config_data['current_working_dir'],
# "sharedlib", "segment_validation")
# destination_file = g_config_data \
# ["VAL_SETUP_LOCATION"].replace('\\', '\\\\')
# transfer_files(source_file, destination_file, \
# g_config_data['VAL_CLIENT_IP'],
# g_config_data['VAL_CLIENT_UID'],
# g_config_data['VAL_CLIENT_PWD'], True, 30)
# flag_validation_setup_transfer = True
#
suite_cmd = ""
# command for test suite
for test_suite in mappedtest_suites[product_code.upper()]:
test_suite_cmd = " " + os.path.join(g_config_data['current_working_dir'], \
product_code.upper(), "testsuites", test_suite)
if cmd_line_args['multi_exec']:
test_suite_cmd = test_suite_cmd * int(map_bw_test_suite_test_count[test_suite])
suite_cmd += " " + test_suite_cmd
# execution exits if a wrong suite is mentioned
if not suite_cmd:
console_log("DEBUG -> ", "Invalid test suite names")
exit_execution()
# command for test cases
# obtain all test_cases from testsuite
test_caseslist_suite_wise = \
obtain_test_cases_suite(mappedtest_suites[product_code.upper()], \
os.path.join(g_config_data['current_working_dir'], product_code.upper(), \
"testsuites"))
# creating list of multiple test_cases
if cmd_line_args['test_cases']:
test_case_mapping(test_caseslist_suite_wise, \
mappedtest_suites[product_code.upper()])
# listing valid test_cases
if cmd_line_args['test_cases']:
resp = matching_valid_testcase(test_caseslist_suite_wise, \
mappedtest_suites[product_code.upper()], \
cmd_line_args['test_cases'])
if not resp:
exit_execution()
test_case_cmd = ""
if cmd_line_args['test_cases']:
for test_case in cmd_line_args['test_cases']:
test_case = "_".join(test_case.split())
test_case_cmd += ("-t " + test_case +" ")
# command for report directory
report_dir_path = set_report_directory_path(product_code)
report_dir_cmd = "".join(["-d ", report_dir_path])
# command for test report name
set_report_name(product_code)
report_name_cmd = "".join(["-r ", g_config_data['report_name']])
# command for test log name
log_name_cmd = "".join(["-l ", g_config_data['log_name']])
repname = "".join([report_dir_path, "/", g_config_data['report_name']])
#print "repname ==>", repname
lname = "".join([report_dir_path, "/", g_config_data['log_name']])
# command to test xml report name
if cmd_line_args['enb_xml']:
xml_command_name = "".join(["-o ", \
g_config_data['xml_name']])
else:
xml_command_name = "".join(["-o ", "NONE"])
# adding variable
addvariable = "".join([" ", "--variable ", "uname:", \
cmd_line_args['user_name'], " "])
# adding variable file
addvariablefile = "".join([" ", "--variablefile ", \
cmd_line_args['variable_file'], " "])
# entire command
entire_command = "".join([report_dir_cmd, " ", report_name_cmd, " ", \
log_name_cmd, " ", xml_command_name, addvariable, addvariablefile, test_case_cmd, suite_cmd])
#console_log("INFO -> ", "Options = ", entire_command)
entire_cmd_list = entire_command.split()
# executing process
exec_proc(run_cli, list(entire_cmd_list))
# generate report data
parentDir = os.path.dirname(__file__)
rootDirPath = os.path.abspath(parentDir)
def __parseLog():
report_ip = AzukiDB.getKeyValue('admin', 'reporting_ip')
if not report_ip:
report_ip = g_config_data['env_ip']
# Parsing log files
logFileParserPath = os.path.join( rootDirPath,
'sharedlib', 'reportAccumulator.py')
os.system( '/usr/bin/python '+ logFileParserPath + \
' -g ' + rootDirPath + \
' -H ' + report_ip)
t = Thread (target=__parseLog)
t.daemon = True
t.start()
# sending mail
mail_subject = ('"Automation ends for product = "' + product_code.upper() + \
'" by user = "' + cmd_line_args['user_name'])
#mail_body = ("".join(['"http://"', g_config_data['env_ip'],
# '"/"', '"/AzukiAutomation/"', product_code.upper(),
#'"/results/"', g_config_data['report_name']]))
"""
mail_body = '"Dear "' + g_config_data['mail_to'].split('@')[0] + \
'"\nAutomation executed on "' + product_code.upper() +\
'" and Automation Test Report kept in FTP IP: "' + g_config_data['ftp_ip'] + \
'"\nThanks,\nAzuki-Rebaca Team"'
# Copying Test Report to FTP and FTP details sending in mail
ftp_check = ftp.send_file_to_ftp(g_config_data['ftp_ip'], g_config_data['ftp_un'], \
g_config_data['ftp_pw'], repname)
send_emails(mail_subject, mail_body)
"""
# Unmount the Segments directory of the Device Under Test from the Validation Matrix Server
TSP.TSP.UnMount_Segments(TSP.g_socket_info, AZ_HLS_UNMOUNT_SEGMENTS_CMD)
# Unmount the Streams directory of the Stream Server from the Device Under Test
if PLATFORM != "windows":
TSP.TSP.UnMount_Streams(TSP.g_socket_info, AZ_HLS_UNMOUNT_STREAMS_CMD)
TSP.TSP.Terminate_Sessions()
console_log("INFO -> ", "Exection ends for product = ", product_code.upper())
# freeup ports
AzukiDB.releasePorts(cmd_line_args['PORTS'])
#closing db connection
AzukiDB.closeConnection()
if __name__ == "__main__":
try:
main()
except:
# freeup ports
AzukiDB.releasePorts(cmd_line_args['PORTS'])
#closing db connection
AzukiDB.closeConnection()
raise
################################
####### MontyLingua.py #########
################################
"""
Module MontyLingua
MONTY LINGUA - An end-to-end natural language processor
for English, for the Python/Java platform
Author: Hugo Liu <hugo@media.mit.edu>
Project Page: <http://web.media.mit.edu/~hugo/montylingua>
Copyright (c) 2002-2004 by Hugo Liu, MIT Media Lab
All rights reserved.
Non-commercial use is free, as provided in the GNU GPL
By downloading and using MontyLingua, you agree to abide
by the additional copyright and licensing information in
"license.txt", included in this distribution
If you use this software in your research, please
acknowledge MontyLingua and its author, and link to back
to the project page http://web.media.mit.edu/~hugo/montylingua.
Please cite montylingua in academic publications as:
Liu, Hugo (2004). MontyLingua: An end-to-end natural
language processor with common sense. Available
at: web.media.mit.edu/~hugo/montylingua.
************************************************
DOCUMENTATION OVERVIEW
About MontyLingua:
- MontyTokenizer
- normalizes punctuation, spacing and
contractions, with sensitivity to abbrevs.
- MontyTagger
- Part-of-speech tagging using PENN TREEBANK tagset
- enriched with "Common Sense" from the Open Mind
Common Sense project
- exceeds accuracy of Brill94 tbl tagger
using default training files
- MontyREChunker
- chunks tagged text into verb, noun, and adjective
chunks (VX,NX, and AX respectively)
- incredible speed and accuracy improvement over
previous MontyChunker
- MontyExtractor
- extracts verb-argument structures, phrases, and
other semantically valuable information
from sentences and returns sentences as "digests"
- MontyLemmatiser
- part-of-speech sensitive lemmatisation
- strips plurals (geese-->goose) and
tense (were-->be, had-->have)
- includes regexps from Humphreys and Carroll's
morph.lex, and UPENN's XTAG corpus
- MontyNLGenerator
- generates summaries
- generates surface form sentences
- determines and numbers NPs and tenses verbs
- accounts for sentence_type
WHERE MUST THE DATAFILES BE?
- the "datafiles" include all files ending in *.MDF
- the best solution is to create an environment variable called
"MONTYLINGUA" and put the path to the datafiles there
- alternatively, MontyLingua can find the datafiles if they are
in the operating system "PATH" variable, or in the current
working directory
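- e.g., from a Unix shell (the install path here is hypothetical):
export MONTYLINGUA=/usr/local/montylingua/datafiles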
API:
The MontyLingua Python API is MontyLingua.html
The MontyLingua Java API is JMontyLingua.html
RUNNING:
MontyLingua can be called from Python, Java,
or run at the command line.
A. From Python, import the MontyLingua.py file
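e.g., a minimal sketch (the example sentence is illustrative):
from MontyLingua import MontyLingua
ml = MontyLingua()
print ml.jist_predicates("I ate an apple.")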
B. From your Java code:
1. make sure "montylingua.jar" is
in your class path, in addition to
associated subdirectories and data files
2. in your code, you need something like:
import montylingua.JMontyLingua; // loads namespace
public class YourClassHere {
public static JMontyLingua j = new JMontyLingua();
public yourFunction(String raw, String toked) {
jisted = j.jist_predicates(raw); // an example function
3. For a good use case example, see Sample.java.
C. From the command line:
1. if you have python installed and in your path:
type "run.bat"
2. if you have java installed and in your path:
type "runJavaCommandline.bat"
VERSION HISTORY:
New in version 2.1 (6 Aug 2004)
- new MontyNLGenerator component (in Beta phase)
- includes version 2.0.1 bugfix for problem
where java api wasn't being exposed
New in version 2.0 (29 Jul 2004)
- 2.5X speed enhancement for whole system
2X speed enhancement for tagger component
- rule-based chunker replaced with much faster
and more accurate regular expression chunker
- common sense added to MontyTagger component
improves word-level tagger accuracy to 97%
- updated and expanded lexicon for English
- added a user-customizable lexicon
CUSTOMLEXICON.MDF
- improvements to MontyLemmatiser incorporating
exception cases
- html documentation added
- speed optimizations to all code
- improvements made to semantic extraction
- added a morphological analyzer component,
MontyMorph
- expanded Java API
New in version 1.3.1 (11 Nov 2003)
- mainly bugfixes
- datafiles can now sit in the current working directory (".")
or in the path of either of the two environment variables
"MONTYLINGUA" or "PATH"
- presence of the '/' token in input won't crash system
New in Version 1.3 (5 Nov 2003)
- lisp-style predicate output added
- Sample.java example file added to illustrate API
New in Version 1.2 (12 Sep 2003)
- MontyChunker rules expanded
- MontyLingua JAVA API added
- MontyLingua documentation added
New in Version 1.1 (1 Sep 2003)
- MontyTagger optimized, 2X loading and 2.5X tagging speed
- MontyLemmatiser added to MontyLingua suite
- MontyChunker added
- MontyLingua command-line capability added
New in Version 1.0 (3 Aug 2003)
- First release
- MontyTagger (since 15 Jan 2001) added to MontyLingua
--please send bugs & suggestions to hugo@media.mit.edu--
"""
__author__ = "Hugo Liu <hugo@media.mit.edu>"
__version__ = "2.1"
import MontyTokenizer, MontyTagger, MontyLemmatiser, MontyREChunker, MontyExtractor, MontyNLGenerator
class MontyLingua:
def __init__(self,trace_p=0):
print '\n****** MontyLingua v.'+__version__+' ******'
print '***** by hugo@media.mit.edu *****'
self.trace_p = trace_p
self.theMontyTokenizer = MontyTokenizer.MontyTokenizer()
self.theMontyLemmatiser = MontyLemmatiser.MontyLemmatiser()
self.theMontyTagger = MontyTagger.MontyTagger(trace_p,self.theMontyLemmatiser)
self.theMontyChunker = MontyREChunker.MontyREChunker()
self.theMontyExtractor = MontyExtractor.MontyExtractor()
self.theMontyNLGenerator = MontyNLGenerator.MontyNLGenerator()
print '*********************************\n'
#
# MAIN FUNCTIONS
#
def generate_summary(self,vsoos):
return self.theMontyNLGenerator.generate_summary(vsoos)
def generate_sentence(self,vsoo,sentence_type='declaration',tense='past',s_dtnum=('',1),o1_dtnum=('',1),o2_dtnum=('',1),o3_dtnum=('',1)):
return self.theMontyNLGenerator.generate_sentence(vsoo,sentence_type=sentence_type,tense=tense,s_dtnum=s_dtnum,o1_dtnum=o1_dtnum,o2_dtnum=o2_dtnum,o3_dtnum=o3_dtnum)
def jist_predicates(self,text):
infos = self.jist(text)
svoos_list = []
for info in infos:
svoos = info['verb_arg_structures_concise']
svoos_list.append(svoos)
return svoos_list
def tokenize(self,sentence,expand_contractions_p=1):
return self.theMontyTokenizer.tokenize(sentence,expand_contractions_p)
def tag_tokenized(self,tokenized_text):
return self.theMontyTagger.tag_tokenized(tokenized_text)
def jist(self,text):
sentences = self.split_sentences(text)
tokenized = map(self.tokenize,sentences)
tagged = map(self.tag_tokenized,tokenized)
chunked = map(self.chunk_tagged,tagged)
print "CHUNKED: " + string.join(chunked,'\n ')
extracted = map(self.extract_info,chunked)
return extracted
def pp_info(self,extracted_infos):
"""pretty prints sentence information digests returned by jist()"""
for i in range(len(extracted_infos)):
keys = extracted_infos[i].keys()
keys.sort()
print "\n\n SENTENCE #%s DIGEST:\n"%str(i+1)
for key in keys:
print (key+": ").rjust(22) + str(extracted_infos[i][key])
def split_paragraphs(self,text):
return self.theMontyTokenizer.split_paragraphs(text)
def split_sentences(self,text):
return self.theMontyTokenizer.split_sentences(text)
def strip_tags(self,tagged_or_chunked_text):
toks = tagged_or_chunked_text.split()
toks = filter(lambda x:'/' in x,toks)
toks = map(lambda x:x.split('/')[0],toks)
return ' '.join(toks)
def parse_pred_arg(self,pp):
        # un-pretty-print an augmented predicate string back into its arguments
        toks = pp.strip()[1:-1].split()
args = ' '.join(toks)[1:-1].split('" "')
return args
def chunk_tagged(self,tagged_text):
return self.theMontyChunker.Process(tagged_text)
def chunk_lemmatised(self,lemmatised_text):
return self.theMontyChunker.chunk_multitag(lemmatised_text)
def lemmatise_tagged(self,tagged_text):
return self.theMontyLemmatiser.lemmatise_tagged_sentence(tagged_text)
def extract_info(self,chunked_text):
return self.theMontyExtractor.extract_info(chunked_text,self.theMontyLemmatiser.lemmatise_tagged_sentence)
# END MONTYLINGUA CLASS
#
# COMMAND LINE PROGRAM
#
if __name__ == "__main__":
import sys,time
if '/?' in sys.argv or '-?' in sys.argv:
print """
USAGE: >> python MontyLingua.py
"""
sys.exit(0)
m = MontyLingua()
# show command prompt interface
print '\n'
try:
while 1:
sentence = ''
try:
sentence = raw_input('> ')
except:
raise
time1 = time.time()
print '\n'
extractions = m.jist(sentence)
            m.pp_info(extractions)
predicates_list = map(lambda a:m.parse_pred_arg(a),reduce(lambda y,z:y+z,map(lambda x:x['verb_arg_structures_concise'],extractions)))
print predicates_list
print '\nGENERATED SUMMARY:\n'+m.generate_summary(predicates_list)
time2= time.time()
print "-- monty took",str(round(time2-time1,2)),'seconds. --\n'
except KeyboardInterrupt:
print "\n-- monty says goodbye! --"
sys.exit(0)
|
filename = input("Enter the filename: ")
extension = filename.split('.')
print("extension:", extension[-1])
|
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
def adjust_spines(ax, spines, offset=3, smart_bounds=False):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', offset))
spine.set_smart_bounds(smart_bounds)
else:
spine.set_color('None') # don't draw spine
# Turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# No y-axis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# No x-axis ticks
ax.xaxis.set_ticks([])
def compare_results(data, x, y, hue, col, group_labels, dodge=True,
n_rows=None, n_cols=None):
# Compute offsets
width = 0.8
n_levels = np.unique(data[hue]).shape[0]
if dodge:
each_width = width / n_levels
offsets = np.linspace(0, width - each_width, n_levels)
offsets -= offsets.mean()
else:
offsets = np.zeros(n_levels)
# Set order of x groups
if x == 'classifier type':
order = ['SVM', 'ocSVM']
else:
order = None
# Open figure and plot groups
n_groups = np.unique(data[col]).shape[0]
if n_rows is None and n_cols is None:
n_rows = 1
n_cols = n_groups
fig, ax = plt.subplots(nrows=n_rows, ncols=n_cols, squeeze=True, figsize=(18, 5))
for i_group in range(n_groups):
# Select data
data_this_group = data[data[col] == i_group]
# Make strip plot
p = sns.categorical._StripPlotter(x=x, y=y, hue=hue,
data=data_this_group,
order=order, jitter=True,
dodge=dodge, palette='hls',
hue_order=None, orient='v',
color=None)
p.draw_stripplot(ax[i_group], dict(s=10 ** 2, edgecolor='w',
linewidth=1, zorder=1))
# Add legend and keep also left axes for first group only
if i_group == 0:
p.add_legend_data(ax[i_group])
keep_axes = ['bottom', 'left']
else:
p.hue_names = None
keep_axes = ['bottom']
# Add labels
p.annotate_axes(ax[i_group])
# Add means of each group
sns.categorical._PointPlotter(x=x, y=y, hue=hue, data=data_this_group,
order=order, estimator=np.mean,
ci=95, n_boot=1000, markers='o',
linestyles='-', dodge=offsets[-1],
join=False, scale=1, color='k',
errwidth=None, capsize=None,
hue_order=None, units=None, orient='v',
palette=None).draw_points(ax[i_group])
# Adjust axes appearance
adjust_spines(ax[i_group], spines=keep_axes, offset=0,
smart_bounds=False)
ax[i_group].set_title(group_labels[i_group])
if i_group > 0:
ax[i_group].set_ylabel('')
ax[i_group].set_xlabel('')
# Set same y-limits
y_limits = np.vstack([i.get_ylim() for i in ax])
y_limits = [y_limits.min(), y_limits.max()]
[i.set_ylim(*y_limits) for i in ax]
# Adjust subplot layout
fig.tight_layout()
fig.subplots_adjust(wspace=.2)
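# --- Hypothetical usage sketch (everything below is illustrative) ---
# The column names, values, and group labels are invented; compare_results
# expects long-format data whose `col` column holds integer group indices
# 0..n_groups-1. This also assumes a seaborn version that still exposes the
# private _StripPlotter/_PointPlotter classes used above (<= 0.11).
if __name__ == '__main__':
    import pandas as pd
    df = pd.DataFrame({
        'classifier type': ['SVM', 'ocSVM'] * 20,
        'accuracy': np.random.rand(40),
        'subject': np.repeat([0, 1], 20),
        'group': np.tile([0, 1], 20),
    })
    compare_results(df, x='classifier type', y='accuracy', hue='subject',
                    col='group', group_labels=['session 1', 'session 2'])
    plt.show()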
|
import socket
"""
******功能描述说明******
1.获取键盘数据,并将其发送给对方
2.接收数据并显示
"""
def send_msg(udp_socket):
"""发送信息"""
msg = input("\n请输入您要发送的信息:")
dest_ip = input("\n请输入您要发送的ip:")
dest_port = int(input("\n请输入对方的端口号:"))
# 发送数据到指定的电脑上的指定程序中,需要编码为bytes类型的数据进行发送
udp_socket.sendto(msg.encode("utf-8"), (dest_ip, dest_port))
def recv_msg(udp_socket):
"""接收信息"""
# 接收到的数据recv_data是一个元组
# 第1个元素是对方发送的bytes类型的数据
# 第2个元素是对方的ip和端口
recv_data = udp_socket.recvfrom(4096)
recv_msg = recv_data[0].decode('utf-8')
recv_ip = recv_data[1]
print(">>>%s:%s" % (str(recv_ip),recv_msg))
def main():
    # Create a UDP socket
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind the local address info (IP and port) as a tuple: ('ip string', port)
    udp_socket.bind(('', 7080))
    while True:
        """Loop forever, receiving and sending messages"""
        print("*"*30)
        print("1. Receive a message")
        print("2. Send a message")
        print("*"*30)
        opt_num = input("Choose an option (1 or 2): ")
        if opt_num == '1':
            recv_msg(udp_socket)
        elif opt_num == '2':
            send_msg(udp_socket)
        else:
            print("Invalid input, please try again")
if __name__ == '__main__':
main()
|
from django.contrib import admin
class ConsumerAdmin(admin.ModelAdmin):
fields = ('name', 'key', 'secret', 'status')
readonly_fields = ('key', 'secret')
def save_model(self, request, obj, form, change):
obj.status = 'accepted'
if change is False:
obj.generate_random_codes()
else:
obj.save()
|
'''
Write a program that randomly chooses and displays four digits, each from one to nine, with repetitions allowed.
The program should prompt for the player to enter an arithmetic expression using just those, and all of those four digits, used exactly once each.
The program should check then evaluate the expression.
The goal is for the player to enter an expression that evaluates to 24.
Only multiplication, division, addition, and subtraction operators/functions are allowed.
Division should use floating point or rational arithmetic, etc, to preserve remainders.
Brackets are allowed, if using an infix expression evaluator.
Forming multiple digit numbers from the supplied digits is disallowed. (So an answer of 12+12 when given 1, 2, 2, and 1 is wrong).
The order of the digits when given does not have to be preserved.
Note:
The type of expression evaluator used is not mandated. An RPN evaluator is equally acceptable for example.
The task is not for the program to generate the expression, or test whether an expression is even possible.
'''
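# A minimal solution sketch for the task above (assumptions: Python 3.8+ for
# ast.Constant, console I/O). The expression is parsed with the `ast` module
# so that only single digits, brackets and the four operators are accepted,
# and it is evaluated with fractions.Fraction so division keeps remainders.
import ast
import operator
import random
from fractions import Fraction

OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
       ast.Mult: operator.mul, ast.Div: operator.truediv}

def evaluate(node):
    """Evaluate an AST restricted to single digits and + - * / ( )."""
    if isinstance(node, ast.Expression):
        return evaluate(node.body)
    if isinstance(node, ast.BinOp) and type(node.op) in OPS:
        return OPS[type(node.op)](evaluate(node.left), evaluate(node.right))
    if isinstance(node, ast.Constant) and isinstance(node.value, int) \
            and 1 <= node.value <= 9:
        return Fraction(node.value)  # also rejects multi-digit numbers like 12
    raise ValueError("only single digits and + - * / ( ) are allowed")

def play():
    digits = sorted(random.randint(1, 9) for _ in range(4))
    print("Make 24 from these digits, each used exactly once:", digits)
    expr = input("> ")
    # every supplied digit must appear exactly once (order is irrelevant)
    if sorted(int(c) for c in expr if c.isdigit()) != digits:
        print("You must use exactly the digits", digits)
        return
    try:
        value = evaluate(ast.parse(expr, mode="eval"))
    except (SyntaxError, ValueError, ZeroDivisionError) as err:
        print("Invalid expression:", err)
        return
    print("Correct!" if value == 24 else "Sorry, that evaluates to %s" % value)

if __name__ == "__main__":
    play()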
|
from interface import Qubit, QuantumDevice
import numpy as np
import qutip as qt
from qutip.qip.operations import hadamard_transform
from typing import List
class SimulatedQubit(Qubit):
qubit_id: int
parent: "Simulator"
def __init__(self, parent_simulator: "Simulator", id: int):
self.qubit_id = id
self.parent = parent_simulator
def h(self) -> None:
self.parent._apply(hadamard_transform(), [self.qubit_id])
def ry(self, angle: float) -> None:
self.parent._apply(qt.ry(angle), [self.qubit_id])
def x(self) -> None:
self.parent._apply(qt.sigmax(), [self.qubit_id])
def measure(self) -> bool:
projectors = [
qt.operations.gates.gate_expand_1toN(
qt.basis(2, outcome) * qt.basis(2, outcome).dag(),
self.parent.capacity,
self.qubit_id
)
for outcome in (0, 1)
]
post_measurement_states = [
projector * self.parent.register_state
for projector in projectors
]
probabilities = [
post_measurement_state.norm() ** 2
for post_measurement_state in post_measurement_states
]
sample = np.random.choice([0, 1], p=probabilities)
self.parent.register_state = post_measurement_states[sample].unit()
return bool(sample)
def reset(self) -> None:
if self.measure():
self.x()
class Simulator(QuantumDevice):
capacity: int
available_qubits: List[SimulatedQubit]
    register_state: qt.Qobj
def __init__(self, capacity=3):
self.capacity = capacity
self.available_qubits = [
SimulatedQubit(self, i)
for i in range(capacity)
]
        self.register_state = qt.tensor(
*[
qt.basis(2, 0)
for _ in range(capacity)
]
)
def allocate_qubit(self) -> SimulatedQubit:
if self.available_qubits:
return self.available_qubits.pop()
def deallocate_qubit(self, qubit: SimulatedQubit) -> None:
self.available_qubits.append(qubit)
def _apply(self, unitary: qt.Qobj, ids: List[int]) -> None:
if len(ids) != 1:
raise ValueError(
"Поддерживаются только однокубитовые унитарные матрицы.")
matrix = qt.operations.gates.gate_expand_1toN(
unitary, self.capacity, ids[0]
)
self.register_state = matrix * self.register_state
|
# -*- coding: utf-8 -*-
from typing import List
class Solution:
def numberOfBeams(self, bank: List[str]) -> int:
last_lasers, result = 0, 0
for row in bank:
current_lasers = row.count("1")
if current_lasers and last_lasers:
result += current_lasers * last_lasers
if current_lasers:
last_lasers = current_lasers
return result
if __name__ == "__main__":
solution = Solution()
assert 8 == solution.numberOfBeams(["011001", "000000", "010100", "001000"])
assert 0 == solution.numberOfBeams(["000", "111", "000"])
|
from datetime import datetime,timedelta,timezone
import ast
import pytz
import json
from user_input.models import UserDailyInput
from quicklook.serializers import UserQuickLookSerializer
from django.db.models import Q
from collections import OrderedDict
import quicklook.calculations.garmin_calculation
from quicklook.models import (
UserQuickLook,
Grades,
Sleep,
Steps,
ExerciseAndReporting,
SwimStats,
BikeStats,
Food,
Alcohol
)
from fitbit.models import (
UserFitbitDataSleep,
UserFitbitDataHeartRate,
UserFitbitDataActivities,
UserFitbitDataSteps,
UserFitbitDatabody,
UserFitbitDatafoods
)
from user_input.models import DailyUserInputStrong
from user_input.models import DailyUserInputOptional
from .converter.fitbit_to_garmin_converter import fitbit_to_garmin_sleep,\
fitbit_to_garmin_activities,\
fitbit_to_garmin_epoch,\
steps_minutly_to_quartly
from user_input.utils.daily_activity import get_daily_activities_in_base_format
def get_fitbit_model_data(model,user,start_date, end_date,
order_by = None, group_by_date=False):
date_field = None
data_field = None
if model == UserFitbitDataSleep:
date_field = "date_of_sleep"
data_field = "sleep_data"
elif model == UserFitbitDataHeartRate:
date_field = "date_of_heartrate"
data_field = "heartrate_data"
elif model == UserFitbitDataActivities:
date_field = "date_of_activities"
data_field = "activities_data"
elif model == UserFitbitDataSteps:
date_field = "date_of_steps"
data_field = "steps_data"
elif model == UserFitbitDatabody:
date_field = "date_of_body"
data_field = "body_data"
elif model == UserFitbitDatafoods:
date_field = "date_of_foods"
data_field = "foods_data"
if date_field and data_field:
lookup_kwargs = {'{}__{}'.format(date_field,"range"):(
start_date,end_date)}
if order_by:
summaries = model.objects.filter(
**lookup_kwargs,
user = user).order_by(order_by)
else:
summaries = model.objects.filter(
**lookup_kwargs,
user = user)
if group_by_date:
return {q.__dict__.get(date_field)
: q.__dict__.get(data_field) for q in summaries}
else:
return [q.__dict__.get(data_field) for q in summaries]
else:
return None
def get_epoch_time_from_timestamp(timestamp):
if timestamp:
if timestamp[-3:-2] == ':':
timestamp = timestamp[:-3]+timestamp[-2:]
dobj = datetime.strptime(timestamp,"%Y-%m-%dT%H:%M:%S.%f")
time_in_utc_seconds = int(dobj.timestamp())
return (time_in_utc_seconds)
def get_combined_sleep_data(sleep_data, sleep_start_time, awaketime_between_naps):
remSleepInSeconds = sleep_data[0]['remSleepInSeconds']+sleep_data[1]['remSleepInSeconds']
restlessDurationInSeconds = sleep_data[0]['restlessDurationInSeconds']+sleep_data[1]['restlessDurationInSeconds']
validation = sleep_data[0]['validation']+sleep_data[1]['validation']
deepSleepDurationInSeconds = sleep_data[0]['deepSleepDurationInSeconds']+sleep_data[1]['deepSleepDurationInSeconds']
lightSleepDurationInSeconds = sleep_data[0]['lightSleepDurationInSeconds']+sleep_data[1]['lightSleepDurationInSeconds']
unmeasurableSleepInSeconds = sleep_data[0]['unmeasurableSleepInSeconds']
startTimeOffsetInSeconds = sleep_data[0]['startTimeOffsetInSeconds']
durationInSeconds = sleep_data[0]['durationInSeconds']+sleep_data[1]['durationInSeconds']
awakeDurationInSeconds = sleep_data[0]['awakeDurationInSeconds']+sleep_data[1]['awakeDurationInSeconds']
light = sleep_data[0]['sleepLevelsMap']['light'] + sleep_data[1]['sleepLevelsMap']['light']
rem = sleep_data[0]['sleepLevelsMap']['rem'] + sleep_data[1]['sleepLevelsMap']['rem']
deep = sleep_data[0]['sleepLevelsMap']['deep'] + sleep_data[1]['sleepLevelsMap']['deep']
awake = sleep_data[0]['sleepLevelsMap']['awake'] + sleep_data[1]['sleepLevelsMap']['awake']
restless = sleep_data[0]['sleepLevelsMap']['restless'] + sleep_data[1]['sleepLevelsMap']['restless']
sleepLevelsMap = dict({'light':light, 'rem':rem, 'awake':awake, 'deep':deep, 'restless':restless})
trans_sleep_data = dict({'remSleepInSeconds': remSleepInSeconds,
'restlessDurationInSeconds':restlessDurationInSeconds,
'validation':validation,
'deepSleepDurationInSeconds': deepSleepDurationInSeconds,
'summaryId':sleep_data[0]['summaryId'],
'lightSleepDurationInSeconds': lightSleepDurationInSeconds,
'unmeasurableSleepInSeconds':unmeasurableSleepInSeconds,
'startTimeOffsetInSeconds':startTimeOffsetInSeconds,
'startTimeInSeconds':sleep_start_time,
'durationInSeconds':durationInSeconds + awaketime_between_naps,
'sleepLevelsMap':sleepLevelsMap,
'awakeDurationInSeconds':awakeDurationInSeconds + awaketime_between_naps,
'calendarDate':sleep_data[0]['calendarDate']})
return trans_sleep_data
def get_sleep_stats(sleep_data, ui_bedtime = None,
ui_awaketime = None, ui_sleep_duration = None,
ui_timezone = None,str_date=True):
sleep_stats = {
"deep_sleep": '',
"light_sleep": '',
"awake_time": '',
"rem_sleep":'',
"restless_sleep":'',
"sleep_bed_time": '',
"sleep_awake_time": '',
"sleep_per_wearable":'',
"sleep_per_userinput":'',
}
have_userinput_sleep = False
trans_sleep_data = None
if ui_bedtime and ui_awaketime and ui_sleep_duration and ui_timezone:
        # If a manual bedtime and awake time were submitted for last night,
        # use them in preference to the wearable data, converting both into
        # the timezone from which the user input was submitted
have_userinput_sleep = True
target_tz = pytz.timezone(ui_timezone)
ui_bedtime = ui_bedtime.astimezone(target_tz)
ui_awaketime = ui_awaketime.astimezone(target_tz)
if sleep_data:
if len(sleep_data['sleep']) > 1:
trans_sleep_data_list = []
for single_sleep_record in sleep_data['sleep']:
trans_sleep_data_list.append(fitbit_to_garmin_sleep(single_sleep_record))
if single_sleep_record['isMainSleep'] == False:
second_sleep_start_time = single_sleep_record['startTime']
second_sleep_end_time = single_sleep_record['endTime']
else:
first_sleep_start_time = single_sleep_record['startTime']
first_sleep_end_time = single_sleep_record['endTime']
first_sleep_end_time_utc_seconds = get_epoch_time_from_timestamp(first_sleep_end_time)
second_sleep_start_time_utc_seconds = get_epoch_time_from_timestamp(second_sleep_start_time)
awaketime_between_naps = second_sleep_start_time_utc_seconds - first_sleep_end_time_utc_seconds
if awaketime_between_naps <= 9000:
trans_sleep_data = get_combined_sleep_data(trans_sleep_data_list,
first_sleep_start_time,
awaketime_between_naps)
else:
for single_sleep_record in sleep_data['sleep']:
if single_sleep_record['isMainSleep'] == True:
trans_sleep_data = fitbit_to_garmin_sleep(single_sleep_record)
else:
main_sleep_data = list(filter(lambda x:x.get('isMainSleep'),sleep_data['sleep']))
if not main_sleep_data:
main_sleep_data = sleep_data['sleep']
main_sleep_data = main_sleep_data[0]
trans_sleep_data = fitbit_to_garmin_sleep(main_sleep_data)
if trans_sleep_data:
sleep_stats["deep_sleep"] = quicklook.calculations.garmin_calculation.sec_to_hours_min_sec(
trans_sleep_data['deepSleepDurationInSeconds'],
include_sec = False
)
sleep_stats["light_sleep"] = quicklook.calculations.garmin_calculation.sec_to_hours_min_sec(
trans_sleep_data['lightSleepDurationInSeconds'],
include_sec = False
)
sleep_stats["awake_time"] = quicklook.calculations.garmin_calculation.sec_to_hours_min_sec(
trans_sleep_data['awakeDurationInSeconds'],
include_sec = False
)
sleep_stats["rem_sleep"] = quicklook.calculations.garmin_calculation.sec_to_hours_min_sec(
trans_sleep_data['remSleepInSeconds'],include_sec = False
)
sleep_stats["restless_sleep"] = quicklook.calculations.garmin_calculation.sec_to_hours_min_sec(
trans_sleep_data['restlessDurationInSeconds'],
include_sec = False
)
sleep_stats["sleep_per_wearable"] = quicklook.calculations.garmin_calculation.sec_to_hours_min_sec(
(trans_sleep_data['durationInSeconds']
- trans_sleep_data['awakeDurationInSeconds']
- trans_sleep_data['restlessDurationInSeconds']),
include_sec = False
)
if ui_sleep_duration:
sleep_stats['sleep_per_userinput'] = ui_sleep_duration
if have_userinput_sleep:
bed_time = ui_bedtime.replace(tzinfo = None)
awake_time = ui_awaketime.replace(tzinfo = None)
if str_date:
sleep_stats['sleep_bed_time'] = bed_time.strftime("%I:%M %p")
sleep_stats['sleep_awake_time'] = awake_time.strftime("%I:%M %p")
else:
sleep_stats['sleep_bed_time'] = bed_time
sleep_stats['sleep_awake_time'] = awake_time
elif trans_sleep_data:
bed_time = datetime.strptime(
trans_sleep_data['startTimeInSeconds'],
"%Y-%m-%dT%H:%M:%S.%f")
wake_time = bed_time + timedelta(
seconds = trans_sleep_data['durationInSeconds'])
if str_date:
sleep_stats["sleep_bed_time"] = bed_time.strftime("%I:%M %p")
sleep_stats["sleep_awake_time"] = wake_time.strftime("%I:%M %p")
else:
sleep_stats['sleep_bed_time'] = bed_time
sleep_stats['sleep_awake_time'] = wake_time
else:
if not str_date:
sleep_stats['sleep_bed_time'] = None
sleep_stats['sleep_awake_time'] = None
return sleep_stats
def fitbit_steps_data(todays_steps_data):
    ''' Return the total daily step count from the Fitbit steps payload,
    used for displaying the Fitbit steps data in the raw data view.
    '''
if todays_steps_data:
total_steps = todays_steps_data['activities-steps'][0]['value']
else:
total_steps = 0
return total_steps
def fitbit_heartrate_data(user,current_date):
todays_heartrate_data = get_fitbit_model_data(
UserFitbitDataHeartRate,user,current_date.date(),current_date.date())
if todays_heartrate_data:
todays_heartrate_data = ast.literal_eval(todays_heartrate_data[0].replace(
"'heartrate_fitbit': {...}","'heartrate_fitbit': {}"))
todays_heartrate_data = todays_heartrate_data['activities-heart']
if todays_heartrate_data:
heartrate_value = todays_heartrate_data[0].get("value")
if heartrate_value and isinstance(heartrate_value,dict):
                # for the case when the user has no intraday access
resting_heartrate = heartrate_value.get("restingHeartRate",0)
else:
                # for the case when the user has intraday access
resting_heartrate = todays_heartrate_data[0].get("restingHeartRate",0)
else:
resting_heartrate = 0
else:
resting_heartrate = 0
return resting_heartrate
def get_avg_sleep_grade(ui_sleep_duration,sleep_per_wearable,age,sleep_aid):
if ui_sleep_duration and ui_sleep_duration != ":":
grade_point = quicklook.calculations.garmin_calculation\
.cal_average_sleep_grade(ui_sleep_duration,age,sleep_aid)
return grade_point
elif sleep_per_wearable:
grade_point = quicklook.calculations.garmin_calculation\
.cal_average_sleep_grade(sleep_per_wearable,age,sleep_aid)
return grade_point
return (None,None)
def get_exercise_steps(trans_activity_data):
    total_exercise_steps = 0
    for single_activity in trans_activity_data:
        total_exercise_steps += int(single_activity.get("steps",0))
    return total_exercise_steps
def makeformat(trans_activity_data,current_date,last_seven_days_date):
formated_data = OrderedDict()
while(last_seven_days_date <= current_date):
formated_data[last_seven_days_date.strftime('%Y-%m-%d')]=[]
last_seven_days_date += timedelta(days=1)
fitbt_act = None
if trans_activity_data:
for i,single_activity in enumerate(trans_activity_data):
activity_start_time = trans_activity_data[i][0]["startTimeInSeconds"]
activity_offset = trans_activity_data[i][0]["startTimeOffsetInSeconds"]
            activity_date = datetime.utcfromtimestamp(activity_start_time
                + activity_offset).strftime("%Y-%m-%d")
            if activity_date:
                # if datetime.strptime(activity_date,'%Y-%m-%d') <= current_date:
                formated_data[activity_date] = single_activity
return formated_data
def get_exercise_consistency_grade(user,current_date,user_age,weekly_user_input_activities):
trans_activity_data = []
last_seven_days_date = current_date - timedelta(days=6)
week_activity_data = UserFitbitDataActivities.objects.filter(
Q(created_at__gte = last_seven_days_date)&
Q(created_at__lte = current_date),
user=user).order_by('created_at')
daily_strong = list(DailyUserInputStrong.objects.filter(
Q(user_input__created_at__gte = last_seven_days_date)&
Q(user_input__created_at__lte = current_date),
user_input__user = user).order_by('user_input__created_at'))
weekly_daily_strong = quicklook.calculations.garmin_calculation.get_weekly_user_input_data(
daily_strong,current_date,last_seven_days_date)
if week_activity_data:
for i in range(0,len(week_activity_data)):
todays_activity_data = ast.literal_eval(week_activity_data[i].activities_data.replace(
"'activity_fitbit': {...}","'activity_fitbit': {}"))
todays_activity_data = todays_activity_data.get('activities')
if todays_activity_data:
trans_activity_data.append(list(map(fitbit_to_garmin_activities,todays_activity_data)))
formated_data = makeformat(trans_activity_data,current_date,last_seven_days_date)
weekly_combined_activities = quicklook.calculations.\
garmin_calculation.get_weekly_combined_activities(
formated_data,{},weekly_user_input_activities,
last_seven_days_date,current_date,user_age)
exe_consistency_grade,exe_consistency_point = quicklook.calculations.\
garmin_calculation.get_exercise_consistency_grade(
weekly_daily_strong,weekly_combined_activities,7,user_age)
return (exe_consistency_grade,exe_consistency_point)
def get_unprocessed_food_grade(daily_strong_input,current_date):
    '''
    Get the unprocessed food grade and GPA from the garmin calculation module.
    Args: daily_strong_input (list of DailyUserInputStrong records)
        current_date (date being processed)
    Return: tuple of unprocessed food grade and unprocessed food GPA
    '''
unprocessed_food_grade_pt = quicklook.calculations.garmin_calculation.get_unprocessed_food_grade(
daily_strong_input,current_date)
return unprocessed_food_grade_pt
def get_penality_grades(ui_smoking_penalty,ui_controlled_substance_penalty,ui_sleep_aid_penalty):
    '''
    Get the penalties from the garmin calculation module.
    Args: smoke, sleep_aid and controlled_substance data from user inputs
    Return: dictionary of smoking, sleep and controlled_substance penalties
    '''
penalties = quicklook.calculations.garmin_calculation.cal_penalty(
ui_smoking_penalty,ui_controlled_substance_penalty,ui_sleep_aid_penalty)
return penalties
def get_overall_grades(grades_calculated_data):
    '''
    Get the overall grade and GPA from the garmin calculation module.
    Args: all calculated grades
    Return: tuple of overall grade and overall GPA
    '''
overall_grade_pt = quicklook.calculations.garmin_calculation.get_overall_grade(
grades_calculated_data)
return overall_grade_pt
def create_fitbit_quick_look(user,from_date=None,to_date=None):
'''
calculate and create quicklook instance for given date range
Arguments -
1) user is a "User" instance representing currently logged in user
2) from_date expect date string in format YYYY-MM-DD
3) to_date expect date string in format YYYY-MM-DD
'''
# date range for which quicklook is calculated
from_dt = quicklook.calculations.garmin_calculation.str_to_datetime(from_date)
to_dt = quicklook.calculations.garmin_calculation.str_to_datetime(to_date)
current_date = from_dt
SERIALIZED_DATA = []
user_age = user.profile.age()
while current_date <= to_dt:
tomorrow_date = current_date + timedelta(days=1)
last_seven_days_date = current_date - timedelta(days=6)
start_epoch = int(current_date.replace(tzinfo=timezone.utc).timestamp())
end_epoch = start_epoch + 86400
grades_calculated_data = quicklook.calculations.garmin_calculation.get_blank_model_fields('grade')
exercise_calculated_data = quicklook.calculations.garmin_calculation.get_blank_model_fields('exercise')
swim_calculated_data = quicklook.calculations.garmin_calculation.get_blank_model_fields('swim')
bike_calculated_data = quicklook.calculations.garmin_calculation.get_blank_model_fields('bike')
steps_calculated_data = quicklook.calculations.garmin_calculation.get_blank_model_fields('step')
sleeps_calculated_data = quicklook.calculations.garmin_calculation.get_blank_model_fields('sleep')
food_calculated_data = quicklook.calculations.garmin_calculation.get_blank_model_fields("food")
alcohol_calculated_data = quicklook.calculations.garmin_calculation.get_blank_model_fields("alcohol")
todays_sleep_data = None
tomorrow_sleep_data = None
sleep_data = get_fitbit_model_data(
UserFitbitDataSleep,user,current_date.date(),
tomorrow_date.date(),group_by_date=True)
if sleep_data:
todays_sleep_data = sleep_data.get(current_date.strftime('%Y-%m-%d'))
tomorrow_sleep_data = sleep_data.get(tomorrow_date.strftime('%Y-%m-%d'))
if todays_sleep_data:
todays_sleep_data = ast.literal_eval(todays_sleep_data.replace(
"'sleep_fitbit': {...}","'sleep_fitbit': {}"))
if tomorrow_sleep_data:
tomorrow_sleep_data = ast.literal_eval(tomorrow_sleep_data.replace(
"'sleep_fitbit': {...}","'sleep_fitbit': {}"))
todays_epoch_data = []
todays_steps_data = get_fitbit_model_data(
UserFitbitDataSteps,user,current_date.date(),current_date.date())
if todays_steps_data:
todays_steps_data = ast.literal_eval(todays_steps_data[0].replace(
"'steps_fitbit': {...}","'steps_fitbit': {}"))
if todays_steps_data.get("activities-steps-intraday"):
intraday_steps = todays_steps_data.get("activities-steps-intraday")
interval_duration = 15*60
quarterly_dataset = steps_minutly_to_quartly(
current_date.date(),
intraday_steps.get('dataset',[]))
for step in quarterly_dataset:
todays_epoch_data.append(fitbit_to_garmin_epoch(
step,current_date.date(),interval_duration))
try:
user_inputs_qs = UserDailyInput.objects.select_related(
'strong_input','encouraged_input','optional_input').filter(
created_at__range = (current_date,tomorrow_date),user=user)
user_inputs = {q.created_at.strftime("%Y-%m-%d"):q for q in user_inputs_qs}
todays_user_input = user_inputs.get(current_date.strftime("%Y-%m-%d"))
tomorrows_user_input = user_inputs.get(tomorrow_date.strftime("%Y-%m-%d"))
except UserDailyInput.DoesNotExist:
todays_user_input = None
tomorrows_user_input = None
        # pull data for past 7 days (including today)
daily_strong = list(DailyUserInputStrong.objects.filter(
Q(user_input__created_at__gte = last_seven_days_date)&
Q(user_input__created_at__lte = current_date),
user_input__user = user).order_by('user_input__created_at'))
daily_optional = list(DailyUserInputOptional.objects.filter(
Q(user_input__created_at__gte = last_seven_days_date)&
Q(user_input__created_at__lte = current_date),
user_input__user = user).order_by('user_input__created_at'))
todays_daily_optional = []
for i,q in enumerate(daily_optional):
if q.user_input.created_at == current_date.date():
todays_daily_optional.append(daily_optional[i])
break
todays_daily_strong = []
for i,q in enumerate(daily_strong):
if q.user_input.created_at == current_date.date():
todays_daily_strong.append(daily_strong[i])
break
weekly_user_input_activities = get_daily_activities_in_base_format(
user,last_seven_days_date.date(),
to_date = current_date.date(),
include_all = True)
userinput_activities = weekly_user_input_activities[current_date.strftime('%Y-%m-%d')]
todays_activity_data = get_fitbit_model_data(
UserFitbitDataActivities,user,current_date.date(),current_date.date())
if todays_activity_data:
todays_activity_data = ast.literal_eval(todays_activity_data[0].replace(
"'activity_fitbit': {...}","'activity_fitbit': {}"))
todays_activity_data = todays_activity_data['activities']
if todays_activity_data:
todays_activity_data = list(map(fitbit_to_garmin_activities,
todays_activity_data))
combined_user_exercise_activities,combined_user_exec_non_exec_activities =\
quicklook.calculations.garmin_calculation.\
get_filtered_activity_stats(
todays_activity_data,user_age,
userinput_activities = userinput_activities,
epoch_summaries = todays_epoch_data,
provide_all=True)
ui_bedtime = None
ui_awaketime = None
ui_timezone = None
ui_sleep_duration = ""
ui_sleep_comment = ""
ui_sleep_aid = ""
ui_workout_easy_hard = ""
ui_medication = ""
ui_smoke_substance = ""
ui_water_consumed_workout = 0
ui_pain = ""
ui_pain_area = ""
ui_stress_level = ""
ui_chia_seeds_consumed_workout = 0
ui_fast_before_workout = ""
ui_sick = ''
ui_workout_comment = ""
ui_workout_effort_level = 0
ui_prcnt_unprocessed_food_consumed_yesterday = 0
ui_non_processed_food = ""
ui_processed_food = ""
ui_diet_type = ""
ui_alcohol_day = ""
ui_sleep_aid_penalty = ""
ui_controlled_substance_penalty = ""
ui_smoking_penalty = ""
ui_did_workout = ""
ui_prcnt_breath_through_nose = 0
ui_no_plants_consumed = 0
ui_list_of_pants_consumed = ""
        # fetch the resting heartrate from the fitbit models
resting_heartrate = fitbit_heartrate_data(user,current_date)
# passing resting heart rate value to exercise dictionary
exercise_calculated_data['resting_hr_last_night'] = resting_heartrate
if todays_user_input:
ui_bedtime = todays_user_input.strong_input.sleep_bedtime
ui_awaketime = todays_user_input.strong_input.sleep_awake_time
ui_timezone = todays_user_input.timezone
ui_sleep_comment = todays_user_input.strong_input.sleep_comment
ui_sleep_aid = todays_user_input.strong_input.prescription_or_non_prescription_sleep_aids_last_night
ui_workout_easy_hard = todays_user_input.strong_input.work_out_easy_or_hard
ui_medication = todays_user_input.strong_input.prescription_or_non_prescription_medication_yesterday
ui_smoke_substance = todays_user_input.strong_input.smoke_any_substances_whatsoever
water_consumed = todays_user_input.encouraged_input.water_consumed_during_workout
if water_consumed:
ui_water_consumed_workout = int(water_consumed)
ui_pain = todays_user_input.encouraged_input.pains_twings_during_or_after_your_workout
ui_pain_area = todays_user_input.encouraged_input.pain_area
ui_stress_level = todays_user_input.encouraged_input.stress_level_yesterday
            chia_seeds_consumed = todays_user_input.optional_input.chia_seeds_consumed_during_workout
            if chia_seeds_consumed:
                ui_chia_seeds_consumed_workout = int(chia_seeds_consumed)
ui_fast_before_workout = todays_user_input.optional_input.fasted_during_workout
ui_sick = todays_user_input.optional_input.sick
ui_workout_comment = todays_user_input.optional_input.general_Workout_Comments
effort_level = todays_user_input.strong_input.workout_effort_level
if effort_level:
ui_workout_effort_level = int(effort_level)
prcnt_non_processed_food = todays_user_input.strong_input.prcnt_unprocessed_food_consumed_yesterday
if prcnt_non_processed_food:
ui_prcnt_unprocessed_food_consumed_yesterday = int(prcnt_non_processed_food)
ui_non_processed_food = todays_user_input.strong_input.list_of_unprocessed_food_consumed_yesterday
ui_processed_food = todays_user_input.strong_input.list_of_processed_food_consumed_yesterday
ui_no_plants_consumed = todays_user_input.strong_input.no_plants_consumed
ui_list_of_pants_consumed = todays_user_input.strong_input.list_of_pants_consumed
ui_diet_type = todays_user_input.optional_input.type_of_diet_eaten
ui_alcohol_day = todays_user_input.strong_input.number_of_alcohol_consumed_yesterday
ui_sleep_aid_penalty = todays_user_input.strong_input.prescription_or_non_prescription_sleep_aids_last_night
ui_controlled_substance_penalty = todays_user_input.strong_input.controlled_uncontrolled_substance
ui_smoking_penalty = todays_user_input.strong_input.smoke_any_substances_whatsoever
ui_did_workout = todays_user_input.strong_input.workout
prcnt_breath_through_nose = todays_user_input.encouraged_input.workout_that_user_breathed_through_nose
if prcnt_breath_through_nose:
ui_prcnt_breath_through_nose = int(prcnt_breath_through_nose)
        '''user inputs of activities for displaying exercise reporting'''
exercise_calculated_data['workout_easy_hard'] = ui_workout_easy_hard
exercise_calculated_data['water_consumed_workout'] = ui_water_consumed_workout
exercise_calculated_data['chia_seeds_consumed_workout'] = ui_chia_seeds_consumed_workout
exercise_calculated_data['fast_before_workout'] = ui_fast_before_workout
exercise_calculated_data['pain'] = ui_pain
exercise_calculated_data['pain_area'] = ui_pain_area
exercise_calculated_data['stress_level'] = ui_stress_level
exercise_calculated_data['sick'] = ui_sick
exercise_calculated_data['medication'] = ui_medication
exercise_calculated_data['smoke_substance'] = ui_smoke_substance
exercise_calculated_data['workout_comment'] = ui_workout_comment
exercise_calculated_data['effort_level'] = ui_workout_effort_level
exercise_calculated_data['nose_breath_prcnt_workout'] = ui_prcnt_breath_through_nose
#Food
food_calculated_data['prcnt_non_processed_food'] = ui_prcnt_unprocessed_food_consumed_yesterday
food_calculated_data['non_processed_food'] = ui_non_processed_food
food_calculated_data['processed_food'] = ui_processed_food
food_calculated_data['diet_type'] = ui_diet_type
food_calculated_data['no_plants_consumed_ql'] = ui_no_plants_consumed
food_calculated_data['list_of_pants_consumed_ql'] = ui_list_of_pants_consumed
# Grades
if todays_daily_strong:
unprocessed_food_grade_pt = get_unprocessed_food_grade(todays_daily_strong,current_date)
grades_calculated_data['prcnt_unprocessed_food_consumed_grade'] = unprocessed_food_grade_pt[0] \
if unprocessed_food_grade_pt[0] else ''
grades_calculated_data['prcnt_unprocessed_food_consumed_gpa'] = unprocessed_food_grade_pt[1] \
if unprocessed_food_grade_pt[1] else 0
#Alcohol
grade,avg_alcohol,avg_alcohol_gpa = quicklook.calculations\
.garmin_calculation\
.get_alcohol_grade_avg_alcohol_week(daily_strong,user)
alcohol_calculated_data['alcohol_day'] = ui_alcohol_day
grades_calculated_data['alcoholic_drink_per_week_grade'] = grade
alcohol_calculated_data['alcohol_week'] = avg_alcohol
grades_calculated_data['alcoholic_drink_per_week_gpa'] = avg_alcohol_gpa
# Penalties
if todays_daily_strong:
penalties = get_penality_grades(
ui_smoking_penalty,
ui_controlled_substance_penalty,
ui_sleep_aid_penalty)
grades_calculated_data["sleep_aid_penalty"] = penalties['sleep_aid_penalty']
grades_calculated_data['ctrl_subs_penalty'] = penalties['ctrl_subs_penalty']
grades_calculated_data['smoke_penalty'] = penalties['smoke_penalty']
#Sleep Calculations
        # adding nap hours to the sleep-per-user-input field
ui_sleep_duration = quicklook.calculations.garmin_calculation.\
get_user_input_total_sleep(
todays_daily_strong,todays_daily_optional)
sleep_stats = get_sleep_stats(todays_sleep_data,
ui_bedtime = ui_bedtime,ui_awaketime = ui_awaketime,
ui_sleep_duration = ui_sleep_duration,
ui_timezone = ui_timezone,str_date=False)
sleep_bed_time = sleep_stats['sleep_bed_time']
sleep_awake_time = sleep_stats['sleep_awake_time']
if sleep_bed_time:
sleep_bed_time = sleep_bed_time.strftime("%I:%M %p")
else:
sleep_bed_time = ''
if sleep_awake_time:
sleep_awake_time = sleep_awake_time.strftime("%I:%M %p")
else:
sleep_awake_time = ''
sleeps_calculated_data['deep_sleep'] = sleep_stats['deep_sleep']
sleeps_calculated_data['light_sleep'] = sleep_stats['light_sleep']
sleeps_calculated_data['rem_sleep'] = sleep_stats['rem_sleep']
sleeps_calculated_data['awake_time'] = sleep_stats['awake_time']
sleeps_calculated_data['sleep_bed_time'] = sleep_bed_time
sleeps_calculated_data['sleep_awake_time'] = sleep_awake_time
sleeps_calculated_data['sleep_per_wearable'] = sleep_stats['sleep_per_wearable']
sleeps_calculated_data['sleep_per_user_input'] = (ui_sleep_duration
if ui_sleep_duration else "")
sleeps_calculated_data['sleep_comments'] = ui_sleep_comment
sleeps_calculated_data['sleep_aid'] = ui_sleep_aid
sleeps_calculated_data['restless_sleep'] = sleep_stats['restless_sleep']
# Sleep grade point calculation
sleep_grade_point = get_avg_sleep_grade(
sleep_stats['sleep_per_userinput'],
sleep_stats['sleep_per_wearable'],
user_age,ui_sleep_aid
)
grades_calculated_data['avg_sleep_per_night_grade'] = \
sleep_grade_point[0] if sleep_grade_point[0] else ''
grades_calculated_data['avg_sleep_per_night_gpa'] = \
sleep_grade_point[1] if sleep_grade_point[1] else 0
# Exercise/Activity Calculations
activity_stats = quicklook.calculations.garmin_calculation.get_activity_stats(
combined_user_exercise_activities,user_age)
exercise_calculated_data['did_workout'] = activity_stats['have_activity']
exercise_calculated_data['distance_run'] = activity_stats['distance_run_miles']
exercise_calculated_data['distance_bike'] = activity_stats['distance_bike_miles']
exercise_calculated_data['distance_swim'] = activity_stats['distance_swim_yards']
exercise_calculated_data['distance_other'] = activity_stats['distance_other_miles']
exercise_calculated_data['workout_duration'] = quicklook.calculations.garmin_calculation.\
sec_to_hours_min_sec(
activity_stats['total_duration'])
exercise_calculated_data['pace'] = activity_stats['pace']
exercise_calculated_data['avg_heartrate'] = activity_stats['avg_heartrate']
exercise_calculated_data['activities_duration'] = activity_stats['activities_duration']
exercise_calculated_data['did_workout'] = quicklook.calculations.garmin_calculation.\
did_workout_today(have_activities=activity_stats['have_activity']
,user_did_workout=ui_did_workout)
exercise_calculated_data['total_exercise_activities'] = activity_stats['total_exercise_activities']
exercise_calculated_data['total_strength_activities'] = activity_stats['total_strength_activities']
# Steps calculation
daily_total_steps = fitbit_steps_data(todays_steps_data)
exercise_steps,non_exercise_steps,total_steps = quicklook.calculations.\
garmin_calculation.cal_exercise_steps_total_steps(
int(daily_total_steps),
combined_user_exec_non_exec_activities,
user_age
)
steps_calculated_data['non_exercise_steps'] = non_exercise_steps
steps_calculated_data['exercise_steps'] = exercise_steps
steps_calculated_data['total_steps'] = total_steps
# Non exercise steps grade and gpa calculation
moment_non_exercise_steps_grade_point = quicklook.calculations\
.garmin_calculation.cal_non_exercise_step_grade(non_exercise_steps)
grades_calculated_data['movement_non_exercise_steps_grade'] = \
moment_non_exercise_steps_grade_point[0]
grades_calculated_data['movement_non_exercise_steps_gpa'] = \
moment_non_exercise_steps_grade_point[1]
# Exercise Grade and point calculation
exe_consistency_grade = get_exercise_consistency_grade(
user,current_date,user_age,weekly_user_input_activities)
grades_calculated_data['exercise_consistency_grade'] = \
exe_consistency_grade[0]
grades_calculated_data['exercise_consistency_score'] = \
exe_consistency_grade[1]
# Movement consistency and movement consistency grade calculation
# Time user go to bed last night
yesterday_bedtime = sleep_stats['sleep_bed_time']
# Time user wake up today
today_awake_time = sleep_stats['sleep_awake_time']
# Time user went to bed again today
today_bedtime = None
strength_start_time = None
strength_end_time = None
nap_start_time = None
nap_end_time = None
tomorrows_user_input_tz = None
if tomorrows_user_input and tomorrows_user_input.strong_input:
today_bedtime = tomorrows_user_input.strong_input.sleep_bedtime
tomorrows_user_input_tz = tomorrows_user_input.timezone
if (today_bedtime and tomorrows_user_input_tz):
target_tz = pytz.timezone(tomorrows_user_input_tz)
today_bedtime = today_bedtime.astimezone(target_tz).replace(tzinfo=None)
else:
tomorrow_sleep_stats = get_sleep_stats(tomorrow_sleep_data,str_date=False)
today_bedtime = tomorrow_sleep_stats['sleep_bed_time']
if todays_user_input:
ui_strength_start_time = todays_user_input.strong_input.strength_workout_start
ui_strength_end_time = todays_user_input.strong_input.strength_workout_end
if ui_strength_start_time and ui_strength_end_time:
strength_start_time = quicklook.calculations.garmin_calculation\
._str_duration_to_dt(current_date,ui_strength_start_time)
strength_end_time = quicklook.calculations.garmin_calculation\
._str_duration_to_dt(current_date,ui_strength_end_time)
ui_nap_start_time = todays_user_input.optional_input.nap_start_time
ui_nap_end_time = todays_user_input.optional_input.nap_end_time
if ui_nap_start_time and ui_nap_end_time:
nap_start_time = quicklook.calculations.garmin_calculation\
._str_duration_to_dt(current_date,ui_nap_start_time)
nap_end_time = quicklook.calculations.garmin_calculation\
._str_duration_to_dt(current_date,ui_nap_end_time)
movement_consistency = quicklook.calculations.garmin_calculation\
.cal_movement_consistency_summary(
user,
current_date,
todays_epoch_data,
yesterday_bedtime = yesterday_bedtime,
today_awake_time = today_awake_time,
combined_user_activities = combined_user_exercise_activities,
today_bedtime = today_bedtime,
user_input_strength_start_time = strength_start_time,
user_input_strength_end_time = strength_end_time,
nap_start_time = nap_start_time,
nap_end_time = nap_end_time
)
if movement_consistency:
steps_calculated_data['movement_consistency'] = json.dumps(movement_consistency)
inactive_hours = movement_consistency.get("inactive_hours")
grade = quicklook.calculations.garmin_calculation\
.cal_movement_consistency_grade(inactive_hours)
grades_calculated_data['movement_consistency_grade'] = grade
        # overall grade and GPA
overall_grade_pt = get_overall_grades(grades_calculated_data)
grades_calculated_data['overall_health_grade'] = overall_grade_pt[0]
grades_calculated_data['overall_health_gpa'] = overall_grade_pt[1]
# Average exercise heartrate grade calculation
avg_exercise_hr_grade_pts = quicklook.calculations.\
garmin_calculation.get_average_exercise_heartrate_grade(
combined_user_exercise_activities,todays_daily_strong,user_age)
hr_grade = 'N/A' if not avg_exercise_hr_grade_pts[0] else avg_exercise_hr_grade_pts[0]
grades_calculated_data['avg_exercise_hr_grade'] = hr_grade
grades_calculated_data['avg_exercise_hr_gpa'] = avg_exercise_hr_grade_pts[1]\
if avg_exercise_hr_grade_pts[1] else 0
exercise_calculated_data['avg_exercise_heartrate'] = avg_exercise_hr_grade_pts[2]\
if avg_exercise_hr_grade_pts[2] else 0
#Average non strength exercise heartrate calculation
avg_non_strength_hr_grade_pts = quicklook.calculations.\
garmin_calculation.get_average_exercise_heartrate_grade(
combined_user_exercise_activities,todays_daily_strong,user_age,
non_strength_only=True)
        exercise_calculated_data['avg_non_strength_heartrate'] = avg_non_strength_hr_grade_pts[2]\
            if avg_non_strength_hr_grade_pts[2] else 0
# If quick look for provided date exist then update it otherwise
# create new quicklook instance
try:
user_ql = UserQuickLook.objects.get(user=user,created_at = current_date.date())
quicklook.calculations.garmin_calculation\
.update_helper(user_ql.grades_ql,grades_calculated_data)
quicklook.calculations.garmin_calculation\
.update_helper(user_ql.exercise_reporting_ql, exercise_calculated_data)
quicklook.calculations.garmin_calculation\
.update_helper(user_ql.swim_stats_ql, swim_calculated_data)
quicklook.calculations.garmin_calculation\
.update_helper(user_ql.bike_stats_ql, bike_calculated_data)
quicklook.calculations.garmin_calculation\
.update_helper(user_ql.steps_ql, steps_calculated_data)
quicklook.calculations.garmin_calculation\
.update_helper(user_ql.sleep_ql, sleeps_calculated_data)
quicklook.calculations.garmin_calculation\
.update_helper(user_ql.food_ql, food_calculated_data)
quicklook.calculations.garmin_calculation\
.update_helper(user_ql.alcohol_ql, alcohol_calculated_data)
except UserQuickLook.DoesNotExist:
user_ql = UserQuickLook.objects.create(user = user,created_at=current_date.date())
Grades.objects.create(user_ql=user_ql, **grades_calculated_data)
ExerciseAndReporting.objects.create(user_ql = user_ql,**exercise_calculated_data)
SwimStats.objects.create(user_ql=user_ql, **swim_calculated_data)
BikeStats.objects.create(user_ql = user_ql,**bike_calculated_data)
Steps.objects.create(user_ql = user_ql,**steps_calculated_data)
Sleep.objects.create(user_ql = user_ql,**sleeps_calculated_data)
Food.objects.create(user_ql = user_ql,**food_calculated_data)
Alcohol.objects.create(user_ql = user_ql,**alcohol_calculated_data)
SERIALIZED_DATA.append(UserQuickLookSerializer(user_ql).data)
#Add one day to current date
current_date += timedelta(days=1)
return SERIALIZED_DATA
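# Hypothetical usage sketch (the user lookup is invented for illustration):
#
#   from django.contrib.auth.models import User
#   user = User.objects.get(username='jane')
#   summaries = create_fitbit_quick_look(user, from_date='2020-07-01',
#                                        to_date='2020-07-07')
#
# Each entry of `summaries` is the serialized UserQuickLook for one day.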
|
# -*- python -*-
class Patient(object):
def __init__( self, id, name, allergies ):
self.id = id
self.name = name
self.allergies = allergies
self.bed_number = None
    def __repr__( self ):
        return(
            '{{ type: Patient, id: {}, name: "{}", allergies: {}, bed_number: {} }}'.format(
                self.id, self.name, self.allergies, self.bed_number
            )
        )
|
"""Change polling model
Revision ID: 20c45fff9ccb
Revises: 1efa9375e67b
Create Date: 2020-07-05 16:12:47.521137
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '20c45fff9ccb'
down_revision = '1efa9375e67b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('service_polling',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('service', sa.String(), nullable=True),
sa.Column('started_at', sa.DateTime(), nullable=True),
sa.Column('stopped_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('polling')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('polling',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('repository_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('last_event', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('last_open_update', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('last_full_update', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='polling_pkey')
)
op.drop_table('service_polling')
# ### end Alembic commands ###
|
from re import sub
from rest_framework import serializers
from django.urls import reverse
from django.utils.html import strip_tags
from django.contrib.auth.models import User
from .models import Post
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('first_name', 'last_name')
class PostSerializer(serializers.ModelSerializer):
author = UserSerializer()
post_title = serializers.SerializerMethodField()
shorter_title = serializers.SerializerMethodField()
strip_content = serializers.SerializerMethodField()
shorter_content = serializers.SerializerMethodField()
thumb_img = serializers.SerializerMethodField()
feat_img = serializers.SerializerMethodField()
post_url = serializers.SerializerMethodField()
pub_date = serializers.SerializerMethodField()
def get_post_title(self, obj):
if len(obj.header_title) > 110:
return obj.header_title[:110] + '...'
else:
return obj.header_title
def get_shorter_title(self, obj):
if len(obj.header_title) > 60:
return obj.header_title[:60] + '...'
else:
return obj.header_title
def get_strip_content(self, obj):
remove_tags = strip_tags(obj.post_content)
strip_newline = sub(r'\r\n', ' ', remove_tags)
return strip_newline[:150] + '...'
def get_shorter_content(self, obj):
remove_tags = strip_tags(obj.post_content)
strip_newline = sub(r'\r\n', ' ', remove_tags)
return strip_newline[:90] + '...'
def get_thumb_img(self, obj):
if obj.thumbnail_image:
return obj.thumbnail_image.url
else:
return
def get_feat_img(self, obj):
if obj.featured_image:
return obj.featured_image.url
else:
return
def get_post_url(self, obj):
return reverse('posts:post_detail', kwargs={'post_slug': obj.post_slug, 'post_id': obj.pk} )
def get_pub_date(self, obj):
return obj.publication_date.strftime('%b %d, %Y')
class Meta:
model = Post
fields = ('id', 'author', 'is_published', 'pub_date', 'post_title', 'shorter_title', 'strip_content', 'shorter_content', 'post_category', 'thumb_img', 'feat_img', 'post_url')
depth = 1
|
"""Plot data from table of pressures and fluxes from proplyd arc fits
"""
from astropy.table import Table
from matplotlib import pyplot as plt
import json
import numpy as np
import seaborn as sns
AU = 1.49597870691e13
PC = 3.085677582e18
d_Orion = 440.0
k_Boltzmann = 1.3806503e-16
cos80 = 0.173648177667
tab = Table.read('../doc/wind-fits.tab', format='ascii.tab')
sources = sorted(set(tab['Source']))
n = len(sources)
colors = sns.color_palette('Set1', n)
# Physical separations in parsec
D = tab['D'] * d_Orion*AU/PC
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
ax1.loglog(D, tab['F(star)'], c='k', alpha=0.1, lw=10, label='')
ax2.loglog(D, tab['P(wind)']/k_Boltzmann, c='k', alpha=0.1, lw=10, label='')
mm = (tab['F(ph)/F(*)'] > 0.707) & (tab['F(ph)/F(*)'] < 1.414)
out_colnames = ['Source' ,'D prime', 'R0/D prime', 'Rc/R0 prime full',
'Rc/R0 prime select', 'beta', 'xi', 'inc', 'D', 'R0/D']
out_formats = {
'D prime': '%.1f',
'R0/D prime': '%.4f',
'Rc/R0 prime full': '%.4f',
'Rc/R0 prime select': '%.4f',
'beta': '%.4f',
'xi': '%.4f',
'inc': '%.4f',
'D': '%.4f',
'R0/D': '%.4f',
}
out_rows = []
def var_range(x, dx):
return x, dx
for source, color in zip(sources, colors):
m = tab['Source'] == source
Dprime = tab['D\''][m][0] * d_Orion*AU/PC
F = tab['F(photo)'][m][0]
# Get data from variational fits
combined_file = '../read-shapes/LV-bowshocks-xyfancy-variations-{}.save'.format(source)
vardata = json.load(open(combined_file))
# Combine with data from org table to fill in a new output table
Rcp_select = tab['Rc\'/R0\''][m & mm]
Rcp_full = np.array(vardata['Rc'])/np.array(vardata['R0'])
beta = tab[r'\beta'][m & mm]
xi = tab['xi'][m & mm]
inc = tab['i'][m & mm]
DD = tab['D'][m & mm] * d_Orion*AU/PC
R0_D = tab['R0/D'][m & mm]
out_rows.append({
'Source': source,
'D prime': tab['D\''][m][0],
'R0/D prime': var_range(np.mean(vardata['R0']), np.std(vardata['R0'])),
'Rc/R0 prime full': var_range(np.mean(Rcp_full), np.std(Rcp_full)),
'Rc/R0 prime select': var_range(np.mean(Rcp_select), np.std(Rcp_select)),
'beta': var_range(np.mean(beta), np.std(beta)),
'xi': var_range(np.min(xi), np.max(xi)),
'inc': var_range(np.mean(inc), np.std(inc)),
'D': var_range(np.mean(DD), np.std(DD)),
'R0/D': var_range(np.mean(R0_D), np.std(R0_D)),
})
ax1.loglog([Dprime, Dprime/cos80], [F, F], ':', c=color, alpha=0.4, label='')
ax1.loglog(D[m], tab['F(photo)'][m],
'-', c=color, alpha=0.4, label='')
ax1.loglog(D[m & mm], tab['F(photo)'][m & mm],
'o-', lw=3, c=color, label=source)
ax2.loglog(D[m], tab['P(in)'][m]/k_Boltzmann,
'-', c=color, alpha=0.4, label='')
ax2.loglog(D[m & mm], tab['P(in)'][m & mm]/k_Boltzmann,
'o-', c=color, lw=3, label=source)
ax2.legend(ncol=2, loc='lower left')
ax2.set_xlim(0.008, 0.3)
ax2.set_ylim(1e7, 4e9)
ax1.set_ylim(2e12, 3e14)
ax1.set_ylabel(r'Ionizing Flux, $\mathrm{cm^{-2}\ s^{-1}}$')
ax2.set_ylabel(r'Stagnation Pressure: $P/k$, $\mathrm{cm^{-3}\ K}$')
ax2.set_xlabel('Distance, parsec')
fig.set_size_inches(5, 8)
fig.tight_layout()
fig.savefig('plot-wind-fits.pdf')
out_tab = Table(names=out_colnames, rows=out_rows)
out_tab.pprint(max_width=-1, max_lines=-1)
out_tab.write('arc-fit-table-for-paper.tab',
format='ascii.fixed_width', formats=out_formats)
|
# -*-coding:utf-8-*-
import time
import numpy as np
from gensim.models.doc2vec import Doc2Vec
from sklearn.externals import joblib
import labsql
class SvmInfer:
def __init__(self):
self.test_set = {}
self.clf = joblib.load("./svm_models/train_model.m")
self.conn = labsql.LabSQL('172.168.1.36', 'sohu', 'sa', 'scucc')
        pred_data = self.conn.fetch("select * from prep_all")
        # Load the doc2vec model once instead of reloading it for every row
        d2v = Doc2Vec.load('./d2v_models/d2v_dm_model.all')
        for ID, data, _ in pred_data:
            inferred_vector = d2v.infer_vector(str(data).split(), alpha=1e-3, epochs=100)
            self.test_set[ID] = inferred_vector
    def inferred(self):
        y_pred = self.clf.predict(np.array(list(self.test_set.values())))
        for i, ID in enumerate(self.test_set):
            self.conn.insert("""insert into [prep_labeled]([id],[label]) values(?,?)""",
                             [ID, int(y_pred[i])])
            x = (i + 1) / len(self.test_set)
            print('[' + '#' * round(x * 100) + '>' + ' ' * (100 - round(x * 100)) + '] ' + str(round(x * 100, 2)) + '%')
if __name__ == '__main__':
st = time.time()
inf = SvmInfer()
inf.inferred()
inf.conn.close_connection()
print('runtime: %s sec' % (time.time() - st))
|
from .views import CoffeeShopViewSet, NewsletterViewSet, BookViewSet, CoffeeShopBooksViewSet
from rest_framework import routers
from django.urls import path, include
app_name = 'api-coffee-shops'
router = routers.DefaultRouter()
router.register(r'coffee_shops', CoffeeShopViewSet)
router.register(r'coffee_shops/(?P<id>\d+)', CoffeeShopViewSet)
router.register(r'newsletter', NewsletterViewSet)
router.register(r'books', BookViewSet)
router.register(r'books/(?P<id>\d+)', BookViewSet)
router.register(r'coffee_shop/(?P<id>\d+)/books', CoffeeShopBooksViewSet)
urlpatterns = [
path('', include(router.urls))
]
|
import numpy as np
small = 1e-6
def createFAST_EllipticWingBlade(span, nNodes):
dx = span/(nNodes-1.0)
x = np.arange(0,span+small, dx) - span*0.5
c = np.sqrt(1.0 - (2.0*x/span)**2)
c[0] = small
c[-1] = small
with open('EllipticWing_Aerodyn_Blade.dat','w') as f:
f.write('------- AERODYN v15.00.* BLADE DEFINITION INPUT FILE -------------------------------------\n')
f.write('Elliptic wing\n')
f.write('====== Blade Properties =================================================================\n')
f.write('{} NumBlNds - Number of blade nodes used in the analysis (-)\n'.format(nNodes))
f.write('BlSpn BlCrvAC BlSwpAC BlCrvAng BlTwist BlChord BlAFID\n')
f.write('(m) (m) (m) (deg) (deg) (m) (-)\n')
for i in range(nNodes):
        print(x[i] + span*0.5)  # node position along the span
f.write('{} 0.0000000E+00 0.0000000E+00 0.0000000E+00 0.0000000E+00 {} 1\n'.format(x[i]+span*0.5, c[i] ))
if __name__ == "__main__":
createFAST_EllipticWingBlade(10, 18)
|
def pericolo_minimo(mappa):
def conversion(mappa):
grafo = {nodo:[] for nodo in range(len(mappa) * len(mappa[0]))}
col_size = len(mappa)
row_size = len(mappa[0])
for col in range(len(mappa)):
for row in range(len(mappa[col])):
nodo = (row_size * row) + col
val_nodo = mappa[col][row]
if (row >= 1):
nodo_up = (row_size * (row - 1)) + col
grafo[nodo].append((nodo_up,
abs(val_nodo - mappa[col][row - 1])))
if (row < row_size - 1):
nodo_down = (row_size * (row + 1)) + col
grafo[nodo].append((nodo_down,
abs(val_nodo - mappa[col][row + 1])))
if (col < col_size - 1):
nodo_right = (row_size * row) + (col + 1)
grafo[nodo].append((nodo_right,
abs(val_nodo - mappa[col + 1][row])))
if (col >= 1):
nodo_left = (row_size * row) + (col - 1)
grafo[nodo].append((nodo_left,
abs(val_nodo - mappa[col - 1][row])))
return grafo
def dijkstra_modificato(nodo, grafo):
from heapq import heappop, heappush
min_costo = 0
visitati = {nodo}
        # Initialize the heap.
heap = []
for adiacente, costo in grafo[nodo]:
            heappush(heap, (costo, adiacente))  # the heap orders pairs by cost
while heap:
costo, nodo = heappop(heap)
if costo > min_costo:
min_costo = costo
if nodo == len(grafo) - 1:
break
            # Skip stale (cost, node) pairs for already-visited nodes.
if nodo not in visitati:
visitati.add(nodo)
for adiacente, costo in grafo[nodo]:
if adiacente not in visitati:
heappush(heap, (costo, adiacente))
return min_costo
grafo = conversion(mappa)
return dijkstra_modificato(0, grafo)
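# Hypothetical usage sketch (the grid values are invented): `mappa` is a
# square grid of heights, and pericolo_minimo returns the smallest achievable
# maximum height difference between adjacent cells on a path from the first
# cell to the last one (a bottleneck variant of Dijkstra).
if __name__ == '__main__':
    mappa = [
        [1, 3, 5],
        [2, 8, 4],
        [6, 7, 9],
    ]
    print(pericolo_minimo(mappa))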
|
from backpack.core.derivatives.basederivatives import BaseDerivatives
class FlattenDerivatives(BaseDerivatives):
def hessian_is_zero(self):
return True
def ea_jac_t_mat_jac_prod(self, module, g_inp, g_out, mat):
return mat
def _jac_t_mat_prod(self, module, g_inp, g_out, mat):
return self.reshape_like_input(mat, module)
def _jac_mat_prod(self, module, g_inp, g_out, mat):
return self.reshape_like_output(mat, module)
def is_no_op(self, module):
"""Does flatten add an operation to the computational graph.
If the input is already flattened, no operation will be added for
the `Flatten` layer. This can lead to an intuitive order of backward
hook execution, see the discussion at https://discuss.pytorch.org/t/
backward-hooks-changing-order-of-execution-in-nn-sequential/12447/4 .
"""
return tuple(module.input0.shape) == tuple(module.output.shape)
|
# Print the first n terms of the Fibonacci series
n = int(input('Enter the number of terms: '))
a = 0
b = 1
if n == 1:
    print(a)
else:
    print(a, end=' ')
    print(b, end=' ')
    for i in range(n - 2):
        c = a + b
        a = b
        b = c
        print(c, end=' ')
|
"""
- module that houses an object of type uPub
"""
class uPub_data(object):
"""docstring for `uPub_data`."""
    def __init__(self, line):
        """assigns values to the object.
        INPUT: MD file containing all data; path to image
        """
        import hashlib
self.title = None # title of the microPublication
self.path_to_image = None
self.gff = None
self.fna = None
self.faa = None
self.uPub_path = None
self.supp_path = None
self.md_path = None
self.publication_data = None # data for publication (author list, figure title, etc...) as dictionary
def show( self ):
"""prints the object as a DICTIONARY
"""
from pprint import pprint as pprint
pprint( self.__dict__ )
def docx_to_md( uPub_doc ):
"""
- converts uPub doc from DOCX to MD
        - saves the image to disk and links it to the object
- returns dictionary of paths to MD and PNG file
"""
import os # to run pandoc from the command line
from uPub_overworld_defs import getListOfFiles
md_path = ".".join(uPub_doc.split(".")[:-1])+".md"
image_folder = "/".join(uPub_doc.split("/")[:-1])+"_pub_image"
os.system( "pandoc -s {0} --extract-media {2} --wrap=none --reference-links -t markdown -o {1}".format(
uPub_doc,
md_path,
image_folder,
) )
image_file_name = getListOfFiles( image_folder )[0]
output_dict = {
"image_file_name" : image_file_name,
"md_path" : md_path,
}
return( output_dict )
def associate_all_data( uPub_data_object ):
"""
- takes in a uPub_data object and associates
- calls in other methods to get each separate thing like title, figure text, etc...
- returns a dictionary of the publication data:
dict = {
title : ............... ,
author_list : [
corresponding,
other1,
other2,
...
],
figure_text : ............... ,
...
}
        - uses other definitions to get individual objects
- INPUT: uPub_data_object
- OUTPUT: dictionary that houses publication_data
"""
from pprint import pprint as pprint
title = uPub_data.get_title( uPub_data_object.md_path )
authors = uPub_data.get_authors( uPub_data_object.md_path )
image_caption = uPub_data.get_image_caption( uPub_data_object.md_path )
data = {
"title" : title ,
"authors" : authors ,
"image_caption" : image_caption ,
}
# print( "DATA", end="\t" )
# pprint( data )
return( data )
# return( "Finished running uPub_data.associate_all_data( uPub_data_object )" )
def get_title( md_path ):
"""
- reads in the md_path file and returns the name of the publication
- INPUT: MD PATH (STR)
- OUTPUT: TITLE OF PUBLICATION (STR)
"""
title = ""
with open( md_path, "r" ) as md_file:
for line in md_file:
if "title:" in line:
title = line.strip()[8:-1]
break
return( title )
def get_authors( md_path ):
"""
- gets the authors of the microPublication
- return a list of objects of type author
"""
from author_object import author
# filter out the section that houses author information.
# Pass that list of lists to a function in the author class to return author data
author_data_raw = author.grab_author_data( md_path )
author_data_formatted = author.format_author_data( author_data_raw )
return( author_data_formatted )
def get_image_caption( md_path ):
"""
- grabs the caption of the image from the MD
- returns a string
"""
image_caption = ""
with open( md_path, "r" ) as md_file:
for line in md_file:
if "**[Figure 1:]{.underline}**" in line:
image_caption = line.replace("**[Figure 1:]{.underline}**", "").strip()
image_caption = uPub_data.italicize_appropriate_terms( image_caption )
return( image_caption )
def italicize_appropriate_terms( janky_string ):
"""
- returns a string that has been appropriately italicized.
"""
italicized_text = "*this text will show up italicized*"
print( janky_string )
return( italicized_text )
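# A minimal usage sketch (hypothetical file name, for illustration only):
# paths = uPub_data.docx_to_md("micro_pub.docx")
# title = uPub_data.get_title(paths["md_path"])
# caption = uPub_data.get_image_caption(paths["md_path"])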
|
import numpy as np
import pandas as pd
from pandas import DataFrame
from scipy import linalg
import matplotlib.pyplot as plt
# from sklearn.decomposition import PCA
# De-meaning the data leaves the covariance matrix unchanged, and with it the
# eigenvalues and eigenvectors.
# After standardization the covariance matrix equals the correlation matrix,
# which equals the correlation matrix of the raw data.
# Machine learning works with the covariance matrix of the normalized data.
# numpy.std() divides by n by default (biased); pass ddof=1 for the unbiased
# sample standard deviation.
# pandas.std() divides by n-1 by default (unbiased); pass ddof=0 to get the
# biased version matching numpy.std().
# Running SVD and eigendecomposition on the normalized data: the squared
# singular values s**2 equal the eigenvalues, and the eigenvectors of <1>, <2>
# and <3> are identical, but the eigenvalues of <1> and <2> are m = nobs - 1
# times those of <3>, i.e. lambda_{1,2} / m = lambda_3.
mat = np.array([[32, 45, 43], [34, 56, 3], [54, 2, 4], [43, 9, 65]])
mat_norm = mat - np.mean(mat, axis=0)
# mat_norm = (mat-np.mean(mat,axis=0))/np.std(mat,axis=0,ddof=1)  # <1>
u,s,v = linalg.svd(mat_norm)                     # <1> singular values are returned in descending order
eva,eve = linalg.eig(mat_norm.T.dot(mat_norm))   # <2>
coveva,coveve = linalg.eig(np.cov(mat.T))        # <3>
# coveva,coveve = linalg.eig(np.corrcoef(mat.T)) # <3>
# debt = pd.read_excel("C:\\Users\\Administrator\\Desktop\\梁\\世纪华通偿债能力.xls",header=0)
debt = pd.ExcelFile("C:\\Users\\Administrator\\Desktop\\梁\\世纪华通偿债能力.xls").parse('Sheet1')
debt = debt.set_index(['截止日期'])
debt = debt.fillna(method='bfill')
class pca(object):
def __init__(self,data, k=None,scale_method='0_1standardized'):
self.data=data
self.k=k
self.scale_method = scale_method
self.data_scale = self.data_normalize()
self.cov_mat = self.covar_mat()
self.eigVals = self.calc_eigen()['Eigenvalues']
        self.eigVals_sort = self.eigVals.sort_values(ascending=False)  # eigenvalues in descending order
self.eigVects = self.calc_eigen()['Eigenvectors']
self.variance_ratio = self.eigvals_and_ratio()['Eigenvalues of the Covariance Matrix']['Proportion%']
self.cum_variance_ratio = self.eigvals_and_ratio()['Eigenvalues of the Covariance Matrix']['Cumulative%']
self.EigVects_K = self.eigvects_k()
def __new__(cls,data,*args,**kwargs):
        if not isinstance(data, DataFrame):
raise TypeError('The type of input data must be pandas DataFrame')
return object.__new__(cls)
def data_normalize(self):
        if self.scale_method=='0_1standardized':
            data_scale = (self.data-self.data.mean())/self.data.std()  # z-score standardization (textbook); corr() unchanged
        elif self.scale_method=='mean_divided':
            data_scale = self.data/self.data.mean()  # mean-division scaling (literature); corr() unchanged
        elif self.scale_method=='centralized':
            data_scale = self.data-self.data.mean()  # centering; cov()/corr() unchanged (max-min scaling keeps corr() unchanged)
else:
raise ValueError("The parameter of 'scale_method' is not correct")
return data_scale
def corr_mat(self):
corr_mat = self.data_scale.corr()
corr_mat.columns = [['Correlation Matrix']*len(corr_mat.columns),list(corr_mat.columns)]
return corr_mat
def covar_mat(self):
        cov_mat = self.data_scale.cov()  # covariance matrix (equals the correlation matrix for standardized data)
        cov_mat.columns = [['Covariance Matrix']*len(cov_mat.columns),list(cov_mat.columns)]
return cov_mat
def calc_eigen(self):
        eigVals,eigVects = linalg.eig(self.cov_mat)  # compute eigenvalues and eigenvectors
eigVects = pd.DataFrame(np.around(eigVects.T,4),columns=['Eigenvectors']*eigVects.shape[0])
eigVals = [i.real if i.imag==0.0 else i for i in np.around(eigVals,4)]
eigVals = pd.Series(eigVals,name='Eigenvalues')
eig = pd.concat([eigVals,eigVects],axis=1)
return eig
def eigvals_and_ratio(self):
        eigVals_variance_ratio = self.eigVals_sort/self.eigVals.sum()  # variance contribution ratio
        cum_variance_ratio = self.eigVals_sort.cumsum()/self.eigVals.sum()  # cumulative variance contribution ratio
eig_and_ratio = pd.DataFrame({'Eigenvalues':self.eigVals_sort,'Difference':-self.eigVals_sort.diff(),
'Proportion%':eigVals_variance_ratio*100,'Cumulative%':cum_variance_ratio*100},columns=['Eigenvalues','Difference','Proportion%','Cumulative%'])
eig_and_ratio.columns = [['Eigenvalues of the Covariance Matrix']*len(eig_and_ratio.columns),list(eig_and_ratio.columns)]
eig_and_ratio = eig_and_ratio.fillna(' ')
return eig_and_ratio
def eigvects_k(self):
        if self.k is None:
            self.k = (self.cum_variance_ratio<70.0).sum()+1
        elif isinstance(self.k, float):
            self.k = (self.cum_variance_ratio<self.k*100).sum()+1
        eigValInd = self.eigVals_sort.index  # indices of the eigenvalues in descending order
        eigValInd_K = eigValInd[:self.k]  # keep the K largest eigenvalues
        EigVects_K = self.eigVects.iloc[eigValInd_K,:]  # eigenvectors of the K largest eigenvalues
return EigVects_K
def prin_score_equation(self):
prin_equation = []
for i in range(self.k):
equation = 'Y'+str(i+1)+'='+ (pd.Series(['%+f'%(round(z[0],4))+'*'+z[1] for z in zip(self.EigVects_K.iloc[i,:],self.data_scale.columns)]).str.cat(sep=''))
prin_equation.append(equation)
        prin_score = np.dot(self.data_scale,self.EigVects_K.T)  # project the data onto the low-dimensional space, i.e. the principal component scores
Y = ['Y'+str(i+1) for i in range(self.k)]
prin_score = pd.DataFrame(prin_score,columns=Y,index=self.data_scale.index)
compre_score = np.dot(prin_score.values,np.vstack(self.variance_ratio[:self.k].values))
compre_score = pd.DataFrame(compre_score,index=self.data_scale.index,columns=['comprehensive_score'])
return prin_equation,prin_score,compre_score
@property
def screeplot(self):
plt.plot(np.arange(1,len(self.data.columns)+1),self.eigVals,marker='o',linestyle='-')
plt.xlabel('The Number of Principal Component')
plt.ylabel('Eigenvalues')
plt.title('Screeplot')
plt.xticks(np.arange(len(self.data.columns)+1))
return plt.show()
@property
def n_components(self):
return self.k
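# A minimal usage sketch, assuming `debt` is the DataFrame loaded above:
# model = pca(debt, k=0.85)  # keep components covering 85% cumulative variance
# equations, scores, comprehensive = model.prin_score_equation()
# model.screeplot            # scree plot of the eigenvalues
# print(model.n_components)  # number of retained components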
|
"""Tests for wait_for_travis"""
import pytest
from constants import (
NO_PR_BUILD,
TRAVIS_FAILURE,
TRAVIS_PENDING,
TRAVIS_SUCCESS,
)
from wait_for_travis import wait_for_travis
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize("statuses,result", [
[[NO_PR_BUILD, NO_PR_BUILD], NO_PR_BUILD],
[[NO_PR_BUILD, TRAVIS_SUCCESS], TRAVIS_SUCCESS],
[[NO_PR_BUILD, TRAVIS_FAILURE], TRAVIS_FAILURE],
[[TRAVIS_PENDING, TRAVIS_SUCCESS], TRAVIS_SUCCESS],
[[TRAVIS_PENDING, TRAVIS_FAILURE], TRAVIS_FAILURE],
[[TRAVIS_SUCCESS], TRAVIS_SUCCESS],
[[TRAVIS_FAILURE], TRAVIS_FAILURE],
])
async def test_wait_for_travis(mocker, statuses, result):
"""wait_for_travis should check the github status API every 30 seconds"""
get_status_mock = mocker.async_patch('wait_for_travis.get_status_of_pr')
get_status_mock.side_effect = statuses
sleep_sync_mock = mocker.async_patch('asyncio.sleep')
token = 'token'
org = 'org'
repo = 'repo'
branch = 'branch'
assert await wait_for_travis(
github_access_token=token,
org=org,
repo=repo,
branch=branch,
) == result
get_status_mock.assert_any_call(
github_access_token=token,
org=org,
repo=repo,
branch=branch,
)
assert get_status_mock.call_count == len(statuses)
assert sleep_sync_mock.call_count == len(statuses) - 1
if len(statuses) - 1 > 0:
sleep_sync_mock.assert_any_call(30)
|
a = {'tel': 25, 'hin': 36, 'eng': 15}
k = input("enter sub:")
if k in a:
    print("present, value = ", a[k])
else:
    print("not found")
|
def temp():
    temp = float(input("Enter temperature:"))
    units = input("In which units C or F?:").upper()
    if units == "C":
        temp = ((9/5)*temp)+32
        print("That's", temp, "F")
    elif units == "F":
        temp = (temp-32)*(5/9)
        print("That's", temp, "C")
    else:
        print("The unit should be either C or F")
temp()
|
import calendar
import math
import numpy as np
import threading
import re
import time
# input data from the manager to the engine: timestamp,duration,bitrate,width,height
def find_bitrate_stats(documents, method):
""" Calculate simple statistics for a collection of bitrates
Args:
documents: A MongoDB documents object with records containing the field
'bitrate'
method: 'avg', 'min', 'max', or 'range'
Return: either a single integer or 'N/A'
"""
bitrates = list()
for document in documents:
try:
bitrates.append(document['bitrate'])
except KeyError:
return 'N/A'
try:
if method.lower() == 'min':
return min(bitrates)
if method.lower() == 'avg':
return ( sum(bitrates) / len(bitrates) )
if method.lower() == 'max':
return max(bitrates)
if method.lower() == 'range':
return (max(bitrates) - min(bitrates))
except (ValueError, ZeroDivisionError):
return 'N/A'
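# Example (hypothetical list standing in for a MongoDB cursor):
#   docs = [{'bitrate': 800}, {'bitrate': 1200}]
#   find_bitrate_stats(docs, 'avg')    # -> 1000
#   find_bitrate_stats(docs, 'range')  # -> 400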
def handle_this_method_call(_dictionary):
print 'engine is being called'
print _dictionary
def add_videoTime(_stats):
"""
:return: A list of the
the video time based on the received timestamps and segment duration time.
It assumes that the all segments have the same size.
"""
passedTime = 0
for i in range(0, len(_stats), 1):
        if i == 0:
passedTime = 0
else:
passedTime += _stats[i]['duration']
_stats[i]['videoTime'] = _stats[i]['timestamp'] + passedTime
return (_stats)
def test_add_videoTime(_stats):
_new_stats = add_videoTime(_stats)
print "New Stats:"
print _new_stats
def calc_noStalls(_stats):
"""
:input: _stats with the video time
:return: number of times stall have happened
"""
    # If timestamp[i+1] > videoTime[i], a stall happened; i is the segment index.
#TODO add check for duplicate segment requests and so on.
noStalls = 0
for i in range(1, len(_stats) - 1):
if _stats[i]['videoTime'] < _stats[i + 1]['timestamp']:
noStalls += 1
return noStalls
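# Example: if segment i finishes playing at videoTime 10s but the request for
# segment i+1 only arrives at timestamp 12s, the buffer ran dry for ~2s and
# one stall is counted.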
def calc_videoQuality(_stats):
"""
video_bitrate: Video Bitrate, shows video compression bitrate like 200kbps
video_resolution: Video Resolution like 720p or 1080p
Return: videoQuality.
"""
_videoQuality = list()
for document in _stats:
height = int(document['height'])
bitrate = int(document['bitrate'])
if height == 720 and bitrate < 2000:
_videoQuality.append([document['timestamp'], -4.85 * math.pow(int(bitrate), -0.647) + 1.011])
if height == 1080:
_videoQuality.append([document['timestamp'], -3.035 * math.pow(int(bitrate), -0.5061) + 1.022])
if height == 360 and bitrate < 1000:
_videoQuality.append([document['timestamp'], -17.53 * math.pow(int(bitrate), -1.048) + 0.9912])
return _videoQuality
def test_calc_videoQuality(_stats):
print calc_videoQuality(_stats)
def get_minVideoBitrate(_stats):
"""
:param _stats: It is the list of parameters passed to calculate stats
:return: minimum video bitrate in the list
"""
if len(_stats) > 0:
        _minBitrate = min(s['bitrate'] for s in _stats)
else:
_minBitrate = 0
return _minBitrate
def get_maxVideoBitrate(_stats):
"""
:param _stats: It is the list of parameters passed to calculate stats
:return: maximum video bitrate in the list
"""
if len(_stats) > 0:
        _maxBitrate = max(s['bitrate'] for s in _stats)
else:
_maxBitrate = 0
return _maxBitrate
def get_noBitrateChanges(_stats):
"""
:param _stats:
:return: the number of changes in the video bitrate.
"""
if len(_stats) <= 0:
return 0
prev = _stats[0]['bitrate']
count = 0
for i in range(1, len(_stats)):
        if _stats[i]['bitrate'] != prev:
count += 1
prev = _stats[i]['bitrate']
return count
def test_get_noBitrateChanges(_stats):
"""
:param _stats:
:return: test the get_noBitrateChanges function
"""
_noBitrateChanges=get_noBitrateChanges(_stats)
print "Number of Bitrate Changes is:" + str(_noBitrateChanges)
def calc_weightedAvgBitrate(_stats):
"""
:param _stats: It gets _stats with updated videoTimestamp
:return: weighted avgBitrate based on the playback time duration.
"""
_avgBitrate = 0
_sumBitrate = 0
_totalTime = long(_stats[-1]['videoTime']) - long(_stats[0]['videoTime'])
for i in range(0, len(_stats)-1):
_sumBitrate += (long(_stats[i+1]['videoTime'])-long(_stats[i]['videoTime']))*int(_stats[i]['bitrate'])
_avgBitrate=_sumBitrate/_totalTime
return _avgBitrate # def _calcSwitchingImpact(self,start_time,curr_time,vq_before,vq_after):
def test_calc_weightedAvgBitrate(_stats):
"""
:param _stats:
:return: test the calc_weightedAvgBitrate
"""
_stats_w_videoTime=add_videoTime(_stats)
_wAvgBitrate=calc_weightedAvgBitrate(_stats_w_videoTime)
print "Weighted Average Bitrate is:" + str(_wAvgBitrate)
def calc_switchingImpact(startTime,currentTime,vq0,vq1):
"""
:param startTime: start time stamp for the switching from vq0 bitrate to vq1 bitrate
:param currentTime: current time for the video playback
:param vq0: video quality before switching
:param vq1: video quality after switching
:return: switching impact for the current change in bitrate
    (note: the calculated switching impact is only for this change and needs to
    be summed with the switching impact values of the other bitrate switches)
"""
_si=np.absolute(vq1 - vq0)*np.exp(-0.015*(currentTime - startTime))
return _si
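# Example: an immediate switch (currentTime == startTime) yields the full
# quality gap, e.g. calc_switchingImpact(0, 0, 0.5, 0.9) == 0.4; the impact
# then decays exponentially as playback time passes.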
def calc_totalSwitchingImpact(_stats,currentTime):
"""
:param _stats: List of stats with the video Time.
:param currentTime: current video Time.
:return: calculate the total switching impact considering all previous changes until the currentTime.
"""
_totalSI=0.0
return _totalSI
# br_data=self._stats['videoBitrate']
# sw_index=[]
# #find where switching is happened in the video bitrate list and return the list of index.
# for i in range(1,len(br_data)-2):
# if br_data[i+1] != br_data[i]:
# sw_index.append(i+1)
# if currentTime < long(self._stats['videoTime'][-1]):
# print "current time is not matched with the logs time"
# return
# else:
# end_time=currentTime - long(self._stats['videoTime'][-1])
# #Create an array for each switching happens in the time.
# br_impact=np.zeros((len(sw_index),end_time))
# k=0
# for i in sw_index:
# for j in range(end_time):
# if j < i:
# br_impact[k,j]=0
# else:
# br_impact[k,j]=self._calcSwitchingImpact(
# start_time=self._stats['videoTime'][i],
# curr_time=self._stats['videoTime'][i]+(j-i),
# br_before=self._stats['videoQuality'][i-1],
# br_after=self._stats['videoQuality'][i]
# )
# k=k+1
# final_data=np.zeros(end_time)
# for j in range(br_impact.shape[1]):
# for i in range(br_impact.shape[0]):
# final_data[j]+=br_impact[i][j]
# self._stats['switchingImpact']=list(final_data)
|
# -*- coding: utf-8 -*-
"""Unittests for models.
"""
__license__ = """
This file is part of Janitoo.
Janitoo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Janitoo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Janitoo. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Sébastien GALLET aka bibi21000'
__email__ = 'bibi21000@gmail.com'
__copyright__ = "Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000"
import sys, os
import time, datetime
import unittest
import threading
import logging
import traceback
from pkg_resources import iter_entry_points
from nose_parameterized import parameterized
from nose.plugins.skip import SkipTest
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import Table, Column, String
from alembic import command as alcommand
from janitoo_nosetests import JNTTBase, alembic_version, DBCONFS
from janitoo.options import JNTOptions
from janitoo_db.base import Base, create_db_engine
from janitoo_db.migrate import Config as alConfig, collect_configs, janitoo_config
##############################################################
#Check that we are in sync with the official command classes
#Must be implemented for non-regression
from janitoo.classes import COMMAND_DESC
COMMAND_DISCOVERY = 0x5000
assert(COMMAND_DESC[COMMAND_DISCOVERY] == 'COMMAND_DISCOVERY')
##############################################################
class JNTTModels(JNTTBase):
"""Test the models
"""
def tearDown(self):
#~ try:
#~ self.drop_all()
#~ except Exception:
#~ pass
JNTTBase.tearDown(self)
def setUp(self):
JNTTBase.setUp(self)
options = JNTOptions({'conf_file':self.getDataFile(self.models_conf)})
options.load()
self.dbengine = create_db_engine(options)
self.dbmaker = sessionmaker()
# Bind the sessionmaker to engine
self.dbmaker.configure(bind=self.dbengine)
self.dbsession = scoped_session(self.dbmaker)
self.drop_all()
self.options = options
def skipSqliteTest(self):
"""Skip a test for sqlite database
"""
if self.options.get_option('database', 'sqlalchemy.url').startswith('sqlite:'):
raise SkipTest("%s" % ("Skipped for sqlite database"))
def create_all(self):
Base.metadata.create_all(bind=self.dbengine)
def drop_all(self):
Base.metadata.drop_all(bind=self.dbengine)
try:
alembic_version.drop(bind=self.dbengine)
except Exception:
pass
class JNTTModelsCommon(object):
"""Common tests for models
"""
models_conf = "tests/data/janitoo_db.conf"
def test_001_versiondb(self):
self.drop_all()
config = alConfig(conf_file=self.models_conf)
config.initdb()
versions = config.versiondb()
self.assertTrue(len(versions)>0)
def test_002_heads(self):
self.drop_all()
config = alConfig(conf_file=self.models_conf)
heads = config.heads()
self.assertTrue(len(heads)>0)
def test_003_checkdb(self):
self.drop_all()
config = alConfig(conf_file=self.models_conf)
config.initdb()
self.assertTrue(config.checkdb())
config.downgrade()
self.assertFalse(config.checkdb())
class JNTTDbsModels(JNTTBase):
"""Tests for model on docker
"""
dbconf = ('sqlite', {'dbconf':'sqlite:////tmp/janitoo_tests.sqlite'})
def setUp(self):
JNTTBase.setUp(self)
tmp_conf = self.cpTempFile(self.models_conf)
options = JNTOptions(options={'conf_file':tmp_conf})
options.set_option('database', 'sqlalchemy.url', self.dbconf[1]['dbconf'])
self.models_conf = tmp_conf
self.dbengine = create_db_engine(self.dbconf[1]['dbconf'])
self.dbmaker = sessionmaker()
# Bind the sessionmaker to engine
self.dbmaker.configure(bind=self.dbengine)
self.dbsession = scoped_session(self.dbmaker)
self.drop_all()
    @classmethod
    def skipSqliteTest(cls):
        """Skip a test for sqlite database
        """
        if cls.dbconf[1]['dbconf'].startswith('sqlite:'):
            raise SkipTest("Skipped for sqlite database")
def tearDown(self):
#~ try:
#~ self.drop_all()
#~ except Exception:
#~ pass
JNTTBase.tearDown(self)
def create_all(self):
Base.metadata.create_all(bind=self.dbengine)
def drop_all(self):
Base.metadata.drop_all(bind=self.dbengine)
try:
alembic_version.drop(bind=self.dbengine)
except Exception:
pass
class JNTTDockerModels(JNTTDbsModels):
"""Tests for model on docker
"""
def setUp(self):
JNTTBase.onlyDockerTest()
JNTTDbsModels.setUp(self)
def jntt_models(module_name, cls, prefix='Class', dbs=None, skipcond=None):
"""Launch cls tests for every supported database
"""
try:
if skipcond is not None:
skipcond()
except Exception as e:
traceback.print_exc()
#~ print(e)
return
if dbs is None:
dbs = DBCONFS
for name, conf in dbs:
setattr(sys.modules[module_name], 'TestModels_%s_%s'%(prefix,name), type('TestModels_%s_%s'%(prefix,name), (JNTTDbsModels,cls), {'dbconf': (name, conf), '__qualname__':'TestModels_%s_%s'%(prefix,name), '__name__':'TestModels_%s_%s'%(prefix,name)}))
def jntt_docker_models(module_name, cls, prefix='Class', dbs=None, skipcond=None):
"""Launch cls tests for every supported database
"""
try:
if skipcond is not None:
skipcond()
except Exception as e:
traceback.print_exc()
#~ print(e)
return
if dbs is None:
dbs = DBCONFS
for name, conf in dbs:
setattr(sys.modules[module_name], 'TestModels_%s_%s'%(prefix,name), type('TestModels_%s_%s'%(prefix,name), (JNTTDockerModels,cls), {'dbconf': (name, conf)}))
class JNTTFullModels(JNTTBase):
"""Test the models
"""
db_uri = "sqlite:////tmp/janitoo_test/home/fullmodel.sqlite"
def setUp(self):
JNTTBase.setUp(self)
import janitoo_db.models
    @classmethod
    def skipSqliteTest(cls):
        """Skip a test for sqlite database
        """
        if cls.db_uri.startswith('sqlite:'):
            raise SkipTest("Skipped for sqlite database")
class JNTTDockerFullModels(JNTTFullModels):
"""Tests for full models on docker
"""
dbconf = ('sqlite', {'dbconf':'sqlite:////tmp/janitoo_tests.sqlite'})
def setUp(self):
JNTTFullModels.onlyDockerTest()
JNTTFullModels.setUp(self)
self.db_uri = self.dbconf[1]['dbconf']
def jntt_docker_fullmodels(module_name, cls, prefix='Class', dbs=None, skipcond=None):
"""Launch cls tests for every supported database
"""
try:
if skipcond is not None:
skipcond()
except Exception as e:
traceback.print_exc()
#~ print(e)
return
if dbs is None:
dbs = DBCONFS
for name, conf in dbs:
setattr(sys.modules[module_name], 'TestFullModels_%s_%s'%(prefix,name), type('TestFullModels_%s_%s'%(prefix,name), (JNTTDockerFullModels,cls), {'dbconf': (name, conf)}))
class JNTTFullModelsCommon(object):
"""Common tests for models
"""
def test_001_upgrade(self):
alcommand.upgrade(janitoo_config(self.db_uri), 'heads')
def test_002_downgrade(self):
alcommand.upgrade(janitoo_config(self.db_uri), 'heads')
alcommand.downgrade(janitoo_config(self.db_uri), 'base')
|
# Copyright 2018 Davide Spadini
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pydriller import Git, ModifiedFile
@pytest.fixture()
def modification(request):
path, commit = request.param
gr = Git(path)
yield gr.get_commit(commit).modified_files[0]
gr.clear()
@pytest.mark.parametrize('modification',
[("test-repos/diff", "9a985d4a12a3a12f009ef39750fd9b2187b766d1")],
indirect=True)
def test_extract_line_number_and_content(modification: ModifiedFile):
assert modification.diff_parsed
added = modification.diff_parsed['added']
deleted = modification.diff_parsed['deleted']
assert (127, ' RevCommit root = rw.parseCommit(headId);') in deleted
assert (128, ' rw.sort(RevSort.REVERSE);') in deleted
assert (129, ' rw.markStart(root);') in deleted
assert (130, ' RevCommit lastCommit = rw.next();') in deleted
assert (131, ' throw new RuntimeException("Changing this line " + path);') in added
@pytest.mark.parametrize('modification',
[("test-repos/diff", "f45ee2f8976d5f018a1e4ec83eb4556a3df8b0a5")],
indirect=True)
def test_additions(modification: ModifiedFile):
assert modification.diff_parsed
added = modification.diff_parsed['added']
deleted = modification.diff_parsed['deleted']
assert (127, ' RevCommit root = rw.parseCommit(headId);') in added
assert (128, ' rw.sort(RevSort.REVERSE);') in added
assert (129, ' rw.markStart(root);') in added
assert (130, ' RevCommit lastCommit = rw.next();') in added
assert (131, '') in added
assert len(deleted) == 0
assert len(added) == 5
@pytest.mark.parametrize('modification',
[("test-repos/diff", "147c7ce9f725a0e259d63f0bf4e6c8ac085ff8c8")],
indirect=True)
def test_deletions(modification: ModifiedFile):
assert modification.diff_parsed
added = modification.diff_parsed['added']
deleted = modification.diff_parsed['deleted']
assert (184, ' List<ChangeSet> allCs = new ArrayList<>();') in deleted
assert (221, ' private GregorianCalendar convertToDate(RevCommit revCommit) {') in deleted
assert (222, ' GregorianCalendar date = new GregorianCalendar();') in deleted
assert (223, ' date.setTimeZone(revCommit.getAuthorIdent().getTimeZone());') in deleted
assert (224, ' date.setTime(revCommit.getAuthorIdent().getWhen());') in deleted
assert (225, '') in deleted
assert (226, ' return date;') in deleted
assert (227, ' }') in deleted
assert (228, '') in deleted
assert (301, ' if(!collectConfig.isCollectingBranches())') in deleted
assert (302, ' return new HashSet<>();') in deleted
assert (303, '') in deleted
assert len(deleted) == 12
assert len(added) == 0
@pytest.mark.parametrize('modification',
[("test-repos/no_newline", "52a78c1ee5d100528eccba0a3d67371dbd22d898")],
indirect=True)
def test_diff_no_newline(modification: ModifiedFile):
"""
If a file ends without a newline git represents this with the additional line
\\ No newline at end of file
in diffs. This test asserts these additional lines are parsed correctly.
"""
assert modification.diff_parsed
added = modification.diff_parsed['added']
deleted = modification.diff_parsed['deleted']
assert (1, 'test1') in deleted # is considered as deleted as a 'newline' command is added
assert (1, 'test1') in added # now with added 'newline'
assert (2, 'test2') in added
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Pass.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Pass_Window(object):
def setupUi(self, Pass_Window):
Pass_Window.setObjectName("Pass_Window")
Pass_Window.resize(492, 302)
self.centralwidget = QtWidgets.QWidget(Pass_Window)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(90, 30, 271, 16))
self.label.setObjectName("label")
self.l_pass1 = QtWidgets.QLabel(self.centralwidget)
self.l_pass1.setGeometry(QtCore.QRect(60, 80, 81, 16))
self.l_pass1.setObjectName("l_pass1")
self.l_pass2 = QtWidgets.QLabel(self.centralwidget)
self.l_pass2.setGeometry(QtCore.QRect(60, 110, 81, 16))
self.l_pass2.setObjectName("l_pass2")
self.inputPass = QtWidgets.QLineEdit(self.centralwidget)
self.inputPass.setGeometry(QtCore.QRect(40, 190, 113, 20))
self.inputPass.setObjectName("inputPass")
self.btnChoicePass = QtWidgets.QPushButton(self.centralwidget)
self.btnChoicePass.setGeometry(QtCore.QRect(160, 190, 75, 23))
self.btnChoicePass.setObjectName("btnChoicePass")
self.btnVoltarPass = QtWidgets.QPushButton(self.centralwidget)
self.btnVoltarPass.setGeometry(QtCore.QRect(250, 190, 75, 23))
self.btnVoltarPass.setObjectName("btnVoltarPass")
self.l_defesa_pass = QtWidgets.QLabel(self.centralwidget)
self.l_defesa_pass.setGeometry(QtCore.QRect(140, 260, 251, 16))
self.l_defesa_pass.setText("")
self.l_defesa_pass.setObjectName("l_defesa_pass")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(40, 80, 47, 13))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(40, 110, 47, 13))
self.label_3.setObjectName("label_3")
Pass_Window.setCentralWidget(self.centralwidget)
self.retranslateUi(Pass_Window)
QtCore.QMetaObject.connectSlotsByName(Pass_Window)
def retranslateUi(self, Pass_Window):
_translate = QtCore.QCoreApplication.translate
Pass_Window.setWindowTitle(_translate("Pass_Window", "MainWindow"))
        self.label.setText(_translate("Pass_Window", "Escolha o software de ataque em Password cracking"))
self.l_pass1.setText(_translate("Pass_Window", "password 1"))
self.l_pass2.setText(_translate("Pass_Window", "password 2"))
self.btnChoicePass.setText(_translate("Pass_Window", "Escolher"))
self.btnVoltarPass.setText(_translate("Pass_Window", "Voltar"))
self.label_2.setText(_translate("Pass_Window", "1 - "))
self.label_3.setText(_translate("Pass_Window", "2 -"))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('relationships', '0009_auto_20150717_2035'),
]
operations = [
migrations.AddField(
model_name='offer',
name='is_mutate',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(default=b'', max_length=100, verbose_name='\u0421\u0442\u0430\u0442\u0443\u0441', choices=[(b'is_active', b'\xd0\xa3\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xba \xd0\xbf\xd1\x80\xd0\xb8\xd0\xbd\xd1\x8f\xd1\x82'), (b'is_canseled', b'\xd0\x9e\xd1\x82\xd0\xba\xd0\xb0\xd0\xb7 \xd0\xbe\xd1\x82 \xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0\xba\xd0\xb0'), (b'id_paid', b'\xd0\xa3\xd1\x81\xd0\xbf\xd0\xb5\xd1\x88\xd0\xbd\xd1\x8b\xd0\xb5 \xd0\xbe\xd1\x82\xd0\xbd\xd0\xbe\xd1\x88\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f'), (b'is_fail', b'\xd0\x9e\xd1\x82\xd0\xbd\xd0\xbe\xd1\x88\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xbf\xd1\x80\xd0\xb5\xd1\x80\xd0\xb2\xd0\xb0\xd0\xbd\xd1\x8b')]),
),
]
|
"""Form management utilities."""
import abc
import re
from collections import OrderedDict
from django.forms import TypedChoiceField
from django.forms.fields import Field
from django.forms.widgets import RadioSelect
from django.shortcuts import render
from django.utils.encoding import force_str
from django.utils.translation import gettext as _, gettext_lazy
from modoboa.lib.exceptions import BadRequest
from modoboa.lib.web_utils import render_to_json_response
ABC = abc.ABCMeta(force_str("ABC"), (object,), {})
class WizardStep(object):
"""A wizard step."""
def __init__(self, uid, formclass, title, formtpl=None, new_args=None):
"""Constructor."""
self.uid = uid
self._cls = formclass
self.title = title
self.formtpl = formtpl
self._new_args = new_args
self._prev = None
self._next = None
self.form = None
@property
def prev(self):
return self._prev
@prev.setter
def prev(self, step):
self._prev = step
@property
def next(self):
return self._next
@next.setter
def next(self, step):
self._next = step
def check_access(self, wizard):
"""Check if this step should be displayed or not."""
return True
def create_form(self, data=None):
"""Instantiate a new form."""
args = []
if self._new_args is not None:
args += self._new_args
if data:
args.append(data)
self.form = self._cls(*args)
class WizardForm(ABC):
"""Custom wizard."""
template_name = "common/wizard_forms.html"
def __init__(self, request, submit_button_label=None):
self.request = request
self.steps = []
self._submit_button_label = submit_button_label
@property
def submit_button_label(self):
if self._submit_button_label is None:
self._submit_button_label = _("Submit")
return self._submit_button_label
@property
def errors(self):
result = {}
for step in self.steps:
for name, value in list(step.form.errors.items()):
if name == "__all__":
continue
result[name] = value
return result
@property
def first_step(self):
"""Return the first step."""
return self.steps[0] if self.steps else None
def add_step(self, step):
"""Add a new step to the wizard."""
if self.steps:
step.prev = self.steps[-1]
self.steps[-1].next = step
self.steps += [step]
def create_forms(self, data=None):
for step in self.steps:
step.create_form(data)
def _get_step_id(self):
"""Retrieve the step identifier from the request."""
stepid = self.request.POST.get("stepid", None)
if stepid is None:
raise BadRequest(_("Invalid request"))
stepid = int(stepid.replace("step", ""))
if stepid < 0:
raise BadRequest(_("Invalid request"))
return min(stepid, len(self.steps))
def previous_step(self):
"""Go back to the previous step."""
stepid = self._get_step_id()
stepid -= 2
self.create_forms(self.request.POST)
for step in self.steps:
step.form.is_valid()
while stepid >= 0:
if self.steps[stepid].check_access(self):
break
stepid -= 1
return render_to_json_response({
"title": self.steps[stepid].title, "id": self.steps[stepid].uid,
"stepid": stepid
})
def next_step(self):
"""Go to the next step if previous forms are valid."""
stepid = self._get_step_id()
self.create_forms(self.request.POST)
statuses = []
for cpt in range(0, stepid):
if self.steps[cpt].check_access(self):
statuses.append(self.steps[cpt].form.is_valid())
if False in statuses:
return render_to_json_response({
"stepid": stepid, "id": self.steps[stepid - 1].uid,
"form_errors": self.errors
}, status=400)
while stepid < len(self.steps):
if self.steps[stepid].check_access(self):
break
stepid += 1
if stepid == len(self.steps):
return self.done()
return render_to_json_response({
"title": self.steps[stepid].title, "id": self.steps[stepid].uid,
"stepid": stepid
})
def extra_context(self, context):
"""Provide additional information to template's context.
"""
pass
def process(self):
"""Process the request."""
if self.request.method == "POST":
if self.request.POST.get("target", "next") == "next":
return self.next_step()
return self.previous_step()
self.create_forms()
context = {"wizard": self}
self.extra_context(context)
return render(self.request, self.template_name, context)
@abc.abstractmethod
def done(self):
"""Method to exexute when all steps are validated.
Must be implemented by all sub classes.
:rtype: HttpResponse
"""
class DynamicForm(object):
"""
A form which accepts dynamic fields.
We consider a field to be dynamic when it can appear multiple
times within the same request.
"""
fields = {}
data = {}
def _create_field(self, typ, name, value=None, pos=None):
"""Create a new form field.
"""
self.fields[name] = typ(label="", required=False)
if value is not None:
self.fields[name].initial = value
if pos:
order = list(self.fields.keys())
order.remove(name)
order.insert(pos, name)
self.fields = OrderedDict((key, self.fields[key]) for key in order)
def _load_from_qdict(self, qdict, pattern, typ):
"""Load all instances of a field from a ``QueryDict`` object.
:param ``QueryDict`` qdict: a QueryDict object
:param string pattern: pattern used to find field instances
:param typ: a form field class
"""
expr = re.compile(r'%s_\d+' % pattern)
values = []
for k, v in list(qdict.items()):
if k == pattern or expr.match(k):
values.append((k, v))
ndata = self.data.copy()
values.reverse()
for v in values:
if v[0] in self.fields:
continue
self._create_field(typ, v[0])
ndata[v[0]] = v[1]
self.data = ndata
class TabForms(object):
"""
Simple forms container.
This class tries to encapsulate multiple forms that will be
displayed using tabs. It is different from a classical formset
because it can contain different forms.
"""
template_name = "common/tabforms.html"
def __init__(self, request, instances=None, classes=None):
self.request = request
self.instances = {}
to_remove = []
for fd in self.forms:
args = []
kwargs = {}
if "new_args" in fd:
args += fd["new_args"]
if request.method == "POST":
args.append(request.POST)
if instances is not None:
self.instances = instances
mname = "check_%s" % fd["id"]
if hasattr(self, mname):
if not getattr(self, mname)(instances[fd["id"]]):
to_remove += [fd]
continue
kwargs["instance"] = instances[fd["id"]]
if classes is not None and fd["id"] in classes:
fd["instance"] = classes[fd["id"]](*args, **kwargs)
else:
fd["instance"] = fd["cls"](*args, **kwargs)
self.forms = [form for form in self.forms if form not in to_remove]
if self.forms:
self.active_id = self.forms[0]["id"]
def _before_is_valid(self, form):
return True
@property
def errors(self):
"""Return validation errors.
We aggregate all form errors into one dictionary.
:rtype: dict
"""
result = {}
for f in self.forms:
for name, value in list(f["instance"].errors.items()):
if name == "__all__":
continue
result[name] = value
return result
def is_valid(self, mandatory_only=False, optional_only=False):
"""Check if the form is valid.
:param boolean mandatory_only:
:param boolean optional_only:
"""
to_remove = []
for f in self.forms:
if mandatory_only and \
("mandatory" not in f or not f["mandatory"]):
continue
elif optional_only and ("mandatory" in f and f["mandatory"]):
continue
if not self._before_is_valid(f):
to_remove.append(f)
continue
if not f["instance"].is_valid():
self.active_id = f["id"]
return False
self.forms = [f for f in self.forms if f not in to_remove]
return True
@abc.abstractmethod
def save(self):
"""Save objects here.
"""
def remove_tab(self, tabid):
for f in self.forms:
if f["id"] == tabid:
self.forms.remove(f)
break
def __iter__(self):
return self.forward()
def forward(self):
for form in self.forms:
yield form
def extra_context(self, context):
""""Provide additional information to template's context.
"""
pass
@abc.abstractmethod
def done(self):
"""Actions to execute after the form has been validated and saved.
:rtype: HttpResponse instance
"""
def process(self):
"""Process the received request.
"""
if self.request.method == "POST":
if self.is_valid():
self.save()
return self.done()
return render_to_json_response(
{"form_errors": self.errors}, status=400
)
context = {
"tabs": self,
}
if self.forms:
context.update({
"action_label": _("Update"),
"action_classes": "submit",
})
self.extra_context(context)
active_tab_id = self.request.GET.get("active_tab", "default")
if active_tab_id != "default":
context["tabs"].active_id = active_tab_id
return render(self.request, self.template_name, context)
class UserKwargModelFormMixin:
"""Simple form mixin to add support for user kwargs in constructor."""
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
#
# Custom fields from here
#
class HorizontalRadioSelect(RadioSelect):
template_name = "common/horizontal_select.html"
option_template_name = "common/horizontal_select_option.html"
class SeparatorField(Field):
"""Custom field to represent a separator."""
def __init__(self, *args, **kwargs):
kwargs["required"] = False
super(SeparatorField, self).__init__(*args, **kwargs)
class YesNoField(TypedChoiceField):
"""A yes/no form field."""
def __init__(self, *args, **kwargs):
"""Constructor."""
kwargs.update({
"choices": (
(True, gettext_lazy("Yes")),
(False, gettext_lazy("No"))
),
"widget": HorizontalRadioSelect(),
"coerce": lambda x: x == "True"
})
super(YesNoField, self).__init__(*args, **kwargs)
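# Usage sketch (hypothetical form, for illustration only):
#   class NotificationsForm(forms.Form):
#       enabled = YesNoField(label="Enable notifications")
# The widget renders horizontal Yes/No radio buttons, and the submitted
# "True"/"False" string is coerced back to a Python bool.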
|
from typing import List
from sqlalchemy import func, and_
from sqlalchemy.orm.exc import NoResultFound
from bitcoin_acks.constants import ReviewDecision
from bitcoin_acks.database import session_scope
from bitcoin_acks.github_data.graphql_queries import (
comments_graphql_query,
reviews_graphql_query
)
from bitcoin_acks.github_data.repositories_data import RepositoriesData
from bitcoin_acks.github_data.users_data import UsersData
from bitcoin_acks.models import Comments
class CommentsData(RepositoriesData):
def __init__(self, repository_path: str, repository_name: str):
super(CommentsData, self).__init__(repository_path=repository_path,
repository_name=repository_name)
@staticmethod
def get_review_count(pull_request_id: str, pull_request_author_id) -> int:
with session_scope() as session:
review_count = (
session.query(func.count(Comments.id))
.filter(
and_(
Comments.pull_request_id == pull_request_id,
Comments.review_decision != ReviewDecision.NONE,
Comments.author_id != pull_request_author_id
)
).scalar()
)
return review_count
def get_all(self, pull_request_number: int):
for query, nested_name in (
(comments_graphql_query, 'comments'),
(reviews_graphql_query, 'reviews')):
received_count = 0
first_cursor = None
variables = {
'commentsLast': 100,
'prNumber': pull_request_number
}
while True:
if first_cursor is not None:
variables['commentsCursorBefore'] = first_cursor
json_object = {
'query': query,
'variables': variables
}
data = self.graphql_post(json_object=json_object).json()
results = data['data']['repository']['pullRequest'][nested_name]
expected_count = results['totalCount']
if not expected_count:
break
comments = results['edges']
received_count += len(comments)
first_cursor = comments[0]['cursor']
comments = [c['node'] for c in comments]
for comment in comments:
yield comment
if received_count >= expected_count:
break
@staticmethod
def identify_review_decision(text: str) -> ReviewDecision:
        text = text.lower()
        # Match the more specific patterns first: 'untested ack' contains
        # 'tested ack' as a substring, so it must be checked before it.
        if 'concept ack' in text or 're-ack' in text:
            return ReviewDecision.CONCEPT_ACK
        elif 'utack' in text or 'untested ack' in text:
            return ReviewDecision.UNTESTED_ACK
        elif 'tested ack' in text:
            return ReviewDecision.TESTED_ACK
        elif 'tack ' in text:
            return ReviewDecision.TESTED_ACK
        elif 'nack' in text:
            return ReviewDecision.NACK
        elif text.startswith('ack '):
            return ReviewDecision.TESTED_ACK
        else:
            return ReviewDecision.NONE
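    # Examples:
    #   identify_review_decision("utACK abc123")  -> ReviewDecision.UNTESTED_ACK
    #   identify_review_decision("Concept ACK")   -> ReviewDecision.CONCEPT_ACK
    #   identify_review_decision("NACK, this would break fee estimation") -> ReviewDecision.NACK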
def upsert(self, pull_request_id: str, data: dict) -> bool:
data['body'] = data['body'].replace('\x00', '')
review_decision = self.identify_review_decision(data['body'])
author = data.pop('author')
author_id = UsersData().upsert(data=author)
with session_scope() as session:
try:
record = (
session.query(Comments)
.filter(Comments.id == data['id'])
.one()
)
except NoResultFound:
record = Comments()
record.pull_request_id = pull_request_id
record.author_id = author_id
session.add(record)
for key, value in data.items():
setattr(record, key, value)
record.auto_detected_review_decision = review_decision
if review_decision == ReviewDecision.NONE:
return False
return True
def bulk_upsert(self, pull_request_id: str, comments: List[dict]):
ack_comment_authors = []
comments = sorted(comments,
key=lambda k: k['publishedAt'],
reverse=True)
comments = [c for c in comments if c['body'] and c['author'] is not None]
for comment in comments:
comment_author_name = comment['author']['login']
if comment_author_name not in ack_comment_authors:
is_review_decision = self.upsert(pull_request_id=pull_request_id,
data=comment)
if is_review_decision:
ack_comment_authors.append(comment_author_name)
if __name__ == '__main__':
comments_data = CommentsData(repository_name='bitcoin',
repository_path='bitcoin')
comments = [c for c in comments_data.get_all(pull_request_number=10637)]
print(len(comments))
|
import logging
from flask.blueprints import Blueprint
from waitlist.permissions import perm_manager
from flask_login import login_required, current_user
from waitlist.base import db
from waitlist.storage.database import Account, AccountNote
from flask.templating import render_template
import flask
from flask.globals import request
from werkzeug.utils import redirect
from flask.helpers import url_for
from waitlist.utility.constants import account_notes
from waitlist.utility.account.notes.note_renderer import render_note_text
bp = Blueprint('accounts_profile', __name__)
logger = logging.getLogger(__name__)
perm_manager.define_permission('view_profile')
perm_manager.define_permission('profile_notes_add')
perm_manager.define_permission('view_notes_high')  # < 500
perm_manager.define_permission('view_notes_low')  # < 100
perm_manager.define_permission('view_notes_med')  # < 200
perm_manager.define_permission('view_notes_all')
perm_manager.define_permission('view_notes')
@bp.route("/<int:accountid>", methods=["GET"])
@login_required
@perm_manager.require('view_profile')
def profile(accountid):
account = db.session.query(Account).get(accountid)
if account is None:
flask.abort(404, "Account not found!")
max_restriction_level = 0
if perm_manager.get_permission('view_notes_low').can():
max_restriction_level = 100
if perm_manager.get_permission('view_notes_med').can():
max_restriction_level = 200
if perm_manager.get_permission('view_notes_high').can():
max_restriction_level = 500
criterion = (AccountNote.accountID == accountid)
if not perm_manager.get_permission("view_notes_all").can():
criterion = criterion & (AccountNote.restriction_level < max_restriction_level)
notes = db.session.query(AccountNote).filter(criterion).all()
return render_template('account/profile.html', account=account,
notes=notes,
note_renderer=render_note_text)
@bp.route("/byname/<path:username>", methods=["GET"])
@login_required
@perm_manager.require('view_profile')
def profile_by_name(username):
account = db.session.query(Account).filter(Account.username == username).first()
if account is None:
flask.abort(404, "Account not found!")
return profile(account.id)
@bp.route('/<int:accountid>/notes/add', methods=['POST'])
@login_required
@perm_manager.require('profile_notes_add')
def notes_add(accountid):
note = request.form['note']
if note is None or note == '':
flask.abort(400, 'Note can not be empty')
restriction_level = int(request.form['restriction_level'])
history_entry = AccountNote(accountID=accountid,
byAccountID=current_user.id, note=note,
restriction_level=restriction_level,
type=account_notes.TYPE_HUMAN)
db.session.add(history_entry)
db.session.commit()
return redirect(url_for('.profile', accountid=accountid))
|
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os, os.path
import shutil
import logging
from DynamicSchedulerGeneric import PersistentEstimators
class BasicEstimatorWrapper(PersistentEstimators.BasicEstimator):
def __init__(self, keepfiles=False):
self.sampleNumber = 1000
self.storeDir = "/tmp/dynschedtest"
if os.path.exists(self.storeDir) and not keepfiles:
shutil.rmtree(self.storeDir)
if not os.path.exists(self.storeDir):
os.mkdir(self.storeDir)
self.now = 1000
self.buffer = dict()
self.nqueued = dict()
self.nrun = dict()
self.localERT = dict()
self.localWRT = dict()
def setERT(self, qName, value):
self.localERT[qName] = value
def setWRT(self, qName, value):
self.localWRT[qName] = value
class EstimatorsTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_BasicEstimator_ok(self):
estimator = BasicEstimatorWrapper()
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_1', 'start' : 500})
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_2', 'start' : 200})
estimator.register({'queue' : 'dteam', 'state' : 'queued', 'jobid': 'crea_3',})
estimator.register({'queue' : 'dteam', 'state' : 'queued', 'jobid': 'crea_4',})
estimator.estimate()
self.assertTrue(estimator.localERT['dteam'] == 1300)
def test_BasicEstimator_empty(self):
estimator = BasicEstimatorWrapper()
estimator.estimate()
self.assertTrue(len(estimator.localERT) == 0)
def test_BasicEstimator_multi_estimate(self):
estimator = BasicEstimatorWrapper()
estimator.now = 2000
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_1', 'start' : 500})
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_2', 'start' : 200})
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_3', 'start' : 1200})
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_4', 'start' : 1700})
estimator.register({'queue' : 'dteam', 'state' : 'queued', 'jobid': 'crea_5',})
estimator.register({'queue' : 'dteam', 'state' : 'queued', 'jobid': 'crea_6',})
estimator.estimate()
estimator = BasicEstimatorWrapper(True)
estimator.now = 3000
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_3', 'start' : 1200})
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_4', 'start' : 1700})
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_5', 'start' : 2200})
estimator.register({'queue' : 'dteam', 'state' : 'running', 'jobid': 'crea_6', 'start' : 2400})
estimator.register({'queue' : 'dteam', 'state' : 'queued', 'jobid': 'crea_7',})
estimator.register({'queue' : 'dteam', 'state' : 'queued', 'jobid': 'crea_8',})
estimator.register({'queue' : 'dteam', 'state' : 'queued', 'jobid': 'crea_9',})
estimator.estimate()
self.assertTrue(estimator.localERT['dteam'] == 2600)
if __name__ == '__main__':
if os.path.exists('logging.conf'):
import logging.config
logging.config.fileConfig('logging.conf')
else:
logging.basicConfig()
unittest.main()
|
from django.apps import AppConfig
class ArtworkConfig(AppConfig):
name = 'artuium_server.artwork'
|
import mysql.connector
import sys
db = mysql.connector.connect(host = "localhost",
user = "dbuser04",
passwd = "salasana",
db = "pizzaDB",
buffered = True)
cur = db.cursor()
# main() is the game core; it is called at the end of the file.
def main():
# Initialize player location
location = ""
action = ""
    # 'action' is the command verb; 'target' is the object it acts on.
    # Don't ask.
title = "*"*40 + "\n*" + '{:>28}'.format('PIZZA-HAT EXPRESS') + '{:>11}'.format('*') + ("\n*" + '{:>39}'.format('*')) + '{:>2}'.format('\nTHE BEST TEXT ADVENTURE GAME IN THE WORLD') + ("\n*" + '{:>39}'.format('*'))*2 + "\n" + "*"*40
print(title + "\n")
intro = "\n\nYour name is Jack. The year is 2318. You are just an ordinary pizza delivery guy for an intergalactic pizza company.\n\nYou have just woken from a late night shift. There seems to be some email on the computer. Please experience the world around you and maybe put on some clothes.\n\n"
myprint(intro)
while action!="quit" or location!="EXIT":
# location is current location
locationID = getLocID()
location = getLocName()
# input_command is the initial inputted command
input_command=input("> ").split()
filters = ["and","for","the","with","a","an","at","of","on","in"," ", "'"]
# final_command is the command that is fed to the game
final_command = []
for x in input_command:
if x in filters:
x = x.replace(x, "")
if len(x) > 0:
final_command.append(x)
if len(final_command) >= 1:
action = final_command[0].lower()
else:
action = ""
if len(final_command) >= 2:
            target = final_command[len(final_command)-1].lower()  # lower() converts the string to lowercase
else:
target = ""
if action == "get" or action == "take":
if target!="":
getFunc(target)
else:
print("It's just a verb!, Try again!")
elif action == "i" or action == "inventory":
inventoryfunc()
elif action == "combine":
combFunc(final_command)
elif action == "help":
getHelp()
elif action == "open":
if target != '':
if len(final_command) == 3:
objectname = final_command[1]
                    openFunc(locationID, target, objectname)
else:
openFunc(locationID, target)
else:
print("Try again")
elif action == "use":
if target != '':
useFunc(target, locationID)
elif action == "press":
pressFunc(locationID)
elif action == "show" or action == "look" or action == "examine":
if target != "":
showitemfunc(target)
else:
lookaroundfunc()
elif action == "n" or action == 's' or action == 'w' or action == 'e' or action == 'north' or action == 'south' or action == 'west' or action == 'east':
movefunc(action)
elif action == 'map':
getmap()
else:
if action == 'quit':
return
print("I dont understand this command")
def showitemfunc(target):
try:
# Get object descriptions from the same placeID that the player is in
cur.execute("SELECT objectID, description \
FROM object, player WHERE player.placeID = object.placeID AND object.name = '%s';" % (target))
item_desc = cur.fetchall()
# Get item names and descriptions from the objects
cur.execute("SELECT name, description FROM item WHERE objectID = '%i';" % item_desc[0][0])
items = cur.fetchall()
except IndexError:
desc = ''
# If error is raised check player inventory
for x in getInventory():
if x[0] == target:
desc = x[1]
if len(desc) > 1:
myprint(desc)
else:
print("This place doesn't contain this object")
else:
myprint(item_desc[0][1])
for i in items:
print("| " + i[0] + " |",end=" : ")
print(i[1])
def lookaroundfunc():
cur.execute("SELECT place.description, player.placeID FROM place, player WHERE player.placeID = place.placeID;")
rez = cur.fetchall()
myprint(rez[0][0])
cur.execute("SELECT name FROM object WHERE object.placeID = '%i';" % (rez[0][1]))
objects = cur.fetchall()
print("\nIn this place are:", end="\n")
for i in objects:
myprint(i[0])
print("\nInput 'show' and object, if you want to see it.")
def movefunc(dist):
cur.execute("SELECT placeID FROM player;")
player_placeid = cur.fetchall()[0][0]
try:
cur.execute("SELECT whereTo, objectID FROM movingtable WHERE placeID = '%i' AND \
direction = '%s';" % (player_placeid, dist))
rez = cur.fetchall()
whereTo = rez[0][0]
objectID = rez[0][1]
except IndexError:
print("you can't go there!")
else:
if objectID != None:
cur.execute("SELECT locked, name FROM object WHERE ObjectID = '%i';" % objectID)
rez = cur.fetchall()
locked = rez[0][0]
name = rez[0][1]
if locked == 1:
print("You can't go there. A " + name + " is blocking the way")
return
cur.execute("UPDATE player SET placeID = %i WHERE player.playerID = 1" % (whereTo))
cur.execute("SELECT name FROM place WHERE placeID = '%i'" % (whereTo))
new_place_name = cur.fetchall()[0][0]
print("Your location is: " + new_place_name)
def inventoryfunc():
cur.execute("SELECT name FROM item WHERE playerID = 1;")
all_items = cur.fetchall()
print("You are carrying: ", end=" ")
for i in all_items:
print(" | " + i[0] + " | ", end=" ")
print()
def getInventory():
cur.execute("SELECT item.name, item.description \
FROM item INNER JOIN player \
ON item.playerID = player.playerID")
carrying = cur.fetchall()
return carrying
def getAction(Id, Req): # For getting actions, Req 0 = Object, Req 1 = Item
Type = '' # Id is the id of object or item
if Req == 0:
Type = 'Object'
elif Req == 1:
Type = 'Item'
sql = "SELECT actiontable.description FROM actiontable \
JOIN %s \
WHERE actiontable.actionID = %s.actionID \
AND %s.%sID = %i " % (Type, Type, Type, Type, Id)
try:
cur.execute(sql)
result = cur.fetchall()
        return result[0]
except IndexError:
return None
def getFunc(target):
cur.execute("SELECT placeID FROM player;")
player_placeid = cur.fetchall()
sql = "SELECT item.name FROM item, object \
WHERE item.objectID = object.objectID AND object.placeID = '%i'" % (player_placeid[0][0])
cur.execute(sql)
item_list = cur.fetchall()
ilist = [] #new list for items
for i in item_list:
ilist.append(*i) #append all items to list
if target in ilist:
cur.execute("SELECT itemID FROM item WHERE name = '%s';" % (target))
target_item_id = cur.fetchall()[0][0]
cur.execute("SELECT playerID FROM item WHERE name = '%s';" % (target))
player_id = cur.fetchall()[0][0]
action = getAction(target_item_id, 1)
if action != None:
if 'UPDATE' in action[0]:
cur.execute(action[0])
else:
print(action[0])
if player_id != 1:
cur.execute("UPDATE item SET playerID = 1 WHERE itemID = '%i'" % (target_item_id))
cur.execute("UPDATE item SET objectID = NULL WHERE itemID = '%i'" % (target_item_id))
print("You pick up: " + target)
else:
print("This item is already in your inventory")
else:
print("This item is not found")
def getLocName():
sql = "SELECT place.name \
FROM place join player \
WHERE place.placeID = player.placeID"
cur.execute(sql)
result = cur.fetchall()
return(result[0][0])
def getLocID():
sql = "SELECT player.placeID \
FROM player"
cur.execute(sql)
result = cur.fetchall()
return(result[0][0])
def openFunc(loc, request, *objectname):
    multiple = 'There are multiple objects:' # String for multiple objects
y = 0 # Used for counting
objectID = None # object.objectID
objectType = getObjectType(request, loc) # objecttype.typeID
action = None # actiontable.actionID
if objectType != None: # if objectType has been initialized get results
sql = "SELECT object.objectID, object.name, objecttype.typename, object.actionID, object.usable \
FROM objecttype join object \
WHERE object.typeID = objecttype.typeID and object.placeID = %i and objecttype.typeID = %i" % (loc, objectType)
cur.execute(sql)
result = cur.fetchall()
else:
print("You can't do that!")
return
if objectname != (): # If objectname is given
objectname = ''.join(objectname) # Make it into string
for x in result:
if objectname == x[1] and x[4] == 1: # If objectname matches and
objectID = x[0] # is usable -> Set objectID.
action = getAction(objectID, 0) # Get actions
if objectID == None: #If no objectID was stored
print("You can't do that!")
elif action == None and objectID != None: #If objectID was found and no action
print("Jack opens the " + objectname + ' ' + request)
cur.execute("UPDATE object SET locked = 0 WHERE object.objectID = %i;" % objectID)
return
elif len(result) > 1: # If there is multiple objects
for x in result:
y += 1 # start the counter
multiple += ' ' + x[1] + ' ' + x[2] # Add to the string
            if y != len(result): # While y is smaller than the length of result
multiple += ',' # add a comma
print(multiple) # print the string
return
elif len(result) == 1 : # If there is only one result
if result[0][4] == 1: # If it's usable
action = getAction(result[0][0], 0) # Get actions
objectname = result[0][2] # Set objectname
objectID = result[0][0]
else:
print("The "+ result[0][1] + " is not usable")
if action != None: # If there is actions
print(action[0])
elif action == None and request == objectname:
print("Jack opens the " + objectname)
cur.execute("UPDATE object SET locked = 0 WHERE object.objectID = %i;" % objectID)
return
def getObjectType(request, loc): # Getting the objecttype.typeID
typename = request
sql = "SELECT objecttype.typename, object.typeID \
FROM objecttype join object \
WHERE objecttype.typename = '%s' and objecttype.typeID = object.typeID and object.placeID = %i" % (typename, loc)
cur.execute(sql)
result = cur.fetchall()
for x in result:
if typename == x[0]:
return x[1]
def useFunc(target, locID):
try:
cur.execute("SELECT object.name, object.actionID, object.objectID FROM object WHERE name = '%s' and placeID = '%i' " % (target, locID))
result = cur.fetchall()
if result[0][1] != None:
myprint(getAction(result[0][2], 0)[0])
else:
print("You can't use that!")
    except IndexError:
        print("You can't use that!")
# Directions the player can move to from the current square
def moving():
sql = "SELECT movingTable.direction \
FROM player, place, movingTable \
WHERE player.placeID = place.placeID \
AND place.placeID = movingTable.placeID"
cur.execute(sql)
move = cur.fetchall()
    movements = [row[0][0] for row in move] # first letter of each direction, e.g. ['n', 'e']
    return movements
def mapbase():
base = list(" _______\n|\t|\n|\t|\n|_______|")
return base
# Map of the square the player is currently in
def getmap():
base = mapbase()
movements = moving()
for x in movements:
if x == "n":
base[4] = "n"
elif x == "w":
base[13] = "w"
elif x == "e":
base[15] = "e"
elif x == "s":
base[21] = "s"
for x in base:
print(x, end='')
print('\n')
def storyMode(index): # Used for storytelling. Activated by pressing by buttons.
if index == 1:
ask = input("Are you sure you want to advance to the next area ? (Y/N) ")
if ask == 'yes' or ask == 'Y' or ask == 'y':
wait = 0
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 995 AND 998")
result = cur.fetchall()
if wait == 0:
myprint(result[0][0])
while wait == 0:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
myprint(result[1][0])
while wait == 1:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
myprint(result[2][0])
while wait == 2:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
myprint(result[3][0])
cur.execute("SELECT planet.name, planet.description FROM planet WHERE planet.planetID BETWEEN 21 and 26")
result = cur.fetchall()
y = 1
print("SYSTEM: " + '{:>2}'.format('PROTEUS'))
for x in result:
print(str(y) + '{:>25}'.format(x[0]))
y += 1
while True:
try:
command = int(input("\nORBITAL BODIES DISCOVERED! EMERCENCY LANDING POSSIBLE! CHOOSE A PLANET TO LAND: "))
if command > 6 or command < 1:
continue
except ValueError:
continue
break
myprint(result[command - 1][1])
if command == 4 or command == 6:
print("GAME OVER LOSER")
gameOver(result[command -1][0])
else:
cur.execute("UPDATE player SET placeID = 24 WHERE playerID = 1" )
                myprint('\nJack crashes on ' + result[command - 1][0] + ' and barely makes it out alive.\n\nThe poorly fitted spaceship is badly damaged and Jack has to repair the engine before continuing his journey.')
elif ask == 'n' or ask == 'N' or ask == 'no':
print("press again when you are ready")
return
else:
return
elif index == 2:
wait = 0
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 899 AND 993")
result = cur.fetchall()
inventory = getInventory()
license = False
for x in inventory:
if x[0] == 'drivers-license':
license = True
if license == False:
print("Please pick up the required drivers-license first")
return
myprint('\n' + result[1][0] + '\n')
while wait == 0:
command = input("> ")
myprint(result[0][0])
wait += 1
myprint('\n' + result[2][0] + '\n')
while wait == 1:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
myprint('\n' + result[3][0] + '\n')
print("")
myprint('\n' + result[4][0] + '\n')
print("")
cur.execute("UPDATE player SET placeID = 2;")
target = "starship-key-card"
getFunc(target)
return
elif index == 3:
cur.execute("SELECT itemID FROM item WHERE playerID = 1;")
all_items = cur.fetchall()
r=0
for x in all_items:
if 41 in x or 42 in x or 43 in x or 44 in x:
r+=1
if r != 4:
print("You dont have all the ingredients to make the pizza.")
return
wait = 0
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 1040 AND 1041")
result = cur.fetchall()
if wait == 0:
myprint('\n' + result[0][0] + '\n')
while wait == 0:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
else:
print("Probably best if I wait for the pizza to be ready.")
print('\n' + result[1][0] + '\n')
cur.execute("UPDATE item SET objectID = 46 WHERE itemID = 46")
cur.execute("UPDATE item SET playerID = NULL WHERE itemID = 41")
cur.execute("UPDATE item SET playerID = NULL WHERE itemID = 42")
cur.execute("UPDATE item SET playerID = NULL WHERE itemID = 43")
cur.execute("UPDATE item SET playerID = NULL WHERE itemID = 44")
target = "pizza"
getFunc(target)
elif index == 6:
wait = 0
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 1042 AND 1044")
result = cur.fetchall()
if wait == 0:
myprint('\n' + result[0][0] + '\n')
while wait == 0:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
myprint('\n' + result[1][0] + '\n')
while wait == 1:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
myprint('\n' + result[2][0] + '\n')
cur.execute("UPDATE item SET objectID = 41 WHERE itemID = 411;")
target = "secret-note"
getFunc(target)
cur.execute("UPDATE player SET placeID = 43")
cur.execute("UPDATE movingtable SET placeID = NULL WHERE moveID = 429")
else:
myprint(result[2][0])
gameOver("Proteus")
elif index == 4:
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 895 AND 897")
result = cur.fetchall()
selection = ''
myprint(result[0][0])
while len(selection) < 3:
number = input("> ")
print("*beep*")
selection += number
if selection == '123':
myprint(result[1][0])
cur.execute("UPDATE object SET placeID = 27 WHERE objectID = 236")
else:
myprint(result[2][0])
gameOver("Proteus")
elif index == 5:
wait = 0
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 880 AND 883")
result = cur.fetchall()
while wait == 0:
myprint(result[0][0])
command = input(">")
if command == 'wait' or command == 'WAIT':
wait += 1
myprint(result[1][0])
myprint(result[2][0])
myprint(result[3][0])
while wait == 1:
command = input(">")
if command == 'wait' or command == 'WAIT':
wait += 1
else:
print("Mayby you should wait for the creatures to leave the shop!")
myprint("Once the creatures leave you push the sewer cover aside and a path opens which you can use to enter the shop.")
cur.execute("UPDATE movingTable SET placeID = 26 WHERE moveID = 29")
elif index == 9:
wait = 0
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 884 AND 889")
result = cur.fetchall()
ask = input("Are you sure you want to advance to the next area ? (Y/N) ")
if ask == 'yes' or ask == 'Y' or ask == 'y':
cur.execute("UPDATE item SET playerID = NULL WHERE itemID = 288")
myprint(result[0][0])
myprint(result[1][0])
while wait == 0:
command = input("> ").split()
if 'hit' in command:
wait += 1
else:
print("Please hit the dashboard.")
myprint(result[2][0])
myprint(result[3][0])
while wait == 1:
command = input("> ").split()
                if 'sleep' in command or 'rest' in command or 'wait' in command:
wait += 1
else:
print("You need to rest now!")
print("...\n...\n...\nzzz\nzZz\n...\nzZZ\n..")
myprint(result[4][0])
myprint(result[5][0])
print("\n\nJack arrives at HIP-17710 refueling station.")
cur.execute("UPDATE player SET placeID = 31 WHERE playerID = 1")
elif index == 10:
cur.execute("SELECT planet.name FROM planet WHERE planet.planetID BETWEEN 31 and 34")
result = cur.fetchall()
y = 1
print("::Public transportation system::")
for x in result:
print(str(y) + '{:>25}'.format(x[0]))
y += 1
        while True:
            try:
                selection = int(input("\nWelcome to the public transportation system. Choose a planet to travel to: "))
                if 1 <= selection <= 4:
                    break
                print("Please use values between 1 and 4.")
            except ValueError:
                print("Please use values between 1 and 4.")
if selection == 1:
print("\nJack travels to %s" % result[selection - 1][0])
cur.execute("UPDATE player SET placeID = 32 WHERE playerID = 1")
elif selection == 2:
print("\nJack travels to %s" % result[selection - 1][0])
cur.execute("UPDATE player SET placeID = 33 WHERE playerID = 1")
elif selection == 3:
print("\nJack travels to %s" % result[selection - 1][0])
cur.execute("UPDATE player SET placeID = 34 WHERE playerID = 1")
elif selection == 4:
print("\nJack travels to %s" % result[selection - 1][0])
cur.execute("UPDATE player SET placeID = 31 WHERE playerID = 1")
elif index == 11:
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 770 AND 772")
result = cur.fetchall()
myprint(result[0][0])
myprint(result[1][0])
myprint(result[2][0])
count = 0
while True:
question = 'The stranger asks you a question: '
if count == 0:
answer = input(question + '1 + 1 = ? ')
if answer == '2':
print("Correct!")
count += 1
else:
print("That's not quite right..")
break
if count == 1:
answer = input(question + 'What earth animal has a long neck? ')
if answer == 'giraffe':
print("Correct!")
count += 1
else:
print("That's not quite right..")
break
if count == 2:
answer = input(question + 'The sum of all the natural numbers? ')
if answer == '-1/12':
print("Correct!")
target = "glowing-vial"
cur.execute("UPDATE item SET objectID = 310 WHERE itemID = 37")
getFunc(target)
else:
print("That's not quite right..")
break
break
elif index == 20:
wait = 0
cur.execute("UPDATE item SET playerID = NULL WHERE itemID = 311")
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 777 AND 778")
result = cur.fetchall()
myprint(result[0][0])
myprint(result[1][0])
while wait == 0:
command = input("> ")
if command == 'wait':
wait += 1
else:
print("You should wait for the hyperdrive travel to complete")
print("\nJack arrives to Cernobog")
cur.execute("UPDATE player SET placeID = 42 WHERE playerID = 1")
elif index == 12:
cur.execute("SELECT itemID FROM item WHERE playerID = 1;")
items = cur.fetchall()
r=0
for x in items:
if 49 in x or 411 in x:
r+=1
if r == 2:
cur.execute("UPDATE player SET placeID = 47;")
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID = 1045")
result = cur.fetchall()
myprint(result[0][0])
elif r == 1:
print("I still need to make the poison pizza.")
elif index == 13:
wait = 0
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 1046 AND 1047")
result = cur.fetchall()
if wait == 0:
myprint('\n' + result[0][0] + '\n')
while wait == 0:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
else:
print("You should probably wait for a while before doing in.")
print('\n' + result[1][0] + '\n')
cur.execute("UPDATE object SET locked = 0 WHERE objectID = 47")
elif index == 14:
wait = 0
cur.execute("UPDATE player SET placeID = 1000;")
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 1048 AND 1051")
result = cur.fetchall()
if wait == 0:
myprint('\n' + result[0][0] + '\n')
while wait == 0:
command = input("> ")
if command == 'wait' or command == 'WAIT':
wait += 1
else:
print("Its probably best to just wait.")
myprint('\n' + result[1][0] + '\n')
while wait == 1:
command = input("> ")
if command == 'y' or command == 'Y' or command == 'yes' or command == 'YES':
wait += 1
elif command == 'n' or command == 'N' or command == 'no' or command == 'NO':
wait += 2
else:
myprint("YES or NO that should not be so hard, after what you have been trough... Or was it all just pure luck?")
if wait == 2:
myprint('\n' + result[2][0] + '\n')
print('You won the game')
elif wait == 3:
myprint('\n' + result[3][0] + '\n')
print('You won the game')
elif index == 15:
cur.execute("SELECT actiontable.description FROM actiontable WHERE actionID BETWEEN 765 AND 768")
result = cur.fetchall()
inventory = getInventory()
hasItem = False
myprint(result[0][0])
for item in inventory:
if item[0] == 'domestic-pet':
hasItem = True
if hasItem:
myprint(result[2][0])
myprint(result[3][0])
getFunc('quantum-flux')
cur.execute("UPDATE item SET playerID = NULL WHERE itemID = 31")
else:
myprint(result[1][0])
def pressFunc(locationID):
def travel():
if locationID == 3:
storyMode(1)
elif locationID == 1:
storyMode(2)
elif locationID == 415:
storyMode(6)
elif locationID == 27:
storyMode(4)
elif locationID == 26:
storyMode(5)
elif locationID == 45:
storyMode(3)
elif locationID == 28:
storyMode(9)
elif locationID == 31 or locationID == 32 or locationID == 33 or locationID == 34:
storyMode(10)
elif locationID == 39:
storyMode(11)
elif locationID == 311:
storyMode(20)
elif locationID == 416:
storyMode(12)
elif locationID == 48:
storyMode(13)
elif locationID == 417:
storyMode(14)
sys.exit()
elif locationID == 310:
storyMode(15)
cur.execute("SELECT object.usable FROM object join objecttype WHERE object.placeID = %i \
and objecttype.typename = 'button' and object.typeID = objecttype.typeID" % locationID)
result = cur.fetchall()
if len(result) > 0 and result[0][0] == 1:
travel()
else:
print("You can't press that yet!")
def getHelp():
print("Use these commands to interact with the game:\n\n \
Look or show: To look around or to examine objects.\n \
N, S, W, E or north, south, west, east: To move around the game world.\n \
Use: To use an object.\n \
Press: To press a button.\n \
Wait: To wait when you are told to.\n \
Map: To print out a map, for navigation purposes.\n \
Open: To open doors and various objects.\n \
Inventory or i: To examine your inventory.\n \
Get or take: To pick up an item.\n \
Combine: To combine two items together.\n \
Quit: To quit the game. You wouldn't want to do that, would you?")
def gameOver(location):
print("Jack dies a horrible death on " + location + '\n\n\n\n')
sys.exit()
def combFunc(final_command):
if len(final_command) >= 3:
item_one = final_command[1].lower()
item_two = final_command[len(final_command)-1].lower()
count = 0
cur = db.cursor()
sql = "SELECT name FROM item WHERE playerID = 1 and groupID > 0"
cur.execute(sql)
itemrez = cur.fetchall()
for i in itemrez:
if i[0] == item_one or i[0] == item_two:
count = count + 1
if count == 2:
cur = db.cursor()
sql = "SELECT groupID FROM item WHERE name = '%s' or name = '%s'" % (item_one, item_two)
cur.execute(sql)
result = cur.fetchall()
if result[0][0] == result[1][0]:
cur = db.cursor()
sql = "UPDATE item SET playerID = NULL WHERE groupID = '%i'" % (result[0][0])
cur.execute(sql)
cur = db.cursor()
sql = "SELECT resultID FROM itemGroup WHERE groupID = '%i'" % (result[0][0])
cur.execute(sql)
groupid = cur.fetchall()
action = getAction(groupid[0][0], 1)
if action != None:
cur.execute(action[0])
cur = db.cursor()
sql = "UPDATE item SET playerID = 1 WHERE itemID = '%i'" % (groupid[0][0])
cur.execute(sql)
cur.execute("SELECT name FROM item WHERE itemID = '%i'" % (groupid[0][0]))
result = cur.fetchall()
myprint("Jack combines the two items and gets a " + result[0][0])
else:
print("You can't do it! Try other items!")
else:
print("You dont have this item")
else:
print("What you want to combine? ")
def myprint(text):
max_length = 70
arr = text.split()
count = 0
for i in arr:
if count + len(i) <= max_length:
if count > 0:
print(" ", end='')
count = count + 1
print(i, end='')
else:
print("")
count = 0
print(i, end='')
count = count + len(i)
print("")
main()
|
from __future__ import print_function
import unittest
"""
Test the example deployer
"""
class TestDeployer(unittest.TestCase):
ENV = {}
ARGS = {}
METADATA = {}
def test_exit_0(self):
"""
Test the deployer exits 0
"""
|
from settings import settings
from office365.runtime.auth.user_credential import UserCredential
from office365.sharepoint.client_context import ClientContext
credentials = UserCredential(settings['user_credentials']['username'],
settings['user_credentials']['password'])
ctx = ClientContext(settings['url']).with_credentials(credentials)
target_folder = "/Shared Documents/Archive/2020/Sept"
target_folder = ctx.web.ensure_folder_path(target_folder).execute_query()
print(target_folder.serverRelativeUrl)
|
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.db import transaction
from .models import User
class VisitorSignUpForm(UserCreationForm):
username = forms.CharField(required=True)
email = forms.EmailField(required=True)
class Meta(UserCreationForm.Meta):
model = User
@transaction.atomic
def save(self):
user = super().save(commit=False)
user.user_type = 1
user.username = self.cleaned_data.get('username')
user.email = self.cleaned_data.get('email')
user.save()
        return user
class StaffSignUpForm(UserCreationForm):
username = forms.CharField(required=True)
email = forms.EmailField(required=True)
User_Type_Choices =(
(2,'staff'),
(3, 'curator'),
(4,'admin'),
)
staff_type = forms.MultipleChoiceField(choices=User_Type_Choices)
class Meta(UserCreationForm.Meta):
model = User
@transaction.atomic
def save(self):
user = super().save(commit=False)
user.username = self.cleaned_data.get('username')
user.email = self.cleaned_data.get('email')
user.user_type = int(self.cleaned_data.get('staff_type')[0])
user.save()
        return user
|
import numpy as np
# principal component analysis
from hylite.filter.mnf import plotMNF
from hylite import HyData
def PCA(hydata, output_bands=20, band_range=None, step=5):
"""
    Apply a PCA dimensionality reduction to the hyperspectral dataset via an eigendecomposition of the band covariance matrix.
*Arguments*:
- data = the dataset (HyData object) to apply PCA to.
- output_bands = number of bands to return (i.e. how many dimensions to retain). Default is 20.
- band_range = the spectral range to perform the PCA over. If (int,int) is passed then the values are treated as
      min/max band IDs, if (float,float) is passed then values are treated as wavelengths (in nm). If None is
passed (default) then the PCA is computed using all bands. Note that wavelengths can only be passed
if image is a hyImage object.
    - step = subsample the dataset when estimating the covariance (for performance reasons). step = 1 will include all pixels in the calculation,
step = n includes every nth pixel only. Default is 5 (as most images contain more than enough pixels to
accurately estimate variance etc.).
*Returns*:
- bands = Bands transformed into PCA space, ordered from highest to lowest variance.
- factors = the factors (vector) each band is multiplied with to give the corresponding PCA band.
    - wav = a list of wavelengths the transform was applied to (handy for plotting), or None if wavelength info is not available.
"""
# get numpy array
wav = None
decomp = False
if isinstance(hydata, HyData):
wav = hydata.get_wavelengths()
if hydata.is_int():
            hydata.decompress() # PCA doesn't work very well with ints....
decomp = True # so we can compress again afterwards
data = hydata.data.copy()
else:
data = hydata.copy()
# get band range
if band_range is None: # default to all bands
minb = 0
maxb = data.shape[-1]
else:
if isinstance(band_range[0], int) and isinstance(band_range[1], int):
minb, maxb = band_range
else:
assert isinstance(hydata, HyData), "Error - no wavelength information found."
minb = hydata.get_band_index(band_range[0])
maxb = hydata.get_band_index(band_range[1])
# prepare feature vectors
X = data[..., :].reshape(-1, data.shape[-1])
# print(minb,maxb)
X = X[::step, minb:maxb] # subsample
X = X[np.isfinite(np.sum(X, axis=1)), :] # drop vectors containing nans
X = X[np.sum(X, axis=1) > 0, :] # drop vectors containing all zeros
# calculate mean and center
mean = np.mean(X, axis=0)
X = X - mean[None, :]
# calculate covariance
cov = np.dot(X.T, X) / (X.shape[0] - 1)
# and eigens (sorted from biggest to smallest)
eigval, eigvec = np.linalg.eig(cov)
idx = np.argsort(eigval)[::-1]
eigvec = eigvec[:, idx]
eigval = np.abs(eigval[idx])
# project data
data = data[..., minb:maxb] - mean
out = np.zeros_like(data)
for b in range(min(output_bands, data.shape[-1])):
out[..., b] = np.dot(data, eigvec[:, b])
# compute variance percentage of each eigenvalue
eigval /= np.sum(eigval) # sum to 1
# filter wavelengths for return
    if wav is not None:
wav = wav[minb:maxb]
# compress?
if decomp:
hydata.compress()
# prepare output
outobj = hydata.copy(data=False)
outobj.header.drop_all_bands() # drop band specific attributes
outobj.data = out[..., 0:output_bands]
outobj.set_wavelengths(np.cumsum(eigval[0:output_bands])) # wavelengths are % of explained variance
outobj.push_to_header()
return outobj, eigvec.T, wav
def plotPCA(n, R, factors, wavelength, flip=False, **kwds):
"""
Utility function for plotting PCA components and their associated band weights calculate on images. Note that
this method is identical to plotMNF(...).
*Arguments*:
- n = the nth PCA component will be plotted
- R = array containing the PCA (as returned by PCA(...))
    - factors = the list of principal component weights, as returned by PCA(...)
    - wavelength = wavelengths corresponding to each band/factor used to calculate the principal components,
      as returned by PCA( ... ).
- flip = True if the sign of the principal components/weights should be flipped. Default is False.
*Keywords*:
- keywords are passed to plot.imshow(...).
*Returns*:
- fig, ax = the figure and list of associated axes.
"""
return plotMNF(n, R, factors, wavelength, flip, **kwds)
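# A standalone sketch of the core recipe PCA(...) uses above (illustrative only;
# the real function additionally handles HyData wrapping, band ranges, nan
# filtering and compression):
if __name__ == '__main__':
    X = np.random.rand(500, 8)                  # 500 "pixels", 8 "bands"
    Xc = X - np.mean(X, axis=0)                 # center
    cov = np.dot(Xc.T, Xc) / (Xc.shape[0] - 1)  # band covariance
    eigval, eigvec = np.linalg.eig(cov)
    idx = np.argsort(eigval)[::-1]              # sort by decreasing variance
    scores = np.dot(Xc, eigvec[:, idx[:3]])     # project onto first 3 components
    print(scores.shape)                         # (500, 3)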
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 19:49:18 2017
@author: ares
"""
import numpy as np
import helpers as hp
from scipy.stats import truncnorm
M = 40 # Size of Patch Array
timelength = 3000
stim = np.zeros([M,M,timelength])
for ind in range(timelength):
stim[:,:,ind] = hp.getSineWavePatternPatch(size=[M,M],mult=[1,1],orientation = 0.1*ind, sf = 4,
phase = 0,wave_type = 'sine',radius = [20,20],center = [20,20])
Stim = np.reshape(stim,[M**2,timelength])
# Set up network
eSize = M**2
ySize = 100
W = truncnorm.rvs(0.0,0.02,size = [eSize,ySize])
V = truncnorm.rvs(0,0.02,size = [ySize,eSize])
H = truncnorm.rvs(0,0.02,size = [ySize,ySize])
eta = 0.0001
y = np.zeros([1,ySize])
E = np.zeros([1, eSize])
Y = []
for t in range(2*timelength):
In = Stim[:,np.mod(t,timelength)]
In = (In - np.mean(In))/np.std(In)
E = np.tanh(In - np.dot(y,V))
y = np.tanh(np.dot(E,W) + np.dot(y,H))
Y.append(E)
W += eta*np.dot(E.T,y)
V += eta*np.dot(y.T,E)
    H += eta*np.dot(y.T,y) # recurrent Hebbian update; y.T*y has H's (ySize, ySize) shape
|
from sys import stdin
word = str(stdin.readline().strip())
if "ss" in word:
print("hiss")
else:
print("no hiss")
|
# from datetime import datetime, date
# d = datetime.now()
# print(d)
# print(d.year, d.month, d.day, d.hour, d.minute, d.second)
if True:
print("true")
else:
print("false")
total = 'aaa' + \
'bbbbb' + \
'cccc'
print(total)
s = '123456789'
print(len(s))
print(s[0])
print(s[0:-1])
print(s[0:3])
print(s[0:5:2])
print(s * 2)
print(s + "a")
print('abc\nd')
print(r'abc\nd')
pas = input("\n\n")
print(pas)
|
import enchant
dipper = "abcdefghijklmnopqrstuvwxyz"
d = enchant.Dict("en_US")
decrypted_msg = []
score = []
def get_keys(message):
    for key in range(26): # 26 possible shifts (a shift of 26 repeats 0)
mabel = decrypt2(key)
zipping(message, mabel)
def encrypt(Message,key):
Encrypted_Message = ""
Message = Message.lower()
for i in range(len(Message)):
character = Message[i]
Encrypted_Message += chr((ord(character) + key - 97) % 26 + 97)
return Encrypted_Message
def decrypt1(Encrypted_Message,key):
Decrypted_Message = ""
for i in range(len(Encrypted_Message)):
character = Encrypted_Message[i]
Decrypted_Message += chr(122 - (122 - (ord(character) - key)) % 26)
return Decrypted_Message
def decrypt2(shift):
shifted_alphabet_lines = ""
shift %= 26
alphabet = "abcdefghijklmnopqrstuvwxyz"
for i in range(len(alphabet)):
character = alphabet[i]
shifted_alphabet_lines += chr(122 - (122 - (ord(character) - shift)) % 26)
return shifted_alphabet_lines
def zipping(message, mabel):
word = message.translate({ord(x): y for (x, y) in zip(dipper, mabel)})
decrypted_msg.append(word)
score_check(word)
def score_check(line):
points = 0
words = line.split()
for word in words:
if d.check(word):
points += 1
else:
pass
score.append(points)
def show_final_guess(input_word):
total_words = len(input_word.split())
for index in range(0,len(decrypted_msg)):
probability = score[index]/total_words * 100
if probability == 100 :
print("Decrypted Message : " + decrypted_msg[index])
if __name__ == "__main__":
print('<---Please select one of the options given below--->\n')
Value = int(input('1 : Encryption\n2 : Decryption\n-->'))
if (Value == 1):
Message = input("Please Enter Your MESSAGE (Plain Text) : ")
key = int(input('Please Enter the desired SHIFT KEY : '))
print("Encrypted Message : ", encrypt(Message, key))
elif (Value == 2):
print('<---Please select one of the options given below--->\n')
Value = int(input("1 : If you know the key\n2 : If you don't know the key\n-->"))
if Value == 1:
z = []
Message = input("Please Enter Your MESSAGE (Cipher Text) : ")
Message = Message.lower()
Message = Message.split()
key = int(input('Please Enter the desired SHIFT KEY : '))
for m in Message:
x = decrypt1(m, key)
z.append(x)
l = " ".join(z)
print("Decrypted Message : ", l)
elif Value == 2:
Message = input("Please Enter Your MESSAGE (Cipher Text) : ")
Message = Message.lower()
get_keys(Message)
show_final_guess(Message)
else:
            print('Please select a valid option')
else:
        print('Please select a valid option')
|
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam
from sklearn import datasets
from sklearn import preprocessing
'''
1. Read trees.csv and predict Volume
for the inputs below (TensorFlow, Keras):
Girth 8.8, 10.5
Height 63, 72
'''
trees = np.loadtxt("../../../data/trees.csv", delimiter=",", skiprows=1, dtype=np.float32)
# Normalization
total_scale = preprocessing.MinMaxScaler()
trees = total_scale.fit_transform(trees)
# Scaling introduces zero values; could this be a problem for training?
x_data = trees[:, :-1]
y_data = trees[:, -1:]
print(x_data)
print(y_data)
print(x_data.shape)
print(y_data.shape)
W = tf.Variable(tf.random_uniform([2, 1]))
b = tf.Variable(tf.random_uniform([1]))
X = tf.placeholder(dtype=tf.float32, shape=[None, 2])
Y = tf.constant(y_data, dtype=tf.float32)
hx = tf.matmul(X, W) + b
cost = tf.reduce_mean(tf.square(hx - Y))
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
for i in np.arange(1000):
_cost, _train = sess.run([cost, train], feed_dict={X: x_data})
if not i % 100:
print(i, _cost)
# predict
# Girth 8.8, 10.5
# Height 63, 72
# --> [[8.8, 63], [10.5, 72]]
print("학습된 Weight : ", sess.run(W))
print("학습된 bias : ", sess.run(b))
# 입력데이터에 대한 스케일 조정
predict_data = np.float32(np.array([[8.8, 63], [10.5, 72]]))
# Do we need a new scaler? It should be right to transform with the one fitted earlier, shouldn't it?
# predict_data_scale = preprocessing.MinMaxScaler()
# predict_data_scale.fit_transform(predict_data)
# scaled_predict_data = predict_data_scale.transform(predict_data)
scaled_predict_data = total_scale.fit_transform(predict_data)
predict_result = sess.run(hx, feed_dict={X: scaled_predict_data})
print("(Tensorflow) 스케일된 예측값 : ", predict_result)
# Fit a scaler on the labels of the original data
trees = np.loadtxt("../../../data/trees.csv", delimiter=",", skiprows=1)
y_data = np.float32(trees[:, -1:])
label_data_scale = preprocessing.MinMaxScaler()
scaled_label_data = label_data_scale.fit_transform(y_data)
# Inverse transform back to the original scale
inverse_predict_result = label_data_scale.inverse_transform(predict_result)
print("(Tensorflow) 복원된 예측값 : ", inverse_predict_result)
model = Sequential([Dense(units=1, input_shape=[2])])
model.compile(loss="mean_squared_error", optimizer=Adam(learning_rate=0.01))
history = model.fit(x_data, y_data, epochs=1000)
# print(history.history["loss"])
predict_result = model.predict(scaled_predict_data)
print("(Keras) 스케일된 예측값 : ", predict_result)
inverse_predict_result = label_data_scale.inverse_transform(predict_result)
print("(Keras) 복원된 예측값 : ", inverse_predict_result)
'''
2. Label a volume of 40 or more as "large",
30 or more as "medium", and anything less as "small",
then print a table like this:
volume level
============
10.3 small
...
'''
trees = np.loadtxt("../../../data/trees.csv", delimiter=",", skiprows=1, dtype=np.float32)
volume = trees[:, -1]
big_index = np.where(volume >= 40)
medium_index = np.where((30 <= volume) & (volume < 40))
small_index = np.where(volume < 30)
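# A minimal sketch completing exercise 2 (illustrative; the label wording is assumed):
labels = np.full(volume.shape, "small", dtype=object)
labels[medium_index] = "medium"
labels[big_index] = "large"
print("volume  level")
print("=============")
for v, lab in zip(volume, labels):
    print("{:6.1f}  {}".format(v, lab))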
'''
3. Find the smallest and largest
Height values.
4. Print the top 5 rows by girth
(girth, height, volume).
'''
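# Minimal sketches for exercises 3 and 4 (illustrative; column order Girth, Height, Volume assumed):
height = trees[:, 1]
print("min height:", height.min(), "max height:", height.max())
top5 = trees[np.argsort(trees[:, 0])[::-1][:5]]
print(top5)  # the 5 rows with the largest girth, as (girth, height, volume)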
|
# -*- coding:utf8 -*-
from django.http import HttpResponse
def home(request):
return HttpResponse("ok")
|
import unittest
from katas.kyu_6.does_my_number_look_big_in_this import narcissistic
class NarcissisticTestCase(unittest.TestCase):
def test_true(self):
self.assertTrue(narcissistic(7))
def test_true_2(self):
self.assertTrue(narcissistic(371))
def test_false(self):
self.assertFalse(narcissistic(122))
def test_false_2(self):
self.assertFalse(narcissistic(4887))
|
arr=[5,3,4,8]
min_arr = arr[0] # assume the first element is the minimum
for x in arr:
    if min_arr > x:
        min_arr = x
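print(min_arr) # illustrative: show the result (3 for the array above)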
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# otra_app78.py
#
from Tkinter import * # Import the Tkinter module
def DrawList(): # Build a list with some names
    plist = ['Pablo','Renato','José Alonso']
    for item in plist: # Insert the items into a Listbox
        listbox.insert(END,item)
root = Tk() # Create the root window
listbox = Listbox(root)
boton = Button(root,text = "Press",command = DrawList)
boton.pack()
listbox.pack() # pack() the Button and the Listbox
root.mainloop() # Enter the main loop
|
# coding: utf-8
import re
from yabot.models.vote import VotePool
from yabot.models.member import Member
class VoteMonitor(object):
def process_text(self, send_from, text):
pass
def process_command(self, send_from, text):
        match = re.match(ur'投票 (\d+)', text) # matches the Chinese command "投票 <id>" ("vote <id>")
        if not match:
            return
        member = Member(send_from)
        vote_id = match.group(1)
        vote = VotePool.get_valid_vote(vote_id)
        if not vote:
            return u'No such vote; are you sure you typed it right?'
        ret = vote.incr(member)
        if ret:
            l = [u'+1 vote\n']
        else:
            l = [u'No vote added\n']
        l.append(u'Voters:')
        for m in vote.members:
            l.append(m.nickname)
        if vote.is_success():
            l.append(u'Threshold reached! ' + vote.success_msg)
        else:
            l.append(u'Only %s votes to go!' % (vote.threshold - vote.cnt))
return '\n'.join(l)
|
from django.urls import path, include, re_path
from . import views
app_name = 'datawarehouse'
urlpatterns = [
path('', views.DatawarehouseView.as_view(), name='index'),
# path('review', views.DatawarehouseView.as_view(), name='index'),
path('mahasiswa/<int:id>', views.MahasiswaView.as_view(), name='mahasiswa'),
path('krs/<int:id>', views.KrsView.as_view(), name='krs'),
path('krs_detail/<int:id>', views.KrsDetailView.as_view(), name='krs_detail'),
]
|
# agents.py
# -----------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from captureAgents import CaptureAgent
from captureAgents import AgentFactory
import distanceCalculator
import random, time, util
from game import Directions
import keyboardAgents
import game
from util import nearestPoint
import math
#############
# FACTORIES #
#############
NUM_KEYBOARD_AGENTS = 0
class DeepAgentFactory(AgentFactory):
"Returns one keyboard agent and offensive reflex agents"
def __init__(self, isRed):
AgentFactory.__init__(self, isRed)
def getAgent(self, index):
return self.choose('deep', index)
def choose(self, agentStr, index):
if agentStr == 'keys':
global NUM_KEYBOARD_AGENTS
NUM_KEYBOARD_AGENTS += 1
if NUM_KEYBOARD_AGENTS == 1:
return keyboardAgents.KeyboardAgent(index)
elif NUM_KEYBOARD_AGENTS == 2:
return keyboardAgents.KeyboardAgent2(index)
else:
raise Exception('Max of two keyboard agents supported')
elif agentStr == 'deep':
return DeepAgent(index)
else:
raise Exception("No staff agent identified by " + agentStr)
##########
# Agents #
##########
class ReflexCaptureAgent(CaptureAgent):
"""
A base class for reflex agents that chooses score-maximizing actions
"""
def chooseAction(self, gameState):
"""
Picks among the actions with the highest Q(s,a).
"""
actions = gameState.getLegalActions(self.index)
# You can profile your evaluation time by uncommenting these lines
# start = time.time()
values = [self.evaluate(gameState, a) for a in actions]
# print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)
maxValue = max(values)
bestActions = [a for a, v in zip(actions, values) if v == maxValue]
action = random.choice(bestActions)
return action
def getSuccessor(self, gameState, action):
"""
Finds the next successor which is a grid position (location tuple).
"""
successor = gameState.generateSuccessor(self.index, action)
pos = successor.getAgentState(self.index).getPosition()
if pos != nearestPoint(pos):
# Only half a grid position was covered
return successor.generateSuccessor(self.index, action)
else:
return successor
def evaluate(self, gameState, action):
"""
Computes a linear combination of features and feature weights
"""
self.doInference(gameState)
features = self.getFeatures(gameState, action)
weights = self.getWeights(gameState, action)
return features * weights
class DeepAgent(ReflexCaptureAgent):
# static variables
init = False
legalPositions = None
inferences = {}
def initialize(self, gameState):
if DeepAgent.legalPositions is None:
DeepAgent.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]
def doInference(self, gameState):
if not DeepAgent.init:
self.initialize(gameState)
for opp in self.getOpponents(gameState):
if opp not in DeepAgent.inferences:
DeepAgent.inferences[opp] = self.getUniformDistribution()
# do the inference
emissionModel = util.Counter()
            # triangular emission model centered on the noisy reading, over [observation-12, observation+12]
observation = gameState.getAgentDistances()[opp]
count = 1
for i in range(observation-12, observation+13):
emissionModel[i] = count
if i < observation:
count += 1
else:
count -= 1
emissionModel.normalize()
allPossible = util.Counter()
for lp in DeepAgent.legalPositions:
dist = util.manhattanDistance(lp, gameState.getAgentState(self.index).getPosition())
allPossible[lp] = emissionModel[dist] * DeepAgent.inferences[opp][lp]
newAllPossible = util.Counter()
for x,y in allPossible:
prob = allPossible[(x, y)]
moves = [(x, y+1), (x, y-1), (x-1, y), (x+1, y), (x, y)]
for m in moves:
if m in DeepAgent.legalPositions:
newAllPossible[m] += prob
newAllPossible.normalize()
if allPossible.totalCount() != 0:
DeepAgent.inferences[opp] = newAllPossible
else:
DeepAgent.inferences[opp] = self.getUniformDistribution()
def getUniformDistribution(self):
uniform = util.Counter()
for lp in DeepAgent.legalPositions:
uniform[lp] = 1.0 / len(DeepAgent.legalPositions)
return uniform
def getPosition(self, index, gameState):
"""
Only call this on enemy indices.
"""
if gameState.getAgentState(index).getPosition() is not None:
return gameState.getAgentState(index).getPosition()
else:
return DeepAgent.inferences[index].argMax()
def getFeatures(self, gameState, action):
succ = self.getSuccessor(gameState, action)
myPos = succ.getAgentState(self.index).getPosition()
width = succ.getWalls().width
height = succ.getWalls().height
xBorder = width / 2
squad = [s for s in self.getTeam(succ) if s != self.index]
homiePos = succ.getAgentState(squad[0]).getPosition()
ourFood = self.getFoodYouAreDefending(succ).asList()
OUR = self.getCapsulesYouAreDefending(succ)
ourOff = [oo for oo in self.getTeam(succ) if succ.getAgentState(oo).isPacman]
ourDef = [od for od in self.getTeam(succ) if not succ.getAgentState(od).isPacman]
theirFood = self.getFood(succ).asList()
THEIR = self.getCapsules(succ)
theirOff = [to for to in self.getOpponents(succ) if succ.getAgentState(to).isPacman]
theirDef = [td for td in self.getOpponents(succ) if not succ.getAgentState(td).isPacman]
### features
## offense
# len(theirFood)
# distToTheirFoodCenter
# minDistToFood
# minDistToCapsule
# minDistToGhost
# scaredTime (TODO)
# recentlyEaten (TODO)
# ourOff
# theirDef
## defense
# len(ourFood)
# ourDef
# theirOff
# minDistToInvader
# distToOurFoodBorderCenter
## general
# minDistToHomie
# score
# stop
# reverse
# bias
# ??? combine ourOffRatio and theirOffRatio ???
## offense
numTheirFood = len(theirFood)
minDistToCloserFood = width * height
for tf in theirFood:
minDistToCloserFood = min(minDistToCloserFood, self.getMazeDistance(myPos, tf))
eatFood = 0
if myPos in self.getFood(gameState).asList() or myPos in self.getCapsules(gameState):
eatFood = 1
minDistToCapsule = 0 if len(THEIR) == 0 else (width * height)
for tc in THEIR:
minDistToCapsule = min(minDistToCapsule, self.getMazeDistance(myPos, tc))
minDistToGhost = 0 if len(theirDef) == 0 else (width * height)
for td in theirDef:
minDistToGhost = min(minDistToGhost, self.getMazeDistance(myPos, self.getPosition(td, succ)))
if gameState.getAgentState(td).scaredTimer != 0:
minDistToGhost /= (-3.0)
tfc = [0, 0]
for tf in theirFood:
tfc = list(sum(c) for c in zip(tfc, tf))
tfc = list(c / len(theirFood) for c in tfc)
minDist = width + height
        moreClosest = tuple(tfc)
        if moreClosest not in DeepAgent.legalPositions:
for lp in DeepAgent.legalPositions:
dist = util.manhattanDistance(tuple(tfc), lp)
if dist < minDist:
minDist = dist
moreClosest = lp
distToTheirFoodCenter = self.getMazeDistance(myPos, tuple(moreClosest))
## defense
numTheirOff = len(theirOff)
minDistToInvader = 0
if not succ.getAgentState(self.index).isPacman:
minDistToInvader = width * height
for to in theirOff:
minDistToInvader = min(minDistToInvader, self.getMazeDistance(myPos, self.getPosition(to, gameState)))
if minDistToInvader > 4:
minDistToInvader = width * height
## general
minDistToHomie = width * height
for s in squad:
dist = self.getMazeDistance(myPos, succ.getAgentState(s).getPosition())
minDistToHomie = min(dist, minDistToHomie)
features = util.Counter()
# offense
features['numTheirFood'] = numTheirFood
features['minDistToCloserFood'] = minDistToCloserFood
features['minDistToCapsule'] = minDistToCapsule
features['minDistToGhost'] = minDistToGhost
features['minDistToHomie'] = minDistToHomie # if gameState.getAgentState(self.index).isPacman else 0
features['distToTheirFoodCenter'] = distToTheirFoodCenter
features['eatFood'] = eatFood
# defense
features['numTheirOff'] = numTheirOff
features['minDistToInvader'] = minDistToInvader
return features
def getWeights(self, gameState, action):
return {
# offense
'numTheirFood' : -20,
'minDistToCloserFood' : -100,
'minDistToCapsule' : -50,
'minDistToGhost' : 40,
'minDistToHomie' : 50,
'distToTheirFoodCenter' : -20,
'eatFood' : 100000,
# defense
'numTheirOff' : -2000,
'minDistToInvader' : -150,
}
|
#-*-coding:utf-8-*-
# Login and account forms for the auth blueprint
from flask_wtf import Form
from wtforms import StringField,PasswordField,BooleanField, SubmitField
from wtforms.validators import Required,Length,Email,Regexp,EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(Form):
    email=StringField('Email',validators=[Required(),Length(1,64),Email()])
    password=PasswordField('Password',validators=[Required()])
    remember_me=BooleanField('Remember me')
    submit=SubmitField('Log in')
class RegistrationForm(Form):
    email=StringField('Email',validators=[Required(),Length(1,64),Email()])
    username=StringField('Username',validators=[Required(),Length(1,64),Regexp('^[^0-9].*$',0,'The first character cannot be a digit!')])
    password=PasswordField('Password',validators=[Required(),EqualTo('password2',message='Passwords must match.')])
    password2=PasswordField('Confirm password',validators=[Required()])
    submit=SubmitField('Register')
    def validate_email(self,field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email is already registered.')
    def validate_username(self,field):
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('Username is already registered.')
class ChangePasswordForm(Form):
    old_password=StringField('Old password',validators=[Required()])
    password=StringField('New password',validators=[Required(),Length(1,64),EqualTo('password2',message='Passwords must match.')])
    password2=StringField('Confirm password',validators=[Required(),Length(1,64)])
    submit=SubmitField('Update')
# Password reset forms
class PasswordResetRequestForm(Form):
    email=StringField('Email',validators=[Required(),Length(1,64),Email()])
    submit=SubmitField('Reset password')
class PasswordResetForm(Form):
    email=StringField('Email',validators=[Required(),Length(1,64),Email()])
    password=PasswordField('Password',validators=[Required(),EqualTo('password2',message='Passwords must match.')])
    password2=PasswordField('Confirm password',validators=[Required()])
    submit=SubmitField('Reset password')
    def validate_email(self,field):
        if User.query.filter_by(email=field.data).first() is None:
            raise ValidationError('Unknown email address.')
# Reset email address
class EmailaddressResetForm(Form):
    email=StringField('New email',validators=[Required(),Length(1,64),Email()])
    password=PasswordField('Password',validators=[Required()])
    submit=SubmitField('Reset email')
    def validate_email(self,field):
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email address already exists.')
# Delete account
class DeleteAccountForm(Form):
    old_password=StringField('Old password',validators=[Required()])
    submit=SubmitField('Confirm deletion')
class DeleteAccountAdminForm(Form):
    old_password=StringField('Old password',validators=[Required()])
    submit=SubmitField('Confirm deletion')
|
import random
x = random.randrange(1,7)
def roll_die(sides):
r = random.randrange(1, sides + 1)
return r
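# A minimal usage sketch (illustrative): simulate a few rolls of a six-sided die.
if __name__ == '__main__':
    print([roll_die(6) for _ in range(5)])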
|
"""Policy daemon management command."""
import asyncio
from contextlib import suppress
import functools
import logging
import signal
from django.core.management.base import BaseCommand
from ... import core
logger = logging.getLogger("modoboa.policyd")
def ask_exit(signame, loop):
"""Stop event loop."""
loop.stop()
class Command(BaseCommand):
"""Management command for policy daemon."""
help = "Launch Modoboa policy daemon"
def add_arguments(self, parser):
"""Add command line arguments."""
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--port", type=int, default=9999)
parser.add_argument("--debug", action="store_true",
help="Enable debug mode")
def handle(self, *args, **options):
"""Entry point."""
loop = asyncio.get_event_loop()
coro = asyncio.start_server(
core.new_connection, options["host"], options["port"]
)
server = loop.run_until_complete(coro)
# Schedule reset task
core.start_reset_counters_coro()
for signame in {'SIGINT', 'SIGTERM'}:
loop.add_signal_handler(
getattr(signal, signame),
functools.partial(ask_exit, signame, loop)
)
logger.info("Serving on {}".format(server.sockets[0].getsockname()))
if options["debug"]:
loop.set_debug(True)
logging.getLogger('asyncio').setLevel(logging.DEBUG)
loop.run_forever()
logger.info("Stopping policy daemon...")
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
# Cancel pending tasks
        for task in asyncio.all_tasks(loop):
            task.cancel()
            # Await the task to run its cancellation. A cancelled task
            # raises asyncio.CancelledError, which we can suppress
            with suppress(asyncio.CancelledError):
                loop.run_until_complete(task)
loop.close()
|
from datetime import timedelta
from .const import Timeframe
_MAX_DAYS_PER_TIMEFRAME = {
Timeframe.TICKS: 1,
Timeframe.MINUTES1: 365,
Timeframe.MINUTES5: 365 * 2,
Timeframe.MINUTES10: 365 * 3,
Timeframe.MINUTES15: 365 * 4,
Timeframe.MINUTES30: 365 * 5,
Timeframe.HOURLY: 365 * 6,
Timeframe.DAILY: 365 * 10,
Timeframe.WEEKLY: 365 * 30,
Timeframe.MONTHLY: 365 * 30,
}
def split_interval(start_date, end_date, timeframe):
if end_date < start_date:
        raise ValueError('end_date must be >= start_date, but got start={} and end={}'
                         .format(start_date, end_date))
delta_days = (end_date - start_date).days + 1
max_days = _MAX_DAYS_PER_TIMEFRAME[timeframe]
chunks_count, remainder = divmod(delta_days, max_days)
if remainder != 0:
chunks_count += 1
if chunks_count <= 1:
return ((start_date, end_date),)
chunks = []
offset_start = timedelta(0)
offset_end = timedelta(max_days)
for chunk_i in range(chunks_count):
chunks.append((start_date + offset_start,
min(start_date + offset_end - timedelta(1), end_date)))
offset_start += timedelta(max_days)
offset_end += timedelta(max_days)
return tuple(chunks)
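# A minimal sketch of split_interval in action (illustrative; run where this
# package is importable, since the module uses a relative import):
if __name__ == '__main__':
    from datetime import date
    # A ~25-year daily range exceeds the 3650-day cap, so three chunks come back.
    for chunk in split_interval(date(1995, 1, 1), date(2020, 1, 1), Timeframe.DAILY):
        print(chunk)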
|
import numpy as np
from scipy.linalg import expm
import math
class Logistic:
def __init__(self, learning_rate=0.001, num=1000):
self.learning_rate = learning_rate
self.num_iterations = num
def fit(self, x, y):
arr = np.ones(x.shape[0])
x = np.append(arr.reshape(arr.shape[0],1), x, axis=1)
self.x = x
self.y = y
self.row = x.shape[0]
self.col = x.shape[1]
self.params = np.random.randn(x.shape[1])
self.gradient_descent_main()
# Hypothesis : h(x) = 1 / (1 + e ^ (- theta.T * x))
# Cost Function : J(theta) = - (np.sum(y * log(h(x)) + (1 - y) * log(1 - h(x)))) / m
def get_dim(self):
return self.row, self.col
def cost_function(self, y_pred):
first = np.matmul(self.y.reshape(self.y.shape[0],1), np.log(y_pred))
h1 = 1 - self.y
h2 = 1 - y_pred
second = np.matmul(h1, np.log(h2))
cost = first + second
return (- np.sum(cost) / self.row)
def gradient_descent_main(self):
for i in range(self.num_iterations):
error = np.array([])
for j in range(0, self.row):
mult = np.matmul(self.params, self.x[j])
sum_mult = np.sum(mult)
sum_mult = np.negative(sum_mult)
val = math.exp(sum_mult)
hypothesis = 1 / (1 + val)
error = np.append(error, hypothesis - self.y[j])
self.params -= ((self.learning_rate / self.row) * (self.x.T.dot(error)))
def predict(self, x_test):
hypothesis = np.array([])
arr = np.ones(x_test.shape[0])
x_test = np.append(arr.reshape(arr.shape[0],1), x_test, axis=1)
for i in range(0, len(x_test)):
mult = np.matmul(self.params, x_test[i])
sum_mult = np.sum(mult)
sum_mult = np.negative(sum_mult)
val = math.exp(sum_mult)
h = 1 / (1 + val)
hypothesis = np.append(hypothesis, h)
for k in range(0, len(hypothesis)):
if hypothesis[k] < 0.5:
hypothesis[k] = 0
else:
hypothesis[k] = 1
return hypothesis
def score(self, y_test, y_pred):
wrong_cnt = 0
right_cnt = 0
for i in range(len(y_test)):
if(y_test[i] == y_pred[i]):
right_cnt += 1
else:
wrong_cnt += 1
return (right_cnt/(right_cnt+wrong_cnt) * 100)
def get_params(self):
return self.params
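# A minimal usage sketch on synthetic data (illustrative; only the class API
# above is assumed):
if __name__ == '__main__':
    np.random.seed(0)
    x = np.random.randn(200, 2)
    y = (x[:, 0] + x[:, 1] > 0).astype(float)  # linearly separable labels
    clf = Logistic(learning_rate=0.01, num=300)
    clf.fit(x, y)
    preds = clf.predict(x)
    print("training accuracy: %.1f%%" % clf.score(y, preds))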
|
import sigpy as sp
import numpy as np
import sigpy.mri as mr
import sigpy_e.nft as nft
def jsens_calib(ksp, coord, dcf, ishape, device = sp.Device(-1)):
img_s = nft.nufft_adj([ksp],[coord],[dcf],device = device,ishape = ishape,id_channel =True)
ksp = sp.fft(input=np.asarray(img_s[0]),axes=(1,2,3))
mps = mr.app.JsenseRecon(ksp,
mps_ker_width=12,
ksp_calib_width=32,
lamda=0,
device=device,
comm=sp.Communicator(),
max_iter=10,
max_inner_iter=10).run()
return mps
def FD(ishape, axes=None):
"""Linear operator that computes finite difference gradient.
Args:
ishape (tuple of ints): Input shape.
"""
I = sp.linop.Identity(ishape)
axes = sp.util._normalize_axes(axes, len(ishape))
ndim = len(ishape)
linops = []
for i in axes:
D = I - sp.linop.Circshift(ishape, [0] * i + [1] + [0] * (ndim - i - 1))
R = sp.linop.Reshape([1] + list(ishape), ishape)
linops.append(R * D)
G = sp.linop.Vstack(linops, axis=0)
return G
def TVt_prox(X, lamda, iter_max = 10):
scale = np.max(np.abs(X))
X = X/scale
TVt = FD(X.shape,axes=(0,))
X_b = X
Y = TVt*X
Y = Y/(np.abs(Y)+1e-9)*np.minimum(np.abs(Y)+1e-9,1)
for _ in range(iter_max):
X_b = X_b - ((X_b-X)+lamda*TVt.H*Y)
Y = Y + lamda*TVt*X_b
Y = Y/(np.abs(Y)+1e-9)*np.minimum(np.abs(Y)+1e-9,1)
X_b = X_b * scale
return X_b
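# A small illustrative sketch of the FD operator above, mirroring how TVt_prox
# applies it: the gradient of a 2D array stacks one difference image per axis.
if __name__ == '__main__':
    x = np.arange(16, dtype=float).reshape(4, 4)
    G = FD(x.shape, axes=(0, 1))
    print((G * x).shape)  # (2, 4, 4): one finite-difference component per axis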
|
import sys
sys.path.append('../500_common')
import lib_seq
a = "Chrome11"
b = "Profile 1"
path = "../504_kyoto01/data/result.html"
lib_seq.main(a, b, None, None, waitTime=10, preTime=20, check=False)
|
import sys
import os
sys.path.append(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
|
import graphlab
import numpy as np
def top_words(wiki, name):
row = wiki[wiki['name'] == name]
word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])
return word_count_table.sort('count', ascending=False)
def top_words_tf_idf(wiki, name):
row = wiki[wiki['name'] == name]
word_count_table = row[['tf_idf']].stack('tf_idf', new_column_name=['word','weight'])
return word_count_table.sort('weight', ascending=False)
# load wikipedia data
wiki = graphlab.SFrame('../../data/people_wiki.gl')
# preprocess word count structure
wiki['word_count'] = graphlab.text_analytics.count_words(wiki['text'])
# create nearest neighbors model using word counts
model = graphlab.nearest_neighbors.create(wiki, label='name', features=['word_count'], method='brute_force', distance='euclidean')
# query 10 Obama's nearest neighbors
model.query(wiki[wiki['name']=='Barack Obama'], label='name', k=10)
# get top words on Obama's and Barrio's articles
obama_words = top_words(wiki, 'Barack Obama')
barrio_words = top_words(wiki, 'Francisco Barrio')
# combine sets
combined_words = obama_words.join(barrio_words, on='word')
combined_words = combined_words.rename({'count':'Obama', 'count.1':'Barrio'})
# generate set of common words in Obama's and Barrio's articles
common_words = set(combined_words.sort('Obama', ascending=False)['word'][0:5])
# apply function to dataset to know how many registers contains top words
wiki['has_top_words'] = wiki['word_count'].apply(lambda word_count_vector: True if common_words.issubset(set(word_count_vector.keys())) else False)
(wiki['has_top_words'] == 1).sum()
# compute euclidean distance between Obama's, Bush's and Biden's pages
obama_page = wiki[wiki['name']=='Barack Obama'][0]
bush_page = wiki[wiki['name']=='George W. Bush'][0]
biden_page = wiki[wiki['name']=='Joe Biden'][0]
graphlab.distances.euclidean(obama_page['word_count'], bush_page['word_count'])
graphlab.distances.euclidean(obama_page['word_count'], biden_page['word_count'])
graphlab.distances.euclidean(bush_page['word_count'], biden_page['word_count'])
# compare Obama's article to Bush's article via top words
obama_words = top_words(wiki, 'Barack Obama')
bush_words = top_words(wiki, 'George W. Bush')
combined_words = obama_words.join(bush_words, on='word')
combined_words = combined_words.rename({'count':'Obama', 'count.1':'Bush'})
set(combined_words.sort('Obama', ascending=False)['word'][0:10])
# preprocess TF IDF structure
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['word_count'])
# create nearest neighbors model using TF IDF
model_tf_idf = graphlab.nearest_neighbors.create(wiki, label='name', features=['tf_idf'],
method='brute_force', distance='euclidean')
# query 10 nearest neighbors of Obama
model_tf_idf.query(wiki[wiki['name'] == 'Barack Obama'], label='name', k=10)
# get top words on Obama's and Schiliro's articles
obama_tf_idf = top_words_tf_idf(wiki, 'Barack Obama')
schiliro_tf_idf = top_words_tf_idf(wiki, 'Phil Schiliro')
combined_words = obama_tf_idf.join(schiliro_tf_idf, on='word')
combined_words = combined_words.rename({'weight':'Obama', 'weight.1':'Schiliro'})
combined_words.sort('Obama', ascending=False)
common_words = set(combined_words.sort('Obama', ascending=False)['word'][0:5])
# apply function to dataset to know how many registers contains top words using TF IDF
wiki['has_top_words'] = wiki['word_count'].apply(lambda word_count_vector: True if common_words.issubset(set(word_count_vector.keys())) else False)
(wiki['has_top_words'] == 1).sum()
# compute euclidean distance of Obama's and Biden's articles using TF IDF
obama_page = wiki[wiki['name']=='Barack Obama'][0]
biden_page = wiki[wiki['name']=='Joe Biden'][0]
graphlab.distances.euclidean(obama_page['tf_idf'], biden_page['tf_idf'])
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 02:39:50 2020
@author: Nishal Sundarraman
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
dataset=pd.read_csv('train.csv')
temp=dataset.iloc[:,:].values
from sklearn.impute import SimpleImputer
si=SimpleImputer(missing_values=np.nan,strategy='mean')
si.fit(temp[:,[5]])
temp[:,[5]]=si.transform(temp[:,[5]])
si1=SimpleImputer(missing_values=np.nan,strategy='most_frequent')
si1.fit(temp[:,[11]])
temp[:,[11]]=si1.transform(temp[:,[11]])
temp=pd.DataFrame(temp)
Y=dataset.iloc[:,[1]].values
X=temp.iloc[:,[2,4,5,6,7,11]].values
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
X[:,1]=le.fit_transform(X[:,1])
le1=LabelEncoder()
X[:,5]=le1.fit_transform(X[:,5])
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.2,random_state=0)
from sklearn.decomposition import PCA
pca=PCA(n_components=None)
X_train=pca.fit_transform(X_train)
X_test=pca.transform(X_test)
explained_variance=pca.explained_variance_ratio_
"""
from sklearn.svm import SVC
classifier=SVC()
classifier.fit(X_train,Y_train)
"""
"""
from sklearn.neighbors import KNeighborsClassifier
classifier=KNeighborsClassifier(n_neighbors=100,metric='minkowski',p=2)
classifier.fit(X_train,Y_train)
"""
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
classifier=LogisticRegression(random_state=0)
classifier.fit(X_train,Y_train)
y_pred=classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
cm=confusion_matrix(Y_test,y_pred)
from sklearn.metrics import accuracy_score
score=accuracy_score(Y_test,y_pred)
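# Illustrative addition: report the evaluation computed above.
print(cm)
print("accuracy: {:.3f}".format(score))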
|
#!/usr/bin/env python3
#
# A very basic DREAM Python example. This script generates a basic
# DREAM input file which can be passed to 'dreami'.
#
# Run as
#
# $ ./basic.py
#
# ###################################################################
import numpy as np
import sys
sys.path.append('../../py/')
from DREAM.DREAMSettings import DREAMSettings
import DREAM.Settings.Equations.IonSpecies as Ions
import DREAM.Settings.Solver as Solver
import DREAM.Settings.CollisionHandler as Collisions
ds = DREAMSettings()
times = [0]
radius = [0, 1]
# Set E_field
efield = 50*np.ones((len(times), len(radius)))
ds.eqsys.E_field.setPrescribedData(efield=efield, times=times, radius=radius)
# Set n_cold (prescribed; it is automatically calculated self-consistently otherwise)
#density = 1e20 * np.ones((len(times), len(radius)))
#ds.eqsys.n_cold.setPrescribedData(density=density, times=times, radius=radius)
# Set temperature
temperature = 10 * np.ones((len(times), len(radius)))
ds.eqsys.T_cold.setPrescribedData(temperature=temperature, times=times, radius=radius)
# Set ions
ds.eqsys.n_i.addIon(name='D', Z=1, iontype=Ions.IONS_PRESCRIBED_FULLY_IONIZED, n=1e20)
ds.eqsys.n_i.addIon(name='Ar', Z=18, iontype=Ions.IONS_PRESCRIBED_NEUTRAL, n=1e20)
# Hot-tail grid settings
pmax = 5
ds.hottailgrid.setNxi(30)
ds.hottailgrid.setNp(100)
ds.hottailgrid.setPmax(pmax)
#ds.hottailgrid.collfreq_mode = Collisions.COLLFREQ_MODE_SUPERTHERMAL
ds.hottailgrid.collfreq_mode = Collisions.COLLFREQ_MODE_FULL
#ds.hottailgrid.collfreq_type = Collisions.COLLFREQ_TYPE_NON_SCREENED
ds.hottailgrid.collfreq_type = Collisions.COLLFREQ_TYPE_PARTIALLY_SCREENED
# Set initial hot electron distribution function
"""
fhot_r = np.array([0])
fhot_p = np.linspace(0, pmax, 100)
fhot_xi = np.array([1])
nR, nP, nXi = fhot_r.size, fhot_p.size, fhot_xi.size
fhot = np.zeros((nR, nXi, nP))
for k in range(0, nR):
for j in range(0, nXi):
fhot[k,j,:] = (pmax - fhot_p) / pmax
ds.eqsys.f_hot.setInitialValue(init=fhot, r=fhot_r, p=fhot_p, xi=fhot_xi)
"""
# Set initial Maxwellian @ T = 1 keV, n = 5e19, uniform in radius
ds.eqsys.f_hot.setInitialProfiles(rn0=0, n0=5e19, rT0=0, T0=1000)
# Disable runaway grid
ds.runawaygrid.setEnabled(False)
# Set up radial grid
ds.radialgrid.setB0(5)
ds.radialgrid.setMinorRadius(0.22)
ds.radialgrid.setWallRadius(0.22)
ds.radialgrid.setNr(10)
# Use the linear solver
ds.solver.setType(Solver.LINEAR_IMPLICIT)
# Set time stepper
ds.timestep.setTmax(1.0e-6)
ds.timestep.setNt(4)
# Save settings to HDF5 file
ds.save('dream_settings.h5')
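# A quick sanity check (a sketch; assumes the h5py package is available,
# which the original example does not require):
#
#   import h5py
#   with h5py.File('dream_settings.h5', 'r') as f:
#       print(list(f.keys()))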
|
#!/usr/bin/env python3
"""Business Logic for training and preserving classification algorithms from skicit learn package.
Usage:
python3 words.py <URL>
"""
import pickle
import Data.data as dl
from Common import constants
from datetime import datetime
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
def get_standard_scaler(version=1):
"""Retrieves StandardScaler from scikit package. Scaler is first trained and then preserved in MongoDb.
Once preserved Scaler is retrieved from MongoDb on all following function calls.
Returns:
scaler: sklearn.preprocessing StandardScaler
"""
system = dl.read_dict_from_mongo("LC", "LC_SYSTEM", {"version": version})
if "scaler_preserved" in system and system["scaler_preserved"]:
print("--StandardScaller already preserved.")
return pickle.loads(system["scaler_model_bin"])
else:
print("--StandardScaller trained and preserved for the first time.")
#load train data
train_data, _ = get_lc_train_data()
#train and preserve scaler
scaler = StandardScaler().fit(train_data)
system["scaler_model_bin"] = pickle.dumps(scaler)
system["scaler_preserved"] = True
dl.update_mongo_collection(system, "LC", "LC_SYSTEM", {"version": version})
return scaler
def get_decisiontree_classifier(version=1):
"""Retrieves Decision Tree classifier from scikit package. Classifier is first trained and
then preserved on file system. Once preserved classifier is retrieved from file system on all following function calls.
Classifier is trained on training data first standardized with StandardScaler and then transformed with PCA(n_components=10)
Returns:
dt_clf: sklearn.tree DecisionTreeClassifier
"""
system = dl.read_dict_from_mongo("LC", "LC_SYSTEM", {"version": version})
if "dt_preserved" in system and system["dt_preserved"]:
print("--Decision Tree model already preserved.")
with open(system["dt_model_disk_path"], "rb") as model_file:
return pickle.load(model_file)
else:
print("--DecisionTree trained and preserved for the first time.")
#load train data
train_data, target_classes = get_lc_train_data()
#load scaler and transform data
scaler = get_standard_scaler(version=version)
standardized_train_data = scaler.transform(train_data)
#load PCA and transform data
pca = get_pca_scaler(n_components=10, version=version)
transformed_train_data = pca.transform(standardized_train_data)
#train and preserve Decision Tree
dt_clf = DecisionTreeClassifier()
dt_clf.fit(transformed_train_data, target_classes)
model_file_path = _get_custom_clf_model_file_path("dt_model_v{0}".format(version))
with open(model_file_path, "wb") as model_file:
pickle.dump(dt_clf, model_file)
system["dt_preserved"] = True
system["dt_model_disk_path"] = model_file_path
dl.update_mongo_collection(system, "LC", "LC_SYSTEM", {"version": version})
return dt_clf
def get_randomforest_classifier(n_trees=500, version=1):
"""Retrieves Random Forest classifier from scikit package. Classifier is first trained and
then preserved on file system. Once preserved classifier is retrieved from file system on all following function calls.
Classifier is trained on training data first standardized with StandardScaler and then transformed with PCA(n_components=50)
Args:
n_trees: Number of trees for Random Forest classifier.
Returns:
rf_clf: sklearn.ensemble import RandomForestClassifier
"""
system = dl.read_dict_from_mongo("LC", "LC_SYSTEM", {"version": version})
if "rf{0}_preserved".format(n_trees) in system and system["rf{0}_preserved".format(n_trees)]:
print("--RandomForest with {0} trees already preserved.".format(n_trees))
with open(system["rf{0}_model_disk_path".format(n_trees)], "rb") as model_file:
return pickle.load(model_file)
else:
print("--Random Forest with {0} trees trained and preserved for the first time.".format(n_trees))
#load train data
train_data, target_classes = get_lc_train_data()
#load scaler and standardize data
scaler = get_standard_scaler(version=version)
standardized_train_data = scaler.transform(train_data)
#load PCA and transform data
pca = get_pca_scaler(n_components=50, version=version)
transformed_train_data = pca.transform(standardized_train_data)
#train and preserve RandomForest
rf_clf = RandomForestClassifier(n_estimators=n_trees)
rf_clf.fit(transformed_train_data, target_classes)
model_file_path = _get_custom_clf_model_file_path("rf{0}_model_v{1}".format(n_trees, version))
with open(model_file_path, "wb") as model_file:
pickle.dump(rf_clf, model_file)
system["rf{0}_preserved".format(n_trees)] = True
system["rf{0}_model_disk_path".format(n_trees)] = model_file_path
dl.update_mongo_collection(system, "LC", "LC_SYSTEM", {"version": version})
return rf_clf
def get_kneighbors_classifier(version=1):
"""Retrieves KNeighbors classifier from scikit package. Classifier is first trained and
then preserved on file system. Once preserved classifier is retrieved from file system on all following function calls.
Classifier is trained on training data first standardized with StandardScaler and then transformed with PCA(n_components=50)
Args:
n_trees: Number of trees for Random Forest classifier.
Returns:
kn_clf: sklearn.neighbors KNeighborsClassifier
"""
system = dl.read_dict_from_mongo("LC", "LC_SYSTEM", {"version": version})
if "kn_preserved" in system and system["kn_preserved"]:
print("--KNeighbors already preserved.")
with open(system['kn_model_disk_path'], 'rb') as model_file:
return pickle.load(model_file)
else:
print("--KNeighbors trained and preserved for the first time.")
#load train data
train_data, target_classes = get_lc_train_data()
#load scaler and transform data
scaler = get_standard_scaler(version=version)
standardized_train_data = scaler.transform(train_data)
# load PCA and transform data
pca = get_pca_scaler(n_components=50, version=version)
transformed_train_data = pca.transform(standardized_train_data)
#train and preserve KNeighbors
kn_clf = KNeighborsClassifier()
kn_clf.fit(transformed_train_data, target_classes)
model_file_path = _get_custom_clf_model_file_path("kn_model_v{0}".format(version))
with open(model_file_path, "wb") as model_file:
pickle.dump(kn_clf, model_file)
system["kn_preserved"] = True
system["kn_model_disk_path"] = model_file_path
dl.update_mongo_collection(system, "LC", "LC_SYSTEM", {"version": version})
return kn_clf
def get_naivebayes_classifier(version=1):
"""Retrieves Naive Bayes classifier from scikit package. Classifier is first trained and
then preserved on file system. Once preserved classifier is retrieved from file system on all following function calls.
Classifier is trained on training data first standardized with StandardScaler and then transformed with PCA(n_components=20)
Returns:
nb_clf: sklearn.naive_bayes GaussianNB
"""
system = dl.read_dict_from_mongo("LC", "LC_SYSTEM", {"version": version})
if "nb_preserved" in system and system['nb_preserved']:
print("--Naive Bayes already preserved.")
with open(system['nb_model_disk_path'], 'rb') as model_file:
return pickle.load(model_file)
else:
print("--Naive Bayes trained and preserved for the first time.")
#load train data
train_data, target_classes = get_lc_train_data()
#load scaler and transform data
scaler = get_standard_scaler(version=version)
standardized_train_data = scaler.transform(train_data)
# load PCA and transform data
pca = get_pca_scaler(n_components=20, version=version)
transformed_train_data = pca.transform(standardized_train_data)
#train and preserve Naive Bayes
nb_clf = GaussianNB()
nb_clf.fit(transformed_train_data, target_classes)
model_file_path = _get_custom_clf_model_file_path("nb_model{0}".format(version))
with open(model_file_path, "wb") as model_file:
pickle.dump(nb_clf, model_file)
system["nb_preserved"] = True
system["nb_model_disk_path"] = model_file_path
dl.update_mongo_collection(system, "LC", "LC_SYSTEM", {"version": version})
return nb_clf
def get_pca_scaler(n_components, svd_solver="auto", version=1):
"""Retrieves PCA from scikit package. PCA is first trained and
then preserved on file system. Once preserved PCA is retrieved from file system on all following function calls.
PCA is trained on training data first standardized with StandardScaler.
Args:
n_components: Number of components to retrieve with PCA.
svd_solver: PCA svd_solver
Returns:
pca: sklearn.decomposition PCA
"""
system = dl.read_dict_from_mongo("LC", "LC_SYSTEM", {"version": version})
if str("pca{0}_preserved".format(n_components)) in system and system["pca{0}_preserved".format(n_components)]:
print("--PCA (n_components={0}) already preserved.".format(n_components))
with open(system["pca" + str(n_components) + "_model_disk_path"], "rb") as model_file:
return pickle.load(model_file)
else:
print("--PCA (n_components={0}) trained and preserved for the first time.".format(n_components))
# load train data
train_data, _ = get_lc_train_data()
# load scaler and transform data
scaler = get_standard_scaler(version=version)
standardized_train_data = scaler.transform(train_data)
# train and preserve PCA
pca = PCA(n_components=n_components, svd_solver=svd_solver)
pca.fit(standardized_train_data)
model_file_path = _get_custom_clf_model_file_path("pca{0}_model_v{1}".format(n_components, version))
with open(model_file_path, "wb") as model_file:
pickle.dump(pca, model_file)
system["pca{0}_preserved".format(n_components)] = True
system["pca{0}_model_disk_path".format(n_components)] = model_file_path
dl.update_mongo_collection(system, "LC", "LC_SYSTEM", {"version": version})
return pca
def get_lc_train_data():
"""Gets train data and target classes from MongoDb LC database.
Returns:
train_data: Pandas DataFrame with train data.
target_classes: List of target classes for retrieved train records.
"""
train_data = dl.read_pandas_from_mongo("LC", "LC_TRAIN_DATA")
target_classes = train_data["species"]
    train_data = train_data.drop(["id", "species"], axis=1)
return train_data, target_classes
def mongo_collection_exists(db, collection_name, query={}):
"""Check if MongoDb collection exists. If query is specified check if there are any documents within given collection
that satisfy given query.
Returns:
bool: True if collection/document exists, False otherwise.
"""
return dl.mongo_collection_exists(db=db, collection=collection_name, query=query)
def persisted_models_not_valid(system_query={}):
"""Checks if state of persisted models is valid. If train data set is changed since models were trained
their state is not valid and they need to be trained again to reflect latest dataset.
Returns:
bool: True if models are not valid, False if models are valid.
"""
train_collection_count = dl.get_collection_size(db="LC", collection="LC_TRAIN_DATA")
system = dl.read_dict_from_mongo(db="LC", collection="LC_SYSTEM", query=system_query)
if "train_collection_count" in system:
return int(system["train_collection_count"]) != int(train_collection_count)
return False
def reset_system_collection(system_query, db="LC", collection="LC_SYSTEM"):
"""Deletes the first system collection that satisfies the system_query and then creates new one
with the same system_query and number of records in train set.
"""
print("--Removing document from LC_SYSTEM collection.")
dl.delete_dict_from_mongo(db=db, collection=collection, query=system_query)
document = system_query
document["train_collection_count"] = dl.get_collection_size(db=db, collection="LC_TRAIN_DATA")
print("--Adding updated document to LC_SYSTEM collection.")
dl.write_dict_to_mongo(db=db, collection=collection, data_dict=document)
def _get_custom_clf_model_file_path(filename):
"""Gets custom file path by adding %Y%m%d%H%M%S prefix to the filename.
Args:
filename
Returns:
str: FileName in format %Y%m%d%H%M%S_filename.pkl
"""
file_name = "".join([datetime.now().strftime("%Y%m%d%H%M%S"), "_", filename, ".pkl"])
return "".join([constants.TRAINED_MODELS_PATH, file_name])
|
#!/usr/bin/python3
# pihsm: Turn your Raspberry Pi into a Hardware Security Module
# Copyright (C) 2017 System76, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import sys
import pihsm
from pihsm.common import compute_digest, b32enc, log_response
from pihsm.ipc import ClientClient
log = pihsm.configure_logging(__name__)
# We'll likely add options eventually:
parser = argparse.ArgumentParser()
args = parser.parse_args()
# We need stdin, stdout opened in binary mode:
manifest = sys.stdin.buffer.read()
digest = compute_digest(manifest)
log.info('--> Manifest: %s (%d bytes)', b32enc(digest), len(manifest))
client = ClientClient()
response = client.make_request(digest)
assert len(response) == 400
sys.stdout.buffer.write(response)
sys.stdout.buffer.flush()
log_response(response)
|
import os
def menu(account):
while(True):
print("<Main menu>")
print("1. View and Modify Profile")
print("2. View and Modify list of exercise")
print("3. Analyze my exercise record")
print("4. Set and View my exercise goal")
print("5. Submit today's exercise record")
print("6. To the start screen")
print("7. Exit Program\n")
sel = input("select menu : ")
os.system('cls')
if sel == '1':
while(True):
if viewAndModifyProfile(account):
break
elif sel == '2':
viewAndModifyListOfExercise(account)
elif sel == '3':
analyzeMyExerciseRecord(account)
elif sel == '4':
setAndViewMyExerciseGoal(account)
elif sel == '5':
subMitExerciseRecord(account)
elif sel == '6':
return False
elif sel == '7':
return True
else:
print("You must enter a natural number from 1 to 7.")
input()
os.system('cls')
continue
def viewAndModifyProfile(account):
os.system('cls')
account.view()
print("Select the number of the item you want to modify.")
print("(If multiple selections are made, please add a space between the number and the number).")
print("(q : Back to Menu)")
    choice = input("=> ")  # renamed from 'str' to avoid shadowing the built-in
    if choice == 'q' or choice == 'Q':
        os.system('cls')
        return True
    items = choice.split(" ")  # renamed from 'list' to avoid shadowing the built-in
    for i in items:
        if len(i) != 1 or i < '1' or i > '6' or i == '2' or i == '4':
            print("Digits 1, 3, 5, 6 allowed only")
            input()
            os.system('cls')
            return False
    items.sort()
    os.system('cls')
    account.revise(items)
    os.system('cls')
def viewAndModifyListOfExercise(account):
    ERR_MESSAGE = "An error occurred. We'll load the previous page."
workOut = account.workOut
while True:
sel = workOut.view()
if sel == '1':
workOutIndex = workOut.getWorkOutSelection()
while True:
sel2, index = workOut.viewWorkOut(workOutIndex)
if sel2 == '1':
workOut.editWorkOut(index)
elif sel2 == '2':
workOut.deleteWorkOut(index)
break
elif sel2 == '3':
break
else:
print(ERR_MESSAGE)
os.system('cls')
break
elif sel == '2':
os.system('cls')
workOut.addWorkOut()
os.system('cls')
elif sel == '3':
return
else:
input(ERR_MESSAGE)
os.system('cls')
return
def analyzeMyExerciseRecord(account):
os.system('cls')
activity = account.activity
activity.analyze(account)
os.system('cls')
def setAndViewMyExerciseGoal(account):
if account.goal.isGoal():
account.goal.view()
else:
account.goal.setGoal(account)
def subMitExerciseRecord(account):
goal = account.goal
activity = account.activity
while True:
# back to main page if goal.txt doesn't exist
if not goal.isGoal():
os.system('cls')
print("You should set your goal first.")
input()
os.system('cls')
break
sel = activity.submit()
if sel == "1":
activity.submitWorkOutRecord(account)
elif sel == "2":
os.system('cls')
print("The date has been changed.")
print(f"Current Date: {account.currentDate}")
activity.tomorrow(account)
input()
os.system('cls')
break
elif sel == "3":
return
else:
os.system('cls')
input("An error occured. Loading main page...")
os.system('cls')
|
from turtle import Turtle
from math import pi
def polyline(t, n, length, angle):
""" Draws n segments line with the given length
and angle in degrees between them.
t: Turtle
n: segments in polyline
length: length of the segment
angle: angle (degrees) between segments
"""
for i in range(n):
t.fd(length)
t.lt(angle)
def polygon(t, n, length):
""" Draws n sides polygon with the given length
and angle in degrees between them.
t: Turtle
n: sides
length: length of the side
"""
angle = 360.0 / n
polyline(t, n, length, angle)
def arc(t, r, angle):
""" Draws arc with given radius - r and angle in degrees.
t: Turtle
r: radius of the arc
angle: angle (degrees) for arc
"""
arc_length = 2 * pi * r * angle / 360
n = int(arc_length / 3) + 1
step_length = arc_length / n
step_angle = float(angle) / n
polyline(t, n, step_length, step_angle)
def circle(t, r):
""" Draws circle with given radius - r.
t: Turtle
r: radius of the circle
"""
arc(t, r, 360)
def petal(t, r, angle):
"""
Draws a petal using two arcs.
t: Turtle
r: radius of the arc
angle: angle (degrees) for arc
"""
for i in range(2):
arc(t, r, angle)
t.lt(180-angle)
def flower(t, n, length):
    """ Draws a flower of n petals (each petal is two 60-degree arcs
    of radius length; the 60-degree angle is a common choice, not
    fixed by the original stub).
    t: Turtle
    n: number of petals
    length: radius of the petal arcs
    """
    for i in range(n):
        petal(t, length, 60)
        t.lt(360.0 / n)
bob = Turtle()
petal(bob, 10, 120)
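# A short usage sketch for the flower() completed above: draw a 7-petal
# flower next to the petal already drawn, then keep the window open.
flower(bob, 7, 60)
bob.screen.mainloop()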
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from astree.StatementSequence import StatementSequence
# AST represents a program's abstract syntax tree
class AST(StatementSequence):
@staticmethod
def preprocess(source):
# Add endline after ';'
source = source.replace(';', ';\n')
# Replace tabs with spaces
source = source.replace('\t', ' ')
# Replace 'if(' by 'if ('
source = source.replace('if(', 'if (')
# Replace 'while(' by 'while '
source = source.replace('while(', 'while (')
# Add endline before and after '{' '}'
source = source.replace('{', ' { ')
source = source.replace('}', ' } ')
source = re.sub(r'\s+\{\s+', '\n{\n', source)
source = re.sub(r'\s+\}\s+', '\n}\n', source)
# Remove extra endlines
source = re.sub(r'\n+', '\n', source)
# Remove extra whitespace
source = re.sub(r' +', ' ', source)
        # Remove all whitespace at beginning of line
source = re.sub(r'\n\s+', '\n', source)
# Remove all whitespace before endline
source = re.sub(r'\s+\n', '\n', source)
# Look for if and while to group their conditions in one line
lines = source.split('\n')
result = []
i = 0
while i < len(lines):
line = lines[i].strip()
if not line:
i += 1
elif line.startswith('if ') or line.startswith('while '):
j = i+1
while lines[j] != '{':
j += 1
result.append(' '.join([line.strip() for line in lines[i:j]]))
result.append('{')
i = j+1
else:
result.append(line)
i += 1
return '\n'.join(result)
if __name__ == "__main__":
source = """
0 : X := 2;
1 : if (X <= 0) {
2 : X := -X;
} else {
3 : X := 1 - X;
}
4 : if (X == 1) {
5 : X := 1;
6 : X := X + 1;
}
7 : Y := 5;
8 : while Y + X >= X
{
9 : Y := X - Y;
10 : X := Y + X;
11 : if ((X < 2) && (1 >= 0)) || true {
12 : var := 4*3;
}
}
"""
print(AST.parse(AST.preprocess(source)))
|
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
class Category(models.Model):
class Meta:
db_table = 'category'
category = models.CharField(max_length = 200)
slug = models.SlugField(unique = True)
    #display in the admin
def __unicode__(self):
return self.category
class Authors(models.Model):
class Meta:
db_table = 'authors'
name = models.CharField(max_length = 200)
slug = models.SlugField(unique = True)
    #display in the admin
def __unicode__(self):
return self.name
class Tags(models.Model):
class Meta:
db_table = 'tags'
tag_name = models.CharField(max_length = 100)
slug = models.SlugField(unique = True)
    #display in the admin
def __unicode__(self):
return self.tag_name
class Article(models.Model):
class Meta:
db_table = 'article'
article = models.CharField(max_length = 200)
text = models.TextField()
date = models.DateTimeField()
author = models.ForeignKey(Authors, blank=True, null=True)
#article category
article_cat = models.ForeignKey(Category, blank=True, null=True)
article_tag = models.ManyToManyField(Tags, blank=True)
slug = models.SlugField(unique = True)
    #display in the admin
def __unicode__(self):
return self.article
# Create your models here.
|
a = int(input("Enter a value for variable a: "))
if (a == 0):
    print("The value of variable a is 0")
elif (a == 1):
    print("The value of variable a is 1")
else:
    print("The value of variable a is neither 0 nor 1")
|
# _*_ coding: utf-8 _*_
import random
game_count = 0
all_counts = []
while True:
    game_count += 1
    guess_count = 0
    answer = random.randint(0,99)
    while True:
        guess = int(input("Guess a number (0-99): "))
        guess_count += 1
        if guess == answer:
            print("Congratulations, you guessed it!")
            print("You guessed " + str(guess_count) + " times in total")
            all_counts.append(guess_count)
            break
        elif guess > answer:
            print("Your guess is too high")
        else:
            print("Your guess is too low")
    onemore = input("Play another round (Y/N)? ")
    if onemore != 'Y' and onemore != 'y':
        print("Thanks for playing, come back soon!")
        print("Your results:")
        print(all_counts)
        print("Average guesses to win: " + str(sum(all_counts)/float(len(all_counts))))
        break
|
# uses randint()
import random
# define the Encrypt class
class Encrypt:
    def __init__(self):
        self.setcode()
    def setcode(self):
        # pick the a and b values
        # (note: the substitution table is only invertible when a is coprime
        # with 26, which randint(0, 9) does not guarantee)
        a = random.randint(0, 9)
        print(a) # print a
        b = random.randint(0, 9)
        print(b) # print b
        # build the cipher table from the formula
        self.code = ""
        c = "a"
        i = 0
        while i < 26:
            x = c
            y = ord(x) * a + b
            m = y % 26
            self.code += chr(m + 97)
            c = chr(ord(c) + 1)
            i += 1
    def getcode(self):
        return self.code
    # encoding method (still a stub in this installment)
    def toEncode(self, str):
        pass
    # decoding method (still a stub in this installment)
    def toDecode(self, str):
        pass
# test section
if __name__ == '__main__':
    e = Encrypt()
    print()
    print(e.getcode())
    print()
# file: encrypt04.py
# author: Kaiching Chang
# date: July, 2014
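# A possible sketch (not part of the original tutorial file) of the two stub
# methods above, written as standalone helpers over an Encrypt instance.
# Decoding assumes the table is a bijection, i.e. that a happened to be
# coprime with 26.
def to_encode_sketch(e, text):
    # map each lowercase letter through the table, pass other characters through
    return "".join(e.code[ord(ch) - 97] if ch.islower() else ch for ch in text)
def to_decode_sketch(e, text):
    # invert the table by locating each letter's position in it
    return "".join(chr(e.code.index(ch) + 97) if ch.islower() else ch for ch in text)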
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 01:55:03 2020
@author: sumant
"""
while True:
try:
print()
print("Welcome To My Calculator")
print("========================")
print("1. ADD")
print("2. SUB")
print("3. MUL")
print("4. DIV")
print("0. Exit")
option=int(input("Please Choose Operation: "))
        if option > 4 or option < 0:
            print("Invalid key operation.")
            break
        elif option == 0:
            print("Bye, welcome back!")
            break
try:
num1=int(input("Enter number1: "))
num2=int(input("Enter number2: "))
if option == 1:
print(num1+num2)
elif option == 2:
print(num1-num2)
elif option == 3:
print(num1*num2)
elif option == 4:
if num2 != 0:
print(num1/num2)
else:
print("Can`t divided by zero")
else:
print("Invalid Key Operation.")
except(ZeroDivisionError,ValueError) as e:
print("Error occured",e)
choice = input("Do you want to perform more (yes or no)? ")
if choice.lower() != 'yes':
print("Bye Catch up later..!")
break
    except ValueError:
        print("Error occurred: invalid literal")
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Find the contiguous subarray within an array (containing at least one number) which has the largest product.
# For example, given the array [2,3,-2,4],
# the contiguous subarray [2,3] has the largest product = 6.
# Note: this first solution fails when 0 is in nums.
class Solution(object):
def maxProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
largest_product = max(nums)
cur_product = nums[0]
negative_product = 1
for i in range(1, len(nums)):
if cur_product * nums[i] <= cur_product:
if negative_product == 1:
negative_product = cur_product * nums[i]
cur_product = 1
else:
cur_product = negative_product * cur_product * nums[i]
negative_product = 1
else:
cur_product *= nums[i]
if cur_product > largest_product:
largest_product = cur_product
return largest_product
# Use swap(max_, min_) to handle negative
# Use max() and min() to handle 0.
class Solution(object):
def maxProduct(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
max_, min_ = nums[0], nums[0]
largest_product = nums[0]
for num in nums[1:]:
if num < 0:
max_, min_ = min_, max_
max_ = max(num, max_ * num)
min_ = min(num, min_ * num)
largest_product = max(max_, largest_product)
return largest_product
if __name__ == '__main__':
print(Solution().maxProduct([2, 3, -2, 4]))
print(Solution().maxProduct([0, 2]))
print(Solution().maxProduct([-1, 0, -2]))
# Concise solution.
def maxProduct(nums):
maximum = big = small = nums[0]
for n in nums[1:]:
big, small = max(n, n * big, n * small), min(n, n * big, n * small)
# Just like a, b = b, a
maximum = max(maximum, big)
return maximum
# But note: the following variant produces wrong results.
def maxProduct(self, nums):
maximum = big = small = nums[0]
for n in nums[1:]:
big = max(n, n * big, n * small)
small = min(n, n * big, n * small)
        # Because big has already been updated, small is computed from the wrong value.
# In [257]: dis.dis('a=1; a, b = 0, a')
# 1 0 LOAD_CONST 0 (1)
# 3 STORE_NAME 0 (a)
# 6 LOAD_CONST 1 (0)
# 9 LOAD_NAME 0 (a)
# 12 ROT_TWO
# 13 STORE_NAME 0 (a)
# 16 STORE_NAME 1 (b)
# 19 LOAD_CONST 2 (None)
# 22 RETURN_VALUE
maximum = max(maximum, big)
return maximum
# Notice
a = [1]
b = a
b += [1] # Then a = [1,1]
b = b + [1] # Then a = [1]
class Solution2:
# @param A, a list of integers
# @return an integer
def maxProduct(self, A):
global_max, local_max, local_min = float("-inf"), 1, 1
for x in A:
local_max = max(1, local_max) # To handle the zero.
if x > 0:
local_max, local_min = local_max * x, local_min * x
else:
local_max, local_min = local_min * x, local_max * x
global_max = max(global_max, local_max)
return global_max
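# Quick checks of the robust variant above:
if __name__ == '__main__':
    print(Solution2().maxProduct([2, 3, -2, 4]))  # 6
    print(Solution2().maxProduct([0, 2]))         # 2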
|
from manim import *
import random
WIN = 'W'
LOSS = 'L'
WIN_LOSS_LIST = ([WIN]*33) + ([LOSS]*66)
WIN_PROFIT = 75
LOSS_PROFIT = -25
def get_profit(result):
return WIN_PROFIT if result == WIN else LOSS_PROFIT
def s_tex(*args, **kwargs):
return Tex(*args, **kwargs).scale(0.6)
def fischer_yates(items):
    # in-place Fisher-Yates shuffle; returns the shuffled list
    for i in range(len(items)):
        index = random.randrange(i, len(items))
        items[i], items[index] = items[index], items[i]
    return items
class Profit(Scene):
def construct(self):
plane = Axes(
x_range=(0, 100, 10),
y_range=(-300, 1200, 100),
x_length=9,
y_length=6,
axis_config={
"include_numbers": True,
"number_scale_value": 0.6
},
)
plane.center()
win_loss_sequence = fischer_yates(WIN_LOSS_LIST)
total = 0
profit = [0]
for i in range(0, 99):
total += get_profit(win_loss_sequence[i])
profit.append(total)
line_graph = plane.get_line_graph(
x_values=[i for i in range(0, 100)],
y_values=profit,
stroke_width=1,
line_color=WHITE,
vertex_dot_radius=0.02
)
x_label = plane.get_x_axis_label(s_tex("rounds"), edge=DOWN, direction=DOWN) \
.shift(0.25*DOWN)
y_label = plane.get_y_axis_label(s_tex("profit (cumulative)"), edge=UP, direction=UP) \
.shift(0.4*RIGHT)
self.play(FadeIn(plane, line_graph, x_label, y_label))
self.wait(1)
avg_line = plane.get_line_graph(
x_values=[0, 99],
y_values=[0, total],
vertex_dot_radius=0.05
)
self.play(FadeIn(avg_line))
self.wait(1)
label = s_tex(
"slope = ",
r"$\dfrac{\textrm{(total profit)}}{\textrm{(rounds)}}$",
"= avg. profit").shift(2*UP)
arrow = Arrow(
stroke_width=1,
max_tip_length_to_length_ratio=0.1,
start=label.get_bottom(),
end=label.get_bottom() + 2*DOWN)
self.play(FadeIn(label, arrow))
self.wait(1)
goal_tex = s_tex("Goal: avg. profit > 0")
goal = Group(goal_tex, SurroundingRectangle(goal_tex)).shift(3.5*UP)
self.play(FadeIn(goal))
self.wait(1)
self.play(FadeOut(plane, line_graph, avg_line, x_label, y_label, label, arrow))
self.wait(2)
|
# inject the lib folder before everything else
from typing import Optional
from waitlist.base import db
from waitlist.permissions.manager import StaticRoles
from waitlist.storage.database import Account, Character, Role, APICacheCharacterInfo
from waitlist.utility.utils import get_random_token
import waitlist.utility.outgate as outgate
if __name__ == '__main__':
name = input("Login Name:")
print("Creating Account")
acc = Account()
acc.username = name
acc.login_token = get_random_token(16)
print("Account created")
admin_role = db.session.query(Role).filter(Role.name == StaticRoles.ADMIN).first()
acc.roles.append(admin_role)
db.session.add(acc)
print(acc.login_token)
char_name = "--"
list_eveids = []
while char_name:
char_name = input("Enter Character to associate with this account:")
char_name = char_name.strip()
if not char_name:
break
char_info: Optional[APICacheCharacterInfo] = outgate.character.get_info_by_name(char_name)
char_id = char_info.id
# assign this too because his name could have had wrong case
char_name = char_info.characterName
character: Character = db.session.query(Character).get(char_id)
if character is None:
character = Character()
character.eve_name = char_name
character.id = char_id
print("Added "+character.__repr__())
list_eveids.append(char_id)
acc.characters.append(character)
db.session.commit()
is_valid = False
char_id = None
while not is_valid:
char_id = int(input("Enter charid to set as active char out of "+", ".join([str(i) for i in list_eveids])+":"))
for posid in list_eveids:
if posid == char_id:
is_valid = True
break
acc.current_char = char_id
db.session.commit()
print("Admin Account created!")
|
import requests
from flask import Flask
app = Flask(__name__)
SENSOR_NAMING = {
"10-000802e4e190": "Raspberry Pi",
"10-000802e42e3f": "Raumtemperatur",
}
BASE_URL = "http://192.168.1.14:9090/api/v1/"
@app.route("/")
def hello():
res = requests.get(BASE_URL + "query?query=sensor_temperature_in_celsius")
ret = ""
ret_values = []
for sensor in res.json()["data"]["result"]:
name = SENSOR_NAMING[sensor["metric"]["sensor"]]
value = sensor["value"][1]
ret_values.append((name, value))
ret_values = sorted(ret_values)
for name, value in ret_values:
ret += "%s: %s</br>" % (name, value)
return "Hello World!</br>" + ret
if __name__ == "__main__":
app.run(host='0.0.0.0')
|
from aimacode.logic import PropKB
from aimacode.planning import Action
from aimacode.search import (
Node, Problem,
)
from aimacode.utils import expr
from lp_utils import (
FluentState, encode_state, decode_state, conjunctive_sentence,
)
from my_planning_graph import PlanningGraph
from functools import lru_cache
import logging
#uncomment for debugging
#logging.basicConfig(filename='AirCargoProblem.log',level=logging.DEBUG)
class AirCargoProblem(Problem):
def __init__(self, cargos, planes, airports, initial: FluentState, goal: list):
"""
:param cargos: list of str
cargos in the problem
:param planes: list of str
planes in the problem
:param airports: list of str
airports in the problem
:param initial: FluentState object
positive and negative literal fluents (as expr) describing initial state
:param goal: list of expr
literal fluents required for goal test
"""
self.state_map = initial.pos + initial.neg
self.initial_state_TF = encode_state(initial, self.state_map)
Problem.__init__(self, self.initial_state_TF, goal=goal)
self.cargos = cargos
self.planes = planes
self.airports = airports
self.actions_list = self.get_actions()
def get_actions(self):
"""
This method creates concrete actions (no variables) for all actions in the problem
domain action schema and turns them into complete Action objects as defined in the
aimacode.planning module. It is computationally expensive to call this method directly;
however, it is called in the constructor and the results cached in the `actions_list` property.
Returns:
----------
list<Action>
list of Action objects
"""
# TODO create concrete Action objects based on the domain action schema for: Load, Unload, and Fly
# concrete actions definition: specific literal action that does not include variables as with the schema
# for example, the action schema 'Load(c, p, a)' can represent the concrete actions 'Load(C1, P1, SFO)'
# or 'Load(C2, P2, JFK)'. The actions for the planning problem must be concrete because the problems in
# forward search and Planning Graphs must use Propositional Logic
def load_actions():
"""Create all concrete Load actions and return a list
:return: list of Action objects
"""
loads = []
for c in self.cargos:
for p in self.planes:
for a in self.airports:
#PRECOND:At(c, a) ∧ At(p, a) ∧ Cargo(c) ∧ Plane(p) ∧ Airport(a)
#ignore the unary ones
precond_pos = [expr("At({},{})".format(c,a)), expr("At({},{})".format(p,a))]
precond_neg = []
#EFFECT: ¬ At(c, a) ∧ In(c, p))
effect_add = [expr("In({},{})".format(c,p))]
effect_rem = [expr("At({},{})".format(c,a))]
#Load(c, p, a)
act = Action(expr("Load({},{},{})".format(c,p,a)),
[precond_pos, precond_neg], [effect_add, effect_rem])
loads.append(act)
return loads
def unload_actions():
"""Create all concrete Unload actions and return a list
:return: list of Action objects
"""
unloads = []
for c in self.cargos:
for p in self.planes:
for a in self.airports:
#PRECOND: In(c, p) ∧ At(p, a) ∧ Cargo(c) ∧ Plane(p) ∧ Airport(a)
precond_pos = [expr("In({},{})".format(c,p)),
expr("At({},{})".format(p,a))]
precond_neg = []
#EFFECT: At(c, a) ∧ ¬ In(c, p))
effect_add = [expr("At({},{})".format(c,a))]
effect_rem = [expr("In({},{})".format(c,p))]
#Unload(c, p, a)
act = Action(expr("Unload({},{},{})".format(c,p,a)),
[precond_pos, precond_neg],
[effect_add, effect_rem])
unloads.append(act)
return unloads
def fly_actions():
"""Create all concrete Fly actions and return a list
:return: list of Action objects
"""
flys = []
for fr in self.airports:
for to in self.airports:
if fr != to:
for p in self.planes:
precond_pos = [expr("At({}, {})".format(p, fr)),
]
precond_neg = []
effect_add = [expr("At({}, {})".format(p, to))]
effect_rem = [expr("At({}, {})".format(p, fr))]
fly = Action(expr("Fly({}, {}, {})".format(p, fr, to)),
[precond_pos, precond_neg],
[effect_add, effect_rem])
flys.append(fly)
return flys
return load_actions() + unload_actions() + fly_actions()
def check_precond(self, kb, action):
"""Checks if the precondition is satisfied in the current state"""
assert isinstance(kb, PropKB), "kb is not of type PropKB: %r" % kb
# check for positive clauses
for clause in action.precond_pos:
if clause not in kb.clauses:
return False
# check for negative clauses
for clause in action.precond_neg:
if clause in kb.clauses:
return False
        logging.debug("All preconditions satisfied for action=%r", action)
return True
def apply_action(self, kb, action):
# check if the preconditions are satisfied
#if not self.check_precond(kb, action):
# raise Exception("Action pre-conditions not satisfied")
# remove negative litera
for clause in action.effect_rem:
kb.retract(clause)
# add positive literals
for clause in action.effect_add:
kb.tell(clause)
def actions(self, state: str) -> list:
""" Return the actions that can be executed in the given state.
:param state: str
state represented as T/F string of mapped fluents (state variables)
e.g. 'FTTTFF'
:return: list of Action objects
"""
#The actions that are applicable to a state are all those whose preconditions are satisfied.
kb = PropKB()
kb.tell(decode_state(state, self.state_map).pos_sentence())
possible_actions = [ a for a in self.actions_list if a.check_precond(kb, a.args) ]
return possible_actions
def result(self, state: str, action: Action):
""" Return the state that results from executing the given
action in the given state. The action must be one of
self.actions(state).
:param state: state entering node
:param action: Action applied
:return: resulting state after action
"""
        assert state is not None, "state passed to Problem.result is None"
        assert action is not None, "action passed to Problem.result is None"
        new_state = FluentState([], [])
        #decode the fluents of the current state
old_fs = decode_state(state, self.state_map)
#compute the new state's positive fluents by applying the actions
kb = PropKB()
#initialize a KB with positive fluents of old state.
kb.tell(old_fs.pos_sentence())
# apply the rem and add effects of the action onto the KB
self.apply_action(kb,action)
# the positive fluents of new state are in KB
new_state.pos = kb.clauses
logging.debug("Positive Fluents of New State: %r", new_state.pos)
#encode_state only cares about the positive fluents in new_state
#anything in state_map which is not in new_state.pos is assigned 'F'
return encode_state(new_state, self.state_map)
def goal_test(self, state: str) -> bool:
""" Test the state to see if goal is reached
:param state: str representing state
:return: bool
"""
kb = PropKB()
kb.tell(decode_state(state, self.state_map).pos_sentence())
for clause in self.goal:
if clause not in kb.clauses:
return False
return True
def h_1(self, node: Node):
# note that this is not a true heuristic
h_const = 1
return h_const
@lru_cache(maxsize=8192)
def h_pg_levelsum(self, node: Node):
"""This heuristic uses a planning graph representation of the problem
state space to estimate the sum of all actions that must be carried
out from the current state in order to satisfy each individual goal
condition.
"""
# requires implemented PlanningGraph class
pg = PlanningGraph(self, node.state)
pg_levelsum = pg.h_levelsum()
return pg_levelsum
@lru_cache(maxsize=8192)
def h_ignore_preconditions(self, node: Node):
"""This heuristic estimates the minimum number of actions that must be
carried out from the current state in order to satisfy all of the goal
conditions by ignoring the preconditions required for an action to be
executed.
"""
        assert node is not None, "Node passed to h_ignore_preconditions is None"
# implement (see Russell-Norvig Ed-3 10.2.3 or Russell-Norvig Ed-2 11.2)
count = 0
kb = PropKB()
# Add the positive sentences of current state to KB
kb.tell(decode_state(node.state, self.state_map).pos_sentence())
for clause in self.goal:
if clause not in kb.clauses:
count += 1
return count
"""
-Init(At(C1, SFO) ∧ At(C2, JFK)
∧ At(P1, SFO) ∧ At(P2, JFK)
∧ Cargo(C1) ∧ Cargo(C2)
∧ Plane(P1) ∧ Plane(P2)
∧ Airport(JFK) ∧ Airport(SFO))
Goal(At(C1, JFK) ∧ At(C2, SFO))
"""
def air_cargo_p1() -> AirCargoProblem:
cargos = ['C1', 'C2']
planes = ['P1', 'P2']
airports = ['JFK', 'SFO']
pos = [expr('At(C1, SFO)'),
expr('At(C2, JFK)'),
expr('At(P1, SFO)'),
expr('At(P2, JFK)'),
]
neg = [expr('At(C2, SFO)'),
expr('In(C2, P1)'),
expr('In(C2, P2)'),
expr('At(C1, JFK)'),
expr('In(C1, P1)'),
expr('In(C1, P2)'),
expr('At(P1, JFK)'),
expr('At(P2, SFO)'),
]
init = FluentState(pos, neg)
goal = [expr('At(C1, JFK)'),
expr('At(C2, SFO)'),
]
return AirCargoProblem(cargos, planes, airports, init, goal)
"""
- Problem 2 initial state and goal:
Init(At(C1, SFO) ∧ At(C2, JFK) ∧ At(C3, ATL)
∧ At(P1, SFO) ∧ At(P2, JFK) ∧ At(P3, ATL)
∧ Cargo(C1) ∧ Cargo(C2) ∧ Cargo(C3)
∧ Plane(P1) ∧ Plane(P2) ∧ Plane(P3)
∧ Airport(JFK) ∧ Airport(SFO) ∧ Airport(ATL))
Goal(At(C1, JFK) ∧ At(C2, SFO) ∧ At(C3, SFO))
"""
def air_cargo_p2() -> AirCargoProblem:
cargos = ['C1', 'C2', 'C3']
planes = ['P1', 'P2', 'P3']
airports = ['JFK', 'SFO', 'ATL']
pos = [expr('At(C1, SFO)'),
expr('At(C2, JFK)'),
expr('At(C3, ATL)'),
expr('At(P1, SFO)'),
expr('At(P2, JFK)'),
expr('At(P3, ATL)'),
]
neg = [expr('At(C2, SFO)'),
expr('At(C2, ATL)'),
expr('In(C2, P1)'),
expr('In(C2, P2)'),
expr('In(C2, P3)'),
expr('At(C1, JFK)'),
expr('At(C1, ATL)'),
expr('In(C1, P1)'),
expr('In(C1, P2)'),
expr('In(C1, P3)'),
expr('At(C3, SFO)'),
expr('At(C3, JFK)'),
expr('In(C3, P1)'),
expr('In(C3, P2)'),
expr('In(C3, P3)'),
expr('At(P1, JFK)'),
expr('At(P1, ATL)'),
expr('At(P2, SFO)'),
expr('At(P2, ATL)'),
expr('At(P3, SFO)'),
expr('At(P3, JFK)'),
]
init = FluentState(pos, neg)
goal = [expr('At(C1, JFK)'),
expr('At(C2, SFO)'),
expr('At(C3, SFO)'),
]
return AirCargoProblem(cargos, planes, airports, init, goal)
"""
- Problem 3 initial state and goal:
Init(At(C1, SFO) ∧ At(C2, JFK) ∧ At(C3, ATL) ∧ At(C4, ORD)
∧ At(P1, SFO) ∧ At(P2, JFK)
∧ Cargo(C1) ∧ Cargo(C2) ∧ Cargo(C3) ∧ Cargo(C4)
∧ Plane(P1) ∧ Plane(P2)
∧ Airport(JFK) ∧ Airport(SFO) ∧ Airport(ATL) ∧ Airport(ORD))
Goal(At(C1, JFK) ∧ At(C3, JFK) ∧ At(C2, SFO) ∧ At(C4, SFO))
"""
def air_cargo_p3() -> AirCargoProblem:
cargos = ['C1', 'C2', 'C3','C4']
planes = ['P1', 'P2']
airports = ['JFK', 'SFO','ATL','ORD']
pos = [expr('At(C1, SFO)'),
expr('At(C2, JFK)'),
expr('At(C3, ATL)'),
expr('At(C4, ORD)'),
expr('At(P1, SFO)'),
expr('At(P2, JFK)'),
]
neg = [expr('At(C1, JFK)'),
expr('At(C1, ATL)'),
expr('At(C1, ORD)'),
expr('In(C1, P1)'),
expr('In(C1, P2)'),
expr('At(C2, SFO)'),
expr('At(C2, ATL)'),
expr('At(C2, ORD)'),
expr('In(C2, P1)'),
expr('In(C2, P2)'),
expr('At(C3, SFO)'),
expr('At(C3, JFK)'),
           expr('At(C3, ORD)'),
expr('In(C3, P1)'),
expr('In(C3, P2)'),
expr('At(C4, SFO)'),
expr('At(C4, JFK)'),
expr('At(C4, ATL)'),
expr('In(C4, P1)'),
expr('In(C4, P2)'),
expr('At(P1, JFK)'),
expr('At(P1, ATL)'),
expr('At(P1, ORD)'),
expr('At(P2, SFO)'),
expr('At(P2, ATL)'),
expr('At(P2, ORD)'),
]
init = FluentState(pos, neg)
goal = [expr('At(C1, JFK)'),
expr('At(C2, SFO)'),
expr('At(C3, JFK)'),
expr('At(C4, SFO)'),
]
return AirCargoProblem(cargos, planes, airports, init, goal)
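# A quick smoke test (a sketch, not part of the original module):
if __name__ == '__main__':
    p1 = air_cargo_p1()
    # 2 cargos x 2 planes x 2 airports gives 8 Load + 8 Unload + 4 Fly = 20 actions
    print(len(p1.actions_list))
    print(p1.goal_test(p1.initial))  # False: the goal does not hold in the initial state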
|
from model import Todo
import motor.motor_asyncio
client = motor.motor_asyncio.AsyncIOMotorClient('mongodb://root:example@localhost:27017/TodoList?authSource=admin')
database = client.TodoList
collection = database.todo
async def fetchOneTodo(title):
document = await collection.find_one({"title":title})
return document
async def fetchAllTodos():
todos = []
cursor = collection.find({})
async for document in cursor:
todos.append(Todo(**document))
return todos
async def createTodo(todo):
document = todo
result = await collection.insert_one(document)
return result
async def updateTodo(title,desc):
await collection.update_one({"title":title}, {"$set":{
"description":desc}})
document = await collection.find_one({"title":title})
return document
async def removeTodo(title):
await collection.delete_one({"title":title})
return True
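# A minimal usage sketch (assumes the MongoDB instance configured above is
# running; left commented out so importing this module stays side-effect free):
#
#   import asyncio
#   asyncio.run(createTodo({"title": "demo", "description": "try the helpers"}))
#   print(asyncio.run(fetchOneTodo("demo")))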
|
#
#==================================================================================================
# Jasy Template
# Copyright 2013 Sebastian Fastner
#--------------------------------------------------------------------------------------------------
# Based upon
# Core - JavaScript Foundation
# Copyright 2010-2012 Zynga Inc.
# Copyright 2012-2013 Sebastian Werner
#--------------------------------------------------------------------------------------------------
# Based on the work of:
# Hogan.JS by Twitter, Inc.
# https://github.com/twitter/hogan.js
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#==================================================================================================
#
__all__ = ["compile"]
import Parser
accessTags = [
"#", # go into section / loop start
"?", # if / has
"^", # if not / has not
"$", # insert variable
"=" # insert raw / non escaped
]
# Tags which support children
innerTags = [
"#",
"?",
"^"
]
def escapeContent(content):
return content.replace("\"", "\\\"").replace("\n", "\\n")
def escapeMatcher(str):
return str.replace("\\", "\\\\").replace("\"", "\\\"").replace("\n", "\\\n").replace("\r", "\\\r")
def walk(node, labels, nostrip):
code = ""
for current in node:
if type(current) == str:
code += 'buf+="' + escapeMatcher(current) + '";'
elif current["tag"] == "\n":
code += 'buf+="\\n";'
else:
tag = current["tag"]
name = current["name"]
escaped = escapeMatcher(name)
if tag in accessTags:
if name == ".":
accessor = 2
elif "." in name:
accessor = 1
else:
accessor = 0
accessorCode = '"' + escaped + '",' + str(accessor) + ',data'
if tag in innerTags:
innerCode = walk(current["nodes"], labels, nostrip)
if tag == "?":
code += 'if(this._has(' + accessorCode + ')){' + innerCode + '}'
elif tag == "^":
code += 'if(!this._has(' + accessorCode + ')){' + innerCode + '}'
elif tag == "#":
code += 'this._section(' + accessorCode + ',partials,labels,function(data,partials,labels){' + innerCode + '});'
elif tag == "=":
code += 'buf+=this._data(' + accessorCode + ');'
elif tag == "$":
                code += 'buf+=this._variable(' + accessorCode + ');'
elif tag == ">":
code += 'buf+=this._partial("' + escaped + '",data,partials,labels);'
elif tag == "_":
if labels and escaped in labels:
                    code += walk(Parser.parse(labels[escaped], True), labels, nostrip)  # walk() requires the nostrip flag
else:
code += 'buf+=this._label("' + escaped + '",data,partials,labels);'
return code
def compile(text, labels=None, nostrip=False, name=None):
    # labels maps label names to template snippets; default to a fresh dict
    # instead of a shared mutable default argument
    if labels is None:
        labels = {}
    tree = Parser.parse(text, nostrip)
    wrapped = escapeContent('var buf="";' + walk(tree, labels, nostrip) + 'return buf;')
if name:
name = escapeContent("\"%s\"" % name)
else:
name = "null"
text = escapeContent(text)
return "new core.template.Template(new Function('data', 'partials', 'labels', \"%s\"), \"%s\", %s);" % (wrapped, text, name)
|
from enum import Enum
class FitnessConfig(Enum):
LOWEST = 0
AVG = 1
HIGHEST = 2
|
from clpy.types.list import make_list
from clpy.types.vector import make_vector
from clpy.types.number import make_int
from clpy.types.string import make_str
class Space(object):
def __init__(self):
pass
def eq(self, a, b):
return a.eq(b)
def hash(self, a):
return a.hash()
def make_list(self, v):
return make_list(self, v)
def make_vector(self, v):
return make_vector(self, v)
def make_int(self, v):
return make_int(self, v)
def make_str(self, v):
return make_str(self, v)
|
import numpy as np
from pypm import ICP
from picp.main import icp_config, art
from picp.plot_trajectory import icp_p_to_gaussian
from picp.simulator.maps import create_basic_hallway, from_ascii_art
from picp.simulator.scan_generator import ScanGenerator
from picp.util.pose import Pose
import matplotlib.pyplot as plt
def icp_plane_penalties_config():
conf = icp_config()
conf["referenceDataPointsFilters"].append({"SurfaceNormalDataPointsFilter": {"knn": 5}})
conf["errorMinimizer"] = {"PointToPlaneWithPenaltiesErrorMinimizer": {}}
return conf
def icp_covariance(knn=5):
conf = ICP.BASIC_CONFIG.copy()
#conf['matcher']['KDTreeMatcher']['knn'] = knn
conf['transformationCheckers'][0]['CounterTransformationChecker']['maxIterationCount'] = 40
sensor_noise = lambda cov: [
{"SimpleSensorNoiseDataPointsFilter": {"sensorType": 0, # For LMS-150
"covariance": cov}}
]
discretisation_est = [{"SurfaceCovarianceDataPointsFilter": {"knn": knn}},
{"DecomposeCovarianceDataPointsFilter": {"keepNormals": 0}}
]
conf["readingDataPointsFilters"] = sensor_noise(0)
conf["referenceDataPointsFilters"] = sensor_noise(1) + discretisation_est
conf["outlierFilters"] = [{"SensorNoiseOutlierFilter": {}}]
conf["errorMinimizer"] = {"PointToPointWithPenaltiesErrorMinimizer": {"confidenceInPenalties": 0.5}}
return conf
# def icp_p_to_gaussian(knn=5):
# conf = icp_covariance(knn=knn)
# conf["errorMinimizer"] = {"PointToGaussianErrorMinimizer": {}}
# return conf
if __name__ == "__main__":
# origin = Pose()
# orientation = np.deg2rad(55)
#
# walls = create_basic_hallway(orientation)
# sg = ScanGenerator(walls, nb_beam=180)
# ref = sg.generate(origin).transpose()
orientation = np.deg2rad(0)
walls, poses = from_ascii_art(art, orientation=orientation)
sg = ScanGenerator(walls, nb_beam=180)
print("Generating map...")
ref = sg.generate(poses[0], check_cache=True).transpose()
# ref = np.array([[5, 5]])
max_v = 20
# experiments = [("P2Gaussian knn=10", icp_p_to_gaussian(knn=10), max_v),
# ("P2Gaussian knn=20", icp_p_to_gaussian(knn=20), max_v),
# ("P2Gaussian knn=30", icp_p_to_gaussian(knn=30), max_v)
# ]
experiments = [
("P2Gaussian knn=5", icp_p_to_gaussian(knn=5), max_v),
("P2Gaussian knn=10", icp_p_to_gaussian(knn=10), max_v),
("P2Gaussian knn=20", icp_p_to_gaussian(knn=20), max_v)
]
# experiments = [("P2Point", ICP.BASIC_CONFIG, 7),
# ("P2Plan", icp_plane_penalties_config(), 7),
# ("P2Gaussian", icp_p_to_gaussian(), 1000)]
# np.set_printoptions(threshold=np.nan)
# print(residuals)
fig = plt.figure()
icp = ICP()
for i, (label, config, max_value) in enumerate(experiments):
icp.load_from_dict(config)
print(f"Computing `{label}` max_val:`{max_value}`")
nb_sample = 300
residuals = icp.compute_residual_function(ref, [-8, -3], [13, 4], nb_sample)
# residuals = icp.compute_residual_function(ref, [-4, -4], [4, 4], 100)
min_value = 0
ax = fig.add_subplot(1, len(experiments), i + 1)
ax.set_title(label)
ax.axis('equal')
residual = np.clip(residuals[:, 2], min_value, max_value)
# residual = residuals[:, 2]
hb = ax.hexbin(residuals[:, 0], residuals[:, 1], residual, gridsize=2 * nb_sample // 3, cmap='inferno')
fig.colorbar(hb, ax=ax, extend='max')
plt.show()
|
# A class is the blueprint of an object
class FirstClass:
    """This is the docstring"""  # It can be accessed using the __doc__ attribute
    a = 10  # class attribute
    def __init__(self):
        self.b = 90  # must be self.b; a bare 'b = 90' is just a local variable
    def printtext(self):
        print("I am function")
    def printfunc(self):
        print("I am Function")
fir = FirstClass()
print(fir.b)
print(fir.__doc__)
fir.printfunc()  # call directly; printing the result would just show None
|
from scipy import optimize
# We define our function for later use when solving for Steady State:
def solve_for_ss(n,tau,rho,alpha):
""" solve for the steady state level of capital-per-worker
Args:
        n (float): population growth rate
        tau (float): taxation (fraction of income)
        rho (float): patience parameter
        alpha (float): cobb-douglas parameter
Returns:
result (RootResults): the solution represented as a RootResults object
"""
# Define objective function
f = lambda k: k**alpha
obj_kss = lambda kss: kss - ((1/((2+rho)*(1+n)))*(1-tau)*(1-alpha)*f(kss)-((1+rho)/(2+rho))*tau*((1-alpha)/alpha)*kss)
# Call root finder
result = optimize.root_scalar(obj_kss,bracket=[0.1,100],method='bisect')
return result
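# A minimal usage sketch (the parameter values are illustrative assumptions):
ss = solve_for_ss(n=0.02, tau=0.1, rho=0.03, alpha=1/3)
print(ss.root)       # steady-state capital per worker (about 0.13 here)
print(ss.converged)  # True if the bisection converged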
|
#Appendix:
import numpy as np
import os
import cv2
import time
import matplotlib.pyplot as plt
from scipy import interpolate
def eqHist(original):
image = original.copy()
image[:,:,0] = cv2.equalizeHist(image[:,:,0])
image[:,:,1] = cv2.equalizeHist(image[:,:,1])
image[:,:,2] = cv2.equalizeHist(image[:,:,2])
return image
def f2d(original, kernel):
    # use '==' for string comparison ('is' checks identity, not equality)
    if kernel == 'box':
        kernel = np.ones((3, 3)) / 9  # 3x3 averaging (box) filter; the original string 'boxfilter' would crash filter2D
    if kernel == 'blur':
        kernel = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]])/16
    if kernel == 'sharpen':
        kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
    filt = cv2.filter2D(original, -1, kernel)
    return filt
def display(image):
while True:
cv2.imshow('frame',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
def write(image, name, mod):
cv2.imwrite(mod+"/" + mod + "_" + name, image)
#https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.html
def plotHist(image, imageName):
color = ('b','g','r')
for i,col in enumerate(color):
histr = cv2.calcHist([image],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.title(imageName)
plt.savefig('filt_hist/filt-hist_' + imageName)
def interp(image):
    # 2x upsampling by pixel replication plus linear interpolation of the gaps
    new = np.zeros(((image.shape[0]*2) - 1, (image.shape[1] * 2) - 1, image.shape[2]))
    new[::2,::2] = image[:,:,:]
    new[1::2] = (new[:-1:2] + new[2::2])/2
    new[:, 1::2] = (new[:, :-1:2] + new[:, 2::2]) / 2
    return new  # the original forgot to return the result
def medFilt(image):
return(cv2.medianBlur(image,3))
elapsed_times = []
for image in os.listdir("lab2"):
im = cv2.imread('lab2/' + image)
#time
start_time = time.time()
#histogram equalization
#histim = eqHist(im)
#plotHist(histim, image)
#write(histim, image, 'histeq')
# sharpen or blur
#f2dim = f2d(im, 'sharpen')
#f2dim = f2d(im, 'box')
#f2dim = f2d(im, 'blur')
#write(f2dim, image, 'sharpen')
#write(f2dim, image, 'blur')
#medF = medFilt(im)
#medF = medFilt(im)
#display(im)
#display(histim)
#display(f2dim)
#display(eqHist(f2dim))
#display(medF)
#write(medF, image, 'medFilter')
#write(f2dim, image, 'sharp_blur')
# plot original images to histogram
#plotHist(im, image)
interp(im)
#end time
elapsed_times.append(time.time() - start_time)
print(np.mean(elapsed_times))
|
from django.shortcuts import render, redirect
from django.contrib.auth import logout
from django.views.generic.base import TemplateView
from django.views.generic.base import View
from django.http import HttpResponse
def home(request):
return render(request, 'home.html')
def my_logout(request):
logout(request)
return redirect('home')
class HomePageView(TemplateView):
template_name = "home2.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        context['home3_var'] = 'my var in home3'
return context
class MyView(View):
def get(self, request, *args, **kwargs):
return HttpResponse('Hello, World GET!')
def post(self, request, *args, **kwargs):
return HttpResponse('Hello, World POST!')
|
from django.db import models
from pulp import *
import numpy as np
Nb_creneaux = 13
class Patient(models.Model):
nom = models.CharField(max_length=100)
motif = models.CharField(max_length=500)
jour = models.IntegerField()
medecin = models.IntegerField()
choix_1 = models.IntegerField()
choix_2 = models.IntegerField()
choix_3 = models.IntegerField()
affectation =models.BooleanField(default=False)
creneau = models.IntegerField(null=True)
#integer linear programming model
def plne(creneaux,t,pref,n):
x= LpVariable.dicts('créneaux',creneaux,0, 1,LpInteger)
y=LpVariable.dicts('assign',t,0,1,LpBinary)
    prob = LpProblem("Prise de rendez-vous", LpMinimize)  # LpMinimize comes from the star import; the bare 'pulp' name is never imported
prob += lpSum([pref[j][i]*y[(i,j)] for (i,j) in t])
for i in range(n):
prob+=lpSum(y[(i,j)] for j in range(Nb_creneaux))==1
for j in range(Nb_creneaux):
prob+=lpSum(y[(i,j)] for i in range(n))==x[j]
prob.solve()
return y
def Timetable(medecin,jour):
res=[]
n=Patient.objects.filter(medecin=medecin,jour=jour).count()
assert(n<=Nb_creneaux)
    pref= [[100 for i in range(n)] for j in range(Nb_creneaux)] #pref is the patients' preference matrix: pref[k][i] holds the rank patient i assigned to slot k
t=[(i,j) for i in range(n) for j in range(Nb_creneaux)]
creneaux = [i for i in range(Nb_creneaux)]
compt=0
lien = []
for patient in Patient.objects.filter(medecin=medecin,jour=jour):
if patient.affectation == False :
patient.save()
pref[int(patient.choix_1)-1][compt]=1
pref[int(patient.choix_2)-1][compt]=2
pref[int(patient.choix_3)-1][compt]=3
else :
            #this ensures that a patient's assignment can no longer move:
            #each time the administrator builds the timetable, existing
            #assignments are pinned in place
pref[patient.creneau-1][compt] = 0
lien.append(patient)
patient.affectation = True
compt+=1
y = plne(creneaux,t,pref,n)
temp=[0]*n
for i in range(n):
temp[i]=int(sum([(j+1)*y[(i,j)].value() for j in range(Nb_creneaux)]))
res.append([temp[i],lien[i].nom])
lien[i].creneau=temp[i]
lien[i].save()
return res
|
# https://www.hackerrank.com/contests/june-world-codesprint/challenges/equal-stacks
def diminu_sum(nums):
total = sum(nums)
for x in nums[-1::-1]:
total -= x
yield total
'''
def all_same(items):
return all(x == items[0] for x in items)
def all_same(items):
try:
iterator = iter(items)
first = next(iterator)
return all(first == rest for rest in iterator)
except StopIteration:
return True
'''
def all_same(items):
return len(set(items)) == 1
def max_equal_stacks(stacks):
    dimisums = []
    comparing_heights = []  # was missing, causing a NameError below
    n = len(stacks)  # was 'n = stacks', which breaks range(n) below
    for st in stacks:
        st_sum_gen = diminu_sum(st)
        dimisums.append(st_sum_gen)
        comparing_heights.append(sum(st))
    equal_height_found = all_same(comparing_heights)
    while not equal_height_found:
        for i in range(n):
            if comparing_heights[i] == max(comparing_heights):
                comparing_heights[i] = next(dimisums[i])
        equal_height_found = all_same(comparing_heights)  # re-check each pass, or the loop never ends
    return comparing_heights[0]
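# A quick smoke test (a sketch; note diminu_sum removes cylinders from the
# end of each list): all three stacks already have height 3.
print(max_equal_stacks([[1, 1, 1], [3], [2, 1]]))  # 3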
|
import numpy as np
def linear_kernel(X1, X2=None,**kwargs):
if X2 is None:
X2 = X1
K = X1.dot(X2.T)
return K
def polynomial_kernel(X1, X2=None, **kwargs):
degree = kwargs.get('degree',2)
if X2 is None:
X2 = X1
return (1 + linear_kernel(X1, X2))**degree
def rbf_kernel(X1, X2=None,**kwargs):
gamma = kwargs.get('gamma',1)
if X2 is None:
X2 = X1
X1_norm = np.sum(X1**2,axis=-1)
X2_norm = np.sum(X2**2,axis=-1)
X1_dot_X2 = np.matmul(X1, X2.T)
K = np.exp(- gamma * (X1_norm[:, None] + X2_norm[None, :] - 2 * X1_dot_X2))
return K
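# A minimal usage sketch: each Gram matrix K = k(X, X) should be square and
# symmetric for the kernels above.
if __name__ == "__main__":
    X = np.random.rand(5, 3)
    for kern in (linear_kernel, polynomial_kernel, rbf_kernel):
        K = kern(X)
        print(kern.__name__, K.shape, np.allclose(K, K.T))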
|
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def get_title_from_index(df, index):
    # despite the name, this returns the actor name from the "Actors" column
    return df[df.Index == index]["Actors"].values[0]
def get_index_from_title(df, actor):
return df[df.Actors == actor]["Index"].values[0]
def combine_features(row):
return str(row['Number of movies']) +" "+str(row['Number of awards'])+" "+row["Genres"]
def get_similar_actors(actor_name):
df = pd.read_csv("Datasets/actors_dataset.csv")
features = ['Number of movies','Number of awards','Genres']
for feature in features:
df[feature] = df[feature].fillna('')
df["combined_features"] = df.apply(combine_features,axis=1)
cv = CountVectorizer()
count_matrix = cv.fit_transform(df["combined_features"])
cosine_sim = cosine_similarity(count_matrix)
actor_index = get_index_from_title(df, actor_name)
similar_actors = list(enumerate(cosine_sim[actor_index]))
sorted_similar_actors = sorted(similar_actors,key=lambda x:x[1],reverse=True)
actors_list = []
i=0
for element in sorted_similar_actors:
actors_list.append(get_title_from_index(df, element[0]))
i=i+1
if i>5:
break
return actors_list
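# Hedged usage sketch (assumes Datasets/actors_dataset.csv exists with the
# columns used above; "Tom Hanks" is a made-up query value):
# for name in get_similar_actors("Tom Hanks"):
#     print(name)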
|
import random, math
import util
NUM_BITS_IN_NUM = 3
CITIES_NUM = 2 ** NUM_BITS_IN_NUM
class TravelingSalesman:
data = None
best_fitness = 9999999
count_fitness = 0
total_count_fitness = 10
def new_individual(self):
if not self.data:
self.data = []
for num in range(CITIES_NUM):
self.data.append({'id': num, 'x': random.randint(0, 200), 'y': random.randint(0, 200)})
v = range(CITIES_NUM)
random.shuffle(v)
ind = []
for x in v:
ind += self.int_to_bin(x)
return ind
def get_fitness(self, individual):
v = []
for i in xrange(CITIES_NUM):
number = int(''.join(individual[i * NUM_BITS_IN_NUM:(i + 1) * NUM_BITS_IN_NUM]), 2)
v.append(number)
tour_distance = 0
        for j, i in enumerate(v):
            from_city = self.data[i]
            # The next stop is the following city in the decoded tour,
            # wrapping back to the first city at the end
            if j + 1 < len(v):
                destination_city = self.data[v[j + 1]]
            else:
                destination_city = self.data[v[0]]
            tour_distance += math.sqrt((destination_city['x'] - from_city['x']) ** 2
                                       + (destination_city['y'] - from_city['y']) ** 2)
return float(tour_distance)
def int_to_bin(self, number):
return list(bin(number)[2:].zfill(NUM_BITS_IN_NUM))
    def validate_individual(self, individual):
        # Repair duplicates: keep the first occurrence of each decoded city,
        # then append any city ids that are missing from the tour
        v = []
for i in xrange(CITIES_NUM):
number = int(''.join(individual[i * NUM_BITS_IN_NUM:(i + 1) * NUM_BITS_IN_NUM]), 2)
if number not in v:
v.append(number)
for i in self.data:
if i['id'] not in v:
v.append(i['id'])
ind = []
for x in v:
ind += self.int_to_bin(x)
return True, ind
def num_bits(self):
return NUM_BITS_IN_NUM * CITIES_NUM
def is_finished(self, best):
f = self.get_fitness(best)
if f < self.best_fitness:
self.best_fitness = f
self.count_fitness = 0
elif f == self.best_fitness:
self.count_fitness += 1
if self.count_fitness == self.total_count_fitness:
return True
return False
    def show(self, individual):
        return 'path of god'
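# Hedged usage sketch (the GA driver presumably lives in the imported util
# module, whose interface is not shown, so only the problem class is exercised):
# problem = TravelingSalesman()
# individual = problem.new_individual()
# print(problem.get_fitness(individual))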
|
#
# @lc app=leetcode.cn id=90 lang=python3
#
# [90] Subsets II
#
# @lc code=start
from typing import List

class Solution:
def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
nums.sort()
res, track = [], []
def backtrack(nums, depth):
res.append(track[:])
for i in range(depth, len(nums)):
                # Skip same-valued siblings at this depth; they were already backtracked
if i > depth and nums[i] == nums[i-1]:
continue
track.append(nums[i])
backtrack(nums, i + 1)
track.pop()
backtrack(nums, 0)
return res
# @lc code=end
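# Quick check outside the LeetCode markers (a sketch):
# print(Solution().subsetsWithDup([1, 2, 2]))
# expected: [[], [1], [1, 2], [1, 2, 2], [2], [2, 2]]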
|
import tornado.web
import os
import config
from views import index
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/students1', index.Students1Handler),
(r'/students2', index.Students2Handler),
(r'/students3', index.Students3Handler),
(r'/home', index.HomeHandler),
(r'/(.*)$', index.StaticFileHandler,
{"path": os.path.join(config.BASE_DIRS, "static/html"), "default_filename": "index.html"})
]
super(Application,self).__init__(handlers, **config.settings)
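# Hedged launch sketch (assumes config.settings and config.BASE_DIRS exist in
# the local config module; port 8000 is an arbitrary choice):
# import tornado.ioloop
# if __name__ == '__main__':
#     Application().listen(8000)
#     tornado.ioloop.IOLoop.current().start()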
|
import tensorflow as tf
from tensorflow import keras
import numpy as np
def st(s1):
    # Load the serialized architecture and weights (reloaded on every call;
    # caching the model at module level would be faster)
    jsonFile = open("Fashion.json", "r")
    loadedModelJson = jsonFile.read()
    jsonFile.close()
    loadedModel = tf.keras.models.model_from_json(loadedModelJson)
    loadedModel.load_weights("Fashion.h5")
    loadedModel.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    gg = np.array(s1)
    test = np.array([gg])  # add a batch dimension
    pred = loadedModel.predict(test)
    k = np.argmax(pred[0])
    # Standard Fashion-MNIST label order; index 8 is 'Bag'
    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                   'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    return class_names[k]
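# Hedged usage sketch (assumes Fashion.json/Fashion.h5 exist and that the model
# expects a single 28x28 grayscale image scaled to [0, 1]):
# dummy = np.zeros((28, 28))
# print(st(dummy))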
|
import json
import numpy as np
from autodisc.helper.data import JSONNumpyEncoder, json_numpy_object_hook
from autodisc.helper.data import set_dict_default_values
def test_set_dict_default_values():
# simple dict
def_dict = {'a': 1, 'b': 2}
trg_dict = {'b': 20, 'c': 30}
test_dict = {'a': 1, 'b': 20, 'c': 30}
new_dict = set_dict_default_values(trg_dict, def_dict)
assert new_dict == test_dict
    # recursive dict
def_dict = {'a': 1, 'b': {'aa': 5, 'bb': 6}}
trg_dict = {'b': {'bb': 60, 'cc': 70}, 'c': 30}
test_dict = {'a': 1, 'b': {'aa': 5, 'bb': 60, 'cc': 70}, 'c': 30}
new_dict = set_dict_default_values(trg_dict, def_dict)
assert new_dict == test_dict
# empty dict
def_dict = {'a': 1, 'b': {'aa': 5, 'bb': 6}}
trg_dict = None
new_dict = set_dict_default_values(trg_dict, def_dict)
assert new_dict == def_dict
def test_json_numpy():
    expected = np.arange(100, dtype=np.float64)
    dumped = json.dumps(expected, cls=JSONNumpyEncoder)
    result = json.loads(dumped, object_hook=json_numpy_object_hook)
    # Round-tripping must preserve dtype, shape, and values
    assert result.dtype == expected.dtype, "Wrong Type"
    assert result.shape == expected.shape, "Wrong Shape"
    assert np.allclose(expected, result), "Wrong Values"
expected = np.array([[1,2.3], [3, 4.3]])
dumped = json.dumps(expected, cls=JSONNumpyEncoder)
result = json.loads(dumped, object_hook=json_numpy_object_hook)
    # Round-tripping must preserve dtype, shape, and values
assert result.dtype == expected.dtype, "Wrong Type"
assert result.shape == expected.shape, "Wrong Shape"
assert np.allclose(expected, result), "Wrong Values"
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that dependent rules are executed iff a dependency action modifies its
outputs.
"""
import TestGyp
import os
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('restat.gyp', chdir='src')
chdir = 'relocate/src'
test.relocate('src', chdir)
# Building 'dependent' the first time generates 'side_effect', but building it
# the second time doesn't, because 'create_intermediate' doesn't update its
# output.
test.build('restat.gyp', 'dependent', chdir=chdir)
test.built_file_must_exist('side_effect', chdir=chdir)
os.remove(test.built_file_path('side_effect', chdir=chdir))
test.build('restat.gyp', 'dependent', chdir=chdir)
test.built_file_must_not_exist('side_effect', chdir=chdir)
test.pass_test()
|
import os
import sys
import time
filename = input("Enter an input file name: ")
path = "./%s" % filename
# Check existence before size: os.path.getsize raises OSError for a missing file
if not (os.path.isfile(path) and os.path.getsize(path) > 0):
    print("File doesn't exist or is empty.")
    sys.exit(1)
file = open(path, "r")
freqList = list()
for freqChange in file:
    freqList.append(int(freqChange))
file.close()
freq = 0
found = False
foundFreqSet = {0}  # the starting frequency counts as seen; set membership is O(1)
startTime = time.time()
while not found:
    for freqChange in freqList:
        freq += freqChange
        if freq in foundFreqSet:
            found = True
            break
        else:
            foundFreqSet.add(freq)
print(time.time() - startTime)  # the original list-based version took ~127.36 s
print("First frequency found twice: %d" % freq)
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the email scanner summary pipeline."""
import mock
import unittest
from google.cloud.security.common.gcp_type import iam_policy
from google.cloud.security.common.gcp_type import resource
from google.cloud.security.notifier.pipelines import email_scanner_summary_pipeline
from google.cloud.security.scanner.scanners import iam_rules_scanner
from google.cloud.security.scanner.audit import rules as audit_rules
from tests.unittest_utils import ForsetiTestCase
class EmailScannerSummaryPipelineTest(ForsetiTestCase):
"""Tests for the email_scanner_summary_pipeline."""
@mock.patch('google.cloud.security.scanner.scanners.iam_rules_scanner.iam_rules_engine',
autospec=True)
def test_can_compose_scanner_summary(self, mock_rules_engine):
"""Test that the scan summary is built correctly."""
email_pipeline = (
email_scanner_summary_pipeline.EmailScannerSummaryPipeline(
111111))
members = [iam_policy.IamPolicyMember.create_from(u)
for u in ['user:a@b.c', 'group:g@h.i', 'serviceAccount:x@y.z']
]
unflattened_violations = [
audit_rules.RuleViolation(
resource_type='organization',
resource_id='abc111',
rule_name='Abc 111',
rule_index=0,
violation_type=audit_rules.VIOLATION_TYPE['whitelist'],
role='role1',
members=tuple(members)),
audit_rules.RuleViolation(
resource_type='project',
resource_id='def222',
rule_name='Def 123',
rule_index=1,
violation_type=audit_rules.VIOLATION_TYPE['blacklist'],
role='role2',
members=tuple(members)),
]
scanner = iam_rules_scanner.IamPolicyScanner({}, {}, '', '')
all_violations = scanner._flatten_violations(unflattened_violations)
total_resources = {
resource.ResourceType.ORGANIZATION: 1,
resource.ResourceType.PROJECT: 1,
}
actual = email_pipeline._compose(all_violations, total_resources)
expected_summaries = {
resource.ResourceType.ORGANIZATION: {
'pluralized_resource_type': 'Organizations',
'total': 1,
'violations': {
'abc111': len(members)
}
},
resource.ResourceType.PROJECT: {
'pluralized_resource_type': 'Projects',
'total': 1,
'violations': {
'def222': len(members)
}
},
}
expected_totals = sum(
[v for t in expected_summaries.values()
for v in t['violations'].values()])
expected = (expected_totals, expected_summaries)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import pandas as pd
from pandas.testing import assert_series_equal
from powersimdata.network.usa_tamu.constants.zones import abv2state, id2abv
from prereise.gather.demanddata.nrel_efs.map_states import (
decompose_demand_profile_by_state_to_loadzone,
shift_local_time_by_loadzone_to_utc,
)
def test_decompose_demand_profile_by_state_to_loadzone():
# Create dummy aggregate demand DataFrame
cont_states = sorted(set(abv2state) - {"AK", "HI"})
agg_dem = pd.DataFrame(
1,
index=pd.date_range("2016-01-01", "2017-01-01", freq="H", closed="left"),
columns=cont_states,
)
agg_dem.index.name = "Local Time"
# Generate the test result
test_agg_dem = decompose_demand_profile_by_state_to_loadzone(agg_dem).round(5)
# Create the expected result for demand percentage in load zone 7 (NY)
exp_agg_dem = pd.Series(
0.67803,
index=pd.date_range("2016-01-01", "2017-01-01", freq="H", closed="left"),
name=7,
)
exp_agg_dem.index.name = "UTC Time"
# Compare the two results
assert_series_equal(exp_agg_dem, test_agg_dem[7])
def test_shift_local_time_by_loadzone_to_utc():
# Create dummy DataFrame
agg_dem = pd.DataFrame(
1,
index=pd.date_range("2016-01-01", "2017-01-01", freq="H", closed="left"),
columns=set(id2abv),
)
agg_dem.index.name = "Local Time"
agg_dem.iloc[8712:8736] += 1.0
# Generate the test result
test_agg_dem = shift_local_time_by_loadzone_to_utc(agg_dem)
# Create the expected result for UTC-shifted demand in load zone 1 (ME)
exp_agg_dem = pd.Series(
1.0,
index=pd.date_range("2016-01-01", "2017-01-01", freq="H", closed="left"),
name=1,
)
exp_agg_dem.index.name = "UTC Time"
exp_agg_dem.iloc[0:5] += 1
exp_agg_dem.iloc[8717:8741] += 1
# Compare the two results
assert_series_equal(exp_agg_dem, test_agg_dem[1])
|