text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python3
"""prelockd"""
from ctypes import CDLL
from json import dump, load
from mmap import ACCESS_READ, mmap
from os import getpid, listdir, path, sysconf, sysconf_names, times
from re import search
from signal import SIGHUP, SIGINT, SIGQUIT, SIGTERM, signal
from sre_constants import error as invalid_re
from sys import argv, exit, stderr, stdout
from time import monotonic, process_time, sleep
def valid_re(reg_exp):
    """Validate a regular expression; exit(1) if the pattern is invalid."""
    try:
        search(reg_exp, '')
        return
    except invalid_re:
        pass
    errprint('Invalid config: invalid regexp: {}'.format(reg_exp))
    exit(1)
def errprint(*text):
    """Print the given values to stderr, flushing immediately."""
    print(*text, flush=True, file=stderr)
def string_to_float_convert_test(string):
    """Return float(string), or None when the string is not a valid float."""
    try:
        value = float(string)
    except ValueError:
        return None
    return value
def mlockall():
    """
    Lock all current and future process memory (mlockall(MCL_FUTURE)).

    Exits the process with status 1 if the memory cannot be locked.
    """
    from ctypes import get_errno  # local: file-level import only pulls CDLL
    MCL_FUTURE = 2
    libc = CDLL('libc.so.6', use_errno=True)
    result = libc.mlockall(MCL_FUTURE)
    if result != 0:
        # BUG FIX: the original formatted `result` (always -1 on failure)
        # as the errno; use get_errno() to report the real error code,
        # which is why CDLL was opened with use_errno=True.
        errprint('ERROR: cannot lock process memory: [Errno {}]'.format(
            get_errno()))
        errprint('Exit.')
        exit(1)
    else:
        if debug_self:
            print('process memory locked with MCL_FUTURE')
def signal_handler(signum, frame):
    """
    Handle signals: close fd, dump d and t, exit.

    Unlocks every locked file, persists the snapshot history (d) and the
    elapsed lock time (t) to dump_path, then exits cleanly.
    """
    print('Got signal {}'.format(signum))
    print('Unlocking files and dumping saved snapshots')
    unlock_files(lock_dict)
    # seconds since the last locking pass started
    lock_t = round(monotonic() - var_dict['lock_t0'], 1)
    # print('lock_t:', lock_t)
    dump_d['t'] = lock_t
    dump_d['d'] = d
    jdump(dump_path, dump_d)
    if debug_1:
        mm_debug()
    if debug_self:
        self_rss = get_self_rss()
        print('self rss: {}M'.format(round(self_rss / MIB, 1)))
        cpu()
    print('Exit.')
    exit()
def get_pid_list():
    """Return the PIDs (as strings) of all /proc entries except this process."""
    pid_list = [entry for entry in listdir('/proc') if entry[0].isdecimal()]
    pid_list.remove(self_pid)
    return pid_list
def get_uptime():
    """Return the system uptime in seconds, read from /proc/uptime."""
    with open('/proc/uptime', 'rb', buffering=0) as f:
        first_field = f.read().decode().split(' ')[0]
    return float(first_field)
def get_uniq_id(pid):
    """
    Return (starttime, uniq_id) for a process, or None if it is skipped.

    uniq_id combines the process starttime with the '/proc/<pid>/stat'
    prefix up to the end of the comm field, which survives PID reuse.
    Returns None for kernel threads, for processes filtered out by the
    critical name/cgroup rules, or for processes that vanished mid-read.
    """
    try:
        with open('/proc/' + pid + '/stat', 'rb', buffering=0) as f:
            # rpartition on ')' splits at the end of the comm field, which
            # may itself contain spaces or parentheses.
            x, _, y = f.read().decode('utf-8', 'ignore').rpartition(')')
            # kthreads have vsize, rss and rsslim all zero at the tail
            if y[-7:] == ' 0 0 0\n':
                return None  # skip kthreads
            if lock_only_critical:
                lock_ok = False
                name = x.partition(' (')[2]
                if name in name_set:
                    lock_ok = True
                    if debug_map:
                        print('found process with critical name:', pid, name)
                if not lock_ok and check_cgroup:
                    cgroup2 = pid_to_cgroup_v2(pid)
                    for cgroup_re in cgroup_set:
                        if search(cgroup_re, cgroup2) is not None:
                            lock_ok = True
                            if debug_map:
                                print('found process from critical cgroup:',
                                      pid, name, cgroup2)
                            break
                if not lock_ok:
                    return None
            # y starts with a space, so index 20 is stat field 22: starttime
            starttime = y.split(' ')[20]
            uniq_id = starttime + '+' + x
            return starttime, uniq_id
    except (FileNotFoundError, ProcessLookupError):
        return None
def get_current_set():
    """
    Return the set of existing file paths currently mapped by any process.

    Results are cached per uniq_id in uniq_map_d (only for processes older
    than min_save_lifetime); cache entries of dead processes are evicted.
    """
    m0 = monotonic()
    p0 = process_time()
    if debug_map:
        print('Looking for mapped files...')
    new_map_set = set()
    pid_list = get_pid_list()
    uptime = get_uptime()
    uniq_set = set()
    for pid in pid_list:
        s = get_uniq_id(pid)
        if s is None:
            continue
        uniq_map_set = set()
        starttime, uniq_id = s
        uniq_set.add(uniq_id)
        # process age in seconds (starttime is in clock ticks)
        lifetime = uptime - float(starttime) / SC_CLK_TCK
        if uniq_id in uniq_map_d:
            # already scanned this process before: reuse cached mapping
            new_map_set.update(uniq_map_d[uniq_id])
            continue
        if debug_map:
            print('finding mapped files for new process:', uniq_id)
        maps = '/proc/' + pid + '/maps'
        try:
            try:
                with open(maps, 'rb', buffering=0) as f:
                    lines_list = f.read().decode('utf-8', 'ignore').split('\n')
            except PermissionError as e:
                # cannot read other processes' maps -> daemon lacks privilege
                errprint(e)
                exit(1)
            for line in lines_list:
                # everything after the first '/' is the mapped path (no
                # leading slash; it is re-added below)
                w_root = line.partition('/')[2]
                uniq_map_set.add(w_root)
            new_map_set.update(uniq_map_set)
            # only cache processes that have lived long enough
            if lifetime >= min_save_lifetime:
                uniq_map_d[uniq_id] = uniq_map_set
        except FileNotFoundError as e:
            errprint(e)
            continue
    # evict cache entries for processes that no longer exist
    uniq_map_d_set = set(uniq_map_d)
    dead_uniq_set = uniq_map_d_set - uniq_set
    for uniq_id in dead_uniq_set:
        del uniq_map_d[uniq_id]
    new_map_set.discard('')
    final_map_set = set()
    for i in new_map_set:
        final_map_set.add('/' + i)
    # keep only paths that still exist on disk
    current_map_set = set()
    for pathname in final_map_set:
        if path.exists(pathname):
            current_map_set.add(pathname)
        else:
            if debug_map:
                print('skip:', pathname)
    if debug_map:
        list_d = list(current_map_set)
        list_d.sort()
        for pathname in list_d:
            print('mapped:', pathname)
    m1 = monotonic()
    p1 = process_time()
    if debug_map:
        print('Found {} mapped files in {}s (process time: {}s)'.format(len(
            current_map_set), round(m1 - m0, 3), round(p1 - p0, 3)))
    return current_map_set
def get_sorted(rp_set):
    """
    Return [(path, size), ...] sorted by ascending size for the paths in
    rp_set that match $LOCK_PATH_REGEX and have a positive size.
    """
    size_by_path = dict()
    for candidate in rp_set:
        candidate = str(candidate)  # ?
        if search(lock_path_regex, candidate) is None:
            if debug_map:
                print("skip (doesn't match $LOCK_PATH_REGEX): " + candidate)
            continue
        try:
            size = path.getsize(candidate)
        except FileNotFoundError as e:
            # file vanished between the scan and now
            if debug_map:
                print(e)
            continue
        if size > 0:
            size_by_path[candidate] = size
    return sorted(size_by_path.items(), key=lambda item: item[1])
def get_sorted_locked():
    """Return [(path, size), ...] for currently locked files, largest first."""
    size_by_path = {rp: pair[1] for rp, pair in lock_dict.items()}
    return sorted(size_by_path.items(), key=lambda item: item[1], reverse=True)
def lock_files(rp_set):
    """
    mmap and lock the files in rp_set, smallest first, within the
    max_file_size / max_total_size budgets, evicting larger already-locked
    files when that frees enough room.

    NOTE(review): this block appears to contain duplicated logic (avail is
    computed twice below, and there are two near-identical locking
    try-blocks and skip branches) — it looks like an unresolved merge or
    copy/paste artifact; confirm against the upstream source before
    refactoring.
    """
    if debug_lock:
        print('locking new files...')
    lock_counter = 0
    sorted_list = get_sorted(rp_set)
    sorted_locked = get_sorted_locked()
    len_sorted_locked = len(sorted_locked)
    last_index = 0
    for f_realpath, size in sorted_list:
        # skip large files
        if size > max_file_size:
            if debug_lock:
                print(
                    'skip (file size {}M > $MAX_FILE_SIZE_MIB) {}'.format(
                        round(size / MIB, 1), f_realpath))
                continue
            else:
                # list is size-sorted: everything after this is too large
                break
        if f_realpath in lock_dict:
            continue
        # NOTE(review): the next two pairs of lines compute the same value
        # twice (apparent duplication).
        cap = max_total_size
        locked = get_total_size()
        avail = cap - locked
        total_size = get_total_size()
        avail = max_total_size - total_size
        if avail < size:
            if len_sorted_locked == 0:
                if debug_lock:
                    print('skip (total_size ({}M) + size ({}M) > max_total_s'
                          'ize) {}'.format(
                              round(total_size / MIB, 1), round(
                                  size / MIB, 1), f_realpath))
                    continue
                else:
                    break
            else:
                # try to evict the largest locked file to make room
                old_f, old_s = sorted_locked[last_index]
                if size < old_s:
                    avail_future = avail + old_s - size
                    if avail_future < size:
                        if debug_lock:
                            print('skip (total_size ({}M) + size ({}M) > max'
                                  '_total_size) {}'.format(
                                      round(total_size / MIB, 1), round(
                                          size / MIB, 1), f_realpath))
                            continue
                        else:
                            break
                    # closing the mmap releases the locked memory
                    lock_dict[old_f][0].close()
                    del lock_dict[old_f]
                    last_index += 1
                    try:
                        print(
                            'locking ({}M) {}'.format(
                                round(
                                    size / MIB,
                                    1),
                                f_realpath))
                        with open(f_realpath, 'rb') as f:
                            mm = mmap(f.fileno(), 0, access=ACCESS_READ)
                            mm_len = len(mm)
                            if mm_len != size:
                                print('W: mm_len != size:', f_realpath)
                            lock_dict[f_realpath] = (mm, mm_len)
                            lock_counter += 1
                            continue
                    except OSError as e:
                        errprint(e)
                        break
                if debug_lock:
                    print('skip (total_size ({}M) + size ({}M) > max_total_s'
                          'ize) {}'.format(
                              round(total_size / MIB, 1), round(
                                  size / MIB, 1), f_realpath))
                    continue
                else:
                    break
        # normal path: enough budget available, lock the file directly
        try:
            if debug_lock:
                print('locking ({}M) {}'.format(
                    round(size / MIB, 1), f_realpath))
            with open(f_realpath, 'rb') as f:
                mm = mmap(f.fileno(), 0, access=ACCESS_READ)
                mm_len = len(mm)
                if mm_len != size:
                    print('W: mm_len != size:', f_realpath)
                lock_dict[f_realpath] = (mm, mm_len)
                lock_counter += 1
        except OSError as e:
            errprint(e)
            break
    if debug_1:
        mm_debug()
def unlock_files(elements):
    """
    Close the mmaps for the given paths and remove them from lock_dict.

    elements: iterable of realpaths (may be lock_dict itself, hence the
    two-phase collect-then-pop to avoid mutating while iterating).
    """
    if len(elements) > 0:
        del_set = set()
        for f_realpath in elements:
            try:
                lock_dict[f_realpath][0].close()
                del_set.add(f_realpath)
                if debug_lock:
                    print('unlocked:', f_realpath)
            except KeyError:
                if debug_lock:
                    print('key error:', f_realpath)
        for i in del_set:
            try:
                lock_dict.pop(i)
            except KeyError:
                # BUG FIX: the original printed f_realpath (the stale loop
                # variable from the loop above) instead of the key that
                # actually failed to pop.
                print('key error:', i)
def get_self_rss():
    """Return this process's resident set size in bytes (statm field 2 x page size)."""
    with open('/proc/self/statm') as statm:
        fields = statm.readline().split(' ')
    return int(fields[1]) * SC_PAGESIZE
def get_total_size():
    """Return the total size in bytes of all currently locked files."""
    return sum(entry[1] for entry in lock_dict.values())
def mm_debug():
    """Print the total locked size (MiB) and the number of locked files."""
    total = get_total_size()
    print('currently locked {}M, {} files'.format(
        round(total / MIB, 1),
        len(lock_dict),
    ))
def string_to_int_convert_test(string):
    """Return int(string), or None when the string is not a valid integer."""
    try:
        value = int(string)
    except ValueError:
        return None
    return value
def get_final_set():
    """
    d, lock_path_set -> final_set

    Select the paths whose recent snapshot history satisfies at least one
    (min_entry, from_latest) rule: the last from_latest snapshots contain
    at least min_entry YES entries.
    """
    final_set = set()
    for rp, history in d.items():
        for min_entry, from_latest in lock_path_set:
            if history[-from_latest:].count(YES) >= min_entry:
                final_set.add(rp)
    return final_set
def rotate_snapshots():
    """
    current_set, minus, max_store_num, d
    -> d

    Append a YES entry to the history of every currently-mapped path and a
    NO entry for every path that disappeared; histories are trimmed to
    max_store_num entries, and paths whose history holds no YES anymore
    are forgotten entirely.
    """
    for rp in current_set:
        if rp in d:
            v_list = d[rp][-max_store_num:]
            v_list.append(YES)
            # keep at most max_store_num entries
            if len(v_list) > max_store_num:
                del v_list[0]
            d[rp] = v_list
        else:
            # first time this path is seen
            d[rp] = [YES]
    for rp in minus:
        v_list = d[rp][-max_store_num:]
        v_list.append(NO)
        if len(v_list) > max_store_num:
            del v_list[0]
        if YES in v_list:
            d[rp] = v_list
        else:
            # history is all NO -> drop the path
            del d[rp]
def jdump(pathname, data):
    """Serialize data to pathname as sorted, newline-indented JSON."""
    with open(pathname, 'w') as out:
        dump(data, out, indent=0, sort_keys=True)
def jload(pathname):
    """Deserialize and return the JSON content of pathname."""
    with open(pathname) as src:
        return load(src)
def pid_to_cgroup_v2(pid):
    """Return the cgroup-v2 path of pid, or '' if unknown or process gone."""
    try:
        with open('/proc/' + pid + '/cgroup') as f:
            result = ''
            for index, line in enumerate(f):
                if index == cgroup_v2_index:
                    # drop the leading '0::' prefix and the trailing newline
                    result = line[3:-1]
            return result
    except FileNotFoundError:
        return ''
def get_cgroup2_index():
    """
    Find cgroup-line position in /proc/[pid]/cgroup file.

    Returns the 0-based index of the '0::' (cgroup v2) line in this
    process's /proc/self/cgroup, or None if no such line exists.
    """
    found = None
    with open('/proc/self/cgroup') as f:
        for position, line in enumerate(f):
            if line.startswith('0::'):
                found = position
    return found
def cpu():
    """Print cumulative CPU time and its average share of wall-clock time."""
    elapsed = monotonic() - start_time
    user_time, system_time = times()[:2]
    used = user_time + system_time
    print('process time: {}s (average: {}%)'.format(
        round(used, 2), round(used / elapsed * 100, 2)))
###############################################################################
# Startup: constants, CLI parsing and config-file parsing.
start_time = monotonic()
MIB = 1024**2
self_pid = str(getpid())
SC_CLK_TCK = sysconf(sysconf_names['SC_CLK_TCK'])
# cache: uniq_id -> set of mapped file paths
uniq_map_d = dict()
# only cache mappings of processes older than this (seconds)
min_save_lifetime = 300
dump_path = '/var/lib/prelockd/dump.json'
cgroup_v2_index = get_cgroup2_index()
# CLI: either `-p` (print mapped files once) or `-c <config>` (daemon mode)
a = argv[1:]
la = len(a)
if la == 0:
    errprint('invalid input: missing CLI options')
    exit(1)
elif la == 1:
    if a[0] == '-p':
        get_current_set()
        exit()
    else:
        errprint('invalid input')
        exit(1)
elif la == 2:
    if a[0] == '-c':
        config = a[1]
    else:
        errprint('invalid input')
        exit(1)
else:
    errprint('invalid input: too many options')
    exit(1)
# MemTotal in kB from the first /proc/meminfo line
with open('/proc/meminfo') as f:
    mem_list = f.readlines()
mem_total = int(mem_list[0].split(':')[1][:-4])
mem_list_names = []
for s in mem_list:
    mem_list_names.append(s.split(':')[0])
SC_PAGESIZE = sysconf(sysconf_names['SC_PAGESIZE'])
config_dict = dict()
lock_path_set = set()
max_store_num = 0
name_set = set()
cgroup_set = set()
# Parse the config: `$KEY=value` pairs and `@...` list directives.
try:
    with open(config) as f:
        for line in f:
            if line[0] == '$' and '=' in line:
                key, _, value = line.partition('=')
                key = key.rstrip()
                value = value.strip()
                if key in config_dict:
                    errprint('config key {} duplication'.format(key))
                    exit(1)
                config_dict[key] = value
            if line[0] == '@':
                if line.startswith('@LOCK_PATH ') and '=' in line:
                    a_list = line.partition('@LOCK_PATH ')[2:][0].split()
                    lal = len(a_list)
                    if lal != 2:
                        print(lal)
                        errprint('invalid conf')
                        exit(1)
                    a_dict = dict()
                    for pair in a_list:
                        key, _, value = pair.partition('=')
                        a_dict[key] = value
                    min_entry = string_to_int_convert_test(a_dict['MIN_ENTRY'])
                    if min_entry is None:
                        # NOTE(review): no exit(1) here — a non-int
                        # MIN_ENTRY crashes below with TypeError on `>`
                        errprint('Invalid config: invalid MIN_ENTRY: not int')
                    from_latest = string_to_int_convert_test(
                        a_dict['FROM_LATEST'])
                    if from_latest is None:
                        # NOTE(review): same missing exit(1) as above
                        errprint(
                            'Invalid config: invalid FROM_LATEST: not int')
                    if min_entry > from_latest:
                        errprint('invalid conf')
                        exit(1)
                    if min_entry < 1 or from_latest < 1:
                        errprint('invalid conf')
                        exit(1)
                    # longest FROM_LATEST defines how much history to keep
                    if from_latest > max_store_num:
                        max_store_num = from_latest
                    lock_path_set.add((min_entry, from_latest))
                if line.startswith('@CRITICAL_NAME_LIST '):
                    a_list = line.partition(
                        '@CRITICAL_NAME_LIST ')[2:][0].split(',')
                    for name in a_list:
                        name_set.add(name.strip(' \n'))
                if line.startswith('@CRITICAL_CGROUP2_REGEX '):
                    cgroup_re = line.partition('@CRITICAL_CGROUP2_REGEX ')[2:][
                        0].strip(' \n')
                    # valid_re() always returns None (it exits the process
                    # on a bad pattern), so this test always passes here
                    if valid_re(cgroup_re) is None:
                        cgroup_set.add(cgroup_re)
except (PermissionError, UnicodeDecodeError, IsADirectoryError,
        IndexError, FileNotFoundError) as e:
    errprint('Invalid config: {}. Exit.'.format(e))
    exit(1)
name_set.discard('')
if len(cgroup_set) > 0:
    check_cgroup = True
else:
    check_cgroup = False
def valid_v(x):
    """Map a $VERBOSITY digit ('0'/'1') to a bool; exit(1) on anything else."""
    if x == '1':
        return True
    if x == '0':
        return False
    errprint('Invalid $VERBOSITY value')
    exit(1)
# $VERBOSITY is four 0/1 digits driving the debug_* flags.
if '$VERBOSITY' in config_dict:
    verbosity = config_dict['$VERBOSITY']
    if len(verbosity) != 4:
        errprint('invalid $VERBOSITY value')
        exit(1)
    debug_1 = valid_v(verbosity[0])
    debug_self = valid_v(verbosity[1])
    debug_lock = valid_v(verbosity[2])
    debug_map = valid_v(verbosity[3])
else:
    errprint('missing $VERBOSITY key')
    exit(1)
if '$LOCK_ONLY_CRITICAL' in config_dict:
    lock_only_critical = config_dict['$LOCK_ONLY_CRITICAL']
    if lock_only_critical == 'True':
        lock_only_critical = True
    elif lock_only_critical == 'False':
        lock_only_critical = False
    else:
        errprint('invalid $LOCK_ONLY_CRITICAL value')
        exit(1)
else:
    errprint('missing $LOCK_ONLY_CRITICAL key')
    exit(1)
if '$MAX_FILE_SIZE_MIB' in config_dict:
    string = config_dict['$MAX_FILE_SIZE_MIB']
    max_file_size_mib = string_to_float_convert_test(string)
    if max_file_size_mib is None:
        errprint('invalid $MAX_FILE_SIZE_MIB value')
        exit(1)
    # MiB -> bytes
    max_file_size = int(max_file_size_mib * 1048576)
else:
    errprint('missing $MAX_FILE_SIZE_MIB key')
    exit(1)
if '$LOCK_PATH_REGEX' in config_dict:
    lock_path_regex = config_dict['$LOCK_PATH_REGEX']
    valid_re(lock_path_regex)
else:
    errprint('missing $LOCK_PATH_REGEX key')
    exit(1)
if '$MAX_TOTAL_SIZE_PERCENT' in config_dict:
    string = config_dict['$MAX_TOTAL_SIZE_PERCENT']
    max_total_size_percent = string_to_float_convert_test(string)
    if max_total_size_percent is None:
        errprint('invalid $MAX_TOTAL_SIZE_PERCENT value')
        exit(1)
    # percent of MemTotal (kB) -> bytes
    max_total_size_pc = int(mem_total * max_total_size_percent / 100) * 1024
else:
    errprint('missing $MAX_TOTAL_SIZE_PERCENT key')
    exit(1)
if '$MAX_TOTAL_SIZE_MIB' in config_dict:
    string = config_dict['$MAX_TOTAL_SIZE_MIB']
    max_total_size_mib = string_to_float_convert_test(string)
    if max_total_size_mib is None:
        errprint('invalid $MAX_TOTAL_SIZE_MIB value')
        exit(1)
    # NOTE(review): after this line max_total_size_mib holds BYTES, not MiB
    max_total_size_mib = int(max_total_size_mib * 1048576)
else:
    errprint('missing $MAX_TOTAL_SIZE_MIB key')
    exit(1)
# the effective cap is the smaller of the absolute and percent-based limits
if max_total_size_mib <= max_total_size_pc:
    max_total_size = max_total_size_mib
else:
    max_total_size = max_total_size_pc
if '$POLL_INTERVAL_SEC' in config_dict:
    string = config_dict['$POLL_INTERVAL_SEC']
    interval = string_to_float_convert_test(string)
    if interval is None:
        errprint('invalid $POLL_INTERVAL_SEC value')
        exit(1)
else:
    errprint('missing $POLL_INTERVAL_SEC key')
    exit(1)
config = path.abspath(config)
print('Starting prelockd with the config: {}'.format(config))
if debug_self:
    print('$LOCK_PATH_REGEX: ', lock_path_regex)
    print('$MAX_FILE_SIZE_MIB: ', max_file_size / MIB)
    print('$MAX_TOTAL_SIZE_MIB: ', round(max_total_size_mib / MIB, 1))
    print('$MAX_TOTAL_SIZE_PERCENT: ', round(
        max_total_size_pc / MIB, 1), '(MiB)')
    print('max_total_size: ', round(
        max_total_size / MIB, 1), '(MiB)')
    print('$VERBOSITY: ', verbosity)
    print('$LOCK_PATH_REGEX: ', lock_path_regex)
    print('@LOCK_PATH ', lock_path_set)
    print('$LOCK_ONLY_CRITICAL ', lock_only_critical)
    print('@CRITICAL_NAME_LIST ', list(name_set))
    print('@CRITICAL_CGROUP2_REGEX set: ', cgroup_set)
if max_store_num == 0:
    print('WARNING: lock rules are empty!')
# lock the daemon's own memory so it cannot be swapped out
mlockall()
dump_d = dict()
var_dict = dict()
# realpath -> (mmap object, size in bytes)
lock_dict = dict()
sig_list = [SIGTERM, SIGINT, SIGQUIT, SIGHUP]
for i in sig_list:
    signal(i, signal_handler)
if debug_1:
    mm_debug()
# snapshot history markers
YES = 1
NO = 0
if debug_self:
    self_rss = get_self_rss()
    print('self rss: {}M'.format(round(self_rss / MIB, 1)))
var_dict['lock_t0'] = monotonic()
# Try to restore state from the previous run's dump; fall back to empty.
try:
    dump_d = jload(dump_path)
    d = dump_d['d']
    lock_t = dump_d['t']
    final_set = get_final_set()
    lock_files(final_set)
    lock_t0 = monotonic() - lock_t
    var_dict['lock_t0'] = lock_t0
    # sleep off the remainder of the poll interval
    extra_t = interval - lock_t
    if extra_t > 0:
        stdout.flush()
        sleep(extra_t)
except Exception as e:
    print(e)
    d = dict()
    lock_t = 0
# Main poll loop: snapshot mapped files, rotate histories, (un)lock.
while True:
    current_set = get_current_set()
    len_cur = len(current_set)
    d_set = set(d)
    # paths that were tracked but are no longer mapped
    minus = d_set - current_set
    rotate_snapshots()
    final_set = get_final_set()
    old_final_set = set(lock_dict)
    unlock_it = old_final_set - final_set
    unlock_files(unlock_it)
    lock_it = final_set - old_final_set
    lock_files(lock_it)
    var_dict['lock_t0'] = monotonic()
    if debug_self:
        self_rss = get_self_rss()
        print('self rss: {}M'.format(round(self_rss / MIB, 1)))
        cpu()
    stdout.flush()
    sleep(interval)
|
def find_element(self, *loc, secs=5):
    '''
    Wait until a page element is present (explicit wait).

    :param loc: a single "by >> value" locator string, e.g. 'id >> elementId'
    :param secs: timeout in seconds; TimeoutException is raised on expiry
    :return: None

    presence_of_element_located: only checks the element exists in the DOM,
    regardless of visibility.
    visibility_of_element_located: the element must be found AND visible.
    '''
    # NOTE(review): str(loc) stringifies the *args tuple, so the splits
    # below only work because the whole tuple repr contains one ">>" —
    # confirm callers always pass exactly one locator string.
    loc = str(loc)
    by = loc.split(">>")[0].strip()
    value = loc.split(">>")[1].strip()
    messages = 'Element: {0} not found in {1} seconds.'.format(loc, secs)
    if by == "id":
        WebDriverWait(self.driver, secs, 0.5).until(EC.presence_of_element_located((By.ID, value)), messages)
    elif by == "name":
        WebDriverWait(self.driver, secs, 0.5).until(EC.presence_of_element_located((By.NAME, value)), messages)
    elif by == "class":
        WebDriverWait(self.driver, secs, 0.5).until(EC.presence_of_element_located((By.CLASS_NAME, value)), messages)
    elif by == "link_text":
        WebDriverWait(self.driver, secs, 0.5).until(EC.presence_of_element_located((By.LINK_TEXT, value)), messages)
    elif by == "xpath":
        WebDriverWait(self.driver, secs, 0.5).until(EC.presence_of_element_located((By.XPATH, value)), messages)
    elif by == "css":
        WebDriverWait(self.driver, secs, 0.5).until(EC.presence_of_element_located((By.CSS_SELECTOR, value)), messages)
    else:
        raise NameError("Please enter the correct targeting elements,'id','name','class','link_text','xpaht','css'.")
def get_element(self, *loc):
    '''
    Locate and return a page element.

    Usage: driver.get_element('BY.ID >> elementID') -- a single
    "by >> value" locator string.
    '''
    loc = str(loc)
    by = loc.split(">>")[0].strip()
    value = loc.split(">>")[1].strip()
    # dispatch table: locator type -> bound finder method
    finders = {
        "id": self.driver.find_element_by_id,
        "name": self.driver.find_element_by_name,
        "class": self.driver.find_element_by_class_name,
        "link_text": self.driver.find_element_by_link_text,
        "xpath": self.driver.find_element_by_xpath,
        "css": self.driver.find_element_by_css_selector,
    }
    if by not in finders:
        raise NameError(
            "Please enter the correct targeting elements,'id','name','class','link_text','xpaht','css'.")
    return finders[by](value)
# Wait for an element to be present, then locate and return it.
def getwait_element(driver,loc,secs=10):
    """Explicitly wait up to `secs` seconds for the element described by the
    "by >> value" locator string, then find and return it.

    Raises the wait's timeout error when the element never appears, and
    NameError for an unknown locator type.
    """
    by = loc.split(">>")[0].strip()
    value = loc.split(">>")[1].strip()
    messages = 'Element: {0} not found in {1} seconds.'.format(loc, secs)
    # debug output of the parsed locator parts
    print(by)
    print(value)
    if by == "id":
        WebDriverWait(driver, secs, 0.5).until(EC.presence_of_element_located((By.ID, value)), messages)
        element = driver.find_element_by_id(value)
        return element
    elif by == "name":
        WebDriverWait(driver, secs, 0.5).until(EC.presence_of_element_located((By.NAME, value)), messages)
        element = driver.find_element_by_name(value)
        return element
    elif by == "class":
        WebDriverWait(driver, secs, 0.5).until(EC.presence_of_element_located((By.CLASS_NAME, value)), messages)
        element = driver.find_element_by_class_name(value)
        return element
    elif by == "link_text":
        WebDriverWait(driver, secs, 0.5).until(EC.presence_of_element_located((By.LINK_TEXT, value)), messages)
        element = driver.find_element_by_link_text(value)
        return element
    elif by == "xpath":
        WebDriverWait(driver, secs, 0.5).until(EC.presence_of_element_located((By.XPATH, value)), messages)
        element = driver.find_element_by_xpath(value)
        return element
    elif by == "css":
        WebDriverWait(driver, secs, 0.5).until(EC.presence_of_element_located((By.CSS_SELECTOR, value)), messages)
        element = driver.find_element_by_css_selector(value)
        return element
    else:
        raise NameError(
            "Please enter the correct targeting elements,'id','name','class','link_text','xpaht','css'.")
from flask import render_template, request
from .category import get_home_page, get_category
from .file_operations import get_visual_files, get_visual_file, get_all_visual_chart_files
from .species_repository import get_parent_details, getSpeciesDetail, get_all_species_details, get_home_page_data, \
get_species_experts_data
from ..utils.constants import environment_details, display_details
from ..utils.extract_value import get_base_url_till_given_string, split_path
from ..utils.auto_suggestion_using_trie import autocomplete_main
from importlib import import_module
import json
def render_home():
    """Render the home page with home-page data and its visual JS files."""
    home_data = get_home_page()
    return render_template(
        'home/home.html',
        ckan_url=environment_details['ckan'],
        json_data=home_data,
        js_files=get_visual_files(0),
    )
def render_category(path):
    """Render a category page for a '/'-separated path, or fall through to
    the species-detail page when the last component is not a known
    category parent."""
    path_array = path.split('/')
    parent_data = get_parent_details(path_array[-1])
    category_path = split_path(path)
    if parent_data:
        data = get_category(parent_data['_id'], parent_data, path_array[0])
    else:
        # not a category -> treat the path as a species reference
        return render_species_details(path_array)
    return render_template('category/category.html', ckan_url=environment_details['ckan'],
                           json_data=data,
                           parent_data=parent_data,
                           parent_name=parent_data['name'],
                           fullpath=category_path,
                           js_files=get_visual_files(parent_data['id']),
                           base_url=get_base_url_till_given_string(request, 'category'))
def render_species_details(path):
    """Render the species-detail page for a path list whose last element is
    the species name and first element is the top-level category."""
    species_name = get_species_name(category_path=path)
    species_record = getSpeciesDetail(path[0], path[-1])
    # keep only the whitelisted display fields; missing keys become ''
    species_display_info = _get_filtered_details(species_record=species_record, keys=display_details)
    species_display_info['is_species'] = 'true'
    return render_template('species_detail/species_detail.html', ckan_url=environment_details['ckan'],
                           species_name=species_name, json_data=species_display_info,
                           fullpath=path)
def _get_filtered_details(species_record, keys):
available_data = species_record.keys()
species_display_info = {display_key: (species_record[display_key] if display_key in available_data else '')
for display_key in keys}
return species_display_info
def get_species_name(category_path):
    """Return the species name: the last component of the category path."""
    # Idiom: negative indexing instead of len()-1 arithmetic.
    return category_path[-1]
def get_category_name(category_path):
    """Return the category name: the second-to-last component of the path."""
    # Idiom: negative indexing instead of len()-2 arithmetic.
    return category_path[-2]
def raise_exception(e):
    """Render the custom error page for an HTTP error `e` (reads
    e.description) and return it with status 500."""
    return render_template('common/custom_error_view.html', message=e.description,
                           base_url=get_base_url_till_given_string(request, 'category')), 500
def get_json(filename):
    """Import the app.apis submodule addressed by filename's path
    components and return the result of its main()."""
    category_path = split_path(path=filename)
    my_module = import_module('.' + '.'.join(category_path), package='app.apis')
    return my_module.main()
def render_experts():
    """Render the find-experts page with the home-page data set."""
    experts_context = get_home_page_data()
    return render_template('species_experts/find_experts.html',
                           ckan_url=environment_details['ckan'],
                           parent_data=experts_context)
def render_report():
    """Render the reports page with every visual chart JS file."""
    chart_files = get_all_visual_chart_files()
    return render_template('reports/report.html',
                           ckan_url=environment_details['ckan'],
                           js_files=chart_files)
def find_auto_complete_species():
    """Return up to 10 auto-completion matches for ?search_key= as JSON."""
    search_key = request.args.get('search_key', '')
    matches = autocomplete_main(search_key, get_all_species_details())
    if matches:
        return json.dumps(matches[:10])
    return json.dumps(matches)
def find_species_experts():
    """Return the experts data for the ?selected_key= species as JSON."""
    selected_key = request.args.get('selected_key', '')
    return json.dumps(get_species_experts_data(selected_key))
def get_visual_report(filename):
    """Return the visual JS file list for filename, serialized as JSON."""
    return json.dumps(get_visual_file(filename))
def get_visual_chart(filename):
    """Render the generic chart template with filename's JS files."""
    chart_js = get_visual_file(filename)
    return render_template('common/visual_chart.html', js_files=chart_js)
def about_us():
    """Render the static about-us page."""
    about_template = 'home/about.html'
    return render_template(about_template)
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import data.climate.window_generator as wg
# https://www.tensorflow.org/tutorials/structured_data/time_series
# Plot defaults for the figures produced below.
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
# Pre-split Jena climate dataset (see the TF time-series tutorial above).
train_df = pd.read_csv("jena_climate_2009_2016_train.csv")
val_df = pd.read_csv("jena_climate_2009_2016_val.csv")
test_df = pd.read_csv("jena_climate_2009_2016_test.csv")
# Dense model: two 32-unit ReLU layers, single-value output.
dense = tf.keras.Sequential([
    tf.keras.layers.Dense(units=32, activation='relu'),
    tf.keras.layers.Dense(units=32, activation='relu'),
    tf.keras.layers.Dense(units=1),
])
# One-step-ahead window: 1 input step -> 1 label step, predicting T (degC).
single_step_window = wg.WindowGenerator(train_df=train_df, val_df=val_df, test_df=test_df,
                                        input_width=1, label_width=1, shift=1,
                                        label_columns=['T (degC)'])
history = wg.compile_and_fit(dense, single_step_window)
dense.save("h5/dense_32_1_19__32_1_1.h5")
# Wider 24-step window, used only to plot the trained model's predictions.
wide_window = wg.WindowGenerator(train_df=train_df, val_df=val_df, test_df=test_df,
                                 input_width=24, label_width=24, shift=1,
                                 label_columns=['T (degC)'])
wide_window.plot(dense)
plt.show()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 1 10:42:25 2017
@author: wuyiming
"""
# Audio / STFT parameters.
SR = 16000  # sample rate in Hz
FFT_SIZE = 1024
BATCH_SIZE = 1
PATCH_LENGTH = 512  # presumably spectrogram frames per patch — verify at use site
WIN_LENGTH = 1024
H = int(WIN_LENGTH * 0.25)  # hop size: 25% of the window length
ALPHA = 2
# Dataset and output directory layout.
PATH_FFT = "spectro"
PATH_EVAL = "dataset/test"
PATH_TRAIN = "spectro/train"
PATH_TRAIN_wav = "dataset/train"
PATH_VAL_wav = "dataset/val"
PATH_VAL = "spectro/val"
PATH_RESULT = "result"
PATH_CHECKPOINTS = "checkpoints"
|
# coding: utf-8
# coding: utf-8
import json
import urllib
import pymongo
import requests
# coding: utf-8
import json
import random
import urllib
from urllib import quote
import MySQLdb
import datetime
import requests
import time
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import redis
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, DateTime, Integer, Text, INT
#from mayidaili import useproxy
# SQLAlchemy ORM session for the target MySQL `house` database.
engine = create_engine('mysql+pymysql://root:@localhost:3306/house?charset=utf8')
DBSession = sessionmaker(bind=engine)
Base = declarative_base()
session = DBSession()
class Advertiser(Base):
    """ORM model for one scraped housing record (table tb_sep)."""
    __tablename__ = 'tb_sep'
    id = Column(Integer, primary_key=True)
    house_name = Column(String(80))
    city_name = Column(String(80))
    url = Column(String(300))
    price = Column(INT)
    # coordinates are stored as strings, as scraped
    latitude = Column(String(50))
    longitude = Column(String(50))
    origin = Column(String(80))
    months = Column(String(80))
    crawl_time = Column(DateTime)
    address = Column(String(300))
    building_date = Column(String(60))
    building_type = Column(String(60))
# Create the table if it does not exist yet.
Base.metadata.create_all(engine)
# Source MongoDB collection with the scraped records.
dbname = 'test'
collection = 'fangtianxia_final1'
client = pymongo.MongoClient('127.0.0.1', 27017)
db = client[dbname]
data = db[collection].find({})
data_list = list(data)
# Copy every MongoDB document into a MySQL row, committing one at a time
# so a single bad record only rolls back itself.
for i in data_list:
    advertiser = Advertiser(
        house_name = i['name'],
        city_name = i['city_name'],
        url = i['url'],
        price = int(i['price']),
        latitude = i['latitude'],
        longitude = i['longitude'],
        origin = i['origin'],
        months = i['month_price'],
        crawl_time = i['crawl_date'],
        address = i['location'],
        building_date = i['building_date'],
        building_type = i['building_type']
    )
    session.add(advertiser)
    try:
        session.commit()
    except Exception as e:
        print(e)
        session.rollback()
session.close()
""" script to check the number of entries in root-files (to check, if simulation worked correctly): """
import ROOT
# path to the root files:
# path to the root files:
input_path = "/local/scratch1/pipc51/astro/blum/detsim_output_data/"
# expected number of entries in each file:
number_entries = 100
# index of first file:
start_file = 900
# index of last file:
stop_file = 999
# loop over every file and report any whose "evt" tree size differs:
for index in range(start_file, stop_file+1):
    # file name:
    file_name = "user_atmoNC_{0:d}.root".format(index)
    # input name:
    input_name = input_path + file_name
    # load ROOT file:
    # NOTE(review): rfile is never Close()d; ROOT releases it when the
    # Python object is garbage-collected — confirm this is acceptable.
    rfile = ROOT.TFile(input_name)
    # get the "evt"-TTree from the TFile:
    rtree_evt = rfile.Get("evt")
    # get the number of events in the geninfo Tree:
    number_events = rtree_evt.GetEntries()
    if number_events == number_entries:
        continue
    else:
        print("\nnumber of events ({0:d}) != {1:d}".format(number_events, number_entries))
        print("failed file: {0}".format(file_name))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from analyse_immo.database import Database
from analyse_immo.impots.micro_foncier import Micro_Foncier
from test.testcase_fileloader import TestCaseFileLoader
@unittest.skip('fixme')
class TestImpotMicroFoncier(TestCaseFileLoader):
    """Tests for the Micro_Foncier tax computation (currently skipped)."""

    # NOTE(review): overrides TestCaseFileLoader.setUp without calling
    # super().setUp() — confirm the base-class fixture is not required.
    def setUp(self):
        self._database = Database()

    def testInit(self):
        _ = Micro_Foncier(self._database, 0, 0)

    # 'impossable' below mirrors the property names exposed by Micro_Foncier.
    def testBasseImpossable(self):
        imf = Micro_Foncier(self._database, 10000, 0)
        self.assertEqual(imf.base_impossable, 7000)

    def testRevenuFoncierImpossable(self):
        imf = Micro_Foncier(self._database, 10000, 0.11)
        self.assertEqual(imf.revenu_foncier_impossable, 770)

    def testPrelevementSociauxMontant(self):
        imf = Micro_Foncier(self._database, 10000, 0.11)
        self.assertEqual(imf.prelevement_sociaux_montant, 1204)

    def testImpotTotal(self):
        imf = Micro_Foncier(self._database, 10000, 0.11)
        self.assertEqual(imf.impot_total, 1974)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from django.contrib import admin
from pycont.apps.transactions.models import Transaction

# Expose Transaction in the Django admin with the default ModelAdmin.
admin.site.register(Transaction)
|
import os
import ray
import json
import tqdm
import torch
import numpy as np
import logging
from typing import List
logger = logging.getLogger(__name__)
class SentenceSegmenter:
    """Pack tokenized sentences into segments of at most max_seq_length
    tokens, splitting over-long sentences into fixed-size chunks."""

    def __init__(self, tokenizer, max_seq_length: int):
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length

    def __call__(self, doc_sentences) -> List[List[str]]:
        """Return a list of token segments for the document's sentences."""
        token_segments = []
        current_seq = []
        for count, sent in enumerate(doc_sentences):
            if count > 0:
                # keep the inter-sentence space in the tokenization
                sent = " " + sent
            token_sent = self.tokenizer.tokenize(sent)
            if len(token_sent) > self.max_seq_length:
                # append last sequence
                token_segments.append(current_seq)
                for i in range(0, len(token_sent) - self.max_seq_length, self.max_seq_length):
                    token_segments.append(token_sent[i:i + self.max_seq_length])
                # assign the current seq the tail of token_sent
                # NOTE(review): `i` is the last chunk's start index; when the
                # sentence length is an exact multiple of max_seq_length the
                # tail slice may drop tokens — confirm this is intended.
                current_seq = token_sent[i + self.max_seq_length:i + self.max_seq_length * 2]
                continue
            if (len(current_seq) + len(token_sent)) > self.max_seq_length:
                # current segment is full: flush it and start a new one
                token_segments.append(current_seq)
                current_seq = token_sent
            else:
                current_seq = current_seq + token_sent
        if len(current_seq) > 0:
            token_segments.append(current_seq)
        # remove empty segment
        token_segments = [seg for seg in token_segments if seg]
        return token_segments
@ray.remote
def _process(segmenter, lines, rank):
    """Ray worker: segment the "sents" field of each JSONL line.

    Only the rank-0 worker shows a tqdm progress bar.
    """
    all_token_segments = []
    # progress bar
    if rank == 0:
        lines = tqdm.tqdm(lines)
    else:
        lines = lines
    for line in lines:
        example = json.loads(line)
        token_segments = segmenter(example["sents"])
        all_token_segments.append(token_segments)
    return all_token_segments
class SegmentDocLoader:
    """Load a corpus sector from JSONL, segment every document into token
    chunks with SentenceSegmenter (parallelized via ray), and cache the
    processed result on disk with torch.save."""

    def __init__(
        self, tokenizer, max_seq_length: int, corpus_path: str, cache_dir: str = "cache/cached_corpus_sectors"
    ):
        self.tokenizer = tokenizer
        self.corpus_path = corpus_path
        self.cache_dir = cache_dir
        self.max_seq_length = max_seq_length
        self.sent_segmenter = SentenceSegmenter(tokenizer, max_seq_length)
        if cache_dir:
            os.makedirs(cache_dir, exist_ok=True)
        # default worker count: half the available CPUs
        if "OMP_NUM_THREADS" not in os.environ.keys():
            os.environ["OMP_NUM_THREADS"] = str(os.cpu_count() // 2)

    def load_sector(self, sector_id):
        """Return the processed documents for sector_id, preferring the
        on-disk cache; reprocess on a cache miss or corrupted cache."""
        if self.cache_dir:
            cache_path = os.path.join(self.cache_dir, f"{sector_id}_cache.pkl")
            if os.path.exists(cache_path):
                try:
                    logger.info("Loadding Cache")
                    processed_docs = torch.load(cache_path)
                    logger.info("Finished Loading")
                    return processed_docs
                # NOTE(review): bare except also swallows SystemExit and
                # KeyboardInterrupt — consider `except Exception`.
                except:
                    logger.info("File Corrupted. Data will be re-processed")
        # processing data
        with open(os.path.join(self.corpus_path, str(sector_id) + ".jsonl"), "r") as f:
            data = f.readlines()
        processed_docs = []
        logger.info("Processing Data. Takes about 10 mins")
        # multi-processing
        # NOTE(review): step_size becomes 0 when len(data) < OMP_NUM_THREADS,
        # making range() raise ValueError — confirm sectors are always larger.
        ray_objs = []
        step_size = len(data) // int(os.environ["OMP_NUM_THREADS"])
        for i in range(0, len(data), step_size):
            ray_objs.append(_process.remote(self.sent_segmenter, data[i:i + step_size], i))
        for i in range(len(ray_objs)):
            processed_docs.extend(ray.get(ray_objs[i]))
        if self.cache_dir:
            logger.info("Saving Into Cache")
            torch.save(processed_docs, cache_path)
            logger.info("Finished Saving Into Cache")
        return processed_docs
from chineseviewer import app
def main():
    """Start the chineseviewer Flask app on port 42045 in debug mode."""
    app.run(debug=True, port=42045)
# Run the dev server only when executed directly.
if __name__ == '__main__':
    main()
|
import psutil
# current = len(psutil.pids())
# for proc in psutil.process_iter():
# print(proc.info)
import threading
def application(proc):
    """Thread worker: report a process's status, then block in wait() until
    it exits and report the exit code.

    Errors (e.g. the process vanished between listing and inspection, or
    access was denied) are swallowed on purpose -- this is a best-effort
    monitor.
    """
    try:
        print(proc.name(), 'is still running:', proc.is_running(), '- Parent PID:', proc.ppid())
        # proc.wait() blocks this thread until the process terminates.
        print(proc.name(), 'exit code:', proc.wait(), 'is still running:', proc.is_running())
    # was a bare "except: None": that would also trap SystemExit and
    # KeyboardInterrupt; Exception keeps the intentional swallow but not those
    except Exception:
        pass
# Spawn one monitor thread per process owned by this (hard-coded) user;
# each thread blocks in proc.wait() until its process exits.
for proc in psutil.process_iter(attrs=['pid', 'username']):
    if proc.info['username'] == 'DESKTOP-C68RIMR\\QuocChuong':
        pid = proc.info['pid']
        process = psutil.Process(pid)
        t = threading.Thread(target=application, args=(process,))
        t.start()
# for proc in psutil.process_iter(attrs=['pid', 'name', 'username']):
# print(proc.info)
|
"""
Factorial of a Number
Create a function that receives a non-negative
integer and returns the factorial of that number.
Examples:
fact(0) ➞ 1
fact(1) ➞ 1
fact(3) ➞ 6
fact(6) ➞ 720
Notes:
Avoid using built-in functions to solve this challenge.
"""
def fact(n):
    """Return n! for a non-negative integer n (fact(0) == fact(1) == 1)."""
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
'''
* Function Name: determine_drop_angle()
* Input: (node, required_dep_zone, last_dir)
* Output: turning angle ( +45 degrees or - 45 degrees )
* Logic: After reaching the dropping position,
* the bot has to turn towards the dropping zone to drop the fruit.
* So this function determines whether the bot should turn in +45 degrees
* or -45 degrees to drop the fruit.
* Example Call: determine_drop_angle(4, 20, 'N')
'''
def determine_drop_angle(node, required_dep_zone, last_dir):
    """Return 'angle_right' or 'angle_left': the 45-degree turn that faces
    the dropping zone from the current node, given the last heading.

    Checks the neighbour on the bot's right side; if it is in the zone the
    bot turns that way, otherwise it turns the other way.
    """
    x, y = nodeToCordinate(node)
    if last_dir == 'N':
        check_right = cordinateToNode((x + 1, y))
        if check_right in required_dep_zone:
            print 'im here'
            return 'angle_right'
        else:
            return 'angle_left'
    elif last_dir == 'S':
        # facing south, the +x neighbour is on the bot's left
        check_right = cordinateToNode((x + 1, y))
        if check_right in required_dep_zone:
            print 'im here'
            return 'angle_left'
        else:
            return 'angle_right'
    elif last_dir == 'W':
        check_right = cordinateToNode((x, y + 1))
        if check_right in required_dep_zone:
            print 'im here'
            return 'angle_right'
        else:
            return 'angle_left'
    else:
        check_right = cordinateToNode((x, y + 1))
        if check_right in required_dep_zone:
            print 'im here'
            return 'angle_left'
        else:
            return 'angle_right'
#---------------------------------------------------------------------------------------------------------------------#
'''
* Function Name: nodeBeforeShotestNode()
* Input: (currentNode, nodeDirection)
* Output: destination node, boolean
* Logic: While traversing to the fruit location,
* the bot has to reach the location from the front and not take a turn.
* This is so that the fruit kept at any height can come in the field of view of
* the camera and detect it properly.
* Example Call: nodeBeforeShotestNode(35, 'N')
'''
def nodeBeforeShotestNode(currentNode, nodeDirection):
    """Return (forward_node, is_traversable) for the cell directly in front
    of currentNode given the facing direction.

    Bug fix: the original fell off the end (returning bare None) whenever
    the forward cell was outside the grid bounds; it now consistently
    returns a 2-tuple, with False in that case, so callers can always
    unpack the result.
    """
    x, y = nodeToCordinate(currentNode)
    node = 0
    if nodeDirection == 'N':
        if y - 1 > 0:
            node = cordinateToNode((x, y - 1))
            return node, node in travesable_nodes
    elif nodeDirection == 'W':
        if x + 1 < 7:
            node = cordinateToNode((x + 1, y))
            return node, node in travesable_nodes
    elif nodeDirection == 'E':
        if x - 1 > 0:
            node = cordinateToNode((x - 1, y))
            return node, node in travesable_nodes
    else:
        if y + 1 < 7:
            node = cordinateToNode((x, y + 1))
            return node, node in travesable_nodes
    # forward cell is out of bounds
    return node, False
# ---------------------------------------------------------------------------------------------------------------------#
'''
* Function Name: determine_next_node()
* Input: (node, required_dep_zone, last_dir)
* Output: next node
* Logic: After reaching the dropping position,
* the bot has to turn towards the dropping zone to drop the fruit.
* So this function determines whether the bot should make the required
* turn and in which direction.
* Example Call: determine_next_node(20, 30, 'S')
'''
def determine_next_node(node, required_dep_zone, last_dir):
    """Return the neighbouring node that lies inside the dropping zone.

    Checks the perpendicular neighbour on one side (relative to the last
    heading); if it is not in the zone, the opposite neighbour is returned.
    """
    x, y = nodeToCordinate(node)
    if last_dir == 'N' or last_dir == 'S':
        # heading is vertical, so the zone must be to the east or west
        check_right = cordinateToNode((x + 1, y))
        if check_right in required_dep_zone:
            print 'im here'
            return cordinateToNode((x + 1, y))
        else:
            return cordinateToNode((x - 1, y))
    else:
        # heading is horizontal, so the zone must be to the north or south
        check_right = cordinateToNode((x, y + 1))
        if check_right in required_dep_zone:
            print 'im here'
            return cordinateToNode((x, y + 1))
        else:
            return cordinateToNode((x, y - 1))
#---------------------------------------------------------------------------------------------------------------------#
'''
* Function Name: turn_4_dropping_fruit()
* Input: (dir, cur_node, dep_zone)
* Output: drop_turn_required, dir_4_drop
* Logic: After reaching the dropping position,
* the bot has to turn towards the dropping zone to drop the fruit.
* So this function determines the required turn to face the dropping zone.
* It also updates the final direction which the bot will face.
* Example Call: turn_4_dropping_fruit('W', 11, 12)
'''
def turn_4_dropping_fruit(dir_last, cur_node, dep_zone):
    """Determine the in-place turn needed to face the dropping zone.

    Returns (drop_turn_required, dir_after_drop). The '-forward' suffix
    from calculateTurn is stripped because the bot only rotates here and
    does not advance.
    """
    nex_node = determine_next_node(cur_node, dep_zone, dir_last)
    print 'next node : ', nex_node
    print 'dir_last : ', dir_last
    print 'current node : ', cur_node
    drop_turn_required, drop_turn_cost, dir_after_drop = calculateTurn(dir_last, cur_node, nex_node)
    if drop_turn_required == 'left-forward':
        drop_turn_required = 'left'
    elif drop_turn_required == 'right-forward':
        drop_turn_required = 'right'
    return drop_turn_required, dir_after_drop
#---------------------------------------------------------------------------------------------------------------------#
'''
* Function Name: turn_4_tree()
* Input: (dir, cur_node, nex_node)
* Output: fruit_turn_required, dir_4_fruit
* Logic: After reaching the fruit position,
* the bot has to turn towards the tree to identify the fruit.
* So this function determines the required turn to face the fruit.
* It also updates the final direction which the bot will face.
* Example Call: turn_4_tree('E', 29, 36)
'''
def turn_4_tree(dir_last, cur_node, nex_node):
    """Determine the turn needed to face the tree at the end of the path.

    Returns (fruit_turn_required, dir_4_fruit) where dir_4_fruit is the
    bot's heading after the turn.
    """
    fruit_turn_required, fruit_turn_cost, dir_4_fruit = calculateTurn(dir_last, cur_node, nex_node)
    if fruit_turn_required == 'reverse':
        # A 'reverse' at the end of the path becomes an on-the-spot 180-degree
        # turn, which flips the final heading to the opposite direction.
        fruit_turn_required = 'turn-180'
        dir_4_fruit = {'N': 'S', 'W': 'E', 'S': 'N'}.get(dir_4_fruit, 'W')
    elif fruit_turn_required == 'left-forward':
        # drop the trailing 'forward': the bot only rotates here
        fruit_turn_required = 'left'
    elif fruit_turn_required == 'right-forward':
        fruit_turn_required = 'right'
    return fruit_turn_required, dir_4_fruit
# ------------------------------------------------------------------- # |
#! /usr/bin/env python
#DESCRIPTION OF THIS NODE
#Approach 3 - Left-Wall Spiral
#On initial move, robot moves forward until it finds a wall
#Rotate to have wall in its left-field of vision
#Move forward and turn appropriately, following the left wall
#Increase distance to wall after completing 1 round
#END OF THE DESCRIPTION
import rospy
import time
import math
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
pub = rospy.Publisher('cmd_vel', Twist, queue_size=100)
# current approximate position of the robot
curX = 0
curY = 0
# the starting position for this round
oriX = 0
oriY = 0
# used for timer
tick = 0
delay = 0
first_on = True
# distance to an obstacle/wall before robot needs to turn away or slow down (in metres)
hit_threshold = 0.3
slow_threshold = 0.5
# distance to the left to check for wall
turn_threshold = 0.4
turn_lower_threshold = 0.3
# error boundary to check if the robot makes one complete round
starting_radius = 0.3
def callback( sensor_data ):
    # Laser-scan handler: pick the next velocity command for left-wall
    # following and publish it.
    global hit_threshold,slow_threshold, turn_threshold, starting_radius
    n = len(sensor_data.ranges)
    base_data = Twist()
    hit = False
    slow = False
    global first_on, oriX, oriY, curX, curY,tick
    # check for obstacles that are directly blocking the way
    for distance in sensor_data.ranges:
        if (distance < hit_threshold):
            hit = True
            if first_on:
                # first wall contact: remember where this round started
                first_on = False
                tick = time.time()
                oriX = curX
                oriY = curY
                print "ORIGINAL POSITION %s,%s" %(oriX,oriY)
            break
    # check for nearby obstacles that might be blocking the way
    if not hit:
        # middle third of the scan = straight ahead (n/3 is integer
        # division under Python 2)
        for distance in sensor_data.ranges[n/3:n*2/3]:
            if (distance < slow_threshold):
                slow = True
                break
    # we should look for a wall first when initially turned on
    if first_on or hit:
        turning_left = False
    else:
        # look for an opening to the left
        turning_left = True
        for i in range(n/3,n):
            # keep a minimum distance from the wall
            if (sensor_data.ranges[i] < turn_lower_threshold):
                turning_left = False
                hit = True
                break
            elif (sensor_data.ranges[i] < turn_threshold):
                turning_left = False
                break
    # move the base properly
    if turning_left:
        #print "TURNING LEFT"
        base_data.linear.x = 0.1
        base_data.angular.z = 0.25
    elif hit:
        #print "AVOIDING SOMETHING"
        base_data.linear.x = 0
        base_data.angular.z = -0.3
    elif slow:
        #print "MOVING FORWARD SLOWLY"
        base_data.linear.x = 0.2
        base_data.angular.z = 0
    else:
        #print "MOVING FORWARD QUICKLY"
        base_data.linear.x = 0.35
        base_data.angular.z = 0
    pub.publish(base_data)
def odometry_callback(msg):
    # Odometry handler: track the robot's position, detect a completed lap
    # around the room, and widen the wall-distance thresholds to spiral in.
    global turn_lower_threshold,turn_threshold, starting_radius
    global curX,curY,oriX,oriY,tick,delay
    curX = msg.pose.pose.position.x
    curY = msg.pose.pose.position.y
    if first_on:
        # no wall found yet, so there is no lap origin to compare against
        return
    # a delay time is used to find a new original point
    if delay>0:
        if time.time()-tick<delay:
            return
        else:
            oriX = curX
            oriY = curY
            print "NEW ORIGINAL POSITION %s,%s" %(oriX,oriY)
            delay = 0
            # NOTE(review): bare time.time() below has no effect --
            # presumably "tick = time.time()" was intended; confirm
            time.time()
    # increase the distance to left wall to discover the inside (or outside) area
    elif time.time()-tick > 20 and math.fabs(curX-oriX)<starting_radius and math.fabs(curY-oriY)<starting_radius:#detect that robot has completed a whole round of the room
        print "I WENT IN A CIRCLE!! SPIRALING IN!!!"
        turn_lower_threshold += 0.3
        turn_threshold += 0.3
        tick = time.time()
        delay = 4
if __name__ == '__main__':
    # Wire up the node: odometry for lap detection, laser scan for wall
    # following, then hand control to the ROS event loop.
    rospy.init_node('move')
    base_data = Twist()  # NOTE(review): unused here; the callbacks build their own Twist
    rospy.Subscriber('odom', Odometry, odometry_callback)
    rospy.Subscriber('base_scan', LaserScan, callback)
    rospy.spin()
|
from fileinput import input
from collections import defaultdict
from string import ascii_uppercase
from copy import deepcopy
# Parse lines shaped like "Step C must be finished before step A can begin.":
# character 5 is the prerequisite step, character 36 the dependent step.
lines = [i.rstrip() for i in input()]
tuples = [(i[5], i[36]) for i in lines]
x = defaultdict(list)
for t in tuples:
    x[t[1]].append(t[0])
    # Reading x[t[0]] forces the defaultdict to create an (empty) entry for
    # steps that only ever appear as prerequisites.
    [x[s] for s in t[0]]
#print(x)
def findFirstEmpty(dct):
    """Return the first key (in insertion order) whose dependency list is
    empty, or "" if every key still has pending dependencies."""
    for key, deps in dct.items():
        if deps == []:
            return key
    return ""
# Shallow copy keeps the original dependency lists for part 2 (part 1 only
# rebinds x's values, never mutates the shared lists).
backup = dict(x)
order = []
# Part 1: repeatedly take the first available step and strip it from every
# remaining dependency list.
while len(order) < 26:
    key = findFirstEmpty(x)
    order.append(key)
    for s in x.keys():
        x[s] = [i for i in x[s] if i != key]
    del x[key]
#print("".join(order))
#print((x))
##Part2
print(backup)
done = set()
# 5 workers, each a (letter, seconds_remaining) pair; "" means idle.
workers = [("", 0)] * 5
def findFirstEmptyBis(dct):
    """Like findFirstEmpty, but skip letters that a worker is already
    building (as reported by the global ``workers`` list)."""
    busy = getInProgress(workers)
    for key in dct:
        if dct[key] == [] and key not in busy:
            return key
    return ""
def removeFromDict(letter, dct):
    """Delete letter's own entry from dct and strip it from every other
    key's dependency list (in-place)."""
    for key in dct:
        dct[key] = [dep for dep in dct[key] if dep != letter]
    del dct[letter]
def getInProgress(wrk):
    """Return the set of letters currently assigned to the given workers.

    Bug fix: the original ignored its ``wrk`` parameter and read the global
    ``workers`` instead. Every existing call site passes ``workers`` itself,
    so behaviour is unchanged for current callers, but the function now
    actually honours its argument.
    """
    return {slot[0] for slot in wrk}
def getSeconds(letter):
    """Task duration in seconds: 60 base + 1..26 for A..Z; 0 for the idle
    marker ""."""
    if letter == "":
        return 0
    return 61 + ord(letter) - ord('A')
counter = -1
# Part 2: tick one second at a time; when a worker's timer hits zero its
# letter is done, its dependents are unblocked, and the worker picks up the
# next available letter. Loop until all 26 letters plus the idle marker ""
# are in `done` (hence 27).
while len(done) < 27:
    #print(workers)
    counter += 1
    workers = [(l, t-1) for l,t in workers]
    for i,w in enumerate(workers):
        if w[1] <= 0:
            done.add(w[0])
            if w[0] in backup:
                removeFromDict(w[0], backup)
            nxtletter = findFirstEmptyBis(backup)
            workers[i] = (nxtletter, getSeconds(nxtletter))
print(counter)
from django.urls import path
from . import views
# URL routes for this app: index page plus the destroy/add2/addnumber views.
urlpatterns = [
    path('', views.index),
    path('destroy', views.destroy),
    path('add2', views.add2),
    path('addnumber', views.addnumber),
]
"""Hackthon URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from rest_framework.authtoken.views import obtain_auth_token
from django.contrib.auth.models import User
from rest_framework import routers
from Hackapp import views
from Hackapp.views import UserLoginAPIView, UserLogoutAPIView, UserVerifyLoginAPIView,RoleInfo, RoleEdit,isrecruier,listofusers
app_name='users'
# DRF router provides the CRUD endpoints for the user API under /api/.
router=routers.DefaultRouter()
router.register('user',views.UserViewSet,basename='user-api')
urlpatterns = [
    path('admin/', admin.site.urls),
    path('users/login/', UserLoginAPIView.as_view(), name="login"),
    path('users/logout/', UserLogoutAPIView.as_view(), name="logout"),
    # Bug fix: this route was also name="logout" (copy-paste), which
    # shadowed the real logout route in reverse()/{% url %} lookups.
    path('users/verify_login/', UserVerifyLoginAPIView.as_view(), name="verify_login"),
    path('api/', include(router.urls)),
    path('roleinfo/', RoleInfo.as_view(), name='roleinfo'),
    path('editrole/', RoleEdit.as_view(), name='editrole'),
    path('isrecruiter/', isrecruier.as_view(), name='isrecruiter'),
    path('listofusers/', listofusers.as_view(), name='listofusers'),
]
|
'''
Due to some BUGs of Pycharm(Community Version), I can no longer use Chinese words in my program, or they'll vanish upon input.
The problem would be presumably fixed next time with Professional Version used.
'''
# Importation Area
import math
# Functions Area
def arraydelta(array1, array2):
    """Euclidean distance between two equal-length vectors.

    Returns -1 (an error sentinel kept for compatibility) when the lengths
    differ.
    """
    if len(array1) != len(array2):
        return -1
    total = 0
    for a, b in zip(array1, array2):
        total += (a - b) ** 2
    return math.sqrt(total)
def def_A(A):
    """Fill the 9x10 augmented matrix in place and return it.

    Columns 0-8 hold the (sparse) coefficient matrix; column 9 holds the
    right-hand-side vector b.
    """
    # Part A
    A[0][0] = 31
    A[0][1] = -13
    A[0][5] = -10
    A[1][0] = -13
    A[1][1] = 35
    A[1][2] = 9
    A[1][4] = 11
    A[2][1] = -9
    A[2][2] = 31
    A[2][3] = -10
    A[3][2] = -10
    A[3][3] = 79
    A[3][4] = -30
    A[3][8] = -9
    A[4][3] = -30
    A[4][4] = 57
    A[4][5] = -7
    A[4][7] = -5
    A[5][4] = -7
    A[5][5] = 47
    A[5][6] = -30
    A[6][5] = -30
    A[6][6] = 41
    A[7][4] = -5
    A[7][7] = 27
    A[7][8] = -2
    A[8][3] = -9
    A[8][7] = -2
    A[8][8] = 29
    # Part B
    A[0][9] = -15
    A[1][9] = 27
    A[2][9] = -23
    A[3][9] = 0
    A[4][9] = -20
    A[5][9] = 12
    A[6][9] = -7
    A[7][9] = 7
    A[8][9] = 10
    return A
# Execution Area
if __name__ == '__main__':
    # Build the 9x10 augmented matrix [A | b].
    A = def_A([[0 for j in range(10)] for i in range(9)])
    eps = 1E-8

    # --- Gauss-Seidel Method ---
    # Bug fixes vs. the original:
    #  * "x1 = x2 = ... = [0] * 9" aliased every vector to the SAME list, so
    #    arraydelta(x1, x2) was always 0 and the loop never iterated;
    #    we now snapshot with a copy (x2[:]) each sweep.
    #  * the inner loop used an undefined name "n" (now 9);
    #  * step_GS was reset to 0 inside the loop.
    x2 = [0.0] * 9
    step_GS = 0
    while True:
        x1 = x2[:]  # copy, not alias: previous iterate for the convergence test
        for i in range(9):
            acc = 0.0
            for j in range(9):
                if j != i:
                    acc += A[i][j] * x2[j]
            x2[i] = (A[i][9] - acc) / A[i][i]
        step_GS += 1
        if arraydelta(x1, x2) <= eps:
            break

    # --- SOR Method, relaxation factors omega = 0.02 .. 1.98 ---
    # Bug fixes: per-omega step count appended to the list (the original did
    # "step_SOR += 1" on a list -> TypeError); correct SOR update (the
    # original omitted both the b term and the division by A[i][i]); a fresh
    # starting vector per omega; iteration cap since SOR can diverge for
    # omega close to 2.
    step_SOR = []
    for k in range(99):
        omega = (k + 1) / 50
        x4 = [0.0] * 9
        steps = 0
        while True:
            x3 = x4[:]
            for i in range(9):
                acc = 0.0
                for j in range(9):
                    if j != i:
                        acc += A[i][j] * x4[j]
                # classic SOR: blend the old value with the Gauss-Seidel step
                x4[i] = (1 - omega) * x4[i] + omega * (A[i][9] - acc) / A[i][i]
            steps += 1
            if arraydelta(x3, x4) <= eps or steps >= 10000:
                break
        step_SOR.append(steps)
    # index of the omega with the fewest steps (original called .index on a
    # range object, which looked up the wrong thing)
    best = step_SOR.index(min(step_SOR))

    # --- Output Area ---
    print("The roots are:")
    for i in range(9):
        print("x%d = %.12f" % (i + 1, x2[i]))
    print("The overall steps of Gauss-Seidel Iteration Method is %d\n" % step_GS)
    print("SOR Iteration results:")
    # original iterated "for k in len(step_SOR)" -> TypeError
    for k in range(len(step_SOR)):
        print("Relaxation factor: %.2f, Iteration steps: %d" % ((k + 1) / 50, step_SOR[k]))
    # parenthesize: "%" binds tighter than "/", so the original divided the
    # formatted string by 50
    print("The best relaxation factor is: %.2f" % ((best + 1) / 50))
|
from tl_classifier import TLClassifier
from matplotlib import pyplot as plt
# Smoke test: classify a single simulator image and display it first.
classifier = TLClassifier(False) #run in sim mode
import cv2
image = cv2.imread('data/test_images_sim/left0988.jpg')
# OpenCV loads BGR; matplotlib (and the classifier) expect RGB.
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.figure(figsize=(12, 8))
plt.imshow(image)
plt.show()
result = classifier.get_classification(image)
print('result: ', result)
|
class APIError(Exception):
    '''Base Exception class for the Insurance Company Search API'''

    def api_error(self, message):
        '''JSON wrapper function for errors returned via JSON API'''
        return {'error': {'message': message, 'type': self._get_error_name()}}

    def _get_error_name(self):
        '''Return the concrete exception class name (e.g. for subclasses).'''
        # Bug-prone original parsed repr(cls) with split('.')/split("'");
        # the class's __name__ is the same value, obtained robustly.
        return type(self).__name__
|
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
from qbstyles import mpl_style
DR = Path(__file__).parent
def main():
    """Main: load 2018.csv and save/show a dark-styled histogram of column
    'R', one bin per integer value, each bar annotated with its count."""
    fp = DR / '2018.csv'
    data = pd.read_csv(str(fp))
    mpl_style(dark=True)
    fig = plt.figure(figsize=(8, 4), dpi=72)  # figsize * dpi
    ax = fig.add_subplot(
        111,  # single subplot in a 1x1 grid
        xticks=range(0, 20, 2)
    )
    n, bins, patches = ax.hist(data['R'], bins=data['R'].max() - data['R'].min())
    fig.suptitle('Hist')
    # Write each bin's count centred above its bar.
    for i in range(len(bins) - 1):
        ax.text(
            (bins[i + 1] + bins[i]) / 2,
            n[i],
            int(n[i]),
            horizontalalignment='center'
        )
    fig.savefig(str(DR / 'hist.png'))
    plt.show()
if __name__ == '__main__':
main()
|
def is_prime(n):
    """Return True iff n is a prime number.

    Same results as the original for every integer (including n < 2), but
    trial division stops at sqrt(n) instead of n, since any factor above
    sqrt(n) pairs with one below it.
    """
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
def count_primes_below(n):
    # Exercise stub: count how many primes are strictly below n.
    raise NotImplementedError()
def gcd(x, y):
    # Exercise stub: greatest common divisor of x and y.
    raise NotImplementedError()
# Computes the n-th Fibonacci number
def fibonacci(n):
    raise NotImplementedError()
# Computes the sum of the digits of n
def sum_digits(n):
    raise NotImplementedError()
# Reverse the digits of n. E.g. 123 -> 321
def reverse_digits(n):
    raise NotImplementedError()
|
#{
#Driver Code Starts
#Initial Template for Python 3
# } Driver Code Ends
#User function Template for python3
import math
power = lambda a,b : a**b
##write the lambda expression in one line here
#{
#Driver Code Starts.
def main():
    # Driver: read the number of test cases, then a base and exponent per
    # case, printing base ** exponent each time.
    testcases=int(input()) #testcases
    while(testcases>0):
        base=int(input())
        exp=int(input())
        print(power(base,exp)) ##calling the anonymous function
        testcases-=1
if __name__=='__main__':
main()
#} Driver Code Ends |
from django import forms
from django.contrib.auth.models import User
from Blog_App.models import Blogger,Blog
class BloggerForm(forms.ModelForm):
    """Blogger signup form; every text-ish input carries the Bootstrap
    'form-control' class."""
    username=forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))
    email=forms.EmailField(widget=forms.EmailInput(attrs={'class':'form-control'}))
    about=forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))
    password=forms.CharField(widget=forms.PasswordInput(attrs={'class':'form-control'}))
    profile_pic=forms.ImageField()
    class Meta():
        model=Blogger
        fields=('username','email','password','about','profile_pic')
class BlogForm(forms.ModelForm):
    """Blog-post creation form; the owning Blogger is set elsewhere (not a
    form field)."""
    title=forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))
    post=forms.CharField(widget=forms.Textarea(attrs={'class':'form-control'}))
    post_pic=forms.ImageField()
    #Blogger=forms.HiddenInput()
    class Meta():
        model=Blog
        fields=('title','post','post_pic')
|
import wx
import cv2
import wx.html2
import wx.html
from props.FormField import *
import webbrowser
import subprocess
class WebApp(wx.Frame):
    """Main application frame: fixed-size window with a menu bar and a
    'Menu' header, which also boots the backing Laravel dev server."""

    def __init__(self, title):
        wx.Frame.__init__(self, None, title=title, size=(1000, 500),
                          style=wx.SYSTEM_MENU | wx.CLOSE_BOX | wx.CAPTION)
        panel = wx.Panel(self)
        field = FormField()
        menubar = field.setMenuBar()
        header = field.setFormHeader(panel, size=(500, 100))
        vbox = wx.BoxSizer(wx.VERTICAL)
        font = wx.Font(30, wx.ROMAN, wx.ITALIC, wx.FONTWEIGHT_BOLD)
        header_title = wx.StaticText(header, 1, "Menu", style=wx.ALIGN_CENTER, size=(500, 300))
        header_title.SetFont(font)
        vbox.Add(header, 0, wx.ALIGN_CENTER_VERTICAL, 1)
        webpanel = wx.Panel(panel, size=(1000, 400))
        # Bug fix: the original issued "cd <dir>" and "php artisan serve" as
        # two SEPARATE subprocesses, so the cd had no effect on the server's
        # working directory. Use cwd= so the server starts from the project
        # directory. (Removed the large block of commented-out WebView/
        # HtmlWindow experiments that was dead code.)
        subprocess.call("php artisan serve", shell=True,
                        cwd="C:\\Users\\acer\\laradev\\opencv")
        panel.SetSizer(vbox)
        self.SetMenuBar(menubar)
        self.Centre()
        self.Show(True)
# if __name__ == "__main__":
# app = wx.App(False)
# login = WebApp("Banana Detection")
# app.MainLoop()
|
'''
Copyright (c) 2015 Jittapan "iSuzutsuki" Pluemsumran
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import json, os, configparser
import tkinter as tk
from tkinter.filedialog import askopenfilename
from pprint import pprint
# Load server definitions (and the last-used properties directory) from
# config.json.
json_data = open('config.json')
data = json.load(json_data)
servers = data['servers']
try:
    initdir = data['propsdir']
except KeyError:
    initdir = ''
# Hidden Tk root: we only want the native file-open dialog, not a window.
root = tk.Tk()
root.withdraw()
print("Please select lol.exe location")
filename = askopenfilename(title='Select lol.properties location',initialfile='lol.properties',filetypes=[
    ("LoL server properties", ("lol.properties"))
],initialdir=initdir)
if filename == '':
    quit()
# Remember the chosen directory for the next launch.
data['propsdir'] = os.path.dirname(filename)
json_data.close()
with open('config.json', 'w') as f:
    json.dump(data, f)
    f.close()  # NOTE(review): redundant inside "with"; the context manager closes f
# configparser needs a section header, which lol.properties lacks.
with open(filename, 'r') as f:
    config_string = '[d]\n' + f.read() #Adds dummy section
    f.close()
config = configparser.ConfigParser()
config.read_string(config_string)
while True:
    print("========Server List========")
    for server,info in servers.items():
        pprint("--> " + server + " (" + info['title'] + ")")
    selection = input("Please select the server: ")
    selected = None
    try:
        selected = servers[selection]
    except KeyError:
        print(selection, " server doesn't exist in the configuration.")
        continue
    if selected is not None:
        # Rewrite the three server endpoints inside the dummy [d] section.
        config['d']['host'] = selected['host']
        config['d']['lq_uri'] = selected['lq_uri']
        config['d']['xmpp_server_url'] = selected['xmpp_server_url']
        with open(filename, 'w') as f:
            config.write(f)
            f.close()
        # Strip the dummy "[d]" header line that was injected above.
        with open(filename, 'r') as f:
            data = f.read().splitlines(True)
        with open(filename, 'w') as f:
            f.writelines(data[1:])
        os.system('clear')
        print('Edited lol.properties')
    else:
        print("Unexpected code route reached.")
|
# -*- coding: utf-8 -*-
"""
Implementation
==============
Plotting is implemented hierarchically in 3 different types of
functions/classes:
top-level (public names)
Top-level functions or classes have public names create an entire figure.
Some classes also retain the figure and provide methods for manipulating
it.
_ax_
Functions beginning with _ax_ organize an axes object. They do not
create their own axes object (this is provided by the top-level function),
but change axes formatting such as labels and extent.
_plt_
Functions beginning with _plt_ only plot data to a given axes object
without explicitly changing aspects of the axes themselves.
Top-level plotters can be called with nested lists of data-objects (NDVar
instances). They create a separate axes for each list element. Axes
themselves can have multiple layers (e.g., a difference map visualized through
a colormap, and significance levels indicated by contours).
Example: t-test
---------------
For example, the default plot for testnd.ttest() results is the
following list (assuming the test compares A and B):
``[A, B, [diff(A,B), p(A, B)]]``
where ``diff(...)`` is a difference map and ``p(...)`` is a map of p-values.
The main plot function creates a separate axes object for each list element:
- ``A``
- ``B``
- ``[diff(A,B), p(A, B)]``
Each of these element is then plotted with the corresponding _ax_ function.
The _ax_ function calls _plt_ for each of its input elements. Thus, the
functions executed are:
#. plot([A, B, [diff(A,B), p(A, B)]])
#. ---> _ax_(A)
#. ------> _plt_(A)
#. ---> _ax_(B)
#. ------> _plt_(B)
#. ---> _ax_([diff(A,B), p(A, B)])
#. ------> _plt_(diff(A,B))
#. ------> _plt_(p(A, B))
"""
from __future__ import division
import __main__
from itertools import chain
import math
import os
import shutil
import subprocess
import tempfile
import matplotlib as mpl
from matplotlib.figure import SubplotParams
from matplotlib.ticker import FormatStrFormatter, FuncFormatter, ScalarFormatter
import numpy as np
import PIL
from .._utils.subp import command_exists
from ..fmtxt import Image, texify
from .._colorspaces import symmetric_cmaps, zerobased_cmaps
from .._data_obj import ascategorial, asndvar, isnumeric, cellname, \
DimensionMismatchError
# defaults
defaults = {'maxw': 16, 'maxh': 10}
backend = {'eelbrain': True, 'autorun': None, 'show': True}
# store figures (they need to be preserved)
figures = []
# constants
default_cmap = None
default_meas = '?'
def do_autorun(run=None):
    """Decide whether a newly created figure should enter the GUI mainloop.

    An explicit ``run`` argument wins; otherwise fall back to the configured
    backend default; if that is None, auto-detect interactive mode (scripts
    have a ``__main__.__file__``, interactive sessions do not).
    """
    # http://stackoverflow.com/a/2356420/166700
    if run is not None:
        return run
    elif backend['autorun'] is None:
        return not hasattr(__main__, '__file__')
    else:
        # Bug fix: the original evaluated backend['autorun'] without
        # returning it, so this branch always yielded None.
        return backend['autorun']
def configure(frame=True, autorun=None, show=True):
    """Set basic configuration parameters for the current session

    Parameters
    ----------
    frame : bool
        Open figures in the Eelbrain application. This provides additional
        functionality such as copying a figure to the clipboard. If False, open
        figures as normal matplotlib figures.
    autorun : bool
        When a figure is created, automatically enter the GUI mainloop. By
        default, this is True when the figure is created in interactive mode
        but False when the figure is created in a script (in order to run the
        GUI at a specific point in a script, call :func:`eelbrain.gui.run`).
    show : bool
        Show plots on the screen when they're created (disable this to create
        plots and save them without showing them on the screen).
    """
    # None is preserved (meaning "auto-detect" in do_autorun); any other
    # value is coerced to a strict bool before being stored.
    if autorun is not None:
        autorun = bool(autorun)
    backend['eelbrain'] = bool(frame)
    backend['autorun'] = autorun
    backend['show'] = bool(show)
# Preferred display unit per measurement type ('sensor' axes are labelled
# with plain integers, hence the `int` sentinel).
meas_display_unit = {'time': u'ms',
                     'V': u'µV',
                     'B': u'fT',
                     'sensor': int}
# Multiplier converting raw data values into each display unit.
unit_format = {u'ms': 1e3,
               u'mV': 1e3,
               u'µV': 1e6,
               u'pT': 1e12,
               u'fT': 1e15,
               u'dSPM': 1,
               int: int}
# Matplotlib tick formatters keyed by the scale factors above.
scale_formatters = {1: ScalarFormatter(),
                    1e3: FuncFormatter(lambda x, pos: '%g' % (1e3 * x)),
                    1e6: FuncFormatter(lambda x, pos: '%g' % (1e6 * x)),
                    1e9: FuncFormatter(lambda x, pos: '%g' % (1e9 * x)),
                    1e12: FuncFormatter(lambda x, pos: '%g' % (1e12 * x)),
                    1e15: FuncFormatter(lambda x, pos: '%g' % (1e15 * x)),
                    int: FormatStrFormatter('%i')}
def find_axis_params_data(v, label):
    """
    Parameters
    ----------
    v : NDVar | Var | str | scalar
        Unit or scale of the axis.
    label : True | str | None
        Axis label argument (True derives a label from v).

    Returns
    -------
    tick_formatter : Formatter
        Matplotlib axis tick formatter.
    label : str | None
        Axis label.
    """
    # Bug fix: meas was left unbound by the str- and float-branches below,
    # raising NameError further down whenever label is True.
    meas = None
    if isinstance(v, basestring):
        if v in unit_format:
            scale = unit_format[v]
            unit = v
        else:
            raise ValueError("Unknown unit: %s" % repr(v))
    elif isinstance(v, float):
        scale = v
        unit = None
    elif isnumeric(v):
        meas = v.info.get('meas', None)
        data_unit = v.info.get('unit', None)
        if meas in meas_display_unit:
            unit = meas_display_unit[meas]
            scale = unit_format[unit]
            if data_unit in unit_format:
                # convert from the data's native unit into the display unit
                scale /= unit_format[data_unit]
        else:
            scale = 1
            unit = data_unit
    else:
        raise TypeError("unit=%s" % repr(v))

    if label is True:
        if meas and unit and meas != unit:
            label = '%s [%s]' % (meas, unit)
        elif meas:
            label = meas
        elif unit:
            label = unit
        else:
            label = getattr(v, 'name', None)
    return scale_formatters[scale], label
def find_axis_params_dim(meas, label):
    """Find an axis tick formatter and label for a measurement type.

    Parameters
    ----------
    meas : str
        Name of the measurement (e.g. 'time', 'V', 'B'); the docstring
        previously documented a nonexistent ``dimname`` parameter.
    label : None | True | str
        Label argument.

    Returns
    -------
    tick_formatter : Formatter
        Formatter scaling ticks into the display unit.
    label : str | None
        Returns the default axis label if label==True, otherwise the label
        argument.
    """
    if meas in meas_display_unit:
        unit = meas_display_unit[meas]
        scale = unit_format[unit]
        if label is True:
            # unit may be the `int` sentinel (sensor axes) rather than a string
            if isinstance(unit, basestring):
                label = "%s [%s]" % (meas.capitalize(), unit)
            else:
                label = meas.capitalize()
    else:
        scale = 1
        if label is True:
            label = meas.capitalize()
    return scale_formatters[scale], label
def find_im_args(ndvar, overlay, vlims={}, cmaps={}):
    """Construct a dict with kwargs for an im plot

    Parameters
    ----------
    ndvar : NDVar
        Data to be plotted.
    overlay : bool
        Whether the NDVar is plotted as a first layer or as an overlay.
    vlims : dict
        {meas: (vmax, vmin)} mapping to replace v-limits based on the
        ndvar.info dict.
    cmaps : dict
        {meas: cmap} mapping to replace the cmap in the ndvar.info dict.

    Returns
    -------
    im_args : dict
        Arguments for the im plot (cmap, vmin, vmax).

    Notes
    -----
    The NDVar's info dict contains default arguments that determine how the
    NDVar is plotted as base and as overlay. In case of insufficient
    information, defaults apply. On the other hand, defaults can be overridden
    by providing specific arguments to plotting functions.
    """
    # NOTE(review): the mutable default args are only read here, never
    # mutated, so the shared-default pitfall does not bite.
    if overlay:
        kind = ndvar.info.get('overlay', ('contours',))
    else:
        kind = ndvar.info.get('base', ('im',))

    # Only layers drawn as an image get im kwargs; others return None.
    if 'im' in kind:
        if 'meas' in ndvar.info:
            meas = ndvar.info['meas']
        else:
            meas = default_meas

        # explicit per-meas cmap wins over the NDVar's own, then the default
        if meas in cmaps:
            cmap = cmaps[meas]
        elif 'cmap' in ndvar.info:
            cmap = ndvar.info['cmap']
        else:
            cmap = default_cmap

        if meas in vlims:
            vmin, vmax = vlims[meas]
        else:
            vmin, vmax = find_vlim_args(ndvar)
            vmin, vmax = fix_vlim_for_cmap(vmin, vmax, cmap)
        im_args = {'cmap': cmap, 'vmin': vmin, 'vmax': vmax}
    else:
        im_args = None
    return im_args
def find_uts_args(ndvar, overlay, color=None):
    """Construct a dict with kwargs for a uts plot

    Parameters
    ----------
    ndvar : NDVar
        Data to be plotted.
    overlay : bool
        Whether the NDVar is plotted as a first layer or as an overlay.
    color : matplotlib color | None
        Explicit trace color; falls back to ndvar.info['color'] (the
        docstring previously documented a nonexistent ``vlims`` parameter).

    Returns
    -------
    uts_args : dict | None
        Arguments for a uts plot (color), or None when this layer is not
        drawn as a trace.

    Notes
    -----
    The NDVar's info dict contains default arguments that determine how the
    NDVar is plotted as base and as overlay. In case of insufficient
    information, defaults apply. On the other hand, defaults can be overridden
    by providing specific arguments to plotting functions.
    """
    if overlay:
        kind = ndvar.info.get('overlay', ())
    else:
        kind = ndvar.info.get('base', ('trace',))

    if 'trace' in kind:
        args = {}
        color = color or ndvar.info.get('color', None)
        if color is not None:
            args['color'] = color
    else:
        args = None
    return args
def find_uts_hlines(ndvar):
    """Yield (y, kwargs) horizontal-line specs for uts plots.

    Specs come from the 'contours' entry of ndvar.info, sorted by level.
    A plain value is treated as a color; a dict is passed through (copied).
    """
    contours = ndvar.info.get('contours', None)
    if not contours:
        return
    for level in sorted(contours):
        spec = contours[level]
        if isinstance(spec, dict):
            yield level, spec.copy()
        else:
            yield level, {'color': spec}
def find_uts_ax_vlim(layers, vlims={}):
    """Find y axis limits for uts axes.

    Parameters
    ----------
    layers : list of NDVar
        Data to be plotted.
    vlims : dict
        Vmax and vmin values by (meas, cmap).

    Returns
    -------
    bottom : None | scalar
        Lowest value on y axis.
    top : None | scalar
        Highest value on y axis.

    Raises
    ------
    RuntimeError
        If two layers specify conflicting limits for the same axis.
    """
    bottom = None
    top = None
    for index, ndvar in enumerate(layers):
        # first layer consults 'base', every later layer consults 'overlay'
        if index:
            kind = ndvar.info.get('overlay', ())
        else:
            kind = ndvar.info.get('base', ('trace',))
        if 'trace' not in kind:
            continue
        meas = ndvar.info.get('meas', default_meas)
        if meas not in vlims:
            continue
        bottom_, top_ = vlims[meas]
        if bottom is None:
            bottom = bottom_
        elif bottom_ != bottom:
            raise RuntimeError("Double vlim specification")
        if top is None:
            top = top_
        elif top_ != top:
            raise RuntimeError("Double vlim specification")
    return bottom, top
def find_fig_cmaps(epochs, cmap):
    """Find a colormap for every meas.

    The explicit ``cmap`` argument is consumed by the first meas
    encountered; every other meas falls back to its NDVar's info dict,
    and finally to the module default.

    Returns
    -------
    cmaps : dict
        {meas: cmap} dict for all meas.
    """
    out = {}
    for ndvar in chain(*epochs):
        meas = ndvar.info.get('meas', default_meas)
        if out.get(meas):
            continue  # this meas already has a truthy cmap
        if cmap is not None:
            out[meas] = cmap
            cmap = None  # the explicit argument applies only once
        else:
            out[meas] = ndvar.info.get('cmap', None)
    # fill remaining gaps with the module-wide default
    for meas in out:
        if out[meas] is None:
            out[meas] = default_cmap
    return out
def find_fig_contours(epochs, vlims, contours_arg):
    """Find contour arguments for every meas type

    Parameters
    ----------
    epochs : list of list of NDVar
        Data to be plotted.
    vlims : dict
        Vlims dict (used to interpret numerical arguments)
    contours_arg : int | sequence | dict
        User argument. Can be an int (number of contours), a sequence (values
        at which to draw contours), a kwargs dict (must contain the "levels"
        key), or a {meas: kwargs} dictionary.
    Returns
    -------
    contours : dict
        {meas: kwargs} mapping for contour plots.
    Notes
    -----
    The NDVar's info dict contains default arguments that determine how the
    NDVar is plotted as base and as overlay. In case of insufficient
    information, defaults apply. On the other hand, defaults can be overridden
    by providing specific arguments to plotting functions.
    """
    # a {meas: kwargs} dict is used as-is; any other argument is resolved
    # against the first NDVar of each meas encountered below
    if isinstance(contours_arg, dict) and 'levels' not in contours_arg:
        out = contours_arg.copy()
        contours_arg = None
    else:
        out = {}
    for ndvars in epochs:
        for layer, ndvar in enumerate(ndvars):
            meas = ndvar.info.get('meas', default_meas)
            if meas in out:
                continue
            if contours_arg is not None:
                # the user argument applies to the first unresolved meas only
                param = contours_arg
                contours_arg = None
            else:
                # consult the NDVar's info dict defaults
                if layer:  # overlay
                    kind = ndvar.info.get('overlay', ('contours',))
                else:
                    kind = ndvar.info.get('base', ())
                if 'contours' in kind:
                    param = ndvar.info.get('contours', None)
                    # layer-specific keys override the generic 'contours'
                    if layer:
                        param = ndvar.info.get('overlay_contours', param)
                    else:
                        param = ndvar.info.get('base_contours', param)
                    if isinstance(param, dict) and 'levels' not in param:
                        # {level: color} mapping -> levels/colors kwargs
                        levels = sorted(param)
                        colors = [param[v] for v in levels]
                        param = {'levels': levels, 'colors': colors}
                else:
                    param = None
            # normalize param into a matplotlib contour kwargs dict
            if param is None:
                out[meas] = None
            elif isinstance(param, dict):
                out[meas] = param
            elif isinstance(param, int):
                # int: draw `param` evenly spaced black contours within vlims
                vmin, vmax = vlims[meas]
                out[meas] = {'levels': np.linspace(vmin, vmax, param),
                             'colors': 'k'}
            else:
                # sequence of explicit level values
                out[meas] = {'levels': tuple(param), 'colors': 'k'}
    return out
def find_fig_vlims(plots, vmax=None, vmin=None, cmaps=None):
    """Find vmin and vmax parameters for every (meas, cmap) combination

    Parameters
    ----------
    plots : nested list of NDVar
        Unpacked plot data.
    vmax : None | dict | scalar
        Dict: predetermined vlims (take precedence). Scalar: user-specified
        vmax parameter (used for the first meas kind).
    vmin : None | scalar
        User-specified vmin parameter. If vmax is user-specified but vmin is
        None, 0 is used here (a symmetric cmap later turns this into -vmax
        via fix_vlim_for_cmap).
    cmaps : None | dict
        Optional {meas: cmap} mapping; when absent the NDVar's own
        info['cmap'] is consulted.

    Returns
    -------
    vlims : dict
        Dictionary of im limits: {meas: (vmin, vmax)}.
    """
    if isinstance(vmax, dict):
        vlims = vmax
        user_vlim = None
    else:
        vlims = {}
        if vmax is None:
            user_vlim = None
        elif vmin is None:
            user_vlim = (vmax, 0)
        else:
            user_vlim = (vmax, vmin)
    out = {}  # {meas: (vmin, vmax), ...}
    first_meas = None  # what to use user-specified vmax for
    for ndvar in chain(*plots):
        meas = ndvar.info.get('meas', default_meas)
        if user_vlim is not None and first_meas is None:
            first_meas = meas
            # FIX: was `vmin, vmax = user_vlim`, which unpacked the
            # (vmax, vmin) tuple in swapped order; harmless only because the
            # branch below re-assigned it, but wrong and confusing.
            vmax, vmin = user_vlim
        else:
            vmin, vmax = find_vlim_args(ndvar)
        if meas in vlims:
            # predetermined limits take precedence (merged in at the end)
            continue
        elif user_vlim is not None and meas == first_meas:
            vmax, vmin = user_vlim
        elif meas in out:
            # widen existing limits to cover this NDVar as well
            vmin_, vmax_ = out[meas]
            vmin = min(vmin, vmin_)
            vmax = max(vmax, vmax_)
        if cmaps:
            cmap = cmaps[meas]
        else:
            cmap = ndvar.info.get('cmap', None)
        vmin, vmax = fix_vlim_for_cmap(vmin, vmax, cmap)
        out[meas] = (vmin, vmax)
    out.update(vlims)
    return out
def find_vlim_args(ndvar, vmin=None, vmax=None):
    """Determine (vmin, vmax) plot limits for an NDVar.

    Explicit arguments win, then the NDVar's info dict; when no vmax is
    available, both limits are derived from the data range, rounded outward
    to the data's order of magnitude.
    """
    if vmax is None:
        vmax = ndvar.info.get('vmax', None)
    if vmin is None:
        vmin = ndvar.info.get('vmin', None)
    if vmax is not None:
        return vmin, vmax
    # derive limits from the data, rounded to the leading decimal digit
    data_max = np.nanmax(ndvar.x)
    data_min = np.nanmin(ndvar.x)
    magnitude = max(abs(data_max), abs(data_min)) or 1e-14  # guard all-zero data
    exponent = math.floor(np.log10(magnitude))
    vmax = math.ceil(data_max * 10 ** -exponent) * 10 ** exponent
    vmin = math.floor(data_min * 10 ** -exponent) * 10 ** exponent
    return vmin, vmax
def fix_vlim_for_cmap(vmin, vmax, cmap):
    "Fix the vmin value to yield an appropriate range for the cmap"
    if cmap in symmetric_cmaps:
        # symmetric colormaps want vmin == -vmax
        if vmax is None and vmin is None:
            pass
        else:
            if vmin is None:
                vmax = abs(vmax)
            elif vmax is None:
                vmax = abs(vmin)
            else:
                vmax = max(abs(vmax), abs(vmin))
            vmin = -vmax
    elif cmap in zerobased_cmaps:
        # zero-based colormaps always start at 0
        vmin = 0
    return vmin, vmax
def find_data_dims(ndvar, dims):
    """Find dimensions in data

    Parameters
    ----------
    ndvar : NDVar
        Data object.
    dims : int | tuple
        Required number of dimensions, or tuple of dimension names
        (entries may be None to accept any unclaimed dimension).

    Returns
    -------
    dims : tuple | list of str
        Resolved dimension names (a leading 'case' dimension is dropped
        when the NDVar has one dimension more than requested).

    Raises
    ------
    ValueError
        If the NDVar's dimensions cannot satisfy the request.
    """
    if isinstance(dims, int):
        if ndvar.ndim == dims:
            return ndvar.dimnames
        if ndvar.ndim - 1 == dims:
            return ndvar.dimnames[1:]
        raise ValueError("NDVar does not have the right number of dimensions")
    # sequence of dimension names (possibly containing None wildcards)
    if len(dims) == ndvar.ndim:
        available = list(ndvar.dimnames)
    elif len(dims) == ndvar.ndim - 1 and ndvar.has_case:
        available = list(ndvar.dimnames[1:])
    else:
        raise ValueError("NDVar does not have the right number of dimensions")
    out_dims = []
    for requested in dims:
        if requested is None:
            # wildcard: take the first dimension not explicitly requested
            for candidate in available:
                if candidate not in dims:
                    requested = candidate
                    break
            else:
                raise ValueError("NDVar does not have requested dimensions %s" % repr(dims))
        elif requested not in available:
            raise ValueError("NDVar does not have requested dimension %s" % requested)
        out_dims.append(requested)
        available.remove(requested)
    return out_dims
def unpack_epochs_arg(Y, dims, Xax=None, ds=None):
    """Unpack the first argument to top-level NDVar plotting functions

    Parameters
    ----------
    Y : NDVar | list
        the first argument.
    dims : int | tuple
        The number of dimensions needed for the plotting function, or tuple
        with dimension entries (str | None).
    Xax : None | categorial
        A model to divide Y into different axes. Xax is currently applied on
        the first level, i.e., it assumes that Y's first dimension is cases.
    ds : None | Dataset
        Dataset containing data objects which are provided as str.

    Returns
    -------
    axes_data : list of list of NDVar
        The processed data to plot.
    dims : tuple of str
        Names of the dimensions.

    Notes
    -----
    Ndvar plotting functions above 1-d UTS level should support the following
    API:
    - simple NDVar: summary ``plot(meg)``
    - list of ndvars: summary for each ``plot(meg.as_list())``
    - NDVar and Xax argument: summary for each ``plot(meg, Xax=subject)
    - nested list of layers (e.g., ttest results: [c1, c0, [c1-c0, p]])
    """
    # get proper Y
    if hasattr(Y, '_default_plot_obj'):
        # e.g. test results expose their preferred plot data this way
        Y = Y._default_plot_obj
    if isinstance(Y, (tuple, list)):
        # defer dimension resolution until individual layers are unpacked
        data_dims = None
        if isinstance(dims, int):
            ndims = dims
        else:
            ndims = len(dims)
    else:
        Y = asndvar(Y, ds=ds)
        data_dims = find_data_dims(Y, dims)
        ndims = len(data_dims)
    if Xax is not None and isinstance(Y, (tuple, list)):
        err = ("Xax can only be used to divide Y into different axes if Y is "
               "a single NDVar (got a %s)." % Y.__class__.__name__)
        raise TypeError(err)
    # create list of plots
    if isinstance(Xax, str) and Xax.startswith('.'):
        # '.dimname': one axes per value of that dimension
        dimname = Xax[1:]
        if dimname == 'case':
            if not Y.has_case:
                err = ("Xax='.case' supplied, but Y does not have case "
                       "dimension")
                raise ValueError(err)
            values = range(len(Y))
            unit = ''
        else:
            dim = Y.get_dim(dimname)
            values = dim.values
            unit = getattr(dim, 'unit', '')
        # label each axes with "Dimname = value [unit]"
        name = dimname.capitalize() + ' = %s'
        if unit:
            name += ' ' + unit
        axes = [Y.sub(name=name % v, **{dimname: v}) for v in values]
    elif Xax is not None:
        # categorial model: one axes per cell
        Xax = ascategorial(Xax, ds=ds)
        axes = []
        for cell in Xax.cells:
            v = Y[Xax == cell]
            v.name = cell
            axes.append(v)
    elif isinstance(Y, (tuple, list)):
        axes = Y
    else:
        axes = [Y]
    axes = [unpack_ax(ax, ndims, ds) for ax in axes]
    if data_dims is None:
        # resolve dims from the first layer, then verify all others agree
        for layers in axes:
            for l in layers:
                if data_dims is None:
                    data_dims = find_data_dims(l, dims)
                else:
                    find_data_dims(l, data_dims)
    return axes, data_dims
def unpack_ax(ax, ndim, ds):
    "Unpack one axes specification into a list of NDVar layers"
    layers = ax if isinstance(ax, (tuple, list)) else (ax,)
    return [_unpack_layer(layer, ndim, ds) for layer in layers]
def _unpack_layer(y, ndim, ds):
    "Coerce a single layer to an NDVar with exactly ndim dimensions"
    ndvar = asndvar(y, ds=ds)
    # with exactly one surplus dimension, average over cases
    if ndvar.ndim == ndim + 1 and ndvar.has_case:
        ndvar = ndvar.mean('case')
    if ndvar.ndim != ndim:
        raise DimensionMismatchError(
            "Plot requires ndim=%i, got %r with ndim=%i"
            % (ndim, ndvar, ndvar.ndim))
    return ndvar
def str2tex(txt):
    """Escape tex-sensitive characters in *txt* when matplotlib's usetex
    mode is enabled; otherwise return *txt* unchanged.
    """
    if not txt or not mpl.rcParams['text.usetex']:
        return txt
    return texify(txt)
class mpl_figure:
    """Plain-matplotlib stand-in for the wx canvas frame
    (cf. _wxgui.mpl_canvas) — exposes the same Close/SetStatusText/Show/
    redraw/store_canvas interface so _EelFigure can use either backend.
    """
    def __init__(self, **fig_kwargs):
        "creates self.figure and self.canvas attributes and returns the figure"
        from matplotlib import pyplot
        self._plt = pyplot
        self.figure = pyplot.figure(**fig_kwargs)
        self.canvas = self.figure.canvas

    def Close(self):
        # close via pyplot so its figure registry stays consistent
        self._plt.close(self.figure)

    def SetStatusText(self, text):
        # no status bar in the pure-matplotlib backend
        pass

    def Show(self):
        # only block for the WX backend in auto-run mode
        if mpl.get_backend() == 'WXAgg' and do_autorun():
            self._plt.show()

    def redraw(self, axes=[], artists=[]):
        # NOTE: mutable default args are never mutated here, so they are safe
        "Adapted duplicate of mpl_canvas.FigureCanvasPanel"
        # blit-based redraw: restore the stored background, then redraw only
        # the requested regions
        self.canvas.restore_region(self._background)
        for ax in axes:
            ax.draw_artist(ax)
            extent = ax.get_window_extent()
            self.canvas.blit(extent)
        for artist in artists:
            ax = artist.get_axes()
            # NOTE(review): draws the artist's whole axes rather than just the
            # artist — possibly `ax.draw_artist(artist)` was intended; confirm
            # against _wxgui.mpl_canvas before changing.
            ax.draw_artist(ax)
            extent = ax.get_window_extent()
            self.canvas.blit(extent)

    def store_canvas(self):
        # snapshot the current canvas as the background for redraw()
        self._background = self.canvas.copy_from_bbox(self.figure.bbox)
# MARK: figure composition
def _loc(name, size=(0, 0), title_space=0, frame=.01):
    """Convert a loc argument into (x, y) figure coordinates of the bottom
    left edge. *name* is either a "vertical horizontal" string (e.g.
    "upper left") or a length-2 (x, y) sequence; string components are
    resolved against the given element *size*, *title_space* and *frame*
    margin.
    """
    if isinstance(name, basestring):
        y, x = name.split()
    elif len(name) == 2:
        x, y = name
    else:
        raise NotImplementedError("loc needs to be string or len=2 tuple/list")
    # resolve the horizontal component
    if isinstance(x, basestring):
        if x == 'left':
            x = frame
        elif x in ('middle', 'center', 'centre'):
            x = .5 - size[0] / 2
        elif x == 'right':
            x = 1 - frame - size[0]
        else:
            raise ValueError(x)
    # resolve the vertical component
    if isinstance(y, basestring):
        if y in ('top', 'upper'):
            y = 1 - frame - title_space - size[1]
        elif y in ('middle', 'center', 'centre'):
            y = .5 - title_space / 2. - size[1] / 2.
        elif y in ('lower', 'bottom'):
            y = frame
        else:
            raise ValueError(y)
    return x, y
def frame_title(plot, y, x=None, xax=None):
    """Generate frame title from common data structure

    Parameters
    ----------
    plot : str
        Name of the plot.
    y : data-obj
        Dependent variable.
    x : data-obj
        Predictor.
    xax : data-obj
        Grouping variable for axes.

    Returns
    -------
    title : str
        "plot: y[ ~ x][ | xax]" assembled from the objects' .name attributes.
    """
    title = '%s: %s' % (plot, y.name)
    if x is not None:
        title += ' ~ %s' % x.name
    if xax is not None:
        title += ' | %s' % xax.name
    return title
class _EelFigure(object):
    """Parent class for Eelbrain figures.

    In order to subclass:
    - find desired figure properties and then use them to initialize
      the _EelFigure superclass; then use the
      :py:attr:`_EelFigure.figure` and :py:attr:`_EelFigure.canvas` attributes.
    - end the initialization by calling `_EelFigure._show()`
    - add the :py:meth:`_fill_toolbar` method
    """
    _default_format = 'png'  # default format when saving for fmtext
    _default_xlabel_ax = -1  # index of the axes receiving the x-axis label
    _default_ylabel_ax = 0  # index of the axes receiving the y-axis label
    _make_axes = True  # subclasses set False to create their own axes

    def __init__(self, frame_title, nax, axh_default, ax_aspect, tight=True,
                 title=None, frame=True, yaxis=True, *args, **kwargs):
        """Parent class for Eelbrain figures.

        Parameters
        ----------
        frame_title : str
            Frame title.
        nax : None | int
            Number of axes to produce layout for. If None, no layout is
            produced.
        axh_default : scalar
            Default height per axes.
        ax_aspect : scalar
            Width to height ration (axw / axh).
        tight : bool
            Rescale axes so that the space in the figure is used optimally
            (default True).
        title : str
            Figure title (default is no title).
        frame : bool | 't'
            How to frame the plots.
            ``True`` (default): normal matplotlib frame;
            ``False``: omit top and right lines;
            ``'t'``: draw spines at x=0 and y=0, common for ERPs.
        yaxis : bool
            Draw the y-axis (default True).
        h : scalar
            Height of the figure.
        w : scalar
            Width of the figure.
        axh : scalar
            Height of the axes.
        axw : scalar
            Width of the axes.
        nrow : int
            Set a limit to the number of rows (default is no limit).
        ncol : int
            Set a limit to the number of columns (defaut is no limit). If
            neither nrow or ncol is specified, a square layout is preferred.
        dpi : int
            DPI for the figure (default is to use matplotlib rc parameters).
        show : bool
            Show the figure in the GUI (default True). Use False for creating
            figures and saving them without displaying them on the screen.
        run : bool
            Run the Eelbrain GUI app (default is True for interactive plotting and
            False in scripts).
        """
        if title:
            frame_title = '%s: %s' % (frame_title, title)
        # layout: remaining positional/keyword args are Layout parameters
        layout = Layout(nax, ax_aspect, axh_default, tight, *args, **kwargs)
        # find the right frame: wx canvas when the eelbrain GUI backend is
        # enabled, plain matplotlib otherwise
        if backend['eelbrain']:
            from .._wxgui import get_app
            from .._wxgui.mpl_canvas import CanvasFrame
            get_app()
            frame_ = CanvasFrame(None, frame_title, eelfigure=self, **layout.fig_kwa)
        else:
            frame_ = mpl_figure(**layout.fig_kwa)
        figure = frame_.figure
        if title:
            self._figtitle = figure.suptitle(title)
        else:
            self._figtitle = None
        # make axes
        axes = []
        if self._make_axes and nax is not None:
            for i in xrange(1, nax + 1):
                ax = figure.add_subplot(layout.nrow, layout.ncol, i)
                axes.append(ax)
                # axes modifications
                if frame == 't':
                    # ERP style: spines crossing at the origin
                    ax.tick_params(direction='inout', bottom=False, top=True,
                                   left=False, right=True, labelbottom=True,
                                   labeltop=False, labelleft=True,
                                   labelright=False)
                    ax.spines['right'].set_position('zero')
                    ax.spines['left'].set_visible(False)
                    ax.spines['top'].set_position('zero')
                    ax.spines['bottom'].set_visible(False)
                elif not frame:
                    # hide top and right spines
                    ax.yaxis.set_ticks_position('left')
                    ax.spines['right'].set_visible(False)
                    ax.xaxis.set_ticks_position('bottom')
                    ax.spines['top'].set_visible(False)
                if not yaxis:
                    ax.yaxis.set_ticks(())
                    ax.spines['left'].set_visible(False)
        # store attributes
        self._frame = frame_
        self.figure = figure
        self._axes = axes
        self.canvas = frame_.canvas
        self._layout = layout
        self._tight_arg = tight
        # add callbacks for the status-bar readout
        self.canvas.mpl_connect('motion_notify_event', self._on_motion)
        self.canvas.mpl_connect('axes_leave_event', self._on_leave_axes)

    def _show(self):
        """Finalize and (optionally) display the figure; subclasses call this
        at the end of their __init__."""
        if self._tight_arg:
            self._tight()
        self.draw()
        if backend['show'] and self._layout.show:
            self._frame.Show()
            if backend['eelbrain'] and do_autorun(self._layout.run):
                from .._wxgui import run
                run()

    def _tight(self):
        "Default implementation based on matplotlib"
        self.figure.tight_layout()
        if self._figtitle:
            # make room for the figure title above the axes
            trans = self.figure.transFigure.inverted()
            extent = self._figtitle.get_window_extent(self.figure.canvas.renderer)
            bbox = trans.transform(extent)
            t_bottom = bbox[0, 1]
            self.figure.subplots_adjust(top=1 - 2 * (1 - t_bottom))

    def _on_leave_axes(self, event):
        "update the status bar when the cursor leaves axes"
        self._frame.SetStatusText(':-)')

    def _on_motion(self, event):
        "update the status bar for mouse movement"
        ax = event.inaxes
        if ax:
            x = ax.xaxis.get_major_formatter().format_data(event.xdata)
            y = ax.yaxis.get_major_formatter().format_data(event.ydata)
            self._frame.SetStatusText('x = %s, y = %s' % (x, y))

    def _fill_toolbar(self, tb):
        """
        Subclasses should add their toolbar items in this function which
        is called by CanvasFrame.FillToolBar()
        """
        pass

    def close(self):
        "Close the figure."
        self._frame.Close()

    def _configure_xaxis_dim(self, meas, label, ticklabels, axes=None):
        """Configure the x-axis based on a dimension

        Parameters
        ----------
        meas : str
            The measure assigned to this axis.
        label : None | str
            Axis label.
        ticklabels : bool
            Whether to print tick-labels.
        axes : list of Axes
            Axes which to format (default is EelFigure._axes)
        """
        if axes is None:
            axes = self._axes
        formatter, label = find_axis_params_dim(meas, label)
        if ticklabels:
            for ax in axes:
                ax.xaxis.set_major_formatter(formatter)
        else:
            for ax in axes:
                ax.xaxis.set_ticklabels(())
        if label:
            self.set_xlabel(label)

    def _configure_xaxis(self, v, label, axes=None):
        # configure the x-axis from a data object (formatter + label)
        if axes is None:
            axes = self._axes
        formatter, label = find_axis_params_data(v, label)
        for ax in axes:
            ax.xaxis.set_major_formatter(formatter)
        if label:
            self.set_xlabel(label)

    def _configure_yaxis_dim(self, meas, label, axes=None):
        "Configure the y-axis based on a dimension"
        if axes is None:
            axes = self._axes
        formatter, label = find_axis_params_dim(meas, label)
        for ax in axes:
            ax.yaxis.set_major_formatter(formatter)
        if label:
            self.set_ylabel(label)

    def _configure_yaxis(self, v, label, axes=None):
        # configure the y-axis from a data object (formatter + label)
        if axes is None:
            axes = self._axes
        formatter, label = find_axis_params_data(v, label)
        for ax in axes:
            ax.yaxis.set_major_formatter(formatter)
        if label:
            self.set_ylabel(label)

    def draw(self):
        "(Re-)draw the figure (after making manual changes)."
        self._frame.canvas.draw()

    def _asfmtext(self):
        # fmtxt protocol: represent the figure as an image
        return self.image()

    def image(self, name=None, format=None):
        """Create FMTXT Image from the figure

        Parameters
        ----------
        name : str
            Name for the file (without extension; default is 'image').
        format : str
            File format (default 'png').

        Returns
        -------
        image : fmtxt.Image
            Image FMTXT object.
        """
        if format is None:
            format = self._default_format
        image = Image(name, format)
        self.figure.savefig(image, format=format)
        return image

    def save(self, *args, **kwargs):
        "Short-cut for Matplotlib's :meth:`~matplotlib.figure.Figure.savefig()`"
        self.figure.savefig(*args, **kwargs)

    def set_xtick_rotation(self, rotation):
        """Rotate every x-axis tick-label by an angle (counterclockwise, in degrees)

        Parameters
        ----------
        rotation : scalar
            Counterclockwise rotation angle, in degrees.
        """
        for ax in self._axes:
            for t in ax.get_xticklabels():
                t.set_rotation(rotation)
        self.draw()

    def set_xlabel(self, label, ax=None):
        """Set the label for the x-axis

        Parameters
        ----------
        label : str
            X-axis label.
        ax : int
            Axis on which to set the label (default is usually the last axis).
        """
        if ax is None:
            ax = self._default_xlabel_ax
        self._axes[ax].set_xlabel(label)

    def set_ylabel(self, label, ax=None):
        """Set the label for the y-axis

        Parameters
        ----------
        label : str
            Y-axis label.
        ax : int
            Axis on which to set the label (default is usually the first axis).
        """
        if ax is None:
            ax = self._default_ylabel_ax
        self._axes[ax].set_ylabel(label)
class Layout():
    """Create layouts for figures with several axes of the same size
    """
    def __init__(self, nax, ax_aspect, axh_default, tight, h=None, w=None,
                 axh=None, axw=None, nrow=None, ncol=None, dpi=None, show=True,
                 run=None):
        """Create a grid of axes based on variable parameters.

        Parameters
        ----------
        nax : int
            Number of axes required.
        ax_aspect : scalar
            Width / height aspect of the axes.
        axh_default : scalar
            The default axes height if it can not be determined from the other
            parameters.
        tight : bool
            Rescale axes so that the space in the figure is used optimally
            (default True).
        h : scalar
            Height of the figure.
        w : scalar
            Width of the figure.
        axh : scalar
            Height of the axes.
        axw : scalar
            Width of the axes.
        nrow : int
            Set a limit to the number of rows (default is no limit).
        ncol : int
            Set a limit to the number of columns (defaut is no limit). If
            neither nrow or ncol is specified, a square layout is preferred.
        dpi : int
            DPI for the figure (default is to use matplotlib rc parameters).
        show : bool
            Show the figure in the GUI (default True). Use False for creating
            figures and saving them without displaying them on the screen.
        run : bool
            Run the Eelbrain GUI app (default is True for interactive plotting and
            False in scripts).
        """
        # sanity checks: a single axes can not be larger than the figure
        if h and axh:
            if h < axh:
                raise ValueError("h < axh")
        if w and axw:
            if w < axw:
                raise ValueError("w < axw")
        self.w_fixed = w or axw  # whether the width was user-determined
        if nax is None:
            # no grid requested: only determine overall figure size
            if w is None:
                if h is None:
                    h = axh_default
                w = ax_aspect * h
            elif h is None:
                h = w / ax_aspect
        elif nrow is None and ncol is None:
            # infer grid shape from whichever size parameters are given
            if w and axw:
                ncol = math.floor(w / axw)
                nrow = math.ceil(nax / ncol)
                if h:
                    axh = axh or h / nrow
                elif axh:
                    h = axh * nrow
                else:
                    axh = axw / ax_aspect
                    h = axh * nrow
            elif h and axh:
                nrow = math.floor(h / axh)
                ncol = math.ceil(nax / nrow)
                if w:
                    axw = axw or w / ncol
                elif axw:
                    w = axw * ncol
                else:
                    axw = axh * ax_aspect
                    w = axw * ncol
            elif w:
                # only total width known: derive column count from aspect
                if axh:
                    ncol = round(w / (axh * ax_aspect))
                else:
                    ncol = round(w / (axh_default * ax_aspect))
                ncol = max(1, min(nax, ncol))
                axw = w / ncol
                nrow = math.ceil(nax / ncol)
                if h:
                    axh = h / nrow
                else:
                    if not axh:
                        axh = axw / ax_aspect
                    h = nrow * axh
            elif h:
                # only total height known: derive row count from aspect
                if axw:
                    nrow = round(h / (axw / ax_aspect))
                else:
                    nrow = round(h / axh_default)
                if nax < nrow:
                    nrow = nax
                elif nrow < 1:
                    nrow = 1
                axh = h / nrow
                ncol = math.ceil(nax / nrow)
                if w:
                    axw = w / ncol
                else:
                    if not axw:
                        axw = axh * ax_aspect
                    w = ncol * axw
            elif axh or axw:
                # axes size known: fill rows up to the default maximum width
                axh = axh or axw / ax_aspect
                axw = axw or axh * ax_aspect
                ncol = min(nax, math.floor(defaults['maxw'] / axw))
                nrow = math.ceil(nax / ncol)
                h = nrow * axh
                w = ncol * axw
            else:
                # nothing specified: try defaults, shrink to fit maxh/maxw
                maxh = defaults['maxh']
                maxw = defaults['maxw']
                # try default
                axh = axh_default
                axw = axh_default * ax_aspect
                ncol = min(nax, math.floor(maxw / axw))
                nrow = math.ceil(nax / ncol)
                h = axh * nrow
                if h > maxh:
                    # solve for a grid that fills maxw x maxh
                    col_to_row_ratio = maxw / (ax_aspect * maxh)
                    # nax = ncol * nrow
                    # nax = (col_to_row * nrow) * nrow
                    nrow = math.ceil(math.sqrt(nax / col_to_row_ratio))
                    ncol = math.ceil(nax / nrow)
                    h = maxh
                    axh = h / nrow
                    w = maxw
                    axw = w / ncol
                else:
                    w = axw * ncol
        else:
            # grid shape (partially) specified by the user
            if nrow is None:
                ncol = min(nax, ncol)
                nrow = int(math.ceil(nax / ncol))
            elif ncol is None:
                nrow = min(nax, nrow)
                ncol = int(math.ceil(nax / nrow))
            if h:
                axh = axh or h / nrow
            if w:
                axw = axw or w / ncol
            if not axw and not axh:
                axh = axh_default
            if axh and not axw:
                axw = axh * ax_aspect
            elif axw and not axh:
                axh = axw / ax_aspect
        if nax is not None:
            nrow = int(nrow)
            ncol = int(ncol)
            if w is None:
                w = axw * ncol
            if h is None:
                h = axh * nrow
        fig_kwa = dict(figsize=(w, h), dpi=dpi)
        # make subplot parameters absolute
        if nax and not tight:
            # scale the relative rc margins to a fixed 2-inch reference so
            # that margins do not grow with figure size
            size = 2
            bottom = mpl.rcParams['figure.subplot.bottom'] * size / h
            left = mpl.rcParams['figure.subplot.left'] * size / w
            right = 1 - (1 - mpl.rcParams['figure.subplot.right']) * size / w
            top = 1 - (1 - mpl.rcParams['figure.subplot.top']) * size / h
            hspace = mpl.rcParams['figure.subplot.hspace'] * size / h
            wspace = mpl.rcParams['figure.subplot.wspace'] * size / w
            fig_kwa['subplotpars'] = SubplotParams(left, bottom, right, top,
                                                   wspace, hspace)
        # store the resolved layout
        self.nax = nax
        self.h = h
        self.w = w
        self.axh = axh
        self.axw = axw
        self.nrow = nrow
        self.ncol = ncol
        self.fig_kwa = fig_kwa
        self.show = show
        self.run = run
class LegendMixin(object):
    """Mixin adding a legend plus a toolbar menu to choose its location."""
    # menu labels, parallel to __args below
    __choices = ('invisible', 'separate window', 'draggable', 'upper right',
                 'upper left', 'lower left', 'lower right', 'right',
                 'center left', 'center right', 'lower center', 'upper center',
                 'center')
    # loc values passed to __plot, parallel to __choices
    __args = (False, 'fig', 'draggable', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)

    def __init__(self, legend, legend_handles):
        """Legend toolbar menu mixin

        Parameters
        ----------
        legend : str | int | 'fig' | None
            Matplotlib figure legend location argument or 'fig' to plot the
            legend in a separate figure.
        legend_handles : dict
            {cell: handle} dictionary.
        """
        self.__handles = legend_handles
        self.legend = None
        if self.__handles:
            self.plot_legend(legend)

    def _fill_toolbar(self, tb):
        # add the legend-location chooser to the wx toolbar
        import wx
        choices = [name.title() for name in self.__choices]
        self.__ctrl = wx.Choice(tb, choices=choices, name='Legend')
        tb.AddControl(self.__ctrl, "Legend")
        self.__ctrl.Bind(wx.EVT_CHOICE, self.__OnChoice, source=self.__ctrl)

    def __OnChoice(self, event):
        # map the chooser selection to the corresponding loc argument
        self.__plot(self.__args[event.GetSelection()])

    def plot_legend(self, loc='fig', *args, **kwargs):
        """Plots (or removes) the legend from the figure.

        Parameters
        ----------
        loc : False | 'fig' | 'draggable' | str | int
            Where to plot the legend (see Notes; default 'fig').

        Returns
        -------
        legend_figure : None | legend
            If loc=='fig' the Figure, otherwise None.

        Notes
        -----
        legend content can be modified through the figure's
        ``legend_handles`` and ``legend_labels`` attributes.
        Possible values for the ``loc`` argument:
        ``False``:
            Make the current legend invisible
        'fig':
            Plot the legend in a new figure
        'draggable':
            The legend can be dragged to the desired position with the mouse
            pointer.
        str | int:
            Matplotlib position argument: plot the legend on the figure
        Matplotlib Position Arguments:
        - 'upper right' : 1,
        - 'upper left' : 2,
        - 'lower left' : 3,
        - 'lower right' : 4,
        - 'right' : 5,
        - 'center left' : 6,
        - 'center right' : 7,
        - 'lower center' : 8,
        - 'upper center' : 9,
        - 'center' : 10,
        """
        out = self.__plot(loc, *args, **kwargs)
        if loc:
            # keep the toolbar chooser in sync with the new location
            if isinstance(loc, basestring):
                if loc == 'fig':
                    loc = 'separate window'
                loc = self.__choices.index(loc)
            # NOTE(review): self.__ctrl only exists after _fill_toolbar() has
            # run (wx backend); confirm this path is unreachable without it.
            self.__ctrl.SetSelection(loc)
        return out

    def save_legend(self, *args, **kwargs):
        """Save the legend as image file

        Parameters for Matplotlib's figure.savefig()
        """
        p = self.plot_legend(show=False)
        p.save(*args, **kwargs)
        p.close()

    def __plot(self, loc, *args, **kwargs):
        # create, move, or hide the legend according to loc
        if loc and self.__handles:
            cells = sorted(self.__handles)
            labels = [cellname(cell) for cell in cells]
            handles = [self.__handles[cell] for cell in cells]
            if loc == 'fig':
                return Legend(handles, labels, *args, **kwargs)
            else:
                # take care of old legend; remove() not implemented as of mpl 1.3
                if self.legend is not None and loc == 'draggable':
                    self.legend.draggable(True)
                elif self.legend is not None:
                    self.legend.set_visible(False)
                    self.legend.draggable(False)
                elif loc == 'draggable':
                    self.legend = self.figure.legend(handles, labels, loc=1)
                    self.legend.draggable(True)
                if loc != 'draggable':
                    self.legend = self.figure.legend(handles, labels, loc=loc)
                self.draw()
        elif self.legend is not None:
            # loc is falsy: hide the existing legend
            self.legend.set_visible(False)
            self.legend = None
            self.draw()
        elif not self.__handles:
            raise RuntimeError("No handles to produce legend.")
class Legend(_EelFigure):
    "Standalone figure that contains only a legend."
    def __init__(self, handles, labels, *args, **kwargs):
        _EelFigure.__init__(self, "Legend", None, 2, 1, False, *args, **kwargs)
        self.legend = self.figure.legend(handles, labels, loc=2)
        # resize the window to fit the legend, unless the width was fixed
        if not self._layout.w_fixed:
            self.draw()
            bbox = self.legend.get_window_extent()
            width0, height0 = self._frame.GetSize()
            new_height = int(height0 + bbox.x0 - bbox.y0)
            new_width = int(bbox.x0 + bbox.x1)
            self._frame.SetSize((new_width, new_height))
        self._show()
class Figure(_EelFigure):
    "Generic empty Eelbrain figure."
    def __init__(self, nax=None, title='Figure', *args, **kwargs):
        _EelFigure.__init__(self, title, nax, 2, 1, *args, **kwargs)

    def show(self):
        "Display the figure."
        self._show()
class ImageTiler(object):
    """
    Create tiled images and animations from individual image files.

    Parameters
    ----------
    ext : str
        Extension to append to generated file names.
    nrow : int
        Number of rows of tiles in a frame.
    ncol : int
        Number of columns of tiles in a frame.
    nt : int
        Number of time points in the animation.
    dest : str(directory)
        Directory in which to place files. If None, a temporary directory
        is created and removed upon deletion of the ImageTiler instance.
    """
    def __init__(self, ext='.png', nrow=1, ncol=1, nt=1, dest=None):
        if dest is None:
            # temporary working directory, cleaned up in __del__
            self.dir = tempfile.mkdtemp()
        else:
            if not os.path.exists(dest):
                os.makedirs(dest)
            self.dir = dest
        # find number of digits necessary to name images
        row_fmt = '%%0%id' % (np.floor(np.log10(nrow)) + 1)
        col_fmt = '%%0%id' % (np.floor(np.log10(ncol)) + 1)
        t_fmt = '%%0%id' % (np.floor(np.log10(nt)) + 1)
        self._tile_fmt = 'tile_%s_%s_%s%s' % (row_fmt, col_fmt, t_fmt, ext)
        self._frame_fmt = 'frame_%s%s' % (t_fmt, ext)
        self.dest = dest  # None indicates a temporary directory
        self.ncol = ncol
        self.nrow = nrow
        self.nt = nt

    def __del__(self):
        # only remove the directory if we created a temporary one
        if self.dest is None:
            shutil.rmtree(self.dir)

    def get_tile_fname(self, col=0, row=0, t=0):
        # full path for the tile at (col, row) and time point t
        if col >= self.ncol:
            raise ValueError("col: %i >= ncol" % col)
        if row >= self.nrow:
            raise ValueError("row: %i >= nrow" % row)
        if t >= self.nt:
            raise ValueError("t: %i >= nt" % t)
        if self.ncol == 1 and self.nrow == 1:
            # single-tile frames are stored directly as frames
            return self.get_frame_fname(t)
        fname = self._tile_fmt % (col, row, t)
        return os.path.join(self.dir, fname)

    def get_frame_fname(self, t=0, dirname=None):
        # full path for the assembled frame at time point t
        if t >= self.nt:
            raise ValueError("t: %i >= nt" % t)
        if dirname is None:
            dirname = self.dir
        fname = self._frame_fmt % (t,)
        return os.path.join(dirname, fname)

    def make_frame(self, t=0, redo=False):
        """Produce a single frame."""
        dest = self.get_frame_fname(t)
        if os.path.exists(dest):
            if redo:
                os.remove(dest)
            else:
                return
        # collect tiles; missing tile files leave black gaps
        images = []
        colw = [0] * self.ncol
        rowh = [0] * self.nrow
        for r in xrange(self.nrow):
            row = []
            for c in xrange(self.ncol):
                fname = self.get_tile_fname(c, r, t)
                if os.path.exists(fname):
                    im = PIL.Image.open(fname)
                    # each column/row grows to its largest tile
                    colw[c] = max(colw[c], im.size[0])
                    rowh[r] = max(rowh[r], im.size[1])
                else:
                    im = None
                row.append(im)
            images.append(row)
        # paste tiles at cumulative column/row offsets
        cpos = np.cumsum([0] + colw)
        rpos = np.cumsum([0] + rowh)
        out = PIL.Image.new('RGB', (cpos[-1], rpos[-1]))
        for r, row in enumerate(images):
            for c, im in enumerate(row):
                if im is None:
                    pass
                else:
                    out.paste(im, (cpos[c], rpos[r]))
        out.save(dest)

    def make_frames(self):
        # assemble every frame of the animation
        for t in xrange(self.nt):
            self.make_frame(t=t)

    def make_movie(self, dest, framerate=10, codec='mpeg4'):
        """Make all frames and export a movie"""
        dest = os.path.expanduser(dest)
        dest = os.path.abspath(dest)
        root, ext = os.path.splitext(dest)
        dirname = os.path.dirname(dest)
        # normalize the output extension to a supported container
        if ext not in ['.mov', '.avi']:
            if len(ext) == 4:
                dest = root + '.mov'
            else:
                dest = dest + '.mov'
        if not command_exists('ffmpeg'):
            err = ("Need ffmpeg for saving movies. Download from "
                   "http://ffmpeg.org/download.html")
            raise RuntimeError(err)
        elif os.path.exists(dest):
            os.remove(dest)
        elif not os.path.exists(dirname):
            os.mkdir(dirname)
        self.make_frames()
        # make the movie
        frame_name = self._frame_fmt
        # NOTE(review): '-sameq' was removed in modern ffmpeg releases —
        # confirm against the targeted ffmpeg version.
        cmd = ['ffmpeg',  # ?!? order of options matters
               '-f', 'image2',  # force format
               '-r', str(framerate),  # framerate
               '-i', frame_name,
               '-c', codec,
               '-sameq', dest,
               '-pass', '2'  #
               ]
        sp = subprocess.Popen(cmd, cwd=self.dir, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        stdout, stderr = sp.communicate()
        if not os.path.exists(dest):
            raise RuntimeError("ffmpeg failed:\n" + stderr)

    def save_frame(self, dest, t=0, overwrite=False):
        # copy the frame at time point t to an external destination
        if not overwrite and os.path.exists(dest):
            raise IOError("File already exists: %r" % dest)
        self.make_frame(t=t)
        fname = self.get_frame_fname(t)
        im = PIL.Image.open(fname)
        im.save(dest)
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given two integers L and R, find the count of numbers in the range [L, R] (inclusive) having a prime number
# of set bits in their binary representation.
# (Recall that the number of set bits an integer has is the number of 1s present when written in binary.
# For example, 21 written in binary is 10101 which has 3 set bits. Also, 1 is not a prime.)
# Example 1:
# Input: L = 6, R = 10
# Output: 4
# Explanation:
# 6 -> 110 (2 set bits, 2 is prime)
# 7 -> 111 (3 set bits, 3 is prime)
# 9 -> 1001 (2 set bits , 2 is prime)
# 10->1010 (2 set bits , 2 is prime)
# Example 2:
# Input: L = 10, R = 15
# Output: 5
# Explanation:
# 10 -> 1010 (2 set bits, 2 is prime)
# 11 -> 1011 (3 set bits, 3 is prime)
# 12 -> 1100 (2 set bits, 2 is prime)
# 13 -> 1101 (3 set bits, 3 is prime)
# 14 -> 1110 (3 set bits, 3 is prime)
# 15 -> 1111 (4 set bits, 4 is not prime)
# Note:
# L, R will be integers L <= R in the range [1, 10^6].
# R - L will be at most 10000.
# 200 / 200 test cases passed.
# Status: Accepted
# Runtime: 441 ms
# Leetcode Weekly Contest 67.
# Is_prime check, using the bin() built-in function to obtain the binary representation.
class Solution(object):
    def countPrimeSetBits(self, L, R):
        """Count the numbers in [L, R] whose number of set bits is prime.

        :type L: int
        :type R: int
        :rtype: int
        """
        # R <= 10^6 < 2^20, so a popcount is at most 19; the primes up to 23
        # therefore cover every possible bit count. Using a set (built once)
        # also fixes the original is_prime helper, which implicitly returned
        # None instead of False for composite counts.
        primes = {2, 3, 5, 7, 11, 13, 17, 19, 23}
        count = 0
        for i in range(L, R + 1):
            if bin(i).count('1') in primes:
                count += 1
        return count
if __name__ == '__main__':
    # quick manual checks; expected output: 5, 4, and the large-range case
    print(Solution().countPrimeSetBits(10, 15))
    print(Solution().countPrimeSetBits(6, 10))
    print(Solution().countPrimeSetBits(289098, 296294))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import random
import data_util
from data_util import ClothSample
import torch
import time
from modeling import AlbertForCloth
from transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
import functools
from timeit import default_timer as timer
def logging(s, log_path, print_=True, log_=True):
    """Emit message *s* to stdout (when *print_*) and/or append it, with a
    trailing newline, to the file at *log_path* (when *log_*).
    """
    if print_:
        print(s)
    if not log_:
        return
    with open(log_path, 'a+') as f_log:
        f_log.write(s + '\n')
def get_logger(log_path, **kwargs):
    """Return a logging callable pre-bound to *log_path* (plus any default
    keyword arguments for :func:`logging`, e.g. ``print_=False``)."""
    return functools.partial(logging, log_path=log_path, **kwargs)
def main():
    """Evaluate a pretrained AlbertForCloth model on the cached CLOTH data.

    Parses CLI flags, loads the model from --model_load_dir, runs a single
    evaluation pass, and writes metrics to <output_dir>/<timestamp>/eval_results.txt.
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_dir",
                        default='./data',
                        type=str,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default='bert-base-uncased', type=str,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default='cloth',
                        type=str,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default='EXP/',
                        type=str,
                        required=True,
                        help="The output directory where the model checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--cache_size",
                        default=256,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=16,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=128,
                        help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
    parser.add_argument('--model_load_dir',
                        type=str,
                        required=True,
                        help="The model.bin directory location")
    args = parser.parse_args()
    # Timestamped run directory so repeated runs never collide.
    suffix = time.strftime('%Y%m%d-%H%M%S')
    args.output_dir = os.path.join(args.output_dir, suffix)
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)
    # NOTE(review): this rebinds the module-level `logging` helper name to a
    # partial bound to this run's log file.
    logging = get_logger(os.path.join(args.output_dir, 'log.txt'))
    data_file = {'temp':'temp'}
    for key in data_file.keys():
        data_file[key] = data_file[key] + '-' + args.bert_model + '.pt'
    # NOTE(review): `device`/`n_gpu` are bound only on this branch; with
    # local_rank != -1 and CUDA enabled the lines below would raise
    # NameError. Device is also hard-coded to cuda:1 — confirm intended.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda:1")
        n_gpu = 1#torch.cuda.device_count()
    logging("device {} n_gpu {} distributed training {}".format(device, n_gpu, bool(args.local_rank != -1)))
    task_name = args.task_name.lower()
    print("===================", args.local_rank)
    # Prepare model
    model = AlbertForCloth.from_pretrained(args.model_load_dir, cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
    model.to(device)
    if n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Start evaluation
    logging("\033[1;32m******Running evaluation******\n\033[0;37m")
    logging("Batch size = {}".format(args.eval_batch_size))
    valid_data = data_util.Loader(args.data_dir, data_file['temp'], args.cache_size, args.eval_batch_size, device)
    # Run prediction for full data
    model.eval()
    eval_loss, eval_accuracy, eval_h_acc, eval_m_acc = 0, 0, 0, 0
    nb_eval_steps, nb_eval_examples, nb_eval_h_examples = 0, 0, 0
    for inp, tgt in valid_data.data_iter(shuffle=False):
        with torch.no_grad():
            tmp_eval_loss, tmp_eval_accuracy, tmp_h_acc, tmp_m_acc = model(inp, tgt)
        if n_gpu > 1:
            tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu.
            tmp_eval_accuracy = tmp_eval_accuracy.sum()
            tmp_h_acc = tmp_h_acc.sum()
            tmp_m_acc = tmp_m_acc.sum()
        eval_loss += tmp_eval_loss.item()
        eval_accuracy += tmp_eval_accuracy.item()
        eval_h_acc += tmp_h_acc.item()
        eval_m_acc += tmp_m_acc.item()
        # inp[-2] appears to mask valid blanks and inp[-1] to flag "high"
        # (harder) examples — assumed from usage; confirm against data_util.
        nb_eval_examples += inp[-2].sum().item()
        nb_eval_h_examples += (inp[-2].sum(-1) * inp[-1]).sum().item()
        nb_eval_steps += 1
    print("=====================nb_eval============", nb_eval_steps)
    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    # Guard against divide-by-zero when no "high" examples exist.
    if nb_eval_h_examples != 0:
        eval_h_acc = eval_h_acc / nb_eval_h_examples
    else:
        eval_h_acc = 0
    eval_m_acc = eval_m_acc / (nb_eval_examples - nb_eval_h_examples)
    result = {'dev_eval_loss': eval_loss,
              'dev_eval_accuracy': eval_accuracy,
              'dev_h_acc':eval_h_acc,
              'dev_m_acc':eval_m_acc}
    output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logging("***** Dev Eval results *****")
        for key in sorted(result.keys()):
            logging(" {} = {}".format(key, str(result[key])))
            writer.write("%s = %s\n" % (key, str(result[key])))
# Script entry point.
if __name__ == "__main__":
    main()
|
from pretrained.corridor import corridor_interpolation
from pretrained.sphere import sphere_interpolation

# Run interpolation on both datasets with the same pretrained FlowNet2
# checkpoint; outputs/side effects are handled inside the callees.
# Corridor dataset interpolation
corridor_interpolation(model_path="./flownet2/pretrained_models/FlowNet2_checkpoint.pth.tar")
# Sphere dataset interpolation
sphere_interpolation(model_path="./flownet2/pretrained_models/FlowNet2_checkpoint.pth.tar")
|
import datetime
#from datetime import datetime
import urllib
import urllib2
import json
import boto3
import pytz
import requests
import time
import os
import logging
import re
from botocore.exceptions import ClientError
from requests.auth import HTTPBasicAuth
from pytz import timezone
from urllib2 import Request, urlopen, URLError, HTTPError
# Module-level AWS clients so they are reused across Lambda invocations.
client_ec2 = boto3.client('ec2')
client_cfn = boto3.client('cloudformation')
client_ats = boto3.client('autoscaling')
#ambariPass = os.environ['ambariPassword']
client_db = boto3.client('dynamodb')
#print('This is My Ambari Password:-' + ambariPass)
# SECURITY(review): hard-coded Ambari credentials (also printed to logs
# below) and a committed Slack webhook URL — move both to environment
# variables / Secrets Manager and rotate the webhook.
ambariUser = 'admin'
ambariPass = 'admin'
print('This is My Ambari Password:-' + ambariPass)
HOOK_URL = "https://hooks.slack.com/services/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"
def lambda_handler(event, context):
    """Scheduled start/stop of an HDP stack's EC2 instances and services.

    Scans the DynamoDB 'ResourceTable' for per-stack schedules; for the
    matching dev stack it starts or stops the instances (and the
    Ambari-managed services) according to the current US/Eastern time,
    and sends one-shot Slack alerts shortly before each transition.

    Fix: the first instance-start loop had a doubled colon
    (``for j in range(...))::``) which was a SyntaxError.
    """
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    table = dynamodb.Table('ResourceTable')
    from base64 import b64decode
    from urllib2 import Request, urlopen, URLError, HTTPError
    print('Starting')
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    response_scan = client_db.scan(TableName='ResourceTable')
    print(response_scan)
    now = datetime.datetime.now(timezone('US/Eastern'))
    print('current time EST is:-')
    print(now)
    #day_no = datetime.datetime.today().weekday()
    day_no = now.weekday()
    print('day_no is '+ str(day_no))
    count_stacks = response_scan['Count']
    for i in range(0,count_stacks):
        instanceList = []
        stack_name = response_scan['Items'][i]['stackName']
        print(stack_name)
        if 'aws-hdp-platform-lmb-nonprod-dev' in stack_name['S']:
            instanceList = response_scan['Items'][i]['instanceIds']['SS']
            print(instanceList)
            # Today's schedule window, e.g. start '07:00' / stop '19:00'.
            startTimeString = response_scan['Items'][i]['days']['M'][str(day_no)]['M']['start']['S']
            startHour = int(startTimeString.split(':')[0])
            startMin = int(startTimeString.split(':')[1])
            stopTimeString = response_scan['Items'][i]['days']['M'][str(day_no)]['M']['stop']['S']
            stopHour = int(stopTimeString.split(':')[0])
            stopMin = int(stopTimeString.split(':')[1])
            startTime = int(startHour)
            stopTime = int(stopHour)
            print('Start Hour is ')
            print(startHour)
            print('Start Min is')
            print(startMin)
            print('Stop Hour is')
            print(stopHour)
            print('Stop Min is')
            print(stopMin)
            asgList = response_scan['Items'][i]['autoScalingGroups']['SS']
            print(asgList)
            try:
                response_instance = client_ec2.describe_instances(InstanceIds = instanceList)
                #print (response_instance)
            except ClientError as e:
                logger.error("Received error: %s", e, exc_info=True)
                if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':
                    print (e)
                    print('error has beeen printed')
                    # Drop the unknown instance ids and retry the lookup.
                    missing_instances = re.findall(r"'(.*?)'", e.response['Error']['Message'])[0].replace("'", "").split(", ")
                    instanceList = list(set(instanceList) - set(missing_instances))
                    print(instanceList)
                    response_instance = client_ec2.describe_instances(InstanceIds = instanceList)
            # Locate the Ambari server by its tag value.
            for j in range(0,len(instanceList)):
                tag_length = len(response_instance['Reservations'][j]['Instances'][0]['Tags'])
                print(tag_length)
                for k in range (0,tag_length):
                    if 'Ambari' in response_instance['Reservations'][j]['Instances'][0]['Tags'][k]['Value']:
                        # NOTE(review): despite the name, this holds an
                        # instance id, not an IP address.
                        ambariIp = response_instance['Reservations'][j]['Instances'][0]['InstanceId']
            #check if its a weekend.
            #print("Its a weekday")
            print('ambariIp is')
            print(ambariIp)
            print(now.hour)
            print(now.minute)
            if int(startTime) > int(stopTime):
                # 'Reverse' schedule: the running window wraps past midnight.
                print('Reverse')
                if int(startTime) <= now.hour:
                    print("Its Within hour limit to start the instances")
                    # One-shot Slack alert shortly before the stop time.
                    if (now.hour == (int(stopTime) - 1) and int(stopHour) != 24):
                        if (now.minute >= int(stopMin)):
                            response_get = table.get_item(Key={'stackName': stack_name['S'] })
                            flagIA = response_get['Item']['instanceAlertFlag']
                            if flagIA != 1:
                                print("sending stop alert")
                                send_alert(stack_name,'Stop',stopTimeString)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 1},ReturnValues="UPDATED_NEW")
                    elif (now.hour == int(stopHour)):
                        if (now.minute < int(stopMin)):
                            response_get = table.get_item(Key={'stackName': stack_name['S'] })
                            flagIA = response_get['Item']['instanceAlertFlag']
                            if flagIA != 1:
                                print("sending stop alert")
                                send_alert(stack_name,'Stop',stopTimeString)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 1},ReturnValues="UPDATED_NEW")
                    if int(startMin) <= now.minute :
                        print("Its Within Minutes limit to start the instances")
                        # NOTE(review): len(response_instance) counts the keys of the
                        # describe_instances response dict, not the instances —
                        # probably meant len(response_instance['Reservations']).
                        # Fix applied here: the original line ended in '::' (SyntaxError).
                        for j in range(0,len(response_instance)):
                            if response_instance['Reservations'][j]['Instances'][0]['State']['Name'] != 'running':
                                try:
                                    client_ec2.start_instances( InstanceIds = instanceList)
                                    response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 0},ReturnValues="UPDATED_NEW")
                                except ClientError as e:
                                    logger.error("Received error: %s", e, exc_info=True)
                                    if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':
                                        print (e)
                                        print('error has beeen printed')
                                        missing_instances = re.findall(r"'(.*?)'", e.response['Error']['Message'])[0].replace("'", "").split(", ")
                                        instanceList = list(set(instanceList) - set(missing_instances))
                                        print(instanceList)
                                        client_ec2.start_instances( InstanceIds = instanceList)
                                        response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 0},ReturnValues="UPDATED_NEW")
                                print("waiting for 120 seconds so that all the instances are up and running properly")
                                time.sleep(120)
                                print("Starting services now")
                                services_on(ambariIp)
                                cluster_service_list = check_service_list(ambariIp)
                                s_dict = check_service_state(cluster_service_list,ambariIp)
                                flag = 0
                                # Poll until every service reports STARTED.
                                while (flag == 0):
                                    len_dict = len(s_dict)
                                    print("number of services: "+ str(len_dict))
                                    count = 0
                                    for key,value in s_dict.iteritems():
                                        count = count + 1
                                        if value != 'STARTED':
                                            print(key +' is still not started successfully')
                                            time.sleep(10)
                                            services_on(ambariIp)
                                            s_dict = check_service_state(cluster_service_list,ambariIp)
                                    if count == len_dict:
                                        print("changed flag status to 1")
                                        flag = 1
                                print("Started All services")
                            else:
                                print('It is already running')
                elif int(stopTime) <= now.hour < int(startTime):
                    print("Its Within hour limit to stop the instances")
                    # One-shot Slack alert shortly before the next start time.
                    if (now.hour == (int(startHour) - 1) and int(startHour) != 24):
                        if (now.minute >= int(startMin)):
                            response_get = table.get_item(Key={'stackName': stack_name['S'] })
                            flagIA = response_get['Item']['instanceAlertFlag']
                            if flagIA != 1:
                                print("sending start alert")
                                send_alert(stack_name,'Start',startTimeString)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 1},ReturnValues="UPDATED_NEW")
                    elif (now.hour == int(startHour)):
                        if (now.minute < int(stopMin)):
                            response_get = table.get_item(Key={'stackName': stack_name['S'] })
                            flagIA = response_get['Item']['instanceAlertFlag']
                            if flagIA != 1:
                                print("sending start alert")
                                send_alert(stack_name,'Start',startTimeString)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 1},ReturnValues="UPDATED_NEW")
                    for j in range(0,len(response_instance)):
                        if (response_instance['Reservations'][j]['Instances'][0]['State']['Name'] == 'stopped' or response_instance['Reservations'][j]['Instances'][0]['State']['Name'] == 'stopping'):
                            print('Instances Already Stopped')
                        else:
                            print("Not Within Time Limit")
                            services_off(ambariIp)
                            print("Switching OFF the services")
                            cluster_service_list = check_service_list(ambariIp)
                            s_dict = check_service_state(cluster_service_list,ambariIp)
                            flag = 0
                            # Poll until every service is INSTALLED (stopped) or UNKNOWN.
                            while (flag == 0):
                                len_dict = len(s_dict)
                                print("number of services: "+ str(len_dict))
                                count = 0
                                for key,value in s_dict.iteritems():
                                    count = count + 1
                                    print(str(count) + 'service stopped:-' + key )
                                    if value != 'INSTALLED' and value != 'UNKNOWN':
                                        print(key +' is still not stopped successfully')
                                        time.sleep(10)
                                        # NOTE(review): clearing/replacing the dict being
                                        # iterated can raise RuntimeError — confirm.
                                        s_dict.clear()
                                        s_dict = check_service_state(cluster_service_list,ambariIp)
                                        time.sleep(5)
                                if count == len_dict:
                                    print("changed flag status to 1")
                                    flag = 1
                            print("Switched off services")
                            # Suspend ASG health checks so stopped nodes are not replaced.
                            for group in asgList:
                                client_ats.suspend_processes(AutoScalingGroupName = group ,ScalingProcesses=['HealthCheck','ReplaceUnhealthy'])
                                print("suspended group " + group)
                            try:
                                client_ec2.stop_instances( InstanceIds = instanceList)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 0},ReturnValues="UPDATED_NEW")
                            except ClientError as e:
                                logger.error("Received error: %s", e, exc_info=True)
                                if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':
                                    print (e)
                                    print('error has beeen printed')
                                    missing_instances = re.findall(r"'(.*?)'", e.response['Error']['Message'])[0].replace("'", "").split(", ")
                                    instanceList = list(set(instanceList) - set(missing_instances))
                                    print(instanceList)
                                    client_ec2.stop_instances( InstanceIds = instanceList)
                                    response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 0},ReturnValues="UPDATED_NEW")
                            print("stopped instances")
            else:
                # Normal schedule: start earlier in the day than stop.
                if int(startTime) <= now.hour < int(stopTime):
                    print("Its Within hour limit to start the instances")
                    if (now.hour == (int(stopHour) - 1) and int(stopHour) != 24):
                        if ( int(stopMin) <= int(now.minute)):
                            response_get = table.get_item(Key={'stackName': stack_name['S'] })
                            flagIA = response_get['Item']['instanceAlertFlag']
                            if flagIA != 1:
                                print("sending stop alert")
                                send_alert(stack_name,'Stop',stopTimeString)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 1},ReturnValues="UPDATED_NEW")
                    elif ( now.hour == int(stopHour)):
                        if ( int(now.minute) < int(stopMin)):
                            response_get = table.get_item(Key={'stackName': stack_name['S'] })
                            flagIA = response_get['Item']['instanceAlertFlag']
                            if flagIA != 1:
                                print("sending stop alert")
                                send_alert(stack_name,'Stop',stopTimeString)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 1},ReturnValues="UPDATED_NEW")
                    if startMin <= now.minute :
                        print("Its Within Minutes limit")
                        for j in range(0,len(response_instance)):
                            if response_instance['Reservations'][j]['Instances'][0]['State']['Name'] != 'running':
                                try:
                                    client_ec2.start_instances( InstanceIds = instanceList)
                                    response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 0},ReturnValues="UPDATED_NEW")
                                except ClientError as e:
                                    logger.error("Received error: %s", e, exc_info=True)
                                    if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':
                                        print (e)
                                        print('error has beeen printed')
                                        missing_instances = re.findall(r"'(.*?)'", e.response['Error']['Message'])[0].replace("'", "").split(", ")
                                        instanceList = list(set(instanceList) - set(missing_instances))
                                        print(instanceList)
                                        client_ec2.start_instances( InstanceIds = instanceList)
                                        response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 0},ReturnValues="UPDATED_NEW")
                                print("waiting for 120 seconds so that all the instances are up and running properly")
                                time.sleep(120)
                                print("Starting services now")
                                services_on(ambariIp)
                                cluster_service_list = check_service_list(ambariIp)
                                s_dict = check_service_state(cluster_service_list,ambariIp)
                                flag = 0
                                while (flag == 0):
                                    len_dict = len(s_dict)
                                    print("number of services: "+ str(len_dict))
                                    count = 0
                                    for key,value in s_dict.iteritems():
                                        count = count + 1
                                        if value != 'STARTED':
                                            print(key +' is still not started successfully')
                                            time.sleep(10)
                                            services_on(ambariIp)
                                            s_dict = check_service_state(cluster_service_list,ambariIp)
                                    if count == len_dict:
                                        print("changed flag status to 1")
                                        flag = 1
                                print("Started All services")
                            else:
                                print('It is already running')
                else:
                    # Outside the running window: alert before start, then stop.
                    if (now.hour == (int(startHour) - 1) and int(startHour) != 24):
                        if (now.minute >= int(startMin)):
                            response_get = table.get_item(Key={'stackName': stack_name['S'] })
                            flagIA = response_get['Item']['instanceAlertFlag']
                            if flagIA != 1:
                                print("sending start alert")
                                send_alert(stack_name,'Start',startTimeString)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 1},ReturnValues="UPDATED_NEW")
                    elif (now.hour == int(startHour)):
                        if (now.minute < int(stopMin)):
                            response_get = table.get_item(Key={'stackName': stack_name['S'] })
                            flagIA = response_get['Item']['instanceAlertFlag']
                            if flagIA != 1:
                                print("sending start alert")
                                send_alert(stack_name,'Start',startTimeString)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 1},ReturnValues="UPDATED_NEW")
                    for j in range(0,len(response_instance)):
                        if (response_instance['Reservations'][j]['Instances'][0]['State']['Name'] == 'stopped' or response_instance['Reservations'][j]['Instances'][0]['State']['Name'] == 'stopping'):
                            print('Instances Already Stopped')
                        else:
                            print("Not Within Time Limit")
                            services_off(ambariIp)
                            print("Switching OFF the services")
                            cluster_service_list = check_service_list(ambariIp)
                            s_dict = check_service_state(cluster_service_list,ambariIp)
                            flag = 0
                            while (flag == 0):
                                len_dict = len(s_dict)
                                print("number of services: "+ str(len_dict))
                                count = 0
                                for key,value in s_dict.iteritems():
                                    count = count + 1
                                    #print(str(count) + 'service stopped:-' + key )
                                    if (value != 'INSTALLED' and value != 'UNKNOWN'):
                                        print(key +' is still not stopped successfully')
                                        time.sleep(10)
                                        s_dict.clear()
                                        s_dict = check_service_state(cluster_service_list,ambariIp)
                                        time.sleep(5)
                                if count == len_dict:
                                    print("changed flag status to 1")
                                    flag = 1
                            print("Switched off services")
                            for group in asgList:
                                client_ats.suspend_processes(AutoScalingGroupName = group ,ScalingProcesses=['HealthCheck','ReplaceUnhealthy'])
                                print("suspended group " + group)
                            try:
                                client_ec2.stop_instances( InstanceIds = instanceList)
                                response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 0},ReturnValues="UPDATED_NEW")
                            except ClientError as e:
                                logger.error("Received error: %s", e, exc_info=True)
                                if e.response['Error']['Code'] == 'InvalidInstanceID.NotFound':
                                    print (e)
                                    print('error has beeen printed')
                                    missing_instances = re.findall(r"'(.*?)'", e.response['Error']['Message'])[0].replace("'", "").split(", ")
                                    instanceList = list(set(instanceList) - set(missing_instances))
                                    print(instanceList)
                                    client_ec2.stop_instances( InstanceIds = instanceList)
                                    response = table.update_item(Key={'stackName': stack_name['S'] }, UpdateExpression="set instanceAlertFlag = :r",ExpressionAttributeValues={ ':r' : 0},ReturnValues="UPDATED_NEW")
                            print("stopped instances")
            # Only one stack matches; stop scanning after handling it.
            break
def services_on(ip):
    """Ask Ambari to start all cluster services on the node given by *ip*.

    *ip* is actually an EC2 instance id; the node's private DNS name is
    resolved via describe_instances. On any failure this waits 10 seconds
    and retries by recursing — NOTE(review): unbounded recursion if the
    Ambari endpoint never recovers.
    """
    try:
        response_instance = client_ec2.describe_instances( InstanceIds = [ip])
        print("DNS: " + response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName'])
        base_url = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8444/api/v1/clusters'
        # verify=False: self-signed Ambari certificate — TLS not validated.
        r = requests.get(base_url, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
        cluster_name = r.json()['items'][0]['Clusters']['cluster_name']
        print ("cluster name")
        print (cluster_name)
        #Start All Services
        status = base_url+'/'+cluster_name+'/services'
        startdata = {"RequestInfo":{"context":"_PARSE_.START.ALL_SERVICES","operation_level":{"level":"CLUSTER","cluster_name":"Sandbox"}},"Body":{"ServiceInfo":{"state":"STARTED"}}}
        headers = {"X-Requested-By": "ambari"}
        response = requests.put(status, auth=HTTPBasicAuth(ambariUser, ambariPass), data=json.dumps(startdata), headers=headers, verify=False)
        print("Start Services Executed")
    except Exception as e:
        print(e)
        time.sleep(10)
        services_on(ip)
def services_off(ip):
    """Ask Ambari to stop all cluster services on the node given by *ip*.

    Mirror image of services_on(): *ip* is an EC2 instance id, the state
    'INSTALLED' means "stopped" in Ambari's API. Retries forever via
    recursion on any failure — NOTE(review): unbounded if Ambari is down.
    """
    try:
        response_instance = client_ec2.describe_instances( InstanceIds = [ip])
        print("DNS: " + response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName'])
        base_url = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8444/api/v1/clusters'
        r = requests.get(base_url, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
        cluster_name = r.json()['items'][0]['Clusters']['cluster_name']
        print ("cluster name")
        print (cluster_name)
        #Stop All Services
        status = base_url+'/'+cluster_name+'/services'
        stopdata = {"RequestInfo":{"context":"_PARSE_.STOP.ALL_SERVICES","operation_level":{"level":"CLUSTER","cluster_name":"Sandbox"}},"Body":{"ServiceInfo":{"state":"INSTALLED"}}}
        headers = {"X-Requested-By": "ambari"}
        response = requests.put(status, auth=HTTPBasicAuth(ambariUser, ambariPass), data=json.dumps(stopdata), headers=headers, verify=False)
        print("Stop Services Executed")
        #time.sleep(50)
        #while requests.get(status, auth=HTTPBasicAuth('admin', 'admin'), verify=False).json()['ServiceInfo']['state'] != 'INSTALLED':
        # print "Waiting for HDFS services to stop..."
        # print requests.get(hdfs_status, auth=HTTPBasicAuth('admin', 'admin'), verify=False).json()['ServiceInfo']['state']
        # time.sleep(5)
    except Exception as e:
        print(e)
        time.sleep(10)
        services_off(ip)
def check_service_list(ip):
    """Return the list of Ambari service names for the cluster on *ip*.

    *ip* is an EC2 instance id. Returns [] when the lookup fails.

    Fix: service_list is now bound before the try block — previously a
    failure before its assignment made the final return raise NameError.
    """
    service_list = []
    try:
        print("getting service list")
        response_instance = client_ec2.describe_instances( InstanceIds = [ip])
        print("DNS: " + response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName'])
        base_url = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8444/api/v1/clusters'
        r = requests.get(base_url, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
        cluster_name = r.json()['items'][0]['Clusters']['cluster_name']
        print ("cluster name")
        print (cluster_name)
        base_url_services = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8444/api/v1/clusters/'+cluster_name+'/services'
        r_services = requests.get(base_url_services, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
        print(r_services.json())
        for i in range(0,len(r_services.json()['items'])):
            service_list.append(r_services.json()['items'][i]['ServiceInfo']['service_name'])
        print (service_list)
    except Exception as e:
        print(e)
    return service_list
def check_service_state(s_list,ip):
    """Turn off maintenance mode and return {service: state} for *s_list*.

    *ip* is an EC2 instance id. For each service, maintenance mode is
    switched off and the current ServiceInfo/state is collected.

    Fix: on failure this now returns {} instead of the implicit None the
    original produced, which crashed callers doing len(s_dict).
    """
    try:
        response_instance = client_ec2.describe_instances( InstanceIds = [ip])
        print("DNS: " + response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName'])
        base_url = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8444/api/v1/clusters'
        r = requests.get(base_url, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
        cluster_name = r.json()['items'][0]['Clusters']['cluster_name']
        print("checking state")
        service_dict = {}
        #GET api/v1/clusters/c1/services/HDFS?fields=ServiceInfo/state
        for service in s_list:
            print('turning Off maintenance mode ' + service)
            maintenanceURL = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8444/api/v1/clusters/'+cluster_name+'/services/'+service
            stopdata = {"RequestInfo":{"context":"Turn Off Maintenance Mode"},"Body":{"ServiceInfo":{"maintenance_state":"OFF"}}}
            headers = {"X-Requested-By": "ambari"}
            response = requests.put(maintenanceURL, auth=HTTPBasicAuth(ambariUser, ambariPass), data=json.dumps(stopdata), headers=headers, verify=False)
            print('maintenance resposne is')
            print(response)
            #curl -u admin:$PASSWORD -i -H 'X-Requested-By: ambari' -X PUT -d '{"RequestInfo": {"context" :"Remove Falcon from maintenance mode"}, "Body": {"ServiceInfo": {"maintenance_state": "OFF"}}}' http://$AMBARI_HOST:8080/api/v1/clusters/$CLUSTER/services/FALCON
            #print('Service check begins')
            base_url_state = 'https://'+response_instance['Reservations'][0]['Instances'][0]['PrivateDnsName']+':8444/api/v1/clusters/'+cluster_name+'/services/'+service+'?fields=ServiceInfo/state'
            print(base_url_state)
            r_state = requests.get(base_url_state, auth=HTTPBasicAuth(ambariUser, ambariPass), verify=False)
            print(r_state)
            print(r_state.json())
            state_of_service = r_state.json()['ServiceInfo']['state']
            print(service +' = '+ state_of_service)
            service_dict[service] = state_of_service
        return service_dict
    except Exception as e:
        print(e)
        return {}
def send_alert(stack,state,timing):
    """Post a Slack message that *stack* will start/stop at *timing* (EST).

    *stack* is a DynamoDB attribute dict ({'S': name}); *state* is
    'Start' or 'Stop'; *timing* is an 'HH:MM' string.

    Fixes: `logger` was undefined in this scope (NameError inside the
    error handlers); the message text lacked a space after the stack
    name; an unknown *state* left slack_message unbound — now a no-op.
    """
    logger = logging.getLogger()
    d = datetime.datetime.strptime(timing, "%H:%M")
    timing = d.strftime("%I:%M %p")
    if(state == 'Start'):
        slack_message = {
            "text": stack['S'] + " will start at " + str(timing) + " EST." ,
            "channel" : "#instance_alerts",
            "username" : stack['S']
        }
    elif (state == 'Stop'):
        slack_message = {
            "text": stack['S'] + " will stop at " + str(timing) + " EST.",
            "channel" : "#instance_alerts",
            "username" : stack['S']
        }
    else:
        # Unknown state: nothing to send.
        return
    req = Request(HOOK_URL, json.dumps(slack_message))
    try:
        response = urlopen(req)
        response.read()
        logger.info("Message posted to %s", slack_message['channel'])
    except HTTPError as e:
        logger.error("Request failed: %d %s", e.code, e.reason)
    except URLError as e:
        logger.error("Server connection failed: %s", e.reason)
from django.urls import path
from login.views import (
    company_login,
    company_login_action,
    member_login,
    member_login_action,
)

# Login routes: the member login page doubles as the site root, and each
# login form posts to its matching */action/ endpoint.
urlpatterns = [
    path("", member_login),
    path("member/", member_login),
    path("member/action/", member_login_action),
    path("company/", company_login),
    path("company/action/", company_login_action),
]
|
import numpy as np
from difflib import SequenceMatcher
import re, math
from collections import Counter
# Module-level scratch state (`result`/`match` accumulators) and the
# word tokenizer used for the cosine-similarity comparison.
result=[]
match=0
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
    """Cosine similarity between two sparse term-count vectors (dicts).

    Returns 0.0 when either vector is empty (zero norm).
    """
    shared_terms = set(vec1.keys()) & set(vec2.keys())
    dot_product = sum(vec1[term] * vec2[term] for term in shared_terms)
    norm1 = math.sqrt(sum(count ** 2 for count in vec1.values()))
    norm2 = math.sqrt(sum(count ** 2 for count in vec2.values()))
    denominator = norm1 * norm2
    if not denominator:
        return 0.0
    return float(dot_product) / denominator
def text_to_vector(text):
    """Tokenize *text* into word characters and return their Counter."""
    return Counter(re.findall(r'\w+', text))
def comparebyarray(array):
    """Compare the word window *array* against every line of Lies.txt.

    Prints the window text plus the score whenever either the
    SequenceMatcher ratio or the cosine similarity reaches 0.80.

    Fix: the original used Python 2 `print` statements, which are
    SyntaxErrors under Python 3; normalized to call form.
    """
    with open("Lies.txt") as f:
        content = f.readlines()
    # NOTE(review): `x.strip() and x.split()` yields '' (a string, not a
    # list) for blank lines — probably meant x.strip().split(); kept as-is.
    content = [x.strip() and x.split() for x in content]
    #Lies array
    for j in content:
        str1 = ' '.join(array)
        str2 = ' '.join(j)
        vector1 = text_to_vector(str1)
        vector2 = text_to_vector(str2)
        cosine = get_cosine(vector1, vector2)
        matcher = SequenceMatcher(None, str1, str2).ratio()
        if matcher >= 0.80:
            print(str1 + "|matcher")
            print(matcher)
        if cosine >= 0.80:
            print(str1 + "|cosine")
            print(cosine)
def expressionhunt(array):
    """Debug helper: enumerate alert words against the scam window *array*.

    Reads Lies.txt and, for each alert line, prints every alert word and
    its index once per word of *array*. The np.array_equal result is
    computed but discarded, as in the original.

    Fix: `print y,index2` was a Python 2 print statement (SyntaxError
    under Python 3); normalized to a print() call.
    """
    match = 0
    with open("Lies.txt") as f:
        content = f.readlines()
    content = [x.strip() and x.split() for x in content]
    #Lies array
    for j in content:#para cada alerta
        np.array_equal(array, j)
        #para cada palavra alerta
        for index2, y in enumerate(j):
            for index, x in enumerate(array): #para palavra do scam
                print(y, index2)
# Driver: slide a 6-word window over scam3.txt and compare each window
# against the known-lies corpus via comparebyarray().
file = open("scam3.txt")
file = file.read()
words = file.split()
limit = len(words)
samplearray=[]
match=0
for index, value in enumerate(words):
    # Only positions with a full 6-word window remaining.
    if index<=(limit-6):
        for i in range(6):
            word = words[index+i]
            samplearray.append(word)
            if i==5:
                # Window complete: compare, then reset for the next start index.
                comparebyarray(samplearray)
                samplearray=[]
|
import csv
import random
def gameView():
    """Print the main-menu banner for the text adventure game."""
    menu_lines = (
        "****** Text Adventure Game v1.0 ******",
        "* *",
        "* 1 - New Game *",
        "* 2 - Load Game *",
        "* 3 - Quit *",
        "* *",
        "**************************************",
    )
    for line in menu_lines:
        print(line)
def runGame(startValue, storyData, rowCount):
    """Play story rows from *startValue* onward.

    Each storyData row is [text, option1, option2]; a row whose option1
    is empty marks an ending, at which point a random ending row from
    the remainder of the file is printed.

    Returns the current row index when the player chooses 3 (save);
    returns None when the story reaches an ending.
    """
    for i in range(startValue, rowCount, 1):
        #Checks to see if it's the end, then chooses a random ending from what's left over in the file.
        if not(storyData[i][1]):
            # fix: randint is inclusive on both ends, so the upper bound
            # must be rowCount - 1 or this could raise IndexError.
            randomEnding = random.randint(i, rowCount - 1)
            print (storyData[randomEnding][0])
            break
        #Prints Options if they are included
        print (storyData[i][0])
        print ("1 - " + storyData[i][1])
        print ("2 - " + storyData[i][2])
        print ("3 - Save Game")
        ui = userInput()
        if(ui == 3):
            print (">>> Game Saved!")
            # fix: removed the unreachable `break` that followed this return
            return i
def userInput():
    """Prompt until the player types a valid integer; return it."""
    while True:
        try:
            # Renamed the local so it no longer shadows the function name.
            choice = int(input("What do you want to do?\n"))
        except ValueError:
            print("Error with input. Please try again!")
        else:
            return choice
def main():
    """Load story.csv and run the menu loop (new game / load game / quit)."""
    storyData = []
    rowCount = 0
    #store data in storyData array
    infile = open("story.csv", "r")
    reader = csv.reader(infile)
    for row in reader:
        storyData.append(row)
        rowCount = rowCount + 1
    print (rowCount)
    #initialize run values
    game = 0
    savedStartValue = 0
    # NOTE(review): `game` is never set non-zero and there is no break,
    # so option 3 closes the file but the menu loop keeps running.
    while(game == 0):
        #Start running the game, call functions gameView and userInput
        gameView()
        initialInput = userInput()
        #Loop for view options 1, 2 and 3
        if(initialInput == 1):
            savedStartValue = 0
            rg = runGame(savedStartValue, storyData, rowCount)
            # NOTE(review): runGame returns None when the story ends, so
            # a later "Load Game" may pass None as the start index.
            savedStartValue = rg
        elif(initialInput == 2):
            loadGame = runGame(savedStartValue, storyData, rowCount)
        elif(initialInput == 3):
            print ("Quitting game!")
            infile.close()
main()
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from flask import Flask, request, jsonify, redirect
import os, json
from imageai.Detection import ObjectDetection
# In[2]:
# Model is created and loaded once at import time so every request
# reuses the same detector.
model_path = os.getcwd()
# In[3]:
PRE_TRAINED_MODELS =["yolo.h5"]
# In[4]:
# create imageAI objects and load models
object_detector =ObjectDetection()
object_detector.setModelTypeAsYOLOv3()
object_detector.setModelPath(os.path.join(model_path, PRE_TRAINED_MODELS[0]))
object_detector.loadModel()
# Warm-up inference on a bundled sample image.
object_detections = object_detector.detectObjectsFromImage(input_image="people_umbrella.jpg")
# In[5]:
#define model paths and allow file extention
UPLOAD_FOLDER =model_path
ALLOWED_EXTENSIONS =set(['png','jpg','jpeg','gif'])
# In[6]:
app =Flask(__name__)
app.config['UPLOAD_FOLDER'] =UPLOAD_FOLDER
# In[7]:
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
# In[8]:
@app.route('/predict',methods=['POST'])
def upload_file():
    """Accept an uploaded image and return detected objects as JSON.

    Response body: {"0": [name, probability], "1": [...], ...}.
    Redirects back when the 'file' part is missing or empty; returns the
    exception text as JSON if detection fails.

    Fix: the final line called json.dumpa, which does not exist
    (AttributeError at runtime) — corrected to json.dumps.
    """
    if request.method =='POST':
        #check if the post request has the first part
        if 'file' not in request.files:
            print('No file part')
            return redirect(request.url)
        file =request.files['file']
        # if the user does not select a file, the browser also submits an
        # empty part without a filename
        if file.filename =='':
            print ('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename =file.filename
            file_path =os.path.join(app.config['UPLOAD_FOLDER'],filename)
            file.save(file_path)
            try:
                object_detections =object_detector.detectObjectsFromImage(input_image =file_path)
            except Exception as ex:
                return jsonify(str(ex))
            resp=[]
            for eachObject in object_detections:
                resp.append([eachObject["name"], round(eachObject["percentage_probability"],3)])
            return json.dumps(dict(enumerate(resp)))
# In[ ]:
# Serve on all interfaces, port 4445.
if __name__ =="__main__":
    app.run(host='0.0.0.0', port=4445)
|
# Python basics scratch pad: functions, conditionals, loops, and lists.
print("Welcome")

def fun():
    # Function with no arguments.
    print("function")
fun()

def argu(arg):
    # Function taking one string argument.
    print(arg + "ument")
argu("arg")

x=int(0)
if x<0:
    print("negative")
elif x>0:
    print("positive")
else:
    print("zero")
# NOTE(review): on Python 3 this prints the range object, not the numbers.
print(range(10))
# Classic for/else primality demo: the else runs when no divisor was found.
for n in range(3, 11):
    for x in range(2, n):
        if n%x == 0:
            print(n, "is a composite number")
            break
    else:
        print(n, "is a prime number")
# List operations demo.
bikes = ["ninja", "bullet", "ducati"]
a = bikes[2]
print(a)
b = len(bikes)
print(b)
for a in bikes:
    print(a)
bikes.append("luna")
print(bikes)
bikes.remove("luna")
print(bikes)
bikes.insert(2, "vespa")
print(bikes)
bikes.sort(reverse=False)
print(bikes)

def sortlist(w):
    # Sort key: order strings by their length.
    return len(w)
cars = ["audi", "bmw", "mercedies", "jaguar"]
cars.sort(key=sortlist)
print(cars)
q=cars.count("bmw")
print(q)
cars.extend(bikes)
print(cars)
train= (1, 5, 9, 3, 7)
bikes.extend(train)
print(bikes)
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies the use of the environment during regeneration when the gyp file
changes, specifically via build of an executable with C preprocessor
definition specified by CFLAGS.
In this test, gyp and build both run in same local environment.
"""
import TestGyp
# CPPFLAGS works in ninja but not make; CFLAGS works in both
FORMATS = ('make', 'ninja')
test = TestGyp.TestGyp(formats=FORMATS)
# First set CFLAGS to blank in case the platform doesn't support unsetenv.
with TestGyp.LocalEnv({'CFLAGS': '',
                       'GYP_CROSSCOMPILE': '1'}):
  test.run_gyp('cflags.gyp')
  test.build('cflags.gyp')
# With no CFLAGS, FOO must be undefined in both target and host binaries.
expect = """FOO not defined\n"""
test.run_built_executable('cflags', stdout=expect)
test.run_built_executable('cflags_host', stdout=expect)
# sleep() ensures mtimes change so the next gyp run triggers regeneration.
test.sleep()
with TestGyp.LocalEnv({'CFLAGS': '-DFOO=1',
                       'GYP_CROSSCOMPILE': '1'}):
  test.run_gyp('cflags.gyp')
  test.build('cflags.gyp')
# Regeneration must pick up the new CFLAGS for the target...
expect = """FOO defined\n"""
test.run_built_executable('cflags', stdout=expect)
# Environment variables shouldn't influence the flags for the host.
expect = """FOO not defined\n"""
test.run_built_executable('cflags_host', stdout=expect)
test.sleep()
# Clearing CFLAGS again must also be picked up on regeneration.
with TestGyp.LocalEnv({'CFLAGS': ''}):
  test.run_gyp('cflags.gyp')
  test.build('cflags.gyp')
expect = """FOO not defined\n"""
test.run_built_executable('cflags', stdout=expect)
test.sleep()
with TestGyp.LocalEnv({'CFLAGS': '-DFOO=1'}):
  test.run_gyp('cflags.gyp')
  test.build('cflags.gyp')
expect = """FOO defined\n"""
test.run_built_executable('cflags', stdout=expect)
test.pass_test()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2020-04-14 22:06:59
# @Author : Fallen (xdd043@qq.com)
# @Link : https://github.com/fallencrasher/python-learning
# @Version : $Id$
a=1
# for x in range(10):
# print(x)
def func1():
print('hello world!')
|
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
import torch
def plot(ep_return, avg_returns, stds, fig, ax):
    """Redraw the training-progress figure in place.

    Parameters
    ----------
    ep_return : per-episode returns (plotted against the same episode axis).
    avg_returns : running-average returns, one entry per episode.
    stds : standard deviations of the returns.
        NOTE(review): indexed as stds[1:] below, so it appears to hold one
        extra leading entry relative to avg_returns — confirm with callers.
    fig, ax : matplotlib figure/axes to draw into (cleared each call).
    """
    episode = np.arange(len(avg_returns))
    avg_returns = np.array(avg_returns)
    stds = np.array(stds)
    ax.clear()
    ax.set_xlabel('Episode')
    ax.set_ylabel('Returns')
    # plot average returns
    ax.plot(episode, avg_returns, label='Average Returns')
    # Shade +/- one standard deviation around the running average.
    ax.fill_between(episode, avg_returns-stds[1:], avg_returns+stds[1:],
                    facecolor='blue', alpha=0.1)
    ax.plot(episode, ep_return, label='Episode Return')
    ax.set_title('Returns')
    ax.legend()
    fig.tight_layout()
    fig.canvas.draw()
    # Blocks until the window is closed in non-interactive backends.
    plt.show()
def set_seed(env, seed=0):
    """Seed the numpy, torch, and environment RNGs for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    env.seed(seed)
def normalize(X, eps=1e-8):
    """Standardize X to zero mean and (approximately) unit std.

    eps keeps the division finite when the std is zero.
    """
    mean = X.mean()
    std = X.std()
    return (X - mean) / (std + eps)
class Trajectory:
    """Rollout storage for n_traj trajectories of `rollout` steps each,
    with reward-to-go and GAE advantage computation helpers."""

    def __init__(self, n_traj=10, rollout=20, gamma=0.9, lam=0.95):
        self.n_traj = n_traj      # trajectories per batch
        self.rollout = rollout    # time steps per trajectory
        self.gamma = gamma        # discount factor
        self.lam = lam            # GAE smoothing factor
        self.batch = {'state_values': [],
                      'rewards': [],
                      'log_probs': [],
                      'observations': [],
                      'dones': []}
        self.step_count = 0       # total steps stored across trajectories

    def add(self, state_values, rewards, log_probs, observations, dones):
        """Append one environment step to every buffer."""
        # BUG FIX: the original looked up each argument with eval(key),
        # which is fragile (breaks on rename) and unsafe; bind explicitly.
        step = {'state_values': state_values,
                'rewards': rewards,
                'log_probs': log_probs,
                'observations': observations,
                'dones': dones}
        for key, value in step.items():
            self.batch[key].append(value)
        self.step_count += 1

    def batch_full(self):
        """True once n_traj complete rollouts have been stored."""
        return self.step_count // self.rollout == self.n_traj

    def discount(self, delta):
        """Discounted (gamma*lam) cumulative sum of delta, via the
        reversed-lfilter trick."""
        return scipy.signal.lfilter([1], [1, -self.gamma*self.lam], delta.detach().numpy()[::-1], axis=0)[::-1]

    def gae(self, rewards, state_values, dones):
        """Generalized Advantage Estimation over one rollout; the final
        step's advantage is padded with 0."""
        advantages = self.discount(rewards[:-1] + self.gamma*state_values[1:]*(1 - dones[1:]) - state_values[:-1])
        advantages = torch.tensor(normalize(advantages), dtype=torch.float32)
        return torch.cat([advantages, torch.tensor([0], dtype=torch.float32).unsqueeze(0)])

    def fetch(self, traj):
        """Return (returns, advantages, log_probs) tensors for the rollout
        starting at buffer index `traj`."""
        R = 0
        returns = []
        # Reward-to-go, accumulated from the end of the rollout backwards.
        for r in self.batch['rewards'][traj:traj+self.rollout][::-1]:
            R = r + self.gamma * R
            returns.insert(0, R)
        # Convert the slices of stored values into tensors.
        returns = torch.tensor(returns).unsqueeze(1)
        state_values = torch.cat(self.batch['state_values'][traj:traj+self.rollout])
        log_probs = torch.cat(self.batch['log_probs'][traj:traj+self.rollout]).unsqueeze(1)
        dones = torch.tensor(self.batch['dones'][traj:traj+self.rollout]).unsqueeze(1).type(torch.FloatTensor)
        # GAE, then normalize the plain returns as well.
        advantages = self.gae(returns, state_values, dones)
        returns = normalize(returns)
        return returns, advantages, log_probs

    def get_obs(self):
        """Sample `rollout` stored observations without replacement.

        NOTE(review): the sampling weights are the step indices themselves,
        so index 0 can never be drawn — confirm this is intended.
        """
        obs_idx = torch.multinomial(torch.arange(self.step_count, dtype=torch.float32), self.rollout)
        return torch.cat(self.batch['observations'])[obs_idx, :]

    def clear(self):
        """Empty all buffers in place and reset the step counter."""
        for key in self.batch.keys():
            del self.batch[key][:]
        self.step_count = 0
|
import re
from mcp21.package import MCPPackageBase
# This has been pulled from the tab-completion scheme in favor of
# dns-com-vmoo-smartcompletion. Now this package maintains its own
# list of rehashed thingies, with which it does nothing.
class MCPPackage(MCPPackageBase):
    """Maintains the server-advertised command list for dns-com-awns-rehash.

    Pulled from the tab-completion scheme in favor of
    dns-com-vmoo-smartcompletion; the package now only keeps the list.
    """
    def __init__(self, mcp):
        MCPPackageBase.__init__(self, mcp)
        self.package = 'dns-com-awns-rehash'
        self.min = '1.0'
        self.max = '1.1'
        self.hashes = []  # flat list of command-name strings
        mcp.register(self, ['dns-com-awns-rehash-commands'])
        mcp.register(self, ['dns-com-awns-rehash-add'])
        mcp.register(self, ['dns-com-awns-rehash-remove'])

    def dispatch(self, msg):
        """Route an incoming MCP message to its handler."""
        if msg.message == 'dns-com-awns-rehash-commands': self.do_commands(msg)
        if msg.message == 'dns-com-awns-rehash-add': self.do_add(msg)
        if msg.message == 'dns-com-awns-rehash-remove': self.do_remove(msg)

    def do_commands(self, msg):
        """Replace the command list with the space-separated 'list' field."""
        self.hashes = msg.data['list'].split(' ')

    def do_add(self, msg):
        """Add each name in the 'list' field to the command list."""
        # BUG FIX: append() inserted the whole split list as one nested
        # element, which do_remove's string comparison could never match;
        # extend() keeps self.hashes a flat list like do_commands does.
        self.hashes.extend(msg.data['list'].split(' '))

    def do_remove(self, msg):
        """Drop every name in the 'list' field from the command list."""
        self.hashes[:] = [x for x in self.hashes if x not in msg.data['list'].split()]
|
'''
Created on Nov 18, 2011
@author: jason
'''
import bson
import tornado.web
import datetime
import simplejson
import MongoEncoder.MongoEncoder
from Map.BrowseTripHandler import BaseHandler
class ShowSightsHandler(BaseHandler):
    """Render the detail page for a single site."""
    def get(self, site):
        # NOTE(review): the field is named lc_sitename ("lowercase"?) yet is
        # queried with site.upper() — confirm how lc_sitename is stored.
        site = self.syncdb.sites.find_one({"lc_sitename": site.upper()})
        self.render("Sites/sight.html", site = site )
class RemoveSiteFromTrip(BaseHandler):
    """AJAX endpoint: remove one destination from one group of a trip."""
    @tornado.web.authenticated
    def post(self):
        trip_id = self.get_argument('trip_id')
        site_name = self.get_argument('site_name')
        group_id = self.get_argument('group_id')
        # A 'new' group was never persisted, so there is nothing to remove.
        if group_id == 'new':
            self.write('success')
            return
        trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
        # Delete only the first matching destination in the matching group.
        for group in trip['groups']:
            if group['group_id'] == bson.ObjectId(group_id):
                for i, dest in enumerate(group['dest_place']):
                    if dest['dest'] == site_name:
                        del group['dest_place'][i]
                        break
        self.syncdb.trips.save(trip)
        #self.syncdb.trips.update({'trip_id':bson.ObjectId(trip_id)},{'$pull':{'dest_place':{'dest':site_name}}})
        self.write('success')
class AddSiteToTrip(BaseHandler):
    """AJAX endpoint: append a destination to one group of a trip.

    Responds with the rendered trip-site HTML fragment for the new entry.
    """
    @tornado.web.authenticated
    def post(self):
        _site = {}
        trip_id = self.get_argument('trip_id')
        site_name = self.get_argument('site_name')
        date = self.get_argument('date')
        ride = self.get_argument('site_ride')
        group_id = self.get_argument('group_id')
        # Prefix-match the sites catalogue to enrich the new entry.
        # NOTE(review): queries 'lc_username' while ShowSightsHandler queries
        # 'lc_sitename' — confirm which field the sites collection uses.
        site = self.syncdb.sites.find_one({'lc_username': {'$regex':'^'+site_name.upper()}})
        if site:
            _site['description']= site['description']
            _site['geo']= site['geo']
        else:
            _site['description']= ''
            _site['geo'] = ''
        _site['date'] = date
        _site['notes'] = []
        _site['dest'] = site_name
        _site['type'] = ride
        #self.syncdb.trips.update({'trip_id':bson.ObjectId(trip_id)},{'$addToSet':{'dest_place':_site}})
        trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
        # Append to every group whose id matches (normally exactly one).
        for group in trip['groups']:
            if group['group_id'] == bson.ObjectId(group_id):
                group['dest_place'].append(_site)
        self.syncdb.trips.save(trip)
        trip_site = self.render_string("Sites/trip_site.html", site = _site, singletrip = trip)
        self.write(trip_site)
class PostNoteToSite(BaseHandler):
    """AJAX endpoint: attach a note to a destination inside a trip group."""
    @tornado.web.authenticated
    def post(self):
        site_name = self.get_argument('site_name')
        trip_id = self.get_argument('trip_id')
        group_id = self.get_argument('group_id')
        trip = self.syncdb.trips.find_one({'trip_id':bson.ObjectId(trip_id)})
        # Note document: text, UTC timestamp, and the posting user's info.
        message = {"note": self.get_argument('note'), "date": datetime.datetime.utcnow(),'from': {'username': self.current_user['username'], 'user_id': self.current_user['user_id'], 'picture':self.current_user['picture']}}
        # Append to the first matching destination in the matching group.
        for group in trip['groups']:
            if group['group_id'] == bson.ObjectId(group_id):
                for place in group['dest_place']:
                    if place['dest'] == site_name:
                        print(site_name)
                        place['notes'].append(message)
                        break
        #response = {'comment_id': bson.ObjectId(),'body': content,'date': datetime.datetime.utcnow(),'from': {'username': self.current_user['username'], 'user_id': self.current_user['user_id'], 'picture':self.current_user['picture']}}
        #self.syncdb.trips.update({"trip_id":bson.ObjectId(trip_id),"dest_place.dest":site_name}, {'$push': {'dest_place.note':message}})
        self.syncdb.trips.save(trip)
        #print(unicode(simplejson.dumps(message, cls=MongoEncoder.MongoEncoder.MongoEncoder)))
        # NOTE(review): unicode() exists only on Python 2; on Python 3 this
        # line raises NameError — confirm the target runtime.
        self.write(unicode(simplejson.dumps(message, cls=MongoEncoder.MongoEncoder.MongoEncoder)))
|
"""
四极 CCT 二级场
"""
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
sys.path.append(path.dirname(path.dirname(
path.abspath(path.dirname(__file__)))))
from cctpy import *
# Compare the on-axis Bz of the second bending section when only the AGCCT
# winding is powered (gantry1) against only the DICCT winding (gantry2).
if __name__ == '__main__':
    BaseUtils.i_am_sure_my_code_closed_in_if_name_equal_main()
    # Winding numbers of the three AGCCT sections; the -67.5 degree total
    # bend below is split between them in proportion to these numbers.
    agcct3_winding_number = 25
    agcct4_winding_number = 40
    agcct5_winding_number = 34
    # gantry1: dicct345_current = 0, so only the AGCCT winding contributes.
    gantry1 = HUST_SC_GANTRY(
        qs3_gradient=5.546,
        qs3_second_gradient=-57.646,
        dicct345_tilt_angles=[30, 87.426, 92.151, 91.668],
        agcct345_tilt_angles=[94.503, 30, 72.425, 82.442],
        dicct345_current=0,
        agcct345_current=-5642.488,
        agcct3_winding_number=agcct3_winding_number,
        agcct4_winding_number=agcct4_winding_number,
        agcct5_winding_number=agcct5_winding_number,
        agcct3_bending_angle=-67.5*(agcct3_winding_number)/(
            agcct3_winding_number+agcct4_winding_number+agcct5_winding_number),
        agcct4_bending_angle=-67.5*(agcct4_winding_number)/(
            agcct3_winding_number+agcct4_winding_number+agcct5_winding_number),
        agcct5_bending_angle=-67.5*(agcct5_winding_number)/(
            agcct3_winding_number+agcct4_winding_number+agcct5_winding_number),
        DL1=0.9007765,
        GAP1=0.4301517,
        GAP2=0.370816,
        qs1_length=0.2340128,
        qs1_aperture_radius=60 * MM,
        qs1_gradient=0.0,
        qs1_second_gradient=0.0,
        qs2_length=0.200139,
        qs2_aperture_radius=60 * MM,
        qs2_gradient=0.0,
        qs2_second_gradient=0.0,
        DL2=2.35011,
        GAP3=0.43188,
        qs3_length=0.24379,
        agcct345_inner_small_r=83 * MM,
        agcct345_outer_small_r=98 * MM,  # 83+15
        dicct345_inner_small_r=114 * MM,  # 83+30+1
        dicct345_outer_small_r=130 * MM,  # 83+45 +2
    )
    # gantry2: identical geometry, but agcct345_current = 0, so only the
    # DICCT winding contributes.
    gantry2 = HUST_SC_GANTRY(
        qs3_gradient=5.546,
        qs3_second_gradient=-57.646,
        dicct345_tilt_angles=[30, 87.426, 92.151, 91.668],
        agcct345_tilt_angles=[94.503, 30, 72.425, 82.442],
        dicct345_current=9445.242,
        agcct345_current=0,
        agcct3_winding_number=agcct3_winding_number,
        agcct4_winding_number=agcct4_winding_number,
        agcct5_winding_number=agcct5_winding_number,
        agcct3_bending_angle=-67.5*(agcct3_winding_number)/(
            agcct3_winding_number+agcct4_winding_number+agcct5_winding_number),
        agcct4_bending_angle=-67.5*(agcct4_winding_number)/(
            agcct3_winding_number+agcct4_winding_number+agcct5_winding_number),
        agcct5_bending_angle=-67.5*(agcct5_winding_number)/(
            agcct3_winding_number+agcct4_winding_number+agcct5_winding_number),
        DL1=0.9007765,
        GAP1=0.4301517,
        GAP2=0.370816,
        qs1_length=0.2340128,
        qs1_aperture_radius=60 * MM,
        qs1_gradient=0.0,
        qs1_second_gradient=0.0,
        qs2_length=0.200139,
        qs2_aperture_radius=60 * MM,
        qs2_gradient=0.0,
        qs2_second_gradient=0.0,
        DL2=2.35011,
        GAP3=0.43188,
        qs3_length=0.24379,
        agcct345_inner_small_r=83 * MM,
        agcct345_outer_small_r=98 * MM,  # 83+15
        dicct345_inner_small_r=114 * MM,  # 83+30+1
        dicct345_outer_small_r=130 * MM,  # 83+45 +2
    )
    # Sample Bz every 10 mm along each beamline and overlay the two curves.
    bl1 = gantry1.create_second_bending_part_beamline()
    bl2 = gantry2.create_second_bending_part_beamline()
    bl1_bz = bl1.magnetic_field_bz_along(step=10*MM)
    bl2_bz = bl2.magnetic_field_bz_along(step=10*MM)
    Plot2.plot_p2s(bl1_bz,describe='r-')
    Plot2.plot_p2s(bl2_bz,describe='k-')
    Plot2.ylim()
    # NOTE(review): bl1 (plotted first) is the AGCCT-only gantry but gets
    # the "dipole" legend label — confirm the legend order is intended.
    Plot2.legend("dipole","quad")
    Plot2.info("s/m","B/T","dipolar field along trajectory")
    Plot2.show()
class Node:
    """A binary-search-tree node holding data and two child links."""
    def __init__(self, data):
        self.data = data
        self.right_child = None
        self.left_child = None


class Tree:
    """Unbalanced binary search tree: insert, search, remove, find_min."""

    def __init__(self):
        self.root_node = None

    def find_min(self):
        """Return the node holding the smallest data (leftmost node).

        Assumes the tree is non-empty.
        """
        current = self.root_node
        while current.left_child:
            current = current.left_child
        return current

    def insert(self, data):
        """Insert data; equal or greater values go to the right subtree."""
        node = Node(data)
        if self.root_node is None:
            self.root_node = node
        else:
            current = self.root_node
            parent = None
            while True:
                parent = current
                if node.data < current.data:
                    current = current.left_child
                    if current is None:
                        parent.left_child = node
                        return
                else:
                    current = current.right_child
                    if current is None:
                        parent.right_child = node
                        return

    def get_node_with_parent(self, data):
        """Return (parent, node) holding data, or (None, None) if absent."""
        parent = None
        current = self.root_node
        while current is not None:
            if current.data == data:
                return (parent, current)
            elif current.data > data:
                parent = current
                current = current.left_child
            else:
                parent = current
                current = current.right_child
        # BUG FIX: the original fell off a leaf and dereferenced None
        # (AttributeError) when data was absent; report "not found" instead.
        return (None, None)

    def remove(self, data):
        """Remove one node holding data; return False if data is absent."""
        parent, node = self.get_node_with_parent(data)
        if node is None:
            return False
        # Get children count
        if node.left_child and node.right_child:
            children_count = 2
        elif (node.left_child is None) and (node.right_child is None):
            children_count = 0
        else:
            children_count = 1
        if children_count == 0:
            # Leaf: unhook it from its parent (or empty the tree).
            if parent:
                if parent.right_child is node:
                    # BUG FIX: was `parent.right_node = None`, which created
                    # a new attribute and left the node in the tree.
                    parent.right_child = None
                else:
                    parent.left_child = None
            else:
                self.root_node = None
        elif children_count == 1:
            # One child: splice the child into the node's place.
            if node.left_child:
                next_node = node.left_child
            else:
                next_node = node.right_child
            if parent:
                if parent.left_child is node:
                    parent.left_child = next_node
                else:
                    parent.right_child = next_node
            else:
                self.root_node = next_node
        else:
            # Two children: copy the in-order successor's data into the
            # node, then unlink the successor (leftmost of right subtree).
            parent_of_leftmost_node = node
            leftmost_node = node.right_child
            while leftmost_node.left_child:
                parent_of_leftmost_node = leftmost_node
                leftmost_node = leftmost_node.left_child
            # BUG FIX: the original assigned leftmost_node.left_child
            # (always None) to node.data and never removed the successor.
            node.data = leftmost_node.data
            if parent_of_leftmost_node.left_child is leftmost_node:
                parent_of_leftmost_node.left_child = leftmost_node.right_child
            else:
                parent_of_leftmost_node.right_child = leftmost_node.right_child

    def search(self, data):
        """Return data if it is present in the tree, else None."""
        current = self.root_node
        while current is not None:
            # BUG FIX: was `current.data is data` — identity comparison
            # fails for values that are equal but not the same object.
            if current.data == data:
                return data
            elif current.data > data:
                current = current.left_child
            else:
                current = current.right_child
        return None
"""
CCT 建模优化代码
二维曲线段
作者:赵润晓
日期:2021年4月27日
"""
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
# Demonstrates the Line2, StraightLine2, ArcLine2 and Trajectory classes.
# Line2 is the abstract base class for any directed, continuous 2-D curve in
# the xy plane — a straight segment, an arc, or a combination of the two.
# Its main purpose is building the ideal orbit, which is used to
# 1. locate magnet elements more easily,
# 2. construct reference particles,
# 3. study the magnetic-field distribution along the ideal orbit.
# Note: ideal orbits are built in the global coordinate frame, so no
# coordinate transforms are involved.
# A directed segment and a directed arc illustrate the Line2 API below.
# straight_line: a directed segment, 1 m long, parallel to +x, from the origin.
straight_line = StraightLine2(
    length=1*M,  # 1 metre (M is the base unit; *M may be omitted)
    direct=P2.x_direct(),  # direction: parallel to +x
    start_point=P2.origin()  # start at the origin
)
# arc_line: a directed arc centred at the origin with radius 1 m.
# In polar coordinates about the centre (pole at the centre, polar axis
# along +x) the arc starts at polar angle 0 and sweeps 180 degrees
# counter-clockwise.
arc_line = ArcLine2(
    starting_phi=0,  # start at polar angle 0 rad
    center=P2.origin(),  # arc centre at the origin
    radius=1*M,  # radius 1 m
    total_phi=math.pi,  # sweep 180 degrees
    clockwise=False  # counter-clockwise
)
# The Line2 API, demonstrated on straight_line and arc_line:
# get_length() returns the curve's length.
print("straight_line 的长度为",straight_line.get_length()) # 1.0
print("arc_line 的长度为",arc_line.get_length()) # 3.141592653589793
# point_at(s) returns the point reached after travelling distance s from the
# curve's start (every directed curve has a well-defined start point).
# s = 0 is the start point.
print("straight_line 的起点为",straight_line.point_at(0)) # (0.0, 0.0)
print("arc_line 的起点为",arc_line.point_at(0)) # (1.0, 0.0)
# s = get_length() is the end point.
print("straight_line 的终点为",straight_line.point_at(straight_line.get_length())) # (1.0, 0.0)
print("arc_line 的终点为",arc_line.point_at(arc_line.get_length())) # (-1.0, 1.2246467991473532e-16)
# Any s in [0, get_length()] is valid.
print("straight_line.point_at(0.5) 为",straight_line.point_at(0.5)) # (0.5, 0.0)
print("arc_line.point_at(math.pi/2) 为",arc_line.point_at(math.pi/2)) # (6.123233995736766e-17, 1.0)
# direct_at(s) returns the tangent direction at position s.
# s = 0: tangent at the start.
print("straight_line 的起点处切向为",straight_line.direct_at(0)) # (1.0, 0.0)
print("arc_line 的起点处切向为",arc_line.direct_at(0)) # (6.123233995736766e-17, 1.0)
# s = get_length(): tangent at the end.
print("straight_line 的终点处切向为",straight_line.direct_at(straight_line.get_length())) # (1.0, 0.0)
print("arc_line 的终点处切向为",arc_line.direct_at(arc_line.get_length())) # (-1.8369701987210297e-16, -1.0)
# Any s in [0, get_length()] is valid.
print("straight_line.direct_at(0.5) 为",straight_line.direct_at(0.5)) # (1.0, 0.0)
print("arc_line.direct_at(math.pi/2) 为",arc_line.direct_at(math.pi/2)) # (-1.0, 1.2246467991473532e-16)
# right_hand_side_point(s, d) / left_hand_side_point(s, d): the point a
# distance d to the right/left of the curve at position s. Intuitively,
# walk s along the curve, then look to your right/left at distance d.
# For straight_line, 1 m to the right of the start is clearly (0, -1):
print("straight_line 起点位置,右手侧 1 米位置的点为",straight_line.right_hand_side_point(0,1))
# (6.123233995736766e-17, -1.0)
# For arc_line, 1 m to the left of the end point is clearly the origin:
print("arc_line 终点位置,左手侧 1 米位置的点为",arc_line.left_hand_side_point(arc_line.get_length(),1))
# (0.0, 2.465190328815662e-32)
# point_at_start()/point_at_middle()/point_at_end(): start/middle/end points.
print("straight_line 的起点为",straight_line.point_at_start()) # (0.0, 0.0)
print("straight_line 的中点为",straight_line.point_at_middle()) # (0.5, 0.0)
print("straight_line 的终点为",straight_line.point_at_end()) # (1.0, 0.0)
# direct_at_start()/direct_at_middle()/direct_at_end(): tangents there.
print("arc_line 的起点处的切向为",arc_line.direct_at_start()) # (6.123233995736766e-17, 1.0)
print("arc_line 的中点处的切向为",arc_line.direct_at_middle()) # (-1.0, 1.2246467991473532e-16)
print("arc_line 的终点处的切向为",arc_line.direct_at_end()) # (-1.8369701987210297e-16, -1.0)
# __add__() translates the curve: use + with a 2-D vector.
# Shift straight_line up by 0.5 m:
straight_line_up05 = straight_line+P2(y=0.5)
print("straight_line 向上平移 0.5 米,起点为",straight_line_up05.point_at_start())
# (0.0, 0.5)
# disperse2d(step) discretises the curve into consecutive 2-D points with
# the given step (default 1 mm); returns a list of points.
# straight_line with step = 0.2 m returns
# [(0.0, 0.0), (0.2, 0.0), (0.4, 0.0), (0.6000000000000001, 0.0), (0.8, 0.0), (1.0, 0.0)]
print("straight_line 按照 step=0.2 米离散",straight_line.disperse2d(0.2))
# arc_line with the default step: inspect the length and first two entries.
print("arc_line 按照默认 step 离散,查看离散后点数组的长度",len(arc_line.disperse2d()))
# 3143
print("arc_line 按照默认 step 离散,查看前 2 项",arc_line.disperse2d()[:2])
# [(1.0, 0.0), (0.9999995001296789, 0.0009998701878188414)]
# disperse2d_with_distance(): like disperse2d() but each point carries its
# arc-length position — convenient for plotting field components vs position.
# NOTE(review): the label below says step=0.2 but the call uses 0.25; the
# runtime string is kept as written.
print("straight_line 按照 step=0.2 米离散",straight_line.disperse2d_with_distance(0.25))
# returns [(0.0:(0.0, 0.0)), (0.25:(0.25, 0.0)), (0.5:(0.5, 0.0)), (0.75:(0.75, 0.0)), (1.0:(1.0, 0.0))]
# disperse3d() / disperse3d_with_distance(): 3-D analogues; an optional
# p2_t0_p3 lambda controls how each 2-D point is lifted to 3-D.
# Example: shift each converted point by 0.7 m along z, show two entries.
print(
    "arc_line 按照默认 step 离散,p2_t0_p3 将转换的点 z 方向平移 0.7 米,查看前 2 项",
    arc_line.disperse3d_with_distance(p2_t0_p3=lambda p2:p2.to_p3()+P3(z=0.7))[:2]
)# [(0.0:(1.0, 0.0, 0.7)), (0.0009998703544206852:(0.9999995001296789, 0.0009998701878188414, 0.7))]
# __str__() renders the curve as a string (invoked automatically by print()).
print(straight_line)
# prints: 直线段,起点(0.0, 0.0),方向(1.0, 0.0),长度1.0
print(straight_line.__str__())
# prints: 直线段,起点(0.0, 0.0),方向(1.0, 0.0),长度1.0
print(arc_line)
# prints: 弧线段,起点(1.0, 0.0),方向(6.123233995736766e-17, 1.0),顺时针False,半径1.0,角度3.141592653589793
print(arc_line.__str__())
# prints: 弧线段,起点(1.0, 0.0),方向(6.123233995736766e-17, 1.0),顺时针False,半径1.0,角度3.141592653589793
# -------------------------------------------------- #
# StraightLine2: a directed 2-D segment built from length, direction, start.
straight_line = StraightLine2(
    length=1*M,  # 1 metre
    direct=P2.x_direct(),  # +x direction
    start_point=P2.origin()  # start at the origin
)
# All Line2 methods apply; below are the StraightLine2-specific ones.
# position_of(point): which side of the directed segment the point lies on.
# Because the segment is directed, left and right are well defined:
#   1  -> point is to the right of the segment
#  -1  -> point is to the left of the segment
#   0  -> point is on the segment's supporting line
# Diagram (dashes: the segment, --> its direction; %, & and $ are points):
#                 %
#  --------------&---->
#                          $
# % is on the left (-1), & is on the line (0), $ is on the right (1).
print("点 (0.5, 0) 在straight_line上,所以 position_of 返回",straight_line.position_of(P2(x=0.5))) # 0
print("点 (-10, 0) 在straight_line所在直线上,所以 position_of 返回",straight_line.position_of(P2(x=-10))) # 0
print("点 (0.5, 5) 在straight_line的左侧,所以 position_of 返回",straight_line.position_of(P2(0.5, 5))) # -1
print("点 (-10, -1) 在straight_line的右侧,所以 position_of 返回",straight_line.position_of(P2(-10, -1))) # 1
# straight_line_equation(): coefficients (A, B, C) of the supporting line
# Ax + By + C = 0. The triple is not unique — do not compare directly.
print("straight_line 所在直线方程系数为:",straight_line.straight_line_equation())
# (0.0, 1.0, -0.0)
# Class method intersecting_point(pa, va, pb, vb): intersection of lines a, b.
#   pa / pb — a point on line a / b;  va / vb — direction of line a / b.
# Returns (cp, ka, kb) with cp = pa + va*ka = pb + vb*kb.
# Example: line a through (0,0) with direction (1,1), line b through (1,1)
# with direction (1,-1); the intersection is clearly (1,1).
print("求两条直线 a 和 b 的交点:",StraightLine2.intersecting_point(
    pa = P2.origin(),
    va = P2(1,1),
    pb = P2(1,1),
    vb = P2(1,-1)
)) # ((1.0, 1.0), 1.0, 0.0)
# Class method is_on_right(view_point, view_direct, viewed_point):
# is viewed_point to the right, seen from view_point looking along
# view_direct?  1 = right, 0 = dead ahead/behind, -1 = left.
# Similar in spirit to position_of(). Check a point right of the y axis:
print("查看 y 轴右侧的一个点",StraightLine2.is_on_right(
    view_point=P2.origin(),
    view_direct=P2.y_direct(),
    viewed_point=P2(1,1)
)) # 1
# Class method calculate_k_b(p1, p2): the line through two points expressed
# as y = kx + d, returned as the tuple (k, d). Example with (0,1) and (2,5):
print("类函数 calculate_k_b(p1,p2) 计算过两点的直线方程",
    StraightLine2.calculate_k_b(
        p1 = P2(0,1),
        p2 = P2(2,5),
    )) # (2.0, 1.0)
# -------------------------------------------------- #
# ArcLine2: a directed arc built from centre, radius, starting angle,
# sweep angle and direction.
#   center       — centre of the arc's circle
#   radius       — arc radius
#   starting_phi — polar angle of the arc's start point (pole at the centre,
#                  polar axis along +x)
#   total_phi    — sweep angle in RADIANS (90 deg for a quarter circle, etc.)
#   clockwise    — True for clockwise, False for counter-clockwise
# Example illustrating starting_phi: three 45-degree arcs starting at
# 30/45/60 degrees (different radii so the plots are distinguishable).
DEG = 1/180*math.pi  # degree-to-radian conversion factor
ORI = P2.origin()
arc45_start30 = ArcLine2(starting_phi=30*DEG,center=ORI,radius=10,total_phi=45*DEG,clockwise=False)
arc45_start45 = ArcLine2(starting_phi=45*DEG,center=ORI,radius=11,total_phi=45*DEG,clockwise=False)
arc45_start60 = ArcLine2(starting_phi=60*DEG,center=ORI,radius=12,total_phi=45*DEG,clockwise=False)
# Uncomment to plot:
# plt.plot(*P2.extract(arc45_start30.disperse2d()))
# plt.plot(*P2.extract(arc45_start45.disperse2d()))
# plt.plot(*P2.extract(arc45_start60.disperse2d()))
# plt.axis("equal")  # equal x/y scales
# plt.show()
# Example illustrating `clockwise`: two arcs with opposite directions.
arc45_clockwise = ArcLine2(starting_phi=30*DEG,center=ORI,radius=10,total_phi=45*DEG,clockwise=True)
arc45_anticlockwise = ArcLine2(starting_phi=30*DEG,center=ORI,radius=11,total_phi=45*DEG,clockwise=False)
# Uncomment to plot:
# plt.plot(*P2.extract(arc45_clockwise.disperse2d()))
# plt.plot(*P2.extract(arc45_anticlockwise.disperse2d()))
# plt.axis("equal")  # equal x/y scales
# plt.show()
# ArcLine2.create(): an alternative constructor, convenient for beamline
# design. Parameters:
#   start_point  — the arc's start point
#   start_direct — tangent direction at the start
#   radius       — arc radius
#   clockwise    — True for clockwise, False for counter-clockwise
#   total_deg    — sweep angle in DEGREES (unlike the constructor!)
# Example: 90-degree clockwise arc, radius 10, starting at (0,100) along +y.
arc90 = ArcLine2.create(
    start_point=P2(0,100),
    start_direct=P2.y_direct(),
    radius=10,
    clockwise=True,
    total_deg=90
)
# Uncomment to plot:
# plt.plot(*P2.extract(arc90.disperse2d()))
# plt.axis("equal")  # equal x/y scales
# plt.show()
# Class method unit_circle(phi): Cartesian (x, y) of the polar point
# (r = 1.0, phi = phi). Example at 45 degrees:
print("unit_circle(45) =",ArcLine2.unit_circle(45*DEG))
# (0.7071067811865476, 0.7071067811865476)
# __str__() and __repr__() render the arc as a string (print() calls them
# automatically); all three statements below print:
# 弧线段[起点(0.0, 100.0),方向(6.123233995736766e-17, 1.0),顺时针,半径10,角度1.5707963267948966]
print(arc90)
print(arc90.__str__())
print(arc90.__repr__())
from django import forms
from Apis.canino.models import Perro
class PerroForm(forms.ModelForm):
    """ModelForm for creating/editing Perro (dog) records.

    Labels are in Spanish to match the site's UI; widgets apply the
    Bootstrap 'form-control' class.
    """
    # TODO: Define other fields here
    class Meta:
        model = Perro
        fields = [
            'nombre',
            'sexo',
            'raza',
            'edad',
            'rescate',
            'adoptante',
            'vacuna',
        ]
        labels = {
            'nombre':'Nombre',
            'sexo':'Sexo',
            'raza':'Raza',
            'edad':'Edad',
            'rescate':'fecha de rescate o ingreso',
            'adoptante': 'Adoptante',
            'vacuna':'Vacunas',
        }
        widgets = {
            'nombre':forms.TextInput(attrs={'class':'form-control'}),
            'sexo':forms.Select(attrs={'class': 'form-control'}),
            'raza':forms.Select(attrs={'class':'form-control'}),
            'edad':forms.TextInput(attrs={'class':'form-control'}),
            'rescate':forms.TextInput(attrs={'class':'form-control'}),
            'adoptante':forms.Select(attrs={'class':'form-control'}),
            'vacuna':forms.CheckboxSelectMultiple(),
        }
|
# coding=utf-8
import sys
import draw
import re
import message
# Module state: 0 until the first (canvas-initialisation) line is parsed.
initFlag = 0
def parser_init(line):
    """Dispatch one input line.

    The first line initialises the canvas (init); every later line defines
    an object to draw (newObj). Sentences are split on ASCII '.' or the
    fullwidth '。' terminator.
    """
    # FIX: use a raw string — "\.|\。" relied on invalid string escapes
    # (SyntaxWarning on modern Python); the pattern matched is unchanged.
    texts = re.split(r"\.|。", line)
    if initFlag == 0:
        init(texts)
    else:
        newObj(texts)
    return 0
def cleanList(list):
    """Remove every empty-string element in place; return the same list.

    Note: the parameter shadows the builtin `list`; the name is kept for
    interface compatibility with existing callers.
    """
    # PERF FIX: the original while/remove loop was O(n^2); rebuild once and
    # splice back so the in-place semantics are preserved.
    list[:] = [item for item in list if item != '']
    return list
def init(texts):
    """Parse the first input line: canvas size and background colour.

    Side effects: creates the output picture via draw.initPic, paints the
    background, and flips the module-level initFlag so subsequent lines are
    treated as object definitions.
    """
    global initFlag
    for text in texts:
        # command % 10 == 4: this sentence carries the background colour.
        if message.command(text) % 10 == 4:
            background = int(re.sub("\D", "", text))
        # command % 10 == 3: this sentence carries the canvas width/height.
        # NOTE(review): `background` is unbound here if the size sentence
        # precedes the colour sentence — confirm the input ordering.
        if message.command(text) % 10 == 3:
            temp = re.sub("\D", "|", text).split("|")
            temp = cleanList(temp)
            draw.initPic(sys.argv[1].split(".")[0], "2", temp[0], temp[1])
            draw.bg(background)
    initFlag = 1
def newObj(texts):
    """Parse one object-definition line and draw the described shape.

    Each sentence either names the object kind (0 line, 1 rectangle,
    2 square, 3 circle, 4 polygon) or carries a numbered property; the
    properties are collected into `obj`, keyed by command-code % 10,
    before the shape is drawn.
    """
    # print()
    obj = {}
    for text in texts:
        if text == "\n" or text == "":
            break
        # A '#' marks a comment line in the input: skip the whole line.
        if text.find('#') != -1:
            return 0
        if message.objName(text) != -1:
            obj['name'] = message.objName(text) % 5
        else:
            # Property sentence: keep the digits, '|'-separated.
            obj[str(message.command(text) % 10)] = re.sub("\D", "|", text)
    # name == 0: line segment from start to end (property 7: coordinates).
    if obj['name'] == 0:
        temp = obj['7'].split("|")
        temp = cleanList(temp)
        start = [int(temp[0]), int(temp[1])]
        end = [int(temp[2]), int(temp[3])]
        color = int(re.sub("\|", "", obj.get("2", "-1")))
        draw.drawLine(start, end, color)
    # name == 1: rectangle (centre + width + height).
    if obj['name'] == 1:
        temp = obj['7'].split("|")
        temp = cleanList(temp)
        center = [int(temp[0]), int(temp[1])]
        width = int(re.sub("\|", " ", obj['0']))
        height = int(re.sub("\|", "", obj['1']))
        color = int(re.sub("\|", "", obj['2']))
        borderColor = int(re.sub("\|", "", obj.get("8", "-1")))
        draw.drawRectangle(center, width, height, color, borderColor)
    # name == 2: square — the height deliberately reuses the width property.
    if obj['name'] == 2:
        temp = obj['7'].split("|")
        temp = cleanList(temp)
        center = [int(temp[0]), int(temp[1])]
        width = int(re.sub("\|", "", obj['0']))
        height = int(re.sub("\|", "", obj['0']))
        color = int(re.sub("\|", "", obj.get("2", "-1")))
        borderColor = int(re.sub("\|", "", obj.get("8", "-1")))
        draw.drawRectangle(center, width, height, color, borderColor)
    # name == 3: full circle (property 6: radius).
    if obj['name'] == 3:
        temp = obj['7'].split("|")
        temp = cleanList(temp)
        center = [int(temp[0]), int(temp[1])]
        color = int(re.sub("\|", "", obj.get("2", "-1")))
        borderColor = int(re.sub("\|", "", obj.get("8", "-1")))
        radius = int(re.sub("\|", "", obj['6']))
        # drawCricle is the (misspelled) drawing-module API name.
        draw.drawCricle(center, radius, 360, color, borderColor)
    # name == 4: polygon (property 9: flat list of x,y vertex pairs).
    if obj['name'] == 4:
        color = int(re.sub("\|", "", obj.get("2", "-1")))
        borderColor = int(re.sub("\|", "", obj.get("8", "-1")))
        order = obj['9'].split("|")
        orderList = []
        order = cleanList(order)
        for i in range(0, int(len(order)/2)):
            orderList.append([int(order[i*2]), int(order[(i*2+1)])])
        draw.drawPolygon(orderList, borderColor, color)
    return 0
# Driver: feed the input file (first CLI argument) line by line to the
# parser, then save and export the finished drawing.
inputFile = open(sys.argv[1], "r")
try:
    for line in inputFile:
        parser_init(line)
finally:
    inputFile.close()
draw.saveFile()
draw.export()
|
import os
from pyglet.gl import *
from data import scanDirectory, loadImages
from game_map import Map
from render import Render
from creatures import Spawn
def init():
    """Create the window, load game data, and build map/spawner/renderer.

    NOTE(review): most locals (state, map, spawner, render) are kept alive
    only through mutual references/handlers — confirm nothing is collected.
    """
    window = pyglet.window.Window(width=800,height=600, resizable=True, visible=False)
    window.clear()
    # Install our resize handler, then invoke it once for the initial size.
    window.resize = resize
    window.set_visible(True)
    window.resize(window.width, window.height)
    # Locate the ../data directory relative to this source file.
    current_dir = os.path.abspath(os.path.dirname(__file__))
    load_files_from_dir = 'data'
    data_dir = os.path.normpath(os.path.join(current_dir, '..', load_files_from_dir))
    game_data = {}
    scanDirectory(data_dir, game_data)
    loadImages(game_data['data']['agents']['Monster01']['animations'])
    loadImages(game_data['data']['map']['elements']['House01'])
    game_data["window"] = window
    state = GameState(game_data)
    game_data["game"] = state
    map = Map(32, 32, game_data, 32)
    spawner = Spawn(game_data)
    map.populate()
    render = Render(game_data)
    # BUG FIX: `print game_data` is Python-2-only syntax; the call form
    # prints identically for a single argument on both Python 2 and 3.
    print(game_data)
def resize(widthWindow, heightWindow):
    """Initial settings for the OpenGL state machine, clear color, window size, etc"""
    # Note: the width/height parameters are currently unused; this only
    # configures global GL state (blending, smoothing, no depth test).
    glEnable(GL_BLEND)
    glEnable(GL_POINT_SMOOTH)
    glShadeModel(GL_SMOOTH)# Enables smooth shading
    glBlendFunc(GL_SRC_ALPHA,GL_ONE)# Type of blending to perform
    glHint(GL_PERSPECTIVE_CORRECTION_HINT,GL_NICEST);# Really nice perspective calculations
    glHint(GL_POINT_SMOOTH_HINT,GL_NICEST);# Really nice point smoothing
    glDisable(GL_DEPTH_TEST)
'''
All objects the create/receive events register with the game state
'''
class GameState(pyglet.event.EventDispatcher):
    """Central event hub: receives window events and relays them, plus
    custom game events, to registered child handlers."""
    def __init__(self, game_data):
        self.time = 0
        self.window = game_data["window"]
        # Receive the window's events (on_draw, on_key_press, ...).
        self.window.push_handlers(self)
        # Drive on_update at roughly 60 Hz.
        pyglet.clock.schedule_interval(self.update, 1.0/60)
    def add_handler(self, handler):
        '''Add an event handler to the gamestate.
        event handlers are objects which accept and generate events'''
        if handler != None:
            self.push_handlers(handler)
        else:
            pass
    def update(self, dt):
        # dt: seconds elapsed since the previous scheduled tick.
        self.dispatch_event('on_update', dt)
    # Relay many window events to our child handlers
    def on_draw(self, *args):
        self.window.clear()
        self.relay_event('on_draw', *args)
    def on_key_press(self, *args):
        self.relay_event('on_key_press', *args)
    def on_key_release(self, *args):
        self.relay_event('on_key_release', *args)
    def on_mouse_motion(self, *args):
        self.relay_event('on_mouse_motion', *args)
    def on_mouse_press(self, *args):
        self.relay_event('on_mouse_press', *args)
    def spawn(self, class_name, tile):
        # Custom event: ask listeners to spawn class_name on tile.
        self.dispatch_event('on_spawn', class_name, tile)
    def relay_event(self, event_type, *args):
        '''Similar to dispatch_event, only it does not call the event
        handler on the dispatcher itself, which is not desired for relaying.
        :Parameters:
            `event_type` : str
                Name of the event.
            `args` : sequence
                Arguments to pass to the event handler.
        '''
        assert event_type in self.event_types
        # Search handler stack for matching event handlers
        for frame in list(self._event_stack):
            handler = frame.get(event_type, None)
            if handler:
                try:
                    # A truthy return stops propagation, as in pyglet.
                    if handler(*args):
                        return
                except TypeError:
                    self._raise_dispatch_exception(event_type, args, handler)
# Standard window events relayed by GameState.
GameState.register_event_type('on_key_press')
GameState.register_event_type('on_key_release')
GameState.register_event_type('on_mouse_motion')
GameState.register_event_type('on_mouse_press')
GameState.register_event_type('on_draw')
GameState.register_event_type('on_update')
#Custom events live here
GameState.register_event_type('on_spawn')
|
'''
시간초과 나면 큐로 풀려고 했으나, list 만 사용해도 해결됨...
원리는 너비탐색이고, 처음 배추밭을 0,0부터 시작해서 1을 만나는 지점에서 너비탐색을 하게 될때마다 카운트를 하면
필요한 배추벌레 마리수를 구할 수 있다.
너비탐색을 실시할때는 거쳐간 좌표는 1 -> 0으로 값을 바꿔주면서 해당 좌표를 큐에 집어넣고
큐에서 좌표를 뽑아와서는 동서남북 칸에 1이 있는지, 그 칸이 배추 밭 안의 칸인지를 동시 조건으로 체크하고
그 칸이 조건에 맞으면 다시 그 칸의 값을 1 -> 0으로 바꿔주고 큐에 넣어주는 형태로 실시해준다.
'''
import sys
from queue import Queue
# Shorthand for fast line-by-line input reading.
r = sys.stdin.readline
# Relative x/y offsets of the four neighbouring cells used by the BFS.
moveX = [-1, 0, 1, 0]
moveY = [0, -1, 0, 1]
def check(x, y, wid, hei):
    """Return 1 if (x, y) lies inside the wid x hei field, else 0."""
    inside = (0 <= x < wid) and (0 <= y < hei)
    return 1 if inside else 0
def bfs(cx, cy, wid, hei):
    """Flood-fill from (cx, cy): clear every 4-connected `land` cell that
    holds 1, starting from an already-cleared seed cell.

    Mutates the module-level `land` grid in place; cells are marked
    visited by setting them to 0.
    """
    from collections import deque
    # PERF FIX: the original used `del q[0]` on a list, an O(n) operation
    # per pop; deque.popleft() is O(1).
    q = deque()
    q.append([cx, cy])
    while q:
        x, y = q.popleft()
        for i in range(4):
            nx = x + moveX[i]
            ny = y + moveY[i]
            if check(nx, ny, wid, hei) == 1 and land[nx][ny] == 1:
                land[nx][ny] = 0
                q.append([nx, ny])
# T test cases; each describes one cabbage field read from stdin.
T = int(r())
result = []
for i in range(T) :
    cnt = 0
    temp = list(map(int, r().split()))  # [width, height, number of cabbages]
    # land[row][col]: temp[1] rows by temp[0] columns, initially all 0
    land = [[0 for _ in range(temp[0])] for _ in range(temp[1])]
    for j in range(temp[2]) :
        xy = list(map(int, r().split()))  # cabbage at column xy[0], row xy[1]
        land[xy[1]][xy[0]] = 1
    # Every BFS wipes one connected patch of cabbages; the number of BFS
    # launches equals the number of worms needed.
    for j in range(temp[1]) :
        for k in range(temp[0]) :
            if land[j][k] == 1 :
                land[j][k] = 0
                bfs(j, k, temp[1], temp[0])
                cnt += 1
    result.append(cnt)
for i in result :
    print(i)
import matplotlib.pyplot as plt
from dataframe import tables, roomColumn
# Look-up table: room-number string -> its data table, so a room's table
# can be fetched by the string value coming from the UI.
rooms = dict(zip(roomColumn, tables))
class Graph():
    """Plot Temperature vs Humidity for one room with matplotlib.

    `room` (e.g. "0101") selects the table from `rooms`; `type` ("line"
    or "bar") selects which kind of plot getPlot() draws.
    """

    def __init__(self, room: str, type: str):
        self.room = room
        self.type = type
        # "0101" -> "01.01": human-readable label used as the plot title.
        self.title = room[:2] + '.' + room[2:]

    def __str__(self):
        return self.title

    def makeLine(self):
        """Draw a line plot of Temperature against Humidity for this room."""
        fig, axes = plt.subplots()
        table = rooms[self.room]
        axes.plot(table['Temperature'], table['Humidity'], label=self.title)
        axes.set_xlabel('Temperature')
        axes.set_ylabel('Humidity')
        axes.set_title(self.title)
        axes.legend()

    def makeHist(self):
        """Draw a bar plot of Temperature against Humidity for this room."""
        fig, axes = plt.subplots()
        table = rooms[self.room]
        axes.bar(table['Temperature'], table['Humidity'], label=self.title)
        axes.set_xlabel('Temperature')
        axes.set_ylabel('Humidity')
        axes.set_title(self.title)

    def show(self):
        """Display whatever has been plotted so far."""
        plt.show()

    def getPlot(self):
        """Plot according to self.type ('line' or 'bar'), then display it."""
        drawers = {'line': self.makeLine, 'bar': self.makeHist}
        drawer = drawers.get(self.type.lower())
        if drawer is not None:
            drawer()
        self.show()
|
import random
def power(a, b):
    """Return a ** int(b).

    The original comment claimed the loop avoided an "overflow error",
    but Python ints are arbitrary precision, so the repeated-multiply
    loop only made this O(b).  `b` is truncated to int because callers
    pass p/2, which is a float.  For a negative exponent the original
    loop did zero iterations and returned 1, so that is preserved.
    """
    n = int(b)
    return a ** n if n >= 0 else 1
def euclid(a, b):
    """Greatest common divisor of a and b via Euclid's algorithm."""
    while a:
        a, b = b % a, a
    return b
def findPeriod(g, N):
    """Return the smallest i >= 1 with g**i == g**0 (mod N), by brute force."""
    target = 1 % N      # g**0 mod N: the residue we are looking for
    current = 1         # g**exponent, grown incrementally to avoid huge pow calls
    exponent = 0
    while True:
        exponent += 1
        current *= g
        if current % N == target:
            return exponent
def shor(N):
    """Classical simulation of Shor's algorithm: return a pair of factors of N.

    Repeatedly guesses a random base g.  Either g already shares a factor
    with N (lucky guess), or the period p of g^x mod N is used to build
    factors from g^(p/2) +/- 1.  Only an even period is usable, hence the
    retry loop.  Requires N >= 5 for randint(3, N - 2) to be valid.
    """
    p = -1  # sentinel: stays -1 only when we guessed a factor outright
    g = 2
    while p % 2 != 0:
        g = random.randint(3, N - 2)  # guess a random number
        # the range limits the factors to not be 1 or N
        print("Our guess: " + str(g))
        if euclid(g, N) == 1:  # if we didn't guess a factor improve our guess
            print("Finding period")
            p = findPeriod(g, N)
            print("Period: " + str(p))
        else:
            print("we guessed a factor")
            break
        if p % 2 != 0:
            print("Odd period: we have to start over :(\n")
    if p == -1:  # We guessed a factor so p was never updated
        a = float(euclid(g, N))  # We float them just so they are consistent
        b = float(N / a)
    else:
        # Even period: g^(p/2) - 1 and g^(p/2) + 1 share factors with N.
        num = power(g, p/2)
        a = euclid(N, num + 1)
        b = euclid(N, num - 1)
    return a, b
# Keep calling shor() until it yields a nontrivial pair of factors
# (neither 1 nor the number itself).
factorMe = int(input("Enter a number to factor: "))
a, b = 1, 1
while a in (1, factorMe) or b in (1, factorMe):
    a, b = shor(factorMe)
print("THE FACTORS")
print("A: " + str(a))
print("B: " + str(b))
|
import os
import csv
import subprocess

# Read the security-group names from the first column of the CSV.
# Bug fix: the original iterated csv.reader rows (lists) and formatted the
# whole row into the command, producing `--group-name ['name']`.
with open('sg_name.csv', 'r', newline='') as f:
    sg_names = [row[0] for row in csv.reader(f) if row]

# security_groups = ['sg-0676add0ee71cacbb','sg-0f947c509fae16c84','sg-01471a8e2ed4b5c9c',
# 'sg-00a5c7ab4dcc52eef','sg-0526a8f2243d196ab','sg-9d7a80ef']
for sg in sg_names:
    # subprocess.run with an argument list avoids shell injection via the
    # CSV contents; stdout goes to created_sg.json like the original
    # `> created_sg.json` redirect (truncated on each iteration).
    with open('created_sg.json', 'w') as out:
        subprocess.run(
            ['aws', 'ec2', 'create-security-group',
             '--group-name', sg,
             '--description', 'My security group'],
            stdout=out, check=False)
|
def dfs(graph, start, end):
    """Iterative depth-first search over an adjacency dict.

    Returns the number of nodes expanded before `end` is first visited
    (`end` itself is not counted); if `end` is never reached, returns the
    total number of expansions.
    """
    visited = []
    stack = [start]
    steps = 0
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.append(node)
        if node == end:
            return steps
        stack.extend(graph[node])
        steps += 1
    return steps
def make_graph(words):
    """Build an adjacency dict linking words that differ in exactly one position.

    Assumes all words have equal length (guaranteed by the problem).
    """
    graph = {}
    for src in words:
        candidates = words.copy()
        candidates.remove(src)
        graph[src] = []
        for dst in candidates:
            mismatches = sum(1 for pos in range(len(dst)) if src[pos] != dst[pos])
            if mismatches == 1:
                graph[src].append(dst)
    return graph
def solution(begin, target, words):
    """Word-ladder entry point: 0 when `target` is not in `words`, otherwise
    the DFS expansion count from `begin` to `target` over the word graph."""
    if target not in words:
        return 0
    graph = make_graph(words + [begin])
    return dfs(graph, begin, target)
#!/usr/bin/env python3
import sys
import math
def parseLine(steps):
    """Trace wire steps like 'R8' into the list of visited (x, y) points.

    The origin (0, 0) is always the first entry; each unit of movement
    appends one point.  An unknown direction letter aborts the program.
    """
    deltas = {"U": (0, 1), "D": (0, -1), "R": (1, 0), "L": (-1, 0)}
    places = [(0, 0)]
    x = 0
    y = 0
    for step in steps:
        dirn = step[0]
        dist = int(step[1:])
        move = deltas.get(dirn)
        for n in range(dist):
            if move is None:
                print("Unknown dirn ", dirn)
                sys.exit()
            x += move[0]
            y += move[1]
            places.append((x, y))
    return places
# Read the two wire paths.  Fix: the original wrapped everything in
# try/finally with fp.close(); if open() itself failed, `fp` was unbound
# and the finally clause raised NameError.  `with` closes reliably.
with open("rgcinput.txt", "r") as fp:
    w1 = fp.readline()
    w2 = fp.readline()

w1pos = parseLine(w1.split(","))
w2pos = parseLine(w2.split(","))

bestx = 0
besty = 0
bestdist = 0          # squared Euclidean distance of the best crossing
# slow but quick to code: for every point on wire 1, look for it on wire 2
w1dist = -1           # steps taken along wire 1 (index into w1pos)
bestwiredist = 0      # best combined signal delay (part 2)
for w in w1pos:
    w1dist += 1
    try:
        w2dist = w2pos.index(w)
    except ValueError:
        # Fix: was a bare `except:`, which also swallowed unrelated bugs;
        # list.index raises ValueError when the point is not on wire 2.
        continue
    (wx, wy) = w
    print("Join at ", (wx, wy), (w1dist, w2dist))
    if wx == 0 and wy == 0:
        continue  # ignore the shared origin
    dist = (wx * wx) + (wy * wy)
    if bestdist == 0 or dist < bestdist:
        bestdist = dist
        bestx = wx
        besty = wy
    if bestwiredist == 0 or (w1dist + w2dist) < bestwiredist:
        bestwiredist = w1dist + w2dist
        print("Current best", bestwiredist)
# NOTE(review): part 1 of AoC 2019 day 3 normally scores by Manhattan
# distance; this prints the Euclidean distance — confirm that is intended.
print("Part 1", math.sqrt(bestx * bestx + besty * besty))
print("Part 2", bestwiredist + 1)
|
# input the start and end index
start = int(input("Enter the start index: "))
end = int(input("Enter the end index: "))

# Bug fix: the original stepped by 2 from `start`, which sums ODD numbers
# whenever `start` is odd.  Begin at the first even number >= start.
first_even = start if start % 2 == 0 else start + 1

# sum of the even numbers from start to end (inclusive)
sum1 = sum(range(first_even, end + 1, 2))
print("Sum of even numbers:", sum1)
from datetime import datetime, timedelta
from dateutil.parser import parse
from olanalytics import search
def test_closest():
    """Exercise search.closest(values, elements) -> list of (index, element).

    For each value it returns the nearest element and its index.  With
    `scope=N`, only elements within distance N qualify; `strict=True`
    appears to make that bound exclusive (inferred from the cases below —
    confirm against olanalytics.search).  Non-matches yield (None, None).
    """
    assert search.closest([0.4], [0, 1, 3]) == [
        (0, 0)
    ]
    # ties/past-the-end values snap to the last element
    assert search.closest([1, 4, 6, 7], [0, 1, 3]) == [
        (1, 1), (2, 3), (2, 3), (2, 3)
    ]
    # values below the first element snap to it
    assert search.closest([-2, -1, 0, 1, 2], [0, 1, 3]) == [
        (0, 0), (0, 0), (0, 0), (1, 1), (2, 3)
    ]
    # scope=2, strict=False: distance == 2 still matches
    assert search.closest(
        [-3, 0, 3, 6, 9], [2, 4], scope=2, strict=False
    ) == [
        (None, None), (0, 2), (1, 4), (1, 4), (None, None)
    ]
    # scope=2, strict=True: distance == 2 no longer matches (value 0 vs 2)
    assert search.closest(
        [-3, 0, 3, 6, 9], [2, 4, 6], scope=2, strict=True
    ) == [
        (None, None), (None, None), (1, 4), (2, 6), (None, None)
    ]
    # timezone-aware datetimes behave like numbers
    elements = [
        parse('2017-04-10T00:00:00+02:00'),
        parse('2017-04-10T00:10:00+02:00'),
        parse('2017-04-10T00:20:00+02:00'),
        parse('2017-04-10T00:30:00+02:00'),
        parse('2017-04-10T00:40:00+02:00'),
        parse('2017-04-10T00:50:00+02:00'),
        parse('2017-04-10T01:00:00+02:00'),
    ]
    values = [
        parse('2017-04-10T00:00:00+02:00'),
        parse('2017-04-10T00:40:00+02:00'),
        parse('2017-04-10T00:50:00+02:00'),
    ]
    assert search.closest(values, elements) == [
        (0, parse('2017-04-10T00:00:00+02:00')),
        (4, parse('2017-04-10T00:40:00+02:00')),
        (5, parse('2017-04-10T00:50:00+02:00')),
    ]
    # identity case: every element is its own closest match
    elements = [
        datetime(2017, 4, 10) + timedelta(seconds=10*60*i) for i in range(20)
    ]
    assert search.closest(elements, elements) == list(enumerate(elements))
def test_previous():
    """Exercise search.previous(values, elements) -> list of (index, element).

    For each value it returns the nearest element that does not exceed the
    value.  `scope` bounds the allowed distance and appears to default to
    a strict (exclusive) bound — inferred from the cases below; confirm
    against olanalytics.search.  Non-matches yield (None, None).
    """
    # strict=False: distance equal to scope still matches
    assert search.previous([1, 5], [0, 2, 4], scope=1, strict=False) == [
        (0, 0), (2, 4)
    ]
    # default strictness rejects distance == scope
    assert search.previous([1, 5], [0, 2, 4], scope=1) == [
        (None, None), (None, None)
    ]
    assert search.previous([1, 5], [2, 4, 6], scope=2) == [
        (None, None), (1, 4)
    ]
    assert search.previous([1, 5, 9], [0, 2], scope=2) == [
        (0, 0), (None, None), (None, None)
    ]
    assert search.previous(
        [1, 2, 3, 4, 5], [0, 2, 4, 6], scope=2, strict=False
    ) == [
        (0, 0), (1, 2), (1, 2), (2, 4), (2, 4)
    ]
    # values smaller than every element have no "previous" match
    assert search.previous([1, 2, 3, 4], [2.5]) == [
        (None, None), (None, None), (0, 2.5), (0, 2.5)
    ]
|
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# BigQuery schema for the crediorbe.asignacion table.  Every source column
# is loaded as STRING; `idkey` is a generated UUID and `fecha` the load
# date stamped by formatearData.  Field order matches the pipe-delimited
# source file.
TABLE_SCHEMA = (
    'idkey:STRING, '
    'fecha:STRING, '
    'OBLIGACION:STRING, '
    'CEDULA:STRING, '
    'CLIENTE:STRING, '
    'PRODUCTO:STRING, '
    'SUBESTADO_MOROSIDAD:STRING, '
    'VALOR_DESEMBOLSO:STRING, '
    'VALOR_CUOTA:STRING, '
    'FECHA_DESEMBOLSO:STRING, '
    'DIAS_MORA:STRING, '
    'FECHA_PROXIMO_PAGO:STRING, '
    'FECHA_ULTIMO_PAGO:STRING, '
    'SALDO_INTERES_CORRIENTE:STRING, '
    'VALOR_MORA:STRING, '
    'VALOR_SALDO_CAPITAL:STRING, '
    'NOM_CIUDAD:STRING, '
    'DIRECCION_CORRESPONDENCIA:STRING, '
    'EMAIL:STRING, '
    'CUOTAS_PAGADAS:STRING, '
    'CUOTAS_SIN_PAGAR:STRING, '
    'CUOTAS_EN_MORA:STRING, '
    'DIA_CORTE:STRING, '
    'USUARIO_GESTIONM:STRING, '
    'TIPO_CONTACTOM:STRING, '
    'GESTION_CONM:STRING, '
    'INFORMACION_CONTACTOM:STRING, '
    'DOCUMENTO_CODEUDORM:STRING, '
    'NOMBRE_CODEUDORM:STRING, '
    'CELULAR_CODEUDORM:STRING, '
    'TELEFONO_CODEUDORM:STRING, '
    'CORREO_CODEUDOR:STRING, '
    'TIPO_CLIENTE:STRING, '
    'TELEFONO:STRING, '
    'CELULAR:STRING, '
    'PROFESION:STRING, '
    'EDAD:STRING, '
    'RESULTADO_GESTIONM:STRING, '
    'FECHA_GESTIONM:STRING, '
    'VALOR_ULTIMO_PAGO:STRING, '
    'FECHA_ULTIMOACUERDOM:STRING, '
    'ACUERDOPAGOM:STRING, '
    'PLACA:STRING, '
    'REFERENCIA:STRING '
)
class formatearData(beam.DoFn):
    """DoFn that turns one pipe-delimited CSV line into a BigQuery row dict."""

    # Source column names in the exact order they appear in the input file
    # (positions 0..41 of each '|'-split line).
    _CAMPOS = (
        'OBLIGACION', 'CEDULA', 'CLIENTE', 'PRODUCTO',
        'SUBESTADO_MOROSIDAD', 'VALOR_DESEMBOLSO', 'VALOR_CUOTA',
        'FECHA_DESEMBOLSO', 'DIAS_MORA', 'FECHA_PROXIMO_PAGO',
        'FECHA_ULTIMO_PAGO', 'SALDO_INTERES_CORRIENTE', 'VALOR_MORA',
        'VALOR_SALDO_CAPITAL', 'NOM_CIUDAD', 'DIRECCION_CORRESPONDENCIA',
        'EMAIL', 'CUOTAS_PAGADAS', 'CUOTAS_SIN_PAGAR', 'CUOTAS_EN_MORA',
        'DIA_CORTE', 'USUARIO_GESTIONM', 'TIPO_CONTACTOM', 'GESTION_CONM',
        'INFORMACION_CONTACTOM', 'DOCUMENTO_CODEUDORM', 'NOMBRE_CODEUDORM',
        'CELULAR_CODEUDORM', 'TELEFONO_CODEUDORM', 'CORREO_CODEUDOR',
        'TIPO_CLIENTE', 'TELEFONO', 'CELULAR', 'PROFESION', 'EDAD',
        'RESULTADO_GESTIONM', 'FECHA_GESTIONM', 'VALOR_ULTIMO_PAGO',
        'FECHA_ULTIMOACUERDOM', 'ACUERDOPAGOM', 'PLACA', 'REFERENCIA',
    )

    def __init__(self, mifecha):
        super(formatearData, self).__init__()
        self.mifecha = mifecha  # load date stamped on every emitted row

    def process(self, element):
        """Split one input line on '|' and emit a single row dict."""
        valores = element.split('|')
        fila = {
            'idkey': str(uuid.uuid4()),  # synthetic row key
            'fecha': self.mifecha,
        }
        # Indexing (rather than zip) preserves the original behavior of
        # raising IndexError on rows with too few columns.
        for pos, campo in enumerate(self._CAMPOS):
            fila[campo] = valores[pos]
        return [fila]
def run(archivo, mifecha):
    """Build and run the Beam pipeline: read `archivo` (a pipe-delimited
    CSV in GCS), format each row via formatearData(mifecha) and append it
    to the BigQuery table crediorbe.asignacion.  Returns a status string.
    """
    gcs_path = "gs://ct-crediorbe"  # bucket root used for Dataflow scratch dirs
    gcs_project = "contento-bi"
    # Use DataflowRunner only on the production host; DirectRunner elsewhere.
    mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
    pipeline = beam.Pipeline(runner=mi_runer, argv=[
        "--project", gcs_project,
        "--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
        "--temp_location", ("%s/dataflow_files/temp" % gcs_path),
        "--output", ("%s/dataflow_files/output" % gcs_path),
        "--setup_file", "./setup.py",
        "--max_num_workers", "5",
        "--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
        # "--num_workers", "30",
        # "--autoscaling_algorithm", "NONE"
        ])
    # The first CSV line is a header row and is skipped.
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
    transformed | 'Escritura a BigQuery crediorbe' >> beam.io.WriteToBigQuery(
        gcs_project + ":crediorbe.asignacion",
        schema=TABLE_SCHEMA,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
        )
    # NOTE(review): the job result is not waited on; failures surface only
    # in the Dataflow console — confirm fire-and-forget is intended.
    jobObject = pipeline.run()
    # jobID = jobObject.job_id()
    return ("Corrio Full HD")
|
a = "Python is Fun!"
print(a[0])         #P
print(a[-1])        #! -> negative indexes count from the end
print(a[0 : 6])     #Python -> chars 0..5, similar to Java's substring
print(a[-4 : -1])   #Fun
print(a[:6])        #Python -> omitting the start means "from the beginning"
print(a[-4:])       #Fun! -> through the end of the string
print(a[:])         #Python is Fun! -> omitting both gives the whole string

# len() returns the string length (Java used a.length())
print(len(a))       #14
print("=" * 30)
print(a.find("n"))   #5  -> the n in "Python"
print(a.rfind("n"))  #12 -> the n in "Fun"
print(a.find("Java"))   #-1
#print(a.index("Java")) #raises an exception instead of returning -1
print("=" * 30)
print(a.count("n")) #2
b = ""
print(b.isascii())  #True
print("=" * 30)
str_list = ["ⅷ", "¾", "2³", "3", "3.14"]
# Fix: the loop variable was named `str`, shadowing the builtin type.
for s in str_list:
    print(s, ".isnumeric(): ", s.isnumeric())
    print(s, ".isdigit(): ", s.isdigit())
    print(s, ".isdecimal(): ", s.isdecimal())
    print("-" * 5)
"""
ⅷ .isnumeric(): True
ⅷ .isdigit(): False
ⅷ .isdecimal(): False
-----
¾ .isnumeric(): True
¾ .isdigit(): False
¾ .isdecimal(): False
-----
2³ .isnumeric(): True
2³ .isdigit(): True
2³ .isdecimal(): False
-----
3 .isnumeric(): True
3 .isdigit(): True
3 .isdecimal(): True
-----
3.14 .isnumeric(): False
3.14 .isdigit(): False
3.14 .isdecimal(): False
-----
"""
print("=" * 30)
print(a.replace("Fun", "interesting")) #Python is interesting!
#replace() returns a NEW string; `a` itself is unchanged
print("=" * 30)
print(a[:6].rjust(10)) #    Python -> right-justified in a field of 10 chars
print(a[:6].rjust(3))  #Python -> field smaller than the text: no padding
print(a.split())    #['Python', 'is', 'Fun!'] -> split on whitespace
print(a.split("n")) #['Pytho', ' is Fu', '!'] -> the separator is dropped
#print(a.split("")) #error: cannot split with an empty separator
# To split a string into individual characters use list()
# (fix: the original comprehension also shadowed the builtin `str`):
print(list(a))
#['P', 'y', 't', 'h', 'o', 'n', ' ', 'i', 's', ' ', 'F', 'u', 'n', '!']
# join() concatenates a list of strings with a separator string
print("*".join(["Python", "is", "Fun!"])) #Python*is*Fun!
|
from datetime import datetime
from collections import namedtuple
from dateutil import parser
import sys
import re
import argparse
# Command-line interface: a Taskpaper file plus an optional single context.
argparser = argparse.ArgumentParser(description='Create Context list from Taskpaper file.')
argparser.add_argument('file', help='The filename for the Taskpaper file')
argparser.add_argument('-c', '--context', help='limit output to a specific context')
args = argparser.parse_args()

tpfile = args.file
with open(tpfile, encoding='utf-8') as f:
    tplines = f.readlines()

# One open task: its @context tag, creation date, project and subject text.
Flagged = namedtuple('Flagged', ['type', 'taskdate', 'project', 'task'])

flaglist = []        # all open tasks found in the file
contextlist = set()  # distinct @context values seen
errlist = []         # (line, exception) pairs that failed to parse
done_count = 0       # completed or cancelled tasks
na_count = 0         # open ("not actioned") tasks
total_count = 0
project = ''         # current project header while scanning
# Scan the file once, classifying every line.
for line in tplines:
    try:
        if ':\n' in line:
            # "Project name:" header — remember it for subsequent tasks.
            project = line.strip()[:-1]
        elif (('@done' in line) or ('cancelled' in line)):
            done_count = done_count + 1
        elif ('' == line.strip()) or ('☐' not in line):
            # Blank line or not a task line at all.
            continue
        else:
            # An open task: the '☐' box is present.
            na_count = na_count + 1
            subject = context = created = ''
            found = False
            # 1st form: "☐ subject @context(date)"
            match = re.search(r'☐(?P<subject>[^@]+)@(?P<context>.*)\((?P<date>.*)\)', line)
            if match:
                found = True
                subject = match.group('subject').strip()
                context = match.group('context')
                created = match.group('date')
            if not found:
                # 2nd form: "☐ subject @context" (no creation date)
                match = re.search(r'☐(?P<subject>[^@]+)@(?P<context>.*)', line)
                if match:
                    found = True
                    subject = match.group('subject').strip()
                    context = match.group('context')
            if not found:
                # 3rd form: bare "☐ subject" (no context -> empty string)
                match = re.search(r'☐(?P<subject>.*)', line)
                if match:
                    subject = match.group('subject').strip()
            if created:
                taskdate = datetime.date(parser.parse(created))
            else:
                taskdate = ''
            flaglist.append(
                Flagged(context, taskdate, project, subject))
            contextlist.add(context)
    except Exception as e:
        # Keep scanning; unparsable lines are reported at the end.
        errlist.append((line, e))
print('{0} of {1} tasks done, {2} tasks open'.format(done_count, total_count, na_count))
if args.context:
print('\n%s\n' % (args.context.upper()))
found = False
for task in flaglist:
if ((task.type.lower() == args.context.lower()) and (task.project != 'Archive')):
found = True
open_since = ''
if task.taskdate:
open_since = (today_date - task.taskdate).days
print('\t[%s] %s open since %s days' % (task.project, task.task, open_since))
if not found:
print('\t (none)')
else:
for context in sorted(contextlist):
print('\n%s\n' % (context.upper()))
found = False
for task in flaglist:
if ((task.type == context) and (task.project != 'Archive')):
found = True
open_since = ''
if task.taskdate:
open_since = (today_date - task.taskdate).days
print('\t[%s] %s open since %s days' % (task.project, task.task, open_since))
if not found:
print('\t (none)')
if len(errlist) != 0:
print('\nERROR PARSING THESE LINES\n')
for errtask in errlist:
print('\t%s' % str(errtask))
|
# How many ways to make 2 pounds (200 cents) out of combo of (any number of) 8 types of coin?
# ====================================================================================
# I do not get this one. There is something fairly deep going on here...
# We use the standard dynamic programming algorithm to solve the subset sum problem over integers.
# The order of the coin values does not matter, but the values need to be unique.
def compute():
    """Project Euler 31: count the ways to make 200p from UK coin values.

    Classic coin-change dynamic programming: combos[i] holds the number of
    unordered multisets of the coins processed so far that sum to i.
    Processing one coin value at a time is what makes the count unordered.
    """
    TOTAL = 200
    combos = [1] + [0] * TOTAL
    for coin in (1, 2, 5, 10, 20, 50, 100, 200):
        for amount in range(coin, TOTAL + 1):
            combos[amount] += combos[amount - coin]
    return str(combos[TOTAL])
if __name__ == "__main__":
    print(compute())

# Negative indexing demo: -1 is the last element.
alist = [1, 2, 3]
print(alist[-1])
|
# Copyright 2004-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Public API of this utility module; keep alphabetized when adding names.
__all__ = ['apply_permissions', 'apply_recursive_permissions',
    'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
    'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
    'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict',
    'grabdict_package', 'grabfile', 'grabfile_package', 'grablines',
    'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals',
    'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist',
    'stack_dicts', 'stack_lists', 'unique_array', 'unique_everseen', 'varexpand',
    'write_atomic', 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout']
from copy import deepcopy
import errno
import io
try:
from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
import logging
import re
import shlex
import stat
import string
import sys
import traceback
import glob
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'pickle',
'portage.dep:Atom',
'portage.util.listdir:_ignorecvs_dirs',
'subprocess',
)
from portage import os
from portage import _encodings
from portage import _os_merge
from portage import _unicode_encode
from portage import _unicode_decode
from portage.exception import InvalidAtom, PortageException, FileNotFound, \
OperationNotPermitted, ParseError, PermissionDenied, ReadOnlyFileSystem
from portage.localization import _
from portage.proxy.objectproxy import ObjectProxy
from portage.cache.mappings import UserDict
# Global verbosity threshold used by writemsg(): a message is printed only
# when its noiselevel <= noiselimit.
noiselimit = 0
def initialize_logger(level=logging.WARN):
    """Sets up basic logging of portage activities

    Args:
        level: the level to emit messages at ('info', 'debug', 'warning' ...)
    Returns:
        None
    """
    # Bug fix: the `level` argument was previously ignored — basicConfig
    # was always called with logging.WARN regardless of the caller's wish.
    logging.basicConfig(level=level, format='[%(levelname)-4s] %(message)s')
def writemsg(mystr, noiselevel=0, fd=None):
    """Prints out warning and debug messages based on the noiselimit setting

    The message is written to `fd` (stderr by default) only when
    noiselevel <= the module-global noiselimit.
    """
    global noiselimit
    if fd is None:
        fd = sys.stderr
    if noiselevel <= noiselimit:
        # avoid potential UnicodeEncodeError
        if isinstance(fd, io.StringIO):
            # Text sink: ensure we hand it unicode, replacing bad bytes.
            mystr = _unicode_decode(mystr,
                encoding=_encodings['content'], errors='replace')
        else:
            # Byte sink: encode, escaping characters the stdio encoding
            # cannot represent.
            mystr = _unicode_encode(mystr,
                encoding=_encodings['stdio'], errors='backslashreplace')
            # On python3 the std streams' text layer rejects bytes, so
            # write to the underlying binary buffer instead.
            if sys.hexversion >= 0x3000000 and fd in (sys.stdout, sys.stderr):
                fd = fd.buffer
        fd.write(mystr)
        fd.flush()
def writemsg_stdout(mystr, noiselevel=0):
    """Like writemsg(), but targets stdout instead of stderr."""
    writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
def writemsg_level(msg, level=0, noiselevel=0):
    """
    Show a message for the given level as defined by the logging module
    (default is 0). When level >= logging.WARNING then the message is
    sent to stderr, otherwise it is sent to stdout. The noiselevel is
    passed directly to writemsg().

    @type msg: str
    @param msg: a message string, including newline if appropriate
    @type level: int
    @param level: a numeric logging level (see the logging module)
    @type noiselevel: int
    @param noiselevel: passed directly to writemsg
    """
    target = sys.stderr if level >= logging.WARNING else sys.stdout
    writemsg(msg, noiselevel=noiselevel, fd=target)
def normalize_path(mypath):
    """Normalize a path like os.path.normpath, folding leading '//' to '/'.

    posixpath.normpath("//foo") keeps the double slash (POSIX permits an
    implementation-defined meaning for exactly two leading slashes); this
    wrapper collapses it to a single slash like any other repeated
    separator.
    """
    if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
        path_sep = os.path.sep.encode()
    else:
        path_sep = os.path.sep

    if not mypath.startswith(path_sep):
        return os.path.normpath(mypath)
    # normpath collapses three or more leading separators to one, so
    # prepending two more turns '//' into a form it will collapse.
    return os.path.normpath(path_sep * 2 + mypath)
def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False):
    """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
    begins with a #, it is ignored, as are empty lines"""
    mylines = grablines(myfilename, recursive, remember_source_file=True)
    newlines = []
    for x, source_file in mylines:
        #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
        #into single spaces.
        myline = x.split()
        if x and x[0] != "#":
            # Drop everything from the first token that starts a trailing
            # '#' comment onward.
            mylinetemp = []
            for item in myline:
                if item[:1] != "#":
                    mylinetemp.append(item)
                else:
                    break
            myline = mylinetemp
        myline = " ".join(myline)
        if not myline:
            continue
        if myline[0] == "#":
            # Check if we have a compat-level string. BC-integration data.
            # '##COMPAT==>N<==' 'some string attached to it'
            mylinetest = myline.split("<==", 1)
            if len(mylinetest) == 2:
                myline_potential = mylinetest[1]
                mylinetest = mylinetest[0].split("##COMPAT==>")
                if len(mylinetest) == 2:
                    if compat_level >= int(mylinetest[1]):
                        # It's a compat line, and the key matches.
                        newlines.append(myline_potential)
                continue
            else:
                continue
        if remember_source_file:
            newlines.append((myline, source_file))
        else:
            newlines.append(myline)
    return newlines
def map_dictlist_vals(func, myDict):
    """Performs a function on each value of each key in a dictlist.
    Returns a new dictlist."""
    return {key: [func(item) for item in vals]
        for key, vals in myDict.items()}
def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
    """
    Stacks an array of dict-types into one array. Optionally merging or
    overwriting matching key/value pairs for the dict[key]->list.
    Returns a single dict. Higher index in lists is preferenced.

    Example usage:
    >>> from portage.util import stack_dictlist
    >>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
    >>> {'a':'b','x':'y'}
    >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
    >>> {'a':['b','c'] }
    >>> a = {'KEYWORDS':['x86','alpha']}
    >>> b = {'KEYWORDS':['-x86']}
    >>> print stack_dictlist( [a,b] )
    >>> { 'KEYWORDS':['x86','alpha','-x86']}
    >>> print stack_dictlist( [a,b], incremental=True)
    >>> { 'KEYWORDS':['alpha'] }
    >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
    >>> { 'KEYWORDS':['alpha'] }

    @param original_dicts a list of (dictionary objects or None)
    @type list
    @param incremental True or false depending on whether new keys should overwrite
        keys which already exist.
    @type boolean
    @param incrementals A list of items that should be incremental (-foo removes foo from
        the returned dict).
    @type list
    @param ignore_none Appears to be ignored, but probably was used long long ago.
    @type boolean
    """
    final_dict = {}
    for mydict in original_dicts:
        if mydict is None:
            continue
        for y in mydict:
            if not y in final_dict:
                final_dict[y] = []
            for thing in mydict[y]:
                # falsy entries (empty strings etc.) are skipped entirely
                if thing:
                    if incremental or y in incrementals:
                        if thing == "-*":
                            # wildcard removal: wipe everything seen so far
                            final_dict[y] = []
                            continue
                        elif thing[:1] == '-':
                            # '-foo' removes a previously accumulated 'foo';
                            # missing entries are silently ignored
                            try:
                                final_dict[y].remove(thing[1:])
                            except ValueError:
                                pass
                            continue
                    if thing not in final_dict[y]:
                        final_dict[y].append(thing)
            # drop keys whose value list ended up empty
            if y in final_dict and not final_dict[y]:
                del final_dict[y]
    return final_dict
def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
    """Stacks an array of dict-types into one array. Optionally merging or
    overwriting matching key/value pairs for the dict[key]->string.
    Returns a single dict.  For incremental keys, values from later dicts
    are appended with a space separator; otherwise later dicts win."""
    merged = {}
    for current in dicts:
        if not current:
            continue
        for key, val in current.items():
            if key in merged and (incremental or (key in incrementals)):
                merged[key] = merged[key] + " " + val
            else:
                merged[key] = val
    return merged
def append_repo(atom_list, repo_name, remember_source_file=False):
    """
    Takes a list of valid atoms without repo spec and appends ::repo_name.
    """
    suffix = "::" + repo_name
    if remember_source_file:
        return [(Atom(atom + suffix, allow_wildcard=True, allow_repo=True), source)
            for atom, source in atom_list]
    return [Atom(atom + suffix, allow_wildcard=True, allow_repo=True)
        for atom in atom_list]
def stack_lists(lists, incremental=1, remember_source_file=False,
    warn_for_unmatched_removal=False, strict_warn_for_unmatched_removal=False, ignore_repo=False):
    """Stacks an array of list-types into one array. Optionally removing
    distinct values using '-value' notation. Higher index is preferenced.

    all elements must be hashable.

    When remember_source_file is true, each token is a (token, source_file)
    pair and the result is a list of such pairs; `new_list` is keyed on the
    bare token either way, so later occurrences overwrite earlier ones.
    """
    matched_removals = set()
    unmatched_removals = {}
    new_list = {}
    for sub_list in lists:
        for token in sub_list:
            token_key = token
            if remember_source_file:
                token, source_file = token
            else:
                source_file = False
            if token is None:
                continue
            if incremental:
                if token == "-*":
                    # wildcard removal clears everything accumulated so far
                    new_list.clear()
                elif token[:1] == '-':
                    matched = False
                    if ignore_repo and not "::" in token:
                        #Let -cat/pkg remove cat/pkg::repo.
                        to_be_removed = []
                        token_slice = token[1:]
                        for atom in new_list:
                            atom_without_repo = atom
                            if atom.repo is not None:
                                # Atom.without_repo instantiates a new Atom,
                                # which is unnecessary here, so use string
                                # replacement instead.
                                atom_without_repo = \
                                    atom.replace("::" + atom.repo, "", 1)
                            if atom_without_repo == token_slice:
                                to_be_removed.append(atom)
                        if to_be_removed:
                            matched = True
                            for atom in to_be_removed:
                                new_list.pop(atom)
                    else:
                        try:
                            new_list.pop(token[1:])
                            matched = True
                        except KeyError:
                            pass
                    if not matched:
                        # removal had nothing to remove: remember it so it
                        # can be reported (per source file) further below
                        if source_file and \
                            (strict_warn_for_unmatched_removal or \
                            token_key not in matched_removals):
                            unmatched_removals.setdefault(source_file, set()).add(token)
                    else:
                        matched_removals.add(token_key)
                else:
                    new_list[token] = source_file
            else:
                new_list[token] = source_file
    if warn_for_unmatched_removal:
        for source_file, tokens in unmatched_removals.items():
            if len(tokens) > 3:
                # summarize long lists: show three tokens plus a count
                selected = [tokens.pop(), tokens.pop(), tokens.pop()]
                writemsg(_("--- Unmatched removal atoms in %s: %s and %s more\n") % \
                    (source_file, ", ".join(selected), len(tokens)),
                    noiselevel=-1)
            else:
                writemsg(_("--- Unmatched removal atom(s) in %s: %s\n") % (source_file, ", ".join(tokens)),
                    noiselevel=-1)
    if remember_source_file:
        return list(new_list.items())
    else:
        return list(new_list)
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
    """
    This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary

    @param myfilename: file to process
    @type myfilename: string (path)
    @param juststrings: only return strings
    @type juststrings: Boolean (integer)
    @param empty: Ignore certain lines
    @type empty: Boolean (integer)
    @param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
    @type recursive: Boolean (integer)
    @param incremental: Append to the return list, don't overwrite
    @type incremental: Boolean (integer)
    @rtype: Dictionary
    @return:
    1.  Returns the lines in a file in a dictionary, for example:
        'sys-apps/portage x86 amd64 ppc'
        would return
        { "sys-apps/portage" : [ 'x86', 'amd64', 'ppc' ]
    """
    newdict = {}
    for x in grablines(myfilename, recursive):
        #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
        #into single spaces.
        if x[0] == "#":
            # whole-line comment
            continue
        myline = x.split()
        # strip a trailing '#' comment: keep tokens up to the first one
        # that starts with '#'
        mylinetemp = []
        for item in myline:
            if item[:1] != "#":
                mylinetemp.append(item)
            else:
                break
        myline = mylinetemp
        # with empty=0 a key with no values is skipped; with empty=1 only
        # completely blank lines are skipped
        if len(myline) < 2 and empty == 0:
            continue
        if len(myline) < 1 and empty == 1:
            continue
        if incremental:
            newdict.setdefault(myline[0], []).extend(myline[1:])
        else:
            newdict[myline[0]] = myline[1:]
    if juststrings:
        # collapse each value list into a single space-joined string
        for k, v in newdict.items():
            newdict[k] = " ".join(v)
    return newdict
# Maps eapi-file path -> its contents; None is cached for missing/invalid
# files so repeated lookups stay cheap.
_eapi_cache = {}

def read_corresponding_eapi_file(filename, default="0"):
    """
    Read the 'eapi' file from the directory 'filename' is in.
    Returns "0" if the file is not present or invalid.
    """
    eapi_file = os.path.join(os.path.dirname(filename), "eapi")
    try:
        eapi = _eapi_cache[eapi_file]
    except KeyError:
        pass
    else:
        # cache hit (None means "missing or invalid" -> use the default)
        if eapi is None:
            return default
        return eapi
    eapi = None
    try:
        f = io.open(_unicode_encode(eapi_file,
            encoding=_encodings['fs'], errors='strict'),
            mode='r', encoding=_encodings['repo.content'], errors='replace')
        # NOTE(review): if readlines() raises, f is never closed — consider
        # try/finally or a `with` block here.
        lines = f.readlines()
        if len(lines) == 1:
            eapi = lines[0].rstrip("\n")
        else:
            writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
                noiselevel=-1)
        f.close()
    except IOError:
        # missing/unreadable file: fall through and cache None
        pass
    _eapi_cache[eapi_file] = eapi
    if eapi is None:
        return default
    return eapi
def grabdict_package(myfilename, juststrings=0, recursive=0, allow_wildcard=False, allow_repo=False,
    verify_eapi=False, eapi=None):
    """Does the same thing as grabdict except it validates keys
    with isvalidatom(); invalid atoms are reported via writemsg() and
    dropped from the result."""
    pkgs = grabdict(myfilename, juststrings, empty=1, recursive=recursive)
    if not pkgs:
        return pkgs
    if verify_eapi and eapi is None:
        eapi = read_corresponding_eapi_file(myfilename)
    # Collect validated atoms into a fresh dict instead of deleting from
    # `pkgs` while iterating it, which would raise
    # "RuntimeError: dictionary changed size during iteration".
    valid_atoms = {}
    for key, val in pkgs.items():
        try:
            atom_key = Atom(key, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
        except InvalidAtom as e:
            writemsg(_("--- Invalid atom in %s: %s\n") % (myfilename, e),
                noiselevel=-1)
        else:
            valid_atoms[atom_key] = val
    return valid_atoms
def grabfile_package(myfilename, compatlevel=0, recursive=0, allow_wildcard=False, allow_repo=False,
    remember_source_file=False, verify_eapi=False, eapi=None):
    """Like grabfile(), but validates each line as a package atom.

    Lines may carry a '-' prefix (removal, as in package.mask) and, in
    'packages' files, a '*' prefix (system-set marker); those prefixes are
    stripped for validation and the original prefixed string is returned.
    Invalid atoms are reported via writemsg() and skipped.
    """
    pkgs = grabfile(myfilename, compatlevel, recursive=recursive, remember_source_file=True)
    if not pkgs:
        return pkgs
    if verify_eapi and eapi is None:
        eapi = read_corresponding_eapi_file(myfilename)
    mybasename = os.path.basename(myfilename)
    atoms = []
    for pkg, source_file in pkgs:
        pkg_orig = pkg
        # for packages and package.mask files
        if pkg[:1] == "-":
            pkg = pkg[1:]
        if pkg[:1] == '*' and mybasename == 'packages':
            pkg = pkg[1:]
        try:
            pkg = Atom(pkg, allow_wildcard=allow_wildcard, allow_repo=allow_repo, eapi=eapi)
        except InvalidAtom as e:
            writemsg(_("--- Invalid atom in %s: %s\n") % (source_file, e),
                noiselevel=-1)
        else:
            if pkg_orig == str(pkg):
                # normal atom, so return as Atom instance
                if remember_source_file:
                    atoms.append((pkg, source_file))
                else:
                    atoms.append(pkg)
            else:
                # atom has special prefix, so return as string
                if remember_source_file:
                    atoms.append((pkg_orig, source_file))
                else:
                    atoms.append(pkg_orig)
    return atoms
def grablines(myfilename, recursive=0, remember_source_file=False):
	"""Return the lines of myfilename as a list, optionally recursing
	into a directory of files (skipping hidden files, backup files
	ending in "~", and VCS directories listed in _ignorecvs_dirs).

	@param remember_source_file: when True, each element is a
		(line, source_filename) tuple instead of a bare line
	@return: list of lines (or tuples); empty list if the path is
		missing or stale
	@raises PermissionDenied: translated from the matching errno
	"""
	mylines = []
	if recursive and os.path.isdir(myfilename):
		if os.path.basename(myfilename) in _ignorecvs_dirs:
			return mylines
		try:
			dirlist = os.listdir(myfilename)
		except OSError as e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(myfilename)
			elif e.errno in (errno.ENOENT, errno.ESTALE):
				return mylines
			else:
				raise
		# Sort for deterministic ordering across filesystems.
		dirlist.sort()
		for f in dirlist:
			if not f.startswith(".") and not f.endswith("~"):
				mylines.extend(grablines(
					os.path.join(myfilename, f), recursive, remember_source_file))
	else:
		try:
			# Use a context manager so the file descriptor is released
			# even if readlines() raises; the previous code only closed
			# the file on the success path.
			with io.open(_unicode_encode(myfilename,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'],
				errors='replace') as myfile:
				if remember_source_file:
					mylines = [(line, myfilename) for line in myfile.readlines()]
				else:
					mylines = myfile.readlines()
		except IOError as e:
			if e.errno == PermissionDenied.errno:
				raise PermissionDenied(myfilename)
			elif e.errno in (errno.ENOENT, errno.ESTALE):
				pass
			else:
				raise
	return mylines
def writedict(mydict, myfilename, writekey=True):
	"""Serialize a dict to a file atomically.  With a false writekey only
	the values are written, one per line, and every value must already be
	a string; otherwise each line is the key followed by its
	space-joined value list."""
	if writekey:
		lines = ["%s %s\n" % (key, " ".join(value))
			for key, value in mydict.items()]
	else:
		lines = [value + "\n" for value in mydict.values()]
	write_atomic(myfilename, "".join(lines))
def shlex_split(s):
	"""
	Equivalent to shlex.split(), except that on python2 a unicode
	argument is temporarily encoded to bytes (and the tokens decoded
	back), since python2's shlex.split() cannot handle unicode strings.
	"""
	needs_encoding = sys.hexversion < 0x3000000 and not isinstance(s, bytes)
	if needs_encoding:
		s = _unicode_encode(s)
	tokens = shlex.split(s)
	if needs_encoding:
		tokens = [_unicode_decode(token) for token in tokens]
	return tokens
class _tolerant_shlex(shlex.shlex):
	"""shlex subclass whose "source" directive handling logs failures
	instead of propagating them, so one broken 'source' line does not
	abort parsing of the whole config file (used by getconfig() when
	tolerant=1)."""
	def sourcehook(self, newfile):
		# On any EnvironmentError (missing/unreadable sourced file),
		# report the problem and substitute an empty stream so lexing
		# can continue.
		try:
			return shlex.shlex.sourcehook(self, newfile)
		except EnvironmentError as e:
			writemsg(_("!!! Parse error in '%s': source command failed: %s\n") % \
				(self.infile, str(e)), noiselevel=-1)
			return (newfile, io.StringIO())
# Matches names that are invalid as shell variable identifiers: a
# leading digit, or any non-word character anywhere in the name.
_invalid_var_name_re = re.compile(r'^\d|\W')
def getconfig(mycfg, tolerant=0, allow_sourcing=False, expand=True):
	"""Parse a bash-like KEY=VALUE config file with shlex and return a
	dict of the variables it defines, or None if the file is missing.

	@param mycfg: path of the config file
	@param tolerant: if true, log parse errors and return the keys
		gathered so far instead of raising ParseError
	@param allow_sourcing: honor "source" statements in the file
	@param expand: if True, expand ${VAR} references via varexpand();
		if a dict, use (and update) it as the initial substitution map
	@raises ParseError: on malformed input when not tolerant
	"""
	if isinstance(expand, dict):
		# Some existing variable definitions have been
		# passed in, for use in substitutions.
		expand_map = expand
		expand = True
	else:
		expand_map = {}
	mykeys = {}
	f = None
	try:
		# NOTE: shlex doesn't support unicode objects with Python 2
		# (produces spurious \0 characters).
		if sys.hexversion < 0x3000000:
			f = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), 'rb')
		else:
			f = open(_unicode_encode(mycfg,
				encoding=_encodings['fs'], errors='strict'), mode='r',
				encoding=_encodings['content'], errors='replace')
		content = f.read()
	except IOError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mycfg)
		# A missing file is silently treated as "no config"; any other
		# error is logged, and re-raised unless it is EISDIR.
		if e.errno != errno.ENOENT:
			writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
			if e.errno not in (errno.EISDIR,):
				raise
		return None
	finally:
		if f is not None:
			f.close()

	# Workaround for avoiding a silent error in shlex that is
	# triggered by a source statement at the end of the file
	# without a trailing newline after the source statement.
	if content and content[-1] != '\n':
		content += '\n'

	# Warn about dos-style line endings since that prevents
	# people from being able to source them with bash.
	if '\r' in content:
		writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
			"in config file: '%s'") + "\n") % mycfg, noiselevel=-1)

	lex = None
	try:
		if tolerant:
			shlex_class = _tolerant_shlex
		else:
			shlex_class = shlex.shlex
		# The default shlex.sourcehook() implementation
		# only joins relative paths when the infile
		# attribute is properly set.
		lex = shlex_class(content, infile=mycfg, posix=True)
		lex.wordchars = string.digits + string.ascii_letters + \
			"~!@#$%*_\:;?,./-+{}"
		lex.quotes="\"'"
		if allow_sourcing:
			lex.source="source"

		# Token stream is expected to repeat: [export] KEY '=' VALUE
		while 1:
			key=lex.get_token()
			if key == "export":
				# "export" is accepted and ignored before a key.
				key = lex.get_token()
			if key is None:
				#normal end of file
				break;

			equ=lex.get_token()
			if (equ==''):
				msg = lex.error_leader() + _("Unexpected EOF")
				if not tolerant:
					raise ParseError(msg)
				else:
					writemsg("%s\n" % msg, noiselevel=-1)
					return mykeys
			elif (equ!='='):
				msg = lex.error_leader() + \
					_("Invalid token '%s' (not '=')") % (equ,)
				if not tolerant:
					raise ParseError(msg)
				else:
					writemsg("%s\n" % msg, noiselevel=-1)
					return mykeys

			val=lex.get_token()
			if val is None:
				msg = lex.error_leader() + \
					_("Unexpected end of config file: variable '%s'") % (key,)
				if not tolerant:
					raise ParseError(msg)
				else:
					writemsg("%s\n" % msg, noiselevel=-1)
					return mykeys

			key = _unicode_decode(key)
			val = _unicode_decode(val)

			if _invalid_var_name_re.search(key) is not None:
				msg = lex.error_leader() + \
					_("Invalid variable name '%s'") % (key,)
				if not tolerant:
					raise ParseError(msg)
				writemsg("%s\n" % msg, noiselevel=-1)
				continue

			if expand:
				mykeys[key] = varexpand(val, mydict=expand_map,
					error_leader=lex.error_leader)
				# Later assignments can reference earlier ones.
				expand_map[key] = mykeys[key]
			else:
				mykeys[key] = val
	except SystemExit as e:
		raise
	except Exception as e:
		# Wrap unexpected lexer errors with positional context before
		# re-raising; ParseError (or a pre-lexer failure) passes through.
		if isinstance(e, ParseError) or lex is None:
			raise
		msg = _unicode_decode("%s%s") % (lex.error_leader(), e)
		writemsg("%s\n" % msg, noiselevel=-1)
		raise

	return mykeys
# Characters permitted in a ${VAR} / $VAR name by varexpand() below.
_varexpand_word_chars = frozenset(string.ascii_letters + string.digits + "_")
# Mirrors bash's diagnostic for an unterminated ${ expansion.
_varexpand_unexpected_eof_msg = "unexpected EOF while looking for matching `}'"
def varexpand(mystring, mydict=None, error_leader=None):
	"""
	new variable expansion code. Preserves quotes, handles \n, etc.
	This code is used by the configfile code, as well as others (parser)
	This would be a good bunch of code to port to C.

	Expands $VAR and ${VAR} references using mydict, converts unquoted
	newlines to spaces, and performs backslash removal for \\ and \$
	only.  Returns "" (after logging via error_leader/writemsg) on an
	unterminated ${ or an empty variable name.
	"""
	if mydict is None:
		mydict = {}

	numvars=0
	#in single, double quotes
	insing=0
	indoub=0
	pos = 0
	length = len(mystring)
	newstring = []
	# Character-by-character scan; pos is only advanced by the branch
	# that consumed the character(s), so the order of branches matters.
	while pos < length:
		current = mystring[pos]
		if current == "'":
			if (indoub):
				newstring.append("'")
			else:
				newstring.append("'") # Quote removal is handled by shlex.
				insing=not insing
			pos=pos+1
			continue
		elif current == '"':
			if (insing):
				newstring.append('"')
			else:
				newstring.append('"') # Quote removal is handled by shlex.
				indoub=not indoub
			pos=pos+1
			continue
		if (not insing):
			#expansion time
			if current == "\n":
				#convert newlines to spaces
				newstring.append(" ")
				pos += 1
			elif current == "\\":
				# For backslash expansion, this function used to behave like
				# echo -e, but that's not needed for our purposes. We want to
				# behave like bash does when expanding a variable assignment
				# in a sourced file, in which case it performs backslash
				# removal for \\ and \$ but nothing more. It also removes
				# escaped newline characters. Note that we don't handle
				# escaped quotes here, since getconfig() uses shlex
				# to handle that earlier.
				if (pos+1>=len(mystring)):
					newstring.append(current)
					break
				else:
					current = mystring[pos + 1]
					pos += 2
					if current == "$":
						newstring.append(current)
					elif current == "\\":
						newstring.append(current)
						# BUG: This spot appears buggy, but it's intended to
						# be bug-for-bug compatible with existing behavior.
						if pos < length and \
							mystring[pos] in ("'", '"', "$"):
							newstring.append(mystring[pos])
							pos += 1
					elif current == "\n":
						pass
					else:
						newstring.append(mystring[pos - 2:pos])
					continue
			elif current == "$":
				# NOTE(review): a trailing "$" (pos+1 == length) raises
				# IndexError on the next line -- confirm callers never
				# pass a bare trailing "$".
				pos=pos+1
				if mystring[pos]=="{":
					pos=pos+1
					braced=True
				else:
					braced=False
				myvstart=pos
				# Consume the variable name; bail out with "" on EOF
				# inside a braced expansion.
				while mystring[pos] in _varexpand_word_chars:
					if (pos+1)>=len(mystring):
						if braced:
							msg = _varexpand_unexpected_eof_msg
							if error_leader is not None:
								msg = error_leader() + msg
							writemsg(msg + "\n", noiselevel=-1)
							return ""
						else:
							pos=pos+1
							break
					pos=pos+1
				myvarname=mystring[myvstart:pos]
				if braced:
					if mystring[pos]!="}":
						msg = _varexpand_unexpected_eof_msg
						if error_leader is not None:
							msg = error_leader() + msg
						writemsg(msg + "\n", noiselevel=-1)
						return ""
					else:
						pos=pos+1
				if len(myvarname)==0:
					msg = "$"
					if braced:
						msg += "{}"
					msg += ": bad substitution"
					if error_leader is not None:
						msg = error_leader() + msg
					writemsg(msg + "\n", noiselevel=-1)
					return ""
				numvars=numvars+1
				# Unknown variables expand to nothing (no error).
				if myvarname in mydict:
					newstring.append(mydict[myvarname])
			else:
				newstring.append(current)
				pos += 1
		else:
			# Inside single quotes: copy verbatim, no expansion.
			newstring.append(current)
			pos += 1

	return "".join(newstring)
# broken and removed, but can still be imported
# (kept as a None placeholder so existing "pickle_write" imports of this
# name keep working; the callable itself no longer exists)
pickle_write = None
def pickle_read(filename, default=None, debug=0):
	"""Load a pickled object from filename, returning default if the
	file is unreadable or unpickling fails.

	WARNING: pickle can execute arbitrary code during load; only call
	this on trusted files.

	@param filename: path of the pickle file
	@param default: value returned on any failure
	@param debug: unused; retained for interface compatibility
	"""
	if not os.access(filename, os.R_OK):
		writemsg(_("pickle_read(): File not readable. '")+filename+"'\n",1)
		return default
	data = None
	try:
		# Use a context manager so the file descriptor is closed even
		# when Unpickler.load() raises; the previous code leaked the
		# descriptor on that path.
		with open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'), 'rb') as myf:
			data = pickle.Unpickler(myf).load()
		writemsg(_("pickle_read(): Loaded pickle. '")+filename+"'\n",1)
	except SystemExit:
		raise
	except Exception as e:
		writemsg(_("!!! Failed to load pickle: ")+str(e)+"\n",1)
		data = default
	return data
def dump_traceback(msg, noiselevel=1):
	"""Log msg together with the active exception's traceback, or --
	when no exception is being handled -- the current call stack, using
	writemsg() at the given noiselevel."""
	exc_type, exc_value, exc_tb = sys.exc_info()
	if exc_tb:
		frames = traceback.extract_tb(exc_tb)
		error = str(exc_value)
	else:
		# No active exception: show the stack, minus this frame.
		frames = traceback.extract_stack()[:-1]
		error = None
	writemsg("\n====================================\n", noiselevel=noiselevel)
	writemsg("%s\n\n" % msg, noiselevel=noiselevel)
	for line in traceback.format_list(frames):
		writemsg(line, noiselevel=noiselevel)
	if error:
		writemsg(error+"\n", noiselevel=noiselevel)
	writemsg("====================================\n\n", noiselevel=noiselevel)
class cmp_sort_key(object):
	"""
	Adapter that turns an old-style cmp function into a callable usable
	as the "key" keyword argument of list.sort()/sorted(), since
	python-3.0 dropped the "cmp" argument.  Calling the adapter wraps a
	value in a key object whose __lt__ defers to the cmp function.
	"""
	__slots__ = ("_cmp_func",)

	def __init__(self, cmp_func):
		"""
		@type cmp_func: callable which takes 2 positional arguments
		@param cmp_func: A cmp function.
		"""
		self._cmp_func = cmp_func

	def __call__(self, lhs):
		return self._cmp_key(self._cmp_func, lhs)

	class _cmp_key(object):
		"""Per-value wrapper; comparisons go through the cmp function."""
		__slots__ = ("_cmp_func", "_obj")

		def __init__(self, cmp_func, obj):
			self._cmp_func = cmp_func
			self._obj = obj

		def __lt__(self, other):
			# Only keys of the exact same class are comparable.
			if self.__class__ is not other.__class__:
				raise TypeError("Expected type %s, got %s" % \
					(self.__class__, other.__class__))
			return self._cmp_func(self._obj, other._obj) < 0
def unique_array(s):
	"""Return the elements of s without duplicates, in arbitrary order.
	(Python Cookbook recipe; credit: Tim Peters.)"""
	n = len(s)
	# Fast path: hashable elements let set() dedupe in linear time.
	try:
		return list(set(s))
	except TypeError:
		pass
	# Second attempt: sortable elements -> sort, then sweep out runs of
	# equal neighbors in place.
	try:
		t = sorted(s)
	except TypeError:
		pass
	else:
		assert n > 0
		last = t[0]
		lasti = i = 1
		while i < n:
			if t[i] != last:
				t[lasti] = last = t[i]
				lasti += 1
			i += 1
		return t[:lasti]
	# Last resort: quadratic membership scan, preserving the first
	# occurrence of each element.
	u = []
	for x in s:
		if x not in u:
			u.append(x)
	return u
def unique_everseen(iterable, key=None):
	"""
	Yield unique elements of iterable in order, remembering every
	element (or key(element)) ever seen.  Based on the recipe in the
	itertools documentation.

	unique_everseen('AAAABBBCCDAABBB') --> A B C D
	unique_everseen('ABBCcAD', str.lower) --> A B C D
	"""
	seen = set()
	for element in iterable:
		marker = element if key is None else key(element)
		if marker not in seen:
			seen.add(marker)
			yield element
def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""Apply user, group, and mode bits to a file if the existing bits do not
	already match. The default behavior is to force an exact match of mode
	bits. When mask=0 is specified, mode bits on the target file are allowed
	to be a superset of the mode argument (via logical OR). When mask>0, the
	mode bits that the target file is allowed to have are restricted via
	logical XOR.
	Returns True if the permissions were modified and False otherwise."""
	modified = False

	# Stat once (unless the caller supplied a cached result) so the
	# ownership and mode checks below share one snapshot.
	if stat_cached is None:
		try:
			if follow_links:
				stat_cached = os.stat(filename)
			else:
				stat_cached = os.lstat(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	if (uid != -1 and uid != stat_cached.st_uid) or \
		(gid != -1 and gid != stat_cached.st_gid):
		try:
			if follow_links:
				os.chown(filename, uid, gid)
			else:
				portage.data.lchown(filename, uid, gid)
			modified = True
		except OSError as oe:
			func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	new_mode = -1
	st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
	if mask >= 0:
		if mode == -1:
			mode = 0 # Don't add any mode bits when mode is unspecified.
		else:
			mode = mode & 0o7777
		# A change is needed when st_mode is missing bits from mode, or
		# carries bits that mask forbids.  (mask ^ x) & x clears from x
		# exactly the bits that are set in both mask and x.
		if (mode & st_mode != mode) or \
			((mask ^ st_mode) & st_mode != st_mode):
			new_mode = mode | st_mode
			new_mode = (mask ^ new_mode) & new_mode
	elif mode != -1:
		mode = mode & 0o7777 # protect from unwanted bits
		if mode != st_mode:
			new_mode = mode

	# The chown system call may clear S_ISUID and S_ISGID
	# bits, so those bits are restored if necessary.
	if modified and new_mode == -1 and \
		(st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
		if mode == -1:
			new_mode = st_mode
		else:
			mode = mode & 0o7777
			if mask >= 0:
				new_mode = mode | st_mode
				new_mode = (mask ^ new_mode) & new_mode
			else:
				new_mode = mode
			# Only a chmod that would restore setuid/setgid is worth
			# performing here.
			if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
				new_mode = -1

	if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
		# Mode doesn't matter for symlinks.
		new_mode = -1

	if new_mode != -1:
		try:
			os.chmod(filename, new_mode)
			modified = True
		except OSError as oe:
			func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			# Any other errno falls through to a plain re-raise.
			raise
	return modified
def apply_stat_permissions(filename, newstat, **kwargs):
	"""A wrapper around apply_secpass_permissions that gets
	uid, gid, and mode from a stat object"""
	# newstat.st_mode includes file-type bits; apply_permissions masks
	# mode with 0o7777, so passing the full st_mode is safe.
	return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
		mode=newstat.st_mode, **kwargs)
def apply_recursive_permissions(top, uid=-1, gid=-1,
	dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
	"""A wrapper around apply_secpass_permissions that applies permissions
	recursively. If optional argument onerror is specified, it should be a
	function; it will be called with one argument, a PortageException instance.
	Returns True if all permissions are applied and False if some are left
	unapplied."""

	# Avoid issues with circular symbolic links, as in bug #339670.
	follow_links = False

	if onerror is None:
		# Default behavior is to dump errors to stderr so they won't
		# go unnoticed. Callers can pass in a quiet instance.
		def onerror(e):
			if isinstance(e, OperationNotPermitted):
				writemsg(_("Operation Not Permitted: %s\n") % str(e),
					noiselevel=-1)
			elif isinstance(e, FileNotFound):
				writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
			else:
				raise

	all_applied = True
	for dirpath, dirnames, filenames in os.walk(top):
		# Directories get dirmode/dirmask; contained files get
		# filemode/filemask.
		try:
			applied = apply_secpass_permissions(dirpath,
				uid=uid, gid=gid, mode=dirmode, mask=dirmask,
				follow_links=follow_links)
			if not applied:
				all_applied = False
		except PortageException as e:
			all_applied = False
			onerror(e)

		for name in filenames:
			try:
				applied = apply_secpass_permissions(os.path.join(dirpath, name),
					uid=uid, gid=gid, mode=filemode, mask=filemask,
					follow_links=follow_links)
				if not applied:
					all_applied = False
			except PortageException as e:
				# Ignore InvalidLocation exceptions such as FileNotFound
				# and DirectoryNotFound since sometimes things disappear,
				# like when adjusting permissions on DISTCC_DIR.
				if not isinstance(e, portage.exception.InvalidLocation):
					all_applied = False
					onerror(e)
	return all_applied
def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
	stat_cached=None, follow_links=True):
	"""A wrapper around apply_permissions that uses secpass and simple
	logic to apply as much of the permissions as possible without
	generating an obviously avoidable permission exception. Despite
	attempts to avoid an exception, it's possible that one will be raised
	anyway, so be prepared.
	Returns True if all permissions are applied and False if some are left
	unapplied."""

	if stat_cached is None:
		try:
			if follow_links:
				stat_cached = os.stat(filename)
			else:
				stat_cached = os.lstat(filename)
		except OSError as oe:
			func_call = "stat('%s')" % filename
			if oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.ENOENT:
				raise FileNotFound(filename)
			else:
				raise

	all_applied = True

	# Without full privileges (secpass < 2), drop ownership changes
	# that chown would refuse anyway and report partial application
	# instead of raising: uid changes always need privileges, gid
	# changes only when the caller isn't a member of the target group.
	if portage.data.secpass < 2:

		if uid != -1 and \
			uid != stat_cached.st_uid:
			all_applied = False
			uid = -1

		if gid != -1 and \
			gid != stat_cached.st_gid and \
			gid not in os.getgroups():
			all_applied = False
			gid = -1

	apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
		stat_cached=stat_cached, follow_links=follow_links)

	return all_applied
class atomic_ofstream(ObjectProxy):
	"""Write a file atomically via os.rename(). Atomic replacement prevents
	interprocess interference and prevents corruption of the target
	file when the write is interrupted (for example, when an 'out of space'
	error occurs)."""

	def __init__(self, filename, mode='w', follow_links=True, **kargs):
		"""Opens a temporary filename.pid in the same directory as filename."""
		ObjectProxy.__init__(self)
		# All state goes through object.__setattr__ because attribute
		# access is proxied to the wrapped file (see __getattribute__).
		object.__setattr__(self, '_aborted', False)
		if 'b' in mode:
			open_func = open
		else:
			open_func = io.open
			kargs.setdefault('encoding', _encodings['content'])
			kargs.setdefault('errors', 'backslashreplace')

		if follow_links:
			# Prefer writing next to the symlink target so the rename
			# stays on the same filesystem as the real file.
			canonical_path = os.path.realpath(filename)
			object.__setattr__(self, '_real_name', canonical_path)
			tmp_name = "%s.%i" % (canonical_path, os.getpid())
			try:
				object.__setattr__(self, '_file',
					open_func(_unicode_encode(tmp_name,
						encoding=_encodings['fs'], errors='strict'),
						mode=mode, **kargs))
				return
			except IOError as e:
				if canonical_path == filename:
					raise
				# Ignore this error, since it's irrelevant
				# and the below open call will produce a
				# new error if necessary.

		# Fallback: open the temp file next to filename itself.
		object.__setattr__(self, '_real_name', filename)
		tmp_name = "%s.%i" % (filename, os.getpid())
		object.__setattr__(self, '_file',
			open_func(_unicode_encode(tmp_name,
				encoding=_encodings['fs'], errors='strict'),
				mode=mode, **kargs))

	def _get_target(self):
		return object.__getattribute__(self, '_file')

	if sys.hexversion >= 0x3000000:

		def __getattribute__(self, attr):
			# Everything except the lifecycle methods is delegated to
			# the underlying temp file object.
			if attr in ('close', 'abort', '__del__'):
				return object.__getattribute__(self, attr)
			return getattr(object.__getattribute__(self, '_file'), attr)

	else:

		# For TextIOWrapper, automatically coerce write calls to
		# unicode, in order to avoid TypeError when writing raw
		# bytes with python2.

		def __getattribute__(self, attr):
			if attr in ('close', 'abort', 'write', '__del__'):
				return object.__getattribute__(self, attr)
			return getattr(object.__getattribute__(self, '_file'), attr)

		def write(self, s):
			f = object.__getattribute__(self, '_file')
			if isinstance(f, io.TextIOWrapper):
				s = _unicode_decode(s)
			return f.write(s)

	def close(self):
		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename(). If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
		if not f.closed:
			try:
				f.close()
				if not object.__getattribute__(self, '_aborted'):
					try:
						# Carry the target's existing permissions over
						# to the replacement before renaming.
						apply_stat_permissions(f.name, os.stat(real_name))
					except OperationNotPermitted:
						pass
					except FileNotFound:
						pass
					except OSError as oe: # from the above os.stat call
						if oe.errno in (errno.ENOENT, errno.EPERM):
							pass
						else:
							raise
					os.rename(f.name, real_name)
			finally:
				# Make sure we cleanup the temp file
				# even if an exception is raised.
				# (After a successful rename the unlink fails with
				# ENOENT, which is swallowed here.)
				try:
					os.unlink(f.name)
				except OSError as oe:
					pass

	def abort(self):
		"""If an error occurs while writing the file, the user should
		call this method in order to leave the target file unchanged.
		This will call close() automatically."""
		if not object.__getattribute__(self, '_aborted'):
			object.__setattr__(self, '_aborted', True)
			self.close()

	def __del__(self):
		"""If the user does not explicitely call close(), it is
		assumed that an error has occurred, so we abort()."""
		try:
			f = object.__getattribute__(self, '_file')
		except AttributeError:
			pass
		else:
			if not f.closed:
				self.abort()
		# ensure destructor from the base class is called
		base_destructor = getattr(ObjectProxy, '__del__', None)
		if base_destructor is not None:
			base_destructor(self)
def write_atomic(file_path, content, **kwargs):
	"""Write content to file_path atomically via atomic_ofstream,
	aborting the temp file on failure and translating common OS errors
	into portage exception types.

	@param file_path: target path
	@param content: data passed to the stream's write()
	@param kwargs: forwarded to atomic_ofstream (mode, encoding, ...)
	@raises OperationNotPermitted, PermissionDenied,
		ReadOnlyFileSystem, FileNotFound: translated from errno
	"""
	f = None
	try:
		f = atomic_ofstream(file_path, **kwargs)
		f.write(content)
		f.close()
	except (IOError, OSError) as e:
		# Compare against None explicitly: truth-testing the proxy
		# would delegate __bool__ to the wrapped file object via
		# atomic_ofstream.__getattribute__.
		if f is not None:
			f.abort()
		func_call = "write_atomic('%s')" % file_path
		if e.errno == errno.EPERM:
			raise OperationNotPermitted(func_call)
		elif e.errno == errno.EACCES:
			raise PermissionDenied(func_call)
		elif e.errno == errno.EROFS:
			raise ReadOnlyFileSystem(func_call)
		elif e.errno == errno.ENOENT:
			raise FileNotFound(file_path)
		else:
			raise
def ensure_dirs(dir_path, **kwargs):
	"""Create a directory and call apply_permissions.
	Returns True if a directory is created or the permissions needed to be
	modified, and False otherwise.

	This function's handling of EEXIST errors makes it useful for atomic
	directory creation, in which multiple processes may be competing to
	create the same directory.

	@param kwargs: forwarded to apply_permissions (uid, gid, mode, mask);
		permissions are only touched when kwargs is non-empty
	"""
	created_dir = False

	try:
		os.makedirs(dir_path)
		created_dir = True
	except OSError as oe:
		func_call = "makedirs('%s')" % dir_path
		if oe.errno in (errno.EEXIST,):
			# Lost the race to another process; not an error.
			pass
		else:
			if os.path.isdir(dir_path):
				# NOTE: DragonFly raises EPERM for makedir('/')
				# and that is supposed to be ignored here.
				# Also, sometimes mkdir raises EISDIR on FreeBSD
				# and we want to ignore that too (bug #187518).
				pass
			elif oe.errno == errno.EPERM:
				raise OperationNotPermitted(func_call)
			elif oe.errno == errno.EACCES:
				raise PermissionDenied(func_call)
			elif oe.errno == errno.EROFS:
				raise ReadOnlyFileSystem(func_call)
			else:
				raise
	if kwargs:
		perms_modified = apply_permissions(dir_path, **kwargs)
	else:
		perms_modified = False
	return created_dir or perms_modified
class LazyItemsDict(UserDict):
	"""A mapping object that behaves like a standard dict except that it allows
	for lazy initialization of values via callable objects. Lazy items can be
	overwritten and deleted just as normal items."""

	# Maps keys to _LazyItem wrappers; the UserDict itself stores a
	# None placeholder for each lazy key so len()/keys() work normally.
	__slots__ = ('lazy_items',)

	def __init__(self, *args, **kwargs):
		self.lazy_items = {}
		UserDict.__init__(self, *args, **kwargs)

	def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
		"""Add a lazy item for the given key. When the item is requested,
		value_callable will be called with *pargs and **kwargs arguments."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, False)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
		"""This is like addLazyItem except value_callable will only be called
		a maximum of 1 time and the result will be cached for future requests."""
		self.lazy_items[item_key] = \
			self._LazyItem(value_callable, pargs, kwargs, True)
		# make it show up in self.keys(), etc...
		UserDict.__setitem__(self, item_key, None)

	def update(self, *args, **kwargs):
		if len(args) > 1:
			raise TypeError(
				"expected at most 1 positional argument, got " + \
				repr(len(args)))
		if args:
			map_obj = args[0]
		else:
			map_obj = None
		if map_obj is None:
			pass
		elif isinstance(map_obj, LazyItemsDict):
			# Copy lazy items as lazy (placeholder only) instead of
			# evaluating them.
			for k in map_obj:
				if k in map_obj.lazy_items:
					UserDict.__setitem__(self, k, None)
				else:
					UserDict.__setitem__(self, k, map_obj[k])
			self.lazy_items.update(map_obj.lazy_items)
		else:
			UserDict.update(self, map_obj)
		if kwargs:
			UserDict.update(self, kwargs)

	def __getitem__(self, item_key):
		if item_key in self.lazy_items:
			lazy_item = self.lazy_items[item_key]
			pargs = lazy_item.pargs
			if pargs is None:
				pargs = ()
			kwargs = lazy_item.kwargs
			if kwargs is None:
				kwargs = {}
			result = lazy_item.func(*pargs, **kwargs)
			if lazy_item.singleton:
				# Cache the result; __setitem__ drops the lazy entry.
				self[item_key] = result
			return result

		else:
			return UserDict.__getitem__(self, item_key)

	def __setitem__(self, item_key, value):
		# An explicit assignment overrides any pending lazy item.
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__setitem__(self, item_key, value)

	def __delitem__(self, item_key):
		if item_key in self.lazy_items:
			del self.lazy_items[item_key]
		UserDict.__delitem__(self, item_key)

	def clear(self):
		self.lazy_items.clear()
		UserDict.clear(self)

	def copy(self):
		return self.__copy__()

	def __copy__(self):
		return self.__class__(self)

	def __deepcopy__(self, memo=None):
		"""
		This forces evaluation of each contained lazy item, and deepcopy of
		the result. A TypeError is raised if any contained lazy item is not
		a singleton, since it is not necessarily possible for the behavior
		of this type of item to be safely preserved.
		"""
		if memo is None:
			memo = {}
		result = self.__class__()
		memo[id(self)] = result
		for k in self:
			k_copy = deepcopy(k, memo)
			lazy_item = self.lazy_items.get(k)
			if lazy_item is not None:
				if not lazy_item.singleton:
					raise TypeError(_unicode_decode("LazyItemsDict " + \
						"deepcopy is unsafe with lazy items that are " + \
						"not singletons: key=%s value=%s") % (k, lazy_item,))
			UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
		return result

	class _LazyItem(object):
		"""Record describing one deferred value: the callable, its
		arguments, and whether the result should be cached."""

		__slots__ = ('func', 'pargs', 'kwargs', 'singleton')

		def __init__(self, func, pargs, kwargs, singleton):

			# Normalize empty argument containers to None to save space.
			if not pargs:
				pargs = None
			if not kwargs:
				kwargs = None

			self.func = func
			self.pargs = pargs
			self.kwargs = kwargs
			self.singleton = singleton

		def __copy__(self):
			return self.__class__(self.func, self.pargs,
				self.kwargs, self.singleton)

		def __deepcopy__(self, memo=None):
			"""
			Override this since the default implementation can fail silently,
			leaving some attributes unset.
			"""
			if memo is None:
				memo = {}
			result = self.__copy__()
			memo[id(self)] = result
			result.func = deepcopy(self.func, memo)
			result.pargs = deepcopy(self.pargs, memo)
			result.kwargs = deepcopy(self.kwargs, memo)
			result.singleton = deepcopy(self.singleton, memo)
			return result
class ConfigProtect(object):
	"""Decides whether paths fall under CONFIG_PROTECT while not being
	excluded by CONFIG_PROTECT_MASK, based on snapshots of the
	filesystem taken by updateprotect()."""

	def __init__(self, myroot, protect_list, mask_list):
		# myroot: root prefix joined onto every protect/mask entry.
		# protect_list / mask_list: path fragments relative to myroot.
		self.myroot = myroot
		self.protect_list = protect_list
		self.mask_list = mask_list
		self.updateprotect()

	def updateprotect(self):
		"""Update internal state for isprotected() calls. Nonexistent paths
		are ignored."""

		os = _os_merge

		self.protect = []
		self._dirs = set()
		for x in self.protect_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protect.append(ppath)
			except OSError:
				# If it doesn't exist, there's no need to protect it.
				pass

		self.protectmask = []
		for x in self.mask_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				"""Use lstat so that anything, even a broken symlink can be
				protected."""
				if stat.S_ISDIR(os.lstat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protectmask.append(ppath)
				"""Now use stat in case this is a symlink to a directory."""
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
			except OSError:
				# If it doesn't exist, there's no need to mask it.
				pass

	def isprotected(self, obj):
		"""Returns True if obj is protected, False otherwise. The caller must
		ensure that obj is normalized with a single leading slash. A trailing
		slash is optional for directories."""
		# Longest-match wins: obj is protected only when the longest
		# matching protect path is longer than the longest matching
		# mask path.
		masked = 0
		protected = 0
		sep = os.path.sep
		for ppath in self.protect:
			if len(ppath) > masked and obj.startswith(ppath):
				if ppath in self._dirs:
					if obj != ppath and not obj.startswith(ppath + sep):
						# /etc/foo does not match /etc/foobaz
						continue
				elif obj != ppath:
					# force exact match when CONFIG_PROTECT lists a
					# non-directory
					continue
				protected = len(ppath)
				#config file management
				for pmpath in self.protectmask:
					if len(pmpath) >= protected and obj.startswith(pmpath):
						if pmpath in self._dirs:
							if obj != pmpath and \
								not obj.startswith(pmpath + sep):
								# /etc/foo does not match /etc/foobaz
								continue
						elif obj != pmpath:
							# force exact match when CONFIG_PROTECT_MASK lists
							# a non-directory
							continue
						#skip, it's in the mask
						masked = len(pmpath)
		return protected > masked
def new_protect_filename(mydest, newmd5=None, force=False):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches. If force is True,
	then a new filename will be generated even if mydest does not
	exist yet.
	(dest,md5) ==> 'string' --- path_to_target_filename
	(dest) ==> ('next', 'highest') --- next_target and most-recent_target
	"""

	# config protection filename format:
	# ._cfg0000_foo
	# 0123456789012

	os = _os_merge

	prot_num = -1
	last_pfile = ""

	if not force and \
		not os.path.exists(mydest):
		return mydest

	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)
	# Scan existing ._cfg####_<name> siblings to find the highest
	# protection number already in use.
	for pfile in os.listdir(real_dirname):
		if pfile[0:5] != "._cfg":
			continue
		if pfile[10:] != real_filename:
			continue
		try:
			new_prot_num = int(pfile[5:9])
			if new_prot_num > prot_num:
				prot_num = new_prot_num
				last_pfile = pfile
		except ValueError:
			continue
	prot_num = prot_num + 1

	new_pfile = normalize_path(os.path.join(real_dirname,
		"._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
	old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
	if last_pfile and newmd5:
		# Reuse the most recent protect file when its content already
		# matches what we are about to write.
		try:
			last_pfile_md5 = portage.checksum._perform_md5_merge(old_pfile)
		except FileNotFound:
			# The file suddenly disappeared or it's a broken symlink.
			pass
		else:
			if last_pfile_md5 == newmd5:
				return old_pfile
	return new_pfile
def find_updated_config_files(target_root, config_protect):
	"""
	Generator yielding one tuple per protected path that has pending
	config updates (._cfg????_* files found via an external `find`):
	( protected_dir, file_list ) when the protected path is a directory;
	( protected_file, None ) when it is a single protected file.
	Yields nothing when no configuration files need to be updated.
	"""
	encoding = _encodings['fs']

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			# Skip paths we could not update anyway.
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			if stat.S_ISDIR(mymode):
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
					os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			cmd = shlex_split(mycommand)

			if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
				# Python 3.1 does not support bytes in Popen args.
				cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
					for arg in cmd]
			proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
				stderr=subprocess.STDOUT)
			output = _unicode_decode(proc.communicate()[0], encoding=encoding)
			status = proc.wait()
			if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
				# find's -print0 output is NUL-delimited.
				files = output.split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
# Matches "include <pattern>" directives in ld.so.conf files; group 1
# captures the (possibly glob) pattern.
_ld_so_include_re = re.compile(r'^include\s+(\S.*)')
def getlibpaths(root, env=None):
	""" Return a list of paths that are used for library lookups """

	def read_ld_so_conf(path):
		# Yield each entry of an ld.so.conf file, recursively expanding
		# "include <glob>" directives relative to the file's directory.
		for l in grabfile(path):
			include_match = _ld_so_include_re.match(l)
			if include_match is not None:
				subpath = os.path.join(os.path.dirname(path),
					include_match.group(1))
				for p in glob.glob(subpath):
					for r in read_ld_so_conf(p):
						yield r
			else:
				yield l

	if env is None:
		env = os.environ
	# the following is based on the information from ld.so(8)
	rval = env.get("LD_LIBRARY_PATH", "").split(":")
	rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
	rval.append("/usr/lib")
	rval.append("/lib")

	# Drop empty entries produced by splitting an empty LD_LIBRARY_PATH.
	return [normalize_path(x) for x in rval if x]
|
from django.shortcuts import render
from django.utils import timezone
from django.views import generic
from .models import Post
# Create your views here.
# def post_list(request):
# posts = Post.objects.filter(date_published__lte=timezone.now()).order_by('date_published')
# return render(request, 'blog/post_list.html', {'posts': posts})
class IndexView(generic.ListView):
    # List page: renders blog/post_list.html with the published posts
    # exposed to the template as `latest_post_list`.
    template_name = 'blog/post_list.html'
    context_object_name = 'latest_post_list'

    def get_queryset(self):
        """Return all posts published up to now (date_published <= now)."""
        return Post.objects.filter(date_published__lte=timezone.now())
class DetailView(generic.DetailView):
    # Detail page for a single Post, rendered with blog/detail.html.
    model = Post
    template_name = 'blog/detail.html'
import torch
import numpy
import gym
class MaxDispersionSampler:
    """
    Uniformly samples the input space of an env.

    Builds a fixed set of action sequences whose simulated trajectories are
    maximally spread out (farthest-point selection), then replays them from
    arbitrary states via sample().
    """
    def __init__(self, env, n_steps, n_skills, n_seed_samples = 50000):
        # env must expose: ego_state (tensor), step(), reset(), dt,
        # speed_limit and veh.propagate_from_tensor — TODO confirm interface.
        print('building sampler')
        self.env = env
        self.actions = self.get_path_set(steps=n_steps, n_final=n_skills, n_samples = n_seed_samples)
        print('built sampler')

    def sample(self, state, n_contexts, n_steps):
        """Roll out the first n_contexts stored action sequences from
        `state`, returning [(states, actions), ...]; env state is restored
        afterwards."""
        s = self.env.ego_state.clone()
        samples = []
        for i in range(n_contexts):
            self.env.ego_state = state.clone()
            acts = []
            states = []
            for n in range(n_steps):
                a = self.actions[i][n]
                # NOTE(review): these divisions mutate the stored action
                # tensors in place, so repeated sample() calls keep shrinking
                # them — presumably a normalization; verify intent.
                a[0] /= 4
                a[1] /= .2618
                self.env.step(a)
                s_new = self.env.ego_state.clone()
                acts.append(a)
                states.append(s_new)
            samples.append((states, acts))
        # Restore the env state captured on entry.
        self.env.ego_state = s
        return samples

    def gen_samples(self, acc = torch.linspace(-4, 4, 5), w = torch.linspace(-.2618, .2618, 5), steps = 10, n_samples = 10000):
        """Return n_samples+1 random action sequences (first is all-zero);
        each action is [acceleration, steering] drawn from the given grids."""
        actions = []
        actions.append([torch.zeros(2) for i in range(steps)])
        for i in range(n_samples):
            self.env.reset()
            acts = []
            for t in range(steps):
                i1 = torch.randint(acc.shape[0], (1, ))
                i2 = torch.randint(w.shape[0], (1, ))
                a = torch.tensor([acc[i1], w[i2]])
                acts.append(a)
            actions.append(acts)
        return actions

    def get_path_set(self, acc = torch.linspace(-4, 4, 5), w = torch.linspace(-.2618, .2618, 5), steps = 10, n_samples = 10000, n_final=10):
        """Simulate every candidate sequence and greedily keep the n_final
        sequences whose xy-trajectories are farthest apart."""
        action_set = self.gen_samples(acc, w, steps, n_samples)
        # NOTE(review): action_set has n_samples+1 entries but only the
        # first n_samples are simulated below — confirm the off-by-one.
        actions_idxs = [0]
        states_out = []
        for n in range(n_samples):
            # State layout appears to be [x, y, heading, speed] — TODO confirm.
            init_state = torch.zeros(4)
            init_state[3] = self.env.speed_limit
            states_out.append([init_state])
            s_new = init_state.clone()
            for t in range(steps):
                s_new[3] += self.env.dt * action_set[n][t][0]
                d_s = self.env.veh.propagate_from_tensor(s_new, torch.tensor([s_new[3], action_set[n][t][1]]))
                s_new[:3] += self.env.dt * d_s
                states_out[n].append(s_new)
                s_new = s_new.clone()
            states_out[n] = torch.stack(states_out[n])
        states_out = torch.stack(states_out)
        # Farthest-point selection on summed xy distances to already-chosen
        # trajectories.
        for n in range(n_final - 1):
            mindists = []
            for path in states_out:
                # NOTE(review): the nested-list index [[actions_idxs]] adds a
                # leading singleton dim — verify this produces the intended
                # per-chosen-path distances.
                dists = states_out[[actions_idxs]] - path
                dists = dists[:, :, [0, 1]]
                dists = (dists[:, :, 0] ** 2 + dists[:, :, 1] ** 2)**0.5
                min_dist, _ = dists.sum(dim=1).min(dim=0)
                mindists.append(min_dist)
            mindists = torch.stack(mindists)
            new_idx = mindists.argmax()
            actions_idxs.append(new_idx)
        actions_out = []
        for i in actions_idxs:
            actions_out.append(action_set[i])
        return actions_out
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 5 15:45:49 2017
@author: sglusnev
"""
|
# Generated by Django 2.1.7 on 2019-02-27 10:38
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Profile.photo optional (blank/null FileField)."""

    dependencies = [
        ('core', '0007_auto_20190227_1037'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='photo',
            field=models.FileField(blank=True, null=True, upload_to='profiles'),
        ),
    ]
|
from .base_processing_node import BaseProcessingNode, ProcessingArtifact
class ReportProcessingNode(BaseProcessingNode):
    """Processing node that emits a JSON validation report artifact for the
    package's tabular sources."""

    def __init__(self, available_artifacts, outputs):
        super(ReportProcessingNode, self).__init__(available_artifacts, outputs)
        self.outputs = outputs
        self.fmt = 'json'

    def get_artifacts(self):
        """Yield a single 'derived/report' ProcessingArtifact that depends on
        every 'source/tabular' artifact and runs assembler.validate_resource."""
        datahub_type = 'derived/report'
        resource_name = 'validation_report'
        tabular_artifacts = [
            artifact for artifact in self.available_artifacts
            if artifact.datahub_type == 'source/tabular'
        ]
        output = ProcessingArtifact(
            datahub_type, resource_name,
            [], tabular_artifacts,
            [('assembler.validate_resource', {})],
            False, 'Validating package contents', content_type='application/json')
        yield output
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 12:27:09 2018
@author: sanika
"""
import requests
from bs4 import BeautifulSoup
from urllib.request import urlopen
import csv
dict_relevant_links = {}
dict_all_links = {}

# Fetch the Fortune India 500 listing page.
page = requests.get('https://www.fortuneindia.com/fortune-500/company-list/indian-oil-corporation?year=2017')
# collect and parse the page and create soup object
soup = BeautifulSoup(page.text, 'html.parser')

# Remove footer links so only company links remain.
unwanted_links = soup.find(class_='page-footer')
if unwanted_links is not None:
    unwanted_links.decompose()

main_url = "www.fortuneindia.com/fortune-500/company-list/"

# Build {company_name: company_page_url} from every anchor on the page.
for all_company_links in soup.findAll('a'):
    link = all_company_links.get('href')
    # BUGFIX: anchors without an href yield None, which made
    # `main_url in link` raise TypeError — skip them explicitly.
    if link and main_url in link:
        a = link.replace('https://www.fortuneindia.com/fortune-500/company-list/', '')
        company_name = a.split('?')[0]
        dict_all_links[company_name] = link

# Write one CSV row of financials per company.
# BUGFIX: the file handle was never closed; a context manager guarantees
# the CSV is flushed, and newline='' is the documented csv.writer mode.
rank = 0
with open('fortune500.csv', 'w', newline='') as csv_file:
    f = csv.writer(csv_file)
    f.writerow(['Rank', 'Company Name', 'Revenue', '% Change', 'Net Operating Income', '% Change', 'Profit', '% Change', 'Assets', '% Change',
                'Net Worth', '% Change', 'Equity Dividend', '% Change', 'Employee Cost', '% Change'])
    # Extract the first table from each company page.
    for key, value in dict_all_links.items():
        rank = rank + 1
        company_name = key
        temp_list = []
        html_page = urlopen(value)
        soup = BeautifulSoup(html_page, "html5lib")
        for row in soup('table')[0].findAll('tr'):
            tds = row('td')
            temp_list.append(tds[1].text)
            temp_list.append(tds[2].text)
        # First two cells are placeholders; overwrite with rank and name.
        temp_list[0] = rank
        temp_list[1] = company_name
        print(temp_list)
        f.writerow(temp_list[:16])
|
import os
import psycopg2
# Look up the Heroku Postgres URL via the Heroku CLI; [:-1] strips the
# trailing newline from the command output.
DATABASE_URL = os.popen('heroku config:get DATABASE_URL -a app_name').read()[:-1]
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = conn.cursor()
new = "Danny"
origin = "David"
# Parameterized UPDATE (values bound via %s placeholders, not interpolated).
postgres_update_query = f"""UPDATE test_table set name = %s WHERE name = %s"""
cursor.execute(postgres_update_query, (new, origin))
conn.commit()
count = cursor.rowcount
print(count, "Record update successfully into table")
|
#%%
# Initial imports
import pandas as pd
from sklearn.cluster import KMeans
import plotly.express as px
import hvplot.pandas
# %%
# Load the cleaned shopping dataset and preview it.
file_path = "/Users/oshadi/Desktop/Analysis Projects/Cryptocurrencies/module examples/shopping_data_cleaned.csv"
df_shopping = pd.read_csv(file_path)
df_shopping.head(10)
# %%
# Quick 2-D look at income vs. spending score before clustering.
df_shopping.hvplot.scatter(x="Annual Income", y="Spending Score (1-100)")
# %%
# Function to cluster and plot dataset
def test_cluster_amount(df, clusters):
    """Fit KMeans with `clusters` clusters and label `df` in place.

    Adds a "class" column to `df` holding each row's cluster label.
    """
    model = KMeans(n_clusters=clusters, random_state=5)
    # BUGFIX: removed the bare `model` expression statement — outside a
    # notebook cell's final line it is a no-op.
    model.fit(df)
    df["class"] = model.labels_
# %%
test_cluster_amount(df_shopping, 2)
df_shopping.hvplot.scatter(x="Annual Income", y="Spending Score (1-100)", by="class")
# %%
# 3-D view of the two shopping clusters.
fig = px.scatter_3d(
    df_shopping,
    x="Annual Income",
    y="Spending Score (1-100)",
    z="Age",
    color="class",
    symbol="class",
    width=800,
)
fig.update_layout(legend=dict(x=0, y=1))
fig.show()
# %%
# Load the iris dataset and preview it.
file_path = "/Users/oshadi/Desktop/Analysis Projects/Cryptocurrencies/module examples/new_iris_data.csv"
df_iris = pd.read_csv(file_path)
df_iris.head(10)
# %%
inertia = []
k = list(range(1, 11))
# %%
# Looking for the best K: record KMeans inertia for k = 1..10 on iris.
for i in k:
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(df_iris)
    inertia.append(km.inertia_)
# %%
# Define a DataFrame to plot the Elbow Curve using hvPlot
elbow_data = {"k": k, "inertia": inertia}
df_elbow = pd.DataFrame(elbow_data)
df_elbow.hvplot.line(x="k", y="inertia", title="Elbow Curve", xticks=k)
# %%
inertia = []
k = list(range(1, 11))
# Calculate the inertia for the range of K values
# NOTE(review): this cell fits df_shopping (not df_iris); the iris reload
# below then repeats the same computation — confirm which dataset the
# final elbow curve is meant to describe.
for i in k:
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(df_shopping)
    inertia.append(km.inertia_)
# %%
file_path = "/Users/oshadi/Desktop/Analysis Projects/Cryptocurrencies/module examples/new_iris_data.csv"
df_iris = pd.read_csv(file_path)
df_iris.head(10)
# %%
inertia = []
k = list(range(1, 11))
# Calculate the inertia for the range of K values
for i in k:
    km = KMeans(n_clusters=i, random_state=0)
    km.fit(df_shopping)
    inertia.append(km.inertia_)
# %%
def get_clusters(k, data):
    """Return a copy of `data` with a "class" column of KMeans labels.

    The input DataFrame is not modified.
    """
    # Create a copy of the DataFrame
    data = data.copy()
    # Initialize and fit the K-Means model
    model = KMeans(n_clusters=k, random_state=0)
    model.fit(data)
    # BUGFIX: removed the unused `predictions = model.predict(data)` line —
    # `model.labels_` from fit() already holds the cluster assignments.
    data["class"] = model.labels_
    return data
# %%
# Cluster the shopping data with 5 and 6 clusters for comparison.
five_clusters = get_clusters(5, df_shopping)
five_clusters.head()
# %%
six_clusters = get_clusters(6, df_shopping)
six_clusters.head()
# %%
# Plot the 3D-scatter of the 5-cluster labeling.
fig = px.scatter_3d(
    five_clusters,
    x="Age",
    y="Spending Score (1-100)",
    z="Annual Income",
    color="class",
    symbol="class",
    width=800,
)
fig.update_layout(legend=dict(x=0, y=1))
fig.show()
# %%
# Plot the 3D-scatter of the 6-cluster labeling.
fig = px.scatter_3d(
    six_clusters,
    x="Age",
    y="Spending Score (1-100)",
    z="Annual Income",
    color="class",
    symbol="class",
    width=800,
)
fig.update_layout(legend=dict(x=0, y=1))
fig.show()
# %%
|
import dotainput.local_config
import dotainput.util
from telegram.processor import Processor
import http.client
import json
import logging
import socket
import threading
import time
class Streamer:
    """
    Processes matches from the Dota 2 web API
    (see http://dev.dota2.com/showthread.php?t=58317 and
    https://wiki.teamfortress.com/wiki/WebAPI#Dota_2) and calls process on each.

    This class is not thread-safe.
    """

    def __init__(self):
        self.running = False
        # Sequence number of the newest match already handed to the processor.
        self._most_recent_streamed_match = None
        self._processor = Processor()

    def start(self, poll_interval=100):
        """
        Starts the Streamer; may not be started if it is already running.

        :param poll_interval: Number of milliseconds after which to poll for
            new matches. Default is 100. Valve suggests rate limiting within
            applications to at most one request per second.
        """
        self.running = True
        self._connection = dotainput.util.create_steamapi_connection()
        self.poll_interval = poll_interval / 1000
        self._poll_thread = threading.Thread(target=self._poll_continuously)
        self._poll_thread.start()
        self._processor.start()

    def stop(self):
        """
        Stops the Streamer. Closes all connections.

        :return:
        """
        self.running = False
        self._poll_thread.join()
        self._connection.close()

    def _reconnect_connection(self, num_attempts=0):
        """
        Reconnect the steam API connection, because sometimes it fails...
        Retries up to 'num_attempts' times, waiting for self.poll_interval in
        between each retry. 'num_attempts' of -1 signifies to retry forever.
        Raises the socket.timeout if it times out for num_attempts times.

        :param num_attempts: Number of times to attempt to retry. Default 0
            (a single attempt, no retries).
        """
        try:
            self._connection.close()
            self._connection.connect()
            time.sleep(self.poll_interval)
        # Except all exceptions... I don't have time for this
        except (socket.timeout, ConnectionRefusedError, Exception) as e:
            if num_attempts == -1:
                logging.warning("Reconnect failed, retrying forever.")
                self._reconnect_connection(num_attempts=-1)
            elif num_attempts > 1:
                logging.warning("Reconnect failed, retrying %d more times" %
                                (num_attempts - 1))
                self._reconnect_connection(num_attempts - 1)
            else:
                logging.error("Reconnect failed.")
                raise e

    def _poll_continuously(self):
        """
        Loops continuously and polls if self.running = True. Does not return
        until self.running = False.

        Relies on time.sleep to poll, so may fall behind if processing takes
        too long.
        """
        while self.running:
            if self._most_recent_streamed_match is None:
                self._most_recent_streamed_match = \
                    self._get_recent_match_seq_num()
            self._connection.request(
                "GET",
                "/IDOTA2Match_570/GetMatchHistoryBySequenceNum/V001/"
                "?key={key}&start_at_match_seq_num={match_seq_num}"
                .format(
                    key=dotainput.local_config.DOTA2_API_KEY,
                    match_seq_num=self._most_recent_streamed_match + 1
                )
            )
            try:
                response = self._connection.getresponse().read()
            except http.client.BadStatusLine:
                logging.info("Received empty response (BadStatusLine), "
                             "waiting & continuing...")
                self._reconnect_connection(num_attempts=-1)
                continue
            except socket.timeout:
                logging.info("Connection timed out, "
                             "waiting & continuing...")
                self._reconnect_connection(num_attempts=-1)
                continue
            except ConnectionResetError:
                logging.info("Connection reset, waiting & continuing...")
                self._reconnect_connection(num_attempts=-1)
                # BUGFIX: without this `continue` the loop fell through and
                # tried to JSON-decode a `response` that was never assigned
                # this iteration (NameError on the first pass, or stale data
                # afterwards).
                continue
            try:
                match_history = json.loads(response.decode("utf-8"))
            except ValueError as e:
                logging.error(
                    "Error while decoding JSON response: %s. Error:\n%s"
                    % (response, e)
                )
                continue
            if "result" not in match_history:
                logging.warning("JSON Malformed result: %s" % match_history)
                continue
            if "matches" not in match_history["result"]:
                # Reached end for now.
                logging.info("No new matches, continuing ...")
                time.sleep(self.poll_interval)
                continue
            json_matches = match_history["result"]["matches"]
            if len(json_matches) == 0:
                logging.warning("No matches in 'matches' field of result, this "
                                "is unexpected. json received was:\n%s" %
                                match_history)
                continue
            self._most_recent_streamed_match = \
                json_matches[-1]["match_seq_num"]
            for match in json_matches:
                self._processor.process_match(match)
            time.sleep(self.poll_interval)

    def _get_recent_match_seq_num(self):
        """
        :return: A match_seq_num of a recent match to start streaming from.
        """
        self._connection.request(
            "GET",
            "/IDOTA2Match_570/GetMatchHistory/V001/"
            "?key={key}"
            "&matches_requested=1"
            .format(
                key=dotainput.local_config.DOTA2_API_KEY
            )
        )
        response = self._connection.getresponse()
        decoded = json.loads(response.read().decode("utf-8"))
        time.sleep(self.poll_interval)  # Rate limit for the API
        return decoded["result"]["matches"][-1]["match_seq_num"]
|
from flask import Flask, render_template,abort
import os
app = Flask(__name__)

# In-memory grade book used instead of a real database.
notas = [
    {"id":1,"nombre":"Paco Pérez","curso":"4º","nota":"6.25"},
    {"id":2,"nombre":"Manuel Rodríguez","curso":"3º","nota":"5.00"},
    {"id":3,"nombre":"María Roldán","curso":"2º","nota":"7.15"},
    {"id":1000,"nombre":"José Domingo","curso":"2º","nota":"10"}]

@app.route('/',methods=["GET","POST"])
def inicio():
    """Render the home page with the full list of grades."""
    return render_template("inicio.html",lista_notas=notas)

@app.route('/alumno/<int:id>')
def alumno(id):
    """Render a single student's page, or 404 when the id is unknown."""
    for alumno in notas:
        if alumno["id"]==id:
            return render_template("alumno.html",alum=alumno)
    abort(404)

# NOTE(review): os.environ["PORT"] raises KeyError when PORT is unset —
# confirm the deployment environment always provides it.
port=os.environ["PORT"]
app.run("0.0.0.0",int(port),debug=True)
|
#!/usr/local/bin/python3.8
print ('\nString Encoding and Functions')
# Demonstrates converting between characters, code points (ord/chr) and
# \u escape sequences.
# Unicode code points (the numbers behind characters)
# Strings value in code points
print ( ord('a') ) # 97
# Python 3 => Unicode by default (specifically UTF-8 encoded)
# UTF-8 stands for "Unicode Transformation Format," with the "8" meaning that values are 8-bits in length
# code points ==> decimal
# unicode code points ==> hexadecimal
# U+2122 (unicode code point) ==> 8482 (decimal code point)
print ( 0x2122 ) # 8482 unicode code points --> code points
print ( ord( '\u2122' ) ) # 8482 unicode code points --> code point
# '\u' introduces a 4-hex-digit unicode code point escape
print ( '\u2122' ) # 'TM' unicode code points --> string
print ( chr(0x2122) ) # hexadecimal --> string chr() function
print ( chr(8482) ) # decimal --> string chr() function
# SUMMARY
# string --> code points
ord('™') # 8482
# code points --> string
chr(8482) # '™'
# unicode code points --> code points
ord('\u2122') # 8482
# unicode code points --> string
'\u2122'
# Utility functions of Excel spreadsheet operations
import xlrd
import xlwt
from datetime import date
# cell write style for RMB currency (¥ with thousands separator, 2 decimals)
rmbStyle = xlwt.Style.easyxf(num_format_str='¥#,##0.00')
# cell write style for column title (bold font)
titleStyle = xlwt.Style.easyxf(strg_to_parse='font: bold on')
def toDateCellStr(_dateValue):
    """Format the (year, month, day, ...) prefix of a date tuple as 'YYYY/MM/DD'."""
    year, month, day = _dateValue[:3]
    return date(year, month, day).strftime('%Y/%m/%d')
"""
URLify: Write a method to replace all spaces in a string with '%20'. You may assume that the string has sufficient space at the end to hold the additional characters, and that you are given the "true" length of the string. (Note: if implementing in Java, please use a character array so that you can perform this operation in place.)
EXAMPLE
Input: "Mr John Smith ", 13 Output: "Mr%20John%20Smith"
"""
import ctypes
def urlify(s, length):
    """Replace spaces with '%20' in-place inside a mutable char buffer.

    :param s: ctypes char buffer whose first `length` bytes hold the true
        string; the buffer must have room for the expanded result.
    :param length: "true" length of the string within the buffer.
    """
    count = 0
    # Indexing a ctypes char buffer yields one-byte `bytes` on Python 3,
    # so all comparisons/assignments below use bytes literals.
    # (BUGFIX: the original compared against the str ' ', which never
    # matches, so nothing was ever replaced.)
    for i in range(length):
        if s[i] == b' ':
            count += 1
    index = length + count * 2
    # BUGFIX: terminate at the *expanded* end; the original wrote the NUL
    # at `length`, inside the expanded text, where it was overwritten.
    if index < len(s):
        s[index] = b'\0'
    # Fill backwards so un-copied source characters are never clobbered.
    for i in range(length - 1, -1, -1):
        if s[i] == b' ':
            s[index - 1] = b'0'
            s[index - 2] = b'2'
            s[index - 3] = b'%'
            index -= 3
        else:
            s[index - 1] = s[i]
            index -= 1

# BUGFIX: ctypes.create_string_buffer requires bytes on Python 3; the four
# trailing spaces leave room for the two '%20' expansions.
string = b"Mr John Smith    "
mutable = ctypes.create_string_buffer(string)
urlify(mutable, 13)
print(mutable.value)
# Read two space-separated words and print them with the first letter
# upper-cased and the rest lower-cased.
# BUGFIX: the original looped over range(len(s1) - 1) for BOTH words,
# which truncated or over-ran the second word when the lengths differ;
# str.capitalize() applies the same transformation per word safely.
s1, s2 = input().split()
print(s1.capitalize() + ' ' + s2.capitalize())
|
# ------------------------------------------------------------------------------
# CPN
# paper:
# https://arxiv.org/abs/1711.07319
# Written by Haiyang Liu (haiyangliu1997@gmail.com)
# ------------------------------------------------------------------------------
import torch.nn as nn
import torch
import math
__all__ = ['CPN50', 'CPN101']
class globalNet(nn.Module):
    """GlobalNet of CPN (https://arxiv.org/abs/1711.07319): an FPN-style
    head with one lateral branch, one upsample branch and one prediction
    branch per backbone stage.

    forward() consumes a list of backbone feature maps ordered deepest
    first (matching `channel_settings`) and returns (global_fms,
    global_outs): the fused pyramid features and per-stage heatmaps.
    """

    def __init__(self, channel_settings, output_shape, num_class):
        # channel_settings: input channels per stage, deepest first.
        # output_shape: spatial size of every predicted heatmap.
        # num_class: number of keypoint heatmap channels.
        super(globalNet, self).__init__()
        self.channel_settings = channel_settings
        laterals, upsamples, predict = [], [], []
        for i in range(len(channel_settings)):
            laterals.append(self._lateral(channel_settings[i]))
            predict.append(self._predict(output_shape, num_class))
            # The shallowest stage needs no upsample branch.
            if i != len(channel_settings) - 1:
                upsamples.append(self._upsample())
        self.laterals = nn.ModuleList(laterals)
        self.upsamples = nn.ModuleList(upsamples)
        self.predict = nn.ModuleList(predict)

        # He-style init for convs; BN starts as identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _lateral(self, input_size):
        # 1x1 conv -> BN -> ReLU projecting a backbone stage to 256 channels.
        layers = []
        layers.append(nn.Conv2d(input_size, 256,
            kernel_size=1, stride=1, bias=False))
        layers.append(nn.BatchNorm2d(256))
        layers.append(nn.ReLU(inplace=True))
        return nn.Sequential(*layers)

    def _upsample(self):
        # 2x bilinear upsample followed by a 1x1 conv + BN, used to merge a
        # deeper stage's features into the next shallower stage.
        layers = []
        layers.append(torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True))
        layers.append(torch.nn.Conv2d(256, 256,
            kernel_size=1, stride=1, bias=False))
        layers.append(nn.BatchNorm2d(256))
        return nn.Sequential(*layers)

    def _predict(self, output_shape, num_class):
        # Per-stage heatmap head: 1x1 conv block, 3x3 conv to num_class
        # channels, bilinear resize to output_shape, final BN.
        layers = []
        layers.append(nn.Conv2d(256, 256,
            kernel_size=1, stride=1, bias=False))
        layers.append(nn.BatchNorm2d(256))
        layers.append(nn.ReLU(inplace=True))
        layers.append(nn.Conv2d(256, num_class,
            kernel_size=3, stride=1, padding=1, bias=False))
        layers.append(nn.Upsample(size=output_shape, mode='bilinear', align_corners=True))
        layers.append(nn.BatchNorm2d(num_class))
        return nn.Sequential(*layers)

    def forward(self, x):
        global_fms, global_outs = [], []
        for i in range(len(self.channel_settings)):
            if i == 0:
                feature = self.laterals[i](x[i])
            else:
                # Fuse with the upsampled features from the previous
                # (deeper) stage.
                feature = self.laterals[i](x[i]) + up
            global_fms.append(feature)
            if i != len(self.channel_settings) - 1:
                up = self.upsamples[i](feature)
            feature = self.predict[i](feature)
            global_outs.append(feature)
        return global_fms, global_outs
import torch.nn as nn
import torch
class Bottleneck(nn.Module):
    """ResNet-style bottleneck (1x1 -> 3x3 -> 1x1) with expansion factor 2
    in the convs below (note: the class attribute says 4 but conv3/bn3 use
    planes * 2 — NOTE(review): confirm which is intended)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
            padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 2)
        self.relu = nn.ReLU(inplace=True)
        # Projection shortcut is built unconditionally, so the
        # `is not None` check in forward() is always true.
        self.downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes * 2,
                kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes * 2),
        )
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        # Residual addition followed by the final activation.
        out += residual
        out = self.relu(out)

        return out
class refineNet(nn.Module):
    """RefineNet of CPN: refines the GlobalNet pyramid features.

    Each of the 4 pyramid levels passes through a cascade of bottlenecks
    (deeper levels get more) and is resized to `out_shape`; the levels are
    concatenated and reduced to `num_class` heatmap channels.
    """

    def __init__(self, lateral_channel, out_shape, num_class):
        super(refineNet, self).__init__()
        cascade = []
        num_cascade = 4
        for i in range(num_cascade):
            # Level i gets (num_cascade - i - 1) bottlenecks, so the deepest
            # level is refined the most before resizing.
            cascade.append(self._make_layer(lateral_channel, num_cascade-i-1, out_shape))
        self.cascade = nn.ModuleList(cascade)
        self.final_predict = self._predict(4*lateral_channel, num_class)

    def _make_layer(self, input_channel, num, output_shape):
        layers = []
        for i in range(num):
            layers.append(Bottleneck(input_channel, 128))
        layers.append(nn.Upsample(size=output_shape, mode='bilinear', align_corners=True))
        return nn.Sequential(*layers)

    def _predict(self, input_channel, num_class):
        # Bottleneck reduces the concatenated features, then a 3x3 conv
        # produces the final num_class heatmaps.
        layers = []
        layers.append(Bottleneck(input_channel, 128))
        layers.append(nn.Conv2d(256, num_class,
            kernel_size=3, stride=1, padding=1, bias=False))
        layers.append(nn.BatchNorm2d(num_class))
        return nn.Sequential(*layers)

    def forward(self, x):
        refine_fms = []
        for i in range(4):
            refine_fms.append(self.cascade[i](x[i]))
        # Concatenate all refined levels along channels before predicting.
        out = torch.cat(refine_fms, dim=1)
        out = self.final_predict(out)
        return out
class CPN(nn.Module):
    """Cascaded Pyramid Network: ResNet backbone -> GlobalNet -> RefineNet.

    forward() returns (global_outs, refine_out): the per-stage GlobalNet
    heatmaps and the refined final heatmaps.
    """

    def __init__(self, resnet, output_shape, num_class, pretrained=True):
        super(CPN, self).__init__()
        # Feature channels of the backbone stages, deepest first.
        channel_settings = [2048, 1024, 512, 256]
        self.resnet = resnet
        self.global_net = globalNet(channel_settings, output_shape, num_class)
        self.refine_net = refineNet(channel_settings[-1], output_shape, num_class)

    def forward(self, x):
        res_out = self.resnet(x)
        global_fms, global_outs = self.global_net(res_out)
        refine_out = self.refine_net(global_fms)
        return global_outs, refine_out
def CPN50(out_size,num_class,pretrained=True):
    """Build a CPN with a ResNet-50 backbone.

    NOTE(review): `resnet50` is not defined or imported anywhere in this
    file — calling this raises NameError; confirm the intended import
    (e.g. torchvision or a local resnet module).
    """
    res50 = resnet50(pretrained=pretrained)
    model = CPN(res50, output_shape=out_size,num_class=num_class, pretrained=pretrained)
    return model
def CPN101(out_size,num_class,pretrained=True):
    """Build a CPN with a ResNet-101 backbone.

    NOTE(review): `resnet101` is not defined or imported in this file —
    calling this raises NameError; confirm the intended import.
    """
    res101 = resnet101(pretrained=pretrained)
    model = CPN(res101, output_shape=out_size,num_class=num_class, pretrained=pretrained)
    return model
# tail-call recursion version:
def factorial_tail(n, acc=1):
    """Compute n! recursively in tail-call style, carrying the running
    product in *acc*; returns *acc* once n drops below 2."""
    if n >= 2:
        return factorial_tail(n - 1, acc * n)
    return acc
# Iterate version:
def factorial_iter(n, acc=1):
    """Iteratively compute n! (multiplied into *acc*).

    BUGFIX/cleanup: the original used `while True` with a `break` placed
    directly after `continue`, making the `break` unreachable dead code;
    this is the same computation with a plain loop condition.
    """
    while n >= 2:
        n, acc = n - 1, acc * n
    return acc
|
def fib(n):
    """Print the first *n* Fibonacci numbers (starting 0 1 1 2 ...) on one
    line, followed by a newline.

    BUGFIX: the loop condition was written `n >+ count`, i.e. `n > (+count)`
    — it happened to behave like `n > count` but read as a typo; spelled
    plainly now.
    """
    count = 0
    a, b = 0, 1
    while n > count:
        print(a, end=' ')
        a, b = b, b + a
        count += 1
    print()
# Read how many Fibonacci rows to print, then an index (length) for each
# row, validating every input against the range 1..100.
numberList = []
number = int(input("Enter the number for fibo rows: "))
if number >= 1 and number <= 100:
    for i in range(number):
        numbers = int(input("Enter the index of fibonacci: "))
        # BUGFIX: the original re-checked `number` (the row count) here
        # instead of `numbers` (the value just entered), so out-of-range
        # entries were silently accepted.
        if numbers >= 1 and numbers <= 100:
            numberList.append(numbers)
        else:
            print("out of index")
    for index, number in enumerate(numberList):
        fib(number)
else:
    print("out of index")
#!/usr/bin/python
import cgi
import os
import cgitb
import sqlite3
import sys
import config
from geopy.geocoders import Nominatim
def scandir (dir, rpath, html4, curs, curs2):
    """Recursively walk `dir` for FD* flight files matching the global
    registration `rg`, print one HTML line per flight (with stats pulled
    from OGNDATA), and return the number of lines printed.

    NOTE(review): SQL is assembled with % string formatting from file-name
    fragments — acceptable only if file names are trusted; parameterized
    queries would be safer.
    """
    nlines=0
    ld = os.listdir(dir)
    ld.sort()
    for f in ld:
        # Flight files are named FD<yymmdd>...<id>.ext; `rg` is the
        # registration searched for (module-level global).
        if f[0:2] == "FD" and f.find(rg) != -1:
            id =f[-13:-4]
            dte=f[2:8]
            alt=0.0
            dst=0.0
            cnt=0
            addr=''
            # Aggregate stats for this flarm id on this date.
            selcmd="select count(*), max(altitude) as maxa, max(distance) as maxd from OGNDATA where idflarm = '%s' and date = '%s' "% (id, dte)
            curs.execute(selcmd)
            reg=curs.fetchone()
            if reg and reg != None:
                cnt=reg[0]
                if cnt > 0:
                    alt=reg[1]
                    dst=reg[2]
            #geolocator = Nominatim(timeout=5)
            # Position of the maximum-altitude fix (reverse geocoding is
            # currently disabled).
            execmd="select max(altitude) as maxa, latitude, longitude from OGNDATA where idflarm = '%s' and date = '%s' "% (id, dte)
            curs2.execute(execmd)
            reg=curs2.fetchone()
            if reg and reg != None:
                malt=reg[0]
                if malt == alt:
                    lati=reg[1]
                    long=reg[2]
                    addr=''
                    #loc = geolocator.reverse([lati,long])
                    #if loc.address != None:
                    #addr=(loc.address).encode('utf8')
                    addr=' '
                else:
                    lati=0.0
                    long=0.0
                    addr=''
            nlines += 1
            if cnt > 0:
                details = ("  ==> Count(%4d) MDist(%5.1f) MAlt(%6.1f) Lat(%7.4f) Long(%7.4f)  %s  " % (cnt, dst, alt, lati, long, addr))
            else:
                details = " "
            fn=html4 + rpath + '/' + f.lstrip()
            fname=("FN:%-33s" % f)
            print fn , '">MAP</a>', "<a>", fname, details, "</a>"
        elif (os.path.isdir(dir+'/'+f)):
            # Recurse into subdirectories, accumulating the line count.
            nlines +=scandir(dir+'/'+f, rpath+'/'+f, html4, curs, curs2)
    return(nlines)
#
# Get IGC file by registration
#
# CGI entry: connects to the OGN database (MySQL or SQLite3 depending on
# config), looks up the registration given as argv[1], then prints an HTML
# listing of its flights via scandir().
setcmd1="set global sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION';"
setcmd2="set session sql_mode='STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION';"
rootdir = config.DBpath+"/fd"
if config.MySQL:
    import MySQLdb  # the SQL data base routines^M
    conn=MySQLdb.connect(host=config.DBhost, user=config.DBuser, passwd=config.DBpasswd, db=config.DBname)
else:
    import sqlite3
    conn=sqlite3.connect(config.DBpath+config.DBSQLite3)
curs=conn.cursor()
curs2=conn.cursor()
if config.MySQL:
    # Enforce strict SQL mode on MySQL only.
    curs.execute(setcmd1)
    curs.execute(setcmd2)
cgitb.enable()
# select distinct date from OGNDATA where idflarm=(select idglider from GLIDERS where registration = 'D-2520') ;
regist = sys.argv[1:]           # first parameter
if regist :
    rr = regist[0]              # get the registration
else:
    rr = ''
html1="""<head><meta charset="UTF-8"></head><TITLE>Get the flights</TITLE> <IMG src="../gif/ogn-logo-150x150.png" border=1 alt=[image]><H1>The flights for the selected registration are: </H1> <HR> <P> %s </P> </HR> """
html2="""<center><table><tr><td><pre>"""
html3="""</pre></td></tr></table></center>"""
html4='<a href="http://cunimb.net/igc2map.php?lien=http://'+config.reposerver+'/DIRdata/fd'
nlines=0
if rr == '':
    print (html1 % 'Invalid registration')
else:
    # Normalize the registration and resolve its competition number (cn).
    rg=rr.strip()
    rg=rg.upper()
    cmd="select cn from GLIDERS where registration = '%s' " % rg
    curs.execute(cmd)
    reg=curs.fetchone()
    if reg and reg != None:
        cn=reg[0]
    else:
        cn=''
    vd = ('Valid registration: %-s %s:' % (rg,cn))
    print (html1 % vd)
    print html2
    nlines=scandir(rootdir, "", html4, curs, curs2)
    if nlines == 0:
        print "No flights found for:", rg
    print html3
|
import threading
import os
import logging
import calendar
import sys
import time
import requests
from scapy.all import *
from crowsnest import config
import request
manager = None
def packet_capture(packet):
    """ Inspect packet headers, create and handoff a Get request object
    with the information we're interested in for further processing """
    get_found = str()
    host = str()
    if packet.haslayer(Raw):
        load = packet[Raw].load
        try:
            # NOTE(review): r"\r\n\r\n" is a *raw* string, i.e. the literal
            # characters backslash-r-backslash-n, not an actual CRLF pair —
            # confirm this matches the payloads actually seen on the wire.
            headers, body = load.split(r"\r\n\r\n", 1)
        except:
            # No header/body separator found: treat the whole load as headers.
            headers = load
            body = ''
        header_lines = headers.split(r"\r\n")
        # Extract the request path from a "GET /..." line, if present.
        for h in header_lines:
            if 'get /' in h.lower():
                get_found = h.split(' ')[1]
        src_ip = packet[IP].src
        if get_found:
            # Extract the Host header value.
            for h in header_lines:
                if 'host: ' in h.lower():
                    host = h.split(":")[1].strip(" ").split("\r\n")[0]
            file_ = get_found.split('/')[-1]
            get_request = request.Get(calendar.timegm(time.gmtime()), src_ip, host, get_found, file_)
            #sys.stdout.write('[get]')
            handle_get_request(get_request)
def handle_get_request(request):
    """ Check the file type received and handoff appropriately"""
    # Dispatch on the 4-character suffix: DASH manifest (.mpd),
    # media segment (.m4s) or plain video (.mp4, currently a no-op).
    file_type = get_file_type(request.file_)
    if file_type == '.mpd':
        #sys.stdout.write('.mpd ')
        request_for_mpd(request)
    elif file_type == '.m4s':
        #sys.stdout.write('.m4s ')
        request_for_m4s(request)
    elif file_type == '.mp4':
        #sys.stdout.write('.mp4')
        request_for_mp4()
def get_file_type(file_):
    """Return the last four characters of *file_* (e.g. '.mpd', '.m4s')."""
    suffix = file_[-4:]
    return suffix
def request_for_mpd(request):
    # Hand a DASH manifest (.mpd) request to the module-level manager.
    #sys.stdout.write('-> handling mpd\n')
    manager.handle_mpd_request(request)

def request_for_m4s(request):
    # Hand a media-segment (.m4s) request to the module-level manager.
    #sys.stdout.write('-> handling m4s\n')
    manager.handle_m4s_request(request)

def request_for_mp4():
    # .mp4 requests are currently ignored.
    pass
    #sys.stdout.write('-> handling mp4\n')
class sniffing_thread(threading.Thread):
    # Daemon thread that sniffs traffic with scapy (interface/filter from
    # config.sniffer) and feeds each packet to packet_capture().
    daemon = True

    def __init__(self, _manager):
        # The manager is published as a module-level global so the plain
        # callback functions above can reach it.
        global manager
        manager = _manager
        #self.debug()
        threading.Thread.__init__(self)

    def run(self):
        try:
            print 'im sniffing'
            sniff(iface=config.sniffer['ifname'], filter=str(config.sniffer['protocol'])+" port "+str(config.sniffer['port']) , prn=packet_capture, store=0)
        except Exception as e:
            print 'error: ' + str(e)
            sys.exit(1)

    def debug(self):
        # Replays a few canned MPD/M4S requests through the pipeline for
        # offline testing (normally disabled).
        handle_get_request(request.Get(calendar.timegm(time.gmtime()), '0.0.0.0', 'http://www-itec.uni-klu.ac.at', '/ftp/datasets/mmsys12/BigBuckBunny/bunny_2s/BigBuckBunny_2s_isoffmain_DIS_23009_1_v_2_1c2_2011_08_30.mpd', 'BigBuckBunny_2s_isoffmain_DIS_23009_1_v_2_1c2_2011_08_30.mpd'))
        handle_get_request(request.Get(calendar.timegm(time.gmtime()), '0.0.0.0', 'http://www-itec.uni-klu.ac.at', 'ftp/datasets/mmsys12/BigBuckBunny/bunny_2s/bunny_2s_8000kbit/bunny_2s4.m4s', 'bunny_2s4.m4s'))
        time.sleep(1)
        handle_get_request(request.Get(calendar.timegm(time.gmtime()), '0.0.0.0', 'http://www-itec.uni-klu.ac.at', 'ftp/datasets/mmsys12/BigBuckBunny/bunny_2s/bunny_2s_8000kbit/bunny_2s5.m4s', 'bunny_2s4.m4s'))
|
import torch
import torch.nn as nn
import argparse, os
from torch.nn import functional as F
import matplotlib.pyplot as plt
from sklearn import decomposition
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
from sklearn.model_selection import train_test_split
import lightgbm as lgb
def dir_path(string):
    """argparse-style validator: return *string* if it names an existing file.

    NOTE(review): despite the name, this checks for a *file* (os.path.isfile)
    and raises NotADirectoryError on failure — the exception type is kept
    for caller compatibility, but confirm the intent.
    """
    if not os.path.isfile(string):
        raise NotADirectoryError(string)
    return string
def str2bool(v):
    """Parse an argparse boolean flag: accepts bools and common yes/no
    spellings (case-insensitive); raises ArgumentTypeError otherwise."""
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def two_args_str_int(x):
    """argparse `type`: coerce *x* to int when possible, otherwise return
    it unchanged (lets one flag accept an integer or a keyword string)."""
    try:
        return int(x)
    # BUGFIX: narrowed the bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to the conversion failures int() raises.
    except (TypeError, ValueError):
        return x
def two_args_str_float(x):
    """argparse `type`: coerce *x* to float when possible, otherwise return
    it unchanged (lets one flag accept a number or a keyword string)."""
    try:
        return float(x)
    # BUGFIX: narrowed the bare `except:` to the conversion failures
    # float() raises.
    except (TypeError, ValueError):
        return x
class F1_Loss(nn.Module):
    '''Calculate F1 score. Can work with gpu tensors

    The original implmentation is written by Michal Haltuf on Kaggle.

    Hard-coded for binary classification (2 classes, see the one_hot call);
    returns 1 - mean soft-F1 so that minimizing the loss maximizes F1.

    Returns
    -------
    torch.Tensor
        `ndim` == 1. epsilon <= val <= 1

    Reference
    ---------
    - https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric
    - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
    - https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6
    - http://www.ryanzhang.info/python/writing-your-own-loss-function-module-for-pytorch/
    '''
    def __init__(self, epsilon=1e-7):
        # epsilon guards divisions and the final clamp.
        super().__init__()
        self.epsilon = epsilon

    def forward(self, y_pred, y_true, ):
        # y_pred: (batch, 2) logits; y_true: (batch,) class indices.
        assert y_pred.ndim == 2
        assert y_true.ndim == 1
        y_true = F.one_hot(y_true.long(), 2).to(torch.float32)
        y_pred = F.softmax(y_pred, dim=1)

        # Soft confusion-matrix counts from predicted probabilities.
        tp = (y_true * y_pred).sum(dim=0).to(torch.float32)
        tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0).to(torch.float32)
        fp = ((1 - y_true) * y_pred).sum(dim=0).to(torch.float32)
        fn = (y_true * (1 - y_pred)).sum(dim=0).to(torch.float32)

        precision = tp / (tp + fp + self.epsilon)
        recall = tp / (tp + fn + self.epsilon)

        f1 = 2 * (precision * recall) / (precision + recall + self.epsilon)
        f1 = f1.clamp(min=self.epsilon, max=1 - self.epsilon)
        return 1 - f1.mean()
class BCE_bit_Loss(nn.Module):
    """Composite loss: weighted sum of MSE, a soft-F1 term and a per-bit term.

    Expects ``y_pred`` and ``y_true`` as 1-D float tensors of the same length
    with targets in {0, 1}.  NOTE(review): ``forward`` unsqueezes ``y_pred``
    to shape (N, 1) before the soft-F1 term, so the softmax inside ``f1``
    runs over a single column and always yields 1.0 — confirm this is the
    intended behaviour.
    """
    def __init__(self, lambda1 =1 ,lambda2=1, lambda3=1, epsilon=1e-7):
        super().__init__()
        # epsilon guards the precision/recall divisions against zero.
        self.epsilon = epsilon
        # Relative weights of the MSE, soft-F1 and bit-loss terms.
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.lambda3 = lambda3
    def f1(self, y_pred, y_true):
        """Soft macro-F1 loss; same computation as F1_Loss.forward above."""
        assert y_pred.ndim == 2
        assert y_true.ndim == 1
        y_true = F.one_hot(y_true.long(), 2).to(torch.float32)
        y_pred = F.softmax(y_pred, dim=1)
        tp = (y_true * y_pred).sum(dim=0).to(torch.float32)
        # tn is computed but unused (kept for symmetry with the source it was copied from).
        tn = ((1 - y_true) * (1 - y_pred)).sum(dim=0).to(torch.float32)
        fp = ((1 - y_true) * y_pred).sum(dim=0).to(torch.float32)
        fn = (y_true * (1 - y_pred)).sum(dim=0).to(torch.float32)
        precision = tp / (tp + fp + self.epsilon)
        recall = tp / (tp + fn + self.epsilon)
        f1 = 2 * (precision * recall) / (precision + recall + self.epsilon)
        f1 = f1.clamp(min=self.epsilon, max=1 - self.epsilon)
        return 1 - f1.mean()
    def bit_loss(self, y_pred, y_true ):
        # For binary targets this reduces to mean(2 - y_pred) on positives and
        # mean(1 + y_pred) on negatives (exponentiation, not multiplication).
        # NOTE(review): verify exponentiation is the intended formula.
        return ((1 - y_pred) ** y_true + y_pred ** (1 - y_true)).mean()
    def forward(self, y_pred, y_true ):
        """Weighted combination of the three loss terms."""
        return self.lambda1 * F.mse_loss(y_pred, y_true) + self.lambda2 *self.f1(y_pred.unsqueeze(1), y_true) + self.lambda3 *self.bit_loss(y_pred, y_true)
class ContrastiveLoss(torch.nn.Module):
    """
    Contrastive loss function.
    Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """
    def __init__(self, margin=2.0):
        super(ContrastiveLoss, self).__init__()
        # Dissimilar pairs only contribute while closer than this margin.
        self.margin = margin

    def forward(self, output1, output2, label):
        """label == 0: pull the pair together; label == 1: push apart up to margin."""
        distance = F.pairwise_distance(output1, output2)
        similar_term = (1 - label) * torch.pow(distance, 2)
        dissimilar_term = (label) * torch.pow(torch.clamp(self.margin - distance, min=0.0), 2)
        return torch.mean(similar_term + dissimilar_term)
def plot_acp(num, xinput, xval, label, title, path):
    """Fit a 2-component PCA on xinput, project xval, and save a scatter plot.

    Class-0 points are drawn in blue, class-1 points in translucent red.
    The figure is written to '<path> ACP sur : <title>.png'.
    """
    figure = plt.figure(num, figsize=(10, 10))
    pca = decomposition.PCA(n_components=2)
    pca.fit(xinput)
    projected = pca.transform(xval)
    # label arrives with a trailing singleton axis; squeeze it for indexing.
    mask0 = (label == 0).squeeze(1)
    mask1 = (label == 1).squeeze(1)
    plt.scatter(projected[mask0, 0], projected[mask0, 1], color='b')
    plt.scatter(projected[mask1, 0], projected[mask1, 1], color='r', alpha=0.1)
    plt.xlabel("Composante 1")
    plt.ylabel("Composante 2")
    plt.title(" ACP sur : "+str(title))
    figure.savefig(path + " ACP sur : "+str(title) + ".png")
def lgbm_eval(data_intermediare_train, label_train, data_intermediare_val, label_val):
    """Fit a LightGBM binary classifier with early stopping, then print the
    accuracy and confusion matrix measured on the validation set."""
    # Hold out 5% of the training split purely for early stopping.
    X_train, X_test, y_train, y_test = train_test_split(
        data_intermediare_train, label_train, test_size=0.05, random_state=42)
    model = lgb.LGBMClassifier(objective='binary', reg_lambda=1, n_estimators=10000)
    model.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=50, verbose=2)
    y_pred = model.predict(data_intermediare_val)
    print(accuracy_score(y_pred=y_pred, y_true=label_val))
    print(confusion_matrix(y_pred=y_pred, y_true=label_val))
|
"""
A 10-substring of a number is a substring of its digits that sum up to 10.
For example, the 10-substrings of the number 3523014 are:
352, 523, 5230, 23014
Write a python function, find_ten_substring(num_str) which accepts a string and returns the list of 10-substrings of that string.
Handle the possible errors in the code written inside the function.
+--------------+---------------------------------+
| Sample Input | Expected Output |
+--------------+---------------------------------+
| '3523014' | ['5230', '23014', '523', '352'] |
+--------------+---------------------------------+
"""
#PF-Assgn-41
def find_ten_substring(num_str):
    """Return all substrings of num_str whose digits sum to exactly 10.

    Substrings are reported in order of their starting index; a match
    extended by trailing zeros (e.g. '523' -> '5230') is reported once per
    zero-extension, matching the assignment's expected output.

    Raises ValueError (from int()) if num_str contains a non-digit.

    Rewrite notes: the original's temp/continue bookkeeping and the loop
    variable 'value' (which shadowed the outer enumerate variable) are
    replaced by an equivalent running-sum scan — once the sum exceeds 10 it
    can never shrink, so the inner loop stops there.
    """
    final = []
    for start in range(len(num_str)):
        total = 0
        for end in range(start, len(num_str)):
            total += int(num_str[end])
            if total == 10:
                final.append(num_str[start:end + 1])
            elif total > 10:
                # Digits are non-negative: the running sum only grows.
                break
    return final
# Demo run on a sample number.
num_str="2825302"
print("The number is:",num_str)
result_list=find_ten_substring(num_str)
print(result_list)
|
#!/usr/bin/env python
#coding:utf8
from flask import Flask, redirect, url_for
from config import create_app
# Application factory plus per-module blueprint registration.
app = create_app()
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:1234@localhost:3306/database'
app.config['SECRET_KEY'] = '\xca\x0c\x86\x04\x98@\x02b\x1b7\x8c\x88]\x1b\xd7"+\xe6px@\xc3#\\'
# BUG FIX: the key used to be 'SQLALCHEMY_TRACK_MODIFICATIONS ' (with a
# trailing space), so Flask-SQLAlchemy never saw this setting and kept its
# default. The corrected key actually disables modification tracking.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Blueprints: URL handling is split per module.
from main import main as main_blueprint
app.register_blueprint(blueprint=main_blueprint, url_prefix='/')
from editor import editor as editor_blueprint
app.register_blueprint(blueprint=editor_blueprint, url_prefix='/<projectId>/editor')
from analysis import analysis as analysis_blueprint
app.register_blueprint(blueprint=analysis_blueprint, url_prefix='/<projectId>/analysis')
from user import user as user_blueprint
app.register_blueprint(blueprint=user_blueprint, url_prefix='/user')
@app.route('/')
def index():
    """Send the site root to the main blueprint's home page."""
    return redirect(url_for('main.home'))
# Route redirection: hand everything over to getEditor.
@app.route('/<projectId>/')
def select_project(projectId):
    """Redirect /<projectId>/ to that project's editor page."""
    # NOTE(review): this guard looks inverted — ids NOT starting with 'No'
    # get an empty response while 'No...' ids are redirected; confirm that
    # project ids are expected to start with 'No'.
    if not str(projectId).startswith('No'):
        return ''
    return redirect(url_for('editor.getEditor', projectId=projectId))
# 启动server服务器
# Start the development server.
if __name__ == '__main__':
    # app.run(debug = True)
    app.run(port=2000, debug = True)  # show debug information
class Solution:
    def countPairs(self, nums: List[int], k: int) -> int:
        """Count index pairs (i, j) with i < j, nums[i] == nums[j] and i*j divisible by k."""
        total = 0
        for j, value in enumerate(nums):
            for i in range(j):
                if nums[i] == value and (i * j) % k == 0:
                    total += 1
        return total
import sys
sys.path.append('/usr/local/anaconda3/lib/python3.6/site-packages')
from numpy import sin, linspace
def f(x):
    """Evaluate sin at x (works element-wise on numpy arrays)."""
    return sin(x)
# Sample sin(x) at 11 evenly spaced points on [0, 4].
x = linspace(0,4,11)
print(x)
#y = sin(x)
y = f(x)
print(y)
legend = []
from matplotlib import pyplot as plt
plt.grid()
plt.xlabel('x')
plt.ylabel('f(x)')
plt.title('$sin(x)$ funkcija un tās atvasinājumi')
plt.plot(x,y,'k')
legend.append('$sin(x)$ funkcija')
plt.plot(x,y, 'go')
legend.append('$sin(x)$ funkcija (daži punkti)')
# Forward-difference derivative, evaluating f analytically at x + deltax.
deltax = x[1] - x[0]
N = len(x)
derivative = []
for i in range(N):
    temp = (f(x[i] + deltax) - f(x[i])) / deltax
    derivative.append(temp)
print(derivative)
plt.plot(x,derivative, 'y')
legend.append('atvasinajums')
plt.plot(x,derivative, 'ro')
legend.append('atvasinajums (daži punkti)')
# Alternative: difference quotient computed purely from the sampled y
# values (yields one fewer point than x).
derivative_through_array = []
for i in range(N-1):
    temp = (y[i+1] - y[i]) / (x[i+1] - x[i])
    derivative_through_array.append(temp)
plt.plot(x[0:N-1], derivative_through_array,'m')
legend.append('atvasinajums (daži punkti)')
plt.plot(x[0:N-1], derivative_through_array,'bo')
legend.append('atvasinajums (izmantojot vērtibas no masiva; daži punkti)')
#print(plt.legend.__doc__)
plt.legend(legend, loc = 3)
plt.show()
|
import collections
import statistics
class WindowQueue(object):
    """Sliding window of at most ``maxsize`` values with optional O(1) min/max.

    When needMin/needMax are set, monotonic deques track the window minimum
    and maximum as values are appended and expired.
    """
    def __init__(self, maxsize=15, needMin=False, needMax=False):
        self.maxsize = maxsize
        self.needMin = needMin
        self.needMax = needMax
        self.clear()
    def clear(self):
        """Drop all stored values (and reset the min/max trackers)."""
        self.main = collections.deque()
        if self.needMin:
            self.mindeque = collections.deque()
        if self.needMax:
            self.maxdeque = collections.deque()
    def get_minimum(self):
        """Current window minimum; only valid when needMin=True."""
        return self.mindeque[0]
    def full(self):
        """True when the window holds exactly maxsize values."""
        return len(self.main) == self.maxsize
    def get_size(self):
        return len(self.main)
    def get_maximum(self):
        """Current window maximum; only valid when needMax=True."""
        return self.maxdeque[0]
    def score(self, more_last = True):
        """Summarise the window.

        With more_last=False: plain median of the window.
        With more_last=True: a tuple (median with the newest quarter of the
        window counted twice, window minimum, newest value), each rounded
        to 3 decimals.

        BUG FIX: deques do not support slicing, so the original expression
        ``self.main + self.main[-1*last_part:]`` always raised TypeError and
        the bare ``except`` silently fell back to the unweighted median.
        The window is now copied to a list so the recency weighting is
        actually applied.
        """
        if not more_last:
            return statistics.median(self.main)
        last_part = round(self.maxsize/4)
        window = list(self.main)
        tail = window[-last_part:] if last_part > 0 else []
        rm1 = statistics.median(window + tail)
        return round(rm1,3), round(min(self.main),3), round(self.main[-1],3)
    def add_tail(self, val):
        """Append val, updating the trackers and expiring the head when over capacity."""
        if self.needMin:
            # Keep mindeque non-decreasing: anything larger than val can
            # never be the window minimum again.
            while len(self.mindeque) > 0 and val < self.mindeque[-1]:
                self.mindeque.pop()
            self.mindeque.append(val)
        if self.needMax:
            # Mirror image: keep maxdeque non-increasing.
            while len(self.maxdeque) > 0 and val > self.maxdeque[-1]:
                self.maxdeque.pop()
            self.maxdeque.append(val)
        self.main.append(val)
        if len(self.main) > self.maxsize:
            self._remove_head()
    def get(self, idx=0):
        """Value at position idx (0 = oldest)."""
        return self.main[idx]
    def _remove_head(self):
        """Expire the oldest value, keeping the min/max deques consistent."""
        val = self.main.popleft()
        if self.needMin:
            if val < self.mindeque[0]:
                raise ValueError("Wrong value")
            elif val == self.mindeque[0]:
                self.mindeque.popleft()
        if self.needMax:
            if val > self.maxdeque[0]:
                raise ValueError("Wrong value")
            elif val == self.maxdeque[0]:
                self.maxdeque.popleft()
|
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
# Create your views here.
def scrape(request):
    """Fetch the UN digital library landing page and render every anchor's href."""
    page = requests.get('https://digitallibrary.un.org/')
    soup =BeautifulSoup(page.text,'html.parser')
    link_address = []
    # Collect the href of every <a> tag (None when the attribute is missing).
    for link in soup.find_all('a'):
        link_address.append(link.get('href'))
    return render(request,'myapp/result.html',{'link_address':link_address})
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 11:42:19 2020
@author: ctralie
"""
import pickle
# Load the pickled student records: a dict mapping name -> student dict.
# BUG FIX: the file handle was opened inline and never closed; a `with`
# block now guarantees it is released.
with open("students.dat", "rb") as students_file:
    students = pickle.load(students_file)
# Create a dictionary whose key is
# a class year, and whose value is a count
# of how many students are in that year
classyears = {}
for name in students:
    student = students[name]
    year = student['year']
    classyears[year] = classyears.get(year, 0) + 1
print(classyears)
import unfurl.util
import unittest
from unfurl.yamlmanifest import YamlManifest
from unfurl.job import Runner, JobOptions
from unfurl.runtime import Status
import os
import os.path
import warnings
manifestScript = """
apiVersion: unfurl/v1alpha1
kind: Ensemble
spec:
service_template:
dsl_definitions:
topology_template:
relationship_templates:
k8sConnection:
# if a template defines node or capability it will be used
# as the default relationship when connecting to that node
default_for: ANY
# target: k8sCluster
type: unfurl.relationships.ConnectsTo.K8sCluster
properties:
context: {get_env: [UNFURL_TEST_KUBECONTEXT]}
KUBECONFIG: {get_env: UNFURL_TEST_KUBECONFIG}
node_templates:
k8sCluster:
type: unfurl.nodes.K8sCluster
directives:
- discover
k8sNamespace:
type: unfurl.nodes.K8sNamespace
requirements:
- host: k8sCluster
properties:
name: octest
testSecret:
# add metadata, type: Opaque
# base64 values and omit data from status
type: unfurl.nodes.K8sSecretResource
requirements:
- host: k8sNamespace
properties:
name: test-secret
data:
uri: "{{ lookup('env', 'TEST_SECRET') }}"
"""
@unittest.skipIf("k8s" in os.getenv("UNFURL_TEST_SKIP", ""), "UNFURL_TEST_SKIP set")
class k8sTest(unittest.TestCase):
    """Round-trip test: deploy the inline manifest against a cluster, then undeploy."""
    def setUp(self):
        try:
            # Ansible generates tons of ResourceWarnings
            warnings.simplefilter("ignore", ResourceWarning)
        except:
            # python 2.x doesn't have ResourceWarning
            pass

    def test_k8sConfig(self):
        """Deploy the secret; its plaintext/base64 value must never leak into job output."""
        os.environ["TEST_SECRET"] = "a secret"
        manifest = YamlManifest(manifestScript)
        job = Runner(manifest).run(JobOptions(add=True, startTime=1))
        assert not job.unexpectedAbort
        assert job.status == Status.ok, job.summary()
        # print(job.summary())
        # print(job.out.getvalue())
        # verify secret contents isn't saved in config
        self.assertNotIn("a secret", job.out.getvalue())
        self.assertNotIn("YSBzZWNyZXQ", job.out.getvalue())  # base64 of "a secret"
        # print (job.out.getvalue())
        self.assertIn("<<REDACTED>>", job.out.getvalue())
        assert not job.unexpectedAbort
        assert job.status == Status.ok, job.summary()
        # Re-load the ensemble from the deploy output and tear it down again.
        manifest = YamlManifest(job.out.getvalue())
        job2 = Runner(manifest).run(JobOptions(workflow="undeploy", startTime=2))
        results = job2.jsonSummary()
        assert not job2.unexpectedAbort
        assert job2.status == Status.ok, job2.summary()
        assert len(results["tasks"]) == 1, results
|
from vidstream import CameraClient
from vidstream import VideoClient
from vidstream import ScreenShareClient
import threading
# Choose One
# client3 = CameraClient('127.0.0.1', 9998)
# client2 = VideoClient('127.0.0.1', 9999, 'video.mp4')
client3 = ScreenShareClient('127.0.0.1', 9999)

# Stream in the background so the main thread can wait for user input.
t = threading.Thread(target=client3.start_stream)
t.start()

# Block until the user types STOP.
while input("") != 'STOP':
    continue

# client1.start_stream()
# client2.start_stream()
# BUG FIX: 'client3.stop_stream' was a bare attribute reference that did
# nothing; the method must be *called* to actually stop the stream.
client3.stop_stream()
"""Common functionality for testing the v2 API."""
from ..base import TestCase as BaseTestCase
from ..base import APITestCase as BaseAPITestCase
class NamespaceMixin(object):
    """Designate the namespace for tests."""
    namespace = 'v2'
    __test__ = True # Run these tests if disabled in base class
    def full_api_reverse(self, viewname, **kwargs):
        """Create a full URL for a namespaced API view."""
        # api_reverse is expected to be provided by the API base test class.
        return 'http://testserver' + self.api_reverse(viewname, **kwargs)
class TestCase(NamespaceMixin, BaseTestCase):
    """Useful methods for testing.

    BUG FIX: the mixin must precede the concrete base class so that the
    attributes it overrides (e.g. ``__test__ = True``) win in the MRO;
    with the old base-first order, BaseTestCase's attributes shadowed the
    mixin's and the tests stayed disabled.
    """
class APITestCase(NamespaceMixin, BaseAPITestCase):
    """Useful methods for testing API endpoints.

    BUG FIX: mixin placed first so its attribute overrides (e.g.
    ``__test__ = True``) take precedence over BaseAPITestCase in the MRO.
    """
|
import nltk
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer
from collections import defaultdict,Counter
from nltk import word_tokenize
import pandas as pd
import numpy as np
def Topic_modeling(df):
    """Run 2-topic LDA over df['sentence'].

    Returns (ti, words): the index of the topic most often dominant across
    the rows, and that topic's top-10 vocabulary words.
    """
    stop_words=set(nltk.corpus.stopwords.words('english'))
    # Strip everything except letters and '#'.
    df['clean_doc'] = df['sentence'].str.replace("[^a-zA-Z#]", " ")
    # cleaning the text
    def clean_text(headline):
        # Tokenise, lemmatise and drop stopwords.
        le=WordNetLemmatizer()
        word_tokens=word_tokenize(headline)
        tokens=[le.lemmatize(w) for w in word_tokens if w not in stop_words]
        cleaned_text=" ".join(tokens)
        return cleaned_text
    df['clean_doc'] = df['clean_doc'].apply(clean_text)
    # Tf-idf for the data
    vect =TfidfVectorizer(stop_words=stop_words,max_features=1000)
    vect_text=vect.fit_transform(df['clean_doc'])
    # Topic_modeling Algo
    from sklearn.decomposition import LatentDirichletAllocation
    lda_model=LatentDirichletAllocation(n_components=2)
    lda_top=lda_model.fit_transform(vect_text)
    #Top 10 Words that has more impact on the topic:
    # NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
    # favour of get_feature_names_out() — confirm the pinned sklearn version.
    vocab = vect.get_feature_names()
    topic = defaultdict(list)
    for i, comp in enumerate(lda_model.components_):
        vocab_comp = zip(vocab, comp)
        sorted_words = sorted(vocab_comp, key= lambda x:x[1], reverse=True)[:10]
        for t in sorted_words:
            topic[i].append(t[0])
    # Most prominent topic for the paragraph
    top = []
    for i in range(len(df)):
        top.append(np.argmax(lda_top[i]))
    c = Counter(top)
    # Majority vote over the per-row dominant topics.
    ma,ti = -1,-1
    for i,j in c.items():
        if j>ma:
            ma = j
            ti = i
    return ti,topic[ti]
|
import sys
import params # Parameters for the neurons
import numpy as np
import itertools
import shutil
import os
import generateFig4 as genF4 # Main NEST code to run simulations
import generateFig4_lossy as genF4_los # NEST code with lossy synapse
import Sim_raster as S_r # Plots the results
pars = params.get_parameters() #
# Output-file prefix for this figure's simulation artefacts.
postfix = "Fig4_"
#genF4_los.run(postfix,pars) # Uncomment this line and comment the line below for using the lossy synapse version to generate DTT
genF4.run(postfix,pars)
S_r.drawFig4()
|
from django.db import models
class Musica(models.Model):
    """A song entry: lyrics, chords, rating and presentation banners."""
    slug = models.SlugField(primary_key=True, max_length=100)
    nome = models.CharField(max_length=255)
    letra = models.TextField()
    cifra = models.TextField()
    info = models.TextField()
    link_video = models.URLField(blank=True, null=True)
    categorias = models.ManyToManyField("Categoria")
    # rating is kept on a 0-100 scale; votes counts how many ratings were cast.
    rating = models.FloatField(blank=True, null=True)
    votes = models.PositiveIntegerField(blank=True, null=True)
    link_lpsalmo = models.URLField(blank=True, null=True)
    tem_imagem = models.BooleanField(default=False)
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    banner_lateral = models.ForeignKey("Banner", related_name="banner_lateral_mus", blank=True, null=True)
    banner_footer = models.ForeignKey("Banner", related_name="banner_footer_mus", blank=True, null=True)
    class Meta:
        app_label = "mpm"
    def __str__(self):
        # NOTE(review): returns bytes, which is only valid for __str__ on
        # Python 2 — on Python 3 this should return self.nome; confirm runtime.
        return self.nome.encode('utf-8')
    def get_video_code(self):
        """Extract the video id from link_video (YouTube-style URLs); '' when absent."""
        if self.link_video:
            try:
                return self.link_video[self.link_video.rindex('/'):].replace("embed",'').replace('watch?v=','').replace('v=','')
            except ValueError:
                return ""
        else:
            return ""
    def add_rate(self, rate):
        #weighted average
        # NOTE(review): assumes rating and votes are already non-null; the
        # first vote on a fresh row (rating=None) would raise TypeError.
        self.rating = (self.rating * self.votes + rate*100/5) / (self.votes + 1)
        self.votes += 1
    def get_rating_per_5(self):
        # Convert the internal 0-100 rating back to a 0-5 scale.
        return self.rating * 5 / 100.0
    def get_formated_rating(self):
        return "%.2f" % self.rating
    def get_legend(self):
        """Render the rating as an RDFa-annotated HTML snippet."""
        plural = ""
        if(self.votes > 1):
            plural = "s"
        retorno = "<span property='ratingValue'>%.2f</span> em <span property='ratingCount'>%d</span> voto%s"
        return retorno % (self.get_rating_per_5(), self.votes, plural)
    def get_absolute_url(self):
        return "/musica/%s/" % self.slug
    def get_inicio(self):
        """First ~140 chars of the lyrics with (possibly truncated) <strong> tags stripped."""
        retorno = self.letra[:140].replace("<strong>",'').replace("<strong",'').replace("<stron",'').replace("<stro",'').replace("<str",'').replace("<st",'').replace("<s",'')
        retorno = retorno.replace("</strong>",'').replace("</strong",'').replace("</stron",'').replace("</stro",'').replace("</str",'').replace("</st",'').replace("</s",'')
        retorno = retorno.replace("</",'').replace("<",'')
        return retorno
|
import sys
# Speed trick: rebind the builtin 'input' to sys.stdin.readline (note the
# trailing newline stays attached to the returned string).
input = sys.stdin.readline
# Read two space-separated integers from the first input line.
value, length = list(map(int, input().split()))
|
import dill
import gzip
import imageio
import numpy as np
import os
import shutil
import warnings
from dps import cfg
from dps.utils import image_to_string, resize_image
def background_names():
    """Return the sorted stem names of all .png/.jpg files under cfg.data_dir/backgrounds."""
    backgrounds_dir = os.path.join(cfg.data_dir, 'backgrounds')
    names = []
    for filename in os.listdir(backgrounds_dir):
        if filename.endswith('.png') or filename.endswith('.jpg'):
            names.append(filename.split('.')[0])
    return sorted(names)
def load_backgrounds(background_names, shape=None):
    """Load background images by name from cfg.data_dir/backgrounds.

    Tries '<name>.jpg' first and falls back to '<name>.png'; optionally
    resizes each image to `shape`.  Returns a list of uint8 arrays.
    """
    # A space-separated string is accepted as a list of names.
    if isinstance(background_names, str):
        background_names = background_names.split()
    backgrounds_dir = os.path.join(cfg.data_dir, 'backgrounds')
    backgrounds = []
    for name in background_names:
        f = os.path.join(backgrounds_dir, '{}.jpg'.format(name))
        try:
            b = imageio.imread(f)
        except FileNotFoundError:
            f = os.path.join(backgrounds_dir, '{}.png'.format(name))
            b = imageio.imread(f)
        if shape is not None and b.shape != shape:
            b = resize_image(b, shape)
        b = np.uint8(b)
        backgrounds.append(b)
    return backgrounds
def emnist_classes():
    """Return the 62 EMNIST class labels: digits 0-9, then A-Z, then a-z."""
    digits = [str(d) for d in range(10)]
    uppercase = [chr(ord('A') + i) for i in range(26)]
    lowercase = [chr(ord('a') + i) for i in range(26)]
    return digits + uppercase + lowercase
# One gzip-pickled archive per class, e.g. '0.pklz', 'A.pklz', 'a.pklz'.
emnist_filenames = [c + ".pklz" for c in emnist_classes()]
def _validate_emnist(path):
    """True iff `path` is a directory holding exactly one .pklz file per EMNIST class."""
    directory = str(path)
    if not os.path.isdir(directory):
        return False
    expected = set(emnist_filenames)
    return set(os.listdir(directory)) == expected
def convert_emnist_and_store(path, new_image_shape):
    """ Images are stored on disk in float format. """
    # Re-sample each per-class EMNIST archive to new_image_shape and write
    # the results to a parallel 'emnist_<h>_by_<w>' directory under `path`.
    if new_image_shape == (28, 28):
        raise Exception("Original shape of EMNIST is (28, 28).")
    print("Converting (28, 28) EMNIST dataset to {}...".format(new_image_shape))
    emnist_dir = os.path.join(path, 'emnist')
    new_dir = os.path.join(path, 'emnist_{}_by_{}'.format(*new_image_shape))
    # Start from a clean output directory.
    try:
        shutil.rmtree(str(new_dir))
    except FileNotFoundError:
        pass
    os.mkdir(new_dir)
    classes = ''.join(
        [str(i) for i in range(10)]
        + [chr(i + ord('A')) for i in range(26)]
        + [chr(i + ord('a')) for i in range(26)]
    )
    for i, cls in enumerate(sorted(classes)):
        with gzip.open(os.path.join(emnist_dir, str(cls) + '.pklz'), 'rb') as f:
            _x = dill.load(f)
        new_x = []
        for img in _x:
            img = resize_image(img, new_image_shape, preserve_range=False)
            new_x.append(img)
        print(cls)
        # Before/after ASCII previews of the first image of the class.
        print(image_to_string(_x[0]))
        _x = np.array(new_x, dtype=_x.dtype)
        print(image_to_string(_x[0]))
        path_i = os.path.join(new_dir, cls + '.pklz')
        with gzip.open(path_i, 'wb') as f:
            dill.dump(_x, f, protocol=dill.HIGHEST_PROTOCOL)
def load_emnist(
        path, classes, balance=False, include_blank=False,
        shape=None, one_hot=False, n_examples=None, example_range=None, show=False):
    """ Load emnist data from disk by class.
    Elements of `classes` pick out which emnist classes to load, but different labels
    end up getting returned because most classifiers require that the labels
    be in range(len(classes)). We return a dictionary `class_map` which maps from
    elements of `classes` down to range(len(classes)).
    Pixel values of returned images are integers in the range 0-255, but stored as float32.
    Returned X array has shape (n_images,) + shape.
    Parameters
    ----------
    path: str
        Path to data directory, assumed to contain a sub-directory called `emnist`.
    classes: list of character from the set (0-9, A-Z, a-z)
        Each character is the name of a class to load.
    balance: bool
        If True, will ensure that all classes are balanced by removing elements
        from classes that are larger than the minimu-size class.
    include_blank: bool
        If True, includes an additional class that consists of blank images.
    shape: (int, int)
        Shape of the images.
    one_hot: bool
        If True, labels are one-hot vectors instead of integers.
    n_examples: int
        Maximum number of examples returned. If not supplied, return all available data.
    example_range: pair of floats
        Pair of floats specifying, for each class, the range of examples that should be used.
        Each element of the pair is a number in (0, 1), and the second number should be larger.
    show: bool
        If True, prints out an image from each class.
    """
    emnist_dir = os.path.join(path, 'emnist')
    # Copy so the caller's list is never mutated below.
    classes = list(classes) + []
    needs_reshape = False
    if shape and shape != (28, 28):
        # Prefer a pre-resized dataset directory when a valid one exists;
        # otherwise fall back to resizing online at the end.
        resized_dir = os.path.join(path, 'emnist_{}_by_{}'.format(*shape))
        if _validate_emnist(resized_dir):
            emnist_dir = resized_dir
        else:
            needs_reshape = True
    if example_range is not None:
        assert 0.0 <= example_range[0] < example_range[1] <= 1.0
    x, y = [], []
    class_map, class_count = {}, {}
    for i, cls in enumerate(sorted(classes)):
        with gzip.open(os.path.join(emnist_dir, str(cls) + '.pklz'), 'rb') as f:
            _x = dill.load(f)
        if example_range is not None:
            # Take the requested fractional slice of this class's examples.
            low = int(example_range[0] * len(_x))
            high = int(example_range[1] * len(_x))
            _x = _x[low:high, ...]
        # On-disk floats in [0, 1]; convert to 0-255 uint8.
        x.append(np.uint8(255 * np.minimum(_x, 1)))
        y.extend([i] * _x.shape[0])
        if show:
            print(cls)
            print(image_to_string(x[-1]))
        class_map[cls] = i
        class_count[cls] = _x.shape[0]
    x = np.concatenate(x, axis=0)
    if include_blank:
        # Add an all-black class, as large as the smallest real class.
        min_class_count = min(class_count.values())
        blanks = np.zeros((min_class_count,) + x.shape[1:], dtype=np.uint8)
        x = np.concatenate((x, blanks), axis=0)
        blank_idx = len(class_map)
        y.extend([blank_idx] * min_class_count)
        blank_symbol = ' '
        class_map[blank_symbol] = blank_idx
        classes.append(blank_symbol)
    y = np.array(y)
    if balance:
        # Truncate every class to the size of the smallest one.
        min_class_count = min(class_count.values())
        keep_x, keep_y = [], []
        for i, cls in enumerate(classes):
            keep_indices = np.nonzero(y == class_map[cls])[0]
            keep_indices = keep_indices[:min_class_count]
            keep_x.append(x[keep_indices, ...])
            keep_y.append(y[keep_indices])
        x = np.concatenate(keep_x, 0)
        y = np.concatenate(keep_y, 0)
    # Shuffle before truncating to n_examples.
    order = np.random.permutation(x.shape[0])
    x = x[order]
    y = y[order]
    if n_examples:
        x = x[:n_examples]
        y = y[:n_examples]
    if one_hot:
        _y = np.zeros((y.shape[0], len(classes))).astype('f')
        _y[np.arange(y.shape[0]), y] = 1.0
        y = _y
    if needs_reshape:
        if x.shape[0] > 10000:
            warnings.warn(
                "Performing an online resize of a large number of images ({}), "
                "consider creating and storing the resized dataset.".format(x.shape[0])
            )
        x = [resize_image(img, shape) for img in x]
        x = np.uint8(x)
    return x, y, class_map
def omniglot_classes():
    """List every omniglot class as 'alphabet,character_number' (1-based)."""
    omniglot_dir = os.path.join(cfg.data_dir, 'omniglot')
    alphabets = os.listdir(omniglot_dir)
    classes = []
    for ab in alphabets:
        # One sub-directory per character inside each alphabet directory.
        n_characters = len(os.listdir(os.path.join(omniglot_dir, ab)))
        classes.extend(["{},{}".format(ab, i+1) for i in range(n_characters)])
    return classes
# Class spec: alphabet,character
def load_omniglot(
        path, classes, include_blank=False, shape=None, one_hot=False, indices=None, show=False):
    """ Load omniglot data from disk by class.
    Elements of `classes` pick out which omniglot classes to load, but different labels
    end up getting returned because most classifiers require that the labels
    be in range(len(classes)). We return a dictionary `class_map` which maps from
    elements of `classes` down to range(len(classes)).
    Returned images are arrays of floats in the range 0-255. White text on black background
    (with 0 corresponding to black). Returned X array has shape (n_images,) + shape.
    Parameters
    ----------
    path: str
        Path to data directory, assumed to contain a sub-directory called `omniglot`.
    classes: list of strings, each giving a class label
        Each character is the name of a class to load.
    balance: bool
        If True, will ensure that all classes are balanced by removing elements
        from classes that are larger than the minimu-size class.
    include_blank: bool
        If True, includes an additional class that consists of blank images.
    shape: (int, int)
        Shape of returned images.
    one_hot: bool
        If True, labels are one-hot vectors instead of integers.
    indices: list of int
        The image indices within the classes to include. For each class there are 20 images.
    show: bool
        If True, prints out an image from each class.
    """
    omniglot_dir = os.path.join(path, 'omniglot')
    classes = list(classes)[:]
    # Default to all 20 drawings of each character.
    if not indices:
        indices = list(range(20))
    for idx in indices:
        assert 0 <= idx < 20
    x, y = [], []
    class_map, class_count = {}, {}
    for i, cls in enumerate(sorted(list(classes))):
        alphabet, character = cls.split(',')
        char_dir = os.path.join(omniglot_dir, alphabet, "character{:02d}".format(int(character)))
        files = os.listdir(char_dir)
        # Filenames look like '<class_id>_<index>.png'.
        class_id = files[0].split("_")[0]
        for idx in indices:
            f = os.path.join(char_dir, "{}_{:02d}.png".format(class_id, idx + 1))
            _x = imageio.imread(f)
            # Convert to white-on-black
            _x = 255. - _x
            if shape:
                _x = resize_image(_x, shape)
            x.append(_x)
            y.append(i)
        if show:
            print(cls)
            print(image_to_string(x[-1]))
        class_map[cls] = i
        class_count[cls] = len(indices)
    x = np.array(x, dtype=np.uint8)
    if include_blank:
        # Extra all-black class, as large as the smallest real class.
        min_class_count = min(class_count.values())
        blanks = np.zeros((min_class_count,) + shape, dtype=np.uint8)
        x = np.concatenate((x, blanks), axis=0)
        blank_idx = len(class_map)
        y.extend([blank_idx] * min_class_count)
        blank_symbol = ' '
        class_map[blank_symbol] = blank_idx
        classes.append(blank_symbol)
    y = np.array(y)
    # Shuffle images and labels together.
    order = np.random.permutation(x.shape[0])
    x = x[order]
    y = y[order]
    if one_hot:
        _y = np.zeros((y.shape[0], len(classes))).astype('f')
        _y[np.arange(y.shape[0]), y] = 1.0
        y = _y
    return x, y, class_map
|
#!/usr/bin/env python3
# Puzzle input: one integer per line.
indata_filename = "indata.txt"
values = []
def readValuesFromFile(filename):
    """Read one integer per line from `filename` and return them as a list.

    BUG FIXES: the file handle was opened without ever being closed (now a
    `with` block); and a blank line in the middle of the file would have
    crashed int() — blank lines are now skipped.
    """
    result = []
    with open(filename, 'r') as f:
        for line in f:
            stripped = line.strip()
            if stripped:
                result.append(int(stripped))
    return result
def getTwoAdditiveComponents(values, sum):
    """Return the first pair of distinct-position elements adding up to `sum`, or None.

    BUG FIX: the original nested loops compared every element with itself,
    so a single element equal to sum/2 (e.g. one 1010 for sum 2020) was
    wrongly reported as a pair.  Index-based loops with j > i now guarantee
    two different positions.  (Parameter name `sum` kept for compatibility
    even though it shadows the builtin.)
    """
    for i in range(len(values)):
        for j in range(i + 1, len(values)):
            if values[i] + values[j] == sum:
                return values[i], values[j]
    return None
def getThreeAdditiveComponents(values, sum):
    """Return the first triple of distinct-position elements adding up to `sum`, or None.

    BUG FIX: as with the pair version, the original reused the same element
    several times within one candidate triple; i < j < k now guarantees
    three different positions.
    """
    for i in range(len(values)):
        for j in range(i + 1, len(values)):
            for k in range(j + 1, len(values)):
                if values[i] + values[j] + values[k] == sum:
                    return values[i], values[j], values[k]
    return None
values = readValuesFromFile(indata_filename)
print("values", values)
# Part 1: product of the two entries summing to 2020.
v1,v2 = getTwoAdditiveComponents(values, 2020)
print("v1,v2", v1,v2)
answer1 = v1*v2
print("v1*v2", answer1)
# Part 2: product of the three entries summing to 2020.
v1,v2,v3 = getThreeAdditiveComponents(values, 2020)
print("v1,v2,v3", v1,v2,v3)
answer2 = v1*v2*v3
print("v1*v2*v3", answer2)
|
import json
class MessageParser():
    """Translate JSON payloads from the chat server into printable strings."""

    def __init__(self):
        # Dispatch table: response type -> formatting handler.
        self.possible_responses = {
            'error': self.parse_error,
            'info': self.parse_info,
            'message': self.parse_message,
            'history': self.parse_history,
        }

    def parse(self, payload):
        """Decode `payload` and dispatch on its 'response' field."""
        payload = json.loads(payload)
        handler = self.possible_responses.get(payload['response'])
        if handler is None:
            return 'Error. JSON object from server has invalid format.'
        return handler(payload)

    def parse_error(self, message):
        return message['sender'] + " (error): " + message['content']

    def parse_info(self, message):
        return message['sender'] + " (info): " + message['content']

    def parse_message(self, message):
        return message['sender'] + " (message): " + message['content']

    def parse_history(self, message):
        """Format each non-empty JSON-encoded entry of the history, one per line."""
        rendered = ""
        for entry in message['content']:
            record = json.loads(entry)
            if record['content'] != 'None':
                rendered += self.parse_message(record)
                rendered += "\n"
        return rendered

    # Include more methods for handling the different responses...
|
from django.db import models
class BaseLog(models.Model):
    """Common envelope shared (via OneToOne) by every concrete log row below."""
    log_type = models.CharField(max_length=32)
    timestamp = models.DateTimeField(auto_now_add=True)
    server = models.CharField(max_length=8)
    transaction_num = models.BigIntegerField()
class UserCommandLog(models.Model):
    """A command issued by a user (funds only applies to money commands)."""
    base_log = models.OneToOneField(BaseLog, on_delete=models.CASCADE)
    command = models.CharField(max_length=16)
    username = models.CharField(max_length=64)
    stock_symbol = models.CharField(max_length=3)
    filename = models.FilePathField(path="/dumplog_output")
    funds = models.DecimalField(decimal_places=2, max_digits=32, null=True)
class QuoteServerLog(models.Model):
    """A price quote returned by the quote server."""
    base_log = models.OneToOneField(BaseLog, on_delete=models.CASCADE)
    price = models.DecimalField(decimal_places=2, max_digits=32)
    stock_symbol = models.CharField(max_length=3)
    username = models.CharField(max_length=128)
    quote_server_time = models.BigIntegerField()
    # TODO: what is crypto key length?
    crypto_key = models.CharField(max_length=256)
class AccountTransactionLog(models.Model):
    """A change to a user's account balance (e.g. add/remove funds)."""
    base_log = models.OneToOneField(BaseLog, on_delete=models.CASCADE)
    action = models.CharField(max_length=32)
    username = models.CharField(max_length=64)
    funds = models.DecimalField(decimal_places=2, max_digits=32)
class SystemEventLog(models.Model):
    """A system-initiated event, mirroring the user-command fields."""
    base_log = models.OneToOneField(BaseLog, on_delete=models.CASCADE)
    command = models.CharField(max_length=16)
    username = models.CharField(max_length=64)
    stock_symbol = models.CharField(max_length=3)
    filename = models.FilePathField(path="/dumplog_output")
    funds = models.DecimalField(decimal_places=2, max_digits=32, null=True)
class ErrorEventLog(models.Model):
    """An error raised while processing a command, plus the failing command's context."""
    base_log = models.OneToOneField(BaseLog, on_delete=models.CASCADE)
    command = models.CharField(max_length=16)
    username = models.CharField(max_length=64)
    stock_symbol = models.CharField(max_length=3)
    filename = models.FilePathField(path="/dumplog_output")
    funds = models.DecimalField(decimal_places=2, max_digits=32, null=True)
    error_message = models.CharField(max_length=512)
class DebugEventLog(models.Model):
    """A debug annotation attached to a command's processing."""
    base_log = models.OneToOneField(BaseLog, on_delete=models.CASCADE)
    command = models.CharField(max_length=16)
    username = models.CharField(max_length=64)
    stock_symbol = models.CharField(max_length=3)
    filename = models.FilePathField(path="/dumplog_output")
    funds = models.DecimalField(decimal_places=2, max_digits=32, null=True)
    debug_message = models.CharField(max_length=512)
|
#! python3
import numpy as np
def predict_by_distance(distance_matrix, gallery_Y):
    '''
    For every query row, return the gallery labels re-ordered by
    increasing distance to that query (ties broken by label value).
    '''
    n_query = distance_matrix.shape[0]
    predict_matrix = np.zeros(distance_matrix.shape)
    for row in range(n_query):
        print(row, end='\r')
        ranked = sorted(zip(distance_matrix[row], gallery_Y))
        predict_matrix[row] = [label for _, label in ranked]
    return predict_matrix
def calculate_map(distance_matrix, gallery_Y, query_Y):
    '''
    compute and return the mean average precision
    based on the distance matrix
    '''
    # Rank gallery labels per query, then average AP over all queries.
    predict_matrix = predict_by_distance(distance_matrix, gallery_Y)
    return round(mean_average_precision(predict_matrix, query_Y), 3)
def calculate_cmc_rank(distance_matrix, gallery_Y, query_Y, k=1):
    """Average top-k matching score over all queries, rounded to 3 decimals.

    NOTE(review): for k > 1 this computes precision@k (fraction of the top
    k predictions that match the query label), not the usual CMC 'correct
    match within top-k' 0/1 indicator; the two coincide only at k == 1.
    Confirm which metric is intended.
    """
    predict_matrix = predict_by_distance(distance_matrix, gallery_Y)
    n_query = predict_matrix.shape[0]
    cmc_rank = []
    for i in range(n_query):
        preds = predict_matrix[i]
        label = query_Y[i]
        count, tp = 0, 0
        for j in range(k):
            pred = preds[j]
            count += 1
            if pred == label:
                tp += 1
        cmc_rank.append(tp / count)
    return round(np.average(cmc_rank), 3)
def average_precision(preds, label):
    '''
    Average precision of a single query: the mean of the precision values
    measured at every rank where the prediction equals `label`.
    Returns 0 when there is no match at all.
    '''
    hits = 0
    precisions = []
    for rank, pred in enumerate(preds, start=1):
        if pred == label:
            hits += 1
            precisions.append(hits / rank)
    if hits == 0:
        return 0
    return np.average(precisions)
def mean_average_precision(predict_matrix, query_Y):
    """Mean of per-query average precision over all rows of predict_matrix."""
    per_query = [
        average_precision(predict_matrix[idx], query_Y[idx])
        for idx in range(predict_matrix.shape[0])
    ]
    return np.average(per_query)
def normalized(A):
    """Min-max scale A into [0, 1] (NaN/inf when A is constant)."""
    lo = A.min()
    hi = A.max()
    return (A - lo) / (hi - lo)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.