# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
import math
import matplotlib as mpl
from pylab import cm
avenirfont = {'fontname': 'Avenir', 'size': 18}
units = {'perf': 'paket/sn', 'cost': 'USD/saat'}
names = {'perf': 'performans', 'cost': 'maliyet'}
plt.rcParams['text.usetex'] = False
mpl.rcParams['font.family'] = 'Avenir'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.linewidth'] = 2
csv_file = 'data.log'
df = pd.read_csv(csv_file, delim_whitespace=True)
varnames = ['re','cpu','heap']
var_idx_map = [0,1,2]
variables = dict()
for var in varnames:
variables[var] = sorted(df[var].unique())
gridsize = [len(x) for x in variables.values()]
n = len(variables.keys()) # dimension
N = len(df) # number of records
state_visit_counts = dict()
# s = {'re': 1, 'heap': 100, 'cpu': 100} --> return 0
# s = {'re': 1, 'heap': 100, 'cpu': 200} --> return 1
def state_to_idx(s):
idx = 0
for i in range(n):
varname = varnames[i]
idx += variables[varname].index(s[varname]) * np.prod([len(variables[varnames[j]]) for j in range(i+1,n) ])
return int(idx)
# Inverse of state_to_idx
def idx_to_state(idx):
s = dict()
for i in range(n):
varname = varnames[i]
block = int(np.prod([len(variables[varnames[j]]) for j in range(i+1,n) ]))
s[varname] = variables[varname][idx // block]
idx = idx - block * (idx // block)
return s
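# Quick consistency check (illustrative, kept commented out):
# idx_to_state should invert state_to_idx for every cell of the grid.
# for _idx in range(int(np.prod(gridsize))):
#     assert state_to_idx(idx_to_state(_idx)) == _idx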
def filter_data(state):
filter_idx = True
for var in varnames:
filter_idx = (filter_idx) & (df[var] == state[var])
return filter_idx
def filter_data_state_idx(state_idx):
return filter_data(idx_to_state(state_idx))
def neighborlist(state):
yield state
for var in variables:
state_value = state[var]
loc = variables[var].index(state_value)
if loc - 1 >= 0:
s = state.copy()
s[var] = variables[var][loc-1]
yield s
if loc + 1 < len(variables[var]):
s = state.copy()
s[var] = variables[var][loc+1]
yield s
def probmatrix():
m = np.prod(gridsize)
A = np.zeros((m,m))
for j in range(m):
home_state = idx_to_state(j)
neighbors = [n for n in iter(neighborlist(home_state))]
for i in [state_to_idx(x) for x in neighbors]:
A[i,j] = 1/len(neighbors)
return A
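# Each column j of A sums to 1 (every neighbor, the state itself included, is
# chosen with equal probability), so A is column-stochastic.
# Sanity check (illustrative): np.allclose(probmatrix().sum(axis=0), 1.0)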
def probat(n):
m = np.prod(gridsize)
# Starting probability
p = np.ones((m,1))/m
#p[state_to_idx({'re': 5, 'cpu': 500, 'heap': 500})] = 1
#p[0] = 1
A = probmatrix()
for i in range(n):
p = np.dot(A,p)
return p
probs = probat(N)
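# probat(N) applies the transition matrix N times to a uniform start vector.
# Because every state has a self-loop and the grid is connected, this should
# converge to the stationary distribution of the random walk, in which each
# state's probability is proportional to its number of connections; the check
# further below compares it against the closed-form conn_count / tot_connections.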
# %%
for index, row in df.iterrows():
state = dict(row[varnames])
state_idx = state_to_idx(state)
if state_idx in state_visit_counts.keys():
state_visit_counts[state_idx] += 1
else:
state_visit_counts[state_idx] = 1
# %%
# number of all possible states
expected_state_count = np.prod(gridsize)
observed_state_count = len(state_visit_counts.keys())
if expected_state_count != observed_state_count:
    print("The number of observed states (%d) does not match the expected (%d)"
          % (observed_state_count, expected_state_count))
# %% Checking whether the state visit frequency matches the expected frequency
# Calculate all possible connections
tot_connections = np.prod(gridsize)
for variable, values in variables.items():
tot_connections += 2 * (len(values)-1) * np.prod([len(variables[k]) for k in variables.keys() if k != variable])
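# tot_connections is the sum of the state degrees: one self-loop per state
# (np.prod(gridsize)) plus two directed connections for every neighboring pair
# along each axis; it normalizes conn_count into a stationary probability below.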
visit_frequency_avg_rel_err = 0
observed_state_visit_count = np.zeros((np.prod(gridsize)))
expected_state_visit_count = np.zeros((np.prod(gridsize)))
for state_idx, visit_count in state_visit_counts.items():
state = idx_to_state(state_idx)
    # interior states have 2n+1 connections (maximum)
conn_count = 2 * n + 1
for var, val in state.items():
# If there is no more state in the left/right
# decrease the connections
if val == min(variables[var]):
conn_count -= 1
if val == max(variables[var]):
conn_count -= 1
expected_state_visit_prob_infinity = conn_count / tot_connections
expected_state_visit_count[state_idx] = int(probs[state_idx,0] * N)
if abs(expected_state_visit_prob_infinity-probs[state_idx,0])/expected_state_visit_prob_infinity > 0.00001:
print("Matrix exponential and theory don't match")
observed_state_visit_count[state_idx] = state_visit_counts[state_idx]
rel_error = abs(expected_state_visit_count[state_idx]-observed_state_visit_count[state_idx]) \
/expected_state_visit_count[state_idx]
visit_frequency_avg_rel_err += rel_error
plt.figure(figsize=(8,8))
plt.plot(observed_state_visit_count,'o',label='Observed')
plt.plot(expected_state_visit_count,'.',label='Expected')
plt.legend()
plt.xlabel('states ordered by state index')
plt.ylabel('state visit counts')
plt.title('Total number of state visits:%d' % N)
plt.show()
visit_frequency_avg_rel_err /= len(state_visit_counts)
# A value close to zero means the states were visited enough times to support
# meaningful statistics; if it is large (e.g. > 0.20), it is better to collect
# more data.
print('Average Relative Error in state visit frequency %f'%visit_frequency_avg_rel_err)
# %%
for metric in ['inc_tps','out_tps','cost']:
missing_indices = df[(np.isnan(df[metric])) | (df[metric] == 0)].index
missing_values = []
for missing_index in missing_indices:
missing_state = dict(df.loc[missing_index][varnames])
missing_state_idx = state_to_idx(missing_state)
hist_df_idx = (df.index < missing_index) & \
(np.isnan(df[metric]) == False) & \
(df[metric] != 0)
search_idx = hist_df_idx & filter_data_state_idx(missing_state_idx)
if len(df[search_idx][metric]) == 0:
try_count = 0
while len(df[search_idx][metric]) == 0:
if try_count > 7:
print("giving up...")
break
# try, index+1,index-1,index+2,index-2,...
try_state_idx = missing_state_idx + (try_count//2 + 1) * \
(1 if try_count % 2 == 0 else -1)
search_idx = hist_df_idx & filter_data_state_idx(try_state_idx)
try_count += 1
if try_count > 7:
logging.error('There is no historical data for %s' % missing_state)
else:
logging.info('Instead of %s, %s is used' % (missing_state, idx_to_state(try_state_idx)))
estimation = np.mean(df[search_idx][metric])
missing_values.append(estimation)
df.loc[missing_indices,metric] = missing_values
# %%
df['perf'] = df['out_tps'] * df['re']
df['1st'] = 0
df_aggr = df.groupby(varnames).mean()
# %%
f = dict()
for output in ['perf', 'cost']:
f[output] = dict()
# Constant
f[output]['0th'] = np.mean(df_aggr[output])
f[output]['D'] = np.mean(df_aggr[output] ** 2) - f[output]['0th']**2
f[output]['1st'] = dict()
f[output]['Di'] = dict()
f[output]['Si'] = dict()
f[output]['2nd'] = dict()
f[output]['Dij'] = dict()
f[output]['Sij'] = dict()
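    # The HDMR / ANOVA-style decomposition built below approximates the response as
    #   f(x) ~ f0 + sum_i f_i(x_i) + sum_{i<j} f_ij(x_i, x_j) + ...
    # where f0 is the grand mean, D is the total variance, and the shares
    # Si = Di / D and Sij = Dij / D measure how much variance each term explains.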
    # First-order terms
for feature in varnames:
f[output]['1st'][feature] = df_aggr.groupby(feature).mean()[output].values - f[output]['0th']
f[output]['Di'][feature] = np.mean(f[output]['1st'][feature] ** 2)
f[output]['Si'][feature] = f[output]['Di'][feature] / f[output]['D']
for i in range(N):
df.loc[i,'1st'] = f[output]['0th']
for feature in varnames:
df.loc[i,'1st'] += f[output]['1st'][feature][variables[feature].index(df.loc[i,feature])]
for i in range(0,n):
varidx = var_idx_map[i]
feature = varnames[varidx]
plt.figure()
plt.rcParams['text.usetex'] = True
plt.plot(variables[feature], f[output]['1st'][feature],'*-',label='f1 (%s)'%feature)
plt.title(r'$f_%d(x_%d)$ (\textbf{%s})'%(i+1,i+1,output))
plt.xlabel(r'$x_%d$ (\textbf{%s})'%(i+1,feature) )
plt.ylabel(r'$f_%d$ (\textbf{pps})'%(i+1))
plt.grid(True)
ax = plt.gca()
ax.set_xticks(variables[feature])
plt.savefig('hdmr_1st_n9_%s_%s.pdf' % (output, feature))
plt.show()
# 2nd order HDMR terms
vmin = math.inf
vmax = -math.inf
for i in range(0,n-1):
varidx1 = var_idx_map[i]
feature1 = varnames[varidx1]
n1 = len(variables[feature1])
for j in range(i+1,n):
varidx2 = var_idx_map[j]
feature2 = varnames[varidx2]
n2 = len(variables[feature2])
fij = np.reshape(df_aggr.groupby([feature1,feature2]).mean()[output].values,(n1,n2)) \
- np.reshape(f[output]['1st'][feature1],(n1,1)) \
- np.reshape(f[output]['1st'][feature2],(1,n2)) \
- f[output]['0th']
f[output]['2nd'][feature1+','+feature2] = fij
f[output]['Dij'][feature1+','+feature2] = np.mean(fij**2)
f[output]['Sij'][feature1+','+feature2] = f[output]['Dij'][feature1+','+feature2] / f[output]['D']
if fij.min() < vmin:
vmin = fij.min()
if fij.max() > vmax:
vmax = fij.max()
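    # In a complete ANOVA/HDMR expansion the variance shares of all orders sum to
    # one (sum_i Si + sum_{i<j} Sij + ... = 1), so 1 - sum(Si) - sum(Sij) gives a
    # rough estimate of what is left to third- and higher-order interactions.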
# 3rd order HDMR terms
xvalues3d, yvalues3d, zvalues3d = [] , [] , []
for i in range(0,n-1):
varidx1 = var_idx_map[i]
feature1 = varnames[varidx1]
n1 = len(variables[feature1])
for j in range(i+1,n):
varidx2 = var_idx_map[j]
feature2 = varnames[varidx2]
n2 = len(variables[feature2])
for k in range(j+1,n):
varidx3 = var_idx_map[k]
feature3 = varnames[varidx3]
n3 = len(variables[feature3])
xvalues3d.append(feature1)
yvalues3d.append(feature2)
zvalues3d.append(feature3)
fijk = np.zeros((n1,n2,n3))
for i1 in range(n1):
val1 = variables[feature1][i1]
for i2 in range(n2):
val2 = variables[feature2][i2]
for i3 in range(n3):
                            val3 = variables[feature3][i3]
state = {feature1: val1, feature2: val2, feature3: val3}
fijk[i1,i2,i3] = df[filter_data(state)][output].mean()
fijk = fijk \
- f[output]['0th'] \
- np.reshape(f[output]['1st'][feature1],(n1,1,1)) \
- np.reshape(f[output]['1st'][feature2],(1,n2,1)) \
- np.reshape(f[output]['1st'][feature3],(1,1,n3)) \
- np.reshape(f[output]['2nd'][feature1+','+feature2],(n1,n2,1)) \
- np.reshape(f[output]['2nd'][feature1+','+feature3],(n1,1,n3)) \
- np.reshape(f[output]['2nd'][feature2+','+feature3],(1,n2,n3))
f[output]['3rd'] = fijk
for i in range(0,n-1):
varidx1 = var_idx_map[i]
feature1 = varnames[varidx1]
n1 = len(variables[feature1])
for j in range(i+1,n):
plt.figure(figsize=(10,10))
plt.rcParams['text.usetex'] = True
varidx2 = var_idx_map[j]
feature2 = varnames[varidx2]
n2 = len(variables[feature2])
ax = plt.gca()
fij = f[output]['2nd'][feature1+','+feature2]
img = ax.imshow(fij,vmin=vmin,vmax=vmax,cmap='GnBu')
plt.ylabel(r'$x_%d$ (\textbf{%s})'%(i+1,feature1))
plt.xlabel(r'$x_%d$ (\textbf{%s})'%(j+1,feature2))
ax.set_xticks(np.arange(n2))
ax.set_yticks(np.arange(n1))
ax.set_xticklabels(list(map(str, variables[feature2])))
ax.set_yticklabels(list(map(str, variables[feature1])))
for k in range(n1):
for l in range(n2):
ax.text(l,k,'%.4f'%fij[k,l],ha='center',va='center')
plt.colorbar(img, ax=ax,fraction=0.046, pad=0.04)
plt.title(r'$f_{%d%d}(x_%d,x_%d)$'%(i+1,j+1,i+1,j+1))
plt.tight_layout()
plt.savefig('hdmr_2nd_n9_%s_%d_%d.pdf'%(output,i+1,j+1))
plt.show()
colors = cm.get_cmap('tab10', 3)
fig = plt.figure(figsize=(5,4))
ax = fig.add_axes([0,0,1,1])
# Hide the top and right spines of the axis
ax.spines['right'].set_visible(True)
ax.spines['top'].set_visible(True)
ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
#plt.rcParams['text.usetex'] = True
for i in range(0,n):
varidx = var_idx_map[i]
feature = varnames[varidx]
ax.plot(np.arange(1,10),f[output]['1st'][feature],'-',
color=colors(i),linewidth=3,label=r'$f_%d(x)$'%(i+1))
ax.set_xticks(np.arange(1,10))
ax.set_xlim(1,9)
ax.legend(loc='upper left',frameon=False)
#plt.title(r'\textbf{Birli YBMG Terimleri (%s)}'%names[output])
plt.grid(False)
ax.set_xlabel('x (sıra sayısı)',labelpad=1)
ax.set_ylabel('%s'% units[output],labelpad=1)
plt.xticks(**avenirfont)
plt.yticks(**avenirfont)
plt.rcParams['pdf.fonttype'] = 42
#plt.tight_layout()
plt.savefig('hdmr_1st_n9_%s.pdf' % (output), transparent=False, bbox_inches='tight', dpi=300)
#plt.show()
plt.show()
|
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
import threading
from collections import OrderedDict
import yaml
import yodl
debug = False
class DockerCompose:
def __init__(self, compose, project, compose_base_dir, requested_services):
self.project = project
self.compose_base_dir = compose_base_dir
self.services = self.merge_services(compose.get('services', {}))
self.networks = compose.get('networks', {})
self.volumes = compose.get('volumes', {})
self.filtered_services = filter(lambda service: not requested_services or service in requested_services, self.services)
def project_prefix(self, value):
return '{}_{}'.format(self.project, value) if self.project else value
def merge_services(self, services):
result = OrderedDict()
for service in services:
service_config = services[service]
result[service] = service_config
if 'extends' in service_config:
extended_config = service_config['extends']
extended_service = extended_config['service']
del result[service]['extends']
if 'file' in extended_config:
extended_service_data = self.merge_services(
yaml.load(open(self.compose_base_dir + extended_config['file'], 'r'), yodl.OrderedDictYAMLLoader)['services']
)[extended_service]
else:
extended_service_data = result[extended_service]
merge(result[service], extended_service_data, None, self.mergeEnv)
return result
@staticmethod
def mergeEnv(a, b, key):
if key == 'environment':
if isinstance(a[key], dict) and isinstance(b[key], list):
a[key] = b[key] + list({'{}={}'.format(k, v) for k, v in a[key].items()})
elif isinstance(a[key], list) and isinstance(b[key], dict):
a[key][:0] = list({'{}={}'.format(k, v) for k, v in b[key].items()})
else:
                raise TypeError('Unknown type of "{}" value (should be either list or dictionary)'.format(key))
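    # Note on the merge above: docker-compose accepts `environment` either as a
    # mapping ({KEY: value}) or as a list of "KEY=value" strings. mergeEnv
    # normalizes the mapping form into the list form so that an extending service
    # and the service it extends can be combined whichever style each one uses.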
@staticmethod
def call(cmd, ignore_return_code=False):
print('Running: \n' + cmd + '\n')
if not debug:
ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = ps.wait()
stdout = ps.communicate()[0]
if returncode != 0 and not ignore_return_code:
print >> sys.stderr, ('Error: command "{}" failed: {}'.format(cmd, stdout))
sys.exit(returncode)
else:
return stdout
def is_service_exists(self, service):
return self.call('/bin/bash -o pipefail -c "docker service ls | awk \'{{print \\$2}}\' | (egrep \'^{}$\' || :)"'.format(self.project_prefix(service)))
def is_external_network(self, network):
if network not in self.networks:
print >> sys.stderr, ('Error: network "{}" is not defined in networks'.format(network))
sys.exit(1)
return isinstance(self.networks[network], dict) and 'external' in self.networks[network]
def up(self):
for network in self.networks:
if not self.is_external_network(network):
cmd = '[ "`docker network ls | awk \'{{print $2}}\' | egrep \'^{0}$\'`" != "" ] || docker network create --driver overlay --opt encrypted {0}' \
.format(self.project_prefix(network))
self.call(cmd)
for volume in self.volumes:
cmd = '[ "`docker volume ls | awk \'{{print $2}}\' | egrep \'^{0}$\'`" != "" ] || docker volume create --name {0}' \
.format(self.project_prefix(volume))
            if isinstance(self.volumes[volume], dict) and self.volumes[volume].get('driver'):
cmd = cmd + ' --driver={0}'.format(self.volumes[volume]['driver'])
self.call(cmd)
services_to_start = []
for service in self.filtered_services:
if self.is_service_exists(service):
services_to_start.append(service)
continue
service_config = self.services[service]
cmd = ['docker service create --with-registry-auth \\\n --name', self.project_prefix(service), '\\\n']
service_image = []
service_command = []
def add_flag(key, value):
cmd.extend([key, shellquote(value), '\\\n'])
for parameter in service_config:
value = service_config[parameter]
def restart():
add_flag('--restart-condition', {'always': 'any'}[value])
def logging():
add_flag('--log-driver', value.get('driver', 'json-file'))
log_opts = value['options']
if log_opts:
for k, v in log_opts.items():
if v is not None:
add_flag('--log-opt', '{}={}'.format(k, v))
def mem_limit():
add_flag('--limit-memory', value)
def image():
service_image.append(value)
def command():
if isinstance(value, list):
service_command.extend(value)
else:
service_command.extend(value.split(' '))
def expose():
pass # unsupported
def container_name():
pass # unsupported
def hostname():
pass # unsupported; waiting for https://github.com/docker/docker/issues/24877
def labels():
value = service_config[parameter]
# ^ working-around the lack of `nonlocal` statement.
if isinstance(value, dict):
value = ('%s=%s' % i for i in value.iteritems())
for label in value:
add_flag('--label', label)
def mode():
add_flag('--mode', value)
def extra_hosts():
pass # unsupported
def ports():
for port in value:
add_flag('--publish', port)
def networks():
for network in value:
add_flag('--network', network if self.is_external_network(network) else self.project_prefix(network))
def volumes():
for volume in value:
splitted_volume = volume.split(':')
src = splitted_volume.pop(0)
dst = splitted_volume.pop(0)
readonly = 0
if splitted_volume and splitted_volume[0] == 'ro':
readonly = 1
if src.startswith('.'):
src = src.replace('.', self.compose_base_dir, 1)
if src.startswith('/'):
add_flag('--mount', 'type=bind,src={},dst={},readonly={}'.format(src, dst, readonly))
else:
add_flag('--mount', 'src={},dst={},readonly={}'.format(self.project_prefix(src), dst, readonly))
def environment():
if isinstance(value, dict):
for k, v in value.items():
add_flag('--env', '{}={}'.format(k, v))
else:
for env in value:
if env.startswith('constraint') or env.startswith('affinity'):
constraint = env.split(':', 2)[1]
add_flag('--constraint', constraint)
else:
add_flag('--env', env)
def replicas():
add_flag('--replicas', value)
def env_file():
for v in value:
with open(v) as env_file:
for line in env_file:
if not line.startswith('#') and line.strip():
add_flag('--env', line.strip())
def unsupported():
print >> sys.stderr, ('WARNING: unsupported parameter {}'.format(parameter))
locals().get(parameter, unsupported)()
if len(service_image) == 0:
print('ERROR: no image specified for %s service' % service)
sys.exit(1)
cmd.extend(service_image)
cmd.extend(service_command)
self.call(' '.join(cmd))
if services_to_start:
self.start(services_to_start)
def pull(self):
nodes = self.call("docker node ls | grep Ready | awk -F'[[:space:]][[:space:]]+' '{print $2}'").rstrip().split('\n')
threads = []
for node in nodes:
cmd = '; '.join(['docker -H tcp://{}:2375 pull {}'.format(node, self.services[service]['image']) for service in self.filtered_services])
threads.append((node, threading.Thread(target=self.call, args=(cmd,))))
for node, thread in threads:
print('Pulling on node {}'.format(node))
thread.start()
for node, thread in threads:
thread.join()
print('Node {} - DONE'.format(node))
def stop(self):
services = filter(self.is_service_exists, self.filtered_services)
cmd_args = ['{}={}'.format(self.project_prefix(service), 0) for service in services]
if cmd_args:
self.call('docker service scale ' + ' '.join(cmd_args))
def rm(self):
services = filter(self.is_service_exists, self.filtered_services)
cmd_args = [self.project_prefix(service) for service in services]
if cmd_args:
self.call('docker service rm ' + ' '.join(cmd_args))
def start(self, services=None):
if services is None:
services = self.filtered_services
cmd = 'docker service scale ' + \
' '.join(['{}={}'.format(self.project_prefix(service), self.services[service].get('replicas', '1')) for service in services])
self.call(cmd)
def convert(self):
# Based on http://stackoverflow.com/a/8661021
represent_dict_order = lambda _self, data: _self.represent_mapping('tag:yaml.org,2002:map', data.items())
yaml.add_representer(OrderedDict, represent_dict_order)
def project_prefix(value):
return '{}-{}'.format(self.project, value) if self.project else value
if self.networks:
print >> sys.stderr, ('WARNING: unsupported parameter "networks"')
for volume in self.volumes:
print >> sys.stderr, ('WARNING: unsupported parameter "volumes"')
for service in self.filtered_services:
service_config = self.services[service]
service = service.replace('_', '-')
service_result = OrderedDict([
('apiVersion', 'v1'),
('kind', 'Service'),
('metadata', OrderedDict([
('name', project_prefix(service)),
('labels', OrderedDict())
])),
('spec', OrderedDict([
('selector', OrderedDict())
]))
])
deployment_result = OrderedDict([
('apiVersion', 'extensions/v1beta1'),
('kind', 'Deployment'),
('metadata', OrderedDict([
('name', project_prefix(service))
])),
('spec', OrderedDict([
('replicas', 1),
('template', OrderedDict([
('metadata', OrderedDict([
('labels', OrderedDict())
])),
('spec', OrderedDict([
('containers', [OrderedDict([
('name', project_prefix(service)),
])])
]))
]))
]))
])
service_labels = service_result['metadata']['labels']
service_selector = service_result['spec']['selector']
deployment_labels = deployment_result['spec']['template']['metadata']['labels']
deployment_spec = deployment_result['spec']['template']['spec']
container = deployment_result['spec']['template']['spec']['containers'][0]
service_labels['service'] = self.project
service_labels['app'] = service
for parameter in service_config:
value = service_config[parameter]
def restart():
deployment_spec['restartPolicy'] = {'always': 'Always'}[value]
def logging():
pass # unsupported
def mem_limit():
container['resources'] = {'limits': {'memory': value.replace('m', 'Mi').replace('g', 'Gi')}}
def image():
container['image'] = value
def command():
if isinstance(value, list):
container['args'] = value
else:
container['args'] = value.split(' ')
def expose():
service_result['spec']['ports'] = []
container['ports'] = []
for port in value:
port_int = int(port)
service_result['spec']['ports'].append(OrderedDict([('port', port_int), ('targetPort', port_int), ('name', str(port_int))]))
container['ports'].append({'containerPort': port_int})
def container_name():
service_result['metadata']['name'] = value
deployment_result['metadata']['name'] = value
container['name'] = value
service_labels['app'] = value
def hostname():
pass # unsupported
def labels():
pass # TODO
def mode():
pass # TODO
def extra_hosts():
pass # unsupported
def ports():
for port in value:
pass # TODO
def networks():
pass # unsupported
def volumes():
container['volumeMounts'] = []
deployment_spec['volumes'] = []
for volume in value:
splitted_volume = volume.split(':')
src = splitted_volume.pop(0)
dst = splitted_volume.pop(0)
readonly = 0
if splitted_volume and splitted_volume[0] == 'ro':
readonly = 1
if src.startswith('.'):
src = src.replace('.', self.compose_base_dir, 1)
if src.startswith('/'):
volume_name = src.split('/')[-1].replace('.', '').replace('_', '-')
container['volumeMounts'].append(OrderedDict([('name', volume_name), ('mountPath', dst)]))
deployment_spec['volumes'].append(OrderedDict([('name', volume_name), ('hostPath', {'path': src})]))
# TODO readonly
else:
volume_name = src.replace('_', '-')
container['volumeMounts'].append(OrderedDict([('name', volume_name), ('mountPath', dst)]))
deployment_spec['volumes'].append(
OrderedDict([('name', volume_name), ('hostPath', {'path': '/volumes/' + project_prefix(volume_name)})]))
# TODO readonly
def environment():
if isinstance(value, dict):
container['env'] = []
for k, v in value.items():
container['env'].append(OrderedDict([('name', k), ('value', v)]))
else:
for env in value:
if env.startswith('constraint') or env.startswith('affinity'):
if 'nodeSelector' not in deployment_spec:
deployment_spec['nodeSelector'] = OrderedDict()
constraint = env.split(':', 2)[1]
selector = 'FIX_ME'
if constraint.startswith('node.hostname=='):
selector = 'kubernetes.io/hostname'
constraint = constraint.split('==')[1]
if constraint.startswith('engine.labels.'):
[selector, constraint] = constraint.split('==')
selector = selector.replace('engine.labels.', '')
deployment_spec['nodeSelector'][selector] = constraint
else:
if 'env' not in container:
container['env'] = []
[k, v] = env.split('=')
container['env'].append(OrderedDict([('name', k), ('value', v)]))
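                # Illustrative mapping performed above: a Swarm-style placement hint
                # such as "constraint:node.hostname==worker1" in the environment list
                # becomes a Kubernetes nodeSelector entry
                # ({'kubernetes.io/hostname': 'worker1'}), while plain "KEY=value"
                # entries become container environment variables. ("worker1" is just
                # an example value.)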
def replicas():
deployment_result['spec']['replicas'] = int(value)
def unsupported():
print >> sys.stderr, ('WARNING: unsupported parameter {}'.format(parameter))
locals().get(parameter, unsupported)()
service_selector.update(service_labels)
deployment_labels.update(service_labels)
sys.stdout.write(yaml.dump(service_result, default_flow_style=False))
print('---')
sys.stdout.write(yaml.dump(deployment_result, default_flow_style=False))
print('---')
def main():
envs = {
'COMPOSE_FILE': 'docker-compose.yml',
'COMPOSE_HTTP_TIMEOUT': '60',
'COMPOSE_TLS_VERSION': 'TLSv1'
}
env_path = os.path.join(os.getcwd(), '.env')
if os.path.isfile(env_path):
with open(env_path) as env_file:
envs.update(dict(map(lambda line: line.strip().split('=', 1), (line for line in env_file if not line.startswith('#') and line.strip()))))
map(lambda e: os.environ.update({e[0]: e[1]}), (e for e in envs.items() if not e[0] in os.environ))
parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=50, width=120))
parser.add_argument('-f', '--file', type=argparse.FileType(), help='Specify an alternate compose file (default: docker-compose.yml)', default=[],
action='append')
parser.add_argument('-p', '--project-name', help='Specify an alternate project name (default: directory name)',
default=os.environ.get('COMPOSE_PROJECT_NAME'))
parser.add_argument('--dry-run', action='store_true')
subparsers = parser.add_subparsers(title='Command')
parser.add_argument('_service', metavar='service', nargs='*', help='List of services to run the command for')
services_parser = argparse.ArgumentParser(add_help=False)
services_parser.add_argument('service', nargs='*', help='List of services to run the command for')
pull_parser = subparsers.add_parser('pull', help='Pull service images', add_help=False, parents=[services_parser])
pull_parser.set_defaults(command='pull')
rm_parser = subparsers.add_parser('rm', help='Stop and remove services', add_help=False, parents=[services_parser])
rm_parser.set_defaults(command='rm')
rm_parser.add_argument('-f', help='docker-compose compatibility; ignored', action='store_true')
start_parser = subparsers.add_parser('start', help='Start services', add_help=False, parents=[services_parser])
start_parser.set_defaults(command='start')
stop_parser = subparsers.add_parser('stop', help='Stop services', add_help=False, parents=[services_parser])
stop_parser.set_defaults(command='stop')
up_parser = subparsers.add_parser('up', help='Create and start services', add_help=False, parents=[services_parser])
up_parser.set_defaults(command='up')
up_parser.add_argument('-d', help='docker-compose compatibility; ignored', action='store_true')
convert_parser = subparsers.add_parser('convert', help='Convert services to Kubernetes format', add_help=False, parents=[services_parser])
convert_parser.set_defaults(command='convert')
args = parser.parse_args(sys.argv[1:])
if len(args.file) == 0:
try:
args.file = map(lambda f: open(f), os.environ['COMPOSE_FILE'].split(':'))
except IOError as e:
print(e)
parser.print_help()
sys.exit(1)
global debug
debug = args.dry_run
compose_base_dir = os.path.dirname(os.path.abspath(args.file[0].name))
if args.project_name is None:
args.project_name = os.path.basename(compose_base_dir)
# Decode and merge the compose files
compose_dicts = map(lambda f: yaml.load(f, yodl.OrderedDictYAMLLoader), args.file)
merged_compose = reduce(merge, compose_dicts)
docker_compose = DockerCompose(merged_compose, args.project_name, compose_base_dir + '/', args.service)
getattr(docker_compose, args.command)()
# Based on http://stackoverflow.com/questions/7204805/dictionaries-of-dictionaries-merge/7205107#7205107
def merge(a, b, path=None, conflict_resolver=None):
"""merges b into a"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)], conflict_resolver)
elif isinstance(a[key], list) and isinstance(b[key], list):
a[key].extend(b[key])
elif a[key] == b[key]:
pass # same leaf value
else:
if conflict_resolver:
conflict_resolver(a, b, key)
else:
raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
else:
a[key] = b[key]
return a
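# Example (illustrative): merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
# returns {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}. Nested dicts are merged
# recursively, lists are concatenated, identical leaves are kept, and differing
# scalar leaves raise unless a conflict_resolver is supplied.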
def shellquote(s):
return "'" + s.replace("'", "'\\''") + "'"
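# e.g. shellquote("it's") returns 'it'\''s': the value is wrapped in single
# quotes and any embedded single quote is closed, backslash-escaped, and
# reopened (the usual POSIX shell quoting trick).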
if __name__ == "__main__":
main()
|
import math
import vmath
from vmathlib import vcolor, vutil
import drawutil
from unit_manager import Unit
class Testease(Unit):
def tick(self, delta_time):
self._draw_ease_panel(vmath.Vector3())
def _draw_ease_panel(self, panel_position):
exp = 3
ease_funcs = [
vutil.alpha_ease_none,
vutil.alpha_ease_in,
vutil.alpha_ease_out,
vutil.alpha_ease_in_out
]
game_time = self.world.game_time
time_scale = 0.5
alpha = vutil.fract(game_time * time_scale)
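        # Assuming vutil.fract returns the fractional part of its argument,
        # alpha sweeps linearly from 0 to 1 every 1/time_scale = 2 seconds of
        # game time; each ease function below remaps that linear ramp, with
        # exp controlling how sharp the easing curve is.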
for index, ease_func in enumerate(ease_funcs):
eased_alpha = ease_func(alpha, exp)
circle_position = panel_position + vmath.Vector3(-2.0, 0.0, 0.0) * index
self._draw_ease_circle(circle_position, 0.8, eased_alpha)
line_position = circle_position + vmath.Vector3(0.0, 0.0, -1.5)
self._draw_ease_line(line_position, 1.6, eased_alpha)
def _draw_ease_circle(self, position, radius, alpha):
transform = vmath.Transform()
transform.translation = position
drawutil.draw_circle(transform, radius)
theta = vutil.lerp(0.0, math.pi * 2.0, alpha)
delta_position = vmath.Vector3(math.sin(theta), 0.0, math.cos(theta)) * radius
drawutil.draw_cube(position + delta_position, 0.1, color=vcolor.BLUE)
def _draw_ease_line(self, position, length, alpha):
delta_half = vmath.Vector3(length, 0.0, 0.0) * 0.5
start = position + delta_half
end = position - delta_half
drawutil.draw_line(start, end)
res_position = vutil.lerp(start, end, alpha)
drawutil.draw_cube(res_position, 0.1, color=vcolor.BLUE)
|
import unittest
import wd.parallel
from selenium import webdriver
import copy
class Selenium2OnSauce(unittest.TestCase):
def setUp(self):
desired_capabilities = [
webdriver.DesiredCapabilities.FIREFOX,
webdriver.DesiredCapabilities.FIREFOX,
webdriver.DesiredCapabilities.CHROME
]
self.drivers = wd.parallel.Remote(
desired_capabilities=desired_capabilities
)
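        # Presumably wd.parallel.Remote creates one remote session per entry in
        # desired_capabilities, and the @wd.parallel.multiply decorator runs the
        # decorated method once per session (exposed as self.driver).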
@wd.parallel.multiply
def test_sauce(self):
self.driver.get('http://saucelabs.com/test/guinea-pig')
        self.assertTrue("I am a page title - Sauce Labs" in self.driver.title)
self.driver.find_element_by_id('comments').send_keys('Hello! I am some example comments. I should appear in the page after you submit the form')
self.driver.find_element_by_id('submit').click()
comments = self.driver.find_element_by_id('your_comments')
self.assertTrue('Your comments: Hello! I am some example comments. I should appear in the page after you submit the form' in comments.text)
body = self.driver.find_element_by_xpath('//body')
self.assertFalse('I am some other page content' in body.text)
self.driver.find_elements_by_link_text('i am a link')[0].click()
body = self.driver.find_element_by_xpath('//body')
self.assertTrue('I am some other page content' in body.text)
@wd.parallel.multiply
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main()
|
# Task 10. Variant 49.
# Write a "character generator" program for a game. The user is given 30 points to distribute among four attributes: Strength, Health, Wisdom and Agility. The user must be able not only to take points from the common pool, but also to return them to the pool from attributes they decided to assign differently.
# Valkovskey M.A.
from operator import add, sub
character = {'name' : ' ',
'power' : 0,
'health' : 0,
'knowledge' : 0,
'speed' : 0}
cons = 30
def info(character, rep=True):
print('\nСила:\t\t', character['power'],
'\nЗдоровье:\t', character['health'],
'\nМудрость:\t', character['knowledge'],
'\nЛовкость:\t', character['speed'],
'\nОсталось', cons, 'баллов\n')
if cons == 0 and rep:
print("Введите 'конец' чтобы закончить\n")
character['name'] = input('Дайте имя вашему герою: ').capitalize()
print(
'\nХорошо, теперь опишите героя', character['name'],
'''
Героя можно развить в четырех направлениях:
Сила, Здоровье, Мудрость и Ловкость
У вас есть 30 очков, распределите их
Для этого вводите команды типа 'сила + 10' или 'ловкость - 5'
''')
while True:
user = input('> ').lower().split()
if len(user) != 3:
        if user and user[0] == 'конец':
break
print('Неправильная команда')
continue
what, how, much = None, None, 0
if user[0] == 'сила':
what = 'power'
elif user[0] == 'здоровье':
what = 'health'
elif user[0] == 'мудрость':
what = 'knowledge'
elif user[0] == 'ловкость':
what = 'speed'
else:
print('Неизвестная команда:', user[0])
continue
if user[1] == '+':
how = add
elif user[1] == '-':
how = sub
else:
print('Неизвестный оператор:', user[1])
continue
try:
much = int(user[2])
    except ValueError:
print('Неизвестное число:', user[2])
continue
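    # Clamp the request: shrink `much` until applying it neither drives the
    # attribute below zero nor takes more points than remain in the pool.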
while True:
abil = character[what]
value = how(abil, much)
diff = abil - value
if value < 0 or (cons + diff) < 0:
much = much - 1
continue
break
cons += abil - value
character[what] = value
info(character)
print('\nГерой', character['name'], 'готов к бою!')
info(character, rep=False)
input('Нажмите ENTER...')
|
# Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains "collector" objects. Collectors provide a way to gather
"raw" results from a :class:`whoosh.matching.Matcher` object, implement
sorting, filtering, collation, etc., and produce a
:class:`whoosh.searching.Results` object.
The basic collectors are:
TopCollector
Returns the top N matching results sorted by score, using block-quality
optimizations to skip blocks of documents that can't contribute to the top
N. The :meth:`whoosh.searching.Searcher.search` method uses this type of
collector by default or when you specify a ``limit``.
UnlimitedCollector
Returns all matching results sorted by score. The
:meth:`whoosh.searching.Searcher.search` method uses this type of collector
when you specify ``limit=None`` or you specify a limit equal to or greater
than the number of documents in the searcher.
SortingCollector
Returns all matching results sorted by a :class:`whoosh.sorting.Facet`
object. The :meth:`whoosh.searching.Searcher.search` method uses this type
of collector when you use the ``sortedby`` parameter.
Here's an example of a simple collector that instead of remembering the matched
documents just counts up the number of matches::
class CountingCollector(Collector):
def prepare(self, top_searcher, q, context):
# Always call super method in prepare
Collector.prepare(self, top_searcher, q, context)
self.count = 0
def collect(self, sub_docnum):
self.count += 1
c = CountingCollector()
mysearcher.search_with_collector(myquery, c)
print(c.count)
There are also several wrapping collectors that extend or modify the
functionality of other collectors. The :meth:`whoosh.searching.Searcher.search`
method uses many of these when you specify various parameters.
NOTE: collectors are not designed to be reentrant or thread-safe. It is
generally a good idea to create a new collector for each search.
"""
import os
import threading
from array import array
from bisect import insort
from collections import defaultdict
from heapq import heapify, heappush, heapreplace
from whoosh import sorting
from whoosh.compat import abstractmethod, iteritems, itervalues, xrange
from whoosh.searching import Results, TimeLimit
from whoosh.util import now
# Functions
def ilen(iterator):
total = 0
for _ in iterator:
total += 1
return total
# Base class
class Collector(object):
"""Base class for collectors.
"""
def prepare(self, top_searcher, q, context):
"""This method is called before a search.
Subclasses can override this to perform set-up work, but
they should still call the superclass's method because it sets several
necessary attributes on the collector object:
self.top_searcher
The top-level searcher.
self.q
The query object
self.context
``context.needs_current`` controls whether a wrapping collector
requires that this collector's matcher be in a valid state at every
call to ``collect()``. If this is ``False``, the collector is free
to use faster methods that don't necessarily keep the matcher
updated, such as ``matcher.all_ids()``.
:param top_searcher: the top-level :class:`whoosh.searching.Searcher`
object.
:param q: the :class:`whoosh.query.Query` object being searched for.
:param context: a :class:`whoosh.searching.SearchContext` object
containing information about the search.
"""
self.top_searcher = top_searcher
self.q = q
self.context = context
self.starttime = now()
self.runtime = None
self.docset = set()
def run(self):
# Collect matches for each sub-searcher
try:
for subsearcher, offset in self.top_searcher.leaf_searchers():
self.set_subsearcher(subsearcher, offset)
self.collect_matches()
finally:
self.finish()
def set_subsearcher(self, subsearcher, offset):
"""This method is called each time the collector starts on a new
sub-searcher.
Subclasses can override this to perform set-up work, but
they should still call the superclass's method because it sets several
necessary attributes on the collector object:
self.subsearcher
The current sub-searcher. If the top-level searcher is atomic, this
is the same as the top-level searcher.
self.offset
The document number offset of the current searcher. You must add
this number to the document number passed to
:meth:`Collector.collect` to get the top-level document number
for use in results.
self.matcher
A :class:`whoosh.matching.Matcher` object representing the matches
for the query in the current sub-searcher.
"""
self.subsearcher = subsearcher
self.offset = offset
self.matcher = self.q.matcher(subsearcher, self.context)
def computes_count(self):
"""Returns True if the collector naturally computes the exact number of
matching documents. Collectors that use block optimizations will return
False since they might skip blocks containing matching documents.
Note that if this method returns False you can still call :meth:`count`,
but it means that method might have to do more work to calculate the
number of matching documents.
"""
return True
def all_ids(self):
"""Returns a sequence of docnums matched in this collector. (Only valid
after the collector is run.)
The default implementation is based on the docset. If a collector does
not maintain the docset, it will need to override this method.
"""
return self.docset
def count(self):
"""Returns the total number of documents matched in this collector.
(Only valid after the collector is run.)
The default implementation is based on the docset. If a collector does
not maintain the docset, it will need to override this method.
"""
return len(self.docset)
def collect_matches(self):
"""This method calls :meth:`Collector.matches` and then for each
matched document calls :meth:`Collector.collect`. Sub-classes that
want to intervene between finding matches and adding them to the
collection (for example, to filter out certain documents) can override
this method.
"""
collect = self.collect
for sub_docnum in self.matches():
collect(sub_docnum)
@abstractmethod
def collect(self, sub_docnum):
"""This method is called for every matched document. It should do the
work of adding a matched document to the results, and it should return
an object to use as a "sorting key" for the given document (such as the
document's score, a key generated by a facet, or just None). Subclasses
must implement this method.
If you want the score for the current document, use
``self.matcher.score()``.
Overriding methods should add the current document offset
(``self.offset``) to the ``sub_docnum`` to get the top-level document
number for the matching document to add to results.
:param sub_docnum: the document number of the current match within the
current sub-searcher. You must add ``self.offset`` to this number
to get the document's top-level document number.
"""
raise NotImplementedError
@abstractmethod
def sort_key(self, sub_docnum):
"""Returns a sorting key for the current match. This should return the
same value returned by :meth:`Collector.collect`, but without the side
effect of adding the current document to the results.
If the collector has been prepared with ``context.needs_current=True``,
this method can use ``self.matcher`` to get information, for example
the score. Otherwise, it should only use the provided ``sub_docnum``,
since the matcher may be in an inconsistent state.
Subclasses must implement this method.
"""
raise NotImplementedError
    def remove(self, global_docnum):
        """Removes a document from the collector. Note that this method uses the
global document number as opposed to :meth:`Collector.collect` which
takes a segment-relative docnum.
"""
items = self.items
for i in xrange(len(items)):
if items[i][1] == global_docnum:
items.pop(i)
return
raise KeyError(global_docnum)
def _step_through_matches(self):
matcher = self.matcher
while matcher.is_active():
yield matcher.id()
matcher.next()
def matches(self):
"""Yields a series of relative document numbers for matches
in the current subsearcher.
"""
# We jump through a lot of hoops to avoid stepping through the matcher
# "manually" if we can because all_ids() is MUCH faster
if self.context.needs_current:
return self._step_through_matches()
else:
return self.matcher.all_ids()
def finish(self):
"""This method is called after a search.
        Subclasses can override this to perform clean-up work, but
they should still call the superclass's method because it sets several
necessary attributes on the collector object:
self.runtime
The time (in seconds) the search took.
"""
self.runtime = now() - self.starttime
def _results(self, items, **kwargs):
# Fills in a Results object with the invariant information and the
# given "items" (a list of (score, docnum) tuples)
r = Results(self.top_searcher, self.q, items, **kwargs)
r.runtime = self.runtime
r.collector = self
return r
@abstractmethod
def results(self):
"""Returns a :class:`~whoosh.searching.Results` object containing the
results of the search. Subclasses must implement this method
"""
raise NotImplementedError
# Scored collectors
class ScoredCollector(Collector):
"""Base class for collectors that sort the results based on document score.
"""
def __init__(self, replace=10):
"""
:param replace: Number of matches between attempts to replace the
matcher with a more efficient version.
"""
Collector.__init__(self)
self.replace = replace
def prepare(self, top_searcher, q, context):
# This collector requires a valid matcher at each step
Collector.prepare(self, top_searcher, q, context)
if top_searcher.weighting.use_final:
self.final_fn = top_searcher.weighting.final
else:
self.final_fn = None
# Heap containing top N (score, 0-docnum) pairs
self.items = []
# Minimum score a document must have to make it into the top N. This is
# used by the block-quality optimizations
self.minscore = 0
# Number of times the matcher was replaced (for debugging)
self.replaced_times = 0
# Number of blocks skipped by quality optimizations (for debugging)
self.skipped_times = 0
def sort_key(self, sub_docnum):
return 0 - self.matcher.score()
def _collect(self, global_docnum, score):
# Concrete subclasses should override this method to collect matching
# documents
raise NotImplementedError
def _use_block_quality(self):
# Concrete subclasses should override this method to return True if the
# collector should use block quality optimizations
return False
def collect(self, sub_docnum):
# Do common work to calculate score and top-level document number
global_docnum = self.offset + sub_docnum
score = self.matcher.score()
if self.final_fn:
score = self.final_fn(self.top_searcher, global_docnum, score)
# Call specialized method on subclass
return self._collect(global_docnum, score)
def matches(self):
minscore = self.minscore
matcher = self.matcher
usequality = self._use_block_quality()
replace = self.replace
replacecounter = 0
# A flag to indicate whether we should check block quality at the start
# of the next loop
checkquality = True
while matcher.is_active():
# If the replacement counter has reached 0, try replacing the
# matcher with a more efficient version
if replace:
if replacecounter == 0 or self.minscore != minscore:
self.matcher = matcher = matcher.replace(minscore or 0)
self.replaced_times += 1
if not matcher.is_active():
break
usequality = self._use_block_quality()
replacecounter = self.replace
if self.minscore != minscore:
checkquality = True
minscore = self.minscore
replacecounter -= 1
# If we're using block quality optimizations, and the checkquality
# flag is true, try to skip ahead to the next block with the
# minimum required quality
if usequality and checkquality and minscore is not None:
self.skipped_times += matcher.skip_to_quality(minscore)
# Skipping ahead might have moved the matcher to the end of the
# posting list
if not matcher.is_active():
break
yield matcher.id()
# Move to the next document. This method returns True if the
# matcher has entered a new block, so we should check block quality
# again.
checkquality = matcher.next()
class TopCollector(ScoredCollector):
"""A collector that only returns the top "N" scored results.
"""
def __init__(self, limit=10, usequality=True, **kwargs):
"""
:param limit: the maximum number of results to return.
:param usequality: whether to use block-quality optimizations. This may
be useful for debugging.
"""
ScoredCollector.__init__(self, **kwargs)
self.limit = limit
self.usequality = usequality
self.total = 0
def _use_block_quality(self):
return (self.usequality
and not self.top_searcher.weighting.use_final
and self.matcher.supports_block_quality())
def computes_count(self):
return not self._use_block_quality()
def all_ids(self):
# Since this collector can skip blocks, it doesn't track the total
# number of matching documents, so if the user asks for all matched
# docs we need to re-run the search using docs_for_query
return self.top_searcher.docs_for_query(self.q)
def count(self):
if self.computes_count():
return self.total
else:
return ilen(self.all_ids())
# ScoredCollector.collect calls this
def _collect(self, global_docnum, score):
items = self.items
self.total += 1
# Document numbers are negated before putting them in the heap so that
# higher document numbers have lower "priority" in the queue. Lower
# document numbers should always come before higher document numbers
# with the same score to keep the order stable.
if len(items) < self.limit:
# The heap isn't full, so add this document
heappush(items, (score, 0 - global_docnum))
# Negate score to act as sort key so higher scores appear first
return 0 - score
elif score > items[0][0]:
# The heap is full, but if this document has a high enough
# score to make the top N, add it to the heap
heapreplace(items, (score, 0 - global_docnum))
self.minscore = items[0][0]
# Negate score to act as sort key so higher scores appear first
return 0 - score
else:
return 0
def remove(self, global_docnum):
negated = 0 - global_docnum
items = self.items
# Remove the document if it's on the list (it may not be since
# TopCollector forgets documents that don't make the top N list)
for i in xrange(len(items)):
if items[i][1] == negated:
items.pop(i)
# Restore the heap invariant
heapify(items)
self.minscore = items[0][0] if items else 0
return
def results(self):
        # The items are stored (positive score, negative docnum) so the heap
# keeps the highest scores and lowest docnums, in order from lowest to
# highest. Since for the results we want the highest scores first,
# sort the heap in reverse order
items = self.items
items.sort(reverse=True)
# De-negate the docnums for presentation to the user
items = [(score, 0 - docnum) for score, docnum in items]
return self._results(items)
class UnlimitedCollector(ScoredCollector):
"""A collector that returns **all** scored results.
"""
def __init__(self, reverse=False):
ScoredCollector.__init__(self)
self.reverse = reverse
# ScoredCollector.collect calls this
def _collect(self, global_docnum, score):
self.items.append((score, global_docnum))
self.docset.add(global_docnum)
# Negate score to act as sort key so higher scores appear first
return 0 - score
def results(self):
# Sort by negated scores so that higher scores go first, then by
# document number to keep the order stable when documents have the
# same score
self.items.sort(key=lambda x: (0 - x[0], x[1]), reverse=self.reverse)
return self._results(self.items, docset=self.docset)
# Sorting collector
class SortingCollector(Collector):
"""A collector that returns results sorted by a given
:class:`whoosh.sorting.Facet` object. See :doc:`/facets` for more
information.
"""
def __init__(self, sortedby, limit=10, reverse=False):
"""
:param sortedby: see :doc:`/facets`.
:param reverse: If True, reverse the overall results. Note that you
can reverse individual facets in a multi-facet sort key as well.
"""
Collector.__init__(self)
self.sortfacet = sorting.MultiFacet.from_sortedby(sortedby)
self.limit = limit
self.reverse = reverse
def prepare(self, top_searcher, q, context):
self.categorizer = self.sortfacet.categorizer(top_searcher)
# If the categorizer requires a valid matcher, then tell the child
# collector that we need it
rm = context.needs_current or self.categorizer.needs_current
Collector.prepare(self, top_searcher, q, context.set(needs_current=rm))
# List of (sortkey, docnum) pairs
self.items = []
def set_subsearcher(self, subsearcher, offset):
Collector.set_subsearcher(self, subsearcher, offset)
self.categorizer.set_searcher(subsearcher, offset)
def sort_key(self, sub_docnum):
return self.categorizer.key_for(self.matcher, sub_docnum)
def collect(self, sub_docnum):
global_docnum = self.offset + sub_docnum
sortkey = self.sort_key(sub_docnum)
self.items.append((sortkey, global_docnum))
self.docset.add(global_docnum)
return sortkey
def results(self):
items = self.items
items.sort(reverse=self.reverse)
if self.limit:
items = items[:self.limit]
return self._results(items, docset=self.docset)
class UnsortedCollector(Collector):
def prepare(self, top_searcher, q, context):
Collector.prepare(self, top_searcher, q, context.set(weighting=None))
self.items = []
def collect(self, sub_docnum):
global_docnum = self.offset + sub_docnum
self.items.append((None, global_docnum))
self.docset.add(global_docnum)
def results(self):
items = self.items
return self._results(items, docset=self.docset)
# Wrapping collectors
class WrappingCollector(Collector):
"""Base class for collectors that wrap other collectors.
"""
def __init__(self, child):
self.child = child
@property
def top_searcher(self):
return self.child.top_searcher
@property
def context(self):
return self.child.context
def prepare(self, top_searcher, q, context):
self.child.prepare(top_searcher, q, context)
def set_subsearcher(self, subsearcher, offset):
self.child.set_subsearcher(subsearcher, offset)
self.subsearcher = subsearcher
self.matcher = self.child.matcher
self.offset = self.child.offset
def all_ids(self):
return self.child.all_ids()
def count(self):
return self.child.count()
def collect_matches(self):
for sub_docnum in self.matches():
self.collect(sub_docnum)
def sort_key(self, sub_docnum):
return self.child.sort_key(sub_docnum)
def collect(self, sub_docnum):
return self.child.collect(sub_docnum)
def remove(self, global_docnum):
return self.child.remove(global_docnum)
def matches(self):
return self.child.matches()
def finish(self):
self.child.finish()
def results(self):
return self.child.results()
# Allow and disallow collector
class FilterCollector(WrappingCollector):
"""A collector that lets you allow and/or restrict certain document numbers
in the results::
uc = collectors.UnlimitedCollector()
ins = query.Term("chapter", "rendering")
outs = query.Term("status", "restricted")
fc = FilterCollector(uc, allow=ins, restrict=outs)
mysearcher.search_with_collector(myquery, fc)
print(fc.results())
This collector discards a document if:
* The allowed set is not None and a document number is not in the set, or
* The restrict set is not None and a document number is in the set.
(So, if the same document number is in both sets, that document will be
discarded.)
If you have a reference to the collector, you can use
``FilterCollector.filtered_count`` to get the number of matching documents
filtered out of the results by the collector.
"""
def __init__(self, child, allow=None, restrict=None):
"""
:param child: the collector to wrap.
:param allow: a query, Results object, or set-like object containing
            document numbers that are allowed in the results, or None (meaning
everything is allowed).
:param restrict: a query, Results object, or set-like object containing
document numbers to disallow from the results, or None (meaning
nothing is disallowed).
"""
self.child = child
self.allow = allow
self.restrict = restrict
def prepare(self, top_searcher, q, context):
self.child.prepare(top_searcher, q, context)
allow = self.allow
restrict = self.restrict
ftc = top_searcher._filter_to_comb
self._allow = ftc(allow) if allow else None
self._restrict = ftc(restrict) if restrict else None
self.filtered_count = 0
def all_ids(self):
child = self.child
_allow = self._allow
_restrict = self._restrict
for global_docnum in child.all_ids():
if ((_allow and global_docnum not in _allow)
or (_restrict and global_docnum in _restrict)):
continue
yield global_docnum
def count(self):
child = self.child
if child.computes_count():
return child.count() - self.filtered_count
else:
return ilen(self.all_ids())
def collect_matches(self):
child = self.child
_allow = self._allow
_restrict = self._restrict
if _allow is not None or _restrict is not None:
filtered_count = self.filtered_count
for sub_docnum in child.matches():
global_docnum = self.offset + sub_docnum
if ((_allow is not None and global_docnum not in _allow)
or (_restrict is not None and global_docnum in _restrict)):
filtered_count += 1
continue
child.collect(sub_docnum)
self.filtered_count = filtered_count
else:
# If there was no allow or restrict set, don't do anything special,
# just forward the call to the child collector
child.collect_matches()
def results(self):
r = self.child.results()
r.filtered_count = self.filtered_count
r.allowed = self.allow
r.restricted = self.restrict
return r
# Facet grouping collector
class FacetCollector(WrappingCollector):
"""A collector that creates groups of documents based on
:class:`whoosh.sorting.Facet` objects. See :doc:`/facets` for more
information.
This collector is used if you specify a ``groupedby`` parameter in the
:meth:`whoosh.searching.Searcher.search` method. You can use the
:meth:`whoosh.searching.Results.groups` method to access the facet groups.
    If you have a reference to the collector you can also use
    ``FacetCollector.facetmaps`` to access the groups directly::
uc = collectors.UnlimitedCollector()
        fc = FacetCollector(uc, sorting.FieldFacet("category"))
mysearcher.search_with_collector(myquery, fc)
print(fc.facetmaps)
"""
def __init__(self, child, groupedby, maptype=None):
"""
:param groupedby: see :doc:`/facets`.
:param maptype: a :class:`whoosh.sorting.FacetMap` type to use for any
facets that don't specify their own.
"""
self.child = child
self.facets = sorting.Facets.from_groupedby(groupedby)
self.maptype = maptype
def prepare(self, top_searcher, q, context):
facets = self.facets
# For each facet we're grouping by:
# - Create a facetmap (to hold the groups)
# - Create a categorizer (to generate document keys)
self.facetmaps = {}
self.categorizers = {}
# Set needs_current to True if any of the categorizers require the
# current document to work
needs_current = context.needs_current
for facetname, facet in facets.items():
self.facetmaps[facetname] = facet.map(self.maptype)
ctr = facet.categorizer(top_searcher)
self.categorizers[facetname] = ctr
needs_current = needs_current or ctr.needs_current
context = context.set(needs_current=needs_current)
self.child.prepare(top_searcher, q, context)
def set_subsearcher(self, subsearcher, offset):
WrappingCollector.set_subsearcher(self, subsearcher, offset)
# Tell each categorizer about the new subsearcher and offset
for categorizer in itervalues(self.categorizers):
categorizer.set_searcher(self.child.subsearcher, self.child.offset)
def collect(self, sub_docnum):
matcher = self.child.matcher
global_docnum = sub_docnum + self.child.offset
# We want the sort key for the document so we can (by default) sort
# the facet groups
sortkey = self.child.collect(sub_docnum)
# For each facet we're grouping by
for name, categorizer in iteritems(self.categorizers):
add = self.facetmaps[name].add
# We have to do more work if the facet allows overlapping groups
if categorizer.allow_overlap:
for key in categorizer.keys_for(matcher, sub_docnum):
add(categorizer.key_to_name(key), global_docnum, sortkey)
else:
key = categorizer.key_for(matcher, sub_docnum)
key = categorizer.key_to_name(key)
add(key, global_docnum, sortkey)
return sortkey
def results(self):
r = self.child.results()
r._facetmaps = self.facetmaps
return r
# Collapsing collector
class CollapseCollector(WrappingCollector):
"""A collector that collapses results based on a facet. That is, it
eliminates all but the top N results that share the same facet key.
Documents with an empty key for the facet are never eliminated.
The "top" results within each group is determined by the result ordering
(e.g. highest score in a scored search) or an optional second "ordering"
facet.
If you have a reference to the collector you can use
``CollapseCollector.collapsed_counts`` to access the number of documents
eliminated based on each key::
tc = TopCollector(limit=20)
cc = CollapseCollector(tc, "group", limit=3)
mysearcher.search_with_collector(myquery, cc)
print(cc.collapsed_counts)
See :ref:`collapsing` for more information.
"""
def __init__(self, child, keyfacet, limit=1, order=None):
"""
:param child: the collector to wrap.
:param keyfacet: a :class:`whoosh.sorting.Facet` to use for collapsing.
All but the top N documents that share a key will be eliminated
from the results.
:param limit: the maximum number of documents to keep for each key.
:param order: an optional :class:`whoosh.sorting.Facet` to use
to determine the "top" document(s) to keep when collapsing. The
            default (``order=None``) uses the results order (e.g. the
highest score in a scored search).
"""
self.child = child
self.keyfacet = sorting.MultiFacet.from_sortedby(keyfacet)
self.limit = limit
if order:
self.orderfacet = sorting.MultiFacet.from_sortedby(order)
else:
self.orderfacet = None
def prepare(self, top_searcher, q, context):
# Categorizer for getting the collapse key of a document
self.keyer = self.keyfacet.categorizer(top_searcher)
# Categorizer for getting the collapse order of a document
self.orderer = None
if self.orderfacet:
self.orderer = self.orderfacet.categorizer(top_searcher)
# Dictionary mapping keys to lists of (sortkey, global_docnum) pairs
# representing the best docs for that key
self.lists = defaultdict(list)
# Dictionary mapping keys to the number of documents that have been
# filtered out with that key
self.collapsed_counts = defaultdict(int)
# Total number of documents filtered out by collapsing
self.collapsed_total = 0
# If the keyer or orderer require a valid matcher, tell the child
# collector we need it
needs_current = (context.needs_current
or self.keyer.needs_current
or (self.orderer and self.orderer.needs_current))
self.child.prepare(top_searcher, q,
context.set(needs_current=needs_current))
def set_subsearcher(self, subsearcher, offset):
WrappingCollector.set_subsearcher(self, subsearcher, offset)
# Tell the keyer and (optional) orderer about the new subsearcher
self.keyer.set_searcher(subsearcher, offset)
if self.orderer:
self.orderer.set_searcher(subsearcher, offset)
def all_ids(self):
child = self.child
limit = self.limit
counters = defaultdict(int)
for subsearcher, offset in child.subsearchers():
self.set_subsearcher(subsearcher, offset)
matcher = child.matcher
keyer = self.keyer
for sub_docnum in child.matches():
ckey = keyer.key_for(matcher, sub_docnum)
if ckey is not None:
if ckey in counters and counters[ckey] >= limit:
continue
else:
counters[ckey] += 1
yield offset + sub_docnum
def count(self):
if self.child.computes_count():
return self.child.count() - self.collapsed_total
else:
return ilen(self.all_ids())
def collect_matches(self):
lists = self.lists
limit = self.limit
keyer = self.keyer
orderer = self.orderer
collapsed_counts = self.collapsed_counts
child = self.child
matcher = child.matcher
offset = child.offset
for sub_docnum in child.matches():
# Collapsing category key
ckey = keyer.key_to_name(keyer.key_for(matcher, sub_docnum))
if not ckey:
# If the document isn't in a collapsing category, just add it
child.collect(sub_docnum)
else:
global_docnum = offset + sub_docnum
if orderer:
# If user specified a collapse order, use it
sortkey = orderer.key_for(child.matcher, sub_docnum)
else:
# Otherwise, use the results order
sortkey = child.sort_key(sub_docnum)
# Current list of best docs for this collapse key
best = lists[ckey]
add = False
if len(best) < limit:
# If the heap is not full yet, just add this document
add = True
elif sortkey < best[-1][0]:
# If the heap is full but this document has a lower sort
# key than the highest key currently on the heap, replace
# the "least-best" document
# Tell the child collector to remove the document
child.remove(best.pop()[1])
add = True
if add:
insort(best, (sortkey, global_docnum))
child.collect(sub_docnum)
else:
# Remember that a document was filtered
collapsed_counts[ckey] += 1
self.collapsed_total += 1
def results(self):
r = self.child.results()
r.collapsed_counts = self.collapsed_counts
return r
# Time limit collector
class TimeLimitCollector(WrappingCollector):
"""A collector that raises a :class:`TimeLimit` exception if the search
does not complete within a certain number of seconds::
uc = collectors.UnlimitedCollector()
        tlc = TimeLimitCollector(uc, timelimit=5.8)
try:
mysearcher.search_with_collector(myquery, tlc)
except collectors.TimeLimit:
print("The search ran out of time!")
# We can still get partial results from the collector
print(tlc.results())
    IMPORTANT: On Unix systems (systems where signal.SIGALRM is defined), the
    code uses signals to stop searching immediately when the time limit is
    reached. On Windows, the OS does not support this functionality, so the
    search only checks the time between each found document; if a matcher is
    slow, the search could exceed the time limit.
"""
def __init__(self, child, timelimit, greedy=False, use_alarm=True):
"""
:param child: the collector to wrap.
:param timelimit: the maximum amount of time (in seconds) to
allow for searching. If the search takes longer than this, it will
raise a ``TimeLimit`` exception.
:param greedy: if ``True``, the collector will finish adding the most
recent hit before raising the ``TimeLimit`` exception.
:param use_alarm: if ``True`` (the default), the collector will try to
use signal.SIGALRM (on UNIX).
"""
self.child = child
self.timelimit = timelimit
self.greedy = greedy
if use_alarm:
import signal
self.use_alarm = use_alarm and hasattr(signal, "SIGALRM")
else:
self.use_alarm = False
self.timer = None
self.timedout = False
def prepare(self, top_searcher, q, context):
self.child.prepare(top_searcher, q, context)
self.timedout = False
if self.use_alarm:
import signal
signal.signal(signal.SIGALRM, self._was_signaled)
# Start a timer thread. If the timer fires, it will call this object's
# _timestop() method
self.timer = threading.Timer(self.timelimit, self._timestop)
self.timer.start()
def _timestop(self):
# Called when the timer expires
self.timer = None
# Set an attribute that will be noticed in the collect_matches() loop
self.timedout = True
if self.use_alarm:
import signal
os.kill(os.getpid(), signal.SIGALRM)
def _was_signaled(self, signum, frame):
raise TimeLimit
def collect_matches(self):
child = self.child
greedy = self.greedy
for sub_docnum in child.matches():
# If the timer fired since the last loop and we're not greedy,
# raise the exception
if self.timedout and not greedy:
raise TimeLimit
child.collect(sub_docnum)
# If the timer fired since we entered the loop or it fired earlier
# but we were greedy, raise now
if self.timedout:
raise TimeLimit
def finish(self):
if self.timer:
self.timer.cancel()
self.timer = None
self.child.finish()
# Matched terms collector
class TermsCollector(WrappingCollector):
"""A collector that remembers which terms appeared in which terms appeared
in each matched document.
This collector is used if you specify ``terms=True`` in the
:meth:`whoosh.searching.Searcher.search` method.
    If you have a reference to the collector you can also use
    ``TermsCollector.termdocs`` and ``TermsCollector.docterms`` to access the
    matched terms directly::
uc = collectors.UnlimitedCollector()
tc = TermsCollector(uc)
mysearcher.search_with_collector(myquery, tc)
# tc.termdocs is a dictionary mapping (fieldname, text) tuples to
# sets of document numbers
print(tc.termdocs)
# tc.docterms is a dictionary mapping docnums to lists of
# (fieldname, text) tuples
print(tc.docterms)
"""
def __init__(self, child, settype=set):
self.child = child
self.settype = settype
def prepare(self, top_searcher, q, context):
# This collector requires a valid matcher at each step
self.child.prepare(top_searcher, q, context.set(needs_current=True))
# A dictionary mapping (fieldname, text) pairs to arrays of docnums
self.termdocs = defaultdict(lambda: array("I"))
# A dictionary mapping docnums to lists of (fieldname, text) pairs
self.docterms = defaultdict(list)
def set_subsearcher(self, subsearcher, offset):
WrappingCollector.set_subsearcher(self, subsearcher, offset)
# Store a list of all the term matchers in the matcher tree
self.termmatchers = list(self.child.matcher.term_matchers())
def collect(self, sub_docnum):
child = self.child
termdocs = self.termdocs
docterms = self.docterms
child.collect(sub_docnum)
global_docnum = child.offset + sub_docnum
# For each term matcher...
for tm in self.termmatchers:
# If the term matcher is matching the current document...
if tm.is_active() and tm.id() == sub_docnum:
# Add it to the list of matching documents for the term
term = tm.term()
termdocs[term].append(global_docnum)
docterms[global_docnum].append(term)
def results(self):
r = self.child.results()
r.termdocs = dict(self.termdocs)
r.docterms = dict(self.docterms)
return r
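# Example composition (a minimal sketch, not part of the original module). It
# assumes an already-open searcher ``s`` and a parsed query ``q``, and simply
# wires the wrapping collectors defined above around a plain TopCollector to
# show how they compose; the "category" field name is a made-up example.
def _example_collapsed_timed_search(s, q):
    base = TopCollector(limit=20)
    collapsed = CollapseCollector(base, "category", limit=1)
    timed = TimeLimitCollector(collapsed, timelimit=2.0)
    try:
        s.search_with_collector(q, timed)
    except TimeLimit:
        pass  # partial results gathered before the limit are still available
    return timed.results()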
|
from django.apps import AppConfig
from django.utils.module_loading import autodiscover_modules
from django.utils.translation import gettext_lazy as _
from .settings import MODULE_INIT_DEFAULT
class UwsgifyConfig(AppConfig):
name = 'uwsgiconf.contrib.django.uwsgify'
verbose_name = _('uWSGI Integration')
def ready(self):
try:
            import uwsgi  # noqa: F401 -- imported only to detect a uWSGI environment
            # This handles init-module discovery for the non-embedded case.
            # The embedded case is handled in toolbox.SectionMutator.mutate
            # via an import in the master process.
autodiscover_modules(MODULE_INIT_DEFAULT)
except ImportError:
pass
|
"""Appication sub-layer for list synchronization."""
import collections
import itertools
import logging
from typing import Deque, Generic, List, Optional, Type, TypeVar
import attr
from typing_extensions import Protocol
from ventserver.protocols import events
from ventserver.protocols import exceptions
from ventserver.sansio import channels
from ventserver.sansio import protocols
# Structural Subtypes
class Indexed(Protocol):
"""Structural subtype for protobuf messages with an id field."""
id: int # pylint: disable=invalid-name
_ListElement = TypeVar('_ListElement', bound=Indexed)
class ListSegment(Protocol[_ListElement]):
"""Structural subtype for protobuf messages containing a list segment."""
next_expected: int
total: int
remaining: int
elements: List[_ListElement]
# List Sending
@attr.s
class UpdateEvent(events.Event, Generic[_ListElement]):
"""State update event."""
next_expected: Optional[int] = attr.ib(default=None)
new_element: Optional[_ListElement] = attr.ib(default=None)
def has_data(self) -> bool:
"""Return whether the event has data."""
return self.next_expected is not None or self.new_element is not None
@attr.s
class SendSynchronizer(
protocols.Filter[UpdateEvent[_ListElement], ListSegment[_ListElement]]
):
"""List synchronization filter for sending lists.
Inputs are descriptions received from the peer about the id of the next
list element it expects as well as any new element to add to the list.
The id of the next expected list element should never decrease, and the
id of the next element to add to the list should never decrease (and it
should preferably increment by 1).
Outputs are the next elements to send to the peer.
"""
ID_TYPE_SIZE = 32
MAX_ID = 2 ** ID_TYPE_SIZE - 1 # max for uint32
_logger = logging.getLogger('.'.join((__name__, 'Synchronizer')))
segment_type: Type[ListSegment[_ListElement]] = attr.ib()
max_len: int = attr.ib(default=MAX_ID + 1)
max_segment_len: int = attr.ib(default=256)
_elements: Deque[_ListElement] = attr.ib(factory=collections.deque)
_next_expected: int = attr.ib(default=0)
_next_expected_index: int = attr.ib(default=0)
_buffer: channels.DequeChannel[UpdateEvent[_ListElement]] = attr.ib(
factory=channels.DequeChannel
)
@max_len.validator
def _validate_max_len(self, _: 'attr.Attribute[int]', value: int) -> None:
"""Ensure that the max len field has an allowed value."""
if value < 0 or value > self.MAX_ID + 1:
raise ValueError(
'max_len must be between 0 and {}'.format(self.MAX_ID + 1)
)
def input(self, event: Optional[UpdateEvent[_ListElement]]) -> None:
"""Handle input events."""
if event is None or not event.has_data():
return
if event.new_element is not None:
self._add_element(event.new_element)
if event.next_expected is None:
return
self._buffer.input(event)
def output(self) -> Optional[ListSegment[_ListElement]]:
"""Emit the next output event."""
event = self._buffer.output()
if event is None:
return None
if event.next_expected is None:
return None
if event.next_expected < self._next_expected:
raise exceptions.ProtocolDataError(
'Next expected event id cannot decrease from {} to {}!'
.format(self._next_expected, event.next_expected)
)
self._next_expected = event.next_expected
self._advance_next_expected_index()
output_event = self.segment_type()
output_event.next_expected = self._next_expected
output_event.total = len(self._elements)
output_event.elements = list(itertools.islice(
self._elements,
self._next_expected_index,
self._next_expected_index + self.max_segment_len
))
output_event.remaining = len(self._elements) - self._next_expected_index
self._logger.debug('Sending: %s', output_event)
return output_event
def _advance_next_expected_index(self) -> None:
"""Advance the next_expected index to the next expected id."""
self._next_expected_index = next(
(
i for (i, elem) in enumerate(self._elements)
if elem.id >= self._next_expected
), len(self._elements)
)
def _add_element(self, element: _ListElement) -> None:
"""Attempt to add the element to the end of the list."""
while len(self._elements) >= self.max_len:
oldest_element = self._elements.popleft()
if self._next_expected_index > 0:
self._next_expected_index -= 1
else:
self._logger.warning(
'Discarding without sending: %s', oldest_element
)
self._elements.append(element)
self._logger.debug('Added: %s', element)
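# Minimal usage sketch (not part of the original module). FakeElement and
# FakeSegment are hypothetical stand-ins for protobuf messages that satisfy
# the Indexed and ListSegment structural subtypes defined above.
@attr.s(auto_attribs=True)
class FakeElement:
    id: int = 0
@attr.s(auto_attribs=True)
class FakeSegment:
    next_expected: int = 0
    total: int = 0
    remaining: int = 0
    elements: List[FakeElement] = attr.ib(factory=list)
def _demo_send_synchronizer() -> None:
    """Feed a few elements in and emit one segment."""
    sync = SendSynchronizer(segment_type=FakeSegment, max_segment_len=2)
    for i in range(3):
        sync.input(UpdateEvent(next_expected=0, new_element=FakeElement(id=i)))
    segment = sync.output()
    if segment is not None:
        # total=3, remaining=3, elements carry ids [0, 1] (capped by max_segment_len)
        print(segment.total, segment.remaining, [e.id for e in segment.elements])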
|
from scfmsp.controlflowanalysis.AbstractInstruction import AbstractInstruction
from scfmsp.controlflowanalysis.StatusRegister import StatusRegister
class InstructionRra(AbstractInstruction):
name = 'rra'
def get_execution_time(self):
oplist = self.oplist.split()
if(self.register_mode):
return 1
if(self.indexed_mode):
if(oplist[0][1] == '3'): # constant generator -----------------
return 1
else:
return 4
if(self.indirect_mode or self.immediate_mode):
if(oplist[0][1] == '2' or oplist[0][1] == '3'):
return 1
else:
return 3
def execute_judgment(self, ac):
if(self.register_mode):
domain = ac.ra.get(self.arguments[0]) & ac.secenv.get(self.get_execution_point()) & ac.sra.get(StatusRegister.CARRY)
ac.ra.set(self.arguments[0], domain)
else:
domain = ac.mem & ac.secenv.get(self.get_execution_point()) & ac.sra.get(StatusRegister.CARRY)
ac.mem = domain
ac.sra.set(StatusRegister.CARRY, domain)
ac.sra.set(StatusRegister.ZERO, domain)
ac.sra.set(StatusRegister.Negative, domain)
ac.sra.set(StatusRegister.Overflow, domain)
|
"""Helpful constants, functions, and classes for use of the Publican."""
from calendar import isleap, mdays
from datetime import date, timedelta
from decimal import Decimal
# Conveniences when working with decimals.
zero = Decimal('0.00')
zero_places = Decimal('1')
two_places = Decimal('1.00')
def dollars(n):
return (n or zero).quantize(zero_places)
def cents(n):
return (n or zero).quantize(two_places)
# Supercharged versions of the date and timedelta, that have the
# additional advantage that they are capitalized and so stop getting
# confused with my variables. (I use "date" a lot.)
class Date(date):
def next_business_day(self):
w = self.weekday()
if w < 6:
return self
return self + Interval(days=7 - w)
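    # Example: Date(2021, 8, 1) is a Sunday (weekday() == 6), so
    # next_business_day() returns Date(2021, 8, 2); any Monday-Saturday
    # date is returned unchanged.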
class Interval(timedelta):
pass
# Periods of time have both a beginning and an end.
class Period(object):
name = None
def __init__(self, start=None, end=None):
self.start = start
self.end = end
class Year(Period):
def __init__(self, number):
self.number = number
self.start = Date(number, 1, 1)
self.end = Date(number, 12, 31)
self.name = unicode(number)
def __unicode__(self):
return self.name
def next(self):
return Year(self.number + 1)
class Month(Period):
def __init__(self, year, number):
self.year = year
self.number = number
self.start = Date(year, number, 1)
bump = (number == 2) and isleap(year)
self.end = Date(year, number, mdays[number] + bump)
self.name = u'{}-{:02}'.format(year, number)
def next(self):
if self.number == 12:
return Month(self.year + 1, 1)
return Month(self.year, self.number + 1)
class Quarter(Period):
def __init__(self, year, number):
self.year = year
self.number = number
month = number * 3
self.start = Date(year, month - 2, 1)
        self.end = Date(year, month, mdays[month])  # safe: a quarter never ends in February
self.name = u'{}-Q{}'.format(self.year, self.number)
def next(self):
if self.number == 4:
return Quarter(self.year + 1, 1)
return Quarter(self.year, self.number + 1)
# Routines for working with Periods.
_die = object()
def get_period(name, default=_die):
"""Return the period with the given name."""
a = str(name).split('-')
if len(a) == 2:
year = int(a[0])
if a[1].startswith('Q'):
number = int(a[1][1:])
return Quarter(year, number)
else:
return Month(year, int(a[1]))
elif len(a) == 1:
year = int(a[0])
return Year(year)
if default is not _die:
return default
raise ValueError('there is no Period named {!r}'.format(name))
def years_range(start, end):
"""Return the years from date `start` to `end` inclusive."""
number = start.year
year = Year(number)
yield year
while year.end < end:
year = year.next()
yield year
def quarters_range(start, end):
"""Return the quarters from date `start` to `end` inclusive."""
year = start.year
number = (start.month + 2) // 3
quarter = Quarter(year, number)
yield quarter
while quarter.end < end:
quarter = quarter.next()
yield quarter
def months_range(start, end):
"""Return the months from date `start` to `end` inclusive."""
month = Month(start.year, start.month)
yield month
while month.end < end:
month = month.next()
yield month
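# Minimal usage sketch (not part of the original module), exercising
# get_period() and months_range() with made-up dates.
if __name__ == '__main__':
    q = get_period('2019-Q3')
    print('{} {} {}'.format(q.name, q.start, q.end))   # 2019-Q3 2019-07-01 2019-09-30
    for m in months_range(Date(2019, 11, 1), Date(2020, 2, 1)):
        print(m.name)                                   # 2019-11, 2019-12, 2020-01, 2020-02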
|
import logging
import os
from flask_restful import Resource, request
from jsonschema import ValidationError
from server.database import db
from server.database.queries.executions import get_execution
from server.resources.decorators import login_required, marshal_response
from server.database.models.execution import Execution, ExecutionStatus
from server.common.error_codes_and_messages import (
ErrorCodeAndMessageFormatter, ErrorCodeAndMessageAdditionalDetails,
EXECUTION_NOT_FOUND, UNAUTHORIZED, CORRUPTED_EXECUTION, UNEXPECTED_ERROR,
CANNOT_REPLAY_EXECUTION, UNSUPPORTED_DESCRIPTOR_TYPE)
from server.resources.helpers.executions import (
get_execution_as_model, get_descriptor_path, get_absolute_path_inputs_path)
from server.resources.helpers.execution_play import start_execution
from server.resources.models.descriptor.descriptor_abstract import Descriptor
class ExecutionPlay(Resource):
@login_required
@marshal_response()
def put(self, user, execution_identifier):
execution_db = get_execution(execution_identifier, db.session)
if not execution_db:
return ErrorCodeAndMessageFormatter(EXECUTION_NOT_FOUND,
execution_identifier)
if execution_db.creator_username != user.username:
return UNAUTHORIZED
if execution_db.status != ExecutionStatus.Initializing:
return ErrorCodeAndMessageFormatter(CANNOT_REPLAY_EXECUTION,
execution_db.status.name)
execution, error = get_execution_as_model(user.username, execution_db)
if error:
return CORRUPTED_EXECUTION
# Get the descriptor path
descriptor_path = get_descriptor_path(user.username,
execution.identifier)
        # Get the appropriate descriptor object
descriptor = Descriptor.descriptor_factory_from_type(
execution_db.descriptor)
if not descriptor:
# We don't have any descriptor defined for this pipeline type
logger = logging.getLogger('server-error')
logger.error(
"Unsupported descriptor type extracted from file at {}".format(
descriptor_path))
return ErrorCodeAndMessageFormatter(UNSUPPORTED_DESCRIPTOR_TYPE,
execution_db.descriptor)
modified_inputs_path = get_absolute_path_inputs_path(
user.username, execution.identifier)
if not os.path.isfile(modified_inputs_path):
logger = logging.getLogger('server-error')
logger.error("Absolute path inputs file not found at {}".format(
descriptor_path))
return UNEXPECTED_ERROR
# The execution is valid and we are now ready to start it
start_execution(user, execution, descriptor, modified_inputs_path)
|
def is_match(text, pattern):
"""Basic Regex Parser that only includes '.' and '*'. Case sensitive."""
def match(t_char, p_char):
"""Defines the comparision between characters in the text and pattern."""
if t_char == p_char or p_char == ".":
return True
return False
# Scratch Work
# . - wildcard, anything you want
    # * - zero or more of the preceding character
# Ab*, "Abbb"
# true
# only letters
# case sensitive
# text = "aa", pattern = "a"
# False => t != p
# Simple Str Matching
# ite
# p "a"
# ^
# t = "aa"
# ^
# letters
# length
    # the text can also have 0 or more instances of the char preceding '*'
# Example
# text = "aa", pattern = "aa" --> T
# "abc", pattern = "a.c" --> T
# text = "acd", pattern = "ab*c." -
# t = "acd"
# ^
# p = "ab*c."
# ^
# ac, abc, abbc, abbbc, abbbbc
# t =;
# ^
# p = ;
# . ^
# t = abcdf;
# s = a.*;
#
# Use Recursion?
# Base Cases
# . ---> T
# a --> p_char == t_char
# Recursive Cases
# . b*
# .*
# text = "acd", pattern = "ab*c."
# ^ ^
# Pseudocode - 2 pointers
# init 2 pointers at start of p and t
p_idx, t_idx = 0, 0
# iterate over p
while p_idx < len(pattern):
p_char = pattern[p_idx]
# check for an * after the pointer in p
if p_idx < len(pattern) - 1 and pattern[p_idx + 1] == "*":
# do a while loop in t to find how many of the pchar we have
num_p_char_in_text = 0
while t_idx < len(text) and match(text[t_idx], p_char):
num_p_char_in_text += 1
t_idx += 1
            # '*' matches zero or more occurrences, so any count works;
            # move the pattern pointer past the "x*" pair
            if num_p_char_in_text >= 0:
                p_idx += 2
        # if p_char == '.' or p_char == t_char --> True
        elif t_idx < len(text) and match(text[t_idx], p_char):
            # move ahead both pointers
            p_idx += 1
            t_idx += 1
        # if we ran out of text or the letters differ --> False
        else:
            return False
# at the end, we should reach the end of both strings
return t_idx == len(text)
# Time: O(max(t, p)) - where t = length of text, and p = length of the pattern
# Space: O(1)
if __name__ == "__main__":
# Test Cases
assert is_match("", "") == True
assert is_match("aa", "") == False
assert is_match("bb", "bb") == True
assert is_match("", "a*") == True
assert is_match("abbdbb", "ab*d") == False
assert is_match("aba", "a.a") == True
assert is_match("acd", "ab*c.") == True
assert is_match("abaa", "a.*a*") == True
|
# Ordering
def cascade(n):
"""Print a cascade of prefixes of n.
>>> cascade(1234)
1234
123
12
1
12
123
1234
"""
if n < 10:
print(n)
else:
print(n)
cascade(n//10)
print(n)
def cascade2(n):
"""Print a cascade of prefixes of n."""
print(n)
if n >= 10:
        cascade2(n//10)
print(n)
def inverse_cascade(n):
"""Print an inverse cascade of prefixes of n.
>>> inverse_cascade(1234)
1
12
123
1234
123
12
1
"""
grow(n)
print(n)
shrink(n)
def f_then_g(f, g, n):
if n:
f(n)
g(n)
grow = lambda n: f_then_g(grow, print, n//10)
shrink = lambda n: f_then_g(print, shrink, n//10)
# Tree recursion
def fib(n):
"""Compute the nth Fibonacci number.
>>> fib(8)
21
"""
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib(n-2) + fib(n-1)
def count_partitions(n, m):
"""Count the partitions of n using parts up to size m.
>>> count_partitions(6, 4)
9
>>> count_partitions(10, 10)
42
"""
if n == 0:
return 1
elif n < 0:
return 0
elif m == 0:
return 0
else:
with_m = count_partitions(n-m, m)
without_m = count_partitions(n, m-1)
return with_m + without_m
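# Worked example of the with_m / without_m split above (comment-only sketch):
#   count_partitions(3, 2)
#     = count_partitions(1, 2)   # partitions of 3 that use at least one 2: 2+1
#     + count_partitions(3, 1)   # partitions of 3 with no part larger than 1: 1+1+1
#     = 1 + 1
#     = 2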
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
https://github.com/cgloeckner/pyvtt/
Copyright (c) 2020-2021 Christian Glöckner
License: MIT (see LICENSE for details)
"""
import unittest, tempfile, pathlib
import utils
class FancyUrlApiTest(unittest.TestCase):
def setUp(self):
# create temporary directory
self.tmpdir = tempfile.TemporaryDirectory()
root = pathlib.Path(self.tmpdir.name)
self.paths = utils.PathApi(appname='unittest', root=root)
self.urls = utils.FancyUrlApi(self.paths)
def tearDown(self):
del self.urls
del self.paths
del self.tmpdir
def test___call__(self):
urls = list()
for i in range(100):
url = self.urls()
parts = url.split('-')
self.assertEqual(len(parts), 3)
urls.append(url)
        # expect multiple URLs (presumably many of them, but extreme collisions may happen, so require at least 2)
self.assertGreaterEqual(len(urls), 2)
|
import re
import sys
import os
import unicodecsv as csv
import pandas as pd
import itertools
import numpy as np
from collections import Counter
import science.roles as r
roles = r.Roles()
empl_path = sys.argv[1] #directory of csv files to process
#target = sys.argv[2] #csv file name to write
## Iterate.
empl_file_lst = os.listdir(empl_path)
for empl_file_name in empl_file_lst:
empl_file = empl_path + '/' +empl_file_name
updated = []
with open(empl_file,"rb") as f:
reader = csv.reader(f,encoding='utf-8',escapechar='',delimiter='\t')
current_id = ''
entries_per_person = []
max_edu = 0
edu_dpmt = set()
for idx, entry in enumerate(itertools.chain(reader,[[None]*33])):
if idx == np.inf: # End point
break
# Some datasets have extra column of employee names
# Drop entry[1] if it contains employee name instead of birth year
if len(entry) > 33:
del entry[1]
if entry[0] != current_id:
# append and reset
e_dpmt = ','
e_dpmt = e_dpmt.join(edu_dpmt)
for e in entries_per_person:
e[8] = max_edu
e[29] = e_dpmt # I think 29th column had no use previously?
updated.append(e)
current_id = entry[0]
del entries_per_person[:]
max_edu = 0
edu_dpmt.clear()
if entry[26] == 'False': # employment data
match = re.search(r",\s?([\s\w,]*)",entry[17])
if (match is not None):
normalized_title = match.group(1)
else:
normalized_title = entry[17]
try:
entry[18] = roles.parse_work(normalized_title).departments.pop() #is this appropriate?
                except Exception:
                    # leave entry[18] unchanged if the title cannot be parsed
                    pass
entries_per_person.append(entry)
elif entry[26] == 'True': #education data
query = roles.parse_edu(entry[20], entry[17])
max_edu = max(max_edu, query.level)
# add department
for f in query.faculties:
edu_dpmt.add(f)
entries_per_person.append(entry)
with open(empl_file + "_updated.csv", "w", newline="") as f:
writer = csv.writer(f, dialect = csv.excel_tab)
writer.writerows(updated)
|
import sys
from hmac import new as sign
from hashlib import sha256
from .validators import *
PY2 = True if sys.version_info.major == 2 else False
if PY2:
from urlparse import urlparse, urlunparse, parse_qs
from base64 import encodestring as encode
else:
from urllib.parse import urlparse, urlunparse, parse_qs
from base64 import encodebytes as encode
def string_format(value):
if isinstance(value, bytes):
return ''.join(chr(x) for x in bytearray(value))
if isinstance(value, (int, float, bool)):
return str(value)
if isinstance(value, list):
return [string_format(x) for x in value]
if PY2 == True and isinstance(value, unicode):
return value.decode('utf-8')
return value
def get_map_from_query(query):
res_map = dict()
for key, value in parse_qs(query, keep_blank_values=True).items():
res_map[string_format(key)] = string_format(value)
return res_map
def get_sorted_query_string(params):
keys = sorted(params.keys())
res_params = []
for key in keys:
value = params[key]
if isinstance(value, list):
res_params.append(
'&'.join(['{}={}'.format(string_format(key), val) for val in sorted(string_format(value))]))
else:
res_params.append('{}={}'.format(string_format(key), string_format(value)))
return '&'.join(res_params)
def get_sorted_params_string(params):
keys = sorted(params.keys())
res_params = []
for key in keys:
value = params[key]
if isinstance(value, list):
res_params.append(
''.join(['{}{}'.format(string_format(key), val) for val in sorted(string_format(value))]))
elif isinstance(value, dict):
res_params.append('{}{}'.format(string_format(key), get_sorted_params_string(value)))
else:
res_params.append('{}{}'.format(string_format(key), string_format(value)))
return ''.join(res_params)
def construct_get_url(uri, params, empty_post_params=True):
parsed_uri = urlparse(uri.encode('utf-8'))
base_url = urlunparse((parsed_uri.scheme.decode('utf-8'),
parsed_uri.netloc.decode('utf-8'),
parsed_uri.path.decode('utf-8'), '', '',
'')).encode('utf-8')
params.update(get_map_from_query(parsed_uri.query))
query_params = get_sorted_query_string(params)
if len(query_params) > 0 or not empty_post_params:
base_url = base_url + bytearray('?' + query_params, 'utf-8')
if len(query_params) > 0 and not empty_post_params:
base_url = base_url + bytearray('.', 'utf-8')
return base_url
def construct_post_url(uri, params):
base_url = construct_get_url(uri, dict(), True if len(params) == 0 else False)
return base_url + bytearray(get_sorted_params_string(params), 'utf-8')
def get_signature_v3(auth_token, base_url, nonce):
base_url = bytearray('{}.{}'.format(string_format(base_url), string_format(nonce)), 'utf-8')
try:
return encode(sign(auth_token, base_url, sha256).digest()).strip()
except TypeError:
return encode(sign(bytearray(auth_token, 'utf-8'), base_url, sha256).digest()).strip()
@validate_args(
method=[all_of(of_type(six.text_type), is_in(('POST', 'GET'), case_sensitive=False))],
uri=[is_url()],
params=[optional(of_type(dict))],
nonce=[of_type(six.text_type)],
auth_token=[of_type(six.text_type)],
v3_signature=[of_type(six.text_type)],
)
def validate_v3_signature(method, uri, nonce, auth_token, v3_signature, params=None):
"""
Validates V3 Signature received from Plivo to your server
:param method: Your callback method
:param uri: Your callback URL
:param params: Params received in callback from Plivo
:param nonce: X-Plivo-Signature-V3-Nonce header
:param v3_signature: X-Plivo-Signature-V3 header
:param auth_token: (Sub)Account auth token
:return: True if the request matches signature, False otherwise
"""
if params is None:
params = dict()
auth_token = bytes(auth_token.encode('utf-8'))
nonce = bytes(nonce.encode('utf-8'))
v3_signature = bytes(v3_signature.encode('utf-8'))
base_url = construct_get_url(uri, params) if method == 'GET' else construct_post_url(uri, params)
signature = get_signature_v3(auth_token, base_url, nonce)
return signature in v3_signature.split(b',')
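# Minimal usage sketch (not part of the original module). `headers` and `form`
# are assumed to come from the incoming webhook request; the header names are
# the ones described in the validate_v3_signature docstring above.
def _is_valid_plivo_callback(method, url, headers, form, auth_token):
    return validate_v3_signature(
        method=method,
        uri=url,
        nonce=headers['X-Plivo-Signature-V3-Nonce'],
        auth_token=auth_token,
        v3_signature=headers['X-Plivo-Signature-V3'],
        params=form if method == 'POST' else None,
    )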
|
# Currency (moeda)
# 111
# Create a package called utilidadesCeV with two internal modules named moeda and dado. Move all the functions used in challenges 107, 108 and 109 into the first package and keep everything working.
def aumentar(preco = 0, taxa = 0, formato = False):
'''
    -> Function to calculate the increased price.
    :param preco: value to be calculated
    :param taxa: increase rate (percent)
    :param formato: if True the result is formatted as currency; defaults to False, i.e. the result is not formatted.
    :return: result of the operation
'''
res = preco + (preco * taxa / 100)
return res if formato is False else moedaFormat(res)
# return res if not formato else moedaFormat(res)
def diminuir(preco = 0, taxa = 0, formato = False):
'''
    -> Function to calculate the discounted/reduced price.
    :param preco: value to be calculated
    :param taxa: discount/reduction rate (percent)
    :param formato: if True the result is formatted as currency; defaults to False, i.e. the result is not formatted.
    :return: result of the operation
'''
res = preco - (preco * taxa / 100)
return res if formato is False else moedaFormat(res)
def dobro(preco = 0, formato = False):
'''
    -> Function to calculate double the price.
    :param preco: value to be calculated
    :param formato: if True the result is formatted as currency; defaults to False, i.e. the result is not formatted.
    :return: result of the operation
'''
res = preco * 2
return res if formato is False else moedaFormat(res)
def metade(preco = 0, formato = False):
'''
    -> Function to calculate half the price.
    :param preco: value to be calculated
    :param formato: if True the result is formatted as currency; defaults to False, i.e. the result is not formatted.
    :return: result of the operation
'''
res = preco / 2
return res if formato is False else moedaFormat(res)
def moedaFormat(preco = 0, moeda = 'R$'):
return f'\33[32m{moeda} {preco:.2f}\33[m'.replace('.' ,',')
def taxaFormat(taxa = 0, porcentagem = '%'):
return f'\33[32m{taxa:.2f} {porcentagem}\33[m'.replace('.' ,',')
def resumo(preco = 0, taxaAumento = 10, taxaReducao = 5):
print('-' * 35)
print('RESUMO DO VALOR'.center(35))
print('-' * 35)
print(f'Preço analisado: \t{moedaFormat(preco)}')
print(f'Dobro do preço: \t{dobro(preco, True)}')
print(f'Metade do preço: \t{metade(preco, True)}')
print(f'{taxaFormat(taxaAumento)} de aumento: \t{aumentar(preco, taxaAumento, True)}')
print(f'{taxaFormat(taxaReducao)} de redução: \t{diminuir(preco, taxaReducao, True)}')
print('-' * 35)
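# Minimal usage sketch (not part of the original challenge file): exercises the
# helpers above with an arbitrary price and the default rates.
if __name__ == '__main__':
    print(aumentar(50.0, 10, True))   # R$ 55,00 (ANSI-colored)
    print(diminuir(50.0, 5))          # 47.5, unformatted
    resumo(50.0)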
|
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
import numpy as np
from pathlib import Path
plt.style.use('seaborn-white')
# sns.set()
font = {'family' : 'sans-serif',
'size' : 16}
matplotlib.rc('font', **font)
sns.set_palette('mako')
def coverage(y, yL, yH):
    """Percentage of the targets in y that fall strictly inside (yL, yH)."""
    return 100 / y.shape[0] * ((y > yL) & (y < yH)).sum()
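# Example (comment-only sketch): with
#   y  = np.array([1.0, 2.0, 3.0, 4.0])
#   yL = np.array([0.5, 2.5, 2.0, 3.0])
#   yH = np.array([1.5, 3.5, 4.0, 5.0])
# elements 0, 2 and 3 lie strictly inside their intervals, so
# coverage(y, yL, yH) returns 75.0.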
def plot_pcc_history(history, plot_path):
n_data = [len(history[k]['train_index']) for k in history]
pcc = [history[k]['PCC_test'] for k in history]
with plt.style.context('seaborn-white'):
plt.figure(figsize=(8, 8))
plt.plot(n_data, pcc,
c='r', label='PCC')
plt.title('PCC over the number of data points')
plt.legend()
plt.xlabel("Number of data points")
plt.ylabel("PCC")
plt.savefig(plot_path, bbox_inches='tight')
plt.close()
def plot_r2_history(history, plot_path):
n_data = [len(history[k]['train_index']) for k in history]
r2 = [history[k]['R2_test'] for k in history]
with plt.style.context('seaborn-white'):
plt.figure(figsize=(8, 8))
plt.plot(n_data, r2,
c='r', label='R2')
plt.title('R2 over the number of data points')
plt.legend()
plt.xlabel("Number of data points")
plt.ylabel("R2")
plt.savefig(plot_path, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
random = Path(__file__).resolve().parent / 'random'
for f in random.glob("*.npy"):
history = np.load(f, allow_pickle=True).item()
i = str(f).split('.')[0].split('_')[-1]
plot_pcc_history(history, f.parent / f'pcc_{i}.png')
plot_r2_history(history, f.parent / f'r2_{i}.png')
var_red = Path(__file__).resolve().parent / 'variance_reduction'
for f in var_red.glob("*.npy"):
history = np.load(f, allow_pickle=True).item()
i = str(f).split('.')[0].split('_')[-1]
plot_pcc_history(history, f.parent / f'pcc_{i}.png')
plot_r2_history(history, f.parent / f'r2_{i}.png')
clus_var_red = Path(__file__).resolve().parent / 'cluster_variance_reduction'
for f in clus_var_red.glob("*.npy"):
history = np.load(f, allow_pickle=True).item()
i = str(f).split('.')[0].split('_')[-1]
plot_pcc_history(history, f.parent / f'pcc_{i}.png')
plot_r2_history(history, f.parent / f'r2_{i}.png')
|
# -*- coding: utf-8 -*-
# encoding: utf-8
import json
import os
from time import time
import re
import requests
from base64 import b64encode
from kitty.data.report import Report
from kitty.targets.server import ServerTarget
from requests.exceptions import RequestException
from utils import set_class_logger
@set_class_logger
class FuzzerTarget(ServerTarget):
def not_implemented(self, func_name):
pass
def __init__(self, name, base_url, report_dir):
super(FuzzerTarget, self).__init__(name)
self.base_url = base_url
self._last_sent_request = None
self.accepted_status_codes = list(range(200, 300)) + list(range(400, 500))
self.report_dir = report_dir
self.logger.info('Logger initialized')
def error_report(self, msg, req):
if hasattr(req, 'request'):
self.report.add('request method', req.request.method)
self.report.add('request body', req.request.body)
self.report.add('response', req.text)
else:
for k, v in req.items():
if isinstance(v, dict):
for subkey, subvalue in v.items():
self.report.add(subkey, b64encode(subvalue))
else:
self.report.add(k, b64encode(v))
self.report.set_status(Report.ERROR)
self.report.error(msg)
def save_report_to_disc(self):
try:
if not os.path.exists(os.path.dirname(self.report_dir)):
try:
os.makedirs(os.path.dirname(self.report_dir))
except OSError:
pass
with open('{}/{}_{}.json'.format(self.report_dir, self.test_number, time()), 'wb') as report_dump_file:
report_dump_file.write(json.dumps(self.report.to_dict(), ensure_ascii=False, encoding='utf-8'))
except Exception as e:
self.logger.error(
'Failed to save report "{}" to {} because: {}'
.format(self.report.to_dict(), self.report_dir, e)
)
def transmit(self, **kwargs):
try:
_req_url = list()
for url_part in self.base_url, kwargs['url']:
self.logger.info('URL part: {}'.format(url_part))
_req_url.append(url_part.strip('/'))
request_url = '/'.join(_req_url)
request_url = self.expand_path_variables(request_url, kwargs.get('path_variables'))
request_url = self.expand_query_parameters(request_url, kwargs.get('params'))
if kwargs.get('path_variables'):
kwargs.pop('path_variables')
kwargs.pop('url')
self.logger.warn('>>> Formatted URL: {} <<<'.format(request_url))
if "API_FUZZER_API_KEY" in os.environ:
headers = {'Authorization': 'api-key {}'.format(os.getenv("API_FUZZER_API_KEY", ""))}
headers_sanitized = {'Authorization': 'api-key THIS_IS_THE_API_KEY_FROM_ENVIRONMENT'}
if 'headers' in kwargs:
combinedHeaders = {key: value for (key, value) in (headers.items() + kwargs['headers'].items())}
combinedHeadersSanitized = {key: value for (key, value) in (headers_sanitized.items() + kwargs['headers'].items())}
del kwargs['headers']
headers = combinedHeaders
headers_sanitized = combinedHeadersSanitized
self.logger.warn('Request Headers:{}, KWARGS:{}, url: {}'.format(headers_sanitized, kwargs, _req_url))
_return = requests.request(url=request_url, headers=headers, verify=False, **kwargs)
else:
self.logger.warn('Request KWARGS:{}, url: {}'.format(kwargs, _req_url))
_return = requests.request(url=request_url, verify=False, **kwargs)
status_code = _return.status_code
self.logger.warn('request returned with status code: {}'.format(status_code))
if status_code:
if status_code not in self.accepted_status_codes:
self.report.add('parsed status_code', status_code)
self.report.add('request method', _return.request.method)
self.report.add('request body', _return.request.body)
self.report.add('response', _return.text)
self.report.set_status(Report.FAILED)
self.report.failed('return code {} is not in the expected list'.format(status_code))
else:
self.error_report('Failed to parse http response code', _return.headers)
return _return
except (RequestException, UnicodeDecodeError) as e: # request failure such as InvalidHeader
self.error_report('Failed to parse http response code, exception: {}'.format(e), kwargs)
def post_test(self, test_num):
"""Called after a test is completed, perform cleanup etc."""
super(FuzzerTarget, self).post_test(test_num)
if self.report.get('status') != Report.PASSED:
self.save_report_to_disc()
def expand_path_variables(self, url, path_parameters):
if not isinstance(path_parameters, dict):
self.logger.error('path_parameters: {}'.format(path_parameters))
return url
for path_key, path_value in path_parameters.items():
try:
_temporally_url_list = list()
path_parameter = path_key.split('|')[-1]
splitter = '({' + path_parameter + '})'
url_list = re.split(splitter, url)
self.logger.info('Processing: {} key: {} splitter: {} '.format(url_list, path_parameter, splitter))
for url_part in url_list:
if url_part == '{' + path_parameter + '}':
_temporally_url_list.append(path_value.decode('unicode-escape').encode('utf8'))
else:
_temporally_url_list.append(url_part.encode())
url = "".join(_temporally_url_list)
self.logger.warn('url 1: {} | {}->{}'.format(url, path_parameter, path_value))
except Exception as e:
self.logger.warn('Failed to replace string in url: {} param: {}, exception: {}'.format(url, path_value, e))
url = url.replace("{", "").replace("}", "")
return url
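    # Example (comment-only sketch): given the template '/pets/{petId}' and
    # path_parameters {'petId': '42'}, expand_path_variables() rebuilds the
    # URL as '/pets/42'; any leftover '{' / '}' characters are stripped at
    # the end.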
def expand_query_parameters(self, url, query_parameters):
if not isinstance(query_parameters, dict):
self.logger.error('query_parameters: {}'.format(query_parameters))
return url
url = url + '?'
for param_key, param_value in query_parameters.items():
try:
pkey = param_key.split('|')[-1]
url=url + pkey + '=' + param_value.decode('unicode-escape').encode('utf8') + '&'
except Exception as e:
                self.logger.warn('Failed to replace string in url: {} param: {}, exception: {}'.format(url, param_value, e))
return url
|
from foundations_events.consumers.jobs.mixins.job_event_notifier import JobEventNotifier
class JobNotifier(JobEventNotifier):
"""Sends a notification message when a job is queued
Arguments:
        job_notifier {JobNotifier} -- A JobNotifier for sending out the messages
"""
@staticmethod
def _state():
return 'Queued'
|
from . import plot_target_dist
from . import plot_tfidf
from . import plot_words_cdf
from . import plot_roc_curve
from . import plot_comment_length_hist
|
# Objective: visualize X and E, the input graph to the model
import matplotlib.pyplot as plt
import numpy as np
# take in X and E
# draw a large point for X(i,1:4)
# for every E(i,j) = 1, draw a line between X(i,1:4) and X(j,1:4)
def plotGraph(X, E):
fig = plt.figure()
ax = plt.axes(projection="3d")
ax.scatter3D(X[:,1], X[:,2], X[:,3], c=X[:,3], cmap="Greens")
    for i in range(E.shape[0]):
        # get indices of nonzero elements in row i (np.nonzero returns a tuple of arrays)
        idx = np.nonzero(E[i, :])[0]
        for j in idx:
            ax.plot3D([X[i, 1], X[j, 1]], [X[i, 2], X[j, 2]], [X[i, 3], X[j, 3]], "b")
plt.show()
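# Minimal usage sketch (assumptions: X is an (n, >=4) array whose columns 1-3
# hold the 3D coordinates, and E is an (n, n) 0/1 adjacency matrix).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.random((10, 4))
    E = (rng.random((10, 10)) < 0.2).astype(int)
    plotGraph(X, E)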
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019. Mike Herbert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
Provides a place lookup gazetteer based on files from geonames.org.
Provides the location lookup methods for the geodata package.
+ Creates a local sqlite3 place database of geonames.org data
+ Parses lookup text and returns multiple matches ranked by closeness to lookup term
+ Provides latitude/longitude
+ Supports Wildcard search, Phonetic/Soundex search, and Word search of place database names
+ Search by feature type (e.g. mountain, cemetery, palace, etc)
+ Database can be filtered to only include specified countries, languages, and feature types
Main routines for Geodata package:
example.py - a sample demonstrating place lookups using geodata
geodata.open - open database. create DB if missing
geodata.find_best_match - parse location and provide the best match
geodata.find_matches - parse location and provide a ranked list of matches
geodata.find_feature - lookup location by feature type and provide a ranked list of matches
normalize.py - Normalize text for lookup
"""
import collections
import copy
import logging
from operator import itemgetter
from geodata import GeoUtil, GeodataBuild, Loc, MatchScore, GeoSearch
class Geodata:
"""
    Provide a place lookup gazetteer based on files from geonames.org
"""
def __init__(self, directory_name: str, display_progress,
show_message: bool, exit_on_error: bool, languages_list_dct, feature_code_list_dct,
supported_countries_dct, volume=''):
"""
Init
#Args:
directory_name: directory where geoname.org files are. DB will be in 'cache' folder under this
display_progress: None or function to display progress(percent_done:int, msg:str)
show_message: If True - show TKInter message dialog on error
exit_on_error: If True - exit on significant error
languages_list_dct: Dictionary of ISO-2 languages to import from AlternateNamesV2.txt
feature_code_list_dct: Dictionary of Geoname Feature codes to import into DB
supported_countries_dct: Dictionary of ISO-2 Country codes to import into DB
"""
self.logger = logging.getLogger(__name__)
self.display_progress = display_progress
self.save_place: Loc = Loc.Loc()
self.miss_diag_file = None
self.distance_cutoff = 0.6 # Value to determine if two lat/longs are similar based on Rectilinear Distance
self.geo_build = GeodataBuild.GeodataBuild(str(directory_name), display_progress=self.display_progress,
show_message=show_message, exit_on_error=exit_on_error,
languages_list_dct=languages_list_dct,
feature_code_list_dct=feature_code_list_dct,
supported_countries_dct=supported_countries_dct,
volume=volume)
    def find_matches(self, location: str, place: Loc):
        """
        Find a location in the geoname database. On successful match, place.georow_list will contain
        a list of georows that matched the name. Each georow can be copied to a Loc structure by
        calling process_results.
        #Args:
            location: comma separated name of location to find, e.g. 'Los Angeles, California, USA'
            place: Loc structure
        #Returns:
            GeoUtil.Result code
        """
place.parse_place(place_name=location, geo_db=self.geo_build.geodb)
best_score = 9999
self.is_country_valid(place)
if place.result_type == GeoUtil.Result.NOT_SUPPORTED:
place.georow_list.clear()
return best_score
# Create full entry text
place.update_names(self.geo_build.output_replace_dct)
flags = ResultFlags(limited=False, filtered=False)
result_list = [] # We will do different search types and append all results into result_list
# self.logger.debug(f'== FIND LOCATION City=[{place.city}] Adm2=[{place.admin2_name}]\
# Adm1=[{place.admin1_name}] Pref=[{place.prefix}] Cntry=[{place.country_name}] iso=[{place.country_iso}] Type={place.place_type} ')
# Save a shallow copy of place so we can restore fields
self.save_place = copy.copy(place)
# After parsing, last token is either country or underscore.
# Second to last is either Admin1 or underscore
# If >2 tokens: token[0] is placed in City and in Prefix
# If >3 tokens: token[1] is placed in Admin2 and appended to Prefix
# 1) Try lookup based on standard parsing: lookup city, county, state/province, or country as parsed
self.logger.debug(f' 1) Standard, based on parsing. pref [{place.prefix}] city [{place.city}]'
f' sdx={GeoSearch.get_soundex(place.city)} '
f'feat={place.feature} typ=[{place.place_type}]')
if place.place_type != Loc.PlaceType.COUNTRY and place.place_type != Loc.PlaceType.ADMIN1 \
and place.place_type != Loc.PlaceType.ADMIN1:
self.logger.debug('find std place - not ADM* ')
best_score = self.geo_build.geodb.s.lookup_place(place=place)
self.logger.debug(f'std: best={best_score}')
if place.georow_list:
result_list.extend(place.georow_list)
# self.logger.debug(result_list)
if best_score >= MatchScore.Score.POOR_CUTOFF:
# No good matches found. Try a deep search on soundex of combinations of terms
self.logger.debug('--- DEEP SEARCH city ---')
best_score = self.geo_build.geodb.s.deep_lookup(place=place)
# self.logger.debug(place.georow_list)
if place.georow_list:
result_list.extend(place.georow_list)
# Restore fields
self._restore_fields(place, self.save_place)
# 2) Try second token (Admin2) as a city
if place.admin2_name != '':
self.logger.debug(f'try 2nd token as city')
place.georow_list.clear()
best_score = self._find_type_as_city(place, Loc.PlaceType.ADMIN2)
self.logger.debug(f'2nd token best={best_score}')
if place.georow_list:
result_list.extend(place.georow_list)
# self.logger.debug(result_list)
# See if we found any good scoring matches
if best_score >= MatchScore.Score.POOR_CUTOFF:
# No good matches found. Try a deep search on soundex of combinations of terms
self.logger.debug('--- DEEP SEARCH city ---')
best_score = self.geo_build.geodb.s.deep_lookup(place=place)
# self.logger.debug(place.georow_list)
if place.georow_list:
result_list.extend(place.georow_list)
self._restore_fields(place, self.save_place)
# Move result_list into place georow list
place.georow_list.clear()
place.georow_list.extend(result_list)
# self.logger.debug(place.georow_list)
else:
self.logger.debug('not country, adm1, adm2')
return place.result_type
if len(place.georow_list) > 0:
best_score = self.geo_build.geodb._assign_scores(place.georow_list, place, '', fast=False, quiet=True)
# self.logger.debug('process results')
self.process_results(place=place, flags=flags)
flags = self.filter_results(place)
# self.logger.debug(place.georow_list)
if len(place.georow_list) == 0:
# NO MATCH
if place.result_type != GeoUtil.Result.NO_COUNTRY and place.result_type != GeoUtil.Result.NOT_SUPPORTED:
place.result_type = GeoUtil.Result.NO_MATCH
self.logger.debug(f'Not found.')
#place.result_type = GeoUtil.Result.STRONG_MATCH
else:
self.logger.debug('Found country')
elif len(place.georow_list) > 1:
self.logger.debug(f'Success! {len(place.georow_list)} matches')
place.result_type = GeoUtil.Result.MULTIPLE_MATCHES
# Process the results
self.process_results(place=place, flags=flags)
# self.logger.debug(f'Status={place.status}')
return place.result_type
def find_best_match(self, location: str, place: Loc) -> bool:
"""
Find the best scoring match for this location in the geoname dictionary.
#Args:
location: location name, e.g. Los Angeles, California, USA
place: Loc instance
#Returns: True if a match was found
place is updated with -- lat, lon, district, city, country_iso, result code
"""
# First parse the location into <prefix>, city, <district2>, district1, country.
# Then look it up in the place db
res = self.find_matches(location, place)
# Clear to just best entry
flags = self.filter_results(place)
# If multiple matches, truncate to first match
if len(place.georow_list) > 0:
place.georow_list = place.georow_list[:1]
self.process_results(place=place, flags=flags)
place.set_place_type()
nm = f'{place.get_long_name(self.geo_build.output_replace_dct)}'
place.prefix = place.prefix_cleanup(place.prefix, nm)
return True
else:
if res in GeoUtil.successful_match:
nm = f'{place.get_long_name(self.geo_build.output_replace_dct)}'
place.prefix = place.prefix_cleanup(place.prefix, nm)
#print(f'Found pre=[{place.prefix}{place.prefix_commas}] Nam=[{nm}]')
return True
return False
def find_geoid(self, geoid: str, place: Loc)->None:
"""
Lookup by geoid
#Args:
geoid: Geonames.org geoid
place: Location fields in place are updated
#Returns: None. Location fields in place are updated
"""
flags = ResultFlags(limited=False, filtered=False)
place.geoid = geoid
place.georow_list.clear()
self.geo_build.geodb.s.lookup_geoid(georow_list=place.georow_list, geoid=place.geoid, place=place)
if len(place.georow_list) == 0:
self.geo_build.geodb.s.lookup_geoid(georow_list=place.georow_list, geoid=place.geoid, place=place, admin=True)
if len(place.georow_list) > 0:
place.result_type = GeoUtil.Result.STRONG_MATCH
self.process_results(place=place, flags=flags)
# self.logger.debug(f'found geoid {place.georow_list[0]}')
else:
place.result_type = GeoUtil.Result.NO_MATCH
# self.logger.debug(f'NOT FOUND geoid {geoid}')
    def _find_type_as_city(self, place: Loc, typ) -> int:
        """
        Do a lookup using the field specified by typ as a city name. E.g. if typ is PlaceType.ADMIN2 then
        use the place.admin2_name field to do the city lookup
        #Args:
            place: Loc instance
            typ: Loc.PlaceType - Specifies which field to use as target for lookup
        #Returns: best match score
            place.georow_list is updated with matches
        """
# place.standard_parse = False
typ_name = ''
best = 999
if typ == Loc.PlaceType.CITY:
# Try City as city (do as-is)
typ_name = 'City'
pass
elif typ == Loc.PlaceType.ADMIN2:
# Try ADMIN2 as city
if place.admin2_name != '':
# if '*' not in place.city:
# place.prefix += ' ' + place.city
place.city = place.admin2_name
place.admin2_name = ''
typ_name = 'Admin2'
elif typ == Loc.PlaceType.PREFIX:
# Try Prefix as City
if place.prefix != '':
place.city = place.prefix
# if '*' not in tmp:
# place.prefix = tmp
typ_name = 'Prefix'
elif typ == Loc.PlaceType.ADVANCED_SEARCH:
# Advanced Search
best = self.geo_build.geodb.lookup_place(place=place)
return best
else:
self.logger.warning(f'Unknown TYPE {typ}')
if typ_name != '':
result_list = []
self.logger.debug(f'2) Try {typ_name} as City. Target={place.city} pref [{place.prefix}] ')
place.place_type = Loc.PlaceType.CITY
best = self.geo_build.geodb.s.lookup_place(place=place)
#best_score = self.geo_build.geodb.assign_scores(result_list, place, '', fast=True, quiet=False)
self.logger.debug(f'best={best}')
if best >= MatchScore.Score.POOR_CUTOFF:
self.logger.debug('--- DEEP SEARCH ADM2 ---')
best = self.geo_build.geodb.s.deep_lookup(place=place)
return best
    def _lookup_city_as_admin2(self, place: Loc, result_list) -> int:
        """
        Lookup place.city as admin2 name
        #Args:
            place: Loc instance
            result_list: list extended with the resulting georows
        #Returns: best match score
        """
# Try City as ADMIN2
# place.standard_parse = False
place.admin2_name = place.city
place.city = ''
place.place_type = Loc.PlaceType.ADMIN2
self.logger.debug(f' Try admin2 [{place.admin2_name}] as city [{place.get_five_part_title()}]')
best = self.geo_build.geodb.lookup_place(place=place)
result_list.extend(place.georow_list)
return best
    def find_feature(self, place) -> int:
        """
        Lookup location by name, country, and feature
        #Args:
            place: place.name, place.country, and place.feature are used for lookup
        #Returns:
            best match score. place.georow_list contains matches
        """
self.logger.debug('Feature Search')
best = self._find_type_as_city(place, place.place_type)
return best
# if len(place.georow_list) > 0:
# Build list - sort and remove duplicates
# self.logger.debug(f'Match {place.georow_list}')
# flags = ResultFlags(limited=False, filtered=False)
# self.process_results(place=place, flags=flags)
# self.filter_results(place)
def process_results(self, place: Loc, flags) -> None:
"""
Update fields in place record using first entry in place.georow_list
Updates fields with available data: city, admin1, admin2, country, lat/long, feature, etc.
#Args:
place: Loc instance
flags: Flags tuple as returned by sort_results
#Returns:
None. place instance fields are updated
"""
# self.logger.debug(f'**PROCESS RESULT: Res={place.result_type} Georow_list={place.georow_list}')
if place.result_type == GeoUtil.Result.NOT_SUPPORTED:
place.place_type = Loc.PlaceType.COUNTRY
if place.result_type in GeoUtil.successful_match and len(place.georow_list) > 0:
self.geo_build.geodb.copy_georow_to_place(row=place.georow_list[0], place=place, fast=False)
elif len(place.georow_list) > 0 and place.result_type != GeoUtil.Result.NOT_SUPPORTED:
# self.logger.debug(f'***RESULT={place.result_type} Setting to Partial')
place.result_type = GeoUtil.Result.PARTIAL_MATCH
place.set_place_type_text()
@staticmethod
def distance(lat_a: float, lon_a: float, lat_b: float, lon_b: float):
"""
Returns rectilinear distance in degrees between two lat/longs
Args:
lat_a: latitude of point A
lon_a: longitude of point A
lat_b: latitude of point B
lon_b: longitude of point B
Returns: Rectilinear distance between two points
"""
return abs(lat_a - lat_b) + abs(lon_a - lon_b)
def remove_duplicates(self, place):
# sort list by LON/LAT and score so we can remove dups
# for row in place.georow_list:
# self.logger.debug(row)
if len(place.georow_list) == 0:
self.logger.debug('empty')
return
try:
rows_sorted_by_latlon = sorted(place.georow_list, key=itemgetter(GeoUtil.Entry.LON, GeoUtil.Entry.LAT, GeoUtil.Entry.SCORE))
except IndexError as e:
rows_sorted_by_latlon = place.georow_list
place.georow_list.clear()
# Create a dummy 'previous' row so the comparison to previous entry works on the first item
prev_geo_row = self.geo_build.make_georow(name='q', iso='q', adm1='q', adm2='q', lat=900, lon=900, feat='q', geoid='q', sdx='q')
georow_idx = 0
# Keep track of list by GEOID to ensure no duplicates in GEOID
geoid_dict = {} # Key is GEOID. Value is List index
# Find and remove if two entries are duplicates - defined as two items with:
# 1) same GEOID or 2) same name and lat/lon is within Box Distance of 0.6 degrees
for geo_row in rows_sorted_by_latlon:
# self.logger.debug(f'{geo_row[GeoUtil.Entry.NAME]},{geo_row[GeoUtil.Entry.FEAT]} '
# f'{geo_row[GeoUtil.Entry.SCORE]:.1f} {geo_row[GeoUtil.Entry.ADM2]}, '
# f'{geo_row[GeoUtil.Entry.ADM1]} {geo_row[GeoUtil.Entry.ISO]}')
if self._valid_year_for_location(place.event_year, geo_row[GeoUtil.Entry.ISO], geo_row[GeoUtil.Entry.ADM1], 60) is False:
# Skip location if the location name didn't exist at the time of the event, even with 60 years of padding
continue
if self._valid_year_for_location(place.event_year, geo_row[GeoUtil.Entry.ISO], geo_row[GeoUtil.Entry.ADM1], 0) is False:
# Flag if the location name didn't exist at the time of the event
date_filtered = True
old_row = list(geo_row)
geo_row = tuple(old_row)
if geo_row[GeoUtil.Entry.NAME] != prev_geo_row[GeoUtil.Entry.NAME]:
# Add this item to georow list since it has a different name. Also add its idx to geoid dict
place.georow_list.append(geo_row)
geoid_dict[geo_row[GeoUtil.Entry.ID]] = georow_idx
georow_idx += 1
elif geo_row[GeoUtil.Entry.ID] in geoid_dict:
# We already have an entry for this geoid. Replace it if this one has a better score
row_idx = geoid_dict[geo_row[GeoUtil.Entry.ID]]
old_row = place.georow_list[row_idx]
if geo_row[GeoUtil.Entry.SCORE] < old_row[GeoUtil.Entry.SCORE]:
# Same GEOID but this has better score so replace other entry.
place.georow_list[row_idx] = geo_row
self.logger.debug(f'Better score {geo_row[GeoUtil.Entry.SCORE]} < '
f'{old_row[GeoUtil.Entry.SCORE]} {geo_row[GeoUtil.Entry.NAME]}')
elif self.distance(float(prev_geo_row[GeoUtil.Entry.LAT]), float(prev_geo_row[GeoUtil.Entry.LON]),
float(geo_row[GeoUtil.Entry.LAT]), float(geo_row[GeoUtil.Entry.LON])) > self.distance_cutoff:
# Add this item to georow list since Lat/lon is different from previous item. Also add its idx to geoid dict
place.georow_list.append(geo_row)
geoid_dict[geo_row[GeoUtil.Entry.ID]] = georow_idx
georow_idx += 1
elif geo_row[GeoUtil.Entry.SCORE] < prev_geo_row[GeoUtil.Entry.SCORE]:
# Same Lat/lon but this has better score so replace previous entry.
place.georow_list[georow_idx - 1] = geo_row
geoid_dict[geo_row[GeoUtil.Entry.ID]] = georow_idx - 1
# self.logger.debug(f'Use. {geo_row[GeoUtil.Entry.SCORE]} < {prev_geo_row[GeoUtil.Entry.SCORE]} {geo_row[GeoUtil.Entry.NAME]}')
prev_geo_row = geo_row
def filter_results(self, place: Loc):
"""
Sort place.georow_list by match score and eliminate duplicates
In case of duplicate, keep the one with best match score.
See MatchScore.match_score() for details on score calculation
Discard names that didn't exist at the time of the event (update result flag if this occurs)
Duplicates are defined as two items with:
1) same GEOID or 2) same name and similar lat/lon (within Rectilinear Distance of distance_cutoff degrees)
Add flag if we hit the lookup limit
#Args:
place:
#Returns:
ResultFlags(limited=limited_flag, filtered=date_filtered)
"""
date_filtered = False # Flag to indicate whether we dropped locations due to event date
# event_year = place.event_year
if len(place.georow_list) > 100:
limited_flag = True
else:
limited_flag = False
if len(place.georow_list) == 0:
self.logger.debug('EMPTY')
return ResultFlags(limited=limited_flag, filtered=date_filtered)
# Remove duplicate locations in list (have same name and lat/lon)
self.remove_duplicates(place)
if len(place.georow_list) == 0:
self.logger.error(f'georow_list = 0')
return ResultFlags(limited=limited_flag, filtered=date_filtered)
gap_threshold = 0
score = 0
# Sort places in match_score order
new_list = sorted(place.georow_list, key=itemgetter(GeoUtil.Entry.SCORE, GeoUtil.Entry.ADM1))
if len(new_list) == 0:
self.logger.error(f'new_list = 0')
return ResultFlags(limited=limited_flag, filtered=date_filtered)
if len(new_list[0]) < GeoUtil.Entry.SCORE + 1:
self.logger.debug(f'len = {len(new_list[0])}')
self.logger.debug(f'[{new_list[0]}]')
return ResultFlags(limited=limited_flag, filtered=date_filtered)
min_score = new_list[0][GeoUtil.Entry.SCORE]
place.georow_list.clear()
# Go through sorted list and only add items to georow_list that are close to the best score
for rw, geo_row in enumerate(new_list):
score = geo_row[GeoUtil.Entry.SCORE]
# admin1_name = self.geo_build.geodb.get_admin1_name_direct(geo_row[GeoUtil.Entry.ADM1], geo_row[GeoUtil.Entry.ISO])
# admin2_name = self.geo_build.geodb.get_admin2_name_direct(geo_row[GeoUtil.Entry.ADM1],
# geo_row[GeoUtil.Entry.ADM2], geo_row[GeoUtil.Entry.ISO])
base = MatchScore.Score.VERY_GOOD + (MatchScore.Score.GOOD / 3)
gap_threshold = base + abs(min_score) * .6
# Range to display when there is a strong match
# if (min_score <= base and score > min_score + gap_threshold) or score > MatchScore.Score.VERY_POOR * 1.5:
if score > min_score + gap_threshold:
self.logger.debug(f'SKIP Score={score:.1f} Min={min_score:.1f} Gap={gap_threshold:.1f} [{geo_row[GeoUtil.Entry.PREFIX]}]'
f' {geo_row[GeoUtil.Entry.NAME]},'
f' {geo_row[GeoUtil.Entry.ADM2]},'
f' {geo_row[GeoUtil.Entry.ADM1]} ')
else:
place.georow_list.append(geo_row)
self.logger.debug(f'Score {score:.1f} [{geo_row[GeoUtil.Entry.PREFIX]}] {geo_row[GeoUtil.Entry.NAME]}, '
f'AD2={geo_row[GeoUtil.Entry.ADM2]},'
f' AD1={geo_row[GeoUtil.Entry.ADM1]} {geo_row[GeoUtil.Entry.ISO]}')
# self.logger.debug(f'min={min_score:.1f}, gap2={gap_threshold:.1f} strong cutoff={min_score + gap_threshold:.1f}')
if min_score <= MatchScore.Score.VERY_GOOD and len(place.georow_list) == 1 and place.result_type != GeoUtil.Result.NOT_SUPPORTED:
place.result_type = GeoUtil.Result.STRONG_MATCH
else:
# Log item that we couldn't match
if self.miss_diag_file:
self.miss_diag_file.write(
f'Lookup {place.original_entry} thresh={gap_threshold} gap={score - min_score}\n\n')
return ResultFlags(limited=limited_flag, filtered=date_filtered)
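# Worked sketch of the gap threshold used above (the numbers are hypothetical, since
# the actual MatchScore.Score constants are defined elsewhere): if VERY_GOOD were 0,
# GOOD were 30, and the best (minimum) score in the list were -10, then
# base = 0 + 30/3 = 10, gap_threshold = 10 + abs(-10) * 0.6 = 16, and any row scoring
# above min_score + gap_threshold = 6 would be skipped.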
def open(self, repair_database: bool, query_limit: int):
"""
Open geodb. Create DB if needed
#Args:
repair_database: If True, create DB if missing or damaged.
query_limit: SQL query limit
#Returns:
True if error
"""
self._progress("Reading Geoname files...", 70)
return self.geo_build.open_geodb(repair_database=repair_database, query_limit=query_limit)
def _progress(self, msg: str, percent: int):
if self.display_progress is not None:
self.display_progress(percent, msg)
else:
self.logger.debug(msg)
def is_country_valid(self, place: Loc) -> bool:
"""
See if COUNTRY is present and is in the supported country list
#Args:
place:
#Returns:
True if country is valid
"""
if place.country_iso == '':
place.result_type = GeoUtil.Result.NO_COUNTRY
is_valid = False
elif place.country_iso not in self.geo_build.supported_countries_dct:
self.logger.debug(f'Country [{place.country_iso}] not supported')
place.result_type = GeoUtil.Result.NOT_SUPPORTED
place.place_type = Loc.PlaceType.COUNTRY
is_valid = False
else:
is_valid = True
return is_valid
@staticmethod
def _valid_year_for_location(event_year: int, country_iso: str, admin1: str, pad_years: int) -> bool:
"""
See if this state/province had modern names at the time of the event. Only US and Canada currently supported.
For example, looking up New York for year 1410 would be invalid since it did not have an English name at that time.
Data is based on https://en.wikipedia.org/wiki/List_of_North_American_settlements_by_year_of_foundation
Geonames has support for date ranges on names but that data is sparsely populated and not used here yet.
#Args:
event_year: Year to check
country_iso: ISO-2 country code
admin1: State/Province name
pad_years: Number of years to pad for inaccuracy
#Returns:
True if valid
"""
if not event_year:
return True
# Try looking up start year by state/province
place_year = admin1_name_start_year.get(f'{country_iso}.{admin1.lower()}')
if place_year is None:
# Try looking up start year by country
place_year = country_name_start_year.get(country_iso)
if place_year is None:
place_year = -1
if event_year + pad_years < place_year:
# self.logger.debug(f'Invalid year: incorporation={place_year} event={event_year} loc={admin1},{iso} pad={padding}')
return False
else:
return True
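# Example of the check above, using the admin1_name_start_year table at the bottom of
# this module ('us.ny' -> 1614): an event in New York in 1500 fails even with 60 years
# of padding (1500 + 60 < 1614), while an event in 1600 passes the padded check
# (1600 + 60 >= 1614) but is still flagged by the unpadded check (1600 < 1614).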
@staticmethod
def _feature_priority(feature: str):
"""
Returns 0-100 for feature priority (lower value = higher priority). PP1M (a city with over 1 million people) returns zero
#Args:
feature:
#Returns:
0-100 for feature priority
"""
res = feature_priority.get(feature)
if res:
return 100.0 - res
else:
return 100.0 - feature_priority.get('DEFAULT')
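# Example of the inversion above, using the feature_priority table at the bottom of
# this module: 'PP1M' (priority 100) maps to 0.0, 'ADM2' (priority 45) maps to 55.0,
# and an unknown feature falls back to 'DEFAULT' (0) and maps to 100.0, so lower
# returned values mean higher-priority features.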
def open_diag_file(self, miss_diag_fname: str):
"""
Open diagnostic file
#Args:
miss_diag_fname:
#Returns:
"""
self.miss_diag_file = open(miss_diag_fname, 'wt')
def close_diag_file(self):
"""
Close diagnostic file
Returns:
"""
if self.miss_diag_file:
self.miss_diag_file.close()
@staticmethod
def _restore_fields(place, save_place):
# Restore fields that were overwritten
place.city = save_place.city
place.admin2_name = save_place.admin2_name
place.prefix = save_place.prefix
def close(self):
"""
Close files and database
Returns: None
"""
if self.geo_build:
self.geo_build.geodb.close()
def log_results(self, geo_row_list):
for geo_row in geo_row_list:
self.logger.debug(f' {geo_row[GeoUtil.Entry.NAME]}')
# Entries are only loaded from geonames.org files if their feature is in this list
# Highest value is for large city or capital
# Also, if there are two identical entries, we only add the one with the higher feature priority.
# These scores are also used for match ranking score
# Note: PP1M, P1HK, P10K do not exist in Geonames and are created by geodata.geodataBuild
feature_priority = {
'PP1M': 100, 'ADM1': 96, 'PPLA': 96, 'PPLC': 96, 'PPLH': 71, 'ADM0': 93, 'PPLA2': 93, 'P1HK': 93,
'P10K': 89, 'PPLX': 82, 'PP1K': 82, 'PRN': 71, 'PRSH': 71, 'RLG': 71, 'RUIN': 71, 'STG': 71,
'PPLG': 75, 'RGN': 71, 'AREA': 71, 'NVB': 71, 'PPLA3': 71, 'ADMF': 71, 'PPLA4': 69, 'PPLF': 69, 'ADMX': 66,
'PPLQ': 60, 'PPLR': 60, 'PPLS': 55, 'PPLL': 55, 'PPLW': 55, 'PPL': 55, 'SQR': 50, 'ISL': 50,
'ADM2': 45, 'CH': 44, 'MSQE': 44, 'MSTY': 44, 'SYG': 44, 'MUS': 44, 'CMTY': 44, 'CSTL': 44, 'EST': 44,
'MILB': 44, 'MNMT': 44, 'PAL': 44, 'HSTS': 42, 'PRK': 42, 'ADM3': 32,
'BTL' : 22, 'HSP': 0, 'VAL': 0, 'MT': 0, 'ADM4': 0, 'DEFAULT': 0,
}
ResultFlags = collections.namedtuple('ResultFlags', 'limited filtered')
# Starting year this country name was valid
country_name_start_year = {
'cu': -1,
}
# Starting year when modern names were valid for this state/province
# https://en.wikipedia.org/wiki/List_of_North_American_settlements_by_year_of_foundation
admin1_name_start_year = {
'us.al': 1711,
'us.ak': 1774,
'us.az': 1775,
'us.ar': 1686,
'us.ca': 1769,
'us.co': 1871,
'us.ct': 1633,
'us.de': 1638,
'us.dc': 1650,
'us.fl': 1565,
'us.ga': 1566,
'us.hi': -1,
'us.id': 1862,
'us.il': 1703,
'us.in': 1715,
'us.ia': 1785,
'us.ks': 1870,
'us.ky': 1775,
'us.la': 1699,
'us.me': 1604,
'us.md': 1633,
'us.ma': 1620,
'us.mi': 1784,
'us.mn': 1820,
'us.ms': 1699,
'us.mo': 1765,
'us.mt': 1877,
'us.ne': 1854,
'us.nv': 1905,
'us.nh': 1638,
'us.nj': 1624,
'us.nm': 1598,
'us.ny': 1614,
'us.nc': 1653,
'us.nd': 1871,
'us.oh': 1785,
'us.ok': 1889,
'us.or': 1811,
'us.pa': 1682,
'us.ri': 1636,
'us.sc': 1663,
'us.sd': 1865,
'us.tn': 1739,
'us.tx': 1685,
'us.ut': 1847,
'us.vt': 1650,
'us.va': 1607,
'us.wa': 1825,
'us.wv': 1788,
'us.wi': 1685,
'us.wy': 1867,
'ca.01': 1795,
'ca.02': 1789,
'ca.03': 1733,
'ca.04': 1766,
'ca.05': 1583,
'ca.07': 1604,
'ca.08': 1673,
'ca.09': 1764,
'ca.10': 1541,
'ca.11': 1862,
'ca.12': 1700,
'ca.13': 1700,
'ca.14': 1700
}
|
def string_hash(s: str) -> int:
"""Toy integer hash of a string, mixing the sum of character codes, the space count, and the first/last character spread."""
a = sum(ord(c) for c in s)  # sum of character codes
n = s.count(' ')  # number of spaces (roughly, word separators)
b = ord(s[-1]) - ord(s[0]) if s else 0  # spread between last and first character
c = (a | b) & (~a << 2)  # bitwise mix of the two values
d = c ^ ((n + 1) << 5)  # fold in the word count
return d
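# Usage sketch (illustrative only; the exact values depend on the input string):
#   print(string_hash('hello'))        # some deterministic integer
#   print(string_hash('hello world'))  # differs, since the space count is folded in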
|
# Write a function (F): input is a list of names and an integer N; output is a list of N random
# names drawn from the first list (repeats allowed; suggested values: 20 names, N = 100;
# the random module is recommended)
from random import choices
def choices_name(list_names, N=15):
'''Takes a list of names (list_names) and an integer N.
Returns a list of N randomly chosen names.'''
return choices(list_names, k=N)
new_list_names = ['Ваня','Дима','Лена','Ира','Игорь','Ян','Маша','Даша','Наташа','Вася']
b = choices_name(new_list_names)
print(b)
# 2. Write a function that returns the most frequent name in the list produced by function F
def often_word(listik):
'''Takes a list (listik).
Returns the most frequent element(s) of the list as (name, count) pairs.'''
c_dict = {}
for name in listik:
c_dict[name] = c_dict.get(name, 0) + 1
max_count = max(c_dict.values())
most_often = []
for key, value in c_dict.items():
if value == max_count:
most_often.append((key, value))
return most_often
print(often_word(b))
# 3. Write a function that returns the rarest first letter among the names in the list produced by F
def redkaiy_bukva(names):
'''
:param names: list of names
:return: the rarest letter(s) that the names in the list start with
'''
first_letters = [word[0] for word in names]
letter_counts = {}
for letter in first_letters:
letter_counts[letter] = letter_counts.get(letter, 0) + 1
min_count = min(letter_counts.values())
rarest = [letter for letter, count in letter_counts.items() if count == min_count]
return rarest  # returns ALL of the rarest letters
print(redkaiy_bukva(b))
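# Note (not part of the original exercise): both frequency counts above could also be
# done with collections.Counter, e.g.
#   from collections import Counter
#   Counter(b).most_common(1)                     # most frequent name with its count
#   Counter(name[0] for name in b).most_common()  # first-letter counts, rarest last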
# PRO: LIGHT +
# 4. In a file with logs, find the date of the latest log entry (by timestamp)
text='''
2011-08-01 18:03:34,338 - exampleApp - INFO - Program started
2012-09-02 19:13:53,338 - exampleApp - INFO - added 7 and 8 to get15
2012-10-02 20:23:31,338 - exampleApp - INFO - Done!
2013-08-01 01:43:33,338 - exampleApp - INFO - Program started
2011-09-19 12:53:33,338 - exampleApp - INFO - added 10 and 11 to get15
2012-10-12 22:03:33,338 - exampleApp - INFO - Done!
2017-08-01 01:13:51,338 - exampleApp - INFO - Program started
2019-09-19 12:21:34,338 - exampleApp - INFO - added 7 and 8 to get15
2018-10-12 23:31:01,338 - exampleApp - INFO - Done!
'''
#https://andreyex.ru/yazyk-programmirovaniya-python/uchebnik-po-python-3/python-3-vremya-metod-strptime/
from datetime import datetime
# place the log file in the project folder
with open('log.csv','r') as file:
last_date = None
for line in file:
if not last_date:
last_date = datetime.strptime(line[:23], '%Y-%m-%d %H:%M:%S,%f')
continue
date = datetime.strptime(line[:23], '%Y-%m-%d %H:%M:%S,%f')
if date > last_date:
last_date = date
print(last_date)
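# Equivalent one-liner over the sample `text` above (no file needed), shown for comparison:
#   latest = max(datetime.strptime(line[:23], '%Y-%m-%d %H:%M:%S,%f')
#                for line in text.strip().splitlines())
#   print(latest)  # 2019-09-19 12:21:34.338000 for the sample above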
|
# -*- coding: utf-8 -*-
"""
1637. Widest Vertical Area Between Two Points Containing No Points
Given n points on a 2D plane where points[i] = [xi, yi],
Return the widest vertical area between two points such that no points are inside the area.
A vertical area is an area of fixed-width extending infinitely along the y-axis (i.e., infinite height).
The widest vertical area is the one with the maximum width.
Note that points on the edge of a vertical area are not considered included in the area.
Constraints:
n == points.length
2 <= n <= 10^5
points[i].length == 2
0 <= xi, yi <= 10^9
"""
class Solution:
def maxWidthOfVerticalArea(self, points):
x_axis = sorted(set(point[0] for point in points))
return max(x_axis[ind] - x_axis[ind - 1] for ind in range(1, len(x_axis)))
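# Quick check with sample inputs:
#   Solution().maxWidthOfVerticalArea([[8, 7], [9, 9], [7, 4], [9, 7]])                  # -> 1
#   Solution().maxWidthOfVerticalArea([[3, 1], [9, 0], [1, 0], [1, 4], [5, 3], [8, 8]])  # -> 3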
|
import os
import unittest
from smartexcel.smart_excel import SmartExcel
from smartexcel.tests.data.definitions.dummy import DUMMY_DEFINITION
from smartexcel.tests.data.data_models.dummy import DummyData
class TestParse(unittest.TestCase):
def setUp(self):
self.definition = DUMMY_DEFINITION
self.data = DummyData()
self.filepath = '/tmp/dummy_test.xlsx'
if os.path.exists(self.filepath):
os.remove(self.filepath)
SmartExcel(
definition=self.definition,
data=self.data,
output=self.filepath
).dump()
def test_parse(self):
excel = SmartExcel(
definition=self.definition,
data=self.data,
path=self.filepath
)
data = excel.parse()
self.assertEqual(data, [
{'name': 'PA', 'age': 29, 'city': 'Paris'},
{'name': 'Cairo', 'age': 0, 'city': 'Muizenberg'},
{'name': 'Carina', 'age': 26, 'city': 'Windhoek'}])
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
.. py:module:: contextualdefault
:synopsis: Tools for options which allow for a default callable that needs
also the context ctx
"""
import click
class ContextualDefaultOption(click.Option):
"""A class that extends click.Option allowing to define a default callable
that also gets the context ctx as a parameter.
"""
def __init__(self, *args, contextual_default=None, **kwargs):
self._contextual_default = contextual_default
super().__init__(*args, **kwargs)
def get_default(self, ctx):
"""If a contextual default is defined, use it, otherwise behave normally."""
if self._contextual_default is None:
return super().get_default(ctx)
return self._contextual_default(ctx)
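# A minimal usage sketch (illustrative; the option name and the callable are hypothetical,
# not part of this module):
#
#   @click.command()
#   @click.option('--workers', cls=ContextualDefaultOption,
#                 contextual_default=lambda ctx: ctx.obj.get('default_workers', 1))
#   def run(workers):
#       click.echo(workers)
#
# When --workers is not passed on the command line, the callable receives the click
# context and computes the default at invocation time rather than at import time.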
|
import datetime
from tests import factories
from tests.common import ApiBaseTest
from webservices.rest import api
from webservices.resources.filings import FilingsView, FilingsList
class TestFilings(ApiBaseTest):
def test_committee_filings(self):
""" Check filing returns with a specified committee id"""
committee_id = 'C8675309'
factories.FilingsFactory(committee_id=committee_id)
results = self._results(api.url_for(FilingsView, committee_id=committee_id))
self.assertEqual(results[0]['committee_id'], committee_id)
def test_candidate_filings(self):
candidate_id = 'P12345'
factories.FilingsFactory(candidate_id=candidate_id)
results = self._results(api.url_for(FilingsView, candidate_id=candidate_id))
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['candidate_id'], candidate_id)
def test_filings(self):
""" Check filings returns in general endpoint"""
factories.FilingsFactory(committee_id='C001')
factories.FilingsFactory(committee_id='C002')
results = self._results(api.url_for(FilingsList))
self.assertEqual(len(results), 2)
def test_filter_date(self):
[
factories.FilingsFactory(receipt_date=datetime.date(2012, 1, 1)),
factories.FilingsFactory(receipt_date=datetime.date(2013, 1, 1)),
factories.FilingsFactory(receipt_date=datetime.date(2014, 1, 1)),
factories.FilingsFactory(receipt_date=datetime.date(2015, 1, 1)),
]
min_date = datetime.date(2013, 1, 1)
results = self._results(api.url_for(FilingsList, min_receipt_date=min_date))
self.assertTrue(all(each['receipt_date'] >= min_date.isoformat() for each in results))
max_date = datetime.date(2014, 1, 1)
results = self._results(api.url_for(FilingsList, max_receipt_date=max_date))
self.assertTrue(all(each['receipt_date'] <= max_date.isoformat() for each in results))
results = self._results(api.url_for(FilingsList, min_receipt_date=min_date, max_receipt_date=max_date))
self.assertTrue(
all(
min_date.isoformat() <= each['receipt_date'] <= max_date.isoformat()
for each in results
)
)
def test_filings_filters(self):
[
factories.FilingsFactory(committee_id='C0004'),
factories.FilingsFactory(committee_id='C0005'),
factories.FilingsFactory(candidate_id='H0001'),
factories.FilingsFactory(beginning_image_number=123456789021234567),
factories.FilingsFactory(form_type='3'),
factories.FilingsFactory(primary_general_indicator='G'),
factories.FilingsFactory(amendment_indicator='A'),
factories.FilingsFactory(report_type='POST GENERAL'),
factories.FilingsFactory(report_year=1999),
factories.FilingsFactory(document_type='X'),
factories.FilingsFactory(cycle=2000),
]
filter_fields = (
('beginning_image_number', 123456789021234567),
('form_type', '3'),
('primary_general_indicator', 'G'),
('amendment_indicator', 'A'),
('report_type', 'Post General'),
('report_year', 1999),
('candidate_id', 'H0001'),
('document_type', 'X'),
('cycle', 2000),
)
# checking one example from each field
orig_response = self._response(api.url_for(FilingsList))
original_count = orig_response['pagination']['count']
for field, example in filter_fields:
page = api.url_for(FilingsList, **{field: example})
# returns at least one result
results = self._results(page)
self.assertGreater(len(results), 0)
# doesn't return all results
response = self._response(page)
self.assertGreater(original_count, response['pagination']['count'])
def test_sort(self):
[
factories.FilingsFactory(beginning_image_number=2),
factories.FilingsFactory(beginning_image_number=1),
]
results = self._results(api.url_for(FilingsList, sort='beginning_image_number'))
self.assertEqual(
[each['beginning_image_number'] for each in results],
[1, 2]
)
def test_sort_bad_column(self):
response = self.app.get(api.url_for(FilingsList, sort='request_type'))
self.assertEqual(response.status_code, 422)
def test_regex(self):
""" Getting rid of extra text that comes in the tables."""
factories.FilingsFactory(
report_type_full='report {more information than we want}',
committee_id='C007',
form_type='RFAI',
report_year=2004,
)
results = self._results(api.url_for(FilingsView, committee_id='C007'))
self.assertEqual(results[0]['document_description'], 'RFAI: report 2004')
|
### Title: Counting bills and coins
### Purpose: This program modifies listing 5.14 to also handle coins of R$ 0.50, R$ 0.10, R$ 0.05, R$ 0.02, and R$ 0.01
### Author: Valmor Mantelli Jr.
### Date: 26/12/2018
### Version: 0.0.1
# Variable declaration and value assignment
valor = float(input("Digite o valor a pagar: "))
cedulas = 0
atual = 100
apagar = valor
# Processing and output
while True:
if atual <= apagar:
apagar -= atual
cedulas += 1
else:
if atual >= 1:
print ("%d cédula(s) de R$%d" % (cedulas, atual))
else:
print ("%d moeda(s) de R$%.2f" % (cedulas, atual))
if apagar < 0.01:
break
elif atual == 100:
atual = 50
elif atual == 50:
atual = 20
elif atual == 20:
atual = 10
elif atual == 10:
atual = 5
elif atual == 5:
atual = 1
elif atual == 1:
atual = 0.5
elif atual == 0.5:
atual = 0.10
elif atual == 0.10:
atual = 0.05
elif atual == 0.05:
atual = 0.02
elif atual == 0.02:
atual = 0.01
cedulas = 0
|
vowels=['a','e','i','o','u']
word=input("enter the words to search for vowels:")
found=[]
for letter in word:
if letter in vowels:
if letter not in found:
found.append(letter)
print(found)
print("the no. of different vowels present in",word,"is",len(found))
|
"""
Plot training/validation loss and F1-score per category.
TODO: generalize the plotter for situations other than 4 models...
"""
import matplotlib.pyplot as plt
from glob import glob
import re
def plot_training_loss(dirname, model):
"""
Plot training and validation loss during training
:param dirname: name of directory containing the logs (..._training.log)
:param model: either onset or note
"""
f, ax = plt.subplots(2, 2, sharex=True, sharey=True)
sets = ['set_1',
'set_2',
'set_3',
'set_4'
]
idx = 0
for set in sets:
res = {}
with open(f'{dirname}/best_{model}_{set}_training.log') as fin:
headers = fin.readline().strip().split()
for h in headers:
res[h] = []
for line in fin:
i = 0
for v in line.strip().split():
res[headers[i]].append(float(v))
i += 1
u = idx//2
v = idx%2
ax[u][v].plot(res['epoch'], res['loss'], label='training')
ax[u][v].plot(res['epoch'], res['val_loss'], label='validation')
handles, labels = ax[u][v].get_legend_handles_labels()
ax[u][v].legend(handles, labels)
ax[u][v].set_title('model_{}'.format(set[-1]))
if u==1:
ax[u][v].set_xlabel('epoch')
if v==0:
ax[u][v].set_ylabel('loss')
idx += 1
plt.show()
def plot_category_score(dirname):
"""
Plot F1-score per category
:param dirname: directory containing the logs (..._predictions.log)
"""
data = {}
categories = []
for f in sorted(glob(dirname+'/*_predictions.log')):
model_set = re.search('set_[0-9]',f).group()
with open(f) as fin:
res = {}
for line in fin:
if line[:5]=='TOTAL':
set = re.search('set_[0-9]',line).group()
res[set] = {}
if line[:5]=='carac':
l = line.strip().split('\t')
category = l[0][6:]
if category not in categories:
categories.append(category)
if category not in res[set]:
res[set][category] = {'x': [], 'f1': []}
f1 = l[5]
if f1 != 'nan':
res[set][category]['x'].append(float(l[1]))
res[set][category]['f1'].append(float(f1))
data[model_set] = res
for cat in categories:
f, ax = plt.subplots(2, 2, sharex=True, sharey=True)
idx = 0
for model in data:
u = idx // 2
v = idx % 2
idx += 1
for set in data[model]:
ax[u][v].plot(data[model][set][cat]['x'], data[model][set][cat]['f1'], label=set)
if u==1 and v==1:
handles, labels = ax[u][v].get_legend_handles_labels()
ax[u][v].legend(handles, labels)
ax[u][v].set_title('model_{}'.format(model[-1]))
if cat=='silence_notes':
ax[u][v].set_xlim(0,50)
if u==1:
ax[u][v].set_xlabel(cat)
if v==0:
ax[u][v].set_ylabel('F1-score')
plt.show()
if __name__=='__main__':
plot_training_loss('best_models', 'onset')
plot_training_loss('best_models', 'note')
plot_category_score('best_models/categories_scores_onset')
plot_category_score('best_models/categories_scores_note')
|
from tornado.gen import coroutine
from torip.ipapis import api_factory
__author__ = 'mendrugory'
class Locator:
"""
Main class, responsible for locating the IP.
"""
def __init__(self, api_name=None, **config):
self.api_name = api_name
self.config = config
@coroutine
def locate(self, address):
"""
It locates the IP Address / Server Name
:param address: String IP Address / Server Name
:return: the location information returned by the selected API
"""
api = api_factory(self.api_name, **self.config)
result = yield api.locate(address)
return result
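# A minimal usage sketch (assumes a running Tornado IOLoop and an api_name understood
# by api_factory; the values below are illustrative):
#
#   from tornado.ioloop import IOLoop
#   locator = Locator(api_name='some_api')
#   result = IOLoop.current().run_sync(lambda: locator.locate('8.8.8.8'))
#   print(result)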
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import uuid
import requests_mock
from canvasapi.canvas import Canvas
from canvasapi.upload import Uploader
from tests import settings
from tests.util import cleanup_file, register_uris
@requests_mock.Mocker()
class TestUploader(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
self.requester = self.canvas._Canvas__requester
self.filename = "testfile_uploader_{}".format(uuid.uuid4().hex)
self.file = open(self.filename, "w+")
def tearDown(self):
self.file.close()
cleanup_file(self.filename)
# start()
def test_start(self, m):
requires = {"uploader": ["upload_response", "upload_response_upload_url"]}
register_uris(requires, m)
uploader = Uploader(self.requester, "upload_response", self.file)
result = uploader.start()
self.assertTrue(result[0])
self.assertIsInstance(result[1], dict)
self.assertIn("url", result[1])
def test_start_path(self, m):
requires = {"uploader": ["upload_response", "upload_response_upload_url"]}
register_uris(requires, m)
uploader = Uploader(self.requester, "upload_response", self.filename)
result = uploader.start()
self.assertTrue(result[0])
self.assertIsInstance(result[1], dict)
self.assertIn("url", result[1])
def test_start_file_does_not_exist(self, m):
with self.assertRaises(IOError):
Uploader(self.requester, "upload_response", "test_file_not_real.xyz")
# upload()
def test_upload_no_upload_url(self, m):
register_uris({"uploader": ["upload_response_no_upload_url"]}, m)
with self.assertRaises(ValueError):
Uploader(
self.requester, "upload_response_no_upload_url", self.filename
).start()
def test_upload_no_upload_params(self, m):
register_uris({"uploader": ["upload_response_no_upload_params"]}, m)
with self.assertRaises(ValueError):
Uploader(
self.requester, "upload_response_no_upload_params", self.filename
).start()
def test_upload_fail(self, m):
requires = {"uploader": ["upload_fail", "upload_response_fail"]}
register_uris(requires, m)
uploader = Uploader(self.requester, "upload_response_fail", self.file)
result = uploader.start()
self.assertFalse(result[0])
self.assertIsInstance(result[1], dict)
self.assertNotIn("url", result[1])
|
''' Back up the database from the production connection '''
from connect import connect
def backup():
''' Copy every document from the production database to the backup database '''
clients = {'production': connect('production'), 'backup': connect('backup')}
dbs = {'production': clients['production']['test'], 'backup': clients['backup']['test']}
for collection in dbs['production'].collection_names():
cursor = dbs['production'][collection].find()
dbs['backup'][collection].insert_many([document for document in cursor])
print('finished backup')
if __name__ == '__main__':
backup()
|
# Generated by Django 3.2.7 on 2021-09-07 18:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='video',
old_name='author',
new_name='channelTitle',
),
migrations.RenameField(
model_name='video',
old_name='publish_datetime',
new_name='publishTime',
),
]
|
#!/usr/bin/env python
r"""Contains the Ion class.
The class inherits from :py:class:`~solarwindpy.core.base.Base` and contains :py:class:`~solarwindpy.core.vector.Vector` along with
:py:class:`~solarwindpy.core.tensor.Tensor` objects.
"""
import pdb # noqa: F401
# import numpy as np
import pandas as pd
# We rely on views via DataFrame.xs to reduce memory size and do not
# `.copy(deep=True)`, so we want to make sure that this doesn't
# accidentally cause a problem.
pd.set_option("mode.chained_assignment", "raise")
try:
from . import base
from . import vector
from . import tensor
except ImportError:
import base
import vector
import tensor
class Ion(base.Base):
r"""Ion class.
Properties
----------
species, velocity, thermal_speed, number_density, mass_density, anisotropy,
temperature, pressure
Methods
-------
set_species, set_data
"""
def __init__(self, data, species):
self.set_species(species)
super(Ion, self).__init__(data)
# self.set_data(data, species)
def __eq__(self, other):
# eq = super(Ion, self).__eq__(other)
if id(self) == id(other):
return True
elif type(self) != type(other):
return False
elif self.species != other.species:
return False
else:
try:
pd.testing.assert_frame_equal(self.data, other.data)
except AssertionError:
return False
# try:
# # eq_data = self.data == other.data
# pd.testing.assert_frame_equal(self.data, other.data)
# except ValueError as e:
# # print(dir(e), flush=True)
# msg = "Can only compare identically-labeled DataFrame objects"
# if msg in str(e):
# return False
# else:
# raise e
#
# while isinstance(eq_data, pd.core.generic.NDFrame):
# eq_data = eq_data.all()
# if eq_data:
# return True
#
return True
def set_species(self, species):
assert isinstance(species, str)
if "+" in species:
raise NotImplementedError
self._species = species
def set_data(self, data):
# assert isinstance(data, pd.DataFrame)
super(Ion, self).set_data(data)
species = self.species
# TODO: Implement the following optional species xs if necessary
# based on ways ions are later created in Plasma.
if data.columns.names == ["M", "C"]:
pass
elif data.columns.names == ["M", "C", "S"]:
data = data.sort_index(axis=1)
data = data.xs(species, axis=1, level="S")
else:
msg = "Unrecognized data column names: %s" % (data.columns.names)
raise ValueError(msg)
chk = [
("n", ""),
("v", "x"),
("v", "y"),
("v", "z"),
("w", "par"),
("w", "per"),
]
assert pd.Index(chk).isin(data.columns).all()
self._data = data
@property
def species(self):
r"""Ion species.
"""
return self._species
@property
def velocity(self):
r"""Ion's velocity stored as a :py:class:`~solarwindpy.core.vector.Vector`.
"""
return vector.Vector(self.data.loc[:, "v"])
@property
def v(self):
r"""
Shortcut to :py:meth:`velocity` property.
"""
return self.velocity
@property
def thermal_speed(self):
r"""Ion's thermal speed stored as :py:class:`~solarwindpy.core.tensor.Tensor`.
"""
return tensor.Tensor(self.data.loc[:, "w"])
@property
def w(self):
r"""Shortcut to :py:meth:`thermal_speed`.
"""
return self.thermal_speed
@property
def number_density(self):
r"""Number density returned from underlying :py:meth:`~solarwindpy.core.base.Base.data` as a `pd.Series`.
"""
return self.data.loc[:, "n"]
@property
def n(self):
r"""Shortcut to :py:meth:`number_density`.
"""
return self.number_density
@property
def mass_density(self):
r"""Ion's mass density.
"""
out = self.n * self.constants.m_in_mp.loc[self.species]
out.name = "rho"
return out
@property
def rho(self):
r"""
Shortcut to :py:meth:`mass_density`.
"""
return self.mass_density
@property
def anisotropy(self):
r"""Temperature anisotropy :math:`R_T = p_\perp/p_\parallel`.
"""
exp = pd.Series({"par": -1, "per": 1})
pth = self.pth.drop("scalar", axis=1)
assert pth.shape[1] == 2
assert exp.index.equals(pth.columns)
# TODO: test `skipna=False` to ensure NaNs propagate.
ani = pth.pow(exp, axis=1, level="C").product(axis=1, skipna=False)
ani.name = "RT"
return ani
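# Illustrative note (not from the original source): the pow/product trick above is a
# vectorized ratio.  With a plain two-column frame of thermal pressures,
#   exp = pd.Series({"par": -1, "per": 1})
#   pth.pow(exp, axis=1).product(axis=1, skipna=False)
# evaluates p_per / p_par row by row, i.e. R_T; the extra level="C" argument in the
# property handles the MultiIndex columns used by this class.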
@property
def temperature(self):
r""":math:`T = \frac{m}{2 k_B} w^2`.
"""
m = self.constants.m.loc[self.species]
# TODO: Make math operations work on ThermalSpeed
w = self.w.data * self.units.w
coeff = 0.5 * m / (self.constants.kb.J * self.units.temperature)
temp = coeff * w.pow(2)
temp.name = "T"
return temp
@property
def pth(self):
r"""Thermal pressure :math:`p_\mathrm{th} = \frac{1}{2}\rho w^2`.
"""
rho = self.rho * self.units.rho
# TODO: Make math operations work on ThermalSpeed
w = self.w.multiply(self.units.w)
pth = (0.5 / self.units.pth) * w.pow(2).multiply(rho, axis=0)
pth.name = "pth"
return pth
@property
def cs(self):
r"""Species' sound speed.
"""
pth = self.pth * self.units.pth
rho = self.rho * self.units.rho
gamma = self.constants.polytropic_index["scalar"]
cs = pth.divide(rho).multiply(gamma).pow(0.5) / self.units.cs
cs.name = "cs"
return cs
@property
def specific_entropy(self):
r"""Calculate the specific entropy following [1] as
:math:`p_\mathrm{th} \rho^{-\gamma}`
where :math:`\gamma=5/3`, :math:`p_\mathrm{th}` is the thermal pressure,
and :math:`\rho` is the mass density.
References
----------
[1] Siscoe, G. L. (1983). Solar System Magnetohydrodynamics (pp.
11–100). https://doi.org/10.1007/978-94-009-7194-3_2
"""
comp = "scalar"
gamma = self.constants.polytropic_index.loc[comp]
pth = self.pth.loc[:, comp] * self.units.pth
rho = self.rho * self.units.rho
out = pth.multiply(rho.pow(-gamma)) / self.units.specific_entropy
out.name = "S"
return out
@property
def S(self):
r"""Shortcut to :py:meth:`~specific_entropy`.
"""
return self.specific_entropy
|
#!/usr/bin/env python
#################################################################################
## Program: BRAINS (Brain Research: Analysis of Images, Networks, and Systems)
## Language: Python
##
## Author: David Welch
##
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
##
#################################################################################
# """Import necessary modules from nipype."""
# from nipype.utils.config import config
# config.set('logging', 'log_to_file', 'false')
# config.set_log_dir(os.getcwd())
# --config.set('logging', 'workflow_level', 'DEBUG')
# --config.set('logging', 'interface_level', 'DEBUG')
# --config.set('execution','remove_unnecessary_outputs','true')
"""
segmentation.py
============================
Description:
Builds the nipype segmentation workflow (BRAINSCut-based subcortical segmentation) for a single session.
Author:
David Welch
Usage:
"""
from builtins import str
from nipype.utils.misc import package_check
# package_check('nipype', '5.4', 'tutorial1') ## HACK: Check nipype version
package_check("numpy", "1.3", "tutorial1")
package_check("scipy", "0.7", "tutorial1")
package_check("networkx", "1.0", "tutorial1")
package_check("IPython", "0.10", "tutorial1")
from utilities.misc import common_ants_registration_settings
def segmentation(
projectid, subjectid, sessionid, master_config, onlyT1=True, pipeline_name=""
):
"""
Build the per-session segmentation workflow (registration update, BRAINSCut, snapshot writing, and datasinks).
:param projectid: project identifier used in output paths
:param subjectid: subject identifier used in output paths and node names
:param sessionid: session identifier used in output paths and node names
:param master_config: configuration dictionary (queues, plugin settings, result directory, etc.)
:param onlyT1: if True, only the T1 average is used and T2 inputs are skipped
:param pipeline_name: name given to the returned nipype workflow
:return: the assembled nipype Workflow (baw200)
"""
import os.path
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
from nipype.interfaces import ants
from nipype.interfaces.utility import IdentityInterface, Function, Merge
# Set universal pipeline options
from nipype import config
config.update_config(master_config)
from PipeLineFunctionHelpers import clip_t1_image_with_brain_mask
from .WorkupT1T2BRAINSCut import create_brains_cut_workflow
from utilities.distributed import modify_qsub_args
from nipype.interfaces.semtools import BRAINSSnapShotWriter
# CLUSTER_QUEUE=master_config['queue']
CLUSTER_QUEUE_LONG = master_config["long_q"]
baw200 = pe.Workflow(name=pipeline_name)
# HACK: print for debugging
for key, item in list(master_config.items()):
print(("-" * 30))
print((key, ":", item))
print(("-" * 30))
# END HACK
inputsSpec = pe.Node(
interface=IdentityInterface(
fields=[
"t1_average",
"t2_average",
"template_t1",
"hncma_atlas",
"LMIatlasToSubject_tx",
"inputLabels",
"inputHeadLabels",
"posteriorImages",
"UpdatedPosteriorsList",
"atlasToSubjectRegistrationState",
"rho",
"phi",
"theta",
"l_caudate_ProbabilityMap",
"r_caudate_ProbabilityMap",
"l_hippocampus_ProbabilityMap",
"r_hippocampus_ProbabilityMap",
"l_putamen_ProbabilityMap",
"r_putamen_ProbabilityMap",
"l_thalamus_ProbabilityMap",
"r_thalamus_ProbabilityMap",
"l_accumben_ProbabilityMap",
"r_accumben_ProbabilityMap",
"l_globus_ProbabilityMap",
"r_globus_ProbabilityMap",
"trainModelFile_txtD0060NT0060_gz",
]
),
run_without_submitting=True,
name="inputspec",
)
# outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
# run_without_submitting=True, name='outputspec')
currentClipT1ImageWithBrainMaskName = (
"ClipT1ImageWithBrainMask_" + str(subjectid) + "_" + str(sessionid)
)
ClipT1ImageWithBrainMaskNode = pe.Node(
interface=Function(
function=clip_t1_image_with_brain_mask,
input_names=["t1_image", "brain_labels", "clipped_file_name"],
output_names=["clipped_file"],
),
name=currentClipT1ImageWithBrainMaskName,
)
ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = (
"clipped_from_BABC_labels_t1.nii.gz"
)
baw200.connect(
[
(
inputsSpec,
ClipT1ImageWithBrainMaskNode,
[("t1_average", "t1_image"), ("inputLabels", "brain_labels")],
)
]
)
currentA2SantsRegistrationPostABCSyN = (
"A2SantsRegistrationPostABCSyN_" + str(subjectid) + "_" + str(sessionid)
)
## INFO: It would be great to update the BRAINSABC atlasToSubjectTransform at this point, but
## That requires more testing, and fixes to ANTS to properly collapse transforms.
## For now we are simply creating a dummy node to pass through
A2SantsRegistrationPostABCSyN = pe.Node(
interface=ants.Registration(), name=currentA2SantsRegistrationPostABCSyN
)
many_cpu_ANTsSyN_options_dictionary = {
"qsub_args": modify_qsub_args(CLUSTER_QUEUE_LONG, 8, 8, 16),
"overwrite": True,
}
A2SantsRegistrationPostABCSyN.plugin_args = many_cpu_ANTsSyN_options_dictionary
common_ants_registration_settings(
antsRegistrationNode=A2SantsRegistrationPostABCSyN,
registrationTypeDescription="A2SantsRegistrationPostABCSyN",
output_transform_prefix="AtlasToSubjectPostBABC_SyN",
output_warped_image="atlas2subjectPostBABC.nii.gz",
output_inverse_warped_image="subject2atlasPostBABC.nii.gz",
save_state="SavedInternalSyNStatePostBABC.h5",
invert_initial_moving_transform=False,
initial_moving_transform=None,
)
## INFO: Try multi-modal registration here
baw200.connect(
[
(
inputsSpec,
A2SantsRegistrationPostABCSyN,
[
("atlasToSubjectRegistrationState", "restore_state"),
("t1_average", "fixed_image"),
("template_t1", "moving_image"),
],
)
]
)
myLocalSegWF = create_brains_cut_workflow(
projectid,
subjectid,
sessionid,
master_config["queue"],
master_config["long_q"],
"Segmentation",
onlyT1,
)
MergeStage2AverageImagesName = "99_mergeAvergeStage2Images_" + str(sessionid)
MergeStage2AverageImages = pe.Node(
interface=Merge(2),
run_without_submitting=True,
name=MergeStage2AverageImagesName,
)
baw200.connect(
[
(
inputsSpec,
myLocalSegWF,
[
("t1_average", "inputspec.T1Volume"),
("template_t1", "inputspec.template_t1"),
("posteriorImages", "inputspec.posteriorDictionary"),
("inputLabels", "inputspec.RegistrationROI"),
],
),
(inputsSpec, MergeStage2AverageImages, [("t1_average", "in1")]),
(
A2SantsRegistrationPostABCSyN,
myLocalSegWF,
[("composite_transform", "inputspec.atlasToSubjectTransform")],
),
]
)
baw200.connect(
[
(
inputsSpec,
myLocalSegWF,
[
("rho", "inputspec.rho"),
("phi", "inputspec.phi"),
("theta", "inputspec.theta"),
("l_caudate_ProbabilityMap", "inputspec.l_caudate_ProbabilityMap"),
("r_caudate_ProbabilityMap", "inputspec.r_caudate_ProbabilityMap"),
(
"l_hippocampus_ProbabilityMap",
"inputspec.l_hippocampus_ProbabilityMap",
),
(
"r_hippocampus_ProbabilityMap",
"inputspec.r_hippocampus_ProbabilityMap",
),
("l_putamen_ProbabilityMap", "inputspec.l_putamen_ProbabilityMap"),
("r_putamen_ProbabilityMap", "inputspec.r_putamen_ProbabilityMap"),
(
"l_thalamus_ProbabilityMap",
"inputspec.l_thalamus_ProbabilityMap",
),
(
"r_thalamus_ProbabilityMap",
"inputspec.r_thalamus_ProbabilityMap",
),
(
"l_accumben_ProbabilityMap",
"inputspec.l_accumben_ProbabilityMap",
),
(
"r_accumben_ProbabilityMap",
"inputspec.r_accumben_ProbabilityMap",
),
("l_globus_ProbabilityMap", "inputspec.l_globus_ProbabilityMap"),
("r_globus_ProbabilityMap", "inputspec.r_globus_ProbabilityMap"),
(
"trainModelFile_txtD0060NT0060_gz",
"inputspec.trainModelFile_txtD0060NT0060_gz",
),
],
)
]
)
if not onlyT1:
baw200.connect(
[
(inputsSpec, myLocalSegWF, [("t2_average", "inputspec.T2Volume")]),
(inputsSpec, MergeStage2AverageImages, [("t2_average", "in2")]),
]
)
file_count = 15 # Count of files to merge into MergeSessionSubjectToAtlas
else:
file_count = 14 # Count of files to merge into MergeSessionSubjectToAtlas
## NOTE: Element 0 of AccumulatePriorsList is the accumulated GM tissue
# baw200.connect([(AccumulateLikeTissuePosteriorsNode, myLocalSegWF,
# [(('AccumulatePriorsList', get_list_index, 0), "inputspec.TotalGM")]),
# ])
### Now define where the final organized outputs should go.
DataSink = pe.Node(
nio.DataSink(),
name="CleanedDenoisedSegmentation_DS_" + str(subjectid) + "_" + str(sessionid),
)
DataSink.overwrite = master_config["ds_overwrite"]
DataSink.inputs.base_directory = master_config["resultdir"]
# DataSink.inputs.regexp_substitutions = generate_output_patern(projectid, subjectid, sessionid,'BRAINSCut')
# DataSink.inputs.regexp_substitutions = GenerateBRAINSCutImagesOutputPattern(projectid, subjectid, sessionid)
DataSink.inputs.substitutions = [
(
"Segmentations",
os.path.join(
projectid, subjectid, sessionid, "CleanedDenoisedRFSegmentations"
),
),
("subjectANNLabel_", ""),
("ANNContinuousPrediction", ""),
("subject.nii.gz", ".nii.gz"),
("_seg.nii.gz", "_seg.nii.gz"),
(".nii.gz", "_seg.nii.gz"),
("_seg_seg", "_seg"),
]
baw200.connect(
[
(
myLocalSegWF,
DataSink,
[
(
"outputspec.outputBinaryLeftCaudate",
"Segmentations.@LeftCaudate",
),
(
"outputspec.outputBinaryRightCaudate",
"Segmentations.@RightCaudate",
),
(
"outputspec.outputBinaryLeftHippocampus",
"Segmentations.@LeftHippocampus",
),
(
"outputspec.outputBinaryRightHippocampus",
"Segmentations.@RightHippocampus",
),
(
"outputspec.outputBinaryLeftPutamen",
"Segmentations.@LeftPutamen",
),
(
"outputspec.outputBinaryRightPutamen",
"Segmentations.@RightPutamen",
),
(
"outputspec.outputBinaryLeftThalamus",
"Segmentations.@LeftThalamus",
),
(
"outputspec.outputBinaryRightThalamus",
"Segmentations.@RightThalamus",
),
(
"outputspec.outputBinaryLeftAccumben",
"Segmentations.@LeftAccumben",
),
(
"outputspec.outputBinaryRightAccumben",
"Segmentations.@RightAccumben",
),
("outputspec.outputBinaryLeftGlobus", "Segmentations.@LeftGlobus"),
(
"outputspec.outputBinaryRightGlobus",
"Segmentations.@RightGlobus",
),
(
"outputspec.outputLabelImageName",
"Segmentations.@LabelImageName",
),
("outputspec.outputCSVFileName", "Segmentations.@CSVFileName"),
],
),
# (myLocalSegWF, DataSink, [('outputspec.cleaned_labels', 'Segmentations.@cleaned_labels')])
]
)
MergeStage2BinaryVolumesName = "99_MergeStage2BinaryVolumes_" + str(sessionid)
MergeStage2BinaryVolumes = pe.Node(
interface=Merge(12),
run_without_submitting=True,
name=MergeStage2BinaryVolumesName,
)
baw200.connect(
[
(
myLocalSegWF,
MergeStage2BinaryVolumes,
[
("outputspec.outputBinaryLeftAccumben", "in1"),
("outputspec.outputBinaryLeftCaudate", "in2"),
("outputspec.outputBinaryLeftPutamen", "in3"),
("outputspec.outputBinaryLeftGlobus", "in4"),
("outputspec.outputBinaryLeftThalamus", "in5"),
("outputspec.outputBinaryLeftHippocampus", "in6"),
("outputspec.outputBinaryRightAccumben", "in7"),
("outputspec.outputBinaryRightCaudate", "in8"),
("outputspec.outputBinaryRightPutamen", "in9"),
("outputspec.outputBinaryRightGlobus", "in10"),
("outputspec.outputBinaryRightThalamus", "in11"),
("outputspec.outputBinaryRightHippocampus", "in12"),
],
)
]
)
## SnapShotWriter for Segmented result checking:
SnapShotWriterNodeName = "SnapShotWriter_" + str(sessionid)
SnapShotWriter = pe.Node(
interface=BRAINSSnapShotWriter(), name=SnapShotWriterNodeName
)
SnapShotWriter.inputs.outputFilename = (
"snapShot" + str(sessionid) + ".png"
) # output specification
SnapShotWriter.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0]
SnapShotWriter.inputs.inputSliceToExtractInPhysicalPoint = [
-3,
-7,
-3,
5,
7,
22,
-22,
]
baw200.connect(
[
(MergeStage2AverageImages, SnapShotWriter, [("out", "inputVolumes")]),
(MergeStage2BinaryVolumes, SnapShotWriter, [("out", "inputBinaryVolumes")]),
(
SnapShotWriter,
DataSink,
[("outputFilename", "Segmentations.@outputSnapShot")],
),
]
)
# currentAntsLabelWarpToSubject = 'AntsLabelWarpToSubject' + str(subjectid) + "_" + str(sessionid)
# AntsLabelWarpToSubject = pe.Node(interface=ants.ApplyTransforms(), name=currentAntsLabelWarpToSubject)
#
# AntsLabelWarpToSubject.inputs.num_threads = -1
# AntsLabelWarpToSubject.inputs.dimension = 3
# AntsLabelWarpToSubject.inputs.output_image = 'warped_hncma_atlas_seg.nii.gz'
# AntsLabelWarpToSubject.inputs.interpolation = "MultiLabel"
#
# baw200.connect([(A2SantsRegistrationPostABCSyN, AntsLabelWarpToSubject, [('composite_transform', 'transforms')]),
# (inputsSpec, AntsLabelWarpToSubject, [('t1_average', 'reference_image'),
# ('hncma_atlas', 'input_image')])
# ])
# #####
# ### Now define where the final organized outputs should go.
# AntsLabelWarpedToSubject_DSName = "AntsLabelWarpedToSubject_DS_" + str(sessionid)
# AntsLabelWarpedToSubject_DS = pe.Node(nio.DataSink(), name=AntsLabelWarpedToSubject_DSName)
# AntsLabelWarpedToSubject_DS.overwrite = master_config['ds_overwrite']
# AntsLabelWarpedToSubject_DS.inputs.base_directory = master_config['resultdir']
# AntsLabelWarpedToSubject_DS.inputs.substitutions = [('AntsLabelWarpedToSubject', os.path.join(projectid, subjectid, sessionid, 'AntsLabelWarpedToSubject'))]
#
# baw200.connect([(AntsLabelWarpToSubject, AntsLabelWarpedToSubject_DS, [('output_image', 'AntsLabelWarpedToSubject')])])
MergeSessionSubjectToAtlasName = "99_MergeSessionSubjectToAtlas_" + str(sessionid)
MergeSessionSubjectToAtlas = pe.Node(
interface=Merge(file_count),
run_without_submitting=True,
name=MergeSessionSubjectToAtlasName,
)
baw200.connect(
[
(
myLocalSegWF,
MergeSessionSubjectToAtlas,
[
("outputspec.outputBinaryLeftAccumben", "in1"),
("outputspec.outputBinaryLeftCaudate", "in2"),
("outputspec.outputBinaryLeftPutamen", "in3"),
("outputspec.outputBinaryLeftGlobus", "in4"),
("outputspec.outputBinaryLeftThalamus", "in5"),
("outputspec.outputBinaryLeftHippocampus", "in6"),
("outputspec.outputBinaryRightAccumben", "in7"),
("outputspec.outputBinaryRightCaudate", "in8"),
("outputspec.outputBinaryRightPutamen", "in9"),
("outputspec.outputBinaryRightGlobus", "in10"),
("outputspec.outputBinaryRightThalamus", "in11"),
("outputspec.outputBinaryRightHippocampus", "in12"),
],
),
# (FixWMPartitioningNode, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList', 'in13')]),
(
inputsSpec,
MergeSessionSubjectToAtlas,
[("UpdatedPosteriorsList", "in13")],
),
(inputsSpec, MergeSessionSubjectToAtlas, [("t1_average", "in14")]),
]
)
if not onlyT1:
assert file_count == 15
baw200.connect(
[(inputsSpec, MergeSessionSubjectToAtlas, [("t2_average", "in15")])]
)
LinearSubjectToAtlasANTsApplyTransformsName = (
"LinearSubjectToAtlasANTsApplyTransforms_" + str(sessionid)
)
LinearSubjectToAtlasANTsApplyTransforms = pe.MapNode(
interface=ants.ApplyTransforms(),
iterfield=["input_image"],
name=LinearSubjectToAtlasANTsApplyTransformsName,
)
LinearSubjectToAtlasANTsApplyTransforms.inputs.num_threads = -1
LinearSubjectToAtlasANTsApplyTransforms.inputs.interpolation = "Linear"
baw200.connect(
[
(
A2SantsRegistrationPostABCSyN,
LinearSubjectToAtlasANTsApplyTransforms,
[("inverse_composite_transform", "transforms")],
),
(
inputsSpec,
LinearSubjectToAtlasANTsApplyTransforms,
[("template_t1", "reference_image")],
),
(
MergeSessionSubjectToAtlas,
LinearSubjectToAtlasANTsApplyTransforms,
[("out", "input_image")],
),
]
)
MergeMultiLabelSessionSubjectToAtlasName = (
"99_MergeMultiLabelSessionSubjectToAtlas_" + str(sessionid)
)
MergeMultiLabelSessionSubjectToAtlas = pe.Node(
interface=Merge(2),
run_without_submitting=True,
name=MergeMultiLabelSessionSubjectToAtlasName,
)
baw200.connect(
[
(
inputsSpec,
MergeMultiLabelSessionSubjectToAtlas,
[("inputLabels", "in1"), ("inputHeadLabels", "in2")],
)
]
)
### This is taking this sessions RF label map back into NAC atlas space.
# {
MultiLabelSubjectToAtlasANTsApplyTransformsName = (
"MultiLabelSubjectToAtlasANTsApplyTransforms_" + str(sessionid) + "_map"
)
MultiLabelSubjectToAtlasANTsApplyTransforms = pe.MapNode(
interface=ants.ApplyTransforms(),
iterfield=["input_image"],
name=MultiLabelSubjectToAtlasANTsApplyTransformsName,
)
MultiLabelSubjectToAtlasANTsApplyTransforms.inputs.num_threads = -1
MultiLabelSubjectToAtlasANTsApplyTransforms.inputs.interpolation = "MultiLabel"
baw200.connect(
[
(
A2SantsRegistrationPostABCSyN,
MultiLabelSubjectToAtlasANTsApplyTransforms,
[("inverse_composite_transform", "transforms")],
),
(
inputsSpec,
MultiLabelSubjectToAtlasANTsApplyTransforms,
[("template_t1", "reference_image")],
),
(
MergeMultiLabelSessionSubjectToAtlas,
MultiLabelSubjectToAtlasANTsApplyTransforms,
[("out", "input_image")],
),
]
)
# }
### Now we must take the sessions to THIS SUBJECTS personalized atlas.
# {
# }
### Now define where the final organized outputs should go.
Subj2Atlas_DSName = "SubjectToAtlas_DS_" + str(sessionid)
Subj2Atlas_DS = pe.Node(nio.DataSink(), name=Subj2Atlas_DSName)
Subj2Atlas_DS.overwrite = master_config["ds_overwrite"]
Subj2Atlas_DS.inputs.base_directory = master_config["resultdir"]
Subj2Atlas_DS.inputs.regexp_substitutions = [
(r"_LinearSubjectToAtlasANTsApplyTransforms_[^/]*", r"" + sessionid + "/")
]
baw200.connect(
[
(
LinearSubjectToAtlasANTsApplyTransforms,
Subj2Atlas_DS,
[("output_image", "SubjectToAtlasWarped.@linear_output_images")],
)
]
)
Subj2AtlasTransforms_DSName = "SubjectToAtlasTransforms_DS_" + str(sessionid)
Subj2AtlasTransforms_DS = pe.Node(nio.DataSink(), name=Subj2AtlasTransforms_DSName)
Subj2AtlasTransforms_DS.overwrite = master_config["ds_overwrite"]
Subj2AtlasTransforms_DS.inputs.base_directory = master_config["resultdir"]
Subj2AtlasTransforms_DS.inputs.regexp_substitutions = [
(r"SubjectToAtlasWarped", r"SubjectToAtlasWarped/" + sessionid + "/")
]
baw200.connect(
[
(
A2SantsRegistrationPostABCSyN,
Subj2AtlasTransforms_DS,
[
(
"composite_transform",
"SubjectToAtlasWarped.@composite_transform",
),
(
"inverse_composite_transform",
"SubjectToAtlasWarped.@inverse_composite_transform",
),
],
)
]
)
# baw200.connect([(MultiLabelSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS, [('output_image', 'SubjectToAtlasWarped.@multilabel_output_images')])])
if master_config["plugin_name"].startswith(
"SGE"
): # for some nodes, the qsub call needs to be modified on the cluster
A2SantsRegistrationPostABCSyN.plugin_args = {
"template": master_config["plugin_args"]["template"],
"overwrite": True,
"qsub_args": modify_qsub_args(master_config["queue"], 8, 8, 24),
}
SnapShotWriter.plugin_args = {
"template": master_config["plugin_args"]["template"],
"overwrite": True,
"qsub_args": modify_qsub_args(master_config["queue"], 1, 1, 1),
}
LinearSubjectToAtlasANTsApplyTransforms.plugin_args = {
"template": master_config["plugin_args"]["template"],
"overwrite": True,
"qsub_args": modify_qsub_args(master_config["queue"], 1, 1, 1),
}
MultiLabelSubjectToAtlasANTsApplyTransforms.plugin_args = {
"template": master_config["plugin_args"]["template"],
"overwrite": True,
"qsub_args": modify_qsub_args(master_config["queue"], 1, 1, 1),
}
return baw200
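# Hypothetical invocation sketch (the identifiers are illustrative; master_config must
# provide the keys used above: queue, long_q, plugin_name, plugin_args, ds_overwrite,
# resultdir, ...):
#
#   wf = segmentation("proj01", "subj01", "sess01", master_config,
#                     onlyT1=True, pipeline_name="Segmentation_subj01_sess01")
#   wf.run(plugin=master_config["plugin_name"], plugin_args=master_config["plugin_args"])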
|
"""1725. Number Of Rectangles That Can Form The Largest Square
https://leetcode.com/problems/number-of-rectangles-that-can-form-the-largest-square/
"""
from typing import List
def count_good_rectangles(rectangles: List[List[int]]) -> int:
k = min(rectangles[0])
for l, w in rectangles:
k = max(k, min(l, w))
ans = 0
for l, w in rectangles:
if l >= k and w >= k:
ans += 1
return ans
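# Quick check with a sample input: min(l, w) per rectangle is 5, 3, 5, 5, so the largest
# square side is 5 and three rectangles can supply it.
#   count_good_rectangles([[5, 8], [3, 9], [5, 12], [16, 5]])  # -> 3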
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'messenger.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Messenger(object):
def setupUi(self, Messenger):
Messenger.setObjectName("Messenger")
Messenger.resize(369, 481)
self.centralwidget = QtWidgets.QWidget(Messenger)
self.centralwidget.setObjectName("centralwidget")
self.sendButton = QtWidgets.QPushButton(self.centralwidget)
self.sendButton.setGeometry(QtCore.QRect(300, 410, 51, 32))
self.sendButton.setObjectName("sendButton")
self.textInput = QtWidgets.QTextEdit(self.centralwidget)
self.textInput.setGeometry(QtCore.QRect(20, 410, 271, 31))
self.textInput.setObjectName("textInput")
self.messagesBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.messagesBrowser.setGeometry(QtCore.QRect(20, 100, 331, 291))
self.messagesBrowser.setObjectName("messagesBrowser")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(100, 17, 171, 31))
font = QtGui.QFont()
font.setPointSize(20)
self.label.setFont(font)
self.label.setObjectName("label")
self.nameInput = QtWidgets.QLineEdit(self.centralwidget)
self.nameInput.setGeometry(QtCore.QRect(238, 60, 113, 21))
self.nameInput.setObjectName("nameInput")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(203, 63, 31, 16))
self.label_2.setObjectName("label_2")
Messenger.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(Messenger)
self.statusbar.setObjectName("statusbar")
Messenger.setStatusBar(self.statusbar)
self.retranslateUi(Messenger)
QtCore.QMetaObject.connectSlotsByName(Messenger)
def retranslateUi(self, Messenger):
_translate = QtCore.QCoreApplication.translate
Messenger.setWindowTitle(_translate("Messenger", "Messenger"))
self.sendButton.setText(_translate("Messenger", ">"))
self.textInput.setPlaceholderText(_translate("Messenger", "Введите текст..."))
self.label.setText(_translate("Messenger", "Skillbox Messenger"))
self.nameInput.setPlaceholderText(_translate("Messenger", "Введите имя..."))
self.label_2.setText(_translate("Messenger", "Имя:"))
|
"""Implementation of Fowler's Money pattern"""
from __future__ import annotations
from math import floor
from decimal import Decimal, ROUND_HALF_EVEN
from re import sub
import operator
from babel.numbers import format_currency
class Money:
"""
Money class that implements Fowler's Money pattern:
https://martinfowler.com/eaaCatalog/money.html
"""
def __init__(self, amount: int):
self.__assert_amount(amount)
self.__amount = amount
self.__currency = "USD"
def instance(self, amount: int) -> Money:
"""
Return new money object using the given amount
"""
self.__assert_amount(amount)
return self.__class__(amount)
def __str__(self):
return format_currency(
float(self.amount / 100),
self.__currency,
format=None,
locale="en_US",
currency_digits=True,
format_type="standard")
@staticmethod
def from_float(amount: float) -> Money:
"""
Return new money object instantiated from a float value
"""
if not isinstance(amount, float):
raise ValueError("Amount must be a float")
return Money(floor(amount * 100))
@staticmethod
def from_string(currency_str: str) -> Money:
"""
Return new money object instantiated from a string currency value
"""
if not isinstance(currency_str, str):
raise ValueError("Amount must be a string")
value = Decimal(sub(r'[^\d.]', '', currency_str))
return Money.from_float(float(value))
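# Illustrative examples (not part of the original class; amounts are stored in cents):
#   Money.from_string("$10.25").amount  -> 1025
#   Money.from_float(10.25).amount      -> 1025
#   str(Money(1025))                    -> '$10.25' (formatted by babel for en_US)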
@staticmethod
def __assert_amount(amount):
"""
Assert that given amount is an integer
"""
if not isinstance(amount, int):
raise ValueError("Amount must be an integer")
@staticmethod
def __assert_operand(operand):
"""
Assert that given operand is a numeric type
"""
if not isinstance(operand, (int, float)):
raise ValueError("Operand must be a numeric value")
@property
def amount(self) -> int:
"""
Return money amount
"""
return self.__amount
def __add__(self, other) -> Money:
"""
Return a new money object that amounts to
sum of this object and given money object
"""
if isinstance(other, Money):
return self.__class__(self.amount + other.amount)
self.__assert_amount(other)
return self.__class__(self.amount + other)
def __radd__(self, other):
"""
Return a new money object that amounts to
sum of this object and given money object
"""
return self.__add__(other)
def __sub__(self, other) -> Money:
"""
Return a new money object that amounts to
difference of this object and given money object
"""
if isinstance(other, Money):
return self.__class__(self.amount - other.amount)
self.__assert_amount(other)
return self.__class__(self.amount - other)
def __rsub__(self, other):
"""
Return a new money object that amounts to
difference of this object and given money object
"""
return (-self).__add__(other)
def __mul__(self, factor: (int, float)) -> Money:
"""
Return a new money object that amounts to
product of this object and given money object
"""
self.__assert_operand(factor)
return self.__class__(round(self.amount * factor))
def __rmul__(self, factor) -> Money:
"""
Return a new money object that amounts to
product of this object and given money object
"""
return self.__mul__(factor)
def __truediv__(self, other) -> Money:
"""
Return a new money object that amounts to
quotient of this object and given money object
"""
if isinstance(other, Money):
if other.amount == 0:
raise ZeroDivisionError()
return round(self.amount / other.amount)
self.__assert_operand(other)
if other == 0:
raise ZeroDivisionError()
return self.__class__(round(self.amount / other))
def __floordiv__(self, other) -> Money:
"""
Return a new money object that amounts to
quotient of this object and given money object
"""
if isinstance(other, Money):
if other.amount == 0:
raise ZeroDivisionError()
return self.amount // other.amount
self.__assert_operand(other)
if other == 0:
raise ZeroDivisionError()
return self.__class__(self.amount // other)
def __eq__(self, other) -> bool:
"""
Check if given money object value
and currency matches this object
"""
if isinstance(other, Money):
return self.amount == other.amount
self.__assert_amount(other)
return self.amount == other
def __gt__(self, other) -> bool:
"""
Check if object amount is
greater than given money amount
"""
return self.__compare(other, operator.gt)
def __ge__(self, other) -> bool:
"""
Check if object amount is greater
or if it equals to given money amount
"""
return self.__compare(other, operator.ge)
def __lt__(self, other) -> bool:
"""
Check if object amount is
less than given money amount
"""
return self.__compare(other, operator.lt)
def __le__(self, other) -> bool:
"""
Check if object amount is less or
if it equals to given money amount
"""
return self.__compare(other, operator.le)
def __compare(self, other, comparison_operator) -> bool:
"""
Compare object amount to given money
amount using the provided comparison operator
"""
if isinstance(other, Money):
return comparison_operator(self.amount, other.amount)
self.__assert_amount(other)
return comparison_operator(self.amount, other)
def __round__(self) -> Money:
"""
Return a new money object with a rounded amount
"""
decimal_value = Decimal(self.amount / 100)
quantized_value = decimal_value.quantize(exp=Decimal(1.00),
rounding=ROUND_HALF_EVEN)
rounded = int(quantized_value)
return self.__class__(rounded * 100)
def __int__(self) -> int:
"""
Return an int representation of a money object
"""
return self.amount
def __float__(self) -> float:
"""
Return a float representation of a money object
"""
return round(self.amount / 100, 2)
def __neg__(self):
"""
Return a new money object with a negative amount
"""
return self.__class__(-self.amount)
def __pos__(self):
"""
Return a new money object with a positive amount
"""
return self.__class__(+self.amount)
def __abs__(self):
"""
Return a new money object with an absolute value of the amount
"""
return self.__class__(abs(self.amount))
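# Illustrative usage sketch (not part of the original module): amounts are
# integer cents, arithmetic returns new Money objects, and __round__ applies
# banker's rounding to whole dollars.
if __name__ == "__main__":
    price = Money(1999)              # $19.99 stored as 1999 cents
    tax = price * 0.0725             # rounds to the nearest cent -> Money(145)
    total = price + tax
    print(int(total))                # 2144 (cents)
    print(float(total))              # 21.44 (dollars)
    print(int(round(total)))         # 2100 (rounded to whole dollars, in cents)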
|
# -*- coding: utf-8 -*-
"""
migrated from TheCannon package
"""
import numpy as np
# from functools import partial
# from multiprocessing import Pool
# import matplotlib.pyplot as plt
import scipy.optimize as opt
LARGE = 200.
SMALL = 1. / LARGE
def _partial_func(func, *args, **kwargs):
""" something """
def wrap(x, *p):
return func(x, p, **kwargs)
return wrap
def gaussian_weight_matrix(wl, L):
""" Matrix of Gaussian weights
Parameters
----------
wl: numpy ndarray
pixel wavelength values
L: float
width of Gaussian
Return
------
Weight matrix
"""
return np.exp(-0.5*(wl[:,None]-wl[None,:])**2/L**2)
def _sinusoid(x, p, L, y):
""" Return the sinusoid cont func evaluated at input x for the continuum.
Parameters
----------
x: float or np.array
data, input to function
p: ndarray
coefficients of fitting function
L: float
width of x data
y: float or np.array
output data corresponding to input x
Returns
-------
func: float
function evaluated for the input x
"""
N = int(len(p)/2)
n = np.linspace(0, N, N+1)
k = n*np.pi/L
func = 0
for n in range(0, N):
func += p[2*n]*np.sin(k[n]*x)+p[2*n+1]*np.cos(k[n]*x)
return func
def _weighted_median(values, weights, quantile):
""" Calculate a weighted median for values above a particular quantile cut
Used in pseudo continuum normalization
Parameters
----------
values: np ndarray of floats
the values to take the median of
weights: np ndarray of floats
the weights associated with the values
quantile: float
the cut applied to the input data
Returns
------
the weighted median
"""
sindx = np.argsort(values)
cvalues = 1. * np.cumsum(weights[sindx])
cvalues = cvalues / cvalues[-1]
foo = sindx[cvalues > quantile]
if len(foo) == 0:
return values[0]
indx = foo[0]
return values[indx]
def _find_cont_gaussian_smooth(wl, fluxes, ivars, w):
""" Returns the weighted mean block of spectra
Parameters
----------
    wl: numpy ndarray
        wavelength vector
    fluxes: numpy ndarray
        block of flux values
    ivars: numpy ndarray
        block of ivar values
    w: numpy ndarray
        matrix of Gaussian weights used to smooth the spectra
Returns
-------
smoothed_fluxes: numpy ndarray
block of smoothed flux values, mean spectra
"""
print("Finding the continuum")
bot = np.dot(ivars, w.T)
top = np.dot(fluxes*ivars, w.T)
bad = bot == 0
cont = np.zeros(top.shape)
cont[~bad] = top[~bad] / bot[~bad]
return cont
# why dataset?
def _cont_norm_gaussian_smooth(dataset, L):
""" Continuum normalize by dividing by a Gaussian-weighted smoothed spectrum
Parameters
----------
dataset: Dataset
the dataset to continuum normalize
L: float
the width of the Gaussian used for weighting
Returns
-------
dataset: Dataset
updated dataset
"""
print("Gaussian smoothing the entire dataset...")
w = gaussian_weight_matrix(dataset.wl, L)
print("Gaussian smoothing the training set")
cont = _find_cont_gaussian_smooth(
dataset.wl, dataset.tr_flux, dataset.tr_ivar, w)
norm_tr_flux, norm_tr_ivar = _cont_norm(
dataset.tr_flux, dataset.tr_ivar, cont)
print("Gaussian smoothing the test set")
cont = _find_cont_gaussian_smooth(
dataset.wl, dataset.test_flux, dataset.test_ivar, w)
norm_test_flux, norm_test_ivar = _cont_norm(
dataset.test_flux, dataset.test_ivar, cont)
return norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar
def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc):
""" Fit a continuum to a continuum pixels in a segment of spectra
Functional form can be either sinusoid or chebyshev, with specified degree
Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
contmask: numpy ndarray of length (npixels)
boolean pixel mask, True indicates that pixel is continuum
deg: int
degree of fitting function
ffunc: str
type of fitting function, chebyshev or sinusoid
Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
the continuum, parallel to fluxes
"""
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
for jj in range(nstars):
flux = fluxes[jj,:]
ivar = ivars[jj,:]
pix = np.arange(0, npixels)
y = flux[contmask]
x = pix[contmask]
yivar = ivar[contmask]
yivar[yivar == 0] = SMALL**2
if ffunc=="sinusoid":
p0 = np.ones(deg*2) # one for cos, one for sin
L = max(x)-min(x)
pcont_func = _partial_func(_sinusoid, L=L, y=flux)
popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0,
sigma=1./np.sqrt(yivar))
elif ffunc=="chebyshev":
fit = np.polynomial.chebyshev.Chebyshev.fit(x=x,y=y,w=yivar,deg=deg)
for element in pix:
if ffunc=="sinusoid":
cont[jj,element] = _sinusoid(element, popt, L=L, y=flux)
elif ffunc=="chebyshev":
cont[jj,element] = fit(element)
return cont
def _find_cont_fitfunc_regions(fluxes, ivars, contmask, deg, ranges, ffunc):
""" Run fit_cont, dealing with spectrum in regions or chunks
This is useful if a spectrum has gaps.
Parameters
----------
fluxes: ndarray of shape (nstars, npixels)
training set or test set pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
contmask: numpy ndarray of length (npixels)
boolean pixel mask, True indicates that pixel is continuum
deg: int
degree of fitting function
ffunc: str
type of fitting function, chebyshev or sinusoid
Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
the continuum, parallel to fluxes
"""
nstars = fluxes.shape[0]
npixels = fluxes.shape[1]
cont = np.zeros(fluxes.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
if ffunc=="chebyshev":
output = _find_cont_fitfunc(fluxes[:,start:stop],
ivars[:,start:stop],
contmask[start:stop], deg=deg, ffunc="chebyshev")
elif ffunc=="sinusoid":
output = _find_cont_fitfunc(fluxes[:,start:stop],
ivars[:,start:stop],
contmask[start:stop], deg=deg, ffunc="sinusoid")
cont[:,start:stop] = output
return cont
def _find_cont_running_quantile(wl, fluxes, ivars, q, delta_lambda,
verbose=False):
""" Perform continuum normalization using a running quantile
Parameters
----------
wl: numpy ndarray
wavelength vector
fluxes: numpy ndarray of shape (nstars, npixels)
pixel intensities
ivars: numpy ndarray of shape (nstars, npixels)
inverse variances, parallel to fluxes
q: float
the desired quantile cut
delta_lambda: int
the number of pixels over which the median is calculated
Output
------
norm_fluxes: numpy ndarray of shape (nstars, npixels)
normalized pixel intensities
norm_ivars: numpy ndarray of shape (nstars, npixels)
rescaled pixel invariances
"""
if verbose:
print("contnorm.py: continuum norm using running quantile")
cont = np.zeros(fluxes.shape)
nstars = fluxes.shape[0]
for jj in range(nstars):
if verbose:
print("cont_norm_q(): working on star %s" %jj)
flux = fluxes[jj,:]
ivar = ivars[jj,:]
for ll, lam in enumerate(wl):
indx = (np.where(abs(wl-lam) < delta_lambda))[0]
flux_cut = flux[indx]
ivar_cut = ivar[indx]
cont[jj,ll] = _weighted_median(flux_cut, ivar_cut, q)
return cont
def _cont_norm_running_quantile(wl, fluxes, ivars, q, delta_lambda, verbose=False):
    """ Continuum normalize by dividing out a running-quantile continuum """
    cont = _find_cont_running_quantile(wl, fluxes, ivars, q, delta_lambda)
    norm_fluxes = np.ones(fluxes.shape)
    norm_fluxes[cont != 0] = fluxes[cont != 0] / cont[cont != 0]
    norm_ivars = cont ** 2 * ivars
return norm_fluxes, norm_ivars
def _cont_norm_running_quantile_regions(wl, fluxes, ivars, q, delta_lambda, ranges, verbose=False):
""" Perform continuum normalization using running quantile, for spectrum
that comes in chunks
"""
# print("contnorm.py: continuum norm using running quantile")
if verbose:
print("Taking spectra in %s chunks" %len(ranges))
nstars = fluxes.shape[0]
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
output = _cont_norm_running_quantile(
wl[start:stop], fluxes[:,start:stop],
ivars[:,start:stop], q, delta_lambda)
norm_fluxes[:,start:stop] = output[0]
norm_ivars[:,start:stop] = output[1]
return norm_fluxes, norm_ivars
def _cont_norm(fluxes, ivars, cont):
""" Continuum-normalize a continuous segment of spectra.
Parameters
----------
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
    cont: numpy ndarray
        the continuum, parallel to fluxes
Returns
-------
norm_fluxes: numpy ndarray
normalized pixel intensities
norm_ivars: numpy ndarray
rescaled inverse variances
"""
    bad = cont == 0.
    norm_fluxes = np.ones(fluxes.shape)
    norm_fluxes[~bad] = fluxes[~bad] / cont[~bad]
    norm_ivars = cont ** 2 * ivars
return norm_fluxes, norm_ivars
def _cont_norm_regions(fluxes, ivars, cont, ranges):
""" Perform continuum normalization for spectra in chunks
Useful for spectra that have gaps
Parameters
---------
fluxes: numpy ndarray
pixel intensities
ivars: numpy ndarray
inverse variances, parallel to fluxes
cont: numpy ndarray
the continuum
ranges: list or np ndarray
the chunks that the spectrum should be split into
Returns
-------
norm_fluxes: numpy ndarray
normalized pixel intensities
norm_ivars: numpy ndarray
rescaled inverse variances
"""
nstars = fluxes.shape[0]
norm_fluxes = np.zeros(fluxes.shape)
norm_ivars = np.zeros(ivars.shape)
for chunk in ranges:
start = chunk[0]
stop = chunk[1]
output = _cont_norm(fluxes[:,start:stop],
ivars[:,start:stop],
cont[:,start:stop])
norm_fluxes[:,start:stop] = output[0]
norm_ivars[:,start:stop] = output[1]
for jj in range(nstars):
bad = (norm_ivars[jj,:] == 0.)
norm_fluxes[jj,:][bad] = 1.
return norm_fluxes, norm_ivars
# ############################
# here below goes my functions
# ############################
# def _cont_norm_running_quantile(wave, flux)
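# Illustrative smoke test (not part of the migrated module): run the
# running-quantile continuum normalization defined above on a small
# synthetic block of flat, noisy spectra.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    wl_demo = np.linspace(4000., 4100., 200)            # wavelength grid
    flux_demo = 1.0 + 0.05 * rng.randn(2, 200)           # two noisy "spectra"
    ivar_demo = np.full(flux_demo.shape, 400.)           # uniform inverse variance
    norm_flux, norm_ivar = _cont_norm_running_quantile(
        wl_demo, flux_demo, ivar_demo, q=0.90, delta_lambda=10.)
    print(norm_flux.shape, norm_ivar.shape)              # (2, 200) (2, 200)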
|
#from core.classifier import CustomizedClassifier,isbad #classifier class
from core import xyz #coorddb class (holds the data)
import math
#sklearn module - does the classification
import core.classifier as classifier
from core.classifier import *
import traceback
import pickle
from sklearn import semi_supervised
class SpectralClassifier(classifier.CustomizedClassifier):
'''
Parameters: cltype="LabelSpreading", x=data("RAW_HEIGHTS"), y=exceeds("SCORE", 0.5), unlabelled_files = None, result = "SPECTRAL_PROB", **args
where
cltype is a classifier in sklearn.semi_supervised (i.e. LabelSpreading or LabelPropagation)
** args are as in sklearn (e.g. kernel='knn', n_neighbors=3)
'''
def __init__(self, cltype="LabelSpreading", x=data("RAW_HEIGHTS"), y=exceeds("SCORE", 0.5), unlabelled_files = None, result = "SPECTRAL_PROB", y_is_1d=True, predict_prob = False, preprocess_fun = None, **args):
f = getattr(semi_supervised, cltype)
clf = f(**args)
self.unlabelled_files = unlabelled_files if unlabelled_files is not None else []
#clf.predict = clf.predict_proba #we do not want to get just True or False, but a number in between
classifier.CustomizedClassifier.__init__(self, x, y, result, clf, y_is_1d, predict_prob, preprocess_fun)
def train_files(self, files):
data = self.load_data(files, True)
udata = self.load_data(self.unlabelled_files, False)
self.train(data, udata)
#could create intermediate SemisupervisedClassifier
def train(self, db, udb):
#print db.data["Y"][:10]
y = self.y_to_1d(db.data["Y"]) + [-1] * udb.size
positive = len([yy for yy in y if yy > 0])
self.show_info("Train total:"+ str(len(y))+"\n")
self.show_info("Train positive:"+ str(positive)+"\n")
self.show_info("Train unlabeled:"+ str(udb.size)+"\n")
try:
self.clf.fit(db.data["X"] + udb.data["X"], y)
except MemoryError:
self.show_info("Out of memory.")
raise
except:
self.show_info(traceback.format_exc())
print ('''Saving X to X.pickle; python -ic "import pickle; X = pickle.load(open('X.pickle','rb'))" to find problem''')
f = open("X.pickle", "wb")
pickle.dump(db.data["X"],f)
f.close()
raise
#todo: extract from the signature?
@classmethod
def default_args(cl):
return ""
#core.classifier.cl_items.register(KnnClassifier, "k-nearest neighbours")
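# Illustrative construction (hypothetical file names; keyword arguments beyond the
# documented ones are forwarded to the chosen sklearn.semi_supervised estimator):
#   clf = SpectralClassifier(cltype="LabelSpreading", kernel='knn', n_neighbors=3,
#                            unlabelled_files=["unlabelled_scores.dat"])
#   clf.train_files(["labelled_scores_1.dat", "labelled_scores_2.dat"])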
|
#!/usr/bin/env python
import os
import yaml
import logging
import pymysql
from connections import Connections
__dir__ = os.path.dirname(__file__)
config = yaml.safe_load(open(os.path.join(__dir__, "../default_config.yaml")))
try:
config.update(yaml.safe_load(open(os.path.join(__dir__, "../config.yaml"))))
except IOError:
# is ok if we do not have config.yaml
pass
logging.basicConfig(
filename=config['KILLER_LOG_PATH'],
level=logging.INFO,
format='%(asctime)s pid:%(process)d %(message)s'
)
logging.info("Started killer process, with limit %s", config['QUERY_TIME_LIMIT'])
conn = Connections(config)
cur = conn.replica.cursor()
try:
cur.execute('SHOW PROCESSLIST')
queries = cur.fetchall()
logging.info("Found %s queries running", len(queries))
to_kill = [q for q in queries
if q[5] > config['QUERY_TIME_LIMIT'] and q[4] != 'Sleep']
logging.info("Found %s queries to kill", len(to_kill))
for q in to_kill:
try:
cur.execute('KILL QUERY %s', q[0])
logging.info("Killed query with thread_id:%s" % q[0])
except pymysql.InternalError as e:
if e.args[0] == 1094: # Error code for 'no such thread'
                logging.info('Query with thread_id:%s dead before it could be killed', q[0])
else:
raise
finally:
logging.info("Finished killer process")
cur.close()
conn.close_all()
|
import cv2
import sys
# from PIL import Image
import numpy as np
import os
import base64
# import sys
import io
from PIL import Image
cascade_file = "haarcascade_frontalface_default.xml"
# Create the 'dataset' folder if it cannot be found
path_data = 'dataset'
if path_data not in os.listdir():
os.mkdir(path_data)
def cropImage(path, cnt):
num = 1
files = os.listdir(path)
    # For every file under path, detect faces, crop them, and save the crops to the dataset folder
for file in files:
img = cv2.imread(os.path.join(path, file))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier(cascade_file)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cropped = img[y - int(h / 4):y + h + int(h / 4), x - int(w / 4):x + w + int(w / 4)]
cv2.imwrite("dataset/User." + str(cnt) + '.' + str(num) + ".jpg", cropped)
num += 1
def decode_img(msg):
msg = msg[msg.find(b"<plain_txt_msg:img>")+len(b"<plain_txt_msg:img>"):
msg.find(b"<!plain_txt_msg>")]
msg = base64.b64decode(msg)
buf = io.BytesIO(msg)
img = Image.open(buf)
return img
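# Hedged counterpart sketch (not in the original script): build a message in the
# format decode_img() expects, i.e. the base64-encoded image bytes wrapped in the
# <plain_txt_msg:img> ... <!plain_txt_msg> markers.
def encode_img(img_bytes):
    return (b"<plain_txt_msg:img>"
            + base64.b64encode(img_bytes)
            + b"<!plain_txt_msg>")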
#path ='../nCube-Thyme-Nodejs-master/note.txt'
path = '/home/pi/Desktop/Documents/nCube-Thyme-Nodejs-master/note2.txt'
f = open(path,'rb')
#fileread = f.read()+b"<!plain_txt_msg>"
fileread = f.read()
print(fileread)
decoding_img = decode_img(fileread)
print(decoding_img)
resize=decoding_img.resize((720,720))
resize.save('./decoding2.jpg')
f.close()
cnt=1
# Train the recognizer on the images in the dataset folder
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier(cascade_file)
# Get the images and label data
def getImagesAndLabels(path):
imagePaths = [os.path.join(path,f) for f in os.listdir(path)]
faceSamples=[]
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale
img_numpy = np.array(PIL_img,'uint8')
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy)
for (x,y,w,h) in faces:
faceSamples.append(img_numpy[y:y+h,x:x+w])
ids.append(id)
return faceSamples,ids
#print ("\n [INFO] Training faces. It will take a few seconds. Wait ...")
faces,ids = getImagesAndLabels(path_data)
recognizer.train(faces, np.array(ids))
# Save the model into trainer/trainer.yml
recognizer.write('trainer/trainer.yml') # recognizer.save() worked on Mac, but not on Pi
# Print the numer of faces trained and end program
#print("\n [INFO] {0} faces trained. Exiting Program".format(len(np.unique(ids))))
cv2.waitKey(0)
cv2.destroyAllWindows()
|
"""
Module cart3d -- Cartesian coordinates, 3 axes, double-precision.
Includes class Cart3d.
"""
import array as ar
class Cart3d():
"A 3-element tuple of double-precision values."
prefix = "Cart3d."
enumx = (0, 1, 2) # enumeration of indices
zeros = [ 0.0, 0.0, 0.0 ]
def __init__(self, *args) -> None:
acnt = len(args) # argument count
if acnt == 0:
# no args, fill with zeros
self.elems = ar.array('d', Cart3d.zeros)
elif acnt == 1:
if isinstance(args[0], list) and len(args[0]) == 3:
# list comprised of 3 floats
self.elems = ar.array('d', args[0])
            elif isinstance(args[0], Cart3d):
                # Cart3d: copy the elements so the new object does not
                # share the underlying array with the source object
                self.elems = ar.array('d', args[0].elems)
else:
raise TypeError('Cart3d __init__ unknown argument')
elif acnt == 3 and isinstance(args[0], float):
# 3 floats
self.elems = ar.array('d', args)
else:
raise TypeError('Cart3d __init__ unrecognized args count')
def __repr__(self):
return Cart3d.prefix+self.elems.__repr__()
def __add__(self, other):
# Cart3d + Cart3d
c = Cart3d()
for i in Cart3d.enumx:
c.elems[i] = self.elems[i] + other.elems[i]
return c
def __sub__(self, other) :
# Cart3d - Cart3d
c = Cart3d()
for i in Cart3d.enumx:
c.elems[i] = self.elems[i] - other.elems[i]
return c
def __mul__(self, other):
c = Cart3d()
if isinstance(other, float):
# Cart3d * float
for i in Cart3d.enumx:
c.elems[i] = self.elems[i] * other
return c
elif isinstance(other, Cart3d):
# Cart3d * Cart3d (dot product)
s = 0.0
for i in Cart3d.enumx:
s += self.elems[i] * other.elems[i]
return s
def __truediv__(self, other):
# Cart3d / float
c = Cart3d()
for i in Cart3d.enumx:
c.elems[i] = self.elems[i] / other
return c
def __iadd__(self, other):
# Cart3d += Cart3d
for i in Cart3d.enumx:
self.elems[i] += other.elems[i]
return self
def __isub__(self, other):
# Cart3d -= Cart3d
for i in Cart3d.enumx:
self.elems[i] -= other.elems[i]
return self
def __imul__(self, other):
# Cart3d *= float
c = Cart3d()
for i in Cart3d.enumx:
c.elems[i] = self.elems[i] * other
return c
def __itruediv__(self, other):
# Cart3d /= float
c = Cart3d()
for i in Cart3d.enumx:
c.elems[i] = self.elems[i] / other
return c
def fromlist(self, ll):
assert len(ll) == 3
self.elems = ar.array('d', ll)
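# Illustrative usage sketch (not part of the original module).
if __name__ == "__main__":
    a = Cart3d(1.0, 2.0, 3.0)
    b = Cart3d([4.0, 5.0, 6.0])
    print(a + b)          # Cart3d.array('d', [5.0, 7.0, 9.0])
    print(a * 2.0)        # element-wise scaling
    print(a * b)          # dot product: 32.0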
|
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('skinematics', parent_package, top_path)
'''
# An example Fortran extension
config.add_extension(
'fortran_stuff',
sources=['fortran_stuff.f90']
)
'''
config.add_subpackage('tests')
return config
|
import radical.utils as _ru
import os
from prof_utils import *
_pwd = os.path.dirname (__file__)
_root = "%s/.." % _pwd
version_short, version_detail, version_base, version_branch, \
sdist_name, sdist_path = _ru.get_version([_root, _pwd])
version = version_short
|
'''
---------------------------------------------
MyDict - My version of the class Dictionary
Author: Kethan Vegunta
---------------------------------------------
Description:
My version of the python dictionary. Its syntax is very similar to that of a
regular python dict.
'''
import pickle
class MyDict:
'''
Create MyDict object:
myDict1 = MyDict()
'''
def __init__(self):
pass
'''
Get Item:
myDict1["hi"]
'''
def __getitem__(self, key):
return getattr(self, self.__convert_item_to_str(key))
'''
Set Item:
myDict1["hi"] = 1
'''
def __setitem__(self, key, value):
setattr(self, self.__convert_item_to_str(key), value)
'''
Delete Item:
del myDict1["hi"]
or
myDict1.pop("hi")
'''
def __delitem__(self, key):
delattr(self, self.__convert_item_to_str(key))
'''
Iterate Through MyDict:
for key in myDict1:
print(key)
'''
def __iter__(self):
for key in list(vars(self)):
yield self.__convert_str_to_item(key)
'''
keys() returns a set containing all the keys
Iterate Through MyDict Keys:
for key in myDict1.keys():
print(key)
'''
def keys(self):
return {self.__convert_str_to_item(key) for key in vars(self)}
'''
values() returns a set containing the value of all the keys
Iterate Through MyDict Values:
for key in myDict1.values():
print(key)
'''
def values(self):
return {
self[self.__convert_str_to_item(key)] for key in list(vars(self))
}
'''
items() returns a set of tuples, with each tuple containing a key,
value pair.
Iterate Through MyDict Keys and Values:
for key, value in myDict1.items():
print(key, value)
'''
def items(self):
return {
(
self.__convert_str_to_item(key),
self[self.__convert_str_to_item(key)]
)
for key in list(vars(self))
}
'''
Update MyDict:
    Goes through the given mapping and updates or adds key, value pairs
        myDict1.update(other_dict)
'''
def update(self, to_update_dict):
for key, value in to_update_dict.items():
self[key] = value
'''
Pop Item out of MyDict:
pops key out of MyDict instance
myDict1.pop("key")
You can also use the `del` keyword:
del myDict1["key"]
'''
def pop(self, key):
del self[key]
'''
Clearing MyDict:
Removes all key, value pairs from dictionary
myDict1.clear()
'''
def clear(self):
for key in self.keys():
del self[key]
'''
Copying MyDict:
Returns a copy of MyDict
myDict1_copy = myDict1.copy()
'''
def copy(self):
newMyDict = MyDict()
for key, value in self.items():
newMyDict[key] = value
return newMyDict
def __convert_item_to_str(self, item):
return pickle.dumps(item).decode('unicode_escape')
def __convert_str_to_item(self, item):
return pickle.loads(
item.encode('utf-8', 'unicode_escape').replace(b'\xc2', b'')
)
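# Illustrative usage sketch (not part of the original module); keys are pickled
# into attribute names, so hashable non-string keys such as tuples also work.
if __name__ == "__main__":
    d = MyDict()
    d["hi"] = 1
    d[(2, 3)] = "a tuple key"
    print(d["hi"], d[(2, 3)])     # 1 a tuple key
    d.pop("hi")
    print(d.keys())               # {(2, 3)}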
|
from dino_utils import (
get_model_slug_for_dino,
get_model_for_dino,
get_model_resolution_for_dino,
label_image_for_dino,
count_num_features_for_dino,
get_preprocessing_for_dino,
)
from keras_utils import (
get_model_slug_for_keras,
get_model_for_keras,
get_model_resolution_for_keras,
label_image_for_keras,
count_num_features_for_keras,
get_dummy_preprocessing_for_keras,
)
from openai_utils import (
get_model_slug_for_clip,
get_model_for_clip,
get_model_resolution_for_clip,
label_image_for_clip,
count_num_features_for_clip,
get_preprocessing_for_clip,
)
def get_my_model_of_choice(choice_index=0):
available_models = [
get_model_slug_for_keras(),
get_model_slug_for_clip(),
get_model_slug_for_dino(),
]
    # The choice_index argument selects between Keras' MobileNet, OpenAI's CLIP, and DINO:
chosen_model = available_models[choice_index]
return chosen_model
def get_num_features(model=None, args=None):
if get_model_slug_for_clip() == get_my_model_of_choice():
num_features = count_num_features_for_clip(model)
elif get_model_slug_for_dino() == get_my_model_of_choice():
num_features = count_num_features_for_dino(model, args)
else:
num_features = count_num_features_for_keras(model)
return num_features
def get_preprocessing_tool():
if get_model_slug_for_clip() == get_my_model_of_choice():
preprocessing_tool = get_preprocessing_for_clip()
elif get_model_slug_for_dino() == get_my_model_of_choice():
preprocessing_tool = get_preprocessing_for_dino()
else:
preprocessing_tool = get_dummy_preprocessing_for_keras()
return preprocessing_tool
def get_target_model_size(resolution=None):
if resolution is None:
if get_model_slug_for_clip() == get_my_model_of_choice():
resolution = get_model_resolution_for_clip()
elif get_model_slug_for_dino() == get_my_model_of_choice():
resolution = get_model_resolution_for_dino()
else:
resolution = get_model_resolution_for_keras()
target_model_size = (resolution, resolution)
return target_model_size
def get_input_shape(target_model_size, num_channels=3):
# Image data format: channels last
input_shape = tuple(list(target_model_size) + [num_channels])
return input_shape
def load_model(target_model_size=None, include_top=False, pooling="avg", args=None):
if target_model_size is None:
target_model_size = get_target_model_size()
input_shape = get_input_shape(target_model_size)
if get_model_slug_for_clip() == get_my_model_of_choice():
model = get_model_for_clip(
input_shape=input_shape, include_top=include_top, pooling=pooling
)
elif get_model_slug_for_dino() == get_my_model_of_choice():
model = get_model_for_dino(args)
else:
model = get_model_for_keras(
input_shape=input_shape, include_top=include_top, pooling=pooling
)
return model
def convert_image_to_features(image, model, preprocess=None):
if get_model_slug_for_clip() == get_my_model_of_choice():
yhat = label_image_for_clip(image, model, preprocess=preprocess)
elif get_model_slug_for_dino() == get_my_model_of_choice():
yhat = label_image_for_dino(image, model, preprocess=preprocess)
else:
yhat = label_image_for_keras(image, model, preprocess=preprocess)
features = yhat.flatten()
return features
if __name__ == "__main__":
chosen_model = get_my_model_of_choice()
print("Slug of the chosen model: {}".format(chosen_model))
|
import os
from solaris.eval.base import Evaluator
import solaris
import geopandas as gpd
import pandas as pd
class TestEvaluator(object):
def test_init_from_file(self):
"""Test instantiation of an Evaluator instance from a file."""
base_instance = Evaluator(os.path.join(solaris.data.data_dir,
'gt.geojson'))
gdf = solaris.data.gt_gdf()
assert base_instance.ground_truth_sindex.bounds == gdf.sindex.bounds
assert base_instance.proposal_GDF.equals(gpd.GeoDataFrame([]))
assert base_instance.ground_truth_GDF.equals(
base_instance.ground_truth_GDF_Edit)
def test_init_from_gdf(self):
"""Test instantiation of an Evaluator from a pre-loaded GeoDataFrame."""
gdf = solaris.data.gt_gdf()
base_instance = Evaluator(gdf)
assert base_instance.ground_truth_sindex.bounds == gdf.sindex.bounds
assert base_instance.proposal_GDF.equals(gpd.GeoDataFrame([]))
assert base_instance.ground_truth_GDF.equals(
base_instance.ground_truth_GDF_Edit)
def test_init_empty_geojson(self):
"""Test instantiation of Evaluator with an empty geojson file."""
base_instance = Evaluator(os.path.join(solaris.data.data_dir,
'empty.geojson'))
expected_gdf = gpd.GeoDataFrame({'sindex': [],
'condition': [],
'geometry': []})
assert base_instance.ground_truth_GDF.equals(expected_gdf)
def test_score_proposals(self):
"""Test reading in a proposal GDF from a geojson and scoring it."""
eb = Evaluator(os.path.join(solaris.data.data_dir, 'gt.geojson'))
eb.load_proposal(os.path.join(solaris.data.data_dir, 'pred.geojson'))
pred_gdf = solaris.data.pred_gdf()
assert eb.proposal_GDF.iloc[:, 0:3].sort_index().equals(pred_gdf)
expected_score = [{'class_id': 'all',
'iou_field': 'iou_score_all',
'TruePos': 8,
'FalsePos': 20,
'FalseNeg': 20,
'Precision': 0.2857142857142857,
'Recall': 0.2857142857142857,
'F1Score': 0.2857142857142857}]
scores = eb.eval_iou(calculate_class_scores=False)
assert scores == expected_score
def test_iou_by_building(self):
"""Test output of ground truth table with per-building IoU scores"""
data_folder = solaris.data.data_dir
path_truth = os.path.join(data_folder, 'SN2_sample_truth.csv')
path_pred = os.path.join(data_folder, 'SN2_sample_preds.csv')
path_ious = os.path.join(data_folder, 'SN2_sample_iou_by_building.csv')
path_temp = './temp.pd'
eb = Evaluator(path_truth)
eb.load_proposal(path_pred, conf_field_list=['Confidence'],
proposalCSV=True)
eb.eval_iou_spacenet_csv(miniou=0.5, imageIDField='ImageId',
min_area=20)
output = eb.get_iou_by_building()
result_actual = pd.DataFrame(output)
result_actual.sort_values(by=['ImageId', 'BuildingId'], inplace=True)
ious_actual = list(result_actual['iou_score'])
result_expected = pd.read_csv(path_ious, index_col=0)
result_expected.sort_values(by=['ImageId', 'BuildingId'], inplace=True)
ious_expected = list(result_expected['iou_score'])
maxdifference = max([abs(x-y) for x, y in zip(ious_actual,
ious_expected)])
epsilon = 1E-9
assert maxdifference < epsilon
|
"""
Exceptions raised by geopy.
"""
class GeopyError(Exception):
"""
Geopy-specific exceptions are all inherited from GeopyError.
"""
class ConfigurationError(GeopyError):
"""
When instantiating a geocoder, the arguments given were invalid. See
the documentation of each geocoder's `__init__` for more details.
"""
class GeocoderServiceError(GeopyError):
"""
There was an exception caused when calling the remote geocoding service,
and no more specific exception could be raised by geopy. When calling
geocoders' `geocode` or `reverse` methods, this is the most general
exception that can be raised, and any non-geopy exception will be caught
and turned into this. The exception's message will be that of the
original exception.
"""
class GeocoderQueryError(GeocoderServiceError):
"""
The remote geocoding service raised a bad request over the user's input.
"""
class GeocoderQuotaExceeded(GeocoderServiceError):
"""
The remote geocoding service refused to fulfill the request
because the client has used its quota.
"""
class GeocoderAuthenticationFailure(GeocoderServiceError):
"""
The remote geocoding service rejects the API key or account
credentials this geocoder was instantiated with.
"""
class GeocoderInsufficientPrivileges(GeocoderServiceError):
"""
The remote geocoding service refused to fulfill a request using the
account credentials given.
"""
class GeocoderTimedOut(GeocoderServiceError):
"""
The call to the geocoding service was aborted because no response
    was received within the `timeout` argument of either the geocoding class
or, if specified, the method call. Some services are just consistently
slow, and a higher timeout may be needed to use them.
"""
class GeocoderParseError(GeocoderServiceError):
"""
Geopy could not parse the service's response. This is a bug in geopy.
"""
|
from django import template
register = template.Library()
@register.filter(name='get_notetype_notes_count')
def get_notetype_notes_count(notes):
notes_without_type = notes.filter(note_type=None).count()
notes_count = notes.count()
notes_with_type = notes_count - notes_without_type
return notes_with_type
|
from django.urls import path
from . import views
urlpatterns = [
path('translate', views.TranslateApiView.as_view()),
]
|
"""Tests for hedging_value function."""
import unittest
from numpy import kron, cos, sin, pi, sqrt, isclose
from toqito.states import basis
from toqito.nonlocal_games.quantum_hedging import QuantumHedging
class TestQuantumHedging(unittest.TestCase):
"""Unit test for hedging_value."""
e_0, e_1 = basis(2, 0), basis(2, 1)
e_00, e_01 = kron(e_0, e_0), kron(e_0, e_1)
e_10, e_11 = kron(e_1, e_0), kron(e_1, e_1)
alpha = 1 / sqrt(2)
theta = pi / 8
w_var = alpha * cos(theta) * e_00 + sqrt(1 - alpha ** 2) * sin(theta) * e_11
l_1 = -alpha * sin(theta) * e_00 + sqrt(1 - alpha ** 2) * cos(theta) * e_11
l_2 = alpha * sin(theta) * e_10
l_3 = sqrt(1 - alpha ** 2) * cos(theta) * e_01
q_1 = w_var * w_var.conj().T
q_0 = l_1 * l_1.conj().T + l_2 * l_2.conj().T + l_3 * l_3.conj().T
def test_max_prob_outcome_a_primal_1_dim(self):
"""
Maximal probability of outcome "a" when dim == 1.
The primal problem of the hedging semidefinite program.
"""
q_0 = TestQuantumHedging.q_0
hedging_value = QuantumHedging(q_0, 1)
self.assertEqual(isclose(hedging_value.max_prob_outcome_a_primal(), cos(pi / 8) ** 2), True)
def test_max_prob_outcome_a_primal_2_dim(self):
"""
Test maximal probability of outcome "a" when dim == 2.
The primal problem of the hedging semidefinite program.
"""
q_00 = kron(TestQuantumHedging.q_0, TestQuantumHedging.q_0)
hedging_value = QuantumHedging(q_00, 2)
self.assertEqual(isclose(hedging_value.max_prob_outcome_a_primal(), cos(pi / 8) ** 4), True)
def test_max_prob_outcome_a_dual_1_dim(self):
"""
Test maximal probability of outcome "a" when dim == 1.
The dual problem of the hedging semidefinite program.
"""
q_0 = TestQuantumHedging.q_0
hedging_value = QuantumHedging(q_0, 1)
self.assertEqual(isclose(hedging_value.max_prob_outcome_a_dual(), cos(pi / 8) ** 2), True)
def test_max_prob_outcome_a_dual_2_dim(self):
"""
Test maximal probability of outcome "a" when dim == 2.
The dual problem of the hedging semidefinite program.
"""
q_00 = kron(TestQuantumHedging.q_0, TestQuantumHedging.q_0)
hedging_value = QuantumHedging(q_00, 2)
self.assertEqual(isclose(hedging_value.max_prob_outcome_a_dual(), cos(pi / 8) ** 4), True)
def test_min_prob_outcome_a_primal_1_dim(self):
"""
Test minimal probability of outcome "a" when dim == 1.
The primal problem of the hedging semidefinite program.
"""
q_1 = TestQuantumHedging.q_1
hedging_value = QuantumHedging(q_1, 1)
self.assertEqual(isclose(hedging_value.min_prob_outcome_a_primal(), 0, atol=0.01), True)
def test_min_prob_outcome_a_primal_2_dim(self):
"""
Test minimal probability of outcome "a" when dim == 2.
The primal problem of the hedging semidefinite program.
"""
q_11 = kron(TestQuantumHedging.q_1, TestQuantumHedging.q_1)
hedging_value = QuantumHedging(q_11, 2)
self.assertEqual(isclose(hedging_value.min_prob_outcome_a_primal(), 0, atol=0.01), True)
def test_min_prob_outcome_a_dual_1_dim(self):
"""
Test minimal probability of outcome "a" when dim == 1.
The dual problem of the hedging semidefinite program.
"""
q_1 = TestQuantumHedging.q_1
hedging_value = QuantumHedging(q_1, 1)
self.assertEqual(isclose(hedging_value.min_prob_outcome_a_dual(), 0, atol=0.01), True)
def test_min_prob_outcome_a_dual_2_dim(self):
"""
Test minimal probability of outcome "a" when dim == 2.
The dual problem of the hedging semidefinite program.
"""
q_11 = kron(TestQuantumHedging.q_1, TestQuantumHedging.q_1)
hedging_value = QuantumHedging(q_11, 2)
self.assertEqual(isclose(hedging_value.min_prob_outcome_a_dual(), 0, atol=0.01), True)
if __name__ == "__main__":
unittest.main()
|
import unittest
from . import test_shapes
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTests( loader.loadTestsFromModule(test_shapes) )
runner = unittest.TextTestRunner(verbosity = 3)
result = runner.run(suite)
|
import numpy as np
import torch
import torch.optim as optim
from datasets.kitti import FrustumKitti
from datasets.kitti.attributes import kitti_attributes as kitti
from meters.kitti import MeterFrustumKitti
from modules.frustum import FrustumPointNetLoss
from evaluate.kitti.frustum.eval import evaluate
from utils.config import Config, configs
# data configs
configs.data.num_points_per_object = 512
configs.data.num_heading_angle_bins = 12
configs.data.size_template_names = kitti.class_names
configs.data.num_size_templates = len(configs.data.size_template_names)
configs.data.class_name_to_size_template_id = {
cat: cls for cls, cat in enumerate(configs.data.size_template_names)
}
configs.data.size_template_id_to_class_name = {
v: k for k, v in configs.data.class_name_to_size_template_id.items()
}
configs.data.size_templates = np.zeros((configs.data.num_size_templates, 3))
for i in range(configs.data.num_size_templates):
configs.data.size_templates[i, :] = kitti.class_name_to_size_template[
configs.data.size_template_id_to_class_name[i]]
configs.data.size_templates = torch.from_numpy(configs.data.size_templates.astype(np.float32))
# dataset configs
configs.dataset = Config(FrustumKitti)
configs.dataset.root = 'data/kitti/frustum/frustum_data'
configs.dataset.num_points = 1024
configs.dataset.classes = configs.data.classes
configs.dataset.num_heading_angle_bins = configs.data.num_heading_angle_bins
configs.dataset.class_name_to_size_template_id = configs.data.class_name_to_size_template_id
configs.dataset.random_flip = True
configs.dataset.random_shift = True
configs.dataset.frustum_rotate = True
configs.dataset.from_rgb_detection = False
# evaluate configs
configs.evaluate.fn = evaluate
configs.evaluate.batch_size = 32
configs.evaluate.dataset = Config(split='val', from_rgb_detection=True)
# train configs
configs.train = Config()
configs.train.num_epochs = 209
configs.train.batch_size = 32
# train: meters
configs.train.meters = Config()
for name, metric in [
('acc/iou_3d_{}', 'iou_3d'), ('acc/acc_{}', 'accuracy'),
('acc/iou_3d_acc_{}', 'iou_3d_accuracy'), ('acc/iou_3d_class_acc_{}', 'iou_3d_class_accuracy')
]:
configs.train.meters[name] = Config(
MeterFrustumKitti, metric=metric, num_heading_angle_bins=configs.data.num_heading_angle_bins,
num_size_templates=configs.data.num_size_templates, size_templates=configs.data.size_templates,
class_name_to_class_id={cat: cls for cls, cat in enumerate(configs.data.classes)}
)
# train: metric for save best checkpoint
configs.train.metrics = ('acc/iou_3d_class_acc_val', 'acc/iou_3d_acc_val')
# train: criterion
configs.train.criterion = Config(FrustumPointNetLoss)
configs.train.criterion.num_heading_angle_bins = configs.data.num_heading_angle_bins
configs.train.criterion.num_size_templates = configs.data.num_size_templates
configs.train.criterion.size_templates = configs.data.size_templates
configs.train.criterion.box_loss_weight = 1.0
configs.train.criterion.corners_loss_weight = 10.0
configs.train.criterion.heading_residual_loss_weight = 20.0
configs.train.criterion.size_residual_loss_weight = 20.0
# train: optimizer
configs.train.optimizer = Config(optim.Adam)
configs.train.optimizer.lr = 1e-3
|
import numpy as np
import os
import ntpath
import time
from . import util
from . import html
from pdb import set_trace as st
import math
# save image to the disk
def save_images(webpage, images_list, names_list, image_path, title=None, width=256):
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path)
# name = os.path.splitext(short_path)[0]
name = short_path
if not title:
title = name
webpage.add_header(title)
ims = []
txts = []
links = []
for names, images in zip(names_list, images_list):
for label, image_numpy in zip(names, images):
image_name = '%s_%s.jpg' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
class Visualizer():
def __init__(self, opt):
# self.opt = opt
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.log_path = os.path.join(opt.expr_dir, 'train_log.txt')
if self.display_id > 0:
import visdom
self.vis = visdom.Visdom(port=opt.display_port)
if self.use_html:
self.web_dir = os.path.join(opt.expr_dir,'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
# |visuals|: dictionary of images to display or save
def display_current_results(self, visuals, epoch, ncols=2, save_result=False, image_format='jpg'):
if self.display_id > 0: # show images in the browser
title = self.name
nrows = int(math.ceil(len(visuals.items()) / float(ncols)))
images = []
idx = 0
for label, image_numpy in visuals.items():
title += " | " if idx % nrows == 0 else ", "
title += label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if len(visuals.items()) % ncols != 0:
white_image = np.ones_like(image_numpy.transpose([2, 0, 1]))*255
images.append(white_image)
self.vis.images(images, nrow=nrows, win=self.display_id + 1,
opts=dict(title=title))
if self.use_html and save_result: # save images to a html file
for label, image_numpy in visuals.items():
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.%s' % (epoch, label, image_format))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
img_path = 'epoch%.3d_%s.%s' % (n, label, image_format)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
# errors: dictionary of error labels and values
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
self.vis.line(
X=np.stack([np.array(self.plot_data['X'])]*len(self.plot_data['legend']), 1),
Y=np.array(self.plot_data['Y']),
opts={
'title': self.name + ' loss over time',
'legend': self.plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id)
# errors: same format as |errors| of plotCurrentErrors
def print_current_errors(self, epoch, i, errors, t):
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
for k, v in errors.items():
message += ', %s: %.3f' % (k, v)
print(message)
# write losses to text file as well
with open(self.log_path, "a") as log_file:
log_file.write(message+'\n')
# save image to the disk
def save_images_old(self, webpage, visuals, image_path, short=False):
image_dir = webpage.get_image_dir()
if short:
short_path = ntpath.basename(image_path)
name = os.path.splitext(short_path)[0]
else:
name = image_path
webpage.add_header(name)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
image_name = '%s_%s.jpg' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=self.win_size)
|
import datetime
from enum import Enum
from django.utils import timezone
from apps.db_data.models import Event
class AnnouncementType(Enum):
WEEK = "week"
TOMORROW = "tomorrow"
TODAY = "today"
HOUR = "hour"
B_TIME = "now" # Berkeley Time
def get_events_in_time_delta(requested_atype: AnnouncementType):
"""
Retrieves a list of Event objects within the requested
time delta.
requested_atype will take in enum constants in
AnnouncementType
"""
now = timezone.now().astimezone(timezone.get_current_timezone())
events = get_events_in_time_range(*timeify(requested_atype))
return events
def get_events_in_time_range(start_time, end_time):
"""
    Takes in two datetime objects, start_time and end_time.
    Returns events whose start_time falls within that range.
"""
events = Event.objects.filter(start_time__gte=start_time).filter(
start_time__lte=end_time
)
return events
def timeify(requested_atype: AnnouncementType):
"""
    Converts requested_atype into the corresponding (start_time, end_time) datetime tuple
"""
now = timezone.now()
time_ranges = {
AnnouncementType.WEEK: (now, now + datetime.timedelta(weeks=1)),
AnnouncementType.TOMORROW: (
now + datetime.timedelta(days=1),
now + datetime.timedelta(days=1, hours=23, minutes=59, seconds=59),
),
AnnouncementType.TODAY: (
now,
now + datetime.timedelta(hours=23, minutes=59, seconds=59),
),
AnnouncementType.HOUR: (
now + datetime.timedelta(hours=1),
now + datetime.timedelta(hours=1, minutes=59, seconds=59),
),
AnnouncementType.B_TIME: (now, now + datetime.timedelta(minutes=10)),
}
return time_ranges[requested_atype]
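# Illustrative usage (assuming a configured Django project and a populated Event table):
#   start, end = timeify(AnnouncementType.TOMORROW)
#   upcoming = get_events_in_time_range(start, end)
# which is equivalent to get_events_in_time_delta(AnnouncementType.TOMORROW).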
|
import re
def is_alpha(string):
charRe = re.compile(r"[^a-zA-Z.]")
string = charRe.search(string)
return not bool(string)
def location_handler(loc):
if len(loc) and loc != " " and loc.isalpha():
return True
return False
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: fds/protobuf/stach/table/RowDefinition.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from fds.protobuf.stach.table import DataFormat_pb2 as fds_dot_protobuf_dot_stach_dot_table_dot_DataFormat__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='fds/protobuf/stach/table/RowDefinition.proto',
package='factset.protobuf.stach.table',
syntax='proto3',
serialized_options=b'\n com.factset.protobuf.stach.tableB\022RowDefinitionProtoZ=github.com/factset/stachschema/go/v2/fds/protobuf/stach/table\252\002\034FactSet.Protobuf.Stach.Table',
serialized_pb=b'\n,fds/protobuf/stach/table/RowDefinition.proto\x12\x1c\x66\x61\x63tset.protobuf.stach.table\x1a)fds/protobuf/stach/table/DataFormat.proto\"U\n\rRowDefinition\x12\n\n\x02id\x18\x01 \x01(\t\x12\x38\n\x06\x66ormat\x18\x02 \x01(\x0b\x32(.factset.protobuf.stach.table.DataFormatB\x94\x01\n com.factset.protobuf.stach.tableB\x12RowDefinitionProtoZ=github.com/factset/stachschema/go/v2/fds/protobuf/stach/table\xaa\x02\x1c\x46\x61\x63tSet.Protobuf.Stach.Tableb\x06proto3'
,
dependencies=[fds_dot_protobuf_dot_stach_dot_table_dot_DataFormat__pb2.DESCRIPTOR,])
_ROWDEFINITION = _descriptor.Descriptor(
name='RowDefinition',
full_name='factset.protobuf.stach.table.RowDefinition',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='factset.protobuf.stach.table.RowDefinition.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='format', full_name='factset.protobuf.stach.table.RowDefinition.format', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=121,
serialized_end=206,
)
_ROWDEFINITION.fields_by_name['format'].message_type = fds_dot_protobuf_dot_stach_dot_table_dot_DataFormat__pb2._DATAFORMAT
DESCRIPTOR.message_types_by_name['RowDefinition'] = _ROWDEFINITION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RowDefinition = _reflection.GeneratedProtocolMessageType('RowDefinition', (_message.Message,), {
'DESCRIPTOR' : _ROWDEFINITION,
'__module__' : 'fds.protobuf.stach.table.RowDefinition_pb2'
# @@protoc_insertion_point(class_scope:factset.protobuf.stach.table.RowDefinition)
})
_sym_db.RegisterMessage(RowDefinition)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/env python
"""
This script will handle running SMILESMerge in a docker container.
It handles generating the docker image and container, handling the user variables,
executing SMILESMerge, and copying the files from the container to the desired directory.
This script requires a JSON file that contains all the parameters that would be
required to run SMILESMerge on a host system (ie paths on your computer).
Necessary files, such as the source compound file, will be copied into the docker.
To run SMILESMerge from within docker. Launches docker
image. Accepts the exact same parameters as SMILESMerge, with the following
exceptions:
1) User variables must be supplied in JSON format.
- Please see documentation within the tutorial manual and an example can be found:
- ./examples/sample_SMILESMerge_docker_json.json
Required variables within the JSON file:
- `-root_output_folder`: folder path on host system that results will be copied to.
    - `-source_compound_file`: Path on host system to the tab-delineated .smi
file that will seed generation 1.
The resulting SMILESMerge output will be copied to the desired root_output_folder.
An example JSON is provided in: ./sample_SMILESMerge_docker_json.json
To run SMILESMerge in a docker, please run the `smilesmerge_in_docker.py` script:
Example on Linux/MacOS:
# cd to this directory in a bash terminal
1) cd SMILESMerge/docker/
# Run smilesmerge_in_docker.py with sudo and supply a json file using the
# normal pathing of your system.
2) `sudo python smilesmerge_in_docker.py -j ./examples/sample_SMILESMerge_docker_json.json`
# Results will be output to the directory specified by the root_output_folder variable
Example on Windows OS:
1) open a docker enabled and bash enabled terminal with administrative privileges
# cd to this directory in a bash terminal
    2) cd SMILESMerge/docker/
    3) `python smilesmerge_in_docker.py -j ./examples/sample_SMILESMerge_docker_json.json`
# Results will be output to the directory specified by the root_output_folder variable
"""
import __future__
import os
import shutil
import json
import argparse
import sys
def change_permissions(file_path):
"""
This will open the permissions for a given file.
Inputs:
:param str file_path: Path to a file to open permissions to.
"""
os.chmod(file_path, 0o777)
def change_permissions_recursively(file_or_folder_path):
"""
This will open the permissions for a given file/folder.
Skip permissions change if Windows.
Inputs:
:param str file_or_folder_path: Path to a file/folder to open permissions to.
"""
if os.name == "nt" or os.name == "ce":
# chmod and os.chmod do not apply to Windows OS so lets skip this.
pass
elif sys.platform.lower() in ["linux", "linux2"]:
# chmod -R recursively open the permissions
os.system("chmod -R a+rwx {}".format(file_or_folder_path))
elif sys.platform.lower() == "darwin":
# chmod -R recursively open the permissions
os.system("chmod -R a+rwx {}".format(file_or_folder_path))
else:
# chmod may not be a valid command on other OS systems.
# So let's do this the manual way.
if os.path.isdir(file_or_folder_path):
directory_path_list = []
file_list = []
for top_dir, dir_list, list_of_files in os.walk(
file_or_folder_path, topdown=False
):
for directory in [os.path.join(top_dir, d) for d in dir_list]:
directory_path_list.append(directory)
for file_path in [os.path.join(top_dir, fil) for fil in list_of_files]:
file_list.append(file_path)
# Convert mods on all files within a directory
file_list = list(set(file_list))
directory_path_list = list(set(directory_path_list))
for file_path in file_list:
change_permissions(file_path)
for dir_path in directory_path_list:
change_permissions(dir_path)
else:
change_permissions(file_or_folder_path)
def adjust_dockerfile():
"""
    This will open the Dockerfile and check if the entrypoint has been switched
    to the windows version (run_smilesmerge_in_container_windows.bash); if not, it
    will modify the Dockerfile to use the windows version of the script.
This only should run on Windows OS.
Change:
ENTRYPOINT ["bash", "/smilesmerge/run_smilesmerge_in_container.bash"]
To:
# ENTRYPOINT ["bash", "/smilesmerge/run_smilesmerge_in_container.bash"]
ENTRYPOINT ["bash", "/smilesmerge/run_smilesmerge_in_container_windows.bash"]
"""
printout = ""
normal_entry = "/smilesmerge/run_smilesmerge_in_container.bash"
windows_entry = "/smilesmerge/run_smilesmerge_in_container_windows.bash"
replacement_line = (
'ENTRYPOINT ["bash", "/smilesmerge/run_smilesmerge_in_container_windows.bash"]\n'
)
print("Modifying the Dockerfile to run for Windows. Changing Entrypoint.")
with open(os.path.abspath("Dockerfile"), "r") as f:
for line in f.readlines():
if "ENTRYPOINT" in line and normal_entry in line:
if "#" not in line:
line = "# " + line + "\n"
printout = printout + line
printout = printout + replacement_line
continue
if "ENTRYPOINT" in line and windows_entry in line:
continue
printout = printout + line
with open(os.path.abspath("Dockerfile"), "w") as f:
f.write(printout)
def make_docker():
"""
This will create the docker to run SMILESMerge.
This is also where all of the files are copied into the image.
If docker image can not be created it will raise an exception.
"""
if os.name == "nt" or os.name == "ce":
# so it's running under windows. multiprocessing disabled
adjust_dockerfile()
print("Creating new docker image for SMILESMerge")
output_and_log_dir = os.path.abspath("output_and_log_dir") + os.sep
log_file = "{}log.txt".format(output_and_log_dir)
printout = (
"\nAttempting to create the docker container. If 1st time running "
+ "this script it may take a few minutes. Output details are piped to: "
+ "{}\n".format(log_file)
)
print(printout)
try:
os.system("docker build -t smilesmerge . > {}".format(log_file))
except:
printout = (
"\nCan not create a docker file. Please make sure to run the "
+ "script with sudo/administrative privileges.\nIf Linux/MacOS:\n"
+ "\t'sudo python smilesmerge_in_docker.py -j PATH/JSON.json'\n"
+ "If Windows:\n\tRun from bash and "
+ "docker enabled terminal with administrative privileges.\n"
+ "Please also make sure docker is installed on the system."
)
print(printout)
raise Exception(printout)
# Remove the temporary SMILESMerge directory
shutil.rmtree("SMILESMerge")
def check_for_required_inputs(json_vars):
"""
Confirm all the required inputs were provided.
Required Variables go here.
Inputs:
:param dict json_vars: The parameters. A dictionary of {parameter name:
value}.
Returns:
:returns: dict json_vars: The updated json_vars with input-file paths
changed to the output dir/inputs/ subdirectory.
"""
keys_from_input = list(json_vars.keys())
list_of_required_inputs = [
"root_output_folder",
"source_compound_file",
]
missing_variables = []
for variable in list_of_required_inputs:
if variable in keys_from_input:
continue
missing_variables.append(variable)
if len(missing_variables) != 0:
printout = "\nRequired variables are missing from the input. A description \
of each of these can be found by running python ./RunSMILESMerge -h"
printout = printout + "\nThe following required variables are missing: "
for variable in missing_variables:
printout = printout + "\n\t" + variable
print("")
print(printout)
print("")
raise NotImplementedError("\n" + printout + "\n")
#######################################
# Check that all required files exist #
#######################################
# convert paths to abspath, in case necessary
json_vars["root_output_folder"] = os.path.abspath(json_vars["root_output_folder"])
json_vars["source_compound_file"] = os.path.abspath(
json_vars["source_compound_file"]
)
# Check root_output_folder exists
if os.path.exists(json_vars["root_output_folder"]) is False:
        # If the output directory doesn't exist, then make it
try:
os.makedirs(json_vars["root_output_folder"])
os.makedirs(json_vars["root_output_folder"] + os.sep + "inputs")
change_permissions_recursively(json_vars["root_output_folder"])
except:
raise NotImplementedError(
"root_output_folder could not be found and could not be created. \
Please manual create desired directory or check input parameters"
)
if os.path.exists(json_vars["root_output_folder"]) is False:
raise NotImplementedError(
"root_output_folder could not be found and could not be created. \
Please manual create desired directory or check input parameters"
)
if os.path.isdir(json_vars["root_output_folder"]) is False:
raise NotImplementedError(
"root_output_folder is not a directory. \
Check your input parameters."
)
# Check source_compound_file exists
if os.path.isfile(json_vars["source_compound_file"]) is False:
raise NotImplementedError(
"source_compound_file must be a tab delineated .smi file. \
source_compound_file can not be found: \
{}.".format(
json_vars["source_compound_file"]
)
)
if ".smi" not in json_vars["source_compound_file"]:
raise NotImplementedError(
"source_compound_file must be a \
tab delineated .smi file."
)
    # Copy the input files to the output directory so they can be easily
    # accessed from the docker container. (JDD addition.)
shutil.copy2(
json_vars["source_compound_file"],
json_vars["root_output_folder"]
+ os.sep + "inputs" + os.sep
+ os.path.basename(json_vars["source_compound_file"]),
)
json_vars["source_compound_file"] = "/Outputfolder/inputs/" + \
os.path.basename(json_vars["source_compound_file"])
return json_vars
def find_previous_runs(folder_name_path):
"""
This will check if there are any previous runs in the output directory.
    - If there are, it will return the integer number label of the last Run folder path.
    - i.e. if there are folders Run_0, Run_1, Run_2 the function will return int(2)
- If there are no previous Run folders it returns None.
Inputs:
:param str folder_name_path: is the path of the root output folder. We will
make a directory within this folder to store our output files
Returns:
:returns: int last_run_number: the int of the last run number or None if no previous runs.
"""
path_exists = True
i = 0
while path_exists is True:
folder_path = "{}{}{}".format(folder_name_path, i, os.sep)
if os.path.exists(folder_path):
i = i + 1
else:
path_exists = False
if i == 0:
# There are no previous runs in this directory
last_run_number = None
return None
# A previous run exists. The number of the last run.
last_run_number = i - 1
return last_run_number
def get_run_number(root_folder_path):
"""
    Determine the run number for the new directory.
    Always start a fresh new run:
    - If no previous runs exist in the root_folder_path, start at Run_0.
    - If there are previous runs in the root_folder_path, increment the run
      number by 1 from the last run in the same output directory.
Inputs:
:param str root_folder_path: is the path of the root output folder. We will
make a directory within this folder to store our output files
Returns:
:returns: str run_num: the string of the run number "Run_*"
"""
folder_name_path = root_folder_path + "Run_"
print(folder_name_path)
last_run_number = find_previous_runs(folder_name_path)
if last_run_number is None:
# There are no previous simulation runs in this directory
print("There are no previous runs in this directory.")
print("Starting a new run named Run_0.")
# make a folder for the new generation
run_number = 0
else:
# Start a new fresh simulation
# Make a directory for the new run by increasing run number by +1
# from last_run_number
run_number = last_run_number + 1
folder_name_path = root_folder_path + "Run_" + str(run_number) + os.sep
print("The Run number is: ", run_number)
print("The Run folder path is: ", folder_name_path)
print("")
return "Run_{}".format(run_number)
def get_output_folder(json_vars):
"""
Find the folder for where to place output runs on host system.
Inputs:
:param dict json_vars: The parameters. A dictionary of {parameter name: value}.
Returns:
:returns: str root_output_folder: the string of the directory for
        putting output folders
:returns: str run_num: the string of the run number "Run_*"
"""
root_output_folder = os.path.abspath(json_vars["root_output_folder"]) + os.sep
change_permissions_recursively(root_output_folder)
run_num = get_run_number(root_output_folder)
return root_output_folder, run_num
def move_files_to_temp_dir(json_vars):
"""
    This will move all needed files to a temp_user_files directory and will create a
    modified json_vars dict called docker_json_vars which will be used for pathing
    within the docker container.
Inputs:
:param dict json_vars: The parameters. A dictionary of {parameter name: value}.
"""
docker_json_vars = {}
# make or remove and make the temp_user_files dir
temp_dir_path = os.path.abspath("temp_user_files") + os.sep
if os.path.exists(temp_dir_path):
shutil.rmtree(temp_dir_path)
os.mkdir(temp_dir_path)
change_permissions_recursively(temp_dir_path)
# make or remove and make an output_and_log_dir
output_and_log_dir = os.path.abspath("output_and_log_dir") + os.sep
if os.path.exists(output_and_log_dir):
shutil.rmtree(output_and_log_dir)
os.mkdir(output_and_log_dir)
change_permissions_recursively(output_and_log_dir)
print("copying files into temp directory: temp_user_files")
# get files from json_vars
for var_name in json_vars.keys():
var_item = json_vars[var_name]
        # Only string values can be filesystem paths worth copying
        if not isinstance(var_item, str):
continue
var_item = str(var_item)
# This could be a different variable that is not a path
if os.path.exists(var_item) is False:
continue
if var_name == "root_output_folder":
continue
basename = os.path.basename(var_item)
temp_path = temp_dir_path + basename
if os.path.isdir(var_item):
shutil.copytree(var_item, temp_path)
docker_json_vars[var_name] = "/UserFiles/" + basename + "/"
continue
if os.path.isfile(var_item):
shutil.copyfile(var_item, temp_path)
docker_json_vars[var_name] = "/UserFiles/" + basename
for var_name in json_vars.keys():
if var_name not in docker_json_vars.keys():
docker_json_vars[var_name] = json_vars[var_name]
# Set output folder
docker_json_vars["root_output_folder"] = "/Outputfolder/"
with open(temp_dir_path + "docker_json_vars.json", "w") as file_item:
json.dump(docker_json_vars, file_item, indent=4)
# update permissions so files can be manipulated without sudo/admin
change_permissions_recursively(temp_dir_path)
change_permissions_recursively(output_and_log_dir)
# Copy over SMILESMerge files into a temp directory
temp_smilesmerge_path = os.path.abspath("SMILESMerge") + os.sep
script_dir = str(os.path.dirname(os.path.realpath(__file__)))
smilesmerge_top_dir = str(os.path.dirname(script_dir))
if os.path.exists(temp_smilesmerge_path):
shutil.rmtree(temp_smilesmerge_path)
os.mkdir(temp_smilesmerge_path)
smilesmerge_top_dir = smilesmerge_top_dir + os.sep
change_permissions_recursively(temp_smilesmerge_path)
# Copy all files in SMILESMerge directory into a temp except the Docker folder
for fol_to_copy in [
"smilesmerge",
"source_compounds",
"accessory_scripts",
"tutorial",
]:
shutil.copytree(
smilesmerge_top_dir + fol_to_copy, temp_smilesmerge_path + fol_to_copy
)
shutil.copyfile(
smilesmerge_top_dir + "RunSMILESMerge.py", temp_smilesmerge_path + "RunSMILESMerge.py"
)
# Open permissions
change_permissions_recursively(temp_smilesmerge_path)
def handle_json_info(vars):
"""
This will open the json file.
1) check that JSON file has basic info
- source compound file.
2) copy files to a temp directory
-source compound file.
3) make a JSON file with modified information for within docker
Inputs:
:param dict vars: Dictionary of User specified variables
Returns:
:param dict json_vars: Dictionary of User specified variables
:returns: str root_output_folder: the string of the directory for
        putting output folders
:returns: str run_num: the string of the run number "Run_*"
"""
print("Handling files")
json_file = vars["json_file"]
if os.path.exists(json_file) is False:
printout = "\njson_file is required. Can not find json_file: {}.\n".format(
json_file
)
print(printout)
raise Exception(printout)
json_vars = json.load(open(json_file))
json_vars = check_for_required_inputs(json_vars)
move_files_to_temp_dir(json_vars)
# get output folder
outfolder_path, run_num = get_output_folder(json_vars)
return json_vars, outfolder_path, run_num
def run_SMILESMerge_docker_main(vars):
"""
This function runs the processing to:
1) check that JSON file has basic info
-source compound file.
2) copy files to a temp directory
-source compound file.
3) make a JSON file with modified information for within docker
4) Build docker image and link files to output folder
-This includes an adjustment to the Dockerfile if
running it on a Windows OS
5) execute RunSMILESMerge.py from within the docker container
6) export the files back to the final end dir
Inputs:
:param dict vars: Dictionary of User specified variables
"""
printout = (
"\n\nThis script builds a docker for SMILESMerge and runs SMILESMerge "
+ "within the docker. The setup may take a few minutes the first time being run "
+ "and SMILESMerge may take a long time depending on the settings.\n\n"
)
print(printout)
# Check that we are in the correct directory if not raise exception
script_dir = str(os.path.dirname(os.path.realpath(__file__))) + os.sep
if os.path.abspath(os.getcwd()) != os.path.abspath(script_dir):
printout = "\nMust execute this script from this directory: {}\n".format(
script_dir
)
printout = printout + "Before running please 'cd {}'\n".format(script_dir)
print(printout)
raise Exception(printout)
# Run parts 1-3
# 1) check that JSON file has basic info
# -source compound file.
# 2) copy files to a temp directory
# -source compound file.
# 3) make a JSON file with modified information for within docker
json_vars, outfolder_path, run_num = handle_json_info(vars)
# Run build docker image
make_docker()
# Run part 5) run SMILESMerge in the container
print("\nRunning SMILESMerge in Docker")
command = "docker run --rm -it -v {}:/Outputfolder/".format(outfolder_path)
command = command + " smilesmerge --name smilesmerge --{}".format(run_num)
# Execute SMILESMerge
print(command)
os.system(command)
change_permissions_recursively(outfolder_path)
print("SMILESMerge Results placed in: {}".format(outfolder_path))
PARSER = argparse.ArgumentParser()
# Allows the run commands to be submitted via a .json file.
PARSER.add_argument(
"--json_file",
"-j",
metavar="param.json_file",
required=True,
help="Name of a json file containing all parameters. \
Overrides other arguments. This takes all the parameters described in \
RunSMILESMerge.py.",
)
PARSER.add_argument(
"--override_sudo_admin_privileges",
metavar="param.override_sudo_admin_privileges",
default=False,
help="Docker normally requires `sudo` (linux/macos) or `Administrator` \
    privileges (windows/cygwin). If a system does not have such privileges, \
    or does not require such privileges, setting this to True will skip the \
check for privileges. This variable is provided via commandline, and \
IS NOT RECOMMENDED for most OS. ",
)
ARGS_DICT = vars(PARSER.parse_args())
print("")
print("BE SURE TO RUN THIS SCRIPT WITH SUDO (LINUX/MACOS) OR ADMINISTRATOR")
print("(WINDOWS) PRIVILEGES!")
print("")
# Check that this is running with appropriate privileges.
# i.e., sudo (linux/macos) or Administrator privileges (Windows/cygwin)
if ARGS_DICT["override_sudo_admin_privileges"] == False:
if sys.platform.lower() in ["darwin", "linux", "linux2"]:
if os.getuid() != 0:
printout = "\n\nMust run this script with `sudo` privileges.\n\t"
printout = printout + "Please retry running with `sudo` privileges.\n\n"
print(printout)
raise Exception(printout)
elif sys.platform.lower() == "win32" or sys.platform.lower() == "cygwin":
import ctypes
if ctypes.windll.shell32.IsUserAnAdmin() != 1:
printout = "\n\nMust run this script from a terminal with `Administrator` privileges.\n\t"
printout = printout + "Please retry running with `Administrator` privileges.\n\n"
print(printout)
raise Exception(printout)
else:
print("")
print("BE SURE TO RUN THIS SCRIPT WITH SUDO (LINUX/MACOS) OR ADMINISTRATOR")
print("(WINDOWS) PRIVILEGES!")
print("")
else:
print("\n##############################################################")
print("WARNING: Skipping check for privileges.")
print("\tBE SURE TO RUN THIS SCRIPT WITH APPROPRIATE PRIVILEGES:")
print("\tSUDO (LINUX/MACOS) OR ADMINISTRATOR (WINDOWS) PRIVILEGES!")
print("\tFailure to do so may result in Docker failures.")
print("##############################################################\n")
run_SMILESMerge_docker_main(ARGS_DICT)
|
import logging
import threading
from ftplib import FTP
class FtpUploader(threading.Thread):
TMP_DIR = '/tmp/'
def __init__(self, files, host, user, passwd, ftp_dir=None, logger=None):
super().__init__()
self.daemon = True
self.files = files
self.host = host
self.user = user
self.passwd = passwd
self.ftp_dir = ftp_dir
self.logger = logger or logging.getLogger()
    def run(self):
        ftp = None
        try:
            ftp = FTP(self.host, user=self.user, passwd=self.passwd)
            if self.ftp_dir:
                ftp.cwd(self.ftp_dir)
            for filename in self.files:
                # close the local file handle after each upload
                with open(FtpUploader.TMP_DIR + filename, 'rb') as local_file:
                    rsp_code = ftp.storbinary("STOR " + filename, local_file)
                self.logger.info('FTP upload %s: %s', filename, rsp_code)
        except Exception:
            self.logger.error('Unexpected error', exc_info=True)
        finally:
            # ftp is still None if the connection itself failed
            if ftp:
                ftp.close()
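
# Minimal usage sketch (illustrative): the host, credentials and file name below are
# placeholders, and the file is expected to exist under FtpUploader.TMP_DIR.
if __name__ == '__main__':
    uploader = FtpUploader(
        files=['report.csv'],
        host='ftp.example.com',
        user='anonymous',
        passwd='guest',
        ftp_dir='uploads',
    )
    uploader.start()           # upload runs in a daemon thread
    uploader.join(timeout=30)  # optionally wait for it to finish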
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------
from typing import List, Any, Optional, TYPE_CHECKING
from azure.core import PipelineClient
from six import python_2_unicode_compatible
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._generated import AzureAttestationRestClient
from ._generated.models import (
AttestationType,
PolicyResult as GeneratedPolicyResult,
PolicyCertificatesResult,
JSONWebKey,
AttestationCertificateManagementBody,
StoredAttestationPolicy as GeneratedStoredAttestationPolicy,
PolicyCertificatesModificationResult as GeneratedPolicyCertificatesModificationResult
)
from ._configuration import AttestationClientConfiguration
from ._models import (
AttestationSigner,
AttestationToken,
AttestationResponse,
AttestationSigningKey,
PolicyCertificatesModificationResult,
PolicyResult,
AttestationTokenValidationException
)
import base64
from azure.core.tracing.decorator import distributed_trace
from threading import Lock, Thread
class AttestationAdministrationClient(object):
"""Provides administrative APIs for managing an instance of the Attestation Service.
:param instance_url: base url of the service
:type instance_url: str
:param credential: Credentials for the caller used to interact with the service.
:type credential: :class:`~azure.core.credentials.TokenCredential`
:keyword Pipeline pipeline: If omitted, the standard pipeline is used.
:keyword HttpTransport transport: If omitted, the standard pipeline is used.
:keyword list[HTTPPolicy] policies: If omitted, the standard pipeline is used.
"""
def __init__(
self,
credential, # type: "TokenCredential"
instance_url, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if not credential:
raise ValueError("Missing credential.")
self._config = AttestationClientConfiguration(credential, instance_url, **kwargs)
self._client = AzureAttestationRestClient(credential, instance_url, **kwargs)
self._statelock = Lock()
self._signing_certificates = None
@distributed_trace
def get_policy(self, attestation_type, **kwargs):
#type(AttestationType, **Any) -> AttestationResponse[str]:
""" Retrieves the attestation policy for a specified attestation type.
:param attestation_type: :class:`azure.security.attestation.AttestationType` for
which to retrieve the policy.
:type attestation_type: azure.security.attestation.AttestationType
:return: Attestation service response encapsulating a string attestation policy.
:rtype: azure.security.attestation.AttestationResponse[str]
:raises azure.security.attestation.AttestationTokenValidationException: Raised when an attestation token is invalid.
"""
policyResult = self._client.policy.get(attestation_type, **kwargs)
token = AttestationToken[GeneratedPolicyResult](token=policyResult.token, body_type=GeneratedPolicyResult)
token_body = token.get_body()
stored_policy = AttestationToken[GeneratedStoredAttestationPolicy](token=token_body.policy, body_type=GeneratedStoredAttestationPolicy)
actual_policy = stored_policy.get_body().attestation_policy #type: bytes
if self._config.token_validation_options.validate_token:
if not token.validate_token(self._config.token_validation_options, self._get_signers(**kwargs)):
raise AttestationTokenValidationException("Token Validation of get_policy API failed.")
return AttestationResponse[str](token, actual_policy.decode('utf-8'))
@distributed_trace
def set_policy(self, attestation_type, attestation_policy, signing_key=None, **kwargs):
#type:(AttestationType, str, Optional[AttestationSigningKey], **Any) -> AttestationResponse[PolicyResult]
""" Sets the attestation policy for the specified attestation type.
:param attestation_type: :class:`azure.security.attestation.AttestationType` for
which to set the policy.
:type attestation_type: azure.security.attestation.AttestationType
:param attestation_policy: Attestation policy to be set.
:type attestation_policy: str
:param signing_key: Signing key to be used to sign the policy
before sending it to the service.
:type signing_key: azure.security.attestation.AttestationSigningKey
:return: Attestation service response encapsulating a :class:`PolicyResult`.
:rtype: azure.security.attestation.AttestationResponse[azure.security.attestation.PolicyResult]
:raises azure.security.attestation.AttestationTokenValidationException: Raised when an attestation token is invalid.
.. note::
If the attestation instance is in *Isolated* mode, then the
`signing_key` parameter MUST be a signing key containing one of the
certificates returned by :meth:`get_policy_management_certificates`.
If the attestation instance is in *AAD* mode, then the `signing_key`
parameter does not need to be provided.
"""
policy_token = AttestationToken[GeneratedStoredAttestationPolicy](
body=GeneratedStoredAttestationPolicy(attestation_policy = attestation_policy.encode('ascii')),
signer=signing_key,
body_type=GeneratedStoredAttestationPolicy)
policyResult = self._client.policy.set(attestation_type=attestation_type, new_attestation_policy=policy_token.serialize(), **kwargs)
token = AttestationToken[GeneratedPolicyResult](token=policyResult.token,
body_type=GeneratedPolicyResult)
if self._config.token_validation_options.validate_token:
if not token.validate_token(self._config.token_validation_options, self._get_signers(**kwargs)):
raise AttestationTokenValidationException("Token Validation of set_policy API failed.")
return AttestationResponse[PolicyResult](token, PolicyResult._from_generated(token.get_body()))
@distributed_trace
def reset_policy(self, attestation_type, signing_key=None, **kwargs):
#type:(AttestationType, Optional[AttestationSigningKey], **dict[str, Any]) -> AttestationResponse[PolicyResult]
""" Resets the attestation policy for the specified attestation type to the default value.
:param attestation_type: :class:`azure.security.attestation.AttestationType` for
which to set the policy.
:type attestation_type: azure.security.attestation.AttestationType
:param signing_key: Signing key to be
used to sign the policy before sending it to the service.
:type signing_key: azure.security.attestation.AttestationSigningKey
:return: Attestation service response encapsulating a :class:`PolicyResult`.
:rtype: azure.security.attestation.AttestationResponse[azure.security.attestation.PolicyResult]
:raises azure.security.attestation.AttestationTokenValidationException: Raised when an attestation token is invalid.
.. note::
If the attestation instance is in *Isolated* mode, then the
`signing_key` parameter MUST be a signing key containing one of the
certificates returned by :meth:`get_policy_management_certificates`.
If the attestation instance is in *AAD* mode, then the `signing_key`
parameter does not need to be provided.
"""
policy_token = AttestationToken(
body=None,
signer=signing_key)
policyResult = self._client.policy.reset(attestation_type=attestation_type, policy_jws=policy_token.serialize(), **kwargs)
token = AttestationToken[GeneratedPolicyResult](token=policyResult.token,
body_type=GeneratedPolicyResult)
if self._config.token_validation_options.validate_token:
if not token.validate_token(self._config.token_validation_options, self._get_signers(**kwargs)):
raise AttestationTokenValidationException("Token Validation of reset_policy API failed.")
return AttestationResponse[PolicyResult](token, PolicyResult._from_generated(token.get_body()))
@distributed_trace
def get_policy_management_certificates(self, **kwargs):
#type:(**Any) -> AttestationResponse[list[list[bytes]]]
""" Retrieves the set of policy management certificates for the instance.
The list of policy management certificates will only be non-empty if the
attestation service instance is in Isolated mode.
:return: Attestation service response
encapsulating a list of DER encoded X.509 certificate chains.
:rtype: azure.security.attestation.AttestationResponse[list[list[bytes]]]
"""
cert_response = self._client.policy_certificates.get(**kwargs)
token = AttestationToken[PolicyCertificatesResult](
token=cert_response.token,
body_type=PolicyCertificatesResult)
if self._config.token_validation_options.validate_token:
if not token.validate_token(self._config.token_validation_options, self._get_signers(**kwargs)):
raise Exception("Token Validation of PolicyCertificates API failed.")
certificates = []
cert_list = token.get_body()
for key in cert_list.policy_certificates.keys:
key_certs = [base64.b64decode(cert) for cert in key.x5_c]
certificates.append(key_certs)
return AttestationResponse(token, certificates)
@distributed_trace
def add_policy_management_certificate(self, certificate_to_add, signing_key, **kwargs):
#type:(bytes, AttestationSigningKey, **Any) -> AttestationResponse[PolicyCertificatesModificationResult]
""" Adds a new policy management certificate to the set of policy management certificates for the instance.
:param bytes certificate_to_add: DER encoded X.509 certificate to add to
the list of attestation policy management certificates.
:param signing_key: Signing Key representing one of
the *existing* attestation signing certificates.
:type signing_key: azure.security.attestation.AttestationSigningKey
:return: Attestation service response
encapsulating the status of the add request.
:rtype: azure.security.attestation.AttestationResponse[azure.security.attestation.PolicyCertificatesModificationResult]
The :class:`PolicyCertificatesModificationResult` response to the
:meth:`add_policy_management_certificate` API contains two attributes
of interest.
The first is `certificate_resolution`, which indicates
whether the certificate in question is present in the set of policy
management certificates after the operation has completed, or if it is
absent.
The second is the `thumbprint` of the certificate added. The `thumbprint`
for the certificate is the SHA1 hash of the DER encoding of the
certificate.
"""
key=JSONWebKey(kty='RSA', x5_c = [ base64.b64encode(certificate_to_add).decode('ascii')])
add_body = AttestationCertificateManagementBody(policy_certificate=key)
cert_add_token = AttestationToken[AttestationCertificateManagementBody](
body=add_body,
signer=signing_key,
body_type=AttestationCertificateManagementBody)
cert_response = self._client.policy_certificates.add(cert_add_token.serialize(), **kwargs)
token = AttestationToken[GeneratedPolicyCertificatesModificationResult](token=cert_response.token,
body_type=GeneratedPolicyCertificatesModificationResult)
if self._config.token_validation_options.validate_token:
if not token.validate_token(self._config.token_validation_options, self._get_signers(**kwargs)):
raise Exception("Token Validation of PolicyCertificate Add API failed.")
return AttestationResponse[PolicyCertificatesModificationResult](token, PolicyCertificatesModificationResult._from_generated(token.get_body()))
@distributed_trace
def remove_policy_management_certificate(self, certificate_to_add, signing_key, **kwargs):
#type:(bytes, AttestationSigningKey, **Any) -> AttestationResponse[PolicyCertificatesModificationResult]
""" Removes a new policy management certificate to the set of policy management certificates for the instance.
:param bytes certificate_to_add: DER encoded X.509 certificate to add to
the list of attestation policy management certificates.
:param signing_key: Signing Key representing one of
the *existing* attestation signing certificates.
:type signing_key: azure.security.attestation.AttestationSigningKey
        :return: Attestation service response
            encapsulating the status of the remove request.
:rtype: azure.security.attestation.AttestationResponse[azure.security.attestation.PolicyCertificatesModificationResult]
The :class:`PolicyCertificatesModificationResult` response to the
:meth:`remove_policy_management_certificate` API contains two attributes
of interest.
The first is `certificate_resolution`, which indicates
whether the certificate in question is present in the set of policy
management certificates after the operation has completed, or if it is
absent.
        The second is the `thumbprint` of the certificate removed. The `thumbprint`
for the certificate is the SHA1 hash of the DER encoding of the
certificate.
"""
key=JSONWebKey(kty='RSA', x5_c = [ base64.b64encode(certificate_to_add).decode('ascii')])
add_body = AttestationCertificateManagementBody(policy_certificate=key)
cert_add_token = AttestationToken[AttestationCertificateManagementBody](
body=add_body,
signer=signing_key,
body_type=AttestationCertificateManagementBody)
cert_response = self._client.policy_certificates.remove(cert_add_token.serialize(), **kwargs)
token = AttestationToken[GeneratedPolicyCertificatesModificationResult](token=cert_response.token,
body_type=GeneratedPolicyCertificatesModificationResult)
if self._config.token_validation_options.validate_token:
if not token.validate_token(self._config.token_validation_options, self._get_signers(**kwargs)):
raise Exception("Token Validation of PolicyCertificate Remove API failed.")
return AttestationResponse[PolicyCertificatesModificationResult](token, PolicyCertificatesModificationResult._from_generated(token.get_body()))
def _get_signers(self, **kwargs):
#type(**Any) -> List[AttestationSigner]
""" Returns the set of signing certificates used to sign attestation tokens.
"""
with self._statelock:
            if self._signing_certificates is None:
signing_certificates = self._client.signing_certificates.get(**kwargs)
self._signing_certificates = []
for key in signing_certificates.keys:
# Convert the returned certificate chain into an array of X.509 Certificates.
self._signing_certificates.append(AttestationSigner._from_generated(key))
signers = self._signing_certificates
return signers
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AttestationAdministrationClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
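
# Illustrative usage sketch (not part of this module): the endpoint below is a
# placeholder, DefaultAzureCredential comes from the separate azure-identity package,
# and the attribute layout of the returned response is an assumption.
#
#   from azure.identity import DefaultAzureCredential
#
#   with AttestationAdministrationClient(
#           DefaultAzureCredential(),
#           "https://myinstance.attest.azure.net") as admin_client:
#       policy_response = admin_client.get_policy(AttestationType.SGX_ENCLAVE)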
|
from O365.contact import Contact
from O365.group import Group
from O365.connection import Connection
import logging
import json
import requests
import time
log = logging.getLogger(__name__)
class Event( object ):
'''
    Class for managing the creation and manipulation of events in a calendar.
Methods:
create -- Creates the event in a calendar.
update -- Sends local changes up to the cloud.
delete -- Deletes event from the cloud.
toJson -- returns the json representation.
fullcalendarioJson -- gets a specific json representation used for fullcalendario.
getSubject -- gets the subject of the event.
getBody -- gets the body of the event.
getStart -- gets the starting time of the event. (struct_time)
getEnd -- gets the ending time of the event. (struct_time)
getAttendees -- gets the attendees of the event.
getReminder -- returns True if reminder is enabled, False if not.
getCategories -- returns a list of the event's categories.
addAttendee -- adds an attendee to the event. update needs to be called for notification.
setSubject -- sets the subject line of the event.
setBody -- sets the body of the event.
setStart -- sets the starting time of the event. (struct_time)
        setEnd -- sets the ending time of the event. (struct_time)
setAttendees -- sets the attendee list.
setStartTimeZone -- sets the timezone for the start of the event item.
setEndTimeZone -- sets the timezone for the end of the event item.
setReminder -- sets the reminder.
setCategories -- sets a list of the event's categories.
Variables:
        time_string -- Formatted time string for translation to and from json.
create_url -- url for creating a new event.
update_url -- url for updating an existing event.
delete_url -- url for deleting an event.
'''
    #Formatted time string for translation to and from json.
time_string = '%Y-%m-%dT%H:%M:%S'
#takes a calendar ID
create_url = 'https://outlook.office365.com/api/v1.0/me/calendars/{0}/events'
#takes current event ID
update_url = 'https://outlook.office365.com/api/v1.0/me/events/{0}'
#takes current event ID
delete_url = 'https://outlook.office365.com/api/v1.0/me/events/{0}'
def __init__(self,json=None,auth=None,cal=None,verify=True):
'''
Creates a new event wrapper.
Keyword Argument:
json (default = None) -- json representation of an existing event. mostly just used by
            this library internally for events that are downloaded by the calendar class.
auth (default = None) -- a (email,password) tuple which will be used for authentication
to office365.
cal (default = None) -- an instance of the calendar for this event to associate with.
'''
self.auth = auth
self.calendar = cal
self.attendees = []
if json:
self.json = json
self.isNew = False
else:
self.json = {}
self.verify = verify
self.startTimeZone = time.strftime("%Z", time.gmtime())
self.endTimeZone = time.strftime("%Z", time.gmtime())
def create(self,calendar=None):
'''
        This method creates an event on the calendar passed.
IMPORTANT: It returns that event now created in the calendar, if you wish
to make any changes to this event after you make it, use the returned value
and not this particular event any further.
calendar -- a calendar class onto which you want this event to be created. If this is left
empty then the event's default calendar, specified at instancing, will be used. If no
default is specified, then the event cannot be created.
'''
connection = Connection()
# Change URL if we use Oauth
        if connection.is_valid() and connection.oauth is not None:
self.create_url = self.create_url.replace("outlook.office365.com/api", "graph.microsoft.com")
elif not self.auth:
log.debug('failed authentication check when creating event.')
return False
if calendar:
calId = calendar.calendarId
self.calendar = calendar
log.debug('sent to passed calendar.')
elif self.calendar:
calId = self.calendar.calendarId
log.debug('sent to default calendar.')
else:
log.debug('no valid calendar to upload to.')
return False
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
log.debug('creating json for request.')
data = json.dumps(self.json)
response = None
try:
log.debug('sending post request now')
response = connection.post_data(self.create_url.format(calId),data,headers=headers,auth=self.auth,verify=self.verify)
log.debug('sent post request.')
if response.status_code > 399:
log.error("Invalid response code [{}], response text: \n{}".format(response.status_code, response.text))
return False
except Exception as e:
if response:
log.debug('response to event creation: %s',str(response))
else:
log.error('No response, something is very wrong with create: %s',str(e))
return False
log.debug('response to event creation: %s',str(response))
return Event(response.json(),self.auth,calendar)
def update(self):
'''Updates an event that already exists in a calendar.'''
connection = Connection()
# Change URL if we use Oauth
        if connection.is_valid() and connection.oauth is not None:
self.update_url = self.update_url.replace("outlook.office365.com/api", "graph.microsoft.com")
elif not self.auth:
return False
if self.calendar:
calId = self.calendar.calendarId
else:
return False
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
data = json.dumps(self.json)
response = None
print(data)
try:
response = connection.patch_data(self.update_url.format(self.json['id']),data,headers=headers,auth=self.auth,verify=self.verify)
log.debug('sending patch request now')
except Exception as e:
if response:
log.debug('response to event creation: %s',str(response))
else:
log.error('No response, something is very wrong with update: %s',str(e))
return False
log.debug('response to event creation: %s',str(response))
        # parse the JSON body of the response rather than serializing the Response object
        return Event(response.json(), self.auth)
def delete(self):
'''
        Deletes an event from the calendar it is in.
But leaves you this handle. You could then change the calendar and transfer the event to
that new calendar. You know, if that's your thing.
'''
connection = Connection()
# Change URL if we use Oauth
        if connection.is_valid() and connection.oauth is not None:
self.delete_url = self.delete_url.replace("outlook.office365.com/api", "graph.microsoft.com")
elif not self.auth:
return False
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
response = None
try:
log.debug('sending delete request')
response = connection.delete_data(self.delete_url.format(self.json['id']),headers=headers,auth=self.auth,verify=self.verify)
except Exception as e:
if response:
log.debug('response to deletion: %s',str(response))
else:
log.error('No response, something is very wrong with delete: %s',str(e))
return False
return response
def toJson(self):
'''
Creates a JSON representation of the calendar event.
oh. uh. I mean it simply returns the json representation that has always been in self.json.
'''
return self.json
def fullcalendarioJson(self):
'''
returns a form of the event suitable for the vehicle booking system here.
oh the joys of having a library to yourself!
'''
ret = {}
ret['title'] = self.json['subject']
ret['driver'] = self.json['organizer']['emailAddress']['name']
ret['driverEmail'] = self.json['organizer']['emailAddress']['address']
ret['start'] = self.json['start']
ret['end'] = self.json['end']
ret['isAllDay'] = self.json['isAllDay']
return ret
def getSubject(self):
'''Gets event subject line.'''
return self.json['subject']
def getBody(self):
'''Gets event body content.'''
return self.json['body']['content']
def getStart(self):
'''Gets event start struct_time'''
if 'Z' in self.json['start']:
return time.strptime(self.json['start'], self.time_string+'Z')
else:
return time.strptime(self.json['start']["dateTime"].split('.')[0], self.time_string)
def getEnd(self):
'''Gets event end struct_time'''
if 'Z' in self.json['end']:
return time.strptime(self.json['end'], self.time_string+'Z')
else:
return time.strptime(self.json['end']["dateTime"].split('.')[0], self.time_string)
def getAttendees(self):
'''Gets list of event attendees.'''
return self.json['attendees']
def getReminder(self):
'''Gets the reminder's state.'''
return self.json['isReminderOn']
def getCategories(self):
'''Gets the list of categories for this event'''
return self.json['categories']
def setSubject(self,val):
'''sets event subject line.'''
self.json['subject'] = val
def setBody(self,val,contentType='Text'):
'''
sets event body content:
Examples for ContentType could be 'Text' or 'HTML'
'''
cont = False
while not cont:
try:
self.json['body']['content'] = val
self.json['body']['contentType'] = contentType
cont = True
except:
self.json['body'] = {}
def setStart(self,val):
'''
sets event start time.
Argument:
        val - this argument can be passed in three different ways. You can pass it in as an int
        or float, in which case the assumption is that it's seconds since Unix Epoch. You can
        pass it in as a struct_time. Or you can pass in a string. The string must be formatted
in the json style, which is %Y-%m-%dT%H:%M:%S. If you stray from that in your string
you will break the library.
'''
if isinstance(val,time.struct_time):
self.json['start'] = {"dateTime":time.strftime(self.time_string,val), "timeZone": self.startTimeZone}
elif isinstance(val,int):
self.json['start'] = {"dateTime":time.strftime(self.time_string,time.gmtime(val)), "timeZone": self.startTimeZone}
elif isinstance(val,float):
self.json['start'] = {"dateTime":time.strftime(self.time_string,time.gmtime(val)), "timeZone": self.startTimeZone}
else:
            #this last one assumes you know how to format the time string. if it breaks, check
            #your time string!
self.json['start'] = val
def setEnd(self,val):
'''
sets event end time.
Argument:
        val - this argument can be passed in three different ways. You can pass it in as an int
        or float, in which case the assumption is that it's seconds since Unix Epoch. You can
        pass it in as a struct_time. Or you can pass in a string. The string must be formatted
in the json style, which is %Y-%m-%dT%H:%M:%SZ. If you stray from that in your string
you will break the library.
'''
if isinstance(val,time.struct_time):
self.json['end'] = {"dateTime":time.strftime(self.time_string,val), "timeZone": self.endTimeZone}
elif isinstance(val,int):
self.json['end'] = {"dateTime":time.strftime(self.time_string,time.gmtime(val)), "timeZone": self.endTimeZone}
elif isinstance(val,float):
self.json['end'] = {"dateTime":time.strftime(self.time_string,time.gmtime(val)), "timeZone": self.endTimeZone}
else:
            #this last one assumes you know how to format the time string. if it breaks, check
            #your time string!
self.json['end'] = val
def setAttendees(self,val):
'''
set the attendee list.
val: the one argument this method takes can be very flexible. you can send:
            a dictionary: this must be a dictionary formatted as such:
                {"EmailAddress":{"Address":"recipient@example.com"}}
                with other options such as "Name" with address. but at minimum it must have this.
            a list: this must be a list of dictionaries formatted the way specified above,
                or it can be a list of objects of type Contact. The method will sort
                out the dictionaries from the contacts.
            a string: this is if you just want to pass a single email address.
a contact: type Contact from this library.
For each of these argument types the appropriate action will be taken to fit them to the
needs of the library.
'''
self.json['attendees'] = []
if isinstance(val,list):
self.json['attendees'] = val
elif isinstance(val,dict):
self.json['attendees'] = [val]
elif isinstance(val,str):
if '@' in val:
self.addAttendee(val)
elif isinstance(val,Contact):
self.addAttendee(val)
elif isinstance(val,Group):
self.addAttendee(val)
else:
return False
return True
def setStartTimeZone(self,val):
'''sets event start timezone'''
self.startTimeZone = val
self.json['start']["startTimeZone"] = val
def setEndTimeZone(self,val):
'''sets event end timezone'''
self.endTimeZone = val
self.json['end']["endTimeZone"] = val
def addAttendee(self,address,name=None):
'''
Adds a recipient to the attendee list.
Arguments:
address -- the email address of the person you are sending to. <<< Important that.
Address can also be of type Contact or type Group.
name -- the name of the person you are sending to. mostly just a decorator. If you
send an email address for the address arg, this will give you the ability
                to set the name properly, otherwise it uses the email address up to the
                at sign for the name. But if you send a type Contact or type Group, this
argument is completely ignored.
'''
if isinstance(address,Contact):
self.json['attendees'].append(address.getFirstEmailAddress())
elif isinstance(address,Group):
            for con in address.contacts:
                # append each contact in the group, not the group object itself
                self.json['attendees'].append(con.getFirstEmailAddress())
else:
if name is None:
name = address[:address.index('@')]
self.json['attendees'].append({'emailAddress':{'address':address,'name':name}})
def setLocation(self,loc):
'''
Sets the event's location.
Arguments:
            loc -- two options, you can send a dictionary in the format described here:
https://msdn.microsoft.com/en-us/office/office365/api/complex-types-for-mail-contacts-calendar#LocationBeta
this will allow you to set address, coordinates, displayname, location email
address, location uri, or any combination of the above. If you don't need that much
                detail you can simply send a string and it will be set as the location's display
name. If you send something not a string or a dict, it will try to cast whatever
you send into a string and set that as the display name.
'''
        if 'location' not in self.json:
            self.json['location'] = {"address": None}
if isinstance(loc,dict):
self.json['location'] = loc
else:
self.json['location'] = {'displayName':str(loc)}
def getLocation(self):
'''
Get the current location, if one is set.
'''
if 'location' in self.json:
return self.json['location']
return None
def setReminder(self,val):
'''
Sets the event's reminder.
Argument:
val -- a boolean
'''
if val == True or val == False:
self.json['isReminderOn'] = val
def setCategories(self,cats):
'''
Sets the event's categories.
Argument:
cats -- a list of categories
'''
if isinstance(cats, (list, tuple)):
self.json['categories'] = cats
#To the King!
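
# Illustrative usage sketch (credentials and the calendar object are placeholders;
# a Calendar instance would normally come from this library's calendar classes):
#
#   e = Event(auth=('user@example.com', 'password'), cal=my_calendar)
#   e.setSubject('Team sync')
#   e.setBody('Weekly status meeting')
#   e.setStart(time.time() + 3600)      # one hour from now (seconds since epoch)
#   e.setEnd(time.time() + 5400)
#   e.addAttendee('colleague@example.com')
#   created = e.create()                # returns the created Event, or False on failure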
|
import arcpy
import os
import logging
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
arcpy.env.workspace = os.path.dirname(os.path.dirname(__file__))
arcpy.env.overwriteOutput = True
gdb = os.path.join(arcpy.env.workspace, 'NCIClinicalTrialsAPI', 'NCIClinicalTrialsAPI.gdb')
def removeTables():
logger.debug('Deleting unnecessary tables...')
for table in ['nciSites', 'nciUniqueSites']:
arcpy.management.Delete(table)
|
import os
import numpy as np
from utils.load_data import load_data
from utils.CSV_data_generator import DataGen
from models.NormBase import NormBase
def evaluate_model(config, save_name, legacy = False):
"""
    This function runs and evaluates the model with the given config.
    The model is saved to models/saved/config['save_name']/save_name.
    If results were already calculated, they are loaded instead of recomputed.
:param config:
:param save_name:
:param legacy: set to True to use old version
:return:
"""
if not os.path.exists(os.path.join("models/saved", config['save_name'])):
os.mkdir(os.path.join("models/saved", config['save_name']))
# folder for save and load
save_folder = os.path.join("models/saved", config['save_name'], save_name)
if not os.path.exists(save_folder):
os.mkdir(save_folder)
if not legacy:
try:
# load results if available
accuracy = np.load(os.path.join(save_folder, "accuracy.npy"))
it_resp = np.load(os.path.join(save_folder, "it_resp.npy"))
labels = np.load(os.path.join(save_folder, "labels.npy"))
except IOError:
try:
norm_base = NormBase(config, input_shape=(224,224,3), save_name=save_name)
except IOError:
norm_base = NormBase(config, input_shape=(224,224,3))
data_train = load_data(config)
norm_base.fit(data_train)
norm_base.save_model(config, save_name)
data_test = load_data(config, train=False)
accuracy, it_resp, labels = norm_base.evaluate(data_test)
return accuracy, it_resp, labels
else:
try:
# load results if available
accuracy = np.load(os.path.join(save_folder, "accuracy.npy"))
it_resp = np.load(os.path.join(save_folder, "it_resp.npy"))
labels = np.load(os.path.join(save_folder, "labels.npy"))
print("[MODEL] it_resp is available and is loaded from {}".format(save_folder))
# load vectors if available
ref_vector = np.load(os.path.join(save_folder, "ref_vector.npy"))
tun_vector = np.load(os.path.join(save_folder, "tuning_vector.npy"))
except IOError:
# calculate results if not available
print("[LOOP] start training")
# create model
norm_base = NormBase(config, input_shape=(224, 224, 3))
try:
# load vectors if available
ref_vector = np.load(os.path.join(save_folder, "ref_vector.npy"))
tun_vector = np.load(os.path.join(save_folder, "tuning_vector.npy"))
print("[MODEL] ref_vector and tun_vector are available and loaded from {}"
.format(save_folder))
norm_base.set_ref_vector(ref_vector)
norm_base.set_tuning_vector(tun_vector)
print("[MODEL] Set ref vector", np.shape(ref_vector))
print("[MODEL] Set tuning vector", np.shape(tun_vector))
except IOError:
# calculate vectors if not available
# load train data
data_train = load_data(config)
print("[Data] -- Data loaded --")
# train model
norm_base.fit(data_train, batch_size=config['batch_size'])
ref_vector = norm_base.r
tun_vector = norm_base.t
# save model
np.save(os.path.join(save_folder, "ref_vector"), ref_vector)
np.save(os.path.join(save_folder, "tuning_vector"), tun_vector)
print("[LOOP] start prediction")
# load test data
data_test = load_data(config, train=False, sort_by=['image'])
print("[Data] -- Data loaded --")
# evaluate
accuracy, it_resp, labels = norm_base.evaluate(data_test)
np.save(os.path.join(save_folder, "accuracy"), accuracy)
np.save(os.path.join(save_folder, "it_resp"), it_resp)
np.save(os.path.join(save_folder, "labels"), labels)
return accuracy, it_resp, labels, ref_vector, tun_vector
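
# Illustrative call sketch (the config keys shown are assumptions inferred from the
# lookups above; real configs are defined elsewhere in this project):
#
#   config = {'save_name': 'norm_base_v1', 'batch_size': 32}   # plus whatever load_data() expects
#   accuracy, it_resp, labels = evaluate_model(config, save_name='run_001')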
|
import cv2
import numpy as np
from utils import transform
def main():
img = cv2.imread('/home/sandeep/tmp/img01.png', cv2.IMREAD_COLOR).astype(np.float32)
img = transform.blur_image(img)
cv2.imwrite('/home/sandeep/tmp/img01-blur.png', img)
if __name__ == '__main__':
main()
|
from typing import Optional
from contracts.story import (GetStoryRequest, GetStoryResponse,
DetailStoryRequest, DetailStoryResponse,
PostStoryRequest, PostStoryResponse,
PutStoryRequest, PutStoryResponse,
DeleteStoryRequest, DeleteStoryResponse)
from .repository import StoryRespository
class StoryServiceAgent(object):
@classmethod
def get_story(cls, request_obj: GetStoryRequest, path: str) -> GetStoryResponse:
story_list, total_count = StoryRespository.get_story(limit=request_obj.limit, offset=request_obj.offset,
sort=request_obj.sort, category_id=request_obj.category_id)
return GetStoryResponse(request=request_obj, response=story_list, path=path, total_count=total_count)
@classmethod
def detail_story(cls, request_obj: DetailStoryRequest) -> DetailStoryResponse:
story = StoryRespository.detail_story(id=request_obj.id)
return DetailStoryResponse(**story.__dict__)
@classmethod
def post_story(cls, request_obj: PostStoryRequest) -> PostStoryResponse:
story = StoryRespository.post_story(category_id=request_obj.category_id, name=request_obj.name,
slug=request_obj.slug, description=request_obj.description,
content=request_obj.content)
return PostStoryResponse(**story.__dict__)
@classmethod
def put_story(cls, request_obj: PutStoryRequest) -> PutStoryResponse:
story = StoryRespository.put_story(id=request_obj.id, category_id=request_obj.category_id,
name=request_obj.name, slug=request_obj.slug,
description=request_obj.description, content=request_obj.content)
return PutStoryResponse(**story.__dict__)
@classmethod
def delete_story(cls, request_obj: DeleteStoryRequest) -> DeleteStoryResponse:
story = StoryRespository.delete_story(id=request_obj.id)
return DeleteStoryResponse(story)
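
# Illustrative usage sketch (the request-contract constructor and its fields are
# assumptions based on the attributes accessed above):
#
#   request = GetStoryRequest(limit=10, offset=0, sort='created_at', category_id=None)
#   response = StoryServiceAgent.get_story(request, path='/stories')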
|
import hashlib
import pathlib
from io import TextIOBase
from typing import Optional, List, Tuple, Set
from cincan.command_log import CommandLogIndex, CommandLog, quote_args
class FileDependency:
"""File dependency information for command log"""
def __init__(self, file: pathlib.Path, digest: str, out:bool):
self.file = file
self.digest = digest
self.out = out
self.next: List['CommandDependency'] = []
def __str__(self):
file_string = self.file.as_posix() + (' ' + self.digest[:16] if self.digest else '/')
next_strings = [str(s).replace('\n', '\n ') for s in self.next]
p = '\n|-- ' if self.out else '\n^-- '
return file_string + (p + p.join(next_strings) if next_strings else '')
class CommandDependency:
"""Command dependency information for command log"""
def __init__(self, command: CommandLog, out:bool):
self.command = command
self.out = out
self.next: List[FileDependency] = []
def __str__(self):
cmd_string = " ".join(quote_args(self.command.command))
next_strings = [str(s).replace('\n', '\n ') for s in self.next]
p = '\n|-->' if self.out else '\n^---'
return cmd_string + (p + p.join(next_strings) if next_strings else '')
class CommandInspector:
"""Inspector for doing analysis based on command log"""
def __init__(self, log: CommandLogIndex, work_dir: pathlib.Path):
self.log = log
self.work_dir = work_dir
def __work_path(self, path: pathlib.Path) -> pathlib.Path:
if path.as_posix().startswith('/dev/'):
return path
try:
return path.relative_to(self.work_dir)
except ValueError:
return path.resolve()
def fanin(self, file: pathlib.Path, depth: int, already_covered: Set[str] = None,
digest: Optional[str] = None) -> FileDependency:
file_digest = digest or self.hash_of(file)
file_dep = FileDependency(self.__work_path(file), file_digest, out=False)
file_check = file.as_posix() + ':' + file_digest
already_covered = already_covered or set([])
if depth < 1 or file_check in already_covered:
return file_dep
already_covered.add(file_check)
for cmd in self.log.list_entries(reverse=True):
output_here = any(filter(lambda f: f.digest == file_digest, cmd.out_files))
if output_here:
cmd_dep = CommandDependency(cmd, out=False)
for file in cmd.in_files:
cmd_dep.next.append(self.fanin(file.path, depth - 1, already_covered, file.digest))
file_dep.next.append(cmd_dep)
return file_dep
def fanout(self, file: pathlib.Path, depth: int, already_covered: Set[str] = None,
digest: Optional[str] = None) -> FileDependency:
file_digest = digest or self.hash_of(file)
file_dep = FileDependency(self.__work_path(file), file_digest, out=True)
file_check = file.as_posix() + ':' + file_digest
already_covered = already_covered or set([])
if depth < 1 or file_check in already_covered:
return file_dep
already_covered.add(file_check)
for cmd in self.log.list_entries(reverse=True):
input_here = any(filter(lambda f: f.digest == file_digest, cmd.in_files))
if input_here:
cmd_dep = CommandDependency(cmd, out=True)
for file in cmd.out_files:
cmd_dep.next.append(self.fanout(file.path, depth -1, already_covered, file.digest))
file_dep.next.append(cmd_dep)
return file_dep
@classmethod
def hash_of(cls, file: pathlib.Path) -> str:
if not file.is_file():
return ''
md = hashlib.sha256()
with file.open("rb") as f:
chunk = f.read(2048)
while chunk:
md.update(chunk)
chunk = f.read(2048)
return md.hexdigest()
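
# Illustrative usage sketch (how the CommandLogIndex is constructed is assumed to
# follow cincan.command_log; the file path below is a placeholder):
#
#   inspector = CommandInspector(CommandLogIndex(), pathlib.Path.cwd())
#   tree = inspector.fanin(pathlib.Path('results/output.json'), depth=3)
#   print(tree)   # FileDependency.__str__ renders the dependency tree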
|
import os
os.environ['AWS_REGION'] = 'eu-west-1'
from ..index import Item
def test_bool_convert():
item = Item()
item.item_key = {'key': {'S': 'value'}}
item.item_value = {
'string': {'S': 'str'},
'bool': {'BOOL': 'true'},
'map': {'M': {
'string': {'S': 'str'},
'bool': {'BOOL': 'true'},
}},
'list': {'L': [
{'S': 'str'},
{'BOOL': 'true'},
]},
}
out = item.construct_item()
assert out['string']['S'] == 'str'
assert isinstance(out['bool']['BOOL'], bool)
assert out['bool']['BOOL'] is True
assert isinstance(out['map']['M']['bool']['BOOL'], bool)
assert out['map']['M']['bool']['BOOL'] is True
assert isinstance(out['list']['L'][1]['BOOL'], bool)
assert out['list']['L'][1]['BOOL'] is True
|
import math
import random
import sqlite3
# CONSTANTS
NODE_POS_OFFSET_AT_CREATION = 10
MIN_NODE_FRICTION = 0
MAX_NODE_FRICTION = 1
class Node:
def __init__(self, x_pos, y_pos, friction, speed, angle):
# position in space
self.x_pos = x_pos
self.y_pos = y_pos
# friction with the ground
self.friction = friction
# speed vector (better than vX and vY?)
self.speed = speed
self.angle = angle
@classmethod
def from_random(cls):
# TODO: update generation of random x and y position (?)
x_pos = random.randint(-1 * NODE_POS_OFFSET_AT_CREATION, NODE_POS_OFFSET_AT_CREATION)
y_pos = random.randint(-1 * NODE_POS_OFFSET_AT_CREATION, NODE_POS_OFFSET_AT_CREATION)
friction = random.uniform(MIN_NODE_FRICTION, MAX_NODE_FRICTION)
        # node doesn't have any speed at creation
speed = 0
angle = 0
return cls(x_pos, y_pos, friction, speed, angle)
class Muscle:
def __init__(self):
pass
@classmethod
def random_init(cls):
pass
class Creature:
def __init__(self):
self.nodes = []
self.muscles = []
self.alive = True
@classmethod
def random_init(cls):
pass
def insert_into_db(self, db_name):
conn = sqlite3.connect(db_name)
cur = conn.cursor()
insert_creature = """
INSERT INTO creatures
(alive)
VALUES
(1);
"""
get_creature_id = """
SELECT creature_id FROM creatures
ORDER BY creature_id DESC
LIMIT 1;
"""
insert_nodes = """
INSERT INTO nodes
(x_position, y_position, friction, creature_id)
VALUES
(?, ?, ?, ?);
"""
insert_muscles = """
        INSERT INTO muscles
(extended_length, contracted_length, extended_time, contracted_time, strength,
node_1_id, node_2_id, creature_id)
VALUES
(?, ?, ?, ?, ?,
?, ?, ?);
"""
        cur.execute(insert_creature)
        # fetch the auto-incremented id of the creature just inserted
        creature_id = cur.execute(get_creature_id).fetchone()[0]
        for node in self.nodes:
            # sqlite3 expects the bound parameters as a single sequence
            cur.execute(insert_nodes, (node.x_pos, node.y_pos, node.friction, creature_id))
        # TODO: find better way?
        for muscle in self.muscles:
            # assumes each Muscle carries node_1_id / node_2_id attributes
            cur.execute(insert_muscles, (muscle.extended_length, muscle.contracted_length,
                                         muscle.extended_time, muscle.contracted_time, muscle.strength,
                                         muscle.node_1_id, muscle.node_2_id, creature_id))
        conn.commit()
        cur.close()
        conn.close()
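
# Illustrative usage sketch (assumes a SQLite database whose creatures/nodes/muscles
# tables were created elsewhere; the file name is a placeholder):
#
#   creature = Creature()
#   creature.nodes.append(Node.from_random())
#   creature.insert_into_db('evolution.db')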
def nCr(n, r):
f = math.factorial
return f(n) // f(r) // f(n-r)
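# Quick sanity check (illustrative): nCr computes the binomial coefficient
# n! / (r! * (n - r)!) with floor division so the result stays an integer, e.g.
#   nCr(5, 2) == 10
#   nCr(6, 3) == 20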
def main():
pass
if __name__ == '__main__':
main()
|
# SPDX-License-Identifier: Apache-2.0
"""
Test scikit-learn's PowerTransform
"""
import unittest
import numpy as np
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from test_utils import dump_data_and_model, TARGET_OPSET
try:
    from sklearn.preprocessing import PowerTransformer  # noqa
except ImportError:
# Not available in scikit-learn < 0.20.0
PowerTransformer = None
class TestSklearnPowerTransformer(unittest.TestCase):
"""Test cases for PowerTransform converter"""
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_yeo_johnson_positive_without_scaler(self):
pt = PowerTransformer(standardize=False)
data = np.array([[1, 2], [3, 2], [4, 5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_yeo_johnson_negative_without_scaler(self):
pt = PowerTransformer(standardize=False)
data = np.array([[-1, -2], [-3, -2], [-4, -5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_yeo_johnson_combined_without_scaler(self):
pt = PowerTransformer(standardize=False)
data = np.array([[1, -2], [0, -2], [-4, 5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_box_cox_without_scaler(self):
pt = PowerTransformer(standardize=False, method='box-cox')
data = np.array([[1, 2], [3, 2], [4, 5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_yeo_johnson_positive_with_scaler(self):
pt = PowerTransformer()
data = np.array([[1, 2], [3, 2], [4, 5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_with_scaler_blacklist(self):
pt = PowerTransformer()
data = np.array([[1, 2], [3, 2], [4, 5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET,
black_op={'Scaler'})
self.assertNotIn("Scaler", str(model_onnx))
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformerBlackList")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_yeo_johnson_negative_with_scaler(self):
pt = PowerTransformer()
data = np.array([[-1, -2], [-3, -2], [-4, -5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_yeo_johnson_combined_with_scaler(self):
pt = PowerTransformer()
data = np.array([[1, -2], [3, -2], [-4, 5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_box_cox_with_scaler(self):
pt = PowerTransformer(method='box-cox')
data = np.array([[1, 2], [3, 2], [4, 5]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
@unittest.skipIf(PowerTransformer is None, "Problems with import occurred")
def test_powertransformer_zeros(self):
pt = PowerTransformer()
data = np.array([[0, 0], [0, 0]], dtype=np.float32)
model = pt.fit(data)
model_onnx = convert_sklearn(model, "scikit-learn PowerTransformer",
[("input_float",
FloatTensorType([None, None]))],
target_opset=TARGET_OPSET)
self.assertIsNotNone(model_onnx)
dump_data_and_model(data, model, model_onnx,
basename="PowerTransformer")
if __name__ == '__main__':
unittest.main()
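# Illustrative sketch (not part of the test suite): a converted model can be
# scored with onnxruntime, assuming it is installed; "input_float" matches the
# initial type declared in the tests above.
#   import onnxruntime as rt
#   sess = rt.InferenceSession(model_onnx.SerializeToString())
#   onnx_out = sess.run(None, {"input_float": data})[0]
#   np.testing.assert_allclose(model.transform(data), onnx_out, rtol=1e-4)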
|
import fpdf
from fpdf import FPDF, HTMLMixin
# Create pdf report to show the results in a readable way
# The content of the pdf has to be written following the
# HTML standard. It will be converted in pdf at the end,
# using output_pdf()
class MyFPDF(FPDF, HTMLMixin):
pass
# parameters for the pdf
pdf = MyFPDF()
font_size = 11
html = ''
check = False
# initialize the structure of the pdf and append the title of the report in an HTML format
def init():
global html
global check
pdf.add_page()
html += '<H1 align="center">MQTTSA Report</H1>'
check = True
# append a paragraph to the HTML
def add_paragraph(title, msg=None):
global html
global check
if check == False:
init()
if msg != None:
        html += '<h2 align="left">'+title+"</h2><font size="+str(font_size)+">"+msg+'</font><br>'
else:
html += '<h2 align="left">'+title+'</h2>'
# append a sub-paragraph to the HTML
def add_sub_paragraph(title, msg=None):
global html
if msg != None:
        html += '<h4 align="left">'+title+"</h4><font size="+str(font_size)+">"+msg+'</font><br>'
else:
html += '<h4 align="left">'+title+'</h4>'
# append to an existing paragraph of the HTML
def add_to_existing_paragraph(msg):
global html
html += "<font size="+str(font_size)+">"+msg+'</font><br>'
# generate the pdf using the HTML
def output_pdf():
global html
pdf.write_html(html.encode('utf-8').decode('latin-1'))
pdf.output("../report.pdf")
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# @license : Copyright(C), Your Company
# @Author: Zhang Dong
# @Contact : 1010396971@qq.com
# @Date: 2020-07-19 9:20
# @Description: model download links: https://github.com/pytorch/vision/issues/616
#               using a local model:  https://github.com/pytorch/vision/pull/1057
# @Software : PyCharm
from __future__ import print_function
from __future__ import division
import os
import torch.nn as nn
from torchvision import datasets, models
from PIL import Image
import torchvision.transforms as transforms
from torch.autograd import Variable
num_classes = 2  # number of classes in the dataset
BATCH_SIZE = 128 # batch_size
# Flag for feature extracting. When False, we fine-tune the whole model,
# when True we only update the reshaped layer params
feature_extract = True
TARGET_IMG_SIZE = 224
img_to_tensor = transforms.ToTensor()
# and then put local models into ./models/checkpoints/resnet18-5c106cde.pth
os.environ['TORCH_HOME'] = './models'
def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:  # use the function argument rather than the module-level flag
for param in model.parameters():
param.requires_grad = False
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
model_ft = None
input_size = 0
if model_name == 'resnet':
""" Resnet 18 """
model_ft = models.resnet18(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "alexnet":
""" Alexnet """
model_ft = models.alexnet(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "vgg":
""" VGG11_bn """
model_ft = models.vgg11_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "squeezenet":
""" Squeezenet """
model_ft = models.squeezenet1_0(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))
model_ft.num_classes = num_classes
input_size = 224
elif model_name == "densenet":
""" Densenet """
model_ft = models.densenet121(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_ftrs, num_classes)
input_size = 224
elif model_name == "inception":
""" Inception v3 Be careful, expects (299,299) sized images and has auxiliary output """
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
num_ftrs = model_ft.AuxLogits.fc.in_features
model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
# Handle the primary net
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, num_classes)
input_size = 299
else:
print("Invalid model name, exiting...")
exit()
    return model_ft, input_size  # callers unpack both the model and its input size
# Model to choose from [resnet, alexnet, vgg, squeezenet, densenet, inception]
model_name = 'resnet'
model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
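# Illustrative check (optional): with feature_extract=True only the replaced
# fc layer should remain trainable. Uncomment to verify:
# print([name for name, p in model_ft.named_parameters() if p.requires_grad])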
def extract_feature(model, imgpath):
model.eval()
img = Image.open(imgpath)
img = img.resize((TARGET_IMG_SIZE, TARGET_IMG_SIZE))
    tensor = img_to_tensor(img)  # convert the image to a tensor
    tensor = tensor.unsqueeze(0)  # add a batch dimension: [C, H, W] -> [1, C, H, W]
    # tensor = tensor.cuda()  # keep this commented out when running on CPU only
    result = model(Variable(tensor))
    result_npy = result.data.cpu().numpy()  # move to CPU before saving, otherwise it may fail
    return result_npy[0]  # index [0] drops the batch dimension; with the fc head here this is a [num_classes] vector
if __name__=="__main__":
    model, _ = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
imgpath = './t.jpg'
tmp = extract_feature(model, imgpath)
    print(tmp.shape)  # print the shape of the extracted feature tensor
    print(tmp)  # print the values; replace this with code that saves the tensor if needed
|
#!/usr/bin/env python
def gcd(m, n):
while n:
m, n = n, m % n
return m
def lcm(m, n):
    return m * n // gcd(m, n)  # integer division so the LCM stays an int
def is_power(n):
    # True if n is a power of two (1, 2, 4, 8, ...)
    return ((n & (n - 1)) == 0) and n != 0
if __name__ == '__main__':
    print(lcm(512, 3))
    print('16:', is_power(16))
    print('4:', is_power(4))
    print('2:', is_power(2))
    print('5:', is_power(5))
    print('1:', is_power(1))
|
from typing import Tuple
import numpy as np
def get_dataset() -> Tuple[np.ndarray, np.ndarray]:
"""OR-Function dataset"""
    x = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])  # inputs to the OR gate
    y = np.array([[0], [1], [1], [1]])  # outputs of an OR gate
return x, y
def accuracyScore(y_true: np.ndarray, y_pred: np.ndarray):
    N = y_true.shape[0]  # number of data points
    accuracy = np.sum(y_true == y_pred) / N  # fraction of correct predictions
return accuracy
def step_function(input_sig: np.ndarray) -> np.ndarray:
output_signal = (input_sig > 0.0).astype(np.int_)
return output_signal
class Perceptron:
def __init__(self, learning_rate: float, input_dim: int) -> None:
"""Initialisierung des Perceptron Objekts"""
self.learning_rate = learning_rate
self.input_dim = input_dim
        # Create the weight matrix with randomly chosen weights
self.w = np.random.uniform(-1, 1, size=(self.input_dim, 1))
def _update_weights(self, x: np.ndarray, y: np.ndarray, y_pred: np.ndarray) -> None:
error = (y - y_pred)
delta = error * x
for delta_i in delta:
self.w = self.w + self.learning_rate * delta_i.reshape(-1, 1)
def train(self, x: np.ndarray, y: np.ndarray, epochs: int = 1) -> None:
"""Trainieren des Models. 1.Vorhersagte, 2.Gewichtung anpassen, 3.Genauigkeit, 4.Ausgabe"""
for epoch in range(1, epochs + 1):
y_pred = self.predict(x)
self._update_weights(x, y, y_pred)
accuracy = accuracyScore(y, y_pred)
print(f"Epoch: {epoch} Accuracy: {accuracy}")
def predict(self, x: np.ndarray) -> np.ndarray:
input_sig = np.dot(x, self.w)
output_sig = step_function(input_sig)
return output_sig
    def evaluate(self, x: np.ndarray, y: np.ndarray) -> float:
y_pred = self.predict(x)
return accuracyScore(y, y_pred)
if __name__ == "__main__":
x, y = get_dataset()
    input_dim = x.shape[1]  # number of features
learning_rate = 0.5
p = Perceptron(learning_rate, input_dim)
p.train(x, y, epochs=10)
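    # Final sanity check using the class's own evaluate() helper; once the OR
    # function has been learned this typically prints 1.0.
    print(f"Final accuracy: {p.evaluate(x, y)}")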
|
from random import randint
from sqlalchemy.exc import IntegrityError
from faker import Faker
from app import db
from app.models import User, Post, Comment
def post_comments():
fake = Faker()
user_count = User.query.count()
post_count = Post.query.count()
for i in range(1,post_count+1):
p = Post.query.get(i)
        for _ in range(randint(10, 15)):
u = User.query.offset(randint(0, user_count - 1)).first()
comment = Comment(
body=fake.text(),
date_created=fake.past_date(),
author=u,
post=p)
db.session.add(comment)
db.session.commit()
|
"""
Given a number of stairs, you can climb at most m stairs at a time.
For instance, for m=3, you can climb 1, 2, or 3 stairs at a time.
Count the number of different ways that you can reach the top.
:param stair_count: Number of stairs to climb.
:param max_steps: Maximum number of stairs you can climb at a time.
:return: Number of different ways to reach the top.
"""
def climb_stairs_recursive(stair_count, max_steps):
    if stair_count <= 2:
        return stair_count
    if max_steps <= 1:
        return max_steps
    total = 0
    for i in range(max_steps):
        remaining = stair_count - (i + 1)
        if remaining < 0:
            break  # overshooting the top is not a valid way
        # landing exactly on the top counts as one complete way
        total += 1 if remaining == 0 else climb_stairs_recursive(remaining, max_steps)
    return total
def climb_stairs_recursive_memoized(stair_count, max_steps, _memo=None):
    # Same recurrence as climb_stairs_recursive, but results are cached per
    # remaining stair count so every subproblem is computed only once.
    if stair_count <= 2:
        return stair_count
    if max_steps <= 1:
        return max_steps
    if _memo is None:
        _memo = {}
    if stair_count not in _memo:
        total = 0
        for i in range(max_steps):
            remaining = stair_count - (i + 1)
            if remaining < 0:
                break  # overshooting the top is not a valid way
            total += 1 if remaining == 0 else climb_stairs_recursive_memoized(remaining, max_steps, _memo)
        _memo[stair_count] = total
    return _memo[stair_count]
def climb_stairs_fibonacci(stair_count, max_steps):
if stair_count <= 1:
return stair_count
if max_steps <= 1:
return 1
fib = [0, 1]
for _ in range(stair_count):
fib.append(sum(fib))
if len(fib) > max_steps:
fib.pop(0)
return fib[-1]
def climb_stairs_with_variable_steps(stair_count, possibleSteps):
pass
# tests
assert climb_stairs_fibonacci(0, 2) == 0
assert climb_stairs_recursive(0, 2) == 0
assert climb_stairs_recursive_memoized(0, 2) == 0
assert climb_stairs_fibonacci(1, 2) == 1
assert climb_stairs_recursive(1, 2) == 1
assert climb_stairs_recursive_memoized(1, 2) == 1
assert climb_stairs_fibonacci(2, 2) == 2
assert climb_stairs_recursive(2, 2) == 2
assert climb_stairs_recursive_memoized(2, 2) == 2
assert climb_stairs_fibonacci(7, 2) == 21
assert climb_stairs_recursive(7, 2) == 21
assert climb_stairs_recursive_memoized(7, 2) == 21
assert climb_stairs_fibonacci(40, 1) == 1
assert climb_stairs_recursive(40, 1) == 1
assert climb_stairs_recursive_memoized(40, 1) == 1
assert climb_stairs_fibonacci(16, 7) == 31489
assert climb_stairs_recursive(16, 7) == 31489
assert climb_stairs_recursive_memoized(16, 7) == 31489
assert climb_stairs_fibonacci(30, 30) == 536870912
assert climb_stairs_recursive(30, 30) == 536870912  # correct but very slow without memoization (~5e8 recursive paths)
assert climb_stairs_recursive_memoized(30, 30) == 536870912
print('done: all tests pass')
|
from store_vertices import store_vertices
from dijkstra_algo import dijkstra
def main():
nodes = set()
graph = dict()
result = []
visited = {}
e = int(input('Enter the number of roads: '))
store_vertices(e, nodes, graph)
starting = input('Starting point: ')
    ending = input('Ending point: ')
unvisited = {node: None for node in nodes}
current = starting
current_distance = 0
unvisited[current] = current_distance
prev = dijkstra(graph, unvisited, visited, current_distance, current)
while ending in prev:
result.append(ending)
ending = prev[ending]
result.append(starting)
    print('Shortest path will be: ')
    print(' -> '.join(reversed(result)))
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import unittest
from unittest.mock import MagicMock, patch
from dateutil.tz import tzlocal
from airflow import DAG, AirflowException
from airflow.contrib.sensors.emr_run_job_flows import EmrRunJobFlows
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestEmrRunJobFlows(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
# Mock out the emr_client (moto has incorrect response)
self.emr_client = MagicMock()
self.boto3_session = None # This is set in _verify_job_flow_execution
self.emr_run_job_flows = EmrRunJobFlows(
task_id='test_task',
poke_interval=0,
job_flows=self._stubbed_job_flows([
["cluster1"], # first batch is just this cluster
["cluster2a", "cluster2b"], # then these two run in parallel
["cluster3"]]), # and finally, this third batch
dag=DAG('test_dag_id', default_args=args)
)
self.states = {}
self.clusters = []
def _stubbed_job_flows(self, names_queue):
job_flows = []
for names_batch in names_queue:
job_flows_batch = {}
for name in names_batch:
job_flows_batch[name] = self._cluster_config(name)
job_flows.append(job_flows_batch)
return job_flows
def _cluster_config(self, name):
return {
'Name': name,
'ReleaseLabel': '5.11.0',
'Instances': {
'KeepJobFlowAliveWhenNoSteps': False
},
'Steps': [{
'Name': 'test_step',
'ActionOnFailure': 'TERMINATE_CLUSTER',
'HadoopJarStep': {
'Jar': 'command-runner.jar',
'Args': [
'/usr/lib/spark/bin/run-example',
'{{ macros.ds_add(ds, -1) }}',
'{{ ds }}'
]
}
}]
}
def test_execute_calls_until_all_clusters_reach_a_terminal_state(self):
self.clusters = ["cluster1", "cluster2a", "cluster2b", "cluster3"]
self.states["j-cluster1"] = []
self.states["j-cluster1"].append("STARTING")
self.states["j-cluster1"].append("BOOTSTRAPPING")
self.states["j-cluster1"].append("RUNNING")
self.states["j-cluster1"].append("RUNNING")
self.states["j-cluster1"].append("TERMINATING")
self.states["j-cluster1"].append("TERMINATED") # (End Of Batch)
self.states["j-cluster2a"] = []
self.states["j-cluster2b"] = []
self.states["j-cluster2a"].append("STARTING") # a
self.states["j-cluster2b"].append("STARTING") # b
self.states["j-cluster2a"].append("BOOTSTRAPPING") # a
self.states["j-cluster2b"].append("BOOTSTRAPPING") # b
self.states["j-cluster2a"].append("RUNNING") # a
self.states["j-cluster2b"].append("RUNNING") # b
self.states["j-cluster2a"].append("RUNNING") # a
self.states["j-cluster2b"].append("RUNNING") # b
self.states["j-cluster2a"].append("RUNNING") # a
self.states["j-cluster2b"].append("TERMINATING") # b
self.states["j-cluster2a"].append("RUNNING") # a
self.states["j-cluster2b"].append("TERMINATED") # b: terminal
self.states["j-cluster2a"].append("TERMINATING") # a
self.states["j-cluster2b"].append("TERMINATED") # b: terminal
self.states["j-cluster2a"].append("TERMINATED") # a (End Of Batch)
self.states["j-cluster2b"].append("TERMINATED") # b (End Of Batch)
self.states["j-cluster3"] = []
self.states["j-cluster3"].append("STARTING")
self.states["j-cluster3"].append("BOOTSTRAPPING")
self.states["j-cluster3"].append("RUNNING")
self.states["j-cluster3"].append("RUNNING")
self.states["j-cluster3"].append("TERMINATING")
self.states["j-cluster3"].append("TERMINATED") # (all done)
self.emr_client.describe_cluster.side_effect = self._describe
self.emr_client.run_job_flow.side_effect = self._create
self._verify_job_flow_execution()
def test_execute_stops_when_cluster_in_batch_fails(self):
self.clusters = ["cluster1"]
# First, cluster1 is queried until it terminates
self.states["j-cluster1"] = []
self.states["j-cluster1"].append("STARTING")
self.states["j-cluster1"].append("BOOTSTRAPPING")
self.states["j-cluster1"].append("RUNNING")
self.states["j-cluster1"].append("RUNNING")
self.states["j-cluster1"].append("TERMINATING")
self.states["j-cluster1"].append("TERMINATED")
# Then, both cluster2a and cluster2b are queried
self.states["j-cluster2a"] = []
self.states["j-cluster2b"] = []
self.states["j-cluster2a"].append("STARTING") # a
self.states["j-cluster2b"].append("STARTING") # b
self.states["j-cluster2a"].append("BOOTSTRAPPING") # a
self.states["j-cluster2b"].append("BOOTSTRAPPING") # b
self.states["j-cluster2a"].append("RUNNING") # a
self.states["j-cluster2b"].append("RUNNING") # b
self.states["j-cluster2a"].append("RUNNING") # a
self.states["j-cluster2b"].append("RUNNING") # b
self.states["j-cluster2a"].append("TERMINATING") # a
self.states["j-cluster2b"].append("TERMINATED_WITH_ERRORS") # b
# We expect that no more calls are to be made, even though cluster3
# hasn't even started and cluster2a isn't yet terminated.
self.emr_client.describe_cluster.side_effect = self._describe
self.emr_client.run_job_flow.side_effect = self._create
self._verify_job_flow_execution(failure=True)
def test_execute_stops_on_cluster_creation_failure(self):
self.clusters = ["cluster1"]
# Note that self.states is empty since there's nothing to poke.
self.emr_client.run_job_flow.side_effect = self._fail_to_create
self._verify_job_flow_execution(failure=True)
def _verify_job_flow_execution(self, failure=False):
# Mock out the emr_client creator
emr_session_mock = MagicMock()
emr_session_mock.client.return_value = self.emr_client
self.boto3_session = MagicMock(return_value=emr_session_mock)
        with patch('boto3.session.Session', self.boto3_session):
            if failure:
                with self.assertRaises(AirflowException):
                    self._execute_and_verify_expectations()
            else:
                self._execute_and_verify_expectations()
def _execute_and_verify_expectations(self):
created = len(self.clusters)
poked = sum([len(cs) for cs in self.states.values()])
self.emr_run_job_flows.execute(None)
self.assertEqual(self.emr_client.run_job_flow.call_count, created)
self.assertEqual(self.emr_client.describe_cluster.call_count, poked)
# Convenience methods for describing clusters
def _running_cluster(self, name, state="RUNNING"):
return {
'Cluster': {
'Applications': [
{'Name': 'Spark', 'Version': '1.6.1'}
],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': name,
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': state,
'StateChangeReason': {},
'Timeline': {
'CreationDateTime': datetime.datetime(
2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())}
},
'Tags': [
{'Key': 'app', 'Value': 'analytics'},
{'Key': 'environment', 'Value': 'development'}
],
'TerminationProtected': False,
'VisibleToAllUsers': True
},
'ResponseMetadata': {
'HTTPStatusCode': 200,
'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'
}
}
def _terminated_cluster(self, name):
return {
'Cluster': {
'Applications': [
{'Name': 'Spark', 'Version': '1.6.1'}
],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': name,
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': 'TERMINATED',
'StateChangeReason': {},
'Timeline': {
'CreationDateTime': datetime.datetime(
2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())}
},
'Tags': [
{'Key': 'app', 'Value': 'analytics'},
{'Key': 'environment', 'Value': 'development'}
],
'TerminationProtected': False,
'VisibleToAllUsers': True
},
'ResponseMetadata': {
'HTTPStatusCode': 200,
'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'
}
}
def _failed_cluster(self, name):
return {
'Cluster': {
'Applications': [
{'Name': 'Spark', 'Version': '1.6.1'}
],
'AutoTerminate': True,
'Configurations': [],
'Ec2InstanceAttributes': {'IamInstanceProfile': 'EMR_EC2_DefaultRole'},
'Id': name,
'LogUri': 's3n://some-location/',
'Name': 'PiCalc',
'NormalizedInstanceHours': 0,
'ReleaseLabel': 'emr-4.6.0',
'ServiceRole': 'EMR_DefaultRole',
'Status': {
'State': 'TERMINATED_WITH_ERRORS',
'StateChangeReason': {
'Code': 'BOOTSTRAP_FAILURE',
'Message': 'Master instance (i-0663047709b12345c) failed attempting to '
'download bootstrap action 1 file from S3'
},
'Timeline': {
'CreationDateTime': datetime.datetime(
2016, 6, 27, 21, 5, 2, 348000, tzinfo=tzlocal())}
},
'Tags': [
{'Key': 'app', 'Value': 'analytics'},
{'Key': 'environment', 'Value': 'development'}
],
'TerminationProtected': False,
'VisibleToAllUsers': True
},
'ResponseMetadata': {
'HTTPStatusCode': 200,
'RequestId': 'd5456308-3caa-11e6-9d46-951401f04e0e'
}
}
def _describe(self, *args, **kwargs):
name = kwargs['ClusterId']
state = self.states[name].pop(0)
return {
'TERMINATED': self._terminated_cluster(name),
'TERMINATED_WITH_ERRORS': self._failed_cluster(name),
}.get(state, self._running_cluster(name, state))
def _fail_to_create(self, *args, **kwargs):
return {
'ResponseMetadata': {
'HTTPStatusCode': 400
}
}
def _create(self, *args, **kwargs):
return {
'ResponseMetadata': {
'HTTPStatusCode': 200
},
'JobFlowId': 'j-' + kwargs['Name']
}
if __name__ == '__main__':
unittest.main()
|
#! /usr/bin/env python2
__author__ = "Jordan Prince Tremblay, Ismael Balafrej, Felix Labelle, Felix Martel-Denis, Eric Matte, Adam Letourneau, Julien Chouinard-Beaupre, Antoine Mercier-Nicol"
__copyright__ = "Copyright 2018, DEVINE Project"
__credits__ = ["Simon Brodeur", "Francois Ferland", "Jean Rouat"]
__license__ = "BSD"
__version__ = "1.0.0"
__email__ = "devine.gegi-request@listes.usherbrooke.ca"
__status__ = "Production"
import json
import unittest
import rospy
from devine_common import ros_utils
import devine_tests.segmentation.segmentation_helper as helper
class TestSegmentationRate(unittest.TestCase):
""" Test to validate segmentation rates """
def test_segmentation_rate_on_two_frames(self):
""" Loads images and posts the corresponding segmentation rates """
test_images = helper.load_test_images(__file__, ros_utils.get_fullpath(__file__, "test.json"))
for image in test_images:
self.assertFalse(rospy.is_shutdown())
rospy.loginfo("### Test image %s ###", image[helper.FILENAME])
self.segmentation_should_find_most_of_the_objects(image)
def segmentation_should_find_most_of_the_objects(self, test_image):
""" Evaluates segmentation rate for a single image """
expected_objects = test_image[helper.EXPECTED_OBJECTS]
data = helper.segment_image(test_image)
rospy.logwarn("Message timestamp: %s", json.loads(data.data)['timestamp'])
objects_found = helper.get_segmented_objets(data)
missed_objects = helper.get_missed_objects(expected_objects, objects_found)
other_found_objects = helper.get_missed_objects(objects_found, expected_objects)
rospy.loginfo("Number of missed detections: %d", len(missed_objects))
rospy.loginfo("Number of other detected objects: %d", len(other_found_objects))
objects_missed_ratio = float(len(missed_objects)) / len(expected_objects)
rospy.loginfo("Percentage of objects missed: %.02f", objects_missed_ratio)
if objects_missed_ratio >= 0.5:
rospy.logwarn("The ratio of objects missed if greater than 50%.")
self.assertTrue(objects_missed_ratio < 0.7, "The object missed ratio is too high.")
if __name__ == "__main__":
rospy.init_node("test")
unittest.main()
|
#!/usr/bin/env python3
import os
import sys
from subprocess import check_call, check_output, Popen
from glob import glob
import numpy as np  # needed by build_out_lists() below
EMAIL_ADDRESS = 'dummy@liamg.com'
# if this is true, then the sbatch files won't actually be submitted
dryrun = False
# if this is true, then existing output files will be overwritten
# this has no effect on consistent_trees -- it'll be run regardless
overwrite = False
def find_run_name(path):
possibilities = path.split('/')
for pos in possibilities:
if 'elvis' in pos:
            return pos.replace('m12_elvis_', '')  # remove the prefix (strip() would drop characters, not the prefix)
if 'm09' in pos or 'm10' in pos or 'm11' in pos or 'm12' in pos:
return pos
return path
# options for the jobs that we'll actually submit
run_name = find_run_name(os.getcwd())
# number of jobs to run at once and number per node for AHF halo finding:
ahf_nnodes = 10
ahf_ntasks_per_node = 4
ahf_ompthreads = 8
ahf_jobname = 'ahf-'+run_name
ahf_ntasks = ahf_nnodes * ahf_ntasks_per_node
# number of jobs to run at once and number per node for building the AHF trees:
mtree_ntasks = 96
mtree_ntasks_per_node = 24
mtree_jobname = 'mtree-'+run_name
ctrees_jobname = 'ctree-'+run_name
# sbatch options that are in common across all steps
sbatch_options = {
'mail-user': EMAIL_ADDRESS, # will do begin, end, and fail
'partition': 'cca',
'time': '168:00:00',
'exclusive': '',
}
# file names and paths:
simulation_directory = '../..'
snapshot_inbase = simulation_directory+'/output/snapdir_'
home = os.path.expanduser('~')+'/code/halo'
# input parameter files:
# file that defines all the AHF inputs (other than fname, outname, file type)
ahf_param_base = home+'/AHF.input'
ctree_param_base = home+'/ctree-input.cfg'
ahf_jobfile = 'ahf_commands.sh'
ahf_sbatchfile = 'run_ahf.slurm'
mtree_jobfile = 'mtree_commands.sh'
mtree_sbatchfile = 'run_mtree.slurm'
ctree_sbatchfile = 'run_ctrees.slurm'
# output file paths:
# overall output directory
catalog_directory = './catalog'
# directory for MergerTree outputs
mtree_directory = catalog_directory + '/ahf-trees'
# directories for consistent_trees outputs
hlist_directory = catalog_directory + '/hlists'
tree_directory = catalog_directory + '/trees'
tree_outputs_directory = catalog_directory + '/outputs'
# directory for disBatch log files:
disbatch_log_directory = 'disbatch-logs'
# paths that hopefully shouldn't need to be changed:
python = 'python3'
perl = 'perl'
# path to executables
ahf_exec = home + '/ahf-v1.0-094/bin/AHF-v1.0-094'
mtree_exec = home + '/ahf-v1.0-094/bin/MergerTree'
ctree_directory = home + '/consistent-trees'
# or remove the np for periodic boundaries
tree_script = "do_merger_tree_np.pl"
hlist_script = "halo_trees_to_catalog.pl"
# path to scripts to convert hdf5->gadget and AHF_halos + mtree_idx->out_xxx.list
temp_gbin_dir = 'temp-gbins'
hdf5_to_gbin = home + '/convert_ptype_to_gbin.py'
this_script = os.path.realpath(__file__)
# directories for AHF
ahf_halo_directory = catalog_directory + '/halos'
ahf_input_directory = catalog_directory + '/inputs'
ahf_log_directory = catalog_directory + '/logs'
ahf_parameter_directory = catalog_directory + '/parameters'
ahf_particle_directory = catalog_directory + '/particles'
ahf_profile_directory = catalog_directory + '/profiles'
ahf_substructure_directory = catalog_directory + '/substructure'
ahf_std_directory = catalog_directory + '/std-logs'
#####################
# define some useful functions
def checkdir(d):
if not os.path.isdir(d):
os.mkdir(d)
def write_slurm(f, options, job_name, ntasks, ntasks_per_node,
disbatch_command_file=None, disbatch_log_base=None,
pre_disbatch_commands=[], post_disbatch_commands=[]):
def write_sbatch_line(key, val=''):
string = '#SBATCH --'+key
if len(val):
string += '='+val
f.write(string+'\n')
f.write('#!/usr/bin/env bash\n')
f.write('\n')
write_sbatch_line('job-name', job_name)
write_sbatch_line('ntasks', str(ntasks))
write_sbatch_line('ntasks-per-node', str(ntasks_per_node))
for key, val in options.items():
write_sbatch_line(key, val)
if 'mail-user' in options:
write_sbatch_line('mail-type', 'begin')
write_sbatch_line('mail-type', 'end')
write_sbatch_line('mail-type', 'fail')
f.write('\n')
# set the umask -- files created should be readable for group and other
f.write('umask 0022\n\n')
# write any pre disBatch commands (e.g. export OMP_NUM_THREADS)
for cmd in pre_disbatch_commands:
f.write('{}\n'.format(cmd))
# call disBatch:
if disbatch_command_file is not None:
# make sure we've loaded the module we need
f.write('module load disBatch\n')
if disbatch_log_base is not None:
f.write(
'disBatch.py -p {} {}\n\n'.format(disbatch_log_base, disbatch_command_file))
        else:
            f.write('disBatch.py {}\n\n'.format(disbatch_command_file))
# write any post disBatch commands (e.g. moving files or calling do_ctrees or )
for cmd in post_disbatch_commands:
f.write('{}\n'.format(cmd))
# couple blank lines at the end for safety
f.write('\n\n')
return
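# For reference, the header that write_slurm() produces looks roughly like this
# (job name and task counts are illustrative; they come from the arguments and
# the sbatch_options dict defined above):
#   #!/usr/bin/env bash
#   #SBATCH --job-name=ahf-<run_name>
#   #SBATCH --ntasks=40
#   #SBATCH --ntasks-per-node=4
#   #SBATCH --mail-user=dummy@liamg.com
#   #SBATCH --partition=cca
#   #SBATCH --time=168:00:00
#   #SBATCH --exclusive
#   #SBATCH --mail-type=begin
#   #SBATCH --mail-type=end
#   #SBATCH --mail-type=fail
# followed by `umask 0022`, any pre-disBatch commands, the disBatch call, and
# any post-disBatch commands.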
# make sure all our definitions are ok
# don't want to output in current directory
assert catalog_directory != ""
# stuff we need for AHF to work
assert os.path.isfile(ahf_param_base)
assert os.path.isfile(hdf5_to_gbin)
assert os.path.isfile(ahf_exec)
# stuff we need for MergerTree to work
assert os.path.isfile(mtree_exec)
# stuff we need for consistent trees to work
assert os.path.isdir(ctree_directory)
assert os.path.isfile(ctree_directory+'/'+tree_script)
assert os.path.isfile(ctree_directory+'/'+hlist_script)
assert os.path.isfile(ctree_param_base)
all_output_directories = [temp_gbin_dir, catalog_directory,
ahf_halo_directory, ahf_input_directory, ahf_log_directory,
ahf_parameter_directory, ahf_substructure_directory, disbatch_log_directory,
ahf_particle_directory, ahf_profile_directory, ahf_std_directory,
mtree_directory, hlist_directory, tree_directory, tree_outputs_directory]
# make sure the output directories exist
for directory in all_output_directories:
checkdir(directory)
def run_ahf():
'''
run ahf using the paths defined above
creates the .AHF_input files based on an input parameter file
then creates a list of commands for disBatch.py to handle
then creates a SLURM file to run those commands, and submits it
'''
snapshots = glob(snapshot_inbase+"*")
if len(snapshots) == 0:
raise OSError("No snapshots found! Exiting")
snapshots.sort()
# now halo find on all the snapshots using disBatch
    # read in the lines in the parameter file
f = open(ahf_param_base, 'r')
paramlines = [line for line in f]
f.close()
print("Setting up halo finding on {} snapshots".format(len(snapshots)))
# loop over the snapshots, create the input files, and create the commands to run
commands = []
for ii in range(len(snapshots)):
snap = snapshots[ii]
snapnum = snap.split('_')[-1]
if '.' in snapnum:
snapnum = snapnum.split('.')[0]
snapnum = int(snapnum)
if os.path.isdir(snap):
conversion_inbase = snap + '/'
doing_dir = True
sname = glob(conversion_inbase +
'/*')[0].split('/')[-1].split('.')[0]
ahf_filetype = '61'
else:
conversion_inbase = snap
doing_dir = False
sname = snap.split('/')[-1].rsplit('.', 1)[0]
ahf_filetype = '60'
# build the AHF.input file -- only ic_filename, ic_filetype, and outfile_prefix get edited
ahf_inbase = temp_gbin_dir + '/' + sname
outfile_exists = len(glob(catalog_directory + '/' + sname + '*AHF_halos')
) or len(glob(ahf_halo_directory + '/' + sname + '*AHF_halos'))
# output file exists and we don't want to overwrite
if outfile_exists and (overwrite == False):
print(
"Already have a halo file for {0}; skipping it.".format(sname))
continue
if doing_dir and not ahf_inbase.endswith('.'):
ahf_inbase += '.'
paramf = ahf_input_directory+'/{0}'.format(sname)
with open(paramf, 'w') as param:
for l in paramlines:
if l.startswith('ic_filename'):
l = 'ic_filename = '+ahf_inbase+'\n'
elif l.startswith('outfile_prefix'):
l = 'outfile_prefix = '+catalog_directory + '/' + sname + '\n'
elif l.startswith('ic_filetype'):
l = 'ic_filetype = '+ahf_filetype+'\n'
param.write(l)
logfile = ahf_std_directory+'/'+sname+'.stdout'
# convert, run, clean up
commands.append(r'{} {} {} {} && {} {} &> {} && rm {}*'.format(
python, hdf5_to_gbin, conversion_inbase, temp_gbin_dir,
ahf_exec, paramf, logfile,
ahf_inbase))
# now dump the commands to a file:
with open(ahf_jobfile, 'w') as jobf:
jobf.write('\n'.join(commands))
# create the sbatch file to do the actual work:
if ahf_ompthreads > 0:
pre_disbatch_commands = ['export OMP_NUM_THREADS='+str(ahf_ompthreads)]
else:
pre_disbatch_commands = []
post_disbatch_commands = [
'mv ' + catalog_directory + '/*.AHF_halos ' + ahf_halo_directory + '/',
'mv ' + catalog_directory + '/*.log ' + ahf_log_directory + '/',
'mv ' + catalog_directory + '/*.parameter ' + ahf_parameter_directory + '/',
'mv ' + catalog_directory + '/*.AHF_profiles ' + ahf_profile_directory + '/',
'mv ' + catalog_directory + '/*.AHF_particles ' + ahf_particle_directory + '/',
'mv ' + catalog_directory + '/*.AHF_substructure ' +
ahf_substructure_directory + '/',
python+' '+this_script+' 2']
with open(ahf_sbatchfile, 'w') as sbatchf:
write_slurm(sbatchf, sbatch_options, ahf_jobname, ahf_ntasks, ahf_ntasks_per_node,
disbatch_command_file=ahf_jobfile, disbatch_log_base=disbatch_log_directory+'/ahf',
pre_disbatch_commands=pre_disbatch_commands,
post_disbatch_commands=post_disbatch_commands)
print("Submitting {} to do the halo finding ({} tasks at once with {} per node)".format(
ahf_jobfile, ahf_ntasks, ahf_ntasks_per_node))
if dryrun is not True:
job_info = check_output(['sbatch', ahf_sbatchfile])
job_id = int(job_info.strip().split()[-1])
print("When job {} finishes, it should resubmit this script ({}) with an argument of 2".format(
job_id, this_script))
else:
print("Actually, you'll have to do the submission of {} yourself...".format(
ahf_jobfile))
return -1
def run_mtree():
'''
run MergerTree using the paths defined above. AHF must have been run first
creates a list of commands for disBatch.py to handle that are MergerTree < 2 "input file ii" "input file ii+1" "output file"
then creates a SLURM file to run those commands, and submits it
'''
# lambda function to get snapshot name from the AHF particle files
def snapname(fname): return fname.split('/')[-1].split('.z')[0]
particle_files = glob(ahf_particle_directory+'/*particles')
particle_files.sort()
if not len(particle_files):
raise OSError("Didn't find any particle files in {}".format(
ahf_particle_directory))
commands = []
for ii in range(len(particle_files)-1):
file1 = particle_files[ii]
file2 = particle_files[ii+1]
outfile = mtree_directory + '/' + \
snapname(file1)+'_to_'+snapname(file2)
if os.path.isfile(outfile+'_mtree_idx') and (overwrite == False):
print("Already have {}, so skipping {} to {}".format(
outfile, snapname(file1), snapname(file2)))
continue
commands.append(mtree_exec + ' <<< "2 ' +
file1+' '+file2+' '+outfile+'"')
with open(mtree_jobfile, 'w') as jobf:
jobf.write('\n'.join(commands))
# no pre commands necessary here; post is to rerun this script with arg 3
post_disbatch_commands = [python+' '+this_script+' 3']
with open(mtree_sbatchfile, 'w') as sbatchf:
write_slurm(sbatchf, sbatch_options, mtree_jobname, mtree_ntasks, mtree_ntasks_per_node,
disbatch_command_file=mtree_jobfile, disbatch_log_base=disbatch_log_directory+'/mtree',
post_disbatch_commands=post_disbatch_commands)
print("Submitting {} to run MergerTree ({} tasks at once with {} per node".format(
mtree_jobfile, mtree_ntasks, mtree_ntasks_per_node))
if dryrun is not True:
job_info = check_output(['sbatch', mtree_sbatchfile])
job_id = int(job_info.strip().split()[-1])
print("When job {} finishes it should resubmit this script {} with an argument of 3".format(
job_id, this_script))
return job_id
else:
print("Actually, you'll have to do the submission of {} yourself...".format(
mtree_sbatchfile))
return -1
def run_ctrees():
'''
    creates a job script that will
    1. build the out_xxx.list files (using this script)
    2. run consistent_trees,
    3. run the hlist script
'''
import gizmo_analysis
# call this script to make the out.lists (no need to worry about variables cause they're set internally)
commands = ['set -e']
commands.append('export PYTHONPATH=$HOME/stock-wetzel/src')
cmd = [python, this_script, 'out']
commands.append(' '.join(cmd))
    # need to load a snapshot for the cosmology + box size (though the latter is probably irrelevant for non-periodic boxes)
header = gizmo_analysis.gizmo_io.Read.read_header(
simulation_directory=simulation_directory)
Om = header['omega_matter']
Ol = header['omega_lambda']
h0 = header['hubble']
boxwidth = header['box.length/h']
cwd = os.getcwd()+'/'
# build the input file using absolute paths
cfgname = cwd+"ctrees.cfg"
with open(cfgname, 'w') as cfg, open(ctree_param_base, 'r') as incfg:
# Write the relevant file paths
cfg.write('SCALEFILE = '+cwd+catalog_directory+'/DescScale.txt\n')
cfg.write('INBASE = '+cwd+catalog_directory+'\n')
cfg.write('OUTBASE = '+cwd+tree_outputs_directory+'\n')
cfg.write('TREE_OUTBASE = '+cwd+tree_directory+'\n')
cfg.write('HLIST_OUTBASE = '+cwd+hlist_directory+'\n')
cfg.write('\n')
cfg.write('Om={} #Omega_Matter\n'.format(Om))
cfg.write('Ol={} #Omega_Lambda\n'.format(Ol))
cfg.write('h0={} #h0\n'.format(h0))
        cfg.write('BOX_WIDTH={} # box width\n'.format(boxwidth))
# write the rest of the lines, making sure to skip the lines I already wrote
for line in incfg:
l = line.strip()
            if l.split('=')[0].strip() in ['SCALEFILE', 'INBASE', 'OUTBASE', 'TREE_OUTBASE', 'HLIST_OUTBASE', 'Om', 'Ol', 'h0', 'BOX_WIDTH']:
continue
cfg.write(line)
# now cd to the ctrees directory and call the do_merger_tree.pl:
# call is: perl do_merger_tree_np.pl <consistent-trees directory> <consistent-trees config file>
commands.append('cd '+ctree_directory)
commands.append(' '.join([perl, tree_script, ctree_directory, cfgname]))
# now make the hlists:
# call is: perl halo_trees_to_catalog.pl <consistent-trees config file>
commands.append(' '.join([perl, hlist_script, cfgname]))
with open(ctree_sbatchfile, 'w') as sbatchf:
write_slurm(sbatchf, sbatch_options, ctrees_jobname, 1, 1,
pre_disbatch_commands=commands)
print("Submitting {} to build the out_xxx.list files, make the tree_0.0.0.dat file, and create the hlist files".format(
ctree_sbatchfile))
if dryrun is not True:
job_info = check_output(['sbatch', ctree_sbatchfile])
job_id = int(job_info.strip().split()[-1])
print("When job {} finishes, you should be able to create the HDF5 files with submit_rockstar_hdf5.py".format(job_id))
return job_id
else:
print("Actually, you'll have to do the submission yourself...")
return -1
def build_out_lists():
import gizmo_analysis
halofiles = glob(ahf_halo_directory+'/*_halos')
halofiles.sort()
idxfiles = glob(mtree_directory+'/*_mtree_idx')
idxfiles.sort()
assert len(halofiles) == len(idxfiles)+1
# load a snapshot to get information for the header: need Om, Ol, h, particle mass, box size
part = gizmo_analysis.gizmo_io.Read.read_snapshots(
species='dark', simulation_directory=simulation_directory, properties='mass')
boxlength_o_h = part.info['box.length/h']/1e3
Om = part.Cosmology['omega_matter']
Ol = part.Cosmology['omega_lambda']
h = part.Cosmology['hubble']
particle_mass = np.median(part['dark']['mass']) * h
del part
def load_scalefactor(index):
return gizmo_analysis.gizmo_io.Read.read_header(
snapshot_value=index, simulation_directory=simulation_directory)['scalefactor']
def make_header(colheads, scale):
header = ''
header += colheads + '\n'
header += 'a = {:7.6f}\n'.format(scale)
header += 'Om = {:7.6f}; Ol = {:7.6f}; h = {:7.6f}\n'.format(Om, Ol, h)
header += 'Particle mass: {:.5e} Msun/h\n'.format(particle_mass)
header += 'Box size: {:.6f} Mpc/h\n'.format(boxlength_o_h)
header += 'file created from AHF halo catalogs'
return header
scalelist = []
fnums = []
# want: #ID DescID Mass Vmax Vrms Radius Rs Np X Y Z VX VY VZ JX JY JZ Spin
# add on: Rmax, r2, sigV, cNFW
# if baryons, also add on Mstar, Mgas
# don't have: Jx, Jy, Jz, but use Lx, Ly, Lz; no Vrms, but can be zeros
with open(halofiles[-1], 'r') as f:
if 'gas' in f.readline():
baryons = True
print(
"Looks like you included baryons in the halo finding, so I'll carry Mgas etc. through")
else:
baryons = False
print("Don't see evidence of baryons in the halo finding")
# output columns (copied from Andrew's rockstar_io):
column_heads = "ID DescID M200b(Msun/h) Vmax Vrms R200b Rs Np "
column_heads += "X Y Z Vx Vy Vz Lx Ly Lz "
column_heads += "Spin rs_klypin M200b_tot Mvir M200c M500c M100m Xoff Voff "
column_heads += "Spin_Bullock b_to_a c_to_a A[x] A[y] A[z] "
column_heads += "b_to_a(500c) c_to_a(500c) A[x](500c) A[y](500c) A[z](500c) "
column_heads += "T/|U| M_pe_Behroozi M_pe_diemer Type "
column_heads += "SM Gas BH_mass m200b_highres m200b_lowres"
# don't have:
# M200b_tot (set = M200b), Mvir, M200c, M500c, M100m, A[x], A[y], A[z],
# anything at 500c, M_pe_Behroozi, M_pe_diemer, Voff
# Type, but that all seems to be 0 as far as I can tell
# calculate:
# rs_klypin from Rmax
# T/U from T and U
# m200b_highres and m200b_lowres from m200b and fMhires
iddt = int
for ii in range(len(halofiles)):
fname = halofiles[ii].split('/')[-1]
file_rshift = float(fname.split('z')[-1][:-len('.AHF_halos')])
file_scale = 1./(1+file_rshift)
snum = int(fname.split('_')[1].split('.z')[0])
outname = catalog_directory+'/out_{0:03}.list'.format(snum)
with open(halofiles[ii], 'r') as f:
lc = 0
for line in f:
lc += 1
if lc > 2:
break
if lc < 2 or (lc == 2 and line == ''):
print("! no halos in {0}!".format(halofiles[ii]))
continue
scale = load_scalefactor(snum)
rshift = (1./scale) - 1
assert (np.abs(rshift - file_rshift)/(0.5*(rshift+file_rshift)) < 0.01) \
or (np.abs(scale - file_scale)/(0.5*(scale+file_scale)) < 0.01)
# only save non-blank files to the DescScales file, so I don't try to run ctrees on them -- i.e. put this after the lc check
# but save files that I've already done, so put this before the overwrite/existence check
fnums.append(snum)
# use the higher precision scale factor from the snapshot file
scalelist.append(scale)
        if os.path.isfile(outname) and (not overwrite):
print("Already have {}; skipping it.".format(outname))
continue
# input columns, annoyingly starting at 1
# ID(1) hostHalo(2) numSubStruct(3) Mvir(4) npart(5)
# Xc(6) Yc(7) Zc(8) VXc(9) VYc(10) VZc(11) Rvir(12)
# Rmax(13) r2(14) mbp_offset(15) com_offset(16)
# Vmax(17) v_esc(18) sigV(19) lambda(20)
# lambdaE(21) Lx(22) Ly(23) Lz(24) b(25) c(26)
# Eax(27) Eay(28) Eaz(29) Ebx(30) Eby(31) Ebz(32) Ecx(33)
# Ecy(34) Ecz(35) ovdens(36) nbins(37) fMhires(38)
# Ekin(39) Epot(40) SurfP(41) Phi0(42) cNFW(43)
# n_gas(44) M_gas(45) lambda_gas(46) lambdaE_gas(47)
# Lx_gas(48) Ly_gas(49) Lz_gas(50) b_gas(51)
# c_gas(52) Eax_gas(53) Eay_gas(54) Eaz_gas(55)
# Ebx_gas(56) Eby_gas(57) Ebz_gas(58) Ecx_gas(59)
# Ecy_gas(60) Ecz_gas(61) Ekin_gas(62) Epot_gas(63)
# n_star(64) M_star(65) lambda_star(66) lambdaE_star(67)
# Lx_star(68) Ly_star(69) Lz_star(70) b_star(71)
# c_star(72) Eax_star(73) Eay_star(74) Eaz_star(75)
# Ebx_star(76) Eby_star(77) Ebz_star(78) Ecx_star(79)
# Ecy_star(80) Ecz_star(81) Ekin_star(82) Epot_star(83)
hal_prog_ids = np.loadtxt(
halofiles[ii], unpack=True, usecols=0, dtype=iddt)
if baryons:
mass, vmax, veldisp, radius, rs, numpart, \
x, y, z, vx, vy, vz, lx, ly, lz, \
spin_peebles, rmax, xoff, \
Spin_Bullock, b_to_a, c_to_a, \
kinetic, potential, mstar, mgas, \
fMhires = np.loadtxt(halofiles[ii], unpack=True,
usecols=[3, 16, 18, 11, 13, 4,
5, 6, 7, 8, 9, 10, 21, 22, 23,
20, 12, 15,
19, 24, 25,
38, 39, 64, 44,
37])
else:
mass, vmax, veldisp, radius, rs, numpart, \
x, y, z, vx, vy, vz, lx, ly, lz, \
spin_peebles, rmax, xoff, \
Spin_Bullock, b_to_a, c_to_a, \
kinetic, potential, \
fMhires = np.loadtxt(halofiles[ii], unpack=True,
usecols=[3, 16, 18, 11, 13, 4,
5, 6, 7, 8, 9, 10, 21, 22, 23,
20, 12, 15,
19, 24, 25,
38, 39,
37])
mstar = np.zeros(mass.size, dtype=int)
mgas = np.zeros(mass.size, dtype=int)
numpart = numpart.astype('int')
x /= 1e3
y /= 1e3
z /= 1e3
rs_klypin = rmax/2.1626
T_over_U = kinetic / np.abs(potential)
mhighres = fMhires * mass
mlowres = (1.0 - fMhires) * mass
# lots of columns I don't have in AHF unfortunately
fill_array = np.empty(hal_prog_ids.size, dtype=int)
fill_array.fill(-1)
zero_array = np.zeros(hal_prog_ids.size, dtype=int)
hal_desc_ids = np.empty_like(hal_prog_ids)
hal_desc_ids.fill(-1)
# leave as -1 in the last timestep; no descendants then
if halofiles[ii] != halofiles[-1]:
tree_prog_ids, tree_desc_ids = np.loadtxt(
idxfiles[ii], unpack=True, dtype=iddt)
# indices of the halos that have descendants in the tree -- every halo in the tree is in the catalogs, but not vice versa:
hal_indices = np.where(np.isin(hal_prog_ids, tree_prog_ids))[0]
# check that everything matches up:
assert (hal_prog_ids[hal_indices] == tree_prog_ids).all()
# now fill in the descendants (where I have them) using the trees
hal_desc_ids[hal_indices] = tree_desc_ids
header = make_header(column_heads, scale)
output_data = np.column_stack((
hal_prog_ids, hal_desc_ids, mass, vmax, veldisp, radius, rs, numpart,
x, y, z, vx, vy, vz, lx, ly, lz,
# fills are the masses I don't have and voff
spin_peebles, rs_klypin, mass, fill_array, fill_array, fill_array, fill_array, xoff, fill_array,
# fills are the allgood shape vectors
Spin_Bullock, b_to_a, c_to_a, fill_array, fill_array, fill_array,
# fills are shape etc at 500c
fill_array, fill_array, fill_array, fill_array, fill_array,
# fills are smoothed masses, zeros are type
T_over_U, fill_array, fill_array, zero_array,
mstar, mgas, zero_array, mhighres, mlowres)) # zeros are BH_mass
# cast everything to a string before saving it
np.savetxt(outname, output_data, header=header, comments='#', fmt='%s')
print("Wrote {0}".format(outname))
# and finally save the scale factors of the halo catalogs
    np.savetxt(catalog_directory+'/DescScale.txt',
np.column_stack((fnums, scalelist)), fmt="%d %5f")
# steps are:
if __name__ == '__main__':
'''
this is a python script to automate the process of
running the amiga halo finder on a GIZMO/FIRE simulation,
then running the MergerTree utility in Amiga Halo Finder
(which creates links between timesteps based on the
particle IDs in the halos), then massage those outputs
into a format that Consistent Trees can handle, then
call consistent trees on those massaged outputs.
everything except ctrees is automatically parallelized,
since each timestep (or in the case of ctrees, each
timestep1 -> timestep2 link) is independent of
everything else. set the options at the top of the
script to handle how much parallelization you want,
and to point to the appropriate files.
you'll need:
* AHF, of course, compiled with whatever options you want
* MergerTree
* consistent-trees
    * disBatch, which handles the spreading out of the jobs
* ahf_param_base -- a AHF.input file with the parameters you
want to use set. input name, output name, and file
types will be set for each snapshot, but all other
        parameters will be kept the same throughout.
* ctree_param_base -- a cfg file for consistent trees with
the options that you want to use. as with ahf_param_base,
filenames will be set automatically; cosmology and box
size will be as well. all other options will be copied
over though.
* hdf5_to_gbin -- a script that converts the GIZMO HDF5 snapshots
into gadget binary snapshots. I believe this step can be
removed, as the latest AHF versions have raw support for
HDF5 files. However, take a look in the base repository
for an old and ugly script that can do this conversion
for a single particle type (e.g. if you want to run AHF
on just dark matter, which is a pretty common use case)
Once you have all your files in place and all your arguments set
appropriately at the top of this script, run it with
$ python run_ahf_analysis.py 1
That'll create the AHF.input files and make a list of commands needed
to do the halo finding on all of the snapshots. It'll then create and
call an sbatch file that uses disBatch to run that list of commands in
    parallel. Once disBatch is done (i.e. all the halo finding has been run),
this script will call itself again with an argument of 2, i.e. the
equivalent of doing
$ python run_ahf_analysis.py 2
That'll do the same (create a list of commands and an sbatch file to run them
    via disBatch), but for the MergerTree step. Again, once it's done, it'll
call this script again with an argument of 3:
$ python run_ahf_analysis.py 3
That step will create (then submit) an sbatch file that builds the out.lists
then runs consistent trees, then you should be done!
'''
if len(sys.argv) != 2:
raise OSError(
"Must call with a step of 1, 2, or 3 (or ahf, mtree, ctrees) to run the appropriate step")
step_todo = sys.argv[1]
# step 1 / ahf: run AHF
if step_todo == '1' or step_todo == 'ahf':
run_ahf()
# step 2 / mtree: run MergerTree:
elif step_todo == '2' or step_todo == 'mtree':
run_mtree()
# step 3 / ctrees: run consistent_trees
elif step_todo == '3' or step_todo == 'ctrees':
run_ctrees()
# step 3.5 / out: build the out.lists (should be run on a compute node)
elif step_todo == '3.5' or step_todo == 'out':
build_out_lists()
else:
raise OSError("Don't know how to do step {}".format(step_todo))
|
from copy import deepcopy
from unittest import TestCase
from unittest.mock import patch, MagicMock
from libcloud.compute.types import Provider
from libcloud.compute.drivers.ec2 import EC2NodeDriver
from host_provider.providers.aws import AWSProvider, OfferingNotFoundError
from host_provider.credentials.aws import CredentialAddAWS
from .fakes.ec2 import LIST_SIZES, FAKE_CREDENTIAL, FAKE_HOST
from .fakes.base import FAKE_TAGS
ENVIRONMENT = "dev"
ENGINE = "redis"
class TestCredentialAWS(TestCase):
def setUp(self):
self.provider = AWSProvider(ENVIRONMENT, ENGINE)
self.provider.wait_state = MagicMock()
def test_provider_name(self):
self.assertEqual(Provider.EC2, self.provider.get_provider())
def test_get_credential_add(self):
self.assertEqual(
self.provider.get_credential_add(), CredentialAddAWS
)
def test_validate_credential(self):
invalid_content = deepcopy(FAKE_CREDENTIAL)
invalid_content.update({'mimOfSubnets': "3"})
success, error = self.provider.credential_add(invalid_content)
self.assertFalse(success)
self.assertEqual(error, "Must be 3 active subnets at least")
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
def test_build_client(self, content):
self.build_credential_content(content)
self.assertEqual(
type(self.provider.build_client()), EC2NodeDriver
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.list_sizes',
new=MagicMock(return_value=LIST_SIZES)
)
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.create_node'
)
@patch(
'host_provider.providers.aws.CredentialAWS.zone'
)
@patch(
'host_provider.credentials.aws.CredentialAWS.collection_last'
)
@patch('dbaas_base_provider.team.TeamClient.make_labels',
new=MagicMock(return_value={})
)
def test_create_host_without_environment_of_teams(
self, collection_last, zone, create_node, credential_content
):
self.create_host_tests(
collection_last, create_node, credential_content, zone
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.list_sizes',
new=MagicMock(return_value=LIST_SIZES)
)
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.create_node'
)
@patch(
'host_provider.providers.aws.CredentialAWS.zone'
)
@patch(
'host_provider.credentials.aws.CredentialAWS.collection_last'
)
@patch('dbaas_base_provider.team.TeamClient.make_labels',
new=MagicMock(return_value={}))
def test_create_host_without_teams(self, collection_last, zone,
create_node, credential_content):
self.create_host_tests(
collection_last, create_node, credential_content, zone
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.list_sizes',
new=MagicMock(return_value=LIST_SIZES)
)
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.create_node'
)
@patch(
'host_provider.providers.aws.CredentialAWS.zone'
)
@patch(
'host_provider.credentials.aws.CredentialAWS.collection_last'
)
@patch('dbaas_base_provider.team.TeamClient.make_labels',
new=MagicMock(return_value=FAKE_TAGS)
)
def test_create_host_with_teams(self, collection_last, zone, create_node,
credential_content):
self.create_host_tests(
collection_last,
create_node,
credential_content,
zone,
has_tags=True
)
def build_credential_content(self, content, **kwargs):
values = deepcopy(FAKE_CREDENTIAL)
values.update(kwargs)
content.return_value = values
@patch(
'host_provider.providers.aws.CredentialAWS.get_content',
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.list_sizes',
return_value=LIST_SIZES
)
def test_offering(self, sizes_mock, content):
self.build_credential_content(content)
result = self.provider.offering_to(cpu=1, memory=512)
self.assertEqual(1, result.id)
self.assertEqual(1, result.extra['cpu'])
self.assertEqual(512, result.ram)
result = self.provider.offering_to(cpu=2, memory=1024)
self.assertEqual(3, result.id)
self.assertEqual(2, result.extra['cpu'])
self.assertEqual(1024, result.ram)
@patch(
'host_provider.providers.aws.CredentialAWS.get_content',
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.list_sizes',
return_value=LIST_SIZES
)
def test_offering_not_found(self, sizes_mock, content):
self.build_credential_content(content)
with self.assertRaises(OfferingNotFoundError):
self.provider.offering_to(cpu=99, memory=999)
def create_host_tests(
self, collection_last, create_node, content, zone, **kwargs
):
collection_last.find_one.return_value = []
self.build_credential_content(content, **kwargs)
zone = "fake_subnet_id_2"
name = "infra-01-123456"
group = "infra123456"
self.provider.create_host(1, 1024, name, group, zone)
project = content.return_value.get("projectid", None)
if project:
project = self.provider.BasicInfo(id=project)
expected_tags = FAKE_TAGS if kwargs.get('has_tags') else {}
create_node.assert_called_once_with(
name=name,
image=self.provider.BasicInfo('fake_so_image_id'),
ex_keyname='elesbom',
size=LIST_SIZES[1],
ex_security_group_ids=['fake_security_group_id'],
ex_subnet=self.provider.BasicInfo('fake_subnet_id_2'),
ex_metadata=expected_tags
)
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver',
)
def test_start(self, node_driver, content):
self.build_credential_content(content)
self.provider.start(FAKE_HOST)
node_driver().ex_start_node.assert_called_once_with(
self.provider.BasicInfo('fake_identifier')
)
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.ex_stop_node',
)
def test_stop(self, ex_stop, content):
self.build_credential_content(content)
identifier = "fake-uuid-ec2-stac"
self.provider.stop(identifier)
ex_stop.assert_called_once_with(self.provider.BasicInfo(identifier))
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
@patch(
'libcloud.compute.drivers.ec2.EC2NodeDriver.destroy_node',
)
def test_destroy_aws(self, destroy_node, content):
self.build_credential_content(content)
identifier = "fake-uuid-cloud-stac"
self.provider._destroy(identifier)
destroy_node.assert_called_once_with(
self.provider.BasicInfo(identifier)
)
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
@patch(
'host_provider.providers.aws.CredentialAWS.collection_last'
)
def test_all_nodes_deleted(self, collection_last, content):
self.build_credential_content(content)
group = "fake123456"
self.provider._all_node_destroyed(group)
collection_last.delete_one.assert_called_once_with({
"environment": self.provider.credential.environment, "group": group
})
@patch(
'host_provider.providers.aws.CredentialAWS.get_content'
)
@patch(
'host_provider.providers.aws.AWSProvider._destroy'
)
@patch(
'host_provider.providers.aws.AWSProvider._all_node_destroyed'
)
@patch(
'host_provider.providers.base.Host'
)
def test_destroy(self, host, all_node_destroyed, destroy, content):
self.build_credential_content(content)
host.filter.return_value = [1]
group = "fake123456"
identifier = "fake-uuid-cloud-stac"
self.provider.destroy(group, identifier)
host.filter.assert_called_once_with(group=group)
destroy.assert_called_once_with(identifier)
all_node_destroyed.assert_called_once_with(group)
|
"""file_renamer: module for renaming files"""
from pathlib import Path
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING: # pragma: no cover
from file_renamer.src.main import MainPage
class FileRenamer:
"""A class for renaming files"""
def __init__(self, kivy_gui: "MainPage") -> None:
"""Initialize class"""
super().__init__()
# Attributes pulled/massaged from kivy GUI
self.output_row = kivy_gui.row_5
self.set_progressbar_max = kivy_gui.set_progressbar_max
self.update_progress_bar_val = kivy_gui.update_progress_bar_val
self.message = kivy_gui.message
self.prepend = (
kivy_gui.prepend_filename_input.text.strip()
.replace(" ", "_")
.replace(".", "")
)
self.extensions = self.get_extensions(kivy_gui.extensions_input.text.lower())
self.folder_loc_msg = kivy_gui.folder_loc_msg
self.folder_loc = self.get_path(kivy_gui.folder_loc_label.text)
# Attributes calculated by `rename_files()`
self.relevant_files: list[Path] = []
self.total_relevant_files = 0
self.padding = 0
@staticmethod
def get_extensions(extensions_string: Optional[str]) -> list[str]:
"""Get extensions in list from string"""
if not extensions_string:
return []
extensions = extensions_string.strip().split(",")
return [f".{ext.strip(' .')}" for ext in extensions]
def get_path(self, dir_path: str) -> Optional[Path]:
"""Get path to file"""
if dir_path == self.folder_loc_msg:
return None
return Path(dir_path)
def check_inputs(self) -> bool:
"""Check inputs are input properly, Return True if errors found"""
errors = []
if not self.prepend:
errors.append("Add filename prepend!")
if not self.extensions:
errors.append("Add affected extensions!")
if not self.folder_loc:
errors.append("Select a folder!")
if not errors:
return False
error_msg = " ".join(errors)
self.message(error_msg)
return True
def list_relevant_files(self) -> int:
"""Create list of all paths with relevant file extensions in folder"""
assert isinstance(self.folder_loc, Path)
for path in self.folder_loc.iterdir():
if any(str(path).lower().endswith(ext) for ext in self.extensions):
self.relevant_files.append(path)
self.relevant_files.sort()
self.total_relevant_files = len(self.relevant_files)
self.set_progressbar_max(self.total_relevant_files)
return self.total_relevant_files
def get_padding(self) -> None:
"""Get padding size from total_relevant_files count"""
remaining_files = float(self.total_relevant_files)
while remaining_files >= 1:
self.padding += 1
remaining_files /= 10
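    # Illustrative note (not part of the original class): get_padding() simply counts the
    # digits of total_relevant_files, e.g. 250 files -> padding 3, so the first renamed
    # file becomes "<prepend>_001<ext>".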
def rename_files(self) -> None:
"""Main function for renaming files"""
if self.check_inputs():
return
if not self.list_relevant_files():
self.message("No files with provided file extension(s) found in folder!")
return
self.get_padding()
for file_num, path in enumerate(self.relevant_files, start=1):
extension = path.suffix
directory = path.parent
file_num_str = str(file_num).zfill(self.padding)
new_name = f"{self.prepend}_{file_num_str}{extension}"
new_filepath = Path(directory, new_name)
path.rename(new_filepath)
self.update_progress_bar_val(file_num)
self.message(f"Done! Renamed {self.total_relevant_files:,} files!")
|
"""Automate the Boring Stuff didn't finish the tic-tac-toe program in its
dictionaries section. Someone had to answer the call...
...It shouldn't have been me."""
import sys, time
# we'll just leave this here
ttt_board = {'top-L': ' ', 'top-M': ' ', 'top-R': ' ',
'mid-L': ' ', 'mid-M': ' ', 'mid-R': ' ',
'low-L': ' ', 'low-M': ' ', 'low-R': ' '}
def print_board(ttt_board):
"""
    Builds the tic-tac-toe board string shown to the player(s). Called again
    after each move so the display stays current.
    :param ttt_board: A dictionary where keys are board positions and values
                      are ' ', 'X', or 'O'
    :return: string_board, the tic-tac-toe board as a printable string
"""
string_board = \
f"""
{ttt_board['top-L']}|{ttt_board['top-M']}|{ttt_board['top-R']}
-+-+-
{ttt_board['mid-L']}|{ttt_board['mid-M']}|{ttt_board['mid-R']}
-+-+-
{ttt_board['low-L']}|{ttt_board['low-M']}|{ttt_board['low-R']}
"""
return string_board
def check_win(print_board, ttt_board):
"""
Refactor this horrendous function. Also, checks to see if either player
X or player O has won (because Automate the Boring Stuff stopped at this
point in coding the game).
:param print_board: Displays board, X, and O positions to player(s)
:param ttt_board: Argument passed into print_board w/ position keys/values
:return: None, sys.exit() if is_winner == True
"""
is_winner = False
player = ''
def winner(player):
winner_print = [
"* * * * * * * * * * *",
"* * * * * * * * * * *",
f" Player {player} wins!",
"* * * * * * * * * * *",
"* * * * * * * * * * *"
]
for line in winner_print:
time.sleep(.5)
print(line)
sys.exit()
# check X
if ttt_board['top-L'] == 'X' and ttt_board['top-M'] == 'X' and ttt_board[
'top-R'] == 'X':
player = 'X'
is_winner = True
if ttt_board['mid-L'] == 'X' and ttt_board['mid-M'] == 'X' and ttt_board[
'mid-R'] == 'X':
player = 'X'
is_winner = True
if ttt_board['low-L'] == 'X' and ttt_board['low-M'] == 'X' and ttt_board[
'low-R'] == 'X':
player = 'X'
is_winner = True
if ttt_board['top-L'] == 'X' and ttt_board['mid-L'] == 'X' and ttt_board[
'low-L'] == 'X':
player = 'X'
is_winner = True
if ttt_board['top-M'] == 'X' and ttt_board['mid-M'] == 'X' and ttt_board[
'low-M'] == 'X':
player = 'X'
is_winner = True
if ttt_board['top-R'] == 'X' and ttt_board['mid-R'] == 'X' and ttt_board[
'low-R'] == 'X':
player = 'X'
is_winner = True
if ttt_board['top-L'] == 'X' and ttt_board['mid-M'] == 'X' and ttt_board[
'low-R'] == 'X':
player = 'X'
is_winner = True
if ttt_board['top-R'] == 'X' and ttt_board['mid-M'] == 'X' and ttt_board[
'low-L'] == 'X':
player = 'X'
is_winner = True
# check O
if ttt_board['top-L'] == 'O' and ttt_board['top-M'] == 'O' and ttt_board[
'top-R'] == 'O':
player = 'O'
is_winner = True
if ttt_board['mid-L'] == 'O' and ttt_board['mid-M'] == 'O' and ttt_board[
'mid-R'] == 'O':
player = 'O'
is_winner = True
if ttt_board['low-L'] == 'O' and ttt_board['low-M'] == 'O' and ttt_board[
'low-R'] == 'O':
player = 'O'
is_winner = True
if ttt_board['top-L'] == 'O' and ttt_board['mid-L'] == 'O' and ttt_board[
'low-L'] == 'O':
player = 'O'
is_winner = True
if ttt_board['top-M'] == 'O' and ttt_board['mid-M'] == 'O' and ttt_board[
'low-M'] == 'O':
player = 'O'
is_winner = True
if ttt_board['top-R'] == 'O' and ttt_board['mid-R'] == 'O' and ttt_board[
'low-R'] == 'O':
player = 'O'
is_winner = True
if ttt_board['top-L'] == 'O' and ttt_board['mid-M'] == 'O' and ttt_board[
'low-R'] == 'O':
player = 'O'
is_winner = True
if ttt_board['top-R'] == 'O' and ttt_board['mid-M'] == 'O' and ttt_board[
'low-L'] == 'O':
player = 'O'
is_winner = True
if is_winner:
print(print_board(ttt_board))
winner(player)
def main(ttt_board, print_board, check_win):
"""
PleasestopmewhyIamIcreatingthismonster.
Also, this function starts the game.
:param print_board: Displays board, X, and O positions to player(s)
:param ttt_board: Argument passed into print_board w/ position keys/values
:param check_win: Function that checks if either player has won.
:return: None. sys.exit() if winner.
"""
print("""
WELCOME...
...to tacode's terribly written tic-tac-toe game!
___________________________________________________
Board positions:
top-L | top-M | top-R
--------+---------+--------
mid-L | mid-M | mid-R
--------+---------+--------
low-L | low-M | low-R
Example: To move on the middle space, enter mid-M
___________________________________________________
""")
turn = 'X'
position_error = 0
move_number = 0
positions_list = ['top-L', 'top-M', 'top-R',
'mid-L', 'mid-M', 'mid-R',
'low-L', 'low-M', 'low-R']
for i in range(9):
print(print_board(ttt_board))
while True:
while True:
move = input(f"Turn for player {turn}. Move on which space? ")
# pro error handling
if move not in positions_list:
position_error += 1
if position_error == 3:
print("""
Moves not recognized, key in one of the following:
'top-L' | 'top-M' | 'top-R'
--------+---------+--------
'mid-L' | 'mid-M' | 'mid-R'
--------+---------+--------
'low-L' | 'low-M' | 'low-R'
""")
position_error = 0
else:
break
# more pro error handling
if ttt_board[move] not in ['X', 'O']:
position_error = 0
ttt_board[move] = turn
move_number += 1
break
else:
print("That space is taken. Try again.")
# check for winner
if move_number >= 5:
check_win(print_board, ttt_board)
# check for draw
if move_number == 9:
print(print_board(ttt_board))
print('...Wait, wut?')
sys.exit()
if turn == 'X':
turn = 'O'
else:
turn = 'X'
print(print_board(ttt_board))
if __name__ == "__main__":
main(ttt_board, print_board, check_win)
# Congratulations, your programming skills have decreased by reading this
|
# -*- coding: utf-8 -*-
from .wiserapi import WiserBaseAPI, _convert_case
class HotWater(WiserBaseAPI):
def __init__(self, *args, **kwargs):
"""Represnts the /HotWater object in the Restful API"""
# Defining default values
self.id = None # 2
self.override_type = None # "None",
self.schedule_id = None # 1000,
self.mode = None # "Auto",
self.water_heating_state = None # "Off",
self.hot_water_relay_state = None # "Off"
super(HotWater, self).__init__(*args, **kwargs)
|
from koapy.backend.kiwoom_open_api_w.core.KiwoomOpenApiWEventHandler import (
KiwoomOpenApiWEventHandler,
)
from koapy.utils.logging.Logging import Logging
class KiwoomOpenApiWLoggingEventHandler(KiwoomOpenApiWEventHandler, Logging):
def OnReceiveTrData(self, scrnno, rqname, trcode, recordname, prevnext):
self.logger.debug(
"OnReceiveTrData(%r, %r, %r, %r, %r)",
scrnno,
rqname,
trcode,
recordname,
prevnext,
)
def OnReceiveRealData(self, code, realtype, realdata):
self.logger.debug("OnReceiveRealData(%r, %r, %r)", code, realtype, realdata)
def OnReceiveMsg(self, scrnno, rqname, trcode, msg):
self.logger.debug("OnReceiveMsg(%r, %r, %r, %r)", scrnno, rqname, trcode, msg)
def OnReceiveChejanData(self, gubun, itemcnt, fidlist):
self.logger.debug("OnReceiveChejanData(%r, %r, %r)", gubun, itemcnt, fidlist)
def OnEventConnect(self, errcode):
self.logger.debug("OnEventConnect(%r)", errcode)
|
from awsio.python.lib.io.s3.s3dataset import S3Dataset
from torch.utils.data import DataLoader
url_list = ['s3://image-data-bucket/train/n01440764/n01440764_10026.JPEG',
's3://image-data-bucket/train/n01440764/n01440764_10027.JPEG',
's3://image-data-bucket/train/n01440764/n01440764_10029.JPEG']
dataset = S3Dataset(url_list)
dataloader = DataLoader(dataset,
batch_size=2,
num_workers=64)
for i, (image, label) in enumerate(dataloader):
print(type(image), len(image))
|
# Generated by Django 2.0.4 on 2018-04-25 18:34
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("feeds", "0004_auto_20180425_1530")]
operations = [
migrations.RemoveField(model_name="notificationfeed", name="read_at"),
migrations.RemoveField(model_name="notificationfeed", name="seen_at"),
migrations.AlterField(
model_name="notificationfeed",
name="activity_store",
field=django.contrib.postgres.fields.jsonb.JSONField(default=[]),
),
migrations.AlterField(
model_name="personalfeed",
name="activity_store",
field=django.contrib.postgres.fields.jsonb.JSONField(default=[]),
),
migrations.AlterField(
model_name="userfeed",
name="activity_store",
field=django.contrib.postgres.fields.jsonb.JSONField(default=[]),
),
]
|
#!/usr/bin/env python
"""
Numba sampling routines
"""
import numpy as np
import numba
import math
from numba import jit, int8, int16, int32, float32, float64, prange
import lom._numba.lom_outputs as lom_outputs
import lom._numba.lom_outputs_fuzzy as lom_outputs_fuzzy
from numba.types import int64
def get_scalar_output_function_2d(model, fuzzy=False):
if fuzzy is False:
if model == 'OR-AND':
return lom_outputs.OR_AND_product
if model == 'XOR-AND':
return lom_outputs.XOR_AND_product
if model == 'XOR-NAND':
return lom_outputs.XOR_NAND_product
if model == 'OR-XOR':
return lom_outputs.OR_XOR_product
if model == 'NAND-XOR':
return lom_outputs.NAND_XOR_product
if model == 'XOR-XOR':
return lom_outputs.XOR_XOR_product
if model == 'XOR-NXOR':
return lom_outputs.XOR_NXOR_product
if model == 'OR-NAND':
return lom_outputs.OR_NAND_product
if model == 'qL-AND':
return lom_outputs.qL_AND_product
else:
if model == 'OR-AND':
return lom_outputs_fuzzy.OR_AND_product_fuzzy
if model == 'XOR-AND':
return lom_outputs_fuzzy.XOR_AND_product_fuzzy
if model == 'XOR-NAND':
return lom_outputs_fuzzy.XOR_NAND_product_fuzzy
if model == 'OR-XOR':
return lom_outputs_fuzzy.OR_XOR_product_fuzzy
if model == 'NAND-XOR':
return lom_outputs_fuzzy.NAND_XOR_product_fuzzy
if model == 'XOR-XOR':
return lom_outputs_fuzzy.XOR_XOR_product_fuzzy
if model == 'XOR-NXOR':
return lom_outputs_fuzzy.XOR_NXOR_product_fuzzy
if model == 'OR-NAND':
return lom_outputs_fuzzy.OR_NAND_product_fuzzy
def get_scalar_output_function_3d(model, fuzzy=False):
if fuzzy is False:
if model == 'OR-AND':
return lom_outputs.OR_AND_product_3d
if model == 'XOR-AND':
return lom_outputs.XOR_AND_product_3d
if model == 'XOR-NAND':
return lom_outputs.XOR_NAND_product_3d
if model == 'OR-XOR':
return lom_outputs.OR_XOR_product_3d
if model == 'NAND-XOR':
return lom_outputs.NAND_XOR_product_3d
if model == 'XOR-XOR':
return lom_outputs.XOR_XOR_product_3d
if model == 'XOR-NXOR':
return lom_outputs.XOR_NXOR_product_3d
if model == 'OR-NAND':
return lom_outputs.OR_NAND_product_3d
else:
if model == 'OR-AND':
return lom_outputs_fuzzy.OR_AND_product_fuzzy_3d
if model == 'XOR-AND':
return lom_outputs_fuzzy.XOR_AND_product_fuzzy_3d
if model == 'XOR-NAND':
return lom_outputs_fuzzy.XOR_NAND_product_fuzzy_3d
if model == 'OR-XOR':
return lom_outputs_fuzzy.OR_XOR_product_fuzzy_3d
if model == 'NAND-XOR':
return lom_outputs_fuzzy.NAND_XOR_product_fuzzy_3d
if model == 'XOR-XOR':
return lom_outputs_fuzzy.XOR_XOR_product_fuzzy_3d
if model == 'XOR-NXOR':
return lom_outputs_fuzzy.XOR_NXOR_product_fuzzy_3d
if model == 'OR-NAND':
return lom_outputs_fuzzy.OR_NAND_product_fuzzy_3d
def make_output_function_2d(model):
get_scalar_output_2d = get_scalar_output_function_2d(model, fuzzy=False)
@jit('int8[:,:](int8[:,:], int8[:,:])',
nogil=True, nopython=False, parallel=True)
def output_function_2d(Z, U):
N = Z.shape[0]
D = U.shape[0]
X = np.zeros([N, D], dtype=np.int8)
for n in prange(N):
for d in prange(D):
X[n, d] = get_scalar_output_2d(Z[n, :], U[d, :])
return X
return output_function_2d
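# Usage sketch (illustrative, not part of the original module): assuming Z and U are
# int8 arrays of shapes (N, L) and (D, L), the generated function builds the full
# N x D data matrix for the chosen logical model, e.g.
#
#     f = make_output_function_2d('OR-AND')
#     X = f(Z, U)   # X[n, d] = OR_AND_product(Z[n, :], U[d, :])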
def make_output_function_3d(model):
get_scalar_output_3d = get_scalar_output_function_3d(model, fuzzy=False)
@jit('int8[:,:,:](int8[:,:], int8[:,:], int8[:,:])',
nogil=False, nopython=False, parallel=True)
def output_function_3d(Z, U, V):
N = Z.shape[0]
D = U.shape[0]
M = V.shape[0]
X = np.zeros([N, D, M], dtype=np.int8)
for n in prange(N):
for d in prange(D):
for m in range(M):
X[n, d, m] = get_scalar_output_3d(Z[n, :], U[d, :], V[m, :])
return X
return output_function_3d
def make_output_function_2d_fuzzy(model):
get_scalar_output_2d = get_scalar_output_function_2d(model, fuzzy=True)
@jit('float64[:,:](float64[:,:], float64[:,:])',
nogil=True, nopython=False, parallel=True)
def output_function_2d(Z, U):
N = Z.shape[0]
D = U.shape[0]
X = np.zeros([N, D], dtype=np.float64)
for n in prange(N):
for d in prange(D):
X[n, d] = get_scalar_output_2d(Z[n, :], U[d, :])
return X
return output_function_2d
def make_output_function_3d_fuzzy(model):
get_scalar_output_3d = get_scalar_output_function_3d(model, fuzzy=True)
    @jit('float64[:,:,:](float64[:,:], float64[:,:], float64[:,:])',
nogil=True, nopython=False, parallel=True)
def output_function_3d(Z, U, V):
N = Z.shape[0]
D = U.shape[0]
M = V.shape[0]
X = np.zeros([N, D, M], dtype=np.float64)
for n in prange(N):
for d in prange(D):
for m in range(M):
X[n, d, m] = get_scalar_output_3d(Z[n, :], U[d, :], V[m, :])
return X
return output_function_3d
def make_correct_predictions_counter(model, dimensionality):
"""
Generates function that counts the number of deterministically correct
predictions with signature fct(factor0, factor1, ..., data)
"""
if model == 'OR-AND-IBP':
model = 'OR-AND'
# ql-AND model requires extra treatment because of additional
# argument q.
if model == 'qL-AND':
output_fct = get_scalar_output_function_2d(model, fuzzy=False)
@jit('int64(int8[:,:], int8[:,:], int8[:,:], int8[:])',
nogil=True, nopython=True, parallel=True)
def correct_predictions_counter(Z, U, X, q):
N, D = X.shape
count = int64(0)
for n in prange(N):
for d in prange(D):
if output_fct(Z[n, :], U[d, :], q[d]) == X[n, d]:
count += 1
return count
return correct_predictions_counter
if dimensionality == 2:
output_fct = get_scalar_output_function_2d(model, fuzzy=False)
@jit('int64(int8[:,:], int8[:,:], int8[:,:])',
nogil=True, nopython=True, parallel=True)
def correct_predictions_counter(Z, U, X):
N, D = X.shape
count = int64(0)
for n in prange(N):
for d in prange(D):
if output_fct(Z[n, :], U[d, :]) == X[n, d]:
count += 1
return count
elif dimensionality == 3:
output_fct = get_scalar_output_function_3d(model, fuzzy=False)
@jit('int64(int8[:,:], int8[:,:], int8[:,:], int8[:,:,:])',
nogil=True, nopython=True, parallel=True)
def correct_predictions_counter(Z, U, V, X):
N, D, M = X.shape
count = int64(0)
for n in prange(N):
for d in prange(D):
for m in range(M):
if output_fct(Z[n, :], U[d, :], V[m, :]) == X[n, d, m]:
count += 1
return count
else:
        raise NotImplementedError(
            'Counting correct predictions is only implemented '
            'for dimensionality 2 or 3.')
return correct_predictions_counter
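# Illustrative note (not part of the original module): the counters generated above feed
# the lambda updates below, e.g. for a 2D model
#
#     counter = make_correct_predictions_counter('OR-AND', 2)
#     P = counter(Z, U, X)   # entries of X matching the deterministic OR-AND reconstruction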
def make_lbda_update_fct(model, dimensionality):
"""
Set lambda in OR-AND machine to its MLE
TODO: make for general arity
"""
    # Next: use the OR-AND update instead of qL-AND for testing
if model == 'qL-AND':
counter = make_correct_predictions_counter(model, dimensionality)
def lbda_update_fct(parm):
alpha, beta = parm.beta_prior
# correct predictions, counting 0 prediction as false
P = counter(*[x.val for x in parm.layer.factors],
parm.layer.child(),
parm.layer.q()[0, :])
# number of data points that are to be predicted
ND = np.prod(parm.layer.child().shape) - np.count_nonzero(parm.layer.child() == 0)
parm.val = np.max([-np.log(((ND + alpha + beta) / (float(P) + alpha)) - 1), 0])
# print('\n')
# print(P, ND)
return lbda_update_fct
if model == 'MAX-AND':
import lom._numba.max_machine_sampler
return lom._numba.max_machine_sampler.bda_MAX_AND
else:
counter = make_correct_predictions_counter(model, dimensionality)
def lbda_update_fct(parm):
alpha, beta = parm.beta_prior
# correct predictions, counting 0 prediction as false
P = counter(*[x.val for x in parm.layer.factors], parm.layer.child())
# number of data points that are to be predicted
ND = np.prod(parm.layer.child().shape) - np.count_nonzero(parm.layer.child() == 0)
parm.val = np.max([-np.log(((ND + alpha + beta) / (float(P) + alpha)) - 1), 0])
return lbda_update_fct
|
from unittest.mock import patch, call
from django.core.management import call_command
from django.db.migrations import RunSQL, RunPython
from django.test import TestCase
from .commands.collect_sql import SqlType, SqlObjectOperation
class MockMigration(object):
def __init__(self, operations):
self.operations = operations
def mock_run_python(apps, schema_editor):
pass
class CollectSqlTest(TestCase):
@patch('smartmin.management.commands.collect_sql.Command.load_migrations')
@patch('smartmin.management.commands.collect_sql.Command.write_dump')
def test_command(self, mock_write_dump, mock_load_migrations):
mock_load_migrations.return_value = [
MockMigration(operations=[
RunSQL("CREATE INDEX test_1 ON foo(bar); CREATE INDEX test_2 ON foo(bar);"),
RunPython(mock_run_python)
]),
MockMigration(operations=[
RunSQL("DROP INDEX test_2;"),
]),
MockMigration(operations=[
RunSQL("CREATE TRIGGER test_1 AFTER TRUNCATE ON flows_flowstep EXECUTE PROCEDURE foo();"),
RunSQL("CREATE INDEX a_test ON foo(bar);"),
RunPython(mock_run_python)
]),
]
call_command('collect_sql', output_dir='sql')
mock_write_dump.assert_has_calls([
call('indexes', [
SqlObjectOperation("CREATE INDEX a_test ON foo(bar);", SqlType.INDEX, "a_test", True),
SqlObjectOperation("CREATE INDEX test_1 ON foo(bar);", SqlType.INDEX, "test_1", True),
], 'sql'),
call('triggers', [
SqlObjectOperation("CREATE TRIGGER test_1 AFTER TRUNCATE ON flows_flowstep EXECUTE PROCEDURE foo();",
SqlType.TRIGGER, "test_1", True)
], 'sql')
])
|
import operator
import itertools
import logging
import yapsy
import yapsy.PluginFileLocator
import yapsy.PluginManager
from . import types
from . import decorators
from . import locator
class PluginManager(object):
PLUGIN_CATS = {types.ConversationPlugin.CATEGORY:
types.ConversationPlugin,
types.SpeechHandlerPlugin.CATEGORY:
types.SpeechHandlerPlugin,
types.TTSPlugin.CATEGORY:
types.TTSPlugin,
types.STTPlugin.CATEGORY:
types.STTPlugin}
def __init__(self, config, directories_list):
plugin_locator = locator.PluginLocator()
pm = yapsy.PluginManager.PluginManager(
categories_filter=self.PLUGIN_CATS,
directories_list=directories_list,
plugin_locator=plugin_locator)
pm = decorators.PluginCheckDecorator(config, pm)
pm = decorators.PluginConfigDecorator(config, pm)
pm = decorators.PluginGettextDecorator(config, pm)
self._pm = pm
self._pm.collectPlugins()
for plugin_info in self.get_plugins_of_category(
types.ConversationPlugin.CATEGORY):
self.__add_conversation_methods(plugin_info.plugin_object)
def __add_conversation_methods(self, plugin_object):
PM = self
def plugin_delegate_input(phrase):
handler = PM.find_handler(phrase)
if handler:
handler_func, variables = handler
handler_func(plugin_object, **variables)
else:
logging.getLogger(__name__).warning(
"No plugin can handle '%s'", phrase)
def plugin_get_command_phrases():
return PM.get_command_phrases()
plugin_object.delegate_input = plugin_delegate_input
plugin_object.get_command_phrases = plugin_get_command_phrases
def get_plugin_by_slug(self, slug, category="Default"):
"""
        Get the plugin corresponding to a given category and slug
"""
for item in self._pm.getPluginsOfCategory(category):
if item.slug == slug:
return item
return None
def get_plugins_of_category(self, category_name):
available_plugins = self._pm.getPluginsOfCategory(category_name)
# sort on secondary key
available_plugins.sort(key=operator.attrgetter('slug'))
# now sort on primary key, descending
available_plugins.sort(key=operator.attrgetter('priority'),
reverse=True)
return available_plugins
def get_all_plugins(self):
return self._pm.getAllPlugins()
def find_handler(self, phrase):
handlers = self.find_handlers(phrase)
if len(handlers) > 0:
return handlers[0]
def find_handlers(self, phrase):
return [handler for handler in
[plugin.plugin_object.get_handler(phrase)
for plugin in self.get_plugins_of_category(
types.SpeechHandlerPlugin.CATEGORY)]
if handler is not None]
def get_command_phrases(self):
phrases = [plugin.plugin_object.get_command_phrases()
for plugin in self.get_plugins_of_category(
types.SpeechHandlerPlugin.CATEGORY)]
return sorted(list(set(itertools.chain.from_iterable(phrases))))
|
import json
import os
import threading
import urllib.error
import urllib.request
from collections import defaultdict
from flask import Flask, abort, jsonify, redirect, request
CACHE_PATH = os.getenv("CACHE_PATH", "cache.json")
DEFAULT_SOURCE = os.getenv("DEFAULT_SOURCE", "repo.horoscope.dev")
USER_AGENT = os.getenv("USER_AGENT", "Dalamud.DivinationPluginRepo.DownloadCounter/1.0 (+https://github.com/horoscope-dev/Dalamud.DivinationPluginRepo.DownloadCounter)")
def load_cache():
c = defaultdict(int)
if os.path.exists(CACHE_PATH):
with open(CACHE_PATH) as f:
c.update(json.load(f))
return c
def save_cache(cache):
with open(CACHE_PATH, "w") as f:
json.dump(cache, f, indent=2, sort_keys=True)
cache = load_cache()
cache_lock = threading.Lock()
app = Flask(__name__)
def check_if_exists(url):
try:
req = urllib.request.Request(url, method="HEAD", headers={"User-Agent": USER_AGENT})
with urllib.request.urlopen(req):
return True
except urllib.error.HTTPError:
return False
@app.route("/<any(stable, testing):channel>/<plugin>")
def download(channel, plugin):
source = request.args.get("source", DEFAULT_SOURCE)
url = f"https://{source}/dist/{channel}/{plugin}/latest.zip"
if not check_if_exists(url):
return abort(404)
with cache_lock:
cache[plugin] += 1
save_cache(cache)
return redirect(url)
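# Example request (hypothetical plugin name): GET /stable/SamplePlugin?source=repo.horoscope.dev
# checks that https://repo.horoscope.dev/dist/stable/SamplePlugin/latest.zip exists, increments
# the per-plugin counter persisted in cache.json, and redirects the client to that URL.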
@app.route("/statistics")
def statistics():
with cache_lock:
return jsonify(cache)
@app.route("/")
def index():
return redirect(f"https://{DEFAULT_SOURCE}")
|
from .default_tagger import tag
from .base import map_paren, reverse_map_paren
__all__ = ['tag', 'map_paren', 'reverse_map_paren']
|
from typing import Callable, Optional
from flask import Blueprint
def _factory(
partial_module_string: str,
url_prefix: str,
protected: bool = False,
auth_function: Optional[Callable] = None,
) -> Blueprint:
# Create the blueprint
blueprint = Blueprint(
partial_module_string,
f"src.views.{partial_module_string}",
url_prefix=url_prefix,
)
# This endpoint is not to be publicly used
if protected:
# Protected endpoints must have an authorization method
if auth_function is None:
raise NotImplementedError(
"An authorization method must be given for protected endpoints!" # noqa
)
# Protect the endpoint with an authorization routine
blueprint.before_request(auth_function)
return blueprint
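# Illustrative sketch (hypothetical names): a protected blueprint would be declared as
#   admin = _factory("admin", "/admin", protected=True, auth_function=require_token)
# where require_token is any callable suitable for Flask's before_request hook.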
root = _factory("root", "/")
search = _factory("search", "/search")
shortcuts = _factory("shortcuts", "/")
all_blueprints = (root, search, shortcuts)
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for interacting with Google Cloud Storage.
Allows interacting with Cloud Storage via user-friendly objects
rather than via Connection.
"""
from gcloud.exceptions import NotFound
from gcloud._helpers import get_default_project
from gcloud.storage._implicit_environ import get_default_connection
from gcloud.storage.batch import Batch
from gcloud.storage.bucket import Bucket
from gcloud.storage.iterator import Iterator
def lookup_bucket(bucket_name, connection=None):
"""Get a bucket by name, returning None if not found.
    You can use this if you would rather check for a None value
than catching an exception::
>>> from gcloud import storage
>>> storage.set_defaults()
>>> bucket = storage.lookup_bucket('doesnt-exist')
>>> print bucket
None
>>> bucket = storage.lookup_bucket('my-bucket')
>>> print bucket
<Bucket: my-bucket>
:type bucket_name: string
:param bucket_name: The name of the bucket to get.
:type connection: :class:`gcloud.storage.connection.Connection` or
``NoneType``
:param connection: Optional. The connection to use when sending requests.
If not provided, falls back to default.
:rtype: :class:`gcloud.storage.bucket.Bucket`
:returns: The bucket matching the name provided or None if not found.
"""
connection = _require_connection(connection)
try:
return get_bucket(bucket_name, connection=connection)
except NotFound:
return None
def list_buckets(project=None, max_results=None, page_token=None, prefix=None,
projection='noAcl', fields=None, connection=None):
"""Get all buckets in the project.
This will not populate the list of blobs available in each
bucket.
>>> from gcloud import storage
>>> for bucket in storage.list_buckets():
>>> print bucket
This implements "storage.buckets.list".
:type project: string or ``NoneType``
:param project: Optional. The project to use when listing all buckets.
If not provided, falls back to default.
:type max_results: integer or ``NoneType``
:param max_results: Optional. Maximum number of buckets to return.
:type page_token: string or ``NoneType``
:param page_token: Optional. Opaque marker for the next "page" of buckets.
If not passed, will return the first page of buckets.
:type prefix: string or ``NoneType``
:param prefix: Optional. Filter results to buckets whose names begin with
this prefix.
:type projection: string or ``NoneType``
:param projection: If used, must be 'full' or 'noAcl'. Defaults to
'noAcl'. Specifies the set of properties to return.
:type fields: string or ``NoneType``
:param fields: Selector specifying which fields to include in a
partial response. Must be a list of fields. For example
to get a partial response with just the next page token
                   and the id of each bucket returned:
'items/id,nextPageToken'
:type connection: :class:`gcloud.storage.connection.Connection` or
``NoneType``
:param connection: Optional. The connection to use when sending requests.
If not provided, falls back to default.
:rtype: iterable of :class:`gcloud.storage.bucket.Bucket` objects.
:returns: All buckets belonging to this project.
"""
connection = _require_connection(connection)
if project is None:
project = get_default_project()
extra_params = {'project': project}
if max_results is not None:
extra_params['maxResults'] = max_results
if prefix is not None:
extra_params['prefix'] = prefix
extra_params['projection'] = projection
if fields is not None:
extra_params['fields'] = fields
result = _BucketIterator(connection=connection,
extra_params=extra_params)
# Page token must be handled specially since the base `Iterator`
# class has it as a reserved property.
if page_token is not None:
result.next_page_token = page_token
return iter(result)
def get_bucket(bucket_name, connection=None):
"""Get a bucket by name.
If the bucket isn't found, this will raise a
:class:`gcloud.storage.exceptions.NotFound`.
For example::
>>> from gcloud import storage
>>> from gcloud.exceptions import NotFound
>>> try:
>>> bucket = storage.get_bucket('my-bucket')
>>> except NotFound:
>>> print 'Sorry, that bucket does not exist!'
This implements "storage.buckets.get".
:type bucket_name: string
:param bucket_name: The name of the bucket to get.
:type connection: :class:`gcloud.storage.connection.Connection` or
``NoneType``
:param connection: Optional. The connection to use when sending requests.
If not provided, falls back to default.
:rtype: :class:`gcloud.storage.bucket.Bucket`
:returns: The bucket matching the name provided.
:raises: :class:`gcloud.exceptions.NotFound`
"""
connection = _require_connection(connection)
bucket = Bucket(bucket_name, connection=connection)
bucket.reload()
return bucket
def create_bucket(bucket_name, project=None, connection=None):
"""Create a new bucket.
For example::
>>> from gcloud import storage
>>> storage.set_defaults()
>>> bucket = storage.create_bucket('my-bucket')
>>> print bucket
<Bucket: my-bucket>
This implements "storage.buckets.insert".
If the bucket already exists, will raise
:class:`gcloud.exceptions.Conflict`.
:type project: string
:param project: Optional. The project to use when creating bucket.
If not provided, falls back to default.
:type bucket_name: string
:param bucket_name: The bucket name to create.
:type connection: :class:`gcloud.storage.connection.Connection` or
``NoneType``
:param connection: Optional. The connection to use when sending requests.
If not provided, falls back to default.
:rtype: :class:`gcloud.storage.bucket.Bucket`
:returns: The newly created bucket.
"""
connection = _require_connection(connection)
bucket = Bucket(bucket_name, connection=connection)
bucket.create(project)
return bucket
class _BucketIterator(Iterator):
"""An iterator listing all buckets.
You shouldn't have to use this directly, but instead should use the
helper methods on :class:`gcloud.storage.connection.Connection`
objects.
:type connection: :class:`gcloud.storage.connection.Connection`
:param connection: The connection to use for querying the list of buckets.
"""
def __init__(self, connection, extra_params=None):
super(_BucketIterator, self).__init__(connection=connection, path='/b',
extra_params=extra_params)
def get_items_from_response(self, response):
"""Factory method which yields :class:`.Bucket` items from a response.
:type response: dict
:param response: The JSON API response for a page of buckets.
"""
for item in response.get('items', []):
name = item.get('name')
bucket = Bucket(name, connection=self.connection)
bucket._properties = item
yield bucket
def _require_connection(connection=None):
"""Infer a connection from the environment, if not passed explicitly.
:type connection: :class:`gcloud.storage.connection.Connection`
:param connection: Optional.
:rtype: :class:`gcloud.storage.connection.Connection`
:returns: A connection based on the current environment.
:raises: :class:`EnvironmentError` if ``connection`` is ``None``, and
cannot be inferred from the environment.
"""
# NOTE: We use current Batch directly since it inherits from Connection.
if connection is None:
connection = Batch.current()
if connection is None:
connection = get_default_connection()
if connection is None:
raise EnvironmentError('Connection could not be inferred.')
return connection
|
ANSI = "\u001b["
CODE = f"{ANSI}38;5;"
THEME = {
"th_added": f"{CODE}48;1m",
"th_conflicted": f"{CODE}209;1m",
"th_deleted": f"{CODE}203;1m",
"th_dim": f"{ANSI}2m",
"th_error": f"{CODE}9;1m",
"th_keyword": f"{CODE}171;1m",
"th_modified": f"{CODE}221;1m",
"th_normal": f"{CODE}15;1m",
"th_renamed": f"{CODE}203;1m",
"th_reset": f"{ANSI}0m",
"th_success": f"{CODE}47;1m",
"th_untracked": f"{CODE}69;1m",
"th_stash": f"{CODE}69;1m",
}
ICONS = {
"ic_modified": "☢",
"ic_untracked": "✱",
"ic_renamed": "✦",
"ic_deleted": "✝",
"ic_conflicted": "■",
"ic_resetted": "⧗",
"ic_removed": "−",
"ic_patch_add": "▲",
"ic_patch_remove": "▼",
"ic_added": "✚",
"ic_error": "✖",
"ic_stash": "★",
"ic_selection": "❤",
"ic_normal": "•",
"ic_success": "⚑",
"ic_branch": "⚲",
"ic_log_selected": "♦",
"ic_log": "⋅",
}
EMPTY = {}
INPUT_THEME = {
"ADD_SELECTION": {"selection": f"{CODE}48;1m"},
"BRANCH_CREATION_ABOUT": EMPTY,
"BRANCH_CREATION_CONFIRM": EMPTY,
"BRANCH_CREATION_ID": EMPTY,
"BRANCH_CREATION_SWITCH": EMPTY,
"BRANCH_CREATION_TYPE": {"selection": f"{CODE}221;1m"},
"BRANCH_SELECTION": {"selection": f"{CODE}171;1m"},
"BRANCH_RENAME": {"font": f"{CODE}221;1m"},
"COMMIT_CREATION_ABOUT": EMPTY,
"COMMIT_CREATION_CONFIRM": EMPTY,
"COMMIT_CREATION_SCOPE": EMPTY,
"COMMIT_CREATION_TYPE": {"selection": f"{CODE}221;1m"},
"LOG_LOG": EMPTY,
"PATCH_SELECTION": EMPTY,
"REMOVE_SELECTION": {"selection": f"{CODE}9;1m"},
"RESET_SELECTION": {"selection": f"{CODE}48;1m"},
"STASH_CREATION_NAME": EMPTY,
"STASH_SELECTION": EMPTY,
}
INPUT_ICONS = {
"+": ICONS["ic_modified"],
"-": ICONS["ic_error"],
"selection": ICONS["ic_selection"],
"normal": ICONS["ic_normal"],
"log_selection": ICONS["ic_log_selected"],
"log_normal": ICONS["ic_log"],
}
|
#!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
"""
Author: YangYunYi
Date: 2021/9/1 23:04
docs: test_executor.py
description: test executor
"""
import os
import sys
import json
from adoctor_check_scheduler.common.constant import CheckTopic, CheckGroup
from adoctor_check_scheduler.common.config import scheduler_check_config
from adoctor_check_scheduler.common.check_consumer import RetryTaskConsumer
from aops_utils.kafka.producer import BaseProducer
from aops_utils.kafka.kafka_exception import ProducerInitError
from aops_utils.log.log import LOGGER
CONFIG_PATH = "conf"
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
def publish_check_task(task_msg, topic):
"""
Publish check task
Args:
        task_msg (dict): task msg
        topic (str): kafka topic to publish the task to
"""
try:
producer = BaseProducer(scheduler_check_config)
LOGGER.debug("Send check task msg %s", task_msg)
producer.send_msg(topic, task_msg)
except ProducerInitError as exp:
LOGGER.error("Produce task msg failed. %s" % exp)
def test_do_check():
test_msg = {'task_id': 1630421641844, 'user': 'admin',
'host_list': [{'host_id': 'eca3b022070211ecab3ca01c8d75c8f3',
'public_ip': '90.90.64.65'}],
'check_items': ['check_item2'], 'time_range': [1630421611, 1630421641]}
publish_check_task(test_msg, CheckTopic.do_check_topic)
def test_import_check_rule():
config_path = os.path.join(CURRENT_PATH, CONFIG_PATH, "check_rule.json")
with open(config_path, 'r', encoding='utf-8-sig') as cfg_file:
check_config = json.load(cfg_file)
check_config["username"] = "admin"
publish_check_task(check_config, CheckTopic.import_check_rule_topic)
def test_delete_check_rule():
delete_check_msg = {
"username": "admin",
"check_items": ["check_item1", "check_item7"]
}
publish_check_task(delete_check_msg, CheckTopic.delete_check_rule_topic)
def start_retry_consumer():
# Start retry task consumer
retry_task_consumer = RetryTaskConsumer(CheckTopic.retry_check_topic,
CheckGroup.retry_check_group_id,
scheduler_check_config)
retry_task_consumer.start()
retry_task_consumer.join()
test_func = {"import_check_rule": test_import_check_rule,
"delete_check_rule": test_delete_check_rule,
"do_check": test_do_check,
"start_retry_consumer": start_retry_consumer
}
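# Usage sketch: run a single scenario by name, e.g.
#     python3 test_executor.py do_check
# valid names are the keys of test_func above.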
def main():
"""
Entry of commands.
"""
if len(sys.argv) != 2:
LOGGER.error("Invalid parameter. Available options are "
"as follows:[diag,check_default,check_user]")
return
test_mode = sys.argv[1]
if test_mode in test_func.keys():
test_func[test_mode]()
else:
LOGGER.error("Invalid test mode %s. Available options are "
"as follows:[diag,check_default,check_user]",
sys.argv[1])
if __name__ == '__main__':
main()
|
import scrapy
import lxml.html
import re
import urllib2
from ceCrawler.items import assignment
from scrapy.selector import Selector
from scrapy.http import HtmlResponse
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
import json
def first():
    links = []
    with open("/home/babak/Dropbox/semesters/7/tahlil/project/scrapy/ceCrawler/new.json", 'r') as f:
        for line in f:
            line = line.strip()
            links.append(json.loads(line)['link'] + "/index.php/section/assignments/file/assignments")
    return links
class DmozSpider(scrapy.Spider):
name = "assignmentCrawler"
allowed_domains = ["http://ce.sharif.ir"]
start_urls = first()
def parse(self, response):
hxs = Selector(response)
item=assignment()
item['link']=response.url.replace('/index.php/section/assignments/file/assignments','')
dateSplitter="||||||||||" #10 ta \
anonsSplitter="**********" #10 ta *
name=""
date=''
HWlink=''
for i,x in enumerate(hxs.xpath('//td[@class="titleBar"]')):
try:
header=hxs.xpath("//td[@class='titleBar']")[i]
header=header.xpath('table[@width="100%"]/tr/td/b/text()').extract()
namen=header[0]
name+=namen+dateSplitter
daten=header[1]
date+=daten+dateSplitter
except:
name+=dateSplitter
date+=dateSplitter
try:
HWlinkn=hxs.xpath("//table[@class='table1']")[i]
HWlinkn=HWlinkn.xpath('tr[2]/td/table/tr/td/li/a/@href').extract()
HWlink+=HWlinkn[0]+dateSplitter
except:
HWlink+=dateSplitter
item['name']=name.replace('\r\n','').replace('\t','')
item['Deadline']=date.replace('\r\n','').replace('\t','')
item['HWlink']=HWlink.replace('\r\n','').replace('\t','')
yield item
|