import cv2
import io
import math
import numpy as np
import statistics
import s3_utils
from PIL import Image
def resize_with_aspect_ratio(image, width=None, height=None, inter=cv2.INTER_AREA):
dim = None
(h, w) = image.shape[:2]
if width is None and height is None:
return image
if width is None:
r = height / float(h)
dim = (int(w * r), height)
else:
r = width / float(w)
dim = (width, int(h * r))
return cv2.resize(image, dim, interpolation=inter)
def get_slope_angle_degrees(image):
'''Returns a slope of all coordinate points (black and near-black pixels) in image in degrees'''
image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
image = cv2.flip(image, 0)
img_height, img_width = image.shape[:2]
total_pixels = img_height * img_width
x_coords = []
y_coords = []
row_counter = img_height - 1
black_pixels = 0
while row_counter >= 0:  # include row 0 so every row is scanned
for position, pixel in enumerate(image[row_counter, :]):
if pixel == 0:
x_coords.append(position)
y_coords.append(row_counter)
black_pixels += 1
row_counter -= 1
x = np.array(x_coords)
y = np.array(y_coords)
slope = (len(x) * np.sum(x * y) - np.sum(x) * np.sum(y)) / (len(x) * np.sum(x * x) - np.sum(x) ** 2)
# slope, _ = np.polyfit(x, y, 1)
# slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
# abline(slope, 0)
# np.polynomial.pol
# zipped = zip(x,y)
print("x is:", x)
print("y is:", y)
# plt.plot(x, y, 'o')
# plt.plot(x, slope * x + 1)
# plt.show()
slope_angle = math.atan(slope)
slope_angle_degrees = math.degrees(slope_angle)
print("Slope:", slope, "Slope angle:", slope_angle, "Slope angle degrees:", slope_angle_degrees)
print("Black pixel percentage:", black_pixels / total_pixels)
return slope_angle_degrees
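# Hedged sketch: the closed-form expression above is the ordinary least-squares slope,
# and the commented-out np.polyfit call is an equivalent alternative. The points below
# are synthetic, chosen only for illustration.
def _least_squares_slope_demo():
    x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    y = np.array([1.0, 0.9, 2.1, 2.9, 4.2])
    slope_closed_form = (len(x) * np.sum(x * y) - np.sum(x) * np.sum(y)) / \
                        (len(x) * np.sum(x * x) - np.sum(x) ** 2)
    slope_polyfit, _ = np.polyfit(x, y, 1)
    assert np.isclose(slope_closed_form, slope_polyfit)
    return slope_closed_form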
def rotate_image(mat, angle):
"""
Rotates an image (angle in degrees) and expands image to avoid cropping
"""
height, width = mat.shape[:2] # image shape has 3 dimensions
image_center = (width/2, height/2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)
# rotation calculates the cos and sin, taking absolutes of those.
abs_cos = abs(rotation_mat[0,0])
abs_sin = abs(rotation_mat[0,1])
# find the new width and height bounds
bound_w = int(height * abs_sin + width * abs_cos)
bound_h = int(height * abs_cos + width * abs_sin)
# subtract old image center (bringing image back to origo) and adding the new image center coordinates
rotation_mat[0, 2] += bound_w/2 - image_center[0]
rotation_mat[1, 2] += bound_h/2 - image_center[1]
# rotate image with the new bounds and translated rotation matrix
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h), borderMode=cv2.BORDER_CONSTANT, borderValue=255)
return rotated_mat
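# Hedged sanity check (made-up image size, not part of the pipeline): rotating by
# 90 degrees with the expanded bounds above should swap the canvas width and height.
def _rotate_demo():
    img = np.full((100, 200), 255, dtype=np.uint8)  # 200 wide, 100 tall
    rotated = rotate_image(img, 90)
    return rotated.shape  # -> (200, 100)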
def left_justify_pad_right(image):
height = image.shape[0]
image = cv2.bitwise_not(image)
column_count = 0
for column in image.T:
if np.sum(column) == 0:
column_count += 1
else:
break
left_justified = image[:, column_count:]
left_justified = cv2.bitwise_not(left_justified)
# show_image(left_justified, "left justified")
justified_width = left_justified.shape[1]
print(f"left_justified {left_justified} and shape {left_justified.ndim}")
columns_to_add = 300 - justified_width
append_ones = np.ones((height, columns_to_add), np.uint8)
append_white = np.full_like(append_ones, 255)
# show_image(left_justified, "LEFT JUSTIFIED")
justified_and_appended = np.concatenate((left_justified, append_white), axis=1)
# show_image(justified_and_appended, "justified and appended")
# print("New dimensions:", justified_and_appended.shape)
return justified_and_appended
def get_bounding_rows(img):
def row_is_white(row):
# Allow for an off-white pixel or two
if row.sum() > len(row) * 255 - 100:
return True
return False
upper = 0
lower = img.shape[0] - 1
empty_upper = False
row_count = 0
quartile = img.shape[0] // 4
for row in img:
white_row = row_is_white(row)
if white_row and row_count < quartile:
empty_upper = True
upper += 1
row_count += 1
continue
if white_row and row_count > quartile * 3:
lower = row_count
return upper, lower
row_count += 1
return upper, lower
def find_first_black_pixel(array):
# argmax of a boolean array returns the index of the first True value (0 if the row has no near-black pixel)
return (array < 10).argmax(axis=0)
def get_fin_contour_vector(fin_matrix):
contours = []
for row in fin_matrix:
first_black = find_first_black_pixel(row)
contours.append(first_black)
return np.array(contours)
def get_absolute_diff(arr1, arr2):
return np.absolute(arr1 - arr2)
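# Hedged sketch (not part of the pipeline): how the helpers above can be combined to
# compare two fin contours. The tiny arrays are made up purely for demonstration.
def _contour_vector_demo():
    fin_a = np.full((4, 6), 255, dtype=np.uint8)
    fin_b = np.full((4, 6), 255, dtype=np.uint8)
    fin_a[:, 2] = 0  # black contour starts at column 2 in every row
    fin_b[:, 3] = 0  # black contour starts at column 3 in every row
    vec_a = get_fin_contour_vector(fin_a)  # -> [2, 2, 2, 2]
    vec_b = get_fin_contour_vector(fin_b)  # -> [3, 3, 3, 3]
    return get_absolute_diff(vec_a, vec_b)  # -> [1, 1, 1, 1]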
def show_image(img, window_name="edges"):
# img = ResizeWithAspectRatio(img, width=1200)
cv2.imshow(window_name, img)
cv2.moveWindow(window_name, 500, 0)
cv2.waitKey()
cv2.destroyAllWindows()
def get_sorted_contours(contours):
sorted_contours = sorted(contours, key=lambda c: cv2.arcLength(c, False), reverse=True)
return sorted_contours
def get_median_contour_length(contours):
lengths = []
for contour in contours:
lengths.append(cv2.arcLength(contour, False))
return statistics.median(lengths)
def contour_filter(contours):
median = get_median_contour_length(contours)
# print("Median contour length is: ", median)
median_plus_contours = []
low=10000.0
high=0
for contour in contours:
length = cv2.arcLength(contour, False)
if length > median + 130:
median_plus_contours.append(contour)
# contour_lens.append(length)
if length < low:
low = length
if length > high:
high = length
print("Total contours:", len(contours), "Low: ", low, "High: ", int(high), "Median: ", median)
return median_plus_contours
def get_contour_img(img, color=True):
contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = contour_filter(contours)
contours = get_sorted_contours(contours)
blank_img = 255 * np.ones(img.shape, dtype=np.uint8)
if color:
blank_img = cv2.cvtColor(blank_img, cv2.COLOR_GRAY2RGB)
return cv2.drawContours(blank_img, contours[:1], -1, (0, 255, 0), 2)
def get_fin_vector(s3_url, rect, thresh, flip):
'''rect - coordinates of rectangle in [x1,y1,x2,y2] format'''
local_image_file = s3_utils.download_from_s3(s3_url)
img = cv2.imread(local_image_file, 0)
if flip:
img = cv2.flip(img, 1)
img = cv2.bitwise_not(img)
_, thresh = cv2.threshold(img, thresh, 255, 0)
contour_img = get_contour_img(thresh, color=False)
fin_rect = contour_img[rect[1]:rect[3], rect[0]:rect[2]]
image = cv2.convertScaleAbs(fin_rect, alpha=(255.0))
slope_angle_degrees = get_slope_angle_degrees(image)
rotation = -slope_angle_degrees # rotate against the measured slope to level the contour
rotated_image = rotate_image(image, rotation)
_, thresh = cv2.threshold(rotated_image, 200, 255, 0) # Rotation generates gray pix
upper, lower = get_bounding_rows(thresh)
cropped = thresh[upper:lower, 0:thresh.shape[1]]
rotated_and_resized = resize_with_aspect_ratio(cropped, height=350)
justified_and_padded = left_justify_pad_right(rotated_and_resized)
return get_fin_contour_vector(justified_and_padded)
def get_fin_contour_png(s3_url, rect, thresh, flip):
'''rect - coordinates of rectangle in [x1,y1,x2,y2] format'''
local_image_file = s3_utils.download_from_s3(s3_url)
img = cv2.imread(local_image_file, 0)
if flip:
img = cv2.flip(img, 1)
img = cv2.bitwise_not(img)
_, thresh = cv2.threshold(img, thresh, 255, 0)
img = get_contour_img(thresh)
img = img[rect[1]:rect[3], rect[0]:rect[2]]
img = Image.fromarray(img)
img = img.convert('RGBA')
img_data = img.load()
width, height = img.size
for y in range(height):
for x in range(width):
if img_data[x, y] == (255, 255, 255, 255):
img_data[x, y] = (0, 255, 255, 0)
# mem_file = io.BytesIO()
img.save("/tmp/foo.png", format="PNG")
return "/tmp/foo.png"
# return mem_file
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('oled_ros')
import sys
import time
import rospy
from std_msgs.msg import String
if __name__ == '__main__':
rospy.init_node('pub_oled')
pub = rospy.Publisher("status", String, latch=True, queue_size=1)
msg = String()
msg.data = str(sys.argv[1])
#msg.data = "Hola"
#print str(sys.argv[1])
pub.publish(msg)
time.sleep(1)
|
import os
import errno
from multiprocessing.pool import Pool
from tqdm import tqdm
import requests
from PIL import Image
def download(pid, image_list, base_url, save_dir, image_size=(512, 512)):
colors = ['red', 'green', 'blue', 'yellow']
for i in tqdm(image_list, postfix=pid):
img_id = i.split('_', 1)
for ind, color in enumerate(colors):
try:
img_path = img_id[0] + '/' + img_id[1] + '_' + color + '.jpg'
img_name = i + '_' + color + '.png'
img_url = base_url + img_path
# Get the raw response from the url
r = requests.get(img_url, allow_redirects=True, stream=True)
r.raw.decode_content = True
# Use PIL to resize the image and to convert it to L
# (8-bit pixels, black and white)
im = Image.open(r.raw)
if color == 'yellow':
ind = 0
im = im.resize(image_size, Image.LANCZOS).split()[ind]
im.save(os.path.join(save_dir, img_name), 'PNG')
except Exception as e:
print(i)
print(img_url)
print(e)
if __name__ == '__main__':
# Parameters
process_num = 1
image_size = (512, 512)
url = 'http://v18.proteinatlas.org/images/'
# csv_path = '../../../../kaggle_protein_atlas/input_data/hpa_website_set_2.csv'
save_dir = '../../../../kaggle_protein_atlas/input_data/train_extended_2/'
# Create the directory to save the images in case it doesn't exist
try:
os.makedirs(save_dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
print('Parent process %s.' % os.getpid())
# img_list = pd.read_csv(csv_path)['Id']
# external_raw = pd.read_csv('../../../../kaggle_protein_atlas/input_data/hpa_website_set_2.csv')
# external_forum = pd.read_csv('../../../../kaggle_protein_atlas/input_data/HPAv18RBGY_wodpl.csv')
# img_list = external_forum[~external_forum.Id.isin(external_raw.Id)].Id.values
f = open("../../../../kaggle_protein_atlas/input_data/add_img_2.txt", 'r', encoding="utf8")
m = f.readlines()
f.close()
# r = [(len(l.split('http://v18.proteinatlas.org/images/')), l) for l in m if 'http:' in l]
img_list = [line.strip() for line in m] # strip newlines left by readlines()
# for rr in r:
# if rr[0] == 2:
# img_list.append(
# "_".join(rr[1].split('http://v18.proteinatlas.org/images/')[-1].split('_')[:3]).replace('/', '_'))
# if rr[0] > 2:
# for i in rr[1].split('http://v18.proteinatlas.org/images/')[1:]:
# img_list.append("_".join(i.replace('/', '_').split('_')[:3]))
list_len = len(img_list)
p = Pool(process_num)
for i in range(process_num):
start = int(i * list_len / process_num)
end = int((i + 1) * list_len / process_num)
process_images = img_list[start:end]
p.apply_async(download, args=(str(i), process_images, url, save_dir, image_size))
print('Waiting for all subprocesses done...')
p.close()
p.join()
print('All subprocesses done.')
|
# roughly
# [print(bin(ord(x)).replace("0b", "")) for x in "Hello, world!\0\0\0"]
# python3 txtmunge.py --rotate 90 --flipy --invert hello1.txt test/tutorial1.txt
|
import numpy as np
import cantera as ct
import pyutils.ctutils as pc
import pyutils.filename as fn
from scipy.special import erfc
class PremixedFlameState:
def __init__(self, flame, fuel, oxidizer={'O2':1., 'N2':3.76}, T=None):
self.flame = flame
self.fuel = pc.gas.parser_stream(fuel)
self.oxidizer = pc.gas.parser_stream(oxidizer)
self.T = T
if flame.T[0] < flame.T[-1]:
self.density = flame.density[0]
else:
self.density = -flame.density[-1]
def __idx_unburnt(self):
return 0
def __idx_fcr(self):
fcr = self.fuel_consumption_rate()
return np.argmax(fcr)
def __idx_hrr(self):
hrr = self.flame.heat_release_rate
return np.argmax(hrr)
def __idx_T(self):
T = self.flame.T
return np.argmax(T)
def fuel_list(self):
return list(self.fuel.keys())
def expansion(self):
return self.flame.density[-1]/self.flame.density[0]
def consumption_speed(self):
flame = self.flame
fuels = self.fuel_list()
fuel_rate = np.zeros( len(fuels) )
fuel_mass = np.zeros( len(fuels) )
for i, s in enumerate(fuels):
# get species index
index = flame.gas.species_index( s )
# calculate fuel consumption
fuel_rate[i] = - ( np.trapz(flame.net_production_rates[index],
flame.grid)
*flame.gas.molecular_weights[index] )
# fuel mass fraction difference
fuel_mass[i] = flame.Y[index, 0] - flame.Y[index,-1]
fuel_rate_sum = np.sum( fuel_rate )
fuel_mass_sum = np.sum( fuel_mass )
sc = fuel_rate_sum / ( self.density * fuel_mass_sum )
return sc
def thermal_consumption_speed(self):
flame = self.flame
T_u = flame.T[0]
T_b = flame.T[-1]
dT = flame.heat_release_rate/flame.cp
integral = np.trapz(dT, flame.grid)
sc = integral / (T_b-T_u) / self.density
return sc
def mass_flux(self):
return self.consumption_speed()*self.flame.density[0]
def fuel_consumption_rate(self):
flame = self.flame
fuels = self.fuel_list()
fuel_rate = np.zeros((len(fuels), flame.T.size))
for i, s in enumerate(fuels):
# get species index
index = flame.gas.species_index( s )
fuel_rate[i] = (-flame.net_production_rates[index]
*flame.gas.molecular_weights[index] )
fuel_consumption_rate = np.sum( fuel_rate, axis=0 )
return fuel_consumption_rate
def thermal_thickness(self):
T = self.flame.T
x = self.flame.grid
T_grad = np.gradient( T, x )
if T[-1] > T[0]:
return ( T[-1] - T[0] ) / T_grad.max()
else:
return ( T[-1] - T[0] ) / T_grad.min()
def diffusive_thickness(self):
kappa = self.flame.thermal_conductivity
cp = self.flame.cp
rho = self.flame.density
alpha = kappa / (rho*cp)
sc = self.consumption_speed()
if self.flame.T[0] < self.flame.T[-1] :
return alpha[0] / sc
else:
return alpha[-1] / sc
def T_peak(self):
return self.flame.T[self.__idx_fcr()]
def displacement_speed(self):
if self.T is not None:
return np.interp(self.T, self.flame.T, self.flame.velocity)
return self.flame.velocity[self.__idx_hrr()]
def density_weighted_displacement_speed(self, T=None):
if T is not None:
x = np.interp(T, self.flame.T, self.flame.grid)
elif self.T is not None:
x = np.interp(self.T, self.flame.T, self.flame.grid)
else:
x = self.flame.grid[self.__idx_hrr()]
rho = np.interp(x, self.flame.grid, self.flame.density)
sd = np.interp(x, self.flame.grid, self.flame.velocity)
return rho*sd/self.density
def strain_rate(self, T=None):
if float(ct.__version__[:3]) <=2.4:
at = 2. * self.flame.V
else:
at = 2. * self.flame.spread_rate
if T is not None:
return np.interp(T, self.flame.T, at)
elif self.T is not None:
return np.interp(self.T, self.flame.T, at)
else:
return at[self.__idx_hrr()]
def Ka(self):
at = self.strain_rate()
df = self.thermal_thickness()
sc = self.consumption_speed()
return at * df / sc
def Re(self):
df = self.thermal_thickness()
sc = self.consumption_speed()
rho_u = self.flame.density[0]
mu_u = self.flame.viscosity[0]
return sc * df * rho_u / mu_u
def Le_fuel(self, Le_spe):
flame = self.flame
fuels = self.fuel_list()
sum_X = 0.
sum_Le = 0.
for i, s in enumerate(fuels):
# get species index
idx = flame.gas.species_index( s )
sum_X += flame.X[idx][0]
sum_Le += flame.X[idx][0] * Le_spe[idx]
Le_F = sum_Le / sum_X
return Le_F
def Le_oxidizer(self, Le_spe):
idx = self.flame.gas.species_index('O2')
return Le_spe[idx]
def Le_species_eff(self, type_Le):
Le_spe = self.Le_species()
switch = {'T':self.__idx_T,
'fcr':self.__idx_fcr,
'unburnt':self.__idx_unburnt}
idx = switch.get(type_Le)()
Le_spe_eff = Le_spe[:,idx]
return Le_spe_eff
def Le_species(self):
kappa = self.flame.thermal_conductivity
cp = self.flame.cp
rho = self.flame.density
alpha = kappa / (rho*cp)
D = self.flame.mix_diff_coeffs
Le_spe = np.empty(D.shape)
for i, D_spe in enumerate(D):
Le_spe[i] = alpha / D_spe
return Le_spe
def equivalence_ratio(self):
gas = self.flame.gas
gas.TPY = gas.T, gas.P, self.flame.Y[:,0]
return gas.get_equivalence_ratio()
def export_profile(self, file_name='premix.dat', unit='cgs'):
f = self.flame
# export cantera flame result in the form of premix output
# variable names (A20)
# X, U, RHO, Y, T (E20.10)
# unit system
# SI
convertor_length = 1.0
convertor_density = 1.0
# cgs
if unit == 'cgs':
convertor_length = 1.0E+02
convertor_density = 1.0E-03
# variable names
species_names = f.gas.species_names
variable_names = ['X', 'U', 'RHO'] + species_names +['T',]
str_names = ''.join(['{:>20}'.format(n) for n in variable_names])
# data for output
data = np.zeros((f.grid.size, len(variable_names)))
data[:,0] = f.grid * convertor_length
data[:,1] = f.u * convertor_length
data[:,2] = f.density * convertor_density
data[:,3:-1] = f.Y.transpose()
data[:,-1] = f.T
np.savetxt(file_name, data, fmt='%20.10E', delimiter='',
header=str_names, comments='')
return 0
def export_profile_YT(self, file_name='cema.inp'):
f = self.flame
data = np.zeros((f.grid.size, len(f.gas.species_names)+1))
data[:,:-1] = f.Y.transpose()
data[:,-1] = f.T
np.savetxt(file_name, data, fmt='%20.10E', delimiter='')
return 0
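# Hedged illustration (synthetic numbers, not Cantera output) of the integral used in
# PremixedFlameState.consumption_speed above: s_c = integral of the fuel consumption
# rate over the grid, divided by rho_u times the fuel mass fraction drop.
def _consumption_speed_demo():
    grid = np.linspace(0.0, 0.01, 101)                    # 1 cm domain [m]
    w_F = 50.0 * np.exp(-((grid - 0.005) / 0.001) ** 2)   # made-up consumption rate [kg/m^3/s]
    rho_u = 1.1                                           # made-up unburnt density [kg/m^3]
    dY_F = 0.055                                          # made-up fuel mass fraction drop
    return np.trapz(w_F, grid) / (rho_u * dY_F)           # consumption speed [m/s]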
class FreeFlameState(PremixedFlameState):
def __init__(self, solution, chemistry, fuel, oxidizer={'O2':1., 'N2':3.76}):
self.chemistry = chemistry
gas = ct.Solution(chemistry, loglevel=0)
flame = ct.FreeFlame(gas, width=0.1)
flame.restore(solution, loglevel=0)
PremixedFlameState.__init__(self, flame, fuel, oxidizer)
def Ze(self, perturb=0.01, **kwargs):
chemistry = self.chemistry
fuel = self.fuel
oxidizer = self.oxidizer
T = self.flame.T[0]
p = self.flame.P / ct.one_atm
phi = self.equivalence_ratio()
return pc.Ze(chemistry, fuel, oxidizer, T, p, phi, perturb, **kwargs)
def Le_eff(self, type_idx='unburnt', type_mix='erf'):
def mix_linear(Le_F, Le_O, phi):
if phi < 0.8:
return Le_F
elif phi > 1.2:
return Le_O
else:
return (3.-2.5*phi)*Le_F+(2.5*phi-2.)*Le_O
def mix_erf(Le_F, Le_O, phi):
Ze = self.Ze()
phi_n = phi/(1.+phi)
x = Ze*(phi_n-0.5)*2.
f = erfc(x)
return Le_O + (Le_F-Le_O)*f/2.
def mix_Bechtold(Le_F, Le_O, phi):
Ze = self.Ze()
if phi < 1.:
phi_ = 1./phi
Le_E = Le_O
Le_D = Le_F
else:
phi_ = phi
Le_E = Le_F
Le_D = Le_O
A = 1. + Ze * ( phi_ - 1. )
return (Le_E+Le_D*A)/(1.+A)
def mix_Bechtold_cut(Le_F, Le_O, phi):
if phi < 0.8:
return Le_F
elif phi > 1.2:
return Le_O
else:
return mix_Bechtold(Le_F, Le_O, phi)
def mix_Dortz(Le_F, Le_O, phi):
if phi <= 0.6:
return Le_F
elif phi >= 1.2:
return Le_O
else:
Le_BM = mix_Bechtold(Le_F, Le_O, phi)
if phi <= 1.:
return 2.5*(1.-phi)*Le_F+(2.5*phi-1.5)*Le_BM
else:
return 2.5*(phi-1.)*Le_O+(3.5-2.5*phi)*Le_BM
phi = self.equivalence_ratio()
Le_spe_eff = self.Le_species_eff(type_idx)
Le_F = self.Le_fuel(Le_spe_eff)
Le_O = self.Le_oxidizer(Le_spe_eff)
switch = {'linear':mix_linear,
'erf':mix_erf,
'Bechtold':mix_Bechtold,
'Bechtold_cut':mix_Bechtold_cut,
'Dortz':mix_Dortz}
Le_eff = switch.get(type_mix)(Le_F, Le_O, phi)
return Le_eff
class CounterflowPremixedFlameState(PremixedFlameState):
def __init__(self, solution, chemistry, fuel, oxidizer={'O2':1., 'N2':3.76}, T=None):
self.chemistry = chemistry
gas = ct.Solution(chemistry, loglevel=0)
flame = ct.CounterflowPremixedFlame(gas, width=0.1)
flame.restore(solution, loglevel=0)
PremixedFlameState.__init__(self, flame, fuel, oxidizer, T)
class CounterflowTwinFlameState(PremixedFlameState):
def __init__(self, solution, chemistry, fuel, oxidizer={'O2':1., 'N2':3.76}, T=None):
self.chemistry = chemistry
gas = ct.Solution(chemistry, loglevel=0)
flame = ct.CounterflowTwinPremixedFlame(gas, width=0.1)
flame.restore(solution, loglevel=0)
PremixedFlameState.__init__(self, flame, fuel, oxidizer, T)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-01 10:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0005_auto_20160131_2038'),
]
operations = [
migrations.RenameField(
model_name='resource',
old_name='type',
new_name='post_type',
),
migrations.AlterField(
model_name='resource',
name='post_status',
field=models.CharField(choices=[('publish', 'Publish'), ('draft', 'Draft'), ('trash', 'Trash')], max_length=25),
),
]
|
import torchvision
import torchvision.transforms as T
import torch
import numpy as np
import cv2
import requests
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
class Model:
def __init__(self,confidence_thresh=0.6):
self.model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
self.model.eval()
self.transform = T.Compose([T.ToTensor()])
self.conf_thresh = confidence_thresh
def get_seg_output(self, image: np.ndarray):
image = self.transform(image.copy())
print(image.shape)
with torch.no_grad():
pred = self.model([image])
outputs = [(pred[0]['masks'][i][0],pred[0]['labels'][i]) for i in range(len(pred[0]['boxes'])) if pred[0]['scores'][i]>self.conf_thresh and pred[0]['labels'][i]==1]
# outputs = [(pred[0]['masks'][i][0],pred[0]['labels'][i]) for i in range(len(pred[0]['boxes'])) if pred[0]['scores'][i]>self.conf_thresh]
return outputs
class Preprocessing:
def __init__(self,kernel,lower_bound=0.1,upper_bound=0.9,dilate_iter=10,erode_iter=10):
self.kernel = kernel
self.low_thresh = lower_bound
self.high_thresh = upper_bound
self.dilate_iter = dilate_iter
self.erode_iter = erode_iter
def get_target_mask(self,masks):
out = np.zeros(masks[0].shape)
for mask in masks:
out += mask
out = np.clip(out,0,1)
return out
def get_trimap(self,masks):
target_mask = self.get_target_mask(masks)
foreground = target_mask >= self.high_thresh
ambiguous = (target_mask < self.high_thresh)*(target_mask>=self.low_thresh)
print(self.erode_iter)
erode = cv2.erode(foreground.astype('uint8'),self.kernel,iterations=self.erode_iter)
dilate = cv2.dilate(ambiguous.astype('uint8'),self.kernel,iterations=self.dilate_iter)
h, w = target_mask.shape
bg_giver = np.clip((erode + dilate),0,1 )
trimap = np.zeros((h, w, 2))
trimap[erode == 1, 1] = 1
trimap[bg_giver == 0, 0] = 1
return trimap
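# Hedged usage sketch: build a trimap from a single synthetic binary mask. The mask,
# kernel and iteration counts below are made-up values for illustration only.
if __name__ == "__main__":
    kernel = np.ones((3, 3), np.uint8)
    fake_mask = np.zeros((64, 64), dtype=np.float32)
    fake_mask[12:52, 12:52] = 1.0  # a square "person" mask
    pre = Preprocessing(kernel, dilate_iter=3, erode_iter=3)
    trimap = pre.get_trimap([fake_mask])
    print(trimap.shape)  # (64, 64, 2): channel 1 = sure foreground, channel 0 = sure background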
|
from math import log
def dataRange(X):
"""
Accepts a list of lists (X) and returns the "column" ranges. e.g.
X = [[8,7,3],
[4,1,9],
[5,6,2]]
dataRange(X) # returns: [ [4,8], [1,7], [2,9] ]
"""
def col(j):
return [x[j] for x in X]
k = len(X[0]) # number of columns in X
return [[min(col(j)), max(col(j))] for j in range(k)]
def argmax(x):
"""
returns the index of the element in a list which corresponds to the maximum
"""
return x.index(max(x))
def log2(x):
"""
log base 2
"""
return log(x) / log(2)
|
# chat/consumers.py
from time import time
import json
from main.game.especialMoves import EnPassant
from main.game.game import selectPiece
from main.game.ConvertStringArray import arrayToStringallPieces, arrayTostring, stringToArray, arrayToHistory
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
from main.game.verifyCheck import verificarMate
from .models import Room,GameHistory
class RoomConsumer(WebsocketConsumer):
def connect(self):
#since allauth is the default authentication, self.scope already returns the logged-in user
self.time = time
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
self.Room,created = Room.objects.get_or_create(roomCode=self.room_group_name)
if created:
self.Room.user1= str(self.scope['user'])
self.historico,created = GameHistory.objects.get_or_create(RoomName=str(self.room_name),
user1=self.Room.user1,
user2=self.Room.user2,
timer1=self.Room.timer1,
timer2=self.Room.timer2,
history='')
self.Room.save()
else:
if self.Room.user1 == str(self.scope['user']):
pass
else:
self.Room.user2 = str(self.scope['user'])
self.Room.save()
# Join room group
async_to_sync(self.channel_layer.group_add)(
self.room_group_name,
self.channel_name
)
if self.Room.user1 != '' and self.Room.user2 != '':
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'start_game',
'data':{
'user1':self.Room.user1,
'user2':self.Room.user2
}
}
)
self.Room.save()
self.accept()
def disconnect(self, close_code):
# Leave room group
async_to_sync(self.channel_layer.group_discard)(
self.room_group_name,
self.channel_name
)
# Receive message from room group
def chat_message(self, event):
message = event['data']['message']
# Send message to WebSocket
self.send(text_data=json.dumps({
'message': message,
'usuario':event['usuario']
}))
def start_game(self,data):
self.Room,created = Room.objects.get_or_create(roomCode=self.room_group_name)
usuario1 = data['data']['user1']
usuario2 = data['data']['user2']
if usuario1 == str(self.scope['user']):
self.send(text_data=json.dumps({
'user1': usuario1,
'user2': usuario2,
'userColor':'w',
'message':'game has been started you are white pieces',
'startGame':self.Room.pieces
}))
elif usuario2 == str(self.scope['user']):
self.send(text_data=json.dumps({
'user1': usuario1,
'user2': usuario2,
'userColor':'b',
'message':'game has been started you are black pieces',
'startGame':self.Room.pieces
}))
# helper methods used by the handlers below
def timerHandler(self,who):
# temporary timer
if self.Room.tempTimer == 0:
self.Room.tempTimer = int(self.time()%10000)
self.Room.save()
return
tempTimer = self.Room.tempTimer
if who == self.Room.user1:
newTempTimer = int(self.time()%10000)
self.Room.timer1 = self.Room.timer1 - (newTempTimer-tempTimer)
self.send(text_data=json.dumps({
'message':'white moved, remaining time: {}'.format(self.Room.timer1)
}))
elif who == self.Room.user2:
newTempTimer = int(self.time()%10000)
self.Room.timer2 = self.Room.timer2 - (newTempTimer-tempTimer)
self.send(text_data=json.dumps({
'message':'black moved, remaining time: {}'.format(self.Room.timer2)
}))
self.Room.save()
self.Room.tempTimer = int(self.time()%10000)
if self.Room.timer1 <= 0:
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'game_end',
'data':'w'
}
)
elif self.Room.timer2 <= 0:
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'game_end',
'data':'b'
}
)
def timer_end(self):
if self.Room.user1 == str(self.scope['user']):
self.timerHandler(self.Room.user1)
elif self.Room.user2 == str(self.scope['user']):
self.timerHandler(self.Room.user2)
def select_piece(self,data):
#get the piece that was selected
allPieces = stringToArray(self.Room.pieces)
piece = data['data']['piece']
color = piece[1]
if color =='w' and self.Room.user1 == str(self.scope['user']) and self.Room.whoMove == True:
#collect all the pieces on the backend
#check that the piece exists
for line in allPieces:
for pieceInBack in line:
if pieceInBack == piece:
#if the piece exists, return its possible moves when there are any; otherwise just return the piece
moves = selectPiece(allPieces,piece,self.Room)
if piece == moves.strip():
self.send(text_data=json.dumps({
'message':'no possible moves',
'piece':piece
}))
else:
self.send(text_data=json.dumps({
'message':'moves',
'moves':moves.strip()
}))
elif color=='b' and self.Room.user2 == str(self.scope['user']) and self.Room.whoMove == False:
for line in allPieces:
for pieceInBack in line:
if pieceInBack == piece:
#if the piece exists, return its possible moves when there are any; otherwise just return the piece
moves = selectPiece(allPieces,piece,self.Room)
if piece == moves.strip():
self.send(text_data=json.dumps({
'message':'no possible moves',
'piece':piece
}))
else:
self.send(text_data=json.dumps({
'message':'moves',
'moves':moves.strip()
}))
def actualizeWhoMove(self,data):
move = data['data']['data']['move']
move = move.split(' ')
color = move[0][1]
if color == 'w':
self.Room.whoMove = False
else:
self.Room.whoMove = True
#execute the move for the selected piece
def move_piece(self,data):
self.timerHandler(data['usuario'])
EnPassant = False
#update whose turn it is for all players
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'actualizeWhoMove',
'data':data
}
)
move = data['data']['move']
move = move.split(' ')
pieces = self.Room.pieces
piecesArray = stringToArray(pieces)
#update the move history for the players
if self.Room.history != '':
self.Room.history = self.Room.history + arrayToHistory(move) +','
else:
self.Room.history = arrayToHistory(move)+','
for line in piecesArray:
for piece in line:
if piece == move[0]:
#check whether the piece is a pawn
if move[0][0] == 'p':
#check for promotion
if move[1][2] == '7' and move[1][1] == 'w':
move[1] = 'q'+move[1][1]+move[1][2]+move[1][3]
elif move[1][2] == '0' and move[1][1] == 'b':
move[1] = 'q'+move[1][1]+move[1][2]+move[1][3]
#check whether this is an en passant move
if move[0][3] != move[1][3]:
if piecesArray[int(move[1][2])][int(move[1][3])] == '----':
#apply the en passant capture
piecesArray[int(piece[2])][int(piece[3])] = '----'
piecesArray[int(move[1][2])][int(move[1][3])] = move[1]
if move[0][1] == 'w':
move.append(piecesArray[int(move[1][2])-1][int(move[1][3])])
piecesArray[int(move[1][2])-1][int(move[1][3])] = '----'
else:
move.append(piecesArray[int(move[1][2])+1][int(move[1][3])])
piecesArray[int(move[1][2])+1][int(move[1][3])] = '----'
EnPassant = True
self.send(text_data=json.dumps({
'message':'moved',
'enPassant':move
}))
elif move[0][0] == 'k':
movimento = int(move[1][3])
if movimento == int(move[0][3])+2 or movimento == int(move[0][3])-2:
if movimento == int(move[0][3])+2:
move.append(piecesArray[int(move[0][2])][movimento+1])
move.append('r'+move[0][1]+move[0][2]+str(movimento-1))
piecesArray[int(move[0][2])][int(move[0][3])] = '----'
piecesArray[int(move[0][2])][movimento-1] = move[3]
piecesArray[int(move[1][2])][int(move[1][3])] = move[1]
elif movimento == int(move[0][3])-2:
move.append(piecesArray[int(move[0][2])][movimento-2])
move.append('r'+move[0][1]+move[0][2]+str(movimento+1))
piecesArray[int(move[0][2])][int(move[0][3])] = '----'
piecesArray[int(move[0][2])][movimento+1] = move[3]
piecesArray[int(move[1][2])][int(move[1][3])] = move[1]
self.send(text_data=json.dumps({
'message':'moved',
'castles':move
}))
EnPassant = True
if EnPassant == False:
piecesArray[int(piece[2])][int(piece[3])] = '----'
piecesArray[int(move[1][2])][int(move[1][3])] = move[1]
move_piece = move
self.send(text_data=json.dumps({
'message':'moved',
'movePiece':move_piece
}))
self.Room.pieces = arrayToStringallPieces(piecesArray)
if move[0][1] == 'w':
mate = verificarMate(piecesArray,'b')
else:
mate = verificarMate(piecesArray,'w')
if mate:
self.send(text_data=json.dumps({
'gameEnd':'game over',
'whoLost':mate
}))
if self.Room.user1 == str(self.scope['user']):
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':'game_end',
'data':mate
}
)
def game_end(self,data):
loser = data['data']
winner = ''
if loser == 'w':
winner = 'b'
elif loser == 'b':
winner = 'w'
else:
winner = 'd'
if self.Room.user1 == str(self.scope['user']):
self.historico.result = winner
self.historico.user1 = self.Room.user1
self.historico.user2 = self.Room.user2
self.historico.history = self.Room.history
self.historico.timer1 = self.Room.timer1
self.historico.timer2 = self.Room.timer2
self.historico.save()
# Receive message from WebSocket
def receive(self, text_data):
text_data_json = json.loads(text_data)
command = text_data_json['command']
usuario = str(self.scope['user'])
# Send message to room group
async_to_sync(self.channel_layer.group_send)(
self.room_group_name,
{
'type':command,
'data':text_data_json,
'usuario':usuario
}
)
|
import os
import ray
import numpy as np
from glob import glob
import cv2
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--videos-path", type=str, default="./HMDB51_videos/")
parser.add_argument("--frames-path", type=str, default="./HMDB51_frames/")
parser.add_argument("--flows-path", type=str, default="./HMDB51_flows/")
parser.add_argument("--quality", type=int, default=75)
parser.add_argument("--flow-mode", action="store_true")
parser.add_argument("--num-cpus", type=int, default=8)
parser.add_argument("--pyr-scale", type=float, default=0.5)
parser.add_argument("--levels", type=int, default=3)
parser.add_argument("--winsize", type=int, default=15)
parser.add_argument("--iterations", type=int, default=3)
parser.add_argument("--poly-n", type=int, default=5)
parser.add_argument("--poly-sigma", type=float, default=1.2)
parser.add_argument("--flags", type=int, default=0)
args = parser.parse_args()
# check directory(videos_path, frames_path, flows_path)
assert os.path.exists(args.videos_path) is True, "'{}' directory does not exist !!".format(args.videos_path)
# only flow
if args.flow_mode:
assert os.path.exists(args.flows_path) is False, "'{}' directory already exists !!".format(args.flows_path)
else:
assert os.path.exists(args.frames_path) is False, "'{}' directory already exists !!".format(args.frames_path)
# get videos root path
videos_root_path_list = glob(os.path.join(args.videos_path, "*"))
# init ray on local
ray.init(num_cpus=args.num_cpus)
@ray.remote
def extractor(index, video_root_path):
videos_path_list = glob(os.path.join(video_root_path, "*"))
for sub_index, video_path in enumerate(videos_path_list):
frame_name = "/".join(video_path.split("\\" if os.name == 'nt' else "/")[-2:]).split('.avi')[0]
# read video
cap = cv2.VideoCapture(video_path)
ret, frame_first = cap.read()
if ret == False:
print("'{}' video reading failure !! skip this video...".format(video_path))
return
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
print("main index: {}/{} sub index: {}/{} name: {} length: {}".format(index+1, len(videos_root_path_list),sub_index+1, len(videos_path_list), frame_name, length))
# only flow
if args.flow_mode:
# flow path
flow_path = os.path.join(args.flows_path, frame_name)
os.makedirs(flow_path)
# convert to gray
frame_prev_gray = cv2.cvtColor(frame_first, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame_first)
hsv[..., 1] = 255
else:
# frame path
frame_path = os.path.join(args.frames_path, frame_name)
os.makedirs(frame_path)
# save first frame
if not cv2.imwrite(os.path.join(frame_path, "0.jpg"), frame_first, [int(cv2.IMWRITE_JPEG_QUALITY), args.quality]):
raise Exception("could not write frame !!")
for i in range(1, length):
# read next frame
ret, frame_next = cap.read()
if ret == False:
msg = "index '{}' of '{}' video reading failure !! skip this frame...".format(i, video_path)
continue
# only flow
if args.flow_mode:
frame_next_gray = cv2.cvtColor(frame_next, cv2.COLOR_BGR2GRAY)
# Computes a dense optical flow using the Gunnar Farneback's algorithm
frame_flow = cv2.calcOpticalFlowFarneback(frame_prev_gray, frame_next_gray, None, args.pyr_scale, args.levels, args.winsize, args.iterations, args.poly_n, args.poly_sigma, args.flags)
# Calculates the magnitude and angle of 2D vectors
mag, ang = cv2.cartToPolar(frame_flow[..., 0], frame_flow[..., 1])
hsv[..., 0] = ang*180/np.pi/2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
cv2.imwrite(os.path.join(flow_path, "{}.jpg".format(i - 1)), cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), [int(cv2.IMWRITE_JPEG_QUALITY), args.quality])
frame_prev_gray = frame_next_gray
else:
# save next frame
if not cv2.imwrite(os.path.join(frame_path, "{}.jpg".format(i)), frame_next, [int(cv2.IMWRITE_JPEG_QUALITY), args.quality]):
raise Exception("could not write frame !!")
cap.release()
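# Hedged illustration of the dense-flow step above, isolated from the video loop:
# compute Farneback flow between two synthetic grayscale frames and encode it as an
# HSV image (hue = direction, value = magnitude). All values here are made up for demo.
def _farneback_demo():
    prev_gray = np.zeros((64, 64), dtype=np.uint8)
    next_gray = np.zeros((64, 64), dtype=np.uint8)
    prev_gray[20:30, 20:30] = 255
    next_gray[22:32, 22:32] = 255  # the bright square moved by (2, 2)
    flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros((64, 64, 3), dtype=np.uint8)
    hsv[..., 1] = 255
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)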
ray.get([extractor.remote(i, video_root_path) for i, video_root_path in enumerate(videos_root_path_list)])
|
# coding=utf-8
# import modular
import bisect
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path
import pickle
import sys
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
# define variables
CACHENAME_TD_MATRIX = "cache-td-matrix.pickle"
DIRNAME_NEWS = "news\\"
DIRNAME_WORDS = "words\\"
FILENAME_PLOT = "latent_space.png"
NON_BMP_MAP = dict.fromkeys(range(0x10000, sys.maxunicode + 1), 0xfffd)
MATRIX_SCALAR = 0.5
THRESHOLD_KEY_WORDS = 9
THRESHOLD_LSA_SIGMA = 0.9
PLOT_FONT_SIZE = 10
PLOT_X_SIZE = 7
PLOT_Y_SIZE = 7
# define functions
def read_words_docs_matrix(counts_file_list, threshold_tf=1):
text_terms = None
text_docs = None
td_matrix = None
if os.path.isfile(CACHENAME_TD_MATRIX):
print("[Msg][TD-Matrix] Read the TD-Mmatrix")
with open(CACHENAME_TD_MATRIX, 'rb') as f_in:
text_terms, text_docs, tfidf = pickle.load(f_in)
else:
# TERMS and DOCS tags
print("[Msg][TD-Matrix] Create the TD-Mmatrix")
text_docs = ["d{0}".format(i+1) for i in range(len(counts_file_list))]
text_terms = []
# read json files of word segmentation results
docs_list = []
terms_set = set()
for i, filename_input in enumerate(counts_file_list):
with open(DIRNAME_WORDS + filename_input, "r", encoding="utf-8", errors="ignore") as f_in:
# read tf
words = json.load(f_in)
words = words["term-frequency"]
# choose tf > the threshold
if threshold_tf > 1:
words = {t: tf for t, tf in words.items() if tf >= threshold_tf}
# append filtered tf to a list
if words:
docs_list.append(words)
terms_set |= set(words)
# --------------------------------------------------
# create term-index
terms_index = {}
if terms_set:
for i, t in enumerate(sorted(terms_set)):
# index of a word
terms_index[t] = i
# add its tag to a list
text_terms.append(t)
# --------------------------------------------------
num_terms = len(terms_index) # rows: terms
num_docs = len(docs_list) # cols: docs
print("terms:", num_terms)
print("docs: ", num_docs)
# create TF matrix
tf = np.zeros([num_terms, num_docs])
for j, doc in enumerate(docs_list):
for t, tf_ in doc.items():
i = terms_index[t]
# TF
# tf[i,j] = tf_
tf[i, j] = 1 + np.log(tf_)
# create IDF matrix
idf = np.zeros([num_terms, num_docs])
for i in range(num_terms):
t = tf[i, :]
b = t > 0
nt = len(t[b])
# idf
idf[i, b] = np.log(1 + (num_docs / nt))
# create TFIDF matrix
# tfidf = np.zeros([num_terms, num_docs])
#
# for i in range(num_terms):
# idf_ = idf[i,:]
# tf_ = tf[i,:]
# b = tf_ > 0
# tfidf[i, b] = tf_[b] * idf_[b]
#
tfidf = np.multiply(tf, idf)
return text_terms, text_docs, tfidf
def get_k_singulars(s, p=0.9):
s_cum = np.cumsum(s)
c_max = np.amax(s_cum) * p
k = bisect.bisect_left(s_cum, c_max)
return k
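# Hedged toy example (made-up singular values) of get_k_singulars: count how many leading
# singular values are needed before the cumulative sum reaches p of the total.
_s_demo = np.array([10.0, 5.0, 3.0, 2.0])  # cumulative sums: 10, 15, 18, 20
# 0.85 of the total (20) is 17, which bisect_left inserts at index 2 of the cumulative sums
assert get_k_singulars(_s_demo, p=0.85) == 2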
def matrix_linear_scaling(m, scalar=1.0):
# get dimensions of matrix
d1, d2 = m.shape
# normalization
for i in range(d2): # normalize each column by its maximum
col_i = m[:, i]
col_max = np.amax(col_i)
m[:, i] = np.divide(col_i, col_max)
# create diagonal matrix
s = np.zeros([d1, d1])
for i in range(d1):
for j in range(d1):
s[i, j] = np.linalg.norm(m[i] - m[j])
# linearly scaling the matrix
s = np.multiply(s, scalar)
m = np.dot(s, m)
return m
def lsa(txt_t, txt_d, m, k=2, p=0.9):
print("[Msg][LSA] Use SVD to decompose the TD-Matrix")
# SVD
u, s, vT = np.linalg.svd(m)
# --------------------------------------------------
# get top-k singular values of the matrix
k = get_k_singulars(s, p)
k = k if k > 2 else 2
print("k: ", k)
# print("k/2: ", round(k/2))
u_ = u[:, :k] # u
s_ = np.diag(s) # s
s_ = s_[:k, :k]
vT_ = vT[:k, :] # vT
# --------------------------------------------------
# transfer terms and docs to the latent semantic space
u_s_ = np.dot(u_, s_)
# s_vT_ = np.dot(s_,vT_).transpose()
# --------------------------------------------------
print("[Msg][LSA] Project the latent space to a 2d-plane")
# project the latent semantic space to a 2-d space
# u_s_embedded = MDS(n_components=2, max_iter=1000).fit_transform(u_s_)
u_s_embedded = TSNE(n_components=2, init='pca', random_state=0).fit_transform(u_s_)
# do liner scaling to the 2-d space
u_s_embedded = matrix_linear_scaling(u_s_embedded, scalar=MATRIX_SCALAR)
# --------------------------------------------------
print("[Msg][LSA] Draw a LSA 2d-plot")
# plot setting of showing chinese words
plt.rcParams['font.sans-serif'] = ['Microsoft JhengHei']
plt.rcParams['axes.unicode_minus'] = False
# create figure
fig, axes = plt.subplots(figsize=(PLOT_X_SIZE, PLOT_Y_SIZE), facecolor='w')
x = [u[0] for u in u_s_embedded]
y = [u[1] for u in u_s_embedded]
axes.scatter(x, y, marker="o", facecolors='none', edgecolors='b', s=0, alpha=0.8)
for i, t in enumerate(txt_t):
axes.annotate(u"{0}".format(t), (x[i], y[i]), color="b", fontsize=PLOT_FONT_SIZE)
# --------------------------------------------------
# plt.legend(["terms", "docs"], loc=0)
plt.tick_params(axis='both', which='major', labelsize=6)
plt.tick_params(axis='both', which='minor', labelsize=6)
plt.tight_layout()
plt.savefig(FILENAME_PLOT, dpi=300, bbox_inches='tight', pad_inches=0.05)
plt.close(fig)
def latent_semantic_analysis():
print(">>> START Latent-Semantic-Analysis!!")
print()
print("[Msg] Get word-counts file list")
counts_file_list = os.listdir(DIRNAME_WORDS)
print("[Msg] Read/Create the Words-Docs matrix")
text_terms, text_docs, tfidf = read_words_docs_matrix(counts_file_list, threshold_tf=THRESHOLD_KEY_WORDS)
print("[Msg] Latent semantic analysis")
lsa(text_terms, text_docs, tfidf, p=THRESHOLD_LSA_SIGMA)
print()
print(">>> STOP Latent-Semantic-Analysis!!")
if __name__ == "__main__":
latent_semantic_analysis()
|
#This is a Nipype generator. Warning, here be dragons.
import sys
import nipype
import nipype.pipeline as pe
import nipype.interfaces.io as io
import nipype.interfaces.ants as ants
import nipype.interfaces.afni as afni
import nipype.interfaces.fsl as fsl
WorkingDirectory = "~/Porcupipelines/ThisStudy"
#Generic datagrabber module that wraps around glob in an
NodeHash_30bb950 = pe.Node(io.S3DataGrabber(outfields=['outfiles']), name = 'NodeName_30bb950')
NodeHash_30bb950.inputs.anon = True
NodeHash_30bb950.inputs.bucket = 'openneuro'
NodeHash_30bb950.inputs.bucket_path = 'ds000101/ds000101_R2.0.0/uncompressed/'
NodeHash_30bb950.inputs.local_directory = '/tmp'
NodeHash_30bb950.inputs.sort_filelist = True
NodeHash_30bb950.inputs.template = 'sub-01/anat/sub-01_T1w.nii.gz'
#Wraps command **N4BiasFieldCorrection**
NodeHash_1ea4b50 = pe.Node(interface = ants.N4BiasFieldCorrection(), name = 'NodeName_1ea4b50')
NodeHash_1ea4b50.inputs.copy_header = False
NodeHash_1ea4b50.inputs.dimension = 3
NodeHash_1ea4b50.inputs.num_threads = 4
NodeHash_1ea4b50.inputs.save_bias = True
#Wraps command **3dUnifize**
NodeHash_291d6d0 = pe.Node(interface = afni.Unifize(), name = 'NodeName_291d6d0')
NodeHash_291d6d0.inputs.outputtype = 'NIFTI_GZ'
#Wraps command **3dSkullStrip**
NodeHash_1ddfa30 = pe.Node(interface = afni.SkullStrip(), name = 'NodeName_1ddfa30')
NodeHash_1ddfa30.inputs.outputtype = 'NIFTI_GZ'
#Wraps command **3dcalc**
NodeHash_3bd6370 = pe.Node(interface = afni.Calc(), name = 'NodeName_3bd6370')
NodeHash_3bd6370.inputs.expr = 'a*step(b)'
NodeHash_3bd6370.inputs.outputtype = 'NIFTI_GZ'
#Wraps command **fslmaths**
NodeHash_49ddb10 = pe.Node(interface = fsl.Threshold(), name = 'NodeName_49ddb10')
NodeHash_49ddb10.inputs.args = '-bin'
NodeHash_49ddb10.inputs.thresh = 1.e-3
#Wraps command **3dUnifize**
NodeHash_229c200 = pe.Node(interface = afni.Unifize(), name = 'NodeName_229c200')
NodeHash_229c200.inputs.gm = True
NodeHash_229c200.inputs.outputtype = 'NIFTI_GZ'
#Generic datasink module to store structured outputs
NodeHash_3207070 = pe.Node(interface = io.DataSink(), name = 'NodeName_3207070')
NodeHash_3207070.inputs.base_directory = '/tmp'
#Create a workflow to connect all those nodes
analysisflow = nipype.Workflow('MyWorkflow')
analysisflow.connect(NodeHash_30bb950, 'outfiles', NodeHash_1ea4b50, 'input_image')
analysisflow.connect(NodeHash_1ea4b50, 'output_image', NodeHash_291d6d0, 'in_file')
analysisflow.connect(NodeHash_291d6d0, 'out_file', NodeHash_1ddfa30, 'in_file')
analysisflow.connect(NodeHash_1ea4b50, 'bias_image', NodeHash_3207070, 'bias_image')
analysisflow.connect(NodeHash_291d6d0, 'out_file', NodeHash_3bd6370, 'in_file_a')
analysisflow.connect(NodeHash_1ddfa30, 'out_file', NodeHash_3bd6370, 'in_file_b')
analysisflow.connect(NodeHash_3bd6370, 'out_file', NodeHash_49ddb10, 'in_file')
analysisflow.connect(NodeHash_3bd6370, 'out_file', NodeHash_229c200, 'in_file')
analysisflow.connect(NodeHash_49ddb10, 'out_file', NodeHash_3207070, 'out_mask')
analysisflow.connect(NodeHash_229c200, 'out_file', NodeHash_3207070, 'out_file')
#Run the workflow
plugin = 'MultiProc' #adjust your desired plugin here
plugin_args = {'n_procs': 1} #adjust to your number of cores
analysisflow.write_graph(graph2use='flat', format='png', simple_form=False)
analysisflow.run(plugin=plugin, plugin_args=plugin_args)
|
import setuptools, os, sys
import patch
with open("README.md", "r") as fh:
long_description = fh.read()
# Collect all files recursively from the data folder
data_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), "patch", "data"))
data_files = []
for (dirpath, dirnames, filenames) in os.walk(data_folder):
rel_folder = os.path.relpath(dirpath, "patch")
if len(filenames) > 0:
data_files.append(os.path.join(rel_folder, "*"))
setuptools.setup(
name="nrn-patch",
version=patch.__version__,
author="Robin De Schepper",
author_email="robingilbert.deschepper@unipv.it",
description="A Pythonic, object-oriented, monkey patch for NEURON",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/helveg/patch",
license="MIT",
packages=setuptools.find_packages(),
include_package_data=True,
package_data={"patch": data_files, "patch_extensions": [os.path.join("mod","*.mod")]},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
entry_points={"glia.package": ["patch_extensions = patch_extensions"]},
install_requires=["setuptools", "nrn-glia"],
extras_require={"dev": ["sphinx", "pre-commit", "black", "sphinxcontrib-contentui"]},
)
|
from typing import Any, Generator
def divisor_sum(number: int) -> int:
return sum(i for i in range(1, (number//2) + 1)
if number % i == 0)
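# Hedged example: the proper divisors of 12 are 1, 2, 3, 4 and 6, so divisor_sum(12) is 16;
# since 16 > 12, 12 is the smallest abundant number.
assert divisor_sum(12) == 16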
def calculate_abundant_numbers(limit: int) -> Generator[int, Any, None]:
for x in range(1, limit):
if divisor_sum(x) > x:
yield x
def can_express_in_abundant_sum(numbers: set[int], number: int) -> bool:
# sets iterate in arbitrary order, so iterate in ascending order to keep the early exit valid
for num1 in sorted(numbers):
if num1 > number:
return False
if number - num1 in numbers:
return True
return False
def main() -> None:
upper_limit = 28_123
abundant_numbers = set(calculate_abundant_numbers(upper_limit))
s = sum(i for i in range(1, upper_limit)
if not can_express_in_abundant_sum(abundant_numbers, i))
print(s)
if __name__ == "__main__":
main()
|
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Input, Activation, BatchNormalization
from tensorflow.keras import optimizers
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from tensorflow.keras import backend as K
import time
def r2_metric(y_true, y_pred):
SS_res = K.sum(K.square( y_true-y_pred ))
SS_tot = K.sum(K.square( y_true - K.mean(y_true) ))
return ( 1 - SS_res/(SS_tot + K.epsilon()) )
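# Hedged sanity check of r2_metric on made-up data: compare against the plain R^2
# formula computed with NumPy. The arrays below are arbitrary example values.
def _r2_metric_demo():
    import numpy as np
    y_true = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
    y_pred = np.array([1.1, 1.9, 3.2, 3.8], dtype=np.float32)
    keras_r2 = float(r2_metric(tf.constant(y_true), tf.constant(y_pred)))
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - y_true.mean()) ** 2)
    numpy_r2 = 1.0 - ss_res / ss_tot
    return keras_r2, numpy_r2  # should agree up to K.epsilon()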
def create_model(input_shape=10,
output_shape=10,
layers = [50],
activation = 'relu',
optimizer = 'adam',
learning_rate = 0.001,
dropout = True,
decay = 0.9,
decay_steps = 10000,
batch_norm=True):
model = Sequential(name = str(time.time()))
model.add(Input(input_shape))
for l in layers:
model.add(Dense(l))
if batch_norm:
model.add(BatchNormalization())
model.add(Activation(activation=activation))
if dropout:
model.add(Dropout(0.5))
model.add(Dense(output_shape))
# Compile model
lr_schedule = ExponentialDecay(initial_learning_rate=learning_rate,
decay_steps=decay_steps,
decay_rate=decay)
opt = optimizers.get({
'class_name': optimizer,
'config': {'learning_rate' : lr_schedule}})
model.compile(loss='mse',
optimizer=opt,
metrics=[r2_metric])
return model
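# Hedged usage sketch: the input shape, layer sizes and optimizer below are arbitrary
# example values, not settings from the original training setup.
if __name__ == "__main__":
    demo_model = create_model(input_shape=(16,),
                              output_shape=1,
                              layers=[64, 32],
                              activation='relu',
                              optimizer='adam',
                              learning_rate=1e-3)
    demo_model.summary()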
|
from stringfuzz.parser import parse
__all__ = [
'nop',
]
# public API
def nop(ast):
return ast
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# python version 2.7.6
import os,hashlib,datetime,logging
logging.basicConfig(filename = "md5.log",level = logging.INFO,format='[%(asctime)s %(levelname)s] %(message)s',datefmt='%Y%m%d %H:%M:%S')
# compute the MD5 of a large file by reading it in chunks
def getBigFileMD5(filePath):
m = hashlib.md5()
with open(filePath,"rb") as f:
while True:
data = f.read(8096)
if not data:
break
m.update(data)
return m.hexdigest()
# list the files in a directory and record their MD5 hashes
def getFileList(targetDir):
fileDict = {}
fileList = os.listdir(targetDir)
for i in fileList:
filepath = os.path.join(targetDir,i)
if os.path.isfile(filepath):
md5 = getBigFileMD5(filepath)
fileDict[i.split('.')[0]] = md5
with open('fileMd5.txt','a') as f:
for key,value in fileDict.items():
f.write(key+","+value+'\n')
f.flush()
if __name__ == '__main__':
startTime = datetime.datetime.now()
getFileList('/share/data/file/')
endTime = datetime.datetime.now()
t = (endTime-startTime).total_seconds()
logging.info('Elapsed time for this run (v5): '+str(t))
|
#!/usr/bin/python3
import time
print("Start: %s" % time.ctime())
time.sleep(18)
print("End : %s" % time.ctime())
|
"""Firebase firestore services"""
import firebase_admin
from firebase_admin import firestore
from firebase_admin import credentials
class FirebaseService:
"""class holding all needed firestore operations
"""
def __init__(self):
"""initialize firebase firestore client.
"""
firebase_admin.initialize_app(
credentials.Certificate("secret/serviceAccountKey.json"))
self._db = firestore.client()
def update_generated_status(self, request, status):
"""change status of 'isGenerated' is firestore.
Args:
request (ModelInput): request format from flutter.
status (bool): state whether question generated.
"""
if not isinstance(status, bool):
raise TypeError("'status' must be a bool value")
doc_ref = self._db.collection('users').document(request.uid)
doc_ref.update({'isGenerated': status})
def send_results_to_fs(self, request, questions, crct_ans, all_ans):
"""send generated question to appropiate fs doc.
Args:
request (ModelInput): request format from flutter.
questions (list[str]): list of generated questions.
crct_ans (list[str]): list of correct answers.
all_ans (list[str]): list of all answers squeezed together.
"""
if not isinstance(questions, list):
raise TypeError("'questions' must be list of strings")
if not isinstance(crct_ans, list):
raise TypeError("'crct_ans' must be list of strings")
if not isinstance(all_ans, list):
raise TypeError("'all_ans' must be list of strings")
doc_ref = self._db.collection('users').document(request.uid)
for idx, question in enumerate(questions):
q_dict = {
'question': question,
'crct_ans': crct_ans[idx],
'all_ans': all_ans[idx * 4: 4 + idx * 4]
}
doc_ref.collection(request.name).document(str(idx)).set(q_dict)
|
#
# PySNMP MIB module ACCORD-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ACCORD-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:12:42 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, TimeTicks, Integer32, ModuleIdentity, Gauge32, enterprises, iso, Counter64, NotificationType, Bits, MibIdentifier, IpAddress, Counter32, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "TimeTicks", "Integer32", "ModuleIdentity", "Gauge32", "enterprises", "iso", "Counter64", "NotificationType", "Bits", "MibIdentifier", "IpAddress", "Counter32", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
accord = MibIdentifier((1, 3, 6, 1, 4, 1, 6333))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 6333, 1))
mgc_100 = MibIdentifier((1, 3, 6, 1, 4, 1, 6333, 1, 1)).setLabel("mgc-100")
mibBuilder.exportSymbols("ACCORD-MIB", accord=accord, mgc_100=mgc_100, products=products)
|
from persistry.plugins.generic import GenericProfile2
class PluginClass(object):
def __init__(self, hive_object):
self.hive = hive_object
self.result = []
def process_plugin(self):
current = self.hive.current_control_set
paths = ["Microsoft\\Windows NT\\CurrentVersion\\Winlogon\\Shell",
"Microsoft\\Windows NT\\CurrentVersion\\Winlogon\\Userinit",
"Microsoft\\Windows NT\\CurrentVersion\\Winlogon\\Taskman"]
gen = GenericProfile2(self.hive)
for path in paths:
gen.process_plugin(path)
self.result += gen.result
self.keys = gen.keys
|
#!/usr/bin/env python
import RPi.GPIO as GPIO
import subprocess
import time
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BCM) # Use Broadcom pin numbering
GPIO.setup(10, GPIO.OUT, initial=GPIO.LOW)
while (True):
time.sleep(1)
ps = subprocess.Popen(['iwgetid'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
output = subprocess.check_output(('grep', 'ESSID'), stdin=ps.stdout)
#print(output)
GPIO.output(10, GPIO.HIGH)
except subprocess.CalledProcessError:
# grep did not match any lines
#print("No wireless networks connected")
GPIO.output(10, GPIO.LOW)
|
# Simple:
# a --> b
# --> c --> d
# --> d
graph1 = {
"a": ["b", "c", "d"],
"b": [],
"c": ["d"],
"d": []
}
# 2 components
graph2 = {
"a": ["b", "c", "d"],
"b": [],
"c": ["d"],
"d": [],
"e": ["g", "f", "q"],
"g": [],
"f": [],
"q": []
}
# cycle
graph3 = {
"a": ["b", "c", "d"],
"b": [],
"c": ["d", "e"],
"d": [],
"e": ["g", "f", "q"],
"g": ["c"],
"f": [],
"q": []
}
from collections import deque
GRAY, BLACK = 0, 1
def topological(graph):
order, enter, state = deque(), set(graph), {}
def dfs(node):
state[node] = GRAY
for k in graph.get(node, ()):
sk = state.get(k, None)
if sk == GRAY: raise ValueError("cycle")
if sk == BLACK: continue
enter.discard(k)
dfs(k)
order.appendleft(node)
state[node] = BLACK
while enter: dfs(enter.pop())
return order
# check how it works
print(topological(graph1))
print(topological(graph2))
try: topological(graph3)
except ValueError: print("Cycle!")
|
{% if cookiecutter.use_bfio -%}
from bfio.bfio import BioReader, BioWriter
import bioformats
import javabridge as jutil
{%- endif %}
import argparse, logging, subprocess, time, multiprocessing, sys
import numpy as np
from pathlib import Path
if __name__=="__main__":
# Initialize the logger
logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
''' Argument parsing '''
logger.info("Parsing arguments...")
parser = argparse.ArgumentParser(prog='main', description='{{ cookiecutter.project_short_description }}')
# Input arguments
{% for inp,val in cookiecutter._inputs|dictsort -%}
parser.add_argument('--{{ inp }}', dest='{{ inp }}', type=str,
help='{{ val.description }}', required={{ val.required }})
{% endfor -%}
# Output arguments
{%- for out,val in cookiecutter._outputs|dictsort %}
parser.add_argument('--{{ out }}', dest='{{ out }}', type=str,
help='{{ val.description }}', required=True)
{% endfor %}
# Parse the arguments
args = parser.parse_args()
{% for inp,val in cookiecutter._inputs|dictsort -%}
{% if val.type=="boolean" -%}
{{ inp }} = args.{{ inp }} == 'true'
logger.info('{{ inp }} = {}'.format({{ inp }}))
{% else -%}
{{ inp }} = args.{{ inp }}
{% if val.type=="collection" and cookiecutter.use_bfio -%}
if (Path.is_dir(Path(args.{{ inp }}).joinpath('images'))):
# switch to images folder if present
fpath = str(Path(args.{{ inp }}).joinpath('images').absolute())
{% endif -%}
logger.info('{{ inp }} = {}'.format({{ inp }}))
{% endif -%}
{% endfor %}
{%- for out,val in cookiecutter._outputs|dictsort -%}
{{ out }} = args.{{ out }}
logger.info('{{ out }} = {}'.format({{ out }}))
{%- endfor %}
# Surround with try/finally for proper error catching
try:
{% if cookiecutter.use_bfio -%}
# Start the javabridge with proper java logging
logger.info('Initializing the javabridge...')
log_config = Path(__file__).parent.joinpath("log4j.properties")
jutil.start_vm(args=["-Dlog4j.configuration=file:{}".format(str(log_config.absolute()))],class_path=bioformats.JARS)
{% endif -%}
{% for inp,val in cookiecutter._inputs|dictsort -%}
{% if val.type=="collection" -%}
# Get all file names in {{ inp }} image collection
{{ inp }}_files = [f.name for f in Path({{ inp }}).iterdir() if f.is_file() and "".join(f.suffixes)=='.ome.tif']
{% endif %}
{% endfor -%}
{% for inp,val in cookiecutter._inputs|dictsort -%}
{% for out,n in cookiecutter._outputs|dictsort -%}
{% if val.type=="collection" and cookiecutter.use_bfio -%}
# Loop through files in {{ inp }} image collection and process
for i,f in enumerate({{ inp }}_files):
# Load an image
br = BioReader(Path({{ inp }}).joinpath(f))
image = np.squeeze(br.read_image())
# initialize the output
out_image = np.zeros(image.shape,dtype=br._pix['type'])
""" Do some math and science - you should replace this """
logger.info('Processing image ({}/{}): {}'.format(i,len({{ inp }}_files),f))
out_image = awesome_math_and_science_function(image)
# Write the output
bw = BioWriter(Path({{ out }}).joinpath(f),metadata=br.read_metadata())
bw.write_image(np.reshape(out_image,(br.num_y(),br.num_x(),br.num_z(),1,1)))
{%- endif %}{% endfor %}{% endfor %}
finally:
{%- if cookiecutter.use_bfio %}
# Close the javabridge regardless of successful completion
logger.info('Closing the javabridge')
jutil.kill_vm()
{%- endif %}
# Exit the program
sys.exit()
|
import pickle as pkl
import os.path
import geopandas as gpd
import pandas as pd
import pandas_explode
pandas_explode.patch() # adds a `df.explode` method to all DataFrames
# above should be removed for Python 3.8 but as long as we're using
# dash we're on 3.7
import shapely
from shapely.geometry import Point, LineString
import sidewalkify
import networkx as nx
import osmnx as ox
import geometry
import util
import elevation
LOCAL_CRS = "EPSG:26919"
GLOBAL_CRS = "EPSG:4326"
OFFSET = 6
# start with a shapefile
# we start with:
print("load streets")
streets = gpd.read_file("data/brighton/brighton_streets.shp")
assert util.is_global(streets)
def add_sidewalks(gdf):
gdf['sw_left'] = OFFSET
gdf['sw_right'] = OFFSET
return gdf
# explodes rows with MultiLineStrings into multiple rows with LineStrings
def explode_geometry(gdf):
original_crs = gdf.crs
df_temp = pd.DataFrame(gdf)
df_temp['geometry'] = df_temp['geometry'].map(util.multis_to_line_list_safe)
df_temp = df_temp.explode('geometry')
gdf = gpd.GeoDataFrame(df_temp, geometry='geometry')
gdf.crs = original_crs
return gdf
streets = streets.to_crs(LOCAL_CRS)
streets = explode_geometry(streets)
# make sidewalks!
if not os.path.isfile("test/snapped.shp"):
print("draw sidewalks")
sidewalks = sidewalkify.draw.draw_sidewalks(sidewalkify.graph.graph_workflow(streets), crs=LOCAL_CRS)
sidewalks['geometry'] = sidewalks.geometry.map(util.ls_to_mls)
sidewalks = sidewalks.explode().reset_index(drop=True)
assert len(sidewalks[sidewalks.geometry.map(lambda x: len(x.coords) != 2)]) == 0
# sidewalks.geometry = sidewalks.geometry.map(geometry.round_edge)
print("snap sidewalks")
# all_sidewalks = shapely.ops.unary_union(pd.Series(sidewalks.geometry))
# sidewalks.geometry = sidewalks.geometry.apply(lambda x: geometry.snap_endpoints(x, all_sidewalks, 1))
def concat(lst_of_lsts):
return [l for lst in lst_of_lsts for l in lst]
all_points = list(map(shapely.geometry.Point,
concat(list((sidewalks['geometry'].map(lambda x: x.coords[:]).values)))))
all_points = shapely.ops.unary_union(all_points)
def snap_nearby_point(row_geo, geom):
print(row_geo)
line = row_geo
p0, p1 = line.coords[:]
p0 = shapely.geometry.Point(p0)
p1 = shapely.geometry.Point(p1)
p01 = shapely.ops.unary_union([p0, p1])
geom = geom.difference(p01)
p0_new = shapely.ops.snap(p0, geom, 1.5)
p1_new = shapely.ops.snap(p1, geom, 1.5)
geom = shapely.ops.unary_union([geom, p0_new, p1_new])
new_line = shapely.geometry.LineString([p0_new, p1_new])
return new_line
sidewalks.geometry = sidewalks.geometry.map(lambda x: snap_nearby_point(x, all_points))
sidewalks.crs = LOCAL_CRS
# maybe run this a second time if it's still no bueno
sidewalks.to_file("test/snapped.shp")
else:
print("loading snapped.shp")
sidewalks = gpd.read_file("test/snapped.shp")
sidewalks = sidewalks.to_crs(GLOBAL_CRS)
## add elevation
print("add elevation")
sidewalks = elevation.add_angle(sidewalks)
sidewalks.to_file("test/elevated.shp")
assert sidewalks.crs == GLOBAL_CRS
sidewalks = sidewalks.to_crs(LOCAL_CRS)
# sidewalks.geometry = sidewalks.geometry.map(geometry.round_edge)
print("build graph")
# put together points, index them, etc.
# TODO: good recc from pylint to change this to a set comprehension
# TODO: surely this can be improved anyway
sw_points = gpd.GeoDataFrame(list(
map(Point,
(list(set([point for ls in list(sidewalks.geometry.map(lambda x: list(x.coords)).values) for point in ls])))
)
))
sw_points.geometry = sw_points[0]
sw_points.crs = LOCAL_CRS
len_sw = len(list(sw_points.geometry.map(lambda x: x.coords)))
sw_coord_dict = dict(list(set(zip(list(sw_points.geometry.map(lambda x: tuple(x.coords)[0])), range(len_sw)))))
sidewalks['u'] = sidewalks.geometry.map(lambda x: sw_coord_dict[x.coords[0]])
sidewalks['v'] = sidewalks.geometry.map(lambda x: sw_coord_dict[x.coords[-1]])
sidewalks['key'] = 0
sw_points['id'] = sw_points.geometry.map(lambda x: sw_coord_dict[x.coords[0]])
sw_points['osmid'] = sw_points.id
sidewalks['osmid'] = sidewalks.index.map(lambda x: 100000 * x)
with open('test/sw_points_dict.pkl', 'wb') as pklfile:
pkl.dump(sw_points, pklfile)
assert sidewalks.crs == LOCAL_CRS
assert sw_points.crs == LOCAL_CRS
sidewalks = sidewalks.to_crs(GLOBAL_CRS)
sw_points = sw_points.to_crs(GLOBAL_CRS)
sw_points['x'] = sw_points.geometry.map(lambda x: x.coords[0][1])
sw_points['y'] = sw_points.geometry.map(lambda x: x.coords[0][0])
sidewalks_G = ox.graph_from_gdfs(sw_points, sidewalks)
def angle_reverse(G):
rev_edges = nx.reverse(G).edges(data=True)
def reverse_line(linestring):
p0, p1 = linestring.coords[:]
return LineString([Point(p1), Point(p0)])
def rev_angle(dic):
dic['angle_deg'] = -dic['angle_deg']
dic['geometry'] = reverse_line(dic['geometry'])
return dic
return [(u, v, rev_angle(dat)) for (u, v, dat) in rev_edges]
sidewalks_G.add_edges_from(angle_reverse(sidewalks_G))
print(len(sidewalks_G.edges))
## time to build crosswalks
print("build crosswalks")
# TODO: this is not pipeline-y!
intersections = gpd.read_file("data/brighton/brighton_points_clean.shp")
geometry.add_crosswalks(sidewalks_G, intersections)
print(len(sidewalks_G.edges))
with open("test/brighton_G.pkl", 'wb') as pklfile:
pkl.dump(sidewalks_G, pklfile)
sidewalks = ox.graph_to_gdfs(sidewalks_G, nodes=False, edges=True)
assert sidewalks.crs == GLOBAL_CRS
sidewalks.to_file("test/final.shp")
|
from __future__ import unicode_literals
import requests
from requests.compat import urljoin
CLIENT_ID = '1e51d85f1b6d4025b6a5aa47bc61bf1c'
CLIENT_SECRET = 'af02034c5808483f9c09a693feadd0d6'
DISK_BASE_URL = 'https://cloud-api.yandex.net/v1/disk/'
OAUTH_TOKEN_URL = 'https://oauth.yandex.com/token'
SHORTLINK_URL = 'https://clck.ru/Dsvit'
BROWSE_LIMIT = (1 << 31) - 1
BROWSE_DIR_FIELDS = ','.join(
'_embedded.items.' + field
for field in ['file', 'media_type', 'name', 'path', 'type']
)
LIST_FILES_FIELDS = ','.join(
'items.' + field
for field in ['file', 'name', 'path', 'type']
)
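# Illustrative note (not in the original): the comprehensions above expand to
# comma-separated field selectors, e.g. LIST_FILES_FIELDS evaluates to
# 'items.file,items.name,items.path,items.type'.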
class YDiskException(Exception):
def __init__(self, message, error_code):
        super(YDiskException, self).__init__(message)
        self.message = message
        self.error_code = error_code
def __str__(self):
return '[%s] %s' % (self.error_code, self.message)
@classmethod
def from_json(cls, json):
code = json['error']
description = json.get('description') or json.get('error_description')
return cls(description, code)
class YDiskSession(requests.Session):
def __init__(self, base_url, proxy, user_agent, token=None):
super(YDiskSession, self).__init__()
self.base_url = base_url
self.headers.update({'User-Agent': user_agent})
self.proxies.update({'http': proxy, 'https': proxy})
if token:
self.headers.update({'Authorization': 'OAuth ' + token})
def request(self, method, url, *args, **kwargs):
return super(YDiskSession, self).request(
method, urljoin(self.base_url, url), *args, **kwargs
)
class YDiskDirectory(object):
def __init__(self, name, path):
self.name = name
self.path = path
class YDiskFile(object):
def __init__(self, session, name, path, download_link):
self._session = session
self.download_link = download_link
self.name = name
self.path = path
class YDisk(object):
@staticmethod
def exchange_token(auth_code, proxy, user_agent):
request_data = {
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'code': auth_code,
'grant_type': 'authorization_code'
}
with YDiskSession(OAUTH_TOKEN_URL, proxy, user_agent) as session:
response = session.post('', data=request_data)
if response.ok:
return response.json()['access_token']
else:
raise YDiskException.from_json(response.json())
def __init__(self, token, proxy, user_agent):
self._session = YDiskSession(DISK_BASE_URL, proxy, user_agent, token)
request_params = {
'fields': 'user.login,user.display_name'
}
response = self._session.get('', params=request_params)
if response.ok:
user = response.json()['user']
self.id = user['login']
self.name = user.get('display_name') or self.id
else:
raise YDiskException.from_json(response.json())
def dispose(self):
self._session.close()
def browse_dir(self, path):
request_params = {
'fields': BROWSE_DIR_FIELDS,
'limit': BROWSE_LIMIT,
'path': path,
'sort': 'name'
}
response = self._session.get('resources', params=request_params)
if response.ok:
for item in response.json()['_embedded']['items']:
name = item['name']
path = YDisk._get_item_path(item)
if item['type'] == 'dir':
yield YDiskDirectory(name=name, path=path)
elif item['media_type'] == 'audio':
yield YDiskFile(
session=self._session,
name=name,
path=path,
download_link=item['file']
)
else:
raise YDiskException.from_json(response.json())
def get_file(self, path):
request_params = {
'fields': 'name,file',
'path': path
}
response = self._session.get('resources', params=request_params)
if response.ok:
file_info = response.json()
return YDiskFile(
session=self._session,
name=file_info['name'],
path=path,
download_link=file_info['file']
)
else:
raise YDiskException.from_json(response.json())
def list_files(self, media_type='audio'):
request_params = {
'fields': LIST_FILES_FIELDS,
'limit': BROWSE_LIMIT,
'media_type': media_type
}
response = self._session.get('resources/files', params=request_params)
if response.ok:
for item in response.json()['items']:
if item['type'] == 'file':
yield YDiskFile(
session=self._session,
name=item['name'],
path=YDisk._get_item_path(item),
download_link=item['file']
)
else:
raise YDiskException.from_json(response.json())
@staticmethod
    def _get_item_path(item):
        # str.lstrip strips a character set, not a prefix, so remove the scheme explicitly
        path = item['path']
        return path[len('disk:'):] if path.startswith('disk:') else path
|
import kdb
import socket
"""
Usage:
> sudo kdb mount file.ini /python python script=/path/to/dns_plugin.py
> kdb meta-set user:/python/my_hostname check/dns ''
> kdb set user:/python/my_hostname www.libelektra.org
"""
META_DNS_NAME = "meta:/check/dns"
def get_ipv4_by_hostname(hostname) -> bool:
return bool([
i[4][0] # address
for i in socket.getaddrinfo(hostname, 0)
if i[0] is socket.AddressFamily.AF_INET and i[1] is socket.SocketKind.SOCK_RAW
])
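# Illustrative example (assumption, not part of the original plugin):
# get_ipv4_by_hostname("www.libelektra.org") returns True when at least one
# IPv4 entry resolves for the name, and typically raises socket.gaierror when
# resolution fails; check_key() below turns that exception into False.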
def check_key(key: kdb.Key):
    # only check keys that have the META_DNS_NAME metakey set
if m := key.getMeta(META_DNS_NAME):
if key.value != '':
try:
return get_ipv4_by_hostname(key.value)
except Exception as e:
return False
return True
class ElektraDNSPlugin(object):
def __init__(self):
pass
def open(self, config: kdb.KeySet, errorKey):
"""
returns:
# - nil or 0: no error
# - -1 : error during initialization
"""
return 0
def get(self, returned: kdb.KeySet, parentKey: kdb.Key):
"""
# - nil or 1 : on success
# - 0 : OK but nothing was to do
# - -1 : failure
"""
mod = "system:/elektra/modules/python"
if parentKey.name == mod:
returned.append(kdb.Key(mod, kdb.KEY_VALUE, "contract below"))
returned.append(kdb.Key(mod + "/infos", kdb.KEY_VALUE, "contract below"))
returned.append(kdb.Key(mod + "/infos/license", kdb.KEY_VALUE, "BSD"))
returned.append(kdb.Key(mod + "/infos/provides", kdb.KEY_VALUE, "check"))
returned.append(kdb.Key(mod + "/infos/status", kdb.KEY_VALUE, "maintained"))
returned.append(kdb.Key(mod + "/infos/placements", kdb.KEY_VALUE, "postgetstorage presetstorage"))
returned.append(kdb.Key(mod + "/infos/description", kdb.KEY_VALUE, "checks if name is resolvable"))
return 1
warning_list = []
for k in returned:
if not check_key(k):
warning_list.append(k)
print(f"Couldn't resolve domain name for key: {k}")
if warning_list:
parentKey.setMeta("warnings", str(len(warning_list)))
c = 0
for warn_key in warning_list:
if c == 100:
c = 0
if c > 9:
index = "#_" + str(c)
else:
index = "#" + str(c)
parentKey.setMeta(f"warnings/{index}/number", "C03200")
parentKey.setMeta(f"warnings/{index}/description", "Validation Semantic")
parentKey.setMeta(f"warnings/{index}/reason", f"Failed to resolve domain name for key {warn_key}")
parentKey.setMeta(f"warnings/{index}/module", "python check/dns script")
parentKey.setMeta(f"warnings/{index}/file", "unknown")
parentKey.setMeta(f"warnings/{index}/line", "0")
parentKey.setMeta(f"warnings/{index}/mountpoint", str(parentKey.name))
parentKey.setMeta(f"warnings/{index}/configfile", str(parentKey.value))
c += 1
return -1
return 1
def set(self, returned: kdb.KeySet, parentKey: kdb.Key):
"""
# - nil or 1 : on success
# 0 : on success with no changed keys in database
# -1 : failure
"""
for k in returned:
if not check_key(k):
parentKey.setMeta("error", f"number description reason module")
parentKey.setMeta("error/number", "C03200")
parentKey.setMeta("error/description", "Validation Semantic")
parentKey.setMeta("error/reason", f"Failed to resolve domain name for key {k}")
parentKey.setMeta("error/module", "python check/dns script")
parentKey.setMeta("error/file", "unknown")
parentKey.setMeta("error/line", "0")
parentKey.setMeta("error/mountpoint", str(parentKey.name))
parentKey.setMeta("error/configfile", str(parentKey.value))
print(f"Couldn't validate key {k}")
return -1
return 1
def error(self, returned: kdb.KeySet, parentKey: kdb.Key):
"""
# - nil or 1 : on success
# 0 : on success with no action
# -1 : failure
"""
return 1
def close(self, errorKey):
return 0
|
colors = ['yellow', 'blue', 'red']
for color in colors:
    print(color)
|
#!/usr/bin/env python
'''
MIT License
Copyright (c) 2017 Tairan Liu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__author__ = "Tairan Liu"
__copyright__ = "Copyright 2017, Tairan Liu"
__credits__ = ["Tairan Liu", "Other Supporters"]
__license__ = "MIT"
__version__ = "0.4-dev"
__maintainer__ = "Tairan Liu"
__email__ = "liutairan2012@gmail.com"
__status__ = "Development"
class QuadStates():
def __init__(self, frameid, addr_long, addr_short):
self.frame_id = frameid
self.address_long = addr_long
self.address_short = addr_short
self.msp_api_version = {'msp_protocol_version':0,'api_version_major':0,'api_version_minor':0}
self.msp_board_info = {'board_identifier':'','hardware_revision':0}
self.msp_ident = {'version':0, 'multitype':0, 'msp_version':0, 'capability':0}
self.msp_misc = {'intPowerTrigger1':0, 'conf_minthrottle':0, 'maxthrottle':0, 'mincommand':0, 'failsafe_throttle':0, 'plog_arm_counter':0, 'plog_lifetime':0, 'conf_mag_declination':0, 'conf_vbatscale':0, 'conf_vbatlevel_warn1':0, 'conf_vbatlevel_warn2':0, 'conf_vbatlevel_crit':0}
self.sensor_flags = {'hardware':0, 'pitot':0, 'sonar':0, 'gps':0, 'mag':0, 'baro':0, 'acc':0}
self.msp_altitude = {'estalt':0, 'vario':0}
self.msp_sonar_altitude = {'sonar_altitude':0}
# GPS
self.msp_raw_gps = {'gps_fix':0, 'gps_numsat':0, 'gps_lat':0, 'gps_lon':0, 'gps_altitude':0, 'gps_speed':0, 'gps_ground_course':0, 'gps_hdop':0}
self.msp_comp_gps = {'range':0, 'direction':0, 'update':0}
self.msp_gps_svinfo = {'gps_hdop':0}
self.msp_gps_statistics = {'gps_last_message_dt':0, 'gps_errors':0, 'gps_timeouts':0, 'gps_packet_count':0, 'gps_hdop':0, 'gps_eph':0, 'gps_epv':0}
self.msp_attitude = {'angx':0, 'angy':0, 'heading':0}
self.msp_wp = {'wp_no':0, 'action':0, 'lat':0, 'lon':0, 'altitude':0, 'p1':0, 'p2':0, 'p3':0, 'flag':0}
self.msp_nav_status = {'nav_mode':0, 'nav_state':0, 'action':0, 'wp_number':0, 'nav_error':0, 'mag_hold_heading':0} # 'target_bearing'
self.msp_nav_config = {'flag1':0, 'flag2':0, 'wp_radius':0, 'safe_wp_distance':0, 'nav_max_altitude':0, 'nav_speed_max':0, 'nav_speed_min':0, 'crosstrack_gain':0, 'nav_bank_max':0, 'rth_altitude':0, 'land_speed':0, 'fence':0, 'max_wp_number':0}
self.msp_radio = {'rxerrors':0, 'fixed_errors':0, 'localrssi':0, 'remrssi':0, 'txbuf':0, 'noise':0, 'remnoise':0}
self.msp_rc_tuning = {'rc_rate':0, 'rc_expo':0, 'rollpitchrate':0, 'yawrate':0, 'dynthrpid':0, 'throttle_mid':0, 'throttle_expo':0}
self.msp_analog = {'vbat':0, 'powermetersum':0, 'rssi':0, 'amps':0}
self.nav_poshold = {'nav_user_control_mode':0, 'nav_max_speed':0, 'nav_max_climb_rate':0, 'nav_manual_speed':0, 'nav_manual_climb_rate':0, 'nav_mc_bank_angle':0, 'nav_use_midthr_for_althold':0, 'nav_mc_hover_thr':0, 'reserved':[0,0,0,0,0,0,0,0]}
self.rcChannels = {'roll':0,'pitch':0,'yaw':0,'throttle':0,'aux1':0,'aux2':0,'aux3':0,'aux4':0,'elapsed':0,'timestamp':0}
self.rawIMU = {'ax':0,'ay':0,'az':0,'gx':0,'gy':0,'gz':0,'mx':0,'my':0,'mz':0,'elapsed':0,'timestamp':0}
self.motor = {'m1':0,'m2':0,'m3':0,'m4':0,'elapsed':0,'timestamp':0}
self.attitude = {'angx':0,'angy':0,'heading':0,'elapsed':0,'timestamp':0}
self.message = {'angx':0,'angy':0,'heading':0,'roll':0,'pitch':0,'yaw':0,'throttle':0,'elapsed':0,'timestamp':0}
self.msp_status = {'cycleTime':0,'i2cError':0,'activeSensors':0,'flightModeFlags':0,'profile':0}
self.msp_status_ex = {'cycletime':0, 'i2cError':0, 'activeSensors':0, 'flightModeFlags':0, 'profile':0, 'averageSystemLoadPercent':0, 'armingFlags':0}
self.msp_sensor_status = {'hardware_health':0, 'gyro':0, 'acce':0, 'comp':0, 'baro':0, 'gps':0, 'range':0, 'pitot':0}
self.msp_loop_time = {'looptime':0}
self.activeBoxes = []
self.flightModes = {'ARM':0,'ANGLE':0,'HORIZON':0,'FAILSAFE':0,'ALTHOLD':0,
'MAG':0,'HEADFREE':0,'HEADADJ':0,'NAVRTH':0,'POSHOLD':0,
'PASSTHRU':0,'HOMERESET':0,'NAVWP':0,'AIRMODE':0,'GCSNAV':0,
'HEADINGLOCK':0,'SURFACE':0,'TURNASSIST':0,'NAVLAUNCH':0}
self.armStatus = {'OK_TO_ARM':0, 'PREVENT_ARMING':0, 'ARMED':0,
'WAS_EVER_ARMED':0, 'BLOCK_UAV_NOT_LEVEL':0,
'BLOCK_SENSORS_CALIB':0, 'BLOCK_SYSTEM_OVERLOAD':0,
'BLOCK_NAV_SAFETY':0, 'BLOCK_COMPASS_NOT_CALIB':0,
'BLOCK_ACC_NOT_CALIB':0, 'UNUSED':0, 'BLOCK_HARDWARE_FAILURE':0}
self.missionList = []
self.tempMission = []
self.downloadMissionList = []
|
# ======================================================================
# Syntax Scoring
# Advent of Code 2021 Day 10 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# t e s t _ l i n e . p y
# ======================================================================
"Test Line for Advent of Code 2021 day 10, Syntax Scoring"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import unittest
import line
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
EXAMPLES = [
{'text': "[]", 'corrupted': False, 'incomplete': False, 'score': 0},
{'text': "{()()()}", 'corrupted': False, 'incomplete': False, 'score': 0},
{'text': "[<>({}){}[([])<>]]", 'corrupted': False, 'incomplete': False, 'score': 0},
{'text': "(]", 'corrupted': True, 'incomplete': False, 'score': 57},
{'text': "{()()()>", 'corrupted': True, 'incomplete': False, 'score': 25137},
{'text': "[({(<(())[]>[[{[]{<()<>>", 'corrupted': False, 'incomplete': True, 'score': 288957},
{'text': "[(()[<>])]({[<{<<[]>>(", 'corrupted': False, 'incomplete': True, 'score': 5566},
{'text': "(((({<>}<{<{<>}{[]{[]{}", 'corrupted': False, 'incomplete': True, 'score': 1480781},
{'text': "{<[[]]>}<{[{[{[]{()[[[]", 'corrupted': False, 'incomplete': True, 'score': 995444},
{'text': "<{([{{}}[<[[[<>{}]]]>[]]", 'corrupted': False, 'incomplete': True, 'score': 294},
]
# ======================================================================
# TestLine
# ======================================================================
class TestLine(unittest.TestCase): # pylint: disable=R0904
"Test Line object"
def test_empty_init(self):
"Test the default Line creation"
# 1. Create default Line object
myobj = line.Line()
# 2. Make sure it has the default values
self.assertEqual(myobj.part2, False)
self.assertEqual(myobj.text, None)
self.assertEqual(myobj.corrupted, False)
self.assertEqual(myobj.incomplete, False)
self.assertEqual(myobj.illegal, None)
self.assertEqual(myobj.score, 0)
def test_text_init(self):
"Test the Line object creation from text"
# 1. Loop for all of the examples
for example in EXAMPLES:
# 2. Create the line
myobj = line.Line(text=example['text'])
            # 3. Make sure it has the expected values
self.assertEqual(myobj.part2, False)
self.assertEqual(myobj.text, example['text'])
self.assertEqual(myobj.corrupted, example['corrupted'])
self.assertEqual(myobj.incomplete, example['incomplete'])
self.assertEqual(myobj.score, example['score'])
if example['corrupted']:
self.assertTrue(myobj.illegal in line.ENDERS)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
    unittest.main()
# ======================================================================
# end t e s t _ l i n e . p y end
# ======================================================================
|
from . import data_structures
|
import sys
import dataclasses
import string
from typing import Optional, List
source = ''
source_index = 0
tokens = []
token_index = 0
def get_char() -> Optional[str]:
global source, source_index
if source_index == len(source):
        # since we return str rather than bytes for the time being,
        # return None as a value that can stand in for 0 (end of input)
return None
char = source[source_index]
source_index += 1
return char
def unget_char() -> None:
global source_index
source_index -= 1
@dataclasses.dataclass
class Token:
kind: str # "intliteral", "punct"
value: str
def read_number(char: str) -> str:
number = [char]
while True:
char = get_char()
if char is None:
break
if '0' <= char and char <= '9':
number.append(char)
else:
unget_char()
break
return ''.join(number)
def tokenize() -> List[Token]:
tokens = []
print('# Tokens : ', end='')
while True:
char = get_char()
if char is None:
break
if char in string.digits:
intliteral = read_number(char)
token = Token('intliteral', intliteral)
tokens.append(token)
print(f" '{token.value}'", end='')
elif char in [' ', '\t', '\n']:
continue
elif char in [';', '+', '-', '*', '/']:
token = Token('punct', char)
tokens.append(token)
print(f" '{token.value}'", end='')
else:
raise Exception(f"tokenizer: Invalid char: '{char}'")
# golang panic: output exit status 2 to stderr
# but, $? is 1
print()
return tokens
def get_token() -> Optional[Token]:
global tokens, token_index
if token_index == len(tokens):
return None
token = tokens[token_index]
token_index += 1
return token
@dataclasses.dataclass
class Expr:
"""golang のstruct に合わせてdefault 設定
"""
kind: str # "intliteral", "unary"
intval: int = 0 # for intliteral
operator: str = '' # "-", "+", ...
# https://www.python.org/dev/peps/pep-0484/#forward-references
operand: Optional['Expr'] = None # for unary expr
left: Optional['Expr'] = None # for binary expr
right: Optional['Expr'] = None # for binary expr
def parse_unary_expr() -> Optional[Expr]:
token = get_token()
if token.kind == 'intliteral':
intval = int(token.value)
return Expr('intliteral', intval=intval)
elif token.kind == 'punct':
return Expr('unary',
operator=token.value,
operand=parse_unary_expr())
else:
raise Exception('Unexpected token')
def parse() -> Expr:
expr = parse_unary_expr()
while True:
token = get_token()
if token is None or token.value == ';':
return expr
if token.value in ['+', '-', '*', '/']:
return Expr('binary',
operator=token.value,
left=expr,
right=parse_unary_expr())
else:
raise Exception(f'unexpected token: {token.value}')
def generate_expr(expr: Expr) -> None:
if expr.kind == 'intliteral':
print(f' movq ${expr.intval}, %rax')
elif expr.kind == 'unary':
if expr.operator == '-':
print(f' movq $-{expr.operand.intval}, %rax')
        elif expr.operator == '+':
print(f' movq ${expr.operand.intval}, %rax')
else:
raise Exception(f'generator: Unknown unary operator: {expr.operator}')
elif expr.kind == 'binary':
print(f' movq ${expr.left.intval}, %rax')
print(f' movq ${expr.right.intval}, %rcx')
if expr.operator == '+':
print(' addq %rcx, %rax')
elif expr.operator == '-':
print(' subq %rcx, %rax')
elif expr.operator == '*':
print(' imulq %rcx, %rax')
elif expr.operator == '/':
print(' movq $0, %rdx')
print(' idiv %rcx')
else:
raise Exception(f'generator: Unknown binary operator: {expr.operator}')
else:
raise Exception(f'generator: Unknown expr.kind: {expr.kind}')
def generate_code(expr: Expr) -> None:
print(' .global main')
print('main:')
generate_expr(expr)
print(' ret')
def main() -> None:
global source, tokens
source = sys.stdin.read()
tokens = tokenize()
expr = parse()
generate_code(expr)
if __name__ == '__main__':
main()
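# Hedged usage sketch (assumed workflow, not part of the original file): the
# script reads a tiny expression language from stdin and emits x86-64 assembly
# on stdout, so a typical invocation might look like
#   echo '1 + 2;' | python3 mini_compiler.py > out.s && gcc out.s -o out && ./out; echo $?
# where the file name `mini_compiler.py` is hypothetical; the exit status holds
# the computed value (3 in this example).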
|
import glob
import sys
import os
import posixpath
import time
import numpy
from matplotlib.pyplot import subplots, colorbar
import pyFAI, pyFAI.units
from pyFAI.test.utilstest import UtilsTest
import fabio
from matplotlib.colors import LogNorm
import scipy.optimize
from pyFAI.opencl.peak_finder import OCL_PeakFinder
import gc
import shutil
from pyFAI.ext.bilinear import Bilinear
pyfai_color = "limegreen"
onda_color = "orange"
#Installation of a local copy of the Cython-bound peakfinder8
targeturl = "https://github.com/kif/peakfinder8"
targetdir = posixpath.split(targeturl)[-1]
if os.path.exists(targetdir):
shutil.rmtree(targetdir, ignore_errors=True)
pwd = os.getcwd()
try:
os.system("git clone " + targeturl)
os.chdir(targetdir)
os.system(sys.executable + " setup.py build")
except Exception as err:
print(err)
finally:
os.chdir(pwd)
sys.path.append(pwd+"/"+glob.glob(f"{targetdir}/build/lib*")[0])
from ssc.peakfinder8_extension import peakfinder_8
img = UtilsTest.getimage("Pilatus6M.cbf")
geo = UtilsTest.getimage("Pilatus6M.poni")
method = ("no", "csr", "cython")
unit = pyFAI.units.to_unit("q_nm^-1")
dummy = -2
ddummy=1.5
npt = 500
repeat = 10
SNR=3
noise=1.0
nb = 2
him = 4
hiM = 999
max_num_peaks = 10000
polarization_factor = 0.90
ai = pyFAI.load(geo)
print(ai)
fimg = fabio.open(img)
msk = fimg.data<=0
fixed = fimg.data.copy()
fixed[msk] = 1
polarization = ai.polarization(factor=polarization_factor)
fig,ax = subplots( figsize=(12,8))
#fig.tight_layout(pad=3.0)
ln = LogNorm(1, fimg.data.max())
mimg = ax.imshow(fixed, norm=ln, interpolation="hanning", cmap="viridis")
int1d = ai.integrate1d(fimg.data, npt, unit=unit, method=method)
m = list(ai.engines.keys())[0]
integrator = ai.engines[m].engine
r2d = ai._cached_array[unit.name.split("_")[0] + "_center"]
r2dp = (r2d/ai.detector.pixel1).astype(numpy.float32)
data = fimg.data.astype(numpy.float32)
pmsk = (1-msk).astype(numpy.int8)
kwargs_pf = {"max_num_peaks":max_num_peaks,
"data":data,
"mask":pmsk,
"pix_r":r2dp,
"asic_nx":ai.detector.shape[1],
"asic_ny":ai.detector.shape[0],
"nasics_x":1,
"nasics_y":1,
"adc_thresh":noise,
"hitfinder_min_snr":SNR,
"hitfinder_min_pix_count":him,
"hitfinder_max_pix_count":hiM,
"hitfinder_local_bg_radius":nb}
res1 = peakfinder_8(**kwargs_pf)
kwargs_py = {"data":fimg.data,
"dummy": dummy, "delta_dummy":ddummy,
"error_model": "azimuthal",
"cutoff_clip":0,
"cycle":3,
"noise":noise,
"cutoff_pick":SNR,
"patch_size":2*nb+1,
"connected":him,
"polarization": polarization
}
print(f"Len of Cheetah result: {len(res1[0])}")
gc.disable()
t0 = time.perf_counter()
for i in range(repeat):
res1 = peakfinder_8(**kwargs_pf)
t1 = time.perf_counter()
gc.enable()
print(f"Execution_time for Cheetah: {1000*(t1-t0)/repeat:.3f}ms")
pf = OCL_PeakFinder(integrator.lut,
image_size=fimg.shape[0] * fimg.shape[1],
empty=0,
unit=unit,
bin_centers=integrator.bin_centers,
radius=ai._cached_array[unit.name.split("_")[0] + "_center"],
mask=msk.astype("int8"),
profile=True)
print(pf, pf.ctx.devices[0])
res = pf.peakfinder8(**kwargs_py)
print(f"Len of pyFAI result: {len(res)}")
gc.disable()
t0 = time.perf_counter()
for i in range(repeat):
res = pf.peakfinder8(**kwargs_py)
t1 = time.perf_counter()
gc.enable()
print("\n".join(pf.log_profile(1)))
print(f"Execution_time for pyFAI: {1000*(t1-t0)/repeat:.3f}ms")
ax.plot(res["pos1"], res["pos0"], "1", color=pyfai_color, label="pyFAI")
ax.plot(res1[0], res1[1], "2", color=onda_color, label="Onda")
ax.legend()
fig.savefig("peakfinder.eps")
fig.savefig("peakfinder.png")
fig.show()
print("# Histogram")
fig,ax = subplots( figsize=(12,8))
rmax = 44
interp = Bilinear(r2d)
r_ch = [interp(i) for i in zip(res1[1], res1[0])]
r_py = [interp(i) for i in zip(res["pos0"], res["pos1"])]
#ax.hist(r_py, rmax+1, range=(0, rmax), label="pyFAI", alpha=0.8)
#ax.hist(r_ch, rmax+1, range=(0, rmax), label="Cheetah", alpha=0.8)
hpy = numpy.histogram(r_py, rmax+1, range=(0, rmax))
hch = numpy.histogram(r_ch, rmax+1, range=(0, rmax))
ax.plot(0.5*(hpy[1][1:]+hpy[1][:-1]), hpy[0], "-", color=pyfai_color, label="pyFAI")
ax.plot(0.5*(hch[1][1:]+hch[1][:-1]), hch[0], "-", color=onda_color, label="Onda")
#ax.set_xlabel(int1d.unit.label)
ax.set_xlabel("Resolution $d$-spacing ($\\AA$)")
ax.set_ylabel("Number of Bragg peaks")
ax.set_title("Density of Bragg peaks per ring")
ax.legend()
#
q1 = ax.get_xticks()
from numpy import pi
#new_labels = [ f"{d:.4f}" for d in 20*pi/flabel]
d1 = 20*pi/q1
d2 = numpy.linspace(len(d1)+int(abs(d1).min()), int(abs(d1).min()), len(d1)+1)
q2 = 20*pi/d2
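# Note (added for clarity): q is in nm^-1 while d is in Angstrom, so
# d[A] = 10 * 2*pi / q[nm^-1], hence the factor 20*pi used above.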
new_labels = [str(int(i)) for i in d2]
ax.set_xticks(q2)
ax.set_xticklabels(new_labels)
ax.set_xlim(0, rmax+1)
fig.show()
#fig.canvas.draw()
#################
fig.savefig("peak_per_ring.eps")
fig.savefig("peak_per_ring.png")
input("finish")
|
# Generated by Django 2.2.4 on 2019-08-11 14:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Intern',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comp_name', models.CharField(max_length=30)),
('job', models.CharField(max_length=30)),
('desc', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Signup1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=30)),
('gender', models.CharField(max_length=10)),
('phone_no', models.IntegerField()),
('status', models.CharField(default='Pending', max_length=30)),
('email', models.EmailField(max_length=254)),
('resume_file', models.FileField(upload_to='resume_folder')),
('internship', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='web_app.Intern')),
],
),
]
|
from scitools.all import *
x0 = 100 # initial amount of individuals
M = 500 # carrying capacity
rho = 4 # initial growth rate in percent
N = 200 # number of time intervals
index_set = range(N+1)
x = zeros(len(index_set))
# Compute solution
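# Discrete logistic growth: x[n] = x[n-1] + (rho/100) * x[n-1] * (1 - x[n-1]/M),
# i.e. growth at rho percent per time unit, damped as x approaches the carrying capacity M.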
x[0] = x0
for n in index_set[1:]:
x[n] = x[n-1] + (rho/100.0)*x[n-1]*(1 - x[n-1]/float(M))
print (x)
plot(index_set, x, 'r', xlabel='time units',
ylabel='no of individuals', hardcopy='tmp.pdf')
|
import math
import os
import numpy as np
from brainex.experiments.harvest_setup import generate_exp_set_from_root, run_exp_set_GENEX, generate_ex_set_GENEX
def run_gx_test(dataset_path, output_dir, dist_types, ex_config, mp_args):
"""
The start and end parameter together make an interval that contains the datasets to be included in this experiment
:param mp_args: the configuration of the multiprocess backend,
go to this site https://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-spark-configure.html for
the correct Spark configuration with AWS; you only need to worry the configs that are exposed to you ->
that is: the number of workers, the max driver memory and the max result size
:param dataset_path: the path to the archive datasets
    :param output_dir: the path to which the result csv's will be saved
    :param dist_types: a list of strings, must contain at least one item; each item must be one of: eu, ch, ma
    :param ex_config: a dict containing hyper-parameters for the experiment. They are
        'num_sample': int, number of samples to consider in each dataset, set this to math.inf for a complete experiment
        'query_split': float, the fraction of the dataset to be taken as queries, use 0.2 for the time being
        '_lb_opt': bool, whether to turn on lower-bounding optimization for DTW, leave it False if not otherwise specified
        'radius': int, the length radius for Genex Query, leave it at 1 if not otherwise specified
        'use_spark': bool, whether to use the Spark backend, leave it True if not otherwise specified
        'loi_range': float, only consider sequences within a percentage length of the longest sequence, use 0.1 for the time being
        'st': float, hyper-parameter that determines the cluster boundary in genex.build, use 0.1 if not otherwise specified
        'paa_seg': the number of PAA segments, use 3 as a heuristic approach
"""
valid_dt = ['eu', 'ch', 'ma']
try:
assert os.path.isdir(dataset_path)
assert os.path.isdir(output_dir)
assert 0 < len(dist_types) <= 3
assert np.all([x in valid_dt for x in dist_types])
except AssertionError:
raise Exception('Assertion failed in checking parameters')
exp_set_list = [generate_ex_set_GENEX(dataset_path, output_dir, dt) for dt in dist_types]
return [run_exp_set_GENEX(es, mp_args, **ex_config) for es in exp_set_list]
'''
Start of the experiment script
'''
if __name__ == "__main__":
# Start of Config Parameters #########################
'''
    check the docstring of the function above - run_gx_test - for details regarding the parameters
'''
dataset = '/home/apocalyvec/data/Genex/datasets85'
output = '/home/apocalyvec/data/Genex/brainex'
dist_types_to_test = ['eu', 'ma', 'ch']
ex_config_test = {
'_lb_opt': False,
'radius': 1,
'use_spark': True,
'st': 0.1,
}
mp_args = {'num_worker': 32,
'driver_mem': 24,
'max_result_mem': 24}
run_gx_test(dataset, output, dist_types=dist_types_to_test, ex_config=ex_config_test, mp_args=mp_args)
|
# thumbnail.py
#
# GameGenerator is free to use, modify, and redistribute for any purpose
# that is both educational and non-commercial, as long as this paragraph
# remains unmodified and in its entirety in a prominent place in all
# significant portions of the final code. No warranty, express or
# implied, is made regarding the merchantability, fitness for a
# particular purpose, or any other aspect of the software contained in
# this module.
import pygame
import gg.utils
class Thumbnail(pygame.sprite.Sprite):
"""A size-reduced representation of a sprite.
The size of the resulting sprite is determined by the height
specified. The image is resized to that height, preserving the aspect
ratio and thus the relative width.
"""
def __init__(self, group, pos, new_height, image_file, image_dir=None):
"""Initialize the thumbnail."""
pygame.sprite.Sprite.__init__(self, group)
self.image, image_rect = gg.utils._load_image(image_file, image_dir,
'a thumbnail')
aspect_ratio = image_rect.width / image_rect.height
new_width = round(new_height * aspect_ratio)
new_size = (new_width, new_height)
# Scale the image
try:
self.image = pygame.transform.smoothscale(self.image, new_size)
except ValueError:
self.image = pygame.transform.scale(self.image, new_size)
self.rect = self.image.get_rect()
self.rect.topleft = pos
def update(self):
"""Nothing is required except completeness :)"""
pass
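# Hedged usage sketch (not part of the original module; the image file name,
# position and height below are hypothetical):
#
#     group = pygame.sprite.Group()
#     thumb = Thumbnail(group, pos=(10, 10), new_height=150, image_file="logo.png")
#     # thumb.image is "logo.png" scaled to a height of 150 px with the aspect
#     # ratio preserved, and thumb.rect.topleft == (10, 10)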
|
import os
import subprocess
from pathlib import Path
from typing import List
from testrunner.GradleFinder import GradleFinder
class TestSuiteRunner:
def __init__(self, couchEditRoot: str = None):
gradleFinder = GradleFinder(couchEditRoot)
self.__gradlePath = gradleFinder.getGradleExecutablePath()
def runTest(self, outputDir: Path, gradleTask: str, testNamePattern: str) -> Path:
params = self.buildCommand(outputDir, gradleTask, testNamePattern)
print("Running {0}".format(testNamePattern), flush=True)
        process = subprocess.run(args=params, cwd=str(self.__gradlePath.parent),
                                 capture_output=True, text=True)
        if process.returncode != 0:
            print("Errors occurred while executing {0}:\nstdOut:{1}\nstdErr:{2}".format(
                testNamePattern,
                process.stdout,
                process.stderr
            ))
return outputDir
def buildCommand(self, outputDir: Path, gradleTask: str, testNamePattern: str) -> List[str]:
gradleString = str(self.__gradlePath)
rerun = '--rerun-tasks'
output = '-DoutDir={0}'.format(str(outputDir))
testNamePatternParam = '--tests'
return [
gradleString,
gradleTask,
rerun,
output,
testNamePatternParam,
testNamePattern
]
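# Illustrative note (assumed example, not part of the original class): for
# outputDir=/tmp/results, gradleTask="test" and testNamePattern="com.example.*",
# buildCommand returns roughly
#   ['<gradle>', 'test', '--rerun-tasks', '-DoutDir=/tmp/results', '--tests', 'com.example.*']
# which runTest then executes from the directory containing the gradle executable.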
|
__author__ = 'luissaguas'
from frappe.celery_app import celery_task
import frappe
pubsub = None
def publish(channel, message):
from frappe.async import get_redis_server
r = get_redis_server()
r.publish(channel, message)
def get_redis_pubsub():
from frappe.async import get_redis_server
global pubsub
if not pubsub:
r = get_redis_server()
pubsub = r.pubsub()
return pubsub
def subscribe(*args, **kwargs):
p = get_redis_pubsub()
p.subscribe(*args, **kwargs)
def run_async_task(site):
#from l10n_pt_hr_salary.tasks import get_message
#print "running async task"
x = X()
#task = x.get_message.delay(site)
x.delay(site)
#print "saguas task %s" % task.id
def get_messages():
p = get_redis_pubsub()
return p.get_message()
#print "prepare for messages for redis pubsub"
#run_async_task()
#subscribe("teste_channel")
#publish("teste_channel", "ola saguas")
#from celery.contrib.methods import task
from celery import Task
#from celery.contrib.methods import task_method
class X(Task):
#class X(object):
#@celery_task()
#@task()
#@current_app.task(filter=task_method)
def run(self, site):
self.get_message(site)
#@current_app.task(filter=task_method)
def get_message(self, site):
#from l10n_pt_hr_salary.utils.utils import get_redis_pubsub
#from l10n_pt_hr_salary import get_redis_pubsub
#import time
frappe.init(site=site)
p = get_redis_pubsub()
subscribe("teste_channel")
from frappe.async import emit_via_redis
#print "waiting from message from redis %s" % p
for message in p.listen():
print "saguas message %s" % message
response = {}
response.update({
"status": "Success",
"task_id": "yowsub",
"result": "ola from python"
})
emit_via_redis("teste_channel2", response, "task:yowsub")
#while True:
# message = p.get_message()
# if message:
# print "message %s" % message
# time.sleep(0.001) # be nice to the system :)
#run_async_task("site2.local")
|
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Abstract classes for solvers
Author: Jeff Mahler
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from dexnet.constants import DEF_MAX_ITER
from dexnet.learning import MaxIterTerminationCondition
import IPython
# class Solver(metaclass=ABCMeta):
class Solver:
__metaclass__ = ABCMeta
def __init__(self, objective):
self.objective_ = objective
@abstractmethod
def solve(self, termination_condition = MaxIterTerminationCondition(DEF_MAX_ITER),
snapshot_rate = 1):
'''
Solves for the maximal / minimal point
'''
pass
class TopKSolver(Solver):
def __init__(self, objective):
Solver.__init__(self, objective)
@abstractmethod
def top_K_solve(self, K, termination_condition = MaxIterTerminationCondition(DEF_MAX_ITER),
snapshot_rate = 1):
'''
Solves for the top K maximal / minimal points
'''
pass
# class SamplingSolver(TopKSolver, metaclass=ABCMeta):
class SamplingSolver(TopKSolver):
""" Optimization methods based on a sampling strategy"""
__metaclass__ = ABCMeta
# class DiscreteSamplingSolver(SamplingSolver, metaclass=ABCMeta):
class DiscreteSamplingSolver(SamplingSolver):
__metaclass__ = ABCMeta
def __init__(self, objective, candidates):
"""
Initialize a solver with a discrete set of candidate points
specified in a list object
"""
self.candidates_ = candidates # discrete candidates
self.num_candidates_ = len(candidates)
TopKSolver.__init__(self, objective)
@abstractmethod
def discrete_maximize(self, candidates, termination_condition, snapshot_rate):
"""
Main loop for sampling-based solvers
"""
pass
def partition(self, K):
"""
Partition the input space into K bins uniformly at random
"""
candidate_bins = []
        indices = np.arange(self.num_candidates_)
        np.random.shuffle(indices)  # shuffles in place and returns None
        candidates_per_bin = int(np.floor(float(self.num_candidates_) / float(K)))
        # loop through bins, adding candidates at random
        start_i = 0
        end_i = min(start_i + candidates_per_bin, self.num_candidates_ - 1)
        for k in range(K-1):
            candidate_bins.append([self.candidates_[i] for i in indices[start_i:end_i]])
            start_i = start_i + candidates_per_bin
            end_i = min(start_i + candidates_per_bin, self.num_candidates_ - 1)
        candidate_bins.append([self.candidates_[i] for i in indices[start_i:end_i]])
return candidate_bins
def solve(self, termination_condition = MaxIterTerminationCondition(DEF_MAX_ITER),
snapshot_rate = 1):
""" Call discrete maxmization function with all candidates """
return self.discrete_maximize(self.candidates_, termination_condition, snapshot_rate)
def top_K_solve(self, K, termination_condition = MaxIterTerminationCondition(DEF_MAX_ITER),
snapshot_rate = 1):
""" Solves for the top K maximal / minimal points """
# partition the input space
if K == 1:
candidate_bins = [self.candidates_]
else:
candidate_bins = self.partition(K)
# maximize over each bin
top_K_results = []
for k in range(K):
top_K_results.append(self.discrete_maximize(candidate_bins[k], termination_condition, snapshot_rate))
return top_K_results
class OptimizationSolver(Solver):
def __init__(self, objective, ineq_constraints = None, eq_constraints = None, eps_i = 1e-2, eps_e = 1e-2):
"""
Inequality constraints: g_i(x) <= 0
        Equality constraints: h_i(x) = 0
"""
self.ineq_constraints_ = ineq_constraints
self.eq_constraints_ = eq_constraints
self.eps_i_ = eps_i
self.eps_e_ = eps_e
Solver.__init__(self, objective)
def is_feasible(self, x):
""" Check feasibility of a given point """
try:
self.objective_.check_valid_input(x)
except ValueError as e:
return False
if self.ineq_constraints_ is not None:
for g in self.ineq_constraints_:
                if np.sum(g(x) > self.eps_i_ * np.ones(g.num_outputs())) > 0:
return False
if self.eq_constraints_ is not None:
for h in self.eq_constraints_:
                if np.sum(np.abs(h(x)) > self.eps_e_ * np.ones(h.num_outputs())) > 0:
return False
return True
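# Hedged sketch (illustrative only, not part of dex-net): based on how
# is_feasible uses them, a constraint object is assumed to be callable and to
# expose num_outputs(), e.g.
#
#     class NonNegativity(object):
#         """g(x) = -x <= 0 enforces x >= 0 element-wise."""
#         def __init__(self, dim):
#             self.dim_ = dim
#         def num_outputs(self):
#             return self.dim_
#         def __call__(self, x):
#             return -np.asarray(x)
#
# so OptimizationSolver(objective, ineq_constraints=[NonNegativity(3)]) would
# treat x as feasible only when all components are non-negative (up to eps_i_).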
|
import torchvision.models as models
from pytorch_lightning.metrics.functional import accuracy
from torchvision.io import read_image
from skimage import io
import numpy as np
import pandas as pd
import os
from PIL import Image
from sklearn.utils import shuffle
from glob import glob
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torchvision import models
from torch.utils.data import Dataset, DataLoader
from collections import OrderedDict
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.functional as F
from time import time
import matplotlib.pyplot as plt
from sklearn import metrics
import pickle as pkl
import cv2
import pandas as pd
from pathlib import Path
from tqdm import tqdm
class XrayDataset(Dataset):
def __init__(self, df, transform = None, parent_dir = Path("data/mimic"), preload = True): #
self.annotations = df
self.transform = transform
self.parent_dir = parent_dir
self.root = parent_dir
self.preload = preload
if preload:
X = []
y = []
print("loading Xray data")
for i, row in tqdm(df.iterrows(), total = len(df)):
img_id = row.img_id
img_path = str(self.parent_dir/"mimic_scaled"/img_id)
image = cv2.imread(img_path, cv2.IMREAD_COLOR)
y_label = row.class_id
X.append(image)
y.append(y_label)
self.X = X
self.y = np.array(y)
def __len__(self):
return len(self.annotations)
def __getitem__(self, index):
y_df = self.annotations.iloc[index].class_id
if self.preload:
image = self.X[index]
y_label = self.y[index]
if y_label != y_df:
print("mismatch")
raise ValueError
else:
img_id = self.annotations.iloc[index].img_id
img_path = str(self.parent_dir/"mimic_scaled"/img_id)
image = cv2.imread(img_path, cv2.IMREAD_COLOR)
if image is None:
print("image not found")
y_label = self.annotations.iloc[index].class_id
if self.transform:
image = self.transform(image)
return [image, y_label]
def resize_to(self, image, target_size = 240):
resize_ratio = target_size/min(image.shape[:2])
(h, w) = image.shape[:2]
h_new = int(h*resize_ratio)
w_new = int(w*resize_ratio)
img = cv2.resize(image, (w_new,h_new), interpolation=cv2.INTER_AREA)
return img
class XrayDatasetExtended(Dataset):
def __init__(self, df, transform = None, parent_dir = Path("data/mimic"), preload = True): #
self.annotations = df
self.transform = transform
self.parent_dir = parent_dir
self.root = parent_dir
self.preload = preload
if preload:
X = []
y = []
print("loading Xray data")
for i, row in tqdm(df.iterrows(), total = len(df)):
img_id = row.img_id
img_path = str(self.parent_dir/"mimic_scaled"/img_id)
image = cv2.imread(img_path, cv2.IMREAD_COLOR)
y_label = row.continual_class_id
X.append(image)
y.append(y_label)
self.X = X
self.y = np.array(y)
def __len__(self):
return len(self.annotations)
def __getitem__(self, index):
y_df = self.annotations.iloc[index].continual_class_id
if self.preload:
image = self.X[index]
y_label = self.y[index]
if y_label != y_df:
print("mismatch")
raise ValueError
else:
img_id = self.annotations.iloc[index].img_id
img_path = str(self.parent_dir/"mimic_scaled"/img_id)
image = cv2.imread(img_path, cv2.IMREAD_COLOR)
if image is None:
print("image not found")
y_label = self.annotations.iloc[index].continual_class_id
if self.transform:
image = self.transform(image)
return [image, y_label]
def resize_to(self, image, target_size = 240):
resize_ratio = target_size/min(image.shape[:2])
(h, w) = image.shape[:2]
h_new = int(h*resize_ratio)
w_new = int(w*resize_ratio)
img = cv2.resize(image, (w_new,h_new), interpolation=cv2.INTER_AREA)
return img
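# Hedged usage sketch (not part of the original module; the CSV path is
# hypothetical, and the DataFrame columns img_id / class_id are assumptions
# taken from the code above):
#
#     df = pd.read_csv("data/mimic/labels.csv")
#     dataset = XrayDataset(df, transform=transforms.ToTensor(), preload=False)
#     loader = DataLoader(dataset, batch_size=32, shuffle=True)
#     images, labels = next(iter(loader))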
|
#########################################################################
# -*- coding:utf-8 -*-
# File Name: ma.py
# Author: wayne
# mail: @163.com
# Created Time: 2015/8/27 9:46:34
#########################################################################
#!/bin/python
f = lambda x: x * x
print(f(3))
t = list(map(f, [1, 2, 3, 4]))
print(t)
|
#!/usr/bin/env python
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file
# Implements WeaveStateLoad class that sets up virtual network topology
# together with Weave fabric configuration.
#
import json
import os
import sys
from happy.ReturnMsg import ReturnMsg
from happy.Utils import *
import happy.HappyStateLoad
import WeaveFabricAdd
import WeaveNodeConfigure
import WeaveNetworkGateway
from Weave import Weave
from WeaveState import WeaveState
options = {}
options["quiet"] = False
options["json_file"] = None
def option():
return options.copy()
class WeaveStateLoad(WeaveState):
"""
weave-state-load loads weave virtual network topology from a file.
weave-state-load [-h --help] [-q --quiet] [-f --file <JSON_FILE>]
Example:
    $ weave-state-load <file>.json
Builds weave virtual network topology based on description specified in <file>.json.
return:
0 success
1 fail
"""
def __init__(self, opts=options):
WeaveState.__init__(self)
self.quiet = opts["quiet"]
self.new_json_file = opts["json_file"]
def __pre_check(self):
# Check if the name of the new node is given
if self.new_json_file is None:
emsg = "Missing name of file that specifies virtual network topology."
self.logger.error("[localhost] WeaveStateLoad: %s" % (emsg))
self.exit()
# Check if json file exists
if not os.path.exists(self.new_json_file):
emsg = "Cannot find the configuration file %s" % (self.new_json_file)
self.logger.error("[localhost] WeaveStateLoad: %s" % emsg)
self.exit()
self.new_json_file = os.path.realpath(self.new_json_file)
emsg = "Loading Weave Fabric from file %s." % (self.new_json_file)
self.logger.debug("[localhost] WeaveStateLoad: %s" % (emsg))
def __load_JSON(self):
emsg = "Import state file %s." % (self.new_json_file)
self.logger.debug("[localhost] WeaveStateLoad: %s" % (emsg))
try:
with open(self.new_json_file, 'r') as jfile:
json_data = jfile.read()
self.weave_topology = json.loads(json_data)
except Exception:
emsg = "Failed to load JSON state file: %s" % (self.new_json_file)
self.logger.error("[localhost] WeaveStateLoad: %s" % emsg)
self.exit()
def __load_network_topology(self):
emsg = "Loading network topology."
self.logger.debug("[localhost] WeaveStateLoad: %s" % (emsg))
options = happy.HappyStateLoad.option()
options["quiet"] = self.quiet
options["json_file"] = self.new_json_file
happyLoad = happy.HappyStateLoad.HappyStateLoad(options)
happyLoad.run()
self.readState()
def __create_fabric(self):
emsg = "Creating Weave Fabric."
self.logger.debug("[localhost] WeaveStateLoad: %s" % (emsg))
options = WeaveFabricAdd.option()
options["fabric_id"] = self.getFabricId(self.weave_topology)
options["quiet"] = self.quiet
if options["fabric_id"] is not None:
addFabric = WeaveFabricAdd.WeaveFabricAdd(options)
addFabric.run()
self.readState()
def __configure_weave_nodes(self):
emsg = "Configuring weave nodes"
self.logger.debug("[localhost] WeaveStateLoad: %s" % emsg)
weave_nodes = self.getWeaveNodeRecord(self.weave_topology)
for node in weave_nodes.keys():
options = WeaveNodeConfigure.option()
options['quiet'] = self.quiet
options['node_name'] = node
node_record = weave_nodes[node]
options['weave_node_id'] = node_record.get('weave_node_id', None)
options['weave_certificate'] = node_record.get('weave_node_certificate', "")
options['private_key'] = node_record.get('private_key', "")
options['pairing_code'] = node_record.get('pairing_code', None)
wnc = WeaveNodeConfigure.WeaveNodeConfigure(options)
wnc.run()
self.readState()
def __configure_network_gateway(self):
emsg = "Configuring Weave gateway."
self.logger.debug("[localhost] WeaveStateLoad: %s" % (emsg))
for network_id in self.getWeaveNetworkIds(self.weave_topology):
gateways = self.getWeaveNetworkGatewayIds(network_id, self.weave_topology)
for gateway in gateways:
options = WeaveNetworkGateway.option()
options["quiet"] = self.quiet
options["add"] = True
options["network_id"] = network_id
options["gateway"] = gateway
wsg = WeaveNetworkGateway.WeaveNetworkGateway(options)
wsg.run()
self.readState()
def __post_check(self):
emsg = "Loading Weave Fabric completed."
self.logger.debug("[localhost] WeaveStateLoad: %s" % (emsg))
def run(self):
with self.getStateLockManager():
self.__pre_check()
self.__load_JSON()
self.__load_network_topology()
self.__create_fabric()
self.__configure_weave_nodes()
self.__configure_network_gateway()
self.__post_check()
return ReturnMsg(0)
|
from setuptools import setup, find_packages
setup(
name="MinkLoc3D",
version="0.0.1",
packages=find_packages(),
)
|
import torch
import torch.nn as nn
from torch_geometric.nn.glob.glob import global_mean_pool
from utils.models import unsorted_segment_sum
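# Note (assumption based on how it is used below): unsorted_segment_sum(data,
# segment_ids, num_segments) is expected to scatter-add the rows of `data` into
# `num_segments` buckets indexed by `segment_ids`, mirroring TensorFlow's
# tf.math.unsorted_segment_sum.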
class EGNNEncoder(nn.Module):
"""PyTorch version of EGNN, mostly lifted from original implementation.
Sources:
- EGNN paper: https://arxiv.org/abs/2102.09844
- https://github.com/vgsatorras/egnn/blob/main/models/egnn_clean/egnn_clean.py
"""
def __init__(self, in_node_nf, in_edge_nf, h_nf, out_nf, emb_nf, n_layers, act_fn = nn.ReLU(), device = 'cpu'):
super(EGNNEncoder, self).__init__()
# main layers (no init layer needed)
self.n_layers = n_layers
# TODO: emb init instead?
self.add_module("EGNN_0", EGNNUpdate(in_node_nf, in_edge_nf, h_nf, out_nf, act_fn))
for l in range(1, n_layers):
self.add_module(f"EGNN_{l}", EGNNUpdate(out_nf, in_edge_nf, out_nf, out_nf, act_fn))
# final emb processing + create graph emb
self.post = EGNNPost(out_nf, emb_nf)
self.to(device)
def forward(self, batch):
node_feats, batch_node_vec = batch['node_feats'], batch['batch_node_vec']
edge_index, edge_attr = batch['edge_index'], batch['edge_attr']
coords = batch['coords']
for l in range(self.n_layers):
node_feats, edge_attr, coords = self._modules[f"EGNN_{l}"](node_feats, edge_index, edge_attr, coords)
node_embs, graph_emb = self.post(node_feats, batch_node_vec)
return node_embs, graph_emb, coords
### Main classes used for EGNN processing
class EGNNUpdate(nn.Module):
"""Equivariant convolution layer to process nodes, edges, and coordinates.
Mostly identical to EGNN E_GCL layer: https://github.com/vgsatorras/egnn/blob/main/models/egnn_clean/egnn_clean.py
"""
def __init__(self, in_node_nf, in_edge_nf, h_nf, out_nf, act_fn = nn.ReLU()):
super(EGNNUpdate, self).__init__()
# feat and mlp non-input dims
out_edge_nf = in_edge_nf
coord_dim = 3
radial_dim = 1
h_coord_nf = radial_dim * 2 # arbitrary, just between num_edge_fs and 1
# mlp input dims
in_node_mlp_nf = in_node_nf + out_edge_nf + coord_dim # node_feats + agg + coords
in_edge_mlp_nf = (in_node_nf * 2) + in_edge_nf + radial_dim
in_coord_mlp_nf = in_edge_nf # number of edge features
# mlps: node, edge, coord_edge (no bias, final layer has xav uniform init [following orig]), coord
self.node_mlp = nn.Sequential(nn.Linear(in_node_mlp_nf, h_nf, True), act_fn, nn.Linear(h_nf, out_nf, True))
self.edge_mlp = nn.Sequential(nn.Linear(in_edge_mlp_nf, h_nf, True), act_fn, nn.Linear(h_nf, out_edge_nf, True))
layer = nn.Linear(h_coord_nf, radial_dim, False)
nn.init.xavier_uniform_(layer.weight, gain = 0.001)
self.coord_edge_mlp = nn.Sequential(nn.Linear(in_coord_mlp_nf, h_coord_nf), nn.ReLU(), layer)
self.coord_mlp = nn.Linear(coord_dim, coord_dim)
def forward(self, node_feats, edge_index, edge_attr, coords):
radial, bond_lengths = self.coord_to_radial(edge_index, coords)
edge_out = self.edge_update(node_feats, edge_index, edge_attr, radial)
coord_out = self.coord_update(edge_index, edge_out, coords, bond_lengths)
node_out = self.node_update(node_feats, edge_index, edge_out, coord_out)
return node_out, edge_out, coord_out
def coord_to_radial(self, edge_index, coords):
"""Calculate bond lengths and normalise using radial.
TODO: Alt coord_norm as class like SE(3).
"""
atom_is, atom_js = edge_index
bond_lengths = coords[atom_is] - coords[atom_js]
radial = torch.sum(bond_lengths**2, 1).unsqueeze(1)
norm = torch.sqrt(radial) + 1
normed_bond_lengths = bond_lengths / norm
return radial, normed_bond_lengths
def edge_update(self, node_feats, edge_index, edge_attr, radial):
"""Create node features for each bonded pair of atoms and run through MLP."""
atom_is, atom_js = edge_index
atom_is_fs, atom_js_fs = node_feats[atom_is], node_feats[atom_js]
edge_in = torch.cat([atom_is_fs, atom_js_fs, edge_attr, radial], dim = 1)
return self.edge_mlp(edge_in)
def coord_update(self, edge_index, edge_attr, coords, bond_lengths):
"""Update normed bond lengths using epsilon based on bond lengths and MLP(edge).
Added Coord_MLP at end to push closer to ground truth.
"""
atom_is, _ = edge_index
eps_c = self.coord_edge_mlp(edge_attr) # e_c: ~e-4/e-5
trans = bond_lengths * eps_c # trans: ~e-5/e-6
agg = unsorted_segment_sum(trans, atom_is, coords.size(0))
coords += agg
coord_out = self.coord_mlp(coords)
return coord_out
def node_update(self, node_feats, edge_index, edge_attr, coords):
"""Using coordinates as feature, doesn't in original."""
atom_is, _ = edge_index
agg = unsorted_segment_sum(edge_attr, atom_is, node_feats.size(0))
node_in = torch.cat([node_feats, agg, coords], dim=1)
return self.node_mlp(node_in)
class EGNNPost(nn.Module):
"""Final EGNN processing for node and graph embeddings."""
def __init__(self, out_nf, emb_nf):
super(EGNNPost, self).__init__()
self.node_emb_out = nn.Linear(out_nf, emb_nf)
def forward(self, node_feats, batch_node_vec):
node_embs = self.node_emb_out(node_feats)
graph_emb = global_mean_pool(node_embs, batch_node_vec)
return node_embs, graph_emb
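# Illustrative usage sketch (not part of the original file). It only shows the dict
# keys that EGNN.forward expects; the constructor arguments are defined earlier in
# this module and the tensor shapes below are assumptions for demonstration only.
#
#   model = EGNN(...)  # construct with the feature sizes used by this module
#   batch = {
#       'node_feats': torch.randn(n_atoms, in_node_nf),
#       'edge_index': edge_index,                       # shape (2, n_edges)
#       'edge_attr': torch.randn(n_edges, in_edge_nf),
#       'coords': torch.randn(n_atoms, 3),
#       'batch_node_vec': torch.zeros(n_atoms, dtype=torch.long),
#   }
#   node_embs, graph_emb, coords = model(batch)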
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import json
from pykafka import KafkaClient
from scrapy.crawler import Crawler
from spiders import settings
from spiders.items.common import core
class SpidersPipeline(object):
def __init__(self):
kafka_hosts = settings.KAFKA_HOSTS
hosts = ",".join(kafka_hosts)
        # Initialize the Kafka client
self._client = KafkaClient(hosts=hosts)
kafka_topic = settings.KAFKA_TOPIC.encode(encoding="UTF-8")
if kafka_topic not in self._client.topics:
            raise Exception('scrapy kafka topic does not exist')
        # Initialize the producer; the topic name has to be passed as bytes
self._producer = self._client.topics[kafka_topic].get_producer()
def process_item(self, item, spider: Crawler):
if type(item).__name__ == core.BaseData.__name__:
json_str = json.dumps(item, default=lambda obj: obj.__dict__, sort_keys=True, indent=4)
self._producer.produce(json_str.encode())
else:
item.save(force_insert=False, validate=False, clean=True, )
def close_spider(self, spider):
self._producer.stop()
"""
mongoengine 存储爬取的数据
主要在模型创建自定的类,且继承 mongoengine.Document
"""
class MongoDBPipeline(object):
# def __init__(self):
# self.ids_seen = set()
def open_spider(self, spider):
pass
def close_spider(self, spider):
pass
def process_item(self, item, spider):
item.save(force_insert=False, validate=False, clean=True, )
|
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from plot_2d_from_json import plot_community
from tqdm import tqdm
import json
import os
if __name__ == '__main__':
os.makedirs('image_data', exist_ok=True)
canvas_size = 1000
with open('ReCo_json.json', encoding='utf-8') as f:
data = json.load(f)
for community in tqdm(data):
_id = community['_id']
plot_community(community, canvas_size, printing_title=False,
building_color='black', boundary_color='red', hide_spines=True)
plt.savefig('image_data/'+str(_id)+'.jpg', dpi=150)
plt.clf()
|
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^', include('ratings.urls')),
)
|
# async
import asyncio
import random
async def marge(cookiejar, n):
for x in range(n):
print('marge producing {}/{}'.format(x+1, n))
await asyncio.sleep(random.random())
cookie = str(x+1)
await cookiejar.put(cookie)
async def homer(cookiejar):
while True:
cookie = await cookiejar.get()
print('homer consuming {}...'.format(cookie))
await asyncio.sleep(random.random())
cookiejar.task_done()
async def run(n):
cookiejar = asyncio.Queue()
consumer = asyncio.ensure_future(homer(cookiejar))
    await marge(cookiejar, n)
    await cookiejar.join()
    consumer.cancel()  # stop the consumer once the queue has been drained
loop = asyncio.get_event_loop()
loop.run_until_complete(run(10))
loop.close()
|
import numpy as np
from scipy import stats
'''
Distributions for prior weight precision (tausq_inv), defined as classes.
constant: Prior precision (tausq_inv) is treated as constant, i.e.
there is no attempt to change the initial hyperparameter values.
ard: Automatic relevance determination, i.e. the model tries
to learn the distribution of tausq_inv via variational Bayes
(assuming that tausq_inv has a gamma distribution).
ard_drv_atn: Automatic relevance determination (assuming that tausq_inv has a gamma
distribution) with the assumption that all of the regression weights
(w) associated with a feature share a common prior precision (tausq_inv).
This ends up being a form of derived attention model.
'''
class constant:
'''
Prior precision (tausq_inv) is treated as constant, i.e.
there is no attempt to change the initial hyperparameter values.
'''
def __init__(self, n_y, n_f, sim_pars):
self.tausq_inv_array = np.array(n_y*n_f*[sim_pars['tausq_inv']]).reshape((n_f, n_y))
def update(self, mean_wsq, y_psb):
pass # do nothing, because tausq_inv is assumed to be known and constant
def mean_tausq_inv(self):
return self.tausq_inv_array
def mean_tausq(self):
return 1/self.tausq_inv_array
constant.par_names = ['tausq_inv']
class ard:
'''
Automatic relevance determination, i.e. the model tries
to learn the distribution of tausq_inv via variational Bayes
(assuming that tausq_inv has a gamma distribution).
'''
def __init__(self, n_y, n_f, sim_pars):
self.n_y = n_y
self.prior_hpar0 = sim_pars['prior_tausq_inv_hpar0']
self.prior_hpar1 = sim_pars['prior_tausq_inv_hpar1']
self.hpar0 = np.array(n_f*n_y*[sim_pars['prior_tausq_inv_hpar0']], dtype='float').reshape((n_f, n_y))
self.hpar1 = sim_pars['prior_tausq_inv_hpar1']
def update(self, mean_wsq, y_psb):
# update hyperparameters
for j in range(self.n_y):
self.hpar0[:, j] = self.prior_hpar0 - 0.5*mean_wsq[:, j]
self.hpar1 = self.prior_hpar1 + 0.5
def mean_tausq_inv(self):
return (self.hpar1 + 1)/(-self.hpar0)
def mean_tausq(self):
return -self.hpar0/self.hpar1
ard.par_names = ['prior_tausq_inv_hpar0', 'prior_tausq_inv_hpar1']
class ard_drv_atn:
'''
Automatic relevance determination (assuming that tausq_inv has a gamma
distribution) with the assumption that all of the regression weights
(w) associated with a feature share a common prior precision (tausq_inv).
This ends up being a form of derived attention model.
'''
def __init__(self, n_y, n_f, sim_pars):
self.n_y = n_y
self.n_f = n_f
self.prior_hpar0 = sim_pars['prior_tausq_inv_hpar0']
self.prior_hpar1 = sim_pars['prior_tausq_inv_hpar1']
self.hpar0 = np.array(n_f*[sim_pars['prior_tausq_inv_hpar0']], dtype='float')
self.hpar1 = sim_pars['prior_tausq_inv_hpar1']
self.y_psb_so_far = np.zeros(n_y)
def update(self, mean_wsq, y_psb):
# keep track of which outcomes have been observed so far
for j in range(self.n_y):
if y_psb[j] == 1:
self.y_psb_so_far[j] = 1
# update hyperparameters
self.hpar0 = self.prior_hpar0 - 0.5*mean_wsq.sum(1)
self.hpar1 = self.prior_hpar1 + 0.5*self.y_psb_so_far.sum()
def mean_tausq_inv(self):
mean_tausq_inv = np.zeros((self.n_f, self.n_y))
for i in range(self.n_f):
mean_tausq_inv[i, :] = (self.hpar1 + 1)/(-self.hpar0[i])
return mean_tausq_inv
def mean_tausq(self):
mean_tausq = np.zeros((self.n_f, self.n_y))
for i in range(self.n_f):
mean_tausq[i, :] = -self.hpar0[i]/self.hpar1
return mean_tausq
ard_drv_atn.par_names = ['prior_tausq_inv_hpar0', 'prior_tausq_inv_hpar1']
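# Illustrative usage sketch (not part of the original module). The hyperparameter
# values below are assumptions chosen only to show how the classes are constructed
# from a sim_pars dict keyed by each class's par_names.
if __name__ == '__main__':
    n_y, n_f = 2, 3
    const_prior = constant(n_y, n_f, {'tausq_inv': 1.0})
    print(const_prior.mean_tausq())  # constant 1/tausq_inv everywhere
    ard_prior = ard(n_y, n_f, {'prior_tausq_inv_hpar0': -1.0, 'prior_tausq_inv_hpar1': 0.5})
    ard_prior.update(mean_wsq=np.ones((n_f, n_y)), y_psb=np.ones(n_y))
    print(ard_prior.mean_tausq_inv())  # mean of the precision after one update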
|
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
import pytest
import os
import testinfra.utils.ansible_runner
import pprint
pp = pprint.PrettyPrinter()
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def base_directory():
"""
get molecule directories
"""
cwd = os.getcwd()
if('group_vars' in os.listdir(cwd)):
directory = "../.."
molecule_directory = "."
else:
directory = "."
molecule_directory = "molecule/{}".format(os.environ.get('MOLECULE_SCENARIO_NAME'))
return directory, molecule_directory
@pytest.fixture()
def get_vars(host):
"""
parse ansible variables
- defaults/main.yml
- vars/main.yml
- molecule/${MOLECULE_SCENARIO_NAME}/group_vars/all/vars.yml
"""
base_dir, molecule_dir = base_directory()
file_defaults = "file={}/defaults/main.yml name=role_defaults".format(base_dir)
file_vars = "file={}/vars/main.yml name=role_vars".format(base_dir)
file_molecule = "file={}/group_vars/all/vars.yml name=test_vars".format(molecule_dir)
defaults_vars = host.ansible("include_vars", file_defaults).get("ansible_facts").get("role_defaults")
vars_vars = host.ansible("include_vars", file_vars).get("ansible_facts").get("role_vars")
molecule_vars = host.ansible("include_vars", file_molecule).get("ansible_facts").get("test_vars")
ansible_vars = defaults_vars
ansible_vars.update(vars_vars)
ansible_vars.update(molecule_vars)
templar = Templar(loader=DataLoader(), variables=ansible_vars)
result = templar.template(ansible_vars, fail_on_undefined=False)
return result
def local_facts(host):
"""
return local fact
"""
return host.ansible("setup").get("ansible_facts").get("ansible_local").get("tomcat")
def test_fact_file(host):
"""
check created ansible facts
"""
f = host.file("/etc/ansible/facts.d/tomcat.fact")
assert f.exists
assert f.is_file
def test_tmp_directory(host, get_vars):
"""
test remote deployment directory
"""
dir = host.file(get_vars.get('deployment_tmp_directory'))
assert dir.exists
assert dir.is_directory
def test_files(host, get_vars):
"""
test jmx-remote.jar
"""
facts = local_facts(host)
major_version = int(facts.get("version").get("major"))
patch_version = int(facts.get("version").get("patch"))
f = host.file(
"{0}/catalina-jmx-remote.jar".format(
get_vars.get('deployment_tmp_directory')))
if(major_version <= 9 and patch_version <= 14):
assert f.exists
assert f.is_file
else:
assert not f.exists
def test_tomcat_version_link(host, get_vars):
"""
tomcat version
"""
d = host.file(
"{0}/{1}".format(
get_vars.get('tomcat_user').get('home_directory'),
get_vars.get('tomcat_version')))
assert d.exists
assert d.is_symlink
def test_tomcat_webapps(host, get_vars):
"""
"""
version = get_vars.get('tomcat_version')
install_path = get_vars.get('tomcat_user').get('home_directory')
webapps = get_vars.get('tomcat_remove_webapps')
for w in webapps:
directory = "{0}/{1}/webapps/{2}".format(install_path, version, w)
d = host.file(directory)
assert not d.exists
|
# -*- coding: utf-8 -*-
# Copyright (c) Polyconseil SAS. All rights reserved.
from __future__ import unicode_literals
import json
import os
import os.path
from dokang import api
from . import compat
def get_harvester(fqn):
module_fqn, function_fqn = fqn.rsplit('.', 1)
# Hack around https://bugs.python.org/issue21720
if compat.PY2 and not isinstance(module_fqn, bytes):
module_fqn = module_fqn.encode()
function_fqn = function_fqn.encode()
module = __import__(module_fqn, fromlist=[function_fqn])
return getattr(module, function_fqn)
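# Illustrative example (comment only, not part of the original module):
# get_harvester('os.path.join') imports the 'os.path' module and returns its
# 'join' attribute. In this project the dotted name comes from the
# 'dokang.uploaded_docs.harvester' setting.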
def doc_set(settings, uploaded):
harvester = get_harvester(settings['dokang.uploaded_docs.harvester'])
upload_dir = settings.get('dokang.uploaded_docs.dir')
uploaded_path = os.path.join(upload_dir, uploaded)
title = None
info_file = os.path.join(uploaded_path, '.dokang')
if os.path.exists(info_file):
with open(info_file) as fp:
info = json.load(fp)
title = info.get('title') if isinstance(info, dict) else None
return {
'id': uploaded,
'title': title or uploaded,
'path': uploaded_path,
'harvester': harvester(),
}
def get_doc_sets(settings):
"""
Get doc sets using path of doc sets file defined in settings.
"""
index_path = settings['dokang.index_path']
if not os.path.exists(index_path):
try:
os.makedirs(os.path.dirname(index_path))
except OSError: # It's ok if the parent dir exists already
pass
api.initialize_index(index_path)
upload_dir = settings['dokang.uploaded_docs.dir']
if not os.path.exists(upload_dir):
os.makedirs(upload_dir)
return {
uploaded: doc_set(settings, uploaded)
for uploaded in (
x.decode('utf-8') if isinstance(x, bytes) else x
for x in os.listdir(upload_dir)
)
}
|
# Copyright (C) Microsoft Corporation. All rights reserved.
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# -*- coding: UTF-8 -*-
from sys_base import *
from controls.sys_works import *
from controls.manage_user import *
class manager_account(sys_base):
def __init__(self, aid):
self.aid = aid
def getManagerAccounts(self):
        return user_list_all()
def getManagerAccount(self):
return user_detail_by_name(self.aid)
def getManagerAccountsbyRole(self, role):
        return user_name_from_group_name(role)
def doWorks(self, parameters):
return user_manage_account_action(parameters)
def get_manageraccount(aid):
return manager_account(aid = aid)
def get_manageraccounts():
return manager_account(aid = 0)
|
import pickle
import os
import re
import shutil
import subprocess
from functools import lru_cache
from typing import List, Tuple
from .column_model import CorrelationClusteringColumn
from .emd_utils import intersection_emd, quantile_emd
from .quantile_histogram import QuantileHistogram
from ...data_sources.base_column import BaseColumn
from ...utils.utils import convert_data_type, create_folder, get_project_root
def compute_cutoff_threshold(matrix_c: list, threshold: float):
"""
Algorithm 1 of the paper "Automatic Discovery of Attributes in Relational Databases" from M. Zhang et al. [1]
This algorithm computes the threshold of a column that determines if any other column is to be considered
its neighbour.
Parameters
---------
matrix_c : list
A list containing dicts of EMD/ColumnName pairs
threshold : float
The conservative global EMD cutoff threshold described in [1]
Returns
-------
float
The cutoff threshold of the input column
"""
matrix_c.append({'e': threshold, 'c': 0})
matrix_c = sorted(matrix_c, key=lambda k: k['e'])
cutoff = 0.0
gap = 0.0
i = 0
while i < len(matrix_c) - 1 and matrix_c[i + 1]['e'] <= threshold:
if gap < (matrix_c[i + 1]['e'] - matrix_c[i]['e']):
gap = matrix_c[i + 1]['e'] - matrix_c[i]['e']
cutoff = matrix_c[i]['e']
i += 1
return cutoff
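# Illustrative example (comment only, not part of the original module): with a
# global threshold of 0.5 and
#   matrix_c = [{'e': 0.1, 'c': 'A'}, {'e': 0.15, 'c': 'B'}, {'e': 0.45, 'c': 'C'}]
# the largest gap below the threshold lies between 0.15 and 0.45, so the function
# returns 0.15 and only columns A and B count as neighbours.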
def column_combinations(columns: List[Tuple], quantiles: int, uuid: str,
intersection: bool = False):
"""
All the unique combinations between all the columns
Parameters
---------
columns : list
A list that contains all the column names
quantiles : int
The number of quantiles that the histograms are split on
intersection : bool, optional
If true do the intersection EMD else the normal EMD
uuid:
The unique identifier of the run
Returns
-------
    tuple
        A tuple with ((column_name1, column_name2), quantiles, intersection, uuid)
"""
c = len(columns)
c_i = 0
while c_i < c:
_, table_guid_i, _, column_guid_i = columns[c_i]
c_j = c_i + 1
while c_j < c:
_, table_guid_j, _, column_guid_j = columns[c_j]
if table_guid_i != table_guid_j:
yield (columns[c_i], columns[c_j]), quantiles, intersection, uuid
c_j = c_j + 1
c_i = c_i + 1
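# Illustrative example (comment only, not part of the original module): for two
# columns that live in different tables, e.g.
#   cols = [('t1', 'guid1', 'a', 'c1'), ('t2', 'guid2', 'b', 'c2')]
# list(column_combinations(cols, quantiles=256, uuid='run-1')) yields a single item
#   ((cols[0], cols[1]), 256, False, 'run-1')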
def process_emd(tup: tuple):
"""
Function defining a single quantile_emd process between two columns.
Parameters
---------
    tup : tuple
        A tuple with ((column_name1, column_name2), quantiles, intersection, uuid)
Returns
-------
tuple
a dictionary entry {k: joint key of the column combination, v: quantile_emd calculation}
"""
name_i, name_j, k, quantile, intersection, uuid = unwrap_process_input_tuple(tup)
tn_i, _, cn_i, _ = name_i
tn_j, _, cn_j, _ = name_j
c1 = read_from_cache(str((tn_i, cn_i)), uuid)
c2 = read_from_cache(str((tn_j, cn_j)), uuid)
if intersection:
return k, intersection_emd(c1, c2, quantile)
else:
return k, quantile_emd(c1, c2, quantile)
@lru_cache(maxsize=32)
def read_from_cache(file_name: str, uuid: str):
"""
Function that reads from a pickle file lru cache a column after pre-processing
Parameters
----------
    file_name: str
        The file name that contains the preprocessed column
uuid:
The unique identifier of the run
Returns
-------
CorrelationClusteringColumn
The preprocessed column
"""
return get_column_from_store(file_name, uuid)
def unwrap_process_input_tuple(tup: tuple):
"""
Helper function that unwraps a tuple to its components and creates a unique key for the column combination
Parameters
---------
tup : tuple
the tuple to unwrap
"""
names, quantile, intersection, uuid = tup
name_i, name_j = names
k = (name_i, name_j)
return name_i, name_j, k, quantile, intersection, uuid
def insert_to_dict(dc: dict, k: str, v: dict):
"""
Helper function that instantiates a list to a dictionary key if it is not present and then appends an
EMD/ColumnName pair to it
Parameters
---------
dc : dict
the dictionary
k : str
the key
v : dict
EMD/ColumnName pair
"""
if k not in dc:
dc[k] = list()
dc[k].append(v)
def transform_dict(dc: dict):
"""
Helper function that transforms a dict with composite column combination keys to a dict with column keys and
values EMD/ColumnName pairs in a sorted list (ascending based on the EMD value)
Parameters
---------
dc : dict
the dictionary
"""
tmp_dict = dict()
for k, v in dc.items():
k1, k2 = k
v1 = {'e': v, 'c': k2}
v2 = {'e': v, 'c': k1}
insert_to_dict(tmp_dict, k1, v1)
insert_to_dict(tmp_dict, k2, v2)
return tmp_dict
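# Illustrative example (comment only, not part of the original module):
#   transform_dict({('A', 'B'): 0.2})
# returns
#   {'A': [{'e': 0.2, 'c': 'B'}], 'B': [{'e': 0.2, 'c': 'A'}]}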
def process_columns(tup: tuple):
"""
Process a pandas dataframe column to a column_model_scale.Column
Parameters
---------
tup : tuple
tuple containing the information of the column to be processed
"""
column_name, column_uid, data, source_name, source_guid, quantiles, uuid = tup
column = CorrelationClusteringColumn(column_name, column_uid, data, source_name, source_guid, quantiles, uuid)
if column.size > 0:
column.quantile_histogram = QuantileHistogram(column.long_name, column.ranks, column.size, quantiles)
tn_i, _, cn_i, _ = column.long_name
fname = (tn_i, cn_i)
folder = get_project_root() + '/algorithms/distribution_based/cache/column_store/' + uuid
create_folder(folder)
pickle_path = folder + '/' + re.sub('\\W+', '_', str(fname)) + '.pkl'
with open(pickle_path, 'wb') as output:
pickle.dump(column, output, pickle.HIGHEST_PROTOCOL)
del column
def parallel_cutoff_threshold(tup: tuple):
"""
Process the cutoff threshold in parallel for each column
Parameters
---------
tup : tuple
tuple containing the information of the column to be processed
"""
matrix_a, column, threshold = tup
name_i = column.long_name
theta = compute_cutoff_threshold(matrix_a[name_i], threshold)
n_c = [(name_i, i['c']) for i in matrix_a[name_i] if i['e'] <= theta]
return n_c
def ingestion_column_generator(columns: List[BaseColumn], table_name: str, table_guid: object, quantiles: int,
uuid: str):
"""
Generator of incoming pandas dataframe columns
"""
for column in columns:
yield column.name, column.unique_identifier, column.data, table_name, table_guid, quantiles, uuid
def cuttoff_column_generator(matrix_a: dict, columns: List[Tuple[str, str, str, str]], threshold: float, uuid: str):
"""
Generator of columns for the cutoff threshold computation
"""
for column_name in columns:
tn_i, _, cn_i, _ = column_name
fname = (tn_i, cn_i)
column = get_column_from_store(fname, uuid)
yield matrix_a, column, threshold
def generate_global_ranks(data: list, uuid: str):
"""
Function that creates a pickle file with the global ranks of all the values inside the database.
Parameters
----------
data : list
All the values from every column
uuid:
The unique identifier of the run
"""
ranks = unix_sort_ranks(set(data), uuid)
folder = get_project_root() + '/algorithms/distribution_based/cache/global_ranks/' + uuid
with open(folder + "/" + uuid + '.pkl', 'wb') as output:
pickle.dump(ranks, output, pickle.HIGHEST_PROTOCOL)
def unix_sort_ranks(corpus: set, uuid: str):
"""
Function that takes a corpus sorts it with the unix sort -n command and generates the global ranks
for each value in the corpus.
Parameters
----------
corpus: set
The corpus (all the unique values from every column)
uuid:
The unique identifier of the run
Returns
-------
dict
The ranks in the form of k: value, v: the rank of the value
"""
folder = get_project_root() + '/algorithms/distribution_based/cache/sorts/' + uuid
with open(folder + "/unsorted_file.txt", 'w') as out:
for var in corpus:
print(str(var), file=out)
with open(folder + '/sorted_file.txt', 'w') as f:
if os.name == 'nt':
subprocess.call(['sort', folder + '/unsorted_file.txt'], stdout=f)
else:
sort_env = os.environ.copy()
sort_env['LC_ALL'] = 'C'
subprocess.call(['sort', '-n', folder + '/unsorted_file.txt'], stdout=f, env=sort_env)
rank = 1
ranks = []
with open(folder + '/sorted_file.txt', 'r') as f:
txt = f.read()
for var in txt.splitlines():
ranks.append((convert_data_type(var.replace('\n', '')), rank))
rank = rank + 1
return dict(ranks)
def create_cache_dirs(uuid: str):
""" Create the directories needed for the correlation clustering algorithm"""
primary_folder: str = get_project_root() + '/algorithms/distribution_based/cache'
create_folder(primary_folder)
create_folder(primary_folder + '/global_ranks')
create_folder(primary_folder + '/column_store')
create_folder(primary_folder + '/sorts')
create_folder(primary_folder + '/global_ranks/' + uuid)
create_folder(primary_folder + '/column_store/' + uuid)
create_folder(primary_folder + '/sorts/' + uuid)
def cleanup_files(uuid: str):
primary_folder: str = get_project_root() + '/algorithms/distribution_based/cache'
shutil.rmtree(primary_folder + '/global_ranks/' + uuid)
shutil.rmtree(primary_folder + '/column_store/' + uuid)
shutil.rmtree(primary_folder + '/sorts/' + uuid)
def get_column_from_store(file_name: str, uuid: str):
folder = get_project_root() + '/algorithms/distribution_based/cache/column_store/' + uuid
file_path = folder + '/' + re.sub('\\W+', '_', str(file_name)) + '.pkl'
if os.path.getsize(file_path) > 0:
with open(file_path, 'rb') as pkl_file:
data = pickle.load(pkl_file)
return data
|
from mygrad.tensor_base import Tensor
from .ops import MoveAxis, Roll, SwapAxes, Transpose
__all__ = ["transpose", "moveaxis", "swapaxes", "roll"]
def transpose(a, *axes, constant=False):
""" Permute the dimensions of a tensor.
Parameters
----------
a : array_like
The tensor to be transposed
axes : Optional[Tuple[int]]
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
mygrad.Tensor
`a` with its axes permuted. A new tensor is returned.
Examples
--------
>>> import mygrad as mg
>>> a = mg.Tensor([[1, 2], [3, 4]])
>>> a
Tensor([[1, 2],
[3, 4]])
>>> a.transpose()
Tensor([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
Tensor([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
Tensor([[1, 3],
[2, 4]]) """
if not axes:
axes = None
elif hasattr(axes[0], "__iter__") or axes[0] is None:
if len(axes) > 1:
raise TypeError(
"'{}' object cannot be interpreted as an integer".format(type(axes[0]))
)
axes = axes[0]
return Tensor._op(Transpose, a, op_args=(axes,), constant=constant)
def moveaxis(a, source, destination, constant=False):
""" Move axes of a tensor to new positions. Other axes remain in their
original order.
Parameters
----------
a : array_like
The array whose axes should be reordered.
source : Union[int, Sequence[int]]
Original positions of the axes to move. These must be unique.
destination : Union[int, Sequence[int]]
Destination positions for each of the original axes. These must also be
unique.
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
result : mygrad.Tensor
        Array with moved axes. This array is a view of the input array.
Examples
--------
    >>> import numpy as np
    >>> from mygrad import Tensor, moveaxis
    >>> x = Tensor(np.zeros((3, 4, 5)))
>>> moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> moveaxis(x, -1, 0).shape
(5, 3, 4)
>>> moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3) """
return Tensor._op(MoveAxis, a, op_args=(source, destination), constant=constant)
def swapaxes(a, axis1, axis2, constant=False):
""" Interchange two axes of a tensor.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
constant : bool, optional(default=False)
If ``True``, the returned tensor is a constant (it
does not back-propagate a gradient)
Returns
-------
mygrad.Tensor
Examples
--------
>>> from mygrad import Tensor, swapaxes
>>> x = Tensor([[1, 2, 3]])
>>> swapaxes(x, 0, 1)
Tensor([[1],
[2],
[3]])
>>> x = Tensor([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
>>> x
Tensor([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> swapaxes(x, 0, 2)
Tensor([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return Tensor._op(SwapAxes, a, op_args=(axis1, axis2), constant=constant)
def roll(a, shift, axis=None, constant=False):
"""
Roll tensor elements along a given axis.
Elements that roll beyond the end of an axis "wrap back around" to the beginning.
This docstring was adapted from ``numpy.roll``
Parameters
----------
a : array_like
Input tensor.
shift : Union[int, Tuple[int, ...]]
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : Optional[Union[int, Tuple[int, ...]]]
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : Tensor
Output array, with the same shape as `a`.
Examples
--------
>>> import mygrad as mg
>>> x = mg.arange(10)
>>> mg.roll(x, 2)
Tensor([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> x2 = mg.reshape(x, (2,5))
>>> x2
Tensor([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> mg.roll(x2, 1)
Tensor([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> mg.roll(x2, 1, axis=0)
Tensor([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> mg.roll(x2, 1, axis=1)
Tensor([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
"""
return Tensor._op(
Roll, a, op_kwargs=dict(shift=shift, axis=axis), constant=constant
)
|
from pydantic import BaseModel
class BookGenre(BaseModel):
id: int
name: str
class BookGenreIn(BaseModel):
name: str
|
from pamqp.heartbeat import Heartbeat
from pamqp.commands import Connection
import amqpstorm
from amqpstorm import AMQPConnectionError
from amqpstorm.channel0 import Channel0
from amqpstorm.tests.utility import FakeConnection
from amqpstorm.tests.utility import FakeFrame
from amqpstorm.tests.utility import TestFramework
class Channel0FrameTests(TestFramework):
def configure(self):
self.connection = amqpstorm.Connection('localhost', 'guest', 'guest',
lazy=True)
def test_channel0_heartbeat(self):
channel = Channel0(self.connection)
self.assertIsNone(channel.on_frame(Heartbeat()))
def test_channel0_on_close_frame(self):
self.connection.set_state(self.connection.OPEN)
channel = Channel0(self.connection)
self.assertFalse(self.connection.exceptions)
channel.on_frame(Connection.Close())
self.assertTrue(self.connection.exceptions)
self.assertTrue(self.connection.is_closed)
self.assertRaisesRegexp(
AMQPConnectionError,
'Connection was closed by remote server: ',
self.connection.check_for_errors
)
def test_channel0_on_close_ok_frame(self):
self.connection.set_state(self.connection.OPEN)
channel = Channel0(self.connection)
self.assertFalse(self.connection.is_closed)
channel.on_frame(Connection.CloseOk())
self.assertTrue(self.connection.is_closed)
def test_channel0_is_blocked(self):
channel = Channel0(self.connection)
self.assertFalse(channel.is_blocked)
channel.on_frame(Connection.Blocked('travis-ci'))
self.assertTrue(channel.is_blocked)
self.assertEqual(self.get_last_log(),
'Connection is blocked by remote server: travis-ci')
def test_channel0_unblocked(self):
channel = Channel0(self.connection)
channel.on_frame(Connection.Blocked())
self.assertTrue(channel.is_blocked)
channel.on_frame(Connection.Unblocked())
self.assertFalse(channel.is_blocked)
self.assertEqual(self.get_last_log(),
'Connection is blocked by remote server: ')
def test_channel0_open_ok_frame(self):
channel = Channel0(self.connection)
self.assertFalse(self.connection.is_open)
channel.on_frame(Connection.OpenOk())
self.assertTrue(self.connection.is_open)
def test_channel0_start_frame(self):
connection = FakeConnection()
connection.parameters['username'] = 'guest'
connection.parameters['password'] = 'guest'
channel = Channel0(connection)
properties = {
'version': 0
}
channel.on_frame(Connection.Start(server_properties=properties))
self.assertEqual(channel.server_properties, properties)
self.assertIsInstance(connection.get_last_frame(), Connection.StartOk)
def test_channel0_start_invalid_auth_frame(self):
connection = FakeConnection()
connection.parameters['username'] = 'guest'
connection.parameters['password'] = 'guest'
channel = Channel0(connection)
channel.on_frame(Connection.Start(mechanisms='invalid'))
self.assertRaisesRegexp(
AMQPConnectionError,
            r'Unsupported Security Mechanism\(s\): invalid',
connection.check_for_errors
)
def test_channel0_tune_frame(self):
connection = FakeConnection()
connection.parameters['virtual_host'] = '/'
channel = Channel0(connection)
channel.on_frame(Connection.Tune())
self.assertIsInstance(connection.get_last_frame(), Connection.TuneOk)
self.assertIsInstance(connection.get_last_frame(), Connection.Open)
def test_channel0_unhandled_frame(self):
channel = Channel0(self.connection)
channel.on_frame(FakeFrame())
self.assertEqual(self.get_last_log(),
"[Channel0] Unhandled Frame: FakeFrame")
|
g_id = 0
class Process:
def __init__(self, burst_time, arrival_time = 0, priority = 0):
global g_id
self.id = g_id
g_id += 1
self.priority = priority
self.burst_time = burst_time
self.arrival_time = arrival_time
|
from typing import List
class Solution:
def exclusiveTime(self, n: int, logs: List[str]) -> List[int]:
ans = [0] * n
stack = []
prev = 0
for each in logs:
func, start_end, time = each.split(':')
func, time = int(func), int(time)
if start_end == 'start':
if stack:
ans[stack[-1]] += time - prev
stack.append(func)
prev = time
else:
ans[stack.pop()] += time - prev + 1
prev = time + 1
return ans
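# Illustrative example (comment only, not part of the original solution): with
#   n = 2, logs = ["0:start:0", "1:start:2", "1:end:5", "0:end:6"]
# function 0 runs exclusively for 3 time units and function 1 for 4, so
# Solution().exclusiveTime(2, logs) returns [3, 4].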
|
#!/usr/bin/env python
import json
import tweepy
from ryver import Post
import logging
from random import randint
from os import path
class Tweet(Post):
def __init__(self, tweet, config):
super(Tweet, self).__init__()
self.reviewUrl = config["reviewUrl"]
self.genAttr(tweet)
def genAttr(self, tweet):
if not hasattr(self, "id"):
self.id = tweet.id
self.__raw = tweet._json
if hasattr(tweet, "retweeted_status"):
self.genAttr(tweet.retweeted_status)
else:
self.author = "Twitter - " + tweet.author.name
self.author_image = tweet.author.profile_image_url_https
self.text = tweet.full_text.encode("utf8")
self.text += "\n\n_Give us feedback:_ "
self.text += "[like]({0}/{1}/?opinion={2}) or [dislike]({0}/{1}/?opinion={3}) ?".format(
self.reviewUrl,
self.id,
"like",
"dislike"
)
self.text += self.__processEnding(tweet)
def __processEnding(self, tweet):
ending = ""
if hasattr(tweet, "entities"):
if "media" in tweet.entities:
self.media = tweet.entities["media"]
n = len(self.media)
self.text = " ".join(self.text.split()[:-n])
if len(self.media) > 0:
ending += " []({})".format(
self.media[0]["media_url_https"]
)
if "urls" in tweet.entities:
self.urls = tweet.entities["urls"]
if len(self.urls) > 0:
ending = " []({})".format(self.urls[0]["url"]) + ending
return ending
def writeToFolder(self, folder):
with open(path.join(folder, str(self.id) + ".json"), "w") as f:
json.dump(self.__raw, f, indent=2, sort_keys=True)
class TwitterPuller(object):
def __init__(self, **kwargs):
with open("config.json") as f:
self.config = json.load(f)
self.config.update(kwargs)
self.auth = tweepy.OAuthHandler(
self.config["ConsumerKey"],
self.config["ConsumerSecret"])
self.auth.set_access_token(
self.config["AccessToken"],
self.config["AccessTokenSecret"])
self.api = tweepy.API(self.auth)
@property
def lastId(self):
return self.config["lastId"]
@lastId.setter
def lastId(self, x):
self.config["lastId"] = x
with open("config.json", "w") as f:
json.dump(self.config, f, indent=2, sort_keys=True)
def get(self, **kwargs):
default = {
"tweet_mode": "extended",
"count": 3,
"include_entities": True,
"result_type": "popular",
"since_id": self.lastId,
}
default.update(kwargs)
public_tweets = self.api.home_timeline(**default)
        public_tweets = [Tweet(x, self.config) for x in public_tweets]
        if len(public_tweets) > 0:
self.lastId = public_tweets[0].id
return public_tweets
if __name__ == "__main__":
import os
dir = os.path.dirname(__file__)
if dir != "":
os.chdir(dir)
# create logger
logger = logging.getLogger("custom")
logger.setLevel(logging.INFO)
f = logging.FileHandler("main.log")
f.setLevel(logging.INFO)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(message)s')
# add formatter to ch
f.setFormatter(formatter)
# add ch to logger
logger.addHandler(f)
try:
pulled = TwitterPuller().get()
n = len(pulled)
logger.info("Pulled {} tweet(s)".format(n))
for tweet in pulled:
tweet.send()
tweet.writeToFolder("history")
logger.info(
"Sent tweet {0} from {1}".format(
tweet.id,
tweet.author.encode("utf-8")
))
except Exception as e:
logger.error(e)
|
import os
script_dir = os.path.dirname(os.path.realpath(__file__))
tmp_dir = os.path.abspath( script_dir + "/../../../../tmp/" ) + "/"
|
def average_speed(s1 : float, s0 : float, t1 : float, t0 : float) -> float:
"""
[FUNC] average_speed:
Returns the average speed.
Where:
Delta Space = (space1[s1] - space0[s0])
Delta Time = (time1[t1] - time0[t0])
"""
    return (s1 - s0) / (t1 - t0)
def average_acceleration(v1 : float, v0 : float, t1 : float, t0 : float) -> float:
"""
[FUNC] average_acceleration:
Returns the average_acceleration
"""
    return (v1 - v0) / (t1 - t0)
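# Quick sanity check (not part of the original module): 100 m covered in 20 s gives
# an average speed of 5 m/s, and going from 0 m/s to 5 m/s in 20 s gives an average
# acceleration of 0.25 m/s^2.
if __name__ == "__main__":
    print(average_speed(100.0, 0.0, 20.0, 0.0))       # 5.0
    print(average_acceleration(5.0, 0.0, 20.0, 0.0))  # 0.25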
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: feast/specs/ImportSpec.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from feast.types import Value_pb2 as feast_dot_types_dot_Value__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='feast/specs/ImportSpec.proto',
package='feast.specs',
syntax='proto3',
serialized_options=_b('\n\013feast.specsB\017ImportSpecProtoZ6github.com/gojek/feast/protos/generated/go/feast/specs'),
serialized_pb=_b('\n\x1c\x66\x65\x61st/specs/ImportSpec.proto\x12\x0b\x66\x65\x61st.specs\x1a\x17\x66\x65\x61st/types/Value.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xb8\x01\n\nImportSpec\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x35\n\x07options\x18\x02 \x03(\x0b\x32$.feast.specs.ImportSpec.OptionsEntry\x12\x10\n\x08\x65ntities\x18\x03 \x03(\t\x12#\n\x06schema\x18\x04 \x01(\x0b\x32\x13.feast.specs.Schema\x1a.\n\x0cOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xa2\x01\n\x06Schema\x12\"\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x12.feast.specs.Field\x12\x19\n\x0ftimestampColumn\x18\x05 \x01(\tH\x00\x12\x34\n\x0etimestampValue\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x16\n\x0e\x65ntityIdColumn\x18\x07 \x01(\tB\x0b\n\ttimestamp\"(\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tfeatureId\x18\x02 \x01(\tBV\n\x0b\x66\x65\x61st.specsB\x0fImportSpecProtoZ6github.com/gojek/feast/protos/generated/go/feast/specsb\x06proto3')
,
dependencies=[feast_dot_types_dot_Value__pb2.DESCRIPTOR,google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_IMPORTSPEC_OPTIONSENTRY = _descriptor.Descriptor(
name='OptionsEntry',
full_name='feast.specs.ImportSpec.OptionsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='feast.specs.ImportSpec.OptionsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='feast.specs.ImportSpec.OptionsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=242,
serialized_end=288,
)
_IMPORTSPEC = _descriptor.Descriptor(
name='ImportSpec',
full_name='feast.specs.ImportSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='feast.specs.ImportSpec.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='options', full_name='feast.specs.ImportSpec.options', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entities', full_name='feast.specs.ImportSpec.entities', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='schema', full_name='feast.specs.ImportSpec.schema', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_IMPORTSPEC_OPTIONSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=288,
)
_SCHEMA = _descriptor.Descriptor(
name='Schema',
full_name='feast.specs.Schema',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fields', full_name='feast.specs.Schema.fields', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestampColumn', full_name='feast.specs.Schema.timestampColumn', index=1,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestampValue', full_name='feast.specs.Schema.timestampValue', index=2,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entityIdColumn', full_name='feast.specs.Schema.entityIdColumn', index=3,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='timestamp', full_name='feast.specs.Schema.timestamp',
index=0, containing_type=None, fields=[]),
],
serialized_start=291,
serialized_end=453,
)
_FIELD = _descriptor.Descriptor(
name='Field',
full_name='feast.specs.Field',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='feast.specs.Field.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='featureId', full_name='feast.specs.Field.featureId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=455,
serialized_end=495,
)
_IMPORTSPEC_OPTIONSENTRY.containing_type = _IMPORTSPEC
_IMPORTSPEC.fields_by_name['options'].message_type = _IMPORTSPEC_OPTIONSENTRY
_IMPORTSPEC.fields_by_name['schema'].message_type = _SCHEMA
_SCHEMA.fields_by_name['fields'].message_type = _FIELD
_SCHEMA.fields_by_name['timestampValue'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_SCHEMA.oneofs_by_name['timestamp'].fields.append(
_SCHEMA.fields_by_name['timestampColumn'])
_SCHEMA.fields_by_name['timestampColumn'].containing_oneof = _SCHEMA.oneofs_by_name['timestamp']
_SCHEMA.oneofs_by_name['timestamp'].fields.append(
_SCHEMA.fields_by_name['timestampValue'])
_SCHEMA.fields_by_name['timestampValue'].containing_oneof = _SCHEMA.oneofs_by_name['timestamp']
DESCRIPTOR.message_types_by_name['ImportSpec'] = _IMPORTSPEC
DESCRIPTOR.message_types_by_name['Schema'] = _SCHEMA
DESCRIPTOR.message_types_by_name['Field'] = _FIELD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImportSpec = _reflection.GeneratedProtocolMessageType('ImportSpec', (_message.Message,), dict(
OptionsEntry = _reflection.GeneratedProtocolMessageType('OptionsEntry', (_message.Message,), dict(
DESCRIPTOR = _IMPORTSPEC_OPTIONSENTRY,
__module__ = 'feast.specs.ImportSpec_pb2'
# @@protoc_insertion_point(class_scope:feast.specs.ImportSpec.OptionsEntry)
))
,
DESCRIPTOR = _IMPORTSPEC,
__module__ = 'feast.specs.ImportSpec_pb2'
# @@protoc_insertion_point(class_scope:feast.specs.ImportSpec)
))
_sym_db.RegisterMessage(ImportSpec)
_sym_db.RegisterMessage(ImportSpec.OptionsEntry)
Schema = _reflection.GeneratedProtocolMessageType('Schema', (_message.Message,), dict(
DESCRIPTOR = _SCHEMA,
__module__ = 'feast.specs.ImportSpec_pb2'
# @@protoc_insertion_point(class_scope:feast.specs.Schema)
))
_sym_db.RegisterMessage(Schema)
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), dict(
DESCRIPTOR = _FIELD,
__module__ = 'feast.specs.ImportSpec_pb2'
# @@protoc_insertion_point(class_scope:feast.specs.Field)
))
_sym_db.RegisterMessage(Field)
DESCRIPTOR._options = None
_IMPORTSPEC_OPTIONSENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
from .rest import Restful
from .pgs import Pgs
from .transaction import Transaction
__all__ = ['Restful', 'Pgs', 'Transaction']
|
import json
import re
import pytest
from mealie.services.scraper.cleaner import Cleaner
from mealie.services.scraper.scraper import extract_recipe_from_html
from tests.test_config import TEST_RAW_HTML, TEST_RAW_RECIPES
# https://github.com/django/django/blob/stable/1.3.x/django/core/validators.py#L45
url_validation_regex = re.compile(
r"^(?:http|ftp)s?://" # http:// or https://
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain...
r"localhost|" # localhost...
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$",
re.IGNORECASE,
)
@pytest.mark.parametrize(
"json_file,num_steps",
[
("best-homemade-salsa-recipe.json", 2),
(
"blue-cheese-stuffed-turkey-meatballs-with-raspberry-balsamic-glaze-2.json",
3,
),
("bon_appetit.json", 8),
("chunky-apple-cake.json", 4),
("dairy-free-impossible-pumpkin-pie.json", 7),
("how-to-make-instant-pot-spaghetti.json", 8),
("instant-pot-chicken-and-potatoes.json", 4),
("instant-pot-kerala-vegetable-stew.json", 13),
("jalapeno-popper-dip.json", 4),
("microwave_sweet_potatoes_04783.json", 4),
("moroccan-skirt-steak-with-roasted-pepper-couscous.json", 4),
("Pizza-Knoblauch-Champignon-Paprika-vegan.html.json", 3),
],
)
def test_cleaner_clean(json_file, num_steps):
recipe_data = Cleaner.clean(json.load(open(TEST_RAW_RECIPES.joinpath(json_file))))
assert len(recipe_data["recipeInstructions"]) == num_steps
def test_clean_category():
assert Cleaner.category("my-category") == ["my-category"]
def test_clean_html():
assert Cleaner.html("<div>Hello World</div>") == "Hello World"
def test_clean_image():
assert Cleaner.image(None) == "no image"
assert Cleaner.image("https://my.image/path/") == "https://my.image/path/"
assert Cleaner.image({"url": "My URL!"}) == "My URL!"
assert Cleaner.image(["My URL!", "MY SECOND URL"]) == "My URL!"
@pytest.mark.parametrize(
"instructions",
[
"A\n\nB\n\nC\n\n",
"A\nB\nC\n",
"A\r\n\r\nB\r\n\r\nC\r\n\r\n",
"A\r\nB\r\nC\r\n",
["A", "B", "C"],
[{"@type": "HowToStep", "text": x} for x in ["A", "B", "C"]],
],
)
def test_cleaner_instructions(instructions):
assert Cleaner.instructions(instructions) == [
{"text": "A"},
{"text": "B"},
{"text": "C"},
]
def test_html_with_recipe_data():
path = TEST_RAW_HTML.joinpath("healthy_pasta_bake_60759.html")
url = "https://www.bbc.co.uk/food/recipes/healthy_pasta_bake_60759"
recipe_data = extract_recipe_from_html(open(path, encoding="utf8").read(), url)
assert len(recipe_data["name"]) > 10
assert len(recipe_data["slug"]) > 10
assert recipe_data["orgURL"] == url
assert len(recipe_data["description"]) > 100
assert url_validation_regex.match(recipe_data["image"])
assert len(recipe_data["recipeIngredient"]) == 13
assert len(recipe_data["recipeInstructions"]) == 4
def test_time_cleaner():
my_time_delta = "PT2H30M"
return_delta = Cleaner.time(my_time_delta)
assert return_delta == "2 Hours 30 Minutes"
|
'''
Created on Sep 24, 2020
@author: amir
'''
from datetime import datetime
import scipy.constants
class settings():
def __init__(self, dataEditing_mode=1):
self.filter = {
            'ws' : 'ws/champ_2003_297_307/EKF/',# folder of observations/precise orbit/ephemeris/etc.
'begin' : self.dt([2003,10,29,0,0,0]),# beginning time of OD (UTC)
'end' : self.dt([2003,10,29,1,30,0]),# end time of OD (UTC)
'type' : 'kalman',# Type of the filtering algorithm
'obsType': 'Graphic',# Observation Type
            'Dynmod' : 0,# set which state vector parameters will be estimated
'measurementNoise' : 0.1,
'obsSISRE' : 1.5,
            'stepSize' : 30, # filter output interval in seconds
            'timeUpdateThreshold' : 1, # threshold for the time update step
            'enableSaveFilterOutput' : 1, # save the filter output (1 = enabled)
            'ephemeris' : 'precise', #'broadcast'
            'EphemerisInterval' : 15*60, # ephemeris modeling interval in seconds
'sigmascale' : 2
}
# self.filter.obsType = 'Graphic'; # Observation Type
# self.filter.obsType = 'Code'; # Observation Type
# self.filter.obsType = 'navsol'; # Observation Type
        # To estimate atmospheric drag, solar radiation pressure, empirical
        # accelerations and the Markov process correlation time,
        # set enableDynModParam to "0", "1" or "2":
        # 0 : do not estimate
        # 1 : estimate the atmospheric drag coefficient, solar radiation pressure
        #     coefficient and empirical accelerations
        # 2 : estimate the atmospheric drag coefficient, solar radiation pressure
        #     coefficient, empirical accelerations and the Markov process
        #     correlation time
self.init = {
'CD' : 2,# initial value for drag acceleration coeff
'CR' : 1.5,# initial value for solar radiation acceleration coeff
'Psrad' : 4.56e-6,# Solar radiation pressure at 1 AU, [N/m^2], IERS 96
'empacc' : [1e-6,1e-6,1e-6],# in RTN
            'corelTime' : 600,# initial value for the correlation time of the Markov process
'posXYZ' : [0,0,0],
'velXYZ' : [0,0,0],#[0.05,0.05,0.05],
'atmDragCoeff' : 0.001,
'solarRadCoeff' : 0.001,
'empAccellRTN' : [1e-9,1e-9,1e-9 ],#[1000e-9 1000e-9 1000e-9]; # m/s2
'corelTime' : 1e-2,
'recClcBias' : 1,
'ambiguityBias' : 0.01,
}
self.sat = {
            'a2m' : (1.22/522),# cross sectional area to mass ratio (m2/kg)
'mass' : 522,# satellite mass (kg)
'incId' : 4,# satellite orbital parameter representing inclination
'nmax' : 70,# maximum degree of Stoke's coefficients
}
# Set Time System Parameters
self.TimeRefSystem = {
'UT1_UTC' : -0.3652860,# in second
'TAI_UTC' : 32,# in second
'PolarMotion_xp' : 0.220270,# in second
'PolarMotion_yp' : 0.242220,# in second
'MJD2000' : 2451545.0-2400000.5, # Modified Julian date of J2000
}
# Set filter time propagation parameters
# propagation step size in second
# Set statistical parameters of measurements and auxiliary parameters
# Set Initial statistical parameters
# Set parameters required for data editing
# Set the data editing mode.The value of 1 for filtSet.dataEditing.mode
# indicates recursive outlier detection. The value of 2 is for robust filtering
# if recursive outlier detection mode is selected consider the following settings
self.dataEditing = {}
if dataEditing_mode == 1:
self.dataEditing = {'outlierFactor' : 5000,# Outlier Factor
'elevationThreshold' : 10, # elevation threshold in degree
}
# if robust mode is selected consider the following settings
elif dataEditing_mode == 2:
# Set the level of significance (los) value for the chi-square
# distribution used to detect faulty measurements. The los can be
# set to one of following values ;
# los=> (.995 .990 .975 .950 .900 .100 .050 .025 .010 .005 )
            self.dataEditing = {'chiSquare_loss' : 0.95}
# Minumum number of observation
self.dataEditing['minNumObs'] = 4
# GPS recever Antenna Offset from the base location in radial, alongTrack
# crossTrack directions
self.dataEditing['AntOffFromSatBaseLoc'] = [-0.4306 , -1.488, 0]
# For Unscented Kalman Filter; sigma-vector parameters
self.ukfParams = {
'kappa' : 0,
'alfa' : 0.1,
'beta' : 2,
}
self.constants = {'speed_of_light' : scipy.constants.physical_constants['speed of light in vacuum'][0],
'lambda_L1' : (scipy.constants.physical_constants['speed of light in vacuum'][0]/1575.42e6), # wavelength of L1 carrier
'f1' : 1575.42e6, #Hz;
'f2' : 1227.6e6, #Hz
'earth_Radius' : 6378136.46,# Earth's mean radius(m)
'earth_GM' : 3986004.415e8,# Earth's gravity constant, [m^3/s^2],EGM2008
'Sun_GM' : 1.32712438e20,# Sun's gravity constant [m^3/s^2]; IAU 1976
            'Moon_GM' : 4902799059741.11,# Moon's gravity constant
'earth_w' : 7.2921158553e-5,# Earth angular velocity, NIMA 1997
'AU' : 149597870000.0,# Astronomical unit [m], IAU 1976
}
def dt(self, t):
        dstring = '{0:04}-{1:02}-{2:02} {3:02}:{4:02}:{5:02}'\
            .format(t[0], t[1], t[2], t[3], t[4], t[5])
return datetime.strptime(dstring,"%Y-%m-%d %H:%M:%S")
if __name__ == '__main__':
settings = settings(dataEditing_mode=1)
print('done')
|
import time
import itertools
import cv2 as cv
import mediapipe as mp
import pyautogui
from model.gesture_classifier import \
Gesture, GestureClassifier
def main():
# Initialize mediapipe
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_hands = mp.solutions.hands
hands = mp_hands.Hands(
model_complexity=0,
max_num_hands=1,
min_detection_confidence=0.85,
min_tracking_confidence=0.75)
# Create debugging utilities
prev_frame_time = 0
new_frame_time = 0
debug_msg = "FPS: {:.0f}"
status_msg = "Gesture: {}"
font = cv.FONT_HERSHEY_COMPLEX_SMALL
# Create gesture classifier
classify = GestureClassifier()
gesture = None
# OpenCV start video capture
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
last_gesture_recognize = None
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
image_height, image_width, _ = image.shape
# Disable writeable to improve performance
image.flags.writeable = False
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv.cvtColor(image, cv.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
preprocessed_landmarks = preprocess_landmarks(
hand_landmarks, image_width, image_height)
gesture = classify(preprocessed_landmarks)
#Shortcut hotkeys
if gesture != last_gesture_recognize:
last_gesture_recognize = gesture
if gesture == Gesture.OPEN:
pyautogui.hotkey("ctrlleft", "c")
elif gesture == Gesture.CLOSED:
pyautogui.hotkey("ctrlleft", "v")
elif gesture == Gesture.OKAY:
pyautogui.hotkey("ctrlleft", "x")
mp_drawing.draw_landmarks(
image,
hand_landmarks,
mp_hands.HAND_CONNECTIONS,
mp_drawing_styles.get_default_hand_landmarks_style(),
mp_drawing_styles.get_default_hand_connections_style())
# Calculate FPS here using time
new_frame_time = time.time()
fps = 1 / (new_frame_time-prev_frame_time)
prev_frame_time = new_frame_time
# Final processing of frame
image_flipped = cv.flip(image, 1)
image_flipped = cv.putText(
image_flipped, debug_msg.format(fps),
(0, image_height - 12), font, 0.5,
(255, 255, 255), 1, cv.LINE_AA)
image_flipped = cv.putText(
image_flipped, status_msg.format(gesture),
(0, 24), font, 1.5,
(255, 255, 255), 1, cv.LINE_AA)
# Flip the image horizontally for a selfie-view display.
cv.imshow('Hands', image_flipped)
if cv.waitKey(5) & 0xFF == 27:
break
cap.release()
def preprocess_landmarks(landmarks, image_width, image_height):
    # Transform landmarks into absolute pixel coordinates
absolute_points = []
for landmark in landmarks.landmark:
x = min(int(landmark.x * image_width), image_width - 1)
y = min(int(landmark.y * image_height), image_height - 1)
absolute_points.append([x, y])
if len(absolute_points) == 0:
return absolute_points
# Transform absolute points into relative points
relative_points = []
base_x, base_y = 0, 0
for index, point in enumerate(absolute_points):
if index == 0:
base_x, base_y = point[0], point[1]
x = point[0] - base_x
y = point[1] - base_y
relative_points.append([x, y])
# Convert to a one-dimensional list
points = list(itertools.chain.from_iterable(relative_points))
# Normalize the values
max_value = max(list(map(abs, points)))
def _normalize(n):
return n / max_value
points = list(map(_normalize, points))
return points
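# Illustrative note (comment only, not part of the original file): the wrist
# landmark (index 0) becomes the origin, every other landmark is expressed
# relative to it, and the flattened list is divided by its largest absolute
# value, so the classifier always receives coordinates in the range [-1, 1].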
if __name__ == "__main__":
main()
|
# Copyright 2010-2017 Luc Saffre
# License: BSD (see file COPYING for details)
"""A collection of utilities which require Django settings to be
importable.
This defines some helper classes like
- :class:`Parametrizable` and :class:`Permittable` ("mixins" with
common functionality for both actors and actions),
- the volatile :class:`InstanceAction` object
- the :class:`ParameterPanel` class (used
e.g. by :class:`lino.mixins.periods.ObservedDateRange`)
- :attr:`ContentType` and `GenericForeignKey`
"""
from __future__ import unicode_literals
import six
from django.conf import settings
from django.db.models import ForeignKey
from .utils import UnresolvedField, UnresolvedModel
if settings.SITE.is_installed('contenttypes'):
from lino.modlib.gfks.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey \
as DjangoGenericForeignKey
from django.contrib.contenttypes.fields import GenericRelation
def is_foreignkey(fld):
return isinstance(fld, (ForeignKey, DjangoGenericForeignKey))
else:
GenericForeignKey = UnresolvedField
ContentType = UnresolvedModel
GenericRelation = UnresolvedField
def is_foreignkey(fld):
return isinstance(fld, ForeignKey)
def gfk2lookup(gfk, obj, **kw):
"""Return a `dict` with the lookup keywords for the given
GenericForeignKey field `gfk` on the given database object `obj`.
See also :ref:`book.specs.gfks`.
"""
if obj is None:
# 20120222 : here was only `pass`, and the two other lines
# were uncommented. don't remember why I commented them out.
# But it caused all tasks to appear in UploadsByController of
# an insert window for uploads.
kw[gfk.ct_field] = None
kw[gfk.fk_field] = None
else:
ct = ContentType.objects.get_for_model(obj.__class__)
kw[gfk.ct_field] = ct
if not isinstance(obj.pk, six.integer_types):
# IntegerField gives `long` when using MySQL
return kw
kw[gfk.fk_field] = obj.pk
return kw
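# Illustrative example (comment only, not part of the original module): for a
# hypothetical GenericForeignKey declared as GenericForeignKey('owner_type', 'owner_id')
# and a saved instance obj, gfk2lookup(gfk, obj) returns roughly
#   {'owner_type': ContentType.objects.get_for_model(obj.__class__), 'owner_id': obj.pk}
# which can be passed directly to queryset.filter(**kw).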
|
# Copyright (c) 2012 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import command
import gitutil
import os
def FindGetMaintainer():
"""Look for the get_maintainer.pl script.
Returns:
If the script is found we'll return a path to it; else None.
"""
try_list = [
os.path.join(gitutil.GetTopLevel(), 'scripts'),
]
# Look in the list
for path in try_list:
fname = os.path.join(path, 'get_maintainer.pl')
if os.path.isfile(fname):
return fname
return None
def GetMaintainer(fname, verbose=False):
"""Run get_maintainer.pl on a file if we find it.
We look for get_maintainer.pl in the 'scripts' directory at the top of
git. If we find it we'll run it. If we don't find get_maintainer.pl
then we fail silently.
Args:
fname: Path to the patch file to run get_maintainer.pl on.
Returns:
A list of email addresses to CC to.
"""
get_maintainer = FindGetMaintainer()
if not get_maintainer:
if verbose:
print("WARNING: Couldn't find get_maintainer.pl")
return []
stdout = command.Output(get_maintainer, '--norolestats', fname)
lines = stdout.splitlines()
return [ x.replace('"', '') for x in lines ]
|
#!/usr/bin/env python
# -*-coding:utf-8-*-
# Author: nomalocaris <nomalocaris.top>
""""""
from __future__ import (absolute_import, unicode_literals)
from .adaptive_grid_construction import generate_adaptive_grid
from .adaptive_grid_construction import read_mdl_data
from .adaptive_grid_construction import cal_split
from .adaptive_grid_construction import generate_sd_grid_mapping_traj
from .trip_distribution_extraction import trip_distribution_main
from .mobility_model_construction import mobility_model_main
from .route_length_estimation import route_length_estimate_main
from .synthetic_trajectory_generation import syn
|
# DESAFIO 019
# A teacher wants to randomly pick one of his 4 students to erase the board. Write a program that helps him
# by reading their names and printing the name of the chosen one.
from random import choice
a1 = str(input('Primeiro aluno: '))
a2 = str(input('Segundo aluno: '))
a3 = str(input('Terceiro aluno: '))
a4 = str(input('Quarto aluno: '))
lista = [a1, a2, a3, a4]
so = choice(lista)
print(f'O aluno escolhido foi: {so}')
|
import understand
import re
db = understand.open("fastgrep2.udb")
print(db.ents())
#for ent in sorted(db.ents(), key=lambda ent: ent.name()):
# print(ent.name(), " [", ent.kindname(), "]", sep="", end="\n")
# Create a regular expression that is case insensitive
searchstr = re.compile("r*.h", re.I)
for file in db.lookup(searchstr, "File"):
print(file)
print("**************************************")
metrics = db.metric(db.metrics())
for k, v in sorted(metrics.items()):
print(k, "=", v)
print("**************************************")
for func in db.ents("function,method,procedure"):
metric = func.metric(("Cyclomatic",))
if metric["Cyclomatic"] is not None:
print (func," = ",metric["Cyclomatic"],sep="")
print("*************************************")
for func in db.ents("function,method,procedure"):
file = "callby_" + func.name() + ".png"
print (func.longname(),"->",file)
func.draw("Called By",file)
print("************************************")
for func in db.ents("function,method,procedure"):
for line in func.ib():
print(line, end="")
file = db.lookup("r*.h")[0]
for lexeme in file.lexer():
print (lexeme.text(),end="")
if lexeme.ent():
print ("@",end="")
|
import numpy as _np
import numexpr as _ne
from ..util import fastlen as _fastlen
class correlator:
def __init__(self, mask, fftfunctions=(_np.fft.rfftn, _np.fft.irfftn)):
"""
nd-correlations with constant mask.
will not normalize between different added images, but each single image.
optional fftfunctions argument is pair of forward and backward fft function to use (default: numpy default)
based on http://www.dirkpadfield.com/Home/MaskedFFTRegistrationPresentation.pdf but instead of subtracting correlations of mean, divide by it
        masked autocorrelation of image =
            (maskedImage C maskedImage) / ((mask C Image) * (Image C mask) / (mask C mask))
        where C denotes cross-correlation
"""
fft, ifft = fftfunctions
self._fft = fftfunctions
self._shape = mask.shape
self._padshape = tuple(_fastlen(2 * s) for s in mask.shape)
self._mask = self._pad(mask)
self._fmask = fft(self._mask)
self._mCm = ifft(self._fmask * self._fmask.conj())
def corr(self, image):
"""
does a new correlation
"""
for ds, cs in zip(image.shape, self._shape):
if ds != cs:
raise ValueError('data has not expected shape')
fft, ifft = self._fft
pimage = self._pad(image * self._mask[tuple((slice(0, s) for s in self._shape))])
fimg = fft(pimage)
res = ifft(_ne.evaluate('fimg*conj(fimg)')) # iCi
_ne.evaluate('fimg*conj(fmask)', local_dict={'fmask': self._fmask, 'fimg': fimg}, out=fimg)
norm = ifft(fimg) # iCm
_ne.evaluate('conj(fimg)', out=fimg)
# after fftshift it would be mCi=flip iCm in both directions,
# fftshift effects first row/column differently, so in the unshifted version a multiplication by mCi is:
# norm *= _np.roll(norm[tuple(norm.ndim * [slice(None, None, -1)])], norm.ndim * [1], range(0, norm.ndim))
norm *= ifft(fimg)
_ne.evaluate('where(norm>1e-5,res/norm*mCm,res*mCm)', out=res, local_dict={'res': res, 'norm': norm, 'mCm': self._mCm})
res = _np.fft.fftshift(res)[tuple((slice(ps // 2 - s + 1, ps // 2 + s) for s, ps in zip(self._shape, self._padshape)))]
return res
def _pad(self, data):
"""
pads data to size, data will be in top left corner of return
"""
ret = _np.zeros(self._padshape, _np.float64)
ret[tuple((slice(0, s) for s in self._shape))] = data
return ret
@property
def shape_input(self):
"""
expected input shape for add function
"""
return self._shape
@property
def shape_result(self):
"""
shape of the result
"""
return tuple((2 * s - 1 for s in self._shape))
@property
def mask(self):
"""
used mask
"""
return self._mask[tuple((slice(0, s) for s in self._shape))]
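# Minimal usage sketch (illustrative, not part of the original module; the relative
# imports above mean this file is meant to be used as part of its package, so the
# example is left as a comment):
#
#     mask = _np.ones((32, 32))
#     c = correlator(mask)
#     res = c.corr(_np.random.rand(32, 32))
#     # res.shape == c.shape_result == (63, 63)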
|
"""atl_thymio2_ros2 controller."""
# You may need to import some classes of the controller module. Ex:
# from controller import Robot, Motor, DistanceSensor
from controller import Robot
import rclpy
from rclpy.node import Node
from thymio2_interfaces.msg import Thymio2Controller
from thymio2_interfaces.srv import Thymio2ControllerSrv
from thymio2_interfaces.srv import Thymio2MotorSrv
from thymio2_interfaces.srv import Thymio2LEDSrv
# create the Robot instance.
robot = Robot()
# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())
class Thymio2ControllerNode(Node): # MODIFY NAME
def __init__(self):
super().__init__("Thymio2ControllerNode")
self.declare_parameter("robot_device", "THYMIO_V1")
self.thymio2_status_publisher_ = self.create_publisher(Thymio2Controller, "ThymioControllerPublisher_"+str(self.get_parameter("robot_device").value), 10)
self.thymio2_controller_service_ = self.create_service(Thymio2ControllerSrv, "ThymioControllerService_"+str(self.get_parameter("robot_device").value), self.callback_thymio2_controller)
self.thymio2_motor_service_ = self.create_service(Thymio2MotorSrv, "Thymio2MotorSrv_"+str(self.get_parameter("robot_device").value), self.callback_thymio2_motors)
self.thymio2_led_service_ = self.create_service(Thymio2LEDSrv, "Thymio2LEDSrv_"+str(self.get_parameter("robot_device").value), self.callback_thymio2_LED)
self.timer_ = self.create_timer(0.5, self.publish_thymio_status)
self.get_logger().info("Thymio2_DBUS_Controller publisher has been started.")
# Initializing WEBOT Thymio actuator
## Motors
        # The Robot instance is created at module level above; keep a reference on the node.
        self.robot = robot
        self.leftMotor = self.robot.getMotor('motor.left')
        self.rightMotor = self.robot.getMotor('motor.right')
self.leftMotor.setPosition(float('inf'))
self.rightMotor.setPosition(float('inf'))
self.leftMotor.setVelocity(0)
self.rightMotor.setVelocity(0)
self.motorMaxSpeed = self.leftMotor.getMaxVelocity()
self.get_logger().info("Thymio2 Motors initialzed!")
def callback_thymio2_controller(self, request, response):
if request.data:
self.counter_ = 0
response.success = True
response.log_message = "Counter has been reset"
else:
response.success = False
response.log_message = "Counter has not been reset"
return response
def callback_thymio2_motors(self, request, response):
response.log_message = "OK"
self.leftMotor.setVelocity(request.motor_left_target)
self.rightMotor.setVelocity(request.motor_right_target)
return response
def callback_thymio2_LED(self, request, response):
# Add LED commands here
return response
def publish_thymio_status(self):
# Read and publish sensor data
msg = Thymio2Controller()
        # TODO: fill in the message fields from the simulated sensors, e.g.:
        # msg.acc = ...
        # msg.prox_horizontal = ...
        # msg.prox_ground_ambiant = ...
        # msg.prox_ground_delta = ...
        # msg.prox_ground_reflected = ...
        # msg.sd_present = ...
        # msg.debug_message = ...
self.thymio2_status_publisher_.publish(msg)
# while robot.step(timestep) != -1:
# Enter here functions to read sensor data, like:
# val = ds.getValue()
# Publish sensor date to ROS2 topic - check with
# ros2 topic list
# ros2 topic echo <topic>`
# Process sensor data here.
# Enter here functions to send actuator commands, like:
# motor.setPosition(10.0)
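# Illustrative sketch (an assumption, not part of the original controller): instead of
# blocking in rclpy.spin(), the Webots step loop and the ROS 2 executor could be driven
# together. spin_with_webots is a hypothetical helper and is not called below.
def spin_with_webots(node):
    while robot.step(timestep) != -1 and rclpy.ok():
        # Process pending ROS 2 callbacks without blocking the simulation step.
        rclpy.spin_once(node, timeout_sec=0.0)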
def main(args=None):
rclpy.init(args=args)
node = Thymio2ControllerNode() # MODIFY NAME
rclpy.spin(node)
rclpy.shutdown()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
from azureml.core import Model, Run
import argparse
import os
import numpy as np
import iJungle
import joblib
run = Run.get_context()
print("iJungle version:", iJungle.__version__)
run.log('iJungle_version', iJungle.__version__)
parser = argparse.ArgumentParser()
# Input Data
parser.add_argument("--input-data", type=str, dest='input_data', help='Overhead dataset')
parser.add_argument("--id-feature", type=str, dest='id_feature', help='ID Freature')
# Hyper parameters
parser.add_argument('--trees', type=int, dest='trees', default=100, help='Number of trees')
parser.add_argument('--subsample-size', type=int, dest='subsample_size', default=8192, help='Subsample size')
# Add arguments to args collection
args = parser.parse_args()
id_feat = str(args.id_feature)
print('id feature', id_feat)
# Log Hyperparameter values
trees = int(args.trees)
subsample_size = int(args.subsample_size)
print('trees', trees)
print('subsample_size', subsample_size)
run.log('trees', trees)
run.log('subsample_size', subsample_size)
# Load training data
print("Loading Data...")
W = run.input_datasets['overhead_data'].to_pandas_dataframe() # Get the training data from the estimator input
W.set_index(id_feat, inplace=True)
# Load iFor_list pickle
print("Loading pickle...")
model_name = 'iJungle_light_' + str(trees) + '_' + str(subsample_size)
print(model_name)
model_path = Model.get_model_path(model_name)
print(model_path)
iFor_list = joblib.load(model_path)
# Evaluation
print("Starting evaluation ...")
os.makedirs(iJungle._MODEL_DIR, exist_ok=True)
results = iJungle.model_eval_fun(W, iFor_list)
results_filename = os.path.join(iJungle._MODEL_DIR, model_name + '_results.pkl')
print("Writing results:", results_filename)
joblib.dump(value=results, filename=results_filename)
# Log dummy metric
run.log('Dummy', float(0))
run.complete()
|
from AML.math2d import *
v1 = Vector2(50,50)
v1r = 40
v2 = Vector2(100,100)
v2r = 50
v1v2 = v2 - v1
v2v1 = -v1v2
v1v2u = v1v2/v1v2.mag()
v2v1u = v2v1/v2v1.mag()
v1rv = v1 + v1v2u*v1r
v2rv = v2 + v2v1u*v2r
vmid = v1rv - v2rv
print(vmid)
|
from semantic_aware_models.models.recommendation.random_recommender import RandomRecommender
from surprise.reader import Reader
import json
def main():
with open('random_recommender_config.json', 'r') as f:
config = json.load(f)
path = config['path']
separator = config['separator']
n_folds = config['n_folds']
output_recommendation_file_path = path + '<output_recommendation_file_path>'
input_file_path = path + '<input_file_path>'
ratings_file_path = path + '<ratings_file_path>'
random_path = output_recommendation_file_path + 'random/'
reader = Reader(line_format='user item rating timestamp', sep=' ')
recommender = RandomRecommender(ratings_file_path=ratings_file_path, separator=separator)
recommender.recommend_rival(n_folds=n_folds, train_test_file_path=input_file_path, reader=reader, recommendation_file_path=random_path)
if __name__ == '__main__':
main()
|
"""Mesh representation of the MANO hand model.
See `here <https://mano.is.tue.mpg.de/>`_ for details on the model.
Their code has been refactored and documented by Alexander Fabisch (DFKI GmbH, Robotics Innovation Center).
License Notice
Software Copyright License for non-commercial scientific research purposes
This project uses and modifies the MANO model and components from the software to generate the 3D hand mesh model
Please read carefully and respect the license of the publisher
License: [https://mano.is.tue.mpg.de/license.html](https://mano.is.tue.mpg.de/license.html)
"""
import json
from scipy import sparse
import pytransform3d.rotations as pr
import pytransform3d.transformations as pt
import numpy as np
from pkg_resources import resource_filename
import open3d as o3d
class HandState:
"""Holds an Open3D mesh representation of the Mano hand model.
Mano is described by Romero (2017).
J. Romero, D. Tzionas and M. J. Black:
Embodied Hands: Modeling and Capturing Hands and Bodies Together (2017),
ACM Transactions on Graphics, (Proc. SIGGRAPH Asia),
https://ps.is.tuebingen.mpg.de/uploads_file/attachment/attachment/392/Embodied_Hands_SiggraphAsia2017.pdf
website: https://mano.is.tue.mpg.de/
Parameters
----------
left : bool, optional (default: True)
Left hand. Right hand otherwise.
"""
def __init__(self, left=True):
model_parameters = load_model(left)
self.betas = np.zeros(10)
self.pose = np.zeros(48)
self.faces = model_parameters.pop("f")
self.shape_parameters = {
"v_template": model_parameters["v_template"],
"J_regressor": model_parameters["J_regressor"],
"shapedirs": model_parameters["shapedirs"],
}
self.pose_parameters = {
"weights": model_parameters["weights"],
"kintree_table": model_parameters["kintree_table"],
"posedirs": model_parameters["posedirs"],
}
self.pose_parameters["J"], self.pose_parameters["v_template"] = \
apply_shape_parameters(betas=self.betas, **self.shape_parameters)
self.vertices = hand_vertices(pose=self.pose, **self.pose_parameters)
self.material = o3d.visualization.rendering.MaterialRecord()
color = np.array([245, 214, 175, 255]) / 255.0
self.material.base_color = color
self.material.shader = "defaultLit"
self._mesh = o3d.geometry.TriangleMesh(
o3d.utility.Vector3dVector(self.vertices),
o3d.utility.Vector3iVector(self.faces))
self._mesh.compute_vertex_normals()
self._mesh.paint_uniform_color(color[:3])
self._points = o3d.geometry.PointCloud(
o3d.utility.Vector3dVector(self.vertices))
self._points.paint_uniform_color((0, 0, 0))
self.mesh_updated = False
def set_pose_parameter(self, idx, value):
self.pose[idx] = value
self.recompute_shape()
self.mesh_updated = True
def set_shape_parameter(self, idx, value):
self.betas[idx] = value
self.recompute_shape()
self.mesh_updated = True
def recompute_shape(self):
self.pose_parameters["J"], self.pose_parameters["v_template"] = \
apply_shape_parameters(betas=self.betas, **self.shape_parameters)
@property
def n_pose_parameters(self):
return self.pose.shape[0]
@property
def n_shape_parameters(self):
return self.betas.shape[0]
@property
def hand_mesh(self):
if self.mesh_updated:
self.recompute_mesh()
self.mesh_updated = False
return self._mesh
def recompute_mesh(self, mesh2world=None, vertex_normals=True,
triangle_normals=True):
self.vertices[:, :] = hand_vertices(
pose=self.pose, **self.pose_parameters)
if mesh2world is not None:
self.vertices[:, :] = pt.transform(
mesh2world, pt.vectors_to_points(self.vertices))[:, :3]
self._mesh.vertices = o3d.utility.Vector3dVector(self.vertices)
if vertex_normals:
self._mesh.compute_vertex_normals()
if triangle_normals:
self._mesh.compute_triangle_normals()
self._points.points = o3d.utility.Vector3dVector(self.vertices)
@property
def hand_pointcloud(self):
if self.mesh_updated:
self.recompute_mesh()
self.mesh_updated = False
return self._points
def load_model(left=True):
"""Load model parameters.
Parameters
----------
left : bool, optional (default: True)
Left hand. Right hand otherwise.
Returns
-------
model_parameters : dict
Parameters that we need to compute mesh of hand.
"""
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
if left:
filename = os.path.join(dir_path, "mano_left.json")
else:
filename = os.path.join(dir_path, "mano_right.json")
with open(filename, "r") as f:
model_kwargs = json.load(f)
J_regressor = model_kwargs["J_regressor"]
model_kwargs["J_regressor"] = sparse.csc_matrix(
(J_regressor["data"], J_regressor["indices"], J_regressor["indptr"]))
for k in ["f", "kintree_table", "J", "weights", "posedirs", "v_template",
"shapedirs"]:
model_kwargs[k] = np.array(model_kwargs[k])
return model_kwargs
def apply_shape_parameters(v_template, J_regressor, shapedirs, betas):
"""Apply shape parameters.
Parameters
----------
v_template : array, shape (n_vertices, 3)
Vertices of template model
J_regressor : array, shape (n_parts, n_vertices)
Joint regressor matrix that is used to predict joints for a body.
shapedirs : array, shape (n_vertices, 3, n_principal_shape_parameters)
Orthonormal principal components of shape displacements. Deviation
from the template model.
betas : array, shape (n_principal_shape_parameters,)
Linear shape coefficients. These define the magnitude of deviation
from the template model.
Returns
-------
J : array, shape (n_parts, 3)
Joint positions
v_shaped : array, shape (n_vertices, 3)
Shaped vertices of template model
"""
v_shaped = v_template + shapedirs.dot(betas)
return J_regressor.dot(v_shaped), v_shaped
def hand_vertices(J, weights, kintree_table, v_template, posedirs, pose=None):
"""Compute vertices of hand mesh.
n_parts = 16
n_vertices = 778
n_principal_shape_parameters = 10
Mesh shape is computed according to Loper et al. (2015).
M. Loper, N. Mahmood, J. Romero, G. Pons-Moll, M. J. Black:
SMPL: A Skinned Multi-Person Linear Model (2015), ACM Transactions on
Graphics (Proc. SIGGRAPH Asia), pp 248:1-248:16,
http://files.is.tue.mpg.de/black/papers/SMPL2015.pdf
Parameters
----------
J : array, shape (n_parts, 3)
Joint positions
weights : array, shape (n_vertices, n_parts)
Blend weight matrix, how much does the rotation of each part effect
each vertex
kintree_table : array, shape (2, n_parts)
Table that describes the kinematic tree of the hand.
kintree_table[0, i] contains the index of the parent part of part i
and kintree_table[1, :] does not matter for the MANO model.
v_template : array, shape (n_vertices, 3)
Vertices of template model
posedirs : array, shape (n_vertices, 3, 9 * (n_parts - 1))
Orthonormal principal components of pose displacements.
    pose : array, shape (n_parts * 3)
        Hand pose parameters
    Returns
    -------
    vertices : array, shape (n_vertices, 3)
        Vertices of the posed hand mesh
    """
if pose is None:
pose = np.zeros(kintree_table.shape[1] * 3)
pose = pose.reshape(-1, 3)
v_posed = v_template + posedirs.dot(lrotmin(pose))
vertices = forward_kinematic(pose, v_posed, J, weights, kintree_table)
return vertices
def lrotmin(p):
"""Compute offset magnitudes to the template model from pose parameters.
Parameters
----------
    p : array, shape (n_parts, 3)
        Hand pose parameters in compact axis-angle form, one row per part
Returns
-------
offset_magnitudes : array, shape (135,)
Magnitudes of offsets computed from pose parameters
"""
return np.concatenate(
[(pr.matrix_from_compact_axis_angle(np.array(pp)) - np.eye(3)).ravel()
for pp in p[1:]]).ravel()
def forward_kinematic(pose, v, J, weights, kintree_table):
"""Computes the blending of joint influences for each vertex.
Parameters
----------
pose : array, shape (n_parts * 3)
Hand pose parameters
v : array, shape (n_vertices, 3)
Vertices
J : array, shape (n_parts, 3)
Joint positions
weights : array, shape (n_vertices, n_parts)
Blend weight matrix, how much does the rotation of each part effect
each vertex
kintree_table : array, shape (2, n_parts)
Table that describes the kinematic tree of the hand.
kintree_table[0, i] contains the index of the parent part of part i
and kintree_table[1, :] does not matter for the MANO model.
Returns
-------
v : array, shape (n_vertices, 3)
Transformed vertices
"""
A = global_rigid_transformation(pose, J, kintree_table)
T = A.dot(weights.T)
rest_shape_h = np.vstack((v.T, np.ones((1, v.shape[0]))))
v = (T[:, 0, :] * rest_shape_h[0, :].reshape((1, -1)) +
T[:, 1, :] * rest_shape_h[1, :].reshape((1, -1)) +
T[:, 2, :] * rest_shape_h[2, :].reshape((1, -1)) +
T[:, 3, :] * rest_shape_h[3, :].reshape((1, -1))).T
return v[:, :3]
def global_rigid_transformation(pose, J, kintree_table):
"""Computes global rotation and translation of the model.
Parameters
----------
pose : array, shape (n_parts * 3)
Hand pose parameters
J : array, shape (n_parts, 3)
Joint positions
kintree_table : array, shape (2, n_parts)
Table that describes the kinematic tree of the hand.
kintree_table[0, i] contains the index of the parent part of part i
and kintree_table[1, :] does not matter for the MANO model.
Returns
-------
A : array, shape (4, 4, n_parts)
Transformed joint poses
"""
id_to_col = {kintree_table[1, i]: i
for i in range(kintree_table.shape[1])}
parent = {i: id_to_col[kintree_table[0, i]]
for i in range(1, kintree_table.shape[1])}
results = {0: pt.transform_from(
pr.matrix_from_compact_axis_angle(pose[0, :]), J[0, :])}
for i in range(1, kintree_table.shape[1]):
T = pt.transform_from(pr.matrix_from_compact_axis_angle(
pose[i, :]), J[i, :] - J[parent[i], :])
results[i] = results[parent[i]].dot(T)
results = [results[i] for i in sorted(results.keys())]
def pack(x):
return np.hstack([np.zeros((4, 3)), x.reshape((4, 1))])
return np.dstack(
[results[i] - pack(results[i].dot(np.hstack(((J[i, :]), (0,)))))
for i in range(len(results))])
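# Minimal usage sketch (illustrative, not part of the original module; assumes the
# packaged mano_left.json is available next to this file, as load_model() expects):
if __name__ == "__main__":
    hand_state = HandState(left=True)
    # Slightly rotate the global orientation and inspect the resulting mesh.
    hand_state.set_pose_parameter(0, 0.1)
    mesh = hand_state.hand_mesh
    print("vertices:", np.asarray(mesh.vertices).shape,
          "triangles:", np.asarray(mesh.triangles).shape)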
|
import telebot
import random
from collections import Counter
bot = telebot.TeleBot("200009247:AAHf6MAz5e3NOWr0Ypb_vnWhGiKuOrz8Fcg")
# Handles all text messages that contains the commands '/start' or '/help'.
with open('diction.dictionary') as d:
    dictionaryItems = d.read().split("\n")
bigwords = []
for items in dictionaryItems:
if len(items) == 9:
bigwords.append(items)
def combinations(string):
yield ''
for i, d in enumerate(string):
for comb in combinations(string[i + 1:]):
yield d + comb
# Takes a list as input and outputs only the words in dictionary
def dictletters(words):
a = []
for item in words:
if item in dictionaryItems and len(item)>2:
a.append(item)
return a
# storing it in dictionary so tht scoring can be easy based on the number of letters they could conjure
def possible(ourword):
allpossible = {}
word = ""
for letter in ourword:
word = word + letter
b = dictletters(combinations(word))
for items in b:
allpossible[items] = len(items)
return allpossible
ourword = ""
alpossible = {}
letters={}
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
ourword = ''.join(sorted(random.choice(bigwords)))
bot.reply_to(message, "Howdy, Make as many words as possible with:" + ourword)
#alpossible.update(possible(sorted(ourword)))
#print(alpossible)
letters.update(dict(Counter(ourword)))
@bot.message_handler(func=lambda message: True)
def echo_all(message):
a = str(message.text)
a=a.lower()
if a == "*" or a == "end-it":
bot.reply_to(message, "GoodBye!!")
pass
return
elif a not in alpossible:
alpossible[a]=1
else:
alpossible[a]=alpossible[a]+1
#print(letters)
l2 = dict(Counter(a))
#print(l2)
flag = 0
for keys, values in l2.items():
if keys not in letters:
flag=1
break
elif letters[keys] < values:
flag = -1
break
else:
continue
if a not in dictionaryItems:
flag=1
if(flag!=0):
if alpossible[a] >1:
bot.reply_to(message, "You have given the same invalid input")
else:
bot.reply_to(message, "Invalid Input")
else:
if alpossible[a]>1:
bot.reply_to(message, "Your have already entered this")
elif a in alpossible:
bot.reply_to(message, "Keep going")
else:
bot.reply_to(message, message.text + " is not a valid word")
bot.polling()
|
# Sync protocol PoC
import hashlib
import networksim
import networkwhisper
import random
import sync_pb2
import time
# Each group belongs to a client.
# Hardcoded for now.
# group_id = HASH("GROUP_ID", client_id, group_descriptor)
GROUP_ID = "0xdeadbeef"
# TODO: Introduce exponential back-off for send_time based on send_count
# XXX: Hardcoded for logging, cba
#NODE = 'xxx'
# XXX: Add debug log level
#def log(message):
# XXX: Instead of this, how about printing this to a sync log?
#print(message)
# XXX: Don't know which node! Oops.
# with open(NODE + 'sync.log', 'w') as f:
# f.write(message + '\n')
# print(message)
def merge_two_dicts(x, y):
z = x.copy()
z.update(y)
return z
class Node():
def __init__(self, logfn, name, network, profile, mode='batch'):
self.name = name
self.log = []
self.messages = {}
self.sync_state = {}
self.peers = {}
self.network = network
self.time = 0
self.mode = mode
self.offeredMessages = {} # XXX: Should be bounded
self.logger = logfn
# XXX: Assumes only one group
self.group_id = GROUP_ID
self.sharing = {GROUP_ID: set()}
# Network should be aware of sync node so it can call it
network.sync_node = self
self.profile = profile
        # random seed, used as an index into the pulsating availability series for mobile nodes
# XXX: Hacky
if (self.name == 'A'):
self.randomSeed = 0
elif (self.name == 'B'):
self.randomSeed = 1
else:
self.randomSeed = random.randint(1,10)
if profile == 'burstyMobile':
self.reliability = 0.1
self.update_availability()
elif profile == 'onlineDesktop':
self.reliability = 1 # or 0.9
else:
self.reliability = 1
self.availability = self.reliability
def tick(self):
# XXX: What else do?
# TODO: Send message if reached send time
self.time += 1
if (self.profile == 'burstyMobile'):
self.update_availability()
if (self.availability == 1):
if (self.mode == 'batch'):
self.ack_sent_messages()
self.ack_offered_messages()
self.req_offered_messages()
self.send_requested_messages()
self.send_messages()
elif (self.mode == 'interactive'):
self.ack_received_messages()
self.ack_offered_messages()
self.req_offered_messages()
self.send_requested_messages()
self.offer_messages()
#elif (self.availability == 0):
#print "*** node NOT available", self.name
#else:
# print "*** conflation overload, reliability/availability mismatch"
# NOTE: Assuming same semantics for batch and interactive mode.
#- **Acknowledge** any messages **received** from the peer that the device has
#not yet acknowledged
def ack_received_messages(self):
self.ack_sent_messages()
# - **Acknowledge** any messages **sent** by the peer that the device has not yet
# acknowledged
def ack_sent_messages(self):
# TODO: Accumulate and send all in one go
# XXX: Better Pythonesque way to do this
for mid, x in self.sync_state.items():
for peer, flags in x.items():
if flags['ack_flag'] == 1:
ack_rec = new_ack_record([mid])
self.network.send_message(self.name, peer, ack_rec)
self.sync_state[mid][peer]['ack_flag'] = 0
self.logger(" ACK ({} -> {}): {}".format(self.name[-4:], peer[-4:], mid[-4:]))
# - **Acknowledge** any messages **offered** by the peer that the device holds,
# and has not yet acknowledged
# ACK maybe once?!
def ack_offered_messages(self):
for peer, message_ids in self.offeredMessages.items():
for message_id in message_ids:
if (message_id in self.messages and
# XXX: What if they didn't receive ACK?
self.sync_state[message_id][peer]['ack_flag'] == 1):
# XXX: Slurp up
ack_rec = new_ack_record([message_id])
self.sync_state[message_id][peer]['ack_flag'] = 0
self.network.send_message(self.name, peer, ack_rec)
# **Request** any messages **offered** by the peer that the device does not
# hold, and has not yet requested
# NOTE: (Overloaded?) use of send_time and send_count for reqs.
# Seems OK since hold flag clarifies if you need to offer/send or ack.
def req_offered_messages(self):
# XXX: Not removing from cache, instead letting it grow indefinitely
# (later: bounded) UNLESS ACK etc is received
for peer_id, message_ids in self.offeredMessages.items():
for message_id in message_ids:
if (message_id not in self.messages and
# XXX: Not clear this is part of spec
self.sync_state[message_id][peer_id]['send_time'] <= self.time
):
# XXX: Slurp up
req_rec = new_req_record([message_id])
self.network.send_message(self.name, peer_id, req_rec)
n = self.sync_state[message_id][peer_id]["send_count"] + 1
self.update_sync_state(message_id, peer_id, {
'hold_flag': 1,
'send_count': n,
'send_time': self.time + int(n**2) + 1
})
self.logger("REQUEST ({} -> {}): {}".format(self.name[-4:], peer_id[-4:], message_id[-4:]))
# XXX: It is double requesting, should be polite
# - **Send** any messages that the device is **sharing** with the peer, that have
# been **requested** by the peer, and that have reached their send times
def send_requested_messages(self):
for message_id, x in self.sync_state.items():
for peer_id, flags in x.items():
if (peer_id in self.sharing[self.group_id] and
flags['request_flag'] == 1 and
flags['send_time'] <= self.time):
message = self.messages[message_id]
send_count = self.sync_state[message_id][peer_id]["send_count"] + 1
self.sync_state[message_id][peer_id]["send_count"] = send_count
self.sync_state[message_id][peer_id]["send_time"] += self.time + send_count*2
self.sync_state[message_id][peer_id]["request_flag"] = 0
self.logger('MESSAGE ({} -> {}): {} requested and sent'.format(self.name[-4:], peer_id[-4:], message_id[-4:]))
# XXX: Can introduce latency here
self.network.send_message(self.name, peer_id, message)
# When turn off request flag?
#- **Offer** any messages that the device is **sharing** with the peer, and does
# not know whether the peer holds, and that have reached their send times
# XXX: Not tested yet, interactive mode
def offer_messages(self):
for message_id, x in self.sync_state.items():
for peer_id, flags in x.items():
ids = []
if (peer_id in self.sharing[self.group_id] and
flags['hold_flag'] == 0 and
flags['send_time'] <= self.time):
# TODO: Extend to slurp up all, need index peer->message
offer_rec = new_offer_record([message_id])
# HERE we send
                    # XXX: peer_id should be a pubkey
self.network.send_message(self.name, peer_id, offer_rec)
send_count = self.sync_state[message_id][peer_id]["send_count"] + 1
self.sync_state[message_id][peer_id]["send_count"] = send_count
self.sync_state[message_id][peer_id]["send_time"] += self.time + send_count*2
self.logger(" OFFER ({} -> {}): {}".format(self.name[-4:], peer_id[-4:], message_id[-4:]))
# - **Send** any messages that the device is **sharing** with the peer, and does
# not know whether the peer holds, and that have reached their send times
def send_messages(self):
for message_id, x in self.sync_state.items():
for peer_id, flags in x.items():
# Should be case for B no?
if (peer_id in self.sharing[self.group_id] and
flags['hold_flag'] == 0 and
flags['send_time'] <= self.time):
message = self.messages[message_id]
send_count = self.sync_state[message_id][peer_id]["send_count"] + 1
self.sync_state[message_id][peer_id]["send_count"] = send_count
self.sync_state[message_id][peer_id]["send_time"] += self.time + send_count*2
self.logger('MESSAGE ({} -> {}): {} sent'.format(self.name[-4:], peer_id[-4:], message_id[-4:]))
# XXX: Can introduce latency here
self.network.send_message(self.name, peer_id, message)
# XXX: Why would node know about peer and not just name?
# TODO: Refactor this to illustrate that it is just a set of pubkeys
def addPeer(self, peer_id, peer):
self.peers[peer_id] = peer
def share(self, peer_id):
self.sharing[self.group_id].add(peer_id)
# Helper method
def update_sync_state(self, message_id, peer_id, new_state):
if message_id not in self.sync_state:
self.sync_state[message_id] = {}
if peer_id not in self.sync_state[message_id]:
self.sync_state[message_id][peer_id] = {
"hold_flag": 0,
"ack_flag": 0,
"request_flag": 0,
"send_count": 0,
"send_time": self.time + 1
}
current = self.sync_state[message_id][peer_id]
new = merge_two_dicts(current, new_state)
self.sync_state[message_id][peer_id] = new
def append_message(self, message):
message_id = get_message_id(message)
#print("*** append", message)
self.log.append({"id": message_id,
"message": message})
# XXX: Ugly but easier access while keeping log order
self.messages[message_id] = message
self.sync_state[message_id] = {}
# Ensure added for each peer
# If we add peer at different time, ensure state init
# TODO: Only share with certain peers, e.g. clientPolicy
# XXX here we go, probably
#print("**SHARE1 SHDNOTBEEMPTY", self.peers)
# TODO: Problem - this shouldn't be empty
# Where does this come from?
for peer in self.peers.keys():
if peer in self.sharing[self.group_id]:
#print("**SHARE2", peer)
# ok, then what?
self.sync_state[message_id][peer] = {
"hold_flag": 0,
"ack_flag": 0,
"request_flag": 0,
"send_count": 0,
"send_time": self.time + 1
}
# TODO: Probably something more here for message parsing
# TODO: Need to switch from object to pubkey here with name etc
def on_receive(self, sender, message):
if random.random() < self.reliability:
#print "*** {} received message from {}".format(self.name, sender.name)
if (message.header.type == 1):
self.on_receive_message(sender, message)
elif (message.header.type == 0):
self.on_receive_ack(sender, message)
elif (message.header.type == 2):
self.on_receive_offer(sender, message)
elif (message.header.type == 3):
self.on_receive_request(sender, message)
else:
print("XXX: unknown message type")
else:
self.logger("*** node {} offline, dropping message".format(self.name))
# TODO: Problem: It assumes there's a name, as opposed to a pubkey
def on_receive_message(self, sender_pubkey, message):
message_id = get_message_id(message)
self.logger('MESSAGE ({} -> {}): {} received'.format(sender_pubkey[-4:], self.name[-4:], message_id[-4:]))
if message_id not in self.sync_state:
self.sync_state[message_id] = {}
if sender_pubkey in self.sync_state[message_id]:
            self.sync_state[message_id][sender_pubkey]['hold_flag'] = 1
            self.sync_state[message_id][sender_pubkey]['ack_flag'] = 1
# XXX: ACK again here?
self.sync_state[message_id][sender_pubkey] = {
"hold_flag": 1,
"ack_flag": 1,
"request_flag": 0,
"send_count": 0,
"send_time": 0
}
# XXX: If multiple group id, dispatch per group id
for peer in self.sharing[self.group_id]:
if peer not in self.sync_state[message_id]:
self.sync_state[message_id][peer] = {
"hold_flag": 0,
"ack_flag": 0,
"request_flag": 0,
"send_count": 0,
"send_time": 0
}
# XXX: Huh, This is MESSAGE, shouldn't it be PAYLOAD inside it?
assert message.header.type == 1, "Type should be MESSAGE, possible faulty logic"
self.messages[message_id] = message
# XXX: Shortcuts, lets take 'em
text = message.payload.message.body.decode('utf-8')
short_sender = sender_pubkey[-4:]
print(short_sender + ": " + text)
def on_receive_ack(self, sender_pubkey, message):
for ack in message.payload.ack.id:
self.logger(' ACK ({} -> {}): {} received'.format(sender_pubkey[-4:], self.name[-4:], ack[-4:]))
self.sync_state[ack][sender_pubkey]["hold_flag"] = 1
def on_receive_offer(self, sender_pubkey, message):
for message_id in message.payload.offer.id:
self.logger(' OFFER ({} -> {}): {} received'.format(sender_pubkey[-4:], self.name[-4:], message_id[-4:]))
if (message_id in self.sync_state and
sender_pubkey in self.sync_state[message_id] and
self.sync_state[message_id][sender_pubkey]['ack_flag'] == 1):
print("Have message, not ACKED yet, add to list", sender_pubkey, message_id)
if sender_pubkey not in self.offeredMessages:
self.offeredMessages[sender_pubkey] = []
self.offeredMessages[sender_pubkey].append(message_id)
elif message_id not in self.sync_state:
#print "*** {} on_receive_offer from {} not holding {}".format(self.name, sender_pubkey, message_id)
if sender_pubkey not in self.offeredMessages:
self.offeredMessages[sender_pubkey] = []
self.offeredMessages[sender_pubkey].append(message_id)
#else:
# print "*** {} on_receive_offer have {} and ACKd OR peer {} unknown".format(self.name, message_id, sender_pubkey)
# XXX: Init fn to wrap updates
if message_id not in self.sync_state:
self.sync_state[message_id] = {}
if sender_pubkey not in self.sync_state[message_id]:
self.sync_state[message_id][sender_pubkey] = {
"hold_flag": 1,
"ack_flag": 0,
"request_flag": 0,
"send_count": 0,
"send_time": 0
}
self.sync_state[message_id][sender_pubkey]['hold_flag'] = 1
#print "*** {} offeredMessages {}".format(self.name, self.offeredMessages)
def on_receive_request(self, sender_pubkey, message):
for req in message.payload.request.id:
self.logger('REQUEST ({} -> {}): {} received'.format(sender_pubkey[-4:], self.name[-4:], req[-4:]))
self.sync_state[req][sender_pubkey]["request_flag"] = 1
def print_sync_state(self):
print("\n{} POV @{}".format(self.name[-4:], self.time))
print("-" * 60)
n = self.name
for message_id, x in self.sync_state.items():
line = message_id[-4:] + " | "
for peer, flags in x.items():
line += peer[-4:] + ": "
if flags['hold_flag']:
line += "hold "
if flags['ack_flag']:
line += "ack "
if flags['request_flag']:
line += "req "
line += "@" + str(flags['send_time'])
line += "(" + str(flags['send_count']) + ")"
line += " | "
print(line)
#log("-" * 60)
# Shorter names for pubkey
def print_sync_state2(self):
print("\n{} POV @{}".format(self.name[-4:], self.time))
print("-" * 60)
n = self.name[-4:]
for message_id, x in self.sync_state.items():
line = message_id[-4:] + " | "
for peer, flags in x.items():
line += peer[-4:] + ": "
if flags['hold_flag']:
line += "hold "
if flags['ack_flag']:
line += "ack "
if flags['request_flag']:
line += "req "
line += "@" + str(flags['send_time'])
line += "(" + str(flags['send_count']) + ")"
line += " | "
print(line)
#log("-" * 60)
def update_availability(self):
#arr = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
arr = [1, 1, 0, 0, 1, 1, 0, 0]
idx = (self.time + self.randomSeed) % 8 # 10
self.reliability = arr[idx]
# XXX conflating these for now, depends on POV/agency
self.availability = arr[idx]
# XXX: Self-describing better in practice, format?
def sha1(message):
# XXX correct encoding?
sha = hashlib.sha1(message.encode('utf-8'))
return sha.hexdigest()
# message_id = HASH("MESSAGE_ID", group_id, timestamp, message_body)
# TODO: Create a message
def create_message(body):
group_id = "0xdeadbeef"
timestamp = time.time()
message_body = body
message = {"group_id": group_id, "timestamp": timestamp, "message_body": message_body}
return message
# XXX: Is this hashing correctly?
def get_message_id(message_record):
msg = message_record.payload.message
s = "MESSAGE_ID" + msg.group_id + str(msg.timestamp) + msg.body.decode()
#print("***", s)
return sha1(s)
# TODO: Move these protobuf helpers somewhere better
# XXX: where is the message id?
def new_message_record(body):
msg = sync_pb2.Record()
msg.header.version = 1
# assert based on type and length
msg.header.type = 1 # MESSAGE type
# XXX: Should be inferred
msg.header.length = 10
# XXX: Hardcoded for now
msg.payload.message.group_id = "foo"
# XXX: Should be 64 bit integer ms
msg.payload.message.timestamp = int(time.time())
msg.payload.message.body = str.encode(body)
return msg
def new_ack_record(ids):
msg = sync_pb2.Record()
msg.header.version = 1
# assert based on type and length
msg.header.type = 0 # ACK type
# XXX: Should be inferred
msg.header.length = 10
for id in ids:
msg.payload.ack.id.append(id)
return msg
def new_offer_record(ids):
msg = sync_pb2.Record()
msg.header.version = 1
# assert based on type and length
msg.header.type = 2 # OFFER type
# XXX: Should be inferred
msg.header.length = 10
for id in ids:
msg.payload.offer.id.append(id)
return msg
def new_req_record(ids):
msg = sync_pb2.Record()
msg.header.version = 1
# assert based on type and length
msg.header.type = 3 # REQUEST type
# XXX: Should be inferred
msg.header.length = 10
for id in ids:
msg.payload.request.id.append(id)
return msg
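# Illustrative sketch (not part of the original PoC): build a MESSAGE record with the
# helpers above and derive its id, mirroring append_message()/get_message_id():
#
#     rec = new_message_record("A: hello world")
#     print(get_message_id(rec))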
# Mocking
################################################################################
# TODO: For whisper nodes should be public keys
# What about keypair to try to decrypt? should be in node
def run(steps=10):
n = networksim.NetworkSimulator()
# XXX: Not clear to me what's best here
# Interactive: less BW, Batch: less coordination
a = Node("A", n, 'burstyMobile', 'batch')
b = Node("B", n, 'burstyMobile', 'batch')
c = Node("C", n, 'desktop', 'interactive')
d = Node("D", n, 'desktop', 'batch')
n.peers["A"] = a
n.peers["B"] = b
n.peers["C"] = c
n.peers["D"] = d
n.nodes = [a, b, c, d]
a.addPeer("B", b)
a.addPeer("C", c)
b.addPeer("A", a)
c.addPeer("A", a)
#b.addPeer("C", c) # hm
#c.addPeer("B", b)
b.addPeer("D", d)
c.addPeer("D", d)
# NOTE: Client should decide policy, implict group
a.share("B")
b.share("A")
# C and D participating
# a.share("C")
b.share("D")
c.share("A")
c.share("D")
d.share("B")
d.share("C")
print("\nAssuming one group context (A-B (C-D) share):")
# XXX: Conditional append to get message graph?
# TODO: Actually need to encode graph, client concern
local_appends = {
1: [[a, "A: hello world"]],
2: [[b, "B: hello!"]],
}
for i in range(steps):
# NOTE: include signature and parent message
if n.time in local_appends:
for peer, msg in local_appends[n.time]:
rec = new_message_record(msg)
peer.append_message(rec)
n.tick()
#a.print_sync_state()
#b.print_sync_state()
#c.print_sync_state()
# a.print_sync_state()
# b.print_sync_state()
# c.print_sync_state()
# d.print_sync_state()
def whisperRun(steps=10):
a_keyPair = "0x57083392b29bdf24512c93cfdf45d38c87d9d882da3918c59f4406445ea976a4"
b_keyPair= "0x7b5c5af9736d9f1773f2020dd0fef0bc3c8aeaf147d2bf41961e766588e086e7"
# TODO: should be node names
# Derived, used for addressing
a_pubKey = "0x04d94a1a01872b598c7cdc5aca2358d35eb91cd8a91eaea8da277451bb71d45c0d1eb87a31ea04e32f537e90165c870b3e115a12438c754d507ac75bddd6ecacd5"
b_pubKey = "0x04ff921ddf78b5ed4537402f59a150caf9d96a83f2a345a1ddf9df12e99e7778f314c9ca72e8285eb213af84f5a7b01aabb62c67e46657976ded6658e1b9e83c73"
aNode = networkwhisper.WhisperNodeHelper(a_keyPair)
bNode = networkwhisper.WhisperNodeHelper(b_keyPair)
# XXX: Not clear to me what's best here
# Interactive: less BW, Batch: less coordination
    # Node() takes a log function as its first argument; use print for the PoC runs.
    a = Node(print, a_pubKey, aNode, 'burstyMobile', 'batch')
    b = Node(print, b_pubKey, bNode, 'burstyMobile', 'batch')
# XXX: Not clear this is needed for Whisper, since all nodes should be part of network
# Possibly analog with topics?
#n.peers["A"] = a
#n.peers["B"] = b
aNode.nodes = [a]
bNode.nodes = [b]
a.addPeer(b_pubKey, b)
b.addPeer(a_pubKey, a)
# NOTE: Client should decide policy, implict group
a.share(b_pubKey)
b.share(a_pubKey)
print("\nAssuming one group context (A-B) share):")
# XXX: Conditional append to get message graph?
# TODO: Actually need to encode graph, client concern
local_appends = {
1: [[a, "A: hello world"]],
2: [[b, "B: hello!"]],
}
# XXX: what is this again? should be for both nodes
for i in range(steps):
# NOTE: include signature and parent message
if aNode.time in local_appends:
for peer, msg in local_appends[aNode.time]:
rec = new_message_record(msg)
peer.append_message(rec)
# XXX: Why discrete time model here?
aNode.tick()
bNode.tick()
#a.print_sync_state()
#b.print_sync_state()
# a.print_sync_state2()
# b.print_sync_state2()
# TODO: With Whisper branch this one breaks, probably due to sender{,.name} => sender_pubkey mismatch.
#run(30)
#whisperRun(30)
|
"""test signin util functions"""
from sqlalchemy.orm import Session
from app.controllers.users_controller import check_user
def get_user_test(sql: Session, username: str):
"""get user test data"""
return check_user(sql, username=username)
|
#!/usr/bin/env pypy
from commons import isRealistic
import random
if not isRealistic():
random.seed(0)
class ID:
@staticmethod
def getId(id):
return int(id.split('_')[-1])
"""
Represents a Hadoop attempt.
"""
class Attempt:
def __init__(self, attemptId=None, task=None, seconds=10, approx=False):
self.task = task
self.attemptId = attemptId
self.approx = approx
self.nodeId = None
self.start = None
self.finish = None
self.status = Job.Status.QUEUED
self.seconds = seconds # Remaining seconds
def progress(self, p):
self.seconds -= p
def drop(self):
self.seconds = 0
self.status = Job.Status.DROPPED
def isCompleted(self):
return self.seconds <= 0
def getJobId(self):
return '_'.join(self.attemptId.split('_')[0:3]).replace('attempt_', 'job_')
def getTaskId(self):
return self.attemptId[:self.attemptId.rfind('_')].replace('attempt_', 'task_')
def getId(self):
return int(self.attemptId.split('_')[4])
def isMap(self):
#return self.getTask().isMap()
return self.attemptId.rfind('_m_') >= 0
def isRed(self):
#return self.getTask().isRed()
return self.attemptId.rfind('_r_') >= 0
def isApprox(self):
return self.approx
def getTask(self):
return self.task
def getJob(self):
return self.getTask().getJob()
def __str__(self):
return self.attemptId
"""
Represents a Hadoop task.
"""
class Task:
def __init__(self, taskId=None, job=None, length=None, lengthapprox=None, gauss=None):
self.job = job
self.taskId = taskId
self.length = length
self.lengthapprox = length
if lengthapprox != None:
self.lengthapprox = lengthapprox
self.gauss = gauss # Task length distribution in %
self.attempts = {}
self.nattempts = 0
self.status = Job.Status.QUEUED # Status: QUEUED -> RUNNING -> SUCCEEDED | DROPPED
self.approx = False
def isQueued(self):
if len(self.attempts) == 0:
return True
else:
for attempt in self.attempts.values():
if attempt.status == Job.Status.QUEUED:
return True
return False
def isMap(self):
return self.taskId.rfind('_m_') >= 0
def isRed(self):
return self.taskId.rfind('_r_') >= 0
def getAttempt(self):
if len(self.attempts) == 0:
self.nattempts += 1
attemptId = (self.taskId+'_%d' % self.nattempts).replace('task_', 'attempt_')
seconds = self.length if not self.approx else self.lengthapprox
if self.gauss != None:
seconds = int(random.gauss(seconds, self.gauss/100.0*seconds))
# Minimum task length
if seconds < 3:
seconds = 3
attempt = Attempt(attemptId=attemptId, task=self, seconds=seconds, approx=self.approx)
self.attempts[attempt.attemptId] = attempt
return attempt
else:
for attempt in self.attempts.values():
if attempt.status == Job.Status.QUEUED:
return attempt
return None
# We drop this task before start
def drop(self):
if len(self.attempts) == 0:
self.nattempts += 1
attemptId = (self.taskId+'_%04d' % self.nattempts).replace('task_', 'attempt_')
attempt = Attempt(attemptId=attemptId, task=self, seconds=0)
attempt.status = Job.Status.DROPPED
self.attempts[attempt.attemptId] = attempt
def getJob(self):
return self.job
"""
Represents a Hadoop job.
"""
class Job:
# Job priorities
class Priority:
VERY_HIGH = 5
HIGH = 4
NORMAL = 3
LOW = 2
VERY_LOW = 1
# Job status
# QUEUED -> RUNNING -> SUCCEEDED | DROPPED
class Status:
QUEUED = 0
RUNNING = 1
SUCCEEDED = 2
DROPPED = 3
toString = {QUEUED:'QUEUED', RUNNING:'RUNNING', SUCCEEDED:'SUCCEEDED', DROPPED:'DROPPED'}
def __init__(self, jobId=None, nmaps=16, lmap=60, lmapapprox=None, nreds=1, lred=30, lredapprox=None, submit=0):
self.jobId = jobId
self.nmaps = nmaps
self.nreds = nreds
self.lmap = lmap
self.lred = lred
#self.gauss = None
self.gauss = 20
self.lmapapprox = lmapapprox if lmapapprox != None else self.lmap
self.lredapprox = lredapprox if lredapprox != None else self.lred
self.submit = submit # Submission time
self.finish = None # Finish time
self.priority = Job.Priority.NORMAL
# Set queue execution state
self.reset()
# Algorithm
self.approxAlgoMapMax = 0.0 # Max % => approxAlgoMapVal < approxAlgoMapMax
self.approxAlgoMapVal = 0.0 # %
self.approxAlgoRedMax = 0.0 # Max % => approxAlgoRedVal < approxAlgoRedMax
self.approxAlgoRedVal = 0.0 # %
# Dropping
self.approxDropMapMin = 100.0 # Min % => approxDropMapVal > approxDropMapMin
self.approxDropMapVal = 100.0 # %
self.approxDropRedMin = 100.0 # Min % => approxDropRedVal > approxDropRedMin
self.approxDropRedVal = 100.0 # %
'''
# Approximation
self.approxMapProbability = 0.0
self.approxRedProbability = 0.0
# Dropping
self.dropMapPercentage = 1.0
self.dropRedPercentage = 1.0
'''
'''
Reset the job running
'''
def reset(self):
# Mark it as queued
self.status = Job.Status.QUEUED
# Reset Tasks
self.cmaps = 0
self.creds = 0
self.maps = {}
self.reds = {}
def initTasks(self):
# Maps
# Decide which maps are approximate
numMapApprox = int(round(self.nmaps*self.approxAlgoMapVal/100.0))
numMapPrecis = self.nmaps - numMapApprox
mapsApproximated = random.sample([True]*numMapApprox + [False]*numMapPrecis, self.nmaps)
# Initialize tasks
for nmap in range(0, self.nmaps):
taskId = '%s_m_%06d' % (self.jobId.replace('job_', 'task_'), nmap+1)
self.maps[taskId] = Task(taskId, self, self.lmap, self.lmapapprox)
self.maps[taskId].gauss = self.gauss
# Set approximation
self.maps[taskId].approx = mapsApproximated[nmap]
# Reduces
# Decide which maps are approximate
numRedApprox = int(round(self.nreds*self.approxAlgoRedVal/100.0))
numRedPrecis = self.nreds - numRedApprox
redsApproximated = random.sample([True]*numRedApprox + [False]*numRedPrecis, self.nreds)
# Initialize tasks
for nred in range(0, self.nreds):
taskId = '%s_r_%06d' % (self.jobId.replace('job_', 'task_'), nred+1)
self.reds[taskId] = Task(taskId, self, self.lred, self.lredapprox)
self.reds[taskId].gauss = self.gauss
# Set approximation
self.reds[taskId].approx = redsApproximated[nred]
def getMapTask(self):
for mapTask in self.maps.values():
if mapTask.isQueued():
return mapTask.getAttempt()
return None
def getRedTask(self):
# Wait for the maps to finish. TODO slow start
if self.cmaps >= len(self.maps):
for redTask in self.reds.values():
if redTask.isQueued():
return redTask.getAttempt()
return None
def mapQueued(self):
ret = 0
for mapTask in self.maps.values():
if mapTask.isQueued():
ret += 1
return ret
def redQueued(self):
ret = 0
if self.isMapCompleted():
for redTask in self.reds.values():
if redTask.isQueued():
ret += 1
return ret
def getStart(self):
start = None
for task in self.maps.values() + self.reds.values():
for attempt in task.attempts.values():
if start == None or start > attempt.start:
start = attempt.start
return start
def getFinish(self):
finish = None
for task in self.maps.values() + self.reds.values():
for attempt in task.attempts.values():
if finish == None or finish < attempt.finish:
finish = attempt.finish
return finish
# Check if all the maps are completed
def isMapCompleted(self):
return self.cmaps >= len(self.maps)
# Check if the job is running
def isRunning(self):
return self.cmaps < len(self.maps) or self.creds < len(self.reds)
    # Get the list of nodes that have run this job
def getNodes(self):
ret = []
for task in self.maps.values() + self.reds.values():
for attempt in task.attempts.values():
if attempt.nodeId != None and attempt.nodeId not in ret:
ret.append(attempt.nodeId)
return ret
def getQuality(self):
ret = 100.0
total = 0
approximations = 0
for task in self.maps.values() + self.reds.values():
for attempt in task.attempts.values():
total += 1
if attempt.approx or attempt.status == Job.Status.DROPPED:
approximations += 1
if total > 0:
ret = 100.0 - 100.0*approximations/total
return ret
# Complete an attempt
def completeAttempt(self, attempt, drop=False):
ret = []
# Map
if attempt.isMap():
for mapTask in self.maps.values():
if mapTask.taskId == attempt.getTaskId():
if drop:
mapTask.status = Job.Status.DROPPED
else:
mapTask.status = Job.Status.SUCCEEDED
self.cmaps += 1
# Reduce
else:
for redTask in self.reds.values():
if redTask.taskId == attempt.getTaskId():
if drop:
redTask.status = Job.Status.DROPPED
else:
redTask.status = Job.Status.SUCCEEDED
self.creds += 1
if self.creds >= len(self.reds):
ret.append(self)
return ret
def isMapDropping(self):
return 100.0*self.cmaps/len(self.maps) > self.approxDropMapVal
def isRedDropping(self):
return 100.0*self.creds/len(self.reds) > self.approxDropRedVal
# Drop an attempt
def dropAttempt(self, attempt):
return self.completeAttempt(attempt, drop=True)
# Add an attempt to the job
def addAttempt(self, attempt):
taskId = attempt.getTaskId()
if attempt.isMap():
if taskId not in self.maps:
self.maps[taskId] = Task(taskId)
self.maps[taskId].attempts[attempt.attemptId] = attempt
if attempt.isRed():
if taskId not in self.reds:
self.reds[taskId] = Task(taskId)
self.reds[taskId].attempts[attempt.attemptId] = attempt
|
import SocketServer
import socket
import threading
import numpy as np
import cv2
import pygame
from pygame.locals import *
import time
import os
# SocketServer.ThreadingTCPServer.allow_reuse_address = True
RASP_IP = '192.168.43.70'
RASP_SERV_PORT = 7879
COMP_IP = '192.168.43.210'
COMP_SERV_PORT = 8002
command = {
# single commands
'rs': "rst_:",
'f': "fwd_:",
'rev': "rev_:",
'r': "rht_:",
'l': "lft_:",
# combination commands
'f_r': "f_rt:",
'f_l': "f_lf:",
'rev_r': "rv_r:",
'rev_l': "rv_l:",
# 5 character in each string
}
"""
9x9 output
k = [
[1, 0, 0, 0, 0, 0, 0, 0, 0], # left
[0, 1, 0, 0, 0, 0, 0, 0, 0], # right
[0, 0, 1, 0, 0, 0, 0, 0, 0], # forward
[0, 0, 0, 1, 0, 0, 0, 0, 0], # reverse
[0, 0, 0, 0, 1, 0, 0, 0, 0], # forward_left
[0, 0, 0, 0, 0, 1, 0, 0, 0], # forward_right
[0, 0, 0, 0, 0, 0, 1, 0, 0], # reverse_left
[0, 0, 0, 0, 0, 0, 0, 1, 0], # reverse_right
[0, 0, 0, 0, 0, 0, 0, 0, 1], # stop ~ reset
]
"""
class CollectTrainingData(object):
def __init__(self):
# creating server for camera
self.server_socket = socket.socket()
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_socket.bind((COMP_IP, COMP_SERV_PORT))
self.server_socket.listen(0)
# accept single connection
self.connection = self.server_socket.accept()[0].makefile('rb')
# create a socket and connect to motor controller
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect((RASP_IP, RASP_SERV_PORT))
self.send_motor = True
self.k = np.zeros((4, 4), float)
for i in range(4):
self.k[i, i] = 1
self.temp_label = np.zeros((1, 4), 'float')
pygame.init()
self.collect_data()
def save_image(self,image,filename,label=4):
if label==2:
# save streamed images
cv2.imwrite('training_images/forward/frame_{}.jpg'.format(filename), image)
elif label ==0:
cv2.imwrite('training_images/left/frame_{}.jpg'.format(filename), image)
elif label ==1:
cv2.imwrite('training_images/right/frame_{}.jpg'.format(filename), image)
else :
cv2.imwrite('training_images/unclassified/frame_{}.jpg'.format(filename), image)
def collect_data(self):
saved_frame = 0
total_frame = 0
# collect_images for training
print 'Start collecting images'
e1 = cv2.getTickCount()
image_array = np.zeros((1, 115200))
label_array = np.zeros((1, 4), 'float')
image_to_list=[]
# stream video frames one by one
try:
stream_bytes = ''
frame = 1
while self.send_motor:
# print("reading data")
stream_bytes += self.connection.read(1024)
first = stream_bytes.find('\xff\xd8')
last = stream_bytes.find('\xff\xd9')
if first != -1 and last != -1:
jpg = stream_bytes[first:last + 2]
stream_bytes = stream_bytes[last + 2:]
image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_ANYCOLOR)
# select lower half of the image
roi = image[120:240, :]
# save streamed images
#cv2.imwrite('training_images/frame{:>05}.jpg'.format(frame), image)
# cv2.imshow('roi_image',roi)
cv2.imshow('image', image)
# reshape roi image in one array
temp_array = roi.reshape(1, 115200).astype(np.float32)
frame += 1
total_frame += 1
# get input from human driver
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
key_input = pygame.key.get_pressed()
# complex orders
if key_input[pygame.K_UP] and key_input[pygame.K_RIGHT]:
print("Forward Right")
                                image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[1]))
saved_frame += 1
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,1])
self.save_image(image,image_name,1)
#self.ser.write(chr(6))
self.client_socket.send(command['f_r'])
elif key_input[pygame.K_UP] and key_input[pygame.K_LEFT]:
print("Forward Left")
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[0]))
saved_frame += 1
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,0])
#self.save_image(image,image_name,0)
self.client_socket.send(command['f_l'])
elif key_input[pygame.K_DOWN] and key_input[pygame.K_RIGHT]:
print("Reverse Right")
self.client_socket.send(command['rev_r'])
elif key_input[pygame.K_DOWN] and key_input[pygame.K_LEFT]:
print("Reverse Left")
self.client_socket.send(command['rev_l'])
# simple orders
elif key_input[pygame.K_UP]:
print("Forward")
saved_frame += 1
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[2]))
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,2])
#self.save_image(image,image_name,2)
self.client_socket.send(command['f'])
elif key_input[pygame.K_DOWN]:
print("Reverse")
saved_frame += 1
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[3]))
self.client_socket.send(command['rev'])
elif key_input[pygame.K_RIGHT]:
print("Right")
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[1]))
saved_frame += 1
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,1])
#self.save_image(image,image_name,1)
self.client_socket.send(command['r'])
elif key_input[pygame.K_LEFT]:
print("Left")
image_array = np.vstack((image_array, temp_array))
label_array = np.vstack((label_array, self.k[0]))
saved_frame += 1
image_name=time.strftime("%m.%d.%Y_%H%M%S")
image_to_list.append([image,image_name,0])
#self.save_image(image,image_name,0)
self.client_socket.send(command['l'])
elif key_input[pygame.K_x] or key_input[pygame.K_q]:
print 'exit'
self.send_motor = False
self.client_socket.send(command['rs'])
break
elif event.type == pygame.KEYUP:
self.client_socket.send(command['rs'])
#print(stream_bytes)
#self.client_socket.send(stream_bytes)
# save training images and labels
train = image_array[1:, :]
train_labels = label_array[1:, :]
# save training data as a numpy file
file_name = str(int(time.time()))
directory = "training_data"
if not os.path.exists(directory):
os.makedirs(directory)
try:
np.savez(directory + '/' + file_name + '.npz', train=train, train_labels=train_labels)
except IOError as e:
print(e)
for i in image_to_list:
self.save_image(i[0],i[1],i[2])
e2 = cv2.getTickCount()
# calculate streaming duration
time0 = (e2 - e1) / cv2.getTickFrequency()
print 'Streaming duration:', time0
print(train.shape)
print(train_labels.shape)
print('Total frame:', total_frame)
print('Saved frame:', saved_frame)
print('Dropped frame:', total_frame - saved_frame)
finally:
self.connection.close()
self.server_socket.close()
self.client_socket.close()
if __name__ == '__main__':
CollectTrainingData()
|
from telegram.ext import (Dispatcher, CommandHandler, Filters)
from telegram.ext.dispatcher import run_async
from libs.group.kvs import kvs
def attach(dispatcher: Dispatcher):
dispatcher.add_handler(
CommandHandler(
command='rule',
filters=Filters.group,
callback=_group_command_rules,
)
)
dispatcher.add_handler(
CommandHandler(
command='rules',
filters=Filters.group,
callback=_group_command_rules,
)
)
@run_async
def _group_command_rules(update, context):
context.bot.send_message(
chat_id=update.effective_chat.id,
text=kvs['rules'],
disable_web_page_preview=True,
)
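# Hedged usage sketch (assumes a python-telegram-bot v12-style Updater, which matches the
# Dispatcher/run_async API used above; the token string is a placeholder):
#
#   from telegram.ext import Updater
#   updater = Updater(token="<bot-token>", use_context=True)
#   attach(updater.dispatcher)
#   updater.start_polling()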
|
# -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
from django.http import HttpResponseRedirect
# Import the models and forms I created to write the views
from .forms import CourseForm, StudentForm,AttendanceForm,CourseStudentForm,CourseChoiceField,StudentChoiceField
from .models import Course, Student,Course_Student,Attendance
@login_required(login_url="/login/")
# Method Name: index
# Method Parameter: request, which is the user's request
# This brings the user to the home screen
def index(request):
return render(request, "index.html")
# Method Name: updateRegistrationTable
# Method Parameter:
# 1. course, which is the course whose registration the user wishes to update
# 2. student, which is the student the user wishes to make changes to
# This method takes the selected course and student and updates their registration record in the Course_Student table
def updateRegistrationTable(course, student):
# Finds the registration the user performs
registration = Course_Student.objects.filter(course=course, student=student).first()
# Finds the amount of times a student attended class
attendTimes = Attendance.objects.filter(course=course, student=student).count()
registration.attend_times = attendTimes
# Update the student's balance
registration.balance = registration.paid_credit - attendTimes*course.price
registration.save()
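# Worked example (hypothetical numbers, for illustration only): if a student paid 100 in
# credit for a course priced at 10 per class and attended 3 times, attend_times becomes 3
# and balance = 100 - 3 * 10 = 70.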
# Method Name: refreshRegistrationTable
# Method Parameter: none
# This method does the same thing as updateRegistrationTable but updates everything on the registration table
def refreshRegistrationTable():
# Takes all the objects from Course_Student form
existingRegistrations = Course_Student.objects.all()
# Loop through all the objects
for r in existingRegistrations:
# Update the objects
updateRegistrationTable(r.course, r.student)
# Method Name: handleDeleteCourse
# Method Parameter: request, which is the user's request
# This method allows the user to delete a course from the courses table in courses.html
def handleDeleteCourse(request):
# 1. Delete the course from the database
if request.method == 'POST':
courseName = request.POST.get("targetCourse"," ")
course = get_object_or_404(Course, pk=courseName)
course.delete()
# 2. redirect to courses.html
return HttpResponseRedirect("/courses.html")
# Method Name: handleEditCourse
# Method Parameter: request, which is the user's request
# This method allows the user to edit a course from the courses table in courses.html
def handleEditCourse(request):
# 1.Save the course to database
if request.method == 'POST':
# retrieves the required information about the course
coursePk = request.POST.get("targetCourse"," ")
courseName = request.POST.get("editCourse_name"," ")
coursePeriod = request.POST.get("editCourse_period"," ")
coursePlace = request.POST.get("editCourse_place"," ")
coursePrice = request.POST.get("editCourse_price"," ")
courseStart = request.POST.get("editCourse_start"," ")
courseEnd = request.POST.get("editCourse_end"," ")
#update the database
course = get_object_or_404(Course, pk=coursePk)
course.name = courseName
course.period = coursePeriod
course.price = coursePrice
course.place = coursePlace
course.start = courseStart
course.end = courseEnd
course.save()
# 2. redirect to courses.html
return HttpResponseRedirect("/courses.html")
# Method Name: handleAddCourse
# Method Parameter: request, which is the user's request
# This method allows the user to add a course to the courses table in courses.html
def handleAddCourse(request):
# 1. Validate the input and save the form to the database
if request.method == 'POST':
# create a form instance and populate it with data from the request:
courseForm = CourseForm(request.POST)
# check whether it's valid:
if courseForm.is_valid():
# process the data in form.cleaned_data as required
courseForm.save()
# 2. redirect to courses.html
return HttpResponseRedirect("/courses.html")
# Method Name: handleCourses
# Method Parameter: request, which is the user's request
# This method allows the user to display the course on the courses table in courses.html
def handleCourses(request):
# 1. Retrieve all the existing data from Course Table
existingCourses = Course.objects.all()
# define the context with the data
context = {
'existingCourses': existingCourses,
}
# 2. Redirect to courses.html with the data
return render(request, 'courses.html', context)
# Method Name: handleDeleteStudent
# Method Parameter: request, which is the user's request
# This method allows the user to delete a student from the students table in students.html
def handleDeleteStudent(request):
# 1. Delete the student from the database
if request.method == 'POST':
studentName = request.POST.get("targetStudent"," ")
student = get_object_or_404(Student, pk=studentName)
student.delete()
# 2. redirect to students.html
return HttpResponseRedirect("/students.html")
# Method Name: handleEditStudent
# Method Parameter: request, which is the user's request
# This method allows the user to edit a student from the students table in students.html
def handleEditStudent(request):
# 1.Retrieve the user input and update database
if request.method == 'POST':
studentPk = request.POST.get("targetStudent"," ")
studentName = request.POST.get("editStudent_name"," ")
studentPhone = request.POST.get("editStudent_phone"," ")
studentEmail = request.POST.get("editStudent_email"," ")
#update the database
student = get_object_or_404(Student, pk=studentPk)
student.name = studentName
student.phone = studentPhone
student.email = studentEmail
student.save()
# 2. redirect to students.html
return HttpResponseRedirect("/students.html")
# Method Name: handleAddStudent
# Method Parameter: request, which is the user's request
# This method allows the user to add a student from the students table in students.html
def handleAddStudent(request):
# 1. Validate the input and save the form to the database
if request.method == 'POST':
# create a form instance and populate it with data from the request:
studentForm = StudentForm(request.POST)
# check whether it's valid:
if studentForm.is_valid():
# save the data in form.cleaned_data to database
studentForm.save()
# 2. redirect to students.html
return HttpResponseRedirect("/students.html")
# Method Name: handleStudents
# Method Parameter: request, which is the user's request
# This method allows the user to display the students on the students table in students.html
def handleStudents(request):
# 1. Retrieve all the existing student data from database
existingStudents = Student.objects.all()
# define the context with the data
context = {
'existingStudents': existingStudents,
}
# 2. Render students.html with the data
return render(request, 'students.html', context)
# Method Name: handleDeleteRegistration
# Method Parameter: request, which is the user's request
# This method allows the user to delete a registration on the registration table in registration.html
def handleDeleteRegistration(request):
# 1.Get the user input and delete the selected registration from database
if request.method == 'POST':
registrationPk = request.POST.get("targetRegistration"," ")
registration = get_object_or_404(Course_Student, pk=registrationPk)
registration.delete()
# 2. redirect to registration.html
return HttpResponseRedirect("/registration.html")
# Method Name: handleEditRegistration
# Method Parameter: request, which is the user's request
# This method allows the user to edit a registration on the registration table in registration.html
def handleEditRegistration(request):
# 1.Retrieve the user input and save the updated registration to database
if request.method == 'POST':
registrationPk = request.POST.get("targetRegistraton"," ")
registrationPaidCredit = request.POST.get("editRegistration_paid_credit"," ")
registrationScore = request.POST.get("editRegistration_score"," ")
#update the database
registration = get_object_or_404(Course_Student, pk=registrationPk)
registration.paid_credit = registrationPaidCredit
registration.score = registrationScore
registration.save()
updateRegistrationTable(registration.course, registration.student)
# 2. redirect to registration.html
return HttpResponseRedirect("/registration.html")
# Method Name: handleAddRegistration
# Method Parameter: request, which is the user's request
# This method allows the user to add a registration on the registration table
def handleAddRegistration(request):
# 1. Validate the input and save the form to the database
if request.method == 'POST':
# Retrieve the user input about: the student name, the course name, and the student's current credit
studentName = request.POST.get("students"," ")
courseName = request.POST.get("courses"," ")
paidCredit = request.POST.get("paid_credit"," ")
# If the studentName or courseName is empty, then an error message will be displayed
if (studentName == " " or courseName == " "):
html_template = loader.get_template( 'error-400.html' )
context = {
"errorMessage" : "Please select a course and a student"
}
return HttpResponse(html_template.render(context, request))
# Find the student's entry in its table
selectedStudent = Student.objects.filter(name=studentName).first()
# Find the course's entry in its table
selectedCourse = Course.objects.filter(name=courseName).first()
# Create another entry in the Course_Student form
registration = Course_Student(
course=selectedCourse,
student=selectedStudent,
paid_credit=paidCredit
)
# try-except block to register the student for the course
try:
registration.save()
# if a registration for this student and course already exists, an error message will be displayed
except Exception:
html_template = loader.get_template( 'error-400.html' )
context = {
"errorMessage" : "The student already registers the course "
}
return HttpResponse(html_template.render(context, request))
# redirect to registration.html
return HttpResponseRedirect("registration.html")
# Send the list of courses and students data to the registration page
courseList = CourseChoiceField()
studentList = StudentChoiceField()
context = {
'courseList':courseList,
'studentList':studentList,
}
return render(request, 'register.html', context)
# Method Name: handleRegistration
# Method Parameter: request, which is the user's request
# This method helps display the registration information
def handleRegistration(request):
# Update all the students on the registration table
refreshRegistrationTable()
# Gets all the many-to-many relationship between the course and student
existingRegistrations = Course_Student.objects.all()
context = {
'existingRegistrations': existingRegistrations,
}
# Return the relationships information to help display the registration
return render(request, 'registration.html', context)
# Method Name: handleDeleteAttendance
# Method Parameter: request, which is the user's request
# This method allows the user to delete an attendance on the attendance table in attendance.html
def handleDeleteAttendance(request):
# 1. Delete the attendance from the database
if request.method == 'POST':
# Get the primary key of the targetAttendance
attendancePk = request.POST.get("targetAttendance"," ")
attendance = get_object_or_404(Attendance, pk=attendancePk)
course = attendance.course
student = attendance.student
# Delete attendance
attendance.delete()
# Update the student's credit
updateRegistrationTable(course, student)
# 2. redirect to attendance.html
return HttpResponseRedirect("/attendance.html")
# Method Name: handleEditAttendance
# Method Parameter: request, which is the user's request
# This method allows the user to edit an attendance on the attendance table in attendance.html
def handleEditAttendance(request):
# 1. Retrieve the user input and save the updated attendance info to the database
if request.method == 'POST':
# Get the primary key of the targetAttendance
attendancePk = request.POST.get("targetAttendance"," ")
attendanceDate = request.POST.get("editAttendance_date_attended"," ")
# update the database
attendance = get_object_or_404(Attendance, pk=attendancePk)
attendance.date_attended = attendanceDate
attendance.save()
# 2. redirect to attendance.html
return HttpResponseRedirect("/attendance.html")
# Method Name: handleAddAttendanceSelectCourse
# Method Parameter: request, which is the user's request
# This allows the user to select a course when they are trying to do their attendance
def handleAddAttendanceSelectCourse(request):
# Retrieve the user input and select the course
if request.method == 'POST':
# Get the user's choice for course
courseName = request.POST.get("courses"," ")
# Based on the courseName find the course in the Course form
selectedCourse = Course.objects.filter(name=courseName).first()
# Retrieve the students who registered to this course
registration_list = list(Course_Student.objects.filter(course=selectedCourse))
# Display information
context = {
"coursename" : courseName,
"registration_list" : registration_list,
}
return render(request, 'attend.html', context)
# If the request method is not post, then it would be get
# Display the courselist for the user to choose
courseList = CourseChoiceField()
context = {
'courseList':courseList,
}
return render(request, 'attend-selectcourse.html', context)
# Method Name: handleAddAttendance
# Method Parameter: request, which is the user's request
# This allows the user to finish adding their attendance
def handleAddAttendance(request):
# Validate the input and save the new attendance entry to the database
if request.method == 'POST':
# Get the user's choice for student
studentList = request.POST.getlist('attendance_students',"")
# If there are no students, the teacher shouldn't be able to submit an attendance sheet
if (len(studentList) == 0):
html_template = loader.get_template( 'error-400.html' )
context = {
"errorMessage" : "There is currently no student enrolled, please check again"
}
return HttpResponse(html_template.render(context, request))
# Retrieve the user's target course
courseName = request.POST.get("target_course"," ")
# Get the user's choice for date
date = request.POST.get("attendance_date"," ")
# Based on courseName, find the course in the courseForm
selectedCourse = Course.objects.filter(name=courseName).first()
# Loop through the studentName in the studentList
for studentName in studentList:
# Uses the student name to find its corresponding object
selectedStudent = Student.objects.filter(name=studentName).first()
# Create a new attendance record
attendance = Attendance(
course=selectedCourse,
student=selectedStudent,
date_attended=date
)
# Save the attendance
attendance.save()
# update registration table
updateRegistrationTable(selectedCourse, selectedStudent)
# redirect to attendance.html
return HttpResponseRedirect("attendance.html")
# On the off chance that there is an error and the method is not post, then it will be redirected to index.html
return render(request, 'index.html')
# Method Name: handleAttendance
# Method Parameter: request, which is the user's request
# This method helps display the attendance sheet
def handleAttendance(request):
# Gets all the attendance from Attendance table
existingAttendances = Attendance.objects.all()
context = {
'existingAttendances': existingAttendances,
}
# Return the data
return render(request, 'attendance.html', context)
# Method Name: pages
# Method Parameter: request, which is the user's request
# This method determines which method the user wishes to perform
@login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
try:
# The reason for this is because all the templates become unique and identifiable after the / sign
load_template = request.path.split('/')[-1]
# The if statements below check which html template was requested and perform the corresponding action
if (load_template == "addCourse.html"):
return handleAddCourse(request)
if (load_template == "editCourse.html"):
return handleEditCourse(request)
if (load_template == "deleteCourse.html"):
return handleDeleteCourse(request)
if (load_template == "courses.html"):
return handleCourses(request)
if (load_template == "addStudent.html"):
return handleAddStudent(request)
if (load_template == "editStudent.html"):
return handleEditStudent(request)
if (load_template == "deleteStudent.html"):
return handleDeleteStudent(request)
if (load_template == "students.html"):
return handleStudents(request)
if (load_template == "register.html"):
return handleAddRegistration(request)
if (load_template == "editRegistration.html"):
return handleEditRegistration(request)
if (load_template == "deleteRegistration.html"):
return handleDeleteRegistration(request)
if (load_template == "registration.html"):
return handleRegistration(request)
if (load_template == "attend-selectcourse.html"):
return handleAddAttendanceSelectCourse(request)
if (load_template == "attend.html"):
return handleAddAttendance(request)
if (load_template == "editAttendance.html"):
return handleEditAttendance(request)
if (load_template == "deleteAttendance.html"):
return handleDeleteAttendance(request)
if (load_template == "attendance.html"):
return handleAttendance(request)
# If load_template did not match any of the handlers above, try to render the requested template directly
html_template = loader.get_template( load_template )
return HttpResponse(html_template.render(context, request))
# If the template does not exist, then error 404 message will be displayed
except template.TemplateDoesNotExist:
html_template = loader.get_template( 'error-404.html' )
return HttpResponse(html_template.render(context, request))
# Else, error 500 message will be displayed
except:
html_template = loader.get_template( 'error-500.html' )
return HttpResponse(html_template.render(context, request))
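# Hedged note: pages() assumes the project's urls.py forwards every "*.html" path to it,
# e.g. with a catch-all route along these lines (placeholder pattern, not part of this file):
#   re_path(r'^.*\.html$', views.pages, name='pages')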
|
# -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import logging
import os
import sys
from pathlib import Path
from typing import Type, Tuple, List, Dict, Optional, Set
import yaml
from abex.azure_config import AzureConfig, parse_results_and_add_azure_config_vars
from abex.constants import AZUREML_ENV_NAME
from abex.dataset import Dataset
from abex.scripts.run import RunConfig
from abex.settings import OptimizerConfig, CustomDumper, load_config_from_path_or_name
from abex.simulations import SimulatedDataGenerator
from abex.simulations.looping import load_resolutions_from_command, run_multiple_seeds
from abex.simulations.submission import (
create_temporary_azureml_environment_file,
get_pythonpath_relative_to_root_dir,
spec_file_basename,
create_config_json_file,
)
from azureml.core import Workspace, ComputeTarget, Datastore, Environment, RunConfiguration, Experiment, Run
from azureml.core.conda_dependencies import CondaDependencies
from azureml.pipeline.core import PipelineData, StepSequence, Pipeline
from azureml.pipeline.steps import PythonScriptStep
from psbutils.create_amlignore import create_amlignore
from psbutils.misc import ROOT_DIR
from psbutils.psblogging import logging_to_stdout
class AMLResources:
def __init__(self, azure_config: AzureConfig):
if azure_config.compute_target in (None, "local"):
raise ValueError(
"AML pipelines don't run locally. Please specify a remote compute target"
) # pragma: no cover
self.ws = Workspace(azure_config.subscription_id, azure_config.resource_group, azure_config.workspace_name)
self.compute_target = ComputeTarget(self.ws, azure_config.compute_target)
self.env = self.specify_conda_environment()
self.datastore = Datastore(workspace=self.ws, name="workspaceblobstore")
@staticmethod
def specify_conda_environment():
# set up the Python environment
env = Environment(AZUREML_ENV_NAME)
aml_env_file = create_temporary_azureml_environment_file()
conda_deps = CondaDependencies(conda_dependencies_file_path=aml_env_file)
env.python.conda_dependencies = conda_deps
env.environment_variables = {"PYTHONPATH": get_pythonpath_relative_to_root_dir()}
return env
def specify_run_step(
args: RunConfig,
aml_resources: AMLResources,
run_script_path: Path,
loop_config_class: Type[OptimizerConfig],
check_consistency: bool = True,
) -> Tuple[List[PythonScriptStep], List[PipelineData], Dict[str, List[str]], List[str]]:
"""
Create the pipeline step(s) to run the simulation.
Args:
args: the parsed RunConfig arguments used to expand the config into individual runs
aml_resources: an instance of AMLResources which contains the necessary information on
AML resources to instantiate pipeline steps
run_script_path: script that the run step should invoke
loop_config_class: (subclass of) OptimizerConfig that should be instantiated
check_consistency: whether to run data_and_simulation_are_consistent; normally we do, but
this may be set to False for tests that check other parts of this functionality.
Returns: A list of PythonScriptSteps, with one for each expansion, a list of output data locations in AML,
a dictionary of styled subsets for plotting, and a list of the temporary spec files that have been created
"""
# Expand config
selections_and_configs = list(load_resolutions_from_command(args, loop_config_class))
temp_spec_files = []
parallel_steps = []
all_run_outputs = []
styled_subsets: Dict[str, List[str]] = {}
# For each expansion, create a PythonScriptStep to run the simulator script.
num_selections = len(selections_and_configs)
for index, pair_list in enumerate(selections_and_configs, 1):
config0 = pair_list[0][1]
if (not check_consistency) or data_and_simulation_are_consistent(config0):
logging.info(
f"Config resolution {index} of {num_selections} will have {len(pair_list)} runs included in pipeline"
)
else: # pragma: no cover
logging.error(f"Dropping config resolution {index} of {num_selections} from pipeline")
continue
for config_dct, config in pair_list:
batch_strategy = config_dct["bayesopt"]["batch_strategy"]
acquisition = config_dct["bayesopt"]["acquisition"]
experiment_label = f"{batch_strategy} - {acquisition}"
# TODO: what about acquisition, optimization_strategy?
if batch_strategy not in styled_subsets:
styled_subsets[batch_strategy] = [experiment_label]
else:
styled_subsets[batch_strategy].append(experiment_label) # pragma: no cover
# set up the run configuration
aml_run_config = RunConfiguration(_name=f"Parallel run combination {config.resolution_spec}.{config.seed}")
aml_run_config.target = aml_resources.compute_target
aml_run_config.environment = aml_resources.env # type: ignore # auto
# create different versions of args for each combination
temp_config_path = spec_file_basename(config.resolution_spec, config.seed or 0, suffix="yml")
temp_spec_files.append(temp_config_path)
with Path(temp_config_path).open("w") as fp:
yaml.dump(config_dct, fp, Dumper=CustomDumper)
args.spec_file = temp_config_path
original_arg_list = sys.argv[1:]
simulator_args = original_arg_list
spec_file_index = simulator_args.index("--spec_file")
simulator_args[spec_file_index + 1] = temp_config_path
if "--num_runs" in simulator_args:
    num_runs_index = simulator_args.index("--num_runs")
    simulator_args[num_runs_index + 1] = "1"  # pragma: no cover
else:
    simulator_args += ["--num_runs", "1"]
# create PipelineData to consume the output of this step in the next (plotting) step
step_output = PipelineData(
name=f"outputs_batch_{config.resolution_spec}_{config.seed}",
output_name=f"outputs_batch_{config.resolution_spec}_{config.seed}",
datastore=aml_resources.datastore,
is_directory=True,
)
all_run_outputs += [step_output]
simulator_args += ["--output_dir", step_output]
step = PythonScriptStep(
script_name=str(run_script_path.absolute().relative_to(ROOT_DIR)),
source_directory=ROOT_DIR,
arguments=simulator_args,
outputs=[step_output],
compute_target=aml_resources.compute_target,
runconfig=aml_run_config,
)
parallel_steps.append(step)
return parallel_steps, all_run_outputs, styled_subsets, temp_spec_files
def specify_plotting_step(
styled_subsets: Dict[str, List[str]],
experiment_labels: List[str],
all_run_outputs: List[PipelineData],
aml_resources: AMLResources,
plot_script_path: Path,
) -> Tuple[PythonScriptStep, PipelineData]:
"""
Create the pipeline steps to plot the results of the simulator.
Args:
styled_subsets: a mapping from batch strategy to the experiment labels that share a plot style
experiment_labels: A list of parameters to take from the Spec file to use as experiment
labels. Currently only accepts 'acquisition' and 'batch_strategy'
all_run_outputs: A list of PipelineData paths to directories where simulator results are stored
aml_resources: an instance of AMLResources which contains the necessary information on
AML resources to instantiate pipeline steps
plot_script_path: script that the plotting step should invoke
Returns: The PythonScriptStep that runs the plotting script, plus the PipelineData path to where the output will be stored
"""
output_plot = PipelineData(
name="plotting_output",
output_name="plotting_output",
datastore=aml_resources.datastore,
)
styled_subsets_list = []
for subset_members in styled_subsets.values():
styled_subsets_list += ["--styled_subset", " ".join([f'"{m}"' for m in subset_members])] # pragma: no cover
# assert len(all_run_outputs) == len(experiment_labels)
experiment_dirs = []
for run_output in all_run_outputs:
experiment_dirs += ["--experiment_dirs", run_output] # pragma: no cover
plotting_args = (
["--output_dir", output_plot, "--num_simulator_samples_per_optimum", "100"]
+ experiment_dirs
+ experiment_labels
)
# Plotting
plotting_run_config = RunConfiguration(_name="Plotting")
plotting_run_config.target = aml_resources.compute_target
plotting_run_config.environment = aml_resources.env # type: ignore # auto
plotting_step = PythonScriptStep(
script_name=str(plot_script_path.absolute().relative_to(ROOT_DIR)),
source_directory=ROOT_DIR,
arguments=plotting_args,
inputs=all_run_outputs,
outputs=[output_plot],
compute_target=aml_resources.compute_target,
runconfig=plotting_run_config,
)
return plotting_step, output_plot
def data_and_simulation_are_consistent(config: OptimizerConfig) -> bool: # pragma: no cover
"""
Check that the input names in the data section of the config match those of the simulator specified in it,
and likewise the output names. Also check that the data files themselves are consistent with the data
settings in the config. These checks are also carried out elsewhere, but running them here avoids
submitting an AML job that will fail as soon as it starts to run.
"""
data_var_names = sorted(config.data.inputs.keys())
dataset_ok = True
missing_names: Set[str] = set()
data_output_name: Optional[str] = None
if config.data.folder.is_dir() or config.data.files:
df = config.data.load_dataframe()
df_col_names = sorted(df.columns.tolist())
data_output_name = config.data.output_column
missing_names = set(data_var_names).union([data_output_name]).difference(df_col_names)
if missing_names:
logging.error(
"One or more columns expected by the config file are missing from the data: "
+ ", ".join(sorted(missing_names))
)
try:
Dataset(df, config.data)
except ValueError as e:
logging.error(f"Constructing Dataset object raised a ValueError: {e}")
dataset_ok = False
simulator = config.get_simulator()
data_generator = SimulatedDataGenerator(simulator)
simulation_var_names = sorted(data_generator.parameter_space.parameter_names)
input_names_consistent = data_var_names == simulation_var_names
if not input_names_consistent: # pragma: no cover
logging.error("Inputs in the config file must match those of the data generator (simulator)")
logging.error(f"Inputs in the config: {', '.join(data_var_names)}")
logging.error(f"Inputs allowed by data generator: {', '.join(simulation_var_names)}")
simulation_output_name = data_generator.objective_col_name
output_names_consistent = (data_output_name == simulation_output_name) or data_output_name is None
if not output_names_consistent:
logging.error("Output in the config file must match objective of the data generator (simulator)")
logging.error(f"Output in the config: {data_output_name}")
logging.error(f"Objective of the data generator: {simulation_output_name}")
return input_names_consistent and output_names_consistent and not missing_names and dataset_ok
def run_simulator_pipeline(
arg_list: Optional[List[str]],
run_script_path: Path,
plot_script_path: Path,
loop_config_class: Type[OptimizerConfig],
) -> Optional[Run]: # pragma: no cover
"""
Creates and runs an Azure ML pipeline to run an entire workflow of 1) running the simulator (with 1
node per config expansion and seed), 2) plotting the results. Results can be viewed in the AML portal,
or directly in the Datastore named 'workspaceblobstore' which will be created in the Workspace
specified in azureml-args.yml
"""
logging_to_stdout()
# TODO: replace arg parse with method that expects experiment_labels, styled_subsets as args
parser = RunConfig()
args = parser.parse_args(arg_list)
if not args.submit_to_aml:
raise ValueError("This script doesn't support local runs. Please ensure --submit_to_aml flag is set ")
logging.info("Creating .amlignore")
create_amlignore(run_script_path)
logging.info("Creating pipeline")
parser2_result = parse_results_and_add_azure_config_vars(parser, arg_list)
azure_config = AzureConfig(**parser2_result.args)
aml_resources = AMLResources(azure_config)
parallel_steps, all_run_outputs, styled_subsets, temp_spec_files = specify_run_step(
args, aml_resources, run_script_path, loop_config_class
)
if not parallel_steps:
logging.error("All config resolutions were dropped - bailing out")
return None
experiment_labels = [
"--experiment_labels",
"acquisition",
"--experiment_labels",
"batch_strategy",
"--experiment_labels",
"batch",
"--experiment_labels",
"hmc",
]
plotting_step, plotting_datastore = specify_plotting_step(
styled_subsets,
experiment_labels,
all_run_outputs,
aml_resources,
plot_script_path,
)
all_steps = StepSequence(steps=[parallel_steps, plotting_step])
pipeline = Pipeline(workspace=aml_resources.ws, steps=all_steps)
logging.info("Validating pipeline")
pipeline.validate()
logging.info("Creating experiment")
expt = Experiment(aml_resources.ws, "simulator_pipeline")
logging.info("Submitting pipeline run")
pipeline_run = expt.submit(pipeline) # noqa: F841
# remove the temporary spec files created earlier
[os.remove(spec_file) for spec_file in temp_spec_files] # type: ignore
return pipeline_run
def run_simulations_in_pipeline(arg_list: Optional[List[str]], loop_config: Type[OptimizerConfig]): # pragma: no cover
logging_to_stdout()
parser = RunConfig()
args = parser.parse_args(arg_list)
# Multiple resolutions, if any, will be handled inside a single AML run.
_, config_dct = load_config_from_path_or_name(args.spec_file)
rd_path = Path(config_dct["results_dir"])
if rd_path.name.startswith("seed"):
results_dir = f"{args.output_dir}/{rd_path.parent.name}/{rd_path.name}"
else:
results_dir = args.output_dir
logging.info(f"Updating results dir from {config_dct['results_dir']} to {results_dir}")
config_dct["results_dir"] = results_dir
create_config_json_file(config_dct, args)
# Expand config
selections_and_configs = load_resolutions_from_command(args, loop_config)
for pair_list in selections_and_configs:
run_multiple_seeds(args, pair_list)
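# Hedged usage sketch (paths and the spec name are placeholders; the real scripts and the
# OptimizerConfig subclass come from the calling project, not this module):
#
#   run_simulator_pipeline(
#       arg_list=["--spec_file", "<spec>.yml", "--num_runs", "5", "--submit_to_aml"],
#       run_script_path=Path("scripts/run_simulation.py"),
#       plot_script_path=Path("scripts/plot_results.py"),
#       loop_config_class=OptimizerConfig,
#   )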
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import time
import argparse
import cv2 as cv
from yunet.yunet_tflite import YuNetTFLite
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--movie", type=str, default=None)
parser.add_argument("--width", help='cap width', type=int, default=960)
parser.add_argument("--height", help='cap height', type=int, default=540)
parser.add_argument(
"--model",
type=str,
default='model/model_float16_quant.tflite',
)
parser.add_argument(
'--input_shape',
type=str,
default="160,120",
help="Specify an input shape for inference.",
)
parser.add_argument(
'--score_th',
type=float,
default=0.6,
help='Confidence threshold',
)
parser.add_argument(
'--nms_th',
type=float,
default=0.3,
help='NMS IoU threshold',
)
parser.add_argument(
'--topk',
type=int,
default=5000,
)
parser.add_argument(
'--keep_topk',
type=int,
default=750,
)
args = parser.parse_args()
return args
def main():
# Parse arguments #########################################################
args = get_args()
cap_device = args.device
cap_width = args.width
cap_height = args.height
if args.movie is not None:
cap_device = args.movie
model_path = args.model
input_shape = tuple(map(int, args.input_shape.split(',')))
score_th = args.score_th
nms_th = args.nms_th
topk = args.topk
keep_topk = args.keep_topk
# Prepare camera ###########################################################
cap = cv.VideoCapture(cap_device)
cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)
# Load model #############################################################
yunet = YuNetTFLite(
model_path=model_path,
input_shape=input_shape,
conf_th=score_th,
nms_th=nms_th,
topk=topk,
keep_topk=keep_topk,
)
while True:
start_time = time.time()
# Capture frame ################################################
ret, frame = cap.read()
if not ret:
break
debug_image = copy.deepcopy(frame)
# Run inference ########################################################
bboxes, landmarks, scores = yunet.inference(frame)
elapsed_time = time.time() - start_time
# Draw debug info
debug_image = draw_debug(
debug_image,
elapsed_time,
score_th,
input_shape,
bboxes,
landmarks,
scores,
)
# Key handling (ESC: quit) ##############################################
key = cv.waitKey(1)
if key == 27: # ESC
break
# Show result #########################################################
cv.imshow('YuNet TFLite Sample', debug_image)
cap.release()
cv.destroyAllWindows()
def draw_debug(
image,
elapsed_time,
score_th,
input_shape,
bboxes,
landmarks,
scores,
):
image_width, image_height = image.shape[1], image.shape[0]
debug_image = copy.deepcopy(image)
for bbox, landmark, score in zip(bboxes, landmarks, scores):
if score_th > score:
continue
# Face bounding box
x1 = int(image_width * (bbox[0] / input_shape[0]))
y1 = int(image_height * (bbox[1] / input_shape[1]))
x2 = int(image_width * (bbox[2] / input_shape[0])) + x1
y2 = int(image_height * (bbox[3] / input_shape[1])) + y1
cv.rectangle(debug_image, (x1, y1), (x2, y2), (0, 255, 0), 2)
# Score
cv.putText(debug_image, '{:.4f}'.format(score), (x1, y1 + 12),
cv.FONT_HERSHEY_DUPLEX, 0.5, (0, 255, 0))
# Face keypoints
for _, landmark_point in enumerate(landmark):
x = int(image_width * (landmark_point[0] / input_shape[0]))
y = int(image_height * (landmark_point[1] / input_shape[1]))
cv.circle(debug_image, (x, y), 2, (0, 255, 0), 2)
# Inference time
text = 'Elapsed time:' + '%.0f' % (elapsed_time * 1000)
text = text + 'ms'
debug_image = cv.putText(
debug_image,
text,
(10, 30),
cv.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 255, 0),
thickness=2,
)
return debug_image
if __name__ == '__main__':
main()
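# Hedged usage note (the script file name is a placeholder; the flags match get_args() above):
#   python sample_yunet.py --movie input.mp4 --input_shape 160,120 --score_th 0.6
# With no --movie argument, the webcam selected by --device is used instead.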
|
"""Models"""
from os.path import dirname, basename, isfile
from server import login
from server.models.taskyuser import TaskyUser
import glob
modules = glob.glob(dirname(__file__) + "/*.py")
__all__ = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
@login.user_loader
def load_user(id: str) -> TaskyUser:
"""
Loads the user associated with the ID stored in the current session, which allows for
easy retrieval of the instantiated user object.
:param id: the ID of the user
:return: the instantiated TaskyUser
"""
return TaskyUser.query.get(int(id))
|
from django.conf.urls import url, patterns
from news.views import News
urlpatterns = patterns('',
url(r'^$', News.as_view(), name='news'),
)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.contrib import admin
from .models import Functie, VerklaringHanterenPersoonsgegevens
class FunctieAdmin(admin.ModelAdmin):
filter_horizontal = ('accounts',)
ordering = ('beschrijving',)
search_fields = ('beschrijving', 'nhb_ver__naam', 'nhb_ver__plaats', 'bevestigde_email', 'nieuwe_email')
class VHPGAdmin(admin.ModelAdmin):
search_fields = ('account__username',)
list_select_related = ('account',)
admin.site.register(Functie, FunctieAdmin)
admin.site.register(VerklaringHanterenPersoonsgegevens, VHPGAdmin)
# end of file
|
from .errors import *
from .warnings import *
class LinkedEntities:
def __init__(self):
self.rider_category_ids = set()
self.fare_container_ids = set()
def check_linked_fp_entities(line, rider_categories, rider_category_by_fare_container, linked_entities_by_fare_product):
linked_entities = linked_entities_by_fare_product.setdefault(line.fare_product_id, LinkedEntities())
if line.rider_category_id:
linked_entities.rider_category_ids.add(line.rider_category_id)
if line.rider_category_id not in rider_categories:
line.add_error(NONEXISTENT_RIDER_CATEGORY_ID)
else:
linked_entities.rider_category_ids.add('')
if line.fare_container_id:
linked_entities.fare_container_ids.add(line.fare_container_id)
if line.fare_container_id not in rider_category_by_fare_container:
line.add_error(NONEXISTENT_FARE_CONTAINER_ID)
fare_container_rider_cat = rider_category_by_fare_container.get(line.fare_container_id)
if line.rider_category_id and fare_container_rider_cat and (line.rider_category_id != fare_container_rider_cat):
line.add_error(CONFLICTING_RIDER_CATEGORY_ON_FARE_CONTAINER)
else:
linked_entities.fare_container_ids.add('')
linked_entities_by_fare_product[line.fare_product_id] = linked_entities
def check_bundle(line):
if line.bundle_amount:
try:
bundle_amt = int(line.bundle_amount)
if bundle_amt < 0:
line.add_error(INVALID_BUNDLE_AMOUNT)
except ValueError:
line.add_error(INVALID_BUNDLE_AMOUNT)
def check_durations_and_offsets(line):
if line.duration_start and line.duration_start not in {'0', '1'}:
line.add_error(INVALID_DURATION_START)
if line.duration_unit and line.duration_unit not in {'0', '1', '2', '3', '4', '5', '6'}:
line.add_error(INVALID_DURATION_UNIT)
if line.duration_type and line.duration_type not in {'1', '2'}:
line.add_error(INVALID_DURATION_TYPE)
if line.duration_type == '1' and line.duration_start:
line.add_error(DURATION_START_WITH_DURATION_TYPE)
if line.duration_amount:
try:
amt = int(line.duration_amount)
if amt < 1:
line.add_error(NEGATIVE_OR_ZERO_DURATION)
except ValueError:
line.add_error(NON_INT_DURATION_AMOUNT)
if not line.duration_unit:
line.add_error(DURATION_WITHOUT_UNIT)
if not line.duration_type:
line.add_error(DURATION_WITHOUT_TYPE)
else:
if line.duration_type:
line.add_error(DURATION_TYPE_WITHOUT_AMOUNT)
if line.duration_unit:
line.add_error(DURATION_UNIT_WITHOUT_AMOUNT)
if line.offset_unit and line.offset_unit not in {'0', '1', '2', '3', '4', '5', '6'}:
line.add_error(INVALID_OFFSET_UNIT)
if line.offset_amount:
try:
amt = int(line.offset_amount)
except ValueError:
line.add_error(NON_INT_OFFSET_AMOUNT)
if line.duration_type == '2':
line.add_error(OFFSET_AMOUNT_WITH_DURATION_TYPE)
if not line.offset_unit:
line.add_warning(OFFSET_AMOUNT_WITHOUT_OFFSET_UNIT)
else:
if line.offset_unit:
line.add_error(OFFSET_UNIT_WITHOUT_AMOUNT)
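# Hedged illustration (assumes a `line` record exposing the attributes referenced above
# plus the add_error/add_warning collectors provided by the rest of this validator):
# a row with duration_amount="0" and duration_unit="2" triggers NEGATIVE_OR_ZERO_DURATION,
# and a duration_amount set without a duration_type triggers DURATION_WITHOUT_TYPE.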
|
# Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the `EnvironmentModel` class.
"""
import numpy as np
import tensorflow as tf
from tf_agents.environments.tf_environment import TFEnvironment
from tf_agents.trajectories.time_step import StepType, TimeStep, restart, time_step_spec
from tf_agents.utils.nest_utils import get_outer_shape
from bellman.environments.initial_state_distribution_model import InitialStateDistributionModel
from bellman.environments.mixins import BatchSizeUpdaterMixin
from bellman.environments.reward_model import RewardModel
from bellman.environments.termination_model import TerminationModel
from bellman.environments.transition_model.transition_model import TransitionModel
class EnvironmentModel(TFEnvironment):
"""
An approximate MDP 𝓜̂.
The approximate MDP has the following form::
𝓜̂ = (S, A, r̂, P̂, ρ̂₀, γ)
where S is the state space, A is the action space, r̂ is the approximate reward function, P̂
is the approximate state transition distribution, ρ̂₀ is the approximate initial state
distribution and γ is the discount factor of the cumulative reward. Note that the terms "state"
and "observation" are used interchangeably.
This class also requires a `TerminationModel`. This function maps 𝒔 ∊ S to a boolean. If
the `TerminationModel` returns `True` then that state is an absorbing state of the MDP and
the episode is terminated. The model should include all termination criteria which
are intrinsic to the MDP.
Extrinsic termination criteria should be handled in a wrapper around this class.
"""
def __init__(
self,
transition_model: TransitionModel,
reward_model: RewardModel,
termination_model: TerminationModel,
initial_state_distribution_model: InitialStateDistributionModel,
batch_size: int = 1,
):
"""
:param transition_model: The state transition distribution that maps a state-action pair
(𝒔 ∊ S, 𝒂 ∊ A) to the next state 𝒔' ∊ S in a (possibly) probabilistic fashion
:param reward_model: The reward model that maps a state-action-next-state tuple
(𝒔 ∊ S, 𝒂 ∊ A, 𝒔' ∊ S) to a scalar real value
:param termination_model: Termination model. For each state 𝒔 ∊ S, this should return `True`
if state 𝒔 terminates an episode, and `False` otherwise.
:param initial_state_distribution_model: Distribution from which the starting state 𝒔 ∊ S of
a new episode will be sampled. The starting state must not be terminal.
:param batch_size: The batch size expected for the actions and observations, it should
be greater than 0.
"""
if batch_size < 1:
raise ValueError(f"batch_size is " + str(batch_size) + " and it should be > 0")
super().__init__(
time_step_spec(transition_model.observation_space_spec),
transition_model.action_space_spec,
batch_size,
)
self._transition_model = transition_model
self._reward_model = reward_model
self._termination_model = termination_model
self._initial_state_distribution_model = initial_state_distribution_model
self._time_step: TimeStep
self._initialise_trajectory()
@property
def termination_model(self) -> TerminationModel:
"""
Return the `TerminationModel`.
"""
return self._termination_model
def _current_time_step(self):
"""
Return the current TimeStep object.
"""
return self._time_step
def _ensure_no_terminal_observations(self, observation):
"""
Raise error when any observation in the observation batch is terminal.
:param observation: A batch of observations, one for each batch element (the batch size is
the first dimension)
"""
has_terminated = self._termination_model.terminates(observation)
has_terminated_numpy = has_terminated.numpy()
if any(has_terminated_numpy):
termination_indices = np.where(has_terminated_numpy)[0]
raise ValueError(
"Terminal observations occurred at indices "
+ np.array_str(termination_indices)
)
def _set_initial_observation(self, observation):
"""
Set initial observation of the environment model.
:param observation: A batch of observations, one for each batch element (the batch size is
the first dimension)
"""
# Make sure that the observation shape is as expected
batch_size = get_outer_shape(
observation, self._transition_model.observation_space_spec
)
assert batch_size == self._batch_size, batch_size
# Raise error when any initial observation is terminal
self._ensure_no_terminal_observations(observation)
# Create `TimeStep` object from observation tensor. Note that this will mark the observation
# as FIRST.
self._time_step = restart(observation, batch_size=self._batch_size)
def _initialise_trajectory(self):
"""
Sample initial state to start the trajectory.
"""
observation = self._initial_state_distribution_model.sample(
sample_shape=(self._batch_size,)
)
self._set_initial_observation(observation)
def _reset(self):
"""
Sample a TimeStep from the initial distribution, set as FIRST
and return the sampled TimeStep.
"""
self._initialise_trajectory()
return self._time_step
def _step(self, action):
"""
Return predictions of next states for each member of the batch.
:param action: A batch of actions (the batch size is the first dimension)
:return: A batch of next state predictions in the form of a `TimeStep` object
"""
# Make sure that action shape is as expected
batch_size = get_outer_shape(action, self._transition_model.action_space_spec)
assert batch_size == self._batch_size
# Get observation from current time step
observation = self._time_step.observation
# Identify observation batch elements in the previous time step that have terminated. Note
# the conversion to numpy is for performance reasons
is_last = self._time_step.is_last()
is_any_last = any(is_last.numpy())
# Elements of the observation batch that terminated on the previous time step require reset.
if is_any_last:
# Identify number of elements to be reset and their corresponding indexes
number_resets = tf.math.count_nonzero(is_last)
reset_indexes = tf.where(is_last)
# Sample reset observations from initial state distribution
reset_observation = self._initial_state_distribution_model.sample((number_resets,))
# Raise error when any terminal observations are left after re-initialization
self._ensure_no_terminal_observations(reset_observation)
# Get batches of next observations, update observations that were reset
next_observation = self._transition_model.step(observation, action)
if is_any_last:
next_observation = tf.tensor_scatter_nd_update(
next_observation, reset_indexes, reset_observation
)
# Get batches of rewards, set rewards from reset batch elements to 0
reward = self._reward_model.step_reward(observation, action, next_observation)
if is_any_last:
reward = tf.where(condition=is_last, x=tf.constant(0.0), y=reward)
# Get batches of termination flags
has_terminated = self._termination_model.terminates(next_observation)
# Get batches of step types, set step types from reset batch elements to FIRST
step_type = tf.where(condition=has_terminated, x=StepType.LAST, y=StepType.MID)
if is_any_last:
step_type = tf.where(condition=is_last, x=StepType.FIRST, y=step_type)
# Get batches of discounts, set discounts from reset batch elements to 1
discount = tf.where(condition=has_terminated, x=tf.constant(0.0), y=tf.constant(1.0))
if is_any_last:
discount = tf.where(condition=is_last, x=tf.constant(1.0), y=discount)
# Create TimeStep object and return
self._time_step = TimeStep(step_type, reward, discount, next_observation)
return self._time_step
def set_initial_observation(self, observation):
"""
Set initial observation of the environment model.
:param observation: A batch of observations, one for each batch element (the batch size is
the first dimension)
:return: A batch of initials states in the form of a `TimeStep` object
"""
self._set_initial_observation(observation)
return self._time_step
@property
def batch_size(self):
"""
Re-implementing the batch_size property of TFEnvironment in order to define a
setter method.
"""
return self._batch_size
@batch_size.setter
def batch_size(self, batch_size: int) -> None:
if batch_size > 0:
self._batch_size = batch_size
if isinstance(self._transition_model, BatchSizeUpdaterMixin):
self._transition_model.update_batch_size(batch_size)
else:
raise ValueError(f"batch_size is " + str(batch_size) + " and it should be > 0")
def render(self):
raise NotImplementedError("No rendering support.")
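# Hedged usage sketch (the My*Model names are hypothetical concrete implementations of the
# abstract model interfaces imported above; they are not part of this module):
#
#   env_model = EnvironmentModel(
#       transition_model=MyTransitionModel(...),
#       reward_model=MyRewardModel(...),
#       termination_model=MyTerminationModel(...),
#       initial_state_distribution_model=MyInitialStateDistribution(...),
#       batch_size=8,
#   )
#   first_step = env_model.reset()        # samples a batch of initial observations
#   next_step = env_model.step(actions)   # one model rollout step per batch element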
|
from django.apps import AppConfig
class ReferralConfig(AppConfig):
name = 'referral'
|
'''
Created on Nov. 30, 2017
@author Andrew Habib
'''
import json
import os
import sys
from Util import load_parsed_sb, CustomEncoder
def match_sb_msg_no_lines(msg, msgs):
for msg2 in msgs:
if (msg.proj == msg2.proj and msg.cls == msg2.cls and
msg.cat == msg2.cat and msg.abbrev == msg2.abbrev and
msg.typ == msg2.typ and msg.prio == msg2.prio and
msg.rank == msg2.rank and msg.msg == msg2.msg and
msg.mth == msg2.mth and msg.field == msg2.field):
return True
return False
def get_removed_warnings_sb(sb_b, sb_f):
removed_warnings = []
for b_msg in sb_b:
if not match_sb_msg_no_lines(b_msg, sb_f):
removed_warnings.append(b_msg)
return removed_warnings
if __name__ == '__main__':
"""Get errors/warnings that disappeared in fixed versions"""
sb_file = os.path.join(os.getcwd(), sys.argv[1])
sb_res_b = load_parsed_sb(sb_file)
sb_file = os.path.join(os.getcwd(), sys.argv[2])
sb_res_f = load_parsed_sb(sb_file)
warnings = get_removed_warnings_sb(sb_res_b, sb_res_f)
output_file_name = "sb_removed_warnings.json"
with open(output_file_name, "w") as file:
json.dump(warnings, file, cls=CustomEncoder, indent=4)
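# Hedged usage note (file names are placeholders; load_parsed_sb defines the expected format):
#   python get_removed_warnings.py parsed_sb_buggy.json parsed_sb_fixed.json
# writes the warnings present in the buggy version but absent from the fixed version
# to sb_removed_warnings.json.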
|
Nama = ("Mochamad Wilka Asyidiqi")
print(Nama)
|
"""
Plugin for Yo.
"""
from mimic.rest.yo_api import YoAPI
yo = YoAPI()
|
import requests
import json
def getEmoji(myWord):
myAPIkey = "8cb402f051b482a2dac6edef871dfdb2910c8aa2"
apiURL = "https://emoji-api.com/emojis?search={}&access_key={}".format(myWord, myAPIkey)
try:
page = requests.get(apiURL)
emojiJSON = page.json()[0]
emojiToReturn = emojiJSON["character"]
return emojiToReturn
except Exception as e:
print("Failed to find emojis through the API for {}. Error: {}".format(myWord, str(e)))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `klimaatbestendige_netwerken` package."""
import unittest
import logging
from pathlib import Path
from klimaatbestendige_netwerken import pyFIS
logging.basicConfig(level=logging.DEBUG)
class test_pyFIS(unittest.TestCase):
"""Tests for `klimaatbestendige_netwerken` package."""
skipSlowRuns = True
export_dir = Path('export_pyFIS')
def setUp(self):
"""Set up test fixtures"""
self.FIS = pyFIS.pyFIS()
if not self.export_dir.exists():
self.export_dir.mkdir()
def test_print_geogeneration(self):
print(f'Geogeneration: {self.FIS.geogeneration}, Publication Date: {self.FIS.publication_date}')
def test_000_list_geotypes(self):
list_geotypes = self.FIS.list_geotypes()
assert len(list_geotypes) > 0, "Loading failed"
print(list_geotypes)
def test_001_list_relations(self):
list_relations = self.FIS.list_relations('lock')
assert len(list_relations) > 0, "Loading failed"
print(list_relations)
def test_002_list_objects(self):
self.FIS.list_objects('chamber')
self.FIS.list_objects('chamber')
df = self.FIS.chamber
assert df.shape[0] > 0, "Loading failed"
print(df.head(10).to_string())
def test_003_merge_geotypes(self):
df = self.FIS.merge_geotypes('bridge', 'opening')
assert df.shape[0] > 0, "Loading failed"
print(df.head(10).to_string())
def test_003_merge_geotypes2(self):
df = self.FIS.merge_geotypes('lock', 'chamber')
assert df.shape[0] > 0, "Loading failed"
print(df.head(10).to_string())
def test_004_find_by_polygon(self):
pol = [(5.774, 51.898),
(5.742, 51.813),
(6.020, 51.779),
(5.951, 51.912),
(5.774, 51.898),
]
df = self.FIS.find_object_by_polygon('bridge', pol)
assert df.shape[0] > 0, "Loading failed"
print(df.head(10).to_string())
def test_006_find_closest(self):
point = (5.774, 51.898)
df = self.FIS.find_closest_object('bridge', point)
assert df.shape[0] > 0, "Loading failed"
print(df.head(10).to_string())
def test_007_list_all_objects(self):
if self.skipSlowRuns:
self.skipTest('Skipping because this test takes very long')
self.FIS.list_all_objects()
filepath = self.export_dir / f'Export_geogeneration_{self.FIS.geogeneration}.xlsx'
self.FIS.export(filepath=filepath)
self.assertTrue(filepath.is_file())
def test_008_get_object(self):
df = self.FIS.get_object('bridge', 1667)
self.assertGreater(df.shape[0], 0, "Loading failed")
print(df.head(10).to_string())
def test_008_get_object2(self):
df = self.FIS.get_object('section', 24774125)
assert df.shape[0] > 0, "Loading failed"
print(df.head(10).to_string())
def test_009_get_object_subobjects(self):
list_openings = self.FIS.get_object_subobjects('bridge', 1667, 'opening')
assert len(list_openings) > 0, "Loading failed"
print(list_openings.head(10).to_string())
def test_010_find_object_by_value(self):
df = self.FIS.find_object_by_value('bridge', 'Spoorbrug HRMK')
assert df.shape[0] > 0, "Loading failed"
print(df.head(10).to_string())
if __name__ == '__main__':
unittest.main()
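# Hedged note (the module file name is a placeholder): individual tests can be run with, e.g.,
#   python -m unittest test_pyfis.test_pyFIS.test_003_merge_geotypes
# test_007_list_all_objects is skipped by default via skipSlowRuns because it is very slow.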
|
lista = []
menor = 0
maior = 0
for c in range (0, 5):
lista.append(int(input(f'Enter the value at position {c}: ')))
if c == 0:
menor = maior = lista[c]
else:
if lista[c] > maior:
maior = lista[c]
if lista[c] < menor:
menor = lista[c]
print(f'The numbers entered were {lista}')
print(f'The largest number entered was {maior}')
print(f'The smallest number entered was {menor}')
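# Example run (hypothetical inputs 4, 9, 2, 7, 5):
#   The numbers entered were [4, 9, 2, 7, 5]
#   The largest number entered was 9
#   The smallest number entered was 2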
|
#!/usr/bin/env python
from contextlib import contextmanager
from typing import List, Optional
import argparse
import logging
from git_helper import commit
from version_helper import (
FILE_WITH_VERSION_PATH,
ClickHouseVersion,
VersionType,
git,
get_abs_path,
get_version_from_repo,
update_cmake_version,
)
class Release:
BIG = ("major", "minor")
SMALL = ("patch",)
def __init__(self, version: ClickHouseVersion):
self._version = version
self._git = version._git
self._release_commit = ""
self._rollback_stack = [] # type: List[str]
def run(self, cmd: str, cwd: Optional[str] = None) -> str:
cwd_text = ""
if cwd:
cwd_text = f" (CWD='{cwd}')"
logging.info("Running command%s:\n %s", cwd_text, cmd)
return self._git.run(cmd, cwd)
def update(self):
self._git.update()
self.version = get_version_from_repo()
def do(self, args: argparse.Namespace):
self.release_commit = args.commit
if not args.no_check_dirty:
logging.info("Checking if repo is clean")
self.run("git diff HEAD --exit-code")
if not args.no_check_branch:
self.check_branch(args.release_type)
if args.release_type in self.BIG:
# Checkout to the commit, it will provide the correct current version
with self._checkout(self.release_commit, True):
if args.no_prestable:
logging.info("Skipping prestable stage")
else:
with self.prestable(args):
logging.info("Prestable part of the releasing is done")
with self.testing(args):
logging.info("Testing part of the releasing is done")
self.log_rollback()
def check_no_tags_after(self):
tags_after_commit = self.run(f"git tag --contains={self.release_commit}")
if tags_after_commit:
raise Exception(
f"Commit {self.release_commit} belongs to following tags:\n"
f"{tags_after_commit}\nChoose another commit"
)
def check_branch(self, release_type: str):
if release_type in self.BIG:
# Commit to spin up the release must belong to a main branch
output = self.run(f"git branch --contains={self.release_commit} master")
if "master" not in output:
raise Exception(
f"commit {self.release_commit} must belong to 'master' for "
f"{release_type} release"
)
if release_type in self.SMALL:
branch = f"{self.version.major}.{self.version.minor}"
if self._git.branch != branch:
raise Exception(f"branch must be '{branch}' for {release_type} release")
def log_rollback(self):
if self._rollback_stack:
rollback = self._rollback_stack
rollback.reverse()
logging.info(
"To rollback the action run the following commands:\n %s",
"\n ".join(rollback),
)
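# Hedged illustration (hypothetical branch/label names, and the stack may also contain
# push/tag rollbacks not shown here): after a failed prestable run the reversed rollback
# stack printed above might look like
#   gh api repos/<owner>/<repo>/labels/v22.3-affected -X DELETE
#   gh api repos/<owner>/<repo>/labels/v22.3-must-backport -X DELETE
#   git branch -D 22.3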
@contextmanager
def prestable(self, args: argparse.Namespace):
self.check_no_tags_after()
# Create release branch
self.update()
release_branch = f"{self.version.major}.{self.version.minor}"
with self._create_branch(release_branch, self.release_commit):
with self._checkout(release_branch, True):
self.update()
self.version.with_description(VersionType.PRESTABLE)
with self._create_gh_release(args):
with self._bump_prestable_version(release_branch, args):
# At this point everything will rollback automatically
yield
@contextmanager
def testing(self, args: argparse.Namespace):
# Create branch for a version bump
self.update()
self.version = self.version.update(args.release_type)
helper_branch = f"{self.version.major}.{self.version.minor}-prepare"
with self._create_branch(helper_branch, self.release_commit):
with self._checkout(helper_branch, True):
self.update()
self.version = self.version.update(args.release_type)
with self._bump_testing_version(helper_branch, args):
yield
@property
def version(self) -> ClickHouseVersion:
return self._version
@version.setter
def version(self, version: ClickHouseVersion):
if not isinstance(version, ClickHouseVersion):
raise ValueError(f"version must be ClickHouseVersion, not {type(version)}")
self._version = version
@property
def release_commit(self) -> str:
return self._release_commit
@release_commit.setter
def release_commit(self, release_commit: str):
self._release_commit = commit(release_commit)
@contextmanager
def _bump_prestable_version(self, release_branch: str, args: argparse.Namespace):
self._git.update()
new_version = self.version.patch_update()
new_version.with_description("prestable")
update_cmake_version(new_version)
cmake_path = get_abs_path(FILE_WITH_VERSION_PATH)
self.run(
f"git commit -m 'Update version to {new_version.string}' '{cmake_path}'"
)
with self._push(release_branch, args):
with self._create_gh_label(
f"v{release_branch}-must-backport", "10dbed", args
):
with self._create_gh_label(
f"v{release_branch}-affected", "c2bfff", args
):
self.run(
f"gh pr create --repo {args.repo} --title 'Release pull "
f"request for branch {release_branch}' --head {release_branch} "
"--body 'This PullRequest is a part of ClickHouse release "
"cycle. It is used by CI system only. Do not perform any "
"changes with it.' --label release"
)
# Here the prestable part is done
yield
@contextmanager
def _bump_testing_version(self, helper_branch: str, args: argparse.Namespace):
self.version.with_description("testing")
update_cmake_version(self.version)
cmake_path = get_abs_path(FILE_WITH_VERSION_PATH)
self.run(
f"git commit -m 'Update version to {self.version.string}' '{cmake_path}'"
)
with self._push(helper_branch, args):
body_file = get_abs_path(".github/PULL_REQUEST_TEMPLATE.md")
self.run(
f"gh pr create --repo {args.repo} --title 'Update version after "
f"release' --head {helper_branch} --body-file '{body_file}'"
)
            # Here the testing version bump is done
yield
@contextmanager
def _checkout(self, ref: str, with_checkout_back: bool = False):
orig_ref = self._git.branch or self._git.sha
need_rollback = False
if ref not in (self._git.branch, self._git.sha):
need_rollback = True
self.run(f"git checkout {ref}")
            # the checkout rollback is intentionally not pushed onto _rollback_stack
rollback_cmd = f"git checkout {orig_ref}"
try:
yield
except BaseException:
logging.warning("Rolling back checked out %s for %s", ref, orig_ref)
self.run(f"git reset --hard; git checkout {orig_ref}")
raise
else:
if with_checkout_back and need_rollback:
self.run(rollback_cmd)
@contextmanager
def _create_branch(self, name: str, start_point: str = ""):
self.run(f"git branch {name} {start_point}")
rollback_cmd = f"git branch -D {name}"
self._rollback_stack.append(rollback_cmd)
try:
yield
except BaseException:
logging.warning("Rolling back created branch %s", name)
self.run(rollback_cmd)
raise
@contextmanager
def _create_gh_label(self, label: str, color: str, args: argparse.Namespace):
self.run(f"gh api repos/{args.repo}/labels -f name={label} -f color={color}")
rollback_cmd = f"gh api repos/{args.repo}/labels/{label} -X DELETE"
self._rollback_stack.append(rollback_cmd)
try:
yield
except BaseException:
logging.warning("Rolling back label %s", label)
self.run(rollback_cmd)
raise
@contextmanager
def _create_gh_release(self, args: argparse.Namespace):
with self._create_tag(args):
# Preserve tag if version is changed
tag = self.version.describe
self.run(
f"gh release create --prerelease --draft --repo {args.repo} "
f"--title 'Release {tag}' '{tag}'"
)
rollback_cmd = f"gh release delete --yes --repo {args.repo} '{tag}'"
self._rollback_stack.append(rollback_cmd)
try:
yield
except BaseException:
logging.warning("Rolling back release publishing")
self.run(rollback_cmd)
raise
@contextmanager
def _create_tag(self, args: argparse.Namespace):
tag = self.version.describe
self.run(f"git tag -a -m 'Release {tag}' '{tag}'")
rollback_cmd = f"git tag -d '{tag}'"
self._rollback_stack.append(rollback_cmd)
try:
with self._push(f"'{tag}'", args):
yield
except BaseException:
logging.warning("Rolling back tag %s", tag)
self.run(rollback_cmd)
raise
@contextmanager
def _push(self, ref: str, args: argparse.Namespace):
self.run(f"git push git@github.com:{args.repo}.git {ref}")
rollback_cmd = f"git push -d git@github.com:{args.repo}.git {ref}"
self._rollback_stack.append(rollback_cmd)
try:
yield
except BaseException:
logging.warning("Rolling back pushed ref %s", ref)
self.run(rollback_cmd)
raise
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Script to release a new ClickHouse version, requires `git` and "
"`gh` (github-cli) commands",
)
parser.add_argument(
"--repo",
default="ClickHouse/ClickHouse",
help="repository to create the release",
)
parser.add_argument(
"--type",
default="minor",
        choices=Release.BIG + Release.SMALL,
dest="release_type",
help="a release type, new branch is created only for 'major' and 'minor'",
)
parser.add_argument(
"--no-prestable",
action="store_true",
help=f"for release types in {Release.BIG} skip creating prestable release and "
"release branch",
)
parser.add_argument(
"--commit",
default=git.sha,
type=commit,
help="commit create a release, default to HEAD",
)
parser.add_argument(
"--no-check-dirty",
action="store_true",
help="skip check repository for uncommited changes",
)
parser.add_argument(
"--no-check-branch",
action="store_true",
help="by default, 'major' and 'minor' types work only for master, and 'patch' "
"works only for a release branches, that name should be the same as "
"'$MAJOR.$MINOR' version, e.g. 22.2",
)
return parser.parse_args()
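# Hypothetical example invocations (the script path is an assumption; the flags are
# the ones defined in parse_args above):
#   python release.py --commit <sha> --type minor
#   python release.py --commit <sha> --type patch --no-check-dirty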
def prestable():
pass
def main():
logging.basicConfig(level=logging.INFO)
args = parse_args()
release = Release(get_version_from_repo())
release.do(args)
if __name__ == "__main__":
main()
|