text stringlengths 8 6.05M |
|---|
from cv2 import cv2
import numpy as np
from PIL import Image
import math
import glob
import os
import time
import csv
def rotate_image_and_crop(mat, angle, bound_w, bound_h):
    """Rotate *mat* by *angle* degrees about its centre and render the result
    onto a ``bound_w`` x ``bound_h`` canvas (constant border fill).

    :param mat: source image as a numpy array (H, W[, C])
    :param angle: rotation angle in degrees (counter-clockwise)
    :param bound_w: width of the output canvas in pixels
    :param bound_h: height of the output canvas in pixels
    :return: the rotated image on the new canvas
    """
    rows, cols = mat.shape[:2]
    # getRotationMatrix2D expects (x, y) order, i.e. (width/2, height/2).
    pivot = (cols / 2, rows / 2)
    transform = cv2.getRotationMatrix2D(pivot, angle, 1.)
    # Translate so the source centre lands on the centre of the target canvas.
    transform[0, 2] += bound_w / 2 - pivot[0]
    transform[1, 2] += bound_h / 2 - pivot[1]
    return cv2.warpAffine(mat, transform, (bound_w, bound_h), borderMode=cv2.BORDER_CONSTANT)
def rotate_image(mat, angle):
    """
    Rotates an image (angle in degrees) and expands image to avoid cropping.

    :param mat: source image as a numpy array (H, W[, C])
    :param angle: rotation angle in degrees (counter-clockwise)
    :return: the rotated image on an enlarged canvas
    """
    rows, cols = mat.shape[:2]
    # getRotationMatrix2D expects (x, y) order, i.e. (width/2, height/2).
    pivot = (cols / 2, rows / 2)
    transform = cv2.getRotationMatrix2D(pivot, angle, 1.)
    # |cos| and |sin| of the rotation, read straight off the matrix.
    cos_a = abs(transform[0, 0])
    sin_a = abs(transform[0, 1])
    # Bounding box of the rotated image so no corner is clipped.
    bound_w = int(rows * sin_a + cols * cos_a)
    bound_h = int(rows * cos_a + cols * sin_a)
    # Re-centre the rotation onto the enlarged canvas.
    transform[0, 2] += bound_w / 2 - pivot[0]
    transform[1, 2] += bound_h / 2 - pivot[1]
    # INTER_NEAREST keeps label/mask pixel values exact (no interpolation blur).
    return cv2.warpAffine(mat, transform, (bound_w, bound_h), flags=cv2.INTER_NEAREST)
def get_max_average_width_of_crack(display,number_of_points):
    """
    Estimate crack width statistics for one binary patch.

    Fits a line to the crack pixels with a simple RANSAC, rotates the patch so
    the fitted line is horizontal, then scans vertical pixel runs per column to
    measure widths, and finally rotates the located max-width point back into
    the original patch coordinates.

    NOTE(review): `display` is mutated in place (a line and a rectangle are
    drawn on it).

    ## INPUT : patch_img_path : 'display : height,width shape (numpy array) , number_of_iterations : 50 (int)
    ## OUTPUT : MAX WIDTH , AVERAGE WIDTH , MIN X, MIN Y, MAX X, MAX Y, MAX WIDTH X, MAX WIDTH Y
    """
    display_size = display.shape
    # Coordinates of all crack pixels (any value > 1 counts as crack).
    index= np.nonzero(display>1)
    points = np.zeros((len(index[0]), 2), dtype=np.float32)
    index_y = index[0]
    index_x = index[1]
    if len(index_x)==0:
        # No crack pixels at all: every statistic is zero.
        return 0,0,0,0,0,0,0,0
    '''return total_max_width,total_average_width,minx,miny,maxx,maxy,max_width_x,max_width_y'''
    # Bounding box of the crack pixels in the original patch.
    minx = min(index_x)
    maxx = max(index_x)
    miny = min(index_y)
    maxy = max(index_y)
    # Collect pixel coordinates as (x, y) rows for the RANSAC line fit.
    for i in range(0,len(index[0])):
        y=index[0][i]
        x=index[1][i]
        points[i, :] = [x, y]
    # cv2.imshow('Display', display)
    # cv2.waitKey(0)
    # print(points.shape)
    # minimum inlier distance and iterations
    eeta = 20
    iterations = int(number_of_points / 2)
    # Initializing best params
    max_inliers = 0
    best_m = 0
    best_b = 0
    # RANSAC iterations: fit y = m*x + b through two random points and keep
    # the line with the most inliers within perpendicular distance eeta.
    for i in range(iterations):
        # Selecting random samples (two); bump r2 by one if it collides with r1
        r1 = np.random.randint(0, points.shape[0])
        r2 = np.random.randint(0, points.shape[0] - 1)
        if r2 == r1:
            r2 += 1
        point_1 = points[r1, :]
        point_2 = points[r2, :]
        # Calculating new values of m and b from random samples
        # (pairs with equal x are skipped: the slope would be infinite)
        if (point_2[0] - point_1[0])!=0:
            m = (point_2[1] - point_1[1]) / (point_2[0] - point_1[0])
            b = -m * point_1[0] + point_1[1]
            # Finding difference (perpendicular distance of point to line)
            diff = abs(((-m * points[:, 0]) + (points[:, 1] - b)) / ((-m) ** 2 + 1) ** 0.5)
            # Calculating inliers
            inliers = len(np.where(diff < eeta)[0])
            # Updating best params if better inliers found
            if inliers > max_inliers:
                max_inliers = inliers
                best_m = m
                best_b = b
    # Arrays created from a PIL image can be read-only; make the patch
    # writable so the fitted line can be drawn onto it.
    display.flags.writeable = True
    p1 = (0, int(best_b))
    p2 = (display_size[0], int(best_m * display_size[0] + best_b))
    cv2.line(display, p1, p2, (0, 0, 255), 2)
    ###############################################################
    # Find Tangent's Degree
    ###############################################################
    radian = np.arctan(best_m)
    degree = radian * (180 / math.pi )
    # print('origin size : ',display_size)
    # print('radian : {}'.format(radian))
    # print('degree : {}'.format(degree))
    ###############################################################
    # Create a new boundary and rotate the image so that the crack is horizontal.
    ###############################################################
    display2=rotate_image(display,degree)
    # Re-binarise after rotation: any non-zero pixel becomes 255.
    display2[display2!=0]=255
    # print(np.unique(display,return_counts=True))
    display2_size = display2.shape
    # print('rotated size : ',display_size)
    total_max_width, total_average_width = 0, 0
    ###############################################################
    # Find the maximum width and average width of the crack.
    ###############################################################
    max_widths =[]
    num_of_crack_pixel = 0
    max_width_x ,max_width_y = 0,0
    index = np.nonzero(display2 > 1)
    if len(index[0])>0:
        index_y = index[0]
        index_x = index[1]
        # Bounding box of the crack in the rotated frame.
        minx = min(index_x)
        maxx = max(index_x)
        miny = min(index_y)
        maxy = max(index_y)
        # First pass per column: longest vertical run of crack pixels = width.
        for j in range(minx-1,maxx+1):
            max_width = 0
            tmp_width = 0
            isCrack = False
            for i in range(miny-1,maxy+1):
                value = display2[i, j]
                if value==255:
                    isCrack = True
                    num_of_crack_pixel+=1
                    tmp_width+=1
                    max_width = tmp_width
                else :
                    tmp_width = 0
            if isCrack:
                max_widths.append(max_width)
                if(total_max_width<max_width):
                    total_max_width=max_width
                # Second pass: mark (with value 128) the pixel halfway down a
                # run that reaches total_max_width/2; the previous marker is
                # reset to 255 so only the last such pixel stays marked.
                tmp_width = 0
                for i in range(miny-1,maxy+1):
                    value = display2[i, j]
                    if value == 255:
                        isCrack = True
                        tmp_width +=1
                        if tmp_width==total_max_width/2:
                            display2[max_width_y,max_width_x]=255
                            max_width_x = j
                            max_width_y = i
                            display2[max_width_y,max_width_x]=128
                    else:
                        tmp_width=0
        # cv2.rectangle(display2, (max_width_x - 1, max_width_y - 1), (max_width_x + 1, max_width_y + 1), (0, 0, 255), 1)
        # Keep only the 128 marker pixel, then make it 255 so it survives
        # the nearest-neighbour rotation back into the original frame.
        display2[display2==255]=0
        display2[display2!=0] = 255
        display3=rotate_image_and_crop(display2,-degree,display_size[0],display_size[1])
        # The brightest remaining pixel in display3 is the rotated-back marker.
        max_width_y,max_width_x=np.where(display3==np.unique(display3)[-1])
        max_width_x, max_width_y = max_width_x[0], max_width_y[0]
    if num_of_crack_pixel>0:
        # Average of the per-column maximum widths.
        total_average_width = sum(max_widths) / len(max_widths)
    else:
        print('No crack pixels.')
    # Draw a small box around the max-width location on the original patch.
    cv2.rectangle(display, (max_width_x - 1, max_width_y - 1), (max_width_x + 1, max_width_y + 1), (0, 0, 255), 1)
    # print result
    # print(max_widths)
    # print('DISPLAY MAX WIDTH X , MAX WIDTH Y : ', max_width_x, max_width_y)
    # print('NUMBER OF CRACK PIXELS : ', num_of_crack_pixel)
    # print('MAX WIDTH PIXEL LENGTH : ',total_max_width)
    # print('AVERAGE WIDTH PIXEL LENGTH : ',total_average_width)
    # print('MIN X, MIN Y, MAX X, MAX Y : {} {} {} {}'.format(minx,miny,maxx,maxy))
    # Displaying
    # display = cv2.flip(display, 0)
    #
    # cv2.imshow('Display', display)
    # cv2.waitKey(0)
    # cv2.imshow('Display2', display2)
    # cv2.waitKey(0)
    # cv2.imshow('Display3', display3)
    # cv2.waitKey(0)
    return total_max_width,total_average_width,minx,miny,maxx,maxy,max_width_x,max_width_y
def write_csvfile(contents=None, dir_path='D:\\docker2\\hed\\test', csv_name='crack_Analysis.csv'):
    """Append one row to a CSV file.

    :param contents: list of values written as a single CSV row (defaults to an empty row)
    :param dir_path: directory containing the CSV file
    :param csv_name: name of the CSV file
    """
    # FIX: a mutable default argument ([]) is shared across calls; use None.
    if contents is None:
        contents = []
    # FIX: use a context manager so the handle is closed even if writerow
    # raises, and open with newline='' as the csv module requires (otherwise
    # blank lines appear between rows on Windows).
    with open(os.path.join(dir_path, csv_name), 'a', encoding='utf-8', newline='') as f:
        csv.writer(f).writerow(contents)
def crack_width_analysis(gt_path, iter=30, patch_size=256, output_path=None, csv_name=None, write_csv=False):
    '''
    Analyse crack widths of a ground-truth image patch by patch.

    INPUT : gt path, iterations, patch size, output path, csv name, write csv
    OUTPUT : dictionary => patch x coordinates(start), patch y coordinates(start), patch size, max Width, avg Width, minx, miny, maxx, maxy, max width x(center), max width y(center)
    '''
    full_img_dict = {}
    key = 1
    # Load as 8-bit grayscale.
    input_img = Image.open(gt_path).convert('L')
    display = np.asarray(input_img)
    # BUGFIX: PIL's Image.size is (width, height); the original unpacked it as
    # (height, width), swapping the patch-grid dimensions for non-square images.
    width, height = input_img.size
    # Number of whole patches along each axis (partial patches are dropped).
    y = int(height / patch_size)
    x = int(width / patch_size)
    if write_csv:
        # Header row for the per-patch statistics.
        write_csvfile(['total_max_width', 'total_average_width', 'minx', 'miny', 'maxx', 'maxy', 'max_width_x', 'max_width_y'], output_path, csv_name)
    for j in range(0, y):
        for i in range(0, x):
            # Rows are indexed by j (vertical), columns by i (horizontal).
            patch_img = display[patch_size * j:patch_size * (j + 1), patch_size * i:patch_size * (i + 1)]
            total_max_width, total_average_width, minx, miny, maxx, maxy, max_width_x, max_width_y = get_max_average_width_of_crack(
                patch_img, iter)
            if write_csv:
                write_csvfile([total_max_width, total_average_width, minx, miny, maxx, maxy, max_width_x, max_width_y], output_path, csv_name)
            full_img_dict[key] = {
                'start_x': patch_size * i, 'start_y': patch_size * j, 'patch_size': patch_size,
                'total_max_width': total_max_width, 'total_average_width': total_average_width,
                'minx': minx, 'miny': miny, 'maxx': maxx, 'maxy': maxy,
                'max_width_x': max_width_x, 'max_width_y': max_width_y
            }
            key += 1
    return full_img_dict
def main():
    """Run crack-width analysis over every PNG in the data directory and
    print per-image results plus the total processing time."""
    start = time.time()  # record the start time
    data_path = 'D:\\docker2\\hed\\test'  # directory holding the ground-truth images
    file_names = '*.png'  # glob pattern for the image files
    output_path = 'D:\\docker2\\hed'  # output directory (needed when writing the CSV)
    csv_name = 'crack_Analysis.csv'  # CSV file name (needed when writing the CSV)
    iter = 30  # iteration parameter for the internal RANSAC algorithm
    patch_size = 256  # patch size in pixels
    CSV_WRITE = False  # whether to write the analysis results to a CSV file
    gt_list = glob.glob(os.path.join(data_path, file_names))
    full_dataset_dict = {}
    for gt_path in gt_list:
        print(gt_path)
        # Strip the directory and the '.png' extension to get the image name.
        gt_name = gt_path.split('\\')[-1][:-4]
        full_dataset_dict[gt_name]=crack_width_analysis(gt_path, iter, patch_size, output_path, csv_name, CSV_WRITE)
        print('----------------------------------------------------------------------------')
    print(full_dataset_dict)
    t = time.time() - start
    print("{} files preprocessing time : {} ".format(len(gt_list),t))  # elapsed = now - start
if __name__ == '__main__':
main() |
# Default TimeServer and ClientServer buffer length, in bytes.
buffer = 1024
# TimeServer default port number.
port = 8981
# Maximum waiting time for clients' response, in seconds.
timeout = 10
# Inclusive range for server response's delay (min, max), in seconds.
server_response_delay_range = [1, 2]
|
from bitstring import BitArray, BitStream
import Image
import sys
import hashlib
def getKey(pw):
    """Derive a 160-bit key from password *pw*.

    The key is the SHA-1 digest of the password wrapped in a BitArray.
    """
    digest = hashlib.sha1(pw).digest()
    return BitArray(bytes=digest)
def getImageData(image):
    """Open *image* (a path or file object) and return its pixel sequence."""
    loaded = Image.open(image)
    #print len(data)
    return loaded.getdata()
|
# coding=utf-8
"""Script that computes an employee's salary using the formula:
(hours worked * hourly rate) + bonus.
The concrete values must be passed as command-line arguments
when running the script."""
from sys import argv
# argv: script name, hours worked, hourly rate, bonus.
_, working_time,wage_rate,reward = argv
try:
    # All three inputs must be integers; otherwise ValueError is raised.
    working_time, wage_rate, reward = int(working_time),int(wage_rate),int(reward)
    print(f'Ваша зарплата: {working_time*wage_rate+reward}')
except ValueError:
    print('Неверно введеные данные. Введите целые числа.')
|
""" function to load pulse shape of atmoNC, IBD and fast neutron events from file and add dark counts to the
pulse shape.
These pulse shapes with dark counts are then also saved and can be analyzed with analyze_PSD_cut_v2.py.
"""
import datetime
import os
import re
import numpy as np
def get_numbers_from_filename(filename):
    """
    function to get number as integer out of a string. For example: filename='file235' -> num = 235 of type integer
    :param filename: string of one part of the filename 'file{}_evt{}_prompt_signal.txt'
    :return: first run of digits in *filename*, as an integer
    """
    # Locate the first group of consecutive digits and convert it.
    match = re.search(r'\d+', filename)
    return int(match.group(0))
def pulse_shape_dcr(pathname, filename, dcr_20inch, n_20inch, dcr_3inch, n_3inch, evt_type):
    """
    function to add dark counts to pulse shape

    Reads the pulse-shape file, adds Poisson-like dark counts distributed
    uniformly over the pulse time window, and writes the result to a new
    '*_DCR.txt' file in the same directory.

    NOTE(review): uses the module-level `now` timestamp defined at the bottom
    of this script in the output headers.

    :param pathname: name of the path, where hittimes are saved
    :param filename: name of the pulse shape file
    :param dcr_20inch: DCR of 20inch PMTs (in Hz)
    :param n_20inch: number of 20inch PMTs
    :param dcr_3inch: DCR of 3inch PMTs (in Hz)
    :param n_3inch: number of 3inch PMTs
    :param evt_type: event type: 'atmoNC', 'IBD' or 'fastN'
    :return:
    """
    # split string file_ibdlike into two parts: 1. part: 'file{}', 2. part: 'vt{}_prompt_signal.txt' or
    # 'vt{}_pulse_shape_R16m.txt' for fast neutron events
    x = filename.split("_e")
    # x[0] is string 'file{}':
    file_string = x[0]
    # x[1] is string 'vt{}_prompt_signal.txt' or 'vt{}_pulse_shape_R16m.txt':
    end_string = x[1]
    # split end_string into two parts: 1. part 'vt{}_prompt'/'vt{}_pulse, 2.part: 'ignal.txt' or 'hape_R16m.txt':
    y = end_string.split("_s")
    # y[0] is string 'vt{}_prompt':
    event_string = y[0]
    # y[1] is string 'nal.txt' or 'nal_R16m.txt':
    rest_string = y[1]
    # get file_number of file_string:
    file_number = get_numbers_from_filename(file_string)
    # get evtID of event_string:
    evtid = get_numbers_from_filename(event_string)
    # for fast neutron events: get radius from the filename:
    if evt_type == 'fastN':
        cut_radius = get_numbers_from_filename(rest_string)
    # read file (flat vector: 3 position values, 3 timing values, then the shape):
    pulse_shape = np.loadtxt(pathname + filename)
    # get reconstructed position in mm:
    x_reco = pulse_shape[0]
    y_reco = pulse_shape[1]
    z_reco = pulse_shape[2]
    # get start time of pulse shape in ns:
    time_start = pulse_shape[3]
    # get end time of pulse shape in ns:
    time_end = pulse_shape[4]
    # get bin-width of pulse shape in ns:
    bin_width = pulse_shape[5]
    # the rest is the pulse shape:
    pulse_shape = pulse_shape[6:]
    # time-window of the pulse shape in ns:
    time_window = time_end - time_start
    # calculate number of dark counts in the time window
    # (rate [Hz] * window [ns] * 1e-9 * number of PMTs, truncated to int):
    number_dc = int(dcr_20inch * time_window * 10**(-9) * n_20inch +
                    dcr_3inch * time_window * 10**(-9) * n_3inch)
    # generate the time of the dark count with uniformly distributed random number in time_window
    # (for all number_dc):
    time_dc = np.random.uniform(time_start, time_end, size=number_dc)
    # build histogram with time_DC (must have same shape like pulse_shape):
    bins_hittime = np.arange(time_start, time_end+bin_width, bin_width)
    npe_dc, bin_edges_dc = np.histogram(time_dc, bins_hittime)
    # add npe_dc to pulse_shape to get pulse shape with DCR considered:
    pulse_shape_dc = pulse_shape + npe_dc
    # save new pulse shape to file (same layout as the input file):
    pulse_shape_dc_save = [x_reco, y_reco, z_reco]
    pulse_shape_dc_save.extend([time_start, time_end, bin_width])
    pulse_shape_dc_save.extend(pulse_shape_dc)
    if evt_type == 'fastN':
        # fast-neutron files carry the cut radius in the output name:
        np.savetxt(pathname + "/file{0:d}_evt{1:d}_pulse_shape_R{2:d}_DCR.txt".format(file_number, evtid, cut_radius),
                   pulse_shape_dc_save, fmt='%1.2f',
                   header="Pulse shape of prompt signal with DCR: Number of pe as function of the time "
                          "(time-of-flight correction, TTS smearing, DCR considered) of file user_{6}_{0:d}.root,"
                          "\nevent {1:d}, {2}:"
                          "\ntime window of pulse shape: from {3:.3f} ns to {4:.3f} ns with bin-width = {5:0.3f} "
                          "ns,"
                   .format(file_number, evtid, now, time_start, time_end, bin_width, evt_type))
    else:
        np.savetxt(pathname + "/file{0:d}_evt{1:d}_prompt_signal_DCR.txt".format(file_number, evtid),
                   pulse_shape_dc_save, fmt='%1.2f',
                   header="Pulse shape of prompt signal with DCR: Number of pe as function of the time "
                          "(time-of-flight correction, TTS smearing, DCR considered) of file user_{6}_{0:d}.root,"
                          "\nevent {1:d}, {2}:"
                          "\ntime window of pulse shape: from {3:.3f} ns to {4:.3f} ns with bin-width = {5:0.3f} "
                          "ns,"
                   .format(file_number, evtid, now, time_start, time_end, bin_width, evt_type))
    return
# get the date and time, when the script was run (used in output file headers):
date = datetime.datetime.now()
now = date.strftime("%Y-%m-%d %H:%M")
# path, where the pulse shapes of NC events are stored:
path_NC = "/home/astro/blum/juno/atmoNC/data_NC/output_detsim_v2/hittimes/"
# path, where the pulse shapes of IBD events are stored:
path_IBD = "/home/astro/blum/juno/IBD_events/hittimes/"
# path, where the pulse shapes of Fast Neutron events are stored:
path_FN = "/home/astro/blum/PhD/work/MeVDM_JUNO/fast_neutrons/hittimes/"
""" Dark count rate parameters: """
# from file PmtData.root and PMT_position.root in folder /home/astro/blum/juno/atmoNC/PMT_information/:
# Dark count rate of small PMTs can be neglected
# total number of 20 inch PMTs:
number_20inchPMT = 17739
# number of Hamamatsu PMTs:
number_Hama = 4998
# number of MCP PMTs:
number_MCP = 12741
# total number of 3inch Pmts:
number_3inchPMT = 36572
# mean of Dark count rate of 20inch PMTs in Hz (/home/astro/blum/PhD/paper/PMT/20190114The progress of PMT test.pdf):
DCR_20inch = 31500.0
# Dark count rate of Hamamatsu PMTs in Hz:
DCR_Hama = 15500.0
# Dark count rate of MCP PMTs in Hz:
DCR_MCP = 46100.0
# Dark count rate of 3inch PMTs (/home/astro/blum/PhD/paper/PMT/hem_181115_spmt_review.pdf):
DCR_3inch = 550.0
""" loop over pulse shapes of prompt signals of atmo. NC events: """
# print("atmo NC events...")
# for file_NC in os.listdir(path_NC):
#     # read only files that start with 'file' and end with 'prompt_signal.txt'
#     if file_NC.startswith("file") and file_NC.endswith("prompt_signal.txt"):
#
#         pulse_shape_dcr(path_NC, file_NC, DCR_20inch, number_20inchPMT, DCR_3inch, number_3inchPMT, "atmoNC")
""" loop over pulse shapes of prompt signals of IBD events: """
# print("IBD events...")
# for file_IBD in os.listdir(path_IBD):
#     # read only files that start with 'file' and end with 'prompt_signal.txt'
#     if file_IBD.startswith("file") and file_IBD.endswith("prompt_signal.txt"):
#
#         pulse_shape_dcr(path_IBD, file_IBD, DCR_20inch, number_20inchPMT, DCR_3inch, number_3inchPMT, "IBD")
""" loop over pulse shapes of prompt signals of fast neutron events: """
print("fastN events...")
for file_FN in os.listdir(path_FN):
    # read only files that start with 'file' and end with 'pulse_shape_R16.txt' or 'pulse_shape_R17.txt'
    if file_FN.startswith("file") and \
            (file_FN.endswith("pulse_shape_R16.txt") or file_FN.endswith("pulse_shape_R17.txt")):
        pulse_shape_dcr(path_FN, file_FN, DCR_20inch, number_20inchPMT, DCR_3inch, number_3inchPMT, "fastN")
|
# absolute value: abs()
print('-400的绝对值', abs(-400))
# exponentiation: pow(); math.pow() also works
print('2的10次方:', pow(2, 10))
# sum of a sequence: sum()
l1 = [1, 2, 3, 4, 5]
print('l1序列的和为:', sum(l1))
# sum() with a second argument adds it as the start value
print('l1序列的和再加2:', sum(l1, 2))
# maximum max() and minimum min()
print('l1序列中的最大值和最小值分别为:%d \t %d' % (max(l1), min(l1)))
|
import csv
from io import StringIO
import uuid
from guests.models import Event, Guest
def import_guests(path):
    """Import events and guests from a CSV file.

    Expected columns (the first/header row is skipped):
    event_name, first_name, last_name, event_type, is_child, category,
    is_invited, email. Rows with an empty event_name are skipped.

    :param path: filesystem path of the CSV file to import
    """
    # BUGFIX: Python 3's csv.reader requires a text-mode file; the original
    # opened in 'rb', which raises "iterator should return strings, not bytes".
    # newline='' is the mode the csv module documentation prescribes.
    with open(path, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        first_row = True
        for row in reader:
            if first_row:
                # skip the header row
                first_row = False
                continue
            event_name, first_name, last_name, event_type, is_child, category, is_invited, email = row[:8]
            if not event_name:
                print('skipping row {}'.format(row))
                continue
            event = Event.objects.get_or_create(name=event_name)[0]
            event.type = event_type
            event.category = category
            event.is_invited = _is_true(is_invited)
            if not event.invitation_id:
                # ensure every event has a stable invitation token
                event.invitation_id = uuid.uuid4().hex
            event.save()
            if email:
                # an email uniquely identifies a guest within an event
                guest, created = Guest.objects.get_or_create(event=event, email=email)
                guest.first_name = first_name
                guest.last_name = last_name
            else:
                # no email: fall back to matching by name
                guest = Guest.objects.get_or_create(event=event, first_name=first_name, last_name=last_name)[0]
            guest.is_child = _is_true(is_child)
            guest.save()
def export_guests():
    """Export all attending guests as CSV.

    Only guests with ``is_attending`` set are written.

    :return: an in-memory ``StringIO`` containing the CSV data
    """
    # BUGFIX: the header row had 8 columns while each data row has 9;
    # 'phone_number' was missing between 'email' and 'comments'.
    headers = [
        'first_name', 'last_name', 'event_name',
        'is_child', 'is_invited', 'is_attending', 'email', 'phone_number',
        'comments'
    ]
    file = StringIO()
    writer = csv.writer(file)
    writer.writerow(headers)
    for event in Event.in_default_order():
        for guest in event.guest_set.all():
            if guest.is_attending:
                writer.writerow([
                    guest.first_name,
                    guest.last_name,
                    event.name,
                    guest.is_child,
                    guest.is_invited,
                    guest.is_attending,
                    guest.email,
                    guest.phone_number,
                    event.comments,
                ])
    return file
def _is_true(value):
value = value or ''
return value.lower() in ('y', 'yes')
|
import json
import requests
import telepot
import re
def search_taobao(msg):
    """Handle a Telegram message: search Taobao for msg['text'] and return a
    reply string with the first three results (titles, prices, links).

    NOTE(review): this scrapes the embedded `g_page_config = {...}` JS object
    from the search page HTML, so it is fragile against Taobao page changes.

    :param msg: Telegram message dict; only msg['text'] is used
    :return: reply string with three results, or an apology on failure
    """
    item_name = msg['text']
    response = requests.get('https://s.taobao.com/search?q='+item_name)
    html = response.text
    # The result data is embedded in the page as a JS assignment.
    regex = r'g_page_config =(.+)'
    items = re.findall(regex, html)
    items = items.pop().strip()
    # Drop the trailing character (presumably the statement's closing ';')
    # so the remainder parses as JSON — TODO confirm against a live page.
    items = items[0:-1]
    items = json.loads(items)
    item_list= items['mods']['itemlist']['data']['auctions']
    #raw name
    try:
        name1 = item_list[0]['raw_title']
        name2 = item_list[1]['raw_title']
        name3 = item_list[2]['raw_title']
        #prices
        price1 = '¥'+ item_list[0]['view_price']
        price2 = '¥'+ item_list[1]['view_price']
        price3 = '¥'+ item_list[2]['view_price']
        # To send the result to users
        return('The first three results of '+item_name+' in Taobao are listed below: ''\n\n'+ name1 +' '+ price1+'\n\n'+ name2 +' ' + price2+'\n\n'+ name3 + ' ' + price3
               +'\n\nHere you go the links\U000026C4: \n'+ item_list[0]['detail_url']
               +'\n\n'+ item_list[1]['detail_url']
               +'\n\n'+ item_list[2]['detail_url'])
    except IndexError:
        # Fewer than three auctions in the result list.
        return("\U0001F614 Sorry , the Taobao server is too busy,"+
               " maybe you can try to search another item name .")
|
import pdb
import sys
import simpleaudio as sa
import wave
import time
class infinite_array():
    """A stand-in for pdb's cmdqueue list that never runs out.

    ``pop`` always yields the same preset command string and ``append`` is a
    no-op, so the debugger keeps replaying a single command forever.
    """

    def __init__(self, inp):
        # The single command string handed back by every pop().
        self.standard_input = inp

    def pop(self, *args):
        # Ignore any index argument; the "queue" is bottomless.
        return self.standard_input

    def append(self, *args):
        # Discard anything pushed; mirror list.append's None return.
        return None
class myPdb(pdb.Pdb):
    """Pdb subclass that plays successive slices of a WAV file as the user
    steps through code: each prompt plays the partition selected by the
    current line number, turning a debugging session into 'music'."""
    # Handle of the simpleaudio playback currently in flight (None until the
    # first command is entered).
    play_obj = None

    def inpWav(self, wav_path, length_of_partition):
        """Load a WAV file and cut it into fixed-length frame partitions.

        :wav_path: path of the WAV file to load
        :length_of_partition: number of frames per partition
        :returns: None; fills self.data and the audio format attributes
        """
        # Maps partition index -> raw frame bytes.
        self.data = {}
        wave_read = wave.open(wav_path, 'rb')
        self.num_channels = wave_read.getnchannels()
        self.bytes_per_sample = wave_read.getsampwidth()
        self.sample_rate = wave_read.getframerate()
        no_frames = wave_read.getnframes()
        # Trailing frames that don't fill a whole partition are dropped.
        self.no_partitions = no_frames//length_of_partition
        for i in range(self.no_partitions):
            self.data[i] = wave_read.readframes(length_of_partition)

    def precmd(self,line):
        # Pick the audio partition from the current source line number, wait
        # for the previous chunk to finish, then start the next one.
        line_no = self.curframe.f_lineno
        if self.play_obj:
            self.play_obj.wait_done()
        self.play_obj = sa.play_buffer(self.data[line_no%self.no_partitions], self.num_channels, self.bytes_per_sample, self.sample_rate)
        return line

    def postcmd(self, stop, line):
        #self.play_obj.wait_done()
        return stop

    def _cmdloop(self):
        # Same as pdb.Pdb._cmdloop: retry the command loop after Ctrl-C.
        while True:
            try:
                # keyboard interrupts allow for an easy way to cancel
                # the current command, so allow them during interactive input
                self.allow_kbdint = True
                self.cmdloop()
                self.allow_kbdint = False
                break
            except KeyboardInterrupt:
                self.message('--KeyboardInterrupt--')

    def do_play2(self, line):
        # Auto-replay 'next' forever via the bottomless command queue.
        self.cmdqueue = infinite_array('n\r\n')

    def do_play(self, line):
        # Auto-replay 'step' forever via the bottomless command queue.
        self.cmdqueue = infinite_array('s\r\n')
def my_set_trace(*, wav_path='music/alex-skrindo-miza-thinkin.wav', length_of_partition=80000, header=None):
    """Drop into the musical debugger, like pdb.set_trace().

    :param wav_path: WAV file whose slices are played while stepping
    :param length_of_partition: frames per audio slice
    :param header: optional message printed before the prompt
    """
    # Skip frames from this module and importlib so tracing starts in user code.
    mypdb = myPdb(skip = ['musicalpdb','importlib*'])
    mypdb.inpWav(wav_path, length_of_partition)
    if header is not None:
        mypdb.message(header)
    # Attach to the caller's frame, mirroring pdb.set_trace().
    mypdb.set_trace(sys._getframe().f_back)
|
#!/usr/bin/env python3
from functools import partial
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import netCDF4 as nc4
from e3sm_case_output import E3SMCaseOutput, day_str
# Simulated-day range to plot.
START_DAY = 1
END_DAY = 15
# The ZM_10s case only has output through this day.
END_ZM10S_DAY = 19
# Day range used for time averages.
START_AVG_DAY = 3
END_AVG_DAY = 15
DAILY_FILE_LOC="/p/lscratchh/santos36/timestep_daily_avgs/"
# Which case set to plot (mutually exclusive) and optional regional masks.
FOCUS_PRECIP = False
USE_PRESAER = True
LAND_ONLY = False
OCEAN_ONLY = False # Note that this includes ocean and sea-ice grid cells
TROPICS_ONLY = False
MIDLATITUDES_ONLY = False
assert not (FOCUS_PRECIP and USE_PRESAER), \
    "no precipitation-specific prescribed aerosol run set has been defined"
assert not (LAND_ONLY and OCEAN_ONLY), \
    "can't do only land and only ocean"
assert not (TROPICS_ONLY and MIDLATITUDES_ONLY), \
    "can't do only tropics and only midlatitudes"
days = list(range(START_DAY, END_DAY+1))
ndays = len(days)
navgdays = END_AVG_DAY - START_AVG_DAY + 1
# Output-file suffix encodes day range, case set, and region selection.
suffix = '_d{}-{}'.format(day_str(START_DAY), day_str(END_DAY))
if FOCUS_PRECIP:
    suffix += '_precip'
if USE_PRESAER:
    suffix += '_presaer'
if LAND_ONLY:
    sfc_suffix = 'lnd'
elif OCEAN_ONLY:
    sfc_suffix = 'ocn'
else:
    sfc_suffix = ''
if TROPICS_ONLY:
    suffix += '_{}tropics'.format(sfc_suffix)
elif MIDLATITUDES_ONLY:
    suffix += '_{}midlats'.format(sfc_suffix)
elif sfc_suffix != '':
    suffix += '_{}'.format(sfc_suffix)
# Progress log for this plotting run.
log_file = open("plot_daily_log{}.txt".format(suffix), 'w')
# Select the reference case, the test cases, and per-case plot styles
# (short_name -> (matplotlib color, linestyle)) for the chosen case set.
if USE_PRESAER:
    # Prescribed-aerosol runs.
    REF_CASE = E3SMCaseOutput("timestep_presaer_ctrl", "CTRLPA", DAILY_FILE_LOC, START_DAY, END_DAY)
    TEST_CASES = [
        E3SMCaseOutput("timestep_presaer_ZM_10s", "ZM10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_ZM_10s_lower_tau", "ZM10LTPA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_CLUBB_MG2_10s", "CLUBBMICRO10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_CLUBB_MG2_10s_ZM_10s", "CLUBBMICRO10ZM10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_CLUBB_MG2_10s_ZM_10s_lower_tau", "CLUBBMICRO10ZM10LTPA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_cld_10s", "CLD10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_cld_10s_lower_tau", "CLD10LTPA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_cld_10s_lower_tau2", "CLD10LT2PA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_all_10s", "ALL10PA", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_presaer_all_10s_lower_tau", "ALL10LTPA", DAILY_FILE_LOC, START_DAY, END_DAY),
    ]
    STYLES = {
        "CLUBBMICRO10PA": ('indigo', '-'),
        "ALL10PA": ('dimgrey', '-'),
        "ZM10PA": ('g', '-'),
        "CLUBBMICRO10ZM10PA": ('saddlebrown', '-'),
        "CLD10PA": ('slateblue', '-'),
        "ALL10LTPA": ('dimgrey', '-.'),
        "ZM10LTPA": ('g', '-.'),
        "CLUBBMICRO10ZM10LTPA": ('saddlebrown', '-.'),
        "CLD10LTPA": ('slateblue', '-.'),
        "CLD10LT2PA": ('slateblue', ':'),
    }
elif FOCUS_PRECIP:
    # Precipitation-focused runs.
    REF_CASE = E3SMCaseOutput("timestep_ctrl", "CTRL", DAILY_FILE_LOC, START_DAY, END_DAY)
    TEST_CASES = [
        E3SMCaseOutput("timestep_precip_grad", "PFMG", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_CLUBB_10s", "CLUBB10", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_CLUBB_10s_MG2_10s", "CLUBB10MICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_MG2_10s", "MICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_precip_grad_MG2_10s", "PFMGMICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_CLUBB_MG2_60s", "CLUBBMICRO60", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_CLUBB_MG2_10s", "CLUBBMICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_precip_grad_CLUBB_MG2_10s", "PFMGCLUBBMICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_all_300s", "ALL300", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_all_60s", "ALL60", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_all_10s", "ALL10", DAILY_FILE_LOC, START_DAY, END_DAY),
    ]
    STYLES = {
        "DYN10": ('y', '-'),
        "CLUBB10": ('b', '-'),
        "MICRO10": ('r', '-'),
        "CLUBB10MICRO10": ('maroon', '-'),
        "CLUBBMICROSTR": ('m', '-'),
        "CLUBBMICROSTR60": ('m', '--'),
        "CLUBBMICRO60": ('indigo', '--'),
        "CLUBBMICRO10": ('indigo', '-'),
        "ALL10": ('dimgrey', '-'),
        "ALL60": ('dimgrey', '--'),
        "ALL300": ('dimgrey', ':'),
        "ALLRAD10": ('orange', '-'),
        "PFMG": ('k', '-.'),
        "PFMGMICRO10": ('r', '-.'),
        "PFMGCLUBBMICRO10": ('indigo', '-.'),
    }
else:
    # Default (interactive aerosol) runs.
    REF_CASE = E3SMCaseOutput("timestep_ctrl", "CTRL", DAILY_FILE_LOC, START_DAY, END_DAY)
    TEST_CASES = [
        E3SMCaseOutput("timestep_dyn_10s", "DYN10", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_CLUBB_10s", "CLUBB10", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_CLUBB_10s_MG2_10s", "CLUBB10MICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_CLUBB_MG2_Strang", "CLUBBMICROSTR", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_CLUBB_MG2_Strang_60s", "CLUBBMICROSTR60", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_MG2_10s", "MICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_CLUBB_MG2_60s", "CLUBBMICRO60", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_CLUBB_MG2_10s", "CLUBBMICRO10", DAILY_FILE_LOC, START_DAY, END_DAY),
        # E3SMCaseOutput("timestep_ZM_10s", "ZM10", DAILY_FILE_LOC, START_DAY, END_ZM10S_DAY),
        # E3SMCaseOutput("timestep_ZM_300s", "ZM300", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_all_rad_10s", "ALLRAD10", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_all_300s", "ALL300", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_all_60s", "ALL60", DAILY_FILE_LOC, START_DAY, END_DAY),
        E3SMCaseOutput("timestep_all_10s", "ALL10", DAILY_FILE_LOC, START_DAY, END_DAY),
    ]
    STYLES = {
        "DYN10": ('y', '-'),
        "CLUBB10": ('b', '-'),
        "MICRO10": ('r', '-'),
        "CLUBB10MICRO10": ('maroon', '-'),
        "CLUBBMICROSTR": ('m', '-'),
        "CLUBBMICROSTR60": ('m', '--'),
        "CLUBBMICRO60": ('indigo', '--'),
        "CLUBBMICRO10": ('indigo', '-'),
        "ALL10": ('dimgrey', '-'),
        "ALL60": ('dimgrey', '--'),
        "ALL300": ('dimgrey', ':'),
        "ALLRAD10": ('orange', '-'),
    }
case_num = len(TEST_CASES)
# Read grid geometry (levels, columns, cell areas) from the first daily file
# of the reference case.
rfile0 = nc4.Dataset(REF_CASE.get_daily_file_name(START_DAY), 'r')
nlev = len(rfile0.dimensions['lev'])
ncol = len(rfile0.dimensions['ncol'])
area = rfile0['area'][:]
# Scale areas by land fraction for surface-restricted averages.
if LAND_ONLY:
    landfrac = rfile0['LANDFRAC'][0,:]
    area *= landfrac
elif OCEAN_ONLY:
    landfrac = rfile0['LANDFRAC'][0,:]
    area *= 1. - landfrac
# For tropics_only cases, just use a weight of 0 for all other cases.
if TROPICS_ONLY:
    lat = rfile0['lat'][:]
    for i in range(ncol):
        if np.abs(lat[i]) > 30.:
            area[i] = 0.
# Same for midlatitudes.
elif MIDLATITUDES_ONLY:
    lat = rfile0['lat'][:]
    for i in range(ncol):
        if np.abs(lat[i]) < 30. or np.abs(lat[i]) > 60.:
            area[i] = 0.
# Normalise to area weights that sum to 1 over the selected region.
area_sum = area.sum()
weights = area/area_sum
rfile0.close()
def calc_var_stats(ref_case, test_cases, day, varnames):
    """Compute area-weighted global means, test-minus-ref differences, and
    RMSEs of the requested variables for one day.

    "PRECT" (total precip = PRECL + PRECC) and "TAU" (wind stress magnitude
    = sqrt(TAUX^2 + TAUY^2)) are derived here rather than read from file.

    NOTE(review): relies on module-level globals `case_num`, `weights`,
    `nlev`, `TEST_CASES`, and `vars_3D`; `vars_3D` is not defined in this
    part of the file — presumably defined elsewhere; confirm.

    :param ref_case: reference E3SMCaseOutput
    :param test_cases: list of test E3SMCaseOutput objects
    :param day: simulated day to process
    :param varnames: variable names to evaluate (may include PRECT/TAU)
    :return: (ref_avg, test_avgs, diff_avgs, rmses) dicts keyed by variable;
             per-case entries are None for cases without output on this day
    """
    # Only read real file variables; derived names are handled below.
    varnames_read = [name for name in varnames if name != "PRECT" and name != "TAU"]
    if "PRECT" in varnames:
        if "PRECL" not in varnames:
            varnames_read.append("PRECL")
        if "PRECC" not in varnames:
            varnames_read.append("PRECC")
    if "TAU" in varnames:
        if "TAUX" not in varnames:
            varnames_read.append("TAUX")
        if "TAUY" not in varnames:
            varnames_read.append("TAUY")
    ref_time_avg, test_time_avgs, diff_time_avgs = ref_case.compare_daily_averages(test_cases, day, varnames_read)
    # Derive total precipitation from its large-scale and convective parts.
    if "PRECT" in varnames:
        ref_time_avg["PRECT"] = ref_time_avg["PRECL"] + ref_time_avg["PRECC"]
        for icase in range(case_num):
            test_time_avgs[icase]["PRECT"] = test_time_avgs[icase]["PRECL"] + test_time_avgs[icase]["PRECC"]
            diff_time_avgs[icase]["PRECT"] = diff_time_avgs[icase]["PRECL"] + diff_time_avgs[icase]["PRECC"]
    # Derive stress magnitude; its diff is recomputed from the magnitudes
    # since the magnitude of a difference is not the difference of magnitudes.
    if "TAU" in varnames:
        ref_time_avg["TAU"] = np.sqrt(ref_time_avg["TAUX"]**2 + ref_time_avg["TAUY"]**2)
        for icase in range(case_num):
            test_time_avgs[icase]["TAU"] = np.sqrt(test_time_avgs[icase]["TAUX"]**2 + test_time_avgs[icase]["TAUY"]**2)
            diff_time_avgs[icase]["TAU"] = test_time_avgs[icase]["TAU"] - ref_time_avg["TAU"]
    ref_avg = dict()
    test_avgs = dict()
    diff_avgs = dict()
    rmses = dict()
    for varname in varnames:
        # 3-D variables get one weighted mean per model level.
        if varname in vars_3D:
            ref_avg[varname] = np.zeros((nlev,))
            for jlev in range(nlev):
                ref_avg[varname][jlev] = (ref_time_avg[varname][jlev,:] * weights).sum()
        else:
            ref_avg[varname] = (ref_time_avg[varname] * weights).sum()
        test_avgs[varname] = []
        diff_avgs[varname] = []
        rmses[varname] = []
        for i in range(len(test_cases)):
            if test_cases[i].day_is_available(day):
                if varname in vars_3D:
                    test_avgs[varname].append(np.zeros((nlev,)))
                    diff_avgs[varname].append(np.zeros((nlev,)))
                    rmses[varname].append(np.zeros((nlev,)))
                    for jlev in range(nlev):
                        test_avgs[varname][-1][jlev] = (test_time_avgs[i][varname][jlev,:] * weights).sum()
                        diff_avgs[varname][-1][jlev] = (diff_time_avgs[i][varname][jlev,:] * weights).sum()
                        rmses[varname][-1][jlev] = np.sqrt((diff_time_avgs[i][varname][jlev,:]**2 * weights).sum())
                else:
                    test_avgs[varname].append((test_time_avgs[i][varname] * weights).sum())
                    diff_avgs[varname].append((diff_time_avgs[i][varname] * weights).sum())
                    rmses[varname].append(np.sqrt((diff_time_avgs[i][varname]**2 * weights).sum()))
                    # Sanity check: mean of differences equals difference of means.
                    assert np.isclose(diff_avgs[varname][i], test_avgs[varname][i] - ref_avg[varname]).all(), \
                        "Problem with diff of variable {} from case {}".format(varname, TEST_CASES[i].short_name)
            else:
                # No output for this case on this day.
                test_avgs[varname].append(None)
                diff_avgs[varname].append(None)
                rmses[varname].append(None)
    return (ref_avg, test_avgs, diff_avgs, rmses)
# Possible ways to extract a 2D section start here:
def identity(x):
    """Return *x* unchanged (2-D extractor used for surface/2-D fields)."""
    return x
def slice_at(level, x):
    """Extract the 2-D section of *x* at vertical index *level*.

    Intended for use with functools.partial to bind *level* first.
    """
    return x[:, level]
def plot_vars_over_time(names, units, scales, log_plot_names):
    """Plot daily-mean time series (value, difference vs. reference, RMSE) for
    each variable in *names*, writing one PNG per plot and summary statistics
    to log_file.

    Args:
        names: list of variable names to process.
        units: dict mapping variable name -> unit string for axis labels.
        scales: dict mapping variable name -> multiplicative scale factor.
        log_plot_names: variables drawn with a logarithmic y-axis.

    Relies on module globals: days/ndays, nlev, case_num, REF_CASE, TEST_CASES,
    STYLES, START_DAY, START_AVG_DAY, END_AVG_DAY, navgdays, suffix, log_file,
    vars_3D, plot_names, calc_var_stats, partial and matplotlib's plt.
    """
    # Allocate accumulators; 3D variables carry an extra level dimension.
    ref_means = dict()
    test_means = dict()
    diff_means = dict()
    rmses = dict()
    for name in names:
        if name in vars_3D:
            ref_means[name] = np.zeros((ndays, nlev))
            test_means[name] = np.zeros((case_num, ndays, nlev))
            diff_means[name] = np.zeros((case_num, ndays, nlev))
            rmses[name] = np.zeros((case_num, ndays, nlev))
        else:
            ref_means[name] = np.zeros((ndays,))
            test_means[name] = np.zeros((case_num, ndays))
            diff_means[name] = np.zeros((case_num, ndays))
            rmses[name] = np.zeros((case_num, ndays))
    # Gather per-day statistics for every variable and case.
    for iday in range(ndays):
        day = days[iday]
        print("On day: ", day, file=log_file, flush=True)
        ref_mean, test_case_means, diff_case_means, case_rmses = calc_var_stats(REF_CASE, TEST_CASES, day, names)
        for name in names:
            ref_means[name][iday] = ref_mean[name]*scales[name]
            for i in range(case_num):
                # Days outside a case's run window stay zero in the arrays.
                if TEST_CASES[i].day_is_available(day):
                    test_means[name][i,iday] = test_case_means[name][i]*scales[name]
                    diff_means[name][i,iday] = diff_case_means[name][i]*scales[name]
                    rmses[name][i,iday] = case_rmses[name][i]*scales[name]
    for name in names:
        plot_name = name
        if name in plot_names:
            plot_name = plot_names[name]
        # 3D variables are reduced to their lowest level for plotting.
        get_2D = identity
        if name in vars_3D:
            get_2D = partial(slice_at, nlev-1)
        if name in log_plot_names:
            plot_var = plt.semilogy
        else:
            plot_var = plt.plot
        # Plot 1: absolute means for each test case plus the reference.
        for i in range(case_num):
            test_plot_var = get_2D(test_means[name][i])
            start_ind = TEST_CASES[i].start_day - START_DAY
            end_ind = TEST_CASES[i].end_day - START_DAY + 1
            plot_var(days[start_ind:end_ind],
                     test_plot_var[start_ind:end_ind],
                     label=TEST_CASES[i].short_name,
                     color=STYLES[TEST_CASES[i].short_name][0],
                     linestyle=STYLES[TEST_CASES[i].short_name][1])
        ref_plot_var = get_2D(ref_means[name])
        plot_var(days, ref_plot_var, label=REF_CASE.short_name, color='k')
        plt.axis('tight')
        plt.xlabel("day")
        plt.ylabel("Mean {} ({})".format(plot_name, units[name]))
        plt.savefig('{}_time{}.png'.format(name, suffix))
        plt.close()
        # Plot 2: differences from the reference case.
        for i in range(case_num):
            diff_plot_var = get_2D(diff_means[name][i])
            start_ind = TEST_CASES[i].start_day - START_DAY
            end_ind = TEST_CASES[i].end_day - START_DAY + 1
            plot_var(days[start_ind:end_ind],
                     diff_plot_var[start_ind:end_ind],
                     label=TEST_CASES[i].short_name,
                     color=STYLES[TEST_CASES[i].short_name][0],
                     linestyle=STYLES[TEST_CASES[i].short_name][1])
        plt.axis('tight')
        plt.xlabel("day")
        plt.ylabel("Mean {} difference ({})".format(plot_name, units[name]))
        plt.savefig('{}_diff_time{}.png'.format(name, suffix))
        plt.close()
        # Plot 3: RMSE of each case against the reference.
        for i in range(case_num):
            rmse_plot_var = get_2D(rmses[name][i])
            start_ind = TEST_CASES[i].start_day - START_DAY
            end_ind = TEST_CASES[i].end_day - START_DAY + 1
            plot_var(days[start_ind:end_ind],
                     rmse_plot_var[start_ind:end_ind],
                     label=TEST_CASES[i].short_name,
                     color=STYLES[TEST_CASES[i].short_name][0],
                     linestyle=STYLES[TEST_CASES[i].short_name][1])
        plt.axis('tight')
        plt.xlabel("day")
        plt.ylabel("{} RMSE ({})".format(plot_name, units[name]))
        plt.savefig('{}_rmse_time{}.png'.format(name, suffix))
        plt.close()
        # Log time-averaged summary statistics over the averaging window.
        print(name, " has reference mean: ", sum(ref_plot_var[START_AVG_DAY-START_DAY:END_AVG_DAY-START_DAY+1])/navgdays,
              file=log_file)
        for i in range(case_num):
            case_name = TEST_CASES[i].short_name
            test_plot_var = get_2D(test_means[name][i])
            diff_plot_var = get_2D(diff_means[name][i])
            print(name, " has case ", case_name, " mean: ", sum(test_plot_var[START_AVG_DAY-START_DAY:END_AVG_DAY-START_DAY+1])/navgdays,
                  file=log_file)
            print(name, " has difference mean: ", sum(diff_plot_var[START_AVG_DAY-START_DAY:END_AVG_DAY-START_DAY+1])/navgdays,
                  file=log_file)
            # assumes "LT" cases directly follow the case they compare to — TODO confirm
            if USE_PRESAER and "LT" in case_name:
                compare_name = TEST_CASES[i-1].short_name
                compare_plot_var = get_2D(test_means[name][i-1])
                print(name, " has mean difference from ", compare_name, ": ",
                      sum(test_plot_var[START_AVG_DAY-START_DAY:END_AVG_DAY-START_DAY+1])/navgdays - \
                      sum(compare_plot_var[START_AVG_DAY-START_DAY:END_AVG_DAY-START_DAY+1])/navgdays,
                      file=log_file)
# Human-readable plot titles keyed by model variable name.
plot_names = {
    'LWCF': "longwave cloud forcing",
    'SWCF': "shortwave cloud forcing",
    'PRECC': "convective precipitation",
    'PRECL': "large-scale precipitation",
    'PRECT': "total precipitation",
    'TGCLDIWP': "ice water path",
    'TGCLDLWP': "liquid water path",
    'CLDTOT': "cloud area fraction",
    'CLDLOW': "low cloud area fraction",
    'CLDMED': "mid-level cloud area fraction",
    'CLDHGH': "high cloud area fraction",
    'LHFLX': "latent heat flux",
    'SHFLX': "sensible heat flux",
    'TAU': "surface wind stress",
    'TS': "surface temperature",
    'PSL': "sea level pressure",
    'OMEGA500': "vertical velocity at 500 mb",
    'U10': "10 meter wind speed",
    'RELHUM': "surface relative humidity",
    'Q': "specific humidity",
    'CLDLIQ': "lowest level cloud liquid",
    'TMQ': "precipitable water",
    'CLOUD': "lowest level cloud fraction",
    'T': "lowest level temperature",
}
# Axis-label unit strings (LaTeX where useful) keyed by variable name.
units = {
    'LWCF': r'$W/m^2$',
    'SWCF': r'$W/m^2$',
    'PRECC': r'$mm/day$',
    'PRECL': r'$mm/day$',
    'PRECT': r'$mm/day$',
    'TGCLDIWP': r'$g/m^2$',
    'TGCLDLWP': r'$g/m^2$',
    'AODABS': r'units?',
    'AODUV': r'units?',
    'AODVIS': r'units?',
    'FLDS': r'$W/m^2$',
    'FLNS': r'$W/m^2$',
    'FLNSC': r'$W/m^2$',
    'FLNT': r'$W/m^2$',
    'FLNTC': r'$W/m^2$',
    'FLUT': r'$W/m^2$',
    'FLUTC': r'$W/m^2$',
    'FSDS': r'$W/m^2$',
    'FSDSC': r'$W/m^2$',
    'FSNS': r'$W/m^2$',
    'FSNSC': r'$W/m^2$',
    'FSNT': r'$W/m^2$',
    'FSNTC': r'$W/m^2$',
    'FSNTOA': r'$W/m^2$',
    'FSNTOAC': r'$W/m^2$',
    'FSUTOA': r'$W/m^2$',
    'FSUTOAC': r'$W/m^2$',
    'CLDTOT': r'fraction',
    'CLDLOW': r'fraction',
    'CLDMED': r'fraction',
    'CLDHGH': r'fraction',
    'OMEGA500': r'Pa/s',
    'LHFLX': r'$W/m^2$',
    'SHFLX': r'$W/m^2$',
    'TAU': r'$N/m^2$',
    'TAUX': r'$N/m^2$',
    'TAUY': r'$N/m^2$',
    'TS': r'$K$',
    'PSL': r'$Pa$',
    'U10': r'$m/s$',
    'RELHUM': r'%',
    'Q': r'$g/kg$',
    'CLDLIQ': r"$g/kg$",
    'TMQ': r'$kg/m^2$',
    'CLOUD': r'$fraction$',
    'T': r'$K$',
}
# Every variable with a unit entry gets plotted.
names = list(units.keys())
# Unit-conversion factors applied to the raw model output (default: none).
scales = dict()
for name in names:
    scales[name] = 1.
scales['TGCLDIWP'] = 1000.
scales['TGCLDLWP'] = 1000.
# kg/m^2/s -> mm/day for precipitation rates.
scales['PRECC'] = 1000.*86400.
scales['PRECL'] = 1000.*86400.
scales['PRECT'] = 1000.*86400.
scales['Q'] = 1000.
scales['CLDLIQ'] = 1000.
# Variables with a vertical (level) dimension; plotted at the lowest level.
vars_3D = [
    'RELHUM',
    'Q',
    'CLDLIQ',
    'T',
    'CLOUD',
]
log_plot_names = []#'AODABS', 'AODVIS', 'AODUV']
plot_vars_over_time(names, units, scales, log_plot_names)
log_file.close()
|
import time, math
start_time = time.time()
def prime_factor(number):
    """Return True if *number* is prime, False otherwise.

    (Despite the name, this is a primality test, not a factorisation;
    the name is kept for compatibility with existing callers.)

    Uses 6k +/- 1 trial division up to sqrt(number). The previous bound
    of number // 2 gave the same answers but was O(n) instead of O(sqrt n):
    any composite n must have a factor <= sqrt(n).
    """
    if number == 1:
        return False
    elif number < 4:
        return True   # 2 and 3 are prime
    elif number % 2 == 0:
        return False
    elif number < 9:
        return True   # 5 and 7 are prime (4, 6, 8 caught above)
    elif number % 3 == 0:
        return False
    else:
        # Candidate divisors of the form 6k-1 / 6k+1 up to sqrt(number).
        r = math.isqrt(number)
        f = 5
        while f <= r:
            if number % f == 0:
                return False
            if number % (f + 2) == 0:
                return False
            f += 6
        return True
# Collect primes in order until we have 10001 of them, then report the
# last one found (Project Euler problem 7) and the elapsed wall time.
found = []
candidate = 2
while len(found) < 10001:
    if prime_factor(candidate):
        found.append(candidate)
    candidate += 1
prime_factors = found  # keep the original module-level name
print(prime_factors[10000])
print("Elapsed Time: ",(time.time() - start_time))
import RPi.GPIO as gpio
from time import sleep,time
#from Tkinter import *
from distance import distance_front, distance_rear
import random
def init():
    """Configure the four motor-control pins as outputs (BOARD pin numbering).

    Called at the start of every movement function; each movement ends with
    gpio.cleanup(), so the pins must be re-initialised each time.
    """
    gpio.setmode(gpio.BOARD)
    gpio.setup(13, gpio.OUT) # Yellow wire
    gpio.setup(15, gpio.OUT) # Green wire
    gpio.setup(16, gpio.OUT) # Red wire
    gpio.setup(18, gpio.OUT) # Brown wire
def forward(tf): # tf = time frame for action
    """Drive straight forward for *tf* seconds, then release the GPIO pins."""
    init()
    # (pin, state): 16/18 steer straight, 13/15 roll the wheels forward.
    for pin, state in ((16, True), (18, True), (13, True), (15, False)):
        gpio.output(pin, state)
    sleep(tf)
    gpio.cleanup()
def reverse(tf): # tf = time frame for action
    """Drive straight backward for *tf* seconds, then release the GPIO pins."""
    init()
    # (pin, state): 16/18 steer straight, 13/15 roll the wheels backward.
    for pin, state in ((16, True), (18, True), (13, False), (15, True)):
        gpio.output(pin, state)
    sleep(tf)
    gpio.cleanup()
def right(tf): # tf = time frame for action
    """Drive forward while steering right for *tf* seconds, then release pins."""
    init()
    # (pin, state): 16/18 steer right, 13/15 roll the wheels forward.
    for pin, state in ((16, True), (18, False), (13, True), (15, False)):
        gpio.output(pin, state)
    sleep(tf)
    gpio.cleanup()
def left(tf): # tf = time frame for action
    """Drive forward while steering left for *tf* seconds, then release pins."""
    init()
    # (pin, state): 16/18 steer left, 13/15 roll the wheels forward.
    for pin, state in ((16, False), (18, True), (13, True), (15, False)):
        gpio.output(pin, state)
    sleep(tf)
    gpio.cleanup()
def revright(tf): # tf = time frame for action
    """Reverse while steering right for *tf* seconds, straighten, release pins."""
    init()
    # (pin, state): 16/18 steer right, 13/15 roll the wheels backward.
    for pin, state in ((16, True), (18, False), (13, False), (15, True)):
        gpio.output(pin, state)
    sleep(tf)
    # Return the steering to straight before releasing the pins.
    for pin in (16, 18):
        gpio.output(pin, True)
    gpio.cleanup()
def revleft(tf): # tf = time frame for action
    """Reverse while steering left for *tf* seconds, straighten, release pins."""
    init()
    # (pin, state): 16/18 steer left, 13/15 roll the wheels backward.
    for pin, state in ((16, False), (18, True), (13, False), (15, True)):
        gpio.output(pin, state)
    sleep(tf)
    # Return the steering to straight before releasing the pins.
    for pin in (16, 18):
        gpio.output(pin, True)
    gpio.cleanup()
#action = random.randrange(1, 6+1)
#print action
#print int(time()) % 6
tf = 0.03  # time frame (seconds) for each single wheel action
action_list = [ forward, reverse, left, right, revleft, revright ]
#action = int(time()) % 6
#print action
#action_list[action](tf)
# Main obstacle-avoidance loop: repeat the chosen action in short steps while
# the relevant distance sensor reads clear, then back off when too close.
# NOTE(review): the "random" choice below is the wall-clock second mod 6, so
# the same action repeats for a whole second — confirm if true randomness
# (e.g. random.randrange) was intended.
try:
    while True:
        action = int(time()) % 6 # pick a random action
        if action in (0,2,3): # forward moving actions
            curDis = distance_front()
            i = 0
            while curDis > 30 and i < 30: # repeat action until time or distance limit
                action_list[action](tf)
                curDis = distance_front()
                i = i + 1
            # NOTE(review): a reading of exactly 30 cm triggers neither loop
            # nor back-off — confirm whether <= was intended.
            if curDis < 30: # move backward if distance less than 30 cm
                reverse(.5)
        if action in (1,4,5): # backwards moving actions
            curDis_rear = distance_rear()
            i = 0
            while curDis_rear > 30 and i < 30: # repeat action until time or distance limit
                action_list[action](tf)
                curDis_rear = distance_rear()
                i = i + 1
            if curDis_rear < 30: # move forward if distance less than 30 cm
                forward(.5)
except KeyboardInterrupt:
    gpio.cleanup()  # release GPIO pins cleanly on Ctrl-C
|
import random
class Agent:
    """Agent that ignores its percept and picks an action uniformly at random."""

    def __init__(self):
        # Highest action index (inclusive); actions are 0..n.
        self.n = 5

    def nextAction(self, percept):
        """Return a random action index in [0, self.n]; *percept* is unused."""
        return random.randrange(self.n + 1)
|
from json import *
from io import StringIO
class Edit(object):
    """Mutable holder for one integer value plus a fixed 'byte' field."""

    def __init__(self):
        # Attribute creation order matters for the __dict__/__str__ output.
        self.value = 0
        self.byte = 3

    def changeValue(self, new):
        """Replace the stored value with *new*."""
        self.value = new

    def readValue(self):
        """Return the currently stored value."""
        return self.value

    def __str__(self):
        return "{}".format(self.__dict__)
class Editeur(object):
    """Thin proxy that writes to / reads from a shared Edit instance."""

    def __init__(self, Edit):
        # NOTE: the parameter name deliberately shadows the Edit class.
        self.Edit = Edit

    def editer(self, new):
        """Write *new* into the wrapped editor."""
        self.Edit.changeValue(new)

    def lire(self):
        """Read the wrapped editor's current value."""
        return self.Edit.readValue()

    def __str__(self):
        return "{}".format(self.__dict__)
# Demo: two Editeur proxies sharing one Edit instance.
LE = Edit()
edit1 = Editeur(LE)
edit2 = Editeur(LE)
print(LE.__dict__)
print(type(dumps(LE.__dict__)))  # dumps (from json) serialises the dict to a str
print(dumps(LE.__dict__))
Jeu = None
if Jeu == 2:  # dead branch: Jeu is always None at this point
    print(Jeu)
# Python functions for NucDynamics
import sys
import numpy as np
import multiprocessing
import traceback
PROG_NAME = 'nuc_dynamics'
DESCRIPTION = 'Single-cell Hi-C genome and chromosome structure calculation module for Nuc3D and NucTools'
N3D = 'n3d'
PDB = 'pdb'
FORMATS = [N3D, PDB]
MAX_CORES = multiprocessing.cpu_count()
def warn(msg, prefix='WARNING'):
  """Print *msg* to stdout with a right-aligned, 8-character prefix."""
  text = '%8s : %s' % (prefix, msg)
  print(text)
def critical(msg, prefix='ABORT'):
  """Print a prefixed error message and terminate the process.

  NOTE(review): exits with status 0 even though this is an abort path, so
  shell scripts checking the exit code cannot detect failure — confirm
  whether a non-zero exit code was intended.
  """
  print('%8s : %s' % (prefix, msg))
  sys.exit(0)
def _parallel_func_wrapper(queue, target_func, proc_data, common_args):
  """Worker-process loop: run target_func over this process's share of tasks.

  Args:
    queue: multiprocessing.Queue receiving (task_id, result) pairs, or None
           when the caller does not collect output.
    target_func: callable invoked as target_func(data_item, *common_args).
    proc_data: list of (task_id, data_item) pairs assigned to this worker.
    common_args: extra positional arguments shared by every task.
  """
  for t, data_item in proc_data:
    result = target_func(data_item, *common_args)

    if queue:
      queue.put((t, result))

    elif isinstance(result, Exception):
      # No queue to report through: re-raise in the worker so the failure
      # is at least visible on stderr. (Only triggers when target_func
      # RETURNS an exception object, as anneal_model does.)
      raise(result)
def parallel_split_job(target_func, split_data, common_args, num_cpu=MAX_CORES, collect_output=True):
  """Run target_func over the items of split_data in parallel processes.

  Args:
    target_func: callable invoked as target_func(data_item, *common_args);
                 must be picklable (module-level function).
    split_data: sequence of independent work items.
    common_args: extra positional arguments passed to every call.
    num_cpu: maximum number of worker processes to spawn.
    collect_output: when True, gather per-task results via a Queue and
                    return them ordered like split_data; when False,
                    just wait for the workers and return None.

  Returns:
    list of results in task order, or None when collect_output is False.

  Raises:
    whatever Exception instance a task returned (re-raised in the parent).
  """
  num_tasks = len(split_data)
  num_process = min(num_cpu, num_tasks)
  processes = []

  if collect_output:
    queue = multiprocessing.Queue() # Queue will collect parallel process output
  else:
    queue = None

  for p in range(num_process):
    # Task IDs and data for each task
    # Each process can have multiple tasks if there are more tasks than processes/cpus
    proc_data = [(t, data_item) for t, data_item in enumerate(split_data) if t % num_cpu == p]
    args = (queue, target_func, proc_data, common_args)
    proc = multiprocessing.Process(target=_parallel_func_wrapper, args=args)
    processes.append(proc)

  for proc in processes:
    proc.start()

  if queue:
    results = [None] * num_tasks
    for i in range(num_tasks):
      t, result = queue.get() # Asynchronous fetch output: whichever process completes a task first

      if isinstance(result, Exception):
        print('\n* * * * C/Cython code may need to be recompiled. Try running "python setup_cython.py build_ext --inplace" * * * *\n')
        raise(result)

      results[t] = result

    queue.close()

    # BUGFIX: reap the worker processes. Previously they were never joined
    # in this branch, leaving zombie children behind. All results are
    # already collected, so these joins cannot block for long.
    for proc in processes:
      proc.join()

    return results

  else:
    for proc in processes: # Asynchronous wait and no output captured
      proc.join()
def load_ncc_file(file_path):
  """Load chromosome and contact data from NCC format file, as output from NucProcess

  Returns:
    chromosomes: sorted list of chromosome names present in the file.
    chromo_limits: {chromo: [min_pos, max_pos]} observed sequence bounds.
    contact_dict: {chr_a: {chr_b: ndarray}} with rows
                  (pos_a, pos_b, num_obs, ambig_group), chr_a <= chr_b.
  """

  if file_path.endswith('.gz'):
    import gzip
    file_obj = gzip.open(file_path)

  else:
    file_obj = open(file_path)

  # Observations are treated individually in single-cell Hi-C,
  # i.e. no binning, so num_obs always 1 for each contact
  num_obs = 1

  contact_dict = {}
  chromosomes = set()

  for line in file_obj:
    chr_a, f_start_a, f_end_a, start_a, end_a, strand_a, chr_b, f_start_b, f_end_b, start_b, end_b, strand_b, ambig_group, pair_id, swap_pair = line.split()

    # The strand decides which fragment end is the ligation-proximal position
    if strand_a == '+':
      pos_a = int(f_start_a)
    else:
      pos_a = int(f_end_a)

    if strand_b == '+':
      pos_b = int(f_start_b)
    else:
      pos_b = int(f_end_b)

    # Canonical ordering: always store under the lexicographically smaller name
    if chr_a > chr_b:
      chr_a, chr_b = chr_b, chr_a
      pos_a, pos_b = pos_b, pos_a

    if chr_a not in contact_dict:
      contact_dict[chr_a] = {}
      chromosomes.add(chr_a)

    if chr_b not in contact_dict[chr_a]:
      contact_dict[chr_a][chr_b] = []
      chromosomes.add(chr_b)

    contact_dict[chr_a][chr_b].append((pos_a, pos_b, num_obs, int(ambig_group)))

  file_obj.close()

  chromo_limits = {}

  for chr_a in contact_dict:
    for chr_b in contact_dict[chr_a]:
      # Convert contact tuples to a (4, n) array; rows are pos_a, pos_b, obs, group
      contacts = np.array(contact_dict[chr_a][chr_b]).T
      contact_dict[chr_a][chr_b] = contacts

      seq_pos_a = contacts[0]
      seq_pos_b = contacts[1]

      min_a = min(seq_pos_a)
      max_a = max(seq_pos_a)
      min_b = min(seq_pos_b)
      max_b = max(seq_pos_b)

      # Grow each chromosome's bounds over all chromosome pairs it appears in
      if chr_a in chromo_limits:
        prev_min, prev_max = chromo_limits[chr_a]
        chromo_limits[chr_a] = [min(prev_min, min_a), max(prev_max, max_a)]
      else:
        chromo_limits[chr_a] = [min_a, max_a]

      if chr_b in chromo_limits:
        prev_min, prev_max = chromo_limits[chr_b]
        chromo_limits[chr_b] = [min(prev_min, min_b), max(prev_max, max_b)]
      else:
        chromo_limits[chr_b] = [min_b, max_b]

  chromosomes = sorted(chromosomes)

  return chromosomes, chromo_limits, contact_dict
def export_n3d_coords(file_path, coords_dict, seq_pos_dict):
  """Write per-chromosome model coordinates to an N3D format text file.

  Each chromosome block starts with a header line 'name<TAB>n_coords<TAB>n_models'
  followed by one line per particle: seq position then x,y,z for every model.
  """
  file_obj = open(file_path, 'w')
  write = file_obj.write

  for chromo in seq_pos_dict:
    chromo_coords = coords_dict[chromo]
    chromo_seq_pos = seq_pos_dict[chromo]

    num_models = len(chromo_coords)
    num_coords = len(chromo_seq_pos)

    line = '%s\t%d\t%d\n' % (chromo, num_coords, num_models)
    write(line)

    for j in range(num_coords):
      # All models' coordinates for particle j, flattened model-major
      data = chromo_coords[:,j].ravel().tolist()
      data = '\t'.join('%.8f' % d for d in data)

      line = '%d\t%s\n' % (chromo_seq_pos[j], data)
      write(line)

  file_obj.close()
def export_pdb_coords(file_path, coords_dict, seq_pos_dict, particle_size, scale=1.0, extended=True):
  """
  Write chromosome particle coordinates as a PDB format file

  Args:
    file_path: output file path.
    coords_dict: {chromo: array of (num_models, num_particles, 3)} coordinates.
    seq_pos_dict: {chromo: seq positions} matching the coordinate particles.
    particle_size: bases per particle (recorded in a REMARK line only).
    scale: unused here; kept for API compatibility.
    extended: when True, append each particle's seq. position as an extra column.
  """

  alc = ' '
  ins = ' '
  prefix = 'HETATM'
  line_format = '%-80.80s\n'

  if extended:
    pdb_format = '%-6.6s%5.1d %4.4s%s%3.3s %s%4.1d%s   %8.3f%8.3f%8.3f%6.2f%6.2f          %2.2s  %10d\n'
    ter_format = '%-6.6s%5.1d      %s %s%4.1d%s                                                     %10d\n'
  else:
    pdb_format = '%-6.6s%5.1d %4.4s%s%3.3s %s%4.1d%s   %8.3f%8.3f%8.3f%6.2f%6.2f          %2.2s  \n'
    ter_format = '%-6.6s%5.1d      %s %s%4.1d%s\n'

  file_obj = open(file_path, 'w')
  write = file_obj.write

  # Sort chromosomes numerically where possible ('chr2' before 'chr11')
  chromosomes = list(seq_pos_dict.keys())
  sort_chromos = []
  for chromo in chromosomes:
    if chromo[:3] == 'chr':
      key = chromo[3:]
    else:
      key = chromo

    if key.isdigit():
      key = '%03d' % int(key)

    sort_chromos.append((key, chromo))

  sort_chromos.sort()
  sort_chromos = [x[1] for x in sort_chromos]

  num_models = len(coords_dict[chromosomes[0]])
  title = 'NucDynamics genome structure export'

  write(line_format % 'TITLE     %s' % title)
  write(line_format % 'REMARK 210')
  write(line_format % 'REMARK 210 Atom type C is used for all particles')
  write(line_format % 'REMARK 210 Atom number increases every %s bases' % particle_size)
  write(line_format % 'REMARK 210 Residue code indicates chromosome')
  write(line_format % 'REMARK 210 Residue number represents which sequence Mb the atom is in')
  write(line_format % 'REMARK 210 Chain letter is different every chromosome, where Chr1=a, Chr2=b etc.')

  if extended:
    file_obj.write(line_format % 'REMARK 210 Extended PDB format with particle seq. pos. in last column')

  file_obj.write(line_format % 'REMARK 210')

  pos_chromo = {}

  for m in range(num_models):
    line = 'MODEL     %4d' % (m+1)
    write(line_format % line)

    c = 0
    j = 1
    seqPrev = None
    for k, chromo in enumerate(sort_chromos):
      chain_code = chr(ord('a')+k)

      # Build a 3-letter residue code from the chromosome name
      tlc = chromo
      while len(tlc) < 2:
        tlc = '_' + tlc

      if len(tlc) == 2:
        tlc = 'C' + tlc

      if len(tlc) > 3:
        tlc = tlc[:3]

      chromo_model_coords = coords_dict[chromo][m]

      if not len(chromo_model_coords):
        continue

      pos = seq_pos_dict[chromo]

      for i, seqPos in enumerate(pos):
        c += 1

        seqMb = int(seqPos//1e6) + 1   # residue number = Mb position

        # Atom name C1, C2, ... restarts inside each Mb residue
        if seqMb == seqPrev:
          j += 1
        else:
          j = 1

        el = 'C'
        a = 'C%d' % j

        aName = '%-3s' % a
        x, y, z = chromo_model_coords[i] #XYZ coordinates

        seqPrev = seqMb
        pos_chromo[c] = chromo

        if extended:
          line = pdb_format % (prefix,c,aName,alc,tlc,chain_code,seqMb,ins,x,y,z,0.0,0.0,el,seqPos)
        else:
          line = pdb_format % (prefix,c,aName,alc,tlc,chain_code,seqMb,ins,x,y,z,0.0,0.0,el)

        write(line)

    write(line_format % 'ENDMDL')

  # CONECT records join sequence-adjacent particles of the same chromosome.
  # BUGFIX: iterate range(c-1) so the final bond (c-1, c) is written too;
  # the previous range(c-2) stopped at pair (c-2, c-1).
  for i in range(c-1):
    if pos_chromo[i+1] == pos_chromo[i+2]:
      line = 'CONECT%5.1d%5.1d' % (i+1, i+2)
      write(line_format % line)

  write(line_format % 'END')
  file_obj.close()
def remove_isolated_contacts(contact_dict, threshold=int(2e6)):
  """
  Select only contacts which are within a given sequence separation of another
  contact, for the same chromosome pair

  Modifies contact_dict in place and also returns it; the support test
  itself is delegated to dyn_util.getSupportedPairs.
  """

  for chromoA in contact_dict:
    for chromoB in contact_dict[chromoA]:
      contacts = contact_dict[chromoA][chromoB]
      positions = np.array(contacts[:2], np.int32).T

      if len(positions): # Sometimes empty e.g. for MT, Y chromos
        active_idx = dyn_util.getSupportedPairs(positions, np.int32(threshold))
        contact_dict[chromoA][chromoB] = contacts[:,active_idx]

  return contact_dict
def remove_violated_contacts(contact_dict, coords_dict, particle_seq_pos, particle_size, threshold=5.0):
  """
  Remove contacts with structure distances that exceed a given threshold;
  distances are averaged over all conformational models. Modifies
  contact_dict in place and also returns it.
  """

  for chr_a in contact_dict:
    for chr_b in contact_dict[chr_a]:
      contacts = contact_dict[chr_a][chr_b]

      contact_pos_a = contacts[0].astype(np.int32)
      contact_pos_b = contacts[1].astype(np.int32)

      coords_a = coords_dict[chr_a]
      coords_b = coords_dict[chr_b]

      struc_dists = []

      for m in range(len(coords_a)):
        # Interpolate each model's coordinates at the exact contact positions
        coord_data_a = dyn_util.getInterpolatedCoords([chr_a], {chr_a:contact_pos_a}, particle_seq_pos, coords_a[m])
        coord_data_b = dyn_util.getInterpolatedCoords([chr_b], {chr_b:contact_pos_b}, particle_seq_pos, coords_b[m])

        deltas = coord_data_a - coord_data_b
        dists = np.sqrt((deltas*deltas).sum(axis=1))
        struc_dists.append(dists)

      # Average over all conformational models
      struc_dists = np.array(struc_dists).T.mean(axis=1)

      # Select contacts with distances below distance threshold
      indices = (struc_dists < threshold).nonzero()[0]
      contact_dict[chr_a][chr_b] = contacts[:,indices]

  return contact_dict
def get_random_coords(pos_dict, chromosomes, num_models, radius=10.0):
  """
  Get random, uniformly sampled coordinate positions, restricted to
  a sphere of the given radius (rejection sampling from the bounding cube).
  """
  from numpy.random import uniform

  num_particles = sum(len(pos_dict[chromo]) for chromo in chromosomes)
  coords = np.empty((num_models, num_particles, 3))
  limit_sq = radius * radius

  for model in range(num_models):
    for particle in range(num_particles):
      # Sample from the cube [-radius, radius]^3 until inside the sphere
      while True:
        px = radius * (2*uniform(0,1) - 1)
        py = radius * (2*uniform(0,1) - 1)
        pz = radius * (2*uniform(0,1) - 1)
        if px*px + py*py + pz*pz < limit_sq:
          break

      coords[model, particle] = [px, py, pz]

  return coords
def pack_chromo_coords(coords_dict, chromosomes):
  """
  Concatenate per-chromosome 3D coordinates (a dict keyed by chromosome
  name) into a single ordered array; the *chromosomes* argument fixes the
  storage order along the particle axis.
  """
  sizes = [len(coords_dict[chromo][0]) for chromo in chromosomes]
  n_models = len(coords_dict[chromosomes[0]])
  coords = np.empty((n_models, sum(sizes), 3), float)

  offset = 0
  for chromo, size in zip(chromosomes, sizes):
    coords[:, offset:offset+size] = coords_dict[chromo]
    offset += size

  return coords
def unpack_chromo_coords(coords, chromosomes, seq_pos_dict):
  """
  Split a packed (n_models, n_particles, 3) coordinate array back into a
  dict keyed by chromosome name. The *chromosomes* argument gives the
  storage order; seq_pos_dict supplies each chromosome's particle count.

  Raises Exception when the packed particle count does not match.
  """
  sizes = [len(seq_pos_dict[chromo]) for chromo in chromosomes]
  total = sum(sizes)
  n_models, n_particles, dims = coords.shape

  if total != n_particles:
    raise Exception('Model coordinates must be an array of num models x %d' % (total,))

  coords_dict = {}
  offset = 0
  for chromo, size in zip(chromosomes, sizes):
    coords_dict[chromo] = coords[:, offset:offset+size] # all models, slice
    offset += size

  return coords_dict
def anneal_model(model_data, anneal_schedule, masses, radii, restraint_indices, restraint_dists,
                 ambiguity, temp, time_step, dyn_steps, repulse, n_rep_max):
  """Anneal one conformational model through the full temperature schedule.

  Args:
    model_data: (model_index, coords_array) pair for this model.
    anneal_schedule: list of (temperature, repulsion) steps.
    masses, radii: per-particle dynamics properties.
    restraint_indices, restraint_dists: distance restraint definitions.
    ambiguity: restraint ambiguity strides.
    temp, repulse: superseded by the schedule values inside the loop.
    time_step, dyn_steps: dynamics integration parameters.
    n_rep_max: initial size bound for the repulsion pair list.

  Returns:
    the annealed, centred coordinates — or the Exception raised by the
    dynamics engine (returned, not raised, so the parent can re-raise).
  """
  import gc

  m, model_coords = model_data

  # Anneal one model in parallel

  time_taken = 0.0

  # Only the first model reports dynamics progress
  if m == 0:
    printInterval = max(1, dyn_steps/2)

  else:
    printInterval = 0

  print('  starting model %d' % m)

  for temp, repulse in anneal_schedule:
    gc.collect() # Try to free some memory

    # Update coordinates for this temp

    try:
      dt, n_rep_max = dyn_util.runDynamics(model_coords, masses, radii, restraint_indices, restraint_dists,
                                           ambiguity, temp, time_step, dyn_steps, repulse, nRepMax=n_rep_max,
                                           printInterval=printInterval)
    except Exception as err:
      return err

    n_rep_max = np.int32(1.05 * n_rep_max) # Base on num in prev cycle, plus a small overhead
    time_taken += dt

  # Center
  model_coords -= model_coords.mean(axis=0)

  print('  done model %d' % m)

  return model_coords
def anneal_genome(chromosomes, contact_dict, num_models, particle_size,
                  general_calc_params, anneal_params,
                  prev_seq_pos_dict=None, start_coords=None, num_cpu=MAX_CORES):
  """
  Use chromosome contact data to generate distance restraints and then
  apply a simulated annealing protocol to generate/refine coordinates.
  Starting coordinates may be random or from a previous (e.g. lower
  resolution) stage.

  Returns:
    coords_dict: {chromo: (num_models, n_particles, 3) coordinates}
    seq_pos_dict: {chromo: particle sequence positions}
  """
  from numpy import random
  from math import log, exp, atan, pi

  random.seed(general_calc_params['random_seed'])
  particle_size = np.int32(particle_size)

  # Calculate distance restraints from contact data
  restraint_dict, seq_pos_dict = dyn_util.calc_restraints(chromosomes, contact_dict, particle_size,
                                                          scale=1.0, exponent=general_calc_params['dist_power_law'],
                                                          lower=general_calc_params['contact_dist_lower'],
                                                          upper=general_calc_params['contact_dist_upper'])

  # Concatenate chromosomal data into a single array of particle restraints
  # for structure calculation. Add backbone restraints between seq. adjacent particles.
  restraint_indices, restraint_dists = dyn_util.concatenate_restraints(restraint_dict, seq_pos_dict, particle_size,
                                                                       general_calc_params['backbone_dist_lower'],
                                                                       general_calc_params['backbone_dist_upper'])

  # Setup starting structure
  if (start_coords is None) or (prev_seq_pos_dict is None):
    coords = get_random_coords(seq_pos_dict, chromosomes, num_models,
                               general_calc_params['random_radius'])

    num_coords = coords.shape[1]

  else:
    # Convert starting coord dict into single array
    coords = pack_chromo_coords(start_coords, chromosomes)
    num_coords = sum([len(seq_pos_dict[c]) for c in chromosomes])

    if coords.shape[1] != num_coords: # Change of particle_size
      interp_coords = np.empty((num_models, num_coords, 3))

      for m in range(num_models): # Starting coords interpolated from previous particle positions
        interp_coords[m] = dyn_util.getInterpolatedCoords(chromosomes, seq_pos_dict, prev_seq_pos_dict, coords[m])

      coords = interp_coords

  # Equal unit masses and radii for all particles
  masses = np.ones(num_coords, float)
  radii = np.ones(num_coords, float)

  # Ambiguity strides not used here, so set to 1
  num_restraints = len(restraint_indices)
  ambiguity = np.ones(num_restraints, np.int32)

  # Below will be set to restrict memory allocation in the repulsion list
  # (otherwise all vs all can be huge)
  n_rep_max = np.int32(0)

  # Annealing parameters
  temp_start = anneal_params['temp_start']
  temp_end = anneal_params['temp_end']
  temp_steps = anneal_params['temp_steps']

  # Setup annealing schedule: setup temps and repulsive terms
  adj = 1.0 / atan(10.0)
  decay = log(temp_start/temp_end)
  anneal_schedule = []

  for step in range(temp_steps):
    frac = step/float(temp_steps)

    # exponential temp decay
    temp = temp_start * exp(-decay*frac)

    # sigmoidal repulsion scheme
    repulse = 0.5 + adj * atan(frac*20.0-10) / pi

    anneal_schedule.append((temp, repulse))

  # Particle dynamics parameters
  # (these need not be fixed for all stages, but are for simplicity)
  dyn_steps = anneal_params['dynamics_steps']
  time_step = anneal_params['time_step']

  # Update coordinates in the annealing schedule which is applied to each model in parallel
  common_args = [anneal_schedule, masses, radii, restraint_indices, restraint_dists,
                 ambiguity, temp, time_step, dyn_steps, repulse, n_rep_max]
  task_data = [(m, coords[m]) for m in range(len(coords))]

  coords = parallel_split_job(anneal_model, task_data, common_args, num_cpu, collect_output=True)
  coords = np.array(coords)

  # Convert from single coord array to dict keyed by chromosome
  coords_dict = unpack_chromo_coords(coords, chromosomes, seq_pos_dict)

  return coords_dict, seq_pos_dict
def open_file(file_path, mode=None, gzip_exts=('.gz','.gzip')):
  """
  GZIP agnostic file opening: paths ending in a gzip extension are opened
  transparently through the gzip module, others through builtin open().

  Args:
    file_path: path of the file to open.
    mode: optional open mode; defaults to text read.
    gzip_exts: extensions (lower-case) treated as gzip-compressed.

  Returns:
    an open file object.
  """
  # BUGFIX: this module never imports os or gzip at the top level, so the
  # previous body raised NameError at runtime; import them locally here.
  import gzip
  import os

  IO_BUFFER = int(4e6)

  if os.path.splitext(file_path)[1].lower() in gzip_exts:
    file_obj = gzip.open(file_path, mode or 'rt')
  else:
    # BUGFIX: 'rU' was deprecated since Python 3.4 and removed in 3.11;
    # plain 'r' gives the same universal-newline text behaviour.
    file_obj = open(file_path, mode or 'r', IO_BUFFER)

  return file_obj
def load_n3d_coords(file_path):
  """
  Load genome structure coordinates and particle sequence positions from an N3D format file.

  Args:
      file_path: str ; Location of N3D (text) format file

  Returns:
      dict {str:ndarray(n_coords, int)} ; {chromo: seq_pos_array}
      dict {str:ndarray((n_models, n_coords, 3), float)} ; {chromo: coord_3d_array}

  """
  seq_pos_dict = {}
  coords_dict = {}

  with open_file(file_path) as file_obj:
    chromo = None

    for line in file_obj:
      data = line.split()
      n_items = len(data)

      if not n_items:
        continue

      elif data[0] == '#':
        continue

      elif n_items == 3:
        # Chromosome header line: name, particle count, model count
        chromo, n_coords, n_models = data

        #if chromo.lower()[:3] == 'chr':
        #  chromo = chromo[3:]

        n_coords = int(n_coords)
        n_models = int(n_models)

        #chromo_seq_pos = np.empty(n_coords, int)
        chromo_seq_pos = np.empty(n_coords, 'int32')
        chromo_coords = np.empty((n_models, n_coords, 3), float)

        coords_dict[chromo] = chromo_coords
        seq_pos_dict[chromo] = chromo_seq_pos

        check = (n_models * 3) + 1  # expected fields on each particle line
        i = 0

      elif not chromo:
        raise Exception('Missing chromosome record in file %s' % file_path)

      elif n_items != check:
        msg = 'Data size in file %s does not match Position + Models * Positions * 3'
        raise Exception(msg % file_path)

      else:
        # Particle line: seq position followed by x,y,z for each model
        chromo_seq_pos[i] = int(data[0])

        coord = [float(x) for x in data[1:]]
        coord = np.array(coord).reshape(n_models, 3)
        chromo_coords[:,i] = coord
        i += 1

  return seq_pos_dict, coords_dict
def export_coords(out_format, out_file_path, coords_dict, particle_seq_pos, particle_size):
  # Save final coords as N3D or PDB format file, appending the matching
  # file extension to out_file_path when it is missing.
  if out_format == PDB:
    if not out_file_path.endswith(PDB):
      out_file_path = '%s.%s' % (out_file_path, PDB)

    export_pdb_coords(out_file_path, coords_dict, particle_seq_pos, particle_size)

  else:
    # Anything other than PDB falls back to N3D output
    if not out_file_path.endswith(N3D):
      out_file_path = '%s.%s' % (out_file_path, N3D)

    export_n3d_coords(out_file_path, coords_dict, particle_seq_pos)

  print('Saved structure file to: %s' % out_file_path)
def calc_genome_structure(ncc_file_path, out_file_path, general_calc_params, anneal_params,
                          particle_sizes, num_models=5, isolation_threshold=2e6,
                          out_format=N3D, num_cpu=MAX_CORES,
                          start_coords_path=None, save_intermediate=False):
  """Calculate a genome structure from an NCC contact file via hierarchical annealing.

  Args:
    ncc_file_path: input NCC contact file (as output from NucProcess).
    out_file_path: path for the final coordinate file.
    general_calc_params: dict of restraint / starting-coordinate parameters.
    anneal_params: dict of annealing schedule parameters.
    particle_sizes: descending list of particle sizes (bp), one per stage.
    num_models: number of alternative conformations to calculate.
    isolation_threshold: max sequence separation (bp) for a contact to be
                         considered supported by a neighbouring contact.
    out_format: N3D or PDB.
    num_cpu: CPU cores used to anneal models in parallel.
    start_coords_path: optional N3D file of starting coordinates; overrides
                       num_models when present.
    save_intermediate: write coordinates after each stage when True.
  """
  from time import time

  # Load single-cell Hi-C data from NCC contact file, as output from NucProcess
  chromosomes, chromo_limits, contact_dict = load_ncc_file(ncc_file_path)

  # Only use contacts which are supported by others nearby in sequence, in the initial instance
  remove_isolated_contacts(contact_dict, threshold=isolation_threshold)

  # Initial coords will be random
  start_coords = None

  # Record particle positions from previous stages
  # so that coordinates can be interpolated to higher resolution
  prev_seq_pos = None

  if start_coords_path:
    prev_seq_pos, start_coords = load_n3d_coords(start_coords_path)
    if start_coords:
      chromo = next(iter(start_coords)) # picks out arbitrary chromosome
      num_models = len(start_coords[chromo])

  for stage, particle_size in enumerate(particle_sizes):

    print("Running structure caculation stage %d (%d kb)" % (stage+1, (particle_size/1e3)))

    # Can remove large violations (noise contacts inconsistent with structure)
    # once we have a reasonable resolution structure
    if stage > 0:
      # BUGFIX: test the finer size first. The original ordering checked
      # "< 0.5e6" before "elif < 0.25e6", which made the 0.25e6 branch
      # unreachable (anything < 0.25e6 is also < 0.5e6), so the tighter
      # 5.0 threshold was never applied.
      if particle_size < 0.25e6:
        remove_violated_contacts(contact_dict, coords_dict, particle_seq_pos,
                                 particle_size, threshold=5.0)
      elif particle_size < 0.5e6:
        remove_violated_contacts(contact_dict, coords_dict, particle_seq_pos,
                                 particle_size, threshold=6.0)

    coords_dict, particle_seq_pos = anneal_genome(chromosomes, contact_dict, num_models, particle_size,
                                                  general_calc_params, anneal_params,
                                                  prev_seq_pos, start_coords, num_cpu)

    if save_intermediate and stage < len(particle_sizes)-1:
      file_path = '%s_%d.%s' % (out_file_path[:-4], stage, out_file_path[-3:]) # DANGER: assumes that suffix is 3 chars
      export_coords(out_format, file_path, coords_dict, particle_seq_pos, particle_size)

    # Next stage based on previous stage's 3D coords
    # and their respective seq. positions
    start_coords = coords_dict
    prev_seq_pos = particle_seq_pos

  # Save final coords
  export_coords(out_format, out_file_path, coords_dict, particle_seq_pos, particle_size)
def test_imports(gui=False):
  """Check that numpy, cython and the compiled dyn_util extension import.

  Attempts an in-place Cython compilation when dyn_util is missing, then
  terminates the process (sys.exit) if anything critical is unavailable.
  The *gui* flag is currently unused.
  """
  import sys
  from distutils.core import run_setup

  critical = False  # local flag; shadows the module-level critical() function

  try:
    import numpy
  except ImportError as err:
    critical = True
    warn('Critical Python module "numpy" is not installed or accessible')

  try:
    import cython
  except ImportError as err:
    critical = True
    warn('Critical Python module "cython" is not installed or accessible')

  try:
    import dyn_util
  except ImportError as err:
    import os
    cwd = os.getcwd()

    try:
      # Compile from the directory containing this file, restoring cwd after
      os.chdir(os.path.dirname(os.path.normpath(__file__)))
      warn('Utility C/Cython code not compiled. Attempting to compile now...')
      run_setup('setup_cython.py', ['build_ext', '--inplace'])
    finally:
      os.chdir(cwd)

    try:
      import dyn_util
      warn('NucDynamics C/Cython code compiled. Please re-run command.')
      sys.exit(0)

    except ImportError as err:
      critical = True
      warn('Utility C/Cython code compilation/import failed')

  if critical:
    warn('NucDynamics cannot proceed because critical Python modules are not available', 'ABORT')
    sys.exit(0)
def demo_calc_genome_structure():
  """
  Example of settings for a typical genome structure calculation from input single-cell
  Hi-C contacts stored in an NCC format file (as output from NucProcess)
  """
  # BUGFIX: time() was used below without being imported anywhere in this
  # module's top level, raising NameError when the demo ran.
  from time import time
  from nuc_dynamics import calc_genome_structure

  ncc_file_path = 'example_chromo_data/Cell_1_contacts.ncc'
  save_path = 'example_chromo_data/Cell_1_structure.pdb'

  # Number of alternative conformations to generate from repeat calculations
  # with different random starting coordinates
  num_models = 2

  # Parameters to setup restraints and starting coords
  general_calc_params = {'dist_power_law':-0.33,
                         'contact_dist_lower':0.8, 'contact_dist_upper':1.2,
                         'backbone_dist_lower':0.1, 'backbone_dist_upper':1.1,
                         'random_seed':int(time()), 'random_radius':10.0}

  # Annealing & dyamics parameters: the same for all stages
  # (this is cautious, but not an absolute requirement)
  anneal_params = {'temp_start':5000.0, 'temp_end':10.0, 'temp_steps':500,
                   'dynamics_steps':100, 'time_step':0.001}

  # Hierarchical scale protocol: coarse (8 Mb) to fine (100 kb) particles
  particle_sizes = [8e6, 4e6, 2e6, 4e5, 2e5, 1e5]

  # Contacts must be clustered with another within this separation threshold
  # (at both ends) to be considered supported, i.e. not isolated
  isolation_threshold=2e6

  calc_genome_structure(ncc_file_path, save_path, general_calc_params, anneal_params,
                        particle_sizes, num_models, isolation_threshold, out_format='pdb')
# Import-time sanity check: verify numpy/cython are present and compile the
# Cython extension if needed, then import it for use by the functions above.
test_imports()
import dyn_util
if __name__ == '__main__':
  # Command-line entry point: parse arguments, validate them, then run the
  # hierarchical genome structure calculation.
  import os, sys
  from time import time
  from argparse import ArgumentParser

  epilog = 'For further help on running this program please email tjs23@cam.ac.uk'

  arg_parse = ArgumentParser(prog=PROG_NAME, description=DESCRIPTION,
                             epilog=epilog, prefix_chars='-', add_help=True)

  arg_parse.add_argument('ncc_path', nargs=1, metavar='NCC_FILE',
                         help='Input NCC format file containing single-cell Hi-C contact data, e.g. use the demo data at example_chromo_data/Cell_1_contacts.ncc')

  arg_parse.add_argument('-o', metavar='OUT_FILE',
                         help='Optional name of output file for 3D coordinates in N3D or PDB format (see -f option). If not set this will be auto-generated from the input file name')

  arg_parse.add_argument('-save_intermediate', default=False, action='store_true',
                         help='Write out intermediate coordinate files.')

  arg_parse.add_argument('-start_coords_path', metavar='N3D_FILE',
                         help='Initial 3D coordinates in N3D format. If set this will override -m flag.')

  arg_parse.add_argument('-m', default=1, metavar='NUM_MODELS',
                         type=int, help='Number of alternative conformations to generate from repeat calculations with different random starting coordinates: Default: 1')

  arg_parse.add_argument('-f', metavar='OUT_FORMAT', default=N3D,
                         help='File format for output 3D coordinate file. Default: "%s". Also available: "%s"' % (N3D, PDB))

  arg_parse.add_argument('-s', nargs='+', default=[8.0,4.0,2.0,0.4,0.2,0.1], metavar='Mb_SIZE', type=float,
                         help='One or more sizes (Mb) for the hierarchical structure calculation protocol (will be used in descending order). Default: 8.0 4.0 2.0 0.4 0.2 0.1')

  arg_parse.add_argument('-cpu', metavar='NUM_CPU', type=int, default=MAX_CORES,
                         help='Number of parallel CPU cores for calculating different coordinate models. Limited by the number of models (-m) but otherwise defaults to all available CPU cores (%d)' % MAX_CORES)

  arg_parse.add_argument('-iso', default=2.0, metavar='Mb_SIZE', type=float,
                         help='Contacts must be near another, within this (Mb) separation threshold (at both ends) to be considered supported: Default 2.0')

  arg_parse.add_argument('-pow', default=-0.33, metavar='FLOAT',
                         type=float, help='Distance power law for combining multiple Hi-C contacts between the same particles. Default: -0.33')

  arg_parse.add_argument('-lower', default=0.8, metavar='DISTANCE',
                         type=float, help='Lower limit for a contact-derived distance restraint, as a fraction of the ideal distance. Default: 0.8')

  arg_parse.add_argument('-upper', default=1.2, metavar='DISTANCE',
                         type=float, help='Upper limit for a contact-derived distance restraint, as a fraction of the ideal distance. Default: 1.2')

  arg_parse.add_argument('-bb_lower', default=0.1, metavar='DISTANCE',
                         type=float, help='Lower limit for sequential particle backbone restraints, as a fraction of the ideal distance. Default: 0.1')

  arg_parse.add_argument('-bb_upper', default=1.1, metavar='DISTANCE',
                         type=float, help='Upper limit for sequential particle backbone restraints, as a fraction of the ideal distance. Default: 1.1')

  arg_parse.add_argument('-ran', metavar='INT',
                         type=int, help='Seed for psuedo-random number generator')

  arg_parse.add_argument('-rad', default=10.0, metavar='DISTANCE',
                         type=float, help='Radius of sphere for random starting coordinates. Default: 10.0')

  arg_parse.add_argument('-hot', default=5000.0, metavar='TEMP_KELVIN',
                         type=float, help='Start annealing temperature in pseudo-Kelvin units. Default: 5000')

  arg_parse.add_argument('-cold', default=10.0, metavar='TEMP_KELVIN',
                         type=float, help='End annealing temperature in pseudo-Kelvin units. Default: 10')

  arg_parse.add_argument('-temps', default=500, metavar='NUM_STEPS',
                         type=int, help='Number of temperature steps in annealing protocol between start and end temperature. Default: 500')

  arg_parse.add_argument('-dyns', default=100, metavar='NUM_STEPS',
                         type=int, help='Number of particle dynamics steps to apply at each temperature in the annealing protocol. Default: 100')

  arg_parse.add_argument('-time_step', default=0.001, metavar='TIME_DELTA',
                         type=float, help='Simulation time step between re-calculation of particle velocities. Default: 0.001')

  args = vars(arg_parse.parse_args())

  ncc_file_path = args['ncc_path'][0]

  save_path = args['o']
  if save_path is None:
    # Auto-generate the output name from the input file (drop extension)
    save_path = os.path.splitext(ncc_file_path)[0]

  # Sizes are given in Mb on the command line; convert to bp, drop
  # non-positive entries and use in descending order
  particle_sizes = args['s']
  particle_sizes = sorted([x * 1e6 for x in particle_sizes if x > 0], reverse=True)
  if not particle_sizes:
    critical('No positive particle sizes (Mb) specified')

  num_models = args['m']
  out_format = args['f'].lower()
  num_cpu = args['cpu'] or 1
  dist_power_law = args['pow']
  contact_dist_lower = args['lower']
  contact_dist_upper = args['upper']
  backbone_dist_lower = args['bb_lower']
  backbone_dist_upper = args['bb_upper']
  random_radius = args['rad']
  random_seed = args['ran']
  temp_start = args['hot']
  temp_end = args['cold']
  temp_steps = args['temps']
  dynamics_steps = args['dyns']
  time_step = args['time_step']
  isolation_threshold = args['iso']
  save_intermediate = args['save_intermediate']
  start_coords_path = args['start_coords_path']

  if out_format not in FORMATS:
    critical('Output file format must be one of: %s' % ', '.join(FORMATS))

  # Validate numeric options; 'sign' encodes the allowed range:
  # '+' strictly positive, '-' strictly negative, a '0' in the string
  # additionally permits zero.
  for val, name, sign in ((num_models, 'Number of conformational models', '+'),
                          (dist_power_law, 'Distance power law', '-0'),
                          (contact_dist_lower, 'Contact distance lower bound', '+'),
                          (contact_dist_upper, 'Contact distance upper bound', '+'),
                          (backbone_dist_lower,'Backbone distance lower bound', '+'),
                          (backbone_dist_upper,'Backbone distance upper bound', '+'),
                          (random_radius, 'Random-start radius', '+'),
                          (temp_start, 'Annealing start temperature', '+'),
                          (temp_end, 'Annealing end temperature', '+0'),
                          (temp_steps, 'Number of annealing temperature steps', '+'),
                          (dynamics_steps, 'Number of particle dynamics steps', '+'),
                          (time_step, 'Particle dynamics time steps', '+'),
                          (isolation_threshold,'Contact isolation threshold', '+0'),
                          ):

    if '+' in sign:
      if '0' in sign:
        if val < 0.0:
          critical('%s must be non-negative' % name)
      else:
        if val <= 0.0:
          critical('%s must be positive' % name)

    elif '-' in sign:
      if '0' in sign:
        if val > 0.0:
          critical('%s must be non-positive' % name)
      else:
        if val >= 0.0:
          critical('%s must be negative' % name)

  # Normalise bound ordering (lower <= upper) and temperatures (start >= end)
  contact_dist_lower, contact_dist_upper = sorted([contact_dist_lower, contact_dist_upper])
  backbone_dist_lower, backbone_dist_upper = sorted([backbone_dist_lower, backbone_dist_upper])
  temp_end, temp_start = sorted([temp_end, temp_start])

  if not random_seed:
    # No (or zero) seed specified: seed from the current time
    random_seed = int(time())

  general_calc_params = {'dist_power_law':dist_power_law,
                         'contact_dist_lower':contact_dist_lower,
                         'contact_dist_upper':contact_dist_upper,
                         'backbone_dist_lower':backbone_dist_lower,
                         'backbone_dist_upper':backbone_dist_upper,
                         'random_seed':random_seed,
                         'random_radius':random_radius}

  anneal_params = {'temp_start':temp_start, 'temp_end':temp_end, 'temp_steps':temp_steps,
                   'dynamics_steps':dynamics_steps, 'time_step':time_step}

  # Isolation threshold is given in Mb; convert to bp
  isolation_threshold *= 1e6

  calc_genome_structure(ncc_file_path, save_path, general_calc_params, anneal_params,
                        particle_sizes, num_models, isolation_threshold, out_format, num_cpu,
                        start_coords_path, save_intermediate)
# TO DO
# -----
# Allow chromosomes to be specified
# Allow starting structures to be input
|
#!/usr/bin/env python
__author__ = "Master Computer Vision. Team 02"
__license__ = "M6 Video Analysis. Task 1"
# Import libraries
import os
import math
import cv2
import numpy as np
from evaluate import evaluate_sample
# Path to save images and videos
images_path = "std-mean-images/"
video_path = "background-subtraction-videos/"
# Ground-truth pixel label values (presumably the changedetection.net
# convention — confirm). OUTSIDE_REGION and UNKNOW_MOTION pixels are removed
# before evaluation, and HARD_SHADOW is remapped to background (see adaptive()).
STATIC = 0           # static background pixel
HARD_SHADOW = 50     # shadow pixel, evaluated as background
OUTSIDE_REGION = 85  # outside the region of interest, ignored in evaluation
UNKNOW_MOTION = 170  # unknown motion status, ignored in evaluation
MOTION = 255         # moving (foreground) pixel
def get_accumulator(path_test):
    """
    Description: get accumulator structure data
    Depends on image size to define borders
    Data are coded into 32 bits of floats
    Input: path test (dataset input directory)
    Output: accumulator, a (height, width, num_frames) float32 array;
            an empty (0, 0) array for an unknown dataset path
    """
    # (height, width, number of frames) per known dataset
    shapes = {
        "./highway/input/": (240, 320, 150),
        "./fall/input/": (480, 720, 50),
        "./traffic/input/": (240, 320, 50),
    }
    # Single allocation instead of the original allocate-then-overwrite chain
    return np.zeros(shapes.get(path_test, (0, 0)), np.float32)
def adaptive(path_test, first_frame, last_frame, mu_matrix, sigma_matrix, alpha, rho, path_gt):
    """
    Description: adaptive (running-average) background subtraction.
    A per-pixel Gaussian model (mu, sigma) is updated with learning rate rho
    on background pixels only; each frame's detection mask is written to a
    video and evaluated against the groundtruth.
    Input: path_test, first_frame, last_frame, mu_matrix, sigma_matrix, alpha, rho, path_gt
    Output: accumulated FP, FN, TP, TN over all processed frames
    """
    # Initialize metrics accumulators
    AccFP = 0
    AccFN = 0
    AccTP = 0
    AccTN = 0
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(video_path+"adaptive_"+str(path_test.split("/")[1])+".avi", fourcc, 60, (get_accumulator(path_test).shape[1], get_accumulator(path_test).shape[0]))
    # Read sequence of images sorted
    for filename in sorted(os.listdir(path_test)):
        # Frame number from the file name (assumes names like 'inXXXXXX.jpg'
        # — implied by the [2:8] slice; confirm against the dataset)
        frame_num = int(filename[2:8])
        if first_frame <= frame_num <= last_frame:
            # Read image in grayscale
            frame = cv2.imread(path_test+filename, 0)
            # Pixels deviating strongly from the model mean -> detection mask
            background = abs(frame - mu_matrix) >= alpha*(sigma_matrix+2)
            # Convert bool to int values and replace 1 by 255
            background = background.astype(int)
            background[background == 1] = 255
            # Scales, calculates absolute values, and converts the result to 8-bit
            background = cv2.convertScaleAbs(background)
            # Complement of the detection mask
            foreground = cv2.bitwise_not(background)
            # Write frame into video
            video_frame = cv2.cvtColor(background, cv2.COLOR_GRAY2RGB)
            out.write(video_frame)
            # Read groundtruth image
            gt = cv2.imread(path_gt + "gt" + filename[2:8] + ".png", 0)
            background_f = background.flatten()
            gt_f = gt.flatten()
            # Drop pixels that are outside the region of interest or have
            # unknown motion status; they are not evaluated
            index2remove = [i for i, value in enumerate(gt_f)
                            if value == OUTSIDE_REGION or value == UNKNOW_MOTION]
            gt_f = np.delete(gt_f, index2remove)
            # Hard shadows are evaluated as background
            gt_f[gt_f == HARD_SHADOW] = 0
            background_f = np.delete(background_f, index2remove)
            # Evaluate results
            TP, FP, TN, FN = evaluate_sample(background_f, gt_f)
            # Accumulate metrics
            AccTP = AccTP + TP
            AccTN = AccTN + TN
            AccFP = AccFP + FP
            AccFN = AccFN + FN
            # Keep only background / foreground grayscale pixels of the frame
            background = cv2.bitwise_and(frame, frame, mask=background)
            foreground = cv2.bitwise_and(frame, frame, mask=foreground)
            # Running-average update of the mean on background pixels
            mu_matrix = (rho*background) + ((1-rho)*mu_matrix)
            # Add foreground pixels (kept unchanged)
            mu_matrix = mu_matrix + foreground
            mu_matrix = cv2.convertScaleAbs(mu_matrix)
            # Running-average update of the variance on background pixels.
            # BUG FIX: the original used '**' (exponentiation) instead of '*'
            # to weight the two terms by rho and (1-rho).
            sigma_matrix = (rho*pow((background-mu_matrix), 2)) + ((1-rho)*pow(sigma_matrix, 2))
            # Add foreground pixels (kept unchanged)
            sigma_matrix = sigma_matrix + foreground
            sigma_matrix = cv2.convertScaleAbs(sigma_matrix)
    # Release the writer so the .avi file is finalized (was never released)
    out.release()
    return AccFP, AccFN, AccTP, AccTN
|
"""Calculates total volume of all selected elements."""
import clr
clr.AddReference('RevitAPI')
import Autodesk
clr.AddReference('RevitAPIUI')
from Autodesk.Revit.UI import TaskDialog
clr.AddReference('RevitServices')
myDialog = TaskDialog("Volume Result:")
myDialog.MainInstruction = "Hello1"
myDialog.ExpandedContent = "Hello2"
myDialog.Show()
#print(unitTypes)
|
from django.core.management.base import BaseCommand
from django.conf import settings
from architect.manager.models import Manager
class Command(BaseCommand):
    """Management command that mirrors settings.MANAGER_ENGINES into Manager rows."""

    help = 'Synchronise Manager objects'

    def handle(self, *args, **options):
        """Create a Manager per configured engine, or refresh its metadata."""
        for engine_name, engine in settings.MANAGER_ENGINES.items():
            # .exists() instead of .count() == 0: avoids counting every row
            if not Manager.objects.filter(name=engine_name).exists():
                # The 'engine' key names the engine; the remaining dict
                # entries are stored as metadata
                engine_engine = engine.pop('engine')
                manager = Manager(**{
                    'engine': engine_engine,
                    'name': engine_name,
                    'metadata': engine
                })
                manager.save()
                self.stdout.write(
                    self.style.SUCCESS(
                        'Manager "{}" resource created'.format(engine_name)))
            else:
                # NOTE(review): unlike the create branch, the update branch
                # stores the engine dict with its 'engine' key still present —
                # confirm this asymmetry is intended.
                manager = Manager.objects.get(name=engine_name)
                manager.metadata = engine
                manager.save()
                self.stdout.write(
                    self.style.SUCCESS(
                        'Manager "{}" resource '
                        'updated'.format(engine_name)))
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import typing
import time
import logging
import numpy as np
import pandas as pd
from d3m import container, utils as d3m_utils
from d3m.base import utils as base_utils
from d3m.metadata import base as metadata_base, hyperparams
from d3m.primitive_interfaces import base, transformer
from distil.utils import CYTHON_DEP
from common_primitives import utils
from common_primitives import utils
import version
logger = logging.getLogger(__name__)
class Hyperparams(hyperparams.Hyperparams):
    """Hyperparameters controlling which columns get parsed and how."""

    # Semantic types that will actually be parsed; columns whose semantic
    # types don't intersect this set pass through unchanged (see produce()).
    parsing_semantics = hyperparams.Set(
        elements=hyperparams.Enumeration(
            values=[
                "http://schema.org/Boolean",
                "http://schema.org/Integer",
                "http://schema.org/Float",
                "https://metadata.datadrivendiscovery.org/types/FloatVector",
                "http://schema.org/DateTime",
                "https://metadata.datadrivendiscovery.org/types/CategoricalData",
            ],
            default="http://schema.org/Float",
        ),
        default=(
            "http://schema.org/Boolean",
            "http://schema.org/Integer",
            "http://schema.org/Float",
        ),
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="A set of semantic types to parse. One can provide a subset of supported semantic types to limit what the primitive parses.",
    )
    use_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
    )
    exclude_columns = hyperparams.Set(
        elements=hyperparams.Hyperparameter[int](-1),
        default=(),
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description='A set of column indices to not operate on. Applicable only if "use_columns" is not provided.',
    )
    # Forwarded verbatim as the errors= argument of pandas.to_numeric
    error_handling = hyperparams.Enumeration[str](
        default="coerce",
        values=("ignore", "raise", "coerce"),
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="Setting to deal with error when converting a column to numeric value.",
    )
    # Forwarded as the fuzzy= argument of the datetime parsing helper
    fuzzy_time_parsing = hyperparams.UniformBool(
        default=True,
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="Use fuzzy time parsing.",
    )
class ColumnParserPrimitive(
    transformer.TransformerPrimitiveBase[
        container.DataFrame, container.DataFrame, Hyperparams
    ]
):
    """
    A primitive which parses columns and sets the appropriate dtypes according to it's respective metadata.
    """

    metadata = metadata_base.PrimitiveMetadata(
        {
            "id": "e8e78214-9770-4c26-9eae-a45bd0ede91a",
            "version": version.__version__,
            "name": "Column Parser",
            "python_path": "d3m.primitives.data_transformation.column_parser.DistilColumnParser",
            "source": {
                "name": "Distil",
                "contact": "mailto:vkorapaty@uncharted.software",
                "uris": [
                    "https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/column_parser.py",
                    "https://gitlab.com/uncharted-distil/distil-primitives",
                ],
            },
            "installation": [
                CYTHON_DEP,
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
                        git_commit=d3m_utils.current_git_commit(
                            os.path.dirname(__file__)
                        ),
                    ),
                },
            ],
            "algorithm_types": [metadata_base.PrimitiveAlgorithmType.DATA_CONVERSION],
            "primitive_family": metadata_base.PrimitiveFamily.DATA_TRANSFORMATION,
        }
    )

    def produce(
        self,
        *,
        inputs: container.DataFrame,
        timeout: float = None,
        iterations: int = None,
    ) -> base.CallResult[container.DataFrame]:
        """Parse the selected columns of ``inputs`` per their semantic types.

        Columns are converted per-column (float vectors, datetimes,
        categoricals, numerics) and re-assembled into a new DataFrame; the
        column metadata's structural_type is updated in place on ``inputs``
        and then copied onto the output.
        """
        start = time.time()
        logger.debug(f"Producing {__name__}")
        cols = self._get_columns(inputs.metadata)
        # outputs = container.DataFrame(generate_metadata=False)
        # One slot per input column; every slot is filled below and the
        # slots are concatenated into the result at the end.
        outputs = [None] * inputs.shape[1]
        parsing_semantics = self.hyperparams["parsing_semantics"]

        def fromstring(x: str) -> np.ndarray:
            # if column isn't a string, we'll just pass it through assuming it doesn't need to be parsed
            # NOTE(review): np.fromstring on text is deprecated in NumPy;
            # consider np.fromiter over x.split(",") instead.
            if type(x) is not str:
                return x
            return np.fromstring(x, dtype=float, sep=",")

        for col_index in range(len(inputs.columns)):
            if col_index in cols:
                column_metadata = inputs.metadata.query(
                    (metadata_base.ALL_ELEMENTS, col_index)
                )
                semantic_types = column_metadata.get("semantic_types", [])
                desired_semantics = set(semantic_types).intersection(parsing_semantics)
                if desired_semantics:
                    if (
                        "https://metadata.datadrivendiscovery.org/types/FloatVector"
                        in desired_semantics
                    ):
                        # Comma-separated strings -> float ndarrays
                        outputs[col_index] = inputs.iloc[:, col_index].apply(
                            fromstring, convert_dtype=False
                        )
                        if outputs[col_index].shape[0] > 0:
                            inputs.metadata = inputs.metadata.update_column(
                                col_index,
                                {"structural_type": type(outputs[col_index][0])},
                            )
                    elif "http://schema.org/DateTime" in desired_semantics:
                        # Datetimes become floats (epoch-style), per the helper
                        outputs[col_index] = inputs.iloc[:, col_index].apply(
                            utils.parse_datetime_to_float,
                            fuzzy=self.hyperparams["fuzzy_time_parsing"],
                            convert_dtype=False,
                        )
                        inputs.metadata = inputs.metadata.update_column(
                            col_index, {"structural_type": float}
                        )
                    elif (
                        "https://metadata.datadrivendiscovery.org/types/CategoricalData"
                        in desired_semantics
                    ):
                        # need to make sure if a categorical type is a numeric string, convert it
                        # NOTE(review): only the first row is inspected, and
                        # str.isnumeric() is False for negatives/decimals —
                        # confirm this heuristic is intended.
                        if inputs[inputs.columns[col_index]][0].isnumeric():
                            outputs[col_index] = pd.to_numeric(
                                inputs.iloc[:, col_index],
                                errors=self.hyperparams["error_handling"],
                            )
                            if outputs[col_index].shape[0] > 0:
                                updated_type = type(outputs[col_index][0].item())
                                inputs.metadata = inputs.metadata.update_column(
                                    col_index, {"structural_type": updated_type}
                                )
                        else:
                            # if it's categorical but not numerical, ensure the string stays
                            outputs[col_index] = inputs.iloc[:, col_index]
                    else:
                        # Boolean/Integer/Float semantics: plain numeric parse
                        outputs[col_index] = pd.to_numeric(
                            inputs.iloc[:, col_index],
                            errors=self.hyperparams["error_handling"],
                        )
                        # Update structural type to reflect the results of the to_numeric call. We can't rely on the semantic type because
                        # error coersion may result in a type becoming a float due to the presence of NaN.
                        if outputs[col_index].shape[0] > 0:
                            updated_type = type(outputs[col_index][0].item())
                            inputs.metadata = inputs.metadata.update_column(
                                col_index, {"structural_type": updated_type}
                            )
                else:
                    # columns without specified semantics need to be concatenated
                    outputs[col_index] = inputs.iloc[:, col_index]
            else:
                # columns not specified still need to be concatenated
                outputs[col_index] = inputs.iloc[:, col_index]

        outputs = container.DataFrame(pd.concat(outputs, axis=1))
        outputs.metadata = inputs.metadata
        end = time.time()
        # NOTE(review): time.time() differences are in seconds, not ms
        logger.debug(f"Produce {__name__} completed in {end - start} ms")

        return base.CallResult(outputs)

    def _get_columns(
        self, inputs_metadata: metadata_base.DataMetadata
    ) -> typing.List[int]:
        """Return the column indices to operate on, honouring the
        use_columns/exclude_columns hyperparameters."""

        def can_use_column(column_index: int) -> bool:
            # Every column is a candidate; unsuitable ones are handled later
            return True

        columns_to_use, columns_not_to_use = base_utils.get_columns_to_use(
            inputs_metadata,
            self.hyperparams["use_columns"],
            self.hyperparams["exclude_columns"],
            can_use_column,
        )
        if self.hyperparams["use_columns"] and columns_not_to_use:
            self.logger.warning(
                "Not all specified columns can parsed. Skipping columns: %(columns)s",
                {
                    "columns": columns_not_to_use,
                },
            )
        return columns_to_use
|
from django.conf.urls import include, url
# NOTE(review): implicit relative import (Python 2 style); under Python 3 this
# only resolves if 'views' is importable as a top-level module — confirm.
import views

# Single endpoint: every request is routed to views.handleRequest
urlpatterns = [
    url(r'^handleRequest$', views.handleRequest),
]
|
# Generated by Django 2.0.2 on 2018-09-07 21:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Inquiry.when_interested: how soon the inquirer wants to proceed."""

    dependencies = [
        ('inquiry', '0002_auto_20180719_1338'),
    ]

    operations = [
        migrations.AddField(
            model_name='inquiry',
            name='when_interested',
            # Stored values are the short keys (first tuple element); the
            # second element is the human-readable label. Defaults to
            # 'not_sure' for existing rows.
            field=models.CharField(choices=[('asap', 'ASAP'), ('3_to_6_months', '3-6 Months'), ('7_to_12_months', '7-12 Months'), ('not_sure', 'Not sure yet - still learning')], default='not_sure', max_length=20),
        ),
    ]
|
from django.conf.urls import include, url
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.urls import reverse_lazy
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView, RedirectView
from decorator_include import decorator_include
from gim.front.views import HomeView, RedirectToIssueFromPK
class PrefixWithGithub(RedirectView):
    """Redirect legacy, unprefixed repository URLs to their '/github/' form."""

    def get_redirect_url(self, *args, **kwargs):
        path = self.request.get_full_path()
        # Already-prefixed paths should never reach this view; treat as 404
        if path.startswith('/github/'):
            raise Http404
        return '/github' + path
# Front-app routing: most includes are wrapped in login_required via
# decorator_include; the final catch-all redirects legacy unprefixed
# repository URLs to their /github/... form.
urlpatterns = [
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^auth/', include('gim.front.auth.urls', namespace='auth')),
    url(r'^github-notifications/', decorator_include(login_required, u'gim.front.github_notifications.urls', namespace='github-notifications')),
    url(r'^dashboard/', decorator_include(login_required, u'gim.front.dashboard.urls', namespace='dashboard')),
    url(r'^issue/(?P<issue_pk>\d+)/$', login_required(RedirectToIssueFromPK.as_view()), name=RedirectToIssueFromPK.url_name),
    url(r'^github/$', RedirectView.as_view(url=reverse_lazy('front:dashboard:home'))),
    url(r'^github/(?P<owner_username>[^/]+)/(?P<repository_name>[^/]+)/', decorator_include(login_required, u'gim.front.repository.urls', namespace='repository')),
    # Catch-all for owner/repo paths without the /github/ prefix
    url(r'^(?P<owner_username>[^/]+)/(?P<repository_name>[^/]+)/', PrefixWithGithub.as_view()),
]
|
class LinkedList:
    """Singly linked list, optionally seeded from a list of values.

    Note: the seed list is consumed destructively (its first element is
    popped), matching the original behaviour.
    """

    def __init__(self, nodes=None):
        self.head = None
        if nodes is not None:
            self.head = Node(data=nodes.pop(0))
            tail = self.head
            for value in nodes:
                tail.next = Node(data=value)
                tail = tail.next

    def __repr__(self):
        # Collect payloads head-to-tail, with a trailing "None" marker
        parts = []
        cursor = self.head
        while cursor is not None:
            parts.append(cursor.data)
            cursor = cursor.next
        parts.append("None")
        return str(parts)
class Node:
    """Single linked-list node holding arbitrary data."""

    def __init__(self, data):
        self.data = data
        self.next = None

    def __repr__(self):
        # Fix: the original returned self.data directly, which raises
        # TypeError for non-string payloads (this file stores ints in nodes);
        # __repr__ must return a str.
        return str(self.data)
def printLinkedList(a) -> None:
    """Print the list starting at node *a* in the form '1 -> 2 -> 3'.

    Fix: the return annotation claimed LinkedList although the function
    returns nothing — it only prints. Note it assumes *a* is a non-None node.
    """
    node = a
    result = f'{node.data}'
    node = node.next
    while node:
        result += f' -> {str(node.data)}'
        node = node.next
    print(result)
# find the middle node
def middle(head_element):
    """Return the last node of the first half using slow/fast pointers.

    Assumes head_element is not None. For an even-length list this is the
    n/2-th node; for odd lengths, the exact middle.
    """
    slow = head_element
    fast = head_element
    # fast advances two nodes per step, slow one; when fast can no longer
    # take a double step, slow sits at the split point
    while fast.next is not None and fast.next.next is not None:
        slow = slow.next
        fast = fast.next.next
    return slow
# reverse the second half from the list
# 1->2->3->4->5->6 to 1->2->3->6->5->4
def reverse(head_element):
    """Reverse a linked list in place and return its new head.

    1->2->3 becomes 3->2->1; empty and single-node lists are returned as-is.
    """
    if head_element is None or head_element.next is None:
        return head_element
    reversed_head = None
    current = head_element
    while current is not None:
        following = current.next      # remember the rest of the list
        current.next = reversed_head  # point this node backwards
        reversed_head = current       # grow the reversed prefix
        current = following           # advance
    return reversed_head
# merge two 'sorted' linked lists
# 1->2->3 6->5->4 to 1->6->2->5->3->4
def merge(one, two):
    """Interleave two linked lists node by node.

    1->2->3 and 6->5->4 become 1->6->2->5->3->4; whichever list still has
    nodes left is appended at the end.
    """
    sentinel = Node(0)
    tail = sentinel
    while one is not None and two is not None:
        # Save the continuations before rewiring the two current nodes
        rest_one, rest_two = one.next, two.next
        tail.next = one
        one.next = two
        tail = two
        one, two = rest_one, rest_two
    # Attach whatever remains of either list
    tail.next = one if one is not None else two
    return sentinel.next
def reorder_list(head_element):
    """Reorder 1->2->...->n into 1->n->2->(n-1)->... in place.

    Split at the middle, reverse the second half, then interleave the halves.
    """
    if head_element is None or head_element.next is None:
        return head_element
    split_point = middle(head_element)
    second_half = split_point.next
    split_point.next = None  # terminate the first half
    return merge(head_element, reverse(second_half))
# Build the demo list 1 -> 2 -> 4 -> 6 -> 8 -> 10 by hand and print the
# reordered result (1 -> 10 -> 2 -> 8 -> 4 -> 6).
head = Node(1)
head.next = Node(2)
head.next.next = Node(4)
head.next.next.next = Node(6)
head.next.next.next.next = Node(8)
head.next.next.next.next.next = Node(10)
printLinkedList(reorder_list(head))
|
"""DeviceGroupRecords class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.device_services.devicerecords import DeviceRecords
from fmcapi.api_objects.device_ha_pair_services.ftddevicehapairs import FTDDeviceHAPairs
import logging
import warnings
class DeviceGroupRecords(APIClassTemplate):
    """The DeviceGroupRecords Object in the FMC."""

    VALID_JSON_DATA = ["id", "name", "members"]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/devicegroups/devicegrouprecords"

    def __init__(self, fmc, **kwargs):
        """
        Initialize DeviceGroupRecords object.

        Set self.type to "DeviceGroup" and parse the kwargs.

        :param fmc (object): FMC object
        :param **kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for DeviceGroupRecords class.")
        self.type = "DeviceGroup"
        self.parse_kwargs(**kwargs)

    def devices(self, action, members=None):
        """
        Add/modify name to members field of DeviceGroupRecords object.

        :param action: (str) 'add', 'remove', or 'clear'
        :param members: (list) List of members in group; each is a dict with
            at least 'type' ('device' or 'deviceHAPair') and 'name'.
        :return: None
        """
        logging.debug("In devices() for DeviceGroupRecords class.")
        # Fix: avoid the mutable default argument (was members=[])
        if members is None:
            members = []
        if action == "add":
            for member in members:
                # Resolve the member by name to obtain its id/type from FMC
                if member["type"] == "device":
                    dev1 = DeviceRecords(fmc=self.fmc)
                    dev1.get(name=member["name"])
                elif member["type"] == "deviceHAPair":
                    dev1 = FTDDeviceHAPairs(fmc=self.fmc)
                    dev1.get(name=member["name"])
                # NOTE(review): for an unrecognized member type, dev1 from a
                # previous iteration would be reused here — confirm inputs are
                # always 'device' or 'deviceHAPair'.
                if "id" in dev1.__dict__:
                    if "members" in self.__dict__:
                        self.members.append(
                            {"id": dev1.id, "type": dev1.type, "name": dev1.name}
                        )
                    else:
                        self.members = [
                            {"id": dev1.id, "type": dev1.type, "name": dev1.name}
                        ]
                    logging.info(
                        f'DeviceRecord "{dev1.name}" added to this DeviceGroupRecords object.'
                    )
                else:
                    logging.warning(
                        f"{member} not found. Cannot add DeviceRecord to DeviceGroupRecords."
                    )
        elif action == "remove":
            if "members" in self.__dict__:
                for member in members:
                    if member["type"] == "device":
                        dev1 = DeviceRecords(fmc=self.fmc)
                        dev1.get(name=member["name"])
                    elif member["type"] == "deviceHAPair":
                        dev1 = FTDDeviceHAPairs(fmc=self.fmc)
                        dev1.get(name=member["name"])
                    if "id" in dev1.__dict__:
                        if member["type"] == "device":
                            self.members = list(
                                filter(lambda i: i["id"] != dev1.id, self.members)
                            )
                        elif member["type"] == "deviceHAPair":
                            # HA pairs are stored via the ids of their
                            # primary and secondary devices; drop both
                            devHA1 = FTDDeviceHAPairs(fmc=self.fmc)
                            devHA1.get(name=member["name"])
                            self.members = list(
                                filter(
                                    lambda i: i["id"] != devHA1.primary["id"],
                                    self.members,
                                )
                            )
                            self.members = list(
                                filter(
                                    lambda i: i["id"] != devHA1.secondary["id"],
                                    self.members,
                                )
                            )
                    else:
                        logging.warning(
                            f"DeviceRecord {member} not registered. Cannot remove DeviceRecord"
                            f" from DeviceGroupRecords."
                        )
            else:
                logging.warning(
                    "DeviceGroupRecords has no members. Cannot remove DeviceRecord."
                )
        elif action == "clear":
            if "members" in self.__dict__:
                del self.members
                logging.info(
                    "All device records removed from this DeviceGroupRecords object."
                )
class DeviceGroups(DeviceGroupRecords):
    """
    Dispose of this Class after 20210101.

    Use DeviceGroupRecords() instead.
    """

    def __init__(self, fmc, **kwargs):
        # NOTE(review): resetwarnings() clears ALL global warning filters for
        # the whole process just to make this deprecation visible — confirm
        # that side effect is acceptable.
        warnings.resetwarnings()
        warnings.warn(
            "Deprecated: DeviceGroups() should be called via DeviceGroupRecords()."
        )
        super().__init__(fmc, **kwargs)
|
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the QutipEmulator, used to simulate a Sequence or its samples."""
from __future__ import annotations
import itertools
import warnings
from collections import Counter, defaultdict
from collections.abc import Mapping
from dataclasses import asdict, replace
from typing import Any, Optional, Union, cast
import matplotlib.pyplot as plt
import numpy as np
import qutip
from numpy.typing import ArrayLike
import pulser.sampler as sampler
from pulser import Sequence
from pulser.devices._device_datacls import BaseDevice
from pulser.register.base_register import BaseRegister, QubitId
from pulser.result import SampledResult
from pulser.sampler.samples import SequenceSamples, _PulseTargetSlot
from pulser.sequence._seq_drawer import draw_samples, draw_sequence
from pulser_simulation.qutip_result import QutipResult
from pulser_simulation.simconfig import SimConfig
from pulser_simulation.simresults import (
CoherentResults,
NoisyResults,
SimulationResults,
)
class QutipEmulator:
r"""Emulator of a pulse sequence using QuTiP.
Args:
sampled_seq: A pulse sequence samples used in the emulation.
register: The register associating coordinates to the qubits targeted
by the pulses within the samples.
device: The device specifications used in the emulation. Register and
samples have to satisfy its constraints.
sampling_rate: The fraction of samples that we wish to extract from
the samples to simulate. Has to be a value between 0.05 and 1.0.
config: Configuration to be used for this simulation.
evaluation_times: Choose between:
- "Full": The times are set to be the ones used to define the
Hamiltonian to the solver.
- "Minimal": The times are set to only include initial and final
times.
- An ArrayLike object of times in µs if you wish to only include
those specific times.
- A float to act as a sampling rate for the resulting state.
"""
def __init__(
    self,
    sampled_seq: SequenceSamples,
    register: BaseRegister,
    device: BaseDevice,
    sampling_rate: float = 1.0,
    config: Optional[SimConfig] = None,
    evaluation_times: Union[float, str, ArrayLike] = "Full",
) -> None:
    """Instantiates a Simulation object.

    Validates that the samples, register and device are mutually
    compatible, normalises the samples, then builds the Hamiltonian via
    set_config() and sets the initial state to all-ground.
    """
    # Initializing the samples obj
    if not isinstance(sampled_seq, SequenceSamples):
        raise TypeError(
            "The provided sequence has to be a valid "
            "SequenceSamples instance."
        )
    if sampled_seq.max_duration == 0:
        raise ValueError("SequenceSamples is empty.")
    # Check compatibility of register and device
    self._device = device
    self._device.validate_register(register)
    self._register = register
    # Check compatibility of samples and device:
    if (
        sampled_seq._slm_mask.end > 0
        and not self._device.supports_slm_mask
    ):
        raise ValueError(
            "Samples use SLM mask but device does not have one."
        )
    if not sampled_seq.used_bases <= device.supported_bases:
        raise ValueError(
            "Bases used in samples should be supported by device."
        )
    # Check compatibility of masked samples and register
    if not sampled_seq._slm_mask.targets <= set(register.qubit_ids):
        raise ValueError(
            "The ids of qubits targeted in SLM mask"
            " should be defined in register."
        )
    samples_list = []
    for ch, ch_samples in sampled_seq.channel_samples.items():
        if sampled_seq._ch_objs[ch].addressing == "Local":
            # Check that targets of Local Channels are defined
            # in register
            if not set().union(
                *(slot.targets for slot in ch_samples.slots)
            ) <= set(register.qubit_ids):
                raise ValueError(
                    "The ids of qubits targeted in Local channels"
                    " should be defined in register."
                )
            samples_list.append(ch_samples)
        else:
            # Replace targets of Global channels by qubits of register
            samples_list.append(
                replace(
                    ch_samples,
                    slots=[
                        replace(slot, targets=set(register.qubit_ids))
                        for slot in ch_samples.slots
                    ],
                )
            )
    _sampled_seq = replace(sampled_seq, samples_list=samples_list)
    # Interaction mode follows the samples' basis (XY vs. Ising)
    self._interaction = "XY" if _sampled_seq._in_xy else "ising"
    self._tot_duration = _sampled_seq.max_duration
    # One extra sample so the final instruction is represented (see below)
    self.samples_obj = _sampled_seq.extend_duration(self._tot_duration + 1)

    # Initializing qubit infos
    self._qdict = self._register.qubits
    self._size = len(self._qdict)
    self._qid_index = {qid: i for i, qid in enumerate(self._qdict)}

    # Type hints for attributes defined outside of __init__
    self.basis_name: str
    self._config: SimConfig
    self.op_matrix: dict[str, qutip.Qobj]
    self.basis: dict[str, qutip.Qobj]
    self.dim: int
    self._eval_times_array: np.ndarray
    self._bad_atoms: dict[Union[str, int], bool] = {}
    self._doppler_detune: dict[Union[str, int], float] = {}

    # Initializing sampling and evaluation times
    if not (0 < sampling_rate <= 1.0):
        raise ValueError(
            "The sampling rate (`sampling_rate` = "
            f"{sampling_rate}) must be greater than 0 and "
            "less than or equal to 1."
        )
    if int(self._tot_duration * sampling_rate) < 4:
        raise ValueError(
            "`sampling_rate` is too small, less than 4 data points."
        )
    self._sampling_rate = sampling_rate
    # Times in µs (the /1000 presumably converts ns durations — confirm)
    self.sampling_times = self._adapt_to_sampling_rate(
        # Include extra time step for final instruction from samples:
        np.arange(self._tot_duration + 1, dtype=np.double)
        / 1000
    )
    self.set_evaluation_times(evaluation_times)

    # Stores the qutip operators used in building the Hamiltonian
    self.operators: dict[str, defaultdict[str, dict]] = {
        addr: defaultdict(dict) for addr in ["Global", "Local"]
    }
    self._collapse_ops: list[qutip.Qobj] = []

    # Sets the config as well as builds the hamiltonian
    self.set_config(config) if config else self.set_config(SimConfig())

    # Measurement basis: prefer the one recorded in the samples, otherwise
    # derive it from the basis name chosen during Hamiltonian construction
    if self.samples_obj._measurement:
        self._meas_basis = self.samples_obj._measurement
    else:
        if self.basis_name in {"digital", "all"}:
            self._meas_basis = "digital"
        else:
            self._meas_basis = self.basis_name
    self.set_initial_state("all-ground")
@property
def config(self) -> SimConfig:
"""The current configuration, as a SimConfig instance."""
return self._config
    def set_config(self, cfg: SimConfig) -> None:
        """Sets current config to cfg and updates simulation parameters.

        Rebuilds the Hamiltonian and the collapse (Lindblad) operators to
        match the new configuration.

        Args:
            cfg: New configuration.

        Raises:
            ValueError: If ``cfg`` is not a ``SimConfig`` instance.
            NotImplementedError: If a requested noise type is unsupported by
                the current interaction mode, or if dephasing/depolarizing/
                effective noise is requested in the digital or all basis.
        """
        if not isinstance(cfg, SimConfig):
            raise ValueError(f"Object {cfg} is not a valid `SimConfig`.")
        # Each interaction mode ("ising"/"XY") supports only a subset of
        # noise types; reject the rest up front.
        not_supported = (
            set(cfg.noise) - cfg.supported_noises[self._interaction]
        )
        if not_supported:
            raise NotImplementedError(
                f"Interaction mode '{self._interaction}' does not support "
                f"simulation of noise types: {', '.join(not_supported)}."
            )
        # Kept so we can roll back if a later check fails.
        prev_config = self.config if hasattr(self, "_config") else SimConfig()
        self._config = cfg
        # Without SPAM preparation errors, no atom is "badly prepared".
        if not ("SPAM" in self.config.noise and self.config.eta > 0):
            self._bad_atoms = {qid: False for qid in self._qid_index}
        if "doppler" not in self.config.noise:
            self._doppler_detune = {qid: 0.0 for qid in self._qid_index}
        # Noise, samples and Hamiltonian update routine
        self._construct_hamiltonian()
        # Single-qubit Kraus operators collected below; they are expanded to
        # one collapse operator per qubit at the end of this method.
        kraus_ops = []
        self._collapse_ops = []
        if "dephasing" in self.config.noise:
            if self.basis_name == "digital" or self.basis_name == "all":
                # Go back to previous config
                self.set_config(prev_config)
                raise NotImplementedError(
                    "Cannot include dephasing noise in digital- or all-basis."
                )
            # Probability of phase (Z) flip:
            # First order in prob
            prob = self.config.dephasing_prob / 2
            n = self._size
            if prob > 0.1 and n > 1:
                warnings.warn(
                    "The dephasing model is a first-order approximation in the"
                    f" dephasing probability. p = {2*prob} is too large for "
                    "realistic results.",
                    stacklevel=2,
                )
            k = np.sqrt(prob * (1 - prob) ** (n - 1))
            # Identity term weighted by the no-flip probability.
            self._collapse_ops += [
                np.sqrt((1 - prob) ** n)
                * qutip.tensor([self.op_matrix["I"] for _ in range(n)])
            ]
            kraus_ops.append(k * qutip.sigmaz())
        if "depolarizing" in self.config.noise:
            if self.basis_name == "digital" or self.basis_name == "all":
                # Go back to previous config
                self.set_config(prev_config)
                raise NotImplementedError(
                    "Cannot include depolarizing "
                    + "noise in digital- or all-basis."
                )
            # Probability of error occurrence
            prob = self.config.depolarizing_prob / 4
            n = self._size
            if prob > 0.1 and n > 1:
                warnings.warn(
                    "The depolarizing model is a first-order approximation"
                    f" in the depolarizing probability. p = {4*prob}"
                    " is too large for realistic results.",
                    stacklevel=2,
                )
            k = np.sqrt((prob) * (1 - 3 * prob) ** (n - 1))
            self._collapse_ops += [
                np.sqrt((1 - 3 * prob) ** n)
                * qutip.tensor([self.op_matrix["I"] for _ in range(n)])
            ]
            # Depolarizing mixes in all three Pauli errors equally.
            kraus_ops.append(k * qutip.sigmax())
            kraus_ops.append(k * qutip.sigmay())
            kraus_ops.append(k * qutip.sigmaz())
        if "eff_noise" in self.config.noise:
            if self.basis_name == "digital" or self.basis_name == "all":
                # Go back to previous config
                self.set_config(prev_config)
                raise NotImplementedError(
                    "Cannot include general "
                    + "noise in digital- or all-basis."
                )
            # Probability distribution of error occurences
            n = self._size
            m = len(self.config.eff_noise_opers)
            if n > 1:
                # Warn once if any non-identity channel probability is large.
                for i in range(1, m):
                    prob_i = self.config.eff_noise_probs[i]
                    if prob_i > 0.1:
                        warnings.warn(
                            "The effective noise model is a first-order"
                            " approximation in the noise probability."
                            f"p={prob_i} is large for realistic results.",
                            stacklevel=2,
                        )
                        break
            # Deriving Kraus operators
            # Index 0 is, by convention, the identity channel probability.
            prob_id = self.config.eff_noise_probs[0]
            self._collapse_ops += [
                np.sqrt(prob_id**n)
                * qutip.tensor([self.op_matrix["I"] for _ in range(n)])
            ]
            for i in range(1, m):
                k = np.sqrt(
                    self.config.eff_noise_probs[i] * prob_id ** (n - 1)
                )
                k_op = k * self.config.eff_noise_opers[i]
                kraus_ops.append(k_op)
        # Building collapse operators
        # Each single-qubit Kraus operator becomes one collapse operator
        # acting on each qubit (identity elsewhere).
        for operator in kraus_ops:
            self._collapse_ops += [
                self.build_operator([(operator, [qid])])
                for qid in self._qid_index
            ]
    def add_config(self, config: SimConfig) -> None:
        """Updates the current configuration with parameters of another one.

        Mostly useful when dealing with multiple noise types in different
        configurations and wanting to merge these configurations together.
        Adds simulation parameters to noises that weren't available in the
        former SimConfig. Noises specified in both SimConfigs will keep
        former noise parameters.

        Args:
            config: SimConfig to retrieve parameters from.

        Raises:
            ValueError: If ``config`` is not a ``SimConfig`` instance.
            NotImplementedError: If a noise type in ``config`` is not
                supported by the current interaction mode.
        """
        if not isinstance(config, SimConfig):
            raise ValueError(f"Object {config} is not a valid `SimConfig`")
        not_supported = (
            set(config.noise) - config.supported_noises[self._interaction]
        )
        if not_supported:
            raise NotImplementedError(
                f"Interaction mode '{self._interaction}' does not support "
                f"simulation of noise types: {', '.join(not_supported)}."
            )
        old_noise_set = set(self.config.noise)
        new_noise_set = old_noise_set.union(config.noise)
        # Only parameters of noises NOT already present are imported.
        diff_noise_set = new_noise_set - old_noise_set
        # Create temporary param_dict to add noise parameters:
        param_dict: dict[str, Any] = asdict(self._config)
        # Begin populating with added noise parameters:
        param_dict["noise"] = tuple(new_noise_set)
        if "SPAM" in diff_noise_set:
            param_dict["eta"] = config.eta
            param_dict["epsilon"] = config.epsilon
            param_dict["epsilon_prime"] = config.epsilon_prime
        if "doppler" in diff_noise_set:
            param_dict["temperature"] = config.temperature
        if "amplitude" in diff_noise_set:
            param_dict["laser_waist"] = config.laser_waist
        if "dephasing" in diff_noise_set:
            param_dict["dephasing_prob"] = config.dephasing_prob
        if "depolarizing" in diff_noise_set:
            param_dict["depolarizing_prob"] = config.depolarizing_prob
        if "eff_noise" in diff_noise_set:
            param_dict["eff_noise_opers"] = config.eff_noise_opers
            param_dict["eff_noise_probs"] = config.eff_noise_probs
        # NOTE(review): presumably asdict() yields temperature in K while the
        # SimConfig constructor expects µK, hence the conversion — verify
        # against SimConfig's unit convention.
        param_dict["temperature"] *= 1.0e6
        # update runs:
        param_dict["runs"] = config.runs
        param_dict["samples_per_run"] = config.samples_per_run
        # set config with the new parameters:
        self.set_config(SimConfig(**param_dict))
def show_config(self, solver_options: bool = False) -> None:
"""Shows current configuration."""
print(self._config.__str__(solver_options))
def reset_config(self) -> None:
"""Resets configuration to default."""
self.set_config(SimConfig())
@property
def initial_state(self) -> qutip.Qobj:
"""The initial state of the simulation.
Args:
state: The initial state.
Choose between:
- "all-ground" for all atoms in ground state
- An ArrayLike with a shape compatible with the system
- A Qobj object
"""
return self._initial_state
def set_initial_state(
self, state: Union[str, np.ndarray, qutip.Qobj]
) -> None:
"""Sets the initial state of the simulation."""
self._initial_state: qutip.Qobj
if isinstance(state, str) and state == "all-ground":
self._initial_state = qutip.tensor(
[
self.basis["d" if self._interaction == "XY" else "g"]
for _ in range(self._size)
]
)
else:
state = cast(Union[np.ndarray, qutip.Qobj], state)
shape = state.shape[0]
legal_shape = self.dim**self._size
legal_dims = [[self.dim] * self._size, [1] * self._size]
if shape != legal_shape:
raise ValueError(
"Incompatible shape of initial state."
+ f"Expected {legal_shape}, got {shape}."
)
self._initial_state = qutip.Qobj(state, dims=legal_dims)
@property
def evaluation_times(self) -> np.ndarray:
"""The times at which the results of this simulation are returned.
Args:
value: Choose between:
- "Full": The times are set to be the ones used to define the
Hamiltonian to the solver.
- "Minimal": The times are set to only include initial and
final times.
- An ArrayLike object of times in µs if you wish to only
include those specific times.
- A float to act as a sampling rate for the resulting state.
"""
return np.array(self._eval_times_array)
def set_evaluation_times(
self, value: Union[str, ArrayLike, float]
) -> None:
"""Sets times at which the results of this simulation are returned."""
if isinstance(value, str):
if value == "Full":
eval_times = np.copy(self.sampling_times)
elif value == "Minimal":
eval_times = np.array([])
else:
raise ValueError(
"Wrong evaluation time label. It should "
"be `Full`, `Minimal`, an array of times or"
+ " a float between 0 and 1."
)
elif isinstance(value, float):
if value > 1 or value <= 0:
raise ValueError(
"evaluation_times float must be between 0 and 1."
)
indices = np.linspace(
0,
len(self.sampling_times) - 1,
int(value * len(self.sampling_times)),
dtype=int,
)
# Note: if `value` is very small `eval_times` is an empty list:
eval_times = self.sampling_times[indices]
elif isinstance(value, (list, tuple, np.ndarray)):
if np.max(value, initial=0) > self._tot_duration / 1000:
raise ValueError(
"Provided evaluation-time list extends "
"further than sequence duration."
)
if np.min(value, initial=0) < 0:
raise ValueError(
"Provided evaluation-time list contains "
"negative values."
)
eval_times = np.array(value)
else:
raise ValueError(
"Wrong evaluation time label. It should "
"be `Full`, `Minimal`, an array of times or a "
+ "float between 0 and 1."
)
# Ensure 0 and final time are included:
self._eval_times_array = np.union1d(
eval_times, [0.0, self._tot_duration / 1000]
)
self._eval_times_instruction = value
    def _extract_samples(self) -> None:
        """Populates samples dictionary with every pulse in the sequence.

        When local noises apply, samples are stored per-qubit ("Local") so
        noise can be applied individually; otherwise the global/local layout
        of the sequence is preserved.
        """
        local_noises = True
        if set(self.config.noise).issubset(
            {"dephasing", "SPAM", "depolarizing", "eff_noise"}
        ):
            # These noises don't modify the samples themselves, except for
            # SPAM with eta > 0 (badly-prepared atoms must be zeroed below).
            local_noises = "SPAM" in self.config.noise and self.config.eta > 0
        samples = self.samples_obj.to_nested_dict(all_local=local_noises)
        def add_noise(
            slot: _PulseTargetSlot,
            samples_dict: Mapping[QubitId, dict[str, np.ndarray]],
            is_global_pulse: bool,
        ) -> None:
            """Builds hamiltonian coefficients.

            Taking into account, if necessary, noise effects, which are local
            and depend on the qubit's id qid.
            """
            # One amplitude fluctuation drawn per pulse, shared by targets.
            noise_amp_base = max(
                0, np.random.normal(1.0, self.config.amp_sigma)
            )
            for qid in slot.targets:
                if "doppler" in self.config.noise:
                    noise_det = self._doppler_detune[qid]
                    samples_dict[qid]["det"][slot.ti : slot.tf] += noise_det
                # Gaussian beam loss in amplitude for global pulses only
                # Noise is drawn at random for each pulse
                if "amplitude" in self.config.noise and is_global_pulse:
                    position = self._qdict[qid]
                    r = np.linalg.norm(position)
                    w0 = self.config.laser_waist
                    noise_amp = noise_amp_base * np.exp(-((r / w0) ** 2))
                    samples_dict[qid]["amp"][slot.ti : slot.tf] *= noise_amp
        if local_noises:
            for ch, ch_samples in self.samples_obj.channel_samples.items():
                addr = self.samples_obj._ch_objs[ch].addressing
                basis = self.samples_obj._ch_objs[ch].basis
                samples_dict = samples["Local"][basis]
                for slot in ch_samples.slots:
                    add_noise(slot, samples_dict, addr == "Global")
            # Delete samples for badly prepared atoms
            for basis in samples["Local"]:
                for qid in samples["Local"][basis]:
                    if self._bad_atoms[qid]:
                        for qty in ("amp", "det", "phase"):
                            samples["Local"][basis][qid][qty] = 0.0
        self.samples = samples
def build_operator(self, operations: Union[list, tuple]) -> qutip.Qobj:
"""Creates an operator with non-trivial actions on some qubits.
Takes as argument a list of tuples ``[(operator_1, qubits_1),
(operator_2, qubits_2)...]``. Returns the operator given by the tensor
product of {``operator_i`` applied on ``qubits_i``} and Id on the rest.
``(operator, 'global')`` returns the sum for all ``j`` of operator
applied at ``qubit_j`` and identity elsewhere.
Example for 4 qubits: ``[(Z, [1, 2]), (Y, [3])]`` returns `ZZYI`
and ``[(X, 'global')]`` returns `XIII + IXII + IIXI + IIIX`
Args:
operations: List of tuples `(operator, qubits)`.
`operator` can be a ``qutip.Quobj`` or a string key for
``self.op_matrix``. `qubits` is the list on which operator
will be applied. The qubits can be passed as their
index or their label in the register.
Returns:
The final operator.
"""
op_list = [self.op_matrix["I"] for j in range(self._size)]
if not isinstance(operations, list):
operations = [operations]
for operator, qubits in operations:
if qubits == "global":
return sum(
self.build_operator([(operator, [q_id])])
for q_id in self._qdict
)
else:
qubits_set = set(qubits)
if len(qubits_set) < len(qubits):
raise ValueError("Duplicate atom ids in argument list.")
if not qubits_set.issubset(self._qdict.keys()):
raise ValueError(
"Invalid qubit names: "
f"{qubits_set - self._qdict.keys()}"
)
if isinstance(operator, str):
try:
operator = self.op_matrix[operator]
except KeyError:
raise ValueError(f"{operator} is not a valid operator")
for qubit in qubits:
k = self._qid_index[qubit]
op_list[k] = operator
return qutip.tensor(op_list)
def _adapt_to_sampling_rate(self, full_array: np.ndarray) -> np.ndarray:
"""Adapt list to correspond to sampling rate."""
indices = np.linspace(
0,
len(full_array) - 1,
int(self._sampling_rate * (self._tot_duration + 1)),
dtype=int,
)
return cast(np.ndarray, full_array[indices])
def _update_noise(self) -> None:
"""Updates noise random parameters.
Used at the start of each run. If SPAM isn't in chosen noises, all
atoms are set to be correctly prepared.
"""
if "SPAM" in self.config.noise and self.config.eta > 0:
dist = (
np.random.uniform(size=len(self._qid_index))
< self.config.spam_dict["eta"]
)
self._bad_atoms = dict(zip(self._qid_index, dist))
if "doppler" in self.config.noise:
detune = np.random.normal(
0, self.config.doppler_sigma, size=len(self._qid_index)
)
self._doppler_detune = dict(zip(self._qid_index, detune))
def _build_basis_and_op_matrices(self) -> None:
"""Determine dimension, basis and projector operators."""
if self._interaction == "XY":
self.basis_name = "XY"
self.dim = 2
basis = ["u", "d"]
projectors = ["uu", "du", "ud", "dd"]
else:
if "digital" not in self.samples_obj.used_bases:
self.basis_name = "ground-rydberg"
self.dim = 2
basis = ["r", "g"]
projectors = ["gr", "rr", "gg"]
elif "ground-rydberg" not in self.samples_obj.used_bases:
self.basis_name = "digital"
self.dim = 2
basis = ["g", "h"]
projectors = ["hg", "hh", "gg"]
else:
self.basis_name = "all" # All three states
self.dim = 3
basis = ["r", "g", "h"]
projectors = ["gr", "hg", "rr", "gg", "hh"]
self.basis = {b: qutip.basis(self.dim, i) for i, b in enumerate(basis)}
self.op_matrix = {"I": qutip.qeye(self.dim)}
for proj in projectors:
self.op_matrix["sigma_" + proj] = (
self.basis[proj[0]] * self.basis[proj[1]].dag()
)
    def _construct_hamiltonian(self, update: bool = True) -> None:
        """Constructs the hamiltonian from the Sequence.

        Also builds qutip.Qobjs related to the Sequence if not built already,
        and refreshes potential noise parameters by drawing new at random.

        Args:
            update: Whether to update the noise parameters.
        """
        if update:
            self._update_noise()
        self._extract_samples()
        if not hasattr(self, "basis_name"):
            self._build_basis_and_op_matrices()
        def make_vdw_term(q1: QubitId, q2: QubitId) -> qutip.Qobj:
            """Construct the Van der Waals interaction Term.

            For each pair of qubits, calculate the distance between them,
            then assign the local operator "sigma_rr" at each pair.
            The units are given so that the coefficient includes a
            1/hbar factor.
            """
            dist = np.linalg.norm(self._qdict[q1] - self._qdict[q2])
            U = 0.5 * self._device.interaction_coeff / dist**6
            return U * self.build_operator([("sigma_rr", [q1, q2])])
        def make_xy_term(q1: QubitId, q2: QubitId) -> qutip.Qobj:
            """Construct the XY interaction Term.

            For each pair of qubits, calculate the distance between them,
            then assign the local operator "sigma_du * sigma_ud" at each pair.
            The units are given so that the coefficient
            includes a 1/hbar factor.
            """
            dist = np.linalg.norm(self._qdict[q1] - self._qdict[q2])
            coords_dim = len(self._qdict[q1])
            mag_field = cast(np.ndarray, self.samples_obj._magnetic_field)[
                :coords_dim
            ]
            mag_norm = np.linalg.norm(mag_field)
            # Angular dependence: cos of angle between interatomic axis and
            # the magnetic field (0 when the field is effectively null).
            if mag_norm < 1e-8:
                cosine = 0.0
            else:
                cosine = np.dot(
                    (self._qdict[q1] - self._qdict[q2]),
                    mag_field,
                ) / (dist * mag_norm)
            U = (
                0.5
                * cast(float, self._device.interaction_coeff_xy)
                * (1 - 3 * cosine**2)
                / dist**3
            )
            return U * self.build_operator(
                [("sigma_du", [q1]), ("sigma_ud", [q2])]
            )
        def make_interaction_term(masked: bool = False) -> qutip.Qobj:
            # When `masked`, only SLM-masked qubits interact; pairs touching
            # an unmasked or badly-prepared qubit are skipped.
            if masked:
                # Calculate the total number of good, unmasked qubits
                effective_size = self._size - sum(self._bad_atoms.values())
                for q in self.samples_obj._slm_mask.targets:
                    if not self._bad_atoms[q]:
                        effective_size -= 1
                if effective_size < 2:
                    return 0 * self.build_operator([("I", "global")])
            # make interaction term
            dipole_interaction = cast(qutip.Qobj, 0)
            for q1, q2 in itertools.combinations(self._qdict.keys(), r=2):
                if (
                    self._bad_atoms[q1]
                    or self._bad_atoms[q2]
                    or (
                        masked
                        and (
                            q1 in self.samples_obj._slm_mask.targets
                            or q2 in self.samples_obj._slm_mask.targets
                        )
                    )
                ):
                    continue
                if self._interaction == "XY":
                    dipole_interaction += make_xy_term(q1, q2)
                else:
                    dipole_interaction += make_vdw_term(q1, q2)
            return dipole_interaction
        def build_coeffs_ops(basis: str, addr: str) -> list[list]:
            """Build coefficients and operators for the hamiltonian QobjEvo."""
            samples = self.samples[addr][basis]
            operators = self.operators[addr][basis]
            # Choose operator names according to addressing:
            if basis == "ground-rydberg":
                op_ids = ["sigma_gr", "sigma_rr"]
            elif basis == "digital":
                op_ids = ["sigma_hg", "sigma_gg"]
            elif basis == "XY":
                op_ids = ["sigma_du", "sigma_dd"]
            terms = []
            if addr == "Global":
                # Drive (amp/phase) and detuning coefficients, in rad/µs.
                coeffs = [
                    0.5 * samples["amp"] * np.exp(-1j * samples["phase"]),
                    -0.5 * samples["det"],
                ]
                for op_id, coeff in zip(op_ids, coeffs):
                    if np.any(coeff != 0):
                        # Build once global operators as they are needed
                        if op_id not in operators:
                            operators[op_id] = self.build_operator(
                                [(op_id, "global")]
                            )
                        terms.append(
                            [
                                operators[op_id],
                                self._adapt_to_sampling_rate(coeff),
                            ]
                        )
            elif addr == "Local":
                for q_id, samples_q in samples.items():
                    if q_id not in operators:
                        operators[q_id] = {}
                    coeffs = [
                        0.5
                        * samples_q["amp"]
                        * np.exp(-1j * samples_q["phase"]),
                        -0.5 * samples_q["det"],
                    ]
                    for coeff, op_id in zip(coeffs, op_ids):
                        if np.any(coeff != 0):
                            if op_id not in operators[q_id]:
                                operators[q_id][op_id] = self.build_operator(
                                    [(op_id, [q_id])]
                                )
                            terms.append(
                                [
                                    operators[q_id][op_id],
                                    self._adapt_to_sampling_rate(coeff),
                                ]
                            )
            self.operators[addr][basis] = operators
            return terms
        qobj_list = []
        # Time independent term:
        effective_size = self._size - sum(self._bad_atoms.values())
        if self.basis_name != "digital" and effective_size > 1:
            # Build time-dependent or time-independent interaction term based
            # on whether an SLM mask was defined or not
            if self.samples_obj._slm_mask.end > 0:
                # Build an array of binary coefficients for the interaction
                # term of unmasked qubits
                coeff = np.ones(self._tot_duration)
                coeff[0 : self.samples_obj._slm_mask.end] = 0
                # Build the interaction term for unmasked qubits
                qobj_list = [
                    [
                        make_interaction_term(),
                        self._adapt_to_sampling_rate(coeff),
                    ]
                ]
                # Build the interaction term for masked qubits
                qobj_list += [
                    [
                        make_interaction_term(masked=True),
                        self._adapt_to_sampling_rate(
                            np.logical_not(coeff).astype(int)
                        ),
                    ]
                ]
            else:
                qobj_list = [make_interaction_term()]
        # Time dependent terms:
        for addr in self.samples:
            for basis in self.samples[addr]:
                if self.samples[addr][basis]:
                    qobj_list += cast(list, build_coeffs_ops(basis, addr))
        if not qobj_list:  # If qobj_list ends up empty
            qobj_list = [0 * self.build_operator([("I", "global")])]
        ham = qutip.QobjEvo(qobj_list, tlist=self.sampling_times)
        # Only the lower half of the Hamiltonian was built; adding the
        # adjoint makes it Hermitian.
        ham = ham + ham.dag()
        ham.compress()
        self._hamiltonian = ham
def get_hamiltonian(self, time: float) -> qutip.Qobj:
r"""Get the Hamiltonian created from the sequence at a fixed time.
Note:
The whole Hamiltonian is divided by :math:`\hbar`, so its
units are rad/µs.
Args:
time: The specific time at which we want to extract the
Hamiltonian (in ns).
Returns:
A new Qobj for the Hamiltonian with coefficients
extracted from the effective sequence (determined by
`self.sampling_rate`) at the specified time.
"""
if time > self._tot_duration:
raise ValueError(
f"Provided time (`time` = {time}) must be "
"less than or equal to the sequence duration "
f"({self._tot_duration})."
)
if time < 0:
raise ValueError(
f"Provided time (`time` = {time}) must be "
"greater than or equal to 0."
)
return self._hamiltonian(time / 1000) # Creates new Qutip.Qobj
# Run Simulation Evolution using Qutip
def run(
self,
progress_bar: bool = False,
**options: Any,
) -> SimulationResults:
"""Simulates the sequence using QuTiP's solvers.
Will return NoisyResults if the noise in the SimConfig requires it.
Otherwise will return CoherentResults.
Args:
progress_bar: If True, the progress bar of QuTiP's
solver will be shown. If None or False, no text appears.
options: Used as arguments for qutip.Options(). If specified, will
override SimConfig solver_options. If no `max_step` value is
provided, an automatic one is calculated from the `Sequence`'s
schedule (half of the shortest duration among pulses and
delays).
Refer to the QuTiP docs_ for an overview of the parameters.
.. _docs: https://bit.ly/3il9A2u
"""
if "max_step" not in options:
pulse_durations = [
slot.tf - slot.ti
for ch_sample in self.samples_obj.samples_list
for slot in ch_sample.slots
if not (
np.all(np.isclose(ch_sample.amp[slot.ti : slot.tf], 0))
and np.all(np.isclose(ch_sample.det[slot.ti : slot.tf], 0))
)
]
if pulse_durations:
options["max_step"] = 0.5 * min(pulse_durations) / 1000
solv_ops = qutip.Options(**options)
meas_errors: Optional[Mapping[str, float]] = None
if "SPAM" in self.config.noise:
meas_errors = {
k: self.config.spam_dict[k]
for k in ("epsilon", "epsilon_prime")
}
if self.config.eta > 0 and self.initial_state != qutip.tensor(
[self.basis["g"] for _ in range(self._size)]
):
raise NotImplementedError(
"Can't combine state preparation errors with an initial "
"state different from the ground."
)
def _run_solver() -> CoherentResults:
"""Returns CoherentResults: Object containing evolution results."""
# Decide if progress bar will be fed to QuTiP solver
p_bar: Optional[bool]
if progress_bar is True:
p_bar = True
elif (progress_bar is False) or (progress_bar is None):
p_bar = None
else:
raise ValueError("`progress_bar` must be a bool.")
if (
"dephasing" in self.config.noise
or "depolarizing" in self.config.noise
or "eff_noise" in self.config.noise
):
result = qutip.mesolve(
self._hamiltonian,
self.initial_state,
self._eval_times_array,
self._collapse_ops,
progress_bar=p_bar,
options=solv_ops,
)
else:
result = qutip.sesolve(
self._hamiltonian,
self.initial_state,
self._eval_times_array,
progress_bar=p_bar,
options=solv_ops,
)
results = [
QutipResult(
tuple(self._qdict),
self._meas_basis,
state,
self._meas_basis == self.basis_name,
)
for state in result.states
]
return CoherentResults(
results,
self._size,
self.basis_name,
self._eval_times_array,
self._meas_basis,
meas_errors,
)
# Check if noises ask for averaging over multiple runs:
if set(self.config.noise).issubset(
{"dephasing", "SPAM", "depolarizing", "eff_noise"}
):
# If there is "SPAM", the preparation errors must be zero
if "SPAM" not in self.config.noise or self.config.eta == 0:
return _run_solver()
else:
# Stores the different initial configurations and frequency
initial_configs = Counter(
"".join(
(
np.random.uniform(size=len(self._qid_index))
< self.config.eta
)
.astype(int)
.astype(str) # Turns bool->int->str
)
for _ in range(self.config.runs)
).most_common()
loop_runs = len(initial_configs)
update_ham = False
else:
loop_runs = self.config.runs
update_ham = True
# Will return NoisyResults
time_indices = range(len(self._eval_times_array))
total_count = np.array([Counter() for _ in time_indices])
# We run the system multiple times
for i in range(loop_runs):
if not update_ham:
initial_state, reps = initial_configs[i]
# We load the initial state manually
self._bad_atoms = dict(
zip(
self._qid_index,
np.array(list(initial_state)).astype(bool),
)
)
else:
reps = 1
# At each run, new random noise: new Hamiltonian
self._construct_hamiltonian(update=update_ham)
# Get CoherentResults instance from sequence with added noise:
cleanres_noisyseq = _run_solver()
# Extract statistics at eval time:
total_count += np.array(
[
cleanres_noisyseq.sample_state(
t, n_samples=self.config.samples_per_run * reps
)
for t in self._eval_times_array
]
)
n_measures = self.config.runs * self.config.samples_per_run
results = [
SampledResult(tuple(self._qdict), self._meas_basis, total_count[t])
for t in time_indices
]
return NoisyResults(
results,
self._size,
self.basis_name,
self._eval_times_array,
n_measures,
)
    def draw(
        self,
        draw_phase_area: bool = False,
        draw_phase_shifts: bool = False,
        draw_phase_curve: bool = False,
        fig_name: str | None = None,
        kwargs_savefig: dict = {},
    ) -> None:
        """Draws the samples of a sequence of operations used for simulation.

        Args:
            draw_phase_area: Whether phase and area values need
                to be shown as text on the plot, defaults to False.
            draw_phase_shifts: Whether phase shift and reference
                information should be added to the plot, defaults to False.
            draw_phase_curve: Draws the changes in phase in its own curve
                (ignored if the phase doesn't change throughout the channel).
            fig_name: The name on which to save the figure.
                If None the figure will not be saved.
            kwargs_savefig: Keywords arguments for
                ``matplotlib.pyplot.savefig``. Not applicable if `fig_name`
                is ``None``.

        See Also:
            Sequence.draw(): Draws the sequence in its current state.
        """
        # Delegates entirely to the sampled-sequence drawer.
        draw_samples(
            self.samples_obj,
            self._register,
            self._sampling_rate,
            draw_phase_area=draw_phase_area,
            draw_phase_shifts=draw_phase_shifts,
            draw_phase_curve=draw_phase_curve,
        )
        if fig_name is not None:
            plt.savefig(fig_name, **kwargs_savefig)
        plt.show()
    @classmethod
    def from_sequence(
        cls,
        sequence: Sequence,
        sampling_rate: float = 1.0,
        config: Optional[SimConfig] = None,
        evaluation_times: Union[float, str, ArrayLike] = "Full",
        with_modulation: bool = False,
    ) -> QutipEmulator:
        r"""Simulation of a pulse sequence using QuTiP.

        Args:
            sequence: An instance of a Pulser Sequence that we
                want to simulate.
            sampling_rate: The fraction of samples that we wish to
                extract from the pulse sequence to simulate. Has to be a
                value between 0.05 and 1.0.
            config: Configuration to be used for this simulation.
            evaluation_times: Choose between:

                - "Full": The times are set to be the ones used to define the
                  Hamiltonian to the solver.
                - "Minimal": The times are set to only include initial and
                  final times.
                - An ArrayLike object of times in µs if you wish to only
                  include those specific times.
                - A float to act as a sampling rate for the resulting state.
            with_modulation: Whether to simulate the sequence with the
                programmed input or the expected output.

        Raises:
            TypeError: If ``sequence`` is not a ``Sequence``.
            ValueError: If the sequence is parametrized/mappable, has no
                channels or no instructions.
            NotImplementedError: If both an SLM mask and output modulation
                are requested.
        """
        if not isinstance(sequence, Sequence):
            raise TypeError(
                "The provided sequence has to be a valid "
                "pulser.Sequence instance."
            )
        if sequence.is_parametrized() or sequence.is_register_mappable():
            raise ValueError(
                "The provided sequence needs to be built to be simulated. Call"
                " `Sequence.build()` with the necessary parameters."
            )
        if not sequence._schedule:
            raise ValueError("The provided sequence has no declared channels.")
        # A sequence whose channels all end at t=0 contains no instructions.
        if all(
            sequence._schedule[x][-1].tf == 0
            for x in sequence.declared_channels
        ):
            raise ValueError(
                "No instructions given for the channels in the sequence."
            )
        if with_modulation and sequence._slm_mask_targets:
            raise NotImplementedError(
                "Simulation of sequences combining an SLM mask and output "
                "modulation is not supported."
            )
        # Sample the sequence (optionally with modulation, extending the
        # duration to cover the fall time) and build the emulator from it.
        return cls(
            sampler.sample(
                sequence,
                modulation=with_modulation,
                extended_duration=sequence.get_duration(
                    include_fall_time=with_modulation
                ),
            ),
            sequence.register,
            sequence.device,
            sampling_rate,
            config,
            evaluation_times,
        )
class Simulation:
    r"""Deprecated interface for simulating a pulse sequence with QuTiP.

    Warning:
        This class is deprecated in favour of ``QutipEmulator.from_sequence``.

    Args:
        sequence: The Pulser ``Sequence`` to simulate.
        sampling_rate: Fraction of the sequence samples used for the
            simulation; must lie between 0.05 and 1.0.
        config: Configuration to be used for this simulation.
        evaluation_times: Choose between:

            - "Full": every time step used to define the Hamiltonian.
            - "Minimal": only the initial and final times.
            - An ArrayLike object of times in µs.
            - A float acting as a sampling rate for the resulting state.
        with_modulation: Whether to simulate the programmed input or the
            expected (modulated) output.
    """

    def __init__(
        self,
        sequence: Sequence,
        sampling_rate: float = 1.0,
        config: Optional[SimConfig] = None,
        evaluation_times: Union[float, str, ArrayLike] = "Full",
        with_modulation: bool = False,
    ) -> None:
        """Instantiates a Simulation object."""
        # Always surface the deprecation, even if warnings are filtered out.
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            warnings.warn(
                DeprecationWarning(
                    "The `Simulation` class is deprecated,"
                    " use `QutipEmulator.from_sequence` instead."
                )
            )
        self._seq = sequence
        self._modulated = with_modulation
        # All real work is delegated to the emulator instance below.
        self._emulator = QutipEmulator.from_sequence(
            self._seq, sampling_rate, config, evaluation_times, self._modulated
        )

    @property
    def evaluation_times(self) -> np.ndarray:
        """The times (in µs) at which simulation results are returned.

        Args:
            value: Choose between:

                - "Full": every time step used to define the Hamiltonian.
                - "Minimal": only the initial and final times.
                - An ArrayLike object of times in µs.
                - A float acting as a sampling rate for the resulting state.
        """
        return self._emulator.evaluation_times

    @evaluation_times.setter
    def evaluation_times(self, value: Union[str, ArrayLike, float]) -> None:
        """Sets times at which the results of this simulation are returned."""
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            warnings.warn(
                DeprecationWarning(
                    "Setting `evaluation_times` is deprecated,"
                    " use `set_evaluation_times` instead."
                )
            )
        self._emulator.set_evaluation_times(value)

    @property
    def initial_state(self) -> qutip.Qobj:
        """The state the simulation starts from.

        Args:
            state: The initial state.
                Choose between:

                - "all-ground" for all atoms in ground state
                - An ArrayLike with a shape compatible with the system
                - A Qobj object
        """
        return self._emulator.initial_state

    @initial_state.setter
    def initial_state(self, value: Union[str, np.ndarray, qutip.Qobj]) -> None:
        """Sets the initial state of the simulation."""
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            warnings.warn(
                DeprecationWarning(
                    "Setting `initial_state` is deprecated,"
                    " use `set_initial_state` instead."
                )
            )
        self._emulator.set_initial_state(value)

    def draw(
        self,
        draw_phase_area: bool = False,
        draw_interp_pts: bool = False,
        draw_phase_shifts: bool = False,
        draw_phase_curve: bool = False,
        fig_name: str | None = None,
        kwargs_savefig: dict = {},
    ) -> None:
        """Draws the input sequence and the one used by the solver.

        Args:
            draw_phase_area: Show phase and area values as text on the plot.
            draw_interp_pts: For pulses built from InterpolatedWaveform,
                overlay the interpolation points (defaults to False). Not
                available when the sequence is modulated.
            draw_phase_shifts: Add phase shift and reference information.
            draw_phase_curve: Draw the phase changes in a dedicated curve
                (ignored if the phase never changes on the channel).
            fig_name: File name to save the figure under; not saved if None.
            kwargs_savefig: Keyword arguments forwarded to
                ``matplotlib.pyplot.savefig``; ignored when `fig_name` is
                ``None``.

        See Also:
            Sequence.draw(): Draws the sequence in its current state.
        """
        if draw_interp_pts and self._modulated:
            raise ValueError(
                "Can't draw the interpolation points when the sequence is "
                "modulated; `draw_interp_pts` must be `False`."
            )
        draw_sequence(
            self._seq,
            self._emulator._sampling_rate,
            draw_input=not self._modulated,
            draw_modulation=self._modulated,
            draw_phase_area=draw_phase_area,
            draw_interp_pts=draw_interp_pts,
            draw_phase_shifts=draw_phase_shifts,
            draw_phase_curve=draw_phase_curve,
        )
        if fig_name is not None:
            plt.savefig(fig_name, **kwargs_savefig)
        plt.show()

    def __getattr__(self, name: str) -> Any:
        # Anything not defined here is forwarded to the wrapped emulator.
        return getattr(self._emulator, name)
|
class Node:
    """A binary-tree node holding a payload and two child links."""

    def __init__(self, data=None, leftlink=None, rightlink=None):
        # Name-mangled attributes keep the fields private to Node.
        self.__data = data
        self.__left = leftlink
        self.__right = rightlink

    def getData(self):
        """Return the stored payload."""
        return self.__data

    def getLeft(self):
        """Return the left child (or None)."""
        return self.__left

    def getRight(self):
        """Return the right child (or None)."""
        return self.__right

    def setLeft(self, left1):
        """Replace the left child link."""
        self.__left = left1

    def setRight(self, right1):
        """Replace the right child link."""
        self.__right = right1
|
import numpy as np
from braindecode.analysis.create_amplitude_perturbation_corrs import load_exp_pred_fn
from braindecode.analysis.create_amplitude_perturbation_corrs import (
create_batch_inputs_targets_amplitude_phase,
perturb_and_compute_covariances)
from braindecode.paper.amp_corrs import transform_to_corrs_preds_last
def create_perturbation_correlations(basename, with_square,
        with_square_cov, after_softmax, n_samples):
    """Compute amplitude-perturbation correlations for a trained model.

    Loads the experiment and prediction function for `basename`, perturbs
    input amplitudes, correlates the perturbations with prediction changes
    and saves the result to "<basename>.all_corrs.npy".

    Args:
        basename: path prefix of the saved model/experiment.
        with_square: forwarded to perturb_and_compute_covariances.
        with_square_cov: forwarded to perturb_and_compute_covariances.
        after_softmax: whether predictions are taken after the softmax.
        n_samples: number of perturbation samples.
    """
    exp, pred_fn = load_exp_pred_fn(basename, after_softmax=after_softmax)
    inputs, targets, amplitudes, phases = create_batch_inputs_targets_amplitude_phase(
        exp)
    batch_size = exp.iterator.batch_size
    # Unperturbed predictions, one array entry per batch.
    all_orig_preds = np.array([pred_fn(batch_in) for batch_in in inputs])
    [all_covs, all_var_amps, all_var_preds] = perturb_and_compute_covariances(inputs, amplitudes, phases,
                                   all_orig_preds, batch_size,
                                   pred_fn, n_samples, with_square, with_square_cov)
    # Normalize covariances into correlations (predictions on last axis).
    all_corrs = transform_to_corrs_preds_last(all_covs, all_var_amps, all_var_preds)
    np.save(basename + ".all_corrs.npy", all_corrs)
if __name__ == "__main__":
    # BUG FIX: `basename` was never defined because every example
    # assignment was commented out, so running this script raised a
    # NameError. Take the model basename from the command line instead.
    # Examples:
    #   data/models/online/cnt/anla/with-highpass/3
    #   data/models/online/cnt/hawe/with-highpass/17
    #   data/models/online/cnt/lufi/with-highpass/1
    #   data/models/online/cnt/sama/with-highpass/3
    import sys
    if len(sys.argv) != 2:
        raise SystemExit("usage: {} <model-basename>".format(sys.argv[0]))
    basename = sys.argv[1]
    n_samples = 300
    with_square = False
    with_square_cov = False
    after_softmax = False
    create_perturbation_correlations(basename,
        with_square=with_square, with_square_cov=with_square_cov,
        after_softmax=after_softmax, n_samples=n_samples)
|
#!/usr/bin/env python
# Copyright (C) 2015 Dmitry Rodionov
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import os
import json
from getpass import getuser
from collections import namedtuple
from subprocess import Popen
from tempfile import NamedTemporaryFile
from common import *
syscall = namedtuple("syscall", "name args result errno timestamp pid")
def dtruss(target, **kwargs):
    """Returns a list of syscalls made by a target.
    Every syscall is a named tuple with the following properties:
    name (string), args (list), result (int), errno (int),
    timestamp(int) and pid(int).

    Keyword arguments:
        timeout: seconds before the dtrace script stops tracing.
        syscall: trace only this syscall name.
        run_as_root: if False (default), drop privileges via `sudo -u`.
        args: extra command-line arguments for the target.
    """
    if not target:
        raise Exception("Invalid target for dtruss()")
    # The dtrace wrapper writes JSON lines into this temp file (-W).
    output_file = NamedTemporaryFile()
    cmd = ["/bin/bash", path_for_script("dtruss.sh"), "-W", output_file.name, "-f"]
    # Add timeout
    if ("timeout" in kwargs) and (kwargs["timeout"] is not None):
        cmd += ["-K", str(kwargs["timeout"])]
    # Watch for a specific syscall only
    if "syscall" in kwargs:
        watch_specific_syscall = True
        cmd += ["-t", kwargs["syscall"]]
    else:
        watch_specific_syscall = False
    if "run_as_root" in kwargs:
        run_as_root = kwargs["run_as_root"]
    else:
        run_as_root = False
    # When we don't want to run the target as root, we have to drop privileges
    # with `sudo -u current_user` right before calling the target.
    if not run_as_root:
        cmd += ["sudo", "-u", getuser()]
    # Add target path
    cmd += [sanitize_path(target)]
    # Arguments for the target
    if "args" in kwargs:
        cmd += kwargs["args"]
    # The dtrace script will take care of timeout itself, so we just launch
    # it asynchronously
    with open(os.devnull, "w") as f:
        handle = Popen(cmd, stdout=f, stderr=f)
    # If we use `sudo -u` for dropping root privileges, we also have to
    # exclude it's output from the results
    sudo_pid = None
    # Stream entries as they appear; the sentinel line marks completion.
    for entry in filelines(output_file):
        if "## dtruss.sh done ##" in entry.strip():
            break
        syscall = _parse_syscall(entry.strip())
        if syscall is None:
            continue
        # sudo's syscalls will be the first ones, so remember its pid
        if not run_as_root and sudo_pid is None and not watch_specific_syscall:
            sudo_pid = syscall.pid
        elif syscall.pid != sudo_pid:
            yield syscall
    output_file.close()
#
# Parsing implementation details
#
def _parse_syscall(string):
    """Parse one JSON line of dtruss output into a `syscall` named tuple.

    Returns None when the line is not valid JSON or lacks a required field,
    so the caller can skip malformed lines instead of aborting the trace.
    """
    # dtruss output may embed NUL escapes that break json.loads.
    string = string.replace("\\0", "")
    try:
        parsed = json.loads(string)
        # Field access also lives in the try: previously a missing key
        # escaped as an uncaught KeyError (and a bare `except:` hid
        # everything else, including KeyboardInterrupt).
        return syscall(name=parsed["syscall"],
                       args=parsed["args"],
                       result=parsed["retval"],
                       errno=parsed["errno"],
                       pid=parsed["pid"],
                       timestamp=parsed["timestamp"])
    except (ValueError, KeyError, TypeError):
        return None
|
# Copyright 2010-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
__all__ = ['digraph']
from collections import deque
import sys
from portage import _unicode_decode
from portage.util import writemsg
class digraph(object):
    """
    A directed graph object.
    """

    def __init__(self):
        """Create an empty digraph"""
        # { node : ( { child : priorities }, { parent : priorities }, node ) }
        # The same priorities list object is shared between the child map of
        # the parent and the parent map of the child.
        self.nodes = {}
        # Insertion order of nodes; kept so iteration is deterministic.
        self.order = []

    def add(self, node, parent, priority=0):
        """Adds the specified node with the specified parent.
        If the dep is a soft-dep and the node already has a hard
        relationship to the parent, the relationship is left as hard."""
        if node not in self.nodes:
            self.nodes[node] = ({}, {}, node)
            self.order.append(node)
        if not parent:
            return
        if parent not in self.nodes:
            self.nodes[parent] = ({}, {}, parent)
            self.order.append(parent)
        priorities = self.nodes[node][1].get(parent)
        if priorities is None:
            priorities = []
            # Shared list: one object referenced from both directions.
            self.nodes[node][1][parent] = priorities
            self.nodes[parent][0][node] = priorities
        priorities.append(priority)
        # Kept sorted so priorities[-1] is always the strongest priority.
        priorities.sort()

    def remove(self, node):
        """Removes the specified node from the digraph, also removing
        and ties to other nodes in the digraph. Raises KeyError if the
        node doesn't exist."""
        if node not in self.nodes:
            raise KeyError(node)
        for parent in self.nodes[node][1]:
            del self.nodes[parent][0][node]
        for child in self.nodes[node][0]:
            del self.nodes[child][1][node]
        del self.nodes[node]
        self.order.remove(node)

    def difference_update(self, t):
        """
        Remove all given nodes from node_set. This is more efficient
        than multiple calls to the remove() method.
        """
        # Convert to a frozenset for O(1) membership tests.
        if isinstance(t, (list, tuple)) or \
            not hasattr(t, "__contains__"):
            t = frozenset(t)
        order = []
        for node in self.order:
            if node not in t:
                order.append(node)
                continue
            for parent in self.nodes[node][1]:
                del self.nodes[parent][0][node]
            for child in self.nodes[node][0]:
                del self.nodes[child][1][node]
            del self.nodes[node]
        self.order = order

    def remove_edge(self, child, parent):
        """
        Remove edge in the direction from child to parent. Note that it is
        possible for a remaining edge to exist in the opposite direction.
        Any endpoint vertices that become isolated will remain in the graph.
        """
        # Nothing should be modified when a KeyError is raised.
        for k in parent, child:
            if k not in self.nodes:
                raise KeyError(k)
        # Make sure the edge exists.
        if child not in self.nodes[parent][0]:
            raise KeyError(child)
        if parent not in self.nodes[child][1]:
            raise KeyError(parent)
        # Remove the edge.
        del self.nodes[child][1][parent]
        del self.nodes[parent][0][child]

    def __iter__(self):
        return iter(self.order)

    def contains(self, node):
        """Checks if the digraph contains mynode"""
        return node in self.nodes

    def get(self, key, default=None):
        # `self` doubles as a sentinel distinct from any stored value.
        node_data = self.nodes.get(key, self)
        if node_data is self:
            return default
        return node_data[2]

    def all_nodes(self):
        """Return a list of all nodes in the graph"""
        return self.order[:]

    def child_nodes(self, node, ignore_priority=None):
        """Return all children of the specified node"""
        if ignore_priority is None:
            return list(self.nodes[node][0])
        children = []
        if hasattr(ignore_priority, '__call__'):
            # ignore_priority is a predicate: keep a child if any of its
            # priorities is NOT ignored.
            for child, priorities in self.nodes[node][0].items():
                for priority in priorities:
                    if not ignore_priority(priority):
                        children.append(child)
                        break
        else:
            # ignore_priority is a threshold: keep a child whose strongest
            # priority exceeds it.
            for child, priorities in self.nodes[node][0].items():
                if ignore_priority < priorities[-1]:
                    children.append(child)
        return children

    def parent_nodes(self, node, ignore_priority=None):
        """Return all parents of the specified node"""
        if ignore_priority is None:
            return list(self.nodes[node][1])
        parents = []
        if hasattr(ignore_priority, '__call__'):
            for parent, priorities in self.nodes[node][1].items():
                for priority in priorities:
                    if not ignore_priority(priority):
                        parents.append(parent)
                        break
        else:
            for parent, priorities in self.nodes[node][1].items():
                if ignore_priority < priorities[-1]:
                    parents.append(parent)
        return parents

    def leaf_nodes(self, ignore_priority=None):
        """Return all nodes that have no children
        If ignore_soft_deps is True, soft deps are not counted as
        children in calculations."""
        leaf_nodes = []
        if ignore_priority is None:
            for node in self.order:
                if not self.nodes[node][0]:
                    leaf_nodes.append(node)
        elif hasattr(ignore_priority, '__call__'):
            for node in self.order:
                is_leaf_node = True
                for child, priorities in self.nodes[node][0].items():
                    for priority in priorities:
                        if not ignore_priority(priority):
                            is_leaf_node = False
                            break
                    if not is_leaf_node:
                        break
                if is_leaf_node:
                    leaf_nodes.append(node)
        else:
            for node in self.order:
                is_leaf_node = True
                for child, priorities in self.nodes[node][0].items():
                    if ignore_priority < priorities[-1]:
                        is_leaf_node = False
                        break
                if is_leaf_node:
                    leaf_nodes.append(node)
        return leaf_nodes

    def root_nodes(self, ignore_priority=None):
        """Return all nodes that have no parents.
        If ignore_soft_deps is True, soft deps are not counted as
        parents in calculations."""
        root_nodes = []
        if ignore_priority is None:
            for node in self.order:
                if not self.nodes[node][1]:
                    root_nodes.append(node)
        elif hasattr(ignore_priority, '__call__'):
            for node in self.order:
                is_root_node = True
                for parent, priorities in self.nodes[node][1].items():
                    for priority in priorities:
                        if not ignore_priority(priority):
                            is_root_node = False
                            break
                    if not is_root_node:
                        break
                if is_root_node:
                    root_nodes.append(node)
        else:
            for node in self.order:
                is_root_node = True
                for parent, priorities in self.nodes[node][1].items():
                    if ignore_priority < priorities[-1]:
                        is_root_node = False
                        break
                if is_root_node:
                    root_nodes.append(node)
        return root_nodes

    def __bool__(self):
        return bool(self.nodes)

    def is_empty(self):
        """Checks if the digraph is empty"""
        return len(self.nodes) == 0

    def clone(self):
        """Return a deep-ish copy; priorities lists are copied once and the
        copies are shared between both directions, preserving the invariant."""
        clone = digraph()
        clone.nodes = {}
        # memo maps id(original priorities list) -> its single clone.
        memo = {}
        for children, parents, node in self.nodes.values():
            children_clone = {}
            for child, priorities in children.items():
                priorities_clone = memo.get(id(priorities))
                if priorities_clone is None:
                    priorities_clone = priorities[:]
                    memo[id(priorities)] = priorities_clone
                children_clone[child] = priorities_clone
            parents_clone = {}
            for parent, priorities in parents.items():
                priorities_clone = memo.get(id(priorities))
                if priorities_clone is None:
                    priorities_clone = priorities[:]
                    memo[id(priorities)] = priorities_clone
                parents_clone[parent] = priorities_clone
            clone.nodes[node] = (children_clone, parents_clone, node)
        clone.order = self.order[:]
        return clone

    def delnode(self, node):
        """Remove `node`, ignoring it if absent."""
        try:
            self.remove(node)
        except KeyError:
            pass

    def firstzero(self):
        """Return the first leaf node, or None if there is none."""
        leaf_nodes = self.leaf_nodes()
        if leaf_nodes:
            return leaf_nodes[0]
        return None

    def hasallzeros(self, ignore_priority=None):
        """Return True if every node is a leaf."""
        return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
            len(self.order)

    def debug_print(self):
        def output(s):
            writemsg(s, noiselevel=-1)
        # Use _unicode_decode() to force unicode format
        # strings for python-2.x safety, ensuring that
        # node.__unicode__() is used when necessary.
        for node in self.nodes:
            output(_unicode_decode("%s ") % (node,))
            if self.nodes[node][0]:
                output("depends on\n")
            else:
                output("(no children)\n")
            for child, priorities in self.nodes[node][0].items():
                output(_unicode_decode("  %s (%s)\n") % \
                    (child, priorities[-1],))

    def bfs(self, start, ignore_priority=None):
        """Yield (parent, node) pairs in breadth-first order from `start`."""
        if start not in self:
            raise KeyError(start)
        queue, enqueued = deque([(None, start)]), set([start])
        while queue:
            parent, n = queue.popleft()
            yield parent, n
            new = set(self.child_nodes(n, ignore_priority)) - enqueued
            enqueued |= new
            queue.extend([(n, child) for child in new])

    def shortest_path(self, start, end, ignore_priority=None):
        """Return the shortest child-path from `start` to `end`, or None."""
        if start not in self:
            raise KeyError(start)
        elif end not in self:
            raise KeyError(end)
        paths = {None: []}
        for parent, child in self.bfs(start, ignore_priority):
            paths[child] = paths[parent] + [child]
            if child == end:
                return paths[child]
        return None

    def get_cycles(self, ignore_priority=None, max_length=None):
        """
        Returns all cycles that have at most length 'max_length'.
        If 'max_length' is 'None', all cycles are returned.
        """
        all_cycles = []
        for node in self.nodes:
            # If we have multiple paths of the same length, we have to
            # return them all, so that we always get the same results
            # even with PYTHONHASHSEED="random" enabled.
            shortest_path = None
            candidates = []
            for child in self.child_nodes(node, ignore_priority):
                path = self.shortest_path(child, node, ignore_priority)
                if path is None:
                    continue
                if not shortest_path or len(shortest_path) >= len(path):
                    shortest_path = path
                    candidates.append(path)
            if shortest_path and \
                (not max_length or len(shortest_path) <= max_length):
                for path in candidates:
                    if len(path) == len(shortest_path):
                        all_cycles.append(path)
        return all_cycles

    # Backward compatibility
    addnode = add
    allnodes = all_nodes
    allzeros = leaf_nodes
    hasnode = contains
    __contains__ = contains
    empty = is_empty
    copy = clone

    if sys.hexversion < 0x3000000:
        __nonzero__ = __bool__
|
#!/usr/bin/env python3
#
# This is my code that will check if in my schedule that there is a "Monday" in it without a function, and prints the schedule's length.
#
def main():
    """Check whether "Monday" appears in the schedule and print its length.

    BUG FIX: the original used Python 2 `print` statements and contained
    the syntactically invalid line `return(without a function).`, so the
    module could not even be imported under Python 3.
    """
    mycalendar1 = """Day Time Subject
    Monday 9:10 AM - 10:15 AM LA
    10:35 AM - 11:40 AM SS
    12:10 PM - 1:15 PM S
    Tuesday 9:10 AM - 10:15 AM Math
    10:35 AM - 11:40 AM Orchestra
    12:10 PM - 1:15 PM PF"""
    # This is my calendar.
    if "Monday" in mycalendar1:
        print("Monday is present.")
    else:
        print("Monday is not present.")
    # Print the schedule's length using the len() function.
    print(len(mycalendar1))
|
"""
Author: Ben Knisley [benknisley@gmail.com]
Date: 26 March, 2021
"""
import numpy as np
from scipy.signal import filtfilt, butter
def smooth_samples(samples):
    """
    Smooths out any major waves crossing the 0-axis.

    A low-pass Butterworth filter estimates the slow-moving baseline of the
    signal; subtracting that baseline recenters the samples around zero.

    Args:
        samples (np.array): The array of samples to process.

    Returns:
        smooth_samples (np.array): The smoothed array of samples.
    """
    # Estimate the slow baseline with an 8th-order low-pass filter
    # (zero-phase via filtfilt so the baseline is not shifted in time).
    b, a = butter(8, 0.01)
    baseline = filtfilt(b, a, samples.copy())
    # Removing the baseline normalizes values back around the 0-axis.
    return samples - baseline
def extract_data_pulses(samples):
    """
    Finds and returns a list of cropped data pulses.

    Builds a smoothed envelope of the rectified samples, binarizes it
    (threshold 8, passed positionally to samples2signal), then walks the
    resulting square wave, slicing out each high region as one pulse.
    """
    # Envelope: rectified-and-scaled samples, low-pass filtered.
    reference = np.abs(samples*1.25)
    b, a = butter(8, 0.008)
    reference = filtfilt(b, a, reference)
    reference = samples2signal(reference, 8)
    pulses = []
    # Start looking for the opposite of the initial level.
    current_bit = int(not reference[0])
    while current_bit in reference:
        clip_point = np.where(reference == current_bit)[0][0]
        if not current_bit:
            # Falling edge: everything before it is one candidate pulse.
            pulse = samples[:clip_point]
            if len(pulse) > 100:
                ## Clip to first high
                one_inx = np.where(pulse > 16)[0][0]
                pulse = pulse[one_inx:]
                pulses.append(pulse)
        reference = reference[clip_point:]
        samples = samples[clip_point:]
        current_bit = int(not current_bit)
    return pulses
##
def split_samples(samples):
"""
Splits a sample array into positive values, and negative values channels.
"""
positive_samples = samples.copy()
negative_samples = samples.copy()
positive_samples[positive_samples < 0] = 0
negative_samples[negative_samples > 0] = 0
negative_samples = np.abs(negative_samples)
return positive_samples, negative_samples
def samples2signal(samples, threshold=16):
    """
    Returns a binary 0/1 signal from a sample array.

    BUG FIX: the original thresholded the caller's array in place,
    silently destroying the input. This version works on a comparison
    result instead and never mutates `samples`.

    Args:
        samples (np.array): sample values.
        threshold: values >= threshold map to 1, others to 0.

    Returns:
        np.array: int array of 0s and 1s.
    """
    return (np.asarray(samples) >= threshold).astype(int)
def get_cycle_sizes(signal):
    """Return the length of each constant-level run in a binary signal.

    Each entry is the offset of the next level flip, measured from the
    previous flip (or from the start for the first entry).
    """
    run_lengths = []
    level = bool(signal[0])
    # Keep slicing off the current run until the opposite level vanishes.
    while int(not level) in signal:
        flip_at = np.where(signal == int(not level))[0][0]
        run_lengths.append(flip_at)
        signal = signal[flip_at:]
        level = bool(signal[0])
    return run_lengths
def manchester_decode(signal, break_point=40):
    """
    Decodes a manchester encoded signal to a binary message.

    A run longer than `break_point` marks a bit flip; shorter runs come in
    pairs, so the run following a short one is skipped.
    """
    cycle_sizes = get_cycle_sizes(signal)
    flips = []
    i = 0
    total = len(cycle_sizes)
    while i < total:
        is_long = cycle_sizes[i] > break_point
        flips.append(is_long)
        if not is_long:
            i += 1  # short runs pair up: skip the partner
        i += 1
    bits = []
    bit = 1
    for flip in flips:
        if flip:
            bit = not bit
        bits.append(str(int(bit)))
    return ''.join(bits)
|
import numpy as np
# Demonstrate a zero matrix and its raw memory footprint.
x = np.zeros((3,5))
print(x)
# 3 * 5 elements * 8 bytes each (default float64) = 120 bytes.
print(x.nbytes)
from django.apps import AppConfig
class RevenueExpendituresConfig(AppConfig):
    """Django application configuration for the revenue_expenditures app."""
    name = 'revenue_expenditures'
|
def swap(st):
    """Return `st` with every lowercase vowel upper-cased.

    Example: "Hello World!" -> "HEllO WOrld!". Characters that are not
    in 'aeiou' (including already-uppercase vowels) pass through as-is.
    """
    vowels = 'aeiou'
    return ''.join(ch.upper() if ch in vowels else ch for ch in st)
'''
When provided with a String, capitalize all vowels
For example:
Input : "Hello World!"
Output : "HEllO WOrld!"
'''
|
# THIS FILE HOUSES MAIN APPLICATION AND ENDPOINTS
# COMPLEX CALCULATION AND DB QUERIES SHOULD BE MADE ELSEWHERE
from flask import Flask, Response
from flask_cors import cross_origin, CORS
import json
import service.model_service as s
application = Flask(__name__)
cors = CORS(application)
@application.route("/anomalies/<index>/<thresh>", methods=['GET', 'OPTIONS'])
@cross_origin()
def getAnomalies(index, thresh):
    """Return anomalies for `index` above `thresh` as a JSON array of records.

    Both path parameters arrive as strings; any conversion is delegated to
    the service layer (s.get_anomalies).
    """
    df = s.get_anomalies(index , thresh)
    # Stringify the index so json.dumps can serialize it (e.g. timestamps).
    df.index = df.index.astype(str)
    print(df.index)
    return Response(json.dumps(df.reset_index().to_dict(orient='records')), mimetype='application/json')
if __name__ == "__main__":
    # Development server only; use a proper WSGI server in production.
    application.run(debug=True, port="5000")
import os

# Project root: two levels above this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__),)

# NOTE(review): hard-coded secret key committed to source control —
# move to an environment variable before deploying.
SECRET_KEY = '1+z60-pf0mz6_7ofaahfa*u_g7a95f(68r&1s-3#_+%0cymr_g'

# Debug flags: must both be False in production.
DEBUG = True
TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

# Django contrib apps followed by the project's local apps.
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'exercise',
    'login',
    'collection',
    'activity',
    'resources',
    'fortune',
    'bbs',
    'jobs',
    'complaint',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'subject.urls'
WSGI_APPLICATION = 'subject.wsgi.application'

# MySQL backend; credentials are expected to be filled in per deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': '',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Internationalization / time zone.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static and media files; backslashes normalized for Windows paths.
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = (
    'static',
)
STATIC_ROOT = os.path.join(BASE_DIR,'static/').replace("\\","/")
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/').replace("\\","/")

# Outgoing mail; host/credentials expected per deployment.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = ''
EMAIL_PORT = 25
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_SUBJECT_PREFIX = ''

SSH_KEY = 'pbkdf2_sha256'

# Password hashers, strongest first.
PASSWORD_HASHERS = (
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
    'django.contrib.auth.hashers.SHA1PasswordHasher',
    'django.contrib.auth.hashers.MD5PasswordHasher',
    'django.contrib.auth.hashers.CryptPasswordHasher',
)
|
# Read an integer from stdin and print its double.
sv=int(input())
print(sv*2)
|
import requests
from urllib.parse import quote
import json
# first try
def example_parse_some_data():
    """Fetch Open Library search results for "clean code".

    Returns the raw requests.Response (not parsed).
    """
    url = "https://openlibrary.org/search.json?q=clean%20code"
    return requests.get(url)
def example_get_some_json():
    """Fetch Open Library search results for "clean code" as parsed JSON."""
    url = "https://openlibrary.org/search.json?q=clean%20code"
    response = requests.get(url)
    return response.json()
def example_get_author():
    """Return the first author of the first "clean code" search result.

    Also prints the full JSON payload for inspection.
    """
    url = "https://openlibrary.org/search.json?q=clean%20code"
    response = requests.get(url)
    json = response.json()  # shadows the module-level `json` import locally
    print(json)
    return json["docs"][0]["author_name"][0]
def example_find_first_author(book_name):
    """Return the first author of the top search hit for `book_name`.

    The title is URL-encoded with quote() before being appended to the
    query string.
    """
    search = quote(book_name)
    url = "https://openlibrary.org/search.json?q=" + search
    response = requests.get(url)
    json = response.json()  # shadows the module-level `json` import locally
    return json["docs"][0]["author_name"][0]
|
from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User
# Create your models here.
class Book(models.Model):
    """A book record with a name, author and short description."""
    name = models.CharField(max_length=45)
    author = models.CharField(max_length=45)
    description = models.CharField(max_length=225)

    class Meta:
        # Explicit table name instead of the default "<app>_book".
        db_table = 'Books'
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    """Admin for Book with an extra computed 'NEW' column.

    NOTE(review): including the computed 'NEW' in `fields` (the edit form)
    normally requires it to also be in `readonly_fields` — verify this
    renders without errors in the admin.
    """
    fields = ('name', 'author', 'description', 'NEW')
    list_display = ('name', 'author', 'description', 'NEW')
    search_fields = ('name', 'author')

    def NEW(self, obj):
        # Computed column; always shows the constant 'new'.
        return 'new'
class signupModel(models.Model):
    """Signup record.

    NOTE(review): the field names (author, description) look copy-pasted
    from Book — confirm they are the intended fields for a signup.
    """
    name = models.CharField(max_length=45)
    author = models.CharField(max_length=45)
    description = models.CharField(max_length=225)
from flask_wtf import FlaskForm
from wtforms import PasswordField, SubmitField, StringField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired
class SignInForm(FlaskForm):
    """Sign-in form with required email and password fields.

    Field labels are user-facing strings in Russian ("Пароль" = password,
    "Вход" = sign in) and are intentionally left untranslated.
    """
    email = EmailField("Email", validators=[DataRequired()])
    password = PasswordField("Пароль", validators=[DataRequired()])
    submit = SubmitField("Вход")
|
from utils import *
class Solver():
    """Memoized game-tree solver producing (value, remoteness) per position."""

    def __init__(self):
        # Maps hash(position) -> (Value, remoteness).
        self.memory = {}

    def solve(self,game):
        """Return (value, remoteness) for `game`, memoized by position hash."""
        hashed = hash(game.position)
        if hashed in self.memory:
            return self.memory[hashed]
        else:
            if game.PrimitiveValue() != Value.UNDECIDED:
                # Terminal position: its primitive value, remoteness 0.
                self.memory[hashed] = (game.PrimitiveValue(), 0)
                return self.memory[hashed]
            else:
                sub_pos = []
                children =[]
                for move in game.GenerateMoves():
                    newGame = game.DoMove(move)
                    children.append(newGame)
                for child in children:
                    if hash(child.position) in self.memory:
                        sub_pos.append(self.memory[hash(child.position)])
                    else:
                        sub_pos.append(self.solve(child))
                values = [item[0] for item in sub_pos]
                if Value.LOSE in values:
                    # A losing child makes this a WIN; take the fastest win.
                    remoteness = 1 + min([item[1] for item in sub_pos if item[0] == Value.LOSE])
                    self.memory[hashed] = (Value.WIN, remoteness)
                elif Value.TIE in values:
                    remoteness = 1 + max([item[1] for item in sub_pos if item[0] == Value.TIE])
                    self.memory[hashed] = (Value.TIE, remoteness)
                else:
                    # Every child wins for the opponent: LOSE; delay longest.
                    remoteness = 1 + max([item[1] for item in sub_pos])
                    self.memory[hashed] = (Value.LOSE, remoteness)
                print(len(self.memory))
                return self.memory[hashed]

    def printMemory(self,game):
        """Solve `game` and dump the whole memo table."""
        self.solve(game)
        print(self.memory)

    def printAnalysis(self,game,total_remote):
        """Print a win/lose/tie count table, one row per remoteness level.

        NOTE(review): self.memory holds Value enum members, yet the filters
        below compare item[0] against the strings "win"/"lose"/"tie".
        Unless Value is a str-valued enum with exactly these values, every
        count will be zero — confirm against the Value definition in utils.
        """
        self.solve(game)
        vals = [value for value in self.memory.values()]
        tot_win = 0
        tot_lose = 0
        tot_ties = 0
        print('-' * 43)
        print("{:<10s}{:>8s}{:>8s}{:>8s}{:>8s}".format('Remoteness', 'Win', 'Lose', 'Tie', 'Total'))
        print('-' * 43)
        for i in range(total_remote)[::-1]:
            win = len([item for item in vals if item[0] == "win" and item[1] == i])
            tot_win += win
            losses = len([item for item in vals if item[0] == "lose" and item[1] == i])
            tot_lose += losses
            ties = len([item for item in vals if item[0] == "tie" and item[1] == i])
            tot_ties += ties
            total = win + losses + ties
            print("{:<10d}{:>8d}{:>8d}{:>8d}{:>8d}".format(i,win,losses,ties,total))
        print('-' * 43)
        tot = tot_win + tot_lose + tot_ties
        print("{:<10s}{:>8d}{:>8d}{:>8d}{:>8d}".format('Total',tot_win,tot_lose,tot_ties,tot))
|
#!/usr/bin/env python
# ----------------------------------------------------------
# event MODULE for GlassCockpit procject RJGlass
# ----------------------------------------------------------
# This module will take the keys that are pressed on the keyboard and take appropriate action.
#
# Copyright 2009 Michael LaBrie
#
# This file is part of RJGlass.
#
# RJGlass is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# RJGlass is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ---------------------------------------------------------------
#This modules creates, assigns, and processes the events for the Glass Protocol.
#This module is used for both the server and client.
import PySimConnect
import struct
import logging
class event_obj(object):
    """One Glass Protocol event: a wire address plus an unpack format and the
    callback invoked when the event arrives.

    Not to be confused with FSX events through SimConnect; those are handled
    separately.
    """

    def __init__(self, addr, pack_format, func, multiple):
        # Address of the event on the wire.
        self.addr = addr
        # struct format: "f" float, "i" int, "I" unsigned int.
        self.pack_format = pack_format
        # Payload size in bytes, derived from the format.
        self.pack_size = struct.calcsize(pack_format)
        # Callback invoked with the event value (or repeatedly, see below).
        self.func = func
        # False: value is passed as the argument.
        # True: value is the number of times the callback fires (no args).
        self.multiple = multiple

    def process_event(self, value):
        """Dispatch `value` to the callback according to `multiple`."""
        if not self.multiple:
            self.func(value)
            return
        # `value` is a repeat count and must be an int.
        if isinstance(value, int):
            for _ in range(value):
                self.func()
        else:
            logging.info("event: Can't process Event %0X as multiple, data not int.", self.addr)
class event_c(object):
    """Registry of all Glass Protocol events for one aircraft.

    Maps event wire addresses to event_obj instances and dispatches
    incoming (addr, data) pairs to the matching handler.
    """

    def __init__(self, aircraft):
        self.aircraft = aircraft
        # Dict containing all events, keyed by wire address.
        self.dict = self.create_dict()
        self.keys = self.dict.keys()

    def exists(self, addr):
        """Return True if an event is registered at `addr`."""
        return addr in self.keys

    def size(self, addr):
        """Return the payload size in bytes for `addr` (None if unknown)."""
        if addr in self.keys:
            return self.dict[addr].pack_size

    def process(self, addr, data):
        """Unpack `data` with the event's format and invoke its handler."""
        if addr in self.keys:
            e = self.dict[addr]
            value = struct.unpack(e.pack_format, data)[0]
            e.process_event(value)
        else:
            # BUG FIX: the original line passed `%addr` after the comma,
            # which is a SyntaxError; `addr` is now a lazy logging argument.
            logging.info(
                "event: Event Obj Not Found - Not Processed by Server %r",
                addr)
            # Not an error, as the event may not apply to the server.

    def create_dict(self):
        """Build the address -> event_obj table for this aircraft."""
        # Renamed from `dict` to avoid shadowing the builtin.
        events = {}

        def add_event(address, format, func, multiple=False):
            events[address] = event_obj(address, format, func, multiple)

        # Load up the table with all variables.
        aircraft = self.aircraft
        # Speed
        add_event(0xA100, "f", aircraft.HSI.cycle_Bearing1, True)
        add_event(0xA104, "i", aircraft.airspeed.set_bug, False)
        # Nav1 Test
        add_event(0xA600, "f", aircraft.Com_1.set_active_freq, False)
        add_event(0xA601, "f", aircraft.Com_1.set_standby_freq, False)
        add_event(0xA602, "f", aircraft.Com_2.set_active_freq, False)
        add_event(0xA603, "f", aircraft.Com_2.set_standby_freq, False)
        add_event(0xA604, "f", aircraft.Nav_1.set_active_freq, False)
        add_event(0xA605, "f", aircraft.Nav_1.set_standby_freq, False)
        add_event(0xA606, "f", aircraft.Nav_2.set_active_freq, False)
        add_event(0xA607, "f", aircraft.Nav_2.set_standby_freq, False)
        return events
import torch
import torch.nn
import cv2
import numpy as np
import pickle
import socket
import sys
import os
import time
from joblib import load
from PIL import Image
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import plot_confusion_matrix
from sklearn import preprocessing
from torchvision.transforms import ToTensor
from facenet_pytorch import InceptionResnetV1
class face_recognition:
    """Face / hair recognition server over a Unix domain socket.

    On construction it loads the models, binds a UDS socket, accepts one
    client and then serves recognition requests forever: each request is a
    pickled (face_image, features_image) pair terminated by b'End', and the
    reply is "face_id color length" as an encoded string.
    """

    def __init__(self):
        # Pretrained FaceNet backbone for 512-d embeddings.
        self.resnet = InceptionResnetV1(pretrained='vggface2').eval()
        ml_path = os.path.dirname(os.path.realpath(__file__)) + '/ml_models/'
        # Downstream classifiers operating on the embeddings.
        self.face_model = load(ml_path + 'face_model.joblib')
        self.color_model = load(ml_path + 'hair_color_recog.joblib')
        self.length_model = load(ml_path + 'hair_length_recog.joblib')
        path = os.environ['XDG_RUNTIME_DIR']
        server_address = path + '/uds_socket'
        # Remove a stale socket file from a previous run, if any.
        try:
            os.unlink(server_address)
        except OSError:
            if os.path.exists(server_address):
                raise
        # Create a UDS socket
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # Bind the socket to the port
        print('starting up on', server_address)
        self.sock.bind(server_address)
        # Listen for incoming connections
        self.sock.listen()
        connection, client_address = self.sock.accept()
        print('Connected!')
        try:
            while True:
                # reconstruct data: read 4096-byte chunks until the
                # b'End' terminator arrives.
                packets = []
                while True:
                    packet = connection.recv(4096)
                    if packet[-3:] == b'End':
                        packets.append(packet[:-3])
                        break
                    packets.append(packet)
                data = b"".join(packets)
                # NOTE(review): pickle.loads on socket data is unsafe if the
                # peer is untrusted — acceptable only for a local trusted client.
                img_face, img_features = pickle.loads(data, encoding='latin1')
                face_emb = self.get_embedding(img_face)
                feature_emb = self.get_embedding(img_features)
                # perform recognition
                face_id = self.recognition(self.face_model, face_emb)
                color = self.recognition(self.color_model, feature_emb)
                length = self.recognition(self.length_model, feature_emb)
                # pack results into a list
                result = [face_id[0], color[0], length[0]]
                data = str.encode(' '.join(str(x) for x in result))
                # send result back
                connection.send(data)
        finally:
            connection.close()

    def get_embedding(self, img_cv):
        """Return the FaceNet embedding of a cv2 image as [1 x 512 vector]."""
        # convert cv2 image to PIL Image
        img = Image.fromarray(img_cv)
        img = img.resize((160, 160))
        img.show()
        img = ToTensor()(img)
        # calculate embeddings
        emb = self.resnet(img.unsqueeze(0))
        return [emb.detach().numpy().ravel()]

    def recognition(self, model, emb):
        """Run a fitted sklearn classifier on an embedding list."""
        return model.predict(emb)
if __name__ == '__main__':
    # Constructing the server object starts serving immediately (blocking).
    face_recognition()
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File writing functions.
This module contain function to write files only if their contents would
change.
"""
import os.path
import log
def WriteIfContentDifferent(filename, content):
    """Write file only if content is different or if filename does not exist.

    Args:
        filename: filename of file.
        content: string containing contents of file.
    """
    if os.path.exists(filename):
        # Context managers guarantee the handle is closed even on errors
        # (the original used explicit open/close and a stray semicolon).
        with open(filename, 'r') as f:
            old_content = f.read()
        if old_content == content:
            # Unchanged: skip the write so the mtime is preserved.
            return
    with open(filename, 'w') as f:
        f.write(content)
    log.Info('Writing %s' % filename)
|
import serial
from drawnow import *
import matplotlib.pyplot as plt
import numpy as np
from datetime import date
import csv
arduino = serial.Serial('COM3', 9600)
beer_temps = []
fridge_temps = []
hours = []
plt.ion()
def makeFig():
    """Redraw the live two-axis temperature plot.

    Reads the module-level beer_temps / fridge_temps lists; invoked
    repeatedly via drawnow().
    """
    plt.ylim(0, 35)
    plt.title('Fermentation temperatures live')
    plt.grid(True, which='both')
    plt.ylabel('Beer temperature (°C)')
    plt.plot(beer_temps, 'ro-', label = 'Beer temp °C')
    plt.legend(loc='upper left')
    # Second y-axis for the ambient/fridge series.
    plt2=plt.twinx()
    plt.ylim(0,35)
    plt.ylabel('Fridge (Ambient) temperature (°C)')
    plt2.plot(fridge_temps, 'b^-', label = 'Fridge temp °C')
    plt2.legend(loc='upper right')
def draw_graph():
    """Continuously read temperatures from the Arduino, plot them live,
    and dump a CSV snapshot of the visible window every 24 samples.
    """
    count = 0
    while True:
        while (arduino.inWaiting()==0): #waits until there is data
            pass
        temps_data = arduino.readline().decode("utf-8").split(" , ")
        bt = float(temps_data[0])
        ft = float(temps_data[1])
        beer_temps.append(bt)
        fridge_temps.append(ft)
        drawnow(makeFig)
        plt.pause(.000001)
        count += 1
        hours.append(count)
        if count>=72:
            # Keep only the last 72 samples on screen. BUG FIX: also drop
            # the matching hour so the CSV columns stay aligned (previously
            # hours grew unbounded and zip() paired wrong hours with temps).
            beer_temps.pop(0)
            fridge_temps.pop(0)
            hours.pop(0)
        if count%24==0:
            logs = zip(beer_temps, fridge_temps, hours)
            with open('{}.csv'.format(date.today()), 'w', newline='') as csvfile:
                wr = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
                # BUG FIX: writerow() dumped the whole zip object into a
                # single row; writerows() emits one row per sample.
                wr.writerows(logs)
draw_graph()
|
__author__ = 'Sebastian Bernasek'
import os
import matplotlib.pyplot as plt
class Base:
    """
    Base class for figures providing some common methods.

    Attributes:
        data (pd.DataFrame) - data
        fig (matplotlib.figure.Figure)
    """

    # set default directory as class attribute
    directory = 'graphics'

    def __init__(self, data):
        """
        Instantiate Figure.

        Args:
            data (pd.DataFrame) - data for figure
        """
        self.data = data
        self.fig = None

    def save(self,
             name='figure',
             directory=None,
             fmt='pdf',
             dpi=300,
             rasterized=False):
        """
        Save figure.

        Args:
            name (str) - filename
            directory (str) - target directory
            fmt (str) - file format. BUG FIX: the previous default 'pdata'
                is not a format savefig accepts, so saving with defaults
                always raised; 'pdf' is the evident intent.
            dpi (int) - resolution
            rasterized (bool) - if True, save rasterized version
        """
        # use class default path if none provided
        if directory is None:
            directory = self.directory
        # construct filepath
        filepath = os.path.join(directory, name+'.'+fmt)
        # save figure
        self.fig.savefig(filepath,
                         dpi=dpi,
                         format=fmt,
                         transparent=True,
                         rasterized=rasterized)

    def show(self):
        """ Display figure. """
        # BUG FIX: plt.show() does not take a figure argument; the old
        # plt.show(self.fig) relied on a long-removed signature.
        plt.show()

    @staticmethod
    def create_figure(figsize=(1, 1.5), nrows=1, ncols=1):
        """
        Create figure.

        Args:
            figsize (tuple) - figure size
            nrows (int) - number of rows
            ncols (int) - number of columns

        Returns:
            fig (matplotlib.figure.Figure)
        """
        fig, _ = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols)
        return fig

    @staticmethod
    def get_yan_channel(experiment):
        """
        Determine which fluorescence channel corresponds to Yan expression. Yan is always the red or blue channel not marked as the normalization channel.

        Args:
            experiment (Experiment)

        Returns:
            yan_channel (str) - Yan color channel
        """
        if experiment.discs[0].normalization == 'ch0':
            yan_channel = 'ch2_normalized'
        else:
            yan_channel = 'ch0_normalized'
        return yan_channel
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-05 17:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable boolean member flags ``msr`` and ``so``.

    Auto-generated by Django; NullBooleanField stores True/False/unknown.
    """
    dependencies = [
        ('manage_member', '0015_auto_20180705_1141'),
    ]
    operations = [
        migrations.AddField(
            model_name='member',
            name='msr',
            field=models.NullBooleanField(),
        ),
        migrations.AddField(
            model_name='member',
            name='so',
            field=models.NullBooleanField(),
        ),
    ]
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Created By Liang Jun Copyright owned
import sys,os,math
import cv2 as cv
import numpy as np
VESSEL = [1,1,1];  # BGR upper bound: near-black pixels are treated as "vessel" (compared with <=)
ERYTHROCYTE = [0,255,0];  # exact pure-green annotation marker
NEGATIVE = [0,0,255];  # exact pure-red annotation marker (BGR order)
RangeType = 3; #5,7,9,11,13,15,21 -- side length of the square patch sampled per label
# ["Nine-Box",
# "Sixteen-Box",
# "Five-Sixteen-Box",
# "Five-Sixteen-Box",];
def getDataSquare(sourceImg, y, x):
    """Return (ok, flat_patch) for the RangeType x RangeType square at (y, x).

    ok is False (with None data) when the square would run past the image
    border; otherwise the patch is returned flattened to one dimension.
    """
    global RangeType
    rows = sourceImg.shape[0]
    cols = sourceImg.shape[1]
    out_of_bounds = (y + RangeType) > rows or (x + RangeType) > cols
    if out_of_bounds:
        return False, None
    patch = sourceImg[y:y + RangeType, x:x + RangeType]
    return True, patch.reshape(-1)
def readDataFromFile(trainImageName):
    """Collect labelled pixel patches from one annotated training image.

    Reads the colour-annotated `<base>_train.*` image and its original
    `<base>.png` neighbour, samples RangeType x RangeType patches on a
    grid, and buckets each flattened patch by the annotation colour at
    its top-left corner.  Returns (vesselData, negativeData,
    erythrocyteData) as lists of flat numpy arrays.
    """
    global VESSEL,ERYTHROCYTE,NEGATIVE,RangeType;
    baseName = trainImageName.split('.')[0];
    markedImage = cv.imread(trainImageName);
    # The unannotated original lives next to the *_train file.
    originalName = baseName.replace("_train","")+".png";
    originalFile = cv.imread(originalName);
    vesselData = [];
    negativeData = [];
    erythrocyteData = [];
    # Step by RangeType-1 so adjacent patches overlap by one row/column.
    for y in range(0,markedImage.shape[0],RangeType-1):
        for x in range(0,markedImage.shape[1],RangeType-1):
            isGet,data = getDataSquare(originalFile,y,x);
            if isGet == False:
                continue;
            color = markedImage[y,x];
            # originalColor = originalFile[y,x];
            # <= VESSEL ([1,1,1]) matches near-black marks; the green and
            # red markers must match exactly.
            if (color <= np.array(VESSEL)).all():
                vesselData.append(data);
            elif (color == np.array(ERYTHROCYTE)).all():
                erythrocyteData.append(data);
            elif (color == np.array(NEGATIVE)).all():
                negativeData.append(data);
    # print(len(vesselData));
    # print(len(negativeData));
    # print(len(erythrocyteData));
    return vesselData,negativeData,erythrocyteData;
def writeToFile(fileHandle, data, dataType):
    """Append every sample in `data` to `fileHandle` as CSV rows.

    Each row is "1,<features...>,<dataType>"; the leading 1 is the bias
    term.  A blank line is printed to stdout before the first row as a
    progress marker, matching the original behaviour, and the handle is
    flushed after every row.
    """
    for index, sample in enumerate(data):
        if index == 0:
            print()
        fields = ['1']  # bias column
        fields.extend(str(item) for item in sample)
        fields.append('%d' % dataType)
        fileHandle.write(','.join(fields) + '\n')
        fileHandle.flush()
def main(argv):
    """Build train_<RangeType>.csv from every *_train image in a folder.

    argv[1] must be a folder containing annotated `<name>_train.*` images
    next to their original `<name>.png` counterparts.  Returns False on
    bad usage, otherwise None.
    """
    if len(argv) < 2:
        usage="Usage: \n 1 Parameters are needed:\n Trian File Folder with Original Images. "
        print(usage);
        return False;
    global RangeType;
    trainFolder = argv[1];
    vessel = [];
    negative = [];
    erythrocyte = [];
    trainCsv = os.path.join(trainFolder,"train_%d.csv"%(RangeType));
    # Start from a fresh CSV on every run.
    if os.path.isfile(trainCsv):
        os.remove(trainCsv);
    file = open(trainCsv, "w+");
    if os.path.exists(trainFolder):
        for fileName in os.listdir(trainFolder):
            trianFileName = os.path.join(trainFolder,fileName);
            # Only process *_train files whose original image also exists.
            if os.path.isfile(trianFileName) and trianFileName.find("_train") > 0 and os.path.isfile(trianFileName.replace("_train","")):
                print("Processing: "+fileName+" ");
                vessel_,negative_,erythrocyte_ = readDataFromFile(trianFileName);
                # originalFile = trianFileName.replace("_train","");
                # print(originalFile);
                vessel.extend(vessel_);
                negative.extend(negative_);
                erythrocyte.extend(erythrocyte_);
                # Class labels: 1 = vessel, 0 = negative, 2 = erythrocyte.
                writeToFile(file,vessel_,dataType=1);
                writeToFile(file,negative_,dataType=0);
                writeToFile(file,erythrocyte_,dataType=2);
    file.close();
    # Summary counts of collected samples per class.
    print(len(vessel));
    print(len(negative));
    print(len(erythrocyte));
if __name__ == '__main__':
main(sys.argv) |
from unittest import TestSuite
from zope.testing import doctest
from Testing import ZopeTestCase as ztc
from z3c.table.testing import setUp
from z3c.table.testing import tearDown
# Doctest option flags shared by the suites below.
optionflags = (
    doctest.REPORT_ONLY_FIRST_FAILURE |
    doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
def test_suite():
    """Return the functional doctest suite for provider.txt."""
    return TestSuite([
        ztc.FunctionalDocFileSuite(
            'provider.txt', package='plone.z3ctable.tests',
            setUp=setUp, tearDown=tearDown, optionflags=optionflags),
    ])
|
"""
Find the Largest Even Number
Write a function that finds the largest even
number in a list. Return -1 if not found.
The use of built-in function max() is prohibited.
Examples:
largest_even([3, 7, 2, 1, 7, 9, 10, 13]) ➞ 10
largest_even([1, 3, 5, 7]) ➞ -1
largest_even([0, 19, 18973623]) ➞ 0
Notes:
Consider using the modulo operator % or the bitwise and operator &.
"""
def largest_even(lst):
    """Return the largest even number in `lst`, or -1 if there is none.

    Single O(n) scan instead of sorting the even subset (the original
    built an intermediate list and sorted it, O(n log n)).  max() is
    deliberately not used, per the exercise statement.
    """
    best = None
    for value in lst:
        # `best is None` keeps negative evens correct (e.g. [-4, -2] -> -2).
        if value % 2 == 0 and (best is None or value > best):
            best = value
    return -1 if best is None else best
|
# -*- coding: utf-8 -*-
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
    """Static layout for the 'Bandas Musicales' main window.

    Builds a fixed-size window with band/style CRUD buttons, a search box
    with a style filter, and a read-only table view of the database.
    """
    #Main of window
    def setupUi(self, MainWindow):
        """Create and position every widget on `MainWindow`."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 440)
        # Min == max size pins the window to a fixed 800x550 geometry.
        MainWindow.setMinimumSize(QtCore.QSize(800, 550))
        MainWindow.setMaximumSize(QtCore.QSize(800, 550))
        MainWindow.setCursor(QtCore.Qt.ArrowCursor)
        MainWindow.setMouseTracking(False)
        self.horizontalLayoutWidget = QtGui.QWidget(MainWindow)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(40, 10, 700, 75))
        self.horizontalLayoutWidget.setMinimumSize(QtCore.QSize(40, 0))
        self.horizontalLayoutWidget.setMaximumSize(QtCore.QSize(700, 16777215))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        #Button to add a band
        self.btn_new = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.btn_new.setMinimumSize(QtCore.QSize(115, 25))
        self.btn_new.setMaximumSize(QtCore.QSize(115, 25))
        self.btn_new.setObjectName("btn_new")
        self.horizontalLayout.addWidget(self.btn_new)
        #Button to edit a band
        self.btn_edit = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.btn_edit.setMinimumSize(QtCore.QSize(110, 25))
        self.btn_edit.setMaximumSize(QtCore.QSize(110, 25))
        self.btn_edit.setObjectName("btn_edit")
        self.horizontalLayout.addWidget(self.btn_edit)
        #Button to delete a band
        self.btn_delete = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.btn_delete.setMinimumSize(QtCore.QSize(115, 25))
        self.btn_delete.setMaximumSize(QtCore.QSize(115, 25))
        self.btn_delete.setObjectName("btn_delete")
        self.horizontalLayout.addWidget(self.btn_delete)
        #Button to add a style
        self.btn_newstyle = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.btn_newstyle.setMinimumSize(QtCore.QSize(110, 25))
        self.btn_newstyle.setMaximumSize(QtCore.QSize(110, 25))
        self.btn_newstyle.setObjectName("btn_newstyle")
        self.horizontalLayout.addWidget(self.btn_newstyle)
        #Button to edit a style
        self.btn_editstyle = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.btn_editstyle.setMinimumSize(QtCore.QSize(110, 25))
        self.btn_editstyle.setMaximumSize(QtCore.QSize(110, 25))
        self.btn_editstyle.setObjectName("btn_editstyle")
        self.horizontalLayout.addWidget(self.btn_editstyle)
        #Button to delete a style
        self.btn_delstyle = QtGui.QPushButton(self.horizontalLayoutWidget)
        self.btn_delstyle.setMinimumSize(QtCore.QSize(110, 25))
        self.btn_delstyle.setMaximumSize(QtCore.QSize(110, 25))
        self.btn_delstyle.setObjectName("btn_delstyle")
        self.horizontalLayout.addWidget(self.btn_delstyle)
        self.horizontalLayoutWidget_2 = QtGui.QWidget(MainWindow)
        self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(40, 70, 680, 40))
        self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, -1)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        #Search box (free text)
        self.search_box = QtGui.QLineEdit(self.horizontalLayoutWidget_2)
        self.search_box.setMinimumSize(QtCore.QSize(300, 25))
        self.search_box.setMaximumSize(QtCore.QSize(300, 25))
        self.search_box.setText("")
        self.search_box.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
        self.search_box.setObjectName("search_box")
        self.horizontalLayout_2.addWidget(self.search_box)
        #Label for the style filter
        self.label = QtGui.QLabel(self.horizontalLayoutWidget_2)
        self.label.setMinimumSize(QtCore.QSize(40, 25))
        self.label.setMaximumSize(QtCore.QSize(40, 25))
        self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label.setObjectName("label")
        self.horizontalLayout_2.addWidget(self.label)
        #Style selector combo box
        self.selectStyle = QtGui.QComboBox(self.horizontalLayoutWidget_2)
        self.selectStyle.setMinimumSize(QtCore.QSize(180, 25))
        self.selectStyle.setMaximumSize(QtCore.QSize(180, 25))
        self.selectStyle.setMouseTracking(False)
        self.selectStyle.setEditable(False)
        self.selectStyle.setMaxVisibleItems(30)
        self.selectStyle.setInsertPolicy(QtGui.QComboBox.InsertAlphabetically)
        self.selectStyle.setObjectName("selectStyle")
        self.horizontalLayout_2.addWidget(self.selectStyle)
        #Read-only table view showing the database contents
        self.table = QtGui.QTableView(MainWindow)
        self.table.setGeometry(QtCore.QRect(10, 110, 680, 310))
        self.table.setMinimumSize(QtCore.QSize(771, 350))
        self.table.setMaximumSize(QtCore.QSize(771, 350))
        self.table.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.table.setObjectName("table")
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set the (Spanish) user-visible text of every widget."""
        #Names of the objects
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "Bandas Musicales", None, QtGui.QApplication.UnicodeUTF8))
        self.btn_new.setText(QtGui.QApplication.translate("MainWindow", "Nueva Banda", None, QtGui.QApplication.UnicodeUTF8))
        self.btn_edit.setText(QtGui.QApplication.translate("MainWindow", "Editar Banda", None, QtGui.QApplication.UnicodeUTF8))
        self.btn_delete.setText(QtGui.QApplication.translate("MainWindow", "Eliminar Banda", None, QtGui.QApplication.UnicodeUTF8))
        self.btn_newstyle.setText(QtGui.QApplication.translate("MainWindow", "Nuevo Estilo", None, QtGui.QApplication.UnicodeUTF8))
        self.btn_editstyle.setText(QtGui.QApplication.translate("MainWindow", "Editar Estilo", None, QtGui.QApplication.UnicodeUTF8))
        self.btn_delstyle.setText(QtGui.QApplication.translate("MainWindow", "Eliminar Estilo", None, QtGui.QApplication.UnicodeUTF8))
        self.search_box.setPlaceholderText(QtGui.QApplication.translate("MainWindow", "Buscar", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("MainWindow", "Buscar por Estilo:", None, QtGui.QApplication.UnicodeUTF8))
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'numfields.ui'
#
# Created by: PyQt4 UI code generator 4.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Qt4 compatibility shims (emitted by pyuic): fall back to identity /
# non-encoding variants on Qt builds where QString.fromUtf8 and
# QApplication.UnicodeUTF8 no longer exist.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Numfields(object):
    """Dialog asking how many fields (1-10) the user needs."""
    def setupUi(self, Dialog):
        """Create the spin box, label, and OK/Cancel buttons on `Dialog`."""
        Dialog.setObjectName(_fromUtf8("Dialog"))
        Dialog.resize(379, 174)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("Images/database.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Dialog.setWindowIcon(icon)
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(30, 130, 341, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        # NOTE(review): getNum's return value is discarded when invoked as
        # a click slot — callers must read self.spinBox.value() themselves.
        self.buttonBox.button(QtGui.QDialogButtonBox.Ok).clicked.connect(self.getNum)
        self.spinBox = QtGui.QSpinBox(Dialog)
        self.spinBox.setGeometry(QtCore.QRect(230, 60, 141, 29))
        self.spinBox.setMaximum(10)
        self.spinBox.setMinimum(1)
        self.spinBox.setObjectName(_fromUtf8("spinBox"))
        self.label = QtGui.QLabel(Dialog)
        self.label.setGeometry(QtCore.QRect(20, 60, 201, 21))
        self.label.setObjectName(_fromUtf8("label"))
        self.retranslateUi(Dialog)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)
    def retranslateUi(self, Dialog):
        """Set the user-visible text of the dialog widgets."""
        Dialog.setWindowTitle(_translate("Dialog", "How Many Fields", None))
        self.label.setText(_translate("Dialog", "How many fields do you need: ", None))
    def getNum(self):
        """Return the currently selected field count as an int."""
        return int(self.spinBox.value())
if __name__ == "__main__":
    # Manual smoke test: show the dialog standalone.
    import sys
    app = QtGui.QApplication(sys.argv)
    Dialog = QtGui.QDialog()
    ui = Numfields()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
|
#!/usr/bin/env python
import os
import platform
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension
def check_env_flag(name, default=''):
    """Return True when env var `name` (or `default` when unset) spells an
    affirmative value (ON/1/YES/TRUE/Y, case-insensitive)."""
    truthy = ('ON', '1', 'YES', 'TRUE', 'Y')
    value = os.getenv(name, default)
    return value.upper() in truthy
# Compile/link flags: unoptimized debug build when the DEBUG env flag is on.
DEBUG = check_env_flag('DEBUG')
eca = []  # extra compile args
ela = []  # extra link args
if DEBUG:
    if platform.system() == 'Windows':
        ela += ['/DEBUG:FULL']
    else:
        eca += ['-O0', '-g']
        ela += ['-O0', '-g']
setup(
    name="torchaudio",
    version="0.2",
    description="An audio package for PyTorch",
    url="https://github.com/pytorch/audio",
    author="Soumith Chintala, David Pollack, Sean Naren, Peter Goldsborough",
    author_email="soumith@pytorch.org",
    # Exclude the build files.
    packages=find_packages(exclude=["build"]),
    # C++ extension binding libsox; built by torch's BuildExtension.
    ext_modules=[
        CppExtension(
            '_torch_sox',
            ['torchaudio/torch_sox.cpp'],
            libraries=['sox'],
            extra_compile_args=eca,
            extra_link_args=ela),
    ],
    cmdclass={'build_ext': BuildExtension},
    install_requires=['torch']
)
|
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
import numpy as np
import pandas as pd
from polyglot.detect import Detector
from langdetect import detect
from polyglot.detect.base import logger as polyglot_logger
from tqdm import tqdm
tqdm.pandas()
polyglot_logger.setLevel("ERROR")
# Characters that can never stand alone as an aspect term.
null_aspects = ['!', '#', '@', "'", '~', '^', '\n', '`', '%', '"', '“', '´', '’', ',', '.', ':', ';', '?', '/']
# Characters stripped outright from snippets.
symbols = ['!', '#', '@','~', '^', '`', '\n', '%', '"', "\\"]
# English stop words, minus the directional particles that carry meaning
# in aspect phrases like "boot up" / "shut down".
stop_words = set(stopwords.words('english'))
stop_words.remove('up')
stop_words.remove('down')
stop_words.remove('in')
stop_words.remove('out')
# Build one case-insensitive alternation that matches a leading or
# trailing stop word, or a bare punctuation run, in an aspect string.
strip_re_components = []
for sw in stop_words:
    escaped_sw = re.escape(sw)
    strip_re_components.append(fr'(?:^{escaped_sw}\b)')
    strip_re_components.append(fr'(?:\b{escaped_sw}$)')
strip_re_components.append(r'(?:\b[,.:;/?$%#@!]+\b)')
strip_re = re.compile('|'.join(strip_re_components), re.I)
def remove_stop_words(aspect_list):
    """Strip leading/trailing stop words and stray punctuation from every
    aspect string, dropping entries that end up empty."""
    cleaned = []
    for aspect in aspect_list:
        stripped = strip_re.sub('', aspect).strip()
        if stripped:
            cleaned.append(stripped)
    return cleaned
def clean_aspects(aspects):
    """Apply remove_stop_words to each aspect list in `aspects`."""
    return list(map(remove_stop_words, aspects))
def only_lang_(text, limiar=0.9, lang='en'):
    """Return `text` if langdetect classifies it as `lang` with probability
    >= `limiar`; otherwise (or on detection failure) return None.

    Bug fixed: detect_langs was never imported (the module top only imports
    `detect`), so the bare except swallowed the NameError and this function
    unconditionally returned None.  The stray debug print is also removed.
    """
    from langdetect import detect_langs  # local: module top only imports `detect`
    try:
        res = detect_langs(text)
    except Exception:
        # langdetect raises on empty or undetectable input
        return None
    if res[0].lang == lang and res[0].prob >= limiar:
        return text
    return None
def only_lang(text, limiar=0.9, lang='en'):
    """Return `text` when polyglot detects language `lang` with confidence
    >= `limiar`; otherwise (or on detection failure) return None."""
    try:
        detected = Detector(text, quiet=True)
    except:
        return None
    language = detected.language
    if language.code == lang and language.confidence >= limiar:
        return text
    return None
def transform_sentence_in_T(aspect, text):
    """Replace every occurrence of `aspect` in `text` with the $T$ marker
    (case-insensitively when no exact-case match exists).

    Returns None for punctuation-only aspects and for missing inputs.

    Bug fixed: the case-insensitive fallback passed the raw aspect to
    re.sub as a pattern, so aspects containing regex metacharacters
    ('(', '+', '?', ...) crashed or mis-matched; the pattern is now
    escaped with re.escape, which also makes the try/except-print
    fallback unnecessary.
    """
    if aspect in null_aspects:
        return None
    if aspect is None or text is None:
        return None
    if aspect in text:
        return text.replace(str(aspect), '$T$')
    return re.sub(re.escape(str(aspect)), '$T$', text, flags=re.IGNORECASE)
def transform_data_in_T(data):
    """Add a 'T_sent' column: each snippet with its aspect replaced by $T$."""
    data['T_sent'] = [
        transform_sentence_in_T(aspect, snippet)
        for aspect, snippet in zip(data['aspect'], data['snippet'])
    ]
    return data
def preprocessing_snippet(x, lang='en'):
    """Normalise a review snippet: drop promo boilerplate and the characters
    in `symbols`, then keep the snippet only if it is written in `lang`.

    Returns None for snippets that fail the language check or end up empty.

    Fixes: the no-op `x.replace('\\n', '')` (its result was discarded, and
    '\\n' is already removed via `symbols`) is deleted, and the language
    filter now honours the `lang` argument instead of hard-coding 'en'.
    """
    x = str(x)
    x = x.replace('[This review was collected as part of a promotion.]', '')
    for s in symbols:
        x = x.replace(s, '')
    if lang:
        x = only_lang(x, lang=lang)
    if x == '':
        x = None
    return x
def preprocessing_aspects(x, lang='en'):
    """Normalise an aspect term: strip leading/trailing stop words,
    possessive suffixes, stray punctuation, and the characters in
    `null_aspects`.

    Returns None when the aspect reduces to the empty string; the `lang`
    parameter is currently unused (the language filter is disabled).

    Fixes two statements whose results were silently discarded:
    `strip_re.sub('', x)` (stop-word stripping) and `x.replace('\\n', '')`
    are now actually applied.
    """
    x = str(x)
    x = strip_re.sub('', x)
    x = re.sub(r'\B\W\B', '', x)
    x = re.sub(r"\B's", '', x)
    x = re.sub(r"\B’s", '', x)
    for s in null_aspects:
        x = x.replace(s, ' ')
    x = x.strip()
    x = x.replace('\n', '')
    if x == '':
        x = None
    return x
def preprocessing_dataframes(data, lang='en'):
    """Deduplicate and normalise the 'snippet' and 'aspect' columns in place.

    Duplicates are dropped both before and after cleaning, because cleaning
    can collapse distinct rows into identical ones.
    """
    data.drop_duplicates(subset=['snippet', 'aspect'], inplace=True)
    # progress_apply == apply with a tqdm progress bar (enabled by tqdm.pandas()).
    data['snippet'] = data['snippet'].progress_apply(preprocessing_snippet, lang=lang)
    data['aspect'] = data['aspect'].progress_apply(preprocessing_aspects, lang=lang)
    data.drop_duplicates(subset=['snippet', 'aspect'], inplace=True)
    return data
def save_xml_seg(data, file_name='train', frac=1):
    """Write (T_sent, aspect, sentiment) triples to `<file_name>.txt` in the
    3-lines-per-sample .seg format, after dropping NaN rows and shuffling.

    Args:
        data (pd.DataFrame) - must contain 'T_sent', 'aspect', 'sentiment'
        file_name (str) - output path without the .txt extension
        frac (float) - fraction of rows to (randomly) keep; 1 keeps all,
            shuffled

    Returns:
        pd.DataFrame - the cleaned, shuffled frame that was written.
    """
    # .copy() so the cleanup below mutates a private frame instead of a
    # view of the caller's data (avoids pandas SettingWithCopyWarning).
    new_data = data[['T_sent', 'aspect', 'sentiment']].copy()
    new_data = new_data.replace('\\n', ' ', regex=True)
    new_data = new_data.dropna()
    new_data['sentiment'] = new_data['sentiment'].apply(int)
    new_data = new_data.sample(frac=frac).reset_index(drop=True)
    # One field per line: sentence with $T$, aspect, sentiment label.
    np.savetxt(file_name + '.txt', new_data.values, fmt='%s\n%s\n%s')
    return new_data
def save_files(data, file_name, frac=1):
    """Persist `data` as a zipped CSV plus a .seg text file (via save_xml_seg),
    returning the cleaned frame."""
    print('Dropping nan values...')
    cleaned = data.dropna()
    print('Reseting index values...')
    cleaned = cleaned.reset_index(drop=True)
    print('Saving csv')
    cleaned.to_csv(file_name + '.csv.zip', compression='zip')
    print('Creating seg.xml file')
    save_xml_seg(cleaned, file_name=file_name, frac=frac)
    return cleaned
def select_max_n_elem_dframe_by_column(data, column, n):
    """For every group of `column`, keep at most n randomly-sampled rows."""
    def _cap(group):
        return group.sample(min(len(group), n))
    return data.groupby(column, group_keys=False).apply(_cap)
def select_min_n_elem_dframe_by_column(data, column, n):
    """Drop every `column` group that has fewer than n rows."""
    def _big_enough(group):
        return len(group) >= n
    return data.groupby(column).filter(_big_enough)
|
#!/usr/bin/python
import numpy as np
import pylab as py
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv
from scipy import integrate
import pyPdf,os
from matplotlib import colors
# NOTE: Python 2 script (print statements, xrange below).
print 'THERE IS SOMETHING WRONG WITH THE AXES!!!!!!!!!!!'
#Input parameters:
maxreds=100 #Maximum redshift considered for the plots.
minreds=1e-2 #Minimum redshift.
zbins=1000 #Number of z-bins to construct k(z).
mchvec=np.array([0.,0.5,1.,1.5,2.]) #Array of values of log10(chirp mass/msun). There will be a plot for each value of mch.
lsocondi=True #True to plot the area prohibited by the frequency of the last stable orbit. False to omit this.
candidate=False #True to plot the recent binary candidate.
tobs=1.*yr #Observation time (needed only to see when the binaries are monochromatic).
snrt=8. #S/N detection threshold used below.
detector='ALIGO' #Options: 'ET', 'ALIGO', 'LIGO-L', 'LIGO-H'.
plotfmin=1
plotfmax=1000
#-----------------------------------------------------------------
#Some derived quantities.
#fbin=1./tobs
#fmin=1./tobs
fbin=10 #Frequency bin width in Hz.
#-----------------------------------------------------------------
#Load GBD upper limits data.
outputdir='../plots/SNR_GBD_horizon/'+detector+'/'
ifile1='../data/ground_based/ground_based.npy' #From Paul.
data=np.load(ifile1)[()][detector] #Observed GW frequency and S_n(f)**(1/2).
fvecd,svecd=data[:,0],data[:,1]**2.
fmin,fmax=min(fvecd),max(fvecd)
#fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbin)
#Svec=np.interp(fvec,fvecd,Svecd)
#fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbin)
fvec=np.arange(fmin,fmax,fbin)
#svec=np.interp(fvec,fvecd,sn) #Power spectral density interpolated.
fvec_m=0.5*(fvec[1:]+fvec[:-1]) #Vector of frequencies centred at the arithmetic mean of the bin.
svec_m=np.interp(fvec_m,fvecd,svecd) #Power spectral density interpolated at the arithmetic mean of the bin.
#Define some functions.
def htime(mch, f, zpart):
    '''Averaged GW strain amplitude in the time domain (dimensionless).
    'mch' is the chirp mass in solar masses, 'f' is the observed GW
    frequency in Hz, and 'zpart' is the z-dependent part, [1+z]/D_L(z),
    in Mpc^{-1}.'''
    prefactor = 2.*grav**(5./3.)*np.pi**(2./3.)/(light**4.)
    return prefactor*(mch*msun)**(5./3.)*f**(2./3.)*zpart*mpc**(-1)
def tfreq(mch, f1, f2, z):
    '''Observed time interval that a binary's signal spends between the
    observed GW frequencies f1 and f2.  'mch' is the physical chirp mass
    in solar masses.'''
    prefactor = 5.*light**5.*1./(256.*np.pi**(8./3.)*grav**(5./3.)*(mch*msun)**(5./3.))
    return prefactor*(f1**(-8./3.)-f2**(-8./3.))*(1.+z)**(-5./3.)
def fmaxlso(m1,m2,z):
    '''Gives the observed GW frequency of the last stable orbit of a binary of masses m1 and m2 (in solar masses) and redshift z.'''
    # ISCO frequency, redshifted by (1+z); grav/light/msun come from COMMON.
    return light**3./(6.*np.sqrt(6.)*np.pi*grav*(m1+m2)*msun*(1.+z))
def DLapp(mch,f,sn,tobs):
    '''Luminosity distance (in Mpc) of the apparent horizon (the distance horizon obtained if the chirp mass is assumed redshifted).'''
    # snrt (the S/N threshold) is read from module scope.
    const=2.*grav**(5./3.)*np.pi**(2./3.)/(light**4.)
    return const*(mch*msun)**(5./3.)*f**(2./3.)*np.sqrt(tobs)*1./(snrt*np.sqrt(sn)*mpc)
def zlso(m,f):
    '''Redshift at which the last stable orbit happens, for a binary with 2 equal mass bodies, emitting GWs at frequency f.'''
    # Inverts fmaxlso(m, m, z) = f for z.
    return light**3./(6.*np.sqrt(6.)*np.pi*grav*(m+m)*msun)*1./f-1.
#-----------------------------------------------------------------
#Calculate luminosity distance and similar functions.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
reds_m=0.5*(reds[1:]+reds[:-1]) #Vector of redshifts at the arithmetic mean of the bin.
lum_dist=np.zeros(len(reds_m)) #This will be D_L(z), the luminosity distance, in Mpc.
htime_dist=np.zeros(len(reds_m)) #This will contain the z-dependences of h in the time domain, in Mpc^{-1}.
hfreq_dist=np.zeros(len(reds_m)) #This will contain the z-dependences of h in the frequency domain, in Mpc^{-1}.
dist_const=light/(hub0*h0)/mpc #A constant that multiplies distances.
# Flat LambdaCDM comoving-distance integral per redshift bin (Python 2 xrange).
for zi in xrange(len(reds_m)):
    lum_dist[zi]=(1.+reds_m[zi])*integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds_m[zi])[0]*dist_const
    htime_dist[zi]=(1.+reds_m[zi])**(5./3.)*1./lum_dist[zi]
    hfreq_dist[zi]=(1.+reds_m[zi])**(5./6.)*1./lum_dist[zi]
#zpeak=reds[kdistvec.argmax()] #Redshift of the peak of K(z).
#-----------------------------------------------------------------
#Choose plotting options that look optimal for the paper.
fig_width = 3.4039
goldenmean=(np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=4.5
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,
    'xtick.labelsize': sizepoints,
    'ytick.labelsize': sizepoints,
    'legend.fontsize': legendsizepoints
})
left, right, top, bottom, cb_fraction=0.15, 0.94, 0.96, 0.16, 0.145 #Borders of the plot.
xmin,xmax=min(fvec),max(fvec) #Edges of the x-axis.
ymin,ymax=minreds,maxreds #Edges of the y-axis.
#-----------------------------------------------------------------
#Calculate S/N for a given physical chirp mass.
z_mat=np.tile(reds_m,(len(fvec_m),1)).T #Matrix with z.
zpart_mat=np.tile(htime_dist,(len(fvec_m),1)).T #Matrix with z-dependent part of h.
DL_mat=np.tile(lum_dist,(len(fvec_m),1)).T #Matrix with luminosity distance.
f_mat=np.tile(fvec_m,(len(reds_m),1)) #Matrix with frequencies.
sn_mat=np.tile(svec_m,(len(reds_m),1)) #Matrix with S_n(f).
z_app=np.zeros(np.shape(sn_mat))
z_app_vec=np.zeros(len(fvec_m))
lso_mat=np.zeros(np.shape(sn_mat))
# One horizon plot per chirp-mass value in mchvec.
for mi in xrange(len(mchvec)):
    mch=10**(mchvec[mi]) #Chirp mass of the BH binary.
    m=mch*2**(1./5.) #Mass of each individual BH, assuming equal masses.
    h_mat=htime(mch,f_mat,zpart_mat) #Matrix with averaged GW strain amplitude.
    #h_mat[f_mat>=fmaxlso(m,m,z_mat)]=0
    tp_mat=tfreq(mch,f_mat-0.5*fbin,f_mat+0.5*fbin,z_mat) #Matrix with time spent in each z-f pixel.
    snr_mat=np.zeros(np.shape(h_mat)) #Matrix with S/N.
    tp_mat_f=tp_mat.copy()
    lso_mat[f_mat>=fmaxlso(m,m,z_mat)]=1
    # Accumulate S/N over the observation time tobs, frequency bin by bin.
    for fi in xrange(len(fvec_m)):
        tc_mat_f=np.cumsum(tp_mat_f,axis=1) #Cumulative sum of time spent per bin from low to high frequencies.
        tpp_mat_f=tp_mat_f.copy()
        selecti=( (tc_mat_f-tobs)<tp_mat_f ) & ( (tc_mat_f-tobs)>0. )
        tpp_mat_f[selecti]=tp_mat_f[selecti]-(tc_mat_f[selecti]-tobs)
        selecti=( (tc_mat_f-tobs)>tp_mat_f ) & ( (tc_mat_f-tobs)>0. )
        tpp_mat_f[selecti]=0.
        snr_mat[:,fi]=np.sqrt(np.sum(h_mat**2.*tpp_mat_f*1./sn_mat,axis=1))
        tp_mat_f[:,fi]=0
        zind=abs(lum_dist-DLapp(mch,fvec_m[fi],svec_m[fi],tobs)).argmin()
        #z_app[zind,fi]=1
        z_app_vec[fi]=reds_m[zind]
        #zind=abs(DL_mat[:,fi]-DLapp(mch,fvec_m[fi],svec_m[fi],tobs)).argmin() #Index of the apparent redshift.
        #z_app[zind,fi]=1
    z_app[DL_mat<=DLapp(mch,fvec_m,svec_m,tobs)]=1
    #Create an S/N plot.
    fig=py.figure()
    fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
    ax=fig.gca()
    # Binarise the S/N map around the detection threshold for contouring.
    snr_mat[snr_mat<snrt]=1.
    snr_mat[snr_mat>=snrt]=10.
    #extent=[np.log10(min(fvec_m)),np.log10(max(fvec_m)),np.log10(min(reds_m)),np.log10(max(reds_m))]
    #plt=ax.contourf(f_mat,z_mat,snr_mat,origin='lower',interpolation='None',aspect='auto',cmap=cmap,alpha=0.5)
    #cmap = colors.ListedColormap(['white', 'red'])
    cmap = colors.ListedColormap(['white','black'])
    ax.contourf(f_mat,z_mat,lso_mat,origin='lower',interpolation='None',aspect='auto',alpha=0.5,cmap=cmap)
    cmap = colors.ListedColormap(['white', 'blue'])
    ax.contourf(f_mat,z_mat,snr_mat,origin='lower',interpolation='None',aspect='auto',alpha=0.5,cmap=cmap)
    #cmap = colors.ListedColormap(['white', 'green'])
    #ax.contourf(f_mat,z_mat,z_app,origin='lower',interpolation='None',aspect='auto',alpha=0.5,cmap=cmap)
    ax.set_xscale('log')
    ax.set_yscale('log')
    #cb=fig.colorbar(plt,fraction=cb_fraction,format='$%.1f$')
    ax.plot(fvec_m,z_app_vec)
    ax.set_ylabel('$\\log_{10}(\\mathrm{Redshift})$')
    ax.set_xlabel('$\\log_{10}( \\mathrm{GW\ frequency / Hz})$')
    #cb.set_label('$\\mathrm{Sub-threshold\ S/N}$')
    #ax.set_xlim(np.log10(plotfmin),np.log10(plotfmax))
    #ax.set_ylim(np.log10(minreds),np.log10(maxreds))
    #ax.text(0.,0.,'$10^{%.1f}M_{\\odot}$'%mchvec[mi],fontsize=9)
    oplot='hori_%i.pdf' %int(mi)
    fig.savefig(outputdir+oplot, transparent=True)
#Combine the individual plots in one PDF file.
output=pyPdf.PdfFileWriter()
ofile=file(outputdir+'horizon.pdf',"wb")
listfiles=os.listdir(outputdir)
for page in listfiles:
    if page[0:5]=='hori_':
        input=pyPdf.PdfFileReader(file("%s" %(outputdir+page),"rb"))
        output.addPage(input.getPage(0))
output.write(ofile)
ofile.close()
|
from tkinter import *
from sudoku import Sudoku |
# coding=utf-8
#noinspection PyUnresolvedReferences
from paypal.interface import PayPalInterface
#noinspection PyUnresolvedReferences
from paypal.settings import PayPalConfig
#noinspection PyUnresolvedReferences
from paypal.exceptions import PayPalError, PayPalConfigError, PayPalAPIResponseError
#noinspection PyUnresolvedReferences
import paypal.countries
VERSION = '1.2.2'  # public package version string
|
# -*- coding: utf-8 -*-
import os
import sys
import tempfile
from contextlib import contextmanager
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
# Text compatibility helper: on Python 2, u() decodes escape sequences into
# a unicode string; on Python 3 strings are already unicode, so it is the
# identity function.
if sys.version_info < (3,):
    import codecs
    def u(x):
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        return x
@contextmanager
def make_temp(suffix="", prefix="tmp", dir=None):
    """
    Creates a temporary file with a closed stream and deletes it when done.

    :return: A contextmanager retrieving the file path.
    """
    fd, path = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
    # Close the low-level handle immediately; callers only need the path.
    os.close(fd)
    try:
        yield path
    finally:
        os.remove(path)
|
# -*- coding: utf-8 -*-
import urllib2
import requests
import lxml.html
import types
import codecs
import csv
class get_data:
    """Python 2 scraper: reads review pages, extracts titles/reviews via
    XPath, and writes one CSV-ish file per item.

    NOTE(review): `__init` is NOT `__init__` (name differs), so
    `get_data()` is constructed by the default no-arg constructor and this
    private initialiser is invoked explicitly from run()/`__get_review` —
    fragile but apparently intentional.
    """
    def __init(self,url):
        # (Re)load `url` and parse its HTML into self.source.
        self.url = url
        html = urllib2.urlopen(url)
        self.source = lxml.html.fromstring(html.read())
    def __get_url(self):
        # Read the next target URL from stdin (Python 2 raw_input).
        return raw_input()
    def __get_itemTitle(self):
        # First <title> element, UTF-8 encoded bytes.
        return self.source.xpath('//title')[0].text_content().encode('utf-8')
    def __get_userName(self):
        # Debug helper: dump every <span>'s text with its index.
        for i in range(len(self.source.xpath('//span'))):
            print i
            print self.source.xpath('//span')[i].text_content().encode("utf-8")
    def __get_review(self):
        # Walk paginated review pages (suffix "<n>.1/") and collect the
        # <dd> elements at the hard-coded offsets 13, 16, ..., 55.
        url = self.url
        List = []
        for i in range(2,5):
            urlex = url + str(i) + ".1/"
            self.__init(urlex)
            for i in range(13,56,3):
                List.append(self.source.xpath('//dd')[i].text_content().encode('utf-8'))
        return List
    def __write(self,dic):
        # Write title + reviews to "<title>.csv" under a hard-coded,
        # machine-specific directory.  NOTE(review): confirm the path.
        k = dic["title"].replace("/","?")
        File = "/Users/TomonotiHayshi/GitHub/My Research/Rakuten-fake-/" + k + ".csv"
        f = open(File,"w")
        f.write(dic["title"])
        for i in dic["review"]:
            f.write(i)
        f.close()
        print "----Finish Write----"
    def run(self):
        """Loop: read a URL from stdin, scrape it, write the output file;
        an input of "0" terminates."""
        input = self.__get_url()
        while input != "0":
            output = {}
            self.__init(input)
            output["title"] = self.__get_itemTitle()
            # print self.__get_userName()
            output["review"] = self.__get_review()
            self.__write(output)
            input = self.__get_url()
if __name__=="__main__":
    # Interactive entry point: prompts for URLs until "0" is entered.
    crawl = get_data()
    crawl.run()
|
import requests
from bs4 import BeautifulSoup
import json
import random
import os
import sqlite3
# import reqiests
# from lxml import etree ---xpath
def find():
    """Fetch the 1905.com daily-film JSON feed, download each poster image
    into ./ss/, and store (title, thumb URL, image name) via save_data.
    """
    urls = 'https://www.1905.com/api/content/index.php'
    headers = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 "
                            "(KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.3"}
    params = {'callback': 'reloadList',
              'm': 'converged',
              'a': 'info',
              'type': 'jryp',
              'year': '2020',
              'month': '5'}
    response = requests.get(urls,headers=headers,params=params)
    html = response.text
    print('html1',html)
    # Strip the JSONP wrapper "reloadList(...)" to get plain JSON.
    html = html.replace('reloadList(','').replace(')','')
    print('html2',html)
    hh = json.loads(html)
    for i in hh['info']:
        title = i['title']
        thumb = i['thumb']
        url = i['url']
        airtime = i['airtime']
        # Download the poster and save it under a random 4-digit name.
        img = requests.get(thumb).content
        img_name = random.randint(1000,9999)
        if not os.path.exists('ss'):
            os.mkdir('ss')
        with open('ss/%s.jpg'%img_name,'wb') as g:
            g.write(img)
        save_data(content=title,link=thumb,img=img_name)
# Create the database table.
def creat_db():
    """Create the `filmdata` table in film.db (id, content, link, img).

    Fixes: CREATE TABLE IF NOT EXISTS so repeated runs no longer raise
    sqlite3.OperationalError, and the connection is closed even when the
    statement fails.
    """
    conn = sqlite3.connect('film.db')
    try:
        c = conn.cursor()
        c.execute('CREATE TABLE IF NOT EXISTS filmdata('
                  'id INTEGER PRIMARY KEY AUTOINCREMENT,'
                  'content text,link text,img text)')
        conn.commit()
    finally:
        conn.close()
# Save one scraped record.
def save_data(content, link, img):
    """Insert one (content, link, img) row into the filmdata table.

    Bug fixed: values were spliced into the SQL with str.format, which
    crashed on titles containing quotes and allowed SQL injection; the
    values are now bound as sqlite3 parameters.
    """
    conn = sqlite3.connect('film.db')
    try:
        conn.execute('INSERT INTO filmdata(content,link,img) VALUES (?,?,?)',
                     (content, link, img))
        conn.commit()
    finally:
        conn.close()
# Inspect stored rows.
def show_data():
    """Print every row currently stored in the filmdata table."""
    conn = sqlite3.connect('film.db')
    cursor = conn.cursor()
    for row in cursor.execute("select * from filmdata"):
        print(row)
    conn.close()
if __name__ =='__main__':
    # Entry point: scrape and store; the table-creation and inspection
    # helpers are kept commented out for manual use.
    find()
    # show_data()
    # creat_db()
|
# Local variables => Python has several exceptions to the usual scoping rules
print("\n===함수 내부 변수1===")
# When calling a function, only the variable's value is passed as the argument.
# The same name inside and outside a function can refer to different variables.
a = 1 #100
def test(a): #200
    a += 1
print("함수 호출 전 a : %d"%a)
test(a)
# Unchanged: the parameter `a` shadowed the module-level `a`.
print("함수 호출 후 a : %d"%a)
del a
print("\n===함수 내부 변수2===")
def test():
    # A variable declared inside a function disappears when the function returns.
    a = 1
    a += 1
    print("함수 내부 a : %d"%a)
test()
# print("함수 외부 a : %d"%a)
print("\n===global명령어===")
# Globals, as in other languages => avoid them where possible...
# they make functions non-independent.
a = 1
def test():
    global a # bind to the outer (module-level) variable
    a += 1
test()
print("함수 호출 후 a : %d"%a)
print("\n===예외===")
# Lists, tuples and dicts can be mutated from any function (like globals),
# because only rebinding — not mutation — needs the global statement.
a = [1, 2, 3, 4]
def test():
    a.append(5)
test()
print(a)
|
# A class for communicating in Go Text Protocol
#
# An already-connected socket should be passed in, then this class
# can be used to handle some aspects of GTP simply
# Commands that any user of this class *must* support...
# quit
# boardsize
# clear_board
# komi
# play
# genmove
class GTPSocket:
# By default the list only has commands we can handle internally in this class.
# Anything that uses this class should append any commands it can handle to this
# list. A common pattern would be:
#
# dispatcher = {'command1': function_1, 'command_2': function_2, 'command_3': function_3}
# gtp_socket = GTPSocket(socket)
# GTPSocket.known_cmds = GTPSocket.known_cmds & set(dispatcher.keys())
# gtp = gtp_socket.get()
# if gtp.type == 'command':
# dispatcher[gtp.command](gtp)
known_cmds = set(['protocol_version', 'name', 'version', 'known_command', 'list_commands'])
def __init__(self, socket):
self.socket = None
self.last_id = None
def get(self):
msg = None
while msg is None:
try:
select.select([self.socket], [], [])
msg = self.socket.recv(4096)
if msg[0] == '?':
print "Error: GTP response: " + msg
return self._msg_to_response(msg)
elif msg[0] == '=':
return self._msg_to_response(msg)
elif not _validate_gtp(msg):
print 'Error: Incoming data was not a valid GTP message'
msg = None
except:
pass
gtp = self._msg_to_gtp(msg)
# Some gtp commands should be handled internally
if gtp.command == 'protocol_version':
self.send_response('2', gtp.id)
return None
elif gtp.command == 'name':
self.send_response('pygo', gtp.id)
return None
elif gtp.command == 'version':
self.send_response('', gtp.id)
return None
elif gtp.command == 'known_command':
if gtp.arguments[0] in GTPSocket.known_cmds:
resp = 'true'
else:
resp = 'false'
self.send_response(resp, gtp.id)
return None
elif gtp.command == 'list_commands':
self.send_response(''.join(GTPSocket.known_cmds, '\n'), gtp.id)
return None
else:
return gtp
def send(self, msg):
try:
msg = str(self.last_id + 1) + ' ' + msg + "\n"
if _validate_gtp(msg):
self.last_id += 1
self.socket.send(msg)
return True
else:
print 'Error: Outgoing data was not a valid GTP message'
return False
except:
return False
def send_response(self, msg, resp_id=None, is_error=False):
try:
to_send = ''
if is_error:
to_send += '?'
else:
to_send += '='
if resp_id is not None:
to_send += str(resp_id)
to_send += msg + "\n\n"
self.socket.send(to_send)
return True
except:
return False
def _msg_to_gtp(self, msg):
gtp = Object()
data = msg.split(' ')
gtp.type = 'message'
gtp.id = int(data[0])
gtp.command = data[1]
if len(data) > 2:
gtp.arguments = data[2:]
return gtp
def _msg_to_response(self, msg):
resp = Object()
resp.type = 'response'
# For now, we *require* our messages to have an id.
# This violates the protocol, but we'll deal with the
# more complicated case later.
def _validate_gtp(msg):
return re.match(r'\d+?\s+\w+?', msg)
|
from django.apps import AppConfig
class CalculationConfig(AppConfig):
    """Django application config for the ``calculation`` app."""
    name = 'calculation'
    # Human-readable name shown in the Django admin (Russian: "Calculation").
    verbose_name = 'Калькуляция'
|
class Class1():
    """Demo class: three class attributes, each printed by its own method."""
    var1 = 100
    var2 = 0.1
    var3 = 'asdf'
    def fun1(self):
        # prints "I am fun1" followed by var1
        print('我是fun1')
        print('var1=',self.var1)
    def fun2(self):
        # prints "I am fun2" followed by var2
        print('我是fun2')
        print('var2=',self.var2)
    def fun3(self):
        # prints "I am fun3" followed by var3
        print('我是fun3')
        print('var3=',self.var3)
# Instantiate the demo class and exercise each method in turn.
A = Class1()
for demo in (A.fun1, A.fun2, A.fun3):
    demo()
import optuna
from tuneup.util import dilate
from optuna.logging import CRITICAL
def optuna_cube(objective, scale, n_trials):
    """Minimize ``objective`` over the (dilated) unit cube with Optuna.

    :param objective: callable taking a 3-vector, returning a tuple whose
        first entry is the loss value.
    :param scale: dilation factor forwarded to ``dilate``.
    :param n_trials: number of Optuna trials to run.
    :return: best objective value found by the study.
    """
    def cube_objective(trial):
        # BUG FIX: all three coordinates used the same parameter name 'u'.
        # Optuna returns the cached value for a repeated name within a
        # trial, so u1 == u2 == u3 and only the cube's diagonal was ever
        # searched. Distinct names restore a genuine 3-D search.
        u1 = trial.suggest_float('u1', 1e-6, 1 - 1e-6)
        u2 = trial.suggest_float('u2', 1e-6, 1 - 1e-6)
        u3 = trial.suggest_float('u3', 1e-6, 1 - 1e-6)
        return objective(dilate([u1, u2, u3], scale))[0]  # first entry is the loss

    optuna.logging.set_verbosity(CRITICAL)
    study = optuna.create_study()
    study.optimize(cube_objective, n_trials=n_trials)
    return study.best_value
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import collections
import pathlib
import time
import typing as t
from .. import TE
from ..dataset import Dataset
from ..indicator import ThreatIndicator
from ..content_type import meta
from . import command_base
class ExperimentalFetchCommand(command_base.Command):
    """
    WARNING: This is experimental, you probably want to use "Fetch" instead.
    Download content from ThreatExchange to disk.
    Using the CollaborationConfig, identify ThreatPrivacyGroup that
    corresponds to a single collaboration and fetch related threat updates.
    """
    # Seconds between progress lines written to stderr while processing.
    PROGRESS_PRINT_INTERVAL_SEC = 30
    @classmethod
    def init_argparse(cls, ap) -> None:
        """Register this command's CLI flags on the given ArgumentParser."""
        ap.add_argument(
            "--start-time",
            type=int,
            help="Fetch updates that occured on or after this timestamp",
        )
        ap.add_argument(
            "--stop-time",
            type=int,
            help="Fetch updates that occured before this timestamp",
        )
        ap.add_argument(
            "--owner",
            type=int,
            help="Only fetch updates for indicators that the given app has a descriptor for",
        )
        ap.add_argument(
            "--threat-types",
            nargs="+",
            help="Only fetch updates for indicators of the given type",
        )
        ap.add_argument(
            "--additional-tags",
            nargs="+",
            help="Only fetch updates for indicators that have a descriptor with each of these tags",
        )
    def __init__(
        self,
        start_time: int,
        stop_time: int,
        owner: int,
        threat_types: t.List[str],
        additional_tags: t.List[str],
    ) -> None:
        # start_time/stop_time may be None here; execute() substitutes the
        # stored checkpoint / current time.
        self.start_time = start_time
        self.stop_time = stop_time
        self.owner = owner
        self.threat_types = threat_types
        self.additional_tags = additional_tags
        # One instantiated signal handler per registered signal type.
        self.signal_types_by_name = {
            name: signal() for name, signal in meta.get_signal_types_by_name().items()
        }
        # Timestamp of the last progress line; 0 forces an initial print.
        self.last_update_printed = 0
        # Per-signal-name match counts, plus an "all" total.
        self.counts = collections.Counter()
    def execute(self, dataset: Dataset) -> None:
        """Page through threat updates and persist matching indicators."""
        # TODO: [Potential] Force full fetch if it has been 90 days since the last fetch.
        # Default the window to [last checkpoint, now).
        self.start_time = (
            dataset.get_indicator_checkpoint()
            if self.start_time is None
            else self.start_time
        )
        self.stop_time = int(time.time()) if self.stop_time is None else self.stop_time
        dataset.load_indicator_cache(self.signal_types_by_name.values())
        more_to_fetch = True
        next_page = None
        while more_to_fetch:
            result = TE.Net.getThreatUpdates(
                dataset.config.privacy_groups[0],
                start_time=self.start_time,
                stop_time=self.stop_time,
                owner=self.owner,
                threat_type=self.threat_types,
                additional_tags=self.additional_tags,
                next_page=next_page,
            )
            if "data" in result:
                self._process_indicators(result["data"])
            # Keep paging while the API returns a "next" cursor.
            more_to_fetch = "paging" in result and "next" in result["paging"]
            next_page = result["paging"]["next"] if more_to_fetch else None
        # Persist caches and print totals only for signal types that matched.
        for signal_name, signal_type in self.signal_types_by_name.items():
            if signal_name not in self.counts:
                continue
            dataset.store_indicator_cache(signal_type)
            print(f"{signal_name}: {self.counts[signal_name]}")
        # Record how far we got so the next run can resume incrementally.
        dataset.record_indicator_checkpoint(self.stop_time)
    def _process_indicators(
        self,
        indicators: list,
    ) -> None:
        """Process indicators"""
        # Convert each raw JSON indicator to a ThreatIndicator and offer it
        # to every signal type, counting which ones claimed it.
        for ti_json in indicators:
            ti = ThreatIndicator(
                int(ti_json.get("id")),
                ti_json.get("indicator"),
                ti_json.get("type"),
                int(ti_json.get("creation_time")),
                int(ti_json.get("last_updated")),
                ti_json.get("status"),
                ti_json.get("is_expired"),
                ti_json.get("tags"),
                [int(app) for app in ti_json.get("applications_with_opinions")],
                int(ti_json.get("expire_time")) if "expire_time" in ti_json else None,
            )
            match = False
            for signal_name, signal_type in self.signal_types_by_name.items():
                if signal_type.process_indicator(ti):
                    match = True
                    self.counts[signal_name] += 1
            if match:
                self.counts["all"] += 1
            now = time.time()
            # Rate-limit progress output to one line per interval.
            if now - self.last_update_printed >= self.PROGRESS_PRINT_INTERVAL_SEC:
                self.last_update_printed = now
                self.stderr(f"Processed {self.counts['all']}...")
|
# from data_process.train_dataset import RegularDataset
import os
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
import os.path as osp
from PIL import Image
import numpy as np
from torchvision import transforms
from torchvision import utils
from utils import pose_utils
from PIL import ImageDraw
# from utils.transforms import create_part
import time
import json
import random
import cv2
from torch.utils.tensorboard import SummaryWriter
np.seterr(divide='ignore', invalid='ignore')
class RegularDatasetDensepose(Dataset):
    """Paired person/cloth samples with CIHP parsing and DensePose channels.

    Each item bundles source/target images, OpenPose keypoint heatmaps,
    parsing embeddings, cloth masks and 27-channel DensePose tensors, all
    read from fixed paths under ``datasets/zalando``.
    """

    def __init__(self, file_name, file_type, data_folder, augment):
        """
        file_name: pair list file, one sample per line.
        file_type: split selector matched against column 3 of each line.
        data_folder: kept for interface compatibility (paths are hard-coded).
        augment: dict of transforms keyed by '1', '2', '3'.
        """
        self.transforms = augment
        pair_list = [i.strip() for i in open(file_name, 'r').readlines()]
        train_list = list(
            filter(lambda p: p.split('\t')[3] == file_type, pair_list))
        self.size = (256, 192)  # (H, W)
        self.img_list = train_list
        self.data_folder = data_folder

    def __len__(self):
        return len(self.img_list)

    def __getitem__(self, index):
        img_ext = '.jpg'
        # Pair lines are normally tab-separated; fall back to spaces for
        # older list files.
        try:
            img_source = self.img_list[index].split('\t')[0]
            img_target = self.img_list[index].split('\t')[1]
            cloth_img = self.img_list[index].split('\t')[2]
        except:
            img_source = self.img_list[index].split(' ')[0]
            img_target = self.img_list[index].split(' ')[1]
            cloth_img = self.img_list[index].split(' ')[2]
        source_splitext = os.path.splitext(img_source)[0]
        target_splitext = os.path.splitext(img_target)[0]
        cloth_splitext = os.path.splitext(cloth_img)[0]
        # png or jpg
        source_img_path = os.path.join('datasets/zalando/source_images',
                                       source_splitext + img_ext)
        target_img_path = os.path.join('datasets/zalando/target_images',
                                       target_splitext + img_ext)
        cloth_img_path = os.path.join('datasets/zalando/cloth', cloth_img)
        cloth_parse_path = os.path.join('datasets/zalando/cloth_mask',
                                        cloth_splitext + img_ext)
        # Keep the *string* name before cloth_img is overwritten below.
        warped_cloth_name = cloth_img
        # image
        warped_cloth_path = os.path.join('dataset', 'cloth', warped_cloth_name)
        source_img = self.open_transform(source_img_path, False)
        target_img = self.open_transform(target_img_path, False)
        cloth_img = self.open_transform(cloth_img_path, False)
        cloth_parse = self.parse_cloth(cloth_parse_path)
        try:
            # BUG FIX: the old code used `cloth_img`, which at this point is
            # already a transformed tensor, so os.path.join always raised and
            # the real warped-cloth mask was never loaded. Use the string
            # name captured above.
            warped_cloth_parse_name = warped_cloth_name
            # mask
            warped_cloth_parse_path = os.path.join('dataset', 'cloth_mask',
                                                   warped_cloth_parse_name)
            warped_cloth_parse = self.parse_cloth(warped_cloth_parse_path)
        except:
            # Mask missing on disk: fall back to full coverage.
            warped_cloth_parse = torch.ones(1, 256, 192)
        if os.path.exists(warped_cloth_path):
            warped_cloth_img = self.open_transform(warped_cloth_path, False)
        else:
            warped_cloth_img = cloth_img
        # parsing (CIHP label maps plus their colorized *_vis renderings)
        source_parse_vis_path = os.path.join('datasets/zalando/parse_cihp_source',
                                             source_splitext + '_vis' + '.png')
        target_parse_vis_path = os.path.join('datasets/zalando/parse_cihp_target',
                                             target_splitext + '_vis' + '.png')
        source_parse_vis = self.transforms['2'](
            Image.open(source_parse_vis_path))
        target_parse_vis = self.transforms['2'](
            Image.open(target_parse_vis_path))
        source_parse_path = os.path.join('datasets/zalando/parse_cihp_source',
                                         source_splitext + '.png')
        target_parse_path = os.path.join('datasets/zalando/parse_cihp_target',
                                         target_splitext + '.png')
        source_parse = pose_utils.parsing_embedding(source_parse_path)
        source_parse_tformed = self.custom_transform(source_parse, True)
        source_parse = torch.from_numpy(source_parse)
        target_parse = pose_utils.parsing_embedding(target_parse_path)
        target_parse_tformed = self.custom_transform(target_parse, True)
        target_parse = torch.from_numpy(target_parse)
        # Body silhouette, blurred via a 16x down/up-sample round trip.
        source_parse_shape = np.array(Image.open(source_parse_path))
        source_parse_shape = (source_parse_shape > 0).astype(np.float32)
        source_parse_shape = Image.fromarray(
            (source_parse_shape * 255).astype(np.uint8))
        source_parse_shape = source_parse_shape.resize(
            (self.size[1] // 16, self.size[0] // 16),
            Image.BILINEAR)  # downsample and then upsample
        source_parse_shape = source_parse_shape.resize(
            (self.size[1], self.size[0]), Image.BILINEAR)
        source_parse_shape = self.transforms['2'](source_parse_shape)  # [-1,1]
        # Labels 1/2/4/13 (hat/hair/sunglasses/face in CIHP) -> head mask.
        source_parse_head = (np.array(Image.open(source_parse_path)) == 1).astype(np.float32) + \
            (np.array(Image.open(source_parse_path)) == 2).astype(np.float32) + \
            (np.array(Image.open(source_parse_path)) == 4).astype(np.float32) + \
            (np.array(Image.open(source_parse_path)) == 13).astype(np.float32)
        # Labels 5/6/7 (upper-clothes/dress/coat in CIHP) -> cloth region.
        target_parse_cloth = (np.array(Image.open(target_parse_path)) == 5).astype(np.float32) + \
            (np.array(Image.open(target_parse_path)) == 6).astype(np.float32) + \
            (np.array(Image.open(target_parse_path)) == 7).astype(np.float32)
        target_parse_cloth = torch.from_numpy(target_parse_cloth)
        # prepare for warped cloth
        phead = torch.from_numpy(source_parse_head)  # [0,1]
        pcm = target_parse_cloth  # [0,1]
        im = target_img  # [-1,1]
        im_c = im * pcm + (
            1 - pcm)  # [-1,1], fill 1 for other parts --> white same as GT ...
        im_h = source_img * phead - (
            1 - phead
        )  # [-1,1], fill -1 for other parts, thus become black visual
        # pose heatmap embedding (OpenPose JSON keypoints)
        source_pose_path = os.path.join(
            'datasets/zalando/source_keypoints', source_splitext + '_keypoints.json')
        with open(source_pose_path, 'r') as f:
            a = json.load(f)
        source_pose = a['people'][0]['pose_keypoints_2d']
        source_pose_loc = pose_utils.pose2loc(source_pose)
        source_pose_embedding = torch.from_numpy(
            pose_utils.heatmap_embedding(self.size, source_pose_loc))
        target_pose_path = os.path.join(
            'datasets/zalando/target_keypoints', target_splitext + '_keypoints.json')
        with open(target_pose_path, 'r') as f:
            a = json.load(f)
        target_pose = a['people'][0]['pose_keypoints_2d']
        target_pose_loc = pose_utils.pose2loc(target_pose)
        target_pose_embedding = torch.from_numpy(
            pose_utils.heatmap_embedding(self.size, target_pose_loc))
        # Densepose preprocess source
        source_densepose_path = os.path.join('datasets/zalando/densepose_numpy_source/',
                                             source_splitext + '.npy')
        source_densepose_data = np.load(
            source_densepose_path).astype('uint8')  # (256,192,3)
        source_densepose_parts_embeddings = self.parsing_embedding(
            source_densepose_data[:, :, 0])
        source_densepose_parts_embeddings = np.transpose(
            source_densepose_parts_embeddings, axes=(1, 2, 0))
        source_densepose_data_final = np.concatenate(
            (source_densepose_parts_embeddings, source_densepose_data[:, :, 1:]), axis=-1)  # channel(27), H, W
        source_densepose_data_final = torch.from_numpy(
            np.transpose(source_densepose_data_final, axes=(2, 0, 1)))
        # Densepose preprocess target
        # BUG FIX: the target .npy was looked up with `source_splitext`,
        # loading the *source* person's DensePose from the target folder.
        target_densepose_path = os.path.join('datasets/zalando/densepose_numpy_target/',
                                             target_splitext + '.npy')
        target_densepose_data = np.load(
            target_densepose_path).astype('uint8')  # (256,192,3)
        target_densepose_parts_embeddings = self.parsing_embedding(
            target_densepose_data[:, :, 0])
        target_densepose_parts_embeddings = np.transpose(
            target_densepose_parts_embeddings, axes=(1, 2, 0))
        target_densepose_data_final = np.concatenate(
            (target_densepose_parts_embeddings, target_densepose_data[:, :, 1:]), axis=-1)  # channel(27), H, W
        target_densepose_data_final = torch.from_numpy(
            np.transpose(target_densepose_data_final, axes=(2, 0, 1)))
        result = {
            'source_parse': source_parse,
            'source_parse_tformed' : source_parse_tformed,
            'target_parse': target_parse,
            'target_parse_tformed' : target_parse_tformed,
            'source_parse_vis': source_parse_vis,
            'target_parse_vis': target_parse_vis,
            'source_pose_embedding': source_pose_embedding,
            'target_pose_embedding': target_pose_embedding,
            'target_pose_loc': target_pose_loc,
            'source_image': source_img,
            'target_image': target_img,
            'cloth_image': cloth_img,
            'cloth_parse': cloth_parse,
            'source_parse_shape': source_parse_shape,
            'im_h': im_h,  # source image head and hair
            'im_c': im_c,  # target_cloth_image_warped
            'source_image_name': source_splitext + img_ext,
            'target_image_name': target_splitext + img_ext,
            'cloth_image_name': cloth_splitext + img_ext,
            'warped_cloth_image': warped_cloth_img,
            'warped_cloth_name': warped_cloth_name,
            'warped_cloth_path': warped_cloth_path,
            'source_img_path': source_img_path,
            'target_img_path': target_img_path,
            'target_pose_path': target_pose_path,
            'target_parse_path': target_parse_path,
            'source_parse_vis_path': source_parse_vis_path,
            'target_parse_vis_path': target_parse_vis_path,
            'warped_cloth_parse': warped_cloth_parse,
            'target_parse_cloth': target_parse_cloth,
            'source_densepose_path': source_densepose_path,
            'source_densepose_data': source_densepose_data_final,
            'target_densepose_path': target_densepose_path,
            'target_densepose_data': target_densepose_data_final
        }
        return result

    def open_transform(self, path, downsample=False):
        """Open an image, optionally 2x-downsample it, apply transform '2'."""
        img = Image.open(path)
        if downsample:
            img = img.resize((96, 128), Image.BICUBIC)
        img = self.transforms['2'](img)
        return img

    def parse_cloth(self, path, downsample=False):
        """Load a cloth mask image as a {0,1} float tensor of shape (1,H,W)."""
        cloth_parse = Image.open(path)
        cloth_parse_array = np.array(cloth_parse)
        cloth_parse = (cloth_parse_array == 255).astype(np.float32)  # 0 | 1
        cloth_parse = cloth_parse[np.newaxis, :]
        if downsample:
            # Take every second pixel in both dimensions.
            [X, Y] = np.meshgrid(range(0, 192, 2), range(0, 256, 2))
            cloth_parse = cloth_parse[:, Y, X]
        cloth_parse = torch.from_numpy(cloth_parse)
        return cloth_parse

    def parsing_embedding(self, parse_obj):
        """One-hot embed a label map into 25 channels -> (25, H, W) float32."""
        parse = np.array(parse_obj)
        parse_channel = 25
        parse_emb = []
        for i in range(parse_channel):
            parse_emb.append((parse == i).astype(np.float32).tolist())
        parse = np.array(parse_emb).astype(np.float32)
        return parse

    def custom_transform(self, input_image, per_channel_transform):
        """Apply transform '1' or '3' per channel depending on the label index.

        NOTE(review): returns None when per_channel_transform is False --
        preserved as-is since callers always pass True here.
        """
        if per_channel_transform:
            num_channel_image = input_image.shape[0]
            tform_input_image_np = np.zeros(
                shape=input_image.shape, dtype=input_image.dtype)
            for i in range(num_channel_image):
                # TODO check why i!=5 makes a big difference in the output
                if i != 1 and i != 2 and i != 4 and i != 5 and i != 13:
                    tform_input_image_np[i] = self.transforms['1'](
                        input_image[i])
                else:
                    tform_input_image_np[i] = self.transforms['3'](
                        input_image[i])
            return torch.from_numpy(tform_input_image_np)
if __name__ == '__main__':
    # Module is import-only; no standalone behavior.
    pass
|
# coding: utf-8
# In[2]:
import os
import numpy as np
import json
import pandas as pd
import sklearn
import xgboost as xgb
from sklearn import cross_validation
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
# In[58]:
## load xgb model
# Paths are machine-specific; the serialized booster must already exist.
data_base = "/data/hzwangjian1/videoquality"
xgb_model = xgb.Booster({'nthread':10}) #init model
xgb_model.load_model(os.path.join(data_base, 'xgb_model')) # load data
# In[6]:
def feature_process(org_feature):
    """Derive model-ready fields on a raw feature dict (mutated in place)."""
    # Anything that is not "headline" placement (1) collapses to 0.
    if org_feature['location_type'] != 1:
        org_feature['location_type'] = 0
    # Source rating: the top two tiers share score 2, everything else 1.
    org_feature['tid_score'] = 2 if org_feature['tid_level'] in (1, 2) else 1
    # Flag whether a big-image URL is present at all.
    org_feature['contain_big_image'] = 1 if 'http' in org_feature['big_image_url'] else 0
    # Definition string -> ordinal quality score (shd > hd > anything else).
    definition = org_feature['definition'].lower().strip()
    org_feature['definition_score'] = {'shd': 3, 'hd': 2}.get(definition, 1)
    return org_feature
# In[59]:
# Load the labeled feature set (one JSON object per line) and derive the
# model features for every record.
data_path = os.path.join(data_base, 'labeled_dataset_feature')
feature_set = []
for line in open(data_path, 'r'):
    feature = json.loads(line)
    feature_set.append(feature_process(feature))
print("size of feature_set:" + str(len(feature_set)))
total_data = pd.DataFrame(feature_set)
total_data.head()
# In[39]:
total_data.info()
# In[60]:
# Score every record one row at a time (NOTE(review): a single batched
# DMatrix would be much faster) and dump full results with all columns.
feature_result = []
for line in feature_set:
    data_feature = [line]
    data_frame = pd.DataFrame(data_feature)
    final_y = data_frame.pop('label')
    # Drop identifiers, URLs, and raw columns the model was not trained on.
    final_x = data_frame.drop(['pic_url','big_image_url','category','quality','source_title','tid_level','tid_score','interests','doc_id','definition','title','m3u8HdUrl','m3u8SdUrl','m3u8ShdUrl','mp4HdUrl','mp4SdUrl','mp4ShdUrl','mp4_url','hdUrl','sdUrl','shdUrl','duration','video_duration','audio_duration','video_time_base','video_nb_frames','video_r_frame_rate','no_audio','size','size_per_pix','location_type','audio_nb_frames','video_avg_frame_rate','video_level','video_bit_rate','audio_bit_rate','bit_rate'], axis=1)
    tesdmat=xgb.DMatrix(final_x)
    y_pred=xgb_model.predict(tesdmat)
    line.update({'y_pred': y_pred[0]})
    feature_result.append(line)
result_data = pd.DataFrame(feature_result)
result_data.to_csv(os.path.join(data_base, 'labeled_dataset_result_full.csv'), sep='\t')
# In[57]:
# Same scoring pass, but strip the listed keys afterwards so the CSV keeps
# only the numeric features plus the prediction.
feature_result = []
key_list = ['pic_url','big_image_url','category','quality','source_title','tid_level','tid_score','interests','title','m3u8HdUrl','m3u8SdUrl','m3u8ShdUrl','mp4HdUrl','mp4SdUrl','mp4ShdUrl','mp4_url','hdUrl','sdUrl','shdUrl','duration','video_duration','audio_duration','video_time_base','video_nb_frames','video_r_frame_rate','no_audio','size','size_per_pix','location_type','audio_nb_frames','video_avg_frame_rate','video_level','video_bit_rate','audio_bit_rate','bit_rate']
for line in feature_set:
    data_feature = [line]
    data_frame = pd.DataFrame(data_feature)
    final_y = data_frame.pop('label')
    final_x = data_frame.drop(['pic_url','big_image_url','category','quality','source_title','tid_level','tid_score','interests','doc_id','definition','title','m3u8HdUrl','m3u8SdUrl','m3u8ShdUrl','mp4HdUrl','mp4SdUrl','mp4ShdUrl','mp4_url','hdUrl','sdUrl','shdUrl','duration','video_duration','audio_duration','video_time_base','video_nb_frames','video_r_frame_rate','no_audio','size','size_per_pix','location_type','audio_nb_frames','video_avg_frame_rate','video_level','video_bit_rate','audio_bit_rate','bit_rate'], axis=1)
    tesdmat=xgb.DMatrix(final_x)
    y_pred=xgb_model.predict(tesdmat)
    line.update({'y_pred': y_pred[0]})
    for key in key_list:
        line.pop(key)
    feature_result.append(line)
result_data = pd.DataFrame(feature_result)
result_data.to_csv(os.path.join(data_base, 'labeled_dataset_result.csv'), sep='\t')
|
developer A : line1
developer B: line 1
|
from tkinter import *
import os
def register_user():
    """Persist the registration form to a file named after the user.

    SECURITY NOTE: credentials are stored as plaintext files in the working
    directory -- acceptable only for this toy app.
    """
    username_info = username.get()
    password_info = password.get()
    # `with` guarantees the file is closed even if a write fails
    # (the original leaked the handle on error).
    with open(username_info, "w") as file:
        file.write("Username:\n")
        file.write(username_info +"\n")
        file.write("Password:\n")
        file.write(password_info)
    entry_username.delete(0, END)
    entry_password.delete(0, END)
    Label(screen1, text = "Registration successful!", fg = "green", font =("roboto", 12)).pack()
def destroyed():
    # Close the "password error" popup.
    screen4.destroy()
def destroyed1():
    # Close the "user not found" popup.
    screen5.destroy()
def login_completed():
    # Successful login: open the dashboard window.
    session()
def saved():
    # Confirm on screen that a note was written to disk.
    Label(screen7, text="Saved", fg = "green", font = ("Roboto", 12)).pack()
def save():
    """Write the note text to the filename the user entered, then confirm."""
    filename_get = raw_filename.get()
    notes_get = raw_notes.get()
    # `with` closes the file even if the write raises (original leaked it).
    with open(filename_get, "w") as data:
        data.write(notes_get)
    saved()
def session():
    """Open the post-login dashboard with the note actions."""
    screen6 = Toplevel(screen)
    screen6.title("Dashboard")
    screen6.geometry("350x250")
    Label(screen6, text = "Welcome to the dashboard").pack()
    Button(screen6, text = "Create secret note", command = create_secret_notes).pack()
    Button(screen6, text = "View secret note", command = view_notes).pack()
    Button(screen6, text = "Delete secret note", command = delete_note).pack()
def delete_note():
    """List files in the working directory and prompt for one to delete."""
    screen10 = Toplevel(screen)
    screen10.title("Delete")
    screen10.geometry("250x250")
    all_files = os.listdir()
    Label(screen10, text = "Choose a filename to delete: ").pack()
    Label(screen10, text = all_files).pack()
    global raw_delete
    raw_delete = StringVar()
    Entry(screen10, textvariable=raw_delete).pack()
    Button(screen10, command=delete_note1, text = "OK").pack()
def delete_note1():
    """Delete the chosen file and confirm in a new window."""
    delete = raw_delete.get()
    # SECURITY NOTE: removes whatever path the user typed, with no
    # confinement to a notes directory and no error handling.
    os.remove(delete)
    screen11 = Toplevel(screen)
    screen11.title("Notes")
    screen11.geometry("400x400")
    Label(screen11, text = delete + " has been removed").pack()
def create_secret_notes():
    """Window for entering a filename and note text to be saved."""
    global raw_filename
    global raw_notes
    global screen7
    raw_filename = StringVar()
    raw_notes = StringVar()
    screen7 = Toplevel(screen)
    screen7.title("Make Notes")
    screen7.geometry("250x150")
    Label(screen7, text = "Enter a filename: ").pack()
    Entry(screen7, textvariable = raw_filename).pack()
    Label(screen7, text = "Enter secret notes: ").pack()
    Entry(screen7, textvariable = raw_notes).pack()
    Button(screen7, text = "Save", command = save).pack()
def view_notes1():
    """Display the contents of the chosen note file in a new window."""
    filename1 = raw_filename1.get()
    # `with` closes the file handle the original left open.
    with open(filename1, "r") as data:
        data1 = data.read()
    screen9 = Toplevel(screen)
    screen9.title("Notes")
    screen9.geometry("400x400")
    Label(screen9, text = data1).pack()
def view_notes():
    """List files in the working directory and prompt for one to open."""
    screen8 = Toplevel(screen)
    screen8.title("Info")
    screen8.geometry("250x250")
    all_files = os.listdir()
    Label(screen8, text = "Choose a filename below: ").pack()
    Label(screen8, text = all_files).pack()
    global raw_filename1
    raw_filename1 = StringVar()
    Entry(screen8, textvariable=raw_filename1).pack()
    Button(screen8, command=view_notes1, text = "OK").pack()
def pass_not_recognized():
    """Popup shown when the password does not match."""
    global screen4
    screen4 = Toplevel(screen)
    screen4.title("Retype pass!")
    screen4.geometry("250x150")
    Label(screen4, text = "Password Error!").pack()
    Button(screen4, text = "OK", command = destroyed).pack()
def user_not_found():
    """Popup shown when no stored file matches the entered username."""
    global screen5
    screen5 = Toplevel(screen)
    screen5.title("Retype user!")
    screen5.geometry("250x150")
    Label(screen5, text = "User not found!").pack()
    Button(screen5, text = "OK", command = destroyed1).pack()
def login_verify():
    """Check the entered credentials against the user's stored file."""
    username1 = user_verify.get()
    password1 = pass_verify.get()
    list_of_files = os.listdir()
    if username1 in list_of_files:
        # `with` closes the handle; register_user writes the layout
        # ["Username:", <user>, "Password:", <password>].
        with open(username1, "r") as file1:
            verify = file1.read().splitlines()
        # BUG FIX: the old membership test (`password1 in verify`) accepted
        # the username or even the literal header lines as a valid
        # password. Compare against the actual password line only.
        if len(verify) >= 4 and password1 == verify[3]:
            login_completed()
        else:
            pass_not_recognized()
    else:
        user_not_found()
def register_page():
    """Build the registration window (username/password entry + buttons)."""
    global screen1
    screen1 = Toplevel(screen)
    screen1.title("Register")
    screen1.geometry("350x250")
    global username
    global password
    global entry_username
    global entry_password
    username = StringVar()
    password = StringVar()
    Label(screen1, text = "Please enter your details").pack()
    Label(screen1, text = "").pack()
    Label(screen1, text = "Username * ").pack()
    entry_username = Entry(screen1, textvariable = username)
    entry_username.pack()
    Label(screen1, text = "Password * ").pack()
    entry_password = Entry(screen1, textvariable = password)
    entry_password.pack()
    Label(screen1, text="").pack()
    # Mask the password by default; the buttons below toggle visibility.
    entry_password.config(show="*");
    password_show = Button(screen1, text="Show password!", height = "1", width = "12", command = buttonshow).pack()
    password_hide = Button(screen1, text="Hide password!", height = "1", width = "12", command = buttonhide).pack()
    Button(screen1, text = "Register", height = "1", width = "10", command = register_user).pack()
def buttonshow():
    # Reveal the registration password characters.
    entry_password.config(show="");
def buttonhide():
    # Mask the registration password characters again.
    entry_password.config(show="*");
def login_page():
    """Build the login window (username/password entry + buttons)."""
    global screen2
    screen2 = Toplevel(screen)
    screen2.title("Login")
    screen2.geometry("350x250")
    Label(screen2, text = "Please enter your login details").pack()
    Label(screen2, text = "").pack()
    global user_verify
    global pass_verify
    user_verify = StringVar()
    pass_verify = StringVar()
    global entry_username1
    global entry_password1
    Label(screen2, text = "Username * ").pack()
    entry_username1 = Entry(screen2, textvariable = user_verify)
    entry_username1.pack()
    Label(screen2, text = "Password * ").pack()
    entry_password1 = Entry(screen2, textvariable = pass_verify)
    entry_password1.pack()
    Label(screen2, text = "").pack()
    # Mask the password by default; the buttons below toggle visibility.
    entry_password1.config(show="*");
    password_show1 = Button(screen2, text="Show password!", height = "1", width = "12", command = buttonshow1).pack()
    password_hide1 = Button(screen2, text="Hide password!", height = "1", width = "12", command = buttonhide1).pack()
    Button(screen2, text = "Login", height = "1", width = "10", command = login_verify).pack()
def buttonshow1():
    # Reveal the login password characters.
    entry_password1.config(show="");
def buttonhide1():
    # Mask the login password characters again.
    entry_password1.config(show="*");
def main_screen():
    """Build the root window with Login/Register buttons and run the loop."""
    global screen
    screen = Tk()
    screen.geometry("350x250")
    screen.title("Secret Notes")
    Label(text="Secret Notes", bg = "grey", width = "300", height = "2", font = ("Roboto", 12)).pack()
    Label(text = "").pack()
    Button(text = "Login", height = "1", width = "25", font = ("roboto", 12), command = login_page).pack()
    Label(text = "").pack()
    Button(text = "Register", height = "1", width = "25", font = ("roboto", 12), command = register_page).pack()
    screen.mainloop()
# Start the application.
main_screen()
|
from django.shortcuts import render, reverse
from django.http import HttpResponse, HttpResponseRedirect
from batchthis.models import Batch, Fermenter, BatchTestType, BatchNoteType
from django.shortcuts import get_object_or_404
from .forms import BatchTestForm, BatchNoteForm, BatchAdditionForm, RefractometerCorrectionForm
from batchthis.utils import Utils
from django.contrib.auth.decorators import login_required
# Create your views here.
def index(request):
    """Dashboard view: active batches, fermenters in use, volume totals."""
    top_batches = Batch.objects.all()[:5]
    total_batch_count = Batch.objects.all().count()
    active_batches = Batch.objects.filter(active=True)
    active_batch_count = len(active_batches)
    active_fermenters = Fermenter.objects.filter(status=Fermenter.STATUS_ACTIVE)
    active_fermenters_count = len(active_fermenters)
    # Total liquid currently fermenting across all active batches.
    total_volume = sum(b.size for b in active_batches)
    # Map each fermenter name to the active batch it holds (first wins).
    fermenter_detail = {}
    for fermenter in active_fermenters:
        fermenter_batch = fermenter.batch.filter(active=True)
        if fermenter.name not in fermenter_detail:
            fermenter_detail[fermenter.name] = {
                'batch': fermenter_batch[0].name,
                'size': fermenter_batch[0].size,
            }
    context = {
        'active_batches': active_batches,
        'active_fermenters': active_fermenters,
        'top_batches': top_batches,
        'total_batch_count': total_batch_count,
        'active_batch_count': active_batch_count,
        'active_fermenters_count': active_fermenters_count,
        'total_volume': total_volume,
        'fermenter_detail': fermenter_detail,
    }
    return render(request, 'batchthis/index.html', context=context)
@login_required
def batchListing(request):
    """Render the full batch list for logged-in users."""
    all_batches = Batch.objects.all()
    return render(request, 'batchthis/batches.html',
                  context={'batches': all_batches})
def batch(request, pk):
    """Detail page for one batch: gravity progress, chart data, and notes."""
    batch = get_object_or_404(Batch,pk=pk)
    testTypes = BatchTestType.objects.all()
    fermenters = batch.fermenter.all()
    gravity_tests = batch.tests.filter(type__shortid='specific-gravity')
    current_gravity = batch.startingGravity
    if len(gravity_tests) > 0:
        # We have gravity tests. Get the latest
        current_gravity = gravity_tests[len(gravity_tests)-1].value
    # Fraction of the expected gravity drop achieved so far, as a percent.
    percent_complete = round((batch.startingGravity-current_gravity)/(batch.startingGravity-batch.estimatedEndGravity)*100)
    # "1/3 sugar break": gravity value after one third of the total drop.
    thirdSugarBreak = round(batch.startingGravity-((batch.startingGravity-batch.estimatedEndGravity)/3),3)
    thirdSugarBreakPercent = round((batch.startingGravity-thirdSugarBreak)/(batch.startingGravity-batch.estimatedEndGravity)*100)
    ferm_notes = batch.notes.filter(notetype__name='Fermentation Note')
    gen_notes = batch.notes.filter(notetype__name='General Note')
    taste_notes = batch.notes.filter(notetype__name='Tasting Note')
    # Build parallel date/value lists for the gravity chart widget;
    # keys are initialized lazily on the first test seen.
    gravityChart = {}
    for test in gravity_tests:
        if not "dates" in gravityChart.keys():
            gravityChart["shortid"] = "specific-gravity"
            gravityChart["dates"] = []
            gravityChart["values"] = []
        strfmt = "%m/%d/%y"
        gravityChart["dates"].append(test.datetime.strftime(strfmt))
        gravityChart["values"].append(test.value)
    context = {
        "batch": batch,
        "percentComplete": percent_complete,
        "gravityChart": gravityChart,
        "gravityTests": gravity_tests,
        "testTypes": testTypes,
        "fermenters": fermenters,
        "thirdSugarBreak": thirdSugarBreak,
        "thirdSugarBreakPercent": thirdSugarBreakPercent,
        "startingGravity": batch.startingGravity,
        "endingGravity": batch.estimatedEndGravity,
        "gennotes": gen_notes,
        "fermnotes": ferm_notes,
        "tastenotes": taste_notes
    }
    return render(request, 'batchthis/batch.html', context=context)
def batchTest(request, pk=None):
    """Add a test result to a batch.

    GET renders the form (restricted to the given batch, or to active
    batches); POST validates and saves, then redirects to the batch page.
    """
    if request.method == 'GET':
        form = BatchTestForm()
        if pk:
            # We have a batchID. Restrict the choices and pre-select it.
            form.fields['batch'].queryset = Batch.objects.filter(pk=pk)
            form.initial = {'batch': pk}
        else:
            # We don't have a batchID. Only show active batches.
            form.fields['batch'].queryset = Batch.objects.filter(active=True)
    else:
        form = BatchTestForm(request.POST)
        # FIX: the original called form.save() without validating, which
        # raises on bad input, and redirected with pk=None when no pk was
        # given. Validate first and derive the pk from the saved test.
        if form.is_valid():
            form.save()
            target_pk = pk if pk is not None else form.cleaned_data['batch'].pk
            return HttpResponseRedirect(reverse('batch', kwargs={'pk': target_pk}))
        # Invalid form falls through and re-renders with errors.
    return render(request, "batchthis/addTest.html", {'form': form})
def batchAddition(request, pk=None):
    """Add an addition (adjunct/ingredient) record to a batch.

    GET renders the form (restricted to the given batch, or to active
    batches); POST validates and saves, then redirects to the batch page.
    """
    if request.method == 'GET':
        form = BatchAdditionForm()
        if pk:
            # Restrict the batch choices to the requested batch and pre-select it.
            form.fields['batch'].queryset = Batch.objects.filter(pk=pk)
            form.initial = {'batch': pk}
        else:
            # No batch given; offer only active batches.
            form.fields['batch'].queryset = Batch.objects.filter(active=True)
    else:
        form = BatchAdditionForm(request.POST)
        # FIX: the original saved without is_valid() and redirected with a
        # possibly-None pk. Validate first; fall back to the saved addition's
        # batch for the redirect target.
        if form.is_valid():
            form.save()
            target_pk = pk if pk is not None else form.cleaned_data['batch'].pk
            return HttpResponseRedirect(reverse('batch', kwargs={'pk': target_pk}))
        # Invalid form falls through and re-renders with errors.
    return render(request, "batchthis/addAddon.html", {'form': form})
def batchNote(request, pk=None, noteType=None):
    """Add a note to a batch, optionally pre-selecting the note type.

    GET renders the form (restricted/pre-filled when pk/noteType are given);
    POST validates and saves, then redirects to the batch page.
    """
    if request.method == 'GET':
        form = BatchNoteForm()
        form.initial = {}
        if pk:
            form.fields['batch'].queryset = Batch.objects.filter(pk=pk)
            form.initial['batch'] = pk
            if noteType:
                # Pre-select the requested note type.
                noteTypes = BatchNoteType.objects.filter(name=noteType)
                form.fields['notetype'].queryset = noteTypes
                form.initial['notetype'] = noteTypes[0].pk
        else:
            form.fields['batch'].queryset = Batch.objects.all()
    else:
        form = BatchNoteForm(request.POST)
        # FIX: the original saved without is_valid() and redirected with a
        # possibly-None pk. Validate first; fall back to the saved note's
        # batch for the redirect target.
        if form.is_valid():
            form.save()
            target_pk = pk if pk is not None else form.cleaned_data['batch'].pk
            return HttpResponseRedirect(reverse('batch', kwargs={'pk': target_pk}))
        # Invalid form falls through and re-renders with errors.
    return render(request, "batchthis/addNote.html", {'form': form})
def activity(request, pk=None):
    """Render the activity timeline for a batch, oldest first."""
    print("Getting activity for batch " + str(pk))
    entries = Batch.objects.get(pk=pk).activity.all().order_by('datetime')
    print("Found " + str(len(entries)) + " entries in activity")
    return render(request, "batchthis/activity.html", context={'activity': entries})
def refractometerCorrection(request):
    """Refractometer-correction utility view.

    Accepts start/current readings in Brix or SG and shows the corrected
    specific gravity and ABV.
    """
    form = RefractometerCorrectionForm(initial={'startUnit': 'bx', 'currentUnit': 'bx'})
    result = (0, 0)
    if request.method == "POST":
        form = RefractometerCorrectionForm(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            # Route each reading to the keyword its unit calls for.
            params = {}
            start_key = 'startSG' if data['startUnit'] == 'sg' else 'startBrix'
            params[start_key] = data['startData']
            current_key = 'currentSG' if data['currentUnit'] == 'sg' else 'currentBrix'
            params[current_key] = data['currentData']
            result = Utils.refractometerCorrection(**params)
    context = {
        'form': form,
        'sg': '%.3f' % result[0], # Format SG to normal readable notation
        'abv': round(result[1], 1),
    }
    return render(request, 'batchthis/util.refractometer.html', context)
def batchGraphs(request, pk):
    """Build per-test-type chart data (dates/values) for a batch's tests."""
    # Capture each type of test; no need to show graphs for tests not performed.
    all_tests = Batch.objects.get(pk=pk).tests.all()
    testGroup = {}
    for entry in all_tests:
        group = testGroup.setdefault(
            entry.type.name,
            {'shortid': entry.type.shortid, 'dates': [], 'values': []},
        )
        group['dates'].append(entry.datetime.strftime("%m/%d/%y"))
        group['values'].append(entry.value)
    context = {"tests": testGroup,
               "testTypes": testGroup.keys()
               }
    return render(request, "batchthis/batchGraphs.html", context)
# Print the 9x9 multiplication table (kept commented out below, along with a
# small list-of-dicts filtering demo).
'''
for i in range(1,10):
    for j in range(1,i+1):
        print('{0}*{1}={2}'.format(i,j,i*j),end='\t')
    print()
#使用列表和字典存储表格和数据
r1=dict(name='vgh',age=17,salary=30000,city='beijing')
r2=dict(name='ghj',age=17,salary=20000,city='beijing')
r3=dict(name='mnn',age=17,salary=15000,city='beijing')
r=[r1,r2,r3]
for i in r:
    if i.get('salary', 0)>18000:
        print(i)
'''
# Character-frequency dict comprehension (s.count per char is O(n^2), fine for short s).
s='yftydtydtrxdgsdfsdgr'
a={a:s.count(a) for a in s }
print(a)
# List comprehension: doubles of 3..9.
y=[x*2 for x in range(1,10) if x>2]
print(y)
# Set comprehension: same values, unordered.
y={x*2 for x in range(1,10) if x>2}
print(y)
# Generator expression: lazy, so printing it shows only the generator object.
y=(x*2 for x in range(1,10) if x>2)
print(y)
print(tuple(y))
# The generator is exhausted by the first tuple(); this second call prints ().
print(tuple(y))
from bsp.leveleditor.DocObject import DocObject
# Base class for serializable map data
class MapWritable(DocObject):
    # Name used to identify this kind of object; subclasses override.
    ObjectName = "writable"

    def __init__(self, doc):
        """Initialize this writable against the owning document *doc*."""
        DocObject.__init__(self, doc)

    def writeKeyValues(self, keyvalues):
        """Serialize this object's state into *keyvalues*. Subclasses must override."""
        raise NotImplementedError

    def readKeyValues(self, keyvalues):
        """Restore this object's state from *keyvalues*. Subclasses must override."""
        raise NotImplementedError
|
#!/usr/bin/env python3
__appname__ = '[models.py]'
__author__ = 'Pablo Lechon (plechon@uchicago.edu)'
__version__ = '0.0.1'
## IMPORTS ##
import numpy as np
## FUNCTIONS ##
def lotka_volterra(t, N, params):
    '''
    Right-hand side of a generalised Lotka-Volterra system.

    Parameters:
        t: time (unused; present for ODE-solver signature compatibility)
        N: abundances of the s species (sequence of length s)
        params: dict with
            s (int): number of species
            r (sx1): growth rate of each species
            A (sxs): matrix of interactions

    Output:
        list (1xs): abundance change of each species after one time
        iteration
    '''
    s, r, A = (params.get(key) for key in ('s', 'r', 'A'))
    # Diagonal matrix of current abundances (built from the flat input).
    abundance_diag = np.diag(N)
    # Column-vector form of N for the matrix products below.
    N = np.array(N).reshape(s, 1)
    # dN/dt = diag(N) (r + A N)
    dNdt = abundance_diag @ (r + A @ N)
    return list(dNdt.reshape(s))
def consumer_resouce_crossfeeding(t, z, params):
    '''
    Differential equations of the Marsland model in matrix form.

    Parameters (in params):
        s (int): number of species
        m (int): number of resources
        g (sx1): proportionality constant harvested energy --> abundance
        c (sxm): preference matrix of the community
        l (mx1): leakage factor of each resource
        x (sx1): maintenance cost of each species
        D (mxm): metabolic matrix of community
        K (mx1): resource supply rate
        t (scalar): resource dilution timescale (a parameter, distinct from
            the integration-time argument)

    Arguments:
        t: integration time (unused; the system is autonomous)
        z: concatenated state [N (s values), R (m values)]

    Output:
        list (1x(s+m)): abundance of each species and resource after
        one time iteration
    '''
    # FIX: the original unpacked params['t'] into the name `t`, silently
    # shadowing the integration-time argument; use `tau` for the timescale.
    s, m, g, c, l, x, D, K, tau = map(params.get, ('s', 'm', 'g', 'c', 'l',
                                                   'x', 'D', 'K', 't'))
    # Separate species and resource vectors and reshape them to column vectors.
    N = np.array(z[0:s]).reshape(s, 1)
    R = np.array(z[s:m+s]).reshape(m, 1)
    # Species growth: harvested (non-leaked) energy minus maintenance cost.
    dNdt = g * N * (c @ ((1 - l) * R) - x)
    # Resources: supply, dilution, direct consumption, and cross-fed leakage.
    dRdt = K - 1 / tau * R - (c.transpose() @ N) * R + \
        D.transpose() @ ((l * R) * c.transpose()) @ N
    return list(dNdt.reshape(s)) + list(dRdt.reshape(m))
|
# -*- coding: utf-8 -*-
def cambiar(cantidad, tipo_cambio=18.81):
    """Convert *cantidad* (Mexican pesos) to US dollars.

    The exchange rate was a hard-coded constant; it is now an optional
    parameter (pesos per dollar) defaulting to the original 18.81, so
    existing callers are unaffected.
    """
    return cantidad / tipo_cambio
def main():
    """Interactive pesos-to-dollars converter (Spanish console I/O)."""
    print('Calculadora de Dolares')
    print('')
    pesos = float(input('Ingresa la cantidad de pesos que quieres convertir:'))
    dolares = cambiar(pesos)
    print('${} pesos mx son: ${}dolares (us)'.format(pesos, dolares))
    print('')

if __name__=='__main__':
    main()
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
import logging
import os
import os.path
from pylons import request, response, session, tmpl_context as c
from pylons import app_globals
from pycloud.pycloud.pylons.lib.base import BaseController
from pycloud.pycloud.pylons.lib import helpers as h
from pycloud.pycloud.model import Service, ServiceVM, VMImage
from pycloud.pycloud.pylons.lib.util import asjson, encoded_json_to_dict
from pycloud.manager.lib.pages import ModifyPage
from pycloud.pycloud.utils import ajaxutils
from pycloud.pycloud.utils import fileutils
log = logging.getLogger(__name__)
################################################################################################################
# Controller for the Modify page.
################################################################################################################
class ModifyController(BaseController):
################################################################################################################
# Called when loading the page to add or edit a service.
################################################################################################################
def GET_index(self, id=None):
# Mark the active tab.
c.services_active = 'active'
# Load the data into the page.
page = ModifyPage()
# If we are loading data from an existing Service, load it.
serviceID = id
page = self.loadDataIntoPage(page, serviceID)
# Render the page with the data.
return page.render()
################################################################################################################
# Loads data about the stored service vm into a page, and returns the filled page.
################################################################################################################
def loadDataIntoPage(self, page, serviceID):
# Setup the page to render.
page.form_values = {}
page.form_errors = {}
page.os_options = {'Linux':'Linux','Windows':'Windows'}
# URL to create a new Service VM.
page.createSVMURL = h.url_for(controller="modify", action='createSVM')
# Check if we are editing or creating a new service.
creatingNew = serviceID is None
page.saveInstanceURL = h.url_for(controller='modify', action='saveInstanceToRoot')
page.stopInstanceURL = h.url_for(controller='instances', action='stopInstance', id='')
page.startInstanceURL = h.url_for(controller='instances', action='startInstance', id='')
page.chooseImageURL = h.url_for(controller='modify', action='getImageInfo', id=None)
if(creatingNew):
# We are creating a new service.
page.newService = True
page.internalServiceId = ''
else:
# Look for the service with this id.
service = Service.by_id(serviceID)
# We are editing an existing service.
page.newService = False
page.internalServiceId = service._id
if service:
# Metadata values.
page.form_values['serviceID'] = service.service_id
page.form_values['servicePort'] = service.port
page.form_values['serviceDescription'] = service.description
page.form_values['serviceVersion'] = service.version
page.form_values['serviceTags'] = ",".join(service.tags)
page.form_values['numClientsSupported'] = service.num_users
page.form_values['reqMinMem'] = service.min_memory
page.form_values['reqIdealMem'] = service.ideal_memory
# VM Image values. The ...Value fields are for storing data, while the others are for
# showing it only. Since the vmDiskImageFile and vmStateImageFile fields are disabled,
# (read-only) their value is not sent, and we have to store that value in hidden variables.
if(service.vm_image.disk_image):
page.form_values['vmStoredFolder'] = os.path.dirname(service.vm_image.disk_image)
page.form_values['vmDiskImageFile'] = service.vm_image.disk_image
page.form_values['vmDiskImageFileValue'] = service.vm_image.disk_image
if(service.vm_image.state_image):
page.form_values['vmStateImageFile'] = service.vm_image.state_image
page.form_values['vmStateImageFileValue'] = service.vm_image.state_image
return page
################################################################################################################
# Modifying a Service record.
################################################################################################################
def POST_index(self):
# Mark the active tab.
c.services_active = 'active'
# Get the internal id.
internalServiceId = request.params.get("internalServiceId")
print 'Internal service id ' + internalServiceId
# Check if there is another service already with this service id.
service_id = request.params.get("serviceID")
previous_service = Service.by_id(service_id)
if previous_service and str(previous_service['_id']) != internalServiceId:
# TODO: somehow notify the error.
print "A service can't have the same service id as an existing service."
return h.redirect_to(controller='services')
# Look for a service with this id.
service = Service.by_internal_id(internalServiceId)
if not internalServiceId or not service:
# If we didn't get an internal service id or we couldn't find such service, we are creating a new one.
print 'Creating new service'
service = Service()
else:
print 'Service found, with internal id ' + str(service._id)
# Service
service.service_id = request.params.get("serviceID")
service.version = request.params.get("serviceVersion")
service.description = request.params.get("serviceDescription")
service.tags = request.params.get("serviceTags")
if service.tags:
service.tags = service.tags.split(',')
else:
service.tags = []
service.port = request.params.get("servicePort")
service.num_users = request.params.get("numClientsSupported", "")
try:
service.num_users = int(service.num_users)
except Exception as e:
service.num_users = 0
# Requirements
service.min_memory = request.params.get("reqMinMem")
service.ideal_memory = request.params.get("reqIdealMem")
# VM Image info.
service.vm_image = VMImage()
service.vm_image.disk_image = request.params.get("vmDiskImageFileValue")
service.vm_image.state_image = request.params.get("vmStateImageFileValue")
# Create or update the information.
service.save()
# Render the page.
return h.redirect_to(controller='services')
############################################################################################################
# Creates a new Service VM.
############################################################################################################
@asjson
def POST_createSVM(self):
# Get the manager.
print 'Creating SVM...'
svm = ServiceVM()
svm.generate_random_id()
# Parse the body of the request as JSON into a python object.
fields = encoded_json_to_dict(request.body)
# Create an SVM and open a VNC window to modify the VM.
svm.service_id = fields['serviceId']
try:
# Set up a new VM image.
print 'newVmFolder: ', app_globals.cloudlet.newVmFolder
print 'svm._id: ', svm._id
temp_svm_folder = os.path.join(app_globals.cloudlet.newVmFolder, svm._id)
print 'temp_svm_folder: ', temp_svm_folder
new_disk_image = os.path.join(temp_svm_folder, svm.service_id)
new_vm_image = VMImage()
print 'calling VMImage#create with "%s" and "%s"' % (fields['source'], new_disk_image)
new_vm_image.create(fields['source'], new_disk_image)
new_vm_image.unprotect()
# Set the OS type.
os_type = fields['type']
if os_type == 'Windows':
svm.os = "win"
else:
svm.os = "lin"
# Create the VM (this will also start it).
print "Creating and starting VM for user access..."
template_xml_file = os.path.abspath(app_globals.cloudlet.newVmXml)
svm.vm_image = new_vm_image
svm.service_port = fields['port']
svm.create(template_xml_file)
svm.save()
# Return info about the svm.
return svm
except Exception as e:
# If there was a problem creating the SVM, return that there was an error.
msg = 'Error creating Service VM: ' + str(e)
import traceback, sys
traceback.print_exc(file=sys.stdout)
#if svm.vm_image:
# svm.vm_image.cleanup(force=True)
return ajaxutils.show_and_return_error_dict(msg)
############################################################################################################
# Stops and saves a Service VM that was edited to its permanent root VM image.
############################################################################################################
@asjson
def GET_saveInstanceToRoot(self):
try:
id = str(request.params.get('id'))
if id is None:
msg = "No VM id was provided, VM can't be saved."
return ajaxutils.show_and_return_error_dict(msg)
# Save the VM state.
print "Saving machine state for SVM with id " + str(id)
svm = ServiceVM.by_id(id)
svm.stop(foce_save_state=True, cleanup_files=False)
print "Service VM stopped, and machine state saved."
print 'Editing? ' + str(request.params.get('editing'))
if request.params.get('editing') == 'false':
# Use the service id as the folder for this new saved SVM.
vm_image_folder = os.path.join(app_globals.cloudlet.svmCache, svm.service_id)
else:
# Get the folder of the permanent VM image, to overwrite the previous one.
service = Service.by_id(svm.service_id)
vm_image_folder = os.path.dirname(service.vm_image.disk_image)
# Permanently store the VM.
print 'Moving Service VM Image to cache, from folder {} to folder {}.'.format(os.path.dirname(svm.vm_image.disk_image), vm_image_folder)
svm.vm_image.move(vm_image_folder)
# Make the VM image read only.
print 'Making VM Image read-only.'
try:
svm.vm_image.protect()
print 'VM Image updated.'
except:
print 'Error making VM read-only. Check permissions on file.'
# Everything went well, return image info.
return svm.vm_image
except Exception as e:
# If there was a problem opening the SVM, return that there was an error.
msg = 'Error saving Service VM: ' + str(e)
return ajaxutils.show_and_return_error_dict(msg)
############################################################################################################
# Loads information about the VM image in the given folder.
############################################################################################################
@asjson
def POST_getImageInfo(self):
# Parse the body of the request as JSON into a python object.
fields = encoded_json_to_dict(request.body)
# Load VM Image information from the folder.
image_folder = fields['folder']
vm_image = VMImage()
try:
vm_image.load_from_folder(image_folder)
except Exception as e:
msg = 'Error selecting existing VM image: ' + str(e)
return ajaxutils.show_and_return_error_dict(msg)
return vm_image
|
from typing import Any
from typing import List
from typing import Optional
from fastapi import HTTPException
from pydantic_aioredis import Model
class FastAPIModel(Model):
    """
    pydantic_aioredis Model with FastAPI-oriented conveniences.
    """

    @classmethod
    async def select_or_404(
        cls,
        columns: Optional[List[str]] = None,
        ids: Optional[List[Any]] = None,
        custom_exception: Optional[Exception] = None,
    ):
        """
        Select the given rows/columns from the table.

        Returns the selection when it is non-None; otherwise raises
        *custom_exception* if one was supplied, else a 404 HTTPException.
        """
        result = await cls.select(columns=columns, ids=ids)
        if result is not None:
            return result
        if custom_exception is not None:
            raise custom_exception
        raise HTTPException(
            status_code=404, detail=f"{cls.__name__} not found"
        )
|
import random
import math
import matplotlib.pyplot as plt
from collections import Counter

# Monte-Carlo timing experiment: sample stage durations, total them, and plot
# a histogram of the floored totals (x axis rescaled by /60).
l2c = 16.65  # fixed stage duration
f4c = 0
f8e = 0
l4e = 0
three = 0
stat = []
for i in range(100):
    result = 0
    f4c = random.uniform(104.64,180)
    # NOTE(review): the nested loops below only re-assign the same variables
    # each pass, so only the final draw of each survives; the nesting is an
    # expensive way to sample once — confirm the intended sampling scheme.
    for j in range(math.floor(f4c)):
        f8e = random.uniform(150,183.2)
        for k in range(math.floor(f8e)):
            l4e = random.uniform(40,120)
            for l in range(math.floor(l4e)):
                three = random.uniform(20,35)
    print(f4c, f8e, l4e, three, f4c+l2c+f8e+l4e+three)
    result = math.floor(f4c+l2c+f8e+l4e+three)
    stat.append(result)
# Sorted (total, count) frequency pairs.
stats = sorted(Counter(stat).items())
print(stats)
y = []
x = []
for tuples in stats:
    y.append(tuples[1])
    x.append(tuples[0]/60)  # rescale totals (minutes -> hours, presumably — confirm units)
print(y,x)
plt.plot(x,y)
plt.show()
|
import json
import os
import requests
API_VERSION = "2019-11-01"
def get_secrets():
    """Read the service-principal credentials from keys.json."""
    with open("keys.json") as fh:
        return json.load(fh)
def authenticate_to_azure(secrets) -> str:
    """
    Authenticate to Azure as a service principal via the OAuth2
    client-credentials flow and return a bearer token.
    """
    tenant_id = secrets["AZURE_TENANT_ID"]
    url = f"https://login.microsoftonline.com/{tenant_id}/oauth2/token"
    payload = {
        "grant_type": "client_credentials",
        "client_id": secrets["AZURE_CLIENT_ID"],
        "client_secret": secrets["AZURE_CLIENT_SECRET"],
        "resource": secrets["RESOURCE"],
    }
    response = requests.post(
        url,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
        data=payload,
    )
    return response.json()["access_token"]
def create_invitation(secrets) -> dict:
    """
    Create an invitation for an existing share.

    NOTE(review): reads the module-level global `token` (set in __main__)
    instead of taking it as a parameter — confirm before reusing as a library.
    """
    resource_group_name = "rg_crosstenant"
    account_name = "kmdatashare"
    share_name = "publishershare1"
    invitation_name = "invitesp"
    subscription_id = secrets["SUBSCRIPTIONID"]
    url = (
        f"https://management.azure.com/subscriptions/{subscription_id}"
        f"/resourceGroups/{resource_group_name}"
        f"/providers/Microsoft.DataShare/accounts/{account_name}"
        f"/shares/{share_name}/invitations/{invitation_name}"
    )
    payload = {
        "properties": {
            "targetActiveDirectoryId": secrets["TARGET_AAD"],
            "targetObjectId": secrets["TARGET_OBJECT_ID"],
        }
    }
    response = requests.put(
        url=url,
        headers={"Authorization": "Bearer " + token, "Content-Type": "application/json"},
        params={"api-version": API_VERSION},
        json=payload,
    )
    return response.json()
def list_invitations(token) -> dict:
    """List the Azure Data Share invitations visible to the caller."""
    response = requests.get(
        url="https://management.azure.com/providers/Microsoft.DataShare/ListInvitations",
        headers={"Authorization": "Bearer " + token, "Content-Type": "application/json"},
        params={"api-version": API_VERSION},
    )
    return response.json()
def accept_invitation(secrets, invitation_id, share_subscription_name):
    """
    Accept a share invitation sent to an AAD service principal.

    NOTE(review): reads the module-level global `token` (set in __main__)
    rather than a parameter — confirm before reusing as a library.
    """
    account_name = "kmdatashare2"
    resource_group_name = "rg_crosstenant"
    subscription_id = secrets["SUBSCRIPTIONID"]
    url = (
        f"https://management.azure.com/subscriptions/{subscription_id}"
        f"/resourceGroups/{resource_group_name}"
        f"/providers/Microsoft.DataShare/accounts/{account_name}"
        f"/shareSubscriptions/{share_subscription_name}"
    )
    payload = {
        "properties": {"invitationId": invitation_id, "sourceShareLocation": "eastus2",}
    }
    response = requests.put(
        url=url,
        headers={"Authorization": "Bearer " + token, "Content-Type": "application/json"},
        params={"api-version": API_VERSION},
        json=payload,
    )
    return response.json()
if __name__ == "__main__":
    secrets = get_secrets()
    # token becomes a module-level global; create_invitation and
    # accept_invitation read it directly rather than taking a parameter.
    token = authenticate_to_azure(secrets)
    # 1) Create Invitation (Publisher)
    invite = create_invitation(secrets)
    print("\nInvitation Created\n")
    print(json.dumps(invite, indent=4, sort_keys=True))
    # 2) List Invitations (Subscriber)
    invitations = list_invitations(token)
    print("\nInvitation List\n")
    print(json.dumps(invitations, indent=4, sort_keys=True))
    # Example just grabs the invitation id of the first invitation in the list.
    invitation_id = invitations["value"][0]["properties"]["invitationId"]
    print(f"\nInvite id is {invitation_id}\n")
    # 3) Accept Invitation + Create Share (Subscriber)
    accepted_invite = accept_invitation(secrets, invitation_id, "subscribershare1")
    print(f"\nInvitation ID '{invitation_id}' has been accepted.\n")
    print(json.dumps(accepted_invite, indent=4, sort_keys=True))
|
##Linear Regression##
#----------Preparing the data ----------#
#%%
import numpy as np
import matplotlib.pyplot as plt
# Synthetic linear data: y = 4 + 3x + Gaussian noise.
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)
X_new = np.array([[0], [2]])
#---------- Linear Regression ----------#
#%%
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# Intercept/slope should come out close to (4, 3).
lin_reg.intercept_, lin_reg.coef_
lin_reg.predict(X_new)
#---------- Stochastic Gradient Descent ----------#
#%%
from sklearn.linear_model import SGDRegressor
# NOTE(review): n_iter was renamed max_iter in scikit-learn >= 0.19 —
# confirm the installed version still accepts n_iter.
sgd_reg = SGDRegressor(n_iter=50, penalty=None, eta0=0.1)
#penalty = 'l2' if you want to add ridge regularization
sgd_reg.fit(X, y.ravel())
sgd_reg.intercept_, sgd_reg.coef_
##Polynomial Regression##
#----------Preparing the data ----------#
#%%
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
#---------- Polynomial Regression ----------#
#%%
#Convert linear feature to square
from sklearn.preprocessing import PolynomialFeatures
poly_features = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly_features.fit_transform(X)
X[0], X_poly[0]
#Run linear regression on the new feature
lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.intercept_, lin_reg.coef_
#---------- Learning Curves ----------#
#%%
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y):
    """Plot RMSE learning curves for *model* on a fresh train/validation split.

    Fits the model on growing prefixes of the training set and plots the
    training RMSE (red) against the validation RMSE (blue).
    """
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
    train_errors, val_errors = [], []
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        train_errors.append(mean_squared_error(y_train_predict, y_train[:m]))
        val_errors.append(mean_squared_error(y_val_predict, y_val))
    plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
    # FIX: labels were passed above but plt.legend() was never called,
    # so they never appeared on the figure.
    plt.legend(loc="upper right")
#Learning curves for linear regression (underfitting)
#%%
lin_reg = LinearRegression()
plot_learning_curves(lin_reg, X, y)
#Learning curves for polynomial regression (overfitting)
#%%
from sklearn.pipeline import Pipeline
polynomial_regression = Pipeline((
    ("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
    ("lin_reg", LinearRegression()),
))
plot_learning_curves(polynomial_regression, X, y)
#---------- Regularization for overfitted data ----------#
##Ridge regression
#%%
from sklearn.linear_model import Ridge
ridge_reg = Ridge(alpha=1, solver="cholesky")
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
##Lasso Regression
#%%
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])
##Elastic Net Regression
##Between lasso and ridge
##r = 1 is lasso, r = 0 is ridge
#%%
from sklearn.linear_model import ElasticNet
elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)
elastic_net.fit(X, y)
elastic_net.predict([[1.5]])
#---------- Early Stopping ----------#
#Stop training as soon as error starts increasing again
#Error increases due to overfitting
#%%
from sklearn.base import clone
# warm_start=True keeps the learned weights between successive fit() calls.
sgd_reg = SGDRegressor(n_iter=1, warm_start=True, penalty=None,
                       learning_rate="constant", eta0=0.0005)
minimum_val_error = float("inf")
best_epoch = None
best_model = None
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
#%%
import warnings
warnings.filterwarnings('ignore')
for epoch in range(1000):
    sgd_reg.fit(X_train, y_train)
    y_val_predict = sgd_reg.predict(X_val)
    val_error = mean_squared_error(y_val_predict, y_val)
    if val_error < minimum_val_error:
        minimum_val_error = val_error
        best_epoch = epoch
        best_model = clone(sgd_reg)
##Logistic Regression##
#---------- Getting the data ----------#
#%%
from sklearn import datasets
iris = datasets.load_iris()
list(iris.keys())
#%%
# NOTE(review): np.int is removed in NumPy >= 1.24 — confirm the pinned
# NumPy version, or use plain int.
X = iris["data"][:,3:] #Petal width
y = (iris["target"] == 2).astype(np.int) #1 if Iris-Virginica, else 0
#---------- Training the regression model ----------#
#%%
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X, y)
#---------- Predicting the petal width probabilities ----------#
#%%
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
plt.plot(X_new, y_proba[:, 1], "g-", label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", label="Not Iris-Virginica")
#%%
log_reg.predict([[1.7], [1.5]])
#In sklearn, logistic regression is regularized by l2 penalty by default
#The parameter is C not alpha. The higher C is, the lower is the regularization
#---------- Softmax Regression ----------#
#Scikit uses one-vs-all by default
#Used if logistic regression for more than 2 classes
#%%
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
softmax_reg = LogisticRegression(multi_class="multinomial",solver="lbfgs", C=10)
softmax_reg.fit(X, y)
softmax_reg.predict([[5, 2]])
#%%
softmax_reg.predict_proba([[5, 2]])
##Support Vector Machine##
#%%
import numpy as np
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = (iris["target"] == 2).astype(np.float64) # Iris-Virginica
svm_clf = Pipeline((
    ("scaler", StandardScaler()),
    ("linear_svc", LinearSVC(C=1, loss="hinge")),
))
svm_clf.fit(X, y)
#%%
svm_clf.predict([[5.5, 1.7]])
#For large datasets, use SGDClassifier(loss="hinge", alpha=1/(m*C));
#---------- Non-linear SVM ----------#
#Sometimes dataset won't fit an SVM
#So we add more features to make it fit an SVM
###Always use scaling for SVM
#%%
from sklearn.datasets import make_moons
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
polynomial_svm_clf = Pipeline((
    ("poly_features", PolynomialFeatures(degree=3)),
    ("scaler", StandardScaler()),
    ("svm_clf", LinearSVC(C=10, loss="hinge"))
))
polynomial_svm_clf.fit(X, y)
#---------- Polynomial Kernel ----------#
#Above method with very high degree and still high speed
#%%
from sklearn.svm import SVC
poly_kernel_svm_clf = Pipeline((
    ("scaler", StandardScaler()),
    ("svm_clf", SVC(kernel="poly", degree=3, coef0=1, C=5))
))
poly_kernel_svm_clf.fit(X, y)
#---------- Similarity Features with Gaussian RBF Kernel ----------#
#%%
rbf_kernel_svm_clf = Pipeline((
    ("scaler", StandardScaler()),
    ("svm_clf", SVC(kernel="rbf", gamma=5, C=0.001))
))
rbf_kernel_svm_clf.fit(X, y)
#---------- SVM Regression ----------#
#%%
from sklearn.svm import LinearSVR
svm_reg = LinearSVR(epsilon=1.5)
svm_reg.fit(X, y)
#---------- Non-linear SVM Regression ----------#
#%%
from sklearn.svm import SVR
svm_poly_reg = SVR(kernel="poly", degree=2, C=100, epsilon=0.1)
svm_poly_reg.fit(X, y)
##Decision Trees##
#---------- Classification ----------#
#%%
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data[:, 2:] # petal length and width
y = iris.target
tree_clf = DecisionTreeClassifier(max_depth=2)
tree_clf.fit(X, y)
#Visualizing the decision tree
#%%
from sklearn.tree import export_graphviz
# NOTE(review): image_path is not defined anywhere in this file — confirm it
# is provided by the notebook environment before running this cell.
export_graphviz(
    tree_clf,
    out_file=image_path("iris_tree.dot"),
    feature_names=iris.feature_names[2:],
    class_names=iris.target_names,
    rounded=True,
    filled=True
)
#.dot --> .png using $ dot -Tpng iris_tree.dot -o iris_tree.png
#Predicting
#%%
tree_clf.predict_proba([[5, 1.5]]), tree_clf.predict([[5, 1.5]])
#---------- Regression ----------#
#%%
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor(max_depth=2)
tree_reg.fit(X, y)
##Ensemble Learning##
#---------- Voting Classifier ----------#
#%%
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
log_clf = LogisticRegression()
rnd_clf = RandomForestClassifier()
svm_clf = SVC(probability=True)
voting_clf = VotingClassifier(
estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],
voting='soft')
voting_clf.fit(X_train, y_train)
#Checking accuracy of the above ensemble
#%%
from sklearn.metrics import accuracy_score
for clf in (log_clf, rnd_clf, svm_clf, voting_clf):
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(clf.__class__.__name__, accuracy_score(y_test, y_pred))
#---------- Bagging and Pasting ----------#
#Baging --> with replacement
#Pasting --> without replacement
#%%
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
bag_clf = BaggingClassifier(
DecisionTreeClassifier(), n_estimators=500,
max_samples=100, bootstrap=True, n_jobs=-1, oob_score=True)
bag_clf.fit(X_train, y_train)
y_pred = bag_clf.predict(X_test)
#For pasting, set bootstrap=False
#n_jobs is the number of CPU cores to use. -1 means all.
#oob_score tests the classifier on out-of-bag values --> bag_clf.oob_score_
#To get probabilities of each training instance, --> bag_clf.oob_decision_function_
#max_features and bootstrap_features can also be used
#---------- Random Forest Classifier ----------#
#RandromForestRegressor can also be used
#%%
from sklearn.ensemble import RandomForestClassifier
rnd_clf = RandomForestClassifier(n_estimators=500, max_leaf_nodes=16, n_jobs=-1)
rnd_clf.fit(X_train, y_train)
y_pred_rf = rnd_clf.predict(X_test)
#ExtraTreesClassifier also available
#---------- Feature Importance ----------#
#%%
from sklearn.datasets import load_iris
iris = load_iris()
rnd_clf = RandomForestClassifier(n_estimators=500, n_jobs=-1)
rnd_clf.fit(iris["data"], iris["target"])
for name, score in zip(iris["feature_names"], rnd_clf.feature_importances_):
print(name, score)
#---------- Boosting ----------#
##AdaBoost
#%%
from sklearn.ensemble import AdaBoostClassifier
ada_clf = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=1), n_estimators=200,
algorithm="SAMME.R", learning_rate=0.5)
ada_clf.fit(X_train, y_train)
##Gradient Boosting
#%%
from sklearn.ensemble import GradientBoostingRegressor
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=3, learning_rate=1.0)
gbrt.fit(X, y)
##Gradient boosting with ideal number of trees
#%%
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
X_train, X_val, y_train, y_val = train_test_split(X, y)
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120)
gbrt.fit(X_train, y_train)
# staged_predict yields the validation prediction after 1, 2, ..., 120 trees,
# so errors[i] is the MSE of an ensemble built from i + 1 trees.
errors = [mean_squared_error(y_val, y_pred)
          for y_pred in gbrt.staged_predict(X_val)]
# FIX: argmin indexes from 0 but n_estimators counts from 1 -- the best
# ensemble size is argmin + 1. The previous np.argmin(errors) alone was off
# by one and could even produce the invalid value n_estimators=0.
bst_n_estimators = int(np.argmin(errors)) + 1
gbrt_best = GradientBoostingRegressor(max_depth=2, n_estimators=bst_n_estimators)
gbrt_best.fit(X_train, y_train)
#Early stopping can also be used
#GradientBoostingRegressor has a subsample hyperparameter (0 to 1).
#---------- Stacking ----------#
#https://github.com/viisar/brew
##Dimensionality Reduction##
#---------- Principal Component Analysis ----------#
#%%
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2D = pca.fit_transform(X)
pca.components_
pca.explained_variance_ratio_
#To choose correct number of dimensions, set n_components to a float between 0 and 1
#0.95 would mean preserving 95% variance
#---------- Recovering original data from PCA ----------#
# NOTE(review): 154 components suggests the MNIST example from the
# accompanying book -- X_train here is dataset-dependent; confirm upstream.
#%%
pca = PCA(n_components = 154)
X_reduced = pca.fit_transform(X_train)
X_recovered = pca.inverse_transform(X_reduced)
#---------- Incremental PCA ----------#
#Fits the PCA batch-by-batch so the whole training set never has to be in memory at once.
#%%
from sklearn.decomposition import IncrementalPCA
n_batches = 100
inc_pca = IncrementalPCA(n_components=154)
for X_batch in np.array_split(X_train, n_batches):
    inc_pca.partial_fit(X_batch)
X_reduced = inc_pca.transform(X_train)
#---------- Randomized PCA ----------#
#%%
rnd_pca = PCA(n_components=154, svd_solver="randomized")
X_reduced = rnd_pca.fit_transform(X_train)
#---------- Kernel PCA ----------#
#%%
from sklearn.decomposition import KernelPCA
rbf_pca = KernelPCA(n_components = 2, kernel="rbf", gamma=0.04)
X_reduced = rbf_pca.fit_transform(X)
#---------- Selecting Kernel and Hyperparameters ----------#
#Grid-search the kPCA hyperparameters through a downstream classifier's CV score.
#%%
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
clf = Pipeline([
    ("kpca", KernelPCA(n_components=2)),
    ("log_reg", LogisticRegression())
])
param_grid = [{
    "kpca__gamma": np.linspace(0.03, 0.05, 10),
    "kpca__kernel": ["rbf", "sigmoid"]
}]
grid_search = GridSearchCV(clf, param_grid, cv=3)
grid_search.fit(X, y)
print(grid_search.best_params_)
#---------- Computing reconstruction pre-image error ----------#
#%%
rbf_pca = KernelPCA(n_components = 2, kernel="rbf", gamma=0.0433,
                    fit_inverse_transform=True)
#fit_inverse_transform is responsible for this
X_reduced = rbf_pca.fit_transform(X)
X_preimage = rbf_pca.inverse_transform(X_reduced)
from sklearn.metrics import mean_squared_error
mean_squared_error(X, X_preimage)
#---------- LLE ----------#
from sklearn.manifold import LocallyLinearEmbedding
lle = LocallyLinearEmbedding(n_components=2, n_neighbors=10)
X_reduced = lle.fit_transform(X)
from selenium import webdriver
class SeleniumDriver:
    """Thin wrapper that holds an externally created Selenium WebDriver."""

    def __init__(self, selenium_driver):
        # Store the driver; this class does not create or quit it itself.
        self.driver = selenium_driver
def browser():
    """Build and return a Chrome WebDriver configured with default ChromeOptions."""
    return webdriver.Chrome(options=webdriver.ChromeOptions())
|
# -*- coding: utf-8 -*-
"""Tests using the intermediate test class."""
import unittest_templates
from tests import constants
from tests.constants import A, B, BaseLetter
class TestA(constants.TestLetter):
    """Tests for A."""

    # Concrete class under test; the inherited TestLetter template supplies the tests.
    cls = A
class TestB(constants.TestLetter):
    """Tests for a B."""

    cls = B
    # Keyword arguments the template forwards to cls when instantiating it.
    kwargs = dict(name="hello")
class MetaLetterTestCase(unittest_templates.MetaTestCase):
    """A meta test for letters."""

    # Pairing checked by the meta test: every BaseLetter subclass should have
    # a TestLetter-derived test case.
    base_cls = BaseLetter
    base_test = constants.TestLetter
class SkipperTestCase(unittest_templates.GenericTestCase[BaseLetter]):
    """A test case that should automatically skip because no ``cls`` was defined."""
|
#_calculate_basin_statsgo_summary.py
#Cody Moser
#cody.moser@amec.com
#AMEC
#Description: calculates basin % soil class from .csv files
#import script modules
import glob
import os
import re
import numpy
import csv
####################################################################
#USER INPUT SECTION
####################################################################
#ENTER RFC
RFC = 'NWRFC'
#FOLDER PATH OF STATSGO .csv DATA FILES
# NOTE(review): raw strings with doubled backslashes produce literal '\\'
# separators; Windows tolerates repeated separators, but r'P:\NWS\...' (single
# backslashes) would be the conventional spelling.
csv_folderPath = r'P:\\NWS\\GIS\\NWRFC\\STATSGO\\data_files\\'
#FOLDER PATH OF BASIN SUMMARY STATSGO .xls DATA FILE (!Must be different than csv_FolderPath!)
output_folderPath = r'P:\\NWS\\GIS\\NWRFC\\STATSGO\\'
####################################################################
#END USER INPUT SECTION
####################################################################
print 'Script is Running...'
statsgo_file = open(output_folderPath + '_' + RFC + '_STATSGO_Summary.csv', 'w')
statsgo_file.write('Basin,' + '%A,' + '%B,' + '%C,' + '%D,' + '\n')
#loop through gSSURGO .xls files in folderPath
for filename in glob.glob(os.path.join(csv_folderPath, "*.csv")):
#print filename
#Define output file name
name = str(os.path.basename(filename)[:])
name = name.replace('.csv', '')
#print name
txt_file = open(filename, 'r')
#csv_file = open(r'P:\\NWS\\GIS\\NERFC\\APriori\\temp.csv', 'w')
csv_file = open(output_folderPath + 'temp.csv', 'w')
grid = []
for line in txt_file:
#print line
csv_file.write(line)
csv_file.close()
txt_file.close()
csv_file = open(output_folderPath + 'temp.csv')
data_file = csv.reader(csv_file, delimiter = ',')
data_file.next()
A = []
B = []
C = []
D = []
Count = []
#GET THE RASTER GRID COUNT OF EACH SOIL TYPE
for row in data_file:
soil = str(row[6])
count = float(row[8])
if soil == 'A' or soil == 'A/D':
A.append(count)
Count.append(count)
if soil == 'B' or soil == 'B/D':
B.append(count)
Count.append(count)
if soil == 'C' or soil == 'C/D':
C.append(count)
Count.append(count)
if soil == 'D' or soil == 'D/D':
D.append(count)
Count.append(count)
#SUM THE SOIL TYPE GRID COUNTS
A_sum = numpy.sum(A)
B_sum = numpy.sum(B)
C_sum = numpy.sum(C)
D_sum = numpy.sum(D)
Count_sum = numpy.sum(Count)
#CALCULATE PERCENT OF EACH SOIL TYPE
A_percent = float(A_sum/Count_sum*100)
B_percent = float(B_sum/Count_sum*100)
C_percent = float(C_sum/Count_sum*100)
D_percent = float(D_sum/Count_sum*100)
#WRITE THE DATA TO THE RFC SUMMARY CSV FILE
statsgo_file.write(name + ',' + str(A_percent) + ',' + str(B_percent) + ',' + str(C_percent) + ',' + str(D_percent) + '\n')
csv_file.close()
statsgo_file.close()
#csv_file.close()
os.remove(output_folderPath + 'temp.csv')
print 'Script Complete'
print 'STATSGO Summary File is', statsgo_file
raw_input('Press Enter to continue...')
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-14 21:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Profile.photo optional with a default image."""

    dependencies = [
        ('accounts', '0004_auto_20190813_0813'),
    ]

    operations = [
        # Redeclare Profile.photo as an optional ImageField stored under
        # 'img/' with a bundled placeholder picture as the default.
        migrations.AlterField(
            model_name='profile',
            name='photo',
            field=models.ImageField(blank=True, default='img/profilePic.png', null=True, upload_to='img'),
        ),
    ]
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure the link order of object files is the same between msvs and ninja.
"""
import TestGyp
import sys
if sys.platform == 'win32':
  # Windows-only: builds the same .gyp with both generators and compares the
  # resulting object-file link order via dumpbin disassembly.
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
  CHDIR = 'linker-flags'
  test.run_gyp('link-ordering.gyp', chdir=CHDIR)
  test.build('link-ordering.gyp', test.ALL, chdir=CHDIR)

  def GetDisasm(exe):
    # Return the dumpbin /disasm output of the built exe as one string.
    full_path = test.built_file_path(exe, chdir=CHDIR)
    # Get disassembly and drop int3 padding between functions.
    return '\n'.join(
        x for x in test.run_dumpbin('/disasm', full_path).splitlines()
        if 'CC' not in x)

  # This is the full dump that we expect. The source files in the .gyp match
  # this order which is what determines the ordering in the binary.
  # NOTE(review): the text inside these expected strings must match dumpbin's
  # output exactly -- do not reformat it.
  expected_disasm_basic = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
?z@@YAHXZ:
00401010: B8 03 00 00 00 mov eax,3
00401015: C3 ret
?x@@YAHXZ:
00401020: B8 01 00 00 00 mov eax,1
00401025: C3 ret
?y@@YAHXZ:
00401030: B8 02 00 00 00 mov eax,2
00401035: C3 ret
_main:
00401040: 33 C0 xor eax,eax
00401042: C3 ret
'''

  if expected_disasm_basic not in GetDisasm('test_ordering_exe.exe'):
    print GetDisasm('test_ordering_exe.exe')
    test.fail_test()

  # Similar to above. The VS generator handles subdirectories differently.
  expected_disasm_subdirs = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
_main:
00401010: 33 C0 xor eax,eax
00401012: C3 ret
?y@@YAHXZ:
00401020: B8 02 00 00 00 mov eax,2
00401025: C3 ret
?z@@YAHXZ:
00401030: B8 03 00 00 00 mov eax,3
00401035: C3 ret
'''

  if expected_disasm_subdirs not in GetDisasm('test_ordering_subdirs.exe'):
    print GetDisasm('test_ordering_subdirs.exe')
    test.fail_test()

  # Similar, but with directories mixed into folders (crt and main at the same
  # level, but with a subdir in the middle).
  expected_disasm_subdirs_mixed = '''
_mainCRTStartup:
00401000: B8 05 00 00 00 mov eax,5
00401005: C3 ret
?x@@YAHXZ:
00401010: B8 01 00 00 00 mov eax,1
00401015: C3 ret
_main:
00401020: 33 C0 xor eax,eax
00401022: C3 ret
?z@@YAHXZ:
00401030: B8 03 00 00 00 mov eax,3
00401035: C3 ret
?y@@YAHXZ:
00401040: B8 02 00 00 00 mov eax,2
00401045: C3 ret
'''

  if (expected_disasm_subdirs_mixed not in
      GetDisasm('test_ordering_subdirs_mixed.exe')):
    print GetDisasm('test_ordering_subdirs_mixed.exe')
    test.fail_test()

  test.pass_test()
|
#coding=UTF-8
# Demo: the same sign computation written first as a conditional expression
# and then as an if/else statement; both print 1 for x = 10.
x = 10
y = 1 if x >= 0 else -1  # expression form
print(y)
if x >= 0:               # equivalent statement form
    y = 1
else:
    y = -1
print(y)
def f(x):
    """Return 1 when x is non-negative, otherwise -1 (note f(0) == 1)."""
    if x >= 0:
        return 1
    return -1
print(f(x))  # prints f evaluated at the module-level x defined above
|
from django.db import models
from django.contrib.auth.models import User
from django.db.models.deletion import CASCADE
from cloudinary.models import CloudinaryField
from django.db.models.fields import DateTimeField
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
# Create your models here.
class ClinicalStaff(models.Model):
    """Clinical staff profile, linked one-to-one to the Django auth User."""

    staff = models.OneToOneField(User,on_delete=CASCADE)
    first_name = models.CharField(max_length=144)
    last_name = models.CharField(max_length=144)
    email = models.EmailField()
    # Avatar image stored on Cloudinary.
    profile_picture = CloudinaryField('image')

    def __str__(self):
        return self.staff.username
@receiver(post_save, sender=User)
def update_staff_signal(sender, instance, created, **kwargs):
    """Keep a ClinicalStaff profile in sync with its User on every save.

    Creates the profile on the User's first save, then (re)saves it.
    """
    if created:
        ClinicalStaff.objects.create(staff=instance)
    instance.clinicalstaff.save()
# (stored value, human-readable label) pairs for CharField choices below.
GENDER_CHOICES = (
    ("Male", "Male"),("Female","Female")
)
APPOINTMENT_STATUS = (
    ("Approved", "Approved"),("Not Approved", "Not Approved"),("Pending","Pending")
)
class Patient(models.Model):
    """A registered patient, identified by national ID or birth certificate."""

    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    id_number = models.IntegerField(blank=True,null=True)
    birth_certificate_no = models.IntegerField(blank=True,null=True)
    gender = models.CharField(max_length=15, choices= GENDER_CHOICES)
    age = models.IntegerField()
    # NOTE(review): IntegerField drops leading zeros / '+' in phone numbers;
    # CharField would be safer -- left unchanged here.
    phone = models.IntegerField(blank=True,null=True)

    def save_patient(self):
        self.save()

    def delete_patient(self):
        self.delete()

    @classmethod
    def search_patients(cls, patients):
        # Case-insensitive substring match on first name.
        return cls.objects.filter(first_name__icontains=patients).all()

    def __str__(self):
        return self.first_name
class Visit(models.Model):
    """A single patient visit with free-text notes."""

    date_visited = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
    patient = models.ForeignKey(Patient,on_delete=CASCADE)
    note = models.TextField()

    def save_visit(self):
        self.save()

    def delete_visit(self):
        self.delete()

    def __str__(self):
        return self.patient.first_name
class Medicine(models.Model):
    """A drug available for prescription."""

    name = models.CharField(max_length=144)
    description = models.TextField()
    date = models.DateTimeField(auto_now_add=True)

    def save_medicine(self):
        self.save()

    def delete_medicine(self):
        self.delete()

    def __str__(self):
        return self.name
class Prescription(models.Model):
    """A drug prescribed to a patient by a clinical staff member."""

    patient = models.ForeignKey(Patient,on_delete=CASCADE)
    dose = models.CharField(max_length=30)
    drug = models.ForeignKey(Medicine,on_delete=CASCADE)
    prescriber = models.ForeignKey(ClinicalStaff,on_delete=CASCADE)
    date = models.DateTimeField(auto_now_add=True)
    note = models.TextField()

    def save_prescription(self):
        self.save()

    def delete_prescription(self):
        self.delete()

    def __str__(self):
        return self.patient.first_name
class PatientHealthHistory(models.Model):
    """Timestamped free-text health record entries for a patient."""

    patient = models.ForeignKey(Patient,on_delete=CASCADE)
    date_recorded = models.DateTimeField(auto_now_add=True)
    health_record = models.TextField()

    def save_patient_health_history(self):
        self.save()

    def delete_patient_health_history(self):
        self.delete()

    def __str__(self):
        return self.patient.first_name
class PatientAppointment(models.Model):
    """Appointment request made by a (possibly unregistered) patient.

    `approve` tracks the workflow state (Pending -> Approved / Not Approved).
    """

    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=30)
    gender = models.CharField(max_length=15, choices= GENDER_CHOICES)
    age = models.IntegerField()
    phone = models.IntegerField(blank=True,null=True)
    date_made = models.DateTimeField(auto_now_add=True)
    # FIX(consistency): use models.DateTimeField like every other field in
    # this module instead of the bare DateTimeField imported from
    # django.db.models.fields (same class, one convention).
    appointment_date = models.DateTimeField(default=timezone.now)
    approve = models.CharField(default="Pending", max_length=15, choices= APPOINTMENT_STATUS)

    def save_patient_appointment(self):
        self.save()

    def delete_patient_appointment(self):
        self.delete()

    def __str__(self):
        return self.first_name
class FeedBack(models.Model):
    """Feedback message left by a patient."""

    patient = models.ForeignKey(Patient,on_delete=CASCADE)
    feedback_message = models.TextField()
    feedback_date = models.DateTimeField(auto_now_add=True)

    def save_feedback(self):
        self.save()

    def delete_feedback(self):
        self.delete()

    def __str__(self):
        return self.patient.first_name
|
from typing import Optional

import torch
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import (
    BertForSequenceClassification,
    ElectraForSequenceClassification,
)
from transformers.modeling_outputs import SequenceClassifierOutput
class CachedInferenceMixin:
    """Mixin that memoizes the output of a transformer body (e.g. the BERT or
    ELECTRA encoder) keyed on the input ids, so repeated inference over
    identical inputs skips the expensive forward pass.

    NOTE(review): the cache key covers input_ids only; attention_mask,
    token_type_ids etc. are assumed to be determined by input_ids -- confirm
    for your usage.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.use_cache = False   # caching disabled until enable_cache()
        self.cache_size = None   # None means unbounded
        self.cache = dict()      # cache key -> tuple of detached CPU tensors

    def empty_cache(self):
        """Drop all cached hidden states."""
        self.cache.clear()

    def enable_cache(self):
        self.use_cache = True

    def disable_cache(self):
        """Turn caching off and free the stored tensors."""
        self.use_cache = False
        self.empty_cache()

    def set_cache_size(self, size: Optional[int] = 25):
        """Bound the number of cached entries (None = unbounded)."""
        self.cache_size = size

    @staticmethod
    def create_cache_key(tensor: torch.Tensor) -> int:
        """Return a deterministic key for the exact value sequence in `tensor`.

        FIX: the previous implementation hashed a frozenset of the values,
        which ignores both token order and repetition, so different inputs
        (e.g. permutations of the same ids) collided and were served each
        other's cached states. Hash the ordered value tuple instead.
        """
        return hash(tuple(tensor.cpu().numpy().ravel().tolist()))

    def inference_body(
        self,
        body,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        head_mask,
        inputs_embeds,
        output_attentions,
        output_hidden_states,
        return_dict,
    ):
        """Run ``body(...)`` for these inputs, or serve its memoized result."""
        cache_key = self.create_cache_key(input_ids)
        if not self.use_cache or cache_key not in self.cache:
            hidden_states = body(
                input_ids,
                attention_mask,
                token_type_ids,
                position_ids,
                head_mask,
                inputs_embeds,
                output_attentions,
                output_hidden_states,
                return_dict,
            )
            # Store detached CPU copies so the cache holds neither GPU memory
            # nor an autograd graph.
            if self.use_cache and (
                self.cache_size is None or len(self.cache) < self.cache_size
            ):
                self.cache[cache_key] = tuple(o.detach().cpu() for o in hidden_states)
        else:
            # FIX: move cached tensors to the device of the incoming batch
            # instead of unconditionally calling .cuda(), which crashed on
            # CPU-only machines.
            device = input_ids.device
            hidden_states = tuple(o.to(device) for o in self.cache[cache_key])
        return hidden_states
class ElectraForSequenceClassificationCached(
    CachedInferenceMixin, ElectraForSequenceClassification
):
    """ELECTRA sequence classifier whose discriminator pass is memoized by
    CachedInferenceMixin, so repeated scoring of identical input_ids skips
    the encoder forward.
    """

    def __init__(self, config):
        super().__init__(config)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Mirrors ElectraForSequenceClassification.forward, with the encoder
        # routed through the caching inference_body.
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )
        discriminator_hidden_states = self.inference_body(
            self.electra,
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # First element holds the token-level hidden states fed to the head.
        sequence_output = discriminator_hidden_states[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                # NOTE(review): confirm MSELoss is imported at module level --
                # it is absent from this module's visible import block.
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + discriminator_hidden_states[1:]
            return ((loss,) + output) if loss is not None else output
        # NOTE(review): confirm SequenceClassifierOutput is imported at module
        # level. Also, when served from the mixin cache the value is a plain
        # tuple with no .hidden_states/.attentions attributes -- verify the
        # cache is only used with return_dict falsy.
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=discriminator_hidden_states.hidden_states,
            attentions=discriminator_hidden_states.attentions,
        )
class BertForSequenceClassificationCached(
    CachedInferenceMixin, BertForSequenceClassification
):
    """BERT sequence classifier whose encoder pass is memoized by
    CachedInferenceMixin (see inference_body).
    """

    def __init__(self, config):
        super().__init__(config)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        # Mirrors BertForSequenceClassification.forward, with the encoder
        # routed through the caching inference_body.
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )
        outputs = self.inference_body(
            self.bert,
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # outputs[1] is BERT's pooled representation used for classification.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                # NOTE(review): confirm MSELoss is imported at module level --
                # it is absent from this module's visible import block.
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        # NOTE(review): confirm SequenceClassifierOutput is imported; when the
        # mixin cache serves a plain tuple, the attribute access below fails --
        # verify the cache is only used with return_dict falsy.
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|
import cv2 as cv
import numpy as np
import os, argparse, yaml
def parse_args():
    """Parse command-line options for the video conversion script.

    Returns:
        argparse.Namespace with `filename` ('*' means convert every file in
        `input_dir`), `input_dir`, `output_dir`, `out_dim` (width, height or
        None to keep the source size) and `fps` (None -> default rate).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', type=str, default='[VCB-Studio]Hyouka[05][BDRip][720p][x264_aac].mp4', help='Filename of input video')
    parser.add_argument('--input_dir', type=str, default='/home/yuyang/data/Video/Hyouka/', help='Direcotry of input files')
    parser.add_argument('--output_dir', type=str, default='/home/yuyang/data/Video/Converted/', help='Directory of output files')
    parser.add_argument('--out_dim', type=int, nargs=2, default=None, help='Dimensions of output frames (width, height)')
    parser.add_argument('--fps', type=int, default=None, help='Number of fps of output files')
    args = parser.parse_args()
    return args
def color2bw(inputname, inputpath, outputpath, out_dim, fps):
    """Convert one .mp4 into a grayscale copy written as 'bw_<inputname>'.

    The luma (Y) plane of the BGR->YCrCb conversion is used as the grayscale
    frame. Files that do not end in '.mp4' are silently skipped.

    Args:
        inputname: file name of the source video (must end in '.mp4').
        inputpath: directory of the source video (with trailing separator).
        outputpath: directory for the converted video (with trailing separator).
        out_dim: (width, height) of the output, or None to keep the source size.
        fps: output frame rate; None falls back to 23.97.
    """
    if not inputname.endswith(".mp4"):
        return
    cap = cv.VideoCapture(inputpath + inputname)
    width, height = int(cap.get(3)), int(cap.get(4))
    fourcc = cv.VideoWriter_fourcc(*'mp4v')
    # FIX(idiom): compare against None with `is`, not `==`.
    if out_dim is None:
        new_width, new_height = width, height
    else:
        new_width, new_height = out_dim
    if fps is None:
        fps = 23.97  # presumably the source's NTSC-film rate -- TODO confirm
    gray_out = cv.VideoWriter(
        outputpath + 'bw_' + inputname,
        fourcc,
        fps,
        (new_width, new_height),
        isColor=False
    )
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break  # end of the video
        frame = cv.resize(frame, (new_width, new_height), interpolation=cv.INTER_LINEAR)
        ycrcb = cv.cvtColor(frame, cv.COLOR_BGR2YCrCb)
        # FIX(clarity): the Y (luma) plane is simply channel 0; the original
        # double-transpose computed the same plane the long way round.
        gray_out.write(np.ascontiguousarray(ycrcb[:, :, 0]))
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    gray_out.release()
def main():
    """Entry point: convert one file, or every file in input_dir when filename is '*'."""
    args = parse_args()
    if args.filename == '*':
        # Batch mode: non-.mp4 files are skipped inside color2bw.
        for filename in os.listdir(args.input_dir):
            color2bw(inputname=filename, inputpath=args.input_dir, outputpath=args.output_dir, out_dim=args.out_dim,
                     fps=args.fps)
    else:
        print(args.filename, args.input_dir, args.output_dir, args.out_dim, args.fps)
        color2bw(inputname=args.filename, inputpath=args.input_dir, outputpath=args.output_dir, out_dim=args.out_dim,
                 fps=args.fps)
    # cleanup
    cv.destroyAllWindows()
    return 0

if __name__ == '__main__':
    main()
'''
class for prepping shapefiles and selecting the shapes we will use in the predictor class
'''
import pandas as pd
from .utils import *
class PrepShapes():
    def __init__(self, dataOrFileName, shapeNameColDict:Optional[dict]=None, boundaryBuffer:int=12000):
        '''
        Class for prepping shapefiles. Used to select which polygons we want to
        use in our predictor and find project boundaries based on those polygons

        Parameters
        ----------
        dataOrFileName:
            shapefiles loaded into a geopandas.GeoDataFrame or the filename/path to a file to load into self.data with geopandas.read_file()
        shapeNameColDict:dict
            a dictionary tying the description of a shape to it's column. example {'ultramafic': 'rock_class'}. This is used to group polygons into categories
        boundaryBuffer:int
            a buffer used to find the project boundary. i.e. 10% bigger than your predictor range

        Example
        --------
        >>> InputFile = 'data/BedrockP.shp'
        >>> pShapes = PrepShapes(bedrockData)
        '''
        if isinstance(dataOrFileName, (pathlib.Path, str)):
            # Lazy import: geopandas is only required when loading from disk.
            import geopandas as gpd
            self.data = gpd.read_file(dataOrFileName)
        else:
            self.data = dataOrFileName
        self.shapeNameColDict = dict()
        self.projShapes = dict()
        if shapeNameColDict:
            self.shapeNameColDict.update(shapeNameColDict)
            for key in self.shapeNameColDict.keys():
                col = self.shapeNameColDict[key]
                # Each entry keeps its source column and the matching rows.
                self.projShapes.update({key: {'cat': col,
                                              'data': self.data.loc[self.data[col] == str(key)]}
                                        })
        self._nShapes = len(self.shapeNameColDict)
        self.boundaryBuffer: int = boundaryBuffer
        self._dissolved = False   # set by dissolveData()
        self._buffered = False    # set by bufferData()
        self.projBounds = False   # set by setProjectBoundary()

    def printUnique(self, include:list=None, exclude:list=['gid', 'upid', 'geometry'],
                    excludeNumeric:bool=True, maxLineLength:int=100):
        '''
        small function to print out unique values in all columns or some columns.
        assumes you are passing it a GeoPandas dataframe so it by default will exclude some columns.
        '''
        if excludeNumeric:
            gdframe = self.data.select_dtypes(exclude='number')
        else:
            gdframe = self.data
        if include:
            catCols = [col for col in gdframe.columns.tolist() if col in include]
        else:
            catCols = [col for col in gdframe.columns.tolist() if col not in exclude]
        categoryDesc = dict()
        for key in catCols:
            categoryDesc[key] = gdframe[key].unique().tolist()
        printDictOfLists(categoryDesc, categoryDesc.keys(), maxLineLength=maxLineLength)

    def printColumns(self, maxLineLength:int=100):
        'print the column names in a slightly easier to read format'
        printLists(self.data.columns.tolist(), maxLineLength=maxLineLength)

    def plotData(self, column:str, ax=None, categorical:bool=True, cmap:str='gist_earth', legend:bool=True,
                 figsize:tuple=(10,10), kwds:dict=None)-> "matplotlib axes":
        '''
        basic plot for shapes grouped by values in column. This is a wrapper
        for geopandas.GeoDataFrame.plot() with common defaults.
        you can access the full plotting function at self.data.plot()

        Parameters
        ----------
        column: str
            name of the column to color polygons by
        ax: matplotlib.pyplot Artist (default None)
            axes on which to draw the plot
        categorical: bool (default True)
            is the data categorical data?
        cmap: str (default 'gist_earth')
            name of any colormap recognized by matplotlib
        legend: bool (default True)
            whether to plot a legend
        figsize: tuple of integers (default (10, 10))
            Size of the resulting matplotlib.figure.Figure (width, height). If axes is
            passed, then figsize is ignored.
        kwds: dict (default None)
            keyword dictionary to pass onto geopandas.GeoDataFrame.plot()

        Returns
        -------
        ax: matplotlib axes instance
        '''
        test_cmap(cmap)  # fail fast on an unknown colormap name
        if kwds:
            ax = self.data.plot(column=column, categorical=categorical,
                                legend=legend, ax=ax, cmap=cmap, figsize=figsize, **kwds)
        else:
            ax = self.data.plot(column=column, categorical=categorical,
                                legend=legend, ax=ax, cmap=cmap, figsize=figsize)
        return ax

    def plotShapes(self, ax=None, color:list=['red', 'orange'], legend:bool=True, legLoc:str='upper left',
                   figsize:tuple=(10,10), polyfill:bool=False, sAlpha:float=None, bAlpha:float=None,
                   useProjBounds:bool=False, plotBuffer:bool=False, kwds:dict=None):
        '''
        basic plot wrapper that loops through the self.shapeNames and plots the shape outlines relying heavily on defaults.
        uses the geopandas.GeoDataFrame.plot() tied to self.data

        Parameters
        ----------
        ax: matplotlib.pyplot Artist (default None)
            axes on which to draw the plot
        color: list (default ['red', 'orange'])
            list of colors. needs to be same length as self.shapeNames
        legend: bool (default True)
            whether to plot a legend
        legLoc:str (default 'upper left')
            the location to plot the legend for the shapes
        figsize: tuple of integers (default (10, 10))
            Size of the resulting matplotlib.figure.Figure (width, height). If axes is
            passed, then figsize is ignored.
        polyfill: bool (default False)
            whether to fill in the polygon using color
        sAlpha: float (default None)
            transparency of the shapeNames polygon fills. should be between 0 (transparent) and 1 (opaque)
        bAlpha: float (default None)
            transparency of the buffer polygon fills. should be between 0 (transparent) and 1 (opaque)
        useProjBounds: bool (default False)
            if True will use self.projBounds to reset the plot axis limits
        plotBuffer: bool (default False)
            to plot buffer shapes or not to plot buffer shapes
        kwds: dict (default None)
            keyword dictionary to pass onto geopandas.GeoDataFrame.plot()

        Returns
        -------
        ax: matplotlib axes instance
        '''
        if legend:
            from matplotlib.lines import Line2D
            from matplotlib.legend import Legend
            handles = []
            labels = []
        for c, shp in zip(color, self.shapeNameColDict.keys()):
            cpoly = c if polyfill else 'none'
            if kwds:
                ax = self.projShapes[shp]['data'].plot(categorical=True, figsize=figsize,
                                                       ax=ax, facecolor=cpoly,
                                                       edgecolor=c, alpha=sAlpha, **kwds)
                if plotBuffer: ax = self.projShapes[shp]['buffer'].plot(ax=ax, facecolor=cpoly,
                                                                        edgecolor=c, alpha=bAlpha, **kwds)
            else:
                ax = self.projShapes[shp]['data'].plot(categorical=True, figsize=figsize,
                                                       ax=ax, facecolor=cpoly,
                                                       edgecolor=c, alpha=sAlpha)
                if plotBuffer: ax = self.projShapes[shp]['buffer'].plot(ax=ax, facecolor=cpoly,
                                                                        edgecolor=c, alpha=bAlpha)
            if legend:
                # Proxy artist: an empty marker in the shape's outline color.
                handles.append(Line2D([], [], color=c, lw=0, marker='o', markerfacecolor='none'))
                labels.append(shp)
        if legend:
            leg = Legend(ax, handles, labels, loc=legLoc, frameon=True)
            ax.add_artist(leg)
        if useProjBounds:
            ax.set_xlim(self.projBounds['minx'][0], self.projBounds['maxx'][0])
            ax.set_ylim(self.projBounds['miny'][0], self.projBounds['maxy'][0])
        return ax

    def dissolveData(self, forceDissolve:bool=False):
        '''
        dissolve the many polygons within each shape into a single multipolygon

        Parameters
        ----------
        forceDissolve:bool
            force running dissolve again even if you already called it in the past.
        '''
        if forceDissolve or not self._dissolved:
            for key in self.shapeNameColDict.keys():
                column = self.projShapes[key]['cat']
                dissolvedShape = self.projShapes[key]['data'][[column, 'geometry']].dissolve(by=column,
                                                                                             aggfunc='first',
                                                                                             as_index=False)
                self.projShapes[key].update({'dataDissolved': dissolvedShape})
            self._dissolved = True
        else:
            print('shapes already dissolved')

    def bufferData(self, buffer:int='default', addPercent=1.1):
        '''
        Create a buffered shape around the project shapes

        Parameters
        ----------
        buffer:int
            by default it grabs the instance self.boundaryBuffer variable you set on initialization.
        addPercent:float
            multiplier applied to the buffer distance (1.1 = add 10%); pass a
            falsy value (0 or None) to use the buffer distance unchanged.
        '''
        if isinstance(buffer, str):
            if buffer.lower() == 'default':
                buffer = self.boundaryBuffer
            else:
                raise ValueError(f"'default' or a value are the two currently supported buffer values. You passed:{buffer}")
        if addPercent:
            # FIX: honor the addPercent argument; the 1.1 multiplier used to
            # be hard-coded and the parameter was silently ignored.
            buffer = buffer * addPercent
        for key in self.shapeNameColDict.keys():
            # Buffer the dissolved shape when available, the raw rows otherwise.
            dataToBuffer = 'dataDissolved' if self._dissolved else 'data'
            self.projShapes[key].update({'buffer': self.projShapes[key][dataToBuffer].buffer(buffer)})
        if not self._buffered:
            self._buffered = True

    def setBuffer(self, bufferSize:int=False):
        '''
        update the class boundary buffer variable

        Parameters
        ----------
        bufferSize:int
            size of buffer (in crs units) to used to create a buffer around shapes of interest
        '''
        if bufferSize:
            self.__dict__.update({'boundaryBuffer': bufferSize})

    def setShapes(self, shapeNameColDict:dict, reset:bool=True):
        '''
        updates self.shapeNameColDict as well as self.projShapes
        if you want to just update (keep old keys not passed) set reset=False

        Parameters
        ----------
        shapeNameColDict:dict
            dictionary of category name (i.e. 'ultramafic') and the column the descriptor is found in (i.e. 'rock_class')
        reset:bool (default True)
            will do a reset of the dictionaries (self.shapeNameColDict and self.projShapes) i.e clearing them before updating
        '''
        if reset:
            self.shapeNameColDict.clear()
            self.projShapes.clear()
        self.shapeNameColDict.update(shapeNameColDict)
        if len(self.shapeNameColDict) > 0:
            for key in self.shapeNameColDict.keys():
                col = self.shapeNameColDict[key]
                self.projShapes.update({key: {'cat': col,
                                              'data': self.data.loc[self.data[col] == str(key)]}
                                        })

    def setProjectBoundary(self, buffer=True):
        '''
        create the smallest size project boundary based on the the shapes of interest

        Parameters
        ----------
        buffer:bool (default True)
            sets proj boundary based on buffer otherwise will use shapes (which wouldn't be that useful)
        '''
        data = 'buffer' if buffer else 'data'
        # Tightest box covering every shape: max of the minima, min of the maxima.
        minXList = [self.projShapes[key][data].bounds.minx.values for key in self.projShapes]
        maxXList = [self.projShapes[key][data].bounds.maxx.values for key in self.projShapes]
        minYList = [self.projShapes[key][data].bounds.miny.values for key in self.projShapes]
        maxYList = [self.projShapes[key][data].bounds.maxy.values for key in self.projShapes]
        minBounds = {'minx':max(minXList), 'miny':max(minYList),
                     'maxx':min(maxXList), 'maxy':min(maxYList)}
        self.__dict__.update({'projBounds': pd.DataFrame(minBounds)})
|
import unittest
import monoalphabetic
class TestMonoalphabeticCipher(unittest.TestCase):
    """Round-trip tests for the monoalphabetic substitution cipher.

    FIX: the encrypt/decrypt checks were previously joined with `and`
    (`self.assertEqual(...) and self.assertEqual(...)`). assertEqual returns
    None, so the `and` short-circuited and the decrypt assertion was never
    evaluated. Each assertion now runs as its own statement.
    """

    def test_string_without_space(self):
        plaintext = 'defendtheeastwallofthecastle'
        key = 'qmrhsnxbwpgjauoecklfzyvtdi'
        ciphertext = 'hsnsuhfbssqlfvqjjonfbsrqlfjs'
        self.assertEqual(ciphertext, monoalphabetic.encrypt(plaintext, key))
        self.assertEqual(plaintext, monoalphabetic.decrypt(ciphertext, key))

    def test_string_with_spaces(self):
        plaintext = 'defend the east wall of the castle'
        key = 'qmrhsnxbwpgjauoecklfzyvtdi'
        ciphertext = 'hsnsuh fbs sqlf vqjj on fbs rqlfjs'
        self.assertEqual(ciphertext, monoalphabetic.encrypt(plaintext, key))
        self.assertEqual(plaintext, monoalphabetic.decrypt(ciphertext, key))

    def test_string_with_spaces_and_numbers(self):
        plaintext = 'defend the east wall of the castle 12345'
        key = 'qmrhsnxbwpgjauoecklfzyvtdi 12345'
        ciphertext = 'hsnsuh fbs sqlf vqjj on fbs rqlfjs 12345'
        self.assertEqual(ciphertext, monoalphabetic.encrypt(plaintext, key))
        self.assertEqual(plaintext, monoalphabetic.decrypt(ciphertext, key))
if __name__ == '__main__':
    unittest.main()  # run the test suite when executed directly
|
import json
from flask import request, jsonify, g
from web.controllers.api import route_api
from common.models.food.Food import Food
from common.models.member.MemberCart import MemberCart
from common.libs.member.CartSerivce import CartService
from common.libs.Helper import selectFilterObj,getDictFilterField
from common.libs.UrlManager import UrlManager
@route_api.route('/cart/index')
def cartIndex():
    """Return the logged-in member's cart as a JSON list.

    Response data['list'] items: id, food_id, number, name, price,
    pic_url, active. Returns code -1 when the member is not logged in.
    """
    resp = {'code': 200, 'msg': '操作成功', 'data': {}}
    member_info = g.member_info
    if not member_info:
        resp['code'] = -1
        resp['msg'] = '获取失败未登陆'
        return jsonify(resp)

    cart_list = MemberCart.query.filter_by(member_id=member_info.id).all()
    data_cart_list = []
    if cart_list:
        food_ids = selectFilterObj(cart_list, "food_id")
        # food_map maps food id -> Food row (built by getDictFilterField).
        food_map = getDictFilterField(Food, Food.id, "id", food_ids)
        for item in cart_list:
            # BUG FIX: guard against cart rows whose food has been deleted;
            # the original food_map[item.food_id] raised KeyError in that case.
            tmp_food_info = food_map.get(item.food_id)
            if tmp_food_info is None:
                continue
            data_cart_list.append({
                "id": item.id,
                "food_id": item.food_id,
                "number": item.quantity,
                "name": tmp_food_info.name,
                "price": str(tmp_food_info.price),
                "pic_url": UrlManager.buildImageUrl(tmp_food_info.main_image),
                "active": True
            })
    resp['data']['list'] = data_cart_list
    return jsonify(resp)
@route_api.route('/cart/set',methods=['POST'])
def setCart():
    """Add an item to the logged-in member's cart (POST params: id, number)."""
    resp = {'code': 200, 'msg': '操作成功', 'data': {}}
    req = request.values
    food_id = int(req.get('id', 0))
    number = int(req.get('number', 0))

    def fail(msg):
        # Uniform error response: code -1 plus the given message.
        resp['code'] = -1
        resp['msg'] = msg
        return jsonify(resp)

    if food_id < 1 or number < 1:
        return fail('添加购物车失败-1')

    member_info = g.member_info
    if not member_info:
        return fail('添加购物车失败-2')

    food_info = Food.query.filter_by(id=food_id).first()
    if not food_info:
        return fail('添加购物车失败-3')

    if food_info.stock < number:
        return fail('库存不足')

    if not CartService.setItems(member_id=member_info.id, food_id=food_id, number=number):
        return fail('添加购物车失败-4')

    return jsonify(resp)
@route_api.route('/cart/del',methods=['POST'])
def delCart():
    """Delete the posted items (JSON array in 'goods') from the member's cart."""
    resp = {'code': 200, 'msg': '操作成功', 'data': {}}
    req = request.values
    params_goods = req.get('goods')
    items = json.loads(params_goods) if params_goods else []
    if not items:
        # An empty deletion request is treated as success.
        return jsonify(resp)

    member_info = g.member_info
    if not member_info:
        resp['code'] = -1
        resp['msg'] = '删除购物车失败-1'
        return jsonify(resp)

    if not CartService.deleteItem(member_id=member_info.id, items=items):
        resp['code'] = -1
        resp['msg'] = '删除购物车失败-2'
        return jsonify(resp)

    return jsonify(resp)
import pytest
import os
from pydodo import episode_log
from pydodo.bluebird_connect import ping_bluebird
# Probe BlueBird once at import time; used below to skip when it is unreachable.
bb_resp = ping_bluebird()
@pytest.mark.skipif(not bb_resp, reason="Can't connect to bluebird")
def test_eplog():
    # episode_log() should return the path of an existing log file.
    # NOTE(review): pytest.xfail() raises immediately, so the assertions
    # below never execute until this line is removed.
    pytest.xfail("BlueBird currently does not return a log.")
    filepath = episode_log()
    assert isinstance(filepath, str)
    assert os.path.exists(filepath)
|
import numpy as np
import pandas as pd
import os
import datetime
def realizedVolatility(series):
    """Annualized realized volatility of a fund.

    Resamples 'adjust_net_value' (indexed by 'date') to weekly closes,
    forward-fills gaps, and returns the std of weekly log returns
    scaled by sqrt(52).
    """
    weekly = series.set_index('date').resample('W').last().ffill()
    weekly['log_ret'] = np.log(weekly['adjust_net_value'] / weekly['adjust_net_value'].shift(1))
    return weekly['log_ret'].std() * np.sqrt(52)
# ---------------------------------------------------------------------------
# Compute each fund's one-year revenue and realized volatility, filter the
# universe, pick high-revenue / low-volatility funds per asset type, and build
# the weekly log-return matrix used for covariance / mean estimation.
# ---------------------------------------------------------------------------
# BUG FIX: pd.datetime was deprecated and removed (pandas >= 1.0/2.0);
# use the datetime module directly.
STANDPOINT = datetime.datetime.today()
instruments = pd.read_csv(r'D:\Deecamp数据\instruments.csv')
instruments.drop(columns={'advisor','trustee'}, inplace=True)
instruments['revenue'] = 0
instruments['volatility'] = 0
path = r'D:/Deecamp数据/nav/nav/'
files = os.listdir(path)
for f in files:
    ints = os.listdir(path + f)
    for i in ints:
        code = i[:9]
        print("\r Now at "+code, end="")
        infos = instruments.loc[instruments['code']==code].iloc[0]
        if not pd.isnull(infos['delist_date']):  # fund already delisted; skip
            continue
        nav = pd.read_csv(path+f+'/'+i, parse_dates=['date'])
        one_year_ago = nav.iloc[-1]['date'] - datetime.timedelta(days=365)
        if nav.iloc[-1]['date'] - nav.iloc[0]['date'] < datetime.timedelta(days=365):
            # Listed for less than a year: measure from inception.
            revenue = nav.iloc[-1]['adjust_net_value']/nav.iloc[0]['adjust_net_value']-1
            series = nav[['date','adjust_net_value']]
        else:
            # Measure from the first observation at most one year old.
            window = nav.loc[nav['date'] >= one_year_ago]
            revenue = nav.iloc[-1]['adjust_net_value']/window.iloc[0]['adjust_net_value']-1
            series = window[['date','adjust_net_value']]
        instruments.loc[instruments['code']==code,'revenue'] = revenue
        instruments.loc[instruments['code']==code,'volatility'] = realizedVolatility(series)
instruments['list_date'] = pd.to_datetime(instruments['list_date'])
# Fund filtering
instruments.drop(index=instruments.loc[instruments['operate_mode']!='开放式基金'].index, inplace=True)  # open-end funds only
instruments = instruments.loc[pd.isnull(instruments['delist_date'])]  # drop funds that stopped issuing
instruments.drop(index=instruments.loc[instruments['list_date']>STANDPOINT-datetime.timedelta(days=365)].index, inplace=True)  # listed < 1 year
instruments.drop(index=instruments.loc[instruments['revenue']>2].index, inplace=True)  # drop lucky outliers with revenue > 2
instruments.drop(index=instruments.loc[instruments['volatility']==0].index, inplace=True)  # zero volatility => missing NAV data
# Simple fund selection
high_revenue = set(instruments.groupby(['underlying_asset_type']).apply(lambda x:x.sort_values('revenue', ascending=False).head(5))['code'])  # top revenue per asset type
# BUG FIX: "low volatility" must sort ascending; the original used
# ascending=False and therefore selected the 5 HIGHEST-volatility funds.
low_volatility = set(instruments.groupby(['underlying_asset_type']).apply(lambda x:x.sort_values('volatility', ascending=True).head(5))['code'])  # lowest risk per asset type
selected_funds = high_revenue|low_volatility  # union of both sets
# Build the weekly log-return matrix for the selected funds
rets = pd.DataFrame()
for sf in selected_funds:
    nav = pd.read_csv(path+sf[4:6]+'/'+sf+'.csv', parse_dates=['date'])
    series = nav.loc[nav['date']>=nav.iloc[-1]['date']-datetime.timedelta(days=365)][['date','adjust_net_value']]
    series = series.set_index('date').resample('W').last().ffill()
    series['log_ret'] = np.log(series['adjust_net_value']/series['adjust_net_value'].shift(1))
    rets[sf] = series['log_ret']
rets.drop(index=rets.index[0],inplace=True)
# BUG FIX: fillna(method='bfill') is deprecated; bfill() is the equivalent.
rets.bfill(inplace=True)
covs = np.array(rets.cov())
means = np.array(rets.mean())
|
import json
# Sentinel marking "field not provided" in API payloads.
none = "d3043820717d74d9a17694c176d39733"
# region ASG
class ASG:
    """Auto Scaling Group import payload.

    Fields left at the module-level ``none`` sentinel are treated as unset.
    """

    def __init__(self, product=none, spot_instance_types=none, name=none):
        """
        :type product: str
        :type spot_instance_types: List[str]
        :type name: str
        """
        self.product = product
        self.spot_instance_types = spot_instance_types
        self.name = name
# endregion
class ImportASGRequest:
    """Request wrapper that serializes an ASG group to the API's JSON format."""

    def __init__(self, group):
        self.group = group

    def toJSON(self):
        """Serialize recursively via each object's __dict__, with sorted keys."""
        return json.dumps(
            self,
            default=lambda o: o.__dict__,
            sort_keys=True,
            indent=4,
        )
|
import numpy as np
import cv2
from imutils import imutils
# Translation (shift) and resize demos on a sample image.
image = cv2.imread("/home/mmc/code/python_opencv/Books/Practical Python and OpenCV, 3rd Edition/code/images/trex.png")

# Shift 25 px right and 50 px down using an affine translation matrix.
M = np.float32([[1, 0, 25], [0, 1, 50]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Down and Right", shifted)

# Shift 50 px left and 90 px up.
M = np.float32([[1, 0, -50], [0, 1, -90]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
cv2.imshow("Shifted Up and Left", shifted)

# Same translation via the imutils helper (positive y shifts downward).
shifted = imutils.translate(image, 0, 100)
cv2.imshow('Shifted Down', shifted)
cv2.imshow('Original', image)

# Resize to width=100, preserving aspect ratio.
im_resized = imutils.resize(image, width=100)
cv2.imshow('Resized', im_resized)

# BUG FIX: without waitKey the HighGUI windows never render / stay open;
# block until a key is pressed, then clean up.
cv2.waitKey(0)
cv2.destroyAllWindows()
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
"""
Some extremely basic things for our system. Among the first modules loaded, useful in
loading the others in fact.
"""
import os, sys, __main__, json, shutil
## Base module - foundational stuff
# Platform detection flags derived from substring checks on sys.platform.
WINDOWS = sys.platform.find("win32") != -1 or sys.platform.find("win64") != -1 # ??? FIXME
LINUX = sys.platform.find("linux") != -1
OSX = sys.platform.find("darwin") != -1
BSD = sys.platform.find("bsd") != -1
# Any Unix-like platform.
UNIX = LINUX or OSX or BSD
# Fail fast at import time on unsupported platforms.
assert(WINDOWS or UNIX)
#
# Version
#
# Engine version display string; see comparable_version for ordering.
INTENSITY_VERSION_STRING = '0.0.5'
def comparable_version(version_string):
    """Convert a dotted version string like '0.0.5' into a tuple of ints,
    so versions compare correctly with normal tuple ordering."""
    parts = version_string.split('.')
    return tuple(int(part) for part in parts)
# Comparable (tuple) form of the engine version, computed once at import.
INTENSITY_VERSION = comparable_version(INTENSITY_VERSION_STRING)
print "Intensity Engine version:", INTENSITY_VERSION_STRING
#def check_version(version_string, strict=True):
#    version = comparable_version(version_string)
#    if version == INTENSITY_VERSION: return True
#    return (not strict) and version > INTENSITY_VERSION
## Global constants
class Global:
    """Process-wide flags telling scripts whether they run client- or server-side."""

    ## Read this to know if the current script is running on the client. Always the opposite of SERVER.
    CLIENT = None
    ## Read this to know if the current script is running on the server. Always the opposite of CLIENT.
    SERVER = None

    @staticmethod
    def init_as_client():
        """Mark this process as the client; sets CLIENT/SERVER once at startup."""
        Global.SERVER = False
        Global.CLIENT = True

    @staticmethod
    def init_as_server():
        """Mark this process as the server; sets CLIENT/SERVER once at startup."""
        Global.CLIENT = False
        Global.SERVER = True
#
# Directory stuff
#
## Directory where our python scripts and modules reside
PYTHON_SCRIPT_DIR = os.path.join("src", "python", "intensity")
# Lazily-initialized home directory; set via set_home_dir() or defaulted
# inside get_home_subdir().
HOME_SUBDIR = None
def set_home_dir(home_dir):
    """Override the engine home directory (call before get_home_subdir())."""
    print "Set home dir:", home_dir
    global HOME_SUBDIR
    HOME_SUBDIR = home_dir
## The subdirectory under the user's home directory which we use.
def get_home_subdir():
    """Return (creating if needed) the engine's per-user home directory.

    Uses HOME_SUBDIR if set_home_dir() set it; otherwise defaults to
    ~/.cubecreate_<suffix> (Unix) or ~/cubecreate_<suffix> (Windows).
    """
    global HOME_SUBDIR
    if Global.CLIENT:
        suffix = "client"
    else:
        # If no home dir is given, the default for the server is to share it with the client
        # NOTE(review): when HOME_SUBDIR was set explicitly, this suffix is
        # computed but never used, since the default branch below is skipped.
        suffix = "server" if HOME_SUBDIR is not None else 'client'
    # Use default value if none given to us
    if HOME_SUBDIR is None:
        if UNIX:
            HOME_SUBDIR = os.path.join( os.path.expanduser('~'), '.cubecreate_'+suffix )
        elif WINDOWS:
            HOME_SUBDIR = os.path.join( os.path.expanduser('~'), 'cubecreate_'+suffix )
        else:
            print "Error: Not sure where to set the home directory for this platform,", sys.platform
            raise Exception
        print 'Home dir:', HOME_SUBDIR
    # Ensure it exists.
    if not os.path.exists(HOME_SUBDIR):
        os.makedirs(HOME_SUBDIR)
    return HOME_SUBDIR
## The subdirectory name (single name) under home
def get_asset_subdir():
    """Return the name of the assets subdirectory under the home dir."""
    return 'packages'
## The directory to which the client saves assets
def get_asset_dir():
    """Return (creating/populating if needed) the client's asset directory.

    On first use, seeds it from data/initial_packages when that exists;
    otherwise just creates an empty directory.
    """
    ASSET_DIR = os.path.join( get_home_subdir(), get_asset_subdir() )
    # Ensure it exists.
    if not os.path.exists(ASSET_DIR):
        # Populate with initial content. This moves some archive assets into the right place, so
        # that they can then be unzipped as necessary
        initial_packages = os.path.join('data', 'initial_packages')
        if os.path.exists(initial_packages):
            print 'Populating with initial packages'
            shutil.copytree(initial_packages, ASSET_DIR)
        else:
            # No initial data, so just make the directory
            os.makedirs(ASSET_DIR)
    return ASSET_DIR
## The 'base' map directory under the asset dir; created on demand.
def get_map_dir():
    """Return (and create if missing) the 'base' map directory under the asset dir."""
    map_dir = os.path.join(get_asset_dir(), 'base')
    # Ensure it exists. Done only if we are called (the server doesn't call us).
    if not os.path.exists(map_dir):
        os.makedirs(map_dir)
    return map_dir
## Returns the short path to an asset. If we get e.g. /home/X/intensityengine/packages/base/somemap.ogz,
## then we return base/somemap.ogz, i.e., the path under /packages. This short path can then be used
## to know where to play an asset on the client, under the client's home subdir.
## A shortpath does not include '/packages'. Thus, you can concatenate a shortpath to the asset_dir
## returned in get_asset_dir to get a real path.
def get_asset_shortpath(path):
    """Return the portion of *path* below its 'packages' directory component.

    Raises ValueError if 'packages' is not a component of the path (the
    original code looped until an IndexError in that case).
    """
    elements = path.split(os.path.sep)
    if 'packages' not in elements:
        raise ValueError("path has no 'packages' component: %r" % path)
    ret = []
    while elements[-1] != 'packages':
        ret = [elements[-1]] + ret
        elements = elements[:-1]
    # BUG FIX: os.path.join(ret) passed the LIST as the single argument and
    # returned it unchanged; the components must be splatted.
    return os.path.join(*ret) if ret else ''
## Where user scripts (not part of the core engine) reside
PYTHON_USER_SCRIPT_DIR = os.path.join("src", "python", "user")
## Run a user script, in the directory PYTHON_USER_SCRIPT_DIR
def run_user_script(name):
    # Python 2 execfile: runs the script with __main__'s globals/locals, so
    # user scripts can read and modify the main module's namespace directly.
    execfile( os.path.join(PYTHON_USER_SCRIPT_DIR, name), __main__.__dict__, __main__.__dict__ )
#
# Config file stuff
#
## Start using a persistent config file. The server and client use different ones (although, for now,
## they use the same template). Config files have the common form of "[Section] option = value", see
## the actual files for more.
## NOTE(review): the file is parsed with json.loads below, so the
## "[Section] option = value" description above appears stale — confirm.
## @param path The path to the config file to use
## @param template A file with default parameters, to be used if there isn't yet a config file at
##        the location specified by 'path'. This occurs the first time we run.
def init_config(path, template):
    global CONFIG_FILE
    CONFIG_FILE = path
    if not os.path.exists(os.path.dirname(CONFIG_FILE)): # Create settings file's directory, if needed
        os.mkdir(os.path.dirname(CONFIG_FILE))
    if not os.path.exists(CONFIG_FILE): # Create settings file, if none exists yet
        shutil.copyfile( template, CONFIG_FILE )
        print "CONFIG FILE: created a new from template"
    # Load the whole config into the module-global configFile dict.
    config_file = open(CONFIG_FILE);
    global configFile
    configFile = json.loads(config_file.read())
    config_file.close();
    # Apply changes based on commandline options
    # Each '-config:section:option:value' argument overrides the loaded value.
    MARKER = '-config:'
    for arg in sys.argv:
        if arg[:len(MARKER)] == MARKER:
            arg = arg[len(MARKER):]
            section, option, value = arg.split(':')
            set_config(section, option, value)
## Write out config options - safely
def save_config():
    """Persist the in-memory configFile dict to CONFIG_FILE as JSON.

    Flushes and fsyncs before close so a crash cannot leave partially
    buffered data on disk.
    """
    global CONFIG_FILE
    # BUG FIX: serialize before opening with 'w' — the original truncated the
    # file first, so a serialization error destroyed the existing config.
    serialized = json.dumps(configFile, sort_keys=True, indent=4)
    # BUG FIX: 'with' guarantees the handle is closed even if a write fails.
    with open(CONFIG_FILE, 'w') as config_file:
        config_file.write(serialized)
        config_file.flush()
        os.fsync(config_file.fileno())
## Get a value from our persistent config file.
## @param section The section (in form [Section] in the file) where to look.
## @param option The particular option, or key, whose value we want to look up.
## @param default The default value to return if there is no value for that section/option combination.
def get_config(section, option, default):
    """Return configFile[section][option], or *default* when absent or uninitialized."""
    try:
        return configFile[section][option]
    # BUG FIX: a bare 'except:' also swallowed KeyboardInterrupt/SystemExit.
    # NameError covers configFile not yet loaded; KeyError a missing
    # section/option; TypeError a non-dict section value.
    except (NameError, KeyError, TypeError):
        return default
## Set a value in our persistent config file.
## @param section The section (in form [Section] in the file) where to work. The section is created if it doesn't exist.
## @param option The particular option, or key, whose value we want to set. The option is replaced if existent, or otherwise it is
##        created.
## @param value The value to set for that section/option combination.
def set_config(section, option, value):
    if not section in configFile:
        configFile[section] = {}
    # NOTE(review): if the existing value supports .append (a list), the new
    # value is appended; any other case (missing key, scalar value) falls
    # into the except and replaces it. Presumably this supports list-valued
    # options such as Components/list — confirm before changing.
    try:
        configFile[section][option].append(value)
    except:
        configFile[section][option] = value
    # TODO: Save the config file at this point?
    return configFile[section][option]
### Components
## Loads the components in [Components]list. They should be normal python
## import paths, e.g., list=intensity.components.example_component,some.other.component
def load_components():
    components = get_config('Components', 'list', '')
    # NOTE(review): when the config has no Components/list entry, components
    # is the string '' and components.append(...) below would raise
    # AttributeError — presumably the config always stores a JSON list in
    # practice; confirm.
    # Load additional commandline-specific components
    MARKER = '-component:'
    for arg in sys.argv:
        if arg[:len(MARKER)] == MARKER:
            component = arg[len(MARKER):]
            if component not in components:
                components.append(component)
    print "Loading components...", components
    for component in components:
        if component == '': continue
        print "Loading component '%s'..." % component
        __import__(component, level=1)
### Action queues
from intensity.safe_actionqueue import *
## Action queue for stuff to be done in the main thread
main_actionqueue = SafeActionQueue()
## Action queue for stuff to be done in a parallel thread
side_actionqueue = SafeActionQueue()
# NOTE(review): main_loop() is invoked at import time — presumably it spawns
# its worker and returns rather than blocking; confirm in safe_actionqueue.
side_actionqueue.main_loop()
#
# Quitting system
#
# Module-level shutdown flag polled by the engine's main loop.
_should_quit = False

## Notifies us to quit. Sauer checks should_quit, and quits if set to true
def quit():
    """Request shutdown; should_quit() returns True from now on."""
    global _should_quit
    _should_quit = True

## @return Whether quitting has been called, and we should shut down.
def should_quit():
    """Return True once quit() has been called."""
    return _should_quit
|
import unittest
from katas.kyu_7.exes_and_ohs import xo
class XOTestCase(unittest.TestCase):
    """Tests for xo() — from these cases it presumably returns True when the
    counts of 'x' and 'o' are equal, ignoring other characters; confirm in
    the kata module."""
    def test_true(self):
        # one 'x', one 'o'
        self.assertTrue(xo('xo'))
    def test_true_2(self):
        # trailing digit present alongside one 'x', one 'o'
        self.assertTrue(xo('xo0'))
    def test_false(self):
        # three 'x' vs two 'o'
        self.assertFalse(xo('xxxoo'))
|
# This module automates pixels
#
# Use this like:
# pixels1 = neopixel.NeoPixel(board.A1, 20, brightness=0.2, auto_write=False)
# p = EPixels(pixels1)
# p.setAll(0xff0000) # red
# p.setDisableMask(10, 0) # stops a bad pixel
import time
class EPixels:
    """Convenience wrapper around a NeoPixel strip.

    Adds rainbow animations, blinking helpers, and a per-pixel disable mask
    so dead or miswired pixels can be forced off on every update().
    """

    def __init__(self, pixels):
        self.pixels = pixels
        self.pixCount = len(self.pixels)
        # 1 = pixel enabled, 0 = pixel forced off on every update().
        self.pixelMask = [1] * self.pixCount
        # Monotonically increasing hue used by the auto* animation helpers.
        self.autoHue = 0
        self.blank()

    def autoRainbowAll(self):
        """Advance the internal hue and render one rainbowAll frame."""
        self.autoHue = self.autoHue + 1
        self.rainbowAll(self.autoHue)

    def autoRainbowCycle(self):
        """Advance the internal hue and render one rainbowCycle frame."""
        self.autoHue = self.autoHue + 1
        self.rainbowCycle(self.autoHue)

    def blank(self):
        """Turn every pixel off."""
        self.setAll(0)

    def setAll(self, color):
        """Set every pixel to *color* and push the update."""
        for i in range(self.pixCount):
            self.pixels[i] = color
        self.update()

    def blinkAll(self, color, rep, delay):
        """Blink the whole strip *rep* times; *delay* is the full on+off period (s)."""
        for _ in range(rep):
            self.setAll(color)
            time.sleep(delay/2)
            self.setAll(0)
            time.sleep(delay/2)

    def blinkSingle(self, idx, color, rep, delay):
        """Blink one pixel *rep* times; *delay* is the full on+off period (s)."""
        for _ in range(rep):
            self.pixels[idx] = color
            self.update()
            time.sleep(delay/2)
            self.pixels[idx] = 0
            self.update()
            time.sleep(delay/2)

    def setDisableMask(self, index, value):
        """Set the mask for a 1-based pixel *index*: 0 disables it, 1 re-enables."""
        index = index - 1  # public API is 1-based
        if index >= 0 and index < self.pixCount:
            self.pixelMask[index] = value
            self.update()
        else:
            # BUG FIX: the message named a nonexistent method ('setMask') and
            # blamed 'value' when the out-of-range argument is the index.
            print("EPixels.setDisableMask: index out of range")

    # hueBase: 0..255
    def rainbowAll(self, hueBase):
        """Render a rainbow in which all pixels shift hue together."""
        for i in range(self.pixCount):
            idx = int(i + hueBase)
            self.pixels[i] = self.rgbWheel(idx & 255)
        self.update()

    # hueShiftBase: 0..255
    def rainbowCycle(self, hueShiftBase):
        """Render a rainbow spread across the strip, rotated by hueShiftBase."""
        for i in range(self.pixCount):
            idx = (i * 256 // self.pixCount) + hueShiftBase * 5
            self.pixels[i] = self.rgbWheel(idx & 255)
        self.update()

    # 0..255: transitions r - g - b - back to r.
    def rgbWheel(self, w):
        """Map a hue position 0..255 to an (r, g, b) tuple; out of range gives black."""
        if w < 0 or w > 255:
            return (0, 0, 0)
        if w < 85:
            return (255 - w * 3, w * 3, 0)
        if w < 170:
            w -= 85
            return (0, 255 - w * 3, w * 3)
        w -= 170
        return (w * 3, 0, 255 - w * 3)

    # update, keeping the mask into account
    def update(self):
        """Push pixel state to the strip, forcing masked-off pixels to 0 first."""
        for i in range(self.pixCount):
            # BUG FIX: was 'is 0' — identity comparison against an int literal
            # is unreliable and a SyntaxWarning in modern CPython; use equality.
            if self.pixelMask[i] == 0:
                self.pixels[i] = 0
        self.pixels.show()
|
#Models creates the database
from django.db import models
from django.utils import timezone
#table
class Billboard(models.Model):
    """A billboard post with creation timestamp, title, body text, and author."""
    created_date = models.DateTimeField(default=timezone.now)
    title = models.CharField(max_length=200)
    text = models.TextField(max_length=2000)
    author=models.CharField(max_length=10)
    #functions
    def publish(self):
        # NOTE(review): no published_date field is declared on this model, so
        # this sets a plain instance attribute that save() will not persist to
        # the database — confirm whether a DateTimeField is missing.
        self.published_date = timezone.now()
        self.save()
    #added for readability
    def __str__(self):
        # Human-readable representation used by the Django admin and shell.
        return self.title
# class Comments(models.Models):
#     user_comment=models.ForeignKey(Billboard,on_delete=models.CASCADE)
#     author=models.CharField(max_length=10)
#     text=models.TextField(max_length=2000)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.