employes = {
    "id01": {"prenom": "Paul", "nom": "Dupont", "age": 32},
    "id02": {"prenom": "Julie", "nom": "Dupuit", "age": 25},
    "id03": {"prenom": "Patrick", "nom": "Ferrand", "age": 36}
}
print(employes)
# Delete an entry, guarding against a missing key
if "id03" in employes:
    del employes["id03"]
# Update a nested value
if "id02" in employes:
    employes["id02"]["age"] = 26
# Read a nested value
if "id01" in employes:
    print(employes["id01"]["age"])
# print(employes)
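# A short follow-on sketch (not part of the original exercise): the same lookups
# written with dict.get(), which avoids a KeyError when an id is missing.
# "id04" below is only an illustrative missing key.
for emp_id, infos in employes.items():
    print(emp_id, infos.get("prenom"), infos.get("nom"), infos.get("age"))
print(employes.get("id04", {}).get("age"))  # prints None: "id04" does not exist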
|
import sys
import cubey
def main(serialPort):
cube = cubey.Cube(serialPort)
print "Listening, Ctrl-C to stop..."
try:
while True:
rawMessage = cube.sendCommand("m n u")
printMessage(rawMessage)
except KeyboardInterrupt:
print
cube.breakOut()
print "Done!"
def printMessage(rawMessage):
print
print "Got a message!"
print "=============="
print
contents = map(int, rawMessage.split())
rowFormat = "% 4X |" + (" %02X" * 16)
print " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
print " ------------------------------------------------"
for rowStartIndex in range(0, 512, 16):
print rowFormat % tuple([rowStartIndex] + contents[rowStartIndex:rowStartIndex + 16])
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Gimme a serial port!"
sys.exit(1)
serialPort = sys.argv[1]
main(serialPort)
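# Illustrative output sketch (assuming a 512-byte payload of zeros): each row
# printed by printMessage() looks like
#    0 | 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
#   10 | 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
# i.e. a 16-byte hex dump per row, with the row's start offset (in hex) on the left.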
|
import commands
import math
import os
import subprocess
import numpy as np
import numpy.ma as ma
import nibabel as nb
import pkg_resources as p
import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib import pyplot
from mpl_toolkits.axes_grid1 import ImageGrid
import nipype.pipeline.engine as pe
from nipype.interfaces import afni
import nipype.interfaces.utility as util
def append_to_files_in_dict_way(list_files, file_):
"""Combine files so at each resource in file appears exactly once.
Parameters
----------
list_files : list
file_ : string
Returns
-------
None
Notes
-----
    Writes the contents of file_ into each file in list_files, ensuring that
    each resource finally appears exactly once per file.
"""
f_1 = open(file_, 'r')
lines = f_1.readlines()
lines = [line.rstrip('\r\n') for line in lines]
one_dict = {}
for line in lines:
if not line in one_dict:
one_dict[line] = 1
f_1.close()
for f_ in list_files:
two_dict = {}
f_2 = open(f_, 'r')
lines = f_2.readlines()
f_2.close()
f_2 = open(f_, 'w')
lines = [line.rstrip('\r\n') for line in lines]
for line in lines:
if not line in one_dict:
two_dict[line] = 1
for key in one_dict:
if not key in two_dict:
two_dict[key] = 1
for key in two_dict:
print >> f_2, key
        f_2.close()
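# Illustrative behaviour of append_to_files_in_dict_way (not executed here): if
# file_ lists the resources {"a", "b"} and a file in list_files lists {"b", "c"},
# that file is rewritten to hold the union {"a", "b", "c"}, one resource per line
# (in arbitrary dict order).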
def first_pass_organizing_files(qc_path):
"""First Pass at organizing qc txt files.
Parameters
----------
qc_path : string
existing path of qc_files_here directory
Returns
-------
None
Notes
-----
    Combines files with the same strategy. The first pass combines file names
    where one file name is a substring of the other.
"""
import os
from CPAC.qc.utils import append_to_files_in_dict_way
if not os.path.exists(qc_path):
os.makedirs(qc_path)
qc_files = os.listdir(qc_path)
strat_dict = {}
for file_ in sorted(qc_files, reverse=True):
if not ('.txt' in file_):
continue
file_ = os.path.join(qc_path, file_)
str_ = os.path.basename(file_)
str_ = str_.replace('qc_', '')
str_ = str_.replace('scan_', '')
str_ = str_.replace('.txt', '')
str_ = str_.replace('____', '_')
str_ = str_.replace('___', '_')
str_ = str_.replace('__', '_')
if '_hp_' in str_ and '_fwhm_' in str_ and \
not ('_bandpass_freqs_' in str_):
str_, fwhm_val = str_.split('_fwhm_')
fwhm_val = '_fwhm_' + fwhm_val
str_, hp_lp_ = str_.split('_hp_')
hp_lp_ = '_hp_' + hp_lp_
str_ = str_ + fwhm_val + hp_lp_
        if not strat_dict:
strat_dict[str_] = [file_]
else:
flag_ = 0
for key_ in strat_dict.keys():
if str_ in key_:
append_to_files_in_dict_way(strat_dict[key_], file_)
flag_ = 1
if flag_ == 1:
os.system('rm -f %s' % file_)
else:
strat_dict[str_] = [file_]
def second_pass_organizing_files(qc_path):
"""Second Pass at organizing qc txt files.
Parameters
----------
qc_path : string
existing path of qc_files_here directory
Returns
-------
None
Notes
-----
    Combines files with the same strategy. This pass combines the files for the
    falff and alff derivatives with the others.
"""
import os
from CPAC.qc.utils import append_to_files_in_dict_way
qc_files = os.listdir(qc_path)
strat_dict = {}
got_hp_lp = 0
got_bp = 0
for file_ in sorted(qc_files, reverse=True):
if not ('.txt' in file_):
continue
str_ = file_
file_ = os.path.join(qc_path, file_)
str_ = str_.replace('qc_scan_', '')
str_ = str_.replace('.txt', '')
str_ = str_.replace('____', '_')
str_ = str_.replace('___', '_')
str_ = str_.replace('__', '_')
fwhm_val_ = ''
# organize all derivatives excluding alff falff
if '_bandpass_freqs_' in str_:
if not str_ in strat_dict:
strat_dict[str_] = [file_]
else:
print 'Error: duplicate keys for files in QC 2nd file_org ' \
'pass: %s %s' % (strat_dict[str_], file_)
                raise Exception('duplicate keys in QC 2nd file_org pass')
# organize alff falff
elif ('_hp_' in str_) and ('_lp_' in str_):
key_ = ''
key_1 = ''
hp_lp_ = ''
if '_fwhm_' in str_:
key_1 = ''
key_, hp_lp_ = str_.split('_hp_')
ignore, fwhm_val_ = hp_lp_.split('_fwhm_')
hp_lp_ = '_hp_' + ignore
key_1 = '_fwhm_' + fwhm_val_
else:
key_, hp_lp_ = str_.split('_hp_')
hp_lp_ = '_hp_' + hp_lp_
flag_ = 0
for key in strat_dict.keys():
if (key_ in key) and (key_1 in key):
append_to_files_in_dict_way(strat_dict[key], file_)
str_ = strat_dict[key][0].replace('.txt', '')
new_fname = str_ + hp_lp_ + '.txt'
os.system('mv %s %s' %(strat_dict[key][0], new_fname))
del strat_dict[key]
flag_ = 1
if flag_ == 1:
os.system('rm -f %s' % file_)
else:
if not str_ in strat_dict:
strat_dict[str_] = [file_]
else:
print 'Error: duplicate keys for files in QC 2nd file_org ' \
'pass: %s %s' % (strat_dict[str_], file_)
                raise Exception('duplicate keys in QC 2nd file_org pass')
def organize(dict_, all_ids, png_, new_dict):
"""Organizes pngs according to their IDS in new_dict dictionary
Parameters
----------
dict_ : dictionary
dict containing png id no and png type(montage/plot/hist)
all_ids : list
list of all png id numbers
png_ : string
path to png
new_dict : dictionary
        dictionary containing ids and png lists
Returns
-------
all_ids : list
list of png id nos
"""
for id_no, png_type in dict_.items():
if png_type in png_:
if not id_no in new_dict.keys():
new_dict[id_no] = [png_]
else:
list_ = new_dict[id_no]
list_.append(png_)
new_dict[id_no] = list(list_)
if not id_no in all_ids:
all_ids.append(id_no)
return all_ids
def grp_pngs_by_id(pngs_, qc_montage_id_a, qc_montage_id_s, qc_plot_id, qc_hist_id):
"""Groups pngs by their ids.
Parameters
----------
pngs_ : list
list of all pngs
qc_montage_id_a : dictionary
dictionary of axial montages key : id no
value is list of png types
qc_montage_id_s : dictionary
dictionary of sagittal montages key : id no
value is list of png types
qc_plot_id : dictionary
dictionary of plot pngs key : id no
value is list of png types
qc_hist_id : dictionary
dictionary of histogram pngs key : id no
value is list of png types
Returns
-------
dict_a : dictionary
dictionary of axial montages key : id no
value is list of paths to axial montages
dict_s : dictionary
dictionary of sagittal montages key : id no
value is list of paths to sagittal montages
dict_plot : dictionary
dictionary of plot pngs key : id no
value is list of paths to plots
dict_hist : dictionary
dictionary of histogram pngs key : id no
value is list of paths to histogram pngs
all_ids : list
list of png id nos
"""
from CPAC.qc.utils import organize
dict_a = {}
dict_s = {}
dict_hist = {}
dict_plot = {}
all_ids = []
for png_ in pngs_:
all_ids = organize(qc_montage_id_a, all_ids, png_, dict_a)
all_ids = organize(qc_montage_id_s, all_ids, png_, dict_s)
all_ids = organize(qc_plot_id, all_ids, png_, dict_plot)
all_ids = organize(qc_hist_id, all_ids, png_, dict_hist)
return dict(dict_a), dict(dict_s), dict(dict_hist), dict(dict_plot), list(all_ids)
def add_head(f_html_, f_html_0, f_html_1, name=None):
"""Write HTML Headers to various html files.
Parameters
----------
f_html_ : string
path to main html file
f_html_0 : string
path to navigation bar html file
f_html_1 : string
        path to html file containing pngs and plots
Returns
-------
None
"""
print >>f_html_, "<html>"
print >>f_html_, "<head>"
print >>f_html_, "<title>C-PAC QC</title>"
print >>f_html_, "</head>"
print >>f_html_, ""
print >>f_html_, "<frameset cols=\"20%,80%\">"
print >>f_html_, ""
print >>f_html_, " <frame src=\"%s\" name=\"menu\"><frame src=\"%s" \
"\" name=\"content\">" \
"</frameset>" %(f_html_0.name, f_html_1.name)
print >>f_html_, ""
print >>f_html_, "</html>"
print >>f_html_0, "<html>"
print >>f_html_0, "<link href=\"%s\" rel=\"stylesheet\" " \
"media=\"screen\">"%(p.resource_filename('CPAC',"GUI/resources/html/_static/nature.css"))
print >>f_html_0, "<link href=\"%s\" rel=\"stylesheet\" " \
"media=\"screen\">"%(p.resource_filename('CPAC',"GUI/resources/html/_static/pygments.css"))
print >>f_html_0, "<head>"
print >>f_html_0, "<base target=\"content\">"
print >>f_html_0, "</head>"
print >>f_html_0, "<body bgcolor = \"#FFFF00\">"
print >>f_html_0, "<div>"
print >>f_html_0, "<div class=\"sphinxsidebarwrapper\">"
print >>f_html_0, "<p class=\"logo\"><a href=\"" \
"https://fcp-indi.github.io\" target=\"website\">"
print >>f_html_0, "<p style = \"font-family: 'Times-New-Roman'\">"
print >>f_html_0, "<img class=\"logo\" src=\"%s\" " \
"alt=\"Logo\"/>"%(p.resource_filename('CPAC', "GUI/resources/html/_static/cpac_logo.jpg"))
print >>f_html_0, "</a></p>"
print >>f_html_0, "<h3>Table Of Contents</h3>"
print >>f_html_0, "<ul>"
print >>f_html_1, '<link href="default.css" rel="stylesheet" ' \
'type="text/css" />'
print >>f_html_1, "<html>"
print >>f_html_1, "</style>"
print >>f_html_1, "<body>"
print >>f_html_1, "<a name='reverse'>"
if name:
print >>f_html_1, "<br><h1>C-PAC Visual Data Quality Control " \
"Interface</h1>"
print >>f_html_1, "<h3>C-PAC Website: <a href=\"" \
"https://fcp-indi.github.io/\" target=" \
"\"website\">https://fcp-indi.github.io</a>" \
"<br><br>"
print >>f_html_1, "C-PAC Support Forum: <a href=\"" \
"https://groups.google.com/forum/#!forum" \
"/cpax_forum\" target=\"forum\">" \
"https://groups.google.com/forum/#!forum/" \
"cpax_forum</a>"
print >>f_html_1, "<hr><br>Scan and strategy identifiers:" \
"<br>{0}".format(name)
print >>f_html_1, "</h3><br>"
def add_tail(f_html_, f_html_0, f_html_1):
"""Write HTML Tail Tags to various html files.
Parameters
----------
f_html_ : string
path to main html file
f_html_0 : string
path to navigation bar html file
f_html_1 : string
        path to html file containing pngs and plots
Returns
-------
None
"""
print >>f_html_0, "</ul>"
print >>f_html_0, "</div>"
print >>f_html_0, "</div>"
print >>f_html_0, "</body>"
print >>f_html_0, "</html>"
print >>f_html_1, "</body>"
print >>f_html_1, "</html>"
def feed_line_nav(id_, image_name, anchor, f_html_0, f_html_1):
"""Write to navigation bar html file.
Parameters
----------
id_ : string
id of the image
anchor : string
anchor id of the image
image_name : string
name of image
f_html_0 : string
path to navigation bar html file
f_html_1 : string
        path to html file containing pngs and plots
Returns
-------
None
"""
image_readable = image_name
if image_name == 'skullstrip_vis':
image_readable = 'Visual Result of Skull Strip'
if image_name == 'csf_gm_wm':
image_readable = 'Grey Matter, White Matter & CSF'
if image_name == 'snr':
image_readable = 'Signal to Noise Ratio'
if image_name.find('snr_hist') > -1:
image_readable = 'Histogram of Signal to Noise Ratio'
if image_name.find('mni_normalized') > -1:
image_readable = 'MNI Edge Overlapped on Normalized Anatomical'
if image_name == 'mean_func_with_t1_edge':
image_readable = 'T1 Edge Overlapped on Mean Functional Image'
if image_name == 'mean_func_with_mni_edge':
image_readable = 'MNI Edge Overlapped on Mean Functional Image'
if image_name.find('movement_trans_plot') >-1:
image_readable = 'Head Displacement Plot'
if image_name.find('movement_rot_plot') >-1:
image_readable = 'Head Rotation Plot'
if image_name.find('fd_plot') > -1:
image_readable = 'Framewise Displacement Plot'
if image_name == 'sca_roi_smooth':
image_readable = 'Seed-based Correlation Analysis'
if image_name == 'sca_roi_smooth_hist':
image_readable = 'Histogram of Seed-based Correlation Analysis'
if image_name == 'centrality_smooth':
image_readable = 'Network Centrality'
if image_name == 'centrality_smooth_hist':
image_readable = 'Histogram of Network Centrality'
if image_name == 'temporal_dual_regression_smooth':
image_readable = 'Temporal Dual Regression'
if image_name == 'temporal_dual_regression_smooth_hist':
image_readable = 'Histogram of Temporal Dual Regression'
if image_name == 'vmhc_smooth':
image_readable = 'Voxel-Mirrored Homotopic Connectivity'
if image_name == 'vmhc_smooth_hist':
image_readable = 'Histogram of Voxel-Mirrored Homotopic Connectivity'
if image_name == 'reho_smooth':
image_readable = 'Regional Homogeneity'
if image_name == 'reho_smooth_hist':
image_readable = 'Histogram of Regional Homogeneity'
if image_name == 'alff_smooth':
image_readable = 'Amplitude of Low-Frequency Fluctuation'
if image_name == 'alff_smooth_hist':
image_readable = 'Histogram of Amplitude of Low-Frequency Fluctuation'
if image_name == 'falff_smooth':
image_readable = 'fractional Amplitude of Low-Frequency Fluctuation'
if image_name == 'falff_smooth_hist':
image_readable = 'Histogram of fractional Amplitude of Low-Frequency Fluctuation'
print >>f_html_0, "<li><a href='%s#%s'> %s </a></li>" % (f_html_1.name,
anchor,
image_readable)
def feed_line_body(image_name, anchor, image, f_html_1):
"""Write to html file that has to contain images.
Parameters
----------
image_name : string
name of image
anchor : string
anchor id of the image
image : string
path to the image
f_html_1 : string
        path to html file containing pngs and plots
Returns
-------
None
"""
image_readable = image_name
if image_name == 'skullstrip_vis':
image_readable = 'Visual Result of Skull Strip'
if image_name == 'csf_gm_wm':
image_readable = 'Grey Matter, White Matter & CSF'
if image_name == 'snr':
image_readable = 'Signal to Noise Ratio'
if image_name.find('snr_hist') > -1:
image_readable = 'Histogram of Signal to Noise Ratio'
if image_name.find('mni_normalized') > -1:
image_readable = 'MNI Edge Overlapped on Normalized Anatomical'
if image_name == 'mean_func_with_t1_edge':
image_readable = 'T1 Edge Overlapped on Mean Functional Image'
if image_name == 'mean_func_with_mni_edge':
image_readable = 'MNI Edge Overlapped on Mean Functional Image'
if image_name.find('movement_trans_plot') >-1:
image_readable = 'Head Displacement Plot'
if image_name.find('movement_rot_plot') >-1:
image_readable = 'Head Rotation Plot'
if image_name.find('fd_plot') > -1:
image_readable = 'Framewise Displacement Plot'
if image_name == 'sca_roi_smooth':
image_readable = 'Seed-based Correlation Analysis'
if image_name == 'sca_roi_smooth_hist':
image_readable = 'Histogram of Seed-based Correlation Analysis'
if image_name == 'centrality_smooth':
image_readable = 'Network Centrality'
if image_name == 'centrality_smooth_hist':
image_readable = 'Histogram of Network Centrality'
if image_name == 'temporal_dual_regression_smooth':
image_readable = 'Temporal Dual Regression'
if image_name == 'temporal_dual_regression_smooth_hist':
image_readable = 'Histogram of Temporal Dual Regression'
if image_name == 'vmhc_smooth':
image_readable = 'Voxel-Mirrored Homotopic Connectivity'
if image_name == 'vmhc_smooth_hist':
image_readable = 'Histogram of Voxel-Mirrored Homotopic Connectivity'
if image_name == 'reho_smooth':
image_readable = 'Regional Homogeneity'
if image_name == 'reho_smooth_hist':
image_readable = 'Histogram of Regional Homogeneity'
if image_name == 'alff_smooth':
image_readable = 'Amplitude of Low-Frequency Fluctuation'
if image_name == 'alff_smooth_hist':
image_readable = 'Histogram of Amplitude of Low-Frequency Fluctuation'
if image_name == 'falff_smooth':
image_readable = 'fractional Amplitude of Low-Frequency Fluctuation'
if image_name == 'falff_smooth_hist':
image_readable = 'Histogram of fractional Amplitude of Low-Frequency Fluctuation'
print >>f_html_1, "<h3><a name='%s'>%s</a> <a href='#reverse'>TOP</a></h3>" %(anchor, image_readable)
img_tag = "<br><img src='%s', alt='%s'>" %(image, image_readable)
print >>f_html_1, img_tag
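# The readable-name mapping above is repeated in feed_line_nav, feed_line_body
# and feed_lines_html. A hypothetical dict-based refactor (a sketch only, not
# wired into the functions above) could look like:
#
#     IMAGE_READABLE_NAMES = {
#         'skullstrip_vis': 'Visual Result of Skull Strip',
#         'csf_gm_wm': 'Grey Matter, White Matter & CSF',
#         'snr': 'Signal to Noise Ratio',
#         # ... one entry per montage/plot/histogram type ...
#     }
#
#     def readable_name(image_name):
#         return IMAGE_READABLE_NAMES.get(image_name, image_name)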
def get_map_id(str_, id_):
"""Returns the proper map name given identifier for it.
Parameters
----------
str_ : string
string containing text for identifier
id_ : string
string for identifier
Returns
-------
map_id : string
proper name for a map
"""
map_id = None
'''
id_: centrality_
str_: degree_centrality_binarize_99_1mm_centrality_outputs_a.png
str_ post-split: degree_centrality_binarize_99_1mm_centrality_outputs
180515-20:46:14,382 workflow ERROR:
[!] Error: The QC interface page generator ran into a problem.
Details: too many values to unpack
'''
# so whatever goes into "type_" and then "map_id" becomes the "Map: "
# Mask: should be the ROI nifti, but right now it's the nuisance strat...
# Measure: should be eigenvector binarize etc., but it's just "centrality_outputs"
if 'centrality' in id_ or 'lfcd' in id_:
# TODO: way too reliant on a very specific string format
# TODO: needs re-factoring
str_ = str_.split('_a.png')[0]
type_, str_ = str_.rsplit(id_, 1)
if "_99_1mm_" in type_:
type_ = type_.replace("_99_1mm_", "")
map_id = type_
'''
str_ = str_.split('_')[0]
type_ = type_.replace('_', '')
map_id = '_'.join([type_, id_, str_])
'''
return map_id
else:
str_ = str_.split(id_)[1]
str_ = str_.split('_')[0]
map_id = '_'.join([id_, str_])
return map_id
def get_map_and_measure(png_a):
"""Extract Map name and Measure name from png.
Parameters
----------
png_a : string
name of png
Returns
-------
map_name : string
proper name for map
measure_name : string
proper name for measure
"""
import os
from CPAC.qc.utils import get_map_id
measure_name = None
map_name = None
if '_fwhm_' in png_a:
measure_name = os.path.basename(os.path.dirname(os.path.dirname(png_a)))
else:
measure_name = os.path.basename(os.path.dirname((png_a)))
str_ = os.path.basename(png_a)
if 'sca_tempreg' in png_a:
map_name = get_map_id(str_, 'maps_roi_')
if 'sca_roi' in png_a:
map_name = get_map_id(str_, 'ROI_')
if 'dr_tempreg' in png_a:
map_name = get_map_id(str_, 'temp_reg_map_')
if 'centrality' in png_a:
map_name = get_map_id(str_, 'centrality_')
return map_name, measure_name
def feed_lines_html(id_, dict_a, dict_s, dict_hist, dict_plot,
qc_montage_id_a, qc_montage_id_s, qc_plot_id, qc_hist_id,
f_html_0, f_html_1):
"""Write HTML Tags to various html files and embeds images.
Parameters
----------
dict_a : dictionary
dictionary of axial montages key : id no
value is list of paths to axial montages
dict_s : dictionary
dictionary of sagittal montages key : id no
value is list of paths to sagittal montages
dict_plot : dictionary
dictionary of plot pngs key : id no
value is list of paths to plots
dict_hist : dictionary
dictionary of histogram pngs key : id no
value is list of paths to histogram pngs
qc_montage_id_a : dictionary
dictionary of axial montages key : id no
value is list of png types
qc_montage_id_s : dictionary
dictionary of sagittal montages key : id no
value is list of png types
qc_plot_id : dictionary
dictionary of plot pngs key : id no
value is list of png types
qc_hist_id : dictionary
dictionary of histogram pngs key : id no
value is list of png types
f_html_0 : string
path to navigation bar html file
f_html_1 : string
        path to html file containing pngs and plots
Returns
-------
None
"""
from CPAC.qc.utils import feed_line_nav
from CPAC.qc.utils import feed_line_body
from CPAC.qc.utils import get_map_and_measure
if id_ in dict_a:
dict_a[id_] = sorted(dict_a[id_])
dict_s[id_] = sorted(dict_s[id_])
if id_ in dict_hist:
dict_hist[id_] = sorted(dict_hist[id_])
idxs = len(dict_a[id_])
for idx in range(0, idxs):
png_a = dict_a[id_][idx]
png_s = dict_s[id_][idx]
png_h = None
if id_ in dict_hist:
png_h = dict_hist[id_][idx]
measure_name = None
map_name = None
if idxs > 1:
map_name, measure_name = get_map_and_measure(png_a)
id_a = str(id_)
id_s = str(id_) + '_s'
id_h = str(id_) + '_' + str(id_)
image_name_a = None
image_name_h = None
image_name_a_nav = qc_montage_id_a[id_].replace('_a', '')
if id_ in qc_hist_id:
image_name_h_nav = qc_hist_id[id_]
if map_name is not None:
image_name_a = 'Measure: ' + qc_montage_id_a[id_].replace('_a', '') + ' Mask: ' + measure_name + ' Map: ' + map_name
if id_ in qc_hist_id:
image_name_h = 'Measure: ' + qc_hist_id[id_] + ' Mask:'+ measure_name + ' Map: ' + map_name
else:
image_name_a = qc_montage_id_a[id_].replace('_a', '')
if id_ in qc_hist_id:
image_name_h = qc_hist_id[id_]
if idx != 0:
id_a = '_'.join([id_a, str(idx), 'a'])
id_s = '_'.join([id_s, str(idx), 's'])
id_h = '_'.join([id_h, str(idx), 'h' ])
if idx == 0:
if image_name_a_nav == 'skullstrip_vis':
image_readable = 'Visual Result of Skull Strip'
if image_name_a_nav == 'csf_gm_wm':
image_readable = 'Grey Matter, White Matter & CSF'
if image_name_a_nav == 'snr':
image_readable = 'Signal to Noise Ratio'
if image_name_a_nav == 'snr_hist':
image_readable = 'Histogram of Signal to Noise Ratio'
if image_name_a_nav == 'mean_func_with_t1_edge':
image_readable = 'T1 Edge Overlapped on Mean Functional Image'
if image_name_a_nav == 'mean_func_with_mni_edge':
image_readable = 'MNI Edge Overlapped on Mean Functional Image'
if image_name_a_nav == 'movement_trans_plot':
image_readable = 'Head Displacement Plot'
if image_name_a_nav == 'movement_rot_plot':
image_readable = 'Head Rotation Plot'
if image_name_a_nav == 'fd_plot':
image_readable = 'Framewise Displacement Plot'
if image_name_a_nav == 'sca_roi_smooth':
image_readable = 'Seed-based Correlation Analysis'
if image_name_a_nav == 'sca_roi_smooth_hist':
image_readable = 'Histogram of Seed-based Correlation Analysis'
if image_name_a_nav == 'centrality_smooth':
image_readable = 'Network Centrality'
if image_name_a_nav == 'centrality_smooth_hist':
image_readable = 'Histogram of Network Centrality'
if image_name_a_nav == 'temporal_dual_regression_smooth':
image_readable = 'Temporal Dual Regression'
if image_name_a_nav == 'temporal_dual_regression_smooth_hist':
image_readable = 'Histogram of Temporal Dual Regression'
if image_name_a_nav == 'vmhc_smooth':
image_readable = 'Voxel-Mirrored Homotopic Connectivity'
if image_name_a_nav == 'vmhc_smooth_hist':
image_readable = 'Histogram of Voxel-Mirrored Homotopic Connectivity'
if image_name_a_nav == 'reho_smooth':
image_readable = 'Regional Homogeneity'
if image_name_a_nav == 'reho_smooth_hist':
image_readable = 'Histogram of Regional Homogeneity'
if image_name_a_nav == 'alff_smooth':
image_readable = 'Amplitude of Low-Frequency Fluctuation'
if image_name_a_nav == 'alff_smooth_hist':
image_readable = 'Histogram of Amplitude of Low-Frequency Fluctuation'
if image_name_a_nav == 'falff_smooth':
image_readable = 'fractional Amplitude of Low-Frequency Fluctuation'
if image_name_a_nav == 'falff_smooth_hist':
image_readable = 'Histogram of fractional Amplitude of Low-Frequency Fluctuation'
feed_line_nav(id_, image_name_a_nav, id_a, f_html_0, f_html_1)
feed_line_body(image_name_a, id_a, png_a, f_html_1)
feed_line_body('', id_s, png_s, f_html_1)
if id_ in dict_hist.keys():
if idx == 0:
feed_line_nav(id_, image_name_h_nav, id_h, f_html_0,
f_html_1)
feed_line_body(image_name_h, id_h, png_h, f_html_1)
if id_ in dict_plot:
id_a = str(id_)
image_name = qc_plot_id[id_]
png_a = dict_plot[id_][0]
feed_line_nav(id_, image_name, id_a, f_html_0, f_html_1)
feed_line_body(image_name, id_a, png_a, f_html_1)
def make_page(file_, qc_montage_id_a, qc_montage_id_s, qc_plot_id,
qc_hist_id):
"""Convert a 'qc_files_here' text file in the CPAC output directory into
a QC HTML page.
Parameters
----------
file_ : string
path to qc path file
qc_montage_id_a : dictionary
dictionary of axial montages key : id no
value is list of png types
qc_montage_id_s : dictionary
dictionary of sagittal montages key : id no
value is list of png types
qc_plot_id : dictionary
dictionary of plot pngs key : id no
value is list of png types
qc_hist_id : dictionary
dictionary of histogram pngs key : id no
value is list of png types
Returns
-------
None
"""
import os
from CPAC.qc.utils import grp_pngs_by_id, add_head, add_tail, \
feed_lines_html
with open(file_, 'r') as f:
pngs_ = [line.rstrip('\r\n') for line in f.readlines()]
html_f_name = file_.replace('.txt', '')
html_f_name = html_f_name.replace("'", "")
html_f_name_0 = html_f_name + '_navbar.html'
html_f_name_1 = html_f_name + '_page.html'
# TODO: this is a temporary patch until the completed QC interface is
# TODO: implemented
# pop the combined (navbar + content) page back into the output directory
# and give it a more obvious name
html_f_name = "{0}.html".format(html_f_name.replace("qc_scan",
"QC-interface_scan"))
html_f_name = html_f_name.replace("/qc_files_here", "")
f_html_ = open(html_f_name, 'wb')
f_html_0 = open(html_f_name_0, 'wb')
f_html_1 = open(html_f_name_1, 'wb')
dict_a, dict_s, dict_hist, dict_plot, all_ids = \
grp_pngs_by_id(pngs_, qc_montage_id_a, qc_montage_id_s, qc_plot_id,
qc_hist_id)
qc_path_file_id = os.path.basename(html_f_name).replace(".html", "")
add_head(f_html_, f_html_0, f_html_1, qc_path_file_id)
for id_ in sorted(all_ids):
feed_lines_html(id_, dict_a, dict_s, dict_hist, dict_plot,
qc_montage_id_a, qc_montage_id_s, qc_plot_id,
qc_hist_id, f_html_0, f_html_1)
add_tail(f_html_, f_html_0, f_html_1)
f_html_.close()
f_html_0.close()
f_html_1.close()
def make_qc_pages(qc_path, qc_montage_id_a, qc_montage_id_s, qc_plot_id, qc_hist_id):
"""Generates a QC HTML file for each text file in the 'qc_files_here'
folder in the CPAC output directory.
Parameters
----------
qc_path : string
path to qc_files_here directory
qc_montage_id_a : dictionary
dictionary of axial montages key : id no
value is list of png types
qc_montage_id_s : dictionary
dictionary of sagittal montages key : id no
value is list of png types
qc_plot_id : dictionary
dictionary of plot pngs key : id no
value is list of png types
qc_hist_id : dictionary
dictionary of histogram pngs key : id no
value is list of png types
Returns
-------
None
"""
import os
from CPAC.qc.utils import make_page
qc_files = os.listdir(qc_path)
for file_ in qc_files:
if not (file_.endswith('.txt')):
continue
make_page(os.path.join(qc_path, file_), qc_montage_id_a,
qc_montage_id_s, qc_plot_id, qc_hist_id)
def generateQCPages(qc_path, qc_montage_id_a, qc_montage_id_s, qc_plot_id,
qc_hist_id):
"""Generates the QC HTML files populated with the QC images that were
created during the CPAC pipeline run.
This function runs after the pipeline is over.
Parameters
----------
qc_path : string
path to qc_files_here directory
qc_montage_id_a : dictionary
dictionary of axial montages key : id no
value is list of png types
qc_montage_id_s : dictionary
dictionary of sagittal montages key : id no
value is list of png types
qc_plot_id : dictionary
dictionary of plot pngs key : id no
value is list of png types
qc_hist_id : dictionary
dictionary of histogram pngs key : id no
value is list of png types
Returns
-------
None
"""
from CPAC.qc.utils import first_pass_organizing_files, \
second_pass_organizing_files
from CPAC.qc.utils import make_qc_pages
# according to preprocessing strategy combines the files
first_pass_organizing_files(qc_path)
# according to bandpass and hp_lp and smoothing iterables combines the
# files
second_pass_organizing_files(qc_path)
make_qc_pages(qc_path, qc_montage_id_a, qc_montage_id_s, qc_plot_id,
qc_hist_id)
def afni_edge(in_file):
"""Run AFNI 3dedge3 on the input file - temporary function until the
interface issue in Nipype is sorted out."""
in_file = os.path.abspath(in_file)
out_file = os.path.join(os.getcwd(),
"{0}".format(os.path.basename(in_file).replace(".nii", "_edge.nii")))
cmd_string = ["3dedge3", "-input", in_file, "-prefix", out_file]
try:
retcode = subprocess.check_output(cmd_string)
except Exception as e:
err = "\n\n[!] Something went wrong with AFNI 3dedge3 while " \
"creating the an overlay for the QA pages.\n\nError details: " \
"{0}\n\nAttempted command: {1}" \
"\n\n".format(e, " ".join(cmd_string))
raise Exception(err)
return out_file
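# Example of the naming convention (illustrative paths only): afni_edge on
# "/data/sub01/mean_func.nii.gz" writes "mean_func_edge.nii.gz" into the current
# working directory and returns that path.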
def make_edge(wf_name='create_edge'):
"""Make edge file from a scan image
Parameters
----------
file_ : string
path to the scan
Returns
-------
new_fname : string
path to edge file
"""
wf_name = pe.Workflow(name=wf_name)
inputNode = pe.Node(util.IdentityInterface(fields=['file_']),
name='inputspec')
outputNode = pe.Node(util.IdentityInterface(fields=['new_fname']),
name='outputspec')
run_afni_edge_imports = ["import os", "import subprocess"]
run_afni_edge = pe.Node(util.Function(input_names=['in_file'],
output_names=['out_file'],
function=afni_edge,
imports=run_afni_edge_imports),
name='afni_3dedge3')
wf_name.connect(inputNode, 'file_', run_afni_edge, 'in_file')
wf_name.connect(run_afni_edge, 'out_file', outputNode, 'new_fname')
return wf_name
def gen_func_anat_xfm(func_, ref_, xfm_, interp_):
"""Transform functional file (std dev) into anatomical space.
Parameters
----------
func_ : string
functional scan
ref_ : string
path to reference file
xfm_ : string
path to transformation mat file
interp_ : string
interpolation measure string
Returns
-------
new_fname : string
path to the transformed scan
"""
new_fname = os.path.join(os.getcwd(), 'std_dev_anat.nii.gz')
cmd = ['applywarp', '--ref={0}'.format(ref_), '--in={0}'.format(func_),
'--out={0}'.format(new_fname), '--premat={0}'.format(xfm_),
'--interp={0}'.format(interp_)]
retcode = subprocess.check_output(cmd)
return new_fname
def gen_snr(std_dev, mean_func_anat):
"""Generate SNR file.
Parameters
----------
std_dev : string
path to std dev file in anat space
mean_func_anat : string
path to mean functional scan in anatomical space
Returns
-------
new_fname : string
path to the snr file
"""
new_fname = os.path.join(os.getcwd(), 'snr.nii.gz')
cmd = ['3dcalc', '-a', '{0}'.format(std_dev), '-b',
'{0}'.format(mean_func_anat), '-expr', 'b/a', '-prefix',
'{0}'.format(new_fname)]
retcode = subprocess.check_output(cmd)
return new_fname
def cal_snr_val(measure_file):
"""Calculate average snr value for snr image.
Parameters
----------
measure_file : string
path to input nifti file
Returns
-------
avg_snr_file : string
a text file store average snr value
"""
data = nb.load(measure_file).get_data()
data_flat = data.flatten()
data_no0 = data_flat[data_flat > 0]
snr_val = ma.mean(data_no0)
avg_snr_file = os.path.join(os.getcwd(), 'average_snr_file.txt')
    with open(avg_snr_file, 'wt') as f:
        f.write(str(snr_val) + '\n')
return avg_snr_file
def gen_std_dev(mask_, func_):
"""Generate std dev file.
Parameters
----------
mask_ : string
path to whole brain mask file
func_ : string
path to functional scan
Returns
-------
new_fname : string
path to standard deviation file
"""
new_fname = os.path.join(os.getcwd(), 'std_dev.nii.gz')
cmd = ["3dTstat", "-stdev", "-mask", "{0}".format(mask_), "-prefix",
"{0}".format(new_fname), "{0}".format(func_)]
retcode = subprocess.check_output(cmd)
return new_fname
def drange(min_, max_):
"""Generate list of float values in a specified range.
Parameters
----------
min_ : float
Min value
max_ : float
Max value
Returns
-------
range_ : list
list of float values in the min_ max_ range
"""
    step = float(max_ - min_) / 8.0
range_ = []
while min_ <= max_:
range_.append(float('%.3f' % round(min_, 3)))
min_ += step
return range_
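# Illustrative call: drange(0, 4) uses a step of (4 - 0) / 8 = 0.5 and returns
# [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0] (nine tick values), which is how
# the montage colorbars below get their y-ticks.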
def gen_plot_png(arr, measure, ex_vol=None):
"""Generate Motion FD Plot. Shows which volumes were dropped.
Parameters
----------
arr : list
Frame wise Displacements
measure : string
Label of the Measure
ex_vol : list
Volumes excluded
Returns
-------
png_name : string
path to the generated plot png
"""
matplotlib.rcParams.update({'font.size': 8})
arr = np.loadtxt(arr)
if ex_vol:
try:
ex_vol = np.genfromtxt(ex_vol, delimiter=',', dtype=int)
ex_vol = ex_vol[ex_vol > 0]
except:
ex_vol = []
else:
ex_vol = []
arr = arr[1:]
del_el = [x for x in ex_vol if x < len(arr)]
ex_vol = np.array(del_el)
fig = pyplot.figure(figsize=(10, 6))
pyplot.plot([i for i in xrange(len(arr))], arr, '-')
fig.suptitle('%s plot with Mean %s = %0.4f' % (measure, measure,
arr.mean()))
if measure == 'FD' and len(ex_vol) > 0:
pyplot.scatter(ex_vol, arr[ex_vol], c="red", zorder=2)
for x in ex_vol:
pyplot.annotate('( %d , %0.3f)' % (x, arr[x]), xy=(x, arr[x]),
arrowprops=dict(facecolor='black', shrink=0.0))
pyplot.xlabel('Volumes')
pyplot.ylabel('%s' % measure)
png_name = os.path.join(os.getcwd(), '%s_plot.png' % measure)
fig.savefig(os.path.join(os.getcwd(), png_name))
pyplot.close()
matplotlib.rcdefaults()
return png_name
def gen_motion_plt(motion_parameters):
"""
Function to Generate Matplotlib plot for motion.
Separate plots for Translation and Rotation are generated.
Parameters
----------
motion_parameters: string
Motion Parameters file
Returns
-------
translation_plot : string
path to translation plot
rotation_plot : string
path to rotation plot
"""
png_name1 = 'motion_trans_plot.png'
png_name2 = 'motion_rot_plot.png'
data = np.loadtxt(motion_parameters)
data_t = data.T
translation_plot = None
rotation_plot = None
titles1 = ['x', 'y', 'z']
titles2 = ['roll', 'pitch', 'yaw']
plt.gca().set_color_cycle(['red', 'green', 'blue'])
plt.plot(data_t[0])
plt.plot(data_t[1])
plt.plot(data_t[2])
plt.legend(['x', 'y', 'z'], loc='upper right')
plt.ylabel('Translation (mm)')
plt.xlabel('Volume')
plt.savefig(os.path.join(os.getcwd(), png_name1))
plt.close()
for i in range(3, 6):
for j in range(len(data_t[i])):
data_t[i][j] = math.degrees(data_t[i][j])
plt.gca().set_color_cycle(['red', 'green', 'blue'])
plt.plot(data_t[3])
plt.plot(data_t[4])
plt.plot(data_t[5])
plt.legend(['roll', 'pitch', 'yaw'], loc='upper right')
plt.ylabel('Rotation (degrees)')
plt.xlabel('Volume')
plt.savefig(os.path.join(os.getcwd(), png_name2))
plt.close()
translation_plot = os.path.join(os.getcwd(), png_name1)
rotation_plot = os.path.join(os.getcwd(), png_name2)
return translation_plot, rotation_plot
def gen_histogram(measure_file, measure):
"""Generates Histogram Image of intensities for a given input nifti file.
Parameters
----------
measure_file : string
path to input nifti file
measure : string
Name of the measure label in the plot
Returns
-------
hist_path : string
Path to the generated histogram png
"""
hist_path = None
from CPAC.qc.utils import make_histogram
import os
m_ = measure
if isinstance(measure_file, list):
hist_path = []
for file_ in measure_file:
measure = m_
if 'sca_roi' in measure.lower():
fname = os.path.basename(os.path.splitext(os.path.splitext(file_)[0])[0])
fname = fname.split('ROI_')[1]
fname = 'sca_ROI_' + fname.split('_')[0]
measure = fname
if 'sca_tempreg' in measure.lower():
fname = os.path.basename(os.path.splitext(os.path.splitext(file_)[0])[0])
fname = fname.split('z_maps_roi_')[1]
fname = 'sca_mult_regression_maps_ROI_' + fname.split('_')[0]
measure = fname
if 'dr_tempreg' in measure.lower():
fname = os.path.basename(os.path.splitext(os.path.splitext(file_)[0])[0])
fname = fname.split('temp_reg_map_')[1]
fname = 'dual_regression_map_'+ fname.split('_')[0]
measure = fname
if 'centrality' in measure.lower():
fname = os.path.basename(os.path.splitext(os.path.splitext(file_)[0])[0])
type_, fname = fname.split('centrality_')
fname = type_ + 'centrality_' + fname.split('_')[0]
measure = fname
hist_path.append(make_histogram(file_, measure))
else:
hist_path = make_histogram(measure_file, measure)
return hist_path
def make_histogram(measure_file, measure):
"""
Generates Histogram Image of intensities for a given input
nifti file.
Parameters
----------
measure_file : string
path to input nifti file
measure : string
Name of the measure label in the plot
Returns
-------
hist_path : string
Path to the generated histogram png
"""
from matplotlib import pyplot
import numpy as np
import nibabel as nb
import os
data = nb.load(measure_file).get_data()
data_flat = data.flatten(order='F')
y, binEdges = np.histogram(data_flat[data_flat != 0], bins=100)
bincenters = 0.5*(binEdges[1:]+binEdges[:-1])
fig = pyplot.figure()
fig.suptitle('%s intensity plot' % measure)
pyplot.plot(bincenters, y, '-')
pyplot.xlabel('intensity')
pyplot.ylabel('# of voxels')
png_name = os.path.join(os.getcwd(), '%s_hist_plot.png' % measure)
fig.savefig(os.path.join(os.getcwd(), png_name))
pyplot.close()
hist_path = os.path.join(os.getcwd(), png_name)
"""
###
hist_file = os.path.join(os.getcwd(), '%s_hist_path_file.txt' % measure)
fl = open(hist_file, 'w')
fl.write(str(measure_file) + '\n')
fl.write(str(hist_path) + '\n')
fl.close()
"""
return hist_path
def drop_percent_(measure_file, percent_):
"""
    Zeros out voxels in the measure file whose intensity is at or above the
    percent_-th percentile of its voxel intensities (computed with fslstats).
    Parameters
    ----------
    measure_file : string
        Input nifti file
    percent_ : float
        percentage of the voxels to keep
    Returns
    -------
    modified_measure_file : string
        measure_file with the top (100 - percent_) percent of voxels zeroed out
"""
import nibabel as nb
import numpy as np
import os
import commands
img = nb.load(measure_file)
data = img.get_data()
x, y, z = data.shape
max_val= float(commands.getoutput('fslstats %s -P %f' %(measure_file, percent_)))
for i in range(x):
for j in range(y):
for k in range(z):
if data[i][j][k] > 0.0:
if data[i][j][k] >= max_val:
data[i][j][k] = 0.0
save_img = nb.Nifti1Image(data, header=img.get_header(), affine=img.get_affine())
f_name = os.path.basename(os.path.splitext(os.path.splitext(measure_file)[0])[0])
saved_name = None
saved_name_correct_header = None
ext = None
if '.nii.gz' in measure_file:
ext = '.nii.gz'
else:
ext = '.nii'
saved_name = '%s_%d_%s' % (f_name, percent_, ext)
saved_name_correct_header = '%s_%d%s' % (f_name, percent_, ext)
save_img.to_filename(saved_name)
commands.getoutput("3dcalc -a %s -expr 'a' -prefix %s" % (saved_name, saved_name_correct_header))
modified_measure_file = os.path.join(os.getcwd(),
saved_name_correct_header)
return modified_measure_file
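# Illustrative call (hypothetical filename): drop_percent_('falff.nii.gz', 95)
# looks up the 95th-percentile intensity with fslstats, zeroes every positive
# voxel at or above it, saves 'falff_95_.nii.gz', and returns the 3dcalc copy
# 'falff_95.nii.gz' with a corrected header.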
def get_spacing(across, down, dimension):
"""
Get Spacing in slices to be selected for montage
display varying in given dimension
Parameters
----------
    across : integer
        # images placed horizontally in montage
    down : integer
        # images stacked vertically in montage
    dimension : integer
        # of slices available along the montage direction
Returns
-------
space : integer
# of images to skip before displaying next one
"""
space = 10
prod = (across*down*space)
if prod > dimension:
while(across*down*space) > dimension:
space -= 1
else:
while(across*down*space) < dimension:
space += 1
return space
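# Illustrative calls: get_spacing(6, 3, 100) shrinks the spacing until
# 6*3*space <= 100, returning 5; get_spacing(6, 3, 250) grows it until
# 6*3*space >= 250, returning 14. The montage functions use the result as the
# slice step so that roughly 18 slices span the usable range.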
def determine_start_and_end(data, direction, percent):
"""
Determine start slice and end slice in data file in
given direction with at least threshold percent of voxels
at start and end slices.
Parameters
----------
data : string
input nifti file
direction : string
axial or sagittal
percent : float
percent(from total) of non zero voxels at starting and ending slice
Returns
-------
start : integer
Index of starting slice
end : integer
Index of the last slice
"""
x, y, z = data.shape
xx1 = 0
xx2 = x - 1
zz1 = 0
zz2 = z - 1
total_non_zero_voxels = len(np.nonzero(data.flatten())[0])
thresh = percent * float(total_non_zero_voxels)
start = None
end = None
if 'axial' in direction:
while(zz2 > 0):
d = len(np.nonzero(data[:, :, zz2].flatten())[0])
if float(d) > thresh:
break
zz2 -= 1
while(zz1 < zz2):
d = len(np.nonzero(data[:, :, zz1].flatten())[0])
if float(d) > thresh:
break
zz1 += 1
start = zz1
end = zz2
else:
while(xx2 > 0):
d = len(np.nonzero(data[xx2, :, :].flatten())[0])
if float(d) > thresh:
break
xx2 -= 1
while(xx1 < xx2):
d = len(np.nonzero(data[xx1, :, :].flatten())[0])
if float(d) > thresh:
break
xx1 += 1
start = xx1
end = xx2
return start, end
def montage_axial(overlay, underlay, png_name, cbar_name):
"""Draws Montage using overlay on Anatomical brain in Axial Direction,
calls make_montage_axial.
Parameters
----------
overlay : string
        Nifti file
underlay : string
Nifti for Anatomical Brain
cbar_name : string
name of the cbar
png_name : string
Proposed name of the montage plot
Returns
-------
png_name : Path to generated PNG
"""
pngs = None
if isinstance(overlay, list):
pngs = []
for ov in overlay:
fname = os.path.basename(os.path.splitext(os.path.splitext(ov)[0])[0])
pngs.append(make_montage_axial(ov, underlay,
fname + '_' + png_name, cbar_name))
else:
pngs = make_montage_axial(overlay, underlay, png_name, cbar_name)
png_name = pngs
return png_name
def make_montage_axial(overlay, underlay, png_name, cbar_name):
"""
Draws Montage using overlay on Anatomical brain in Axial Direction
Parameters
----------
overlay : string
        Nifti file
underlay : string
Nifti for Anatomical Brain
cbar_name : string
name of the cbar
png_name : string
Proposed name of the montage plot
Returns
-------
png_name : Path to generated PNG
"""
import os
import matplotlib
matplotlib.rcParams.update({'font.size': 5})
import matplotlib.cm as cm
try:
from mpl_toolkits.axes_grid1 import ImageGrid
except:
from mpl_toolkits.axes_grid import ImageGrid
import matplotlib.pyplot as plt
import nibabel as nb
import numpy as np
from CPAC.qc.utils import determine_start_and_end, get_spacing
Y = nb.load(underlay).get_data()
X = nb.load(overlay).get_data()
X = X.astype(np.float32)
Y = Y.astype(np.float32)
if 'skull_vis' in png_name:
X[X < 20.0] = 0.0
if 'skull_vis' in png_name or \
't1_edge_on_mean_func_in_t1' in png_name or \
'MNI_edge_on_mean_func_mni' in png_name:
max_ = np.nanmax(np.abs(X.flatten()))
X[X != 0.0] = max_
z1, z2 = determine_start_and_end(Y, 'axial', 0.0001)
spacing = get_spacing(6, 3, z2 - z1)
x, y, z = Y.shape
fig = plt.figure(1)
max_ = np.max(np.abs(Y))
if ('snr' in png_name) or ('reho' in png_name) or \
('vmhc' in png_name) or ('sca_' in png_name) or \
('alff' in png_name) or ('centrality' in png_name) or \
('dr_tempreg' in png_name):
grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
aspect=True, cbar_mode="single", cbar_pad=0.2,
direction="row")
else:
grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
aspect=True, direction="row")
zz = z1
for i in range(6*3):
if zz >= z2:
break
try:
im = grid[i].imshow(np.rot90(Y[:, :, zz]), cmap=cm.Greys_r)
except IndexError as e:
# TODO: send this to the logger instead
print("\n[!] QC Interface: Had a problem with creating the "
"axial montage for {0}\n\nDetails:{1}"
"\n".format(png_name, e))
pass
zz += spacing
x, y, z = X.shape
X[X == 0.0] = np.nan
max_ = np.nanmax(np.abs(X.flatten()))
zz = z1
im = None
for i in range(6*3):
if zz >= z2:
break
try:
            if cbar_name == 'red_to_blue':
im = grid[i].imshow(np.rot90(X[:, :, zz]),
cmap=cm.get_cmap(cbar_name), alpha=0.82,
vmin=0, vmax=max_)
            elif cbar_name == 'green':
im = grid[i].imshow(np.rot90(X[:, :, zz]),
cmap=cm.get_cmap(cbar_name), alpha=0.82,
vmin=0, vmax=max_)
else:
im = grid[i].imshow(np.rot90(X[:, :, zz]),
cmap=cm.get_cmap(cbar_name), alpha=0.82,
vmin=- max_, vmax=max_)
except IndexError as e:
# TODO: send this to the logger instead
print("\n[!] QC Interface: Had a problem with creating the "
"axial montage for {0}\n\nDetails:{1}"
"\n".format(png_name, e))
pass
grid[i].axes.get_xaxis().set_visible(False)
grid[i].axes.get_yaxis().set_visible(False)
zz += spacing
cbar = grid.cbar_axes[0].colorbar(im)
if 'snr' in png_name:
cbar.ax.set_yticks(drange(0, max_))
elif ('reho' in png_name) or ('vmhc' in png_name) or \
('sca_' in png_name) or ('alff' in png_name) or \
('centrality' in png_name) or ('dr_tempreg' in png_name):
cbar.ax.set_yticks(drange(-max_, max_))
plt.axis("off")
png_name = os.path.join(os.getcwd(), png_name)
plt.savefig(png_name, dpi=200, bbox_inches='tight')
plt.close()
matplotlib.rcdefaults()
return png_name
def montage_sagittal(overlay, underlay, png_name, cbar_name):
"""
Draws Montage using overlay on Anatomical brain in Sagittal Direction
calls make_montage_sagittal
Parameters
----------
overlay : string
        Nifti file
underlay : string
Nifti for Anatomical Brain
cbar_name : string
name of the cbar
png_name : string
Proposed name of the montage plot
Returns
-------
png_name : Path to generated PNG
"""
pngs = None
if isinstance(overlay, list):
pngs = []
for ov in overlay:
fname = os.path.basename(os.path.splitext(os.path.splitext(ov)[0])[0])
pngs.append(make_montage_sagittal(ov, underlay, fname + '_' + png_name, cbar_name))
else:
pngs = make_montage_sagittal(overlay, underlay, png_name, cbar_name)
png_name = pngs
return png_name
def make_montage_sagittal(overlay, underlay, png_name, cbar_name):
"""
Draws Montage using overlay on Anatomical brain in Sagittal Direction
Parameters
----------
overlay : string
        Nifti file
underlay : string
Nifti for Anatomical Brain
cbar_name : string
name of the cbar
png_name : string
Proposed name of the montage plot
Returns
-------
png_name : Path to generated PNG
"""
from CPAC.qc.utils import determine_start_and_end, get_spacing
import matplotlib
import os
import numpy as np
matplotlib.rcParams.update({'font.size': 5})
try:
from mpl_toolkits.axes_grid1 import ImageGrid
except:
from mpl_toolkits.axes_grid import ImageGrid
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import nibabel as nb
Y = nb.load(underlay).get_data()
X = nb.load(overlay).get_data()
X = X.astype(np.float32)
Y = Y.astype(np.float32)
if 'skull_vis' in png_name:
X[X < 20.0] = 0.0
if 'skull_vis' in png_name or \
't1_edge_on_mean_func_in_t1' in png_name or \
'MNI_edge_on_mean_func_mni' in png_name:
max_ = np.nanmax(np.abs(X.flatten()))
X[X != 0.0] = max_
x1, x2 = determine_start_and_end(Y, 'sagittal', 0.0001)
spacing = get_spacing(6, 3, x2 - x1)
x, y, z = Y.shape
fig = plt.figure(1)
max_ = np.max(np.abs(Y))
if ('snr' in png_name) or ('reho' in png_name) or \
('vmhc' in png_name) or ('sca_' in png_name) or \
('alff' in png_name) or ('centrality' in png_name) or \
('dr_tempreg' in png_name):
grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
aspect=True, cbar_mode="single", cbar_pad=0.5,
direction="row")
else:
grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
aspect=True, cbar_mode="None", direction="row")
xx = x1
for i in range(6*3):
if xx >= x2:
break
try:
im = grid[i].imshow(np.rot90(Y[xx, :, :]), cmap=cm.Greys_r)
except IndexError as e:
# TODO: send this to the logger instead
print("\n[!] QC Interface: Had a problem with creating the "
"sagittal montage for {0}\n\nDetails:{1}"
"\n".format(png_name, e))
pass
grid[i].get_xaxis().set_visible(False)
grid[i].get_yaxis().set_visible(False)
xx += spacing
x, y, z = X.shape
X[X == 0.0] = np.nan
max_ = np.nanmax(np.abs(X.flatten()))
xx = x1
for i in range(6*3):
if xx >= x2:
break
im = None
try:
            if cbar_name == 'red_to_blue':
im = grid[i].imshow(np.rot90(X[xx, :, :]),
cmap=cm.get_cmap(cbar_name), alpha=0.82,
vmin=0, vmax=max_)
            elif cbar_name == 'green':
im = grid[i].imshow(np.rot90(X[xx, :, :]),
cmap=cm.get_cmap(cbar_name), alpha=0.82,
vmin=0, vmax=max_)
else:
im = grid[i].imshow(np.rot90(X[xx, :, :]),
cmap=cm.get_cmap(cbar_name), alpha=0.82,
vmin=- max_, vmax=max_)
except IndexError as e:
# TODO: send this to the logger instead
print("\n[!] QC Interface: Had a problem with creating the "
"sagittal montage for {0}\n\nDetails:{1}"
"\n".format(png_name, e))
pass
xx += spacing
cbar = grid.cbar_axes[0].colorbar(im)
if 'snr' in png_name:
cbar.ax.set_yticks(drange(0, max_))
elif ('reho' in png_name) or ('vmhc' in png_name) or \
('sca_' in png_name) or ('alff' in png_name) or \
('centrality' in png_name) or ('dr_tempreg' in png_name):
cbar.ax.set_yticks(drange(-max_, max_))
plt.axis("off")
png_name = os.path.join(os.getcwd(), png_name)
plt.savefig(png_name, dpi=200, bbox_inches='tight')
plt.close()
matplotlib.rcdefaults()
return png_name
def montage_gm_wm_csf_axial(overlay_csf, overlay_wm, overlay_gm, underlay, png_name):
"""
    Draws Montage using GM WM and CSF overlays on Anatomical brain in Axial Direction
Parameters
----------
overlay_csf : string
        Nifti file CSF MAP
overlay_wm : string
Nifti file WM MAP
overlay_gm : string
Nifti file GM MAP
underlay : string
Nifti for Anatomical Brain
png_name : string
Proposed name of the montage plot
Returns
-------
png_name : Path to generated PNG
"""
Y = nb.load(underlay).get_data()
z1, z2 = determine_start_and_end(Y, 'axial', 0.0001)
spacing = get_spacing(6, 3, z2 - z1)
X_csf = nb.load(overlay_csf).get_data()
X_wm = nb.load(overlay_wm).get_data()
X_gm = nb.load(overlay_gm).get_data()
X_csf = X_csf.astype(np.float32)
X_wm = X_wm.astype(np.float32)
X_gm = X_gm.astype(np.float32)
Y = Y.astype(np.float32)
max_csf = np.nanmax(np.abs(X_csf.flatten()))
X_csf[X_csf != 0.0] = max_csf
max_wm = np.nanmax(np.abs(X_wm.flatten()))
X_wm[X_wm != 0.0] = max_wm
max_gm = np.nanmax(np.abs(X_gm.flatten()))
X_gm[X_gm != 0.0] = max_gm
x, y, z = Y.shape
fig = plt.figure(1)
max_ = np.max(np.abs(Y))
    grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
                     aspect=True, cbar_mode="None", direction="row")
zz = z1
for i in range(6*3):
if zz >= z2:
break
im = grid[i].imshow(np.rot90(Y[:, :, zz]), cmap=cm.Greys_r)
zz += spacing
x, y, z = X_csf.shape
X_csf[X_csf == 0.0] = np.nan
X_wm[X_wm == 0.0] = np.nan
X_gm[X_gm == 0.0] = np.nan
zz = z1
im = None
for i in range(6*3):
if zz >= z2:
break
im = grid[i].imshow(np.rot90(X_csf[:, :, zz]), cmap=cm.get_cmap('green'), alpha=0.82, vmin=0, vmax=max_csf)
im = grid[i].imshow(np.rot90(X_wm[:, :, zz]), cmap=cm.get_cmap('blue'), alpha=0.82, vmin=0, vmax=max_wm)
im = grid[i].imshow(np.rot90(X_gm[:, :, zz]), cmap=cm.get_cmap('red'), alpha=0.82, vmin=0, vmax=max_gm)
grid[i].axes.get_xaxis().set_visible(False)
grid[i].axes.get_yaxis().set_visible(False)
zz += spacing
cbar = grid.cbar_axes[0].colorbar(im)
plt.axis("off")
png_name = os.path.join(os.getcwd(), png_name)
plt.savefig(png_name, dpi=200, bbox_inches='tight')
plt.close()
return png_name
def montage_gm_wm_csf_sagittal(overlay_csf, overlay_wm, overlay_gm, underlay, png_name):
"""
Draws Montage using GM WM and CSF overlays on Anatomical brain in Sagittal Direction
Parameters
----------
overlay_csf : string
        Nifti file CSF MAP
overlay_wm : string
Nifti file WM MAP
overlay_gm : string
Nifti file GM MAP
underlay : string
Nifti for Anatomical Brain
png_name : string
Proposed name of the montage plot
Returns
-------
png_name : Path to generated PNG
"""
Y = nb.load(underlay).get_data()
x1, x2 = determine_start_and_end(Y, 'sagittal', 0.0001)
spacing = get_spacing(6, 3, x2 - x1)
X_csf = nb.load(overlay_csf).get_data()
X_wm = nb.load(overlay_wm).get_data()
X_gm = nb.load(overlay_gm).get_data()
X_csf = X_csf.astype(np.float32)
X_wm = X_wm.astype(np.float32)
X_gm = X_gm.astype(np.float32)
Y = Y.astype(np.float32)
max_csf = np.nanmax(np.abs(X_csf.flatten()))
X_csf[X_csf != 0.0] = max_csf
max_wm = np.nanmax(np.abs(X_wm.flatten()))
X_wm[X_wm != 0.0] = max_wm
max_gm = np.nanmax(np.abs(X_gm.flatten()))
X_gm[X_gm != 0.0] = max_gm
x, y, z = Y.shape
fig = plt.figure(1)
max_ = np.max(np.abs(Y))
    grid = ImageGrid(fig, 111, nrows_ncols=(3, 6), share_all=True,
                     aspect=True, cbar_mode="None", direction="row")
zz = x1
for i in range(6*3):
if zz >= x2:
break
im = grid[i].imshow(np.rot90(Y[zz, :, :]), cmap=cm.Greys_r)
zz += spacing
x, y, z = X_csf.shape
X_csf[X_csf == 0.0] = np.nan
X_wm[X_wm == 0.0] = np.nan
X_gm[X_gm == 0.0] = np.nan
zz = x1
im = None
for i in range(6*3):
if zz >= x2:
break
im = grid[i].imshow(np.rot90(X_csf[zz, :, :]),
cmap=cm.get_cmap('green'), alpha=0.82, vmin=0,
vmax=max_csf)
im = grid[i].imshow(np.rot90(X_wm[zz, :, :]),
cmap=cm.get_cmap('blue'), alpha=0.82, vmin=0,
vmax=max_wm)
im = grid[i].imshow(np.rot90(X_gm[zz, :, :]),
cmap=cm.get_cmap('red'), alpha=0.82, vmin=0,
vmax=max_gm)
grid[i].axes.get_xaxis().set_visible(False)
grid[i].axes.get_yaxis().set_visible(False)
zz += spacing
cbar = grid.cbar_axes[0].colorbar(im)
plt.axis("off")
png_name = os.path.join(os.getcwd(), png_name)
plt.savefig(png_name, dpi=200, bbox_inches='tight')
plt.close()
return png_name
def register_pallete(file_, cbar_name):
"""
    Registers a color palette with matplotlib
Parameters
----------
file_ : string
        file containing one color in hexadecimal format per line
cbar_name : string
Proposed name for the color bar
Returns
-------
None
"""
import matplotlib.colors as col
import matplotlib.cm as cm
f = open(file_, 'r')
colors_ = f.readlines()
colors = []
for color in reversed(colors_):
colors.append(color.rstrip('\r\n'))
cmap3 = col.ListedColormap(colors, cbar_name)
cm.register_cmap(cmap=cmap3)
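# Expected palette file format (illustrative): one hexadecimal colour per line,
# e.g. "#0000ff" ... "#ff0000"; the lines are read bottom-to-top into a
# ListedColormap registered under cbar_name, so the montage functions can later
# call cm.get_cmap(cbar_name).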
def resample_1mm(file_):
"""
Calls make_resample_1mm which resamples file to 1mm space
Parameters
----------
file_ : string
path to the scan
Returns
-------
new_fname : string
path to 1mm resampled nifti file
"""
new_fname = None
if isinstance(file_, list):
new_fname = []
for f in file_:
new_fname.append(make_resample_1mm(f))
else:
new_fname = make_resample_1mm(file_)
return new_fname
def make_resample_1mm(file_):
"""
Resamples input nifti file to 1mm space
Parameters
----------
file_ : string
Input Nifti File
Returns
-------
new_fname : string
Input Nifti resampled to 1mm space
"""
import os
import commands
remainder, ext_ = os.path.splitext(file_)
remainder, ext1_ = os.path.splitext(remainder)
ext = ''.join([ext1_, ext_])
new_fname = ''.join([remainder, '_1mm', ext])
new_fname = os.path.join(os.getcwd(), os.path.basename(new_fname))
cmd = " 3dresample -dxyz 1.0 1.0 1.0 -prefix %s " \
"-inset %s " % (new_fname, file_)
commands.getoutput(cmd)
return new_fname
|
#!/usr/bin/env python
# Test of relationship between system and ros time
import time
import rospy
from bdbd_common.utils import fstr
from nav_msgs.msg import Odometry
def odom_cb(odometry):
global ros_start
global sys_start
now = float(odometry.header.stamp.secs + 1.0e-9 * odometry.header.stamp.nsecs)
if ros_start is None:
ros_start = now
sys_start = time.time()
else:
lag = (time.time() - sys_start) - (now - ros_start)
print(fstr({'lag ms': lag * 1000}))
# start executing the action, driven by odometry message receipt
ros_start = None
sys_start = None
rospy.init_node('timetest')
rate = rospy.Rate(100)
while (not rospy.is_shutdown()):
sys = time.time()
ros = rospy.get_time()
print(fstr({'sys': sys, 'ros': ros, 'dif': sys-ros}))
rate.sleep()
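# Note: odom_cb is defined above but never wired up in this test as written. A
# minimal subscription sketch (the '/odom' topic name is an assumption) placed
# right after rospy.init_node would be:
#     rospy.Subscriber('/odom', Odometry, odom_cb)
# after which each Odometry message prints the system-vs-ROS-time lag in ms.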
|
import metastore.backend as metastore
from migrate_metadata import migrator
def test_migrate_datasets_migrates_only_of_type_dataset():
datasets = [
{"name": "test_pkg_0", "author": "test_user", "type": "dataset"},
{"name": "test_pkg_1", "author": "test_user", "type": "showcase"},
{"name": "test_pkg_2", "author": "test_user", "type": "dataset"},
]
metastore_client = metastore.create_metastore("filesystem", dict(uri="mem://"))
    number_of_datasets = migrator.migrate_datasets(datasets, metastore_client)
assert number_of_datasets == 2
|
import torch
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch import nn, cat
import torch.nn.functional as F
import pytorch_lightning as pl
from torch.optim import Adam
from teach.logger import create_logger
logger = create_logger(__name__)
class NaiveMultiModalModel(pl.LightningModule):
def __init__(
self,
image_conv_kwargs,
image_hidden_layer_sizes,
text_word_vec_size,
text_input_words,
text_hidden_layer_sizes,
prev_actions_input_size,
prev_actions_hidden_layer_sizes,
combination_hidden_layers_size,
output_layer_size,
activations="relu",
):
super().__init__()
self.save_hyperparameters()
self.learning_rate = 0.001
activation = NaiveMultiModalModel._get_activation_layer(activations)
image_conv_seq_layers = []
for kwargs in image_conv_kwargs:
image_conv_seq_layers.append(nn.Conv2d(**kwargs))
image_conv_seq_layers.append(nn.BatchNorm2d(kwargs["out_channels"]))
image_conv_seq_layers.append(activation())
self.image_conv_input = nn.Sequential(
*image_conv_seq_layers
)
image_seq_layers = []
for in_size, out_size in image_hidden_layer_sizes:
image_seq_layers.append(nn.Linear(in_size, out_size))
image_seq_layers.append(nn.BatchNorm1d(out_size))
image_seq_layers.append(activation())
self.image_ffn_input = nn.Sequential(
*image_seq_layers
)
text_input_size = text_word_vec_size * text_input_words
prev_size = text_input_size
text_seq_layers = []
for size in text_hidden_layer_sizes:
text_seq_layers.append(nn.Linear(prev_size, size))
text_seq_layers.append(nn.BatchNorm1d(size))
text_seq_layers.append(activation())
prev_size = size
self.text_input = nn.Sequential(
*text_seq_layers
)
prev_action_seq_layers = []
prev_size = prev_actions_input_size
for size in prev_actions_hidden_layer_sizes:
prev_action_seq_layers.append(nn.Linear(prev_size, size))
prev_action_seq_layers.append(nn.BatchNorm1d(size))
prev_action_seq_layers.append(activation())
prev_size = size
self.prev_actions_input = nn.Sequential(
*prev_action_seq_layers
)
comb_seq_layers = []
prev_size = combination_hidden_layers_size[0]
for size in combination_hidden_layers_size[1:]:
comb_seq_layers.append(nn.Linear(prev_size, size))
comb_seq_layers.append(nn.BatchNorm1d(size))
comb_seq_layers.append(activation())
prev_size = size
        # Output raw logits: F.cross_entropy in the training/validation steps
        # applies log-softmax internally, so no final nn.Softmax() layer is added.
        comb_seq_layers.append(nn.Linear(prev_size, output_layer_size))
self.comb = nn.Sequential(
*comb_seq_layers
)
@staticmethod
def _get_activation_layer(activation="relu"):
return {
"relu": nn.ReLU,
"tanh": nn.Tanh,
"sigmoid": nn.Sigmoid,
"softmax": nn.Softmax,
}[activation]
def forward(self, current_image_tensor, text_tensor, previous_actions_tensor):
image_conv_h = self.image_conv_input(current_image_tensor)
image_h = self.image_ffn_input(image_conv_h.flatten(start_dim=1))
text_h = self.text_input(text_tensor.flatten(start_dim=1))
prev_action_h = self.prev_actions_input(previous_actions_tensor.flatten(start_dim=1))
# logger.info(f"IMAGE H SHAPE: {image_h.size()}")
# logger.info(f"TEXT H SHAPE: {text_h.size()}")
# logger.info(f"PREV ACTION H SHAPE: {prev_action_h.size()}")
comb_h = cat((image_h, text_h, prev_action_h), dim=1)
z_out = self.comb(comb_h)
return z_out
def training_step(self, batch, batch_idx) -> STEP_OUTPUT:
x, y = batch
x_text, x_image, x_prev_actions = x["text"], x["cur_image"], x["prev_actions"]
z = self.forward(x_image, x_text, x_prev_actions)
loss = F.cross_entropy(z, y)
return loss if (not torch.isnan(loss) and not torch.isinf(loss)) else None
def validation_step(self, batch, batch_idx):
x, y = batch
x_text, x_image, x_prev_actions = x["text"], x["cur_image"], x["prev_actions"]
y_hat = self.forward(x_image, x_text, x_prev_actions)
loss = F.cross_entropy(y_hat, y)
self.log("val_loss", loss)
return loss
def configure_optimizers(self):
return Adam(self.parameters(), lr=self.learning_rate)
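if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the original training code). All layer
    # sizes below are hypothetical and chosen only so the tensor shapes line up:
    # a 3x64x64 image passed through the conv below yields 8x31x31 features.
    model = NaiveMultiModalModel(
        image_conv_kwargs=[{"in_channels": 3, "out_channels": 8, "kernel_size": 3, "stride": 2}],
        image_hidden_layer_sizes=[(8 * 31 * 31, 64)],
        text_word_vec_size=50,
        text_input_words=10,
        text_hidden_layer_sizes=[64],
        prev_actions_input_size=8,
        prev_actions_hidden_layer_sizes=[16],
        combination_hidden_layers_size=[64 + 64 + 16, 64],
        output_layer_size=10,
    )
    # Batch of 4 so the BatchNorm layers see more than one sample in train mode.
    logits = model(
        torch.randn(4, 3, 64, 64),   # current_image_tensor
        torch.randn(4, 10, 50),      # text_tensor: (batch, words, word_vec)
        torch.randn(4, 8),           # previous_actions_tensor
    )
    print(logits.shape)  # expected: torch.Size([4, 10])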
|
import os
import cv2
import face_recognition
import PySimpleGUI as sg
# Initial setting
tolerance = 0.5
frame_drop = 0
vcap = cv2.VideoCapture('img/video.mp4', cv2.CAP_FFMPEG)
# Mosaic function
def mosaic(src, ratio):
small = cv2.resize(src, None, fx=ratio, fy=ratio, interpolation=cv2.INTER_NEAREST)
return cv2.resize(small, src.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
# Make window layout
sg.theme('Reddit')
layout = [
[sg.Text('blur')],
[sg.Image(key='display')],
[sg.Button('terminate', key='terminate', button_color='red')]
]
window = sg.Window('blur', layout, location=(50, 50), disable_close=True)
# Encode face images in known_face directory
priset_face_image_encoding_list = []
for face in os.listdir('known_face'):
    priset_face_img = face_recognition.load_image_file('known_face/' + face)
    priset_face_img_location = face_recognition.face_locations(priset_face_img, 0, 'cnn')
    priset_face_image_encoding = face_recognition.face_encodings(priset_face_img, priset_face_img_location, 10, 'small')
priset_face_image_encoding_list.append(priset_face_image_encoding[0])
# Process each frame
frame_counter = 0
while True:
ret, frame = vcap.read()
if ret == False:
break
# Frame drop process
if frame_drop >= frame_counter:
frame_counter += 1
continue
frame_counter = 0
dst = frame.copy()
face_locations = face_recognition.face_locations(frame, 0, 'cnn')
# Mosaicize only if a face is present
if len(face_locations) > 0:
        # flag is True when a person from the known_face directory appears in this frame
        flag = False
        # Apply the mosaic to each face region detected in this frame
for (top, right, bottom, left) in face_locations:
face_encoding = face_recognition.face_encodings(frame, [(top, right, bottom, left)], 0, 'small')
for priset_face_image_encoding in priset_face_image_encoding_list:
distance = face_recognition.face_distance(priset_face_image_encoding, face_encoding)
if distance[0] < tolerance:
flag = True
if flag == False:
# rectangle blur ======
# dst[top:top + (bottom - top), left:left + (right - left)] = cv2.blur(dst[top:top + (bottom - top), left:left + (right - left)], (50, 50))
# mosaic blur =========
dst[top:top + (bottom - top), left:left + (right - left)] = mosaic(dst[top:top + (bottom - top), left:left + (right - left)], 0.1)
flag = False
event, _ = window.read(timeout=1)
imgbytes = cv2.imencode(".png", dst)[1].tobytes()
window["display"].update(data=imgbytes)
if event == sg.WIN_CLOSED or event == 'terminate':
break
vcap.release()
window.close()
|
import os
import re
import collections
import base64
import gzip
import datetime as dt
import pandas as pd
import simplejson as json
from .flexbox import FlexboxCSS
class Util:
@staticmethod
def data_to_json(value, widget):
dump = json.dumps(value, default=Util.json_serial, ignore_nan=True)
if (widget.compress_data):
res = (base64.encodebytes(gzip.compress((dump).encode('utf-8'), compresslevel=9))).decode('utf-8')
return res
else:
return dump
@staticmethod
def data_from_json(value, widget):
return json.loads(gzip.decompress(value))
@staticmethod
def options_to_json(value, widget):
res = (base64.encodebytes(gzip.compress(value.encode('utf-8'), compresslevel=9))).decode('utf-8')
return res
@staticmethod
def multi_options_to_json(value, widget):
res = []
for opt in value:
res.append([opt[0], (base64.encodebytes(gzip.compress(
opt[1].encode('utf-8'), compresslevel=9))).decode('utf-8')])
return res
@staticmethod
def json_serial(obj):
"""
"""
if isinstance(obj, (dt.datetime, dt.date, pd.Timestamp)):
return obj.isoformat()
return obj
@staticmethod
def strip_comments(code):
lines = code.split('\n')
lines = [e.strip() for e in lines]
lines = [e for e in lines if not e.startswith('//')]
code = '\n'.join(lines)
return code
@staticmethod
def sanitize_str(string):
string2 = Util.strip_comments(string)
string2 = string2.replace('\n', '')
string2 = string2.replace('\t', ' ')
string2 = string2.replace('\"', '\'')
return string2
@staticmethod
def sanitize_struct(e):
if isinstance(e, (list, tuple)):
return [Util.sanitize_struct(sub_e) for sub_e in e]
elif isinstance(e, dict):
return {k: Util.sanitize_struct(v) for k, v in e.items()}
elif isinstance(e, str):
return Util.sanitize_str(e)
else:
return e
@staticmethod
def is_df(data):
if isinstance(data, pd.core.frame.DataFrame):
return True
return False
@staticmethod
def is_multiindex_row_df(df):
if Util.is_df(df):
if isinstance(df.index, pd.core.indexes.multi.MultiIndex):
return True
return False
@staticmethod
def is_multiindex_col_df(df):
if isinstance(df, pd.core.frame.DataFrame):
if isinstance(df.columns, pd.core.indexes.multi.MultiIndex):
return True
return False
@staticmethod
def is_multiindex_df(df):
if Util.is_multiindex_row_df(df) or Util.is_multiindex_col_df(df):
return True
return False
@staticmethod
def build_colDefs_for_si_cols(df, verbose=False):
colDefs = []
for c in df.columns:
dic = {}
col = df[c]
field = col.name
header_name = field.title()
if col.dtype.kind in 'O':
# string
dic['field'] = field
dic['type'] = 'textColumn'
dic['headerName'] = header_name
if col.dtype.kind in 'ifc':
# number
dic['field'] = field
dic['type'] = 'numberColumn'
dic['headerName'] = header_name
if col.dtype.kind in 'M':
# date
dic['field'] = field
dic['type'] = 'dateColumn'
dic['headerName'] = header_name
colDefs.append(dic)
return colDefs
@staticmethod
def build_colDefs_for_mi_cols(df):
"""
create agGrid columnDefs dict for column grouping
from multiindex dataframe columns
"""
# utility
def get_idx(s, x):
li_headerName = [e['colName'] for e in s]
if x not in li_headerName:
return -1
else:
return li_headerName.index(x)
mindexcol = df.columns
li_idx_col = mindexcol.tolist()
s = []
for levels in li_idx_col:
col = df.loc[:, levels]
L = len(levels)
s2 = s
flat_field = None
for k, e in enumerate(levels):
if flat_field:
flat_field = flat_field + '-' + e
else:
flat_field = e
if k < L - 1:
i = get_idx(s2, e)
if i < 0:
new_e = {'colName': e,
'headerName': e.title(),
'children': []}
s2.append(new_e)
i = len(s2) - 1
s2 = s2[i]['children']
else:
flat_field = flat_field.replace('.', '_')
new_e = {'field': flat_field,
'headerName': e.title()}
if col.dtype.kind in 'O':
# string
new_e['type'] = 'textColumn'
if col.dtype.kind in 'ifc':
# number
new_e['type'] = 'numberColumn'
if col.dtype.kind in 'M':
# date
new_e['type'] = 'dateColumn'
s2.append(new_e)
return s
@staticmethod
def build_colDefs_for_mi_rows(df, keep_multiindex):
"""
create agGrid columnDefs dict for column grouping
from multiindex dataframe columns
"""
mindexrow = df.index
s = []
for e in list(mindexrow.names):
new_e = {'field': e,
'headerName': e.title(),
'rowGroup': True}
if not keep_multiindex:
new_e['hide'] = True
s.append(new_e)
return s
@staticmethod
def flatten_mi_col_df(dfmi):
"""
        create flattened dataframe
multi index col ('a', 'b', 'c') turned to 'a-b-c'
"""
df = dfmi.copy()
cols = ['-'.join(col).strip() for col in df.columns.values]
df.columns = cols
df.columns.name = 'UniqueCol'
return df
@staticmethod
def flatten_mi_row_df(dfmi):
"""
        create flattened dataframe
multi index row added as regular column
"""
df = dfmi.reset_index()
return df
@staticmethod
def prepare_multiindex_df(dfmi,
options,
index=False,
keep_multiindex=False,
verbose=False):
"""
Prepare multiindex dataframe (data) and options
to display it with corresponding row grouping and
column grouping
To do that the dataframe is modified
+ multi index columns are flattened
+ multi index rows are made regular columns
+ columnDef in options are replaced with valid config
(existing columnDefs if any is replaced)
"""
df_data = dfmi
if index:
df_data = Util.add_index_as_col(df_data,
verbose=verbose)
if Util.is_multiindex_col_df(df_data):
columnDefs_col = Util.build_colDefs_for_mi_cols(df_data)
df_data = Util.flatten_mi_col_df(df_data)
else:
columnDefs_col = Util.build_colDefs_for_si_cols(df_data)
if Util.is_multiindex_row_df(df_data):
columnDefs_row = Util.build_colDefs_for_mi_rows(df_data,
keep_multiindex)
df_data = Util.flatten_mi_row_df(df_data)
else:
columnDefs_row = []
if index:
df_data = Util.add_index_as_col(df_data,
verbose=verbose)
new_columnDefs = columnDefs_row + columnDefs_col
options['columnDefs'] = new_columnDefs
options['enableRowGroup'] = True
return df_data, options
@staticmethod
def prepare_singleindex_df(data,
options,
index=False,
verbose=False):
"""
Prepare single index dataframe (data) and options
To do that the dataframe is modified
+ dots are replaced by underscore in column names
+ index is added in columns if requested
+ types are inferred from column types
"""
data = Util.correct_df_col_name(data,
verbose=verbose)
if index:
data = Util.add_index_as_col(data,
verbose=verbose)
if 'columnDefs' in options:
options = Util.update_columnDefs(data,
options,
verbose=verbose)
else:
options = Util.implicit_columnDefs(data,
options,
verbose=verbose)
return data, options
@staticmethod
def correct_df_col_name(data, verbose=False):
new_col = [e.replace('.', '_') for e in data.columns]
new_col_diff = [data.columns[i] != new_col[i]
for i in range(len(data.columns))]
if sum(new_col_diff) > 0:
if verbose:
print('In dataframe column names "." are replaced by "_".', end=' ')
print('Make sure columDefs match.')
data.columns = new_col
return data
@staticmethod
def add_index_as_col(data, verbose=False):
data = data.reset_index()
return data
@staticmethod
def update_columnDefs(df, grid_options, verbose=False):
colDefs = grid_options.get('columnDefs', [])
for colDef in colDefs:
field = colDef.get('field')
if field:
if field in df.columns:
dic = colDef
col = df[field]
if col.dtype.kind in 'O':
# string
dic['type'] = 'textColumn'
if col.dtype.kind in 'ifc':
# number
dic['type'] = 'numberColumn'
if col.dtype.kind in 'M':
# date
dic['type'] = 'dateColumn'
grid_options['columnDefs'] = colDefs
return grid_options
@staticmethod
def implicit_columnDefs(df, grid_options, verbose=False):
colDefs = Util.build_colDefs_for_si_cols(df, verbose=verbose)
grid_options['columnDefs'] = colDefs
return grid_options
@staticmethod
def update_columnTypes(grid_options, verbose=False):
columnTypes = grid_options.get('columnTypes', {})
numberColumn = {
'filter': 'agNumberColumnFilter',
}
if not 'numberColumn' in columnTypes:
columnTypes['numberColumn'] = numberColumn
textColumn = {
'filter': 'agTextColumnFilter'
}
if not 'textColumn' in columnTypes:
columnTypes['textColumn'] = textColumn
dateColumn = {
'valueFormatter': 'helpers.dateFormatter',
'filter': 'agDateColumnFilter',
'filterParams': {
'comparator': 'helpers.compareDates'
}
}
if not 'dateColumn' in columnTypes:
columnTypes['dateColumn'] = dateColumn
grid_options['columnTypes'] = columnTypes
return grid_options
@staticmethod
def build_css_rules(css_rules):
css_rules = re.findall(r'[^\{]+\{[^\}]*\}',
css_rules,
re.MULTILINE)
css_rules = [Util.sanitize_str(e) for e in css_rules]
return css_rules
@staticmethod
def get_license(filename='.ag_grid_license'):
path = os.path.join(os.path.expanduser('~'), filename)
with open(path, 'r') as f:
license = f.read()
return license
@staticmethod
def encode_b64(string):
return base64.b64encode(bytes(string, 'utf-8')).decode('utf-8')
@staticmethod
def build_data(data):
if data is None:
return {}
if isinstance(data, pd.core.frame.DataFrame):
data = data.to_dict(orient='records')
return data
@staticmethod
def build_options(options, is_multi):
if options is None:
return {}
options = Util.sanitize_struct(options)
if not is_multi:
options_json = json.dumps(options,
default=Util.json_serial,
ignore_nan=True)
return options_json
else:
options = options['data']
options_json = []
for option in options:
option[1] = json.dumps(option[1],
default=Util.json_serial,
ignore_nan=True)
return options
@staticmethod
def load_file(folder, filename):
"""
"""
here = os.path.dirname(__file__)
path = os.path.join(here, folder, filename)
with open(path, 'r') as f:
content = f.read()
return content
@staticmethod
def setup_menu(grid, menu_in, menu, options):
"""
Builds a complete menu setup out of gridOptions and menu
"""
def add_button(menu_a, name, action):
"""
Manages append regular buttons in the menu['buttons'] list.
"""
for i in range(len(menu_a['buttons'])):
button = menu_a['buttons'][i]
if button['name'] == name:
button['action'] = action
if 'hide' in button and button['hide']:
menu_a['buttons'].pop(i)
return
menu_a['buttons'].append({'name': name, 'action': action})
# Add csv export
if grid.export_csv:
add_button(
menu_in, 'Export to CSV',
'console.log("exporting to CSV"); helpers.exportToCsv(gridOptions);')
# Add excel export
if grid.export_excel:
add_button(
menu_in, 'Export to Excel',
'console.log("exporting to Excel"); helpers.exportToExcel(gridOptions);')
# Add buttons for export
if grid.export_mode == 'buttons':
# Add export data from selected range selection
add_button(
menu_in, 'Export Grid',
'exportFunc.exportGrid(gridOptions, view, parseInt(inputAggregationLevel.value, 10))')
if 'enableRangeSelection' in options and options[
'enableRangeSelection']:
# Add export data from selected range selection
add_button(menu_in, 'Export Range Data',
'exportFunc.exportRangeData(gridOptions, view)')
# Add export columns from selected range selection
add_button(menu_in, 'Export Columns',
'exportFunc.exportColumns(gridOptions, view)')
# Add export rows from selected range selection
add_button(menu_in, 'Export Rows',
'exportFunc.exportRowsOfRange(gridOptions, view)')
if 'rowSelection' in options:
# Add export rows from selected rows
add_button(menu_in, 'Export Rows',
'exportFunc.exportRows(gridOptions, view)')
# Manage menu CSS
# Build button CSS
menu_in['button_default_css'] = menu_in.get(
'button_default_css', {'font-size': '12px'})
menu_in['button_default_flex_css'] = FlexboxCSS(menu_in.get(
'button_default_flex_css', {'width': '150px', 'margin': '5px'}), kind='item')
# Manage buttons individual actions and CSS
for button in menu_in['buttons']:
if not (button.get('hide', False)):
options = {}
if 'custom_css' in button:
options = button['custom_css']
else:
options = menu_in.get(
'button_default_css', {})
if (button['name'] == 'Export Grid'):
name = button['name'].lower().replace(
' ', '-')+'-'+str(grid._id)
if 'flex_css' in button:
button['container_css'] = FlexboxCSS(
button['flex_css'], kind='item').build_css('container-'+name, opt_dic={})
button['css'] = FlexboxCSS(
{}, kind='item').build_css(name, opt_dic=options)
else:
button['container_css'] = menu_in['button_default_flex_css'].build_css(
'container-'+name, opt_dic={})
button['css'] = FlexboxCSS(
{}, kind='item').build_css(name, opt_dic=options)
else:
name = button['name'].lower().replace(
' ', '-')+'-'+str(grid._id)
if 'flex_css' in button:
button['css'] = FlexboxCSS(
button['flex_css'], kind='item').build_css(name, opt_dic=options)
else:
button['css'] = menu_in['button_default_flex_css'].build_css(
name, opt_dic=options)
menu['buttons'].append(button)
# Managing inputs custom CSS
def add_input(menu_a, name):
"""
            Append an input to menu_a['inputs'], or remove an existing entry
            with the same name if it is marked hidden.
"""
for i in range(len(menu_a['inputs'])):
elem = menu_a['inputs'][i]
if elem['name'] == name:
if 'hide' in elem and elem['hide']:
                        # Remove the element from inputs if hide == True
                        menu_a['inputs'].pop(i)
return
menu_a['inputs'].append({'name': name})
# Add inputs
if grid.quick_filter:
add_input(menu_in, 'Quick Filter')
if grid.is_multi:
add_input(menu_in,'Dropdown Menu')
if grid.show_toggle_edit:
add_input(menu_in, 'Toggle Edit')
if grid.show_toggle_delete:
add_input(menu_in, 'Toggle Delete')
# Build button CSS
menu_in['input_default_flex_css'] = FlexboxCSS(menu_in.get(
'input_default_flex_css', {'margin': '5px'}), kind='item')
# Manage buttons individual actions and CSS
for elem in menu_in['inputs']:
options = {}
if elem['name']=='Quick Filter' or elem['name']=='Dropdown Menu':
options['width'] = '150px'
name = elem['name'].lower().replace(
' ', '-')+'-'+str(grid._id)
if 'flex_css' in elem:
options.update(elem['flex_css'])
elem['css'] = FlexboxCSS(
options, kind='item').build_css(name)
else:
elem['css'] = menu_in['input_default_flex_css'].build_css(
name, opt_dic=options)
menu['inputs'].append(elem)
# Managing button-div CSS
if not ('button_div_css' in menu_in):
menu_in['button_div_css'] = FlexboxCSS({}, kind='container')
else:
menu_in['button_div_css'] = FlexboxCSS(
menu_in['button_div_css'], kind='container')
menu['button_div_css'] = menu_in['button_div_css'].build_css(
'button-div-' + str(grid._id))
if not ('input_div_css' in menu_in):
menu_in['input_div_css'] = FlexboxCSS(
{'align-items': 'baseline'}, kind='container')
else:
menu_in['input_div_css'] = FlexboxCSS(
menu_in['input_div_css'], kind='container')
# Managing input-div CSS
menu['input_div_css'] = menu_in['input_div_css'].build_css(
'input-div-'+str(grid._id))
if not ('menu_div_css' in menu_in):
menu_in['menu_div_css'] = FlexboxCSS({}, kind='container')
else:
menu_in['menu_div_css'] = FlexboxCSS(
menu_in['menu_div_css'], kind='container')
# Managing menu-div CSS
menu['menu_div_css'] = menu_in['menu_div_css'].build_css(
'menu-div-'+str(grid._id))
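if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module). Because of the
    # relative import above, run it as a module, e.g. `python -m <your_package>.util`
    # (package name hypothetical). It shows how prepare_multiindex_df() flattens a
    # MultiIndex DataFrame and builds matching agGrid columnDefs.
    dfmi = pd.DataFrame(
        [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]],
        index=pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['grp', 'sub']),
        columns=pd.MultiIndex.from_product([['x', 'y'], ['u', 'v']]),
    )
    data, opts = Util.prepare_multiindex_df(dfmi, {}, index=False, keep_multiindex=False)
    print(data.columns.tolist())  # ['grp', 'sub', 'x-u', 'x-v', 'y-u', 'y-v']
    print(opts['columnDefs'])     # row-group defs for grp/sub followed by the grouped column defs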
|
from .classic_measures import *
from .truediversity import *
|
"""
The View classes render the data returned by a Source as a Panel
object.
"""
import sys
from io import StringIO
from weakref import WeakKeyDictionary
import numpy as np
import param
import panel as pn
from bokeh.models import NumeralTickFormatter
from panel.pane.base import PaneBase
from panel.pane.perspective import (
THEMES as _PERSPECTIVE_THEMES, Plugin as _PerspectivePlugin
)
from panel.param import Param
from ..config import _INDICATORS
from ..filters import ParamFilter
from ..sources import Source
from ..transforms import Transform
from ..util import resolve_module_reference
class View(param.Parameterized):
"""
A View renders the data returned by a Source as a Viewable Panel
object. The base class provides methods which query the Source for
the latest data given the current filters and applies all
specified `transforms`.
Subclasses should use these methods to query the data and return
a Viewable Panel object in the `get_panel` method.
"""
controls = param.List(default=[], doc="""
Parameters that should be exposed as widgets in the UI.""")
filters = param.List(constant=True, doc="""
A list of Filter object providing the query parameters for the
Source.""")
source = param.ClassSelector(class_=Source, constant=True, doc="""
The Source to query for the data.""")
selection_group = param.String(default=None, doc="""
Declares a selection group the plot is part of. This feature
requires the separate HoloViews library.""")
transforms = param.List(constant=True, doc="""
A list of transforms to apply to the data returned by the
Source before visualizing it.""")
table = param.String(doc="The table being visualized.")
field = param.Selector(doc="The field being visualized.")
view_type = None
# Panel extension to load to render this View
_extension = None
# Parameters which reference fields in the table
_field_params = ['field']
_selections = WeakKeyDictionary()
_supports_selections = False
__abstract = True
def __init__(self, **params):
self._cache = None
self._ls = None
self._panel = None
self._updates = None
self.kwargs = {k: v for k, v in params.items() if k not in self.param}
# Populate field selector parameters
params = {k: v for k, v in params.items() if k in self.param}
source, table = params.pop('source', None), params.pop('table', None)
if source is None:
raise ValueError("Views must declare a Source.")
if table is None:
raise ValueError("Views must reference a table on the declared Source.")
fields = list(source.get_schema(table))
for fp in self._field_params:
if isinstance(self.param[fp], param.Selector):
self.param[fp].objects = fields
super().__init__(source=source, table=table, **params)
for transform in self.transforms:
for fp in transform._field_params:
if isinstance(transform.param[fp], param.Selector):
transform.param[fp].objects = fields
if self.selection_group:
self._init_link_selections()
def _init_link_selections(self):
doc = pn.state.curdoc
if self._ls is not None or doc is None:
return
if doc not in View._selections and self.selection_group:
View._selections[doc] = {}
self._ls = View._selections.get(doc, {}).get(self.selection_group)
if self._ls is None:
from holoviews.selection import link_selections
self._ls = link_selections.instance()
if self.selection_group:
View._selections[doc][self.selection_group] = self._ls
if 'selection_expr' in self.param:
self._ls.param.watch(self._update_selection_expr, 'selection_expr')
def _update_selection_expr(self, event):
self.selection_expr = event.new
@classmethod
def _get_type(cls, view_type):
"""
Returns the matching View type.
"""
if '.' in view_type:
return resolve_module_reference(view_type, View)
try:
__import__(f'lumen.views.{view_type}')
except Exception:
pass
for view in param.concrete_descendents(cls).values():
if view.view_type == view_type:
return view
if view_type is not None:
raise ValueError(f"View type '{view_type}' could not be found.")
return View
@classmethod
def from_spec(cls, spec, source, filters):
"""
Resolves a View specification given the schema of the Source
it will be filtering on.
Parameters
----------
spec: dict
Specification declared as a dictionary of parameter values.
source: lumen.sources.Source
The Source object containing the tables the View renders.
filters: list(lumen.filters.Filter)
A list of Filter objects which provide query values for
the Source.
Returns
-------
The resolved View object.
"""
spec = dict(spec)
transform_specs = spec.pop('transforms', [])
transforms = [Transform.from_spec(tspec) for tspec in transform_specs]
view_type = View._get_type(spec.pop('type', None))
resolved_spec = {}
for p, value in spec.items():
if p not in view_type.param:
resolved_spec[p] = value
continue
parameter = view_type.param[p]
if isinstance(parameter, param.ObjectSelector) and parameter.names:
try:
value = parameter.names.get(value, value)
except Exception:
pass
resolved_spec[p] = value
view = view_type(
filters=filters, source=source, transforms=transforms,
**resolved_spec
)
# Resolve ParamFilter parameters
for filt in filters:
if isinstance(filt, ParamFilter):
if not isinstance(filt.parameter, str):
continue
name, parameter = filt.parameter.split('.')
if name == view.name and parameter in view.param:
filt.parameter = view.param[parameter]
return view
def __bool__(self):
return self._cache is not None and len(self._cache) > 0
def _update_panel(self, *events):
"""
Updates the cached Panel object and returns a boolean value
indicating whether a rerender is required.
"""
if self._panel is not None:
self._cleanup()
self._updates = self._get_params()
if self._updates is not None:
return False
self._panel = self.get_panel()
return True
def _cleanup(self):
"""
Method that is called on update.
"""
def get_data(self):
"""
Queries the Source for the specified table applying any
filters and transformations specified on the View. Unlike
`get_value` this should be used when multiple return values
are expected.
Returns
-------
DataFrame
The queried table after filtering and transformations are
applied.
"""
if self._cache is not None:
return self._cache
query = {}
for filt in self.filters:
filt_query = filt.query
if (filt_query is not None and
not getattr(filt, 'disabled', None) and
(filt.table is None or filt.table == self.table)):
query[filt.field] = filt_query
data = self.source.get(self.table, **query)
for transform in self.transforms:
data = transform.apply(data)
if len(data):
data = self.source._filter_dataframe(data, **query)
for filt in self.filters:
if not isinstance(filt, ParamFilter):
continue
from holoviews import Dataset
if filt.value is not None:
ds = Dataset(data)
data = ds.select(filt.value).data
self._cache = data
return data
def get_value(self, field=None):
"""
Queries the Source for the data associated with a particular
field applying any filters and transformations specified on
the View. Unlike `get_data` this method returns a single
scalar value associated with the field and should therefore
        only be used if only a single value is expected.
Parameters
----------
field: str (optional)
The field from the table to return; if None uses
field defined on the View.
Returns
-------
object
A single scalar value representing the current value of
the queried field.
"""
data = self.get_data()
if not len(data) or field is not None and field not in data.columns:
return None
row = data.iloc[-1]
return row[self.field if field is None else field]
def get_panel(self):
"""
Constructs and returns a Panel object which will represent a
view of the queried table.
Returns
-------
panel.Viewable
A Panel Viewable object representing a current
representation of the queried table.
"""
return pn.panel(self.get_data())
def update(self, *events, invalidate_cache=True):
"""
Triggers an update in the View.
Parameters
----------
events: tuple
param events that may trigger an update.
invalidate_cache : bool
Whether to clear the View's cache.
Returns
-------
stale : bool
Whether the panel on the View is stale and needs to be
rerendered.
"""
if invalidate_cache:
self._cache = None
return self._update_panel()
def _get_params(self):
return None
@property
def control_panel(self):
column = pn.Column(sizing_mode='stretch_width')
if self.controls:
column.append(
Param(
self.param, parameters=self.controls, sizing_mode='stretch_width'
)
)
for trnsfm in self.transforms:
if trnsfm.controls:
column.append(trnsfm.control_panel)
index = (1 if self.controls else 0)
if len(column) > index:
column.insert(index, '### Transforms')
return column
@property
def panel(self):
if isinstance(self._panel, PaneBase):
pane = self._panel
if len(pane.layout) == 1 and pane._unpack:
return pane.layout[0]
return pane._layout
return self._panel
class StringView(View):
"""
The StringView renders the latest value of the field as a HTML
string with a specified fontsize.
"""
font_size = param.String(default='24pt', doc="""
The font size of the rendered field value.""")
view_type = 'string'
def get_panel(self):
return pn.pane.HTML(**self._get_params())
def _get_params(self):
value = self.get_value()
params = dict(self.kwargs)
if value is None:
params['object'] = 'No info'
else:
params['object'] = f'<p style="font-size: {self.font_size}">{value}</p>'
return params
class IndicatorView(View):
"""
The IndicatorView renders the latest field value as a Panel
Indicator.
"""
indicator = param.Selector(objects=_INDICATORS, doc="""
The name of the panel Indicator type.""")
label = param.String(doc="""
A custom label to use for the Indicator.""")
view_type = 'indicator'
def __init__(self, **params):
if 'indicator' in params and isinstance(params['indicator'], str):
params['indicator'] = _INDICATORS[params['indicator']]
super().__init__(**params)
name = params.get('label', params.get('field', ''))
self.kwargs['name'] = name
def get_panel(self):
return self.indicator(**self._get_params())
def _get_params(self):
params = dict(self.kwargs)
if 'data' in self.indicator.param:
params['data'] = self.get_data()
else:
value = self.get_value()
if (not isinstance(value, (type(None), str)) and np.isnan(value)):
value = None
params['value'] = value
return params
class hvPlotView(View):
"""
The hvPlotView renders the queried data as a bokeh plot generated
with hvPlot. hvPlot allows for a concise declaration of a plot via
its simple API.
"""
kind = param.String(doc="The kind of plot, e.g. 'scatter' or 'line'.")
x = param.Selector(doc="The column to render on the x-axis.")
y = param.Selector(doc="The column to render on the y-axis.")
by = param.ListSelector(doc="The column(s) to facet the plot by.")
groupby = param.ListSelector(doc="The column(s) to group by.")
opts = param.Dict(default={}, doc="HoloViews options to apply on the plot.")
streaming = param.Boolean(default=False, doc="""
Whether to stream new data to the plot or rerender the plot.""")
selection_expr = param.Parameter(doc="""
        A selection expression capturing the current selection applied
on the plot.""")
view_type = 'hvplot'
_field_params = ['x', 'y', 'by', 'groupby']
_supports_selections = True
def __init__(self, **params):
import hvplot.pandas # noqa
if 'dask' in sys.modules:
try:
import hvplot.dask # noqa
except Exception:
pass
if 'by' in params and isinstance(params['by'], str):
params['by'] = [params['by']]
if 'groupby' in params and isinstance(params['groupby'], str):
params['groupby'] = [params['groupby']]
self._stream = None
self._linked_objs = []
super().__init__(**params)
def get_plot(self, df):
processed = {}
for k, v in self.kwargs.items():
if k.endswith('formatter') and isinstance(v, str) and '%' not in v:
v = NumeralTickFormatter(format=v)
processed[k] = v
if self.streaming:
processed['stream'] = self._stream
plot = df.hvplot(
kind=self.kind, x=self.x, y=self.y, **processed
)
plot = plot.opts(**self.opts) if self.opts else plot
if self.selection_group or 'selection_expr' in self._param_watchers:
plot = self._link_plot(plot)
return plot
def _link_plot(self, plot):
self._init_link_selections()
if self._ls is None:
return plot
linked_objs = list(self._ls._plot_reset_streams)
plot = self._ls(plot)
self._linked_objs += [
o for o in self._ls._plot_reset_streams if o not in linked_objs
]
return plot
def _cleanup(self):
if self._ls is None:
return
for obj in self._linked_objs:
reset = self._ls._plot_reset_streams.pop(obj)
sel_expr = self._ls._selection_expr_streams.pop(obj)
self._ls._cross_filter_stream.input_streams.remove(sel_expr)
sel_expr.clear()
sel_expr.source = None
reset.clear()
reset.source = None
self._linked_objs = []
def get_panel(self):
return pn.pane.HoloViews(**self._get_params())
def _get_params(self):
df = self.get_data()
if self.streaming:
from holoviews.streams import Pipe
self._stream = Pipe(data=df)
return dict(object=self.get_plot(df))
def update(self, *events, invalidate_cache=True):
"""
Triggers an update in the View.
Parameters
----------
events: tuple
param events that may trigger an update.
invalidate_cache : bool
Whether to clear the View's cache.
Returns
-------
stale : bool
Whether the panel on the View is stale and needs to be
rerendered.
"""
# Skip events triggered by a parameter change on this View
own_parameters = [self.param[p] for p in self.param]
own_events = events and all(
isinstance(e.obj, ParamFilter) and
(e.obj.parameter in own_parameters or
e.new is self._ls.selection_expr)
for e in events
)
if own_events:
return False
if invalidate_cache:
self._cache = None
if not self.streaming or self._stream is None:
return self._update_panel()
self._stream.send(self.get_data())
return False
class Table(View):
"""
Renders a Source table using a Panel Table widget.
"""
view_type = 'table'
_extension = 'tabulator'
def get_panel(self):
return pn.widgets.tables.Tabulator(**self._get_params())
def _get_params(self):
return dict(value=self.get_data(), disabled=True, **self.kwargs)
class Download(View):
"""
The Download View allows downloading the current table as a csv or
xlsx file.
"""
filename = param.String(default='data', doc="""
Filename of the downloaded file.""")
filetype = param.Selector(default='csv', objects=['csv', 'xlsx'], doc="""
File type of the downloaded file.""")
save_kwargs = param.Dict(default={}, doc="""
Options for the to_csv or to_excel methods.""")
view_type = 'download'
def __bool__(self):
return True
def _get_file_data(self):
df = self.get_data()
sio = StringIO()
if self.filetype == 'csv':
savefn = df.to_csv
elif self.filetype == 'xlsx':
savefn = df.to_excel
savefn(sio, **self.save_kwargs)
sio.seek(0)
return sio
def get_panel(self):
return pn.widgets.FileDownload(**self._get_params())
def _get_params(self):
filename = f'{self.filename}.{self.filetype}'
return dict(filename=filename, callback=self._get_file_data, **self.kwargs)
class PerspectiveView(View):
aggregates = param.Dict(None, allow_None=True, doc="""
How to aggregate. For example {x: "distinct count"}""")
columns = param.ListSelector(default=None, allow_None=True, doc="""
A list of source columns to show as columns. For example ["x", "y"]""")
computed_columns = param.ListSelector(default=None, allow_None=True, doc="""
A list of computed columns. For example [""x"+"index""]""")
column_pivots = param.ListSelector(None, allow_None=True, doc="""
A list of source columns to pivot by. For example ["x", "y"]""")
filters = param.List(default=None, allow_None=True, doc="""
How to filter. For example [["x", "<", 3],["y", "contains", "abc"]]""")
row_pivots = param.ListSelector(default=None, allow_None=True, doc="""
A list of source columns to group by. For example ["x", "y"]""")
selectable = param.Boolean(default=True, allow_None=True, doc="""
Whether items are selectable.""")
sort = param.List(default=None, doc="""
How to sort. For example[["x","desc"]]""")
plugin = param.ObjectSelector(default=_PerspectivePlugin.GRID.value, objects=_PerspectivePlugin.options(), doc="""
The name of a plugin to display the data. For example hypergrid or d3_xy_scatter.""")
theme = param.ObjectSelector(default='material', objects=_PERSPECTIVE_THEMES, doc="""
The style of the PerspectiveViewer. For example material-dark""")
view_type = 'perspective'
_extension = 'perspective'
_field_params = ['columns', 'computed_columns', 'column_pivots', 'row_pivots']
def _get_params(self):
df = self.get_data()
param_values = dict(self.param.get_param_values())
params = set(View.param) ^ set(PerspectiveView.param)
kwargs = dict({p: param_values[p] for p in params}, **self.kwargs)
return dict(object=df, toggle_config=False, **kwargs)
def get_panel(self):
return pn.pane.Perspective(**self._get_params())
class AltairView(View):
chart = param.Dict(default={}, doc="Keyword argument for Chart.")
x = param.Selector(doc="The column to render on the x-axis.")
y = param.Selector(doc="The column to render on the y-axis.")
marker = param.Selector(default='line', objects=[
'area', 'bar', 'boxplot', 'circle', 'errorband', 'errorbar',
'geoshape', 'image', 'line', 'point', 'rect', 'rule', 'square',
'text', 'tick', 'trail'])
encode = param.Dict(default={}, doc="Keyword arguments for encode.")
mark = param.Dict(default={}, doc="Keyword arguments for mark.")
transform = param.Dict(doc="""
Keyword arguments for transforms, nested by the type of
transform, e.g. {'bin': {'as_': 'binned', 'field': 'x'}}.""")
project = param.Dict(doc="Keyword arguments for project.")
properties = param.Dict(doc="Keyword arguments for properties.")
view_type = 'altair'
_extension = 'vega'
def _transform_encoding(self, encoding, value):
import altair as alt
if isinstance(value, dict):
value = dict(value)
for kw, val in value.items():
if kw == 'scale':
if isinstance(val, list):
val = alt.Scale(range=val)
else:
val = alt.Scale(**val)
if kw == 'tooltip':
val = [alt.Tooltip(**v) for v in val]
value[kw] = val
value = getattr(alt, encoding.capitalize())(**value)
return value
def _get_params(self):
import altair as alt
df = self.get_data()
chart = alt.Chart(df, **self.chart)
mark = getattr(chart, f'mark_{self.marker}')(**self.mark)
x = self._transform_encoding('x', self.x)
y = self._transform_encoding('y', self.y)
encode = {k: self._transform_encoding(k, v) for k, v in self.encode.items()}
encoded = mark.encode(x=x, y=y, **encode)
if self.transform:
for key, kwargs in self.transform.items():
encoded = getattr(encoded, f'transform_{key}')(**kwargs)
if self.project:
encoded = encoded.project(**self.project)
if self.properties:
encoded = encoded.properties(**self.properties)
return dict(object=encoded, **self.kwargs)
def get_panel(self):
return pn.pane.Vega(**self._get_params())
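if __name__ == '__main__':
    # Hypothetical sketch (not part of the library): the base View docstring says
    # subclasses should query data with get_data() and build a Viewable in
    # get_panel(); a minimal custom view following that pattern could look like this.
    class JSONView(View):
        """Renders the queried table as a Panel JSON pane."""
        view_type = 'json'
        def get_panel(self):
            return pn.pane.JSON(**self._get_params())
        def _get_params(self):
            return dict(object=self.get_data().to_dict(orient='records'), **self.kwargs)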
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
script for generating cache
"""
import argparse
import sys
import aioredis
import asyncio
import asyncpg
def argument_parser():
parser = argparse.ArgumentParser(description='generate cache')
parser.add_argument(
'database', nargs='?', help='database name', default='minikin')
parser.add_argument(
'--user', '-u', help='database user', default='postgres')
parser.add_argument(
'--redis', '-r', help='redis uri', dest='redis_uri',
default='redis://localhost')
return parser
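# Example invocation (hypothetical script name and values):
#   python generate_cache.py minikin --user postgres --redis redis://localhost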
async def load_cache(database, user, redis_uri):
pool = await asyncpg.create_pool(database=database, user=user)
redis = await aioredis.create_redis_pool(
redis_uri, encoding='utf-8', minsize=5, maxsize=10)
async with pool.acquire() as connection:
record = await connection.fetchrow('SELECT COUNT(slug) from short_url')
count = record['count']
print(f'start loading.\ntotal record {count}.')
i = 0
async with connection.transaction():
async for record in connection.cursor('SELECT * FROM short_url'):
await redis.set(record['slug'].strip(), record['url'])
i += 1
if i % 100 == 0:
sys.stdout.write(f'\r{i / count * 100:.2f}%')
sys.stdout.flush()
else:
sys.stdout.write('\r100.00%\n')
print('done!')
def main(argv=None):
args = argument_parser().parse_args(argv)
loop = asyncio.get_event_loop()
loop.run_until_complete(
load_cache(args.database, args.user, args.redis_uri))
if __name__ == '__main__':
main()
|
# Copyright 2018 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneauth1 import exceptions as ks_exc
from neutron_lib.agent import constants as agent_const
from neutron_lib.api.definitions import agent_resources_synced
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.placement import client as place_client
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import placement_report
from neutron.notifiers import batch_notifier
LOG = logging.getLogger(__name__)
PLUGIN_TYPE = "placement_report"
@registry.has_registry_receivers
class PlacementReportPlugin(service_base.ServicePluginBase):
supported_extension_aliases = []
# A service plugin without claiming support for filter validation would
# disable filter validation for all other plugins, so we report support
# although this plugin doesn't have filters.
__filter_validation_support = True
@classmethod
def get_plugin_type(cls):
return PLUGIN_TYPE
def get_plugin_description(self):
return "Sync placement info from agent to server to placement."
def __init__(self):
self._core_plugin = directory.get_plugin()
# NOTE(bence romsics): The following bug and fix may be relevant here.
# https://bugs.launchpad.net/nova/+bug/1697825
# https://review.openstack.org/493536
self._placement_client = place_client.PlacementAPIClient(cfg.CONF)
self._agents = PlacementReporterAgents(self._core_plugin)
self._batch_notifier = batch_notifier.BatchNotifier(
cfg.CONF.send_events_interval, self._execute_deferred)
def _execute_deferred(self, deferred_batch):
for deferred in deferred_batch:
deferred()
def _get_rp_by_name(self, name):
rps = self._placement_client.list_resource_providers(
name=name)['resource_providers']
# RP names are unique, therefore we can get 0 or 1. But not many.
if len(rps) != 1:
            # NOTE(bence romsics): While we could raise() here and thereby
            # detect an error a bit earlier, we want the error to surface in
            # the sync batch below so it is properly caught and influences
            # the agent's resources_synced attribute.
LOG.warning(
'placement client: no such resource provider: %s', name)
return {'uuid': None}
return rps[0]
def _sync_placement_state(self, agent, agent_db):
configurations = agent['configurations']
mech_driver = self._agents.mechanism_driver_by_agent_type(
agent['agent_type'])
uuid_ns = mech_driver.resource_provider_uuid5_namespace
supported_vnic_types = mech_driver.supported_vnic_types
device_mappings = mech_driver.get_standard_device_mappings(agent)
try:
agent_host_rp_uuid = self._get_rp_by_name(
name=agent['host'])['uuid']
except ks_exc.HttpError:
# Delay the error for the same reason as in _get_rp_by_name().
agent_host_rp_uuid = None
state = placement_report.PlacementState(
rp_bandwidths=configurations[
'resource_provider_bandwidths'],
rp_inventory_defaults=configurations[
'resource_provider_inventory_defaults'],
driver_uuid_namespace=uuid_ns,
agent_type=agent['agent_type'],
agent_host=agent['host'],
agent_host_rp_uuid=agent_host_rp_uuid,
device_mappings=device_mappings,
supported_vnic_types=supported_vnic_types,
client=self._placement_client)
deferred_batch = state.deferred_sync()
# NOTE(bence romsics): Some client calls depend on earlier
# ones, but not all. There are calls in a batch that can succeed
# independently of earlier calls. Therefore even if a call fails
# we have to suppress its failure so the later independent calls
# have a chance to succeed. If we queue up the deferred client
# calls one by one then we cannot handle errors at the end of
# a batch. So instead we should wrap the deferred client calls
# in a single deferred batch which executes the client calls,
# continuing to the next client call even if there was an error
# but remembering if an error happened. Then at the end of the
# batch (also having access to the agent object) set the agent's
# resources_synced attribute according to the success/failure
# of the batch. Since each client call does monkey patched I/O
# we'll yield to other eventlet threads in each call therefore
# the performance should not be affected by the wrapping.
def batch():
errors = False
for deferred in deferred_batch:
try:
LOG.debug('placement client: {}'.format(deferred))
deferred.execute()
except Exception:
errors = True
LOG.exception(
'placement client call failed: %s',
str(deferred))
resources_synced = not errors
agent_db.resources_synced = resources_synced
agent_db.update()
LOG.debug(
'Synchronization of resources'
' of agent type %(type)s'
' at host %(host)s'
' to placement %(result)s.',
{'type': agent['agent_type'],
'host': agent['host'],
'result': 'succeeded' if resources_synced else 'failed'})
self._batch_notifier.queue_event(batch)
@registry.receives(resources.AGENT,
[events.AFTER_CREATE, events.AFTER_UPDATE])
def handle_placement_config(self, resource, event, trigger, payload):
# NOTE(bence romsics): This method gets called a lot, keep it quick.
agent = payload.desired_state
status = payload.metadata.get('status')
context = payload.context
if agent['agent_type'] not in self._agents.supported_agent_types:
return
if 'resource_provider_bandwidths' not in agent['configurations']:
            LOG.warning(
                "The mechanism driver claims agent type supports "
                "placement reports, but the agent does not report "
                "'resource_provider_bandwidths' in its configurations. "
                "host: %(host)s, type: %(type)s",
                {'host': agent['host'],
                 'type': agent['agent_type']})
return
# We need to get the same agent as in
# neutron.db.agents_db.AgentDbMixin.create_or_update_agent()
agent_db = self._core_plugin._get_agent_by_type_and_host(
context, agent['agent_type'], agent['host'])
# sync the state known by us to placement
if (
# agent object in API (re-)created
status == agent_const.AGENT_NEW or
# agent (re-)started (even without config change)
'start_flag' in agent or
# never tried to sync yet or last sync failed
not agent_db[agent_resources_synced.RESOURCES_SYNCED]):
LOG.debug(
'placement: syncing state for agent type %s on host %s',
agent['agent_type'], agent['host'])
self._sync_placement_state(agent, agent_db)
else:
LOG.debug(
'placement: nothing to sync for agent type %s on host %s',
agent['agent_type'], agent['host'])
class PlacementReporterAgents(object):
# Yep, this is meant to depend on ML2.
def __init__(self, ml2_plugin):
self._mechanism_drivers = ml2_plugin.mechanism_manager.\
ordered_mech_drivers
self._supported_agent_types = []
self._agent_type_to_mech_driver = {}
@property
def supported_agent_types(self):
if not self._supported_agent_types:
# NOTE(bence romsics): We treat the presence of the
# RP uuid namespace a proxy for supporting placement reports from
# the driver's agent type. But we could introduce a property/logic
# explicitly describing the agent types supporting placement
# reports any time if this proved to be insufficient.
self._supported_agent_types = [
driver.obj.agent_type
for driver in self._mechanism_drivers
if driver.obj.resource_provider_uuid5_namespace is not None]
LOG.debug('agent types supporting placement reports: %s',
', '.join(self._supported_agent_types))
return self._supported_agent_types
def mechanism_driver_by_agent_type(self, agent_type):
if agent_type not in self._agent_type_to_mech_driver:
for driver in self._mechanism_drivers:
if (hasattr(driver.obj, 'agent_type') and
agent_type == driver.obj.agent_type):
self._agent_type_to_mech_driver[agent_type] = driver.obj
break
return self._agent_type_to_mech_driver[agent_type]
|
import configdualbot
import psycopg2
import csv
import player
import collections
import logging
import datetime
import json
from psycopg2.extras import RealDictCursor
logging.basicConfig(
filename=f'logs/{datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")}.log',
filemode='w',
format='PostgreSQL - %(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
players = collections.defaultdict(player.Player)
"""Connect to the PostgreSQL database server """
# conn = psycopg2.connect(host=configdualbot.dbhost, port = 5432, database=configdualbot.dbname, user=configdualbot.dbuser, password=configdualbot.dbpassword)
'''
Use the commented-out config() helper below together with a separate 'database.ini'
file if you prefer to keep the database connection info in an .ini file.
'''
# from configparser import ConfigParser
# def config(filename='database.ini', section='postgresql'):
# # create a parser
# parser = ConfigParser()
# # read config file
# parser.read(filename)
#
# # get section, default to postgresql
# db = {}
# if parser.has_section(section):
# params = parser.items(section)
# for param in params:
# db[param[0]] = param[1]
# else:
# raise Exception('Section {0} not found in the {1} file'.format(section, filename))
#
# return db
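# Example (hypothetical) database.ini contents and usage with the config() helper above:
#
#   [postgresql]
#   host=localhost
#   database=minikin
#   user=postgres
#   password=secret
#
# params = config()                  # parses the [postgresql] section of database.ini
# conn = psycopg2.connect(**params)  # connect with the parsed keyword arguments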
def testconnect():
try:
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
user=configdualbot.dbuser, password=configdualbot.dbpassword)
# create a cursor
cur = conn.cursor()
# execute a statement
print('PostgreSQL database version:')
cur.execute('SELECT version()')
# display the PostgreSQL database server version
db_version = cur.fetchone()
print(db_version)
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
def create_sql_players():
""" drop tables if exist in the PostgreSQL database"""
""" create tables in the PostgreSQL database"""
commands = (
f"""
DROP TABLE IF EXISTS
playerlist,
playerchatids;
""",
f"""
CREATE TABLE playerlist (
Player VARCHAR(255) PRIMARY KEY,
Angel VARCHAR(255) NOT NULL,
Mortal VARCHAR(255) NOT NULL,
Gender VARCHAR(255) NOT NULL,
Interests VARCHAR(255) NOT NULL,
Twotruthsonelie VARCHAR(255) NOT NULL,
Intro VARCHAR(255) NOT NULL
)
""",
"""
CREATE TABLE playerchatids (
playerusername VARCHAR(255) PRIMARY KEY,
chat_id BIGINT NULL,
FOREIGN KEY (playerusername)
REFERENCES playerlist (Player)
ON UPDATE CASCADE ON DELETE CASCADE
)
""",
"""
DROP TYPE IF EXISTS stringint;
CREATE TYPE stringint AS (playerusername text, chat_id bigint);
""",
)
try:
conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
user=configdualbot.dbuser, password=configdualbot.dbpassword)
cur = conn.cursor()
# create table one by one
for command in commands:
cur.execute(command)
# close communication with the PostgreSQL database server
cur.close()
# commit the changes
conn.commit()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def import_players_from_csv():
try:
conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
user=configdualbot.dbuser, password=configdualbot.dbpassword)
cur = conn.cursor()
# create table one by one
with open(configdualbot.PLAYERS_FILENAME, 'r') as f:
reader = csv.reader(f, delimiter=',')
next(reader) # Skip the header row.
for row in reader:
cur.execute(
f"""
INSERT INTO playerlist (player,angel,mortal,gender,interests,twotruthsonelie,intro)
VALUES ('{row[0]}','{row[1]}','{row[2]}','{row[3]}','{row[4]}','{row[5]}','{row[6]}')
ON CONFLICT (player) DO UPDATE
SET (angel,mortal,gender,interests,twotruthsonelie,intro)
= ('{row[1]}','{row[2]}','{row[3]}','{row[4]}','{row[5]}','{row[6]}')
WHERE playerlist.player = '{row[0]}';
"""
) ## Somehow, if you do not put playerlist.player, it will throw "ambiguous column" error
# close communication with the PostgreSQL database server
cur.close()
# commit the changes
conn.commit()
print (f"PLAYERS_FILENAME imported successfully into SQL database")
except (Exception, psycopg2.DatabaseError) as error:
print(error) ###NOTE: if you get Index error, open the CSV as notepad, then delete the last empty row. It is a known bug when exporting CSVs from SQL. Then it should be able to import flawlessly
finally:
if conn is not None:
conn.close()
def loadPlayers_fromSQL(players: dict): ##NOTE: this also loads the chat ids from playerchatids SQL
commands = (
f"""
SELECT * FROM
playerlist;
""",
)
try:
conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
user=configdualbot.dbuser, password=configdualbot.dbpassword)
cur = conn.cursor()
# create table one by one
for command in commands:
cur.execute(command)
# close communication with the PostgreSQL database server
print("Selecting rows from playerlist table using cursor.fetchall")
playerlist_selected = cur.fetchall()
for row in playerlist_selected:
print(row)
playerName = row[0].strip().lower() ##Note: Player is in 1st column. Angel is in 2nd column, Mortal is in 3rd column.
angelName = row[1].strip().lower()
mortalName = row[2].strip().lower()
genderPlayer = row[3].strip()
interests = row[4].strip()
twotruthsonelie = row[5].strip()
introduction = row[6].strip()
players[playerName].username = playerName
            players[playerName].angel = angelName   # NOTE: store the usernames as plain strings for now; the Angel/Mortal
            players[playerName].mortal = mortalName  # Player objects may not exist yet, so they are linked in a second pass below
players[playerName].gender = genderPlayer
players[playerName].interests = interests
players[playerName].twotruthsonelie = twotruthsonelie
players[playerName].introduction = introduction
# close communication with the PostgreSQL database server
temp = players
for k, v in players.items():
temp[k].angel = players[v.angel]
temp[k].mortal = players[v.mortal]
print(f"players loaded into Telegram dualbot!")
players = temp
player.validatePairings(players)
loadChatID_fromSQL(players)
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def loadChatID_fromSQL(players: dict):
commands = (
f"""
SELECT * FROM
playerchatids;
""",
)
try:
conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
user=configdualbot.dbuser, password=configdualbot.dbpassword)
cur = conn.cursor()
# create table one by one
for command in commands:
cur.execute(command)
print("Selecting rows from playerchatids table using cursor.fetchall")
playerchatids_selected = cur.fetchall()
for row in playerchatids_selected:
playerName = row[0].strip().lower()
chatid = row[1]
players[playerName].chat_id = chatid
# close communication with the PostgreSQL database server
cur.close()
print(f"player chat_ids loaded to dualbot!")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
# def saveplayerschatids_toSQL(players: dict):
# try:
# conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
# user=configdualbot.dbuser, password=configdualbot.dbpassword)
# cur = conn.cursor()
# data = []
# for k, v in players.items():
# d = {"playerusername": k, "chat_id": players[k].chat_id}
# data.append(d)
# command1 = (
# f"""
# DROP TABLE IF EXISTS
# playerchatids;
# """,
# f"""
# CREATE TABLE IF NOT EXISTS playerchatids(
# playerusername VARCHAR(255) PRIMARY KEY,
# chat_id BIGINT NULL,
# FOREIGN KEY (playerusername)
# REFERENCES playerlist (Player)
# ON UPDATE CASCADE ON DELETE CASCADE
# )
# """
# )
# for commands in command1:
# cur.execute(commands)
# print("Command 1 success!")
# command2 = (
# f"""
# INSERT INTO playerchatids
# SELECT * FROM jsonb_populate_recordset(null::stringint, '{json.dumps(data)}') AS p
# """
# )
# cur.execute(command2)
# # close communication with the PostgreSQL database server
# cur.close()
# # commit the changes
# conn.commit()
# print("All Telegram players chat_id were dumped onto playerchatids SQL successfully!")
# except (Exception, psycopg2.DatabaseError) as error:
# print(error)
# finally:
# if conn is not None:
# conn.close()
def saveplayerschatids_toSQL(players: dict): ##USE THIS INSTEAD OF ABOVE FUNCTION
try:
conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
user=configdualbot.dbuser, password=configdualbot.dbpassword)
cur = conn.cursor()
for k, v in players.items():
logger.info(f"{k}, {v.chat_id}")
if v.chat_id is None:
# v.chat_id = "null" ## this DOES NOT WORK - invalid input syntax for type bigint: "null"
command2 = (
f"""
INSERT INTO playerchatids (playerusername, chat_id)
VALUES ('{k}', NULL)
ON CONFLICT (playerusername) DO UPDATE
SET chat_id
= NULL
WHERE playerchatids.playerusername = '{k}';
"""
)
cur.execute(command2)
else:
command2 = (
f"""
INSERT INTO playerchatids (playerusername, chat_id)
VALUES ('{k}', '{v.chat_id}')
ON CONFLICT (playerusername) DO UPDATE
SET chat_id
= '{v.chat_id}'
WHERE playerchatids.playerusername = '{k}';
"""
)
cur.execute(command2)
# close communication with the PostgreSQL database server
cur.close()
# commit the changes
conn.commit()
print(f"All Telegram players chat_id were dumped onto playerchatids SQL successfully!")
logger.info(f"All Telegram players chat_id from were dumped onto playerchatids SQL successfully!")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
logger.info(error)
finally:
if conn is not None:
conn.close()
def saveplayerchatids_fromSQL_toCSV(): ##Exporting to CSV is better than to JSON
command = (
f"""
SELECT * FROM
playerchatids
"""
)
try:
conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
user=configdualbot.dbuser, password=configdualbot.dbpassword)
cur = conn.cursor()
cur.execute(command)
print("Selecting rows from playerchatids table using cursor.fetchall")
playerchatids_selected = cur.fetchall()
with open(configdualbot.CHAT_ID_CSV, 'w+', newline = '') as f:
write = csv.writer(f)
write.writerows(playerchatids_selected)
print("Exported CSV from playerchatids table using cursor.fetchall")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def import_playerchatids_fromCSV_toSQL(): ##JUST IN CASE FUNCTION
try:
conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
user=configdualbot.dbuser, password=configdualbot.dbpassword)
cur = conn.cursor()
# create table one by one
with open(configdualbot.CHAT_ID_CSV, 'r') as f:
reader = csv.reader(f, delimiter=',')
for row in reader:
if row[1] == '':
# row[1] = "null"
print(f"{row[0]}, {row[1]}")
cur.execute(
f"""
INSERT INTO playerchatids (playerusername,chat_id)
VALUES ('{row[0]}',NULL)
ON CONFLICT (playerusername) DO UPDATE
SET chat_id
= NULL
WHERE playerchatids.playerusername = '{row[0]}';
"""
)
else:
print(f"{row[0]}, {row[1]}")
cur.execute(
f"""
INSERT INTO playerchatids (playerusername,chat_id)
VALUES ('{row[0]}','{row[1]}')
ON CONFLICT (playerusername) DO UPDATE
SET chat_id
= '{row[1]}'
WHERE playerchatids.playerusername = '{row[0]}';
"""
)
print("CHAT_ID_CSV Dump onto SQL success!")
# close communication with the PostgreSQL database server
cur.close()
# commit the changes
conn.commit()
print (f"CHAT_ID_CSV is imported successfully into playerchatids SQL database")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
'''
old JSON functions - complicated and not recommended for use
'''
# def saveplayerchatids_fromSQL_toJSON(): ##JUST IN CASE FUNCTION
# commands = (
# f"""
# SELECT * FROM
# playerchatids;
#
# """,
# )
# try:
# conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
# user=configdualbot.dbuser, password=configdualbot.dbpassword)
# cur = conn.cursor(cursor_factory=RealDictCursor)
# # create table one by one
# for command in commands:
# cur.execute(command)
# # close communication with the PostgreSQL database server
# print("Selecting rows from playerchatids table using cursor.fetchall")
# playerchatids_selected = cur.fetchall()
#
# with open(configdualbot.CHAT_ID_JSON, 'w+') as f:
# json.dump(playerchatids_selected, f)
# except (Exception, psycopg2.DatabaseError) as error:
# print(error)
# finally:
# if conn is not None:
# conn.close()
# def import_playerchatids_fromJSON_toSQL(): ##JUST IN CASE FUNCTION
# try:
# conn = psycopg2.connect(host=configdualbot.dbhost, port=5432, database=configdualbot.dbname,
# user=configdualbot.dbuser, password=configdualbot.dbpassword)
# cur = conn.cursor()
# # create table one by one
# with open(configdualbot.CHAT_ID_JSON, 'r') as f:
# data = json.load(f)
# command1 = (
# f"""
# DROP TABLE IF EXISTS
# playerchatids;
# """,
# f"""
# CREATE TABLE IF NOT EXISTS playerchatids(
# playerusername VARCHAR(255) PRIMARY KEY,
# chat_id BIGINT NULL,
# FOREIGN KEY (playerusername)
# REFERENCES playerlist (Player)
# ON UPDATE CASCADE ON DELETE CASCADE
# )
# """
# )
# command2 = (
# f"""
# INSERT INTO playerchatids
# SELECT * FROM json_populate_recordset(null::stringint, '{json.dumps(data)}')
# """
# )
# for commands in command1:
# cur.execute(commands)
# print("Command 1 success!")
# cur.execute(command2)
# print("CHAT_ID_JSON Dump onto SQL success!")
# # close communication with the PostgreSQL database server
# cur.close()
# # commit the changes
# conn.commit()
# print (f"CHAT_ID_JSON is imported successfully into playerchatids SQL database")
# except (Exception, psycopg2.DatabaseError) as error:
# print(error)
# finally:
# if conn is not None:
# conn.close()
if __name__ == '__main__':
# testconnect()
create_sql_players()
import_players_from_csv()
import_playerchatids_fromCSV_toSQL()
# loadPlayers_fromSQL(players)
## print(f"players loaded to dualbot!")
# loadChatID_fromSQL(players)
## print(f"player chat_ids loaded to dualbot!")
# saveplayerschatids_toSQL(players)
# saveplayerchatids_fromSQL_toJSON()
|
# Bruce Maxwell
# Revised for spring 2013
# Tested for Python 3 fall 2017
# test function 1 for lab 9
import turtle_interpreter
import random
def main():
""" draw a 4x4 square of alternating filled and unfilled squares """
terp = turtle_interpreter.TurtleInterpreter()
square = 'F-F-F-F-'
fsquare = '{F-F-F-F-}'
for i in range(4):
for j in range(4):
terp.place( -100 + j*50, -100 + i*50 )
terp.setColor( (random.random(), random.random(), random.random() ) )
if (j + (i % 2)) % 2 == 0:
terp.drawString(square, 40, 90)
else:
terp.drawString(fsquare, 40, 90)
terp.hold()
if __name__ == "__main__":
main()
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.rules import (
download_pex_bin,
pex,
pex_from_target_closure,
prepare_chrooted_python_sources,
repl,
)
from pants.backend.python.rules.repl import PythonRepl
from pants.backend.python.subsystems import python_native_code, subprocess_environment
from pants.backend.python.targets.python_library import PythonLibrary
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.engine.fs import FileContent
from pants.engine.interactive_runner import InteractiveRunner
from pants.engine.rules import RootRule
from pants.rules.core import strip_source_roots
from pants.rules.core.repl import Repl, run_repl
from pants.testutil.goal_rule_test_base import GoalRuleTestBase
class ReplTest(GoalRuleTestBase):
goal_cls = Repl
@classmethod
def rules(cls):
return (
*super().rules(),
*repl.rules(),
run_repl,
*pex.rules(),
*download_pex_bin.rules(),
*pex_from_target_closure.rules(),
*prepare_chrooted_python_sources.rules(),
*python_native_code.rules(),
*strip_source_roots.rules(),
*subprocess_environment.rules(),
RootRule(PythonRepl),
)
@classmethod
def alias_groups(cls) -> BuildFileAliases:
return BuildFileAliases(targets={"python_library": PythonLibrary,})
def test_repl_with_targets(self):
library_source = FileContent(path="some_lib.py", content=b"class SomeClass:\n pass\n")
self.create_library(
name="some_lib",
target_type="python_library",
path="src/python",
sources=["some_lib.py"],
)
self.create_file(
relpath="src/python/some_lib.py", contents=library_source.content.decode(),
)
output = self.execute_rule(
args=["--backend-packages2=pants.backend.python", "src/python:some_lib"],
additional_params=[InteractiveRunner(self.scheduler)],
)
assert output == "REPL exited successfully."
|
# encoding=utf8
from matplotlib import pyplot as plt
from scipy.interpolate import spline
import numpy as np
U = [
-1,
50,
100,
150,
200,
250,
300,
350,
400,
450,
500,
550,
600,
650,
700,
750,
800,
850,
900,
950,
1000,
1050,
1100,
1150,
1200,
1250,
1300,
1341,
1350,
1400,
1450,
1490
]
I = [
1.124,
1.119,
1.109,
1.100,
1.074,
1.052,
1.024,
0.992,
0.956,
0.912,
0.867,
0.817,
0.762,
0.701,
0.642,
0.576,
0.513,
0.424,
0.345,
0.289,
0.221,
0.173,
0.089,
0.061,
0.034,
0.016,
0.008,
0.0048,
0.0053,
0.0115,
0.026,
0.042
]
print len(U)
print len(I)
xnew = np.linspace(-1, 1489, 500)
smooth = spline(U, I, xnew)
plt.plot(xnew, smooth, label="Spline curve")
plt.scatter(U, I, marker='x', color='black', s=50,label="Data points")
plt.xlabel("U / V")
plt.ylabel("I / mW")
plt.legend()
plt.show()
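# Compatibility note (sketch, relevant only on newer SciPy where scipy.interpolate.spline
# has been removed): the same smoothed curve can be produced with make_interp_spline, e.g.
#
#     from scipy.interpolate import make_interp_spline
#     smooth = make_interp_spline(U, I, k=3)(xnew)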
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for util."""
import os
import tempfile
from language.nql import dataset
from language.nql import nql
from language.nql import util
import numpy as np
import tensorflow as tf
def tabline(s):
return "\t".join(s.split(" ")) + "\n"
TRIPPY_KG_LINES = [
tabline("feature t1 purple"),
tabline("feature t1 green"),
tabline("feature t1 red"),
tabline("feature t2 purple"),
tabline("feature t2 red"),
tabline("feature t3 red"),
tabline("feature t3 black"),
tabline("feature b1 black"),
tabline("feature b1 tan"),
tabline("feature b2 white"),
tabline("feature b2 grey"),
tabline("feature b3 black"),
tabline("feature b3 white"),
tabline("feature b3 tan"),
tabline("feature u1 purple"),
tabline("feature t1 green"),
tabline("feature u2 green"),
tabline("feature t2 red"),
tabline("feature c1 black"),
tabline("feature b1 grey"),
tabline("feature c2 tan"),
tabline("feature c2 grey")
]
TRAIN_DATA_LINES = [
"t1|trippy", "t2|trippy", "t3|trippy", "b1|boring", "b2|boring", "b3|boring"
]
TEST_DATA_LINES = ["u1|trippy", "u2|trippy", "c1|boring", "c2|boring"]
def simple_tf_dataset(context,
tuple_input,
x_type,
y_type,
normalize_outputs=False,
batch_size=1,
shuffle_buffer_size=1000,
feature_key=None,
field_separator="\t"):
"""A dataset with just two columns, x and y.
Args:
context: a NeuralQueryContext
tuple_input: passed to util.tuple_dataset
x_type: type of entities x
y_type: type of entities y1,...,yk
normalize_outputs: make the encoding of {y1,...,yk} sum to 1
batch_size: size of minibatches
shuffle_buffer_size: if zero, do not shuffle the dataset. Otherwise, this is
passed in as argument to shuffle
feature_key: if not None, wrap the x part of the minibatch in a dictionary
with the given key
field_separator: passed in to dataset.tuple_dataset
Returns:
a tf.data.Dataset formed by wrapping the generator
"""
dset = dataset.tuple_dataset(
context,
tuple_input, [x_type, y_type],
normalize_outputs=normalize_outputs,
field_separator=field_separator)
if shuffle_buffer_size > 0:
dset = dset.shuffle(shuffle_buffer_size)
dset = dset.batch(batch_size)
if feature_key is None:
return dset
else:
wrap_x_in_dict = lambda x, y: ({feature_key: x}, y)
return dset.map(wrap_x_in_dict)
class TrippyBuilder(util.ModelBuilder):
def config_context(self, context, params=None):
context.declare_relation("feature", "instance_t", "feature_t")
context.declare_relation(
"indicates", "feature_t", "label_t", trainable=True)
context.extend_type("label_t", ["trippy", "boring"])
context.load_kg(lines=TRIPPY_KG_LINES)
context.set_initial_value(
"indicates", np.ones(context.get_shape("indicates"), dtype="float32"))
def config_model_prediction(self, model, feature_ph_dict, params=None):
model.x = model.context.as_nql(feature_ph_dict["x"], "instance_t")
model.score = model.x.feature().indicates()
model.predicted_y = model.score.tf_op(nql.nonneg_softmax)
model.predictions = {"y": model.predicted_y}
def config_model_training(self, model, labels_ph, params=None):
model.labels = model.context.as_tf(labels_ph)
model.loss = nql.nonneg_crossentropy(model.predicted_y.tf, model.labels)
optimizer = tf.train.AdagradOptimizer(1.0)
model.train_op = optimizer.minimize(
loss=model.loss, global_step=tf.train.get_global_step())
def config_model_evaluation(self, model, labels_ph, params=None):
model.accuracy = tf.metrics.accuracy(
tf.argmax(input=model.labels, axis=1),
tf.argmax(input=model.predicted_y.tf, axis=1))
model.top_labels = util.labels_of_top_ranked_predictions_in_batch(
model.labels, model.predicted_y.tf)
model.precision_at_one = tf.metrics.mean(model.top_labels)
model.evaluations = {
"accuracy": model.accuracy,
"precision@1": model.precision_at_one
}
class BaseTester(tf.test.TestCase):
def setUp(self):
super(BaseTester, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
self.context = TrippyBuilder().build_context()
def make_train_dset(self, num_epochs):
# need to specify a non-default field separator
# because tabs are disallowed in test input files
return simple_tf_dataset(
self.context,
TRAIN_DATA_LINES,
"instance_t",
"label_t",
feature_key="x",
field_separator="|").repeat(num_epochs)
def make_test_dset(self):
return simple_tf_dataset(
self.context,
TEST_DATA_LINES,
"instance_t",
"label_t",
shuffle_buffer_size=0,
feature_key="x",
field_separator="|")
class TestModelBuilder(BaseTester):
def setUp(self):
super(TestModelBuilder, self).setUp()
self.session = tf.Session()
def check_one_hot(self, m, i, typename):
self.assertEqual(m.shape, (self.context.get_max_id(typename),))
self.assertEqual(np.sum(m), 1.0)
self.assertEqual(m[i], 1.0)
def test_tf_dataset(self):
dset1 = simple_tf_dataset(
self.context,
TRAIN_DATA_LINES,
"instance_t",
"label_t",
shuffle_buffer_size=0,
field_separator="|")
x, y = self.session.run(
tf.data.make_one_shot_iterator(dset1).get_next())
self.check_batch(x, 0, "instance_t")
self.check_batch(y, 0, "label_t")
def check_batch(self, m, i, typename):
self.assertEqual(m.shape, (1, self.context.get_max_id(typename)))
self.assertEqual(np.sum(m), 1.0)
self.assertEqual(m[0, i], 1.0)
def test_tf_minibatch_dataset(self):
dset2 = simple_tf_dataset(
self.context,
TRAIN_DATA_LINES,
"instance_t",
"label_t",
batch_size=2,
shuffle_buffer_size=0,
field_separator="|")
x, y = self.session.run(
tf.data.make_one_shot_iterator(dset2).get_next())
# check that this is a minibatch containing the first two instances
self.assertEqual(x.shape[0], 2)
self.assertEqual(y.shape[0], 2)
self.assertEqual(x.shape[1], self.context.get_max_id("instance_t"))
self.assertEqual(y.shape[1], self.context.get_max_id("label_t"))
self.assertEqual(np.sum(x), 2.0)
self.assertEqual(np.sum(y), 2.0)
self.assertEqual(x[0, 0], 1.0)
self.assertEqual(x[1, 1], 1.0)
# both of the first two instances are negative
self.assertEqual(y[0, 0], 1.0)
self.assertEqual(y[1, 0], 1.0)
def test_ph_learn(self):
# build model
feature_ph_dict = {"x": self.context.placeholder("x", "instance_t")}
labels_ph = self.context.placeholder("y", "label_t")
builder = TrippyBuilder()
model = builder.build_model(feature_ph_dict, labels_ph)
trainer = util.Trainer(self.session, model, feature_ph_dict, labels_ph)
# train
trainer.train(self.make_train_dset(5))
# check the model fits the train data
evaluation = trainer.evaluate(self.make_train_dset(1))
self.assertEqual(evaluation["accuracy"], 1.0)
self.assertEqual(evaluation["precision@1"], 1.0)
# try running the model on something
for inst_name in ["u1", "u2", "c1", "c2"]:
x = model.context.one_hot_numpy_array(inst_name, "instance_t")
x_ph = feature_ph_dict["x"]
fd = {x_ph.name: x}
y_dict = model.predicted_y.eval(self.session, feed_dict=fd)
# the u's are class trippy
if inst_name[0] == "u":
self.assertGreater(y_dict["trippy"], y_dict["boring"])
# the c's are class boring but c1 is hard to get
elif inst_name == "c2":
self.assertLess(y_dict["trippy"], y_dict["boring"])
# test the model
evaluation = trainer.evaluate(self.make_test_dset())
self.assertGreaterEqual(evaluation["accuracy"], 0.7)
self.assertGreaterEqual(evaluation["precision@1"], 0.7)
# test callback
cb_model = builder.build_model(feature_ph_dict, labels_ph)
cb_model.loss_history = []
def my_callback(fd, loss, secs):
del fd, secs # unused
cb_model.loss_history.append(loss)
return None
cb_model.training_callback = my_callback
with tf.Session() as session:
cb_trainer = util.Trainer(session, cb_model, feature_ph_dict, labels_ph)
cb_trainer.train(self.make_train_dset(5))
self.assertEqual(len(cb_model.loss_history), 30)
self.assertLess(cb_model.loss_history[-1], 0.05)
def test_estimator_learn(self):
def train_input_fn():
return self.make_train_dset(5)
def test_input_fn():
return self.make_test_dset()
estimator = TrippyBuilder().build_estimator()
estimator.train(input_fn=train_input_fn)
evaluation = estimator.evaluate(input_fn=train_input_fn)
self.assertEqual(evaluation["accuracy"], 1.0)
self.assertEqual(evaluation["global_step"], 30)
evaluation = estimator.evaluate(input_fn=test_input_fn)
self.assertGreater(evaluation["accuracy"], 0.7)
self.assertGreaterEqual(evaluation["precision@1"], 0.7)
class TestSaveRestore(BaseTester):
def setUp(self):
super(TestSaveRestore, self).setUp()
tmp_dir = tempfile.mkdtemp("util_test")
self.checkpoint_location_a = os.path.join(tmp_dir, "trippy.ckpt")
self.checkpoint_location_b = os.path.join(tmp_dir, "trippy2.ckpt")
def test_est(self):
def train_input_fn():
return self.make_train_dset(5)
def test_input_fn():
return self.make_test_dset()
estimator = TrippyBuilder().build_estimator(
model_dir=self.checkpoint_location_a)
estimator.train(input_fn=train_input_fn)
evaluation = estimator.evaluate(input_fn=test_input_fn)
self.assertGreater(evaluation["accuracy"], 0.7)
self.assertGreaterEqual(evaluation["precision@1"], 0.7)
def test_ph(self):
def try_model_on_test_instances(model, sess, feature_ph_dict):
trial = {}
for inst_name in ["u1", "u2", "c1", "c2"]:
x = model.context.one_hot_numpy_array(inst_name, "instance_t")
x_ph = feature_ph_dict["x"]
fd = {x_ph.name: x}
y_dict = model.predicted_y.eval(sess, feed_dict=fd)
trial[inst_name] = y_dict["boring"]
return trial
# Train and save.
with tf.Graph().as_default():
with tf.Session() as sess1:
builder1 = TrippyBuilder()
context1 = builder1.build_context()
feature_ph_dict1 = {"x": context1.placeholder("x", "instance_t")}
labels_ph1 = context1.placeholder("y", "label_t")
model1 = builder1.build_model(feature_ph_dict1, labels_ph1)
trainer1 = util.Trainer(sess1, model1, feature_ph_dict1, labels_ph1)
trainer1.train(self.make_train_dset(5))
trial1a = try_model_on_test_instances(model1, sess1, feature_ph_dict1)
saver1 = tf.train.Saver()
saver1.save(sess1, self.checkpoint_location_a)
# Restore, evaluate, train, and save.
with tf.Graph().as_default():
with tf.Session() as sess2:
builder2 = TrippyBuilder()
context2 = builder2.build_context()
feature_ph_dict2 = {"x": context2.placeholder("x", "instance_t")}
labels_ph2 = context2.placeholder("y", "label_t")
model2 = builder2.build_model(feature_ph_dict2, labels_ph2)
saver2 = tf.train.Saver()
trainer2 = util.Trainer(sess2, model2, feature_ph_dict2, labels_ph2)
saver2.restore(sess2, self.checkpoint_location_a)
trainer2.evaluate(self.make_test_dset())
trial2a = try_model_on_test_instances(model2, sess2, feature_ph_dict2)
self.assertDictEqual(trial1a, trial2a)
trainer2.train(self.make_train_dset(5))
saver2.save(sess2, self.checkpoint_location_b)
trial2b = try_model_on_test_instances(model2, sess2, feature_ph_dict2)
with self.assertRaises(tf.test.TestCase.failureException):
self.assertDictEqual(trial2a, trial2b)
# Restore and evaluate.
with tf.Graph().as_default():
with tf.Session() as sess3:
builder3 = TrippyBuilder()
context3 = builder3.build_context()
feature_ph_dict3 = {"x": context3.placeholder("x", "instance_t")}
labels_ph3 = context3.placeholder("y", "label_t")
model3 = builder3.build_model(feature_ph_dict3, labels_ph3)
saver3 = tf.train.Saver()
trainer3 = util.Trainer(sess3, model3, feature_ph_dict3, labels_ph3)
saver3.restore(sess3, self.checkpoint_location_b)
trainer3.evaluate(self.make_test_dset())
trial3b = try_model_on_test_instances(model3, sess3, feature_ph_dict3)
self.assertDictEqual(trial2b, trial3b)
if __name__ == "__main__":
tf.test.main()
|
import os
import random
import glob
import numpy as np
import cv2
import torch
from skimage import io, transform
from torch.utils.data import Dataset, DataLoader
class DogCat(Dataset):
def __init__(self, root_path, training=False):
self.image_file_list = glob.glob(os.path.join(root_path, "*.jpg"))
def __getitem__(self, index):
image_path = self.image_file_list[index]
label = "0" if os.path.basename(image_path).split(".")[0] == "cat" else "1"
image = io.imread(image_path)
image = transform.resize(image, (256, 256, 3))
image = torch.from_numpy(image)
return image, label
def __len__(self):
return len(self.image_file_list)
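# Sketch of a training-oriented sample builder (an added example, not the class's original
# contract): PyTorch models usually expect a float32 CHW tensor and an integer label,
# whereas __getitem__ above returns a float64 HWC tensor and a string label.
def _to_training_sample(image_path):
    label = 0 if os.path.basename(image_path).split(".")[0] == "cat" else 1
    image = io.imread(image_path)
    image = transform.resize(image, (256, 256, 3))
    image = torch.from_numpy(image).permute(2, 0, 1).float()  # HWC -> CHW, float32
    return image, torch.tensor(label)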
if __name__ == "__main__":
root_path = r"E:\LiHui\datasets\cat_dog\train"
dog_cat_data = DogCat(root_path, training=True)
train_loader = DataLoader(dog_cat_data, batch_size=4, shuffle=True, num_workers=4)
for idx, (image, label) in enumerate(train_loader):
print(image.shape)
|
import sys
print('version is', sys.version)
|
from .dawa import DAWA
|
"""
Quartic Polynomial
"""
import numpy as np
class QuarticPolynomial:
def __init__(self, x0, v0, a0, v1, a1, T):
A = np.array([[3 * T ** 2, 4 * T ** 3],
[6 * T, 12 * T ** 2]])
b = np.array([v1 - v0 - a0 * T,
a1 - a0])
X = np.linalg.solve(A, b)
self.a0 = x0
self.a1 = v0
self.a2 = a0 / 2.0
self.a3 = X[0]
self.a4 = X[1]
def calc_xt(self, t):
xt = self.a0 + self.a1 * t + self.a2 * t ** 2 + \
self.a3 * t ** 3 + self.a4 * t ** 4
return xt
def calc_dxt(self, t):
xt = self.a1 + 2 * self.a2 * t + \
3 * self.a3 * t ** 2 + 4 * self.a4 * t ** 3
return xt
def calc_ddxt(self, t):
xt = 2 * self.a2 + 6 * self.a3 * t + 12 * self.a4 * t ** 2
return xt
def calc_dddxt(self, t):
xt = 6 * self.a3 + 24 * self.a4 * t
return xt
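# Usage sketch (added example with arbitrary values): fit a quartic that starts at x0=0
# with v0=0 and a0=1, and must reach v1=2 with a1=0 after T=3 seconds, then check the
# boundary conditions at t = T.
if __name__ == "__main__":
    qp = QuarticPolynomial(x0=0.0, v0=0.0, a0=1.0, v1=2.0, a1=0.0, T=3.0)
    print(qp.calc_xt(3.0))    # position at t = T
    print(qp.calc_dxt(3.0))   # velocity at t = T, ~2.0 by construction
    print(qp.calc_ddxt(3.0))  # acceleration at t = T, ~0.0 by construction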
|
import re
import unittest
import helpers as h
class Md2htmlTemplateIntegralTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.OUTPUT_DIR = h.prepare_output_directory(cls.__name__)
def test_empty_title(self):
root = h.execute_simple(f'{h.INPUT_DIR}/any_content.txt',
f'{self.OUTPUT_DIR}/empty_title_test.html',
f'{h.INPUT_DIR}/test_template_title.html')
self.assertEqual('', root.head.title.text)
def test_not_empty_title_cli(self):
output_file = f'{self.OUTPUT_DIR}/not_empty_title_cli_test.html'
root = h.execute(['-f', '-i', f'{h.INPUT_DIR}/any_content.txt', '-o', output_file,
'--template', f'{h.INPUT_DIR}/test_template_title.html',
'--title', 'test title from CLI'],
output_file)
self.assertEqual('test title from CLI', root.head.title.text)
def test_not_empty_title_metadata(self):
output_file = f'{self.OUTPUT_DIR}/not_empty_title_metadata_test.html'
root = h.execute(['-f', '-i', f'{h.INPUT_DIR}/not_empty_title_metadata_test.txt',
'-o', output_file, '--legacy-mode',
'--template', f'{h.INPUT_DIR}/test_template_title.html'],
output_file)
self.assertEqual('test title from metadata', root.head.title.text)
def test_not_empty_title_cli_overridden(self):
output_file = f'{self.OUTPUT_DIR}/not_empty_title_cli_overridden_test.html'
root = h.execute(['-f', '-i', f'{h.INPUT_DIR}/not_empty_title_metadata_test.txt',
'-o', output_file,
'--template', f'{h.INPUT_DIR}/test_template_title.html',
'--title', 'test title from CLI overridden'],
output_file)
self.assertEqual('test title from CLI overridden', root.head.title.text)
def test_no_css(self):
output_file = f'{self.OUTPUT_DIR}/no_css_test.html'
root = h.execute(['-f', '-i', f'{h.INPUT_DIR}/any_content.txt', '-o', output_file,
'--template', f'{h.INPUT_DIR}/test_template_styles.html',
'--no-css'], output_file)
self.assertIsNone(root.head.link)
self.assertIsNone(root.head.style)
def test_link_css(self):
output_file = f'{self.OUTPUT_DIR}/link_css_test.html'
root = h.execute(['-f', '-i', f'{h.INPUT_DIR}/any_content.txt', '-o', output_file,
'--template', f'{h.INPUT_DIR}/test_template_styles.html',
'--link-css', '../../../test_input/test_styles.css'],
output_file)
self.assertIsNone(root.head.style)
link = root.head.link
self.assertEqual(['stylesheet'], link['rel'])
self.assertEqual('text/css', link['type'])
self.assertEqual('../../../test_input/test_styles.css', link['href'])
def test_include_css(self):
output_file = f'{self.OUTPUT_DIR}/include_css_test.html'
root = h.execute(['-f', '-i', f'{h.INPUT_DIR}/any_content.txt', '-o', output_file,
'--template', f'{h.INPUT_DIR}/test_template_styles.html',
'--include-css',
str(h.WORKING_DIR.joinpath('test_input/test_styles.css'))],
output_file)
self.assertIsNone(root.head.link)
style = root.head.style
self.assertEqual('body {background-color: burlywood;}', style.contents[0].strip())
def test_placeholders(self):
output_file = f'{self.OUTPUT_DIR}/placeholders_test.html'
root = h.execute(['-f', '-i', f'{h.INPUT_DIR}/placeholders_test.txt', '-o', output_file,
'--template', f'{h.INPUT_DIR}/test_template_placeholders.html',
'--no-css', '--legacy-mode'],
output_file)
        pattern = re.compile(r'\d')
paragraphs = root.body.find_all('p')
self.assertEqual(5, len(paragraphs))
self.assertEqual('Generator name: md2html_', paragraphs[0].text[0:24])
self.assertEqual('Generator version: X.X.X', pattern.sub('X', paragraphs[1].text))
self.assertEqual('Generation date: XXXX-XX-XX', pattern.sub('X', paragraphs[2].text))
self.assertEqual('Generation time: XX:XX:XX', pattern.sub('X', paragraphs[3].text))
self.assertEqual('Custom value: test custom value', paragraphs[4].text)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
try:
from unittest import mock
except ImportError:
import mock
import pytest
from tdclient import api
from tdclient.test.test_helper import *
def setup_function(function):
unset_environ()
def test_grant_access_control_success():
td = api.API("APIKEY")
td.post = mock.MagicMock(return_value=make_response(200, b""))
access_controls = td.grant_access_control("foo", "bar", "baz", "hoge")
td.post.assert_called_with("/v3/acl/grant", {"subject": "foo", "action": "bar", "scope": "baz", "grant_option": "hoge"})
def test_revoke_access_control_success():
td = api.API("APIKEY")
td.post = mock.MagicMock(return_value=make_response(200, b""))
access_controls = td.revoke_access_control("foo", "bar", "baz")
td.post.assert_called_with("/v3/acl/revoke", {"subject": "foo", "action": "bar", "scope": "baz"})
def test_test_access_control_success():
td = api.API("APIKEY")
# TODO: should be replaced by wire dump
body = b"""
{
"permission": "",
"access_controls": [
{"subject":"foo","action":"bar","scope":"baz"},
{"subject":"bar","action":"bar","scope":"baz"},
{"subject":"baz","action":"bar","scope":"baz"}
]
}
"""
td.get = mock.MagicMock(return_value=make_response(200, body))
access_controls = td.test_access_control("foo", "bar", "baz")
td.get.assert_called_with("/v3/acl/test", {"user": "foo", "action": "bar", "scope": "baz"})
def test_list_access_controls_success():
td = api.API("APIKEY")
# TODO: should be replaced by wire dump
body = b"""
{
"access_controls":[
{"subject":"foo","action":"","scope":"","grant_option":""},
{"subject":"bar","action":"","scope":"","grant_option":""},
{"subject":"baz","action":"","scope":"","grant_option":""}
]
}
"""
td.get = mock.MagicMock(return_value=make_response(200, body))
access_controls = td.list_access_controls()
td.get.assert_called_with("/v3/acl/list")
assert len(access_controls) == 3
def test_list_access_controls_failure():
td = api.API("APIKEY")
td.get = mock.MagicMock(return_value=make_response(500, b"error"))
with pytest.raises(api.APIError) as error:
td.list_access_controls()
assert error.value.args == ("500: Listing access control failed: error",)
|
from gazette.spiders.base.fecam import FecamGazetteSpider
class ScCorreiaPintoSpider(FecamGazetteSpider):
name = "sc_correia_pinto"
FECAM_QUERY = "cod_entidade:77"
TERRITORY_ID = "4204558"
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE (BlueKing basic computing platform) available.
Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import json
import pytz
from django.conf import settings
from django.core.cache import cache
from django.utils import timezone
from django.utils.deprecation import MiddlewareMixin
from apps.api import BKLoginApi
class I18NMiddleware(MiddlewareMixin):
"""
    Internationalization middleware: fetches the user's configured timezone from BK_LOGIN.
"""
def process_view(self, request, view, args, kwargs):
login_exempt = getattr(view, "login_exempt", False)
if login_exempt:
return None
user = request.user.username
user_info = cache.get("{user}_user_info".format(user=user))
if user_info is None:
try:
user_info = BKLoginApi.get_user({"username": user})
cache.set("{user}_user_info".format(user=user), json.dumps(user_info), 30)
except Exception:
user_info = {}
else:
user_info = json.loads(user_info)
tzname = user_info.get("time_zone", settings.TIME_ZONE)
timezone.activate(pytz.timezone(tzname))
request.session["bluking_timezone"] = tzname
|
# Spells and abilities
import pyglet
import logging
import math
import os
import glob
# from main import logging, pyglet
from functions import *
from enemy import Enemy
from dungeon_generator import Wall, Collidable
ROOT = os.path.dirname(__file__)
ABILITY_PATH = os.path.join(ROOT, "ability_files/")
class UsableAbility:
"""Constructor for player abilities."""
def __init__(self, origin=None):
self.owner = None
self.origin = origin
self.range = 0
self.target = None
self.cooldown_timer = 0
self.ability_attr = dict(
lvl=0,
max_lvl=99,
name="",
type=None,
magic_type=None,
cost=0,
cost_type=None,
range=0,
hit_range=0,
speed=0,
cast_time=0,
move_interrupt=False,
penetration=False,
cooldown=0
)
self.target_effects_base = dict(
dmg=0,
dmg_type="physical",
heal=0,
stun=0,
slow=0,
aoe=0,
aoe_dmg=0,
dot=None,
dot_dmg=None,
hot=None,
hot_dmg=None,
penetration=0,
crit=0
)
def get_cost(self, type=None):
if type:
pass
else:
pass
def get_name(self):
if self.ability_attr["name"]:
return "{0} lvl. {1}".format(
self.ability_attr["name"],
self.ability_attr["lvl"]
)
else:
return "UNKNOWN"
def get_effect_list(self):
"""Returns a list of effects the ability has in formatted strings."""
        effects = []
        for e, val in self.target_effects.items():
            if val:
                effects.append(
                    "{0}: {1}".format(
                        translate_stat(e), val
                    )
                )
        return effects
def levelup(self, lvl=False):
"""Level up ability, optional argument defines what level."""
if lvl and lvl <= self.ability_attr["max_lvl"] and lvl > 0:
self.ability_attr["lvl"] = lvl
elif self.ability_attr["lvl"] < self.ability_attr["max_lvl"]:
self.ability_attr["lvl"] += 1
else:
logging.info("Skill is at its max level already.")
self.apply_level_scaling()
def apply_level_scaling(self):
lvl = self.ability_attr["lvl"]
if hasattr(self, "level_scale_attr"):
for key, value in self.level_scale_attr.items():
try:
self.ability_attr[key] = value[lvl - 1]
except IndexError:
logging.warning("Ability has no value for that lvl.")
if hasattr(self, "level_scale_effect"):
print("YOOOHO")
def apply_modifiers(self):
if self.owner:
self.target_effects = self.target_effects_base.copy()
if self.ability_attr["type"] == "spell":
sp = self.owner.stats.get("sp")
self.target_effects["dmg"] += sp
if self.target_effects["dot_dmg"]:
self.target_effects["dot_dmg"] += sp * 2
crit = self.owner.attack_effects["crit"]
self.target_effects["crit"] += crit
def update(self):
self.apply_modifiers()
def do_cost(self, dry=False):
if self.ability_attr["cost_type"] == "mp":
if self.owner.mp >= self.ability_attr["cost"]:
if not dry:
self.owner.mp -= self.ability_attr["cost"]
return True
else:
logging.info("Not enough mana for that ability.")
return False
elif self.ability_attr["cost_type"] == "sta":
if self.owner.sta >= self.ability_attr["cost"]:
if not dry:
self.owner.sta -= self.ability_attr["cost"]
return True
else:
logging.info("Not enough stamina for that ability.")
return False
def use(self, target=None):
success = False
if not self.owner.cast_object:
if self.ability_attr["cast_time"]:
if self.do_cost(dry=True):
self.owner.halt_movement()
self.target = target
self.owner.cast_object = Cast(self)
success = True
else:
self.target = target
success = self.cast()
else:
logging.info("Player is busy casting another ability.")
return success
def cast(self):
success = False
self.owner.cast_object = None
if hasattr(self, "custom_action"):
self.custom_action(self.target)
success = True
elif isinstance(self, ProjectileAbility):
if self.target:
if self.do_cost():
if isinstance(self, MultiProjectileAbility):
self.fire_multiple(self.target)
else:
p1 = (self.owner.x, self.owner.y)
p2 = self.target
angle = get_angle(*p1, *p2)
self.fire(angle)
success = True
else:
logging.debug("No target for projectile, aborting.")
return success
class Cast:
def __init__(self, ability):
self.ability = ability
self.time = ability.ability_attr["cast_time"]
self.timer = 0
def update(self, dt):
# self.ability.target = self.ability.owner.window.get_windowpos(
# *self.ability.target
# )
self.timer += dt
if self.timer >= self.time:
self.ability.cast()
class SingleTargetAbility(UsableAbility):
"""Constructor for abilities that require a target."""
def __init__(self):
super().__init__()
class ProjectileAbility(UsableAbility):
"""Constructor for projectile based abilities"""
def __init__(self):
super().__init__()
def fire(self, angle):
try:
o = self.owner
# logging.debug("Firing a projectile at {0}".format(target))
p = Projectile(
(o.x, o.y), angle,
self.ability_attr,
self.target_effects, o, self.projectile_tex
)
if hasattr(self, "impact_anim"):
p.impact_anim = self.impact_anim
if hasattr(self, "impact_anim_scale"):
p.impact_anim_scale = self.impact_anim_scale
if hasattr(self, "projectile_anim"):
p.projectile_anim = self.projectile_anim
if hasattr(self, "projectile_anim_scale"):
p.projectile_anim_scale = self.projectile_anim_scale
p.do_fire_anim()
o.child_objects.append(p)
except AttributeError as e:
logging.error(e)
class MultiProjectileAbility(ProjectileAbility):
"""Constructor for projectile based abilities with multiple projectiles"""
def __init__(self):
super().__init__()
def fire_multiple(self, target):
p1 = (self.owner.x, self.owner.y)
p2 = target
mid_angle = math.degrees(get_angle(*p1, *p2))
begin_spread = -self.spread
for i in range(self.projectile_count):
angle = math.radians(mid_angle + begin_spread + self.spread * i)
self.fire(angle)
class Projectile:
def __init__(
self, source, angle, attributes, effects, owner, texture
):
(self.x, self.y) = source
self.speed = attributes["speed"]
self.range = attributes["range"]
self.penetration = attributes["penetration"]
self.blacklist = []
self.distance_travelled = 0
self.target_effects = effects
self.owner = owner
self.game = owner.game
self.sprite = None
self.anim = None
self.angle = angle
if self.game.window: # Projectile needs window to get real position
self.window = self.game.window
p1 = source
x, y = self.window.get_windowpos(*p1, precise=True)
if texture:
self.sprite = pyglet.sprite.Sprite(
texture,
x=x, y=y,
batch=self.window.batches["projectiles"],
subpixel=True,
)
# self.sprite.image.anchor_y = 0
if hasattr(texture, "scale"):
self.sprite.scale = texture.scale
self.sprite.rotation = math.degrees(angle)
else:
self.window = None
def do_fire_anim(self):
if hasattr(self, "projectile_anim"):
self.sprite.batch = None
pos = self.window.get_windowpos(self.x, self.y, precise=True)
self.anim = self.owner.window.get_anim(self.projectile_anim)
self.anim.owner = self
self.anim.animator.set_anchor(self.anim._animation, y=-12)
self.anim.animator.set_duration(
self.anim._animation, 1.0 / (self.speed // 10)
)
self.anim.rotation = math.degrees(self.angle) - 90
self.anim.position = pos
if hasattr(self, "projectile_anim_scale"):
self.anim.scale = self.projectile_anim_scale
else:
self.anim.scale = 1.0
def check_hit(self):
enemies = self.game.spatial_hash.get_objects_from_point(
(self.x, self.y), radius=32, type=Enemy
)
for e in enemies:
if e not in self.blacklist:
if check_point_rectangle(self.x, self.y, e.rectangle):
e.do_effect(self.target_effects, origin=self.owner)
if hasattr(self, "impact_anim"):
pos = self.owner.window.get_windowpos(
self.x, self.y, precise=True
)
if hasattr(self, "impact_anim_scale"):
scale = self.impact_anim_scale
else:
scale = 1.0
self.owner.window.animator.spawn_anim(
self.impact_anim, pos, scale=scale,
rotation=math.degrees(self.angle)
)
logging.debug("Projectile hit target!")
if self.penetration > 0:
if e.hp > 0:
self.blacklist.append(e)
return True
walls = self.game.spatial_hash.get_objects_from_point(
(self.x, self.y), type=Collidable
)
for w in walls:
r = create_rect(*w.p1, 32, 32)
if check_point_rectangle(self.x, self.y, r):
self.penetration = 0
return True
else:
return False
def update(self, dt):
if self.check_hit():
if self.penetration <= 0:
self.blacklist = []
self.owner.child_objects.remove(self)
else:
self.penetration -= 1
elif self.distance_travelled >= self.range:
self.blacklist = []
self.owner.child_objects.remove(self)
else:
r = self.speed * dt
self.distance_travelled += r
self.x += r*math.cos(self.angle)
self.y -= r*math.sin(self.angle)
if self.sprite and self.window:
x, y = self.window.get_windowpos(self.x, self.y, precise=True)
self.sprite.x = x
self.sprite.y = y
if self.anim and self.window:
x, y = self.window.get_windowpos(self.x, self.y, precise=True)
self.anim.position = (x, y)
self.anim.draw()
if not self.anim and not self.sprite.batch:
self.sprite.batch = self.window.batches["projectiles"]
class DoT:
"""Constructor for DoT (damage over time) objects"""
def __init__(self, owner, dmg, time, tick, origin=None, atype="spell", dtype="none"):
self.owner = owner
self.origin = origin
self.tick = tick
self.ability_type = atype
self.target_effects = dict(
dmg=dmg,
dmg_type=dtype,
stun=0,
slow=0,
aoe=0,
aoe_dmg=0,
dot=None,
)
self.time = tick
self.ticks = int(time / tick)
tick_dmg = self.target_effects["dmg"] / self.ticks
self.target_effects["dmg"] = tick_dmg
def do_effect(self):
self.owner.do_effect(self.target_effects, self.origin)
if hasattr(self, "tick_effect"):
if hasattr(self, "tick_effect_scale"):
scale = self.tick_effect_scale
else:
scale = 1.0
pos = self.owner.window.get_windowpos(
self.owner.x, self.owner.y, precise=True
)
self.owner.window.animator.spawn_anim(
self.tick_effect, pos, scale=scale
)
def update(self, dt):
if self.ticks:
if self.time > 0:
self.time -= dt
else:
# Adds current time to timer in case it's negative
self.time = self.tick + self.time
self.ticks -= 1
self.do_effect()
else:
self.owner.active_effects.remove(self)
# Reads all python files in the ability directory and executes them,
# adding the abilities to the game
for ability_file in glob.glob(ABILITY_PATH + '*.py'):
exec(open(ability_file).read(), globals())
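# Alternative loading sketch (an assumption, not how the game currently loads abilities):
# the same directory scan can go through importlib, which keeps every ability file in its
# own module namespace and makes tracebacks point at the right source file.
#
#     import importlib.util
#     for ability_file in glob.glob(ABILITY_PATH + '*.py'):
#         name = os.path.splitext(os.path.basename(ability_file))[0]
#         spec = importlib.util.spec_from_file_location(name, ability_file)
#         module = importlib.util.module_from_spec(spec)
#         spec.loader.exec_module(module)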
|
v1 = float(input('Qual o preço do produto? '))
print('O preço com desconto é R${:.2f}'.format(v1*0.95))
|
# -*- coding: utf-8 -*-
"""
Define a number of distributions that can be used by the sampler to draw
lensing parameters.
This module contains classes that define distributions that can be effecitvely
sampled from.
"""
import numpy as np
from scipy.stats import truncnorm
class MultivariateLogNormal():
"""Class for drawing multiple parameters from a multivariate log normal
distribution.
Args:
mean (np.array): The mean value of the log normal distribution
covariance (np.array): The covariance matrix of the distribution
Notes:
Returned values will follow the distribution exp(normal(mu,covariance))
"""
def __init__(self,mean,covariance):
# Make sure the dimension of the mean allows for easy matrix
# operations
if len(mean.shape)==1:
mean = np.expand_dims(mean,axis=0)
self.mean = mean
self.covariance = covariance
self.L = np.linalg.cholesky(self.covariance)
def __call__(self,n=1):
"""Returns n draws from the distribution
Args:
n (int): The number of draws.
Returns
(np.array): n x len(mean) array of draws.
"""
rand_draw = np.random.randn(self.mean.shape[1]*n).reshape(
(self.mean.shape[1],n))
return np.exp(self.mean.T+np.dot(self.L,rand_draw)).T
class TruncatedMultivariateNormal():
"""Class for drawing multiple parameters from a truncated multivariate
normal distribution.
Args:
mean (np.array): The mean value of the log normal distribution
covariance (np.array): The covariance matrix of the distribution
min_values (np.array): The minimum value for each parameter
max_values (np.array): The maximum value for each parameter
Notes:
Note this code uses rejection sampling, which may be slow if most
of the multivariate normal's pdf is outside the bounds. Be careful when
using this in high dimensions.
"""
def __init__(self,mean,covariance,min_values=None,max_values=None):
# Make sure that each of the n-dimensional inputs follows
# the desired shape for matrix calculations
if len(mean.shape)==1:
mean = np.expand_dims(mean,axis=0)
# If none for min_values, set to negative infinity
if min_values is None:
min_values = np.ones(mean.shape) * -np.inf
elif len(min_values.shape)==1:
min_values = np.expand_dims(min_values,axis=0)
# If none for max_values, set to positive infinity
if max_values is None:
max_values = np.ones(mean.shape) * np.inf
elif len(max_values.shape)==1:
max_values = np.expand_dims(max_values,axis=0)
self.mean = mean
self.covariance = covariance
self.L = np.linalg.cholesky(self.covariance)
self.min_values = min_values
self.max_values = max_values
def __call__(self,n=1):
"""Returns n draws from the distribution
Args:
n (int): The number of draws.
Returns
(np.array): n x len(mean) array of draws.
"""
# Start with a regular draw
n_accepted = 0
n_samp = n
keep_draws = np.zeros((n,self.mean.shape[1]))
rand_draw = np.random.randn(self.mean.shape[1]*n_samp).reshape(
(self.mean.shape[1],n_samp))
draws = (self.mean.T+np.dot(self.L,rand_draw)).T
# Check which draws are within our bounds
        keep_ind = np.prod(draws > self.min_values,axis=-1,dtype=bool)
        keep_ind *= np.prod(draws < self.max_values,axis=-1,dtype=bool)
keep_draws[n_accepted:n_accepted+np.sum(keep_ind)] = draws[keep_ind]
n_accepted += np.sum(keep_ind)
# Keep drawing until we have enough samples.
while n_accepted<n:
# Draw
rand_draw = np.random.randn(self.mean.shape[1]*n_samp).reshape(
(self.mean.shape[1],n_samp))
draws = (self.mean.T+np.dot(self.L,rand_draw)).T
# Check for the values in bounds
            keep_ind = np.prod(draws > self.min_values,axis=-1,dtype=bool)
            keep_ind *= np.prod(draws < self.max_values,axis=-1,dtype=bool)
# Only use the ones we need
use_keep = np.minimum(n-n_accepted,np.sum(keep_ind))
keep_draws[n_accepted:n_accepted+use_keep] = (
draws[keep_ind][:use_keep])
n_accepted += use_keep
# Remove first dimension for the n=1 case
if n==1:
keep_draws = np.squeeze(keep_draws)
return keep_draws
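# Usage sketch (numbers invented for illustration): draw two-dimensional samples centred
# on (1, 2) that the rejection loop above forces to stay positive in both coordinates.
#
#     dist = TruncatedMultivariateNormal(
#         mean=np.array([1.0, 2.0]),
#         covariance=np.eye(2),
#         min_values=np.zeros(2))
#     samples = dist(n=100)   # shape (100, 2), every entry > 0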
class EllipticitiesTranslation():
"""Class that takes in distributions for q_lens and phi_lens, returns
samples of e1 and e2 correspondingly
Args:
q_dist (scipy.stats.rv_continuous.rvs or float): distribution for
axis ratio (can be callable or constant)
phi_dist (scipy.stats.rv_continuous.rvs or float): distribution for
orientation angle in radians (can be callable or constant)
Notes:
"""
def __init__(self,q_dist,phi_dist):
self.q_dist = q_dist
self.phi_dist = phi_dist
def __call__(self):
"""Returns a sample of e1,e2
Returns:
(float,float): samples of x-direction ellipticity
eccentricity, xy-direction ellipticity eccentricity
"""
if callable(self.q_dist):
q = self.q_dist()
else:
q = self.q_dist
if callable(self.phi_dist):
phi = self.phi_dist()
else:
phi = self.phi_dist
e1 = (1 - q)/(1+q) * np.cos(2*phi)
e2 = (1 - q)/(1+q) * np.sin(2*phi)
return e1,e2
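# Worked example (illustrative values only): with a constant axis ratio q = 0.5 and
# orientation phi = 0, the mapping above gives e1 = (1 - 0.5)/(1 + 0.5) = 1/3 and e2 = 0,
# i.e. the full ellipticity lies along the x-direction.
#
#     draw_e1_e2 = EllipticitiesTranslation(q_dist=0.5, phi_dist=0.0)
#     e1, e2 = draw_e1_e2()   # -> (0.333..., 0.0)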
class ExternalShearTranslation():
"""Class that maps samples of gamma_ext, phi_ext distributions to
gamma1, gamma2
Args:
gamma_dist (scipy.stats.rv_continuous.rvs or float): distribution for
external shear modulus (callable or constant)
phi_dist (scipy.stats.rv_continuous.rvs or float): distribution for
orientation angle in radians (callable or constant)
Notes:
"""
def __init__(self, gamma_dist,phi_dist):
self.gamma_dist = gamma_dist
self.phi_dist = phi_dist
def __call__(self):
"""Returns gamma1, gamma2 samples
Returns:
(float,float): samples of external shear coordinate values
"""
if callable(self.gamma_dist):
gamma = self.gamma_dist()
else:
gamma = self.gamma_dist
if callable(self.phi_dist):
phi = self.phi_dist()
else:
phi = self.phi_dist
gamma1 = gamma * np.cos(2*phi)
gamma2 = gamma * np.sin(2*phi)
return gamma1,gamma2
class KappaTransformDistribution():
"""Class that samples Kext given 1 / (1-Kext) ~ n. n is sampled from a
distribution given by n_dist, then Kext is computed given the
transformation
Args:
n_dist (scipy.stats.rv_continuous.rvs or float): distribution for
1 / (1-Kext) (can be callable or constant)
"""
def __init__(self,n_dist):
self.n_dist = n_dist
def __call__(self):
"""Samples 1/(1-Kext), then maps that sample to Kext value
Returns:
(float): Kext sample
"""
if callable(self.n_dist):
n = self.n_dist()
else:
n = self.n_dist
return 1 - (1/n)
class Duplicate():
"""Class that returns two copies of the same random draw.
Args:
dist (scipy.stats.rv_continuous.rvs or float): The distribution to
draw the sample from.
"""
def __init__(self,dist):
self.dist = dist
def __call__(self):
"""Returns two copies of the same sample
Returns
(float,float): Two copies of the sample.
"""
if callable(self.dist):
samp = self.dist()
else:
samp = self.dist
return samp,samp
class DuplicateXY():
"""Class that returns two copies of x, y coordinates drawn from
distributions
Args:
x_dist (scipy.stats.rv_continuous.rvs or float): distribution for x
(can be callable or constant)
y_dist (scipy.stats.rv_continuous.rvs or float): distribution for y
(can be callable or constant)
"""
def __init__(self,x_dist,y_dist):
self.x_dist = x_dist
self.y_dist = y_dist
def __call__(self):
"""Returns two copies of x,y sample
Returns
(float,float,float,float): Two copies of x,y sampled from x_dist
and y_dist
"""
if callable(self.x_dist):
x = self.x_dist()
else:
x = self.x_dist
if callable(self.y_dist):
y = self.y_dist()
else:
y = self.y_dist
return x,y,x,y
class RedshiftsTruncNorm():
"""Class that samples z_lens and z_source from truncated normal
distributions, forcing z_source > z_lens to be true
Args:
z_lens_min (float): minimum allowed lens redshift
z_lens_mean (float): lens redshift mean
z_lens_std (float): lens redshift standard deviation
z_source_min (float): minimum allowed source redshift
z_source_mean (float): source redshift mean
z_source_std (float): source redshift standard deviation
"""
def __init__(self, z_lens_min,z_lens_mean,z_lens_std,z_source_min,
z_source_mean,z_source_std):
# transform z_lens_min, z_source_min to be in units of std. deviations
self.z_lens_min = (z_lens_min - z_lens_mean) / z_lens_std
self.z_source_min = (z_source_min - z_source_mean) / z_source_std
# define truncnorm dist for lens redshift
self.z_lens_dist = truncnorm(self.z_lens_min,np.inf,loc=z_lens_mean,
scale=z_lens_std).rvs
# save z_source info
self.z_source_mean = z_source_mean
self.z_source_std = z_source_std
def __call__(self):
"""Returns samples of redshifts, ensuring z_source > z_lens
Returns:
(float,float): z_lens,z_source
"""
z_lens = self.z_lens_dist()
clip = (z_lens - self.z_source_mean) / self.z_source_std
# number of std. devs away to stop (negative)
if(clip > self.z_source_min):
self.z_source_min = clip
# define truncnorm dist for source redshift
z_source = truncnorm(self.z_source_min,np.inf,self.z_source_mean,
self.z_source_std).rvs()
return z_lens,z_source
class MultipleValues():
"""Class to call dist.rvs(size=num)
Args:
dist (scipy.stats.rv_continuous.rvs): callable distribution
num (int): number of samples to return in one call
"""
def __init__(self, dist, num):
self.dist = dist
self.num = num
def __call__(self):
"""Returns specified # of samples from dist
Returns:
list(float): |num| samples from dist
"""
return self.dist(size=self.num)
|
__________________________________________________________________________________________________
sample 32 ms submission
from functools import lru_cache
class Solution(object):
def leastOpsExpressTarget(self, x, target):
cost = list(range(40))
cost[0] = 2
@lru_cache(None)
def dp(i, targ):
if targ == 0:
return 0
if targ == 1:
return cost[i]
if i >= 39:
return float('inf')
t, r = divmod(targ, x)
return min(r * cost[i] + dp(i+1, t),
(x-r) * cost[i] + dp(i+1, t+1))
return dp(0, target) - 1
__________________________________________________________________________________________________
sample 36 ms submission
class Solution:
def leastOpsExpressTarget(self, x, target):
pos = neg = k = 0
while target:
target, cur = divmod(target, x)
if k:
pos, neg = min(cur * k + pos, (cur + 1) * k + neg), min((x - cur) * k + pos, (x - cur - 1) * k + neg)
else:
pos, neg = cur * 2, (x - cur) * 2
k += 1
return min(pos, k + neg) - 1
__________________________________________________________________________________________________
sample 48 ms submission
class Solution:
def leastOpsExpressTarget(self, x: int, target: int) -> int:
memo = {}
def helper(i, target):
if (i, target) not in memo:
# print(i, target)
c = 2 if i == 0 else i
if target == 0:
res = 0
elif target == 1:
res = c
elif target == x:
res = i + 1
else:
t, r = target // x, target % x
res = min(helper(i + 1, t) + c * r,
helper(i + 1, t + 1) + c * (x - r))
memo[(i, target)] = res
return memo[(i, target)]
return helper(0, target) - 1
|
import jwt
import datetime
from django.conf import settings
from rest_framework import authentication, exceptions
from .models import User
def generate_jwt_token(username):
"""
This method generates a jwt string with username encoded in it.
:params str username: A unique name for every user in the system
:returns: str JWT: A string with username name encoded in it.
"""
time = datetime.datetime.utcnow() + datetime.timedelta(seconds=86400)
token = jwt.encode({
"username": username,
"exp": time,
}, settings.SECRET_KEY, algorithm='HS256')
return token.decode('utf-8')
class JWTAuthentication(authentication.TokenAuthentication):
"""
    Authenticates a request whose 'Authorization' header carries a token prefixed
    with the keyword 'Bearer'.
    Example:
        Authorization: Bearer token-str-here
"""
keyword = 'Bearer'
def authenticate_credentials(self, token):
"""
Decode and check if token is valid and if so, then authenticate the user
:param token: the token as a string
:return: Tuple of the user object and non-user authentication
information
"""
try:
payload = jwt.decode(token, settings.SECRET_KEY)
try:
user = User.objects.get(username=payload['username'])
return user, None
except User.DoesNotExist:
return None, None
except jwt.exceptions.ExpiredSignatureError:
raise exceptions.AuthenticationFailed('Expired Token.')
except jwt.exceptions.InvalidTokenError:
raise exceptions.AuthenticationFailed('Invalid token')
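# Wiring sketch (the dotted path below is assumed; adjust it to wherever this module lives
# in the project): Django REST framework picks the class up through the standard setting,
# e.g. in settings.py:
#
#     REST_FRAMEWORK = {
#         'DEFAULT_AUTHENTICATION_CLASSES': (
#             'authentication.backends.JWTAuthentication',
#         ),
#     }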
|
import numpy as np
from acnportal.acnsim.interface import InfrastructureInfo, SessionInfo
def infrastructure_constraints_feasible(
rates: np.ndarray,
infrastructure: InfrastructureInfo,
linear: bool = False,
violation_tolerance: float = 1e-5,
relative_tolerance: float = 1e-7,
) -> bool:
""" Return if a set of current magnitudes for each load are feasible.
For a given constraint, the larger of the violation_tolerance
and relative_tolerance is used to evaluate feasibility.
This is a static version of acnsim.ChargingNetwork.is_feasible.
Args:
rates (np.ndarray): 2-D matrix with each row corresponding to
an EVSE and each column corresponding to a time index in the schedule.
infrastructure (InfrastructureInfo): The InfrastructureInfo object that contains
information about the network constraints.
linear (bool): If True, linearize all constraints to a more conservative
but easier to compute constraint by ignoring the phase angle and taking
the absolute value of all load coefficients. Default False.
violation_tolerance (float): Absolute amount by which
rates may violate network constraints. Default 1e-5
relative_tolerance (float): Relative amount by which
schedule_matrix may violate network constraints. Default 1e-7
Returns:
bool: If load_currents is feasible according to this set of
constraints.
"""
tol = np.maximum(
violation_tolerance, relative_tolerance * infrastructure.constraint_limits
)
if not linear:
phase_in_rad = np.deg2rad(infrastructure.phases)
for j, v in enumerate(infrastructure.constraint_matrix):
a = np.stack([v * np.cos(phase_in_rad), v * np.sin(phase_in_rad)])
line_currents = np.linalg.norm(a @ rates, axis=0)
if not np.all(
line_currents <= infrastructure.constraint_limits[j] + tol[j]
):
return False
else:
for j, v in enumerate(infrastructure.constraint_matrix):
line_currents = np.linalg.norm(np.abs(v) @ rates, axis=0)
if not np.all(
line_currents <= infrastructure.constraint_limits[j] + tol[j]
):
return False
return True
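# Toy illustration of the phasor arithmetic above (numbers made up): a constraint with
# coefficients v = [1, 1] over two EVSEs on phases 0 deg and 120 deg, each drawing 16 A,
# gives a line current of |(16*(cos0 + cos120), 16*(sin0 + sin120))| = 16 A, whereas the
# linearized check (np.abs(v) @ rates) would conservatively report 32 A.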
def remaining_amp_periods(
session: SessionInfo, infrastructure: InfrastructureInfo, period: float
) -> float:
""" Return the session's remaining demand in A*periods. This function is a static
version of acnsim.Interface.remaining_amp_periods.
Args:
session (SessionInfo): The SessionInfo object for which to get remaining demand.
infrastructure (InfrastructureInfo): The InfrastructureInfo object that contains
voltage information about the network.
period (float): Period of the simulation in minutes.
Returns:
float: the EV's remaining demand in A*periods.
"""
i = infrastructure.get_station_index(session.station_id)
amp_hours = session.remaining_demand * 1000 / infrastructure.voltages[i]
return amp_hours * 60 / period
|
from tezos_etl import settings
from tezos_etl.session import (
setup_logging,
setup_gcs_buckets,
)
from tezos_etl.tezos_client import TezosClient
from tezos_etl.manage import (
get_last_block_number,
get_last_block_number_locally,
update_last_block_number,
update_last_block_number_locally,
initialize_block_batch,
update_block_batch,
)
import logging
setup_logging(settings)
logger = logging.getLogger(__name__)
raw_bucket, avro_bucket = setup_gcs_buckets(settings)
tezos_client = TezosClient(
settings.NODE_URL, settings.CHAIN_ID, raw_bucket, avro_bucket)
start_block_number = get_last_block_number_locally(settings.AVRO_LOCAL_STORAGE)
end_block_number = 10
batch_size = settings.BATCH_SIZE
for batch in range(start_block_number, end_block_number, batch_size):
all_batch_data = initialize_block_batch()
file_name = f"{batch}_{batch+batch_size-1}.avro"
for block_number in range(batch, batch+batch_size):
response = tezos_client.get_local_block_data(block_number)
all_block_data = tezos_client.transform_response(response)
all_batch_data = update_block_batch(all_batch_data, all_block_data)
tezos_client.save_avro_files_locally(file_name, all_batch_data)
update_last_block_number_locally(settings.AVRO_LOCAL_STORAGE, batch)
logger.info(f"Finished batch {batch/batch_size}")
|
import numba
import numpy as np
@numba.jit(nopython=True, parallel=True)
def plsa_numba(dt_row, dt_col, dt_val, topic_doc, term_topic, n_iter):
n_docs, n_topics = topic_doc.shape
n_terms = term_topic.shape[1]
nnz = len(dt_val)
topic_full = np.zeros((nnz, n_topics))
term_sum = np.zeros((n_topics))
doc_sum = np.zeros((n_docs))
for i in range(n_iter):
### Expectation ###
for idx in range(nnz):
p = np.zeros((n_topics))
d, t = dt_row[idx], dt_col[idx]
s = 0
for z in range(n_topics):
p[z] = topic_doc[d, z] * term_topic[z, t]
s += p[z]
for z in range(n_topics):
topic_full[idx, z] = p[z] / s
### Maximization ###
topic_doc[:] = 0
term_topic[:] = 0
term_sum[:] = 0
doc_sum[:] = 0
for idx in range(nnz):
for z in range(n_topics):
q = dt_val[idx] * topic_full[idx, z]
term_topic[z, dt_col[idx]] += q
term_sum[z] += q
topic_doc[dt_row[idx], z] += q
doc_sum[dt_row[idx]] += q
# Normalize P(topic | doc)
for d in range(n_docs):
for z in range(n_topics):
topic_doc[d, z] /= doc_sum[d]
# Normalize P(term | topic)
for z in range(n_topics):
for t in range(n_terms):
term_topic[z, t] /= term_sum[z]
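# Usage sketch (the toy counts and initialization below are invented for illustration):
# factorize a small document-term matrix given in COO form; plsa_numba normalizes
# topic_doc and term_topic in place, so random positive initial values are enough.
if __name__ == "__main__":
    dt = np.array([[2.0, 0.0, 1.0],
                   [0.0, 3.0, 1.0],
                   [1.0, 1.0, 0.0]])              # toy document-term counts
    rows, cols = np.nonzero(dt)                   # COO coordinates of the non-zeros
    vals = dt[rows, cols]
    n_docs, n_terms, n_topics = dt.shape[0], dt.shape[1], 2
    rng = np.random.default_rng(0)
    topic_doc = rng.random((n_docs, n_topics))    # P(topic | doc), unnormalized start
    term_topic = rng.random((n_topics, n_terms))  # P(term | topic), unnormalized start
    plsa_numba(rows, cols, vals, topic_doc, term_topic, 25)
    print(topic_doc.sum(axis=1))                  # each row sums to ~1 after the last M-step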
|
import os
import sys
import argparse
def create_file(integrator, scene, accelerator, npixelsamples, extra_args=[]):
path = str(sys.argv[0]).split("create_base_pbrt.py")[0]
path += "../../../scenes/" + scene
if os.path.exists(path + "/" + scene + ".pbrt"):
f = open(path + "/eval_base.pbrt", "w")
if integrator.startswith("metric"):
f.write("Sampler \"sobol\" \"integer pixelsamples\" 1\n\n")
intgr = integrator.split('=')
f.write("Integrator \"" + intgr[0] + "\"\n")
f.write("\"string metric\" \"" + intgr[1] + "\"\n\n")
else:
f.write("Sampler \"sobol\" \"integer pixelsamples\" " + str(npixelsamples) + "\n\n")
if integrator == "path":
f.write("Integrator \"path\" \"integer maxdepth\" 20\n\n")
else:
f.write("Integrator \"" + integrator + "\"\n\n")
f.write("Accelerator \"" + accelerator + "\"\n\n")
for line in extra_args:
if line != "":
var_type = line.split(':')[0]
var_info = line.split(':')[1].split('=')
pbrt_arg = "\"" + var_type + " " + var_info[0] + "\" "
if var_type == "string":
pbrt_arg += "\"" + var_info[1] + "\"\n"
else:
pbrt_arg += var_info[1] + "\n"
f.write(pbrt_arg)
f.write("\nInclude \"" + scene + ".pbrt\"\n\n")
f.close()
def exec():
integrator = str(sys.argv[1])
scene = str(sys.argv[2])
accelerator = str(sys.argv[3])
extra_args = str(sys.argv[4]).split(';')[1:]
parser = argparse.ArgumentParser(description="Creates base .pbrt file in specified folder")
parser.add_argument("--npixelsamples", type=int, default=4)
args, unknown = parser.parse_known_args()
create_file(integrator, scene, accelerator, args.npixelsamples, extra_args)
exec()
|
from .base import BaseNetwork
from .registry import registry
@registry.register('litecoin', 'LTC')
class Litecoin(BaseNetwork):
pubkey_address_prefix = 0x30
|
import base64
import six
from mongoengine import DictField, IntField, StringField, \
EmailField, BooleanField
from mongoengine.queryset import OperationError
from social_core.storage import UserMixin, AssociationMixin, NonceMixin, \
CodeMixin, PartialMixin, BaseStorage
UNUSABLE_PASSWORD = '!' # Borrowed from django 1.4
class MongoengineUserMixin(UserMixin):
"""Social Auth association model"""
user = None
provider = StringField(max_length=32)
uid = StringField(max_length=255, unique_with='provider')
extra_data = DictField()
def str_id(self):
return str(self.id)
@classmethod
def get_social_auth_for_user(cls, user, provider=None, id=None):
qs = cls.objects
if provider:
qs = qs.filter(provider=provider)
if id:
qs = qs.filter(id=id)
return qs.filter(user=user.id)
@classmethod
def create_social_auth(cls, user, uid, provider):
        if not isinstance(uid, six.string_types):
uid = str(uid)
return cls.objects.create(user=user.id, uid=uid, provider=provider)
@classmethod
def username_max_length(cls):
username_field = cls.username_field()
field = getattr(cls.user_model(), username_field)
return field.max_length
@classmethod
def username_field(cls):
return getattr(cls.user_model(), 'USERNAME_FIELD', 'username')
@classmethod
def create_user(cls, *args, **kwargs):
kwargs['password'] = UNUSABLE_PASSWORD
if 'email' in kwargs:
# Empty string makes email regex validation fail
kwargs['email'] = kwargs['email'] or None
return cls.user_model().objects.create(*args, **kwargs)
@classmethod
def allowed_to_disconnect(cls, user, backend_name, association_id=None):
if association_id is not None:
qs = cls.objects.filter(id__ne=association_id)
else:
qs = cls.objects.filter(provider__ne=backend_name)
qs = qs.filter(user=user)
if hasattr(user, 'has_usable_password'):
valid_password = user.has_usable_password()
else:
valid_password = True
return valid_password or qs.count() > 0
@classmethod
def changed(cls, user):
user.save()
def set_extra_data(self, extra_data=None):
if super(MongoengineUserMixin, self).set_extra_data(extra_data):
self.save()
@classmethod
def disconnect(cls, entry):
entry.delete()
@classmethod
def user_exists(cls, *args, **kwargs):
"""
Return True/False if a User instance exists with the given arguments.
Arguments are directly passed to filter() manager method.
"""
if 'username' in kwargs:
kwargs[cls.username_field()] = kwargs.pop('username')
return cls.user_model().objects.filter(*args, **kwargs).count() > 0
@classmethod
def get_username(cls, user):
return getattr(user, cls.username_field(), None)
@classmethod
def get_user(cls, pk):
try:
return cls.user_model().objects.get(id=pk)
except cls.user_model().DoesNotExist:
return None
@classmethod
def get_users_by_email(cls, email):
return cls.user_model().objects.filter(email__iexact=email)
@classmethod
def get_social_auth(cls, provider, uid):
if not isinstance(uid, six.string_types):
uid = str(uid)
try:
return cls.objects.get(provider=provider, uid=uid)
except cls.DoesNotExist:
return None
class MongoengineNonceMixin(NonceMixin):
"""One use numbers"""
server_url = StringField(max_length=255)
timestamp = IntField()
salt = StringField(max_length=40)
@classmethod
def use(cls, server_url, timestamp, salt):
return cls.objects.get_or_create(server_url=server_url,
timestamp=timestamp,
salt=salt)[1]
class MongoengineAssociationMixin(AssociationMixin):
"""OpenId account association"""
server_url = StringField(max_length=255)
handle = StringField(max_length=255)
secret = StringField(max_length=255) # Stored base64 encoded
issued = IntField()
lifetime = IntField()
assoc_type = StringField(max_length=64)
@classmethod
def store(cls, server_url, association):
# Don't use get_or_create because issued cannot be null
try:
assoc = cls.objects.get(server_url=server_url,
handle=association.handle)
except cls.DoesNotExist:
assoc = cls(server_url=server_url,
handle=association.handle)
assoc.secret = base64.encodestring(association.secret).decode()
assoc.issued = association.issued
assoc.lifetime = association.lifetime
assoc.assoc_type = association.assoc_type
assoc.save()
@classmethod
def get(cls, *args, **kwargs):
return cls.objects.filter(*args, **kwargs)
@classmethod
def remove(cls, ids_to_delete):
cls.objects.filter(pk__in=ids_to_delete).delete()
class MongoengineCodeMixin(CodeMixin):
email = EmailField()
code = StringField(max_length=32)
verified = BooleanField(default=False)
@classmethod
def get_code(cls, code):
try:
return cls.objects.get(code=code)
except cls.DoesNotExist:
return None
class MongoenginePartialMixin(PartialMixin):
token = StringField(max_length=32)
data = DictField()
extra_data = DictField()
next_step = IntField()
backend = StringField(max_length=32)
@classmethod
def load(cls, token):
try:
return cls.objects.get(token=token)
except cls.DoesNotExist:
return None
@classmethod
def destroy(cls, token):
partial = cls.load(token)
if partial:
partial.delete()
class BaseMongoengineStorage(BaseStorage):
user = MongoengineUserMixin
nonce = MongoengineNonceMixin
association = MongoengineAssociationMixin
code = MongoengineCodeMixin
partial = MongoenginePartialMixin
@classmethod
def is_integrity_error(cls, exception):
return exception.__class__ is OperationError and \
'E11000' in exception.message
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import third_party.pyperclip as clipboard
_clipList = []
def copy(text):
"""Add text onto clipList. Empty |text| is not stored."""
if text and len(text):
global _clipList
_clipList.append(text)
if clipboard.copy:
clipboard.copy(text)
def paste(clipIndex=None):
"""Fetch top of clipList; or clip at index |clipIndex|. The |clipIndex| will
wrap around if it's larger than the clipList length."""
if clipIndex is None:
osClip = clipboard.paste and clipboard.paste()
if osClip:
return osClip
# Get the top of the clipList instead.
clipIndex = -1
global _clipList
if len(_clipList):
return _clipList[clipIndex % len(_clipList)]
return None
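# Illustrative usage (an assumption, not part of the original module):
#   copy("first"); copy("second")
#   paste(1)  -> "second"   (index into the internal clip list)
#   paste(5)  -> "second"   (5 % 2 == 1, so the index wraps around)
#   paste()   -> OS clipboard contents if available, else the most recent clip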
|
# Natural Language Toolkit: Aligners
#
# Copyright (C) 2001-2011 NLTK Project
# Author:
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes and interfaces for aligning text.
"""
from api import *
from gale_church import *
__all__ = []
|
# incorrect solution
def get_neighbors(row_index, column_index, width, length):
neighbors = []
if row_index - 1 >= 0:
neighbors.append((row_index - 1, column_index))
if row_index + 1 < width:
neighbors.append((row_index + 1, column_index))
if column_index - 1 >= 0:
neighbors.append((row_index, column_index - 1))
if column_index + 1 < length:
neighbors.append((row_index, column_index + 1))
return neighbors
puzzle_input_list = []
with open("input.txt", "r") as puzzle_input:
for line in puzzle_input:
line = line.strip()
current_row = [int(digit) for digit in line]
puzzle_input_list.append(current_row)
low_points = []
width = len(puzzle_input_list)
length = 0
for row_index in range(len(puzzle_input_list)):
for column_index in range(len(puzzle_input_list[row_index])):
length = len(puzzle_input_list[row_index])
neighbors = get_neighbors(row_index, column_index, width, length)
low_point = True
for neighbor_row_index, neighbor_column_index in neighbors:
if low_point is False:
break
if puzzle_input_list[row_index][column_index] >= puzzle_input_list[neighbor_row_index][neighbor_column_index]:
low_point = False
if low_point is True:
low_points.append((row_index,column_index))
# for each low point, determine basin length (DFS out and get size of island)
basin_sizes = []
for row_index, column_index in low_points:
basin_size = 0
indices_to_visit = [(row_index, column_index)]
visited_indices = set()
while indices_to_visit:
row_index_to_visit, column_index_to_visit = indices_to_visit[0]
if (row_index_to_visit, column_index_to_visit) not in visited_indices:
basin_size += 1
visited_indices.add((row_index_to_visit, column_index_to_visit))
if puzzle_input_list[row_index_to_visit][column_index_to_visit] <= 7:
neighbors = get_neighbors(row_index_to_visit, column_index_to_visit, width, length)
for neighbor_row_index, neighbor_column_index in neighbors:
if (neighbor_row_index, neighbor_column_index) in visited_indices:
continue
if puzzle_input_list[neighbor_row_index][neighbor_column_index] - puzzle_input_list[row_index_to_visit][column_index_to_visit] == 1:
indices_to_visit.append((neighbor_row_index, neighbor_column_index))
del indices_to_visit[0]
basin_sizes.append(basin_size)
# determine 3 largest basins
basin_sizes.sort(reverse=True)
print(basin_sizes)
# return product of 3 largest basin sizes
print(basin_sizes[0] * basin_sizes[1] * basin_sizes[2])
|
"""Run control side-effect handler."""
import pytest
from decoy import Decoy
from opentrons.protocol_engine.state import StateStore, PauseAction
from opentrons.protocol_engine.execution.run_control import RunControlHandler
@pytest.fixture
def state_store(decoy: Decoy) -> StateStore:
"""Get a mocked out StateStore."""
return decoy.mock(cls=StateStore)
@pytest.fixture
def subject(state_store: StateStore) -> RunControlHandler:
"""Create a RunControlHandler with its dependencies mocked out."""
return RunControlHandler(state_store=state_store)
async def test_pause(
decoy: Decoy, state_store: StateStore, subject: RunControlHandler
) -> None:
"""It should be able to execute a pause."""
await subject.pause()
decoy.verify(
state_store.handle_action(PauseAction()),
await state_store.wait_for(condition=state_store.commands.get_is_running),
)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import Join, MapCompose, TakeFirst
from w3lib.html import remove_tags
class RevistaforumItem(scrapy.Item):
site = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
url = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
subTitle = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
title = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
abstract = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
content = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
autor = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
datePublished = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
|
import os
import matplotlib.pyplot as plt
import numpy as np
from heuslertools.cristallography import Crystal
from heuslertools.cristallography.mcif_parser import MCIFParser
from heuslertools.cristallography.mcrystal import MCrystal
from heuslertools.plotting import plot_functions
from heuslertools.plotting.mpl_helpers import set_size
from heuslertools.xrd.materials import CuMnSb
cumnsb = Crystal(CuMnSb)
cumnsb.crystal = MCrystal.from_mcif('/home/luke/Coding/python/heuslertools/examples/crystallography/data/mCuMnSb.cif')
print(cumnsb)
x, y, z, c, r = cumnsb.get_3D_lattice(2, 2, 2)
"""
################################# PLOTTING #####################################
"""
fig = plt.figure(figsize=set_size(10, cm=True, ratio=1))
ax = fig.add_subplot(projection='3d')
ax.set_box_aspect((np.ptp(x), np.ptp(y), np.ptp(z)))
ax.view_init(elev=8, azim=18)
ax.scatter(x, y, z, s=r*200, c=c)
ax.set_xlabel(r'[100]')
ax.set_ylabel(r'[010]')
ax.set_zlabel(r'[001]')
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
ax.axis('off')
cumnsb.show_crystal_boarder(ax, 1, 'k', 2)
ax.text(1, 0, 0, "Sb", horizontalalignment='center',
verticalalignment='center', color='white')
ax.text(1, 0, 0.5, "Mn", horizontalalignment='center',
verticalalignment='center', color='white')
ax.text(0.75, 0.25, 0.75, "Cu", horizontalalignment='center',
verticalalignment='center', color='black')
plt.show()
|
import numpy as np
import os
import sys
import glob
import cv2
import scipy.io
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def read_img(img_dir, img1_name, img2_name):
# print(os.path.join(img_dir, img1_name + '.png'))
return cv2.imread(os.path.join(img_dir, img1_name + '.png')), cv2.imread(os.path.join(img_dir, img2_name + '.png'))
def refinement_flow(fwd_flow, img1, img2):
flow_refine = cv2.VariationalRefinement.create()
refine_flow = flow_refine.calc(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY),
cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY),
fwd_flow)
return refine_flow
def make_color_wheel():
"""
Generate color wheel according Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
col += + BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def flow_to_image(flow, display=False):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
UNKNOWN_FLOW_THRESH = 100
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
# sqrt_rad = u**2 + v**2
rad = np.sqrt(u**2 + v**2)
maxrad = max(-1, np.max(rad))
if display:
print("max flow: %.4f\nflow range:\nu = %.3f .. %.3f\nv = %.3f .. %.3f" % (maxrad, minu,maxu, minv, maxv))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def warp_flow(img, flow):
h, w = flow.shape[:2]
flow_new = flow.copy()
flow_new[:,:,0] += np.arange(w)
flow_new[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv2.remap(img, flow_new, None, cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)
return res
def resize_flow(flow, img_h, img_w):
    flow_h, flow_w = flow.shape[0], flow.shape[1]
flow[:, :, 0] *= float(img_w)/float(flow_w)
flow[:, :, 1] *= float(img_h)/float(flow_h)
    flow = cv2.resize(flow, (img_w, img_h), interpolation=cv2.INTER_LINEAR)
return flow
def extract_poses(im):
R = im.qvec2rotmat()
t = im.tvec.reshape([3,1])
bottom = np.array([0,0,0,1.]).reshape([1,4])
m = np.concatenate([np.concatenate([R, t], 1), bottom], 0)
return m
def load_colmap_data(realdir):
import colmap_read_model as read_model
camerasfile = os.path.join(realdir, 'sparse/cameras.bin')
camdata = read_model.read_cameras_binary(camerasfile)
list_of_keys = list(camdata.keys())
cam = camdata[list_of_keys[0]]
print( 'Cameras', len(cam))
h, w, f = cam.height, cam.width, cam.params[0]
# w, h, f = factor * w, factor * h, factor * f
hwf = np.array([h,w,f]).reshape([3,1])
imagesfile = os.path.join(realdir, 'sparse/images.bin')
imdata = read_model.read_images_binary(imagesfile)
w2c_mats = []
# bottom = np.array([0,0,0,1.]).reshape([1,4])
names = [imdata[k].name for k in imdata]
img_keys = [k for k in imdata]
print( 'Images #', len(names))
perm = np.argsort(names)
return imdata, perm, img_keys, hwf
def skew(x):
return np.array([[0, -x[2], x[1]],
[x[2], 0, -x[0]],
[-x[1], x[0], 0]])
def compute_epipolar_distance(T_21, K, p_1, p_2):
R_21 = T_21[:3, :3]
t_21 = T_21[:3, 3]
E_mat = np.dot(skew(t_21), R_21)
# compute bearing vector
inv_K = np.linalg.inv(K)
F_mat = np.dot(np.dot(inv_K.T, E_mat), inv_K)
l_2 = np.dot(F_mat, p_1)
algebric_e_distance = np.sum(p_2 * l_2, axis=0)
n_term = np.sqrt(l_2[0, :]**2 + l_2[1, :]**2) + 1e-8
geometric_e_distance = algebric_e_distance/n_term
geometric_e_distance = np.abs(geometric_e_distance)
return geometric_e_distance
def read_optical_flow(basedir, img_i_name, read_fwd):
flow_dir = os.path.join(basedir, 'flow_i1')
fwd_flow_path = os.path.join(flow_dir, '%s_fwd.npz'%img_i_name[:-4])
bwd_flow_path = os.path.join(flow_dir, '%s_bwd.npz'%img_i_name[:-4])
if read_fwd:
fwd_data = np.load(fwd_flow_path)#, (w, h))
fwd_flow, fwd_mask = fwd_data['flow'], fwd_data['mask']
# fwd_mask = np.float32(fwd_mask)
# bwd_flow = np.zeros_like(fwd_flow)
return fwd_flow
else:
bwd_data = np.load(bwd_flow_path)#, (w, h))
bwd_flow, bwd_mask = bwd_data['flow'], bwd_data['mask']
# bwd_mask = np.float32(bwd_mask)
# fwd_flow = np.zeros_like(bwd_flow)
return bwd_flow
# return fwd_flow, bwd_flow#, fwd_mask, bwd_mask
|
from game.roles.villager import Villager
from game.text_template import *
class Cupid(Villager):
def __init__(self, interface, player_id, player_name):
super().__init__(interface, player_id, player_name)
self.power = 1
def get_power(self):
return self.power
def on_use_power(self):
self.power = 0
async def on_start_game(self, embed_data):
await self.interface.send_text_to_channel(generate_start_game_cupid(), self.channel_name)
await self.interface.send_embed_to_channel(embed_data, self.channel_name)
|
import numpy as np
from numpy.random import rand
# Import the new module.
from cython_ftridiag import cytridiag as ct
# Construct arguments to pass to the function.
n=10
a, b, c, x = rand(n-1), rand(n), rand(n-1), rand(n)
# Construct a matrix A to test that the result is correct.
A = np.zeros((n,n))
A.ravel()[A.shape[1]::A.shape[1]+1] = a
A.ravel()[::A.shape[1]+1] = b
A.ravel()[1::A.shape[1]+1] = c
# Store x so we can verify the algorithm returned
# the correct values
x_copy = x.copy()
# Call the function.
ct(a, b, c, x)
# Test to see if the result is correct.
# Print the result in the command line.
if np.absolute(A.dot(x) - x_copy).max() < 1E-12:
print "Test Passed"
else:
print "Test Failed"
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# FileName: froms.py
# Author: zheng xingtao
# Date: 2021/1/13 11:21
from django import forms
from markdownx.fields import MarkdownxFormField
from zanhu.qa.models import Question
class QuestionForm(forms.ModelForm):
"""็จๆทๆๅบ้ฎ้ข็่กจๅ"""
    status = forms.CharField(widget=forms.HiddenInput())  # hidden from the user
content = MarkdownxFormField()
class Meta:
model = Question
fields = ["title", "content", "tags", "status"]
|
import datetime
from threading import Thread
from multiprocessing import Pipe, Process
from .utils import Packet
from .base import BaseDispatcher
class MulticoreDispatcher(BaseDispatcher):
def __init__(self, workers=1, timeout=None):
"""
Initializes a MulticoreDispatcher with a configurable number of cores
#Arguments
workers: Int. Max number of worker subprocesses to spawn.
timeout: Max time in seconds to permit a Process to execute any given task.
If None, the processes will continue to run until self.shutdown() is called.
"""
self._close_thread = False #If True, monitor thread will close
self.tasks = {} #task_id: Task
self.active = set() #task_ids for active tasks
self.queued = set() #task_ids that are waiting to be run
self.timeout = timeout #max time for process
self.workers = workers #max number of worker processes to spawn at one time
self.current_task_id = 1 #Manages task ids, tasks start from 1
self.processes = {} #process_id: {'connections', 'process', 'running'}
for p_id in range(self.workers):
parent_conn, sub_conn = Pipe(duplex=True)
p = Process(
target=MulticoreDispatcher._spawn_subprocess,
args=(sub_conn,),
)
p.start()
self.processes[p_id] = {
'connections': (parent_conn, sub_conn),
'process': p,
'running': False
}
self.monitor_thread = Thread(
target=MulticoreDispatcher._monitor,
args=(self,)
)
self.monitor_thread.daemon = True
self.monitor_thread.start()
@staticmethod
def _monitor(self):
"""
Asynchronously monitors the dispatcher and performs the following:
1. Assigns idled processes any available queued tasks
2. Check each active tasks to ensure timeouts have not been reached
3. Replaces crashed subprocesses with new subprocesses
4. Receives task results from subprocesses
"""
while not self._close_thread:
####### Check each active task for timeout or subprocess crash #######
for task_id in list(self.active):
#timeout
task = self.tasks[task_id]
timeout = task['timeout']
dt = (datetime.datetime.now() - task['start_time']).total_seconds()
timeout_reached = dt > timeout if timeout is not None else False
#subprocess crashed
p_id = task['p_id']
pfield = self.processes[p_id]
process = pfield['process']
process_crashed = process.exitcode is not None
if timeout_reached or process_crashed:
#Kill attached process
process.terminate()
#Create new subprocess
parent_conn, sub_conn = pfield['connections']
new_process = Process(
target=MulticoreDispatcher._spawn_subprocess,
args=(sub_conn,),
)
new_process.start()
#Attach connections to new subprocess
pfield.update({
'connections': (parent_conn, sub_conn),
'process': new_process,
'running': False
})
#Mark task as completed
task.update({
'p_id': None,
'running': False,
'completed': True,
'terminated_early': True
})
self.active.remove(task_id)
####### Check for completed tasks #######
for p_id in self.processes.keys():
pfield = self.processes[p_id]
parent_conn = pfield['connections'][0]
has_data = parent_conn.poll()
if has_data:
#Get response to task from subprocess
packet = parent_conn.recv()
response = packet.unpack()
task_id = response['task_id']
result = response['result']
task = self.tasks[task_id]
task.update({
'p_id': None,
'result': result,
'running': False,
'completed': True,
})
self.active.remove(task_id)
pfield['running'] = False
######## Get idle processes #######
idle_processes = []
for i in self.processes.keys():
if not self.processes[i]['running']:
idle_processes.append(i)
####### Assign task to any idle process #######
p_index = len(idle_processes)-1
while p_index >= 0:
if len(self.queued):
task_id = self.queued.pop()
self.active.add(task_id)
task = self.tasks[task_id]
task['p_id'] = idle_processes[p_index]
task['running'] = True
task['start_time'] = datetime.datetime.now()
packet = Packet({
'task_id': task_id,
'func': task['func'],
'args': task['args'],
'kwargs': task['kwargs'],
})
packet.compress(iterations=0)
pfield = self.processes[idle_processes[p_index]]
parent_conn = pfield['connections'][0]
parent_conn.send(packet)
idle_processes.pop()
p_index -= 1
@staticmethod
def _spawn_subprocess(conn):
"""
Spawns a subprocess that listens to a connection and performs the
tasks that are passed through the connection.
"""
while True:
#Get task
packet = conn.recv()
task = packet.unpack()
func = task['func']
args = task['args']
kwargs = task['kwargs']
#Perform task
            result = func(*args, **kwargs)  # TODO: factor into a perform-task helper for abstraction?
            # Send response
packet = Packet({
'task_id': task['task_id'],
'result': result
})
conn.send(packet)
def _new_task(self,):
#Task Schema
task_id = self.current_task_id
self.current_task_id += 1
task = {
'p_id': None,
'start_time': None,
'running': False,
            'result': None,
'timeout': self.timeout,
'completed': False,
'terminated_early': False,
}
return task_id, task
def shutdown(self,):
"""
Shutdown monitor thread, terminates spawned subprocesses, and releases
memory allocation resources utilized by the dispatcher.
"""
self._close_thread = True
self.monitor_thread.join()
######## Close processes, Free memory #########
for pfield in self.processes.values():
process = pfield['process']
process.terminate()
del self.tasks
def run(self, func, *args, **kwargs):
"""
        Places a task into the dispatcher's queue of tasks for subprocess completion.
        # Arguments
            func: a function
            args: positional arguments for function `func`
            kwargs: keyword arguments for `func`
# Returns
The associated task id needed to recover the results.
"""
task_id, task = self._new_task()
task.update({
'func': func,
'args': args,
'kwargs': kwargs,
})
self.queued.add(task_id)
self.tasks[task_id] = task
return task_id
def clear_task(self, task_id):
"""
        Removes all traces of a task from the dispatcher: drops the task_id from
        every task queue and frees the memory held for its result.
"""
for collection in [self.queued, self.active]:
if task_id in collection:
collection.remove(task_id)
self.tasks.pop(task_id, None)
def get_results(self, task_ids=None, values_only=True, clear=True):
"""
Gets the results to tasks associated with the passed in task_ids.
Hangs current thread until all the tasks are complete.
task_ids: list of task_ids as generated by self.run(). These are used by the
server to identify which task to return the results for.
If `None`, waits for and returns results for all tasks.
values_only: if False returns dictionary that includes the task ids with its
results. Otherwise, returns the values computed in order of the
requested task ids.
clear: If True, removes tasks from dispatcher memory after returning results.
"""
if task_ids is None:
task_ids = list(self.tasks.keys())
waiting = True #Continue waiting for tasks to finish
while waiting:
waiting = False
#Check that all tasks are completed
for task_id in task_ids:
task = self.tasks[task_id]
if not task['completed']:
waiting = True
results = []
for task_id in task_ids:
task = self.tasks[task_id]
result = task.get('result')
results.append(result)
if clear:
self.clear_task(task_id)
if values_only:
retval = results
else:
retval = dict(zip(task_ids, results))
return retval
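# Minimal usage sketch (illustrative; the function and values are made up, and
# `square` must be a module-level, picklable callable):
#
#   def square(x):
#       return x * x
#
#   dispatcher = MulticoreDispatcher(workers=2, timeout=30)
#   task_ids = [dispatcher.run(square, i) for i in range(4)]
#   results = dispatcher.get_results(task_ids)  # [0, 1, 4, 9]
#   dispatcher.shutdown()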
|
import subprocess
from test.python.functional_tests.conftest import DATA_PATH, LocalCommandBuilder
def test_inspect_file_works(local_command: LocalCommandBuilder):
file_path = DATA_PATH / 'file_or_project' / 'file.py'
local_command.path = file_path
process = subprocess.run(
local_command.build(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = process.stdout.decode()
assert process.returncode == 1
assert file_path.name in output
def test_inspect_project_works(local_command: LocalCommandBuilder):
file_path = DATA_PATH / 'file_or_project' / 'project'
local_command.path = file_path
process = subprocess.run(
local_command.build(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output = process.stdout.decode()
assert process.returncode == 1
assert 'one.py' in output
assert 'other.py' in output
|
import os
import tempfile
class Config(object):
AUTH0_AUDIENCE = "https://libcellml.org/mps/api"
AUTH0_DOMAIN = os.environ.get('MPS_AUTH0_DOMAIN')
AUTH0_SECRET = os.environ.get('MPS_AUTH0_SECRET')
CLIENT_ORIGIN_URL = os.environ.get("MPS_CLIENT_ORIGIN_URL", "http://localhost:4040")
CLIENT_WORKING_DIR = os.environ.get('MPS_CLIENT_WORKING_DIR')
SIMULATION_DATA_DIR = os.environ.get('MPS_SIMULATION_DATA_DIR', os.path.join(tempfile.gettempdir(), 'mps_simulation_data'))
SIMULATION_RUN_DIR = os.environ.get('MPS_SIMULATION_RUN_DIR')
SUNDIALS_CMAKE_CONFIG_DIR = os.environ.get('MPS_SUNDIALS_CMAKE_CONFIG_DIR')
|
import numpy as np
import astropy.units as u
import csv
from functools import wraps
from time import time
import collections
from astropy.wcs import WCS, WCSSUB_SPECTRAL
"""
This is an assortment of possibly useful tools that can be called from
anywhere; they live here mostly to keep the other modules clean.
"""
def timeit(f, *args, **kwargs):
    """Decorator that reports how long the wrapped function took to run."""
    @wraps(f)
    def timed(*args, **kwds):
        t0 = time()
        res = f(*args, **kwds)
        t1 = time()
        print('%s took %.4f s' % (f.__name__, t1 - t0))
        return res
    return timed
def solve_linear_set(matrix, vector):
'''
Like Cramer's rule but easier, and you get the solution
immediately instead of handling determinants first
'''
matrix = np.array(matrix)
inv_mat = np.linalg.inv(matrix)
B = np.array(vector)
x = np.dot(inv_mat, B)
return x
def is_notebook():
"""
Are we in a jupyter notebook?
"""
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'TerminalInteractiveShell':
return False
else:
return False
except NameError:
return False
def progress_bar(*args, **kwargs):
from tqdm import tqdm, tqdm_notebook
func = tqdm_notebook if is_notebook() else tqdm
return func(*args, **kwargs)
def get_simple_slice(center, slit_width, dshape, slit_length = None):
'''
Return a slice or rectangle or whatever based on the length and width
you want
Parameters
----------
center : tuple
the center in (y, x) format from which to make the cut
slit_width : int, float
the width of the cut in pixels
dshape : tuple
make sure the cut doesn't extend beyond the extent of the data
slit_length : int or float
the length of the slit; if None, the length is determined from the
shape
'''
# convert width to two radii
ny, nx = dshape
# make sure the center is an array
center = np.asarray(center)
# the radius in x is half the slit width
    rx = slit_width / 2.
# make sure the width is in the limits of the cube
xmin = max(0, np.floor((center[1] - rx) + 0.5).astype(int))
xmax = min(nx, np.floor((center[1] + rx) + 0.5).astype(int))
# if a length is specified, find where its lower and upper
# bounds are in the data.
# For a reference point (xc, yc) and slit of length L orientated
# along the y-axis, the slit extends L/2 in either direction
# from yc. The upper and lower bounds of the slit are a distance
# (yc +/- L/2) from the origin
    if slit_length is not None:
        lw = slit_length / 2.
ymin = max(0, np.floor((center[0] - lw) + 0.5).astype(int))
ymax = min(ny, np.floor((center[0] + lw) + 0.5).astype(int))
else:
ymin = 0
ymax = ny
simple_slice = [ slice(ymin, ymax+1), slice(xmin, xmax+1) ]
return simple_slice
def format_header(hdr, mode):
hdu = hdr.copy()
#print(hdu)
# get some values and make some assumptions
# `cunit` can be discovered from the mode
# `cdelt` is, for now, either 0.2 (for arcsec) or 1.25 (for angstrom)
# because this is how MUSE handles it. Setters exist to change these
# values. If no mode is passed, `cdelt` is 1. and `cunit` is 'pixel'
cunit = get_unit_from_mode(mode)
#print('cunit = ', cunit)
ctype = "LINEAR"
if cunit is u.arcsec:
cdelt = 0.2
elif cunit is u.angstrom:
cdelt = 1.25
ctype = 'AWAV'
else:
cunit = u.Unit('pixel')
cdelt = 1.
# make essential keywords in case the header is minimal
# we're assuming `crpix` and `crval` to be at the origin for simplicity
hdr_keys = {
'CRPIX' : 1.,
'CRVAL' : 0.,
'CDELT' : cdelt,
'CUNIT' : cunit.to_string('fits'),
'CTYPE' : ctype
}
# if the important keywords are not in the header, add them
# again this is for a simple 1D case! we aren't handling n > 1.
n = hdu['NAXIS']
for key, val in hdr_keys.items():
if f'{key}{n}' not in hdu:
hdu.set(f'{key}{n}', val)
return hdu
def read_modlist_from_csv(fname):
'''
If your model list is in a CSV file, here's a way to pick it out
Parameters
-----------
fname : str
Obviously a filename; may add more kwargs later for an option to use
`pandas`
Returns
-----------
out : `OrderedDict`
A dictionary of the CSV, with headers being the keys and the columns
being the entries
'''
modlist = collections.defaultdict(list)
with open(fname) as f:
reader = csv.DictReader(f, delimiter=',')
for row in reader:
# Are some columns longer than others? Skip the blank cells
if any(row[key] in (None, '') for key in row):
continue
# Only need the first letter since that's how we roll
for key in row:
modlist[key].append(row[key][0])
return modlist
def parse_badlines(fname):
'''
A convenience function to parse and install skyline information
from a data file
'''
with open(fname) as f:
for line in f.readlines():
l1, l2, emis = line.strip().split()
yield emis, float(l1), float(l2)
def get_noise1D(flux):
'''
A simple function to return the noise of an array following the
sigma-estimation given in Czesla et al., 2018 (A&A, 609, A39):
        sigma = (1.482602 / sqrt(6)) * med_i(|2*flux_i - flux_{i-2} - flux_{i+2}|)
(See: http://cdsads.u-strasbg.fr/abs/2018A%26A...609A..39C)
'''
# ignore masked pixels
flux = flux.compressed()
n = len(flux)
if n > 4:
noise = (1.482602/np.sqrt(6)) * np.median(abs(2 * flux[2:n-2] \
- flux[0:n-4] - flux[4:n]))
else:
noise = 0.
return noise
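# Minimal usage sketch (illustrative, not part of the original module): solve a
# small 2x2 linear system with solve_linear_set and check the answer.
if __name__ == '__main__':
    A = [[3., 1.], [1., 2.]]
    b = [9., 8.]
    x = solve_linear_set(A, b)  # expect approximately [2., 3.]
    print(x, np.allclose(np.array(A) @ x, b))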
|
#!/usr/bin/env python3
"""
The script merges calls from DeepVariant, HaplotypeCaller and Strelka2
"""
# at some point, stop using print statements to log and use an actual logging framework.
import argparse
import csv
import datetime
import re
import sys
# global variables:
# VCF structure (used instead of index numbers for readability)
chrom = 0
pos = 1
snpID = 2
ref = 3
alt = 4
qual = 5
filt = 6
info = 7
frmt = 8
############################################# Functions #############################################
def get_args():
"""Handle command line arguments (input and output file names)."""
parser = argparse.ArgumentParser(
description="Takes VCF file with samples that have been called by the three callers and returns a VCF file where the genotypes from each caller are combined."
)
parser.add_argument("infile", help="Input VCF file name")
parser.add_argument("outfile", help="Output VCF file name")
results = parser.parse_args()
return results.infile, results.outfile
def check_file(fname):
"""Check that file can be read; exit with error message if not."""
try:
f = open(fname, "rb")
f.close()
return 0
except IOError:
print("ERROR: Could not read file", fname)
        sys.exit(1)
def get_header(infile):
"""Extract header from VCF.
Exit with error message if no header detected.
"""
headerCheck = 1
with open(infile, "r") as file:
for line in csv.reader(file, delimiter="\t"):
if "#CHROM" in line:
headerCheck = vcf_check(line)
return line
if headerCheck == 1:
print("ERROR: File must contain header row matching VCF specification")
        sys.exit(1)
def vcf_check(line):
"""Rudimentary format check.
    Must have a #CHROM-designated header row and at least three genotype columns.
    The number of genotype columns must be a multiple of three (does not
    actually check caller pairing).
"""
if (len(line) - 9) % 3 != 0 or len(line) < 12:
print("ERROR: Unpaired sample names detected. File must contain triple number of samples.")
return 1
sys.exit()
# note that there should be (9 + 3 x no. of samples) number of columns
else:
return 0
def evaluate_variant_line(line, start1, end1, start2, end2, start3, end3):
"""For non-header lines, determines what needs to be done
do merge the genotype information:
1. Add set annotation (HC, DV, strelka2, HC-DV, HC-strelka2, DV-strelka2, HC-DV-strelka2) to INFO
2. Removes empty genotypes for variants called by one caller
3. Removes chr_pos_ref_alt from ID field for DV-only variants
4. Integrates info for variants called by both
"""
if (":DV_" in line[frmt]) and not (":HC_" in line[frmt]) and not (":strelka2_" in line[frmt]):
line = add_set_tag("DV", line)
line[filt] = "oneCaller"
line = remove_empty_genotypes(line, start1, end1, start2, end2, start3, end3)
line[snpID] = "."
return line
elif (":HC_" in line[frmt]) and not (":DV_" in line[frmt]) and not (":strelka2_" in line[frmt]):
line = add_set_tag("HC", line)
line[filt] = "oneCaller"
line = remove_empty_genotypes(line, start1, end1, start2, end2, start3, end3)
return line
elif (":DV_" in line[frmt]) and (":HC_" in line[frmt]) and not (":strelka2_" in line[frmt]):
line = add_set_tag("HC-DV", line)
line[filt] = "twoCallers"
line = combine_genotypes(line, start1, end1, start2, end2, start3, end3)
line[snpID] = "."
return line
elif (":strelka2_" in line[frmt]) and not (":HC_" in line[frmt]) and not (":DV_" in line[frmt]):
line = add_set_tag("strelka2", line)
line[filt] = "oneCaller"
line = remove_empty_genotypes(line, start1, end1, start2, end2, start3, end3)
line[snpID] = "."
return line
elif (":strelka2_" in line[frmt]) and (":HC_" in line[frmt]) and not (":DV_" in line[frmt]):
line = add_set_tag("HC-strelka2", line)
line[filt] = "twoCallers"
line = combine_genotypes(line, start1, end1, start2, end2, start3, end3)
line[snpID] = "."
return line
elif (":strelka2_" in line[frmt]) and (":DV_" in line[frmt]) and not (":HC_" in line[frmt]):
line = add_set_tag("DV-strelka2", line)
line[filt] = "twoCallers"
line = combine_genotypes(line, start1, end1, start2, end2, start3, end3)
line[snpID] = "."
return line
elif (":strelka2_" in line[frmt]) and (":HC_" in line[frmt]) and (":DV_" in line[frmt]):
line = add_set_tag("HC-DV-strelka2", line)
line[filt] = "threeCallers"
line = combine_genotypes(line, start1, end1, start2, end2, start3, end3)
line[snpID] = "."
return line
else:
print("ERROR: No caller annotation found in FORMAT field.")
        sys.exit(1)
def find_genotype_indices(line):
"""Determines the start/stop point for genotype columns from the three callers.
bcftools merge --force-samples results in a VCF with samples as so:
chr pos ... sample1 sample2 2:sample1 2:sample2 3:sample1 3:sample2, where the first
two columns are called with caller1 and the second two with caller2, and so on.
This function determines the index numbers defining the ranges
(assuming the columns are not inter-mingled, e.g. sample1 2:sample1 2:sample2 sample2).
Bear in mind that python slicing [start:stop] gets items start to stop-1.
Example: vcf with 6 genotype fields at indices 9-14: 0,1,...8,9,10,11,12,13,14
see inline comments working through this example.
"""
    start1 = 9  # first genotype column; in the example, index 9
    end3 = len(line)  # one past the last genotype column; in the example, 15
    num_samples = int((end3 - 9) / 3)  # 6 genotype columns / 3 callers = 2 samples
    end1 = 9 + num_samples  # caller1 columns are [9, 11) -> indices 9, 10
    start2 = end1  # caller2 columns start at index 11
    end2 = 9 + num_samples * 2  # caller2 columns are [11, 13) -> indices 11, 12
    start3 = end2  # caller3 columns are [13, 15) -> indices 13, 14
return start1, end1, start2, end2, start3, end3
def add_set_tag(caller, line):
"""Add set (HC, DV, HC-DV, strelka2, HC-strelka2, DV-strelka2 and HC-DV-strelka2) to INFO fields."""
line[info] = line[info] + ";set=" + caller
return line
def remove_empty_genotypes(line, start1, end1, start2, end2, start3, end3):
"""For variants found by only one caller, remove empty (.:.:.) fields.
If only DeepVariant has call, set dv_priority_GT to the DV GT
Set all consensus GT to ./.
"""
line[8] = line[8] + ":concensus_GT:dv_priority_GT"
if any("0" in s for s in line[start1:end1]) or any("1" in s for s in line[start1:end1]):
for i in range(start1, end1):
line[i] = line[i] + ":" + "./." + ":" + "./."
line = line[0:end1]
return line
elif any("0" in s for s in line[start2:end2]) or any("1" in s for s in line[start2:end2]):
for i in range(start2, end2):
s = line[i]
geno = s.split(":")
line[i] = line[i] + ":" + "./." + ":" + geno[0]
line = line[0:9] + line[start2:end2]
return line
elif any("0" in s for s in line[start3:end3]) or any("1" in s for s in line[start3:end3]):
for i in range(start3, end3):
line[i] = line[i] + ":" + "./." + ":" + "./."
line = line[0:9] + line[start3:end3]
return line
else:
print("remove_empty_genotypes ERROR: All genotype fields are blank.")
print(line)
        sys.exit(1)
def flip_hets(gt):
if gt == "1/0":
gt = "0/1"
return gt
def get_concensus_gt(gt1, gt2, gt3): # In this order: HC, DV, strelka2
"""
    This function finds the consensus GT.
    If all 3 callers have calls:
        and any two of them are concordant: GT is set to that concordant GT;
        or none are concordant: GT is set to ./.
    If only 2 callers have calls:
        and they are concordant: GT is set to that concordant GT;
        and they are not concordant: GT is set to ./.
    Finally, if only one caller has a call, the GT is set to ./.
"""
if flip_hets(gt1) == flip_hets(gt2):
concensus_gt = gt1
elif flip_hets(gt2) == flip_hets(gt3):
concensus_gt = gt2
elif flip_hets(gt1) == flip_hets(gt3):
concensus_gt = gt1
else:
concensus_gt = "./."
return concensus_gt
def get_dv_priority_gt(gt1, gt2, gt3): # In this order: HC, DV, strelka2
"""
This function finds the GT that's from DV call when possible.
If there's no DV call:
and HC and strelka2 calls the same genotype, GT is set to that genotype
otherwise GT is set to ./.
"""
dv_priority_gt = "./."
if gt2.count(".") == 0:
dv_priority_gt = gt2
elif flip_hets(gt1) == flip_hets(gt3):
dv_priority_gt = gt1
return dv_priority_gt
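# Worked examples (illustrative, not from the original script): with HC=0/1,
# DV=1/0 and no strelka2 call, the two present callers agree after het
# flipping, so get_concensus_gt("0/1", "1/0", "./.") returns "0/1". With no DV
# call but matching HC and strelka2 genotypes,
# get_dv_priority_gt("0/1", "./.", "1/0") also returns "0/1".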
def combine_genotypes(line, start1, end1, start2, end2, start3, end3):
"""For variants found by only two callers, integrate genotype info.
Variants called by both callers will look like this:
sample1 2:sample1
0/1:3:4,5:.:.:. .:.:.:0/1:2:4,3
We want the union of this information, e.g.:
sample1
0/1:3:4,5:0/1:2:4,3
This function compares the three genotype fields sample1, 2:sample1 and 3:sample1,
and anywhere there's a '.' in sample1, it updates it with non-'.'
data from 2:sample1 or 3:sample1 if available. This assumes that the VCF is well-formed,
meaning each genotype column conforms equally to the FORMAT definition.
This function also updates the GT column.
If all 3 callers have calls:
        and any two of them are concordant: GT is set to that concordant GT;
        or none are concordant: GT is set to ./.
    If only 2 callers have calls:
        and they are concordant: GT is set to that concordant GT;
        and they are not concordant: GT is set to ./.
    Finally, if only one caller has a call, the GT is set to that GT.
"""
for x, y, z in zip(line[start1:end1], line[start2:end2], line[start3:end3]):
geno1 = x.split(":")
geno2 = y.split(":")
geno3 = z.split(":")
field = line.index(x)
for i, g1 in enumerate(geno1):
if i == 0:
if (geno1[i] != geno2[i]) and (geno1[i] != geno3[i]) and (geno2[i] != geno3[i]):
geno1[i] = "./."
elif geno2[i] == geno3[i]:
geno1[i] = geno2[i]
if (geno1[i] == ".") and (geno2[i] != "."):
geno1[i] = geno2[i]
if (geno1[i] == ".") and (geno2[i] == ".") and (geno3[i] != "."):
geno1[i] = geno3[i]
line[field] = ":".join(geno1)
# add GT
concensus_gt = get_concensus_gt(geno1[0], geno2[0], geno3[0])
# print ("for dv:" + geno1[0] + " " + geno2[0] + " " + geno3[0])
dv_priority_gt = get_dv_priority_gt(geno1[0], geno2[0], geno3[0])
# print ("dv_priority_gt:" + dv_priority_gt)
line[field] = line[field] + ":" + concensus_gt + ":" + dv_priority_gt
# add field to format
line[start1 - 1] = line[start1 - 1] + ":concensus_GT:dv_priority_GT"
return line[0:end1]
def add_headers(ts, ver, scriptName, cmdString):
"""Add metadata to the vcf
To A) account for new INFO field and to B) document provenance.
###TODO: add reference info as well?
"""
infoHeader = '##INFO=<ID=set,Number=.,Type=String,Description="Set of callers that identified a variant (HC, DV, strelka2, HC-DV, HC-strelka2, DV-strelka2, or HC-DV-strelka2 )">'
filterHeaderOneCaller = (
'##FILTER=<ID=oneCaller,Description="The variant was called by exactly one caller">'
)
filterHeaderTwoCallers = (
'##FILTER=<ID=twoCallers,Description="The variant was called by exactly two callers">'
)
filterHeaderThreeCallers = (
'##FILTER=<ID=threeCallers,Description="The variant was called by all three callers">'
)
formatHeaderConcensusGT = (
'##FORMAT=<ID=concensus_GT,Number=1,Type=String,Description="Genotype">'
)
formatHeaderDVPriorityGT = (
'##FORMAT=<ID=dv_priority_GT,Number=1,Type=String,Description="Genotype">'
)
prov1 = (
"##"
+ scriptName
+ "_Version="
+ ver
+ ", Union of HC, DV and strelka2 genotype data, "
+ ts
)
prov2 = "##" + scriptName + "_Command=" + cmdString
return [
filterHeaderOneCaller,
filterHeaderTwoCallers,
filterHeaderThreeCallers,
infoHeader,
formatHeaderConcensusGT,
formatHeaderDVPriorityGT,
prov1,
prov2,
]
#####################################################################################################
if __name__ == "__main__":
ts = str(datetime.datetime.now())
ver = "someversion" # https://stackoverflow.com/questions/5581722/how-can-i-rewrite-python-version-with-git
scriptName = sys.argv[0]
cmdString = " ".join(sys.argv)
infile, outfile = get_args()
check_file(infile)
headerLine = get_header(infile)
start1, end1, start2, end2, start3, end3 = find_genotype_indices(headerLine)
with open(infile, "r") as file, open(outfile, "w") as out:
for line in csv.reader(file, delimiter="\t"):
if re.search(r"#", line[chrom]) is None:
line = evaluate_variant_line(line, start1, end1, start2, end2, start3, end3)
out.write("\t".join(line) + "\n")
elif "#CHROM" in line:
out.write("\n".join(add_headers(ts, ver, scriptName, cmdString)) + "\n")
out.write("\t".join(line[0:start2]) + "\n")
else:
out.write("\t".join(line) + "\n")
|
# Copyright (c) 2020, eQualit.ie inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import json
import traceback
from baskerville.db.dashboard_models import Message, PendingWork
from baskerville.models.config import KafkaConfig
from baskerville_dashboard.utils.enums import NotificationKind
from kafka import KafkaProducer
from sqlalchemy.exc import SQLAlchemyError
KAFKA_CONSUMER = None
ASYNC_KAFKA_CONSUMER = None
KAFKA_PRODUCER = None
def value_deserializer(v):
try:
return json.loads(v.decode('utf-8'))
    except Exception:
return {}
def get_kafka_consumer(kafka_config: KafkaConfig, topics=()):
global KAFKA_CONSUMER
from kafka import KafkaConsumer
from kafka.client_async import selectors
if not KAFKA_CONSUMER:
KAFKA_CONSUMER = KafkaConsumer(
*topics,
bootstrap_servers=kafka_config['bootstrap_servers'],
selector=selectors.DefaultSelector,
auto_offset_reset='earliest',
value_deserializer=value_deserializer
)
return KAFKA_CONSUMER
def get_aiokafka_consumer(kafka_config: KafkaConfig, topics=()):
from aiokafka import AIOKafkaConsumer
# note: for python 3.6 you'll also need to install dataclasses
global ASYNC_KAFKA_CONSUMER
if not ASYNC_KAFKA_CONSUMER:
ASYNC_KAFKA_CONSUMER = AIOKafkaConsumer(
*topics,
bootstrap_servers=kafka_config['bootstrap_servers'],
auto_offset_reset='earliest',
value_deserializer=value_deserializer
)
return ASYNC_KAFKA_CONSUMER
def get_kafka_producer(kafka_config: KafkaConfig):
global KAFKA_PRODUCER
from kafka.client_async import selectors
if not KAFKA_PRODUCER:
KAFKA_PRODUCER = KafkaProducer(
bootstrap_servers=kafka_config.bootstrap_servers,
selector=selectors.DefaultSelector,
security_protocol=kafka_config['security_protocol'],
ssl_check_hostname=kafka_config['ssl_check_hostname'],
ssl_cafile=kafka_config['ssl_cafile'],
ssl_certfile=kafka_config['ssl_certfile'],
ssl_keyfile=kafka_config['ssl_keyfile']
)
return KAFKA_PRODUCER
def consume_from_kafka(config, baskerville_config):
from baskerville_dashboard.utils.helpers import get_socket_io
from baskerville_dashboard.db.manager import SessionManager
from baskerville.db import set_up_db
# todo: either pickle it and reload it or use
feedback_context_cache = {}
socketio = get_socket_io()
Session, engine = set_up_db(baskerville_config['database'], create=True)
sm = SessionManager()
sm.set_session(Session)
sm.set_engine(engine)
kafka_consumer = get_kafka_consumer(
baskerville_config['kafka'], config.get('KAFKA_TOPICS')
)
import time
# todo: handle all this in different functions / models
for cr in kafka_consumer:
if cr.value:
if cr.topic == 'test.feedback':
try:
print('updating feedback context...')
fc_id = cr.value['id_context']
if fc_id not in feedback_context_cache:
feedback_context_cache[fc_id] = True
from baskerville.db.dashboard_models import FeedbackContext
fc = sm.session.query(FeedbackContext).filter_by(id=fc_id).first()
if fc:
message = f'Updated feedback context {fc.id} to not pending'
fc.pending = not cr.value['success']
notification = Message()
notification.message = message
notification.uuid_organization = fc.uuid_organization
notification.severity = NotificationKind.error.value
sm.session.add(notification)
sm.session.commit()
socketio.emit(
cr.value['uuid_organization'],
message
)
else:
message = f'Could not find fc {fc_id}'
notification = Message()
notification.message = message
notification.uuid_organization = fc.uuid_organization
notification.severity = NotificationKind.error.value
sm.session.add(notification)
sm.session.commit()
socketio.emit(
cr.value['uuid_organization'],
message
)
except KeyError:
traceback.print_exc()
except SQLAlchemyError:
traceback.print_exc()
sm.session.rollback()
if cr.topic == 'test.register':
try:
uuid_organization = cr.value['uuid_organization']
from baskerville.db.dashboard_models import \
Organization
org = sm.session.query(Organization).filter_by(
uuid=uuid_organization).first()
if org:
org.registered = not cr.value['success']
message = f'Organization {uuid_organization} ' \
f'is now registered'
notification = Message()
notification.message = message
notification.uuid_organization = uuid_organization
notification.severity = NotificationKind.info.value
sm.session.add(notification)
sm.session.commit()
socketio.emit(
uuid_organization,
message
)
else:
message = f'Could not find organization ' \
f'uuid={uuid_organization}'
notification = Message()
notification.message = message
notification.uuid_organization = uuid_organization
notification.severity = NotificationKind.error.value
sm.session.add(notification)
sm.session.commit()
socketio.emit(uuid_organization, message)
except KeyError:
traceback.print_exc()
except SQLAlchemyError:
traceback.print_exc()
sm.session.rollback()
if cr.topic == 'test.retrain':
try:
uuid_organization = cr.value['uuid_organization']
pw_uuid = cr.value['uuid']
success = cr.value['success']
pending = cr.value['pending']
message = cr.value['message']
notification = Message()
notification.message = message
notification.uuid_organization = uuid_organization
pw = sm.session.query(PendingWork).filter_by(
uuid=pw_uuid
).first()
if pw:
pw.success = success
pw.pending = pending
notification.id_user = pw.id_user
pw.logs = message + pw.logs if pw.logs else message
notification.severity = NotificationKind.info.value
else:
message = f'Could not find pending work ' \
f'pw_uuid={pw_uuid}'
notification.severity = NotificationKind.error.value
sm.session.add(notification)
sm.session.commit()
socketio.emit(uuid_organization, message)
except KeyError:
traceback.print_exc()
except SQLAlchemyError:
traceback.print_exc()
sm.session.rollback()
time.sleep(0.1)
|
"""
T: O(N)
S: O(1)
Remember previous calls and pop the ones that have happened too long ago. The
space complexity can be considered constant since the target period is
constant.
"""
import collections
class RecentCounter:
CUT_OFF_TIME = 3000
def __init__(self):
self.pings = collections.deque()
def ping(self, t: int) -> int:
self.pings.append(t)
while self.pings[-1] - self.pings[0] > self.CUT_OFF_TIME:
self.pings.popleft()
return len(self.pings)
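# Illustrative usage (not part of the original solution): only pings inside the
# trailing 3000 ms window are counted.
if __name__ == "__main__":
    counter = RecentCounter()
    print(counter.ping(1))     # 1
    print(counter.ping(100))   # 2
    print(counter.ping(3001))  # 3
    print(counter.ping(3002))  # 3 -> the ping at t=1 has aged out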
|
"""
Find Where to Expand in Minesweeper
Implement a function that turns revealed cells into -2 given a location the user wants to click.
For simplicity, only reveal cells that have 0 in them.
- If the user clicks on any other type of cell (for example, -1 / bomb or 1, 2, or 3), just ignore the click and
return the original field.
- If a user clicks 0, reveal all other 0's that are connected to it.
- More details: https://www.udemy.com/11-essential-coding-interview-questions/
"""
import queue
def click_dfs(field, num_rows, num_cols, given_i, given_j):
"""
Time: O(num_rows * num_cols)
Space: O(num_rows * num_cols)
"""
if 0 <= given_i < num_rows and 0 <= given_j < num_cols and field[given_i][given_j] == 0:
field[given_i][given_j] = -2
else:
return field
for i in range(given_i - 1, given_i + 2):
for j in range(given_j - 1, given_j + 2):
click_dfs(field, num_rows, num_cols, i, j)
return field
def click_bfs(field, num_rows, num_cols, given_i, given_j):
"""
Time: O(num_rows * num_cols)
Space: O(num_rows + num_cols)
"""
q = queue.Queue()
if 0 <= given_i < num_rows and 0 <= given_j < num_cols and field[given_i][given_j] == 0:
field[given_i][given_j] = -2
q.put((given_i, given_j))
while not q.empty():
(row, col) = q.get()
for i in range(row - 1, row + 2):
for j in range(col - 1, col + 2):
if 0 <= i < num_rows and 0 <= j < num_cols and field[i][j] == 0:
field[i][j] = -2
q.put((i, j))
return field
def to_string(given_array):
"""
Converts a 2-dimensional array (a list of lists) into an easy-to-read string format
"""
list_rows = []
for row in given_array:
list_rows.append(str(row))
return '[' + ',\n '.join(list_rows) + ']'
if __name__ == '__main__':
field1 = [[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, -1, 1, 0]]
field2 = [[-1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 1, -1]]
    print(to_string(click_bfs(field1, 3, 5, 2, 2)))  # click_dfs is correct as well
print()
print(to_string(click_bfs(field1, 3, 5, 1, 4)))
print()
print(to_string(click_bfs(field2, 4, 4, 0, 1)))
print()
print(to_string(click_bfs(field2, 4, 4, 1, 3)))
|
class A(object):
__X = 1
_<ref>_X # must resolve
|
import os
path1 = "outputs"
path2 = "outputs/_imgs"
path3 = "outputs/max_sharpe_weights"
path4 = "outputs/opt_portfolio_trades"
try:
os.mkdir(path1)
except OSError:
print ("ะะธัะตะบัะพัะธั %s ัะถะต ัะพะทะดะฐะฝะฐ" % path1)
else:
print ("ะฃัะฟะตัะฝะพ ัะพะทะดะฐะฝะฐ ะดะธัะตะบัะพัะธั %s " % path1)
try:
os.makedirs(path2)
os.makedirs(path3)
os.makedirs(path4)
except OSError:
print ("ะะธัะตะบัะพัะธะธ ัะถะต ัะพะทะดะฐะฝั")
else:
print ("ะฃัะฟะตัะฝะพ ัะพะทะดะฐะฝั ะฝัะถะฝัะต ะดะธัะตะบัะพัะธะธ")
source_path = '../source_root/1m'
destination_path = 'outputs'
|
SSID="wifi_ssid_here"
WIFI_PASS="wifi_password_here"
|
from typing import List
from xinaprocessor.constants import *
import re
import emoji
import random
from collections import Counter
def replace_list(list_chars, text, replace_with=""):
chars = "".join(list_chars)
return re.sub(f"[{chars}]", replace_with, text)
def remove_extra_spaces(text: str, keep_spaces=1):
return re.sub(" +", " " * keep_spaces, "".join(text))
def remove_emoji(text: str):
return emoji.get_emoji_regexp().sub("", text)
def remove_hashtags(text: str):
return re.sub(r"#.*?(?=\s)", "", text)
def remove_links(text: str):
return re.sub("http[s]?://\S+|[wW]{3,}[\S/\?=\.&]+", "", text)
def remove_mentions(text: str):
return re.sub(r" @[\w_]+ | @[\w_]+|^@[\w_]+ ", " ", text)
def remove_emails(text: str):
return re.sub(r"\S+@\S+", "", text)
def contains_single_char(text: str):
return True if re.search(r"(?:^| )\w(?:$| )", text) else False
def contains_persian(text: str):
return True if re.search(r"[\uFB50-\uFB9F{}]".format(''.join(PERSIAN_UNIQUE_CHARS)), text) else False
def contains_english(text: str):
return True if re.search(r"[A-Za-z]", text) else False
def remove_single_char_space_before(text: str):
return re.sub(r"(?:^| )(\w)(?:$| )", r"\1 ", text).strip()
def remove_single_char_space_after(text: str):
return re.sub(r"(?:^| )(\w)(?:$| )", r" \1", text).strip()
def multi_replace(keys: List[str], values: List[str], text: str):
"""Replace each item in keys with the corresponding item in values in the input text
Args:
keys (List[str]): a list of strings to be replaces
values (List[str]): list of strings with same length of keys to with values to be replaced with
text (str): input text to apply replacements on
Returns:
str: text with strings in keys replaced with corresponding strings in values
"""
exp = "|".join(map(re.escape, keys))
def func(match): return values[keys.index(match.group(0))]
return re.sub(exp, func, text)
def keep_only(text: str, list_chars):
chars = "".join(list_chars)
spaced_text = re.sub(f"[^{chars}]", " ", text)
return remove_extra_spaces(spaced_text).strip()
def replace_repeated_chars(text: str, repeated=1, keep_char=1):
assert repeated > 0
assert keep_char >= 0
pattern = r"(.)\1{}".format(f"{{{repeated-1},}}")
return re.sub(pattern, r"\1" * keep_char, text)
def replace_except(text: str, keep_symbols: str, replace_by: str) -> str:
return re.sub(f"[^{keep_symbols}]", replace_by, text)
def contains_repeated_chars(text: str, repeated=1):
pattern = r"(.)\1{}".format(f"{{{repeated-1},}}")
return True if re.search(pattern, text) else False
def train_test_split(x: list, test_size: float, random_seed=None):
    assert 0.0 < test_size < 1.0, "test size should be between 0 and 1"
assert len(x) > 1, "the length of the given list should be greater than 1"
    if random_seed is not None:
        random.Random(random_seed).shuffle(x)
    else:
        random.shuffle(x)
test = x[: int(len(x) * test_size)]
train = x[int(len(x) * test_size):]
return train, test
def export_text(file_path, data: list, sep="\n", encoding="utf-8"):
with open(file_path, "a", encoding=encoding) as f:
f.write(sep.join(data))
def transliteration_to_arabic(text: str):
for ar_char, buc_symbole in BUCKWALTER_TRANSLITERATION.items():
text = text.replace(buc_symbole, ar_char)
return text
def arabic_to_transliteration(text: str):
for ar_char, buc_symbole in BUCKWALTER_TRANSLITERATION.items():
text = text.replace(ar_char, buc_symbole)
return text
def str_count_frequency(text: str, sep= " ", word_level= True):
if word_level:
return Counter(text.split(sep))
return Counter(text)
def doc_count_frequency(texts: list, split_by= " ", word_level= True):
text = split_by.join(texts)
return str_count_frequency(text, sep= split_by, word_level= word_level)
def swap_tanween_alef(text: str):
return text.replace(TANWEEN + NORMAL_ALEF, NORMAL_ALEF + TANWEEN)
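# Illustrative usage of the helpers above on a made-up sample string; the exact
# output depends on the cleaning order chosen here and is only indicative.
if __name__ == "__main__":
    sample = "heeello   world!! #demo http://example.com @someone"
    cleaned = remove_links(remove_hashtags(remove_mentions(sample)))
    cleaned = replace_repeated_chars(cleaned, repeated=2, keep_char=1)
    cleaned = remove_extra_spaces(cleaned).strip()
    print(cleaned)
    # deterministic 80/20 split of a toy list (uses the seeded shuffle above)
    train, test = train_test_split(list(range(10)), test_size=0.2, random_seed=42)
    print(len(train), len(test))  # 8 2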
|
# IDA Sync Server User Management Class
# Copyright (C) 2005 Pedram Amini <pedram.amini@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
import Mk4py as mk
from serverx import *
################################################################################
### user_manager
###
### this class provides an interface for the general management of users.
###
class user_manager:
############################################################################
### constructor
###
### args: none.
### raises: none.
### returns: none.
###
def __init__(self):
# open the users database, creating it if it doesn't exist.
self.db = mk.storage("databases/users.db", 1)
# define the user table (view) if it doesn't already exist.
self.view = self.db.getas("users[username:S,password:S,realname:S]")
############################################################################
### add()
###
### args: username - unique username to add.
### password - password.
### realname - real name.
### raises: exception on error.
### returns: none.
###
def add(self, username, password, realname):
# ensure the user doesn't already exist.
if (self.view.find(username=username) != -1):
raise serverx("username already exists")
# add the user and commit the changes.
self.view.append(username=username,
password=password,
realname=realname)
self.db.commit()
############################################################################
### delete()
###
### args: username - user to delete.
### raises: exception on error.
### returns: none.
###
def delete(self, username):
# ensure the user exists.
index = self.view.find(username=username)
if (index == -1):
raise serverx("username not found")
# remove the user and commit the changes.
self.view.delete(index)
self.db.commit()
############################################################################
### list()
###
### args: none.
### raises: none.
### returns: none.
###
def list(self):
return self.view.sort(self.view.username)
############################################################################
### update()
###
### args: username - user to update.
### password - new password.
### realname - new value for real name.
### raises: exception on error.
### returns: none.
###
def update(self, username, password, realname):
# ensure the user exists.
index = self.view.find(username=username)
if (index == -1):
raise serverx("username not found")
# remove the user.
self.view.delete(index)
        # insert the updated user in its place.
self.view.insert(index, username=username,
password=password,
realname=realname)
# commit the changes.
self.db.commit()
############################################################################
### validate()
###
### args: username - user to validate as.
### password - username's password.
### raises: exception on error.
### returns: none.
###
def validate(self, username, password):
# ensure the user exists.
index = self.view.find(username=username, password=password)
if (index == -1):
raise serverx("invalid username or password")
# see if the passwords match.
user = self.view[index]
if (user.password != password):
raise serverx("invalid username or password")
|
import pynput
from datetime import datetime
from .common import Event, EventKind
class KeyboardRecorder:
def __init__(self):
self.recordPress = True
self.recordRelease = True
self.listener = None
self.events = []
def on_press(self, key):
self.events.append(Event(datetime.now() - self.basetime, EventKind.KEY_PRESS, [key]))
def on_release(self, key):
self.events.append(Event(datetime.now() - self.basetime, EventKind.KEY_RELEASE, [key]))
def start(self):
self.basetime = datetime.now()
self.listener = pynput.keyboard.Listener(
on_press=self.on_press if self.recordPress else None,
on_release=self.on_release if self.recordRelease else None)
self.listener.start()
def stop(self):
self.listener.stop()
self.listener = None
def clear(self):
self.events = []
class MouseRecorder:
def __init__(self):
self.recordMove = True
self.recordClick = True
self.recordScroll = True
self.listener = None
self.events = []
def on_move(self, x, y):
self.events.append(Event(datetime.now() - self.basetime, EventKind.MOUSE_MOVE, [x, y]))
def on_click(self, x, y, button, pressed):
self.events.append(Event(datetime.now() - self.basetime, EventKind.MOUSE_CLICK, [x, y, button, pressed]))
def on_scroll(self, x, y, dx, dy):
self.events.append(Event(datetime.now() - self.basetime, EventKind.MOUSE_SCROLL, [x, y, dx, dy]))
def start(self):
self.basetime = datetime.now()
self.listener = pynput.mouse.Listener(
on_move=self.on_move if self.recordMove else None,
on_click=self.on_click if self.recordClick else None,
on_scroll=self.on_scroll if self.recordScroll else None)
self.listener.start()
def stop(self):
self.listener.stop()
self.listener = None
def clear(self):
self.events = []
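# Rough usage sketch (hypothetical driver code; this module is normally
# imported as part of a package, so these lines would live in a separate
# script). The 5-second capture window is illustrative only.
if __name__ == "__main__":
    import time
    keyboard, mouse = KeyboardRecorder(), MouseRecorder()
    keyboard.start()
    mouse.start()
    time.sleep(5)  # interact with the keyboard and mouse during this window
    keyboard.stop()
    mouse.stop()
    for event in keyboard.events + mouse.events:
        print(event)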
|
import boto3
import datetime
import logging
import os
import pytz
events = boto3.client('events')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
TOGGLE = os.environ.get("TOGGLE", "LocalTime")
DISABLE_PUT = os.environ.get("DISABLE_PUT", "false")
def lambda_handler(event=None, context=None):
if event["detail-type"] == "Scheduled Event":
# Scheduled
logger.info("Searching for all rules in account.")
main_wrapper()
else:
if DISABLE_PUT == "true":
return
# Newly created or updated rule.
user = event["detail"]["userIdentity"]["arn"]
if f"{context.function_name}-role" in user:
logger.info("Triggered by self - Ignoring event.")
return
if event["detail"]["eventName"] == "PutRule":
main(event["detail"]["responseElements"]["ruleArn"])
else:
main(event["detail"]["requestParameters"]["resourceARN"])
def main_wrapper():
# List all rules in account
paginator = events.get_paginator('list_rules')
pages = paginator.paginate(EventBusName='default')
for page in pages:
for rule in page["Rules"]:
if "ScheduleExpression" not in rule:
# Skip for event pattern rules
continue
main(rule["Arn"], scheduled=True)
def main(arn, scheduled=False):
# Skip if resource does not have a tag with TOGGLE key
response = events.list_tags_for_resource(ResourceARN=arn)
timezone = [tag["Value"] for tag in response["Tags"] if tag["Key"] == TOGGLE]
if not timezone:
logger.info(f"Resource {arn} does not have {TOGGLE} tagged so will be ignored.")
return
# Skip if timezone (TOGGLE keys value) is invalid
timezone = timezone[0]
if timezone not in list(pytz.common_timezones):
logger.error(f"{timezone} is not a valid time zone. Please check pytz documentation.")
return
name = arn.split("rule/")[1]
logger.info(f"EventBridge Rule Arn: {arn}")
logger.info(f"Rule Name: {name}")
logger.info(f"Rule Time Zone: {timezone}")
# Get rule details
response = events.describe_rule(
Name=name,
EventBusName='default'
)
# Skip for event pattern rules
if "ScheduleExpression" not in response:
logger.info("Rule does not have a schedule expression so will be ignored.")
return
expression = response["ScheduleExpression"]
description = response.get("Description", "")
role_arn = response.get("RoleArn", None)
logger.info(f"Rule Cron Expression: {expression}")
# Calculate new expression
new_expression = calculate_expression(timezone, expression, scheduled=scheduled)
if expression != new_expression:
"""
Applying terraform overrides the adjusted expression so this should just
adjust it back. Performing TagResource manually may result in the hour
dropping by 1 every time.
"""
logger.info(f"New expression: {new_expression}")
if role_arn:
response = events.put_rule(
Name=name,
ScheduleExpression=new_expression,
Description=description,
RoleArn=role_arn
)
else:
response = events.put_rule(
Name=name,
ScheduleExpression=new_expression,
Description=description
)
return
logger.info(f"Current expression: {expression}")
def calculate_expression(tz, exp, scheduled=False):
"""Workout new Cron expression based on timezone."""
utc = pytz.timezone('UTC')
now = utc.localize(datetime.datetime.utcnow())
local_time = now.astimezone(pytz.timezone(tz))
    if local_time.tzinfo._dst.seconds != 0:  # pytz internal: a non-zero DST offset means daylight saving is active
logger.info("Daylight savings in effect.")
split_exp = exp.split(" ")
split_exp[1] = format_hour(split_exp[1])
return " ".join(split_exp)
else:
logger.info("Daylight savings not in effect.")
if scheduled:
# Scheduled event when DST not in effect should increment an hour
split_exp = exp.split(" ")
split_exp[1] = format_hour(split_exp[1], subtract=False)
return " ".join(split_exp)
# Otherwise expression should remain as per Terraform
return exp
def format_hour(string, subtract=True):
"""Format the hour component of the expression."""
if "*" in string or "/" in string:
# Asterisk or forward slash wildcards
return string
elif "," in string:
# Comma separated values
hours = string.split(",")
hours = [subtract_hour(h) if subtract else add_hour(h) for h in hours]
return ",".join(hours)
elif "-" in string:
# Range of values
hours = string.split("-")
hours = [subtract_hour(h) if subtract else add_hour(h) for h in hours]
return "-".join(hours)
else:
# Single value
if subtract:
return subtract_hour(string)
return add_hour(string)
def subtract_hour(string):
"""Subtracts an hour from the string - 24 hour format."""
if string == "0":
return "23"
hour = int(string)
return str(hour - 1)
def add_hour(string):
"""Adds an hour from the string - 24 hour format."""
if string == "23":
return "0"
hour = int(string)
return str(hour + 1)
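# Quick illustrative checks for format_hour: single hours shift by one (with
# wrap-around at midnight), comma lists and ranges shift element-wise, and
# wildcard fields pass through untouched. These run only if the module imports
# cleanly (boto3 needs a configured region); the values are examples only.
if __name__ == "__main__":
    assert format_hour("9") == "8"                   # DST in effect: shift back
    assert format_hour("0") == "23"                  # wraps around midnight
    assert format_hour("9", subtract=False) == "10"  # shift forward instead
    assert format_hour("9,17") == "8,16"             # comma-separated hours
    assert format_hour("9-17") == "8-16"             # hour ranges
    assert format_hour("*") == "*"                   # wildcards are left alone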
|
from librabbitmq import Connection
import argparse
import paramiko
import sys
import time
import os
import shutil
python_exe = "python"
if os.system("grep \'centos\' /etc/issue -i -q") == 0:
python_exe = "python2.7"
def get_ssh_client(ip, username=None, password=None, timeout=10):
client = None
try:
ip = ip.split(':')[0]
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
username = username or cfg.SSH_USER
password = password or cfg.SSH_PASSWORD
client.connect(ip, username=username, password=password, timeout=timeout)
print("Successfully SSHed to {0}".format(ip))
except Exception as ex:
print ex
sys.exit(1)
return client
def get_sftp_client(ip, username=None, password=None,):
try:
ip = ip.split(':')[0]
trans = paramiko.Transport((ip))
username = username or cfg.SSH_USER
password = password or cfg.SSH_PASSWORD
trans.connect(username=username, password=password)
print ("SFTPing to {0}".format(ip))
sftp_client = paramiko.SFTPClient.from_transport(trans)
return sftp_client
except Exception as ex:
print ex
sys.exit(1)
def kill_process(ssh_client, process_name):
print "Killing {0}".format(process_name)
_, stdout, _ = ssh_client.exec_command("pgrep -f {0}".format(process_name))
for pid in stdout.readlines():
ssh_client.exec_command("kill -9 {0}".format(pid.split()[0]))
def start_process(ssh_client, process_name, cmd):
print ("Starting {0}...".format(process_name))
ssh_client.exec_command(cmd)
time.sleep(5)
_, stdout, _ = ssh_client.exec_command("pgrep {0}".format(process_name))
print ("{0} is running with pid {1}".format(process_name, stdout.readlines()[0]))
def start_rabbitmq():
vhost_present = False
tries = 1
print("\n##### Setting up RabbitMQ @ {0} #####".format(cfg.RABBITMQ_IP))
rabbitmq_client = get_ssh_client(cfg.RABBITMQ_IP)
_, stdout, _ = rabbitmq_client.exec_command("ps aux|grep rabbitmq|grep -v grep|awk \'{print $2}\'")
print ("Killing existing RabbitMQ process ...")
for pid in stdout:
if pid == "":
continue
rabbitmq_client.exec_command("sudo kill -9 {0}".format(pid))
    if cfg.RABBITMQ_LOG_LOCATION != "":
print("Deleting RabbitMQ logs from {0}".format(cfg.RABBITMQ_LOG_LOCATION))
rabbitmq_client.exec_command("rm -rf {0}/*.*".format(cfg.RABBITMQ_LOG_LOCATION))
print ("Starting RabbitMQ ...")
rabbitmq_client.exec_command("screen -dmS rabbitmq sh -c \'sudo rabbitmq-server start; exec bash;\'")
time.sleep(20)
_, stdout, _ = rabbitmq_client.exec_command("sudo rabbitmqctl status")
for line in stdout.readlines():
sys.stdout.write(line)
print("Rabbitmq has been restarted and is now running!")
_, stdout, _ = rabbitmq_client.exec_command("sudo rabbitmqctl list_vhosts")
for line in stdout.readlines():
if not vhost_present:
if cfg.CB_CLUSTER_TAG in line:
vhost_present = True
sys.stdout.write(line)
if not vhost_present :
print ("Adding vhost {0} and setting permissions".format(cfg.CB_CLUSTER_TAG))
rabbitmq_client.exec_command("sudo rabbitmqctl add_vhost {0}".format(cfg.CB_CLUSTER_TAG))
rabbitmq_client.exec_command("sudo rabbitmqctl set_permissions -p {0} guest '.*' '.*' '.*'".format(cfg.CB_CLUSTER_TAG))
_, stdout, _ = rabbitmq_client.exec_command("sudo rabbitmqctl list_vhosts")
for line in stdout.readlines():
sys.stdout.write(line)
time.sleep(30)
while True:
try:
tries += 1
Connection(host=cfg.RABBITMQ_IP, userid="guest", password="guest", virtual_host=cfg.CB_CLUSTER_TAG)
print("Connected to RabbitMQ vhost")
break
except Exception as e:
print e
if tries <= 5:
print("Retrying connection {0}/5 ...".format(tries))
rabbitmq_client.exec_command("sudo rabbitmqctl delete_vhost {0}".format(cfg.CB_CLUSTER_TAG))
rabbitmq_client.exec_command("sudo rabbitmqctl add_vhost {0}".format(cfg.CB_CLUSTER_TAG))
rabbitmq_client.exec_command("sudo rabbitmqctl set_permissions -p {0} guest '.*' '.*' '.*'".format(cfg.CB_CLUSTER_TAG))
time.sleep(30)
continue
sys.exit(1)
rabbitmq_client.close()
def start_worker(worker_ip):
print("##### Setting up Celery Worker @ {0} #####".format(worker_ip))
worker_client = get_ssh_client(worker_ip)
# # Update Worker's testrunner repository
# repo_dir = os.path.dirname(os.path.dirname(cfg.WORKER_PYSYSTESTS_PATH))
# worker_client.exec_command("rm -rf {0}; mkdir -p {0}".format(repo_dir))
# worker_client.exec_command("cd {0}; git clone https://github.com/couchbase/testrunner.git".format(repo_dir))
# Copy testcfg.py file to all workers
worker_client.open_sftp().put("./testcfg.py", os.path.join(cfg.WORKER_PYSYSTESTS_PATH, "testcfg.py"))
# kill celery,remove screenlog
kill_process(worker_client, "celery")
worker_client.exec_command("screen -ls | grep \'celery\' | awk '{print $1}' | xargs -i screen -X -S {} quit")
worker_client.exec_command("screen -wipe")
worker_client.exec_command("rm -rf {0}/screenlog.0".format(cfg.WORKER_PYSYSTESTS_PATH))
# memcached
kill_process(worker_client, "memcached")
cmd = "memcached -u couchbase -d -l {0} -p 11911".format(worker_ip)
start_process(worker_client, "memcached", cmd)
print("Starting celery worker...")
_, out, _ = worker_client.exec_command("celery --version")
celery_param = ""
for line in out:
if "3.1.16" in line:
celery_param = "-Ofair"
print "Celery version: {0} is installed, running it with {1} param".format(line, celery_param)
break
if worker_ip == cfg.WORKERS[0]:
_, stdout, _ = worker_client.exec_command("cd {0}; pwd; export C_FORCE_ROOT=1;screen -dmS celery -L sh -c \ "
"\'celery worker -c 8 -A app -B -l ERROR {1} --purge -I app.init; exec bash;\'".format(cfg.WORKER_PYSYSTESTS_PATH, celery_param))
else:
_, stdout, _ = worker_client.exec_command("cd {0}; pwd; screen -dmS celery -L sh -c \
\'celery worker -c 16 -A app -l ERROR {1} -I app.init; exec bash;\'".format(cfg.WORKER_PYSYSTESTS_PATH, celery_param))
time.sleep(20)
#read_screenlog(worker_ip, cfg.WORKER_PYSYSTESTS_PATH, stop_if_EOF=True)
worker_client.close()
def start_seriesly():
print("##### Setting up Seriesly @ {0} #####".format(cfg.SERIESLY_IP))
cbmonitor_client = get_ssh_client(cfg.SERIESLY_IP)
kill_process(cbmonitor_client, "seriesly")
    if cfg.SERIESLY_DB_LOCATION != "":
print("Deleting old Seriesly db files from {0}".format(cfg.SERIESLY_DB_LOCATION))
cbmonitor_client.exec_command("rm -rf {0}/*.*".format(cfg.SERIESLY_DB_LOCATION))
# kill all existing screens
cbmonitor_client.exec_command("screen -ls | grep \'seriesly\' | awk \'{print $1}\' | xargs -i screen -X -S {} quit")
cbmonitor_client.exec_command("screen -ls | grep \'webapp\' | awk \'{print $1}\' | xargs -i screen -X -S {} quit")
cbmonitor_client.exec_command("screen -ls | grep \'ns_collector\' | awk \'{print $1}\' | xargs -i screen -X -S {} quit")
cbmonitor_client.exec_command("screen -ls | grep \'atop_collector\' | awk \'{print $1}\' | xargs -i screen -X -S {} quit")
cbmonitor_client.exec_command("rm -rf {0}/screenlog.0".format(cfg.CBMONITOR_HOME_DIR))
# screen 1 - start seriesly
start_cmd = "screen -dmS seriesly -L sh -c \'cd {0}; ./seriesly; exec bash;\'".format(cfg.SERIESLY_LOCATION)
start_process(cbmonitor_client, "seriesly", start_cmd)
def fix_sample_cfg(ssh_client):
# fix sample.cfg file
cfg_file_path = os.path.join(cfg.CBMONITOR_HOME_DIR, "sample.cfg")
ssh_client.exec_command("sed -i 's/.*host_port.*/host_port = {0}:8000/' {1}".format(cfg.SERIESLY_IP, cfg_file_path))
ssh_client.exec_command("sed -i 's/.*host .*/host = {0}/' {1}".format(cfg.SERIESLY_IP, cfg_file_path))
ssh_client.exec_command("sed -i 's/.*master_node.*/master_node = {0}/' {1}".format(cfg.COUCHBASE_IP, cfg_file_path))
if cfg.COUCHBASE_OS == "windows":
ssh_client.exec_command("sed -i 's/.*ssh_username.*/ssh_username = {0}/' {1}".format(cfg.COUCHBASE_SSH_USER, cfg_file_path))
ssh_client.exec_command("sed -i 's/.*ssh_password.*/ssh_password = {0}/' {1}".format(cfg.COUCHBASE_SSH_PASSWORD, cfg_file_path))
def start_cbmonitor():
print("\n##### Setting up CBMonitor @ {0} #####".format(cfg.SERIESLY_IP))
cbmonitor_client = get_ssh_client(cfg.SERIESLY_IP)
# screen 2 - start webserver
kill_process(cbmonitor_client, "webapp")
start_cmd = "cd {0}; screen -dmS webapp -L sh -c \'./bin/webapp add-user -S;./bin/webapp syncdb; \
./bin/webapp runserver {1}:8000; exec bash;\'".format(cfg.CBMONITOR_HOME_DIR, cfg.SERIESLY_IP)
start_process(cbmonitor_client, "webapp", start_cmd)
# screen 3 - start ns_collector
fix_sample_cfg(cbmonitor_client)
kill_process(cbmonitor_client, "ns_collector")
start_cmd = "cd {0}; screen -dmS ns_collector -L sh -c \'./bin/ns_collector sample.cfg; exec bash;\'".format(cfg.CBMONITOR_HOME_DIR)
start_process(cbmonitor_client, "ns_collector", start_cmd)
# screen 4 - start atop_collector
kill_process(cbmonitor_client, "atop_collector")
start_cmd = "cd {0}; screen -dmS atop_collector -L sh -c \'./bin/atop_collector sample.cfg; exec bash;\'".format(cfg.CBMONITOR_HOME_DIR)
start_process(cbmonitor_client, "atop_collector", start_cmd)
read_screenlog(cfg.SERIESLY_IP, cfg.CBMONITOR_HOME_DIR, stop_if_EOF=True, lines_to_read=100)
cbmonitor_client.close()
def read_screenlog(ip, screenlog_dir, retry=10, stop_if_EOF=False, lines_to_read=20000):
line = ""
line_count = 0
last_pos = 0
transport_client = get_sftp_client(ip)
screen_log = "{0}/screenlog.0".format(screenlog_dir)
op_file = transport_client.open(screen_log, 'r')
while "Test Complete" not in line and line_count < lines_to_read:
op_file.seek(last_pos)
line = op_file.readline()
last_pos = op_file.tell()
        if line is not None and line != "":
sys.stdout.write(line)
line_count += 1
else:
#Reached EOF, will retry after 'retry' secs
if stop_if_EOF:
break
time.sleep(retry)
op_file.close()
transport_client.close()
def run_setup():
# kick off the setup test
print("\n##### Starting cluster setup from {0} #####".format(cfg.SETUP_JSON))
worker_client = get_ssh_client(cfg.WORKERS[0])
# Import templates if needed
for template in cfg.SETUP_TEMPLATES:
print ("Importing document template {0}...".format(template.split('--')[1].split('--')[0]))
temp = "{0} cbsystest.py import template {1}".format(python_exe, template)
print temp
_, stdout, _ = worker_client.exec_command("cd {0}; {1} cbsystest.py import template {2} --cluster {3}".
format(cfg.WORKER_PYSYSTESTS_PATH, python_exe, template, cfg.CB_CLUSTER_TAG))
for line in stdout.readlines():
print line
print ("Running test ...")
_, stdout, _ = worker_client.exec_command("cd {0}; {1} cbsystest.py run test --cluster \'{2}\' --fromfile \'{3}\'".
format(cfg.WORKER_PYSYSTESTS_PATH, python_exe, cfg.CB_CLUSTER_TAG, cfg.SETUP_JSON))
read_screenlog(cfg.WORKERS[0], cfg.WORKER_PYSYSTESTS_PATH)
worker_client.close()
def run_test():
print "\n##### Starting system test #####"
start_worker(cfg.WORKERS[0])
# import doc template in worker
worker_client = get_ssh_client(cfg.WORKERS[0])
for template in cfg.TEST_TEMPLATES:
print ("Importing document template {0}...".format(template.split('--')[1].split('--')[0]))
temp = "{0} cbsystest.py import template {1}".format(python_exe, template)
print temp
_, stdout, _ = worker_client.exec_command("cd {0}; {1} cbsystest.py import template {2} --cluster {3}".
format(cfg.WORKER_PYSYSTESTS_PATH, python_exe, template, cfg.CB_CLUSTER_TAG))
for line in stdout.readlines():
print line
# Start sys test
print ("Starting system test from {0}...".format(cfg.TEST_JSON))
_, stdout, _ = worker_client.exec_command("cd {0}; {1} cbsystest.py run test --cluster \'{2}\' --fromfile \'{3}\'".
format(cfg.WORKER_PYSYSTESTS_PATH, python_exe, cfg.CB_CLUSTER_TAG, cfg.TEST_JSON))
time.sleep(5)
for line in stdout.readlines():
sys.stdout.write(line)
read_screenlog(cfg.WORKERS[0], cfg.WORKER_PYSYSTESTS_PATH)
worker_client.close()
def pre_install_check():
try:
print("##### Pre-install inspection #####")
print("Inspecting Couchbase server VMs ...")
for vm_ip in cfg.CLUSTER_IPS:
if cfg.COUCHBASE_OS == "windows":
vm_client = get_ssh_client(vm_ip, cfg.COUCHBASE_SSH_USER, cfg.COUCHBASE_SSH_PASSWORD)
else:
vm_client = get_ssh_client(vm_ip)
vm_client.close()
print ("Inspecting RabbitMQ ...")
rabbitmq = get_ssh_client(cfg.RABBITMQ_IP)
rabbitmq.close()
print ("Inspecting Worker ...")
worker = get_ssh_client(cfg.WORKERS[0])
worker.close()
print ("Inspecting CBMonitor ...")
cbmonitor = get_ssh_client(cfg.SERIESLY_IP)
cbmonitor.close()
print("Inspection complete!")
except Exception as e:
print e
sys.exit()
def upload_stats():
print "\n##### Uploading stats to CBFS #####"
worker_client = get_ssh_client(cfg.WORKERS[0])
push_stats_cmd = "cd {0}; {1} tools/push_stats.py --version {2} --build {3} --spec {4} \
--name {5} --cluster {6}".format(cfg.WORKER_PYSYSTESTS_PATH, python_exe, args['build'].split('-')[0], args['build'].split('-')[1],
cfg.TEST_JSON, cfg.TEST_JSON[cfg.TEST_JSON.rfind('/') + 1 : cfg.TEST_JSON.find('.')] , cfg.CB_CLUSTER_TAG)
print ("Executing {0}".format(push_stats_cmd))
_, stdout, _ = worker_client.exec_command(push_stats_cmd)
time.sleep(30)
for line in stdout.readlines():
print line
worker_client.close()
def install_couchbase():
print("Installing version {0} Couchbase on servers ...".format(args['build']))
install_cmd = "cd ..; {0} scripts/install.py -i {1} -p product=cb,version={2},parallel=true,{3}".\
format(python_exe, cfg.CLUSTER_INI, args['build'], args['params'])
print("Executing : {0}".format(install_cmd))
os.system(install_cmd)
if cfg.CLUSTER_RAM_QUOTA != "":
os.system("curl -d memoryQuota={0} \"http://{1}:{2}@{3}:8091/pools/default\"".
format(cfg.CLUSTER_RAM_QUOTA, cfg.COUCHBASE_USER, cfg.COUCHBASE_PWD, cfg.CLUSTER_IPS[0]))
for ip in cfg.CLUSTER_IPS:
os.system("curl -X POST -d \'ale:set_loglevel(xdcr_trace, debug).\' \"http://{0}:{1}@{2}:8091/diag/eval\"".
format(cfg.COUCHBASE_USER, cfg.COUCHBASE_PWD, ip))
time.sleep(60)
def warn_skip(task):
print("\nWARNING : Skipping {0}\n".format(task))
return True
def run(args):
exlargs = args['exclude']
# Pre-install check
("inspect" in exlargs) and warn_skip("Inspection") or pre_install_check()
# Install Couchbase
("install" in exlargs) and warn_skip("Installation") or install_couchbase()
# Setup RabbitMQ
("rabbitmq" in exlargs) and warn_skip("RabbitMQ") or start_rabbitmq()
# Setup Seriesly
("seriesly" in exlargs) and warn_skip("Seriesly") or start_seriesly()
# Start workers
("worker" in exlargs) and warn_skip("Celery Worker setup") or\
[start_worker(ip) for ip in cfg.WORKERS]
# Cluster-setup/create buckets, set RAM quota
("setup" in exlargs) and warn_skip("Cluster setup") or run_setup()
# Start cbmonitor
("cbmonitor" in exlargs) and warn_skip("CBMonitor") or start_cbmonitor()
# Run test
("systest" in exlargs) and warn_skip("System Test") or run_test()
# Upload stats
("stats" in exlargs) and warn_skip("Uploading Stats to CBFS") or upload_stats()
print("\n############################# Execution Complete! #################################")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Tool for running system tests \
\nUsage: python runsystest.py --build 3.0.0-355 \
--testcfg xdcr/testcfg_source.py \
--params upr=true,xdcr_upr=false \
--exclude install,seriesly,worker,cbmonitor,cluster,systest,stats")
parser.add_argument("--build", help="required param: build-version for system test to run on", required=True)
parser.add_argument("--testcfg", default="testcfg.py", help="required param: location of testcfg file in testcfg dir ")
parser.add_argument("--params", help="optional param: additional build params eg:vbuckets=1024,upr=true,xdcr_upr=false",
required=False)
parser.add_argument("--exclude",
nargs='+',
default="",
help="optional param: inspect install rabbitmq seriesly worker cbmonitor setup systest stats",
required=False)
try:
args = vars(parser.parse_args())
testcfg = args['testcfg']
if os.path.basename(os.path.abspath(os.getcwd())) != 'pysystests':
raise Exception("Run script from testrunner/pysystests folder, current folder is: %s" % os.getcwd())
shutil.copy(testcfg, "./testcfg.py")
print "Copied {0} to {1}/testcfg.py".format(testcfg, os.getcwd())
cfg = __import__("testcfg")
run(args)
except Exception as e:
print e
raise
|
from common import *
class TestBasics(TestCase):
def test_envvars(self):
        self.assertEqual('aaa', run('siteconfig KEY', env={'SITECONFIG_KEY': 'aaa'}).strip())
        self.assertEqual('bbb', run('siteconfig KEY', env={'FICONFIG_KEY': 'bbb'}).strip())
def test_example_loaded_python(self):
for name in 'A', 'B', 'C':
self.assertTrue(config.get('LOADED_%s' % name))
def test_example_loaded_shell(self):
for name in 'A', 'B', 'C':
self.assertEqual('True', run('ficonfig LOADED_%s' % name).strip())
self.assertEqual('True', run('siteconfig LOADED_%s' % name).strip())
def test_example_loaded_shell_case(self):
self.assertEqual('True', run('siteconfig loaded.a').strip())
def test_shell_get_default(self):
self.assertEqual('fallback', run('siteconfig --default fallback DOES_NOT_EXIST').strip())
def test_shell_get_eval(self):
self.assertEqual('COM', run('''siteconfig --eval 'ALICE_HOST.split(".")[-1].upper()' ''').strip())
def test_shell_get_pattern(self):
self.assertEqual('alice@alice.com', run('''siteconfig -p alice. -f '{}@{}' user host''').strip())
def test_shell_get_format(self):
self.assertEqual('alice@alice.com', run('''siteconfig --get -p alice. -f '{user}@{host}' ''').strip())
|
sum = 0
flag = 1
while flag == 1:
    data = int(raw_input("Enter the number or press 0 to quit :"))
    if data == 0:
        flag = 0
    sum = sum + data
print "Sum is ", sum
|
# Code for KiU-Net
# Author: Jeya Maria Jose
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import save_image
from torchvision.datasets import MNIST
import torch.nn.functional as F
import os
import matplotlib.pyplot as plt
class autoencoder(nn.Module):
def __init__(self):
super(autoencoder, self).__init__()
self.encoder1 = nn.Conv2d(3, 64, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(64, 128, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder4= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1,padding=2) # b, 16, 5, 5
self.decoder2 = nn.Conv2d(512, 256, 3, stride=1, padding=2) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(256, 128, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(64, 2, 3, stride=1, padding=1)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
# out = self.soft(out)
return out
class unet(nn.Module):
def __init__(self):
super(unet, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=1) # b, 16, 5, 5
self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=1) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
t1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
t2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
t3 = out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
t4 = out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
# t2 = out
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape,t4.shape)
out = torch.add(out,t4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t1)
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
# out = self.soft(out)
return out
class kinetwithsk(nn.Module):
def __init__(self):
super(kinetwithsk, self).__init__()
self.encoder1 = nn.Conv2d(1, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
# self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
# self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
# self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=2) # b, 16, 5, 5
# self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=2) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
# self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
# self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
# self.decoderf3 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
# self.encoderf1 = nn.Conv2d(16, 32, 3, stride=1, padding=1)
# self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
# self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.interpolate(self.encoder1(x),scale_factor=(2,2),mode ='bilinear'))
t1 = out
out = F.relu(F.interpolate(self.encoder2(out),scale_factor=(2,2),mode ='bilinear'))
t2 = out
out = F.relu(F.interpolate(self.encoder3(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
out = F.relu(F.max_pool2d(self.decoder3(out),2,2))
out = torch.add(out,t2)
out = F.relu(F.max_pool2d(self.decoder4(out),2,2))
out = torch.add(out,t1)
out = F.relu(F.max_pool2d(self.decoder5(out),2,2))
# out = self.soft(out)
return out
class kitenet(nn.Module):
def __init__(self):
super(kitenet, self).__init__()
self.encoder1 = nn.Conv2d(1, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
# self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
# self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
# self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=2) # b, 16, 5, 5
# self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=2) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 2, 3, stride=1, padding=1)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.interpolate(self.encoder1(x),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.interpolate(self.encoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.interpolate(self.encoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(F.max_pool2d(self.decoder3(out),2,2))
out = F.relu(F.max_pool2d(self.decoder4(out),2,2))
out = F.relu(F.max_pool2d(self.decoder5(out),2,2))
# out = self.soft(out)
return out
class kiunet(nn.Module):
def __init__(self, size=8):
super(kiunet, self).__init__()
assert(size % 2 == 0)
size //= 2
print(size)
        self.encoder1 = nn.Conv2d(3, size * 2, 3, stride=1, padding=1) # first layer expects a 3-channel (RGB) input; change in_channels to 1 for grayscale
self.en1_bn = nn.BatchNorm2d(size * 2)
self.encoder2= nn.Conv2d(size * 2, size * 4, 3, stride=1, padding=1)
self.en2_bn = nn.BatchNorm2d(size * 4)
self.encoder3= nn.Conv2d(size * 4, size * 8, 3, stride=1, padding=1)
self.en3_bn = nn.BatchNorm2d(size * 8)
self.decoder1 = nn.Conv2d(size * 8, size * 4, 3, stride=1, padding=1)
self.de1_bn = nn.BatchNorm2d(size * 4)
self.decoder2 = nn.Conv2d(size * 4,size * 2, 3, stride=1, padding=1)
self.de2_bn = nn.BatchNorm2d(size * 2)
self.decoder3 = nn.Conv2d(size * 2, size, 3, stride=1, padding=1)
self.de3_bn = nn.BatchNorm2d(size)
self.decoderf1 = nn.Conv2d(size * 8, size * 4, 3, stride=1, padding=1)
self.def1_bn = nn.BatchNorm2d(size * 4)
self.decoderf2= nn.Conv2d(size * 4, size * 2, 3, stride=1, padding=1)
self.def2_bn = nn.BatchNorm2d(size * 2)
self.decoderf3 = nn.Conv2d(size * 2, size, 3, stride=1, padding=1)
self.def3_bn = nn.BatchNorm2d(size)
        self.encoderf1 = nn.Conv2d(3, size * 2, 3, stride=1, padding=1) # first layer expects a 3-channel (RGB) input; change in_channels to 1 for grayscale
self.enf1_bn = nn.BatchNorm2d(size * 2)
self.encoderf2= nn.Conv2d(size * 2, size * 4, 3, stride=1, padding=1)
self.enf2_bn = nn.BatchNorm2d(size * 4)
self.encoderf3 = nn.Conv2d(size * 4, size * 8, 3, stride=1, padding=1)
self.enf3_bn = nn.BatchNorm2d(size * 8)
self.intere1_1 = nn.Conv2d(size * 2,size * 2,3, stride=1, padding=1)
self.inte1_1bn = nn.BatchNorm2d(size * 2)
self.intere2_1 = nn.Conv2d(size * 4,size * 4,3, stride=1, padding=1)
self.inte2_1bn = nn.BatchNorm2d(size * 4)
self.intere3_1 = nn.Conv2d(size * 8,size * 8,3, stride=1, padding=1)
self.inte3_1bn = nn.BatchNorm2d(size * 8)
self.intere1_2 = nn.Conv2d(size * 2,size * 2,3, stride=1, padding=1)
self.inte1_2bn = nn.BatchNorm2d(size * 2)
self.intere2_2 = nn.Conv2d(size * 4,size * 4,3, stride=1, padding=1)
self.inte2_2bn = nn.BatchNorm2d(size * 4)
self.intere3_2 = nn.Conv2d(size * 8,size * 8,3, stride=1, padding=1)
self.inte3_2bn = nn.BatchNorm2d(size * 8)
self.interd1_1 = nn.Conv2d(size * 4,size * 4,3, stride=1, padding=1)
self.intd1_1bn = nn.BatchNorm2d(size * 4)
self.interd2_1 = nn.Conv2d(size * 2,size * 2,3, stride=1, padding=1)
self.intd2_1bn = nn.BatchNorm2d(size * 2)
self.interd3_1 = nn.Conv2d(size * 8,size * 8,3, stride=1, padding=1)
self.intd3_1bn = nn.BatchNorm2d(size * 8)
self.interd1_2 = nn.Conv2d(size * 4,size * 4,3, stride=1, padding=1)
self.intd1_2bn = nn.BatchNorm2d(size * 4)
self.interd2_2 = nn.Conv2d(size * 2,size * 2,3, stride=1, padding=1)
self.intd2_2bn = nn.BatchNorm2d(size * 2)
self.interd3_2 = nn.Conv2d(size * 8,size * 8,3, stride=1, padding=1)
self.intd3_2bn = nn.BatchNorm2d(size * 8)
self.final = nn.Conv2d(size,2,1,stride=1,padding=0)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(self.en1_bn(F.max_pool2d(self.encoder1(x),2,2))) #U-Net branch
out1 = F.relu(self.enf1_bn(F.interpolate(self.encoderf1(x),scale_factor=(2.,2.),mode ='bilinear'))) #Ki-Net branch
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte1_1bn(self.intere1_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear')) #CRFB
out1 = torch.add(out1,F.interpolate(F.relu(self.inte1_2bn(self.intere1_2(tmp))),scale_factor=(4.0,4.0),mode ='bilinear')) #CRFB
u1 = out #skip conn
o1 = out1 #skip conn
out = F.relu(self.en2_bn(F.max_pool2d(self.encoder2(out),2,2)))
out1 = F.relu(self.enf2_bn(F.interpolate(self.encoderf2(out1),scale_factor=(2.0,2.0),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte2_1bn(self.intere2_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte2_2bn(self.intere2_2(tmp))),scale_factor=(16.,16.),mode ='bilinear'))
u2 = out
o2 = out1
out = F.relu(self.en3_bn(F.max_pool2d(self.encoder3(out),2,2)))
out1 = F.relu(self.enf3_bn(F.interpolate(self.encoderf3(out1),scale_factor=(2.,2.),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte3_1bn(self.intere3_1(out1))),scale_factor=(0.015625,0.015625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte3_2bn(self.intere3_2(tmp))),scale_factor=(64.,64.),mode ='bilinear'))
### End of encoder block
### Start Decoder
out = F.relu(self.de1_bn(F.interpolate(self.decoder1(out),scale_factor=(2.,2.),mode ='bilinear'))) #U-NET
out1 = F.relu(self.def1_bn(F.max_pool2d(self.decoderf1(out1),2,2))) #Ki-NET
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd1_1bn(self.interd1_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd1_2bn(self.interd1_2(tmp))),scale_factor=(16.,16.),mode ='bilinear'))
out = torch.add(out,u2) #skip conn
out1 = torch.add(out1,o2) #skip conn
out = F.relu(self.de2_bn(F.interpolate(self.decoder2(out),scale_factor=(2.,2.),mode ='bilinear')))
out1 = F.relu(self.def2_bn(F.max_pool2d(self.decoderf2(out1),2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd2_1bn(self.interd2_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd2_2bn(self.interd2_2(tmp))),scale_factor=(4.,4.),mode ='bilinear'))
out = torch.add(out,u1)
out1 = torch.add(out1,o1)
out = F.relu(self.de3_bn(F.interpolate(self.decoder3(out),scale_factor=(2.,2.),mode ='bilinear')))
out1 = F.relu(self.def3_bn(F.max_pool2d(self.decoderf3(out1),2,2)))
out = torch.add(out,out1) # fusion of both branches
out = F.relu(self.final(out)) #1*1 conv
#out = self.soft(out)
return out
class reskiunet(nn.Module):
def __init__(self):
super(reskiunet, self).__init__()
self.encoder1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.en1 = nn.Conv2d(3, 16, 1, stride=1, padding=0) # b, 16, 10, 10
self.en1_bn = nn.BatchNorm2d(16)
self.encoder2= nn.Conv2d(16, 32, 3, stride=1, padding=1) # b, 8, 3, 3
self.en2= nn.Conv2d(16, 32, 1, stride=1, padding=0)
self.en2_bn = nn.BatchNorm2d(32)
self.encoder3= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.en3= nn.Conv2d(32, 64, 1, stride=1, padding=0)
self.en3_bn = nn.BatchNorm2d(64)
self.decoder1 = nn.Conv2d(64, 32, 3, stride=1, padding=1) # b, 1, 28, 28
self.de1 = nn.Conv2d(64, 32, 1, stride=1, padding=0)
self.de1_bn = nn.BatchNorm2d(32)
self.decoder2 = nn.Conv2d(32,16, 3, stride=1, padding=1)
self.de2 = nn.Conv2d(32,16, 1, stride=1, padding=0)
self.de2_bn = nn.BatchNorm2d(16)
self.decoder3 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.de3 = nn.Conv2d(16, 8, 1, stride=1, padding=0)
self.de3_bn = nn.BatchNorm2d(8)
self.decoderf1 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.def1 = nn.Conv2d(64, 32, 1, stride=1, padding=0)
self.def1_bn = nn.BatchNorm2d(32)
self.decoderf2= nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.def2= nn.Conv2d(32, 16, 1, stride=1, padding=0)
self.def2_bn = nn.BatchNorm2d(16)
self.decoderf3 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.def3 = nn.Conv2d(16, 8, 1, stride=1, padding=0)
self.def3_bn = nn.BatchNorm2d(8)
self.encoderf1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.enf1 = nn.Conv2d(3, 16, 1, stride=1, padding=0)
self.enf1_bn = nn.BatchNorm2d(16)
self.encoderf2= nn.Conv2d(16, 32, 3, stride=1, padding=1)
self.enf2= nn.Conv2d(16, 32, 1, stride=1, padding=0)
self.enf2_bn = nn.BatchNorm2d(32)
self.encoderf3 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.enf3 = nn.Conv2d(32, 64, 1, stride=1, padding=0)
self.enf3_bn = nn.BatchNorm2d(64)
self.intere1_1 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.inte1_1bn = nn.BatchNorm2d(16)
self.intere2_1 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.inte2_1bn = nn.BatchNorm2d(32)
self.intere3_1 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.inte3_1bn = nn.BatchNorm2d(64)
self.intere1_2 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.inte1_2bn = nn.BatchNorm2d(16)
self.intere2_2 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.inte2_2bn = nn.BatchNorm2d(32)
self.intere3_2 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.inte3_2bn = nn.BatchNorm2d(64)
self.interd1_1 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.intd1_1bn = nn.BatchNorm2d(32)
self.interd2_1 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.intd2_1bn = nn.BatchNorm2d(16)
self.interd3_1 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.intd3_1bn = nn.BatchNorm2d(64)
self.interd1_2 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.intd1_2bn = nn.BatchNorm2d(32)
self.interd2_2 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.intd2_2bn = nn.BatchNorm2d(16)
self.interd3_2 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.intd3_2bn = nn.BatchNorm2d(64)
self.final = nn.Conv2d(8,2,1,stride=1,padding=0)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = torch.add(self.en1(x),self.encoder1(x)) #init
out = F.relu(self.en1_bn(F.max_pool2d(out,2,2))) # U-Net
        out1 = torch.add(self.enf1(x),self.encoderf1(x)) #init (residual on the Ki-Net branch)
        out1 = F.relu(self.enf1_bn(F.interpolate(out1,scale_factor=(2,2),mode ='bilinear'))) # ki-net
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte1_1bn(self.intere1_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte1_2bn(self.intere1_2(tmp))),scale_factor=(4,4),mode ='bilinear'))
u1 = out
o1 = out1
out = torch.add(self.en2(out),self.encoder2(out)) #res
out1 = torch.add(self.enf2(out1),self.encoderf2(out1)) #res
out = F.relu(self.en2_bn(F.max_pool2d(out,2,2)))
out1 = F.relu(self.enf2_bn(F.interpolate(out1,scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte2_1bn(self.intere2_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte2_2bn(self.intere2_2(tmp))),scale_factor=(16,16),mode ='bilinear'))
u2 = out
o2 = out1
out = torch.add(self.en3(out),self.encoder3(out)) #res
out1 = torch.add(self.enf3(out1),self.encoderf3(out1)) #res
out = F.relu(self.en3_bn(F.max_pool2d(out,2,2)))
out1 = F.relu(self.enf3_bn(F.interpolate(out1,scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte3_1bn(self.intere3_1(out1))),scale_factor=(0.015625,0.015625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte3_2bn(self.intere3_2(tmp))),scale_factor=(64,64),mode ='bilinear'))
### End of encoder block
# print(out.shape,out1.shape)
out = torch.add(self.de1(out),self.decoder1(out)) #res
out1 = torch.add(self.def1(out1),self.decoderf1(out1)) #res
out = F.relu(self.de1_bn(F.interpolate(out,scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def1_bn(F.max_pool2d(out1,2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd1_1bn(self.interd1_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd1_2bn(self.interd1_2(tmp))),scale_factor=(16,16),mode ='bilinear'))
out = torch.add(out,u2)
out1 = torch.add(out1,o2)
out = torch.add(self.de2(out),self.decoder2(out)) #res
out1 = torch.add(self.def2(out1),self.decoderf2(out1)) #res
out = F.relu(self.de2_bn(F.interpolate(out,scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def2_bn(F.max_pool2d(out1,2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd2_1bn(self.interd2_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd2_2bn(self.interd2_2(tmp))),scale_factor=(4,4),mode ='bilinear'))
out = torch.add(out,u1)
out1 = torch.add(out1,o1)
out = torch.add(self.de3(out),self.decoder3(out)) #res
out1 = torch.add(self.def3(out1),self.decoderf3(out1)) #res
out = F.relu(self.de3_bn(F.interpolate(out,scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def3_bn(F.max_pool2d(out1,2,2)))
out = torch.add(out,out1)
out = F.relu(self.final(out))
# out = self.soft(out)
# print(out.shape)
return out
class DenseBlock(nn.Module):
def __init__(self, in_planes):
super(DenseBlock, self).__init__()
# print(int(in_planes/4))
self.c1 = nn.Conv2d(in_planes,in_planes,1,stride=1, padding=0)
self.c2 = nn.Conv2d(in_planes,int(in_planes/4),3,stride=1, padding=1)
self.b1 = nn.BatchNorm2d(in_planes)
self.b2 = nn.BatchNorm2d(int(in_planes/4))
self.c3 = nn.Conv2d(in_planes+int(in_planes/4),in_planes,1,stride=1, padding=0)
self.c4 = nn.Conv2d(in_planes,int(in_planes/4),3,stride=1, padding=1)
self.c5 = nn.Conv2d(in_planes+int(in_planes/2),in_planes,1,stride=1, padding=0)
self.c6 = nn.Conv2d(in_planes,int(in_planes/4),3,stride=1, padding=1)
self.c7 = nn.Conv2d(in_planes+3*int(in_planes/4),in_planes,1,stride=1, padding=0)
self.c8 = nn.Conv2d(in_planes,int(in_planes/4),3,stride=1, padding=1)
def forward(self, x):
org = x
# print(x.shape)
x= F.relu(self.b1(self.c1(x)))
# print(x.shape)
x= F.relu(self.b2(self.c2(x)))
d1 = x
# print(x.shape)
x = torch.cat((org,d1),1)
x= F.relu(self.b1(self.c3(x)))
x= F.relu(self.b2(self.c4(x)))
d2= x
x = torch.cat((org,d1,d2),1)
x= F.relu(self.b1(self.c5(x)))
x= F.relu(self.b2(self.c6(x)))
d3= x
x = torch.cat((org,d1,d2,d3),1)
x= F.relu(self.b1(self.c7(x)))
x= F.relu(self.b2(self.c8(x)))
d4= x
x = torch.cat((d1,d2,d3,d4),1)
x = torch.add(org,x)
return x
class densekiunet(nn.Module):
def __init__(self):
super(densekiunet, self).__init__()
self.encoder1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.en1 = DenseBlock(in_planes = 16) # b, 16, 10, 10
self.en1_bn = nn.BatchNorm2d(16)
self.encoder2= nn.Conv2d(16, 32, 3, stride=1, padding=1) # b, 8, 3, 3
self.en2= DenseBlock(in_planes = 32)
self.en2_bn = nn.BatchNorm2d(32)
self.encoder3= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.en3= DenseBlock(in_planes = 64)
self.en3_bn = nn.BatchNorm2d(64)
self.decoder1 = nn.Conv2d(64, 32, 3, stride=1, padding=1) # b, 1, 28, 28
self.de1 = DenseBlock(in_planes = 32)
self.de1_bn = nn.BatchNorm2d(32)
self.decoder2 = nn.Conv2d(32,16, 3, stride=1, padding=1)
self.de2 = DenseBlock(in_planes = 16)
self.de2_bn = nn.BatchNorm2d(16)
self.decoder3 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.de3 = DenseBlock(in_planes = 8)
self.de3_bn = nn.BatchNorm2d(8)
self.decoderf1 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.def1 = DenseBlock(in_planes = 32)
self.def1_bn = nn.BatchNorm2d(32)
self.decoderf2= nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.def2= DenseBlock(in_planes = 16)
self.def2_bn = nn.BatchNorm2d(16)
self.decoderf3 = nn.Conv2d(16, 8, 3, stride=1, padding=1)
self.def3 = DenseBlock(in_planes = 8)
self.def3_bn = nn.BatchNorm2d(8)
self.encoderf1 = nn.Conv2d(3, 16, 3, stride=1, padding=1)
self.enf1 = DenseBlock(in_planes = 16)
self.enf1_bn = nn.BatchNorm2d(16)
self.encoderf2= nn.Conv2d(16, 32, 3, stride=1, padding=1)
self.enf2= DenseBlock(in_planes = 32)
self.enf2_bn = nn.BatchNorm2d(32)
self.encoderf3 = nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.enf3 = DenseBlock(in_planes = 64)
self.enf3_bn = nn.BatchNorm2d(64)
self.intere1_1 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.inte1_1bn = nn.BatchNorm2d(16)
self.intere2_1 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.inte2_1bn = nn.BatchNorm2d(32)
self.intere3_1 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.inte3_1bn = nn.BatchNorm2d(64)
self.intere1_2 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.inte1_2bn = nn.BatchNorm2d(16)
self.intere2_2 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.inte2_2bn = nn.BatchNorm2d(32)
self.intere3_2 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.inte3_2bn = nn.BatchNorm2d(64)
self.interd1_1 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.intd1_1bn = nn.BatchNorm2d(32)
self.interd2_1 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.intd2_1bn = nn.BatchNorm2d(16)
self.interd3_1 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.intd3_1bn = nn.BatchNorm2d(64)
self.interd1_2 = nn.Conv2d(32,32,3, stride=1, padding=1)
self.intd1_2bn = nn.BatchNorm2d(32)
self.interd2_2 = nn.Conv2d(16,16,3, stride=1, padding=1)
self.intd2_2bn = nn.BatchNorm2d(16)
self.interd3_2 = nn.Conv2d(64,64,3, stride=1, padding=1)
self.intd3_2bn = nn.BatchNorm2d(64)
self.final = nn.Conv2d(8,2,1,stride=1,padding=0)
self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(self.en1_bn(F.max_pool2d(self.en1(self.encoder1(x)),2,2)))
out1 = F.relu(self.enf1_bn(F.interpolate(self.enf1(self.encoderf1(x)),scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte1_1bn(self.intere1_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte1_2bn(self.intere1_2(tmp))),scale_factor=(4,4),mode ='bilinear'))
u1 = out
o1 = out1
out = F.relu(self.en2_bn(F.max_pool2d(self.en2(self.encoder2(out)),2,2)))
out1 = F.relu(self.enf2_bn(F.interpolate(self.enf2(self.encoderf2(out1)),scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte2_1bn(self.intere2_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte2_2bn(self.intere2_2(tmp))),scale_factor=(16,16),mode ='bilinear'))
u2 = out
o2 = out1
out = F.relu(self.en3_bn(F.max_pool2d(self.en3(self.encoder3(out)),2,2)))
out1 = F.relu(self.enf3_bn(F.interpolate(self.enf3(self.encoderf3(out1)),scale_factor=(2,2),mode ='bilinear')))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.inte3_1bn(self.intere3_1(out1))),scale_factor=(0.015625,0.015625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.inte3_2bn(self.intere3_2(tmp))),scale_factor=(64,64),mode ='bilinear'))
### End of encoder block
# print(out.shape,out1.shape)
out = F.relu(self.de1_bn(F.interpolate(self.de1(self.decoder1(out)),scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def1_bn(F.max_pool2d(self.def1(self.decoderf1(out1)),2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd1_1bn(self.interd1_1(out1))),scale_factor=(0.0625,0.0625),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd1_2bn(self.interd1_2(tmp))),scale_factor=(16,16),mode ='bilinear'))
out = torch.add(out,u2)
out1 = torch.add(out1,o2)
out = F.relu(self.de2_bn(F.interpolate(self.de2(self.decoder2(out)),scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def2_bn(F.max_pool2d(self.def2(self.decoderf2(out1)),2,2)))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intd2_1bn(self.interd2_1(out1))),scale_factor=(0.25,0.25),mode ='bilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intd2_2bn(self.interd2_2(tmp))),scale_factor=(4,4),mode ='bilinear'))
out = torch.add(out,u1)
out1 = torch.add(out1,o1)
out = F.relu(self.de3_bn(F.interpolate(self.de3(self.decoder3(out)),scale_factor=(2,2),mode ='bilinear')))
out1 = F.relu(self.def3_bn(F.max_pool2d(self.def3(self.decoderf3(out1)),2,2)))
out = torch.add(out,out1)
out = F.relu(self.final(out))
# out = self.soft(out)
# print(out.shape)
return out
class kiunet3d(nn.Module): #
def __init__(self, c=4,n=1,channels=128,groups = 16,norm='bn', num_classes=5):
super(kiunet3d, self).__init__()
# Entry flow
self.encoder1 = nn.Conv3d( c, n, kernel_size=3, padding=1, stride=1, bias=False)# H//2
self.encoder2 = nn.Conv3d( n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.encoder3 = nn.Conv3d( 2*n, 4*n, kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder1 = nn.Conv3d( c, n, kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder2 = nn.Conv3d( n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.kencoder3 = nn.Conv3d( 2*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.downsample1 = nn.MaxPool3d(2, stride=2)
self.downsample2 = nn.MaxPool3d(2, stride=2)
self.downsample3 = nn.MaxPool3d(2, stride=2)
self.kdownsample1 = nn.MaxPool3d(2, stride=2)
self.kdownsample2 = nn.MaxPool3d(2, stride=2)
self.kdownsample3 = nn.MaxPool3d(2, stride=2)
self.upsample1 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//8
self.upsample2 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//4
self.upsample3 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//2
self.kupsample1 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//8
self.kupsample2 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//4
self.kupsample3 = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False) # H//2
self.decoder1 = nn.Conv3d( 4*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.decoder2 = nn.Conv3d( 2*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.decoder3 = nn.Conv3d( 2*n, c, kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder1 = nn.Conv3d( 2*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder2 = nn.Conv3d( 2*n, 2*n, kernel_size=3, padding=1, stride=1, bias=False)
self.kdecoder3 = nn.Conv3d( 2*n, c, kernel_size=3, padding=1, stride=1, bias=False)
self.intere1_1 = nn.Conv3d(n,n,3, stride=1, padding=1)
# self.inte1_1bn = nn.BatchNorm2d(16)
self.intere2_1 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.inte2_1bn = nn.BatchNorm2d(32)
self.intere3_1 = nn.Conv3d(2*n,4*n,3, stride=1, padding=1)
# self.inte3_1bn = nn.BatchNorm2d(64)
self.intere1_2 = nn.Conv3d(n,n,3, stride=1, padding=1)
# self.inte1_2bn = nn.BatchNorm2d(16)
self.intere2_2 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.inte2_2bn = nn.BatchNorm2d(32)
self.intere3_2 = nn.Conv3d(4*n,2*n,3, stride=1, padding=1)
# self.inte3_2bn = nn.BatchNorm2d(64)
self.interd1_1 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.intd1_1bn = nn.BatchNorm2d(32)
self.interd2_1 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.intd2_1bn = nn.BatchNorm2d(16)
self.interd3_1 = nn.Conv3d(n,n,3, stride=1, padding=1)
# self.intd3_1bn = nn.BatchNorm2d(64)
self.interd1_2 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.intd1_2bn = nn.BatchNorm2d(32)
self.interd2_2 = nn.Conv3d(2*n,2*n,3, stride=1, padding=1)
# self.intd2_2bn = nn.BatchNorm2d(16)
self.interd3_2 = nn.Conv3d(n,n,3, stride=1, padding=1)
# self.intd3_2bn = nn.BatchNorm2d(64)
self.seg = nn.Conv3d(c, num_classes, kernel_size=1, padding=0,stride=1,bias=False)
self.softmax = nn.Softmax(dim=1)
# Initialization
for m in self.modules():
if isinstance(m, nn.Conv3d):
                torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# Encoder
out = F.relu(F.max_pool3d(self.encoder1(x),2,2)) #U-Net branch
out1 = F.relu(F.interpolate(self.kencoder1(x),scale_factor=2,mode ='trilinear')) #Ki-Net branch
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intere1_1(out1)),scale_factor=0.25,mode ='trilinear')) #CRFB
out1 = torch.add(out1,F.interpolate(F.relu(self.intere1_2(tmp)),scale_factor=4,mode ='trilinear')) #CRFB
u1 = out #skip conn
o1 = out1 #skip conn
out = F.relu(F.max_pool3d(self.encoder2(out),2,2))
out1 = F.relu(F.interpolate(self.kencoder2(out1),scale_factor=2,mode ='trilinear'))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intere2_1(out1)),scale_factor=0.0625,mode ='trilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intere2_2(tmp)),scale_factor=16,mode ='trilinear'))
u2 = out
o2 = out1
out = F.relu(F.max_pool3d(self.encoder3(out),2,2))
out1 = F.relu(F.interpolate(self.kencoder3(out1),scale_factor=2,mode ='trilinear'))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.intere3_1(out1)),scale_factor=0.015625,mode ='trilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.intere3_2(tmp)),scale_factor=64,mode ='trilinear'))
### End of encoder block
### Start Decoder
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=2,mode ='trilinear')) #U-NET
out1 = F.relu(F.max_pool3d(self.kdecoder1(out1),2,2)) #Ki-NET
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.interd1_1(out1)),scale_factor=0.0625,mode ='trilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.interd1_2(tmp)),scale_factor=16,mode ='trilinear'))
out = torch.add(out,u2) #skip conn
out1 = torch.add(out1,o2) #skip conn
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=2,mode ='trilinear'))
out1 = F.relu(F.max_pool3d(self.kdecoder2(out1),2,2))
tmp = out
out = torch.add(out,F.interpolate(F.relu(self.interd2_1(out1)),scale_factor=0.25,mode ='trilinear'))
out1 = torch.add(out1,F.interpolate(F.relu(self.interd2_2(tmp)),scale_factor=4,mode ='trilinear'))
out = torch.add(out,u1)
out1 = torch.add(out1,o1)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=2,mode ='trilinear'))
out1 = F.relu(F.max_pool3d(self.kdecoder3(out1),2,2))
out = torch.add(out,out1) # fusion of both branches
out = F.relu(self.seg(out)) #1*1 conv
# out = self.soft(out)
return out
|
import time
import datetime
import requests
import lxml.etree as ET
class XMLReader():
""" Class to parse, preprocess and save XML/TEI
:param xml: An XML Document, either a File Path, an URL to an XML or an XML string
:type xml: str
:return: A XMLReader instance
:rtype: `xml.XMLReader`
"""
def __init__(self, xml=None):
""" initializes the class
:param xml: An XML Document, either a File Path, an URL to an XML or an XML string
:type xml: str
:return: A XMLReader instance
:rtype: `xml.XMLReader`
"""
self.ns_tei = {'tei': "http://www.tei-c.org/ns/1.0"}
self.ns_xml = {'xml': "http://www.w3.org/XML/1998/namespace"}
self.ns_tcf = {'tcf': "http://www.dspin.de/data/textcorpus"}
self.nsmap = {
'tei': "http://www.tei-c.org/ns/1.0",
'xml': "http://www.w3.org/XML/1998/namespace",
'tcf': "http://www.dspin.de/data/textcorpus"
}
self.file = xml
try:
self.original = ET.parse(self.file)
except Exception as e:
try:
self.original = ET.fromstring(self.file.encode('utf8'))
except Exception as e:
r = requests.get(self.file)
self.original = ET.fromstring(r.text)
try:
self.tree = ET.parse(self.file)
except Exception as e:
try:
self.tree = ET.fromstring(self.file.encode('utf8'))
except Exception as e:
r = requests.get(self.file)
self.tree = ET.fromstring(r.text)
except Exception as e:
self.parsed_file = "parsing didn't work"
def return_byte_like_object(self):
""" returns current doc as byte like object"""
return ET.tostring(self.tree, encoding="utf-8")
def return_string(self):
"""
returns current doc as string
:rtype: str
"""
return self.return_byte_like_object().decode('utf-8')
def tree_to_file(self, file=None):
"""
saves current tree to file
:param file: A filename/location to save the current doc
:type file: str
:return: The save-location
:rtype: str
"""
if file:
pass
else:
timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')
file = "{}.xml".format(timestamp)
with open(file, 'wb') as f:
f.write(ET.tostring(self.tree, encoding="UTF-8"))
return file
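# Minimal usage sketch. The file name "sample.xml" is an assumption for illustration;
# any local TEI/XML file, URL, or XML string would work as input.
if __name__ == "__main__":
    reader = XMLReader(xml="sample.xml")
    print(reader.return_string()[:300])
    print("saved to:", reader.tree_to_file("sample_copy.xml"))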
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy.optimize import linear_sum_assignment
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.metric import accuracy
from ppdet.core.workspace import register
from ppdet.modeling.losses.iou_loss import GIoULoss
__all__ = ["SparseRCNNLoss"]
@register
class SparseRCNNLoss(nn.Layer):
""" This class computes the loss for SparseRCNN.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
__shared__ = ['num_classes']
def __init__(self,
losses,
focal_loss_alpha,
focal_loss_gamma,
num_classes=80,
class_weight=2.,
l1_weight=5.,
giou_weight=2.):
""" Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
weight_dict: dict containing as key the names of the losses and as values their relative weight.
losses: list of all the losses to be applied. See get_loss for list of available losses.
matcher: module able to compute a matching between targets and proposals
"""
super().__init__()
self.num_classes = num_classes
weight_dict = {
"loss_ce": class_weight,
"loss_bbox": l1_weight,
"loss_giou": giou_weight
}
self.weight_dict = weight_dict
self.losses = losses
self.giou_loss = GIoULoss(reduction="sum")
self.focal_loss_alpha = focal_loss_alpha
self.focal_loss_gamma = focal_loss_gamma
self.matcher = HungarianMatcher(focal_loss_alpha, focal_loss_gamma,
class_weight, l1_weight, giou_weight)
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = paddle.concat([
paddle.gather(
t["labels"], J, axis=0) for t, (_, J) in zip(targets, indices)
])
target_classes = paddle.full(
src_logits.shape[:2], self.num_classes, dtype="int32")
for i, ind in enumerate(zip(idx[0], idx[1])):
target_classes[int(ind[0]), int(ind[1])] = target_classes_o[i]
target_classes.stop_gradient = True
src_logits = src_logits.flatten(start_axis=0, stop_axis=1)
# prepare one_hot target.
target_classes = target_classes.flatten(start_axis=0, stop_axis=1)
class_ids = paddle.arange(0, self.num_classes)
labels = (target_classes.unsqueeze(-1) == class_ids).astype("float32")
labels.stop_gradient = True
# comp focal loss.
class_loss = sigmoid_focal_loss(
src_logits,
labels,
alpha=self.focal_loss_alpha,
gamma=self.focal_loss_gamma,
reduction="sum", ) / num_boxes
losses = {'loss_ce': class_loss}
if log:
label_acc = target_classes_o.unsqueeze(-1)
src_idx = [src for (src, _) in indices]
pred_list = []
for i in range(outputs["pred_logits"].shape[0]):
pred_list.append(
paddle.gather(
outputs["pred_logits"][i], src_idx[i], axis=0))
pred = F.sigmoid(paddle.concat(pred_list, axis=0))
acc = accuracy(pred, label_acc.astype("int64"))
losses["acc"] = acc
return losses
def loss_boxes(self, outputs, targets, indices, num_boxes):
"""Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
"""
assert 'pred_boxes' in outputs # [batch_size, num_proposals, 4]
src_idx = [src for (src, _) in indices]
src_boxes_list = []
for i in range(outputs["pred_boxes"].shape[0]):
src_boxes_list.append(
paddle.gather(
outputs["pred_boxes"][i], src_idx[i], axis=0))
src_boxes = paddle.concat(src_boxes_list, axis=0)
target_boxes = paddle.concat(
[
paddle.gather(
t['boxes'], I, axis=0)
for t, (_, I) in zip(targets, indices)
],
axis=0)
target_boxes.stop_gradient = True
losses = {}
losses['loss_giou'] = self.giou_loss(src_boxes,
target_boxes) / num_boxes
image_size = paddle.concat([v["img_whwh_tgt"] for v in targets])
src_boxes_ = src_boxes / image_size
target_boxes_ = target_boxes / image_size
loss_bbox = F.l1_loss(src_boxes_, target_boxes_, reduction='sum')
losses['loss_bbox'] = loss_bbox / num_boxes
return losses
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = paddle.concat(
[paddle.full_like(src, i) for i, (src, _) in enumerate(indices)])
src_idx = paddle.concat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = paddle.concat(
[paddle.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = paddle.concat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
loss_map = {
'labels': self.loss_labels,
'boxes': self.loss_boxes,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {
k: v
for k, v in outputs.items() if k != 'aux_outputs'
}
# Retrieve the matching between the outputs of the last layer and the targets
indices = self.matcher(outputs_without_aux, targets)
        # Compute the average number of target boxes across all nodes, for normalization purposes
num_boxes = sum(len(t["labels"]) for t in targets)
num_boxes = paddle.to_tensor(
[num_boxes],
dtype="float32",
place=next(iter(outputs.values())).place)
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(
self.get_loss(loss, outputs, targets, indices, num_boxes))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices,
num_boxes, **kwargs)
w_dict = {}
for k in l_dict.keys():
if k in self.weight_dict:
w_dict[k + f'_{i}'] = l_dict[k] * self.weight_dict[
k]
else:
w_dict[k + f'_{i}'] = l_dict[k]
losses.update(w_dict)
return losses
class HungarianMatcher(nn.Layer):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_object. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-objects).
"""
def __init__(self,
focal_loss_alpha,
focal_loss_gamma,
cost_class: float=1,
cost_bbox: float=1,
cost_giou: float=1):
"""Creates the matcher
Params:
cost_class: This is the relative weight of the classification error in the matching cost
cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
"""
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
self.focal_loss_alpha = focal_loss_alpha
self.focal_loss_gamma = focal_loss_gamma
assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0"
@paddle.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Args:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates
eg. outputs = {"pred_logits": pred_logits, "pred_boxes": pred_boxes}
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
objects in the target) containing the class labels
"boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates
eg. targets = [{"labels":labels, "boxes": boxes}, ...,{"labels":labels, "boxes": boxes}]
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = F.sigmoid(outputs["pred_logits"].flatten(
start_axis=0, stop_axis=1))
out_bbox = outputs["pred_boxes"].flatten(start_axis=0, stop_axis=1)
# Also concat the target labels and boxes
tgt_ids = paddle.concat([v["labels"] for v in targets])
assert (tgt_ids > -1).all()
tgt_bbox = paddle.concat([v["boxes"] for v in targets])
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching; it can be omitted.
# Compute the classification cost.
alpha = self.focal_loss_alpha
gamma = self.focal_loss_gamma
neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(
1 - out_prob + 1e-8).log())
pos_cost_class = alpha * ((1 - out_prob)
**gamma) * (-(out_prob + 1e-8).log())
cost_class = paddle.gather(
pos_cost_class, tgt_ids, axis=1) - paddle.gather(
neg_cost_class, tgt_ids, axis=1)
# Compute the L1 cost between boxes
image_size_out = paddle.concat(
[v["img_whwh"].unsqueeze(0) for v in targets])
image_size_out = image_size_out.unsqueeze(1).tile(
[1, num_queries, 1]).flatten(
start_axis=0, stop_axis=1)
image_size_tgt = paddle.concat([v["img_whwh_tgt"] for v in targets])
out_bbox_ = out_bbox / image_size_out
tgt_bbox_ = tgt_bbox / image_size_tgt
cost_bbox = F.l1_loss(
out_bbox_.unsqueeze(-2), tgt_bbox_,
reduction='none').sum(-1) # [batch_size * num_queries, num_tgts]
        # Compute the giou cost between boxes
cost_giou = -get_bboxes_giou(out_bbox, tgt_bbox)
# Final cost matrix
C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
C = C.reshape([bs, num_queries, -1])
sizes = [len(v["boxes"]) for v in targets]
indices = [
linear_sum_assignment(c[i].numpy())
for i, c in enumerate(C.split(sizes, -1))
]
return [(paddle.to_tensor(
i, dtype="int32"), paddle.to_tensor(
j, dtype="int32")) for i, j in indices]
def box_area(boxes):
assert (boxes[:, 2:] >= boxes[:, :2]).all()
wh = boxes[:, 2:] - boxes[:, :2]
return wh[:, 0] * wh[:, 1]
def boxes_iou(boxes1, boxes2):
'''
Compute iou
Args:
boxes1 (paddle.tensor) shape (N, 4)
boxes2 (paddle.tensor) shape (M, 4)
Return:
(paddle.tensor) shape (N, M)
'''
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = paddle.maximum(boxes1.unsqueeze(-2)[:, :, :2], boxes2[:, :2])
rb = paddle.minimum(boxes1.unsqueeze(-2)[:, :, 2:], boxes2[:, 2:])
wh = (rb - lt).astype("float32").clip(min=1e-9)
inter = wh[:, :, 0] * wh[:, :, 1]
union = area1.unsqueeze(-1) + area2 - inter + 1e-9
iou = inter / union
return iou, union
def get_bboxes_giou(boxes1, boxes2, eps=1e-9):
"""calculate the ious of boxes1 and boxes2
Args:
boxes1 (Tensor): shape [N, 4]
boxes2 (Tensor): shape [M, 4]
eps (float): epsilon to avoid divide by zero
Return:
ious (Tensor): ious of boxes1 and boxes2, with the shape [N, M]
"""
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = boxes_iou(boxes1, boxes2)
lt = paddle.minimum(boxes1.unsqueeze(-2)[:, :, :2], boxes2[:, :2])
rb = paddle.maximum(boxes1.unsqueeze(-2)[:, :, 2:], boxes2[:, 2:])
wh = (rb - lt).astype("float32").clip(min=eps)
enclose_area = wh[:, :, 0] * wh[:, :, 1]
giou = iou - (enclose_area - union) / enclose_area
return giou
def sigmoid_focal_loss(inputs, targets, alpha, gamma, reduction="sum"):
    assert reduction in ["sum", "mean"], f"unsupported reduction: {reduction}"
p = F.sigmoid(inputs)
ce_loss = F.binary_cross_entropy_with_logits(
inputs, targets, reduction="none")
p_t = p * targets + (1 - p) * (1 - targets)
loss = ce_loss * ((1 - p_t)**gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
if reduction == "mean":
loss = loss.mean()
elif reduction == "sum":
loss = loss.sum()
return loss
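# Minimal sketch of the matcher on dummy tensors (one image, 3 proposals, 2 ground-truth
# boxes). All values below are illustrative, not real model output; boxes are absolute xyxy.
if __name__ == "__main__":
    dummy_outputs = {
        "pred_logits": paddle.rand([1, 3, 80]),
        "pred_boxes": paddle.to_tensor([[[10., 10., 50., 50.],
                                         [20., 30., 80., 90.],
                                         [5., 5., 25., 40.]]]),
    }
    dummy_targets = [{
        "labels": paddle.to_tensor([3, 17], dtype="int32"),
        "boxes": paddle.to_tensor([[12., 11., 48., 52.],
                                   [18., 28., 82., 88.]]),
        "img_whwh": paddle.to_tensor([100., 100., 100., 100.]),
        "img_whwh_tgt": paddle.to_tensor([[100., 100., 100., 100.],
                                          [100., 100., 100., 100.]]),
    }]
    matcher = HungarianMatcher(focal_loss_alpha=0.25, focal_loss_gamma=2.0,
                               cost_class=2., cost_bbox=5., cost_giou=2.)
    # one (prediction indices, target indices) tuple per image in the batch
    print(matcher(dummy_outputs, dummy_targets))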
|
#!flask/bin/python
import http.client
import json
from urllib import request, parse, error
from flask import url_for
import base64
import traceback
import sys
from pyserver.imagedata import *
class ImageAnalysis(object):
def __init__(self, app):
self.app = app
self.apikey = self.app.config['MSVAPIKEY']
self.headers = {
# request headers
'Content-Type' : 'application/json',
'Ocp-Apim-Subscription-Key' : self.apikey
}
self.apiserver = 'westcentralus.api.cognitive.microsoft.com'
def analyzeImage(self, urlRoot, image, categories):
params = parse.urlencode({
# requested parameters
'visualFeatures' : categories,
'language' : 'en'
})
try:
conn = http.client.HTTPSConnection(self.apiserver)
fileType = 'photos/full'
body = { 'url' : image.get_url(urlRoot, fileType) }
            bodyenc = json.dumps(body)  # the analyze endpoint expects a JSON body, matching the JSON content type
requestFrag = "/vision/v1.0/analyze?"
print('type of params', type(params), 'type of requestFrag', type(requestFrag), 'type of body', type(bodyenc))
print('apikey', self.apikey)
requestUrl = '{}{}'.format(requestFrag, params)
conn.request("POST", requestUrl, body=bodyenc, headers=self.headers)
response = conn.getresponse()
data = response.read()
conn.close()
return data
except Exception as e:
print("[Error {0}]".format(e))
print(traceback.format_exception(None, e, e.__traceback__), file=sys.stderr, flush=True)
return "Error"
|
"""
@author: dhoomakethu
"""
from setuptools import setup
from apocalypse import __version__
import platform
PSUTIL_ALPINE_LINUX = "4.1.0"
long_description = None
with (open('README.md')) as readme:
long_description = readme.read()
def fix_ps_util(install_requires):
for i, req in enumerate(install_requires[:]):
if "psutil" in req:
req = req.split("==")
req[-1] = PSUTIL_ALPINE_LINUX
req = "==".join(req)
install_requires[i] = req
with open('requirements.txt') as reqs:
install_requires = [
line for line in reqs.read().split('\n')
if (line and not line.startswith('--'))
]
if platform.system() == "Linux":
fix_ps_util(install_requires)
setup(name="apocalypse",
url='https://github.com/dhoomakethu/apocalypse',
version=__version__,
packages=['apocalypse', 'apocalypse.utils',
'apocalypse.chaos', 'apocalypse.app', 'apocalypse.chaos.events',
'apocalypse.exceptions', "apocalypse.server"],
description="Introduce chaos on to docker ecosystem",
long_description=long_description,
author="dhoomakethu",
author_email="otlasanju@gmail.com",
install_requires=install_requires,
scripts=['doom'],
include_package_data=True,
# dependency_links=['https://github.com/dhoomakethu/python-coloredlogs'
# '/tarball/master#egg=python-coloredlogs-5.0.1']
)
|
# TODO: Make deck_berry_py a package
# Placeholder for deck_berry_py
|
import json
import time
#from win10toast_click import ToastNotifier
import threading
import psutil
from PyQt5 import QtCore, QtGui
#from PyQt5.uic.properties import QtCore, QtGui
from PyQt5 import QtWidgets
import sys
import encrypte
from PyQt5.QtCore import QPropertyAnimation
# GUI FILE
from AppGraphic.appQtDesigner import Ui_MainWindow
from PyQt5.QtCore import QUrl
from PyQt5.QtMultimedia import QMediaContent, QMediaPlayer
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtWidgets import (QMainWindow)
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import os
import uuid
import subprocess
from qt_material import apply_stylesheet
import minecraft_launcher_lib
# from the launcher
from AppGraphic.button import Ui_Form
from AppGraphic.MojangLogin import Ui_Form as ml
import AppComponement.Mojang as Mojang
import AppComponement.Crack as cracki
import AppComponement.play as play
#import AppComponement.checkBox as checkBox
os.environ['QT_API'] = 'pyqt5'
CLIENT_ID = ""
SECRET = ""
REDIRECT_URL = ""
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.saidbis = None
# setting window icon
self.setWindowIcon(QIcon("../img/bg1.png"))
# setting icon text
self.setWindowIconText("logo")
self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)
videoWidget = QVideoWidget()
self.ui.setupUi(self)
self.ui.download.hide()
self.ui.play.setProperty('class', 'warning')
self.ui.comboBox_3.addItem("vanilla")
self.ui.comboBox_3.addItem("Forge")
self.w = None
self.ui.openFolder.clicked.connect(self.openFolder)
self.ui.accounte.clicked.connect(self.Window2)
self.ui.play.clicked.connect(self.play)
#self.ui.play.clicked.connect(self.)
self.ui.comboBox_2.addItem("Alpha67-server")
self.minecraft_directory = minecraft_launcher_lib.utils.get_minecraft_directory()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../img/connect logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.ui.accounte.setIcon(icon)
self.ui.accounte.setIconSize(QtCore.QSize(121, 101))
def sta():
self.Window2()
self.ui.label.setPixmap(QtGui.QPixmap("../img/Alpha67 Laucher.png"))
self.hello()
self.mediaPlayer.setVideoOutput(videoWidget)
minecraft_directory = minecraft_launcher_lib.utils.get_minecraft_directory()
versions = minecraft_launcher_lib.utils.get_version_list()
for i in versions:
# Only add release versions
if i["type"] == "release":
if not "fo" in i["id"]:
self.ui.comboBox_2.addItem(i["id"])
#self.version_select.addItem(i["id"])
self.show()
def openFolder(self):
print("open alpha folder")
os.system('cmd /c "start C:/Users\%username%\AppData\Roaming\.alpha67\minecraft"')
def execute_command(self, command):
# QProcess.start takes as first argument the program and as second the list of arguments
# So we need the filter the program from the command
arguments = command[1:]
# Deactivate the launch button
#self.launch_button.setEnabled(False)
# Clear the text field
#self.setPlainText("")
self.process = QProcess(self)
# Activate the launch button when Minecraft is closed
#self.process.finished.connect(lambda: self.play.setEnabled(True))
# Connect the function to display the output
#self.process.readyRead.connect(self.dataReady)
# Start Minecraft
self.process.start("java", arguments)
def hello(self):
th1 = threading.Thread(target=self.smart)
th1.start()
th2 = threading.Thread(target=self.updateApp)
th2.start()
print("sa")
def smart(self):
print("ok")
time.sleep(15)
def updateApp(self):
print("100")
while True:
self.ui.cpu.display(str(psutil.cpu_percent(4)))
dict(psutil.virtual_memory()._asdict())
# you can have the percentage of used RAM
ram = psutil.virtual_memory().percent
self.ui.ram.display(str(ram))
def resizeMainWindow(self, width, height):
# CREATE ANIMATION
self.animation = QPropertyAnimation(self, b"size")
self.animation.setDuration(1000)
self.animation.setEndValue(QtCore.QSize(width,height))
self.animation.setEasingCurve(QtCore.QEasingCurve.InOutQuad)
self.animation.start()
#start thread
###########################################################
def mojangThread(self):
self.Form.close()
th3 = threading.Thread(target=self.Mojang)
th3.start()
def microsoftThread(self):
self.Form.close()
th4 = threading.Thread(target=self.microsoft)
th4.start()
def crackThread(self):
self.Form.close()
th4 = threading.Thread(target=self.Crack)
th4.start()
def checkBoxThread(self):
th5 = threading.Thread(target=self.checkBox)
th5.start()
def playThread(self):
th6 = threading.Thread(target=self.play)
th6.start()
def minecraftThread(self):
th7 = threading.Thread(target=self.minecraft)
th7.start()
#self.minecraft()
#thread fonction
###########################################################
def Mojang(self):
self.Form.close()
Mojang.__init__()
def Crack(self):
self.Form.close()
cracki.__init__()
def microsoft(self):
self.Form.close()
os.startfile("microsoftLogin.exe")
#start the minecraft
def minecraft(self):
        user = os.getlogin()
        # default Minecraft directory, used by the vanilla launch commands below
        minecraft_directory = self.minecraft_directory
def maximum(max_value, value):
max_value[0] = value
version = self.ui.comboBox_2.currentText()
if version == "Alpha67-server":
directory = 'C:/Users/'+user+'\AppData\Roaming\.alpha67/alpha/'
            print('start alpha launcher to connect to the server')
user = os.getlogin()
motor = "Forge"
version = "1.16.5"
def maximum(max_value, value):
max_value[0] = value
print('start minecraft')
max_value = [0]
def updateBar(value, maxValue):
percent = 100 * int(value) / int(maxValue[0])
# print(int(percent))
self.ui.download.setValue(percent)
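            # callbacks consumed by minecraft_launcher_lib's install functions to report
            # download progress to the progress bar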
callback = {
"setStatus": lambda text: print(text),
"setProgress": lambda value: updateBar(value, max_value),
"setMax": lambda value: maximum(max_value, value)
}
self.ui.download.show()
self.ui.play.hide()
print(motor)
forge_version = minecraft_launcher_lib.forge.find_forge_version(version)
print(forge_version)
try:
forgeLauch = forge_version.replace("-", "-forge-")
except:
print("forge version can be download or not exist")
forgeLauch = None
print(forgeLauch)
            # if you launch minecraft vanilla
if motor == "vanilla":
minecraft_launcher_lib.install.install_minecraft_version(version, directory, callback=callback)
            # if you launch minecraft forge
if motor == "Forge":
                if forge_version is None:
                    print("forge version not available")
else:
def checkVersionDoawnload():
try:
directory_mod = 'C:/Users/'+user+'/AppData/Roaming\.alpha67/alpha/versions'
files = os.listdir(directory_mod)
for f in files:
print("file: " + f)
if forgeLauch == f:
print("version already download lauching minecraft")
return True
break
except:
None
                    if checkVersionDoawnload() is None:
try:
print("doawnloading:" + forgeLauch)
minecraft_launcher_lib.forge.install_forge_version(forge_version, directory,
callback=callback)
print(forgeLauch)
except:
None
self.ui.play.show()
self.ui.download.hide()
login = self.getSelectVersion()
print(login)
###########
if login == "mojang":
print("okok")
with open('C:/Users/' + user + '\AppData\Roaming\.alpha67/alpha/cred.json', 'r') as file:
uInfo = json.load(file)
print(uInfo)
# uInfo = literal_eval(uInfo)
uInfo = uInfo['mojang']
uInfo = uInfo[0]
username = uInfo['username']
password = uInfo['password']
passwordEnc = str(user + "67")
password = password.replace("'", '')
password = password[1:]
print(password)
username = username.replace("'", '')
username = username[1:]
print(username)
pa = encrypte.password_decrypt(password, passwordEnc).decode()
us = encrypte.password_decrypt(username, passwordEnc).decode()
login_data = minecraft_launcher_lib.account.login_user(us, pa)
print(login_data)
options = {
"username": login_data["selectedProfile"]["name"],
"uuid": login_data["selectedProfile"]["id"],
"token": login_data["accessToken"]
}
if motor == "vanilla":
command = minecraft_launcher_lib.command.get_minecraft_command(version, minecraft_directory,
options)
self.execute_command(command)
elif motor == "Forge":
print("crack, lauching minecraft, version:" + forgeLauch)
directory = 'C:/Users/'+user+'\AppData\Roaming\.alpha67/alpha/'
minecraft_command = minecraft_launcher_lib.command.get_minecraft_command(
forgeLauch, directory, options)
subprocess.call(minecraft_command)
#################################################################################################
if login == "microsoft":
print("okok")
with open('C:/Users/' + user + '\AppData\Roaming\.alpha67/alpha/ACI.json', 'r') as file:
uInfo = json.load(file)
print(uInfo)
# uInfo = literal_eval(uInfo)
options = {
"username": uInfo["name"],
"uuid": uInfo["id"],
"token": uInfo["access_token"]
}
if motor == "vanilla":
command = minecraft_launcher_lib.command.get_minecraft_command(version, minecraft_directory,
options)
self.execute_command(command)
elif motor == "Forge":
print("crack, lauching minecraft, version:" + forgeLauch)
directory = 'C:/Users/'+user+'\AppData\Roaming\.alpha67/alpha/'
minecraft_command = minecraft_launcher_lib.command.get_minecraft_command(
forgeLauch, directory, options)
subprocess.call(minecraft_command)
#########################################################################################################
if login == "crack":
print("okoksss")
with open('C:/Users/' + user + '\AppData\Roaming\.alpha67/alpha/cred.json', 'r') as file:
uInfo = json.load(file)
print(uInfo)
uInfo = uInfo['crack']
uInfo = uInfo[0]
username = uInfo['username']
# uInfo = literal_eval(uInfo)
options = {
"username": username,
"uuid": uuid.uuid4().hex,
"token": ""
}
print(forge_version)
print(motor)
if motor == "vanilla":
command = minecraft_launcher_lib.command.get_minecraft_command(version, minecraft_directory,
options)
self.execute_command(command)
elif motor == "Forge":
try:
print("crack, lauching minecraft, version:" + forgeLauch)
directory = 'C:/Users/'+user+'\AppData\Roaming\.alpha67/alpha/'
minecraft_command = minecraft_launcher_lib.command.get_minecraft_command(
forgeLauch, directory, options)
print(minecraft_command)
subprocess.call(minecraft_command)
except:
None
else:
motor = self.ui.comboBox_3.currentText()
version = self.ui.comboBox_2.currentText()
            user = os.getlogin()
            # game directory for this branch (assumed to be the launcher's minecraft folder)
            directory = 'C:/Users/'+user+'\AppData\Roaming\.alpha67\minecraft/'
def maximum(max_value, value):
max_value[0] = value
print('start minecraft')
max_value = [0]
def updateBar(value, maxValue):
percent = 100 * int(value) / int(maxValue[0])
#print(int(percent))
self.ui.download.setValue(percent)
callback = {
"setStatus": lambda text: print(text),
"setProgress": lambda value: updateBar(value, max_value),
"setMax": lambda value: maximum(max_value, value)
}
self.ui.download.show()
self.ui.play.hide()
print(motor)
forge_version = minecraft_launcher_lib.forge.find_forge_version(version)
print(forge_version)
try:
forgeLauch = forge_version.replace("-", "-forge-")
except:
print("forge version can be download or not exist")
forgeLauch = None
print(forgeLauch)
            # if you launch minecraft vanilla
if motor == "vanilla":
minecraft_launcher_lib.install.install_minecraft_version(version, directory, callback=callback)
            # if you launch minecraft forge
if motor == "Forge":
                if forge_version is None:
                    print("forge version not available")
else:
def checkVersionDoawnload():
directory_mod = 'C:/Users/'+user+'\AppData\Roaming\.alpha67\minecraft/versions'
files = os.listdir(directory_mod)
for f in files:
print("file: "+f)
if forgeLauch == f:
print("version already download lauching minecraft")
return True
break
                    if checkVersionDoawnload() is None:
try:
print("doawnloading:"+forgeLauch)
minecraft_launcher_lib.forge.install_forge_version(forge_version, directory,
callback=callback)
print(forgeLauch)
except:
None
self.ui.play.show()
self.ui.download.hide()
login = self.getSelectVersion()
print(login)
###########
if login == "mojang":
print("okok")
with open('C:/Users/' + user + '\AppData\Roaming\.alpha67/alpha/cred.json', 'r') as file:
uInfo = json.load(file)
print(uInfo)
# uInfo = literal_eval(uInfo)
uInfo = uInfo['mojang']
uInfo = uInfo[0]
username = uInfo['username']
password = uInfo['password']
passwordEnc = str(user + "67")
password = password.replace("'", '')
password = password[1:]
print(password)
username = username.replace("'", '')
username = username[1:]
print(username)
pa = encrypte.password_decrypt(password, passwordEnc).decode()
us = encrypte.password_decrypt(username, passwordEnc).decode()
login_data = minecraft_launcher_lib.account.login_user(us, pa)
print(login_data)
options = {
"username": login_data["selectedProfile"]["name"],
"uuid": login_data["selectedProfile"]["id"],
"token": login_data["accessToken"]
}
if motor == "vanilla":
command = minecraft_launcher_lib.command.get_minecraft_command(version, minecraft_directory,
options)
self.execute_command(command)
elif motor == "Forge":
print("crack, lauching minecraft, version:"+forgeLauch)
directory = 'C:/Users/'+user+'\AppData\Roaming\.alpha67/alpha/'
minecraft_command = minecraft_launcher_lib.command.get_minecraft_command(
forgeLauch, directory, options)
subprocess.call(minecraft_command)
#################################################################################################
if login == "microsoft":
print("okok")
with open('C:/Users/' + user + '\AppData\Roaming\.alpha67/alpha/ACI.json', 'r') as file:
uInfo = json.load(file)
print(uInfo)
# uInfo = literal_eval(uInfo)
options = {
"username": uInfo["name"],
"uuid": uInfo["id"],
"token": uInfo["access_token"]
}
if motor == "vanilla":
command = minecraft_launcher_lib.command.get_minecraft_command(version, minecraft_directory,
options)
self.execute_command(command)
elif motor == "Forge":
print("crack, lauching minecraft, version:"+forgeLauch)
directory = 'C:/Users/'+user+'\AppData\Roaming\.alpha67/alpha/'
minecraft_command = minecraft_launcher_lib.command.get_minecraft_command(
forgeLauch, directory, options)
subprocess.call(minecraft_command)
#########################################################################################################
if login == "crack":
print("okoksss")
with open('C:/Users/' + user + '\AppData\Roaming\.alpha67/alpha/cred.json', 'r') as file:
uInfo = json.load(file)
print(uInfo)
uInfo = uInfo['crack']
uInfo = uInfo[0]
username = uInfo['username']
# uInfo = literal_eval(uInfo)
options = {
"username": username,
"uuid": uuid.uuid4().hex,
"token": ""
}
print(forge_version)
print(motor)
if motor == "vanilla":
command = minecraft_launcher_lib.command.get_minecraft_command(version, minecraft_directory,
options)
self.execute_command(command)
elif motor == "Forge":
try:
print("crack, lauching minecraft, version:"+forgeLauch)
directory = 'C:/Users/'+user+'\AppData\Roaming\.alpha67/alpha/'
minecraft_command = minecraft_launcher_lib.command.get_minecraft_command(
forgeLauch, directory, options)
print(minecraft_command)
subprocess.call(minecraft_command)
except:
None
def play(self):
user = os.getlogin()
def showDialog():
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
msgBox.setText("Vous devez vous connecter afin de lancer le jeu !!")
msgBox.setWindowTitle("Attention")
msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok:
self.Window2()
def checkLogin():
try:
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
crack = data["crack"][0]["connect"]
microsoft = data["microsoft"][0]["connect"]
mojang = data["mojang"][0]["connect"]
if crack == "True":
return True
elif microsoft == "True":
return True
elif mojang == "True":
return True
else:
print("please connects")
showDialog()
return False
except:
print("please connect")
showDialog()
if checkLogin() == True:
self.minecraftThread()
def getSelectVersion(self):
user = os.getlogin()
try:
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
crack = data["crack"][0]["select"]
microsoft = data["microsoft"][0]["select"]
mojang = data["mojang"][0]["select"]
if crack == "True":
return "crack"
elif microsoft == "True":
return "microsoft"
elif mojang == "True":
return "mojang"
else:
print("please connects")
except:
print("please connect")
def Window2(self):
user = os.getlogin()
self.Form = QtWidgets.QWidget()
self.uiw = Ui_Form()
self.uiw.setupUi(self.Form)
self.Form.show()
x = {
"mojang": [
{"connect": "False", "select": "False"}
],
"microsoft": [
{"connect": "False", "select": "False"}
],
"crack": [
{"connect": "False", "select": "False"}
]
}
if os.path.isfile("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json") == True:
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
if data["mojang"][0]["select"] == "True":
self.uiw.checkBox.setChecked(True)
if data["microsoft"][0]["select"] == "True":
self.uiw.checkBox_2.setChecked(True)
if data["crack"][0]["select"] == "True":
self.uiw.checkBox_3.setChecked(True)
else:
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "w") as jsonFile:
json.dump(x,jsonFile)
def checkBox():
if self.uiw.checkBox.isChecked() == True:
self.uiw.checkBox_3.setChecked(False)
self.uiw.checkBox_2.setChecked(False)
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
print(type(data))
print(data["mojang"][0]["connect"])
if data["mojang"][0]["connect"] == "False":
self.Mojang()
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
print(data)
data["mojang"][0]["select"] = "True"
data["microsoft"][0]["select"] = "False"
data["crack"][0]["select"] = "False"
print(data)
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "w") as jsonFile:
json.dump(data, jsonFile)
def checkBox3():
if self.uiw.checkBox_3.isChecked() == True:
self.uiw.checkBox.setChecked(False)
self.uiw.checkBox_2.setChecked(False)
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
print(type(data))
print(data["crack"][0]["connect"])
if data["crack"][0]["connect"] == "False":
self.Crack()
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
print(data)
data["mojang"][0]["select"] = "False"
data["microsoft"][0]["select"] = "False"
data["crack"][0]["select"] = "True"
print(data)
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "w") as jsonFile:
json.dump(data, jsonFile)
def checkBox2():
if self.uiw.checkBox_2.isChecked() == True:
self.uiw.checkBox.setChecked(False)
self.uiw.checkBox_3.setChecked(False)
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
print(type(data))
print(data["microsoft"][0]["connect"])
if data["microsoft"][0]["connect"] == "False":
self.microsoft()
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "r") as jsonFile:
data = json.load(jsonFile)
print(data)
data["mojang"][0]["select"] = "False"
data["microsoft"][0]["select"] = "True"
data["crack"][0]["select"] = "False"
print(data)
with open("C:/Users/" + user + "\AppData\Roaming\.alpha67/alpha/select.json", "w") as jsonFile:
json.dump(data, jsonFile)
self.uiw.mojang.clicked.connect(self.Mojang)
self.uiw.microsoft.clicked.connect(self.microsoft)
self.uiw.crack.clicked.connect(self.Crack)
self.uiw.checkBox.stateChanged.connect(checkBox)
self.uiw.checkBox_2.stateChanged.connect(checkBox2)
self.uiw.checkBox_3.stateChanged.connect(checkBox3)
if __name__ == "__main__":
extra = {
# Button colors
'danger': '#dc3545',
'warning': '#ffc107',
'success': '#17a2b8',
# Font
'font-family': 'Roboto',
}
app = QApplication(sys.argv)
#apply_stylesheet(app, theme='dark_teal.xml', invert_secondary=True, extra=extra)
stylesheet = app.styleSheet()
window = MainWindow()
sys.exit(app.exec_())
|
# Cookie handling
import requests
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent': user_agent}
r = requests.get('https://baidu.com', headers=headers)
# Iterate over all cookie fields and print their values
for cookie in r.cookies.keys():
print(cookie+':'+r.cookies.get(cookie))
# To send custom cookie values with a request, you can do it as follows; example below:
cookies = dict(name='qiye', age='10')
r = requests.get('https://baidu.com', headers=headers, cookies=cookies)
print(r.text)
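# A requests.Session keeps cookies between requests automatically; a short sketch
# reusing the same example URL:
s = requests.Session()
s.get('https://baidu.com', headers=headers)
print(s.cookies.get_dict())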
|
import sys
from PIL import Image
def main():
f = open(sys.argv[1], 'rb')
data = f.read()
f.close()
use_bits = True
BYTESPERLINE = 128
BITSPERLINE = BYTESPERLINE * 8
if use_bits:
width = BITSPERLINE
height = (len(data) * 8 + BITSPERLINE - 1) / BITSPERLINE
else:
width = BYTESPERLINE
height = (len(data) + BYTESPERLINE - 1) / BYTESPERLINE
img = Image.new('RGBA', (width, height))
for y in xrange(height):
if use_bits:
for x in xrange(width):
idx = y * BYTESPERLINE + (x / 8)
bitidx = x % 8 # 0 = topmost
if idx >= len(data):
img.putpixel((x,y), (255, 255, 255, 255))
else:
v = ord(data[idx])
v = (v >> (7 - bitidx)) & 0x01
if v > 0:
v = 0
else:
v = 255
img.putpixel((x,y), (v, v, v, 255))
else:
for x in xrange(width):
idx = y * BYTESPERLINE + x
if idx >= len(data):
img.putpixel((x,y), (255, 255, 255, 255))
else:
v = ord(data[idx])
img.putpixel((x,y), (v, v, v, 255))
img.save(sys.argv[2])
if __name__ == '__main__':
main()
|
import plotly.figure_factory as ff
import plotly.graph_objects as go
import statistics
import random
import pandas as pd
import csv
df = pd.read_csv("data.csv")
data = df["temp"].tolist()
population_mean=statistics.mean(data)
std_deviaton=statistics.stdev(data)
fig=ff.create_distplot([data],["temp"],show_hist=False)
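# Sketch (illustrative): mark the sample mean on the distplot with a vertical line.
# The y-extent is arbitrary, chosen only so the line is visible over the density curve.
fig.add_trace(go.Scatter(x=[population_mean, population_mean], y=[0, 0.2],
                         mode="lines", name="mean"))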
fig.show()
print("Mean of sample:- ",population_mean)
print("std_deviation of sample:- ",std_deviaton)
|
"""
Testing aoe2record-to-json core functionality.
"""
import json
import unittest
from a2j.commands import get_commands
from tests.util import execute
class test_a2j(unittest.TestCase):
parsed = execute([
"curl",
"http://localhost:8080/a2j/v1/parse/" + "/".join(get_commands().keys()) + "/?record=test.mgz"
])
with open("tests/data/test.json", "r") as file:
read = json.loads(file.read())
file.close()
def test_completed(self):
assert self.parsed["completed"] == self.read["completed"]
def test_dataset(self):
assert self.parsed["dataset"] == self.read["dataset"]
def test_encoding(self):
assert self.parsed["encoding"] == self.read["encoding"]
def test_file_hash(self):
assert self.parsed["file_hash"] == self.read["file_hash"]
def test_hash(self):
assert self.parsed["hash"] == self.read["hash"]
def test_language(self):
assert self.parsed["language"] == self.read["language"]
def test_mirror(self):
assert self.parsed["mirror"] == self.read["mirror"]
def test_owner(self):
assert self.parsed["owner"] == self.read["owner"]
def test_platform(self):
assert self.parsed["platform"] == self.read["platform"]
def test_restored(self):
assert self.parsed["restored"] == self.read["restored"]
def test_version(self):
assert self.parsed["version"] == self.read["version"]
def test_chat(self):
assert self.parsed["chat"] == self.read["chat"]
def test_diplomacy(self):
assert self.parsed["diplomacy"] == self.read["diplomacy"]
def test_players(self):
assert self.parsed["players"] == self.read["players"]
def test_profiles(self):
assert self.parsed["profiles"] == self.read["profiles"]
def test_ratings(self):
assert self.parsed["ratings"] == self.read["ratings"]
def test_teams(self):
assert self.parsed["teams"] == self.read["teams"]
def test_achievements(self):
assert self.parsed["achievements"] == self.read["achievements"]
def test_duration(self):
assert self.parsed["duration"] == self.read["duration"]
def test_map(self):
assert self.parsed["map"] == self.read["map"]
def test_objects(self):
assert self.parsed["objects"] == self.read["objects"]
def test_postgame(self):
assert self.parsed["postgame"] == self.read["postgame"]
def test_settings(self):
assert self.parsed["settings"] == self.read["settings"]
def test_start_time(self):
assert self.parsed["start_time"] == self.read["start_time"]
|
import sys, getopt
print('Number of args:', len(sys.argv), 'arguments.')
print('Argument list :', str(sys.argv))
print()
#input("\n\nPress Enter to exit.")
# [Getopt] tuts
def main(argv):
inputfile = ''
outputfile = ''
try:
        opts, args = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
except getopt.GetoptError:
print('Exe_05.py -i <inputfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('Exe_05.py -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
print(' Input file is "', inputfile, '"')
print('Output file is "', outputfile, '"')
if __name__ == "__main__":
main(sys.argv[1:])
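# Example invocation (file names are illustrative):
#   python Exe_05.py -i input.txt -o output.txt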
|
##########################
# Imports
##########################
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_addons as tfa
##########################
# Function
##########################
def _pairwise_distances(embeddings: tf.Tensor):
"""Compute the 2D matrix of distances between all the embeddings.
Args:
embeddings: tensor of shape (batch_size, embed_dim)
Returns:
pairwise_distances: tensor of shape (batch_size, batch_size)
"""
dot_product = K.dot(embeddings, K.transpose(embeddings))
square_norm = tf.linalg.diag_part(dot_product)
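    # Uses the identity ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, computed from the Gram
    # matrix; zero distances are later masked with epsilon so the gradient of sqrt stays finite.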
distances = K.expand_dims(square_norm, 1) - 2.0 * \
dot_product + K.expand_dims(square_norm, 0)
distances = K.maximum(distances, 0.0)
mask = K.equal(distances, 0.0)
mask = K.cast(mask, tf.float32)
distances = K.sqrt(distances + mask * K.epsilon())
distances = distances * (1.0 - mask)
return distances
def _get_triplet_mask(labels: tf.Tensor):
"""Return a 3D mask where mask[a, p, n] is True iff the triplet (a, p, n) is valid.
A triplet (i, j, k) is valid if:
- i, j, k are distinct
- labels[i] == labels[j] and labels[i] != labels[k]
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
"""
indices_equal = K.cast(tf.eye(K.shape(labels)[0]), tf.bool)
indices_not_equal = tf.math.logical_not(indices_equal)
i_not_equal_j = K.expand_dims(indices_not_equal, 2)
i_not_equal_k = K.expand_dims(indices_not_equal, 1)
j_not_equal_k = K.expand_dims(indices_not_equal, 0)
distinct_indices = tf.math.logical_and(tf.math.logical_and(i_not_equal_j,
i_not_equal_k),
j_not_equal_k)
label_equal = K.equal(K.expand_dims(labels, 0),
K.expand_dims(labels, 1))
i_equal_j = K.expand_dims(label_equal, 2)
i_equal_k = K.expand_dims(label_equal, 1)
valid_labels = tf.math.logical_and(
i_equal_j, tf.math.logical_not(i_equal_k))
mask = tf.math.logical_and(distinct_indices, valid_labels)
mask = K.cast(mask, tf.float32)
return mask
def invalid_triplets_ratio(labels: tf.Tensor, embeddings: tf.Tensor, margin: float) -> tf.Tensor:
"""[summary]
Args:
labels (tf.Tensor): [description]
embeddings (tf.Tensor): [description]
margin (float): [description]
Returns:
tf.Tensor: [description]
"""
pairwise_dist = _pairwise_distances(embeddings)
anchor_positive_dist = K.expand_dims(pairwise_dist, 2)
anchor_negative_dist = K.expand_dims(pairwise_dist, 1)
triplet_loss = anchor_positive_dist - anchor_negative_dist + margin
mask = _get_triplet_mask(labels)
triplet_loss = tf.math.multiply(mask, triplet_loss)
triplet_loss = K.maximum(triplet_loss, 0.0)
valid_triplets = K.cast(tf.math.greater(triplet_loss, K.epsilon()),
tf.float32)
num_positive_triplets = tf.math.reduce_sum(valid_triplets)
num_valid_triplets = tf.math.reduce_sum(mask)
fraction_positive_triplets = num_positive_triplets / num_valid_triplets
return fraction_positive_triplets
def triplet_loss_function(margin: float, mining_strategy: str):
"""[summary]
Args:
margin (float): Margin of the triplet loss.
mining_strategy (str): Strategy used to create batches
"""
if mining_strategy == "soft":
return tfa.losses.TripletSemiHardLoss(margin=margin)
if mining_strategy == "hard":
return tfa.losses.TripletHardLoss(margin=margin)
return tfa.losses.TripletSemiHardLoss(margin=margin)
def triplet_loss_metric(margin: float):
"""[summary]
Args:
margin (float): Margin of the triplet loss.
"""
@tf.function
def metric(y_true, y_pred):
return invalid_triplets_ratio(y_true, y_pred, margin)
return metric
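# Minimal sketch with random embeddings (shapes, labels and margin are illustrative only):
if __name__ == "__main__":
    dummy_labels = tf.constant([0, 0, 1, 1, 2, 2])
    dummy_embeddings = tf.math.l2_normalize(tf.random.normal([6, 16]), axis=1)
    loss_fn = triplet_loss_function(margin=0.5, mining_strategy="soft")
    metric_fn = triplet_loss_metric(margin=0.5)
    print("semi-hard loss:", float(loss_fn(dummy_labels, dummy_embeddings)))
    print("fraction of violating triplets:", float(metric_fn(dummy_labels, dummy_embeddings)))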
|
import pathlib
from flask import request, current_app, make_response
from carp_api import endpoint, signal
from carp_api.common import logic
class Ping(endpoint.BaseEndpoint):
"""Ping to get response: "pong".
Used for health check.
"""
url = 'ping'
name = 'ping'
def action(self): # pylint: disable=arguments-differ
resp = logic.get_pong(request.version)
return resp
class UrlMap(endpoint.BaseEndpoint):
"""All urls available on api.
"""
url = ''
name = 'url_map'
# output_schema = output_schema.Map
def action(self): # pylint: disable=arguments-differ
func_list = logic.get_url_map(request.version)
return func_list
class ShutDown(endpoint.BaseEndpoint):
"""ShutDown rouote, that terminates the server, expose it only for
development and testing environment, unless you like server restarts.
"""
url = 'shutdown'
name = 'shutdown'
methods = ['POST']
def action(self): # pylint: disable=arguments-differ
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
# pylint: disable=protected-access
signal.app_shutdown.send(current_app._get_current_object())
# pylint: enable=protected-access
func()
class FavIcon(endpoint.BaseEndpoint):
"""Favicon, to prevent 500 when other favicons are unavailable.
"""
url = 'favicon.ico'
name = 'favicon'
trailing_slash = False
propagate = False
def action(self): # pylint: disable=arguments-differ
file_path = (
pathlib.PosixPath(__file__).parent.parent.absolute() / 'data' /
'favicon.png'
)
with open(file_path, 'rb') as fpl:
resp = make_response(fpl.read())
resp.headers['content-type'] = 'image/vnd.microsoft.icon'
return resp
|
# -*- coding: utf-8 -*-
"""
solace.views.kb
~~~~~~~~~~~~~~~
The knowledge base views.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from sqlalchemy.orm import eagerload
from werkzeug import Response, redirect
from werkzeug.exceptions import NotFound, BadRequest, Forbidden
from werkzeug.contrib.atom import AtomFeed
from solace import settings
from solace.application import url_for, require_login, json_response
from solace.database import session
from solace.models import Topic, Post, Tag, PostRevision
from solace.utils.pagination import Pagination
from solace.templating import render_template, get_macro
from solace.i18n import _, format_datetime, list_sections
from solace.forms import QuestionForm, ReplyForm, CommentForm
from solace.utils.forms import Form as EmptyForm
from solace.utils.formatting import format_creole_diff, format_creole
from solace.utils.csrf import exchange_token_protected
from solace.utils.caching import no_cache
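# maps the "order_by" URL argument to the SQLAlchemy ordering used by topic lists and feeds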
_topic_order = {
'newest': Topic.date.desc(),
'hot': Topic.hotness.desc(),
'votes': Topic.votes.desc(),
'activity': Topic.last_change.desc()
}
def sections(request):
"""Shows a page where all sections are listed for the user to
select one.
"""
if len(settings.LANGUAGE_SECTIONS) == 1:
return redirect(url_for('kb.overview', lang_code=settings.LANGUAGE_SECTIONS[0]))
return render_template('kb/sections.html',
languages=list_sections())
def _topic_list(template_name, request, query, order_by, **context):
"""Helper for views rendering a topic list."""
# non moderators cannot see deleted posts, so we filter them out first
# for moderators the template marks the posts up as deleted so that
# they can be kept apart from non-deleted ones.
if not request.user or not request.user.is_moderator:
query = query.filter_by(is_deleted=False)
query = query.order_by(_topic_order[order_by])
# optimize the query for the template. The template needs the author
# of the topic as well (but not the editor) which is not eagerly
# loaded by default.
query = query.options(eagerload('author'))
pagination = Pagination(request, query, request.args.get('page', type=int))
return render_template(template_name, pagination=pagination,
order_by=order_by, topics=pagination.get_objects(),
**context)
def _topic_feed(request, title, query, order_by):
# non moderators cannot see deleted posts, so we filter them out first
# for moderators we mark the posts up as deleted so that
# they can be kept apart from non-deleted ones.
if not request.user or not request.user.is_moderator:
query = query.filter_by(is_deleted=False)
query = query.order_by(_topic_order[order_by])
query = query.options(eagerload('author'), eagerload('question'))
query = query.limit(max(0, min(50, request.args.get('num', 10, type=int))))
    feed = AtomFeed(u'%s — %s' % (title, settings.WEBSITE_TITLE),
subtitle=settings.WEBSITE_TAGLINE,
feed_url=request.url,
url=request.url_root)
for topic in query.all():
title = topic.title
if topic.is_deleted:
title += u' ' + _(u'(deleted)')
feed.add(title, topic.question.rendered_text, content_type='html',
author=topic.author.display_name,
url=url_for(topic, _external=True),
id=topic.guid, updated=topic.last_change, published=topic.date)
return feed.get_response()
def overview(request, order_by):
"""Shows the overview page for the given language of the knowledge base.
This page tries to select the "hottest" topics.
"""
query = Topic.query.language(request.view_lang)
return _topic_list('kb/overview.html', request, query, order_by)
def overview_feed(request, order_by):
"""Feed for the overview page."""
return _topic_feed(request, _(u'Questions'),
Topic.query.language(request.view_lang), order_by)
def unanswered(request, order_by):
"""Show only the unanswered topics."""
query = Topic.query.language(request.view_lang).unanswered()
return _topic_list('kb/unanswered.html', request, query, order_by)
def unanswered_feed(request, order_by):
"""Feed for the unanswered topic list."""
return _topic_feed(request, _(u'Unanswered Questions'),
Topic.query.language(request.view_lang).unanswered(),
order_by)
def by_tag(request, name, order_by):
"""Show only the unanswered topics."""
tag = Tag.query.filter(
(Tag.name == name) &
(Tag.locale == request.view_lang)
).first()
if tag is None:
raise NotFound()
return _topic_list('kb/by_tag.html', request, tag.topics, order_by,
tag=tag)
def by_tag_feed(request, name, order_by):
"""The feed for a tag."""
tag = Tag.query.filter(
(Tag.name == name) &
(Tag.locale == request.view_lang)
).first()
if tag is None:
raise NotFound()
    return _topic_feed(request, _(u'Questions Tagged “%s”') % tag.name,
tag.topics, order_by)
def tags(request):
"""Shows the tag-cloud."""
tags = Tag.query.filter(
(Tag.tagged > 0) &
(Tag.locale == request.view_lang)
).order_by(Tag.tagged.desc()).limit(40).all()
tags.sort(key=lambda x: x.name.lower())
return render_template('kb/tags.html', tags=tags)
def topic(request, id, slug=None):
"""Shows a topic."""
topic = Topic.query.eagerposts().get(id)
# if the topic id does not exist or the topic is from a different
# language, we abort with 404 early
if topic is None or topic.locale != request.view_lang:
raise NotFound()
# make sure the slug is okay, otherwise redirect to the real one
# to ensure URLs are unique.
if slug is None or topic.slug != slug:
return redirect(url_for(topic))
    # deleted posts cannot be seen by people without privileges
if topic.is_deleted and not (request.user and request.user.is_moderator):
raise Forbidden()
# a form for the replies.
form = ReplyForm(topic)
if request.method == 'POST' and form.validate():
reply = form.create_reply()
session.commit()
request.flash(_(u'Your reply was posted.'))
return redirect(url_for(reply))
# pull in the votes in a single query for all the posts related to the
# topic so that we only have to fire the database once.
if request.is_logged_in:
request.user.pull_votes(topic.posts)
return render_template('kb/topic.html', topic=topic,
reply_form=form.as_widget())
def topic_feed(request, id, slug=None):
"""A feed for the answers to a question."""
topic = Topic.query.eagerposts().get(id)
# if the topic id does not exist or the topic is from a different
# language, we abort with 404 early
if topic is None or topic.locale != request.view_lang:
raise NotFound()
# make sure the slug is okay, otherwise redirect to the real one
# to ensure URLs are unique.
if slug is None or topic.slug != slug:
return redirect(url_for(topic, action='feed'))
    # deleted posts cannot be seen by people without privileges
if topic.is_deleted and not (request.user and request.user.is_moderator):
raise Forbidden()
    feed = AtomFeed(u'%s — %s' % (topic.title, settings.WEBSITE_TITLE),
subtitle=settings.WEBSITE_TAGLINE,
feed_url=request.url,
url=request.url_root)
feed.add(topic.title, topic.question.rendered_text, content_type='html',
author=topic.question.author.display_name,
url=url_for(topic, _external=True),
id=topic.guid, updated=topic.question.updated,
published=topic.question.created)
for reply in topic.replies:
if reply.is_deleted and not (request.user and request.user.is_moderator):
continue
title = _(u'Answer by %s') % reply.author.display_name
if reply.is_deleted:
title += u' ' + _('(deleted)')
feed.add(title, reply.rendered_text, content_type='html',
author=reply.author.display_name,
url=url_for(reply, _external=True),
id=reply.guid, updated=reply.updated, created=reply.created)
return feed.get_response()
@require_login
def new(request):
"""The new-question form."""
form = QuestionForm()
if request.method == 'POST' and form.validate():
topic = form.create_topic()
session.commit()
request.flash(_(u'Your question was posted.'))
return redirect(url_for(topic))
return render_template('kb/new.html', form=form.as_widget())
def _load_post_and_revision(request, id):
post = Post.query.get(id)
if post is None or post.topic.locale != request.view_lang:
raise NotFound()
if post.is_deleted and not (request.user and request.user.is_moderator):
raise Forbidden()
revision_id = request.args.get('rev', type=int)
revision = None
if revision_id is not None:
revision = post.get_revision(revision_id)
if revision is None:
raise NotFound()
return post, revision
@require_login
def edit_post(request, id):
post, revision = _load_post_and_revision(request, id)
if not request.user.can_edit(post):
raise Forbidden()
if post.is_question:
form = QuestionForm(post.topic, revision=revision)
else:
form = ReplyForm(post=post, revision=revision)
if request.method == 'POST' and form.validate():
form.save_changes()
session.commit()
request.flash(_('The post was edited.'))
return redirect(url_for(post))
def _format_entry(author, date, extra=u''):
return _(u'%s (%s)') % (author, format_datetime(date)) + extra
post_revisions = [(revision is None, '', _format_entry(
(post.editor or post.author).display_name, post.updated,
u' [%s]' % _(u'Current revision')))] + \
[(revision == entry, entry.id, _format_entry(
entry.editor.display_name, entry.date))
for entry in post.revisions.order_by(PostRevision.date.desc())]
return render_template('kb/edit_post.html', form=form.as_widget(),
post=post, all_revisions=post_revisions)
@require_login
def delete_post(request, id):
post = Post.query.get(id)
# sanity checks
if not request.user.is_moderator:
raise Forbidden()
elif post.is_deleted:
return redirect(url_for(post))
form = EmptyForm()
if request.method == 'POST' and form.validate():
if 'yes' in request.form:
post.delete()
session.commit()
request.flash(_('The post was deleted'))
return redirect(url_for(post))
return render_template('kb/delete_post.html', post=post,
form=form.as_widget())
@require_login
def restore_post(request, id):
post, revision = _load_post_and_revision(request, id)
# sanity checks
if revision is None:
if not request.user.is_moderator:
raise Forbidden()
elif not post.is_deleted:
return redirect(url_for(post))
elif not request.user.can_edit(post):
raise Forbidden()
form = EmptyForm()
if request.method == 'POST' and form.validate():
if 'yes' in request.form:
if revision is None:
request.flash(_(u'The post was restored'))
post.restore()
else:
request.flash(_(u'The revision was restored'))
revision.restore()
session.commit()
return form.redirect(post)
return render_template('kb/restore_post.html', form=form.as_widget(),
post=post, revision=revision)
def post_revisions(request, id):
"""Shows all post revisions and a diff of the text."""
post = Post.query.get(id)
if post is None or post.topic.locale != request.view_lang:
raise NotFound()
if post.is_deleted and not (request.user and request.user.is_moderator):
raise Forbidden()
revisions = [{
'id': None,
'latest': True,
'date': post.updated,
'editor': post.editor or post.author,
'text': post.text
}] + [{
'id': revision.id,
'latest': False,
'date': revision.date,
'editor': revision.editor,
'text': revision.text
} for revision in post.revisions.order_by(PostRevision.date.desc())]
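# walk the revisions from oldest to newest: the oldest entry is rendered
# as-is, every later entry is shown as a creole diff against the text
# that preceded it.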
last_text = None
for revision in reversed(revisions):
if last_text is not None:
revision['diff'] = format_creole_diff(last_text, revision['text'])
else:
revision['diff'] = format_creole(revision['text'])
last_text = revision['text']
return render_template('kb/post_revisions.html', post=post,
revisions=revisions)
def userlist(request):
"""Shows a user list."""
return common_userlist(request, locale=request.view_lang)
@no_cache
@require_login
@exchange_token_protected
def vote(request, post):
"""Votes on a post."""
# TODO: this is currently also fired as GET if JavaScript is
# not available. Not very nice.
post = Post.query.get(post)
if post is None:
raise NotFound()
# you cannot cast votes on deleted shit
if post.is_deleted:
message = _(u'You cannot vote on deleted posts.')
if request.is_xhr:
return json_response(message=message, error=True)
request.flash(message, error=True)
return redirect(url_for(post))
# otherwise
val = request.args.get('val', 0, type=int)
if val == 0:
request.user.unvote(post)
elif val == 1:
# users cannot upvote on their own stuff
if post.author == request.user:
message = _(u'You cannot upvote your own post.')
if request.is_xhr:
return json_response(message=message, error=True)
request.flash(message, error=True)
return redirect(url_for(post))
# also some reputation is needed
if not request.user.is_admin and \
request.user.reputation < settings.REPUTATION_MAP['UPVOTE']:
message = _(u'In order to upvote you '
u'need at least %d reputation') % \
settings.REPUTATION_MAP['UPVOTE']
if request.is_xhr:
return json_response(message=message, error=True)
request.flash(message, error=True)
return redirect(url_for(post))
request.user.upvote(post)
elif val == -1:
# users need some reputation to downvote. Keep in mind that
# you *can* downvote yourself.
if not request.user.is_admin and \
request.user.reputation < settings.REPUTATION_MAP['DOWNVOTE']:
message = _(u'In order to downvote you '
u'need at least %d reputation') % \
settings.REPUTATION_MAP['DOWNVOTE']
if request.is_xhr:
return json_response(message=message, error=True)
request.flash(message, error=True)
return redirect(url_for(post))
request.user.downvote(post)
else:
raise BadRequest()
session.commit()
# standard requests are answered with a redirect back
if not request.is_xhr:
return redirect(url_for(post))
# others get a re-rendered vote box
box = get_macro('kb/_boxes.html', 'render_vote_box')
return json_response(html=box(post, request.user))
@no_cache
@exchange_token_protected
@require_login
def accept(request, post):
"""Accept a post as an answer."""
# TODO: this is currently also fired as GET if JavaScript is
# not available. Not very nice.
post = Post.query.get(post)
if post is None:
raise NotFound()
# just for sanity. It makes no sense to accept the question
# as answer. The UI does not allow that, so the user must have
# tampered with the data here.
if post.is_question:
raise BadRequest()
# likewise you cannot accept a deleted post as answer
if post.is_deleted:
message = _(u'You cannot accept deleted posts as answers')
if request.is_xhr:
return json_response(message=message, error=True)
request.flash(message, error=True)
return redirect(url_for(post))
topic = post.topic
# if the post is already the accepted answer, we unaccept the
# post as answer.
if post.is_answer:
if not request.user.can_unaccept_as_answer(post):
message = _(u'You cannot unaccept this reply as an answer.')
if request.is_xhr:
return json_response(message=message, error=True)
request.flash(message, error=True)
return redirect(url_for(post))
topic.accept_answer(None, request.user)
session.commit()
if request.is_xhr:
return json_response(accepted=False)
return redirect(url_for(post))
# otherwise we try to accept the post as answer.
if not request.user.can_accept_as_answer(post):
message = _(u'You cannot accept this reply as answer.')
if request.is_xhr:
return json_response(message=message, error=True)
request.flash(message, error=True)
return redirect(url_for(post))
topic.accept_answer(post, request.user)
session.commit()
if request.is_xhr:
return json_response(accepted=True)
return redirect(url_for(post))
def _get_comment_form(post):
return CommentForm(post, action=url_for('kb.submit_comment',
post=post.id))
def get_comments(request, post, form=None):
"""Returns the partial comment template. This is intended to be
used by XHR requests.
"""
if not request.is_xhr:
raise BadRequest()
post = Post.query.get(post)
if post is None:
raise NotFound()
# sanity check. This should not happen because the UI does not provide
# a link to retrieve the comments, but it could happen if the user
# accesses the URL directly or if he requests the comments to be loaded
# after a moderator deleted the post.
if post.is_deleted and not (request.user and request.user.is_moderator):
raise Forbidden()
form = _get_comment_form(post)
return json_response(html=render_template('kb/_comments.html', post=post,
form=form.as_widget()))
@require_login
def submit_comment(request, post):
"""Used by the form on `get_comments` to submit the form data to
the database. Returns partial data for the remote side.
"""
if not request.is_xhr:
raise BadRequest()
post = Post.query.get(post)
if post is None:
raise NotFound()
# not even moderators can submit comments for deleted posts.
if post.is_deleted:
message = _(u'You cannot submit comments for deleted posts')
return json_response(success=False, form_errors=[message])
form = _get_comment_form(post)
if form.validate():
comment = form.create_comment()
session.commit()
comment_box = get_macro('kb/_boxes.html', 'render_comment')
comment_link = get_macro('kb/_boxes.html', 'render_comment_link')
return json_response(html=comment_box(comment),
link=comment_link(post),
success=True)
return json_response(success=False, form_errors=form.as_widget().all_errors)
def get_tags(request):
"""A helper that returns the tags for the language."""
limit = max(0, min(request.args.get('limit', 10, type=int), 20))
query = Tag.query.filter(
(Tag.locale == request.view_lang) &
(Tag.tagged > 0)
)
q = request.args.get('q')
if q:
query = query.filter(Tag.name.like('%%%s%%' % q))
query = query.order_by(Tag.tagged.desc(), Tag.name)
return json_response(tags=[(tag.name, tag.tagged)
for tag in query.limit(limit).all()])
#: the knowledge base userlist is just a wrapper around the common
#: userlist from the users module.
from solace.views.users import userlist as common_userlist
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test ``log`` command."""
from __future__ import absolute_import, print_function
import os
from pathlib import Path
import git
import pytest
from renku.cli import cli
@pytest.mark.shelled
@pytest.mark.parametrize('format', ['json-ld', 'nt', 'rdf'])
def test_run_log_strict(runner, project, run_shell, format):
"""Test log output of run command."""
# Run a shell command with pipe.
result = run_shell('renku run echo "a" > output')
# Assert created output file.
result = runner.invoke(
cli, ['log', '--strict', '--format={}'.format(format)]
)
assert 0 == result.exit_code, result.output
assert '.renku/workflow/' in result.output
@pytest.mark.shelled
@pytest.mark.parametrize('format', ['json-ld', 'nt', 'rdf'])
def test_dataset_log_strict(tmpdir, runner, project, client, format):
"""Test output of log for dataset add."""
result = runner.invoke(cli, ['dataset', 'create', 'my-dataset'])
assert 0 == result.exit_code
paths = []
test_paths = []
for i in range(3):
new_file = tmpdir.join('file_{0}'.format(i))
new_file.write(str(i))
paths.append(str(new_file))
test_paths.append(str(new_file.relto(tmpdir.join('..'))))
# add data
result = runner.invoke(
cli,
['dataset', 'add', 'my-dataset'] + paths,
)
assert 0 == result.exit_code
result = runner.invoke(
cli, ['log', '--strict', '--format={}'.format(format)]
)
assert 0 == result.exit_code, result.output
assert all(p in result.output for p in test_paths)
@pytest.mark.shelled
@pytest.mark.parametrize('format', ['json-ld', 'nt', 'rdf'])
def test_dataset_log_invalidation_strict(
tmpdir, runner, project, client, format
):
"""Test output of log for dataset add."""
repo = git.Repo(project)
cwd = Path(project)
input_ = cwd / 'input.txt'
with input_.open('w') as f:
f.write('first')
repo.git.add('--all')
repo.index.commit('Created input.txt')
os.remove(str(input_))
repo.git.add('--all')
repo.index.commit('Removed input.txt')
result = runner.invoke(
cli, ['log', '--strict', '--format={}'.format(format)]
)
assert 0 == result.exit_code, result.output
assert 'wasInvalidatedBy' in result.output
|
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for quiz module."""
import cgi
import logging
import base64
import string
import random
import os
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
import django.template
# import django.utils.safestring
from django.utils import simplejson
from django.core.urlresolvers import reverse
import quiz.models as models
# For registering filter and tag libs.
register = django.template.Library()
@register.filter
def class_name(cls):
"""Returns name of the class."""
return cls.__class__.__name__
@register.filter
def get_key(cls):
"""Returns key for an object if it exists in datastore."""
try:
object_key = cls.key()
except db.NotSavedError:
return None
return str(object_key)
### Library function to interact with datastore ###
def gen_random_string(num_chars=16):
"""Generates a random string of the specified number of characters.
First char is chosen from set of alphabets as app engine requires
key name to start with an alphabet. Also '-_' are used instead of
'+/' for 64 bit encoding.
Args:
num_chars: Length of random string.
Returns:
Random string of length = num_chars
"""
# Uses base64 encoding, which has roughly 4/3 size of underlying data.
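# Illustrative sizing for the default num_chars=16: one leading letter plus
# 15 base64 characters; num_bytes = ((15 + 3) / 4) * 3 = 12 random bytes,
# which encode to 16 base64 characters and are sliced down to the 15 needed.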
first_letter = random.choice(string.letters)
num_chars -= 1
num_bytes = ((num_chars + 3) / 4) * 3
random_byte = os.urandom(num_bytes)
random_str = base64.b64encode(random_byte, altchars='-_')
return first_letter+random_str[:num_chars]
def create_temp_session_id():
"""Creates a new random session id for temporary session.
NOTE(mukundjha): All session id should be prefixed with its type.
There can be two types of session temp or user, temp is when
no user information is available or the quiz is not embedded
inside a doc.
Returns:
A unique session id (string).
TODO(mukundjha): Decide id length.
TODO(mukundjha): To incorporate temp ids, currently this is not used.
"""
while True:
session_id = 'temp' + gen_random_string()
session_count = models.ResponseModel.all().filter(
'session_id =', session_id).count()
if not session_count:
return session_id
else:
logging.info('Clash while creating session id %s', session_id)
def insert_with_new_key(cls, parent=None, **kwargs):
"""Insert model into datastore with a random key.
Args:
cls: Data model class (ex. models.DocModel).
parent: optional parent argument to bind models in same entity group.
NOTE: If parent argument is passed, key_name may not be unique across
all entities.
Returns:
Data model entity or None if error.
TODO(mukundjha): Check for race condition.
"""
while True:
key_name = gen_random_string()
entity = cls.get_by_key_name(key_name, parent=parent)
if entity is None:
entity = cls(key_name=key_name, parent=parent, **kwargs)
entity.put()
break
else:
logging.info("Entity with key "+key_name+" exists")
return entity
def create_quiz_property(**kwargs):
"""Creates a new instance of quiz property in datastore.
Returns:
QuizPropertyModel object.
"""
quiz_property = insert_with_new_key(models.QuizPropertyModel, **kwargs)
return quiz_property
def create_quiz(**kwargs):
"""Inserts a new quiz into the database.
Returns:
QuizModel object.
"""
quiz = insert_with_new_key(models.QuizModel, **kwargs)
quiz_id = str(quiz.key())
quiz_trunk = db.run_in_transaction(create_new_trunk_with_quiz,
quiz_id, **kwargs)
quiz.trunk = quiz_trunk
quiz.put()
return quiz
def add_question(quiz, question):
"""Adds a question object to the quiz if not already added.
Args:
quiz: Quiz to which question needs to be added.
question: Question to be added.
"""
query = models.QuizQuestionListModel.all().filter(
'question =', question).filter('quiz =', quiz)
if not query.count():
insert_with_new_key(models.QuizQuestionListModel, quiz=quiz,
question=question)
def create_question(quiz, **kwargs):
"""Inserts a new question into the quiz.
Args:
quiz: Quiz object to associate question with.
Returns:
QuestionModel object.
"""
question = insert_with_new_key(models.QuestionModel, **kwargs)
insert_with_new_key(models.QuizQuestionListModel, quiz=quiz,
question=question)
return question
def create_choice(**kwargs):
"""Creates a new choice object.
Returns:
ChoiceModel object.
"""
choice = insert_with_new_key(models.ChoiceModel, **kwargs)
return choice
def store_response(session_id, quiz_id, question_id,
answered_correctly, attempts):
"""Stores user response in the datastore.
Args:
session_id: Session Identifier.
quiz_id: Key for associated quiz.
question_id: Key for the associated question.
answered_correctly: True if answer was correct.
attempts: Number of attempts so far.
"""
try:
quiz = db.get(quiz_id)
except db.BadKeyError:
logging.error('Incorrect key passed for quiz %s', quiz_id)
return
try:
question = db.get(question_id)
except db.BadKeyError:
logging.error('Incorrect key passed for question %s', question_id)
return
entry = models.ResponseModel.all().filter('session_id =', session_id).filter(
'quiz =', quiz).filter('question =', question).get()
if not entry:
insert_with_new_key(models.ResponseModel, session_id=session_id, quiz=quiz,
question=question, answered_correctly=answered_correctly,
quiz_trunk=quiz.trunk, attempts=attempts)
else:
entry.answered_correctly = answered_correctly
entry.attempts = attempts
entry.put()
def reset_quiz(session_id, quiz):
"""Resets all entries for quiz in collected response.
Useful for resetting entries when questions repeat.
Args:
session_id: Id passed by parent Lantern doc, mapping the user.
quiz: The associated quiz.
"""
score_entry = models.QuizScoreModel.all().filter(
'session_id =', session_id).filter('quiz =', quiz).get()
if score_entry:
score_entry.score = 0.0
score_entry.progress = 0.0
score_entry.questions_attempted = 0
score_entry.put()
reset_responses(session_id, quiz)
def reset_responses(session_id, quiz):
"""Resets all response entries for quiz in collected response.
Useful for resetting entries when questions repeat.
Args:
session_id: Id passed by parent Lantern doc, mapping the user.
quiz: The associated quiz.
"""
response_entries = models.ResponseModel.all().filter(
'session_id =', session_id).filter('quiz =', quiz)
for entry in response_entries:
entry.answered_correctly = False
entry.attempts = 0
entry.put()
def pick_next_question(session_id, quiz, repeat):
"""Selects the next question to be presented.
TODO(mukundjha): Decide if the data required for selection should
come from doc or from quiz database. Currently we use the data stored
in the datastore per-session for producing next question.
Args:
session_id: Id passed by parent Lantern doc, mapping the user.
quiz: The associated quiz.
repeat: If true questions keep recycling even after user has seen all
of them.
Returns:
Question object if there exists a valid question, else None.
"""
#quiz = db.get(quiz_id)
all_questions = models.QuizQuestionListModel.all().filter('quiz =', quiz)
if not all_questions.count():
return None
all_ques = set([entry.question.key() for entry in all_questions])
answered_correctly = models.ResponseModel.all().filter(
'session_id =', session_id).filter('quiz =', quiz).filter(
'answered_correctly =', True)
answered = set([response.question.key() for response in answered_correctly])
if answered:
allowed_questions = all_ques.difference(answered)
else:
allowed_questions = all_ques
if not allowed_questions and not repeat:
return None
elif not allowed_questions:
reset_responses(session_id, quiz)
allowed_questions = all_ques
logging.info('Allowed Questions: %r', allowed_questions)
question_key = random.choice(list(allowed_questions))
question = db.get(question_key)
logging.info('picked question: %r', question_key)
return question
def check_answer(question_id, choice_id):
"""Checks if the answer provided is correct or not.
TODO(mukundjha): Check if choice belongs to same question.
"""
try:
choice = db.get(choice_id)
except db.BadKeyError:
logging.error('Error the choice key entered in check_answer is invalid %s',
choice_id)
return None
if choice.is_correct:
return True
else:
return False
def increment_score(session_id, quiz, question, attempts):
"""Increments score associated with a quiz and a session.
Scoring: Score for a quiz is always normalized to be out of 100 points.
Based on number of maximum allowed questions (set in the quiz property)
for the quiz, each question is given equal weightage.
For each question the user is allowed up to num_of_choices-1 tries. For each
wrong attempt the user loses a certain number of points; currently the same
amount is lost for every wrong attempt.
For example: if the quiz database has 10 questions, each question carries
10 points. Suppose one of the questions has 5 choices, then the user is
allowed 4 attempts (taking a hint or selecting/eliminating a choice is
counted as an attempt) and each wrong attempt loses 10/4 = 2.5 points.
There is also a notion of completion status, which records the percentage of
questions attempted and is used to track whether the user has completed the
module.
Args:
session_id : Id associated with current user.
quiz: Associated quiz.
question: Question being attempted.
attempts: No of attempts so far including the current one.
Returns:
An updated tuple of (score, progress) describing current status of the
quiz for the given session.
"""
total_questions = models.QuizQuestionListModel.all().filter(
'quiz =', quiz).count()
min_questions = quiz.quiz_property.min_questions
if min_questions != 0 and total_questions > min_questions:
total_questions = min_questions
if not total_questions:
return (0, 0)
quanta = 100.0/total_questions
total_choices = len(question.choices)
if total_choices >= 2:
loss = quanta / (total_choices - 1)
else:
return
points = quanta - (loss * (attempts - 1))
score_entry = models.QuizScoreModel.all().filter(
'session_id =', session_id).filter('quiz =', quiz).get()
if score_entry:
if score_entry.questions_attempted < total_questions:
score_entry.score += points
score_entry.questions_attempted += 1
score_entry.progress = (
score_entry.questions_attempted * 100.0 / total_questions)
else:
score_entry.progress = 100.0
score_entry.put()
return (score_entry.score, score_entry.progress)
else:
# this is the first correctly answered question
progress = (1 * 100.0) / total_questions
insert_with_new_key(models.QuizScoreModel, session_id=session_id,
quiz=quiz, score=points, questions_attempted=1,
quiz_trunk=quiz.trunk, progress=progress)
return (points, progress)
def create_new_trunk_with_quiz(quiz_id, **kwargs):
"""Creates a new trunk with given quiz as head.
WARNING: Since we are passing parent parameter in insert_with_new_key,
function will only check for uniqueness of key among entities having 'trunk'
as an ancestor. This no longer guarantees unique key_name across all
entities.
NOTE(mukundjha): No check is done on quiz_id, it's responsibility of
other functions calling create_new_trunk_with_quiz to check the parameter
before its passed.
Args:
quiz_id: String value of key of the document to be added.
Returns:
Returns created quiz trunk.
Raises:
InvalidQuizError: If the quiz_id is invalid.
"""
quiz_trunk = insert_with_new_key(models.QuizTrunkModel)
message = kwargs.pop('commit_message', 'Committed a new revision')
quiz_revision = insert_with_new_key(models.QuizRevisionModel, parent=quiz_trunk,
quiz_id=quiz_id, commit_message=message)
quiz_trunk.head = quiz_id
quiz_trunk.put()
return quiz_trunk
def append_to_trunk(quiz_trunk_id, quiz_id, **kwargs):
"""Appends a quiz to end of the trunk.
NOTE(mukundjha): No check is done on quiz_id, it's responsibility of
other functions calling append_to_trunk to check the parameter
before its passed.
Args:
quiz_trunk_id: Key of the quiz trunk.
quiz_id: String value of key of the quiz to be added.
Returns:
Returns modified trunk.
Raises:
InvalidQuizError: If the quiz_id is invalid.
InvalidQuizTrunkError: If the quiz_trunk_id is invalid.
"""
try:
quiz_trunk = db.get(quiz_trunk_id)
except db.BadKeyError, e:
raise models.InvalidTrunkError('Quiz Trunk is not valid %s',
quiz_trunk_id)
message = kwargs.pop('commit_message', 'Committed a new revision')
quiz_revision = insert_with_new_key(
models.QuizRevisionModel, parent=quiz_trunk, quiz_id=quiz_id,
commit_message=message)
quiz_trunk.head = quiz_id
quiz_trunk.put()
return quiz_trunk
def get_quiz_from_trunk(session_id, quiz_trunk_id):
"""Retrieves relevant quiz version based on user's response history.
Returns the last version i.e., head of the trunk.
Args:
session_id: String representing session_id.
quiz_trunk_id: trunk_id representing the quiz.
Returns:
Quiz object.
"""
try:
quiz_trunk = db.get(quiz_trunk_id)
except db.BadKeyError:
return None
last_response_entry = models.ResponseModel.all().filter(
'session_id =', session_id).filter('quiz_trunk =', quiz_trunk).order(
'-time_stamp').get()
if last_response_entry:
return last_response_entry.quiz
else:
try:
quiz = db.get(quiz_trunk.head)
except db.BadKeyError:
return None
return quiz
def fetch_new_question_and_status(session_id, quiz_trunk_id, repeat):
"""Fetches new question and current status to be presented based on
session's history.
If repeat argument is True, function keeps recycling questions
even if user is finished with all the questions.
Args:
session_id: String ID for user.
quiz_trunk_id: Id of the trunk associated with the quiz.
repeat: Flag to repeat questions even if user has answered all.
Returns:
Returns a dict object with fetched question and current status. Question is
set to None if there are no questions to be fetched.
TODO(mukundjha): We can absorb get_quiz_from_trunk to reduce number of
calls to datastore.
"""
quiz = get_quiz_from_trunk(session_id, quiz_trunk_id)
# for picking the correct quiz version
last_response_entry = models.ResponseModel.all().filter(
'session_id =', session_id).filter('quiz =', quiz).order(
'-time_stamp').get()
if last_response_entry and last_response_entry.answered_correctly == False:
question = last_response_entry.question
attempts = last_response_entry.attempts
else:
question = pick_next_question(session_id, quiz, repeat)
attempts = 0
score_entry = models.QuizScoreModel.all().filter(
'session_id =', session_id).filter('quiz =', quiz).get()
if not score_entry:
score = 0
progress = 0
else:
score = round(score_entry.score)
progress = round(score_entry.progress)
return {'current_status' : {'score' : score, 'progress': progress},
'question' : question, 'attempts': attempts, 'title': quiz.title}
def remove_question_from_quiz(question, quiz):
"""Removes question from the quiz.
"""
question_entry = models.QuizQuestionListModel.all().filter(
'question =', question).filter('quiz =',quiz).get()
if question_entry:
db.delete(question_entry)
def question_to_json(question_and_status_dict, repeat):
"""Converts question dict into JSON object, also attaches other relevant
messages.
If repeat argument is True, function keeps recycling questions
even if user is finished with all the questions.
Args:
question_and_status_dict: Dict object representing question to
be presented and current status of the quiz.
repeat: Flag to repeat questions even if user has answered all.
Returns:
Returns JSON object to be presented.
"""
question = question_and_status_dict['question']
# gen_message = general message
if not question and not repeat:
gen_message = 'Congratulations, you have completed the quiz'
question_dict = {
'body': 'You have completed the quiz!! Please move to the next one!',
'choices':[],
'reset': True}
elif not question:
gen_message = 'This quiz is empty.'
question_dict = {
'body': 'This quiz is empty!',
'choices':[]}
else:
question_dict = question.dump_to_dict()
gen_message = None
data_dict = {'current_status' : question_and_status_dict['current_status'],
'question' : question_dict,
'attempts': question_and_status_dict['attempts'],
'gen_message': gen_message,
'title': question_and_status_dict['title']}
return simplejson.dumps(data_dict)
|
from .scm import SCM, Variable, HiddenVariable, inverse , exp, log, negative, sqrt, square, power, sin, cos, tan, scale, add, subtract, multiply, matmul, divide, reduce_sum, reduce_prod, func
#from .math import *
import pycausal.distributions as stats
import pycausal.problems as problems
from numpy import seterr
seterr(all='raise')
|
#!/usr/bin/env python3
"""
created: 2022-03-25 20:50:18
@author: seraph776
contact: seraph776@gmail.com
details: Convert CamelCase to snake_case
"""
import re
def split_upper(s):
s = list(filter(None, re.split("([A-Z][^A-Z]*)", s)))
return '_'.join(s).lower()
word = 'MyFunctionName'
print(split_upper(word))
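# Expected output for the example above: my_function_name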
|
import lab as B
from algebra import OneFunction
from . import _dispatch
from ..mean import Mean
from ..util import num_elements, uprank
__all__ = ["OneMean"]
class OneMean(Mean, OneFunction):
"""Constant mean of `1`."""
@_dispatch
def __call__(self, x: B.Numeric):
return B.ones(B.dtype(x), *B.shape_batch(x), num_elements(x), 1)
|
# @Author ZhangGJ
# @Date 2020/12/24 08:04
|
from database import User
def get_user(username):
user = User.query.filter_by(username=username).first()
return f'{user}'
|
from os.path import join
import pytest
from sympy import atan, sqrt, symbols
from sympy.printing.printer import Printer
from sympy.printing.str import StrPrinter
from qalgebra.core.operator_algebra import LocalSigma
from qalgebra.core.state_algebra import BasisKet
from qalgebra.printing import (
ascii,
configure_printing,
init_printing,
latex,
unicode,
)
from qalgebra.printing.asciiprinter import QalgebraAsciiPrinter
from qalgebra.printing.sympy import SympyStrPrinter
from qalgebra.printing.unicodeprinter import QalgebraUnicodePrinter
from qalgebra.utils.testing import QalgebraAsciiTestPrinter, datadir
datadir = pytest.fixture(datadir)
def test_initfile(datadir):
psi = (BasisKet(0, hs=1) + BasisKet(1, hs=1)) / sqrt(2)
x = symbols('x')
sig = LocalSigma(0, 1, hs=1)
init_printing(str_format='unicode', repr_format='unicode')
str(psi) == unicode(psi)
repr(psi) == unicode(psi)
assert isinstance(ascii.printer, QalgebraAsciiPrinter)
assert isinstance(ascii.printer._sympy_printer, SympyStrPrinter)
assert isinstance(unicode.printer, QalgebraUnicodePrinter)
assert ascii(psi) == '1/sqrt(2) * (|0>^(1) + |1>^(1))'
assert unicode(psi) == '1/√2 (|0⟩⁽¹⁾ + |1⟩⁽¹⁾)'
assert (
latex(psi)
== r'\frac{1}{\sqrt{2}} \left(\left\lvert 0 \right\rangle^{(1)} + '
r'\left\lvert 1 \right\rangle^{(1)}\right)'
)
assert (
latex(atan(x) * sig)
== r'\operatorname{atan}{\left(x \right)} \left\lvert 0 \middle\rangle\!\middle\langle 1 \right\rvert^{(1)}'
)
with configure_printing(inifile=join(datadir, 'printing.ini')):
assert (
Printer._global_settings['val1']
== '1 # inline comments are not allowd'
)
assert (
Printer._global_settings['val2']
== '1 ; with either prefix character'
)
assert 'show_hs_label' in Printer._global_settings
assert 'sig_as_ketbra' in Printer._global_settings
assert 'use_unicode' in Printer._global_settings
assert len(Printer._global_settings) == 5
str(psi) == ascii(psi)
repr(psi) == unicode(psi)
assert isinstance(ascii.printer, QalgebraAsciiTestPrinter)
assert isinstance(ascii.printer._sympy_printer, StrPrinter)
assert isinstance(unicode.printer, QalgebraUnicodePrinter)
assert ascii(psi) == 'sqrt(2)/2 * (|0>^(1) + |1>^(1))'
assert unicode(psi) == '1/√2 (|0⟩₍₁₎ + |1⟩₍₁₎)'
assert (
latex(psi) == r'\frac{1}{\sqrt{2}} \left(\Ket{0} + \Ket{1}\right)'
)
assert (
latex(atan(x) * sig)
== r'\arctan{\left(x \right)} \Op{\sigma}_{0,1}'
)
assert 'use_unicode' in Printer._global_settings
assert len(Printer._global_settings) == 1
str(psi) == unicode(psi)
repr(psi) == unicode(psi)
assert isinstance(ascii.printer, QalgebraAsciiPrinter)
assert isinstance(ascii.printer._sympy_printer, SympyStrPrinter)
assert isinstance(unicode.printer, QalgebraUnicodePrinter)
assert ascii(psi) == '1/sqrt(2) * (|0>^(1) + |1>^(1))'
assert unicode(psi) == '1/√2 (|0⟩⁽¹⁾ + |1⟩⁽¹⁾)'
assert (
latex(psi)
== r'\frac{1}{\sqrt{2}} \left(\left\lvert 0 \right\rangle^{(1)} + '
r'\left\lvert 1 \right\rangle^{(1)}\right)'
)
assert (
latex(atan(x) * sig)
== r'\operatorname{atan}{\left(x \right)} \left\lvert 0 '
r'\middle\rangle\!\middle\langle 1 \right\rvert^{(1)}'
)
init_printing(reset=True)
def test_inifile_do_not_mix(datadir):
with pytest.raises(TypeError) as exc_info:
init_printing(
str_format='ascii',
repr_format='ascii',
inifile=join(datadir, 'printing.ini'),
)
assert (
"The `inifile` argument cannot be combined with any other "
"keyword arguments" in str(exc_info.value)
)
def test_invalid_section(datadir):
with pytest.raises(ValueError) as exc_info:
init_printing(inifile=join(datadir, 'invalid_section.ini'))
assert "Invalid section sympy" in str(exc_info.value)
init_printing(reset=True)
def test_invalid_options(datadir):
with pytest.raises(TypeError) as exc_info:
init_printing(inifile=join(datadir, 'invalid_value.ini'))
assert (
"some_bogus_setting is not a valid setting for either "
"QalgebraAsciiTestPrinter or StrPrinter" in str(exc_info.value)
)
init_printing(reset=True)
|
import bpy
import Blender
from Blender.Mathutils import *
from myFunction import *
from commandLib import *
import random
import os
#from bump_to_normal import *
#print 'meshLib'
def GetAlphaFromImage(path):
sys=Sys(path)
image = Blender.Image.Load(path)
imagedepth=image.getDepth()
imagesize = image.getSize()
imagenewname=sys.dir+os.sep+sys.base+'-alfa.tga'
img=Sys(imagenewname)
ImgPath=img.dir+os.sep+img.base+'.jpg'
if os.path.exists(ImgPath)==False:
#print imagenewname
imagenew = Blender.Image.New(imagenewname,imagesize[0],imagesize[1],imagedepth)
for x in range(0,imagesize[0]):
for y in range(0,imagesize[1]):
pix=image.getPixelI(x, y)[3]
imagenew.setPixelI(x,y,[255-pix,255-pix,255-pix,0])
imagenew.save()
cmd=Cmd()
cmd.input=imagenewname
cmd.JPG=True
cmd.run()
return ImgPath
def GetBlackFromImage(path):
sys=Sys(path)
image = Blender.Image.Load(path)
imagedepth=image.getDepth()
imagesize = image.getSize()
imagenewname=sys.dir+os.sep+sys.base+'-alfa.tga'
img=Sys(imagenewname)
ImgPath=img.dir+os.sep+img.base+'.jpg'
if os.path.exists(ImgPath)==False:
#print imagenewname
imagenew = Blender.Image.New(imagenewname,imagesize[0],imagesize[1],imagedepth)
for x in range(0,imagesize[0]):
for y in range(0,imagesize[1]):
pix=image.getPixelI(x, y)
if 125<pix[0]<135 and 121<pix[1]<131 and 57<pix[2]<67:
#if pix[0]==130 and pix[1]==126 and pix[2]==62:
#print pix
imagenew.setPixelI(x,y,[0,0,0,0])
else:
#imagenew.setPixelI(x,y,[255-pix[0],255-pix[1],255-pix[2],0])
imagenew.setPixelI(x,y,[255,255,255,0])
imagenew.save()
cmd=Cmd()
cmd.input=imagenewname
cmd.JPG=True
cmd.run()
return ImgPath
def setBox(box,meshList):
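#fits all meshes into the axis-aligned box [xmin,ymin,zmin,xmax,ymax,zmax]:
#vertices are first centred on the origin, then scaled per axis to the box
#dimensions, then translated to the centre of the box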
E=[[],[],[]]
for mesh in meshList:
for n in range(len(mesh.vertPosList)):
x,y,z=mesh.vertPosList[n]
E[0].append(x)
E[1].append(y)
E[2].append(z)
skX=(box[3]-box[0])/(max(E[0])-min(E[0]))
skY=(box[4]-box[1])/(max(E[1])-min(E[1]))
skZ=(box[5]-box[2])/(max(E[2])-min(E[2]))
sk=min(skX,skY,skZ)
trX1=(box[3]+box[0])/2#-(max(E[0])+min(E[0]))/2
trY1=(box[4]+box[1])/2#-(max(E[1])+min(E[1]))/2
trZ1=(box[5]+box[2])/2#-(max(E[2])+min(E[2]))/2
trX=-(max(E[0])+min(E[0]))/2
trY=-(max(E[1])+min(E[1]))/2
trZ=-(max(E[2])+min(E[2]))/2
#print trX,trY,trZ
#print skX,skY,skZ
for mesh in meshList:
for n in range(len(mesh.vertPosList)):
x,y,z=mesh.vertPosList[n]
mesh.vertPosList[n]=[x+trX,y+trY,z+trZ]
for n in range(len(mesh.vertPosList)):
x,y,z=mesh.vertPosList[n]
mesh.vertPosList[n]=[x*skX,y*skY,z*skZ]
for n in range(len(mesh.vertPosList)):
x,y,z=mesh.vertPosList[n]
mesh.vertPosList[n]=[x+trX1,y+trY1,z+trZ1]
#mesh.draw()
def setBox1(box,meshList):
E=[[],[],[]]
for mesh in meshList:
for n in range(len(mesh.vertPosList)):
x,y,z=mesh.vertPosList[n]
E[0].append(x)
E[1].append(y)
E[2].append(z)
skX=(box[3]-box[0])/(max(E[0])-min(E[0]))
skY=(box[4]-box[1])/(max(E[1])-min(E[1]))
skZ=(box[5]-box[2])/(max(E[2])-min(E[2]))
sk=min(skX,skY,skZ)
trX=(box[3]+box[0])/2
trY=(box[4]+box[1])/2
trZ=(box[5]+box[2])/2
for mesh in meshList:
for n in range(len(mesh.vertPosList)):
x,y,z=mesh.vertPosList[n]
mesh.vertPosList[n]=[trX+x*skX,trY+y*skY,trZ+z*skZ]
#mesh.draw()
def bindPose(bindSkeleton,poseSkeleton,meshObject):
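#re-poses the mesh: each vertex is mapped out of the bind skeleton's
#armature space into the pose skeleton's, weighted by its skin influences,
#so the mesh follows poseSkeleton instead of bindSkeleton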
#print 'BINDPOSE'
mesh=meshObject.getData(mesh=1)
poseBones=poseSkeleton.getData().bones
bindBones=bindSkeleton.getData().bones
for vert in mesh.verts:
index=vert.index
skinList=mesh.getVertexInfluences(index)
vco=vert.co.copy()*meshObject.matrixWorld
vector=Vector()
sum=0
for skin in skinList:
#try:
bone=skin[0]
weight=skin[1]
matA=bindBones[bone].matrix['ARMATURESPACE']*bindSkeleton.matrixWorld
matB=poseBones[bone].matrix['ARMATURESPACE']*poseSkeleton.matrixWorld
vector+=vco*matA.invert()*matB*weight
sum+=weight
#except:pass
#print sum,
vert.co=vector
mesh.update()
Blender.Window.RedrawAll()
#ID=3
#bindSkeleton=Blender.Object.Get('armature-'+str(ID))
#poseSkeleton=Blender.Object.Get('bindPose-mesh-'+str(ID))
#meshObject=Blender.Object.Get('mesh-'+str(ID))
#bindPose(bindSkeleton,poseSkeleton,meshObject)
class Model:
def __init__(self,input):
self.meshList=[]
self.filename=input
self.dirname=None
self.basename=None
#if self.filename is not None
def getMat(self):#
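#reads the side-car <model>.mat file; each mapping line has the form
#meshIndex:mapType:"fileName" or meshIndex:mapType:"fileName":materialIndex,
#where mapType is one of d,n,s,a,o,e,b,t,d1,d2,n1,n2
#(diffuse, normal, specular, alpha, ao, emit, bump, trans and numbered variants)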
if self.filename is not None and len(self.meshList)>0:
self.basename=os.path.basename(self.filename)
self.dirname=os.path.dirname(self.filename)
#matPath=self.dirname+os.sep+'mat.txt'
matPath=self.filename+'.mat'
if os.path.exists(matPath)==True:
matfile=open(matPath,'r')
lines=matfile.readlines()
for i,mesh in enumerate(self.meshList):
for j,mat in enumerate(mesh.matList):
for line in lines:
values=line.strip().split(':')
if values[0]=="-1":i=-1#pierwszy raz
if len(values)==3:
if values[0]==str(i).zfill(3) and values[1]=='d':mat.diffuse=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='n':mat.normal=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='s':mat.specular=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='a':mat.alpha=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='o':mat.ao=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='d1':mat.diffuse1=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='d2':mat.diffuse2=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='n1':mat.normal1=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='n2':mat.normal2=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='e':mat.emit=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='b':mat.bump=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='t':mat.trans=self.dirname+os.sep+values[2].split('"')[1]
if len(values)==4:
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='d':mat.diffuse=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='n':mat.normal=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='s':mat.specular=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='a':mat.alpha=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='o':mat.ao=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='d1':mat.diffuse1=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='d2':mat.diffuse2=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='n1':mat.normal1=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='n2':mat.normal2=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='e':mat.emit=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='b':mat.bump=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='t':mat.trans=self.dirname+os.sep+values[2].split('"')[1]
for i,mesh in enumerate(self.meshList):
for j,mat in enumerate(mesh.matList):
for line in lines:
values=line.strip().split(':')
#if values[0]=="-1":i=-1
if len(values)==3:
if values[0]==str(i).zfill(3) and values[1]=='d':mat.diffuse=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='n':mat.normal=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='s':mat.specular=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='a':mat.alpha=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='o':mat.ao=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='d1':mat.diffuse1=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='d2':mat.diffuse2=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='n1':mat.normal1=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='n2':mat.normal2=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='e':mat.emit=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='b':mat.bump=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[1]=='t':mat.trans=self.dirname+os.sep+values[2].split('"')[1]
if len(values)==4:
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='d':mat.diffuse=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='n':mat.normal=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='s':mat.specular=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='a':mat.alpha=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='o':mat.ao=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='d1':mat.diffuse1=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='d2':mat.diffuse2=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='n1':mat.normal1=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='n2':mat.normal2=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='e':mat.emit=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='b':mat.bump=self.dirname+os.sep+values[2].split('"')[1]
if values[0]==str(i).zfill(3) and values[3]==str(j) and values[1]=='t':mat.trans=self.dirname+os.sep+values[2].split('"')[1]
#print i,j,mat.diffuse
matfile.close()
def setMat(self):
#print 'setMat'
if self.filename is not None and len(self.meshList)>0:
self.basename=os.path.basename(self.filename)
self.dirname=os.path.dirname(self.filename)
#matPath=self.dirname+os.sep+'mat.txt'
matPath=self.filename+'.mat'
#for file in os.listdir(self.dirname):
# if file.lower()=='mat.txt':
matLines=[]
if os.path.exists(matPath)==True:
file=open(matPath,'r')
lines=file.readlines()
for line in lines:
if ':' in line:
matLines.append(line)
file.close()
#if 'mat.txt' not in os.listdir(os.path.dirname(filename)):
matFile=open(matPath,'w')
for file in os.listdir(self.dirname):
if file.split('.')[-1].lower() in ['dds','png','jpg','jpeg','tga','bmp']:
matFile.write('"'+file+'"'+'\n')
for i,mesh in enumerate(self.meshList):
for j,mat in enumerate(mesh.matList):
#print mat.name
split=mat.name.split('-')
if mat.diffuse is not None:matFile.write(str(split[0])+':d:"'+os.path.basename(mat.diffuse)+'":'+str(split[1])+'\n')
if mat.normal is not None:matFile.write(str(split[0])+':n:"'+os.path.basename(mat.normal)+'":'+str(split[1])+'\n')
if mat.specular is not None:matFile.write(str(split[0])+':s:"'+os.path.basename(mat.specular)+'":'+str(split[1])+'\n')
if mat.ao is not None:matFile.write(str(split[0])+':o:"'+os.path.basename(mat.ao)+'":'+str(split[1])+'\n')
if mat.alpha is not None:matFile.write(str(split[0])+':a:"'+os.path.basename(mat.alpha)+'":'+str(split[1])+'\n')
if mat.diffuse1 is not None:matFile.write(str(split[0])+':d1:"'+os.path.basename(mat.diffuse1)+'":'+str(split[1])+'\n')
if mat.diffuse2 is not None:matFile.write(str(split[0])+':d2:"'+os.path.basename(mat.diffuse2)+'":'+str(split[1])+'\n')
if mat.normal1 is not None:matFile.write(str(split[0])+':n1:"'+os.path.basename(mat.normal1)+'":'+str(split[1])+'\n')
if mat.normal2 is not None:matFile.write(str(split[0])+':n2:"'+os.path.basename(mat.normal2)+'":'+str(split[1])+'\n')
if mat.emit is not None:matFile.write(str(split[0])+':e:"'+os.path.basename(mat.emit)+'":'+str(split[1])+'\n')
if mat.bump is not None:matFile.write(str(split[0])+':b:"'+os.path.basename(mat.bump)+'":'+str(split[1])+'\n')
if mat.trans is not None:matFile.write(str(split[0])+':t:"'+os.path.basename(mat.trans)+'":'+str(split[1])+'\n')
matFile.close()
def draw(self):
for i,mesh in enumerate(self.meshList):
#print 'mesh:',i,'vert:',len(mesh.vertPosList),'indice:',len(mesh.indiceList),
#if len(mesh.indiceList)>0:
# print 'min:',min(mesh.indiceList),'max:',max(mesh.indiceList),
#print 'face:',len(mesh.faceList)
mesh.draw()
class Mesh():
def __init__(self):
self.vertPosList=[]
self.vertNormList=[]
self.indiceList=[]
self.faceList=[]
self.triangleList=[]
self.matList=[]
self.matIDList=[]
self.vertUVList=[]
self.faceUVList=[]
self.skinList=[]
self.skinWeightList=[]
self.skinIndiceList=[]
self.skinGroupList=[]
self.skinIDList=[]
self.bindPoseMatrixList=[]
self.boneNameList=[]
self.name=None
self.mesh=None
self.object=None
self.TRIANGLE=False
self.QUAD=False
self.TRISTRIP=False
self.BINDSKELETON=None
self.BINDPOSESKELETON=None
self.matrix=None
self.SPLIT=False
self.WARNING=False
self.DRAW=False
self.BINDPOSE=False
self.UVFLIP=False
self.sceneIDList=None
self.vertModList=[]
self.mod=False
self.filename=None
def addMat(self,mat,mesh,matID):
#if mat.name is None:
# mat.name=self.name+'-mat-'+str(matID)
#mat.name=mesh.name
mat.name=mesh.name.split('-')[0]+'-'+str(matID)+'-'+str(self.sceneIDList.objectID)
blendMat=Blender.Material.New(mat.name)
blendMat.diffuseShader=Blender.Material.Shaders.DIFFUSE_ORENNAYAR
blendMat.specShader=Blender.Material.Shaders.SPEC_WARDISO
blendMat.setRms(0.04)
blendMat.shadeMode=Blender.Material.ShadeModes.CUBIC
if mat.rgbCol is None:
blendMat.rgbCol=mat.rgba[:3]
blendMat.alpha = mat.rgba[3]
else:
blendMat.rgbCol=mat.rgbCol[:3]
#blendMat.alpha = mat.rgba[3]
if mat.rgbSpec is not None:
blendMat.specCol=mat.rgbSpec[:3]
if mat.ZTRANS==True:
blendMat.mode |= Blender.Material.Modes.ZTRANSP
blendMat.mode |= Blender.Material.Modes.TRANSPSHADOW
blendMat.alpha = 0.0
if mat.diffuse is not None:diffuse(blendMat,mat)
if mat.reflection is not None:reflection(blendMat,mat)
if mat.diffuse1 is not None:diffuse1(blendMat,mat)
if mat.diffuse2 is not None:diffuse2(blendMat,mat)
if mat.specular is not None:specular(blendMat,mat)
if mat.normal is not None:normal(blendMat,mat)
if mat.bump is not None:bump(blendMat,mat)
if mat.normal1 is not None:normal1(blendMat,mat)
if mat.normal2 is not None:normal2(blendMat,mat)
if mat.ao is not None:ao(blendMat,mat)
if mat.alpha is not None:alpha(blendMat,mat)
if mat.emit is not None:emit(blendMat,mat)
if mat.trans is not None:trans(blendMat,mat)
mesh.materials+=[blendMat]
def addvertexUV(self,blenderMesh,mesh):
blenderMesh.vertexUV = 1
for m in range(len(blenderMesh.verts)):
if self.UVFLIP==False:blenderMesh.verts[m].uvco = Vector(mesh.vertUVList[m][0],1-mesh.vertUVList[m][1])
else:blenderMesh.verts[m].uvco = Vector(mesh.vertUVList[m])
def addfaceUV(self,blenderMesh,mesh):
if self.WARNING==True:
print 'WARNING: blenderMesh.faces:',len(blenderMesh.faces)
if len(blenderMesh.faces)>0:
blenderMesh.faceUV = 1
if len(mesh.vertUVList)>0:
for ID in range(len(blenderMesh.faces)):
face=blenderMesh.faces[ID]
face.uv = [v.uvco for v in face.verts]
face.smooth = 1
if len(mesh.matIDList)>0:
face.mat=mesh.matIDList[ID]
if len(mesh.matIDList)>0:
for ID in range(len(blenderMesh.faces)):
face=blenderMesh.faces[ID]
face.smooth = 1
#print ID,len(mesh.matIDList)
face.mat=mesh.matIDList[ID]
if len(mesh.faceUVList)>0:
for ID in range(len(blenderMesh.faces)):
face=blenderMesh.faces[ID]
if mesh.faceUVList[ID] is not None:
face.uv=mesh.faceUVList[ID]
if len(self.vertNormList)==0:
blenderMesh.calcNormals()
blenderMesh.update()
def addSkinIDList(self):
if len(self.skinIDList)==0:
for m in range(len(self.vertPosList)):
self.skinIDList.append([])
for n in range(len(self.skinList)):
self.skinIDList[m].append(0)
for skinID in range(len(self.skinList)):
skin=self.skinList[skinID]
if skin.IDStart==None:
skin.IDStart=0
if skin.IDCount==None:
skin.IDCount=len(self.vertPosList)
for vertID in range(skin.IDStart,skin.IDStart+skin.IDCount):
self.skinIDList[vertID][skinID]=1
def addSkinWithIndiceList(self,blendMesh,mesh):
#print 'addskin'
for vertID in range(len(mesh.skinIDList)):
indices=mesh.skinIndiceList[vertID]
weights=mesh.skinWeightList[vertID]
#print mesh.skinIDList[vertID]
for skinID,ID in enumerate(mesh.skinIDList[vertID]):
if ID==1:
if len(weights)<len(indices):count=len(weights)
else:count=len(indices)
for n in range(count):
w = weights[n]
if type(w)==int:w=w/255.0
if w!=0.0:
grID = indices[n]
if len(self.boneNameList)==0:
if len(self.skinList[skinID].boneMap)>0:grName = str(self.skinList[skinID].boneMap[grID])
else:grName = str(grID)
else:
if len(self.skinList[skinID].boneMap)>0:
grNameID = self.skinList[skinID].boneMap[grID]
grName=self.boneNameList[grNameID]
else:
grName=self.boneNameList[grID]
if grName not in blendMesh.getVertGroupNames():
blendMesh.addVertGroup(grName)
add = Blender.Mesh.AssignModes.ADD
blendMesh.assignVertsToGroup(grName,[vertID],w,add)
blendMesh.update()
def addSkinWithGroupList(self,blendMesh,mesh):
#print 'addskin'
for vertID in range(len(mesh.skinIDList)):
groups=mesh.skinGroupList[vertID]
weights=mesh.skinWeightList[vertID]
#print mesh.skinIDList[vertID]
for skinID,ID in enumerate(mesh.skinIDList[vertID]):
if ID==1:
if len(weights)<len(groups):count=len(weights)
else:count=len(groups)
for n in range(count):
w = weights[n]
if type(w)==int:w=w/255.0
if w!=0.0:
grName=groups[n]
if grName not in blendMesh.getVertGroupNames():
blendMesh.addVertGroup(grName)
add = Blender.Mesh.AssignModes.ADD
blendMesh.assignVertsToGroup(grName,[vertID],w,add)
blendMesh.update()
def addSkin(self,blendMesh,mesh):
#print 'addskin'
for vertID in range(len(mesh.skinIDList)):
indices=mesh.skinIndiceList[vertID]
weights=mesh.skinWeightList[vertID]
#print mesh.skinIDList[vertID]
for skinID,ID in enumerate(mesh.skinIDList[vertID]):
if ID==1:
if len(weights)<len(indices):count=len(weights)
else:count=len(indices)
#print indices,weights
for n in range(count):
w = weights[n]
if type(w)==int:w=w/255.0
if w!=0.0:
grID = indices[n]
if len(self.boneNameList)==0:
if len(self.skinList[skinID].boneMap)>0:grName = str(self.skinList[skinID].boneMap[grID])
else:grName = str(grID)
else:
if len(self.skinList[skinID].boneMap)>0:
grNameID = self.skinList[skinID].boneMap[grID]
grName=self.boneNameList[grNameID]
else:
grName=self.boneNameList[grID]
if grName not in blendMesh.getVertGroupNames():
blendMesh.addVertGroup(grName)
add = Blender.Mesh.AssignModes.ADD
blendMesh.assignVertsToGroup(grName,[vertID],w,add)
blendMesh.update()
def addFaces(self):
if len(self.matList)==0:
if len(self.faceList)!=0:
self.triangleList=self.faceList
if len(self.indiceList)!=0:
if self.TRIANGLE==True:
self.indicesToTriangles(self.indiceList,0)
elif self.QUAD==True:
self.indicesToQuads(self.indiceList,0)
elif self.TRISTRIP==True:
self.indicesToTriangleStrips(self.indiceList,0)
else:
if self.WARNING==True:
print 'WARNING: class<Mesh>.TRIANGLE:',self.TRIANGLE
print 'WARNING: class<Mesh>.TRISTRIP:',self.TRISTRIP
else:
if len(self.faceList)>0:
if len(self.matIDList)==0:
for matID in range(len(self.matList)):
mat=self.matList[matID]
if mat.IDStart is not None and mat.IDCount is not None:
for faceID in range(mat.IDCount):
self.triangleList.append(self.faceList[mat.IDStart+faceID])
self.matIDList.append(matID)
else:
if mat.IDStart==None:
mat.IDStart=0
if mat.IDCount==None:
mat.IDCount=len(self.faceList)
for faceID in range(mat.IDCount):
self.triangleList.append(self.faceList[mat.IDStart+faceID])
self.matIDList.append(matID)
#self.triangleList=self.faceList
else:
self.triangleList=self.faceList
#for ID in range(len(self.matIDList)):
# mat=self.matList[matID]
#if self.matIDList[ID]==matID:
# self.triangleList.append(self.faceList[ID])
if len(self.indiceList)>0:
if len(self.matIDList)==0:
for matID in range(len(self.matList)):
mat=self.matList[matID]
if mat.IDStart==None:
mat.IDStart=0
if mat.IDCount==None:
mat.IDCount=len(self.indiceList)
indiceList=self.indiceList[mat.IDStart:mat.IDStart+mat.IDCount]
if mat.TRIANGLE==True:
self.indicesToTriangles(indiceList,matID)
elif mat.QUAD==True:
self.indicesToQuads(indiceList,matID)
elif mat.TRISTRIP==True:
self.indicesToTriangleStrips(indiceList,matID)
"""else:
if mat.TRIANGLE==True:
self.indicesToTriangles2(indiceList)
elif mat.QUAD==True:
self.indicesToQuads2(indiceList)
elif mat.TRISTRIP==True:
self.indicesToTriangleStrips2(indiceList)"""
def setBoneMatrix(self,skeletonName,boneName):
scene = bpy.data.scenes.active
for object in scene.objects:
if object.name==skeletonName:
bones=object.getData().bones
if boneName in bones.keys():
matrix=bones[boneName].matrix['ARMATURESPACE']
#self.object.setMatrix(matrix*self.object.matrixWorld)
#print boneName,matrix
self.object.setMatrix(matrix)
def buildMesh(self,mesh,mat,meshID):
blendMesh = bpy.data.meshes.new(mesh.name)
blendMesh.verts.extend(mesh.vertPosList)
blendMesh.faces.extend(mesh.triangleList,ignoreDups=True)
self.addMat(mat,blendMesh,meshID)
if len(mesh.triangleList)>0:
if len(mesh.vertUVList)>0:
self.addvertexUV(blendMesh,mesh)
self.addfaceUV(blendMesh,mesh)
if len(mesh.faceUVList)>0:
self.addfaceUV(blendMesh,mesh)
if len(mesh.vertNormList)>0:
for i,vert in enumerate(blendMesh.verts):
vert.no=Vector(self.vertNormList[i])
scene = bpy.data.scenes.active
meshobject = scene.objects.new(blendMesh,mesh.name)
try:self.addSkinWithIndiceList(blendMesh,mesh)
except:print 'WARNING:self.addSkinWithIndiceList:',mesh.name
Blender.Window.RedrawAll()
if self.BINDSKELETON is not None:
for object in scene.objects:
if object.name==self.BINDSKELETON:
#meshobject.mat*=object.mat
object.makeParentDeform([meshobject],1,0)
if mat.matrix is not None:
#blendMesh.transform(self.matrix)
meshobject.setMatrix(mat.matrix*meshobject.matrixWorld)
if mat.BINDSKELETON is not None:
for object in scene.objects:
if object.name==mat.BINDSKELETON:
object.makeParentDeform([meshobject],1,0)
Blender.Window.RedrawAll()
def addMesh(self):
self.mesh = bpy.data.meshes.new(self.name)
self.mesh.verts.extend(self.vertPosList)
if len(self.vertNormList)>0:
for i,vert in enumerate(self.mesh.verts):vert.no=Vector(self.vertNormList[i])
self.mesh.faces.extend(self.triangleList,ignoreDups=True)
scene = bpy.data.scenes.active
self.object = scene.objects.new(self.mesh,self.name)
#if self.matrix is not None:
#self.object.setMatrix(self.matrix*self.object.matrixWorld)
#self.mesh.transform(self.matrix)
def draw(self):
#if self.name is None:
#self.name=str(ParseID())+'-0-0'
self.sceneIDList=SceneIDList()
self.name=str(self.sceneIDList.meshID).zfill(3)+'-0-'+str(self.sceneIDList.objectID)
self.addFaces()
if self.SPLIT==False:
self.addMesh()
if self.mod==True:
modFile=open(self.name+'.txt','w')
if self.filename is not None:
modFile.write(self.filename+'\n')
else:
modFile.write('None'+'\n')
for m in range(len(self.vertModList)):
a,b,c=self.vertModList[m]
modFile.write(str(a)+' '+str(b)+' '+str(c)+'\n')
modFile.close()
if len(self.triangleList)>0:
if len(self.vertUVList)>0:
self.addvertexUV(self.mesh,self)
#self.addfaceUV(self.mesh,self)
#if len(self.faceUVList)>0:
self.addfaceUV(self.mesh,self)
for matID in range(len(self.matList)):
mat=self.matList[matID]
self.addMat(mat,self.mesh,matID)
if self.BINDSKELETON is not None:
scene = bpy.data.scenes.active
for object in scene.objects:
if object.name==self.BINDSKELETON:
skeletonMatrix=self.object.getMatrix()*object.mat
#self.object.setMatrix(skeletonMatrix)
object.makeParentDeform([self.object],1,0)
if len(self.skinIndiceList)>0 and len(self.skinWeightList)>0:
if len(self.skinIndiceList)==len(self.skinWeightList)>0:
try:
self.addSkinIDList()
self.addSkinWithIndiceList(self.mesh,self)
#except:
except:print 'WARNING:self.addSkinWithIndiceList:',self.mesh.name
if len(self.skinGroupList)>0 and len(self.skinWeightList)>0:
if len(self.skinGroupList)==len(self.skinWeightList)>0:
#print 'addSkinWithGroupList'
try:
self.addSkinIDList()
self.addSkinWithGroupList(self.mesh,self)
except:print 'WARNING:self.addSkinWithGroupList:',self.mesh.name
Blender.Window.RedrawAll()
if self.SPLIT==True:
#print 'Splitting meshes:',len(self.matList)
#print 'self.name:',self.name
meshList=[]
for matID in range(len(self.matList)):
mesh=Mesh()
mesh.IDList={}
mesh.IDCounter=0
#if self.matList[matID].name is not None:
# mesh.name=self.matList[matID].name
#else:
# mesh.name=self.name+'-'+str(matID)
mesh.name=self.name.split('-')[0]+'-'+str(matID)+'-'+str(self.sceneIDList.objectID)
#print ' '*4,'mesh:',matID,mesh.name
meshList.append(mesh)
for faceID in range(len(self.matIDList)):
matID=self.matIDList[faceID]
mesh=meshList[matID]
for vID in self.triangleList[faceID]:
mesh.IDList[vID]=None
for faceID in range(len(self.matIDList)):
matID=self.matIDList[faceID]
mesh=meshList[matID]
for vID in self.triangleList[faceID]:
if mesh.IDList[vID] is None:
mesh.IDList[vID]=mesh.IDCounter
mesh.IDCounter+=1
for faceID in range(len(self.matIDList)):
matID=self.matIDList[faceID]
mesh=meshList[matID]
face=[]
for vID in self.triangleList[faceID]:
face.append(mesh.IDList[vID])
#mesh.faceList.append(face)
mesh.triangleList.append(face)
mesh.matIDList.append(0)
if len(self.faceUVList)>0:
mesh.faceUVList.append(self.faceUVList[faceID])
for mesh in meshList:
for m in range(len(mesh.IDList)):
mesh.vertPosList.append(None)
if len(self.vertUVList)>0:
for m in range(len(mesh.IDList)):
mesh.vertUVList.append(None)
if len(self.skinList)>0:
if len(self.skinIndiceList)>0 and len(self.skinWeightList)>0:
for m in range(len(mesh.IDList)):
mesh.skinWeightList.append([])
mesh.skinIndiceList.append([])
mesh.skinIDList.append(None)
if len(self.skinList)>0:
if len(self.skinIndiceList)>0 and len(self.skinWeightList)>0:
if len(self.skinIDList)==0:
try:self.addSkinIDList()
except:print 'WARNING:self.addSkinIDList:',self.name
for i,mesh in enumerate(meshList):
#print mesh.IDList.keys()
for vID in mesh.IDList.keys():
mesh.vertPosList[mesh.IDList[vID]]=self.vertPosList[vID]
if len(self.vertUVList)>0:
for vID in mesh.IDList.keys():
mesh.vertUVList[mesh.IDList[vID]]=self.vertUVList[vID]
if len(self.skinList)>0:
if len(self.skinIndiceList)>0 and len(self.skinWeightList)>0:
if len(self.skinIDList)>0:
for vID in mesh.IDList.keys():
mesh.skinWeightList[mesh.IDList[vID]]=self.skinWeightList[vID]
mesh.skinIndiceList[mesh.IDList[vID]]=self.skinIndiceList[vID]
mesh.skinIDList[mesh.IDList[vID]]=self.skinIDList[vID]
else:
#mat=self.matList[i]]
#if mat.IDStart is not None and mat.IDCount is not None:
# for
print 'self.skinIDList is missing'
for meshID in range(len(meshList)):
mesh=meshList[meshID]
#print len(mesh.triangleList)
mat=self.matList[meshID]
self.buildMesh(mesh,mat,meshID)
#mesh.matList.append(mat)
#mesh.draw()
Blender.Window.RedrawAll()
def indicesToQuads(self,indicesList,matID):
for m in range(0, len(indicesList), 4):
self.triangleList.append(indicesList[m:m+4] )
self.matIDList.append(matID)
def indicesToTriangles(self,indicesList,matID):
for m in range(0, len(indicesList), 3):
self.triangleList.append(indicesList[m:m+3] )
self.matIDList.append(matID)
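# Decode a triangle-strip index stream into separate triangles: 0xFFFF acts as a
# primitive-restart marker, degenerate triangles (with repeated indices) are skipped,
# and the winding order is flipped on every step so the faces keep a consistent facing.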
def indicesToTriangleStrips(self,indicesList,matID):
StartDirection = -1
id=0
f1 = indicesList[id]
id+=1
f2 = indicesList[id]
FaceDirection = StartDirection
while(True):
#for m in range(len(indicesList)-2):
id+=1
f3 = indicesList[id]
#print f3
if (f3==0xFFFF):
if id==len(indicesList)-1:break
id+=1
f1 = indicesList[id]
id+=1
f2 = indicesList[id]
FaceDirection = StartDirection
else:
#f3 += 1
FaceDirection *= -1
if (f1!=f2) and (f2!=f3) and (f3!=f1):
if FaceDirection > 0:
self.triangleList.append([(f1),(f2),(f3)])
self.matIDList.append(matID)
else:
self.triangleList.append([(f1),(f3),(f2)])
self.matIDList.append(matID)
if self.DRAW==True:
pass # placeholder: per-triangle drawing not implemented
f1 = f2
f2 = f3
if id==len(indicesList)-1:break
def indicesToQuads2(self,indicesList):
for m in range(0, len(indicesList), 4):
self.triangleList.append(indicesList[m:m+4] )
#self.matIDList.append(matID)
def indicesToTriangles2(self,indicesList):
for m in range(0, len(indicesList), 3):
self.triangleList.append(indicesList[m:m+3] )
#self.matIDList.append(matID)
def indicesToTriangleStrips2(self,indicesList):
StartDirection = -1
id=0
f1 = indicesList[id]
id+=1
f2 = indicesList[id]
FaceDirection = StartDirection
while(True):
#for m in range(len(indicesList)-2):
id+=1
f3 = indicesList[id]
#print f3
if (f3==0xFFFF):
if id==len(indicesList)-1:break
id+=1
f1 = indicesList[id]
id+=1
f2 = indicesList[id]
FaceDirection = StartDirection
else:
#f3 += 1
FaceDirection *= -1
if (f1!=f2) and (f2!=f3) and (f3!=f1):
if FaceDirection > 0:
self.triangleList.append([(f1),(f2),(f3)])
#self.matIDList.append(matID)
else:
self.triangleList.append([(f1),(f3),(f2)])
#self.matIDList.append(matID)
if self.DRAW==True:
pass # placeholder: per-triangle drawing not implemented
f1 = f2
f2 = f3
if id==len(indicesList)-1:break
def image2png(imagePath):
if os.path.exists(imagePath)==True:
cmd=Cmd()
cmd.PNG=True
cmd.input=imagePath
cmd.run()
def diffuse(blendMat,data):
ext=data.diffuse.split('.')[-1]
pngImage=data.diffuse#.replace('.'+ext,'.png')
#print pngImage
#if os.path.exists(pngImage)==False:
#if ext.lower()!='png':
# image2png(data.diffuse)
#pngImage=data.diffuse.replace('.'+ext,'.png')
if os.path.exists(pngImage)==True:
img=Blender.Image.Load(pngImage)
imgName=blendMat.name+'-d'
img.setName(imgName)
texname=blendMat.name+'-d'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
blendMat.setTexture(data.DIFFUSESLOT,tex,Blender.Texture.TexCo.UV,\
Blender.Texture.MapTo.COL| Blender.Texture.MapTo.ALPHA)
#print dir(blendMat.getTextures()[data.DIFFUSESLOT])
#blendMat.getTextures()[data.DIFFUSESLOT].mtAlpha=-1
#else:
# if self.WARNING==True:
# print 'failed...',data.diffuse
def reflection(blendMat,data):
if os.path.exists(data.reflection)==True:
img=Blender.Image.Load(data.reflection)
imgName=blendMat.name.replace('-mat-','-refl-')
img.setName(imgName)
texname=blendMat.name.replace('-mat-','-refl-')
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
blendMat.setTexture(data.REFLECTIONSLOT,tex,Blender.Texture.TexCo.REFL,Blender.Texture.MapTo.COL)
mtextures = blendMat.getTextures()
mtex=mtextures[data.REFLECTIONSLOT]
mtex.colfac=data.REFLECTIONSTRONG
#else:
# if self.WARNING==True:
# print 'failed...',data.reflection
def alpha(blendMat,data):
if os.path.exists(data.alpha)==True:
img=Blender.Image.Load(data.alpha)
imgName=blendMat.name+'-a'
img.setName(imgName)
texname=blendMat.name+'-a'
tex = Blender.Texture.New(texname)
tex.setType('Image')
#if data.RGBTRANSPARENT==True:
tex.setImageFlags('CalcAlpha')
tex.image = img
blendMat.setTexture(data.ALPHASLOT,tex,Blender.Texture.TexCo.UV,\
Blender.Texture.MapTo.ALPHA)
if blendMat.getTextures()[data.DIFFUSESLOT] is not None:
blendMat.getTextures()[data.DIFFUSESLOT].tex.setImageFlags()
blendMat.getTextures()[data.DIFFUSESLOT].mtAlpha=0
def trans(blendMat,data):
if os.path.exists(data.trans)==True:
transPath=GetBlackFromImage(data.trans)
if os.path.exists(transPath)==True:
img=Blender.Image.Load(transPath)
imgName=blendMat.name+'-t'
img.setName(imgName)
texname=blendMat.name+'-t'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.setImageFlags('CalcAlpha')
tex.image = img
blendMat.setTexture(data.ALPHASLOT,tex,Blender.Texture.TexCo.UV,\
Blender.Texture.MapTo.ALPHA)
if blendMat.getTextures()[data.DIFFUSESLOT] is not None:
blendMat.getTextures()[data.DIFFUSESLOT].tex.setImageFlags()
def diffuse1(blendMat,data):#csp
if os.path.exists(data.diffuse1)==True:
img=Blender.Image.Load(data.diffuse1)
imgName=blendMat.name+'-d1'
img.setName(imgName)
texname=blendMat.name+'-d1'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
blendMat.setTexture(data.DIFFUSE1SLOT,tex,Blender.Texture.TexCo.UV,\
Blender.Texture.MapTo.CSP|Blender.Texture.MapTo.SPEC)
mtex=blendMat.getTextures()[data.DIFFUSE1SLOT]
mtex.blendmode=Blender.Texture.BlendModes.MULTIPLY
blendMat.getTextures()[data.DIFFUSE1SLOT].mtSpec=-1
#mtex=blendMat.getTextures()[data.DIFFUSE1SLOT]
#mtex.blendmode=Blender.Texture.BlendModes.ADD
#else:
# if self.WARNING==True:
# print 'failed...',data.diffuse1
def diffuse2(blendMat,data):
if os.path.exists(data.diffuse2)==True:
img=Blender.Image.Load(data.diffuse2)
imgName=blendMat.name+'-d2'
img.setName(imgName)
texname=blendMat.name+'-d2'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
blendMat.setTexture(data.DIFFUSE2SLOT,tex,Blender.Texture.TexCo.UV,\
Blender.Texture.MapTo.COL|Blender.Texture.MapTo.CSP)
mtex=blendMat.getTextures()[data.DIFFUSE2SLOT]
mtex.blendmode=Blender.Texture.BlendModes.MULTIPLY
#else:
# if self.WARNING==True:
# print 'failed...',data.diffuse1
def normal(blendMat,data):
ext=data.normal.split('.')[-1]
pngImage=data.normal#.replace('.'+ext,'.png')
if os.path.exists(pngImage)==False:
#if ext.lower()!='png':
image2png(data.normal)
#pngImage=data.normal.replace('.'+ext,'.png')
if os.path.exists(pngImage)==True:
img=Blender.Image.Load(pngImage)
imgName=blendMat.name+'-n'
img.setName(imgName)
texname=blendMat.name+'-n'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
tex.setImageFlags('NormalMap')
blendMat.setTexture(data.NORMALSLOT,tex,Blender.Texture.TexCo.UV,Blender.Texture.MapTo.NOR)
blendMat.getTextures()[data.NORMALSLOT].norfac=data.NORMALSTRONG
blendMat.getTextures()[data.NORMALSLOT].mtNor=data.NORMALDIRECTION
blendMat.getTextures()[data.NORMALSLOT].size=data.NORMALSIZE
#else:
# if self.WARNING==True:
# print 'failed...',data.normal
def bump(blendMat,data):
ext=data.bump.split('.')[-1]
pngImage=data.bump#.replace('.'+ext,'.png')
if os.path.exists(pngImage)==False:
image2png(data.bump)
if os.path.exists(pngImage)==True:
if os.path.exists(pngImage+'.png')==False:
conv=bump2normal()
conv.input=pngImage
conv.output=pngImage+'.png'
conv.filter=sobel
conv.run()
if os.path.exists(pngImage+'.png')==True:
img=Blender.Image.Load(pngImage+'.png')
imgName=blendMat.name+'-n'
img.setName(imgName)
texname=blendMat.name+'-n'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
tex.setImageFlags('NormalMap')
blendMat.setTexture(data.NORMALSLOT,tex,Blender.Texture.TexCo.UV,Blender.Texture.MapTo.NOR)
blendMat.getTextures()[data.NORMALSLOT].norfac=data.NORMALSTRONG
blendMat.getTextures()[data.NORMALSLOT].mtNor=data.NORMALDIRECTION
blendMat.getTextures()[data.NORMALSLOT].size=data.NORMALSIZE
#else:
# if self.WARNING==True:
# print 'failed...',data.normal
def specular(blendMat,data):
ext=data.specular.split('.')[-1]
pngImage=data.specular#.replace('.'+ext,'.png')
if os.path.exists(pngImage)==False:
#if ext.lower()!='png':
image2png(data.specular)
#pngImage=data.diffuse.replace('.'+ext,'.png')
if os.path.exists(pngImage)==True:
#if os.path.exists(data.specular)==True:
img=Blender.Image.Load(data.specular)
imgName=blendMat.name+'-s'
img.setName(imgName)
texname=blendMat.name+'-s'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
blendMat.setTexture(data.SPECULARSLOT,tex,Blender.Texture.TexCo.UV,Blender.Texture.MapTo.CSP|Blender.Texture.MapTo.SPEC|Blender.Texture.MapTo.EMIT)
mtextures = blendMat.getTextures()
mtex=mtextures[data.SPECULARSLOT]
mtex.neg=True
mtex.blendmode=Blender.Texture.BlendModes.SUBTRACT
#else:
# if self.WARNING==True:
# print 'failed...',data.specular
def emit(blendMat,data):
if os.path.exists(data.emit)==True:
img=Blender.Image.Load(data.emit)
imgName=blendMat.name+'-e'
img.setName(imgName)
texname=blendMat.name+'-e'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
blendMat.setTexture(data.EMITSLOT,tex,Blender.Texture.TexCo.UV,Blender.Texture.MapTo.COL|Blender.Texture.MapTo.EMIT)
mtex=blendMat.getTextures()[data.EMITSLOT]
mtex.blendmode=Blender.Texture.BlendModes.ADD
blendMat.getTextures()[data.EMITSLOT].mtEmit=-1
def ao(blendMat,data):
if os.path.exists(data.ao)==True:
img=Blender.Image.Load(data.ao)
imgName=blendMat.name+'-ao'
img.setName(imgName)
texname=blendMat.name+'-ao'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
blendMat.setTexture(data.AOSLOT,tex,Blender.Texture.TexCo.UV,Blender.Texture.MapTo.COL)
mtex=blendMat.getTextures()[data.AOSLOT]
mtex.blendmode=Blender.Texture.BlendModes.MULTIPLY
#else:
# if self.WARNING==True:
# print 'failed...',data.ao
def normal1(blendMat,data):
if os.path.exists(data.normal1)==True:
img=Blender.Image.Load(data.normal1)
imgName=blendMat.name+'-n1'
img.setName(imgName)
texname=blendMat.name+'-n1'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
tex.setImageFlags('NormalMap')
blendMat.setTexture(data.NORMAL1SLOT,tex,Blender.Texture.TexCo.UV,Blender.Texture.MapTo.NOR)
blendMat.getTextures()[data.NORMAL1SLOT].norfac=data.NORMAL1STRONG
blendMat.getTextures()[data.NORMAL1SLOT].mtNor=data.NORMAL1DIRECTION
blendMat.getTextures()[data.NORMAL1SLOT].size=data.NORMAL1SIZE
##else:
# if self.WARNING==True:
# print 'failed...',data.normal1
def normal2(blendMat,data):
if os.path.exists(data.normal2)==True:
img=Blender.Image.Load(data.normal2)
imgName=blendMat.name+'-n2'
img.setName(imgName)
texname=blendMat.name+'-n2'
tex = Blender.Texture.New(texname)
tex.setType('Image')
tex.image = img
tex.setImageFlags('NormalMap')
blendMat.setTexture(data.NORMAL2SLOT,tex,Blender.Texture.TexCo.UV,Blender.Texture.MapTo.NOR)
blendMat.getTextures()[data.NORMAL2SLOT].norfac=data.NORMAL2STRONG
blendMat.getTextures()[data.NORMAL2SLOT].mtNor=data.NORMAL2DIRECTION
blendMat.getTextures()[data.NORMAL2SLOT].size=data.NORMAL2SIZE
#else:
# if self.WARNING==True:
# print 'failed...',data.normal2
class Skin:
def __init__(self):
self.boneMap=[]
self.IDStart=None
self.IDCount=None
self.skeleton=None
self.skeletonFile=None
class Mat:
def __init__(self):#0,1,2,3,4,5,6,7,
self.name=None
self.matrix=None
self.BINDSKELETON=None
self.ZTRANS=False
self.RGBTRANSPARENT=False
self.diffuse=None
self.DIFFUSESLOT=0
self.NORMALSLOT=1
self.SPECULARSLOT=2
self.AOSLOT=3
self.NORMAL1SLOT=4
self.NORMAL2SLOT=5
self.DIFFUSE1SLOT=6
self.DIFFUSE2SLOT=7
self.REFLECTIONSLOT=8
self.ALPHASLOT=8
#self.RGBTRANSPARENTSLOT=8
self.EMITSLOT=9
self.diffuse1=None
self.diffuse2=None
self.alpha=None
self.normal=None
self.NORMALSTRONG=0.5
self.NORMALDIRECTION=1
self.NORMALSIZE=(1,1,1)
self.bump=None
self.specular=None
self.ao=None
self.normal1=None
self.NORMAL1STRONG=0.8
self.NORMAL1DIRECTION=1
self.NORMAL1SIZE=(15,15,15)
self.normal2=None
self.NORMAL2STRONG=0.8
self.NORMAL2DIRECTION=1
self.NORMAL2SIZE=(15,15,15)
self.reflection=None
self.REFLECTIONSTRONG=1.0
self.emit=None
#self.USEDTRIANGLES=[None,None]
self.TRIANGLE=False
self.TRISTRIP=False
self.QUAD=False
self.IDStart=None
self.IDCount=None
self.faceIDList=[]
self.rgbCol=None
self.rgbSpec=None
r=random.randint(0,255)
g=random.randint(0,255)
b=random.randint(0,255)
self.rgba=[r/255.0,g/255.0,b/255.0,1.0]
self.trans=None
def draw(self):
if self.name is None:
self.name=str(ParseID())+'-mat-'+str(0)
blendMat=Blender.Material.New(self.name)
blendMat.diffuseShader=Blender.Material.Shaders.DIFFUSE_ORENNAYAR
blendMat.specShader=Blender.Material.Shaders.SPEC_WARDISO
blendMat.setRms(0.04)
blendMat.shadeMode=Blender.Material.ShadeModes.CUBIC
if self.ZTRANS==True:
blendMat.mode |= Blender.Material.Modes.ZTRANSP
blendMat.mode |= Blender.Material.Modes.TRANSPSHADOW
blendMat.alpha = 0.0
if self.diffuse is not None:diffuse(blendMat,self)
if self.reflection is not None:reflection(blendMat,self)
if self.diffuse1 is not None:diffuse1(blendMat,self)
if self.diffuse2 is not None:diffuse2(blendMat,self)
if self.specular is not None:specular(blendMat,self)
if self.normal is not None:normal(blendMat,self)
if self.normal1 is not None:normal1(blendMat,self)
if self.normal2 is not None:normal2(blendMat,self)
if self.ao is not None:ao(blendMat,self)
if self.alpha is not None:alpha(blendMat,self)
|
import threading
import os,time
from django.conf.urls import url, include
from django.conf import settings
from django.conf import urls
from importlib import import_module
from . import globalVal
from .pluginListener import FileEventHandler
from watchdog.observers import Observer
from watchdog.events import *
class PluginService(threading.Thread):
name = "PluginService"
status = 0 # 0: stop; 1: start;
def __init__(self, thread_event, status=1):
threading.Thread.__init__(self)
self.name = "PluginService"
self.event = thread_event
self.status = status
self.setDaemon(True) # run as a daemon (background) thread
def run(self):
observer = Observer()
event_handler = FileEventHandler()
watch = observer.schedule(event_handler, path=globalVal.PLUGINS_DIR, recursive=False)
event_handler2 = LoggingEventHandler()
observer.add_handler_for_watch(event_handler2, watch) # attach a second event handler to the same watch
observer.start()
try:
while self.status == 1:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
print("plugin service stopped.")
|
from extruder_turtle import ExtruderTurtle
import math
t = ExtruderTurtle()
t.name("horizontal-brush.gcode")
t.setup(x=200, y=100)
t.rate(700)
t.set_density(0.05)
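# Builds "horizontal-brush.gcode": a thin rectangular wall printed layer by layer;
# on every fifth layer (l % 5 == 3) the path turns outward and extrudes 19 long 50 mm
# strands at a higher feed rate and lower density, forming the brush "bristles".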
for l in range(100):
t.forward(5)
t.right(math.pi/2)
t.forward(20)
t.right(math.pi/2)
t.forward(5)
t.right(math.pi/2)
if l%5 == 3:
t.forward(0.5)
for i in range(19):
t.left(math.pi/2)
t.rate(3000)
t.set_density(0.02)
t.forward(50)
t.lift(0.2-l*0.2)
t.penup()
t.forward(10)
t.lift(l*0.2-0.2)
t.forward(-10)
t.lift(10)
t.right(math.pi)
t.forward(50)
t.pendown()
t.rate(700)
t.set_density(0.05)
t.left(math.pi/2)
t.forward(1)
t.lift(-10)
t.forward(0.5)
else:
t.forward(20)
t.right(math.pi/2)
t.lift(0.2)
t.finish()
|
from sc2.ids.unit_typeid import UnitTypeId
from bot.economy import economy
from bot.opponent.strategy import Strategy
from bot.util import util
class Builder:
def __init__(self, bot):
self.bot = bot
self.logger = bot.logger
self.opponent = bot.opponent
self.army = bot.army
async def _build_one(self, it):
bot = self.bot
if not (bot.units(it).exists or bot.already_pending(it)) and bot.can_afford(it):
self.logger.log(f"Building {it}")
await bot.build(it, near=bot.townhalls.first.position.towards(bot._game_info.map_center, 5))
async def _ensure_extractors(self):
bot = self.bot
if bot.units(UnitTypeId.EXTRACTOR).ready.amount > 0 and not bot.units(UnitTypeId.LAIR).ready.exists:
return
elif not bot.already_pending(UnitTypeId.EXTRACTOR):
for town in bot.townhalls:
if town.is_ready and economy.drone_rate_for_towns([town]) >= 0.90:
for geyser in bot.state.vespene_geyser.closer_than(10, town):
if await bot.can_place(UnitTypeId.EXTRACTOR, geyser.position) and bot.can_afford(UnitTypeId.EXTRACTOR):
workers = bot.workers.gathering
if workers.exists:
worker = workers.closest_to(geyser)
self.logger.log("Building extractor")
await bot.do_actions([worker.build(UnitTypeId.EXTRACTOR, geyser)])
return
def _should_train_overlord(self):
bot = self.bot
if bot.can_afford(UnitTypeId.OVERLORD):
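# Supply-headroom heuristic: keep roughly 0.7 free supply per producing structure
# (ready townhalls + queens) plus a small constant; overlords already in production
# count as 8 incoming supply. While only the starting overlord exists, the next one
# is queued only once supply is fully used.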
if bot.units(UnitTypeId.OVERLORD).amount == 1:
required_buffer = 0
else:
required_buffer = int((bot.townhalls.ready.amount + bot.units(UnitTypeId.QUEEN).ready.amount) * 0.7 + 2.5)
buffer = bot.supply_left + (bot.already_pending(UnitTypeId.OVERLORD) * 8)
should = buffer <= required_buffer and bot.supply_cap < 200
return should
# Build tree
async def begin_projects(self):
bot = self.bot
random_townhall = bot.townhalls.first
tech_penalty_multiplier = 1
if {Strategy.PROXY} & self.opponent.strategies:
tech_penalty_multiplier = 2
if economy.should_build_hatchery(bot):
self.logger.log("Building hatchery")
drone = bot.workers.random
bot.active_expansion_builder = drone.tag
await bot.do_actions([drone.build(UnitTypeId.HATCHERY, bot.expansions_sorted.pop(0))]) # TODO Should not be so naive that sites are available and building will succeed and remain intact
if not economy.should_save_for_expansion(bot):
await self._build_one(UnitTypeId.SPAWNINGPOOL)
if bot.units(UnitTypeId.SPAWNINGPOOL).exists:
await self._ensure_extractors()
if bot.units(UnitTypeId.SPAWNINGPOOL).ready.exists:
await self._build_one(UnitTypeId.ROACHWARREN)
if bot.units(UnitTypeId.ROACHWARREN).ready.exists and self.army.strength >= 500 * tech_penalty_multiplier:
if (not bot.units(UnitTypeId.LAIR).exists or bot.already_pending(UnitTypeId.LAIR)) and random_townhall.noqueue:
if bot.can_afford(UnitTypeId.LAIR):
self.logger.log("Building lair")
await bot.do_actions([random_townhall.build(UnitTypeId.LAIR)])
if bot.units(UnitTypeId.LAIR).ready.exists and len(bot.townhalls.ready) > 1 and self.army.strength >= 500 * tech_penalty_multiplier:
await self._build_one(UnitTypeId.EVOLUTIONCHAMBER)
# await self._build_one(UnitTypeId.HYDRALISKDEN)
await self._build_one(UnitTypeId.SPIRE)
# Training units
def train_units(self):
bot = self.bot
actions = []
for townhall in bot.townhalls:
town_larvae = bot.units(UnitTypeId.LARVA).closer_than(5, townhall)
if town_larvae.exists:
larva = town_larvae.random
if self._should_train_overlord():
self.logger.log("<- Training overlord")
actions.append(larva.train(UnitTypeId.OVERLORD))
elif economy.should_train_drone(bot, townhall):
self.logger.debug("Training drone, current situation at this expansion {}/{}".format(townhall.assigned_harvesters, townhall.ideal_harvesters))
actions.append(larva.train(UnitTypeId.DRONE))
elif not economy.should_save_for_expansion(bot):
if bot.can_afford(UnitTypeId.MUTALISK) and bot.units(UnitTypeId.SPIRE).ready.exists:
self.logger.debug("Training mutalisk")
actions.append(larva.train(UnitTypeId.MUTALISK))
# if bot.can_afford(UnitTypeId.HYDRALISK) and bot.units(UnitTypeId.HYDRALISKDEN).ready.exists:
# self.logger.debug("Training hydralisk")
# actions.append(larva.train(UnitTypeId.HYDRALISK))
elif bot.units(UnitTypeId.ROACHWARREN).ready.exists:
if bot.can_afford(UnitTypeId.ROACH):
self.logger.debug("Training roach")
actions.append(larva.train(UnitTypeId.ROACH))
elif bot.minerals > 400 and bot.units(UnitTypeId.LARVA).amount > 5:
self.logger.debug("Training late ling because excessive minerals")
actions.append(larva.train(UnitTypeId.ZERGLING))
elif bot.can_afford(UnitTypeId.ZERGLING) and bot.units(UnitTypeId.SPAWNINGPOOL).ready.exists:
self.logger.debug("Training ling")
actions.append(larva.train(UnitTypeId.ZERGLING))
if bot.units(UnitTypeId.SPAWNINGPOOL).ready.exists and townhall.is_ready and townhall.noqueue:
if bot.can_afford(UnitTypeId.QUEEN):
if not bot.units(UnitTypeId.QUEEN).closer_than(15, townhall):
self.logger.debug("Training queen")
actions.append(townhall.train(UnitTypeId.QUEEN))
return actions
|
from django import forms
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.core.mail import send_mail
from django.db.models import Q
import json
import uuid
import requests
ALLERGY_CHOICES = (
("no" , "Most likely not allergy friendly coats"),
("maybe" , "Possibly some allergy friendly coats"),
("yes" , "The most allergy friendly coats"),
)
SIZE_CHOICES = (
("mini" , "Miniature (20-35 lbs)"),
("medium" , "Medium (35-50 lbs)"),
("standard", "Standard 50+ lbs"),
("any" , "All Sizes"),
)
GENDER_CHOICES = (
("male", "Male"),
("female", "Female"),
("either", "Either")
)
class LitterArchive(models.Model):
mother_name = models.CharField(max_length=100, default="")
birth_date = models.DateField(null=True, blank=True)
user_list = models.TextField(max_length=1500, default="")
class Litter(models.Model):
mother_name = models.CharField(max_length=100, default="")
birth_date = models.DateField(null=True, blank=True)
selection_date = models.DateField(null=True, blank=True)
takehome_startdate = models.DateField(null=True, blank=True)
takehome_enddate = models.DateField(null=True, blank=True)
available_count = models.IntegerField(default=0)
male_count = models.IntegerField(default=0)
female_count = models.IntegerField(default=0)
size = models.CharField(max_length = 8, default="", choices=SIZE_CHOICES)
allergy_friendly = models.CharField(max_length = 5, default="no", choices=ALLERGY_CHOICES)
breeder_notes = models.TextField(max_length=500, default="")
stripe_link = models.URLField(max_length=200, default="", blank=True)
stripe_link_pin = models.CharField(max_length=25, default="", blank=True)
is_live = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
likes = models.ManyToManyField(get_user_model(), related_name="likes", through="LitterRequest", blank=True)
def save(self, *args, **kwargs):
create = True if not self.pk else False
super(Litter, self).save(*args, **kwargs)
# if create:
# for user in get_user_model().objects.filter(first_available=True):
# self.likes.add(user)
# self.save(*args, **kwargs)
def get_size(self):
for size, display in SIZE_CHOICES:
if size == self.size:
return display
def get_allergy_friendly(self):
for friendly, display in ALLERGY_CHOICES:
if friendly == self.allergy_friendly:
return display
def get_mother_name(self):
return self.mother_name.title()
def get_litter_name(self):
return "{}'s Litter".format(self.get_mother_name)
def get_fields(self):
return [(field.name, field.value_to_string(self)) for field in Litter._meta.fields]
def __str__(self):
return "{name} ({count} Interested)".format(name=self.mother_name.title(), count=self.likes.count())
class LitterRequest(models.Model):
user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
litter = models.ForeignKey(Litter, on_delete=models.CASCADE)
def post_pass_pin(self):
endpoint = "https://peterspuppies.com/wp-json/canine/add-pin-to-litter"
data = {"pin": self.user.pin}
headers = {
"Content-Type":"application/json",
"Accept":"application/json",
}
try:
response = requests.post(url=endpoint, data=json.dumps(data), headers=headers)
if response.ok:
print(response.content)
else:
"issue"
except requests.exceptions.Timeout:
print("Time out")
except json.decoder.JSONDecodeError:
print("Response is not in json format")
except requests.exceptions.RequestException as e:
raise SystemExit(e)
def email_shopping_request(self, *args, **kwargs):
self.post_pass_pin()
send_mail("Peter's Puppies :: {} Shopping".format(str(self.litter))
, "waitlist.peterspuppies.com/shop/{id} \n Shopping Pin: {pin}".format(id = self.id, pin=self.litter.stripe_link_pin)
, "postmaster@mg.peterspuppies.com"
, [self.user.email]
, fail_silently=False)
# def __init__(self, *args, **kwargs):
# super(LitterRequest, self).__init__(args, kwargs)
# self.my_field = 1
|
import logging
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.dispatch import receiver
# reference : https://wikidocs.net/10566
"""
@receiver ๋ฐ์ฝ๋ ์ดํฐ๋ฅผ ์ด์ฉํด sig_user_logged_in() ํจ์ ์ด๋ฆ์ผ๋ก
user_logged_in ์๊ทธ๋์ ์ฒ๋ฆฌํ๋ ํจ์๋ฅผ ๋ง๋ ๋ค.
์ฐธ๊ณ ๋ก django/contrib/auth/__init__.py ํ์ผ์
login(request, user, backend=None) ํจ์ ๋งจ ๋ฐ์ ๋ณด๋ฉด ๋ก๊ทธ์ธ ์์
์ ์๋ฃํ ํ ์๊ทธ๋์ ๋ณด๋ด๋ ์ฝ๋๊ฐ ์กด์ฌํ๋ค.
python user_logged_in.send(sender=user.__class__, request=request, user=user)
๋ก๊ทธ์ธํ ๋๋ง๋ค ์ด๋ฏธ ์์ ๊ฐ์ด ์๊ทธ๋์ ๋ณด๋ด๊ณ ์์ผ๋ฏ๋ก ์ด๋ฅผ ์์ ํ๋ ํจ์๋ฅผ ์ ์ํ๋ฉด ๋๋ค.
์๊ทธ๋ ์์ ํจ์๋ฅผ ์ ์ํ๊ณ ์ด๋ฅผ Django ์ฑ ์ค์ ์ ํตํด ์๋์ ์ฝ๋๋ก ๋ฑ๋กํด์ผ ํ๋ค.
"""
from ipware.ip import get_ip
from ..models_ex.models_signal_login_ip_logging import UserLoginLog
@receiver(user_logged_in)
def sig_user_logged_in(sender, user, request, **kwargs):
logger = logging.getLogger(__name__)
logger.debug("user logged in: %s at %s" % (user, request.META["REMOTE_ADDR"]))
# log = UserLoginLog()
# log.user = user
# log.ip_address = get_ip(request)
# log.user_agent = request.META['HTTP_USER_AGENT']
# log.save()
# Use the django-ipware library to obtain the user's IP address instead of
# reading request.META['REMOTE_ADDR'] directly.
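# A minimal registration sketch (assumptions: the enclosing app is named "accounts"
# and this signals module is importable as "accounts.signals"; in a real project the
# class below lives in the app's apps.py). @receiver handlers only fire if this
# module is imported, which is conventionally done from AppConfig.ready().
from django.apps import AppConfig

class AccountsSignalsConfig(AppConfig):
    name = "accounts"

    def ready(self):
        # Importing the signals module registers the @receiver handlers above.
        from accounts import signals  # noqa: F401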
|
import json
import pathlib
from http import HTTPStatus
from mock import AsyncMock, MagicMock, patch
import pytest
from tornado.httpclient import HTTPClientError
from tornado.web import HTTPError
from jupyterlab_pullrequests.managers.github import GitHubManager
HERE = pathlib.Path(__file__).parent.resolve()
def read_sample_response(filename):
return MagicMock(
body=(HERE / "sample_responses" / "github" / filename).read_bytes()
)
@pytest.mark.asyncio
async def test_GitHubManager_get_file_nbdiff():
manager = GitHubManager(access_token="valid")
prev_content = (
HERE / "sample_responses" / "github" / "ipynb_base.json"
).read_text()
curr_content = (
HERE / "sample_responses" / "github" / "ipynb_remote.json"
).read_text()
result = await manager.get_file_nbdiff(prev_content, curr_content)
expected_result = json.loads(
(HERE / "sample_responses" / "github" / "ipynb_nbdiff.json").read_text()
)
assert result == expected_result
@pytest.mark.asyncio
@patch("tornado.httpclient.AsyncHTTPClient.fetch", new_callable=AsyncMock)
async def test_GitHubManager_call_provider_bad_gitresponse(mock_fetch):
manager = GitHubManager(access_token="valid")
mock_fetch.side_effect = HTTPClientError(code=404)
with pytest.raises(HTTPError) as e:
await manager._call_provider("invalid-link")
assert e.value.status_code == 404
assert "Invalid response in" in e.value.reason
@pytest.mark.asyncio
@patch("json.loads", MagicMock(side_effect=json.JSONDecodeError("", "", 0)))
@patch("tornado.httpclient.AsyncHTTPClient.fetch", new_callable=AsyncMock)
async def test_GitHubManager_call_provider_bad_parse(mock_fetch):
manager = GitHubManager(access_token="valid")
with (pytest.raises(HTTPError)) as e:
await manager._call_provider("invalid-link")
assert e.value.status_code == HTTPStatus.BAD_REQUEST
assert "Invalid response in" in e.value.reason
@pytest.mark.asyncio
@patch("tornado.httpclient.AsyncHTTPClient.fetch", new_callable=AsyncMock)
async def test_GitHubManager_call_provider_bad_unknown(mock_fetch):
manager = GitHubManager(access_token="valid")
mock_fetch.side_effect = Exception()
with (pytest.raises(HTTPError)) as e:
await manager._call_provider("invalid-link")
assert e.value.status_code == HTTPStatus.INTERNAL_SERVER_ERROR
assert "Unknown error in" in e.value.reason
@pytest.mark.asyncio
@patch("json.loads", MagicMock(return_value={"test1": "test2"}))
@patch("tornado.httpclient.AsyncHTTPClient.fetch", new_callable=AsyncMock)
async def test_GitHubManager_call_provider_valid(mock_fetch):
manager = GitHubManager(access_token="valid")
result = await manager._call_provider("valid-link")
assert result["test1"] == "test2"
|
from loginapp import app, db
if __name__ == '__main__':
db.create_all() # create the tables first, otherwise the app throws a HUGE ERROR (wasted 24 hours on this :( )
app.run(debug=True, port=8000)
|
import shlex
import subprocess
from .base import PluginBase
# Examples:
# CommandLinePlugin(
# "Output to file",
# 8123,
# "touch testfile.txt",
# "rm testfile.txt",
# state_cmd = "ls testfile.txt"
# )
#
# CommandLinePlugin(
# "Output to file",
# 8123,
# "touch testfile.txt",
# "rm testfile.txt",
# use_fake_state = True
# )
class CommandLinePlugin(PluginBase):
def __init__(
self,
name: str,
port: int,
on_cmd: str,
off_cmd: str,
state_cmd: str = None,
use_fake_state: bool = False,
): # pylint:disable=too-many-arguments
super().__init__(name=name, port=port)
self._on_cmd = on_cmd
self._off_cmd = off_cmd
self._state_cmd = state_cmd
self._use_fake_state = use_fake_state
@staticmethod
def run_cmd(cmd: str) -> bool:
shlexed_cmd = shlex.split(cmd)
try:
subprocess.run(shlexed_cmd, check=True)
return True
except subprocess.CalledProcessError:
return False
def on(self) -> bool:
return self.run_cmd(self._on_cmd)
def off(self) -> bool:
return self.run_cmd(self._off_cmd)
def get_state(self) -> str:
if self._use_fake_state:
return super().get_state()
if self._state_cmd is None:
return "unknown"
returned_zero = self.run_cmd(self._state_cmd)
if returned_zero:
return "on"
return "off"
|
import Sofa
from Compliant import Rigid, Tools, Frame
mesh_path = Tools.path( __file__ )
scale = 1
# parts of the mechanism
parts = [
["Corps","Corps.msh","1.36 0 0.0268 0 0 0 1","0 0 0 0 0 0 1","22.8 751 737", "2.1e+11","0.28","7.8e+3",1291.453/scale,"TetrahedronFEMForceField","Rigid","Vec3d","TLineModel","TPointModel","ExtVec3f","0.obj","Actor_Sensor_NA",],
["Roue","Roue.msh","0 -0.00604 0.354 0 0 0 1","0 0 -0.148 0 0 0 1","105 106 205", "2.1e+11","0.28","7.8e+3",780.336/scale,"TetrahedronFEMForceField","Rigid","Vec3d","TLineModel","TPointModel","ExtVec3f","3.obj","Actor_Sensor_NA"],
["Came","Came.msh","0 0 -0.00768 0 0 0 1","1.085 -0.072 0.33 0 0 0 1","40.5 40.6 0.331", "2.1e+11","0.28","7.8e+3",161.416/scale,"TetrahedronFEMForceField","Rigid","Vec3d","TLineModel","TPointModel","ExtVec3f","2.obj","Actor_Sensor_NA"],
["Piston","Piston.msh","0 0 0.424 0 0 0 1","2.05 0 0.33 0 0 0 1","0.356 14.6 14.7", "2.1e+11","0.28","7.8e+3",132.759/scale,"TetrahedronFEMForceField","Rigid","Vec3d","TLineModel","TPointModel","ExtVec3f","1.obj","Actor_Sensor_NA"]
]
# joint offsets
offset = [
[0, Frame.Frame().read('0 0 0 0 0 0 1')],
[1, Frame.Frame().read('0 0 0.148 0 0 0 1')],
[1, Frame.Frame().read('0.24 -0.145 0.478 0 0 0 1')],
[2, Frame.Frame().read('-0.845 -0.073 0 0 0 0 1')],
[2, Frame.Frame().read('0.852 0.072 0 0 0 0 1')],
[3, Frame.Frame().read('-0.113 0 0 0 0 0 1')],
[3, Frame.Frame().read('0.15 0 0 0 0 0 1')],
[0, Frame.Frame().read('2.2 0 0.33 0 0 0 1')]
]
# joints: parent offset, child offset, joint def
links = [
# revolute joint around z
[0, 1, Rigid.RevoluteJoint(2)], # corps-roue
[2, 3, Rigid.RevoluteJoint(2)], # roue-came
[4, 5, Rigid.RevoluteJoint(2)], # came-piston
# sliding joint around x
[6, 7, Rigid.PrismaticJoint(0)]
]
def createScene(node):
node.createObject('RequiredPlugin', name = 'Compliant' )
node.createObject('CompliantAttachButtonSetting' )
node.createObject('VisualStyle', displayFlags='hideBehaviorModels hideCollisionModels hideMappings hideForceFields')
node.findData('dt').value=0.01
node.findData('gravity').value='0 -9.81 0'
node.createObject('CompliantImplicitSolver',
name='odesolver',
stabilization="true")
node.createObject('MinresSolver',
name = 'numsolver',
iterations = '250',
precision = '1e-14')
scene = node.createChild('scene')
rigid = []
joint = []
# create rigid bodies
for p in parts:
r = Rigid.Body()
r.name = p[0]
# r.collision = part_path + p[1]
r.dofs.read( p[3] )
r.visual = mesh_path + '/' + p[15]
r.collision = r.visual
r.inertia_forces = True
density = float(p[7])
r.mass_from_mesh( r.visual, density )
r.insert( scene )
rigid.append( r )
# create joints
for i in links:
j = i[2]
j.compliance = 0
p = offset[i[0]][0]
off_p = offset[i[0]][1]
c = offset[i[1]][0]
off_c = offset[i[1]][1]
j.append(rigid[p].user, off_p)
j.append(rigid[c].user, off_c)
joint.append( j.insert( scene) )
# fix first body
rigid[0].node.createObject('FixedConstraint', indices = '0' )
return node
|
# Generated by Django 3.1 on 2020-08-18 23:20
import ckeditor.fields
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Education',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('degree', models.CharField(max_length=100)),
('startDate', models.DateField(blank=True, null=True)),
('endDate', models.DateField(blank=True, null=True)),
('content', ckeditor.fields.RichTextField()),
('institution', models.CharField(max_length=100)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('country', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Experience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('image', models.ImageField(upload_to='experiences')),
('title', models.CharField(max_length=100)),
('position', models.CharField(max_length=100)),
('content', ckeditor.fields.RichTextField()),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('country', models.CharField(max_length=100)),
('startDate', models.DateField(blank=True, null=True)),
('endDate', models.DateField(blank=True, null=True)),
('dateUpdated', models.DateTimeField(auto_now=True)),
('dateAdded', models.DateTimeField(default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='Summary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('page', models.CharField(max_length=100)),
('summary', models.TextField()),
],
),
migrations.CreateModel(
name='Technology',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('link', models.URLField()),
],
),
migrations.CreateModel(
name='WebsiteInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField()),
('name', models.CharField(max_length=100)),
('title', models.CharField(max_length=100)),
('description', models.TextField(max_length=500)),
('webType', models.CharField(max_length=10)),
('portfolioPageHeader', models.TextField()),
('experiencePageHeader', models.TextField()),
('educationPageHeader', models.TextField()),
('contactPageHeader', models.TextField()),
('email', models.EmailField(default='jiachengzhang1@email.arizona.edu', max_length=254)),
('github', models.URLField(default='https://github.com/jiachengzhang1')),
('linkedIn', models.URLField(default='www.linkedin.com/in/jiachengzhang-developer')),
('resumeLink', models.URLField()),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.IntegerField()),
('title', models.CharField(max_length=100)),
('subtitle', models.CharField(max_length=100)),
('content', ckeditor.fields.RichTextField()),
('demoURL', models.URLField(blank=True, null=True)),
('codeURL', models.URLField(blank=True, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='projects')),
('date', models.DateField(blank=True, null=True)),
('date_updated', models.DateTimeField(auto_now=True)),
('date_added', models.DateTimeField(default=django.utils.timezone.now)),
('technologies', models.ManyToManyField(related_name='project', to='portfolio_web.Technology')),
],
),
]
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import inspect
import socket
import unittest
import unittest.mock
from pants.util.socket import RecvBufferedSocket, is_readable
PATCH_OPTS = dict(autospec=True, spec_set=True)
class TestSocketUtils(unittest.TestCase):
@unittest.mock.patch('selectors.DefaultSelector', **PATCH_OPTS)
def test_is_readable(self, mock_selector):
mock_fileobj = unittest.mock.Mock()
mock_selector = mock_selector.return_value.__enter__.return_value
mock_selector.register = unittest.mock.Mock()
# NB: the return value should actually be List[Tuple[SelectorKey, Events]], but our code only
# cares that _some_ event happened so we choose a simpler mock here. See
# https://docs.python.org/3/library/selectors.html#selectors.BaseSelector.select.
mock_selector.select = unittest.mock.Mock(return_value=[(1, "")])
self.assertTrue(is_readable(mock_fileobj, timeout=0.1))
mock_selector.select = unittest.mock.Mock(return_value=[])
self.assertFalse(is_readable(mock_fileobj, timeout=0.1))
class TestRecvBufferedSocket(unittest.TestCase):
def setUp(self):
self.chunk_size = 512
self.mock_socket = unittest.mock.Mock()
self.client_sock, self.server_sock = socket.socketpair()
self.buf_sock = RecvBufferedSocket(self.client_sock, chunk_size=self.chunk_size)
self.mocked_buf_sock = RecvBufferedSocket(self.mock_socket, chunk_size=self.chunk_size)
def tearDown(self):
self.buf_sock.close()
self.server_sock.close()
def test_getattr(self):
self.assertTrue(inspect.ismethod(self.buf_sock.recv))
self.assertFalse(inspect.isbuiltin(self.buf_sock.recv))
self.assertTrue(inspect.isbuiltin(self.buf_sock.connect))
def test_recv(self):
self.server_sock.sendall(b'A' * 300)
self.assertEqual(self.buf_sock.recv(1), b'A')
self.assertEqual(self.buf_sock.recv(200), b'A' * 200)
self.assertEqual(self.buf_sock.recv(99), b'A' * 99)
def test_recv_max_larger_than_buf(self):
double_chunk = self.chunk_size * 2
self.server_sock.sendall(b'A' * double_chunk)
self.assertEqual(self.buf_sock.recv(double_chunk), b'A' * double_chunk)
@unittest.mock.patch('selectors.DefaultSelector', **PATCH_OPTS)
def test_recv_check_calls(self, mock_selector):
mock_selector = mock_selector.return_value.__enter__.return_value
mock_selector.register = unittest.mock.Mock()
# NB: the return value should actually be List[Tuple[SelectorKey, Events]], but our code only
# cares that _some_ event happened so we choose a simpler mock here. See
# https://docs.python.org/3/library/selectors.html#selectors.BaseSelector.select.
mock_selector.select = unittest.mock.Mock(return_value=[(1, "")])
self.mock_socket.recv.side_effect = [b'A' * self.chunk_size, b'B' * self.chunk_size]
self.assertEqual(self.mocked_buf_sock.recv(128), b'A' * 128)
self.mock_socket.recv.assert_called_once_with(self.chunk_size)
self.assertEqual(self.mocked_buf_sock.recv(128), b'A' * 128)
self.assertEqual(self.mocked_buf_sock.recv(128), b'A' * 128)
self.assertEqual(self.mocked_buf_sock.recv(128), b'A' * 128)
self.assertEqual(self.mock_socket.recv.call_count, 1)
self.assertEqual(self.mocked_buf_sock.recv(self.chunk_size), b'B' * self.chunk_size)
self.assertEqual(self.mock_socket.recv.call_count, 2)
|
import logging
import re
from collections import defaultdict
from typing import Callable, TextIO
from .schemas import Score, Note, Metadata
logger = logging.getLogger(__name__)
def process_metadata(lines: list[tuple[str]]) -> Metadata:
result = {}
for line in lines:
if len(line) == 2:
key, value = line
else:
key = line[0]
value = None
key = key[1:]
value = value.strip('"') if value is not None else None
if key == 'TITLE':
result['title'] = value
elif key == 'SUBTITLE':
result['subtitle'] = value
elif key == 'ARTIST':
result['artist'] = value
elif key == 'GENRE':
result['genre'] = value
elif key == 'DESIGNER':
result['designer'] = value
elif key == 'DIFFICULTY':
result['difficulty'] = value
elif key == 'PLAYLEVEL':
result['playlevel'] = value
elif key == 'SONGID':
result['songid'] = value
elif key == 'WAVE':
result['wave'] = value
elif key == 'WAVEOFFSET':
result['waveoffset'] = float(value)
elif key == 'JACKET':
result['jacket'] = value
elif key == 'BACKGROUND':
result['background'] = value
elif key == 'MOVIE':
result['movie'] = value
elif key == 'MOVIEOFFSET':
result['movieoffset'] = float(value)
elif key == 'BASEBPM':
result['basebpm'] = float(value)
elif key == 'REQUEST':
if 'requests' not in result:
result['requests'] = []
result['requests'].append(value)
return Metadata.from_dict(result)
def process_score(lines: list[tuple[str]], metadata: list[tuple[str]]) -> Score:
processed_metadata = process_metadata(metadata)
try:
ticks_per_beat_request = [int(request.split()[1]) for request in processed_metadata.requests if request.startswith('ticks_per_beat')] if processed_metadata.requests else []
ticks_per_beat = ticks_per_beat_request[0]
except IndexError:
logger.warning('No ticks_per_beat request found, defaulting to 480.')
ticks_per_beat = 480
bar_lengths: list[tuple[int, float]] = []
for header, data in lines:
if len(header) == 5 and header.endswith('02') and header.isdigit():
bar_lengths.append((int(header[0:3]), float(data)))
if len(bar_lengths) == 0:
logger.warning('No bar lengths found, adding default 4/4 time signature (#00002:4)...')
bar_lengths.append((0, 4.0))
sorted_bar_lengths = sorted(bar_lengths, key=lambda x: x[0])
ticks = 0
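# Build one (start_measure, ticks_per_measure, start_tick) entry per time-signature
# change, accumulating start ticks, and keep them in descending measure order so
# to_tick() can take the first entry whose start_measure is <= the requested measure.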
bars = list(reversed(
[
(
measure, int(beats * ticks_per_beat),
ticks := ticks +
int((measure - sorted_bar_lengths[i - 1][0]) * sorted_bar_lengths[i - 1][1] * ticks_per_beat if i > 0 else 0)
) for i, (measure, beats) in enumerate(sorted_bar_lengths)
]
))
def to_tick(measure: int, i: int, total: int) -> int:
bar = next((bar for bar in bars if measure >= bar[0]), None)
if not bar: raise ValueError(f'Measure {measure} is out of range.')
(bar_measure, ticks_per_measure, ticks) = bar
return ticks + (measure - bar_measure) * ticks_per_measure + (i * ticks_per_measure) // total
bpm_map = {}
bpm_change_objects = []
tap_notes = []
directional_notes = []
streams = defaultdict(list)
for header, data in lines:
if (len(header) == 5 and header.startswith('BPM')):
bpm_map[header[3:]] = float(data)
elif (len(header) == 5 and header.endswith('08')):
bpm_change_objects += to_raw_objects(header, data, to_tick)
elif (len(header) == 5 and header[3] == '1'):
tap_notes += to_note_objects(header, data, to_tick)
elif (len(header) == 6 and header[3] == '3'):
channel = header[5]
streams[channel] += to_note_objects(header, data, to_tick)
elif (len(header) == 5 and header[3] == '5'):
directional_notes += to_note_objects(header, data, to_tick)
slide_notes = []
for stream in streams.values():
slide_notes += to_slides(stream)
bpms = [
(tick, bpm_map[value] or 0)
for tick, value in
sorted(bpm_change_objects, key=lambda x: x[0])
]
return Score(
metadata=processed_metadata,
taps=tap_notes,
directionals=directional_notes,
slides=slide_notes,
bpms=bpms,
bar_lengths=bar_lengths
)
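# Group a channel's long-note stream into slides: notes are sorted by tick and a
# note of type 2 closes the slide currently being built.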
def to_slides(stream: list[Note]) -> list[list[Note]]:
slides: list[list[Note]] = []
current: list[Note] = None
for note in sorted(stream, key=lambda x: x.tick):
if not current:
current = []
slides.append(current)
current.append(note)
if note.type == 2:
current = None
return slides
def to_note_objects(header: str, data: str, to_tick: Callable[[int, int, int], int]) -> list[Note]:
return [
Note(
tick=tick,
lane=int(header[4], 36),
width=int(value[1], 36),
type=int(value[0], 36),
)
for tick, value in to_raw_objects(header, data, to_tick)
]
def to_raw_objects(header: str, data: str, to_tick: Callable[[int, int, int], int]) -> list[tuple[int, str]]:
measure = int(header[:3])
values = list(enumerate(re.findall(r'.{2}', data)))
return [
(to_tick(measure, i, len(values)), value)
for i, value in values
if value != '00'
]
def load(fp: TextIO) -> Score:
return loads(fp.read())
def loads(data: str) -> Score:
"""
Parse SUS data into a Score object.
:param data: The score data.
:return: A Score object.
"""
metadata = []
scoredata = []
for line in data.splitlines():
if not line.startswith('#'):
continue
line = line.strip()
match = re.match(r'^#(\w+):\s*(.*)$', line)
if match:
scoredata.append(match.groups())
else:
metadata.append(tuple(line.split(' ', 1)))
return process_score(scoredata, metadata)
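# Minimal usage sketch (assumption: "chart.sus" is a placeholder path to a SUS chart;
# Score/Metadata fields come from the .schemas module imported above).
if __name__ == "__main__":
    with open("chart.sus", encoding="utf-8") as f:
        score = load(f)
    print(score.metadata.title, len(score.taps), "taps,", len(score.slides), "slides")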
|